That would require holding the entire stream in memory; unless your machine has that much RAM, this code will not work and will at some point raise an out-of-memory exception.
I would recommend uploading the stream in chunks instead of writing it all at once.
Here is the function that uploads a stream in chunks:
def _upload_blob_chunks(blob_service, container_name, blob_name,
                        blob_size, block_size, stream, max_connections,
                        progress_callback, validate_content, lease_id, uploader_class,
                        maxsize_condition=None, if_modified_since=None, if_unmodified_since=None, if_match=None,
                        if_none_match=None, timeout=None,
                        content_encryption_key=None, initialization_vector=None, resource_properties=None):
    encryptor, padder = _get_blob_encryptor_and_padder(content_encryption_key, initialization_vector,
                                                       uploader_class is not _PageBlobChunkUploader)

    uploader = uploader_class(
        blob_service,
        container_name,
        blob_name,
        blob_size,
        block_size,
        stream,
        max_connections > 1,
        progress_callback,
        validate_content,
        lease_id,
        timeout,
        encryptor,
        padder
    )

    uploader.maxsize_condition = maxsize_condition

    # Access conditions do not work with parallelism
    if max_connections > 1:
        uploader.if_match = uploader.if_none_match = uploader.if_modified_since = uploader.if_unmodified_since = None
    else:
        uploader.if_match = if_match
        uploader.if_none_match = if_none_match
        uploader.if_modified_since = if_modified_since
        uploader.if_unmodified_since = if_unmodified_since

    if progress_callback is not None:
        progress_callback(0, blob_size)

    if max_connections > 1:
        import concurrent.futures
        from threading import BoundedSemaphore

        '''
        Ensures we bound the chunking so we only buffer and submit 'max_connections' amount of work items to the
        executor. This is necessary as the executor queue would otherwise keep accepting submitted work items,
        which results in buffering all the blocks in memory. Using max_connections + 1 ensures the next chunk is
        already buffered and ready for when a worker thread becomes available.
        '''
        chunk_throttler = BoundedSemaphore(max_connections + 1)

        executor = concurrent.futures.ThreadPoolExecutor(max_connections)
        futures = []
        running_futures = []

        for chunk in uploader.get_chunk_streams():
            # Check for exceptions and fail fast.
            # Iterate over a snapshot so finished futures can be removed safely during iteration.
            for f in list(running_futures):
                if f.done():
                    if f.exception():
                        raise f.exception()
                    else:
                        running_futures.remove(f)

            chunk_throttler.acquire()
            future = executor.submit(uploader.process_chunk, chunk)

            # Calls the callback upon completion (even if the callback was added after the Future task is done).
            future.add_done_callback(lambda x: chunk_throttler.release())
            futures.append(future)
            running_futures.append(future)

        # result() will wait until completion and also raise any exceptions that may have been set.
        range_ids = [f.result() for f in futures]
    else:
        range_ids = [uploader.process_chunk(result) for result in uploader.get_chunk_streams()]

    if resource_properties:
        resource_properties.last_modified = uploader.last_modified
        resource_properties.etag = uploader.etag

    return range_ids
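Note that _upload_blob_chunks is an internal helper of the SDK; you normally reach it through the public upload methods. Below is a minimal sketch, assuming the legacy azure-storage-blob (v2.x) package, of uploading a stream in chunks via create_blob_from_stream; the account name, key, container, and file path are placeholders:

import os
from azure.storage.blob import BlockBlobService

blob_service = BlockBlobService(account_name='mystorageaccount',
                                account_key='<account-key>')

def progress(current, total):
    # Invoked by the SDK as each chunk completes.
    print('{} of {} bytes uploaded'.format(current, total))

file_path = r'C:\Users\Public\Documents\Virtual hard disks\myVHD.vhd'
with open(file_path, 'rb') as stream:
    # Large uploads are routed through _upload_blob_chunks internally;
    # max_connections > 1 uploads the blocks in parallel.
    blob_service.create_blob_from_stream(
        'mycontainer',
        'myUploadedVHD.vhd',
        stream,
        count=os.path.getsize(file_path),
        max_connections=4,
        progress_callback=progress)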
For reference, you can review the source below:
https://github.com/Azure/azure-storage-python/blob/master/azure-storage-blob/azure/storage/blob/_upload_chunking.py
There is also a similar thread for the same kind of request:
how to transfer a file to Azure blob storage in chunks without writing to a file using Python
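If you want to manage the blocks yourself, as discussed in that thread, the sketch below stages blocks with put_block and commits them with put_block_list (again assuming the legacy azure-storage-blob v2.x package; the chunk size and names are illustrative):

import uuid
from azure.storage.blob import BlockBlobService
from azure.storage.blob.models import BlobBlock

blob_service = BlockBlobService(account_name='mystorageaccount',
                                account_key='<account-key>')

CHUNK_SIZE = 4 * 1024 * 1024  # 4 MiB per block; illustrative

def upload_stream_in_blocks(stream, container_name, blob_name):
    blocks = []
    while True:
        data = stream.read(CHUNK_SIZE)
        if not data:
            break
        # Only the current chunk is held in memory at any time.
        block_id = uuid.uuid4().hex  # block ids must be unique and of equal length
        blob_service.put_block(container_name, blob_name, data, block_id)
        blocks.append(BlobBlock(id=block_id))
    # Commit the staged blocks as the final blob.
    blob_service.put_block_list(container_name, blob_name, blocks)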
Alternatively, you can use PowerShell to upload the VHD to the VM's storage account, as shown below:
$rgName = "myResourceGroup"
$urlOfUploadedImageVhd = "https://mystorageaccount.blob.core.windows.net/mycontainer/myUploadedVHD.vhd"
Add-AzVhd -ResourceGroupName $rgName -Destination $urlOfUploadedImageVhd `
-LocalFilePath "C:\Users\Public\Documents\Virtual hard disks\myVHD.vhd"
Here is the link for the same:
https://docs.microsoft.com/en-us/azure/virtual-machines/windows/upload-generalized-managed
Hope this helps.