With Azure Databricks I'm able to list the files in the blob storage and get them into an array. But when I try to open one of the files I get an error, probably due to the special path syntax.
storage_account_name = "tesb"
storage_container_name = "rttracking-in"
storage_account_access_key = "xyz"
file_location = "wasbs://rttracking-in"
file_type = "xml"
spark.conf.set(
    "fs.azure.account.key." + storage_account_name + ".blob.core.windows.net",
    storage_account_access_key)
xmlfiles = dbutils.fs.ls("wasbs://"+storage_container_name+"#"+storage_account_name+".blob.core.windows.net/")
import pandas as pd
import xml.etree.ElementTree as ET
import re
import os
firstfile = xmlfiles[0].path
root = ET.parse(firstfile).getroot()
The error is
IOError: [Errno 2] No such file or directory: u'wasbs://rttracking-in#tstoweuyptoesb.blob.core.windows.net/rtTracking_00001.xml'
My guess is that ET.parse() does not know about the Spark context in which you have set up the connection to the storage account. As an alternative, you can mount the storage; then you can access the files through native paths as if they were local.
See here: https://docs.databricks.com/spark/latest/data-sources/azure/azure-storage.html#mount-an-azure-blob-storage-container
This should work then:
root = ET.parse("/mnt/<mount-name>/...")
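For reference, a minimal mount sketch, assuming the variable names from the question and a made-up mount point (see the linked docs for secret handling and the full option list):
dbutils.fs.mount(
    source="wasbs://" + storage_container_name + "@" + storage_account_name + ".blob.core.windows.net/",
    mount_point="/mnt/rttracking",  # placeholder mount point, pick your own
    extra_configs={
        # same account-key config as used with spark.conf.set above
        "fs.azure.account.key." + storage_account_name + ".blob.core.windows.net": storage_account_access_key
    }
)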
I did mount the storage, and then this does the trick:
firstfile = xmlfiles[0].path.replace('dbfs:','/dbfs')
root = ET.parse(firstfile).getroot()
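To illustrate the replace trick (the path below is just an example based on the error message above), the dbfs: URI returned by dbutils is rewritten to the /dbfs FUSE path that plain Python I/O can open:
#   dbfs:/mnt/<mount-name>/rtTracking_00001.xml    (what dbutils.fs.ls returns)
#   /dbfs/mnt/<mount-name>/rtTracking_00001.xml    (what open()/ET.parse can read)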
I have been trying to read a PDF file with Form Recognizer using Python/PySpark inside a Databricks notebook. The SDK method I'm using only lets me read a file from a URL. When I try to pass the ADLS path, I get a "no such file or directory" error on this line: poller = document_analysis_client.begin_analyze_document_from_url("prebuilt-layout", formUrl).
import os
from azure.core.credentials import AzureKeyCredential
from azure.ai.formrecognizer import DocumentAnalysisClient
endpoint = "endpoint"
key = "key"
formUrl = "file:\\dev-app\data\zPathology.pdf"
document_analysis_client = DocumentAnalysisClient(
    endpoint=endpoint, credential=AzureKeyCredential(key)
)
poller = document_analysis_client.begin_analyze_document_from_url("prebuilt-layout", formUrl)
result = poller.result()
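A hedged sketch of one possible direction (an assumption, not a confirmed fix): begin_analyze_document_from_url needs a publicly reachable URL, while the non-URL variant begin_analyze_document accepts the document bytes, so the PDF can be read from a locally visible path first. The /dbfs path below is a placeholder.
from azure.core.credentials import AzureKeyCredential
from azure.ai.formrecognizer import DocumentAnalysisClient

endpoint = "endpoint"
key = "key"

document_analysis_client = DocumentAnalysisClient(
    endpoint=endpoint, credential=AzureKeyCredential(key)
)

# Hedged sketch: read the PDF from a path the notebook can see (placeholder path)
# and pass the bytes to begin_analyze_document instead of the _from_url variant.
with open("/dbfs/mnt/dev-app/data/zPathology.pdf", "rb") as f:
    poller = document_analysis_client.begin_analyze_document("prebuilt-layout", f)
result = poller.result()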
I am trying to download a list of CSV files from Azure Blob Storage using a shared SAS token, but I am getting all sorts of errors.
I tried looking this up and tried multiple code samples from contributors on Stack Overflow and in the Azure documentation. Here is the final state of the code I put together from those sources. It tries to download the list of CSV files in a pooled manner (the blob storage container holds 200 CSV files):
NB: I left the commented-out code snippets in to show the different approaches I tried. Sorry if they are confusing!
from itertools import tee
from multiprocessing import Process
from multiprocessing.pool import ThreadPool
import os
from azure.storage.blob import BlobServiceClient, BlobClient
from azure.storage.blob import ContentSettings, ContainerClient
#from azure.storage.blob import BlockBlobService
STORAGEACCOUNTURL = "https://myaccount.blob.core.windows.net"
STORAGEACCOUNTKEY = "sv=2020-08-04&si=blobpolicyXYZ&sr=c&sig=xxxxxxxxxxxxxxxxxxxxxxxxxxxx"
CONTAINERNAME = "mycontainer"
##BLOBNAME = "??"
sas_url = 'https://myaccount.blob.core.windows.net/mycontainer/mydir?sv=2020-08-04&si=blobpolicyXYZ&sr=c&sig=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
LOCAL_BLOB_PATH = "./downloads"
class AzureBlobFileDownloader:
    def __init__(self):
        print("Initializing AzureBlobFileDownloader")

        # Initialize the connection to Azure storage account
        self.blob_service_client_instance = ContainerClient.from_container_url #BlobClient.from_blob_url(sas_url) #BlobServiceClient(account_url=STORAGEACCOUNTURL, credential=STORAGEACCOUNTKEY)
        #self.blob_client_instance = self.blob_service_client_instance.get_blob_client(CONTAINERNAME, BLOBNAME)
        #self.blob_service_client = BlobServiceClient.from_connection_string(MY_CONNECTION_STRING)
        #self.my_container = self.blob_service_client.get_container_client(MY_BLOB_CONTAINER)
        #self.blob_service_client = BlockBlobService("storage_account",sas_token="?sv=2018-03-28&ss=bfqt&srt=sco&sp=rwdlacup&se=2019-04-24T10:01:58Z&st=2019-04-23T02:01:58Z&spr=https&sig=xxxxxxxxx")
        #self.my_container = self.blob_service_client.get_blob_to_path("container_name","blob_name","local_file_path")

    def save_blob(self, file_name, file_content):
        # Get full path to the file
        download_file_path = os.path.join(LOCAL_BLOB_PATH, file_name)
        # for nested blobs, create local path as well!
        os.makedirs(os.path.dirname(download_file_path), exist_ok=True)
        with open(download_file_path, "wb") as file:
            file.write(file_content)

    def download_all_blobs_in_container(self):
        # get a list of blobs
        my_blobs = self.blob_service_client_instance.get_block_list() #list_blobs() #self.blob_client_instance.list_blobs() download_blob() #
        print(my_blobs)

        # iterate through the iterable object for testing purposes, maybe wrong approach!
        result, result_backup = tee(my_blobs)
        print("**first iterate**")
        for i, r in enumerate(result):
            print(r)

        # start downloading my_blobs
        result = self.run(my_blobs)
        print(result)

    def run(self, blobs):
        # Download 3 files at a time!
        with ThreadPool(processes=int(3)) as pool:
            return pool.map(self.save_blob_locally, blobs)

    def save_blob_locally(self, blob):
        file_name = blob.name
        print(file_name)
        bytes = self.blob_service_client_instance.get_blob_client(CONTAINERNAME, blob).download_blob().readall()
        # Get full path to the file
        download_file_path = os.path.join(LOCAL_BLOB_PATH, file_name)
        # for nested blobs, create local path as well!
        os.makedirs(os.path.dirname(download_file_path), exist_ok=True)
        with open(download_file_path, "wb") as file:
            file.write(bytes)
        return file_name

# Initialize class and download files
azure_blob_file_downloader = AzureBlobFileDownloader()
azure_blob_file_downloader.download_all_blobs_in_container()
Could someone help me achieve this task in Python:
get a list of all files in the blob storage; the file names are prefixed with part-
download them to a folder locally
Thanks
Could someone help me achieve this task in Python: get a list of all files in the blob storage; the file names are prefixed with part-
To list all the blobs whose names start with "part-", you can use blob_service.list_blobs(<Container Name>, prefix="<Your Prefix>"). Below is the code to get that list of blobs.
print("\nList blobs in the container")
generator = blob_service.list_blobs(CONTAINER_NAME, prefix="part-")
for blob in generator:
    print("\t Blob name: " + blob.name)
download them to a folder locally
To download a blob you can use blob_client = blob_service.get_blob_to_path(<Container Name>, <Blob Name>, <File Path>). Below is the code to download the blobs as per your requirement.
blob_client = blob_service.get_blob_to_path(CONTAINER_NAME,blob.name,fname)
Below is the complete code that worked for us and achieves your requirement.
import os
from azure.storage.blob import BlockBlobService

ACCOUNT_NAME = "<Your_ACCOUNT_NAME>"
ACCOUNT_KEY = "<YOUR_ACCOUNT_KEY>"
CONTAINER_NAME = "<YOUR_CONTAINER_NAME>"
LOCAL_BLOB_PATH = "C:\\<YOUR_PATH>\\downloadedfiles"

blob_service = BlockBlobService(ACCOUNT_NAME, ACCOUNT_KEY)

# Lists all blobs which have a prefix of part-
print("\nList blobs in the container")
generator = blob_service.list_blobs(CONTAINER_NAME, prefix="part-")
for blob in generator:
    print("\t Blob name: " + blob.name)

# Downloading the blobs to a folder
for blob in generator:
    # Adds blob name to the path
    fname = os.path.join(LOCAL_BLOB_PATH, blob.name)
    print(f'Downloading {blob.name} to {fname}')
    # Downloading blob into file
    blob_client = blob_service.get_blob_to_path(CONTAINER_NAME, blob.name, fname)
RESULT:
(Screenshots: files in my Storage Account, files in my local folder)
Updated answer, using a SAS token instead of the account key:
blob_service = BlockBlobService(account_name=ACCOUNT_NAME, account_key=None, sas_token=SAS_TOKEN)

# Lists all blobs which have a prefix of part-
print("\nList blobs in the container")
generator = blob_service.list_blobs(CONTAINER_NAME, prefix="directory1"+"/"+"part-")
for blob in generator:
    print("\t Blob name: " + blob.name)

# Downloading the blobs to a folder
for blob in generator:
    # Adds blob name to the path
    fname = os.path.join(LOCAL_BLOB_PATH, blob.name)
    print(f'Downloading {blob.name} to {fname}')
    # Downloading blob into file
    blob_client = blob_service.get_blob_to_path(CONTAINER_NAME, blob.name, fname)
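As a side note, BlockBlobService comes from the legacy azure-storage SDK. A rough, hedged sketch of the equivalent with the v12 azure.storage.blob package that the question already imports might look like this (the container-level SAS URL, prefix, and local path are placeholders/assumptions):
import os
from azure.storage.blob import ContainerClient

LOCAL_BLOB_PATH = "./downloads"

# from_container_url expects a URL pointing at the container itself (with the SAS
# appended), not at a blob or directory inside it.
container = ContainerClient.from_container_url(
    "https://myaccount.blob.core.windows.net/mycontainer?<container-level-SAS>"
)

for blob in container.list_blobs(name_starts_with="part-"):
    local_path = os.path.join(LOCAL_BLOB_PATH, blob.name)
    os.makedirs(os.path.dirname(local_path), exist_ok=True)
    with open(local_path, "wb") as f:
        f.write(container.download_blob(blob.name).readall())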
I am following this link and getting an error:
How to upload folder on Google Cloud Storage using Python API
I have saved the model in the container environment, and from there I want to copy it to a GCS bucket.
Here is my code:
storage_client = storage.Client(project='*****')
def upload_local_directory_to_gcs(local_path, bucket, gcs_path):
bucket = storage_client.bucket(bucket)
assert os.path.isdir(local_path)
for local_file in glob.glob(local_path + '/**'):
print(local_file)
print("this is bucket",bucket)
blob = bucket.blob(gcs_path)
print("here")
blob.upload_from_filename(local_file)
print("done")
path="/pythonPackage/trainer/model_mlm_demo" #this is local absolute path where my folder is. Folder name is **model_mlm_demo**
buc="py*****" #this is my GCP bucket address
gcs="model_mlm_demo2/" #this is the new folder that I want to store files in GCP
upload_local_directory_to_gcs(local_path=path, bucket=buc, gcs_path=gcs)
/pythonPackage/trainer/model_mlm_demo has 3 files in it: config, model.bin and arguments.bin
ERROR
The code doesn't throw any error, but no files are uploaded to the GCS bucket. It just creates an empty folder.
From what I can see, you don't need to pass gs:// as the bucket parameter. Here is an example you may want to check out:
https://cloud.google.com/storage/docs/uploading-objects#storage-upload-object-python
def upload_blob(bucket_name, source_file_name, destination_blob_name):
    """Uploads a file to the bucket."""
    # The ID of your GCS bucket
    # bucket_name = "your-bucket-name"
    # The path to your file to upload
    # source_file_name = "local/path/to/file"
    # The ID of your GCS object
    # destination_blob_name = "storage-object-name"

    storage_client = storage.Client()
    bucket = storage_client.bucket(bucket_name)
    blob = bucket.blob(destination_blob_name)

    blob.upload_from_filename(source_file_name)

    print(
        "File {} uploaded to {}.".format(
            source_file_name, destination_blob_name
        )
    )
I have reproduced your issue, and the code snippet below works fine. I have updated the code based on the folders and names you mentioned in the question. Let me know if you have any issues.
import os
import glob
from google.cloud import storage

storage_client = storage.Client(project='')

def upload_local_directory_to_gcs(local_path, bucket, gcs_path):
    bucket = storage_client.bucket(bucket)
    assert os.path.isdir(local_path)
    for local_file in glob.glob(local_path + '/**'):
        print(local_file)
        print("this is bucket", bucket)
        filename = local_file.split('/')[-1]
        blob = bucket.blob(gcs_path + filename)
        print("here")
        blob.upload_from_filename(local_file)
        print("done")

# this is local absolute path where my folder is. Folder name is **model_mlm_demo**
path = "/pythonPackage/trainer/model_mlm_demo"
buc = "py*****"  # this is my GCP bucket address
gcs = "model_mlm_demo2/"  # this is the new folder that I want to store files in GCP

upload_local_directory_to_gcs(local_path=path, bucket=buc, gcs_path=gcs)
I just came across the gcsfs library, which provides a filesystem-like interface on top of Google Cloud Storage. You can copy an entire directory into a GCS location like this:
import gcsfs

def upload_to_gcs(src_dir: str, gcs_dst: str):
    fs = gcsfs.GCSFileSystem()
    fs.put(src_dir, gcs_dst, recursive=True)
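For example, a call with the placeholder paths from the question (my assumption about how it would be invoked):
# Example call; the bucket and folder names are the question's placeholders.
upload_to_gcs("/pythonPackage/trainer/model_mlm_demo", "py*****/model_mlm_demo2")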
I figured out a way using subprocess to upload the model artefacts to the GCP bucket.
import subprocess
subprocess.call('gsutil cp -r source_folder_in_local gs://*****/folder_name', shell=True, stdout=subprocess.PIPE)
If gsutil is not installed, you can install it using this link:
https://cloud.google.com/storage/docs/gsutil_install
I have files in Azure File storage, and I listed the files by upload date because I want to select the most recently uploaded file.
To do this, I created a function that should return the list of files. However, when I look at the output it returns only one file and the others are missing.
Here is my code:
file_service = FileService(account_name='', account_key='')
generator = list(file_service.list_directories_and_files(''))

def list_files_in(generator, file_service):
    list_files = []
    for file_or_dir in generator:
        file_in = file_service.get_file_properties(share_name='', directory_name="", file_name=file_or_dir.name)
        file_date = file_in.properties.last_modified.date()
        list_tuple = (file_date, file_or_dir.name)
        list_files.append(list_tuple)
        return list_files
To get the latest file, you need to write that logic yourself; below is code that retrieves the last-modified time in both cases.
For Files
result = file_service.list_directories_and_files(share_name, directory_name)
for file_or_dir in result:
    if isinstance(file_or_dir, File):
        file = file_service.get_file_properties(share_name, directory_name, file_or_dir.name, timeout=None, snapshot=None)
        print(file_or_dir.name, file.properties.last_modified)
For Blob
from azure.storage.blob import ContainerClient

container = ContainerClient.from_connection_string(conn_str={your_connection_string}, container_name={your_container_name})

for blob in container.list_blobs():
    print(f'{blob.name} : {blob.last_modified}')
You can get the keys and account details for access from the Azure portal. Here, blob.last_modified gives you the last-modified time of each blob, from which you can pick the latest item.
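Tying this back to the original goal of selecting the most recently uploaded file, a small hedged sketch that reuses the file_service, share_name, directory_name and File names from the "For Files" snippet above:
# Hedged sketch: track the newest (last_modified, name) pair while iterating.
latest = None
for file_or_dir in file_service.list_directories_and_files(share_name, directory_name):
    if isinstance(file_or_dir, File):
        props = file_service.get_file_properties(share_name, directory_name, file_or_dir.name)
        candidate = (props.properties.last_modified, file_or_dir.name)
        if latest is None or candidate > latest:
            latest = candidate

if latest is not None:
    print(f"Most recent file: {latest[1]} (last modified {latest[0]})")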
I would like to read a CSV file from Azure Blob Storage with a Python Azure Function. Using the code below, I get the error:
[Errno 30] Read-only file system: '/home/site/wwwroot/my_csv_file'
And the code snippet is:
work_directory = os.getcwd()
filename = my_csv_file
account_name = <blob_storage>
account_key = <blob_key>

os.chmod(work_directory, 0o777)
input_fpath = os.path.join(work_directory, filename)

block_blob_service = BlockBlobService(account_name, account_key)
block_blob_service.get_blob_to_path(container_name=<input_container_name>, blob_name=filename,
                                    file_path=input_fpath)
How could I change the permissions, or how else can I read the CSV into Python?
SOLVED
using get_blob_to_text instead of get_blob_to_path:
blobstring = block_blob_service.get_blob_to_text(<input_container_name>, filename).content
The solution was found here.
chmod is not required. So, the whole code is the following:
filename = my_csv_file
account_name = <blob_storage>
account_key = <blob_key>
block_blob_service = BlockBlobService(account_name, account_key)
blobstring = block_blob_service.get_blob_to_text(<input_container_name>, filename).content
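If the goal is to get the CSV into a DataFrame, a small follow-on sketch (the pandas step is my assumption, not part of the original answer):
import io
import pandas as pd

# Hedged sketch: parse the CSV text returned by get_blob_to_text into a DataFrame.
df = pd.read_csv(io.StringIO(blobstring))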
For now the Python Azure Functions file system is read-only, and this cannot be changed. So you can neither use the chmod method nor get_blob_to_path, because that would write the file to your disk.
Instead, you could read the file into a stream and then send it in the response. You can refer to my code below; I use the Blob input binding to read a text file.
import logging
import azure.functions as func

def main(req: func.HttpRequest, inputblob: func.InputStream) -> func.HttpResponse:
    logging.info('Python HTTP trigger function processed a request.')

    name = req.params.get('name')
    if not name:
        try:
            req_body = req.get_json()
        except ValueError:
            pass
        else:
            name = req_body.get('name')

    if name:
        return func.HttpResponse(inputblob.read(size=-1))
    else:
        return func.HttpResponse(
            "Please pass a name on the query string or in the request body",
            status_code=400
        )