How to download files from an S3 bucket based on the files' last modified date? - python-3.x

I want to download files from a particular S3 bucket based on the files' last modified date.
I have researched how to connect with boto3, and there is plenty of code and documentation available for downloading files without any conditions. I put together some pseudocode:
def download_file_s3(bucket_name, modified_date):
    # connect to the S3 resource
    s3 = boto3.resource('s3', aws_access_key_id='demo', aws_secret_access_key='demo')
    # connect to the desired bucket
    my_bucket = s3.Bucket(bucket_name)
    # get the files
    for file in my_bucket.objects.all():
I want to complete this function so that, given a modified date, it returns the files in the S3 bucket with that particular last modified date.
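A minimal way to complete that sketch (assuming the match is on the date part of each object's last_modified and that files are saved into the current directory; the 'demo' credentials are placeholders) could be:

import boto3
from datetime import date

def download_file_s3(bucket_name, modified_date: date):
    # connect to the S3 resource
    s3 = boto3.resource('s3', aws_access_key_id='demo', aws_secret_access_key='demo')
    my_bucket = s3.Bucket(bucket_name)
    matched = []
    for obj in my_bucket.objects.all():
        # last_modified is a timezone-aware datetime; compare only its date part
        if obj.last_modified.date() == modified_date:
            my_bucket.download_file(obj.key, obj.key.split('/')[-1])
            matched.append(obj.key)
    return matched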

I have a better solution: a function that does this automatically. Just pass in the bucket name and the download path.
from boto3.session import Session
from datetime import date, timedelta
import boto3
import re

def Download_pdf_specifc_date_subfolder(bucket_name, download_path):
    ACCESS_KEY = 'XYZ'
    SECRET_KEY = 'ABC'
    Bucket_name = bucket_name

    # code to create a session
    session = Session(aws_access_key_id=ACCESS_KEY,
                      aws_secret_access_key=SECRET_KEY)
    s3 = session.resource('s3')
    bucket = s3.Bucket(Bucket_name)

    # code to get yesterday's date
    yesterday = date.today() - timedelta(days=1)
    x = yesterday.strftime('%Y-%m-%d')
    print(x)

    # code to collect the files which need to be downloaded
    files_to_downloaded = []

    # code to take all the files from S3 under a specific bucket
    for fileObject in bucket.objects.all():
        file_name = str(fileObject.key)
        last_modified = str(fileObject.last_modified)
        last_modified = last_modified.split()
        if last_modified[0] == x:
            # enter the specific subfolder name in the regex in place of Airports
            # to filter only that particular subfolder
            if re.findall(r"Airports/[a-zA-Z]+", file_name):
                files_to_downloaded.append(file_name)

    # code to download into a specific folder
    for fileObject in bucket.objects.all():
        file_name = str(fileObject.key)
        if file_name in files_to_downloaded:
            print(file_name)
            d_path = download_path + file_name
            print(d_path)
            bucket.download_file(file_name, d_path)

Download_pdf_specifc_date_subfolder(bucket_name, download_path)
Ultimately the function will place the downloaded files in the specified download folder.
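As a side suggestion of mine (not part of the answer above): when the relevant objects always live under the same subfolder, boto3 can scope the listing server-side with a Prefix filter instead of walking the whole bucket twice; a minimal sketch assuming the same 'Airports/' prefix and placeholder names:

from datetime import date, timedelta
import boto3

s3 = boto3.resource('s3')                  # assumes credentials from the default chain
bucket = s3.Bucket('my-bucket')            # hypothetical bucket name
download_path = '/tmp/'                    # hypothetical local folder
target = date.today() - timedelta(days=1)  # yesterday, as above

# list only keys under the 'Airports/' prefix, filtered server-side
for obj in bucket.objects.filter(Prefix='Airports/'):
    if obj.last_modified.date() == target:
        bucket.download_file(obj.key, download_path + obj.key.split('/')[-1])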

Here is my test code; it will print the last_modified datetime of objects whose datetime is after the one I set.
import boto3
from datetime import datetime
from datetime import timezone

s3 = boto3.resource('s3')
response = s3.Bucket('<bucket name>').objects.all()
for item in response:
    obj = s3.Object(item.bucket_name, item.key)
    if obj.last_modified > datetime(2019, 8, 1, 0, 0, 0, tzinfo=timezone.utc):
        print(obj.last_modified)
If you have a specific date, then
import boto3
from datetime import datetime, timezone

s3 = boto3.resource('s3')
response = s3.Bucket('<bucket name>').objects.all()
date = '20190827'  # input('Insert Date as a form YYYYmmdd')
for item in response:
    obj = s3.Object(item.bucket_name, item.key)
    if obj.last_modified.strftime('%Y%m%d') == date:
        print(obj.last_modified)
will give the results as follows.
2019-08-27 07:13:04+00:00
2019-08-27 07:13:36+00:00
2019-08-27 07:13:39+00:00

I edited this answer to download all files after a certain timestamp and then write the current time to a file for use in the next iteration. You can easily adapt this to only download files of a specific date, month, year, yesterday, etc.
import os
import boto3
import datetime
import pandas as pd

### Load AWS Key, Secret and Region
# ....
###

# Open file to read last download time and update file with current time
latesttime_file = "latest request.txt"
with open(latesttime_file, 'r') as f:
    latest_download = pd.to_datetime(f.read(), utc=True)
with open(latesttime_file, 'w') as f:
    f.write(str(datetime.datetime.utcnow()))

# Initialize S3 client
s3_client = boto3.client('s3',
                         region_name=AWS_REGION,
                         aws_access_key_id=AWS_KEY_ID,
                         aws_secret_access_key=AWS_SECRET)

def download_dir(prefix, local, bucket, timestamp, client=s3_client):
    """
    params:
    - prefix: pattern to match in s3
    - local: local path to folder in which to place files
    - bucket: s3 bucket with target contents
    - timestamp: only download objects modified after this datetime
    - client: initialized s3 client object
    """
    keys = []
    dirs = []
    next_token = ''
    base_kwargs = {
        'Bucket': bucket,
        'Prefix': prefix,
    }
    while next_token is not None:
        kwargs = base_kwargs.copy()
        if next_token != '':
            kwargs.update({'ContinuationToken': next_token})
        results = client.list_objects_v2(**kwargs)
        # 'Contents' is absent when the listing is empty, so default to an empty list
        contents = results.get('Contents', [])
        for i in contents:
            k = i.get('Key')
            t = i.get('LastModified')
            if k[-1] != '/':
                if t > timestamp:
                    keys.append(k)
            else:
                dirs.append(k)
        next_token = results.get('NextContinuationToken')
    for d in dirs:
        dest_pathname = os.path.join(local, d)
        if not os.path.exists(os.path.dirname(dest_pathname)):
            os.makedirs(os.path.dirname(dest_pathname))
    for k in keys:
        dest_pathname = os.path.join(local, k)
        if not os.path.exists(os.path.dirname(dest_pathname)):
            os.makedirs(os.path.dirname(dest_pathname))
        client.download_file(bucket, k, dest_pathname)

download_dir(<prefix or ''>, <local folder to download to>, <bucketname>, latest_download)
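One caveat worth noting (my addition, under the assumption that the very first run should treat every object as new): the script expects "latest request.txt" to exist, so the first read raises FileNotFoundError. A small bootstrap sketch:

import os
import datetime

latesttime_file = "latest request.txt"
if not os.path.exists(latesttime_file):
    # seed with a timestamp far enough in the past that every object qualifies
    with open(latesttime_file, 'w') as f:
        f.write(str(datetime.datetime(1970, 1, 1)))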

Related

Reading a file from a different project in gcloud - file not found error even though the file exists

I am trying to read a JSON file from project B in Google Cloud using a service account from project A. The service account in project A is granted the read role in project B. But when I try to open the JSON file I get:
"FileNotFoundError: [Errno 2] No such file or directory: 'gs://x.json'". The file x.json does exist.
I checked the list of privileges (storage.objects.get, storage.objects.list) needed to read a file at https://cloud.google.com/storage/docs/access-control/iam-permissions.
Any help is appreciated. Thanks.
from google.cloud import bigquery
from analytics import Clients, ClientType
from datetime import datetime, date, timedelta
from pytz import timezone
from typing import List
from pyarrow import json as pyj
import pyarrow.parquet as pq
import newlinejson as nlj

bigquery_client = Clients.get_client(ClientType.STORAGE, name='w')
write_client = Clients.get_client(ClientType.BIGQUERY, name='w')
k_client = Clients.get_client(ClientType.BIGQUERY, name='w')
bucket = 'update'
file_name_prefix = "al_"
target_table = k_client.get_table("w.junk.json_table1")

def get_dates() -> List[str]:
    """
    Return dates for which log files have to be checked
    """
    end = date.fromisoformat(str(datetime.date(datetime.now(timezone("EST")))))
    return [str(end - timedelta(days=1)), str(end)]

def get_bucket_files(bucket, file_name_prefix):
    # if full_path:
    path = "gs://{}/{}"
    # path = "https://storage.googleapis.com/{}/{}"
    return [
        path.format(bucket, b.name)
        for b in bigquery_client.list_blobs(bucket, prefix=file_name_prefix)
    ]

def get_latest_file() -> str:
    """
    Get all files for the current prefix between start and end date
    """
    files = []
    files_json = []
    for d in get_dates():
        prefix = file_name_prefix + d[4:] + "-" + d[:4]
        files += get_bucket_files(bucket, file_name_prefix)
    for k in files:
        filename = k.split('/')[-1]
        if 'json' in filename:
            files_json.append(k)
    return max(files_json)

def pipeline():
    job_config = bigquery.LoadJobConfig(
        # schema=[
        #     bigquery.SchemaField("name", "STRING")
        # ],
        autodetect=True,
        source_format=bigquery.SourceFormat.NEWLINE_DELIMITED_JSON,
    )
    f = get_latest_file()
    print(f)
    table = pyj.read_json(f)
    # pq.write_table(target_table, table.parquet)
    # with nlj.open(f) as src:
    #     with nlj.open('out.json', 'w') as dst:
    #         for line in src:
    #             dst.write(line)
    # k_client.load_table_from_uri(
    #     f, target_table, job_config=job_config
    # ).result()

pipeline()
You may consider trying the approach below for listing objects in a bucket:
from google.cloud import storage

my_bucket = 'your-bucket-name'
my_prefix = 'al_'
client = storage.Client()

def get_bucket_files(bucket, file_name_prefix):
    for blob in client.list_blobs(bucket, prefix=file_name_prefix):
        print(str(blob))

get_bucket_files(my_bucket, my_prefix)
Output: one Blob entry is printed for each object whose name starts with the given prefix (listing omitted here).
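Listing only enumerates the blobs; the FileNotFoundError in the question most likely comes from handing a gs:// URI to a reader that only understands local paths (this is my reading, not confirmed). A minimal sketch of reading the JSON through the storage client instead, with placeholder bucket and object names:

import json
from google.cloud import storage

client = storage.Client()                   # uses the service account's default credentials
bucket = client.bucket('your-bucket-name')  # hypothetical bucket
blob = bucket.blob('x.json')                # hypothetical object key

data = json.loads(blob.download_as_text())  # fetch the object body instead of opening a gs:// path
print(data)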

How to get multiple inputs (JSON files for me) in AWS Lambda from the same user's S3 bucket?

I have attached my hardcoded Python program, which appends two JSON files from S3 storage manually. Can someone please tell me how to get multiple input files (JSON files) from the S3 bucket automatically? I know we can do this locally in Python using *.json in the program's directory, but I don't understand how to do the same in AWS Lambda.
Python Code:
import glob
import json

result = []
for f in glob.glob("*.json"):
    with open(f, "r") as infile:
        result += json.load(infile)

with open("merge.json", "w") as outfile:
    json.dump(result, outfile)
In Lambda I am able to do this for two files; can someone please suggest how to take all JSON files from S3 automatically? Thanks in advance.
import boto3
import json

s3_client = boto3.client("s3")
S3_BUCKET = 'bucket-for-json-files'

def lambda_handler(event, context):
    object_key = "sample1.json"  # replace object key
    file_content = s3_client.get_object(Bucket=S3_BUCKET, Key=object_key)["Body"].read()
    print(file_content)

    object_key2 = "sample2.json"  # replace object key
    file_content2 = s3_client.get_object(Bucket=S3_BUCKET, Key=object_key2)["Body"].read()
    print(file_content2)

    result = []
    result += json.loads(file_content)
    result += json.loads(file_content2)
    print(result)
I have followed the syntax from the documentation, but I still get a timeout error.
import boto3

# Create a client
client = boto3.client('s3', region_name='us-east-1')

# Create a reusable Paginator
paginator = client.get_paginator('list_objects')

# Create a PageIterator from the Paginator
page_iterator = paginator.paginate(Bucket='bucket-for-json-files')

for page in page_iterator:
    print(page['Contents'])
Getting a timeout error:
import boto3

s3_client = boto3.client("s3")
S3_BUCKET = 'bucket-for-json-files'

def iterate_bucket_items(S3_BUCKET):
    client = boto3.client('s3')
    paginator = client.get_paginator('list_objects_v2')
    page_iterator = paginator.paginate(Bucket=S3_BUCKET)

    for page in page_iterator:
        if page['KeyCount'] > 0:
            for item in page['Contents']:
                yield item

for i in iterate_bucket_items(S3_BUCKET):
    print(i)
I have solved the issue with the help of @JeremyThompson; I will attach my final code here:
import json
import boto3
import glob

def lambda_handler(event, context):
    s3 = boto3.resource('s3')
    bucket = s3.Bucket('bucket-for-json-files')

    # Create a client
    client = boto3.client('s3', region_name='us-east-1')

    # Create a reusable Paginator
    paginator = client.get_paginator('list_objects')

    # Create a PageIterator from the Paginator
    page_iterator = paginator.paginate(Bucket='bucket-for-json-files')

    result = []
    for page in page_iterator:
        result += page['Contents']

    s3 = boto3.client('s3')
    bucket = 'bucket-for-json-files'
    merge = []
    lst = []
    for i in result:
        cmd = i['Key']
        print(cmd)
The above code prints the key of each JSON file available in the user's bucket.
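To go further and actually merge the contents rather than just printing the keys (my own extension, assuming each object is a JSON file containing a list), the same loop can fetch and combine each body:

    # continuing inside lambda_handler, reusing `result`, the `s3` client and the `bucket` name from above
    merged = []
    for i in result:
        key = i['Key']
        if key.endswith('.json'):
            body = s3.get_object(Bucket=bucket, Key=key)['Body'].read()
            merged += json.loads(body)  # assumes each file holds a JSON list
    print(merged)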

Difficulty executing the boto3 S3 copy function using AWS Lambda

Here is the scenario: I have an S3 bucket (e.g. daily-data-input) where daily files are written to a specific folder (e.g. s3://daily-data-input/data/test/). Whenever a file is written under the "test" folder, a copy should also be written to the "test_copy" folder in the same bucket. If "test_copy" does not exist, it should be created.
I have used an S3 event notification attached to a Lambda function (Python 3.7) which checks whether the "test_copy" key exists and creates it if not. I am able to create the "test_copy" folder successfully, but I couldn't get the S3 copy via boto3 working.
Here is the code for your reference:
import boto3
import os
import botocore

s3 = boto3.resource('s3')
s3_cli = boto3.client('s3')

def lambda_handler(event, context):
    bucket_name = event['Records'][0]['s3']['bucket']['name']
    bucket_key = event['Records'][0]['s3']['object']['key']
    file = (os.path.basename(bucket_key))
    source_key_path = (os.path.dirname(bucket_key))
    target_keypath = source_key_path + '_' + 'copy' + '/'
    target_bucket_key = target_keypath + file
    copy_source = {'Bucket': bucket_name, 'Key': bucket_key}
    try:
        s3.Object(bucket_name, target_keypath).load()
    except botocore.exceptions.ClientError as e:
        if e.response['Error']['Code'] == "404":
            # Create the key
            print("Creating target _copy folder")
            s3_cli.put_object(Bucket=bucket_name, Key=target_keypath)
            # copy the file
            # s3.copy_object(Bucket=bucket_name, Key=target_bucket_key, CopySource=copy_source)
        else:
            print("Something went wrong!!")
    else:
        print("Key exists!!")
        # s3.copy_object(Bucket=bucket_name, Key=target_bucket_key, CopySource=copy_source)
I tried s3.copy_object, s3_cli.meta.client.copy, and bucket.copy(), and none of them worked. Please let me know if I am doing something wrong.
Here is one simple way to copy an object in S3 within a bucket:
import boto3
s3 = boto3.resource('s3')
bucket = 'mybucket'
src_key = 'data/test/cat.png'
dest_key = 'data/test_copy/cat.png'
s3.Object(bucket, dest_key).copy_from(CopySource=f'{bucket}/{src_key}')
Here is another, lower-level way to do the same thing:
import boto3
s3 = boto3.client('s3')
bucket = 'mybucket'
src_key = 'data/test/cat.png'
dest_key = 'data/test_copy/cat.png'
s3.copy_object(Bucket=bucket, CopySource={'Bucket': bucket, 'Key': src_key}, Key=dest_key)
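A possible reason the earlier attempts failed (my reading, offered as a guess): copy_object is a client operation, so calling it on the object returned by boto3.resource('s3') raises AttributeError. Inside the Lambda from the question, the already-created client s3_cli could perform the copy, reusing the handler's variables:

    # inside lambda_handler, reusing bucket_name, copy_source and target_bucket_key from above
    s3_cli.copy_object(Bucket=bucket_name,
                       CopySource=copy_source,
                       Key=target_bucket_key)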

Uploading a file to an S3 bucket path longer than 63 characters

I am writing a Lambda function to upload a file from one S3 bucket to another when the former is updated. I am running into an invalid parameter exception when uploading a file to an S3 path that is longer than 63 characters. Is there a way to get around this?
import boto3
import datetime
import sys
import os
from os import getenv
import json
import csv

REPORT_BUCKET = getenv('REPORT_BUCKET', 'origin-bucket-name')
now = datetime.datetime.now() - datetime.timedelta(days=1)
today = now.strftime("%m/%d/%y")
today_iso = now.strftime('%Y-%m-%d')

def read_attachment(bucket, key):
    print(f'Bucket: {bucket}, Key: {key}')
    s3 = boto3.resource('s3')
    obj = s3.Object(bucket, key)
    return obj.get()['Body'].read()

def upload_file(data, new_file, bucket_name):
    temp = '/tmp/tmp-{}.csv'.format(today_iso)
    with open(temp, 'w', newline='') as outfile:
        writer = csv.writer(outfile)
        writer.writerows(data)

    s3 = boto3.resource('s3')
    bucket = s3.Bucket(bucket_name)
    bucket.delete_objects(
        Delete={
            'Objects': [
                {'Key': new_file},
            ]
        }
    )
    bucket.upload_file(temp, new_file)
    bucket.Object(new_file).Acl().put(ACL='authenticated-read')
    os.remove(temp)
    print(bucket)
    print('Uploaded: %s/%s' % (bucket_name, new_file))

def lambda_handler(event, context):
    data = read_attachment(REPORT_BUCKET, f'{today_iso}.csv')
    attachment = data.split()
    arr = []
    arr2 = []
    for item in range(len(attachment)):
        attachment[item] = attachment[item].decode('utf-8')
        arr.append(attachment[item].split(','))
        arr2.append(arr[item])
    upload_file(arr2, f'{today_iso}.csv',
                'accountname-useast1-dl-common-0022-in/sub-folder/org=inc/f=csv/v=1.0/staging/')
    return True

if __name__ == '__main__':
    lambda_handler({}, None)
In S3, a bucket name can be at most 63 characters long (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-s3-bucket-naming-requirements.html).
In your code you are calling:
upload_file(arr2, f'{today_iso}.csv', 'accountname-useast1-dl-common-0022-in/sub-folder/org=inc/f=csv/v=1.0/staging/')
which means that you are passing
accountname-useast1-dl-common-0022-in/sub-folder/org=inc/f=csv/v=1.0/staging/
as the bucket name. This parameter is longer than 63 characters, which is why it throws an error.
To resolve this, pass the short bucket name as the bucket and put the rest of the path in the object name.
For example:
bucket name: accountname-useast1-dl-common-0022-in
object name: sub-folder/org=inc/f=csv/v=1.0/staging/
so the line of code that needs to change becomes:
upload_file(arr2, f'sub-folder/org=inc/f=csv/v=1.0/staging/{today_iso}.csv', 'accountname-useast1-dl-common-0022-in')

Boto3 script to query S3 bucket file dates is slow

I've written a simple script to find the latest file in S3 buckets. It works correctly but is extremely slow. Obviously it has a lot of files to check, but if I use something like S3 Browser the file information appears almost immediately. Have I done something wrong, or is this just a limitation of boto3?
#! /usr/bin/python3
import argparse
import boto3
from datetime import datetime
from datetime import timezone

def build_argparser():
    parser = argparse.ArgumentParser(description='List S3 buckets by file date.')
    parser.add_argument('-p', '--profile', help='Profile to use')
    return parser

if __name__ == "__main__":
    parser = build_argparser()
    args = parser.parse_args()

    if args.profile == None:
        s3 = boto3.resource('s3')
    else:
        profile = boto3.session.Session(profile_name=args.profile)
        s3 = profile.resource('s3')

    for bucket in s3.buckets.all():
        print(bucket.name)
        latest_key = ""
        latest_datetime = datetime
        for object in bucket.objects.all():
            #print('\t' + str(object.key) + ': ' + str(object.last_modified))
            if latest_datetime == datetime or latest_datetime < object.last_modified:
                latest_key = object.key
                latest_datetime = object.last_modified
        print('\t' + str(latest_key) + ': ' + str(latest_datetime))
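This is likely a limitation of listing itself rather than a bug in the script (my own note): bucket.objects.all() pages through every object 1,000 keys at a time over the network, so large buckets mean many sequential API calls, while GUI tools typically list lazily or cache results. Listing is still the only way to see every LastModified, but the loop can be tightened with the list_objects_v2 paginator and a single running maximum; a sketch with a hypothetical bucket name:

import boto3

client = boto3.client('s3')
paginator = client.get_paginator('list_objects_v2')

latest = None
for page in paginator.paginate(Bucket='my-bucket'):  # hypothetical bucket name
    for obj in page.get('Contents', []):
        # keep the object with the newest LastModified seen so far
        if latest is None or obj['LastModified'] > latest['LastModified']:
            latest = obj

if latest is not None:
    print(latest['Key'], latest['LastModified'])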
