Uploaded file does not show in S3 - python-3.x

I am using boto3's multipart upload with TransferConfig.
Everything seems okay, as the program runs without errors:
import threading, boto3, re, os, sys
from boto3.s3.transfer import TransferConfig

# create resource
s3 = boto3.resource('s3',
                    region_name=region,
                    aws_access_key_id=ACCESS_KEY,
                    aws_secret_access_key=SECRET_KEY,
                    aws_session_token=SESSION_TOKEN)

BUCKET_NAME = "my_bucket"

# the upload function
def multi_part_upload_with_s3():
    # Multipart upload
    config = TransferConfig(multipart_threshold=1024*25, max_concurrency=10000,
                            multipart_chunksize=1024*25, use_threads=True)
    # file_path = os.path.dirname(__file__) + '/largefile.pdf'
    file_path = "C:/Users/Documents/MyFile.out"
    key_path = 'MyDir/MySubDir/'
    s3.meta.client.upload_file(file_path, BUCKET_NAME, key_path,
                               # ExtraArgs={'ACL': 'public-read', 'ContentType': 'text/pdf'},
                               Config=config, Callback=ProgressPercentage(file_path))
# Not really important, just tells you what percentage of your file has uploaded.
class ProgressPercentage(object):
    def __init__(self, filename):
        self._filename = filename
        self._size = float(os.path.getsize(filename))
        self._seen_so_far = 0
        self._lock = threading.Lock()

    def __call__(self, bytes_amount):
        with self._lock:
            self._seen_so_far += bytes_amount
            percentage = (self._seen_so_far / self._size) * 100
            sys.stdout.write("\r%s %s/%s (%.2f%%)" % (self._filename,
                                                      self._seen_so_far, self._size, percentage))
            sys.stdout.flush()
# Now call the function
if __name__ == '__main__':
    multi_part_upload_with_s3()
Output:
C:/Users/Documents/MyFile.out 1295607/1295607.0 (100.00%)
So it appears to run without errors. However, when I look in S3, 'MySubDir' is created, but 'MyFile.out' is not in there. I thought the high max concurrency might be the culprit, figuring it would take a while to join the parts back up, but I've waited over 4 hours and nothing has shown up. I've also uploaded other files using other approaches, and those files do show up in 'MySubDir'.
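(As an aside, one way to check whether parts are stuck in an unfinished multipart upload is to list the bucket's in-progress multipart uploads. A minimal sketch, using the bucket name from this question:)

import boto3

# Sketch: list any unfinished multipart uploads for the bucket from the question.
s3_client = boto3.client('s3')
response = s3_client.list_multipart_uploads(Bucket="my_bucket")
for upload in response.get('Uploads', []):
    print(upload['Key'], upload['UploadId'], upload['Initiated'])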

You never specify the destination file name, only the path. Try:
file_path = "C:/Users/Documents/MyFile.out"
key_path = 'MyDir/MySubDir/MyFile.out'
s3.meta.client.upload_file(file_path, BUCKET_NAME, key_path,
                           # ExtraArgs={'ACL': 'public-read', 'ContentType': 'text/pdf'},
                           Config=config, Callback=ProgressPercentage(file_path))
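To confirm the object actually landed under the intended key, a quick existence check with head_object can help. A minimal sketch; the bucket and key names are the ones from this question:

import boto3

# Sketch: verify the uploaded object exists and check its size (names from the question).
s3_client = boto3.client('s3')
head = s3_client.head_object(Bucket="my_bucket", Key='MyDir/MySubDir/MyFile.out')  # raises ClientError (404) if missing
print(head['ContentLength'])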

Related

How to get multiple inputs (JSON files for me) in AWS Lambda from the same user's S3 bucket?

I have attached my hardcoded Python program below, which manually appends two JSON files stored in S3. Can someone please tell me how to get multiple input files (JSON files) from the S3 bucket automatically? I know I can do this locally in Python by globbing *.json in the program's directory, but I don't understand how to do the same in AWS Lambda.
Python Code:
import glob
import json

result = []
for f in glob.glob("*.json"):
    with open(f, "r") as infile:
        result += json.load(infile)

with open("merge.json", "w") as outfile:
    json.dump(result, outfile)
In Lambda I am able to do this for two files, as shown below. Can someone please suggest how to do the same, i.e. take all JSON files from the S3 bucket automatically, in Lambda? Thanks in advance.
import boto3
import json

s3_client = boto3.client("s3")
S3_BUCKET = 'bucket-for-json-files'

def lambda_handler(event, context):
    object_key = "sample1.json"  # replace object key
    file_content = s3_client.get_object(Bucket=S3_BUCKET, Key=object_key)["Body"].read()
    print(file_content)

    object_key2 = "sample2.json"  # replace object key
    file_content2 = s3_client.get_object(Bucket=S3_BUCKET, Key=object_key2)["Body"].read()
    print(file_content2)

    result = []
    result += json.loads(file_content)
    result += json.loads(file_content2)
    print(result)
I have followed the syntax from the documentation, but I still get a timeout error:
import boto3

# Create a client
client = boto3.client('s3', region_name='us-east-1')
# Create a reusable Paginator
paginator = client.get_paginator('list_objects')
# Create a PageIterator from the Paginator
page_iterator = paginator.paginate(Bucket='bucket-for-json-files')
for page in page_iterator:
    print(page['Contents'])
Getting a timeout error:
import boto3

s3_client = boto3.client("s3")
S3_BUCKET = 'bucket-for-json-files'

def iterate_bucket_items(S3_BUCKET):
    client = boto3.client('s3')
    paginator = client.get_paginator('list_objects_v2')
    page_iterator = paginator.paginate(Bucket=S3_BUCKET)
    for page in page_iterator:
        if page['KeyCount'] > 0:
            for item in page['Contents']:
                yield item

for i in iterate_bucket_items(S3_BUCKET):
    print(i)
I have solved the issue with the help of @JeremyThompson; my final code is attached here:
import json
import boto3
import glob

def lambda_handler(event, context):
    s3 = boto3.resource('s3')
    bucket = s3.Bucket('bucket-for-json-files')

    # Create a client
    client = boto3.client('s3', region_name='us-east-1')
    # Create a reusable Paginator
    paginator = client.get_paginator('list_objects')
    # Create a PageIterator from the Paginator
    page_iterator = paginator.paginate(Bucket='bucket-for-json-files')

    result = []
    for page in page_iterator:
        result += page['Contents']

    s3 = boto3.client('s3')
    bucket = 'bucket-for-json-files'
    merge = []
    lst = []
    for i in result:
        cmd = i['Key']
        print(cmd)
The above code prints the key of each JSON file available in the bucket.
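For completeness, here is a sketch that goes one step further and merges the contents of every .json object in the bucket. The bucket name is the one from the question, and it assumes each object holds a JSON list, as in the local glob version:

import json
import boto3

def lambda_handler(event, context):
    # Sketch: list every object via a paginator and merge the .json ones.
    # Assumes each object contains a JSON list, mirroring the local glob version.
    s3_client = boto3.client('s3')
    paginator = s3_client.get_paginator('list_objects_v2')
    result = []
    for page in paginator.paginate(Bucket='bucket-for-json-files'):
        for item in page.get('Contents', []):
            if item['Key'].endswith('.json'):
                body = s3_client.get_object(Bucket='bucket-for-json-files', Key=item['Key'])['Body'].read()
                result += json.loads(body)
    return result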

How to find where an S3 multipart upload is failing in Python?

I am implementing a cron job that will upload a large daily backup file to an S3 Bucket. It works most of the time, but every once in a while, I will check the bucket, and the file size is significantly smaller than the actual size.
It should be roughly 50GB, but the last time it happened, it showed 34GB. My main problem is that I am unsure of what error to try/catch.
I am still learning Python as I go, so bear with me.
from progress import ProgressPercentage  # class file progress.py
from slack import *  # function file for Slack notifications
import random
import glob
import os
import boto3
import botocore
from boto3.s3.transfer import TransferConfig

bucket = "my-s3-backup"
s3 = boto3.resource('s3')

# Grabbing the last file, and removing the full path from the string
pattern = "/path/to/backup/file/xb_*"
files = list(filter(os.path.isfile, glob.glob(pattern)))
files.sort(key=lambda x: os.path.getmtime(x))
file_to_upload = files[-1]
file_name = file_to_upload.replace('/path/to/backup/file/', '')
key_path = 'physical_db_backups/' + file_name

# Multipart upload function
def multi_part_upload():
    config = TransferConfig(multipart_threshold=1024 * 25,
                            max_concurrency=10,
                            multipart_chunksize=1024 * 25,
                            use_threads=True)
    try:
        s3.meta.client.upload_file(file_to_upload, bucket, key_path, Config=config,
                                   Callback=ProgressPercentage(file_to_upload))
        # Custom Slack notification to inform completion
        sendslacksuccess("Physical Backup to S3 Complete:\n" + file_name)
    except botocore.exceptions.ClientError as error:
        # Custom Slack notification to inform of failure
        sendslackerror("Physical Backup to S3 Failed:\n" + file_name + "\nError: " + str(error))

if __name__ == '__main__':
    multi_part_upload()
If the script is not "failing," but it's not uploading the complete file size, what exception am I trying to catch here? Should I log output somewhere?
I'm looking through the Botocore Exceptions documentation. I'm just unsure of what to try/catch with this.
For reference, here is the file size difference:
aws s3 ls --summarize --human-readable --recursive s3://my-s3-backup/physical_db_backups/
2022-05-07 14:31:28 50.7 GiB physical_db_backups/xb_202205070101.xb.zst
2022-05-08 12:48:07 50.8 GiB physical_db_backups/xb_202205080101.xb.zst
2022-05-09 01:30:04 34.2 GiB physical_db_backups/xb_202205090101.xb.zst <--- WRONG
Alright, since I was an idiot and didn't realize the file had not completed yet, I made a couple of changes.
1. I edited the cron to start later.
2. I have created logic to determine if the backup script is running.
I may incorporate additional checks to make sure the file exists, but for now this is a working POC that has been tested.
from progress import ProgressPercentage  # class file progress.py
from slack import *  # function file for Slack notifications
import random
from time import sleep
import psutil
import glob
import os
import boto3
import botocore
from boto3.s3.transfer import TransferConfig
import logging

bucket = "fsn-s3-backup"
s3 = boto3.resource('s3')

pattern = "/path/to/backup/file/xb_*"
files = list(filter(os.path.isfile, glob.glob(pattern)))
files.sort(key=lambda x: os.path.getmtime(x))
file_to_upload = files[-1]
file_name = file_to_upload.replace('/path/to/backup/file/', '')
key_path = 'physical_db_backups/' + file_name

logging.basicConfig(filename='/var/log/s3-backup.log',
                    format='%(asctime)s - %(levelname)s - %(message)s',
                    datefmt='%m/%d/%Y %I:%M:%S %p',
                    filemode='a')
logger = logging.getLogger()
logger.setLevel(logging.INFO)

def multi_part_upload():
    config = TransferConfig(multipart_threshold=1024 * 25,
                            max_concurrency=10,
                            multipart_chunksize=1024 * 25,
                            use_threads=True)
    try:
        s3.meta.client.upload_file(file_to_upload, bucket, key_path, Config=config,
                                   Callback=ProgressPercentage(file_to_upload),
                                   ExtraArgs={'ContentType': 'application/zstd'})
        logger.info("Physical Backup to S3 Complete")
        sendslacksuccess("Physical Backup to S3 Complete:\n" + file_name)
    except botocore.exceptions.ClientError as error:
        logger.error("Physical Backup to S3 Failed: " + str(error))
        sendslackerror("Physical Backup to S3 Failed:\n" + file_name + "\nError: " + str(error))

def checkIfProcessRunning(processName):
    for proc in psutil.process_iter():
        cmdline = proc.cmdline()
        if processName in cmdline:
            return True
    return False

if __name__ == '__main__':
    backuprunning = True
    while backuprunning:
        logger.info("Checking if backup shell script is running")
        if checkIfProcessRunning('/path/to/physical_backup.sh'):
            logger.info("Backup shell script still running. Sleeping for 60s")
            sleep(60)
        else:
            backuprunning = False
            logger.info("Beginning multipart upload")
            multi_part_upload()
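As an extra safeguard on top of waiting for the backup script to finish, a post-upload size check can catch a truncated upload regardless of which exception fires. A minimal sketch; the verify_upload helper is hypothetical, and file_to_upload, bucket and key_path are the names used in the script above:

import os
import boto3

def verify_upload(file_to_upload, bucket, key_path):
    # Hypothetical helper: compare the local file size with the uploaded object's ContentLength.
    s3_client = boto3.client('s3')
    local_size = os.path.getsize(file_to_upload)
    remote_size = s3_client.head_object(Bucket=bucket, Key=key_path)['ContentLength']
    if local_size != remote_size:
        raise RuntimeError(f"Size mismatch: local {local_size} bytes vs S3 {remote_size} bytes")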

Uploading file to an s3 bucket path longer than 63 characters

I am writing a lambda function to upload a file from one s3 bucket to another, when the former is updated. I am running into an invalid parameter exception when uploading a file to the s3 path, which is longer than 63 characters. Is there a way to get around this?
import boto3
import datetime
import sys
import os
from os import getenv
import json
import csv

REPORT_BUCKET = getenv('REPORT_BUCKET', 'origin-bucket-name')
now = datetime.datetime.now() - datetime.timedelta(days=1)
today = now.strftime("%m/%d/%y")
today_iso = now.strftime('%Y-%m-%d')

def read_attachment(bucket, key):
    print(f'Bucket: {bucket}, Key: {key}')
    s3 = boto3.resource('s3')
    obj = s3.Object(bucket, key)
    return obj.get()['Body'].read()

def upload_file(data, new_file, bucket_name):
    temp = '/tmp/tmp-{}.csv'.format(today_iso)
    with open(temp, 'w', newline='') as outfile:
        writer = csv.writer(outfile)
        writer.writerows(data)
    s3 = boto3.resource('s3')
    bucket = s3.Bucket(bucket_name)
    bucket.delete_objects(
        Delete={
            'Objects': [
                {'Key': new_file},
            ]
        }
    )
    bucket.upload_file(temp, new_file)
    bucket.Object(new_file).Acl().put(ACL='authenticated-read')
    os.remove(temp)
    print(bucket)
    print('Uploaded: %s/%s' % (bucket_name, new_file))

def lambda_handler(event, context):
    data = read_attachment(REPORT_BUCKET, f'{today_iso}.csv')
    attachment = data.split()
    arr = []
    arr2 = []
    for item in range(len(attachment)):
        attachment[item] = attachment[item].decode('utf-8')
        arr.append(attachment[item].split(','))
        arr2.append(arr[item])
    upload_file(arr2, f'{today_iso}.csv',
                'accountname-useast1-dl-common-0022-in/sub-folder/org=inc/f=csv/v=1.0/staging/')
    return True

if __name__ == '__main__':
    lambda_handler({}, None)
In S3, the maximum length of a bucket name is 63 characters (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-s3-bucket-naming-requirements.html).
In your code you are calling:
upload_file(arr2, f'{today_iso}.csv', 'accountname-useast1-dl-common-0022-in/sub-folder/org=inc/f=csv/v=1.0/staging/')
which means that you are passing
accountname-useast1-dl-common-0022-in/sub-folder/org=inc/f=csv/v=1.0/staging/
as the bucket name. This parameter is longer than 63 characters, which is why it throws an error.
To resolve this, pass the actual bucket name as the bucket argument and move the rest of the path into the object key; you can then name your object whatever you like.
For example:
bucket name: accountname-useast1-dl-common-0022-in
object name: sub-folder/org=inc/f=csv/v=1.0/staging/
so your line of code that needs to be changed is:
upload_file(arr2, f'sub-folder/org=inc/f=csv/v=1.0/staging/{today_iso}.csv', 'accountname-useast1-dl-common-0022-in')

Get progress callback in aws boto3 uploads

There's a great question and answer for the original boto uploads here:
How to upload a file to directory in S3 bucket using boto
Which has a callback:
k = Key(bucket)
k.key = 'my test file'
k.set_contents_from_filename(testfile, cb=percent_cb, num_cb=10)
While I see the boto3 package takes a callback:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Client.upload_fileobj
I don't see the equivalent of the num_cb argument. How can I get a progress meter for upload_fileobj using boto3?
s3.upload_fileobj(data, 'mybucket', 'mykey')
If you don't need to limit the number of times the callback is invoked (and there is no way to do that with upload_fileobj), you can:
1. Show a percentage
import os
import boto3

class Test:
    def __init__(self):
        self.total = 0
        self.uploaded = 0
        self.s3 = boto3.client('s3')

    def upload_callback(self, size):
        if self.total == 0:
            return
        self.uploaded += size
        print("{} %".format(int(self.uploaded / self.total * 100)))

    def upload(self, bucket, key, file):
        self.total = os.stat(file).st_size
        with open(file, 'rb') as data:
            self.s3.upload_fileobj(
                data, bucket, key, Callback=self.upload_callback)
2. Use progressbar
import os
import boto3
import progressbar

class Test2:
    def __init__(self):
        self.s3 = boto3.client('s3')

    def upload_callback(self, size):
        self.pg.update(self.pg.currval + size)

    def upload(self, bucket, key, file):
        self.pg = progressbar.progressbar.ProgressBar(
            maxval=os.stat(file).st_size)
        self.pg.start()
        with open(file, 'rb') as data:
            self.s3.upload_fileobj(
                data, bucket, key, Callback=self.upload_callback)
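To approximate boto's num_cb behaviour with boto3, one option is to throttle reporting inside the callback itself, for example printing only when the whole-percent value changes. A minimal sketch; the ThrottledProgress class name is made up, and the lock is there because the transfer manager may invoke the callback from multiple threads:

import os
import threading
import boto3

class ThrottledProgress:
    # Sketch: report at most ~100 times by printing only when the whole-percent value changes.
    def __init__(self, filename):
        self._size = os.path.getsize(filename)
        self._seen = 0
        self._last_pct = -1
        self._lock = threading.Lock()

    def __call__(self, bytes_amount):
        with self._lock:
            self._seen += bytes_amount
            pct = int(self._seen * 100 / self._size)
            if pct != self._last_pct:
                self._last_pct = pct
                print("{} %".format(pct))

# Usage (bucket, key and file names are placeholders):
# boto3.client('s3').upload_file('myfile', 'mybucket', 'mykey', Callback=ThrottledProgress('myfile'))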

How to filter out glacier files with boto3?

I'm writing a script to parse files in S3 buckets without downloading them locally. The code seems to work as long as it doesn't hit Glacier files. I'm adding an exception handler for now (the error handling looks better in the actual code, I promise), but ideally I'd like to filter Glacier files out.
Here is my code:
import boto3
import gzip
import os

try:
    s3_client = boto3.client('s3')
    bucket = 'my_bucket'
    prefix = 'path_to_file/file_name.csv.gz'
    obj = s3_client.get_object(Bucket=bucket, Key=prefix)
    body = obj['Body']
    with gzip.open(body, 'rt') as gf:
        for ln in gf:
            print(ln)
except Exception as e:
    print(e)
I see that using the AWS CLI I can at least sort the listing so that Glacier files end up at the bottom, so there must be a way to either sort or filter them out in boto3:
aws s3api list-objects --bucket my-bucket --query "reverse(sort_by(Contents,&LastModified))"
Solved using StorageClass == 'STANDARD' (vs == 'GLACIER'):
bucket = 'my_bucket'
prefix = 'path/to/files/'

s3_client = boto3.client('s3')
response = s3_client.list_objects(Bucket=bucket, Prefix=prefix)
for file in response['Contents']:
    if file['StorageClass'] == 'STANDARD':
        name = file['Key'].rsplit('/', 1)
        if name[1] != '':
            file_name = name[1]
            obj = s3_client.get_object(Bucket=bucket, Key=prefix + file_name)
            body = obj['Body']
            lns = []
            i = 0
            with gzip.open(body, 'rt') as gf:
                for ln in gf:
                    i += 1
                    lns.append(ln.rstrip())
                    if i == 10:
                        break
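Note that list_objects returns at most 1000 keys per call; for larger buckets the same storage-class filter can be applied per page of a paginator. A minimal sketch, using the placeholder bucket and prefix from the question:

import boto3

# Sketch: paginate the listing and skip archived storage classes (placeholder names).
s3_client = boto3.client('s3')
paginator = s3_client.get_paginator('list_objects_v2')
for page in paginator.paginate(Bucket='my_bucket', Prefix='path/to/files/'):
    for obj in page.get('Contents', []):
        if obj['StorageClass'] not in ('GLACIER', 'DEEP_ARCHIVE'):
            print(obj['Key'])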
