Aws Lambda "Runtime.HandlerNotFound" python - python-3.x

I am new to python and AWS lambda. I am trying to run this script from the lambda function but I am getting error:
Runtime.HandlerNotFound
This script is working fine if I run it from the ec2 instance, but when I run the same script from AWS lambda it throws an error.
I would be really thankful if someone guides me on what I did wrong.
Thank you
import boto3
import requests
import time
AWS_Access_Key_ID =
AWS_Secret_Access_Key =
DELAY_TIME=10 # 10 Seconds
region = 'us-east-2'
# instances = ['']
instances = {
'instance id': 'http://link',
'instance id': 'http://link'
}
ec2 = None
try:
ec2 = boto3.client('ec2', aws_access_key_id=AWS_Access_Key_ID, aws_secret_access_key=AWS_Secret_Access_Key, region_name=region)
# ec2 = boto3.resource('ec2',aws_access_key_id=AWS_Access_Key_ID, aws_secret_access_key=AWS_Secret_Access_Key, region_name=region)
except Exception as e:
print(e)
print("AWS CREDS ERROR, Exiting...")
exit()
def startInstances(instancesIds):
if(type(instancesIds) != list):
instancesIds = [instancesIds]
try:
response = ec2.start_instances(InstanceIds=instancesIds, DryRun=False)
print(response)
print("Instances Started")
except ClientError as e:
print(e)
print("Instances Failed to Start")
def stopInstances(instancesIds):
if(type(instancesIds) != list):
instancesIds = [instancesIds
]
try:
response = ec2.stop_instances(InstanceIds=instancesIds, DryRun=False)
print(response)
print("Instances Stopped")
except ClientError as e:
print(e)
print("Instances Failed to Stop")
def check():
for x in instances:
retry = 0
live = False
print("Checking Webiste " + instances[x])
while(retry < 5):
try:
r = requests.get(instances[x] ,verify=True)
if(r.status_code == 200):
live = True
break
except:
print("Not Live, retry time " + str(retry + 1))
print("Delaying request for " + str(DELAY_TIME) + " seconds...")
retry += 1
time.sleep(DELAY_TIME)
if(live):
print("Website is live")
# call function to start the ec2 instance
startInstances(x)
else:
# call function to stop the ec2 instance
print('Website is dead')
stopInstances(x)
print("")
def main():
check()
if __name__ == '__main__':
main()

https://docs.aws.amazon.com/lambda/latest/dg/python-handler.html You need to specify what is the name of the handler function, which is the function that AWS lambda will call. Then you need to implement that function in your Python script.

I had a similar problem recently. I was able to define a lambda handler function in my python code that solved the problem. Got the guidance from this post
in short, add this code (adjust naming conventions accordingly):
import botocore
import boto3
def lambda_handler(event, context):
s3 = boto3.resource('s3')
bucket = s3.Bucket('bucketname')
exists = True
try:
s3.meta.client.head_bucket(Bucket='bucketname')
except botocore.exceptions.ClientError as e:
# If a client error is thrown, then check that it was a 404 error.
# If it was a 404 error, then the bucket does not exist.
error_code = int(e.response['Error']['Code'])
if error_code == 404:
exists = False

Related

mocking boto3 client for exceptions

I have successfully mocked boto3 client to test the positive case but unable to mock the same client for exceptions here is the code that i tried to mock the get_secret_value method of boto3 client to raise a ClientError.
class SecretsManager:
def __init__(self):
self.session = boto3.session.Session()
self.client = self.session.client(service_name='secretsmanager', region_name='region-name')
self.get_secrets()
def get_secrets(self): # no-pragma
try:
get_secret_value_response = self.client.get_secret_value(
SecretId=self.secret_name)
except ClientError as e:
if e.response['Error']['Code'] == 'DecryptionFailureException':
raise e
else:
if 'SecretString' in get_secret_value_response:
secret = get_secret_value_response['SecretString']
self.secrets_dict = json.loads(secret)
else:
decoded_binary_secret = base64.b64decode(
get_secret_value_response['SecretBinary'])
Tests:
class TestSecretsManager:
#mock.patch("boto3.session.Session")
def test_get_secrets(self, mock_session):
# Arrange
mock_session_object = mock.Mock()
mock_client = mock.Mock()
mock_client.get_secret_value.return_value = {"SecretString": '{"my-secret1": "val1"}'}
mock_session_object.client.return_value = mock_client
mock_session.return_value = mock_session_object
secrets = SecretsManager()
assert secrets is not None
assert len(secrets.secrets_dict) == 1
assert secrets.secrets_dict['my-secret1'] == 'val1'
#mock.patch("boto3.session.Session")
def test_get_secrets_exception(self, mock_session):
# Arrange
mock_session.client.get_secret_value.side_effect = ClientError(error_response={"Error": {"Code": "DecryptionFailureException"}}, operation_name='Test')
with pytest.raises(Exception) as err:
secrets = SecretsManager()
assert err['Error']['Code'] == 'DecryptionFailureException'
test 1 is passing but, test 2 is not raising ClientError. Can somebody help me where am going wrong?

How do I pass AMI tag values in lambda function written in python 3.7

I have the following Python 3.7 Lambda function and I want to delete AMIs that are older than 90 days but I want to exclude any AMIs that have the tag/value combo of 'amiarchive' / 'yes'. When I run this I get the error "list indices must be integers or slices, not str". I've done a bunch of research and I can't quite figure this out.
import collections
import sys
from datetime import datetime, timedelta, timezone
from botocore.exceptions import ClientError
region ='us-east-1'
aws_account_numbers = {"accountA":"xxxxxxxxxxxx"}
def lambda_handler(event, context):
delete_time = datetime.now() - timedelta(days=90)
for name, acctnum in aws_account_numbers.items():
roleArn = "arn:aws:iam::%s:role/EOTSS-Snapshot-Cleanup-90days" % acctnum
stsClient = boto3.client('sts')
sts_response = stsClient.assume_role(RoleArn=roleArn,RoleSessionName='AssumeCrossAccountRole', DurationSeconds=1800)
ec2 = boto3.resource(service_name='ec2',region_name=region,aws_access_key_id = sts_response['Credentials']['AccessKeyId'],
aws_secret_access_key = sts_response['Credentials']['SecretAccessKey'], aws_session_token = sts_response['Credentials']['SessionToken'])
ec = boto3.client('ec2', 'us-east-1')
images = ec2.images.filter(Owners=["self"])
tag=[{"Name" :"tag:amiarchive", "Values":[] }]
for image in images:
t = datetime.strptime(image.creation_date, "%Y-%m-%dT%H:%M:%S.%fZ")
try:
if delete_time > t and (tag['Value']) != yes:
print ("AMI %s deregistered in acct: %s" % (image.image_id, acctnum))
response = image.deregister()
except ClientError as e:
if e.response['Error']['Code'] == 'InvalidImage.InUse':
print("Image in use")
continue
else:
print("Unexpected error: %s" % e)
continue
return 'Execution Complete'```
You declare
tag=[{"Name" :"tag:amiarchive", "Values":[] }]
So tag is a list (array) that contains one item (which is an object). You need to use an integer index to access this, such as tag[0]. Then, once you have the object at tag[0] you can get its properties such as Name and Values. Your code calls tag['Value'] and that creates the error you see.
I ended up re-working the code with a co-worker. I changed to boto3.client rather than using boto3.client and added an additional for loop to check for the existence of the tag / value combination and skip the remaining steps if that condition is true. The following code does the trick.
import collections
import sys
from datetime import datetime, timedelta, timezone
from botocore.exceptions import ClientError
region ='us-east-1'
aws_account_numbers = {"AccountA":"xxxxxxxxxxxx"}
def lambda_handler(event, context):
delete_time = datetime.now() - timedelta(days=67)
for name, acctnum in aws_account_numbers.items():
roleArn = "arn:aws:iam::%s:role/EOTSS-Snapshot-Cleanup-90days" % acctnum
stsClient = boto3.client('sts')
sts_response = stsClient.assume_role(RoleArn=roleArn,RoleSessionName='AssumeCrossAccountRole', DurationSeconds=1800)
ec2 = boto3.client(service_name='ec2',region_name=region,aws_access_key_id = sts_response['Credentials']['AccessKeyId'],
aws_secret_access_key = sts_response['Credentials']['SecretAccessKey'], aws_session_token = sts_response['Credentials']['SessionToken'])
ec = boto3.client('ec2', 'us-east-1')
images2 = ec2.describe_images(Owners=['self'])
for image in images2['Images']:
if 'Tags' in image:
for tag in image['Tags']:
if tag['Key'] == 'amiarchive' and tag['Value'] == 'yes':
print("Skipping Image",image['ImageId'],"because its archived")
continue
t = datetime.strptime(image['CreationDate'], "%Y-%m-%dT%H:%M:%S.%fZ")
try:
if delete_time > t:
print ("AMI %s deregistered in acct: %s" % (image['ImageId'], acctnum))
#response = image.deregister()
except ClientError as e:
if e.response['Error']['Code'] == 'InvalidImage.InUse':
print("Image in use")
continue
else:
print("Unexpected error: %s" % e)
continue
return 'Execution Complete''''

AWS Lambda Python script not iterating through 'aws_account_numbers' as expected

I have an AWS Lambda function written in Python 3.7, The function is set to delete snapshots older than 120 days. I'm running this from a management account and hitting 2 child accounts using 'aws_account_numbers'. The function runs successfully but in CloudWatch the logs show the same snapshots being deleted for each account. The log shows snapshot x y z is deleted for accountA but then it shows the same snapshots x y z being deleted for accountB. What is really happening is that all of these snapshots do not live in accountA or accountB but they actually live in the management account.
from datetime import datetime, timedelta, timezone
import boto3
import collections
import sys
from botocore.exceptions import ClientError
region ='us-east-1'
aws_account_numbers = {"accountA":"xxxxxxxxxxxx", "accountB":"xxxxxxxxxxxx"}
def lambda_handler(event, context):
delete_time = datetime.now(tz=timezone.utc) - timedelta(days=120)
ec2_resource = boto3.resource('ec2')
snapshots = ec2_resource.snapshots.filter(OwnerIds=['self'])
for name, acctnum in aws_account_numbers.items():
roleArn = "arn:aws:iam::%s:role/EOTSS-Snapshot-Cleanup-120days" % acctnum
stsClient = boto3.client('sts')
sts_response = stsClient.assume_role(RoleArn=roleArn,RoleSessionName='AssumeCrossAccountRole', DurationSeconds=1800)
ec2 = boto3.resource(service_name='ec2',region_name=region,aws_access_key_id = sts_response['Credentials']['AccessKeyId'],
aws_secret_access_key = sts_response['Credentials']['SecretAccessKey'], aws_session_token = sts_response['Credentials']['SessionToken'])
for snapshot in snapshots:
try:
if not snapshot.description.startswith('Snapshot created by task soe-backup') and delete_time > snapshot.start_time:
#snapshot.delete()
print ("Snapshot %s is deleted in acct: %s" % (snapshot, acctnum))
except ClientError as e:
if e.response['Error']['Code'] == 'InvalidSnapshot.InUse':
print ("Snapshot %s in use in acct: %s" % (snapshot, acctnum))
continue
else:
print("Unexpected error: %s" % e)
continue
return 'Execution Complete'

List returning 0 when I use extend (multiprocessing pool)

I'm trying to do a proxy checker with multiprocessing pool, and I'm getting 0 from a variable that I have to save the proxies working and the proxies that don't work but it just return 0 in both, I'm on python 3.5 debian9.6, the file has 200 lines (one proxy for each line)
#!usr/bin/env python3
from multiprocessing import Pool
import requests
import time
import sys
if (sys.version_info > (3, 0)):
pass
else:
print("This program was written for python 3")
exit()
class ProxyChecker():
def __init__(self, proxy_list_file):
self.proxy_list = proxy_list_file
self.working = []
self.not_working = []
self.time_elapsed = 0
def start_pool_to_check_proxies(self):
start_time = time.time()
with Pool(processes=200) as p:
p.map(self.check_proxies, self.proxy_list)
self.time_elapsed = time.time() - start_time
print("Number of working proxies = " + str(len(self.working)))
print("Number of proxies that don't work = " \
+ str(len(self.not_working)))
print("Number of proxies that have been checked = " + \
str(len(self.proxy_list)))
print("Time elapsed while cheking " + str(len(self.proxy_list) \
+ self.time_elapsed))
def check_proxies(self, proxy):
try:
response = requests.get(
'http://google.com',
proxies={'http': 'http://' + proxy},
timeout=25
)
print('Checking ' + proxy + " ...")
self.working.extend(str(proxy))
except Exception as e:
print("Something went wrong")
self.not_working.extend(str(proxy))
"""else:
if response.status_code == 200:
self.working.extend(proxy)
print(self.working)
else:
self.not_working.extend(proxy)"""
def main():
try:
with open("proxies.txt", 'r') as f:
proxies = f.read().split('\n')
except IOError:
print('Error opening the file')
print('Check the name of the file')
else:
# with open("proxies.txt", 'a') as f:
# f.write("*************Working Proxies*************")
checker = ProxyChecker(proxies)
checker.start_pool_to_check_proxies()
if __name__ == '__main__':
main()
As I said the idea is to save in a list how many proxies works (and which ones) but it just return 0 and the proxy_list return the proxies right.
If anyone could help me I would be so pleased.
Happy new year!!

proc.stderr.readlines() hangs with specific svn path

Python 3.6.5
This is my code for listing directory in svn. It works for most directories quite well, but with one path line error = proc.stderr.readlines() hangs forever (100% repeatable). Any ideas why or workaround on that? From terminal command works
class MyRemoteSvnClient(object):
def __init__(self, url):
self.url = url
def list(self, rel_path=None, retries=5):
url = self.url if self.url.endswith('/') else self.url + '/'
if rel_path:
url = '{}{}'.format(url, rel_path)
# print(url)
retries = retries + 1
for i in range(1, retries):
proc = Popen(['svn', 'ls', url], shell=True, stdout=PIPE,
stderr=PIPE, universal_newlines=True)
error = proc.stderr.readlines()
if i == retries - 1:
raise SvnException(error)
if error:
logger.warning('svn error occurred, retrying {}/{}'.format(i, retries - 1))
sleep(1)
continue
while True:
output = proc.stdout.readline().strip()
if proc.poll() is not None:
break
if output:
yield output
break

Resources