AWS DynamoDB Transaction issues (Python) - python-3.x

I am trying to update two tables transactionally. The first table is called CUSTOMER_TABLE, the second CUSTOMER_JOB_TABLE.
For the first table, I create a new row if one doesn't exist; if it does exist, I add this process's value to the currentProcessedCount column. For the second table, I always create a new row. The two updates need to happen in a single transaction. I get the following error and can't figure out the reason. Can someone help?
Response:
{
  "errorMessage": "An error occurred (TransactionCanceledException) when calling the TransactWriteItems operation: Transaction cancelled, please refer cancellation reasons for specific reasons [ValidationError, None]",
  "errorType": "TransactionCanceledException",
  "stackTrace": [
    "  File \"/var/task/app.py\", line 149, in lambda_handler\n    c_table_response = update_customer_table(customer_id, customer_monthly_limit, number_of_rows,\n",
    "  File \"/var/task/app.py\", line 226, in update_customer_table\n    response = dynamodb_client.transact_write_items(\n",
    "  File \"/opt/python/botocore/client.py\", line 316, in _api_call\n    return self._make_api_call(operation_name, kwargs)\n",
    "  File \"/opt/python/botocore/client.py\", line 635, in _make_api_call\n    raise error_class(parsed_response, operation_name)\n"
  ]
}
Below is my method for the call:
import os
import boto3

dynamodb_client = boto3.client('dynamodb')

# grab static env variables
CUSTOMER_ID = os.environ['CUSTOMER_ID']
BUCKET_NAME = os.environ['BUCKET_NAME']
CUSTOMER_TABLE_NAME = os.environ['CUSTOMER_TABLE_NAME']
CUSTOMER_JOB_TABLE_NAME = os.environ['CUSTOMER_JOB_TABLE_NAME']

def update_customer_table(customer_id, customer_monthly_limit, number_of_rows, year_month, uuid, date_time, batch_no):
    response = dynamodb_client.transact_write_items(
        TransactItems=[
            {
                'Update': {
                    'TableName': CUSTOMER_TABLE_NAME,
                    'Key': {
                        'PK': {'S': customer_id},
                        'SK': {'N': str(year_month)},
                    },
                    'ExpressionAttributeNames': {
                        '#ml': "MonthlyLimit",
                        '#cpc': "currentProcessedCount"
                    },
                    'ExpressionAttributeValues': {
                        ':ml': {'N': str(customer_monthly_limit)},
                        ':cpc': {'N': str(number_of_rows)}
                    },
                    'UpdateExpression': "SET #ml = :ml ADD #cpc :cpc"
                }
            },
            {
                'Put': {
                    'TableName': CUSTOMER_JOB_TABLE_NAME,
                    'Item': {
                        'PK': {'S': f'{customer_id}_{uuid}'},
                        'SK': {'N': str(year_month)},
                        'CustomerId': {'S': customer_id},
                        'UUID': {'S': uuid},
                        'StartDateTime': {'N': date_time.strftime('%Y%m%d%H%M')},
                        'NumberOfSplitFiles': {'N': str(batch_no - 1)},
                        'TotalRowCount': {'N': str(number_of_rows)}
                    }
                }
            }
        ]
    )
    return response

This is a problem, though I'm not sure it's the only one. In the UpdateExpression you posted, the value placeholder doesn't match the one defined in ExpressionAttributeValues:
'ExpressionAttributeValues': {
    ':ml': {'N': str(customer_monthly_limit)},
    ':cpc': {'N': str(number_of_rows)}
},
'UpdateExpression': "SET #ml = :mlv ADD #cpc :cpc"
:mlv doesn't match :ml, and an undefined expression attribute value is exactly the kind of mistake DynamoDB reports as a ValidationError in the transaction's cancellation reasons.
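More generally, when TransactWriteItems is cancelled, the exception response carries a CancellationReasons list with one entry per item, in the same order as TransactItems; that is where the [ValidationError, None] in your message comes from. Here is a minimal sketch of a wrapper that prints those reasons, assuming the same dynamodb_client as above:
import boto3
from botocore.exceptions import ClientError

dynamodb_client = boto3.client('dynamodb')

def transact_write_with_reasons(transact_items):
    # Run TransactWriteItems and surface the per-item cancellation reasons on failure.
    try:
        return dynamodb_client.transact_write_items(TransactItems=transact_items)
    except ClientError as e:
        if e.response['Error']['Code'] == 'TransactionCanceledException':
            # One entry per TransactItem; a Code of 'None' means that item was not the cause.
            for i, reason in enumerate(e.response.get('CancellationReasons', [])):
                print(f"item {i}: {reason.get('Code')} - {reason.get('Message')}")
        raise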

Related

Creating CustomConnector connection (GoogleAds) using AWS Lambda | AppFlow

Does the boto3 client support connectors for GoogleAds and FacebookAds? According to the documentation we can use a custom connector, but when I try to use it in the code I get the error below saying it should be one of the built-in types.
[ERROR] ParamValidationError: Parameter validation failed:
Unknown parameter in connectorProfileConfig.connectorProfileProperties: "CustomConnector", must be one of: Amplitude, Datadog, Dynatrace, GoogleAnalytics, Honeycode, InforNexus, Marketo, Redshift, Salesforce, ServiceNow, Singular, Slack, Snowflake, Trendmicro, Veeva, Zendesk, SAPOData
Unknown parameter in connectorProfileConfig.connectorProfileCredentials: "CustomConnector", must be one of: Amplitude, Datadog, Dynatrace, GoogleAnalytics, Honeycode, InforNexus, Marketo, Redshift, Salesforce, ServiceNow, Singular, Slack, Snowflake, Trendmicro, Veeva, Zendesk, SAPOData
Traceback (most recent call last):
  File "/var/task/lambda_function.py", line 34, in lambda_handler
    response = client.create_connector_profile(
  File "/var/runtime/botocore/client.py", line 391, in _api_call
    return self._make_api_call(operation_name, kwargs)
  File "/var/runtime/botocore/client.py", line 691, in _make_api_call
    request_dict = self._convert_to_request_dict(
  File "/var/runtime/botocore/client.py", line 739, in _convert_to_request_dict
    request_dict = self._serializer.serialize_to_request(
  File "/var/runtime/botocore/validate.py", line 360, in serialize_to_request
    raise ParamValidationError(report=report.generate_report())
Code in Lambda:
import json
import boto3

def lambda_handler(event, context):
    client = boto3.client('appflow')

    ### Google Ads
    response = client.create_connector_profile(
        connectorProfileName='GoogleAdsConn',
        connectorType='CustomConnector',
        # connectorLabel='GoogleAds',
        connectionMode='Public',
        connectorProfileConfig={
            "connectorProfileProperties": {
                'CustomConnector': {
                    # 'profileProperties': {
                    #     'string': 'string'
                    # },
                    'oAuth2Properties': {
                        'tokenUrl': 'https://oauth2.googleapis.com/token',
                        'oAuth2GrantType': 'AUTHORIZATION_CODE'
                        # ,'tokenUrlCustomProperties': {
                        #     'string': 'string'
                        # }
                    }
                }
            },
            "connectorProfileCredentials": {
                "CustomConnector": {
                    "authenticationType": "OAUTH2",
                    "oauth2": {
                        "accessToken": "myaccesstoken",
                        "clientId": "myclientid",
                        "clientSecret": "myclientsecret",
                        "oAuthRequest": {
                            "authCode": "string",
                            "redirectUri": "string"
                        },
                        "refreshToken": "myrefreshtoken"
                    }
                }
            }
        }
    )
    return {
        'response': response
    }
Any leads on this will be appreciated.
Thanks!
The issue was with an older boto3 version. After adding a new Lambda layer with the latest boto3 version (1.24.70) and updating the code with profileProperties, it worked seamlessly. Below is the complete working code.
import json
import boto3

def lambda_handler(event, context):
    client = boto3.client('appflow')

    ### Google Ads
    response = client.create_connector_profile(
        connectorProfileName='GoogleAdsConnThruLambda',
        connectorType='CustomConnector',
        connectorLabel='GoogleAds',
        connectionMode='Public',
        connectorProfileConfig={
            "connectorProfileProperties": {
                'CustomConnector': {
                    'profileProperties': {
                        'developerToken': 'developerToken',
                        'instanceUrl': 'https://googleads.googleapis.com',
                        'managerID': 'managerID',
                        'apiVersion': 'v11'
                    },
                    'oAuth2Properties': {
                        'tokenUrl': 'https://oauth2.googleapis.com/token',
                        'oAuth2GrantType': 'AUTHORIZATION_CODE'
                        # ,'tokenUrlCustomProperties': {
                        #     "string": "string"
                        # }
                    }
                }
            },
            "connectorProfileCredentials": {
                "CustomConnector": {
                    "authenticationType": "OAUTH2",
                    "oauth2": {
                        "accessToken": "accessToken",
                        "clientId": "clientId",
                        "clientSecret": "clientSecret",
                        "oAuthRequest": {
                            "authCode": "authCode",
                            "redirectUri": "https://<your_region>.console.aws.amazon.com/appflow/oauth"
                        },
                        "refreshToken": "refreshToken"
                    }
                }
            }
        }
    )
    return {
        'response': response
    }
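If the ParamValidationError persists even with a layer attached, it is worth confirming which boto3 and botocore the function actually imports. A small sketch that just logs the versions at the top of the handler (the create_connector_profile call itself stays as above):
import boto3
import botocore

def lambda_handler(event, context):
    # If these are older than the layer you attached, the 'CustomConnector'
    # shapes won't exist in the runtime's AppFlow API model.
    print("boto3:", boto3.__version__, "botocore:", botocore.__version__)
    # ... create_connector_profile call as above ...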

Name error - "name 'ssm_parameter_namee' is not defined"

I am trying to update a parameter in the SSM Parameter Store and got the error below. What mistake am I making? Please clarify.
Lambda Code:
# Lambda code
import json
import logging
import boto3

logger = logging.getLogger()
logger.setLevel(logging.INFO)
ssm_client = boto3.client('ssm')
parameter_name = ''

def lambda_handler(event, context):
    logger.info('Printing event: {}'.format(event))
    process_sns_event(event)
    return None

def process_sns_event(event):
    for record in (event['Records']):
        event_message = record['Sns']['Message']
        # convert the event message to json
        message_json = json.loads(event_message)
        # obtain the image state
        image_state = (message_json['state']['status'])
        # obtain the image name
        image_name = (message_json['name'])
        # assign SSM parameter based on image_name
        #parameter_name = f'/ec2-image-builder/{image_name}/latest'
        def path(imagename):
            first = "/ec2-image-builder/"
            last = "/latest"
            result = first + imagename + last
            return result
        parameter_name = path(image_name)
        logger.info('image_name: {}'.format(image_name))
        logger.info('ssm_parameter_name: {}'.format(parameter_name))
        # update the SSM parameter if the image state is available
        if (image_state == 'AVAILABLE'):
            logger.info('Image is available')
            # obtain ami id
            ami = message_json['outputResources']['amis'][0]
            recipe_name = message_json['name']
            logger.info('AMI ID: {}'.format(ami['image']))
            # update SSM parameter
            response = ssm_client.put_parameter(
                #Name=parameter_name,
                Name='/ec2-image-builder/linux/latest',
                Description='Latest AMI ID',
                Value=ami['image'],
                Type='String',
                Overwrite=True,
                Tier='Standard'
            )
            logger.info('SSM Updated: {}'.format(response))
            # add tags to the SSM parameter
            ssm_client.add_tags_to_resource(
                ResourceType='Parameter',
                ResourceId=ssm_parameter_namee,
                Tags=[
                    {
                        'Key': 'Source',
                        'Value': 'EC2 Image Builder'
                    },
                    {
                        'Key': 'AMI_REGION',
                        'Value': ami['region']
                    },
                    {
                        'Key': 'AMI_ID',
                        'Value': ami['image']
                    },
                    {
                        'Key': 'AMI_NAME',
                        'Value': ami['name']
                    },
                    {
                        'Key': 'RECIPE_NAME',
                        'Value': recipe_name
                    },
                    {
                        'Key': 'SOURCE_PIPELINE_ARN',
                        'Value': message_json['sourcePipelineArn']
                    },
                ],
            )
    return None
Error output
Response on test:
{ "errorMessage": "name 'ssm_parameter_namee' is not defined",
"errorType": "NameError", "requestId":
"54ad245c-84f3-4c46-9e9b-1798f86a8bce", "stackTrace": [
" File "/var/task/lambda_function.py", line 19, in lambda_handler\n process_sns_event(event)\n",
" File "/var/task/lambda_function.py", line 71, in process_sns_event\n ResourceId=ssm_parameter_namee,\n" ] }
The answer is in your error ...
Typo: name or namee? Is it ssm_parameter_namee or ssm_parameter_name? Note that the variable you actually build and log is parameter_name:
logger.info('ssm_parameter_name: {}'.format(parameter_name))
ResourceId=ssm_parameter_namee
I highly recommend using an IDE; it points a finger at simple things like this for you :)
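For reference, here is a sketch of the corrected tail of process_sns_event, assuming you also want to go back to the commented-out Name=parameter_name so that you tag the same parameter you wrote (the variable names are the ones from your code above):
response = ssm_client.put_parameter(
    Name=parameter_name,
    Description='Latest AMI ID',
    Value=ami['image'],
    Type='String',
    Overwrite=True,
    Tier='Standard'
)
ssm_client.add_tags_to_resource(
    ResourceType='Parameter',
    ResourceId=parameter_name,  # was ssm_parameter_namee, which is never defined
    Tags=[
        {'Key': 'Source', 'Value': 'EC2 Image Builder'},
        {'Key': 'AMI_ID', 'Value': ami['image']},
    ],
)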

stripe capture charge doesn't process

I have a webhook that calls this function when
event["type"] == "payment_intent.amount_capturable_updated"
def capture_order(session):
    ordered_uuid = uuid.UUID(session["metadata"]["pk_order"])
    try:
        order = Ordered.objects.filter(
            pk=ordered_uuid
        ).prefetch_related(
            "from_ordered", "from_ordered__to_product"
        ).get()
    except Ordered.DoesNotExist:
        stripe.PaymentIntent.cancel(session["id"])
        return HttpResponse(status=400)
    try:
        products = set()
        for ordered_product in order.from_ordered.all():
            product = ordered_product.to_product
            product.stock -= ordered_product.quantity
            if product.stock < 0:
                raise ValueError()
            products.add(product)
        with transaction.atomic():
            Product.objects.bulk_update(list(products), ("stock",))
        stripe.PaymentIntent.capture(session["id"])
    except ValueError:
        stripe.PaymentIntent.cancel(session["id"])
    except IntegrityError:
        stripe.PaymentIntent.cancel(session["id"])
    return HttpResponse(status=200)
and I get this error:
This PaymentIntent could not be captured because it has a status of canceled. Only a PaymentIntent with one of the following statuses may be captured: requires_capture.
I had instantiated my checkout like this:
checkout_session = stripe.checkout.Session.create(
    payment_method_types=['card'],
    line_items=[
        {
            'price_data': {
                'currency': 'eur',
                'unit_amount_decimal': get_amount(self.object, with_delivery=True).quantize(TWO_PLACES),
                'product_data': {
                    'name': f'Order #{self.object.pk}',
                },
            },
            'quantity': 1,
        },
    ],
    payment_intent_data={
        "capture_method": "manual",
        "metadata": {
            "pk_order": self.object.pk
        }
    })
Any idea?
You should inspect your Dashboard logs. I suspect that one of your error paths is canceling the PaymentIntent before you try to capture it; you'll need to trace through your logic to figure out why that's happening.
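If you want the failure to show up in your own logs rather than as a generic capture error, you could also retrieve the PaymentIntent and check its status first. A sketch, assuming session["id"] is the PaymentIntent id as in your webhook code:
import stripe

def capture_if_possible(payment_intent_id):
    intent = stripe.PaymentIntent.retrieve(payment_intent_id)
    if intent.status == "requires_capture":
        return stripe.PaymentIntent.capture(payment_intent_id)
    # A status of "canceled" here means some earlier code path (for example one of
    # the except branches, or a duplicate webhook delivery) already cancelled it.
    raise RuntimeError(f"cannot capture PaymentIntent in status {intent.status!r}")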

python regex usage: how to start with , least match , get content in middle [duplicate]

I wrote some code to get data from a web API. I was able to parse the JSON data from the API, but the result I get looks quite complex. Here is one example:
>>> my_json
{'name': 'ns1:timeSeriesResponseType', 'declaredType': 'org.cuahsi.waterml.TimeSeriesResponseType', 'scope': 'javax.xml.bind.JAXBElement$GlobalScope', 'value': {'queryInfo': {'creationTime': 1349724919000, 'queryURL': 'http://waterservices.usgs.gov/nwis/iv/', 'criteria': {'locationParam': '[ALL:103232434]', 'variableParam': '[00060, 00065]'}, 'note': [{'value': '[ALL:103232434]', 'title': 'filter:sites'}, {'value': '[mode=LATEST, modifiedSince=null]', 'title': 'filter:timeRange'}, {'value': 'sdas01', 'title': 'server'}]}}, 'nil': False, 'globalScope': True, 'typeSubstituted': False}
Looking through this data, I can see the specific data I want: the 1349724919000 value that is labelled as 'creationTime'.
How can I write code that directly gets this value?
I don't need any searching logic to find this value. I can see what I need when I look at the response; I just need to know how to translate that into specific code to extract the specific value, in a hard-coded way. I read some tutorials, so I understand that I need to use [] to access elements of the nested lists and dictionaries; but I can't figure out exactly how it works for a complex case.
More generally, how can I figure out what the "path" is to the data, and write the code for it?
For reference, let's see what the original JSON would look like, with pretty formatting:
>>> print(json.dumps(my_json, indent=4))
{
    "name": "ns1:timeSeriesResponseType",
    "declaredType": "org.cuahsi.waterml.TimeSeriesResponseType",
    "scope": "javax.xml.bind.JAXBElement$GlobalScope",
    "value": {
        "queryInfo": {
            "creationTime": 1349724919000,
            "queryURL": "http://waterservices.usgs.gov/nwis/iv/",
            "criteria": {
                "locationParam": "[ALL:103232434]",
                "variableParam": "[00060, 00065]"
            },
            "note": [
                {
                    "value": "[ALL:103232434]",
                    "title": "filter:sites"
                },
                {
                    "value": "[mode=LATEST, modifiedSince=null]",
                    "title": "filter:timeRange"
                },
                {
                    "value": "sdas01",
                    "title": "server"
                }
            ]
        }
    },
    "nil": false,
    "globalScope": true,
    "typeSubstituted": false
}
That lets us see the structure of the data more clearly.
In the specific case, first we want to look at the corresponding value under the 'value' key in our parsed data. That is another dict; we can access the value of its 'queryInfo' key in the same way, and similarly the 'creationTime' from there.
To get the desired value, we simply put those accesses one after another:
my_json['value']['queryInfo']['creationTime'] # 1349724919000
I just need to know how to translate that into specific code to extract the specific value, in a hard-coded way.
If you access the API again, the new data might not match the code's expectation. You may find it useful to add some error handling. For example, use .get() to access dictionaries in the data, rather than indexing:
name = my_json.get('name') # will return None if 'name' doesn't exist
Another way is to test for a key explicitly:
if 'name' in resp_dict:
    name = resp_dict['name']
else:
    pass
However, these approaches may fail if further accesses are required. A placeholder result of None isn't a dictionary or a list, so attempts to access it that way will fail again (with TypeError). Since "Simple is better than complex" and "it's easier to ask for forgiveness than permission", the straightforward solution is to use exception handling:
try:
    creation_time = my_json['value']['queryInfo']['creationTime']
except (TypeError, KeyError):
    print("could not read the creation time!")
    # or substitute a placeholder, or raise a new exception, etc.
Here is an example of loading a single value from simple JSON data, and converting back and forth to JSON:
import json

# start with a simple dict
data = {"test1": "1", "test2": "2", "test3": "3"}
# serialize the dict to a JSON string
json_str = json.dumps(data)
# parse the JSON string back into a dict
resp = json.loads(json_str)
# print the parsed response
print(resp)
# extract one element from the response
print(resp['test1'])
Try this.
Here, I fetch only statecode from the COVID API (a JSON array).
import requests
r = requests.get('https://api.covid19india.org/data.json')
x = r.json()['statewise']
for i in x:
    print(i['statecode'])
Try this:
from functools import reduce
import re

def deep_get_imps(data, key: str):
    split_keys = re.split("[\\[\\]]", key)
    out_data = data
    for split_key in split_keys:
        if split_key == "":
            return out_data
        elif isinstance(out_data, dict):
            out_data = out_data.get(split_key)
        elif isinstance(out_data, list):
            try:
                sub = int(split_key)
            except ValueError:
                return None
            else:
                length = len(out_data)
                out_data = out_data[sub] if -length <= sub < length else None
        else:
            return None
    return out_data

def deep_get(dictionary, keys):
    return reduce(deep_get_imps, keys.split("."), dictionary)
Then you can use it like below:
res = {
    "status": 200,
    "info": {
        "name": "Test",
        "date": "2021-06-12"
    },
    "result": [
        {
            "name": "test1",
            "value": 2.5
        },
        {
            "name": "test2",
            "value": 1.9
        },
        {
            "name": "test1",
            "value": 3.1
        }
    ]
}
>>> deep_get(res, "info")
{'name': 'Test', 'date': '2021-06-12'}
>>> deep_get(res, "info.date")
'2021-06-12'
>>> deep_get(res, "result")
[{'name': 'test1', 'value': 2.5}, {'name': 'test2', 'value': 1.9}, {'name': 'test1', 'value': 3.1}]
>>> deep_get(res, "result[2]")
{'name': 'test1', 'value': 3.1}
>>> deep_get(res, "result[-1]")
{'name': 'test1', 'value': 3.1}
>>> deep_get(res, "result[2].name")
'test1'
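Applied to the data from the original question, the same helper reads the nested timestamp without writing out the chained indexing by hand:
>>> deep_get(my_json, "value.queryInfo.creationTime")
1349724919000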

Cannot create ResourceGroup using boto3: Query format not valid

I am trying to create a resource group using the following boto3 snippet:
import boto3

kwargs = {
    'Name': 'cluster.foo.io',
    'Description': 'AWS resources assigned to the foo cluster.',
    'ResourceQuery': {
        'Type': 'TAG_FILTERS_1_0',
        'Query': '[{"Key": "foo.io/cluster", "Values": ["cluster.foo.io"]}]',
    },
    'Tags': {
        'foo.io/cluster': 'cluster.foo.io'
    }
}

client = boto3.client("resource-groups")
resp = client.create_group(**kwargs)
But I'm getting the following error:
File "/Users/benjamin/.pyenv/versions/3.7.3/Python.framework/Versions/3.7/lib/python3.7/site-packages/botocore/client.py", line 357, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/Users/benjamin/.pyenv/versions/3.7.3/Python.framework/Versions/3.7/lib/python3.7/site-packages/botocore/client.py", line 661, in _make_api_call
raise error_class(parsed_response, operation_name)
botocore.errorfactory.BadRequestException: An error occurred (BadRequestException)
when calling the CreateGroup operation: Query not valid:
Query format not valid: check JSON syntax
I keep comparing the Query to the example in the documentation but either I'm not seeing a difference or I'm way off in left field. I even used the json module as follows:
resp = self.resource_client.create_group(
    Name='cluster.foo.io',
    Description="AWS resources assigned to the foo cluster",
    ResourceQuery={
        "Type": "TAG_FILTERS_1_0",
        "Query": json.dumps([{"Key": "foo.io/cluster", "Values": ["cluster.foo.io"]}]),
    },
    Tags={
        "foo.io/cluster": "cluster.foo.io",
    },
)
Any help would be appreciated!
The query parameter is missing ResourceTypeFilters and TagFilters. So, ResourceQuery should look like this:
'ResourceQuery': {
    'Type': 'TAG_FILTERS_1_0',
    'Query': "{\"ResourceTypeFilters\": [\"AWS::AllSupported\"], \"TagFilters\": [{\"Key\": \"foo.io/cluster\", \"Values\": [\"cluster.foo.io\"]}]}"
}
So, your code should be replaced as follows (the main section to change is ResourceQuery):
import json
import boto3

query = {
    "ResourceTypeFilters": ["AWS::AllSupported"],
    "TagFilters": [{
        "Key": "foo.io/cluster",
        "Values": ["cluster.foo.io"]
    }]
}

resource_query = {
    'Type': 'TAG_FILTERS_1_0',
    'Query': json.dumps(query)
}

kwargs = {
    'Name': 'cluster.foo.io',
    'Description': 'AWS resources assigned to the foo cluster.',
    'ResourceQuery': resource_query,
    'Tags': {
        'foo.io/cluster': 'cluster.foo.io'
    }
}

client = boto3.client("resource-groups")
resp = client.create_group(**kwargs)
I referred to the CLI example shown here.
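As an optional sanity check (a sketch; the exact response shape can differ between boto3 versions), you can list what the tag query actually matched once the group exists:
import boto3

client = boto3.client("resource-groups")
resp = client.list_group_resources(Group="cluster.foo.io")
for item in resp.get("Resources", []):
    print(item["Identifier"]["ResourceType"], item["Identifier"]["ResourceArn"])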
