match and originalKey to resize image in Lambda function - node.js

I'm working with a Lambda function to resize uploaded images in my S3 bucket, and I'm encountering a few problems with the following code. Can anyone explain what "const match" and "originalKey" are? I tried several combinations; none helped me.
'use strict';
const AWS = require('aws-sdk');
const S3 = new AWS.S3({
  accessKeyId: "xxxxxxxxxxxx",
  secretAccessKey: "yyyyyyyyyyy",
  region: "us-east-1",
  signatureVersion: 'v4',
});
const Sharp = require('sharp');
const BUCKET = "patientimg";
const URL = "https://s3.ap-south-1.amazonaws.com";
exports.handler = function(event, context, callback) {
  const key = event.Records[0].s3.object.key;
  const match = key.match(/(\d+)x(\d+)\/(.*)/);
  const width = 10;
  const height = 10;
  const originalKey = "ImageName";
  S3.getObject({Bucket: BUCKET, Key: originalKey}).promise()
    .then(data => Sharp(data.Body)
      .resize(width, height)
      .toFormat('png')
      .toBuffer()
    )
    .then(buffer => S3.putObject({
      Body: buffer,
      Bucket: BUCKET,
      ContentType: "image/png",
      Key: key,
    }).promise()
    )
    .then(() => callback(null, {
      statusCode: '301',
      headers: {'location': `${URL}/${key}`},
      body: "",
    })
    )
    .catch(err => callback(err))
}
Output:
Error occurred in match[1], match[2] and match[3]
Thanks in advance.

This code assumes the uploaded object key matches a specific pattern /(\d+)x(\d+)\/(.*)/, which means:
\d+ one or more digits - match[1]
x a literal x
\d+ one or more digits - match[2]
\/ a literal slash
.* zero or more of any character - match[3]
If your object key doesn't match this pattern, key.match() returns null, and accessing match[1] then throws - which is the error you're seeing.
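For illustration, here is a minimal sketch (inside the handler, assuming the key really does follow the WIDTHxHEIGHT/filename pattern, e.g. "200x200/earth.jpg") of how match is typically used to derive the width, height and originalKey instead of hard-coding them:

const key = event.Records[0].s3.object.key;   // e.g. "200x200/earth.jpg"
const match = key.match(/(\d+)x(\d+)\/(.*)/);
if (!match) {
  // key doesn't follow the pattern, so bail out instead of crashing on match[1]
  return callback(new Error(`Unexpected key format: ${key}`));
}
const width = parseInt(match[1], 10);  // first capture group, e.g. 200
const height = parseInt(match[2], 10); // second capture group, e.g. 200
const originalKey = match[3];          // key of the original image, e.g. "earth.jpg"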

In case someone else is struggling with this: it's not a *, it's an x.
So the URL for triggering this should look like this:
http://webpageendpointtobucket/200x200/earth.jpg

Related

How do I rename a folder?

I want to do this with the aws-sdk library.
I have a folder in my S3 bucket called "abcd/"; it has 3 files in it (e.g. abcd/1.jpg, abcd/2.jpg).
I want to rename the folder to 1234/, so that only 1234/ remains.
const awsMove = async (path) => {
  try {
    const s3 = new AWS.S3();
    const AWS_BUCKET = 'my-bucket-test';
    const copyParams = {
      Key: path.newPath,
      Bucket: AWS_BUCKET,
      CopySource: encodeURI(`/${AWS_BUCKET}/${path.oldPath}`),
    };
    await s3.copyObject(copyParams).promise();
    const deleteParams = {
      Key: path.oldPath,
      Bucket: AWS_BUCKET,
    };
    await s3.deleteObject(deleteParams).promise();
  } catch (err) {
    console.log(err);
  }
};
const changePath = { oldPath: 'abcd/', newPath: '1234/' };
awsMove(changePath);
The above code errors with "The specified key does not exist". What am I doing wrong?
AWS S3 does not have the concept of folders as in a file system. You have a bucket and a key that identifies the object/file stored at that location. The pattern of the key is usually a/b/c/d/some_file, and the way it is shown on the AWS console might give you the impression that a, b, c and d are folders, but indeed they aren't.
Now, you can't change the key of an object since it is immutable. You'll have to copy the file existing at the current key to the new key and delete the file at the current key.
This implies that renaming a folder, say folder/, is the same as copying all files located at key folder/* and creating new ones at newFolder/*. The error:
The specified key does not exist
says that you've not specified the full object key during the copy from source as well as during deletion. The correct implementation would be to list all files at folder/*, then copy and delete them one by one. So, your function should be doing something like this:
const awsMove = async (path) => {
  try {
    const s3 = new AWS.S3();
    const AWS_BUCKET = 'my-bucket-test';
    const listParams = {
      Bucket: AWS_BUCKET,
      Delimiter: '/',
      Prefix: `${path.oldPath}`
    };
    // List every object stored under the old prefix, e.g. "abcd/1.jpg", "abcd/2.jpg"
    const listedObjects = await s3.listObjects(listParams).promise();
    for (const elem of listedObjects.Contents) {
      // elem.Key already contains the old prefix, so only swap the prefix part
      const newKey = `${path.newPath}${elem.Key.slice(path.oldPath.length)}`;
      const copyParams = {
        Key: newKey,
        Bucket: AWS_BUCKET,
        CopySource: encodeURI(`/${AWS_BUCKET}/${elem.Key}`),
      };
      await s3.copyObject(copyParams).promise();
      // Delete the object at the *old* key once the copy has succeeded
      const deleteParams = {
        Key: elem.Key,
        Bucket: AWS_BUCKET,
      };
      await s3.deleteObject(deleteParams).promise();
    }
  } catch (err) {
    console.log(err);
  }
};
Unfortunately, you will need to copy the old ones to the new name and delete them from the old one.
Boto3:
import boto3

AWS_BUCKET = 'my-bucket-test'
s3 = boto3.resource('s3')
s3.Object(AWS_BUCKET, 'new_file').copy_from(CopySource=f'{AWS_BUCKET}/old_file')
s3.Object(AWS_BUCKET, 'old_file').delete()
Node:
var s3 = new AWS.S3();
var BUCKET_NAME = 'my-bucket-test';
var OLD_KEY = 'old-file.json';
var NEW_KEY = 'new-file.json';
s3.copyObject({
  Bucket: BUCKET_NAME,
  CopySource: `${BUCKET_NAME}/${OLD_KEY}`,
  Key: NEW_KEY
})
  .promise()
  .then(() =>
    s3.deleteObject({
      Bucket: BUCKET_NAME,
      Key: OLD_KEY
    }).promise()
  )
  .catch((e) => console.error(e))

Missing region in AWS Rekognition in Node.js

//Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//PDX-License-Identifier: MIT-0 (For details, see https://github.com/awsdocs/amazon-rekognition-developer-guide/blob/master/LICENSE-SAMPLECODE.)
const AWS = require('aws-sdk')
const bucket = 'bucket' // the bucketname without s3://
const photo_source = 'source.jpg'
const photo_target = 'target.jpg'
const config = new AWS.Config({
  accessKeyId: process.env.AWS_ACCESS_KEY_ID,
  secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY,
  region: process.env.AWS_REGION
})
const client = new AWS.Rekognition();
const params = {
  SourceImage: {
    S3Object: {
      Bucket: bucket,
      Name: photo_source
    },
  },
  TargetImage: {
    S3Object: {
      Bucket: bucket,
      Name: photo_target
    },
  },
  SimilarityThreshold: 70
}
client.compareFaces(params, function(err, response) {
  if (err) {
    console.log(err, err.stack); // an error occurred
  } else {
    response.FaceMatches.forEach(data => {
      let position = data.Face.BoundingBox
      let similarity = data.Similarity
      console.log(`The face at: ${position.Left}, ${position.Top} matches with ${similarity} % confidence`)
    }) // for response.faceDetails
  } // if
});
The above code is from the official AWS webpage (https://docs.aws.amazon.com/rekognition/latest/dg/faces-comparefaces.html). The code compares faces in images using Node.js. When comparing images with the above code, an error occurs saying the region is missing from the config. When I checked the code, a config object is created but it is not used anywhere. Can someone tell me where I must use the config object? If the error is appearing for any other reason, please tell me the reason.
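For reference, a minimal sketch of one way the config could be applied (this is an assumption about the fix, not part of the original post): in the AWS SDK for JavaScript v2 you can either pass the options to the service constructor or set them globally before creating any clients.

// Option 1: pass the options (or the config object itself) to the constructor
const client = new AWS.Rekognition({ region: process.env.AWS_REGION });

// Option 2: update the global configuration before constructing clients
AWS.config.update({ region: process.env.AWS_REGION });
const client2 = new AWS.Rekognition();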

Cannot upload to AWS S3 inside my Lambda function

I have the following Lambda function. It receives an XML, looks through it, finds a base64 PDF file and tries to upload it to S3.
index.js
const AWS = require('aws-sdk');
const xml2js = require('xml2js');
const pdfUpload = require('./upload_pdf');
const s3 = new AWS.S3();

exports.handler = async (event, context, callback) => {
  let attachment;
  xml2js.parseString(event.body, function(err, result) {
    attachment = result.Attachment[0].Data[0];
    if (attachment) {
      pdfUpload(attachment);
    }
  });
  return {
    statusCode: 200
  }
};
upload_pdf.js
/**
 *
 * @param {string} base64 Data
 * @return {string} Image url
 */
const pdfUpload = async (base64) => {
  const AWS = require('aws-sdk');
  const s3 = new AWS.S3();
  const base64Data = new Buffer.from(base64, 'base64');
  // With this setup, each time your user uploads an image, it will be overwritten.
  // To prevent this, use a different Key each time.
  // This won't be needed if they're uploading their avatar, hence the filename, userAvatar.js.
  const params = {
    Bucket: 'mu-bucket',
    Key: `123.pdf`,
    Body: base64Data,
    ACL: 'public-read',
    ContentEncoding: 'base64',
    ContentType: `application/pdf`
  }
  let location = '';
  let key = '';
  try {
    const { Location, Key } = await s3.upload(params).promise();
    location = Location;
    key = Key;
  } catch (error) {
    // console.log(error)
  }
  console.log(location, key);
  return location;
}
module.exports = pdfUpload;
No matter what I do, the file does not get uploaded. I have checked the permissions, and the lambda has access to the bucket. Running the lambda I'm not receiving any errors either. Can anybody see what might be wrong here?
First, as a piece of advice, I think you should add more logging to see at which step the function is stuck or failing.
The second thing you can try is to await the upload:
await pdfUpload(attachment);
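Building on that, here is a hedged sketch (based on an assumption about the root cause, not something stated in the original answer): because the handler is async and returns right after calling parseString, the Lambda can finish before the S3 upload promise settles. One way to make the handler wait is to wrap the callback-style parser in a Promise and await both steps:

exports.handler = async (event) => {
  // Wrap xml2js's callback API so the handler can await the parse result.
  const result = await new Promise((resolve, reject) => {
    xml2js.parseString(event.body, (err, parsed) => (err ? reject(err) : resolve(parsed)));
  });
  const attachment = result.Attachment[0].Data[0];
  if (attachment) {
    await pdfUpload(attachment); // wait for the S3 upload before returning
  }
  return { statusCode: 200 };
};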

Lambda randomly misses files, creates dupes in S3

I have a Lambda function to ingest images from an S3 bucket, get some metadata, store this in an AWS RDS instance and then re-upload the image. It should be simple, but I fear one of the following is causing issues:
It sometimes creates duplicates
It sometimes misses files
It seems to happen with larger sets of images. I uploaded sub-1000 assets and it seems to work reasonably well; at 3000+ it seems to be unreliable. The function is not set to time out too early (30 seconds should be fine) and it has a good memory allocation of 512MB (please tell me if these are false assumptions; I am an amateur at this and a novice with Lambda, so please let me know what you think of what I have done).
const AWS = require('aws-sdk')
const uuidv4 = require('uuid/v4');
AWS.config.update({
  accessKeyId: 'XXX',
  secretAccessKey: 'XXX'
})
const s3 = new AWS.S3({
  signatureVersion: 'v4',
  region: 'eu-west-2'
})
const hasha = require('hasha')
const { Pool, Client } = require('pg')
const pool = new Pool({
  user: 'XXX',
  host: 'XXX',
  database: 'XXX',
  password: 'XXX',
  port: 5432,
})
exports.handler = async (event, context) => {
  const bucket = event.Records[0].s3.bucket.name;
  const key = decodeURIComponent(event.Records[0].s3.object.key.replace(/\+/g, ' '));
  console.log("Processing: " + key)
  // Get file
  try {
    const file = await s3.getObject({
      Bucket: bucket,
      Key: key
    }).promise()
    const hash = await hasha(file.Body, { algorithm: 'md5' })
    const id = uuidv4()
    newfile = await s3.putObject({
      Bucket: 'XXX',
      Key: id,
      Body: file.Body,
      ContentType: file.ContentType
    }).promise()
    var fileString = key.split('/')
    var fileName = fileString[fileString.length - 1]
    const text = 'INSERT INTO original(original_filename, mime, file_size, file_path, file_name, hash) VALUES($1, $2, $3, $4, $5, $6) RETURNING *'
    const values = [fileName, file.ContentType, file.ContentLength, key, id, hash]
    const res = await pool.query(text, values)
    console.log(res.rows[0])
    return "Done"
  } catch (err) {
    console.log("####### Error #######: ", err)
    return "Error"
  }
}
I am expecting that when X files are uploaded, the same number end up in the target bucket and in my DB table. This is not always the case, and it is tricky to unpick where it is going wrong. I am sure there is a more elegant way to do this.

AWS Lambda gives error on putting S3 object

I am working on a function which creates a thumbnail by saving a thumbnail version of the image in the screenshot folder whenever an image is uploaded to the images folder in the bucket. I am using the Serverless Framework. I keep getting the error shown below. I have pasted the exact code so anyone can copy, paste and reproduce this; serverless.yml, the handler file and the supporting files are included as well.
I can't figure out why, when I am referring to a buffer, I get this error saying the object type is not a buffer.
{ InvalidParameterType: Expected params.Body to be a string, Buffer, Stream, Blob, or typed array object
at ParamValidator.fail (/var/runtime/node_modules/aws-sdk/lib/param_validator.js:50:37)
at ParamValidator.validatePayload (/var/runtime/node_modules/aws-sdk/lib/param_validator.js:255:10)
at ParamValidator.validateScalar (/var/runtime/node_modules/aws-sdk/lib/param_validator.js:133:21)
at ParamValidator.validateMember (/var/runtime/node_modules/aws-sdk/lib/param_validator.js:94:21)
at ParamValidator.validateStructure (/var/runtime/node_modules/aws-sdk/lib/param_validator.js:75:14)
at ParamValidator.validateMember (/var/runtime/node_modules/aws-sdk/lib/param_validator.js:88:21)
at ParamValidator.validate (/var/runtime/node_modules/aws-sdk/lib/param_validator.js:34:10)
at Request.VALIDATE_PARAMETERS (/var/runtime/node_modules/aws-sdk/lib/event_listeners.js:125:42)
at Request.callListeners (/var/runtime/node_modules/aws-sdk/lib/sequential_executor.js:105:20)
at callNextListener (/var/runtime/node_modules/aws-sdk/lib/sequential_executor.js:95:12)
message: 'Expected params.Body to be a string, Buffer, Stream, Blob, or typed array object',
code: 'InvalidParameterType',
time: 2019-03-12T16:37:26.910Z }
Code:
Handler.js
'use strict';
const resizer = require('./resizer');

module.exports.resizer = (event, context, callback) => {
  console.log(event.Records[0].s3);
  const bucket = event.Records[0].s3.bucket.name;
  const key = event.Records[0].s3.object.key;
  console.log(`A file named ${key} was put in a bucket ${bucket}`);
  resizer(bucket, key)
    .then(() => {
      console.log(`The thumbnail was created`);
      callback(null, {
        message: 'The thumbnail was created'
      });
    })
    .catch(error => {
      console.log(error);
      callback(error);
    });
};

module.exports.thumbnails = (event, context, callback) => {
  const bucket = event.Records[0].s3.bucket.name;
  const key = event.Records[0].s3.object.key;
  console.log(bucket);
  console.log(key);
  console.log(`A new file ${key} was created in the bucket ${bucket}`);
  callback(null, {
    message: `A new file ${key} was created in the bucket ${bucket}`
  });
};
Resizer.js
'use strict';
const AWS = require('aws-sdk');
const S3 = new AWS.S3();
const Jimp = require('jimp'); // https://github.com/oliver-moran/jimp

module.exports = (bucket, key) => {
  const newKey = replacePrefix(key);
  const height = 512;
  return getS3Object(bucket, key)
    .then(data => resizer(data.Body, height))
    .then(buffer => putS3Object(bucket, newKey, buffer));
};

function getS3Object(bucket, key) {
  return S3.getObject({
    Bucket: bucket,
    Key: key
  }).promise();
}

function putS3Object(bucket, key, body) {
  return S3.putObject({
    Body: body,
    Bucket: bucket,
    ContentType: 'image/jpg',
    Key: key
  }).promise();
}

function replacePrefix(key) {
  const uploadPrefix = 'uploads/';
  const thumbnailsPrefix = 'thumbnails/';
  return key.replace(uploadPrefix, thumbnailsPrefix);
}

function resizer(data, height) {
  return Jimp.read(data)
    .then(image => {
      return image
        .resize(Jimp.AUTO, height)
        .quality(100) // set JPEG quality
        .getBuffer(Jimp.MIME_JPEG, (err, buffer) => {
          return buffer;
        });
    })
    .catch(err => err);
}
Serverless.yml
service: serverless-resizer-project # NOTE: update this with your service name
provider:
  name: aws
  runtime: nodejs6.10
  profile: student1
  iamRoleStatements:
    - Effect: "Allow"
      Action:
        - "s3:ListBucket"
        - "s3:GetObject"
        - "s3:PutObject"
      Resource: "arn:aws:s3:::serverless-resizer-project-images/*"
functions:
  resizer:
    handler: handler.resizer
    events:
      - s3:
          bucket: serverless-resizer-project-images
          event: s3:ObjectCreated:*
          rules:
            - prefix: uploads/
            - suffix: .jpg
  thumbnails:
    handler: handler.thumbnails
    events:
      - s3:
          bucket: serverless-resizer-project-images
          event: s3:ObjectCreated:*
          rules:
            - prefix: thumbnails/
            - suffix: .jpg
The return value of your resizer function is not what you expect. You're using the getBuffer function with a callback, which means that the buffer of the image is not resolved by the promise, but instead is used in the callback, which is not your intention. You should instead use getBufferAsync, which returns a promise that resolves to the image buffer. Your resizer function should look something like this:
function resizer(data, height) {
  return Jimp.read(data)
    .then(image => image
      .resize(Jimp.AUTO, height)
      .quality(100) // set JPEG quality
      .getBufferAsync(Jimp.MIME_JPEG)
    )
    .catch(err => err);
}
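If the installed Jimp version predates getBufferAsync, a manual Promise wrapper over the callback form can serve the same purpose (a sketch under that assumption, not part of the original answer):

function resizer(data, height) {
  return Jimp.read(data).then(image =>
    new Promise((resolve, reject) =>
      image
        .resize(Jimp.AUTO, height)
        .quality(100) // set JPEG quality
        .getBuffer(Jimp.MIME_JPEG, (err, buffer) => (err ? reject(err) : resolve(buffer)))
    )
  );
}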
