I have updated my original post as I have got a bit further: the part of my code that queries my CloudWatch alarms is now working. The code below outputs the state of my CloudWatch alarms to the console, in the format I want. What I'm now trying to do is take that output and upload it as a text file to an S3 bucket. Is this possible?
CW Alarm Code
import { CloudWatchClient, DescribeAlarmsCommand } from "@aws-sdk/client-cloudwatch";
const REGION = "eu-west-2";
const cwclient = new CloudWatchClient({ region: REGION });
export const handler = async() => {
const cwparams = {};
const cw = new DescribeAlarmsCommand(cwparams);
try {
const cwdata = await cwclient.send(cw);
cwdata.MetricAlarms.forEach(function (item) {
console.log('\n%j', {alarmname:item.AlarmName,alarmstate:item.StateValue});
});
} catch (error) {
console.log(error); // log the error instead of swallowing it silently
}
};
Output
Function Logs
START RequestId: xxxxxxxxxxxxxxxxxxx Version: $LATEST
2022-11-30T09:48:34.655Z xxxxxxxxxxxxxxxxxxx INFO
{"alarmname":"my-alarm-1","alarmstate":"OK"}
2022-11-30T09:48:34.655Z xxxxxxxxxxxxxxxxxxx INFO
{"alarmname":"my-alarm-2","alarmstate":"OK"}
END RequestId: xxxxxxxxxxxxxxxxxxx
I have looked at the SDK docs for the S3 PutObjectCommand and have tested the code below, which lets me upload a file with some text content, but I'm not sure how to combine my CloudWatch alarm data with this code so that the "Body" of the text file is my alarm data.
S3 Code
import { S3Client, PutObjectCommand } from "@aws-sdk/client-s3";
export const handler = async() => {
const bucketName = "mybucket";
const keyName = "test.json";
const s3 = new S3Client({});
const s3putCommand = new PutObjectCommand({
Bucket: bucketName,
Key: keyName,
Body: "Hello" // I would like this to be my CW Alarm data
});
try {
await s3.send(s3putCommand);
console.log('Successfully uploaded data to ' + bucketName + '/' + keyName);
} catch (error) {
console.log(error);
}
};
Output
Function Logs
START RequestId: xxxxxxxxxxxxxxxxxxx Version: $LATEST
2022-11-30T09:56:45.585Z xxxxxxxxxxxxxxxxxxx INFO Successfully uploaded data to mybucket/test.json
END RequestId: xxxxxxxxxxxxxxxxxxx
My goal is to end up with the test.json file looking like this:
{"alarmname":"my-alarm-1","alarmstate":"OK"} {"alarmname":"my-alarm-2","alarmstate":"OK"}
Thanks.
You are using an outdated AWS SDK for JavaScript. Refer to the new AWS Code Library for the latest recommended SDK examples:
https://docs.aws.amazon.com/code-library/latest/ug/javascript_3_cloudwatch_code_examples.html
With help from a colleague I have found the answer to this. Note that it works as long as the index file of your Lambda function is named "index.mjs" (so that ES module imports are supported):
import { S3Client, PutObjectCommand } from "@aws-sdk/client-s3";
import { CloudWatchClient, DescribeAlarmsCommand } from "@aws-sdk/client-cloudwatch";
import { Upload } from "@aws-sdk/lib-storage";
const REGION = "eu-west-2";
const cwclient = new CloudWatchClient({ region: REGION });
export const handler = async () => {
const cwparams = {};
const cw = new DescribeAlarmsCommand(cwparams);
const alarmData = [];
const bucketName = "mybucket";
const keyName = "test.json";
const s3 = new S3Client({});
try {
const cwdata = await cwclient.send(cw);
cwdata.MetricAlarms.forEach(function (item) {
alarmData.push({
alarmname: item.AlarmName,
alarmstate: item.StateValue,
});
});
} catch (error) { console.log(error); }
const s3putCommand = new Upload({
client: s3,
params: {
Bucket: bucketName,
Key: keyName,
Body: JSON.stringify(alarmData),
},
});
try {
const data = await s3putCommand.done();
console.log(data);
} catch (error) {
console.log(error);
}
};
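One note on the output format: JSON.stringify(alarmData) writes the file as a single JSON array ([{...},{...}]). If you would rather have the individual alarm objects as shown in the goal above, you can build the Body string yourself; a small sketch, reusing the alarmData array from the code above:

// Sketch: one JSON object per line instead of a JSON array
const body = alarmData.map((a) => JSON.stringify(a)).join("\n");
const s3putCommand = new Upload({
  client: s3,
  params: {
    Bucket: bucketName,
    Key: keyName,
    Body: body,
  },
});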
Related
I have the following Lambda function in Node.js 14.x using AWS SDK v3 for a Timestream insertion process:
'use strict'
// https://docs.aws.amazon.com/AWSJavaScriptSDK/v3/latest/clients/client-timestream-write/index.html
const { TimestreamWriteClient } = require("@aws-sdk/client-timestream-write")
const client = new TimestreamWriteClient({ region: process.env.region })
module.exports.fnPostElectricityTimestream = async event => {
try {
console.log('🚀 START fnPostElectricityTimestream')
const jsonBody = event
const topic = jsonBody.topic
const arrTopic = topic.split('/')
let dbName = arrTopic[4]
dbName = 'smaj56g' //Test
const currentTime = Date.now().toString() // Unix time in milliseconds get jsonBody.e_timestamp
const e_timestamp = (jsonBody.e_timestamp)*1000
const dimensions = [{
'Name': 'n',
'Value': 'v'
}]
const e_ch_1 = {
'Dimensions':dimensions,
'MeasureName': 'e_ch_1',
'MeasureValue': '[1,2,3]',
'MeasureValueType': 'VARCHAR',
'Time': currentTime
}
const records = [e_ch_1]
const params = {
DatabaseName: dbName,
TableName:'e_ch_1_v_w',
Records: records
}
const data = await client.send(params);
console.log('data', data)
return {
message: ''
}
} catch (error) {
console.log('🚀 fnPostElectricityTimestream - error.stack:', error.stack)
return {
message: error.stack
}
}
}
When I run the lambda this is the message I am getting:
2022-08-12T14:58:39.496Z e578a391-06b4-48a9-9f9d-9440a373c19e INFO 🚀 fnPostElectricityTimestream - error.stack: TypeError: command.resolveMiddleware is not a function
at TimestreamWriteClient.send (/var/task/node_modules/@aws-sdk/smithy-client/dist-cjs/client.js:13:33)
at Runtime.module.exports.fnPostElectricityTimestream [as handler] (/var/task/src/ElectricityTimestream/fnPostElectricityTimestream.js:38:31)
at Runtime.handleOnceNonStreaming (/var/runtime/Runtime.js:73:25)
Something is wrong with const data = await client.send(params).
I am following the async/await code in this documentation.
How can I solve this issue?
Your current insertion code is wrong. In order to write records to Timestream, you need to use the WriteRecordsCommand. Refer to the docs for a better understanding. Sample code:
import { TimestreamWriteClient, WriteRecordsCommand } from "@aws-sdk/client-timestream-write";
const client = new TimestreamWriteClient({ region: "REGION" }); //your AWS region
const params = {
DatabaseName: dbName, //your database
TableName: tableName, //your table name
Records: records //records you want to insert
}
const command = new WriteRecordsCommand(params);
const data = await client.send(command);
You need to create a command before calling send. For example:
import { TimestreamWriteClient, CreateDatabaseCommand } from "@aws-sdk/client-timestream-write";
// Note: CreateDatabaseCommand only takes the database parameters;
// to insert records, build a WriteRecordsCommand as shown in the answer above.
const params = {
DatabaseName: dbName
}
const command = new CreateDatabaseCommand(params);
const data = await client.send(command);
I am trying to upload large files to an S3 bucket using the Node.js aws-sdk.
The v2 method upload handles large files as a multipart upload internally.
I want to use the new v3 aws-sdk. What is the way to upload large files in the new version? The PutObjectCommand method doesn't seem to do it.
I've seen there are methods such as CreateMultipartUpload, but I can't seem to find a full working example using them.
Thanks in advance.
As of 2021, I would suggest using the lib-storage package, which abstracts a lot of the implementation details.
Sample code:
import { Upload } from "@aws-sdk/lib-storage";
import { S3Client, S3 } from "@aws-sdk/client-s3";
const target = { Bucket, Key, Body };
try {
const parallelUploads3 = new Upload({
client: new S3Client({}), // either the S3 or the S3Client constructor can be passed here
tags: [...], // optional tags
queueSize: 4, // optional concurrency configuration
partSize: 1024 * 1024 * 5, // optional size of each part, in bytes (5 MB is the minimum)
leavePartsOnError: false, // optional manually handle dropped parts
params: target,
});
parallelUploads3.on("httpUploadProgress", (progress) => {
console.log(progress);
});
await parallelUploads3.done();
} catch (e) {
console.log(e);
}
Source: https://github.com/aws/aws-sdk-js-v3/blob/main/lib/lib-storage/README.md
Here's what I came up with, to upload a Buffer as a multipart upload, using aws-sdk v3 for nodejs and TypeScript.
Error handling still needs some work (you might want to abort/retry in case of an error), but it should be a good starting point... I have tested this with XML files up to 15MB, and so far so good. No guarantees, though! ;)
import {
CompleteMultipartUploadCommand,
CompleteMultipartUploadCommandInput,
CreateMultipartUploadCommand,
CreateMultipartUploadCommandInput,
S3Client,
UploadPartCommand,
UploadPartCommandInput
} from '@aws-sdk/client-s3'
const client = new S3Client({ region: 'us-west-2' })
export const uploadMultiPartObject = async (file: Buffer, createParams: CreateMultipartUploadCommandInput): Promise<void> => {
try {
const createUploadResponse = await client.send(
new CreateMultipartUploadCommand(createParams)
)
const { Bucket, Key } = createParams
const { UploadId } = createUploadResponse
console.log('Upload initiated. Upload ID: ', UploadId)
// 5MB is the minimum part size
// Last part can be any size (no min.)
// Single part is treated as last part (no min.)
const partSize = (1024 * 1024) * 5 // 5MB
const fileSize = file.length
const numParts = Math.ceil(fileSize / partSize)
const uploadedParts = []
let remainingBytes = fileSize
for (let i = 1; i <= numParts; i ++) {
let startOfPart = fileSize - remainingBytes
let endOfPart = Math.min(partSize, startOfPart + remainingBytes)
if (i > 1) {
endOfPart = startOfPart + Math.min(partSize, remainingBytes)
startOfPart += 1
}
const uploadParams: UploadPartCommandInput = {
// add 1 to endOfPart due to slice end being non-inclusive
Body: file.slice(startOfPart, endOfPart + 1),
Bucket,
Key,
UploadId,
PartNumber: i
}
const uploadPartResponse = await client.send(new UploadPartCommand(uploadParams))
console.log(`Part #${i} uploaded. ETag: `, uploadPartResponse.ETag)
remainingBytes -= Math.min(partSize, remainingBytes)
// For each part upload, you must record the part number and the ETag value.
// You must include these values in the subsequent request to complete the multipart upload.
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html
uploadedParts.push({ PartNumber: i, ETag: uploadPartResponse.ETag })
}
const completeParams: CompleteMultipartUploadCommandInput = {
Bucket,
Key,
UploadId,
MultipartUpload: {
Parts: uploadedParts
}
}
console.log('Completing upload...')
const completeData = await client.send(new CompleteMultipartUploadCommand(completeParams))
console.log('Upload complete: ', completeData.Key, '\n---')
} catch(e) {
throw e
}
}
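On the error handling mentioned above: if a part upload fails, the parts that were already uploaded stay in the bucket (and keep incurring storage cost) until the multipart upload is aborted. A minimal sketch of that cleanup path, assuming the same client and the Bucket, Key and UploadId from the code above:

import { AbortMultipartUploadCommand } from '@aws-sdk/client-s3'

// Abort the multipart upload so the already-uploaded parts are discarded
const abortUpload = async (Bucket, Key, UploadId) => {
  await client.send(new AbortMultipartUploadCommand({ Bucket, Key, UploadId }))
  console.log('Multipart upload aborted: ', UploadId)
}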
Here is the fully working code with AWS SDK v3
import { Upload } from "@aws-sdk/lib-storage";
import { S3Client, S3 } from "@aws-sdk/client-s3";
import { createReadStream } from 'fs';
const inputStream = createReadStream('clamav_db.zip');
const Bucket = process.env.DB_BUCKET
const Key = process.env.FILE_NAME
const Body = inputStream
const target = { Bucket, Key, Body};
try {
const parallelUploads3 = new Upload({
client: new S3Client({
region: process.env.AWS_REGION,
credentials: { accessKeyId: process.env.AWS_ACCESS_KEY, secretAccessKey: process.env.AWS_SECRET_KEY }
}),
queueSize: 4, // optional concurrency configuration
partSize: 5242880, // optional size of each part
leavePartsOnError: false, // optional manually handle dropped parts
params: target,
});
parallelUploads3.on("httpUploadProgress", (progress) => {
console.log(progress);
});
await parallelUploads3.done();
} catch (e) {
console.log(e);
}
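A small design note on the snippet above: when this runs inside Lambda, you usually don't pass accessKeyId/secretAccessKey yourself; the SDK picks up temporary credentials from the function's execution role, so new S3Client({ region: process.env.AWS_REGION }) is typically enough. Explicit credentials are mainly needed when running outside AWS.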
I'm still new to Node.js and AWS, so forgive me if this is a noob question.
I am trying to read the data from an Excel file (.xlsx). The Lambda function receives the extension of the file type.
Here is my code:
exports.handler = async (event, context, callback) => {
console.log('Received event:', JSON.stringify(event, null, 2));
if (event.fileExt === undefined) {
callback("400 Invalid Input");
}
let returnData = "";
const S3 = require('aws-sdk/clients/s3');
const s3 = new S3();
switch(event.fileExt)
{
case "plain":
case "txt":
// Extract text
const params = {Bucket: 'filestation', Key: 'MyTXT.'+event.fileExt};
try {
await s3.getObject(params, function(err, data) {
if (err) console.log(err, err.stack); // an error occurred
else{ // successful response
returnData = data.Body.toString('utf-8');
context.done(null, returnData);
}
}).promise();
} catch (error) {
console.log(error);
return;
}
break;
case "xls":
case "xlsx":
returnData = "Excel";
// Extract text
const params2 = {Bucket: 'filestation', Key: 'MyExcel.'+event.fileExt};
const readXlsxFile = require("read-excel-file/node");
try {
const doc = await s3.getObject(params2);
const parsedDoc = await readXlsxFile(doc);
console.log(parsedDoc)
} catch (err) {
console.log(err);
const message = `Error getting object.`;
console.log(message);
throw new Error(message);
}
break;
case "docx":
returnData = "Word doc";
// Extract text
break;
default:
callback("400 Invalid Operator");
break;
}
callback(null, returnData);
};
The text-file part works, but the xlsx part makes the function time out.
I did install the read-excel-file dependency and uploaded the zip so that I have access to it.
But the function still times out with this message:
"errorMessage": "2020-11-02T13:06:50.948Z 120bfb48-f29c-4e3f-9507-fc88125515fd Task timed out after 3.01 seconds"
Any help would be appreciated! Thanks for your time.
Using the xlsx npm library, here's how we did it, assuming the file is under the root project path.
const xlsx = require('xlsx');
// read your excel file
let readFile = xlsx.readFile('file_example_XLSX_5000.xlsx')
// get first-sheet's name
let sheetName = readFile.SheetNames[0];
// convert sheets to JSON. Best if sheet has a headers specified.
console.log(xlsx.utils.sheet_to_json(readFile.Sheets[sheetName]));
You need to install the xlsx (SheetJS) library into the project:
npm install xlsx
and then import the "read" function into the Lambda, get the S3 object's body and pass it to xlsx like this:
const { read } = require('xlsx');
const aws = require('aws-sdk');
const s3 = new aws.S3({ apiVersion: '2006-03-01' });
exports.handler = async (event) => {
const bucketName = 'excel-files';
const fileKey = 'Demo Data.xlsx';
// Simple GetObject
let file = await s3.getObject({Bucket: bucketName, Key: fileKey}).promise();
const wb = read(file.Body);
const response = {
statusCode: 200,
body: JSON.stringify({
read: wb.Sheets,
}),
};
return response;
};
(of course, you can receive the bucket and filekey from parameters if you send them...)
Very important: use the read function (not readFile) and pass the Body property (with a capital "B") as a parameter.
I changed the timeout to 20 seconds and it works. Only one issue remains: const parsedDoc = await readXlsxFile(doc); wants to receive a string (filepath) and not a file.
Solved by using the xlsx npm library, using a stream and giving it buffers.
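For reference, a minimal sketch of that approach, assuming the same bucket and key as in the question (xlsx.read accepts the Buffer returned in the S3 object's Body):

const xlsx = require('xlsx');

// Inside the "xls"/"xlsx" case of the handler above
const params2 = { Bucket: 'filestation', Key: 'MyExcel.' + event.fileExt };
const file = await s3.getObject(params2).promise(); // aws-sdk v2 style, as in the question
const workbook = xlsx.read(file.Body);              // Body is a Buffer
const sheetName = workbook.SheetNames[0];
returnData = JSON.stringify(xlsx.utils.sheet_to_json(workbook.Sheets[sheetName]));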
I have the following lambda function. It received an XML, looks through it, finds a base64 pdf file and tries to upload it to S3.
index.js
const AWS = require('aws-sdk');
const xml2js = require('xml2js');
const pdfUpload = require('./upload_pdf');
const s3 = new AWS.S3();
exports.handler = async (event, context, callback) => {
let attachment;
xml2js.parseString(event.body, function(err, result) {
attachment =
result.Attachment[0].Data[0];
if (attachment) {
pdfUpload(attachment);
}
});
return {
statusCode: 200
}
};
upload_pdf.js
/**
*
* @param {string} base64 Data
* @return {string} Image url
*/
const pdfUpload = async (base64) => {
const AWS = require('aws-sdk');
const s3 = new AWS.S3();
const base64Data = new Buffer.from(base64, 'base64');
// With this setup, each time your user uploads an image, will be overwritten.
// To prevent this, use a different Key each time.
// This won't be needed if they're uploading their avatar, hence the filename, userAvatar.js.
const params = {
Bucket: 'mu-bucket',
Key: `123.pdf`,
Body: base64Data,
ACL: 'public-read',
ContentEncoding: 'base64',
ContentType: `application/pdf`
}
let location = '';
let key = '';
try {
const { Location, Key } = await s3.upload(params).promise();
location = Location;
key = Key;
} catch (error) {
// console.log(error)
}
console.log(location, key);
return location;
}
module.exports = pdfUpload;
No matter what I do, the file does not get uploaded. I have checked the permissions, and the lambda has access to the bucket. Running the lambda I'm not receiving any errors either. Can anybody see what might be wrong here?
First, as a piece of advice, I think you should add more logging to see at which step the function is stuck or failing.
The second thing you can try is to await the upload:
await pdfUpload(attachment);
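To expand on that: pdfUpload is async but is called from inside the xml2js callback, so the handler returns (and Lambda may freeze the environment) before the upload finishes. One way to keep everything on the handler's promise chain is xml2js's promise API; a sketch using the names from the question:

const xml2js = require('xml2js');
const pdfUpload = require('./upload_pdf');

exports.handler = async (event) => {
  // parseStringPromise avoids the callback and lets us await the upload
  const result = await xml2js.parseStringPromise(event.body);
  const attachment = result.Attachment[0].Data[0];
  if (attachment) {
    await pdfUpload(attachment); // wait for the S3 upload before returning
  }
  return { statusCode: 200 };
};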
I've got a storage trigger function which resizes and replaces the uploaded image in storage and then updates the URL in my database:
}).then(() => {
console.log('Original file deleted', filePath)
const logo = storageRef.file(JPEGFilePath)
return logo.getSignedUrl({ action: 'read', expires: date })
// const logo = storageRef.child(JPEGFilePath)
// return logo.getDownloadURL()
// return storageUrl.getDownloadURL(JPEGFilePath)
}).then((url) => {
const newRef = db.collection("user").doc(uid)
return newRef.set({
profile: { profileImg: url[0] }
}, {
merge: true
})
})
Here is how I set the expiry date:
const d = new Date()
const date = new Date(d.setFullYear(d.getFullYear() + 200)).toString()
However, the image URL expires in a few weeks (roughly about 2 weeks). Does anyone know how to fix that? I have even played with getDownloadURL, as you can see from the commented code, but that doesn't seem to work in a trigger.
Per the following links:
https://stackoverflow.com/a/42959262/370321
https://cloud.google.com/nodejs/docs/reference/storage/2.5.x/File#getSignedPolicy
Not sure which version of @google-cloud/storage you're using, but assuming it's 2.5.x, any value you pass in the expires field is passed into new Date(), so your code should work (it did when I tried it in my dev tools). The only thing I can guess is that it doesn't like that you want a file to live for 200 years.
Per the source code:
https://github.com/googleapis/nodejs-storage/blob/master/src/file.ts#L2358
Have you tried a shorter amount of time, or formatting it as a date string in the mm-dd-yyyy format?
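For example, something along these lines (a sketch only; '01-01-2030' is an arbitrary date in the mm-dd-yyyy form the library documents):

// Hypothetical: a nearer expiry passed as an mm-dd-yyyy string
const logo = storageRef.file(JPEGFilePath)
return logo.getSignedUrl({ action: 'read', expires: '01-01-2030' })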
OK, so I have tried something, but I have no idea whether it will work, so I'll come back in 2 weeks and mark my question as answered if it does. For those with the same problem, I'll recapitulate what I've done.
1/ Download the service account key from the console. Here is the link:
https://console.firebase.google.com/project/_/settings/serviceaccounts/adminsdk
2/ Save the downloaded JSON file in your function directory
3/ Include the key when you initialize Storage in your function. But be careful how you set the path to the file. Here is my question about it:
https://stackoverflow.com/a/56407592/11486115
UPDATE
I just found a mistake in my function: the URL was being generated by the Cloud Functions reference by mistake (the commented-out code).
Here is the complete function:
const {
db
} = require('../../admin')
const projectId = "YOUR-PROJECT-ID"
const { Storage } = require('@google-cloud/storage');
const storage = new Storage({ projectId: projectId ,keyFilename: 'PATH-TO-SERVICE-ACCOUNT'})
const os = require('os');
const fs = require('fs');
const path = require('path');
const spawn = require('child-process-promise').spawn
const JPEG_EXTENSION = '.jpg'
exports.handler = ((object) => {
const bucket = object.bucket;
const contentType = object.contentType;
const filePath = object.name
const JPEGFilePath = path.normalize(path.format({ dir: path.dirname(filePath), name: 'profileImg', ext: JPEG_EXTENSION }))
const destBucket = storage.bucket(bucket)
const tempFilePath = path.join(os.tmpdir(), path.basename(filePath))
const tempLocalJPEGFile = path.join(os.tmpdir(), path.basename(JPEGFilePath))
const metadata = {
contentType: contentType
}
const uid = filePath.split("/").slice(1, 2).join("")
const d = new Date()
const date = new Date(d.setFullYear(d.getFullYear() + 200)).toString()
if (!object.contentType.startsWith('image/')) {
return destBucket.file(filePath).delete().then(() => {
console.log('File is not an image ', filePath, ' DELETED')
return null
});
}
if (object.metadata.modified) {
console.log('Image processed')
return null
}
return destBucket.file(filePath).download({
destination: tempFilePath
})
.then(() => {
console.log('The file has been downloaded to', tempFilePath)
return spawn('convert', [tempFilePath, '-resize', '100x100', tempLocalJPEGFile])
}).then(() => {
console.log('JPEG image created at', tempLocalJPEGFile)
metadata.modified = true
return destBucket.upload(tempLocalJPEGFile,
{
destination: JPEGFilePath,
metadata: { metadata: metadata }
})
}).then(() => {
console.log('JPEG image uploaded to Storage at', JPEGFilePath)
return destBucket.file(filePath).delete()
}).then(() => {
console.log('Original file deleted', filePath)
//const logo = storageRef.file(JPEGFilePath)
const logo = destBucket.file(JPEGFilePath)
return logo.getSignedUrl({ action: 'read', expires: date })
}).then((url) => {
const newRef = db.collection("user").doc(uid)
return newRef.set({
profile: { profileImg: url[0] }
}, {
merge: true
})
}).then(() => {
fs.unlinkSync(tempFilePath);
fs.unlinkSync(tempLocalJPEGFile)
console.log(uid, 'user database updated ')
return null
})
})
I'm pretty confident that this will work now.