I am trying to upload a file to AWS S3, but it shows me an error like the following.
Node code:
const AWS = require('aws-sdk');

const uploadFile = async (file) => {
  const s3 = new AWS.S3({
    accessKeyId: "<AWSS3_AccessKey>",
    secretAccessKey: "<AWSS3_SecretKey>",
    region: "ap-south-1"
  });
  const params = {
    Bucket: "test123", // pass your bucket name
    Key: file.name, // file name
    Body: file.data, // file data
  };
  s3.upload(params, function (s3Err, data) {
    if (s3Err) throw s3Err;
    // console.log(`File uploaded successfully at ${data.Location}`)
  });
};
var files = [];
var fileKeys = Object.keys(req.files);
fileKeys.forEach(function (key) {
  var file = req.files[key];
  files.push(file.name);
  uploadFile(file);
});
Your AWS CLI config file is not configured properly. The config file is located at ~/.aws/config on Linux, macOS, or Unix, and at C:\Users\USERNAME\.aws\config on Windows.
You need to set up this file before you use any SDK to call AWS services. Here is a link that will guide you through setting up the AWS CLI on different operating systems:
Setup AWS CLI
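For reference, a minimal setup (a sketch with placeholder values; the ap-south-1 region matches the code above) usually consists of two files:

~/.aws/credentials
[default]
aws_access_key_id = <your-access-key-id>
aws_secret_access_key = <your-secret-access-key>

~/.aws/config
[default]
region = ap-south-1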
Related
I've created an API to upload an image to an Amazon S3 bucket with Node.js, multer, and multer-s3.
It's working fine in development, returning a response with an image URL that is downloadable and accessible, but when I host my node app on AWS Lambda (API Gateway), the API returns the same response with the image URL. This time, though, when I open the downloaded image it shows me INVALID FILE FORMATTED.
Here is my code:
const uploadImage = async (req, res) => {
  let myFile = req.file.originalname.split(".")
  const fileType = myFile[myFile.length - 1]
  const params = {
    Bucket: 'test-bucket2601',
    Key: `${Date.now()}.${fileType}`,
    Body: req.file.buffer
  }
  s3.upload(params, (error, data) => {
    if (error) {
      return res.status(500).send(error) // return so the JSON response below is not also sent
    }
    res.json({ data })
  })
}
Route for the middleware:
routes.route('/upload').post(upload, uploadImage);
In the post method, the first argument is the upload middleware.
Middleware Code:
const s3 = new aws.S3({
  credentials: {
    accessKeyId: awsKeys?.accessKeyId,
    secretAccessKey: awsKeys?.secretAccessKey
  }
});
const storage = multer.memoryStorage({
  destination: function (req, file, callback) {
    callback(null, '')
  }
})
const upload = multer({ storage }).single('imageUrl')
I'm trying to read from an S3 bucket. Currently I have this code:
var AWS = require('aws-sdk');
AWS.config.update({accessKeyId: 'myAccesID', secretAccessKey: 'superRandomSecretKey', region: 'us-west-2'});
var s3 = new AWS.S3();
var params = {
  Bucket: 'my-bucket',
  Delimiter: '/'
}
s3.listObjects(params, function (err, data) {
  if (err) throw err;
  console.log(data);
});
But I get Access Denied. I know that my named profile works, because I can list my files with this AWS CLI command:
aws s3 ls s3://my-bucket --recursive --profile my-named-profile
So, how can I initialize the S3 client with a named profile?
The recommended way to access S3 from an instance is through IAM roles for Amazon EC2.
A basic role could just contain the AWS managed policy AmazonS3ReadOnlyAccess:
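For reference, that managed policy is roughly equivalent to the following policy document:

{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": [
        "s3:Get*",
        "s3:List*"
      ],
      "Resource": "*"
    }
  ]
}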
With the role attached to your instance, you don't need to do anything special for aws-sdk to use it; the SDK will pick it up automatically. Your code could therefore simply be:
var AWS = require('aws-sdk');
var s3 = new AWS.S3();
var params = {
  Bucket: 'my-bucket',
  Delimiter: '/'
}
s3.listObjects(params, function (err, data) {
  if (err) throw err;
  console.log(data);
});
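If you do want to run the code outside EC2 with the named profile from your credentials file (the one used in the aws s3 ls command above), aws-sdk v2 can also load it explicitly. A minimal sketch, assuming the profile is called my-named-profile:

var AWS = require('aws-sdk');

// Load the named profile from ~/.aws/credentials
AWS.config.credentials = new AWS.SharedIniFileCredentials({ profile: 'my-named-profile' });

var s3 = new AWS.S3({ region: 'us-west-2' });
s3.listObjects({ Bucket: 'my-bucket', Delimiter: '/' }, function (err, data) {
  if (err) throw err;
  console.log(data);
});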
Good evening.
I have this task: I have to upload an image to an S3 bucket using Node.js and generate a thumbnail on the fly, not by using a Lambda trigger. Everything should be done from my local machine terminal or from a local server (Postman). I tried this code:
const fs = require('fs');
const AWS = require('aws-sdk');

const ACESS_ID = 'A**********KV';
const SECRET_ID = 'G***********0';
const BUCKET_NAME = 'node-image-bucket';

// Initializing s3 interface
const s3 = new AWS.S3({
  accessKeyId: ACESS_ID,
  secretAccessKey: SECRET_ID,
});
// File reading function to S3
const uploadFile = (fileName) => {
  // Read content from the file
  const fileContent = fs.readFileSync(fileName);
  // Setting up S3 upload parameters
  const params = {
    Bucket: BUCKET_NAME,
    Key: 'scene2.jpg',
    Body: fileContent
  };
  // Uploading files to the bucket
  s3.upload(params, function (err, data) {
    if (err) {
      throw err;
    }
    console.log(data);
    console.log(`File uploaded Successfully. ${data.Location}`);
  });
};

uploadFile('./images/bg-hd.jpg');
The above code works fine with a single image; the problem is that every time I upload a file to the S3 bucket I need to change the Key string in the S3 params.
I want to upload multiple images at once, use a buffer for performance, and have thumbnails created automatically in a different folder in the same bucket.
Could anyone help me? Any help is appreciated!
You cannot upload multiple files with one S3 operation, but you can use the sharp module (https://www.npmjs.com/package/sharp) to resize your image before calling the S3 API:
const sharp = require('sharp');

// Resize an image buffer and return the resized buffer
async function resize(buffer, width, height) {
  return sharp(buffer).resize(width, height).toBuffer();
}

const thumbnailWidth = 200;
const thumbnailHeight = 200;
const thumbnailImage = await resize(fileContent, thumbnailWidth, thumbnailHeight)
You can then reuse your current upload function and run it once per image size, with different keys, wrapping the calls in Promise.all so the operation fails if any of the uploads fails:
await Promise.all([
  s3upload(image, imageKey),
  s3upload(thumbnailImage, thumbnailImageKey)
])
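s3upload is not defined in the answer; a minimal sketch of such a helper, assuming the s3 client and BUCKET_NAME from the question, could look like this:

// Hypothetical helper: wraps s3.upload in a promise so it works with Promise.all
function s3upload(body, key) {
  return s3.upload({
    Bucket: BUCKET_NAME,
    Key: key,
    Body: body
  }).promise();
}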
So, there are two parts to your question -
Converting the image to a thumbnail on the fly while uploading to the S3 bucket -
You can use the thumbd npm module and run a thumbd server.
Thumbd is an image thumbnailing server built on top of Node.js, SQS, S3, and ImageMagick.
Prerequisites for the thumbd server -
Thumbd requires the following environment variables to be set:
AWS_KEY the key for your AWS account (the IAM user must have access to the appropriate SQS and S3 resources).
AWS_SECRET the AWS secret key.
BUCKET the bucket to download the original images from. The thumbnails will also be placed in this bucket.
AWS_REGION the AWS Region of the bucket. Defaults to: us-east-1.
CONVERT_COMMAND the ImageMagick convert command. Defaults to convert.
REQUEST_TIMEOUT how long to wait in milliseconds before aborting a remote request. Defaults to 15000.
S3_ACL the acl to set on the uploaded images. Must be one of private, or public-read. Defaults to private.
S3_STORAGE_CLASS the storage class for the uploaded images. Must be either STANDARD or REDUCED_REDUNDANCY. Defaults to STANDARD.
SQS_QUEUE the queue name to listen for image thumbnailing.
When running locally, I set these environment variables in a .env file and execute thumbd using pm2/forever/foreman.
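A sample .env for local use might look like this (all values are placeholders; the bucket, region, and queue names match the code further below):

AWS_KEY=<your-aws-key>
AWS_SECRET=<your-aws-secret>
BUCKET=node-image-bucket
AWS_REGION=us-west-2
S3_ACL=public-read
SQS_QUEUE=ThumbnailCreator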
Setup -
apt-get install imagemagick
npm install thumbd -g
thumbd install
thumbd start // Run thumbd as a server
After the thumbd server is up and running, refer to the code below to convert an image to thumbnails while uploading it to the S3 bucket.
var aws = require('aws-sdk');
var url = require("url");
var awsS3Config = {
  accessKeyId: ACESS_ID,
  secretAccessKey: SECRET_ID,
  region: 'us-west-2'
}
var BUCKET_NAME = 'node-image-bucket';
var sourceImageDirectory = "/tmp/"
var imageUploadDir = "/thumbnails/"
var imageName = 'image.jpg'
var uploadImageName = 'image.jpg'
aws.config.update(awsS3Config);
var s3 = new aws.S3();
var Client = require('thumbd').Client,
  client = new Client({
    awsKey: awsS3Config.accessKeyId,
    awsSecret: awsS3Config.secretAccessKey,
    s3Bucket: BUCKET_NAME,
    sqsQueue: 'ThumbnailCreator',
    awsRegion: awsS3Config.region,
    s3Acl: 'public-read'
  });
export function uploadAndResize(sourceImageDirectory, imageName, imageUploadDir, uploadImageName) {
  return new Promise((resolve, reject) => {
    client.upload(sourceImageDirectory + imageName, imageUploadDir + uploadImageName, function (err) {
      if (err) {
        reject(err);
      } else {
        client.thumbnail(imageUploadDir + uploadImageName, [{
          "suffix": "medium",
          "width": 360,
          "height": 360,
          "background": "white",
          "strategy": "%(command)s %(localPaths[0])s -resize %(width)sX%(height)s^ -gravity north -extent %(width)sX%(height)s %(convertedPath)s"
        }, {
          "suffix": "thumb",
          "width": 100,
          "height": 100,
          "background": "white",
          "strategy": "%(command)s %(localPaths[0])s -resize %(width)sX%(height)s^ -gravity north -extent %(width)sX%(height)s %(convertedPath)s"
        }], {
          // notify: 'https://callback.example.com'
        });
        var response = {};
        // e.g. https://s3-ap-us-west-2.amazonaws.com/node-image-bucket/1/5825c7d0-127f-4dac-b802-ca24efba2bcd-original.jpeg
        response.url = 'https://s3-' + awsS3Config.region + '.amazonaws.com/' + BUCKET_NAME + '/' + imageUploadDir;
        response.uploadImageName = uploadImageName;
        response.sourceImageName = imageName;
        resolve(response);
      }
    })
  })
}
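With the variables defined above, a call could look like this (a sketch; the logging is only illustrative):

uploadAndResize(sourceImageDirectory, imageName, imageUploadDir, uploadImageName)
  .then((response) => console.log('Uploaded, thumbnails queued:', response.url))
  .catch((err) => console.error('Upload failed:', err));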
Second, you wanted to upload multiple images without changing the key string -
Loop over the method below for all the files in a local path (see the sketch after the function) and you are good to go.
export function uploadFiles(localPath, localFileName, fileUploadDir, uploadFileName) {
  return new Promise((resolve, reject) => {
    fs.readFile(localPath + localFileName, function (err, file) {
      if (err) {
        return reject(err);
      }
      var params = {
        ACL: 'public-read',
        Bucket: BUCKET_NAME,
        Key: uploadFileName,
        Body: file
      };
      s3.upload(params, function (err, data) {
        if (err) {
          return reject(err);
        }
        // Remove the local copy once the upload succeeds
        fs.unlink(localPath + localFileName, function (err) {
          if (err) {
            reject(err);
          } else {
            resolve(data)
          }
        });
      });
    });
  })
}
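For example, the loop mentioned above could be written like this (a sketch; imageDir is a hypothetical local folder):

const imageDir = './images/';

// Upload every file in the folder; fail the whole batch if any upload fails
Promise.all(
  fs.readdirSync(imageDir).map((name) => uploadFiles(imageDir, name, '', name))
).then((results) => console.log(`Uploaded ${results.length} files`))
 .catch((err) => console.error('An upload failed', err));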
I feel stupid. I'm trying to create a function to download files from my AWS S3 bucket to the client PC using Node.js. The filePath variable works perfectly when I run the server on localhost, but when the project is deployed I get Error: EISDIR: illegal operation on a directory, open './data/'.
On localhost it downloads the documents to the data directory in my project. I would like it to download to the Downloads directory on the client PC.
I have no idea how to specify a file path to a local directory. I've tried all sorts of paths, such as ./d/Users/username/Desktop, with the same error each time.
AWS.config.update({
  accessKeyId: "id",
  secretAccessKey: "key"
});

const filePath = './data/' + req.body.file;
const bucketName = 'bucket';
const key = req.body.key;
var s3 = new AWS.S3();

const downloadFile = (filePath, bucketName, key) => {
  var params = {
    Bucket: bucketName,
    Key: key
  }
  s3.getObject(params, (err, data) => {
    if (err) console.log(err)
    fs.writeFileSync(filePath, data.Body)
    console.log(`${filePath} has been created!`);
    res.send("File Downloaded");
  })
}
downloadFile(filePath, bucketName, key)
});
I'm sure this is simple, but I can't find any specific examples online. Any help would be highly appreciated.
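One way to get the file onto the client PC is to stream the S3 object in the HTTP response and let the browser save it to its own Downloads folder, rather than writing to disk on the server. A minimal sketch of that idea, assuming an Express route and the same req.body.key as above:

app.post('/download', (req, res) => {
  const params = { Bucket: 'bucket', Key: req.body.key };
  // Suggest a filename so the browser treats the response as a download
  res.attachment(req.body.key);
  s3.getObject(params)
    .createReadStream()
    .on('error', (err) => res.status(500).send(err.message))
    .pipe(res);
});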
I have a little app to upload a file to AWS S3. It uploads OK, but when I download the file from the S3 bucket it is encoded, showing type: Buffer etc.
If I upload the same file from the console, it shows fine.
Here is the code to upload:
const fs = require('fs');
const AWS = require('aws-sdk');

const s3 = new AWS.S3({
  accessKeyId: process.env.AWS_ACCESS_KEY,
  secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY
});

const fileName = 'su.csv';

const uploadFile = () => {
  fs.readFile(fileName, (err, data) => {
    if (err) throw err;
    const params = {
      Bucket: 'mybukk22-test', // pass your bucket name
      Key: 'su.csv', // file will be saved as testBucket/contacts.csv
      Body: JSON.stringify(data, null, 2)
    };
    s3.upload(params, function (s3Err, data) {
      if (s3Err) throw s3Err;
      console.log(`File uploaded successfully at ${data.Location}`)
    });
  });
};

uploadFile();
Is the problem in the Body? How do I save the file the same as it is on the client?
Thanks.
The issue is that you are trying to do a JSON.stringify on a Buffer, because fs.readFile returns a Buffer. To make it work you could change your params to the following:
const params = {
  Bucket: 'mybukk22-test', // pass your bucket name
  Key: 'su.csv', // file will be saved as testBucket/contacts.csv
  Body: data
};
(Just pass data 1:1 as the Body of your upload operation.)
Otherwise, if you'd like to stick with your solution, cast data to a string first, like this: JSON.stringify(data.toString(), null, 2)