I'm using the aws-sdk for Node.js and I'm trying to get an object that's located in a sub-directory, but I don't get any file back.
The file path is: mybucket/subfoldername/file.txt
getObject = function getListObjects(bucketName, bucketPath, fileName) {
    return new Promise((resolve, reject) => {
        let params = {Key: fileName, Bucket: bucketName + bucketPath};
        s3.getObject(params, function (error, data) {
            if (error) {
                console.log(`Can't connect to S3. S3 Path = [${fileName}] to Bucket = [${params.Bucket}]`, error);
                return reject(error);
            } else {
                console.log(`Found files in S3. S3 Path = [${fileName}] to Bucket = [${params.Bucket}]`);
                return resolve(data.Contents);
            }
        });
    });
};
The arguments I pass are:
bucketName: "mybucket"
bucketPath: "/subfoldername"
fileName: "file.txt"
Please advise.
The Key argument should include the "path" string too, as S3 objects are referenced by their complete key (path):
let params = { Key: `${bucketPath}/${fileName}`, Bucket: bucketName };
I've taken the liberty of using template literals (backticks) there. Also, notice that I've added a "/" separator; please check that it isn't already included in bucketPath.
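Note also that getObject returns a single object rather than a listing, so its payload is in data.Body (data.Contents is what listObjects returns). A minimal sketch of the corrected function under those assumptions, normalizing the separator so a leading or doubled "/" can't sneak into the key:

getObject = function getListObjects(bucketName, bucketPath, fileName) {
    return new Promise((resolve, reject) => {
        // Sketch only: strip stray "/" from the prefix before building the full key.
        const prefix = bucketPath.replace(/^\/+|\/+$/g, '');
        let params = { Key: prefix ? `${prefix}/${fileName}` : fileName, Bucket: bucketName };
        s3.getObject(params, function (error, data) {
            if (error) {
                console.log(`Can't read from S3. Key = [${params.Key}], Bucket = [${params.Bucket}]`, error);
                return reject(error);
            }
            console.log(`Found object in S3. Key = [${params.Key}], Bucket = [${params.Bucket}]`);
            return resolve(data.Body); // Buffer with the object contents
        });
    });
};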
I have two videos in a folder which I upload to AWS from my local directory when I call the uploadDir function.
The videos are named dynamically after the path of their already existing file, because I am re-uploading them to replace the same files in the existing directory on AWS.
Example.
Video 1 name is: https://myBucketName.s3.amazonaws.com/Movies/video/comedy/93dcf22e84918d0efcb90ba2/Tom-41560712aa0f84f49ed130f2/Home Alone.mp4
Video 2 name is: https://myBucketName.s3.amazonaws.com/Movies/video/action/26wjf31g64918d0efcb90bw6/Ben-71560712aa0f84f49ed130f4/Mission Impossible.mp4
But because I can't save a filename with special characters, I replace the special characters in the video name in my local directory using the code below:
var str1 = videoName.replace(/:/g, 'X'); // a regex with the g flag replaces every ':', not just the first
var str2 = str1.replace(/[&\/\\#,<>{}]/g, 'Q');
So I replaced all the ':' with 'X', and replaced all the '/' with 'Q'
So now the video names in my directory that I want to upload back to AWS look like this:
Replaced Video 1 name is: httpsXQQmyBucketName.s3.amazonaws.comQMoviesQvideoQcomedyQ93dcf22e84918d0efcb90ba2QTom-41560712aa0f84f49ed130f2QHome Alone.mp4
Replaced Video 2 name is: httpsXQQmyBucketName.s3.amazonaws.comQMoviesQvideoQactionQ26wjf31g64918d0efcb90bw6QBen-71560712aa0f84f49ed130f4QMission Impossible.mp4
So now I want to upload the videos back to their AWS paths, which are the original video names.
To achieve this I believe I must first restore the original special characters, so every 'X' becomes ':' and every 'Q' becomes '/'.
The next step is to upload each video to the path given by its name.
How can I achieve this?
I already have a function uploading the videos to the AWS bucket, but not to the directory I want the videos to be in.
This is the code uploading the videos to AWS:
var AWS = require('aws-sdk');
var path = require("path");
var fs = require('fs');

const uploadDir = function(s3Path, bucketName) {
    let s3 = new AWS.S3();

    // Recursively walk the local directory and invoke the callback for every file.
    function walkSync(currentDirPath, callback) {
        fs.readdirSync(currentDirPath).forEach(function (name) {
            var filePath = path.join(currentDirPath, name);
            var stat = fs.statSync(filePath);
            if (stat.isFile()) {
                callback(filePath, stat);
            } else if (stat.isDirectory()) {
                walkSync(filePath, callback);
            }
        });
    }

    walkSync(s3Path, function(filePath, stat) {
        // Use the path relative to s3Path as the object key.
        let bucketPath = filePath.substring(s3Path.length + 1);
        let params = { Bucket: bucketName, Key: bucketPath, Body: fs.readFileSync(filePath) };
        s3.putObject(params, function(err, data) {
            if (err) {
                console.log(err);
            } else {
                console.log('Successfully uploaded ' + bucketPath + ' to ' + bucketName);
            }
        });
    });
};

uploadDir(path.resolve('path-containing-videos'), 'bucket-name');
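One way to get the original keys back, sketched below on the assumption that 'X' only ever stood for ':' and 'Q' only ever stood for '/' in these filenames: reverse the substitutions, strip the https://<bucket>.s3.amazonaws.com/ part so only the object key remains, and use that key for the upload. The helper names here (restoreOriginalName, keyFromUrl, uploadToOriginalPath) are hypothetical.

var path = require('path');
var fs = require('fs');

// Sketch only: assumes 'X' was only ever a replacement for ':' and 'Q' only for '/'.
function restoreOriginalName(replacedName) {
    return replacedName.replace(/X/g, ':').replace(/Q/g, '/');
}

function keyFromUrl(originalUrl) {
    // Drop "https://<bucket>.s3.amazonaws.com/"; the remaining pathname is the S3 key.
    return decodeURIComponent(new URL(originalUrl).pathname.replace(/^\//, ''));
}

// Upload a local file back to the key encoded in its (restored) name.
function uploadToOriginalPath(s3, bucketName, localFilePath) {
    var originalUrl = restoreOriginalName(path.basename(localFilePath));
    var key = keyFromUrl(originalUrl); // e.g. "Movies/video/comedy/.../Home Alone.mp4"
    return s3.putObject({
        Bucket: bucketName,
        Key: key,
        Body: fs.readFileSync(localFilePath)
    }).promise();
}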
I want to do this with the aws-sdk library.
I have a folder in my S3 bucket called "abcd/"; it has 3 files in it (e.g. abcd/1.jpg, abcd/2.jpg).
I want to rename the folder to 1234/
Afterwards I want only 1234/ to exist.
const awsMove = async (path) => {
  try {
    const s3 = new AWS.S3();
    const AWS_BUCKET = 'my-bucket-test';
    const copyParams = {
      Key: path.newPath,
      Bucket: AWS_BUCKET,
      CopySource: encodeURI(`/${AWS_BUCKET}/${path.oldPath}`),
    };
    await s3.copyObject(copyParams).promise();
    const deleteParams = {
      Key: path.oldPath,
      Bucket: AWS_BUCKET,
    };
    await s3.deleteObject(deleteParams).promise();
  } catch (err) {
    console.log(err);
  }
};

const changePath = { oldPath: 'abcd/', newPath: '1234/' };
awsMove(changePath);
The above code errors with "The specified key does not exist". What am I doing wrong?
AWS S3 does not have the concept of folders as in a file system. You have a bucket, and a key that identifies the object/file stored at that location. The pattern of the key is usually a/b/c/d/some_file, and the way it is shown in the AWS console might give you the impression that a, b, c and d are folders, but they aren't.
Now, you can't change the key of an object, since it is immutable. You have to copy the file existing at the current key to the new key and then delete the file at the current key.
This implies that renaming a folder, say folder/, is the same as copying all files located at keys folder/* and creating new ones at newFolder/*. The error:
The specified key does not exist
says that you haven't specified a full object key during the copy from the source, nor during the deletion. The correct implementation is to list all files at folder/*, then copy and delete them one by one. So your function should be doing something like this:
const awsMove = async (path) => {
  try {
    const s3 = new AWS.S3();
    const AWS_BUCKET = 'my-bucket-test';
    const listParams = {
      Bucket: AWS_BUCKET,
      Delimiter: '/',
      Prefix: path.oldPath
    };
    const data = await s3.listObjects(listParams).promise();
    for (const elem of data.Contents) {
      // elem.Key already starts with the old prefix, e.g. "abcd/1.jpg",
      // so build the new key by swapping the prefix.
      const newKey = elem.Key.replace(path.oldPath, path.newPath);
      const copyParams = {
        Key: newKey,
        Bucket: AWS_BUCKET,
        CopySource: encodeURI(`/${AWS_BUCKET}/${elem.Key}`),
      };
      await s3.copyObject(copyParams).promise();
      const deleteParams = {
        Key: elem.Key, // delete the original object, not the copy
        Bucket: AWS_BUCKET,
      };
      await s3.deleteObject(deleteParams).promise();
    }
  } catch (err) {
    console.log(err);
  }
};
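One caveat: listObjects returns at most 1000 keys per response, so a "folder" with more objects than that needs pagination. A sketch of that, assuming the same bucket and prefix variables as above, using listObjectsV2 and its continuation token:

// Sketch only: gather every key under a prefix, following pagination.
async function listAllKeys(s3, bucket, prefix) {
  const keys = [];
  let continuationToken;
  do {
    const page = await s3.listObjectsV2({
      Bucket: bucket,
      Prefix: prefix,
      ContinuationToken: continuationToken,
    }).promise();
    page.Contents.forEach((obj) => keys.push(obj.Key));
    continuationToken = page.IsTruncated ? page.NextContinuationToken : undefined;
  } while (continuationToken);
  return keys;
}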
Unfortunately, you will need to copy the objects to the new keys and delete the ones at the old keys.
Boto3:
import boto3

AWS_BUCKET = 'my-bucket-test'
s3 = boto3.resource('s3')
s3.Object(AWS_BUCKET, 'new_file').copy_from(CopySource=f'{AWS_BUCKET}/old_file')
s3.Object(AWS_BUCKET, 'old_file').delete()
Node:
var AWS = require('aws-sdk');
var s3 = new AWS.S3();

var AWS_BUCKET = 'my-bucket-test';
var OLD_S3_KEY = 'old-file.json';
var NEW_S3_KEY = 'new-file.json';

s3.copyObject({
    Bucket: AWS_BUCKET,
    CopySource: `${AWS_BUCKET}/${OLD_S3_KEY}`,
    Key: NEW_S3_KEY
  })
  .promise()
  .then(() =>
    s3.deleteObject({
      Bucket: AWS_BUCKET,
      Key: OLD_S3_KEY
    }).promise()
  )
  .catch((e) => console.error(e));
I would like to read the content of a .txt file stored within an s3 bucket.
I tried :
var s3 = new AWS.S3({apiVersion: '2006-03-01'});
var params = {Bucket: 'My-Bucket', Key: 'MyFile.txt'};
var s3file = s3.getObject(params)
But the s3file object that I get does not contain the content of the file.
Do you have an idea of what to do?
Agree with zishone and here is the code with exception handling:
var s3 = new AWS.S3({apiVersion: '2006-03-01'});
var params = {Bucket: 'My-Bucket', Key: 'MyFile.txt'};
s3.getObject(params, function (err, data) {
    if (err) {
        console.log(err);
    } else {
        console.log(data.Body.toString());
    }
});
According to the docs the contents of your file will be in the Body field of the result and it will be a Buffer.
Another problem is that s3.getObject(...) needs a callback.
s3.getObject(params, (err, s3file) => {
    const text = s3file.Body.toString();
});
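If you prefer promises over callbacks, the AWS SDK v2 request object also exposes .promise(), so the same read can be written as in this sketch (assuming the same bucket and key):

const AWS = require('aws-sdk');
const s3 = new AWS.S3({ apiVersion: '2006-03-01' });

async function readTextFile() {
    // Same call as above, but using the request's .promise() instead of a callback.
    const data = await s3.getObject({ Bucket: 'My-Bucket', Key: 'MyFile.txt' }).promise();
    return data.Body.toString('utf-8'); // Body is a Buffer
}

readTextFile().then(console.log).catch(console.error);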
I have an app where a user can upload a ZIP archive of resources. My app handles the upload and saves this to S3. At some point I want to run a transformation that will read this S3 object, unzip it, and write the contents to a new S3 bucket. This is all happening on a Node service.
I am using the unzipper library to handle unzipping. Here is my initial code.
const AWS = require("aws-sdk");
const stream = require("stream");
const unzipper = require("unzipper");

async function downloadFromS3() {
  let s3 = new AWS.S3();
  try {
    const object = s3
      .getObject({
        Bucket: "zip-bucket",
        Key: "Archive.zip"
      })
      .createReadStream();
    object.on("error", err => {
      console.log(err);
    });
    await streaming_unzipper(object, s3);
  } catch (e) {
    console.log(e);
  }
}

async function streaming_unzipper(s3ObjectStream, s3) {
  await s3.createBucket({ Bucket: "unzip-bucket" }).promise();
  const unzipStream = s3ObjectStream.pipe(unzipper.Parse());
  unzipStream.pipe(
    stream.Transform({
      objectMode: true,
      transform: function(entry, e, next) {
        const fileName = entry.path;
        const type = entry.type; // 'Directory' or 'File'
        const size = entry.vars.uncompressedSize; // There is also compressedSize
        if (type === "File") {
          s3.upload(
            { Bucket: "unzip-bucket", Body: entry, Key: entry.path },
            {},
            function(err, data) {
              if (err) console.error(err);
              console.log(data);
              entry.autodrain();
            }
          );
          next();
        } else {
          entry.autodrain();
          next();
        }
      }
    })
  );
}
This code works, but I feel like it could be optimized. Ideally I would like to pipe the download stream -> unzipper stream -> uploader stream, so that chunks are uploaded to S3 as they get unzipped, instead of waiting for the full unzip to finish before uploading.
The problem I am running into is that I need the file name (to set as an S3 key), which I only have after unzipping, before I can start the upload.
Is there any good way to create a streaming upload to S3, initiated with a temporary id, that gets rewritten with the final file name after the full stream is finished?
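Not a definitive answer, but one observation: unzipper emits each entry (with its entry.path) before that entry's data is consumed, and s3.upload accepts a Body stream of unknown length, so the key is already known when the upload starts and a temporary key may not be needed. A rough sketch under those assumptions (streamUnzipToS3 is a hypothetical helper):

const AWS = require("aws-sdk");
const unzipper = require("unzipper");

// Sketch only: stream each zip entry straight into an S3 upload as it is parsed.
async function streamUnzipToS3(s3, zipStream, bucket) {
  const uploads = [];
  await new Promise((resolve, reject) => {
    zipStream
      .pipe(unzipper.Parse())
      .on("entry", entry => {
        if (entry.type === "File") {
          // entry.path is available here, before the entry's bytes are read.
          uploads.push(
            s3.upload({ Bucket: bucket, Key: entry.path, Body: entry }).promise()
          );
        } else {
          entry.autodrain();
        }
      })
      .on("error", reject)
      .on("finish", resolve);
  });
  await Promise.all(uploads);
}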
I want to be able to copy files within the same bucket from the root directory to a subfolder within a subfolder, excluding that subfolder itself, using the aws-sdk.
i.e.:
I want to use this AWS-CLI command in a gulp file task:
aws s3 cp s3://bucketName s3://bucketName/last_good/YYYYMMDD --recursive --exclude "last_good/*"
I've used the copy examples used from How to copy/move all objects in Amazon S3 from one prefix to other using the AWS SDK for Node.js
I am just not sure how to specify the folder to exclude. In my above example it would be the last_good folder.
var gulp = require('gulp');
var AWS = require('aws-sdk');
var async = require('async');

var bucketName = 'bucketname';
var oldPrefix = '';
var newPrefix = 'last_good/20190817/';
var s3 = new AWS.S3({params: {Bucket: bucketName}, region: 'us-west-2'});

gulp.task('publish', function() {
    CopyToLastGood();
});

function CopyToLastGood() {
    var done = function(err, data) {
        if (err) console.log(err);
        else console.log(data);
    };

    s3.listObjects({Prefix: oldPrefix}, function(err, data) {
        if (data.Contents.length) {
            async.each(data.Contents, function(file, cb) {
                var params = {
                    CopySource: bucketName + '/' + file.Key,
                    Key: file.Key.replace(oldPrefix, newPrefix)
                };
                s3.copyObject(params, function(copyErr, copyData) {
                    if (copyErr) { // an error occurred
                        console.log(copyErr);
                        cb(copyErr);
                    } else {
                        console.log('Copied: ', params.Key); // successful response
                        cb();
                    }
                });
            }, done);
        }
    });
}
I expect the contents of the root to be copied into last_good/20190817/, without copying the last_good folder itself.
I've solved it using the Delimiter option in the s3.listObjects params.
i.e.:
s3.listObjects({Prefix: oldPrefix, Delimiter: '/'}, ...)
This only lists objects directly under the root; keys under deeper prefixes are grouped into CommonPrefixes and are not returned in Contents.
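An alternative sketch, closer to the CLI's --exclude "last_good/*" semantics, is to keep the plain listing and simply skip every key under the destination prefix (assumes the same variables as the gulp task above; copyOneFile stands in for the per-file copy logic):

// Sketch only: filter out everything already under "last_good/" before copying.
var filesToCopy = data.Contents.filter(function(file) {
    return !file.Key.startsWith('last_good/');
});
async.each(filesToCopy, copyOneFile, done); // copyOneFile is a hypothetical per-file copy callback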