URL from getSignedUrl will expire in a few weeks - node.js

I've got a storage trigger function which resizes and replaces an uploaded image in storage and then updates the URL in my database:
}).then(() => {
  console.log('Original file deleted', filePath)
  const logo = storageRef.file(JPEGFilePath)
  return logo.getSignedUrl({ action: 'read', expires: date })
  // const logo = storageRef.child(JPEGFilePath)
  // return logo.getDownloadURL()
  // return storageUrl.getDownloadURL(JPEGFilePath)
}).then((url) => {
  const newRef = db.collection("user").doc(uid)
  return newRef.set({
    profile: { profileImg: url[0] }
  }, {
    merge: true
  })
})
Here is how I set the expiry date:
const d = new Date()
const date = new Date(d.setFullYear(d.getFullYear() + 200)).toString()
However, the image expires in a few weeks (roughly about 2 weeks). Does anyone know how to fix that? I have even played with getDownloadURL, as you can see from the commented code, but that doesn't seem to work in a trigger.

Per the following links:
https://stackoverflow.com/a/42959262/370321
https://cloud.google.com/nodejs/docs/reference/storage/2.5.x/File#getSignedPolicy
Not sure which version of @google-cloud/storage you're using, but assuming it's 2.5.x, any value you pass in the expires field is passed into new Date(), so your code should work; I tried it in my dev tools. The only thing I can guess is that it doesn't like that you want a file to live for 200 years.
Per the source code:
https://github.com/googleapis/nodejs-storage/blob/master/src/file.ts#L2358
Have you tried a shorter amount of time, or formatting it in the date format mm-dd-yyyy?
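For illustration, here is a minimal sketch (not the asker's code; the bucket and object names are placeholders) of what a shorter, explicitly formatted expiry looks like with @google-cloud/storage 2.x:
const { Storage } = require('@google-cloud/storage');
const storage = new Storage();

// expires accepts a Date, a date string such as 'mm-dd-yyyy', or a timestamp in ms
storage
  .bucket('my-bucket')                // placeholder bucket name
  .file('images/profileImg.jpg')      // placeholder object path
  .getSignedUrl({ action: 'read', expires: '03-09-2030' })
  .then(([url]) => console.log(url));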

OK, so I have tried something, but I have no idea whether it will work or not, so I'll come back in 2 weeks to mark my question as answered if it does. For those with the same problem, I'll recapitulate what I've done.
1/ Download the service account key from the console. Here is the link:
https://console.firebase.google.com/project/_/settings/serviceaccounts/adminsdk
2/ Save the downloaded JSON file in your function directory
3/ Include the key when you initialize Storage in your function. But be careful how you set the path to the file. Here is my question about it:
https://stackoverflow.com/a/56407592/11486115
UPDATE
I just found a mistake in my function. My URL was provided by the cloud function by mistake (see the commented code).
Here is the complete function:
const {
  db
} = require('../../admin')
const projectId = "YOUR-PROJECT-ID"
const { Storage } = require('@google-cloud/storage');
const storage = new Storage({ projectId: projectId, keyFilename: 'PATH-TO-SERVICE-ACCOUNT' })
const os = require('os');
const fs = require('fs');
const path = require('path');
const spawn = require('child-process-promise').spawn
const JPEG_EXTENSION = '.jpg'

exports.handler = ((object) => {
  const bucket = object.bucket;
  const contentType = object.contentType;
  const filePath = object.name
  const JPEGFilePath = path.normalize(path.format({ dir: path.dirname(filePath), name: 'profileImg', ext: JPEG_EXTENSION }))
  const destBucket = storage.bucket(bucket)
  const tempFilePath = path.join(os.tmpdir(), path.basename(filePath))
  const tempLocalJPEGFile = path.join(os.tmpdir(), path.basename(JPEGFilePath))
  const metadata = {
    contentType: contentType
  }
  const uid = filePath.split("/").slice(1, 2).join("")
  const d = new Date()
  const date = new Date(d.setFullYear(d.getFullYear() + 200)).toString()

  if (!object.contentType.startsWith('image/')) {
    return destBucket.file(filePath).delete().then(() => {
      console.log('File is not an image ', filePath, ' DELETED')
      return null
    });
  }
  if (object.metadata.modified) {
    console.log('Image processed')
    return null
  }
  return destBucket.file(filePath).download({
    destination: tempFilePath
  })
    .then(() => {
      console.log('The file has been downloaded to', tempFilePath)
      return spawn('convert', [tempFilePath, '-resize', '100x100', tempLocalJPEGFile])
    }).then(() => {
      console.log('JPEG image created at', tempLocalJPEGFile)
      metadata.modified = true
      return destBucket.upload(tempLocalJPEGFile,
        {
          destination: JPEGFilePath,
          metadata: { metadata: metadata }
        })
    }).then(() => {
      console.log('JPEG image uploaded to Storage at', JPEGFilePath)
      return destBucket.file(filePath).delete()
    }).then(() => {
      console.log('Original file deleted', filePath)
      //const logo = storageRef.file(JPEGFilePath)
      const logo = destBucket.file(JPEGFilePath)
      return logo.getSignedUrl({ action: 'read', expires: date })
    }).then((url) => {
      const newRef = db.collection("user").doc(uid)
      return newRef.set({
        profile: { profileImg: url[0] }
      }, {
        merge: true
      })
    }).then(() => {
      fs.unlinkSync(tempFilePath);
      fs.unlinkSync(tempLocalJPEGFile)
      console.log(uid, 'user database updated ')
      return null
    })
})
I'm pretty confident that this will work now.

Related

Delivering image from S3 to React client via Context API and Express server

I'm trying to download a photo from an AWS S3 bucket via an express server to serve to a react app but I'm not having much luck. Here are my (unsuccessful) attempts so far.
The workflow is as follows (a minimal sketch of steps 2-4 follows the list):
Client requests photo after retrieving key from database via Context API
Request sent to express server route (important so as to hide the true location from the client)
Express server route requests blob file from AWS S3 bucket
Express server parses image to base64 and serves to client
Client updates state with new image
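Here is a minimal sketch of steps 2-4 (my own illustration, not the asker's code, assuming the AWS SDK v2 client; the bucket name and route are placeholders). The key detail is encoding the raw Body buffer straight to base64 rather than decoding it as UTF-8 first:
const express = require('express');
const AWS = require('aws-sdk');

const app = express();
const s3 = new AWS.S3();

app.get('/media/mediaitem/:key', async (req, res, next) => {
  try {
    // Fetch the object; data.Body is a Buffer
    const data = await s3.getObject({ Bucket: 'my-bucket', Key: req.params.key }).promise();
    // Encode the Buffer directly to base64 (no intermediate utf-8 string)
    const base64 = data.Body.toString('base64');
    res.status(200).json({ image: `data:${data.ContentType};base64,${base64}` });
  } catch (err) {
    next(err);
  }
});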
React Client
const [profilePic, setProfilePic] = useState('');

useEffect(() => {
  await actions.getMediaSource(tempPhoto.key)
    .then(resp => {
      console.log('server resp: ', resp.data.data.newTest) // returns ����\u0000�\u0000\b\u0006\
      const url = window.URL || window.webkitURL;
      const blobUrl = url.createObjectURL(resp.data.data.newTest);
      console.log("blob ", blobUrl);
      setProfilePic({ ...profilePic, image : resp.data.data.newTest });
    })
    .catch(err => errors.push(err));
}
Context API - just axios wrapped into its own library
getMediaContents = async ( key ) => {
return await this.API.call(`http://localhost:5000/${MEDIA}/mediaitem/${key}`, "GET", null, true, this.state.accessToken, null);
}
Express server route
router.get("/mediaitem/:key", async (req, res, next) => {
try{
const { key } = req.params;
// Attempt 1 was to try with s3.getObject(downloadParams).createReadStream();
const readStream = getFileStream(key);
readStream.pipe(res);
// Attempt 2 - attempt to convert response to base 64 encoding
var data = await getFileStream(key);
var test = data.Body.toString("utf-8");
var container = '';
if ( data.Body ) {
container = data.Body.toString("utf-8");
} else {
container = undefined;
}
var buffer = (new Buffer.from(container));
var test = buffer.toString("base64");
require('fs').writeFileSync('../uploads', test); // it never wrote to this directory
console.log('conversion: ', test); // prints: 77+977+977+977+9AO+/vQAIBgYH - this doesn't look like base64 to me.
delete buffer;
res.status(201).json({ newTest: test });
} catch (err){
next(ApiError.internal(`Unexpected error > mediaData/:id GET -> Error: ${err.message}`));
return;
}
});
AWS S3 Library - I made my own library for using the s3 bucket as I'll need to use more functionality later.
const getFileStream = async (fileKey) => {
  const downloadParams = {
    Key: fileKey,
    Bucket: bucketName
  }
  // This was attempt 1's return without async in the parameter
  return s3.getObject(downloadParams).createReadStream();
  // Attempt 2's intention was just to wait for the promise to be fulfilled.
  return await s3.getObject(downloadParams).promise();
}
exports.getFileStream = getFileStream;
If you've gotten this far you may have realised that I've tried a couple of things from different sources and documentation but I'm not getting any further. I would really appreciate some pointers and advice on what I'm doing wrong and what I could improve on.
If any further information is needed then just let me know.
Thanks in advance for your time!
Maybe it will be useful for you; this is how I get an image from S3 and process it on the server.
Create a temporary directory:
createTmpDir(): Promise<string> {
return mkdtemp(path.join(os.tmpdir(), 'tmp-'));
}
Gets the file
readStream(path: string) {
  return this.s3
    .getObject({
      Bucket: this.awsConfig.bucketName,
      Key: path,
    })
    .createReadStream();
}
How I process the file:
async MainMethod(fileName) {
  const dir = await this.createTmpDir();
  const serverPath = path.join(
    dir,
    fileName
  );
  // stream the S3 object (keyed by fileName) into a temporary file
  await pipeline(
    this.readStream(fileName),
    fs.createWriteStream(serverPath + '.jpg')
  );
  // resize with sharp and write the result next to it
  const createFile = await sharp(serverPath + '.jpg')
    .jpeg()
    .resize({
      width: 640,
      fit: sharp.fit.inside,
    })
    .toFile(serverPath + '.jpeg');
  const imageBuffer = fs.readFileSync(serverPath + '.jpeg');
  // my manipulations
  fs.rmSync(dir, { recursive: true, force: true }); // delete the temporary folder
}

Use original file name in AWS s3 uploader

I have implemented an S3 uploader per these instructions: https://aws.amazon.com/blogs/compute/uploading-to-amazon-s3-directly-from-a-web-or-mobile-application/
This is the Lambda function code:
AWS.config.update({ region: process.env.AWS_REGION })
const s3 = new AWS.S3()
const URL_EXPIRATION_SECONDS = 300

// Main Lambda entry point
exports.handler = async (event) => {
  return await getUploadURL(event)
}

const getUploadURL = async function(event) {
  const randomID = parseInt(Math.random() * 10000000)
  const Key = `${randomID}.jpg`

  // Get signed URL from S3
  const s3Params = {
    Bucket: process.env.UploadBucket,
    Key,
    Expires: URL_EXPIRATION_SECONDS,
Currently the filename (key) is generated using a random ID.
I would like to change that to use the original filename of the uploaded file.
I tried a couple of approaches, such as using fs.readFile() to get the filename, but have not had any luck.
There is a webpage with a form that works in conjunction with the Lambda to upload the file to s3.
How do I get the filename?
If you want to save the file with the original filename, you have to pass that filename as part of the key you use to request the signed url. You don't show how you're getting the file to upload, but if it is part of a web site, you get this from the client.
On the client side you have the user identify the file to upload and pass that to your code that calls getUploadURL(). Maybe in your code it is part of event? Then you send the signed URL back to the client and then the client can send the file to the signed URL.
Therefore, to upload a file, your client has to send two requests: one to your server to get the signed URL and one to that URL to upload the file.
You do mention that you're using fs.readFile(). If you're able to get the file with this call, then you already have the file name. All you have to do is pass the same name to getUploadURL() as an additional parameter or as part of event. You may have to parse the filename first, or within getUploadURL(), if it includes a path to someplace other than your current working directory.
The code above looks like it may be a Lambda that's getting called with some event. If that event is a trigger of some sort in which you can include a file name, then you can pull it from that variable. For example:
const getUploadURL = async function(event) {
  const randomID = parseInt(Math.random() * 10000000)
  const Key = `${event.fileNameFromTrigger}`

  // Get signed URL from S3
  const s3Params = {
    Bucket: process.env.UploadBucket,
    Key,
    Expires: URL_EXPIRATION_SECONDS,
    ...
  }
If the file name includes the extension, then you don't need to append that as you were with the random name.
I modified the Lambda
changed this
const randomID = parseInt(Math.random() * 10000000)
const Key = `${randomID}.jpg`
to this
const Key = event.queryStringParameters.filename
And this is the frontend code with my endpoint redacted. Note the query ?filename= appended to the endpoint and how I used this.filename = file.name.
<script>
const MAX_IMAGE_SIZE = 1000000

/* ENTER YOUR ENDPOINT HERE */
const API_ENDPOINT = '{api-endpoint}/uploads?filename=' // e.g. https://ab1234ab123.execute-api.us-east-1.amazonaws.com/uploads

new Vue({
  el: "#app",
  data: {
    image: '',
    uploadURL: '',
    filename: ''
  },
  methods: {
    onFileChange (e) {
      let files = e.target.files || e.dataTransfer.files
      //let filename = files[0].name
      if (!files.length) return
      this.createImage(files[0])
    },
    createImage (file) {
      // var image = new Image()
      let reader = new FileReader()
      reader.onload = (e) => {
        //console.log(file.name)
        console.log('length: ', e.target.result.includes('data:image/jpeg'))
        if (!e.target.result.includes('data:image/jpeg')) {
          return alert('Wrong file type - JPG only.')
        }
        if (e.target.result.length > MAX_IMAGE_SIZE) {
          return alert('Image is too large.')
        }
        this.image = e.target.result
        this.filename = file.name
      }
      reader.readAsDataURL(file)
    },
    removeImage: function (e) {
      console.log('Remove clicked')
      this.image = ''
      this.filename = ''
    },
    uploadImage: async function (e) {
      console.log('Upload clicked')
      // Get the presigned URL
      const response = await axios({
        method: 'GET',
        url: API_ENDPOINT + this.filename
      })
      console.log('Response: ', response)
      console.log('Uploading: ', this.image)
      let binary = atob(this.image.split(',')[1])
      let array = []
      for (var i = 0; i < binary.length; i++) {
        array.push(binary.charCodeAt(i))
      }
      let blobData = new Blob([new Uint8Array(array)], {type: 'image/jpeg'})
      console.log('Uploading to: ', response.uploadURL)
      const result = await fetch(response.uploadURL, {
        method: 'PUT',
        body: blobData
      })
      console.log('Result: ', result)
      // Final URL for the user doesn't need the query string params
      this.uploadURL = response.uploadURL.split('?')[0]
    }
  }
})
</script>
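One caveat worth adding (my own assumption, not part of the original post): since the filename travels in a query string, it is safer to URL-encode it on the client before appending it to the endpoint:
// hypothetical tweak to the uploadImage call above
const response = await axios({
  method: 'GET',
  url: API_ENDPOINT + encodeURIComponent(this.filename)
})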

Generated sitemaps are corrupted using the sitemap library for Node.js

I'm using a library called sitemap to generate files from an array of objects constructed during runtime. My goal is to upload these generated sitemaps to an S3 bucket.
So far, the function is hosted on AWS Lambda and uploads the generated files correctly to the bucket.
My problem is that the generated sitemaps are corrupted. When I run the function locally, they are generated correctly without any issues.
Here's my handler:
module.exports.handler = async () => {
  try {
    console.log("inside handler....");
    await clearGeneratedSitemapsFromTmpDir();
    const sms = new SitemapAndIndexStream({
      limit: 10000,
      getSitemapStream: (i) => {
        const sitemapStream = new SitemapStream({
          lastmodDateOnly: true,
        });
        const linkPath = `/sitemap-${i + 1}.xml`;
        const writePath = `/tmp/${linkPath}`;
        sitemapStream.pipe(createWriteStream(resolve(writePath)));
        return [new URL(linkPath, hostName).toString(), sitemapStream];
      },
    });
    const data = await generateSiteMap();
    sms.pipe(createWriteStream(resolve("/tmp/sitemap-index.xml")));
    // data.forEach((item) => sms.write(item));
    Readable.from(data).pipe(sms);
    sms.end();
    await uploadToS3();
    await clearGeneratedSitemapsFromTmpDir();
  } catch (error) {
    console.log("🚀 ~ file: index.js ~ line 228 ~ exec ~ error", error);
    Sentry.captureException(error);
  }
};
The data variable has an array of around 11k items, so according to the code above, two sitemap files would be generated (the first 10k items in one, the rest in the second), in addition to a sitemap index that lists the two generated sitemaps.
Here's my uploadToS3 function:
const uploadToS3 = async () => {
  try {
    console.log("uploading to s3....");
    const files = await getGeneratedXmlFilesNames();
    for (let i = 0; i < files.length; i += 1) {
      const file = files[i];
      const filePath = `/tmp/${file}`;
      // const stream = createReadStream(resolve(filePath));
      const fileRead = await readFileAsync(filePath, { encoding: "utf-8" });
      const params = {
        Body: fileRead,
        Key: `${file}`,
        ACL: "public-read",
        ContentType: "application/xml",
        ContentDisposition: "inline",
      };
      // const result = await s3Client.upload(params).promise();
      const result = await s3Client.putObject(params).promise();
      console.log(
        "🚀 ~ file: index.js ~ line 228 ~ uploadToS3 ~ result",
        result
      );
    }
  } catch (error) {
    console.log("uploadToS3 => error", error);
    // Sentry.captureException(error);
  }
};
And here's the function that cleans up the generated files from lambda's /tmp directory after upload to S3:
const clearGeneratedSitemapsFromTmpDir = async () => {
  try {
    console.log("cleaning up....");
    const readLocalTempDirDir = await readDirAsync("/tmp");
    const xmlFiles = readLocalTempDirDir.filter((file) =>
      file.includes(".xml")
    );
    for (const file of xmlFiles) {
      await unlinkAsync(`/tmp/${file}`);
      console.log("deleting file....");
    }
  } catch (error) {
    console.log(
      "🚀 ~ file: index.js ~ line 207 ~ clearGeneratedSitemapsFromTmpDir ~ error",
      error
    );
  }
};
My hunch is that the issue is related to streams, as I haven't fully understood them yet.
Any help here is highly appreciated.
Side note: I tried to sleep for 10s before uploading, but that didn't work either.
As a workaround, I did this:
const data = await generateSiteMap();

const logger = createWriteStream(resolve("/tmp/all-urls.json.txt"), {
  flags: "a",
});
data.forEach((el) => {
  logger.write(JSON.stringify(el));
  logger.write("\n");
});
logger.end();

const stream = lineSeparatedURLsToSitemapOptions(
  createReadStream(resolve("/tmp/all-urls.json.txt"))
)
  .pipe(sms)
  .pipe(createWriteStream(resolve("/tmp/sitemap-index.xml")));

await new Promise((fulfill) => stream.on("finish", fulfill));
await uploadToS3();
await clearGeneratedSitemapsFromTmpDir();
I will keep the question open in case somebody answers it correctly.
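For what it's worth, here is a hedged sketch of the idea behind that workaround applied to the original handler (an assumption on my part, not a confirmed fix): wait for the index stream and every per-sitemap write stream to finish before calling uploadToS3(). It assumes Node 15+ for stream/promises and the same imports as the handler above.
const { finished } = require("stream/promises");

// inside the async handler, replacing the original sms setup
const writeStreams = [];
const sms = new SitemapAndIndexStream({
  limit: 10000,
  getSitemapStream: (i) => {
    const sitemapStream = new SitemapStream({ lastmodDateOnly: true });
    const ws = createWriteStream(resolve(`/tmp/sitemap-${i + 1}.xml`));
    writeStreams.push(ws); // remember the file stream so it can be awaited later
    sitemapStream.pipe(ws);
    return [new URL(`/sitemap-${i + 1}.xml`, hostName).toString(), sitemapStream];
  },
});

const data = await generateSiteMap();
const indexWrite = createWriteStream(resolve("/tmp/sitemap-index.xml"));
Readable.from(data).pipe(sms).pipe(indexWrite);

// only upload once everything has actually been flushed to /tmp
await finished(indexWrite);
await Promise.all(writeStreams.map((ws) => finished(ws)));
await uploadToS3();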

How to mark a file private before it's uploaded to Google Cloud Storage?

I'm using the @google-cloud/storage package and generating a signed URL to upload a file like this:
const path = require("path");
const { Storage } = require("@google-cloud/storage");

const GOOGLE_CLOUD_KEYFILE = path.resolve(
  __dirname + "/../gcloud_media_access.json"
);
const storage = new Storage({
  keyFilename: GOOGLE_CLOUD_KEYFILE,
});

exports.uploadUrlGCloud = async (bucketName, key, isPrivate = false) => {
  let bucket = storage.bucket(bucketName);
  let file = bucket.file(key);
  const options = {
    version: "v4",
    action: "write",
    expires: Date.now() + 15 * 60 * 1000 // 15 minutes
  };
  let signedUrl = (await file.getSignedUrl(options))[0];
  if (isPrivate) {
    await file.makePrivate({ strict: true });
  }
  return signedUrl;
};
However when I call this function like this:
const url = await uploadUrlGCloud(bucket, key, true);
I'm getting a 404 API error like this:
ApiError: No such object: testbucket/account/upload/4aac0fb0-92dd-11eb-8723-6b3ad09f80fa_demo.jpg
What I want to ask is: is there a way to generate the signed URL as private? Before the file is uploaded, I want to mark it as private and prevent public access.
Edit:
I uploaded a file to the created signed URL and called makePrivate again on the uploaded file. This time I didn't get any errors. However, when I checked the file again, I realized that it is still public.
This is the function I used to try to make the file private:
const makeFilePrivate = async (bucketName, key) => {
  return new Promise((resolve, reject) => {
    let bucket = storage.bucket(bucketName);
    let file = bucket.file(key);
    try {
      file.makePrivate({ strict: true }, err => {
        if (!err) {
          resolve(file.isPublic());
        } else
          reject(err);
      })
    } catch (err) {
      reject(err);
    }
  })
};
console.log(await makeFilePrivate(bucket, remotePath));
// True
You can't make the objects of a public bucket private, due to the way IAM and ACLs interact with one another.
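To illustrate the point (my own hedged sketch, not part of the answer): if the bucket grants roles/storage.objectViewer to allUsers at the IAM level, a per-object makePrivate() cannot override that grant; the bucket-level binding itself would have to be removed, roughly like this:
// inside an async function, using the same `storage` client as above; bucketName is a placeholder
const bucket = storage.bucket(bucketName);
const [policy] = await bucket.iam.getPolicy();

// drop any binding that makes every object publicly readable
policy.bindings = policy.bindings.filter(
  (b) => !(b.role === "roles/storage.objectViewer" && b.members.includes("allUsers"))
);
await bucket.iam.setPolicy(policy);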

How to process a scraped image and upload to firebase storage using firebase functions?

I'm trying to grab some HD images from URLs, resize them, and upload them to storage.
So far, I've gotten the image and resized it using sharp. The output API of sharp uses .toFile('output.jpg') or .toBuffer(), and I'm not sure how to proceed from here. What would be the easiest way to output the image and upload it to Firebase Storage?
My code so far:
const functions = require('firebase-functions');
const admin = require('firebase-admin');
admin.initializeApp();
const request = require('request').defaults({ encoding: null });
const sharp = require('sharp');

exports.copyThumbnailImage = functions.firestore.document('users/{userId}/vocab/{vocabId}').onCreate((snapshot, context) => {
  // mock: copyThumbnailImage({ chosenImages: [{ googleThumbnailUrl: "https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcQlC7Vnu9CuZlA-nTpW8TLPd8dAE456LCpeXoadUKHoxB7WEmM1rfahqsfr", mime: "image/jpeg", originalUrl: "https://storage.tenki.jp/storage/static-images/suppl/article/image/2/27/278/27810/1/large.jpg" }] }, { params: { userId: 'zYCw6DmcEiOS8Yk4QltYPikykwP2', vocabId: 1420970 } })
  const data = snapshot.data()
  const vocabId = context.params.vocabId
  const images = data.chosenImages
  const checkExistencePromises = []
  // Promises check if image ref already exists in firestore
  images.forEach(imageObj => {
    checkExistencePromises.push(db.collection('userImages').where('originalUrl', '==', imageObj.originalUrl).where('vocabId', '==', vocabId).get())
  })
  return Promise.all(checkExistencePromises)
    .then(snapshots => {
      const getImagePromises = []
      snapshots.forEach((snapshot, i) => {
        if (snapshot.empty) {
          // if image doesn't exist already, upload thumbnail to DB, add doc to userImages and add link to review
          const originalUrl = images[i].originalUrl
          getImagePromises.push(getImage(originalUrl))
        } else {
          // add link to review
        }
      })
      return Promise.all(getImagePromises)
    })
    .then(responses => {
      responses.forEach(response => {
        sharp(response).resize(200, 200).toBuffer()
        // ????????
      })
    })
    .then(() => {
    })
    .catch(error => {
      console.log(error)
    })
})

function getImage (url) {
  return new Promise((resolve, reject) => {
    request.get(url, (err, res, body) => {
      if (err) reject(err)
      resolve(body)
    })
  })
}
You can save it to the local file system (the local /tmp disk) and upload it to Cloud Storage from there.
Have a look at this official Cloud Functions sample: https://github.com/firebase/functions-samples/blob/master/convert-images/functions/index.js. (I copy below the code for future reference)
In particular, look at how they save a temporary file with
return spawn('convert', [tempLocalFile, tempLocalJPEGFile]);
and how they upload it with:
return bucket.upload(tempLocalJPEGFile, {destination: JPEGFilePath});
In your case, instead of calling spawn() you would call
.toFile(-theTemporaryFileName-)
Finally, have a look at Write temporary files from Google Cloud Function and Attach firebase cloud function or cache its data from cloud function call about the /tmp disk.
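A rough sketch of that adaptation for the resizing step in the question (my own illustration; it assumes the default bucket from admin.storage().bucket() and placeholder file names):
const os = require('os');
const path = require('path');
const fs = require('fs');

const bucket = admin.storage().bucket();                       // default bucket (assumption)
const tempLocalJPEGFile = path.join(os.tmpdir(), 'thumb.jpg'); // placeholder temp file name

// for a single downloaded image buffer (`response` from getImage above)
return sharp(response)
  .resize(200, 200)
  .toFile(tempLocalJPEGFile)                                   // write the resized image to /tmp
  .then(() => bucket.upload(tempLocalJPEGFile, {
    destination: 'thumbnails/thumb.jpg'                        // placeholder destination path
  }))
  .then(() => fs.unlinkSync(tempLocalJPEGFile));               // free the /tmp space afterwards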
Code from the Cloud Function Sample as of 08/01/2018 (link above)
const functions = require('firebase-functions');
const mkdirp = require('mkdirp-promise');
const gcs = require('@google-cloud/storage')();
const spawn = require('child-process-promise').spawn;
const path = require('path');
const os = require('os');
const fs = require('fs');

// File extension for the created JPEG files.
const JPEG_EXTENSION = '.jpg';

/**
 * When an image is uploaded in the Storage bucket it is converted to JPEG automatically using
 * ImageMagick.
 */
exports.imageToJPG = functions.storage.object().onFinalize((object) => {
  const filePath = object.name;
  const baseFileName = path.basename(filePath, path.extname(filePath));
  const fileDir = path.dirname(filePath);
  const JPEGFilePath = path.normalize(path.format({dir: fileDir, name: baseFileName, ext: JPEG_EXTENSION}));
  const tempLocalFile = path.join(os.tmpdir(), filePath);
  const tempLocalDir = path.dirname(tempLocalFile);
  const tempLocalJPEGFile = path.join(os.tmpdir(), JPEGFilePath);

  // Exit if this is triggered on a file that is not an image.
  if (!object.contentType.startsWith('image/')) {
    console.log('This is not an image.');
    return null;
  }
  // Exit if the image is already a JPEG.
  if (object.contentType.startsWith('image/jpeg')) {
    console.log('Already a JPEG.');
    return null;
  }
  const bucket = gcs.bucket(object.bucket);
  // Create the temp directory where the storage file will be downloaded.
  return mkdirp(tempLocalDir).then(() => {
    // Download file from bucket.
    return bucket.file(filePath).download({destination: tempLocalFile});
  }).then(() => {
    console.log('The file has been downloaded to', tempLocalFile);
    // Convert the image to JPEG using ImageMagick.
    return spawn('convert', [tempLocalFile, tempLocalJPEGFile]);
  }).then(() => {
    console.log('JPEG image created at', tempLocalJPEGFile);
    // Uploading the JPEG image.
    return bucket.upload(tempLocalJPEGFile, {destination: JPEGFilePath});
  }).then(() => {
    console.log('JPEG image uploaded to Storage at', JPEGFilePath);
    // Once the image has been converted delete the local files to free up disk space.
    fs.unlinkSync(tempLocalJPEGFile);
    fs.unlinkSync(tempLocalFile);
    return;
  });
});
