trying to upload files using "firebase-admin" and "express-fileupload" , all firebase and express-fileupload configurations are setup as well :
my requires libs:
const firebaseAdmin = require("firebase-admin");
const fileUpload = require('express-fileupload');
my express-fileupload configs:
app.use(fileUpload({ createParentPath: true }))
app.use('/resources', express.static(path.resolve(__dirname, '../uploads')));
app.use(bodyParser.json());
app.use(bodyParser.urlencoded({ extended: true }));
i tested express-fileuploa and it works fine:
debug for req.files.file:
{
name: '66032925_2210721569219339_1152747532661555200_n.jpg',
data: <Buffer ff d8 ff e0 00 10 4a 46 49 46 00 01 02 00 00 01 00 01 00 00 ff ed 00 9c 50 68 6f 74 6f 73 68 6f 70 20 33 2e 30 00 38 42 49 4d 04 04 00 00 00 00 00 80 ... 23648 more bytes>,
size: 23698,
encoding: '7bit',
tempFilePath: '',
truncated: false,
mimetype: 'image/jpeg',
md5: '41145e65bb895e594e4657c9f6ce90a3',
mv: [Function: mv]
}
ok , now lets see my firebase configs:
var serviceAccount = require("../serviceAccountKey.json");
firebaseAdmin.initializeApp({
credential: firebaseAdmin.credential.cert(serviceAccount),
storageBucket: "*****-*****.appspot.com"
});
the code bellow is the post callback upload endpoint , there i am working only to upload a myfile .
app.post("/upload", (req, res) => {
try {
var file = req.files.file;
console.debug(file)
var bucket = firebaseAdmin.storage().bucket();
bucket.upload(file.data, { destination: "clients/avatars" }).then(data => {
console.debug("data", data);
return res.json({ message: "data", data: data });
}, error => {
console.debug("really ???", error)
return res.json({ message: "error", data: error + "" });
})
} catch (err) {
return res.json({ message: "error", data: err + "" });
}
});
now , when i test it , i get this error :
"TypeError [ERR_INVALID_ARG_VALUE]: The argument 'path' must be a
string or Uint8Array without null bytes. Received <Buffer ff d8 ff e0
00 10 4a 46 49 46 00 01 02 00 00 01 00 01 00 00 ff ed 00 9c 50 68 6f
74 6f 73 68 6f 70 20 33 2e 30 00 38 42 ..."
i debug console i see the error is throwing from ***
bucket.upload(file.data, { destination: "clients/avatars" })
*** method call.any help with ?
the reason for this is that you are passing the file data (as buffer) to a function that expects the file path.
bucket.upload(filePath, {
destination: remoteFile,
uploadType: "media",
metadata: {
contentType: fileMime,
metadata: {
firebaseStorageDownloadTokens: uuid
}
}
})
refer complete code here: Upload files to Firebase Storage using Node.js
Related
I'm trying to upload from my lambda (nodejs express) to s3 bucket. But whenever I upload, and look for my uploaded file in S3, it only shows a white small box. 1
I already try converting my file buffer to base64, but it still not working.
My uploaded file only show if I upload it using my local api(localhost).
Here's my code:
// multer middleware
const multer = require("multer");
const helpers = require("../helpers/image-upload-helper");
const storage =multer.memoryStorage();
let upload = multer({
storage: storage,
fileFilter: helpers.imageFilter,
}).any();
//controller
try {
if(req.files){
for (const file of req.files) {
const ImageName = randomImageNameGenerator.randomImageName()
const params = {
Bucket: process.env.BUCKET_NAME,
Key: ImageName,
Body: file.buffer,
ContentType : file.mimetype,
}
const command = new PutObjectCommand(params)
const myData = await s3.send(command)
}
}
//log of my command
PutObjectCommand {
middlewareStack: {
add: [Function: add],
addRelativeTo: [Function: addRelativeTo],
clone: [Function: clone],
use: [Function: use],
remove: [Function: remove],
removeByTag: [Function: removeByTag],
concat: [Function: concat],
applyToStack: [Function: cloneTo],
identify: [Function: identify],
resolve: [Function: resolve]
},
input: {
Bucket: 'orex-product-images',
Key: 'b465138efab90aba02e5376ef247f536cfb1e7e32e34877bf21ab1bd655b3749',
Body: <Buffer 89 50 4e 47 0d 0a 1a 0a 00 00 00 0d 49 48 44 52 00 00 01 f8 00 00 00 f5 08 06 00 00 00 bc c1 e7 15 00 00 00 01 73 52 47 42 00 ae ce 1c e9 00 00 20 00 ... 10640 more bytes>,
ContentType: 'image/png'
}
}
// log of myData
{
'$metadata': {
httpStatusCode: 200,
requestId: '6C1EM009PP420NRK',
extendedRequestId: 'ZfGR4AR4mElYOSGes68YqEegptyO5PY5iPCvplP89wr1nqT4DZHwo0D0bl5qyZ7aAB0HaDaTAKU=',
cfId: undefined,
attempts: 1,
totalRetryDelay: 0
},
ETag: '"96425366df243451e35a941524b2a019a6ad2b"',
ServerSideEncryption: 'ABDS256',
VersionId: 'rpgj.L5AwGNCcKVzatIY5zHf_SYVNWt0'
}
Note: I didn't see any error in my cloud watch
1 Example of what the white box looks like
For those that arrived here as I did with the same issue, this answer solved it for me:
Using Lambda to get image from S3 returns a white box in Python
And for those using serverless, this is also relevant:
Serverless I image upload to S3 broken after deploy, local worked only
Im trying to parse incoming "http" messages over WebSockets, (more precisely headers) which are ssdp messages.
server.js
const http = require("http");
const WebSocket = require("ws");
const wss = new WebSocket.Server({
port: 8080
});
wss.on("connection", (ws) => {
console.log("WebSocket client connected")
let req = new http.IncomingMessage();
req.socket = WebSocket.createWebSocketStream(ws, {
end: true
});
ws.on("message", (msg) => {
req.push(msg)
});
req.on("data", (data) => {
console.log("http.req.data", data);
});
req.on("close", () => {
console.log("http.req.end");
});
});
output:
node index.js
WebSocket client connected
http.req.data <Buffer 4d 2d 53 45 41 52 43 48 20 2a 20 48 54 54 50 2f 31 2e 31>
http.req.data <Buffer 48 4f 53 54 3a 20 32 33 39 2e 32 35 35 2e 32 35 35 2e 32 35 30 3a 31 39 30 30>
http.req.data <Buffer 4d 41 4e 3a 20 22 73 73 64 70 3a 64 69 73 63 6f 76 65 72 22>
http.req.data <Buffer 53 54 3a 20 75 70 6e 70 3a 72 6f 6f 74 64 65 76 69 63 65>
http.req.data <Buffer 4d 58 3a 20 35>
To bridge ssdp trafic over the WebSocket connection i use socat & wscat:
socat UDP4-RECVFROM:1900,ip-add-membership=239.255.255.250:0.0.0.0,fork - | wscat --connect=ws://127.0.0.1:8080
The problem is the end event is never emitted and req.headers are not set.
What is the correct way to handle/parse incoming http messages?
(Preferred with built in node.js modules)
My web allows user to drag and drop a zip file and upload it to AWS S3 bucket. The steps are:
User drag-and-drops a zip file to the drop-zone of the UI;
User clicks send;
A request is made to AWS Lambda function and the function will generate a pre-signed URL that allows the user to upload any file.
An axios PUT request is made to the pre-signed S3 URL to upload the file.
I used local node.js code to test the pre-signed S3 URL:
const fileToUpload = fs.readFileSync(test_file_path);
console.log("fileToUpload: type: ", typeof fileToUpload, ", content: ", fileToUpload);
try {
const uploadResponse = await axios({
method: 'PUT',
url: presignedUrl,
data: fileToUpload,
headers: {
'Content-Type': '',
},
maxContentLength: Infinity,
maxBodyLength: Infinity
});
return uploadResponse.data;
} catch (error) {
console.error('Error while uploading object to S3:', error.message);
}
And it works well, which proves that the generated pre-signed URL is valid.
However, on client side Reactjs:
console.log(`formState.file: type: ${formState.file}, content: ${formState.file}`);
const uploadResponse = await axios({
method: 'PUT',
url: presignedS3Url,
data: formState.file,
headers: {
'Content-Type': ''
},
maxContentLength: Infinity,
maxBodyLength: Infinity
});
It fails and the request ends in a 403 forbidden error.
The difference is that in nodejs code, the fileToUpload is:
type: object, content: <Buffer 50 4b 03 04 14 00 08 00 08 00 78 84 cb 50 00 00 00 00 00 00 00 00 24 ae 12 01 3e 00 20 00 31 2e 32 2e 38 34 30 2e 31 31 33 35 36 34 2e 31 30 2e 31 2e ... 10573784 more bytes>
Whereas in client side, the formState.file is initialized by react-dropzone lib and has the type: formState.file: type: [object File] and its content is:
path: "1.2.840.113564.10.1.312260962047571814316520322884140128208155.zip"
lastModified: 1625164188712
lastModifiedDate: Fri Jul 02 2021 03:29:48 GMT+0900 (Japan Standard Time) {}
name: "1.2.840.113564.10.1.312260962047571814316520322884140128208155.zip"
size: 10573834
type: "application/zip"
webkitRelativePath: ""
[[Prototype]]: File
length: 1
I am not entirely sure that this is the cause. A few thoughts:
fs.readFileSync() is nodejs only, and it is not available in client side Reactjs.
On client side, should I get the zip file in the form of <Buffer ....> and how should I do it?
Or is it ok with the current [object File] type on client side? maybe there is another way to upload it to S3 bucket?
I have tried to save the image in mongoose. I have followed this tutorial. My dataSchema is look like below,
const dataSchema = new mongoose.Schema({
name: {
type: String,
required: false,
},
img: {
type: Buffer,
contentType: String,
required: false,
}
});
I have created a separate folder for routers. Here is my routers.js file,
//Define storage for multer
const storage = multer.diskStorage({
destination: (req, file, cb) => {
cb(null, "uploads");
},
filename: (req, file, cb) => {
const uniqueSuffix = Date.now() + "-" + Math.round(Math.random() * 1e9);
cb(null, file.fieldname + "-" + uniqueSuffix);
},
});
const upload = multer({ storage: storage });
// Post request
router.post("/", upload.single("img"), async (req, res) => {
const data = new Data({
name: req.body.name,
img: {
data: fs.readFileSync(
path.join(__dirname, "../uploads/" + req.file.filename)
),
contentType: "image/png",
},
});
await Data.create(data);
res.send(req.body);
});
I have tried without img field, It was working fine. But with the image field, it is given the following error. I am testing this API in Postman.
(node:476) UnhandledPromiseRejectionWarning: ValidationError: Data validation failed: img: Cast to Buffer failed for value " {
data: <Buffer 89 50 4e 47 0d 0a 1a 0a 00 00 00 0d 49 48 44 52 00 00 01 f0 00 00 01 0a 08 02 00 00 00 62 c7 90 ce 00 00 00 01 73 52 47 42 00 ae ce 1c e9 00 00 00 04 ... 22098 more bytes>,
contentType: 'image/png'
}" (type Object) at path "img"
at model.Document.invalidate (C:\Users\gic\Downloads\FloodAPI\MEN API\node_modules\mongoose\lib\document.js:2879:32)
at model.$set (C:\Users\gic\Downloads\FloodAPI\MEN API\node_modules\mongoose\lib\document.js:1426:12) at model.$set (C:\Users\gic\Downloads\FloodAPI\MEN API\node_modules\mongoose\lib\document.js:1128:16) at model.Document (C:\Users\gic\Downloads\FloodAPI\MEN API\node_modules\mongoose\lib\document.js:148:12)
at model.Model (C:\Users\gic\Downloads\FloodAPI\MEN API\node_modules\mongoose\lib\model.js:106:12)
at new model (C:\Users\gic\Downloads\FloodAPI\MEN API\node_modules\mongoose\lib\model.js:4752:15)
at C:\Users\gic\Downloads\FloodAPI\MEN API\routers\flood.js:51:17
at Layer.handle [as handle_request] (C:\Users\gic\Downloads\FloodAPI\MEN API\node_modules\express\lib\router\layer.js:95:5)
at next (C:\Users\gic\Downloads\FloodAPI\MEN API\node_modules\express\lib\router\route.js:137:13)
at Immediate.<anonymous> (C:\Users\gic\Downloads\FloodAPI\node_modules\multer\lib\make-middleware.js:53:37)
(node:476) UnhandledPromiseRejectionWarning: Unhandled promise rejection. This error originated either by throwing inside of an async function without a catch block, or by rejecting a promise which was not handled with .catch(). To terminate the node process on unhandled promise rejection, use the CLI flag `--unhandled-rejections=strict` (see https://nodejs.org/api/cli.html#cli_unhandled_rejections_mode). (rejection
id: 1)
(node:476) [DEP0018] DeprecationWarning: Unhandled promise rejections are deprecated. In the future, promise rejections that are not handled will terminate the Node.js process with a non-zero exit code.
I used Django before. It was very easy to handle the files. Nodejs seems very complicated for me. Any detailed explanation would be highly appreciated. Please help me to solve this issue.
I am using dropbox for node: "dropbox": "^4.0.17" and trying to upload a file.
Here is the example code:
require('dotenv').config();
const fs = require('fs');
const fetch = require('isomorphic-fetch');
const Dropbox = require('dropbox').Dropbox;
const config = { accessToken: process.env.DROPBOX_ACCESS_TOKEN, fetch: fetch };
const dbx = new Dropbox(config);
const fileContent = fs.readFileSync('full path to some pdf files');
dbx.filesUpload(fileContent)
.then((response) => {
console.log('response', response);
})
.catch((err) => {
console.log('error', err);
});
and here is the response:
{ error: '<html>\r\n<head><title>400 Request Header Or Cookie Too Large</title></head>\r\n<body>\r\n<center><h1>400 Bad Request</h1></center>\r\n<center>Request Header Or Cookie Too Large</center>\r\n<hr><center>nginx</center>\r\n</body>\r\n</html>\r\n',
response:
Body {
url: 'https://content.dropboxapi.com/2/files/upload',
status: 400,
statusText: 'Bad Request',
headers: Headers { _headers: [Object] },
ok: false,
body:
PassThrough {
_readableState: [ReadableState],
readable: false,
domain: null,
_events: [Object],
_eventsCount: 4,
_maxListeners: undefined,
_writableState: [WritableState],
writable: false,
allowHalfOpen: true,
_transformState: [Object] },
bodyUsed: true,
size: 0,
timeout: 0,
_raw:
[ <Buffer 3c 68 74 6d 6c 3e 0d 0a 3c 68 65 61 64 3e 3c 74 69 74 6c 65 3e 34 30 30 20 52 65 71 75 65 73 74 20 48 65 61 64 65 72 20 4f 72 20 43 6f 6f 6b 69 65 20 ... > ],
_abort: false,
_bytes: 226 },
status: 400 }
The argument passed to filesUpload should be a FilesCommitInfo, not just the file contents directly. You can find an example of what it should look like here.
So, for your code, instead of:
dbx.filesUpload(fileContent)
you should so something like:
dbx.filesUpload({ path: '/some/destination/file/path/and/name.ext', contents: fileContent})
(The way you currently have it will end up trying to send the entire file contents as the API call parameters, which happen to be sent in a header, causing the error you get.)