Thank you for taking the time to read this post.
I am new to JS and stuck on a callback issue related to promise() when querying AWS DAX. There is a post (using the .promise() call on a dax-services dynamodb document client returns error: Error: ValidationException: Request object already used) related to this issue, but I am not able to follow the answer.
I also visited the page below, but got stuck because it does not provide a sample for client.send(command):
https://www.npmjs.com/package/@aws-sdk/client-dax
I keep getting the error below:
DaxClientError: ValidationException: Request object already used.
Can you please help correct the code below and explain the solution?
I will truly appreciate your help. I spent an entire day on this and did not make any progress.
const express = require('express');
const AWS = require('aws-sdk');
const AmazonDaxClient = require('amazon-dax-client');

var region = "us-west-2";
AWS.config.update({
  region: region
});

const myendpoint = "daxs://mydax.xxxx.dax-clusters.us-west-2.amazonaws.com";
const dax = new AmazonDaxClient({endpoints: [myendpoint], region: region});
// If using AWS.DynamoDB.DocumentClient ...
const doc = new AWS.DynamoDB.DocumentClient({service: dax});

// Constants
const PORT = 8080;
const HOST = '0.0.0.0';

// App
const app = express();
app.get('/', (req, res) => {
  res.send('Hello World');
});

app.get('/readreview', async (req, res) => {
  try {
    var myresult = 'empty';
    var params = {
      TableName: 'test',
      Key: {
        "id": 'p12',
        "key": 'description'
      }
    };
    await doc.get(params, function(err, data) {
      if (err) {
        console.error("Unable to read item. Error JSON:", JSON.stringify(err, null, 2));
      } else {
        myresult = data;
        console.log("#########" + data);
      }
    }).promise();
    res.send(myresult);
  }
  catch (e) {
    console.log(e);
  }
});

app.listen(PORT, HOST);
console.log(`Running on http://${HOST}:${PORT}`);
Thank you!
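Based on the linked post, the likely cause is that the request is dispatched twice: passing a callback to doc.get() already sends the request, and calling .promise() on the returned request sends it again, hence "Request object already used". A minimal sketch of the route using only the promise form (same doc client and params as above):

app.get('/readreview', async (req, res) => {
  try {
    const params = {
      TableName: 'test',
      Key: {
        "id": 'p12',
        "key": 'description'
      }
    };
    // Use either the callback OR .promise() on a request, never both.
    const data = await doc.get(params).promise();
    console.log("#########" + JSON.stringify(data));
    res.send(data);
  } catch (e) {
    console.error("Unable to read item. Error JSON:", JSON.stringify(e, null, 2));
    res.status(500).send('Unable to read item');
  }
});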
I know this question has many duplicates, but I have already wasted too much time searching for the right solution.
First, take a look at my Node.js code:
var express = require('express');
var app = express();

app.get('/', function (req, res) {
  var sql = require("mssql");

  // config for your database
  var config = {
    user: 'myuser',
    password: 'mypass',
    server: 'myip',
    database: 'mydatabase'
  };

  sql.close();

  // connect to your database
  sql.connect(config, function (err) {
    if (err) console.log(err);

    // create Request object
    var request = new sql.Request();

    // query to the database and get the records
    request.query("select * from AR_Invoices", function (err, recordset) {
      if (err) console.log(err);
      res.json(recordset);
      sql.close();
    });
  });
});

var server = app.listen(5000, '0.0.0.0', function () {
  console.log('Server is running..');
});
This code runs fine, but the JSON result structure looks like this:
{"recordsets":[[{"Tipe":"Invoices","InvoiceID":411891,"InvoiceNumber":"SR.1701.0001"}]],"recordset":[{"Tipe":"Invoices","InvoiceID":411891,"InvoiceNumber":"SR.1701.0001"}],"output":{},"rowsAffected":[1]}
I don't know why, but the result always comes back duplicated.
And how do I select just InvoiceID and InvoiceNumber?
I have already tried recordset.InvoiceID and recordset[0].InvoiceID, but always in vain, and the result is still duplicated.
Can anyone explain how to do this properly?
I want the final result to look like this:
[
{ "InvoiceID":"1", "InvoiceNumber":"mynumber" }
]
For future reference, I finally figured out how to do this. Here is my full code:
var express = require('express');
var app = express();
var dateFormat = require('dateformat');

app.get('/', function (req, res) {
  var sql = require("mssql");

  // config for your database
  var config = {
    user: 'myuser',
    password: 'mypassword',
    server: 'myip',
    database: 'mydb'
  };

  sql.close();

  // connect to your database
  sql.connect(config, function (err) {
    if (err) console.log(err);

    // create Request object
    var request = new sql.Request();

    // query to the database and get the records
    request.query("select top 2 'Invoices' as Tipe, InvoiceID, InvoiceNumber, InvoiceDate, (select top 1 DriverPicture from dbDigitalApp.dbo.tbdriver) as Blob from AR_Invoices", function (err, result) {
      if (err) console.log(err);

      var myarr = new Array();
      for (var i = 0; i < result.recordset.length; ++i) {
        var InvoiceNumber = result.recordset[i].InvoiceNumber;
        var InvoiceDate = dateFormat(result.recordset[i].InvoiceDate, "dd mmmm yyyy");
        var Blob = result.recordset[i].Blob;
        myarr.push({ 'InvoiceNumber': InvoiceNumber, 'InvoiceDate': InvoiceDate, 'Blob': Buffer.from(Blob).toString('base64') });
      }

      res.json(myarr);
      sql.close();
    });
  });
});

var server = app.listen(5000, '0.0.0.0', function () {
  console.log('Server is running..');
});
and the result of the above code is like this: [screenshot omitted]
With the above code you can fetch only the specific fields you need and do whatever you want with that data, such as changing the date format or base64-encoding a value.
I don't know if this is the cleanest way to do it, since res.json can already send every field of the retrieved data without looping through it.
But at least this is my kind of solution; I hope it will be helpful to future people wondering about the same thing.
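As far as I can tell, the "duplicate" in the raw JSON is just the mssql result object exposing both recordsets (the array of all result sets) and recordset (a convenience alias for the first one); nothing is queried twice. A shorter sketch of the same idea, selecting only the two fields and mapping over result.recordset (assuming the same connection setup as above):

request.query("select InvoiceID, InvoiceNumber from AR_Invoices", function (err, result) {
  if (err) return console.log(err);
  // keep only the fields we care about from the first recordset
  var rows = result.recordset.map(function (row) {
    return { InvoiceID: row.InvoiceID, InvoiceNumber: row.InvoiceNumber };
  });
  res.json(rows);
  sql.close();
});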
I built a Node.js server to act as an adapter server: upon receiving a POST request containing some data, it extracts the data from the request body and then forwards it to a few other external servers. Finally, my server sends a response consisting of the responses from each external server (success/fail).
If there is only one endpoint to forward to, it seems fairly straightforward. However, when I have to forward to more than one server, I have to rely on things like Promise.all(), which has fail-fast behaviour. That means if one promise is rejected (an external server is down), all other promises will also be rejected immediately and the rest of the servers will not receive my data.
This may not be the exact solution, but what I am posting could be a workaround for your problem.
A few days back I had the same problem when I wanted to implement API versioning. Here is the solution I implemented; please have a look.
Architecture diagram: [image omitted]
Let me explain this diagram.
The diagram shows the initial configuration for the server. All API requests arrive here and are passed to the index.js file inside the release directory.
index.js (in release directory)
const express = require('express');
const fid = require('./core/file.helper');
const router = express.Router();

fid.getFiles(__dirname, './release').then(releases => {
  releases.forEach(release => {
    // release = release.replace(/.js/g, '');
    router.use(`/${release}`, require(`./release/${release}/index`));
  });
});

module.exports = router;
Code snippet for file.helper.js:
// requiring path and fs modules
const path = require('path');
const fs = require('fs');

module.exports = {
  getFiles: (presentDirectory, directoryName) => {
    return new Promise((resolve, reject) => {
      // joining path of directory
      const directoryPath = path.join(presentDirectory, directoryName);
      // passing directoryPath and callback function
      fs.readdir(directoryPath, function (err, files) {
        // handling error; return so we don't fall through to resolve
        if (err) {
          console.log('Unable to scan directory: ' + err);
          return reject(err);
        }
        // resolve with the list of file names
        resolve(files);
      });
    });
  }
}
Now, from this index file, the index.js inside each version folder is mapped.
Below is the code for the index.js inside v1, v2, and so on:
const express = require('express');
const mongoose = require('mongoose');
const fid = require('../../core/file.helper');
const dbconf = require('./config/datastore');
const router = express.Router();

// const connection_string = `mongodb+srv://${dbconf.atlas.username}:${dbconf.atlas.password}@${dbconf.atlas.host}/${dbconf.atlas.database}`;
const connection_string = `mongodb://${dbconf.default.username}:${dbconf.default.password}@${dbconf.default.host}:${dbconf.default.port}/${dbconf.default.database}`;

mongoose.connect(connection_string, {
  useCreateIndex: true,
  useNewUrlParser: true
}).then(status => {
  console.log(`Database connected to mongodb://${dbconf.default.username}@${dbconf.default.host}/${dbconf.default.database}`);
  fid.getFiles(__dirname, './endpoints').then(files => {
    files.forEach(file => {
      // strip the .js extension from the file name
      file = file.replace(/\.js$/, '');
      router.use(`/${file}`, require(`./endpoints/${file}`));
    });
  });
}).catch(err => {
  console.log(`Error connecting database ${err}`);
});

module.exports = router;
Each of these index.js files inside a version folder maps to the endpoints inside its endpoints folder.
The code for one of the endpoints is given below:
const express = require('express');
const router = express.Router();
const userCtrl = require('../controllers/users');
router.post('/signup', userCtrl.signup);
router.post('/login', userCtrl.login);
module.exports = router;
In this file we connect the endpoints to their controllers.
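For completeness, a hypothetical controllers/users.js matching those routes might look like the sketch below; the handler bodies are placeholders, not part of the original setup:

// controllers/users.js (hypothetical sketch)
module.exports = {
  signup: (req, res) => {
    // create the user here
    res.status(201).json({ message: 'signed up' });
  },
  login: (req, res) => {
    // authenticate the user and issue a token here
    res.status(200).json({ message: 'logged in' });
  }
};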
var config = {
  'targets': [
    'https://abc.api.xxx',
    'https://xyz.abc',
    'https://stackoverflow.net'
  ]
};

relay(req, resp, config);

function relay(req, resp, config) {
  doRelay(req, resp, config['targets'], relayOne);
}

function doRelay(req, resp, servers, relayOne) {
  var finalresponses = [];
  if (servers.length > 0) {
    var loop = function (servers, index, relayOne, done) {
      relayOne(req, servers[index], function (response) {
        finalresponses.push(response);
        if (++index < servers.length) {
          setTimeout(function () {
            loop(servers, index, relayOne, done);
          }, 0);
        } else {
          done(resp, finalresponses);
        }
      });
    };
    loop(servers, 0, relayOne, done);
  } else {
    done(resp, finalresponses);
  }
}

function relayOne(req, targetserver, relaydone) {
  // call the targetserver and return the response data
  /* return relaydone(response data); */
}

function done(resp, finalresponses) {
  console.log('ended');
  resp.writeHead(200, 'OK', {
    'Content-Type': 'text/plain'
  });
  // finalresponses is an array, so serialize it before ending the response
  resp.end(JSON.stringify(finalresponses));
}
It sounds like you are trying to design a reverse proxy. If you are struggling to get custom code to work, there is a free npm library that is very robust.
I would recommend node-http-proxy.
I have posted a link below that will lead you directly to the "modify a response" section, since you mentioned modifying the API format in your question. Be sure to read the entire page, though.
https://github.com/http-party/node-http-proxy#modify-a-response-from-a-proxied-server
Note: this library is also very good because it supports SSL and proxies both to localhost (servers on the same machine) and to servers on other machines (remote).
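Roughly following that section of the README, a minimal sketch looks like this (the target URL and the uppercase transform are illustrative placeholders):

var http = require('http');
var httpProxy = require('http-proxy');

// selfHandleResponse lets the proxyRes handler write the (modified) response itself
var proxy = httpProxy.createProxyServer({
  target: 'http://localhost:9000',
  selfHandleResponse: true
});

proxy.on('proxyRes', function (proxyRes, req, res) {
  var body = [];
  proxyRes.on('data', function (chunk) { body.push(chunk); });
  proxyRes.on('end', function () {
    // modify the upstream body before forwarding it to the client
    res.end(Buffer.concat(body).toString().toUpperCase());
  });
});

http.createServer(function (req, res) {
  proxy.web(req, res);
}).listen(8080);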
From the MDN docs on Promise.all():
"It rejects with the reason of the first promise that rejects."
To overcome the problem, you'll need to catch() each request you've made.
e.g.
Promise.all([
  request('<url 1>').catch(err => ({ err })),
  request('<url 2>').catch(err => ({ err })),
  request('<url 3>').catch(err => ({ err }))
])
.then(([result1, result2, result3]) => {
  if (result1.err) { /* url 1 failed */ }
  if (result2.err) { /* url 2 failed */ }
  if (result3.err) { /* url 3 failed */ }
})
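On Node 12.9+ there is also Promise.allSettled(), which never fails fast and reports each outcome separately; a sketch of the same fan-out with it (request() stands in for whatever HTTP helper you use):

Promise.allSettled([
  request('<url 1>'),
  request('<url 2>'),
  request('<url 3>')
]).then(results => {
  results.forEach((result, i) => {
    if (result.status === 'fulfilled') {
      console.log(`server ${i}: success`, result.value);  // result.value is the response
    } else {
      console.log(`server ${i}: failed`, result.reason);  // result.reason is the error
    }
  });
});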
I am able to upload a file using openUploadStream of GridFSBucket and can see that the file is uploaded and visible under songs.files and songs.chunks. But for some reason, I get the following error while trying to download it:
Caught exception: Error: FileNotFound: file def1.txt was not found
My code is:
var express = require('express');
var gridModule = express.Router();
var mongoose = require('mongoose');
var fs = require('fs');
var assert = require('assert'); // needed for assert.ifError below

gridModule.post('/', (req, res) => {
  console.log("::::grid");
  var gridfs = new mongoose.mongo.GridFSBucket(mongoose.connection.db, {
    chunkSizeBytes: 1024,
    bucketName: 'songs'
  });
  fs.createReadStream('./def.txt').
    pipe(gridfs.openUploadStream('def1.txt')).
    on('error', function (error) {
      assert.ifError(error);
    }).
    on('finish', function () {
      console.log('done!');
      process.exit(0);
    });
});

gridModule.get('/', (req, res) => {
  var gridfs = new mongoose.mongo.GridFSBucket(mongoose.connection.db, {
    chunkSizeBytes: 1024,
    bucketName: 'songs'
  });
  gridfs.openDownloadStream('def1.txt').
    pipe(fs.createWriteStream('./def1.txt')).
    on('error', function (error) {
      console.log(":::error");
      assert.ifError(error);
    }).
    on('finish', function () {
      console.log('done!');
      process.exit(0);
    });
});

module.exports = gridModule;
I tried using the ObjectId as well, but got the same error. Does anyone have any guesses as to what I may be doing wrong here?
Note: the code may not seem optimized (e.g. declaring the bucket twice); kindly ignore that for now, as I will correct it once it works.
According to the API doc here, in order to use a filename as the argument you should use
openDownloadStreamByName(filename, options)
not openDownloadStream. openDownloadStream takes the id of the file.
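So the GET handler above should work with a one-line change, something like:

gridfs.openDownloadStreamByName('def1.txt').
  pipe(fs.createWriteStream('./def1.txt')).
  on('error', function (error) {
    console.log(":::error");
  }).
  on('finish', function () {
    console.log('done!');
  });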
Another possible explanation, if you are already calling openDownloadStream with what you are 100% sure is the correct id and you still get the FileNotFound error, is that you did not pass an ObjectId type.
In my case, I was passing the id as a string instead of as an ObjectId:
bucket.openDownloadStream(mongoose.Types.ObjectId(id));
vs
bucket.openDownloadStream(id);
I am trying a simple Firebase and Node.js/Express routing app. The problem I am facing is that when I send a POST after a GET request, my Node.js backend creates the record on the Firebase backend, but then it crashes, stating "Cannot set headers after they are sent to the client".
The functions are:
/* GET all existing group listings. */
router.get('/', function (req, res, next) {
  var db = admin.database();
  var dataToShow = [];
  var source = req.query.source;
  var destination = req.query.destination;
  var groupsListing = db.ref(url).orderByChild('source').equalTo(source);
  groupsListing.on('value', function (snapshot) {
    snapshot.forEach(function (childSnapshot) {
      var childKey = childSnapshot.key;
      var childData = childSnapshot.val();
      if (childData.destination == destination) {
        dataToShow.push(childData);
      }
    });
    return res.status(200).json(dataToShow);
  });
});

/* POST a new group listing. */
router.post('/', function (req, res, next) {
  var db = admin.database();
  var newKey = db.ref(url).push().key;
  var groupData = {
    key: newKey,
    source: req.body.source,
    destination: req.body.destination,
    time: req.body.time,
  };
  db.ref(url + newKey).update(groupData, function () {
    return res.send("Added");
  });
});
I am unable to understand why it isn't working, because I can't see anywhere that multiple responses are being sent. I have tried a couple of other things, like returning JSON objects, but that didn't really help. My simple queries through Postman fail.
Any help on this would be highly appreciated.
I think your groupsListing.on('value') callback is executed many times. A 'value' listener fires once with the initial data and again on every subsequent change, so the code inside it (including res.status(200).json(...)) can run more than once per request, which triggers "Cannot set headers after they are sent to the client".
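A minimal sketch of that fix, assuming the rest of the handler stays the same: use once('value'), which fires a single time per request, instead of on('value'), which also fires on every later change:

router.get('/', function (req, res, next) {
  var db = admin.database();
  var dataToShow = [];
  var source = req.query.source;
  var destination = req.query.destination;
  // once() reads the value a single time, so res.json runs exactly once per request
  db.ref(url).orderByChild('source').equalTo(source).once('value', function (snapshot) {
    snapshot.forEach(function (childSnapshot) {
      var childData = childSnapshot.val();
      if (childData.destination == destination) {
        dataToShow.push(childData);
      }
    });
    res.status(200).json(dataToShow);
  });
});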
I'm using the mongoose ODM for a project. My schema looks like this:
const applicantSchema = new Schema({
  regno: {
    type: String,
    unique: true,
    required: true
  },
  email: {
    type: String,
    unique: true,
    required: true
  }
});
const Applicant = mongoose.model('Applicant', applicantSchema);
I created a wrapper function to add a new document which looks like this:
function addApplicant(newApplicant, callback) {
  mongoose.connect(url);
  const db = mongoose.connection;
  console.log(newApplicant);
  console.log(typeof newApplicant);
  const applicant = new Applicant(newApplicant);
  applicant.save((err) => {
    if (err) return callback(err);
    let info = "successfully saved target";
    return callback(null, info);
  });
}
I call this function within the route that handles the relevant POST request.
router.post('/applicant/response', (req, res) => {
  // process the sent response here and add it to the DB
  let newApplicant = {
    regno: req.body.regno,
    email: req.body.email
  };
  applicant.addApplicant(newApplicant, (err, info) => {
    if (err) { console.log(err); return res.end(err); }
    res.end('complete, resp: ' + info);
  });
});
However, mongoose gives me a validation error (path 'regno' is required) even though I am supplying a value for regno. This happens with all the fields marked as required.
If I remove the 'required: true' option, the document is saved to the db as expected.
Any help will be appreciated. :)
It turns out that in this case, something was wrong with the way Postman was sending data in the POST request. When I tested this later in Postman using JSON as the data format (and ensured that the Content-Type header was set to application/json), the code worked as expected.
To those facing a similar issue: check the headers Postman sends with the request, and ensure that they are what you'd expect them to be.
In your express entry file, where you expose your endpoints and set up express, you should have app.use(express.json()); above the endpoints.
const express = require("express");
require("./src/db/mongoose");
const User = require("./src/models/user");

const app = express();
const port = process.env.PORT || 3000;

// THIS LINE IS MANDATORY
app.use(express.json());

app.post("/users", async (req, res) => {
  const user = new User(req.body);
  try {
    await user.save();
    res.status(201).send(user);
  } catch (error) {
    res.status(400).send(error);
  }
});

app.listen(port, () => {
  console.log(`Server is running on port ${port}`);
});