How to handle ESOCKETTIMEDOUT from getBlobToStream() in Azure Storage? - node.js

While trying to get all the blobs using getBlobToStream(), the following error is displayed. How can I handle it?
I also came across "ExponentialRetryPolicyFilter", but I am not sure how to use it; so far I have not found any sample code for it.
blobService.listBlobsSegmentedWithPrefix(containerName, path, null, (err, data) => {
    if (err) {
        console.log('listBlobsSegmentedWithPrefix error', err);
        return;
    }
    data.entries.forEach(entry => {
        var options = {
            rangeStart: 0,
            rangeEnd: entry.contentLength
        };
        if (fs.existsSync(fileUploadPath)) {
            var sourceFilePath = fileUploadPath + '/' + project.id + '/' + entry.name;
            if (!fs.existsSync(sourceFilePath)) {
                fs.mkdir(require('path').dirname(sourceFilePath), { recursive: true }, (err) => {
                    if (err) {
                        console.log("Failed to mkdir: " + err);
                    }
                    blobService.getBlobToStream(containerName, entry.name, fs.createWriteStream(sourceFilePath, { flags: 'a' }), options, (error, data) => {
                        if (error) {
                            console.log('getblobtostream error', error);
                        }
                    });
                });
            }
        }
    });
});
Error:
getblobtostream error Error: read ECONNRESET
at TLSWrap.onStreamRead (internal/stream_base_commons.js:209:20) {
errno: -4077,
code: 'ECONNRESET',
syscall: 'read'
}
getblobtostream error Error: ESOCKETTIMEDOUT
at ClientRequest.<anonymous> (D:\tiger3\Nexus-services\node_modules\request\request.js:816:19)
{
code: 'ESOCKETTIMEDOUT',
connect: false
}

ESOCKETTIMEDOUT is basically a network error; it can happen on a poor or unstable network or under continuous heavy usage. Try adding a retry filter on the client library side.
Apply it when creating the blobService object, before calling getBlobToStream:
var azure = require('azure-storage');
var retryOperations = new azure.ExponentialRetryPolicyFilter();
var blobService = azure.createBlobService().withFilter(retryOperations);
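If the default policy still times out, the filter's parameters can be tuned and the per-call timeouts raised. A minimal sketch, assuming the azure-storage package (retryCount, retryInterval, minRetryInterval and maxRetryInterval are its documented constructor parameters; the timeout values are illustrative, and writeStream/callback stand in for the stream and callback from the question):
var azure = require('azure-storage');
// retry up to 5 times, backing off exponentially between 3s and 30s
var retryOperations = new azure.ExponentialRetryPolicyFilter(5, 3000, 3000, 30000);
var blobService = azure.createBlobService().withFilter(retryOperations);
var options = {
    rangeStart: 0,
    rangeEnd: entry.contentLength,
    clientRequestTimeoutInMs: 10 * 60 * 1000, // socket timeout behind ESOCKETTIMEDOUT (illustrative)
    timeoutIntervalInMs: 5 * 60 * 1000        // server-side timeout (illustrative)
};
blobService.getBlobToStream(containerName, entry.name, writeStream, options, callback);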
See the use-blob-storage sample in the azure-storage-node repository on GitHub.
References:
ESOCKETTIMEDOUT error · Issue #604 · Azure/azure-storage-node · GitHub
Using BlobService.getBlobToStream from javascript client SDK · Issue · GitHub

Related

How do I get around Internal Server Error when testing Azure Function

Does this Node.js code look right? Is there anything missing?
const mysql = require('mysql');
const fs = require('fs');

var config = {
    host: 'mydemoserver.mysql.database.azure.com',
    user: 'myadmin@mydemoserver',
    password: 'your_password',
    database: 'quickstartdb',
    port: 3306,
    ssl: { ca: fs.readFileSync("your_path_to_ca_cert_file_BaltimoreCyberTrustRoot.crt.pem") }
};

const conn = mysql.createConnection(config);
conn.connect(function (err) {
    if (err) {
        console.log("!!! Cannot connect !!! Error:");
        throw err;
    } else {
        console.log("Connection established.");
        readData();
    }
});

function readData() {
    conn.query('SELECT * FROM inventory', function (err, results, fields) {
        if (err) throw err;
        console.log('Selected ' + results.length + ' row(s).');
        for (let i = 0; i < results.length; i++) {
            console.log('Row: ' + JSON.stringify(results[i]));
        }
        console.log('Done.');
    });
    conn.end(function (err) {
        if (err) throw err;
        console.log('Closing connection.');
    });
}
This is to go inside an Azure Function that reads data from an Azure Database for MySQL instance.
When I run it inside the Kudu window by just typing node index.js, it works.
When I try to test it on the Azure Function Test Page, it throws an Internal Server Error 500 with the following error message:
Unable to determine function entry point. If multiple functions are exported, you must indicate the entry point, either by naming it 'run' or 'index', or by naming it explicitly via the 'entryPoint' metadata property.'
Stack: Error: Worker was unable to load function ListBrands: 'Unable to determine function entry point. If multiple functions are exported, you must indicate the entry point, either by naming it 'run' or 'index', or by naming it explicitly via the 'entryPoint' metadata property.'
at C:\Program Files (x86)\SiteExtensions\Functions\4.14.0\workers\node\dist\src\worker-bundle.js:2:13853
at t.LegacyFunctionLoader. (C:\Program Files (x86)\SiteExtensions\Functions\4.14.0\workers\node\dist\src\worker-bundle.js:2:14092)
at Generator.next ()
at o (C:\Program Files (x86)\SiteExtensions\Functions\4.14.0\workers\node\dist\src\worker-bundle.js:2:12538)
at processTicksAndRejections (node:internal/process/task_queues:96:5)
Thanks in advance
Todd
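For reference, the error is unrelated to MySQL: the Node worker could not find an exported entry point in index.js. A minimal sketch of the shape it expects, assuming the classic function.json programming model (the config and query are the ones from the question; wrapping the query in a promise is my own arrangement, not the official sample):
const mysql = require('mysql');
const fs = require('fs');

// same config as in the question
const config = {
    host: 'mydemoserver.mysql.database.azure.com',
    user: 'myadmin@mydemoserver',
    password: 'your_password',
    database: 'quickstartdb',
    port: 3306,
    ssl: { ca: fs.readFileSync("your_path_to_ca_cert_file_BaltimoreCyberTrustRoot.crt.pem") }
};

// The worker looks for an exported function named 'run' or 'index',
// or one named explicitly via 'entryPoint' in function.json.
module.exports = async function (context, req) {
    const conn = mysql.createConnection(config);
    const rows = await new Promise((resolve, reject) => {
        // query() connects implicitly; errors surface in the callback
        conn.query('SELECT * FROM inventory', (err, results) => {
            conn.end();
            if (err) return reject(err);
            resolve(results);
        });
    });
    context.log('Selected ' + rows.length + ' row(s).');
    context.res = { body: rows };
};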

Error: read ECONNRESET for DynamoDB put request using DocumentClient

I am trying to create a new item in my DynamoDB table using the put function of DocumentClient, but I am getting an error that references ECONNRESET. When others have mentioned ECONNRESET on Stack Overflow, it turned out to be a proxy issue for them. I am not sure how I would go about debugging this, though.
Here are the docs I have been using:
https://docs.aws.amazon.com/sdk-for-javascript/v2/developer-guide/dynamodb-example-document-client.html
https://docs.amplify.aws/guides/functions/dynamodb-from-js-lambda/q/platform/js/
Here is the code
import AWS from 'aws-sdk';

AWS.config.update({ region: 'us-east-1' });
const docClient = new AWS.DynamoDB.DocumentClient({ apiVersion: '2012-08-10' });

export const createItem = async (tableName, item) => {
    const params = {
        TableName: tableName,
        Item: item
    };
    console.log(params);
    try {
        await docClient.put(params).promise();
        console.log("Success");
    } catch (err) {
        console.log(err);
    }
};
and here is the error I get
Error: read ECONNRESET
at TLSWrap.onStreamRead (internal/stream_base_commons.js:209:20) {
errno: -4077
code: 'TimeoutError',
syscall: 'read',
time: 2021-09-25T12:30:23.577z,
region: 'us-east-1',
hostname: 'dynamodb.us-east-1.amazonaws.com',
retryable: true
}
Screenshot of code and terminal:
https://i.stack.imgur.com/f4JvP.png
Somebody helped me out: I was using a company CLI via a proxy to do manual local testing. I had to run pc login aws --shared-credentials in the CLI, which is pretty specific to where I work.
I also had to include this code:
const proxy = require('proxy-agent');

AWS.config.update({
    httpOptions: {
        agent: proxy(process.env.HTTP_PROXY)
    }
});
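If you are not behind a proxy, the same ECONNRESET/TimeoutError combination can also come from plain network flakiness. A sketch of raising retries and timeouts on the client, assuming the standard aws-sdk v2 constructor options (the numbers are illustrative, not recommendations):
import AWS from 'aws-sdk';

const docClient = new AWS.DynamoDB.DocumentClient({
    apiVersion: '2012-08-10',
    maxRetries: 5,            // the error above shows retryable: true, so the SDK can retry it
    httpOptions: {
        connectTimeout: 3000, // ms allowed to establish the socket (illustrative)
        timeout: 10000        // ms of socket inactivity before a TimeoutError (illustrative)
    }
});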

getting Error: getaddrinfo ENOTFOUND while performing rest api call in node.js using http.request

I have created an API in Node.js which consumes a set of APIs hosted at http://dev.abc.co.in:20081.
Not every time, but randomly, it throws the error:
Error: getaddrinfo ENOTFOUND dev.abc.co.in
at GetAddrInfoReqWrap.onlookup [as oncomplete] (dns.js:60:26) {
errno: 'ENOTFOUND',
code: 'ENOTFOUND',
syscall: 'getaddrinfo',
hostname: 'dev.abc.co.in'
}
To call those APIs I used the request node module. When I started getting this error I switched to the node-fetch npm module and finally replaced the code with the built-in http module, but I am still getting the same error.
Here is the code I have written using http.request:
try {
    const options = {
        hostname: "dev.abc.co.in",
        port: 20081,
        path: "/api/entity/workorder",
        method: Config.method
    };
    if (Config.headers) {
        options.headers = Config.headers;
    }
    // Content-Length must be in place before the request is created,
    // because headers are sent as soon as the request starts
    if (Config.method != "GET" && Config.body) {
        options.headers = options.headers || {};
        options.headers["Content-Length"] = Buffer.byteLength(Config.body);
    }
    const req = http.request(options, (res) => {
        let data = '';
        res.on('data', (chunk) => {
            data += chunk;
        });
        res.on('end', () => {
            callback(res, data);
        });
        // note: the original code called req.socket.destroy() here,
        // which can kill the connection before 'end' fires
    }).on("error", (err) => {
        console.log("===Error: ", err);
        callback(null, err);
    });
    if (Config.method != "GET" && Config.body) {
        req.write(Config.body);
    }
    req.end();
} catch (e) {
    console.log("Exception=====", e);
}
As the error message shows, the issue is related to DNS, so I tried to resolve the hostname using
node -pe 'require("dns").lookup("dev-vsg.dovertech.co.in", function(){ console.dir(arguments) })'
but it is still not resolved.
1) Omit 'http://' from the beginning of your domain, and drop all slashes from the end as well as any path after the actual domain.
2) Try to resolve your hostname:
const dns = require('dns');

dns.resolve("testdomain.com", 'ANY', (err, records) => {
    if (err) {
        console.log("Error: ", err);
    } else {
        console.log(records);
    }
});
If DNS records are returned, you will know it is a Node.js problem and we can investigate further. If not, it is a domain configuration issue.
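Since the failure is random rather than constant, a retry around the request is a pragmatic stopgap while the DNS side is investigated. A sketch, where makeRequest is a hypothetical wrapper around the http.request code from the question, and the attempt count and delay are arbitrary:
function requestWithRetry(attempt, maxAttempts, callback) {
    // makeRequest(cb) is assumed to invoke cb(err, data) using the
    // http.request code shown above
    makeRequest((err, data) => {
        if (err && err.code === 'ENOTFOUND' && attempt < maxAttempts) {
            // transient lookup failure: back off and try again
            setTimeout(() => requestWithRetry(attempt + 1, maxAttempts, callback),
                1000 * attempt);
            return;
        }
        callback(err, data);
    });
}

requestWithRetry(1, 3, (err, data) => {
    if (err) return console.log("===Error: ", err);
    console.log(data);
});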

How to catch UnhandledPromiseRejectionWarning for GCS WriteStream

Observed Application Behavior
I'm getting an UnhandledPromiseRejectionWarning: Error: Upload failed when using @google-cloud/storage in node.js.
These errors come up when processing thousands of requests. Only a small percentage of them fail, but because I have no way to handle the errors, and the error message carries no context, it's very difficult to determine WHICH files are failing.
I know that in general promises must have a .catch or be surrounded by a try/catch block. But in this case I'm using a write stream, and I'm a bit confused about where the promise that's being rejected is actually located and how I would intercept it. The stack trace is unhelpful, as it only contains library code:
UnhandledPromiseRejectionWarning: Error: Upload failed
at Request.requestStream.on.resp (.../node_modules/gcs-resumable-upload/build/src/index.js:163:34)
at emitTwo (events.js:131:20)
at Request.emit (events.js:214:7)
at Request.<anonymous> (.../node_modules/request/request.js:1161:10)
at emitOne (events.js:121:20)
at Request.emit (events.js:211:7)
at IncomingMessage.<anonymous> (.../node_modules/request/request.js:1083:12)
at Object.onceWrapper (events.js:313:30)
at emitNone (events.js:111:20)
at IncomingMessage.emit (events.js:208:7)
My Code
The code that's creating the writeStream looks like this:
const {join} = require('path')
const {Storage} = require('@google-cloud/storage')

module.exports = (config) => {
  const storage = new Storage({
    projectId: config.gcloud.project,
    keyFilename: config.gcloud.auth_file
  })
  return {
    getBucketWS(path, contentType) {
      const {bucket, path_prefix} = config.gcloud
      // add path_prefix if we have one
      if (path_prefix) {
        path = join(path_prefix, path)
      }
      let setup = storage.bucket(bucket).file(path)
      let opts = {}
      if (contentType) {
        opts = {
          contentType,
          metadata: {contentType}
        }
      }
      const stream = setup.createWriteStream(opts)
      stream._bucket = bucket
      stream._path = path
      return stream
    }
  }
}
And the consuming code looks like this:
const gcs = require('./gcs-helper.js')

module.exports = ({writePath, contentType, item}, done) => {
  let ws = gcs.getBucketWS(writePath, contentType)
  ws.on('error', (err) => {
    err.message = `Could not open gs://${ws._bucket}/${ws._path}: ${err.message}`
    done(err)
  })
  ws.on('finish', () => {
    done(null, {
      path: writePath,
      item
    })
  })
  ws.write(item)
  ws.end()
}
Given that I'm already listening for the error event on the stream, I don't see what else I can do here. There isn't a promise happening at the level of @google-cloud/storage that I'm consuming.
Digging into the @google-cloud/storage Library
The first line of the stack trace leads to a code block in the gcs-resumable-upload node module that looks like this:
requestStream.on('complete', resp => {
  if (resp.statusCode < 200 || resp.statusCode > 299) {
    this.destroy(new Error('Upload failed'));
    return;
  }
  this.emit('metadata', resp.body);
  this.deleteConfig();
  this.uncork();
});
This is passing the error to the destroy method on the stream. The stream is created by the @google-cloud/common project's utility module, which uses the duplexify node module under the hood. The destroy method is defined on the duplexify stream and documented in its README.
Reading the duplexify code, I see that it checks this._ondrain before emitting an error. Maybe I can provide a callback to keep the error from going unhandled?
I tried ws.write(item, null, cb) and still got the same UnhandledPromiseRejectionWarning. I tried ws.end(item, null, cb) and even wrapped the .end call in a try/catch, and ended up with this error, which crashed the process entirely:
events.js:183
throw er; // Unhandled 'error' event
^
Error: The uploaded data did not match the data from the server. As a precaution, the file has been deleted. To be sure the content is the same, you should try uploading the file again.
at delete (.../node_modules/@google-cloud/storage/build/src/file.js:1295:35)
at Util.handleResp (.../node_modules/@google-cloud/common/build/src/util.js:123:9)
at retryRequest (.../node_modules/@google-cloud/common/build/src/util.js:404:22)
at onResponse (.../node_modules/retry-request/index.js:200:7)
at .../node_modules/teeny-request/build/src/index.js:208:17
at <anonymous>
at process._tickCallback (internal/process/next_tick.js:189:7)
My final code looks something like this:
let ws = gcs.getBucketWS(writePath, contentType)

const handleErr = (err) => {
  if (err) err.message = `Could not open gs://${ws._bucket}/${ws._path}: ${err.message}`
  done(err)
}

ws.on('error', handleErr)
// trying to do everything we can to handle these errors
// for some reason we still get UnhandledPromiseRejectionWarning
try {
  ws.write(item, null, err => {
    handleErr(err)
  })
  ws.end()
} catch (e) {
  handleErr(e)
}
Conclusion
It's still a mystery to me how a user of the @google-cloud/storage library, or duplexify for that matter, is supposed to perform proper error handling. Comments from maintainers of either project would be appreciated. Thanks!
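Not an answer to the library question, but a stopgap for the diagnostic half of the problem (knowing WHICH file failed): a process-level unhandledRejection hook plus some app-level bookkeeping. This is plain Node, not part of @google-cloud/storage, and inFlight is a hypothetical structure of my own:
// hypothetical bookkeeping: record which paths are currently uploading
const inFlight = new Set()

function startUpload(writePath) { inFlight.add(writePath) }
function endUpload(writePath) { inFlight.delete(writePath) }

// rejections that escape every local handler still reach this hook,
// and the in-flight set narrows down which file was involved
process.on('unhandledRejection', (reason) => {
  console.error('Unhandled rejection:', reason, 'in flight:', [...inFlight])
})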

Error from Azure IoT Hub library when sending a message

I'm using the Node.js libraries for Azure IoT Hub to send some telemetry data, and intermittently the libraries throw the following error.
Unhandled rejection TypeError: Cannot read property 'on' of undefined
at C:\Source\Messenger\app.js:156:17
at Amqp. (C:\Source\Messenger\node_modules\azure-iot-device\node_modules\azure-iot-common\lib\amqp.js:157:17)
at C:\Source\Messenger\node_modules\azure-iot-device\node_modules\azure-iot-common\lib\amqp.js:54:19
at C:\Source\Messenger\node_modules\azure-iot-device\node_modules\azure-iot-common\lib\amqp.js:84:17
at tryCatcher (C:\Source\Messenger\node_modules\azure-iot-device\node_modules\azure-iot-common\node_modules\amqp10\node_modules\bluebird\js\release\util.js:11:23)
at Promise._settlePromiseFromHandler (C:\Source\Messenger\node_modules\azure-iot-device\node_modules\azure-iot-common\node_modules\amqp10\node_modules\bluebird\js\release\promise.js:489:31)
at Promise._settlePromise (C:\Source\Messenger\node_modules\azure-iot-device\node_modules\azure-iot-common\node_modules\amqp10\node_modules\bluebird\js\release\promise.js:546:18)
at Promise._settlePromise0 (C:\Source\Messenger\node_modules\azure-iot-device\node_modules\azure-iot-common\node_modules\amqp10\node_modules\bluebird\js\release\promise.js:591:10)
at Promise._settlePromises (C:\Source\Messenger\node_modules\azure-iot-device\node_modules\azure-iot-common\node_modules\amqp10\node_modules\bluebird\js\release\promise.js:670:18)
at Async._drainQueue (C:\Source\Messenger\node_modules\azure-iot-device\node_modules\azure-iot-common\node_modules\amqp10\node_modules\bluebird\js\release\async.js:129:16)
at Async._drainQueues (C:\Source\Messenger\node_modules\azure-iot-device\node_modules\azure-iot-common\node_modules\amqp10\node_modules\bluebird\js\release\async.js:139:10)
at Immediate.Async.drainQueues [as _onImmediate] (C:\Source\Messenger\node_modules\azure-iot-device\node_modules\azure-iot-common\node_modules\amqp10\node_modules\bluebird\js\release\async.js:16:14)
At the moment I'm running my scripts through forever, so the process recovers when the error occurs.
Has anyone else had this issue and managed to resolve it?
My code looks as follows:
azureClient = azureDevice.Client.fromConnectionString(connectionString, azureDevice.Amqp);

var message = new azureDevice.Message(JSON.stringify('TEST MESSAGE'));
azureClient.sendEvent(message, function (err) {
    if (err != null) {
        Configure();
    }
});
For this issue, I recommend you use the AmqpWS or Http transport to send and receive messages.
Please follow my steps to send and receive a message:
Firstly, we should create a device and get the device id and SAS. After creating the IoT hub on the new Azure portal, I ran these commands on the node.js command line:
npm install -g iothub-explorer
iothub-explorer HostName=****.azure-devices.net;SharedAccessKeyName=iothubowner;SharedAccessKey=****/****= create mydevice --connection-string
I got the result as following:
Created device mydevice
-
deviceId: mydevice
generationId: 635881160181557968
etag: MA==
connectionState: Disconnected
status: enabled
statusReason: null
connectionStateUpdatedTime: 0001-01-01T00:00:00
statusUpdatedTime: 0001-01-01T00:00:00
lastActivityTime: 0001-01-01T00:00:00
cloudToDeviceMessageCount: 0
authentication:
SymmetricKey:
primaryKey: ****/****=
secondaryKey: ****/****=
-
connectionString: HostName=****.azure-devices.net;DeviceId=mydevice;SharedAccessKey=****/****=
Secondly, I installed the SDK in the node.js project.
Thirdly, I ran this code in my project:
var device = require('azure-iot-device');

var connectionString = 'HostName=****.azure-devices.net;DeviceId=mydevice;SharedAccessKey=****/****=';
var client = device.Client.fromConnectionString(connectionString, device.AmqpWS);

setInterval(function () {
    var windSpeed = 10 + (Math.random() * 4); // range: [10, 14]
    var data = JSON.stringify({ deviceId: 'mydevice', windSpeed: windSpeed });
    var message = new device.Message(data);
    message.properties.add('myproperty', 'myvalue');
    console.log("Sending message: " + message.getData());
    client.sendEvent(message, printResultFor('send'));
}, 1000);

client.getReceiver(function (err, receiver) {
    receiver.on('message', function (msg) {
        console.log('Id: ' + msg.properties.messageId + ' Body: ' + msg.body);
        receiver.complete(msg, function () {
            console.log('completed');
            // receiver.reject(msg, function() {
            //     console.log('rejected');
            // });
            // receiver.abandon(msg, function() {
            //     console.log('abandoned');
            // });
        });
    });
    // register the error handler once, on the receiver itself,
    // rather than inside the 'message' handler
    receiver.on('errorReceived', function (err) {
        console.warn(err);
    });
});

function printResultFor(op) {
    return function printResult(err, res) {
        if (err) console.log(op + ' error: ' + err.toString());
        if (res) console.log(op + ' status: ' + res);
    };
}
It works fine. If you use the "Http" transport, it also works fine; in that case you need to use this code:
var client = device.Client.fromConnectionString(connectionString);
The reason I recommend AmqpWS is this code in "azure-iot-device\node_modules\azure-iot-common\node_modules\amqp10-transport-ws":
if (this.uri.startsWith('wss')) {
    var wsTransport = require('amqp10-transport-ws');
    wsTransport.register(amqp10.TransportProvider);
}
The WebSocket transport is only registered when the URI starts with "wss"; the plain AMQP transport's URI is of the form "amqp://", so that branch never runs. Hope this helps.
Azure IoT Hub is still a preview service on Azure, and the Azure IoT device SDK for Node.js is under rapid development.
I am trying to involve someone familiar with this topic to look further into this issue; there might be some delay.
Appreciate your patience.
