Google Cloud Function failed with timeout - node.js

I have a Pub/Sub-triggered Cloud Function that calls an API endpoint and logs the response. However, the only log messages I see in the console are the ones written right before the API call.
Once the API is called, I log the response, and the exception message if anything fails.
Instead, the logs show: Function execution took 120015 ms. Finished with status: timeout. The timeout was originally the default 60 seconds; I later increased it to 120 seconds, but the problem persists.
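(For reference, the timeout can be set at deploy time; a minimal sketch, where the runtime and topic name are assumptions, not taken from the question:)
gcloud functions deploy triggerPortalNotifier \
    --runtime=nodejs16 \
    --trigger-topic=portal-notifications \
    --timeout=120s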
I don't understand the issue, since the same code works locally without any problems.
I have a custom module that logs messages to Winston and the GCP console (it works fine and is not the issue).
Code calling the API module:
const console = require('./logging-utils');
const portal_api = require('./api-utils');

exports.triggerPortalNotifier = async (event, context) => {
    try {
        /*
        ..... (message parsing/validation elided in the original)
        */
        console.metadata.cloudFunction = cf_name;
        console.metadata.requestId = requestId;
        console.metadata.organizationId = organizationId;
        console.metadata.instanceId = instanceId;
        console.logMessage(`Event received with payload: some message`);
        if (isValidMessage) { // hypothetical condition; the real check paired with the `else` below is part of the elided code above
            var payload = {
                // payload to API
            };
            var response = await portal_api.notifyPortal(payload);
            console.logMessage(`Response received from portal API is: ${JSON.stringify(response.data)}`);
        }
        else {
            throw new Error(`Invalid message received: ${_message}`);
        }
    }
    catch (error) {
        console.logMessage(`Portal API failed with exception: ${error}`);
        throw new Error(`${error.message}`);
    }
};
Code that makes the API request (using the axios module):
require('dotenv').config();
const axios = require('./axios-instance');
const console = require('./logging-utils');

const nextgen_api = {
    notifyPortal: async (payload) => {
        try {
            const config = {
                headers: {
                    'Authorization': process.env.PORTAL_AUTHORIZATION_TOKEN,
                    'Content-Type': "application/json",
                    'Accept': "application/plain"
                }
            };
            console.logMessage(`Input payload for API end-point: ${process.env.PORTAL_API} => ${JSON.stringify(payload)}`);
            const response = await axios.post(process.env.PORTAL_API, JSON.parse(JSON.stringify(payload)), config);
            console.logMessage(`Response from API: ${JSON.stringify(response.data)}`);
            return response;
        }
        catch (err) {
            if (err.response && err.response.status !== 200) {
                console.logMessage(`API call failed with status code: ${err.response.status}`);
                throw new Error(`API call failed with status code: ${err.response.status}`);
            }
            else {
                console.logMessage(`API call failed with ${err.stack}`);
                throw new Error(`API call failed with: ${err.stack}`);
            }
        }
    }
};
module.exports = nextgen_api; // was `my_api` in the original, which is undefined
The message `Response from API: ${JSON.stringify(response.data)}` is never logged.
Any help here is appreciated.
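One thing worth ruling out (an assumption, not a confirmed diagnosis): axios has no default timeout, so if the portal API never responds from inside the function's network environment, the await hangs until the function itself is killed. Giving the request its own, shorter timeout would surface the failure as a catchable error instead:
const config = {
    timeout: 30000, // fail the request after 30 s instead of hanging until the function timeout
    headers: {
        'Authorization': process.env.PORTAL_AUTHORIZATION_TOKEN,
        'Content-Type': "application/json",
        'Accept': "application/plain"
    }
};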

Related

Cloud Run Readablestream takes 5 minutes to cancel enqueue

I created an adapter-node SvelteKit API endpoint which streams quotes using a ReadableStream. When I leave the client route, the streaming has to stop. This works fine in development using SvelteKit "npm run dev" (vite dev) or in a Windows desktop container (node build).
onDestroy(async () => {
    await reader.cancel(); // stop streaming
    controller.abort(); // signal fetch abort
});
But when I build and deploy the Node container on Google Cloud Run, the streaming works fine, except when I leave the client route: the API endpoint keeps on streaming. The log shows enqueues for 5 more minutes, followed by a delayed ReadableStream cancel() on the API server.
Why the 5 minutes between the client cancel/abort and the cancel on the server?
The API +server.js
import { YahooFinanceTicker } from "yahoo-finance-ticker";

/** @type {import('./$types').RequestHandler} */
export async function POST({ request }) {
    const { logging, symbols } = await request.json();
    const controller = new AbortController();
    const ticker = new YahooFinanceTicker();
    ticker.setLogging(logging);
    if (logging) console.log("api ticker", symbols);
    const stream = new ReadableStream({
        start(controller) {
            (async () => {
                const tickerListener = await ticker.subscribe(symbols);
                tickerListener.on("ticker", (quote) => {
                    if (logging) console.log("api", JSON.stringify(quote, ["id", "price", "changePercent"]));
                    controller.enqueue(JSON.stringify(quote, ["id", "price", "changePercent"]));
                });
            })().catch(err => console.error(`api listen exception: ${err}`));
        },
        cancel() { // arrives after 5 minutes !!!
            console.log("api", "cancel: unsubscribe ticker and abort");
            ticker.unsubscribe();
            controller.abort();
        },
    });
    return new Response(stream, {
        headers: {
            'content-type': 'text/event-stream',
        }
    });
}
Route +page.svelte
const controller = new AbortController();
let reader = null;
const signal = controller.signal;

async function streaming(params) {
    try {
        const response = await fetch("/api/yahoo-finance-ticker", {
            method: "POST",
            body: JSON.stringify(params),
            headers: {
                "content-type": "application/json",
            },
            signal: signal,
        });
        const stream = response.body.pipeThrough(new TextDecoderStream("utf-8"));
        reader = stream.getReader();
        while (true) {
            const { value, done } = await reader.read();
            if (logging) console.log("resp", done, value);
            if (done) break;
            // ... and more to get the quotes
        }
    } catch (err) {
        if (!["AbortError"].includes(err.name)) throw err;
    }
}
...
The behavior you are observing is expected: Cloud Run does not support client-side disconnects yet.
It is mentioned in this article that:
Cloud Run (fully managed) currently only supports server-side streaming. Having only "server-side streaming" basically means when the "client" disconnects, "server" will not know about it and will carry on with the request. This happens because "server" is not connected directly to the "client" and the request from the "client" is buffered (in its entirety) and then sent to the "server".
You can also check this similar thread.
It is a known issue; a public issue already exists for it. You can follow that issue for future updates and add your concerns there.
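Until that is resolved, one workaround (a sketch; the session bookkeeping and the DELETE endpoint are hypothetical, not part of the original code) is to stop relying on the disconnect entirely: have the client send an explicit "stop" request from onDestroy, and tear the ticker down there:
// Hypothetical companion handler in +server.js. The POST handler would
// register its ticker in this map under a client-generated session id.
const tickers = new Map();

/** @type {import('./$types').RequestHandler} */
export async function DELETE({ request }) {
    const { sessionId } = await request.json();
    const ticker = tickers.get(sessionId);
    if (ticker) {
        ticker.unsubscribe(); // stop the upstream subscription immediately
        tickers.delete(sessionId);
    }
    return new Response(null, { status: 204 });
}
Note that with more than one Cloud Run instance the stop request can land on a different instance than the stream, so this sketch assumes a single instance or session affinity.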

Error [ERR_HTTP_HEADERS_SENT]: Cannot set headers after they are sent to the client NodeJS with Firebase

Description
Every time I include response.send(snapshot.val()); this error occurs and the NodeJS server crashes. When I test the API endpoint with the header in Postman, it works okay.
Output
On the terminal: 401 Unauthorized
[2020-11-05T13:23:24.421Z] #firebase/database: FIREBASE WARNING: Exception was thrown by user callback. Error [ERR_HTTP_HEADERS_SENT]: Cannot set headers after they are sent to the client
Error [ERR_HTTP_HEADERS_SENT]: Cannot set headers after they are sent to the client
NodeJS
const getsRoute = require('./routes/gets');
app.use('/api/v1', getsRoute);

// on another file
const auth = require('../middleware/auth');
router.get('/products', auth, (req, res) => {
    initialPage.getBusinesses(res);
});
Code that produces the error
const initialPage = {
    getBusinesses(response) {
        ref.orderByKey()
            .limitToLast(20)
            .on('value', function (snapshot) {
                response.json(snapshot.val());
                return snapshot.val();
            });
    }
};
Client side using React Native
searchApi = async () => {
    const response = await axios.get('http://33c75838823c90.ngrok.io/api/v1/products', {
        headers: {
            "Content-Type": "application/json",
            "Accept": "application/json",
            "x-auth-token": "jgiiHDgfdeizI1NiIJ9.eyJfaWQiOiI1ZmEwMWMzZmM4YjIwYjBjZDQyMmJkNzUiLCJpYXQiOjE2MDQ0MTAwMDZ0KwFAgVtsJUQw"
        }
    }).catch((error) => {
        console.log("ERROR FROM AXIOS:", error);
    });
    console.log("RESPONSE DATA: %%%%%%%", response.data);
    this.setState({ results: [response.data] });
};
I have checked many documents and forum questions, but none of them gives a solution that fits this problem.
The problem is here:
getBusinesses(response) {
    ref.orderByKey()
        .limitToLast(20)
        .on('value', function (snapshot) {
            response.json(snapshot.val());
            return snapshot.val();
        });
}
Since you're using on(...), your callback will be called:
As soon as the data is loaded,
After that, any time the data changes.
The first one is working as intended, but if the data ever changes this means you're trying to send another response, which is what's causing the error.
To solve this, use once instead of on:
getBusinesses(response) {
    ref.orderByKey()
        .limitToLast(20)
        .once('value', function (snapshot) {
            response.json(snapshot.val());
            return snapshot.val();
        });
}
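An equivalent promise-based form of the same fix (a sketch, assuming a Firebase SDK where once() returns a promise when no callback is given), which also sends a response on failure instead of crashing:
getBusinesses(response) {
    ref.orderByKey()
        .limitToLast(20)
        .once('value')
        .then(snapshot => response.json(snapshot.val()))
        .catch(err => response.status(500).json({ error: err.message }));
}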

AWS Lambda - unable to make call to 3rd resource

I have a Node.js Lambda function behind API Gateway. What I am trying to do is:
Make a call to the API
Do some logic
Send a message to Slack
Problem:
When I run the default test request, the function finishes successfully but it doesn't send a message to Slack (the logs only print "Slack" and not "HAHA1"/"HAHA2"). However, it does send the message when I run this function from my local machine, which leads me to believe that AWS blocks some non-client-server traffic. My function is not in a VPC. What can I do to allow outgoing traffic to Slack? Thanks.
const AWS = require('aws-sdk');
const Slack = require('slack-node');
const dynamo = new AWS.DynamoDB.DocumentClient();

/**
 * Demonstrates a simple HTTP endpoint using API Gateway. You have full
 * access to the request and response payload, including headers and
 * status code.
 *
 * To scan a DynamoDB table, make a GET request with the TableName as a
 * query string parameter. To put, update, or delete an item, make a POST,
 * PUT, or DELETE request respectively, passing in the payload to the
 * DynamoDB API as a JSON body.
 */
sendSlack();

function sendSlack() {
    webhookUri = "https://hooks.slack.com/services/....";
    slack = new Slack();
    slack.setWebhook(webhookUri);
    console.log('Slack');
    slack.webhook({
        channel: "...",
        username: "...",
        text: "..."
    }, function (err, response) {
        console.log("HAHA1");
        console.log(response);
        console.log("HAHA2");
    });
}

exports.handler = async (event, context) => {
    console.log('Received event:', JSON.stringify(event, null, 2));
    sendSlack();
    let body;
    let statusCode = '200';
    const headers = {
        'Content-Type': 'application/json',
    };
    try {
        switch (event.httpMethod) {
            case 'DELETE':
                body = await dynamo.delete(JSON.parse(event.body)).promise();
                break;
            case 'GET':
                body = await dynamo.scan({ TableName: event.queryStringParameters.TableName }).promise();
                break;
            case 'POST':
                body = await dynamo.put(JSON.parse(event.body)).promise();
                break;
            case 'PUT':
                body = await dynamo.update(JSON.parse(event.body)).promise();
                break;
            default:
                throw new Error(`Unsupported method "${event.httpMethod}"`);
        }
    } catch (err) {
        statusCode = '400';
        body = err.message;
    } finally {
        body = JSON.stringify(body);
    }
    return {
        statusCode,
        body,
        headers,
    };
};
You will need to wait for the slack call by either providing a callback or promisifying it:
function sendSlack() {
    return new Promise((resolve, reject) => {
        const webhookUri = "https://hooks.slack.com/services/....";
        const slack = new Slack();
        slack.setWebhook(webhookUri);
        console.log('Slack');
        slack.webhook({
            channel: "...",
            username: "...",
            text: "..."
        }, function (err, response) {
            if (err) return reject(err);
            resolve(response);
        });
    });
}
Then, in your code:
await sendSlack()
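In context, that might look like this (a sketch; the rest of the handler from the question is unchanged):
exports.handler = async (event, context) => {
    console.log('Received event:', JSON.stringify(event, null, 2));
    await sendSlack(); // the handler now waits for the webhook call to settle
    // ... rest of the handler unchanged
};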
It could be that your Lambda handler is completing before your call to Slack has executed.
Since the call to Slack is async, I would suggest you await its response before your handler returns a response.
I'm not 100% certain, but my guess is that the async Slack call gets scheduled but not run until the main body of the function has completed, at which point the Lambda is "spun down" and nothing further is run.

unexpected behavior using zip-stream NPM on Google k8s

I am working on creating a zip of multiple files on the server and streaming it to the client while it is being created. Initially I was using ArchiverJs; it worked fine when I appended buffers to it, but it failed when I needed to add streams. After a discussion on GitHub, I switched to Node zip-stream, which started working fine, thanks to jntesteves. But when I deploy the code on GKE (k8s), I start getting "network failed" errors for huge files.
Here is my sample code :
const https = require("https");
const http = require("http");
const request = require("request");
const ZipStream = require("zip-stream");

/**
 * @summary Adds a readable stream (provided by the https module) into zipStreamer using the entry method
 */
const handleEntryCB = ({ readableStream, zipStreamer, fileName, resolve }) => {
    readableStream.on("error", error => {
        console.error("Error while listening readableStream : ", error);
        resolve("done");
    });
    zipStreamer.entry(readableStream, { name: fileName }, error => {
        if (!error) {
            resolve("done");
        } else {
            console.error("Error while listening zipStream readableStream : ", error);
            resolve("done");
        }
    });
};
/**
 * @summary Handles downloading of files using the native https, http and request modules
 */
const handleUrl = ({ elem, zipStreamer }) => {
    return new Promise((resolve, reject) => {
        let fileName = elem.fileName;
        const url = elem.url;
        // Used in most of the cases
        if (url.startsWith("https")) {
            https.get(url, readableStream => {
                handleEntryCB({ readableStream, zipStreamer, url, fileName, resolve, reject });
            });
        } else if (url.startsWith("http")) {
            http.get(url, readableStream => {
                handleEntryCB({ readableStream, zipStreamer, url, fileName, resolve, reject });
            });
        } else {
            const readableStream = request(url);
            handleEntryCB({ readableStream, zipStreamer, url, fileName, resolve, reject });
        }
    });
};
const downloadZipFile = async (data, resp) => {
    let { urls = [] } = data || {};
    if (!urls.length) {
        throw new Error("URLs are mandatory.");
    }
    // Output zip name
    const outputFileName = `Test items.zip`;
    console.log("Downloading using streams.");
    // Initialize zip-stream instance
    const zipStreamer = new ZipStream();
    // Set headers on the response
    resp.writeHead(200, {
        "Content-Type": "application/zip",
        "Content-Disposition": `attachment; filename="${outputFileName}"`,
        "Access-Control-Allow-Origin": "*",
        "Access-Control-Allow-Methods": "GET, POST, OPTIONS"
    });
    // Piping zipStreamer to resp so that the client starts getting the response
    // as soon as the first chunk is added to the zipStreamer
    zipStreamer.pipe(resp);
    for (const elem of urls) {
        await handleUrl({ elem, zipStreamer });
    }
    zipStreamer.finish();
};

app.post(restPrefix + "/downloadFIle", (req, resp) => {
    try {
        const { data } = req.body || {};
        downloadZipFile(data, resp);
    } catch (error) {
        console.error("[FileBundler] unknown error : ", error);
        if (resp.headersSent) {
            resp.end("Unknown error while archiving.");
        } else {
            resp.status(500).end("Unknown error while archiving.");
        }
    }
});
I tested with 7-8 files of ~4.5 GB each locally, and it works fine; when I tried the same on Google k8s, I got a "network failed" error.
After some more research I increased the server timeout on k8s to 3000 seconds, and then it started working, but I guess increasing the timeout is not a good solution.
Is there anything I am missing at the code level, or can you suggest a good GKE deployment configuration for a server that downloads large files with many concurrent users?
I have been stuck on this for the past 1.5+ months. Please help!
Edit 1: I edited the timeout in the ingress, i.e. Network services -> Load balancing -> edit the timeout in the service.
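For reference, the same timeout can be set declaratively instead of through the console; a sketch assuming a GKE BackendConfig attached to the Service (resource names and ports are hypothetical):
apiVersion: cloud.google.com/v1
kind: BackendConfig
metadata:
  name: zip-download-backendconfig
spec:
  timeoutSec: 3000   # matches the 3000 s that worked via the console
---
apiVersion: v1
kind: Service
metadata:
  name: zip-download-service
  annotations:
    cloud.google.com/backend-config: '{"default": "zip-download-backendconfig"}'
spec:
  ports:
    - port: 80
      targetPort: 8080
  selector:
    app: zip-download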

How can I change the result status in Axios with an adapter?

The why
We're using the axios-retry library, which uses this code internally:
axios.interceptors.response.use(null, error => {
Since it only specifies the error callback, the Axios documentation says:
Any status codes that falls outside the range of 2xx cause this function to trigger
Unfortunately we're calling a non-RESTful API that can return 200 with an error code in the body, and we need to retry that.
We've tried adding an Axios interceptor before axios-retry does and changing the result status in this case; that did not trigger the subsequent interceptor error callback though.
What did work was specifying a custom adapter. However this is not well-documented and our code does not handle every case.
The code
const axios = require('axios');
const httpAdapter = require('axios/lib/adapters/http');
const settle = require('axios/lib/core/settle');
const axiosRetry = require('axios-retry');

const myAdapter = async function (config) {
    return new Promise((resolve, reject) => {
        // Delegate to the default http adapter
        return httpAdapter(config).then(result => {
            // We would have more logic here in the production code
            if (result.status === 200) result.status = 500;
            settle(resolve, reject, result);
            return result;
        });
    });
};
const axios2 = axios.create({
    adapter: myAdapter
});

function isErr(error) {
    console.log('retry checking response', error.response.status);
    return !error.response || (error.response.status === 500);
}

axiosRetry(axios2, {
    retries: 3,
    retryCondition: isErr
});

// httpstat.us can return various status codes for testing
axios2.get('http://httpstat.us/200')
    .then(result => {
        console.log('Result:', result.data);
    })
    .catch(e => console.error('Service returned', e.message));
This works in the error case, printing:
retry checking response 500
retry checking response 500
retry checking response 500
retry checking response 500
Service returned Request failed with status code 500
It works in the success case too (change the URL to http://httpstat.us/201):
Result: { code: 201, description: 'Created' }
The issue
Changing the URL to http://httpstat.us/404, though, results in:
(node:19759) UnhandledPromiseRejectionWarning: Error: Request failed with status code 404
at createError (.../node_modules/axios/lib/core/createError.js:16:15)
at settle (.../node_modules/axios/lib/core/settle.js:18:12)
A catch on the httpAdapter call will catch that error, but how do we pass that down the chain?
What is the correct way to implement an Axios adapter?
If there is a better way to handle this (short of forking the axios-retry library), that would be an acceptable answer.
Update
A coworker figured out that doing .catch(e => reject(e)) (or just .catch(reject)) on the httpAdapter call appears to handle the issue. However we'd still like to have a canonical example of implementing an Axios adapter that wraps the default http adapter.
Here's what worked (in node):
const httpAdapter = require('axios/lib/adapters/http');
const settle = require('axios/lib/core/settle');

const customAdapter = config =>
    new Promise((resolve, reject) => {
        httpAdapter(config).then(response => {
            if (response.status === 200) {
                // && response.data contains the particular error
                // log if desired
                response.status = 503;
            }
            settle(resolve, reject, response);
        }).catch(reject);
    });
// Then do axios.create() and pass { adapter: customAdapter }
// Now set up axios-retry and its retryCondition will be checked
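Spelled out, that wiring might look like this (mirroring the setup from the question; the 503 check matches the synthetic status set by the adapter above):
const axios2 = axios.create({ adapter: customAdapter });

axiosRetry(axios2, {
    retries: 3,
    retryCondition: error => !error.response || error.response.status === 503
});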
Workaround with interceptor and custom error
const axios = require("axios").default;
const axiosRetry = require("axios-retry").default;
axios.interceptors.response.use(async (response) => {
if (response.status == 200) {
const err = new Error("I want to retry");
err.config = response.config; // axios-retry using this
throw err;
}
return response;
});
axiosRetry(axios, {
retries: 1,
retryCondition: (error) => {
console.log("retryCondition");
return false;
},
});
axios
.get("https://example.com/")
.catch((err) => console.log(err.message)); // gonna be here anyway as we'll fail due to interceptor logic
