I use the Pre sign-up trigger of Cognito to validate the user registration. If some validation fails, the function responds with a custom error. The client only receives UserLambdaValidationException as the ErrorCode.
Is there a way to receive the custom error name instead?
Current using sample:
exports.handler = function(event, context, callback) {
function AccountAlreadyExistsError(message) {
this.name = "AccountAlreadyExistsError";
this.message = message;
}
AccountAlreadyExistsError.prototype = new Error();
const error = new AccountAlreadyExistsError("Account is in use!");
callback(error);
};
I want to get AccountAlreadyExistsError in our client instead of UserLambdaValidationException.
You can customise exception headers only if it is a Lambda Proxy with API Gateway (the x-amzn-errortype header). In any other case, you need to parse the exception message to get your custom exception. Here is an example of how I manage exceptions for Cognito Lambda triggers — it's just to show you the idea:
/*
Lambda Code, could be in a Lambda layer
*/
// Tag names used to delimit the exception type and message inside the error
// text, so the client can parse them back out of Cognito's wrapped message.
const EXCEPTION_TYPE_DELIMITER = "etype";
const EXCEPTION_MESSAGE_DELIMITER = "emsg";

// Wrap `txt` in an XML-like `<tag>...</tag>` pair.
const beTagged = (tag, txt) => `<${tag}>${txt}</${tag}>`;
/**
 * Error whose name and message are wrapped in parseable delimiter tags, so a
 * client can recover the original exception type from the message Cognito
 * forwards inside UserLambdaValidationException.
 */
class UserNotAllowedException extends Error {
  constructor(message) {
    // Compute the tagged message before calling super (no `this` access yet).
    const taggedMessage = beTagged(EXCEPTION_MESSAGE_DELIMITER, message);
    super(taggedMessage);
    Error.captureStackTrace(this, this.constructor);
    // Tag the type name too; it survives inside Cognito's wrapped message.
    this.name = beTagged(EXCEPTION_TYPE_DELIMITER, this.constructor.name);
  }
}
// Lambda trigger entry point: throwing from the async handler reports the
// error back to Cognito, which wraps its message (including the delimiter
// tags) into the UserLambdaValidationException seen by the client.
exports.handler = async (event, context, callback) => {
// Your logic, etc..
// The callback parameter is unused here; the thrown error is the response.
throw new UserNotAllowedException("You are blocked :(");
}
Here is the parser
/*
Client Code
*/
// Client-side counterparts of the Lambda's delimiter tags: one regex to find
// a tagged span, one to strip the surrounding tags from it.
const EXCEPTION_TYPE_DELIMITER_REGEX = /<etype>(.*?)<\/etype>/g;
const EXCEPTION_TYPE_UNTAG_REGEX = /<\/?etype>/g;
const EXCEPTION_MESSAGE_DELIMITER_REGEX = /<emsg>(.*?)<\/emsg>/g;
const EXCEPTION_MESSAGE_UNTAG_REGEX = /<\/?emsg>/g;

/**
 * Extracts the first delimiter-tagged span from `txt` and strips its tags.
 * @param {string} txt - raw exception message from Cognito.
 * @param {RegExp} delimiterRegex - finds the tagged span(s).
 * @param {RegExp} untagRegex - removes the opening/closing tags.
 * @returns {string|undefined} the untagged content, or undefined when no
 *   tagged span is present (the original crashed with a TypeError here,
 *   because String.prototype.match returns null on no match).
 */
const untag = (txt, delimiterRegex, untagRegex) => {
  const matches = txt.match(delimiterRegex);
  if (!matches) {
    return undefined;
  }
  // Only the first occurrence is relevant, so untag just that one.
  return matches[0].replace(untagRegex, "");
};
/**
 * Maps a raw Cognito error message onto a client-side exception instance.
 * Bug fix: the original declared `const exceptionMessage` inside a function
 * whose parameter was also named `exceptionMessage`, which is a SyntaxError
 * ("Identifier 'exceptionMessage' has already been declared"). The parameter
 * is renamed; callers pass it positionally, so the interface is unchanged.
 * @param {string} rawMessage - e.message from the failed Cognito call.
 * @returns {Error} the resolved custom exception.
 */
const resolveException = (rawMessage) => {
  const exceptionType = untag(rawMessage, EXCEPTION_TYPE_DELIMITER_REGEX, EXCEPTION_TYPE_UNTAG_REGEX);
  const exceptionMessage = untag(rawMessage, EXCEPTION_MESSAGE_DELIMITER_REGEX, EXCEPTION_MESSAGE_UNTAG_REGEX);
  // Your logic to determine what exception is the exceptionType
  return new YourResolvedException(exceptionMessage);
};
// NOTE(review): this fragment must live inside an async function (it uses
// `return await`); `Auth`, `params`, and `resolveException` come from the
// surrounding client code.
try {
// This guy should throw the exception
return await Auth.signUp(params);
} catch (e) {
// Expected message from Cognito Lambda Trigger
// PreSignUp failed with error <etype>UserNotAllowedException</etype>: <emsg>You are blocked :(</emsg>.
// For example, here your custom exception resolver
throw resolveException(e.message);
}
It can be done in a thousand ways, but the best thing for us would be if the AWS services were able to fully customise the exceptions without so much hassle.
Regards!
I tried it on my end and was able to get the error message in the client. Tested it using AWS CLI, Nodejs & the built-in Cognito UI. I returned the error using:
// Returning a plain Error from the trigger: the message (but not a custom
// error name) reaches the client inside UserLambdaValidationException.
var error = new Error('something went wrong..!');
callback(error,event);
I got the error UserLambdaValidationException: PreSignUp failed with error something went wrong..! in all my clients. Even callback(error) worked.
Related
I am starting to improve at Unit testing, but still thrown off by a few things. So I have the following service function
const connectToService = async (connectionParams) => {
try {
const xmlRequest = await getLoginXml(
connectionParams.username,
connectionParams.password,
connectionParams.url,
);
const res = await axios.post(connectionParams.msHost, xmlRequest);
const dom = new DOMParser().parseFromString(res.data, 'text/xml');
if (
!dom.documentElement
.getElementsByTagName('wsse:BinarySecurityToken')
.item(0)
) {
throw new Error('Invalid Username/Password');
}
return dom.documentElement
.getElementsByTagName('wsse:BinarySecurityToken')
.item(0).firstChild.nodeValue;
} catch (e) {
throw new Error(`Error making connection - ${e.message}`);
}
};
module.exports = {
connectToService,
};
So I have a relatively straight forward function above. Takes connectionParams and injects some params using getLoginXml, which just returns some XML.
I then make a POST request with that xml and receive a response. I then parse this response and return a token if there is one, if not, I throw an Error.
The thing that is throwing me is that there are a few things going on in this function, and I am not sure what to stub.
SO what would be the process first? Stub getLoginXml and then let the rest of the function run? And go through the file sequentially?
Any advice appreciated - I have done well with my integration tests, but unit test I am struggling to fully get to grips with.
Thanks
I have a Lambda function that is designed to take a message from a SQS queue and then input a value called perf_value which is just an integer. The CloudWatch logs show it firing each time and logging Done as seen in the .then() block of my write point. With it firing each time I am still only seeing a single data point in InfluxDB Cloud. I can't figure out why it is only inputting a single value then nothing after that. I don't see a backlog in SQS and no error messages in CloudWatch either. I'm guessing it is a code issue or InfluxDB Cloud setup though I used defaults which you would expect to actually work for multiple data points
'use strict';
// Package-scope fix: '#influxdata' was a markdown artifact; the npm scope is '@'.
const {InfluxDB, Point, HttpError} = require('@influxdata/influxdb-client')

const InfluxURL = 'https://us-west-2-1.aws.cloud2.influxdata.com'
const token = '<my token>=='
const org = '<my org>'
const bucket= '<bucket name>'

// Module scope: shared across warm invocations of the Lambda container.
const writeApi = new InfluxDB({url: InfluxURL, token}).getWriteApi(org, bucket, 'ms')

module.exports.perf = function (event, context, callback) {
  context.callbackWaitsForEmptyEventLoop = false;

  // Only the first SQS record is read — assumes a batch size of 1.
  const input = JSON.parse(event.Records[0].body)
  console.log(input)

  // Point.tag(name, value): the original had the arguments swapped, creating
  // a different tag *key* per monitor instead of a 'monitorID' tag value.
  const point = new Point('elapsedTime')
    .tag('monitorID', input.monitorID)
    .floatField('elapsedTime', input.perf_value)
    // .timestamp(input.time)

  writeApi.writePoint(point)

  // Bug fix: close() permanently shuts the shared writeApi down, so only the
  // first invocation in a warm container ever reached InfluxDB. flush()
  // forces the buffered point out while keeping the client usable.
  writeApi
    .flush()
    .then(() => {
      console.log('Done')
    })
    .catch(e => {
      console.error(e)
      if (e instanceof HttpError && e.statusCode === 401) {
        console.log('Unauthorized request')
      }
      console.log('\nFinished ERROR')
    })
  return true
};
EDIT**
Still have been unable to resolve the issue. I can get one datapoint to go into the influxdb and then nothing will show up.
@Joshk132 -
I believe the problem is here:
// Quoted from the question: close() tears the whole client down after the
// first write, which is why later invocations never reach InfluxDB.
writeApi
.close() // <-- here
.then(() => {
console.log('Done')
})
You are closing the API client object after the first write so you are only able to write once. You can use flush() instead if you want to force sending the Point immediately.
Env:
nodejs 10.15.2, express 4.16.4
happens on azure app service as well as local test env
using "@azure/storage-blob": "^10.3.0"
I'm creating an express app to up/download blobs to/from azure blob storage.
Everything works fine: Can upload blobs, set metadata, download blobs etc.
But: When the aborter(s) run into their timeout, they cause an uncaught exception which terminates the Node process.
The error stack is:
Fri Apr 19 2019 07:38:28 GMT+0000 (Greenwich Mean Time): Application has thrown an uncaught exception and is terminated:
Error: The request was aborted
at new RestError (D:\home\site\wwwroot\node_modules\#azure\ms-rest-js\dist\msRest.node.js:1397:28)
at a.<anonymous> (D:\home\site\wwwroot\node_modules\mql\node_modules\#azure\storage-blob\dist\index.js:1:11269)
at D:\home\site\wwwroot\node_modules\mql\node_modules\#azure\storage-blob\dist\index.js:1:1277
at Array.forEach (<anonymous>)
at a.abort (D:\home\site\wwwroot\node_modules\mql\node_modules\#azure\storage-blob\dist\index.js:1:1255)
at Timeout.<anonymous> (D:\home\site\wwwroot\node_modules\mql\node_modules\#azure\storage-blob\dist\index.js:1:519)
at ontimeout (timers.js:436:11)
at tryOnTimeout (timers.js:300:5)
at unrefdHandle (timers.js:520:7)
at Timer.processTimers (timers.js:222:12)
In my local dev-env I put a breakpoint on uncaught exception and found:
Event is finally thrown in event.js (emit)
It is caused by RetriableReadableStream (@azure/storage-blob/dist/esm/lib/utils/RetriableReadableStream), see "aborter.addEventListener":
// Transpiled (ES5) constructor quoted from @azure/storage-blob; shown here
// because its "abort" listener re-emits a RestError as a stream 'error'
// event. With no 'error' listener attached to the stream, Node treats the
// emission as an uncaught exception and terminates the process.
function RetriableReadableStream(aborter, source, getter, offset, count, options) {
if (options === void 0) { options = {}; }
var _this = _super.call(this) || this;
_this.retries = 0;
_this.aborter = aborter;
_this.getter = getter;
_this.source = source;
_this.start = offset;
_this.offset = offset;
_this.end = offset + count - 1;
_this.maxRetryRequests =
options.maxRetryRequests && options.maxRetryRequests >= 0
? options.maxRetryRequests
: 0;
_this.progress = options.progress;
_this.options = options;
// This listener fires when the Aborter's timeout elapses — even after a
// successful download — and emits the 'error' seen in the stack trace above.
aborter.addEventListener("abort", function () {
_this.source.pause();
_this.emit("error", new RestError("The request was aborted", RestError.REQUEST_ABORTED_ERROR));
});
...
Which in turn is caused by the aborter:
// Quoted from the SDK: cancels the timer, invokes onabort, then notifies
// every registered "abort" listener and cascades to child aborters.
Aborter.prototype.abort = function () {
var _this = this;
// Already aborted: nothing more to do.
if (this.aborted) {
return;
}
this.cancelTimer();
if (this.onabort) {
this.onabort.call(this);
}
// Each registered listener (e.g. RetriableReadableStream's) runs here — this
// is what ultimately emits the stream 'error' once the timeout fires.
this.abortEventListeners.forEach(function (listener) {
listener.call(_this);
});
this.children.forEach(function (child) { return child.cancelByParent(); });
this._aborted = true;
};
My Aborter is created like this:
// Builds a fresh Aborter that cancels the associated request after five
// minutes; onabort only logs a warning when that timeout fires.
createAborter(): Aborter {
    const aborter = Aborter.timeout(5 * ONE_MINUTE);
    aborter.onabort = () =>
        console.warn(`AzureBlog.createAborter.onAbort: Request was aborted.`);
    return aborter;
}
... and my download looks like this:
// Downloads the named block blob and hands back its readable body stream.
// Throws when the SDK returns no response or a response without a stream.
async download(blobName: string): Promise<NodeJS.ReadableStream> {
    const blockBlobURL = this.getBlockBlobUrl(blobName);
    const response = await blockBlobURL.download(this.createAborter(), 0);
    if (!response) {
        throw new Error(`Download returned undefined.`);
    }
    const stream = response.readableStreamBody;
    if (!stream) {
        throw new Error(`downloadResponse.readableStreamBody is undefined.`);
    }
    return stream;
}
... and I stream it to the client like this:
// GET and POST both delegate to the same download handler.
// NOTE(review): handleDownload is async but not awaited — a rejected promise
// here escapes the route handler entirely; confirm errors are handled inside.
self.expressApp.route('/download')
.get(jsonParser, async (req: Request, resp: Response) => {
handleDownload(req, resp);
}).post(jsonParser, async (req: Request, resp: Response) => {
handleDownload(req, resp);
});
...
// Pipe the blob stream straight into the HTTP response.
let blobReadStream = await self.azureBlobStore.download(id);
blobReadStream.pipe(resp);
As mentioned, everything works fine until the timeout. What I don't get is:
Why would someone throw an uncaught error in a base-library whithout allowing the lib-consumers to catch it? (Btw, had the same problem with azure gremlin nodejs)
My aborter "onAbort()" just adds another event-listener. Do I have to remove all the other listeners to prevent that error?
Why are the "abort" listeners fired at all? All requests (download, upstream etc.) work fine within seconds. The samples/docs do not say anything about manually destroying/disabling the aborter after a successful request.
I assume that I have misunderstood the Aborter concept. So any help is much appreciated.
Thanks a lot!
I get the error Invalid Lambda Response: Lambda response provided invalid slot names [slotId] when Lambda sends a response to Lex using elicitSlot to get slot values for an undefined slot.
I referred to the Lex blueprint code, as follows.
// DialogCodeHook handler: validates the slotId slot and either re-elicits it
// (on validation failure) or delegates the dialog back to Lex.
const handleOrder = async (intentRequest, callback) => {
const source = intentRequest.invocationSource
const id = intentRequest.currentIntent.slots.slotId
if (source === 'DialogCodeHook') {
// Perform basic validation on the supplied input slots. Use the elicitSlot dialog action to re-prompt for the first violation detected.
const slots = intentRequest.currentIntent.slots
const validationResult = validateOrderRequest(id)
// NOTE(review): ensure validateOrderRequest also returns an object with
// isValid: true when the id is valid; if it returns undefined, this
// property access throws a TypeError.
if (!validationResult.isValid) {
//reset the slot value
slots[`${validationResult.violatedSlot}`] = null
callback(elicitSlot(intentRequest.sessionAttributes, intentRequest.currentIntent.name, slots, validationResult.violatedSlot, validationResult.message))
return;
}
// Validation passed: hand control back to Lex with current attributes.
const outputSessionAttributes = intentRequest.sessionAttributes || {}
callback(delegate(outputSessionAttributes, intentRequest.currentIntent.slots))
return;
}
...
}
/**
 * Validates the slotId value from the intent request.
 * Bug fix: the original returned undefined when `id` was present, so the
 * caller's `validationResult.isValid` access threw a TypeError. The valid
 * branch now returns an explicit result object (same shape the Lex blueprint's
 * buildValidationResult produces for the success case).
 * @param id - the slotId slot value (may be null/undefined).
 * @returns {{isValid: boolean, violatedSlot: ?string, message: ?string}}
 */
function validateOrderRequest(id) {
  if (!id) {
    return buildValidationResult(false, 'slotId', `Tell me the ID.`);
  }
  return { isValid: true, violatedSlot: null, message: null };
}
What could be giving the error?
You need not to use the statement
slots[`${validationResult.violatedSlot}`] = null;
It's not good practice to assign a slot value to null.
Remove this statement. I hope you won't get that error.
I'm running a relatively simple AWS Function to add a subscription to Stripe.
It runs fine unless I hit it shortly after a previous invocation. Just trying to run it in Postman one request after the other fails and returns:
{"errorMessage": "Process exited before completing request"}
The requests are delivered via API Gateway.
Function is configured with 30s timeout and is take ~1300ms to run on the base 128M RAM (issue reproducible # 256M).
I thought this was exactly what Lambda was designed to avoid... I'm second guessing my decision to use Lambda for a (synchronous) mission critical component.
EDIT: As requested, here's the function code:
var stripe = require('stripe');
exports.handler = function (event, context, callback) {
var self = this;
stripe = stripe(getKey(event.stage, 'STRIPE_SECRET_KEY'));
self.createSubscription = createSubscription;
self.validPayload = validPayload;
console.log('event: ', event);
if (self.validPayload(event, context)) {
self.createSubscription(event, stripe, callback, context);
}
/**
* checks that the necessary payload has been received
* if YES: returns true and allows process to continue
* if NO: throws context.fail with useful error message(s)
* operating under custom error code naming convention of
* http code + 3 digit ULM error code
* #param event - from Lambda
* #param context - from Lambda
* #returns {boolean} - whether the payload contains the required data
*/
function validPayload (event, context) {
var errorResponse = {
status: 400,
errors: []
};
if (!event.billing_email) {
errorResponse.errors.push({
code: 400001,
message: "No billing email provided."
})
}
if (!event.plan) {
errorResponse.errors.push({
code: 400002,
message: "No plan was selected."
})
}
if (!event.token) {
errorResponse.errors.push({
code: 400003,
message: "A valid credit card was not provided."
})
}
if (!!errorResponse.errors.length) {
context.fail(JSON.stringify(errorResponse));
return false;
} else {
return true;
}
}
/**
* Creates a new customer & subscription using stripe package method
* if success, executes callback with response data
* if fail, throws context.fail with useful error message(s)
* #param event - from Lambda
* #param stripe - probably not necessary...
* #param callback - from Lambda
* #param context - probably not necessary...
*/
function createSubscription (event, stripe, callback, context) {
stripe.customers.create({
source: event.token,
plan: event.plan,
email: event.billing_email
}, function (err, customer) {
if (err) {
var errorResponse = {
status: 400,
errors: []
};
errorResponse.errors.push({
code: 400004,
message: err.message
});
console.error('Customer/Plan Creation Failed');
callback(JSON.stringify(errorResponse));
} else {
callback(null, {
status: 200,
customer: customer
});
}
});
}
function getKey (stage, keyId) {
var keys = {
STRIPE_SECRET_KEY: {
staging: 'sk_test_123456',
prod: 'sk_live_123456'
}
};
if (stage === 'prod') {
return keys[keyId][stage];
} else {
return keys[keyId]['staging'];
}
}
};
EDIT 2: Dug into CloudWatch and found this error log: TypeError: stripe is not a function at exports.handler (/var/task/exports.js:5:14)
#rowanu is correct, your problem is on this line stripe = stripe(getKey(event.stage, 'STRIPE_SECRET_KEY'));. Since the Lambda stays hot to handle subsequent requests any variables declared outside of the handler function will be seen by each new request that comes in. This should be a simple fix, don't redefine the stripe variable. Something like this would do the trick:
// Suggested fix from the answer: cache the configured client in a separate
// module-level variable so the factory itself is never overwritten.
var stripe = require('stripe');
var stripeInstance; // undefined on startup
exports.handler = function (event, context, callback) {
// now define stripeInstance if its not already defined
if(!stripeInstance) {
stripeInstance = stripe(getKey(event.stage, 'STRIPE_SECRET_KEY'));
}
// now all functions will be able to use the same instance of stripe.
// This assumes the event.stage is always the same, if you need a new instance for every request then remove the if statement
// rename all references of stripe to stripeInstance
...
"Process exited before completing request" indicates that your function exited without calling the callback. It is not related to timeouts or throttling.
Usually this indicates that an exception is thrown from a code path that doesn't have adequate error handling.
You will simply need to handle or fix "stripe is not a function at exports.handler (/var/task/exports.js:5:14)" and call the appropriate callback.