How to do deferred jobs in Node API (Express) - node.js

I have an API in Node with many endpoints. It works well, but one endpoint can receive large requests that take up to an hour to process, and those requests often break. I'm thinking of responding immediately with a URL where the client can check the status of the request and then download the result once it's ready. What would be the best way to handle a queue of jobs in Node for this?
Sample code below for the current endpoint
const router = express.Router();
const schema = joi.object().keys({ /* ... */ }); // Schema that validates the JSON input

router.post('/', async (req, res) => {
  let conn = await connect(); // Util method that connects to a Q/KDB server
  let request = req.body;
  joi.validate(request, schema, (err, _result) => {
    if (err) {
      res.status(400).send({ error: err['details'][0]['message'] });
    } else {
      let qRequest = buildRequest(request); // Util function to build request
      // Connect to Q/KDB server with node-q package and process request
      conn.k('api.process', qRequest, function(err, resp) {
        if (err) {
          // write err to log
          res.status(400).send({ error: err['details'][0]['message'] });
        } else {
          res.status(200).send(resp);
        }
      });
    }
  });
});
EDIT:
I have found that I basically just have to build a job queue, with a job ID corresponding to each job. The package Bull looks good, but I don't want to add another dependency such as Redis.

Conceptually there are a couple ways to approach a problem like this:
You can return a jobID and let the client query that jobID on some recurring basis using a URL that contains the jobID until they get a result (this sounds like what you envisioned)
You can have the client open a WebSocket or socket.io connection to the server, and when the result is done, the server can push it directly over that connection.
You can use Server Sent Events (SSE) to "push" the result to the client when it's done.
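For the third option, here is a minimal sketch of what an SSE endpoint might look like in Express (the route path and the notifyJobDone helper are hypothetical; your processing code would call notifyJobDone when a result is ready):
// Minimal SSE sketch (hypothetical route and helper names)
const sseClients = new Map(); // jobID -> open response stream

router.get('/jobs/:jobID/events', (req, res) => {
  res.set({
    'Content-Type': 'text/event-stream',
    'Cache-Control': 'no-cache',
    'Connection': 'keep-alive'
  });
  res.flushHeaders();
  sseClients.set(req.params.jobID, res);
  req.on('close', () => sseClients.delete(req.params.jobID));
});

// call this from the processing code when the job finishes
function notifyJobDone(jobID, result) {
  const res = sseClients.get(jobID);
  if (res) {
    res.write(`data: ${JSON.stringify(result)}\n\n`);
    res.end();
    sseClients.delete(jobID);
  }
}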
Here's the outline of a scheme for the first option above:
Coins a unique jobID for each incoming job to process
Creates a route for querying the status of a jobID
Has a Map object that contains a list of jobs in process that is indexed by jobID
Has a setInterval() that sweeps through the jobs in the job Map to remove any expired jobs (jobs where the client never came back to get them). You can set the frequency of that sweep and the amount of time that you keep the job.
When a request comes in, it coins a new jobID, adds a "pending" job to the Map and returns back to the client a URL which they can query the job status on.
When you eventually finish processing the job, the result is added to the job object and its status is changed to "complete".
A route is added to query job status that includes the jobID.
If, when queried, the job status is "complete", then the result is returned and the job is removed from the Map.
If, when queried, the job status is "error", then the error is returned and the job is removed from the Map.
If, when queried, the jobID is not present, 404 status is returned
If, when queried, the job status is anything other than "complete" or "error", then the job.status and optional job.progress is returned. This allows your long running process to communicate back any progress if you want and you can use multiple status values if you want.
Here's code to illustrate the concept:
// A map of objects,
// the key is the jobID
// data is an object {status: "complete", result: someResult, timeStarted: someTime}
// If the job is not yet complete, status will be something other than "complete"
// and result will not yet exist
const jobs = new Map();

// check for expired jobs
const expirationInterval = 60 * 60 * 1000;  // run expiration check once an hour
const expirationTime = 12 * 60 * 60 * 1000; // let jobs stay here for 12 hours
setInterval(() => {
  // accumulate an array of items to remove so we aren't modifying while iterating
  const expired = [];
  const now = Date.now();
  for (let [key, job] of jobs) {
    if (now - job.timeStarted > expirationTime) {
      expired.push(key);
    }
  }
  // now remove all expired jobs
  for (let key of expired) {
    jobs.delete(key);
  }
}, expirationInterval);

// make a job id that consists of current time (in ms) plus random number
// jobs can then be sorted or aged by time also
function makeJobID() {
  const base = Date.now().toString();
  const random = Math.random().toFixed(6).toString().slice(2);
  return base + "_" + random;
}

// fetch data for a jobID
// The job may either not exist any more,
// may still be "pending" (or have some other status)
// or may be "complete"
// Note: if this router is not at the top level, you will have to make
// this path line up with the URL you sent back to the client
router.get("/jobstatus/:jobID", (req, res) => {
  let job = jobs.get(req.params.jobID);
  if (!job) {
    res.sendStatus(404);
    return;
  }
  if (job.status === "complete") {
    // remove it from the jobs Map and send the data
    jobs.delete(req.params.jobID);
    res.send({
      status: "complete",
      result: job.result
    });
  } else if (job.status === "error") {
    // remove it from the jobs Map and send the data
    jobs.delete(req.params.jobID);
    res.send({
      status: "error",
      error: job.error
    });
  } else {
    // optional job.progress can also be communicated back. This can be
    // a number, a string or an object of other data
    if (job.progress) {
      res.send({ status: job.status, progress: job.progress });
    } else {
      res.send({ status: job.status });
    }
  }
});
router.post('/', async (req, res) => {
  let conn;
  try {
    conn = await connect(); // Util method that connects to a Q/KDB server
  } catch (e) {
    console.log(e);
    res.sendStatus(500);
    return; // don't continue without a connection
  }
  let request = req.body;
  joi.validate(request, schema, (err, _result) => {
    if (err) {
      res.status(400).send({ error: err['details'][0]['message'] });
    } else {
      // coin a job id and add a job object to the jobs Map
      const jobID = makeJobID();
      const job = {
        timeStarted: Date.now(),
        status: "pending"
      };
      jobs.set(jobID, job);
      // send a response now that gives them a URL to query
      res.status(202).send({
        status: "Job submitted",
        url: `https://yourdomain.com/jobstatus/${jobID}` // pick whatever URL you want here
      });
      let qRequest = buildRequest(request); // Util function to build request
      // Connect to Q/KDB server with node-q package and process request
      conn.k('api.process', qRequest, function(err, resp) {
        if (err) {
          // set job status to "error"
          job.status = "error";
          job.timeCompleted = Date.now();
          try {
            job.error = err['details'][0]['message'];
          } catch (e) {
            console.log(e);
            job.error = "unknown";
          }
        } else {
          // job has finished, update the job
          // we can update the job object directly because the job Map
          // points at this same object
          job.status = "complete";
          job.timeCompleted = Date.now();
          job.result = resp;
        }
      });
    }
  });
});
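On the client side, polling the URL returned by the POST above could look roughly like this (a sketch only; the 5-second interval and the use of fetch are assumptions):
// Minimal client-side polling sketch; assumes fetch() is available (browser or Node 18+)
async function waitForJob(statusUrl, intervalMs = 5000) {
  while (true) {
    const res = await fetch(statusUrl);
    if (res.status === 404) throw new Error("job expired or unknown");
    const body = await res.json();
    if (body.status === "complete") return body.result;
    if (body.status === "error") throw new Error(body.error);
    // still pending; body.progress can be reported here if the server sets it
    await new Promise(resolve => setTimeout(resolve, intervalMs));
  }
}

// usage: submit the job, then poll the URL that came back
// const { url } = await (await fetch("/", { method: "POST", ... })).json();
// const result = await waitForJob(url);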

Related

Sequelize not retrieving all data after insert

I have noticed that my backend is not returning the expected data after an insert.
In my React application, one function inserts data into the database and, after getting a response, sends a new request to update the current component state with the newly fetched data.
All my functions use async/await, and in the backend all transactions are used and committed in order.
My client is calling the following endpoints:
- POST: api/ticket (insert an item)
- GET: api/ticket (get all items)
Here is what the backend logs, which looks correct to me; the problem is that the SELECT statement does not return the inserted item.
The transactions are started from two different routes, but I don't see why that should be an issue.
In addition, I tried changing the addItem function to run the same findAll statement that the GET method uses, and the data returned is correct.
So why, when I separate these two flows, do I not get all the items? I always need to refresh the page to see the added item.
START TRANSACTION;
Executing (a9d14d5c-c0ac-4821-9b88-293b086debaa): INSERT INTO `messages` (`id`,`message`,`createdAt`,`updatedAt`,`ticketId`,`userId`) VALUES (DEFAULT,?,?,?,?,?);
Executing (a9d14d5c-c0ac-4821-9b88-293b086debaa): COMMIT;
Executing (9ee9ddaa-294e-41d1-9e03-9f02a2737030): START TRANSACTION;
Executing (9ee9ddaa-294e-41d1-9e03-9f02a2737030): SELECT `ticket`.`id`, `ticket`.`subject`, `ticket`.`status`, `ticket`.`createdAt`, `ticket`.`updatedAt`, `ticket`.`deletedAt`, `ticket`.`userId`, `messages`.`id` AS `messages.id`, `messages`.`message` AS `messages.message`, `messages`.`sender` AS `messages.sender`, `messages`.`createdAt` AS `messages.createdAt`, `messages`.`updatedAt` AS `messages.updatedAt`, `messages`.`deletedAt` AS `messages.deletedAt`, `messages`.`ticketId` AS `messages.ticketId`, `messages`.`userId` AS `messages.userId`, `messages->user`.`id` AS `messages.user.id`, `messages->user`.`firstname` AS `messages.user.firstname`, `messages->user`.`surname` AS `messages.user.surname`, `messages->user`.`email` AS `messages.user.email`, `messages->user`.`password` AS `messages.user.password`, `messages->user`.`stripeId` AS `messages.user.stripeId`, `messages->user`.`token` AS `messages.user.token`, `messages->user`.`birthDate` AS `messages.user.birthDate`, `messages->user`.`status` AS `messages.user.status`, `messages->user`.`confirmationCode` AS `messages.user.confirmationCode`, `messages->user`.`createdAt` AS `messages.user.createdAt`, `messages->user`.`updatedAt` AS `messages.user.updatedAt`, `messages->user`.`deletedAt` AS `messages.user.deletedAt` FROM `tickets` AS `ticket` LEFT OUTER JOIN `messages` AS `messages` ON `ticket`.`id` = `messages`.`ticketId` AND (`messages`.`deletedAt` IS NULL) LEFT OUTER JOIN `users` AS `messages->user` ON `messages`.`userId` = `messages->user`.`id` AND (`messages->user`.`deletedAt` IS NULL) WHERE (`ticket`.`deletedAt` IS NULL);
Executing (9ee9ddaa-294e-41d1-9e03-9f02a2737030): COMMIT;
-- POST: api/ticket
exports.addMessage = async (req, res) => {
  try {
    const result = await sequelize.transaction(async (t) => {
      var ticketId = req.body.ticketId;
      const userId = req.body.userId;
      const message = req.body.message;
      const subject = req.body.subject;
      // Validate input - If new ticket, a subject must be provided
      if (!ticketId && !subject) {
        return res
          .status(400)
          .send({ message: "New ticket must have a subject" });
      }
      // Validate input - If ticket exists, userId and message must be provided
      if (!userId && !message && ticketId) {
        return res
          .status(400)
          .send({ message: "UserID and message are required" });
      }
      // Create a ticket if no ticketId was provided
      if (!ticketId) {
        const [ticket, created] = await Ticket.findOrCreate({
          where: {
            subject: subject,
            userId: userId,
          },
          transaction: t,
        });
        ticketId = ticket.id;
      }
      // Create a new message object
      const messageObject = await db.message.create(
        {
          message: message,
          userId: userId,
          ticketId: ticketId,
        },
        { transaction: t }
      );
      // Output message object
      return res.send(messageObject);
    });
  } catch (err) {
    console.log(err);
    return res.status(500).send({
      message:
        err.message || "Some error occurred while creating the ticket message.",
    });
  }
};
-- GET: api/ticket
exports.findAll = async (req, res) => {
  try {
    const result = await sequelize.transaction(async (t) => {
      const tickets = await db.ticket.findAll(
        {
          include: [{ model: db.message, include: [db.user] }],
        },
        { transaction: t }
      );
      tickets.forEach((ticket) => {
        console.log(JSON.stringify(ticket.messages.length));
      });
      return res.send(tickets);
    });
  } catch (err) {
    console.log(err);
    res.status(500).send({
      message: err.message || "Some error occurred while retrieving Tickets.",
    });
  }
};
You send the response to the client before the transaction has actually been committed. You just need to move res.send(messageObject); outside the transaction call.
To see what is going on in the current version of your code, add a few console.log messages and check the actual order of actions: one as the last statement inside the transaction in POST, one after the transaction but before res.send, and at least one at the beginning of GET.
Also, if the transaction were rolled back, you would have sent an uncommitted and subsequently removed record to the client, which I suppose is not your goal.
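As a sketch of that change (validation and findOrCreate omitted for brevity), the created row is returned from the transaction callback and the response is sent only after the transaction has committed:
// Sketch: respond after the transaction commits, so a follow-up GET can see the row
exports.addMessage = async (req, res) => {
  try {
    const messageObject = await sequelize.transaction(async (t) => {
      // ...same validation and Ticket.findOrCreate logic as above...
      // return the created row instead of calling res.send() here
      return db.message.create(
        { message: req.body.message, userId: req.body.userId, ticketId: req.body.ticketId },
        { transaction: t }
      );
    });
    return res.send(messageObject); // transaction is committed by this point
  } catch (err) {
    console.log(err);
    return res.status(500).send({ message: err.message || "Failed to create the ticket message." });
  }
};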

Firestore simple "get" query takes 10 seconds within cloud function

I have a cloud function that runs a simple firestore query by doc ID. The function logs show a ~7 second delay around running the query.
Here's the function code:
exports.cancelUnpaidOrder = functions.https.onCall(async (orderId, context) => {
  console.log("in cancelUnpaidOrder");
  const uid = context.auth.uid;
  console.log("awaiting get order doc");
  const orderSnap = await admin
    .firestore()
    .collection("order")
    .doc(orderId)
    .get();
  console.log("getting order doc data");
  const order = orderSnap.data();
  console.log("asserting");
  assert.ok(order.userId == uid && !order.paid);
  console.log("awaiting update order doc");
  await admin.firestore().collection("order").doc(orderId).update({
    canceled: true,
    cancelMsg: "canceled by user before pay",
    open: false,
  });
  console.log("finished cancelUnpaidOrder");
});
Here are the logs, notice the very long query time between 9:52:29 and 9:52:36:
9:52:37.003 Function execution took 9292 ms, finished with status code: 200
9:52:37.001 finished cancelUnpaidOrder
9:52:36.487 awaiting update order doc
9:52:36.487 asserting
9:52:36.486 getting order doc data
9:52:29.558 awaiting get order doc
9:52:29.558 in cancelUnpaidOrder
9:52:29.558 Callable request verification passed
9:52:27.712 Function execution started
Function zone: us-central1.
Firestore zone: nam5 (us-central)
Thanks.
EDIT:
Firestore document size is 6.57K
Note that, like a Cloud Functions instance, the Admin SDK experiences a cold start. While your code may be just a simple query, the first API request also triggers an exchange of authentication tokens between the service-account client and the authentication server before your query is executed. For any given function instance, this performance hit should only happen once, unless that particular instance stays alive long enough (usually an hour) that it has to reauthenticate. If multiple instances are fired up to soak up demand, each of them will take this hit just once.
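One way to check whether this is the cause is to make sure the Admin SDK is initialized once at module scope (not inside the handler) and to time the read separately; a minimal sketch, assuming the standard firebase-admin and firebase-functions imports:
// Module scope runs once per function instance, so warm invocations reuse
// the already-authenticated Firestore client
const functions = require("firebase-functions");
const admin = require("firebase-admin");
admin.initializeApp();
const db = admin.firestore();

exports.cancelUnpaidOrder = functions.https.onCall(async (orderId, context) => {
  console.time("getDoc"); // separates query time from instance cold-start time
  const orderSnap = await db.collection("order").doc(orderId).get();
  console.timeEnd("getDoc");
  // ...rest of the handler as in the question...
});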
Minimal data transfer
You can help your function's performance by using a field mask on the returned data. For the Admin SDK, this is done using select(). Using a field mask for your document drops its size down from 6.57KB to just 51B.
exports.cancelUnpaidOrder = functions.https.onCall(async (orderId, context) => {
  console.log("in cancelUnpaidOrder");
  const uid = context.auth.uid;
  const orderRef = admin.firestore() // <- new: store reference in a variable
    .collection("order")
    .doc(orderId);
  console.log("awaiting get order doc");
  console.time("getDoc");
  const orderSnap = await orderRef
    .select('userId', 'paid') // <- new: only fetch userId and paid, ignore other fields
    .get();
  console.timeEnd("getDoc"); // <- new: calculate timings locally
  console.log("getting order doc data");
  const order = orderSnap.data();
  console.log("asserting");
  assert.ok(order.userId === uid && !order.paid); // <- new: use === for user ID checks!
  console.log("awaiting update order doc");
  console.time("setDoc");
  await orderRef.update({
    canceled: true,
    cancelMsg: "canceled by user before pay",
    open: false,
  });
  console.timeEnd("setDoc"); // <- new: calculate timings locally
  console.log("finished cancelUnpaidOrder");
});
Note: canceled should probably be corrected to cancelled.
Handling errors
Rather than use assert.ok, you should use a helper function that does the same job but throws an HttpsError instead, so that your front end receives a meaningful error rather than a generic one with the code "internal". In a similar fashion, converting the Firestore calls to throw an HttpsError as well may also be desirable.
These helper functions would look like:
// Typescript: function assertOk(condition: unknown, message?: string): asserts condition {
function assertOk(condition, message = undefined) {
  if (!condition) {
    throw new functions.https.HttpsError(
      "failed-precondition",
      message || "failed-precondition"
    );
  }
}

function throwAsHttpsError(error, message = undefined) {
  let err, errCode = (!!error && typeof error === "object" && error.code) || "internal";
  try {
    // attempt to use original error's code
    err = new functions.https.HttpsError(
      errCode,
      message || "INTERNAL"
    );
  } catch {
    // unexpected error code, use fallback code of "internal"
    err = new functions.https.HttpsError(
      "internal",
      message || errCode || "INTERNAL"
    );
  }
  throw err;
}
Applying them gives:
exports.cancelUnpaidOrder = functions.https.onCall(async (orderId, context) => {
  console.log("in cancelUnpaidOrder");
  const uid = context.auth.uid;
  const orderRef = admin.firestore()
    .collection("order")
    .doc(orderId);
  console.log("awaiting get order doc");
  console.time("getDoc");
  const orderSnap = await orderRef
    .select('userId', 'paid')
    .get()
    .catch(e => throwAsHttpsError(e, "failed to read order"));
  console.timeEnd("getDoc");
  console.log("getting order doc data");
  const order = orderSnap.data();
  console.log("asserting");
  assertOk(order.userId === uid, "User mismatch");
  assertOk(!order.paid, "Order already paid");
  console.log("awaiting update order doc");
  console.time("setDoc");
  await orderRef
    .update({
      canceled: true,
      cancelMsg: "canceled by user before pay",
      open: false,
    })
    .catch(e => throwAsHttpsError(e, "failed to cancel order"));
  console.timeEnd("setDoc");
  console.log("finished cancelUnpaidOrder");
});
Testing timings
When testing Cloud Function timings, you should make sure to test calling the function twice. The first time is to test cold-start performance of spinning up a new Cloud Function instance. The second time is to attempt to reuse the same instance used by the first call once it has gone idle. Either call may or may not experience a cold-start.
const cancelUnpaidOrder = firebase.functions().httpsCallable('cancelUnpaidOrder');

function testCall(orderId) {
  const tag = "Call for Order #" + orderId;
  console.time(tag);
  return cancelUnpaidOrder(orderId) // use the orderId passed in
    .then(() => {
      console.timeEnd(tag);
      console.log(tag + ": success");
    }, (err) => {
      console.timeEnd(tag);
      console.error(tag + ": error -> ", err);
    });
}

const orderId1 = /* first test ID to be cancelled */;
const orderId2 = /* second test ID to be cancelled */;

// attempt to invoke a cold start
testCall(orderId1);
// attempt to catch the cooled-down instance, timings may need adjustment
setTimeout(() => testCall(orderId2), 15000);

MongoError: pool destroyed when fetching all data without conditions

I am new to MongoDB and am trying to query from different collections. When I fetch data from the category collection (essentially a select * from the collection), it throws the error MongoError: pool destroyed.
As per my understanding, it is because find({}) creates a pool and that pool is being destroyed.
The code I am using inside the model is below:
const MongoClient = require('mongodb').MongoClient;
const dbConfig = require('../configurations/database.config.js');

export const getAllCategoriesApi = (req, res, next) => {
  return new Promise((resolve, reject) => {
    let finalCategory = [];
    const client = new MongoClient(dbConfig.url, { useNewUrlParser: true });
    client.connect(err => {
      const collection = client.db(dbConfig.db).collection("categories");
      debugger
      if (err) throw err;
      let query = { CAT_PARENT: { $eq: '0' } };
      collection.find(query).toArray(function(err, data) {
        if (err) return next(err);
        finalCategory.push(data);
        resolve(finalCategory);
        // db.close();
      });
      client.close();
    });
  });
}
My finding here is that when I use
let query = { CAT_PARENT: { $eq: '0' } };
collection.find(query).toArray(function(err, data) {})
find(query) returns data, but with {} or $gte/$gt it throws the pool error.
The code which I have written in controller is below,
import { getAllCategoriesListApi } from '../models/fetchAllCategory';
const redis = require("redis");
const client = redis.createClient(process.env.REDIS_PORT);
export const getAllCategoriesListData = (req, res, next, query) => {
// Try fetching the result from Redis first in case we have it cached
return client.get(`allstorescategory:${query}`, (err, result) => {
// If that key exist in Redis store
if (false) {
res.send(result)
} else {
// Key does not exist in Redis store
getAllCategoriesListApi(req, res, next).then( function ( data ) {
const responseJSON = data;
// Save the Wikipedia API response in Redis store
client.setex(`allstorescategory:${query}`, 3600, JSON.stringify({ source: 'Redis Cache', responseJSON }));
res.send(responseJSON)
}).catch(function (err) {
console.log(err)
})
}
});
}
Can anyone tell me what mistake I am making here, and how I can fix the pool issue?
Thanks in advance.
I assume that toArray is asynchronous (i.e. it invokes the callback you pass in once the results have been read from the network).
If this is true, the client.close(); call is going to be executed before the results have been read, which is likely what yields your error.
The close call needs to be done after you have finished iterating the results.
Separately from this, you should probably not be creating the client instance in the request handler like this. Client instances are expensive to create (they must talk to all of the servers in the deployment before they can actually perform queries) and generally should be created per running process rather than per request.
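Putting both suggestions together, a sketch might look like this (keeping the names from the question; the module-level client is one common way to reuse a single connection per process):
// Sketch: one MongoClient per process, and no close() racing the cursor
const MongoClient = require('mongodb').MongoClient;
const dbConfig = require('../configurations/database.config.js');

const client = new MongoClient(dbConfig.url, { useNewUrlParser: true });
const clientReady = client.connect(); // connect once, reuse for every request

export const getAllCategoriesApi = async (req, res, next) => {
  await clientReady;
  const collection = client.db(dbConfig.db).collection("categories");
  const query = { CAT_PARENT: { $eq: '0' } };
  // toArray() resolves only after all results have been read from the network
  const data = await collection.find(query).toArray();
  return [data]; // same shape as the original finalCategory
};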

Node.js - How to keep describing Cloudwatch log queries for Completed status of a specific query

So for anyone familiar with CW Logs... this is not the typical DB query call that returns results.
You send an API call to start a query, that returns a Query ID.
You then send a different API call to get the query results, with the "hope" that the query completed, and if it hasn't, you're out of luck.
That's where I am right now.
I have a query that takes a bit of time, and I'm guessing the way to handle this is to keep looping the DescribeQueries call until I find a match in the returned array of Completed queries, then go on with the rest of the code. I am unable to pull this off! Grrrr!!
I have tried while and do..while loops that completely do NOT work. I tried setting the escape-condition value when a match is found, but it never gets set and the Lambda function times out.
function checkQueryStatus(logGroup, qID, callback) {
  var params = {
    logGroupName: logGroup,
    maxResults: '3',
    status: 'Complete'
  };
  let found = 0;
  do {
    cwlogs.describeQueries(params, function(err, data) {
      if (err) console.log(err, err.stack); // an error occurred
      else {
        // console.log(data.queries); // successful response
        const qList = data.queries;
        if (qList.some(query => query.queryId === qID)) {
          console.log('Query Done');
          callback(1);
          found = 1;
        } else {
          console.log('Query not done');
        }
      }
    });
  } while (found == 0);
}

checkQueryStatus(logGroupName, queryID, function(qStatus) {
  console.log('Query Status: ', qStatus);
  if (qStatus == 1) {
    console.log('Query Done');
    // <do other code...>
  }
});
How can I do it? I'm now looking into Promises to see what that's all about.
If the DescribeQueries find a match, I want to trigger the GetQueryResults API call.
I've run into a similar problem with AWS Athena. When I start a query, I get back a response that it has started, but no notification when it finishes. The best solution I came up with was to use setTimeout to check its status every 100ms or so and continue when the query is completed. Hope that helps.
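Building on that idea with the DescribeQueries call from the question, here is a sketch using async/await and a fixed delay between checks (the 500 ms interval and the maxAttempts cap are arbitrary choices; cwlogs is the CloudWatchLogs client from the question):
// Sketch: poll DescribeQueries until the query ID appears as Complete,
// pausing between attempts instead of looping synchronously
const delay = (ms) => new Promise((resolve) => setTimeout(resolve, ms));

async function waitForQuery(logGroup, qID, pollMs = 500, maxAttempts = 50) {
  const params = { logGroupName: logGroup, maxResults: 10, status: 'Complete' };
  for (let attempt = 0; attempt < maxAttempts; attempt++) {
    const data = await cwlogs.describeQueries(params).promise();
    if ((data.queries || []).some((q) => q.queryId === qID)) return true;
    await delay(pollMs);
  }
  return false; // give up before the Lambda times out
}

// usage:
// if (await waitForQuery(logGroupName, queryID)) {
//   const results = await cwlogs.getQueryResults({ queryId: queryID }).promise();
// }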
Here is a generic function you can use to wait until the query finishes and return the query result.
async function queryCloudWatch(queryRequest: StartQueryRequest): Promise<GetQueryResultsResponse> {
  const queryResponse: StartQueryResponse = await cloudwatchLogs.startQuery(queryRequest).promise();
  if (!queryResponse.queryId) return {};

  let response: GetQueryResultsResponse | undefined = undefined;
  while (!response || response.status === 'Running') {
    response = await cloudwatchLogs.getQueryResults({
      "queryId": queryResponse.queryId
    }).promise();
  }
  return response;
}
You have two options: query Logs Insights or query log groups.
If you would like to query Logs Insights:
Use the cloudwatchlogs.startQuery and cloudwatchlogs.getQueryResults APIs.
A very good example can be found at https://gist.github.com/zenoyu/f63799a9079a5df376d5daf3cea27be4
To query log groups, use the filterLogEvents API:
const AWS = require('aws-sdk');
AWS.config.setPromisesDependency(require('bluebird'));
AWS.config.update({ region: 'us-west-2' });
const cloudWatchLogs = new AWS.CloudWatchLogs({ apiVersion: '2014-03-28' });

const timestamp = new Date();
const endTime = timestamp.getTime();
const params = {
  endTime: endTime,
  filterPattern: `"${stringToSearch}"`,
  startTime: new Date(endTime - 5 * 60 * 60 * 24 * 1000).getTime(), // Last 5 days
  logGroupName: 'myLogGroup',
  limit: 10
};
const events = await cloudWatchLogs.filterLogEvents(params).promise();
console.log(`successfully queryCloudWatchLogs ${stringToSearch} results: ${JSON.stringify(events)}`);
const results = events.events.map(e => e.message);
console.log(`successfully queryCloudWatchLogs ${stringToSearch} results (${results.length}): ${JSON.stringify(results)}`);

Agenda.js job scheduling, jobs repeat and loop

Thanks in advance for anyone who reads this.
I need to be able to send gcm messages (notifications) to a list of client IDs at a certain time.
I am trying to use Agenda.js since it has a persistence layer.
The following code seems to work just fine initially, executing exactly when it is supposed to. But, after a while of letting the server just chill doing nothing, the job will start executing in a loop.
It will also include
"WARNING: Date in past. Will never be fired."
Here is the relevant code.
var agenda = new agenda({ db: { address: configParams.db } });

schedule_notifications = function(req) {
  // define an agenda task named notify
  agenda.define('notify', function(job, done) {
    // create a gcm message
    var message = new gcm.Message({
      notification: { "body": 'test' }
    });
    var sender = new gcm.Sender('server id');
    var regTokens = ['phone id'];
    // send the message
    sender.send(message, { registrationTokens: regTokens }, function(err, response) {
      if (err) console.error(err);
      else console.log(response);
      done();
    });
  });
  // get the object from the request
  var req_json = JSON.parse(req.body.data),
    keys = Object.keys(req_json),
    key_string = keys[0],
    start_obj = new Date(req_json[key_string][0].start);
  // schedule the job with the date object found in the request
  // start_obj, for example, could be made using
  // start_obj = new Date();
  // notify is the name of the job to run
  agenda.schedule(start_obj, 'notify');
  agenda.start();
  // can comment agenda.schedule and uncomment the following line to delete the unfinished jobs in db
  // agenda.purge(function(err, numRemoved) {});
}
Does anyone have any idea of why this could be happening? Any tips on how to debug this issue?
Thanks!
I fixed the problem. I added in a job.remove call and it no longer loops.
var agenda = new agenda({ db: { address: configParams.db } });

schedule_notifications = function(req) {
  // define an agenda task named notify
  agenda.define('notify', function(job, done) {
    // create a gcm message
    var message = new gcm.Message({
      notification: { "body": 'test' }
    });
    var sender = new gcm.Sender('server id');
    var regTokens = ['phone id'];
    // send the message
    sender.send(message, { registrationTokens: regTokens }, function(err, response) {
      if (err) console.error(err);
      else console.log(response);
      done();
    });
    // remove the job from the collection so it is not picked up and run again
    job.remove(function(err) {
      if (!err) console.log("Successfully removed job from collection");
    });
  });
  // get the object from the request
  var req_json = JSON.parse(req.body.data),
    keys = Object.keys(req_json),
    key_string = keys[0],
    start_obj = new Date(req_json[key_string][0].start);
  // schedule the job with the date object found in the request
  // start_obj, for example, could be made using
  // start_obj = new Date();
  // notify is the name of the job to run
  agenda.schedule(start_obj, 'notify');
  agenda.start();
  // can comment agenda.schedule and uncomment the following line to delete the unfinished jobs in db
  // agenda.purge(function(err, numRemoved) {});
}
