node.js - setInterval & setTimeout incrementally repeating command

I'm trying to help a friend set up a bot for Picarto.tv. We have the bot LINK, and there's no default plugin there for repeating messages, so I tried to make a very crude (seriously, it's terrible; I'm not a developer) plugin for it. I tried using setInterval/setTimeout, and in both cases the bot puts the message in chat once at the set interval, then waits, then after the next interval it says the message twice, then three times, and so on.
It looks like this:
Time 1:
Testing...
Time 2:
Testing...
Testing...
And so on. Here's the code; as I said, it's terribly made, so don't bash me too hard for it.
var api;

function handleChatMsg(data) {
    var recursive = function () {
        api.Messages.send("Testing Bot Repeat...");
        setTimeout(recursive, 15000);
    };
    recursive();
}

module.exports = {
    meta_inf: {
        name: "Repeat Message",
        version: "1.0.0",
        description: "Repeats a message every 5 minutes. Message and interval can be changed.",
        author: "ZX6R"
    },
    load: function (_api) {
        api = _api;
    },
    start: function () {
        api.Events.on("userMsg", handleChatMsg);
    }
};
Can anybody help me figure out why it's incrementally saying the message more times?
EDIT: Issue fixed; the new code is:
var api;

// Function to call for the repeating
function handleChatMsg() {
    // This sets the interval of 5 minutes, and calls the variable. Edit the number after the comma to change the interval. You MUST put it in milliseconds.
    setInterval(function () { xyz(); }, 15000);
    // This sets the variable; edit the text in "api.Messages.send" to change what the bot repeats.
    var xyz = function () {
        api.Messages.send("Testing...");
    };
}

// Defines some information about the plugin, and sets up stuff we need.
module.exports = {
    meta_inf: {
        name: "Repeat Message",
        version: "1.1.1",
        description: "Repeats a message every 5 minutes. Message and interval can be changed.",
        author: "ZX6R"
    },
    load: function (_api) {
        api = _api;
    },
    start: function () {
        handleChatMsg();
    }
};
// The MIT License (MIT)
// Copyright (c) 2016 RedFalconv2 - ZX6R - WalnutGaming
//Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
// The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

Let's not complicate things; keep it simple.
setInterval(function () { xyz(); }, 300000);

var xyz = function () {
    // Do what you want your function to do.
};
Here, the function xyz will be called every 300,000 milliseconds, which is 5 minutes.
Take a look at node-cron, by the way, if you are going to use this regularly in an application.
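For instance, a minimal node-cron sketch (assuming the same api object as in the plugin above; the package name is node-cron):
var cron = require("node-cron");
// "*/5 * * * *" fires at every 5th minute
cron.schedule("*/5 * * * *", function () {
    api.Messages.send("Testing...");
});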

Perhaps something like the following:
// define and export module
var plugin = {};
module.exports = plugin;

// plugin metadata
plugin.meta_inf = {
    name: "Repeat Message",
    version: "1.0.0",
    description: "Repeats a message every 5 minutes. Message and interval can be changed.",
    author: "ZX6R"
};

/**
 * To be called on plugin load.
 * @param {Object} api - api instance.
 */
plugin.load = function (api) {
    plugin._api = api;
};

/**
 * Called on plugin start.
 */
plugin.start = function () {
    if (!plugin._api) {
        // api instance should have been available
        throw new Error("api not defined, is plugin load()'ed ?");
    }
    // each user message event (re)configures the repeating timer
    plugin._api.Events.on("userMsg", function () {
        plugin._configure(true, "Testing echo bot...", 15000);
    });
};

/**
 * Configure the repeating timer.
 * @param {Boolean} enabled - true to enable timer, false to disable
 * @param {String} message - message to repeat, required if enabled
 * @param {Number} interval - milliseconds between repeats, required if enabled
 */
plugin._configure = function (enabled, message, interval) {
    if (plugin._timer) {
        // always clear any old timer
        clearInterval(plugin._timer);
    }
    if (enabled) {
        if (!message || !interval) {
            // message and interval are required
            throw new Error("message and interval are required.");
        }
        // set a repeating timer
        plugin._timer = setInterval(function () {
            plugin._api.Messages.send(message);
        }, interval);
    }
};
Notes:
Each user message event applies a new setting to the timer, so you can have the bot repeat the user's message, or something custom.
Alternatively, you can configure the timer only once.
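For example, if you want the timer configured only once (a fixed message on a fixed schedule, ignoring chat events), a variant of start() could look like this; it reuses the same plugin object and _configure helper from above:
plugin.start = function () {
    if (!plugin._api) {
        throw new Error("api not defined, is plugin load()'ed ?");
    }
    // configure the repeating timer a single time, independent of chat activity
    plugin._configure(true, "Testing echo bot...", 300000);
};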

Related

I want to run a function on a future date/time, once | Node.js backend task?

I have an event web app with a React.js frontend. When a logged-in user creates an event on the event page (say, four days later from today), there is another dropdown input field, setReminder, with values like "4 hours ago", "3 hours ago", and so on. On submit I call a POST route/API endpoint (postRequest -> api -> userSchema -> MongoDB -> req.body) with JSON along the lines of
{ setReminderTime: currentDateTime - req.body.data.setReminderValue }
and I save some other data as well. Now I want my code to run a function at that event date/time minus the reminder offset (4, 3, or 2 hours before), so that at reminder time I can send a notification or an SMS, or do other things. I don't want to hit my database every second, and I also don't want to use setTimeout, because my server restarts again and again due to pushed updates.
I'm not sure I understood your problem well, but you can try running a reminder on the server (backend side), and run it every hour using a cron service, like this:
const { CronJob } = require("cron");
const moment = require("moment");

const schedulerFactory = function () {
    return {
        start: function () {
            // this action will be executed every day at 12:00; you can choose whatever you want
            new CronJob("00 00 12 * * *", function () {
                // use '0 */1 * * * *' instead to execute every minute
                console.log("Running Send Notifications Worker for " + moment().format());
                notificationsWorker.run(); // this should be the function to call; it searches for all instances where some condition is true and then executes them
            }, null, true, "");
        },
    };
};
In the database you can store some information to know whether you have to execute this action or not (for example the last connection, or something like that, to know when to send the notification).
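For illustration, notificationsWorker.run() could be a sketch along these lines; the Reminder model, its sendAt/sent fields, and the sendNotification helper are all hypothetical placeholders for your own schema and messaging code:
const Reminder = require("./models/reminder"); // hypothetical Mongoose model
const sendNotification = require("./send-notification"); // hypothetical helper

const notificationsWorker = {
    run: function () {
        // find reminders that are due and not yet sent
        return Reminder.find({ sendAt: { $lte: new Date() }, sent: false })
            .then(function (reminders) {
                return Promise.all(reminders.map(function (reminder) {
                    // send the notification / SMS, then mark the reminder as handled
                    return sendNotification(reminder).then(function () {
                        reminder.sent = true;
                        return reminder.save();
                    });
                }));
            });
    }
};

module.exports = notificationsWorker;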
Good luck Bro.

Run a Cron Job every 30 mins after an onCreate Firestore event

I want to have a cron job/scheduler that will run every 30 minutes after an onCreate event occurs in Firestore. The cron job should trigger a cloud function that picks up the documents created in the last 30 minutes, validates them against a JSON schema, and saves them in another collection. How do I achieve this, programmatically writing such a scheduler?
What would also be a fail-safe mechanism, and some sort of queuing/tracking of the documents created before the cron job runs, so they get pushed to the other collection?
Building a queue with Firestore is simple and fits perfectly for your use-case. The idea is to write tasks to a queue collection with a due date that will then be processed when being due.
Here's an example.
Whenever your initial onCreate event for your collection occurs, write a document with the following data to a tasks collection (a minimal trigger sketch follows the field list):
duedate: new Date() + 30 minutes
type: 'yourjob'
status: 'scheduled'
data: '...' // <-- put whatever data here you need to know when processing the task
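Here is a rough sketch of that first step, assuming firebase-functions/firebase-admin and a watched collection called items (both names are placeholders):
const functions = require("firebase-functions");
const admin = require("firebase-admin");
admin.initializeApp();
const db = admin.firestore();

exports.enqueueTask = functions.firestore
    .document("items/{itemId}")
    .onCreate((snapshot, context) => {
        // due in 30 minutes from now
        const duedate = admin.firestore.Timestamp.fromMillis(Date.now() + 30 * 60 * 1000);
        return db.collection("tasks").add({
            duedate: duedate,
            type: "yourjob",
            status: "scheduled",
            data: { sourceId: context.params.itemId } // whatever you need when processing
        });
    });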
Have a worker pick up available work regularly - e.g. every minute depending on your needs
import * as admin from "firebase-admin";
import * as functions from "firebase-functions";

const db = admin.firestore();

// Maps a task type to the worker that processes it
interface Workers { [type: string]: (data: any) => Promise<any>; }

// Define what happens on what task type
const workers: Workers = {
    yourjob: (data) => db.collection('xyz').add({ foo: data }),
};

// The following needs to be scheduled
export const checkQueue = functions.https.onRequest(async (req, res) => {
    // Consistent timestamp
    const now = admin.firestore.Timestamp.now();
    // Check which tasks are due
    const query = db.collection('tasks').where('duedate', '<=', now).where('status', '==', 'scheduled');
    const tasks = await query.get();
    // Process tasks and mark them in the queue as done
    const jobs: Promise<any>[] = [];
    tasks.forEach(snapshot => {
        const { type, data } = snapshot.data();
        console.info('Executing job for task ' + JSON.stringify(type) + ' with data ' + JSON.stringify(data));
        const job = workers[type](data)
            // Update task doc with status or error
            .then(() => snapshot.ref.update({ status: 'complete' }))
            .catch((err) => {
                console.error('Error when executing worker', err);
                return snapshot.ref.update({ status: 'error' });
            });
        jobs.push(job);
    });
    return Promise.all(jobs).then(() => {
        res.send('ok');
        return true;
    }).catch((onError) => {
        console.error('Error', onError);
    });
});
You have different options to trigger the checking of the queue if there is a task that is due:
Using a http callable function as in the example above. This requires you to perform a http call to this function regularly so it executes and checks if there is a task to be done. Depending on your needs you could do it from an own server or use a service like cron-job.org to perform the calls. Note that the HTTP callable function will be available publicly and potentially, others could also call it. However, if you make your check code idempotent, it shouldn't be an issue.
Use the Firebase "internal" cron option that uses Cloud Scheduler internally. Using that you can directly trigger the queue checking:
export const scheduledFunctionCrontab =
    functions.pubsub.schedule('* * * * *').onRun((context) => {
        console.log('This will be run every minute!');
        // Include the checkQueue code from above here
    });
Using such a queue also makes your system more robust: if something goes wrong in between, you will not lose tasks that would otherwise only exist in memory; as long as they are not marked as processed, a fixed worker will pick them up and reprocess them. This of course depends on your implementation.
You can trigger a cloud function on the Firestore onCreate event which schedules a Cloud Task for 30 minutes later. This gives you a queuing and retry mechanism.
An easy way is that you could add a created field with a timestamp, and then have a scheduled function run at a predefined period (say, once a minute) and execute certain code for all records where created >= NOW - 31 mins AND created <= NOW - 30 mins (pseudocode). If your time precision requirements are not extremely high, that should work for most cases.
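A rough sketch of that approach, assuming the created field is a Firestore timestamp and the source collection is called docs (both are assumptions):
const functions = require("firebase-functions");
const admin = require("firebase-admin");
const db = admin.firestore();

exports.processRecent = functions.pubsub.schedule("every 1 minutes").onRun(async () => {
    const now = Date.now();
    const from = admin.firestore.Timestamp.fromMillis(now - 31 * 60 * 1000);
    const to = admin.firestore.Timestamp.fromMillis(now - 30 * 60 * 1000);
    // all documents with created >= NOW - 31 mins AND created <= NOW - 30 mins
    const snapshot = await db.collection("docs")
        .where("created", ">=", from)
        .where("created", "<=", to)
        .get();
    snapshot.forEach((doc) => {
        // validate against your JSON schema and copy to another collection here
    });
    return null;
});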
If this doesn't suit your needs, you can add a Cloud Task (Google Cloud product). The details are specified in this good article.
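For the Cloud Tasks route, a minimal sketch using @google-cloud/tasks might look like this; the project, location, queue, and handler URL are placeholders:
const { CloudTasksClient } = require("@google-cloud/tasks");
const client = new CloudTasksClient();

async function scheduleValidation(docId) {
    const parent = client.queuePath("my-project", "us-central1", "my-queue"); // placeholders
    const task = {
        httpRequest: {
            httpMethod: "POST",
            url: "https://example.com/validateDoc", // your HTTP function that does the validation
            headers: { "Content-Type": "application/json" },
            body: Buffer.from(JSON.stringify({ docId })).toString("base64")
        },
        // run the task roughly 30 minutes from now
        scheduleTime: { seconds: Math.floor(Date.now() / 1000) + 30 * 60 }
    };
    const [response] = await client.createTask({ parent, task });
    return response;
}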

Google Cloud PubSub not ack messages

We have a system of publishers and subscribers based on GCP Pub/Sub. A subscriber takes quite long to process a single message, about 1 minute. We already set the subscriber's ack deadline to 600 seconds (10 minutes), the maximum, to make sure Pub/Sub does not start redelivery too early, as we basically have a long-running operation here.
This is the Pub/Sub behavior I'm seeing: while the code sends the ack, and monitoring confirms that the Pub/Sub acknowledgement request has been accepted and the acknowledgement itself completed with a success status, the total number of unacked messages stays the same.
The metrics on the charts show the same thing for the sum, count, and mean aggregation aligners. On the chart, the aligner is mean and no reducers are enabled.
I'm using the @google-cloud/pubsub Node.js library. Different versions have been tried (0.18.1, 0.22.2, 0.24.1), but I guess the issue is not in them.
The following class can be used to check.
TypeScript 3.1.1, Node 8.x.x - 10.x.x
import { exponential, Backoff } from "backoff";
const pubsub = require("@google-cloud/pubsub");

export interface IMessageHandler {
    handle (message): Promise<void>;
}

export class PubSubSyncListener {
    private readonly client;
    private listener: Backoff;
    private runningOperations: Promise<unknown>[] = [];

    constructor (
        private readonly handler: IMessageHandler,
        private options: {
            /**
             * Maximal number of messages to be processed simultaneously.
             * The listener will try to keep the processing count as close to the provided value
             * as possible.
             */
            maxMessages: number;
            /**
             * Formatted full subscription name /projects/{projectName}/subscriptions/{subscriptionName}
             */
            subscriptionName: string;
            /**
             * In milliseconds
             */
            minimalListenTimeout?: number;
            /**
             * In milliseconds
             */
            maximalListenTimeout?: number;
        }
    ) {
        this.client = new pubsub.v1.SubscriberClient();
        this.options = Object.assign({
            minimalListenTimeout: 300,
            maximalListenTimeout: 30000
        }, this.options);
    }

    public async listen () {
        this.listener = exponential({
            maxDelay: this.options.maximalListenTimeout,
            initialDelay: this.options.minimalListenTimeout
        });
        this.listener.on("ready", async () => {
            if (this.runningOperations.length < this.options.maxMessages) {
                const [response] = await this.client.pull({
                    subscription: this.options.subscriptionName,
                    maxMessages: this.options.maxMessages - this.runningOperations.length
                });
                for (const m of response.receivedMessages) {
                    this.startMessageProcessing(m);
                }
                this.listener.reset();
                this.listener.backoff();
            } else {
                this.listener.backoff();
            }
        });
        this.listener.backoff();
    }

    private startMessageProcessing (message) {
        const index = this.runningOperations.length;
        const removeFromRunning = () => {
            this.runningOperations.splice(index, 1);
        };
        this.runningOperations.push(
            this.handler.handle(this.getHandlerMessage(message))
                .then(removeFromRunning, removeFromRunning)
        );
    }

    private getHandlerMessage (message) {
        message.message.ack = async () => {
            const ackRequest = {
                subscription: this.options.subscriptionName,
                ackIds: [message.ackId]
            };
            await this.client.acknowledge(ackRequest);
        };
        return message.message;
    }

    public async stop () {
        this.listener.reset();
        this.listener = null;
        await Promise.all(
            this.runningOperations
        );
    }
}
This is basically a partial implementation of asynchronous pulling of messages with immediate acknowledgment, since one of the proposed solutions was to use synchronous pulling.
I found a similar reported issue in the Java repository, if I'm not mistaken about the symptoms:
https://github.com/googleapis/google-cloud-java/issues/3567
The last detail here is that acknowledgment seems to work at a low number of requests. If I fire a single message into Pub/Sub and then immediately process it, the number of undelivered messages decreases (drops to 0, as only one message was there before).
The question itself: what is happening, and why is the number of unacked messages not decreasing as it should when the ack has been received?
To quote from the documentation, the subscription/num_undelivered_messages metric that you're using is the "Number of unacknowledged messages (a.k.a. backlog messages) in a subscription. Sampled every 60 seconds. After sampling, data is not visible for up to 120 seconds."
You should not expect this metric to decrease immediately upon acking a message. In addition, it sounds as if you are trying to use pubsub for an exactly once delivery case, attempting to ensure the message will not be delivered again. Cloud Pub/Sub does not provide these semantics. It provides at least once semantics. In other words, even if you have received a value, acked it, received the ack response, and seen the metric drop from 1 to 0, it is still possible and correct for the same worker or another to receive an exact duplicate of that message. Although in practice this is unlikely, you should focus on building a system that is duplicate tolerant instead of trying to ensure your ack succeeded so your message won't be redelivered.
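To illustrate what "duplicate tolerant" can mean in practice, here is a deliberately simplified sketch: deduplication is keyed on an ID the publisher attaches to each message, the in-memory Set would be replaced by Redis or a database in a real system, and doWork stands in for your own processing.
const seen = new Set(); // in-memory for illustration only

async function handleMessage(message) {
    const id = message.attributes.dedupId; // hypothetical stable ID set by the publisher
    if (seen.has(id)) {
        message.ack(); // duplicate delivery: ack again, skip the side effects
        return;
    }
    await doWork(message); // your actual business logic
    seen.add(id);          // remember the ID only after the work succeeded
    message.ack();
}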

With bookshelf.js, how do I update a model record atomically?

I have a one-to-many relationship with a JOB model and many TASKs. I have a route for individual tasks, where I fetch the TASK model for display, plus some data from its JOB model. When I request a TASK, I need to update the locked and user_id fields so I can lock the task and show who has it locked, so other users can't access that task view. Therefore, I need to be guaranteed the task has locked=0, and instantly update that field with a timestamp.
My current router code is:
var route_task = function (req, res, next) {
    new Model.Task({id: req.params.id}).fetch({withRelated: ['jobs']})
        .then(function (task) {
            if (!task) {
                res.status(404);
                res.render('404_tpl');
                return;
            }
            if (task.get('locked') !== 0) {
                res.status(403);
                res.render('403_tpl', {title: '403 - Access Denied - Task is locked'});
                return;
            }
            else {
                /* I would update it here, but there's a slim */
                /* chance someone else can come in and select */
                /* the task. */
            }
            /* .. I set some res.locals vals from my task here .. */
            var jobs = task.related('jobs');
            jobs.fetch().then(function (job) {
                /* .. I set some res.local vals here from my jobs .. */
                switch (task.get('task_type')) {
                    case 0:
                        res.render('task_alpha_tpl');
                        break;
                    /* ... */
                }
            });
        });
};
When I hit my router for a particular task ID, I pretty much want to select * where tasks.id = id and locked = 0, and then set locked to the current timestamp, but I need to be able to determine whether the record with that ID didn't exist, or whether it did exist but was locked.
I hope this makes sense. I'm coming from the C and PHP world, so I'm slowly learning async programming.
I think you should do it in a transaction if you want the value not to change; I don't think you should use a semaphore or other client-side mechanisms to simulate a critical section, or anything in that mindset.
The general purpose solution for this problem is to run something in the form of:
UPDATE task SET locked=1,user=? WHERE job=? AND locked=0
And then check that the update actually modified at least one row.
If you're doing that in node.js, then I would do something like:
Tasks.forge().where({job: req.param('code'), locked:0}).save({locked:1},{method:"update"});
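If you also need the "did I actually get the lock" check, one option is to drop down to the knex instance underneath Bookshelf, since a knex UPDATE resolves with the number of affected rows. A sketch, assuming your Bookshelf instance is named bookshelf and locked stores a Unix timestamp:
// resolves to true if we won the lock, false if the row was missing or already locked
function tryLockTask(id, userId) {
    return bookshelf.knex("tasks")
        .where({ id: id, locked: 0 })
        .update({ locked: Math.floor(Date.now() / 1000), user_id: userId })
        .then(function (rowsAffected) {
            return rowsAffected === 1;
        });
}
A false result can then be followed by a plain fetch on the id alone to tell "not found" apart from "locked by someone else".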

How to retrieve jobs with a specific status in kue?

I am using kue for my job queue, and I'd like to know without using the GUI how many jobs are still left, how many have failed, etc. How can I retrieve this kind of information?
For example, a few minutes after starting the processing of the job queue, I'd like to update the status of all jobs that have failed so far to 'inactive', in order to restart them.
The only related question I could find on stackoverflow was this, however, it deals with one job at a time, after it fires a certain event as it is being processed. My concern is different, as I am interested in retrieving all jobs in the database with a certain status.
The answer to this question mentions the function .complete of the kue library, which retrieves all the completed jobs in the database. Are there similar functions for other possible job statuses?
I found a solution by browsing the kue source code. The following code achieves what I need:
var redis = require ('redis'),
kue = require ('kue'),
redisClient = redis.createClient(6379, "127.0.0.1");
kue.redis.createClient = function () {
return redisClient;
};
kue.app.listen(3000);
kue.Job.rangeByType ('job', 'failed', 0, 10, 'asc', function (err, selectedJobs) {
selectedJobs.forEach(function (job) {
job.state('inactive').save();
});
});
For reference, here is the relevant kue source code:
/queue/job.js:123:
/**
 * Get jobs of `type` and `state`, with the range `from`..`to`
 * and invoke callback `fn(err, ids)`.
 *
 * @param {String} type
 * @param {String} state
 * @param {Number} from
 * @param {Number} to
 * @param {String} order
 * @param {Function} fn
 * @api public
 */
exports.rangeByType = function (type, state, from, to, order, fn) {
    redis.client().zrange('q:jobs:' + type + ':' + state, from, to, get(fn, order));
};
The kue source code indicates that:
type is the job type
from, to are the job range indices (for example, you can load jobs from index 0 to 10, i.e. 11 jobs in total)
order specifies the order of the fetched jobs. The default is asc; you can also sort by desc.
The following works and uses the pre-existing queue object, and hence there is no double Redis connection issue, as mentioned by Japrescott in the comments of the accepted answer.
queue.cardByType("notifications", "complete", function (err, count) {
    console.log(count);
});
Feel free to replace "complete" with any valid state; the following is a list of valid states, and a sketch of the per-state count helpers follows the list.
inactive
complete
active
failed
delayed
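If you only need totals per state rather than the jobs themselves, the queue object also exposes count helpers, e.g. (a sketch using the same queue object as above):
queue.failedCount(function (err, total) {
    console.log("failed jobs:", total);
});
queue.inactiveCount(function (err, total) {
    console.log("inactive jobs:", total);
});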
