Google Cloud PubSub - implement pull - node.js

I am using Google Cloud Pub/Sub in Node.js.
I have a service which publishes messages, and a worker which listens for the messages and runs some processing on them.
Now I want it to pull five messages from the subscription at a time, and pull another one when it has finished processing one.
How do I implement this?
Here is my starting code; how do I continue?
var pubsubClient = require('google-cloud').pubsub({
    projectId: 'my-project'
});

var topic = pubsubClient.topic('my-first-pub-sub');

var options = {
    reuseExisting: true,
    ackDeadlineSeconds: 90,
    autoAck: true,
    interval: 5000
};

topic.subscribe('MySub', options, function(err, subscription, apiResponse) {
    // Register listeners to start pulling for messages.
    function onError(err) {
        console.log("err", err);
    }
    function onMessage(message) {
        console.log("subscription", message);
    }
    // subscription.on('error', onError);
    // subscription.on('message', onMessage);

    var opts = {
        maxResults: 1
    };

    subscription.pull(opts, function(err, message) {
        console.log("subscription", err);
        console.log("subscription", message);
    });
});
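One way to get the "pull five, then refill" behaviour with this same API is to turn autoAck off, pull in batches of up to five, ack each message after processing, and pull again once the batch is drained. A rough sketch, not tested; processMessage stands in for your own handler:

function pullBatch(subscription) {
    subscription.pull({ maxResults: 5 }, function(err, messages) {
        if (err) { console.log("pull error", err); return; }
        if (messages.length === 0) return pullBatch(subscription); // nothing yet, poll again
        var remaining = messages.length;
        messages.forEach(function(message) {
            processMessage(message, function() {
                // Ack each message once it has been processed...
                subscription.ack(message.ackId, function(err) {
                    // ...and refill the batch when the last one completes.
                    if (--remaining === 0) pullBatch(subscription);
                });
            });
        });
    });
}

To pull a fresh message for every single completed message instead of per batch, call subscription.pull({ maxResults: 1 }, ...) from inside the ack callback instead.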

Related

How to consume the latest message from Kafka-consumer using NodeJs?

I have created a Node.js application to insert data into a MongoDB collection. The database insertion is done using Kafka; kafka-node is the plugin I have used to call Kafka.
I can create the topic and send a message from the producer to the consumer. The message and topic are taken from the POST request.
This is how I call Kafka; the parameters are topic and message.
Every time I call this API, the producer creates a new message and sends it to the consumer, but on each call all the previous messages are also returned to the consumer.
I have used the fromOffset: 'earliest' and fromOffset: 'latest' options to suppress the previous messages, but it is not working.
Can anyone give me a suggestion?
Version of kafka-node:
"kafka-node": "^5.0.0"
Code I have used:
var kafka = require('kafka-node');
const {MongoClient} = require('mongodb');
var url = 'mongodb://127.0.0.1:27017/';
const mongoClient = new MongoClient(url);

var Producer = kafka.Producer,
    client = new kafka.KafkaClient(),
    offset = new kafka.Offset(client),
    Consumer = kafka.Consumer,
    producer = new Producer(client);

producer.on('ready', function () {
    console.log('Producer is ready');
});

producer.on('error', function (err) {
    console.log('Producer is in error state');
    console.log(err);
})

const createProducer = async (req, res, next) => {
    var topic = req.body.topic;
    var sentMessage = JSON.stringify(req.body.messages);
    // payloads was not shown in the question; presumably it is defined
    // as in the answer below:
    var payloads = [
        { topic: topic, messages: sentMessage, partition: 0 }
    ];
    producer.send(payloads, async function (err, data) {
    })

    client = new kafka.KafkaClient(),
    consumer = new Consumer(client,
        [
            { topic: topic, partition: 0 }
        ],
        {
            autoCommit: false,
            fromOffset: 'earliest'
        }
    );

    consumer.on('message', async function (message) {
        console.log("Message : " + JSON.stringify(message))
        try {
            var currentdate = new Date();
            var datetime = "Last Sync: " + currentdate.getDate() + "/"
                + (currentdate.getMonth() + 1) + "/"
                + currentdate.getFullYear() + " @ "
                + currentdate.getHours() + ":"
                + currentdate.getMinutes() + ":"
                + currentdate.getSeconds();
            var abb = await createListing(mongoClient,
                {
                    topic: topic,
                    message: sentMessage,
                    time: datetime
                }
            );
        } catch (e) {
            console.error(":" + e);
        }
        finally {
        }
    });

    await mongoClient.close();

    res.send({
        message: 'Successfully send data from producer',
        payloads: payloads
    })

    async function createListing(client, newListing) {
        await mongoClient.connect();
        const result = await client.db("sample_airbnb").collection("listingsAndReviews").insertOne(newListing);
        console.log(`New listing created with the following id: ${result.insertedId}`);
        return result.insertedId;
    }
}
The messages consumed at the consumer are: [screenshot omitted]
Thanks,
Your consumer will always consume all offsets that have not been marked as consumed by its consumer group before.
This means that after consuming a given message (or a batch of messages), you need to commit the highest consumed offset to your Kafka cluster to effectively mark those messages as consumed. Only then will your consumer group not re-consume those messages on startup.
To commit your offsets, you can either use kafka.js's autoCommit feature (which you explicitly disabled in your implementation), or manually commit your offsets using the API provided by kafka.js.
You can find the documentation for both here: https://kafka.js.org/docs/consuming#a-name-auto-commit-a-autocommit
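Since the question uses kafka-node rather than kafka.js, the equivalent there would be either enabling autoCommit or committing explicitly; a minimal sketch under that assumption (handle stands in for your own processing):

// Option 1: let kafka-node commit consumed offsets automatically.
var consumer = new Consumer(client, [{ topic: topic, partition: 0 }], {
    autoCommit: true,
    autoCommitIntervalMs: 5000
});

// Option 2: keep autoCommit: false and commit after processing each message.
consumer.on('message', function (message) {
    handle(message, function () {
        consumer.commit(function (err, data) {
            if (err) console.log('commit failed', err);
        });
    });
});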
I made some changes in the code, and now I can retrieve the latest message from my topic.
I created the consumer inside offset.fetchLatestOffsets([topics], cb) and made some changes to the consumer options.
var payloads = [
    { topic: topicName, messages: messageTotopic, partition: 0 }
];
producer.send(payloads, async function (err, data) {
});

var client = new kafka.KafkaClient();
offset.fetchLatestOffsets([topic], async function (error, offsets) {
    if (error)
        console.log(error);
    offsetA = JSON.stringify(offsets[topic][0])
    console.log('offset Value:: ' + offsetA);
    var consumer = new Consumer(
        client,
        [
            {
                topic: topic,
                partition: 0,
                offset: offsetA - 1, // Offset value starts from 0
            }
        ], {
            autoCommit: false,
            fromOffset: true,
        }
    );
    consumer.on('message', async function (message) {
        console.log("Message from last offset:: " + JSON.stringify(message)); // will return the latest message
        consumer.close();
    });
});
Note that fetchLatestOffsets returns the log-end offset (the offset the next message will get), which is why the consumer starts at offsetA - 1, the last message already in the topic. Done this way, I was also able to overcome the memory-leak issue related to the event emitters in the KafkaClient.

Why stompit.ConnectFailover would generate thousands of consumers

I use the code shown below to set up a connection to an ActiveMQ message queue from Node.js:
var reconnectOptions = {
    'maxReconnects': 10,
    connect: {
        connectHeaders: {
            'heart-beat': '5000,5000'
        }
    }
};

var servers = [mqConfig.connectOption];
var manager = new stompit.ConnectFailover(servers, reconnectOptions);

manager.connect(function(error, client, reconnect) {
    client.subscribe(mqConfig.receiveHeaders, function(error, message) {
        client.on('error', function(error) {
            reconnect();
        });
        message.readString('utf-8', function (error, body) {
            if (error) {
                console.log('read message error ' + error.message);
                return;
            }
            ....
It connects to ActiveMQ and sends out messages, but it also generates thousands of consumers with a session id equal to -1. Is there any reason for that to happen? Any ideas?
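There is no confirmed fix in this thread, but one thing stands out in the snippet: the 'error' listener is registered inside the per-message callback, so every received message attaches another handler that calls reconnect(), and every reconnect re-runs the connect callback and subscribes yet again. A sketch that registers the handler once per connection and subscribes a single consumer (an educated guess, not a verified fix for the session id -1 consumers):

manager.connect(function(error, client, reconnect) {
    if (error) {
        console.log('connect error ' + error.message);
        return;
    }
    // One error handler per connection, not one per message.
    client.on('error', function(error) {
        reconnect();
    });
    client.subscribe(mqConfig.receiveHeaders, function(error, message) {
        message.readString('utf-8', function(error, body) {
            if (error) {
                console.log('read message error ' + error.message);
                return;
            }
            // ... handle body ...
        });
    });
});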

Node Js / Typescript - AMQP Consumer

I am trying my hand at Node.js/TypeScript for the first time and am having a bit of trouble making a consumer for a RabbitMQ queue.
Code:
let amqp = require('amqp');
let connection = amqp.createConnection({url: "amqp://" + RABBITMQ_USER + ":" + RABBITMQ_PASSWORD + "@" + RABBITMQ_HOST + ":" + RABBITMQ_PORT + RABBITMQ_VHOST});

connection.on('ready', function() {
    connection.exchange(RABBITMQ_WORKER_EXCHANGE, function (exchange) {
        connection.queue(RABBITMQ_QUEUE, function (queue) {
            queue.bind(exchange, function() {
                queue.publish(function (message) {
                    console.log('subscribed to queue');
                    let encoded_payload = unescape(message.data);
                    let payload = JSON.parse(encoded_payload);
                    console.log('Received a message:');
                    console.log(payload);
                })
            })
        })
    })
})
It seems to connect to the AMQP server and throws no errors, but it just sits there and doesn't consume anything. Is there a step I am missing?
Any help would be greatly appreciated.
Thank you.
Here is my working solution, based on RabbitMQ's JavaScript tutorial:
https://www.rabbitmq.com/tutorials/tutorial-three-javascript.html
It's probably not up to TypeScript standards, so feel free to correct me if there's a better way.
#!/usr/bin/env node
require('dotenv').config();
import amqp = require('amqplib/callback_api');
import db = require('./database');

amqp.connect({
    protocol: process.env.RABBITMQ_PROTOCOL,
    hostname: process.env.RABBITMQ_HOST,
    port: process.env.RABBITMQ_PORT,
    username: process.env.RABBITMQ_USER,
    password: process.env.RABBITMQ_PASSWORD,
    vhost: process.env.RABBITMQ_VHOST
}, function(err, conn) {
    conn.createChannel(function (err, ch) {
        // set exchange that is being used
        ch.assertExchange(process.env.RABBITMQ_WORKER_EXCHANGE, 'direct', {durable: true});
        // set queue that is being used
        ch.assertQueue(process.env.RABBITMQ_QUEUE, {durable: true}, function (err, q) {
            console.log(" [*] Waiting for messages in %s. To exit press CTRL+C", q.queue);
            // bind the queue to the exchange
            ch.bindQueue(q.queue, process.env.RABBITMQ_WORKER_EXCHANGE, '');
            // consume from the queue, one message at a time
            ch.consume(q.queue, function (msg) {
                console.log("Message received: %s", msg.content.toString());
                // save message to db
                db.store(msg.content.toString()).then(function() {
                    // acknowledge receipt of message to amqp
                    console.log("Acknowledging message");
                    ch.ack(msg, true);
                });
            }, {noAck: false});
        });
    });
});
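Another answer demonstrated the same consume flow with the amqp-ts wrapper: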
import * as Amqp from "amqp-ts";

var connection = new Amqp.Connection("amqp://localhost");
var exchange = connection.declareExchange("ExchangeName");
var queue = connection.declareQueue("QueueName");
queue.bind(exchange);
queue.activateConsumer((message) => {
    console.log("Message received: " + message.getContent());
});

// it is possible that the following message is not received because
// it can be sent before the queue, binding or consumer exist
var msg = new Amqp.Message("Test");
exchange.send(msg);

connection.completeConfiguration().then(() => {
    // the following message will be received because
    // everything you defined earlier for this connection now exists
    var msg2 = new Amqp.Message("Test2");
    exchange.send(msg2);
});

Why is the decisionTask not receiving any task from the AWS SWF service?

I am using Node.js for the backend. I tried this npm package to create a simple workflow (Amazon SWF). The package has an example folder containing files which I put in my Node project so that I could understand how it works.
The problem is that the decider is not receiving any task from the SWF server, because of which my workflow never runs. Is there some configuration problem? Please point out what errors I have made.
Below is the code for quick reference. The only changes are the version number and the domain name; otherwise it is the same code you can find here.
Following is the decider code:
var swf = require('./index');

var myDecider = new swf.Decider({
    "domain": "test-domain",
    "taskList": {"name": "my-workflow-tasklist"},
    "identity": "Decider-01",
    "maximumPageSize": 100,
    "reverseOrder": false // IMPORTANT: must replay events in the right order, ie. from the start
});

myDecider.on('decisionTask', function (decisionTask) {
    console.log("Got a new decision task !");
    if (!decisionTask.eventList.scheduled('step1')) {
        decisionTask.response.schedule({
            name: 'step1',
            activity: 'simple-activity'
        });
    }
    else {
        decisionTask.response.stop({
            result: "some workflow output data"
        });
    }
    decisionTask.response.respondCompleted(decisionTask.response.decisions, function(err, result) {
        if (err) {
            console.log(err);
            return;
        }
        console.log("responded with some data !");
    });
});

myDecider.on('poll', function(d) {
    //console.log(_this.config.identity + ": polling for decision tasks...");
    console.log("polling for tasks...", d);
});

// Start polling
myDecider.start();

/**
 * It is not recommended to stop the poller in the middle of a long-polling request,
 * because SWF might schedule a DecisionTask to this poller anyway, which will obviously time out.
 *
 * The .stop() method will wait for the end of the current polling request,
 * eventually wait for a last decision execution, then stop properly:
 */
process.on('SIGINT', function () {
    console.log('Got SIGINT ! Stopping decider poller after this request... please wait...');
    myDecider.stop();
});
Following is the activity code:
/**
 * This simple worker example will respond to any incoming task
 * on the 'my-workflow-tasklist' by setting the input parameters as the results of the task.
 */
var swf = require('./index');

var activityPoller = new swf.ActivityPoller({
    domain: 'test-domain-newspecies',
    taskList: { name: 'my-workflow-tasklist' },
    identity: 'simple-activity'
});

activityPoller.on('error', function() {
    console.log('error');
});

activityPoller.on('activityTask', function(task) {
    console.log("Received new activity task !");
    var output = task.input;
    task.respondCompleted(output, function (err) {
        if (err) {
            console.log(err);
            return;
        }
        console.log("responded with some data !");
    });
});

activityPoller.on('poll', function(d) {
    console.log("polling for activity tasks...", d);
});

activityPoller.on('error', function(error) {
    console.log(error);
});

// Start polling
activityPoller.start();

/**
 * It is not recommended to stop the poller in the middle of a long-polling request,
 * because SWF might schedule an ActivityTask to this poller anyway, which will obviously time out.
 *
 * The .stop() method will wait for the end of the current polling request,
 * eventually wait for a last activity execution, then stop properly:
 */
process.on('SIGINT', function () {
    console.log('Got SIGINT ! Stopping activity poller after this request... please wait...');
    activityPoller.stop();
});
Following is the code which registers the domain, workflow type, and activity type:
var awsswf = require('./index');
var swf = awsswf.createClient();

/**
 * Register the domain "test-domain-newspecies"
 */
swf.registerDomain({
    name: "test-domain-newspecies",
    description: "this is a just a test domain",
    workflowExecutionRetentionPeriodInDays: "3"
}, function (err, results) {
    if (err && err.code != 'DomainAlreadyExistsFault') {
        console.log("Unable to register domain: ", err);
        return;
    }
    console.log("'test-domain-newspecies' registered !")

    /**
     * Register the WorkflowType "simple-workflow"
     */
    swf.registerWorkflowType({
        domain: "test-domain-newspecies",
        name: "simple-workflow",
        version: "2.0"
    }, function (err, results) {
        if (err && err.code != 'TypeAlreadyExistsFault') {
            console.log("Unable to register workflow: ", err);
            return;
        }
        console.log("'simple-workflow' registered !")

        /**
         * Register the ActivityType "simple-activity"
         */
        swf.registerActivityType({
            domain: "test-domain-newspecies",
            name: "simple-activity",
            version: "2.0"
        }, function (err, results) {
            if (err && err.code != 'TypeAlreadyExistsFault') {
                console.log("Unable to register activity type: ", err);
                return;
            }
            console.log("'simple-activity' registered !");
        });
    });
});
Following is the code which starts the workflow execution:
var swf = require('./index');

var workflow = new swf.Workflow({
    "domain": "test-domain-newspecies",
    "workflowType": {
        "name": "simple-workflow",
        "version": "2.0"
    },
    "taskList": { "name": "my-workflow-tasklist" },
    "executionStartToCloseTimeout": "1800",
    "taskStartToCloseTimeout": "1800",
    "tagList": ["example"],
    "childPolicy": "TERMINATE"
});

var workflowExecution = workflow.start({ input: "any data ..."}, function (err, runId) {
    if (err) { console.log("Cannot start workflow : ", err); return; }
    console.log("Workflow started, runId: " + runId);
});
Following is the index.js file:
var basePath = "../node_modules/aws-swf/lib/";
exports.AWS = require('aws-swf').AWS;
exports.AWS.config.loadFromPath(__dirname + '/../config/awsConfig.json');
exports.createClient = require(basePath+"swf").createClient;
exports.Workflow = require(basePath+"workflow").Workflow;
exports.WorkflowExecution = require(basePath+"workflow-execution").WorkflowExecution;
exports.ActivityPoller = require(basePath+"activity-poller").ActivityPoller;
exports.ActivityTask = require(basePath+"activity-task").ActivityTask;
exports.Decider = require(basePath+"decider").Decider;
exports.DecisionTask = require(basePath+"decision-task").DecisionTask;
exports.EventList = require(basePath+"event-list").EventList;
exports.DecisionResponse = require(basePath+"decision-response").DecisionResponse;
exports.Poller = require(basePath+"poller").Poller;
The way I run this code is by opening three terminals simultaneously and executing the following commands in the respective terminals.
Activity:
node <activity-file-name>
Decider:
node <decider-file-name>
Register and start I run in the same terminal:
node <register-file-name>
node <start-file-name>
It stands out that in the decider you are using "test-domain", but in the rest of the code you are using "test-domain-newspecies".
If the domain "test-domain" is not registered, you should get an UnknownResourceFault error when polling for a decision task.
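Assuming that is the cause, the fix is simply to point the decider at the same domain as the rest of the code:

var myDecider = new swf.Decider({
    "domain": "test-domain-newspecies", // must match the registered domain
    "taskList": {"name": "my-workflow-tasklist"},
    "identity": "Decider-01",
    "maximumPageSize": 100,
    "reverseOrder": false
});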

Dead Lettered Message Not Being Consumed in RabbitMQ and Node Using AMQP.Node

I want to receive a message after a certain amount of time in one of my workers. I decided to go with Node and RabbitMQ after discovering so-called dead letter exchanges.
The message seems to get sent to the queue in DeadExchange, but the consumer never receives the message after the elapsed time in the WorkQueue in WorkExchange. Either the bindQueue is off, or the dead-letter'ing doesn't work.
I've tried a lot of different values now. Can someone please point out what I'm missing?
var amqp = require('amqplib');
var url = 'amqp://dev.rabbitmq.com';

amqp.connect(url).then(function(conn) {
    // Subscribe to the WorkQueue in WorkExchange to which the "delayed" messages
    // get dead-letter'ed (is that a verb?).
    return conn.createChannel().then(function(ch) {
        return ch.assertExchange('WorkExchange', 'direct').then(function() {
            return ch.assertQueue('WorkQueue', {
                autoDelete: false,
                durable: true
            })
        }).then(function() {
            return ch.bindQueue('WorkQueue', 'WorkExchange', '');
        }).then(function() {
            console.log('Waiting for consume.');
            return ch.consume('WorkQueue', function(msg) {
                console.log('Received message.');
                console.log(msg.content.toString());
                ch.ack(msg);
            });
        });
    })
}).then(function() {
    // Now send a test message to DeadExchange to a random (unique) queue.
    return amqp.connect(url).then(function(conn) {
        return conn.createChannel();
    }).then(function(ch) {
        return ch.assertExchange('DeadExchange', 'direct').then(function() {
            return ch.assertQueue('', {
                arguments: {
                    'x-dead-letter-exchange': 'WorkExchange',
                    'x-message-ttl': 2000,
                    'x-expires': 10000
                }
            })
        }).then(function(ok) {
            console.log('Sending delayed message');
            return ch.sendToQueue(ok.queue, new Buffer(':)'));
        });
    })
}).then(null, function(error) {
    console.log('error\'ed')
    console.log(error);
    console.log(error.stack);
});
I'm using amqp.node (https://github.com/squaremo/amqp.node), which is amqplib in npm. Although node-amqp (https://github.com/postwait/node-amqp) seems to be much more popular, it doesn't implement the full protocol and has quite a few outstanding issues regarding reconnecting.
dev.rabbitmq.com is running RabbitMQ 3.1.3.
This is working code. When a message spends more than the TTL in DeadExchange, it is pushed to WorkExchange. The key to success is defining the right routing key: the exchange/queue to which you wish to send the message after the TTL should be bound with a routing key (note: not the default one), and the 'x-dead-letter-routing-key' attribute's value should match that routing key.
var amqp = require('amqplib');
var url = 'amqp://localhost';

amqp.connect(url).then(function(conn) {
    // Subscribe to the WorkQueue in WorkExchange to which the "delayed" messages
    // get dead-letter'ed.
    return conn.createChannel().then(function(ch) {
        return ch.assertExchange('WorkExchange', 'direct').then(function() {
            return ch.assertQueue('WorkQueue', {
                autoDelete: false,
                durable: true
            })
        }).then(function() {
            return ch.bindQueue('WorkQueue', 'WorkExchange', 'rk1');
        }).then(function() {
            console.log('Waiting for consume.');
            return ch.consume('WorkQueue', function(msg) {
                console.log('Received message.');
                console.log(msg.content.toString());
                ch.ack(msg);
            });
        });
    })
}).then(function() {
    // Now send a test message to DeadExchange to the DEQ queue.
    return amqp.connect(url).then(function(conn) {
        return conn.createChannel();
    }).then(function(ch) {
        return ch.assertExchange('DeadExchange', 'direct').then(function() {
            return ch.assertQueue('DEQ', {
                arguments: {
                    'x-dead-letter-exchange': 'WorkExchange',
                    'x-dead-letter-routing-key': 'rk1',
                    'x-message-ttl': 15000,
                    'x-expires': 100000
                }
            })
        }).then(function() {
            return ch.bindQueue('DEQ', 'DeadExchange', '');
        }).then(function() {
            console.log('Sending delayed message');
            return ch.publish('DeadExchange', '', new Buffer("Over the Hills and Far Away!"));
        });
    })
}).then(null, function(error) {
    console.log('error\'ed')
    console.log(error);
    console.log(error.stack);
});
Here's an example using AMQP Connection Manager for Node. I noticed no examples matched what we were doing in our code, so I made a repo with a simple example, plus one with retry counts via republishing back to the main exchange: https://github.com/PritchardAlexander/node-amqp-dead-letter-queue
Here's the simple example:
const amqp = require('amqp-connection-manager');

const username = encodeURIComponent('queue');
const password = encodeURIComponent('pass');
const port = '5672';
const host = 'localhost';
const connectionString = `amqp://${username}:${password}@${host}:${port}`;

// Ask the connection manager for a ChannelWrapper. Specify a setup function to
// run every time we reconnect to the broker.
connection = amqp.connect([connectionString]);

// A channel is your ongoing connection to RabbitMQ.
// All commands go through your channel.
connection.createChannel({
    json: true,
    setup: function (channel) {
        channel.prefetch(100);

        // Setup EXCHANGES - which are hubs you PUBLISH to that dispatch MESSAGES to QUEUES
        return Promise.all([
            channel.assertExchange('Test_MainExchange', 'topic', {
                durable: false,
                autoDelete: true,
                noAck: false
            }),
            channel.assertExchange('Test_DeadLetterExchange', 'topic', {
                durable: false,
                autoDelete: true,
                maxLength: 1000,
                noAck: true // This means dead letter messages will not need an explicit acknowledgement or rejection
            })
        ])
        // Setup QUEUES - which are delegated MESSAGES by EXCHANGES.
        // The MESSAGES then need to be CONSUMED.
        .then(() => {
            return Promise.all([
                channel.assertQueue(
                    'Test_MainQueue',
                    options = {
                        durable: true,
                        autoDelete: true,
                        exclusive: false,
                        messageTtl: 1000*60*60*1,
                        deadLetterExchange: 'Test_DeadLetterExchange'
                    }
                ),
                channel.assertQueue('Test_DeadLetterQueue',
                    options = {
                        durable: false,
                        autoDelete: true,
                        exclusive: false
                    }
                )
            ]);
        })
        // This glues the QUEUES and EXCHANGES together
        // The last parameter is a routing key. A hash/pound just means: give me all messages in the exchange.
        .then(() => {
            return Promise.all([
                channel.bindQueue('Test_MainQueue', 'Test_MainExchange', '#'),
                channel.bindQueue('Test_DeadLetterQueue', 'Test_DeadLetterExchange', '#')
            ]);
        })
        // Setup our CONSUMERS
        // They pick MESSAGES off of QUEUES and do something with them (either ack or nack them)
        .then(() => {
            return Promise.all([
                channel.consume('Test_MainQueue', (msg) => {
                    const stringifiedContent = msg.content ? msg.content.toString() : '{}';
                    console.log('Test_MainQueue::CONSUME ' + stringifiedContent);
                    const messageData = JSON.parse(stringifiedContent);
                    if (messageData.value === 0) {
                        console.log('Test_MainQueue::REJECT ' + stringifiedContent);
                        // the 'false' param at the very end means: don't retry! dead letter this instead!
                        return channel.nack(msg, true, false);
                    }
                    return channel.ack(msg);
                }),
                channel.consume('Test_DeadLetterQueue', (msg) => {
                    const stringifiedContent = msg.content ? msg.content.toString() : '{}';
                    console.log('');
                    console.log('Test_DeadLetterQueue::CONSUME ' + stringifiedContent);
                    console.log('');
                })
            ]);
        })
        .then(() => {
            setInterval(function () {
                const messageData = {
                    text: 'Dead letter if 0',
                    value: Math.floor(Math.random()*5)
                };
                const stringifiedMessage = JSON.stringify(messageData);
                // Publish message to exchange
                if (channel.publish('Test_MainExchange', '', new Buffer(stringifiedMessage))) {
                    console.log(`Sent ${stringifiedMessage}`);
                } else {
                    console.log(`Failed to send ${stringifiedMessage}`);
                };
            }, 300);
        });
    }
});
There was a bug in Channel#assertQueue in AMQP.Node which just got fixed; see https://github.com/squaremo/amqp.node/commit/3749c66b448875d2df374e6a89946c0bdd0cb918. The fix is on GitHub but not in npm just yet.
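Until that fix reaches npm, one workaround (my suggestion, not part of the original answer) is to install the library directly from the GitHub repository:

npm install git://github.com/squaremo/amqp.node.git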
