I have a Django project using Celery with a RabbitMQ broker, and now I want to call a Django (Celery) task from a NodeJS server.
In my NodeJS server I'm using amqplib, which allows me to send tasks to RabbitMQ:
amqp.connect('amqp://localhost', function(err, conn) {
  conn.createChannel(function(err, ch) {
    var q = 'celery';
    ch.assertQueue(q, {durable: true});
    ch.sendToQueue(q, new Buffer('What should I write here?'));
  });
});
My question is: what format does Celery use? What should I write to the Buffer to call the Celery worker?
For example, in my CELERY_ROUTES (Django settings) I have blabla.tasks.add:
CELERY_ROUTES = {
    ...
    'blabla.tasks.add': 'high-priority',
}
How do I call this blabla.tasks.add function?
I've tried many ways, but the Celery worker gives me the error: Received and deleted unknown message. Wrong destination?!?
I found the solution here: http://docs.celeryproject.org/en/latest/internals/protocol.html
Example message format is:
{
  "id": "4cc7438e-afd4-4f8f-a2f3-f46567e7ca77",
  "task": "celery.task.PingTask",
  "args": [],
  "kwargs": {},
  "retries": 0,
  "eta": "2009-11-17T12:30:56.527191"
}
So the code should be:
amqp.connect('amqp://localhost', function(err, conn) {
  conn.createChannel(function(err, ch) {
    var q = 'celery';
    ch.assertQueue(q, {durable: true});
    ch.sendToQueue(q, new Buffer('{"id": "this-is-soo-unique-id", "task": "blabla.tasks.add", "args": [1, 2], "kwargs": {}, "retries": 0}'), {
      contentType: 'application/json',
      contentEncoding: 'utf-8',
    });
  });
});
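As a side note, the id must be unique per task message. Here is a minimal sketch of the same publish with a generated UUID, assuming Node 14.17+ for crypto.randomUUID, and using Buffer.from instead of the deprecated new Buffer:
var crypto = require('crypto');

amqp.connect('amqp://localhost', function(err, conn) {
  conn.createChannel(function(err, ch) {
    var q = 'celery';
    ch.assertQueue(q, {durable: true});
    // Body follows the Celery task message protocol: task name, args, kwargs
    var message = {
      id: crypto.randomUUID(),  // unique id per task
      task: 'blabla.tasks.add',
      args: [1, 2],
      kwargs: {},
      retries: 0
    };
    ch.sendToQueue(q, Buffer.from(JSON.stringify(message)), {
      contentType: 'application/json',
      contentEncoding: 'utf-8',
    });
  });
});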
This is the first time I've posted a question here, sorry if some data is missing.
I'm trying to do some web scraping to get some info from a table.
The page only responds with an index.php, and when I use the search form, it makes a POST to index.php?go=le with some formData.
To avoid the CORS problem, I'm making the POST from my own API running on localhost. I'm pointing my frontend to my API and getting the response from localhost.
No problem there.
My problem appears when I try to make a second request to my API. The first GET works fine, but after that response it keeps failing.
When I restart the server, it works again, but only one time.
Here is my API code. I use nodemon server.js to start my server.
server.js
const express = require("express");
const axios = require("axios");
const scrape = require("scrape-it");
const FormData = require("form-data")
const cors = require("cors")
const app = express();
const PORT = process.env.PORT || 5000;
app.use(cors())
const config = {
  headers: {
    'Content-type': 'multipart/form-data'
  },
}

app.get("/get-projects", async (req, res) => {
  const testJSON = await axios.post(baseURL + "/index.php?go=le", formData, config)
    .then(res => {
      console.log("Post successfull...");
      return res
    })
    .catch(err => {
      console.log("Server error");
      return err
    });
  if (testJSON && testJSON.data) {
    res.send({status: 200, data: testJSON.data});
  } else {
    res.status(508).send({status: 508, msg: "Unhandled Server Error", failedResponse: testJSON || "empty"})
  }
})
app.listen(PORT,()=>console.log(`App running in port: ${PORT}`))
And in my front-end I only have a button with an event handler that makes a GET request to my API (http://localhost:5000).
This is my fetch.js, which is included via a script tag. Nothing fancy there.
fetch.js
const btn = document.getElementById("btn-fetch-proyects")
const axios = window.axios

const fetchProjects = async () => {
  console.log("Fetching...")
  axios.get("http://localhost:5000/get-projects")
    .then(res =>
      console.log("The server responded with the following data: ", res.data)
    )
    .catch(err => console.log("Failed with error: ", err))
  return null
}

btn.addEventListener("click", fetchProjects);
In the console where I'm running the server, I get Server error with this err object:
{
"message": "socket hang up",
"name": "Error",
"stack": "Error: socket hang up\n at connResetException (internal/errors.js:607:14)\n at Socket.socketOnEnd (_http_client.js:493:23)\n at Socket.emit (events.js:327:22)\n at endReadableNT (internal/streams/readable.js:1327:12)\n at processTicksAndRejections (internal/process/task_queues.js:80:21)",
"config": {
"url": "http://186.153.176.242:8095/index.php?go=le",
"method": "post",
"data": {
"_overheadLength": 1216,
"_valueLength": 3,
"_valuesToMeasure": [],
"writable": false,
"readable": true,
"dataSize": 0,
"maxDataSize": 2097152,
"pauseStreams": true,
"_released": true,
"_streams": [],
"_currentStream": null,
"_insideLoop": false,
"_pendingNext": false,
"_boundary": "--------------------------935763531826714388665103",
"_events": {
"error": [
null,
null
]
},
"_eventsCount": 1
},
"headers": {
"Accept": "application/json, text/plain, */*",
"Content-Type": "multipart/form-data",
"User-Agent": "axios/0.21.1"
},
"transformRequest": [
null
],
"transformResponse": [
null
],
"timeout": 0,
"xsrfCookieName": "XSRF-TOKEN",
"xsrfHeaderName": "X-XSRF-TOKEN",
"maxContentLength": -1,
"maxBodyLength": -1
},
"code": "ECONNRESET"
}
I hope someone has a clue about what's happening. I tried all day and I couldn't solve it.
I tried posting to other sites and it works fine, so I think the problem is with the form POST.
Thanks for reading!!!
At first glance I see an error in your front-end code. You mark the function async, but then you don't await anything; you use .then instead. Try not to mix styles: use either async/await or .then/.catch.
Check if that helps! :)
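For example, a minimal sketch of the same handler written consistently with async/await, keeping your existing element id and endpoint:
const btn = document.getElementById("btn-fetch-proyects")
const axios = window.axios

const fetchProjects = async () => {
  console.log("Fetching...")
  try {
    // await the response instead of chaining .then inside an async function
    const res = await axios.get("http://localhost:5000/get-projects")
    console.log("The server responded with the following data: ", res.data)
  } catch (err) {
    console.log("Failed with error: ", err)
  }
}

btn.addEventListener("click", fetchProjects);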
Obviously the socket is hanging!
Use node unirest; it closes the data stream for you.
var unirest = require('unirest');

var req = unirest('POST', 'localhost:3200/store/artifact/metamodel')
  .attach('file', '/home/arsene/DB.ecore')
  .field('description', 'We are trying to save the metamodel')
  .field('project', '6256d72a81c4b80ccfc1768b')
  .end(function (res) {
    if (res.error) throw new Error(res.error);
    console.log(res.raw_body);
  });
Hope this helps!
I am using the Serverless Framework for Lambda deployment and I am facing a really weird problem. I use Sequelize to connect to an RDS Aurora MySQL DB. The deployment is successful, but when I invoke the APIs I see SequelizeConnectionError: Connect ETIMEDOUT. The API works fine when I run it offline, but the deployed APIs don't work. They start working as soon as I make any small change in the console and save it, like changing the timeout from 30 to 31, but when I redeploy I face the same problem again, and I just can't figure out what it is.
Error:
SequelizeConnectionError: connect ETIMEDOUT
Edits:
Yes. This is Aurora Serverless with the Data API enabled. The Lambda function runs in the same VPC as the DB. The security group and subnets are also the same. Here is my DB config snippet:
DB_PORT: 3306
DB_POOL_MAX: 10
DB_POOL_MIN: 2
DB_POOL_ACQUIRE: 30000
DB_POOL_IDLE: 10000
This is my db.js:
const sequelize = new Sequelize(process.env.DB_NAME, process.env.DB_USER, process.env.DB_PASSWORD, {
  host: process.env.DB_HOST,
  port: process.env.DB_PORT,
  dialect: 'mysql',
  pool: {
    max: process.env.DB_POOL_MAX,
    min: process.env.DB_POOL_MIN,
    acquire: process.env.DB_POOL_ACQUIRE,
    idle: process.env.DB_POOL_IDLE
  }
});
My handler file is really long with over 33 APIs. Below is one of them:
context.callbackWaitsForEmptyEventLoop = false;
if (event.source === 'serverless-plugin-warmup') {
  try {
    const { Datalogger } = await connectToDatabase();
  } catch (err) { }
  return {
    statusCode: 200,
    headers: { 'Access-Control-Allow-Origin': '*' },
    body: 'warm-up'
  }
}
try {
  const { Datalogger } = await connectToDatabase();
  var datalogger;
  if (!event.headers.dsn && !event.headers.sftp)
    throw new HTTPError(404, `Datalogger serial number and SFTP root directory is mandatory`);
  datalogger = await Datalogger.findOne({
    where: {
      dsn: event.headers.dsn,
      sftpRootDir: event.headers.sftp,
    }
  })
  if (!datalogger)
    throw new HTTPError(404, `Datalogger with this input was not found`);
  console.log('datalogger', datalogger);
  await datalogger.destroy();
  return {
    statusCode: 200,
    headers: { 'Access-Control-Allow-Origin': '*' },
    body: JSON.stringify(datalogger)
  }
} catch (err) {
  console.log('Error in destroy: ', JSON.stringify(err));
  return {
    statusCode: err.statusCode || 500,
    headers: { 'Content-Type': 'text/plain', 'Access-Control-Allow-Origin': '*' },
    body: err.message || 'Could not destroy the datalogger.'
  }
}
I reached out to AWS support for this issue and they suspect the error is due to a Node.js version upgrade. I was previously using 8 and it was working. After upgrading to 10, it results in intermittent timeouts: a few calls succeed and then one fails. Now even if I go back to version 8, it's the same issue.
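For reference, connectToDatabase is not shown above; below is a simplified sketch of the usual Lambda pattern for such a helper, caching the Sequelize connection in module scope so warm containers reuse it (this is a generic sketch with assumed names, not my exact file; model registration is omitted):
const { Sequelize } = require('sequelize');

// Cached in module scope so warm Lambda containers reuse the connection
let sequelize = null;

module.exports.connectToDatabase = async () => {
  if (!sequelize) {
    sequelize = new Sequelize(process.env.DB_NAME, process.env.DB_USER, process.env.DB_PASSWORD, {
      host: process.env.DB_HOST,
      port: process.env.DB_PORT,
      dialect: 'mysql',
      // a Lambda container handles one request at a time, so a small pool is enough
      pool: { max: 1, min: 0, acquire: 30000, idle: 10000 }
    });
    // model definitions (e.g. Datalogger) are registered here
  }
  await sequelize.authenticate(); // fails fast if the DB is unreachable
  return { Datalogger: sequelize.models.Datalogger };
};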
How can we configure connection pooling for Elasticsearch in Node.js, for handling instance failures and detecting dead nodes?
How can I customize the Transport, ConnectionPool, and Connection classes of Elasticsearch in Node.js?
This feature is supported by the new RC1 client.
Here is an example:
'use strict'

// docker run -p 9200:9200 -p 9300:9300 --rm -e "discovery.type=single-node" docker.elastic.co/elasticsearch/elasticsearch:6.7.0

const { Client } = require('@elastic/elasticsearch')

const client = new Client({
  nodes: ['http://127.0.0.1:9200/'],
  requestTimeout: 2000,
  sniffInterval: 500,
  sniffOnStart: true,
  sniffOnConnectionFault: true
})

client.on('sniff', (err, req) => {
  console.log('sniff', err ? err.message : '', `${JSON.stringify(req.meta.sniff)}`)
})

setInterval(async () => {
  try {
    const info = await client.info()
    console.log(info.body.name)
  } catch (err) {
    console.log(err.message);
  }
}, 1500)
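To address the customization part of the question: the client also lets you pass your own classes to the constructor. A minimal sketch, assuming the Connection class is exported as described in the client's docs (verify against the exact version you install):
const { Client, Connection } = require('@elastic/elasticsearch')

// Hypothetical subclass that logs every request going through a connection
class MyConnection extends Connection {
  request (params, callback) {
    console.log('requesting', params.path)
    return super.request(params, callback)
  }
}

const customClient = new Client({
  node: 'http://127.0.0.1:9200',
  Connection: MyConnection
})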
Note: install the v6 release candidate:
"@elastic/elasticsearch": "6.7.0-rc.1",
Let's say I have a list of hostnames and ports, not all of which are HTTP related.
var config = {
  "checks": [
    {
      "name": "NPM",
      "hostname": "npmjs.com",
      "port": 80
    },
    {
      "name": "AWS",
      "hostname": "aws.amazon.com",
      "port": 443
    },
    {
      "name": "RabbitMQ",
      "hostname": "merry-butterfly.rmq.cloudamqp.com",
      "port": 5671
    }
  ]
}
What is an effective way to test that each of these services can be reached? My first thought was to use a typical telnet-like approach, so I did this:
var telnet = require('telnet-client')

config.checks.forEach(function(check) {
  var connection = new telnet()
  var params = {
    host: check.hostname,
    port: check.port,
    negotiationMandatory: false
  }
  connection.on('connect', function() {
    connection.send('GET /', {
      ors: '\r\n',
      waitfor: '\n'
    }, function(err, data) {
      console.log(err, data);
      connection.end()
    })
  })
  connection.connect(params)
  results.push(result);
});
The above seems to work, but I'm actually not sure what data to send to each individual service to get back a response that suggests the service is "reachable". I don't need to auth or do any operations with the service, just check that it can be reached. Additionally, it raises an exception if the DNS is unreachable:
Error: getaddrinfo ENOTFOUND merry-butterfly.rmq.cloudamqp.com/api merry-butterfly.rmq.cloudamqp.com/api:443
What would be the appropriate way to handle the errors, and to asynchronously test each of the entries in the list of "checks"?
Ping the host.
https://www.npmjs.com/package/ping
For example:
var ping = require('ping');

var hosts = ['192.168.1.1', 'google.com', 'yahoo.com'];
hosts.forEach(function(host){
  ping.sys.probe(host, function(isAlive){
    var msg = isAlive ? 'host ' + host + ' is alive' : 'host ' + host + ' is dead';
    console.log(msg);
  });
});
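Note that ping only tells you the host answers ICMP; it doesn't check that a specific port accepts connections. If you also need the port check, here is a minimal sketch using Node's built-in net module: a successful TCP connect is enough, no data needs to be sent, and DNS or connection errors resolve to false instead of throwing.
const net = require('net');

function checkPort(hostname, port, timeoutMs = 5000) {
  return new Promise((resolve) => {
    const socket = net.createConnection({ host: hostname, port: port });
    socket.setTimeout(timeoutMs);
    socket.once('connect', () => { socket.destroy(); resolve(true); });
    // covers ENOTFOUND (bad DNS), ECONNREFUSED, etc.
    socket.once('error', () => { socket.destroy(); resolve(false); });
    socket.once('timeout', () => { socket.destroy(); resolve(false); });
  });
}

// run all checks from the question's config concurrently
Promise.all(config.checks.map(async (check) => {
  const reachable = await checkPort(check.hostname, check.port);
  console.log(check.name + ': ' + (reachable ? 'reachable' : 'unreachable'));
}));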
I succeeded in generating all my models from my database. Then I run the API by executing 'node .'.
I'm able to see all my web services, but when I try out a service, there is a 500 error telling me that there is no ACL table.
So I opened model-config.json and I saw that there were models I didn't create before (User, AccessToken, ACL, RoleMapping and Role).
I would like to know if all these models have to exist in my database. And do you know which properties I have to put in each table?
Thank you in advance.
Error:
{
  "error": {
    "name": "Error",
    "status": 500,
    "message": "ER_NO_SUCH_TABLE: Table 'sed.ACL' doesn't exist",
    "code": "ER_NO_SUCH_TABLE",
    "errno": 1146,
    "sqlState": "42S02",
    "index": 0,
    "stack": "Error: ER_NO_SUCH_TABLE: Table 'sed.ACL' doesn't exist\n [...]"
  }
}
You will need to automigrate these tables yourself. See:
https://groups.google.com/forum/#!searchin/loopbackjs/automigrate$20default$20tables/loopbackjs/IiapgVVf-NQ/32yeCnNxBmIJ
https://github.com/strongloop/loopback/issues/591.
You need to migrate them yourself.
Follow the basic procedure in Attaching models to data sources to change from the in-memory data source to the database you want to use.
Create a server/create-lb-tables.js file with the following:
var server = require('./server');
var ds = server.dataSources.db;
var lbTables = ['User', 'AccessToken', 'ACL', 'RoleMapping', 'Role'];
ds.automigrate(lbTables, function(er) {
  if (er) throw er;
  console.log('Loopback tables [' + lbTables + '] created in ', ds.adapter.name);
  ds.disconnect();
});
Run the script manually:
$ cd server
$ node create-lb-tables.js
For further details, see the LoopBack documentation.
var server = require('./server');
var ds = server.dataSources.db;
var lbTables = ['User', 'AccessToken', 'ACL', 'RoleMapping', 'Role'];
ds.automigrate(lbTables, function(er) {
  if (er) throw er;
  console.log('Loopback tables [' + lbTables + '] created in ', ds.adapter.name);
  ds.disconnect();
});
Here, in
var ds = server.dataSources.db;
db is the name of the datasource you are using in datasources.json.
My database connection configuration (datasources.json):
{
  "db": {
    "name": "db",
    "connector": "memory"
  },
  "inventory": {
    "host": "localhost",
    "port": 3306,
    "url": "",
    "database": "inventory",
    "password": "root",
    "name": "inventory",
    "user": "root",
    "connector": "mysql"
  }
}
Here is my db name:
"database": "inventory"
The updated code is:
var server = require('./server');
var ds = server.dataSources.inventory;
var lbTables = ['User', 'AccessToken', 'ACL', 'RoleMapping', 'Role'];
ds.automigrate(lbTables, function(er) {
  if (er) throw er;
  console.log('Loopback tables [' + lbTables + '] created in ', ds.adapter.name);
  ds.disconnect();
});
Save this code as a separate file in the directory and run it with node filename.js.
And it works.