I have created a GCP Compute Engine instance with a static external IP address. Machine type: n1-standard-2 (2 vCPUs, 7.5 GB memory). The OS is Debian Linux.
My intention is to create a plain Node.js TCP server on the machine. The code is as follows:
var net = require('net');
var HOST = '0.0.0.0';
var PORT = 110;
net.createServer(function(sock) {
console.log('CONNECTED: ' + sock.remoteAddress +':'+ sock.remotePort);
sock.on('data', function(data) {
console.log('DATA ' + sock.remoteAddress + ': ' + data);
sock.write('You said "' + data + '"');
});
}).listen(PORT, HOST);
console.log('Server listening on ' + HOST +':'+ PORT);
The client is:
var net = require('net');
var HOST = '104.197.23.132';
var PORT = 110;
var client = new net.Socket();
client.connect(PORT, HOST, function() {
console.log('CONNECTED TO: ' + HOST + ':' + PORT);
client.write('I am Chuck Norris!');
});
client.on('data', function(data) {
console.log('DATA: ' + data);
client.destroy();
});
client.on('close', function() {
console.log('Connection closed');
});
My firewall rules are as follows:
PLEASE NOTE: I am listening on port 110, and the client is trying to connect to the static external IP address. It appears that I am allowing TCP traffic on port 110 according to the firewall rules. The error I see is
Error: connect ETIMEDOUT 104.197.23.132:110
When I SSH into the instance and run the TCP client there, it connects successfully. So the final question is: why can't the local TCP client (my computer) connect to the Compute Engine instance? Is there something wrong with my firewall rules / source filters / IP forwarding?
I just solved this problem.
You have the wrong targets. Go to the firewall rule's edit page and open the "Targets" select menu; then you can select the first option, "Apply to all instances", which is the easiest way.
You first need to add a firewall rule that allows traffic from your host's IP, since incoming traffic needs to be accepted from that particular host (your machine).
Then you should be able to ping the Compute Engine instance.
You should also be able to telnet to the particular port you configured in your program.
Once that works, you should be okay.
Also, the custom rule should be added to the instance's Network Tags so the rule is associated with that instance; after this, the instance uses that particular rule.
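For what it's worth, one quick way to tell a firewall drop from a service problem is to look at which error the client gets: a silent drop (typical of a missing or mis-targeted firewall rule) shows up as ETIMEDOUT, while a reachable host with nothing listening on the port shows up as ECONNREFUSED. A minimal probe, reusing the IP and port from the question (a sketch, not part of the original code):
var net = require('net');
var HOST = '104.197.23.132';
var PORT = 110;
var probe = net.connect(PORT, HOST);
probe.setTimeout(5000); // treat 5 seconds of silence as a likely firewall drop
probe.on('connect', function() {
  console.log('Reachable: TCP handshake completed');
  probe.end();
});
probe.on('timeout', function() {
  console.log('Timed out: packets are probably being dropped (check the firewall rule targets)');
  probe.destroy();
});
probe.on('error', function(err) {
  // ECONNREFUSED would mean the host answered but nothing is listening on the port
  console.log('Error: ' + err.code);
});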
Related
I am pushing a neo4j 2.3.3 application to Cloud Foundry.
I have the neo4j server and the neo4j browser running in the cloud; they also run on my local machine.
The data set lives inside the neo4j server.
The neo4j browser is a Node.js application which starts with the command "grunt server",
but the browser starts up at http://localhost:9000.
How do I make the Node.js application (the neo4j browser) listen on port 9000? I know about process.env, but how do I implement it here?
The neo4j browser has a JS file (connect.js) which loads the HTTP protocol, host and port.
I need some guidance on making modifications here.
I have previously read VCAP_SERVICES in Java code.
1. How do I add port 9000 to Cloud Foundry?
2. How do I read the port environment variable from Cloud Foundry into the JS file?
Or is there another way around this?
This is a Node.js application.
Locally, the host name is 127.0.0.1,
the port is 9000,
and the server runs fine.
The neo4j browser is a Node.js application started using "grunt server".
There are no errors in the log,
but when I launch the browser I get a 502 Bad Gateway error.
Locally the host is 127.0.0.1, so what is the host address on Cloud Foundry?
I did try 0.0.0.0.
https://docs.cloudfoundry.org/buildpacks/node/node-tips.html
--------------
var vcapport = process.env.VCAP_APP_PORT || 3000;
var vcaphost = process.env.VCAP_APP_HOST || '0.0.0.0';
server
.listen(vcapport, vcaphost)
.on('listening', function() {
var address = server.address();
//var hostname = options.hostname || '0.0.0.0';
var hostname = vcaphost;
var targetHostname = 'browser.cfappstpanpz2.ebiz.verizon.com';
var target = options.protocol + "://" + vcaphost +":" + vcapport;
//var target = 'http://browser.cfapps.io/';
grunt.log.writeln('Started connect web server on ' + target);
grunt.config.set('connect.' + taskTarget + '.options.hostname', hostname);
grunt.config.set('connect.' + taskTarget + '.options.port', address.port);
grunt.log.writeln('server.address ' + address);
grunt.log.writeln('address.port ' + address.port);
grunt.log.writeln('hostname ' + hostname);
grunt.log.writeln('vcapport ' + vcapport);
grunt.log.writeln('vcaphost ' + vcaphost);
grunt.event.emit('connect.' + taskTarget + '.listening', hostname, address.port);
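For what it's worth, here is a rough sketch of the underlying idea (my own illustration, not the neo4j browser's actual Gruntfile): feed the Cloud Foundry-assigned port and host into grunt-contrib-connect's options instead of the hard-coded localhost:9000. Whether the port arrives as PORT or VCAP_APP_PORT depends on the CF version, so both are checked.
// Gruntfile.js excerpt -- a sketch only, not the neo4j browser's real Gruntfile.
module.exports = function(grunt) {
  var port = parseInt(process.env.PORT || process.env.VCAP_APP_PORT, 10) || 9000;
  var host = process.env.VCAP_APP_HOST || '0.0.0.0';

  grunt.initConfig({
    connect: {
      server: {
        options: {
          port: port,      // CF-assigned port instead of the hard-coded 9000
          hostname: host   // bind to all interfaces so the CF router can reach the app
        }
      }
    }
  });

  grunt.loadNpmTasks('grunt-contrib-connect');
  grunt.registerTask('server', ['connect:server:keepalive']);
};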
I have a simple server program which just reads data and sends a reply back to the client. I am connecting 1000 clients, each pumping 4096 bytes of data concurrently and continuously, but my server crashes with an out-of-memory error. My analysis is that messages are accumulating in the buffer.
My question is: if I have two different ports on the same server, each receiving from 500 clients, would that decrease the accumulation of messages in the buffer?
Please help me with this.
My Node.js code for the server:
var net = require('net');
var readline=require('readline');
var HOST = '10.44.75.21';
var PORT = 6969;
net.createServer(function(sock) {
var i = readline.createInterface(sock, sock);
console.log('CONNECTED: ' + sock.remoteAddress +':'+ sock.remotePort);
// Add a 'data' event handler to this instance of socket
i.on('line', function(data) {
//console.log('Application Name : ' + data);
// Write the data back to the socket, the client will receive it as data from the server
sock.write('Connected to "' + data + '"\n');
});
// Add a 'close' event handler to this instance of socket
sock.on('close', function(data) {
console.log('CLOSED: ' + sock.remoteAddress +' '+ sock.remotePort);
});
}).listen(PORT, HOST);
console.log('Server listening on ' + HOST +':'+ PORT);
That's highly unlikely to make a difference.
JavaScript is single-threaded. Whether you have 1 listen port or N listen ports is irrelevant. Requests will be handled one after the other, and the incoming port shouldn't have any impact on memory usage.
A better solution is to put a load-balancing reverse proxy in front of multiple instances of the same server logic, where each instance listens on a different port.
With enough load, this will fail as well, but it will buy you some time.
Combined with an auto-recovery scheme this will guarantee your server is always ready to handle requests.
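For illustration only, here is a minimal sketch (my own, not from the answer) of launching several instances of the same server logic on different ports so that a reverse proxy such as nginx or HAProxy can balance across them; server.js, the base port and the instance count are all assumptions.
// launch.js -- a sketch, not a drop-in fix: starts several copies of the same
// server logic, each on its own port, for a reverse proxy to balance across.
var fork = require('child_process').fork;

var BASE_PORT = 6969; // assumed base port
var INSTANCES = 4;    // assumed number of instances

for (var i = 0; i < INSTANCES; i++) {
  var port = BASE_PORT + i;
  // server.js is assumed to read its listen port from process.env.PORT
  var child = fork('./server.js', [], {
    env: Object.assign({}, process.env, { PORT: String(port) })
  });
  console.log('Started instance pid ' + child.pid + ' listening on port ' + port);
}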
After prototyping my project using PHP and Unity3D, I've decided to build the production version using Cordova and Node.js.
I'm currently using Socket.io with Node.js and I'm having some confusion about connections. The way I expected this to work was the following procedure:
The client would connect to the server with a request
The server would respond to the request
The connection would be closed
However, it seems that the connection likes to stay open, and if the connection is closed, it continuously attempts to reconnect, which is not what I am looking for. I'm attempting to establish a single exchange of data, similar to what happens when you make a web request to a PHP file.
The source code of the project is pretty much boilerplate code:
var application = require('express')();
var http = require('http').Server(application);
var server = require('socket.io')(http);
http.listen(8080, function() {
console.log('Listening on *:8080');
});
server.on('connection', function(socket) {
console.log('SERVER: A new connection has been received.');
server.on('disconnect', function() {
console.log('SERVER: A connection has been closed.');
});
});
I do not need a persistent connection, nor do I want one.
Thoughts: I could send a close handshake from the client. For example:
Send some data to the server
Receive some data from the server
Send a close request to the server / just close the socket
Continue application logic once the socket is closed
Would this be the proper way to handle this? However, the question then arises: what if the data gets lost? Then there's a permanently open socket. Would implementing a basic timeout be ideal in this situation? (i.e. if a response isn't received within 10 seconds, assume there was an error or the server was not available.)
Then Socket.io is the wrong tool for your scenario. Socket.io needs to keep the socket open to get events from the server back to the client (and vice versa). In fact, even if the server does not support WebSockets, Socket.io will fall back to other mechanisms, such as polling.
I'm not sure why you're using Socket.io for this. Socket.IO serves a different purpose and doesn't fit your criteria here; I have mainly seen it used in real-time applications and binary streaming. You can try a plain TCP socket in Node.js:
var net = require('net');
var HOST = '127.0.0.1';
var PORT = 6969;
// Create a server instance, and chain the listen function to it
// The function passed to net.createServer() becomes the event handler for the 'connection' event
// The sock object the callback function receives is UNIQUE for each connection
net.createServer(function(sock) {
// We have a connection - a socket object is assigned to the connection automatically
console.log('CONNECTED: ' + sock.remoteAddress +':'+ sock.remotePort);
// Add a 'data' event handler to this instance of socket
sock.on('data', function(data) {
console.log('DATA ' + sock.remoteAddress + ': ' + data);
// Write the data back to the socket, the client will receive it as data from the server
sock.write('You said "' + data + '"');
});
// Add a 'close' event handler to this instance of socket
sock.on('close', function(data) {
console.log('CLOSED: ' + sock.remoteAddress +' '+ sock.remotePort);
});
}).listen(PORT, HOST);
console.log('Server listening on ' + HOST +':'+ PORT);
Check out here
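To round this out, the one-shot request/response/close flow with the timeout the question mentions might look roughly like this on the client side, using the same net module (a sketch; the host and port are assumptions, not values from the project):
var net = require('net');

var client = net.connect({ host: 'example.com', port: 6969 }); // assumed host and port
client.setTimeout(10000); // give up if nothing arrives within 10 seconds

client.on('connect', function() {
  client.write('request payload'); // send the single request
});

client.on('data', function(data) {
  console.log('Response: ' + data);
  client.end(); // close the socket once the single response has arrived
});

client.on('timeout', function() {
  console.log('No response within 10 seconds, closing socket');
  client.destroy();
});

client.on('error', function(err) {
  console.log('Socket error: ' + err.message);
});

client.on('close', function() {
  // continue application logic here, with or without a response
});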
I set up two PCs:
one is a client on a local network behind NAT,
the other is a server on a public network.
The test steps are --
1) the client listens for UDP on port 33333
2) the server listens for UDP on port 22222
3) the client sends a UDP message to the server
4) the server receives the data and sends it back
When I test the code on my test network, it works.
If I put the server on the real internet, the server can get the message from the client,
but the client cannot get the response from the server.
What's wrong?
Here's the test code in Node.js.
server
var dgram = require('dgram');
var socket = dgram.createSocket('udp4');
socket.on('message', function (message, remote) {
console.log('client ip:' + remote.address + ', port:' + remote.port +', message:' + message);
//send response to client (a new variable name, so the incoming message is not shadowed)
var reply = new Buffer("hello, client!");
socket.send(reply, 0, reply.length, remote.port, remote.address);
});
//listening port
socket.bind(22222);
client
var dgram = require('dgram');
var socket = dgram.createSocket('udp4');
socket.on('message', function (message, remote) {
//display message from server
console.log('server ip:' + remote.address + ', port:' + remote.port +', message:' + message);
});
//listening port
socket.bind(33333);
//send message to server
function send(server){
var message = new Buffer("hello, server!");
socket.send(message, 0, message.length, 22222, server, function(){
//send again after 1 seconds
setTimeout(function(){
send(server);
}, 1000);
});
};
//suppose that server address is public.server.com
send("public.server.com");
NATed computers cannot be reached from outside, and this is particularly painful for peer-to-peer or friend-to-friend software. Basically, your PC does not have a public IP address, but your NAT device does. So the NAT is visible; your PC isn't.
The server gets the packet from the NAT device and sends the response back to it. The NAT receives the response and has to relay it to your PC; that's the trick. To do so, you have to configure port forwarding on the NAT.
The NAT has a table like the following:
+----------+---------------------+---------------+
| NAT PORT | INTERNAL IP ADDRESS | INTERNAL PORT |
+----------+---------------------+---------------+
| 33333    | 192.168.0.3 (PC IP) | 33333         |
+----------+---------------------+---------------+
It can be read as: when the NAT receives a packet on its port 33333, it redirects it to the internal IP 192.168.0.3 (your PC's IP address) and port 33333.
If your PC uses a fixed IP, you can do this mapping by hand on your NAT. However, if you use a DHCP server, your PC's IP can change after each reboot, so you need to do this mapping in software in your project. Most NATs support Universal Plug & Play, NAT Port Mapping Protocol or Port Control Protocol to achieve this mapping, and you can do it with Node.js, given that all you need are the appropriate HTTP requests to the NAT.
Yes, you can do it all by yourself, but it is not so easy; in fact, the discovery process requires you to broadcast UDP messages on the LAN on a specific port. I strongly recommend using a third-party component to do it.
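For example, with the third-party nat-upnp package (an assumption on my part; the API below comes from that package, not from Node core, and any UPnP client library would do), requesting the mapping for the client's port 33333 could look roughly like this:
// Sketch using the third-party nat-upnp package (npm install nat-upnp).
var natUpnp = require('nat-upnp');
var client = natUpnp.createClient();

client.portMapping({
  public: 33333,  // port the NAT listens on
  private: 33333, // port your PC listens on
  ttl: 3600       // lifetime of the mapping in seconds (router-dependent)
}, function(err) {
  if (err) {
    console.log('Port mapping failed: ' + err);
  } else {
    console.log('Port 33333 on the NAT is now forwarded to this machine');
  }
});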
I hope this helps you.
I'm a newbie mobile developer trying to take advantage of Cloud Foundry's service to run my server to handle some chats and character movements.
I'm using Noobhub to achieve this (a TCP connection between server and client using Node.js and Corona SDK's TCP connection API).
So basically I'm trying a non-HTTP TCP connection between Cloud Foundry (Node.js) and my machine (Lua).
Link to Noobhub (there is a GitHub repo with server AND client side implementations).
I am doing
Client
...
socket.connect("myappname.cloudfoundry.com", 45234)
...
(45234 is the server's process.env.VCAP_APP_PORT value, which I retrieved from the console output of "vmc logs myappname" after running the application.)
Server
...
server.listen(process.env.VCAP_APP_PORT)
When I try to connect, it just times out.
On my local machine, doing
Client
...
socket.connect("localhost",8989)
Server
...
server.listen(8989)
works as expected. It's just on Cloud Foundry that it doesn't work.
I tried a bunch of other approaches, such as setting the client's connection port to 80, among others. I looked at a few resources, but none of them solved it.
I'm usually bad at asking questions, so if you need more information, please ask me!
P.S.
Before you throw this link at me with an angry face D:< , here's a question that shows a similar problem that another person posted.
cannot connect to TCP server on CloudFoundry (localhost node.js works fine)
From that, I can see that this person was trying to do something similar to what I am doing.
Does the selected answer mean that I MUST use a Host header (i.e. use the HTTP protocol) to connect? Does that also mean Cloud Foundry will not support a "TRUE" TCP socket, much like Heroku or AppFog?
Actually, the process.env.VCAP_APP_PORT environment variable gives you the port to which your HTTP traffic is redirected by the Cloud Foundry L7 router (nginx), based on your app's route (e.g. nodejsapp.vcap.me:80 is redirected to the process.env.VCAP_APP_PORT port on the virtual machine), so you definitely should not use it for a raw TCP connection. This port should be used to listen for HTTP traffic. That is why your example works locally but does not work on Cloud Foundry.
The approach that worked for me is to listen on the port provided by CF with an HTTP server and then attach a WebSocket server (websocket.io in my example below) to it. I've created a sample echo server that works both locally and on CF. The content of my Node.js file named example.js is:
var host = process.env.VCAP_APP_HOST || "localhost";
var port = process.env.VCAP_APP_PORT || 1245;
var webServerApp = require("http").createServer(webServerHandler);
var websocket = require("websocket.io");
var http = webServerApp.listen(port, host);
var webSocketServer = websocket.attach(http);
function webServerHandler (req, res) {
res.writeHead(200);
res.end("Node.js websockets.");
}
console.log("Web server running at " + host + ":" + port);
//Web Socket part
webSocketServer.on("connection", function (socket) {
console.log("Connection established.");
socket.send("Hi from webSocketServer on connect");
socket.on("message", function (message) {
console.log("Message to echo: " + message);
//Echo back
socket.send(message);
});
socket.on("error", function(error){
console.log("Error: " + error);
});
socket.on("close", function () { console.log("Connection closed."); });
});
The dependency lib websocket.io can be installed by running the npm install websocket.io command in the same directory. There is also a manifest.yml file which describes the CF deploy arguments:
---
applications:
- name: websocket
command: node example.js
memory: 128M
instances: 1
host: websocket
domain: vcap.me
path: .
So, running cf push from this directory deployed the app to my local CFv2 instance (set up with the help of cf_nise_installer).
To test this echo WebSocket server, I used a simple index.html file, which connects to the server and sends messages (everything is logged to the console):
<!DOCTYPE html>
<head>
<script>
var socket = null;
var pingData = 1;
var prefix = "ws://";
function connect(){
socket = new WebSocket(prefix + document.getElementById("websocket_url").value);
socket.onopen = function() {
console.log("Connection established");
};
socket.onclose = function(event) {
if (event.wasClean) {
console.log("Connection closed clean");
} else {
console.log("Connection aborted (e.g. server process killed)");
}
console.log("Code: " + event.code + " reason: " + event.reason);
};
socket.onmessage = function(event) {
console.log("Data received: " + event.data);
};
socket.onerror = function(error) {
console.log("Error: " + error.message);
};
}
function ping(){
if( !socket || (socket.readyState != WebSocket.OPEN)){
console.log("Websocket connection not establihed");
return;
}
socket.send(pingData++);
}
</script>
</head>
<body>
ws://<input id="websocket_url">
<button onclick="connect()">connect</button>
<button onclick="ping()">ping</button>
</body>
</html>
The only thing left to do is to enter the server address into the textbox of the index page (websocket.vcap.me in my case) and press the Connect button, and we have a working WebSocket connection over TCP, which can be tested by sending a ping and receiving the echo. That worked well in Chrome; however, there were some issues with IE 10 and Firefox.
As for a "TRUE" TCP socket, there is no definitive information: according to the last paragraph here, you cannot use any port except 80 and 443 (HTTP and HTTPS) to communicate with your app from outside of Cloud Foundry, which makes me think a TCP socket cannot be implemented. However, according to this answer, you can actually use other ports... It seems that some deeper investigation into this question is required...
"Cloud Foundry uses an L7 router (ngnix) between clients and apps. The router needs to parse HTTP before it can route requests to apps. This approach does not work for non-HTTP protocols like WebSockets. Folks running node.js are going to run into this issue but there are no easy fixes in the current architecture of Cloud Foundry."
- http://www.subbu.org/blog/2012/03/my-gripes-with-cloud-foundry
I decided to go with pubnub for all my messaging needs.