Charles Proxy not forwarding requests - Linux

I have installed redsocks to create a transparent proxy on my Linux machine for a binary that sends requests to a server. The redsocks.conf file contains the following:
base {
    log_debug = on;
    log_info = on;
    log = "syslog:daemon";
    daemon = on;
    user = redsocks;
    group = redsocks;
    redirector = iptables;
}

redsocks {
    local_ip = 127.0.0.1;
    local_port = 9050;
    ip = 127.0.0.1;
    port = 8888;
    type = socks5;
}
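With this config, redsocks accepts the redirected connections on 127.0.0.1:9050 and relays them as SOCKS5 to 127.0.0.1:8888, so whatever is listening on 8888 (here, Charles) must actually speak SOCKS5 rather than act as a plain HTTP proxy. One way to test that independently of redsocks, using curl's --socks5 flag (the example URL is arbitrary):

curl -v --socks5 127.0.0.1:8888 http://example.com/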
and added the following configuration to /etc/iptables/rules.v4:
*filter
:INPUT ACCEPT [8:26556]
:FORWARD ACCEPT [0:0]
:OUTPUT ACCEPT [8:26556]
COMMIT
*nat
:OUTPUT ACCEPT [0:0]
-A OUTPUT -p tcp -d 192.168.0.115 -m multiport --dports 80,443 -m owner ! --uid-owner redsocks -j REDIRECT --to-ports 9050
COMMIT
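For reference, whether the REDIRECT rule is actually matching can be verified by watching the nat table's packet counters while the binary runs, for example:

sudo iptables -t nat -L OUTPUT -v -n
# the pkts/bytes columns of the REDIRECT rule should increase while the binary sends traffic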
When I start Charles Proxy, the requests are received, but no response from the server is ever captured; in other words, the requests are not being forwarded.
When I turn everything off and go back to the normal setup, everything works fine. What could be wrong?
Regards

Related

Node.JS proxy for FAST protocol

I'm trying to make a Node.JS proxy for the FAST protocol over UDP multicast (PPTP connection).
const os = require('os');
// Interface name as it appears in Windows ('VPN-подключение' is Russian for 'VPN connection')
const osInterfaceName = 'VPN-подключение';
const endpoints = {
  'FUT-TRADES': {
    A: {ip: '239.195.9.9', port: 42009},
    B: {ip: '239.195.137.9', port: 43009},
  },
  'OPT-TRADES': {
    A: {ip: '239.195.9.25', port: 42025},
    B: {ip: '239.195.137.25', port: 43025},
  }
};
const dgram = require('dgram');
const osInterface = os.networkInterfaces()[osInterfaceName] !== undefined
  ? os.networkInterfaces()[osInterfaceName][0]
  : { address: '0.0.0.0' };
const clients = {};
for (let key in endpoints) {
  for (let serverId in endpoints[key]) {
    const client = dgram.createSocket('udp4');
    client.on('listening', function () {
      const address = client.address();
      console.log(`UDP Client listening on ${address.address}:${address.port} [${serverId}]`);
      client.setBroadcast(true);
      client.setMulticastTTL(128);
      client.addMembership(endpoints[key][serverId].ip, osInterface.address);
    });
    client.on('message', function (message, remote) {
      console.log('Message from: ' + remote.address + ':' + remote.port + ' - ' + message);
    });
    client.on('error', function (error) {
      console.log(`UDP error: ${error.stack}`);
    });
    client.bind(endpoints[key][serverId].port, osInterface.address);
    clients[`${key}:${serverId}`] = client;
  }
}
If I test it locally (starting a server and sending a multicast message), the message shows up on the client, but I can't use it with the MOEX stream. The logs show nothing except "UDP Client listening on 1.1.5.171:42009 [A]..." (for every stream in the endpoints list).
But according to netsh interface ip show joins, the client has successfully joined the multicast groups.
Looks like I have found the source of the problem, and an alternative resolution along with it.
It's not enough to just join the multicast group; source filtering of packets also has to be enabled:
Install and start the smcroute daemon:
apt-get install smcroute
smcroute -d
Join the multicast group with filtering enabled (the source IP is required):
smcroute -j ppp0 91.203.253.235 239.195.9.9
Then the application starts to get multicast packets:
tcpdump -i ppp0 -s 65534 host 239.195.9.9
Additional info: while I was searching for an answer I found a UDP-to-TCP proxy tool: https://github.com/MarkoPaul0/DatagramTunneler (which works around the shortage of multicast join parameters, as I couldn't find a source-IP filter option for multicast joins in Node.JS).
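For what it's worth, newer Node releases (v13.1.0 and up) added dgram.Socket#addSourceSpecificMembership, which performs exactly this kind of source-filtered join natively. A minimal sketch, reusing the source and group addresses from the smcroute example above:

const dgram = require('dgram');

const client = dgram.createSocket({ type: 'udp4', reuseAddr: true });

client.on('listening', () => {
  // Source-specific multicast join (IGMPv3): accept packets for group
  // 239.195.9.9 only if they originate from 91.203.253.235
  client.addSourceSpecificMembership('91.203.253.235', '239.195.9.9');
});

client.on('message', (message, remote) => {
  console.log(`Message from ${remote.address}:${remote.port} - ${message}`);
});

client.bind(42009);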

Node: PORT 443 requires elevated privileges error

I'm using PM2 to start the application and I pass PORT=443 as a parameter when starting the app. However, it fails with an error saying "PORT 443 requires elevated privileges", even though I have generated the certificate and key using openssl and referenced them in the code. I'd appreciate your support.
#!/usr/bin/env node
var app = require('../app');
var debug = require('debug')('ls-templates-server:server');
var https = require('https');
var fs = require('fs');

/**
 * Get port from environment and store in Express.
 */
var port = normalizePort(process.env.PORT || '3000');
app.set('port', port);

var options = {
  key: fs.readFileSync('/home/admin/cert/server.key'),
  cert: fs.readFileSync('/home/admin/cert/server.cert')
};

var httpsServer = https.createServer(options, app);

/* for https (production stage) */
httpsServer.listen(port, "0.0.0.0");
httpsServer.on('error', onError);
httpsServer.on('listening', onListening);

/**
 * Normalize a port into a number, string, or false.
 */
function normalizePort(val) {
  var port = parseInt(val, 10);
  if (isNaN(port)) {
    // named pipe
    return val;
  }
  if (port >= 0) {
    // port number
    return port;
  }
  return false;
}

/**
 * Event listener for HTTP server "listening" event.
 */
function onListening() {
  var addr = httpsServer.address();
  var bind = typeof addr === 'string'
    ? 'pipe ' + addr
    : 'port ' + addr.port;
  debug('Listening on ' + bind);
}
Option 1.
Run PM2 as sudo.
If that is not acceptable, achieve it using authbind:
sudo apt-get install authbind
sudo touch /etc/authbind/byport/443
sudo chown %user% /etc/authbind/byport/443
sudo chmod 755 /etc/authbind/byport/443
Edit the ~/.bashrc file and add
alias pm2='authbind --deep pm2'
at the end of the file, then run
source ~/.bashrc
Finally, ensure that pm2 is updated with authbind:
authbind --deep pm2 update
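A further alternative not covered in this answer: on Linux you can grant the node binary itself the CAP_NET_BIND_SERVICE capability, which lets it bind to ports below 1024 without root. A sketch (note this applies to every process run with that binary and must be reapplied whenever node is upgraded or replaced):

sudo setcap 'cap_net_bind_service=+ep' "$(readlink -f "$(which node)")"
# verify the capability took effect
getcap "$(readlink -f "$(which node)")"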
Option 2
Use a different port and let Nginx reverse proxy your application.
E.g.: change your port to 3000.
In Nginx, create a server block which forwards requests to your application.
Example server block:
server {
  #listen [::]:80;
  server_name your-domain.com;
  #root /var/www/example.com;
  #index index.html;

  client_max_body_size 20M;

  location / {
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_set_header Host $http_host;
    proxy_set_header X-NginX-Proxy true;
    proxy_pass http://localhost:3000;
    proxy_redirect off;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "Upgrade";
    proxy_connect_timeout 500000;
    proxy_send_timeout 500000;
    proxy_read_timeout 500000;
    send_timeout 500000;
  }

  listen 443 ssl;
  ssl_certificate /home/admin/cert/server.cert;
  ssl_certificate_key /home/admin/cert/server.key;

  if ($scheme = http) {
    return 301 https://$server_name$request_uri;
  }
}
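After saving the server block, the configuration can be validated and reloaded with the usual commands (the service name may vary by distribution):

sudo nginx -t
sudo systemctl reload nginx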
It's best to use the Nginx or authbind method: run as little as possible as a privileged user, since you want to limit the potential damage if someone exploits your program. You don't want to run your Node code as root unless you absolutely have to.
Refer to:
https://pm2.keymetrics.io/docs/usage/specifics/#listening-on-port-80-w-o-root
https://www.digitalocean.com/community/tutorials/how-to-use-pm2-to-setup-a-node-js-production-environment-on-an-ubuntu-vps

Varnish Vcl for multiple incoming and outgoing port connections

For a little while now I have been using Varnish Cache, and as long as the configuration stayed more or less the same as it was after installation, everything worked out very well.
But now I'd like to do a little bit more with Varnish. Currently I am using the following setup for my servers:
Visitors -> CloudFlare -> HAProxy -> Varnish (separate servers) -> Apache2 content.
What I'd like to know is how to write the right VCL script to accept, say, an incoming request from IP A on port B and redirect it to IP C on port D (and this more than once).
Example:
The daemon options in /etc/default/varnish work well like this:
DAEMON_OPTS="-a :8085,:8087 \
             -T localhost:6082 \
             -f /etc/varnish/default.vcl \
             -S /etc/varnish/secret \
             -s malloc,768m"
But now about the default.vcl:
backend default_1 { .host = "11.22.333.444"; .port = "8885"; }
backend default_2 { .host = "11.22.333.444"; .port = "8887"; }
And I tried something like this:
sub vcl_recv {
  if (server.port == 8885) { set req.backend = default_1; }
  if (server.port == 8887) { set req.backend = default_2; }
}
(Please be aware that both requests go to the same outgoing server; only the port is different!)
Anyone who knows Varnish well enough probably already sees what I want. I'd just like to use Varnish to proxy separate 'channels' based on different ports.
Check against the port Varnish is bound to (the -a listen ports), not the backend port:
sub vcl_recv {
  if (server.port == 8085) { set req.backend = default_1; }
  if (server.port == 8087) { set req.backend = default_2; }
}
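A side note for anyone on a newer Varnish: VCL 4.x removed server.port and renamed req.backend, so the same idea would be written roughly like this (a sketch using vmod_std, untested against the setup above):

vcl 4.0;
import std;

backend default_1 { .host = "11.22.333.444"; .port = "8885"; }
backend default_2 { .host = "11.22.333.444"; .port = "8887"; }

sub vcl_recv {
  # std.port(server.ip) yields the listen port the request arrived on
  if (std.port(server.ip) == 8085) { set req.backend_hint = default_1; }
  if (std.port(server.ip) == 8087) { set req.backend_hint = default_2; }
}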

Hanging file descriptors in nodejs with simple web server

I have had a very interesting problem that I cannot seem to solve. It actually may not be a problem at all, but rather something built into node.js. I am having an issue with file descriptors staying around longer than expected after a response has been sent. They seem to persist in the ESTABLISHED state for up to 2 minutes after any data has been sent. This is causing issues with our production servers: even though we are serving static files, the number of file descriptors open at one time can be very high, and they frequently hit our system limit (Linux) and cause EMFILE issues. I realize we could raise our hard ulimit above 1024, but this seems like a hacky fix to a different issue. It would seem to me that as soon as the socket is closed, the file descriptor should be released.
I have node.js version: 0.10.24
I have been able to replicate this issue with a small amount of code.
http server:
var http = require("http"),
url = require("url"),
path = require("path"),
fs = require("fs")
port = process.argv[2] || 8888;
http.createServer(function(request, response) {
var uri = url.parse(request.url).pathname
, filename = path.join(process.cwd(), uri);
var contentTypesByExtension = {
'.html': "text/html",
'.css': "text/css",
'.js': "text/javascript"
};
path.exists(filename, function(exists) {
fs.readFile(filename, "binary", function(err, file) {
if(err) {
response.writeHead(500, {"Content-Type": "text/plain"});
response.write(err + "\n");
response.end();
return;
}
var headers = {};
var contentType = contentTypesByExtension[path.extname(filename)];
if (contentType) headers["Content-Type"] = contentType;
response.writeHead(200, headers);
response.write(file, "binary");
response.end();
});
});
}).listen(parseInt(port, 10));
console.log("Static file server running...");
Make a request to the server. I made a simple request for a static js file. Monitor the file descriptors before and after making the request.
$ lsof -i -n -P | grep node
My results:
After starting server and before request:
node 16787 ncswenson 11u IPv4 0x884c32306a467c65 0t0 TCP *:8888 (LISTEN)
After request completes (up to 2 minutes after request):
node 16787 ncswenson 11u IPv4 0x884c32306a467c65 0t0 TCP *:8888 (LISTEN)
node 16787 ncswenson 12u IPv4 0x884c32306a2773b5 0t0 TCP 127.0.0.1:8888->127.0.0.1:49399 (ESTABLISHED)
Is this how node should properly behave? Leave the file descriptor open for minutes after a request? Is there a way to circumvent this issue? Is there a way to better investigate this issue?
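Worth noting: two minutes is exactly the default idle timeout Node's HTTP server applies to sockets in this era (server.timeout defaults to 120000 ms), so the lingering descriptors are most likely idle keep-alive connections rather than leaked ones. A minimal sketch of two ways to shorten their lifetime, assuming keep-alive is indeed the cause:

var http = require("http");

var server = http.createServer(function(request, response) {
  // Option 2: opt out of keep-alive on a per-response basis
  response.setHeader("Connection", "close");
  response.end("ok");
});

// Option 1: drop idle sockets after 5 seconds instead of the 2-minute default
// (server.setTimeout is available from node 0.9.12 onward)
server.setTimeout(5000);

server.listen(8888);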

NodeJS - Socket.IO Setup: served static content no handshake (Ubuntu on Rackspace Cloud Server)

I have installed node.js with Socket.IO on a Rackspace server running Ubuntu.
When I try a simple server app and make a client request, I get only 'served static content' instead of a handshake. In the browser's debugger I can see "Hello S...", and on the server side:
# node socket-server.js
info - socket.io started
debug - served static content /socket.io.js
debug - served static content /socket.io.js
I'm not sure where to start looking for the problem (the same script works in local development).
Why does node.js serve only static content and not perform the handshake?
Iptables allows port 8866:
# iptables -L
Chain INPUT (policy ACCEPT)
target prot opt source destination
ACCEPT all -- anywhere anywhere
ACCEPT all -- anywhere anywhere state RELATED,ESTABLISHED
ACCEPT tcp -- anywhere anywhere tcp dpt:ssh
ACCEPT tcp -- anywhere anywhere tcp dpt:www
ACCEPT tcp -- anywhere anywhere tcp dpt:8866
DROP all -- anywhere anywhere
Chain FORWARD (policy ACCEPT)
target prot opt source destination
Chain OUTPUT (policy ACCEPT)
target prot opt source destination
ACCEPT tcp -- anywhere anywhere tcp dpt:8866
Here is a simple server app 'socket-server.js':
// Require HTTP module (to start server) and Socket.IO
var http = require('http'), io = require('socket.io');

// Start the server at port 8866
var server = http.createServer(function(req, res){
  // Send HTML headers and message
  res.writeHead(200, { 'Content-Type': 'text/html' });
  res.end('<h1>Hello Socket Lover!</h1>');
});
server.listen(8866);

// Create a Socket.IO instance, passing it our server
var socket = io.listen(server);

// Add a connect listener
socket.on('connection', function(client){
  // Create periodical which sends a message to the client every 5 seconds
  var interval = setInterval(function() {
    client.send('This is a message from the server! ' + new Date().getTime());
  }, 5000);

  // Success! Now listen to messages to be received
  client.on('message', function(event){
    console.log('Received message from client!', event);
  });

  client.on('disconnect', function(){
    clearInterval(interval);
    console.log('Server has disconnected');
  });
});
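As a quick sanity check that the server is reachable from outside at all, a plain HTTP request to the same port should return the greeting page from the handler above (SERVERDOMAIN standing in for the real domain, as in the client below):

curl -i http://SERVERDOMAIN:8866/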
Here is a simple client (SERVERDOMAIN is replaced with real domain):
<!DOCTYPE html>
<html>
<head>
<script src="http://SERVERDOMAIN:8866/socket.io/socket.io.js"></script>
<script>
  // Create SocketIO instance
  var socket = io.connect('SERVERDOMAIN:8866');

  // Add a connect listener
  socket.on('connect', function() {
    log('<span style="color:green;">Client has connected to the server!</span>');
  });

  // Add a message listener
  socket.on('message', function(data) {
    log('Received a message from the server: ' + data);
  });

  // Add a disconnect listener
  socket.on('disconnect', function() {
    log('<span style="color:red;">The client has disconnected!</span>');
  });

  // Sends a message to the server via sockets
  function sendMessageToServer(message) {
    socket.send(message);
    log('<span style="color:#888">Sending "' + message + '" to the server!</span>');
  }

  // Outputs to console and list
  function log(message) {
    var li = document.createElement('li');
    li.innerHTML = message;
    document.getElementById('message-list').appendChild(li);
  }
</script>
</head>
<body>
  <p>Messages will appear below (and in the console).</p><br />
  <ul id="message-list"></ul>
  <ul style="margin:20px 0 0 20px;">
    <li>Type <code>socket.disconnect()</code> to disconnect</li>
    <li>Type <code>socket.connect()</code> to reconnect</li>
    <li>Type <code>sendMessageToServer('Your Message')</code> to send a message to the server</li>
  </ul>
</body>
</html>
In your client code, try this:
var socket = io.connect('http://SERVERDOMAIN:8866');
This problem is mostly caused by an incorrect URL (note the explicit http:// scheme).
I was testing my socket.io application with a couple of Android devices (a phone and a tablet) along with desktop browsers over a Wi-Fi connection. Based on beNerd's answer above I changed my
io.connect('http://localhost:3000');
to
io.connect('http://192.168.5.3:3000'); // IP address of the machine where the node.js app is running
and it worked.
Hope this answer is helpful to users testing their NodeJS and Socket.IO applications over a Wi-Fi connection.
I ended up doing a completely new server installation, and everything is working fine now. I used Ubuntu 12.04 LTS (Precise Pangolin) and installed node.js with socket.io.
For the client I'm using a local copy of socket.io.js along with the Flash file.
;-)
