Problem populating quality selector in http live streaming using nginx and video.js - http-live-streaming

I recently started using nginx and video.js to establish a live stream. Problem: If I use videojs-contrib-quality-levels and videojs-hls-quality-selector as addons for video.js, they should automatically insert a quality chooser based on the downloaded playlist with the HLS variants. But this is not the case: it just adds the quality menu with only the Auto option activated. Why does the HLS playlist or the player not access the variants and render the menu correctly?
Versions:
video.js:7.6.6
videojs-contrib-quality-levels: 2.0.9
videojs-hls-quality-selector: 1.1.1
Here is my code to insert and start the player:
this.videoJSplayer = videojs('video_player', {
html5: {
hls: {
overrideNative:true,
//withCredentials: true
},
controls: false,
autoplay: false,
preload: 'auto'
}
this.videoJSplayer.src([{type:'application/x-mpegURL',src: URL + ".m3u8"}]);
this.videoJSplayer.controls('true');
this.videoJSplayer.play();
this.isButtonVisible = false;
this.videoJSplayer.hlsQualitySelector();
This is how my playlist looks like:
#EXT-X-STREAM-INF:PROGRAM-ID=1,CLOSED-CAPTIONS=NONE,BANDWIDTH=288000
test2_low.m3u8
#EXT-X-STREAM-INF:PROGRAM-ID=1,CLOSED-CAPTIONS=NONE,BANDWIDTH=2048000
test2_hd720.m3u8
#EXT-X-STREAM-INF:PROGRAM-ID=1,CLOSED-CAPTIONS=NONE,BANDWIDTH=4096000
test2_src.m3u8

The solution is rather easy: In the playlist add the option RESOLUTION like
hls_variant _src BANDWIDTH=4096000 RESOLUTION=1920x1080;
This will enable the plugin to correctly render the chooser. There is no manual about this.
The config now looks like this
worker_processes 1;
events {
worker_connections 1024;
}
# RTMP configuration
rtmp {
server {
listen 1935; # Listen on standard RTMP port
chunk_size 2048;
# This application is to accept incoming stream
application 00kSLqEV5a6LYVfFa1jG {
live on; # Allows live input
exec ffmpeg -i rtmp://127.0.0.1/$app/$name
-c:v libx264 -c:a libfdk_aac -b:v 768k -b:a 96k -vf "scale=720:trunc(ow/a/2)*2" -tune zerolatency -preset veryfast -crf 23 -f flv rtmp://127.0.0.1/show/$name_low
-c:v libx264 -c:a libfdk_aac -b:v 1920k -b:a 128k -vf "scale=1280:trunc(ow/a/2)*2" -tune zerolatency -preset veryfast -crf 23 -f flv rtmp://127.0.0.1/show/$name_hd720
-c copy -f flv rtmp://127.0.0.1/show/$name_src;
on_publish #server_auth;
}
application show {
live on;
# Turn on HLS
hls on;
hls_path #YOUR_PATH;
hls_fragment 5;
hls_playlist_length 30;
# disable consuming the stream from nginx as rtmp
allow publish 127.0.0.1;
allow publish 139.18.13.224;
deny publish all;
hls_variant _low BANDWIDTH=288000 RESOLUTION=848x480;
hls_variant _hd720 BANDWIDTH=2048000 RESOLUTION=1280x720;
hls_variant _src BANDWIDTH=4096000 RESOLUTION=1920x1080;
}
}
}
http {
sendfile off;
tcp_nopush on;
# aio on;
directio 512;
default_type application/octet-stream;
include /usr/local/nginx/conf/mime.types;
server {
listen 80;
location / {
# Disable cache
add_header 'Cache-Control' 'no-cache';
# rewrite ^(/hls)/(\w+)$ $1/$200kSLqEV5a6LYVfFa1jG.m3u8;
# CORS setup
add_header 'Access-Control-Allow-Origin' '*' always;
add_header 'Access-Control-Expose-Headers' 'Content-Length';
add_header 'X-Frame-Options' 'DENY' always;
# allow CORS preflight requests
if ($request_method = 'OPTIONS') {
add_header 'Access-Control-Allow-Origin' '*' always;
add_header 'Access-Control-Max-Age' 1728000;
add_header 'Content-Type' 'text/plain charset=UTF-8';
add_header 'Content-Length' 0;
return 204;
}
types {
application/dash+xml mpd;
application/vnd.apple.mpegurl m3u8;
video/mp2t ts;
text/html html;
application/x-javascript js;
# text/javascript js;
text/css css;
}
index index.html;
root #stream_root;
location ~ \.php$ {
fastcgi_split_path_info ^(.+\.php)(/.+)$;
fastcgi_pass unix:/var/run/php-fpm.sock;
set $path_info $fastcgi_path_info;
fastcgi_param PATH_INFO $path_info;
fastcgi_index index.php;
include fastcgi.conf;
fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
fastcgi_param SCRIPT_NAME $fastcgi_script_name;
fastcgi_param HTTP_PROXY "";
proxy_set_header X-Forwarded-Uri /matomo;
}
}
}
}

Related

ERR_CONNECTION_TIMED_OUT when calling backend NodeJS server behind VPN

I just created a website in reactJS (create-react-app) with a login possibility.
I configured autosigned SSL certificates with letsencrypt, added some DNS entries for HSTS and everything that make my website properly traffic-encrypted. My website is running on port 3001 (front-end) and my nodeJs backend is up on port 3000.
Everything works fine, however when some people try to connect to my website behind some VPN (not all of them), they see the page of my app (front-end) but when they try to login (connection to back-end), they get an ERR_CONNECTION_TIMED_OUT.
I cannot reproduce the bug because I do not have such a VPN (with my NordVPN it's working OK). So I would like you to help me discover where this problem stems from.
Here is my nginx config file:
# xxx.fr nginx config file
user xxx;
worker_processes 1;
error_log /var/log/nginx/error.log;
pid /run/nginx.pid;
events {
worker_connections 1024;
use epoll;
}
http {
upstream frontends {
server xxx.fr:3001;
}
charset utf-8;
include /etc/nginx/mime.types;
default_type application/octet-stream;
access_log /var/log/nginx/access.log;
keepalive_timeout 65;
proxy_read_timeout 200;
sendfile on;
tcp_nopush on;
tcp_nodelay on;
gzip on;
gzip_min_length 1000;
gzip_proxied any;
gzip_types text/html text/css text/xml
application/x-javascript application/xml
application/atom+xml text-javascript;
proxy_next_upstream error;
#include /etc/nginx/sites-enabled/*;
server {
# default_server;
#listen [::]:80;
add_header Strict-Transport-Security 'max-age=31536000; includeSubDomains; preload ';
#add_header Content-Security-Policy "default-src 'self';";
server_name xxx.fr www.xxx.fr;
client_max_body_size 50M;
location ^~ /build/static {
root /home/xxx/x/public;
index index.html;
if ($query_string) {
expires max;
}
}
location = /favicon.ico {
rewrite (.*) /public/favicon.ico;
}
location = robots.txt {
rewrite (.*) /public/robots.txt;
}
location / {
proxy_pass_header Server;
#add_header Strict-Transport-Security "max-age=31536000; includeSubDomains, preload" always;
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_redirect off;
proxy_set_header X-Scheme $scheme;
proxy_pass http://frontends;
proxy_ssl_name $host;
proxy_ssl_server_name on;
}
listen 443 ssl; # managed by Certbot
ssl_certificate /etc/letsencrypt/live/xxx.fr/fullchain.pem; # managed by Certbot
ssl_certificate_key /etc/letsencrypt/live/xxx.fr/privkey.pem; # managed by Certbot
include /etc/letsencrypt/options-ssl-nginx.conf; # managed by Certbot
ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem; # managed by Certbot
}
server {
if ($host = www.xxx.fr) {
return 301 https://$host$request_uri;
} # managed by Certbot
if ($host = xxx.fr) {
return 301 https://$host$request_uri;
} # managed by Certbot
#listen 80;
server_name xxx.fr www.xxx.fr;
return 404; # managed by Certbot
}
}
And in my nodeJS server (back-end), I have the following header set:
app.use(function (req, res, next) {
res.header("Access-Control-Allow-Origin", "*");
res.header("Access-Control-Allow-Methods", "*");
res.header("Access-Control-Allow-Credentials", true);
// res.header("Access-Control-Allow-Credentials", "true");
res.header(
"Access-Control-Allow-Headers",
"Origin, X-Requested-With, Content-Type, Accept, Authorization"
);
next();
});
Can anyone tell me how to fix the ERR_CONNECTION_TIMED_OUT ?
Thank you so much

modsecurity does not work if no required SSL certificate was sent

I have a lot of rules in modsecurity but none works if the host is numeric in SSL https://SERVER_IP, i get this response:
400 Bad Request No required SSL certificate was sent
My SSL certificate is only valid for my domain name, but shouldn't modsecurity work anyway? Because every request passes through modsecurity before going to the application, or something like that.
Question:
1 - How can I fix it?
2 - Why does modsecurity not work, and am I vulnerable if I don't fix it?
This is my nginx.conf:
load_module modules/ngx_http_modsecurity_module.so;
user nobody;
worker_processes 1;
error_log /var/log/nginx/error.log error;
pid /var/run/nginx.pid;
events {
worker_connections 5000;
use epoll;
multi_accept on;
}
http {
sendfile on;
tcp_nopush on;
tcp_nodelay on;
modsecurity on;
modsecurity_rules_file /etc/nginx/modsec/main.conf;
client_header_timeout 20s;
client_body_timeout 20s;
client_max_body_size 20m;
client_header_buffer_size 6k;
client_body_buffer_size 128k;
large_client_header_buffers 2 2k;
send_timeout 10s;
keepalive_timeout 30 30;
reset_timedout_connection on;
server_names_hash_max_size 1024;
server_names_hash_bucket_size 1024;
ignore_invalid_headers on;
connection_pool_size 256;
request_pool_size 4k;
output_buffers 4 32k;
postpone_output 1460;
include mime.types;
default_type application/octet-stream;
# SSL Settings
ssl_certificate /etc/nginx/ssl/cf_cert.pem;
ssl_certificate_key /etc/nginx/ssl/cf_key.pem;
ssl_client_certificate /etc/nginx/ssl/origin-pull-ca.pem;
ssl_verify_client on;
ssl_verify_depth 5;
ssl_session_cache shared:SSL:10m;
ssl_session_timeout 1h;
ssl_protocols TLSv1.1 TLSv1.2;
ssl_prefer_server_ciphers on;
ssl_ciphers "EECDH+ECDSA+AESGCM:EECDH+aRSA+AESGCM:EECDH+ECDSA+SHA384:EECDH+ECDSA+SHA256:EECDH+aRSA+SHA384:EECDH+aRSA+SHA256:EECDH+aRSA!RC4:EECDH:!RC4:!aNULL:!eNULL:!LOW:!3DES:!MD5:!EXP:!PSK:!SRP:!DSS";
ssl_session_tickets on;
ssl_session_ticket_key /etc/nginx/ssl/ticket.key;
ssl_dhparam /etc/nginx/ssl/dhparam.pem;
ssl_ecdh_curve secp384r1;
ssl_buffer_size 4k;
# Logs
log_format main '$remote_addr - $remote_user [$time_local] $request '
'"$status" $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
log_format bytes '$body_bytes_sent';
access_log off;
# Cache bypass
map $http_cookie $no_cache {
default 0;
~SESS 1;
~wordpress_logged_in 1;
}
etag off;
server_tokens off;
# Headers
add_header X-XSS-Protection "1; mode=block" always;
add_header X-Frame-Options deny always;
server {
listen 443 ssl http2;
server_name domain.com;
root /home/user/public_html;
index index.php index.html;
access_log /var/log/domain/domain.com.bytes bytes;
access_log /var/log/domain/domain.com.log combined;
error_log /var/log/domain/domain.com.error.log warn;
location / {
location ~.*\.(jpeg|jpg|png|gif|bmp|ico|svg|css|js)$ {
expires max;
}
location ~ [^/]\.php(/|$) {
try_files $uri =404;
fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
fastcgi_pass unix:/opt/alt/php-fpm73/usr/var/sockets/user.sock;
fastcgi_index index.php;
include /etc/nginx/fastcgi_params;
}
}
}
}
In short: This is unrelated to modsecurity.
Your server configuration requires the client to send client certificate. The TLS handshake will fail, if the client does not send such certificate - and this is the error you see.
modsecurity only analyzes the application data at the HTTP level. With HTTPS, the TLS handshake first needs to be completed successfully before any application data gets exchanged. Since in this case the TLS handshake fails due to no certificate being sent by the client, the connection gets closed before any HTTP data gets exchanged and thus before modsecurity is used.

Graphs not showing in Observium

I have installed Observium, which is successfully pulling all information from all devices, but it is not showing graphs.
I can see files in /opt/observium/rrd
I do not see any error when I manually execute:
cd /opt/observium && ./discovery.php -h all && ./poller.php -h all
I have tested in Chrome and Firefox.
This is my config.php
<?php
## Check http://www.observium.org/docs/config_options/ for documentation of possible settings
// Database config --- This MUST be configured
$config['db_extension'] = 'mysqli';
$config['db_host'] = '192.168.1.10';
$config['db_user'] = 'observium';
$config['db_pass'] = 'somepass';
$config['db_name'] = 'observium';
// Base directory
#$config['install_dir'] = "/opt/observium";
// Default community list to use when adding/discovering
$config['snmp']['community'] = array("public");
// Authentication Model
$config['auth_mechanism'] = "mysql"; // default, other options: ldap, http-auth, please see documentation for config help
// Enable alerter
// $config['poller-wrapper']['alerter'] = TRUE;
//$config['web_show_disabled'] = FALSE; // Show or not disabled devices on major pages.
// Set up a default alerter (email to a single address)
$config['email']['default'] = "admin#mydomain.com";
$config['email']['from'] = "Observium <observium#mydomain.com>";
$config['email']['default_only'] = TRUE;
$config['enable_syslog'] = 1; // Enable Syslog
// End config.php
If I can provide any other information which could solve this please let me know.
Settings:
Device graph settings:
Nginx conf:
server {
listen 80;
server_name observium.mydomain.com;
root /opt/observium/html;
client_max_body_size 10m;
client_body_buffer_size 8K;
client_header_buffer_size 1k;
large_client_header_buffers 4 8k;
location / {
index index.php index.html index.htm;
autoindex on;
try_files $uri $uri/ /index.php;
location ~ \.php$ {
fastcgi_split_path_info ^(.+\.php)(/.+)$;
fastcgi_pass unix:/var/run/php/php7.0-fpm.sock;
fastcgi_index index.php;
fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
include fastcgi_params;
}
}
}
Thank you
Found problem.
At the top of config.php file was empty line before <?php

API endpoint performance

I am attempting to benchmark the perf of my rest API endpoint. It's a node/mongdb endpoint with a pretty nasty aggregate query - a few unwraps, groups and a lookup. I am using Apache Bench (ab) to test it perf under 100 concurrency 1000 times. My issue: I am getting different results based on where I run the ab test:
localhost:3000/api_endpoint/ using ngrok to tunnel it = I am
getting around 50 requests per second.
Running the test on the
production app server directly where the app is hosted I am seeing
6000 requests per second.
Running it from my web server/reverse proxy I am seeing similarly 5000reqs per second -if I use a relative path to the API.
NOW: If I run the same test on the web server
but this time use the domain name instead or run it from local dev
machine but hitting the domain, now it drops to 9 requests per
second.
What can I deduce from this? To me, it seems the query must not be that nasty if I am getting such high perf when I hit it directly. It seems I either have something configured wrong in my nginx.conf file for handling my domain requests, or there is a DNS resolution problem? It's an exotic TLD (.ly) — could that be it? Where should I look?
Edit: Adding my nginx.conf file for analysis:
user www-data;
worker_processes 1;
#error_log /log/nginx/error.log;
pid /run/nginx.pid;
# Load dynamic modules. See /usr/share/nginx/README.dynamic.
#include /usr/share/nginx/modules/*.conf;
events {
worker_connections 1024;
}
http {
#optimizations per digitalOcean
client_body_buffer_size 10K;
client_header_buffer_size 1k;
client_max_body_size 8m;
large_client_header_buffers 2 1k;
client_body_timeout 12;
client_header_timeout 12;
keepalive_timeout 15;
send_timeout 10;
#compression
gzip on;
gzip_disable "msie6";
gzip_proxied any;
gzip_comp_level 6;
gzip_buffers 16 8k;
gzip_http_version 1.1;
gzip_min_length 256;
gzip_proxied expired no-cache no-store private auth;
gzip_types text/plain application/x-javascript text/xml text/css application/xml application/json text/xml application/xml+rss application/javascript text/javascript application/vnd.ms-fontobject application/x-font-ttf font/opentype image/svg+xml image/x-icon;
access_log off;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
#access_log /var/log/nginx/access.log main;
sendfile on;
tcp_nopush on;
tcp_nodelay on;
types_hash_max_size 2048;
include /etc/nginx/mime.types;
default_type application/octet-stream;
# Load modular configuration files from the /etc/nginx/conf.d directory.
# See http://nginx.org/en/docs/ngx_core_module.html#include
# for more information.
include /etc/nginx/conf.d/*.conf;
#redirect to https
server {
server_name www.staq.ly staq.ly getstackrank.com www.getstackrank.com;
return 301 https://staq.ly;
}
server {
listen 80;
listen 443 ssl;
server_name staq.ly;
ssl_certificate /etc/nginx/ssl/staqly.crt;
ssl_certificate_key /etc/nginx/ssl/staqly.key;
root /var/local;
location ~ ^/(sitemap.xml) {
}
location ~ ^/(robots.txt) {
}
location ~ ^/(googlee828ea2f1ef594b3.html) {
}
location / {
try_files $uri #prerender;
}
location #prerender {
proxy_set_header X-Prerender-Token XXXXXXXX;
set $prerender 0;
if ($http_user_agent ~* "baiduspider|twitterbot|facebookexternalhit|rogerbot|linkedinbot|embedly|quora link preview|showyoubot|outbrain|pinterest|slackbot|vkShare|W3C_Validator") {
set $prerender 1;
}
if ($args ~ "_escaped_fragment_") {
set $prerender 1;
}
if ($http_user_agent ~ "Prerender") {
set $prerender 0;
}
if ($uri ~* "\.(js|css|xml|less|png|jpg|jpeg|gif|pdf|doc|txt|ico|rss|zip|mp3|rar|exe|wmv|doc|avi|ppt|mpg|mpeg|tif|wav|mov|psd|ai|xls|mp4|m4a|swf|dat|dmg|iso|flv|m4v|torrent|ttf|woff|svg|eot)") {
set $prerender 0;
}
#resolve using Google's DNS server to force DNS resolution and prevent caching of IPs
resolver 8.8.8.8;
if ($prerender = 1) {
#setting prerender as a variable forces DNS resolution since nginx caches IPs and doesnt play well with load balancing
set $prerender "service.prerender.io";
rewrite .* /$scheme://$host$request_uri? break;
proxy_pass http://$prerender;
}
if ($prerender = 0) {
# rewrite .* /index.ejs break; # Throw away the path because this is a single page web app with only an index.html
proxy_pass http://10.132.126.36:3000;
}
}
}
}

I get 502 and 504 error from nginx in node.js

I changed the config file to increase the timeouts and upload size, and couldn't find the php-fpm file. Whenever I click, after some time it gets stuck and gives a 504 error and then a 502 error every time.
This is my nginx.conf file, which is located at /etc/nginx/nginx.conf. Please give me the solution for this issue, as my project is on hold because of these things and I need to launch my web app as soon as possible.
user www-data;
worker_processes auto;
pid /run/nginx.pid;
events {
worker_connections 768;
# multi_accept on;
}
http {
##
# Basic Settings
##
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 600;
types_hash_max_size 2048;
# set client body size to 500M #
proxy_connect_timeout 600;
proxy_send_timeout 600;
proxy_read_timeout 600;
send_timeout 600;
client_header_timeout 3000;
client_body_timeout 3000;
fastcgi_read_timeout 3000;
client_max_body_size 800m;
fastcgi_buffers 8 128k;
fastcgi_buffer_size 128k;
#client_max_body_size 800M;
#client_header_timeout 3000;
#client_body_timeout 3000;
#fastcgi_read_timeout 3000;
#client_max_body_size 800m;
#fastcgi_buffers 8 128k;
#fastcgi_buffer_size 128k;
#proxy_connect_timeout 600;
#proxy_send_timeout 600;
#proxy_read_timeout 600;
#send_timeout 600;
# server_tokens off;
# server_names_hash_bucket_size 64;
# server_name_in_redirect off;
include /etc/nginx/mime.types;
default_type application/octet-stream;
##
# SSL Settings
##
ssl_protocols TLSv1 TLSv1.1 TLSv1.2; # Dropping SSLv3, ref: POODLE
ssl_prefer_server_ciphers on;
##
# Logging Settings
##
access_log /var/log/nginx/access.log;
error_log /var/log/nginx/error.log;
##
# Gzip Settings
##
gzip on;
gzip_disable "msie6";
# gzip_vary on;
# gzip_proxied any;
# gzip_comp_level 6;
# gzip_buffers 16 8k;
# gzip_http_version 1.1;
# gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript;
##
# Virtual Host Configs
##
include /etc/nginx/conf.d/*.conf;
include /etc/nginx/sites-enabled/*;
}
#mail {
# # See sample authentication script at:
# # http://wiki.nginx.org/ImapAuthenticateWithApachePhpScript
#
# # auth_http localhost/auth.php;
# # pop3_capabilities "TOP" "USER";
# # imap_capabilities "IMAP4rev1" "UIDPLUS";
#
# server {
# listen localhost:110;
# protocol pop3;
# proxy on;
# }
#
# server {
# listen localhost:143;
# protocol imap;
# proxy on;
# }
#}

Resources