Is it possible to use varnish-counters in VCL in conditions?
I would like to write a condition based on the current value of MAIN.backend_conn, but I cannot figure out whether I can use statistics in VCL, even via inline C.
Current Solution
Now I am using configuration like this:
# Primary backend. Its health probe doubles as a connection-count check:
# /check-connections.php returns 502 when the web server has too many
# active connections, which makes Varnish mark this backend sick.
backend default {
.host = "192.168.122.11";
.probe = {
.url = "/check-connections.php";
.interval = 1s;
.threshold = 4;
}
}
# Backend reserved for requests carrying a session cookie; probed on a
# separate, cheaper endpoint at a slower interval.
backend sessionWorker {
.host = "192.168.122.11";
.probe = {
.url = "/other-probe";
.interval = 5s;
.threshold = 2;
}
}
# Route session-cookie traffic to sessionWorker, otherwise reject with 503.
# NOTE(review): the "|| std.healthy(req.backend_hint)" term also sends
# cookie-less requests to sessionWorker whenever the default backend is
# healthy — confirm this is intended and not an inverted condition.
sub vcl_recv {
if (req.http.cookie ~ "(^|;\s*)(SESS=)" || std.healthy(req.backend_hint)) {
set req.backend_hint = sessionWorker;
} else {
return (synth(503, "Server overloaded"));
}
}
check-connections.php reads the nginx status page (active connections) and returns an error status if there are too many active connections:
// Report 502 when nginx has more than 10 active connections so the
// Varnish probe marks this backend sick; report 200 otherwise.
http_response_code($active > 10 ? 502 : 200);
I would like to find a solution that replaces std.healthy(req.backend_hint) with the current number of connections to the backend (VBE.conn) directly in the VCL.
There are some counters implemented already. Like bereq.retries:
# Retry a failing backend fetch up to 5 times before giving up.
sub vcl_backend_response {
if (beresp.status == 503 && bereq.retries < 5 ) {
return(retry);
}
}
# After the 5th failed retry, serve a static error page read from disk
# (requires "import std;" for std.fileread).
sub vcl_backend_error {
if (beresp.status == 503 && bereq.retries == 5) {
synthetic(std.fileread("/path/to/my/file/varnish503.html"));
return(deliver);
}
}
Maybe this is already what you need. Otherwise, here is a list of other built-in counters.
Related
I have a simple rule to redirect traffic to a special backend if the User-Agent is GlobalSign or if the request URL is /globalsign. I have noticed that on rare occasions Varnish will incorrectly return content from the special backend. It seems to happen randomly and does not repeat.
# Fragment of vcl_recv (full listing below): GlobalSign requests are
# rewritten to "/" and piped to the dgs1 director.
# NOTE(review): return(pipe) attaches the whole TCP connection to dgs1,
# so later keep-alive requests on that connection bypass these checks —
# see the discussion further down.
if (req.http.User-Agent ~ "(?i)GlobalSign" || req.url ~ "^/globalsign" ) {
set req.url = "/";
set req.backend = dgs1;
return(pipe);
}
Backend rules
# Main content backend, probed every 5s on /service_up.
backend b1 {
//Backend 1
.host = "10.8.8.16";
.port = "80";
.probe = {
.url = "/service_up";
.timeout = 1s;
.interval = 5s;
.window = 10;
.threshold = 8;
}
}
# Dedicated backend for GlobalSign traffic; note the longer probe timeout.
backend gs1 {
// Set host: Globalsign
.host = "10.8.8.15";
.port = "80";
.probe = {
.url = "/service_up";
.timeout = 5s;
.interval = 5s;
.window = 10;
.threshold = 8;
}
}
# Single-member random director wrapping gs1 (GlobalSign traffic).
director dgs1 random {
{
.backend = gs1;
.weight = 1;
}
}
# Single-member random director wrapping b1 (default traffic).
director d01 random {
{
.backend = b1;
.weight = 1;
}
}
Full VCL
include "backends.vcl";
include "bans.vcl";
include "acl.vcl";
sub vcl_recv {
# Use the director we set up above to answer the request if it's not cached.
set req.backend = d01;
# The health-check endpoint may be answered from cache.
if( req.url ~ "^/service_up" ) {
return(lookup);
}
# Block known-bad networks (ACL defined in acl.vcl).
if(client.ip ~ evil_networks){
error 403 "Forbidden";
}
# GlobalSign traffic goes to its dedicated director.
# NOTE(review): return(pipe) hands the entire TCP connection to dgs1, so
# subsequent keep-alive requests on the same connection bypass these
# checks; prefer return(pass), or force "Connection: close" in vcl_pipe.
if (req.http.User-Agent ~ "(?i)GlobalSign" || req.url ~ "^/globalsign" ) {
set req.url = "/";
set req.backend = dgs1;
return(pipe);
}
# Fixed: the original line was missing its terminating semicolon.
return(pass);
}
sub vcl_fetch {
# Allow stale objects to be served for up to a day while refreshing.
set beresp.grace = 24h;
# Do not cache error responses; remember the pass decision.
if (beresp.status >= 400) {
return (hit_for_pass);
}
# New Set Longer Cache
# Crawlers get long-lived, cookie-free cached responses.
if (req.http.user-agent ~ "(Googlebot|msnbot|Yandex|Slurp|Bot|Crawl|bot|Baid|Mediapartners-Google)") {
unset beresp.http.set-cookie;
set beresp.ttl = 5d;
return (deliver);
}
# Static text assets: 5-day TTL, cookies stripped.
if (req.request == "GET" && req.url ~ "\.(css|xml|txt)$") {
set beresp.ttl = 5d;
unset beresp.http.set-cookie;
return (deliver);
}
// multimedia
if (req.request == "GET" && req.url ~ "\.(gif|jpg|jpeg|bmp|png|tiff|tif|ico|img|tga|woff|eot|ttf|svg|wmf|js|swf|ico)$") {
unset beresp.http.set-cookie;
set beresp.ttl = 5d;
return (deliver);
}
# Everything else also gets a 5-day TTL.
set beresp.ttl = 5d;
return (deliver);
}
include "errors.vcl";
# No custom delivery logic; kept explicit for clarity.
sub vcl_deliver {
return(deliver);
}
I guess return(pipe); is the suspect one.
If you have keep-alive HTTP client making just one request with GlobalSign user agent or to /globalsign url, all subsequent requests will be piped to dgs1, even if they do not meet the criteria.
Try to avoid piping if possible; it is a common source of hard-to-track issues — and possibly a security hole, too.
So I'm running a website, on the front it's a simple Apache home page nothing important. But I want to run a sync client on a sub domain
# Main Apache site on the base domain.
backend apache {
.host = "127.0.0.1";
.port = "8051";
.connect_timeout = 1s;
}
# Node.js sync service (WebSocket / socket.io).
backend node {
.host = "127.0.0.1";
.port = "8081";
.connect_timeout = 1s;
}
# Bot service on the bot subdomain.
backend bot {
.host = "127.0.0.1";
.port = "8080";
.connect_timeout = 1s;
}
sub vcl_recv {
# Host headers are case-insensitive and browsers normally send them in
# lower case, so match case-insensitively. The original exact comparison
# against "Base-domain.com" would almost never match real traffic.
if (req.http.host ~ "(?i)^base-domain\.com$") {
set req.backend = apache;
}
if (req.http.host ~ "(?i)^bot\.base-domain\.com$") {
set req.backend = bot;
}
if (req.http.host ~ "(?i)^sync\.base-domain\.com$") {
set req.backend = node;
}
# WebSocket upgrade requests must be piped straight to the node backend.
if (req.http.Upgrade ~ "(?i)websocket") {
set req.backend = node;
return (pipe);
}
# NOTE(review): the original checked "sync.based-domain.moe" here while
# the block above used "sync.base-domain.com" — unified to one domain;
# confirm which hostname is actually served.
if (req.http.host ~ "(?i)^sync\.base-domain\.com$") {
if (req.url ~ "^/socket.io/") {
set req.backend = node;
return (pipe);
}
return (pass);
}
}
sub vcl_pipe {
# Forward the WebSocket handshake headers to the backend. Varnish strips
# hop-by-hop headers on pipe, so BOTH Upgrade and Connection must be
# copied explicitly — the original only copied Upgrade, which breaks the
# handshake on most WebSocket servers.
if (req.http.upgrade) {
set bereq.http.upgrade = req.http.upgrade;
set bereq.http.connection = req.http.connection;
}
}
Searching all over for information on how to run the node JS as the one with a socket is turning up no help. Due to a connection issue the websocket is on 1338 rather than 1337. Pretty new to this so not sure what to do.
Should I be changing my Varnish configuration, or modifying the node's config further? Thanks in advance.
I wrote Varnish VCL rules for my server and I don't know whether they are sufficient for my purpose. I want to run my project on two ports of one server and on another server, so that if one server goes down, requests are forwarded to the other. Moreover, I want static files to bypass the Apache web server and be served directly by Tornado. These are my VCL rules — can you help me?
import directors;
# Shared health probe: 2 of the last 5 checks (30s apart) must return
# HTTP 200 within 1s for a backend to be considered healthy.
probe healthcheck {
.url = "/";
.interval = 30s;
.timeout = 1 s;
.window = 5;
.threshold = 2;
.initial = 2;
.expected_response = 200;
}
# Fixed from the original: every field assignment must end with a ";",
# .port takes a quoted string, and the probe is named "healthcheck"
# (the original referenced a non-existent "healtcheck").
backend server1 {
    .host = "127.0.0.1";
    .port = "8004";
    .probe = healthcheck;
}
backend server1-2 {
    .host = "127.0.0.1";
    .port = "8005";
    .probe = healthcheck;
}
backend server2 {
    .host = "192.168.1.1";
    .port = "8004";
    .probe = healthcheck;
}
sub vcl_init {
# Round-robin over both local ports and the remote server; members whose
# probe fails are skipped automatically by the director.
new vdir = directors.round_robin();
vdir.add_backend(server1);
vdir.add_backend(server1-2);
vdir.add_backend(server2);
}
sub vcl_recv {
    # Actually use the round-robin director created in vcl_init — the
    # original never assigned it, so the director was unused.
    set req.backend_hint = vdir.backend();

    # NOTE(review): "set req.grace" from the original does not exist in
    # Varnish 4 (this config uses the VCL-4 directors vmod); set
    # beresp.grace in vcl_backend_response instead.

    ### always cache these items:
    # NOTE(review): this pattern is unanchored — "\.(js)" also matches
    # URLs such as "/foo.json"; append "$" if only .js files are meant.
    if (req.method == "GET" && req.url ~ "\.(js)") {
        return (hash);
    }
    ## images
    if (req.method == "GET" && req.url ~ "\.(gif|jpg|jpeg|bmp|png|tiff|tif|ico|img|tga|wmf)$") {
        return (hash);
    }
    ## various other content pages
    if (req.method == "GET" && req.url ~ "\.(css|html)$") {
        return (hash);
    }
    ## multimedia
    if (req.method == "GET" && req.url ~ "\.(svg|swf|ico|mp3|mp4|m4a|ogg|mov|avi|wmv)$") {
        return (hash);
    }
    ## xml
    if (req.method == "GET" && req.url ~ "\.(xml)$") {
        return (hash);
    }
    ### do not cache these rules:
    # Non-idempotent methods are piped straight to the backend.
    if (req.method != "GET" && req.method != "HEAD") {
        return (pipe);
    }
    ### if it passes all these tests, do a lookup anyway.
    # Fixed: bare "lookup;"/"pipe;" are Varnish-2 syntax; Varnish 4 uses
    # return (hash) / return (pipe), and req.request became req.method.
    return (hash);
}
# Varnish 4 (which this config targets, given the directors vmod):
# vcl_fetch was renamed vcl_backend_response, and the request is available
# as bereq there — req.* does not exist on the backend side.
sub vcl_backend_response {
    if (bereq.url ~ "\.(gif|jpg|jpeg|swf|css|js|flv|mp3|mp4|pdf|ico|png)(\?.*|)$") {
        set beresp.ttl = 365d;
    }
}
Depending on whether you require sessions to work for users, you should switch from round_robin to hash. This will ensure the same users are always directed to the same backend.
I have multiple backend servers and I "round-robin" between them using a director.
Is there a way and - if there is - how to log which backend gets used (either the backend name or the backend host name)?
The above refers to using std.log("key:value") and %{VCL_Log:*key*}x with varnishncsa.
My vcl config:
# Two application backends load-balanced by a round-robin director.
backend aws_frontend1 {
.host = "aws1.domain.mobi";
.port = "80";
}
backend aws_frontend2 {
.host = "aws2.domain.mobi";
.port = "80";
}
director lb_aws_frontend round-robin {
{
.backend = aws_frontend1;
}
{
.backend = aws_frontend2;
}
}
sub vcl_recv {
# All requests are load-balanced; cookies are stripped so responses cache.
set req.backend = lb_aws_frontend;
unset req.http.Cookie;
}
sub vcl_fetch {
# Respect no-cache/private responses; cache everything else for a week.
if (beresp.http.cache-control ~ "(no-cache|private)" || beresp.http.pragma ~ "no-cache") {
set beresp.ttl = 0s;
} else {
set beresp.ttl = 168h;
}
}
sub vcl_error {
# Internal 750 status is translated to a plain 403 response.
if (obj.status == 750) {
set obj.status = 403;
return(deliver);
}
}
Edit:
Below is the updated code thanks to NITEMAN...
This code prints out the backend name when a miss and "varnish cache" is a hit:
import std;
# Same configuration as above, extended with backend-name logging
# (readable via varnishncsa with %{VCL_Log:backend_used}x).
backend aws_frontend1 {
.host = "aws1.domain.mobi";
.port = "80";
}
backend aws_frontend2 {
.host = "aws2.domain.mobi";
.port = "80";
}
director lb_aws_frontend round-robin {
{
.backend = aws_frontend1;
}
{
.backend = aws_frontend2;
}
}
sub vcl_recv {
set req.backend = lb_aws_frontend;
unset req.http.Cookie;
}
sub vcl_fetch {
if (beresp.http.cache-control ~ "(no-cache|private)" || beresp.http.pragma ~ "no-cache") {
set beresp.ttl = 0s;
} else {
set beresp.ttl = 168h;
}
# On a miss, record which backend actually served the fetch.
std.log("backend_used:" + beresp.backend.name);
}
sub vcl_hit {
# On a cache hit no backend is contacted, so log a fixed marker instead.
std.log("backend_used:varnish cache");
}
sub vcl_error {
if (obj.status == 750) {
set obj.status = 403;
return(deliver);
}
}
Backend's name (even when a director is used) is available on vcl_fetch, for debugging purposes I usually use:
# Debugging aid: expose the chosen backend (works even behind a director)
# as a response header, visible with curl -I or varnishlog.
sub vcl_fetch {
# ...
set beresp.http.X-Backend = beresp.backend.name;
# ...
}
We have a set of 6 backends that serve our website. Because we use multiple datacenters we have found the best performance occurs for us when we have varnish send the request to the localhost httpd server (running on port 81). This of course is a very basic configuration well supported by varnish and can be accomplished using the fallback director:
# Fallback director: tries members in declaration order and uses the first
# healthy one, so the local backend is always preferred while it is up.
director default fallback {
{ .backend = localbackend; }
{ .backend = web1; }
{ .backend = web2; }
{ .backend = web3; }
# ... add the remaining web backends here ...
# (Fixed: the original had a bare "etc..." line, which is not valid VCL.)
}
However the fallback director tries the other backends in order till it finds a healthy one. The BIG problem is that in the above config web1 will take all the traffic if localbackend fails !!! This will overload web1 and it will become sick. Then all requests go to web3... and now it will get 3x the normal traffic... cause a cascading failure.
Thus we instead need a config that sends all requests to the localhost httpd server while it is healthy, but if it is not, sends the requests to the other healthy servers in a round-robin fashion.
Thank you in advance for any suggestions and solutions you might think of... Your help is GREATLY appreciated.
It can be accomplished in several ways, one of the simplest is:
Set localhost as the default backend.
Create a round-robin director for the rest of the backends.
Check at the top of your vcl_recv whether the default backend is healthy and, if it isn't, switch to the round-robin director.
With this approach you can even switch backends if current request has failed.
Something like:
# Aggressive probe: checks every second; 2 of the last 3 checks must
# answer within 200ms for the backend to count as healthy.
probe my_probe {
.url = "/";
.interval = 1s;
.timeout = 0.2 s;
.window = 3;
.threshold = 2;
.initial = 1;
}
# Fixed from the original: .host must be a quoted string literal in VCL.
backend default {
    .host = "127.0.0.1";
    .probe = my_probe;
}
backend server1 {
    .host = "192.168.8.21";
    .probe = my_probe;
}
backend server2 {
    .host = "192.168.8.22";
    .probe = my_probe;
}
backend server3 {
    .host = "192.168.8.23";
    .probe = my_probe;
}
# Round-robin pool used only when the local default backend is sick.
director server_pool round-robin {
{ .backend = server1; }
{ .backend = server2; }
{ .backend = server3; }
}
sub vcl_recv {
# Prefer the local default backend; divert to the pool when it is sick.
if ( req.backend == default
&& ! req.backend.healthy
) {
set req.backend = server_pool;
}
/* Uncomment if you want to divert restarted requests to the server pool
* if (req.restarts > 0) {
* set req.backend = server_pool;
* }
*/
}
This is completely untested, but should theoretically work.
# Probe against a status page, evaluated once a minute: 3 of the last
# 8 checks must return HTTP 200 within 300ms.
probe healthcheck {
.url = "/status.php";
.interval = 60s;
.timeout = 0.3 s;
.window = 8;
.threshold = 3;
.initial = 3;
.expected_response = 200;
}
backend server1 {
.host = "server1.example.com";
.probe = healthcheck;
}
backend server2 {
.host = "server2.example.com";
.probe = healthcheck;
}
backend server3 {
.host = "server3.example.com";
.probe = healthcheck;
}
# Round-robin over all three servers, used when the local one is sick.
# NOTE(review): the director is NAMED "fallback" but is of type
# round-robin, not the fallback director type — consider renaming to
# avoid confusion.
director fallback round-robin {
{ .backend = server1; }
{ .backend = server2; }
{ .backend = server3; }
}
sub vcl_recv {
# Start with the round-robin director so the request is always served,
# even if we cannot identify the local backend below.
set req.backend = fallback;
# Prefer the backend running on this machine, identified by hostname.
# Backend names must match the machines' hostnames for this to work.
# Fixed: obj.http.* is not accessible in vcl_recv, so the marker is kept
# on req.http instead of obj.http as in the original.
if (server.hostname == "server1") {
set req.backend = server1;
set req.http.X-LocalCache = "YES";
}
else if (server.hostname == "server2") {
set req.backend = server2;
set req.http.X-LocalCache = "YES";
}
else if (server.hostname == "server3") {
set req.backend = server3;
set req.http.X-LocalCache = "YES";
}
# If the local backend is unhealthy, fall back to the director.
# (Fixed: the original "unset" line was missing its semicolon.)
if (req.http.X-LocalCache ~ "YES") {
if (!req.backend.healthy) {
set req.backend = fallback;
unset req.http.X-LocalCache;
}
}
}
It will automatically figure out which backend is localhost by using the hostname. You just have to make sure the backend names match your hostnames.
You can do some basic load balancing with Varnish. Check this out:
https://www.varnish-cache.org/trac/wiki/LoadBalancing