How to use "req.http.Cookie" in Varnish 4.0.3 - varnish

I am using Varnish 4.0.3 and want to decide whether to cache a page based on whether the request carries a specific cookie key and value.
When a user visits mypage and their browser sends a "loged_in=true" cookie, I do NOT want to cache the page; when the cookie is absent, I want mypage to be cached.
But neither case works: mypage is not cached at all.
Additionally, when a user visits a "category" page, Varnish caches it correctly.
Here is my default.vcl.
Can anyone tell me what I am doing wrong?
vcl 4.0;
import directors;
backend ap1 {
.host = "192.168.0.1";
.port = "80";
.first_byte_timeout = 200s;
.probe = {
.url = "/";
.interval = 5s;
.timeout = 1 s;
.window = 5;
.threshold = 3;
.initial = 1;
}
}
backend ap2 {
.host = "192.168.0.2";
.port = "80";
.first_byte_timeout = 200s;
.probe = {
.url = "/";
.interval = 5s;
.timeout = 1 s;
.window = 5;
.threshold = 3;
.initial = 1;
}
}
sub vcl_init{
new ws_hash = directors.hash();
ws_hash.add_backend(ap1, 1.0);
ws_hash.add_backend(ap2, 1.0);
new ws_rand = directors.random();
ws_rand.add_backend(ap1, 1.0);
ws_rand.add_backend(ap2, 1.0);
}
sub vcl_recv{
if( (req.url ~ "(category)") ) {
// Cache
set req.backend_hint = ws_hash.backend(req.url + ":" + req.http.host);
return(hash);
}
if( (req.url ~ "(mypage)") ) {
if (req.http.Cookie ~ "(loged_in=true)" ) {
// NO Cache
set req.backend_hint = ws_rand.backend();
return(pass);
}
// Cache
set req.backend_hint = ws_hash.backend(req.url + ":" + req.http.host);
return(hash);
}
// NO Cache
set req.backend_hint = ws_rand.backend();
return(pass);
}
sub vcl_deliver {
if (obj.hits > 0) {
set resp.http.V-Cache = "HIT";
} else {
set resp.http.V-Cache = "MISS";
}
}
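A point worth double-checking: in Varnish 4 the builtin vcl_backend_response marks a response uncacheable (hit-for-pass) whenever the backend sends Set-Cookie, even if vcl_recv returned hash, so mypage would never enter the cache if the application always sets a cookie there. A minimal sketch of stripping Set-Cookie on the pages meant to be cached, assuming those pages can live without it:
sub vcl_backend_response {
    // Sketch only: without this, any Set-Cookie from the backend makes the
    // object hit-for-pass even though vcl_recv returned hash.
    if (bereq.url ~ "(category|mypage)" && bereq.http.Cookie !~ "loged_in=true") {
        unset beresp.http.Set-Cookie;
    }
}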

Related

Varnish returns incorrect backend content from User-Agent rule

I have a simple rule to redirect traffic to a special backend if the User-Agent == GlobalSign or if the request URL is /globalsign. I have noticed that on rare occasions Varnish returns content from the special backend incorrectly. It seems to happen randomly and does not repeat.
if (req.http.User-Agent ~ "(?i)GlobalSign" || req.url ~ "^/globalsign" ) {
set req.url = "/";
set req.backend = dgs1;
return(pipe);
}
Backend rules
backend b1 {
//Backend 1
.host = "10.8.8.16";
.port = "80";
.probe = {
.url = "/service_up";
.timeout = 1s;
.interval = 5s;
.window = 10;
.threshold = 8;
}
}
backend gs1 {
// Set host: Globalsign
.host = "10.8.8.15";
.port = "80";
.probe = {
.url = "/service_up";
.timeout = 5s;
.interval = 5s;
.window = 10;
.threshold = 8;
}
}
director dgs1 random {
{
.backend = gs1;
.weight = 1;
}
}
director d01 random {
{
.backend = b1;
.weight = 1;
}
}
Full VCL
include "backends.vcl";
include "bans.vcl";
include "acl.vcl";
sub vcl_recv {
// Use the director we set up above to answer the request if it's not cached.
set req.backend = d01;
if( req.url ~ "^/service_up" ) {
return(lookup);
}
if(client.ip ~ evil_networks){
error 403 "Forbidden";
}
if (req.http.User-Agent ~ "(?i)GlobalSign" || req.url ~ "^/globalsign" ) {
set req.url = "/";
set req.backend = dgs1;
return(pipe);
}
return(pass);
}
sub vcl_fetch {
set beresp.grace = 24h;
if (beresp.status >= 400) {
return (hit_for_pass);
}
// New Set Longer Cache
if (req.http.user-agent ~ "(Googlebot|msnbot|Yandex|Slurp|Bot|Crawl|bot|Baid|Mediapartners-Google)") {
unset beresp.http.set-cookie;
set beresp.ttl = 5d;
return (deliver);
}
if (req.request == "GET" && req.url ~ "\.(css|xml|txt)$") {
set beresp.ttl = 5d;
unset beresp.http.set-cookie;
return (deliver);
}
// multimedia
if (req.request == "GET" && req.url ~ "\.(gif|jpg|jpeg|bmp|png|tiff|tif|ico|img|tga|woff|eot|ttf|svg|wmf|js|swf|ico)$") {
unset beresp.http.set-cookie;
set beresp.ttl = 5d;
return (deliver);
}
set beresp.ttl = 5d;
return (deliver);
}
include "errors.vcl";
sub vcl_deliver {
return(deliver);
}
I suspect return(pipe); is the culprit.
If a keep-alive HTTP client makes just one request with the GlobalSign user agent or to the /globalsign URL, all subsequent requests on that connection will be piped to dgs1, even if they do not meet the criteria.
Try to avoid piping if possible; it's a common source of hard-to-track issues, and possibly a security hole too.
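If the pipe has to stay, a common mitigation (sketch only, Varnish 3 syntax to match the config above) is to force the piped backend connection to close, so a keep-alive client cannot reuse it for unrelated requests:
sub vcl_pipe {
    // Sketch: without this, the client connection stays glued to the piped
    // backend and later requests bypass vcl_recv's routing entirely.
    set bereq.http.Connection = "close";
    return (pipe);
}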

Using varnish-counters MAIN.* in VCL

Is it possible to use Varnish counters in conditions in VCL?
I would like to write a condition based on the current value of MAIN.backend_conn, but I cannot figure out whether I can access the statistics in VCL at all, even via inline C.
Current Solution
Right now I am using a configuration like this:
import std; // needed for std.healthy below
backend default {
.host = "192.168.122.11";
.probe = {
.url = "/check-connections.php";
.interval = 1s;
.threshold = 4;
}
}
backend sessionWorker {
.host = "192.168.122.11";
.probe = {
.url = "/other-probe";
.interval = 5s;
.threshold = 2;
}
}
sub vcl_recv {
if (req.http.cookie ~ "(^|;\s*)(SESS=)" || std.healthy(req.backend_hint)) {
set req.backend_hint = sessionWorker;
} else {
return (synth(503, "Server overloaded"));
}
}
check-connections.php reads the nginx status page (active connections) and returns an error status when there are too many active connections:
// $active holds the active-connections count parsed from the nginx status page
if ($active > 10) {
http_response_code(502);
} else {
http_response_code(200);
}
I would like to find a way to replace std.healthy(req.backend_hint) with the backend's current connection count (VBE.conn) directly in the VCL.
Some counters are already exposed in VCL, like bereq.retries:
import std; // needed for std.fileread in vcl_backend_error below
sub vcl_backend_response {
if (beresp.status == 503 && bereq.retries < 5 ) {
return(retry);
}
}
sub vcl_backend_error {
if (beresp.status == 503 && bereq.retries == 5) {
synthetic(std.fileread("/path/to/my/file/varnish503.html"));
return(deliver);
}
}
Maybe this is already what you need. Otherwise, here is a list of the other built-in counters.
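For reference, the MAIN.* counters themselves can be inspected outside VCL with varnishstat; a one-shot read of the counter from the question would look roughly like this (counter names can differ between Varnish versions):
varnishstat -1 -f MAIN.backend_conn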

Running a varnish redirect to a node js server with socket.io

So I'm running a website; the front is a simple Apache home page, nothing important. But I want to run a sync client on a subdomain:
backend apache {
.host = "127.0.0.1";
.port = "8051";
.connect_timeout = 1s;
}
backend node {
.host = "127.0.0.1";
.port = "8081";
.connect_timeout = 1s;
}
backend bot {
.host = "127.0.0.1";
.port = "8080";
.connect_timeout = 1s;
}
sub vcl_recv {
if(req.http.host == "Base-domain.com") {
set req.backend = apache;
}
if(req.http.host == "bot.base-domain.com") {
set req.backend = bot;
}
if(req.http.host == "sync.base-domain.com") {
set req.backend = node;
}
if (req.http.Upgrade ~ "(?i)websocket") {
set req.backend = node;
return (pipe);
}
if (req.http.host == "sync.based-domain.moe") {
if (req.url ~ "^/socket.io/") {
set req.backend = node;
return (pipe);
}
return (pass);
}
}
sub vcl_pipe {
if (req.http.upgrade) {
set bereq.http.upgrade = req.http.upgrade;
}
}
Searching all over for information on how to run the Node.js server as the one with the socket is turning up no help. Due to a connection issue the websocket is on 1338 rather than 1337. I'm pretty new to this, so I'm not sure what to do.
Should I be changing my Varnish config or modifying the node's config further? Thanks in advance.
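One caveat with the return (pipe) calls above, the same keep-alive issue described in the GlobalSign answer earlier: later requests on a piped connection go to the node backend as well, bypassing vcl_recv. A sketch of closing non-upgrade piped connections, as an extension of the vcl_pipe shown above:
sub vcl_pipe {
    if (req.http.upgrade) {
        set bereq.http.upgrade = req.http.upgrade;
    } else {
        // Sketch: close plain piped connections so keep-alive clients don't
        // keep sending unrelated requests to the node backend.
        set bereq.http.Connection = "close";
    }
}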

How to log the selected director>backend in varnishncsa

I have multiple backend servers and I "round-robin" between them using a director.
Is there a way and - if there is - how to log which backend gets used (either the backend name or the backend host name)?
This refers to using std.log("key:value") in VCL together with the %{VCL_Log:key}x format in varnishncsa.
My vcl config:
backend aws_frontend1 {
.host = "aws1.domain.mobi";
.port = "80";
}
backend aws_frontend2 {
.host = "aws2.domain.mobi";
.port = "80";
}
director lb_aws_frontend round-robin {
{
.backend = aws_frontend1;
}
{
.backend = aws_frontend2;
}
}
sub vcl_recv {
set req.backend = lb_aws_frontend;
unset req.http.Cookie;
}
sub vcl_fetch {
if (beresp.http.cache-control ~ "(no-cache|private)" || beresp.http.pragma ~ "no-cache") {
set beresp.ttl = 0s;
} else {
set beresp.ttl = 168h;
}
}
sub vcl_error {
if (obj.status == 750) {
set obj.status = 403;
return(deliver);
}
}
Edit:
Below is the updated code, thanks to NITEMAN.
This code logs the backend name on a miss and "varnish cache" on a hit:
import std;
backend aws_frontend1 {
.host = "aws1.domain.mobi";
.port = "80";
}
backend aws_frontend2 {
.host = "aws2.domain.mobi";
.port = "80";
}
director lb_aws_frontend round-robin {
{
.backend = aws_frontend1;
}
{
.backend = aws_frontend2;
}
}
sub vcl_recv {
set req.backend = lb_aws_frontend;
unset req.http.Cookie;
}
sub vcl_fetch {
if (beresp.http.cache-control ~ "(no-cache|private)" || beresp.http.pragma ~ "no-cache") {
set beresp.ttl = 0s;
} else {
set beresp.ttl = 168h;
}
std.log("backend_used:" + beresp.backend.name);
}
sub vcl_hit {
std.log("backend_used:varnish cache");
}
sub vcl_error {
if (obj.status == 750) {
set obj.status = 403;
return(deliver);
}
}
The backend's name (even when a director is used) is available in vcl_fetch; for debugging purposes I usually use:
sub vcl_fetch {
# ...
set beresp.http.X-Backend = beresp.backend.name;
# ...
}
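On the varnishncsa side, the logged key can then be pulled into the access log with a format string; a sketch assuming the backend_used key from the std.log calls above:
varnishncsa -F '%h %l %u %t "%r" %s %b "%{VCL_Log:backend_used}x"'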

How can I use varnish to send requests to localhost httpd server but if unhealthy send the request round-robin to a group of other nodes

We have a set of 6 backends that serve our website. Because we use multiple datacenters, we have found that the best performance occurs when Varnish sends the request to the localhost httpd server (running on port 81). This, of course, is a very basic configuration that is well supported by Varnish and can be accomplished using the fallback director:
director default fallback {
{ .backend = localbackend; }
{ .backend = web1; }
{ .backend = web2; }
{ .backend = web3; }
etc...
}
However, the fallback director tries the other backends in order until it finds a healthy one. The BIG problem is that in the above config web1 will take all the traffic if localbackend fails! This will overload web1 and it will become sick. Then all requests go to web2, which now gets 3x the normal traffic... causing a cascading failure.
What we need instead is a config that sends all requests to the localhost httpd server while it is healthy, but otherwise sends the requests to the other healthy servers in a round-robin fashion.
Thank you in advance for any suggestions and solutions you might think of... Your help is GREATLY appreciated.
It can be accomplished in several ways; one of the simplest is:
Set localhost as the default backend.
Create a round-robin director for the rest of the backends.
Check at the top of your vcl_recv whether the default backend is healthy and, if it isn't, switch to the round-robin director.
With this approach you can even switch backends if the current request has failed.
Something like:
probe my_probe {
.url = "/";
.interval = 1s;
.timeout = 0.2 s;
.window = 3;
.threshold = 2;
.initial = 1;
}
backend default {
.host = "127.0.0.1";
.probe = my_probe;
}
backend server1 {
.host = "192.168.8.21";
.probe = my_probe;
}
backend server2 {
.host = "192.168.8.22";
.probe = my_probe;
}
backend server3 {
.host = "192.168.8.23";
.probe = my_probe;
}
director server_pool round-robin {
{ .backend = server1; }
{ .backend = server2; }
{ .backend = server3; }
}
sub vcl_recv {
if ( req.backend == default
&& ! req.backend.healthy
) {
set req.backend = server_pool;
}
/* Uncomment if you want to divert restarted requests to the server pool
* if (req.restarts > 0) {
* set req.backend = server_pool;
* }
*/
}
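To actually divert a request whose fetch has failed (the commented-out restart hint above), a sketch in the same Varnish 3 syntax, with an arbitrary retry limit, is to restart from vcl_error so the req.restarts check in vcl_recv can send it to the pool:
sub vcl_error {
    // Sketch: restart failed fetches; with the req.restarts check in
    // vcl_recv uncommented, the retried request goes to server_pool.
    if (obj.status == 503 && req.restarts < 2) {
        return (restart);
    }
}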
This is completely untested, but should theoretically work.
probe healthcheck {
.url = "/status.php";
.interval = 60s;
.timeout = 0.3 s;
.window = 8;
.threshold = 3;
.initial = 3;
.expected_response = 200;
}
backend server1 {
.host = "server1.example.com";
.probe = healthcheck;
}
backend server2 {
.host = "server2.example.com";
.probe = healthcheck;
}
backend server3 {
.host = "server3.example.com";
.probe = healthcheck;
}
director fallback round-robin {
{ .backend = server1; }
{ .backend = server2; }
{ .backend = server3; }
}
sub vcl_recv {
# Set backend to fallback director until we find the proper localhost backend.
# If we can't figure out which is localhost, at least we still work.
set req.backend = fallback;
# Set the default backend to localhost by hostname
if (server.hostname == "server1") {
set req.backend = server1;
set req.http.X-LocalCache = "YES";
}
else if (server.hostname == "server2") {
set req.backend = server2;
set req.http.X-LocalCache = "YES";
}
else if (server.hostname == "server3") {
set req.backend = server3;
set req.http.X-LocalCache = "YES";
}
# If the localhost backend is unhealthy, fall back to the round-robin director.
if (req.http.X-LocalCache ~ "YES") {
if (!req.backend.healthy) {
set req.backend = fallback;
unset req.http.X-LocalCache;
}
}
}
It will automatically figure out which backend is localhost by using the hostname. You just have to make sure the backend names match your hostnames.
You can do some basic load balancing with Varnish. Check this out:
https://www.varnish-cache.org/trac/wiki/LoadBalancing
