I'm trying to configure Varnish 6.0.0 as a cache server that keeps serving content while the web server is down for maintenance, but I can't get the expected behavior.
I have the following configuration (using grace mode):
vcl 4.0;
import directors;
import std;
backend default {
.host = "some.server.com";
.port = "80";
.probe = {
.url = "/health_check.php";
.interval = 5s;
.timeout = 1s;
.window = 5;
.threshold = 3;
}
}
sub vcl_recv {
std.syslog(180, "RECV: recv");
#Cache - grace mode
set req.http.grace = "none";
}
sub vcl_backend_response {
std.syslog(180, "RECV: backend");
#Cache - grace mode
set beresp.ttl = 10s;
set beresp.grace = 1h;
#set beresp.keep = 24h;
}
sub vcl_deliver {
std.syslog(180, "RECV: deliver");
#Cache
set resp.http.grace = req.http.grace;
}
sub vcl_hit {
std.syslog(180, "RECV: hit************************");
if (obj.ttl >= 0s) {
# normal hit
return (deliver);
}
# We have no fresh fish. Let's look at the stale ones.
if (std.healthy(req.backend_hint)) {
# Backend is healthy. Limit age to 10s.
if (obj.ttl + 10s > 0s) {
set req.http.grace = "normal(limited)";
return (deliver);
} else {
# No candidate for grace. Fetch a fresh object.
return(miss);
}
} else {
# backend is sick - use full grace
if (obj.ttl + obj.grace > 0s) {
set req.http.grace = "full";
return (deliver);
} else {
# no graced object.
return (miss);
}
}
}
Then, when I check the log messages:
tail -f /var/log/messages
I only see the following steps:
varnishd[11801]: RECV: recv
varnishd[11801]: RECV: hash
varnishd[11801]: RECV: backend
varnishd[11801]: RECV: deliver
From this I can tell that the subroutine "vcl_hit" is never executed, so when the web server goes down I immediately get an error message from Varnish instead of the cached content:
Error 503 Backend fetch failed
Backend fetch failed
Guru Meditation:
XID: 164960
Varnish cache server
Any idea how to get the expected behavior?
The web page we serve uses sessions, so we need to identify whether or not a user is logged in.
With the previous configuration, even when the cookie is empty, Varnish's default behavior is not to cache requests that carry cookies.
So I added some lines to make the content cacheable (explained in the code):
sub vcl_recv {
# initial state
set req.http.grace = "none";
# If our backend is down, unset all cookies and serve pages from cache.
if (!std.healthy(req.backend_hint)) {
unset req.http.Cookie;
}
# Are there cookies left with only spaces or that are empty?
if (req.http.cookie ~ "^\s*$") {
unset req.http.cookie;
}
# Do not cache if the method is not GET or HEAD (e.g. POST, PUT, DELETE, etc.)
if (req.method != "GET" && req.method != "HEAD") {
return (pass);
}
# If server is healthy && user is in session --> no cache
if (std.healthy(req.backend_hint)) {
if (req.http.Authorization || req.http.cookie ~ "some_cookie=") {
/* Not cacheable by default */
return (pass);
}
}
return(hash);
}
sub vcl_backend_response {
#Cache - grace mode
set beresp.ttl = 10s;
#set beresp.ttl = 1h;
set beresp.grace = 1h;
set beresp.keep = 24h;
set beresp.http.Cache-Control = "max-age=3600";
}
The other subroutines have the same configuration as before.
It could be that the response from the backend is "uncacheable".
Does it have a Cache-Control header set by default? Or cookies?
You can unset that header, or set it to another value that allows caching:
unset beresp.http.Cache-Control;
If you still want the client to receive a restrictive value (private, no-cache) so that browsers and intermediate caches don't store the response, set the header again in vcl_deliver.
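A minimal sketch of that idea (X-Orig-Cache-Control is just an illustrative header name; adjust it to your setup):
sub vcl_backend_response {
    # Keep a copy of the original value so it can be restored for the client,
    # then remove the restrictive header so Varnish is allowed to cache the object.
    # (beresp.ttl / beresp.grace are already set elsewhere in your vcl_backend_response.)
    if (beresp.http.Cache-Control) {
        set beresp.http.X-Orig-Cache-Control = beresp.http.Cache-Control;
        unset beresp.http.Cache-Control;
    }
}
sub vcl_deliver {
    # Hand the restrictive value back to the client so browsers and
    # intermediate caches do not store the response.
    if (resp.http.X-Orig-Cache-Control) {
        set resp.http.Cache-Control = resp.http.X-Orig-Cache-Control;
        unset resp.http.X-Orig-Cache-Control;
    }
}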
Related
My Varnish cache only keeps items cached for a few minutes, even though I have changed the Varnish configuration to keep items cached for a week.
Here is my default.vcl:
# VCL version 5.0 is not supported so it should be 4.0 even though actually used Varnish version is 6
vcl 4.0;
import std;
# The minimal Varnish version is 6.0
# For SSL offloading, pass the following header in your proxy server or load balancer: 'SSL-OFFLOADED: https'
backend default {
.host = "172.26.12.6";
.port = "8080";
.first_byte_timeout = 600s;
.probe = {
.url = "/health_check.php";
.timeout = 2s;
.interval = 5s;
.window = 10;
.threshold = 5;
}
}
acl purge {
"172.26.12.6";
}
sub vcl_recv {
if (req.restarts > 0) {
set req.hash_always_miss = true;
}
if (req.method == "PURGE") {
if (client.ip !~ purge) {
return (synth(405, "Method not allowed"));
}
# To use the X-Pool header for purging varnish during automated deployments, make sure the X-Pool header
# has been added to the response in your backend server config. This is used, for example, by the
# capistrano-magento2 gem for purging old content from varnish during its deploy routine.
if (!req.http.X-Magento-Tags-Pattern && !req.http.X-Pool) {
return (synth(400, "X-Magento-Tags-Pattern or X-Pool header required"));
}
if (req.http.X-Magento-Tags-Pattern) {
ban("obj.http.X-Magento-Tags ~ " + req.http.X-Magento-Tags-Pattern);
}
if (req.http.X-Pool) {
ban("obj.http.X-Pool ~ " + req.http.X-Pool);
}
return (synth(200, "Purged"));
}
if (req.method != "GET" &&
req.method != "HEAD" &&
req.method != "PUT" &&
req.method != "POST" &&
req.method != "TRACE" &&
req.method != "OPTIONS" &&
req.method != "DELETE") {
/* Non-RFC2616 or CONNECT which is weird. */
return (pipe);
}
# We only deal with GET and HEAD by default
if (req.method != "GET" && req.method != "HEAD") {
return (pass);
}
# Bypass shopping cart, checkout and search requests
if (req.url ~ "/checkout" || req.url ~ "/catalogsearch") {
return (pass);
}
# Bypass health check requests
if (req.url ~ "/pub/health_check.php") {
return (pass);
}
# Set initial grace period usage status
set req.http.grace = "none";
# normalize url in case of leading HTTP scheme and domain
set req.url = regsub(req.url, "^http[s]?://", "");
# collect all cookies
std.collect(req.http.Cookie);
# Compression filter. See https://www.varnish-cache.org/trac/wiki/FAQ/Compression
if (req.http.Accept-Encoding) {
if (req.url ~ "\.(jpg|jpeg|png|gif|gz|tgz|bz2|tbz|mp3|ogg|swf|flv)$") {
# No point in compressing these
unset req.http.Accept-Encoding;
} elsif (req.http.Accept-Encoding ~ "gzip") {
set req.http.Accept-Encoding = "gzip";
} elsif (req.http.Accept-Encoding ~ "deflate" && req.http.user-agent !~ "MSIE") {
set req.http.Accept-Encoding = "deflate";
} else {
# unknown algorithm
unset req.http.Accept-Encoding;
}
}
# Remove all marketing get parameters to minimize the cache objects
if (req.url ~ "(\?|&)(gclid|cx|ie|cof|siteurl|zanpid|origin|fbclid|mc_[a-z]+|utm_[a-z]+|_bta_[a-z]+)=") {
set req.url = regsuball(req.url, "(gclid|cx|ie|cof|siteurl|zanpid|origin|fbclid|mc_[a-z]+|utm_[a-z]+|_bta_[a-z]+)=[-_A-z0-9+()%.]+&?", "");
set req.url = regsub(req.url, "[?|&]+$", "");
}
# Static files caching
if (req.url ~ "^/(pub/)?(media|static)/") {
# Static files should not be cached by default
return (pass);
# But if you use a few locales and don't use CDN you can enable caching static files by commenting previous line (#return (pass);) and uncommenting next 3 lines
#unset req.http.Https;
#unset req.http.SSL-OFFLOADED;
#unset req.http.Cookie;
}
return (hash);
}
sub vcl_hash {
if (req.http.cookie ~ "X-Magento-Vary=") {
hash_data(regsub(req.http.cookie, "^.*?X-Magento-Vary=([^;]+);*.*$", "\1"));
}
# For multi site configurations to not cache each other's content
if (req.http.host) {
hash_data(req.http.host);
} else {
hash_data(server.ip);
}
# To make sure http users don't see ssl warning
if (req.http.SSL-OFFLOADED) {
hash_data(req.http.SSL-OFFLOADED);
}
if (req.url ~ "/graphql") {
call process_graphql_headers;
}
}
sub process_graphql_headers {
if (req.http.Store) {
hash_data(req.http.Store);
}
if (req.http.Content-Currency) {
hash_data(req.http.Content-Currency);
}
}
sub vcl_backend_response {
set beresp.grace = 3d;
if (beresp.http.content-type ~ "text") {
set beresp.do_esi = true;
}
if (bereq.url ~ "\.js$" || beresp.http.content-type ~ "text") {
set beresp.do_gzip = true;
}
if (beresp.http.X-Magento-Debug) {
set beresp.http.X-Magento-Cache-Control = beresp.http.Cache-Control;
}
# cache only successful responses and 404s
if (beresp.status != 200 && beresp.status != 404) {
set beresp.ttl = 0s;
set beresp.uncacheable = true;
return (deliver);
} elsif (beresp.http.Cache-Control ~ "private") {
set beresp.uncacheable = true;
set beresp.ttl = 604800s;
return (deliver);
}
# validate if we need to cache it and prevent from setting cookie
if (beresp.ttl > 0s && (bereq.method == "GET" || bereq.method == "HEAD")) {
unset beresp.http.set-cookie;
}
# If page is not cacheable then bypass varnish for 2 minutes as Hit-For-Pass
if (beresp.ttl <= 0s ||
beresp.http.Surrogate-control ~ "no-store" ||
(!beresp.http.Surrogate-Control &&
beresp.http.Cache-Control ~ "no-cache|no-store") ||
beresp.http.Vary == "*") {
# Mark as Hit-For-Pass for the next 2 minutes
set beresp.ttl = 604800s;
set beresp.uncacheable = true;
}
return (deliver);
}
sub vcl_deliver {
if (resp.http.X-Magento-Debug) {
if (resp.http.x-varnish ~ " ") {
set resp.http.X-Magento-Cache-Debug = "HIT";
set resp.http.Grace = req.http.grace;
} else {
set resp.http.X-Magento-Cache-Debug = "MISS";
}
} else {
unset resp.http.Age;
}
# Not letting browser to cache non-static files.
if (resp.http.Cache-Control !~ "private" && req.url !~ "^/(pub/)?(media|static)/") {
set resp.http.Pragma = "no-cache";
set resp.http.Expires = "-1";
set resp.http.Cache-Control = "no-store, no-cache, must-revalidate, max-age=0";
}
unset resp.http.X-Magento-Debug;
unset resp.http.X-Magento-Tags;
unset resp.http.X-Powered-By;
unset resp.http.Server;
unset resp.http.X-Varnish;
unset resp.http.Via;
unset resp.http.Link;
}
sub vcl_hit {
if (obj.ttl >= 0s) {
# Hit within TTL period
return (deliver);
}
if (std.healthy(req.backend_hint)) {
if (obj.ttl + 300s > 0s) {
# Hit after TTL expiration, but within grace period
set req.http.grace = "normal (healthy server)";
return (deliver);
} else {
# Hit after TTL and grace expiration
return (restart);
}
} else {
# server is not healthy, retrieve from cache
set req.http.grace = "unlimited (unhealthy server)";
return (deliver);
}
}
And here is my systemctl edit --full varnish.service
[Unit]
Description=Varnish HTTP accelerator
Documentation=https://www.varnish-cache.org/docs/6.1/ man:varnishd
[Service]
Type=simple
LimitNOFILE=131072
LimitMEMLOCK=82000
ExecStart=/usr/sbin/varnishd -j unix,user=vcache -F -a :6081 -T localhost:6082 -f /etc/varnish/default.vcl -S /etc/varnish/secret -s malloc,3072m -p default_ttl=604800
ExecReload=/usr/share/varnish/varnishreload
ProtectSystem=full
ProtectHome=true
PrivateTmp=true
PrivateDevices=true
[Install]
WantedBy=multi-user.target
3 GB are allocated for the cache, and at any given time only about 30 megabytes are being used. I'm not sure why it won't keep the cache, and since I'm fairly new to Varnish I don't know where to look to figure out what is happening.
Update:
For additional reference, when I say Varnish only keeps the cache for a few minutes, I mean it keeps it for somewhere between 5 and 10 minutes; I'm not sure of the exact amount of time.
Update 2:
I had originally used the default.vcl that comes with Magento, only changing the "/pub/health_check.php" URL to "/health_check.php". The issue of the cache only being valid for a few minutes still occurred then.
As a rule of thumb, don't tinker with Magento-generated VCL unless you know what you are doing, e.g. fixing an actual bug they refuse to fix (there are quite a few, admittedly).
So as for what you did so far...
The following fragment is about hit-for-pass:
if (beresp.ttl <= 0s ||
beresp.http.Surrogate-control ~ "no-store" ||
(!beresp.http.Surrogate-Control &&
beresp.http.Cache-Control ~ "no-cache|no-store") ||
beresp.http.Vary == "*") {
# Mark as Hit-For-Pass for the next 2 minutes
set beresp.ttl = 604800s;
set beresp.uncacheable = true;
}
This is an optimization called "hit-for-pass". It is meant for the case where Varnish expected a page to be cacheable but received an uncacheable response. When this happens, this VCL marks the object as "uncacheable" for some time, and the next request for the same page within that window goes directly to the backend. Normally, Varnish holds back other requests for the same page while one fetch is in flight, a feature called "request coalescing", and that is usually a good thing. But when a page is suddenly uncacheable it becomes a bad thing, and coalescing must be disabled by marking the page as uncacheable. "Hit-for-pass" does exactly that: it disables request coalescing for such pages, saves backend load, and optimizes communication between Varnish and the backend.
1 week is overkill and may cause cacheability issues. Reverting 604800s back to the original value of 120s is recommended.
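In other words, that fragment would go back to Magento's original 2-minute hit-for-pass window:
if (beresp.ttl <= 0s ||
    beresp.http.Surrogate-control ~ "no-store" ||
    (!beresp.http.Surrogate-Control &&
      beresp.http.Cache-Control ~ "no-cache|no-store") ||
    beresp.http.Vary == "*") {
    # Mark as Hit-For-Pass for the next 2 minutes
    set beresp.ttl = 120s;
    set beresp.uncacheable = true;
}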
In varnish.service, -p default_ttl=604800 really is just the default TTL: it applies to responses that do not specify a cache lifetime via headers. Magento 2 does specify one, so the default value won't be used.
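If you want to verify which TTL actually got applied, one assumed debugging trick is to expose the computed TTL in a response header from vcl_backend_response (X-Debug-TTL is just a made-up header name; remove it once you've confirmed where the short lifetime comes from):
sub vcl_backend_response {
    # Debug aid only: surface the TTL Varnish derived from the response
    # headers (Cache-Control / Expires), or from default_ttl when none are set.
    set beresp.http.X-Debug-TTL = beresp.ttl;
}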
Things are pretty simple if you follow the documentation:
Log in to the Magento Admin as an administrator.
Click Stores > Settings > Configuration > Advanced > System > Full Page Cache.
From the Caching Application list, click Varnish Caching.
Enter a value in the TTL for public content field.
Expand Varnish Configuration and enter the required information.
Point 4 about TTL is exactly where you set the value for Varnish cache lifetime.
I'd actually recommend setting it even longer, e.g. 2 weeks or more. Magento will do the job of purging cache items that need clearing.
Your cache is probably removed after an order is placed.
See: https://github.com/magento/magento2/issues/30128
I'm using Varnish as a cache for my live streaming content. On the backend I have a video server (MistServer) with HLS output on port 8090. I've configured Varnish in the following way:
vcl 4.0;
import std;
backend default {
.host = "127.0.0.1";
.port = "8090";
}
acl purge {
"127.0.0.1";
}
sub vcl_recv {
if(req.method == "PURGE"){
if (!client.ip ~ purge) {
return(synth(405,"Not allowed."));
}
return (purge);
}
return (hash);
}
sub vcl_hash {
return (lookup);
}
sub vcl_hit {
if (obj.ttl >= 0s) {
return (deliver);
}
if (std.healthy(req.backend_hint)) {
if (obj.ttl + 10s > 0s) {
return (deliver);
}
else {
return(fetch);
}
}
else {
if (obj.ttl + obj.grace > 0s) {
return (deliver);
}
else {
return (fetch);
}
}
return (fetch); # Dead code, keep as a safeguard
}
sub vcl_purge{
}
sub vcl_pipe {
return (pipe);
}
sub vcl_init {
return (ok);
}
sub vcl_fini {
return (ok);
}
sub vcl_backend_response {
set beresp.http.Access-Control-Max-Age = 15;
set beresp.ttl = 15s;
set beresp.grace = 15s;
}
And varnish is being executed with a thread pool timeout of 30 seconds:
/usr/sbin/varnishd -a :6081 -T localhost:6082 -f /etc/varnish/user.vcl -S /etc/varnish/secret -s malloc,1G -t 3 -p thread_pool_timeout=30
The problem: the content is apparently being cached correctly, but after the stream has been running for a while, I end up about 5 minutes behind the original stream. For example, if the live stream is at time 22:25, the output stream might show 22:20. I can't have such a time difference in a live stream.
I've tried purging the cache, but it didn't work. Sometimes it is correctly synchronized if I restart both the video server and Varnish. What I need is to avoid caching content for more than 1 minute, or less.
What am I doing wrong?
Thanks indeed.
(not your issue here, but: I'd remove the vcl_hit section, it's probably not doing anything good for you)
For live video, the manifest needs to be treated carefully: essentially, you want to lag behind as little as possible. So the first thing to do is to kill the grace, and the second is to shorten the TTL to half a chunk duration.
Try with something like:
vcl 4.0;
import std;
backend default {
.host = "127.0.0.1";
.port = "8090";
}
acl purge {
"127.0.0.1";
}
sub vcl_recv {
if(req.method == "PURGE"){
if (!client.ip ~ purge) {
return(synth(405,"Not allowed."));
}
return (purge);
}
return (hash);
}
sub vcl_backend_response {
if (bereq.url ~ "m3u8") {
# assuming chunks are 2 seconds long
set beresp.ttl = 1s;
set beresp.grace = 0s;
} else {
set beresp.ttl = 10m;
set beresp.grace = 5m;
}
}
Note: you don't want the manifest delivered by Varnish to lag behind the real one. The current setup can deliver data that is 30 seconds old, which is usually way too much for live streaming. Interestingly, it seems the player just waits until the manifest shows a new chunk before resuming playback, which I guess is how the delay accumulates. The usual case is actually worse: the player simply stops altogether if there's no new chunk once it has finished reading the newest ones.
I'm using stale-if-error semantics to deliver stale content while the server is marked unhealthy, relying on the grace and keep object options in vcl_hit.
The question is: is it possible to deliver a cached object after entering the vcl_backend_error subroutine (i.e. when the fresh request fails)? Currently I deliver the cached object in vcl_hit, but looking at the following diagram, I don't see how it would be possible to reach the cached object of that request.
Source: https://www.varnish-software.com/book/4.0/chapters/VCL_Basics.html
When using the built-in VCL (see the code below):
# Built-in 'vcl_hit'.
sub vcl_hit {
if (obj.ttl >= 0s) {
return (deliver);
}
if (obj.ttl + obj.grace > 0s) {
return (deliver);
}
return (fetch);
}
If vcl_backend_error is reached by a background / asynchronous backend fetch triggered by a return (deliver) during vcl_hit, you don't need to worry. It's just a background thread updating a stale object; the stale content has already been delivered to the client.
If vcl_backend_error is reached by a synchronous backend fetch triggered by a return (fetch) during vcl_hit, you don't need to worry either. An error will be delivered to the client, but you have no choice: no stale object is available in the Varnish storage.
However, if you have customised vcl_hit to limit grace when the backend is healthy (see the VCL example below), a return (fetch) executed during vcl_hit will be handled as a synchronous backend request. The client will wait for the backend response. If the backend request reaches vcl_backend_error, an error will be delivered to the client. A stale object is available in the Varnish storage (one that went stale more than 60 seconds ago in this example), but it's not going to be used.
# Customised 'vcl_hit'.
sub vcl_hit {
if (obj.ttl >= 0s) {
return (deliver);
}
if (std.healthy(req.backend_hint)) {
if (obj.ttl + 60s > 0s) {
return (deliver);
}
} else {
if (obj.ttl + obj.grace > 0s) {
return (deliver);
}
}
return (fetch);
}
If you want to deliver stale objects when the synchronous backend fetch fails, you need some extra VCL logic. The idea is shown in the code below:
backend fail_be {
.host = "127.0.0.1";
.port = "9000";
.probe = {
.url = "/give-me-a-non-200-please";
.interval = 24h;
.timeout = 1s;
.window = 1;
.threshold = 1;
}
}
sub vcl_recv {
# Force the non-healthy backend in case of restart because of a previous
# failed backend fetch. This will force serving stalled content using
# full grace during 'vcl_hit' (if possible).
if (req.restarts == 0) {
unset req.http.X-Varnish-Restarted-5xx;
} else {
if (req.http.X-Varnish-Restarted-5xx) {
set req.backend_hint = fail_be;
}
}
# ...
}
sub vcl_synth {
# 503 generated for synchronous client requests when abandoning the
# backend request (see 'vcl_backend_fetch') and not executing a POST.
if (resp.status == 503 &&
req.method != "POST" &&
!req.http.X-Varnish-Restarted-5xx) {
set req.http.X-Varnish-Restarted-5xx = "1";
return (restart);
}
# ...
}
sub vcl_backend_fetch {
if (bereq.retries == 0) {
unset bereq.http.X-Varnish-Backend-5xx;
} else {
if (bereq.http.X-Varnish-Backend-5xx) {
# Jump to 'vcl_synth' with a 503 status code.
return (abandon);
}
}
# ...
}
sub vcl_backend_response {
if (beresp.status >= 500 && beresp.status < 600) {
set bereq.http.X-Varnish-Backend-5xx = "1";
return (retry);
}
# ...
}
sub vcl_backend_error {
set bereq.http.X-Varnish-Backend-5xx = "1";
return (retry);
}
I'm using devicedetect.vcl to send the X-UA-Device header to my app, so it knows which layout to render. The possible values that varnish will set for this header are mobile or desktop.
On the way out, this header gets transformed to Vary: User-Agent.
Now, as a separate, isolated project, I need to set another header on the resp object (which gets sent to our Golang proxy before it gets sent to the client). This header will be called X-Analytics-Device and will have the possible values of bot, mobile, tablet, or desktop.
The backend server does not need to do anything with X-Analytics-Device. Only our Go proxy will parse and then remove this header before sending it to the client.
The problem is, I need to set the X-Analytics-Device header based on the results of the subroutine call devicedetect;, which is in vcl_recv. I need to ultimately set it on resp which is in vcl_deliver, and I need to know the best way to pass the data.
The only real way I can think of that might work (based on my limited understanding of Varnish) is to set some other header and access it later.
Perhaps something like this (I left out bot for now):
if (req.http.X-UA-Device ~ "^mobile") {
set req.http.X-UA-Device = "mobile";
set req.http.X-Analytics-Device = "mobile";
} elseif (req.http.X-UA-Device ~ "^tablet") {
set req.http.X-UA-Device = "desktop";
set req.http.X-Analytics-Device = "tablet";
} else {
set req.http.X-UA-Device = "desktop";
set req.http.X-Analytics-Device = "desktop";
}
After this... I don't know. Do I need to set it like this in vcl_deliver?
set resp.http.X-Analytics-Device = req.http.X-Analytics-Device;
How does the value get passed from the req to the resp? What happens if it's a hit or a miss? Does that matter? Is this going to end up caching the header in Varnish (which it obviously shouldn't)?
My main fear with doing it this way is that there are so many moving pieces I just don't know the best way.
The end result is that EVERY request needs to check the device, and on the way out it needs to set the header, without that value being cached along with the data in Varnish; and while it doesn't hurt to send it to the backend, it's not needed there.
Here's my full VCL, before I added the pseudo-code lines above.
vcl 4.0;
backend default {
.host = "127.0.0.1";
.port = "8080";
}
import std;
include "purge.vcl";
include "devicedetect.vcl";
acl purge {
"localhost";
"127.0.0.1";
"10.0.0.0"/8;
}
sub vcl_recv {
call devicedetect;
if (req.http.X-UA-Device ~ "^mobile") {
set req.http.X-UA-Device = "mobile";
} else {
set req.http.X-UA-Device = "desktop";
}
if (req.restarts == 0) {
if (req.http.X-Forwarded-For) {
set req.http.X-Forwarded-For = req.http.X-Forwarded-For + ", " + client.ip;
} else {
set req.http.X-Forwarded-For = client.ip;
}
}
if (req.method !~ "^(GET|HEAD|PUT|POST|OPTIONS|DELETE)$") {
return (synth(405));
}
# never cache anything except GET/HEAD
if (req.method != "GET" && req.method != "HEAD") {
return (pass);
}
# don't cache images or assets
if (req.url ~ "\.(js|css|jpg|jpeg|png|gif|ico|tiff|tif|bmp|svg)$") {
return (pass);
}
# fix up the request
unset req.http.cookie;
return (hash);
}
sub vcl_backend_response {
set beresp.do_stream = false;
# device detect
if (bereq.http.X-UA-Device) {
if (!beresp.http.Vary) { # no Vary at all
set beresp.http.Vary = "X-UA-Device";
} elseif (beresp.http.Vary !~ "X-UA-Device") { # add to existing Vary
set beresp.http.Vary = beresp.http.Vary + ", X-UA-Device";
}
}
# bypass cache for files > 5 MB
if (std.integer(beresp.http.Content-Length, 0) > 5242880) {
set beresp.uncacheable = true;
set beresp.ttl = 120s;
return (deliver);
}
# catch obvious reasons we can't cache
if (beresp.http.Set-Cookie) {
set beresp.ttl = 0s;
}
# avoid caching error responses (1m grace period)
if (beresp.status >= 500) {
set beresp.ttl = 1m;
return (deliver);
}
# set times
set beresp.ttl = 24h;
set beresp.grace = 4h;
return (deliver);
}
sub vcl_deliver {
# device detect
if ((req.http.X-UA-Device) && (resp.http.Vary)) {
set resp.http.Vary = regsub(resp.http.Vary, "X-UA-Device", "User-Agent");
}
# remove junk headers
unset resp.http.Server;
unset resp.http.Via;
unset resp.http.X-Powered-By;
unset resp.http.X-Runtime;
unset resp.http.X-Varnish;
if (obj.hits > 0) {
set resp.http.X-Cache = "HIT";
} else {
set resp.http.X-Cache = "MISS";
}
}
This link actually perfectly clarifies and answers all the things I was failing to articulate... https://info.varnish-software.com/blog/adding-headers-gain-insight-vcl
The answer is to shovel all the bits of data you need into the req headers in vcl_recv, and then copy them over to the response in vcl_deliver.
He states the following, about why it won't get cached:
Since the req object is not delivered to the client we need to copy the data from the req object to resp. We do this when we deliver it. If you do it in vcl_backend_response the headers will be stored in the cache and this might not be what you want.
The answer from #Tallboy saved my day. To sum it up, what you want to do is:
# set the value in vcl_recv
sub vcl_recv {
set req.http.X-NAVO-AY = "AYAYAYAYAYAYYYYY";
}
# copy the value from req to resp (because it is not done automatically)
sub vcl_deliver {
set resp.http.X-NAVO-AY = req.http.X-NAVO-AY;
}
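Applied to the X-Analytics-Device case from the question, the same pattern would look roughly like this (a sketch only; the values come from the devicedetect call in vcl_recv, and because the header is copied from req at delivery time it is never stored with the cached object):
sub vcl_recv {
    call devicedetect;
    # Normalize the detected device and keep a separate analytics value on req.
    if (req.http.X-UA-Device ~ "^mobile") {
        set req.http.X-UA-Device = "mobile";
        set req.http.X-Analytics-Device = "mobile";
    } elseif (req.http.X-UA-Device ~ "^tablet") {
        set req.http.X-UA-Device = "desktop";
        set req.http.X-Analytics-Device = "tablet";
    } else {
        set req.http.X-UA-Device = "desktop";
        set req.http.X-Analytics-Device = "desktop";
    }
}
sub vcl_deliver {
    # Copied per request, so hits and misses both get the right value.
    set resp.http.X-Analytics-Device = req.http.X-Analytics-Device;
}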
First of all, sorry for my poor English; it's not my native language.
I'm trying to configure Varnish with cookies to manage users on the backend, and I have some issues with logging in and other checks.
My configuration for recv, fetch and hash:
backend default {
.host = "127.0.0.1";
.port = "8080";
}
sub vcl_recv {
remove req.http.X-Forwarded-For;
set req.http.X-Forwarded-For = client.ip;
if (req.request == "POST"){
return (pass);
}
# Grace mode
if (! req.backend.healthy) {
set req.grace = 30m;
} else {
set req.grace = 15s;
}
if(req.url ~ "^localhost$"){
set req.http.host = "www.micasa.com";
}
# Acces to system URL's is protected
if ((req.url ~ "^/server_status") || (req.url ~ "^/discover/varnish_server")) {
error 403 "Go away, please";
}
# Delete all cookies except from user
if ( !(req.url ~ "^/logout") &&
!(req.url ~ "^/profile") &&
!(req.url ~ "^/playlists") &&
!(req.url ~ "^/users") &&
!(req.url ~ "^/signup") &&
!(req.url ~ "^/comments") &&
!(req.url ~ "^/login") &&
!(req.url ~ "^/remind"))
{
unset req.http.cookie;
}
}
sub vcl_fetch {
# Grace mode
# https://www.varnish-cache.org/docs/trunk/tutorial/handling_misbehaving_servers.html#grace-mode
set beresp.grace = 30m;
# Saint mode
# https://www.varnish-cache.org/docs/trunk/tutorial/handling_misbehaving_servers.html#saint-mode
if (beresp.status == 500) {
set beresp.saintmode = 10s;
return (restart);
}
if ( !(req.url ~ "^/login") && (req.request == "GET")){
unset beresp.http.set-cookie; # To avoid caching of cookies
}
# Process ESIs if X-RUN-ESI is set. This will be stripped before being sent down to client.
if ( beresp.http.X-RUN-ESI ) {
set beresp.do_esi = true;
remove beresp.http.X-RUN-ESI;
}
# cache 404s and 301s for 5 minute
if (beresp.status == 404 || beresp.status == 301 || beresp.status == 500) {
set beresp.ttl = 15m;
return (deliver);
}
# cache images and static assets during 15m
if ( req.url ~ "\.(png|gif|jpg|css|js|ico)" ) {
set beresp.ttl = 15m;
return (deliver);
}
# If X-VARNISH-TTL is set, use this header's value as the TTL for the varnish cache.
# Expires, cache-control, etc. will be passed directly through to the client
# Cribbed from http://www.lovelysystems.com/configuring-varnish-to-use-custom-http-headers/
if (beresp.http.X-VARNISH-TTL) {
C{
char *ttl;
/* first char in third param is length of header plus colon in octal */
ttl = VRT_GetHdr(sp, HDR_BERESP, "\016X-VARNISH-TTL:");
VRT_l_beresp_ttl(sp, atoi(ttl));
}C
remove beresp.http.X-VARNISH-TTL;
return (deliver);
}
}
sub vcl_deliver {
unset resp.http.x-url; # Optional
if ( req.url ~ "\.(png|gif|jpg|css|js|ico|woff)" ) {
set resp.http.expires = "3600";
}
#mikel
#remove resp.http.X-Powered-By;
remove resp.http.Server;
#remove resp.http.X-Varnish;
#remove resp.http.Via;
#remove resp.http.Age;
}
sub vcl_hash {
if (req.http.Cookie ~ "_micasa_session") {
hash_data(req.url);
hash_data(req.http.Cookie);
return (hash);
}
}
When I try to log in with a user it works, but if I refresh the same page afterwards I lose the cookie and I'm immediately logged out. Maybe the problem is in sub vcl_recv?
Thanks in advance for your help.
You unset all cookies except on the defined pages, and your site login is almost certainly held in a cookie (a session cookie?). The easy way out is to disable caching for logged-in users by checking whether some cookie identifying a logged-in user is set. The good way is to use ESI, so that the sections that are the same for all users get cached.
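As a rough sketch of the easy way (assuming _micasa_session, which already appears in your vcl_hash, is the cookie that identifies a logged-in user):
sub vcl_recv {
    # Logged-in users carry the session cookie: skip the cache for them.
    if (req.http.Cookie ~ "_micasa_session") {
        return (pass);
    }
    # Anonymous traffic: the remaining cookies are not needed for caching.
    unset req.http.Cookie;
}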