Varnish returns 503 and backend_health shows a 400 error

May I ask what causes this problem? It has been bothering me for a long time. How should I solve it? Thank you in advance.
This is my VCL. I actually tried /health_check.php and /pub/health_check.php.
# VCL version 5.0 is not supported so it should be 4.0 even though actually used Varnish version is 6
vcl 4.0;
import std;
# The minimal Varnish version is 6.0
# For SSL offloading, pass the following header in your proxy server or load balancer: 'X-Forwarded-Proto: https'
backend default {
    .host = "localhost";
    .port = "8080";
    .first_byte_timeout = 600s;
    .probe = {
        .url = "health_check.php";
        .timeout = 2s;
        .interval = 5s;
        .window = 10;
        .threshold = 5;
    }
}
acl purge {
    "localhost";
    "127.0.0.1";
    "::1";
}
sub vcl_recv {
    if (req.restarts > 0) {
        set req.hash_always_miss = true;
    }
    if (req.method == "PURGE") {
        if (client.ip !~ purge) {
            return (synth(405, "Method not allowed"));
        }
        # To use the X-Pool header for purging varnish during automated deployments, make sure the X-Pool header
        # has been added to the response in your backend server config. This is used, for example, by the
        # capistrano-magento2 gem for purging old content from varnish during its deploy routine.
        if (!req.http.X-Magento-Tags-Pattern && !req.http.X-Pool) {
            return (synth(400, "X-Magento-Tags-Pattern or X-Pool header required"));
        }
        if (req.http.X-Magento-Tags-Pattern) {
            ban("obj.http.X-Magento-Tags ~ " + req.http.X-Magento-Tags-Pattern);
        }
        if (req.http.X-Pool) {
            ban("obj.http.X-Pool ~ " + req.http.X-Pool);
        }
        return (synth(200, "Purged"));
    }
    if (req.method != "GET" &&
        req.method != "HEAD" &&
        req.method != "PUT" &&
        req.method != "POST" &&
        req.method != "TRACE" &&
        req.method != "OPTIONS" &&
        req.method != "DELETE") {
        /* Non-RFC2616 or CONNECT which is weird. */
        return (pipe);
    }
    # We only deal with GET and HEAD by default
    if (req.method != "GET" && req.method != "HEAD") {
        return (pass);
    }
    # Bypass customer, shopping cart, checkout
    if (req.url ~ "/customer" || req.url ~ "/checkout") {
        return (pass);
    }
    # Bypass health check requests
    if (req.url ~ "^/(pub/)?(health_check.php)$") {
        return (pass);
    }
    # Set initial grace period usage status
    set req.http.grace = "none";
    # normalize url in case of leading HTTP scheme and domain
    set req.url = regsub(req.url, "^http[s]?://", "");
    # collect all cookies
    std.collect(req.http.Cookie);
    # Compression filter. See https://www.varnish-cache.org/trac/wiki/FAQ/Compression
    if (req.http.Accept-Encoding) {
        if (req.url ~ "\.(jpg|jpeg|png|gif|gz|tgz|bz2|tbz|mp3|ogg|swf|flv)$") {
            # No point in compressing these
            unset req.http.Accept-Encoding;
        } elsif (req.http.Accept-Encoding ~ "gzip") {
            set req.http.Accept-Encoding = "gzip";
        } elsif (req.http.Accept-Encoding ~ "deflate" && req.http.user-agent !~ "MSIE") {
            set req.http.Accept-Encoding = "deflate";
        } else {
            # unknown algorithm
            unset req.http.Accept-Encoding;
        }
    }
    # Remove all marketing get parameters to minimize the cache objects
    if (req.url ~ "(\?|&)(gclid|cx|ie|cof|siteurl|zanpid|origin|fbclid|mc_[a-z]+|utm_[a-z]+|_bta_[a-z]+)=") {
        set req.url = regsuball(req.url, "(gclid|cx|ie|cof|siteurl|zanpid|origin|fbclid|mc_[a-z]+|utm_[a-z]+|_bta_[a-z]+)=[-_A-z0-9+()%.]+&?", "");
        set req.url = regsub(req.url, "[?|&]+$", "");
    }
    # Static files caching
    if (req.url ~ "^/(pub/)?(media|static)/") {
        # Static files should not be cached by default
        return (pass);
        # But if you use a few locales and don't use CDN you can enable caching static files
        # by commenting the previous line (#return (pass);) and uncommenting the next 3 lines
        #unset req.http.Https;
        #unset req.http.X-Forwarded-Proto;
        #unset req.http.Cookie;
    }
    # Authenticated GraphQL requests should not be cached by default
    if (req.url ~ "/graphql" && req.http.Authorization ~ "^Bearer") {
        return (pass);
    }
    return (hash);
}
sub vcl_hash {
    if (req.http.cookie ~ "X-Magento-Vary=") {
        hash_data(regsub(req.http.cookie, "^.*?X-Magento-Vary=([^;]+);*.*$", "\1"));
    }
    # To make sure http users don't see ssl warning
    if (req.http.X-Forwarded-Proto) {
        hash_data(req.http.X-Forwarded-Proto);
    }
    if (req.url ~ "/graphql") {
        call process_graphql_headers;
    }
}
sub process_graphql_headers {
    if (req.http.Store) {
        hash_data(req.http.Store);
    }
    if (req.http.Content-Currency) {
        hash_data(req.http.Content-Currency);
    }
}
sub vcl_backend_response {
    set beresp.grace = 3d;
    if (beresp.http.content-type ~ "text") {
        set beresp.do_esi = true;
    }
    if (bereq.url ~ "\.js$" || beresp.http.content-type ~ "text") {
        set beresp.do_gzip = true;
    }
    if (beresp.http.X-Magento-Debug) {
        set beresp.http.X-Magento-Cache-Control = beresp.http.Cache-Control;
    }
    # cache only successful responses and 404s
    if (beresp.status != 200 && beresp.status != 404) {
        set beresp.ttl = 0s;
        set beresp.uncacheable = true;
        return (deliver);
    } elsif (beresp.http.Cache-Control ~ "private") {
        set beresp.uncacheable = true;
        set beresp.ttl = 86400s;
        return (deliver);
    }
    # validate if we need to cache it and prevent from setting cookie
    if (beresp.ttl > 0s && (bereq.method == "GET" || bereq.method == "HEAD")) {
        unset beresp.http.set-cookie;
    }
    # If page is not cacheable then bypass varnish for 2 minutes as Hit-For-Pass
    if (beresp.ttl <= 0s ||
        beresp.http.Surrogate-control ~ "no-store" ||
        (!beresp.http.Surrogate-Control &&
        beresp.http.Cache-Control ~ "no-cache|no-store") ||
        beresp.http.Vary == "*") {
        # Mark as Hit-For-Pass for the next 2 minutes
        set beresp.ttl = 120s;
        set beresp.uncacheable = true;
    }
    return (deliver);
}
sub vcl_deliver {
    if (resp.http.X-Magento-Debug) {
        if (resp.http.x-varnish ~ " ") {
            set resp.http.X-Magento-Cache-Debug = "HIT";
            set resp.http.Grace = req.http.grace;
        } else {
            set resp.http.X-Magento-Cache-Debug = "MISS";
        }
    } else {
        unset resp.http.Age;
    }
    # Not letting the browser cache non-static files.
    if (resp.http.Cache-Control !~ "private" && req.url !~ "^/(pub/)?(media|static)/") {
        set resp.http.Pragma = "no-cache";
        set resp.http.Expires = "-1";
        set resp.http.Cache-Control = "no-store, no-cache, must-revalidate, max-age=0";
    }
    unset resp.http.X-Magento-Debug;
    unset resp.http.X-Magento-Tags;
    unset resp.http.X-Powered-By;
    unset resp.http.Server;
    unset resp.http.X-Varnish;
    unset resp.http.Via;
    unset resp.http.Link;
}
sub vcl_hit {
    if (obj.ttl >= 0s) {
        # Hit within TTL period
        return (deliver);
    }
    if (std.healthy(req.backend_hint)) {
        if (obj.ttl + 300s > 0s) {
            # Hit after TTL expiration, but within grace period
            set req.http.grace = "normal (healthy server)";
            return (deliver);
        } else {
            # Hit after TTL and grace expiration
            return (restart);
        }
    } else {
        # server is not healthy, retrieve from cache
        set req.http.grace = "unlimited (unhealthy server)";
        return (deliver);
    }
}
My application is Magento and my health_check.php file is in pub/.
This is my Nginx error:
2022/11/04 09:15:23 [crit] 3378368#0: *538 SSL_do_handshake() failed (SSL: error:141CF06C:SSL routines:tls_parse_ctos_key_share:bad key share) while SSL handshaking, client: *.*.*.224, server: 0.0.0.0:443
2022/11/04 09:37:30 [notice] 3383586#0: signal process started
I ran varnishlog -g request -q "ReqUrl eq '/health_check.php'" and got no output.
I also ran sudo varnishlog -g request -q "ReqUrl eq '/'" with no output. Then I ran sudo varnishlog -g request -q "VCL_call eq 'BACKEND_ERROR'" and it returned a 503.

It's clear that the 400 error on the health probe causes the backend to be marked unhealthy. Varnish returns a 503 when the backend is unhealthy, regardless of the status code of the actual request.
Please share the backend & probe configuration from your VCL file so I can figure out how the health checking endpoint is probed.
It also makes sense for you to run a varnishlog -g request -q "ReqUrl eq '/health_check'" where /health_check is replaced by the URL of the health checking probe.
Please also have a look at the webserver logs of the backend to see if there's any indication on why the HTTP 400 status code is returned.
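You can also watch the probe results directly on the Varnish server with the standard tooling:
varnishadm backend.list
varnishlog -g raw -i Backend_health
The first command shows the current health state of every backend; the second prints a log line for every individual probe request, including the HTTP status the probe received.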
Update: run the health check manually
The Nginx error logs didn't contain anything useful. Please check the access logs as well. If there is useful information in there, don't hesitate to add it to your question.
Please also check the Magento (debug) logs for more information.
You could also run curl -I http://localhost:8080/health_check.php from the command line on the Varnish server to see if anything specific comes up in the output for the script or in the Magento logs.
Update 2: provide the right health check host header
It looks as though your Nginx server doesn't route requests coming from localhost to the right Magento virtual host.
In that case I suggest adding the .host_header property to your backend definition as illustrated below:
backend default {
    .host = "localhost";
    .host_header = "mydomain";
    .port = "8080";
    .first_byte_timeout = 600s;
    .probe = {
        .url = "/health_check.php";
        .timeout = 2s;
        .interval = 5s;
        .window = 10;
        .threshold = 5;
    }
}
This property ensures that the right Host header is sent to the backend while performing health checks.
Since you mentioned in your comment that https://mydomain/health_check.php returns a valid 200 OK status code, this seems like a first step in the right direction.
Please update .host_header = "mydomain" so that the right domain name is used.
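To confirm that the Host header is what makes the difference, you can imitate the probe from the command line on the Varnish server (mydomain is a placeholder for your real domain):
curl -I -H "Host: mydomain" http://localhost:8080/health_check.php
If this returns 200 OK while the same request without the Host header returns 400, the .host_header fix is the right one.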
Update 3: figure out why the page doesn't load
Your backend seems to be healthy now (based on the comments), but the site still doesn't work.
In that case, run the following command to debug:
sudo varnishlog -g request -q "ReqUrl eq '/'"
This command will display the full VSL transaction log for the homepage. Go ahead and run it and add the full output to your question in case you need help.
Update 4: first byte timeout issues
The logs you provided give a clear indication of what's causing the failure.
Here's the relevant log extract:
BackendOpen 31 default 127.0.0.1 8080 127.0.0.1 57786
BackendStart 127.0.0.1 8080
Timestamp Bereq: 1668044657.403995 0.000161 0.000161
FetchError first byte timeout
BackendClose 31 default
Timestamp Beresp: 1668045257.502267 600.098433 600.098272
Timestamp Error: 1668045257.502275 600.098441 0.000008
Varnish is opening up a connection to the backend via a backend fetch thread and sends the backend request. It took Varnish 0.000161 seconds to send the request.
Now the Varnish backend thread is waiting for the backend to respond. The first byte timeout is set to 600 seconds, which is incredibly long. However, the timeout is still triggered.
The Timestamp Beresp log line indicates that it took Varnish 600.098272 seconds to receive the backend response. But we know that this is just the timeout firing: the error is triggered directly after that.
Something really weird is going on and you need to figure out why your backend is taking so long to respond. This doesn't really have anything to do with Varnish itself, but it could be that your application behaves in a strange way when a proxy server is put in front of it.
It's also possible that your backend application is just slow under all circumstances. That's information I don't have and that's beyond the scope of this question.
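While you investigate the slow backend, you probably don't want client requests to hang for ten minutes. As a sketch, assuming Varnish 6, you could fail faster by lowering the first byte timeout, either on the backend definition or per request in vcl_backend_fetch:
sub vcl_backend_fetch {
    # Fail after 30 seconds instead of 600; tune this to what the backend realistically needs
    set bereq.first_byte_timeout = 30s;
}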

Related

Varnish with MPEG-TS Traffic

We are trying to use Varnish as a proxy/cache for our media server. Our streams are MPEG-TS (h264/h265) over HTTP. There are 1000 live streams on this media server and each stream gets multiple connections. We tried to configure Varnish as shown below, but we have these problems:
Streams get closed after a short period of time
Sometimes we can't connect to streams at all; they get stuck at connecting...
We got these errors in varnishlog:
- FetchError Could not get storage
- FetchError Could not get storage
- FetchError Could not get storage
- FetchError Could not get storage
- FetchError Resource temporarily unavailable
- FetchError eof socket fail
- FetchError Resource temporarily unavailable
- FetchError eof socket fail
- FetchError Could not get storage
- FetchError Could not get storage
- FetchError Could not get storage
- FetchError Could not get storage
- FetchError Could not get storage
- FetchError Could not get storage
- FetchError Could not get storage
- FetchError Could not get storage
- FetchError Could not get storage
- FetchError Resource temporarily unavailable
- FetchError eof socket fail
- FetchError Resource temporarily unavailable
- FetchError eof socket fail
- FetchError Resource temporarily unavailable
- FetchError eof socket fail
- FetchError Could not get storage
- FetchError Could not get storage
My config:
vcl 4.0;
import directors;

backend s6855 {
    .host = "127.0.0.1";
    .port = "6855";
    .first_byte_timeout = 10s;     # How long to wait before we receive a first byte from our backend?
    .connect_timeout = 5s;         # How long to wait for a backend connection?
    .between_bytes_timeout = 30s;  # How long to wait between bytes received from our backend?
}
backend s6866 {
    .host = "127.0.0.1";
    .port = "6866";
    .first_byte_timeout = 10s;     # How long to wait before we receive a first byte from our backend?
    .connect_timeout = 5s;         # How long to wait for a backend connection?
    .between_bytes_timeout = 30s;  # How long to wait between bytes received from our backend?
}
backend s6877 {
    .host = "127.0.0.1";
    .port = "6877";
    .first_byte_timeout = 10s;     # How long to wait before we receive a first byte from our backend?
    .connect_timeout = 5s;         # How long to wait for a backend connection?
    .between_bytes_timeout = 30s;  # How long to wait between bytes received from our backend?
}
backend s6888 {
    .host = "127.0.0.1";
    .port = "6888";
    .first_byte_timeout = 10s;     # How long to wait before we receive a first byte from our backend?
    .connect_timeout = 5s;         # How long to wait for a backend connection?
    .between_bytes_timeout = 30s;  # How long to wait between bytes received from our backend?
}
backend s6899 {
    .host = "127.0.0.1";
    .port = "6899";
    .first_byte_timeout = 10s;     # How long to wait before we receive a first byte from our backend?
    .connect_timeout = 5s;         # How long to wait for a backend connection?
    .between_bytes_timeout = 30s;  # How long to wait between bytes received from our backend?
}

sub vcl_init {
    new fb = directors.round_robin();
    fb.add_backend(s6855);
    fb.add_backend(s6866);
    fb.add_backend(s6877);
    fb.add_backend(s6888);
    fb.add_backend(s6899);
}

sub vcl_recv {
    set req.grace = 120s;
    set req.backend_hint = fb.backend();
    if (req.url ~ "(\.ts)") {
        unset req.http.Range;
    }
    if (req.http.cookie) {
        unset req.http.cookie;
    }
    if (req.method != "GET" && req.method != "HEAD") {
        return (pipe);
    }
    if (req.method == "GET" && req.url ~ "(\.ts)") {
        unset req.http.Accept-Encoding;
        return (hash);
    }
    return (hash);
}

sub vcl_hash {
    hash_data(req.url);
    return (lookup);
}

sub vcl_backend_response {
    set beresp.grace = 2m;
    set beresp.ttl = 120s;
    set beresp.do_gunzip = false;
    set beresp.do_gzip = false;
    if (bereq.url ~ "(\.ts)") {
        set beresp.ttl = 60s;
        set beresp.http.X-Cacheable = "YES";
    } else {
        set beresp.ttl = 10m;
        set beresp.http.X-Cacheable = "NO";
    }
    if (beresp.status == 404) {
        set beresp.ttl = 5m;
    }
    return (deliver);
}

sub vcl_hit {
    if (obj.ttl == 0s) {
        return (pass);
    }
    return (deliver);
}

sub vcl_miss {
}

sub vcl_deliver {
    set resp.http.X-Served-By = "For Test";
    if (obj.hits > 0) {
        set resp.http.X-Cache = "HIT";
        set resp.http.X-Cache-Hits = obj.hits;
    } else {
        set resp.http.X-Cache = "MISS";
    }
    if (resp.http.magicmarker) {
        unset resp.http.magicmarker;
        set resp.http.Age = "0";
    }
    unset resp.http.Via;
    unset resp.http.X-Varnish;
}
[screenshot: Varnish usage]
Since we're pretty new to Varnish, we're not sure how to debug the problem; any help will be appreciated.
Thanks
The problem you're experiencing is not just a lack of object storage, but the fact that your biggest HTTP response is larger than the total size of the object storage.
This means Varnish cannot LRU evict the required space to fit the object in cache.
Could not get storage is an error that is typically returned when this happens.
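If you want to verify this, you can capture the failing transactions and inspect the size of the response that didn't fit, for example:
varnishlog -g request -q "FetchError ~ 'Could not get storage'"
The logged backend response headers will show how big the object was, if the backend sends a Content-Length.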
Check the sizes
It is important to figure out how big your cache is, and what the size of the object is that fails on you.
Your varnishd runtime settings will tell you how big your object storage is. The -s malloc,<size> startup argument contains this value.
You can also use varnishstat to check the size & usage of your memory cache and the transient storage:
varnishstat -f SMA.*.g* -f MAIN.n_lru_nuked
The MAIN.n_lru_nuked counter, which is also included in this command, indicates how many objects Varnish has forcefully removed from the cache to clear up space for new objects.
Fixing the issue
The easiest way to fix the issue is to assign more memory to Varnish via -s malloc,<size>. Don't forget to restart Varnish after you have changed these settings.
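For example, if Varnish currently runs with something like -s malloc,256m (the 256m is just a hypothetical starting point), you could raise it to:
varnishd -a :80 -f /etc/varnish/default.vcl -s malloc,4G
The exact startup line depends on how Varnish is launched on your system; on systemd-based machines it is usually the ExecStart line of the Varnish service unit.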
After that, the following command will help you figure out if there's enough storage, and if Varnish still needs to forcefully remove objects from the cache to free up space:
varnishstat -f SMA.*.g* -f MAIN.n_lru_nuked
A more sustainable plan
Another plan is to rely on the Massive Storage Engine (MSE). This is a storage engine that is part of Varnish Enterprise.
It combines memory and disk storage, and is optimized to handle large volumes of data. It avoids fragmentation, and is architected to not suffer from the typical latency of disk access.
There are official machine images for AWS, Azure & Google Cloud that allow you to experiment with this storage engine, without having to buy a license upfront.
A killer MSE feature is the memory governor. This is a mechanism that dynamically sizes the memory storage of your caches based on the needs of requests & responses.
If you run short of memory, and there isn't a lot of memory needed for thread handling, the memory governor will automatically assign more memory to the storage engine.
If you use the persistence layer of MSE, you can host terabytes of data on a single machine without running into these issues.
At Varnish Software, the company that builds Varnish Enterprise, we see MSE as the primary feature that OTT video streaming companies use to accelerate video delivery.
What if my assessment is completely wrong
Although the Could not get storage error usually appears when Varnish tries to store objects that are too big for the size of the cache, I could also be wrong.
In that case, I would advise you to run varnishlog and see the full trace of what's going on in that specific transaction:
varnishlog -g request -q "ReqUrl eq '/my-url'"
This example gets all the details of requests for /my-url. Please change this to the URL you're trying to monitor.
The output will usually give you a better understanding of how Varnish is behaving. This can help us figure out how to fix the issue, if my initial assessment was wrong.

Varnish Cache v4: Incorrect Backend Health Check Response

I've set up Varnish Cache (4) in front of my CMS to help cache requests. In the event that my CMS goes down, I'd like to deliver cached items for a set grace period. I've followed many examples provided online but am running into an issue where Varnish doesn't recognize that my backend is down. When I manually shut down the CMS, std.healthy(req.backend_hint) continues to return true and Varnish attempts to retrieve an item from the backend, which then returns a 503 response.
Question: Am I incorrectly assuming std.healthy(req.backend_hint) will recognize that my CMS is down?
Here is the VCL script I've been using to test:
sub vcl_recv {
    # Initial State
    set req.http.grace = "none";
    set req.http.x-host = req.http.host;
    set req.http.x-url = req.url;
    return (hash);
}
sub vcl_backend_response {
    set beresp.ttl = 10s;
    set beresp.grace = 1h;
}
sub vcl_deliver {
    # Copy grace to resp so we can tell from the client
    set resp.http.grace = req.http.grace;
    # Add debugging headers to cache requests
    if (obj.hits > 0) {
        set resp.http.X-Cache = "HIT";
    } else {
        set resp.http.X-Cache = "MISS";
    }
}
sub vcl_hit {
    if (obj.ttl >= 0s) {
        # normal hit
        return (deliver);
    }
    # We have no fresh content, let's look at the cache
    elsif (std.healthy(req.backend_hint)) {
        # Backend is healthy. Limit age to 10s.
        if (obj.ttl + 10s > 0s) {
            set req.http.grace = "normal(limited)";
            return (deliver);
        } else {
            # No candidate for grace. Fetch a fresh object.
            return (fetch);
        }
    } else {
        # backend is sick - use full grace
        if (obj.ttl + obj.grace > 0s) {
            set req.http.grace = "full";
            return (deliver);
        } else {
            # no graced object.
            return (fetch);
        }
    }
}
Again, when I shut down the CMS, std.healthy(req.backend_hint) still reports the backend as healthy and never reaches the final else statement.
Thanks for taking a look.
To properly use std.healthy you of course need to configure backend probes. So at the top of your VCL file you would first configure a probe:
probe site_probe {
    .request =
        "HEAD / HTTP/1.1"
        "Host: example.com"
        "Connection: close";
    .interval = 5s;  # check the health of each backend every 5 seconds
    .timeout = 3s;   # mark the poll as failed if no response arrives within 3 seconds
    .window = 5;     # if 3 out of the last 5 polls succeeded the backend is considered healthy, otherwise it is marked as sick
    .threshold = 3;
}
Make sure to replace example.com with your main website domain. It is important to put (or omit) the www. prefix so that the probe does not get redirected and marked as failed.
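If the URL you probe legitimately redirects, another option is to tell the probe which status code to expect instead of the default 200. A variant of the probe above, as a sketch:
probe site_probe {
    .url = "/";
    .expected_response = 301; # accept the redirect status instead of requiring 200
    .interval = 5s;
    .timeout = 3s;
    .window = 5;
    .threshold = 3;
}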
And of course your backend definition should be configured to use the defined probe:
backend default {
    .host = "127.0.0.1";
    .port = "8080";
    .probe = site_probe;
}
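Keep in mind that std.healthy comes from the std vmod, so the VCL also needs the corresponding import. A minimal usage sketch (the X-Backend-Sick header is just a hypothetical debug marker):
import std;

sub vcl_recv {
    # std.healthy reflects the probe state of the backend selected for this request
    if (!std.healthy(req.backend_hint)) {
        set req.http.X-Backend-Sick = "1";
    }
}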

Why does my Varnish health check always report the backend server as sick?

I use Varnish as a cache server in front of my Nginx web server. I configured a health check for the backend server definition in the Varnish configuration file, but Varnish always reports my backend server as sick, while if I stop Varnish and connect directly to the Nginx server, everything works fine. I have tried both .url and .request to enable the backend health check, and neither ever works. How do I resolve this?
My configuration is:
probe backend_healthcheck {
    .url = "/";
    .window = 5;
    .threshold = 3;
    .interval = 300s;
}
backend nginx1 {
    .host = "10.161.78.249";
    .port = "80";
    .probe = backend_healthcheck;
}
backend nginx2 {
    .host = "10.161.78.249";
    .port = "80";
    .probe = backend_healthcheck;
}
import directors;
import std;
sub vcl_init {
    new webserver = directors.hash();
    webserver.add_backend(nginx1, 1.0);
    webserver.add_backend(nginx2, 1.0);
}
sub vcl_recv {
    set req.backend_hint = webserver.backend(req.http.cookie);
}
I really don't know which part of the configuration is wrong. Varnish is running on a server that has a public IP, and the backend server only has an internal IP.
I hope someone can help me. Thanks a lot.
Note that the request has to return 200 OK for Varnish to accept this as healthy.
Could it be that your framework returns 302 on the / URL?
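You can check what the probe URL actually returns by running this from the Varnish server:
curl -s -o /dev/null -w "%{http_code}\n" http://10.161.78.249/
Anything other than a 200 (a 301 or 302 redirect, for example) counts as a failed poll.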
Also, your .window size means that the servers start as sick and remain sick until you get .threshold consecutive healthy responses; with .interval = 300s and .threshold = 3, that takes 15 minutes in your case.
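If the slow start is part of the problem, you can shorten the interval and set the .initial probe attribute, which defines how many polls are assumed good when Varnish starts (it defaults to .threshold - 1). A sketch:
probe backend_healthcheck {
    .url = "/";
    .interval = 5s;  # poll every 5 seconds instead of every 5 minutes
    .window = 5;
    .threshold = 3;
    .initial = 3;    # consider the backend healthy at startup
}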
Also, what does the log show?
Documentation:
https://www.varnish-cache.org/trac/wiki/BackendPolling

How to increase obj.ttl in Varnish vcl_backend_error

Since you might still have a cumbersome backend that isn't very friendly to work with, you might want to increase obj.ttl in Varnish 4.x, like this:
sub vcl_backend_response {
    if (beresp.http.X-Response-Error == "1") {
        set obj.ttl = 120s;
        return (abandon);
    }
    return (deliver);
}
But obj cannot be accessed in vcl_backend_response.
Is there any other way?
Thanks!
You have to use Grace Mode. When Varnish is in grace mode, it uses an object that has already expired as far as the TTL is concerned.
I used this as a base and adapted it to Varnish 4.0:
import std; # needed for std.healthy

sub vcl_recv {
    # This is in case you have configured a probe.
    if (std.healthy(req.backend_hint)) {
        set req.grace = 30s;
    } else {
        set req.grace = 1h;
    }
}
sub vcl_backend_response {
    set beresp.grace = 1h;
}
tl;dr (grace mode link)
A graced object is an object that has expired, but is still kept in cache.
Grace mode is when Varnish uses a graced object.
req.grace defines how long overdue an object can be for Varnish to still consider it for grace mode.
beresp.grace defines how long past the beresp.ttl-time Varnish will keep an object
req.grace is often modified in vcl_recv based on the state of the backend.

Varnish round robin director with backend virtual hosts

I've been trying like mad to figure out the VCL for how to do this and am beginning to think it's not possible. I have several backend app servers that serve a variety of different hosts. I need varnish to cache pages for any host and send requests that miss the cache to the app servers with the original host info in the request ("www.site.com"). However, all the VCL examples seem to require me to use a specific host name for my backend server ("backend1" for example). Is there any way around this? I'd love to just point the cache miss to an IP and leave the request host intact.
Here's what I have now:
backend app1 {
    .host = "192.168.1.11";
    .probe = {
        .url = "/heartbeat";
        .interval = 5s;
        .timeout = 1s;
        .window = 5;
        .threshold = 3;
    }
}
backend app2 {
    .host = "192.168.1.12";
    .probe = {
        .url = "/heartbeat";
        .interval = 5s;
        .timeout = 1s;
        .window = 5;
        .threshold = 3;
    }
}
director pwms_t247 round-robin {
    {
        .backend = app1;
    }
    {
        .backend = app2;
    }
}
sub vcl_recv {
    # pass on any requests that varnish should not handle
    if (req.request != "HEAD" && req.request != "GET" && req.request != "BAN") {
        return (pass);
    }
    # pass requests to the backend if they have a no-cache header or cookie
    if (req.http.x-varnish-no-cache == "true" || (req.http.cookie && req.http.cookie ~ "x-varnish-no-cache=true")) {
        return (pass);
    }
    # Lookup requests that we know should be cached
    if (req.url ~ ".*") {
        # Clear cookie and authorization headers, set grace time, lookup in the cache
        unset req.http.Cookie;
        unset req.http.Authorization;
        return (lookup);
    }
}
etc...
This is my first StackOverflow question so please let me know if I neglected to mention something important! Thanks.
Here is what I actually got to work. I credit ivy because his answer is technically correct, and because one of the problems was my host (they were blocking ports, which prevented my normal web requests from getting through). The real problem I was having was that the heartbeat messages had no host info, so the vhost couldn't route them correctly. Here's a sample backend definition with a probe that crafts a completely custom request:
backend app1 {
    .host = "192.168.1.11";
    .port = "80";
    .probe = {
        .request = "GET /heartbeat HTTP/1.1"
            "Host: something.com"
            "Connection: close"
            "Accept-Encoding: text/html";
        .interval = 15s;
        .timeout = 2s;
        .window = 5;
        .threshold = 3;
    }
}
I need varnish to cache pages for any host and send requests that miss
the cache to the app servers with the original host info in the
request ("www.site.com"). However, all the VCL examples seem to require
me to use a specific host name for my backend server ("backend1" for example)
backend1 is not a hostname, it's a backend definition with an IP address. You're defining some routing logic in your VCL file (which backend a request is proxied to), but you're not changing the hostname in the request. What you're asking for (keeping the hostname the same) is the default behavior.
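As a side note, in modern VCL 4.x syntax the same round-robin setup would use the directors vmod; nothing in it touches req.http.host, so the original hostname is passed through to whichever backend is picked. A sketch based on the app1/app2 backends above:
import directors;

sub vcl_init {
    new pwms_t247 = directors.round_robin();
    pwms_t247.add_backend(app1);
    pwms_t247.add_backend(app2);
}

sub vcl_recv {
    set req.backend_hint = pwms_t247.backend();
}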
