Install Magento 2 and Varnish Cache on different server - varnish

I have 2 servers, one which has magento 2 installed (ip - 129.89.188.244 port 80) and Varnish on another (ip - 129.89.188.245 port 80)
My Varnish Configuration:
File /etc/default/varnish:-
DAEMON_OPTS="-a :80 \
-T 127.0.0.1:6082 \
-b 129.89.188.244:80 \
-f /etc/varnish/default.vcl \
-S /etc/varnish/secret \
-s malloc,256m"
netstat -tulpn :-
tcp 0 0 0.0.0.0:22 0.0.0.0:* LISTEN 1288/sshd
tcp 0 0 127.0.0.1:6082 0.0.0.0:* LISTEN 11115/varnishd
tcp 0 0 0.0.0.0:80 0.0.0.0:* LISTEN 11115/varnishd
tcp6 0 0 :::22 :::* LISTEN 1288/sshd
tcp6 0 0 :::80 :::* LISTEN 11115/varnishd
/etc/varnish/default.vcl : -
# Magento generates this VCL marked "version 5.0", but varnishd only accepts
# the 4.0/4.1 syntax markers, so 4.0 is declared even though Varnish 5 runs it.
vcl 4.0;
import std;
# The minimal Varnish version is 5.0
# For SSL offloading, pass the following header in your proxy server or load balancer: 'X-Forwarded-Proto: https'
# Magento 2 origin server; every cache miss is fetched from here.
backend default {
.host = "129.89.188.244";
.port = "80";
# Allow slow (uncached) Magento page generation up to 10 minutes.
.first_byte_timeout = 600s;
# Health probe: healthy when at least 5 of the last 10 requests to the
# Magento health-check script succeed within 2 seconds each.
.probe = {
.url = "/pub/health_check.php";
.timeout = 2s;
.interval = 5s;
.window = 10;
.threshold = 5;
}
}
# Clients allowed to issue PURGE requests: the Varnish host itself and loopback.
acl purge {
"129.89.188.245";
"127.0.0.1";
"localhost";
}
# Entry point for every client request: handles cache invalidation (PURGE),
# decides pass/pipe/lookup, and normalizes the request to maximize hit rate.
sub vcl_recv {
if (req.method == "PURGE") {
# Only hosts in the purge ACL may invalidate; everyone else gets a 405.
if (client.ip !~ purge) {
return (synth(405, "Method not allowed"));
}
# To use the X-Pool header for purging varnish during automated deployments, make sure the X-Pool header
# has been added to the response in your backend server config. This is used, for example, by the
# capistrano-magento2 gem for purging old content from varnish during its deploy routine.
if (!req.http.X-Magento-Tags-Pattern && !req.http.X-Pool) {
return (synth(400, "X-Magento-Tags-Pattern or X-Pool header required"));
}
# Bans (not purges): patterns are evaluated lazily against cached objects.
if (req.http.X-Magento-Tags-Pattern) {
ban("obj.http.X-Magento-Tags ~ " + req.http.X-Magento-Tags-Pattern);
}
if (req.http.X-Pool) {
ban("obj.http.X-Pool ~ " + req.http.X-Pool);
}
return (synth(200, "Purged"));
}
if (req.method != "GET" &&
req.method != "HEAD" &&
req.method != "PUT" &&
req.method != "POST" &&
req.method != "TRACE" &&
req.method != "OPTIONS" &&
req.method != "DELETE") {
/* Non-RFC2616 or CONNECT which is weird. */
return (pipe);
}
# We only deal with GET and HEAD by default
if (req.method != "GET" && req.method != "HEAD") {
return (pass);
}
# Bypass shopping cart, checkout and search requests
if (req.url ~ "/checkout" || req.url ~ "/catalogsearch") {
return (pass);
}
# Bypass health check requests
if (req.url ~ "/pub/health_check.php") {
return (pass);
}
# Set initial grace period usage status (reported by vcl_deliver in debug mode)
set req.http.grace = "none";
# normalize url in case of leading HTTP scheme and domain
set req.url = regsub(req.url, "^http[s]?://", "");
# collect all cookies into a single Cookie header
std.collect(req.http.Cookie);
# Compression filter. See https://www.varnish-cache.org/trac/wiki/FAQ/Compression
if (req.http.Accept-Encoding) {
if (req.url ~ "\.(jpg|jpeg|png|gif|gz|tgz|bz2|tbz|mp3|ogg|swf|flv)$") {
# No point in compressing these (already compressed formats)
unset req.http.Accept-Encoding;
} elsif (req.http.Accept-Encoding ~ "gzip") {
set req.http.Accept-Encoding = "gzip";
} elsif (req.http.Accept-Encoding ~ "deflate" && req.http.user-agent !~ "MSIE") {
set req.http.Accept-Encoding = "deflate";
} else {
# unknown algorithm
unset req.http.Accept-Encoding;
}
}
# Remove Google gclid parameters to minimize the number of cache objects
set req.url = regsuball(req.url,"\?gclid=[^&]+$",""); # strips when QS = "?gclid=AAA"
set req.url = regsuball(req.url,"\?gclid=[^&]+&","?"); # strips when QS = "?gclid=AAA&foo=bar"
set req.url = regsuball(req.url,"&gclid=[^&]+",""); # strips when QS = "?foo=bar&gclid=AAA" or QS = "?foo=bar&gclid=AAA&bar=baz"
# Static files caching
if (req.url ~ "^/(pub/)?(media|static)/") {
# Static files should not be cached by default
return (pass);
# But if you use a few locales and don't use CDN you can enable caching static files by commenting previous line (#return (pass);) and uncommenting next 3 lines
#unset req.http.Https;
#unset req.http.X-Forwarded-Proto;
#unset req.http.Cookie;
}
return (hash);
}
# Build the cache key: the default hash plus Magento's X-Magento-Vary cookie
# value, the Host header (multi-site) and the forwarded protocol, so HTTP and
# HTTPS variants of a page are cached separately.
sub vcl_hash {
if (req.http.cookie ~ "X-Magento-Vary=") {
hash_data(regsub(req.http.cookie, "^.*?X-Magento-Vary=([^;]+);*.*$", "\1"));
}
# For multi site configurations to not cache each other's content
if (req.http.host) {
hash_data(req.http.host);
} else {
hash_data(server.ip);
}
# To make sure http users don't see ssl warning
if (req.http.X-Forwarded-Proto) {
hash_data(req.http.X-Forwarded-Proto);
}
}
# Post-fetch policy for responses coming back from Magento: enable ESI and
# gzip where appropriate, decide cacheability, and mark uncacheable pages as
# hit-for-pass so they do not serialize concurrent requests.
sub vcl_backend_response {
# Keep expired objects for up to 3 days so vcl_hit can serve them in grace mode.
set beresp.grace = 3d;
if (beresp.http.content-type ~ "text") {
set beresp.do_esi = true;
}
if (bereq.url ~ "\.js$" || beresp.http.content-type ~ "text") {
set beresp.do_gzip = true;
}
if (beresp.http.X-Magento-Debug) {
set beresp.http.X-Magento-Cache-Control = beresp.http.Cache-Control;
}
# cache only successful responses and 404s
if (beresp.status != 200 && beresp.status != 404) {
set beresp.ttl = 0s;
set beresp.uncacheable = true;
return (deliver);
} elsif (beresp.http.Cache-Control ~ "private") {
set beresp.uncacheable = true;
set beresp.ttl = 86400s;
return (deliver);
}
# validate if we need to cache it and prevent from setting cookie
# images, css and js are cacheable by default so we have to remove cookie also
if (beresp.ttl > 0s && (bereq.method == "GET" || bereq.method == "HEAD")) {
unset beresp.http.set-cookie;
}
# If page is not cacheable then bypass varnish for 2 minutes as Hit-For-Pass
if (beresp.ttl <= 0s ||
beresp.http.Surrogate-control ~ "no-store" ||
(!beresp.http.Surrogate-Control &&
beresp.http.Cache-Control ~ "no-cache|no-store") ||
beresp.http.Vary == "*") {
# Mark as Hit-For-Pass for the next 2 minutes
set beresp.ttl = 120s;
set beresp.uncacheable = true;
}
return (deliver);
}
# Final client-response tweaks: HIT/MISS debug markers, browser-cache
# prevention for dynamic pages, and stripping of internal/identifying headers.
sub vcl_deliver {
if (resp.http.X-Magento-Debug) {
# X-Varnish carries two XIDs (separated by a space) only on a cache hit.
if (resp.http.x-varnish ~ " ") {
set resp.http.X-Magento-Cache-Debug = "HIT";
set resp.http.Grace = req.http.grace;
} else {
set resp.http.X-Magento-Cache-Debug = "MISS";
}
} else {
unset resp.http.Age;
}
# Not letting browser to cache non-static files.
if (resp.http.Cache-Control !~ "private" && req.url !~ "^/(pub/)?(media|static)/") {
set resp.http.Pragma = "no-cache";
set resp.http.Expires = "-1";
set resp.http.Cache-Control = "no-store, no-cache, must-revalidate, max-age=0";
}
# Hide debug and infrastructure headers from clients.
unset resp.http.X-Magento-Debug;
unset resp.http.X-Magento-Tags;
unset resp.http.X-Powered-By;
unset resp.http.Server;
unset resp.http.X-Varnish;
unset resp.http.Via;
unset resp.http.Link;
}
# Grace-mode logic: serve fresh hits directly; serve slightly stale objects
# (up to 300s past TTL) while the backend is healthy; and serve any cached
# copy, regardless of age, when the backend is down.
sub vcl_hit {
if (obj.ttl >= 0s) {
# Hit within TTL period
return (deliver);
}
if (std.healthy(req.backend_hint)) {
if (obj.ttl + 300s > 0s) {
# Hit after TTL expiration, but within grace period
set req.http.grace = "normal (healthy server)";
return (deliver);
} else {
# Hit after TTL and grace expiration
return (miss);
}
} else {
# server is not healthy, retrieve from cache
set req.http.grace = "unlimited (unhealthy server)";
return (deliver);
}
}
Now the issue is, when I open the URL 129.89.188.244 , magento opens but it's not getting cached in varnish. But when I call varnish URL 129.89.188.245, it will redirect to my magento url 129.89.188.244. In my varnish log, it shows that the page is cached already but magento is not getting served from that varnish cache.

In order for the cache to work, your requests always have to go through the proxy/cache (Varnish).
Varnish evaluates each request and decides whether it is already cached (then returns it from the cache) or not — in which case it forwards the request to Magento, caches the response, and then returns it to the client.
That's how most caches work. You can read in detail how Varnish cache mechanism works here.
If you want to hit your cache, you always have to go through Varnish (129.89.188.245:80).
Consider this diagram from the official documentation:

this is the expected behaviour.
Varnish sits in front of your Magento server. If you hit your Magento server directly, bypassing Varnish by opening 129.89.188.244, then Magento will send you a response without involving Varnish; whereas if you hit Varnish, it will call Magento and cache the response. The other way around is not possible and makes no sense.

This solution worked for me. To configure Varnish and Magento on a different server.
Varnish server: xxx.xxx.xxx.xxx port 80
Magento server: yyy.yyy.yyy.yyy port 80
Changes need to be made on the varnish server:
1. login to varnish server
2. go to file /etc/varnish/default.vcl
3. under the "backend default" update
.host = "yyy.yyy.yyy.yyy";//(use Magento server IP for better network)
.port = "80";//(Magento web server port)
4. Restart the Varnish (systemctl restart varnish)
Note: Kindly use the default VCL that is generated during the varnish installation and don't update it with Magento generated VCL for varnish ( available from Magento Admin)
Changes need to be made on the Magento server:
1. Log in to the Magento server
2. Go to the env.php file located in the app/etc directory
3. Update the values in 'http_cache_hosts' => [
[
'host' => 'xxx.xxx.xxx.xxx', //(varnish server public ip)
'port' => '80' // ( varnish server port)
]
]
Now update your base URLs in the core_config_data table to your Varnish public IP (http://xxx.xxx.xxx.xxx/)
flush the Magento caches ( bin/magento ca:fl)

Related

varnish is unable to take hold of port 80 on ec2 machine

I am trying to run varnish in a docker container in an ec2 instance.
I tried doing the same locally and it worked fine, but on EC2 it keeps giving the error:
Error: Could not get socket :80: Permission denied
My vcl looks like:
# VCL syntax version marker (mandatory first statement since Varnish 4).
vcl 4.0;
# Single origin the cache fetches from on a miss.
backend default {
.host = "x.y.z.y";
.port = "8090";
}
# Invalidation entry point: a BAN request lazily invalidates every cached
# object whose stored x-host/x-url headers (set in vcl_backend_response)
# match this request's Host and URL.
sub vcl_recv {
if (req.method == "BAN") {
ban("obj.http.x-host == " + req.http.host + " && obj.http.x-url ~ " + req.url);
return(synth(200, "Banned added"));
}
}
sub vcl_backend_response {
# Store URL and HOST in the cached response so the ban expression in
# vcl_recv (which can only inspect obj.*) has something to match against.
set beresp.http.x-url = bereq.url;
set beresp.http.x-host = bereq.http.host;
}
sub vcl_deliver {
# Prevent the client from seeing these additional headers.
unset resp.http.x-url;
unset resp.http.x-host;
}
# NOTE(review): duplicate definition — Varnish concatenates same-named
# subroutines, so this merely repeats the unsets harmlessly; it can be removed.
sub vcl_deliver {
# Prevent the client from seeing these additional headers.
unset resp.http.x-url;
unset resp.http.x-host;
}
and there is no process running on 80 port
Binding to port 80 requires root privileges. Try running the docker command as root, or add your user to the docker group.

Setting up nginx with multiple IPs

I have my nginx configuration file under /etc/nginx/sites-available/ with two upstreams say
upstream test1 {
server 1.1.1.1:50;
server 1.1.1.2:50;
}
upstream test2 {
server 2.2.2.1:60;
server 2.2.2.2:60;
}
server {
location / {
proxy_pass http://test1;
}
location / {
proxy_pass http://test2;
}
}
Sending a curl request to <PrimaryIP>:80 works but I want to use <SecondaryIP1>:80 for test1 and <SecondaryIP2>:80 for test2. Is it possible to define this in nginx?
You have to have two server directives to accomplish this task:
# Pools of application servers, one per virtual server below.
upstream test1 {
    server 1.1.1.1:50;
    server 1.1.1.2:50;
}
upstream test2 {
    server 2.2.2.1:60;
    server 2.2.2.2:60;
}
# Bind each server block to one local address so traffic arriving on
# <SecondaryIP1>:80 is proxied to test1 and <SecondaryIP2>:80 to test2.
# Selecting by the destination address in "listen" is the reliable way to
# split by IP; server_name matches the Host request header, which only
# works when clients address the server by that exact name/IP.
server {
    listen <SecondaryIP1>:80;
    server_name <SecondaryIP1>;
    location / {
        proxy_pass http://test1;
    }
}
server {
    listen <SecondaryIP2>:80;
    server_name <SecondaryIP2>;
    location / {
        proxy_pass http://test2;
    }
}

Varnish - Seeing backend_busy without max_connection

Using Varnish 3.0.4, we're seeing a lot of backend_busy, but we do not have set max_connection setting in any of our backends.
I'm trying to find out why because it seems some clients requests get aborted.
what could cause a backend_busy ?
thanks for any tips
here's my varnishstat -1
client_conn 0 0.00 Client connections accepted
client_drop 0 0.00 Connection dropped, no sess/wrk
client_req 0 0.00 Client requests received
cache_hit 0 0.00 Cache hits
cache_hitpass 0 0.00 Cache hits for pass
cache_miss 0 0.00 Cache misses
backend_conn 0 0.00 Backend conn. success
backend_unhealthy 0 0.00 Backend conn. not attempted
backend_busy 386013 8.38 Backend conn. too many
backend_fail 0 0.00 Backend conn. failures
backend_reuse 1140751 24.77 Backend conn. reuses
backend_toolate 292966 6.36 Backend conn. was closed
backend_recycle 31329 0.68 Backend conn. recycles
backend_retry 364845 7.92 Backend conn. retry
fetch_head 93857 2.04 Fetch head
fetch_length 0 0.00 Fetch with Length
fetch_chunked 0 0.00 Fetch chunked
fetch_eof 1007 0.02 Fetch EOF
fetch_bad 766163 16.63 Fetch had bad headers
fetch_close 16152 0.35 Fetch wanted close
fetch_oldhttp 783263 17.00 Fetch pre HTTP/1.1 closed
fetch_zero 14372 0.31 Fetch zero len
fetch_failed 83 0.00 Fetch failed
fetch_1xx 792868 17.21 Fetch no body (1xx)
fetch_204 28600 0.62 Fetch no body (204)
fetch_304 0 0.00 Fetch no body (304)
n_sess_mem 0 . N struct sess_mem
n_sess 849 . N struct sess
n_object 0 . N struct object
n_vampireobject 0 . N unresurrected objects
n_objectcore 0 . N struct objectcore
n_objecthead 0 . N struct objecthead
n_waitinglist 0 . N struct waitinglist
n_vbc 674 . N struct vbc
n_wrk 1819 . N worker threads
n_wrk_create 155 0.00 N worker threads created
n_wrk_failed 237296 5.15 N worker threads not created
n_wrk_max 0 0.00 N worker threads limited
n_wrk_lqueue 237394 5.15 work request queue length
n_wrk_queued 237937 5.17 N queued work requests
n_wrk_drop 128 0.00 N dropped work requests
n_backend 96 . N backends
n_expired 135 . N expired objects
n_lru_nuked 7710 . N LRU nuked objects
n_lru_moved 0 . N LRU moved objects
losthdr 0 0.00 HTTP header overflows
n_objsendfile 0 0.00 Objects sent with sendfile
n_objwrite 49182 1.07 Objects sent with write
n_objoverflow 0 0.00 Objects overflowing workspace
s_sess 25 0.00 Total Sessions
s_req 104479 2.27 Total Requests
s_pipe 0 0.00 Total pipe
s_pass 205233 4.46 Total pass
s_fetch 0 0.00 Total fetch
s_hdrbytes 0 0.00 Total header bytes
s_bodybytes 1093243 23.73 Total body bytes
sess_closed 0 0.00 Session Closed
sess_pipeline 385979 8.38 Session Pipeline
sess_readahead 1140751 24.77 Session Read Ahead
sess_linger 758 0.02 Session Linger
sess_herd 482049 10.46 Session herd
shm_records 823074 17.87 SHM records
shm_writes 470696378 10218.54 SHM writes
shm_flushes 10834209993 235204.18 SHM flushes due to overflow
shm_cont 167868 3.64 SHM MTX contention
shm_cycles 19 0.00 SHM cycles through buffer
sms_nreq 18 0.00 SMS allocator requests
sms_nobj 1092005 . SMS outstanding allocations
sms_nbytes 635568 . SMS outstanding bytes
sms_balloc 85592445 . SMS bytes allocated
sms_bfree 5323477 . SMS bytes freed
backend_req 42 0.00 Backend requests made
n_vcl 44231 0.96 N vcl total
n_vcl_avail 37 0.00 N vcl available
n_vcl_discard 23820 0.52 N vcl discarded
n_ban 0 . N total active bans
n_ban_add 0 0.00 N new bans added
n_ban_retire 2136286212 46377.49 N old bans deleted
n_ban_obj_test 2136286212 46377.49 N objects tested
n_ban_re_test 857240 18.61 N regexps tested against
n_ban_dups 1 0.00 N duplicate bans removed
hcb_nolock 1 0.00 HCB Lookups without lock
hcb_lock 0 0.00 HCB Lookups with lock
hcb_insert 28435 0.62 HCB Inserts
esi_errors 14884 0.32 ESI parse errors (unlock)
esi_warnings 246931 5.36 ESI parse warnings (unlock)
accept_fail 218496 4.74 Accept failures
client_drop_late 5603745 121.65 Connection dropped late
uptime 29071691616 631128.92 Client uptime
dir_dns_lookups 46063 1.00 DNS director lookups
dir_dns_failed 685875 14.89 DNS director failed lookups
dir_dns_hit 356122 7.73 DNS director cached lookups hit
dir_dns_cache_full 355962 7.73 DNS director full dnscache
vmods 0 . Loaded VMODs
n_gzip 0 0.00 Gzip operations
n_gunzip 0 0.00 Gunzip operations
LCK.sms.creat 0 0.00 Created locks
LCK.sms.destroy 0 0.00 Destroyed locks
LCK.sms.locks 0 0.00 Lock Operations
LCK.sms.colls 0 0.00 Collisions
LCK.smp.creat 0 0.00 Created locks
LCK.smp.destroy 0 0.00 Destroyed locks
LCK.smp.locks 0 0.00 Lock Operations
LCK.smp.colls 0 0.00 Collisions
LCK.sma.creat 0 0.00 Created locks
LCK.sma.destroy 0 0.00 Destroyed locks
LCK.sma.locks 0 0.00 Lock Operations
LCK.sma.colls 0 0.00 Collisions
LCK.smf.creat 0 0.00 Created locks
LCK.smf.destroy 0 0.00 Destroyed locks
LCK.smf.locks 0 0.00 Lock Operations
LCK.smf.colls 0 0.00 Collisions
LCK.hsl.creat 0 0.00 Created locks
LCK.hsl.destroy 0 0.00 Destroyed locks
LCK.hsl.locks 0 0.00 Lock Operations
LCK.hsl.colls 0 0.00 Collisions
LCK.hcb.creat 0 0.00 Created locks
LCK.hcb.destroy 0 0.00 Destroyed locks
LCK.hcb.locks 0 0.00 Lock Operations
LCK.hcb.colls 0 0.00 Collisions
LCK.hcl.creat 0 0.00 Created locks
LCK.hcl.destroy 0 0.00 Destroyed locks
LCK.hcl.locks 0 0.00 Lock Operations
LCK.hcl.colls 0 0.00 Collisions
LCK.vcl.creat 0 0.00 Created locks
LCK.vcl.destroy 0 0.00 Destroyed locks
LCK.vcl.locks 0 0.00 Lock Operations
LCK.vcl.colls 0 0.00 Collisions
LCK.stat.creat 0 0.00 Created locks
LCK.stat.destroy 0 0.00 Destroyed locks
LCK.stat.locks 0 0.00 Lock Operations
LCK.stat.colls 0 0.00 Collisions
LCK.sessmem.creat 0 0.00 Created locks
LCK.sessmem.destroy 0 0.00 Destroyed locks
LCK.sessmem.locks 0 0.00 Lock Operations
LCK.sessmem.colls 0 0.00 Collisions
LCK.wstat.creat 0 0.00 Created locks
LCK.wstat.destroy 0 0.00 Destroyed locks
LCK.wstat.locks 0 0.00 Lock Operations
LCK.wstat.colls 0 0.00 Collisions
LCK.herder.creat 0 0.00 Created locks
LCK.herder.destroy 0 0.00 Destroyed locks
LCK.herder.locks 0 0.00 Lock Operations
LCK.herder.colls 0 0.00 Collisions
LCK.wq.creat 0 0.00 Created locks
LCK.wq.destroy 0 0.00 Destroyed locks
LCK.wq.locks 0 0.00 Lock Operations
LCK.wq.colls 0 0.00 Collisions
LCK.objhdr.creat 0 0.00 Created locks
LCK.objhdr.destroy 0 0.00 Destroyed locks
LCK.objhdr.locks 0 0.00 Lock Operations
LCK.objhdr.colls 0 0.00 Collisions
LCK.exp.creat 0 0.00 Created locks
LCK.exp.destroy 0 0.00 Destroyed locks
LCK.exp.locks 0 0.00 Lock Operations
LCK.exp.colls 0 0.00 Collisions
LCK.lru.creat 0 0.00 Created locks
LCK.lru.destroy 0 0.00 Destroyed locks
LCK.lru.locks 0 0.00 Lock Operations
LCK.lru.colls 0 0.00 Collisions
LCK.cli.creat 0 0.00 Created locks
LCK.cli.destroy 0 0.00 Destroyed locks
LCK.cli.locks 0 0.00 Lock Operations
LCK.cli.colls 0 0.00 Collisions
LCK.ban.creat 0 0.00 Created locks
LCK.ban.destroy 0 0.00 Destroyed locks
LCK.ban.locks 0 0.00 Lock Operations
LCK.ban.colls 0 0.00 Collisions
LCK.vbp.creat 0 0.00 Created locks
LCK.vbp.destroy 0 0.00 Destroyed locks
LCK.vbp.locks 0 0.00 Lock Operations
LCK.vbp.colls 0 0.00 Collisions
LCK.vbe.creat 0 0.00 Created locks
LCK.vbe.destroy 0 0.00 Destroyed locks
LCK.vbe.locks 0 0.00 Lock Operations
LCK.vbe.colls 0 0.00 Collisions
LCK.backend.creat 0 0.00 Created locks
LCK.backend.destroy 0 0.00 Destroyed locks
LCK.backend.locks 0 0.00 Lock Operations
LCK.backend.colls 0 0.00 Collisions
SMA.s0.c_req 0 0.00 Allocator requests
SMA.s0.c_fail 0 0.00 Allocator failures
SMA.s0.c_bytes 0 0.00 Bytes allocated
SMA.s0.c_freed 0 0.00 Bytes freed
SMA.s0.g_alloc 0 . Allocations outstanding
SMA.s0.g_bytes 0 . Bytes outstanding
SMA.s0.g_space 0 . Bytes available
SMA.Transient.c_req 0 0.00 Allocator requests
SMA.Transient.c_fail 0 0.00 Allocator failures
SMA.Transient.c_bytes 0 0.00 Bytes allocated
SMA.Transient.c_freed 0 0.00 Bytes freed
SMA.Transient.g_alloc 0 . Allocations outstanding
SMA.Transient.g_bytes 0 . Bytes outstanding
SMA.Transient.g_space 0 . Bytes available
VBE.default(127.0.0.1,,8080).vcls 0 . VCL references
VBE.default(127.0.0.1,,8080).happy 0 . Happy health probes
VBE.wiki(127.0.0.1,,8081).vcls 0 . VCL references
VBE.wiki(127.0.0.1,,8081).happy 0 . Happy health probes
VBE.tiles(127.0.0.1,,9090).vcls 0 . VCL references
VBE.tiles(127.0.0.1,,9090).happy 0 . Happy health probes
VBE.geoserver(127.0.0.1,,43074).vcls 0 . VCL references
VBE.geoserver(127.0.0.1,,43074).happy 0 . Happy health probes
VBE.iconserver(127.0.0.1,,43077).vcls 0 . VCL references
VBE.iconserver(127.0.0.1,,43077).happy 0 . Happy health probes
VBE.uploadserver(127.0.0.1,,43079).vcls 0 . VCL references
VBE.uploadserver(127.0.0.1,,43079).happy 0 . Happy health probes
VBE.graphserver(127.0.0.1,,43080).vcls 0 . VCL references
VBE.graphserver(127.0.0.1,,43080).happy 0 . Happy health probes
VBE.kmlserver(127.0.0.1,,43082).vcls 0 . VCL references
VBE.kmlserver(127.0.0.1,,43082).happy 0 . Happy health probes
VBE.feedbackserver(127.0.0.1,,43085).vcls 0 . VCL references
VBE.feedbackserver(127.0.0.1,,43085).happy 0 . Happy health probes
VBE.weatherserver(127.0.0.1,,43086).vcls 0 . VCL references
VBE.weatherserver(127.0.0.1,,43086).happy 0 . Happy health probes
VBE.analyticsserver(127.0.0.1,,43087).vcls 0 . VCL references
VBE.analyticsserver(127.0.0.1,,43087).happy 0 . Happy health probes
VBE.chartserver(127.0.0.1,,43088).vcls 0 . VCL references
VBE.chartserver(127.0.0.1,,43088).happy 0 . Happy health probes
VBE.watcher(127.0.0.1,,8090).vcls 0 . VCL references
VBE.watcher(127.0.0.1,,8090).happy 0 . Happy health probes
VBE.render(127.0.0.1,,8040).vcls 0 . VCL references
VBE.render(127.0.0.1,,8040).happy 0 . Happy health probes
VBE.route(127.0.0.1,,43073).vcls 0 . VCL references
VBE.route(127.0.0.1,,43073).happy 0 . Happy health probes
VBE.osmsync(127.0.0.1,,9010).vcls 0 . VCL references
VBE.osmsync(127.0.0.1,,9010).happy 0 . Happy health probes
VBE.search(127.0.0.1,,8100).vcls 0 . VCL references
VBE.search(127.0.0.1,,8100).happy 0 . Happy health probes
VBE.indexserver(127.0.0.1,,8101).vcls 0 . VCL references
VBE.indexserver(127.0.0.1,,8101).happy 0 . Happy health probes
VBE.rewriteproxy(127.0.0.1,,9030).vcls 0 . VCL references
VBE.rewriteproxy(127.0.0.1,,9030).happy 0 . Happy health probes
VBE.pomplo(127.0.0.1,,9081).vcls 0 . VCL references
VBE.pomplo(127.0.0.1,,9081).happy 0 . Happy health probes
VBE.admin(127.0.0.1,,9000).vcls 0 . VCL references
VBE.admin(127.0.0.1,,9000).happy 0 . Happy health probes
VBE.graphite(127.0.0.1,,7070).vcls 0 . VCL references
VBE.graphite(127.0.0.1,,7070).happy 0 . Happy health probes
VBE.notificationserver(127.0.0.1,,8070).vcls 0 . VCL references
VBE.notificationserver(127.0.0.1,,8070).happy 0 . Happy health probes
VBE.orient(127.0.0.1,,2480).vcls 0 . VCL references
VBE.orient(127.0.0.1,,2480).happy 0 . Happy health probes
VBE.geoip(127.0.0.1,,43930).vcls 0 . VCL references
VBE.geoip(127.0.0.1,,43930).happy 0 . Happy health probes
here's my vcl (included in default.vcl)
import std;
# Networks allowed to reach admin-style endpoints (enforced in vcl_recv
# through allow_only_internal).
acl internal {
"localhost";
"192.168.0.0"/16;
"10.0.0.0"/8;
}
# Shared health probes: one successful probe (threshold 1) answered within
# 100 ms, checked every second, marks the backend healthy.
probe status {
.url = "/status.json";
.timeout = 100 ms;
.interval = 1s;
.threshold = 1;
}
probe rootCheck {
.url = "/";
.timeout = 100 ms;
.interval = 1s;
.threshold = 1;
}
# One backend per internal service, all on loopback; requests are routed to
# them in vcl_recv by Host-header prefix or URL prefix.
backend tiles {
.host = "127.0.0.1";
.port = "9090";
.probe = {
.url = "/serverInfo.json";
.timeout = 100 ms;
.interval = 1s;
.threshold = 1;
}
}
backend geoserver {
.host = "127.0.0.1";
.port = "43074";
.probe = rootCheck;
}
backend iconserver {
.host = "127.0.0.1";
.port = "43077";
.probe = rootCheck;
}
backend uploadserver {
.host = "127.0.0.1";
.port = "43079";
.probe = rootCheck;
}
backend graphserver {
.host = "127.0.0.1";
.port = "43080";
.probe = rootCheck;
}
backend kmlserver {
.host = "127.0.0.1";
.port = "43082";
.probe = rootCheck;
}
backend weatherserver {
.host = "127.0.0.1";
.port = "43086";
.probe = {
.url = "/status";
.timeout = 100 ms;
.interval = 1s;
.threshold = 1;
}
}
backend analyticsserver {
.host = "127.0.0.1";
.port = "43087";
.probe = status;
}
backend chartserver {
.host = "127.0.0.1";
.port = "43088";
.probe = status;
}
backend watcher {
.host = "127.0.0.1";
.port = "8090";
.probe = {
.url = "/test";
.timeout = 100 ms;
.interval = 1s;
.threshold = 1;
}
}
backend route {
.host = "127.0.0.1";
.port = "43073";
.probe = status;
}
backend osmsync {
.host = "127.0.0.1";
.port = "9010";
.probe = {
.url = "/state.json";
.timeout = 100 ms;
.interval = 1s;
.threshold = 1;
}
}
backend search {
.host = "127.0.0.1";
.port = "8100";
.probe = status;
}
backend indexserver {
.host = "127.0.0.1";
.port = "8101";
.probe = rootCheck;
}
backend rewriteproxy {
.host = "127.0.0.1";
.port = "9030";
.probe = rootCheck;
}
backend webserver {
.host = "127.0.0.1";
.port = "9081";
.probe = rootCheck;
}
backend admin {
.host = "127.0.0.1";
.port = "9000";
.probe = rootCheck;
}
backend graphite {
.host = "127.0.0.1";
.port = "7070";
.probe = rootCheck;
}
backend notificationserver {
.host = "127.0.0.1";
.port = "8070";
.probe = status;
}
# No probe: this backend is always considered healthy.
backend orient {
.host = "127.0.0.1";
.port = "2480";
}
backend geoip {
.host = "127.0.0.1";
.port = "43930";
.probe = status;
}
# Shared guard: rejects requests from outside the "internal" ACL with a 405.
# Called from vcl_recv before routing to admin-only backends.
sub allow_only_internal {
if (!client.ip ~ internal) {
error 405 "Not allowed.";
}
}
# Request router: strips cookies from static assets, gates admin endpoints to
# the internal ACL, then picks a backend by Host-header prefix (with URL-prefix
# fallbacks at the bottom); unknown hosts trigger a redirect via error 750,
# which vcl_error turns into a 302.
sub vcl_recv {
# Static assets never vary by cookie; drop cookies so they can be cached.
if (req.url ~ "\.(png|gif|jpg|swf|css|js|bis|apk|plist|ipa|woff|svg|eot|ttf|json)(\?.*|)$") {
unset req.http.cookie;
}
if (req.url ~ "^/admin") {
call allow_only_internal;
}
if( req.http.host ~ "^tile" ) {
unset req.http.cookie;
set req.http.host = "tiles";
set req.backend = tiles;
} else if( req.http.host ~ "^data" ) {
set req.backend = geoserver;
} else if( req.http.host ~ "^icon" ) {
unset req.http.cookie;
set req.backend = iconserver;
} else if( req.http.host ~ "^upload" ) {
set req.backend = uploadserver;
} else if( req.http.host ~ "^graphite" ) { // must be before graph
if( req.url !~ "^/render" ) { //allow render API anywhere
call allow_only_internal;
}
set req.backend = graphite;
} else if( req.http.host ~ "^graph" ) {
set req.backend = graphserver;
} else if( req.http.host ~ "^kml" ) {
set req.backend = kmlserver;
} else if( req.http.host ~ "^notification" ) {
set req.backend = notificationserver;
} else if( req.http.host ~ "^osmsync" ) {
set req.backend = osmsync;
} else if( req.http.host ~ "^watcher" ) {
set req.backend = watcher;
} else if( req.http.host ~ "^route" ) {
set req.backend = route;
} else if( req.http.host ~ "^search" ) {
set req.backend = search;
} else if( req.http.host ~ "^index" ) {
call allow_only_internal;
set req.backend = indexserver;
} else if( req.http.host ~ "^weather" ) {
set req.backend = weatherserver;
} else if( req.http.host ~ "^analytics" ) {
set req.backend = analyticsserver;
} else if( req.http.host ~ "^chart" ) {
set req.backend = chartserver;
} else if( req.http.host ~ "^geoip" ) {
set req.backend = geoip;
} else if( req.http.host ~ "^rewriteproxy" || req.http.host ~ "^showroom" ) {
set req.backend = rewriteproxy;
} else if( req.http.host ~ "^admin" ) {
call allow_only_internal;
set req.backend = admin;
} else if( req.http.host ~ "^orient" ) {
call allow_only_internal;
set req.backend = orient;
}
//url matching is at the bottom to avoid confusion
else if( req.url ~ "^/query" ) {
set req.backend = geoserver;
} else if( req.url ~ "^/data/" ) {
set req.url = regsub( req.url, "^/data(/.*)", "\1" );
set req.backend = geoserver;
} else if( req.url ~ "^/icon/" ) {
set req.url = regsub( req.url, "^/icon(/.*)", "\1" );
set req.backend = iconserver;
} else if( req.url ~ "^/graph/" ) {
set req.url = regsub( req.url, "^/graph(/.*)", "\1" );
set req.backend = graphserver;
} else if( req.http.host ~ "^(www\.|static|map)" ) {
set req.backend = webserver;
} else {
// todo: handle HTTPS with req.http.X-Forwarded-Proto
// redirect dev and demo to avoid dead links for old urls
// 750 is a custom status consumed by vcl_error to emit a 302 redirect.
error 750 regsub(req.http.host, "^(w+\.|dev\.|demo\.)?", "http://www.") + req.url;
}
}
# Response post-processing: strip the internal :8080 port from redirect
# Locations, add CORS headers for the graphite render API, and hide the
# ban-support headers from clients.
sub vcl_deliver {
if (resp.http.Location ~ ".*:8080.*") {
set resp.http.Location = regsub(resp.http.Location, ":[0-9]+", "");
}
if (req.backend == graphite) {
set resp.http.Access-Control-Allow-Origin = "*";
set resp.http.Access-Control-Allow-Methods = "GET, OPTIONS";
set resp.http.Access-Control-Allow-Headers = "origin, authorization, accept";
}
# x-url/x-host exist only to support bans (see vcl_fetch); never expose them.
unset resp.http.x-url;
unset resp.http.x-host;
return (deliver);
}
# Backend-response processing (the Varnish 3 equivalent of
# vcl_backend_response): strip Set-Cookie from static assets and gzip
# compressible content types.
sub vcl_fetch {
if (req.url ~ "\.(png|gif|jpg|swf|css|js|bis|apk|plist|ipa|woff|svg|eot|ttf|json)(\?.*|)$") {
unset beresp.http.set-cookie;
}
if( beresp.http.content-type ~ "application/json"
|| beresp.http.content-type ~ "application/javascript"
|| (beresp.http.content-type ~ "application/octet-stream" && req.url ~ "\.bis$")
|| beresp.http.content-type ~ "text/javascript"
|| beresp.http.content-type ~ "text/plain"
|| beresp.http.content-type ~ "text/html"
|| beresp.http.content-type ~ "text/css" ) {
set beresp.do_gzip = true;
}
//allow ban lurker to work. https://www.varnish-software.com/static/book/Cache_invalidation.html#smart-bans
set beresp.http.x-url = req.url;
set beresp.http.x-host = req.http.host;
}
# When piping (e.g. websocket upgrades routed here from vcl_recv), forward
# the client's Upgrade header to the backend so the protocol switch completes.
sub vcl_pipe {
if (req.http.upgrade) {
set bereq.http.upgrade = req.http.upgrade;
}
}
# Second vcl_recv definition: Varnish concatenates same-named subroutines in
# source order, so this is appended after the routing logic above and pipes
# websocket upgrade requests straight to the backend.
sub vcl_recv {
if (req.http.Upgrade ~ "(?i)websocket") {
return (pipe);
}
}
# Error/synthetic handler: status 750 is the internal marker set in vcl_recv
# to issue a 302 redirect (obj.response carries the target URL); anything
# else renders a branded HTML error page from disk, substituting the status
# and reason into $obj.status$/$obj.response$ placeholders.
sub vcl_error {
if (obj.status == 750) {
set obj.http.Location = obj.response;
set obj.status = 302;
return (deliver);
}
set obj.http.Content-Type = "text/html; charset=utf-8";
set obj.http.Retry-After = "3";
if( req.http.host ~ "^map" ) {
synthetic regsuball(regsuball(std.fileread("/etc/varnish/error_map.html"), "\$obj\.status\$", obj.status), "\$obj\.response\$", obj.response);
} else {
synthetic regsuball(regsuball(std.fileread("/etc/varnish/error.html"), "\$obj\.status\$", obj.status), "\$obj\.response\$", obj.response);
}
return (deliver);
}
That would normally be because none of your backends in the requested director are healthy at the time.
None of your servers seems to be used in your VCL nor have any health probes. So it's hard to know why exactly this happens. You should also really be using the backends you define and do health checks on them.
When you see something more like the following for all backends, your varnishstat is at least giving you some useful information:
VBE.wiki(127.0.0.1,,8081).vcls 1
VBE.wiki(127.0.0.1,,8081).happy 18446744073709551615
You can also check current status and short history by "debug.health" in your varnishadm console.
Try updating your libvarnishapi lib — this helped me (ubuntu):
sudo apt-get install libvarnishapi1
and reboot.
If you are using a wrong version (mine was libvarnishapi-dev), you don't get varnishstat working properly.
By default when backend_busy is incremented, that means the client receives a 5xx error response. However by using VCL (Varnish Configuration Language), it seems that you can apparently configure Varnish to recover from a "busy backend" by using a different backend, or by serving an outdated or synthetic response.
(Source)
Note: I think backend_busy is different from backend_unhealthy.
Actually Varnish periodically pings the backend to make sure it is still up and responsive. If it doesn’t receive a 200 response quickly enough, the backend is marked as unhealthy and every new request to it increments backend_unhealthy until the backend recovers and sends a timely 200 response.

using external redis server for testing tcl scripts

I am running Ubuntu 11.10.
I am trying to run the TCL test scripts against an external Redis server.
using the following :
sb#sb-laptop:~/Redis/redis$ tclsh tests/test_helper.tcl --host 192.168.1.130 --port 6379
Getting the following error :
Testing unit/type/list
[exception]: Executing test client: couldn't open socket: connection refused.
couldn't open socket: connection refused
while executing
"socket $server $port"
(procedure "redis" line 2)
invoked from within
"redis $::host $::port"
(procedure "start_server" line 9)
invoked from within
"start_server {tags {"protocol"}} {
test "Handle an empty query" {
reconnect
r write "\r\n"
r flush
assert_equal "P..."
(file "tests/unit/protocol.tcl" line 1)
invoked from within
"source $path"
(procedure "execute_tests" line 4)
invoked from within
"execute_tests $data"
(procedure "test_client_main" line 9)
invoked from within
"test_client_main $::test_server_port "
the redis.conf is set to default binding, but it is commented out.
If this is possible, what am I doing wrong?
Additional Information:
Below is the tcl code that is responsible for starting the server
# Start a Redis server for a test run, or reuse an external one.
#
# options : flat list of option/value pairs; recognised options are
#           "config" (base config file under tests/assets/),
#           "overrides" (directive/value pairs layered on top of it) and
#           "tags" (appended to the global $::tags list).
# code    : optional script to run against the started server; when
#           supplied, the server is killed again afterwards.
proc start_server {options {code undefined}} {
# If we are running against an external server, we just push the
# host/port pair in the stack the first time
if {$::external} {
if {[llength $::servers] == 0} {
set srv {}
dict set srv "host" $::host
dict set srv "port" $::port
# NOTE(review): this is where "couldn't open socket: connection refused"
# surfaces when nothing reachable is listening on $::host:$::port.
set client [redis $::host $::port]
dict set srv "client" $client
# tests run against database 9 by convention
$client select 9
# append the server to the stack
lappend ::servers $srv
}
uplevel 1 $code
return
}
# setup defaults
set baseconfig "default.conf"
set overrides {}
set tags {}
# parse options
foreach {option value} $options {
switch $option {
"config" {
set baseconfig $value }
"overrides" {
set overrides $value }
"tags" {
set tags $value
set ::tags [concat $::tags $value] }
default {
error "Unknown option $option" }
}
}
# read the base config file, skipping blank lines and "#" comment lines
set data [split [exec cat "tests/assets/$baseconfig"] "\n"]
set config {}
foreach line $data {
if {[string length $line] > 0 && [string index $line 0] ne "#"} {
set elements [split $line " "]
set directive [lrange $elements 0 0]
set arguments [lrange $elements 1 end]
dict set config $directive $arguments
}
}
# use a different directory every time a server is started
dict set config dir [tmpdir server]
# start every server on a different port
set ::port [find_available_port [expr {$::port+1}]]
dict set config port $::port
# apply overrides from global space and arguments
foreach {directive arguments} [concat $::global_overrides $overrides] {
dict set config $directive $arguments
}
# write new configuration to temporary file
set config_file [tmpfile redis.conf]
set fp [open $config_file w+]
foreach directive [dict keys $config] {
puts -nonewline $fp "$directive "
puts $fp [dict get $config $directive]
}
close $fp
# the server's stdout/stderr are captured as files under its work dir
set stdout [format "%s/%s" [dict get $config "dir"] "stdout"]
set stderr [format "%s/%s" [dict get $config "dir"] "stderr"]
if {$::valgrind} {
exec valgrind --suppressions=src/valgrind.sup src/redis-server $config_file > $stdout 2> $stderr &
} else {
exec src/redis-server $config_file > $stdout 2> $stderr &
}
# check that the server actually started
# ugly but tries to be as fast as possible...
set retrynum 100
set serverisup 0
if {$::verbose} {
puts -nonewline "=== ($tags) Starting server ${::host}:${::port} "
}
after 10
# poll with PING (up to 100 tries, 50 ms apart) until the server answers
if {$code ne "undefined"} {
while {[incr retrynum -1]} {
catch {
if {[ping_server $::host $::port]} {
set serverisup 1
}
}
if {$serverisup} break
after 50
}
} else {
set serverisup 1
}
if {$::verbose} {
puts ""
}
if {!$serverisup} {
error_and_quit $config_file [exec cat $stderr]
}
# find out the pid
# the server logs "[<pid>]" in its stdout banner; wait until it appears
while {![info exists pid]} {
regexp {\[(\d+)\]} [exec cat $stdout] _ pid
after 100
}
# setup properties to be able to initialize a client object
set host $::host
set port $::port
# the config may bind to a different host/port than the defaults
if {[dict exists $config bind]} { set host [dict get $config bind] }
if {[dict exists $config port]} { set port [dict get $config port] }
# setup config dict
dict set srv "config_file" $config_file
dict set srv "config" $config
dict set srv "pid" $pid
dict set srv "host" $host
dict set srv "port" $port
dict set srv "stdout" $stdout
dict set srv "stderr" $stderr
# if a block of code is supplied, we wait for the server to become
# available, create a client object and kill the server afterwards
if {$code ne "undefined"} {
set line [exec head -n1 $stdout]
if {[string match {*already in use*} $line]} {
error_and_quit $config_file $line
}
while 1 {
# check that the server actually started and is ready for connections
if {[exec cat $stdout | grep "ready to accept" | wc -l] > 0} {
break
}
after 10
}
# append the server to the stack
lappend ::servers $srv
# connect client (after server dict is put on the stack)
reconnect
# execute provided block
set num_tests $::num_tests
if {[catch { uplevel 1 $code } error]} {
set backtrace $::errorInfo
# Kill the server without checking for leaks
dict set srv "skipleaks" 1
kill_server $srv
# Print warnings from log
puts [format "\nLogged warnings (pid %d):" [dict get $srv "pid"]]
set warnings [warnings_from_file [dict get $srv "stdout"]]
if {[string length $warnings] > 0} {
puts "$warnings"
} else {
puts "(none)"
}
puts ""
# re-raise the test error with its original backtrace
error $error $backtrace
}
# Don't do the leak check when no tests were run
if {$num_tests == $::num_tests} {
dict set srv "skipleaks" 1
}
# pop the server object
set ::servers [lrange $::servers 0 end-1]
set ::tags [lrange $::tags 0 end-[llength $tags]]
kill_server $srv
} else {
set ::tags [lrange $::tags 0 end-[llength $tags]]
set _ $srv
}
}
Either there's nothing listening on host 192.168.1.130, port 6379 (well, at a guess) or your firewall configuration is blocking the connection. Impossible to say which, since all the code is really seeing is “the connection didn't work; something said ‘no’…”.

Setting up varnish on same server as webserver

Our company recently decided to start working with the Varnish HTTP accelerator. Most important why we chose this solution was because we are a company that specializes in building web shops (Magento Enterprise) => Magento has a commercial plugin that works together with varnish.
The varnish configuration is already present on our testing environment, which contains 1 (software) load balancer running a varnish instance, 2 apache webservers and 1 storage + 1 mysql server.
However now the time has come to add the Varnish to our development environment (virtualbox with 1GB of ram running debian which has the database, webserver, files running all on the same machine)
Could anyone post a default.vcl configuration file for this setup?
Apache2 runs on port 80.
Thanks in advance,
Kenny
EDIT: I found and posted the solution below.
This link has an excellent discussion of using Varnish on big production Web sites. In particular, look at the /etc/default/varnish or /etc/sysconfig/varnish DAEMON OPTS that put the cache 'file' into memory, instead of disk:
http://www.lullabot.com/articles/varnish-multiple-web-servers-drupal
The snippet I'm talking about:
DAEMON_OPTS="-a :80,:443 \
-T localhost:6082 \
-f /etc/varnish/default.vcl \
-u varnish -g varnish \
-S /etc/varnish/secret \
-p thread_pool_add_delay=2 \
-p thread_pools=2 \
-p thread_pool_min=400 \
-p thread_pool_max=4000 \
-p session_linger=50 \
-p sess_workspace=262144 \
-s malloc,3G"
I found the solution after more searching. Basically we need to make sure that Varnish is listening on port 80 and Apache on port 8080 (or any other free port!).
Here my default.vcl file (located in /etc/varnish/default.vcl):
# default backend definition. Set this to point to your content server.
# Apache serves the actual content on localhost:8080; Varnish itself owns port 80.
backend apache1 {
.host = "127.0.0.1";
.port = "8080";
}
# round-robin director with a single member; add further backends
# here when more web servers are available
director lb round-robin {
{.backend=apache1;}
}
# add your Magento server IP to allow purges from the backend
# only addresses listed here may issue PURGE requests (checked in vcl_recv)
acl purge {
"localhost";
"127.0.0.1";
}
# needed for TTL handling
# inline C block: pulls in headers used by C{ }C snippets in this VCL
C{
#include <errno.h>
#include <limits.h>
}C
# Request-entry routine (legacy Varnish 2.x/3.x syntax: req.request,
# error, purge()). Decides per request whether to pipe, pass or lookup.
sub vcl_recv {
# send every request through the round-robin director defined above
set req.backend=lb;
if (req.request != "GET" &&
req.request != "HEAD" &&
req.request != "PUT" &&
req.request != "POST" &&
req.request != "TRACE" &&
req.request != "OPTIONS" &&
req.request != "DELETE" &&
req.request != "PURGE") {
/* Non-RFC2616 or CONNECT which is weird. */
return (pipe);
}
# purge request
# only hosts in the 'purge' ACL above may invalidate cached objects
if (req.request == "PURGE") {
if (!client.ip ~ purge) {
error 405 "Not allowed.";
}
purge("obj.http.X-Purge-Host ~ " req.http.X-Purge-Host " && obj.http.X-Purge-URL ~ " req.http.X-Purge-Regex " && obj.http.Content-Type ~ " req.http.X-Purge-Content-Type);
error 200 "Purged.";
}
# we only deal with GET and HEAD by default
if (req.request != "GET" && req.request != "HEAD") {
return (pass);
}
# static files are always cacheable. remove SSL flag and cookie
if (req.url ~ "^/(media|js|skin)/.*\.(png|jpg|jpeg|gif|css|js|swf|ico)$") {
unset req.http.Https;
unset req.http.Cookie;
}
# not cacheable by default
if (req.http.Authorization || req.http.Https) {
return (pass);
}
# do not cache any page from
# - index files
# - ...
if (req.url ~ "^/(index)") {
return (pass);
}
# as soon as we have a NO_CACHE or admin cookie pass request
if (req.http.cookie ~ "(NO_CACHE|adminhtml)=") {
return (pass);
}
# normalize Accept-Encoding header to improve the cache hit rate
# http://varnish.projects.linpro.no/wiki/FAQ/Compression
if (req.http.Accept-Encoding) {
if (req.url ~ "\.(jpg|png|gif|gz|tgz|bz2|tbz|mp3|ogg|swf|flv)$") {
# No point in compressing these
remove req.http.Accept-Encoding;
} elsif (req.http.Accept-Encoding ~ "gzip") {
set req.http.Accept-Encoding = "gzip";
} elsif (req.http.Accept-Encoding ~ "deflate" && req.http.user-agent !~ "MSIE") {
set req.http.Accept-Encoding = "deflate";
} else {
# unknown algorithm
remove req.http.Accept-Encoding;
}
}
# remove Google gclid parameters so tracking URLs share one cache object
set req.url = regsuball(req.url,"\?gclid=[^&]+$",""); # strips when QS = "?gclid=AAA"
set req.url = regsuball(req.url,"\?gclid=[^&]+&","?"); # strips when QS = "?gclid=AAA&foo=bar"
set req.url = regsuball(req.url,"&gclid=[^&]+",""); # strips when QS = "?foo=bar&gclid=AAA" or QS = "?foo=bar&gclid=AAA&bar=baz"
# decided to cache. remove cookie
#unset req.http.Cookie;
return (lookup);
}
Here's the content of the varnish file (/etc/default/varnish):
# Configuration file for varnish
#
# /etc/init.d/varnish expects the variables $DAEMON_OPTS, $NFILES and $MEMLOCK
# to be set from this shell script fragment.
#
# Should we start varnishd at boot? Set to "yes" to enable.
START=yes
# Maximum number of open files (for ulimit -n)
NFILES=131072
# Maximum locked memory size (for ulimit -l)
# Used for locking the shared memory log in memory. If you increase log size,
# you need to increase this number as well
MEMLOCK=82000
# Default varnish instance name is the local nodename. Can be overridden with
# the -n switch, to have more instances on a single server.
INSTANCE=$(uname -n)
# This file contains 4 alternatives, please use only one.
## Alternative 1, Minimal configuration, no VCL
#
# Listen on port 6081, administration on localhost:6082, and forward to
# content server on localhost:8080. Use a 1GB fixed-size cache file.
#
# DAEMON_OPTS="-a :6081 \
# -T localhost:6082 \
# -b localhost:8080 \
# -u varnish -g varnish \
# -S /etc/varnish/secret \
# -s file,/var/lib/varnish/$INSTANCE/varnish_storage.bin,1G"
## Alternative 2, Configuration with VCL
#
# Listen on port 6081, administration on localhost:6082, and forward to
# one content server selected by the vcl file, based on the request. Use a 1GB
# fixed-size cache file.
#
DAEMON_OPTS="-a :80 \
-T localhost:6082 \
-f /etc/varnish/default.vcl \
-S /etc/varnish/secret \
-s file,/var/lib/varnish/$INSTANCE/varnish_storage.bin,1G"
## Alternative 3, Advanced configuration
#
# See varnishd(1) for more information.
#
# # Main configuration file. You probably want to change it :)
# VARNISH_VCL_CONF=/etc/varnish/default.vcl
#
# # Default address and port to bind to
# # Blank address means all IPv4 and IPv6 interfaces, otherwise specify
# # a host name, an IPv4 dotted quad, or an IPv6 address in brackets.
# VARNISH_LISTEN_ADDRESS=
# VARNISH_LISTEN_PORT=6081
#
# # Telnet admin interface listen address and port
# VARNISH_ADMIN_LISTEN_ADDRESS=127.0.0.1
# VARNISH_ADMIN_LISTEN_PORT=6082
#
# # The minimum number of worker threads to start
# VARNISH_MIN_THREADS=1
#
# # The Maximum number of worker threads to start
# VARNISH_MAX_THREADS=1000
#
# # Idle timeout for worker threads
# VARNISH_THREAD_TIMEOUT=120
#
# # Cache file location
# VARNISH_STORAGE_FILE=/var/lib/varnish/$INSTANCE/varnish_storage.bin
#
# # Cache file size: in bytes, optionally using k / M / G / T suffix,
# # or in percentage of available disk space using the % suffix.
# VARNISH_STORAGE_SIZE=1G
#
# # File containing administration secret
# VARNISH_SECRET_FILE=/etc/varnish/secret
#
# # Backend storage specification
# VARNISH_STORAGE="file,${VARNISH_STORAGE_FILE},${VARNISH_STORAGE_SIZE}"
#
# # Default TTL used when the backend does not specify one
# VARNISH_TTL=120
#
# # DAEMON_OPTS is used by the init script. If you add or remove options, make
# # sure you update this section, too.
# DAEMON_OPTS="-a ${VARNISH_LISTEN_ADDRESS}:${VARNISH_LISTEN_PORT} \
# -f ${VARNISH_VCL_CONF} \
# -T ${VARNISH_ADMIN_LISTEN_ADDRESS}:${VARNISH_ADMIN_LISTEN_PORT} \
# -t ${VARNISH_TTL} \
# -w ${VARNISH_MIN_THREADS},${VARNISH_MAX_THREADS},${VARNISH_THREAD_TIMEOUT} \
# -S ${VARNISH_SECRET_FILE} \
# -s ${VARNISH_STORAGE}"
#
## Alternative 4, Do It Yourself
#
# DAEMON_OPTS=""
After that you can monitor how varnish serves the content (from what source) by typing
varnishlog | grep URL
Apache can be used to SSL terminate (decrypt), check http://noosfero.org/Development/Varnish#SSL

Resources