I am receiving the error below when trying to compile the device handler code for the Sylvania Smart+ Plug. The code comes from https://images-na.ssl-images-amazon.com/images/I/71PrgM-PamL.pdf
The error:
org.codehaus.groovy.control.MultipleCompilationErrorsException: startup failed:
script_dth_metadata_0631e407_ffd8_4ceb_b49a_877fd47635df: 94: expecting ''', found '\r' @ line 94, column 55.
nalResult.value == "on" ? '{{
^
1 error
Line 94:
def descriptionText = finalResult.value == "on" ? '{{
metadata {
definition (name: "SYLVANIA Smart Plug", namespace: "ledvanceDH", author: "Ledvance") {
capability "Actuator"
capability "Switch"
capability "Power Meter"
capability "Configuration"
capability "Refresh"
capability "Sensor"
capability "Health Check"
fingerprint profileId: "C05E", inClusters: "1000,0000,0003,0004,0005,0006,0B04,FC0F", outClusters: "0019", manufacturer: "OSRAM", model: "Plug 01", deviceJoinName: "SYLVANIA Smart Plug"
fingerprint profileId: "0104", inClusters: "0000,0003,0004,0005,0006,0B05,FC01,FC08", outClusters: "0003,0019", manufacturer: "LEDVACE", model: "PLUG", deviceJoinName: "SYLVANIA Smart Plug"
}
// simulator metadata
simulator {
// status messages
status "on": "on/off: 1"
status "off": "on/off: 0"
// reply messages
reply "zcl on-off on": "on/off: 1"
reply "zcl on-off off": "on/off: 0"
}
preferences {
section {
image(name: 'educationalcontent', multiple: true, images: [
"http://cdn.devicegse.smartthings.com/Outlet/US/OutletUS1.jpg",
"http://cdn.devicegse.smartthings.com/Outlet/US/OutletUS2.jpg"
])
}
}
// UI tile definitions
tiles(scale: 2) {
multiAttributeTile(name:"switch", type: "lighting", width: 6, height: 4, canChangeIcon: true){
tileAttribute ("device.switch", key: "PRIMARY_CONTROL") {
attributeState "on", label: 'On', action: "switch.off",
icon: "st.Appliances.appliances17", backgroundColor: "#79b821", nextState: "turningOff"
attributeState "off", label: 'Off', action: "switch.on",
icon: "st.Appliances.appliances17", backgroundColor: "#565C51", nextState: "turningOn"
attributeState "turningOn", label: 'Turning On', action:
"switch.off", icon: "st.Appliances.appliances17", backgroundColor: "#60903A", nextState:
"turningOff"
attributeState "turningOff", label: 'Turning Off', action:
"switch.on", icon: "st.Appliances.appliances17", backgroundColor: "#CACACA", nextState:
"turningOn"
}
tileAttribute ("power", key: "SECONDARY_CONTROL") {
attributeState "power", label:'${currentValue} W'
}
}
standardTile("refresh", "device.power", inactiveLabel: false, decoration:
"flat", width: 2, height: 2) {
state "default", label:'', action:"refresh.refresh",
icon:"st.secondary.refresh"
}
main "switch"
details(["switch","refresh"])
}
}
// Parse incoming device messages to generate events
def parse(String description) {
log.debug "description is $description"
def finalResult = zigbee.getKnownDescription(description)
def event = [:]
//TODO: Remove this after getKnownDescription can parse it automatically
if (!finalResult && description!="updated")
finalResult = getPowerDescription(zigbee.parseDescriptionAsMap(description))
if (finalResult) {
log.info "final result = $finalResult"
if (finalResult.type == "update") {
log.info "$device updates: ${finalResult.value}"
event = null
}
else if (finalResult.type == "power") {
def powerValue = (finalResult.value as Integer)/10
event = createEvent(name: "power", value: powerValue, descriptionText: '{{ device.displayName }} power is {{ value }} Watts', translatable: true)
/*
Dividing by 10 as the Divisor is 10000 and unit is kW for the device. AttrId: 0302 and 0300. Simplifying to 10.
Power level is an integer. The exact power level with correct units needs to be handled in the device type
to account for the different Divisor value (AttrId: 0302) and POWER Unit (AttrId: 0300). CLUSTER for simple metering is 0702.
*/
}
else {
def descriptionText = finalResult.value == "on" ? '{{ device.displayName }} is On' : '{{ device.displayName }} is Off'
event = createEvent(name: finalResult.type, value: finalResult.value, descriptionText: descriptionText, translatable: true)
}
}
else {
def cluster = zigbee.parse(description)
if (cluster && cluster.clusterId == 0x0006 && cluster.command == 0x07){
if (cluster.data[0] == 0x00) {
log.debug "ON/OFF REPORTING CONFIG RESPONSE: " + cluster
event = createEvent(name: "checkInterval", value: 60 * 12, displayed: false, data: [protocol: "zigbee", hubHardwareId: device.hub.hardwareID])
}
else {
log.warn "ON/OFF REPORTING CONFIG FAILED- error
code:${cluster.data[0]}"
event = null
}
}
else {
log.warn "DID NOT PARSE MESSAGE for description : $description"
log.debug "${cluster}"
}
}
return event
}
def off() {
zigbee.off()
}
def on() {
zigbee.on()
}
/**
* PING is used by Device-Watch in attempt to reach the Device
* */
def ping() {
return zigbee.onOffRefresh()
}
def refresh() {
zigbee.onOffRefresh() + zigbee.electricMeasurementPowerRefresh()
}
def configure() {
// Device-Watch allows 2 check-in misses from device + ping (plus 1 min lag time)
// enrolls with default periodic reporting until newer 5 min interval is confirmed
sendEvent(name: "checkInterval", value: 2 * 10 * 60 + 1 * 60, displayed: false, data: [protocol: "zigbee", hubHardwareId: device.hub.hardwareID])
// OnOff minReportTime 0 seconds, maxReportTime 5 min. Reporting interval if no activity
refresh() + zigbee.onOffConfig(0, 300) + powerConfig()
}
// power config for devices with min reporting interval as 1 second and reporting interval if no activity as 10 min (600s)
//min change in value is 01
def powerConfig() {
[
"zdo bind 0x${device.deviceNetworkId} 1 ${endpointId} 0x0B04
{${device.zigbeeId}} {}", "delay 2000",
"zcl global send-me-a-report 0x0B04 0x050B 0x29 1 600 {05 00}",
//The send-me-a-report is custom to the attribute type for CentraLite
"delay 200",
"send 0x${device.deviceNetworkId} 1 ${endpointId}", "delay 2000"
]
}
private getEndpointId() {
new BigInteger(device.endpointId, 16).toString()
}
//TODO: Remove this after getKnownDescription can parse it automatically
def getPowerDescription(descMap) {
def powerValue = "undefined"
if (descMap.cluster == "0B04") {
if (descMap.attrId == "050b") {
if(descMap.value!="ffff")
powerValue = zigbee.convertHexToInt(descMap.value)
}
}
else if (descMap.clusterId == "0B04") {
if(descMap.command=="07"){
return [type: "update", value : "power (0B04) capability configured
successfully"]
}
}
if (powerValue != "undefined"){
return [type: "power", value : powerValue]
}
else {
return [:]
}
}
I should have thought of it sooner... The problem was with the copy/paste: the long lines were broken by carriage returns. I removed the returns and the script compiled.
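For reference, this is what the statement from line 94 looks like once it is rejoined onto a single line (the {{ }} placeholders are SmartThings translation templates, not Groovy interpolation, so they must stay inside one single-quoted string):
def descriptionText = finalResult.value == "on" ? '{{ device.displayName }} is On' : '{{ device.displayName }} is Off'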
I am not able to get the radtest command to authenticate and I can't figure out what the issue is. I keep getting the error:
rad_recv: Access-Reject packet from host 127.0.0.1 port 1812, id=82, length=20
Here is the execution:
rad_recv: Access-Reject packet from host 127.0.0.1 port 1812, id=75, length=20
root@localhost:/etc/freeradius# radtest testing password 127.0.0.1 0 testing123
Sending Access-Request of id 82 to 127.0.0.1 port 1812
User-Name = "testing"
User-Password = "password"
NAS-IP-Address = 127.0.0.1
NAS-Port = 0
Message-Authenticator = 0x00000000000000000000000000000000
rad_recv: Access-Reject packet from host 127.0.0.1 port 1812, id=82, length=20
Here is the debug output:
+group authorize {
++[preprocess] = ok
++policy rewrite_calling_station_id {
+++? if (Calling-Station-Id =~ /([0-9a-f]{2})[-:]?([0-9a-f]{2})[-:.]?([0-9a-f]{2})[-:]?([0-9a-f]{2})[-:.]?([0-9a-f]{2})[-:]?([0-9a-f]{2})/i)
(Attribute Calling-Station-Id was not found)
? Evaluating (Calling-Station-Id =~ /([0-9a-f]{2})[-:]?([0-9a-f]{2})[-:.]?([0-9a-f]{2})[-:]?([0-9a-f]{2})[-:.]?([0-9a-f]{2})[-:]?([0-9a-f]{2})/i) -> FALSE
+++? if (Calling-Station-Id =~ /([0-9a-f]{2})[-:]?([0-9a-f]{2})[-:.]?([0-9a-f]{2})[-:]?([0-9a-f]{2})[-:.]?([0-9a-f]{2})[-:]?([0-9a-f]{2})/i) -> FALSE
+++else else {
++++[noop] = noop
+++} # else else = noop
++} # policy rewrite_calling_station_id = noop
[authorized_macs] expand: %{Calling-Station-ID} ->
++[authorized_macs] = noop
++? if (!ok)
? Evaluating !(ok) -> TRUE
++? if (!ok) -> TRUE
++if (!ok) {
+++[reject] = reject
++} # if (!ok) = reject
+} # group authorize = reject
Using Post-Auth-Type Reject
# Executing group from file /etc/freeradius/sites-enabled/default
+group REJECT {
[eap] Request didn't contain an EAP-Message, not inserting EAP-Failure
++[eap] = noop
[attr_filter.access_reject] expand: %{User-Name} -> testing
attr_filter: Matched entry DEFAULT at line 11
++[attr_filter.access_reject] = updated
+} # group REJECT = updated
Delaying reject of request 2 for 1 seconds
Going to the next request
Waking up in 0.9 seconds.
Sending delayed reject for request 2
Sending Access-Reject of id 82 to 127.0.0.1 port 55664
Waking up in 4.9 seconds.
Cleaning up request 2 ID 82 with timestamp +269
Ready to process requests.
This is my users file:
testing Cleartext-Password := "password"
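For context, the debug output above shows where the reject happens: the request is rejected in the authorize section because it carries no Calling-Station-Id that matches the authorized_macs list, so the users file is never consulted. Below is a sketch of the kind of authorize block that produces such a trace (module names are taken from the debug output; the exact file and layout are an assumption):
authorize {
    preprocess
    # canonicalises aa:bb:cc:dd:ee:ff style MAC addresses
    rewrite_calling_station_id
    # looks the Calling-Station-Id up in the authorized_macs file
    authorized_macs
    if (!ok) {
        # no MAC matched (radtest sends no Calling-Station-Id at all), so reject
        reject
    }
    # eap, files, pap, etc. would follow here
}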
I want to create a script that checks the following conditions:
The string "port" should be present in the file and, if so, its value should be 20000.
The file should have a mention of sslKey, sslCert and ssl_cipher (the values against these strings/keys can be anything).
Here is the attempt I made:
$ awk '/port|sslKey|sslCert|ssl_cipher/ {print $2,$3}' pkg.conf
port 20000
sslKey /usr/product/plat/etc/ssl/server.pem
sslCert /usr/product/plat/etc/ssl/server.cert
ssl_cipher ECDH+AES128:ECDH+AESGCM:ECDH+AES256:DH+AES:DH+AESGCM:DH+AES256:RSA+AES:RSA+AESGCM:!aNULL:!RC4:!MD5:!DSS:!3DES
The problem with the above command is that it still runs even if one of the strings 'port|sslKey|sslCert|ssl_cipher' is missing.
Could this be achieved using only a few lines of awk?
If any of the strings/conditions is missing, the output should display that condition as well as the conditions that are met.
Considering your details in the question, here are input examples:
Correct:
$ cat pkg.conf
port 20000
sslKey /usr/product/plat/etc/ssl/server.pem
sslCert /usr/product/plat/etc/ssl/server.cert
ssl_cipher ECDH+AES128:ECDH+AESGCM:ECDH+AES256:DH+AES:DH+AESGCM:DH+AES256:RSA+AES:RSA+AESGCM:!aNULL:!RC4:!MD5:!DSS:!3DES
Wrong (port 18000 and missing sslCert):
$ cat pkg_wrong.conf
port 18000
sslKey /usr/product/plat/etc/ssl/server.pem
ssl_cipher ECDH+AES128:ECDH+AESGCM:ECDH+AES256:DH+AES:DH+AESGCM:DH+AES256:RSA+AES:RSA+AESGCM:!aNULL:!RC4:!MD5:!DSS:!3DES
AWK solution:
Correct pkg.conf. Returns 0 and outputs nothing:
$ awk '
/^port [0-9]+$/ { if ( $2 == 20000 ) isport=1; port=$2; }
/^sslKey .+$/ { issslKey=1; sslKey=$2; }
/^sslCert .+$/ { issslCert=1; sslCert=$2; }
/^ssl_cipher .+$/ { isssl_cipher=1; ssl_cipher=$2; }
END { if (isport && issslKey && issslCert && isssl_cipher) exit(0);
else { print("port " port); print("sslKey " sslKey); print("sslCert " sslCert); print("ssl_cipher " ssl_cipher); exit(1); }
}
' pkg.conf
Wrong pkg_wrong.conf (port 18000 and missing sslCert):
$ awk '
/^port [0-9]+$/ { if ( $2 == 20000 ) isport=1; port=$2; }
/^sslKey .+$/ { issslKey=1; sslKey=$2; }
/^sslCert .+$/ { issslCert=1; sslCert=$2; }
/^ssl_cipher .+$/ { isssl_cipher=1; ssl_cipher=$2; }
END { if (isport && issslKey && issslCert && isssl_cipher) exit(0);
else { print("port " port); print("sslKey " sslKey); print("sslCert " sslCert); print("ssl_cipher " ssl_cipher); exit(1); }
}
' pkg_wrong.conf
port 18000
sslKey /usr/product/plat/etc/ssl/server.pem
sslCert
ssl_cipher ECDH+AES128:ECDH+AESGCM:ECDH+AES256:DH+AES:DH+AESGCM:DH+AES256:RSA+AES:RSA+AESGCM:!aNULL:!RC4:!MD5:!DSS:!3DES
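If you also want the output to state explicitly which conditions are met and which are missing or wrong (as asked above), here is a slightly longer variant of the same idea; an untested sketch:
$ awk '
/^port /       { port = $2 }
/^sslKey /     { sslKey = $2 }
/^sslCert /    { sslCert = $2 }
/^ssl_cipher / { ssl_cipher = $2 }
END {
    ok = 1
    if (port == 20000)    print "OK      port " port;             else { print "BAD     port " port; ok = 0 }
    if (sslKey != "")     print "OK      sslKey " sslKey;         else { print "MISSING sslKey"; ok = 0 }
    if (sslCert != "")    print "OK      sslCert " sslCert;       else { print "MISSING sslCert"; ok = 0 }
    if (ssl_cipher != "") print "OK      ssl_cipher " ssl_cipher; else { print "MISSING ssl_cipher"; ok = 0 }
    exit(!ok)
}
' pkg.conf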
I have 2 servers: one has Magento 2 installed (IP 129.89.188.244, port 80) and Varnish runs on the other (IP 129.89.188.245, port 80).
My Varnish configuration:
File /etc/default/varnish:
DAEMON_OPTS="-a :80 \
-T 127.0.0.1:6082 \
-b 129.89.188.244:80 \
-f /etc/varnish/default.vcl \
-S /etc/varnish/secret \
-s malloc,256m"
netstat -tulpn:
tcp 0 0 0.0.0.0:22 0.0.0.0:* LISTEN 1288/sshd
tcp 0 0 127.0.0.1:6082 0.0.0.0:* LISTEN 11115/varnishd
tcp 0 0 0.0.0.0:80 0.0.0.0:* LISTEN 11115/varnishd
tcp6 0 0 :::22 :::* LISTEN 1288/sshd
tcp6 0 0 :::80 :::* LISTEN 11115/varnishd
/etc/varnish/default.vcl:
# VCL version 5.0 is not supported so it should be 4.0 even though actually used Varnish version is 5
vcl 4.0;
import std;
# The minimal Varnish version is 5.0
# For SSL offloading, pass the following header in your proxy server or load balancer: 'X-Forwarded-Proto: https'
backend default {
.host = "129.89.188.244";
.port = "80";
.first_byte_timeout = 600s;
.probe = {
.url = "/pub/health_check.php";
.timeout = 2s;
.interval = 5s;
.window = 10;
.threshold = 5;
}
}
acl purge {
"129.89.188.245";
"127.0.0.1";
"localhost";
}
sub vcl_recv {
if (req.method == "PURGE") {
if (client.ip !~ purge) {
return (synth(405, "Method not allowed"));
}
# To use the X-Pool header for purging varnish during automated deployments, make sure the X-Pool header
# has been added to the response in your backend server config. This is used, for example, by the
# capistrano-magento2 gem for purging old content from varnish during it's deploy routine.
if (!req.http.X-Magento-Tags-Pattern && !req.http.X-Pool) {
return (synth(400, "X-Magento-Tags-Pattern or X-Pool header required"));
}
if (req.http.X-Magento-Tags-Pattern) {
ban("obj.http.X-Magento-Tags ~ " + req.http.X-Magento-Tags-Pattern);
}
if (req.http.X-Pool) {
ban("obj.http.X-Pool ~ " + req.http.X-Pool);
}
return (synth(200, "Purged"));
}
if (req.method != "GET" &&
req.method != "HEAD" &&
req.method != "PUT" &&
req.method != "POST" &&
req.method != "TRACE" &&
req.method != "OPTIONS" &&
req.method != "DELETE") {
/* Non-RFC2616 or CONNECT which is weird. */
return (pipe);
}
# We only deal with GET and HEAD by default
if (req.method != "GET" && req.method != "HEAD") {
return (pass);
}
# Bypass shopping cart, checkout and search requests
if (req.url ~ "/checkout" || req.url ~ "/catalogsearch") {
return (pass);
}
# Bypass health check requests
if (req.url ~ "/pub/health_check.php") {
return (pass);
}
# Set initial grace period usage status
set req.http.grace = "none";
# normalize url in case of leading HTTP scheme and domain
set req.url = regsub(req.url, "^http[s]?://", "");
# collect all cookies
std.collect(req.http.Cookie);
# Compression filter. See https://www.varnish-cache.org/trac/wiki/FAQ/Compression
if (req.http.Accept-Encoding) {
if (req.url ~ "\.(jpg|jpeg|png|gif|gz|tgz|bz2|tbz|mp3|ogg|swf|flv)$") {
# No point in compressing these
unset req.http.Accept-Encoding;
} elsif (req.http.Accept-Encoding ~ "gzip") {
set req.http.Accept-Encoding = "gzip";
} elsif (req.http.Accept-Encoding ~ "deflate" && req.http.user-agent !~ "MSIE") {
set req.http.Accept-Encoding = "deflate";
} else {
# unkown algorithm
unset req.http.Accept-Encoding;
}
}
# Remove Google gclid parameters to minimize the cache objects
set req.url = regsuball(req.url,"\?gclid=[^&]+$",""); # strips when QS = "?gclid=AAA"
set req.url = regsuball(req.url,"\?gclid=[^&]+&","?"); # strips when QS = "?gclid=AAA&foo=bar"
set req.url = regsuball(req.url,"&gclid=[^&]+",""); # strips when QS = "?foo=bar&gclid=AAA" or QS = "?foo=bar&gclid=AAA&bar=baz"
# Static files caching
if (req.url ~ "^/(pub/)?(media|static)/") {
# Static files should not be cached by default
return (pass);
# But if you use a few locales and don't use CDN you can enable caching static files by commenting previous line (#return (pass);) and uncommenting next 3 lines
#unset req.http.Https;
#unset req.http.X-Forwarded-Proto;
#unset req.http.Cookie;
}
return (hash);
}
sub vcl_hash {
if (req.http.cookie ~ "X-Magento-Vary=") {
hash_data(regsub(req.http.cookie, "^.*?X-Magento-Vary=([^;]+);*.*$", "\1"));
}
# For multi site configurations to not cache each other's content
if (req.http.host) {
hash_data(req.http.host);
} else {
hash_data(server.ip);
}
# To make sure http users don't see ssl warning
if (req.http.X-Forwarded-Proto) {
hash_data(req.http.X-Forwarded-Proto);
}
}
sub vcl_backend_response {
set beresp.grace = 3d;
if (beresp.http.content-type ~ "text") {
set beresp.do_esi = true;
}
if (bereq.url ~ "\.js$" || beresp.http.content-type ~ "text") {
set beresp.do_gzip = true;
}
if (beresp.http.X-Magento-Debug) {
set beresp.http.X-Magento-Cache-Control = beresp.http.Cache-Control;
}
# cache only successfully responses and 404s
if (beresp.status != 200 && beresp.status != 404) {
set beresp.ttl = 0s;
set beresp.uncacheable = true;
return (deliver);
} elsif (beresp.http.Cache-Control ~ "private") {
set beresp.uncacheable = true;
set beresp.ttl = 86400s;
return (deliver);
}
# validate if we need to cache it and prevent from setting cookie
# images, css and js are cacheable by default so we have to remove cookie also
if (beresp.ttl > 0s && (bereq.method == "GET" || bereq.method == "HEAD")) {
unset beresp.http.set-cookie;
}
# If page is not cacheable then bypass varnish for 2 minutes as Hit-For-Pass
if (beresp.ttl <= 0s ||
beresp.http.Surrogate-control ~ "no-store" ||
(!beresp.http.Surrogate-Control &&
beresp.http.Cache-Control ~ "no-cache|no-store") ||
beresp.http.Vary == "*") {
# Mark as Hit-For-Pass for the next 2 minutes
set beresp.ttl = 120s;
set beresp.uncacheable = true;
}
return (deliver);
}
sub vcl_deliver {
if (resp.http.X-Magento-Debug) {
if (resp.http.x-varnish ~ " ") {
set resp.http.X-Magento-Cache-Debug = "HIT";
set resp.http.Grace = req.http.grace;
} else {
set resp.http.X-Magento-Cache-Debug = "MISS";
}
} else {
unset resp.http.Age;
}
# Not letting browser to cache non-static files.
if (resp.http.Cache-Control !~ "private" && req.url !~ "^/(pub/)?(media|static)/") {
set resp.http.Pragma = "no-cache";
set resp.http.Expires = "-1";
set resp.http.Cache-Control = "no-store, no-cache, must-revalidate, max-age=0";
}
unset resp.http.X-Magento-Debug;
unset resp.http.X-Magento-Tags;
unset resp.http.X-Powered-By;
unset resp.http.Server;
unset resp.http.X-Varnish;
unset resp.http.Via;
unset resp.http.Link;
}
sub vcl_hit {
if (obj.ttl >= 0s) {
# Hit within TTL period
return (deliver);
}
if (std.healthy(req.backend_hint)) {
if (obj.ttl + 300s > 0s) {
# Hit after TTL expiration, but within grace period
set req.http.grace = "normal (healthy server)";
return (deliver);
} else {
# Hit after TTL and grace expiration
return (miss);
}
} else {
# server is not healthy, retrieve from cache
set req.http.grace = "unlimited (unhealthy server)";
return (deliver);
}
}
Now the issue is: when I open the URL 129.89.188.244, Magento opens but it's not getting cached in Varnish. And when I call the Varnish URL 129.89.188.245, it redirects to my Magento URL 129.89.188.244. My Varnish log shows that the page is already cached, but Magento is not being served from that Varnish cache.
In order for the cache to work, your requests always have to go through the proxy/cache (Varnish).
Varnish evaluates your requests and decides whether the request is already cached (then returns it from the cache) or not; in that case it forwards the request to Magento and caches the response before returning it to the client.
That's how most caches work. You can read in detail how the Varnish caching mechanism works here.
If you want to hit your cache, you always have to go through Varnish (129.89.188.245:80).
Consider this diagram from the official documentation:
This is the expected behaviour.
Varnish sits in front of your Magento server. If you hit your Magento server directly by opening 129.89.188.244, bypassing Varnish, then Magento will send you a response without involving Varnish; if you hit Varnish instead, Varnish will call Magento and cache the response. The other way around is not possible and makes no sense.
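A quick way to check this, assuming shell access to both servers: request the page twice through the Varnish IP and watch the hit counters on the Varnish box (with the VCL above, most response headers that would reveal a hit are stripped unless Magento's X-Magento-Debug header is enabled, so varnishstat is the more reliable signal):
# on any client: always request through Varnish, never through Magento directly
curl -s -o /dev/null http://129.89.188.245/
curl -s -o /dev/null http://129.89.188.245/
# on the Varnish server: cache_hit should increase on the second request
varnishstat -1 | grep -E 'cache_hit|cache_miss'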
This solution worked for me to configure Varnish and Magento on different servers.
Varnish server: xxx.xxx.xxx.xxx port 80
Magento server: yyy.yyy.yyy.yyy port 80
Changes needed on the Varnish server:
1. Log in to the Varnish server
2. Go to the file /etc/varnish/default.vcl
3. Under "backend default", update:
.host = "yyy.yyy.yyy.yyy"; // Magento server IP
.port = "80"; // Magento web server port
4. Restart Varnish (systemctl restart varnish)
Note: keep the default VCL that is generated during the Varnish installation and don't replace it with the Magento-generated VCL for Varnish (available from the Magento Admin).
Changes needed on the Magento server:
1. Log in to the Magento server
2. Go to the env.php file located in the app/etc directory
3. Update the values in:
'http_cache_hosts' => [
    [
        'host' => 'xxx.xxx.xxx.xxx', // Varnish server public IP
        'port' => '80' // Varnish server port
    ]
]
4. Update your base URLs in the core_config_data table to your Varnish public IP (http://xxx.xxx.xxx.xxx/)
5. Flush the Magento caches (bin/magento cache:flush); see the CLI sketch below for an alternative to editing the files by hand.
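On recent Magento versions (2.2 or later), steps 3-5 can also be done from the command line instead of editing env.php and core_config_data by hand; a sketch, run from the Magento root on the Magento server:
bin/magento setup:config:set --http-cache-hosts=xxx.xxx.xxx.xxx:80   # writes http_cache_hosts into app/etc/env.php
bin/magento config:set web/unsecure/base_url "http://xxx.xxx.xxx.xxx/"
bin/magento config:set web/unsecure/base_link_url "http://xxx.xxx.xxx.xxx/"
bin/magento cache:flush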
Using Varnish 3.0.4, we're seeing a lot of backend_busy, but we have not set the max_connections setting on any of our backends.
I'm trying to find out why, because it seems some client requests get aborted.
What could cause backend_busy?
Thanks for any tips.
Here's my varnishstat -1:
client_conn 0 0.00 Client connections accepted
client_drop 0 0.00 Connection dropped, no sess/wrk
client_req 0 0.00 Client requests received
cache_hit 0 0.00 Cache hits
cache_hitpass 0 0.00 Cache hits for pass
cache_miss 0 0.00 Cache misses
backend_conn 0 0.00 Backend conn. success
backend_unhealthy 0 0.00 Backend conn. not attempted
backend_busy 386013 8.38 Backend conn. too many
backend_fail 0 0.00 Backend conn. failures
backend_reuse 1140751 24.77 Backend conn. reuses
backend_toolate 292966 6.36 Backend conn. was closed
backend_recycle 31329 0.68 Backend conn. recycles
backend_retry 364845 7.92 Backend conn. retry
fetch_head 93857 2.04 Fetch head
fetch_length 0 0.00 Fetch with Length
fetch_chunked 0 0.00 Fetch chunked
fetch_eof 1007 0.02 Fetch EOF
fetch_bad 766163 16.63 Fetch had bad headers
fetch_close 16152 0.35 Fetch wanted close
fetch_oldhttp 783263 17.00 Fetch pre HTTP/1.1 closed
fetch_zero 14372 0.31 Fetch zero len
fetch_failed 83 0.00 Fetch failed
fetch_1xx 792868 17.21 Fetch no body (1xx)
fetch_204 28600 0.62 Fetch no body (204)
fetch_304 0 0.00 Fetch no body (304)
n_sess_mem 0 . N struct sess_mem
n_sess 849 . N struct sess
n_object 0 . N struct object
n_vampireobject 0 . N unresurrected objects
n_objectcore 0 . N struct objectcore
n_objecthead 0 . N struct objecthead
n_waitinglist 0 . N struct waitinglist
n_vbc 674 . N struct vbc
n_wrk 1819 . N worker threads
n_wrk_create 155 0.00 N worker threads created
n_wrk_failed 237296 5.15 N worker threads not created
n_wrk_max 0 0.00 N worker threads limited
n_wrk_lqueue 237394 5.15 work request queue length
n_wrk_queued 237937 5.17 N queued work requests
n_wrk_drop 128 0.00 N dropped work requests
n_backend 96 . N backends
n_expired 135 . N expired objects
n_lru_nuked 7710 . N LRU nuked objects
n_lru_moved 0 . N LRU moved objects
losthdr 0 0.00 HTTP header overflows
n_objsendfile 0 0.00 Objects sent with sendfile
n_objwrite 49182 1.07 Objects sent with write
n_objoverflow 0 0.00 Objects overflowing workspace
s_sess 25 0.00 Total Sessions
s_req 104479 2.27 Total Requests
s_pipe 0 0.00 Total pipe
s_pass 205233 4.46 Total pass
s_fetch 0 0.00 Total fetch
s_hdrbytes 0 0.00 Total header bytes
s_bodybytes 1093243 23.73 Total body bytes
sess_closed 0 0.00 Session Closed
sess_pipeline 385979 8.38 Session Pipeline
sess_readahead 1140751 24.77 Session Read Ahead
sess_linger 758 0.02 Session Linger
sess_herd 482049 10.46 Session herd
shm_records 823074 17.87 SHM records
shm_writes 470696378 10218.54 SHM writes
shm_flushes 10834209993 235204.18 SHM flushes due to overflow
shm_cont 167868 3.64 SHM MTX contention
shm_cycles 19 0.00 SHM cycles through buffer
sms_nreq 18 0.00 SMS allocator requests
sms_nobj 1092005 . SMS outstanding allocations
sms_nbytes 635568 . SMS outstanding bytes
sms_balloc 85592445 . SMS bytes allocated
sms_bfree 5323477 . SMS bytes freed
backend_req 42 0.00 Backend requests made
n_vcl 44231 0.96 N vcl total
n_vcl_avail 37 0.00 N vcl available
n_vcl_discard 23820 0.52 N vcl discarded
n_ban 0 . N total active bans
n_ban_add 0 0.00 N new bans added
n_ban_retire 2136286212 46377.49 N old bans deleted
n_ban_obj_test 2136286212 46377.49 N objects tested
n_ban_re_test 857240 18.61 N regexps tested against
n_ban_dups 1 0.00 N duplicate bans removed
hcb_nolock 1 0.00 HCB Lookups without lock
hcb_lock 0 0.00 HCB Lookups with lock
hcb_insert 28435 0.62 HCB Inserts
esi_errors 14884 0.32 ESI parse errors (unlock)
esi_warnings 246931 5.36 ESI parse warnings (unlock)
accept_fail 218496 4.74 Accept failures
client_drop_late 5603745 121.65 Connection dropped late
uptime 29071691616 631128.92 Client uptime
dir_dns_lookups 46063 1.00 DNS director lookups
dir_dns_failed 685875 14.89 DNS director failed lookups
dir_dns_hit 356122 7.73 DNS director cached lookups hit
dir_dns_cache_full 355962 7.73 DNS director full dnscache
vmods 0 . Loaded VMODs
n_gzip 0 0.00 Gzip operations
n_gunzip 0 0.00 Gunzip operations
LCK.sms.creat 0 0.00 Created locks
LCK.sms.destroy 0 0.00 Destroyed locks
LCK.sms.locks 0 0.00 Lock Operations
LCK.sms.colls 0 0.00 Collisions
LCK.smp.creat 0 0.00 Created locks
LCK.smp.destroy 0 0.00 Destroyed locks
LCK.smp.locks 0 0.00 Lock Operations
LCK.smp.colls 0 0.00 Collisions
LCK.sma.creat 0 0.00 Created locks
LCK.sma.destroy 0 0.00 Destroyed locks
LCK.sma.locks 0 0.00 Lock Operations
LCK.sma.colls 0 0.00 Collisions
LCK.smf.creat 0 0.00 Created locks
LCK.smf.destroy 0 0.00 Destroyed locks
LCK.smf.locks 0 0.00 Lock Operations
LCK.smf.colls 0 0.00 Collisions
LCK.hsl.creat 0 0.00 Created locks
LCK.hsl.destroy 0 0.00 Destroyed locks
LCK.hsl.locks 0 0.00 Lock Operations
LCK.hsl.colls 0 0.00 Collisions
LCK.hcb.creat 0 0.00 Created locks
LCK.hcb.destroy 0 0.00 Destroyed locks
LCK.hcb.locks 0 0.00 Lock Operations
LCK.hcb.colls 0 0.00 Collisions
LCK.hcl.creat 0 0.00 Created locks
LCK.hcl.destroy 0 0.00 Destroyed locks
LCK.hcl.locks 0 0.00 Lock Operations
LCK.hcl.colls 0 0.00 Collisions
LCK.vcl.creat 0 0.00 Created locks
LCK.vcl.destroy 0 0.00 Destroyed locks
LCK.vcl.locks 0 0.00 Lock Operations
LCK.vcl.colls 0 0.00 Collisions
LCK.stat.creat 0 0.00 Created locks
LCK.stat.destroy 0 0.00 Destroyed locks
LCK.stat.locks 0 0.00 Lock Operations
LCK.stat.colls 0 0.00 Collisions
LCK.sessmem.creat 0 0.00 Created locks
LCK.sessmem.destroy 0 0.00 Destroyed locks
LCK.sessmem.locks 0 0.00 Lock Operations
LCK.sessmem.colls 0 0.00 Collisions
LCK.wstat.creat 0 0.00 Created locks
LCK.wstat.destroy 0 0.00 Destroyed locks
LCK.wstat.locks 0 0.00 Lock Operations
LCK.wstat.colls 0 0.00 Collisions
LCK.herder.creat 0 0.00 Created locks
LCK.herder.destroy 0 0.00 Destroyed locks
LCK.herder.locks 0 0.00 Lock Operations
LCK.herder.colls 0 0.00 Collisions
LCK.wq.creat 0 0.00 Created locks
LCK.wq.destroy 0 0.00 Destroyed locks
LCK.wq.locks 0 0.00 Lock Operations
LCK.wq.colls 0 0.00 Collisions
LCK.objhdr.creat 0 0.00 Created locks
LCK.objhdr.destroy 0 0.00 Destroyed locks
LCK.objhdr.locks 0 0.00 Lock Operations
LCK.objhdr.colls 0 0.00 Collisions
LCK.exp.creat 0 0.00 Created locks
LCK.exp.destroy 0 0.00 Destroyed locks
LCK.exp.locks 0 0.00 Lock Operations
LCK.exp.colls 0 0.00 Collisions
LCK.lru.creat 0 0.00 Created locks
LCK.lru.destroy 0 0.00 Destroyed locks
LCK.lru.locks 0 0.00 Lock Operations
LCK.lru.colls 0 0.00 Collisions
LCK.cli.creat 0 0.00 Created locks
LCK.cli.destroy 0 0.00 Destroyed locks
LCK.cli.locks 0 0.00 Lock Operations
LCK.cli.colls 0 0.00 Collisions
LCK.ban.creat 0 0.00 Created locks
LCK.ban.destroy 0 0.00 Destroyed locks
LCK.ban.locks 0 0.00 Lock Operations
LCK.ban.colls 0 0.00 Collisions
LCK.vbp.creat 0 0.00 Created locks
LCK.vbp.destroy 0 0.00 Destroyed locks
LCK.vbp.locks 0 0.00 Lock Operations
LCK.vbp.colls 0 0.00 Collisions
LCK.vbe.creat 0 0.00 Created locks
LCK.vbe.destroy 0 0.00 Destroyed locks
LCK.vbe.locks 0 0.00 Lock Operations
LCK.vbe.colls 0 0.00 Collisions
LCK.backend.creat 0 0.00 Created locks
LCK.backend.destroy 0 0.00 Destroyed locks
LCK.backend.locks 0 0.00 Lock Operations
LCK.backend.colls 0 0.00 Collisions
SMA.s0.c_req 0 0.00 Allocator requests
SMA.s0.c_fail 0 0.00 Allocator failures
SMA.s0.c_bytes 0 0.00 Bytes allocated
SMA.s0.c_freed 0 0.00 Bytes freed
SMA.s0.g_alloc 0 . Allocations outstanding
SMA.s0.g_bytes 0 . Bytes outstanding
SMA.s0.g_space 0 . Bytes available
SMA.Transient.c_req 0 0.00 Allocator requests
SMA.Transient.c_fail 0 0.00 Allocator failures
SMA.Transient.c_bytes 0 0.00 Bytes allocated
SMA.Transient.c_freed 0 0.00 Bytes freed
SMA.Transient.g_alloc 0 . Allocations outstanding
SMA.Transient.g_bytes 0 . Bytes outstanding
SMA.Transient.g_space 0 . Bytes available
VBE.default(127.0.0.1,,8080).vcls 0 . VCL references
VBE.default(127.0.0.1,,8080).happy 0 . Happy health probes
VBE.wiki(127.0.0.1,,8081).vcls 0 . VCL references
VBE.wiki(127.0.0.1,,8081).happy 0 . Happy health probes
VBE.tiles(127.0.0.1,,9090).vcls 0 . VCL references
VBE.tiles(127.0.0.1,,9090).happy 0 . Happy health probes
VBE.geoserver(127.0.0.1,,43074).vcls 0 . VCL references
VBE.geoserver(127.0.0.1,,43074).happy 0 . Happy health probes
VBE.iconserver(127.0.0.1,,43077).vcls 0 . VCL references
VBE.iconserver(127.0.0.1,,43077).happy 0 . Happy health probes
VBE.uploadserver(127.0.0.1,,43079).vcls 0 . VCL references
VBE.uploadserver(127.0.0.1,,43079).happy 0 . Happy health probes
VBE.graphserver(127.0.0.1,,43080).vcls 0 . VCL references
VBE.graphserver(127.0.0.1,,43080).happy 0 . Happy health probes
VBE.kmlserver(127.0.0.1,,43082).vcls 0 . VCL references
VBE.kmlserver(127.0.0.1,,43082).happy 0 . Happy health probes
VBE.feedbackserver(127.0.0.1,,43085).vcls 0 . VCL references
VBE.feedbackserver(127.0.0.1,,43085).happy 0 . Happy health probes
VBE.weatherserver(127.0.0.1,,43086).vcls 0 . VCL references
VBE.weatherserver(127.0.0.1,,43086).happy 0 . Happy health probes
VBE.analyticsserver(127.0.0.1,,43087).vcls 0 . VCL references
VBE.analyticsserver(127.0.0.1,,43087).happy 0 . Happy health probes
VBE.chartserver(127.0.0.1,,43088).vcls 0 . VCL references
VBE.chartserver(127.0.0.1,,43088).happy 0 . Happy health probes
VBE.watcher(127.0.0.1,,8090).vcls 0 . VCL references
VBE.watcher(127.0.0.1,,8090).happy 0 . Happy health probes
VBE.render(127.0.0.1,,8040).vcls 0 . VCL references
VBE.render(127.0.0.1,,8040).happy 0 . Happy health probes
VBE.route(127.0.0.1,,43073).vcls 0 . VCL references
VBE.route(127.0.0.1,,43073).happy 0 . Happy health probes
VBE.osmsync(127.0.0.1,,9010).vcls 0 . VCL references
VBE.osmsync(127.0.0.1,,9010).happy 0 . Happy health probes
VBE.search(127.0.0.1,,8100).vcls 0 . VCL references
VBE.search(127.0.0.1,,8100).happy 0 . Happy health probes
VBE.indexserver(127.0.0.1,,8101).vcls 0 . VCL references
VBE.indexserver(127.0.0.1,,8101).happy 0 . Happy health probes
VBE.rewriteproxy(127.0.0.1,,9030).vcls 0 . VCL references
VBE.rewriteproxy(127.0.0.1,,9030).happy 0 . Happy health probes
VBE.pomplo(127.0.0.1,,9081).vcls 0 . VCL references
VBE.pomplo(127.0.0.1,,9081).happy 0 . Happy health probes
VBE.admin(127.0.0.1,,9000).vcls 0 . VCL references
VBE.admin(127.0.0.1,,9000).happy 0 . Happy health probes
VBE.graphite(127.0.0.1,,7070).vcls 0 . VCL references
VBE.graphite(127.0.0.1,,7070).happy 0 . Happy health probes
VBE.notificationserver(127.0.0.1,,8070).vcls 0 . VCL references
VBE.notificationserver(127.0.0.1,,8070).happy 0 . Happy health probes
VBE.orient(127.0.0.1,,2480).vcls 0 . VCL references
VBE.orient(127.0.0.1,,2480).happy 0 . Happy health probes
VBE.geoip(127.0.0.1,,43930).vcls 0 . VCL references
VBE.geoip(127.0.0.1,,43930).happy 0 . Happy health probes
Here's my VCL (included in default.vcl):
import std;
acl internal {
"localhost";
"192.168.0.0"/16;
"10.0.0.0"/8;
}
probe status {
.url = "/status.json";
.timeout = 100 ms;
.interval = 1s;
.threshold = 1;
}
probe rootCheck {
.url = "/";
.timeout = 100 ms;
.interval = 1s;
.threshold = 1;
}
backend tiles {
.host = "127.0.0.1";
.port = "9090";
.probe = {
.url = "/serverInfo.json";
.timeout = 100 ms;
.interval = 1s;
.threshold = 1;
}
}
backend geoserver {
.host = "127.0.0.1";
.port = "43074";
.probe = rootCheck;
}
backend iconserver {
.host = "127.0.0.1";
.port = "43077";
.probe = rootCheck;
}
backend uploadserver {
.host = "127.0.0.1";
.port = "43079";
.probe = rootCheck;
}
backend graphserver {
.host = "127.0.0.1";
.port = "43080";
.probe = rootCheck;
}
backend kmlserver {
.host = "127.0.0.1";
.port = "43082";
.probe = rootCheck;
}
backend weatherserver {
.host = "127.0.0.1";
.port = "43086";
.probe = {
.url = "/status";
.timeout = 100 ms;
.interval = 1s;
.threshold = 1;
}
}
backend analyticsserver {
.host = "127.0.0.1";
.port = "43087";
.probe = status;
}
backend chartserver {
.host = "127.0.0.1";
.port = "43088";
.probe = status;
}
backend watcher {
.host = "127.0.0.1";
.port = "8090";
.probe = {
.url = "/test";
.timeout = 100 ms;
.interval = 1s;
.threshold = 1;
}
}
backend route {
.host = "127.0.0.1";
.port = "43073";
.probe = status;
}
backend osmsync {
.host = "127.0.0.1";
.port = "9010";
.probe = {
.url = "/state.json";
.timeout = 100 ms;
.interval = 1s;
.threshold = 1;
}
}
backend search {
.host = "127.0.0.1";
.port = "8100";
.probe = status;
}
backend indexserver {
.host = "127.0.0.1";
.port = "8101";
.probe = rootCheck;
}
backend rewriteproxy {
.host = "127.0.0.1";
.port = "9030";
.probe = rootCheck;
}
backend webserver {
.host = "127.0.0.1";
.port = "9081";
.probe = rootCheck;
}
backend admin {
.host = "127.0.0.1";
.port = "9000";
.probe = rootCheck;
}
backend graphite {
.host = "127.0.0.1";
.port = "7070";
.probe = rootCheck;
}
backend notificationserver {
.host = "127.0.0.1";
.port = "8070";
.probe = status;
}
backend orient {
.host = "127.0.0.1";
.port = "2480";
}
backend geoip {
.host = "127.0.0.1";
.port = "43930";
.probe = status;
}
sub allow_only_internal {
if (!client.ip ~ internal) {
error 405 "Not allowed.";
}
}
sub vcl_recv {
if (req.url ~ "\.(png|gif|jpg|swf|css|js|bis|apk|plist|ipa|woff|svg|eot|ttf|json)(\?.*|)$") {
unset req.http.cookie;
}
if (req.url ~ "^/admin") {
call allow_only_internal;
}
if( req.http.host ~ "^tile" ) {
unset req.http.cookie;
set req.http.host = "tiles";
set req.backend = tiles;
} else if( req.http.host ~ "^data" ) {
set req.backend = geoserver;
} else if( req.http.host ~ "^icon" ) {
unset req.http.cookie;
set req.backend = iconserver;
} else if( req.http.host ~ "^upload" ) {
set req.backend = uploadserver;
} else if( req.http.host ~ "^graphite" ) { // must be before graph
if( req.url !~ "^/render" ) { //allow render API anywhere
call allow_only_internal;
}
set req.backend = graphite;
} else if( req.http.host ~ "^graph" ) {
set req.backend = graphserver;
} else if( req.http.host ~ "^kml" ) {
set req.backend = kmlserver;
} else if( req.http.host ~ "^notification" ) {
set req.backend = notificationserver;
} else if( req.http.host ~ "^osmsync" ) {
set req.backend = osmsync;
} else if( req.http.host ~ "^watcher" ) {
set req.backend = watcher;
} else if( req.http.host ~ "^route" ) {
set req.backend = route;
} else if( req.http.host ~ "^search" ) {
set req.backend = search;
} else if( req.http.host ~ "^index" ) {
call allow_only_internal;
set req.backend = indexserver;
} else if( req.http.host ~ "^weather" ) {
set req.backend = weatherserver;
} else if( req.http.host ~ "^analytics" ) {
set req.backend = analyticsserver;
} else if( req.http.host ~ "^chart" ) {
set req.backend = chartserver;
} else if( req.http.host ~ "^geoip" ) {
set req.backend = geoip;
} else if( req.http.host ~ "^rewriteproxy" || req.http.host ~ "^showroom" ) {
set req.backend = rewriteproxy;
} else if( req.http.host ~ "^admin" ) {
call allow_only_internal;
set req.backend = admin;
} else if( req.http.host ~ "^orient" ) {
call allow_only_internal;
set req.backend = orient;
}
//url matching is at the bottom to avoid confusion
else if( req.url ~ "^/query" ) {
set req.backend = geoserver;
} else if( req.url ~ "^/data/" ) {
set req.url = regsub( req.url, "^/data(/.*)", "\1" );
set req.backend = geoserver;
} else if( req.url ~ "^/icon/" ) {
set req.url = regsub( req.url, "^/icon(/.*)", "\1" );
set req.backend = iconserver;
} else if( req.url ~ "^/graph/" ) {
set req.url = regsub( req.url, "^/graph(/.*)", "\1" );
set req.backend = graphserver;
} else if( req.http.host ~ "^(www\.|static|map)" ) {
set req.backend = webserver;
} else {
// todo: handle HTTPS with req.http.X-Forwarded-Proto
// redirect dev and demo to avoid dead links for old urls
error 750 regsub(req.http.host, "^(w+\.|dev\.|demo\.)?", "http://www.") + req.url;
}
}
sub vcl_deliver {
if (resp.http.Location ~ ".*:8080.*") {
set resp.http.Location = regsub(resp.http.Location, ":[0-9]+", "");
}
if (req.backend == graphite) {
set resp.http.Access-Control-Allow-Origin = "*";
set resp.http.Access-Control-Allow-Methods = "GET, OPTIONS";
set resp.http.Access-Control-Allow-Headers = "origin, authorization, accept";
}
unset resp.http.x-url;
unset resp.http.x-host;
return (deliver);
}
sub vcl_fetch {
if (req.url ~ "\.(png|gif|jpg|swf|css|js|bis|apk|plist|ipa|woff|svg|eot|ttf|json)(\?.*|)$") {
unset beresp.http.set-cookie;
}
if( beresp.http.content-type ~ "application/json"
|| beresp.http.content-type ~ "application/javascript"
|| (beresp.http.content-type ~ "application/octet-stream" && req.url ~ "\.bis$")
|| beresp.http.content-type ~ "text/javascript"
|| beresp.http.content-type ~ "text/plain"
|| beresp.http.content-type ~ "text/html"
|| beresp.http.content-type ~ "text/css" ) {
set beresp.do_gzip = true;
}
//allow ban lurker to work. https://www.varnish-software.com/static/book/Cache_invalidation.html#smart-bans
set beresp.http.x-url = req.url;
set beresp.http.x-host = req.http.host;
}
sub vcl_pipe {
if (req.http.upgrade) {
set bereq.http.upgrade = req.http.upgrade;
}
}
sub vcl_recv {
if (req.http.Upgrade ~ "(?i)websocket") {
return (pipe);
}
}
sub vcl_error {
if (obj.status == 750) {
set obj.http.Location = obj.response;
set obj.status = 302;
return (deliver);
}
set obj.http.Content-Type = "text/html; charset=utf-8";
set obj.http.Retry-After = "3";
if( req.http.host ~ "^map" ) {
synthetic regsuball(regsuball(std.fileread("/etc/varnish/error_map.html"), "\$obj\.status\$", obj.status), "\$obj\.response\$", obj.response);
} else {
synthetic regsuball(regsuball(std.fileread("/etc/varnish/error.html"), "\$obj\.status\$", obj.status), "\$obj\.response\$", obj.response);
}
return (deliver);
}
That would normally be because none of your backends in the requested director are healthy at the time.
None of your backends seems to be used in your VCL, nor do they have any health probes reporting, so it's hard to know exactly why this happens. You should also really be using the backends you define and do health checks on them.
When you see something more like the following for all backends, your varnishstat is at least giving you some useful information:
VBE.wiki(127.0.0.1,,8081).vcls 1
VBE.wiki(127.0.0.1,,8081).happy 18446744073709551615
You can also check the current status and a short probe history with "debug.health" in your varnishadm console.
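For example, on Varnish 3.x (assuming the default management address and secret file paths):
# lists every backend with its current health state and recent probe results
varnishadm -T 127.0.0.1:6082 -S /etc/varnish/secret debug.health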
Try updating your libvarnishapi lib; this helped me (Ubuntu):
sudo apt-get install libvarnishapi1
and reboot.
If you are using the wrong version (mine was libvarnishapi-dev), varnishstat doesn't work properly.
By default, when backend_busy is incremented the client receives a 5xx error response. However, by using VCL (Varnish Configuration Language) it seems you can configure Varnish to recover from a "busy backend" by using a different backend, or by serving an outdated or synthetic response.
(Source)
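A minimal Varnish 3 sketch of that idea, assuming a second, hypothetical backend named fallback has been defined; on a 503 the request is restarted once and routed to the other backend instead of returning the error to the client:
sub vcl_recv {
    if (req.restarts > 0) {
        # after a restart, switch to the spare backend
        set req.backend = fallback;
    }
}
sub vcl_error {
    if (obj.status == 503 && req.restarts < 1) {
        return (restart);
    }
}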
Note: I think backend_busy is different from backend_unhealthy.
Actually Varnish periodically pings the backend to make sure it is still up and responsive. If it doesn’t receive a 200 response quickly enough, the backend is marked as unhealthy and every new request to it increments backend_unhealthy until the backend recovers and sends a timely 200 response.