Varnish cache MISS on almost every request (WordPress + Varnish + Docker)
I am attempting to set up a WordPress site using Varnish, Apache2, MySQL, and Docker. It all appears to be working: Varnish is on port 80 and Apache is running on 8080.
However, when looking at the response headers, I see a cache HIT the second time I request a page, but after that I see nothing but cache MISSes no matter how many times I try.
My config file can be found below. It is a modified version of the config found at https://github.com/mattiasgeniar/varnish-4.0-configuration-templates/blob/master/default.vcl
and it is complete overkill. Do you see any obvious mistakes, or do you perhaps have a simpler config file? (I've put a rough sketch of the kind of minimal config I have in mind after the full config below.) Thanks in advance.
vcl 4.0;
# Based on: https://github.com/mattiasgeniar/varnish-4.0-configuration-templates/blob/master/default.vcl

import std;
import directors;

backend server1 { # Define one backend
  .host = "web";              # IP or hostname of the backend
  .port = "80";               # Port Apache (or whatever) is listening on
  .max_connections = 300;     # That's it

  .probe = {
    #.url = "/"; # short easy way (GET /)
    # We prefer to only do a HEAD /
    .request =
      "HEAD / HTTP/1.1"
      "Host: localhost"
      "Connection: close";

    .interval  = 5s;  # check the health of each backend every 5 seconds
    .timeout   = 60s; # timing out after 60 seconds
    .window    = 5;   # if 3 out of the last 5 polls succeeded the backend is considered healthy, otherwise it will be marked as sick
    .threshold = 3;
  }

  .first_byte_timeout     = 300s; # How long to wait before we receive a first byte from our backend?
  .connect_timeout        = 5s;   # How long to wait for a backend connection?
  .between_bytes_timeout  = 2s;   # How long to wait between bytes received from our backend?
}

acl purge {
  # ACL we'll use later to allow purges
  "localhost";
  "127.0.0.1";
  "::1";
}

/* acl editors {
  # ACL to honor the "Cache-Control: no-cache" header to force a refresh, but only from selected IPs
  "localhost";
  "127.0.0.1";
  "::1";
} */

sub vcl_init {
  # Called when VCL is loaded, before any requests pass through it.
  # Typically used to initialize VMODs.

  new vdir = directors.round_robin();
  vdir.add_backend(server1);
  # vdir.add_backend(server...);
  # vdir.add_backend(servern);
}

sub vcl_recv {
  # Called at the beginning of a request, after the complete request has been received and parsed.
  # Its purpose is to decide whether or not to serve the request, how to do it, and, if applicable,
  # which backend to use.
  # Also used to modify the request.

  set req.backend_hint = vdir.backend(); # send all traffic to the vdir director

  if (req.restarts == 0) {
    if (req.http.X-Forwarded-For) { # set or append the client.ip to the X-Forwarded-For header
      set req.http.X-Forwarded-For = req.http.X-Forwarded-For + ", " + client.ip;
    } else {
      set req.http.X-Forwarded-For = client.ip;
    }
  }

  # Normalize the Host header, remove the port (in case you're testing this on various TCP ports)
  set req.http.Host = regsub(req.http.Host, ":[0-9]+", "");

  # Normalize the query arguments
  set req.url = std.querysort(req.url);

  # Allow purging
  if (req.method == "PURGE") {
    if (!client.ip ~ purge) { # purge is the ACL defined at the beginning
      # Not from an allowed IP? Then die with an error.
      return (synth(405, "This IP is not allowed to send PURGE requests."));
    }
    # If you got to this stage (and didn't error out above), purge the cached result
    return (purge);
  }

  # Only deal with "normal" types
  if (req.method != "GET" &&
      req.method != "HEAD" &&
      req.method != "PUT" &&
      req.method != "POST" &&
      req.method != "TRACE" &&
      req.method != "OPTIONS" &&
      req.method != "PATCH" &&
      req.method != "DELETE") {
    /* Non-RFC2616 or CONNECT which is weird. */
    return (pipe);
  }

  # Implementing websocket support (https://www.varnish-cache.org/docs/4.0/users-guide/vcl-example-websockets.html)
  if (req.http.Upgrade ~ "(?i)websocket") {
    return (pipe);
  }

  # Only cache GET or HEAD requests. This makes sure POST requests are always passed.
  if (req.method != "GET" && req.method != "HEAD") {
    return (pass);
  }

  # Some generic URL manipulation, useful for all templates that follow
  # First remove the Google Analytics added parameters, useless for our backend
  if (req.url ~ "(\?|&)(utm_source|utm_medium|utm_campaign|utm_content|gclid|cx|ie|cof|siteurl)=") {
    set req.url = regsuball(req.url, "&(utm_source|utm_medium|utm_campaign|utm_content|gclid|cx|ie|cof|siteurl)=([A-z0-9_\-\.%25]+)", "");
    set req.url = regsuball(req.url, "\?(utm_source|utm_medium|utm_campaign|utm_content|gclid|cx|ie|cof|siteurl)=([A-z0-9_\-\.%25]+)", "?");
    set req.url = regsub(req.url, "\?&", "?");
    set req.url = regsub(req.url, "\?$", "");
  }

  # Strip the hash, the server doesn't need it.
if (req.url ~ "\#") { set req.url = regsub(req.url, "\#.*$", ""); } # strip trailing ? if exists if (req.url ~ "\?$") { set req.url = regsub(req.url, "\?$", ""); } # generic cookie manipulation, useful templates follow # remove "has_js" cookie set req.http.cookie = regsuball(req.http.cookie, "has_js=[^;]+(; )?", ""); # remove google analytics based cookies set req.http.cookie = regsuball(req.http.cookie, "__utm.=[^;]+(; )?", ""); set req.http.cookie = regsuball(req.http.cookie, "_ga=[^;]+(; )?", ""); set req.http.cookie = regsuball(req.http.cookie, "_gat=[^;]+(; )?", ""); set req.http.cookie = regsuball(req.http.cookie, "utmctr=[^;]+(; )?", ""); set req.http.cookie = regsuball(req.http.cookie, "utmcmd.=[^;]+(; )?", ""); set req.http.cookie = regsuball(req.http.cookie, "utmccn.=[^;]+(; )?", ""); # remove doubleclick offensive cookies set req.http.cookie = regsuball(req.http.cookie, "__gads=[^;]+(; )?", ""); # remove quant capital cookies (added plugin, __qca) set req.http.cookie = regsuball(req.http.cookie, "__qc.=[^;]+(; )?", ""); # remove addthis cookies set req.http.cookie = regsuball(req.http.cookie, "__atuv.=[^;]+(; )?", ""); # remove ";" prefix in cookie if present set req.http.cookie = regsuball(req.http.cookie, "^;\s*", ""); # there cookies left spaces or empty? if (req.http.cookie ~ "^\s*$") { unset req.http.cookie; } # normalize accept-encoding header # straight manual: https://www.varnish-cache.org/docs/3.0/tutorial/vary.html # todo: test if it's still needed, varnish 4 if http_gzip_support = on # https://www.varnish-cache.org/docs/trunk/users-guide/compression.html # https://www.varnish-cache.org/docs/trunk/phk/gzip.html if (req.http.accept-encoding) { if (req.url ~ "\.(jpg|png|gif|gz|tgz|bz2|tbz|mp3|ogg)$") { # no point in compressing these unset req.http.accept-encoding; } elsif (req.http.accept-encoding ~ "gzip") { set req.http.accept-encoding = "gzip"; } elsif (req.http.accept-encoding ~ "deflate") { set req.http.accept-encoding = "deflate"; } else { # unkown algorithm unset req.http.accept-encoding; } } if (req.http.cache-control ~ "(?i)no-cache") { #if (req.http.cache-control ~ "(?i)no-cache" && client.ip ~ editors) { # create acl editors if want restrict ctrl-f5 # http://varnish.projects.linpro.no/wiki/vclexampleenableforcerefresh # ignore requests via proxy caches , badly behaved crawlers # msnbot send no-cache every request. if (! (req.http.via || req.http.user-agent ~ "(?i)bot" || req.http.x-purge)) { #set req.hash_always_miss = true; # doesn't seems refresh object in cache return(purge); # couple restart in vcl_purge , x-purge header avoid loops } } # large static files delivered directly end-user without # waiting varnish read file first. # varnish 4 supports streaming, set do_stream in vcl_backend_response() if (req.url ~ "^[^?]*\.(mp[34]|rar|tar|tgz|gz|wav|zip|bz2|xz|7z|avi|mov|ogm|mpe?g|mk[av])(\?.*)?$") { unset req.http.cookie; return (hash); } # remove cookies static files # valid discussion held on line: need cache static files don't cause load? if have memory left. # sure, there's disk i/o, chances os have these files in buffers (thus memory). 
  # Before you blindly enable this, have a read here: https://ma.ttias.be/stop-caching-static-files/
  if (req.url ~ "^[^?]*\.(bmp|bz2|css|doc|eot|flv|gif|gz|ico|jpeg|jpg|js|less|pdf|png|rtf|swf|txt|woff|xml)(\?.*)?$") {
    unset req.http.Cookie;
    return (hash);
  }

  # Send Surrogate-Capability headers to announce ESI support to the backend
  set req.http.Surrogate-Capability = "key=ESI/1.0";

  if (req.http.Authorization) {
    # Not cacheable by default
    return (pass);
  }

  return (hash);
}

sub vcl_pipe {
  # Called upon entering pipe mode.
  # In this mode, the request is passed on to the backend, and any further data from both the client
  # and backend is passed on unaltered until either end closes the connection. Basically, Varnish will
  # degrade into a simple TCP proxy, shuffling bytes back and forth. For a connection in pipe mode,
  # no other VCL subroutine will ever get called after vcl_pipe.

  # Note that only the first request to the backend will have
  # X-Forwarded-For set. If you use X-Forwarded-For and want to
  # have it set for all requests, make sure to have:
  # set bereq.http.connection = "close";
  # here. It is not set by default as it might break some broken web
  # applications, like IIS with NTLM authentication.

  # set bereq.http.Connection = "close";

  # Implementing websocket support (https://www.varnish-cache.org/docs/4.0/users-guide/vcl-example-websockets.html)
  if (req.http.Upgrade) {
    set bereq.http.Upgrade = req.http.Upgrade;
  }

  return (pipe);
}

sub vcl_pass {
  # Called upon entering pass mode. In this mode, the request is passed on to the backend, and the
  # backend's response is passed on to the client, but is not entered into the cache. Subsequent
  # requests submitted over the same client connection are handled normally.

  # return (pass);
}

# The data on which the hashing will take place
sub vcl_hash {
  # Called after vcl_recv to create a hash value for the request. This is used as a key
  # to look up the object in Varnish.

  hash_data(req.url);

  if (req.http.Host) {
    hash_data(req.http.Host);
  } else {
    hash_data(server.ip);
  }

  # hash cookies for requests that have them (note: every distinct Cookie header gets its own cache object)
  if (req.http.Cookie) {
    hash_data(req.http.Cookie);
  }
}

sub vcl_hit {
  # Called when a cache lookup is successful.

  if (obj.ttl >= 0s) {
    # A pure unadulterated hit, deliver it
    return (deliver);
  }

  # https://www.varnish-cache.org/docs/trunk/users-guide/vcl-grace.html
  # When several clients are requesting the same page, Varnish will send one request to the backend and place the others on hold while fetching one copy from the backend. In some products this is called request coalescing and Varnish does this automatically.
  # If you are serving thousands of hits per second, the queue of waiting requests can get huge. There are two potential problems: one is a thundering herd problem, suddenly releasing a thousand threads to serve content might send the load sky high. Secondly, nobody likes to wait. To deal with this we can instruct Varnish to keep the objects in cache beyond their TTL and serve the waiting requests somewhat stale content.

  # if (!std.healthy(req.backend_hint) && (obj.ttl + obj.grace > 0s)) {
  #   return (deliver);
  # } else {
  #   return (fetch);
  # }

  # We have no fresh fish. Let's look at the stale ones.
  if (std.healthy(req.backend_hint)) {
    # Backend is healthy. Limit age to 10s.
    if (obj.ttl + 10s > 0s) {
      #set req.http.grace = "normal(limited)";
      return (deliver);
    } else {
      # No candidate for grace. Fetch a fresh object.
      return (fetch);
    }
  } else {
    # Backend is sick - use full grace
    if (obj.ttl + obj.grace > 0s) {
      #set req.http.grace = "full";
      return (deliver);
    } else {
      # No graced object.
      return (fetch);
    }
  }

  # fetch & deliver once we get the result
  return (fetch); # Dead code, kept as a safeguard
}

sub vcl_miss {
  # Called after a cache lookup if the requested document was not found in the cache. Its purpose
  # is to decide whether or not to attempt to retrieve the document from the backend, and which
  # backend to use.

  return (fetch);
}

# Handle the HTTP response coming from our backend
sub vcl_backend_response {
  # Called after the response headers have been retrieved from the backend.
  # Pause ESI request and remove Surrogate-Control header
  if (beresp.http.Surrogate-Control ~ "ESI/1.0") {
    unset beresp.http.Surrogate-Control;
    set beresp.do_esi = true;
  }

  # Enable cache for all static files
  # The same argument as the static caches from above: monitor your cache size; if data gets nuked out of it, consider giving up the static file cache.
  # Before you blindly enable this, have a read here: https://ma.ttias.be/stop-caching-static-files/
  if (bereq.url ~ "^[^?]*\.(bmp|bz2|css|doc|eot|flv|gif|gz|ico|jpeg|jpg|js|less|mp[34]|pdf|png|rar|rtf|swf|tar|tgz|txt|wav|woff|xml|zip)(\?.*)?$") {
    unset beresp.http.Set-Cookie;
  }

  # Large static files are delivered directly to the end-user without
  # waiting for Varnish to fully read the file first.
  # Varnish 4 fully supports streaming, so use streaming here to avoid locking.
  if (bereq.url ~ "^[^?]*\.(mp[34]|rar|tar|tgz|gz|wav|zip|bz2|xz|7z|avi|mov|ogm|mpe?g|mk[av])(\?.*)?$") {
    unset beresp.http.Set-Cookie;
    set beresp.do_stream = true; # Check memory usage, it'll grow in fetch_chunksize blocks (128k by default) if the backend doesn't send a Content-Length header, so only enable it for big objects
    set beresp.do_gzip = false;  # Don't try to compress it for storage
  }

  # Sometimes, a 301 or 302 redirect formed via Apache's mod_rewrite can mess with the HTTP port that is being passed along.
  # This often happens with simple rewrite rules in a scenario where Varnish runs on :80 and Apache on :8080 on the same box.
  # A redirect can then often redirect the end-user to a URL on :8080, where it should be :80.
  # This may need fine-tuning on your setup.
  #
  # To prevent accidental replace, we only filter the 301/302 redirects for now.
  if (beresp.status == 301 || beresp.status == 302) {
    set beresp.http.Location = regsub(beresp.http.Location, ":[0-9]+", "");
  }

  # Set 2min cache if unset for static files
  if (beresp.ttl <= 0s || beresp.http.Set-Cookie || beresp.http.Vary == "*") {
    set beresp.ttl = 120s; # Important, you shouldn't rely on this, SET YOUR HEADERS in the backend
    set beresp.uncacheable = true;
    return (deliver);
  }

  # Allow stale content, in case the backend goes down.
  # Make Varnish keep all objects for 6 hours beyond their TTL.
  set beresp.grace = 6h;

  return (deliver);
}

# The routine when we deliver the HTTP request to the user
# Last chance to modify headers that are sent to the client
sub vcl_deliver {
  # Called before a cached object is delivered to the client.

  if (obj.hits > 0) { # Add debug header to see if it's a HIT/MISS and the number of hits, disable when not needed
    set resp.http.X-Cache = "HIT";
  } else {
    set resp.http.X-Cache = "MISS";
  }

  # Please note that obj.hits behaviour changed in 4.0: it now counts per objecthead, not per object,
  # and obj.hits may not be reset in some cases where bans are in use. See bug 1492 for details.
  # So take hits with a grain of salt
  set resp.http.X-Cache-Hits = obj.hits;

  # Remove some headers: PHP version
  unset resp.http.X-Powered-By;

  # Remove some headers: Apache version & OS
  unset resp.http.Server;
  unset resp.http.X-Drupal-Cache;
  unset resp.http.X-Varnish;
  unset resp.http.Via;
  unset resp.http.Link;
  unset resp.http.X-Generator;

  return (deliver);
}

sub vcl_purge {
  # Only handle actual PURGE HTTP methods, everything else is discarded
  if (req.method != "PURGE") {
    # restart request
    set req.http.X-Purge = "Yes";
    return (restart);
  }
}

sub vcl_synth {
  if (resp.status == 720) {
    # We use this special error status 720 to force redirects with 301 (permanent) redirects
    # To use this, call the following from anywhere in vcl_recv: return (synth(720, "http://host/new.html"));
    set resp.http.Location = resp.reason;
    set resp.status = 301;
    return (deliver);
  } elseif (resp.status == 721) {
    # And we use error status 721 to force redirects with a 302 (temporary) redirect
    # To use this, call the following from anywhere in vcl_recv: return (synth(721, "http://host/new.html"));
    set resp.http.Location = resp.reason;
    set resp.status = 302;
    return (deliver);
  }

  return (deliver);
}

sub vcl_fini {
  # Called when VCL is discarded only after all requests have exited the VCL.
  # Typically used to clean up VMODs.

  return (ok);
}
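For comparison, this is roughly the kind of minimal config I had in mind. It is just an untested sketch, not something I'm running: it reuses the "web" backend from my Docker setup above, only caches GET/HEAD, passes the WordPress admin and logged-in users (via the standard wordpress_logged_in cookie), and simply drops cookies on static assets instead of hashing the whole Cookie header the way vcl_hash does in the full config. The extension list and cookie names are just what I'd guess for a plain WordPress install.

vcl 4.0;

backend default {
  .host = "web";  # same Docker service name as the full config above
  .port = "80";
}

sub vcl_recv {
  # Only cache GET/HEAD; everything else goes straight to Apache
  if (req.method != "GET" && req.method != "HEAD") {
    return (pass);
  }

  # Never cache the WordPress admin or logged-in users
  if (req.url ~ "wp-(login|admin)" || req.http.Cookie ~ "wordpress_logged_in") {
    return (pass);
  }

  # Static assets: drop cookies so they can be cached and shared
  if (req.url ~ "\.(css|js|png|gif|jpe?g|ico|svg|woff2?)(\?.*)?$") {
    unset req.http.Cookie;
  }

  return (hash);
}

sub vcl_backend_response {
  # Keep stale objects around in case the backend goes down
  set beresp.grace = 6h;
}

sub vcl_deliver {
  # Same HIT/MISS debug header idea as the full config
  if (obj.hits > 0) {
    set resp.http.X-Cache = "HIT";
  } else {
    set resp.http.X-Cache = "MISS";
  }
}

Would something along those lines be enough for a basic WordPress setup, or am I missing something important from the big template?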