Enterprise-Onion-Toolkit/templates.d/nginx.conf.txt

# -*- awk -*-
# EMACS awk mode works quite well for nginx configs
# eotk (c) 2019 Alec Muffett
# SECURITY NOTE: the contents of this file, when actualised, should
# not be made world-readable nor published without redaction;
# password-like 128-bit "nonces" are used in the static regexps which
# substitute hostnames. If a leak occurs: simply rebuild the
# configurations (which will create new nonces) and redeploy.
# logs and pids
pid %PROJECT_DIR%/nginx.pid;
error_log %LOG_DIR%/nginx-error.log %NGINX_SYSLOG%;
%%IF %NGINX_MODULES_DIRS%
%%CSV %NGINX_MODULES_DIRS%
include %1%/*.conf;
%%ENDCSV
%%ELSE
# no nginx_modules_dirs specified
%%ENDIF
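# for illustration (directory hypothetical): if the modules-dirs list
# contained /usr/lib/nginx/modules, the %%CSV loop above would render as:
#   include /usr/lib/nginx/modules/*.conf;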
# TODO: notes for custom 403 error-handling pages:
# https://www.cyberciti.biz/faq/unix-linux-nginx-custom-error-403-page-configuration/
# https://nginx.org/en/docs/http/ngx_http_core_module.html#error_page
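# a minimal sketch of such a page, following the links above (file name
# and root path are hypothetical, not part of this template):
#   error_page 403 /custom_403.html;
#   location = /custom_403.html {
#     root /usr/share/nginx/html;
#     internal;
#   }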
# performance
%%IF %IS_SOFTMAP%
worker_processes %SOFTMAP_NGINX_WORKERS%; # softmap
%%ELSE
worker_processes %NGINX_WORKERS%; # hardmap
%%ENDIF
worker_rlimit_nofile %NGINX_RLIM%;
events {
worker_connections %NGINX_RLIM%;
}
http {
# nginx fails without large enough buckets (sigh)
map_hash_bucket_size %NGINX_HASH_BUCKET_SIZE%;
server_names_hash_bucket_size %NGINX_HASH_BUCKET_SIZE%;
# dns for proxy (sigh)
resolver %NGINX_RESOLVER% valid=%NGINX_TIMEOUT%s;
resolver_timeout %NGINX_TIMEOUT%s;
# we walk a line between keeping it small and flooding resources...
proxy_buffering on;
# for initial; impacts SSL header
proxy_buffer_size %NGINX_BLOCK_SIZE%;
# for rest of response
proxy_buffers %NGINX_BLOCK_COUNT% %NGINX_BLOCK_SIZE%;
# how much can be busy sending to client?
proxy_busy_buffers_size %NGINX_BLOCK_BUSY_SIZE%;
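# worked example (values hypothetical): with a block size of 16k and a
# block count of 8, nginx may hold up to 8 x 16k = 128k of each upstream
# response in memory before spooling to the proxy_temp_path below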
# where to stash oversize requests?
client_body_temp_path /tmp/nginx-body-%PROJECT%;
client_max_body_size 4m;
# in case we want to start spooling responses locally
proxy_temp_path /tmp/nginx-proxy-%PROJECT%;
proxy_max_temp_file_size %NGINX_TMPFILE_SIZE%;
proxy_temp_file_write_size %NGINX_BLOCK_SIZE%;
%%IF %NGINX_CACHE_SECONDS%
# nginx caching static responses for %NGINX_CACHE_SECONDS% seconds
# - this is a lightweight cache to reduce "storms", hence the global
# approach of "cache everything for a small number of seconds"
# https://nginx.org/en/docs/http/ngx_http_proxy_module.html
proxy_cache_path /tmp/nginx-cache-%PROJECT% levels=1:2 keys_zone=%PROJECT%:%NGINX_CACHE_SIZE%;
proxy_cache %PROJECT%;
proxy_cache_min_uses %NGINX_CACHE_MIN_USES%;
proxy_cache_revalidate on;
proxy_cache_use_stale timeout updating;
proxy_cache_valid any %NGINX_CACHE_SECONDS%s; # "any" includes 404s, etc
# content-types to not cache
map $http_content_type $no_cache_content_type {
%%CSV %NO_CACHE_CONTENT_TYPE%
%1% 1;
%%ENDCSV
default 0;
}
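# for illustration: if the no-cache content-type list held only
# "application/json", the map above would render as:
#   map $http_content_type $no_cache_content_type {
#     application/json 1;
#     default 0;
#   }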
# hosts not to cache
map $http_host $no_cache_host {
hostnames;
%%CSV %NO_CACHE_HOST%
%1% 1;
%%ENDCSV
default 0;
}
# so, should we skip caching this stuff for some reason?
proxy_no_cache $no_cache_content_type $no_cache_host;
proxy_cache_bypass $no_cache_content_type $no_cache_host;
%%ELSE
# nginx caching disabled
%%ENDIF
# logs
access_log %LOG_DIR%/nginx-access.log;
# global settings
server_tokens off;
# allow/deny (first wins)
allow "unix:";
deny all;
# rewrite these content types; text/html is implicit
subs_filter_types
application/javascript
application/json
application/x-javascript
text/css
text/javascript
text/xml
%%IF %EXTRA_SUBS_FILTER_TYPES%
# extra_subs_filter_types
%EXTRA_SUBS_FILTER_TYPES%
%%ELSE
# no extra_subs_filter_types
%%ENDIF
;
%%IF %PRESERVE_CSV%
# preserve subs (save-phase): 1=description,2=re,3=i_or_empty,4=replacement
%%CSV %PRESERVE_CSV%
# saving regexp '%2%' as '%1%' for replacement with '%4%' (%3%)
subs_filter
(%PRESERVE_PREAMBLE_RE%)(%2%)\\b
$1%PRESERVE_BEFORE%%1%%PRESERVE_AFTER%
g%3%r
;
%%ENDCSV
%%ELSE
# no preserve subs (save-phase)
%%ENDIF
%%BEGIN
# map: %DNS_DOMAIN% -> %ONION_ADDRESS%
subs_filter
\\b%DNS_DOMAIN_RE2%\\b
%ONION_ADDRESS%
gir
;
# map: %DNS_DOMAIN_RE% -> %ONION_ADDRESS_RE%
subs_filter
\\b%DNS_DOMAIN_RE4%\\b
%ONION_ADDRESS_RE2%
gir
;
%%IF %HARD_MODE% > 1
# hard2 map: %DNS_DOMAIN_RE2% -> %ONION_ADDRESS_RE2%
subs_filter
\\b%DNS_DOMAIN_RE8%\\b
%ONION_ADDRESS_RE4%
gir
;
# hard2 map: %DNS_DOMAIN_RE3% -> %ONION_ADDRESS_RE3%
subs_filter
\\b%DNS_DOMAIN_RE12%\\b
%ONION_ADDRESS_RE6%
gir
;
%%ENDIF
%%END
%%IF %FOREIGNMAP_CSV%
# foreignmap subs: 1=onion,2=re,3=re2,4=dns,5=re,6=re2
%%CSV %FOREIGNMAP_CSV%
# for %4% -> %1%
subs_filter
\\b%6%\\b
%1%
gir
;
%%ENDCSV
%%ELSE
# no foreignmap subs
%%ENDIF
%%IF %PRESERVE_CSV%
# preserve subs (restore-phase): 1=description,2=re,3=i_or_empty,4=replacement
%%CSV %PRESERVE_CSV%
# restoring '%1%' with '%4%'
subs_filter
%PRESERVE_BEFORE%%1%%PRESERVE_AFTER%
%4%
g
;
%%ENDCSV
%%ELSE
# no preserve subs (restore-phase)
%%ENDIF
# o_to_d_lookup -> if it cannot remap, it returns the input. note: old versions
# of lua-plugin cannot cope with code like o_to_d_mappings[o[1]]
# because of `long bracket syntax`; the `[o[` freaks it out.
# See: https://github.com/openresty/lua-nginx-module/issues/748
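# illustration of the workaround used in the lookup functions below:
#   local k = m[1]                      -- index in one step...
#   return ( o_to_d_mappings[k] or k )  -- ...then look up, avoiding `[m[`
# rather than the one-liner `return o_to_d_mappings[m[1]] or m[1]`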
init_by_lua_block {
-- helper functions for elsewhere
-- builds data structures
-- http://www.lua.org/pil/11.5.html
TrueMap = function (list)
local set = {}
for _, l in ipairs(list) do set[l] = true end
return set
end
-- for compression sanity-testing
is_compression = TrueMap{ "br", "compress", "deflate", "gzip", }
-- for neutering entire uris to be filled with "<!--EOTK-->"
is_neutered_uri = TrueMap{
-- "/path/goes/here",
-- ...
}
-- for debug messages
Slog = function (s) -- in case of manual debugging
ngx.log(ngx.ERR, s)
return
end
-- for matching uri suffixes, etc
HasSuffix = function (s, x)
return string.sub(s, -string.len(x)) == x
end
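-- e.g. (illustration): HasSuffix("/static/app.js", ".js") == true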
-- mapping onions to dns
o_to_d_mappings = {}
%%BEGIN
o_to_d_mappings["%ONION_ADDRESS%"] = "%DNS_DOMAIN%"
%%END
o_to_d_lookup = function (m)
local k = m[1] -- see note above re: array syntax
return ( o_to_d_mappings[k] or k )
end
onion_to_dns = function (i)
if i == nil or i == "" then
return i
end
if (type(i) == "table") then
local j, k, result
result = {}
for j, k in ipairs(i) do
table.insert(result, onion_to_dns(k))
end
return result
end
local o, num, errs = ngx.re.gsub(i, "\\b([a-z2-7]{16}(?:[a-z2-7]{40})?\\.onion)\\b", o_to_d_lookup, "io")
return o
end
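-- worked example (hypothetical mapping): if o_to_d_mappings held
--   ["xxxxxxxxxxxxxxxx.onion"] = "example.com"
-- then onion_to_dns("https://xxxxxxxxxxxxxxxx.onion/a?b=1") returns
-- "https://example.com/a?b=1"; table inputs are rewritten element-wise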
-- mapping dns to onions, for experimentation
d_to_o_mappings = {}
%%BEGIN
d_to_o_mappings["%DNS_DOMAIN%"] = "%ONION_ADDRESS%"
%%END
d_to_o_lookup = function (m)
local k = m[1] -- see note above re: array syntax
return ( d_to_o_mappings[k] or k )
end
dns_to_onion = function (i)
if i == nil or i == "" or i == "*" then
return i
end
if (type(i) == "table") then
local j, k, result
result = {}
for j, k in ipairs(i) do
table.insert(result, dns_to_onion(k))
end
return result
end
local num, errs
%%BEGIN
i, num, errs = ngx.re.gsub(i, "\\b%DNS_DOMAIN_RE2%\\b", "%ONION_ADDRESS%", "io")
%%END
return i
end
}
# filter the response headers en-route back to the user
header_filter_by_lua_block {
-- did upstream miss our request for 'identity'?
local ce = ngx.var.upstream_http_content_encoding or ""
if is_compression[ce] then
Slog("compressed data returned from origin: "..ce)
%%IF %DROP_UNREWRITABLE_CONTENT%
-- I'd prefer to do something nice like this:
-- ngx.status = 520
-- ngx.say("upstream content was compressed and therefore not rewritable")
-- ngx.exit(ngx.OK)
-- ...but say() needs an API that is not available in this phase:
-- https://github.com/openresty/lua-nginx-module#header_filter_by_lua
-- therefore:
ngx.exit(520) -- en.wikipedia.org/wiki/List_of_HTTP_status_codes
%%ELSE
-- UNREWRITABLE CONTENT NOT DROPPED, COMPRESSION ONION LEAKS POSSIBLE
%%ENDIF
end
local k, v
local origin_rewrites = {
"Access-Control-Allow-Origin",
%%IF %SUPPRESS_HEADER_CSP%
-- CSP headers are suppressed via SUPPRESS_HEADER_CSP
%%ELSE
"Content-Security-Policy",
"Content-Security-Policy-Report-Only",
%%ENDIF
"Link",
"Location",
"Set-Cookie"
}
local i, k
for i, k in ipairs(origin_rewrites) do
local v = ngx.header[k]
if v then
ngx.header[k] = dns_to_onion(v)
end
end
%%IF %DEBUG_CSP_SANDBOX%
local csp_sandbox =
"sandbox allow-forms allow-modals allow-orientation-lock"..
" allow-pointer-lock allow-popups allow-popups-to-escape-sandbox"..
" allow-presentation allow-same-origin allow-scripts;"
ngx.header["Content-Security-Policy"] = csp_sandbox
%%ELSE
-- no debug csp sandboxing
%%ENDIF
}
%%IF %DEBUG_TRAP%
# debug trap: filter the response body en-route back to the user
# hello! you have found the debug_trap code, which is not
# well-documented but adds repeated chunks of very slow code to
# capture/log any server-supplied content matching the supplied
# regular expressions, with up-to-64 chars of preceding and trailing
# context; use with caution, and probably do not put this into
# production, against ALL CONTENT INCLUDING (eg:) VIDEO; this code
# exists to help debug situations where some HTML or JS content is
# being passed through NGINX to the browser, but you don't know how
# it got there.
# config usage: set debug_trap foo\\.regex\\.tld [...]
body_filter_by_lua_block {
-- change auxh to Content-Encoding or Set-Cookie or whatever, if needed
local auxh = "Access-Control-Allow-Origin"
local i = ngx.arg[1]
local ct = ngx.header["Content-Type"] or ""
local aux = ngx.header[auxh] or ""
local uri = ngx.var.uri or ""
local iterator, err
%%CSV %DEBUG_TRAP%
iterator, err = ngx.re.gmatch(i, ".{0,64}(%1%).{0,64}", "io")
if not iterator then
ngx.log(ngx.ERR, "gmatch error: ", err)
else
while true do
local m, err = iterator()
if err then
ngx.log(ngx.ERR, "iterator error: ", err)
break
end
if not m then
break
end
local msg =
string.format("\n\tTRAP %s\n"..
"\tTYPE %s\n"..
"\tAUXH %s: %s\n"..
"\tCODE %s\n"..
"\tURI %s\n",
m[1],
ct,
auxh, aux,
m[0],
uri)
Slog(msg)
end -- while true
end -- if iterator
%%ENDCSV
-- search and replace; this would be faster to do with a linear CSV unwind,
-- as you could benefit from the regexp optimiser in-place + tune casei
local edit_map = {} -- example config follows:
-- edit_map["regular\\.expression"] = "replacement.string"
-- edit_map["insertion.preamble"] = "${0}inserted.code"
-- ...
for regexp, replacement in pairs(edit_map) do
local m, err = ngx.re.match(ngx.arg[1], regexp, "i")
if m then -- could just gsub it blindly but i want the logs
Slog("REPLACE: "..m[0].." BY "..replacement.."\n")
local i, num, errs = ngx.re.gsub(ngx.arg[1], regexp, replacement, "i")
ngx.arg[1] = i
end
end
-- neutering, if any (return "safe" content)
if is_neutered_uri[uri] then
Slog("neutering: "..uri)
ngx.arg[1] = "<!--EOTK-->"
end
}
%%ELSE
# no debug traps
%%ENDIF
%%IF %SUPPRESS_HEADER_CSP%
# csp suppression
proxy_hide_header "Content-Security-Policy";
proxy_hide_header "Content-Security-Policy-Report-Only";
%%ELSE
# csp not suppressed, will be rewritten instead, see below
%%ENDIF
%%IF %SUPPRESS_HEADER_HSTS%
# hsts suppression
proxy_hide_header "Strict-Transport-Security";
%%ELSE
# hsts not suppressed
%%ENDIF
%%IF %SUPPRESS_HEADER_HPKP%
# hpkp suppression
proxy_hide_header "Public-Key-Pins";
proxy_hide_header "Public-Key-Pins-Report-Only";
%%ELSE
# hpkp not suppressed
%%ENDIF
# global proxy settings
proxy_read_timeout %NGINX_TIMEOUT%;
proxy_connect_timeout %NGINX_TIMEOUT%;
# SSL config
ssl_certificate %SSL_DIR%/%CERT_PREFIX%.cert;
ssl_certificate_key %SSL_DIR%/%CERT_PREFIX%.pem;
ssl_buffer_size 4k;
#ssl_ciphers 'EECDH+CHACHA20:EECDH+AESGCM:EECDH+AES256'; ## LibreSSL, OpenSSL 1.1.0+
ssl_ciphers 'EECDH+AESGCM:EECDH+AES256'; ## OpenSSL 1.0.1% to 1.0.2%
ssl_ecdh_curve prime256v1;
#ssl_ecdh_curve secp384r1:prime256v1; ## NGINX 1.11.0 and later
ssl_prefer_server_ciphers on;
ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
ssl_session_cache shared:SSL:10m;
ssl_session_timeout 10m;
# websockets: on the basis of http_upgrade, set connection_upgrade:
# empty -> empty
# default -> "upgrade"
map $http_upgrade $connection_upgrade {
default "upgrade";
"" "";
}
%%BEGIN
%%IF %FORCE_HTTPS%
# FORCE_HTTPS is in use; set up separate server for port 80 & force redirects
server {
%%IF %IS_SOFTMAP%
%%RANGE I 1 %SOFTMAP_TOR_WORKERS%
listen unix:%PROJECT_DIR%/%TOR_WORKER_PREFIX%-%I%.d/port-80.sock;
%%ENDRANGE
%%ELSE
listen unix:%PROJECT_DIR%/%ONION_DIRNAME%/port-80.sock;
%%ENDIF
# subdomain regexp captures trailing dot, use carefully; does not need "~*"
# NB: this regexp should be kept in-sync with the other FORCE_HTTPS copy
server_name
%ONION_ADDRESS%
~^(?<servernamesubdomain>([-0-9a-z]+\\.)+)%ONION_ADDRESS_RE2%$
;
%%IF %SUPPRESS_TOR2WEB%
# suppress tor2web traffic; "let them use clearnet"
if ( $http_x_tor2web ) {
return 403 "%BLOCK_ERR%";
}
%%ELSE
# tor2web not suppressed
%%ENDIF
# tell the client to try again as HTTPS without ever leaving the onion
# use 307 / temporary redirect because your URIs may change in future
# use $host (not $server) to copy-over subdomains, etc, transparently
# SEND BACK ORIGINAL PARAMS, FIX THEM ONLY UPON FORWARD TO THE PROXY.
return 307 https://$host$request_uri;
}
%%ELSE
# FORCE_HTTPS is not in use, cleartext data may traverse the internet
%%ENDIF
# for %ONION_ADDRESS% -> %DNS_DOMAIN%
server {
%%IF %IS_SOFTMAP%
%%RANGE I 1 %SOFTMAP_TOR_WORKERS%
%%IF not %FORCE_HTTPS%
# FORCE_HTTPS is not in use, cleartext data may traverse the internet
listen unix:%PROJECT_DIR%/%TOR_WORKER_PREFIX%-%I%.d/port-80.sock;
%%ENDIF
listen unix:%PROJECT_DIR%/%TOR_WORKER_PREFIX%-%I%.d/port-443.sock ssl;
%%ENDRANGE
%%ELSE
# hardmap
# unix sockets; use <ONION_ADDRESS>.d as a naming convention
%%IF not %FORCE_HTTPS%
# FORCE_HTTPS is not in use, cleartext data may traverse the internet
listen unix:%PROJECT_DIR%/%ONION_DIRNAME%/port-80.sock;
%%ENDIF
listen unix:%PROJECT_DIR%/%ONION_DIRNAME%/port-443.sock ssl;
%%ENDIF
# subdomain regexp captures trailing dot, use carefully; does not need "~*"
# NB: this regexp should be kept in-sync with the other FORCE_HTTPS copy
server_name
%ONION_ADDRESS%
~^(?<servernamesubdomain>([-0-9a-z]+\\.)+)%ONION_ADDRESS_RE2%$
;
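# e.g. a request for foo.bar.%ONION_ADDRESS% sets $servernamesubdomain
# to "foo.bar." (trailing dot included), which is later prefixed
# verbatim to %DNS_DOMAIN% when building $new_url and the Host header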
%%INCLUDE templates.d/nginx-generated-blocks.conf
%%IF %COOKIE_LOCK%
# if we are visiting the magic path, open the cookie-lock
location "%COOKIE_LOCK%" {
add_header Set-Cookie "eotk_lock=%COOKIE_LOCK%;Domain=.%ONION_ADDRESS%;Path=/;Max-Age=604800";
return 200 "OK";
}
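# note: Max-Age=604800 keeps the lock cookie for one week; requests
# arriving without it are rejected by the cookie-lock check in the
# traffic location below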
%%ELSE
# no cookie_lock cookie setting
%%ENDIF
%%IF %NGINX_HELLO_ONION%
# for test & to help SSL certificate acceptance
location ~* ^/hello[-_]onion/?$ {
return 200 "Hello, Onion User!";
}
%%ELSE
# no "hello-onion" endpoint
%%ENDIF
%%IF %HARDCODED_ENDPOINT_CSV%
# hardcoded_endpoints: 1=path_re,2=response
%%CSV %HARDCODED_ENDPOINT_CSV%
location "%1%" {
return 200 %2%;
}
%%ENDCSV
%%ELSE
# no hardcoded_endpoints
%%ENDIF
%%IF exists templates.d/nginx-site-%ONION_ADDRESS%.conf
# A note on spliced content; any file that you splice into the
# final configuration MUST NOT contain any EOTK-variables, nor any
# EOTK template directives, simply because these will all be
# ignored and may cause NGINX syntax errors. The `splice` hack is
# meant for small bits of production-ready NGINX config that MUST
# occur only in the definition of one particular onion out of
# (presumably several) in the eventual configuration file; most
# likely this will be something like: per-onion access control.
# The SPLICE directive is executed at template-generation time,
# and is subject to flow-control like IF/ELSE/ENDIF; by contrast
# the INCLUDE directive is executed at template-load time, and is
# invisible to the IF/ELSE/ENDIF flow control; in this sense the
# INCLUDE directive is more like "#include" from C/C++
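# a hypothetical example of a splice file's contents, i.e. plain NGINX
# directives implementing per-onion access control:
#   location /admin/ {
#     deny all;
#   }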
# ---- BEGIN SPLICE: templates.d/nginx-site-%ONION_ADDRESS%.conf ----
%%SPLICE templates.d/nginx-site-%ONION_ADDRESS%.conf
# ---- END SPLICE: templates.d/nginx-site-%ONION_ADDRESS%.conf ----
%%ELSE
# splice: no file: templates.d/nginx-site-%ONION_ADDRESS%.conf
%%ENDIF
# for traffic
location / {
%%INCLUDE templates.d/nginx-generated-checks.conf
%%IF %COOKIE_LOCK%
# check for cookie-lock
if ( $cookie_eotk_lock != "%COOKIE_LOCK%" ) { %NGINX_ACTION_ABORT%; }
%%ELSE
# no cookie-lock checks
%%ENDIF
# deonionify the request_uri for forwarding (both path and args)
set_by_lua_block $request_uri2 {
local old = ngx.var.request_uri or ""
-- onion_to_dns is potentially expensive at scale, so do a cheap test
local m, err = ngx.re.match(old, "\\b[a-z2-7]{16}(?:[a-z2-7]{40})?\\.onion\\b", "o")
if not m then -- nothing to attempt to rewrite, quick return
return old
end
return onion_to_dns(old)
}
# note use of both $scheme and the deonionified uri (both path and args)
set $new_url "$scheme://${servernamesubdomain}%DNS_DOMAIN%$request_uri2";
proxy_pass $new_url;
proxy_http_version 1.1;
# a note on proxy_set_header, add_header, similar methods, etc;
# if you override *any* header then you will lose the other
# headers inherited from the parent contexts:
# https://blog.g3rt.nl/nginx-add_header-pitfall.html
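# e.g. (illustration): a nested location{} that declared even one
# proxy_set_header of its own would inherit NONE of the headers below,
# which is why they are all set together at this level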
proxy_set_header X-From-Onion %X_FROM_ONION_VALUE%;
proxy_set_header Host "${servernamesubdomain}%DNS_DOMAIN%";
proxy_set_header Accept-Encoding "identity";
proxy_set_header Connection $connection_upgrade; # SSL
proxy_set_header Upgrade $http_upgrade; # SSL
proxy_ssl_server_name on; # SSL
# rewrite request referer
set_by_lua_block $referer2 { return onion_to_dns(ngx.var.http_referer) }
proxy_set_header Referer $referer2;
# rewrite request origin
set_by_lua_block $origin2 { return onion_to_dns(ngx.var.http_origin) }
proxy_set_header Origin $origin2;
# rewrite request cookies
set_by_lua_block $cookie2 { return onion_to_dns(ngx.var.http_cookie) }
proxy_set_header Cookie $cookie2;
%%IF %SUPPRESS_METHODS_EXCEPT_GET%
# suppress non-GET methods (e.g.: POST)
limit_except GET {
deny all;
}
%%ELSE
# non-GET methods (e.g.: POST) are not suppressed
%%ENDIF
}
}
%%END
%%IF %DEBUG_ORIGIN_HEADERS%
more_set_headers "EOTK-Upstream: ct=$upstream_http_content_type;ce=$upstream_http_content_encoding";
%%ELSE
# origin headers not debugged
%%ENDIF
# header purge
more_clear_headers "Age";
more_clear_headers "Server";
more_clear_headers "Via";
more_clear_headers "X-From-Nginx";
more_clear_headers "X-NA";
more_clear_headers "X-Powered-By";
more_clear_headers "X-Request-Id";
more_clear_headers "X-Runtime";
more_clear_headers "X-Varnish";
}