-
I'm experiencing random denial of requests on my Nginx server, despite having a standard configuration (at least I think so). I first suspected my firewall of being misconfigured, so I turned it off. The problem persists though. I'm testing with the Tor browser in order to get new IPs. I believe I'm using the standard configuration, except that gzip is turned on. Can someone please confirm that this is indeed the case? Is there anything else I'm missing? Output of docker compose exec nginx-proxy nginx -T: nginx: the configuration file /etc/nginx/nginx.conf syntax is ok
nginx: configuration file /etc/nginx/nginx.conf test is successful
# configuration file /etc/nginx/nginx.conf:
user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log notice;
pid /var/run/nginx.pid;
events {
worker_connections 10240;
}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
sendfile on;
#tcp_nopush on;
keepalive_timeout 65;
#gzip on;
include /etc/nginx/conf.d/*.conf;
}
daemon off;
# configuration file /etc/nginx/mime.types:
types {
text/html html htm shtml;
text/css css;
text/xml xml;
image/gif gif;
image/jpeg jpeg jpg;
application/javascript js;
application/atom+xml atom;
application/rss+xml rss;
text/mathml mml;
text/plain txt;
text/vnd.sun.j2me.app-descriptor jad;
text/vnd.wap.wml wml;
text/x-component htc;
image/avif avif;
image/png png;
image/svg+xml svg svgz;
image/tiff tif tiff;
image/vnd.wap.wbmp wbmp;
image/webp webp;
image/x-icon ico;
image/x-jng jng;
image/x-ms-bmp bmp;
font/woff woff;
font/woff2 woff2;
application/java-archive jar war ear;
application/json json;
application/mac-binhex40 hqx;
application/msword doc;
application/pdf pdf;
application/postscript ps eps ai;
application/rtf rtf;
application/vnd.apple.mpegurl m3u8;
application/vnd.google-earth.kml+xml kml;
application/vnd.google-earth.kmz kmz;
application/vnd.ms-excel xls;
application/vnd.ms-fontobject eot;
application/vnd.ms-powerpoint ppt;
application/vnd.oasis.opendocument.graphics odg;
application/vnd.oasis.opendocument.presentation odp;
application/vnd.oasis.opendocument.spreadsheet ods;
application/vnd.oasis.opendocument.text odt;
application/vnd.openxmlformats-officedocument.presentationml.presentation
pptx;
application/vnd.openxmlformats-officedocument.spreadsheetml.sheet
xlsx;
application/vnd.openxmlformats-officedocument.wordprocessingml.document
docx;
application/vnd.wap.wmlc wmlc;
application/wasm wasm;
application/x-7z-compressed 7z;
application/x-cocoa cco;
application/x-java-archive-diff jardiff;
application/x-java-jnlp-file jnlp;
application/x-makeself run;
application/x-perl pl pm;
application/x-pilot prc pdb;
application/x-rar-compressed rar;
application/x-redhat-package-manager rpm;
application/x-sea sea;
application/x-shockwave-flash swf;
application/x-stuffit sit;
application/x-tcl tcl tk;
application/x-x509-ca-cert der pem crt;
application/x-xpinstall xpi;
application/xhtml+xml xhtml;
application/xspf+xml xspf;
application/zip zip;
application/octet-stream bin exe dll;
application/octet-stream deb;
application/octet-stream dmg;
application/octet-stream iso img;
application/octet-stream msi msp msm;
audio/midi mid midi kar;
audio/mpeg mp3;
audio/ogg ogg;
audio/x-m4a m4a;
audio/x-realaudio ra;
video/3gpp 3gpp 3gp;
video/mp2t ts;
video/mp4 mp4;
video/mpeg mpeg mpg;
video/quicktime mov;
video/webm webm;
video/x-flv flv;
video/x-m4v m4v;
video/x-mng mng;
video/x-ms-asf asx asf;
video/x-ms-wmv wmv;
video/x-msvideo avi;
}
# configuration file /etc/nginx/conf.d/default.conf:
# nginx-proxy version : 1.3.1-2-gffc8b27
# Networks available to the container running docker-gen (which are assumed to
# match the networks available to the container running nginx):
# root_fullstack
# If we receive X-Forwarded-Proto, pass it through; otherwise, pass along the
# scheme used to connect to this server
map $http_x_forwarded_proto $proxy_x_forwarded_proto {
default $scheme;
'' $scheme;
}
map $http_x_forwarded_host $proxy_x_forwarded_host {
default $http_host;
'' $http_host;
}
# If we receive X-Forwarded-Port, pass it through; otherwise, pass along the
# server port the client connected to
map $http_x_forwarded_port $proxy_x_forwarded_port {
default $server_port;
'' $server_port;
}
# If the request from the downstream client has an "Upgrade:" header (set to any
# non-empty value), pass "Connection: upgrade" to the upstream (backend) server.
# Otherwise, the value for the "Connection" header depends on whether the user
# has enabled keepalive to the upstream server.
map $http_upgrade $proxy_connection {
default upgrade;
'' $proxy_connection_noupgrade;
}
map $upstream_keepalive $proxy_connection_noupgrade {
# Preserve nginx's default behavior (send "Connection: close").
default close;
# Use an empty string to cancel nginx's default behavior.
true '';
}
# Abuse the map directive (see <https://stackoverflow.com/q/14433309>) to ensure
# that $upstream_keepalive is always defined. This is necessary because:
# - The $proxy_connection variable is indirectly derived from
# $upstream_keepalive, so $upstream_keepalive must be defined whenever
# $proxy_connection is resolved.
# - The $proxy_connection variable is used in a proxy_set_header directive in
# the http block, so it is always fully resolved for every request -- even
# those where proxy_pass is not used (e.g., unknown virtual host).
map "" $upstream_keepalive {
# The value here should not matter because it should always be overridden in
# a location block (see the "location" template) for all requests where the
# value actually matters.
default false;
}
# Apply fix for very long server names
server_names_hash_bucket_size 128;
# Default dhparam
ssl_dhparam /etc/nginx/dhparam/dhparam.pem;
# Set appropriate X-Forwarded-Ssl header based on $proxy_x_forwarded_proto
map $proxy_x_forwarded_proto $proxy_x_forwarded_ssl {
default off;
https on;
}
gzip_types text/plain text/css application/javascript application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript;
log_format vhost '$host $remote_addr - $remote_user [$time_local] "$request" $status $body_bytes_sent "$http_referer" "$http_user_agent" "$upstream_addr"';
access_log off;
ssl_protocols TLSv1.2 TLSv1.3;
ssl_ciphers 'ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384';
ssl_prefer_server_ciphers off;
error_log /dev/stderr;
resolver 127.0.0.11;
# HTTP 1.1 support
proxy_http_version 1.1;
proxy_buffering off;
proxy_set_header Host $http_host;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $proxy_connection;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Host $proxy_x_forwarded_host;
proxy_set_header X-Forwarded-Proto $proxy_x_forwarded_proto;
proxy_set_header X-Forwarded-Ssl $proxy_x_forwarded_ssl;
proxy_set_header X-Forwarded-Port $proxy_x_forwarded_port;
proxy_set_header X-Original-URI $request_uri;
# Mitigate httpoxy attack (see README for details)
proxy_set_header Proxy "";
server {
server_name _; # This is just an invalid value which will never trigger on a real hostname.
server_tokens off;
listen 80;
listen 443 ssl http2;
ssl_session_cache shared:SSL:50m;
ssl_session_tickets off;
access_log /var/log/nginx/access.log vhost;
ssl_certificate /etc/nginx/certs/default.crt;
ssl_certificate_key /etc/nginx/certs/default.key;
return 503;
}
# api.domain1.com/
upstream api.domain1.com-42099b4af021e53fd8fd4e056c2568d7c2e3ffa8 {
# Container: backend
# networks:
# root_fullstack (reachable)
# IP address: MY_IP
# exposed ports: 8000/tcp
# default port: 8000
# using port: 8000
# /!\ WARNING: Virtual port published on host. Clients
# might be able to bypass nginx-proxy and
# access the container's server directly.
server MY_IP:8000;
}
server {
server_name api.domain1.com;
listen 80 ;
access_log /var/log/nginx/access.log vhost;
# Do not HTTPS redirect Let's Encrypt ACME challenge
location ^~ /.well-known/acme-challenge/ {
auth_basic off;
auth_request off;
allow all;
root /usr/share/nginx/html;
try_files $uri =404;
break;
}
location / {
return 301 https://$host$request_uri;
}
}
server {
server_name api.domain1.com;
access_log /var/log/nginx/access.log vhost;
listen 443 ssl http2 ;
ssl_session_timeout 5m;
ssl_session_cache shared:SSL:50m;
ssl_session_tickets off;
ssl_certificate /etc/nginx/certs/api.domain1.com.crt;
ssl_certificate_key /etc/nginx/certs/api.domain1.com.key;
ssl_dhparam /etc/nginx/certs/api.domain1.com.dhparam.pem;
ssl_stapling on;
ssl_stapling_verify on;
ssl_trusted_certificate /etc/nginx/certs/api.domain1.com.chain.pem;
set $sts_header "";
if ($https) {
set $sts_header "max-age=31536000";
}
add_header Strict-Transport-Security $sts_header always;
include /etc/nginx/vhost.d/default;
location / {
# Only allow traffic from internal clients
include /etc/nginx/network_internal.conf;
proxy_pass http://api.domain1.com-42099b4af021e53fd8fd4e056c2568d7c2e3ffa8;
set $upstream_keepalive false;
}
}
# api.domain2.com/
upstream api.domain2.com-42099b4af021e53fd8fd4e056c2568d7c2e3ffa8 {
# Container: backend
# networks:
# root_fullstack (reachable)
# IP address: MY_IP
# exposed ports: 8000/tcp
# default port: 8000
# using port: 8000
# /!\ WARNING: Virtual port published on host. Clients
# might be able to bypass nginx-proxy and
# access the container's server directly.
server MY_IP:8000;
}
server {
server_name api.domain2.com;
listen 80 ;
access_log /var/log/nginx/access.log vhost;
# Do not HTTPS redirect Let's Encrypt ACME challenge
location ^~ /.well-known/acme-challenge/ {
auth_basic off;
auth_request off;
allow all;
root /usr/share/nginx/html;
try_files $uri =404;
break;
}
location / {
return 301 https://$host$request_uri;
}
}
server {
server_name api.domain2.com;
access_log /var/log/nginx/access.log vhost;
listen 443 ssl http2 ;
ssl_session_timeout 5m;
ssl_session_cache shared:SSL:50m;
ssl_session_tickets off;
ssl_certificate /etc/nginx/certs/api.domain2.com.crt;
ssl_certificate_key /etc/nginx/certs/api.domain2.com.key;
ssl_dhparam /etc/nginx/certs/api.domain2.com.dhparam.pem;
ssl_stapling on;
ssl_stapling_verify on;
ssl_trusted_certificate /etc/nginx/certs/api.domain2.com.chain.pem;
set $sts_header "";
if ($https) {
set $sts_header "max-age=31536000";
}
add_header Strict-Transport-Security $sts_header always;
include /etc/nginx/vhost.d/default;
location / {
# Only allow traffic from internal clients
include /etc/nginx/network_internal.conf;
proxy_pass http://api.domain2.com-42099b4af021e53fd8fd4e056c2568d7c2e3ffa8;
set $upstream_keepalive false;
}
}
# domain1.com/
upstream domain1.com-42099b4af021e53fd8fd4e056c2568d7c2e3ffa8 {
# Container: frontend-domain1
# networks:
# root_fullstack (reachable)
# IP address: 172.26.0.2
# exposed ports: 4200/tcp
# default port: 4200
# using port: 4200
# /!\ WARNING: Virtual port published on host. Clients
# might be able to bypass nginx-proxy and
# access the container's server directly.
server 172.26.0.2:4200;
}
server {
server_name domain1.com;
listen 80 ;
access_log /var/log/nginx/access.log vhost;
# Do not HTTPS redirect Let's Encrypt ACME challenge
location ^~ /.well-known/acme-challenge/ {
auth_basic off;
auth_request off;
allow all;
root /usr/share/nginx/html;
try_files $uri =404;
break;
}
location / {
return 301 https://$host$request_uri;
}
}
server {
server_name domain1.com;
access_log /var/log/nginx/access.log vhost;
listen 443 ssl http2 ;
ssl_session_timeout 5m;
ssl_session_cache shared:SSL:50m;
ssl_session_tickets off;
ssl_certificate /etc/nginx/certs/domain1.com.crt;
ssl_certificate_key /etc/nginx/certs/domain1.com.key;
ssl_dhparam /etc/nginx/certs/domain1.com.dhparam.pem;
ssl_stapling on;
ssl_stapling_verify on;
ssl_trusted_certificate /etc/nginx/certs/domain1.com.chain.pem;
set $sts_header "";
if ($https) {
set $sts_header "max-age=31536000";
}
add_header Strict-Transport-Security $sts_header always;
include /etc/nginx/vhost.d/default;
location / {
# Only allow traffic from internal clients
include /etc/nginx/network_internal.conf;
proxy_pass http://domain1.com-42099b4af021e53fd8fd4e056c2568d7c2e3ffa8;
set $upstream_keepalive false;
}
}
# domain2.com/
upstream domain2.com-42099b4af021e53fd8fd4e056c2568d7c2e3ffa8 {
# Container: frontend-domain2
# networks:
# root_fullstack (reachable)
# IP address: 172.26.0.4
# exposed ports: 4210/tcp
# default port: 4210
# using port: 4210
# /!\ WARNING: Virtual port published on host. Clients
# might be able to bypass nginx-proxy and
# access the container's server directly.
server 172.26.0.4:4210;
}
server {
server_name domain2.com;
listen 80 ;
access_log /var/log/nginx/access.log vhost;
# Do not HTTPS redirect Let's Encrypt ACME challenge
location ^~ /.well-known/acme-challenge/ {
auth_basic off;
auth_request off;
allow all;
root /usr/share/nginx/html;
try_files $uri =404;
break;
}
location / {
return 301 https://$host$request_uri;
}
}
server {
server_name domain2.com;
access_log /var/log/nginx/access.log vhost;
listen 443 ssl http2 ;
ssl_session_timeout 5m;
ssl_session_cache shared:SSL:50m;
ssl_session_tickets off;
ssl_certificate /etc/nginx/certs/domain2.com.crt;
ssl_certificate_key /etc/nginx/certs/domain2.com.key;
ssl_dhparam /etc/nginx/certs/domain2.com.dhparam.pem;
ssl_stapling on;
ssl_stapling_verify on;
ssl_trusted_certificate /etc/nginx/certs/domain2.com.chain.pem;
set $sts_header "";
if ($https) {
set $sts_header "max-age=31536000";
}
add_header Strict-Transport-Security $sts_header always;
include /etc/nginx/vhost.d/default;
location / {
# Only allow traffic from internal clients
include /etc/nginx/network_internal.conf;
proxy_pass http://domain2.com-42099b4af021e53fd8fd4e056c2568d7c2e3ffa8;
set $upstream_keepalive false;
}
}
# www.domain1.com/
upstream www.domain1.com-42099b4af021e53fd8fd4e056c2568d7c2e3ffa8 {
# Container: frontend-domain1
# networks:
# root_fullstack (reachable)
# IP address: 172.26.0.2
# exposed ports: 4200/tcp
# default port: 4200
# using port: 4200
# /!\ WARNING: Virtual port published on host. Clients
# might be able to bypass nginx-proxy and
# access the container's server directly.
server 172.26.0.2:4200;
}
server {
server_name www.domain1.com;
listen 80 ;
access_log /var/log/nginx/access.log vhost;
# Do not HTTPS redirect Let's Encrypt ACME challenge
location ^~ /.well-known/acme-challenge/ {
auth_basic off;
auth_request off;
allow all;
root /usr/share/nginx/html;
try_files $uri =404;
break;
}
location / {
return 301 https://$host$request_uri;
}
}
server {
server_name www.domain1.com;
access_log /var/log/nginx/access.log vhost;
listen 443 ssl http2 ;
ssl_session_timeout 5m;
ssl_session_cache shared:SSL:50m;
ssl_session_tickets off;
ssl_certificate /etc/nginx/certs/www.domain1.com.crt;
ssl_certificate_key /etc/nginx/certs/www.domain1.com.key;
ssl_dhparam /etc/nginx/certs/www.domain1.com.dhparam.pem;
ssl_stapling on;
ssl_stapling_verify on;
ssl_trusted_certificate /etc/nginx/certs/www.domain1.com.chain.pem;
set $sts_header "";
if ($https) {
set $sts_header "max-age=31536000";
}
add_header Strict-Transport-Security $sts_header always;
include /etc/nginx/vhost.d/default;
location / {
# Only allow traffic from internal clients
include /etc/nginx/network_internal.conf;
proxy_pass http://www.domain1.com-42099b4af021e53fd8fd4e056c2568d7c2e3ffa8;
set $upstream_keepalive false;
}
}
# www.domain2.com/
upstream www.domain2.com-42099b4af021e53fd8fd4e056c2568d7c2e3ffa8 {
# Container: frontend-domain2
# networks:
# root_fullstack (reachable)
# IP address: 172.26.0.4
# exposed ports: 4210/tcp
# default port: 4210
# using port: 4210
# /!\ WARNING: Virtual port published on host. Clients
# might be able to bypass nginx-proxy and
# access the container's server directly.
server 172.26.0.4:4210;
}
server {
server_name www.domain2.com;
listen 80 ;
access_log /var/log/nginx/access.log vhost;
# Do not HTTPS redirect Let's Encrypt ACME challenge
location ^~ /.well-known/acme-challenge/ {
auth_basic off;
auth_request off;
allow all;
root /usr/share/nginx/html;
try_files $uri =404;
break;
}
location / {
return 301 https://$host$request_uri;
}
}
server {
server_name www.domain2.com;
access_log /var/log/nginx/access.log vhost;
listen 443 ssl http2 ;
ssl_session_timeout 5m;
ssl_session_cache shared:SSL:50m;
ssl_session_tickets off;
ssl_certificate /etc/nginx/certs/www.domain2.com.crt;
ssl_certificate_key /etc/nginx/certs/www.domain2.com.key;
ssl_dhparam /etc/nginx/certs/www.domain2.com.dhparam.pem;
ssl_stapling on;
ssl_stapling_verify on;
ssl_trusted_certificate /etc/nginx/certs/www.domain2.com.chain.pem;
set $sts_header "";
if ($https) {
set $sts_header "max-age=31536000";
}
add_header Strict-Transport-Security $sts_header always;
include /etc/nginx/vhost.d/default;
location / {
# Only allow traffic from internal clients
include /etc/nginx/network_internal.conf;
proxy_pass http://www.domain2.com-42099b4af021e53fd8fd4e056c2568d7c2e3ffa8;
set $upstream_keepalive false;
}
}
# configuration file /etc/nginx/vhost.d/default:
## Start of configuration add by letsencrypt container
location ^~ /.well-known/acme-challenge/ {
auth_basic off;
auth_request off;
allow all;
root /usr/share/nginx/html;
try_files $uri =404;
break;
}
## End of configuration add by letsencrypt container
# configuration file /etc/nginx/network_internal.conf:
# Only allow traffic from internal clients
allow 127.0.0.0/8;
allow 10.0.0.0/8;
allow 192.168.0.0/16;
allow 172.16.0.0/12;
allow fc00::/7; # IPv6 local address range
deny all;
# configuration file /etc/nginx/conf.d/gzip.conf:
# ----------------------------------------------------------------------
# | Compression |
# ----------------------------------------------------------------------
# https://nginx.org/en/docs/http/ngx_http_gzip_module.html
# Enable gzip compression.
# Default: off
gzip on;
# Compression level (1-9).
# 5 is a perfect compromise between size and CPU usage, offering about 75%
# reduction for most ASCII files (almost identical to level 9).
# Default: 1
gzip_comp_level 5;
# Don't compress anything that's already small and unlikely to shrink much if at
# all (the default is 20 bytes, which is bad as that usually leads to larger
# files after gzipping).
# Default: 20
gzip_min_length 256;
# Compress data even for clients that are connecting to us via proxies,
# identified by the "Via" header (required for CloudFront).
# Default: off
gzip_proxied any;
# Tell proxies to cache both the gzipped and regular version of a resource
# whenever the client's Accept-Encoding capabilities header varies;
# Avoids the issue where a non-gzip capable client (which is extremely rare
# today) would display gibberish if their proxy gave them the gzipped version.
# Default: off
gzip_vary on;
# Compress all output labeled with one of the following MIME-types.
# `text/html` is always compressed by gzip module.
# Default: text/html
gzip_types
application/atom+xml
application/geo+json
application/javascript
application/x-javascript
application/json
application/ld+json
application/manifest+json
application/rdf+xml
application/rss+xml
application/vnd.ms-fontobject
application/wasm
application/x-web-app-manifest+json
application/xhtml+xml
application/xml
font/eot
font/otf
font/ttf
image/bmp
image/svg+xml
image/vnd.microsoft.icon
image/x-icon
text/cache-manifest
text/calendar
text/css
text/javascript
text/markdown
text/plain
text/xml
text/vcard
text/vnd.rim.location.xloc
text/vtt
text/x-component
text/x-cross-domain-policy; docker-compose.yml: version: "3"
services:
backend:
container_name: backend
depends_on:
database:
condition: service_healthy
environment:
- ACCESS_TOKEN_SIGNATURE_KEY_DOMAIN1=${ACCESS_TOKEN_SIGNATURE_KEY_DOMAIN1}
- ACCESS_TOKEN_SIGNATURE_KEY_DOMAIN2=${ACCESS_TOKEN_SIGNATURE_KEY_DOMAIN2}
- DOMAIN_API_DOMAIN1=${DOMAIN_API_DOMAIN1}
- DOMAIN_API_DOMAIN2=${DOMAIN_API_DOMAIN2}
- DOMAIN_FRONTEND_DOMAIN1=${DOMAIN_FRONTEND_DOMAIN1}
- DOMAIN_FRONTEND_DOMAIN2=${DOMAIN_FRONTEND_DOMAIN2}
- DSN=${DSN}
- LETSENCRYPT_HOST=api.domain1.com,api.domain2.com
- NETWORK_ACCESS=internal
- PORT=${PORT}
- REFRESH_TOKEN_SIGNATURE_KEY_DOMAIN1=${REFRESH_TOKEN_SIGNATURE_KEY_DOMAIN1}
- REFRESH_TOKEN_SIGNATURE_KEY_DOMAIN2=${REFRESH_TOKEN_SIGNATURE_KEY_DOMAIN2}
- VIRTUAL_HOST=api.domain1.com,api.domain2.com
- VIRTUAL_PATH=/
image: registry.gitlab.com/user/project/backend
networks:
- fullstack
ports:
- 8000:8000
restart: unless-stopped
volumes:
- ./ads:/go/ads
- ./uploads:/go/uploads
database:
container_name: database
environment:
- NETWORK_ACCESS=internal
- POSTGRES_DB=${DB_NAME}
- POSTGRES_PASSWORD=${DB_PASSWORD}
- POSTGRES_USER=${DB_USER}
healthcheck:
test: ["CMD-SHELL", "sh -c 'pg_isready -U ${DB_USER} -d ${DB_NAME}'"]
interval: 10s
timeout: 3s
retries: 3
image: registry.gitlab.com/user/project/database
networks:
- fullstack
ports:
- "5432:5432"
restart: always
shm_size: 1gb
user: postgres
volumes:
- database:/var/lib/postgresql/data
database-migration:
container_name: database-migration
depends_on:
database:
condition: service_healthy
environment:
- DSN=${DSN}
- NETWORK_ACCESS=internal
image: registry.gitlab.com/user/project/database_migration
networks:
- fullstack
frontend-domain1:
container_name: frontend-domain1
environment:
- LETSENCRYPT_HOST=domain1.com,www.domain1.com
- NETWORK_ACCESS=internal
- VIRTUAL_HOST=domain1.com,www.domain1.com
- VIRTUAL_PATH=/
image: registry.gitlab.com/user/project/frontend_domain1
networks:
- fullstack
ports:
- 4200:4200
frontend-domain2:
container_name: frontend-domain2
environment:
- LETSENCRYPT_HOST=domain2.com,www.domain2.com
- NETWORK_ACCESS=internal
- VIRTUAL_HOST=domain2.com,www.domain2.com
- VIRTUAL_PATH=/
image: registry.gitlab.com/user/project/frontend_domain2
networks:
- fullstack
ports:
- 4210:4210
volumes:
- ./ads:/public/dist/public/browser/de-DE/ads
- ./ads:/public/dist/public/browser/en-US/ads
nginx-proxy-certificates:
container_name: nginx-proxy-certificates
environment:
- DEFAULT_EMAIL=user@domain2.com
- NETWORK_ACCESS=internal
image: nginxproxy/acme-companion
volumes:
- /var/run/docker.sock:/var/run/docker.sock:ro
- acme:/etc/acme.sh
volumes_from:
- nginx-proxy
nginx-proxy:
container_name: nginx-proxy
environment:
- TRUST_DOWNSTREAM_PROXY=false
image: nginxproxy/nginx-proxy:alpine
networks:
- fullstack
ports:
- "80:80"
- "443:443"
volumes:
- ./auth:/etc/nginx/htpasswd
- /var/run/docker.sock:/tmp/docker.sock:ro
- ./gzip.conf:/etc/nginx/conf.d/gzip.conf:ro
- certs:/etc/nginx/certs
- vhost:/etc/nginx/vhost.d
- html:/usr/share/nginx/html
networks:
fullstack:
driver: bridge
volumes:
acme:
certs:
vhost:
html:
database:
driver: local
|
Beta Was this translation helpful? Give feedback.
Replies: 1 comment
-
The problem was that I had restricted access to internal clients for my frontend and backend (via NETWORK_ACCESS=internal). I still do not know why I occasionally had access to them though. |
Beta Was this translation helpful? Give feedback.
The problem was that I had restricted access to internal clients for my frontend and backend (via NETWORK_ACCESS=internal). I still do not know why I occasionally had access to them though.