Folder reorg in prep for using git runner
This commit is contained in:
parent 304e85e450
commit 1a3749112d

102 changed files with 833 additions and 244 deletions
swarm/Calibre-web.yaml (new normal file)
@@ -0,0 +1,60 @@
services:
  calibre-web-automated:
    image: crocodilestick/calibre-web-automated:latest
    networks:
      - netgrimoire

    environment:
      # Only change these if you know what you're doing
      PUID: "1001"
      PGID: "998"
      # Edit to match your current timezone https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
      TZ: America/Chicago

    # Match your standard UID:GID execution model
    #user: "1001:998"

    volumes:
      # - /data/nfs/Baxter/Data/media:/data:shared
      # CW users migrating should stop their existing CW instance, make a copy of the config folder, and bind that here to carry over all of their user settings etc.
      - /DockerVol/Calibre-web/Config:/config
      # This is an ingest dir, NOT a library one. Anything added here will be automatically added to your library according to the settings you have configured in the CWA Settings page. All files placed here are REMOVED AFTER PROCESSING.
      # - /path/to/the/folder/you/want/to/use/for/book/ingest:/cwa-book-ingest
      # If you don't have an existing library, CWA will automatically create one at the bind provided here
      # - /path/to/your/calibre/library:/calibre-library
      # - /data/nfs/Baxter/Data/media:/data:shared
      - /data/nfs/Baxter/Data/media/books/library/Netgrimoire:/calibre-library:shared

    ports:
      # Change the published port to change the port you use to access the Web UI, not the target
      - target: 8083
        published: 8079
        protocol: tcp
        mode: ingress

    deploy:
      placement:
        constraints:
          - node.hostname == nas

      labels:
        homepage.group: "PNCHarris Apps"
        homepage.name: "Family Library"
        homepage.icon: "calibre-web.png"
        homepage.href: "https://books.netgrimoire.com"
        homepage.description: "Calibre-Web Automated"

        kuma.cwa.http.name: "Calibre-Web Automated"
        kuma.cwa.http.url: "http://calibre-web-automated:8083"

        caddy: "books.netgrimoire.com , books.pncharris.com"
        # caddy.import: "authentik"
        caddy.reverse_proxy: "calibre-web-automated:8083"

        # Diun (image update monitoring)
        diun.enable: "true"

networks:
  netgrimoire:
    external: true
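A note on the ports block above, since the same pattern recurs throughout these stacks: it is Compose's long port syntax, and because ingress is Swarm's default publish mode and tcp the default protocol, it is equivalent to this short-form sketch:

    ports:
      - "8079:8083"   # published:target, tcp, via the Swarm ingress routing mesh

The long form just makes the published vs. container port explicit.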
swarm/JellySeer.yaml (new executable file)
@@ -0,0 +1,52 @@
services:
  jellyseer:
    image: fallenbagel/jellyseerr:latest
    networks:
      - netgrimoire

    environment:
      TZ: America/Chicago
      PUID: "1001"
      PGID: "998"
      LOG_LEVEL: "debug"

    # Match your standard UID:GID execution model
    # user: "1001:998"

    volumes:
      - /DockerVol/JellySeer/config:/app/config
      - /data/nfs/Baxter/Data/media:/data:shared

    ports:
      - target: 5055
        published: 5055
        protocol: tcp
        mode: ingress

    deploy:
      placement:
        constraints:
          - node.hostname == docker5

      labels:
        # Homepage
        homepage.group: "Media Search"
        homepage.name: "JellySeer"
        homepage.icon: "sh-jellyseerr.svg"
        homepage.href: "https://requests.netgrimoire.com"
        homepage.description: "Media Server"

        # Kuma
        kuma.jellyseer.http.name: "JellySeer"
        kuma.jellyseer.http.url: "http://jellyseer:5055"

        # Caddy
        caddy: "requests.netgrimoire.com"
        caddy.reverse_proxy: "jellyseer:5055"

        # Diun (image update monitoring)
        diun.enable: "true"

networks:
  netgrimoire:
    external: true
swarm/JellyStat.yaml (new executable file)
@@ -0,0 +1,61 @@
services:

  jellystat-db:
    image: postgres:15.2
    hostname: postgres
    container_name: postgres
    environment:
      - POSTGRES_DB=jfstat
      - POSTGRES_USER=jellystat
      - POSTGRES_PASSWORD=jellystat
      - PUID=1001
      - PGID=998
      - TZ=America/Chicago
    volumes:
      - /DockerVol/jellystat/postgres-data:/var/lib/postgresql/data # Mounting the volume
    networks:
      - netgrimoire
    deploy:
      placement:
        constraints:
          - node.hostname == bruce

  jellystat:
    image: cyfershepard/jellystat:latest
    hostname: jellystat
    container_name: jellystat
    environment:
      POSTGRES_USER: jellystat
      POSTGRES_PASSWORD: jellystat
      POSTGRES_IP: jellystat-db
      POSTGRES_PORT: 5432
      JWT_SECRET: 'my-secret-jwt-key'
    ports:
      - "3015:3000" # Server port
    volumes:
      - /DockerVol/jellystat/backup-data:/app/backend/backup-data # Mounting the volume
    depends_on:
      - jellystat-db
    networks:
      - netgrimoire
    deploy:
      labels:
        - homepage.group=Library
        - homepage.name=JellyStat
        - homepage.icon=jellystat.png
        - homepage.href=http://jellystat.netgrimoire.com
        - homepage.description=Jelly Stats
        - kuma.jellystat.http.name="JellyStat"
        - kuma.jellystat.http.url=http://jellystat:3000
        - caddy=jellystat.netgrimoire.com
        - caddy.import=authentik
        - caddy.reverse_proxy="{{upstreams 3000}}"
      placement:
        constraints:
          - node.hostname == bruce

networks:
  netgrimoire:
    external: true
swarm/SQL-mgmt.yaml (new executable file)
@@ -0,0 +1,87 @@
services:
  phpmyadmin:
    image: phpmyadmin/phpmyadmin
    container_name: phpmyadmin
    hostname: phpmyadmin
    networks:
      - netgrimoire
    ports:
      - 8022:80
    # links:
    #   - mysql:db
    environment:
      - PMA_ARBITRARY=1
      - PUID=1001
      - PGID=998
      - TZ=America/Chicago
      - MYSQL_ROOT_PASSWORD=F@lcon12
    restart: unless-stopped
    deploy:
      labels:
        - homepage.group=Management
        - homepage.name=PHPMyadmin
        - homepage.icon=phpmyadmin.png
        - homepage.href=http://phpmyadmin.netgrimoire.com
        - homepage.description=MySQL Manager
        - kuma.msql.http.name="PHPMyadmin"
        - kuma.msql.http.url=http://phpmyadmin:80
        - caddy=phpmyadmin.netgrimoire.com
        - caddy.import=authentik
        - caddy.reverse_proxy="{{upstreams 80}}"

  phppgadmin:
    restart: always
    image: dockage/phppgadmin:latest
    networks:
      - netgrimoire
    ports:
      - "8086:80"
      - "9443:443"
    environment:
      - PHP_PG_ADMIN_SERVER_DESC=PostgreSQL
      - PHP_PG_ADMIN_SERVER_HOST=
      - PHP_PG_ADMIN_SERVER_PORT=5432
      - PHP_PG_ADMIN_SERVER_SSL_MODE=allow
      - PHP_PG_ADMIN_SERVER_DEFAULT_DB=template1
      - PHP_PG_ADMIN_SERVER_PG_DUMP_PATH=/usr/bin/pg_dump
      - PHP_PG_ADMIN_SERVER_PG_DUMPALL_PATH=/usr/bin/pg_dumpall

      - PHP_PG_ADMIN_DEFAULT_LANG=auto
      - PHP_PG_ADMIN_AUTO_COMPLETE=default on
      - PHP_PG_ADMIN_EXTRA_LOGIN_SECURITY=false
      - PHP_PG_ADMIN_OWNED_ONLY=false
      - PHP_PG_ADMIN_SHOW_COMMENTS=true
      - PHP_PG_ADMIN_SHOW_ADVANCED=false
      - PHP_PG_ADMIN_SHOW_SYSTEM=false
      - PHP_PG_ADMIN_MIN_PASSWORD_LENGTH=1
      - PHP_PG_ADMIN_LEFT_WIDTH=200
      - PHP_PG_ADMIN_THEME=default
      - PHP_PG_ADMIN_SHOW_OIDS=false
      - PHP_PG_ADMIN_MAX_ROWS=30
      - PHP_PG_ADMIN_MAX_CHARS=50
      - PHP_PG_ADMIN_USE_XHTML_STRICT=false
      - PHP_PG_ADMIN_HELP_BASE=http://www.postgresql.org/docs/%s/interactive/
      - PHP_PG_ADMIN_AJAX_REFRESH
    deploy:
      labels:
        - homepage.group=Management
        - homepage.name=PHPpgmyadmin
        - homepage.icon=phppgmyadmin.png
        - homepage.href=http://phppgmyadmin.netgrimoire.com
        - homepage.description=Postgres Manager
        - kuma.sql.http.name="PHPPGMyadmin"
        - kuma.sql.http.url=http://phppgmyadmin:80
        - caddy=phppgmyadmin.netgrimoire.com
        - caddy.import=authentik
        - caddy.reverse_proxy="{{upstreams 80}}"

networks:
  netgrimoire:
    external: true
swarm/authelia.yaml (new normal file)
@@ -0,0 +1,75 @@
version: "3.8"

services:
  authelia:
    image: authelia/authelia:latest
    container_name: authelia
    hostname: authelia
    # user: "1001:998"
    networks:
      - netgrimoire
    environment:
      - PUID=1001
      - PGID=998
      - TZ=America/Chicago
    volumes:
      - /DockerVol/authelia/config:/config
      - /DockerVol/authelia/secrets:/secrets
    ports:
      # Optional: usually you do NOT need to publish this if only Caddy talks to it
      # - "9091:9091"
      - target: 9091
        published: 9091
        protocol: tcp
        mode: ingress
    deploy:
      replicas: 1
      placement:
        constraints:
          - node.hostname == nas
      labels:
        # Caddy - Authelia portal URL
        - caddy=login.wasted-bandwidth.net
        - caddy.reverse_proxy={{upstreams 9091}}

        # Homepage
        - homepage.group=Management
        - homepage.name=Authelia
        - homepage.icon=authelia.png
        - homepage.href=https://login.wasted-bandwidth.net
        - homepage.description=SSO / Forward-Auth

        # Uptime Kuma
        - kuma.authelia.http.name="Authelia"
        - kuma.authelia.http.url=http://authelia:9091

    restart: unless-stopped
    depends_on:
      - redis

  redis:
    image: redis:7-alpine
    container_name: authelia-redis
    hostname: authelia-redis
    #user: "1001:998"
    networks:
      - netgrimoire
    environment:
      - TZ=America/Chicago
      - PUID=1001
      - PGID=998
    command: >
      sh -c "redis-server --appendonly yes --dir /data"
    volumes:
      - /DockerVol/authelia/redis:/data
    deploy:
      replicas: 1
      placement:
        constraints:
          - node.hostname == nas

    restart: unless-stopped

networks:
  netgrimoire:
    external: true
swarm/authentik.yaml (new executable file)
@@ -0,0 +1,147 @@
services:
  postgresql:
    image: docker.io/library/postgres:16-alpine
    restart: unless-stopped
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -d $${POSTGRES_DB} -U $${POSTGRES_USER}"]
      start_period: 20s
      interval: 30s
      retries: 5
      timeout: 5s
    volumes:
      - /DockerVol/Authentik/Postgres:/var/lib/postgresql/data
    environment:
      POSTGRES_PASSWORD: F@lcon13
      POSTGRES_USER: authentik
      POSTGRES_DB: authentik
      TZ: America/Chicago
      PGID: 998
      PUID: 1001
      UMASK: 002
    networks:
      - authentik
    # env_file:
    #   - .env
    deploy:
      placement:
        constraints:
          - node.hostname == nas

  redis:
    image: docker.io/library/redis:alpine
    command: --save 60 1 --loglevel warning
    restart: unless-stopped
    healthcheck:
      test: ["CMD-SHELL", "redis-cli ping | grep PONG"]
      start_period: 20s
      interval: 30s
      retries: 5
      timeout: 3s
    environment:
      TZ: America/Chicago
      PGID: 998
      PUID: 1001
      UMASK: 002
    volumes:
      - /DockerVol/Authentik/redis:/data
    networks:
      - authentik
    deploy:
      placement:
        constraints:
          - node.hostname == nas

  authentik:
    image: ${AUTHENTIK_IMAGE:-ghcr.io/goauthentik/server}:${AUTHENTIK_TAG:-2025.2}
    restart: unless-stopped
    command: server
    environment:
      AUTHENTIK_REDIS__HOST: redis
      AUTHENTIK_POSTGRESQL__HOST: postgresql
      AUTHENTIK_POSTGRESQL__USER: authentik
      AUTHENTIK_POSTGRESQL__NAME: authentik
      AUTHENTIK_POSTGRESQL__PASSWORD: F@lcon13
      AUTHENTIK_SECRET_KEY: g8JIvopgkcpIeRUKgfT5KwHFUwGNBFobwhHMHx08wPTJTtAlmqllAwmr6u4jk+ng8O1gbV/gwZnYylMn
      TZ: America/Chicago
      PGID: 998
      PUID: 1001
      UMASK: 002
    volumes:
      - /DockerVol/Authentik/media:/media
      - /DockerVol/Authentik/custom-templates:/templates
    networks:
      - netgrimoire
      - authentik
    ports:
      - "9080:9000"
      - "9443:9443"
    deploy:
      placement:
        constraints:
          - node.hostname == nas
      labels:
        - homepage.group=Management
        - homepage.name=Authentik
        - homepage.icon=authentik.png
        - homepage.href=https://auth.netgrimoire.com
        - homepage.description=Account Manager
        - kuma.auth.http.name="Authentik"
        - kuma.auth.http.url=http://authentik:9000
        - caddy=auth.netgrimoire.com
        - caddy.reverse_proxy="{{upstreams 9000}}"
    depends_on:
      - postgresql
      - redis

  worker:
    image: ${AUTHENTIK_IMAGE:-ghcr.io/goauthentik/server}:${AUTHENTIK_TAG:-2025.2}
    restart: unless-stopped
    command: worker
    environment:
      AUTHENTIK_REDIS__HOST: redis
      AUTHENTIK_POSTGRESQL__HOST: postgresql
      AUTHENTIK_POSTGRESQL__USER: authentik
      AUTHENTIK_POSTGRESQL__NAME: authentik
      AUTHENTIK_POSTGRESQL__PASSWORD: F@lcon13
      AUTHENTIK_SECRET_KEY: g8JIvopgkcpIeRUKgfT5KwHFUwGNBFobwhHMHx08wPTJTtAlmqllAwmr6u4jk+ng8O1gbV/gwZnYylMn
      TZ: America/Chicago
      PGID: 998
      PUID: 1001
      UMASK: 002
    # `user: root` and the docker socket volume are optional.
    # See more for the docker socket integration here:
    # https://goauthentik.io/docs/outposts/integrations/docker
    # Removing `user: root` also prevents the worker from fixing the permissions
    # on the mounted folders, so when removing this make sure the folders have the correct UID/GID
    # (1000:1000 by default)
    user: root
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
      - /DockerVol/Authentik/media:/media
      - /DockerVol/Authentik/certs:/certs
      - /DockerVol/Authentik/custom-templates:/templates
    networks:
      - authentik
    depends_on:
      - postgresql
      - redis
    deploy:
      placement:
        constraints:
          - node.hostname == nas

volumes:
  database:
    driver: local
  redis:
    driver: local

networks:
  netgrimoire:
    external: true
  authentik:
    external: true

# import the .env file first: set -a; . ./.env; set +a
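One caveat behind that closing comment: docker stack deploy substitutes ${VAR} references from the shell environment but does not read a .env file on its own, so the ${AUTHENTIK_IMAGE:-...}/${AUTHENTIK_TAG:-...} defaults above only change once the file is sourced first. A minimal .env for this stack might look like the sketch below (values taken from the defaults in the compose file; adjust as needed):

    AUTHENTIK_IMAGE=ghcr.io/goauthentik/server
    AUTHENTIK_TAG=2025.2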
swarm/bazarr.yaml (new normal file)
@@ -0,0 +1,52 @@
services:
  bazarr:
    image: lscr.io/linuxserver/bazarr:latest
    networks:
      - netgrimoire

    environment:
      TZ: America/Chicago
      PUID: "1001"
      PGID: "998"

    # Match your template preference (run container as 1001:998)
    # user: "1001:998"

    volumes:
      - /DockerVol/bazarr/config:/config
      - /data/nfs/Baxter/Data/:/data:shared

    ports:
      - target: 6767
        published: 6767
        protocol: tcp
        mode: ingress

    deploy:
      placement:
        constraints:
          - node.hostname == docker4

      labels:
        # Homepage
        homepage.group: "Jolly Roger"
        homepage.name: "Bazarr"
        homepage.icon: "bazarr.png"
        homepage.href: "http://bazarr.netgrimoire.com"
        homepage.description: "Subtitle Search"

        # Kuma
        kuma.bazarr.http.name: "Bazarr"
        kuma.bazarr.http.url: "http://bazarr:6767"

        # Caddy
        caddy: "bazarr.netgrimoire.com"
        caddy.import: "authentik"
        caddy.reverse_proxy: "{{upstreams 6767}}"

        # Diun (image update monitoring)
        diun.enable: "true"

networks:
  netgrimoire:
    external: true
swarm/beets.yaml (new normal file)
@@ -0,0 +1,53 @@
services:
  beets:
    image: lscr.io/linuxserver/beets:latest
    networks:
      - netgrimoire

    environment:
      TZ: America/Chicago
      PUID: "1001"
      PGID: "998"

    # Match your standard UID:GID execution model
    #user: "1001:998"

    volumes:
      - /DockerVol/beets/config:/config
      - /data/nfs/Baxter/Data/media/music/Collection:/music
      - /data/nfs/Baxter/Data/media/music/ingest:/downloads

    ports:
      - target: 8337
        published: 8337
        protocol: tcp
        mode: ingress

    deploy:
      placement:
        constraints:
          - node.hostname == nas

      labels:
        # Homepage
        homepage.group: "Media Management"
        homepage.name: "Beets"
        homepage.icon: "beets.png"
        homepage.href: "https://beets.netgrimoire.com"
        homepage.description: "Music Manager"

        # Kuma
        kuma.beets.http.name: "Beets"
        kuma.beets.http.url: "http://beets:8337"

        # Caddy
        caddy: "beets.netgrimoire.com"
        caddy.import: "authentik"
        caddy.reverse_proxy: "{{upstreams 8337}}"

        # Diun (image update monitoring)
        diun.enable: "true"

networks:
  netgrimoire:
    external: true
swarm/beszel.yaml (new normal file)
@@ -0,0 +1,43 @@
services:
  beszel:
    image: henrygd/beszel
    container_name: beszel
    hostname: beszel
    networks:
      - netgrimoire
    volumes:
      - /data/nfs/Baxter/Docker/beszel:/beszel_data
    environment:
      - TZ=America/Chicago
    user: "1001:998"
    ports:
      - 8098:8090
    deploy:
      restart_policy:
        condition: any
        delay: 10s
        max_attempts: 5
        window: 60s
      labels:
        - homepage.group=Monitoring
        - homepage.name=Beszel
        - homepage.icon=beszel.png
        - homepage.href=https://beszel.netgrimoire.com
        - homepage.description=Beszel Service
        - kuma.beszel.http.name="Beszel"
        - kuma.beszel.http.url=http://beszel:8090
        - caddy=beszel.netgrimoire.com
        - caddy.import=authentik
        - caddy.reverse_proxy="{{upstreams 8090}}"
      placement:
        constraints:
          - node.labels.general == true

networks:
  netgrimoire:
    external: true
swarm/beszel_agents.yaml (new normal file)
@@ -0,0 +1,142 @@
#version: "3.8"

services:

  beszel-agent-docker1:
    image: henrygd/beszel-agent:latest
    restart: unless-stopped
    ports:
      - 45876:45876
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock:ro
    networks:
      - netgrimoire
    environment:
      KEY: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGNx6yKSqJHUiSKTM44/A1luwBije9HkHwnOxwXYx8q4"
      LISTEN: "45876"
    deploy:
      mode: replicated
      replicas: 1
      placement:
        constraints:
          - node.hostname == docker1

  beszel-agent-docker2:
    image: henrygd/beszel-agent:latest
    restart: unless-stopped
    ports:
      - 45877:45877
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock:ro
    networks:
      - netgrimoire
    environment:
      KEY: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGNx6yKSqJHUiSKTM44/A1luwBije9HkHwnOxwXYx8q4"
      LISTEN: "45877"
    deploy:
      mode: replicated
      replicas: 1
      placement:
        constraints:
          - node.hostname == docker2

  beszel-agent-docker3:
    image: henrygd/beszel-agent:latest
    restart: unless-stopped
    ports:
      - 45878:45878
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock:ro
    networks:
      - netgrimoire
    environment:
      KEY: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGNx6yKSqJHUiSKTM44/A1luwBije9HkHwnOxwXYx8q4"
      LISTEN: "45878"
    deploy:
      mode: replicated
      replicas: 1
      placement:
        constraints:
          - node.hostname == docker3

  beszel-agent-docker4:
    image: henrygd/beszel-agent:latest
    restart: unless-stopped
    ports:
      - 45879:45879
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock:ro
    networks:
      - netgrimoire
    environment:
      KEY: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGNx6yKSqJHUiSKTM44/A1luwBije9HkHwnOxwXYx8q4"
      LISTEN: "45879"
    deploy:
      mode: replicated
      replicas: 1
      placement:
        constraints:
          - node.hostname == docker4

  beszel-agent-docker5:
    image: henrygd/beszel-agent:latest
    restart: unless-stopped
    ports:
      - 45883:45883
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock:ro
    networks:
      - netgrimoire
    environment:
      KEY: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGNx6yKSqJHUiSKTM44/A1luwBije9HkHwnOxwXYx8q4"
      LISTEN: "45883"
    deploy:
      mode: replicated
      replicas: 1
      placement:
        constraints:
          - node.hostname == docker5

  beszel-agent-nas:
    image: henrygd/beszel-agent:latest
    restart: unless-stopped
    ports:
      - 45880:45880
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock:ro
    networks:
      - netgrimoire
    environment:
      KEY: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGNx6yKSqJHUiSKTM44/A1luwBije9HkHwnOxwXYx8q4"
      LISTEN: "45880"
    deploy:
      mode: replicated
      replicas: 1
      placement:
        constraints:
          - node.hostname == nas

  beszel-agent-dockerpi1:
    image: henrygd/beszel-agent:latest
    restart: unless-stopped
    ports:
      - 45884:45884
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock:ro
    networks:
      - netgrimoire
    environment:
      KEY: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGNx6yKSqJHUiSKTM44/A1luwBije9HkHwnOxwXYx8q4"
      LISTEN: "45884"
    deploy:
      mode: replicated
      replicas: 1
      placement:
        constraints:
          - node.hostname == DockerPi1

networks:
  netgrimoire:
    external: true
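The seven agent services above differ only in their published port, LISTEN value, and placement constraint. If the repetition ever becomes a maintenance burden, Compose's x- extension fields (format 3.4+) with a YAML anchor can factor out the common block; a minimal sketch, with x-beszel-agent as an illustrative name:

    x-beszel-agent: &beszel-agent
      image: henrygd/beszel-agent:latest
      restart: unless-stopped
      volumes:
        - /var/run/docker.sock:/var/run/docker.sock:ro
      networks:
        - netgrimoire

    services:
      beszel-agent-docker1:
        <<: *beszel-agent        # merge the shared keys
        ports:
          - 45876:45876
        environment:
          KEY: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGNx6yKSqJHUiSKTM44/A1luwBije9HkHwnOxwXYx8q4"
          LISTEN: "45876"
        deploy:
          mode: replicated
          replicas: 1
          placement:
            constraints:
              - node.hostname == docker1

Each service still carries its own full deploy block, because a YAML merge key replaces whole top-level keys rather than deep-merging them.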
swarm/cloudcmd.yaml (new executable file)
@@ -0,0 +1,41 @@
services:

  cloudcmd:
    image: coderaiser/cloudcmd
    container_name: cloudcmd
    hostname: cloudcmd
    networks:
      - netgrimoire
    ports:
      - 8000:8000
    environment:
      - PUID=1001
      - PGID=998
      - TZ=America/Chicago
    volumes:
      - ~:/root
      - /:/mnt/fs
    deploy:
      labels:
        - homepage.group=Application
        - homepage.name=Cloud Commander
        - homepage.icon=cloudcmd.png
        - homepage.href=http://commander.netgrimoire.com
        - homepage.description=Cloud Commander
        - kuma.cloud.http.name="Cloudcmd"
        - kuma.cloud.http.url=http://cloudcmd:8000
        - caddy=commander.netgrimoire.com
        - caddy.import=authentik
        - caddy.reverse_proxy="{{upstreams 8000}}"
      placement:
        constraints:
          - node.hostname == nas

networks:
  netgrimoire:
    external: true
swarm/comixed.yaml (new normal file)
@@ -0,0 +1,52 @@
# /config is where plugins, extensions, and configuration files are placed.
# /imports is where comics to be imported are placed.
# /library is where the comics library is maintained.

version: "3.6"
services:
  comixed:
    image: comixed/comixed
    container_name: comixed
    environment:
      - PUID=1001
      - PGID=998
      - TZ=America/Chicago
    volumes:
      - /DockerVol/comixed/config:/config
      - /data/nfs/Baxter/Data/media/comics/library:/library:shared
      - /data/nfs/Baxter/Data/media/comics/downloads:/imports:shared
    ports:
      - 7171:7171
    entrypoint: ["bash", "/app/comixed-release/bin/docker-run.sh", "-L", "/library/comixed.log", "-c", "/config"]
    restart: unless-stopped
    networks:
      - netgrimoire
    logging:
      driver: "gelf"
      options:
        gelf-address: "udp://192.168.5.17:12201"
        tag: "comixed"
    deploy:
      labels:
        homepage.group: "Application"
        homepage.name: "Comixed"
        homepage.icon: "comixed.png"
        homepage.href: "https://comics.netgrimoire.com"
        homepage.description: "Comic Manager"
        kuma.cxd.http.name: "Comixed"
        kuma.cxd.http.url: "http://comixed:7171"
        caddy: "comics.netgrimoire.com"
        caddy.import: "authentik"
        caddy.reverse_proxy: "{{upstreams 7171}}"
      placement:
        constraints:
          - node.hostname == docker1

networks:
  netgrimoire:
    external: true
swarm/commander.yaml (new normal file)
@@ -0,0 +1,41 @@
services:

  cloudcmd:
    image: coderaiser/cloudcmd
    container_name: cloudcmd
    hostname: cloudcmd
    networks:
      - netgrimoire
    ports:
      - 8024:8000
    environment:
      - TZ=America/Chicago
    user: "1001:998"
    volumes:
      - ~:/root
      - /:/mnt/fs
    deploy:
      placement:
        constraints:
          - node.hostname == nas
      labels:
        # Diun
        - "diun.enable=true"

        # Homepage
        - "homepage.group=Applications"
        - "homepage.name=Cloud Commander"
        - "homepage.icon=mdi-cloud"
        - "homepage.href=https://commander.netgrimoire.com"
        - "homepage.description=Web File Manager"

        # Caddy
        - "caddy=commander.netgrimoire.com"
        - "caddy.reverse_proxy={{upstreams 8000}}"

    restart: unless-stopped

networks:
  netgrimoire:
    external: true
swarm/configarr.yaml (new normal file)
@@ -0,0 +1,60 @@
services:
  configarr:
    image: ghcr.io/raydak-labs/configarr:latest
    user: "1001:998"
    environment:
      PUID: "1001"
      PGID: "998"
      TZ: America/Chicago

    volumes:
      - /data/nfs/Baxter/Docker/configarr/config:/app/config
      - /data/nfs/Baxter/Docker/configarr/repos:/app/repos
      - /data/nfs/Baxter/Docker/configarr/cfs:/app/cfs
      - /data/nfs/Baxter/Docker/configarr/templates:/app/templates

    networks:
      - netgrimoire

    deploy:
      mode: replicated
      replicas: 1
      restart_policy:
        condition: any
        delay: 5s
        max_attempts: 3
        window: 30s

      labels:
        # -------------------------
        # Homepage Dashboard Labels
        # -------------------------
        homepage.group: "Jolly Roger"
        homepage.name: "Configarr"
        homepage.icon: "si-config"
        homepage.href: "https://configarr.netgrimoire.com"
        homepage.description: "Automatically sync TRaSH formats & configs"

        # -------------------------
        # Kuma Monitoring Labels
        # -------------------------
        kuma.configarr.http.name: "Configarr"
        kuma.configarr.http.url: "https://configarr.netgrimoire.com"

        # -------------------------
        # Caddy Reverse Proxy
        # (Swarm label syntax works the same)
        # -------------------------
        caddy: configarr.netgrimoire.com
        caddy.import: "authentik"
        caddy.reverse_proxy: "{{upstreams 8000}}"

        # -------------------------
        # Diun Image Monitoring
        # -------------------------
        diun.enable: "true"

networks:
  netgrimoire:
    external: true
swarm/database.yaml (new normal file)
@@ -0,0 +1,32 @@
services:

  pgadmin4:
    restart: always
    image: dpage/pgadmin4
    networks:
      - netgrimoire
    ports:
      - "5052:80"
    environment:
      - PGADMIN_DEFAULT_EMAIL=traveler@netgrimoire.com
      - PGADMIN_DEFAULT_PASSWORD=F@lcon13
    deploy:
      labels:
        - homepage.group=Management
        - homepage.name=PGAdmin
        - homepage.icon=pgadmin.png
        - homepage.href=https://pgadmin.netgrimoire.com
        - homepage.description=Postgres Server
        - caddy=pgadmin.netgrimoire.com
        - caddy.import=authentik
        - caddy.reverse_proxy="{{upstreams 80}}"
      # placement:
      #   constraints:
      #     - node.hostname == docker2

networks:
  netgrimoire:
    external: true
swarm/diun.yaml (new normal file)
@@ -0,0 +1,34 @@
services:
  diun:
    image: crazymax/diun:latest
    container_name: diun
    user: "1001:998"
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
      # - ./data:/data
      - /data/nfs/Baxter/Docker/diun:/data
    networks:
      - netgrimoire
    environment:
      - TZ=America/Chicago
      - DIUN_WATCH_WORKERS=20
      - DIUN_WATCH_SCHEDULE=0 */6 * * *
      - DIUN_PROVIDERS_DOCKER=true
      - DIUN_PROVIDERS_DOCKER_WATCHBYDEFAULT=true
      - DIUN_NOTIF_NTFY_ENDPOINT=https://ntfy.netgrimoire.com
      - DIUN_NOTIF_NTFY_TOPIC=diun
      # - DIUN_NOTIF_NTFY_PRIORITY=default
    deploy:
      placement:
        constraints:
          - node.role == manager
      labels:
        diun.enable: "true"

networks:
  netgrimoire:
    external: true
swarm/dockpeek.yaml (new normal file)
@@ -0,0 +1,55 @@
services:
  dockpeek:
    image: dockpeek/dockpeek:latest
    container_name: dockpeek
    user: "1001:998"
    environment:
      PUID: "1001"
      PGID: "998"
      TZ: "America/Chicago"

      SECRET_KEY: F@lcon13 # Required
      USERNAME: traveler
      PASSWORD: F@lcon13

    networks:
      - netgrimoire

    ports:
      - "3420:8000"

    volumes:
      - /var/run/docker.sock:/var/run/docker.sock

    deploy:
      placement:
        constraints:
          - node.role == manager

      labels:
        # Homepage
        homepage.group: "Services"
        homepage.name: "Dockpeek"
        homepage.icon: "docker"
        homepage.href: "https://dockpeek.netgrimoire.com"

        # Kuma integration
        kuma.dockpeek.http.name: "Dockpeek"
        kuma.dockpeek.http.url: "https://dockpeek.netgrimoire.com"

        # Caddy (modify domain as needed)
        caddy: dockpeek.netgrimoire.com
        caddy.import: authentik
        caddy.reverse_proxy: "{{upstreams 8000}}"

        # Diun
        diun.enable: "true"

    restart: unless-stopped

networks:
  netgrimoire:
    external: true
swarm/dozzle.yaml (new executable file)
@@ -0,0 +1,27 @@
# Run with docker stack deploy -c dozzle-stack.yml <name>
services:
  dozzle:
    image: amir20/dozzle:latest
    environment:
      - DOZZLE_MODE=swarm
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
    ports:
      - :8080
    networks:
      - netgrimoire
      - dozzle
    deploy:
      mode: global
      labels:
        - homepage.group=Management
        - homepage.name=Dozzle
        - homepage.icon=dozzle.png
        - homepage.href=http://dozzle.netgrimoire.com
        - homepage.description=Docker logs

networks:
  dozzle:
    driver: overlay
  netgrimoire:
    external: true
swarm/dumbterm.yaml (new normal file)
@@ -0,0 +1,47 @@
services:
  dumbterm:
    container_name: dumbterm
    image: dumbwareio/dumbterm:latest
    ports:
      - 8062:3000
    networks:
      - netgrimoire
    # user: 1001:998
    environment:
      # Container timezone
      TZ: America/Chicago
      # The title shown in the web interface
      SITE_TITLE: https://cli.netgrimoire.com
      # Optional PIN protection (leave empty to disable)
      DUMBTERM_PIN: 2810
      # The base URL for the application
      BASE_URL: http://localhost:3000 # Use ALLOWED_ORIGINS below to restrict CORS to specific origins
      ENABLE_STARSHIP: "true" # Enable starship prompt
      LOCKOUT_TIME: 15 # Minutes
      MAX_SESSION_AGE: 24 # Hours

    deploy:
      labels:
        - homepage.group=Remote Access
        - homepage.name=Dumbterm
        - homepage.icon=dumbterm.png
        - homepage.href=https://cli.netgrimoire.com
        - homepage.description=Terminal
        - kuma.cli.http.name="dumbterm"
        - kuma.cli.http.url=http://dumbterm:3000
        - caddy=cli.netgrimoire.com
        - caddy.import=authentik
        - caddy.reverse_proxy="{{upstreams 3000}}"

    volumes:
      #- /data/nfs/Baxter/Docker/dumbterm/config:/root/.config
      #- /data/nfs/Baxter/Docker/dumbterm/:/root/data
      - /data/nfs/Baxter/Docker/dumbterm/root:/root

networks:
  netgrimoire:
    external: true
swarm/filebrowser.yaml (new normal file)
@@ -0,0 +1,35 @@
services:
  filebrowser:
    image: filebrowser/filebrowser:latest
    container_name: filebrowser
    hostname: filebrowser
    networks:
      - netgrimoire
    ports:
      - "8081:80"
    volumes:
      - /data/nfs/Baxter/Docker/filebrowser/config:/config
      - /data/nfs/Baxter/Docker/filebrowser/srv:/srv
    environment:
      - TZ=America/Chicago
    user: "1001:998"
    restart: unless-stopped
    deploy:
      labels:
        - homepage.group=Jolly Roger
        - homepage.name=FileBrowser
        - homepage.icon=filebrowser.png
        - homepage.href=http://filebrowser.netgrimoire.com
        - homepage.description=Web-based file manager
        - kuma.filebrowser.http.name="FileBrowser"
        - kuma.filebrowser.http.url=http://filebrowser:80
        - caddy=filebrowser.netgrimoire.com
        - caddy.import=authentik
        - caddy.reverse_proxy="{{upstreams 80}}"
      placement:
        constraints:
          - node.labels.general == true

networks:
  netgrimoire:
    external: true
swarm/firefox.yaml (new normal file)
@@ -0,0 +1,43 @@
services:

  firefox:
    image: jlesage/firefox
    container_name: firefox
    networks:
      - netgrimoire
    environment:
      - PUID=1001
      - PGID=998
      - TZ=America/Chicago
    volumes:
      - /data/nfs/Baxter/Docker/firefox:/config:rw
    ports:
      - 5910:5900
      - 5800:5800
    shm_size: "1gb"
    restart: unless-stopped
    deploy:
      labels:
        homepage.group: "Remote Access"
        homepage.name: "Firefox"
        homepage.icon: "firefox.png"
        homepage.href: "https://firefox.netgrimoire.com"
        homepage.description: "Remote Browser"
        kuma.ff.http.name: "Firefox"
        kuma.ff.http.url: "http://firefox:5800"
        caddy: "firefox.netgrimoire.com"
        caddy.import: "authentik"
        caddy.reverse_proxy: "http://firefox:5800"

networks:
  netgrimoire:
    external: true
swarm/forgejo.yaml (new executable file)
@@ -0,0 +1,57 @@
services:
  forgejo:
    image: codeberg.org/forgejo/forgejo:11
    networks:
      - netgrimoire

    environment:
      USER_UID: "1001"
      USER_GID: "998"
      TZ: America/Chicago

    # Match your standard UID:GID execution model
    #user: "1001:998"

    volumes:
      - /DockerVol/forgejo:/data
      - /etc/timezone:/etc/timezone:ro
      - /etc/localtime:/etc/localtime:ro

    ports:
      - target: 3000
        published: 3024
        protocol: tcp
        mode: ingress
      - target: 22
        published: 222
        protocol: tcp
        mode: ingress

    deploy:
      placement:
        constraints:
          - node.hostname == docker2

      labels:
        # Homepage
        homepage.group: "Applications"
        homepage.name: "Forgejo"
        homepage.icon: "forgejo.png"
        homepage.href: "https://git.netgrimoire.com"
        homepage.description: "Git Repository"

        # Kuma
        kuma.git.http.name: "Forgejo"
        kuma.git.http.url: "http://forgejo:3000"

        # Caddy
        caddy: "git.netgrimoire.com"
        # caddy.import: "authentik"
        caddy.reverse_proxy: "forgejo:3000"

        # Diun (image update monitoring)
        diun.enable: "true"

networks:
  netgrimoire:
    external: true
swarm/freshrss.yaml (new normal file)
@@ -0,0 +1,42 @@
services:
  freshrss:
    image: lscr.io/linuxserver/freshrss:latest
    container_name: freshrss
    user: "1001:998"
    # user: "0:0"
    environment:
      TZ: "America/Chicago"
      PUID: "1001"
      PGID: "998"

    networks:
      - netgrimoire

    volumes:
      - /data/nfs/Baxter/Docker/freshrss:/config

    deploy:
      endpoint_mode: dnsrr
      labels:
        # Homepage
        homepage.group: "Services"
        homepage.name: "FreshRSS"
        homepage.icon: "rss"
        homepage.href: "https://rss.netgrimoire.com"

        # Kuma integration
        kuma.freshrss.http.name: "FreshRSS"
        kuma.freshrss.http.url: "https://rss.netgrimoire.com"

        # Caddy (replace with your domain)
        caddy: "https://rss.netgrimoire.com"
        caddy.reverse_proxy: "{{upstreams 80}}"

        # Diun
        diun.enable: "true"

    restart: unless-stopped

networks:
  netgrimoire:
    external: true
swarm/glance.yaml (new normal file)
@@ -0,0 +1,47 @@
services:
  glance:
    image: glanceapp/glance
    networks:
      - netgrimoire

    environment:
      TZ: America/Chicago
      PUID: "1001"
      PGID: "998"

    #user: "1001:998"

    ports:
      - target: 8080
        published: 8077
        protocol: tcp
        mode: ingress

    volumes:
      - /data/nfs/Baxter/Docker/glance:/app/config

    deploy:
      labels:
        # Homepage
        homepage.group: "PNCHarris Apps"
        homepage.name: "Glance"
        homepage.icon: "glance.png"
        homepage.href: "https://home.netgrimoire.com"
        homepage.description: "Home Portal"

        # Kuma
        kuma.glance.http.name: "Glance"
        kuma.glance.http.url: "http://glance:8080"

        # Caddy
        caddy: "home.netgrimoire.com"
        caddy.import: "authentik"
        caddy.reverse_proxy: "glance:8080"

        # Diun (image update monitoring)
        diun.enable: "true"

networks:
  netgrimoire:
    external: true
swarm/homepage.yaml (new executable file)
@@ -0,0 +1,33 @@
services:
  homepage:
    image: ghcr.io/gethomepage/homepage:latest
    # container_name: homepage
    environment:
      - HOMEPAGE_ALLOWED_HOSTS=homepage.netgrimoire.com,glance.netgrimoire.com
      # PUID: 1001 # optional, your user id
      # PGID: 998 # optional, your group id
      # TZ: America/Chicago
    ports:
      - 3056:3000
    volumes:
      - /DockerVol/homepage/config:/app/config # Make sure your local config directory exists
      - /DockerVol/homepage/images:/app/public/images:ro
      - /var/run/docker.sock:/var/run/docker.sock:ro # optional, for docker integrations
    # restart: unless-stopped
    networks:
      - netgrimoire
    deploy:
      labels:
        - kuma.homepage.http.name="Homepage"
        - kuma.homepage.http.url=http://homepage:3000
        - caddy=homepage.netgrimoire.com
        - caddy.import=authentik
        - caddy.reverse_proxy="{{upstreams 3000}}"
      placement:
        constraints:
          - node.hostname == docker2

networks:
  netgrimoire:
    external: true
swarm/hydra.yaml (new executable file)
@@ -0,0 +1,52 @@
services:
  hydra2:
    image: linuxserver/nzbhydra2:dev
    networks:
      - netgrimoire

    environment:
      TZ: America/Chicago
      PUID: "1001"
      PGID: "998"

    # Match your standard UID:GID execution model
    # user: "1001:998"

    volumes:
      - /data/nfs/Baxter/Docker/hydra2/config:/config
      - /data/nfs/Baxter/Docker/hydra2/downloads:/downloads

    ports:
      - target: 5076
        published: 5076
        protocol: tcp
        mode: ingress

    deploy:
      placement:
        constraints:
          - node.labels.general == true

      labels:
        # Homepage
        homepage.group: "Media Search"
        homepage.name: "NZBHydra"
        homepage.icon: "nzbhydra2.png"
        homepage.href: "https://hydra.netgrimoire.com"
        homepage.description: "Usenet Search"

        # Kuma
        kuma.hydra.http.name: "NZBHydra"
        kuma.hydra.http.url: "http://hydra2:5076"

        # Caddy
        caddy: "hydra.netgrimoire.com"
        caddy.import: "authentik"
        caddy.reverse_proxy: "hydra2:5076"

        # Diun (image update monitoring)
        diun.enable: "true"

networks:
  netgrimoire:
    external: true
swarm/joplin.yaml (new normal file)
@@ -0,0 +1,74 @@
services:

  postgress:
    image: postgres:15
    container_name: postgress
    hostname: postgress
    volumes:
      - /DockerVol/joplindb:/var/lib/postgresql/data
    restart: unless-stopped
    user: "1001:998"
    networks:
      - netgrimoire
    environment:
      - POSTGRES_PASSWORD=postgres
      - POSTGRES_USER=postgres
      - POSTGRES_DB=joplin
      - TZ=America/Chicago
    deploy:
      placement:
        constraints:
          - node.hostname == docker3

  joplin:
    image: joplin/server:latest
    container_name: joplin
    hostname: joplin
    depends_on:
      - postgress
    ports:
      - "22300:22300"
    restart: unless-stopped
    networks:
      - netgrimoire
    environment:
      - TZ=America/Chicago
      - APP_PORT=22300
      - APP_BASE_URL=https://joplin.netgrimoire.com
      - DB_CLIENT=pg
      - POSTGRES_PASSWORD=postgres
      - POSTGRES_DATABASE=joplin
      - POSTGRES_USER=postgres
      - POSTGRES_PORT=5432
      - POSTGRES_HOST=postgress
      - MAILER_ENABLED=0
      - MAILER_HOST=smtp.gmail.com
      - MAILER_PORT=465
      - MAILER_SECURE=0
      - MAILER_AUTH_USER=youremail@gmail.com
      - MAILER_AUTH_PASSWORD=Y0urP@ssw0rd
      - MAILER_NOREPLY_NAME=Joplin
      - MAILER_NOREPLY_EMAIL=email@email.com
    user: "1001:998"
    deploy:
      labels:
        - homepage.group=Services
        - homepage.name=Joplin
        - homepage.icon=joplin.png
        - homepage.href=https://joplin.netgrimoire.com
        - homepage.description=Note Server
        - homepage.widget.type=joplin
        - homepage.widget.url=http://joplin:22300
        - kuma.joplin.http.name="Joplin"
        - kuma.joplin.http.url=http://joplin:22300
        - caddy=joplin.netgrimoire.com
        # - caddy.import=authentik
        - caddy.reverse_proxy="{{upstreams 22300}}"
      placement:
        constraints:
          - node.hostname == docker3

networks:
  netgrimoire:
    external: true
swarm/journiv.yaml (new empty normal file)
swarm/kavita.yaml (new executable file)
@@ -0,0 +1,51 @@
services:
  kavita:
    image: jvmilazz0/kavita:latest # Change latest to nightly for latest develop builds (can't go back to stable)
    networks:
      - netgrimoire

    volumes:
      - /data/nfs/Baxter/Data/media/comics:/comics:shared # Use as many as you want
      # - ./books:/books
      - /data/nfs/Baxter/Docker/Kavita/config:/kavita/config # Change the host path if you want the config files in a different place.
      # /kavita/config must not be changed

    environment:
      TZ: America/Chicago
      PUID: "1001"
      PGID: "998"

    # Match your standard UID:GID execution model
    #user: "1001:998"

    ports:
      # Change the published port (8054) if you have conflicts with other services
      - target: 5000
        published: 8054
        protocol: tcp
        mode: ingress

    deploy:
      labels:
        # Homepage
        homepage.group: "PNCHarris Apps"
        homepage.name: "Kavita"
        homepage.icon: "kavita.png"
        homepage.href: "https://kavita.netgrimoire.com"
        homepage.description: "Comic Book Reader"

        # Kuma
        kuma.kavita.http.name: "Kavita"
        kuma.kavita.http.url: "http://kavita:5000"

        # Caddy
        caddy: "kavita.netgrimoire.com"
        caddy.import: "authentik"
        caddy.reverse_proxy: "kavita:5000"

        # Diun (image update monitoring)
        diun.enable: "true"

networks:
  netgrimoire:
    external: true
swarm/kopia.yaml (new normal file)
@@ -0,0 +1,55 @@
services:
  kopia:
    image: kopia/kopia:latest
    container_name: kopia
    hostname: kopia
    restart: unless-stopped
    user: "1001:998"
    ports:
      - 51515:51515
    environment:
      PUID: 1001
      PGID: 998
      TZ: America/Chicago
      KOPIA_PASSWORD: F@lcon13
      KOPIA_SERVER_USERNAME: admin
      KOPIA_SERVER_PASSWORD: F@lcon13
    command:
      - server
      - start
      #- --tls-generate-cert
      - --tls-cert-file=/app/cert/my.cert
      - --tls-key-file=/app/cert/my.key
      - --address=0.0.0.0:51515
      - --server-username=admin
      - --server-password=F@lcon13
    volumes:
      - /DockerVol/kopia/config:/app/config
      - /DockerVol/kopia/cache:/app/cache
      - /DockerVol/kopia/cert:/app/cert
      - /srv/a18f546e-e9bf-4a8d-8b9d-02e7ec32b632/repository:/repository
      - /DockerVol/kopia/logs:/app/logs
    networks:
      - netgrimoire
    deploy:
      placement:
        constraints:
          - node.hostname == znas
          - node.labels.cps == amd
      labels:
        diun.enable: "true"
        homepage.group: "Backup"
        homepage.name: "Kopia"
        homepage.icon: "kopia.png"
        homepage.href: "https://kopia.netgrimoire.com"
        homepage.description: "Snapshot backup and deduplication"
        kuma.kopia.http.name: "Kopia Web"
        kuma.kopia.http.url: "http://kopia:51515"
        # Optional Caddy reverse proxy
        caddy: kopia.netgrimoire.com
        caddy.import: authentik
        caddy.reverse_proxy: "kopia.netgrimoire.com:51515"

networks:
  netgrimoire:
    external: true
swarm/kuma.yaml (new executable file)
@@ -0,0 +1,66 @@
services:
  kuma:
    image: louislam/uptime-kuma:1
    restart: always
    hostname: kuma
    container_name: kuma
    environment:
      - TZ=America/Chicago
      - PGID=998
      - PUID=1001
      - UMASK=002
    ports:
      - 3001:3001
    networks:
      - netgrimoire
    volumes:
      - /DockerVol/kuma:/app/data
    deploy:
      placement:
        constraints:
          - node.hostname == docker2
      labels:
        - homepage.group=Monitoring
        - homepage.name=Kuma Uptime
        - homepage.icon=kuma.png
        - homepage.href=http://kuma.netgrimoire.com
        - homepage.description=Services Monitor
        - caddy=kuma.netgrimoire.com
        - caddy.reverse_proxy="{{upstreams 3001}}"

  autokuma:
    image: ghcr.io/bigboot/autokuma:latest
    restart: unless-stopped
    environment:
      AUTOKUMA__KUMA__URL: http://kuma:3001
      AUTOKUMA__KUMA__USERNAME: traveler
      AUTOKUMA__KUMA__PASSWORD: F@lcon12
      # AUTOKUMA__KUMA__MFA_TOKEN: <token>
      # AUTOKUMA__KUMA__HEADERS: "<header1_key>=<header1_value>,<header2_key>=<header2_value>,..."
      # AUTOKUMA__KUMA__CALL_TIMEOUT: 5
      # AUTOKUMA__KUMA__CONNECT_TIMEOUT: 5
      AUTOKUMA__TAG_NAME: AutoKuma
      # AUTOKUMA__TAG_COLOR: "#42C0FB"
      # AUTOKUMA__DEFAULT_SETTINGS: |-
      #   docker.docker_container: {{container_name}}
      #   http.max_redirects: 10
      #   *.max_retries: 3
      # AUTOKUMA__SNIPPETS__WEB: |-
      #   {{container_name}}_http.http.name: {{container_name}} HTTP
      #   {{container_name}}_http.http.url: https://{{@0}}:{{@1}}
      #   {{container_name}}_docker.docker.name: {{container_name}} Docker
      #   {{container_name}}_docker.docker.docker_container: {{container_name}}
      # AUTOKUMA__DOCKER__HOSTS: unix:///var/run/docker.sock
      # AUTOKUMA__DOCKER__LABEL_PREFIX: kuma
      AUTOKUMA__DOCKER__SOURCE: "both"

    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
      - /data/nfs/Baxter/Docker/AutoKuma:/data

    networks:
      - netgrimoire

networks:
  netgrimoire:
    external: true
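For context on the kuma.* labels that appear on nearly every stack in this commit: AutoKuma (above) watches Docker labels and creates the matching Uptime Kuma monitors, using the pattern kuma.<id>.<monitor_type>.<setting>, as the commented snippets in the file also hint. A minimal sketch, with myapp as a placeholder:

    deploy:
      labels:
        kuma.myapp.http.name: "MyApp"             # display name of the generated monitor
        kuma.myapp.http.url: "http://myapp:8080"  # endpoint the HTTP check will poll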
swarm/library.yaml (new executable file)
@@ -0,0 +1,44 @@
services:
  Calibre-Netgrimoire:
    image: lscr.io/linuxserver/calibre-web:latest
    container_name: Calibre-Netgrimoire
    hostname: calibre-netgrimoire
    networks:
      - netgrimoire
    environment:
      - TZ=America/Chicago
      - PGID=998
      - PUID=1001
    volumes:
      - /data/nfs/Baxter/Docker/Calibre-netgrimoire/Config:/config
      - /data/nfs/Baxter/Data:/data:shared
    ports:
      - 8083:8083
    restart: unless-stopped
    deploy:
      labels:
        - homepage.group=Library
        - homepage.name=Netgrimoire Library
        - homepage.icon=calibre-web.png
        - homepage.href=http://books.netgrimoire.com
        - homepage.description=Curated Library
        - homepage.widget.type=calibreweb
        - homepage.widget.url=http://Calibre-netgrimoire:8083
        - homepage.widget.username=traveler
        - homepage.widget.password=F@lcon13
        - kuma.calibre1.http.name="Calibre-Netgrimoire"
        - kuma.calibre1.http.url=http://calibre-netgrimoire:8083
        - caddy=books.netgrimoire.com
        - caddy.import=authentik
        - caddy.reverse_proxy="{{upstreams 8083}}"
      placement:
        constraints:
          - node.labels.general == true

networks:
  netgrimoire:
    external: true
swarm/linkding.yaml (new normal file)
@@ -0,0 +1,50 @@
services:
  linkding:
    image: sissbruecker/linkding:1.13.0
    networks:
      - netgrimoire

    environment:
      TZ: America/Chicago
      PUID: "1001"
      PGID: "998"

    #user: "1001:998"

    ports:
      - target: 9090
        published: 9090
        protocol: tcp
        mode: ingress

    volumes:
      - /data/nfs/Baxter/Docker/linkding/data:/etc/linkding/data:rw

    deploy:
      labels:
        # Homepage
        homepage.group: "PNCHarris Apps"
        homepage.name: "Linkding"
        homepage.icon: "linkding.png"
        homepage.href: "https://link.netgrimoire.com"
        homepage.description: "Self-hosted bookmark manager"

        # Kuma
        kuma.linkding.http.name: "Linkding"
        kuma.linkding.http.url: "http://linkding:9090"

        # Caddy
        caddy: "link.netgrimoire.com"
        # caddy.import: "authentik"
        caddy.reverse_proxy: "linkding:9090"

        # Diun (image update monitoring)
        diun.enable: "true"

      # placement:
      #   constraints:
      #     - node.labels.general == true

networks:
  netgrimoire:
    external: true
swarm/lldap.yaml (new normal file)
@@ -0,0 +1,99 @@
version: "3.9"

networks:
  netgrimoire:
    external: true

services:
  lldap-db:
    image: postgres:16
    networks:
      - netgrimoire
    #user: "1001:998"
    environment:
      - TZ=America/Chicago
      - PUID=1001
      - PGID=998
      - POSTGRES_DB=lldap
      - POSTGRES_USER=lldap
      - POSTGRES_PASSWORD=F@lcon13
    volumes:
      - /DockerVol/lldap-db/data:/var/lib/postgresql/data
    deploy:
      placement:
        constraints:
          - node.hostname == docker4
      labels:
        - diun.enable=true
    restart: unless-stopped

  lldap:
    image: lldap/lldap:stable
    networks:
      - netgrimoire
    #user: "1001:998"
    environment:
      - TZ=America/Chicago
      - PUID=1001
      - PGID=998

      # Base DN
      - LLDAP_LDAP_BASE_DN=dc=netgrimoire,dc=com
      - LLDAP_DOMAIN=netgrimoire.com

      # User/admin bind password (you will replace)
      - LLDAP_LDAP_USER_PASS=F@lcon13

      # Generated secrets (leave as-is unless you want to rotate)
      - LLDAP_JWT_SECRET=lougu9MjGLmLp1SPDkkCBsQm-MdHpGGuOn-wW7FRWRdzglIn1nJRyBQkQ7HDcDh0
      - LLDAP_KEY_SEED=Kss_fNlMBH3XRo9aYHo_pI9gWQecQ1v3-yYzULckoWUm-iKIkV2DMygPYyKaN-u_

      # Postgres (the '@' in the password is percent-encoded so the URL parses correctly)
      - LLDAP_DATABASE_URL=postgres://lldap:F%40lcon13@lldap-db:5432/lldap

    volumes:
      - /DockerVol/lldap/data:/data

    # Expose to LAN via swarm routing mesh (ingress)
    ports:
      - target: 17170
        published: 17170
        protocol: tcp
        mode: ingress
      - target: 3890
        published: 3890
        protocol: tcp
        mode: ingress
      # If/when you enable LDAPS:
      # - target: 6360
      #   published: 6360
      #   protocol: tcp
      #   mode: ingress

    deploy:
      placement:
        constraints:
          - node.hostname == docker4
      labels:
        # Homepage
        - homepage.group=Management
        - homepage.name=LLDAP
        - homepage.icon=ldap.png
        - homepage.href=https://ldap.netgrimoire.com
        - homepage.description=Lightweight LDAP directory

        # Kuma
        - kuma.lldap.http.name=LLDAP
        - kuma.lldap.http.url=http://lldap:17170

        # Caddy / Authentik (protect UI)
        - caddy=ldap.netgrimoire.com
        - caddy.import=authentik
        - caddy.reverse_proxy=lldap:17170

        # Diun
        - diun.enable=true

    depends_on:
      - lldap-db
    restart: unless-stopped
123
swarm/logging.yaml
Normal file
123
swarm/logging.yaml
Normal file
|
|
@ -0,0 +1,123 @@
|
|||
|
||||
version: "3.8"

services:
  loki:
    image: grafana/loki:2.9.3
    command: -config.file=/etc/loki/loki-config.yaml
    user: "1001:998"
    ports:
      - "3100:3100"
    environment:
      - TZ=America/Chicago
    volumes:
      - loki_config:/etc/loki
      - loki_data:/loki
      - loki_wal:/wal
    networks:
      - netgrimoire
    deploy:
      labels:
        - homepage.group=Monitoring
        - homepage.name=Loki
        - homepage.icon=loki.png
        - homepage.href=https://loki.netgrimoire.com
        - homepage.description=Log store
        - caddy=loki.netgrimoire.com
        - caddy.reverse_proxy={{upstreams 3100}}
        - diun.enable=true
        - kuma.monitor=true
      placement:
        constraints:
          - node.hostname == docker4

  promtail:
    image: grafana/promtail:2.9.3
    container_name: promtail
    user: "1001:998"
    command: -config.file=/etc/promtail/promtail-config.yaml
    environment:
      - TZ=America/Chicago
    volumes:
      - /var/log:/var/log:ro
      - /var/lib/docker/containers:/var/lib/docker/containers:ro
      - /var/run/docker.sock:/var/run/docker.sock
      - /data/nfs/Baxter/Docker/promtail:/etc/promtail
    networks:
      - netgrimoire
    deploy:
      mode: global
      labels:
        - diun.enable=true
        - kuma.monitor=false

  grafana:
    image: grafana/grafana:10.4.2
    user: "1001:998"
    environment:
      - TZ=America/Chicago
      - GF_SECURITY_ADMIN_PASSWORD=admin
      - GF_USERS_DEFAULT_THEME=dark
    volumes:
      - grafana_data:/var/lib/grafana
    ports:
      - "3000:3000"
    networks:
      - netgrimoire
    deploy:
      labels:
        - homepage.group=Monitoring
        - homepage.name=Grafana
        - homepage.icon=grafana.png
        - homepage.href=https://grafana.netgrimoire.com
        - homepage.description=Metrics Dashboard
        - caddy=grafana.netgrimoire.com
        - caddy.reverse_proxy={{upstreams 3000}}
        - diun.enable=true
        - kuma.monitor=true
      placement:
        constraints:
          - node.hostname == docker4

volumes:
  loki_config:
    driver: local
    driver_opts:
      type: none
      o: bind
      device: /DockerVol/Loki/config

  loki_data:
    driver: local
    driver_opts:
      type: none
      o: bind
      device: /DockerVol/Loki/data

  loki_wal:
    driver: local
    driver_opts:
      type: none
      o: bind
      device: /DockerVol/Loki/wal

  promtail_config:
    driver: local
    driver_opts:
      type: none
      o: bind
      device: /DockerVol/Promtail/config

  grafana_data:
    driver: local
    driver_opts:
      type: none
      o: bind
      device: /DockerVol/Grafana/data

networks:
  netgrimoire:
    external: true
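To roll this out and confirm Loki actually came up, something like the following should work (assuming the file lives at swarm/logging.yaml and you run it from a manager node):

    docker stack deploy -c swarm/logging.yaml logging
    # Loki exposes a readiness probe; "ready" means the WAL and store are up
    curl -s http://localhost:3100/ready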
68
swarm/mealie.yaml
Executable file

@ -0,0 +1,68 @@
services:
  recipe:
    image: ghcr.io/mealie-recipes/mealie:latest
    container_name: mealie
    restart: always
    ports:
      - "9925:9000"

    environment:
      PUID: "1001"
      PGID: "998"
      TZ: "America/Chicago"

      MAX_WORKERS: "1"
      WEB_CONCURRENCY: "1"
      BASE_URL: "https://recipe.netgrimoire.com"

      # Disable local auth / signup (SSO-only)
      ALLOW_PASSWORD_LOGIN: "false"
      ALLOW_SIGNUP: "false"

      # OIDC (authentik)
      OIDC_AUTH_ENABLED: "true"
      OIDC_PROVIDER_NAME: "authentik"
      OIDC_CONFIGURATION_URL: "https://auth.netgrimoire.com/application/o/mealie/.well-known/openid-configuration"
      OIDC_CLIENT_ID: "tidMeWe3Ak30zRzcmC5vwoCqAIHXQsaVwJEp44Mz"
      OIDC_CLIENT_SECRET: "OD0CLgELUEWGoZ8IUnduGbxhyhh4vgjMBxBAjyopNOkATWIEWSYeWRDdfY6ulX2Fj7zuUp9dpgzjoFatNviLD8E5Cv2815eDrZxH9gNb52Taur0LzqBPk25yLCvsnjXK"
      OIDC_AUTO_REDIRECT: "true"
      OIDC_REMEMBER_ME: "true"

      # User provisioning and claim mapping
      OIDC_SIGNUP_ENABLED: "true"
      OIDC_USER_CLAIM: "sub"
      OIDC_NAME_CLAIM: "preferred_username"
      OIDC_GROUPS_CLAIM: "groups"
      OIDC_SCOPES_OVERRIDE: "openid profile email"

      # Group-based role mapping
      OIDC_USER_GROUP: "mealie-users"
      OIDC_ADMIN_GROUP: "mealie-admins"

    volumes:
      - /DockerVol/mealie:/app/data

    networks:
      - netgrimoire

    deploy:
      placement:
        constraints:
          - node.hostname == docker4
      labels:
        - homepage.group=PNCHarris Apps
        - homepage.name=Mealie
        - homepage.icon=mealie.png
        - homepage.href=https://recipe.netgrimoire.com
        - homepage.description=Recipe Manager

        - kuma.recipe.http.name="Mealie"
        - kuma.recipe.http.url=http://recipe:9000

        - caddy=recipe.netgrimoire.com
        #- caddy.import=authentik
        - caddy.reverse_proxy=recipe:9000

networks:
  netgrimoire:
    external: true
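Before pointing Mealie at authentik, it is worth confirming the discovery document resolves; a minimal check (assuming the authentik application slug is "mealie", as the URL above implies):

    curl -s https://auth.netgrimoire.com/application/o/mealie/.well-known/openid-configuration | head -c 300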
29
swarm/ntfy.yaml
Normal file

@ -0,0 +1,29 @@
services:
  ntfy:
    image: binwiederhier/ntfy
    command: serve
    environment:
      TZ: America/Chicago
    user: "1001:998"
    networks:
      - netgrimoire
    volumes:
      - /data/nfs/Baxter/Docker/ntfy/cache:/var/cache/ntfy
      - /data/nfs/Baxter/Docker/ntfy/etc:/etc/ntfy
    ports:
      - 81:80
    deploy:
      restart_policy:
        condition: any
        delay: 10s
        max_attempts: 5
        window: 60s
      labels:
        caddy: "ntfy.netgrimoire.com"
        #caddy.import: "authentik"
        caddy.reverse_proxy: "{{upstreams 80}}"

networks:
  netgrimoire:
    external: true
35
swarm/nzbget.yaml
Executable file

@ -0,0 +1,35 @@
services:
  nzbget:
    image: linuxserver/nzbget:latest
    container_name: nzbget
    hostname: nzbget
    networks:
      - netgrimoire
    ports:
      - "6789:6789"
    volumes:
      - /data/nfs/Baxter/Docker/nzbget/config:/config
      - /data/nfs/Baxter/Green/:/data:shared
    environment:
      - TZ=America/Chicago
      - PUID=1001
      - PGID=998
    restart: unless-stopped
    deploy:
      labels:
        - homepage.group=Downloaders
        - homepage.name=Nzbget
        - homepage.icon=nzbget.png
        - homepage.href=http://nzbget.netgrimoire.com
        - homepage.description=Usenet Downloader
        - kuma.nzbget.http.name="nzbget"
        - kuma.nzbget.http.url=http://nzbget:6789
        - caddy=nzbget.netgrimoire.com
        - caddy.import=authentik
        - caddy.reverse_proxy="{{upstreams 6789}}"

networks:
  netgrimoire:
    external: true
84
swarm/phpipam.yaml
Executable file

@ -0,0 +1,84 @@
services:

  phpipam-web:
    image: phpipam/phpipam-www:latest
    container_name: phpipam-web
    hostname: phpipam-web
    networks:
      - netgrimoire
    ports:
      - "8094:80"
    environment:
      - TZ=America/Chicago
      - IPAM_DATABASE_HOST=phpipam-mariadb
      - IPAM_DATABASE_PASS=ipam
      - IPAM_DATABASE_USER=ipam
      - IPAM_DATABASE_NAME=ipam
    volumes:
      - /DockerVol/phpipam/phpipam-logo:/phpipam/css/images/logo
    deploy:
      labels:
        - diun.enable=true
        - homepage.group=Management
        - homepage.name=phpIPAM
        - homepage.icon=ipam.png
        - homepage.href=http://ipam.netgrimoire.com
        - homepage.description=IP Address Management
        - caddy=ipam.netgrimoire.com
        - caddy.import=authentik
        - caddy.reverse_proxy="{{upstreams 80}}"
      placement:
        constraints:
          - node.hostname == docker3
      restart_policy:
        condition: any

  phpipam-cron:
    image: phpipam/phpipam-cron:latest
    container_name: phpipam-cron
    hostname: phpipam-cron
    networks:
      - netgrimoire
    environment:
      - TZ=America/Chicago
      - IPAM_DATABASE_HOST=phpipam-mariadb
      - IPAM_DATABASE_PASS=ipam
      - SCAN_INTERVAL=1h
    volumes:
      - /DockerVol/phpipam/phpipam-logo:/phpipam/css/images/logo
    deploy:
      labels:
        - diun.enable=true
      placement:
        constraints:
          - node.hostname == docker3
      restart_policy:
        condition: any

  phpipam-mariadb:
    image: mariadb:latest
    container_name: phpipam-mariadb
    hostname: phpipam-mariadb
    networks:
      - netgrimoire
    volumes:
      - /DockerVol/phpipam/mariadb:/var/lib/mysql
    environment:
      - TZ=America/Chicago
      - MYSQL_ROOT_PASSWORD=ipam
    deploy:
      labels:
        - diun.enable=true
      placement:
        constraints:
          - node.hostname == docker3
      restart_policy:
        condition: any

networks:
  netgrimoire:
    external: true
53
swarm/pinchflat.yaml
Normal file

@ -0,0 +1,53 @@
services:
  pinchflat:
    image: ghcr.io/kieraneglin/pinchflat:latest
    networks:
      - netgrimoire

    environment:
      # Set the timezone to your local timezone
      TZ: America/Chicago
      PUID: "1001"
      PGID: "998"

    # Match your standard UID:GID execution model
    #user: "1001:998"

    ports:
      - target: 8945
        published: 8945
        protocol: tcp
        mode: ingress

    volumes:
      - /DockerVol/pinchflat/config:/config
      - /data/nfs/Baxter/Data/media/other/pinchflat:/downloads

    deploy:
      placement:
        constraints:
          - node.hostname == nas

      labels:
        # Homepage
        homepage.group: "Downloaders"
        homepage.name: "PinchFlat"
        homepage.icon: "pinchflat.png"
        homepage.href: "https://pinchflat.netgrimoire.com"
        homepage.description: "YouTube Library"

        # Kuma
        kuma.pf.http.name: "PinchFlat"
        kuma.pf.http.url: "http://pinchflat:8945"

        # Caddy
        caddy: "pinchflat.netgrimoire.com"
        caddy.import: "authentik"
        caddy.reverse_proxy: "pinchflat:8945"

        # Diun (image update monitoring)
        diun.enable: "true"

networks:
  netgrimoire:
    external: true
45
swarm/portainer-agent-stack.yml
Executable file

@ -0,0 +1,45 @@
services:
  agent:
    image: portainer/agent:2.21.5
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
      - /var/lib/docker/volumes:/var/lib/docker/volumes
    networks:
      - netgrimoire
    deploy:
      mode: global
      # placement:
      #   constraints: [node.platform.os == linux]

  portainer:
    image: portainer/portainer-ce:2.21.5
    command: -H tcp://tasks.agent:9001 --tlsskipverify
    ports:
      - "7443:9443"
      - "7000:9000"
      - "7100:8000"
    volumes:
      - /DockerVol/portainer:/data
    networks:
      - netgrimoire
    deploy:
      labels:
        - homepage.group=Management
        - homepage.name=Portainer
        - homepage.icon=portainer.png
        - homepage.href=http://docker.netgrimoire.com
        - homepage.description=Docker Manager
        - kuma.prt.http.name="Portainer"
        - kuma.prt.http.url=http://portainer:9000
        # - caddy=docker.netgrimoire.com
        # - caddy.reverse_proxy="{{upstreams 9000}}"
      placement:
        constraints:
          - node.hostname == nas

networks:
  netgrimoire:
    external: true
59
swarm/profilarr.yaml
Normal file

@ -0,0 +1,59 @@
services:
  profilarr:
    image: santiagosayshey/profilarr:latest
    container_name: profilarr

    #user: "1001:998"
    environment:
      - PUID=1001
      - PGID=998
      - TZ=America/Chicago

    ports:
      - "6868:6868"

    volumes:
      - /DockerVol/profilarr:/config

    restart: unless-stopped

    networks:
      - netgrimoire

    deploy:
      placement:
        constraints:
          - node.hostname == docker4

      labels:
        # -------------------------
        # Diun (image update notify)
        # -------------------------
        diun.enable: "true"

        # -------------------------
        # Uptime Kuma (your standard labels)
        # -------------------------
        kuma.profilarr.http.name: "profilarr"
        kuma.profilarr.http.url: "http://profilarr.netgrimoire.com"

        # -------------------------
        # Homepage (no homepage.weight per your preference)
        # -------------------------
        homepage.group: "Jolly Roger"
        homepage.name: "Profilarr"
        homepage.description: "Profilarr"
        homepage.href: "https://profilarr.netgrimoire.com"
        homepage.icon: "profilarr"

        # -------------------------
        # Caddy (your domain + authentik import)
        # -------------------------
        caddy: "profilarr.netgrimoire.com"
        caddy.reverse_proxy: "{{upstreams 6868}}"
        caddy.import: "authentik"

networks:
  netgrimoire:
    external: true
42
swarm/radarr.yaml
Normal file

@ -0,0 +1,42 @@
services:
  radarr:
    image: lscr.io/linuxserver/radarr:latest
    container_name: radarr
    hostname: radarr
    user: "1001:998"
    environment:
      - TZ=America/Chicago
      - PGID=998
      - PUID=1001
    volumes:
      - /DockerVol/Radarr:/config
      - /data/nfs/Baxter/Data/:/data:shared
    ports:
      - 7878:7878
    networks:
      - netgrimoire
    restart: unless-stopped
    deploy:
      placement:
        constraints:
          - node.hostname == docker4
      labels:
        - homepage.group=Jolly Roger
        - homepage.name=Radarr
        - homepage.icon=radarr.png
        - homepage.href=https://radarr.netgrimoire.com
        - homepage.description=Movie Library
        - homepage.widget.type=radarr
        - homepage.widget.url=http://radarr:7878
        - homepage.widget.key=1fb7e413a68e459cb08ab33b100a444a
        - kuma.radarr.http.name=Radarr
        - kuma.radarr.http.url=http://radarr:7878
        - caddy=radarr.netgrimoire.com
        - caddy.import=authentik
        - caddy.reverse_proxy={{upstreams 7878}}
        - diun.enable=true

networks:
  netgrimoire:
    external: true
40
swarm/readarr.yaml
Normal file

@ -0,0 +1,40 @@
services:
  readarr:
    image: blampe/rreading-glasses:latest
    container_name: readarr
    hostname: readarr
    environment:
      - PUID=1001
      - PGID=998
      - TZ=America/Chicago
    networks:
      - netgrimoire
    volumes:
      - /DockerVol/readarr/config:/config
      - /data/nfs/Baxter/Data:/data:shared
    ports:
      - 8787:8787
    restart: unless-stopped
    deploy:
      placement:
        constraints:
          - node.hostname == docker4
      labels:
        - homepage.group=Jolly Roger
        - homepage.name=Readarr
        - homepage.icon=readarr.png
        - homepage.href=http://readarr.netgrimoire.com
        - homepage.description=Ebook Library
        - homepage.widget.type=readarr
        - homepage.widget.url=http://readarr:8787
        - homepage.widget.key=78954fcf696e4da9b2e9391a54e87478
        - kuma.readarr.http.name="Readarr"
        - kuma.readarr.http.url=http://readarr:8787
        - caddy=readarr.netgrimoire.com
        - caddy.import=authentik
        - caddy.reverse_proxy="{{upstreams 8787}}"

networks:
  netgrimoire:
    external: true
53
swarm/recyclarr.yaml
Normal file

@ -0,0 +1,53 @@
version: "3.8"

services:
  recyclarr:
    image: ghcr.io/recyclarr/recyclarr:latest
    user: "1001:998"
    environment:
      - PUID=1001
      - PGID=998
      - TZ=America/Chicago

      # Run on an interval (simple + reliable)
      # Adjust to taste: 6h, 12h, 24h, etc.
      - RECYCLARR_RUN_MODE=interval
      - RECYCLARR_INTERVAL=12h

      # Optional: if you want extra logging
      # - RECYCLARR_LOG_LEVEL=Information

    volumes:
      - /data/nfs/Baxter/Docker/recyclarr:/config

    networks:
      - netgrimoire

    deploy:
      placement:
        constraints:
          - node.hostname == docker4
      mode: replicated
      replicas: 1

      restart_policy:
        condition: any
        delay: 10s
        max_attempts: 0
        window: 30s

      update_config:
        order: start-first
        parallelism: 1
        delay: 10s

      labels:
        # -------------------------
        # Diun (image update notify)
        # -------------------------
        diun.enable: "true"

networks:
  netgrimoire:
    external: true
47
swarm/roundcube.yaml
Executable file

@ -0,0 +1,47 @@
services:

  roundcube:
    image: roundcube/roundcubemail:latest
    container_name: roundcube
    hostname: roundcube
    networks:
      - netgrimoire
    # restart: unless-stopped
    # depends_on:
    #   - roundcubedb
    # links:
    #   - roundcubedb
    volumes:
      - /DockerVol/roundcube/www:/var/www/html
    ports:
      - 9019:80
    environment:
      - ROUNDCUBEMAIL_DB_TYPE=mysql
      - ROUNDCUBEMAIL_DB_HOST=mysql-mailcow
      - ROUNDCUBEMAIL_DB_USER=roundcube
      - ROUNDCUBEMAIL_DB_PASSWORD=R)UNdcUbe!
      - ROUNDCUBEMAIL_DB_NAME=roundcubemail
      - ROUNDCUBEMAIL_SKIN=elastic
      - ROUNDCUBEMAIL_DEFAULT_HOST=tls://hermes.netgrimoire.com
      - ROUNDCUBEMAIL_SMTP_SERVER=tls://hermes.netgrimoire.com
    deploy:
      labels:
        - homepage.group=E-Mail
        - homepage.name=Roundcube
        - homepage.icon=roundcube.png
        - homepage.href=http://webmail.netgrimoire.com
        - homepage.description=E-mail client
        - kuma.rc.http.name="Roundcube"
        - kuma.rc.http.url=http://roundcube:80
        - caddy=webmail.netgrimoire.com, webmail.gnarlypandaproductions.com, webmail.pncharris.com, webmail.pncfishandmore.com, webmail.pncharrisenterprises.com, webmail.florosafd.org
        # - caddy.import=authentik
        - caddy.reverse_proxy="{{upstreams 80}}"
      placement:
        constraints:
          - node.hostname == docker4

networks:
  netgrimoire:
    external: true
39
swarm/sabnzbd.yaml
Executable file

@ -0,0 +1,39 @@
services:
  sabnzbd:
    image: linuxserver/sabnzbd
    container_name: sabnzbd
    hostname: sabnzbd
    networks:
      - netgrimoire
    ports:
      - 8082:8080
    volumes:
      - /data/nfs/Baxter/Data/:/data:shared
      - /data/nfs/Baxter/Docker/Sabnzbd:/config
    environment:
      - TZ=America/Chicago
      - PGID=998
      - PUID=1001
    restart: unless-stopped
    deploy:
      labels:
        - homepage.group=Jolly Roger
        - homepage.name=Sabnzbd
        - homepage.icon=sabnzbd.png
        - homepage.href=http://sabnzbd.netgrimoire.com
        - homepage.description=Usenet Downloader
        - homepage.widget.type=sabnzbd
        - homepage.widget.url=http://sabnzbd:8080
        - homepage.widget.key=ac2fcd48264c0a113ef33ab593958a52
        - kuma.sab.http.name="Sabnzbd"
        - kuma.sab.http.url=http://sabnzbd:8080
        - caddy=sabnzbd.netgrimoire.com
        - caddy.import=authentik
        - caddy.reverse_proxy="{{upstreams 8080}}"
      placement:
        constraints:
          - node.labels.general == true

networks:
  netgrimoire:
    external: true
155
swarm/scanopy.yaml
Normal file

@ -0,0 +1,155 @@
name: scanopy

services:
  postgres:
    image: postgres:17-alpine
    networks:
      - netgrimoire

    environment:
      TZ: America/Chicago
      POSTGRES_DB: scanopy
      POSTGRES_USER: postgres
      POSTGRES_PASSWORD: "sc@nopY"

    # NOTE: Postgres commonly expects to run as its own internal user.
    # If you force user: "1001:998" here it may break permissions.
    # Keep it as-is unless you *know* your Postgres image/data dir supports arbitrary UID.
    volumes:
      - /DockerVol/scanopy/postgres:/var/lib/postgresql/data

    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U postgres"]
      interval: 10s
      timeout: 5s
      retries: 5

    deploy:
      placement:
        constraints:
          - node.hostname == docker4
      labels:
        diun.enable: "true"

  server:
    image: ghcr.io/scanopy/scanopy/server:latest
    networks:
      - netgrimoire

    ports:
      - target: 60072
        published: 60072
        protocol: tcp
        mode: ingress

    environment:
      TZ: America/Chicago
      PUID: "1001"
      PGID: "998"

      SCANOPY_LOG_LEVEL: "info"
      SCANOPY_SERVER_PORT: "60072"

      # '@' in the password must be percent-encoded inside the URL,
      # otherwise everything after it is parsed as the host
      SCANOPY_DATABASE_URL: "postgresql://postgres:sc%40nopY@postgres:5432/scanopy"
      SCANOPY_WEB_EXTERNAL_PATH: "/app/static"
      SCANOPY_PUBLIC_URL: "https://scn.netgrimoire.com"

      # NOTE: The "integrated daemon URL" in the upstream compose assumes Docker bridge gateway (172.17.0.1).
      # In Swarm + host-network daemon, this is usually NOT what you want. Leave it unset for now.

    user: "1001:998"

    volumes:
      - /DockerVol/scanopy/server-data:/data

    healthcheck:
      test: ["CMD-SHELL", "curl -f http://localhost:60072/api/health"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 40s

    logging:
      driver: "gelf"
      options:
        gelf-address: "udp://192.168.5.17:12201"
        tag: "scanopy-server"

    deploy:
      placement:
        constraints:
          - node.hostname == docker4
      labels:
        # Homepage
        homepage.group: "Monitoring"
        homepage.name: "Scanopy"
        homepage.icon: "scanopy.png"
        homepage.href: "https://scn.netgrimoire.com"
        homepage.description: "Network discovery & topology"

        # Kuma
        kuma.scanopy.http.name: "Scanopy"
        kuma.scanopy.http.url: "http://server:60072"

        # Caddy
        caddy: "scn.netgrimoire.com"
        caddy.import: "authentik"
        caddy.reverse_proxy: "{{upstreams 60072}}"

        # Diun
        diun.enable: "true"

  daemon:
    image: ghcr.io/scanopy/scanopy/daemon:latest

    # Host networking + privileged is typical for discovery tools to see the real LAN.
    # Caveat: `docker stack deploy` does not support network_mode or privileged and
    # will warn "Ignoring unsupported options", so under swarm this service may need
    # to run via plain compose on the target node instead.
    network_mode: host
    privileged: true

    environment:
      TZ: America/Chicago
      PUID: "1001"
      PGID: "998"

      SCANOPY_LOG_LEVEL: "info"
      SCANOPY_DAEMON_PORT: "60073"
      SCANOPY_PORT: "60073"
      SCANOPY_BIND_ADDRESS: "0.0.0.0"
      SCANOPY_NAME: "scanopy-daemon-docker4"
      SCANOPY_HEARTBEAT_INTERVAL: "30"
      SCANOPY_MODE: "Push"

      # Server is pinned to docker4 and publishes 60072; daemon on host net can hit it via localhost
      SCANOPY_SERVER_URL: "http://127.0.0.1:60072"

    user: "1001:998"

    healthcheck:
      test: ["CMD-SHELL", "curl -f http://localhost:60073/api/health || exit 1"]
      interval: 5s
      timeout: 3s
      retries: 15

    volumes:
      - /DockerVol/scanopy/daemon-config:/root/.config/daemon
      # Comment out the line below to disable docker discovery
      - /var/run/docker.sock:/var/run/docker.sock:ro

    logging:
      driver: "gelf"
      options:
        gelf-address: "udp://192.168.5.17:12201"
        tag: "scanopy-daemon"

    deploy:
      placement:
        constraints:
          - node.hostname == docker4
      labels:
        diun.enable: "true"

networks:
  netgrimoire:
    external: true
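Two sketches worth noting here. First, if you ever do force user: "1001:998" onto the postgres service, pre-owning the bind-mounted data dir is the usual prerequisite (assuming UID 1001 / GID 998, as elsewhere in this repo):

    install -d -o 1001 -g 998 /DockerVol/scanopy/postgres

Second, the GELF driver fails silently if the endpoint is wrong; an uncompressed JSON datagram is valid GELF, so a one-off test message can be pushed at the collector with netcat:

    echo '{"version":"1.1","host":"test","short_message":"gelf smoke test"}' \
      | nc -u -w1 192.168.5.17 12201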
44
swarm/sonarr.yaml
Normal file

@ -0,0 +1,44 @@
version: '3.8'

services:
  sonarr:
    image: linuxserver/sonarr:latest
    container_name: sonarr
    hostname: sonarr
    user: "1001:998"
    environment:
      - TZ=America/Chicago
      - PGID=998
      - PUID=1001
    volumes:
      - /etc/localtime:/etc/localtime:ro
      - /DockerVol/Sonarr:/config
      - /data/nfs/Baxter/Data/:/data:shared
    ports:
      - 8989:8989
    networks:
      - netgrimoire
    restart: unless-stopped
    deploy:
      placement:
        constraints:
          - node.hostname == docker4
      labels:
        - homepage.group=Jolly Roger
        - homepage.name=Sonarr
        - homepage.icon=sonarr.png
        - homepage.href=http://sonarr.netgrimoire.com
        - homepage.description=Television Library
        - homepage.widget.type=sonarr
        - homepage.widget.url=http://sonarr:8989
        - homepage.widget.key=62f710714d604bc8a1e6df9f2ad6a07c
        - kuma.sonarr.http.name=Sonarr
        - kuma.sonarr.http.url=http://sonarr:8989
        - caddy=sonarr.netgrimoire.com
        - caddy.import=authentik
        - caddy.reverse_proxy={{upstreams 8989}}
        - diun.enable=true

networks:
  netgrimoire:
    external: true
134
swarm/stack/caddy/Caddyfile
Executable file

@ -0,0 +1,134 @@
(authentik) {
	route /outpost.goauthentik.io/* {
		reverse_proxy http://authentik:9000
	}

	forward_auth http://authentik:9000 {
		uri /outpost.goauthentik.io/auth/caddy
		header_up X-Forwarded-Host {http.request.host}
		header_up X-Forwarded-Proto {http.request.scheme}
		header_up X-Forwarded-URI {http.request.uri}
		copy_headers X-Authentik-Username X-Authentik-Groups X-Authentik-Email X-Authentik-Name X-Authentik-Uid X-Authentik-Jwt X-Authentik-Meta-Jwks X-Authentik-Meta-Outpost X-Authentik-Meta-Provider X-Authentik-Meta-App X-Authentik-Meta-Version
	}
}

(authelia) {
	forward_auth http://authelia:9091 {
		uri /api/verify?rd=https://login.wasted-bandwidth.net/
		copy_headers Remote-User Remote-Groups Remote-Email Remote-Name
	}
}

(email-proxy) {
	redir https://mail.netgrimoire.com/sogo 301
}

(mailcow-proxy) {
	reverse_proxy nginx-mailcow:80
}

cloud.netgrimoire.com {
	reverse_proxy http://nextcloud-aio-apache:11000
}

log.netgrimoire.com {
	reverse_proxy http://graylog:9000
}

win.netgrimoire.com {
	reverse_proxy http://192.168.5.12:8006
}

#jellyfin.netgrimoire.com {
#	reverse_proxy http://jellyfin:8096
#}

docker.netgrimoire.com {
	reverse_proxy http://portainer:9000
}

immich.netgrimoire.com {
	reverse_proxy http://192.168.5.12:2283
}

npm.netgrimoire.com {
	reverse_proxy http://librenms:8000
}

dozzle.netgrimoire.com {
	import authentik
	reverse_proxy http://192.168.4.72:8043
}

dns.netgrimoire.com {
	import authentik
	reverse_proxy http://192.168.5.7:5380
}

webtop.netgrimoire.com {
	import authentik
	reverse_proxy http://webtop:3000
}

jackett.netgrimoire.com {
	import authentik
	reverse_proxy http://gluetun:9117
}

transmission.netgrimoire.com {
	import authentik
	reverse_proxy http://gluetun:9091
}

stash.wasted-bandwidth.net {
	import authelia
	reverse_proxy http://stash:9999
}

namer.wasted-bandwidth.net {
	import authelia
	reverse_proxy http://namer:6980
}

fish.pncharris.com {
	reverse_proxy http://web
}

www.wasted-bandwidth.net {
	reverse_proxy http://web
}

scrutiny.netgrimoire.com {
	import authentik
	reverse_proxy http://192.168.5.12:8081
}

mail.netgrimoire.com, autodiscover.netgrimoire.com, autoconfig.netgrimoire.com, \
mail.wasted-bandwidth.net, autodiscover.wasted-bandwidth.net, autoconfig.wasted-bandwidth.net, \
mail.gnarlypandaproductions.com, autodiscover.gnarlypandaproductions.com, autoconfig.gnarlypandaproductions.com, \
mail.pncfishandmore.com, autodiscover.pncfishandmore.com, autoconfig.pncfishandmore.com, \
mail.pncharrisenterprises.com, autodiscover.pncharrisenterprises.com, autoconfig.pncharrisenterprises.com, \
mail.pncharris.com, autodiscover.pncharris.com, autoconfig.pncharris.com, \
mail.florosafd.org, autodiscover.florosafd.org, autoconfig.florosafd.org {
	import mailcow-proxy
}
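A quick way to confirm the (authentik) snippet is doing its job on a protected host: an unauthenticated request should come back as a redirect into the outpost rather than the app itself (a sketch; the exact status code depends on the authentik flow configuration):

    curl -sI https://dozzle.netgrimoire.com | head -n 5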
4
swarm/stack/caddy/caddy.sh
Executable file

@ -0,0 +1,4 @@
#!/bin/sh
docker service rm caddy_caddy
docker config rm caddy_caddy-basic-content
docker stack deploy -c caddy.yaml caddy
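The rm-then-deploy dance exists because swarm configs are immutable: once caddy_caddy-basic-content has been created, an edited Caddyfile will not replace it on redeploy, so the old config object has to be removed first. If the deploy complains, `docker config ls` shows what is currently registered.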
44
swarm/stack/caddy/caddy.yaml
Executable file

@ -0,0 +1,44 @@
configs:
  caddy-basic-content:
    file: ./Caddyfile
    labels:
      caddy:

services:
  caddy:
    image: lucaslorentz/caddy-docker-proxy:ci-alpine
    #image: ghcr.io/serfriz/caddy-crowdsec:latest
    #image: caddy-crowdsec
    #image: git.netgrimoire.com/traveler/caddy-crowdsec
    ports:
      - 8900:80
      - 443:443
    environment:
      - CADDY_INGRESS_NETWORKS=netgrimoire
    networks:
      - netgrimoire
      - vpn
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
      - /export/Docker/caddy/Caddyfile:/etc/caddy/Caddyfile
      - /export/Docker/caddy:/data
      - /export/Docker/caddy/logs:/var/log/caddy # Mount logs for CrowdSec
    logging:
      driver: "gelf"
      options:
        gelf-address: "udp://192.168.5.17:12201"
        tag: "caddy"
    deploy:
      placement:
        constraints:
          - node.hostname == nas

    # restart: unless-stopped

networks:
  netgrimoire:
    external: true
  vpn:
    external: true
49
swarm/termix.yaml
Normal file

@ -0,0 +1,49 @@
version: "3.8"

services:
  termix:
    image: ghcr.io/lukegus/termix:latest
    #user: "1001:998"
    environment:
      PUID: "1001"
      PGID: "998"
      TZ: "America/Chicago"
      PORT: "8080"
    volumes:
      - /DockerVol/termix:/app/data
    networks:
      - netgrimoire
    deploy:
      placement:
        constraints:
          - node.hostname == docker4
      mode: replicated
      replicas: 1
      restart_policy:
        condition: any
        delay: 5s
        window: 120s
      labels:
        # ---------------- Caddy ----------------
        caddy: "termix.netgrimoire.com"
        caddy.reverse_proxy: "termix:8080"
        caddy.import: "authentik"

        # ---------------- Homepage ----------------
        homepage.group: "Remote Access"
        homepage.name: "Termix"
        homepage.icon: "terminal"
        homepage.href: "https://termix.netgrimoire.com"
        homepage.description: "Web-based terminal interface"
        homepage.siteMonitor: "https://termix.netgrimoire.com"

        # ---------------- Uptime Kuma ----------------
        kuma.termix.http.name: "Termix"
        kuma.termix.http.url: "https://termix.netgrimoire.com"

        # ---------------- Diun ----------------
        diun.enable: "true"

networks:
  netgrimoire:
    external: true
58
swarm/tmm.yaml
Executable file

@ -0,0 +1,58 @@
services:
  tinymediamanager_service:
    image: tinymediamanager/tinymediamanager:latest
    networks:
      - netgrimoire

    environment:
      TZ: America/Chicago
      PUID: "1001"
      PGID: "998"
      USER_ID: "1001"
      GROUP_ID: "998"
      PASSWORD: "F@lcon12"
      ALLOW_DIRECT_VNC: "true"
      LC_ALL: "en_US.UTF-8" # force UTF8
      LANG: "en_US.UTF-8" # force UTF8

    # Match your standard UID:GID execution model
    #user: "1001:998"

    ports:
      # - target: 5900
      #   published: 5909
      #   protocol: tcp
      #   mode: ingress # VNC port (optional)
      - target: 4000
        published: 4000
        protocol: tcp
        mode: ingress # Webinterface

    volumes:
      - /data/nfs/Baxter/Docker/tinymediamanager:/config:rw
      - /data/nfs/Baxter:/media:rw

    deploy:
      labels:
        # Homepage
        homepage.group: "Media Management"
        homepage.name: "Tiny Media Manager"
        homepage.icon: "troddit.png"
        homepage.href: "https://tmm.netgrimoire.com"
        homepage.description: "Media Manager"

        # Kuma
        kuma.app.http.name: "Tiny Media Manager"
        kuma.app.http.url: "http://tinymediamanager_service:4000"

        # Caddy
        caddy: "tmm.netgrimoire.com"
        caddy.import: "authentik"
        caddy.reverse_proxy: "tinymediamanager_service:4000"

        # Diun (image update monitoring)
        diun.enable: "true"

networks:
  netgrimoire:
    external: true
41
swarm/tunarr.yaml
Normal file

@ -0,0 +1,41 @@
services:
  tunarr:
    image: chrisbenincasa/tunarr
    container_name: tunarr
    hostname: tunarr
    networks:
      - netgrimoire
    ports:
      - "8051:8000" # Web UI
      - "8052:8001" # Streaming endpoint
    volumes:
      - /data/nfs/Baxter/Docker/tunarr/config:/config
      - /data/nfs/Baxter/Data/:/media # Adjust to your media directory
    environment:
      - TZ=America/Chicago
      - PGID=998
      - PUID=1001
    user: "1001:998"
    restart: unless-stopped
    deploy:
      labels:
        - homepage.group=Jolly Roger
        - homepage.name=Tunarr
        - homepage.icon=tunarr.png
        - homepage.href=https://tunarr.netgrimoire.com
        - homepage.description=Virtual TV Channel Creator
        - kuma.tunarr.http.name="Tunarr"
        - kuma.tunarr.http.url=http://tunarr:8000
        - caddy=tunarr.netgrimoire.com
        - caddy.import=authentik
        - caddy.reverse_proxy="{{upstreams 8000}}"
      placement:
        constraints:
          - node.labels.general == true

networks:
  netgrimoire:
    external: true
52
swarm/vaultwarden.yaml
Normal file

@ -0,0 +1,52 @@
services:
  bitwarden:
    image: vaultwarden/server:latest
    networks:
      - netgrimoire

    environment:
      TZ: America/Chicago
      PUID: "1001"
      PGID: "998"

    # Match your standard UID:GID execution model
    #user: "1001:998"

    ports:
      - target: 80
        published: 8093
        protocol: tcp
        mode: ingress

    volumes:
      - /DockerVol/bitwarden:/data
      # - /data/nfs/Baxter/Docker/vaultwarden:/data

    deploy:
      placement:
        constraints:
          - node.hostname == docker3

      labels:
        # Homepage
        homepage.group: "PNCHarris Apps"
        homepage.name: "Vaultwarden"
        homepage.icon: "vaultwarden.png"
        homepage.href: "https://pass.netgrimoire.com"
        homepage.description: "Password Manager"

        # Kuma
        kuma.pass.http.name: "Vaultwarden"
        kuma.pass.http.url: "http://bitwarden:80"

        # Caddy
        caddy: "pass.netgrimoire.com"
        # caddy.import: "authentik"
        caddy.reverse_proxy: "bitwarden:80"

        # Diun (image update monitoring)
        diun.enable: "true"

networks:
  netgrimoire:
    external: true
94
swarm/vikunja.yaml
Normal file

@ -0,0 +1,94 @@
services:
  vikunja:
    image: vikunja/vikunja:latest
    networks:
      - netgrimoire

    environment:
      TZ: America/Chicago
      PUID: "1001"
      PGID: "998"
      VIKUNJA_SERVICE_PUBLICURL: "https://task.netgrimoire.com"
      VIKUNJA_DATABASE_HOST: "vikdb"
      VIKUNJA_DATABASE_PASSWORD: "F@lcon13"
      VIKUNJA_DATABASE_TYPE: "mysql"
      VIKUNJA_DATABASE_USER: "vikunja"
      VIKUNJA_DATABASE_DATABASE: "vikunja"
      VIKUNJA_SERVICE_JWTSECRET: "vikunja"

    user: "1001:998"

    ports:
      - target: 3456
        published: 3456
        protocol: tcp
        mode: ingress

    volumes:
      - /DockerVol/vikunja/files:/app/vikunja/files

    deploy:
      placement:
        constraints:
          - node.hostname == docker4

      labels:
        # Homepage
        homepage.group: "PNCHarris Apps"
        homepage.name: "Vikunja"
        homepage.icon: "vikunja.png"
        homepage.href: "https://task.netgrimoire.com"
        homepage.description: "Task Manager"

        # Kuma
        kuma.vik.http.name: "Vikunja"
        kuma.vik.http.url: "http://vikunja:3456"

        # Caddy
        caddy: "task.netgrimoire.com"
        caddy.import: "authentik"
        caddy.reverse_proxy: "{{upstreams 3456}}"

        # Diun (image update monitoring)
        diun.enable: "true"

  vikdb:
    image: mariadb:10.6
    networks:
      - netgrimoire

    command: --character-set-server=utf8mb4 --collation-server=utf8mb4_unicode_ci

    environment:
      TZ: America/Chicago
      PUID: "1001"
      PGID: "998"
      MYSQL_ROOT_PASSWORD: "F@lcon13"
      MYSQL_USER: "vikunja"
      MYSQL_PASSWORD: "F@lcon13"
      MYSQL_DATABASE: "vikunja"

    user: "1001:998"

    volumes:
      - /DockerVol/vikunja/db:/var/lib/mysql

    healthcheck:
      # "$$" stops compose from interpolating at deploy time; the shell
      # inside the container expands the variables instead
      test: ["CMD-SHELL", "mysqladmin ping -h localhost -u $$MYSQL_USER --password=$$MYSQL_PASSWORD"]
      interval: 10s
      timeout: 5s
      retries: 10

    deploy:
      placement:
        constraints:
          - node.hostname == docker4

      labels:
        # Diun (image update monitoring)
        diun.enable: "true"

networks:
  netgrimoire:
    external: true
59
swarm/vscode.yaml
Executable file

@ -0,0 +1,59 @@
services:
  code-server:
    image: ghcr.io/linuxserver/code-server
    networks:
      - netgrimoire

    environment:
      TZ: America/Chicago
      PUID: "1001"
      PGID: "998"
      # PASSWORD=password F@lcon12
      # HASHED_PASSWORD= # optional
      SUDO_PASSWORD: "F@lcon13" # optional
      # SUDO_PASSWORD_HASH= # optional
      PROXY_DOMAIN: "code.netgrimoire.com" # optional

    # Match your standard UID:GID execution model
    #user: "1001:998"

    volumes:
      - /data/nfs/Baxter/Docker/Code/config:/config
      - /DockerVol/code:/Data

    ports:
      - target: 8443
        published: 8443
        protocol: tcp
        mode: ingress

    deploy:
      placement:
        constraints:
          - node.hostname == docker2
      labels:
        # Homepage
        homepage.group: "Applications"
        homepage.name: "Code Server"
        homepage.icon: "code.png"
        homepage.href: "https://code.netgrimoire.com"
        homepage.description: "Code Server"

        # Kuma
        kuma.cs.http.name: "Code Server"
        kuma.cs.http.url: "http://code-server:8443"

        # Caddy
        caddy: "code.netgrimoire.com"
        caddy.import: "authentik"
        caddy.reverse_proxy: "code-server:8443"

        # Diun (image update monitoring)
        diun.enable: "true"

networks:
  netgrimoire:
    external: true

volumes:
  Projects:
49
swarm/wallo.yaml
Executable file

@ -0,0 +1,49 @@
services:
  wallos:
    image: bellamy/wallos:latest
    networks:
      - netgrimoire

    environment:
      TZ: America/Chicago
      PUID: "1001"
      PGID: "998"

    # Match your standard UID:GID execution model
    #user: "1001:998"

    ports:
      - target: 80
        published: 8282
        protocol: tcp
        mode: ingress

    # Volumes store your data between container upgrades
    volumes:
      - /data/nfs/Baxter/Docker/wallos/db:/var/www/html/db
      - /data/nfs/Baxter/Docker/wallos/logos:/var/www/html/images/uploads/logos

    deploy:
      labels:
        # Homepage
        homepage.group: "PNCHarris Apps"
        homepage.name: "Wallos"
        homepage.icon: "wallos.png"
        homepage.href: "https://expense.netgrimoire.com"
        homepage.description: "Subscription Manager"

        # Kuma
        kuma.wallos.http.name: "Wallos"
        kuma.wallos.http.url: "http://wallos:80"

        # Caddy
        caddy: "expense.netgrimoire.com"
        caddy.import: "authentik"
        caddy.reverse_proxy: "wallos:80"

        # Diun (image update monitoring)
        diun.enable: "true"

networks:
  netgrimoire:
    external: true
45
swarm/web.yaml
Normal file

@ -0,0 +1,45 @@
services:
  web:
    image: php:8.2-apache
    container_name: web
    hostname: web
    user: "1001:998"
    networks:
      - netgrimoire
    volumes:
      - /data/nfs/Baxter/Docker/web/pages:/var/www/html:rw
      - /data/nfs/Baxter/Docker/web/apache:/etc/apache2/sites-enabled:ro

    environment:
      - PUID=1001
      - PGID=998
      - TZ=America/Chicago

    deploy:
      mode: replicated
      replicas: 1
      restart_policy:
        condition: any
        delay: 5s
        max_attempts: 3
        window: 120s
      labels:
        # Homepage
        - "homepage.name=www.netgrimoire.com"
        - "homepage.icon=mdi:web"
        # Kuma
        - "kuma.web.http.name=www.netgrimoire.com"
        - "kuma.web.http.url=http://web:80"
        # Caddy reverse proxy
        - caddy=www.netgrimoire.com
        - caddy.import=authentik
        - caddy.reverse_proxy="web:80"
        # Diun
        - "diun.enable=true"
        - "diun.notification.ntfy=true"

networks:
  netgrimoire:
    external: true
40
swarm/webtop.yaml
Executable file

@ -0,0 +1,40 @@
services:
  webtop:
    image: lscr.io/linuxserver/webtop:debian-xfce
    container_name: webtop
    security_opt:
      - seccomp:unconfined #optional
    environment:
      - PUID=1001
      - PGID=998
      - TZ=America/Chicago
      - SUBFOLDER=/ #optional
      - TITLE=Webtop #optional
    networks:
      - netgrimoire
    volumes:
      - /data/nfs/Baxter/Docker/Webtop:/config
      # - /var/run/docker.sock:/var/run/docker.sock #optional
    ports:
      - 3004:3000
      - 3005:3001
    devices:
      - /dev/dri:/dev/dri #optional
    shm_size: "1gb" #optional
    restart: unless-stopped
    labels:
      - homepage.group=Remote Access
      - homepage.name=webtop
      - homepage.icon=webtop.png
      - homepage.href=http://webtop.netgrimoire.com
      - homepage.description=Remote XFCE
      - kuma.webtop.http.name="Webtop"
      - kuma.webtop.http.url=http://webtop:3000
      - caddy=webtop.netgrimoire.com
      - caddy.import=authentik
      - caddy.reverse_proxy="{{upstreams 3000}}"

networks:
  netgrimoire:
    external: true
156
swarm/whisparr.yaml
Executable file

@ -0,0 +1,156 @@
services:

  # greenfin:
  #   image: lscr.io/linuxserver/jellyfin
  #   container_name: jellyfin
  #   environment:
  #     - PUID=1001
  #     - PGID=998
  #     - TZ=America/Chicago
  #   volumes:
  #     - /DockerVol/greenfin/config:/config
  #     - /data/nfs/Baxter/Green/media:/data:shared
  #   ports:
  #     - 8069:8096
  #     # - 8920:8920
  #     # - 7359:7359/udp
  #     # - 1900:1900/udp
  #   restart: unless-stopped
  #   networks:
  #     - netgrimoire
  #   deploy:
  #     labels:
  #       - homepage.group=Green Door
  #       - homepage.name=GreenFin
  #       - homepage.icon=jellyfin.png
  #       - homepage.href=http://greenfin.netgrimoire.com:8069
  #       - homepage.description=Media Server
  #       - kuma.jgf.http.name="Greenfin"
  #       - kuma.jgf.http.url=http://greenfin:8069
  #     placement:
  #       constraints:
  #         - node.hostname == docker1

  whisparr:
    image: ghcr.io/hotio/whisparr
    container_name: whisparr
    hostname: whisparr
    networks:
      - netgrimoire
    volumes:
      - /DockerVol/Whisparr:/config
      - /data/nfs/Baxter/:/data:shared
    ports:
      - 6969:6969
    environment:
      - TZ=America/Chicago
      - PGID=998
      - PUID=1001
    restart: unless-stopped
    deploy:
      labels:
        - homepage.group=Green Door
        - homepage.name=Whisparr
        - homepage.icon=whisparr.png
        - homepage.href=http://whisparr.netgrimoire.com
        - homepage.description=Media Search
        - kuma.whisparr.http.name="Whisparr"
        - kuma.whisparr.http.url=http://whisparr:6969
        - caddy=whisparr.netgrimoire.com
        - caddy.import=authentik
        - caddy.reverse_proxy="{{upstreams 6969}}"
      placement:
        constraints:
          - node.hostname == docker1

  # stash:
  #   image: stashapp/stash:latest
  #   container_name: stash
  #   networks:
  #     - netgrimoire
  #   restart: unless-stopped
  #   ## the container's port must be the same with the STASH_PORT in the environment section
  #   ports:
  #     - "9999:9999"
  #   ## If you intend to use stash's DLNA functionality uncomment the below network mode and comment out the above ports section
  #   # network_mode: host
  #   environment:
  #     - STASH_STASH=/data/
  #     - STASH_GENERATED=/generated/
  #     - STASH_METADATA=/metadata/
  #     - STASH_CACHE=/cache/
  #     ## Adjust below to change default port (9999)
  #     - STASH_PORT=9999
  #   volumes:
  #     - /etc/localtime:/etc/localtime:ro
  #     ## Adjust below paths (the left part) to your liking.
  #     ## E.g. you can change ./config:/root/.stash to ./stash:/root/.stash
  #
  #     ## Keep configs, scrapers, and plugins here.
  #     - /DockerVol/Stash/config:/root/.stash
  #     ## Point this at your collection.
  #     - /data/nfs/Baxter/Green:/data
  #     ## This is where your stash's metadata lives
  #     - /DockerVol/Stash/metadata:/metadata
  #     ## Any other cache content.
  #     - ./DockerVol/Stash/cache:/cache
  #     ## Where to store generated content (screenshots,previews,transcodes,sprites)
  #     - /DockerVol/Stash/generated:/generated
  #
  #   deploy:
  #     labels:
  #       - homepage.group=Green Door
  #       - homepage.name=Stash
  #       - homepage.icon=stash.png
  #       - homepage.href=http://stash.netgrimoire.com:8096
  #       - homepage.description=Media Server
  #       - kuma.jgf.http.name="Stash"
  #       - kuma.jgf.http.url=http://stash:9999
  #     placement:
  #       constraints:
  #         - node.hostname == nas

  # namer: # Note run on NAS in compose
  #   container_name: namer
  #   image: ghcr.io/theporndatabase/namer:latest
  #   networks:
  #     - netgrimoire
  #   environment:
  #     - PUID=1001
  #     - PGID=998
  #     - TZ=America/Chicago
  #     - NAMER_CONFIG=/config/namer.cfg
  #   volumes:
  #     - /DockerVol/namer/:/config
  #     - /data/nfs/Baxter/Green/:/data
  #   ports:
  #     - 6980:6980
  #   restart: always
  #   healthcheck: # <- if on a qnap nas, the default health check will not work for you, domain name is the container_name
  #     test: [ "CMD-SHELL", "curl -f http://namer:6980/api/healthcheck || exit 1" ]
  #     interval: 1m
  #     timeout: 30s
  #     # retries: 3
  #     # start_period: 40s
  #   deploy:
  #     labels:
  #       - homepage.group=Green Door
  #       - homepage.name=Name
  #       - homepage.icon=pritunl.png
  #       - homepage.href=http://namer.netgrimoire.com
  #       - homepage.description=Media Manager
  #       # - kuma.jgf.http.name="Namer"
  #       # - kuma.jgf.http.url=http://namer:6980
  #     placement:
  #       constraints:
  #         - node.hostname == nas

networks:
  netgrimoire:
    external: true