moved unused to archive
This commit is contained in:
parent
5b03dbf90f
commit
275b8989ec
31 changed files with 0 additions and 918 deletions
@@ -1,45 +0,0 @@
dns.netgrimoire.com {
    reverse_proxy 192.168.5.7:5380/
}

webtop.netgrimoire.com {
    reverse_proxy http://webtop:3000
}


mail.netgrimoire.com, imap.netgrimoire.com, smtp.netgrimoire.com, autodiscover.netgrimoire.com, autoconfig.netgrimoire.com {
    reverse_proxy mailcow-nginx:80
}

mail.wasted-bandwidth.net, imap.wasted-bandwidth.net, smtp.wasted-bandwidth.net, autodiscover.wasted-bandwidth.net, autoconfig.wasted-bandwidth.net {
    reverse_proxy mailcow-nginx:80
}

mail.gnarlypandaproductions.com, imap.gnarlypandaproductions.com, smtp.gnarlypandaproductions.com, autodiscover.gnarlypandaproductions.com, autoconfig.gnarlypandaproductions.com {
    reverse_proxy mailcow-nginx:80
}


email.gnarlypandaproductions.com, email.netgrimoire.com, email.wasted-bandwidth.net {
    reverse_proxy mailcow-nginx:80/sogo/
}


(authentik) {
    # Always forward outpost path to actual outpost
    reverse_proxy /outpost.goauthentik.io/* http://authentik:9000

    # Forward authentication to outpost
    forward_auth http://authentik:9000 {
        uri /outpost.goauthentik.io/auth/caddy

        # Capitalization of the headers is important, otherwise they will be empty
        copy_headers X-Authentik-Username X-Authentik-Groups X-Authentik-Email X-Authentik-Name X-Authentik-Uid X-Authentik-Jwt X-Authentik-Meta-Jwks X-Authentik-Meta-Outpost X-Authentik-Meta-Provider X-Authentik-Meta-App X-Authentik-Meta-Version
    }
}
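For context: the services elsewhere in this repo attach themselves to the (authentik) snippet above through caddy-docker-proxy labels rather than by editing the Caddyfile directly. A minimal, hypothetical sketch of such a service (the service name "example", hostname example.netgrimoire.com, and port 8080 are illustrative and not part of this commit):

services:
  example:
    image: example/app:latest
    networks:
      - netgrimoire
    deploy:
      labels:
        caddy: example.netgrimoire.com
        caddy.import: authentik                    # pulls in the forward_auth block above
        caddy.reverse_proxy: "{{upstreams 8080}}"  # caddy-docker-proxy fills in the task IPs on port 8080

networks:
  netgrimoire:
    external: true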
@@ -1,38 +0,0 @@

services:

  watchstate:
    image: ghcr.io/arabcoders/watchstate:latest
    # To change the user/group id associated with the tool, change the following line.
    user: "1001:998"
    container_name: watchstate
    restart: unless-stopped
    ports:
      - "7980:8080" # The port on which the web UI will be available.
    volumes:
      - /DockerVol/watchstate:/config:rw # Mount the host data directory to the container's /config directory.
    networks:
      - netgrimoire
    deploy:
      labels:
        - homepage.group=Library
        - homepage.name=WatchState
        - homepage.icon=jellyfin.png
        - homepage.href=http://watchstate.netgrimoire.com
        - homepage.description=Media Server Sync
        - caddy=watchstate.netgrimoire.com
        - caddy.import=authentik
        - caddy.reverse_proxy="{{upstreams 8080}}"

      placement:
        constraints:
          - node.hostname == bruce


networks:
  netgrimoire:
    external: true
75 archive/authelia.yaml Normal file
@@ -0,0 +1,75 @@
version: "3.8"

services:
  authelia:
    image: authelia/authelia:latest
    container_name: authelia
    hostname: authelia
    # user: "1001:998"
    networks:
      - netgrimoire
    environment:
      - PUID=1001
      - PGID=998
      - TZ=America/Chicago
    volumes:
      - /DockerVol/authelia/config:/config
      - /DockerVol/authelia/secrets:/secrets
    ports:
      # Optional: usually you do NOT need to publish this if only Caddy talks to it
      # - "9091:9091"
      - target: 9091
        published: 9091
        protocol: tcp
        mode: ingress
    deploy:
      replicas: 1
      placement:
        constraints:
          - node.hostname == nas
      labels:
        # Caddy - Authelia portal URL
        - caddy=login.wasted-bandwidth.net
        - caddy.reverse_proxy={{upstreams 9091}}

        # Homepage
        - homepage.group=Management
        - homepage.name=Authelia
        - homepage.icon=authelia.png
        - homepage.href=https://login.wasted-bandwidth.net
        - homepage.description=SSO / Forward-Auth

        # Uptime Kuma
        - kuma.authelia.http.name="Authelia"
        - kuma.authelia.http.url=http://authelia:9091

    restart: unless-stopped
    depends_on:
      - redis

  redis:
    image: redis:7-alpine
    container_name: authelia-redis
    hostname: authelia-redis
    #user: "1001:998"
    networks:
      - netgrimoire
    environment:
      - TZ=America/Chicago
      - PUID=1001
      - PGID=998
    command: >
      sh -c "redis-server --appendonly yes --dir /data"
    volumes:
      - /DockerVol/authelia/redis:/data
    deploy:
      replicas: 1
      placement:
        constraints:
          - node.hostname == nas

    restart: unless-stopped

networks:
  netgrimoire:
    external: true
51 archive/bazarr.yaml Normal file
@@ -0,0 +1,51 @@
services:
  bazarr:
    image: lscr.io/linuxserver/bazarr:latest
    networks:
      - netgrimoire

    environment:
      TZ: America/Chicago
      PUID: "1964"
      PGID: "1964"
      # Match your template preference (run container as 1001:998)
      # user: "1001:998"

    volumes:
      - /DockerVol/bazarr/config:/config
      - /data/nfs/Baxter/Data/:/data:shared

    ports:
      - target: 6767
        published: 6767
        protocol: tcp
        mode: ingress

    deploy:
      placement:
        constraints:
          - node.hostname == docker4

      labels:
        # Homepage
        homepage.group: "Jolly Roger"
        homepage.name: "Bazarr"
        homepage.icon: "bazarr.png"
        homepage.href: "http://bazarr.netgrimoire.com"
        homepage.description: "Subtitle Search"

        # Kuma
        kuma.bazarr.http.name: "Bazarr"
        kuma.bazarr.http.url: "http://bazarr:6767"

        # Caddy
        caddy: "bazarr.netgrimoire.com"
        caddy.import: "authentik"
        caddy.reverse_proxy: "{{upstreams 6767}}"

        # Diun (image update monitoring)
        diun.enable: "true"

networks:
  netgrimoire:
    external: true
53 archive/beets.yaml Normal file
@@ -0,0 +1,53 @@
services:
  beets:
    image: lscr.io/linuxserver/beets:latest
    networks:
      - netgrimoire

    environment:
      TZ: America/Chicago
      PUID: "1964"
      PGID: "1964"

    # Match your standard UID:GID execution model
    #user: "1001:998"

    volumes:
      - /DockerVol/beets/config:/config
      - /data/nfs/Baxter/Data/media/music/Collection:/music
      - /data/nfs/Baxter/Data/media/music/ingest:/downloads

    ports:
      - target: 8337
        published: 8337
        protocol: tcp
        mode: ingress

    deploy:
      placement:
        constraints:
          - node.hostname == znas

      labels:
        # Homepage
        homepage.group: "Media Management"
        homepage.name: "Beets"
        homepage.icon: "beets.png"
        homepage.href: "https://beets.netgrimoire.com"
        homepage.description: "A Music Manager"

        # Kuma
        kuma.beets.http.name: "Beets"
        kuma.beets.http.url: "http://beets:8337"

        # Caddy
        caddy: "beets.netgrimoire.com"
        caddy.import: "authentik"
        caddy.reverse_proxy: "{{upstreams 8337}}"

        # Diun (image update monitoring)
        diun.enable: "true"

networks:
  netgrimoire:
    external: true
@@ -1,29 +0,0 @@

services:
  cadvisor:
    image: gcr.io/cadvisor/cadvisor:latest
    deploy:
      mode: global # Ensures it runs on every node
      resources:
        limits:
          memory: 512M
    ports:
      - "8066:8080" # Expose the cAdvisor UI
    volumes:
      - "/:/rootfs:ro"
      - "/var/run:/var/run:ro"
      - "/sys:/sys:ro"
      - "/var/lib/docker/:/var/lib/docker:ro"
      - "/dev/disk/:/dev/disk:ro"
    networks:
      - netgrimoire
    logging:
      driver: "gelf"
      options:
        gelf-address: "udp://192.168.5.17:12201"
        tag: "cadvisor"

networks:
  netgrimoire:
    external: true
@@ -1,45 +0,0 @@
services:
  glance-ical-api:
    image: ghcr.io/awildleon/glance-ical-events:v1.3.1
    hostname: glance-ical-api
    networks:
      - netgrimoire
    environment:
      - PUID=1001
      - PGID=998
      - TZ=America/Chicago
    # user: "1001:998"
    restart: unless-stopped
    deploy:
      labels:
        # -------------------
        # Caddy
        # -------------------
        - caddy=cal.netgrimoire.com
        - caddy.reverse_proxy="{{upstreams 8076}}"

        # -------------------
        # Diun
        # -------------------
        - diun.enable=true

        # -------------------
        # Homepage
        # -------------------
        - homepage.group=Dashboard
        - homepage.name=Calendar Events API
        - homepage.description=Google Calendar → Glance events bridge
        - homepage.icon=calendar.png
        - homepage.href=https://cal.netgrimoire.com

        # -------------------
        # Uptime Kuma
        # -------------------
        - kuma.glanceical.http.name="Glance iCal API"
        - kuma.glanceical.http.url=https://cal.netgrimoire.com/health

networks:
  netgrimoire:
    external: true
41 archive/cloudcmd.yaml Normal file
@@ -0,0 +1,41 @@
services:

  cloudcmd:
    image: coderaiser/cloudcmd
    container_name: cloudcmd
    hostname: cloudcmd
    networks:
      - netgrimoire
    ports:
      - 8000:8000
    environment:
      - PUID=1001
      - PGID=998
      - TZ=America/Chicago
    volumes:
      - ~:/root
      - /:/mnt/fs
    deploy:
      labels:
        - homepage.group=Application
        - homepage.name=Cloud Commander
        - homepage.icon=cloudcmd.png
        - homepage.href=http://commander.netgrimoire.com
        - homepage.description=Cloud Commander
        - kuma.cloud.http.name="Cloudcmd"
        - kuma.cloud.http.url=http://cloudcmd:8000
        - caddy=commander.netgrimoire.com
        - caddy.import=authentik
        - caddy.reverse_proxy="{{upstreams 8000}}"
      placement:
        constraints:
          - node.hostname == nas


networks:
  netgrimoire:
    external: true
48 archive/comixed.yaml Normal file
@@ -0,0 +1,48 @@
# /config is where plugins, extensions, and configuration files are placed.
# /imports is where comics to be imported are placed
# /library is where the comics library is maintained
# /more
version: "3.6"
services:
  comixed:
    image: comixed/comixed
    container_name: comixed
    environment:
      - PUID=1001
      - PGID=998
      - TZ=America/Chicago
    volumes:
      - /DockerVol/comixed/config:/config
      - /data/nfs/Baxter/Data/media/comics/library:/library:shared
      - /data/nfs/Baxter/Data/media/comics/downloads:/imports:shared
    ports:
      - 7171:7171
    entrypoint: ["bash", "/app/comixed-release/bin/docker-run.sh", "-L", "/library/comixed.log", "-c", "/config"]
    restart: unless-stopped
    networks:
      - netgrimoire

    deploy:
      labels:
        homepage.group: "Application"
        homepage.name: "Comixed"
        homepage.icon: "comixed.png"
        homepage.href: "https://comics.netgrimoire.com"
        homepage.description: "Comic Book Manager"
        kuma.cxd.http.name: "Comixed"
        kuma.cxd.http.url: "http://comixed:7171"
        caddy: "comics.netgrimoire.com"
        caddy.import: "authentik"
        caddy.reverse_proxy: "{{upstreams 7171}}"
      placement:
        constraints:
          - node.hostname == nas


networks:
  netgrimoire:
    external: true
60 archive/configarr.yaml Normal file
@@ -0,0 +1,60 @@
services:
  configarr:
    image: ghcr.io/raydak-labs/configarr:latest
    user: "1001:998"
    environment:
      PUID: "1001"
      PGID: "998"
      TZ: America/Chicago

    volumes:
      - /data/nfs/Baxter/Docker/configarr/config:/app/config
      - /data/nfs/Baxter/Docker/configarr/repos:/app/repos
      - /data/nfs/Baxter/Docker/configarr/cfs:/app/cfs
      - /data/nfs/Baxter/Docker/configarr/templates:/app/templates

    networks:
      - netgrimoire

    deploy:
      mode: replicated
      replicas: 1
      restart_policy:
        condition: any
        delay: 5s
        max_attempts: 3
        window: 30s

      labels:
        # -------------------------
        # Homepage Dashboard Labels
        # -------------------------
        homepage.group: "Jolly Roger"
        homepage.name: "Configarr"
        homepage.icon: "si-config"
        homepage.href: "https://configarr.netgrimoire.com"
        homepage.description: "Automatically sync TRaSH formats & configs"

        # -------------------------
        # Kuma Monitoring Labels
        # -------------------------
        kuma.configarr.http.name: "Configarr"
        kuma.configarr.http.url: "https://configarr.netgrimoire.com"

        # -------------------------
        # Caddy Reverse Proxy
        # (Swarm label syntax works the same)
        # -------------------------
        caddy: configarr.netgrimoire.com
        caddy.import: "authentik"

        caddy.reverse_proxy: "{{upstreams 8000}}"

        # -------------------------
        # Diun Image Monitoring
        # -------------------------
        diun.enable: "true"

networks:
  netgrimoire:
    external: true
@@ -1,35 +0,0 @@
services:
  crowdsec:
    image: crowdsecurity/crowdsec
    container_name: crowdsec
    networks:
      - netgrimoire
    volumes:
      - /export/Docker/crowdsec/config:/etc/crowdsec
      - /export/Docker/crowdsec/data:/var/lib/crowdsec/data
      - /export/Docker/caddy/logs:/var/log/caddy:ro # Mount Caddy logs
    environment:
      - COLLECTIONS=crowdsecurity/http-dos crowdsecurity/caddy

    deploy:
      placement:
        constraints:
          - node.hostname == nas

  # caddy-bouncer:
  #   image: crowdsecurity/caddy-bouncer
  #   container_name: caddy-bouncer
  #   restart: unless-stopped
  #   volumes:
  #     - /export/Docker/crowdsec/bouncer:/etc/caddy-bouncer
  #   networks:
  #     - netgrimoire

  #   deploy:
  #     placement:
  #       constraints:
  #         - node.hostname == nas

networks:
  netgrimoire:
    external: true
34 archive/dailytxt.yaml Normal file
@@ -0,0 +1,34 @@
services:
  dailytxt:
    # choose the correct image tag
    image: phitux/dailytxt:2.x.x
    container_name: dailytxt
    restart: unless-stopped
    volumes:
      # Change the left path to your needs
      - ./data:/data
    environment:
      # Create a secret token by running: openssl rand -base64 32
      - SECRET_TOKEN=...

      # If you want to have the json-files pretty-printed, set some indent.
      # (Otherwise just remove the line)
      - INDENT=4

      # Allow new user registrations.
      # I strongly recommend keeping this disabled except for the first user.
      # You can later temporarily enable it again in the admin panel.
      - ALLOW_REGISTRATION=true

      # Set the Admin-Password (for the admin panel).
      - ADMIN_PASSWORD=your_admin_password

      # After how many days shall the login cookie expire?
      - LOGOUT_AFTER_DAYS=40

      # Set the BASE_PATH if you are running DailyTxT under a subpath (e.g. /dailytxt).
      # - BASE_PATH=/dailytxt
    ports:
      # Change the left port to your needs.
      # You would often see just 8000:80, but that way port 8000 is publicly accessible (without TLS!).
      - 127.0.0.1:8000:80
47 archive/dumbterm.yaml Normal file
@@ -0,0 +1,47 @@
services:
  dumbterm:
    container_name: dumbterm
    image: dumbwareio/dumbterm:latest
    ports:
      - 8062:3000
    networks:
      - netgrimoire
    # user: 1001:998
    environment:
      # Container timezone
      TZ: America/Chicago
      # The title shown in the web interface
      SITE_TITLE: https://cli.netgrimoire.com
      # Optional PIN protection (leave empty to disable)
      DUMBTERM_PIN: 2810
      # The base URL for the application
      BASE_URL: http://localhost:3000 # Use ALLOWED_ORIGINS below to restrict cors to specific origins
      ENABLE_STARSHIP: "true" # Enable starship prompt
      LOCKOUT_TIME: 15 # Minutes
      MAX_SESSION_AGE: 24 # Hours


    deploy:
      labels:
        - homepage.group=Remote Access
        - homepage.name=Dumbterm
        - homepage.icon=dumbterm.png
        - homepage.href=https://cli.netgrimoire.com
        - homepage.description=Terminal
        - kuma.cli.http.name="dumbterm"
        - kuma.cli.http.url=http://dumbterm:3000
        - caddy=cli.netgrimoire.com
        - caddy.import=authentik
        - caddy.reverse_proxy="{{upstreams 3000}}"

    volumes:
      #- /data/nfs/Baxter/Docker/dumbterm/config:/root/.config
      #- /data/nfs/Baxter/Docker/dumbterm/:/root/data
      - /data/nfs/Baxter/Docker/dumbterm/root:/root


networks:
  netgrimoire:
    external: true
42 archive/freshrss.yaml Normal file
@@ -0,0 +1,42 @@
services:
  freshrss:
    image: lscr.io/linuxserver/freshrss:latest
    container_name: freshrss
    user: "1001:998"
    # user: "0:0"
    environment:
      TZ: "America/Chicago"
      PUID: "1001"
      PGID: "998"

    networks:
      - netgrimoire

    volumes:
      - /data/nfs/Baxter/Docker/freshrss:/config

    deploy:
      endpoint_mode: dnsrr
      labels:
        # Homepage
        homepage.group: "Services"
        homepage.name: "FreshRSS"
        homepage.icon: "rss"
        homepage.href: "https://rss.netgrimoire.com"

        # Kuma integration
        kuma.freshrss.http.name: "FreshRSS"
        kuma.freshrss.http.url: "https://rss.netgrimoire.com"

        # Caddy (replace with your domain)
        caddy: "https://rss.netgrimoire.com"
        caddy.reverse_proxy: "{{upstreams 80}}"

        # Diun
        diun.enable: "true"

    restart: unless-stopped

networks:
  netgrimoire:
    external: true
29 archive/gitrunner.yaml Normal file
@@ -0,0 +1,29 @@
version: "3.9"

services:
  forgejo-runner:
    image: data.forgejo.org/forgejo/runner:4.0.0
    user: "1001:998"
    environment:
      PUID: "1001"
      PGID: "998"
      TZ: "America/Chicago"
    volumes:
      - type: bind
        source: /DockerVol/forgejo-runner
        target: /data
      - type: bind
        source: /var/run/docker.sock
        target: /var/run/docker.sock
    networks:
      - netgrimoire
    command: ["forgejo-runner", "daemon"]
    deploy:
      mode: global
      placement:
        constraints:
          - node.role == manager

networks:
  netgrimoire:
    external: true
51 archive/kavita.yaml Normal file
@@ -0,0 +1,51 @@
services:
  kavita:
    image: jvmilazz0/kavita:latest # Change latest to nightly for latest develop builds (can't go back to stable)
    networks:
      - netgrimoire

    volumes:
      - /data/nfs/Baxter/Data/media/comics:/comics:shared # Use as many as you want
      # - ./books:/books
      - /data/nfs/Baxter/Docker/Kavita/config:/kavita/config # Change the host path if you want the config files in a different place.
      # /kavita/config must not be changed

    environment:
      TZ: America/Chicago
      PUID: "1001"
      PGID: "998"

    # Match your standard UID:GID execution model
    #user: "1001:998"

    ports:
      # Change the published port (8054) if you have conflicts with other services
      - target: 5000
        published: 8054
        protocol: tcp
        mode: ingress

    deploy:
      labels:
        # Homepage
        homepage.group: "PNCHarris Apps"
        homepage.name: "Kavita"
        homepage.icon: "kavita.png"
        homepage.href: "https://kavita.netgrimoire.com"
        homepage.description: "Comic Book Reader"

        # Kuma
        kuma.kavita.http.name: "Kavita"
        kuma.kavita.http.url: "http://kavita:5000"

        # Caddy
        caddy: "kavita.netgrimoire.com"
        caddy.import: "authentik"
        caddy.reverse_proxy: "kavita:5000"

        # Diun (image update monitoring)
        diun.enable: "true"

networks:
  netgrimoire:
    external: true
@@ -1,134 +0,0 @@
####################################
# 🦎 KOMODO COMPOSE - VARIABLES 🦎 #
####################################

## These compose variables can be used with all Komodo deployment options.
## Pass these variables to the compose up command using `--env-file komodo/compose.env`.
## Additionally, they are passed to both Komodo Core and Komodo Periphery with `env_file: ./compose.env`,
## so you can pass any additional environment variables to Core / Periphery directly in this file as well.

## Stick to a specific version, or use `latest`
COMPOSE_KOMODO_IMAGE_TAG=latest

## Note: 🚨 Podman does NOT support the local logging driver 🚨. See Podman options here:
## `https://docs.podman.io/en/v4.6.1/markdown/podman-run.1.html#log-driver-driver`
COMPOSE_LOGGING_DRIVER=local # Enable log rotation with the local driver.

## DB credentials - Ignored for Sqlite
KOMODO_DB_USERNAME=admin
KOMODO_DB_PASSWORD=admin

## Configure a secure passkey to authenticate between Core / Periphery.
KOMODO_PASSKEY=komodo_Passkey

#=-------------------------=#
#= Komodo Core Environment =#
#=-------------------------=#

## Full variable list + descriptions are available here:
## 🦎 https://github.com/moghtech/komodo/blob/main/config/core.config.toml 🦎

## Note: secret variables also support `${VARIABLE}_FILE` syntax to pass docker compose secrets.
## Docs: https://docs.docker.com/compose/how-tos/use-secrets/#examples

## Used for Oauth / Webhook url suggestion / Caddy reverse proxy.
KOMODO_HOST=https://komodo.netgrimoire.com
## Displayed in the browser tab.
KOMODO_TITLE=Komodo
## Create a server matching this address as the "first server".
## Use `https://host.docker.internal:8120` when using systemd-managed Periphery.
KOMODO_FIRST_SERVER=https://periphery:8120
## Make all buttons just double-click, rather than the full confirmation dialog.
KOMODO_DISABLE_CONFIRM_DIALOG=false

## Rate at which Komodo polls your servers for
## status / container status / system stats / alerting.
## Options: 1-sec, 5-sec, 15-sec, 1-min, 5-min.
## Default: 15-sec
KOMODO_MONITORING_INTERVAL="15-sec"
## Rate at which Komodo polls Resources for updates,
## like an outdated commit hash.
## Options: 1-min, 5-min, 15-min, 30-min, 1-hr.
## Default: 5-min
KOMODO_RESOURCE_POLL_INTERVAL="5-min"

## Used to auth incoming webhooks. Alt: KOMODO_WEBHOOK_SECRET_FILE
KOMODO_WEBHOOK_SECRET=a_random_secret
## Used to generate jwt. Alt: KOMODO_JWT_SECRET_FILE
KOMODO_JWT_SECRET=a_random_jwt_secret

## Enable login with username + password.
KOMODO_LOCAL_AUTH=true
## Disable new user signups.
KOMODO_DISABLE_USER_REGISTRATION=false
## All new logins are auto enabled
KOMODO_ENABLE_NEW_USERS=true
## Disable non-admins from creating new resources.
KOMODO_DISABLE_NON_ADMIN_CREATE=false
## Allows all users to have Read level access to all resources.
KOMODO_TRANSPARENT_MODE=false

## Time to live for jwt tokens.
## Options: 1-hr, 12-hr, 1-day, 3-day, 1-wk, 2-wk
KOMODO_JWT_TTL="1-day"

## OIDC Login
KOMODO_OIDC_ENABLED=false
## Must be reachable from the Komodo Core container
# KOMODO_OIDC_PROVIDER=https://oidc.provider.internal/application/o/komodo
## Change the host to one reachable by users (optional if it is the same as above).
## DO NOT include the `path` part of the URL.
# KOMODO_OIDC_REDIRECT_HOST=https://oidc.provider.external
## Your OIDC client id
# KOMODO_OIDC_CLIENT_ID= # Alt: KOMODO_OIDC_CLIENT_ID_FILE
## Your OIDC client secret.
## If your provider supports PKCE flow, this can be omitted.
# KOMODO_OIDC_CLIENT_SECRET= # Alt: KOMODO_OIDC_CLIENT_SECRET_FILE
## Make usernames the full email.
## Note: this does not work for all OIDC providers.
# KOMODO_OIDC_USE_FULL_EMAIL=true
## Add additional trusted audiences for token claims verification.
## Supports a comma-separated list, and passing with _FILE (for compose secrets).
# KOMODO_OIDC_ADDITIONAL_AUDIENCES=abc,123 # Alt: KOMODO_OIDC_ADDITIONAL_AUDIENCES_FILE

## Github Oauth
KOMODO_GITHUB_OAUTH_ENABLED=false
# KOMODO_GITHUB_OAUTH_ID= # Alt: KOMODO_GITHUB_OAUTH_ID_FILE
# KOMODO_GITHUB_OAUTH_SECRET= # Alt: KOMODO_GITHUB_OAUTH_SECRET_FILE

## Google Oauth
KOMODO_GOOGLE_OAUTH_ENABLED=false
# KOMODO_GOOGLE_OAUTH_ID= # Alt: KOMODO_GOOGLE_OAUTH_ID_FILE
# KOMODO_GOOGLE_OAUTH_SECRET= # Alt: KOMODO_GOOGLE_OAUTH_SECRET_FILE

## Aws - Used to launch Builder instances and ServerTemplate instances.
KOMODO_AWS_ACCESS_KEY_ID= # Alt: KOMODO_AWS_ACCESS_KEY_ID_FILE
KOMODO_AWS_SECRET_ACCESS_KEY= # Alt: KOMODO_AWS_SECRET_ACCESS_KEY_FILE

## Hetzner - Used to launch ServerTemplate instances
## Hetzner Builder not supported due to Hetzner's pay-by-the-hour pricing model
KOMODO_HETZNER_TOKEN= # Alt: KOMODO_HETZNER_TOKEN_FILE

#=------------------------------=#
#= Komodo Periphery Environment =#
#=------------------------------=#

## Full variable list + descriptions are available here:
## 🦎 https://github.com/moghtech/komodo/blob/main/config/periphery.config.toml 🦎

## Periphery passkeys must include KOMODO_PASSKEY to authenticate.
PERIPHERY_PASSKEYS=${KOMODO_PASSKEY}

## Specify the root directory used by the Periphery agent.
PERIPHERY_ROOT_DIRECTORY=/etc/komodo

## Enable SSL using self-signed certificates.
## Connect to Periphery at https://address:8120.
PERIPHERY_SSL_ENABLED=true

## If the disk size is over-reporting, you can use one of these to
## whitelist / blacklist the disks to filter them, whichever is easier.
## Accepts a comma-separated list of paths.
## Usually whitelisting just /etc/hostname gives the correct size.
PERIPHERY_INCLUDE_DISK_MOUNTS=/etc/hostname
# PERIPHERY_EXCLUDE_DISK_MOUNTS=/snap,/etc/repos
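The comments above note that this env file is consumed both on the compose command line (`--env-file komodo/compose.env`) and by the containers via `env_file`. A minimal, hypothetical sketch of that wiring, assuming the file sits next to the stack file (the actual services are defined in the Komodo compose file that follows):

services:
  komodo_core:
    image: ghcr.io/moghtech/komodo-core:${COMPOSE_KOMODO_IMAGE_TAG:-latest}
    env_file: ./compose.env   # supplies the KOMODO_* settings to Core
  periphery:
    image: ghcr.io/moghtech/komodo-periphery:${COMPOSE_KOMODO_IMAGE_TAG:-latest}
    env_file: ./compose.env   # PERIPHERY_PASSKEYS picks up ${KOMODO_PASSKEY}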
@@ -1,115 +0,0 @@
################################
# 🦎 KOMODO COMPOSE - MONGO 🦎 #
################################

## This compose file will deploy:
## 1. MongoDB
## 2. Komodo Core
## 3. Komodo Periphery
##
## Load the .env file first
# # set -a
# # source .env
# # set +a

services:
  komodo_mongo:
    image: mongo
    labels:
      komodo.skip: # Prevent Komodo from stopping with StopAllContainers
    command: --quiet --wiredTigerCacheSizeGB 0.25
    restart: unless-stopped
    logging:
      driver: ${COMPOSE_LOGGING_DRIVER:-local}
    ports:
      - 27017:27017
    volumes:
      - /DockerVol/komodo/mongo-data:/data/db
      - /DockerVol/komodo/mongo-config:/data/configdb
    environment:
      MONGO_INITDB_ROOT_USERNAME: ${KOMODO_DB_USERNAME}
      MONGO_INITDB_ROOT_PASSWORD: ${KOMODO_DB_PASSWORD}
    networks:
      - komodo
    deploy:
      placement:
        constraints:
          - node.hostname == docker2

  komodo_core:
    image: ghcr.io/moghtech/komodo-core:${COMPOSE_KOMODO_IMAGE_TAG:-latest}
    labels:
      komodo.skip: # Prevent Komodo from stopping with StopAllContainers
    restart: unless-stopped
    depends_on:
      - komodo_mongo
    logging:
      driver: ${COMPOSE_LOGGING_DRIVER:-local}
    ports:
      - 9120:9120
    networks:
      - komodo
      - netgrimoire
    #env_file: ./compose.env
    environment:
      KOMODO_DATABASE_ADDRESS: komodo_mongo:27017
      KOMODO_DATABASE_USERNAME: ${KOMODO_DB_USERNAME}
      KOMODO_DATABASE_PASSWORD: ${KOMODO_DB_PASSWORD}
    volumes:
      ## Core cache for repos for latest commit hash / contents
      - /DockerVol/komodo/repo-cache:/repo-cache
      ## Store sync files on server
      # - /path/to/syncs:/syncs
      ## Optionally mount a custom core.config.toml
      - /DockerVol/komodo/config.toml:/config/config.toml
    ## Allows for systemd Periphery connection at
    ## "http://host.docker.internal:8120"
    # extra_hosts:
    #   - host.docker.internal:host-gateway

    ## Deploy the Periphery container using the block below,
    ## or deploy the Periphery binary with systemd using
    ## https://github.com/moghtech/komodo/tree/main/scripts

    deploy:
      placement:
        constraints:
          - node.hostname == docker2

  periphery:
    image: ghcr.io/moghtech/komodo-periphery:${COMPOSE_KOMODO_IMAGE_TAG:-latest}
    labels:
      komodo.skip: # Prevent Komodo from stopping with StopAllContainers
    restart: unless-stopped
    logging:
      driver: ${COMPOSE_LOGGING_DRIVER:-local}
    #env_file: ./compose.env
    environment:
      PERIPHERY_REPO_DIR: /DockerVol/komodo/repos:/etc/komodo/repos
      PERIPHERY_STACK_DIR: /DockerVol/komodo/stacks:/etc/komodo/stacks
      # PERIPHERY_SSL_KEY_FILE: ${PERIPHERY_ROOT_DIRECTORY:-/etc/komodo}/ssl/key.pem
      # PERIPHERY_SSL_CERT_FILE: ${PERIPHERY_ROOT_DIRECTORY:-/etc/komodo}/ssl/cert.pem
    networks:
      - komodo
    volumes:
      ## Mount external docker socket
      - /var/run/docker.sock:/var/run/docker.sock
      ## Allow Periphery to see processes outside of container
      - /proc:/proc
      ## Specify the Periphery agent root directory.
      ## Must be the same inside and outside the container,
      ## or docker will get confused. See https://github.com/moghtech/komodo/discussions/180.
      ## Default: /etc/komodo.
      - /etc/komodo:/etc/komodo
    deploy:
      placement:
        constraints:
          - node.hostname == docker2

networks:
  netgrimoire:
    external: true
  komodo:
    external: true
@@ -1,48 +0,0 @@
services:
  nessus-essentials:
    image: tenable/nessus:latest-ubuntu
    container_name: nessus-essentials
    ports:
      - "8834:8834"
    networks:
      - netgrimoire
    environment:
      - USERNAME=admin
      - PASSWORD=admin
      # - ACTIVATION_CODE=3RH7-HX4R-DKS6-EJFK-GNWG
    # restart: unless-stopped
    volumes:
      - data:/opt/nessus/var/nessus

    deploy:
      labels:
        - homepage.group=Monitoring
        - homepage.name=Nessus
        - homepage.icon=phoneinfoga.png
        - homepage.href=http://nessus.netgrimoire.com
        - homepage.description=Vulnerability Scanner
        - caddy=nessus.netgrimoire.com
        - caddy.import=authentik
        # - caddy.reverse_proxy="{{upstreams 8834}}"
        # - caddy.reverse_proxy=https://nessus-essentials:8834
        # - caddy.tls_insecure_skip_verify=true # Allow self-signed certs
        - caddy.reverse_proxy=https://nessus-essentials:8834
        - caddy.reverse_proxy.transport=http
        - caddy.reverse_proxy.transport.tls
        - caddy.reverse_proxy.transport.tls_insecure_skip_verify
      placement:
        constraints:
          - node.hostname == docker2

networks:
  netgrimoire:
    external: true

volumes:
  data:
    driver: local
    driver_opts:
      type: none
      o: bind
      device: /DockerVol/nessus
@@ -1,55 +0,0 @@
services:
  nexterm:
    ports:
      - "6989:6989"
    container_name: nexterm
    image: germannewsmaker/nexterm:1.0.2-OPEN-PREVIEW
    restart: always

    environment:
      PUID: 1001
      PGID: 998
      TZ: America/Chicago
      MAX_WORKERS: 1
      WEB_CONCURRENCY: 1
    networks:
      - netgrimoire
    deploy:
      labels:
        - homepage.group=Remote Access
        - homepage.name=Nexterm
        - homepage.icon=nexterm.png
        - homepage.href=http://nexterm.netgrimoire.com
        - homepage.description=Remote Access
        - kuma.nxterm.http.name="Nexterm"
        - kuma.nxterm.http.url=http://nexterm:6989
        - caddy=nexterm.netgrimoire.com
        - caddy.import=authentik
        - caddy.reverse_proxy="{{upstreams 6989}}"

    volumes:
      - /data/nfs/Baxter/Docker/nexterm:/app/data


networks:
  netgrimoire:
    external: true
@@ -1,96 +0,0 @@

services:
  peekaping-postgres:
    image: postgres:17
    networks:
      - netgrimoire
    volumes:
      - /data/nfs/Baxter/Docker/peekaping/postgres:/var/lib/postgresql/data
    environment:
      - POSTGRES_USER=peekaping
      - POSTGRES_PASSWORD=peekapingpass
      - POSTGRES_DB=peekaping
      - TZ=America/Chicago
    user: "1001:998"
    deploy:
      restart_policy:
        condition: on-failure
      labels:
        diun.enable: "true"

  migrate:
    image: 0xfurai/peekaping-migrate:latest
    environment:
      - DB_USER=peekaping
      - DB_PASS=peekapingpass
      - DB_NAME=peekaping
      - DB_HOST=peekaping-postgres # <-- fix
      - DB_TYPE=postgres
      - DB_PORT=5432
      - TZ=America/Chicago
      - SERVER_PORT=8034
      - SERVER_HOST=0.0.0.0
      - MODE=prod
    user: "1001:998"
    networks:
      - netgrimoire
    deploy:
      replicas: 0
      restart_policy:
        condition: none

  peekaping-server:
    image: 0xfurai/peekaping-server:latest
    networks:
      - netgrimoire
    environment:
      - DB_USER=peekaping
      - DB_PASS=peekapingpass
      - DB_NAME=peekaping
      - DB_HOST=peekaping-postgres # <-- fix
      - DB_TYPE=postgres
      - DB_PORT=5432
      - TZ=America/Chicago
      - SERVER_PORT=8034
      - SERVER_HOST=0.0.0.0
      - MODE=prod
    deploy:
      restart_policy:
        condition: on-failure
      labels:
        diun.enable: "true"
        # Caddy
        caddy: peekaping.netgrimoire.com
        caddy.import: authentik
        caddy.reverse_proxy: "{{upstreams 8034}}"

  web:
    image: 0xfurai/peekaping-web:latest
    networks:
      - netgrimoire
    user: "1001:998"
    deploy:
      restart_policy:
        condition: on-failure
      labels:
        # Homepage
        homepage.group: "Monitoring"
        homepage.name: "Peekaping"
        homepage.icon: "peekaping.png"
        homepage.href: "https://peekaping.netgrimoire.com"
        homepage.description: "Simple ICMP/HTTP monitor"

        # Kuma integration
        kuma.monitoring.http.name: "Peekaping"
        kuma.monitoring.http.url: "http://peekaping-server:8034" # <-- fix

        # Peekaping self-monitor
        peekaping.name: "Peekaping"
        peekaping.url: "http://peekaping-server:8034" # <-- fix

        # Diun
        diun.enable: "true"

networks:
  netgrimoire:
    external: true
53 archive/pinchflat.yaml Normal file
@@ -0,0 +1,53 @@
services:
  pinchflat:
    image: ghcr.io/kieraneglin/pinchflat:latest
    networks:
      - netgrimoire

    environment:
      # Set the timezone to your local timezone
      TZ: America/Chicago
      PUID: "1001"
      PGID: "998"

    # Match your standard UID:GID execution model
    #user: "1001:998"

    ports:
      - target: 8945
        published: 8945
        protocol: tcp
        mode: ingress

    volumes:
      - /DockerVol/pinchflat/config:/config
      - /data/nfs/Baxter/Data/media/other/pinchflat:/downloads

    deploy:
      placement:
        constraints:
          - node.hostname == nas

      labels:
        # Homepage
        homepage.group: "Downloaders"
        homepage.name: "PinchFlat"
        homepage.icon: "pinchflat.png"
        homepage.href: "https://pinchflat.netgrimoire.com"
        homepage.description: "YouTube Library"

        # Kuma
        kuma.pf.http.name: "PinchFlat"
        kuma.pf.http.url: "http://pinchflat:8945"

        # Caddy
        caddy: "pinchflat.netgrimoire.com"
        caddy.import: "authentik"
        caddy.reverse_proxy: "pinchflat:8945"

        # Diun (image update monitoring)
        diun.enable: "true"

networks:
  netgrimoire:
    external: true
@@ -1,38 +0,0 @@
services:
  plex:
    image: plexinc/pms-docker
    container_name: plex
    network_mode: host
    environment:
      - PUID=1001
      - PGID=998
      - TZ=America/Chicago
      - VERSION=docker
      - ADVERTISE_IP="http://192.168.5.134:32401/"
      # - PLEX_CLAIM= #optional
    ports:
      - 32401:32400
    networks:
      - netgrimoire
    volumes:
      - /DockerVol/Plex:/config
      - /data/nfs/Baxter/Data/media
    restart: unless-stopped
    deploy:
      labels:
        - homepage.group=Library
        - homepage.name=Plex Media Server
        - homepage.icon=plex.png
        - homepage.href=http://plex.netgrimoire.com:32401/web/index.html
        - homepage.description=Media server
        - kuma.homepage.http.name="plex"
        - kuma.homepage.http.url=http://plex:32401
        # - caddy=plex.netgrimoire.com
        # # - caddy.import=authentik
        # - caddy.reverse_proxy="{{upstreams 3000}}"
      placement:
        constraints:
          - node.hostname == bruce
networks:
  netgrimoire:
    external: true
59 archive/profilarr.yaml Normal file
@@ -0,0 +1,59 @@
services:
  profilarr:
    image: santiagosayshey/profilarr:latest
    container_name: profilarr

    #user: "1001:998"
    environment:
      - PUID=1001
      - PGID=998
      - TZ=America/Chicago

    ports:
      - "6868:6868"

    volumes:
      - /DockerVol/profilarr:/config

    restart: unless-stopped

    networks:
      - netgrimoire

    deploy:
      placement:
        constraints:
          - node.hostname == docker4

      labels:
        # -------------------------
        # Diun (image update notify)
        # -------------------------
        diun.enable: "true"

        # -------------------------
        # Uptime Kuma (your standard labels)
        # -------------------------
        kuma.profilarr.http.name: "profilarr"
        kuma.profilarr.http.url: "http://profilarr.netgrimoire.com"

        # -------------------------
        # Homepage (no homepage.weight per your preference)
        # -------------------------
        homepage.group: "Jolly Roger"
        homepage.name: "Profilarr"
        homepage.description: "Profilarr"
        homepage.href: "https://profilarr.netgrimoire.com"
        homepage.icon: "profilarr"

        # -------------------------
        # Caddy (your domain + authentik import)
        # -------------------------
        caddy: "profilarr.netgrimoire.com"
        caddy.reverse_proxy: "{{upstreams 6868}}"
        caddy.import: "authentik"

networks:
  netgrimoire:
    external: true
40 archive/readarr.yaml Normal file
@@ -0,0 +1,40 @@
services:
  readarr:
    image: blampe/rreading-glasses:latest
    container_name: readarr
    hostname: readarr
    environment:
      - PUID=1001
      - PGID=998
      - TZ=America/Chicago
    networks:
      - netgrimoire
    volumes:
      - /DockerVol/readarr/config:/config
      - /data/nfs/Baxter/Data:/data:shared
    ports:
      - 8787:8787
    restart: unless-stopped
    deploy:
      placement:
        constraints:
          - node.hostname == docker4
      labels:
        - homepage.group=Jolly Roger
        - homepage.name=Readarr
        - homepage.icon=readarr.png
        - homepage.href=http://readarr.netgrimoire.com
        - homepage.description=Ebook Library
        - homepage.widget.type=readarr
        - homepage.widget.url=http://readarr:8787
        - homepage.widget.key=78954fcf696e4da9b2e9391a54e87478
        - kuma.readarr.http.name="Readarr"
        - kuma.readarr.http.url=http://readarr:8787
        - caddy=readarr.netgrimoire.com
        - caddy.import=authentik
        - caddy.reverse_proxy="{{upstreams 8787}}"


networks:
  netgrimoire:
    external: true
53 archive/recyclarr.yaml Normal file
@@ -0,0 +1,53 @@
version: "3.8"

services:
  recyclarr:
    image: ghcr.io/recyclarr/recyclarr:latest
    user: "1001:998"
    environment:
      - PUID=1001
      - PGID=998
      - TZ=America/Chicago

      # Run on an interval (simple + reliable)
      # Adjust to taste: 6h, 12h, 24h, etc.
      - RECYCLARR_RUN_MODE=interval
      - RECYCLARR_INTERVAL=12h

      # Optional: if you want extra logging
      # - RECYCLARR_LOG_LEVEL=Information

    volumes:
      - /data/nfs/Baxter/Docker/recyclarr:/config

    networks:
      - netgrimoire

    deploy:
      placement:
        constraints:
          - node.hostname == docker4
      mode: replicated
      replicas: 1

      restart_policy:
        condition: any
        delay: 10s
        max_attempts: 0
        window: 30s

      update_config:
        order: start-first
        parallelism: 1
        delay: 10s

      labels:
        # -------------------------
        # Diun (image update notify)
        # -------------------------
        diun.enable: "true"

networks:
  netgrimoire:
    external: true
@@ -1,33 +0,0 @@

version: "3.9"

services:
  forgejo-runner:
    image: code.forgejo.org/forgejo/runner:latest
    container_name: forgejo-runner
    networks:
      - netgrimoire
    restart: unless-stopped
    user: "1001:998"
    environment:
      TZ: America/Chicago
      RUNNER_NAME: docker2-runner
      RUNNER_LABELS: swarm
      FORGEJO_URL: https://git.netgrimoire.com
      FORGEJO_TOKEN: QFDPcVXHYQrm8FJj9n4Olp9R5U3Q3GwM56VThGx8
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
      - /DockerVol/runner:/data
    command: >
      forgejo-runner daemon --config /data/config.yml
    deploy:
      labels:
        kuma.forgejo-runner.http.name: "Forgejo Runner"
        kuma.forgejo-runner.http.url: "http://docker2:8080"
      placement:
        constraints:
          - node.hostname == docker2

networks:
  netgrimoire:
    external: true
@@ -1,53 +0,0 @@
services:
  sshwifty:
    container_name: sshwifty
    image: niruix/sshwifty:latest
    restart: always
    ports:
      - 8182:8182
    environment:
      PUID: 1001
      PGID: 998
      TZ: America/Chicago
      ALLOW_SIGNUP: "true"
      MAX_WORKERS: 1
      WEB_CONCURRENCY: 1
    networks:
      - netgrimoire
    deploy:
      labels:
        - homepage.group=Remote Access
        - homepage.name=sshwifty
        - homepage.icon=sshwifty.png
        - homepage.href=http://ssh.netgrimoire.com
        - homepage.description=SSH Access
        - kuma.sshwifty.http.name="sshwifty"
        - kuma.sshwifty.http.url=http://sshwifty:8182
        - caddy=ssh.netgrimoire.com
        - caddy.import=authentik
        - caddy.reverse_proxy="{{upstreams 8182}}"


networks:
  netgrimoire:
    external: true
41 archive/tunarr.yaml Normal file
@@ -0,0 +1,41 @@
services:
  tunarr:
    image: chrisbenincasa/tunarr
    container_name: tunarr
    hostname: tunarr
    networks:
      - netgrimoire
    ports:
      - "8051:8000" # Web UI
      - "8052:8001" # Streaming endpoint
    volumes:
      - /data/nfs/Baxter/Docker/tunarr/config:/config
      - /data/nfs/Baxter/Data/:/media # Adjust to your media directory
    environment:
      - TZ=America/Chicago
      - PGID=998
      - PUID=1001
    user: "1001:998"
    restart: unless-stopped
    deploy:
      labels:
        - homepage.group=Jolly Roger
        - homepage.name=Tunarr
        - homepage.icon=tunarr.png
        - homepage.href=https://tunarr.netgrimoire.com
        - homepage.description=Virtual TV Channel Creator
        - kuma.tunarr.http.name="Tunarr"
        - kuma.tunarr.http.url=http://tunarr:8000
        - caddy=tunarr.netgrimoire.com
        - caddy.import=authentik
        - caddy.reverse_proxy="{{upstreams 8000}}"
      placement:
        constraints:
          - node.labels.general == true

networks:
  netgrimoire:
    external: true
@@ -1,113 +0,0 @@
# services:

#   wordpress:
#     image: wordpress
#     restart: always
#     ports:
#       - 8091:80
#     environment:
#       WORDPRESS_DB_HOST: wpdb
#       WORDPRESS_DB_USER: traveler
#       WORDPRESS_DB_PASSWORD: F@lcon13
#       WORDPRESS_DB_NAME: wp
#     volumes:
#       - /data/nfs/Baxter/Docker/wp-pnc/wp:/var/www/html
#     networks:
#       - netgrimoire
#     deploy:
#       labels:
#         - homepage.group=Application
#         - homepage.name=PNCClassified
#         - homepage.icon=wallos.png
#         - homepage.href=http://classifieds.pncfishandmore.com
#         - homepage.description=Classifieds
#         - kuma.wp.http.name="classified"
#         - kuma.wallos.http.url=http://wordpress:80
#         - caddy=classifieds.pncfishandmore.com
#         - caddy.import=authentik
#         - caddy.reverse_proxy="{{upstreams 80}}"

#   wpdb:
#     image: mysql:8.0
#     restart: always
#     environment:
#       MYSQL_DATABASE: wp
#       MYSQL_USER: traveler
#       MYSQL_PASSWORD: F@lcon13
#       MYSQL_RANDOM_ROOT_PASSWORD: '1'
#     volumes:
#       - /data/nfs/Baxter/Docker/wp-pnc/sql:/var/lib/mysql
#     networks:
#       - netgrimoire


# volumes:
#   wordpress:
#   db:

# networks:
#   netgrimoire:
#     external: true

services:
  wordpress:
    image: wordpress
    restart: always
    ports:
      - 8091:80
    environment:
      WORDPRESS_DB_HOST: wpdb
      WORDPRESS_DB_USER: traveler
      WORDPRESS_DB_PASSWORD: F@lcon13
      WORDPRESS_DB_NAME: wp
      # Revised proxy configuration
      WORDPRESS_CONFIG_EXTRA: |
        define('FORCE_SSL_ADMIN', true);
        define('WP_HOME', 'https://classifieds.pncfishandmore.com');
        define('WP_SITEURL', 'https://classifieds.pncfishandmore.com');
        define('FORCE_SSL', true);
        define('WP_PROXY_HOST', 'caddy');
        define('WP_PROXY_PORT', '443');
        define('WP_PROXY_SSL', true);
    volumes:
      - /data/nfs/Baxter/Docker/wp-pnc/wp:/var/www/html
    networks:
      - netgrimoire
    deploy:
      labels:
        - homepage.group=Application
        - homepage.name=PNCClassified
        - homepage.icon=wallos.png
        - homepage.href=https://classifieds.pncfishandmore.com
        - homepage.description=Classifieds
        - kuma.wp.http.name="classified"
        - kuma.wallos.http.url=http://wordpress:80
        - caddy=classifieds.pncfishandmore.com
        #- caddy.import=authentik
        - caddy.reverse_proxy="{{upstreams 80}}"
        - "caddy.header=Strict-Transport-Security \"max-age=31536000; includeSubDomains\""
        - "caddy.header=X-Forwarded-Proto {http.request.scheme}"
        - "caddy.header=X-Real-IP {http.request.remote.host}"
        - "caddy.header=X-Forwarded-For {http.request.remote.host}"
        - "caddy.header=Host {http.request.host}"

  wpdb:
    image: mysql:8.0
    restart: always
    environment:
      MYSQL_DATABASE: wp
      MYSQL_USER: traveler
      MYSQL_PASSWORD: F@lcon13
      MYSQL_RANDOM_ROOT_PASSWORD: '1'
    volumes:
      - /data/nfs/Baxter/Docker/wp-pnc/sql:/var/lib/mysql
    networks:
      - netgrimoire

volumes:
  wordpress:
  db:

networks:
  netgrimoire:
    external: true