Folder reorg in prep for using git runner

This commit is contained in:
traveler 2026-01-10 21:25:19 -06:00
parent 304e85e450
commit 1a3749112d
102 changed files with 833 additions and 244 deletions

archive/Caddyfile.old Executable file

@@ -0,0 +1,45 @@
dns.netgrimoire.com {
	# Trailing slash removed: Caddy upstream addresses must not include a path.
	reverse_proxy 192.168.5.7:5380
}
webtop.netgrimoire.com {
	reverse_proxy http://webtop:3000
}
mail.netgrimoire.com, imap.netgrimoire.com, smtp.netgrimoire.com, autodiscover.netgrimoire.com, autoconfig.netgrimoire.com {
	reverse_proxy mailcow-nginx:80
}
mail.wasted-bandwidth.net, imap.wasted-bandwidth.net, smtp.wasted-bandwidth.net, autodiscover.wasted-bandwidth.net, autoconfig.wasted-bandwidth.net {
	reverse_proxy mailcow-nginx:80
}
mail.gnarlypandaproductions.com, imap.gnarlypandaproductions.com, smtp.gnarlypandaproductions.com, autodiscover.gnarlypandaproductions.com, autoconfig.gnarlypandaproductions.com {
	reverse_proxy mailcow-nginx:80
}
email.gnarlypandaproductions.com, email.netgrimoire.com, email.wasted-bandwidth.net {
	# Caddy rejects upstream addresses that carry a path (the original
	# mailcow-nginx:80/sogo/ form); rewrite to the SOGo prefix instead.
	rewrite * /sogo{uri}
	reverse_proxy mailcow-nginx:80
}
(authentik) {
	# Always forward outpost path to actual outpost
	reverse_proxy /outpost.goauthentik.io/* http://authentik:9000
	# Forward authentication to outpost
	forward_auth http://authentik:9000 {
		uri /outpost.goauthentik.io/auth/caddy
		# Capitalization of the headers is important, otherwise they will be empty
		copy_headers X-Authentik-Username X-Authentik-Groups X-Authentik-Email X-Authentik-Name X-Authentik-Uid X-Authentik-Jwt X-Authentik-Meta-Jwks X-Authentik-Meta-Outpost X-Authentik-Meta-Provider X-Authentik-Meta-App X-Authentik-Meta-Version
	}
}
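# Hypothetical usage sketch: a site block imports the (authentik) snippet
# above to gate access, then proxies to its app. The names below are
# placeholders, not services from this repo.
# example.netgrimoire.com {
# 	import authentik
# 	reverse_proxy example-app:8080
# }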

archive/WatchState.yaml Executable file

@@ -0,0 +1,38 @@
services:
  watchstate:
    image: ghcr.io/arabcoders/watchstate:latest
    # To change the user/group id associated with the tool change the following line.
    user: "1001:998"
    container_name: watchstate
    restart: unless-stopped
    ports:
      - "7980:8080" # The port which the webui will be available on.
    volumes:
      - /DockerVol/watchstate:/config:rw # mount current directory to container /config directory.
    networks:
      - netgrimoire
    deploy:
      labels:
        - homepage.group=Library
        - homepage.name=WatchState
        - homepage.icon=jellyfin.png
        - homepage.href=http://watchstate.netgrimoire.com
        - homepage.description=Media Server Sync
        - caddy=watchstate.netgrimoire.com
        - caddy.import=authentik
        - caddy.reverse_proxy="{{upstreams 8080}}"
      placement:
        constraints:
          - node.hostname == bruce
networks:
  netgrimoire:
    external: true
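# Note ("{{upstreams 8080}}" recurs throughout these files): it is a
# caddy-docker-proxy template that expands to the container's address(es)
# on the shared network at port 8080, rather than a literal hostname.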

archive/arr.yaml Executable file

@@ -0,0 +1,253 @@
services:
  huntarr:
    image: huntarr/huntarr:latest
    container_name: huntarr
    hostname: huntarr
    environment:
      - TZ=America/Chicago
      - PGID=998
      - PUID=1001
    volumes:
      - /DockerVol/huntarr:/config
      - /data/nfs/Baxter/Data/:/data:shared
    ports:
      - 9705:9705
    networks:
      - netgrimoire
    deploy:
      placement:
        constraints:
          - node.hostname == docker1
      labels:
        - homepage.group=Jolly Roger
        - homepage.name=Huntarr
        - homepage.icon=theodeinproject.png
        - homepage.href=https://huntarr.netgrimoire.com
        - homepage.description=Library Huntarr
        - kuma.huntarr.http.name="huntarr" # was kuma.radarr.*, which collides with Radarr's own monitor
        - kuma.huntarr.http.url=http://huntarr:9705
        - caddy=huntarr.netgrimoire.com
        - caddy.import=authentik
        - caddy.reverse_proxy="{{upstreams 9705}}"
  radarr:
    image: lscr.io/linuxserver/radarr:latest
    container_name: radarr
    hostname: radarr
    networks:
      - netgrimoire
    volumes:
      - /DockerVol/Radarr:/config
      - /data/nfs/Baxter/Data/:/data:shared
    ports:
      - 7878:7878
    environment:
      - TZ=America/Chicago
      - PGID=998
      - PUID=1001
    restart: unless-stopped
    deploy:
      placement:
        constraints:
          - node.hostname == docker1
      labels:
        - homepage.group=Jolly Roger
        - homepage.name=Radarr
        - homepage.icon=radarr.png
        - homepage.href=http://radarr.netgrimoire.com
        - homepage.description=Movie Library
        - homepage.widget.type=radarr
        - homepage.widget.url=http://radarr:7878
        - homepage.widget.key=1fb7e413a68e459cb08ab33b100a444a
        - kuma.radarr.http.name="Radarr"
        - kuma.radarr.http.url=http://radarr:7878
        - caddy=radarr.netgrimoire.com
        - caddy.import=authentik
        - caddy.reverse_proxy="{{upstreams 7878}}"
  sonarr:
    image: linuxserver/sonarr:latest
    container_name: sonarr
    hostname: sonarr
    networks:
      - netgrimoire
    volumes:
      - /etc/localtime:/etc/localtime:ro
      - /DockerVol/Sonarr:/config
      - /data/nfs/Baxter/Data/:/data:shared
    ports:
      - 8989:8989
    environment:
      - TZ=America/Chicago
      - PGID=998
      - PUID=1001
    restart: unless-stopped
    deploy:
      placement:
        constraints:
          - node.hostname == docker1
      labels:
        - homepage.group=Jolly Roger
        - homepage.name=Sonarr
        - homepage.icon=sonarr.png
        - homepage.href=http://sonarr.netgrimoire.com
        - homepage.description=Television Library
        - homepage.widget.type=sonarr
        - homepage.widget.url=http://sonarr:8989
        - homepage.widget.key=62f710714d604bc8a1e6df9f2ad6a07c
        - kuma.sonarr.http.name="Sonarr"
        - kuma.sonarr.http.url=http://sonarr:8989
        - caddy=sonarr.netgrimoire.com
        - caddy.import=authentik
        - caddy.reverse_proxy="{{upstreams 8989}}"
  lidarr:
    image: linuxserver/lidarr
    container_name: lidarr
    hostname: lidarr
    ports:
      - 8686:8686
    networks:
      - netgrimoire
    volumes:
      - /DockerVol/lidarr/config:/config # was /Dockervol (case typo); bind paths are case-sensitive
      - /data/nfs/Baxter/Data/:/data:shared
    environment:
      - TZ=America/Chicago
      - PGID=998
      - PUID=1001
    restart: unless-stopped
    deploy:
      placement:
        constraints:
          - node.hostname == docker1
      labels:
        - homepage.group=Jolly Roger
        - homepage.name=Lidarr
        - homepage.icon=lidarr.png
        - homepage.href=http://lidarr.netgrimoire.com
        - homepage.description=Music Library
        - homepage.widget.type=lidarr
        - homepage.widget.url=http://lidarr:8686
        - homepage.widget.key=189afec5b97440209775a192870a2815
        - kuma.lidarr.http.name="Lidarr"
        - kuma.lidarr.http.url=http://lidarr:8686
        - caddy=lidarr.netgrimoire.com
        - caddy.import=authentik
        - caddy.reverse_proxy="{{upstreams 8686}}"
  mylar:
    # image: linuxserver/mylar
    image: lscr.io/linuxserver/mylar3:latest
    container_name: mylar
    hostname: mylar
    networks:
      - netgrimoire
    volumes:
      - /DockerVol/mylar:/config
      - /data/nfs/Baxter/Data/:/data:shared
    ports:
      - 8090:8090
    environment:
      - TZ=America/Chicago
      - PGID=998
      - PUID=1001
    restart: unless-stopped
    deploy:
      placement:
        constraints:
          - node.hostname == docker1
      labels:
        - homepage.group=Jolly Roger
        - homepage.name=mylar
        - homepage.icon=mylar.png
        - homepage.href=http://mylar.netgrimoire.com
        - homepage.description=Comic Library
        - homepage.widget.type=mylar
        - homepage.widget.url=http://mylar:8090
        - homepage.widget.key=82efe2807c7be56f20b0f4ec358d8694
        - kuma.mylar.http.name="Mylar"
        - kuma.mylar.http.url=http://mylar:8090
        - caddy=mylar.netgrimoire.com
        - caddy.import=authentik
        - caddy.reverse_proxy="{{upstreams 8090}}"
  readarr:
    image: lscr.io/linuxserver/readarr:develop
    container_name: readarr
    hostname: readarr
    environment:
      - PUID=1001
      - PGID=998
      - TZ=America/Chicago
    networks:
      - netgrimoire
    volumes:
      - /DockerVol/readarr/config:/config
      - /data/nfs/Baxter/Data:/data:shared
    ports:
      - 8787:8787
    restart: unless-stopped
    deploy:
      placement:
        constraints:
          - node.hostname == docker1
      labels:
        - homepage.group=Jolly Roger
        - homepage.name=Readarr
        - homepage.icon=readarr.png
        - homepage.href=http://readarr.netgrimoire.com
        - homepage.description=Ebook Library
        - homepage.widget.type=readarr
        - homepage.widget.url=http://readarr:8787
        - homepage.widget.key=78954fcf696e4da9b2e9391a54e87478
        - kuma.readarr.http.name="Readarr"
        - kuma.readarr.http.url=http://readarr:8787
        - caddy=readarr.netgrimoire.com
        - caddy.import=authentik
        - caddy.reverse_proxy="{{upstreams 8787}}"
  bazarr:
    image: lscr.io/linuxserver/bazarr:latest
    container_name: bazarr
    networks:
      - netgrimoire
    environment:
      - TZ=America/Chicago
      - PGID=998
      - PUID=1001
    volumes:
      - /DockerVol/bazarr/config:/config
      - /data/nfs/Baxter/Data/:/data:shared
    ports:
      - 6767:6767
    restart: unless-stopped
    deploy:
      placement:
        constraints:
          - node.hostname == docker1
      labels:
        - homepage.group=Jolly Roger
        - homepage.name=Bazarr
        - homepage.icon=bazarr.png
        - homepage.href=http://bazarr.netgrimoire.com
        - homepage.description=Subtitle Search
        # - homepage.widget.type=readarr
        # - homepage.widget.url=http://readarr:8787
        # - homepage.widget.key=78954fcf696e4da9b2e9391a54e87478
        - kuma.bazarr.http.name="Bazarr" # was a Readarr copy-paste leftover
        - kuma.bazarr.http.url=http://bazarr:6767
        - caddy=bazarr.netgrimoire.com
        - caddy.import=authentik
        - caddy.reverse_proxy="{{upstreams 6767}}"
networks:
  netgrimoire:
    external: true
  vpn:
    external: true
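# Note: the ":shared" suffix on the /data bind mounts sets shared mount
# propagation, so NFS (re)mounts made under the host path after container
# start stay visible inside the containers.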

archive/cadvisor.yaml Normal file

@@ -0,0 +1,29 @@
services:
  cadvisor:
    image: gcr.io/cadvisor/cadvisor:latest
    deploy:
      mode: global # Ensures it runs on every node
      resources:
        limits:
          memory: 512M
    ports:
      - "8066:8080" # Expose the cAdvisor UI
    volumes:
      - "/:/rootfs:ro"
      - "/var/run:/var/run:ro"
      - "/sys:/sys:ro"
      - "/var/lib/docker/:/var/lib/docker:ro"
      - "/dev/disk/:/dev/disk:ro"
    networks:
      - netgrimoire
    logging:
      driver: "gelf"
      options:
        gelf-address: "udp://192.168.5.17:12201"
        tag: "cadvisor"
networks:
  netgrimoire:
    external: true
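# Hedged note: the gelf driver ships container logs as GELF datagrams over
# UDP; 192.168.5.17:12201 is assumed to be a GELF-speaking collector such as
# Graylog. With UDP, logs are dropped silently if nothing listens there.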

archive/cal.yaml Normal file

@@ -0,0 +1,45 @@
services:
  glance-ical-api:
    image: ghcr.io/awildleon/glance-ical-events:v1.3.1
    hostname: glance-ical-api
    networks:
      - netgrimoire
    environment:
      - PUID=1001
      - PGID=998
      - TZ=America/Chicago
    # user: "1001:998"
    restart: unless-stopped
    deploy:
      labels:
        # -------------------
        # Caddy
        # -------------------
        - caddy=cal.netgrimoire.com
        - caddy.reverse_proxy="{{upstreams 8076}}"
        # -------------------
        # Diun
        # -------------------
        - diun.enable=true
        # -------------------
        # Homepage
        # -------------------
        - homepage.group=Dashboard
        - homepage.name=Calendar Events API
        - homepage.description=Google Calendar → Glance events bridge
        - homepage.icon=calendar.png
        - homepage.href=https://cal.netgrimoire.com
        # -------------------
        # Uptime Kuma
        # -------------------
        - kuma.glanceical.http.name="Glance iCal API"
        - kuma.glanceical.http.url=https://cal.netgrimoire.com/health
networks:
  netgrimoire:
    external: true

archive/crowdsec.yaml Normal file

@@ -0,0 +1,35 @@
services:
  crowdsec:
    image: crowdsecurity/crowdsec
    container_name: crowdsec
    networks:
      - netgrimoire
    volumes:
      - /export/Docker/crowdsec/config:/etc/crowdsec
      - /export/Docker/crowdsec/data:/var/lib/crowdsec/data
      - /export/Docker/caddy/logs:/var/log/caddy:ro # Mount Caddy logs
    environment:
      - COLLECTIONS=crowdsecurity/http-dos crowdsecurity/caddy
    deploy:
      placement:
        constraints:
          - node.hostname == nas
  # caddy-bouncer:
  #   image: crowdsecurity/caddy-bouncer
  #   container_name: caddy-bouncer
  #   restart: unless-stopped
  #   volumes:
  #     - /export/Docker/crowdsec/bouncer:/etc/caddy-bouncer
  #   networks:
  #     - netgrimoire
  #   deploy:
  #     placement:
  #       constraints:
  #         - node.hostname == nas
networks:
  netgrimoire:
    external: true
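# Note: COLLECTIONS is a space-separated list installed when the container
# first starts. If the commented bouncer is revived, it still needs an API
# key registered with the agent, e.g. (hedged):
#   docker exec crowdsec cscli bouncers add caddy-bouncer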

archive/komodo.env Normal file

@@ -0,0 +1,134 @@
####################################
# 🦎 KOMODO COMPOSE - VARIABLES 🦎 #
####################################
## These compose variables can be used with all Komodo deployment options.
## Pass these variables to the compose up command using `--env-file komodo/compose.env`.
## Additionally, they are passed to both Komodo Core and Komodo Periphery with `env_file: ./compose.env`,
## so you can pass any additional environment variables to Core / Periphery directly in this file as well.
## Stick to a specific version, or use `latest`
COMPOSE_KOMODO_IMAGE_TAG=latest
## Note: 🚨 Podman does NOT support local logging driver 🚨. See Podman options here:
## `https://docs.podman.io/en/v4.6.1/markdown/podman-run.1.html#log-driver-driver`
COMPOSE_LOGGING_DRIVER=local # Enable log rotation with the local driver.
## DB credentials - Ignored for Sqlite
KOMODO_DB_USERNAME=admin
KOMODO_DB_PASSWORD=admin
## Configure a secure passkey to authenticate between Core / Periphery.
KOMODO_PASSKEY=komodo_Passkey
#=-------------------------=#
#= Komodo Core Environment =#
#=-------------------------=#
## Full variable list + descriptions are available here:
## 🦎 https://github.com/moghtech/komodo/blob/main/config/core.config.toml 🦎
## Note. Secret variables also support `${VARIABLE}_FILE` syntax to pass docker compose secrets.
## Docs: https://docs.docker.com/compose/how-tos/use-secrets/#examples
## Used for Oauth / Webhook url suggestion / Caddy reverse proxy.
KOMODO_HOST=https://komodo.netgrimoire.com
## Displayed in the browser tab.
KOMODO_TITLE=Komodo
## Create a server matching this address as the "first server".
## Use `https://host.docker.internal:8120` when using systemd-managed Periphery.
KOMODO_FIRST_SERVER=https://periphery:8120
## Make all buttons just double-click, rather than the full confirmation dialog.
KOMODO_DISABLE_CONFIRM_DIALOG=false
## The rate at which Komodo polls your servers for
## status / container status / system stats / alerting.
## Options: 1-sec, 5-sec, 15-sec, 1-min, 5-min.
## Default: 15-sec
KOMODO_MONITORING_INTERVAL="15-sec"
## The rate at which Komodo polls Resources for updates,
## like outdated commit hash.
## Options: 1-min, 5-min, 15-min, 30-min, 1-hr.
## Default: 5-min
KOMODO_RESOURCE_POLL_INTERVAL="5-min"
## Used to auth incoming webhooks. Alt: KOMODO_WEBHOOK_SECRET_FILE
KOMODO_WEBHOOK_SECRET=a_random_secret
## Used to generate jwt. Alt: KOMODO_JWT_SECRET_FILE
KOMODO_JWT_SECRET=a_random_jwt_secret
## Enable login with username + password.
KOMODO_LOCAL_AUTH=true
## Disable new user signups.
KOMODO_DISABLE_USER_REGISTRATION=false
## Automatically enable all new users when they first log in.
KOMODO_ENABLE_NEW_USERS=true
## Disable non-admins from creating new resources.
KOMODO_DISABLE_NON_ADMIN_CREATE=false
## Allows all users to have Read level access to all resources.
KOMODO_TRANSPARENT_MODE=false
## Time to live for jwt tokens.
## Options: 1-hr, 12-hr, 1-day, 3-day, 1-wk, 2-wk
KOMODO_JWT_TTL="1-day"
## OIDC Login
KOMODO_OIDC_ENABLED=false
## Must be reachable from the Komodo Core container.
# KOMODO_OIDC_PROVIDER=https://oidc.provider.internal/application/o/komodo
## Change the host to one reachable by users (optional if it is the same as above).
## DO NOT include the `path` part of the URL.
# KOMODO_OIDC_REDIRECT_HOST=https://oidc.provider.external
## Your OIDC client id
# KOMODO_OIDC_CLIENT_ID= # Alt: KOMODO_OIDC_CLIENT_ID_FILE
## Your OIDC client secret.
## If your provider supports PKCE flow, this can be omitted.
# KOMODO_OIDC_CLIENT_SECRET= # Alt: KOMODO_OIDC_CLIENT_SECRET_FILE
## Make usernames the full email.
## Note. This does not work for all OIDC providers.
# KOMODO_OIDC_USE_FULL_EMAIL=true
## Add additional trusted audiences for token claims verification.
## Supports comma separated list, and passing with _FILE (for compose secrets).
# KOMODO_OIDC_ADDITIONAL_AUDIENCES=abc,123 # Alt: KOMODO_OIDC_ADDITIONAL_AUDIENCES_FILE
## Github Oauth
KOMODO_GITHUB_OAUTH_ENABLED=false
# KOMODO_GITHUB_OAUTH_ID= # Alt: KOMODO_GITHUB_OAUTH_ID_FILE
# KOMODO_GITHUB_OAUTH_SECRET= # Alt: KOMODO_GITHUB_OAUTH_SECRET_FILE
## Google Oauth
KOMODO_GOOGLE_OAUTH_ENABLED=false
# KOMODO_GOOGLE_OAUTH_ID= # Alt: KOMODO_GOOGLE_OAUTH_ID_FILE
# KOMODO_GOOGLE_OAUTH_SECRET= # Alt: KOMODO_GOOGLE_OAUTH_SECRET_FILE
## Aws - Used to launch Builder instances and ServerTemplate instances.
KOMODO_AWS_ACCESS_KEY_ID= # Alt: KOMODO_AWS_ACCESS_KEY_ID_FILE
KOMODO_AWS_SECRET_ACCESS_KEY= # Alt: KOMODO_AWS_SECRET_ACCESS_KEY_FILE
## Hetzner - Used to launch ServerTemplate instances
## Hetzner Builder not supported due to Hetzner pay-by-the-hour pricing model
KOMODO_HETZNER_TOKEN= # Alt: KOMODO_HETZNER_TOKEN_FILE
#=------------------------------=#
#= Komodo Periphery Environment =#
#=------------------------------=#
## Full variable list + descriptions are available here:
## 🦎 https://github.com/moghtech/komodo/blob/main/config/periphery.config.toml 🦎
## Periphery passkeys must include KOMODO_PASSKEY to authenticate.
PERIPHERY_PASSKEYS=${KOMODO_PASSKEY}
## Specify the root directory used by Periphery agent.
PERIPHERY_ROOT_DIRECTORY=/etc/komodo
## Enable SSL using self signed certificates.
## Connect to Periphery at https://address:8120.
PERIPHERY_SSL_ENABLED=true
## If the disk size is over-reported, you can use one of these to
## whitelist / blacklist the disks to filter them, whichever is easier.
## Accepts comma separated list of paths.
## Usually whitelisting just /etc/hostname gives correct size.
PERIPHERY_INCLUDE_DISK_MOUNTS=/etc/hostname
# PERIPHERY_EXCLUDE_DISK_MOUNTS=/snap,/etc/repos
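
## Example (hedged, mirroring the loader comments in komodo.yaml): export
## these variables in the shell before deploying, since `docker stack deploy`
## does not read env files itself:
##   set -a; . ./komodo.env; set +a
##   docker stack deploy -c komodo.yaml komodo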

archive/komodo.yaml Normal file

@@ -0,0 +1,115 @@
################################
# 🦎 KOMODO COMPOSE - MONGO 🦎 #
################################
## This compose file will deploy:
## 1. MongoDB
## 2. Komodo Core
## 3. Komodo Periphery
##
## Load the .env file first:
##   set -a
##   source .env
##   set +a
services:
  komodo_mongo:
    image: mongo
    labels:
      komodo.skip: # Prevent Komodo from stopping with StopAllContainers
    command: --quiet --wiredTigerCacheSizeGB 0.25
    restart: unless-stopped
    logging:
      driver: ${COMPOSE_LOGGING_DRIVER:-local}
    ports:
      - 27017:27017
    volumes:
      - /DockerVol/komodo/mongo-data:/data/db
      - /DockerVol/komodo/mongo-config:/data/configdb
    environment:
      MONGO_INITDB_ROOT_USERNAME: ${KOMODO_DB_USERNAME}
      MONGO_INITDB_ROOT_PASSWORD: ${KOMODO_DB_PASSWORD}
    networks:
      - komodo
    deploy:
      placement:
        constraints:
          - node.hostname == docker2
  komodo_core:
    image: ghcr.io/moghtech/komodo-core:${COMPOSE_KOMODO_IMAGE_TAG:-latest}
    labels:
      komodo.skip: # Prevent Komodo from stopping with StopAllContainers
    restart: unless-stopped
    depends_on:
      - komodo_mongo
    logging:
      driver: ${COMPOSE_LOGGING_DRIVER:-local}
    ports:
      - 9120:9120
    networks:
      - komodo
      - netgrimoire
    # env_file: ./compose.env
    environment:
      KOMODO_DATABASE_ADDRESS: komodo_mongo:27017
      KOMODO_DATABASE_USERNAME: ${KOMODO_DB_USERNAME}
      KOMODO_DATABASE_PASSWORD: ${KOMODO_DB_PASSWORD}
    volumes:
      ## Core cache for repos for latest commit hash / contents
      - /DockerVol/komodo/repo-cache:/repo-cache
      ## Store sync files on server
      # - /path/to/syncs:/syncs
      ## Optionally mount a custom core.config.toml
      - /DockerVol/komodo/config.toml:/config/config.toml
    ## Allows for systemd Periphery connection at
    ## "http://host.docker.internal:8120"
    # extra_hosts:
    #   - host.docker.internal:host-gateway
    deploy:
      placement:
        constraints:
          - node.hostname == docker2
  ## Deploy Periphery container using this block,
  ## or deploy the Periphery binary with systemd using
  ## https://github.com/moghtech/komodo/tree/main/scripts
  periphery:
    image: ghcr.io/moghtech/komodo-periphery:${COMPOSE_KOMODO_IMAGE_TAG:-latest}
    labels:
      komodo.skip: # Prevent Komodo from stopping with StopAllContainers
    restart: unless-stopped
    logging:
      driver: ${COMPOSE_LOGGING_DRIVER:-local}
    # env_file: ./compose.env
    environment:
      ## These take a single container path (the original values mixed in
      ## host:container mount syntax); the /etc/komodo bind mount below
      ## already exposes them on the host.
      PERIPHERY_REPO_DIR: /etc/komodo/repos
      PERIPHERY_STACK_DIR: /etc/komodo/stacks
      # PERIPHERY_SSL_KEY_FILE: ${PERIPHERY_ROOT_DIRECTORY:-/etc/komodo}/ssl/key.pem
      # PERIPHERY_SSL_CERT_FILE: ${PERIPHERY_ROOT_DIRECTORY:-/etc/komodo}/ssl/cert.pem
    networks:
      - komodo
    volumes:
      ## Mount external docker socket
      - /var/run/docker.sock:/var/run/docker.sock
      ## Allow Periphery to see processes outside of container
      - /proc:/proc
      ## Specify the Periphery agent root directory.
      ## Must be the same inside and outside the container,
      ## or docker will get confused. See https://github.com/moghtech/komodo/discussions/180.
      ## Default: /etc/komodo.
      - /etc/komodo:/etc/komodo
    deploy:
      placement:
        constraints:
          - node.hostname == docker2
networks:
  netgrimoire:
    external: true
  komodo:
    external: true

archive/nessus.yaml Normal file

@@ -0,0 +1,48 @@
services:
  nessus-essentials:
    image: tenable/nessus:latest-ubuntu
    container_name: nessus-essentials
    ports:
      - "8834:8834"
    networks:
      - netgrimoire
    environment:
      - USERNAME=admin
      - PASSWORD=admin
      # - ACTIVATION_CODE=3RH7-HX4R-DKS6-EJFK-GNWG
    # restart: unless-stopped
    volumes:
      - data:/opt/nessus/var/nessus
    deploy:
      labels:
        - homepage.group=Monitoring
        - homepage.name=Nessus
        - homepage.icon=phoneinfoga.png
        - homepage.href=http://nessus.netgrimoire.com
        - homepage.description=Vulnerability Scanner
        - caddy=nessus.netgrimoire.com
        - caddy.import=authentik
        # - caddy.reverse_proxy="{{upstreams 8834}}"
        # - caddy.reverse_proxy=https://nessus-essentials:8834
        # - caddy.tls_insecure_skip_verify=true # Allow self-signed certs
        - caddy.reverse_proxy=https://nessus-essentials:8834
        - caddy.reverse_proxy.transport=http
        - caddy.reverse_proxy.transport.tls
        - caddy.reverse_proxy.transport.tls_insecure_skip_verify
      placement:
        constraints:
          - node.hostname == docker2
networks:
  netgrimoire:
    external: true
volumes:
  data:
    driver: local
    driver_opts:
      type: none
      o: bind
      device: /DockerVol/nessus
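# For reference, the caddy.* labels above should render roughly this site
# block, per caddy-docker-proxy's label-to-directive mapping (the tls +
# tls_insecure_skip_verify pair accepts Nessus's self-signed certificate):
#   nessus.netgrimoire.com {
#       import authentik
#       reverse_proxy https://nessus-essentials:8834 {
#           transport http {
#               tls
#               tls_insecure_skip_verify
#           }
#       }
#   }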

archive/nxterm.yaml Executable file

@@ -0,0 +1,55 @@
services:
  nexterm:
    ports:
      - "6989:6989"
    container_name: nexterm
    image: germannewsmaker/nexterm:1.0.2-OPEN-PREVIEW
    restart: always
    environment:
      PUID: 1001
      PGID: 998
      TZ: America/Chicago
      MAX_WORKERS: 1
      WEB_CONCURRENCY: 1
    networks:
      - netgrimoire
    deploy:
      labels:
        - homepage.group=Remote Access
        - homepage.name=Nexterm
        - homepage.icon=nexterm.png
        - homepage.href=http://nexterm.netgrimoire.com
        - homepage.description=Remote Access
        - kuma.nxterm.http.name="Nexterm"
        - kuma.nxterm.http.url=http://nexterm:6989
        - caddy=nexterm.netgrimoire.com
        - caddy.import=authentik
        - caddy.reverse_proxy="{{upstreams 6989}}"
    volumes:
      - /data/nfs/Baxter/Docker/nexterm:/app/data
networks:
  netgrimoire:
    external: true

archive/peekaping.yaml Normal file

@@ -0,0 +1,96 @@
services:
  peekaping-postgres:
    image: postgres:17
    networks:
      - netgrimoire
    volumes:
      - /data/nfs/Baxter/Docker/peekaping/postgres:/var/lib/postgresql/data
    environment:
      - POSTGRES_USER=peekaping
      - POSTGRES_PASSWORD=peekapingpass
      - POSTGRES_DB=peekaping
      - TZ=America/Chicago
    user: "1001:998"
    deploy:
      restart_policy:
        condition: on-failure
      labels:
        diun.enable: "true"
  migrate:
    image: 0xfurai/peekaping-migrate:latest
    environment:
      - DB_USER=peekaping
      - DB_PASS=peekapingpass
      - DB_NAME=peekaping
      - DB_HOST=peekaping-postgres # must match the postgres service name above
      - DB_TYPE=postgres
      - DB_PORT=5432
      - TZ=America/Chicago
      - SERVER_PORT=8034
      - SERVER_HOST=0.0.0.0
      - MODE=prod
    user: "1001:998"
    networks:
      - netgrimoire
    deploy:
      replicas: 0
      restart_policy:
        condition: none
  peekaping-server:
    image: 0xfurai/peekaping-server:latest
    networks:
      - netgrimoire
    environment:
      - DB_USER=peekaping
      - DB_PASS=peekapingpass
      - DB_NAME=peekaping
      - DB_HOST=peekaping-postgres # must match the postgres service name above
      - DB_TYPE=postgres
      - DB_PORT=5432
      - TZ=America/Chicago
      - SERVER_PORT=8034
      - SERVER_HOST=0.0.0.0
      - MODE=prod
    deploy:
      restart_policy:
        condition: on-failure
      labels:
        diun.enable: "true"
        # Caddy
        caddy: peekaping.netgrimoire.com
        caddy.import: authentik
        caddy.reverse_proxy: "{{upstreams 8034}}"
  web:
    image: 0xfurai/peekaping-web:latest
    networks:
      - netgrimoire
    user: "1001:998"
    deploy:
      restart_policy:
        condition: on-failure
      labels:
        # Homepage
        homepage.group: "Monitoring"
        homepage.name: "Peekaping"
        homepage.icon: "peekaping.png"
        homepage.href: "https://peekaping.netgrimoire.com"
        homepage.description: "Simple ICMP/HTTP monitor"
        # Kuma integration
        kuma.monitoring.http.name: "Peekaping"
        kuma.monitoring.http.url: "http://peekaping-server:8034" # targets the server service, not the web UI
        # Peekaping self-monitor
        peekaping.name: "Peekaping"
        peekaping.url: "http://peekaping-server:8034" # targets the server service, not the web UI
        # Diun
        diun.enable: "true"
networks:
  netgrimoire:
    external: true
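# The migrate service is parked at replicas: 0 with no restart policy, so it
# only runs on demand. A one-off invocation, assuming the stack was deployed
# under the name "peekaping":
#   docker service scale peekaping_migrate=1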

archive/plex.yaml Executable file

@@ -0,0 +1,38 @@
services:
  plex:
    image: plexinc/pms-docker
    container_name: plex
    # NOTE: host networking conflicts with the ports/networks mappings below;
    # compose rejects the combination, so keep one approach or the other.
    network_mode: host
    environment:
      - PUID=1001
      - PGID=998
      - TZ=America/Chicago
      - VERSION=docker
      - ADVERTISE_IP=http://192.168.5.134:32401/ # quotes removed; they would be passed literally in list-form env
      # - PLEX_CLAIM= #optional
    ports:
      - 32401:32400
    networks:
      - netgrimoire
    volumes:
      - /DockerVol/Plex:/config
      - /data/nfs/Baxter/Data/media:/media # container target assumed; the original line had no mount target
    restart: unless-stopped
    deploy:
      labels:
        - homepage.group=Library
        - homepage.name=Plex Media Server
        - homepage.icon=plex.png
        - homepage.href=http://plex.netgrimoire.com:32401/web/index.html
        - homepage.description=Media server
        - kuma.homepage.http.name="plex"
        - kuma.homepage.http.url=http://plex:32401
        # - caddy=plex.netgrimoire.com
        # # - caddy.import=authentik
        # - caddy.reverse_proxy="{{upstreams 3000}}"
      placement:
        constraints:
          - node.hostname == bruce
networks:
  netgrimoire:
    external: true

archive/runner.yaml Normal file

@@ -0,0 +1,33 @@
version: "3.9"
services:
  forgejo-runner:
    image: code.forgejo.org/forgejo/runner:latest
    container_name: forgejo-runner
    networks:
      - netgrimoire
    restart: unless-stopped
    user: "1001:998"
    environment:
      TZ: America/Chicago
      RUNNER_NAME: docker2-runner
      RUNNER_LABELS: swarm
      FORGEJO_URL: https://git.netgrimoire.com
      FORGEJO_TOKEN: QFDPcVXHYQrm8FJj9n4Olp9R5U3Q3GwM56VThGx8
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
      - /DockerVol/runner:/data
    command: >
      forgejo-runner daemon --config /data/config.yml
    deploy:
      labels:
        kuma.forgejo-runner.http.name: "Forgejo Runner"
        kuma.forgejo-runner.http.url: "http://docker2:8080"
      placement:
        constraints:
          - node.hostname == docker2
networks:
  netgrimoire:
    external: true
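# Hedged setup sketch: the daemon reads its registration (.runner) from
# /data, so the runner is typically registered once before first start,
# along these lines (flags per the forgejo-runner docs; token placeholder):
#   docker run --rm -it -v /DockerVol/runner:/data \
#     code.forgejo.org/forgejo/runner:latest \
#     forgejo-runner register --no-interactive \
#       --instance https://git.netgrimoire.com \
#       --token <registration token> \
#       --name docker2-runner --labels swarm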

archive/sshwifty.yaml Executable file

@@ -0,0 +1,53 @@
services:
  sshwifty:
    container_name: sshwifty
    image: niruix/sshwifty:latest
    restart: always
    ports:
      - 8182:8182
    environment:
      PUID: 1001
      PGID: 998
      TZ: America/Chicago
      ALLOW_SIGNUP: "true"
      MAX_WORKERS: 1
      WEB_CONCURRENCY: 1
    networks:
      - netgrimoire
    deploy:
      labels:
        - homepage.group=Remote Access
        - homepage.name=sshwifty
        - homepage.icon=sshwifty.png
        - homepage.href=http://ssh.netgrimoire.com
        - homepage.description=SSH Access
        - kuma.sshwifty.http.name="sshwifty"
        - kuma.sshwifty.http.url=http://sshwifty:8182
networks:
  netgrimoire:
    external: true

@@ -0,0 +1,113 @@
# services:
#   wordpress:
#     image: wordpress
#     restart: always
#     ports:
#       - 8091:80
#     environment:
#       WORDPRESS_DB_HOST: wpdb
#       WORDPRESS_DB_USER: traveler
#       WORDPRESS_DB_PASSWORD: F@lcon13
#       WORDPRESS_DB_NAME: wp
#     volumes:
#       - /data/nfs/Baxter/Docker/wp-pnc/wp:/var/www/html
#     networks:
#       - netgrimoire
#     deploy:
#       labels:
#         - homepage.group=Application
#         - homepage.name=PNCClassified
#         - homepage.icon=wallos.png
#         - homepage.href=http://classifieds.pncfishandmore.com
#         - homepage.description=Classifieds
#         - kuma.wp.http.name="classified"
#         - kuma.wallos.http.url=http://wordpress:80
#         - caddy=classifieds.pncfishandmore.com
#         - caddy.import=authentik
#         - caddy.reverse_proxy="{{upstreams 80}}"
#   wpdb:
#     image: mysql:8.0
#     restart: always
#     environment:
#       MYSQL_DATABASE: wp
#       MYSQL_USER: traveler
#       MYSQL_PASSWORD: F@lcon13
#       MYSQL_RANDOM_ROOT_PASSWORD: '1'
#     volumes:
#       - /data/nfs/Baxter/Docker/wp-pnc/sql:/var/lib/mysql
#     networks:
#       - netgrimoire
# volumes:
#   wordpress:
#   db:
# networks:
#   netgrimoire:
#     external: true
services:
  wordpress:
    image: wordpress
    restart: always
    ports:
      - 8091:80
    environment:
      WORDPRESS_DB_HOST: wpdb
      WORDPRESS_DB_USER: traveler
      WORDPRESS_DB_PASSWORD: F@lcon13
      WORDPRESS_DB_NAME: wp
      # Revised proxy configuration
      WORDPRESS_CONFIG_EXTRA: |
        define('FORCE_SSL_ADMIN', true);
        define('WP_HOME', 'https://classifieds.pncfishandmore.com');
        define('WP_SITEURL', 'https://classifieds.pncfishandmore.com');
        define('FORCE_SSL', true);
        define('WP_PROXY_HOST', 'caddy');
        define('WP_PROXY_PORT', '443');
        define('WP_PROXY_SSL', true);
    volumes:
      - /data/nfs/Baxter/Docker/wp-pnc/wp:/var/www/html
    networks:
      - netgrimoire
    deploy:
      labels:
        - homepage.group=Application
        - homepage.name=PNCClassified
        - homepage.icon=wallos.png
        - homepage.href=https://classifieds.pncfishandmore.com
        - homepage.description=Classifieds
        - kuma.wp.http.name="classified"
        - kuma.wallos.http.url=http://wordpress:80
        - caddy=classifieds.pncfishandmore.com
        # - caddy.import=authentik
        - caddy.reverse_proxy="{{upstreams 80}}"
        - "caddy.header=Strict-Transport-Security \"max-age=31536000; includeSubDomains\""
        # The header directive sets *response* headers, and Caddy's
        # reverse_proxy already sends X-Forwarded-For/-Proto and Host
        # upstream, so the following overrides are redundant (use
        # caddy.reverse_proxy.header_up for extra upstream request headers):
        # - "caddy.header=X-Forwarded-Proto {http.request.scheme}"
        # - "caddy.header=X-Real-IP {http.request.remote.host}"
        # - "caddy.header=X-Forwarded-For {http.request.remote.host}"
        # - "caddy.header=Host {http.request.host}"
  wpdb:
    image: mysql:8.0
    restart: always
    environment:
      MYSQL_DATABASE: wp
      MYSQL_USER: traveler
      MYSQL_PASSWORD: F@lcon13
      MYSQL_RANDOM_ROOT_PASSWORD: '1'
    volumes:
      - /data/nfs/Baxter/Docker/wp-pnc/sql:/var/lib/mysql
    networks:
      - netgrimoire
volumes:
  wordpress:
  db:
networks:
  netgrimoire:
    external: true
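# Hedged note: if wp-admin ever redirect-loops behind TLS termination, the
# canonical wp-config.php companion to the defines above is:
#   if (isset($_SERVER['HTTP_X_FORWARDED_PROTO']) && $_SERVER['HTTP_X_FORWARDED_PROTO'] === 'https') {
#       $_SERVER['HTTPS'] = 'on';
#   }
# which can also be appended to WORDPRESS_CONFIG_EXTRA.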