27 Commits

Author SHA1 Message Date
Danilo Reyes
ad9179fe52 testing on lebubu 2026-02-05 12:06:28 -06:00
Danilo Reyes
9e64325f5e nextcloud uses different proxy 2026-02-05 11:12:37 -06:00
Danilo Reyes
6603fac1c4 nextcloud nginx split 2026-02-05 10:58:35 -06:00
Danilo Reyes
cb1776d670 fixing 2026-02-05 10:41:29 -06:00
Danilo Reyes
3517e394c6 nextcloud proxy logic attempt 2026-02-05 06:54:14 -06:00
Danilo Reyes
81f9025dc9 documentation update 2026-02-05 06:36:09 -06:00
Danilo Reyes
2ef113bc0e synapse cert logic 2026-02-05 06:30:45 -06:00
Danilo Reyes
d14a7ba395 private certificate fix 2026-02-05 06:26:40 -06:00
Danilo Reyes
eddef549e7 hmmm 2026-02-05 06:18:42 -06:00
Danilo Reyes
4ba0fa0dd5 nextcloud nginx logic needs to exists in two place 2026-02-05 06:04:42 -06:00
Danilo Reyes
08cc3379ad use merge to segment the complex nginx proxy settings 2026-02-05 05:32:46 -06:00
Danilo Reyes
2a290f2fe2 it was the nginx module... 2026-02-05 05:16:43 -06:00
Danilo Reyes
0c7e745e55 plausible actually ran on server im dumb 2026-02-05 05:06:43 -06:00
Danilo Reyes
caf7fbc590 nginx ip fix attempt 2026-02-05 04:58:41 -06:00
Danilo Reyes
ee11d72de8 domain sandbox 2026-02-05 04:16:21 -06:00
Danilo Reyes
dce2142794 proper ip assignation for nginx 2026-02-05 03:39:27 -06:00
Danilo Reyes
237e120124 working 2026-02-04 19:16:04 -06:00
Danilo Reyes
afdb5bfd99 chichis 2026-02-04 15:03:46 -06:00
Danilo Reyes
d7f9ea971c vps keys fix 2026-02-04 12:39:33 -06:00
Danilo Reyes
f01817a15f iptables 2026-02-04 11:42:39 -06:00
Danilo Reyes
917e741b7f rg_filter 2026-02-04 11:21:35 -06:00
Danilo Reyes
0997fad0c6 plausible + other fixes 2026-02-04 11:16:45 -06:00
Danilo Reyes
ba4cf6c86b root logic 2026-02-04 06:34:40 -06:00
Danilo Reyes
3f13527e51 "fixes" 2026-02-04 06:31:41 -06:00
Danilo Reyes
efe5cb0f99 remediations 2 2026-02-03 20:44:09 -06:00
Danilo Reyes
86557548db remediations 2026-02-03 20:43:25 -06:00
Danilo Reyes
a74adc7f95 init 2026-02-03 20:35:44 -06:00
47 changed files with 2789 additions and 496 deletions

View File

@@ -7,6 +7,8 @@ Auto-generated from feature plans. Last updated: 2026-01-30
- None (in-memory tool definitions; filesystem access for repo interactions) (002-mcp-server)
- Nix (flakes; nixpkgs 25.11) + nixpkgs, flake-parts, sops-nix (003-vps-image-migration)
- N/A (configuration repo) (003-vps-image-migration)
- Nix (flakes; nixpkgs 25.11) + NixOS modules, sops-nix, nginx, wireguard, openssh, iptables (004-vps-migration)
- Files (configuration and secrets) (004-vps-migration)
- Documentation set (AI-facing constitution and playbooks) in Markdown (001-ai-docs)
@@ -28,9 +30,9 @@ specs/001-ai-docs/ # Planning artifacts (plan, research, tasks, data model
- Keep language business-level and technology-agnostic in AI-facing docs.
## Recent Changes
- 004-vps-migration: Added Nix (flakes; nixpkgs 25.11) + NixOS modules, sops-nix, nginx, wireguard, openssh, iptables
- 003-vps-image-migration: Added Nix (flakes; nixpkgs 25.11) + nixpkgs, flake-parts, sops-nix
- 003-vps-image-migration: Added [if applicable, e.g., PostgreSQL, CoreData, files or N/A]
- 003-vps-image-migration: Added Nix (flakes; nixpkgs 25.11) + nixpkgs, flake-parts, sops-nix
<!-- MANUAL ADDITIONS START -->

36
caddy/Caddyfile Normal file
View File

@@ -0,0 +1,36 @@
# The Caddyfile is an easy way to configure your Caddy web server.
#
# https://caddyserver.com/docs/caddyfile
# The configuration below serves a welcome page over HTTP on port 80. To use
# your own domain name with automatic HTTPS, ensure your A/AAAA DNS record is
# pointing to this machine's public IP, then replace `http://` with your domain
# name. Refer to the documentation for full instructions on the address
# specification.
#
# https://caddyserver.com/docs/caddyfile/concepts#addresses
# Default catch-all site: serves the stock Caddy welcome page over plain
# HTTP (port 80) for any request no other (imported) site block matches.
http:// {
# Set this path to your site's directory.
root * /usr/share/caddy
# Enable the static file server.
file_server
# Another common task is to set up a reverse proxy:
# reverse_proxy localhost:8080
# Or serve a PHP site through php-fpm:
# php_fastcgi localhost:9000
# Refer to the directive documentation for more options.
# https://caddyserver.com/docs/caddyfile/directives
}
# As an alternative to editing the above site block, you can add your own site
# block files in the Caddyfile.d directory, and they will be included as long
# as they use the .caddyfile extension.
import Caddyfile.d/*.caddyfile

View File

@@ -0,0 +1,20 @@
# Nextcloud vhost, reachable under both domains; the backend runs on the
# WireGuard peer 10.77.0.2.
cloud.lebubu.org cloud.rotehaare.art {
# CalDAV/CardDAV service discovery redirects required by Nextcloud.
redir /.well-known/carddav /remote.php/dav/ 301
redir /.well-known/caldav /remote.php/dav/ 301
reverse_proxy 10.77.0.2:8081 {
# NOTE(review): this sends the *backend's* host:port as the Host header,
# not the original request host — confirm Nextcloud's trusted_domains /
# overwrite settings expect this.
header_up Host {upstream_hostport}
header_up X-Real-IP {remote_host}
header_up X-Forwarded-For {remote_host}
header_up X-Forwarded-Proto {scheme}
}
# Hardening headers; "-Server" strips Caddy's default Server header.
header {
X-Frame-Options "SAMEORIGIN"
X-Content-Type-Options "nosniff"
X-Permitted-Cross-Domain-Policies "none"
X-XSS-Protection "1; mode=block"
Referrer-Policy "no-referrer-when-downgrade"
Strict-Transport-Security "max-age=15552000; includeSubDomains"
-Server
}
}

View File

@@ -0,0 +1,18 @@
# Reusable snippet: require a client certificate signed by the private CA
# (mutual TLS) before serving any request.
(secure_mtls) {
	tls {
		client_auth {
			mode require_and_verify
			trusted_ca_cert_file /etc/caddy/client_ca.pem
		}
	}
}
# mTLS-protected internal services, proxied to the WireGuard peer.
# NOTE(review): these same three addresses also appear in the oauth2 snippet
# file; Caddy rejects duplicate site blocks, so only one of the two files may
# be active at a time.
home.lebubu.org, indexer.lebubu.org, xxx.lebubu.org {
	import secure_mtls
	@home host home.lebubu.org
	@indexer host indexer.lebubu.org
	@xxx host xxx.lebubu.org
	reverse_proxy @home 10.77.0.2:8082
	reverse_proxy @indexer 10.77.0.2:9696
	# Fix: xxx.lebubu.org was listed in the site addresses but had no route,
	# so requests to it returned an empty response. Port 9999 matches the
	# oauth2 variant of this site block.
	reverse_proxy @xxx 10.77.0.2:9999
}

View File

@@ -0,0 +1,29 @@
# Reusable snippet: gate a site behind oauth2-proxy (10.77.0.2:4180).
# - Requests under /oauth2/* go straight to oauth2-proxy (login, callback).
# - Everything else is checked via forward_auth against /oauth2/auth; on
#   success the authenticated user/email headers are copied onto the request.
(oauth2_common) {
@oauth2path path /oauth2/*
handle @oauth2path {
reverse_proxy 10.77.0.2:4180
}
handle {
forward_auth 10.77.0.2:4180 {
uri /oauth2/auth
copy_headers X-Auth-Request-User X-Auth-Request-Email
}
}
}
# Public endpoint for oauth2-proxy itself.
auth-proxy.lebubu.org {
reverse_proxy 10.77.0.2:4180
}
# OAuth2-protected internal services on the WireGuard peer.
# NOTE(review): handle blocks are mutually exclusive and first-match-wins;
# confirm the imported catch-all handle (forward_auth only) still lets the
# site's own handle below serve the response. Also, the mTLS snippet file
# declares these same three addresses — Caddy rejects duplicate site blocks,
# so only one of the two files may be active at a time.
home.lebubu.org, indexer.lebubu.org, xxx.lebubu.org {
import oauth2_common
@home host home.lebubu.org
@indexer host indexer.lebubu.org
@xxx host xxx.lebubu.org
handle {
reverse_proxy @home 10.77.0.2:8082
reverse_proxy @indexer 10.77.0.2:9696
reverse_proxy @xxx 10.77.0.2:9999
}
}

View File

@@ -0,0 +1,79 @@
# Simple pass-through vhosts: one subdomain per service, each proxied to the
# home server over the WireGuard tunnel (10.77.0.2). TLS certificates are
# obtained automatically by Caddy.
analytics.lebubu.org {
reverse_proxy 10.77.0.2:8439
}
cache.lebubu.org {
reverse_proxy 10.77.0.2:2343
}
audiobooks.lebubu.org {
reverse_proxy 10.77.0.2:5687
}
mealie.lebubu.org {
reverse_proxy 10.77.0.2:9925
}
git.lebubu.org {
reverse_proxy 10.77.0.2:9083
}
subs.lebubu.org {
reverse_proxy 10.77.0.2:6767
}
collabora.lebubu.org {
reverse_proxy 10.77.0.2:9980
}
library.lebubu.org {
reverse_proxy 10.77.0.2:5000
}
music.lebubu.org {
reverse_proxy 10.77.0.2:8686
}
maloja.lebubu.org {
reverse_proxy 10.77.0.2:42010
}
copy.lebubu.org {
reverse_proxy 10.77.0.2:8086
}
scrobble.lebubu.org {
reverse_proxy 10.77.0.2:9078
}
# Served under both domains.
plex.lebubu.org plex.rotehaare.art {
reverse_proxy 10.77.0.2:32400
}
movies.lebubu.org {
reverse_proxy 10.77.0.2:7878
}
laters.lebubu.org {
reverse_proxy 10.77.0.2:9546
}
links.lebubu.org {
reverse_proxy 10.77.0.2:3000
}
tracker.lebubu.org {
reverse_proxy 10.77.0.2:8765
}
series.lebubu.org {
reverse_proxy 10.77.0.2:8989
}
vault.lebubu.org {
reverse_proxy 10.77.0.2:8222
}
bajameesta.lebubu.org {
reverse_proxy 10.77.0.2:8881
}

View File

@@ -0,0 +1,98 @@
# Shared snippet for the static Hugo sites: compression, security headers,
# asset caching, pretty-URL fallbacks, hidden-file denial, and a first-party
# proxy for the Plausible analytics script/events.
(hugo_common) {
	encode zstd gzip
	header {
		X-Frame-Options "SAMEORIGIN"
		X-Content-Type-Options "nosniff"
		X-XSS-Protection "1; mode=block"
		Referrer-Policy "strict-origin-when-cross-origin"
		Strict-Transport-Security "max-age=31536000; includeSubDomains; preload"
	}
	# handle blocks are mutually exclusive and are tried in the order they
	# appear, so the most specific ones must come first.
	# Deny dotfile paths everywhere.
	@hidden {
		path_regexp ^.*/\..*$
	}
	handle @hidden {
		# Fix: this used to be a bare `respond @hidden 404`, but Caddy orders
		# `respond` AFTER `handle` blocks, so the catch-all handle below
		# answered first and the 404 never fired. Wrapping it in its own
		# handle (placed first) makes it effective.
		respond 404
	}
	# First-party Plausible analytics proxy.
	# Fix: these two handles used to appear after the @static and catch-all
	# handles; @static matched *.js and the catch-all matched everything
	# else, so these endpoints were unreachable dead code.
	handle /js/script.js {
		rewrite * /js/script.file-downloads.hash.outbound-links.js
		reverse_proxy https://analytics.lebubu.org {
			header_up Host analytics.lebubu.org
		}
	}
	handle /api/event {
		reverse_proxy https://analytics.lebubu.org {
			header_up Host analytics.lebubu.org
		}
	}
	# Long-lived caching for static assets.
	@static {
		path *.jpg *.jpeg *.png *.gif *.ico *.css *.js *.svg *.woff *.woff2 *.ttf *.xml
	}
	handle @static {
		file_server
		header {
			Cache-Control "public, max-age=31536000, immutable"
		}
	}
	@html {
		path *.html
	}
	handle @html {
		file_server
		try_files {path} {path}/ /index.html
	}
	# Everything else: pretty URLs with index fallback.
	handle {
		file_server
		try_files {path} {path}/ /index.html
	}
}
# Canonicalize www → apex for both sites.
www.danilo-reyes.com {
redir https://danilo-reyes.com{uri}
}
www.blog.danilo-reyes.com {
redir https://blog.danilo-reyes.com{uri}
}
# Portfolio: static Hugo site using the shared hugo_common snippet.
danilo-reyes.com {
root * /var/www/html/portfolio
import hugo_common
}
# Blog: same, plus the Isso comment backend mounted under /isso.
blog.danilo-reyes.com {
route {
# handle_path strips the /isso prefix before proxying to the backend.
handle_path /isso* {
reverse_proxy 10.77.0.2:8180
}
root * /var/www/html/blog
import hugo_common
}
}
# Static file host for the lidarr-mb-gap report (content is populated
# out-of-band into /var/www/html/lidarr-mb-gap).
mb-report.lebubu.org {
root * /var/www/html/lidarr-mb-gap
file_server
encode gzip zstd
# Unknown paths fall back to the report page instead of a 404.
try_files {path} /missing_albums.html
# Explicit content types for the generated artifacts.
@html {
path *.html
}
header @html Content-Type "text/html; charset=utf-8"
@json {
path *.json
}
header @json Content-Type "application/json"
header {
X-Content-Type-Options "nosniff"
X-Frame-Options "SAMEORIGIN"
}
}

View File

@@ -0,0 +1,13 @@
# Jellyfin media server on the WireGuard peer.
flix.lebubu.org {
	reverse_proxy 10.77.0.2:8096 {
		header_up Host {host}
		header_up X-Real-IP {remote_host}
		header_up X-Forwarded-For {remote_host}
		header_up X-Forwarded-Proto {scheme}
		header_up X-Forwarded-Host {host}
		# Fix: removed the manual `header_up Connection {>Connection}` and
		# `header_up Upgrade {>Upgrade}` lines. Connection/Upgrade are
		# hop-by-hop headers that Caddy's reverse_proxy manages itself
		# (WebSocket upgrades are detected automatically, as the original
		# comment noted); forwarding the client's raw Connection header can
		# interfere with keep-alive and upgrade handling.
	}
}

View File

@@ -0,0 +1,9 @@
# Keycloak (auth) behind Caddy, proxied to the WireGuard peer.
auth.lebubu.org {
# NOTE(review): `tls internal` issues a certificate from Caddy's local CA
# instead of a public ACME one — confirm every client of auth.lebubu.org
# trusts that internal CA.
tls internal
reverse_proxy 10.77.0.2:8090 {
# Forward the original scheme/host so the backend can build correct
# redirect URIs; X-Forwarded-Proto is pinned to https.
header_up X-Forwarded-Proto https
header_up X-Forwarded-For {remote_host}
header_up X-Forwarded-Host {host}
header_up Host {host}
}
}

View File

@@ -0,0 +1,4 @@
# Torrent client UI. Unlike the other vhosts, the upstream runs on the VPS
# itself (loopback), not on the 10.77.0.2 WireGuard peer.
torrent.lebubu.org {
reverse_proxy 127.0.0.1:9345
}

33
caddy/client_ca.pem Normal file
View File

@@ -0,0 +1,33 @@
-----BEGIN CERTIFICATE-----
MIIFmzCCA4OgAwIBAgIUPBgrOAnSgT+y9+zaFaCuVkwi/M4wDQYJKoZIhvcNAQEL
BQAwXTELMAkGA1UEBhMCTVgxEjAQBgNVBAgMCVNvbWVTdGF0ZTERMA8GA1UEBwwI
U29tZUNpdHkxEDAOBgNVBAoMB0phd1pEZXYxFTATBgNVBAMMDEphd1ogUm9vdCBD
QTAeFw0yNTA3MTYxOTMxMTBaFw0zNTA3MTQxOTMxMTBaMF0xCzAJBgNVBAYTAk1Y
MRIwEAYDVQQIDAlTb21lU3RhdGUxETAPBgNVBAcMCFNvbWVDaXR5MRAwDgYDVQQK
DAdKYXdaRGV2MRUwEwYDVQQDDAxKYXdaIFJvb3QgQ0EwggIiMA0GCSqGSIb3DQEB
AQUAA4ICDwAwggIKAoICAQDwcWfnMDBzdukPZUa0pbY3tHG2ONEZMDUsxo5T5veq
KrMfsu7U9tE8AY+AVl0Qz9hpBHN+GmktXQlimPkm4tSVKJMjk0iWYgZn8tTMB+AL
i3gl/bt7qP+59U7gQbojkp6B0xCMCynPlsgcMiIcZWFmNVrG6ehh4B+wuG52gWVw
TrwhDjHhxsrc66DkgC/59Pm60JqHlBhuhv9HB/q9JM3HLQ63XUwhvTVJ29tSiJZl
WpKFr5s8nfE2FIXIHzi+o+Lo3n9wvdCzNfaRUStLWbROzF97jY4VIxIDk/loQH4T
6oXBGlRe8M+G1XL/waRDySxL26jRVG8bUEv4mh/Hd9Rs0JcUOl6lFiGndJMjMyom
ZgAlhi2Id2AzkT28utdYQqKUuaTy1SwLkrcOu9k2/dw7Uf7aK5WCraOth5ys+lw+
mzga4gNGc3Am9soFHjI56Qxvhf+Aa5tlASwpzrjsc7PJEZJXorE40uZsB/q1PafP
AIqVsSoT+Q6h6bld0EuQ5W4i1LTipZEPUaF673tGCXuI40AeTI44SFKcGm9XG1ic
I25OxuIKyl5sCANkryOHjNKY4SkzXKSpML3PYbfSKK7xDpeFofIYKnRfJm4qmBNd
lKT+ti4Hnvr8NZDRWyxC5SIDF1fdkslNu/HoAoL8JdXPYnitlTL7A5mF5PVPHom7
XwIDAQABo1MwUTAdBgNVHQ4EFgQUhquhsVpNS4shC+7DMxOK4/wYYEswHwYDVR0j
BBgwFoAUhquhsVpNS4shC+7DMxOK4/wYYEswDwYDVR0TAQH/BAUwAwEB/zANBgkq
hkiG9w0BAQsFAAOCAgEAU8nSV6DqCZSDxWpa8JSBmZFnO2oZIRF9Nw/1QcpMOGUR
pnWyQ03QtEgXYMwvxN/FOcGvYwg0LyYy07rzlpe5n2wRBaTrPCZ928f5j0nhADjC
GYutxhbO4WYvBKUY88qYCrJRa1Aw1B/CsGCmH5f+aND6fyxZ6Lx9CQ8O43f+QCOE
ltkbHRvjxYyVpDkgccDwetMDURKKrzkibUskeCPt0TjZbLKUq/cDspdAjSJgIJrz
a50JbniKUG5Qcav3P2aA6NluOKFJfYh+146uafC6WofUtx2Vv5lViYMlIDnqN4L0
xUzN5hB1kwF+4v1PO9/olafKqmgZ8FD/ipMYq2aYX4u9RJHLD6hMPUJpgKPRhGfi
ul9rYv6rC+pQNIn4s287sAPru5IgIzPBBCbqXSkoue7V/mpqRuZZRX84V6CzlYDc
0knoG2TL6aEWO+vj1mROgOuagyqyb3NZvgySE7GieW4tdvZhdYJJxdXh/tBQCg9E
iVcQH0rNJ+0jsybFWPqdOIZ6sH78SvY+J4KhqZ3Il/WCxCTs/Ccb/RMkhRm+bfSX
1FxoKF20b3RJ6g9N1oOj+12oK8jwMpUbaG/oAZh0TgZf1FUKic2f6jhMZLus8fGe
nyHza9mHbN1M8d9hX7U3gkepY8RVhSNL5erNp1zsBtZ4UNmouGm53wgjYZPYkrc=
-----END CERTIFICATE-----

View File

@@ -9,8 +9,8 @@
- Architecture: Flake-based repo using `flake-parts` with inputs for pkgs (stable/unstable), stylix, home-manager, sops-nix, and service overlays. Common modules are composed through `parts/core.nix` and `parts/hosts.nix`.
- Module auto-import: `modules/modules.nix` auto-imports `.nix` files under `modules/apps`, `modules/dev`, `modules/scripts`, `modules/servers`, `modules/services`, `modules/shell`, and `modules/network`, excluding `librewolf.nix`. Factories live in `modules/factories/` (`mkserver`, `mkscript`), and shared options are in `modules/nix` and `modules/users`.
- Hosts and toggles: Host definitions live in `hosts/<name>/configuration.nix` with host-specific toggles in `hosts/<name>/toggles.nix`. The `my` namespace carries toggles for apps/dev/scripts/services/shell, feature flags like `enableProxy` and `enableContainers`, and per-host `interfaces` and `ips` maps.
- Main server and proxies: `my.mainServer` selects the host that should serve traffic by default (default `miniserver`; overridden to `server` in `hosts/server/toggles.nix`). Reverse proxies use helpers in `parts/core.nix` (`proxy`, `proxyReverse`, `proxyReverseFix`, `proxyReversePrivate`) and pick IPs from `my.ips` plus the hostName/ip set by `mkserver` options.
- Secure hosts and secrets: `my.secureHost` gates SOPS secrets. Secure hosts load secrets from `secrets/*.yaml` and wireguard definitions; non-secure hosts (e.g., `hosts/emacs`) skip secret-dependent services. Default SOPS file is `secrets/secrets.yaml` via `config/base.nix`.
- Main server and proxies: `my.mainServer` selects the host that should serve traffic by default (default `vps`). Reverse proxies use helpers in `parts/core.nix` (`proxy`, `proxyReverse`, `proxyReverseFix`, `proxyReversePrivate`) and pick IPs from `my.ips` plus the hostName/ip set by `mkserver` options. Nginx defaults to `proxyReverse` for any server with `enableProxy = true` unless `useDefaultProxy = false` or the server is listed in the Fix/Private proxy lists.
- Secure hosts and secrets: `my.secureHost` gates SOPS secrets. Secure hosts load secrets from `secrets/*.yaml` and wireguard definitions; non-secure hosts (e.g., `hosts/emacs`) skip secret-dependent services. Default SOPS file is `secrets/secrets.yaml` via `config/base.nix`. Proxy-only services that need private certificates must still define their cert secrets when `enableProxy = true`.
## Coding Conventions
- No blank lines between code blocks; keep markdown examples tight.
@@ -33,7 +33,7 @@ config.services = {
- Factory: Shared option constructors in `modules/factories/` (use `mkserver` for server modules, `mkscript` for script units).
- Options: Settings under the `my` namespace (e.g., `my.services.<service>`, `my.scripts.<script>`).
- Toggles: Enablement maps in `hosts/<name>/toggles.nix` controlling categories (apps/dev/shell/scripts/services/servers/units) and features (`enableProxy`, `enableContainers`).
- Servers: Reverse-proxied services under `modules/servers/`, normally created with `mkserver` options.
- Servers: Reverse-proxied services under `modules/servers/`, normally created with `mkserver` options (including `useDefaultProxy` to opt out of default proxyReverse).
- Scripts: Units defined via `mkscript` with `enable`, `install`, `service`, `users`, `timer`, and `package` fields.
- Playbooks: Workflow guides under `docs/playbooks/` for repeatable tasks.
- Reference map: Navigation index under `docs/reference/index.md` for paths and responsibilities.

View File

@@ -23,8 +23,8 @@
- Active hosts: `workstation`, `server`, `miniserver`, `galaxy`, `emacs`, `vps`.
- Roles:
- workstation: developer desktop; provides build power for distributed builds.
- server: primary services host (overrides `my.mainServer = "server"` and enables proxies/containers).
- miniserver: small-footprint server; default `mainServer` in shared options.
- server: primary services host; runs most services and WireGuard targets.
- miniserver: small-footprint server.
- galaxy: small server variant using nixpkgs-small.
- emacs: VM profile, `my.secureHost = false` for secret-free usage.
- vps: Linode VPS image target, secure host with enrollment-based secrets.
@@ -32,8 +32,9 @@
## Proxy, Firewall, and Networking
- Proxy enablement: `my.enableProxy` toggles Nginx reverse proxy; assertions require at least one `my.servers.*.enableProxy` when enabled.
- Proxy helpers: use `parts/core.nix` helpers (`proxy`, `proxyReverse`, `proxyReverseFix` for header preservation, `proxyReversePrivate` for mutual TLS). `mkserver` supplies `host`, `ip`, `url`, and `enableProxy` defaults per service.
- Main server selection: `my.mainServer` chooses where services live by default; `mkserver` sets `isLocal` based on this and picks IPs from `my.ips`.
- Proxy helpers: use `parts/core.nix` helpers (`proxy`, `proxyReverse`, `proxyReverseFix` for header preservation, `proxyReversePrivate` for mutual TLS). `mkserver` supplies `host`, `ip`, `url`, `enableProxy`, and `useDefaultProxy`.
- Default proxying: any server with `enableProxy = true` gets a `proxyReverse` vhost unless `useDefaultProxy = false` or it is listed in `proxyReverseFix` / `proxyReversePrivate`.
- Main server selection: `my.mainServer` chooses where services live by default (default `vps`); `mkserver` sets `isLocal` based on this and picks IPs from `my.ips`.
- Firewall generation: `inputs.self.lib.generateFirewallPorts` combines static ports, additional ports, and service ports from `my.servers` (excluding native firewall services). Use `my.network.firewall` settings and `getServicesWithNativeFirewall` to derive open ports.
## Secrets Map
@@ -46,7 +47,7 @@
- `secrets/wireguard.yaml` → WireGuard peers and private keys.
- `secrets/secrets.yaml` → default SOPS file (general secrets, fallback when unspecified).
- `secrets/ssh/` → host SSH keys and related artifacts.
- secureHost: Only hosts with `my.secureHost = true` consume SOPS entries and WireGuard interfaces. Keep secret references behind `lib.mkIf config.my.secureHost`.
- secureHost: Only hosts with `my.secureHost = true` consume SOPS entries and WireGuard interfaces. Keep secret references behind `lib.mkIf config.my.secureHost`; proxy-only services that use private certs must still declare their cert secrets when `enableProxy = true`.
## Stylix and Theming
- Stylix module: `config/stylix.nix` and stylix inputs in `flake.nix` apply theming. Host toggle `my.stylix.enable` controls activation (see host toggles).

View File

@@ -4,7 +4,6 @@ let
mkEnabledIp = inputs.self.lib.mkEnabledIp config.my.ips.wg-server;
in
{
mainServer = "server";
emacs = {
enable = true;
users = "jawz";
@@ -71,6 +70,7 @@ in
"microbin"
"multi-scrobbler"
"paperless"
"plausible"
"plex"
"postgres"
"prowlarr"
@@ -78,11 +78,11 @@ in
"radarr"
"sabnzbd"
"sonarr"
"yamtrack"
"stash"
"synapse"
"syncplay"
"unpackerr"
"yamtrack"
]
// enableList mkEnabledIp [
"audiobookshelf"
@@ -90,7 +90,6 @@ in
"keycloak"
"linkwarden"
"oauth2-proxy"
"plausible"
"vaultwarden"
];
}

View File

@@ -1,6 +1,8 @@
{
config,
lib,
inputs,
pkgs,
...
}:
{
@@ -8,7 +10,7 @@
./hardware-configuration.nix
../../config/base.nix
];
my = {
my = import ./toggles.nix { inherit config inputs; } // {
secureHost = true;
users.nixremote = {
enable = true;
@@ -18,19 +20,56 @@
"nixminiserver"
];
};
services.network.enable = true;
interfaces = lib.mkMerge [
{
vps = "eth0";
}
];
};
environment.etc."iptables.rules".source = ../../iptables;
networking.firewall.enable = lib.mkForce false;
networking.nftables.enable = false;
systemd.services.iptables-restore = {
description = "Apply iptables ruleset";
wantedBy = [ "multi-user.target" ];
after = [ "network-pre.target" ];
serviceConfig = {
Type = "oneshot";
RemainAfterExit = true;
ExecStart = "${pkgs.iptables}/bin/iptables-restore --wait /etc/iptables.rules";
};
};
image.modules.linode = { };
networking.hostName = "vps";
security.sudo-rs.extraRules = [
{
users = [ "nixremote" ];
commands = [
{
command = "/run/current-system/sw/bin/nixos-rebuild";
options = [ "NOPASSWD" ];
}
];
}
];
services.openssh.ports = [ 3456 ];
sops.age = {
generateKey = true;
keyFile = "/var/lib/sops-nix/key.txt";
sshKeyPaths = [ "/etc/ssh/ssh_host_ed25519_key" ];
};
users = {
groups = {
deploy = { };
lidarr-reports = { };
};
users = {
deploy = {
isSystemUser = true;
group = "deploy";
openssh.authorizedKeys.keyFiles = [ ../../secrets/ssh/ed25519_deploy.pub ];
};
lidarr-reports = {
isSystemUser = true;
group = "lidarr-reports";
openssh.authorizedKeys.keyFiles = [ ../../secrets/ssh/ed25519_lidarr-reports.pub ];
};
};
};
environment.systemPackages = [ ];
}

View File

@@ -9,6 +9,7 @@
kernelModules = [ ];
extraModulePackages = [ ];
kernelParams = [ "console=ttyS0,19200n8" ];
kernel.sysctl."net.ipv4.conf.wg0.rp_filter" = 0;
initrd.availableKernelModules = [
"virtio_pci"
"virtio_scsi"

61
hosts/vps/toggles.nix Normal file
View File

@@ -0,0 +1,61 @@
# Feature toggles for the `vps` host; imported into the `my` namespace by
# hosts/vps/configuration.nix.
{ config, inputs }:
let
inherit (inputs.self.lib)
enableList
mkEnabled
mkEnabledWithUsers
;
# All proxied services live on the wg-server WireGuard peer.
wgServerIp = config.my.ips.wg-server;
mkEnabledProxyIp = inputs.self.lib.mkEnabledProxyIp wgServerIp;
in
{
enableProxy = true;
enableContainers = true;
apps.dictionaries.enable = true;
apps.dictionaries.users = "jawz";
services = enableList mkEnabled [
"network"
"wireguard"
];
shell = enableList mkEnabledWithUsers [
"multimedia"
"tools"
];
dev = enableList mkEnabledWithUsers [
"nix"
"sh"
];
servers = {
# Nextcloud is configured explicitly (fixed ip/port) rather than through
# the mkEnabledProxyIp helper used for the list below.
nextcloud = {
enableProxy = true;
ip = wgServerIp;
port = 8081;
};
}
# Everything else gets the default proxy settings pointed at wg-server.
// enableList mkEnabledProxyIp [
"audiobookshelf"
"bazarr"
"collabora"
"gitea"
"homepage"
"isso"
"jellyfin"
"kavita"
"keycloak"
"lidarr"
"linkwarden"
"maloja"
"mealie"
"metube"
"microbin"
"multi-scrobbler"
"oauth2-proxy"
"plausible"
"plex"
"prowlarr"
"radarr"
"sonarr"
"vaultwarden"
"yamtrack"
];
}

126
iptables Normal file
View File

@@ -0,0 +1,126 @@
# Generated by iptables-save v1.8.11 (nf_tables) on Fri Jan 2 03:44:23 2026
# VPS firewall: SSH on 3456, WireGuard forwarding between the 10.8/10.9 VPN
# subnets and the home server (10.77.0.2), plus DNAT/SNAT for a few ports.
*mangle
:PREROUTING ACCEPT [95853893:179831236298]
:INPUT ACCEPT [94316554:179510512585]
:FORWARD ACCEPT [1536524:320567864]
:OUTPUT ACCEPT [49857522:93072472240]
:POSTROUTING ACCEPT [51393797:93393029789]
COMMIT
# Completed on Fri Jan 2 03:44:23 2026
# Generated by iptables-save v1.8.11 (nf_tables) on Fri Jan 2 03:44:23 2026
*raw
:PREROUTING ACCEPT [95853893:179831236298]
:OUTPUT ACCEPT [49857522:93072472240]
COMMIT
# Completed on Fri Jan 2 03:44:23 2026
# Generated by iptables-save v1.8.11 (nf_tables) on Fri Jan 2 03:44:23 2026
*filter
:INPUT ACCEPT [94315678:179510353216]
:FORWARD ACCEPT [46534:2774394]
:OUTPUT ACCEPT [49857520:93072471971]
# --- Incoming (INPUT) rules for VPS itself ---
# Accept SSH on port 3456 (new SSH port)
# allow SSH to VPS
-A INPUT -p tcp --dport 3456 -m conntrack --ctstate NEW -j ACCEPT
# allow established connections (responses)
-A INPUT -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
# NOTE(review): INPUT policy is ACCEPT with no trailing DROP, so every other
# locally-bound port is reachable — confirm this is intentional.
# (Optionally, add other INPUT rules for any services the VPS itself runs, if any, like HTTP/HTTPS if needed)
# If a default DROP policy is desired on INPUT, or an explicit drop rule:
# -A INPUT -j DROP # (optional: lock down any other input)
# --- Forwarding (FORWARD) rules for VPN traffic ---
# allow return traffic for established sessions
-A FORWARD -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
# Syncthing between 10.8.0.2 and home server
# 10.8.0.2 -> 10.77.0.0 Syncthing
-A FORWARD -s 10.8.0.2/32 -d 10.77.0.2/32 -p tcp --dport 22000 -j ACCEPT
-A FORWARD -s 10.8.0.3/32 -d 10.77.0.2/32 -p tcp --dport 22000 -j ACCEPT
-A FORWARD -s 10.8.0.4/32 -d 10.77.0.2/32 -p tcp --dport 22000 -j ACCEPT
-A FORWARD -s 10.8.0.5/32 -d 10.77.0.2/32 -p tcp --dport 22000 -j ACCEPT
# home -> 10.8.0.0 Syncthing
-A FORWARD -s 10.77.0.2/32 -d 10.8.0.2/32 -p tcp --dport 22000 -j ACCEPT
-A FORWARD -s 10.77.0.2/32 -d 10.8.0.3/32 -p tcp --dport 22000 -j ACCEPT
-A FORWARD -s 10.77.0.2/32 -d 10.8.0.4/32 -p tcp --dport 22000 -j ACCEPT
-A FORWARD -s 10.77.0.2/32 -d 10.8.0.5/32 -p tcp --dport 22000 -j ACCEPT
# Matrix/Synapse access from 10.8 subnet to home server
# allow Matrix client port
-A FORWARD -s 10.8.0.0/24 -d 10.77.0.2/32 -p tcp --dport 8008 -j ACCEPT
# allow Matrix federation port
-A FORWARD -s 10.8.0.0/24 -d 10.77.0.2/32 -p tcp --dport 8448 -j ACCEPT
# allow TURN/other (if used)
-A FORWARD -s 10.8.0.0/24 -d 10.77.0.2/32 -p tcp --dport 8999 -j ACCEPT
# ICMP between 10.8 subnet and home
# ping home from 10.8 clients
-A FORWARD -s 10.8.0.0/24 -d 10.77.0.2/32 -p icmp -j ACCEPT
# ping 10.8 clients from home
-A FORWARD -s 10.77.0.2/32 -d 10.8.0.0/24 -p icmp -j ACCEPT
# New Friend's subnet (10.9) access rule
# Fix: these rules were written as 10.9.0.2/24; with a /24 mask the kernel
# ignores the host bits and matches 10.9.0.0/24 anyway, so they are rewritten
# as the network address to say what they actually do (matching the DROP
# rules below). Use /32 instead if only the single host 10.9.0.2 is intended.
# allow new subnet to access port 9999 on home
-A FORWARD -s 10.9.0.0/24 -d 10.77.0.2/32 -p tcp --dport 9999 -j ACCEPT
# allow ping to home
-A FORWARD -s 10.9.0.0/24 -d 10.77.0.2/32 -p icmp -j ACCEPT
# allow ping reply from home
-A FORWARD -s 10.77.0.2/32 -d 10.9.0.0/24 -p icmp -j ACCEPT
# Allow VPN subnets to reach Internet (MASQUERADE will SNAT them)
# 10.8 clients to internet
-A FORWARD -s 10.8.0.0/24 -o eth0 -j ACCEPT
# 10.9 clients to internet
-A FORWARD -s 10.9.0.0/24 -o eth0 -j ACCEPT
# Drop all other traffic between these subnets and home or between subnets (isolation)
# drop any 10.8 -> home not allowed
-A FORWARD -s 10.8.0.0/24 -d 10.77.0.0/24 -j DROP
# drop any home -> 10.8 not allowed
-A FORWARD -s 10.77.0.0/24 -d 10.8.0.0/24 -j DROP
# drop any 10.9 -> home not allowed (except 9999/ping above)
-A FORWARD -s 10.9.0.0/24 -d 10.77.0.0/24 -j DROP
# drop any home -> 10.9 not allowed
-A FORWARD -s 10.77.0.0/24 -d 10.9.0.0/24 -j DROP
# drop 10.9 -> 10.8 (no client-to-client)
-A FORWARD -s 10.9.0.0/24 -d 10.8.0.0/24 -j DROP
# drop 10.8 -> 10.9
-A FORWARD -s 10.8.0.0/24 -d 10.9.0.0/24 -j DROP
COMMIT
*nat
:PREROUTING ACCEPT [3368888:178175988]
:INPUT ACCEPT [3348703:174454011]
:OUTPUT ACCEPT [30120:1902454]
:POSTROUTING ACCEPT [32339:2018208]
# Port forwarding (DNAT) rules:
# forward SSH (port 22) to home server
-A PREROUTING -p tcp --dport 22 -j DNAT --to-destination 10.77.0.2:22
# forward port 51412 to home (TCP)
-A PREROUTING -p tcp --dport 51412 -j DNAT --to-destination 10.77.0.2:51412
# forward port 51412 to home (UDP)
-A PREROUTING -p udp --dport 51412 -j DNAT --to-destination 10.77.0.2:51412
# (Remove the above 51412 rules if not used; keep 22 as it's for Gitea's SSH access)
# Masquerade (SNAT) rules:
# masquerade replies from home for SSH
-A POSTROUTING -d 10.77.0.2/32 -p tcp --dport 22 -j MASQUERADE
# masquerade replies for 51412 (TCP)
-A POSTROUTING -d 10.77.0.2/32 -p tcp --dport 51412 -j MASQUERADE
# masquerade replies for 51412 (UDP)
-A POSTROUTING -d 10.77.0.2/32 -p udp --dport 51412 -j MASQUERADE
# (If 51412 rules removed above, remove their masquerade lines too)
# NAT for 10.8.0.x clients to internet
-A POSTROUTING -s 10.8.0.0/24 -o eth0 -j MASQUERADE
# NAT for 10.9.0.x clients to internet
-A POSTROUTING -s 10.9.0.0/24 -o eth0 -j MASQUERADE
COMMIT

634
jawz_hist Normal file
View File

@@ -0,0 +1,634 @@
exit
cd
ls
ls .ssh
ls ~/.ssh/
ls -lag
ls -la
sudo chown -R jawz:jawz ./
ls -lag
ls -la
ls .ssh/
ls .ssh/ -la
sudo systemctl enable --now wg-quick@wg0
sudo nano /etc/sysctl.d/99-ipforward.conf
ls
sudo -i
sudo systemctl status sshd.service
sudo systemctl restart sshd.service
journalctl -xeu sshd
sudo -i
sudo systemctl status sshd
sudo ss -ltnp | grep ssh
sudo semanage port -l | grep ssh_port_t
sudo ss -ltnp | grep 3456 || sudo ss -ltnp | grep sshd
ping google.com
sudo systemctl stop wg-quick@wg0.service
ping google.com
sudo systemctl disable wg-quick@wg0.service
exi
exit
sudo rmdir /etc/caddy/Caddyfile.d/
sudo -i
exit
ls
rm histfile
rm iptables*
ls
rm sudo_histfile
cat syncthingblocked
rm syncthingblocked
ls
exit
sudoedit /etc/wireguard/wg0.conf
export TERM=xterm-256color
sudoedit /etc/wireguard/wg0.conf
sudo systemctl restart wg-quick
sudo systemctl restart wg-quick@wg0.service
sudoedit /etc/wireguard/wg0.conf
sudo -i
sudo tcpdump
sudo dnf install tcpdump
sudo tcpdump -i wg0 host 10.77.0.2 -n -v
sudoedit /etc/sysconfig/iptables
export TERM=xterm-256color
sudoedit /etc/sysconfig/iptables
sudo systemctl restart iptables.service
ping google.com
sudo ss -ltnp | grep 3456 || sudo ss -ltnp | grep sshd
sudo sed -n '1,200p' /etc/ssh/sshd_config /etc/ssh/sshd_config.d/*.conf 2>/dev/null | egrep -n '^(Port|ListenAddress)'
sudo iptables -S
cat /etc/sysconfig/iptables
sudo cat /etc/sysconfig/iptables
sudo systemctl enable --now iptables
sudo systemctl start iptables
sudo systemctl restart iptables
sudo iptables -S
sudo systemctl enable wg-quick@wg0
sudo systemctl start wg-quick@wg0
ping google.com
sudo -i
sudo wg sow
sudo wg show
ls
cd /etc/caddy/Caddyfile.d/
ls
cat fun.caddyfile__
ls
clear
mv portfolio.caddyfile_bkp portfolio.caddyfile
sudo mv portfolio.caddyfile_bkp portfolio.caddyfile
sudo systemctl restart caddy
clear
export TERM=xterm-256color
iptables-s
sudo iptables -S
sudo iptables -s
sudo iptables -S
clear
cat /etc/sysconfig/iptables
sudo cat /etc/sysconfig/iptables
sudo -i
sudo reboot
exit
ping google.com
sudo systemctl restart iptables
sudo systemctl enable iptables
exit
sudo -i
exit
sudo iptables -vnL FORWARD | grep 22000
sudo -i
sudo iptables -L FORWARD -n -v --line-numbers
cat /etc/sysconfig/iptables
sudo cat /etc/sysconfig/iptables
sudoedit /etc/sysconfig/iptables
export TERM=xterm-256color
sudoedit /etc/sysconfig/iptables
clear
sudo cat /etc/sysconfig/iptables
sudoedit /etc/sysconfig/iptables
sudo systemctl restart iptables.service
sudoedit /etc/sysconfig/iptables
wg show
sudo wg show
ping -c 3 10.8.0.2
nc -zv 10.77.0.2 22000
sudo -i
exit
sudo -i
exit
sudo systemctl disable iptables
sudo systemctl enable iptables
sudo systemctl status iptables
sudo systemctl start iptables
sudo -i
exit
sudo dnf install starship
sudo dnf copr enable atim/starship
sudo dnf install starship
nano .bashrc
export TERM=xterm-256color
nano .bashrc
bash
exit
nano /etc/hostname
export TERM=xterm-256color
nano /etc/hostname
sudoedit /etc/hostname
exit
sudoedit /etc/caddy/Caddyfile.d/20-servers.caddyfile
export TERM=xterm-256color
sudoedit /etc/caddy/Caddyfile.d/20-servers.caddyfile
export EDITOR=neovim
sudoedit /etc/caddy/Caddyfile.d/20-servers.caddyfile
EDITOR=neovim sudoedit /etc/caddy/Caddyfile.d/20-servers.caddyfile
EDITOR=nvim sudoedit /etc/caddy/Caddyfile.d/20-servers.caddyfile
sudo -i
exit
sudoedit /etc/caddy/Caddyfile.d/20-servers.caddyfile
exit
sudoedit /etc/caddy/Caddyfile.d/20-servers.caddyfile
sudo -i
exit
export TERM=xterm-256color
sudoedit /etc/caddy/Caddyfile.d/20-servers.caddyfile
sudo systemctl restart caddy
export TERM=xterm-256color
sudoedit /etc/caddy/Caddyfile.d/20-servers.caddyfile
sudo systemctl restart caddy
sudo -i
exit
sudo mkdir -p /var/www/html
sudo mkdir -p /var/www/html/lidarr-mb-gap
sudo useradd -m -s /bin/bash lidarr-reports
sudo chown -R lidarr-reports:lidarr-reports /var/www/html/lidarr-mb-gap/
exit
sudo -u lidarr-reports bash
exit
sudo -u lidarr-reports
sudo -u lidarr-reports bash
sudo -i
exit
sudo -u lidarr-mb-gap cat /var/lib/lidarr-mb-gap/.ssh/id_ed25519.pub
exit
sudo -u lidarr-reports
sudo -u lidarr-reports bash
exit
sudo -u lidarr-reports ssh-keygen -l -f /home/lidarr-reports/.ssh/ed25519_lidarr-mb-gap.pub
exit
sudo -u lidarr-reports -u bash
sudo -u lidarr-reports bash
exit
sudo nvim /etc/caddy/Caddyfile.d/25-static.caddyfile
exit
sudo dnf install rsync
sudo nvim /etc/caddy/Caddyfile.d/25-static.caddyfile
sudo systemctl restart caddy.service
ls
cd /var/www/html/lidarr-mb-gap/
ls
sudo nvim /etc/caddy/Caddyfile.d/25-static.caddyfile
sudo systemctl restart caddy.service
nc -zv 10.77.0.2 8999
sudo nvim /etc/caddy/Caddyfile.d/20-servers.caddyfile
sudo nvim /etc/caddy/Caddyfile.d/5-keycloak.caddyfile
sudo nvim /etc/caddy/Caddyfile.d/10-nextcloud.caddyfile
sudo nvim /etc/caddy/Caddyfile.d/5-keycloak.caddyfile
sudo systemctl restart caddy
ls
cd /etc/wireguard/
sudo -i
exit
cd /etc/caddy/Caddyfile.d/
ls
nvim 15-private.caddyfile
mv 15-private.caddyfile 15-private.caddyfile_
sudo mv 15-private.caddyfile 15-private.caddyfile_
nvim 15-private.caddyfile
sudo nvim 15-private.caddyfile
sudo systemctl restart caddy
exit
cd /etc/caddy/Caddyfile.d/
sudo nvim 15-private.caddyfile
sudo systemctl restart caddy
exit
cd /etc/caddy/Caddyfile.d/
sudo nvim 15-private.caddyfile
sudo systemctl restart caddy
sudo nvim 15-private.caddyfile
sudo systemctl restart caddy
exit
sudo nvim /etc/caddy/Caddyfile.d/5-keycloak.caddyfile
sudo nvim /etc/caddy/Caddyfile.d/10-nextcloud.caddyfile
sudo nvim /etc/caddy/Caddyfile.d/5-keycloak.caddyfile
sudo systemctl restart caddy
sudo nvim 15-private.caddyfile
cd /etc/caddy/Caddyfile.d/
sudo nvim 15-private.caddyfile
sudo systemctl restart caddy
sudo nvim 15-private.caddyfile
cat 15-private.caddyfile
sudo nvim 15-private.caddyfile
sudo systemctl restart caddy
sudo nvim 15-private.caddyfile
sudo nvim 15-private.caddyfile_
sudo nvim 15-private.caddyfile
sudo systemctl restart caddy
exit
sudo systemctl restart caddy
sudo nvim
cd /etc/caddy/Caddyfile.d/
sudo nvim 15-private.caddyfile
cat 15-private.caddyfile_
sudo nvim 15-private.caddyfile
cat 15-private.caddyfile
sudo nvim 15-private.caddyfile
sudo systemctl restart caddy
sudo nvim 15-private.caddyfile
sudo nvim /etc/caddy/Caddyfile.d/15-private.caddyfile
sudo systemctl restart caddy
systemctl status caddy
sudo nvim /etc/caddy/Caddyfile.d/15-private.caddyfile
sudo systemctl restart caddy
cd /etc/caddy/Caddyfile.d/
ls
sudo nvim 20-servers.caddyfile
sudo nvim 40-jellyfin.caddyfile
sudo systemctl restart jel
sudo systemctl restart caddy
cd /etc/caddy/Caddyfile.d/
ls
mv 15-private.caddyfile 15-private.caddyfile__
sudo mv 15-private.caddyfile 15-private.caddyfile__
sudo mv 15-private.caddyfile_ 15-private.caddyfile
sudo systemctl restart caddy
exit
dig servidos.lat A
sudo dnf install dig
dig servidos.lat A
exit
curl servidos.lat
exit
curl servidos.lat
dig servidos.lat A
curl -v 130.211.27.102
curl -v 130.211.27.102:443
curl -v https://130.211.27.102
curl servidos.lat
curl https://servidos.lat
curl-v https://servidos.lat
curl -v https://servidos.lat
dig servidos.lat A
exit
dig servidos.lat A
exit
dig servidos.lat A
exit
dig servidos.lat A
exit
dig servidos.lat A
exit
dig servidos.lat A
curl -v https://servidos.lat
exit
sudo useradd -m -s /bin/bash deploy
sudo groupadd -f www-data
sudo usermod -aG www-data deploy
ls -lag /var/www/html/
sudo mkdir /var/www/html/portfolio
sudo chown -R root:www-data /var/www/html/portfolio/
sudo chmod -R 775 /var/www/html/portfolio/
ssh-keygen -t ed25519 -C "deploy@portfolio" -f ~/.ssh/portfolio_deploy
cat ~/.ssh/portfolio_deploy.pub
sudo -u deploy
sudo -u deploy bash
ls
ls -lag
cat ~/.ssh/portfolio_deploy
exit
su
sudo -u
sudo -i
cat ~/.ssh/portfolio_deploy
exit
sudo systemctl restart iptables
exit
ls
ls ~/.ssh/authorized_keys
cat ~/.ssh/authorized_keys
sudo systemctl restart iptables.service
sudo systemctl status iptables.service
cat /etc/sysconfig/iptables
sudo cat /etc/sysconfig/iptables
exit
ls
exit
cd /var/www/html/portfolio/
ls -lag
ls
sudo -u deploy bash
ls
exit
sudo systemctl restart caddy
cd /var/www/html/portfolio/
ls
sudo nvim /etc/caddy/Caddyfile.d/25-static.caddyfile
sudo systemctl restart caddy
sudo chown -R deploy:www-data /var/www/html/portfo
sudo chown -R deploy:www-data /var/www/html/portfolio/
exit
sudo nvim /etc/caddy/Caddyfile.d/25-static.caddyfile
sudo mkdir /var/www/html/blog
sudo chown deploy:www-data /var/www/html/blog/ -R
sudo nvim /etc/caddy/Caddyfile.d/25-static.caddyfile
sudo cat /etc/caddy/Caddyfile.d/25-static.caddyfile
sudo nvim /etc/caddy/Caddyfile.d/25-static.caddyfile
sudo systemctl restart caddy
sudo chmod -R 775 /var/www/html/portfolio
ls -la /var/www/html/portfolio/
sudo chown -$ deploy:www-data /var/www/html/portfolio/
sudo chown -R deploy:www-data /var/www/html/portfolio/
sudo -i
ls -la /var/www/html/portfolio/friends/ | grep "001_chicken_hu"
sudo cat /etc/caddy/Caddyfile.d/25-static.caddyfile
df -h
sudo nvim /etc/caddy/Caddyfile.d/20-servers.caddyfile
sudo systemctl restart caddy && exit
sudo nvim /etc/caddy/Caddyfile.d/20-servers.caddyfile
sudo systemctl restart caddy && exit
sudo nvim /etc/caddy/Caddyfile.d/20-servers.caddyfile
sudo nvim /etc/caddy/Caddyfile.d/25-static.caddyfile
sudo systemctl restart caddy
journalctl -xeu caddy.service
sudo nvim /etc/caddy/Caddyfile.d/25-static.caddyfile
sudo systemctl restart caddy
journalctl -xeu caddy.service
sudo nvim /etc/caddy/Caddyfile.d/25-static.caddyfile
sudo systemctl restart caddy
sudo nvim /etc/caddy/Caddyfile.d/20-servers.caddyfile
sudo systemctl restart caddy
sudo nvim /etc/caddy/Caddyfile.d/25-static.caddyfile
sudo nvim /etc/caddy/Caddyfile.d/20-servers.caddyfile
sudo systemctl restart caddy && exit
sudo nvim /etc/caddy/Caddyfile.d/20-servers.caddyfile
curl -sI "https://danilo-reyes.com/isso/js/embed.min.js"
sudo nvim /etc/caddy/Caddyfile.d/20-servers.caddyfile
sudo nvim /etc/caddy/Caddyfile.d/25-static.caddyfile
sudo systemctl restart caddy && exit
curl -sI "https://danilo-reyes.com/isso/js/embed.min.js"
curl -vkI https://blog.danilo-reyes.com/isso/js/embed.min.js
sudo cat /etc/caddy/Caddyfile.d/25-static.caddyfile
sudo nvim /etc/caddy/Caddyfile.d/25-static.caddyfile
sudo systemctl restart caddy
sudo cat /etc/caddy/Caddyfile.d/25-static.caddyfile
sudo systemctl restart caddy
curl -vkI https://blog.danilo-reyes.com/isso/
curl -vkI https://blog.danilo-reyes.com/isso/js/embed.min.js
curl -vkI http://10.77.0.2:8180/
curl -vkI http://10.77.0.2:8180/js/embed.min.js
curl -vkI http://10.77.0.2:8180/
curl -vkI http://10.77.0.2:8180/js/embed.min.js
curl -vkI https://blog.danilo-reyes.com/isso/js/embed.min.js
curl -vkI https://blog.danilo-reyes.com/isso/
curl -vkI https://blog.danilo-reyes.com/isso
9;6u
timedatectl status
date-u
date -u
sudo nvim /etc/caddy/Caddyfile.d/20-servers.caddyfile
sudo systemctl restart caddy
sudo nvim /etc/caddy/Caddyfile.d/25-static.caddyfile
sudo systemctl restart caddy
exit
sudo cat /etc/caddy/Caddyfile.d/25-static.caddyfile
sudo nvim /etc/caddy/Caddyfile.d/25-static.caddyfile
sudo systemctl restart caddy
exit
sudo cat /etc/caddy/Caddyfile.d/25-static.caddyfile
sudo cat /etc/caddy/Caddyfile.d/20-servers.caddyfile
sudo dnf search opentracker
sudo dnf install -y git gcc make libowfat-devel
git clone https://erdgeist.org/gitweb/opentracker
cd opentracker/
make
sudo dnf install -y libowfat-devel
make clean
make CFLAGS="-I/usr/include/libowfat"
sudo dnf install -y zlib-devel
make CFLAGS="-I/usr/include/libowfat"
git submodule update --init
make clean
make
ls
cd ..
git clone git@github.com:masroore/libowfat.git
sudo dnf install libowfat
git clone git@github.com:masroore/libowfat.git
podman
docker
exit
sudo dnf copr enable dlk/rpms
sudo dnf install opentracker
rm opentracker/
rm opentracker/ -rf
sudo systemctl enable --now opentracker
sudo systemctl status opentracker
sudo cat /etc/opentracker.conf
sudo nvim /etc/opentracker.conf
sudo nvim /etc/caddy/Caddyfile.d/15-private.caddyfile
sudo grep -r 6969 /etc/caddy/Caddyfile.d/
sudo nvim /etc/opentracker.conf
sudo systemctl restart opentracker.service
sudo systemctl status opentracker
sudo nvim /etc/opentracker.conf
sudo systemctl restart opentracker.service
sudo systemctl status opentracker
sudo install -d -m 0750 /var/lib/opentracker
sudo install -m 0640 /dev/null /var/lib/opentracker/whitelist
sudo install -m 0640 /dev/null /var/lib/opentracker/blacklist
sudo systemctl restart opentracker.service
sudo systemctl status opentracker
ls -lag /var/lib/opentracker/
sudo ls -lag /var/lib/opentracker/
sudo nvim /etc/opentracker.conf
sudo systemctl restart opentracker.service
sudo systemctl status opentracker
sudo chmod 666 /var/lib/opentracker/blacklist
sudo systemctl restart opentracker.service
sudo systemctl status opentracker
sudo iptables -A INPUT -p tcp --dport 6969 -j ACCEPT
sudo iptables -A INPUT -p udp --dport 6969 -j ACCEPT
sudo iptables -A INPUT -m conntrack --ctstate ESTABLISHED,RELATED -j ACCEPT
sudo iptables -L INPUT -n -v --line-numbers | grep 6969
sudo service iptables save
exit
ls /etc/wireguard/
sudo ls /etc/wireguard/
sudo cat /etc/wireguard/wg0.conf
cat /etc/sysctl.d/99-forward.conf
sudo ls /etc/sysctl.d
cat /etc/sysctl.d/99-ipforward.conf
sudo sysctl net.ipv4.ip_forward
sudo -i
sudo systemctl status opentracker
journalctl -xefu opentracker
ss -tnp | grep 6969
sudo sysctl -w net.ipv4.conf.all.rp_filter=0
sudo sysctl -w net.ipv4.conf.eth0.rp_filter=0
sudo sysctl -w net.ipv4.conf.wg0.rp_filter=0
journalctl -xefu opentracker
sudo cat /etc/sysconfig/iptables
sysctl -w net.ipv4.ip_forward=1
# ---- NAT (insert at top) ----
iptables -t nat -I PREROUTING 1 -i eth0 -p tcp --dport 51412 -j DNAT --to-destination 10.77.0.2:51412
iptables -t nat -I PREROUTING 2 -i eth0 -p udp --dport 51412 -j DNAT --to-destination 10.77.0.2:51412
iptables -t nat -I POSTROUTING 1 -s 10.77.0.0/24 -o eth0 -j MASQUERADE
# ---- FORWARD ----
iptables -I FORWARD 1 -i eth0 -o wg0 -p tcp -d 10.77.0.2 --dport 51412 -m conntrack --ctstate NEW,ESTABLISHED,RELATED -j ACCEPT
iptables -I FORWARD 2 -i eth0 -o wg0 -p udp -d 10.77.0.2 --dport 51412 -j ACCEPT
iptables -I FORWARD 3 -i wg0 -o eth0 -s 10.77.0.2 -p tcp --sport 51412 -m conntrack --ctstate ESTABLISHED,RELATED -j ACCEPT
iptables -I FORWARD 4 -i wg0 -o eth0 -s 10.77.0.2 -p udp --sport 51412 -j ACCEPT
iptables -I FORWARD 5 -i wg0 -o eth0 -j ACCEPT
iptables -I FORWARD 6 -i eth0 -o wg0 -m conntrack --ctstate ESTABLISHED,RELATED -j ACCEPT
net.ipv4.ip_forward = 1
sudo -i
mkfs.ext4 "/dev/disk/by-id/scsi-0Linode_Volume_box"
sudo -i
mkdir /mnt/box/downloads
sudo mkdir /mnt/box/downloads
sudo chown jawz:users /mnt/box/downloads/
ls -la
sudo chown jawz:jawz /mnt/box/downloads/
qbittorrent-nox
sudo useradd --system --create-home --home-dir /var/lib/qbittorrent --shell /sbin/nologin qbittorrent
sudo mkdir -p /srv/torrents/{downloads,incomplete,watch}
sudo chown -R qbittorrent:qbittorrent /srv/torrents /var/lib/qbittorrent
sudo tee /etc/systemd/system/qbittorrent-nox.service >/dev/null <<'EOF'
[Unit]
Description=qBittorrent (nox)
After=network-online.target
Wants=network-online.target
[Service]
Type=simple
User=qbittorrent
Group=qbittorrent
UMask=0027
WorkingDirectory=/var/lib/qbittorrent
ExecStart=/usr/bin/qbittorrent-nox --profile=/var/lib/qbittorrent
Restart=on-failure
RestartSec=3
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
sudo systemctl daemon-reload
sudo systemctl enable --now qbittorrent-nox
sudo systemctl status qbittorrent-nox --no-pager
sudo -u qbittorrent nano /var/lib/qbittorrent/qBittorrent/config/qBittorrent.conf
sudo systemctl restart qbittorrent-nox
sudo nvim /etc/caddy/Caddyfile.d/75-qbittorrent.caddyfile
sudo -u qbittorrent nano /var/lib/qbittorrent/qBittorrent/config/qBittorrent.conf
sudo systemctl stop qbittorrent-nox
sudo -u qbittorrent nano /var/lib/qbittorrent/qBittorrent/config/qBittorrent.conf
sudo systemctl start qbittorrent-nox
sudo -u qbittorrent nano /var/lib/qbittorrent/qBittorrent/config/qBittorrent.conf
sudo nvim /etc/caddy/Caddyfile.d/75-qbittorrent.caddyfile
sudo systemctl restart caddy
sudo systemctl status qbittorrent-nox --no-pager
ls
cat /etc/sysconfig/iptables
sudo cat /etc/sysconfig/iptables
ls /mnt/
ls /mnt/box/
rm /mnt/box/downloads/
rmdir /mnt/box/downloads/
sudo rmdir /mnt/box/downloads/
sudo mv /srv/torrents/* /mnt/box/
sudo umount /mnt/box
sudo nvim /etc/fstab
sudo mount -a
sudo systemctl daemon-reload
sudo mount -a
ls -lag /srv/torrents/
sudo -u qbittorrent nano /var/lib/qbittorrent/qBittorrent/config/qBittorrent.conf
cd /var/lib/qbittorrent/
sudo -i
exit
sudo -i
ssh server
exitr
exit
ls /srv/torrents/
sudo mkdir /srv/torrents/tits
sudo chown jawz:jawz /srv/torrents/tits/
ls /srv/torrents/tits/
sudo -i
sudo nvim /etc/caddy/Caddyfile.d/20-servers.caddyfile
sudo systemctl restart caddy
exit
ls
df -h
ssh server
exit
clear
sudoedit /etc/sysconfig/iptables
exit
sudo grep 6060 /etc/
sudo grep 6060 /etc/ -r
sudo grep -r 6969 /etc/
sudo cat /etc/ssh/sshd_config
ls
clear
exit
cat /etc/sysconfig/iptables
sudo cat /etc/sysconfig/iptables
sudo ls /etc/wireguard/
sudo cat /etc/wireguard/wg0.conf
sudo -i
exit
sudo -i
sudo -i
sudo -i
iptables -S
sudo iptables -S
sudo nvim /etc/wireguard/wg0.conf
exit
curl # Test paperless (should fail)
curl -v --connect-timeout 5 http://192.168.100.15:8000
# Test sabnzbd (should fail)
curl -v --connect-timeout 5 http://192.168.100.15:3399
curl -v --connect-timeout 5 http://192.168.100.15:8686
sudo wg show
exit
sudo systemctl restart wg-quick@wg0.service
exit
sudo nvim /etc/wireguard/wg0.conf
sudo systemctl restart wg-quick@wg0.service
sudo nvim /etc/wireguard/wg0.conf
exit
sudo wg show
exit
sudo nvim /etc/sysconfig/iptables
sudo systemctl restart iptables.service
exit
sudo systemctl restart wg-quick@wg0.service
sudo nvim /etc/caddy/Caddyfile.d/20-servers.caddyfile
sudo systemctl restart caddy
z nixos
exit
cat .ssh/id_ed25519.pub
cat .ssh/id_ed25519
exit
cat /etc/sysconfig/iptables
sudo cat /etc/sysconfig/iptables
exit
sudo -i
ls
cat vps_public.key
ls .ssh/authorized_keys
cat .ssh/authorized_keys
exit

View File

@@ -48,6 +48,10 @@ let
type = lib.types.bool;
default = false;
};
useDefaultProxy = lib.mkOption {
type = lib.types.bool;
default = true;
};
certPath = lib.mkOption {
type = lib.types.nullOr lib.types.path;
default = null;

View File

@@ -49,7 +49,7 @@ in
server = "192.168.100.15";
miniserver = "192.168.1.100";
workstation = "192.168.100.18";
vps = "45.79.25.87";
vps = "45.33.0.228";
wg-vps = "10.77.0.1";
wg-server = "10.77.0.2";
wg-g1 = "10.9.0.2";
@@ -68,12 +68,13 @@ in
server = "enp0s31f6";
miniserver = "enp2s0";
workstation = "enp5s0";
vps = "eth0";
};
description = "Set of network interface names for all my computers.";
};
mainServer = lib.mkOption {
type = lib.types.str;
default = "miniserver";
default = "vps";
description = "The hostname of the main server.";
};
postgresSocket = lib.mkOption {

View File

@@ -5,44 +5,24 @@
...
}:
let
proxyReverseServices = [
"bazarr"
"firefox-syncserver"
"flame"
"flameSecret"
"isso"
"kavita"
"linkwarden"
"maloja"
"mealie"
"metube"
"microbin"
"multi-scrobbler"
"nix-serve"
"plausible"
"shiori"
"vaultwarden"
"yamtrack"
];
proxyReverseFixServices = [
"atticd"
"audiobookshelf"
"gitea"
"lidarr"
"ombi"
"prowlarr"
"radarr"
"sonarr"
"stash"
];
proxyReversePrivateServices = [
"homepage"
"prowlarr"
"stash"
];
mkServiceConfig =
type: services: lib.listToAttrs (map (name: lib.nameValuePair name { inherit type; }) services);
standardProxyServices =
(mkServiceConfig "proxyReverse" proxyReverseServices)
// (mkServiceConfig "proxyReverseFix" proxyReverseFixServices)
(mkServiceConfig "proxyReverseFix" proxyReverseFixServices)
// (mkServiceConfig "proxyReversePrivate" proxyReversePrivateServices);
generateProxyConfig =
serviceName: serviceConfig:
@@ -59,9 +39,21 @@ let
throw "Unknown proxy type: ${serviceConfig.type}";
in
lib.nameValuePair cfg.host (lib.mkIf cfg.enableProxy (proxyFunc cfg));
standardProxyNames = builtins.attrNames standardProxyServices;
customProxyServices =
config.my.servers
|> lib.filterAttrs (
name: srv:
(srv.enableProxy or false)
&& (srv.useDefaultProxy or true)
&& !(builtins.elem name standardProxyNames)
)
|> lib.mapAttrs (_name: _srv: { type = "proxyReverse"; });
in
{
config = lib.mkIf config.my.enableProxy {
services.nginx.virtualHosts = lib.mapAttrs' generateProxyConfig standardProxyServices;
services.nginx.virtualHosts = lib.mapAttrs' generateProxyConfig (
standardProxyServices // customProxyServices
);
};
}

View File

@@ -9,31 +9,33 @@ let
in
{
options.my.servers.homepage = setup.mkOptions "homepage" "home" 8082;
config = lib.mkIf config.my.secureHost {
sops.secrets = lib.mkIf cfg.enable {
homepage.sopsFile = ../../secrets/homepage.yaml;
"private-ca/pem" = {
config = lib.mkMerge [
(lib.mkIf (cfg.enable && config.my.secureHost) {
sops.secrets.homepage.sopsFile = ../../secrets/homepage.yaml;
services.homepage-dashboard = {
inherit (cfg) enable;
listenPort = cfg.port;
environmentFile = config.sops.secrets.homepage.path;
settings = {
providers.openweathermap = "{{HOMEPAGE_VAR_OPENWEATHERMAP_API_KEY}}";
layout = import ./homepage/layout.nix;
};
widgets = import ./homepage/widgets.nix;
services = import ./homepage/services.nix { inherit lib config; };
bookmarks =
builtins.readDir ./homepage/bookmarks
|> builtins.attrNames
|> builtins.filter (file: builtins.match ".*\\.nix" file != null)
|> map (file: import ./homepage/bookmarks/${file});
};
})
(lib.mkIf (cfg.enableProxy && config.my.enableProxy && config.my.secureHost) {
sops.secrets."private-ca/pem" = {
sopsFile = ../../secrets/certs.yaml;
owner = "nginx";
group = "nginx";
};
};
my.servers.homepage.certPath = config.sops.secrets."private-ca/pem".path;
services.homepage-dashboard = lib.mkIf cfg.enable {
inherit (cfg) enable;
listenPort = cfg.port;
environmentFile = config.sops.secrets.homepage.path;
settings = {
providers.openweathermap = "{{HOMEPAGE_VAR_OPENWEATHERMAP_API_KEY}}";
layout = import ./homepage/layout.nix;
};
widgets = import ./homepage/widgets.nix;
services = import ./homepage/services.nix { inherit lib config; };
bookmarks =
builtins.readDir ./homepage/bookmarks
|> builtins.attrNames
|> builtins.filter (file: builtins.match ".*\\.nix" file != null)
|> map (file: import ./homepage/bookmarks/${file});
};
};
my.servers.homepage.certPath = config.sops.secrets."private-ca/pem".path;
})
];
}

View File

@@ -23,22 +23,48 @@ let
in
{
options.my.servers.jellyfin = setup.mkOptions "jellyfin" "flix" 8096;
config = lib.mkIf (cfg.enable && config.my.secureHost) {
environment.systemPackages = [
pkgs.jellyfin-ffmpeg
]
++ (lib.optional cfg.enableCron [ sub-sync-path ]);
users.users.jellyfin = {
uid = 984;
group = "piracy";
isSystemUser = true;
};
services = {
jellyfin = {
config = lib.mkMerge [
(lib.mkIf (cfg.enable && config.my.secureHost) {
environment.systemPackages = [
pkgs.jellyfin-ffmpeg
]
++ (lib.optional cfg.enableCron [ sub-sync-path ]);
users.users.jellyfin = {
uid = 984;
group = "piracy";
isSystemUser = true;
};
services.jellyfin = {
inherit (cfg) enable;
group = "piracy";
};
nginx = lib.mkIf cfg.enableProxy {
systemd = lib.mkIf cfg.enableCron {
services.sub-sync = {
restartIfChanged = true;
description = "syncronizes subtitles downloaded & modified today";
wantedBy = [ "default.target" ];
path = sub-sync-path;
serviceConfig = {
Restart = "on-failure";
RestartSec = 30;
ExecStart = "${sub-sync}/bin/sub-sync all";
Type = "simple";
User = "root";
};
};
timers.sub-sync = {
enable = true;
description = "syncronizes subtitles downloaded & modified today";
wantedBy = [ "timers.target" ];
timerConfig = {
OnCalendar = "20:00";
};
};
};
})
(lib.mkIf (cfg.enableProxy && config.my.enableProxy) {
my.servers.jellyfin.useDefaultProxy = false;
services.nginx = {
appendHttpConfig = ''
# JELLYFIN
proxy_cache_path /var/cache/nginx/jellyfin levels=1:2 keys_zone=jellyfin:100m max_size=15g inactive=1d use_temp_path=off;
@@ -94,29 +120,6 @@ in
};
};
};
};
systemd = lib.mkIf cfg.enableCron {
services.sub-sync = {
restartIfChanged = true;
description = "syncronizes subtitles downloaded & modified today";
wantedBy = [ "default.target" ];
path = sub-sync-path;
serviceConfig = {
Restart = "on-failure";
RestartSec = 30;
ExecStart = "${sub-sync}/bin/sub-sync all";
Type = "simple";
User = "root";
};
};
timers.sub-sync = {
enable = true;
description = "syncronizes subtitles downloaded & modified today";
wantedBy = [ "timers.target" ];
timerConfig = {
OnCalendar = "20:00";
};
};
};
};
})
];
}

View File

@@ -10,35 +10,38 @@ let
in
{
options.my.servers.keycloak = setup.mkOptions "keycloak" "auth" 8090;
config = lib.mkIf (cfg.enable && config.my.secureHost) {
sops.secrets.postgres-password.sopsFile = ../../secrets/secrets.yaml;
sops.secrets.keycloak = {
sopsFile = ../../secrets/env.yaml;
restartUnits = [ "keycloak.service" ];
};
services.keycloak = {
inherit (cfg) enable;
database = {
type = "postgresql";
host = "localhost";
createLocally = false;
username = "keycloak";
name = "keycloak";
passwordFile = config.sops.secrets.postgres-password.path;
config = lib.mkMerge [
(lib.mkIf (cfg.enable && config.my.secureHost) {
sops.secrets.postgres-password.sopsFile = ../../secrets/secrets.yaml;
sops.secrets.keycloak = {
sopsFile = ../../secrets/env.yaml;
restartUnits = [ "keycloak.service" ];
};
settings = {
hostname = cfg.host;
hostname-strict = true;
hostname-strict-https = false;
http-enabled = true;
http-port = cfg.port;
http-host = cfg.ip;
proxy-headers = "xforwarded";
services.keycloak = {
inherit (cfg) enable;
database = {
type = "postgresql";
host = "localhost";
createLocally = false;
username = "keycloak";
name = "keycloak";
passwordFile = config.sops.secrets.postgres-password.path;
};
settings = {
hostname = cfg.host;
hostname-strict = true;
hostname-strict-https = false;
http-enabled = true;
http-port = cfg.port;
http-host = cfg.ip;
proxy-headers = "xforwarded";
};
};
};
systemd.services.keycloak.serviceConfig.EnvironmentFile = config.sops.secrets.keycloak.path;
services.nginx.virtualHosts.${cfg.host} = lib.mkIf (cfg.enableProxy && config.my.enableProxy) (
inputs.self.lib.proxyReverseFix cfg
);
};
systemd.services.keycloak.serviceConfig.EnvironmentFile = config.sops.secrets.keycloak.path;
})
(lib.mkIf (cfg.enableProxy && config.my.enableProxy) {
my.servers.keycloak.useDefaultProxy = false;
services.nginx.virtualHosts.${cfg.host} = inputs.self.lib.proxyReverseFix cfg;
})
];
}

View File

@@ -38,137 +38,141 @@ let
in
{
options.my.servers = {
nextcloud = setup.mkOptions "nextcloud" "cloud" 80;
nextcloud = setup.mkOptions "nextcloud" "cloud" 8081;
collabora = setup.mkOptions "collabora" "collabora" 9980;
go-vod.enable = lib.mkEnableOption "Go-VOD video transcoding service";
};
config = lib.mkIf (cfg.enable && config.my.servers.postgres.enable && config.my.secureHost) {
sops.secrets.nextcloud-adminpass = {
owner = config.users.users.nextcloud.name;
inherit (config.users.users.nextcloud) group;
};
nixpkgs.config.permittedInsecurePackages = [
"nodejs-14.21.3"
"openssl-1.1.1v"
];
users.groups.nextcloud = { inherit gid; };
users.users.nextcloud = {
inherit uid;
isSystemUser = true;
group = "nextcloud";
extraGroups = [ "render" ];
packages = builtins.attrValues {
inherit exiftool pytensorflow;
inherit (pkgs)
ffmpeg
mediainfo
nodejs
perl
;
config = lib.mkMerge [
{ my.servers.nextcloud.useDefaultProxy = false; }
(lib.mkIf (cfg.enable && config.my.servers.postgres.enable && config.my.secureHost) {
sops.secrets.nextcloud-adminpass = {
owner = config.users.users.nextcloud.name;
inherit (config.users.users.nextcloud) group;
};
};
services = {
nextcloud = {
enable = true;
https = false; # vps
package = pkgs.nextcloud32;
appstoreEnable = true;
configureRedis = true;
extraAppsEnable = true;
enableImagemagick = true;
maxUploadSize = "4096M";
hostName = cfg.host;
caching = {
redis = true;
memcached = true;
apcu = true;
nixpkgs.config.permittedInsecurePackages = [
"nodejs-14.21.3"
"openssl-1.1.1v"
];
users = {
groups.nextcloud = { inherit gid; };
users.nextcloud = {
inherit uid;
isSystemUser = true;
group = "nextcloud";
extraGroups = [ "render" ];
packages = builtins.attrValues {
inherit exiftool pytensorflow;
inherit (pkgs)
ffmpeg
mediainfo
nodejs
perl
;
};
};
config = {
adminpassFile = config.sops.secrets.nextcloud-adminpass.path;
dbtype = "pgsql";
dbhost = config.my.postgresSocket;
dbname = "nextcloud";
};
phpOptions = {
catch_workers_output = "yes";
display_errors = "stderr";
error_reporting = "E_ALL & ~E_DEPRECATED & ~E_STRICT";
expose_php = "Off";
preview_max_x = 2048;
preview_max_y = 2048;
short_open_tag = "Off";
"opcache.enable_cli" = "1";
"opcache.fast_shutdown" = "1";
"opcache.interned_strings_buffer" = "16";
"opcache.jit" = "1255";
"opcache.jit_buffer_size" = "256M";
"opcache.max_accelerated_files" = "10000";
"opcache.huge_code_pages" = "1";
"opcache.enable_file_override" = "1";
"opcache.memory_consumption" = "256";
"opcache.revalidate_freq" = "60";
"opcache.save_comments" = "1";
"opcache.validate_timestamps" = "0";
"openssl.cafile" = "/etc/ssl/certs/ca-certificates.crt";
};
settings = {
log_type = "file";
loglevel = 1;
trusted_proxies = [
config.my.localhost
config.my.localhost6
config.my.ips.router
config.my.ips.wg-vps
};
services = {
nextcloud = {
enable = true;
https = false; # vps
package = pkgs.nextcloud32;
appstoreEnable = true;
configureRedis = true;
extraAppsEnable = true;
enableImagemagick = true;
maxUploadSize = "4096M";
hostName = cfg.host;
caching = {
redis = true;
memcached = true;
apcu = true;
};
config = {
adminpassFile = config.sops.secrets.nextcloud-adminpass.path;
dbtype = "pgsql";
dbhost = config.my.postgresSocket;
dbname = "nextcloud";
};
phpOptions = {
catch_workers_output = "yes";
display_errors = "stderr";
error_reporting = "E_ALL & ~E_DEPRECATED & ~E_STRICT";
expose_php = "Off";
preview_max_x = 2048;
preview_max_y = 2048;
short_open_tag = "Off";
"opcache.enable_cli" = "1";
"opcache.fast_shutdown" = "1";
"opcache.interned_strings_buffer" = "16";
"opcache.jit" = "1255";
"opcache.jit_buffer_size" = "256M";
"opcache.max_accelerated_files" = "10000";
"opcache.huge_code_pages" = "1";
"opcache.enable_file_override" = "1";
"opcache.memory_consumption" = "256";
"opcache.revalidate_freq" = "60";
"opcache.save_comments" = "1";
"opcache.validate_timestamps" = "0";
"openssl.cafile" = "/etc/ssl/certs/ca-certificates.crt";
};
settings = {
log_type = "file";
loglevel = 1;
trusted_proxies = [
config.my.localhost
config.my.localhost6
config.my.ips.router
config.my.ips.wg-vps
];
trusted_domains = [
cfg.host
config.my.ips.${config.networking.hostName}
"localhost"
"cloud.rotehaare.art"
];
overwriteprotocol = "https";
"overwrite.cli.url" = "${cfg.url}";
forwarded_for_headers = [ "HTTP_X_FORWARDED_FOR" ];
default_phone_region = "MX";
allow_local_remote_servers = true;
mail_smtpmode = "sendmail";
mail_sendmailmode = "pipe";
preview_ffmpeg_path = "${pkgs.ffmpeg}/bin/ffmpeg";
"memories.exiftool" = "${exiftool}/bin/exiftool";
"memories.ffmpeg_path" = "${pkgs.ffmpeg}/bin/ffmpeg";
"memories.ffprobe_path" = "${pkgs.ffmpeg}/bin/ffprobe";
enabledPreviewProviders = [
"OC\\Preview\\AVI"
"OC\\Preview\\BMP"
"OC\\Preview\\GIF"
"OC\\Preview\\HEIC"
"OC\\Preview\\Image"
"OC\\Preview\\JPEG"
"OC\\Preview\\Krita"
"OC\\Preview\\MKV"
"OC\\Preview\\MP3"
"OC\\Preview\\MP4"
"OC\\Preview\\MarkDown"
"OC\\Preview\\Movie"
"OC\\Preview\\OpenDocument"
"OC\\Preview\\PNG"
"OC\\Preview\\TIFF"
"OC\\Preview\\TXT"
"OC\\Preview\\XBitmap"
];
};
phpExtraExtensions = all: [
all.pdlib
all.bz2
];
trusted_domains = [
cfg.host
config.my.ips.${config.networking.hostName}
"localhost"
};
nginx.virtualHosts.${cfg.host} = {
forceSSL = false;
enableACME = false;
http2 = false;
serverAliases = [
"cloud.rotehaare.art"
];
overwriteprotocol = "https";
"overwrite.cli.url" = "${cfg.url}";
forwarded_for_headers = [ "HTTP_X_FORWARDED_FOR" ];
default_phone_region = "MX";
allow_local_remote_servers = true;
mail_smtpmode = "sendmail";
mail_sendmailmode = "pipe";
preview_ffmpeg_path = "${pkgs.ffmpeg}/bin/ffmpeg";
"memories.exiftool" = "${exiftool}/bin/exiftool";
"memories.ffmpeg_path" = "${pkgs.ffmpeg}/bin/ffmpeg";
"memories.ffprobe_path" = "${pkgs.ffmpeg}/bin/ffprobe";
enabledPreviewProviders = [
"OC\\Preview\\AVI"
"OC\\Preview\\BMP"
"OC\\Preview\\GIF"
"OC\\Preview\\HEIC"
"OC\\Preview\\Image"
"OC\\Preview\\JPEG"
"OC\\Preview\\Krita"
"OC\\Preview\\MKV"
"OC\\Preview\\MP3"
"OC\\Preview\\MP4"
"OC\\Preview\\MarkDown"
"OC\\Preview\\Movie"
"OC\\Preview\\OpenDocument"
"OC\\Preview\\PNG"
"OC\\Preview\\TIFF"
"OC\\Preview\\TXT"
"OC\\Preview\\XBitmap"
];
};
phpExtraExtensions = all: [
all.pdlib
all.bz2
];
};
nginx.virtualHosts = {
"${cfg.host}" = lib.mkIf cfg.enableProxy {
forceSSL = false; # vps
enableACME = false; # vps
http2 = false; # vps
# default = true; #vps
#vps
listen = [
{
addr = config.my.ips.wg-server;
@@ -179,7 +183,86 @@ in
port = 8081;
}
];
#vps
};
};
virtualisation.oci-containers.containers = {
go-vod = lib.mkIf config.my.servers.go-vod.enable {
autoStart = true;
image = "radialapps/go-vod";
environment = {
TZ = config.my.timeZone;
NEXTCLOUD_HOST = "https://${config.services.nextcloud.hostName}";
NVIDIA_VISIBLE_DEVICES = "all";
};
volumes = [ "ncdata:/var/www/html:ro" ];
extraOptions = [
"--device=/dev/dri" # VA-API (omit for NVENC)
];
};
collabora = lib.mkIf cfgC.enable {
autoStart = true;
image = "collabora/code:latest";
ports = [ "${toString cfgC.port}:${toString cfgC.port}" ];
environment = {
TZ = config.my.timeZone;
domain = cfg.host;
aliasgroup1 = "${cfg.url}:443";
aliasgroup2 = "https://cloud.rotehaare.art:443";
server_name = cfgC.host;
dictionaries = "en_CA en_US es_MX es_ES fr_FR it pt_BR ru";
extra_params = ''
--o:ssl.enable=false
--o:ssl.termination=true
--o:remote_font_config.url=${cfg.url}/apps/richdocuments/settings/fonts.json
--o:logging.level=information
'';
DONT_GEN_SSL_CERT = "1";
SLEEPFORDEBUGGER = "0";
};
extraOptions = [
"--cap-add"
"MKNOD"
];
};
};
systemd = lib.mkIf cfg.enableCron {
services = {
nextcloud-cron.path = [ pkgs.perl ];
nextcloud-cronjob =
let
inherit (inputs.jawz-scripts.packages.x86_64-linux) nextcloud-cronjob;
in
{
description = "Runs various nextcloud-related cronjobs";
wantedBy = [ "multi-user.target" ];
path = [
pkgs.bash
nextcloud-cronjob
];
serviceConfig = {
Restart = "on-failure";
RestartSec = 30;
ExecStart = "${nextcloud-cronjob}/bin/nextcloud-cronjob";
};
};
};
timers.nextcloud-cronjob = {
enable = true;
description = "Runs various nextcloud-related cronjobs";
wantedBy = [ "timers.target" ];
timerConfig = {
OnCalendar = "*:0/10";
};
};
};
})
(lib.mkIf (cfg.enableProxy && config.my.enableProxy && config.networking.hostName == "vps") {
services.nginx.virtualHosts = {
"${cfg.host}" = {
forceSSL = true;
enableACME = true;
http2 = true;
default = true;
serverAliases = [ "cloud.rotehaare.art" ];
extraConfig = ''
add_header X-XSS-Protection "1; mode=block" always;
@@ -188,11 +271,16 @@ in
add_header X-Frame-Options "SAMEORIGIN" always;
add_header Referrer-Policy "no-referrer-when-downgrade" always;
add_header X-Permitted-Cross-Domain-Policies "none" always;
proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-Host $host;
'';
locations = {
"/".proxyWebsockets = true;
"~ ^/nextcloud/(?:index|remote|public|cron|core/ajax/update|status|ocs/v[12]|updater/.+|oc[ms]-provider/.+|.+/richdocumentscode/proxy).php(?:$|/)" =
{ };
"/" = {
proxyPass = cfg.local;
proxyWebsockets = true;
};
};
};
"${cfgC.host}" = lib.mkIf cfgC.enableProxy {
@@ -243,76 +331,6 @@ in
};
};
};
};
virtualisation.oci-containers.containers = {
go-vod = lib.mkIf config.my.servers.go-vod.enable {
autoStart = true;
image = "radialapps/go-vod";
environment = {
TZ = config.my.timeZone;
NEXTCLOUD_HOST = "https://${config.services.nextcloud.hostName}";
NVIDIA_VISIBLE_DEVICES = "all";
};
volumes = [ "ncdata:/var/www/html:ro" ];
extraOptions = [
"--device=/dev/dri" # VA-API (omit for NVENC)
];
};
collabora = lib.mkIf cfgC.enable {
autoStart = true;
image = "collabora/code:latest";
ports = [ "9980:9980" ];
environment = {
TZ = config.my.timeZone;
domain = cfg.host;
aliasgroup1 = "${cfg.url}:443";
aliasgroup2 = "https://cloud.rotehaare.art:443";
server_name = cfgC.host;
dictionaries = "en_CA en_US es_MX es_ES fr_FR it pt_BR ru";
extra_params = ''
--o:ssl.enable=false
--o:ssl.termination=true
--o:remote_font_config.url=${cfg.url}/apps/richdocuments/settings/fonts.json
--o:logging.level=information
'';
DONT_GEN_SSL_CERT = "1";
SLEEPFORDEBUGGER = "0";
};
extraOptions = [
"--cap-add"
"MKNOD"
];
};
};
systemd = lib.mkIf cfg.enableCron {
services = {
nextcloud-cron.path = [ pkgs.perl ];
nextcloud-cronjob =
let
inherit (inputs.jawz-scripts.packages.x86_64-linux) nextcloud-cronjob;
in
{
description = "Runs various nextcloud-related cronjobs";
wantedBy = [ "multi-user.target" ];
path = [
pkgs.bash
nextcloud-cronjob
];
serviceConfig = {
Restart = "on-failure";
RestartSec = 30;
ExecStart = "${nextcloud-cronjob}/bin/nextcloud-cronjob";
};
};
};
timers.nextcloud-cronjob = {
enable = true;
description = "Runs various nextcloud-related cronjobs";
wantedBy = [ "timers.target" ];
timerConfig = {
OnCalendar = "*:0/10";
};
};
};
};
})
];
}

View File

@@ -40,7 +40,7 @@ in
secure = true;
expire = "168h";
refresh = "1h";
domain = ".lebubu.org";
domain = ".${config.my.domain}";
secret = config.sops.secrets.oauth2-proxy-cookie.path;
};
extraConfig = {
@@ -53,7 +53,7 @@ in
session-store-type = "cookie";
skip-provider-button = true;
code-challenge-method = "S256";
whitelist-domain = [ ".lebubu.org" ];
whitelist-domain = [ ".${config.my.domain}" ];
};
};
};

View File

@@ -9,51 +9,52 @@ let
in
{
options.my.servers.plex = setup.mkOptions "plex" "plex" 32400;
config = lib.mkIf (cfg.enable && config.my.secureHost) {
users.users.plex = {
uid = 193;
group = "piracy";
isSystemUser = true;
};
services = {
plex = {
config = lib.mkMerge [
(lib.mkIf (cfg.enable && config.my.secureHost) {
users.users.plex = {
uid = 193;
group = "piracy";
isSystemUser = true;
};
services.plex = {
inherit (cfg) enable;
group = "piracy";
};
nginx = lib.mkIf cfg.enableProxy {
virtualHosts."${cfg.host}" = {
forceSSL = true;
enableACME = true;
http2 = true;
serverAliases = [
"plex.rotehaare.art"
];
extraConfig = ''
# Some players don't reopen a socket and playback stops totally instead of resuming after an extended pause
send_timeout 100m;
# Plex headers
proxy_set_header X-Plex-Client-Identifier $http_x_plex_client_identifier;
proxy_set_header X-Plex-Device $http_x_plex_device;
proxy_set_header X-Plex-Device-Name $http_x_plex_device_name;
proxy_set_header X-Plex-Platform $http_x_plex_platform;
proxy_set_header X-Plex-Platform-Version $http_x_plex_platform_version;
proxy_set_header X-Plex-Product $http_x_plex_product;
proxy_set_header X-Plex-Token $http_x_plex_token;
proxy_set_header X-Plex-Version $http_x_plex_version;
proxy_set_header X-Plex-Nocache $http_x_plex_nocache;
proxy_set_header X-Plex-Provides $http_x_plex_provides;
proxy_set_header X-Plex-Device-Vendor $http_x_plex_device_vendor;
proxy_set_header X-Plex-Model $http_x_plex_model;
# Buffering off send to the client as soon as the data is received from Plex.
proxy_redirect off;
proxy_buffering off;
'';
locations."/" = {
proxyPass = cfg.local;
proxyWebsockets = true;
};
})
(lib.mkIf (cfg.enableProxy && config.my.enableProxy) {
my.servers.plex.useDefaultProxy = false;
services.nginx.virtualHosts."${cfg.host}" = {
forceSSL = true;
enableACME = true;
http2 = true;
serverAliases = [
"plex.rotehaare.art"
];
extraConfig = ''
# Some players don't reopen a socket and playback stops totally instead of resuming after an extended pause
send_timeout 100m;
# Plex headers
proxy_set_header X-Plex-Client-Identifier $http_x_plex_client_identifier;
proxy_set_header X-Plex-Device $http_x_plex_device;
proxy_set_header X-Plex-Device-Name $http_x_plex_device_name;
proxy_set_header X-Plex-Platform $http_x_plex_platform;
proxy_set_header X-Plex-Platform-Version $http_x_plex_platform_version;
proxy_set_header X-Plex-Product $http_x_plex_product;
proxy_set_header X-Plex-Token $http_x_plex_token;
proxy_set_header X-Plex-Version $http_x_plex_version;
proxy_set_header X-Plex-Nocache $http_x_plex_nocache;
proxy_set_header X-Plex-Provides $http_x_plex_provides;
proxy_set_header X-Plex-Device-Vendor $http_x_plex_device_vendor;
proxy_set_header X-Plex-Model $http_x_plex_model;
# Buffering off send to the client as soon as the data is received from Plex.
proxy_redirect off;
proxy_buffering off;
'';
locations."/" = {
proxyPass = cfg.local;
proxyWebsockets = true;
};
};
};
};
})
];
}

View File

@@ -37,6 +37,7 @@ let
"mealie"
"nextcloud"
"paperless"
"plausible"
"shiori"
"sonarqube"
"vaultwarden"

View File

@@ -9,19 +9,29 @@ let
in
{
options.my.servers.prowlarr = setup.mkOptions "prowlarr" "indexer" 9696;
config = lib.mkIf cfg.enable {
users.users.prowlarr = {
uid = 987;
group = "piracy";
isSystemUser = true;
};
services = {
prowlarr = {
inherit (cfg) enable;
config = lib.mkMerge [
(lib.mkIf cfg.enable {
users.users.prowlarr = {
uid = 987;
group = "piracy";
isSystemUser = true;
};
flaresolverr = {
inherit (cfg) enable;
services = {
prowlarr = {
inherit (cfg) enable;
};
flaresolverr = {
inherit (cfg) enable;
};
};
};
};
})
(lib.mkIf (cfg.enableProxy && config.my.enableProxy && config.my.secureHost) {
sops.secrets."private-ca/pem" = {
sopsFile = ../../secrets/certs.yaml;
owner = "nginx";
group = "nginx";
};
my.servers.prowlarr.certPath = config.sops.secrets."private-ca/pem".path;
})
];
}

View File

@@ -29,46 +29,56 @@ let
in
{
options.my.servers.stash = setup.mkOptions "stash" "xxx" 9999;
config = lib.mkIf (cfg.enable && config.my.secureHost) {
sops.secrets = {
"stash/password".sopsFile = ../../secrets/secrets.yaml;
"stash/jwt".sopsFile = ../../secrets/secrets.yaml;
"stash/session".sopsFile = ../../secrets/secrets.yaml;
};
services.stash = {
inherit (cfg) enable;
group = "glue";
mutableSettings = true;
username = "Suing8150";
passwordFile = config.sops.secrets."stash/password".path;
jwtSecretKeyFile = config.sops.secrets."stash/jwt".path;
sessionStoreKeyFile = config.sops.secrets."stash/session".path;
settings = {
inherit (cfg) port;
host = "0.0.0.0";
stash = [
{
path = "/srv/pool/glue/";
}
];
config = lib.mkMerge [
(lib.mkIf (cfg.enable && config.my.secureHost) {
sops.secrets = {
"stash/password".sopsFile = ../../secrets/secrets.yaml;
"stash/jwt".sopsFile = ../../secrets/secrets.yaml;
"stash/session".sopsFile = ../../secrets/secrets.yaml;
};
};
systemd.services.stash = {
environment = {
PYTHONPATH = "/var/lib/stash/venv/lib/python3.12/site-packages";
LD_LIBRARY_PATH = "${pkgs.stdenv.cc.cc.lib}/lib:${pkgs.glibc}/lib:${pkgs.zlib}/lib:${pkgs.libffi}/lib:${pkgs.openssl}/lib";
services.stash = {
inherit (cfg) enable;
group = "glue";
mutableSettings = true;
username = "Suing8150";
passwordFile = config.sops.secrets."stash/password".path;
jwtSecretKeyFile = config.sops.secrets."stash/jwt".path;
sessionStoreKeyFile = config.sops.secrets."stash/session".path;
settings = {
inherit (cfg) port;
host = "0.0.0.0";
stash = [
{
path = "/srv/pool/glue/";
}
];
};
};
serviceConfig = {
PrivateUsers = lib.mkForce false;
BindReadOnlyPaths = lib.mkForce [ ];
BindPaths = lib.mkIf (cfgS.settings != { }) (map (stash: "${stash.path}") cfgS.settings.stash);
systemd.services.stash = {
environment = {
PYTHONPATH = "/var/lib/stash/venv/lib/python3.12/site-packages";
LD_LIBRARY_PATH = "${pkgs.stdenv.cc.cc.lib}/lib:${pkgs.glibc}/lib:${pkgs.zlib}/lib:${pkgs.libffi}/lib:${pkgs.openssl}/lib";
};
serviceConfig = {
PrivateUsers = lib.mkForce false;
BindReadOnlyPaths = lib.mkForce [ ];
BindPaths = lib.mkIf (cfgS.settings != { }) (map (stash: "${stash.path}") cfgS.settings.stash);
};
};
};
users.users.stash = {
uid = 974;
isSystemUser = true;
group = "glue";
packages = [ stashPythonFHS ];
};
};
users.users.stash = {
uid = 974;
isSystemUser = true;
group = "glue";
packages = [ stashPythonFHS ];
};
})
(lib.mkIf (cfg.enableProxy && config.my.enableProxy && config.my.secureHost) {
sops.secrets."private-ca/pem" = {
sopsFile = ../../secrets/certs.yaml;
owner = "nginx";
group = "nginx";
};
my.servers.stash.certPath = config.sops.secrets."private-ca/pem".path;
})
];
}

View File

@@ -25,42 +25,37 @@ in
synapse = setup.mkOptions "synapse" "pYLemuAfsrzNBaH77xSu" 8008;
element = setup.mkOptions "element" "55a608953f6d64c199" 5345;
};
config = lib.mkIf (cfg.enable && config.my.secureHost) {
my.servers = {
synapse = { inherit domain; };
element = { inherit domain; };
};
users.groups.matrix-synapse = { inherit gid; };
users.users.matrix-synapse = {
inherit uid;
isSystemUser = true;
group = "matrix-synapse";
};
sops.secrets = {
synapse = {
sopsFile = ../../secrets/env.yaml;
owner = "matrix-synapse";
config = lib.mkMerge [
(lib.mkIf (cfg.enable && config.my.secureHost) {
my.servers = {
synapse = { inherit domain; };
element = { inherit domain; };
};
users.groups.matrix-synapse = { inherit gid; };
users.users.matrix-synapse = {
inherit uid;
isSystemUser = true;
group = "matrix-synapse";
};
"iqQCY4iAWO-ca/pem" = {
sopsFile = ../../secrets/certs.yaml;
owner = "nginx";
group = "nginx";
sops.secrets = {
synapse = {
sopsFile = ../../secrets/env.yaml;
owner = "matrix-synapse";
group = "matrix-synapse";
};
"matrix/key" = {
sopsFile = ../../secrets/certs.yaml;
owner = "matrix-synapse";
group = "matrix-synapse";
};
"matrix/cert" = {
sopsFile = ../../secrets/certs.yaml;
owner = "matrix-synapse";
group = "matrix-synapse";
};
};
"matrix/key" = {
sopsFile = ../../secrets/certs.yaml;
owner = "matrix-synapse";
group = "matrix-synapse";
};
"matrix/cert" = {
sopsFile = ../../secrets/certs.yaml;
owner = "matrix-synapse";
group = "matrix-synapse";
};
};
networking.firewall.allowedTCPPorts = lib.mkIf (!cfg.isLocal) [ cfg.port ];
services = {
matrix-synapse = {
networking.firewall.allowedTCPPorts = lib.mkIf (!cfg.isLocal) [ cfg.port ];
services.matrix-synapse = {
inherit (cfg) enable;
extraConfigFiles = [
config.sops.secrets.synapse.path
@@ -100,7 +95,18 @@ in
];
};
};
nginx.virtualHosts = lib.mkIf cfg.enableProxy {
})
(lib.mkIf (cfg.enableProxy && config.my.enableProxy) {
sops.secrets."iqQCY4iAWO-ca/pem" = {
sopsFile = ../../secrets/certs.yaml;
owner = "nginx";
group = "nginx";
};
my.servers.synapse = {
useDefaultProxy = false;
certPath = config.sops.secrets."iqQCY4iAWO-ca/pem".path;
};
services.nginx.virtualHosts = {
"${cfgE.host}" = {
enableACME = true;
forceSSL = true;
@@ -125,13 +131,8 @@ in
"/_matrix".proxyPass = "http://[${config.my.localhost6}]:${toString cfg.port}";
"/_synapse/client".proxyPass = "http://[${config.my.localhost6}]:${toString cfg.port}";
};
# extraConfig = ''
# ssl_verify_client on;
# ssl_client_certificate ${config.sops.secrets."iqQCY4iAWO-ca/pem".path};
# error_page 403 /403.html;
# '';
};
};
};
};
})
];
}

View File

@@ -1,7 +1,6 @@
{
config,
lib,
pkgs,
...
}:
let
@@ -11,7 +10,7 @@ in
{
options.my.services.wireguard.enable = lib.mkEnableOption "WireGuard VPN configuration";
config = lib.mkIf (config.my.services.wireguard.enable && config.my.secureHost) {
sops.secrets."wireguard/private".sopsFile = ../../secrets/wireguard.yaml;
sops.secrets."vps/server/private".sopsFile = ../../secrets/wireguard.yaml;
networking = {
firewall.allowedUDPPorts = [ port ];
nat = {
@@ -20,32 +19,36 @@ in
internalInterfaces = [ "wg0" ];
};
wireguard.interfaces.wg0 = {
ips = [ "10.100.0.1/24" ];
ips = [
"${config.my.ips.wg-vps}/24"
"10.8.0.1/24"
"10.9.0.1/24"
];
listenPort = port;
postSetup = ''
${pkgs.iptables}/bin/iptables -t nat -A POSTROUTING -s 10.100.0.0/24 -o ${interface} -j MASQUERADE
'';
postShutdown = ''
${pkgs.iptables}/bin/iptables -t nat -D POSTROUTING -s 10.100.0.0/24 -o ${interface} -j MASQUERADE
'';
privateKeyFile = config.sops.secrets."wireguard/private".path;
postSetup = "";
postShutdown = "";
privateKeyFile = config.sops.secrets."vps/server/private".path;
peers = [
{
publicKey = "ciupBjCcIpd3K5vlzNMJC8iiyNqB9xXwkSC6UXPKP3g=";
allowedIPs = [ "10.100.0.2/32" ];
} # phone
publicKey = "OUiqluRaS4hmGvLJ3csQrnIM3Zzet50gsqtTABaUkH4=";
allowedIPs = [ "${config.my.ips.wg-server}/32" ];
}
{
publicKey = "JgeA1ElDwR7oLmyGn8RzvxiscMBhR8+L+mEjY1Cq7gk=";
allowedIPs = [ "10.100.0.3/32" ];
} # tablet
publicKey = "rFgT6TXzRazK6GMazMNGjtOvzAAPST0LvCfN7QXsLho=";
allowedIPs = [ "${config.my.ips.wg-friend1}/32" ];
}
{
publicKey = "giPVRUTLtqPGb57R4foGZMNS0tjIp2ry6lMKYtqHjn4=";
allowedIPs = [ "10.100.0.15/32" ];
} # jeancarlos
publicKey = "R1CTx5+CXivMI6ZEmRYsyFUFILhe6Qnub0iEIRvvrEY=";
allowedIPs = [ "${config.my.ips.wg-friend2}/32" ];
}
{
publicKey = "92JdW/NExg1tUE4cEyl6Yn+0Eex+iFVA37ahPRhRnRM=";
allowedIPs = [ "10.100.0.16/32" ];
} # gorilia
publicKey = "ecPNSacD6yVwpnLBs171z0xkw9M1DXKh/Kn70cIBcwA=";
allowedIPs = [ "${config.my.ips.wg-friend3}/32" ];
}
{
publicKey = "yg+2miZCrx89znFaUlU/le/7UIPgEAMY74fZfEwz8g4=";
allowedIPs = [ "${config.my.ips.wg-friend4}/32" ];
}
];
};
};

View File

@@ -196,6 +196,13 @@ in
inherit ip;
};
};
mkEnabledProxyIp = ip: name: {
inherit name;
value = {
enableProxy = true;
inherit ip;
};
};
enableList = func: list: list |> map func |> builtins.listToAttrs;
mkPostgresDependency = config: serviceName: displayName: {
assertion = config.my.servers.${serviceName}.enable -> config.my.servers.postgres.enable;

View File

@@ -0,0 +1 @@
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIG6scNSRnOprOvqm5DSTSMORvh9c5z0S1GzX1D7u+gMw deploy@portfolio

View File

@@ -0,0 +1 @@
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKbCQ/f117hL7Z02Vog1RCaOVUi95beYf//Qppnqf2Ha lidarr-reports@lidarr-reports

View File

@@ -0,0 +1,7 @@
[REDACTED OPENSSH PRIVATE KEY]
SECURITY: a cleartext ed25519 private key was committed here. Treat this key
as compromised: rotate it (generate a new keypair), update the corresponding
authorized_keys entries, and store any private key material only in the
sops-encrypted secrets (e.g. secrets/*.yaml), never as a plaintext repo file.

View File

@@ -5,6 +5,7 @@ vps:
server:
private: ENC[AES256_GCM,data:wrP/069tuQs3ObYE8Q0MNVxe3+4vZ2HIImoIdZpj1uPgdBknboX1wmANv/k=,iv:FJL5KumHos8PoXra+BB2Uc6YedsF6MD3wWyuugXzJ+E=,tag:nVuTrW2P7JvnWnv6H1SmdQ==,type:str]
public: ENC[AES256_GCM,data:YnKOf9725v9FkzdNPDVf/iinMbY/YWn6ksqEz+mpB4KHVlOvpbV6vLSKRcs=,iv:aWQNy6mT4sxVbzaXKgRzZ9XVsiBCRsOlLORRqC+uiKE=,tag:mLWv6mr3VVfw0J5BrqByXg==,type:str]
#ENC[AES256_GCM,data:u5SEQfK0Hw==,iv:+qr9WmOzQowZ/JyN1KoWhoyHA2132fmmZzIQy7o5y6k=,tag:9TPVeQgoo2nWQ9dhuYULGw==,type:comment]
home:
private: ENC[AES256_GCM,data:YZ0jvBzkMv8Bwc9u3LDJzwSqQvPj8wPUxTIeBFiLYVQQIBjm8aS1dTYuPvo=,iv:mXuW7TVERxOMmGIit3a7Spmbk/EgYuGkO66AWJUnMF0=,tag:xM7C3F3JCiud/A9yPD5ydQ==,type:str]
public: ENC[AES256_GCM,data:DcwAHhHjIxFqRL5h7p/0nkFnWiI/iqR8Fws6AuFaxjgUHKYd/6l3D6q/O/0=,iv:bBJ0bsKRiGQUSlRmHqeLQWkOIUNfG5VVpuV6MOtKZO0=,tag:harMG6GDIfclmSq3D36bTw==,type:str]
@@ -46,7 +47,7 @@ sops:
SG40OS8wMHlKNmxQa0VScHQrU2NmT2sKt9xw/8jsgnV1cZndqYNiHvIf8VdEJYCl
UUJ1KPz9mvUx3ny+rK50FSD61U8PHEZm2UC0w+/qkZwRtCx21Ku6dw==
-----END AGE ENCRYPTED FILE-----
lastmodified: "2025-09-08T00:14:52Z"
mac: ENC[AES256_GCM,data:O2herKRy4k9ZMuPzzPF5QlBC2isXdRoIsbYLJ/6X7esxtxxgNuAljx4SCR6UMT7pl3G2E33cnnBEkuAIy6SMXOaZNfOuAEJXaCwpRwCXu26lrcTf6n7UdP36GWfIRsR4utD5/vv66ch6MqmQWkW7E5zydy5dOv+BJ4XS/50OUQs=,iv:TscYNQaeI+mBxyobxI1O4wUzRtA27pvjXz27kqMJhA0=,tag:zx/xrYAWJCxYz5HRTKzYfQ==,type:str]
lastmodified: "2026-02-04T18:37:11Z"
mac: ENC[AES256_GCM,data:AlrMK34dWDm5hfVwnQnzk3l8NIRbiVV6KHa6io9S9l07WvC3TYLTOJS6xOi4pkEz6sqQ7IpZU7RRdosxuQp50NmMEt2QYawTHFZIgzFYeKRbl5N5LCu9afC6yTtvG/sT7uenTMhh2qT1JBwebJiUdM9zNVUzWlW5d1SdxrHgIbs=,iv:dvqsDaC+trhY1kheYUEOEwHfCDz0Mu7N0LpfjnKko5g=,tag:tuqyK8vuwSrk1kf+Vi7MKg==,type:str]
unencrypted_suffix: _unencrypted
version: 3.10.2
version: 3.11.0

View File

@@ -0,0 +1,34 @@
# Specification Quality Checklist: VPS Migration
**Purpose**: Validate specification completeness and quality before proceeding to planning
**Created**: 2026-02-04
**Feature**: /home/jawz/Development/NixOS/specs/004-vps-migration/spec.md
## Content Quality
- [x] No implementation details (languages, frameworks, APIs)
- [x] Focused on user value and business needs
- [x] Written for non-technical stakeholders
- [x] All mandatory sections completed
## Requirement Completeness
- [x] No [NEEDS CLARIFICATION] markers remain
- [x] Requirements are testable and unambiguous
- [x] Success criteria are measurable
- [x] Success criteria are technology-agnostic (no implementation details)
- [x] All acceptance scenarios are defined
- [x] Edge cases are identified
- [x] Scope is clearly bounded
- [x] Dependencies and assumptions identified
## Feature Readiness
- [x] All functional requirements have clear acceptance criteria
- [x] User scenarios cover primary flows
- [x] Feature meets measurable outcomes defined in Success Criteria
- [x] No implementation details leak into specification
## Notes
- All checks passed on first review.

View File

@@ -0,0 +1,38 @@
openapi: 3.0.3
info:
title: VPS Migration Verification API
version: 0.1.0
description: |
Optional verification endpoints for migration validation. These describe
checks that can be automated; if no API is implemented, treat as a checklist.
paths:
/verify/proxy:
get:
summary: Verify reverse proxy routing to host services
responses:
"200":
description: Proxy mappings resolve to services on host server
/verify/firewall:
get:
summary: Verify iptables ruleset parity
responses:
"200":
description: Firewall flows match expected allow/deny behavior
/verify/vpn:
get:
summary: Verify VPN peer connectivity and address assignment
responses:
"200":
description: All peers connect with correct addresses
/verify/ssh:
get:
summary: Verify SSH access for authorized principals
responses:
"200":
description: Authorized keys allow expected access only
/verify/analytics:
get:
summary: Verify analytics data migrated successfully
responses:
"200":
description: Historical analytics data present on new server

View File

@@ -0,0 +1,41 @@
# Data Model: VPS Migration
## Host
- **Fields**: name, role (primary/secondary), publicIp, vpnEndpoint, services[], proxyMappings[], firewallRuleSet
- **Rules**: Exactly one primary host for reverse proxying.
## Service
- **Fields**: name, enabled, runsOnHost, proxyEnabled, domains[]
- **Rules**: Services remain on host server; proxyEnabled true on VPS for all enabled services.
## ProxyMapping
- **Fields**: domain, targetService, tlsRequired
- **Rules**: domain must be unique across mappings; domain must match service definitions.
## FirewallRuleSet
- **Fields**: sourceFile (iptables), rules[], appliedHost
- **Rules**: Ruleset must be applied as-is; no translation allowed.
## VPNPeer
- **Fields**: name, publicKeyRef, allowedIps[]
- **Rules**: allowedIps must be unique across peers; publicKeyRef must resolve via secrets system.
## VPNInterface
- **Fields**: addressRanges[], listenPort, privateKeyRef
- **Rules**: privateKeyRef stored in secrets system; listenPort exposed on VPS.
## ServiceUser
- **Fields**: username, group, authorizedKeys[]
- **Rules**: deploy uses ed25519_deploy.pub; lidarr-reports uses ed25519_lidarr-reports.pub.
## MigrationChecklistItem
- **Fields**: task, verificationStep, status
- **Rules**: each migration task must have a verification step.

View File

@@ -0,0 +1,52 @@
# Implementation Plan: VPS Migration
**Branch**: `004-vps-migration` | **Date**: 2026-02-04 | **Spec**: /home/jawz/Development/NixOS/specs/004-vps-migration/spec.md
**Input**: Feature specification from `/specs/004-vps-migration/spec.md`
## Summary
Migrate VPS responsibilities to the new NixOS host by making it the primary reverse-proxy host (nginx only), mirroring the existing iptables ruleset, enabling wireguard with secret-managed keys, and restoring SSH/service-user access, while keeping all services running on the host server. Provide validation steps, review historical configs for gaps, and document analytics data migration.
## Technical Context
**Language/Version**: Nix (flakes; nixpkgs 25.11)
**Primary Dependencies**: NixOS modules, sops-nix, nginx, wireguard, openssh, iptables
**Storage**: Files (configuration and secrets)
**Testing**: Manual validation steps (no automated test harness)
**Target Platform**: Linux server (NixOS)
**Project Type**: configuration repo
**Performance Goals**: N/A (configuration change)
**Constraints**: Services remain on host server; VPS only terminates proxy and exposes wireguard port; iptables parity required
**Scale/Scope**: Single VPS + host server, small set of VPN peers and admin SSH principals
## Constitution Check
No enforceable constitution rules are defined (placeholders only). Gate passes by default.
Post-design check: unchanged (no enforceable gates found).
## Project Structure
### Documentation (this feature)
```text
specs/004-vps-migration/
├── plan.md
├── research.md
├── data-model.md
├── quickstart.md
├── contracts/
└── tasks.md
```
### Source Code (repository root)
```text
hosts/
modules/
secrets/
iptables
scripts/
```
**Structure Decision**: Use the existing NixOS configuration layout (`hosts/`, `modules/`, `secrets/`) and the root `iptables` ruleset file.

View File

@@ -0,0 +1,103 @@
# Quickstart: VPS Migration
## Prerequisites
- Access to this repo and the new VPS host configuration
- Existing iptables ruleset file available at repo root: `iptables`
- VPN keys present in the secrets system
- SSH public keys present in `secrets/ssh/`
## Steps
1. Review the spec and clarifications:
- `/home/jawz/Development/NixOS/specs/004-vps-migration/spec.md`
2. Ensure secrets are available:
- VPN private/public keys are stored in the secrets system
- `secrets/ssh/ed25519_deploy.pub` and `secrets/ssh/ed25519_lidarr-reports.pub` exist
3. Update host configuration:
- Set new VPS as primary reverse proxy host
- Enable proxying for all enabled services (services remain on host server)
- Apply iptables ruleset as-is
- Enable wireguard on VPS and expose port
- Add service users and admin SSH keys
- Update VPS public IP to `45.33.0.228` in SSH configuration
- Update host server VPN client to target the new VPS
4. Provide and review legacy proxy config snapshot:
- Supply caddy files for subdomain comparison
- Treat caddy as migration input only; nginx is the only proxy target for NixOS runtime
## Caddy vs Nix Subdomain Comparison (from provided caddy/ directory)
**Caddy-only domains (present in caddy, not found in current Nix server hosts):**
- danilo-reyes.com
- www.danilo-reyes.com
- blog.danilo-reyes.com
- www.blog.danilo-reyes.com
- mb-report.lebubu.org
- torrent.lebubu.org
**Nix-only domains (present in Nix server hosts, not in caddy config):**
- auth-proxy.lebubu.org
- comments.danilo-reyes.com
- flix.rotehaare.art
- 55a608953f6d64c199.lebubu.org
- pYLemuAfsrzNBaH77xSu.lebubu.org
- bookmarks.lebubu.org
- drpp.lebubu.org
- portfolio.lebubu.org
- qampqwn4wprhqny8h8zj.lebubu.org
- requests.lebubu.org
- start.lebubu.org
- sync.lebubu.org
- tranga.lebubu.org
**Notes:**
- `auth-proxy.lebubu.org` appears only in `15-private.caddyfile__` (not imported by Caddy), so it is currently inactive in caddy.
- `danilo-reyes.com` and `blog.danilo-reyes.com` are handled as static sites in caddy; Nix has `my.websites.portfolio` and `isso` which may need mapping to these domains.
- `mb-report.lebubu.org` and `torrent.lebubu.org` are present in caddy but no matching Nix server host was found.
5. Migrate analytics data:
- Export data from existing server
- Import into new server
- Validate historical data is present
6. Run verification steps for each task (per spec FR-012).
## Clarification Candidates From History Review
- `opentracker` was installed and enabled (`systemctl enable --now opentracker`) with firewall rules for TCP/UDP `6969`; confirm if tracker service is still required on NixOS.
- `ip6tables` was enabled on Fedora (`systemctl enable ip6tables`); confirm if equivalent IPv6 policy is required on VPS.
- `net.ipv4.conf.wg0.rp_filter=0` was set during forwarding troubleshooting; confirm if this sysctl needs to be persisted on VPS.
- Fedora-specific SELinux SSH port handling (`semanage ssh_port_t`) appears in history; confirm it can remain excluded on NixOS.
## Verification Steps
- **T001**: `test -f ./iptables && test -f ./secrets/ssh/ed25519_deploy.pub && test -f ./secrets/ssh/ed25519_lidarr-reports.pub && test -f ./secrets/wireguard.yaml`
- **T002**: verify this section exists in `/home/jawz/Development/NixOS/specs/004-vps-migration/quickstart.md`
- **T003**: `rg -n "mainServer|enableProxy" hosts/server/toggles.nix modules/modules.nix`
- **T004**: `rg -n "wireguard|wg0|services.wireguard" modules/services/wireguard.nix hosts/vps/configuration.nix`
- **T005**: `rg -n "vps|45.33.0.228|programs.ssh" config/jawz.nix modules/modules.nix`
- **T006**: `rg -n "/etc/caddy/Caddyfile.d" sudo_hist jawz_hist`
- **T007**: `rg -n 'mainServer = "vps"' hosts/server/toggles.nix modules/modules.nix`
- **T008**: `rg -n "enableProxy = true" hosts/vps/toggles.nix hosts/vps/configuration.nix hosts/server/toggles.nix`
- **T009**: ensure Caddy vs Nix comparison section remains in this file
- **T010**: `rg -n "iqQCY4iAWO-ca/pem|certPath|proxyReversePrivate" modules/network/nginx.nix modules/servers`
- **T011**: `rg -n "iptables.rules|iptables-restore|networking.firewall.enable = false" hosts/vps/configuration.nix`
- **T012**: `rg -n "services.wireguard.enable = true" hosts/vps/configuration.nix`
- **T013**: confirm `wireguard/private` exists in `secrets/wireguard.yaml`
- **T014**: `rg -n "10.77.0.1/24|10.8.0.1/24|10.9.0.1/24|AllowedIPs|allowedIPs" modules/services/wireguard.nix`
- **T015**: `rg -n "users\\.deploy|users\\.lidarr-reports|ed25519_deploy|ed25519_lidarr-reports" hosts/vps/configuration.nix`
- **T016**: `rg -n "workstation|server|deacero|galaxy" hosts/vps/configuration.nix`
- **T017**: `rg -n "ports = \\[ 3456 \\]|PermitRootLogin = \"no\"" hosts/vps/configuration.nix`
- **T018**: `rg -n "sudo-rs\\.extraRules|nixos-rebuild|nixremote" hosts/vps/configuration.nix`
- **T019**: `rg -n "nixworkstation" hosts/vps/configuration.nix`
- **T020**: `rg -n "45\\.33\\.0\\.228" modules/modules.nix config/jawz.nix`
- **T021**: `rg -n "endpoint = .*my\\.ips\\.vps" hosts/server/configuration.nix`
- **T022**: verify "Clarification Candidates From History Review" section exists in this file
- **T023**: intentionally skipped by operator for this implementation pass
- **T024**: verify each task from T001-T026 has a corresponding verification line in this section
- **T025**: `rg -n "caddy|Caddy" README.org docs || true` and confirm no active-proxy references remain outside legacy migration notes
- **T026**: `rg -n "T0[0-2][0-9]" /home/jawz/Development/NixOS/specs/004-vps-migration/tasks.md` and confirm each task mentions at least one concrete path

View File

@@ -0,0 +1,31 @@
# Research: VPS Migration
## Decision 1: Reverse proxy role
- **Decision**: New VPS runs nginx as the primary reverse proxy; services remain on the host server.
- **Rationale**: Matches the clarified scope and minimizes service migration risk while restoring proxy functionality.
- **Alternatives considered**: Migrating services to VPS; keeping old proxy (caddy) on Fedora VPS.
## Decision 2: Firewall parity
- **Decision**: Apply the existing iptables ruleset as-is on the new VPS.
- **Rationale**: Ensures exact behavioral parity for complex routing and hot-swap behavior.
- **Alternatives considered**: Translating to another firewall system; partial translation with mixed rules.
## Decision 3: VPN key handling
- **Decision**: Store VPN keys only in the existing secrets system; no plaintext keys in config.
- **Rationale**: Preserves confidentiality and aligns with encrypted secrets workflow.
- **Alternatives considered**: Plaintext inline keys; separate unmanaged secrets store.
## Decision 4: Admin SSH principals
- **Decision**: Limit admin SSH authorized_keys entries to workstation, server, deacero, and galaxy.
- **Rationale**: Keeps access scope bounded to explicitly requested principals.
- **Alternatives considered**: Auto-adding other hosts found in config; adding only after confirmation.
## Decision 5: Analytics (Plausible) migration
- **Decision**: Migrate existing analytics data to the new server.
- **Rationale**: Preserves historical reporting and continuity of metrics.
- **Alternatives considered**: Fresh start with no history; read-only legacy instance for history.

View File

@@ -0,0 +1,177 @@
# Feature Specification: VPS Migration
**Feature Branch**: `004-vps-migration`
**Created**: 2026-02-04
**Status**: Draft
**Input**: User description: "start feature branch 004, the git fetch command will fail, so force 004. Feature 003 added a new hosts vps, as a linode host, I want to now fully migrate my existing fedora vps to this new nixos vps. to do so I want to bring in the configurations fedora vps has. 1. right now the nginx logic of my servers is disabled, because I let the fedora vps handle the reverse proxy through caddy. But I dont want that caddy logic, on nixos I want to let nginx take care of the reverse proxies, plus the logic is already backed in, there is a isLocal logic to the factory, and I dont remember exactly the name of the code. but there is some flag under the my. options that specifies the mainHost, the constitution mentions that mainHost is the host handling nginx and because the vps will be it, then main host needs to become vps, I think before it was miniserver. This change means, that all the currently enabled servers on the toggles.nix from the host server, should have the enableProxy flag on vps (double check the logic) this should make it so, that nginx runs on vps, and the servers run on server. 2. Add a step to ask me for the caddy files, just to check that the subdomains caddy handles for each server match the subdomains on the servers/.*nix files. 3. I use iptables on the fedora vps, and the nixos vps, well I dont mind you using another firewall but there are some complex firewall rules that I need them to work 100% as the original vps, the rules will be on a file named iptables, this is perhaps the most important step, otherwise the complex network configuration this vps has wont be able to hot swap and serve my servers to the world.
4. modify the existing wireguard.nix module, doublecheck that isnt toggled anywhere, toggle it on vps and add this configuration to it
[Interface]
#DNS = 10.77.0.1
Address = 10.77.0.1/24, 10.8.0.1/24, 10.9.0.1/24
ListenPort = 51820
PrivateKey = aDQHN3DfAGEFjVHRKIJ34CJKPcKx7HdYzkEbRNBNWGw=
# me
[Peer]
PublicKey = OUiqluRaS4hmGvLJ3csQrnIM3Zzet50gsqtTABaUkH4=
AllowedIPs = 10.77.0.2/32
# friends
[Peer] # 7351
PublicKey = rFgT6TXzRazK6GMazMNGjtOvzAAPST0LvCfN7QXsLho=
AllowedIPs = 10.8.0.2/32
[Peer]
PublicKey = R1CTx5+CXivMI6ZEmRYsyFUFILhe6Qnub0iEIRvvrEY=
AllowedIPs = 10.8.0.3/32
[Peer]
PublicKey = ecPNSacD6yVwpnLBs171z0xkw9M1DXKh/Kn70cIBcwA=
AllowedIPs = 10.8.0.4/32
[Peer]
PublicKey = yg+2miZCrx89znFaUlU/le/7UIPgEAMY74fZfEwz8g4=
AllowedIPs = 10.8.0.5/32
# # gooners
# [Peer]
# PublicKey = GawtOvsZ75avelIri5CjGoPXd8AFpi9qlZ6dSsqUISE=
# AllowedIPs = 10.77.0.2/32, 10.9.0.2/32
can I use sops to encrypt the public and private keys? if so, on modules.nix you will see that the ips on that wireguard config correspond to wg-friend1...n when you get to this step pause and tell me to create the sops secrets for these public keys.
5. I have two cicds on this server
drwxrwxr-x. 11 deploy www-data 4096 Dec 26 20:47 blog
drwxr-xr-x. 2 lidarr-reports lidarr-reports 4096 Nov 11 17:52 lidarr-mb-gap
drwxrwxr-x. 12 deploy www-data 4096 Dec 26 21:01 portfolio
I need you to create the service users and groups for deploy and lidarr-reports.
in those, I need you to add ./secrets/ssh/ed25519_deploy.pub to authorized_keys for the user deploy
and for lidarr-reports ed25519_lidarr-reports.pub
6. similar to every other host, add ssh login authorized_keys for workstation, server, deacero, galaxy and check if Im missing one. Because this will replace the ssh vps on the ssh config, you need to replace the existing vps ip with 45.33.0.228. 7. change the configuration on the host server, so that its wireguard session, connects to this server (i think will ve done automagically when the ip changes right?) 8. Ive added sudo_hist and jawz_hist, which are a dump of the histfile of this server, just check if there is a configuration that Im missing, something I did on there that I missed, and add it to the clarification list, so when I run clarify I tell you if I want that or not, granted lots of those commands are trial and error, so I think I have everything. 9. I have setup a plausible server, write the steps necesary to migrate it, I dont know.
10. add verification steps for every task we did, when youre done and"
## Clarifications
### Session 2026-02-04
- Q: Are any services being migrated to the new VPS, and what does enableProxy do? → A: No services are migrated; enableProxy only configures nginx on the VPS, wireguard exposes the port, and services continue running on the host server.
- Q: How should the analytics service be migrated? → A: Migrate existing analytics data to the new server.
- Q: How should firewall parity be achieved on the new VPS? → A: Use the existing iptables ruleset as-is.
- Q: Where should VPN keys be stored? → A: Preserve keys only in the existing secrets system.
- Q: Which admin hosts should receive SSH authorized_keys entries? → A: Only the listed hosts (workstation, server, deacero, galaxy).
## User Scenarios & Testing *(mandatory)*
### User Story 1 - Migrate VPS as Primary Host (Priority: P1)
As an operator, I want the new VPS to become the primary host for reverse proxying and networking while services continue running on the host server, so public traffic and internal tunnels continue working after the migration.
**Why this priority**: This is the core migration goal and failure would cause outages.
**Independent Test**: Can be fully tested by switching the primary host role to the new VPS and verifying proxy and tunnel connectivity without depending on the other stories.
**Acceptance Scenarios**:
1. **Given** the new VPS is designated as the primary host, **When** proxying is enabled, **Then** public endpoints resolve through the new VPS while services remain on the host server.
2. **Given** the previous VPS is no longer handling proxying, **When** traffic is routed through the new VPS, **Then** no service loses external access.
---
### User Story 2 - Preserve Firewall Behavior (Priority: P1)
As an operator, I want the firewall behavior on the new VPS to match the existing VPS so that all current network paths continue to function.
**Why this priority**: Firewall parity is critical to avoid breaking complex routing and hot-swap behavior.
**Independent Test**: Can be fully tested by comparing allowed/blocked traffic and confirming all required network paths remain functional.
**Acceptance Scenarios**:
1. **Given** the firewall rules are applied to the new VPS, **When** all known inbound and outbound paths are exercised, **Then** they behave identically to the existing VPS.
---
### User Story 3 - Restore Secure Access and VPN Peers (Priority: P2)
As an operator, I want VPN peers and SSH access to be configured on the new VPS so administration and CI/CD access remain available.
**Why this priority**: Secure access is required for operating and deploying services.
**Independent Test**: Can be fully tested by connecting each VPN peer and verifying SSH access for each authorized user.
**Acceptance Scenarios**:
1. **Given** the VPN configuration is enabled on the new VPS, **When** each peer connects, **Then** each peer receives the correct addresses and can reach intended resources.
2. **Given** service users and admin users are created on the new VPS, **When** their authorized keys are used, **Then** SSH access succeeds with the expected permissions.
---
### User Story 4 - Capture Migration Gaps and Validation (Priority: P3)
As an operator, I want a checklist of potential missing configuration from existing server history and clear verification steps so the migration is safe and complete.
**Why this priority**: This reduces risk of overlooked manual changes and provides confidence during cutover.
**Independent Test**: Can be fully tested by running the verification steps and confirming no missing items remain.
**Acceptance Scenarios**:
1. **Given** historical command logs are reviewed, **When** likely missing configurations are identified, **Then** they are listed as clarifications for user confirmation.
2. **Given** verification steps are provided for each task, **When** the operator executes them, **Then** each migration task can be validated.
---
### Edge Cases
- What happens when a subdomain mapping differs between the previous proxy configuration and the current service definitions?
- How does the system handle a firewall rule that is ambiguous or conflicts with existing policy?
- What happens if an SSH key file is missing or invalid for a service user?
## Requirements *(mandatory)*
### Functional Requirements
- **FR-001**: The system MUST designate the new VPS as the primary host for reverse proxying and ensure all enabled services are routed through it without relocating the services.
- **FR-002**: The system MUST ensure proxy configuration is enabled for all services currently enabled on the host server so traffic flows through the new VPS while services remain on the host server.
- **FR-003**: The system MUST request existing proxy configuration files for verification and flag any subdomain mismatches against current service definitions.
- **FR-004**: The system MUST apply the existing iptables ruleset as-is on the new VPS to match the existing VPS behavior for all documented inbound and outbound flows.
- **FR-005**: The system MUST enable the VPN configuration on the new VPS with the specified peer addresses and ensure each peer is uniquely identified.
- **FR-006**: The system MUST support encrypting sensitive VPN keys and pause for user-provided secret material when required.
- **FR-015**: The system MUST store VPN keys only in the existing secrets system and must not place them in plaintext configuration.
- **FR-007**: The system MUST create service users and groups for deployment workflows and grant SSH access via specified public keys.
- **FR-008**: The system MUST configure SSH access for all standard admin hosts and update the VPS connection target to the new public IP.
- **FR-016**: The system MUST grant SSH access only to workstation, server, deacero, and galaxy admin hosts.
- **FR-017**: The system MUST configure SSHD to use port 3456 and disable root/password authentication to match the existing VPS security posture.
- **FR-018**: The system MUST harden remote rebuild access by using a non-root SSH user with least-privilege access for rebuild operations.
- **FR-009**: The system MUST update dependent host configurations so existing VPN client connections target the new VPS.
- **FR-010**: The system MUST review provided history logs and produce a clarification list of potential missing configurations.
- **FR-011**: The system MUST document migration steps for the analytics service and include them in the migration plan.
- **FR-013**: The system MUST include analytics data migration as part of the analytics service migration steps.
- **FR-012**: The system MUST provide verification steps for each migration task performed.
### Key Entities *(include if feature involves data)*
- **Host**: A server instance that can be assigned primary or secondary roles and hosts services.
- **Service**: A deployable workload with external endpoints and internal configuration.
- **Proxy Mapping**: The set of subdomains and routing rules that map public traffic to services.
- **Firewall Rule Set**: The collection of allowed and blocked network flows required for the VPS.
- **VPN Peer**: A client identity with assigned addresses and access constraints.
- **SSH Key**: A public key used for authenticated access to a user account.
- **Migration Checklist**: A list of tasks and verification steps that confirm readiness.
## Success Criteria *(mandatory)*
### Measurable Outcomes
- **SC-001**: 100% of services previously reachable via the old VPS are reachable via the new VPS after cutover.
- **SC-002**: All documented firewall flows (inbound and outbound) pass or block with the same outcomes as the old VPS.
- **SC-003**: 100% of configured VPN peers can connect and reach required internal addresses.
- **SC-004**: 100% of authorized SSH users can authenticate using their specified keys.
- **SC-005**: Migration verification steps can be completed in a single run without unresolved failures.
## Assumptions
- The existing proxy configuration files will be provided by the user for comparison.
- The firewall rules from the existing VPS are authoritative and should be mirrored on the new VPS.
- The list of standard admin hosts for SSH access is complete unless the review identifies an omission.
- The analytics service migration steps are documentation-only and do not require immediate cutover.

View File

@@ -0,0 +1,92 @@
# Tasks: VPS Migration
**Branch**: `004-vps-migration`
**Date**: 2026-02-04
**Spec**: /home/jawz/Development/NixOS/specs/004-vps-migration/spec.md
**Plan**: /home/jawz/Development/NixOS/specs/004-vps-migration/plan.md
## Implementation Strategy
Deliver MVP as User Story 1 (primary host reverse proxy + keep services on host server). Then complete firewall parity (US2), secure access (US3), and migration gap review + verification (US4).
## Phase 1: Setup
- [x] T001 Confirm baseline files exist: iptables, secrets/ssh/ed25519_deploy.pub, secrets/ssh/ed25519_lidarr-reports.pub, secrets system entries for VPN keys
- [x] T002 Create working checklist placeholder for verification steps in /home/jawz/Development/NixOS/specs/004-vps-migration/tasks.md (this file)
## Phase 2: Foundational
- [x] T003 [P] Review mainServer and enableProxy options in hosts/server/toggles.nix and modules/modules.nix
- [x] T004 [P] Review wireguard module in modules/services/wireguard.nix and VPS host config in hosts/vps/configuration.nix
- [x] T005 [P] Review SSH host/IP settings in config/jawz.nix and modules/modules.nix for vps IP updates
- [x] T006 [P] Review caddy file list references in ./jawz_hist and ./sudo_hist to prepare subdomain comparison inputs
## Phase 3: User Story 1 (P1) - Primary VPS reverse proxy
**Story goal**: New VPS is primary reverse-proxy host (nginx only) while services remain on host server.
**Independent test criteria**: Proxy mappings resolve through VPS to host server services without relocating services.
- [x] T007 [US1] Set mainServer to "vps" in hosts/server/toggles.nix
- [x] T008 [US1] Enable proxying on VPS by setting my.enableProxy = true in hosts/vps/configuration.nix and ensure services in hosts/server/toggles.nix have enableProxy = true
- [x] T009 [US1] Capture provided caddy config files (e.g., /etc/caddy/Caddyfile.d/*) and compare subdomains to modules/servers/*.nix domain definitions; document mismatches in specs/004-vps-migration/quickstart.md
- [x] T010 [US1] Add shared client certificate handling from modules/servers/synapse.nix into the factory or shared module and apply it to mTLS-protected sites (use secrets/certs.yaml for client CA)
## Phase 4: User Story 2 (P1) - Firewall parity
**Story goal**: Firewall behavior on new VPS matches old VPS by applying iptables ruleset as-is.
**Independent test criteria**: Known inbound/outbound flows match existing VPS behavior.
- [x] T011 [US2] Apply iptables ruleset as-is to VPS configuration in hosts/vps/configuration.nix using the repo root iptables file
## Phase 5: User Story 3 (P2) - Secure access and VPN peers
**Story goal**: Wireguard enabled on VPS with secrets-managed keys; SSH access for service users and admin hosts.
**Independent test criteria**: VPN peers connect with correct addresses; SSH keys authenticate as expected.
- [x] T012 [US3] Enable wireguard module on VPS in hosts/vps/configuration.nix (my.services.wireguard.enable = true) and ensure listen port exposed
- [x] T013 [US3] Add sops secrets entries for wireguard keys in secrets/wireguard.yaml and confirm user-provided key material
- [x] T014 [US3] Update wireguard peer configuration in modules/services/wireguard.nix using sops secrets refs for public/private keys (no plaintext)
- [x] T015 [US3] Add service users and groups deploy and lidarr-reports with authorized_keys in hosts/vps/configuration.nix using secrets/ssh/ed25519_deploy.pub and secrets/ssh/ed25519_lidarr-reports.pub
- [x] T016 [US3] Add admin SSH authorized_keys for workstation, server, deacero, galaxy in hosts/vps/configuration.nix
- [x] T017 [US3] Configure sshd port and auth settings in hosts/vps/configuration.nix to match: Port 3456, PermitRootLogin no, PasswordAuthentication no
- [x] T018 [US3] Harden remote rebuild access by switching to a non-root SSH user for rebuilds (nixremote) and requiring sudo for nixos-rebuild in hosts/vps/configuration.nix and modules/users/nixremote.nix
- [x] T019 [US3] Restrict SSH access for remote rebuilds by limiting allowed users/keys for nixremote (update inputs.self.lib.getSshKeys list in hosts/vps/configuration.nix)
- [x] T020 [US3] Update VPS IP to 45.33.0.228 in modules/modules.nix and config/jawz.nix SSH host entry
- [x] T021 [US3] Update host server wireguard client configuration in hosts/server/configuration.nix to target the new VPS endpoint
## Phase 6: User Story 4 (P3) - Migration gaps and verification
**Story goal**: Identify missing configuration from history logs and provide verification steps for every task.
**Independent test criteria**: Clarification list exists and each task has a verification step.
- [x] T022 [US4] Review sudo_hist and jawz_hist for missing configuration; record clarification list in specs/004-vps-migration/quickstart.md
- [ ] T023 [US4] Document analytics data migration steps (export, import, validate) in specs/004-vps-migration/quickstart.md
- [x] T024 [US4] Add verification steps for each task in specs/004-vps-migration/quickstart.md
## Phase 7: Polish & Cross-Cutting Concerns
- [x] T025 [P] Update references to old VPS proxy logic (caddy) to ensure nginx is the only runtime proxy in README.org and docs/*.md
- [x] T026 [P] Validate all task descriptions include explicit file paths in specs/004-vps-migration/tasks.md and update mismatches
## Dependencies
- US1 → US2 → US3 → US4
## Parallel Execution Examples
- US1: T007, T008, T009 can proceed once T003 and T006 are reviewed.
- US2: T011 can proceed once iptables application location is identified.
- US3: T012, T016, T017, T018, and T020 can proceed after T004 and T005 review; T013 depends on user-provided secrets.
- US4: T022, T023, T024 can proceed independently once logs are reviewed and quickstart.md is open.
## Validation
- All tasks use the required checklist format with IDs, story labels, and explicit file paths.
## Verification Steps (Placeholder)
- To be filled during T024 with per-task verification steps.

457
sudo_hist Normal file
View File

@@ -0,0 +1,457 @@
clear
exit
clear
dnf install wireguard-tools neovim caddy
systemctl enable --now caddy
systemctl enable --now iptables
dnf install iptables-services
systemctl enable --now iptables
ls /home/
ls /home/fedora
nano /etc/ssh/sshd_config
nano /etc/wireguard/wg0.conf
nano /etc/wireguard/home_private.key
sudo useradd -m -s /bin/bash jawz
sudo passwd jawz
sudo usermod -aG wheel jawz
visudo
ls
su jawz
cat /home/jawz/iptables /etc/sysconfig/iptables
cat /home/jawz/iptables > /etc/sysconfig/iptables
cat /home/jawz/iptables-config /etc/sysconfig/iptables-config
cat /home/jawz/iptables-config > /etc/sysconfig/iptables-config
sudo systemctl restart iptables.service
nano /etc/hosts
ls
sudoedit /etc/ssh/sshd_config
ls
sudo reboot
mv /home/jawz/Caddyfile.d/ /etc/caddy/
ls /etc/caddy/
ls /etc/caddy/ -la
sudo chown root:root /etc/caddy/Caddyfile -R
ls /etc/caddy/ -la
chown root:root -R /etc/caddy/Caddyfile
ls /etc/caddy/ -la
chown root:root -R /etc/caddy/Caddyfile.d/
ls /etc/caddy/ -la
sudo systemctl restart caddy
exit
528491
clear
export TERM=xterm-256color
clear
sudo iptables -S
ping google.com
sudoedit /etc/sysconfig/iptables
sudo systemctl restart iptables.service
sudo systemctl restart wg-quick@wg0.service
sudo iptables -L FORWARD -n -v --line-numbers
sudoedit /etc/sysconfig/iptables
sudo systemctl restart iptables.service
sudoedit /etc/sysconfig/iptables
sudo systemctl restart iptables.service
sudoedit /etc/sysconfig/iptables
sudo iptables-save > /root/iptables-backup-$(date +%s)
sudo iptables -F FORWARD
sudo iptables-restore < /tmp/iptables
sudo iptables -D FORWARD 4
sudo iptables -S
sudo systemctl restart iptables.service
sudo iptables -S
sudoedit /etc/sysconfig/iptables
sud nvim /etc/sysconfig/iptables
sudo nvim /etc/sysconfig/iptables
sudo systemctl restart iptables.service
sudo journalctl -xeu iptables
sudo nvim /etc/sysconfig/iptables
sudo systemctl restart iptables.service
sudo systemctl restart caddy
cd /etc/caddy/Caddyfile.d/
ls
mv portfolio.caddyfile portfolio.caddyfile_
sudo systemctl restart caddy
sudoedit /etc/wireguard/wg0.conf
sudo systemctl restart wg-quick@wg0.service
ping 10.77.0.2:80
sudoedit /etc/wireguard/wg0.conf
ping 10.77.0.2
sudo journalctl -xefu wg-quick@wg0
ping 10.77.0.2
ping server
wg show
sudoedit /etc/wireguard/wg0.conf
wg show
cd /etc/caddy/Caddyfile.d/
mv portfolio.caddyfile_ portfolio.caddyfile
mv portfolio.caddyfile portfolio.caddyfile_
cat /etc/sysconfig/iptables
sudo nvim /etc/sysconfig/iptables
sudo systemctl restart iptables.service
journalctl -xeu iptables
sudo nvim /etc/sysconfig/iptables
sudo systemctl restart iptables.service
sudo iptables -L FORWARD -n -v --line-numbers
# In one terminal, watch the iptables counters
sudo watch -n1 'iptables -L FORWARD -n -v --line-numbers'
export TERM=xterm-256color
sudo watch -n1 'iptables -L FORWARD -n -v --line-numbers'
sudo tcpdump -i any icmp -n
ip addr show wg0
sudo iptables -I FORWARD 6 -s 10.8.0.0/24 -d 10.77.0.2/32 -p icmp -j ACCEPT
sudo iptables -I FORWARD 7 -s 10.77.0.2/32 -d 10.8.0.0/24 -p icmp -j ACCEPT
sudo iptables -L FORWARD -n -v --line-numbers
sudo iptables-save > /etc/sysconfig/iptables
nano /etc/wireguard/wg0.conf
export TERM=xterm-256color
nano /etc/wireguard/wg0.conf
systemctl restart wg-quick.target
systemctl restart wg-quick@wg0
cat /etc/wireguard/wg0.conf
sudo nvim /etc/wireguard/wg0.conf
sudo systemctl restart wg-quick@wg0.service
wg show
sudo nvim /etc/wireguard/wg0.conf
sudo systemctl restart wg-quick@wg0.service
wg show
sudo systemctl enable ip6tables
sudo systemctl disable --now nftables 2>/dev/null || true
sudo systemctl mask nftables 2>/dev/null || true
exit
export TERM=xterm-256color
sudo nano /etc/sysconfig/iptables
cd /etc/caddy/Caddyfile.d/
ls
cat fun.caddyfile__
rm fun.caddyfile__
ls
nano simple.caddyfile
export TERM=xterm-256color
nano simple.caddyfile
nvim simple.caddyfile
mv simple.caddyfile servers.caddyfile
systemctl restart caddy
ls
exit
export TERM=xterm-256color
cd /etc/caddy/Caddyfile.d/
nvim servers.caddyfile
sudo systemctl restart caddy
journalctl -xeu caddy
cd /etc/caddy/Caddyfile.d/
nvim redirect.caddyfile
sudo systemctl restart caddy
nvim redirect.caddyfile
sudo journalctl -u caddy -f
ls
nvim redirect.caddyfile
mv redirect.caddyfile 10-redirect.caddyfile
nvim 00-allowlist.caddyfile
mv servers.caddyfile 20-servers.caddyfile
cd ..
ls
nvim Caddyfile
sudo systemctl restart caddy
sudo journalctl -u caddy -f
nvim Caddyfile
sudo systemctl restart caddy
nvim Caddyfile
ls
cd Caddyfile.d/
ls
mv 00-allowlist.caddyfile 00-allowlist.caddyfile_
mv 10-redirect.caddyfile 10-redirect.caddyfile_
sudo systemctl restart caddy
exit
cd /etc/caddy/Caddyfile.d/
nvim servers.caddyfile
nvim redirect.caddyfile
sudo caddy fmt --overwrite redirect.caddyfile
sudo caddy validate --config redirect.caddyfile
nvim /etc/caddy/Caddyfile.d/servers.caddyfile
systemctl restart caddy
cd /etc/caddy/Caddyfile.d/
ls
rm 00-allowlist.caddyfile_ 10-redirect.caddyfile_ portfolio.caddyfile_
ls
mv portfolio.caddyfile_ 30-portfolio.caddyfile_
nvim 30-portfolio.caddyfile_
ls
cat 20-servers.caddyfile
nvim 20-servers.caddyfile
systemctl restart caddy
nvim 20-servers.caddyfile
nvim 10-nextcloud.caddyfile
nvim 20-servers.caddyfile
cd ..
cat Caddyfile.d/20-servers.caddyfile
cat Caddyfile.d/20-servers.caddyfile | head -n 30
cat Caddyfile.d/20-servers.caddyfile | head -n 10
nvim /etc/caddy/client_ca.pem
nvim /etc/caddy/Caddyfile.d/20-servers.caddyfile
systemctl restart caddy
cat Caddyfile.d/20-servers.caddyfile | head -n 10
exit
nvim /etc/caddy/Caddyfile.d/20-servers.caddyfile
nvim /etc/caddy/Caddyfile.d/15-private.caddyfile
sudo systemctl restart caddy
nvim /etc/caddy/Caddyfile.d/10-nextcloud.caddyfile
nvim /etc/caddy/Caddyfile.d/20-servers.caddyfile
cat /etc/caddy/Caddyfile.d/20-servers.caddyfile
exit
cd /etc/
ls
cd sysconfig/
ls
nvim iptables
cat iptables
curl 10.77.0.2:8999
nvim iptables
sudo systemctl restart iptables.service
exit
curl 10.77.0.2:8999
curl 10.8.0.2:8999
curl 10.8.0.1:8999
exit
cd /etc/wireguard/
ls
cat wg0.conf
exit
cd /etc/caddy/
ls
cd Caddyfile.d/
ls
mv 30-portfolio.caddyfile_ 30-portfolio.caddyfile
cat 15-private.caddyfile__
ls
cat 25-static.caddyfile
cat 30-portfolio.caddyfile
rm 30-portfolio.caddyfile
nvim 25-static.caddyfile
systemctl restart caddy
exit
cat /etc/caddy/Caddyfile.d/25-static.caddyfile
nvim /etc/caddy/Caddyfile.d/25-static.caddyfile
sudo systemctl restart caddy
nvim /etc/caddy/Caddyfile.d/25-static.caddyfile
sudo systemctl restart caddy
cat /etc/caddy/Caddyfile.d/25-static.caddyfile
nvim /etc/caddy/Caddyfile.d/25-static.caddyfile
sudo systemctl restart caddy
cat /etc/caddy/Caddyfile.d/25-static.caddyfile
caddy validate --config /etc/caddy/Caddyfile.d/25-static.caddyfile
caddy fmt --overwrite /etc/caddy/Caddyfile.d/*
caddy fmt --overwrite /etc/caddy/Caddyfile.d/25-static.caddyfile
find -tf /etc/caddy/Caddyfile.d/25-static.caddyfile
find -type f /etc/caddy/Caddyfile.d/
find /etc/caddy/Caddyfile.d/ -type f
find /etc/caddy/Caddyfile.d/ -type f -exec caddy fmt --overwrite {}
find /etc/caddy/Caddyfile.d/ -type f -exec caddy fmt --overwrite {} \;
caddy validate --config /etc/caddy/Caddyfile.d/25-static.caddyfile
ls -la /var/www/html/portfolio/
ls -la /var/www/html/portfolio/images/
ls -la /var/www/html/portfolio/old_ijwbs/
du -sh /var/www/html/portfolio/
ls -la /var/www/html/portfolio/
ls -la /var/www/html/portfolio/friends/
cd /etc/sysconfig/
ls
cat iptables
rg 51413
rg 51412
cat iptables
sudo tcpdump -ni eth0 port 51412
sudo tcpdump -ni wg0 port 51412
sudo tcpdump -ni eth0 port 51412
ss -ltnp | grep ":51412"
sysctl -w net.ipv4.ip_forward=1
# ---- NAT (insert at top) ----
iptables -t nat -I PREROUTING 1 -i eth0 -p tcp --dport 51412 -j DNAT --to-destination 10.77.0.2:51412
iptables -t nat -I PREROUTING 2 -i eth0 -p udp --dport 51412 -j DNAT --to-destination 10.77.0.2:51412
iptables -t nat -I POSTROUTING 1 -s 10.77.0.0/24 -o eth0 -j MASQUERADE
# ---- FORWARD ----
iptables -I FORWARD 1 -i eth0 -o wg0 -p tcp -d 10.77.0.2 --dport 51412 -m conntrack --ctstate NEW,ESTABLISHED,RELATED -j ACCEPT
iptables -I FORWARD 2 -i eth0 -o wg0 -p udp -d 10.77.0.2 --dport 51412 -j ACCEPT
iptables -I FORWARD 3 -i wg0 -o eth0 -s 10.77.0.2 -p tcp --sport 51412 -m conntrack --ctstate ESTABLISHED,RELATED -j ACCEPT
iptables -I FORWARD 4 -i wg0 -o eth0 -s 10.77.0.2 -p udp --sport 51412 -j ACCEPT
iptables -I FORWARD 5 -i wg0 -o eth0 -j ACCEPT
iptables -I FORWARD 6 -i eth0 -o wg0 -m conntrack --ctstate ESTABLISHED,RELATED -j ACCEPT
net.ipv4.ip_forward = 1
sysctl -w net.ipv4.ip_forward=1
iptables -t nat -I PREROUTING 1 -i eth0 -p tcp --dport 51412 -j DNAT --to-destination 10.77.0.2:51412
iptables -t nat -I PREROUTING 2 -i eth0 -p udp --dport 51412 -j DNAT --to-destination 10.77.0.2:51412
iptables -t nat -I POSTROUTING 1 -s 10.77.0.0/24 -o eth0 -j MASQUERADE
iptables -I FORWARD 1 -i eth0 -o wg0 -p tcp -d 10.77.0.2 --dport 51412 -m conntrack --ctstate NEW,ESTABLISHED,RELATED -j ACCEPT
iptables -I FORWARD 2 -i eth0 -o wg0 -p udp -d 10.77.0.2 --dport 51412 -j ACCEPT
iptables -I FORWARD 3 -i wg0 -o eth0 -s 10.77.0.2 -p tcp --sport 51412 -m conntrack --ctstate ESTABLISHED,RELATED -j ACCEPT
iptables -I FORWARD 4 -i wg0 -o eth0 -s 10.77.0.2 -p udp --sport 51412 -j ACCEPT
iptables -I FORWARD 5 -i wg0 -o eth0 -j ACCEPT
iptables -I FORWARD 6 -i eth0 -o wg0 -m conntrack --ctstate ESTABLISHED,RELATED -j ACCEPT
iptables -L FORWARD -n -v --line-numbers
iptables -t nat -L -n -v --line-numbers
iptables -L FORWARD -n -v --line-numbers
iptables -t nat -L -n -v --line-numbers
sudo tcpdump -ni eth0 port 51412
curl -4 ifconfig.me
tcpdump -ni eth0 port 51412
ss -lntup | grep 51412
iptables -t raw -I PREROUTING 1 -p tcp --dport 51412 -j NOTRACK
iptables -t raw -I PREROUTING 1 -p udp --dport 51412 -j NOTRACK
iptables -t nat -I PREROUTING 1 -i eth0 -p tcp --dport 51412 -j DNAT --to-destination 10.77.0.2:51412
iptables -t nat -I PREROUTING 2 -i eth0 -p udp --dport 51412 -j DNAT --to-destination 10.77.0.2:51412
iptables -I FORWARD 1 -i eth0 -o wg0 -p tcp -d 10.77.0.2 --dport 51412 -j ACCEPT
iptables -I FORWARD 2 -i eth0 -o wg0 -p udp -d 10.77.0.2 --dport 51412 -j ACCEPT
iptables -I FORWARD 3 -i wg0 -o eth0 -s 10.77.0.2 --sport 51412 -j ACCEPT
iptables -t nat -I POSTROUTING 1 -s 10.77.0.2 -o eth0 -j MASQUERADE
tcpdump -ni wg0 port 51412
sysctl net.ipv4.ip_forward
iptables -t raw -I PREROUTING 1 -p tcp --dport 51412 -j NOTRACK
iptables -t raw -I PREROUTING 2 -p udp --dport 51412 -j NOTRACK
iptables -t nat -I PREROUTING 1 -i eth0 -p tcp --dport 51412 -j DNAT --to-destination 10.77.0.2:51412
iptables -t nat -I PREROUTING 2 -i eth0 -p udp --dport 51412 -j DNAT --to-destination 10.77.0.2:51412
iptables -I FORWARD 1 -i eth0 -o wg0 -p tcp -d 10.77.0.2 --dport 51412 -j ACCEPT
iptables -I FORWARD 2 -i eth0 -o wg0 -p udp -d 10.77.0.2 --dport 51412 -j ACCEPT
iptables -I FORWARD 3 -i wg0 -o eth0 -s 10.77.0.2 --sport 51412 -j ACCEPT
iptables -t nat -I POSTROUTING 1 -s 10.77.0.2 -o eth0 -j MASQUERADE
tcpdump -ni wg0 port 51412
tcpdump -ni eth0 'tcp port 51412'
sysctl net.ipv4.conf.eth0.route_localnet
sysctl -w net.ipv4.conf.eth0.route_localnet=1
ip rule add fwmark 0x1 lookup 100
ip route add default dev wg0 table 100
iptables -t mangle -I PREROUTING 1 -i eth0 -p tcp --dport 51412 -j MARK --set-mark 1
iptables -t mangle -I PREROUTING 2 -i eth0 -p udp --dport 51412 -j MARK --set-mark 1
tcpdump -ni eth0 'tcp port 51412'
reboot
mkfs.ext4 "/dev/disk/by-id/scsi-0Linode_Volume_box"
mkdir /mnt/box
mount "/dev/disk/by-id/scsi-0Linode_Volume_box" "/mnt/box"
nvim /etc/fstab
cd /mnt/box/
ls -lag
sudo dnf install -y qbittorrent-nox
exit
cd /srv/torrents/downloads/
ls
cd The.Sims.4.Jenny/
ls
du -sh
rm rune
rm rune.nfo
exit
cd /srv/torrents/downloads/
ls
ls ../incomplete/
ls
ls in
ls ../incomplete/
ls
ls -lag
cd ..
su -sh
dh -sh
du -sh
df -h
ls
rm -rf incomplete/The.Sims.4.Jenny/
exit
cd
cd /srv/torrents/
ls -lag
du -sh
ls
mv tits/The.Sims.4.Jenny/ incomplete/
rmdir tits/
chown -R qbittorrent:qbittorrent incomplete/
cd /etc/sysconfig/
ls
cp iptables iptables_working
nvim iptables
systemctl restart iptables.service
journal -xeu iptables
journalctl -xeu iptables
nvim iptables
systemctl restart iptables.service
journalctl -xeu iptables
exit
nvim iptables
cd /etc/sysconfig/
nvim iptables
cd /etc/wireguard/
ls
nvim wg0.conf
nvim /etc/sysconfig/iptables
cd /etc/wireguard/
ls
wg genkey | tee privatekey | wg pubkey > publickey
ls
rm privatekey publickey
ls
mkdir friend
cd friend/
wg genkey | tee privatekey | wg pubkey > publickey
ls
cat privatekey
cat publickey
nvim ../wg0.conf
cat privatekey
nvim ../wg0.conf
systemctl restart wireguard
systemctl restart wg-quick@wg0.service
nvim /etc/sysconfig/iptables
nvim ../wg0.conf
systemctl restart wg-quick@wg0.service
nvim ../wg0.conf
wg show
nvim ../wg0.conf
nvim /etc/sysconfig/iptables
sudo systemctl restart iptables.service
nvim ../wg0.conf
cd /etc/wireguard/
ls
cd friend/
ls
rm *
wg genkey | tee privatekey | wg pubkey > publickey
cat publickey
nvim ../wg0.conf
cat privatekey
nvim ../wg0.conf
rm *
wg genkey | tee privatekey | wg pubkey > publickey
cat publickey
nvim ../wg0.conf
cat privatekey
rm *
wg genkey | tee privatekey | wg pubkey > publickey
cat publickey
nvim ../wg0.conf
cat privatekey
nvim /etc/sysconfig/iptables
sudo reboot
cd /etc/caddy/Caddyfile.d/
ls
rg xxx
nvim 15-private.caddyfile
sudo systemctl restart caddy
nvim 15-private.caddyfile
nvim 15-private.caddyfile__
exit
cd /etc/wireguard/
ls
cat wg0.conf
ls
ls friend/
rm friend/ -rf
ls
cd /var/www/html/
ls -lag blog/ lidarr-mb-gap/ portfolio/
ls -lag
ls -la
ls
cd
su deploy
su lidarr-reports
exit