split configs into two systems...

2023-09-06 23:46:45 -06:00
parent b161ac589c
commit 19e87b429e
56 changed files with 4280 additions and 4 deletions

workstation/.gitignore vendored Normal file

@@ -0,0 +1,4 @@
/dotfiles/*.Appimage
/scripts/download/.direnv/
/configuration.nix
/scripts/PureRef-1.11.1_x64.Appimage

workstation/configuration.org Executable file

File diff suppressed because it is too large


@@ -0,0 +1,334 @@
{
"extractor": {
"skip": "abort:5",
"cookies": [
"firefox",
"yw8fhvh4.default-release",
"gnomekeyring"
],
"user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/102.0.0.0 Safari/537.36",
"retries": 10,
"sleep-request": 0,
"directlink": {
"directory": [],
"filename": "{filename}.{extension}"
},
"twitter": {
"skip": "abort:1",
"directory": [
"{user[name]}"
],
"retweets": false,
"videos": true,
"logout": true
},
"flickr": {
"directory": [
"{category}",
"{owner[username]}"
],
"size-max": "Original",
"access-token": "72157720849409732-e83af94a8ca145aa",
"access-token-secret": "0c7e86529694756a"
},
"pinterest": {
"directory": [
"{board[owner][username]}",
"{board[name]}"
]
},
"wikifeet": {
"page-reverse": true,
"directory": [
"{category}",
"{celebrity}"
]
},
"instagram": {
"sleep-request": "15-45",
"sleep": "2-10",
"directory": [
"{username}"
],
"parent-directory": true,
"highlights": {
"reverse": "true",
"directory": [
"{username}"
]
},
"stories": {
"reverse": "true",
"directory": [
"{username}"
]
},
"tagged": {
"directory": [
"{tagged_username}",
"tagged"
]
}
},
"kemonoparty": {
"directory": [
"{category}",
"{user}"
],
"retries": 10,
"timeout": 5,
"filename": "{id}_{filename}.{extension}"
},
"exhentai": {
"directory": [
"{category}",
"{title}"
]
},
"tumblr": {
"directory": [
"{blog_name}"
],
"access-token": "WTt2nJdHLJAOQMpTbnMBGYqeJwoBeY2HDRztDPjf4HnqJ65rnT",
"access-token-secret": "0mI7ZWmD9CJPrQ1jjXvMGLjvJa44kOtgcKHtwz8LsAVDcODMPi",
"external": true,
"inline": true,
"posts": "all",
"reblogs": false,
"parent-directory": true,
"api-key": "uhBUtgPaX9gl7eaD8suGWW6ZInRedQoVT6xsZzopljy0jXHqm5",
"api-secret": "D3FDj1INyPzXikVpp4jmzSqjlC9czFUQ8oj2I883PSYJdqwURv"
},
"deviantart": {
"client-id": "20016",
"client-secret": "52e1f9b0cb26e673da36f69e2ddd0e9a",
"refresh-token": "cc862526cb515d82e750c099aa7f32a29087c961",
"directory": [
"{username}"
],
"include": "gallery,scraps",
"flat": true,
"original": true,
"mature": true,
"auto-watch": true,
"auto-unwatch": true
},
"furaffinity": {
"directory": [
"{user}",
"{subcategory}"
],
"include": [
"scraps",
"gallery"
]
},
"patreon": {
"directory": [
"(Patreon) {creator[vanity]}",
"({date:%Y%m%d}) {title} ({id})"
],
"filename": "{filename}.{num}.{extension}",
"browser": "firefox"
},
"blogger": {
"directory": [
"{blog[name]}",
"{post[author]}",
"{post[title]} - [{post[id]}]"
],
"filename": "{filename} - {num}.{extension}"
},
"artstation": {
"directory": [
"{userinfo[username]}"
],
"external": true
},
"gfycat": {
"format": "webm"
},
"reddit": {
"user-agent": "Python:gallery-dl:v1.0 (by /u/captainjawz)",
"client-id": "T7nZ6WZ3_onJWBhLP8r08g",
"refresh-token": "184157546842-bkMXgGYWzkwGSgXTeC8mMmaDZouhUQ",
"directory": [
"{author}"
],
"parent-directory": true
},
"redgifs": {
"reverse": "true",
"directory": [
"{userName}"
]
},
"imgur": {
"mp4": true
},
"paheal": {
"directory": [
"Husbands",
"{search_tags}"
]
},
"rule34": {
"directory": [
"Husbands",
"{search_tags}"
]
},
"e621": {
"directory": [
"Husbands",
"{search_tags}"
]
},
"baraag": {
"directory": [
"{account[username]}"
]
},
"pixiv": {
"refresh-token": "O4kc9tTzGItuuacDcfmevW6NELjm5CJdWiAbZdUv3Kk",
"directory": [
"{user[account]} - {user[id]}"
],
"ugoira": true,
"favorite": {
"directory": [
"{user_bookmark[account]} - {user_bookmark[id]}",
"Bookmarks"
]
},
"postprocessors": [
{
"name": "ugoira",
"extension": "webm",
"keep-files": false,
"whitelist": [
"pixiv"
],
"ffmpeg-twopass": true,
"ffmpeg-args": [
"-c:v",
"libvpx",
"-crf",
"4",
"-b:v",
"5000k",
"-an"
]
}
]
},
"readcomiconline": {
"chapter-reverse": true,
"directory": [
"Comics",
"{comic}",
"{comic} #{issue}"
],
"quality": "hq",
"captcha": "wait",
"postprocessors": [
"cbz"
]
},
"kissmanga": {
"chapter-reverse": true,
"directory": [
"Manga",
"{manga}",
"{manga} Ch.{chapter}{chapter_minor}"
],
"captcha": "wait",
"postprocessors": [
"cbz"
]
},
"mangahere": {
"chapter-reverse": true,
"directory": [
"Manga",
"{manga}",
"{manga} Ch.{chapter}{chapter_minor}"
],
"postprocessors": [
"cbz"
]
},
"mangadex": {
"chapter-reverse": true,
"chapter-filter": "lang == 'en'",
"directory": [
"Manga",
"{manga}",
"{manga} Ch.{chapter}{chapter_minor}"
],
"postprocessors": [
"cbz"
]
},
"mangareader": {
"chapter-reverse": true,
"directory": [
"Manga",
"{manga}",
"{manga} Ch.{chapter}{chapter_minor}"
],
"postprocessors": [
"cbz"
]
},
"mangapanda": {
"chapter-reverse": true,
"directory": [
"Manga",
"{manga}",
"{manga} Ch.{chapter}{chapter_minor}"
],
"postprocessors": [
"cbz"
]
},
"webtoons": {
"chapter-reverse": true,
"directory": [
"Webtoons",
"{comic}",
"{comic} #{episode}"
],
"postprocessors": [
"cbz"
]
}
},
"output": {
"mode": "auto"
},
"downloader": {
"part": true,
"part-directory": "/home/jawz/.cache/gallery-dl",
"ytdl": {
"logging": true,
"format": "bestvideo+bestaudio/best",
"module": "yt_dlp",
"forward-cookies": true
},
"http": {
"rate": null,
"retries": 5,
"timeout": 10.0,
"verify": true
}
},
"postprocessor": {
"cbz": {
"name": "zip",
"compression": "store",
"mode": "safe",
"extension": "cbz"
}
}
}
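The file above is a drop-in gallery-dl configuration. A minimal usage sketch, assuming it sits at gallery-dl's default path (the URL and archive path are illustrative; --dest and --download-archive are the same flags the download script below passes):

# gallery-dl picks up ~/.config/gallery-dl/config.json automatically
gallery-dl --dest ~/Downloads \
--download-archive ~/.archives/jawz.sqlite3 \
"https://www.pixiv.net/en/users/12345"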


@@ -0,0 +1,10 @@
autoclip: true
autoimport: false
cliptimeout: 45
exportkeys: false
nopager: false
notifications: false
parsing: true
path: /home/jawz/.local/share/pass
safecontent: true
mounts: {}


@@ -0,0 +1,61 @@
# Beware! This file is rewritten by htop when settings are changed in the interface.
# The parser is also very primitive, and not human-friendly.
htop_version=3.2.1
config_reader_min_version=3
fields=18 0 123 124 46 47 38 50 1
hide_kernel_threads=0
hide_userland_threads=0
shadow_other_users=0
show_thread_names=0
show_program_path=0
highlight_base_name=1
highlight_deleted_exe=1
highlight_megabytes=1
highlight_threads=1
highlight_changes=0
highlight_changes_delay_secs=5
find_comm_in_cmdline=1
strip_exe_from_cmdline=1
show_merged_command=1
header_margin=1
screen_tabs=1
detailed_cpu_time=0
cpu_count_from_one=1
show_cpu_usage=1
show_cpu_frequency=1
show_cpu_temperature=1
degree_fahrenheit=0
update_process_names=0
account_guest_in_cpu_meter=0
color_scheme=3
enable_mouse=1
delay=15
hide_function_bar=0
header_layout=two_67_33
column_meters_0=LeftCPUs Swap Tasks NetworkIO Memory
column_meter_modes_0=1 1 2 2 2
column_meters_1=RightCPUs Hostname Uptime LoadAverage
column_meter_modes_1=1 2 2 2
tree_view=1
sort_key=38
tree_sort_key=0
sort_direction=-1
tree_sort_direction=1
tree_view_always_by_pid=1
all_branches_collapsed=1
screen:Main=NICE PID COMM EXE PERCENT_CPU PERCENT_MEM M_VIRT NLWP Command
.sort_key=M_VIRT
.tree_sort_key=PID
.tree_view=1
.tree_view_always_by_pid=1
.sort_direction=-1
.tree_sort_direction=1
.all_branches_collapsed=1
screen:I/O=PID USER IO_PRIORITY IO_RATE IO_READ_RATE IO_WRITE_RATE PERCENT_SWAP_DELAY PERCENT_IO_DELAY Command
.sort_key=IO_RATE
.tree_sort_key=PID
.tree_view=0
.tree_view_always_by_pid=0
.sort_direction=-1
.tree_sort_direction=1
.all_branches_collapsed=0

workstation/dotfiles/npm/npmrc Executable file

@@ -0,0 +1,7 @@
user=0
unsafe-perm=true
prefix=${XDG_DATA_HOME}/npm
cache=${XDG_CACHE_HOME}/npm
tmp=${XDG_RUNTIME_DIR}/npm
init-module=${XDG_CONFIG_HOME}/npm/config/npm-init.js
store-dir=${XDG_DATA_HOME}/pnpm-store
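These npm/pnpm paths rely on the XDG variables being exported; npm does not fall back to the XDG defaults when they are unset. A sketch of the matching shell exports (standard XDG base-directory values; XDG_RUNTIME_DIR is normally provided by the session manager):

export XDG_CONFIG_HOME="$HOME/.config"
export XDG_CACHE_HOME="$HOME/.cache"
export XDG_DATA_HOME="$HOME/.local/share"
# XDG_RUNTIME_DIR is usually /run/user/$(id -u), set by pam_systemd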


@@ -0,0 +1,4 @@
{
"optOut": false,
"lastUpdateCheck": 1646662583446
}


@@ -0,0 +1 @@
hsts-file = /home/jawz/.cache/wget-hsts


@@ -0,0 +1,197 @@
# Do not modify this file! It was generated by nixos-generate-config
# and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead.
{ config, lib, pkgs, modulesPath, ... }:
let
unstable = import
(builtins.fetchTarball "https://github.com/nixos/nixpkgs/tarball/master") {
config = config.nixpkgs.config;
};
in {
imports = [ (modulesPath + "/installer/scan/not-detected.nix") ];
boot = {
#plymouth = { enable = true; };
loader = {
efi = {
canTouchEfiVariables = true;
efiSysMountPoint = "/boot/efi";
};
grub = {
enable = true;
device = "nodev";
efiSupport = true;
enableCryptodisk = true;
};
};
initrd.luks.devices = {
nvme = {
device = "/dev/disk/by-uuid/af72f45c-cf7c-4e7d-8eab-2a95ab754921";
preLVM = true;
};
disk1 = {
device = "/dev/disk/by-uuid/a9b0f346-7e38-40a6-baf6-3ad80cafc842";
preLVM = true;
};
disk2 = {
device = "/dev/disk/by-uuid/0ed12b83-4c56-4ba8-b4ea-75a9e927d771";
preLVM = true;
};
hnbox = {
device = "/dev/disk/by-uuid/c7dd2d5a-b0b3-46a0-aca9-3d4975c1f0bc";
preLVM = true;
};
seedbox = {
device = "/dev/disk/by-uuid/04f06a3e-a91f-476b-9a4b-b9c722ba99e7";
preLVM = true;
};
};
kernelModules = [ "kvm-intel" ];
kernel.sysctl = { "vm.swappiness" = 80; };
extraModulePackages = [ ];
initrd = {
availableKernelModules =
[ "xhci_pci" "ahci" "usbhid" "nvme" "usb_storage" "sd_mod" ];
kernelModules = [ ];
};
};
fileSystems."/" = {
device = "/dev/mapper/nvme";
fsType = "btrfs";
options = [
"subvol=nix"
"ssd"
"compress=zstd:3"
"x-systemd.device-timeout=0"
"space_cache=v2"
"commit=120"
"datacow"
"noatime"
];
};
fileSystems."/home" = {
device = "/dev/mapper/nvme";
fsType = "btrfs";
options = [
"subvol=home"
"ssd"
"compress=zstd:3"
"x-systemd.device-timeout=0"
"space_cache=v2"
"commit=120"
"datacow"
];
};
fileSystems."/mnt/disk1" = {
device = "/dev/mapper/disk1";
fsType = "btrfs";
options = [ "compress=zstd:3" "space_cache=v2" "commit=120" "datacow" ];
};
fileSystems."/var/lib/nextcloud/data" = {
device = "/mnt/disk1/nextcloud";
options = [ "bind" ];
};
fileSystems."/mnt/jellyfin/media" = {
device = "/mnt/disk1/multimedia/media";
options = [ "bind" "ro" ];
};
fileSystems."/mnt/disk2" = {
device = "/dev/mapper/disk2";
fsType = "btrfs";
options = [ "compress=zstd:3" "space_cache=v2" "commit=120" "datacow" ];
};
fileSystems."/mnt/hnbox" = {
device = "/dev/mapper/hnbox";
fsType = "btrfs";
options = [ "compress=zstd:3" "space_cache=v2" "commit=120" "datacow" ];
};
fileSystems."/mnt/seedbox" = {
device = "/dev/mapper/seedbox";
fsType = "btrfs";
options = [ "compress=zstd:3" "space_cache=v2" "commit=120" "datacow" ];
};
fileSystems."/mnt/jellyfin/external" = {
device = "/mnt/seedbox/external";
options = [ "bind" "ro" ];
};
fileSystems."/mnt/parity" = {
device = "/dev/disk/by-uuid/643b727a-555d-425c-943c-62f5b93631c9";
fsType = "xfs";
options = [ "defaults" ];
};
fileSystems."/boot" = {
device = "/dev/disk/by-uuid/c574cb53-dc40-46db-beff-0fe8a4787156";
fsType = "ext4";
};
fileSystems."/boot/efi" = {
device = "/dev/disk/by-uuid/CBE7-5DEB";
fsType = "vfat";
};
swapDevices = [{
device = "/dev/disk/by-partuuid/cb0ad486-ebf8-4bfc-ad7c-96bdc68576ca";
randomEncryption = {
enable = true;
cipher = "aes-xts-plain64";
keySize = 512;
sectorSize = 4096;
};
}];
# Enables DHCP on each ethernet and wireless interface. In case of scripted networking
# (the default) this is the recommended approach. When using systemd-networkd it's
# still possible to use this option, but it's recommended to use it in conjunction
# with explicit per-interface declarations with `networking.interfaces.<interface>.useDHCP`.
networking.useDHCP = lib.mkDefault true;
# networking.interfaces.enp0s31f6.useDHCP = lib.mkDefault true;
nixpkgs.hostPlatform = lib.mkDefault "x86_64-linux";
powerManagement.cpuFreqGovernor = lib.mkDefault "performance";
# nixpkgs.config.packageOverrides = pkgs: {
# vaapiIntel = pkgs.vaapiIntel.override { enableHybridCodec = true; };
# };
nixpkgs.config = { allowUnfree = true; };
virtualisation.docker.enableNvidia = true;
services.xserver.videoDrivers = [ "nvidia" ];
hardware = {
nvidia = {
modesetting.enable = true;
powerManagement.enable = true;
};
sane = {
enable = true;
extraBackends = [ pkgs.hplip pkgs.hplipWithPlugin ];
};
cpu.intel.updateMicrocode = lib.mkDefault true;
bluetooth.enable = true;
# opentabletdriver = {
# enable = true;
# package = unstable.opentabletdriver;
# daemon.enable = false;
# };
opengl = {
enable = true;
driSupport = true;
driSupport32Bit = true;
# extraPackages = with pkgs; [
# intel-media-driver # LIBVA_DRIVER_NAME=iHD
# vaapiIntel # LIBVA_DRIVER_NAME=i965 (older but works better for Firefox/Chromium)
# vaapiVdpau
# libvdpau-va-gl
# ];
};
};
}
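As the header says, this file is meant to be imported from configuration.nix rather than edited directly; assuming the standard /etc/nixos layout, a rebuild applies it:

# hardware-configuration.nix is pulled in by the imports list of configuration.nix
sudo nixos-rebuild switch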

workstation/nginx.nix Executable file

@@ -0,0 +1,204 @@
# Do not modify this file! It was generated by nixos-generate-config
# and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead.
{ config, lib, pkgs, modulesPath, ... }:
let
localhost = "127.0.0.1";
jellyfinPort = "8096";
nextcloudPort = 80;
searxPort = 8080;
newflixPort = 8897;
shioriPort = 9001;
flamePort = 5005;
secretFlamePort = 5007;
lidarrPort = 8686;
sonarrPort = 8989;
prowlarrPort = 9696;
radarrPort = 7878;
bazarrPort = config.services.bazarr.listenPort;
kavitaPort = config.services.kavita.port;
vaultPort = config.services.vaultwarden.config.ROCKET_PORT;
in {
services.nginx = {
enable = true;
recommendedTlsSettings = true;
recommendedGzipSettings = true;
recommendedOptimisation = true;
# recommendedProxySettings = true;
sslCiphers = "AES256+EECDH:AES256+EDH:!aNULL";
appendHttpConfig = ''
### GLOBAL
# Add HSTS header with preloading to HTTPS requests.
# Adding this header to HTTP requests is discouraged
map $scheme $hsts_header {
https "max-age=31536000; includeSubdomains; preload";
}
add_header Strict-Transport-Security $hsts_header;
# Enable CSP for your services.
#add_header Content-Security-Policy "script-src 'self'; object-src 'none'; base-uri 'none';" always;
# Minimize information leaked to other domains
add_header 'Referrer-Policy' 'origin-when-cross-origin';
# Disable embedding as a frame
# add_header X-Frame-Options DENY;
# Prevent injection of code in other mime types (XSS Attacks)
add_header X-Content-Type-Options nosniff;
# Enable XSS protection of the browser.
# May be unnecessary when CSP is configured properly (see above)
add_header X-XSS-Protection "1; mode=block";
# This might create errors
proxy_cookie_path / "/; secure; HttpOnly; SameSite=strict";
# NEXTCLOUD
# upstream php-handler {
# server ${localhost}:9000;
# #server unix:/var/run/php/php7.4-fpm.sock;
# }
# Set the `immutable` cache control options only for assets with a cache busting `v` argument
# map $arg_v $asset_immutable {
# "" "";
# default "immutable";
# }
# JELLYFIN
proxy_cache_path /var/cache/nginx/jellyfin-videos levels=1:2 keys_zone=jellyfin-videos:100m inactive=90d max_size=35000m;
proxy_cache_path /var/cache/nginx/jellyfin levels=1:2 keys_zone=jellyfin:100m max_size=15g inactive=30d use_temp_path=off;
map $request_uri $h264Level { ~(h264-level=)(.+?)& $2; }
map $request_uri $h264Profile { ~(h264-profile=)(.+?)& $2; }
'';
virtualHosts = let
base = locations: {
inherit locations;
forceSSL = true;
enableACME = true;
http2 = true;
};
proxy = port:
base { "/".proxyPass = "http://${localhost}:${toString (port)}/"; };
proxyArr = port:
proxy port // {
extraConfig = ''
proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $http_connection;
proxy_redirect off;
proxy_http_version 1.1;
'';
};
in {
"movies.servidos.lat" = proxyArr radarrPort // { };
"indexer.servidos.lat" = proxyArr prowlarrPort // { };
"newflix.servidos.lat" = proxy newflixPort // { };
"library.servidos.lat" = proxy kavitaPort // { };
"bookmarks.servidos.lat" = proxy shioriPort // { };
"start.servidos.lat" = proxy flamePort // { };
"music.servidos.lat" = proxy lidarrPort // { };
"subs.servidos.lat" = proxy bazarrPort // { };
"series.servidos.lat" = proxy sonarrPort // { };
"vault.servidos.lat" = proxy vaultPort // { };
"searx.servidos.lat" = proxy searxPort // { };
"qampqwn4wprhqny8h8zj.servidos.lat" = proxy secretFlamePort // { };
"flix.servidos.lat" = {
forceSSL = true;
enableACME = true;
http2 = true;
extraConfig = ''
# use a variable to store the upstream proxy
# in this example we are using a hostname which is resolved via DNS
# (if you aren't using DNS, remove the resolver line and change the variable to point to an IP address)
resolver ${localhost} valid=30;
location = / {
return 302 http://$host/web/;
#return 302 https://$host/web/;
}
location = /web/ {
# Proxy main Jellyfin traffic
proxy_pass http://${localhost}:${jellyfinPort}/web/index.html;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-Protocol $scheme;
proxy_set_header X-Forwarded-Host $http_host;
}
'';
locations = {
"/" = {
proxyPass = "http://${localhost}:${jellyfinPort}";
proxyWebsockets = true;
};
"/socket" = {
proxyPass = "http://${localhost}:${jellyfinPort}";
extraConfig = ''
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
'';
};
"~ /Items/(.*)/Images" = {
proxyPass = "http://${localhost}:${jellyfinPort}";
extraConfig = ''
proxy_cache jellyfin;
proxy_cache_revalidate on;
proxy_cache_lock on;
'';
};
"~* ^/Videos/(.*)/(?!live)" = {
proxyPass = "http://${localhost}:${jellyfinPort}";
extraConfig = ''
# Set size of a slice (this amount will be always requested from the backend by nginx)
# Higher value means more latency, lower more overhead
# This size is independent of the size clients/browsers can request
# slice 2m;
proxy_cache jellyfin-videos;
proxy_cache_valid 200 206 301 302 30d;
proxy_ignore_headers Expires Cache-Control Set-Cookie X-Accel-Expires;
proxy_cache_use_stale error timeout invalid_header updating http_500 http_502 http_503 http_504;
proxy_connect_timeout 15s;
proxy_http_version 1.1;
proxy_set_header Connection "";
# Transmit slice range to the backend
proxy_set_header Range 2m;
# This saves bandwidth between the proxy and jellyfin, as a file is only downloaded once instead of multiple times when several clients request it at the same time
# The first client will trigger the download, the other clients will have to wait until the slice is cached
# Esp. practical during SyncPlay
proxy_cache_lock on;
proxy_cache_lock_age 60s;
proxy_cache_key "jellyvideo$uri?MediaSourceId=$arg_MediaSourceId&VideoCodec=$arg_VideoCodec&AudioCodec=$arg_AudioCodec&AudioStreamIndex=$arg_AudioStreamIndex&VideoBitrate=$arg_VideoBitrate&AudioBitrate=$arg_AudioBitrate&SubtitleMethod=$arg_SubtitleMethod&TranscodingMaxAudioChannels=$arg_TranscodingMaxAudioChannels&RequireAvc=$arg_RequireAvc&SegmentContainer=$arg_SegmentContainer&MinSegments=$arg_MinSegments&BreakOnNonKeyFrames=$arg_BreakOnNonKeyFrames&h264-profile=$h264Profile&h264-level=$h264Level&slicerange=2m";
# add_header X-Cache-Status $upstream_cache_status; # This is only for debugging cache
'';
};
};
};
${config.services.nextcloud.hostName} = {
forceSSL = true;
enableACME = true;
http2 = true;
serverAliases = [ "cloud.rotehaare.art" "danilo-reyes.com" ];
};
};
};
networking = {
firewall = let open_firewall_ports = [ 80 443 ];
in {
enable = true;
allowedTCPPorts = open_firewall_ports;
allowedUDPPorts = open_firewall_ports;
};
};
}
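A quick sanity check that the HSTS map above only fires on HTTPS responses (hostname taken from the vhosts above; a sketch, not part of the config):

# expect a Strict-Transport-Security header here, and none over plain http
curl -sI https://start.servidos.lat | grep -i strict-transport-security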

workstation/openldap.nix Executable file

@@ -0,0 +1,83 @@
# Do not modify this file! It was generated by nixos-generate-config
# and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead.
{ config, lib, pkgs, modulesPath, ... }:
let hostname = "servidos.lat";
in {
services.openldap = {
enable = true;
# enable plain and secure connections
urlList = [ "ldap:///" "ldaps:///" ];
settings = {
attrs = {
olcLogLevel = "conns config";
# settings for acme ssl
olcTLSCACertificateFile = "/var/lib/acme/${hostname}/full.pem";
olcTLSCertificateFile = "/var/lib/acme/${hostname}/cert.pem";
olcTLSCertificateKeyFile = "/var/lib/acme/${hostname}/key.pem";
olcTLSCipherSuite = "HIGH:MEDIUM:+3DES:+RC4:+aNULL";
olcTLSCRLCheck = "none";
olcTLSVerifyClient = "never";
olcTLSProtocolMin = "3.1";
};
children = {
"cn=schema".includes = [
"${pkgs.openldap}/etc/schema/core.ldif"
"${pkgs.openldap}/etc/schema/cosine.ldif"
"${pkgs.openldap}/etc/schema/inetorgperson.ldif"
];
"olcDatabase={1}mdb".attrs = {
objectClass = [ "olcDatabaseConfig" "olcMdbConfig" ];
olcDatabase = "{1}mdb";
olcDbDirectory = "/var/lib/openldap/data";
olcSuffix = "dc=example,dc=com";
# your admin account, do not use writeText on a production system
olcRootDN = "cn=admin,dc=example,dc=com";
olcRootPW.path = pkgs.writeText "olcRootPW" "pass";
olcAccess = [
# custom access rules for userPassword attributes
''
{0}to attrs=userPassword
by self write
by anonymous auth
by * none''
# allow read on anything else
''
{1}to *
by * read''
];
};
};
};
};
# ensure openldap is launched after certificates are created
systemd.services.openldap = {
wants = [ "acme-${hostname}.service" ];
after = [ "acme-${hostname}.service" ];
};
# make acme certificates accessible by openldap
security.acme.defaults.group = "certs";
users.groups.certs.members = [ "openldap" ];
# trigger the actual certificate generation for your hostname
security.acme.certs."${hostname}" = { extraDomainNames = [ ]; };
# example using hetzner dns to run letsencrypt verification
security.acme.defaults.dnsProvider = "hetzner";
security.acme.defaults.credentialsFile = pkgs.writeText "credentialsFile" ''
HETZNER_API_KEY=<your-hetzner-dns-api-key>
'';
}
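Once ACME has issued the certificate and the service is up, the TLS listener can be verified with an anonymous search (base DN taken from olcSuffix above; a sketch):

# anonymous simple bind over TLS against the example suffix
ldapsearch -H ldaps://servidos.lat -x -b "dc=example,dc=com"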

workstation/scripts/chat-dl.sh Executable file

@@ -0,0 +1,21 @@
#!/usr/bin/env nix-shell
#! nix-shell -i bash -p bash yt-dlp
minutes=10
time_alive=60
sleep_time=$((minutes * 60))
# run enough loops that the total sleep time adds up to about time_alive minutes
loops=$((time_alive / (sleep_time / time_alive)))
url="https://chaturbate.com/$1"
save_dir=/mnt/disk2/glue/Tuhmayto
mkdir -p "$save_dir"
cd "$save_dir" || exit
for i in $(seq 1 1 "$loops"); do
waiting_time=$(((i * sleep_time) / time_alive))
yt-dlp --hls-use-mpegts --prefer-ffmpeg -o '%(title)s.%(ext)s' "$url"
echo "sleeping for $sleep_time seconds… been waiting for $waiting_time minutes"
sleep $sleep_time
done
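The script takes the room name as its only argument; a hypothetical invocation:

# records in 10-minute yt-dlp runs for roughly an hour in total
./chat-dl.sh some_room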


@@ -0,0 +1 @@
CONFIG_FILE = "/home/jawz/.config/jawz/config.yaml"


@@ -0,0 +1 @@
use nix


@@ -0,0 +1,96 @@
#!/usr/bin/env python3
"""Setup the argparser"""
import argparse
scrapper_types = (
"push",
"gallery",
"instagram",
"kemono",
"comic",
"manga",
"webcomic",
)
# Define types of instagram stories
instagram_types = ["posts", "reels", "channel", "stories", "highlights"]
def argparser(users: list) -> argparse.Namespace:
"""Returns an argparser to evaluate user input"""
# ARG PARSER
parser = argparse.ArgumentParser(
prog="Downloader",
description="Download images and galleries from a wide array of websites"
" either by using links or chosing from user define lists."
" This program also takes care of archiving tasks,"
" that keep the run time fast and prevents downloading duplicates.",
)
# Choose the type of scraper
parser.add_argument(
choices=scrapper_types,
nargs="?",
dest="scrapper",
help="Select a scrapper.",
)
# Parse user list
parser.add_argument(
"-u",
"--user",
choices=users,
dest="user",
help="Selects the personal user list to process. Defaults to everyone",
default="everyone",
type=str,
)
# Parse individual links
parser.add_argument(
"-i",
"--input",
nargs="*",
dest="link",
action="append",
help="Download the provided links",
type=str,
)
# Set the print list flag
parser.add_argument(
"-l",
"--list",
dest="flag_list",
action="store_true",
help="Prints a list of all the added links and prompts for a choice",
)
# Set the use archiver flag
parser.add_argument(
"-a",
"--no-archive",
dest="flag_archive",
action="store_false",
help="Disables the archiver flag",
)
# Set the skip flag
parser.add_argument(
"-s",
"--no_skip",
dest="flag_skip",
action="store_false",
help="Disables the skip function, downloads the entire gallery",
)
parser.add_argument(
"-v",
"--verbose",
dest="flag_verbose",
action="store_true",
help="Prints the generated commands instead of running them",
)
parser.add_argument(
"-t",
"--type-post",
choices=instagram_types,
nargs="*",
dest="post_type",
help="Filters posts on instagram by type",
default=instagram_types,
type=str,
)
return parser.parse_args()
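A few invocations this parser accepts, via the download entry point declared in setup.cfg further down (the user name is one of the names defined in config.yaml):

download gallery -u jawz # scrape the gallery list of a single user
download push # process everything queued on the push lists
download -i "https://example.com/gallery" -v # queue one link, only print the commands
download instagram -t stories highlights # limit instagram scraping to two post types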


@@ -0,0 +1,417 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Rewriting of the download manager script
with the intention to make it
more modular with the use of flags
in order to avoid unnecesary modifications
to the cofig files.
Also following in line more posix and python rules.
"""
import re
import time
import logging
import yaml
from functions import run
from functions import quote
from functions import list_lines
from functions import load_config_variables
from argparser import argparser
from gdl_classes import User
# GLOBAL VARIABLE SECTION
# Store the name of the main binaries early in the code
BIN_GALLERY = "gallery-dl"
BIN_YOUTUBE = "yt-dlp"
# SKIP = "3"
CONFIGS = load_config_variables()
LOGGER = logging.getLogger()
HANDLER = logging.StreamHandler()
FORMATTER = logging.Formatter(
"[%(filename)s][%(levelname)s] %(funcName)s '%(message)s'"
)
HANDLER.setFormatter(FORMATTER)
LOGGER.addHandler(HANDLER)
LOGGER.setLevel(logging.INFO)
# Enable a default "everyone" flag for when running stuff like download gallery
USERS = ["everyone"]
for dictionary in CONFIGS["users"]:
USERS.append(dictionary["name"])
ARGS = argparser(USERS)
def get_index(value: str) -> int:
"""Find the index in the config file"""
for i, dic in enumerate(CONFIGS["users"]):
if dic["name"] == value:
LOGGER.debug("%s is %s", dic["name"], i)
return i
return -1
def parse_gallery(gdl_list: str, user: User):
"""Processes the gallery-dl command based on the selected gallery"""
# skip_arg = f" -A {SKIP}" if ARGS.flag_skip else ""
skip_arg = " -o skip=true" if not ARGS.flag_skip else ""
LOGGER.debug(skip_arg)
# Send the list to gallery-dl
download_gallery(
ARGS.flag_archive,
skip_arg,
"",
str(user.sleep),
quote(f"{user.dir_download}"),
quote(f"{user.archive_gallery}"),
quote(gdl_list),
parse_instagram(gdl_list),
)
def parse_instagram(link: str) -> str:
"""Fix instagram links"""
if "instagram" not in link:
return ""
if isinstance(ARGS.post_type, list):
string = f" -o include={quote(','.join(ARGS.post_type))}"
LOGGER.debug(string)
return string
string = f" -o include={quote(ARGS.post_type)}"
LOGGER.debug(string)
return string
def parse_link(link: str) -> str:
"""Fixes links"""
if not re.search(r"(twitter\.com\/\w+(\/)?(?!.*status))", link):
LOGGER.debug("No modifications needed for the link %s", link)
return link
# if url contains /media at the end just write the line
fixed_link = re.sub(r"\/$|\/media(\/?)$", "", link) + "/media"
LOGGER.debug("Processed link %s", fixed_link)
return fixed_link
def download_gallery(
use_archive: bool,
skip_arg: str = "",
link: str = "",
sleep: str = "0",
destination: str = "",
database: str = "",
queue: str = "",
opt_args: str = "",
):
"""Processes the command string to run the gallery archiver"""
command = f"{BIN_GALLERY} --sleep {sleep}"
if skip_arg != "":
command += skip_arg
if destination != "":
command += f" --dest {destination}"
if use_archive:
command += f" --download-archive {database}"
if opt_args != "":
command += opt_args
if link != "" and queue == "":
LOGGER.info("link: %s", quote(link))
command += f" {link}"
if queue != "" and link == "":
LOGGER.info("queue: %s", queue)
command += f" -i {queue}"
LOGGER.debug(command)
run(command, ARGS.flag_verbose)
def download_youtube(
use_archive: bool,
link: str = "",
destination: str = "",
database: str = "",
):
"""Filters and processes the required command to download videos"""
command = BIN_YOUTUBE
if re.search(r"(https:\/\/youtube|https:\/\/www.youtube|https:\/\/youtu.be)", link):
command += f' -o {quote(destination + "/%(title)s.%(ext)s")}'
elif re.search(r"(https:\/\/music.youtube.*)", link):
if use_archive:
command += f" --download-archive {database}"
command += f""" \
--no-playlist --newline -x \
--audio-format best --add-metadata --audio-quality 0 -o \
{quote(destination + '/%(title)s.%(ext)s')} \
"""
elif re.search(r"chaturbate", link):
# Re-runs the program every 30 seconds in case the stream goes private or disconnects
for i in range(1, 41): # For a 20 minute total
run(
f"""
{BIN_YOUTUBE} \
--hls-use-mpegts --prefer-ffmpeg \
-o {quote(destination + '/%(title)s.%(ext)s')} \
{link}
""",
ARGS.flag_verbose,
)
time.sleep(30)
LOGGER.info("waited for %s minutes", i * 30 / 60)
else: # Any other video link, just do it generic
command += f" -f mp4 -o {quote(destination + '/%(title)s.%(ext)s')}"
LOGGER.info("%s %s", command, link)
run(f"{command} {link}", ARGS.flag_verbose)
def comic_manager(skip_arg: str, category: str):
"""Process the information to download manga"""
re_cat = ""
if category == "manga":
re_cat = "manga|webtoon"
elif category == "comic":
re_cat = "readcomiconline"
with open(CONFIGS["comic"]["list"], encoding="utf-8") as list_comic:
for graphic_novel in [line.rstrip() for line in list_comic]:
# Search for mangas but exclude comics
if not re.search(re_cat, graphic_novel):
LOGGER.debug("%s does not match the regex expression", graphic_novel)
continue
download_gallery(
ARGS.flag_archive,
skip_arg,
quote(graphic_novel),
"0",
CONFIGS["comic"]["download-directory"],
CONFIGS["comic"]["archive"],
"",
"",
)
def webcomic_manager():
"""Process the information to download webcomics"""
webcomic_list = CONFIGS["comic"]["webcomic-list"]
with open(webcomic_list, encoding="utf-8") as open_list:
webcomic_file = yaml.safe_load(open_list)
# Create a list of all the available webcomics for the user to choose from
for index, entry in enumerate(webcomic_file["Webcomics"]):
print(list_lines(index, entry["name"]))
# Prompt for a choice
usr_input = int(input("Select your comic: "))
# Determines where the webcomic will be downloaded
rating = webcomic_file["Webcomics"][usr_input]["type"]
webcomic_category = webcomic_file["Global"][f"{rating}_directory"]
LOGGER.debug("The webcomic is %s", webcomic_category)
command = f"""cd {quote(webcomic_category)} && webcomix custom \
{quote(webcomic_file["Webcomics"][usr_input]["name"])} \
--start-url \
{quote(webcomic_file["Webcomics"][usr_input]["url"])} \
--next-page-xpath={quote(webcomic_file["Webcomics"][usr_input]["next_code"])} \
--image-xpath={quote(webcomic_file["Webcomics"][usr_input]["image_code"])} \
-y --cbz"""
LOGGER.debug(command)
run(command, ARGS.flag_verbose)
def push_manager(user: User):
"""Filters out the URL to use the appropiate downloader"""
# Creates an array which will store any links that should use youtube-dl
link_video_cache = []
re_links = re.compile(
r"(twitter\.com\/\w+((?=.*media)|(?!.*status)))"
r"|(men\.wikifeet)"
r"|(furaffinity\.net\/user\/)"
r"|((deviantart\.com\/\w+(?!.*\/art\/)))"
r"|(furaffinity\.net\/gallery\/)"
r"|(furaffinity\.net\/scraps\/)"
r"|(furaffinity\.net\/favorites\/)"
r"|(instagram.com(?!\/p\/)\/\w+)"
r"|(e621\.net((?=\/post\/)|(?!\/posts\/)))"
r"|(flickr\.com\/photos\/\w+\/(?!\d+))"
r"|(tumblr\.com(?!\/post\/))"
r"|(kemono\.party\/(fanbox|gumroad|patreon)(?!\/user\/\d+\/post))"
r"|(blogspot\.com(?!\/))"
r"|(rule34\.paheal\.net\/post\/(?!view))"
r"|(rule34\.xxx\/index\.php\?page\=post&s=(?!view))"
r"|(pixiv\.net\/(en\/)?((?=users)|(?!artwork)))"
r"|(reddit\.com\/(user|u))"
r"|(baraag\.net\/((@\w+)|(?!\/\d+)))"
r"|(pinterest\.com\/(?!pin\/\d+))"
r"|(redgifs\.com\/(users|u|(?!watch)))",
)
with open(user.list_push, encoding="utf-8") as list_push:
for link in [line.rstrip() for line in list_push]:
LOGGER.debug("Processing %s", link)
# Flush the push list; opening it in "w" mode truncates the contents
open(user.list_push, "w", encoding="utf-8").close()
# VIDEOS
if re.search(r"youtu.be|youtube|pornhub|xtube|xvideos|chaturbate", link):
LOGGER.debug("Matched type yt-dlp")
link_video_cache.append(link)
# Search for gallery links, these will be added to a list after downloading
elif re.search(re_links, link):
LOGGER.debug("Matched type gallery-dl")
# skip_arg = f" -A {SKIP}" if ARGS.flag_skip else ""
skip_arg = " -o skip=true" if not ARGS.flag_skip else ""
LOGGER.debug("Skip: %s, link: %s", skip_arg, parse_instagram(link))
download_gallery(
ARGS.flag_archive,
skip_arg,
quote(f"{parse_link(link)}"),
f"{user.sleep}",
quote(f"{user.dir_download}"),
quote(f"{user.archive_gallery}"),
"",
f"{parse_instagram(link)}",
)
# Record the gallery link, so it remains on the watch list
with open(user.list_master, "a", encoding="utf-8") as w_file, open(
user.list_master, "r", encoding="utf-8"
) as r_file:
content = r_file.read().lower()
if parse_link(link).lower() in content:
LOGGER.info("Gallery repeated, not saving")
continue
LOGGER.info("New gallery, saving")
w_file.write(parse_link(str(link)) + "\n")
# Searches for comic/manga links
elif re.search(r"readcomiconline|mangahere|mangadex|webtoons", link):
# Toggle for comic/manga skip flag
if ARGS.flag_skip and re.search(r"readcomiconline", link):
skip_arg = " --chapter-range 1"
elif ARGS.flag_skip and re.search(r"mangahere|webtoons", link):
skip_arg = " --chapter-range 1-5"
else:
skip_arg = ""
LOGGER.debug(skip_arg)
download_gallery(
ARGS.flag_archive,
skip_arg,
quote(link),
"0",
CONFIGS["comic"]["download-directory"],
CONFIGS["comic"]["archive"],
"",
"",
)
# Add comic/manga link to the list
list_gn = CONFIGS["comic"]["list"]
with open(list_gn, "a", encoding="utf-8") as w_file, open(
list_gn, "r", encoding="utf-8"
) as r_file:
content = r_file.read().lower()
if parse_link(link).lower() in content:
LOGGER.info("Graphic novel repeated, not saving")
continue
LOGGER.info("New graphic novel, saving")
w_file.write(link + "\n")
# Download generic links, the -o flag overwrites config file and
# downloads the files into the root destination
else:
LOGGER.info("Other type of download %s", link)
download_gallery(
False,
" -o directory='[]'",
quote(link),
"0",
quote(str(user.dir_push)),
"",
"",
"",
)
# Send the video links to youtube-dl
for link in link_video_cache:
download_youtube(
ARGS.flag_archive,
quote(link),
f"{user.dir_media_download}",
quote(f"{user.archive_media}"),
)
def scrapper_manager(user: User):
# pylint: disable=too-many-branches
"""Analyze the user arguments and call in functions"""
if not ARGS.scrapper: # Check if a scrapper was selected
return
if re.search(r"gallery|instagram|kemono", ARGS.scrapper):
# skip_arg = f" -A {SKIP}" if ARGS.flag_skip else ""
skip_arg = " -o skip=true" if not ARGS.flag_skip else ""
LOGGER.debug(skip_arg)
if ARGS.scrapper == "gallery":
parse_gallery(f"{user.list_main}", user)
elif ARGS.scrapper == "instagram":
parse_gallery(f"{user.list_instagram}", user)
elif ARGS.scrapper == "kemono":
parse_gallery(f"{user.list_kemono}", user)
elif ARGS.scrapper == "push":
push_manager(user)
elif ARGS.scrapper == "comic":
skip_arg = " --chapter-range 1" if ARGS.flag_skip else ""
LOGGER.debug(skip_arg)
comic_manager(skip_arg, "comic")
elif ARGS.scrapper == "manga":
skip_arg = " --chapter-range 1-5" if ARGS.flag_skip else ""
LOGGER.debug(skip_arg)
comic_manager(skip_arg, "manga")
elif ARGS.scrapper == "webcomic":
webcomic_manager()
def main():
"""Main module to decide what to do based on the parsed arguments"""
if ARGS.scrapper:
if (ARGS.user == "everyone") and (
re.search(r"push|gallery|instagram|kemono", ARGS.scrapper)
):
for current_user in CONFIGS["users"]:
user = User(get_index(current_user["name"]))
user.list_manager()
LOGGER.info("Scrapping %s for %s", ARGS.scrapper, current_user["name"])
scrapper_manager(user)
elif re.search(r"comic|manga|webcomic", ARGS.scrapper):
user = User(get_index("jawz"))
user.list_manager()
LOGGER.info("Scrapping %s", ARGS.scrapper)
scrapper_manager(user)
else:
# Create the lists to scrap
user = User(get_index(ARGS.user))
user.list_manager()
scrapper_manager(user)
elif ARGS.link:
LOGGER.debug(ARGS.link)
if re.search(r"everyone|jawz", ARGS.user):
# Create the lists to scrap
user = User(get_index("jawz"))
user.list_manager()
else:
# Create the lists to scrap
user = User(get_index(ARGS.user))
user.list_manager()
for arg_link in ARGS.link[0]:
LOGGER.debug(arg_link)
if ARGS.flag_verbose:
LOGGER.debug(
"%s >> %s", quote(parse_link(arg_link)), quote(user.list_push)
)
else:
with open(user.list_push, "a", encoding="utf-8") as open_file:
open_file.write(parse_link(arg_link) + "\n")
push_manager(user)
if __name__ == "__main__":
main()


@@ -0,0 +1,70 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Personal functions to aid on multiple scripts"""
import sys
import fileinput
import re
import os
from pathlib import Path
import yaml
VERBOSE_G = False
def load_config_variables():
"""Loads all the variables from the config file"""
config_file = Path("~/.config/jawz/config.yaml")
with open(config_file.expanduser(), encoding="utf-8") as open_file:
return yaml.safe_load(open_file)
def run(command: str, verbose: bool):
"""Run command in a subprocess"""
# pylint: disable=subprocess-run-check
# This toggle allows for really easy debugging when using -v
if verbose:
print(command)
else:
os.system(command)
def list_lines(i: int, line: str) -> str:
"""Create a numbered list"""
return f"{i}) {line}"
def quote(line: str) -> str:
"""Quote the line"""
return f'"{line}"'
def sort_txt_file(file_path: Path):
"""Sort every line alphabetically
remove duplicated and empty lines"""
file = str(file_path.resolve())
run(f"sort -u {quote(file)} -o {quote(file)}", VERBOSE_G)
run(f"sed -i '/^$/d' {quote(file)}", VERBOSE_G)
run(f'sed -i -e "s,http:,https:," {quote(file)}', VERBOSE_G)
# fix this using strip on python
# line.strip("/")
run(f'sed -i -e "s,/$,," {quote(file)}', VERBOSE_G) # trailing /
def randomize_txt_file(file_path: Path):
"""Randomize the order of the
lines of the txt file"""
file = str(file_path.resolve())
run(f"sort -R {quote(file)} -o {quote(file)}", VERBOSE_G)
def parse_list(file):
"""Replace http with https and remove trailing /"""
for line in fileinput.input(file, inplace=True):
sys.stdout.write(str(line).replace("http://", "https://"))
with open(file, "r+", encoding="utf-8") as open_file:
f_content = open_file.read()
# strip the trailing slash from every line
f_content = re.sub(r"/$", "", f_content, flags=re.MULTILINE)
open_file.seek(0)
open_file.truncate()
open_file.write(f_content)
sort_txt_file(file)


@@ -0,0 +1,103 @@
#!/usr/bin/env python3
"""Define the user class to populate and setup the download environment"""
import re
from pathlib import Path
from functions import sort_txt_file, randomize_txt_file, load_config_variables
config_variables = load_config_variables()
class User:
"""Populate the directory for each user"""
# pylint: disable=too-many-instance-attributes
def __init__(self, index):
self.user = config_variables["users"][index]
self.config = config_variables["global"]
self.name = self.user["name"]
self.sleep = self.config["sleep"]
# Directories
self.dir_cache = Path(self.config["cache-directory"]) / self.name
self.dir_log = Path(self.config["log-directory"])
self.dir_archive = Path(self.config["archive-directory"])
self.dir_download = Path(self.user["download-directory"])
self.dir_media_download = Path(self.user["media-directory"])
self.dir_push = Path(self.user["push-directory"])
self.dir_master_list = Path(self.config["list-dir"]) / self.name
# Files
self.archive_gallery = self.dir_archive / f"{self.name}.sqlite3"
self.archive_media = self.dir_archive / f"{self.name}_ytdl.txt"
# Lists
self.list_master = self.dir_master_list / "watch.txt"
self.list_push = self.dir_master_list / "instant.txt"
self.list_instagram = self.dir_cache / "instagram.txt"
self.list_kemono = self.dir_cache / "kemono.txt"
self.list_main = self.dir_cache / "main.txt"
def create_directories(self):
"""Create user directories if they don't exist"""
if self.dir_cache.is_dir():
for file in self.dir_cache.iterdir():
if file.is_file():
file.unlink()
for file in self.dir_cache.iterdir():
if file.is_dir():
file.rmdir()
self.dir_cache.rmdir()
# Create directories
self.dir_cache.mkdir(parents=True, exist_ok=True)
self.dir_log.mkdir(parents=True, exist_ok=True)
self.dir_archive.mkdir(parents=True, exist_ok=True)
self.dir_download.mkdir(parents=True, exist_ok=True)
self.dir_media_download.mkdir(parents=True, exist_ok=True)
self.dir_push.mkdir(parents=True, exist_ok=True)
# Check for the existence of core files
if not Path(self.archive_gallery).is_file():
self.archive_gallery.touch()
if not Path(self.archive_media).is_file():
self.archive_media.touch()
if not self.dir_master_list.is_dir():
print(f"ERROR: Directory for user {self.name} doesn't exist")
if not Path(self.list_master).is_file():
self.list_master.touch()
if not Path(self.list_push).is_file():
self.list_push.touch()
# Create temporary lists
for gdl_list in ("instagram", "kemono", "main"):
Path(self.dir_cache.resolve() / f"{gdl_list}.txt").touch()
def list_manager(self):
"""Manage all the user list and create sub-lists"""
# sort_txt_file(self.list_master)
self.create_directories() # Call the function to create necessary cache dirs
with open(self.list_master, encoding="utf-8") as list_master:
# Create temporary list files segmented per scrapper
for line in [line.rstrip() for line in list_master]:
# WIKIFEET
with open(self.list_main, "a", encoding="utf-8") as list_main, open(
self.list_kemono, "a", encoding="utf-8"
) as list_kemono, open(
self.list_instagram, "a", encoding="utf-8"
) as list_instagram:
if re.search(r"kemono.party", line):
list_kemono.write(line + "\n")
elif re.search(r"instagram", line):
list_instagram.write(line + "\n")
elif re.search(r"wikifeet", line):
continue
# list_main.write(line + "\n")
elif re.search(r"furaffinity", line):
list_main.write(line + "\n")
elif re.search(r"twitter", line):
# if url contains /media at the end just write the line
if re.search(r"\/media$", line):
list_main.write(line + "\n")
else:
# if does not contain /media at the end then add /media
list_main.write(line + "/media" + "\n")
else:
list_main.write(line + "\n")
sort_txt_file(self.list_kemono)
# Try to avoid getting banned by shuffling download order
randomize_txt_file(self.list_instagram)
randomize_txt_file(self.list_main)
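config.yaml itself is not part of this commit; reconstructed from the keys the classes and scripts above read, a hypothetical minimal file would look like this (every path is illustrative):

global:
  sleep: 2
  cache-directory: /home/jawz/.cache/jawz
  log-directory: /home/jawz/.local/share/jawz/logs
  archive-directory: /home/jawz/.local/share/jawz/archives
  list-dir: /home/jawz/.config/jawz/lists
users:
  - name: jawz
    download-directory: /mnt/disk2/scrapping/jawz
    media-directory: /mnt/disk2/scrapping/jawz/media
    push-directory: /home/jawz/Downloads/push
comic:
  list: /home/jawz/.config/jawz/lists/comics.txt
  webcomic-list: /home/jawz/.config/jawz/lists/webcomics.yaml
  archive: /home/jawz/.local/share/jawz/archives/comics.sqlite3
  download-directory: /mnt/disk2/library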


@@ -0,0 +1,17 @@
[metadata]
name = download
version = 1.5
[options]
py_modules =
download
functions
argparser
gdl_classes
[options.entry_points]
console_scripts =
download = download:main
# [aliases]
# test = pytest


@@ -0,0 +1,24 @@
from setuptools import setup
setup()
# import os
# from setuptools import find_packages
# from distutils.core import setup
# import setuptools
# # User-friendly description from README.md
# current_directory = os.path.dirname(os.path.abspath(__file__))
# try:
# with open(os.path.join(current_directory, "README.md"), encoding="utf-8") as f:
# long_description = f.read()
# except Exception:
# long_description = ""
# setup(
# name="download",
# # packages=["argparser", "functions"],
# version="1.5.0",
# scripts=["download.py"],
# # entry_points={"console_scripts": ["download = download:main"]},
# )


@@ -0,0 +1,28 @@
{ pkgs ? import <nixpkgs> { } }:
with pkgs;
mkShell {
packages = [
(python3.withPackages (ps:
with ps; [
setuptools
pyyaml
types-pyyaml
# (buildPythonApplication rec {
# pname = "webcomix";
# version = "3.6.6";
# src = fetchPypi {
# inherit pname version;
# sha256 = "sha256-hCnic8Rd81qY1R1XMrSME5ntYTSvZu4/ANp03nCmLKU=";
# };
# doCheck = false;
# propagatedBuildInputs =
# [ click scrapy scrapy-splash scrapy-fake-useragent tqdm ];
# })
]))
];
buildInputs = [
];
}


@@ -0,0 +1,136 @@
#!/usr/bin/env python3
# Imports
import os
import math
# Function for calculating the appropriate bitrate to use during conversion
def get_bitrate(duration, filesize, audio_br):
br = math.floor(filesize / duration - audio_br)
return br, br * 0.50, br * 1.45
def encode(ffmpeg_string, output_name, fs):
os.system(ffmpeg_string)
end_size = (
os.path.getsize(f"/dev/shm/ffmpeg/out/{output_name}")
* 0.00000095367432  # bytes -> MiB
)
if end_size < fs:
print(
ffmpeg_string.replace("\t", "")
+ "\nThe FFMPEG string above has yielded a file whose size is "
+ str(end_size)
+ "MB.\n{output_name} is ready for Discord.\n".format(
output_name=output_name
)
)
return False
else:
print(
ffmpeg_string.replace("\t", "")
+ "\nThe FFMPEG string above has yielded a file whose size is "
+ str(end_size)
+ "MB.\n{output_name} is NOT ready for Discord, and will be re-run.\nMy bad.".format(
output_name=output_name
)
)
return True
def time_calculations(fname, length):
startstring = fname[0:2] + ":" + fname[2:4] + ":" + fname[4:6]
endstring = fname[7:9] + ":" + fname[9:11] + ":" + fname[11:13]
try:
int(fname[0:6])
startseconds = (
int(fname[0:2]) * 60 * 60 + int(fname[2:4]) * 60 + int(fname[4:6])
)
try:
int(fname[11:13])
endseconds = (
int(fname[7:9]) * 60 * 60 + int(fname[9:11]) * 60 + int(fname[11:13])
)
duration = endseconds - startseconds
timestamped_section = f"-ss {startstring} -to {endstring}"
except ValueError:
duration = length - startseconds
timestamped_section = f"-ss {startstring}"
except ValueError:
duration = length
timestamped_section = ""
return duration, timestamped_section
fname = os.listdir("/dev/shm/ffmpeg/in/")[0]
os.rename("/dev/shm/ffmpeg/in/" + fname, "/dev/shm/ffmpeg/in/" + fname.replace(" ", ""))
fname = fname.replace(" ", "")
# ffprobe to calculate the total duration of the clip.
length = math.floor(
float(
os.popen(
"ffprobe -v error -show_entries format=duration -of default=noprint_wrappers=1:nokey=1 /dev/shm/ffmpeg/in/{fname}".format(
fname=fname
)
).read()
)
)
duration, timestamped_section = time_calculations(fname, length)
run = True
reso = os.getenv("reso")
codec = os.getenv("codec")
audio_br = int(str(os.getenv("audio_br")))
fs = float(str(os.getenv("fs")))
target_fs = fs
codecs = {
"vp9": {
"pass1": f"-vf scale={reso} -g 240 -threads 8 -speed 4 -row-mt 1 -tile-columns 2 -vsync cfr -c:v libvpx-vp9 -pass 1 -an",
"pass2": f"-vf scale={reso} -g 240 -threads 8 -speed 2 -row-mt 1 -tile-columns 2 -c:v libvpx-vp9 -c:a libopus -pass 2",
"output_name": "small_" + fname.replace(".mp4", ".webm"),
},
"x264": {
"pass1": f"-vf scale={reso} -vsync cfr -c:v libx264 -pass 1 -an",
"pass2": f"-vf scale={reso} -c:v libx264 -c:a aac -pass 2 ",
"output_name": "small_" + fname,
},
"x265": {
"pass1": f"-vf scale={reso} -c:v libx265 -vsync cfr -x265-params pass=1 -an",
"pass2": f"-vf scale={reso} -c:v libx265 -x265-params pass=2 -c:a aac",
"output_name": "small_" + fname,
},
}
while run:
# Conversion to KiB
end_fs = fs * 8192
br, minbr, maxbr = get_bitrate(
duration=duration, filesize=end_fs, audio_br=audio_br
)
ffmpeg_string = f"""
ffpb {timestamped_section} -hwaccel cuda -i /dev/shm/ffmpeg/in/{fname} -y \
{codecs[str(codec)]['pass1']} \
-b:v {br}k -minrate {minbr}k -maxrate {maxbr}k \
-f null /dev/null && \
ffpb {timestamped_section} -hwaccel cuda -i /dev/shm/ffmpeg/in/{fname} \
{codecs[str(codec)]['pass2']} \
-b:a {audio_br}k -b:v {br}k -minrate {minbr}k -maxrate {maxbr}k \
/dev/shm/ffmpeg/out/{codecs[str(codec)]['output_name']} -y
"""
run = encode(
ffmpeg_string, output_name=codecs[str(codec)]["output_name"], fs=target_fs
)
if run:
fs = fs - 0.2
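The script is driven entirely by environment variables plus a single input file; ffmpreg.sh below wraps it exactly this way. A standalone sketch, assuming the script is on PATH as ffmpeg4discord (the name used by convert_discord below):

mkdir -p /dev/shm/ffmpeg/{in,out}
cp clip.mp4 /dev/shm/ffmpeg/in/
# 8 MB target at 720p with 96 kbps audio, matching the discord preset below
reso=1280x720 codec=x264 audio_br=96 fs=8.0 ffmpeg4discord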

workstation/scripts/ffmpreg.sh Executable file

@@ -0,0 +1,98 @@
#! /usr/bin/env nix-shell
#! nix-shell -i bash -p bash gum trashy fd ripgrep mediainfo
replace_extension() {
local file_basename
file_basename=$(basename "$1")
echo "${file_basename%.*}.$2"
}
convert_gif() {
file_newname=$(replace_extension "$1" gif)
ffpb -i "$(realpath "$1")" -vf fps=12,scale=480:-1,smartblur=ls=-0.5 "$file_newname"
}
convert_mp4() {
local file_newname
file_newname=$(replace_extension "$1" mp4)
local file_tempdest=/dev/shm/$file_newname
local file_destination
file_destination=$(dirname "$(realpath "$1")")/$file_newname
ffpb -i "$1" \
-c:v libx265 \
"$file_tempdest"
trash "$1"
mv -i "$file_tempdest" "$file_destination"
}
convert_discord() {
local file_newname
file_newname=$2_$(replace_extension "$1" mp4)
local dir_ram=/dev/shm/ffmpeg
mkdir -p $dir_ram/{in,out}
ffpb -hwaccel cuda -i "$(realpath "$1")" \
-c:v h264_nvenc \
"$dir_ram"/in/discord.mp4
cd "$dir_ram" || exit
codec=x264 audio_br=$3 fs=$4 reso=$5 ffmpeg4discord
mv "$dir_ram"/out/small_discord.mp4 ~/"$file_newname"
command rm -rf "$dir_ram"
}
operation=$(gum choose mp4 discord nitro gif enc265)
case $operation in
1 | mp4)
to_convert=()
while IFS= read -r file; do
to_convert+=("$file")
done < <(fd . "$(pwd)" -tf -aL | fzf --multi -i)
for file in "${to_convert[@]}"; do
convert_mp4 "$file"
done
;;
2 | discord)
to_convert=()
while IFS= read -r file; do
to_convert+=("$file")
done < <(fd . "$(pwd)" -tf -aL | fzf --multi -i)
for file in "${to_convert[@]}"; do
convert_discord "$file" discord 96 8.0 "1280x720"
done
;;
3 | nitro)
to_convert=()
while IFS= read -r file; do
to_convert+=("$file")
done < <(fd . "$(pwd)" -tf -aL | fzf --multi -i)
for file in "${to_convert[@]}"; do
convert_discord "$file" nitro 128 50.0 "1920x1080"
done
;;
4 | gif)
to_convert=()
while IFS= read -r file; do
to_convert+=("$file")
done < <(fd . "$(pwd)" -tf -aL | fzf --multi -i)
for file in "${to_convert[@]}"; do
convert_gif "$file"
done
;;
5 | enc265)
to_convert=()
extensions=(flv m4v mpg avi mov ts mkv mp4 webm)
for ext in "${extensions[@]}"; do
while IFS= read -r file; do
if ! (mediainfo "$file" | grep Writing\ library | grep -q x265); then
to_convert+=("$file")
fi
done < <(fd . -e "$ext" -tf -aL)
done
for file in "${to_convert[@]}"; do
convert_mp4 "$file"
done
;;
*)
echo -n "Please select a valid input"
;;
esac


@@ -0,0 +1,153 @@
#! /usr/bin/env nix-shell
#! nix-shell -i bash -p bash gum fd ripgrep exa trashy zip unzip
root_directories=(
~/Multimedia/Library/Comics
~/Multimedia/Library/Manga
~/Multimedia/Library/Webtoons
)
newname() {
echo "$1" | sed -E "s/$2/$3/g"
}
separator() {
gum style --foreground 7 _________________________
}
announce_changes() {
echo "Renaming:"
gum style --foreground 1 "$1"
echo "Into:"
gum style --foreground 2 "$2"
separator
}
rename_file() {
while IFS= read -r file; do
local original_name
original_name=$(basename "$file")
local new_name
new_name=$(newname "$(basename "$file")" "$2" "$3")
announce_changes "$original_name" "$new_name"
command mv -n "$(dirname "$file")"/{"$original_name","$new_name"}
done < <(fd "$1" --absolute-path -tf -s "${root_directories[@]}")
}
rename_directory() {
while IFS= read -r dir; do
local new_name
new_name=$(newname "$(basename "$dir")" "$2" "$3")
local new_dir
new_dir=$(dirname "$dir")/$new_name
announce_changes "$dir" "$new_dir"
echo "Processing..."
if [ ! -d "$new_dir" ]; then
echo "$(basename "$new_dir") doesn't exist. Creating it."
command mkdir -p "$new_dir"
fi
if [ -d "$new_dir" ]; then
echo "$(basename "$new_dir") has been created! Moving the following files:"
exa "$dir"
fd . "$dir" -x mv -n {} "$(realpath "$new_dir")"
fi
separator
done < <(fd "$1" --absolute-path -td -s "${root_directories[@]}")
}
# Capitalize Special words
words=(special tpb full annual)
Words=(Special TPB Full Annual)
counter=0
for word in "${words[@]}"; do
while IFS= read -r file; do
new_name=$(newname "$(basename "$file")" "$word" "${Words[$counter]}")
echo "Improper capitalization of the word"
gum style --foreground 1 "$word"
echo "adjusting it into"
gum style --foreground 2 "${Words[$counter]}"
announce_changes "$(basename "$file")" "$new_name"
command mv -n "$(dirname "$file")"/{"$(basename "$file")","$new_name"}
done < <(fd "$word" --absolute-path -tf -s "${root_directories[@]}")
counter=$((counter + 1))
done
# Rename Year files
# set regex_year_grep "\([[:digit:]]{4}\)"
# set regex_year_string "(\()(\d{4})(\))"
# rename_directory $regex_year_grep $regex_year_string \$2
# rename_file $regex_year_grep $regex_year_string \$2
# Rename #_ downloads
regex_hashtag="#_"
rename_directory $regex_hashtag $regex_hashtag "#"
rename_file $regex_hashtag $regex_hashtag "#"
rename_keywords() {
# Followed by digit
local regex_digit_fd="$1 \d+"
local regex_digit="($1 )([[:digit:]]+)"
rename_directory "$regex_digit_fd" "$regex_digit" "\1#\2"
rename_file "$regex_digit_fd" "$regex_digit" "\1#\2"
# Without digit
regex="#$1"
rename_directory "$regex" "$regex" "$1"
rename_file "$regex" "$regex" "$1"
}
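# e.g. "TPB 2" -> "TPB #2" and "Annual 12" -> "Annual #12";
# the second pass strips a stray leading "#" ("#TPB" -> "TPB")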
rename_keywords TPB
rename_keywords Special
rename_keywords Annual
# Rename #Full
rename_directory " #Full" " #Full" ""
rename_file " #Full" " #Full" ""
# Rename double space
rename_directory " " " " " "
rename_file " " " " " "
# Fix names
wrongnames=(
"Dr. Stone"
i-dont-want-this-kind-of-hero
pure-of-heart
scoob-and-shag
stick-n-poke
"Houseki no Kuni"
"Gantz E"
"Gantz G"
)
rightname=(
"Dr. STONE"
"I DON'T WANT THIS KIND OF HERO"
"Pure of Heart"
"Scoob and Shag"
"Stick n' Poke"
"Land of the Lustrous"
"Gatz:E"
"Gantz:G"
)
counter=0
for wrongname in "${wrongnames[@]}"; do
rename_directory "$wrongname" "$wrongname" "${rightname[$counter]}"
rename_file "$wrongname" "$wrongname" "${rightname[$counter]}"
counter=$((counter + 1))
done
# Merge TPB (Part X) files
while IFS= read -r file; do
new_name=$(newname "$(basename "$file" .cbz)" "TPB \(Part [[:digit:]]+\)" TPB)
extract_dir=$(realpath "$(dirname "$file")"/"$new_name")
if [ ! -d "$extract_dir" ]; then
mkdir -p "$extract_dir"
fi
unzip "$file" -d "$extract_dir"/"$(basename "$file" .cbz)"
cd "$extract_dir" || exit
zip -r "$(realpath "$(dirname "$file")")"/"$new_name"\.cbz ./
trash "$file"
trash "$extract_dir"/"$(basename "$file" .cbz)"
done < <(fd "Part \d+" --absolute-path -tf -s "${root_directories[@]}")
fd . --absolute-path -td -te "${root_directories[@]}" -x trash {}


@@ -0,0 +1,59 @@
#!/run/current-system/sw/bin/bash
# Cron tasks
if type /run/current-system/sw/bin/nextcloud-occ >/dev/null 2>&1; then
/run/current-system/sw/bin/nextcloud-occ preview:pre-generate
/run/current-system/sw/bin/nextcloud-occ face:background_job -t 900
fi
# Sync GDL stuff
root=/mnt/disk2/scrapping
cd $root || exit
set -- Aqp Ghekre
for user in "$@"; do
originDir=$root/$user
destDir=/mnt/disk1/nextcloud/$user/files/Requested
destDirDup=/mnt/disk1/nextcloud/$user/files/RequestedDupePlzCheckNDel
if [ ! -d "$destDir" ]; then
echo "$destDir does not exist, creating..."
mkdir -p "$destDir"
fi
cd "$originDir" || exit
find . -type f | while read -r file; do
destination=$destDir/"$(echo "$file" | sed "s/^\.\///")"
destinationDup=$destDirDup/"$(echo "$file" | sed "s/^\.\///")"
if [ ! -f "$destination" ]; then
echo "Safe to move $(basename "$file")"
if [ ! -d "$(dirname "$destination")" ]; then
echo "Creating parent directory..."
mkdir -p "$(dirname "$destination")"
fi
mv -n "$file" "$destination"
else
echo "Duplicate encountered: $(basename "$file")"
if [ ! -d "$(dirname "$destinationDup")" ]; then
echo "Creating parent directory..."
mkdir -p "$(dirname "$destinationDup")"
fi
mv -n "$file" "$destinationDup"
fi
done
find ./ -mindepth 1 -type d -empty -delete
chown 990:990 -R "$destDir"
find "$destDir" -type d -exec chmod 755 {} \;
find "$destDir" -type f -exec chmod 644 {} \;
if [ -d "$destDirDup" ]; then
chown 990:990 -R "$destDirDup"
find "$destDirDup" -type d -exec chmod 755 {} \;
find "$destDirDup" -type f -exec chmod 644 {} \;
fi
if type /run/current-system/sw/bin/nextcloud-occ >/dev/null 2>&1; then
/run/current-system/sw/bin/nextcloud-occ files:scan --all
fi
done
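The file itself contains no scheduling; a hypothetical crontab entry wiring it up (path illustrative):

# run as root every 15 minutes
*/15 * * * * /etc/nixos/workstation/scripts/cron-tasks.sh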


@@ -0,0 +1,51 @@
#! /usr/bin/env nix-shell
#! nix-shell -i bash -p bash fd borgbackup gum ripgrep
BORG_PASSPHRASE=$(gum input --password --placeholder "Type borg password")
export BORG_PASSPHRASE
d_root=$HOME/pika
f_string=home/jawz/.config/jawz/lists/jawz/watch.txt
d_borg=/mnt/disk1/backups/pika/lists
while IFS= read -r repo; do
IFS=" " read -r -a array <<<"$repo"
repo_id="${array[0]}"
mkdir -vp "$d_root/$repo_id" && cd "$d_root/$repo_id" || exit
borg extract $d_borg::"$repo_id" $f_string
cat "$d_root/$repo_id/$f_string" >>"$d_root/master"
done < <(borg list "$d_borg")
cd "$HOME" || exit
sort -u "$d_root/master" -o "$d_root/sorted"
# $LW is assumed to be exported elsewhere; it points at the live watch list
sort -u "$LW" -o "$LW"
echo "Current $(wc -l <"$LW") archived $(wc -l <"$d_root/sorted")"
echo "Missing lines:"
diff "$d_root/sorted" "$LW"
# look for duped lines with different casing
echo "Duplicated lines:"
while IFS= read -r line; do
if ! [ "$line" == "${line,,}" ]; then
if rg "${line,,}" <"$LW"; then
echo "$line"
fi
fi
done <"$LW"
# delete pika backups
if gum confirm "Clean up pika?"; then
command rm -rf "$d_root"
while IFS= read -r repo; do
IFS=" " read -r -a array <<<"$repo"
repo_id="${array[0]}"
gum spin --spinner dot --title "Cleaning $repo_id..." -- borg delete $d_borg::"$repo_id"
done < <(borg list "$d_borg")
else
echo "Canceled, no files deleted"
fi
gum spin --spinner dot --title "Compacting lists repo..." -- borg compact "$d_borg"
gum spin --spinner dot --title "Compacting home repo..." -- borg compact /mnt/disk1/backups/pika/home


@@ -0,0 +1,48 @@
#! /usr/bin/env nix-shell
#! nix-shell -i bash -p bash gnome.zenity rmlint git gum xclip
if [ -n "$1" ]; then
operation=$1
else
operation=$(gum choose rmlint_1 rmlint_2 download git)
fi
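# rmlint_1/rmlint_2: reflink-deduplicate the data disks
# download: queue a clipboard URL with the download manager
# git: fsck and pull every repo under ~/Development/Git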
case $operation in
# onlyfans)
# source ~/Development/Python/onlyfans/bin/activate.fish
# python ~/Development/Git/OnlyFans/start_ofd.py
# deactivate
rmlint_1)
rmlint -g --types="duplicates" \
--config=sh:handler=clone \
/mnt/disk1/personal
;;
rmlint_2)
rmlint -g --types="duplicates" \
--config=sh:handler=clone \
/mnt/disk2/{glue,home,personal,scrapping}
;;
download)
ENTRY=$(zenity --entry --width=250 --title "Push Manager" \
--text="Verify the following entry is correct" \
--entry-text "$(xclip -o -sel clip)")
if [ -n "$ENTRY" ]; then
kgx -e "download -u jawz -i '$ENTRY'"
else
zenity --error --width=250 \
--text "Please verify and try again"
fi
;;
git)
git_dir=$HOME/Development/Git
while IFS= read -r repo; do
if ! [ -d "$repo/.git" ]; then
continue
fi
cd "$repo" || exit
gum style --foreground 2 "Updating $(basename "$repo")"
git fsck --full
git pull
done < <(fd . "$git_dir" -td --absolute-path -d 1)
;;
esac

View File

@@ -0,0 +1,28 @@
#! /usr/bin/env nix-shell
#! nix-shell -i bash -p bash fd
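# Usage: $0 FILES_PER_DIR
# Packs loose files in the current directory into numbered subdirectories of
# FILES_PER_DIR files each; mp4 files are collected into ./videos instead.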
before_count=$(fd -tf | wc -l)
i=0
while IFS= read -r file; do
dir_name=$(basename "$(pwd)")_$(printf %03d $((i / $1 + 1)))
mkdir -p "$dir_name"
mv -i "$file" "$(realpath "$dir_name")"/
i=$((i + 1))
done < <(fd -d1 -tf -E '*.mp4')
while IFS= read -r file; do
mkdir -p videos
mv -i "$file" "$(realpath videos)"/
done < <(fd -d1 -tf -e mp4)
after_count=$(fd -tf | wc -l)
if [[ "$before_count" == "$after_count" ]]; then
echo "No file count differences"
else
echo "Before count: $before_count"
echo "After count: $after_count"
fi
sleep 10
exit

140
workstation/scripts/tasks.sh Executable file
View File

@@ -0,0 +1,140 @@
#! /usr/bin/env nix-shell
#! nix-shell -i bash -p bash trashy fd ripgrep file
directories=("$HOME/Pictures/To Organize/" "$HOME/Downloads/")
replace_extension() {
local file_basename
file_basename=$(basename "$1")
echo "${file_basename%.*}.$2"
}
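# e.g. replace_extension "cover.png" webp -> "cover.webp"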
generate_random_number() {
local min=0
local max=9999999999
# $RANDOM is only 15 bits (0-32767), so chain three draws to cover ten digits
printf "%010d\n" $(((RANDOM * 32768 * 32768 + RANDOM * 32768 + RANDOM) % (max + 1) + min))
}
test_name() {
local random_number
random_number=$(generate_random_number)
while (($(fd "^$random_number" "$HOME/Pictures/" "$HOME/Downloads/" -tf | wc -l) > 0)); do
# log to stderr so the message is not captured by the command substitution
echo "Conflicts found, generating a new filename" >&2
random_number=$(generate_random_number)
done
echo "$random_number"
}
while IFS= read -r file; do
regex_str='source|tenor|media|duckduckgo\.com|giphy|'
regex_str+='(?<!app)image|^download|unknown|zoom|'
regex_str+='new_canvas|untitled|drawpile'
if basename "$file" | rg --pcre2 -q "$regex_str"; then
new_name=$(test_name)
echo renaming
echo "$file"
echo into
echo "$(dirname "$file")"/"$new_name"
echo ---------------
command mv -n "$(dirname "$file")"/{"$(basename "$file")","$new_name"}
fi
if basename "$file" | rg -q 'Screenshot_\d{8}'; then
echo "moving screenshot $file into $HOME/Pictures/Screenshots/"
command mv -n "$file" "$HOME/Pictures/Screenshots/"
fi
done < <(fd . "${directories[@]}" -d 1 -tf --absolute-path)
screenshots=$HOME/Pictures/Screenshots
if (($(fd . "$screenshots" -tf -d 1 | wc -l) > 0)); then
while IFS= read -r file; do
date=$(stat -c "%y" "$file" | rg -o "\d{4}-\d{2}-\d{2}")
year=$(echo "$date" | rg -o "\d{4}")
month=$(echo "$date" | rg -o "\d{4}-\d{2}" | rg -o --pcre2 "(?<=-)\d{2}")
dest_dir=$(realpath "$screenshots/$year/$month")
echo "Moving screenshot $(basename "$file") into $dest_dir"
mkdir -vp "$dest_dir"
command mv -n "$file" "$dest_dir/"
done < <(fd . "$screenshots" --absolute-path -tf -d 1)
fi
# Where steam screenshots are stored; you may need to replace the ID with your own
dir_steam=$XDG_DATA_HOME/Steam/userdata/107446271/760/remote
declare -A games
# Insert here new games, put between [] the ID of the game
# You can find it by visiting the $dir_steam directory
# the ID is simply the name of the folder in there.
games+=(
[386360]=Smite
[960090]="Bloons Tower Defense 6"
[648800]=Raft
[262060]="Darkest Dungeon"
[234140]="Mad Max"
[433340]="Slime Rancher"
)
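# Example entry (hypothetical; check $dir_steam for the ID): games+=([620]="Portal 2")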
for key in "${!games[@]}"; do
# Modify this to store your screenshots somewhere else
dir_dest=$(realpath "$HOME/Pictures/Screenshots/Games")/${games[$key]}
dir_game=$(realpath "$dir_steam")/$key/screenshots
# If there are no screenshots currently stored, why bother lol
if ! [[ -d $dir_game ]]; then
continue
fi
# If screenshots exist however...
if (($(fd . "$dir_game" -d 1 -tf | wc -l) > 0)); then
# Create destination directory
mkdir -vp "$dir_dest"
echo "Moving ${games[$key]} screenshots..."
fd . "$dir_game" -d 1 -tf -x mv -n {} "$dir_dest"/
# Delete thumbnails
echo "Deleting ${games[$key]} thumbnails..."
rm -rf "$dir_game"/thumbnails
fi
done
# Clearing up empty directories
fd . "$dir_steam" -td -te -x trash {}
cyberpunk_dir=$HOME/Games/cyberpunk-2077/drive_c/users/jawz/Pictures/"Cyberpunk 2077"
if [[ -d $cyberpunk_dir ]]; then
mkdir -p "$HOME/Pictures/Screenshots/Games/Cyberpunk 2077"
while IFS= read -r file; do
echo "Moving cyberpunk screenshots"
command mv -n "$file" "$HOME/Pictures/Screenshots/Games/Cyberpunk 2077/"
done < <(fd . "$cyberpunk_dir" -tf)
fi
proton_dir=$HOME/.steam/steam/compatibilitytools.d
if [[ -d "$proton_dir" ]]; then
while IFS= read -r protonver; do
lutrisdir=$XDG_DATA_HOME/lutris/runners/wine/$(basename "$protonver")
if ! [ -d "$lutrisdir" ] && ! [ -L "$lutrisdir" ]; then
echo "Symlink $lutrisdir doesn't exist, creating link..."
ln -s "$(realpath "$protonver")"/files "$lutrisdir"
fi
done < <(fd . "$proton_dir" -d 1 -td)
fi
fd . "$XDG_DATA_HOME/lutris/runners/wine" -d 1 -tl -x trash {}
while IFS= read -r file; do
ext=$(file --mime-type "$file" | rg -o '\w+$')
correct_ext=${ext,,}
filename=$(basename -- "$file")
current_ext="${filename##*.}"
filename="${filename%.*}"
if echo "$correct_ext" | rg -q 'jpe|jpg|jpeg|png|gif'; then
if [ "$current_ext" != "$correct_ext" ]; then
echo "The file $(basename "$file")" \
"will be renamed, the propper extension is $correct_ext"
new_name="$filename".$correct_ext
command mv -n "$(dirname "$file")"/{"$(basename "$file")","$new_name"}
fi
fi
done < <(fd . "${directories[@]}" -d 1 -tf)
files_home_clean=(.pki HuionCore.pid DriverUI.pid huion.log)
for file in "${files_home_clean[@]}"; do
file=$HOME/$file
if [ -e "$file" ]; then
rm -rf "$file"
fi
done

View File

@@ -0,0 +1,38 @@
#!/usr/bin/env nix-shell
#! nix-shell -i bash -p bash curl jq dig
# Shell script to update namecheap.com dynamic dns
# for a domain to your external IP address
# namecheap
hostnames=(cloud @)
domain=rotehaare.art
password=60d672be5d9d4828a0f96264babe0ac1
ip=$(curl -s ipecho.net/plain)
for hostname in "${hostnames[@]}"; do
curl "https://dynamicdns.park-your-domain.com/update?host=$hostname&domain=$domain&password=$password&ip=$ip"
done
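# note: namecheap replies with an XML body; a non-zero <ErrCount> means the
# update failed (the response is not checked here)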
# cloudflare
zone_id=833996ed25eb09f1a50606e0457790e4
record=servidos.lat
record_id=6b117173e53a7511ba36ceb9637ede63
cloudflare_token=VdKosfThQmOcuywLOUq9DY4-df9EmbHrDWyf_vUb
# get record_id
# curl -s -X GET "https://api.cloudflare.com/client/v4/zones/${zone_id}/dns_records?type=A&name=${record}" \
# -H "Authorization: Bearer ${cloudflare_token}" \
# -H "Content-Type: application/json" | jq -r '{"result"}[] | .[0] | .id'
curr_ip=$(curl -s -X GET https://checkip.amazonaws.com)
curr_reg=$(dig "$record" +short @1.1.1.1)
if echo "${curr_reg}" | grep -qF "${curr_ip}"; then
echo "$(date --rfc-3339=seconds) - OK - Current record matches current IP (${curr_ip})"
else
curl -s -X PUT "https://api.cloudflare.com/client/v4/zones/${zone_id}/dns_records/${record_id}" \
-H "Authorization: Bearer ${cloudflare_token}" \
-H "Content-Type: application/json" \
--data "{\"type\":\"A\",\"name\":\"${record}\",\"content\":\"$curr_ip\",\"ttl\":1,\"proxied\":false}" >/dev/null
echo "$(date --rfc-3339=seconds) - NOK - Record Updated to $curr_ip from ${curr_reg}"
fi

View File

255
workstation/servers.nix Normal file
View File

@@ -0,0 +1,255 @@
{ config, lib, pkgs, modulesPath, ... }:
let
localhost = "127.0.0.1";
postgresPort = toString (config.services.postgresql.port);
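# "unstable" tracks nixpkgs master at evaluation time; pinning a specific
# commit in fetchTarball would make rebuilds reproducible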
unstable = import
(builtins.fetchTarball "https://github.com/nixos/nixpkgs/tarball/master") {
config = config.nixpkgs.config;
};
in {
imports = [ ./nginx.nix ];
nixpkgs.config = {
permittedInsecurePackages = [ "nodejs-14.21.3" "openssl-1.1.1v" ];
};
users.groups = { piracy.gid = 985; };
users.users = let base = { isSystemUser = true; };
in {
prowlarr = base // { group = "piracy"; };
paperless = base // { };
nextcloud = base // {
extraGroups = [ "render" ];
packages = (with pkgs; [
nodejs_14
perl
(perlPackages.buildPerlPackage rec {
pname = "Image-ExifTool";
version = "12.60";
src = fetchurl {
url = "https://exiftool.org/Image-ExifTool-${version}.tar.gz";
hash = "sha256-c9vgbQBMMQgqVueNfyRvK7AAL7sYNUR7wyorB289Mq0=";
};
})
]);
};
};
services = let
base = {
enable = true;
group = "piracy";
};
in {
sonarr = base // { package = unstable.pkgs.sonarr; };
radarr = base // { package = unstable.pkgs.radarr; };
bazarr = base // { };
jellyfin = base // { };
prowlarr.enable = true;
paperless = {
enable = true;
consumptionDirIsPublic = true;
extraConfig = {
PAPERLESS_DBENGINE = "postgress";
PAPERLESS_DBHOST = "${localhost}";
PAPERLESS_DBNAME = "paperless";
PAPERLESS_DBUSER = "paperless";
PAPERLESS_DBPASS = "sopacerias";
PAPERLESS_DBPORT = "${postgresPort}";
PAPERLESS_CONSUMER_IGNORE_PATTERN =
builtins.toJSON [ ".DS_STORE/*" "desktop.ini" ];
PAPERLESS_TIME_ZONE = "America/Mexico_City";
PAPERLESS_OCR_USER_ARGS = builtins.toJSON {
optimize = 1;
pdfa_image_compression = "lossless";
};
};
};
vaultwarden = {
enable = true;
dbBackend = "postgresql";
package = unstable.pkgs.vaultwarden;
config = {
ROCKET_ADDRESS = "${localhost}";
ROCKET_PORT = 8222;
WEBSOCKET_PORT = 8333;
ADMIN_TOKEN =
"x9BLqz2QmnU5RmrMLt2kPpoPBTNPZxNFw/b8XrPgpQML2/01+MYENl87dmhDX+Jm";
DATABASE_URL =
"postgresql://vaultwarden:sopacerias@${localhost}:${postgresPort}/vaultwarden";
ENABLE_DB_WAL = false;
WEBSOCKET_ENABLED = true;
SHOW_PASSWORD_HINT = false;
SIGNUPS_ALLOWED = false;
EXTENDED_LOGGING = true;
LOG_LEVEL = "warn";
};
};
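# heads-up: secrets interpolated via pkgs.writeText land world-readable in /nix/store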
kavita = {
enable = true;
tokenKeyFile = "${pkgs.writeText "kavitaToken"
"Au002BRkRxBjlQrmWSuXWTGUcpXZjzMo2nJ0Z4g4OZ1S4c2zp6oaesGUXzKp2mhvOwjju002BNoURG3CRIE2qnGybvOgAlDxAZCPBzSNRcx6RJ1lFRgvI8wQR6Nd5ivYX0RMo4S8yOH8XIDhzN6vNo31rCjyv2IycX0JqiJPIovfbvXn9Y="}";
};
nextcloud = {
enable = true;
https = true;
package = pkgs.nextcloud27;
appstoreEnable = true;
configureRedis = true;
extraAppsEnable = true;
enableImagemagick = true;
maxUploadSize = "512M";
hostName = "cloud.servidos.lat";
config = {
adminpassFile = "${pkgs.writeText "adminpass"
"Overlying-Hatchback-Charting-Encounter-Deface-Gallantly7"}";
overwriteProtocol = "https";
defaultPhoneRegion = "MX";
dbtype = "pgsql";
dbuser = "nextcloud";
dbpassFile = "${pkgs.writeText "dbpass" "sopacerias"}";
dbtableprefix = "oc_";
dbname = "nextcloud";
trustedProxies = [ "nginx" ];
extraTrustedDomains = [ "cloud.rotehaare.art" "danilo-reyes.com" ];
};
phpOptions = {
catch_workers_output = "yes";
display_errors = "stderr";
error_reporting = "E_ALL & ~E_DEPRECATED & ~E_STRICT";
expose_php = "Off";
"opcache.enable_cli" = "1";
"opcache.fast_shutdown" = "1";
"opcache.interned_strings_buffer" = "16";
"opcache.jit" = "1255";
"opcache.jit_buffer_size" = "128M";
"opcache.max_accelerated_files" = "10000";
"opcache.memory_consumption" = "128";
"opcache.revalidate_freq" = "1";
"opcache.save_comments" = "1";
"opcache.validate_timestamps" = "0";
"openssl.cafile" = "/etc/ssl/certs/ca-certificates.crt";
short_open_tag = "Off";
};
extraOptions = {
mail_smtpmode = "sendmail";
mail_sendmailmode = "pipe";
"installed" = true;
"memories.exiftool" = "/etc/profiles/per-user/nextcloud/bin/exiftool";
enabledPreviewProviders = [
"OC\\Preview\\Image"
"OC\\Preview\\HEIC"
"OC\\Preview\\TIFF"
"OC\\Preview\\MKV"
"OC\\Preview\\MP4"
"OC\\Preview\\AVI"
"OC\\Preview\\Movie"
];
};
phpExtraExtensions = all: [ all.pdlib all.bz2 ];
};
postgresql = {
enable = true;
ensureDatabases = [ "paperless" "nextcloud" "mealie" "vaultwarden" ];
ensureUsers = [
{
name = "nextcloud";
ensurePermissions = { "DATABASE nextcloud" = "ALL PRIVILEGES"; };
}
{
name = "paperless";
ensurePermissions = { "DATABASE paperless" = "ALL PRIVILEGES"; };
}
{
name = "mealie";
ensurePermissions = { "DATABASE mealie" = "ALL PRIVILEGES"; };
}
{
name = "vaultwarden";
ensurePermissions = { "DATABASE vaultwarden" = "ALL PRIVILEGES"; };
}
];
authentication = pkgs.lib.mkOverride 10 ''
local all all trust
host all all ${localhost}/32 trust
host all all ::1/128 trust
'';
};
};
environment.systemPackages = with pkgs; [ docker-compose ];
virtualisation.docker = {
enable = true;
storageDriver = "btrfs";
};
systemd = {
services = {
docker-compose = {
enable = true;
restartIfChanged = true;
description = "Start docker-compose servers";
after = [ "docker.service" "docker.socket" ];
requires = [ "docker.service" "docker.socket" ];
wantedBy = [ "default.target" ];
environment = {
FILE = "/home/jawz/Development/Docker/docker-compose.yml";
};
path = [ pkgs.docker-compose ];
serviceConfig = {
Restart = "on-failure";
RestartSec = 30;
ExecStart =
"${pkgs.docker-compose}/bin/docker-compose -f \${FILE} up --remove-orphans";
ExecStop =
"${pkgs.docker-compose}/bin/docker-compose -f \${FILE} down";
};
};
nextcloud-cronjob = let
jawzNextcloudCronjob = pkgs.writeScriptBin "nextcloud-cronjob"
(builtins.readFile ./scripts/nextcloud-cronjob.sh);
in {
description = "Runs various nextcloud-related cronjobs";
wantedBy = [ "default.target" ];
path = [ pkgs.bash jawzNextcloudCronjob ];
serviceConfig = {
Restart = "on-failure";
RestartSec = 30;
# ${config.services.nextcloud.package}
ExecStart = "${jawzNextcloudCronjob}/bin/nextcloud-cronjob";
};
};
};
timers = {
nextcloud-cronjob = {
enable = true;
description = "Runs various nextcloud-related cronjobs";
wantedBy = [ "timers.target" ];
timerConfig = { OnCalendar = "*:0/10"; };
};
};
user.services = {
update-dns = let
jawzUpdateDns = pkgs.writeScriptBin "update-dns"
(builtins.readFile ./scripts/update-dns.sh);
in {
restartIfChanged = true;
description = "update DNS of my websites";
wantedBy = [ "default.target" ];
path = [ pkgs.bash pkgs.nix jawzUpdateDns ];
serviceConfig = {
Restart = "on-failure";
RestartSec = 30;
ExecStart = "${jawzUpdateDns}/bin/update-dns";
};
};
};
user.timers = {
update-dns = {
enable = true;
description = "update DNS of my websites";
wantedBy = [ "timers.target" ];
timerConfig = {
OnBootSec = "1min";
OnUnitActiveSec = "6h";
};
};
};
};
}