more segmentation changes battlestation
askdjs
This commit is contained in:
parent
47acc81969
commit
c4306417a6
@ -1,21 +0,0 @@
|
|||||||
#!/usr/bin/env nix-shell
#! nix-shell -i bash -p bash yt-dlp

# Repeatedly record a chaturbate stream with yt-dlp, relaunching the
# downloader every $sleep_time seconds in case the stream drops.
# Usage: ./script <username>

minutes=10
time_alive=60
sleep_time=$((minutes * 60))
# Number of retry iterations; with the defaults: 60 / (600 / 60) = 6.
loops=$((time_alive / (sleep_time / time_alive)))
url="https://chaturbate.com/$1"

save_dir=/mnt/disk2/glue/Tuhmayto
if [ ! -d "$save_dir" ]; then
    mkdir -p "$save_dir"
fi
# Quoted to survive spaces in the path; bail out if the cd fails.
cd "$save_dir" || exit

for i in $(seq 1 1 "$loops"); do
    # Minutes waited once this iteration's sleep completes.
    waiting_time=$(((i * sleep_time) / time_alive))
    yt-dlp --hls-use-mpegts --prefer-ffmpeg -o '%(title)s.%(ext)s' "$url"
    echo "sleeping for $sleep_time seconds… been waiting for $waiting_time minutes"
    sleep "$sleep_time"
done
|
|
||||||
@ -1 +0,0 @@
|
|||||||
# Absolute path of the per-user YAML configuration consumed by the scripts.
CONFIG_FILE = "/home/jawz/.config/jawz/config.yaml"
|
|
||||||
@ -1 +0,0 @@
|
|||||||
use nix
|
|
||||||
@ -1,96 +0,0 @@
|
|||||||
#!/usr/bin/env python3
|
|
||||||
"""Setup the argparser"""
|
|
||||||
import argparse
|
|
||||||
|
|
||||||
# Scrapper back-ends selectable as the positional argument.
scrapper_types = (
    "push",
    "gallery",
    "instagram",
    "kemono",
    "comic",
    "manga",
    "webcomic",
)
# Define types of instagram stories
instagram_types = ["posts", "reels", "channel", "stories", "highlights"]


def argparser(users: list) -> argparse.Namespace:
    """Build the command-line parser and return the parsed arguments."""
    parser = argparse.ArgumentParser(
        prog="Downloader",
        description=(
            "Download images and galleries from a wide array of websites"
            " either by using links or chosing from user define lists."
            " This program also takes care of archiving tasks,"
            " that keep the run time fast and prevents downloading duplicates."
        ),
    )
    # Optional positional: which scrapper back-end to run.
    parser.add_argument(
        dest="scrapper",
        nargs="?",
        choices=scrapper_types,
        help="Select a scrapper.",
    )
    # Which personal list to process.
    parser.add_argument(
        "-u",
        "--user",
        dest="user",
        type=str,
        choices=users,
        default="everyone",
        help="Selects the personal user list to process. Defaults to everyone",
    )
    # Ad-hoc links given directly on the command line.
    parser.add_argument(
        "-i",
        "--input",
        dest="link",
        type=str,
        nargs="*",
        action="append",
        help="Download the provided links",
    )
    parser.add_argument(
        "-l",
        "--list",
        dest="flag_list",
        action="store_true",
        help="Prints a list of all the added links and prompts for a choice",
    )
    # store_false: flag_archive defaults to True, -a disables it.
    parser.add_argument(
        "-a",
        "--no-archive",
        dest="flag_archive",
        action="store_false",
        help="Disables the archiver flag",
    )
    # store_false: flag_skip defaults to True, -s disables it.
    parser.add_argument(
        "-s",
        "--no_skip",
        dest="flag_skip",
        action="store_false",
        help="Disables the skip function, downloads the entire gallery",
    )
    parser.add_argument(
        "-v",
        "--verbose",
        dest="flag_verbose",
        action="store_true",
        help="Prints the generated commands instead of running them",
    )
    # Instagram-only filter; defaults to every known post type.
    parser.add_argument(
        "-t",
        "--type-post",
        dest="post_type",
        type=str,
        nargs="*",
        choices=instagram_types,
        default=instagram_types,
        help="Filters posts on instagram by type",
    )
    return parser.parse_args()
|
|
||||||
@ -1,417 +0,0 @@
|
|||||||
#!/usr/bin/env python3
|
|
||||||
# -*- coding: utf-8 -*-
|
|
||||||
"""
|
|
||||||
Rewriting of the download manager script
|
|
||||||
with the intention to make it
|
|
||||||
more modular with the use of flags
|
|
||||||
in order to avoid unnecesary modifications
|
|
||||||
to the cofig files.
|
|
||||||
Also following in line more posix and python rules.
|
|
||||||
"""
|
|
||||||
|
|
||||||
import re
|
|
||||||
import time
|
|
||||||
import logging
|
|
||||||
import yaml
|
|
||||||
from functions import run
|
|
||||||
from functions import quote
|
|
||||||
from functions import list_lines
|
|
||||||
from functions import load_config_variables
|
|
||||||
from argparser import argparser
|
|
||||||
from gdl_classes import User
|
|
||||||
|
|
||||||
# GLOBAL VARIABLE SECTION
# Store the name of the main binaries early in the code
BIN_GALLERY = "gallery-dl"
BIN_YOUTUBE = "yt-dlp"
# SKIP = "3"
# Parsed YAML configuration shared by every function in this module.
CONFIGS = load_config_variables()

# Console logger used module-wide; format shows file, level and function.
LOGGER = logging.getLogger()
HANDLER = logging.StreamHandler()
FORMATTER = logging.Formatter(
    "[%(filename)s][%(levelname)s] %(funcName)s '%(message)s'"
)
HANDLER.setFormatter(FORMATTER)
LOGGER.addHandler(HANDLER)
LOGGER.setLevel(logging.INFO)

# Enable a default "everyone" flag for when running stuff like download gallery
USERS = ["everyone"]
for dictionary in CONFIGS["users"]:
    USERS.append(dictionary["name"])

# Command-line arguments, parsed once at import time.
ARGS = argparser(USERS)
|
|
||||||
|
|
||||||
|
|
||||||
def get_index(value: str) -> int:
    """Return the position of user *value* in CONFIGS["users"], or -1."""
    for index, entry in enumerate(CONFIGS["users"]):
        if entry["name"] != value:
            continue
        LOGGER.debug("%s is %s", entry["name"], index)
        return index
    # No configured user carries that name.
    return -1
|
|
||||||
|
|
||||||
|
|
||||||
def parse_gallery(gdl_list: str, user: User):
    """Processes the gallery-dl command based on the selected gallery"""
    # skip_arg = f" -A {SKIP}" if ARGS.flag_skip else ""
    # When skipping is disabled, tell gallery-dl to re-process known files.
    skip_arg = " -o skip=true" if not ARGS.flag_skip else ""
    LOGGER.debug(skip_arg)

    # Send the list to gallery-dl
    download_gallery(
        ARGS.flag_archive,
        skip_arg,
        "",  # no single link: the queue file drives this run
        str(user.sleep),
        quote(f"{user.dir_download}"),
        quote(f"{user.archive_gallery}"),
        quote(gdl_list),
        parse_instagram(gdl_list),  # extra include option, "" unless instagram
    )
|
|
||||||
|
|
||||||
|
|
||||||
def parse_instagram(link: str) -> str:
    """Return the gallery-dl include option for instagram links, else ""."""
    if "instagram" not in link:
        return ""
    # post_type may be a list (default / several -t values) or a single string.
    post_type = ARGS.post_type
    if isinstance(post_type, list):
        option = f" -o include={quote(','.join(post_type))}"
    else:
        option = f" -o include={quote(post_type)}"
    LOGGER.debug(option)
    return option
|
|
||||||
|
|
||||||
|
|
||||||
def parse_link(link: str) -> str:
    """Append /media to bare twitter profile links; leave everything else untouched."""
    profile_match = re.search(r"(twitter\.com\/\w+(\/)?(?!.*status))", link)
    if not profile_match:
        LOGGER.debug("No modifications needed for the link %s", link)
        return link
    # Strip any trailing "/" or "/media" first so exactly one "/media" remains.
    fixed_link = re.sub(r"\/$|\/media(\/?)$", "", link) + "/media"
    LOGGER.debug("Processed link %s", fixed_link)
    return fixed_link
|
|
||||||
|
|
||||||
|
|
||||||
def download_gallery(
    use_archive: bool,
    skip_arg: str = "",
    link: str = "",
    sleep: str = "0",
    destination: str = "",
    database: str = "",
    queue: str = "",
    opt_args: str = "",
):
    """Assemble the gallery-dl command line and hand it to run()."""
    pieces = [f"{BIN_GALLERY} --sleep {sleep}"]
    if skip_arg:
        pieces.append(skip_arg)
    if destination:
        pieces.append(f" --dest {destination}")
    if use_archive:
        pieces.append(f" --download-archive {database}")
    if opt_args:
        pieces.append(opt_args)
    # Exactly one of `link` (single URL) or `queue` (input file) is honored.
    if link and not queue:
        LOGGER.info("link: %s", quote(link))
        pieces.append(f" {link}")
    if queue and not link:
        LOGGER.info("queue: %s", queue)
        pieces.append(f" -i {queue}")
    command = "".join(pieces)
    LOGGER.debug(command)
    run(command, ARGS.flag_verbose)
|
|
||||||
|
|
||||||
|
|
||||||
def download_youtube(
    use_archive: bool,
    link: str = "",
    destination: str = "",
    database: str = "",
):
    """Filters and processes the required command to download videos"""
    command = BIN_YOUTUBE

    # Plain youtube links: default format, only set the output template.
    if re.search(r"(https:\/\/youtube|https:\/\/www.youtube|https:\/\/youtu.be)", link):
        command += f' -o {quote(destination + "/%(title)s.%(ext)s")}'

    # Youtube Music links: audio-only extraction with metadata tagging.
    elif re.search(r"(https:\/\/music.youtube.*)", link):
        if use_archive:
            command += f" --download-archive {database}"
        command += f""" \
--no-playlist --newline -x \
--audio-format best --add-metadata --audio-quality 0 -o \
{quote(destination + '/%(title)s.%(ext)s')} \
"""

    elif re.search(r"chaturbate", link):
        # Re-runs the program every 30 seconds in case the stream goes private or dc
        for i in range(1, 41):  # For a 20 minute total
            run(
                f"""
{BIN_YOUTUBE} \
--hls-use-mpegts --prefer-ffmpeg \
-o {quote(destination + '/%(title)s.%(ext)s')} \
{link}
""",
                ARGS.flag_verbose,
            )
            time.sleep(30)
            LOGGER.info("waited for %s minutes", i * 30 / 60)

    else:  # Any other video link, just do it generic
        command += f" -f mp4 -o {quote(destination + '/%(title)s.%(ext)s')}"
    # NOTE(review): this final run() fires for every branch, including after
    # the chaturbate retry loop above — confirm that trailing run is intended
    # for chaturbate links.
    LOGGER.info("%s %s", command, link)
    run(f"{command} {link}", ARGS.flag_verbose)
|
|
||||||
|
|
||||||
|
|
||||||
def comic_manager(skip_arg: str, category: str):
    """Process the information to download manga"""
    # Site-regex matching the requested category; an empty pattern matches
    # every line, so an unknown category downloads the whole list.
    re_cat = ""
    if category == "manga":
        re_cat = "manga|webtoon"
    elif category == "comic":
        re_cat = "readcomiconline"

    with open(CONFIGS["comic"]["list"], encoding="utf-8") as list_comic:
        for graphic_novel in [line.rstrip() for line in list_comic]:
            # Search for mangas but exclude comics
            if not re.search(re_cat, graphic_novel):
                LOGGER.debug("%s does not match regex espression", graphic_novel)
                continue
            download_gallery(
                ARGS.flag_archive,
                skip_arg,
                quote(graphic_novel),
                "0",  # no sleep between chapter downloads
                CONFIGS["comic"]["download-directory"],
                CONFIGS["comic"]["archive"],
                "",
                "",
            )
|
|
||||||
|
|
||||||
|
|
||||||
def webcomic_manager():
    """Process the information to download webcomics"""
    webcomic_list = CONFIGS["comic"]["webcomic-list"]
    with open(webcomic_list, encoding="utf-8") as open_list:
        webcomic_file = yaml.safe_load(open_list)

    # Create a list of all the available webcomics for the user to chose from
    for index, entry in enumerate(webcomic_file["Webcomics"]):
        print(list_lines(index, entry["name"]))

    # Prompt for a choice
    usr_input = int(input("Select your comic: "))
    # Determines where the webcomic will be downloaded
    rating = webcomic_file["Webcomics"][usr_input]["type"]
    webcomic_category = webcomic_file["Global"][f"{rating}_directory"]
    LOGGER.debug("The webcomic is %s", webcomic_category)
    # Delegate the crawl to the external `webcomix` tool inside the category dir.
    command = f"""cd {quote(webcomic_category)} && webcomix custom \
{quote(webcomic_file["Webcomics"][usr_input]["name"])} \
--start-url \
{quote(webcomic_file["Webcomics"][usr_input]["url"])} \
--next-page-xpath={quote(webcomic_file["Webcomics"][usr_input]["next_code"])} \
--image-xpath={quote(webcomic_file["Webcomics"][usr_input]["image_code"])} \
-y --cbz"""
    LOGGER.debug(command)
    run(command, ARGS.flag_verbose)
|
|
||||||
|
|
||||||
|
|
||||||
def push_manager(user: User):
    """Filters out the URL to use the appropiate downloader"""
    # Creates an array which will store any links that should use youtube-dl
    link_video_cache = []
    # Profile/gallery-style URLs that gallery-dl should download and keep
    # on the permanent watch list.
    re_links = re.compile(
        r"(twitter\.com\/\w+((?=.*media)|(?!.*status)))"
        r"|(men\.wikifeet)"
        r"|(furaffinity\.net\/user\/)"
        r"|((deviantart\.com\/\w+(?!.*\/art\/)))"
        r"|(furaffinity\.net\/gallery\/)"
        r"|(furaffinity\.net\/scraps\/)"
        r"|(furaffinity\.net\/favorites\/)"
        r"|(instagram.com(?!\/p\/)\/\w+)"
        r"|(e621\.net((?=\/post\/)|(?!\/posts\/)))"
        r"|(flickr\.com\/photos\/\w+\/(?!\d+))"
        r"|(tumblr\.com(?!\/post\/))"
        r"|(kemono\.party\/(fanbox|gumroad|patreon)(?!\/user\/\d+\/post))"
        r"|(blogspot\.com(?!\/))"
        r"|(rule34\.paheal\.net\/post\/(?!view))"
        r"|(rule34\.xxx\/index\.php\?page\=post&s=(?!view))"
        r"|(pixiv\.net\/(en\/)?((?=users)|(?!artwork)))"
        r"|(reddit\.com\/(user|u))"
        r"|(baraag\.net\/((@\w+)|(?!\/\d+)))"
        r"|(pinterest\.com\/(?!pin\/\d+))"
        r"|(redgifs\.com\/(users|u|(?!watch)))",
    )
    with open(user.list_push, encoding="utf-8") as list_push:
        # The comprehension reads the whole file up front, so truncating the
        # same path inside the loop body below is safe.
        for link in [line.rstrip() for line in list_push]:
            LOGGER.debug("Processing %s", link)
            # Flush the push list, cleans all the contents
            with open(user.list_push, "w", encoding="utf-8") as list_push:
                list_push.close()
            # VIDEOS
            if re.search(r"youtu.be|youtube|pornhub|xtube|xvideos|chaturbate", link):
                LOGGER.debug("Matched type yt-dlp")
                link_video_cache.append(link)
            # Search for gallery links, these will be added to a list after downloading
            elif re.search(re_links, link):
                LOGGER.debug("Matched type gallery-dl")
                # skip_arg = f" -A {SKIP}" if ARGS.flag_skip else ""
                skip_arg = " -o skip=true" if not ARGS.flag_skip else ""
                LOGGER.debug("Skip: %s, link: %s", skip_arg, parse_instagram(link))
                download_gallery(
                    ARGS.flag_archive,
                    skip_arg,
                    quote(f"{parse_link(link)}"),
                    f"{user.sleep}",
                    quote(f"{user.dir_download}"),
                    quote(f"{user.archive_gallery}"),
                    "",
                    f"{parse_instagram(link)}",
                )
                # Record the gallery link, so it remains on the watch list
                with open(user.list_master, "a", encoding="utf-8") as w_file, open(
                    user.list_master, "r", encoding="utf-8"
                ) as r_file:
                    content = r_file.read().lower()
                    # Case-insensitive dedupe against the existing watch list.
                    if parse_link(link).lower() in content:
                        LOGGER.info("Gallery repeated, not saving")
                        continue
                    LOGGER.info("New gallery, saving")
                    w_file.write(parse_link(str(link)) + "\n")

            # Searches for comic/manga links
            elif re.search(r"readcomiconline|mangahere|mangadex|webtoons", link):
                # Toggle for comic/manga skip flag
                if ARGS.flag_skip and re.search(r"readcomiconline", link):
                    skip_arg = " --chapter-range 1"
                elif ARGS.flag_skip and re.search(r"mangahere|webtoons", link):
                    skip_arg = " --chapter-range 1-5"
                else:
                    skip_arg = ""
                LOGGER.debug(skip_arg)

                download_gallery(
                    ARGS.flag_archive,
                    skip_arg,
                    quote(link),
                    "0",
                    CONFIGS["comic"]["download-directory"],
                    CONFIGS["comic"]["archive"],
                    "",
                    "",
                )
                # Add comic/manga link to the list
                list_gn = CONFIGS["comic"]["list"]
                with open(list_gn, "a", encoding="utf-8") as w_file, open(
                    list_gn, "r", encoding="utf-8"
                ) as r_file:
                    content = r_file.read().lower()
                    if parse_link(link).lower() in content:
                        LOGGER.info("Graphic novel repeated, not saving")
                        continue
                    LOGGER.info("New graphic novel, saving")
                    w_file.write(link + "\n")
            # Download generic links, the -o flag overwrites config file and
            # downloads the files into the root destination
            else:
                LOGGER.info("Other type of download %s", link)
                download_gallery(
                    False,
                    " -o directory='[]'",
                    quote(link),
                    "0",
                    quote(str(user.dir_push)),
                    "",
                    "",
                    "",
                )
    # Send the video links to youtube-dl
    for link in link_video_cache:
        download_youtube(
            ARGS.flag_archive,
            quote(link),
            f"{user.dir_media_download}",
            quote(f"{user.archive_media}"),
        )
|
|
||||||
|
|
||||||
|
|
||||||
def scrapper_manager(user: User):
    # pylint: disable=too-many-branches
    """Analyze the user arguments and call in functions.

    Dispatches on ARGS.scrapper: the gallery-style scrappers share the
    skip toggle, while comic/manga/webcomic each set their own ranges.
    """
    if not ARGS.scrapper:  # Check if a scrapper was selected
        return

    if re.search(r"gallery|instagram|kemono", ARGS.scrapper):
        # skip_arg = f" -A {SKIP}" if ARGS.flag_skip else ""
        skip_arg = " -o skip=true" if not ARGS.flag_skip else ""
        LOGGER.debug(skip_arg)
        if ARGS.scrapper == "gallery":
            parse_gallery(f"{user.list_main}", user)
        elif ARGS.scrapper == "instagram":
            parse_gallery(f"{user.list_instagram}", user)
        elif ARGS.scrapper == "kemono":
            parse_gallery(f"{user.list_kemono}", user)
    # Bug fix: the original compared with `ARGS.scrapper in "push"` — a
    # substring test on a literal, which would also match values like "us".
    # Equality states the intent and behaves identically for every valid
    # scrapper choice.
    elif ARGS.scrapper == "push":
        push_manager(user)
    elif ARGS.scrapper == "comic":
        # Only the first chapter when skipping is on (quick refresh).
        skip_arg = " --chapter-range 1" if ARGS.flag_skip else ""
        LOGGER.debug(skip_arg)
        comic_manager(skip_arg, "comic")
    elif ARGS.scrapper == "manga":
        # Manga sites post shorter chapters, refresh the last five.
        skip_arg = " --chapter-range 1-5" if ARGS.flag_skip else ""
        LOGGER.debug(skip_arg)
        comic_manager(skip_arg, "manga")
    elif ARGS.scrapper == "webcomic":
        webcomic_manager()
|
|
||||||
|
|
||||||
|
|
||||||
def main():
    """Main module to decide what to do based on the parsed arguments"""
    if ARGS.scrapper:
        # NOTE(review): `ARGS.user in "everyone"` is a substring test on a
        # literal ("one" would also match); presumably equality was intended.
        if (ARGS.user in "everyone") and (
            re.search(r"push|gallery|instagram|kemono", ARGS.scrapper)
        ):
            # Per-user scrappers with the default user: iterate everyone.
            for current_user in CONFIGS["users"]:
                user = User(get_index(current_user["name"]))
                user.list_manager()
                LOGGER.info("Scrapping %s for %s", ARGS.scrapper, current_user["name"])
                scrapper_manager(user)
        elif re.search(r"comic|manga|webcomic", ARGS.scrapper):
            # Graphic-novel lists are global; they always run as "jawz".
            user = User(get_index("jawz"))
            user.list_manager()
            LOGGER.info("Scrapping %s", ARGS.scrapper)
            scrapper_manager(user)
        else:
            # Create the lists to scrap
            user = User(get_index(ARGS.user))
            user.list_manager()
            scrapper_manager(user)
    elif ARGS.link:
        LOGGER.debug(ARGS.link)
        if re.search(r"everyone|jawz", ARGS.user):
            # Create the lists to scrap
            user = User(get_index("jawz"))
            user.list_manager()
        else:
            # Create the lists to scrap
            user = User(get_index(ARGS.user))
            user.list_manager()
        # Append every provided link to the push list (or just log it in -v).
        for arg_link in ARGS.link[0]:
            LOGGER.debug(arg_link)
            if ARGS.flag_verbose:
                LOGGER.debug(
                    "%s >> %s", quote(parse_link(arg_link)), quote(user.list_push)
                )
            else:
                with open(user.list_push, "a", encoding="utf-8") as open_file:
                    open_file.write(parse_link(arg_link) + "\n")
        push_manager(user)


if __name__ == "__main__":
    main()
|
|
||||||
@ -1,70 +0,0 @@
|
|||||||
#!/usr/bin/python
|
|
||||||
# -*- coding: utf-8 -*-
|
|
||||||
"""Personal functions to aid on multiple scripts"""
|
|
||||||
import sys
|
|
||||||
import fileinput
|
|
||||||
import re
|
|
||||||
import os
|
|
||||||
from pathlib import Path
|
|
||||||
import yaml
|
|
||||||
|
|
||||||
VERBOSE_G = False
|
|
||||||
|
|
||||||
|
|
||||||
def load_config_variables():
    """Read and return the whole jawz YAML configuration as a dict."""
    config_path = Path("~/.config/jawz/config.yaml").expanduser()
    with open(config_path, encoding="utf-8") as handle:
        return yaml.safe_load(handle)
|
|
||||||
|
|
||||||
|
|
||||||
def run(command: str, verbose: bool):
    """Run command in a subprocess"""
    # pylint: disable=subprocess-run-check
    # Verbose mode only echoes the command, turning -v into a dry run.
    if verbose:
        print(command)
        return
    os.system(command)
|
|
||||||
|
|
||||||
|
|
||||||
def list_lines(i: int, line: str) -> str:
    """Format *line* as entry number *i* of a numbered list."""
    return "{}) {}".format(i, line)
|
|
||||||
|
|
||||||
|
|
||||||
def quote(line: str) -> str:
    """Wrap *line* in double quotes for shell command assembly."""
    return '"' + str(line) + '"'
|
|
||||||
|
|
||||||
|
|
||||||
def sort_txt_file(file_path: Path):
    """Sort the file's lines alphabetically, dropping duplicates,
    empty lines, http protocols and trailing slashes."""
    target = quote(str(file_path.resolve()))
    run(f"sort -u {target} -o {target}", VERBOSE_G)
    run(f"sed -i '/^$/d' {target}", VERBOSE_G)
    run(f'sed -i -e "s,http:,https:," {target}', VERBOSE_G)
    # fix this using strip on python
    # line.strip("/")
    run(f'sed -i -e "s,/$,," {target}', VERBOSE_G)  # trailing /
|
|
||||||
|
|
||||||
|
|
||||||
def randomize_txt_file(file_path: Path):
    """Shuffle the file's lines in place (GNU sort -R)."""
    target = quote(str(file_path.resolve()))
    run(f"sort -R {target} -o {target}", VERBOSE_G)
|
|
||||||
|
|
||||||
|
|
||||||
def parse_list(file):
    """Replace http with https and strip the trailing / of every line.

    The cleaned content is written back to *file*, which is then sorted
    and deduplicated via sort_txt_file().
    """
    # Normalize the protocol in place.
    for line in fileinput.input(file, inplace=True):
        sys.stdout.write(str(line).replace("http://", "https://"))
    with open(file, "r+", encoding="utf-8") as open_file:
        f_content = open_file.read()
        # Bug fix: the original ran re.compile(r"\/$", 0).sub(r"\/$", ""),
        # which substituted on an EMPTY string (discarding the file's text)
        # and then print()-ed the result to stdout while the file was left
        # truncated. Strip each line's trailing slash and write it back.
        f_content = re.sub(r"/$", "", f_content, flags=re.MULTILINE)
        open_file.seek(0)
        open_file.truncate()
        open_file.write(f_content)
    sort_txt_file(file)
|
|
||||||
@ -1,103 +0,0 @@
|
|||||||
#!/usr/bin/env python3
|
|
||||||
"""Define the user class to populate and setup the download environment"""
|
|
||||||
import re
|
|
||||||
from pathlib import Path
|
|
||||||
from functions import sort_txt_file, randomize_txt_file, load_config_variables
|
|
||||||
|
|
||||||
config_variables = load_config_variables()
|
|
||||||
|
|
||||||
|
|
||||||
class User:
    """Populate the directory for each user"""

    # pylint: disable=too-many-instance-attributes
    def __init__(self, index):
        # index selects the per-user section of the YAML config;
        # self.config is the shared "global" section.
        self.user = config_variables["users"][index]
        self.config = config_variables["global"]
        self.name = self.user["name"]
        self.sleep = self.config["sleep"]
        # Directories
        self.dir_cache = Path(self.config["cache-directory"]) / self.name
        self.dir_log = Path(self.config["log-directory"])
        self.dir_archive = Path(self.config["archive-directory"])
        self.dir_download = Path(self.user["download-directory"])
        self.dir_media_download = Path(self.user["media-directory"])
        self.dir_push = Path(self.user["push-directory"])
        self.dir_master_list = Path(self.config["list-dir"]) / self.name
        # Files
        self.archive_gallery = self.dir_archive / f"{self.name}.sqlite3"
        self.archive_media = self.dir_archive / f"{self.name}_ytdl.txt"
        # Lists
        self.list_master = self.dir_master_list / "watch.txt"
        self.list_push = self.dir_master_list / "instant.txt"
        self.list_instagram = self.dir_cache / "instagram.txt"
        self.list_kemono = self.dir_cache / "kemono.txt"
        self.list_main = self.dir_cache / "main.txt"

    def create_directories(self):
        """Create user directories if they don't exist"""
        # Wipe the cache dir first: delete files, then first-level sub-dirs.
        # NOTE(review): rmdir() on a non-empty sub-directory would raise;
        # this assumes the cache only ever nests one level deep.
        if self.dir_cache.is_dir():
            for file in self.dir_cache.iterdir():
                if file.is_file():
                    file.unlink()
            for file in self.dir_cache.iterdir():
                if file.is_dir():
                    file.rmdir()
            self.dir_cache.rmdir()
        # Create directories
        self.dir_cache.mkdir(parents=True, exist_ok=True)
        self.dir_log.mkdir(parents=True, exist_ok=True)
        self.dir_archive.mkdir(parents=True, exist_ok=True)
        self.dir_download.mkdir(parents=True, exist_ok=True)
        self.dir_media_download.mkdir(parents=True, exist_ok=True)
        self.dir_push.mkdir(parents=True, exist_ok=True)
        # Check for the existence of core files
        if not Path(self.archive_gallery).is_file():
            self.archive_gallery.touch()
        if not Path(self.archive_media).is_file():
            self.archive_media.touch()
        if not self.dir_master_list.is_dir():
            print(f"ERROR: Directory for user {self.name} doesn't exist")
        if not Path(self.list_master).is_file():
            self.list_master.touch()
        if not Path(self.list_push).is_file():
            self.list_push.touch()
        # Create temporary lists
        for gdl_list in ("instagram", "kemono", "main"):
            Path(self.dir_cache.resolve() / f"{gdl_list}.txt").touch()

    def list_manager(self):
        """Manage all the user list and create sub-lists"""
        # sort_txt_file(self.list_master)
        self.create_directories()  # Call the function to create necesary cache dirs
        with open(self.list_master, encoding="utf-8") as list_master:
            # Create temporary list files segmented per scrapper
            for line in [line.rstrip() for line in list_master]:
                # WIKIFEET
                with open(self.list_main, "a", encoding="utf-8") as list_main, open(
                    self.list_kemono, "a", encoding="utf-8"
                ) as list_kemono, open(
                    self.list_instagram, "a", encoding="utf-8"
                ) as list_instagram:
                    if re.search(r"kemono.party", line):
                        list_kemono.write(line + "\n")
                    elif re.search(r"instagram", line):
                        list_instagram.write(line + "\n")
                    elif re.search(r"wikifeet", line):
                        # Wikifeet entries are intentionally skipped.
                        continue
                        # list_main.write(line + "\n")
                    elif re.search(r"furaffinity", line):
                        list_main.write(line + "\n")
                    elif re.search(r"twitter", line):
                        # if url contains /media at the end just write the line
                        if re.search(r"\/media$", line):
                            list_main.write(line + "\n")
                        else:
                            # if does not contain /media at the end then add /media
                            list_main.write(line + "/media" + "\n")
                    else:
                        list_main.write(line + "\n")
        sort_txt_file(self.list_kemono)
        # Try to avoid getting banned by shuffling download order
        randomize_txt_file(self.list_instagram)
        randomize_txt_file(self.list_main)
|
||||||
@ -1,17 +0,0 @@
|
|||||||
[metadata]
|
|
||||||
name = download
|
|
||||||
version = 1.5
|
|
||||||
|
|
||||||
[options]
|
|
||||||
py_modules =
|
|
||||||
download
|
|
||||||
functions
|
|
||||||
argparser
|
|
||||||
gdl_classes
|
|
||||||
|
|
||||||
[options.entry_points]
|
|
||||||
console_scripts =
|
|
||||||
download = download:main
|
|
||||||
|
|
||||||
# [aliases]
|
|
||||||
# test = pytest
|
|
||||||
@ -1,24 +0,0 @@
|
|||||||
from setuptools import setup

# All project metadata lives in setup.cfg; this stub only triggers the build.
setup()
|
|
||||||
# import os
|
|
||||||
# from setuptools import find_packages
|
|
||||||
# from distutils.core import setup
|
|
||||||
|
|
||||||
# import setuptools
|
|
||||||
|
|
||||||
# # User-friendly description from README.md
|
|
||||||
# current_directory = os.path.dirname(os.path.abspath(__file__))
|
|
||||||
# try:
|
|
||||||
# with open(os.path.join(current_directory, "README.md"), encoding="utf-8") as f:
|
|
||||||
# long_description = f.read()
|
|
||||||
# except Exception:
|
|
||||||
# long_description = ""
|
|
||||||
|
|
||||||
# setup(
|
|
||||||
# name="download",
|
|
||||||
# # packages=["argparser", "functions"],
|
|
||||||
# version="1.5.0",
|
|
||||||
# scripts=["download.py"],
|
|
||||||
# # entry_points={"console_scripts": ["download = download:main"]},
|
|
||||||
# )
|
|
||||||
@ -1,28 +0,0 @@
|
|||||||
{ pkgs ? import <nixpkgs> { } }:

with pkgs;

# Development shell: python3 with the modules the downloader scripts import.
mkShell {
  packages = [
    (python3.withPackages (ps:
      with ps; [
        setuptools
        pyyaml
        types-pyyaml
        # (buildPythonApplication rec {
        #   pname = "webcomix";
        #   version = "3.6.6";
        #   src = fetchPypi {
        #     inherit pname version;
        #     sha256 = "sha256-hCnic8Rd81qY1R1XMrSME5ntYTSvZu4/ANp03nCmLKU=";
        #   };
        #   doCheck = false;
        #   propagatedBuildInputs =
        #     [ click scrapy scrapy-splash scrapy-fake-useragent tqdm ];
        # })
      ]))
  ];
  buildInputs = [

  ];
}
|
|
||||||
@ -1,136 +0,0 @@
|
|||||||
#!/usr/bin/env python3
|
|
||||||
|
|
||||||
# Imports
|
|
||||||
import os
|
|
||||||
import math
|
|
||||||
|
|
||||||
# Function for calculating the appropriate bitrate to use during conversion
|
|
||||||
def get_bitrate(duration, filesize, audio_br):
    """Return (target, minimum, maximum) video bitrates for the clip.

    target is the bit budget left for video after subtracting the audio
    bitrate from the overall size/duration ratio.
    """
    target = math.floor(filesize / duration - audio_br)
    return target, target * 0.50, target * 1.45
|
|
||||||
|
|
||||||
|
|
||||||
def encode(ffmpeg_string, output_name, fs):
    """Run the ffmpeg command and report whether it needs a retry.

    Returns False when the produced file fits the *fs* megabyte target,
    True when it is still too large and must be re-encoded.
    """
    os.system(ffmpeg_string)
    # Output size converted from bytes to megabytes (×1/1024²).
    end_size = (
        os.path.getsize(
            "/dev/shm/ffmpeg/out/{output_name}".format(output_name=output_name)
        )
        * 0.00000095367432
    )
    if end_size < fs:
        print(
            ffmpeg_string.replace("\t", "")
            + "\nThe FFMPEG string above has yielded a file whose size is "
            + str(end_size)
            + "MB.\n{output_name} is ready for Discord.\n".format(
                output_name=output_name
            )
        )
        return False
    else:
        print(
            ffmpeg_string.replace("\t", "")
            + "\nThe FFMPEG string above has yielded a file whose size is "
            + str(end_size)
            + "MB.\n{output_name} is NOT ready for Discord, and will be re-run.\nMy bad.".format(
                output_name=output_name
            )
        )
        return True
|
|
||||||
|
|
||||||
|
|
||||||
def time_calculations(fname, length):
    """Derive clip duration and an ffmpeg seek fragment from the file name.

    File names may encode a cut as HHMMSS-HHMMSS (e.g. "001030-001145.mp4");
    a name starting with HHMMSS only trims the start, and any other name
    leaves the whole clip untouched.

    Args:
        fname: Input file name, possibly prefixed with timestamps.
        length: Total clip length in seconds (from ffprobe).

    Returns:
        Tuple of (duration in seconds, "-ss ... -to ..." ffmpeg fragment;
        empty string when the name carries no timestamps).
    """
    startstring = fname[0:2] + ":" + fname[2:4] + ":" + fname[4:6]
    endstring = fname[7:9] + ":" + fname[9:11] + ":" + fname[11:13]

    # Narrowed from bare `except:` — only int() parse failures are expected
    # here; a bare except would also swallow KeyboardInterrupt/SystemExit.
    try:
        int(fname[0:6])
        startseconds = (
            int(fname[0:2]) * 60 * 60 + int(fname[2:4]) * 60 + int(fname[4:6])
        )
        try:
            int(fname[11:13])
            endseconds = (
                int(fname[7:9]) * 60 * 60 + int(fname[9:11]) * 60 + int(fname[11:13])
            )
            duration = endseconds - startseconds
            timestamped_section = f"-ss {startstring} -to {endstring}"
        except ValueError:
            # Start timestamp only: cut from there to the end of the clip.
            duration = length - startseconds
            timestamped_section = f"-ss {startstring}"
    except ValueError:
        # No timestamps in the name: keep the whole clip.
        duration = length
        timestamped_section = ""

    return duration, timestamped_section
|
|
||||||
|
|
||||||
|
|
||||||
# ---------------------------------------------------------------------------
# Script body: shrink the single video dropped in /dev/shm/ffmpeg/in/ until
# it fits under the size budget. Configuration arrives via env vars:
#   reso     -- target resolution, e.g. "1280x720"
#   codec    -- "vp9", "x264" or "x265" (key into `codecs` below)
#   audio_br -- audio bitrate in kbit/s
#   fs       -- size budget in MiB
# ---------------------------------------------------------------------------

# Spaces in the name would break the shell commands built below; strip them.
fname = os.listdir("/dev/shm/ffmpeg/in/")[0]
os.rename("/dev/shm/ffmpeg/in/" + fname, "/dev/shm/ffmpeg/in/" + fname.replace(" ", ""))
fname = fname.replace(" ", "")

# ffprobe to calculate the total duration of the clip.
length = math.floor(
    float(
        os.popen(
            "ffprobe -v error -show_entries format=duration"
            " -of default=noprint_wrappers=1:nokey=1"
            f" /dev/shm/ffmpeg/in/{fname}"
        ).read()
    )
)

duration, timestamped_section = time_calculations(fname, length)

run = True

reso = os.getenv("reso")
codec = os.getenv("codec")
# Fixed: audio_br was previously assigned twice; one int() conversion is
# enough (int(str(None)) still raises loudly if the variable is unset).
audio_br = int(str(os.getenv("audio_br")))
fs = float(str(os.getenv("fs")))
target_fs = fs  # hard cap; `fs` itself shrinks on each retry

# Per-codec ffmpeg fragments for the two-pass encode.
codecs = {
    "vp9": {
        "pass1": f"-vf scale={reso} -g 240 -threads 8 -speed 4 -row-mt 1 -tile-columns 2 -vsync cfr -c:v libvpx-vp9 -pass 1 -an",
        "pass2": f"-vf scale={reso} -g 240 -threads 8 -speed 2 -row-mt 1 -tile-columns 2 -c:v libvpx-vp9 -c:a libopus -pass 2",
        "output_name": "small_" + fname.replace(".mp4", ".webm"),
    },
    "x264": {
        "pass1": f"-vf scale={reso} -vsync cfr -c:v libx264 -pass 1 -an",
        "pass2": f"-vf scale={reso} -c:v libx264 -c:a aac -pass 2 ",
        "output_name": "small_" + fname,
    },
    "x265": {
        "pass1": f"-vf scale={reso} -c:v libx265 -vsync cfr -x265-params pass=1 -an",
        "pass2": f"-vf scale={reso} -c:v libx265 -x265-params pass=2 -c:a aac",
        "output_name": "small_" + fname,
    },
}


while run:
    # MiB of budget -> kbit of stream (1 MiB = 8192 kbit).
    end_fs = fs * 8192
    br, minbr, maxbr = get_bitrate(
        duration=duration, filesize=end_fs, audio_br=audio_br
    )
    ffmpeg_string = f"""
    ffpb {timestamped_section} -hwaccel cuda -i /dev/shm/ffmpeg/in/{fname} -y \
    {codecs[str(codec)]['pass1']} \
    -b:v {br}k -minrate {minbr}k -maxrate {maxbr}k \
    -f null /dev/null && \
    ffpb {timestamped_section} -hwaccel cuda -i /dev/shm/ffmpeg/in/{fname} \
    {codecs[str(codec)]['pass2']} \
    -b:a {audio_br}k -b:v {br}k -minrate {minbr}k -maxrate {maxbr}k \
    /dev/shm/ffmpeg/out/{codecs[str(codec)]['output_name']} -y
    """

    run = encode(
        ffmpeg_string, output_name=codecs[str(codec)]["output_name"], fs=target_fs
    )

    # Still too big: lower the size target slightly and try again.
    if run:
        fs = fs - 0.2
|
|
||||||
@ -1,98 +0,0 @@
|
|||||||
#! /usr/bin/env nix-shell
#! nix-shell -i bash -p bash gum trashy fd ripgrep mediainfo

# Interactive video conversion menu: pick files with fzf and convert them to
# x265 mp4, a Discord-sized mp4, or a gif.
# NOTE(review): fzf and ffpb are used below but not listed in the nix-shell
# -p packages — presumably available on PATH; confirm.

# Print $1's basename with its extension replaced by $2.
replace_extension() {
    local file_basename
    file_basename=$(basename "$1")
    echo "${file_basename%.*}.$2"
}

# Convert $1 into a small 12fps gif in the current directory.
convert_gif() {
    file_newname=$(replace_extension "$1" gif)
    ffpb -i "$(realpath "$1")" -vf fps=12,scale=480:-1,smartblur=ls=-0.5 "$file_newname"
}

# Re-encode $1 as x265 mp4, staging in /dev/shm, then replace the original
# (old file goes to the trash, not rm).
convert_mp4() {
    local file_newname
    file_newname=$(replace_extension "$1" mp4)
    local file_tempdest=/dev/shm/$file_newname
    local file_destination
    file_destination=$(dirname "$(realpath "$1")")/$file_newname
    ffpb -i "$1" \
        -c:v libx265 \
        "$file_tempdest"
    trash "$1"
    mv -i "$file_tempdest" "$file_destination"
}

# Shrink $1 under a size budget via ffmpeg4discord.
# $2: name prefix, $3: audio kbit/s, $4: target MiB, $5: resolution.
convert_discord() {
    local file_newname
    file_newname=$2_$(replace_extension "$1" mp4)
    local dir_ram=/dev/shm/ffmpeg
    mkdir -p $dir_ram/{in,out}
    ffpb -hwaccel cuda -i "$(realpath "$1")" \
        -c:v h264_nvenc \
        "$dir_ram"/in/discord.mp4
    cd "$dir_ram" || exit
    codec=x264 audio_br=$3 fs=$4 reso=$5 ffmpeg4discord
    mv "$dir_ram"/out/small_discord.mp4 ~/"$file_newname"
    command rm -rf "$dir_ram"
}

# Fill the global to_convert array from an interactive fzf multi-selection.
# (Previously this loop was copy-pasted into four case branches.)
select_files() {
    to_convert=()
    while IFS= read -r file; do
        to_convert+=("$file")
    done < <(fd . "$(pwd)" -tf -aL | fzf --multi -i)
}

operation=$(gum choose mp4 discord nitro gif enc265)

case $operation in
1 | mp4)
    select_files
    for file in "${to_convert[@]}"; do
        convert_mp4 "$file"
    done
    ;;
2 | discord)
    select_files
    for file in "${to_convert[@]}"; do
        convert_discord "$file" discord 96 8.0 "1280x720"
    done
    ;;
3 | nitro)
    select_files
    for file in "${to_convert[@]}"; do
        convert_discord "$file" nitro 128 50.0 "1920x1080"
    done
    ;;
4 | gif)
    select_files
    for file in "${to_convert[@]}"; do
        convert_gif "$file"
    done
    ;;
5 | enc265)
    # Batch mode: every video under the cwd that is not already x265.
    to_convert=()
    extensions=(flv m4v mpg avi mov ts mkv mp4 webm)
    for ext in "${extensions[@]}"; do
        while IFS= read -r file; do
            if ! (mediainfo "$file" | grep Writing\ library | grep -q x265); then
                to_convert+=("$file")
            fi
        done < <(fd . -e "$ext" -tf -aL)
    done
    for file in "${to_convert[@]}"; do
        convert_mp4 "$file"
    done
    ;;
*)
    echo -n "Please select a valid input"
    ;;
esac
|
|
||||||
@ -1,153 +0,0 @@
|
|||||||
#! /usr/bin/env nix-shell
#! nix-shell -i bash -p bash gum fd ripgrep exa trashy zip unzip

# Normalize the comic/manga/webtoon library: fix capitalization, rename
# keyword patterns, correct known series names, and merge multi-part TPBs.

root_directories=(
    ~/Multimedia/Library/Comics
    ~/Multimedia/Library/Manga
    ~/Multimedia/Library/Webtoons
)

# Apply sed substitution s/$2/$3/g to the name $1.
newname() {
    echo "$1" | sed -E "s/$2/$3/g"
}

separator() {
    gum style --foreground 7 _________________________
}

# Pretty-print an old-name -> new-name transition.
announce_changes() {
    echo "Renaming:"
    gum style --foreground 1 "$1"
    echo "Into:"
    gum style --foreground 2 "$2"
    separator
}

# Rename every file matching fd-pattern $1, substituting regex $2 with $3.
rename_file() {
    while IFS= read -r file; do
        local original_name
        original_name=$(basename "$file")
        local new_name
        new_name=$(newname "$(basename "$file")" "$2" "$3")

        announce_changes "$original_name" "$new_name"
        command mv -n "$(dirname "$file")"/{"$original_name","$new_name"}
    done < <(fd "$1" --absolute-path -tf -s "${root_directories[@]}")
}

# Same as rename_file but for directories: creates the renamed directory
# and moves the contents over (merging when the target already exists).
rename_directory() {
    while IFS= read -r dir; do
        local new_name
        new_name=$(newname "$(basename "$dir")" "$2" "$3")
        local new_dir
        new_dir=$(dirname "$dir")/$new_name

        announce_changes "$dir" "$new_dir"
        echo "Processing..."
        if [ ! -d "$new_dir" ]; then
            echo "$(basename "$new_dir") doesn't exist. Creating it."
            command mkdir -p "$new_dir"
        fi
        if [ -d "$new_dir" ]; then
            echo "$(basename "$new_dir") has been created!, moving the following files:"
            exa "$dir"
            fd . "$dir" -x mv -n {} "$(realpath "$new_dir")"
        fi
        separator
    done < <(fd "$1" --absolute-path -td -s "${root_directories[@]}")
}

# Capitalize Special words
words=(special tpb full annual)
Words=(Special TPB Full Annual)
counter=0
for word in "${words[@]}"; do
    while IFS= read -r file; do
        new_name=$(newname "$(basename "$file")" "$word" "${Words[$counter]}")
        echo "Inproper capitalization of the word"
        gum style --foreground 1 "$word"
        echo "adjusting it into"
        gum style --foreground 2 "${Words[$counter]}"
        announce_changes "$(basename "$file")" "$new_name"
        command mv -n "$(dirname "$file")"/{"$(basename "$file")","$new_name"}
    done < <(fd "$word" --absolute-path -tf -s "${root_directories[@]}")
    counter=$((counter + 1))
done

# Rename Year files
# set regex_year_grep "\([[:digit:]]{4}\)"
# set regex_year_string "(\()(\d{4})(\))"
# rename_directory $regex_year_grep $regex_year_string \$2
# rename_file $regex_year_grep $regex_year_string \$2

# Rename #_ downloads
regex_hashtag="#_"
rename_directory $regex_hashtag $regex_hashtag "#"
rename_file $regex_hashtag $regex_hashtag "#"

# Normalize "<KW> 12" -> "<KW> #12" and strip stray "#<KW>" back to "<KW>".
rename_keywords() {
    # Followed by digit
    local regex_digit_fd="$1 \d+"
    local regex_digit="($1 )([[:digit:]]+)"
    rename_directory "$regex_digit_fd" "$regex_digit" "\1#\2"
    rename_file "$regex_digit_fd" "$regex_digit" "\1#\2"
    # Without digit
    regex="#$1"
    rename_directory "$regex" "$regex" "$1"
    rename_file "$regex" "$regex" "$1"
}

rename_keywords TPB
rename_keywords Special
rename_keywords Annual

# Rename #Full
rename_directory " #Full" " #Full" ""
rename_file " #Full" " #Full" ""

# Rename double space
rename_directory "  " "  " " "
rename_file "  " "  " " "

# Fix names
wrongnames=(
    "Dr. Stone"
    i-dont-want-this-kind-of-hero
    pure-of-heart
    scoob-and-shag
    stick-n-poke
    "Houseki no Kuni"
    "Gantz E"
    "Gantz G"
)
rightname=(
    "Dr. STONE"
    "I DON'T WANT THIS KIND OF HERO"
    "Pure of Heart"
    "Scoob and Shag"
    "Stick n' Poke"
    "Land of the Lustrous"
    "Gantz:E"
    "Gantz:G"
)
counter=0
for wrongname in "${wrongnames[@]}"; do
    rename_directory "$wrongname" "$wrongname" "${rightname[$counter]}"
    rename_file "$wrongname" "$wrongname" "${rightname[$counter]}"
    counter=$((counter + 1))
done

# Merge TPB (Part X) files
# Unpack every part into one directory, then re-zip as a single .cbz.
while IFS= read -r file; do
    new_name=$(newname "$(basename "$file" .cbz)" "TPB \(Part [[:digit:]]+\)" TPB)
    extract_dir=$(realpath "$(dirname "$file")"/"$new_name")
    if [ ! -d "$extract_dir" ]; then
        mkdir -p "$extract_dir"
    fi
    unzip "$file" -d "$extract_dir"/"$(basename "$file" .cbz)"
    cd "$extract_dir" || exit
    zip -r "$(realpath "$(dirname "$file")")"/"$new_name"\.cbz ./
    trash "$file"
    trash "$extract_dir"/"$(basename "$file" .cbz)"
done < <(fd "Part \d+" --absolute-path -tf -s "${root_directories[@]}")

# Trash any directories left empty by the renames above.
fd . --absolute-path -td -te "${root_directories[@]}" -x trash {}
|
|
||||||
@ -1,59 +0,0 @@
|
|||||||
#!/run/current-system/sw/bin/bash
# Nextcloud cron helper: run maintenance jobs, then move freshly scraped
# files from the scrapping disk into each user's Nextcloud "Requested"
# folder; name collisions are diverted into a review folder instead.

# Cron tasks
# Only run the Nextcloud jobs when the occ binary exists on this system.
if type /run/current-system/sw/bin/nextcloud-occ 2>/dev/null; then
    /run/current-system/sw/bin/nextcloud-occ preview:pre-generate
    # -t 900: cap the face-recognition background job at 900 seconds.
    /run/current-system/sw/bin/nextcloud-occ face:background_job -t 900
fi

# Sync GDL stuff
root=/mnt/disk2/scrapping

cd $root || exit
# Users whose scrape folders get synced into Nextcloud.
set -- Aqp Ghekre
for user in "$@"; do
    originDir=$root/$user
    destDir=/mnt/disk1/nextcloud/$user/files/Requested
    # Collisions land here so the user can review them by hand.
    destDirDup=/mnt/disk1/nextcloud/$user/files/RequestedDupePlzCheckNDel
    if [ ! -d "$destDir" ]; then
        echo "$destDir does not exist, creating..."
        mkdir -p "$destDir"
    fi
    cd "$originDir" || exit
    # Mirror the relative tree: strip the leading "./" from each found path.
    find . -type f | while read -r file; do
        destination=$destDir/"$(echo "$file" | sed "s/^\.\///")"
        destinationDup=$destDirDup/"$(echo "$file" | sed "s/^\.\///")"

        if [ ! -f "$destination" ]; then
            echo "Safe to move $(basename "$file")"
            if [ ! -d "$(dirname "$destination")" ]; then
                echo "Creating parent directory..."
                mkdir -p "$(dirname "$destination")"
            fi
            # -n: never overwrite an existing file at the destination.
            mv -n "$file" "$destination"
        else
            echo "Duplicated encountered $(basename "$file")"
            if [ ! -d "$(dirname "$destinationDup")" ]; then
                echo "Creating parent directory..."
                mkdir -p "$(dirname "$destinationDup")"
            fi
            mv -n "$file" "$destinationDup"
        fi

    done

    # Drop directories emptied by the moves above.
    find ./ -mindepth 1 -type d -empty -delete

    # NOTE(review): 990:990 is presumably the nextcloud service uid/gid —
    # confirm against the system configuration.
    chown 990:990 -R "$destDir"
    find "$destDir" -type d -exec chmod -R 755 {} \;
    find "$destDir" -type f -exec chmod -R 644 {} \;

    if [ -d "$destDirDup" ]; then
        chown 990:990 -R "$destDirDup"
        find "$destDirDup" -type d -exec chmod -R 755 {} \;
        find "$destDirDup" -type f -exec chmod -R 644 {} \;
    fi
    # Make Nextcloud index the files that were moved in behind its back.
    if type /run/current-system/sw/bin/nextcloud-occ 2>/dev/null; then
        /run/current-system/sw/bin/nextcloud-occ files:scan --all
    fi
done
|
|
||||||
@ -1,51 +0,0 @@
|
|||||||
#! /usr/bin/env nix-shell
#! nix-shell -i bash -p bash fd borgbackup gum ripgrep
# Recover watch.txt from every borg archive in the pika "lists" repo, merge
# the copies, compare against the live list, then optionally delete the
# extracted data plus the archives and compact the repositories.

BORG_PASSPHRASE=$(gum input --password --placeholder "Type borg password")
export BORG_PASSPHRASE

d_root=$HOME/pika
# Path of the list inside each archive (relative, as borg stores it).
f_string=home/jawz/.config/jawz/lists/jawz/watch.txt
d_borg=/mnt/disk1/backups/pika/lists

# Extract the list from every archive and concatenate all copies into
# "$d_root/master".
while IFS= read -r repo; do
    # First whitespace-separated column of `borg list` is the archive name.
    IFS=" " read -r -a array <<<"$repo"
    repo_id="${array[0]}"
    mkdir -vp "$d_root/$repo_id" && cd "$d_root/$repo_id" || exit
    borg extract $d_borg::"$repo_id" $f_string
    cat "$d_root/$repo_id/$f_string" >>"$d_root/master"
done < <(borg list "$d_borg")

cd "$HOME" || exit

# NOTE(review): $LW is expected from the environment (path of the live
# watch list) — confirm it is exported by the calling shell.
sort -u "$d_root/master" -o "$d_root/sorted"
sort -u "$LW" -o "$LW"

echo "Current $(wc -l <"$LW") archived $(wc -l <"$d_root/sorted")"

echo "Missing lines:"
diff "$d_root/sorted" "$LW"

# look for duped lines with different casing
echo "Duplicated lines:"
while IFS= read -r line; do
    # Only lines that are not already all-lowercase can shadow another entry.
    if ! [ "$line" == "${line,,}" ]; then
        if rg "${line,,}" <"$LW"; then
            echo "$line"
        fi
    fi
done <"$LW"

# delete pika backups
if gum confirm "Limpiar pika?"; then
    command rm -rf "$d_root"
    while IFS= read -r repo; do
        IFS=" " read -r -a array <<<"$repo"
        repo_id="${array[0]}"
        gum spin --spinner dot --title "Cleaning $repo_id..." -- borg delete $d_borg::"$repo_id"
    done < <(borg list "$d_borg")
else
    echo "Canceled, no files deleted"
fi
# Reclaim space freed by the deletions; runs in either branch.
# NOTE(review): the titles reuse $repo_id left over from the last loop
# iteration, so the label can be stale/empty here.
gum spin --spinner dot --title "Cleaning $repo_id..." -- borg compact "$d_borg"
gum spin --spinner dot --title "Cleaning $repo_id..." -- borg compact /mnt/disk1/backups/pika/home
|
|
||||||
@ -1,48 +0,0 @@
|
|||||||
#! /usr/bin/env nix-shell
#! nix-shell -i bash -p bash gnome.zenity rmlint git gum xclip

# Small ops menu: dedupe disks with rmlint, queue a download taken from the
# clipboard, or fsck + pull every git repo under ~/Development/Git.
# The operation can be passed as $1 to skip the interactive chooser.

if [ -n "$1" ]; then
    operation=$1
else
    operation=$(gum choose rmlint_1 rmlint_2 download git)
fi

case $operation in
rmlint_1)
    # Replace duplicates with reflink clones instead of deleting them.
    rmlint -g --types="duplicates" \
        --config=sh:handler=clone \
        /mnt/disk1/personal
    ;;
rmlint_2)
    rmlint -g --types="duplicates" \
        --config=sh:handler=clone \
        /mnt/disk2/{glue,home,personal,scrapping}
    ;;
download)
    # Let the user confirm/edit the clipboard before handing it to the
    # downloader running in a new terminal.
    ENTRY=$(zenity --entry --width=250 --title "Push Manager" \
        --text="Verify the following entry is correct" \
        --add-entry="Clipboard:" --entry-text "$(xclip -o -sel clip)")
    if [ -n "$ENTRY" ]; then
        kgx -e "download -u jawz -i '$ENTRY'"
    else
        zenity --error --width=250 \
            --text "Please verify and try again"
    fi
    ;;
git)
    git_dir=$HOME/Development/Git
    while IFS= read -r repo; do
        # Skip plain directories that are not git checkouts.
        if ! [ -d "$repo/.git" ]; then
            continue
        fi
        cd "$repo" || exit
        gum style --foreground 2 "Updating $(basename "$repo")"
        git fsck --full
        git pull
    done < <(fd . "$git_dir" -td --absolute-path -d 1)
    ;;
esac
|
|
||||||
@ -1,28 +0,0 @@
|
|||||||
#! /usr/bin/env nix-shell
#! nix-shell -i bash -p bash fd

# Split the files of the current directory into numbered sub-directories of
# $1 files each ("<dirname>_001", "<dirname>_002", ...); mp4 videos are
# collected separately under "videos/".

# Guard: without a chunk size the arithmetic below would abort mid-move.
if [ -z "$1" ]; then
    echo "usage: $(basename "$0") <files-per-directory>"
    exit 1
fi

before_count=$(fd -tf | wc -l)
i=0

# while-read over process substitution instead of `for f in $(fd ...)` so
# file names containing spaces survive word splitting.
while IFS= read -r file; do
    dir_name=$(basename "$(pwd)")_$(printf %03d $((i / $1 + 1)))
    mkdir -p "$dir_name"
    mv -i "$file" "$(realpath "$dir_name")"/
    i=$((i + 1))
done < <(fd -d1 -tf -E '*.mp4')

while IFS= read -r file; do
    mkdir -p videos
    mv -i "$file" "$(realpath videos)"/
done < <(fd -d1 -tf -e mp4)

after_count=$(fd -tf | wc -l)

# Sanity check: moving must not change the total number of files.
if [[ "$before_count" == "$after_count" ]]; then
    echo "No file count differences"
else
    echo "Before count: $before_count"
    echo "After count: $after_count"
fi
sleep 10
exit
|
|
||||||
@ -1,140 +0,0 @@
|
|||||||
#! /usr/bin/env nix-shell
#! nix-shell -i bash -p bash trashy fd ripgrep file
# Housekeeping for ~/Downloads and ~/Pictures: rename generically-named
# images, sort screenshots (desktop, Steam, Cyberpunk) into dated folders,
# sync Proton builds into Lutris, fix wrong image extensions, and remove
# driver litter from $HOME.

directories=("$HOME/Pictures/To Organize/" "$HOME/Downloads/")

# Print $1's basename with its extension replaced by $2.
replace_extension() {
    local file_basename
    file_basename=$(basename "$1")
    echo "${file_basename%.*}.$2"
}

# Emit a zero-padded 10-digit pseudo-random number.
generate_random_number() {
    local min=0
    local max=9999999999
    printf "%010d\n" $((min + RANDOM % max))
}

# Generate a random file name that collides with nothing under Pictures or
# Downloads; retries until unique.
test_name() {
    local random_number
    random_number=$(generate_random_number)
    while (($(fd "$random_number"* "$HOME/Pictures/" "$HOME/Downloads/" -tf | wc -l) > 0)); do
        echo "Conflicts found, generating a new filename"
        random_number=$(generate_random_number)
        echo "$random_number"
    done
    echo "$random_number"
}

# Pass 1: rename throwaway browser names (tenor/giphy/unknown/...) to random
# ids, and shunt Android-style screenshots into the Screenshots folder.
while IFS= read -r file; do
    regex_str='source|tenor|media|duckduckgo\.com|giphy|'
    regex_str+='(?<!app)image|^download|unknown|zoom|'
    regex_str+='new_canvas|untitled|drawpile'
    if basename "$file" | rg --pcre2 -q "$regex_str"; then
        new_name=$(test_name)
        echo renaming
        echo "$file"
        echo into
        echo "$(dirname "$file")"/"$new_name"
        echo ---------------
        command mv -n "$(dirname "$file")"/{"$(basename "$file")","$new_name"}
    fi
    if basename "$file" | rg -q 'Screenshot_\d{8}'; then
        echo "moving screenshot $file into $HOME/Pictures/Screenshots/"
        command mv -n "$file" "$HOME/Pictures/Screenshots/"
    fi
done < <(fd . "${directories[@]}" -d 1 -tf --absolute-path)

# Pass 2: file loose screenshots into Screenshots/YYYY/MM by mtime.
screenshots=$HOME/Pictures/Screenshots
if (($(fd . "$screenshots" -tf -d 1 | wc -l) > 0)); then
    while IFS= read -r file; do
        date=$(stat -c "%y" "$file" | rg -o "\d{4}-\d{2}-\d{2}")
        year=$(echo "$date" | rg -o "\d{4}")
        month=$(echo "$date" | rg -o "\d{4}-\d{2}" | rg -o --pcre2 "(?<=-)\d{2}")
        dest_dir=$(realpath "$screenshots/$year/$month")
        echo "Moving screenshot $(basename "$file") into $dest_dir"
        mkdir -vp "$dest_dir"
        command mv -n "$file" "$dest_dir/"
    done < <(fd . "$screenshots" --absolute-path -tf -d 1)
fi

# Where steam screenshots are stored, may need to replace with ur ID
dir_steam=$XDG_DATA_HOME/Steam/userdata/107446271/760/remote
declare -A games
# Insert here new games, put between [] the ID of the game
# You can find it by visiting the $dir_steam directory
# the ID is simply the name of the folder in there.
games+=(
    [386360]=Smite
    [960090]="Bloons Tower Defense 6"
    [648800]=Raft
    [262060]="Darkest Dungeon"
    [234140]="Mad Max"
    [433340]="Slime Rancher"
)

for key in "${!games[@]}"; do
    # Modify this to store your screenshots somewhere else
    dir_dest=$(realpath "$HOME/Pictures/Screenshots/Games")/${games[$key]}
    dir_game=$(realpath "$dir_steam")/$key/screenshots
    # If there are not screenshots currently stored, why bother lol
    if ! [[ -d $dir_game ]]; then #
        continue
    fi
    # If screenshots exist however...
    if (($(fd . "$dir_game" -d 1 -tf | wc -l) > 0)); then
        # Create destination directory
        mkdir -vp "$dir_dest"
        echo "Moving ${games[$key]} screenshots..."
        fd . "$dir_game" -d 1 -tf -x mv -n {} "$dir_dest"/
        # Delete thumbnails (Steam regenerates them)
        echo "Deleting ${games[$key]} thumbnails..."
        rm -rf "$dir_game"/thumbnails
    fi
done
# Clearing up empty directories
fd . "$dir_steam" -td -te -x trash {}

# Cyberpunk stores screenshots inside its wine prefix; collect them too.
cyberpunk_dir=$HOME/Games/cyberpunk-2077/drive_c/users/jawz/Pictures/"Cyberpunk 2077"
if [[ -d $cyberpunk_dir ]]; then
    while IFS= read -r file; do
        echo "Moving cyberpunk screenshots"
        command mv -n "$file" "$HOME/Pictures/Screenshots/Games/Cyberpunk 2077/"
    done < <(fd . "$cyberpunk_dir" -tf)
fi

# Expose each installed Proton build to Lutris as a wine runner symlink.
proton_dir=$HOME/.steam/steam/compatibilitytools.d
if [[ -d "$proton_dir" ]]; then
    while IFS= read -r protonver; do
        lutrisdir=$XDG_DATA_HOME/lutris/runners/wine/$(basename "$protonver")
        if ! [ -d "$lutrisdir" ] && ! [ -L "$lutrisdir" ]; then
            echo "Symlink $lutrisdir doesn't exist, creating link..."
            ln -s "$(realpath "$protonver")"/files "$lutrisdir"
        fi
    done < <(fd . "$proton_dir" -d 1 -td)
fi
# Trash dangling runner symlinks (their Proton build was removed).
fd . "$XDG_DATA_HOME/lutris/runners/wine" -d 1 -tl -x trash {}

# Pass 3: fix image files whose extension disagrees with their MIME type.
while IFS= read -r file; do
    ext=$(file --mime-type "$file" | rg -o '\w+$')
    correct_ext=${ext,,}
    filename=$(basename -- "$file")
    current_ext="${filename##*.}"
    filename="${filename%.*}"
    if echo "$correct_ext" | rg -q 'jpe|jpg|jpeg|png|gif'; then
        if [ "$current_ext" != "$correct_ext" ]; then
            echo "The file $(basename "$file")" \
                "will be renamed, the propper extension is $correct_ext"
            new_name="$filename".$correct_ext
            command mv -n "$(dirname "$file")"/{"$(basename "$file")","$new_name"}
        fi
    fi
done < <(fd . "${directories[@]}" -d 1 -tf)

# Remove litter that the Huion driver and friends leave in $HOME.
files_home_clean=(.pki HuionCore.pid DriverUI.pid huion.log)
for file in "${files_home_clean[@]}"; do
    file=$HOME/$file
    if [ -e "$file" ]; then
        rm -rf "$file"
    fi
done
|
|
||||||
@ -1,38 +0,0 @@
|
|||||||
#!/usr/bin/env nix-shell
#! nix-shell -i bash -p bash curl jq dig
# Dynamic-DNS updater: pushes the current external IP to Namecheap for
# rotehaare.art and to Cloudflare for servidos.lat.
# NOTE(review): the Namecheap DDNS password and the Cloudflare API token are
# committed here in plain text — rotate them and load them from a secret
# store or environment variables instead.

# Shell script to update namecheap.com dynamic dns
# for a domain to your external IP address

# namecheap
hostnames=(cloud @ 6fxAtnPxEeI8hN)
domain=rotehaare.art
password=60d672be5d9d4828a0f96264babe0ac1

# Namecheap's DDNS endpoint updates one host per request.
ip=$(curl -s ipecho.net/plain)
for hostname in "${hostnames[@]}"; do
    curl "https://dynamicdns.park-your-domain.com/update?host=$hostname&domain=$domain&password=$password&ip=$ip"
done

# cloudflare
zone_id=833996ed25eb09f1a50606e0457790e4
record=servidos.lat
record_id=6b117173e53a7511ba36ceb9637ede63
cloudflare_token=VdKosfThQmOcuywLOUq9DY4-df9EmbHrDWyf_vUb

# get record_id
# curl -s -X GET "https://api.cloudflare.com/client/v4/zones/${zone_id}/dns_records?type=A&name=${record}" \
# -H "Authorization: Bearer ${cloudflare_token}" \
# -H "Content-Type: application/json" | jq -r '{"result"}[] | .[0] | .id'

# Only PUT to Cloudflare when the published A record differs from the real
# external IP (compared via a 1.1.1.1 lookup to dodge local caches).
curr_ip=$(curl -s -X GET https://checkip.amazonaws.com)
curr_reg=$(dig ${record} +short @1.1.1.1)
if echo "${curr_reg}" | grep "${curr_ip}"; then
    echo "$(date --rfc-3339=seconds) - OK - Current record matches current IP (${curr_ip})"
else
    curl -s -X PUT "https://api.cloudflare.com/client/v4/zones/${zone_id}/dns_records/${record_id}" \
        -H "Authorization: Bearer ${cloudflare_token}" \
        -H "Content-Type: application/json" \
        --data "{\"type\":\"A\",\"name\":\"${record}\",\"content\":\"$curr_ip\",\"ttl\":1,\"proxied\":false}" >/dev/null
    echo "$(date --rfc-3339=seconds) - NOK - Record Updated to $curr_ip from ${curr_reg}"
fi
|
|
||||||
Loading…
x
Reference in New Issue
Block a user