From a9fcf80b36c9fefb55a8625b739a68cd766291b0 Mon Sep 17 00:00:00 2001 From: GoldenSpring Date: Thu, 25 Dec 2025 21:12:03 +0300 Subject: [PATCH 001/228] started building the script --- ct/sonobarr.sh | 62 +++++++++++++++++++++++++++++++++++++ install/sonobarr-install.sh | 43 +++++++++++++++++++++++++ 2 files changed, 105 insertions(+) create mode 100644 ct/sonobarr.sh create mode 100644 install/sonobarr-install.sh diff --git a/ct/sonobarr.sh b/ct/sonobarr.sh new file mode 100644 index 000000000..ff3938aa1 --- /dev/null +++ b/ct/sonobarr.sh @@ -0,0 +1,62 @@ +#!/usr/bin/env bash +source <(curl -s https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func) +# Copyright (c) 2021-2025 community-scripts ORG +# Author: GoldenSpringness +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://github.com/Dodelidoo-Labs/sonobarr + +APP="sonobarr" +var_tags="${var_tags:-pastebin;storage}" +var_cpu="${var_cpu:-1}" +var_ram="${var_ram:-512}" +var_disk="${var_disk:-20}" +var_os="${var_os:-debian}" +var_version="${var_version:-13}" +var_unprivileged="${var_unprivileged:-1}" + +header_info "$APP" +variables +color +catch_errors + +function update_script() { + header_info + check_container_storage + check_container_resources + + if [[ ! -f "/opt/sonobarr" ]]; then + msg_error "No sonobarr Installation Found!" + exit + fi + + if check_for_gh_release "sonobarr" "Dodelidoo-Labs/sonobarr"; then + msg_info "Stopping sonobarr" + systemctl stop sonobarr + msg_ok "Stopped sonobarr" + + msg_info "Creating Backup" + tar -czf "/opt/sonobarr_backup_$(date +%F).tar.gz" "/opt/sonobarr/upload" + msg_ok "Backup Created" + + CLEAN_INSTALL=1 fetch_and_deploy_gh_release "sonobarr" "Dodelidoo-Labs/sonobarr" "tarball" + + msg_info "Updating sonobarr" + cd /opt/sonobarr + msg_ok "Updated sonobarr" + + msg_info "Starting sonobarr" + systemctl start sonobarr + msg_ok "Started sonobarr" + msg_ok "Update sonobarr" + fi + exit +} + +start +build_container +description + +msg_ok "Completed Successfully!\n" +echo -e "${CREATING}${GN}rustypaste setup has been successfully initialized!${CL}" +echo -e "${INFO}${YW} Access it using the following URL:${CL}" +echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:8000${CL}" diff --git a/install/sonobarr-install.sh b/install/sonobarr-install.sh new file mode 100644 index 000000000..3480a8f21 --- /dev/null +++ b/install/sonobarr-install.sh @@ -0,0 +1,43 @@ +#!/usr/bin/env bash + +# Copyright (c) 2021-2025 community-scripts ORG +# Author: GoldenSpringness +# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE +# Source: https://github.com/Dodelidoo-Labs/sonobarr + +source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" +color +verb_ip6 +catch_errors +setting_up_container +network_check +update_os + +setup_python +fetch_and_deploy_gh_release "sonobarr" "Dodelidoo-Labs/sonobarr" "tarball" + +msg_info "Setting up sonobarr" +cd /opt/sonobarr +pip install --no-cache-dir -r requirements.txt +msg_ok "Set up sonobarr" + +msg_info "Creating Service" +cat </etc/systemd/system/sonobarr +[Unit] +Description=sonobarr Service +After=network.target + +[Service] +WorkingDirectory=/opt/sonobarr +ExecStart=/bin/bash -c 'gunicorn src.Sonobarr:app -c gunicorn_config.py' +Restart=always + +[Install] +WantedBy=multi-user.target +EOF +systemctl enable -q --now sonobarr +msg_ok "Created Service" + +motd_ssh +customize +cleanup_lxc From f2e5f003df2390fb2c07c34bbb0ad34c526a1bf5 Mon Sep 17 00:00:00 2001 From: GoldenSpring Date: Fri, 26 Dec 2025 
01:10:45 +0300 Subject: [PATCH 002/228] last changes --- ct/sonobarr.sh | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/ct/sonobarr.sh b/ct/sonobarr.sh index ff3938aa1..96b599267 100644 --- a/ct/sonobarr.sh +++ b/ct/sonobarr.sh @@ -34,20 +34,13 @@ function update_script() { systemctl stop sonobarr msg_ok "Stopped sonobarr" - msg_info "Creating Backup" - tar -czf "/opt/sonobarr_backup_$(date +%F).tar.gz" "/opt/sonobarr/upload" - msg_ok "Backup Created" - - CLEAN_INSTALL=1 fetch_and_deploy_gh_release "sonobarr" "Dodelidoo-Labs/sonobarr" "tarball" - msg_info "Updating sonobarr" - cd /opt/sonobarr + CLEAN_INSTALL=1 fetch_and_deploy_gh_release "sonobarr" "Dodelidoo-Labs/sonobarr" "tarball" msg_ok "Updated sonobarr" msg_info "Starting sonobarr" systemctl start sonobarr msg_ok "Started sonobarr" - msg_ok "Update sonobarr" fi exit } @@ -59,4 +52,4 @@ description msg_ok "Completed Successfully!\n" echo -e "${CREATING}${GN}rustypaste setup has been successfully initialized!${CL}" echo -e "${INFO}${YW} Access it using the following URL:${CL}" -echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:8000${CL}" +echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:5000${CL}" From bc11b0cd2db954d865ff4a2462379029c09af0b7 Mon Sep 17 00:00:00 2001 From: GoldenSpring Date: Sat, 27 Dec 2025 10:45:27 +0300 Subject: [PATCH 003/228] changed files for testing --- ct/sonobarr.sh | 2 +- frontend/public/json/sonobarr.json | 35 ++++++++++++++++++++++++++++++ misc/build.func | 4 ++-- 3 files changed, 38 insertions(+), 3 deletions(-) create mode 100644 frontend/public/json/sonobarr.json diff --git a/ct/sonobarr.sh b/ct/sonobarr.sh index 96b599267..aac1fcba2 100644 --- a/ct/sonobarr.sh +++ b/ct/sonobarr.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -source <(curl -s https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func) +source <(curl -s https://raw.githubusercontent.com/GoldenSpringness/ProxmoxVED/refs/heads/feature/sonobarr/misc/build.func) # Copyright (c) 2021-2025 community-scripts ORG # Author: GoldenSpringness # License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE diff --git a/frontend/public/json/sonobarr.json b/frontend/public/json/sonobarr.json new file mode 100644 index 000000000..77ae4be76 --- /dev/null +++ b/frontend/public/json/sonobarr.json @@ -0,0 +1,35 @@ +{ + "name": "Sonobarr", + "slug": "sonobarr", + "categories": [ + 14 + ], + "date_created": "2025-12-27", + "type": "ct", + "updateable": true, + "privileged": false, + "interface_port": 5000, + "documentation": "https://github.com/Dodelidoo-Labs/sonobarr", + "config_path": "gunicorn_config.py", + "website": "https://github.com/Dodelidoo-Labs/sonobarr", + "logo": "https://camo.githubusercontent.com/cc3f9cb9e157fbdee667130c50ba431fffc407a3d5c6cdf415574d652dd8484f/68747470733a2f2f696e756265732e6170702f617070732f66696c65735f73686172696e672f7075626c6963707265766965772f356a36574a597243476342696a646f3f66696c653d2f2666696c6549643d323731323226783d3338343026793d3231363026613d7472756526657461673d6535393833393032393962643532643062393863663835613464376161636565", + "description": "Sonobarr marries your existing Lidarr library with Last.fm’s discovery graph to surface artists you'll actually like. 
It runs as a Flask + Socket.IO application, ships with a polished Bootstrap UI, and includes admin tooling so folks can share a single instance safely.", + "install_methods": [ + { + "type": "default", + "script": "ct/sonobarr.sh", + "resources": { + "cpu": 1, + "ram": 1, + "hdd": 20, + "os": "Debian", + "version": "13" + } + } + ], + "default_credentials": { + "username": null, + "password": null + }, + "notes": [] +} diff --git a/misc/build.func b/misc/build.func index c1674fb49..73b7ccf51 100644 --- a/misc/build.func +++ b/misc/build.func @@ -3199,7 +3199,7 @@ EOF' set +Eeuo pipefail # Disable ALL error handling temporarily trap - ERR # Remove ERR trap completely - lxc-attach -n "$CTID" -- bash -c "$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/install/${var_install}.sh)" + lxc-attach -n "$CTID" -- bash -c "$(curl -fsSL https://raw.githubusercontent.com/GoldenSpringness/ProxmoxVED/refs/heads/feature/sonobarr/install/${var_install}.sh)" local lxc_exit=$? set -Eeuo pipefail # Re-enable error handling @@ -3286,7 +3286,7 @@ EOF' if [[ "${DEV_MODE_MOTD:-false}" == "true" ]]; then echo -e "${TAB}${HOLD}${DGN}Setting up MOTD and SSH for debugging...${CL}" if pct exec "$CTID" -- bash -c " - source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/install.func) + source <(curl -fsSL https://raw.githubusercontent.com/GoldenSpringness/ProxmoxVED/refs/heads/feature/sonobarr/misc/install.func) declare -f motd_ssh >/dev/null 2>&1 && motd_ssh || true " >/dev/null 2>&1; then local ct_ip=$(pct exec "$CTID" ip a s dev eth0 2>/dev/null | awk '/inet / {print $2}' | cut -d/ -f1) From 2e2a539a349c6b69ab1b0ade5230c02ede6646c8 Mon Sep 17 00:00:00 2001 From: GoldenSpring Date: Sat, 3 Jan 2026 12:48:41 +0300 Subject: [PATCH 004/228] changed the tags+ removed imaginary setup_python function --- ct/sonobarr.sh | 6 +++--- install/sonobarr-install.sh | 1 - 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/ct/sonobarr.sh b/ct/sonobarr.sh index aac1fcba2..2aeea6426 100644 --- a/ct/sonobarr.sh +++ b/ct/sonobarr.sh @@ -6,9 +6,9 @@ source <(curl -s https://raw.githubusercontent.com/GoldenSpringness/ProxmoxVED/r # Source: https://github.com/Dodelidoo-Labs/sonobarr APP="sonobarr" -var_tags="${var_tags:-pastebin;storage}" -var_cpu="${var_cpu:-1}" -var_ram="${var_ram:-512}" +var_tags="${var_tags:-storage}" +var_cpu="${var_cpu:-2}" +var_ram="${var_ram:-1024}" var_disk="${var_disk:-20}" var_os="${var_os:-debian}" var_version="${var_version:-13}" diff --git a/install/sonobarr-install.sh b/install/sonobarr-install.sh index 3480a8f21..217669a7a 100644 --- a/install/sonobarr-install.sh +++ b/install/sonobarr-install.sh @@ -13,7 +13,6 @@ setting_up_container network_check update_os -setup_python fetch_and_deploy_gh_release "sonobarr" "Dodelidoo-Labs/sonobarr" "tarball" msg_info "Setting up sonobarr" From c7b2c885db964c0638aba6e3bf79d4018c142e9f Mon Sep 17 00:00:00 2001 From: GoldenSpring Date: Sat, 3 Jan 2026 12:57:57 +0300 Subject: [PATCH 005/228] added python venv creation --- ct/sonobarr.sh | 4 ++-- install/sonobarr-install.sh | 6 ++++++ 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/ct/sonobarr.sh b/ct/sonobarr.sh index 2aeea6426..8f502e6fa 100644 --- a/ct/sonobarr.sh +++ b/ct/sonobarr.sh @@ -7,8 +7,8 @@ source <(curl -s https://raw.githubusercontent.com/GoldenSpringness/ProxmoxVED/r APP="sonobarr" var_tags="${var_tags:-storage}" -var_cpu="${var_cpu:-2}" -var_ram="${var_ram:-1024}" +var_cpu="${var_cpu:-1}" +var_ram="${var_ram:-512}" 
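Because each var_* assignment above uses the ${name:-fallback} form, the hard-coded value only applies when that variable is not already set in the environment, so resources can be raised per run without editing the script. A minimal sketch of such an override (the local path is illustrative; point it at whichever copy of ct/sonobarr.sh is actually executed):

# Give this run 2 vCPUs and 1 GiB of RAM without touching the script;
# anything left unset still falls back to the defaults above.
var_cpu=2 var_ram=1024 bash ct/sonobarr.sh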
var_disk="${var_disk:-20}" var_os="${var_os:-debian}" var_version="${var_version:-13}" diff --git a/install/sonobarr-install.sh b/install/sonobarr-install.sh index 217669a7a..21a264c95 100644 --- a/install/sonobarr-install.sh +++ b/install/sonobarr-install.sh @@ -16,7 +16,13 @@ update_os fetch_and_deploy_gh_release "sonobarr" "Dodelidoo-Labs/sonobarr" "tarball" msg_info "Setting up sonobarr" + cd /opt/sonobarr + +python3 -m venv venv + +source venv/bin/activate + pip install --no-cache-dir -r requirements.txt msg_ok "Set up sonobarr" From 26ee2996a861a2a5ddcacedb24d525a71d3b4f6d Mon Sep 17 00:00:00 2001 From: GoldenSpring Date: Sat, 3 Jan 2026 13:01:03 +0300 Subject: [PATCH 006/228] added venv library installation --- install/sonobarr-install.sh | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/install/sonobarr-install.sh b/install/sonobarr-install.sh index 21a264c95..f20c9d384 100644 --- a/install/sonobarr-install.sh +++ b/install/sonobarr-install.sh @@ -14,15 +14,12 @@ network_check update_os fetch_and_deploy_gh_release "sonobarr" "Dodelidoo-Labs/sonobarr" "tarball" - -msg_info "Setting up sonobarr" - cd /opt/sonobarr +msg_info "Setting up sonobarr" +apt install python3.13-venv python3 -m venv venv - source venv/bin/activate - pip install --no-cache-dir -r requirements.txt msg_ok "Set up sonobarr" From ddeaccb508edaa8209f3cb236625b1f1d0c87f7c Mon Sep 17 00:00:00 2001 From: GoldenSpring Date: Sat, 3 Jan 2026 13:58:55 +0300 Subject: [PATCH 007/228] added -y to apt installation --- install/sonobarr-install.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/install/sonobarr-install.sh b/install/sonobarr-install.sh index f20c9d384..6bef91121 100644 --- a/install/sonobarr-install.sh +++ b/install/sonobarr-install.sh @@ -17,7 +17,7 @@ fetch_and_deploy_gh_release "sonobarr" "Dodelidoo-Labs/sonobarr" "tarball" cd /opt/sonobarr msg_info "Setting up sonobarr" -apt install python3.13-venv +apt install python3.13-venv -y python3 -m venv venv source venv/bin/activate pip install --no-cache-dir -r requirements.txt From 0392720e630f3a72527a0c76ff358dc5b6cba8a2 Mon Sep 17 00:00:00 2001 From: GoldenSpring Date: Sat, 3 Jan 2026 14:03:51 +0300 Subject: [PATCH 008/228] fixed the naming of the sonobarr service --- install/sonobarr-install.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/install/sonobarr-install.sh b/install/sonobarr-install.sh index 6bef91121..cf248f5e9 100644 --- a/install/sonobarr-install.sh +++ b/install/sonobarr-install.sh @@ -24,7 +24,7 @@ pip install --no-cache-dir -r requirements.txt msg_ok "Set up sonobarr" msg_info "Creating Service" -cat </etc/systemd/system/sonobarr +cat </etc/systemd/system/sonobarr.service [Unit] Description=sonobarr Service After=network.target From 0cd7eed2763464fb84fa4852ecd3858c8b22ff72 Mon Sep 17 00:00:00 2001 From: juronja <101410098+juronja@users.noreply.github.com> Date: Thu, 15 Jan 2026 15:46:35 +0000 Subject: [PATCH 009/228] initial commit --- frontend/public/json/truenas-vm.json | 56 +++ vm/truenas-vm.sh | 592 +++++++++++++++++++++++++++ 2 files changed, 648 insertions(+) create mode 100644 frontend/public/json/truenas-vm.json create mode 100644 vm/truenas-vm.sh diff --git a/frontend/public/json/truenas-vm.json b/frontend/public/json/truenas-vm.json new file mode 100644 index 000000000..d4675b55f --- /dev/null +++ b/frontend/public/json/truenas-vm.json @@ -0,0 +1,56 @@ +{ + "name": "TrueNAS Community Edition", + "slug": "truenas-community-edition", + "categories": [ + 2 + ], + "date_created": 
"2026-01-16", + "type": "vm", + "updateable": true, + "privileged": false, + "interface_port": null, + "documentation": "https://www.truenas.com/docs/", + "website": "https://www.truenas.com/truenas-community-edition/", + "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/truenas-core.webp", + "config_path": "", + "description": "TrueNAS Community Edition is the world's most deployed storage software. Free, flexible and build on OpenZFS with Docker.", + "install_methods": [ + { + "type": "default", + "script": "vm/truenas-vm.sh", + "resources": { + "cpu": 2, + "ram": 8192, + "hdd": 16, + "os": null, + "version": null + } + } + ], + "default_credentials": { + "username": null, + "password": null + }, + "notes": [ + { + "text": "The default install uses the latest stable release. Please use advanced install for more options.", + "type": "info" + }, + { + "text": "8 GB of RAM is the minimum requirement, but 16 GB+ is recommended for optimal performance.", + "type": "info" + }, + { + "text": "After installation, you will be prompted to unmount the media. To do this: Go to the VM's Hardware tab > select the CD/DVD Drive > Edit > select 'Do not use any media'", + "type": "info" + }, + { + "text": "While you can import onboard disks during install, it is highly recommended to use an HBA to pass through disks for production environments.", + "type": "warning" + }, + { + "text": "ECC RAM is strongly recommended to ensure data integrity, as ZFS checksumming can be compromised by bad data in RAM before the data is written to the pool.", + "type": "warning" + } + ] +} diff --git a/vm/truenas-vm.sh b/vm/truenas-vm.sh new file mode 100644 index 000000000..ab796ee77 --- /dev/null +++ b/vm/truenas-vm.sh @@ -0,0 +1,592 @@ +#!/usr/bin/env bash + +# Copyright (c) 2021-2026 community-scripts ORG +# Author: juronja +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE + +source /dev/stdin <<<$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/api.func) + +function header_info() { + clear + cat <<"EOF" + ______ _ _____ _____ + /_ __/______ _____ / | / / | / ___/ + / / / ___/ / / / _ \/ |/ / /| | \__ \ + / / / / / /_/ / __/ /| / ___ |___/ / +/_/ /_/ \__,_/\___/_/ |_/_/ |_/____/ + (Community Edition) +EOF +} +header_info +echo -e "\n Loading..." 
+GEN_MAC=02:$(openssl rand -hex 5 | awk '{print toupper($0)}' | sed 's/\(..\)/\1:/g; s/.$//') +RANDOM_UUID="$(cat /proc/sys/kernel/random/uuid)" +METHOD="" + +YW=$(echo "\033[33m") +BL=$(echo "\033[36m") +RD=$(echo "\033[01;31m") +BGN=$(echo "\033[4;92m") +GN=$(echo "\033[1;92m") +DGN=$(echo "\033[32m") +CL=$(echo "\033[m") + +CL=$(echo "\033[m") +BOLD=$(echo "\033[1m") +BFR="\\r\\033[K" +HOLD=" " +TAB=" " + +CM="${TAB}✔️${TAB}${CL}" +CROSS="${TAB}✖️${TAB}${CL}" +INFO="${TAB}💡${TAB}${CL}" +OS="${TAB}🖥️${TAB}${CL}" +CONTAINERTYPE="${TAB}📦${TAB}${CL}" +ISO="${TAB}📀${TAB}${CL}" +DISKSIZE="${TAB}💾${TAB}${CL}" +CPUCORE="${TAB}🧠${TAB}${CL}" +RAMSIZE="${TAB}🛠️${TAB}${CL}" +CONTAINERID="${TAB}🆔${TAB}${CL}" +HOSTNAME="${TAB}🏠${TAB}${CL}" +BRIDGE="${TAB}🌉${TAB}${CL}" +GATEWAY="${TAB}🌐${TAB}${CL}" +DISK="${TAB}💽${TAB}${CL}" +DEFAULT="${TAB}⚙️${TAB}${CL}" +MACADDRESS="${TAB}🔗${TAB}${CL}" +VLANTAG="${TAB}🏷️${TAB}${CL}" +CREATING="${TAB}🚀${TAB}${CL}" +ADVANCED="${TAB}🧩${TAB}${CL}" +CLOUD="${TAB}☁️${TAB}${CL}" + +set -e +trap 'error_handler $LINENO "$BASH_COMMAND"' ERR +trap cleanup EXIT +trap 'post_update_to_api "failed" "INTERRUPTED"' SIGINT +trap 'post_update_to_api "failed" "TERMINATED"' SIGTERM +function error_handler() { + local exit_code="$?" + local line_number="$1" + local command="$2" + local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}" + post_update_to_api "failed" "${command}" + echo -e "\n$error_message\n" + cleanup_vmid +} + +function truenas_iso_lookup() { + local BASE_URL="https://download.truenas.com" + local current_year=$(date +%y) + local last_year=$(date -d "1 year ago" +%y) + local year_pattern="${current_year}\.|${last_year}\." + + declare -A latest_stables + local pre_releases=() + + local all_paths=$( + curl -sL "$BASE_URL" | + grep -oE 'href="[^"]+\.iso"' | + sed 's/href="//; s/"$//' | + grep -vE '(nightly|ALPHA)' | + grep -E "$year_pattern" + ) + + while read -r path; do + local filename=$(basename "$path") + local version=$(echo "$filename" | sed -E 's/.*TrueNAS-SCALE-([0-9]{2}\.[0-9]{2}(\.[0-9]+)*(-RC[0-9]|-BETA[0-9])?)\.iso.*/\1/') + if [[ "$version" =~ (RC|BETA) ]]; then + pre_releases+=("$path") + else + local major_version=$(echo "$version" | cut -d'.' 
-f1,2) + local current_stored_path=${latest_stables["$major_version"]} + if [[ -z "$current_stored_path" ]]; then + latest_stables["$major_version"]="$path" + else + local stored_version=$(basename "$current_stored_path" | sed -E 's/.*TrueNAS-SCALE-([0-9]{2}\.[0-9]{2}(\.[0-9]+)*)\.iso.*/\1/') + if printf '%s\n' "$version" "$stored_version" | sort -V | tail -n 1 | grep -q "$version"; then + latest_stables["$major_version"]="$path" + fi + fi + fi + done <<<"$all_paths" + + for key in "${!latest_stables[@]}"; do + echo "${latest_stables[$key]#/}" + done + + for pre in "${pre_releases[@]}"; do + echo "${pre#/}" + done | sort -V +} + +function get_valid_nextid() { + local try_id + try_id=$(pvesh get /cluster/nextid) + while true; do + if [ -f "/etc/pve/qemu-server/${try_id}.conf" ] || [ -f "/etc/pve/lxc/${try_id}.conf" ]; then + try_id=$((try_id + 1)) + continue + fi + if lvs --noheadings -o lv_name | grep -qE "(^|[-_])${try_id}($|[-_])"; then + try_id=$((try_id + 1)) + continue + fi + break + done + echo "$try_id" +} + +function cleanup_vmid() { + if qm status $VMID &>/dev/null; then + qm stop $VMID &>/dev/null + qm destroy $VMID &>/dev/null + fi +} + +function cleanup() { + popd >/dev/null + post_update_to_api "done" "none" + rm -rf $TEMP_DIR +} + +TEMP_DIR=$(mktemp -d) +pushd $TEMP_DIR >/dev/null +if whiptail --backtitle "Proxmox VE Helper Scripts" --title "TrueNAS VM" --yesno "This will create a New TrueNAS VM. Proceed?" 10 58; then + : +else + header_info && echo -e "${CROSS}${RD}User exited script${CL}\n" && exit +fi + +function msg_info() { + local msg="$1" + echo -ne "${TAB}${YW}${HOLD}${msg}${HOLD}" +} + +function msg_ok() { + local msg="$1" + echo -e "${BFR}${CM}${GN}${msg}${CL}" +} + +function msg_error() { + local msg="$1" + echo -e "${BFR}${CROSS}${RD}${msg}${CL}" +} + +function check_root() { + if [[ "$(id -u)" -ne 0 || $(ps -o comm= -p $PPID) == "sudo" ]]; then + clear + msg_error "Please run this script as root." + echo -e "\nExiting..." + sleep 2 + exit + fi +} + +# This function checks the version of Proxmox Virtual Environment (PVE) and exits if the version is not supported. +# Supported: Proxmox VE 8.0.x – 8.9.x and 9.0 – 9.1 +pve_check() { + local PVE_VER + PVE_VER="$(pveversion | awk -F'/' '{print $2}' | awk -F'-' '{print $1}')" + + # Check for Proxmox VE 8.x: allow 8.0–8.9 + if [[ "$PVE_VER" =~ ^8\.([0-9]+) ]]; then + local MINOR="${BASH_REMATCH[1]}" + if ((MINOR < 0 || MINOR > 9)); then + msg_error "This version of Proxmox VE is not supported." + msg_error "Supported: Proxmox VE version 8.0 – 8.9" + exit 1 + fi + return 0 + fi + + # Check for Proxmox VE 9.x: allow 9.0–9.1 + if [[ "$PVE_VER" =~ ^9\.([0-9]+) ]]; then + local MINOR="${BASH_REMATCH[1]}" + if ((MINOR < 0 || MINOR > 1)); then + msg_error "This version of Proxmox VE is not yet supported." + msg_error "Supported: Proxmox VE version 9.0 – 9.1" + exit 1 + fi + return 0 + fi + + # All other unsupported versions + msg_error "This version of Proxmox VE is not supported." + msg_error "Supported versions: Proxmox VE 8.0 – 8.x or 9.0 – 9.1" + exit 1 +} + +function arch_check() { + if [ "$(dpkg --print-architecture)" != "amd64" ]; then + echo -e "\n ${INFO}${YWB}This script will not work with PiMox! \n" + echo -e "\n ${YWB}Visit https://github.com/asylumexp/Proxmox for ARM64 support. \n" + echo -e "Exiting..." 
+ sleep 2 + exit + fi +} + +function ssh_check() { + if command -v pveversion >/dev/null 2>&1; then + if [ -n "${SSH_CLIENT:+x}" ]; then + if whiptail --backtitle "Proxmox VE Helper Scripts" --defaultno --title "SSH DETECTED" --yesno "It's suggested to use the Proxmox shell instead of SSH, since SSH can create issues while gathering variables. Would you like to proceed with using SSH?" 10 62; then + echo "you've been warned" + else + clear + exit + fi + fi + fi +} + +function exit-script() { + clear + echo -e "\n${CROSS}${RD}User exited script${CL}\n" + exit +} + +function default_settings() { + VMID=$(get_valid_nextid) + ISO_DEFAULT="latest stable" + FORMAT="" + MACHINE="q35" + DISK_SIZE="16" + HN="truenas" + CPU_TYPE="host" + CORE_COUNT="2" + RAM_SIZE="8192" + BRG="vmbr0" + MAC="$GEN_MAC" + VLAN="" + MTU="" + START_VM="yes" + METHOD="default" + echo -e "${CONTAINERID}${BOLD}${DGN}Virtual Machine ID: ${BGN}${VMID}${CL}" + echo -e "${ISO}${BOLD}${DGN}ISO Chosen: ${BGN}${ISO_DEFAULT}${CL}" + echo -e "${CONTAINERTYPE}${BOLD}${DGN}Machine Type: ${BGN}q35${CL}" + echo -e "${DISKSIZE}${BOLD}${DGN}Disk Size: ${BGN}${DISK_SIZE}${CL}" + echo -e "${HOSTNAME}${BOLD}${DGN}Hostname: ${BGN}${HN}${CL}" + echo -e "${OS}${BOLD}${DGN}CPU Model: ${BGN}${CPU_TYPE}${CL}" + echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}${CORE_COUNT}${CL}" + echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}${RAM_SIZE}${CL}" + echo -e "${BRIDGE}${BOLD}${DGN}Bridge: ${BGN}${BRG}${CL}" + echo -e "${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}${MAC}${CL}" + echo -e "${VLANTAG}${BOLD}${DGN}VLAN: ${BGN}Default${CL}" + echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}Default${CL}" + echo -e "${GATEWAY}${BOLD}${DGN}Start VM when completed: ${BGN}yes${CL}" + echo -e "${CREATING}${BOLD}${DGN}Creating a TrueNAS VM using the above default settings${CL}" +} + +function advanced_settings() { + METHOD="advanced" + [ -z "${VMID:-}" ] && VMID=$(get_valid_nextid) + while true; do + if VMID=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Virtual Machine ID" 8 58 $VMID --title "VIRTUAL MACHINE ID" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z "$VMID" ]; then + VMID=$(get_valid_nextid) + fi + if pct status "$VMID" &>/dev/null || qm status "$VMID" &>/dev/null; then + echo -e "${CROSS}${RD} ID $VMID is already in use${CL}" + sleep 2 + continue + fi + echo -e "${CONTAINERID}${BOLD}${DGN}Virtual Machine ID: ${BGN}$VMID${CL}" + break + else + exit-script + fi + done + + # ISO lookup + ISOARRAY=() + while read -r ISOPATH; do + FILENAME=$(basename "$ISOPATH") + ISOARRAY+=("$ISOPATH" "$FILENAME" "OFF") + done < <(truenas_iso_lookup | sort -V) + if [ ${#ISOARRAY[@]} -eq 0 ]; then + echo "No ISOs found." + exit 1 + fi + + if SELECTED_ISO=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "SELECT ISO TO INSTALL" --notags --radiolist "\nSelect version (BETA/RC + Latest stables):" 20 58 12 "${ISOARRAY[@]}" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + echo -e "${ISO}${BOLD}${DGN}ISO Chosen: ${BGN}$(basename "$SELECTED_ISO")${CL}" + else + exit-script + fi + + if DISK_SIZE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Disk Size in GiB (e.g., 10, 20)" 8 58 "$DISK_SIZE" --title "DISK SIZE" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + DISK_SIZE=$(echo "$DISK_SIZE" | tr -d ' ') + if [[ "$DISK_SIZE" =~ ^[0-9]+$ ]]; then + echo -e "${DISKSIZE}${BOLD}${DGN}Disk Size: ${BGN}$DISK_SIZE${CL}" + else + echo -e "${DISKSIZE}${BOLD}${RD}Invalid Disk Size. 
Please use a number (e.g., 10).${CL}" + exit-script + fi + else + exit-script + fi + + if VM_NAME=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Hostname" 8 58 truenas --title "HOSTNAME" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z $VM_NAME ]; then + HN="truenas" + echo -e "${HOSTNAME}${BOLD}${DGN}Hostname: ${BGN}$HN${CL}" + else + HN=$(echo ${VM_NAME,,} | tr -d ' ') + echo -e "${HOSTNAME}${BOLD}${DGN}Hostname: ${BGN}$HN${CL}" + fi + else + exit-script + fi + + if CPU_TYPE1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "CPU MODEL" --radiolist "Choose CPU Model" --cancel-button Exit-Script 10 58 2 \ + "KVM64" "Default – safe for migration/compatibility" OFF \ + "Host" "Use host CPU features (faster, no migration)" ON \ + 3>&1 1>&2 2>&3); then + case "$CPU_TYPE1" in + Host) + echo -e "${OS}${BOLD}${DGN}CPU Model: ${BGN}Host${CL}" + CPU_TYPE="host" + ;; + *) + echo -e "${OS}${BOLD}${DGN}CPU Model: ${BGN}KVM64${CL}" + CPU_TYPE="" + ;; + esac + else + exit-script + fi + + if CORE_COUNT=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate CPU Cores" 8 58 2 --title "CORE COUNT" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z $CORE_COUNT ]; then + CORE_COUNT="2" + echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}$CORE_COUNT${CL}" + else + echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}$CORE_COUNT${CL}" + fi + else + exit-script + fi + + if RAM_SIZE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate RAM in MiB" 8 58 8192 --title "RAM" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z $RAM_SIZE ]; then + RAM_SIZE="8192" + echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}$RAM_SIZE${CL}" + else + echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}$RAM_SIZE${CL}" + fi + else + exit-script + fi + + if BRG=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a Bridge" 8 58 vmbr0 --title "BRIDGE" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z $BRG ]; then + BRG="vmbr0" + echo -e "${BRIDGE}${BOLD}${DGN}Bridge: ${BGN}$BRG${CL}" + else + echo -e "${BRIDGE}${BOLD}${DGN}Bridge: ${BGN}$BRG${CL}" + fi + else + exit-script + fi + + if MAC1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a MAC Address" 8 58 $GEN_MAC --title "MAC ADDRESS" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z $MAC1 ]; then + MAC="$GEN_MAC" + echo -e "${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}$MAC${CL}" + else + MAC="$MAC1" + echo -e "${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}$MAC1${CL}" + fi + else + exit-script + fi + + if VLAN1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a Vlan(leave blank for default)" 8 58 --title "VLAN" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z $VLAN1 ]; then + VLAN1="Default" + VLAN="" + echo -e "${VLANTAG}${BOLD}${DGN}VLAN: ${BGN}$VLAN1${CL}" + else + VLAN=",tag=$VLAN1" + echo -e "${VLANTAG}${BOLD}${DGN}VLAN: ${BGN}$VLAN1${CL}" + fi + else + exit-script + fi + + if MTU1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Interface MTU Size (leave blank for default)" 8 58 --title "MTU SIZE" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z $MTU1 ]; then + MTU1="Default" + MTU="" + echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}$MTU1${CL}" + else + MTU=",mtu=$MTU1" + echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}$MTU1${CL}" + fi + else + exit-script + fi + + if (whiptail --backtitle "Proxmox VE Helper Scripts" --defaultno --title "IMPORT ONBOARD DISKS" --yesno 
"Would you like to import onboard disks?" 10 58); then + echo -e "${DISK}${BOLD}${DGN}Import onboard disks: ${BGN}yes${CL}" + IMPORT_DISKS="yes" + else + echo -e "${DISK}${BOLD}${DGN}Import onboard disks: ${BGN}no${CL}" + IMPORT_DISKS="no" + fi + + if (whiptail --backtitle "Proxmox VE Helper Scripts" --title "START VIRTUAL MACHINE" --yesno "Start VM when completed?" 10 58); then + echo -e "${GATEWAY}${BOLD}${DGN}Start VM when completed: ${BGN}yes${CL}" + START_VM="yes" + else + echo -e "${GATEWAY}${BOLD}${DGN}Start VM when completed: ${BGN}no${CL}" + START_VM="no" + fi + + if (whiptail --backtitle "Proxmox VE Helper Scripts" --title "ADVANCED SETTINGS COMPLETE" --yesno "Ready to create a TrueNAS VM?" --no-button Do-Over 10 58); then + echo -e "${CREATING}${BOLD}${DGN}Creating a TrueNAS VM using the above advanced settings${CL}" + else + header_info + echo -e "${ADVANCED}${BOLD}${RD}Using Advanced Settings${CL}" + advanced_settings + fi +} + +function start_script() { + if (whiptail --backtitle "Proxmox VE Helper Scripts" --title "SETTINGS" --yesno "Use Default Settings?" --no-button Advanced 10 58); then + header_info + echo -e "${DEFAULT}${BOLD}${BL}Using Default Settings${CL}" + default_settings + else + header_info + echo -e "${ADVANCED}${BOLD}${RD}Using Advanced Settings${CL}" + advanced_settings + fi +} +check_root +arch_check +pve_check +ssh_check +start_script +post_to_api_vm + +msg_info "Validating Storage" +while read -r line; do + TAG=$(echo $line | awk '{print $1}') + TYPE=$(echo $line | awk '{printf "%-10s", $2}') + FREE=$(echo $line | numfmt --field 4-6 --from-unit=K --to=iec --format %.2f | awk '{printf( "%9sB", $6)}') + ITEM=" Type: $TYPE Free: $FREE " + OFFSET=2 + if [[ $((${#ITEM} + $OFFSET)) -gt ${MSG_MAX_LENGTH:-} ]]; then + MSG_MAX_LENGTH=$((${#ITEM} + $OFFSET)) + fi + STORAGE_MENU+=("$TAG" "$ITEM" "OFF") +done < <(pvesm status -content images | awk 'NR>1') +VALID=$(pvesm status -content images | awk 'NR>1') +if [ -z "$VALID" ]; then + msg_error "Unable to detect a valid storage location." + exit +elif [ $((${#STORAGE_MENU[@]} / 3)) -eq 1 ]; then + STORAGE=${STORAGE_MENU[0]} +else + while [ -z "${STORAGE:+x}" ]; do + if [ -n "$SPINNER_PID" ] && ps -p $SPINNER_PID >/dev/null; then kill $SPINNER_PID >/dev/null; fi + printf "\e[?25h" + STORAGE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "Storage Pools" --radiolist \ + "Which storage pool would you like to use for ${HN}?\nTo make a selection, use the Spacebar.\n" \ + 16 $(($MSG_MAX_LENGTH + 23)) 6 \ + "${STORAGE_MENU[@]}" 3>&1 1>&2 2>&3) + done +fi +msg_ok "Using ${CL}${BL}$STORAGE${CL} ${GN}for Storage Location." +msg_ok "Virtual Machine ID is ${CL}${BL}$VMID${CL}." + +if [ -z "${SELECTED_ISO:-}" ]; then + # Fallback: Find the latest stable release only (excluding RC/BETA for safety) + SELECTED_ISO=$(truenas_iso_lookup | grep -vE 'RC|BETA' | sort -V | tail -n 1) + + if [ -z "$SELECTED_ISO" ]; then + msg_error "Could not find a stable ISO for fallback." + exit 1 + fi +fi + +FULL_URL="https://download.truenas.com/${SELECTED_ISO#/}" +ISO_NAME=$(basename "$FULL_URL") +CACHE_DIR="/var/lib/vz/template/iso" +CACHE_FILE="$CACHE_DIR/$ISO_NAME" + +if [[ ! 
-s "$CACHE_FILE" ]]; then + msg_info "Retrieving the ISO for the TrueNAS Disk Image" + curl -f#SL -o "$CACHE_FILE" "$FULL_URL" + msg_ok "Downloaded ${CL}${BL}$(basename "$CACHE_FILE")${CL}" +else + msg_ok "Using cached image ${CL}${BL}$(basename "$CACHE_FILE")${CL}" +fi + +set -o pipefail +msg_info "Creating TrueNAS VM shell" +qm create "$VMID" -machine q35 -bios ovmf -agent enabled=1 -tablet 0 -localtime 1 -cpu "$CPU_TYPE" \ + -cores "$CORE_COUNT" -memory "$RAM_SIZE" -balloon 0 -name "$HN" -tags community-script \ + -net0 "virtio,bridge=$BRG,macaddr=$MAC$VLAN$MTU" -onboot 1 -ostype l26 \ + -efidisk0 $STORAGE:1,efitype=4m,pre-enrolled-keys=0 -scsi0 $STORAGE:$DISK_SIZE,ssd=1,iothread=on \ + -scsihw virtio-scsi-single -cdrom local:iso/$ISO_NAME -vga virtio >/dev/null +msg_ok "Created VM shell" + +if [ "$IMPORT_DISKS" == "yes" ]; then + msg_info "Importing onboard disks" + DISKARRAY=() + SCSI_NR=0 + + while read -r LSOUTPUT; do + DISKARRAY+=("$LSOUTPUT" "" "OFF") + done < <(ls /dev/disk/by-id | grep -E '^ata-|^nvme-' | grep -v 'part') + + SELECTIONS=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "SELECT DISKS TO IMPORT" --checklist "\nSelect disk IDs to import. (Use Spacebar to select)\n" --cancel-button "Exit Script" 20 58 10 "${DISKARRAY[@]}" 3>&1 1>&2 2>&3 | tr -d '"') || exit + + for SELECTION in $SELECTIONS; do + ((++SCSI_NR)) + qm set $VMID --scsi$SCSI_NR /dev/disk/by-id/$SELECTION + done + msg_ok "Disks imported successfully" +fi + +DESCRIPTION=$( + cat < + + Logo + + +

TrueNAS VM

+ +

+ + spend Coffee + +

+ + + + GitHub + + + + Discussions + + + + Issues + + +EOF +) +qm set "$VMID" -description "$DESCRIPTION" >/dev/null + +msg_ok "Created a TrueNAS VM ${CL}${BL}(${HN})" +if [ "$START_VM" == "yes" ]; then + msg_info "Starting TrueNAS VM" + qm start $VMID + msg_ok "Started TrueNAS VM" +fi + +msg_ok "Completed Successfully! Go to VM console and start installation process.\n" From 9872ea39f3824f0f996ab9882c33f23ec6d2b3a8 Mon Sep 17 00:00:00 2001 From: juronja <101410098+juronja@users.noreply.github.com> Date: Thu, 15 Jan 2026 15:52:19 +0000 Subject: [PATCH 010/228] api func source changed to VED --- vm/truenas-vm.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/vm/truenas-vm.sh b/vm/truenas-vm.sh index ab796ee77..5808b070e 100644 --- a/vm/truenas-vm.sh +++ b/vm/truenas-vm.sh @@ -4,7 +4,7 @@ # Author: juronja # License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE -source /dev/stdin <<<$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/api.func) +source /dev/stdin <<<$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/api.func) function header_info() { clear @@ -262,7 +262,7 @@ function default_settings() { METHOD="default" echo -e "${CONTAINERID}${BOLD}${DGN}Virtual Machine ID: ${BGN}${VMID}${CL}" echo -e "${ISO}${BOLD}${DGN}ISO Chosen: ${BGN}${ISO_DEFAULT}${CL}" - echo -e "${CONTAINERTYPE}${BOLD}${DGN}Machine Type: ${BGN}q35${CL}" + echo -e "${CONTAINERTYPE}${BOLD}${DGN}Machine Type: ${BGN}${MACHINE}${CL}" echo -e "${DISKSIZE}${BOLD}${DGN}Disk Size: ${BGN}${DISK_SIZE}${CL}" echo -e "${HOSTNAME}${BOLD}${DGN}Hostname: ${BGN}${HN}${CL}" echo -e "${OS}${BOLD}${DGN}CPU Model: ${BGN}${CPU_TYPE}${CL}" From e748ad5a6d7d1d9da5d65bf48cf1f79b1737a8b2 Mon Sep 17 00:00:00 2001 From: juronja <101410098+juronja@users.noreply.github.com> Date: Thu, 15 Jan 2026 16:02:20 +0000 Subject: [PATCH 011/228] added sleep for disk pickup --- vm/truenas-vm.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/vm/truenas-vm.sh b/vm/truenas-vm.sh index 5808b070e..453979eac 100644 --- a/vm/truenas-vm.sh +++ b/vm/truenas-vm.sh @@ -582,6 +582,8 @@ EOF ) qm set "$VMID" -description "$DESCRIPTION" >/dev/null +sleep 5 + msg_ok "Created a TrueNAS VM ${CL}${BL}(${HN})" if [ "$START_VM" == "yes" ]; then msg_info "Starting TrueNAS VM" From 42b192272db5fe2686db0b894f6c8a75a0004a17 Mon Sep 17 00:00:00 2001 From: juronja <101410098+juronja@users.noreply.github.com> Date: Thu, 15 Jan 2026 16:19:00 +0000 Subject: [PATCH 012/228] testing with 3 sec sleep --- vm/truenas-vm.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/vm/truenas-vm.sh b/vm/truenas-vm.sh index 453979eac..cdbc789b4 100644 --- a/vm/truenas-vm.sh +++ b/vm/truenas-vm.sh @@ -557,7 +557,7 @@ DESCRIPTION=$( Logo -

TrueNAS VM

+

TrueNAS Community Edition

@@ -582,7 +582,7 @@ EOF ) qm set "$VMID" -description "$DESCRIPTION" >/dev/null -sleep 5 +sleep 3 msg_ok "Created a TrueNAS VM ${CL}${BL}(${HN})" if [ "$START_VM" == "yes" ]; then From d979edecf1d7f5bcfc16cbe9ca7111cefc378d68 Mon Sep 17 00:00:00 2001 From: juronja <101410098+juronja@users.noreply.github.com> Date: Fri, 16 Jan 2026 11:16:21 +0000 Subject: [PATCH 013/228] Fetching iso list msg --- vm/truenas-vm.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/vm/truenas-vm.sh b/vm/truenas-vm.sh index cdbc789b4..7f46ce73d 100644 --- a/vm/truenas-vm.sh +++ b/vm/truenas-vm.sh @@ -296,7 +296,7 @@ function advanced_settings() { fi done - # ISO lookup + msg_info "Fetching iso list from https://download.truenas.com/" ISOARRAY=() while read -r ISOPATH; do FILENAME=$(basename "$ISOPATH") @@ -591,4 +591,4 @@ if [ "$START_VM" == "yes" ]; then msg_ok "Started TrueNAS VM" fi -msg_ok "Completed Successfully! Go to VM console and start installation process.\n" +msg_ok "Completed Successfully!\n" From 773d588232229d01c256171b3d10e88b788de2d7 Mon Sep 17 00:00:00 2001 From: juronja <101410098+juronja@users.noreply.github.com> Date: Fri, 16 Jan 2026 11:26:25 +0000 Subject: [PATCH 014/228] removed info message --- vm/truenas-vm.sh | 1 - 1 file changed, 1 deletion(-) diff --git a/vm/truenas-vm.sh b/vm/truenas-vm.sh index 7f46ce73d..675a4de7a 100644 --- a/vm/truenas-vm.sh +++ b/vm/truenas-vm.sh @@ -296,7 +296,6 @@ function advanced_settings() { fi done - msg_info "Fetching iso list from https://download.truenas.com/" ISOARRAY=() while read -r ISOPATH; do FILENAME=$(basename "$ISOPATH") From fabeb9f0d6897c35a5fd55ba86f2e98ff1987a67 Mon Sep 17 00:00:00 2001 From: juronja <101410098+juronja@users.noreply.github.com> Date: Fri, 16 Jan 2026 11:34:48 +0000 Subject: [PATCH 015/228] added comments for custom code specific to truenas --- vm/truenas-vm.sh | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/vm/truenas-vm.sh b/vm/truenas-vm.sh index 675a4de7a..b81525ee2 100644 --- a/vm/truenas-vm.sh +++ b/vm/truenas-vm.sh @@ -73,6 +73,9 @@ function error_handler() { cleanup_vmid } +# Scrapes the TrueNAS download portal for ISO paths from the current and previous year, +# filtering out nightlies/alphas and returning the latest stable releases for each major +# version along with any beta or RC pre-releases. 
function truenas_iso_lookup() { local BASE_URL="https://download.truenas.com" local current_year=$(date +%y) @@ -296,6 +299,7 @@ function advanced_settings() { fi done + # Fetching iso list from TrueNAS downloads for whiptail radiolist ISOARRAY=() while read -r ISOPATH; do FILENAME=$(basename "$ISOPATH") @@ -531,6 +535,7 @@ qm create "$VMID" -machine q35 -bios ovmf -agent enabled=1 -tablet 0 -localtime -scsihw virtio-scsi-single -cdrom local:iso/$ISO_NAME -vga virtio >/dev/null msg_ok "Created VM shell" +# Optional step to import onboard disks if [ "$IMPORT_DISKS" == "yes" ]; then msg_info "Importing onboard disks" DISKARRAY=() From f04946cae8e63238db8d195de3cf9a6295c0bd60 Mon Sep 17 00:00:00 2001 From: juronja <101410098+juronja@users.noreply.github.com> Date: Fri, 16 Jan 2026 13:20:18 +0100 Subject: [PATCH 016/228] Adding source in file header Co-authored-by: greptile-apps[bot] <165735046+greptile-apps[bot]@users.noreply.github.com> --- vm/truenas-vm.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/vm/truenas-vm.sh b/vm/truenas-vm.sh index b81525ee2..54b5652f9 100644 --- a/vm/truenas-vm.sh +++ b/vm/truenas-vm.sh @@ -3,6 +3,7 @@ # Copyright (c) 2021-2026 community-scripts ORG # Author: juronja # License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://www.truenas.com/truenas-community-edition/ source /dev/stdin <<<$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/api.func) From 77f317cb5b073954c8c2d423d67f524e3742acaf Mon Sep 17 00:00:00 2001 From: GoldenSpring Date: Sun, 18 Jan 2026 11:43:54 +0300 Subject: [PATCH 017/228] fixed the gunicorn not working --- install/sonobarr-install.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/install/sonobarr-install.sh b/install/sonobarr-install.sh index cf248f5e9..7bb88bf41 100644 --- a/install/sonobarr-install.sh +++ b/install/sonobarr-install.sh @@ -31,7 +31,7 @@ After=network.target [Service] WorkingDirectory=/opt/sonobarr -ExecStart=/bin/bash -c 'gunicorn src.Sonobarr:app -c gunicorn_config.py' +ExecStart=/opt/sonobarr/venv/bin/gunicorn src.Sonobarr:app -c gunicorn_config.py Restart=always [Install] From 5ba1b411f42deb31e9227cacb9c9f0ebc249892b Mon Sep 17 00:00:00 2001 From: GoldenSpring Date: Sun, 18 Jan 2026 11:59:47 +0300 Subject: [PATCH 018/228] CHANGED WORKING DIR --- install/sonobarr-install.sh | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/install/sonobarr-install.sh b/install/sonobarr-install.sh index 7bb88bf41..72a45b9be 100644 --- a/install/sonobarr-install.sh +++ b/install/sonobarr-install.sh @@ -30,8 +30,9 @@ Description=sonobarr Service After=network.target [Service] -WorkingDirectory=/opt/sonobarr -ExecStart=/opt/sonobarr/venv/bin/gunicorn src.Sonobarr:app -c gunicorn_config.py +WorkingDirectory=/opt/sonobarr/src +Environment="PATH=/opt/sonobarr/venv/bin" +ExecStart=/bin/bash -c 'gunicorn src.Sonobarr:app -c gunicorn_config.py' Restart=always [Install] From 725ea9408787d5db44a38b5b264ffb424b131875 Mon Sep 17 00:00:00 2001 From: GoldenSpring Date: Wed, 21 Jan 2026 14:29:28 +0300 Subject: [PATCH 019/228] added env file --- install/sonobarr-install.sh | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/install/sonobarr-install.sh b/install/sonobarr-install.sh index 72a45b9be..5a247b656 100644 --- a/install/sonobarr-install.sh +++ b/install/sonobarr-install.sh @@ -21,6 +21,8 @@ apt install python3.13-venv -y python3 -m venv venv source venv/bin/activate pip install --no-cache-dir -r 
requirements.txt +mv ".sample-env" ".env" +sed -i "s/^secret_key=.*/secret_key=$(openssl rand -hex 16)/" .env msg_ok "Set up sonobarr" msg_info "Creating Service" @@ -31,8 +33,9 @@ After=network.target [Service] WorkingDirectory=/opt/sonobarr/src +EnvironmentFile=/opt/sonobarr/.env Environment="PATH=/opt/sonobarr/venv/bin" -ExecStart=/bin/bash -c 'gunicorn src.Sonobarr:app -c gunicorn_config.py' +ExecStart=/bin/bash -c 'gunicorn Sonobarr:app -c ../gunicorn_config.py' Restart=always [Install] From cbf9c2567d982329810a3b8e351e24e8e1d6790f Mon Sep 17 00:00:00 2001 From: GoldenSpring Date: Wed, 21 Jan 2026 14:44:16 +0300 Subject: [PATCH 020/228] last fixes + json --- ct/sonobarr.sh | 4 +++- frontend/public/json/sonobarr.json | 12 +++++++++++- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/ct/sonobarr.sh b/ct/sonobarr.sh index 8f502e6fa..d2c6b9a24 100644 --- a/ct/sonobarr.sh +++ b/ct/sonobarr.sh @@ -35,7 +35,9 @@ function update_script() { msg_ok "Stopped sonobarr" msg_info "Updating sonobarr" + cp "/opt/sonobarr/.env" "/opt/.sonobarr-env" CLEAN_INSTALL=1 fetch_and_deploy_gh_release "sonobarr" "Dodelidoo-Labs/sonobarr" "tarball" + cp "/opt/.sonobarr-env" "/opt/sonobarr/.env" msg_ok "Updated sonobarr" msg_info "Starting sonobarr" @@ -50,6 +52,6 @@ build_container description msg_ok "Completed Successfully!\n" -echo -e "${CREATING}${GN}rustypaste setup has been successfully initialized!${CL}" +echo -e "${CREATING}${GN}sonobarr setup has been successfully initialized!${CL}" echo -e "${INFO}${YW} Access it using the following URL:${CL}" echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:5000${CL}" diff --git a/frontend/public/json/sonobarr.json b/frontend/public/json/sonobarr.json index 77ae4be76..ee23ab442 100644 --- a/frontend/public/json/sonobarr.json +++ b/frontend/public/json/sonobarr.json @@ -31,5 +31,15 @@ "username": null, "password": null }, - "notes": [] + "notes": [ + { + "text": "secret_key in env is randomly generated at installation, feel free to change it", + "type": "info" + }, + { + "text": "Change the default admin credentials", + "type": "info" + } + + ] } From b015b7b8910426b59c498869298fb22761859cf0 Mon Sep 17 00:00:00 2001 From: GoldenSpring Date: Wed, 21 Jan 2026 14:59:33 +0300 Subject: [PATCH 021/228] removing temp changes --- misc/build.func | 6691 +++++++++++++++++++++++---------------------- misc/install.func | 1259 ++++----- 2 files changed, 4124 insertions(+), 3826 deletions(-) diff --git a/misc/build.func b/misc/build.func index 73b7ccf51..300bd6227 100644 --- a/misc/build.func +++ b/misc/build.func @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Copyright (c) 2021-2025 community-scripts ORG +# Copyright (c) 2021-2026 community-scripts ORG # Author: tteck (tteckster) | MickLesk | michelroegl-brunner # License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/branch/main/LICENSE # Revision: 1 @@ -39,46 +39,46 @@ # - Captures app-declared resource defaults (CPU, RAM, Disk) # ------------------------------------------------------------------------------ variables() { - NSAPP=$(echo "${APP,,}" | tr -d ' ') # This function sets the NSAPP variable by converting the value of the APP variable to lowercase and removing any spaces. - var_install="${NSAPP}-install" # sets the var_install variable by appending "-install" to the value of NSAPP. - INTEGER='^[0-9]+([.][0-9]+)?$' # it defines the INTEGER regular expression pattern. 
- PVEHOST_NAME=$(hostname) # gets the Proxmox Hostname and sets it to Uppercase - DIAGNOSTICS="yes" # sets the DIAGNOSTICS variable to "yes", used for the API call. - METHOD="default" # sets the METHOD variable to "default", used for the API call. - RANDOM_UUID="$(cat /proc/sys/kernel/random/uuid)" # generates a random UUID and sets it to the RANDOM_UUID variable. - SESSION_ID="${RANDOM_UUID:0:8}" # Short session ID (first 8 chars of UUID) for log files - BUILD_LOG="/tmp/create-lxc-${SESSION_ID}.log" # Host-side container creation log - CTTYPE="${CTTYPE:-${CT_TYPE:-1}}" + NSAPP=$(echo "${APP,,}" | tr -d ' ') # This function sets the NSAPP variable by converting the value of the APP variable to lowercase and removing any spaces. + var_install="${NSAPP}-install" # sets the var_install variable by appending "-install" to the value of NSAPP. + INTEGER='^[0-9]+([.][0-9]+)?$' # it defines the INTEGER regular expression pattern. + PVEHOST_NAME=$(hostname) # gets the Proxmox Hostname and sets it to Uppercase + DIAGNOSTICS="yes" # sets the DIAGNOSTICS variable to "yes", used for the API call. + METHOD="default" # sets the METHOD variable to "default", used for the API call. + RANDOM_UUID="$(cat /proc/sys/kernel/random/uuid)" # generates a random UUID and sets it to the RANDOM_UUID variable. + SESSION_ID="${RANDOM_UUID:0:8}" # Short session ID (first 8 chars of UUID) for log files + BUILD_LOG="/tmp/create-lxc-${SESSION_ID}.log" # Host-side container creation log + CTTYPE="${CTTYPE:-${CT_TYPE:-1}}" - # Parse dev_mode early - parse_dev_mode + # Parse dev_mode early + parse_dev_mode - # Setup persistent log directory if logs mode active - if [[ "${DEV_MODE_LOGS:-false}" == "true" ]]; then - mkdir -p /var/log/community-scripts - BUILD_LOG="/var/log/community-scripts/create-lxc-${SESSION_ID}-$(date +%Y%m%d_%H%M%S).log" - fi + # Setup persistent log directory if logs mode active + if [[ "${DEV_MODE_LOGS:-false}" == "true" ]]; then + mkdir -p /var/log/community-scripts + BUILD_LOG="/var/log/community-scripts/create-lxc-${SESSION_ID}-$(date +%Y%m%d_%H%M%S).log" + fi - # Get Proxmox VE version and kernel version - if command -v pveversion >/dev/null 2>&1; then - PVEVERSION="$(pveversion | awk -F'/' '{print $2}' | awk -F'-' '{print $1}')" - else - PVEVERSION="N/A" - fi - KERNEL_VERSION=$(uname -r) + # Get Proxmox VE version and kernel version + if command -v pveversion >/dev/null 2>&1; then + PVEVERSION="$(pveversion | awk -F'/' '{print $2}' | awk -F'-' '{print $1}')" + else + PVEVERSION="N/A" + fi + KERNEL_VERSION=$(uname -r) - # Capture app-declared defaults (for precedence logic) - # These values are set by the app script BEFORE default.vars is loaded - # If app declares higher values than default.vars, app values take precedence - if [[ -n "${var_cpu:-}" && "${var_cpu}" =~ ^[0-9]+$ ]]; then - export APP_DEFAULT_CPU="${var_cpu}" - fi - if [[ -n "${var_ram:-}" && "${var_ram}" =~ ^[0-9]+$ ]]; then - export APP_DEFAULT_RAM="${var_ram}" - fi - if [[ -n "${var_disk:-}" && "${var_disk}" =~ ^[0-9]+$ ]]; then - export APP_DEFAULT_DISK="${var_disk}" - fi + # Capture app-declared defaults (for precedence logic) + # These values are set by the app script BEFORE default.vars is loaded + # If app declares higher values than default.vars, app values take precedence + if [[ -n "${var_cpu:-}" && "${var_cpu}" =~ ^[0-9]+$ ]]; then + export APP_DEFAULT_CPU="${var_cpu}" + fi + if [[ -n "${var_ram:-}" && "${var_ram}" =~ ^[0-9]+$ ]]; then + export APP_DEFAULT_RAM="${var_ram}" + fi + if [[ -n "${var_disk:-}" && "${var_disk}" =~ 
^[0-9]+$ ]]; then + export APP_DEFAULT_DISK="${var_disk}" + fi } # ----------------------------------------------------------------------------- @@ -187,17 +187,17 @@ variables() { source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/api.func) if command -v curl >/dev/null 2>&1; then - source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/core.func) - source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/error_handler.func) - load_functions - catch_errors - #echo "(build.func) Loaded core.func via curl" + source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/core.func) + source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/error_handler.func) + load_functions + catch_errors + #echo "(build.func) Loaded core.func via curl" elif command -v wget >/dev/null 2>&1; then - source <(wget -qO- https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/core.func) - source <(wget -qO- https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/error_handler.func) - load_functions - catch_errors - #echo "(build.func) Loaded core.func via wget" + source <(wget -qO- https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/core.func) + source <(wget -qO- https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/error_handler.func) + load_functions + catch_errors + #echo "(build.func) Loaded core.func via wget" fi # ============================================================================== @@ -215,46 +215,46 @@ fi # ------------------------------------------------------------------------------ maxkeys_check() { - # Read kernel parameters - per_user_maxkeys=$(cat /proc/sys/kernel/keys/maxkeys 2>/dev/null || echo 0) - per_user_maxbytes=$(cat /proc/sys/kernel/keys/maxbytes 2>/dev/null || echo 0) + # Read kernel parameters + per_user_maxkeys=$(cat /proc/sys/kernel/keys/maxkeys 2>/dev/null || echo 0) + per_user_maxbytes=$(cat /proc/sys/kernel/keys/maxbytes 2>/dev/null || echo 0) - # Exit if kernel parameters are unavailable - if [[ "$per_user_maxkeys" -eq 0 || "$per_user_maxbytes" -eq 0 ]]; then - echo -e "${CROSS}${RD} Error: Unable to read kernel parameters. Ensure proper permissions.${CL}" - exit 1 - fi + # Exit if kernel parameters are unavailable + if [[ "$per_user_maxkeys" -eq 0 || "$per_user_maxbytes" -eq 0 ]]; then + echo -e "${CROSS}${RD} Error: Unable to read kernel parameters. 
Ensure proper permissions.${CL}" + exit 1 + fi - # Fetch key usage for user ID 100000 (typical for containers) - used_lxc_keys=$(awk '/100000:/ {print $2}' /proc/key-users 2>/dev/null || echo 0) - used_lxc_bytes=$(awk '/100000:/ {split($5, a, "/"); print a[1]}' /proc/key-users 2>/dev/null || echo 0) + # Fetch key usage for user ID 100000 (typical for containers) + used_lxc_keys=$(awk '/100000:/ {print $2}' /proc/key-users 2>/dev/null || echo 0) + used_lxc_bytes=$(awk '/100000:/ {split($5, a, "/"); print a[1]}' /proc/key-users 2>/dev/null || echo 0) - # Calculate thresholds and suggested new limits - threshold_keys=$((per_user_maxkeys - 100)) - threshold_bytes=$((per_user_maxbytes - 1000)) - new_limit_keys=$((per_user_maxkeys * 2)) - new_limit_bytes=$((per_user_maxbytes * 2)) + # Calculate thresholds and suggested new limits + threshold_keys=$((per_user_maxkeys - 100)) + threshold_bytes=$((per_user_maxbytes - 1000)) + new_limit_keys=$((per_user_maxkeys * 2)) + new_limit_bytes=$((per_user_maxbytes * 2)) - # Check if key or byte usage is near limits - failure=0 - if [[ "$used_lxc_keys" -gt "$threshold_keys" ]]; then - echo -e "${CROSS}${RD} Warning: Key usage is near the limit (${used_lxc_keys}/${per_user_maxkeys}).${CL}" - echo -e "${INFO} Suggested action: Set ${GN}kernel.keys.maxkeys=${new_limit_keys}${CL} in ${BOLD}/etc/sysctl.d/98-community-scripts.conf${CL}." - failure=1 - fi - if [[ "$used_lxc_bytes" -gt "$threshold_bytes" ]]; then - echo -e "${CROSS}${RD} Warning: Key byte usage is near the limit (${used_lxc_bytes}/${per_user_maxbytes}).${CL}" - echo -e "${INFO} Suggested action: Set ${GN}kernel.keys.maxbytes=${new_limit_bytes}${CL} in ${BOLD}/etc/sysctl.d/98-community-scripts.conf${CL}." - failure=1 - fi + # Check if key or byte usage is near limits + failure=0 + if [[ "$used_lxc_keys" -gt "$threshold_keys" ]]; then + echo -e "${CROSS}${RD} Warning: Key usage is near the limit (${used_lxc_keys}/${per_user_maxkeys}).${CL}" + echo -e "${INFO} Suggested action: Set ${GN}kernel.keys.maxkeys=${new_limit_keys}${CL} in ${BOLD}/etc/sysctl.d/98-community-scripts.conf${CL}." + failure=1 + fi + if [[ "$used_lxc_bytes" -gt "$threshold_bytes" ]]; then + echo -e "${CROSS}${RD} Warning: Key byte usage is near the limit (${used_lxc_bytes}/${per_user_maxbytes}).${CL}" + echo -e "${INFO} Suggested action: Set ${GN}kernel.keys.maxbytes=${new_limit_bytes}${CL} in ${BOLD}/etc/sysctl.d/98-community-scripts.conf${CL}." 
+ failure=1 + fi - # Provide next steps if issues are detected - if [[ "$failure" -eq 1 ]]; then - echo -e "${INFO} To apply changes, run: ${BOLD}service procps force-reload${CL}" - exit 1 - fi + # Provide next steps if issues are detected + if [[ "$failure" -eq 1 ]]; then + echo -e "${INFO} To apply changes, run: ${BOLD}service procps force-reload${CL}" + exit 1 + fi - # Silent success - only show errors if they exist + # Silent success - only show errors if they exist } # ============================================================================== @@ -270,18 +270,18 @@ maxkeys_check() { # - Returns "Unknown" if OS type cannot be determined # ------------------------------------------------------------------------------ get_current_ip() { - if [ -f /etc/os-release ]; then - # Check for Debian/Ubuntu (uses hostname -I) - if grep -qE 'ID=debian|ID=ubuntu' /etc/os-release; then - CURRENT_IP=$(hostname -I | awk '{print $1}') - # Check for Alpine (uses ip command) - elif grep -q 'ID=alpine' /etc/os-release; then - CURRENT_IP=$(ip -4 addr show eth0 | awk '/inet / {print $2}' | cut -d/ -f1 | head -n 1) - else - CURRENT_IP="Unknown" - fi + if [ -f /etc/os-release ]; then + # Check for Debian/Ubuntu (uses hostname -I) + if grep -qE 'ID=debian|ID=ubuntu' /etc/os-release; then + CURRENT_IP=$(hostname -I | awk '{print $1}') + # Check for Alpine (uses ip command) + elif grep -q 'ID=alpine' /etc/os-release; then + CURRENT_IP=$(ip -4 addr show eth0 | awk '/inet / {print $2}' | cut -d/ -f1 | head -n 1) + else + CURRENT_IP="Unknown" fi - echo "$CURRENT_IP" + fi + echo "$CURRENT_IP" } # ------------------------------------------------------------------------------ @@ -291,16 +291,16 @@ get_current_ip() { # - Removes old IP entries to avoid duplicates # ------------------------------------------------------------------------------ update_motd_ip() { - MOTD_FILE="/etc/motd" + MOTD_FILE="/etc/motd" - if [ -f "$MOTD_FILE" ]; then - # Remove existing IP Address lines to prevent duplication - sed -i '/IP Address:/d' "$MOTD_FILE" + if [ -f "$MOTD_FILE" ]; then + # Remove existing IP Address lines to prevent duplication + sed -i '/IP Address:/d' "$MOTD_FILE" - IP=$(get_current_ip) - # Add the new IP address - echo -e "${TAB}${NETWORK}${YW} IP Address: ${GN}${IP}${CL}" >>"$MOTD_FILE" - fi + IP=$(get_current_ip) + # Add the new IP address + echo -e "${TAB}${NETWORK}${YW} IP Address: ${GN}${IP}${CL}" >>"$MOTD_FILE" + fi } # ------------------------------------------------------------------------------ @@ -311,27 +311,30 @@ update_motd_ip() { # - Falls back to warning if no keys provided # ------------------------------------------------------------------------------ install_ssh_keys_into_ct() { - [[ "$SSH" != "yes" ]] && return 0 + [[ "${SSH:-no}" != "yes" ]] && return 0 - if [[ -n "$SSH_KEYS_FILE" && -s "$SSH_KEYS_FILE" ]]; then - msg_info "Installing selected SSH keys into CT ${CTID}" - pct exec "$CTID" -- sh -c 'mkdir -p /root/.ssh && chmod 700 /root/.ssh' || { - msg_error "prepare /root/.ssh failed" - return 1 - } - pct push "$CTID" "$SSH_KEYS_FILE" /root/.ssh/authorized_keys >/dev/null 2>&1 || - pct exec "$CTID" -- sh -c "cat > /root/.ssh/authorized_keys" <"$SSH_KEYS_FILE" || { - msg_error "write authorized_keys failed" - return 1 - } - pct exec "$CTID" -- sh -c 'chmod 600 /root/.ssh/authorized_keys' || true - msg_ok "Installed SSH keys into CT ${CTID}" - return 0 - fi + # Ensure SSH_KEYS_FILE is defined (may not be set if advanced_settings was skipped) + : "${SSH_KEYS_FILE:=}" - # Fallback: nichts ausgewählt 
- msg_warn "No SSH keys to install (skipping)." + if [[ -n "$SSH_KEYS_FILE" && -s "$SSH_KEYS_FILE" ]]; then + msg_info "Installing selected SSH keys into CT ${CTID}" + pct exec "$CTID" -- sh -c 'mkdir -p /root/.ssh && chmod 700 /root/.ssh' || { + msg_error "prepare /root/.ssh failed" + return 1 + } + pct push "$CTID" "$SSH_KEYS_FILE" /root/.ssh/authorized_keys >/dev/null 2>&1 || + pct exec "$CTID" -- sh -c "cat > /root/.ssh/authorized_keys" <"$SSH_KEYS_FILE" || { + msg_error "write authorized_keys failed" + return 1 + } + pct exec "$CTID" -- sh -c 'chmod 600 /root/.ssh/authorized_keys' || true + msg_ok "Installed SSH keys into CT ${CTID}" return 0 + fi + + # Fallback: nichts ausgewählt + msg_warn "No SSH keys to install (skipping)." + return 0 } # ------------------------------------------------------------------------------ @@ -343,55 +346,139 @@ install_ssh_keys_into_ct() { # - Sets FOUND_HOST_KEY_COUNT to number of keys found # ------------------------------------------------------------------------------ find_host_ssh_keys() { - local re='(ssh-(rsa|ed25519)|ecdsa-sha2-nistp256|sk-(ssh-ed25519|ecdsa-sha2-nistp256))' - local -a files=() cand=() - local g="${var_ssh_import_glob:-}" - local total=0 f base c + local re='(ssh-(rsa|ed25519)|ecdsa-sha2-nistp256|sk-(ssh-ed25519|ecdsa-sha2-nistp256))' + local -a files=() cand=() + local g="${var_ssh_import_glob:-}" + local total=0 f base c - shopt -s nullglob - if [[ -n "$g" ]]; then - for pat in $g; do cand+=($pat); done - else - cand+=(/root/.ssh/authorized_keys /root/.ssh/authorized_keys2) - cand+=(/root/.ssh/*.pub) - cand+=(/etc/ssh/authorized_keys /etc/ssh/authorized_keys.d/*) - fi - shopt -u nullglob + shopt -s nullglob + if [[ -n "$g" ]]; then + for pat in $g; do cand+=($pat); done + else + cand+=(/root/.ssh/authorized_keys /root/.ssh/authorized_keys2) + cand+=(/root/.ssh/*.pub) + cand+=(/etc/ssh/authorized_keys /etc/ssh/authorized_keys.d/*) + fi + shopt -u nullglob - for f in "${cand[@]}"; do - [[ -f "$f" && -r "$f" ]] || continue - base="$(basename -- "$f")" - case "$base" in - known_hosts | known_hosts.* | config) continue ;; - id_*) [[ "$f" != *.pub ]] && continue ;; - esac + for f in "${cand[@]}"; do + [[ -f "$f" && -r "$f" ]] || continue + base="$(basename -- "$f")" + case "$base" in + known_hosts | known_hosts.* | config) continue ;; + id_*) [[ "$f" != *.pub ]] && continue ;; + esac - # CRLF safe check for host keys - c=$(tr -d '\r' <"$f" | awk ' + # CRLF safe check for host keys + c=$(tr -d '\r' <"$f" | awk ' /^[[:space:]]*#/ {next} /^[[:space:]]*$/ {next} {print} ' | grep -E -c '"$re"' || true) - if ((c > 0)); then - files+=("$f") - total=$((total + c)) - fi - done - - # Fallback to /root/.ssh/authorized_keys - if ((${#files[@]} == 0)) && [[ -r /root/.ssh/authorized_keys ]]; then - if grep -E -q "$re" /root/.ssh/authorized_keys; then - files+=(/root/.ssh/authorized_keys) - total=$((total + $(grep -E -c "$re" /root/.ssh/authorized_keys || echo 0))) - fi + if ((c > 0)); then + files+=("$f") + total=$((total + c)) fi + done - FOUND_HOST_KEY_COUNT="$total" - ( - IFS=: - echo "${files[*]}" - ) + # Fallback to /root/.ssh/authorized_keys + if ((${#files[@]} == 0)) && [[ -r /root/.ssh/authorized_keys ]]; then + if grep -E -q "$re" /root/.ssh/authorized_keys; then + files+=(/root/.ssh/authorized_keys) + total=$((total + $(grep -E -c "$re" /root/.ssh/authorized_keys || echo 0))) + fi + fi + + FOUND_HOST_KEY_COUNT="$total" + ( + IFS=: + echo "${files[*]}" + ) +} + +# 
============================================================================== +# SECTION 3B: IP RANGE SCANNING +# ============================================================================== + +# ------------------------------------------------------------------------------ +# ip_to_int() / int_to_ip() +# +# - Converts IP address to integer and vice versa for range iteration +# ------------------------------------------------------------------------------ +ip_to_int() { + local IFS=. + read -r i1 i2 i3 i4 <<<"$1" + echo $(((i1 << 24) + (i2 << 16) + (i3 << 8) + i4)) +} + +int_to_ip() { + local ip=$1 + echo "$(((ip >> 24) & 0xFF)).$(((ip >> 16) & 0xFF)).$(((ip >> 8) & 0xFF)).$((ip & 0xFF))" +} + +# ------------------------------------------------------------------------------ +# resolve_ip_from_range() +# +# - Takes an IP range in format "10.0.0.1/24-10.0.0.10/24" +# - Pings each IP in the range to find the first available one +# - Returns the first free IP with CIDR notation +# - Sets NET_RESOLVED to the resolved IP or empty on failure +# ------------------------------------------------------------------------------ +resolve_ip_from_range() { + local range="$1" + local ip_cidr_regex='^([0-9]{1,3})\.([0-9]{1,3})\.([0-9]{1,3})\.([0-9]{1,3})/([0-9]{1,2})$' + local ip_start ip_end + + # Parse range: "10.0.0.1/24-10.0.0.10/24" + ip_start="${range%%-*}" + ip_end="${range##*-}" + + if [[ ! "$ip_start" =~ $ip_cidr_regex ]] || [[ ! "$ip_end" =~ $ip_cidr_regex ]]; then + NET_RESOLVED="" + return 1 + fi + + local ip1="${ip_start%%/*}" + local ip2="${ip_end%%/*}" + local cidr="${ip_start##*/}" + + local start_int=$(ip_to_int "$ip1") + local end_int=$(ip_to_int "$ip2") + + for ((ip_int = start_int; ip_int <= end_int; ip_int++)); do + local ip=$(int_to_ip $ip_int) + msg_info "Checking IP: $ip" + if ! 
ping -c 1 -W 1 "$ip" >/dev/null 2>&1; then + NET_RESOLVED="$ip/$cidr" + msg_ok "Found free IP: ${BGN}$NET_RESOLVED${CL}" + return 0 + fi + done + + NET_RESOLVED="" + msg_error "No free IP found in range $range" + return 1 +} + +# ------------------------------------------------------------------------------ +# is_ip_range() +# +# - Checks if a string is an IP range (contains - and looks like IP/CIDR) +# - Returns 0 if it's a range, 1 otherwise +# ------------------------------------------------------------------------------ +is_ip_range() { + local value="$1" + local ip_start ip_end + if [[ "$value" == *-* ]] && [[ "$value" != "dhcp" ]]; then + local ip_cidr_regex='^([0-9]{1,3})\.([0-9]{1,3})\.([0-9]{1,3})\.([0-9]{1,3})/([0-9]{1,2})$' + ip_start="${value%%-*}" + ip_end="${value##*-}" + if [[ "$ip_start" =~ $ip_cidr_regex ]] && [[ "$ip_end" =~ $ip_cidr_regex ]]; then + return 0 + fi + fi + return 1 } # ============================================================================== @@ -406,53 +493,53 @@ find_host_ssh_keys() { # - Arguments: vars_file, key (var_container_storage/var_template_storage), value # ------------------------------------------------------------------------------ _write_storage_to_vars() { - # $1 = vars_file, $2 = key (var_container_storage / var_template_storage), $3 = value - local vf="$1" key="$2" val="$3" - # remove uncommented and commented versions to avoid duplicates - sed -i "/^[#[:space:]]*${key}=/d" "$vf" - echo "${key}=${val}" >>"$vf" + # $1 = vars_file, $2 = key (var_container_storage / var_template_storage), $3 = value + local vf="$1" key="$2" val="$3" + # remove uncommented and commented versions to avoid duplicates + sed -i "/^[#[:space:]]*${key}=/d" "$vf" + echo "${key}=${val}" >>"$vf" } choose_and_set_storage_for_file() { - # $1 = vars_file, $2 = class ('container'|'template') - local vf="$1" class="$2" key="" current="" - case "$class" in - container) key="var_container_storage" ;; - template) key="var_template_storage" ;; - *) - msg_error "Unknown storage class: $class" - return 1 - ;; - esac + # $1 = vars_file, $2 = class ('container'|'template') + local vf="$1" class="$2" key="" current="" + case "$class" in + container) key="var_container_storage" ;; + template) key="var_template_storage" ;; + *) + msg_error "Unknown storage class: $class" + return 1 + ;; + esac - current=$(awk -F= -v k="^${key}=" '$0 ~ k {print $2; exit}' "$vf") + current=$(awk -F= -v k="^${key}=" '$0 ~ k {print $2; exit}' "$vf") - # If only one storage exists for the content type, auto-pick. Else always ask (your wish #4). - local content="rootdir" - [[ "$class" == "template" ]] && content="vztmpl" - local count - count=$(pvesm status -content "$content" | awk 'NR>1{print $1}' | wc -l) + # If only one storage exists for the content type, auto-pick. Else always ask (your wish #4). 
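+  # Content-type mapping (Proxmox storage semantics): class 'container' selects
+  # rootdir-capable storages, class 'template' selects vztmpl-capable storages.
+  # Illustrative usage, with the canonical defaults path used elsewhere in this file:
+  #   choose_and_set_storage_for_file /usr/local/community-scripts/default.vars container
+  # persists var_container_storage=<choice> and exports CONTAINER_STORAGE for later steps.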
+ local content="rootdir" + [[ "$class" == "template" ]] && content="vztmpl" + local count + count=$(pvesm status -content "$content" | awk 'NR>1{print $1}' | wc -l) - if [[ "$count" -eq 1 ]]; then - STORAGE_RESULT=$(pvesm status -content "$content" | awk 'NR>1{print $1; exit}') - STORAGE_INFO="" - else - # If the current value is preselectable, we could show it, but per your requirement we always offer selection - select_storage "$class" || return 1 - fi + if [[ "$count" -eq 1 ]]; then + STORAGE_RESULT=$(pvesm status -content "$content" | awk 'NR>1{print $1; exit}') + STORAGE_INFO="" + else + # If the current value is preselectable, we could show it, but per your requirement we always offer selection + select_storage "$class" || return 1 + fi - _write_storage_to_vars "$vf" "$key" "$STORAGE_RESULT" + _write_storage_to_vars "$vf" "$key" "$STORAGE_RESULT" - # Keep environment in sync for later steps (e.g. app-default save) - if [[ "$class" == "container" ]]; then - export var_container_storage="$STORAGE_RESULT" - export CONTAINER_STORAGE="$STORAGE_RESULT" - else - export var_template_storage="$STORAGE_RESULT" - export TEMPLATE_STORAGE="$STORAGE_RESULT" - fi + # Keep environment in sync for later steps (e.g. app-default save) + if [[ "$class" == "container" ]]; then + export var_container_storage="$STORAGE_RESULT" + export CONTAINER_STORAGE="$STORAGE_RESULT" + else + export var_template_storage="$STORAGE_RESULT" + export TEMPLATE_STORAGE="$STORAGE_RESULT" + fi - # Silent operation - no output message + # Silent operation - no output message } # ============================================================================== @@ -469,83 +556,102 @@ choose_and_set_storage_for_file() { # - Sets up container type, resources, network, SSH, features, and tags # ------------------------------------------------------------------------------ base_settings() { - # Default Settings - CT_TYPE=${var_unprivileged:-"1"} + # Default Settings + CT_TYPE=${var_unprivileged:-"1"} - # Resource allocation: App defaults take precedence if HIGHER - # Compare app-declared values (saved in APP_DEFAULT_*) with current var_* values - local final_disk="${var_disk:-4}" - local final_cpu="${var_cpu:-1}" - local final_ram="${var_ram:-1024}" + # Resource allocation: App defaults take precedence if HIGHER + # Compare app-declared values (saved in APP_DEFAULT_*) with current var_* values + local final_disk="${var_disk:-4}" + local final_cpu="${var_cpu:-1}" + local final_ram="${var_ram:-1024}" - # If app declared higher values, use those instead - if [[ -n "${APP_DEFAULT_DISK:-}" && "${APP_DEFAULT_DISK}" =~ ^[0-9]+$ ]]; then - if [[ "${APP_DEFAULT_DISK}" -gt "${final_disk}" ]]; then - final_disk="${APP_DEFAULT_DISK}" - fi + # If app declared higher values, use those instead + if [[ -n "${APP_DEFAULT_DISK:-}" && "${APP_DEFAULT_DISK}" =~ ^[0-9]+$ ]]; then + if [[ "${APP_DEFAULT_DISK}" -gt "${final_disk}" ]]; then + final_disk="${APP_DEFAULT_DISK}" fi + fi - if [[ -n "${APP_DEFAULT_CPU:-}" && "${APP_DEFAULT_CPU}" =~ ^[0-9]+$ ]]; then - if [[ "${APP_DEFAULT_CPU}" -gt "${final_cpu}" ]]; then - final_cpu="${APP_DEFAULT_CPU}" - fi + if [[ -n "${APP_DEFAULT_CPU:-}" && "${APP_DEFAULT_CPU}" =~ ^[0-9]+$ ]]; then + if [[ "${APP_DEFAULT_CPU}" -gt "${final_cpu}" ]]; then + final_cpu="${APP_DEFAULT_CPU}" fi + fi - if [[ -n "${APP_DEFAULT_RAM:-}" && "${APP_DEFAULT_RAM}" =~ ^[0-9]+$ ]]; then - if [[ "${APP_DEFAULT_RAM}" -gt "${final_ram}" ]]; then - final_ram="${APP_DEFAULT_RAM}" - fi + if [[ -n "${APP_DEFAULT_RAM:-}" && "${APP_DEFAULT_RAM}" =~ 
^[0-9]+$ ]]; then + if [[ "${APP_DEFAULT_RAM}" -gt "${final_ram}" ]]; then + final_ram="${APP_DEFAULT_RAM}" fi + fi - DISK_SIZE="${final_disk}" - CORE_COUNT="${final_cpu}" - RAM_SIZE="${final_ram}" - VERBOSE=${var_verbose:-"${1:-no}"} - PW=${var_pw:-""} - CT_ID=${var_ctid:-$NEXTID} - HN=${var_hostname:-$NSAPP} - BRG=${var_brg:-"vmbr0"} - NET=${var_net:-"dhcp"} - IPV6_METHOD=${var_ipv6_method:-"none"} - IPV6_STATIC=${var_ipv6_static:-""} - GATE=${var_gateway:-""} - APT_CACHER=${var_apt_cacher:-""} - APT_CACHER_IP=${var_apt_cacher_ip:-""} + DISK_SIZE="${final_disk}" + CORE_COUNT="${final_cpu}" + RAM_SIZE="${final_ram}" + VERBOSE=${var_verbose:-"${1:-no}"} + PW=${var_pw:-""} + CT_ID=${var_ctid:-$NEXTID} + HN=${var_hostname:-$NSAPP} + BRG=${var_brg:-"vmbr0"} + NET=${var_net:-"dhcp"} - # Runtime check: Verify APT cacher is reachable if configured - if [[ -n "$APT_CACHER_IP" && "$APT_CACHER" == "yes" ]]; then - if ! curl -s --connect-timeout 2 "http://${APT_CACHER_IP}:3142" >/dev/null 2>&1; then - msg_warn "APT Cacher configured but not reachable at ${APT_CACHER_IP}:3142" - msg_custom "⚠️" "${YW}" "Disabling APT Cacher for this installation" - APT_CACHER="" - APT_CACHER_IP="" - else - msg_ok "APT Cacher verified at ${APT_CACHER_IP}:3142" - fi + # Resolve IP range if NET contains a range (e.g., 192.168.1.100/24-192.168.1.200/24) + if is_ip_range "$NET"; then + msg_info "Scanning IP range: $NET" + if resolve_ip_from_range "$NET"; then + NET="$NET_RESOLVED" + else + msg_error "Could not find free IP in range. Falling back to DHCP." + NET="dhcp" fi + fi - MTU=${var_mtu:-""} - SD=${var_storage:-""} - NS=${var_ns:-""} - MAC=${var_mac:-""} - VLAN=${var_vlan:-""} - SSH=${var_ssh:-"no"} - SSH_AUTHORIZED_KEY=${var_ssh_authorized_key:-""} - UDHCPC_FIX=${var_udhcpc_fix:-""} - TAGS="community-script,${var_tags:-}" - ENABLE_FUSE=${var_fuse:-"${1:-no}"} - ENABLE_TUN=${var_tun:-"${1:-no}"} - ENABLE_KEYCTL=${var_keyctl:-0} - ENABLE_MKNOD=${var_mknod:-0} - MOUNT_FS=${var_mount_fs:-""} + IPV6_METHOD=${var_ipv6_method:-"none"} + IPV6_STATIC=${var_ipv6_static:-""} + GATE=${var_gateway:-""} + APT_CACHER=${var_apt_cacher:-""} + APT_CACHER_IP=${var_apt_cacher_ip:-""} - # Since these 2 are only defined outside of default_settings function, we add a temporary fallback. TODO: To align everything, we should add these as constant variables (e.g. OSTYPE and OSVERSION), but that would currently require updating the default_settings function for all existing scripts - if [ -z "$var_os" ]; then - var_os="debian" - fi - if [ -z "$var_version" ]; then - var_version="12" + # Runtime check: Verify APT cacher is reachable if configured + if [[ -n "$APT_CACHER_IP" && "$APT_CACHER" == "yes" ]]; then + if ! 
curl -s --connect-timeout 2 "http://${APT_CACHER_IP}:3142" >/dev/null 2>&1; then + msg_warn "APT Cacher configured but not reachable at ${APT_CACHER_IP}:3142" + msg_custom "⚠️" "${YW}" "Disabling APT Cacher for this installation" + APT_CACHER="" + APT_CACHER_IP="" + else + msg_ok "APT Cacher verified at ${APT_CACHER_IP}:3142" fi + fi + + MTU=${var_mtu:-""} + SD=${var_storage:-""} + NS=${var_ns:-""} + MAC=${var_mac:-""} + VLAN=${var_vlan:-""} + SSH=${var_ssh:-"no"} + SSH_AUTHORIZED_KEY=${var_ssh_authorized_key:-""} + UDHCPC_FIX=${var_udhcpc_fix:-""} + TAGS="community-script,${var_tags:-}" + ENABLE_FUSE=${var_fuse:-"${1:-no}"} + ENABLE_TUN=${var_tun:-"${1:-no}"} + + # Additional settings that may be skipped if advanced_settings is not run (e.g., App Defaults) + ENABLE_GPU=${var_gpu:-"no"} + ENABLE_NESTING=${var_nesting:-"1"} + ENABLE_KEYCTL=${var_keyctl:-"0"} + ENABLE_MKNOD=${var_mknod:-"0"} + MOUNT_FS=${var_mount_fs:-""} + PROTECT_CT=${var_protection:-"no"} + CT_TIMEZONE=${var_timezone:-"$timezone"} + [[ "${CT_TIMEZONE:-}" == Etc/* ]] && CT_TIMEZONE="host" # pct doesn't accept Etc/* zones + + # Since these 2 are only defined outside of default_settings function, we add a temporary fallback. TODO: To align everything, we should add these as constant variables (e.g. OSTYPE and OSVERSION), but that would currently require updating the default_settings function for all existing scripts + if [ -z "$var_os" ]; then + var_os="debian" + fi + if [ -z "$var_version" ]; then + var_version="12" + fi } # ------------------------------------------------------------------------------ @@ -556,49 +662,59 @@ base_settings() { # - Only loads whitelisted var_* keys # ------------------------------------------------------------------------------ load_vars_file() { - local file="$1" - [ -f "$file" ] || return 0 - msg_info "Loading defaults from ${file}" + local file="$1" + [ -f "$file" ] || return 0 + msg_info "Loading defaults from ${file}" - # Allowed var_* keys - local VAR_WHITELIST=( - var_apt_cacher var_apt_cacher_ip var_brg var_cpu var_disk var_fuse var_gpu var_keyctl - var_gateway var_hostname var_ipv6_method var_mac var_mknod var_mount_fs var_mtu - var_net var_nesting var_ns var_protection var_pw var_ram var_tags var_timezone var_tun var_unprivileged - var_verbose var_vlan var_ssh var_ssh_authorized_key var_container_storage var_template_storage - ) + # Allowed var_* keys + local VAR_WHITELIST=( + var_apt_cacher var_apt_cacher_ip var_brg var_cpu var_disk var_fuse var_gpu var_keyctl + var_gateway var_hostname var_ipv6_method var_mac var_mknod var_mount_fs var_mtu + var_net var_nesting var_ns var_protection var_pw var_ram var_tags var_timezone var_tun var_unprivileged + var_verbose var_vlan var_ssh var_ssh_authorized_key var_container_storage var_template_storage + ) - # Whitelist check helper - _is_whitelisted() { - local k="$1" w - for w in "${VAR_WHITELIST[@]}"; do [ "$k" = "$w" ] && return 0; done - return 1 - } + # Whitelist check helper + _is_whitelisted() { + local k="$1" w + for w in "${VAR_WHITELIST[@]}"; do [ "$k" = "$w" ] && return 0; done + return 1 + } - local line key val - while IFS= read -r line || [ -n "$line" ]; do - line="${line#"${line%%[![:space:]]*}"}" - line="${line%"${line##*[![:space:]]}"}" - [[ -z "$line" || "$line" == \#* ]] && continue - if [[ "$line" =~ ^([A-Za-z_][A-Za-z0-9_]*)=(.*)$ ]]; then - local var_key="${BASH_REMATCH[1]}" - local var_val="${BASH_REMATCH[2]}" + local line key val + while IFS= read -r line || [ -n "$line" ]; do + line="${line#"${line%%[![:space:]]*}"}" + 
line="${line%"${line##*[![:space:]]}"}" + [[ -z "$line" || "$line" == \#* ]] && continue + if [[ "$line" =~ ^([A-Za-z_][A-Za-z0-9_]*)=(.*)$ ]]; then + local var_key="${BASH_REMATCH[1]}" + local var_val="${BASH_REMATCH[2]}" - [[ "$var_key" != var_* ]] && continue - _is_whitelisted "$var_key" || continue + [[ "$var_key" != var_* ]] && continue + _is_whitelisted "$var_key" || continue - # Strip quotes - if [[ "$var_val" =~ ^\"(.*)\"$ ]]; then - var_val="${BASH_REMATCH[1]}" - elif [[ "$var_val" =~ ^\'(.*)\'$ ]]; then - var_val="${BASH_REMATCH[1]}" - fi + # Strip inline comments (everything after unquoted #) + # Handle: var=value # comment OR var="value" # comment + if [[ ! "$var_val" =~ ^[\"\'] ]]; then + # Unquoted value: strip from first # + var_val="${var_val%%#*}" + fi - # Set only if not already exported - [[ -z "${!var_key+x}" ]] && export "${var_key}=${var_val}" - fi - done <"$file" - msg_ok "Loaded ${file}" + # Strip quotes + if [[ "$var_val" =~ ^\"(.*)\"$ ]]; then + var_val="${BASH_REMATCH[1]}" + elif [[ "$var_val" =~ ^\'(.*)\'$ ]]; then + var_val="${BASH_REMATCH[1]}" + fi + + # Trim trailing whitespace + var_val="${var_val%"${var_val##*[![:space:]]}"}" + + # Set only if not already exported + [[ -z "${!var_key+x}" ]] && export "${var_key}=${var_val}" + fi + done <"$file" + msg_ok "Loaded ${file}" } # ------------------------------------------------------------------------------ @@ -611,56 +727,56 @@ load_vars_file() { # - Calls base_settings "$VERBOSE" and echo_default # ------------------------------------------------------------------------------ default_var_settings() { - # Allowed var_* keys (alphabetically sorted) - # Note: Removed var_ctid (can only exist once), var_ipv6_static (static IPs are unique) - local VAR_WHITELIST=( - var_apt_cacher var_apt_cacher_ip var_brg var_cpu var_disk var_fuse var_gpu var_keyctl - var_gateway var_hostname var_ipv6_method var_mac var_mknod var_mount_fs var_mtu - var_net var_nesting var_ns var_protection var_pw var_ram var_tags var_timezone var_tun var_unprivileged - var_verbose var_vlan var_ssh var_ssh_authorized_key var_container_storage var_template_storage - ) + # Allowed var_* keys (alphabetically sorted) + # Note: Removed var_ctid (can only exist once), var_ipv6_static (static IPs are unique) + local VAR_WHITELIST=( + var_apt_cacher var_apt_cacher_ip var_brg var_cpu var_disk var_fuse var_gpu var_keyctl + var_gateway var_hostname var_ipv6_method var_mac var_mknod var_mount_fs var_mtu + var_net var_nesting var_ns var_protection var_pw var_ram var_tags var_timezone var_tun var_unprivileged + var_verbose var_vlan var_ssh var_ssh_authorized_key var_container_storage var_template_storage + ) - # Snapshot: environment variables (highest precedence) - declare -A _HARD_ENV=() - local _k - for _k in "${VAR_WHITELIST[@]}"; do - if printenv "$_k" >/dev/null 2>&1; then _HARD_ENV["$_k"]=1; fi + # Snapshot: environment variables (highest precedence) + declare -A _HARD_ENV=() + local _k + for _k in "${VAR_WHITELIST[@]}"; do + if printenv "$_k" >/dev/null 2>&1; then _HARD_ENV["$_k"]=1; fi + done + + # Find default.vars location + local _find_default_vars + _find_default_vars() { + local f + for f in \ + /usr/local/community-scripts/default.vars \ + "$HOME/.config/community-scripts/default.vars" \ + "./default.vars"; do + [ -f "$f" ] && { + echo "$f" + return 0 + } done + return 1 + } + # Allow override of storages via env (for non-interactive use cases) + [ -n "${var_template_storage:-}" ] && TEMPLATE_STORAGE="$var_template_storage" + [ -n 
"${var_container_storage:-}" ] && CONTAINER_STORAGE="$var_container_storage" - # Find default.vars location - local _find_default_vars - _find_default_vars() { - local f - for f in \ - /usr/local/community-scripts/default.vars \ - "$HOME/.config/community-scripts/default.vars" \ - "./default.vars"; do - [ -f "$f" ] && { - echo "$f" - return 0 - } - done - return 1 - } - # Allow override of storages via env (for non-interactive use cases) - [ -n "${var_template_storage:-}" ] && TEMPLATE_STORAGE="$var_template_storage" - [ -n "${var_container_storage:-}" ] && CONTAINER_STORAGE="$var_container_storage" + # Create once, with storages already selected, no var_ctid/var_hostname lines + local _ensure_default_vars + _ensure_default_vars() { + _find_default_vars >/dev/null 2>&1 && return 0 - # Create once, with storages already selected, no var_ctid/var_hostname lines - local _ensure_default_vars - _ensure_default_vars() { - _find_default_vars >/dev/null 2>&1 && return 0 + local canonical="/usr/local/community-scripts/default.vars" + # Silent creation - no msg_info output + mkdir -p /usr/local/community-scripts - local canonical="/usr/local/community-scripts/default.vars" - # Silent creation - no msg_info output - mkdir -p /usr/local/community-scripts + # Pick storages before writing the file (always ask unless only one) + # Create a minimal temp file to write into + : >"$canonical" - # Pick storages before writing the file (always ask unless only one) - # Create a minimal temp file to write into - : >"$canonical" - - # Base content (no var_ctid / var_hostname here) - cat >"$canonical" <<'EOF' + # Base content (no var_ctid / var_hostname here) + cat >"$canonical" <<'EOF' # Community-Scripts defaults (var_* only). Lines starting with # are comments. # Precedence: ENV var_* > default.vars > built-ins. # Keep keys alphabetically sorted. @@ -696,12 +812,18 @@ var_fuse=no var_tun=no # Advanced Settings (Proxmox-official features) +# var_nesting: Allow nesting (required for Docker/LXC in CT) var_nesting=1 +# var_keyctl: Allow keyctl() - needed for Docker (systemd-networkd workaround) var_keyctl=0 +# var_mknod: Allow device node creation (requires kernel 5.3+, experimental) var_mknod=0 -var_mount_fs="" +# var_mount_fs: Allow specific filesystems: nfs,fuse,ext4,etc (leave empty for defaults) +var_mount_fs= +# var_protection: Prevent accidental deletion of container var_protection=no -var_timezone="" +# var_timezone: Container timezone (e.g. 
Europe/Berlin, leave empty for host timezone) +var_timezone= var_tags=community-script var_verbose=no @@ -709,47 +831,47 @@ var_verbose=no # var_pw= EOF - # Now choose storages (always prompt unless just one exists) - choose_and_set_storage_for_file "$canonical" template - choose_and_set_storage_for_file "$canonical" container + # Now choose storages (always prompt unless just one exists) + choose_and_set_storage_for_file "$canonical" template + choose_and_set_storage_for_file "$canonical" container - chmod 0644 "$canonical" - # Silent creation - no output message - } + chmod 0644 "$canonical" + # Silent creation - no output message + } - # Whitelist check - local _is_whitelisted_key - _is_whitelisted_key() { - local k="$1" - local w - for w in "${VAR_WHITELIST[@]}"; do [ "$k" = "$w" ] && return 0; done - return 1 - } + # Whitelist check + local _is_whitelisted_key + _is_whitelisted_key() { + local k="$1" + local w + for w in "${VAR_WHITELIST[@]}"; do [ "$k" = "$w" ] && return 0; done + return 1 + } - # 1) Ensure file exists - _ensure_default_vars + # 1) Ensure file exists + _ensure_default_vars - # 2) Load file - local dv - dv="$(_find_default_vars)" || { - msg_error "default.vars not found after ensure step" - return 1 - } - load_vars_file "$dv" + # 2) Load file + local dv + dv="$(_find_default_vars)" || { + msg_error "default.vars not found after ensure step" + return 1 + } + load_vars_file "$dv" - # 3) Map var_verbose → VERBOSE - if [[ -n "${var_verbose:-}" ]]; then - case "${var_verbose,,}" in 1 | yes | true | on) VERBOSE="yes" ;; 0 | no | false | off) VERBOSE="no" ;; *) VERBOSE="${var_verbose}" ;; esac - else - VERBOSE="no" - fi + # 3) Map var_verbose → VERBOSE + if [[ -n "${var_verbose:-}" ]]; then + case "${var_verbose,,}" in 1 | yes | true | on) VERBOSE="yes" ;; 0 | no | false | off) VERBOSE="no" ;; *) VERBOSE="${var_verbose}" ;; esac + else + VERBOSE="no" + fi - # 4) Apply base settings and show summary - METHOD="mydefaults-global" - base_settings "$VERBOSE" - header_info - echo -e "${DEFAULT}${BOLD}${BL}Using User Defaults (default.vars) on node $PVEHOST_NAME${CL}" - echo_default + # 4) Apply base settings and show summary + METHOD="mydefaults-global" + base_settings "$VERBOSE" + header_info + echo -e "${DEFAULT}${BOLD}${BL}Using User Defaults (default.vars) on node $PVEHOST_NAME${CL}" + echo_default } # ------------------------------------------------------------------------------ @@ -760,8 +882,8 @@ EOF # ------------------------------------------------------------------------------ get_app_defaults_path() { - local n="${NSAPP:-${APP,,}}" - echo "/usr/local/community-scripts/defaults/${n}.vars" + local n="${NSAPP:-${APP,,}}" + echo "/usr/local/community-scripts/defaults/${n}.vars" } # ------------------------------------------------------------------------------ @@ -774,32 +896,32 @@ get_app_defaults_path() { # - Extracts raw values from flags like ",gw=..." ",mtu=..." etc. # ------------------------------------------------------------------------------ if ! 
declare -p VAR_WHITELIST >/dev/null 2>&1; then - # Note: Removed var_ctid (can only exist once), var_ipv6_static (static IPs are unique) - declare -ag VAR_WHITELIST=( - var_apt_cacher var_apt_cacher_ip var_brg var_cpu var_disk var_fuse var_gpu - var_gateway var_hostname var_ipv6_method var_mac var_mtu - var_net var_ns var_pw var_ram var_tags var_tun var_unprivileged - var_verbose var_vlan var_ssh var_ssh_authorized_key var_container_storage var_template_storage - ) + # Note: Removed var_ctid (can only exist once), var_ipv6_static (static IPs are unique) + declare -ag VAR_WHITELIST=( + var_apt_cacher var_apt_cacher_ip var_brg var_cpu var_disk var_fuse var_gpu + var_gateway var_hostname var_ipv6_method var_mac var_mtu + var_net var_ns var_pw var_ram var_tags var_tun var_unprivileged + var_verbose var_vlan var_ssh var_ssh_authorized_key var_container_storage var_template_storage + ) fi # Global whitelist check function (used by _load_vars_file_to_map and others) _is_whitelisted_key() { - local k="$1" - local w - for w in "${VAR_WHITELIST[@]}"; do [ "$k" = "$w" ] && return 0; done - return 1 + local k="$1" + local w + for w in "${VAR_WHITELIST[@]}"; do [ "$k" = "$w" ] && return 0; done + return 1 } _sanitize_value() { - # Disallow Command-Substitution / Shell-Meta - case "$1" in - *'$('* | *'`'* | *';'* | *'&'* | *'<('*) - echo "" - return 0 - ;; - esac - echo "$1" + # Disallow Command-Substitution / Shell-Meta + case "$1" in + *'$('* | *'`'* | *';'* | *'&'* | *'<('*) + echo "" + return 0 + ;; + esac + echo "$1" } # Map-Parser: read var_* from file into _VARS_IN associative array @@ -807,190 +929,190 @@ _sanitize_value() { # This simplified version is used specifically for diff operations via _VARS_IN array declare -A _VARS_IN _load_vars_file_to_map() { - local file="$1" - [ -f "$file" ] || return 0 - _VARS_IN=() # Clear array - local line key val - while IFS= read -r line || [ -n "$line" ]; do - line="${line#"${line%%[![:space:]]*}"}" - line="${line%"${line##*[![:space:]]}"}" - [ -z "$line" ] && continue - case "$line" in - \#*) continue ;; - esac - key=$(printf "%s" "$line" | cut -d= -f1) - val=$(printf "%s" "$line" | cut -d= -f2-) - case "$key" in - var_*) - if _is_whitelisted_key "$key"; then - _VARS_IN["$key"]="$val" - fi - ;; - esac - done <"$file" + local file="$1" + [ -f "$file" ] || return 0 + _VARS_IN=() # Clear array + local line key val + while IFS= read -r line || [ -n "$line" ]; do + line="${line#"${line%%[![:space:]]*}"}" + line="${line%"${line##*[![:space:]]}"}" + [ -z "$line" ] && continue + case "$line" in + \#*) continue ;; + esac + key=$(printf "%s" "$line" | cut -d= -f1) + val=$(printf "%s" "$line" | cut -d= -f2-) + case "$key" in + var_*) + if _is_whitelisted_key "$key"; then + _VARS_IN["$key"]="$val" + fi + ;; + esac + done <"$file" } # Diff function for two var_* files -> produces human-readable diff list for $1 (old) vs $2 (new) _build_vars_diff() { - local oldf="$1" newf="$2" - local k - local -A OLD=() NEW=() - _load_vars_file_to_map "$oldf" - for k in "${!_VARS_IN[@]}"; do OLD["$k"]="${_VARS_IN[$k]}"; done - _load_vars_file_to_map "$newf" - for k in "${!_VARS_IN[@]}"; do NEW["$k"]="${_VARS_IN[$k]}"; done + local oldf="$1" newf="$2" + local k + local -A OLD=() NEW=() + _load_vars_file_to_map "$oldf" + for k in "${!_VARS_IN[@]}"; do OLD["$k"]="${_VARS_IN[$k]}"; done + _load_vars_file_to_map "$newf" + for k in "${!_VARS_IN[@]}"; do NEW["$k"]="${_VARS_IN[$k]}"; done - local out - out+="# Diff for ${APP} (${NSAPP})\n" - out+="# Old: ${oldf}\n# New: ${newf}\n\n" + local 
out + out+="# Diff for ${APP} (${NSAPP})\n" + out+="# Old: ${oldf}\n# New: ${newf}\n\n" - local found_change=0 + local found_change=0 - # Changed & Removed - for k in "${!OLD[@]}"; do - if [[ -v NEW["$k"] ]]; then - if [[ "${OLD[$k]}" != "${NEW[$k]}" ]]; then - out+="~ ${k}\n - old: ${OLD[$k]}\n + new: ${NEW[$k]}\n" - found_change=1 - fi - else - out+="- ${k}\n - old: ${OLD[$k]}\n" - found_change=1 - fi - done - - # Added - for k in "${!NEW[@]}"; do - if [[ ! -v OLD["$k"] ]]; then - out+="+ ${k}\n + new: ${NEW[$k]}\n" - found_change=1 - fi - done - - if [[ $found_change -eq 0 ]]; then - out+="(No differences)\n" + # Changed & Removed + for k in "${!OLD[@]}"; do + if [[ -v NEW["$k"] ]]; then + if [[ "${OLD[$k]}" != "${NEW[$k]}" ]]; then + out+="~ ${k}\n - old: ${OLD[$k]}\n + new: ${NEW[$k]}\n" + found_change=1 + fi + else + out+="- ${k}\n - old: ${OLD[$k]}\n" + found_change=1 fi + done - printf "%b" "$out" + # Added + for k in "${!NEW[@]}"; do + if [[ ! -v OLD["$k"] ]]; then + out+="+ ${k}\n + new: ${NEW[$k]}\n" + found_change=1 + fi + done + + if [[ $found_change -eq 0 ]]; then + out+="(No differences)\n" + fi + + printf "%b" "$out" } # Build a temporary .vars file from current advanced settings _build_current_app_vars_tmp() { - tmpf="$(mktemp /tmp/${NSAPP:-app}.vars.new.XXXXXX)" + tmpf="$(mktemp /tmp/${NSAPP:-app}.vars.new.XXXXXX)" - # NET/GW - _net="${NET:-}" - _gate="" - case "${GATE:-}" in - ,gw=*) _gate=$(echo "$GATE" | sed 's/^,gw=//') ;; - esac + # NET/GW + _net="${NET:-}" + _gate="" + case "${GATE:-}" in + ,gw=*) _gate=$(echo "$GATE" | sed 's/^,gw=//') ;; + esac - # IPv6 - _ipv6_method="${IPV6_METHOD:-auto}" - _ipv6_static="" - _ipv6_gateway="" - if [ "$_ipv6_method" = "static" ]; then - _ipv6_static="${IPV6_ADDR:-}" - _ipv6_gateway="${IPV6_GATE:-}" - fi + # IPv6 + _ipv6_method="${IPV6_METHOD:-auto}" + _ipv6_static="" + _ipv6_gateway="" + if [ "$_ipv6_method" = "static" ]; then + _ipv6_static="${IPV6_ADDR:-}" + _ipv6_gateway="${IPV6_GATE:-}" + fi - # MTU/VLAN/MAC - _mtu="" - _vlan="" - _mac="" - case "${MTU:-}" in - ,mtu=*) _mtu=$(echo "$MTU" | sed 's/^,mtu=//') ;; - esac - case "${VLAN:-}" in - ,tag=*) _vlan=$(echo "$VLAN" | sed 's/^,tag=//') ;; - esac - case "${MAC:-}" in - ,hwaddr=*) _mac=$(echo "$MAC" | sed 's/^,hwaddr=//') ;; - esac + # MTU/VLAN/MAC + _mtu="" + _vlan="" + _mac="" + case "${MTU:-}" in + ,mtu=*) _mtu=$(echo "$MTU" | sed 's/^,mtu=//') ;; + esac + case "${VLAN:-}" in + ,tag=*) _vlan=$(echo "$VLAN" | sed 's/^,tag=//') ;; + esac + case "${MAC:-}" in + ,hwaddr=*) _mac=$(echo "$MAC" | sed 's/^,hwaddr=//') ;; + esac - # DNS / Searchdomain - _ns="" - _searchdomain="" - case "${NS:-}" in - -nameserver=*) _ns=$(echo "$NS" | sed 's/^-nameserver=//') ;; - esac - case "${SD:-}" in - -searchdomain=*) _searchdomain=$(echo "$SD" | sed 's/^-searchdomain=//') ;; - esac + # DNS / Searchdomain + _ns="" + _searchdomain="" + case "${NS:-}" in + -nameserver=*) _ns=$(echo "$NS" | sed 's/^-nameserver=//') ;; + esac + case "${SD:-}" in + -searchdomain=*) _searchdomain=$(echo "$SD" | sed 's/^-searchdomain=//') ;; + esac - # SSH / APT / Features - _ssh="${SSH:-no}" - _ssh_auth="${SSH_AUTHORIZED_KEY:-}" - _apt_cacher="${APT_CACHER:-}" - _apt_cacher_ip="${APT_CACHER_IP:-}" - _fuse="${ENABLE_FUSE:-no}" - _tun="${ENABLE_TUN:-no}" - _nesting="${ENABLE_NESTING:-1}" - _keyctl="${ENABLE_KEYCTL:-0}" - _mknod="${ENABLE_MKNOD:-0}" - _mount_fs="${ALLOW_MOUNT_FS:-}" - _protect="${PROTECT_CT:-no}" - _timezone="${CT_TIMEZONE:-}" - _tags="${TAGS:-}" - _verbose="${VERBOSE:-no}" + # SSH / APT / Features + 
_ssh="${SSH:-no}" + _ssh_auth="${SSH_AUTHORIZED_KEY:-}" + _apt_cacher="${APT_CACHER:-}" + _apt_cacher_ip="${APT_CACHER_IP:-}" + _fuse="${ENABLE_FUSE:-no}" + _tun="${ENABLE_TUN:-no}" + _nesting="${ENABLE_NESTING:-1}" + _keyctl="${ENABLE_KEYCTL:-0}" + _mknod="${ENABLE_MKNOD:-0}" + _mount_fs="${ALLOW_MOUNT_FS:-}" + _protect="${PROTECT_CT:-no}" + _timezone="${CT_TIMEZONE:-}" + _tags="${TAGS:-}" + _verbose="${VERBOSE:-no}" - # Type / Resources / Identity - _unpriv="${CT_TYPE:-1}" - _cpu="${CORE_COUNT:-1}" - _ram="${RAM_SIZE:-1024}" - _disk="${DISK_SIZE:-4}" - _hostname="${HN:-$NSAPP}" + # Type / Resources / Identity + _unpriv="${CT_TYPE:-1}" + _cpu="${CORE_COUNT:-1}" + _ram="${RAM_SIZE:-1024}" + _disk="${DISK_SIZE:-4}" + _hostname="${HN:-$NSAPP}" - # Storage - _tpl_storage="${TEMPLATE_STORAGE:-${var_template_storage:-}}" - _ct_storage="${CONTAINER_STORAGE:-${var_container_storage:-}}" + # Storage + _tpl_storage="${TEMPLATE_STORAGE:-${var_template_storage:-}}" + _ct_storage="${CONTAINER_STORAGE:-${var_container_storage:-}}" - { - echo "# App-specific defaults for ${APP} (${NSAPP})" - echo "# Generated on $(date -u '+%Y-%m-%dT%H:%M:%SZ')" - echo + { + echo "# App-specific defaults for ${APP} (${NSAPP})" + echo "# Generated on $(date -u '+%Y-%m-%dT%H:%M:%SZ')" + echo - echo "var_unprivileged=$(_sanitize_value "$_unpriv")" - echo "var_cpu=$(_sanitize_value "$_cpu")" - echo "var_ram=$(_sanitize_value "$_ram")" - echo "var_disk=$(_sanitize_value "$_disk")" + echo "var_unprivileged=$(_sanitize_value "$_unpriv")" + echo "var_cpu=$(_sanitize_value "$_cpu")" + echo "var_ram=$(_sanitize_value "$_ram")" + echo "var_disk=$(_sanitize_value "$_disk")" - [ -n "${BRG:-}" ] && echo "var_brg=$(_sanitize_value "$BRG")" - [ -n "$_net" ] && echo "var_net=$(_sanitize_value "$_net")" - [ -n "$_gate" ] && echo "var_gateway=$(_sanitize_value "$_gate")" - [ -n "$_mtu" ] && echo "var_mtu=$(_sanitize_value "$_mtu")" - [ -n "$_vlan" ] && echo "var_vlan=$(_sanitize_value "$_vlan")" - [ -n "$_mac" ] && echo "var_mac=$(_sanitize_value "$_mac")" - [ -n "$_ns" ] && echo "var_ns=$(_sanitize_value "$_ns")" + [ -n "${BRG:-}" ] && echo "var_brg=$(_sanitize_value "$BRG")" + [ -n "$_net" ] && echo "var_net=$(_sanitize_value "$_net")" + [ -n "$_gate" ] && echo "var_gateway=$(_sanitize_value "$_gate")" + [ -n "$_mtu" ] && echo "var_mtu=$(_sanitize_value "$_mtu")" + [ -n "$_vlan" ] && echo "var_vlan=$(_sanitize_value "$_vlan")" + [ -n "$_mac" ] && echo "var_mac=$(_sanitize_value "$_mac")" + [ -n "$_ns" ] && echo "var_ns=$(_sanitize_value "$_ns")" - [ -n "$_ipv6_method" ] && echo "var_ipv6_method=$(_sanitize_value "$_ipv6_method")" - # var_ipv6_static removed - static IPs are unique, can't be default + [ -n "$_ipv6_method" ] && echo "var_ipv6_method=$(_sanitize_value "$_ipv6_method")" + # var_ipv6_static removed - static IPs are unique, can't be default - [ -n "$_ssh" ] && echo "var_ssh=$(_sanitize_value "$_ssh")" - [ -n "$_ssh_auth" ] && echo "var_ssh_authorized_key=$(_sanitize_value "$_ssh_auth")" + [ -n "$_ssh" ] && echo "var_ssh=$(_sanitize_value "$_ssh")" + [ -n "$_ssh_auth" ] && echo "var_ssh_authorized_key=$(_sanitize_value "$_ssh_auth")" - [ -n "$_apt_cacher" ] && echo "var_apt_cacher=$(_sanitize_value "$_apt_cacher")" - [ -n "$_apt_cacher_ip" ] && echo "var_apt_cacher_ip=$(_sanitize_value "$_apt_cacher_ip")" + [ -n "$_apt_cacher" ] && echo "var_apt_cacher=$(_sanitize_value "$_apt_cacher")" + [ -n "$_apt_cacher_ip" ] && echo "var_apt_cacher_ip=$(_sanitize_value "$_apt_cacher_ip")" - [ -n "$_fuse" ] && echo 
"var_fuse=$(_sanitize_value "$_fuse")" - [ -n "$_tun" ] && echo "var_tun=$(_sanitize_value "$_tun")" - [ -n "$_nesting" ] && echo "var_nesting=$(_sanitize_value "$_nesting")" - [ -n "$_keyctl" ] && echo "var_keyctl=$(_sanitize_value "$_keyctl")" - [ -n "$_mknod" ] && echo "var_mknod=$(_sanitize_value "$_mknod")" - [ -n "$_mount_fs" ] && echo "var_mount_fs=$(_sanitize_value "$_mount_fs")" - [ -n "$_protect" ] && echo "var_protection=$(_sanitize_value "$_protect")" - [ -n "$_timezone" ] && echo "var_timezone=$(_sanitize_value "$_timezone")" - [ -n "$_tags" ] && echo "var_tags=$(_sanitize_value "$_tags")" - [ -n "$_verbose" ] && echo "var_verbose=$(_sanitize_value "$_verbose")" + [ -n "$_fuse" ] && echo "var_fuse=$(_sanitize_value "$_fuse")" + [ -n "$_tun" ] && echo "var_tun=$(_sanitize_value "$_tun")" + [ -n "$_nesting" ] && echo "var_nesting=$(_sanitize_value "$_nesting")" + [ -n "$_keyctl" ] && echo "var_keyctl=$(_sanitize_value "$_keyctl")" + [ -n "$_mknod" ] && echo "var_mknod=$(_sanitize_value "$_mknod")" + [ -n "$_mount_fs" ] && echo "var_mount_fs=$(_sanitize_value "$_mount_fs")" + [ -n "$_protect" ] && echo "var_protection=$(_sanitize_value "$_protect")" + [ -n "$_timezone" ] && echo "var_timezone=$(_sanitize_value "$_timezone")" + [ -n "$_tags" ] && echo "var_tags=$(_sanitize_value "$_tags")" + [ -n "$_verbose" ] && echo "var_verbose=$(_sanitize_value "$_verbose")" - [ -n "$_hostname" ] && echo "var_hostname=$(_sanitize_value "$_hostname")" - [ -n "$_searchdomain" ] && echo "var_searchdomain=$(_sanitize_value "$_searchdomain")" + [ -n "$_hostname" ] && echo "var_hostname=$(_sanitize_value "$_hostname")" + [ -n "$_searchdomain" ] && echo "var_searchdomain=$(_sanitize_value "$_searchdomain")" - [ -n "$_tpl_storage" ] && echo "var_template_storage=$(_sanitize_value "$_tpl_storage")" - [ -n "$_ct_storage" ] && echo "var_container_storage=$(_sanitize_value "$_ct_storage")" - } >"$tmpf" + [ -n "$_tpl_storage" ] && echo "var_template_storage=$(_sanitize_value "$_tpl_storage")" + [ -n "$_ct_storage" ] && echo "var_container_storage=$(_sanitize_value "$_ct_storage")" + } >"$tmpf" - echo "$tmpf" + echo "$tmpf" } # ------------------------------------------------------------------------------ @@ -1001,103 +1123,103 @@ _build_current_app_vars_tmp() { # - If file exists: shows diff and allows Update, Keep, View Diff, or Cancel # ------------------------------------------------------------------------------ maybe_offer_save_app_defaults() { - local app_vars_path - app_vars_path="$(get_app_defaults_path)" + local app_vars_path + app_vars_path="$(get_app_defaults_path)" - # always build from current settings - local new_tmp diff_tmp - new_tmp="$(_build_current_app_vars_tmp)" - diff_tmp="$(mktemp -p /tmp "${NSAPP:-app}.vars.diff.XXXXXX")" + # always build from current settings + local new_tmp diff_tmp + new_tmp="$(_build_current_app_vars_tmp)" + diff_tmp="$(mktemp -p /tmp "${NSAPP:-app}.vars.diff.XXXXXX")" - # 1) if no file → offer to create - if [[ ! -f "$app_vars_path" ]]; then - if whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \ - --yesno "Save these advanced settings as defaults for ${APP}?\n\nThis will create:\n${app_vars_path}" 12 72; then - mkdir -p "$(dirname "$app_vars_path")" - install -m 0644 "$new_tmp" "$app_vars_path" - msg_ok "Saved app defaults: ${app_vars_path}" - fi - rm -f "$new_tmp" "$diff_tmp" - return 0 + # 1) if no file → offer to create + if [[ ! 
-f "$app_vars_path" ]]; then + if whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \ + --yesno "Save these advanced settings as defaults for ${APP}?\n\nThis will create:\n${app_vars_path}" 12 72; then + mkdir -p "$(dirname "$app_vars_path")" + install -m 0644 "$new_tmp" "$app_vars_path" + msg_ok "Saved app defaults: ${app_vars_path}" fi - - # 2) if file exists → build diff - _build_vars_diff "$app_vars_path" "$new_tmp" >"$diff_tmp" - - # if no differences → do nothing - if grep -q "^(No differences)$" "$diff_tmp"; then - rm -f "$new_tmp" "$diff_tmp" - return 0 - fi - - # 3) if file exists → show menu with default selection "Update Defaults" - local app_vars_file - app_vars_file="$(basename "$app_vars_path")" - - while true; do - local sel - sel="$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \ - --title "APP DEFAULTS – ${APP}" \ - --menu "Differences detected. What do you want to do?" 20 78 10 \ - "Update Defaults" "Write new values to ${app_vars_file}" \ - "Keep Current" "Keep existing defaults (no changes)" \ - "View Diff" "Show a detailed diff" \ - "Cancel" "Abort without changes" \ - --default-item "Update Defaults" \ - 3>&1 1>&2 2>&3)" || { sel="Cancel"; } - - case "$sel" in - "Update Defaults") - install -m 0644 "$new_tmp" "$app_vars_path" - msg_ok "Updated app defaults: ${app_vars_path}" - break - ;; - "Keep Current") - msg_custom "ℹ️" "${BL}" "Keeping current app defaults: ${app_vars_path}" - break - ;; - "View Diff") - whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \ - --title "Diff – ${APP}" \ - --scrolltext --textbox "$diff_tmp" 25 100 - ;; - "Cancel" | *) - msg_custom "🚫" "${YW}" "Canceled. No changes to app defaults." - break - ;; - esac - done - rm -f "$new_tmp" "$diff_tmp" + return 0 + fi + + # 2) if file exists → build diff + _build_vars_diff "$app_vars_path" "$new_tmp" >"$diff_tmp" + + # if no differences → do nothing + if grep -q "^(No differences)$" "$diff_tmp"; then + rm -f "$new_tmp" "$diff_tmp" + return 0 + fi + + # 3) if file exists → show menu with default selection "Update Defaults" + local app_vars_file + app_vars_file="$(basename "$app_vars_path")" + + while true; do + local sel + sel="$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \ + --title "APP DEFAULTS – ${APP}" \ + --menu "Differences detected. What do you want to do?" 20 78 10 \ + "Update Defaults" "Write new values to ${app_vars_file}" \ + "Keep Current" "Keep existing defaults (no changes)" \ + "View Diff" "Show a detailed diff" \ + "Cancel" "Abort without changes" \ + --default-item "Update Defaults" \ + 3>&1 1>&2 2>&3)" || { sel="Cancel"; } + + case "$sel" in + "Update Defaults") + install -m 0644 "$new_tmp" "$app_vars_path" + msg_ok "Updated app defaults: ${app_vars_path}" + break + ;; + "Keep Current") + msg_custom "ℹ️" "${BL}" "Keeping current app defaults: ${app_vars_path}" + break + ;; + "View Diff") + whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \ + --title "Diff – ${APP}" \ + --scrolltext --textbox "$diff_tmp" 25 100 + ;; + "Cancel" | *) + msg_custom "🚫" "${YW}" "Canceled. No changes to app defaults." 
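+      # Nothing is written on cancel; the temporary new-vars and diff files
+      # created above are removed after the loop regardless of the selection.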
+ break + ;; + esac + done + + rm -f "$new_tmp" "$diff_tmp" } ensure_storage_selection_for_vars_file() { - local vf="$1" + local vf="$1" - # Read stored values (if any) - local tpl ct - tpl=$(grep -E '^var_template_storage=' "$vf" | cut -d= -f2-) - ct=$(grep -E '^var_container_storage=' "$vf" | cut -d= -f2-) + # Read stored values (if any) + local tpl ct + tpl=$(grep -E '^var_template_storage=' "$vf" | cut -d= -f2-) + ct=$(grep -E '^var_container_storage=' "$vf" | cut -d= -f2-) - if [[ -n "$tpl" && -n "$ct" ]]; then - TEMPLATE_STORAGE="$tpl" - CONTAINER_STORAGE="$ct" - return 0 - fi + if [[ -n "$tpl" && -n "$ct" ]]; then + TEMPLATE_STORAGE="$tpl" + CONTAINER_STORAGE="$ct" + return 0 + fi - choose_and_set_storage_for_file "$vf" template - choose_and_set_storage_for_file "$vf" container + choose_and_set_storage_for_file "$vf" template + choose_and_set_storage_for_file "$vf" container - # Silent operation - no output message + # Silent operation - no output message } ensure_global_default_vars_file() { - local vars_path="/usr/local/community-scripts/default.vars" - if [[ ! -f "$vars_path" ]]; then - mkdir -p "$(dirname "$vars_path")" - touch "$vars_path" - fi - echo "$vars_path" + local vars_path="/usr/local/community-scripts/default.vars" + if [[ ! -f "$vars_path" ]]; then + mkdir -p "$(dirname "$vars_path")" + touch "$vars_path" + fi + echo "$vars_path" } # ============================================================================== @@ -1113,785 +1235,785 @@ ensure_global_default_vars_file() { # - Allows user to customize all container settings # ------------------------------------------------------------------------------ advanced_settings() { - # Enter alternate screen buffer to prevent flicker between dialogs - tput smcup 2>/dev/null || true - trap 'tput rmcup 2>/dev/null || true' RETURN + # Enter alternate screen buffer to prevent flicker between dialogs + tput smcup 2>/dev/null || true + trap 'tput rmcup 2>/dev/null || true' RETURN - # Initialize defaults - TAGS="community-script;${var_tags:-}" - local STEP=1 - local MAX_STEP=28 + # Initialize defaults + TAGS="community-script;${var_tags:-}" + local STEP=1 + local MAX_STEP=28 - # Store values for back navigation - inherit from var_* app defaults - local _ct_type="${var_unprivileged:-1}" - local _pw="" - local _pw_display="Automatic Login" - local _ct_id="$NEXTID" - local _hostname="$NSAPP" - local _disk_size="${var_disk:-4}" - local _core_count="${var_cpu:-1}" - local _ram_size="${var_ram:-1024}" - local _bridge="${var_brg:-vmbr0}" - local _net="${var_net:-dhcp}" - local _gate="${var_gateway:-}" - local _ipv6_method="${var_ipv6_method:-auto}" - local _ipv6_addr="" - local _ipv6_gate="" - local _apt_cacher="${var_apt_cacher:-no}" - local _apt_cacher_ip="${var_apt_cacher_ip:-}" - local _mtu="${var_mtu:-}" - local _sd="${var_searchdomain:-}" - local _ns="${var_ns:-}" - local _mac="${var_mac:-}" - local _vlan="${var_vlan:-}" - local _tags="$TAGS" - local _enable_fuse="${var_fuse:-no}" - local _enable_tun="${var_tun:-no}" - local _enable_gpu="${var_gpu:-no}" - local _enable_nesting="${var_nesting:-1}" - local _verbose="${var_verbose:-no}" - local _enable_keyctl="${var_keyctl:-0}" - local _enable_mknod="${var_mknod:-0}" - local _mount_fs="${var_mount_fs:-}" - local _protect_ct="${var_protection:-no}" + # Store values for back navigation - inherit from var_* app defaults + local _ct_type="${var_unprivileged:-1}" + local _pw="" + local _pw_display="Automatic Login" + local _ct_id="$NEXTID" + local _hostname="$NSAPP" + local 
_disk_size="${var_disk:-4}" + local _core_count="${var_cpu:-1}" + local _ram_size="${var_ram:-1024}" + local _bridge="${var_brg:-vmbr0}" + local _net="${var_net:-dhcp}" + local _gate="${var_gateway:-}" + local _ipv6_method="${var_ipv6_method:-auto}" + local _ipv6_addr="" + local _ipv6_gate="" + local _apt_cacher="${var_apt_cacher:-no}" + local _apt_cacher_ip="${var_apt_cacher_ip:-}" + local _mtu="${var_mtu:-}" + local _sd="${var_searchdomain:-}" + local _ns="${var_ns:-}" + local _mac="${var_mac:-}" + local _vlan="${var_vlan:-}" + local _tags="$TAGS" + local _enable_fuse="${var_fuse:-no}" + local _enable_tun="${var_tun:-no}" + local _enable_gpu="${var_gpu:-no}" + local _enable_nesting="${var_nesting:-1}" + local _verbose="${var_verbose:-no}" + local _enable_keyctl="${var_keyctl:-0}" + local _enable_mknod="${var_mknod:-0}" + local _mount_fs="${var_mount_fs:-}" + local _protect_ct="${var_protection:-no}" - # Detect host timezone for default (if not set via var_timezone) - local _host_timezone="" - if command -v timedatectl >/dev/null 2>&1; then - _host_timezone=$(timedatectl show --value --property=Timezone 2>/dev/null || echo "") - elif [ -f /etc/timezone ]; then - _host_timezone=$(cat /etc/timezone 2>/dev/null || echo "") - fi - local _ct_timezone="${var_timezone:-$_host_timezone}" + # Detect host timezone for default (if not set via var_timezone) + local _host_timezone="" + if command -v timedatectl >/dev/null 2>&1; then + _host_timezone=$(timedatectl show --value --property=Timezone 2>/dev/null || echo "") + elif [ -f /etc/timezone ]; then + _host_timezone=$(cat /etc/timezone 2>/dev/null || echo "") + fi + local _ct_timezone="${var_timezone:-$_host_timezone}" - # Helper to show current progress - show_progress() { - local current=$1 - local total=$MAX_STEP - echo -e "\n${INFO}${BOLD}${DGN}Step $current of $total${CL}" - } + # Helper to show current progress + show_progress() { + local current=$1 + local total=$MAX_STEP + echo -e "\n${INFO}${BOLD}${DGN}Step $current of $total${CL}" + } - # Detect available bridges (do this once) - local BRIDGES="" - local BRIDGE_MENU_OPTIONS=() - _detect_bridges() { - IFACE_FILEPATH_LIST="/etc/network/interfaces"$'\n'$(find "/etc/network/interfaces.d/" -type f 2>/dev/null) - BRIDGES="" - local OLD_IFS=$IFS - IFS=$'\n' - for iface_filepath in ${IFACE_FILEPATH_LIST}; do - local iface_indexes_tmpfile=$(mktemp -q -u '.iface-XXXX') - (grep -Pn '^\s*iface' "${iface_filepath}" 2>/dev/null | cut -d':' -f1 && wc -l "${iface_filepath}" 2>/dev/null | cut -d' ' -f1) | awk 'FNR==1 {line=$0; next} {print line":"$0-1; line=$0}' >"${iface_indexes_tmpfile}" 2>/dev/null || true - if [ -f "${iface_indexes_tmpfile}" ]; then - while read -r pair; do - local start=$(echo "${pair}" | cut -d':' -f1) - local end=$(echo "${pair}" | cut -d':' -f2) - if awk "NR >= ${start} && NR <= ${end}" "${iface_filepath}" 2>/dev/null | grep -qP '^\s*(bridge[-_](ports|stp|fd|vlan-aware|vids)|ovs_type\s+OVSBridge)\b'; then - local iface_name=$(sed "${start}q;d" "${iface_filepath}" | awk '{print $2}') - BRIDGES="${iface_name}"$'\n'"${BRIDGES}" - fi - done <"${iface_indexes_tmpfile}" - rm -f "${iface_indexes_tmpfile}" - fi - done - IFS=$OLD_IFS - BRIDGES=$(echo "$BRIDGES" | grep -v '^\s*$' | sort | uniq) + # Detect available bridges (do this once) + local BRIDGES="" + local BRIDGE_MENU_OPTIONS=() + _detect_bridges() { + IFACE_FILEPATH_LIST="/etc/network/interfaces"$'\n'$(find "/etc/network/interfaces.d/" -type f 2>/dev/null) + BRIDGES="" + local OLD_IFS=$IFS + IFS=$'\n' + for iface_filepath in 
${IFACE_FILEPATH_LIST}; do + local iface_indexes_tmpfile=$(mktemp -q -u '.iface-XXXX') + (grep -Pn '^\s*iface' "${iface_filepath}" 2>/dev/null | cut -d':' -f1 && wc -l "${iface_filepath}" 2>/dev/null | cut -d' ' -f1) | awk 'FNR==1 {line=$0; next} {print line":"$0-1; line=$0}' >"${iface_indexes_tmpfile}" 2>/dev/null || true + if [ -f "${iface_indexes_tmpfile}" ]; then + while read -r pair; do + local start=$(echo "${pair}" | cut -d':' -f1) + local end=$(echo "${pair}" | cut -d':' -f2) + if awk "NR >= ${start} && NR <= ${end}" "${iface_filepath}" 2>/dev/null | grep -qP '^\s*(bridge[-_](ports|stp|fd|vlan-aware|vids)|ovs_type\s+OVSBridge)\b'; then + local iface_name=$(sed "${start}q;d" "${iface_filepath}" | awk '{print $2}') + BRIDGES="${iface_name}"$'\n'"${BRIDGES}" + fi + done <"${iface_indexes_tmpfile}" + rm -f "${iface_indexes_tmpfile}" + fi + done + IFS=$OLD_IFS + BRIDGES=$(echo "$BRIDGES" | grep -v '^\s*$' | sort | uniq) - # Build bridge menu - BRIDGE_MENU_OPTIONS=() - if [[ -n "$BRIDGES" ]]; then - while IFS= read -r bridge; do - if [[ -n "$bridge" ]]; then - local description=$(grep -A 10 "iface $bridge" /etc/network/interfaces 2>/dev/null | grep '^#' | head -n1 | sed 's/^#\s*//') - BRIDGE_MENU_OPTIONS+=("$bridge" "${description:- }") - fi - done <<<"$BRIDGES" + # Build bridge menu + BRIDGE_MENU_OPTIONS=() + if [[ -n "$BRIDGES" ]]; then + while IFS= read -r bridge; do + if [[ -n "$bridge" ]]; then + local description=$(grep -A 10 "iface $bridge" /etc/network/interfaces 2>/dev/null | grep '^#' | head -n1 | sed 's/^#\s*//') + BRIDGE_MENU_OPTIONS+=("$bridge" "${description:- }") fi - } - _detect_bridges + done <<<"$BRIDGES" + fi + } + _detect_bridges - # Main wizard loop - while [ $STEP -le $MAX_STEP ]; do - case $STEP in + # Main wizard loop + while [ $STEP -le $MAX_STEP ]; do + case $STEP in - # ═══════════════════════════════════════════════════════════════════════════ - # STEP 1: Container Type - # ═══════════════════════════════════════════════════════════════════════════ - 1) - local default_on="ON" - local default_off="OFF" - [[ "$_ct_type" == "0" ]] && { - default_on="OFF" - default_off="ON" - } + # ═══════════════════════════════════════════════════════════════════════════ + # STEP 1: Container Type + # ═══════════════════════════════════════════════════════════════════════════ + 1) + local default_on="ON" + local default_off="OFF" + [[ "$_ct_type" == "0" ]] && { + default_on="OFF" + default_off="ON" + } - if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ - --title "CONTAINER TYPE" \ - --ok-button "Next" --cancel-button "Exit" \ - --radiolist "\nChoose container type:\n\nUse SPACE to select, ENTER to confirm." 14 58 2 \ - "1" "Unprivileged (recommended)" $default_on \ - "0" "Privileged" $default_off \ - 3>&1 1>&2 2>&3); then - [[ -n "$result" ]] && _ct_type="$result" - ((STEP++)) + if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ + --title "CONTAINER TYPE" \ + --ok-button "Next" --cancel-button "Exit" \ + --radiolist "\nChoose container type:\n\nUse SPACE to select, ENTER to confirm." 
14 58 2 \ + "1" "Unprivileged (recommended)" $default_on \ + "0" "Privileged" $default_off \ + 3>&1 1>&2 2>&3); then + [[ -n "$result" ]] && _ct_type="$result" + ((STEP++)) + else + exit_script + fi + ;; + + # ═══════════════════════════════════════════════════════════════════════════ + # STEP 2: Root Password + # ═══════════════════════════════════════════════════════════════════════════ + 2) + if PW1=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ + --title "ROOT PASSWORD" \ + --ok-button "Next" --cancel-button "Back" \ + --passwordbox "\nSet Root Password (needed for root ssh access)\n\nLeave blank for automatic login (no password)" 12 58 \ + 3>&1 1>&2 2>&3); then + + if [[ -z "$PW1" ]]; then + _pw="" + _pw_display="Automatic Login" + ((STEP++)) + elif [[ "$PW1" == *" "* ]]; then + whiptail --msgbox "Password cannot contain spaces." 8 58 + elif ((${#PW1} < 5)); then + whiptail --msgbox "Password must be at least 5 characters." 8 58 + else + # Verify password + if PW2=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ + --title "PASSWORD VERIFICATION" \ + --ok-button "Confirm" --cancel-button "Back" \ + --passwordbox "\nVerify Root Password" 10 58 \ + 3>&1 1>&2 2>&3); then + if [[ "$PW1" == "$PW2" ]]; then + _pw="-password $PW1" + _pw_display="********" + ((STEP++)) else - exit_script + whiptail --msgbox "Passwords do not match. Please try again." 8 58 fi - ;; + else + ((STEP--)) + fi + fi + else + ((STEP--)) + fi + ;; - # ═══════════════════════════════════════════════════════════════════════════ - # STEP 2: Root Password - # ═══════════════════════════════════════════════════════════════════════════ - 2) - if PW1=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ - --title "ROOT PASSWORD" \ + # ═══════════════════════════════════════════════════════════════════════════ + # STEP 3: Container ID + # ═══════════════════════════════════════════════════════════════════════════ + 3) + if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ + --title "CONTAINER ID" \ + --ok-button "Next" --cancel-button "Back" \ + --inputbox "\nSet Container ID" 10 58 "$_ct_id" \ + 3>&1 1>&2 2>&3); then + _ct_id="${result:-$NEXTID}" + ((STEP++)) + else + ((STEP--)) + fi + ;; + + # ═══════════════════════════════════════════════════════════════════════════ + # STEP 4: Hostname + # ═══════════════════════════════════════════════════════════════════════════ + 4) + if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ + --title "HOSTNAME" \ + --ok-button "Next" --cancel-button "Back" \ + --inputbox "\nSet Hostname (lowercase, alphanumeric, hyphens only)" 10 58 "$_hostname" \ + 3>&1 1>&2 2>&3); then + local hn_test="${result:-$NSAPP}" + hn_test=$(echo "${hn_test,,}" | tr -d ' ') + if [[ "$hn_test" =~ ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ ]]; then + _hostname="$hn_test" + ((STEP++)) + else + whiptail --msgbox "Invalid hostname: '$hn_test'\n\nOnly lowercase letters, digits and hyphens are allowed." 
10 58 + fi + else + ((STEP--)) + fi + ;; + + # ═══════════════════════════════════════════════════════════════════════════ + # STEP 5: Disk Size + # ═══════════════════════════════════════════════════════════════════════════ + 5) + if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ + --title "DISK SIZE" \ + --ok-button "Next" --cancel-button "Back" \ + --inputbox "\nSet Disk Size in GB" 10 58 "$_disk_size" \ + 3>&1 1>&2 2>&3); then + local disk_test="${result:-$var_disk}" + if [[ "$disk_test" =~ ^[1-9][0-9]*$ ]]; then + _disk_size="$disk_test" + ((STEP++)) + else + whiptail --msgbox "Disk size must be a positive integer!" 8 58 + fi + else + ((STEP--)) + fi + ;; + + # ═══════════════════════════════════════════════════════════════════════════ + # STEP 6: CPU Cores + # ═══════════════════════════════════════════════════════════════════════════ + 6) + if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ + --title "CPU CORES" \ + --ok-button "Next" --cancel-button "Back" \ + --inputbox "\nAllocate CPU Cores" 10 58 "$_core_count" \ + 3>&1 1>&2 2>&3); then + local cpu_test="${result:-$var_cpu}" + if [[ "$cpu_test" =~ ^[1-9][0-9]*$ ]]; then + _core_count="$cpu_test" + ((STEP++)) + else + whiptail --msgbox "CPU core count must be a positive integer!" 8 58 + fi + else + ((STEP--)) + fi + ;; + + # ═══════════════════════════════════════════════════════════════════════════ + # STEP 7: RAM Size + # ═══════════════════════════════════════════════════════════════════════════ + 7) + if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ + --title "RAM SIZE" \ + --ok-button "Next" --cancel-button "Back" \ + --inputbox "\nAllocate RAM in MiB" 10 58 "$_ram_size" \ + 3>&1 1>&2 2>&3); then + local ram_test="${result:-$var_ram}" + if [[ "$ram_test" =~ ^[1-9][0-9]*$ ]]; then + _ram_size="$ram_test" + ((STEP++)) + else + whiptail --msgbox "RAM size must be a positive integer!" 8 58 + fi + else + ((STEP--)) + fi + ;; + + # ═══════════════════════════════════════════════════════════════════════════ + # STEP 8: Network Bridge + # ═══════════════════════════════════════════════════════════════════════════ + 8) + if [[ ${#BRIDGE_MENU_OPTIONS[@]} -eq 0 ]]; then + _bridge="vmbr0" + ((STEP++)) + else + if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ + --title "NETWORK BRIDGE" \ + --ok-button "Next" --cancel-button "Back" \ + --menu "\nSelect network bridge:" 16 58 6 \ + "${BRIDGE_MENU_OPTIONS[@]}" \ + 3>&1 1>&2 2>&3); then + _bridge="${result:-vmbr0}" + ((STEP++)) + else + ((STEP--)) + fi + fi + ;; + + # ═══════════════════════════════════════════════════════════════════════════ + # STEP 9: IPv4 Configuration + # ═══════════════════════════════════════════════════════════════════════════ + 9) + if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ + --title "IPv4 CONFIGURATION" \ + --ok-button "Next" --cancel-button "Back" \ + --menu "\nSelect IPv4 Address Assignment:" 14 60 2 \ + "dhcp" "Automatic (DHCP, recommended)" \ + "static" "Static (manual entry)" \ + 3>&1 1>&2 2>&3); then + + if [[ "$result" == "static" ]]; then + # Get static IP + local static_ip + if static_ip=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ + --title "STATIC IPv4 ADDRESS" \ + --ok-button "Next" --cancel-button "Back" \ + --inputbox "\nEnter Static IPv4 CIDR Address\n(e.g. 
192.168.1.100/24)" 12 58 "" \ + 3>&1 1>&2 2>&3); then + if [[ "$static_ip" =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}/([0-9]|[1-2][0-9]|3[0-2])$ ]]; then + # Get gateway + local gateway_ip + if gateway_ip=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ + --title "GATEWAY IP" \ --ok-button "Next" --cancel-button "Back" \ - --passwordbox "\nSet Root Password (needed for root ssh access)\n\nLeave blank for automatic login (no password)" 12 58 \ + --inputbox "\nEnter Gateway IP address" 10 58 "" \ 3>&1 1>&2 2>&3); then - - if [[ -z "$PW1" ]]; then - _pw="" - _pw_display="Automatic Login" - ((STEP++)) - elif [[ "$PW1" == *" "* ]]; then - whiptail --msgbox "Password cannot contain spaces." 8 58 - elif ((${#PW1} < 5)); then - whiptail --msgbox "Password must be at least 5 characters." 8 58 + if [[ "$gateway_ip" =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}$ ]]; then + _net="$static_ip" + _gate=",gw=$gateway_ip" + ((STEP++)) else - # Verify password - if PW2=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ - --title "PASSWORD VERIFICATION" \ - --ok-button "Confirm" --cancel-button "Back" \ - --passwordbox "\nVerify Root Password" 10 58 \ - 3>&1 1>&2 2>&3); then - if [[ "$PW1" == "$PW2" ]]; then - _pw="-password $PW1" - _pw_display="********" - ((STEP++)) - else - whiptail --msgbox "Passwords do not match. Please try again." 8 58 - fi - else - ((STEP--)) - fi + whiptail --msgbox "Invalid Gateway IP format." 8 58 fi + fi else - ((STEP--)) + whiptail --msgbox "Invalid IPv4 CIDR format.\nExample: 192.168.1.100/24" 8 58 fi - ;; + fi + else + _net="dhcp" + _gate="" + ((STEP++)) + fi + else + ((STEP--)) + fi + ;; - # ═══════════════════════════════════════════════════════════════════════════ - # STEP 3: Container ID - # ═══════════════════════════════════════════════════════════════════════════ - 3) - if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ - --title "CONTAINER ID" \ - --ok-button "Next" --cancel-button "Back" \ - --inputbox "\nSet Container ID" 10 58 "$_ct_id" \ - 3>&1 1>&2 2>&3); then - _ct_id="${result:-$NEXTID}" - ((STEP++)) + # ═══════════════════════════════════════════════════════════════════════════ + # STEP 10: IPv6 Configuration + # ═══════════════════════════════════════════════════════════════════════════ + 10) + if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ + --title "IPv6 CONFIGURATION" \ + --ok-button "Next" --cancel-button "Back" \ + --menu "\nSelect IPv6 Address Management:" 16 70 5 \ + "auto" "SLAAC/AUTO (recommended) - Dynamic IPv6 from network" \ + "dhcp" "DHCPv6 - DHCP-assigned IPv6 address" \ + "static" "Static - Manual IPv6 address configuration" \ + "none" "None - No IPv6 assignment (most containers)" \ + "disable" "Fully Disabled - (breaks some services)" \ + 3>&1 1>&2 2>&3); then + + _ipv6_method="$result" + case "$result" in + static) + local ipv6_addr + if ipv6_addr=$(whiptail --backtitle "Proxmox VE Helper Scripts" \ + --title "STATIC IPv6 ADDRESS" \ + --inputbox "\nEnter IPv6 CIDR address\n(e.g. 2001:db8::1/64)" 12 58 "" \ + 3>&1 1>&2 2>&3); then + if [[ "$ipv6_addr" =~ ^([0-9a-fA-F:]+:+)+[0-9a-fA-F]+(/[0-9]{1,3})$ ]]; then + _ipv6_addr="$ipv6_addr" + # Optional gateway + _ipv6_gate=$(whiptail --backtitle "Proxmox VE Helper Scripts" \ + --title "IPv6 GATEWAY" \ + --inputbox "\nEnter IPv6 gateway (optional, leave blank for none)" 10 58 "" \ + 3>&1 1>&2 2>&3) || true + ((STEP++)) else - ((STEP--)) + whiptail --msgbox "Invalid IPv6 CIDR format." 
8 58 fi - ;; + fi + ;; + dhcp) + _ipv6_addr="dhcp" + _ipv6_gate="" + ((STEP++)) + ;; + disable) + _ipv6_addr="" + _ipv6_gate="" + ((STEP++)) + ;; + none) + _ipv6_addr="none" + _ipv6_gate="" + ((STEP++)) + ;; + *) + _ipv6_addr="" + _ipv6_gate="" + ((STEP++)) + ;; + esac + else + ((STEP--)) + fi + ;; - # ═══════════════════════════════════════════════════════════════════════════ - # STEP 4: Hostname - # ═══════════════════════════════════════════════════════════════════════════ - 4) - if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ - --title "HOSTNAME" \ - --ok-button "Next" --cancel-button "Back" \ - --inputbox "\nSet Hostname (lowercase, alphanumeric, hyphens only)" 10 58 "$_hostname" \ - 3>&1 1>&2 2>&3); then - local hn_test="${result:-$NSAPP}" - hn_test=$(echo "${hn_test,,}" | tr -d ' ') - if [[ "$hn_test" =~ ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ ]]; then - _hostname="$hn_test" - ((STEP++)) - else - whiptail --msgbox "Invalid hostname: '$hn_test'\n\nOnly lowercase letters, digits and hyphens are allowed." 10 58 - fi - else - ((STEP--)) - fi - ;; + # ═══════════════════════════════════════════════════════════════════════════ + # STEP 11: MTU Size + # ═══════════════════════════════════════════════════════════════════════════ + 11) + if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ + --title "MTU SIZE" \ + --ok-button "Next" --cancel-button "Back" \ + --inputbox "\nSet Interface MTU Size\n(leave blank for default 1500)" 12 58 "" \ + 3>&1 1>&2 2>&3); then + _mtu="$result" + ((STEP++)) + else + ((STEP--)) + fi + ;; - # ═══════════════════════════════════════════════════════════════════════════ - # STEP 5: Disk Size - # ═══════════════════════════════════════════════════════════════════════════ - 5) - if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ - --title "DISK SIZE" \ - --ok-button "Next" --cancel-button "Back" \ - --inputbox "\nSet Disk Size in GB" 10 58 "$_disk_size" \ - 3>&1 1>&2 2>&3); then - local disk_test="${result:-$var_disk}" - if [[ "$disk_test" =~ ^[1-9][0-9]*$ ]]; then - _disk_size="$disk_test" - ((STEP++)) - else - whiptail --msgbox "Disk size must be a positive integer!" 8 58 - fi - else - ((STEP--)) - fi - ;; + # ═══════════════════════════════════════════════════════════════════════════ + # STEP 12: DNS Search Domain + # ═══════════════════════════════════════════════════════════════════════════ + 12) + if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ + --title "DNS SEARCH DOMAIN" \ + --ok-button "Next" --cancel-button "Back" \ + --inputbox "\nSet DNS Search Domain\n(leave blank to use host setting)" 12 58 "" \ + 3>&1 1>&2 2>&3); then + _sd="$result" + ((STEP++)) + else + ((STEP--)) + fi + ;; - # ═══════════════════════════════════════════════════════════════════════════ - # STEP 6: CPU Cores - # ═══════════════════════════════════════════════════════════════════════════ - 6) - if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ - --title "CPU CORES" \ - --ok-button "Next" --cancel-button "Back" \ - --inputbox "\nAllocate CPU Cores" 10 58 "$_core_count" \ - 3>&1 1>&2 2>&3); then - local cpu_test="${result:-$var_cpu}" - if [[ "$cpu_test" =~ ^[1-9][0-9]*$ ]]; then - _core_count="$cpu_test" - ((STEP++)) - else - whiptail --msgbox "CPU core count must be a positive integer!" 
8 58 - fi - else - ((STEP--)) - fi - ;; + # ═══════════════════════════════════════════════════════════════════════════ + # STEP 13: DNS Server + # ═══════════════════════════════════════════════════════════════════════════ + 13) + if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ + --title "DNS SERVER" \ + --ok-button "Next" --cancel-button "Back" \ + --inputbox "\nSet DNS Server IP\n(leave blank to use host setting)" 12 58 "" \ + 3>&1 1>&2 2>&3); then + _ns="$result" + ((STEP++)) + else + ((STEP--)) + fi + ;; - # ═══════════════════════════════════════════════════════════════════════════ - # STEP 7: RAM Size - # ═══════════════════════════════════════════════════════════════════════════ - 7) - if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ - --title "RAM SIZE" \ - --ok-button "Next" --cancel-button "Back" \ - --inputbox "\nAllocate RAM in MiB" 10 58 "$_ram_size" \ - 3>&1 1>&2 2>&3); then - local ram_test="${result:-$var_ram}" - if [[ "$ram_test" =~ ^[1-9][0-9]*$ ]]; then - _ram_size="$ram_test" - ((STEP++)) - else - whiptail --msgbox "RAM size must be a positive integer!" 8 58 - fi - else - ((STEP--)) - fi - ;; + # ═══════════════════════════════════════════════════════════════════════════ + # STEP 14: MAC Address + # ═══════════════════════════════════════════════════════════════════════════ + 14) + if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ + --title "MAC ADDRESS" \ + --ok-button "Next" --cancel-button "Back" \ + --inputbox "\nSet MAC Address\n(leave blank for auto-generated)" 12 58 "" \ + 3>&1 1>&2 2>&3); then + _mac="$result" + ((STEP++)) + else + ((STEP--)) + fi + ;; - # ═══════════════════════════════════════════════════════════════════════════ - # STEP 8: Network Bridge - # ═══════════════════════════════════════════════════════════════════════════ - 8) - if [[ ${#BRIDGE_MENU_OPTIONS[@]} -eq 0 ]]; then - _bridge="vmbr0" - ((STEP++)) - else - if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ - --title "NETWORK BRIDGE" \ - --ok-button "Next" --cancel-button "Back" \ - --menu "\nSelect network bridge:" 16 58 6 \ - "${BRIDGE_MENU_OPTIONS[@]}" \ - 3>&1 1>&2 2>&3); then - _bridge="${result:-vmbr0}" - ((STEP++)) - else - ((STEP--)) - fi - fi - ;; + # ═══════════════════════════════════════════════════════════════════════════ + # STEP 15: VLAN Tag + # ═══════════════════════════════════════════════════════════════════════════ + 15) + if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ + --title "VLAN TAG" \ + --ok-button "Next" --cancel-button "Back" \ + --inputbox "\nSet VLAN Tag\n(leave blank for no VLAN)" 12 58 "" \ + 3>&1 1>&2 2>&3); then + _vlan="$result" + ((STEP++)) + else + ((STEP--)) + fi + ;; - # ═══════════════════════════════════════════════════════════════════════════ - # STEP 9: IPv4 Configuration - # ═══════════════════════════════════════════════════════════════════════════ - 9) - if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ - --title "IPv4 CONFIGURATION" \ - --ok-button "Next" --cancel-button "Back" \ - --menu "\nSelect IPv4 Address Assignment:" 14 60 2 \ - "dhcp" "Automatic (DHCP, recommended)" \ - "static" "Static (manual entry)" \ - 3>&1 1>&2 2>&3); then + # ═══════════════════════════════════════════════════════════════════════════ + # STEP 16: Tags + # ═══════════════════════════════════════════════════════════════════════════ + 16) + if 
result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ + --title "CONTAINER TAGS" \ + --ok-button "Next" --cancel-button "Back" \ + --inputbox "\nSet Custom Tags (semicolon-separated)\n(remove all for no tags)" 12 58 "$_tags" \ + 3>&1 1>&2 2>&3); then + _tags="${result:-;}" + _tags=$(echo "$_tags" | tr -d '[:space:]') + ((STEP++)) + else + ((STEP--)) + fi + ;; - if [[ "$result" == "static" ]]; then - # Get static IP - local static_ip - if static_ip=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ - --title "STATIC IPv4 ADDRESS" \ - --ok-button "Next" --cancel-button "Back" \ - --inputbox "\nEnter Static IPv4 CIDR Address\n(e.g. 192.168.1.100/24)" 12 58 "" \ - 3>&1 1>&2 2>&3); then - if [[ "$static_ip" =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}/([0-9]|[1-2][0-9]|3[0-2])$ ]]; then - # Get gateway - local gateway_ip - if gateway_ip=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ - --title "GATEWAY IP" \ - --ok-button "Next" --cancel-button "Back" \ - --inputbox "\nEnter Gateway IP address" 10 58 "" \ - 3>&1 1>&2 2>&3); then - if [[ "$gateway_ip" =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}$ ]]; then - _net="$static_ip" - _gate=",gw=$gateway_ip" - ((STEP++)) - else - whiptail --msgbox "Invalid Gateway IP format." 8 58 - fi - fi - else - whiptail --msgbox "Invalid IPv4 CIDR format.\nExample: 192.168.1.100/24" 8 58 - fi - fi - else - _net="dhcp" - _gate="" - ((STEP++)) - fi - else - ((STEP--)) - fi - ;; + # ═══════════════════════════════════════════════════════════════════════════ + # STEP 17: SSH Settings + # ═══════════════════════════════════════════════════════════════════════════ + 17) + configure_ssh_settings "Step $STEP/$MAX_STEP" + # configure_ssh_settings handles its own flow, always advance + ((STEP++)) + ;; - # ═══════════════════════════════════════════════════════════════════════════ - # STEP 10: IPv6 Configuration - # ═══════════════════════════════════════════════════════════════════════════ - 10) - if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ - --title "IPv6 CONFIGURATION" \ - --ok-button "Next" --cancel-button "Back" \ - --menu "\nSelect IPv6 Address Management:" 16 70 5 \ - "auto" "SLAAC/AUTO (recommended) - Dynamic IPv6 from network" \ - "dhcp" "DHCPv6 - DHCP-assigned IPv6 address" \ - "static" "Static - Manual IPv6 address configuration" \ - "none" "None - No IPv6 assignment (most containers)" \ - "disable" "Fully Disabled - (breaks some services)" \ - 3>&1 1>&2 2>&3); then + # ═══════════════════════════════════════════════════════════════════════════ + # STEP 18: FUSE Support + # ═══════════════════════════════════════════════════════════════════════════ + 18) + local fuse_default_flag="--defaultno" + [[ "$_enable_fuse" == "yes" || "$_enable_fuse" == "1" ]] && fuse_default_flag="" - _ipv6_method="$result" - case "$result" in - static) - local ipv6_addr - if ipv6_addr=$(whiptail --backtitle "Proxmox VE Helper Scripts" \ - --title "STATIC IPv6 ADDRESS" \ - --inputbox "\nEnter IPv6 CIDR address\n(e.g. 2001:db8::1/64)" 12 58 "" \ - 3>&1 1>&2 2>&3); then - if [[ "$ipv6_addr" =~ ^([0-9a-fA-F:]+:+)+[0-9a-fA-F]+(/[0-9]{1,3})$ ]]; then - _ipv6_addr="$ipv6_addr" - # Optional gateway - _ipv6_gate=$(whiptail --backtitle "Proxmox VE Helper Scripts" \ - --title "IPv6 GATEWAY" \ - --inputbox "\nEnter IPv6 gateway (optional, leave blank for none)" 10 58 "" \ - 3>&1 1>&2 2>&3) || true - ((STEP++)) - else - whiptail --msgbox "Invalid IPv6 CIDR format." 
8 58 - fi - fi - ;; - dhcp) - _ipv6_addr="dhcp" - _ipv6_gate="" - ((STEP++)) - ;; - disable) - _ipv6_addr="" - _ipv6_gate="" - ((STEP++)) - ;; - none) - _ipv6_addr="none" - _ipv6_gate="" - ((STEP++)) - ;; - *) - _ipv6_addr="" - _ipv6_gate="" - ((STEP++)) - ;; - esac - else - ((STEP--)) - fi - ;; + if whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ + --title "FUSE SUPPORT" \ + --ok-button "Next" --cancel-button "Back" \ + $fuse_default_flag \ + --yesno "\nEnable FUSE support?\n\nRequired for: rclone, mergerfs, AppImage, etc.\n\n(App default: ${var_fuse:-no})" 14 58; then + _enable_fuse="yes" + else + if [ $? -eq 1 ]; then + _enable_fuse="no" + else + ((STEP--)) + continue + fi + fi + ((STEP++)) + ;; - # ═══════════════════════════════════════════════════════════════════════════ - # STEP 11: MTU Size - # ═══════════════════════════════════════════════════════════════════════════ - 11) - if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ - --title "MTU SIZE" \ - --ok-button "Next" --cancel-button "Back" \ - --inputbox "\nSet Interface MTU Size\n(leave blank for default 1500)" 12 58 "" \ - 3>&1 1>&2 2>&3); then - _mtu="$result" - ((STEP++)) - else - ((STEP--)) - fi - ;; + # ═══════════════════════════════════════════════════════════════════════════ + # STEP 19: TUN/TAP Support + # ═══════════════════════════════════════════════════════════════════════════ + 19) + local tun_default_flag="--defaultno" + [[ "$_enable_tun" == "yes" || "$_enable_tun" == "1" ]] && tun_default_flag="" - # ═══════════════════════════════════════════════════════════════════════════ - # STEP 12: DNS Search Domain - # ═══════════════════════════════════════════════════════════════════════════ - 12) - if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ - --title "DNS SEARCH DOMAIN" \ - --ok-button "Next" --cancel-button "Back" \ - --inputbox "\nSet DNS Search Domain\n(leave blank to use host setting)" 12 58 "" \ - 3>&1 1>&2 2>&3); then - _sd="$result" - ((STEP++)) - else - ((STEP--)) - fi - ;; + if whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ + --title "TUN/TAP SUPPORT" \ + --ok-button "Next" --cancel-button "Back" \ + $tun_default_flag \ + --yesno "\nEnable TUN/TAP device support?\n\nRequired for: VPN apps (WireGuard, OpenVPN, Tailscale),\nnetwork tunneling, and containerized networking.\n\n(App default: ${var_tun:-no})" 14 62; then + _enable_tun="yes" + else + if [ $? 
-eq 1 ]; then + _enable_tun="no" + else + ((STEP--)) + continue + fi + fi + ((STEP++)) + ;; - # ═══════════════════════════════════════════════════════════════════════════ - # STEP 13: DNS Server - # ═══════════════════════════════════════════════════════════════════════════ - 13) - if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ - --title "DNS SERVER" \ - --ok-button "Next" --cancel-button "Back" \ - --inputbox "\nSet DNS Server IP\n(leave blank to use host setting)" 12 58 "" \ - 3>&1 1>&2 2>&3); then - _ns="$result" - ((STEP++)) - else - ((STEP--)) - fi - ;; + # ═══════════════════════════════════════════════════════════════════════════ + # STEP 20: Nesting Support + # ═══════════════════════════════════════════════════════════════════════════ + 20) + local nesting_default_flag="" + [[ "$_enable_nesting" == "0" || "$_enable_nesting" == "no" ]] && nesting_default_flag="--defaultno" - # ═══════════════════════════════════════════════════════════════════════════ - # STEP 14: MAC Address - # ═══════════════════════════════════════════════════════════════════════════ - 14) - if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ - --title "MAC ADDRESS" \ - --ok-button "Next" --cancel-button "Back" \ - --inputbox "\nSet MAC Address\n(leave blank for auto-generated)" 12 58 "" \ - 3>&1 1>&2 2>&3); then - _mac="$result" - ((STEP++)) - else - ((STEP--)) - fi - ;; + if whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ + --title "NESTING SUPPORT" \ + --ok-button "Next" --cancel-button "Back" \ + $nesting_default_flag \ + --yesno "\nEnable Nesting?\n\nRequired for: Docker, LXC inside LXC, Podman,\nand other containerization tools.\n\n(App default: ${var_nesting:-1})" 14 58; then + _enable_nesting="1" + else + if [ $? 
-eq 1 ]; then + _enable_nesting="0" + else + ((STEP--)) + continue + fi + fi + ((STEP++)) + ;; - # ═══════════════════════════════════════════════════════════════════════════ - # STEP 15: VLAN Tag - # ═══════════════════════════════════════════════════════════════════════════ - 15) - if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ - --title "VLAN TAG" \ - --ok-button "Next" --cancel-button "Back" \ - --inputbox "\nSet VLAN Tag\n(leave blank for no VLAN)" 12 58 "" \ - 3>&1 1>&2 2>&3); then - _vlan="$result" - ((STEP++)) - else - ((STEP--)) - fi - ;; + # ═══════════════════════════════════════════════════════════════════════════ + # STEP 21: GPU Passthrough + # ═══════════════════════════════════════════════════════════════════════════ + 21) + local gpu_default_flag="--defaultno" + [[ "$_enable_gpu" == "yes" ]] && gpu_default_flag="" - # ═══════════════════════════════════════════════════════════════════════════ - # STEP 16: Tags - # ═══════════════════════════════════════════════════════════════════════════ - 16) - if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ - --title "CONTAINER TAGS" \ - --ok-button "Next" --cancel-button "Back" \ - --inputbox "\nSet Custom Tags (semicolon-separated)\n(remove all for no tags)" 12 58 "$_tags" \ - 3>&1 1>&2 2>&3); then - _tags="${result:-;}" - _tags=$(echo "$_tags" | tr -d '[:space:]') - ((STEP++)) - else - ((STEP--)) - fi - ;; + if whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ + --title "GPU PASSTHROUGH" \ + --ok-button "Next" --cancel-button "Back" \ + $gpu_default_flag \ + --yesno "\nEnable GPU Passthrough?\n\nAutomatically detects and passes through available GPUs\n(Intel/AMD/NVIDIA) for hardware acceleration.\n\nRecommended for: Media servers, AI/ML, Transcoding\n\n(App default: ${var_gpu:-no})" 16 62; then + _enable_gpu="yes" + else + if [ $? -eq 1 ]; then + _enable_gpu="no" + else + ((STEP--)) + continue + fi + fi + ((STEP++)) + ;; - # ═══════════════════════════════════════════════════════════════════════════ - # STEP 17: SSH Settings - # ═══════════════════════════════════════════════════════════════════════════ - 17) - configure_ssh_settings "Step $STEP/$MAX_STEP" - # configure_ssh_settings handles its own flow, always advance - ((STEP++)) - ;; + # ═══════════════════════════════════════════════════════════════════════════ + # STEP 22: Keyctl Support (Docker/systemd) + # ═══════════════════════════════════════════════════════════════════════════ + 22) + local keyctl_default_flag="--defaultno" + [[ "$_enable_keyctl" == "1" ]] && keyctl_default_flag="" - # ═══════════════════════════════════════════════════════════════════════════ - # STEP 18: FUSE Support - # ═══════════════════════════════════════════════════════════════════════════ - 18) - local fuse_default_flag="--defaultno" - [[ "$_enable_fuse" == "yes" || "$_enable_fuse" == "1" ]] && fuse_default_flag="" + if whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ + --title "KEYCTL SUPPORT" \ + --ok-button "Next" --cancel-button "Back" \ + $keyctl_default_flag \ + --yesno "\nEnable Keyctl support?\n\nRequired for: Docker containers, systemd-networkd,\nand kernel keyring operations.\n\nNote: Automatically enabled for unprivileged containers.\n\n(App default: ${var_keyctl:-0})" 16 62; then + _enable_keyctl="1" + else + if [ $? 
-eq 1 ]; then + _enable_keyctl="0" + else + ((STEP--)) + continue + fi + fi + ((STEP++)) + ;; - if whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ - --title "FUSE SUPPORT" \ - --ok-button "Next" --cancel-button "Back" \ - $fuse_default_flag \ - --yesno "\nEnable FUSE support?\n\nRequired for: rclone, mergerfs, AppImage, etc.\n\n(App default: ${var_fuse:-no})" 14 58; then - _enable_fuse="yes" - else - if [ $? -eq 1 ]; then - _enable_fuse="no" - else - ((STEP--)) - continue - fi - fi - ((STEP++)) - ;; + # ═══════════════════════════════════════════════════════════════════════════ + # STEP 23: APT Cacher Proxy + # ═══════════════════════════════════════════════════════════════════════════ + 23) + local apt_cacher_default_flag="--defaultno" + [[ "$_apt_cacher" == "yes" ]] && apt_cacher_default_flag="" - # ═══════════════════════════════════════════════════════════════════════════ - # STEP 19: TUN/TAP Support - # ═══════════════════════════════════════════════════════════════════════════ - 19) - local tun_default_flag="--defaultno" - [[ "$_enable_tun" == "yes" || "$_enable_tun" == "1" ]] && tun_default_flag="" + if whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ + --title "APT CACHER PROXY" \ + --ok-button "Next" --cancel-button "Back" \ + $apt_cacher_default_flag \ + --yesno "\nUse APT Cacher-NG proxy?\n\nSpeeds up package downloads by caching them locally.\nRequires apt-cacher-ng running on your network.\n\n(App default: ${var_apt_cacher:-no})" 14 62; then + _apt_cacher="yes" + # Ask for IP if enabled + if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ + --title "APT CACHER IP" \ + --inputbox "\nEnter APT Cacher-NG server IP address:" 10 58 "$_apt_cacher_ip" \ + 3>&1 1>&2 2>&3); then + _apt_cacher_ip="$result" + fi + else + if [ $? -eq 1 ]; then + _apt_cacher="no" + _apt_cacher_ip="" + else + ((STEP--)) + continue + fi + fi + ((STEP++)) + ;; - if whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ - --title "TUN/TAP SUPPORT" \ - --ok-button "Next" --cancel-button "Back" \ - $tun_default_flag \ - --yesno "\nEnable TUN/TAP device support?\n\nRequired for: VPN apps (WireGuard, OpenVPN, Tailscale),\nnetwork tunneling, and containerized networking.\n\n(App default: ${var_tun:-no})" 14 62; then - _enable_tun="yes" - else - if [ $? -eq 1 ]; then - _enable_tun="no" - else - ((STEP--)) - continue - fi - fi - ((STEP++)) - ;; + # ═══════════════════════════════════════════════════════════════════════════ + # STEP 24: Container Timezone + # ═══════════════════════════════════════════════════════════════════════════ + 24) + local tz_hint="$_ct_timezone" + [[ -z "$tz_hint" ]] && tz_hint="(empty - will use host timezone)" - # ═══════════════════════════════════════════════════════════════════════════ - # STEP 20: Nesting Support - # ═══════════════════════════════════════════════════════════════════════════ - 20) - local nesting_default_flag="" - [[ "$_enable_nesting" == "0" || "$_enable_nesting" == "no" ]] && nesting_default_flag="--defaultno" + if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ + --title "CONTAINER TIMEZONE" \ + --ok-button "Next" --cancel-button "Back" \ + --inputbox "\nSet container timezone.\n\nExamples: Europe/Berlin, America/New_York, Asia/Tokyo\n\nHost timezone: ${_host_timezone:-unknown}\n\nLeave empty to inherit from host." 
16 62 "$_ct_timezone" \ + 3>&1 1>&2 2>&3); then + _ct_timezone="$result" + ((STEP++)) + else + ((STEP--)) + fi + ;; - if whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ - --title "NESTING SUPPORT" \ - --ok-button "Next" --cancel-button "Back" \ - $nesting_default_flag \ - --yesno "\nEnable Nesting?\n\nRequired for: Docker, LXC inside LXC, Podman,\nand other containerization tools.\n\n(App default: ${var_nesting:-1})" 14 58; then - _enable_nesting="1" - else - if [ $? -eq 1 ]; then - _enable_nesting="0" - else - ((STEP--)) - continue - fi - fi - ((STEP++)) - ;; + # ═══════════════════════════════════════════════════════════════════════════ + # STEP 25: Container Protection + # ═══════════════════════════════════════════════════════════════════════════ + 25) + local protect_default_flag="--defaultno" + [[ "$_protect_ct" == "yes" || "$_protect_ct" == "1" ]] && protect_default_flag="" - # ═══════════════════════════════════════════════════════════════════════════ - # STEP 21: GPU Passthrough - # ═══════════════════════════════════════════════════════════════════════════ - 21) - local gpu_default_flag="--defaultno" - [[ "$_enable_gpu" == "yes" ]] && gpu_default_flag="" + if whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ + --title "CONTAINER PROTECTION" \ + --ok-button "Next" --cancel-button "Back" \ + $protect_default_flag \ + --yesno "\nEnable Container Protection?\n\nPrevents accidental deletion of this container.\nYou must disable protection before removing.\n\n(App default: ${var_protection:-no})" 14 62; then + _protect_ct="yes" + else + if [ $? -eq 1 ]; then + _protect_ct="no" + else + ((STEP--)) + continue + fi + fi + ((STEP++)) + ;; - if whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ - --title "GPU PASSTHROUGH" \ - --ok-button "Next" --cancel-button "Back" \ - $gpu_default_flag \ - --yesno "\nEnable GPU Passthrough?\n\nAutomatically detects and passes through available GPUs\n(Intel/AMD/NVIDIA) for hardware acceleration.\n\nRecommended for: Media servers, AI/ML, Transcoding\n\n(App default: ${var_gpu:-no})" 16 62; then - _enable_gpu="yes" - else - if [ $? -eq 1 ]; then - _enable_gpu="no" - else - ((STEP--)) - continue - fi - fi - ((STEP++)) - ;; + # ═══════════════════════════════════════════════════════════════════════════ + # STEP 26: Device Node Creation (mknod) + # ═══════════════════════════════════════════════════════════════════════════ + 26) + local mknod_default_flag="--defaultno" + [[ "$_enable_mknod" == "1" ]] && mknod_default_flag="" - # ═══════════════════════════════════════════════════════════════════════════ - # STEP 22: Keyctl Support (Docker/systemd) - # ═══════════════════════════════════════════════════════════════════════════ - 22) - local keyctl_default_flag="--defaultno" - [[ "$_enable_keyctl" == "1" ]] && keyctl_default_flag="" + if whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ + --title "DEVICE NODE CREATION" \ + --ok-button "Next" --cancel-button "Back" \ + $mknod_default_flag \ + --yesno "\nAllow device node creation (mknod)?\n\nRequired for: Creating device files inside container.\nExperimental feature (requires kernel 5.3+).\n\n(App default: ${var_mknod:-0})" 14 62; then + _enable_mknod="1" + else + if [ $? 
-eq 1 ]; then + _enable_mknod="0" + else + ((STEP--)) + continue + fi + fi + ((STEP++)) + ;; - if whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ - --title "KEYCTL SUPPORT" \ - --ok-button "Next" --cancel-button "Back" \ - $keyctl_default_flag \ - --yesno "\nEnable Keyctl support?\n\nRequired for: Docker containers, systemd-networkd,\nand kernel keyring operations.\n\nNote: Automatically enabled for unprivileged containers.\n\n(App default: ${var_keyctl:-0})" 16 62; then - _enable_keyctl="1" - else - if [ $? -eq 1 ]; then - _enable_keyctl="0" - else - ((STEP--)) - continue - fi - fi - ((STEP++)) - ;; + # ═══════════════════════════════════════════════════════════════════════════ + # STEP 27: Mount Filesystems + # ═══════════════════════════════════════════════════════════════════════════ + 27) + local mount_hint="" + [[ -n "$_mount_fs" ]] && mount_hint="$_mount_fs" || mount_hint="(none)" - # ═══════════════════════════════════════════════════════════════════════════ - # STEP 23: APT Cacher Proxy - # ═══════════════════════════════════════════════════════════════════════════ - 23) - local apt_cacher_default_flag="--defaultno" - [[ "$_apt_cacher" == "yes" ]] && apt_cacher_default_flag="" + if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ + --title "MOUNT FILESYSTEMS" \ + --ok-button "Next" --cancel-button "Back" \ + --inputbox "\nAllow specific filesystem mounts.\n\nComma-separated list: nfs, cifs, fuse, ext4, etc.\nLeave empty for defaults (none).\n\nCurrent: $mount_hint" 14 62 "$_mount_fs" \ + 3>&1 1>&2 2>&3); then + _mount_fs="$result" + ((STEP++)) + else + ((STEP--)) + fi + ;; - if whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ - --title "APT CACHER PROXY" \ - --ok-button "Next" --cancel-button "Back" \ - $apt_cacher_default_flag \ - --yesno "\nUse APT Cacher-NG proxy?\n\nSpeeds up package downloads by caching them locally.\nRequires apt-cacher-ng running on your network.\n\n(App default: ${var_apt_cacher:-no})" 14 62; then - _apt_cacher="yes" - # Ask for IP if enabled - if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ - --title "APT CACHER IP" \ - --inputbox "\nEnter APT Cacher-NG server IP address:" 10 58 "$_apt_cacher_ip" \ - 3>&1 1>&2 2>&3); then - _apt_cacher_ip="$result" - fi - else - if [ $? -eq 1 ]; then - _apt_cacher="no" - _apt_cacher_ip="" - else - ((STEP--)) - continue - fi - fi - ((STEP++)) - ;; + # ═══════════════════════════════════════════════════════════════════════════ + # STEP 28: Verbose Mode & Confirmation + # ═══════════════════════════════════════════════════════════════════════════ + 28) + local verbose_default_flag="--defaultno" + [[ "$_verbose" == "yes" ]] && verbose_default_flag="" - # ═══════════════════════════════════════════════════════════════════════════ - # STEP 24: Container Timezone - # ═══════════════════════════════════════════════════════════════════════════ - 24) - local tz_hint="$_ct_timezone" - [[ -z "$tz_hint" ]] && tz_hint="(empty - will use host timezone)" + if whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ + --title "VERBOSE MODE" \ + $verbose_default_flag \ + --yesno "\nEnable Verbose Mode?\n\nShows detailed output during installation." 
12 58; then + _verbose="yes" + else + _verbose="no" + fi + # Build summary + local ct_type_desc="Unprivileged" + [[ "$_ct_type" == "0" ]] && ct_type_desc="Privileged" - if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ - --title "CONTAINER TIMEZONE" \ - --ok-button "Next" --cancel-button "Back" \ - --inputbox "\nSet container timezone.\n\nExamples: Europe/Berlin, America/New_York, Asia/Tokyo\n\nHost timezone: ${_host_timezone:-unknown}\n\nLeave empty to inherit from host." 16 62 "$_ct_timezone" \ - 3>&1 1>&2 2>&3); then - _ct_timezone="$result" - ((STEP++)) - else - ((STEP--)) - fi - ;; + local nesting_desc="Disabled" + [[ "$_enable_nesting" == "1" ]] && nesting_desc="Enabled" - # ═══════════════════════════════════════════════════════════════════════════ - # STEP 25: Container Protection - # ═══════════════════════════════════════════════════════════════════════════ - 25) - local protect_default_flag="--defaultno" - [[ "$_protect_ct" == "yes" || "$_protect_ct" == "1" ]] && protect_default_flag="" + local keyctl_desc="Disabled" + [[ "$_enable_keyctl" == "1" ]] && keyctl_desc="Enabled" - if whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ - --title "CONTAINER PROTECTION" \ - --ok-button "Next" --cancel-button "Back" \ - $protect_default_flag \ - --yesno "\nEnable Container Protection?\n\nPrevents accidental deletion of this container.\nYou must disable protection before removing.\n\n(App default: ${var_protection:-no})" 14 62; then - _protect_ct="yes" - else - if [ $? -eq 1 ]; then - _protect_ct="no" - else - ((STEP--)) - continue - fi - fi - ((STEP++)) - ;; + local protect_desc="No" + [[ "$_protect_ct" == "yes" || "$_protect_ct" == "1" ]] && protect_desc="Yes" - # ═══════════════════════════════════════════════════════════════════════════ - # STEP 26: Device Node Creation (mknod) - # ═══════════════════════════════════════════════════════════════════════════ - 26) - local mknod_default_flag="--defaultno" - [[ "$_enable_mknod" == "1" ]] && mknod_default_flag="" + local tz_display="${_ct_timezone:-Host TZ}" + local apt_display="${_apt_cacher:-no}" + [[ "$_apt_cacher" == "yes" && -n "$_apt_cacher_ip" ]] && apt_display="$_apt_cacher_ip" - if whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ - --title "DEVICE NODE CREATION" \ - --ok-button "Next" --cancel-button "Back" \ - $mknod_default_flag \ - --yesno "\nAllow device node creation (mknod)?\n\nRequired for: Creating device files inside container.\nExperimental feature (requires kernel 5.3+).\n\n(App default: ${var_mknod:-0})" 14 62; then - _enable_mknod="1" - else - if [ $? 
-eq 1 ]; then - _enable_mknod="0" - else - ((STEP--)) - continue - fi - fi - ((STEP++)) - ;; - - # ═══════════════════════════════════════════════════════════════════════════ - # STEP 27: Mount Filesystems - # ═══════════════════════════════════════════════════════════════════════════ - 27) - local mount_hint="" - [[ -n "$_mount_fs" ]] && mount_hint="$_mount_fs" || mount_hint="(none)" - - if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ - --title "MOUNT FILESYSTEMS" \ - --ok-button "Next" --cancel-button "Back" \ - --inputbox "\nAllow specific filesystem mounts.\n\nComma-separated list: nfs, cifs, fuse, ext4, etc.\nLeave empty for defaults (none).\n\nCurrent: $mount_hint" 14 62 "$_mount_fs" \ - 3>&1 1>&2 2>&3); then - _mount_fs="$result" - ((STEP++)) - else - ((STEP--)) - fi - ;; - - # ═══════════════════════════════════════════════════════════════════════════ - # STEP 28: Verbose Mode & Confirmation - # ═══════════════════════════════════════════════════════════════════════════ - 28) - local verbose_default_flag="--defaultno" - [[ "$_verbose" == "yes" ]] && verbose_default_flag="" - - if whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ - --title "VERBOSE MODE" \ - $verbose_default_flag \ - --yesno "\nEnable Verbose Mode?\n\nShows detailed output during installation." 12 58; then - _verbose="yes" - else - _verbose="no" - fi - # Build summary - local ct_type_desc="Unprivileged" - [[ "$_ct_type" == "0" ]] && ct_type_desc="Privileged" - - local nesting_desc="Disabled" - [[ "$_enable_nesting" == "1" ]] && nesting_desc="Enabled" - - local keyctl_desc="Disabled" - [[ "$_enable_keyctl" == "1" ]] && keyctl_desc="Enabled" - - local protect_desc="No" - [[ "$_protect_ct" == "yes" || "$_protect_ct" == "1" ]] && protect_desc="Yes" - - local tz_display="${_ct_timezone:-Host TZ}" - local apt_display="${_apt_cacher:-no}" - [[ "$_apt_cacher" == "yes" && -n "$_apt_cacher_ip" ]] && apt_display="$_apt_cacher_ip" - - local summary="Container Type: $ct_type_desc + local summary="Container Type: $ct_type_desc Container ID: $_ct_id Hostname: $_hostname @@ -1915,105 +2037,105 @@ Advanced: APT Cacher: $apt_display Verbose: $_verbose" - if whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ - --title "CONFIRM SETTINGS" \ - --ok-button "Create LXC" --cancel-button "Back" \ - --yesno "$summary\n\nCreate ${APP} LXC with these settings?" 32 62; then - ((STEP++)) - else - ((STEP--)) - fi - ;; - esac - done + if whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ + --title "CONFIRM SETTINGS" \ + --ok-button "Create LXC" --cancel-button "Back" \ + --yesno "$summary\n\nCreate ${APP} LXC with these settings?" 
32 62; then + ((STEP++)) + else + ((STEP--)) + fi + ;; + esac + done - # ═══════════════════════════════════════════════════════════════════════════ - # Apply all collected values to global variables - # ═══════════════════════════════════════════════════════════════════════════ - CT_TYPE="$_ct_type" - PW="$_pw" - CT_ID="$_ct_id" - HN="$_hostname" - DISK_SIZE="$_disk_size" - CORE_COUNT="$_core_count" - RAM_SIZE="$_ram_size" - BRG="$_bridge" - NET="$_net" - GATE="$_gate" - IPV6_METHOD="$_ipv6_method" - IPV6_ADDR="$_ipv6_addr" - IPV6_GATE="$_ipv6_gate" - TAGS="$_tags" - ENABLE_FUSE="$_enable_fuse" - ENABLE_TUN="$_enable_tun" - ENABLE_GPU="$_enable_gpu" - ENABLE_NESTING="$_enable_nesting" - ENABLE_KEYCTL="$_enable_keyctl" - ENABLE_MKNOD="$_enable_mknod" - ALLOW_MOUNT_FS="$_mount_fs" - PROTECT_CT="$_protect_ct" - CT_TIMEZONE="$_ct_timezone" - APT_CACHER="$_apt_cacher" - APT_CACHER_IP="$_apt_cacher_ip" - VERBOSE="$_verbose" + # ═══════════════════════════════════════════════════════════════════════════ + # Apply all collected values to global variables + # ═══════════════════════════════════════════════════════════════════════════ + CT_TYPE="$_ct_type" + PW="$_pw" + CT_ID="$_ct_id" + HN="$_hostname" + DISK_SIZE="$_disk_size" + CORE_COUNT="$_core_count" + RAM_SIZE="$_ram_size" + BRG="$_bridge" + NET="$_net" + GATE="$_gate" + IPV6_METHOD="$_ipv6_method" + IPV6_ADDR="$_ipv6_addr" + IPV6_GATE="$_ipv6_gate" + TAGS="$_tags" + ENABLE_FUSE="$_enable_fuse" + ENABLE_TUN="$_enable_tun" + ENABLE_GPU="$_enable_gpu" + ENABLE_NESTING="$_enable_nesting" + ENABLE_KEYCTL="$_enable_keyctl" + ENABLE_MKNOD="$_enable_mknod" + ALLOW_MOUNT_FS="$_mount_fs" + PROTECT_CT="$_protect_ct" + CT_TIMEZONE="$_ct_timezone" + APT_CACHER="$_apt_cacher" + APT_CACHER_IP="$_apt_cacher_ip" + VERBOSE="$_verbose" - # Update var_* based on user choice (for functions that check these) - var_gpu="$_enable_gpu" - var_fuse="$_enable_fuse" - var_tun="$_enable_tun" - var_nesting="$_enable_nesting" - var_keyctl="$_enable_keyctl" - var_mknod="$_enable_mknod" - var_mount_fs="$_mount_fs" - var_protection="$_protect_ct" - var_timezone="$_ct_timezone" - var_apt_cacher="$_apt_cacher" - var_apt_cacher_ip="$_apt_cacher_ip" + # Update var_* based on user choice (for functions that check these) + var_gpu="$_enable_gpu" + var_fuse="$_enable_fuse" + var_tun="$_enable_tun" + var_nesting="$_enable_nesting" + var_keyctl="$_enable_keyctl" + var_mknod="$_enable_mknod" + var_mount_fs="$_mount_fs" + var_protection="$_protect_ct" + var_timezone="$_ct_timezone" + var_apt_cacher="$_apt_cacher" + var_apt_cacher_ip="$_apt_cacher_ip" - # Format optional values - [[ -n "$_mtu" ]] && MTU=",mtu=$_mtu" || MTU="" - [[ -n "$_sd" ]] && SD="-searchdomain=$_sd" || SD="" - [[ -n "$_ns" ]] && NS="-nameserver=$_ns" || NS="" - [[ -n "$_mac" ]] && MAC=",hwaddr=$_mac" || MAC="" - [[ -n "$_vlan" ]] && VLAN=",tag=$_vlan" || VLAN="" + # Format optional values + [[ -n "$_mtu" ]] && MTU=",mtu=$_mtu" || MTU="" + [[ -n "$_sd" ]] && SD="-searchdomain=$_sd" || SD="" + [[ -n "$_ns" ]] && NS="-nameserver=$_ns" || NS="" + [[ -n "$_mac" ]] && MAC=",hwaddr=$_mac" || MAC="" + [[ -n "$_vlan" ]] && VLAN=",tag=$_vlan" || VLAN="" - # Alpine UDHCPC fix - if [ "$var_os" == "alpine" ] && [ "$NET" == "dhcp" ] && [ -n "$_ns" ]; then - UDHCPC_FIX="yes" - else - UDHCPC_FIX="no" - fi - export UDHCPC_FIX - export SSH_KEYS_FILE + # Alpine UDHCPC fix + if [ "$var_os" == "alpine" ] && [ "$NET" == "dhcp" ] && [ -n "$_ns" ]; then + UDHCPC_FIX="yes" + else + UDHCPC_FIX="no" + fi + export UDHCPC_FIX + export 
SSH_KEYS_FILE - # Exit alternate screen buffer BEFORE displaying summary - # so the summary is visible in the main terminal - tput rmcup 2>/dev/null || true - trap - RETURN # Remove the trap since we already called rmcup + # Exit alternate screen buffer BEFORE displaying summary + # so the summary is visible in the main terminal + tput rmcup 2>/dev/null || true + trap - RETURN # Remove the trap since we already called rmcup - # Display final summary - echo -e "\n${INFO}${BOLD}${DGN}PVE Version ${PVEVERSION} (Kernel: ${KERNEL_VERSION})${CL}" - echo -e "${OS}${BOLD}${DGN}Operating System: ${BGN}$var_os${CL}" - echo -e "${OSVERSION}${BOLD}${DGN}Version: ${BGN}$var_version${CL}" - echo -e "${CONTAINERTYPE}${BOLD}${DGN}Container Type: ${BGN}$([ "$CT_TYPE" == "1" ] && echo "Unprivileged" || echo "Privileged")${CL}" - echo -e "${CONTAINERID}${BOLD}${DGN}Container ID: ${BGN}$CT_ID${CL}" - echo -e "${HOSTNAME}${BOLD}${DGN}Hostname: ${BGN}$HN${CL}" - echo -e "${DISKSIZE}${BOLD}${DGN}Disk Size: ${BGN}${DISK_SIZE} GB${CL}" - echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}$CORE_COUNT${CL}" - echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}${RAM_SIZE} MiB${CL}" - echo -e "${BRIDGE}${BOLD}${DGN}Bridge: ${BGN}$BRG${CL}" - echo -e "${NETWORK}${BOLD}${DGN}IPv4: ${BGN}$NET${CL}" - echo -e "${NETWORK}${BOLD}${DGN}IPv6: ${BGN}$IPV6_METHOD${CL}" - echo -e "${FUSE}${BOLD}${DGN}FUSE Support: ${BGN}$ENABLE_FUSE${CL}" - [[ "$ENABLE_TUN" == "yes" ]] && echo -e "${NETWORK}${BOLD}${DGN}TUN/TAP Support: ${BGN}$ENABLE_TUN${CL}" - echo -e "${CONTAINERTYPE}${BOLD}${DGN}Nesting: ${BGN}$([ "$ENABLE_NESTING" == "1" ] && echo "Enabled" || echo "Disabled")${CL}" - [[ "$ENABLE_KEYCTL" == "1" ]] && echo -e "${CONTAINERTYPE}${BOLD}${DGN}Keyctl: ${BGN}Enabled${CL}" - echo -e "${GPU}${BOLD}${DGN}GPU Passthrough: ${BGN}$ENABLE_GPU${CL}" - [[ "$PROTECT_CT" == "yes" || "$PROTECT_CT" == "1" ]] && echo -e "${CONTAINERTYPE}${BOLD}${DGN}Protection: ${BGN}Enabled${CL}" - [[ -n "$CT_TIMEZONE" ]] && echo -e "${CONTAINERTYPE}${BOLD}${DGN}Timezone: ${BGN}$CT_TIMEZONE${CL}" - [[ "$APT_CACHER" == "yes" ]] && echo -e "${CONTAINERTYPE}${BOLD}${DGN}APT Cacher: ${BGN}$APT_CACHER_IP${CL}" - echo -e "${SEARCH}${BOLD}${DGN}Verbose Mode: ${BGN}$VERBOSE${CL}" - echo -e "${CREATING}${BOLD}${RD}Creating a ${APP} LXC using the above advanced settings${CL}" + # Display final summary + echo -e "\n${INFO}${BOLD}${DGN}PVE Version ${PVEVERSION} (Kernel: ${KERNEL_VERSION})${CL}" + echo -e "${OS}${BOLD}${DGN}Operating System: ${BGN}$var_os${CL}" + echo -e "${OSVERSION}${BOLD}${DGN}Version: ${BGN}$var_version${CL}" + echo -e "${CONTAINERTYPE}${BOLD}${DGN}Container Type: ${BGN}$([ "$CT_TYPE" == "1" ] && echo "Unprivileged" || echo "Privileged")${CL}" + echo -e "${CONTAINERID}${BOLD}${DGN}Container ID: ${BGN}$CT_ID${CL}" + echo -e "${HOSTNAME}${BOLD}${DGN}Hostname: ${BGN}$HN${CL}" + echo -e "${DISKSIZE}${BOLD}${DGN}Disk Size: ${BGN}${DISK_SIZE} GB${CL}" + echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}$CORE_COUNT${CL}" + echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}${RAM_SIZE} MiB${CL}" + echo -e "${BRIDGE}${BOLD}${DGN}Bridge: ${BGN}$BRG${CL}" + echo -e "${NETWORK}${BOLD}${DGN}IPv4: ${BGN}$NET${CL}" + echo -e "${NETWORK}${BOLD}${DGN}IPv6: ${BGN}$IPV6_METHOD${CL}" + echo -e "${FUSE}${BOLD}${DGN}FUSE Support: ${BGN}$ENABLE_FUSE${CL}" + [[ "$ENABLE_TUN" == "yes" ]] && echo -e "${NETWORK}${BOLD}${DGN}TUN/TAP Support: ${BGN}$ENABLE_TUN${CL}" + echo -e "${CONTAINERTYPE}${BOLD}${DGN}Nesting: ${BGN}$([ "$ENABLE_NESTING" == "1" ] && echo "Enabled" || echo 
"Disabled")${CL}" + [[ "$ENABLE_KEYCTL" == "1" ]] && echo -e "${CONTAINERTYPE}${BOLD}${DGN}Keyctl: ${BGN}Enabled${CL}" + echo -e "${GPU}${BOLD}${DGN}GPU Passthrough: ${BGN}$ENABLE_GPU${CL}" + [[ "$PROTECT_CT" == "yes" || "$PROTECT_CT" == "1" ]] && echo -e "${CONTAINERTYPE}${BOLD}${DGN}Protection: ${BGN}Enabled${CL}" + [[ -n "$CT_TIMEZONE" ]] && echo -e "${CONTAINERTYPE}${BOLD}${DGN}Timezone: ${BGN}$CT_TIMEZONE${CL}" + [[ "$APT_CACHER" == "yes" ]] && echo -e "${CONTAINERTYPE}${BOLD}${DGN}APT Cacher: ${BGN}$APT_CACHER_IP${CL}" + echo -e "${SEARCH}${BOLD}${DGN}Verbose Mode: ${BGN}$VERBOSE${CL}" + echo -e "${CREATING}${BOLD}${RD}Creating a ${APP} LXC using the above advanced settings${CL}" } # ============================================================================== @@ -2031,13 +2153,13 @@ Advanced: # - Sets global DIAGNOSTICS variable for API telemetry opt-in/out # ------------------------------------------------------------------------------ diagnostics_check() { - if ! [ -d "/usr/local/community-scripts" ]; then - mkdir -p /usr/local/community-scripts - fi + if ! [ -d "/usr/local/community-scripts" ]; then + mkdir -p /usr/local/community-scripts + fi - if ! [ -f "/usr/local/community-scripts/diagnostics" ]; then - if (whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --title "DIAGNOSTICS" --yesno "Send Diagnostics of LXC Installation?\n\n(This only transmits data without user data, just RAM, CPU, LXC name, ...)" 10 58); then - cat </usr/local/community-scripts/diagnostics + if ! [ -f "/usr/local/community-scripts/diagnostics" ]; then + if (whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --title "DIAGNOSTICS" --yesno "Send Diagnostics of LXC Installation?\n\n(This only transmits data without user data, just RAM, CPU, LXC name, ...)" 10 58); then + cat </usr/local/community-scripts/diagnostics DIAGNOSTICS=yes #This file is used to store the diagnostics settings for the Community-Scripts API. @@ -2060,9 +2182,9 @@ DIAGNOSTICS=yes #"status" #If you have any concerns, please review the source code at /misc/build.func EOF - DIAGNOSTICS="yes" - else - cat </usr/local/community-scripts/diagnostics + DIAGNOSTICS="yes" + else + cat </usr/local/community-scripts/diagnostics DIAGNOSTICS=no #This file is used to store the diagnostics settings for the Community-Scripts API. @@ -2085,34 +2207,34 @@ DIAGNOSTICS=no #"status" #If you have any concerns, please review the source code at /misc/build.func EOF - DIAGNOSTICS="no" - fi - else - DIAGNOSTICS=$(awk -F '=' '/^DIAGNOSTICS/ {print $2}' /usr/local/community-scripts/diagnostics) - + DIAGNOSTICS="no" fi + else + DIAGNOSTICS=$(awk -F '=' '/^DIAGNOSTICS/ {print $2}' /usr/local/community-scripts/diagnostics) + + fi } diagnostics_menu() { - if [ "${DIAGNOSTICS:-no}" = "yes" ]; then - if whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \ - --title "DIAGNOSTIC SETTINGS" \ - --yesno "Send Diagnostics?\n\nCurrent: ${DIAGNOSTICS}" 10 58 \ - --yes-button "No" --no-button "Back"; then - DIAGNOSTICS="no" - sed -i 's/^DIAGNOSTICS=.*/DIAGNOSTICS=no/' /usr/local/community-scripts/diagnostics - whiptail --msgbox "Diagnostics set to ${DIAGNOSTICS}." 8 58 - fi - else - if whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \ - --title "DIAGNOSTIC SETTINGS" \ - --yesno "Send Diagnostics?\n\nCurrent: ${DIAGNOSTICS}" 10 58 \ - --yes-button "Yes" --no-button "Back"; then - DIAGNOSTICS="yes" - sed -i 's/^DIAGNOSTICS=.*/DIAGNOSTICS=yes/' /usr/local/community-scripts/diagnostics - whiptail --msgbox "Diagnostics set to ${DIAGNOSTICS}." 
8 58 - fi + if [ "${DIAGNOSTICS:-no}" = "yes" ]; then + if whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \ + --title "DIAGNOSTIC SETTINGS" \ + --yesno "Send Diagnostics?\n\nCurrent: ${DIAGNOSTICS}" 10 58 \ + --yes-button "No" --no-button "Back"; then + DIAGNOSTICS="no" + sed -i 's/^DIAGNOSTICS=.*/DIAGNOSTICS=no/' /usr/local/community-scripts/diagnostics + whiptail --msgbox "Diagnostics set to ${DIAGNOSTICS}." 8 58 fi + else + if whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \ + --title "DIAGNOSTIC SETTINGS" \ + --yesno "Send Diagnostics?\n\nCurrent: ${DIAGNOSTICS}" 10 58 \ + --yes-button "Yes" --no-button "Back"; then + DIAGNOSTICS="yes" + sed -i 's/^DIAGNOSTICS=.*/DIAGNOSTICS=yes/' /usr/local/community-scripts/diagnostics + whiptail --msgbox "Diagnostics set to ${DIAGNOSTICS}." 8 58 + fi + fi } # ------------------------------------------------------------------------------ @@ -2123,25 +2245,25 @@ diagnostics_menu() { # - Convert CT_TYPE to description # ------------------------------------------------------------------------------ echo_default() { - CT_TYPE_DESC="Unprivileged" - if [ "$CT_TYPE" -eq 0 ]; then - CT_TYPE_DESC="Privileged" - fi - echo -e "${INFO}${BOLD}${DGN}PVE Version ${PVEVERSION} (Kernel: ${KERNEL_VERSION})${CL}" - echo -e "${CONTAINERID}${BOLD}${DGN}Container ID: ${BGN}${CT_ID}${CL}" - echo -e "${OS}${BOLD}${DGN}Operating System: ${BGN}$var_os ($var_version)${CL}" - echo -e "${CONTAINERTYPE}${BOLD}${DGN}Container Type: ${BGN}$CT_TYPE_DESC${CL}" - echo -e "${DISKSIZE}${BOLD}${DGN}Disk Size: ${BGN}${DISK_SIZE} GB${CL}" - echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}${CORE_COUNT}${CL}" - echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}${RAM_SIZE} MiB${CL}" - if [ "${var_gpu:-no}" == "yes" ]; then - echo -e "🎮${BOLD}${DGN} GPU Passthrough: ${BGN}Enabled${CL}" - fi - if [ "$VERBOSE" == "yes" ]; then - echo -e "${SEARCH}${BOLD}${DGN}Verbose Mode: ${BGN}Enabled${CL}" - fi - echo -e "${CREATING}${BOLD}${BL}Creating a ${APP} LXC using the above default settings${CL}" - echo -e " " + CT_TYPE_DESC="Unprivileged" + if [ "$CT_TYPE" -eq 0 ]; then + CT_TYPE_DESC="Privileged" + fi + echo -e "${INFO}${BOLD}${DGN}PVE Version ${PVEVERSION} (Kernel: ${KERNEL_VERSION})${CL}" + echo -e "${CONTAINERID}${BOLD}${DGN}Container ID: ${BGN}${CT_ID}${CL}" + echo -e "${OS}${BOLD}${DGN}Operating System: ${BGN}$var_os ($var_version)${CL}" + echo -e "${CONTAINERTYPE}${BOLD}${DGN}Container Type: ${BGN}$CT_TYPE_DESC${CL}" + echo -e "${DISKSIZE}${BOLD}${DGN}Disk Size: ${BGN}${DISK_SIZE} GB${CL}" + echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}${CORE_COUNT}${CL}" + echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}${RAM_SIZE} MiB${CL}" + if [ "${var_gpu:-no}" == "yes" ]; then + echo -e "🎮${BOLD}${DGN} GPU Passthrough: ${BGN}Enabled${CL}" + fi + if [ "$VERBOSE" == "yes" ]; then + echo -e "${SEARCH}${BOLD}${DGN}Verbose Mode: ${BGN}Enabled${CL}" + fi + echo -e "${CREATING}${BOLD}${BL}Creating a ${APP} LXC using the above default settings${CL}" + echo -e " " } # ------------------------------------------------------------------------------ @@ -2153,196 +2275,196 @@ echo_default() { # - Applies chosen settings and triggers container build # ------------------------------------------------------------------------------ install_script() { - pve_check - shell_check - root_check - arch_check - ssh_check - maxkeys_check - diagnostics_check + pve_check + shell_check + root_check + arch_check + ssh_check + maxkeys_check + diagnostics_check - if systemctl is-active -q ping-instances.service; then 
- systemctl -q stop ping-instances.service + if systemctl is-active -q ping-instances.service; then + systemctl -q stop ping-instances.service + fi + + NEXTID=$(pvesh get /cluster/nextid) + + # Get timezone using timedatectl (Debian 13+ compatible) + # Fallback to /etc/timezone for older systems + if command -v timedatectl >/dev/null 2>&1; then + timezone=$(timedatectl show --value --property=Timezone 2>/dev/null || echo "UTC") + elif [ -f /etc/timezone ]; then + timezone=$(cat /etc/timezone) + else + timezone="UTC" + fi + + # Show APP Header + header_info + + # --- Support CLI argument as direct preset (default, advanced, …) --- + CHOICE="${mode:-${1:-}}" + + # If no CLI argument → show whiptail menu + # Build menu dynamically based on available options + local appdefaults_option="" + local settings_option="" + local menu_items=( + "1" "Default Install" + "2" "Advanced Install" + "3" "User Defaults" + ) + + if [ -f "$(get_app_defaults_path)" ]; then + appdefaults_option="4" + menu_items+=("4" "App Defaults for ${APP}") + settings_option="5" + menu_items+=("5" "Settings") + else + settings_option="4" + menu_items+=("4" "Settings") + fi + + APPDEFAULTS_OPTION="$appdefaults_option" + SETTINGS_OPTION="$settings_option" + + # Main menu loop - allows returning from Settings + while true; do + if [ -z "$CHOICE" ]; then + TMP_CHOICE=$(whiptail \ + --backtitle "Proxmox VE Helper Scripts" \ + --title "Community-Scripts Options" \ + --ok-button "Select" --cancel-button "Exit Script" \ + --notags \ + --menu "\nChoose an option:\n Use TAB or Arrow keys to navigate, ENTER to select.\n" \ + 20 60 9 \ + "${menu_items[@]}" \ + --default-item "1" \ + 3>&1 1>&2 2>&3) || exit_script + CHOICE="$TMP_CHOICE" fi - NEXTID=$(pvesh get /cluster/nextid) + # --- Main case --- + local defaults_target="" + local run_maybe_offer="no" + case "$CHOICE" in + 1 | default | DEFAULT) + header_info + echo -e "${DEFAULT}${BOLD}${BL}Using Default Settings on node $PVEHOST_NAME${CL}" + VERBOSE="no" + METHOD="default" + base_settings "$VERBOSE" + echo_default + defaults_target="$(ensure_global_default_vars_file)" + break + ;; + 2 | advanced | ADVANCED) + header_info + echo -e "${ADVANCED}${BOLD}${RD}Using Advanced Install on node $PVEHOST_NAME${CL}" + echo -e "${INFO}${BOLD}${DGN}PVE Version ${PVEVERSION} (Kernel: ${KERNEL_VERSION})${CL}" + METHOD="advanced" + base_settings + advanced_settings + defaults_target="$(ensure_global_default_vars_file)" + run_maybe_offer="yes" + break + ;; + 3 | mydefaults | MYDEFAULTS | userdefaults | USERDEFAULTS) + default_var_settings || { + msg_error "Failed to apply default.vars" + exit 1 + } + defaults_target="/usr/local/community-scripts/default.vars" + break + ;; + "$APPDEFAULTS_OPTION" | appdefaults | APPDEFAULTS) + if [ -f "$(get_app_defaults_path)" ]; then + header_info + echo -e "${DEFAULT}${BOLD}${BL}Using App Defaults for ${APP} on node $PVEHOST_NAME${CL}" + METHOD="appdefaults" + base_settings + load_vars_file "$(get_app_defaults_path)" + echo_default + defaults_target="$(get_app_defaults_path)" + break + else + msg_error "No App Defaults available for ${APP}" + exit 1 + fi + ;; + "$SETTINGS_OPTION" | settings | SETTINGS) + settings_menu + # After settings menu, show main menu again + header_info + CHOICE="" + ;; + *) + echo -e "${CROSS}${RD}Invalid option: $CHOICE${CL}" + exit 1 + ;; + esac + done - # Get timezone using timedatectl (Debian 13+ compatible) - # Fallback to /etc/timezone for older systems - if command -v timedatectl >/dev/null 2>&1; then - timezone=$(timedatectl show 
--value --property=Timezone 2>/dev/null || echo "UTC") - elif [ -f /etc/timezone ]; then - timezone=$(cat /etc/timezone) - else - timezone="UTC" - fi + if [[ -n "$defaults_target" ]]; then + ensure_storage_selection_for_vars_file "$defaults_target" + fi - # Show APP Header - header_info - - # --- Support CLI argument as direct preset (default, advanced, …) --- - CHOICE="${mode:-${1:-}}" - - # If no CLI argument → show whiptail menu - # Build menu dynamically based on available options - local appdefaults_option="" - local settings_option="" - local menu_items=( - "1" "Default Install" - "2" "Advanced Install" - "3" "User Defaults" - ) - - if [ -f "$(get_app_defaults_path)" ]; then - appdefaults_option="4" - menu_items+=("4" "App Defaults for ${APP}") - settings_option="5" - menu_items+=("5" "Settings") - else - settings_option="4" - menu_items+=("4" "Settings") - fi - - APPDEFAULTS_OPTION="$appdefaults_option" - SETTINGS_OPTION="$settings_option" - - # Main menu loop - allows returning from Settings - while true; do - if [ -z "$CHOICE" ]; then - TMP_CHOICE=$(whiptail \ - --backtitle "Proxmox VE Helper Scripts" \ - --title "Community-Scripts Options" \ - --ok-button "Select" --cancel-button "Exit Script" \ - --notags \ - --menu "\nChoose an option:\n Use TAB or Arrow keys to navigate, ENTER to select.\n" \ - 20 60 9 \ - "${menu_items[@]}" \ - --default-item "1" \ - 3>&1 1>&2 2>&3) || exit_script - CHOICE="$TMP_CHOICE" - fi - - # --- Main case --- - local defaults_target="" - local run_maybe_offer="no" - case "$CHOICE" in - 1 | default | DEFAULT) - header_info - echo -e "${DEFAULT}${BOLD}${BL}Using Default Settings on node $PVEHOST_NAME${CL}" - VERBOSE="no" - METHOD="default" - base_settings "$VERBOSE" - echo_default - defaults_target="$(ensure_global_default_vars_file)" - break - ;; - 2 | advanced | ADVANCED) - header_info - echo -e "${ADVANCED}${BOLD}${RD}Using Advanced Install on node $PVEHOST_NAME${CL}" - echo -e "${INFO}${BOLD}${DGN}PVE Version ${PVEVERSION} (Kernel: ${KERNEL_VERSION})${CL}" - METHOD="advanced" - base_settings - advanced_settings - defaults_target="$(ensure_global_default_vars_file)" - run_maybe_offer="yes" - break - ;; - 3 | mydefaults | MYDEFAULTS | userdefaults | USERDEFAULTS) - default_var_settings || { - msg_error "Failed to apply default.vars" - exit 1 - } - defaults_target="/usr/local/community-scripts/default.vars" - break - ;; - "$APPDEFAULTS_OPTION" | appdefaults | APPDEFAULTS) - if [ -f "$(get_app_defaults_path)" ]; then - header_info - echo -e "${DEFAULT}${BOLD}${BL}Using App Defaults for ${APP} on node $PVEHOST_NAME${CL}" - METHOD="appdefaults" - base_settings - load_vars_file "$(get_app_defaults_path)" - echo_default - defaults_target="$(get_app_defaults_path)" - break - else - msg_error "No App Defaults available for ${APP}" - exit 1 - fi - ;; - "$SETTINGS_OPTION" | settings | SETTINGS) - settings_menu - # After settings menu, show main menu again - header_info - CHOICE="" - ;; - *) - echo -e "${CROSS}${RD}Invalid option: $CHOICE${CL}" - exit 1 - ;; - esac - done - - if [[ -n "$defaults_target" ]]; then - ensure_storage_selection_for_vars_file "$defaults_target" - fi - - if [[ "$run_maybe_offer" == "yes" ]]; then - maybe_offer_save_app_defaults - fi + if [[ "$run_maybe_offer" == "yes" ]]; then + maybe_offer_save_app_defaults + fi } edit_default_storage() { - local vf="/usr/local/community-scripts/default.vars" + local vf="/usr/local/community-scripts/default.vars" - # Ensure file exists - if [[ ! 
-f "$vf" ]]; then - mkdir -p "$(dirname "$vf")" - touch "$vf" - fi + # Ensure file exists + if [[ ! -f "$vf" ]]; then + mkdir -p "$(dirname "$vf")" + touch "$vf" + fi - # Let ensure_storage_selection_for_vars_file handle everything - ensure_storage_selection_for_vars_file "$vf" + # Let ensure_storage_selection_for_vars_file handle everything + ensure_storage_selection_for_vars_file "$vf" } settings_menu() { - while true; do - local settings_items=( - "1" "Manage API-Diagnostic Setting" - "2" "Edit Default.vars" - ) - if [ -f "$(get_app_defaults_path)" ]; then - settings_items+=("3" "Edit App.vars for ${APP}") - settings_items+=("4" "Back to Main Menu") - else - settings_items+=("3" "Back to Main Menu") - fi + while true; do + local settings_items=( + "1" "Manage API-Diagnostic Setting" + "2" "Edit Default.vars" + ) + if [ -f "$(get_app_defaults_path)" ]; then + settings_items+=("3" "Edit App.vars for ${APP}") + settings_items+=("4" "Back to Main Menu") + else + settings_items+=("3" "Back to Main Menu") + fi - local choice - choice=$(whiptail --backtitle "Proxmox VE Helper Scripts" \ - --title "Community-Scripts SETTINGS Menu" \ - --ok-button "Select" --cancel-button "Exit Script" \ - --menu "\n\nChoose a settings option:\n\nUse Arrow keys to navigate, ENTER to select, TAB for buttons." 20 60 9 \ - "${settings_items[@]}" \ - 3>&1 1>&2 2>&3) || exit_script + local choice + choice=$(whiptail --backtitle "Proxmox VE Helper Scripts" \ + --title "Community-Scripts SETTINGS Menu" \ + --ok-button "Select" --cancel-button "Exit Script" \ + --menu "\n\nChoose a settings option:\n\nUse Arrow keys to navigate, ENTER to select, TAB for buttons." 20 60 9 \ + "${settings_items[@]}" \ + 3>&1 1>&2 2>&3) || exit_script - case "$choice" in - 1) diagnostics_menu ;; - 2) nano /usr/local/community-scripts/default.vars ;; - 3) - if [ -f "$(get_app_defaults_path)" ]; then - nano "$(get_app_defaults_path)" - else - # Back was selected (no app.vars available) - return - fi - ;; - 4) - # Back to main menu - return - ;; - esac - done + case "$choice" in + 1) diagnostics_menu ;; + 2) nano /usr/local/community-scripts/default.vars ;; + 3) + if [ -f "$(get_app_defaults_path)" ]; then + nano "$(get_app_defaults_path)" + else + # Back was selected (no app.vars available) + return + fi + ;; + 4) + # Back to main menu + return + ;; + esac + done } # ------------------------------------------------------------------------------ @@ -2352,21 +2474,21 @@ settings_menu() { # - Warns if under-provisioned and asks user to continue or abort # ------------------------------------------------------------------------------ check_container_resources() { - current_ram=$(free -m | awk 'NR==2{print $2}') - current_cpu=$(nproc) + current_ram=$(free -m | awk 'NR==2{print $2}') + current_cpu=$(nproc) - if [[ "$current_ram" -lt "$var_ram" ]] || [[ "$current_cpu" -lt "$var_cpu" ]]; then - echo -e "\n${INFO}${HOLD} ${GN}Required: ${var_cpu} CPU, ${var_ram}MB RAM ${CL}| ${RD}Current: ${current_cpu} CPU, ${current_ram}MB RAM${CL}" - echo -e "${YWB}Please ensure that the ${APP} LXC is configured with at least ${var_cpu} vCPU and ${var_ram} MB RAM for the build process.${CL}\n" - echo -ne "${INFO}${HOLD} May cause data loss! ${INFO} Continue update with under-provisioned LXC? " - read -r prompt - if [[ ! 
${prompt,,} =~ ^(yes)$ ]]; then - echo -e "${CROSS}${HOLD} ${YWB}Exiting based on user input.${CL}" - exit 1 - fi - else - echo -e "" + if [[ "$current_ram" -lt "$var_ram" ]] || [[ "$current_cpu" -lt "$var_cpu" ]]; then + echo -e "\n${INFO}${HOLD} ${GN}Required: ${var_cpu} CPU, ${var_ram}MB RAM ${CL}| ${RD}Current: ${current_cpu} CPU, ${current_ram}MB RAM${CL}" + echo -e "${YWB}Please ensure that the ${APP} LXC is configured with at least ${var_cpu} vCPU and ${var_ram} MB RAM for the build process.${CL}\n" + echo -ne "${INFO}${HOLD} May cause data loss! ${INFO} Continue update with under-provisioned LXC? " + read -r prompt + if [[ ! ${prompt,,} =~ ^(yes)$ ]]; then + echo -e "${CROSS}${HOLD} ${YWB}Exiting based on user input.${CL}" + exit 1 fi + else + echo -e "" + fi } # ------------------------------------------------------------------------------ @@ -2376,18 +2498,18 @@ check_container_resources() { # - Warns if usage >80% and asks user confirmation before proceeding # ------------------------------------------------------------------------------ check_container_storage() { - total_size=$(df /boot --output=size | tail -n 1) - local used_size=$(df /boot --output=used | tail -n 1) - usage=$((100 * used_size / total_size)) - if ((usage > 80)); then - echo -e "${INFO}${HOLD} ${YWB}Warning: Storage is dangerously low (${usage}%).${CL}" - echo -ne "Continue anyway? " - read -r prompt - if [[ ! ${prompt,,} =~ ^(y|yes)$ ]]; then - echo -e "${CROSS}${HOLD}${YWB}Exiting based on user input.${CL}" - exit 1 - fi + total_size=$(df /boot --output=size | tail -n 1) + local used_size=$(df /boot --output=used | tail -n 1) + usage=$((100 * used_size / total_size)) + if ((usage > 80)); then + echo -e "${INFO}${HOLD} ${YWB}Warning: Storage is dangerously low (${usage}%).${CL}" + echo -ne "Continue anyway? " + read -r prompt + if [[ ! 
${prompt,,} =~ ^(y|yes)$ ]]; then + echo -e "${CROSS}${HOLD}${YWB}Exiting based on user input.${CL}" + exit 1 fi + fi } # ------------------------------------------------------------------------------ @@ -2397,9 +2519,9 @@ check_container_storage() { # - Supports RSA, Ed25519, ECDSA and filters out comments/invalid lines # ------------------------------------------------------------------------------ ssh_extract_keys_from_file() { - local f="$1" - [[ -r "$f" ]] || return 0 - tr -d '\r' <"$f" | awk ' + local f="$1" + [[ -r "$f" ]] || return 0 + tr -d '\r' <"$f" | awk ' /^[[:space:]]*#/ {next} /^[[:space:]]*$/ {next} # nackt: typ base64 [comment] @@ -2419,45 +2541,45 @@ ssh_extract_keys_from_file() { # - Generates fingerprint, type and comment for each key # ------------------------------------------------------------------------------ ssh_build_choices_from_files() { - local -a files=("$@") - CHOICES=() - COUNT=0 - MAPFILE="$(mktemp)" - local id key typ fp cmt base ln=0 + local -a files=("$@") + CHOICES=() + COUNT=0 + MAPFILE="$(mktemp)" + local id key typ fp cmt base ln=0 - for f in "${files[@]}"; do - [[ -f "$f" && -r "$f" ]] || continue - base="$(basename -- "$f")" - case "$base" in - known_hosts | known_hosts.* | config) continue ;; - id_*) [[ "$f" != *.pub ]] && continue ;; - esac + for f in "${files[@]}"; do + [[ -f "$f" && -r "$f" ]] || continue + base="$(basename -- "$f")" + case "$base" in + known_hosts | known_hosts.* | config) continue ;; + id_*) [[ "$f" != *.pub ]] && continue ;; + esac - # map every key in file - while IFS= read -r key; do - [[ -n "$key" ]] || continue + # map every key in file + while IFS= read -r key; do + [[ -n "$key" ]] || continue - typ="" - fp="" - cmt="" - # Only the pure key part (without options) is already included in ‘key’. - read -r _typ _b64 _cmt <<<"$key" - typ="${_typ:-key}" - cmt="${_cmt:-}" - # Fingerprint via ssh-keygen (if available) - if command -v ssh-keygen >/dev/null 2>&1; then - fp="$(printf '%s\n' "$key" | ssh-keygen -lf - 2>/dev/null | awk '{print $2}')" - fi - # Label shorten - [[ ${#cmt} -gt 40 ]] && cmt="${cmt:0:37}..." + typ="" + fp="" + cmt="" + # Only the pure key part (without options) is already included in ‘key’. + read -r _typ _b64 _cmt <<<"$key" + typ="${_typ:-key}" + cmt="${_cmt:-}" + # Fingerprint via ssh-keygen (if available) + if command -v ssh-keygen >/dev/null 2>&1; then + fp="$(printf '%s\n' "$key" | ssh-keygen -lf - 2>/dev/null | awk '{print $2}')" + fi + # Label shorten + [[ ${#cmt} -gt 40 ]] && cmt="${cmt:0:37}..." - ln=$((ln + 1)) - COUNT=$((COUNT + 1)) - id="K${COUNT}" - echo "${id}|${key}" >>"$MAPFILE" - CHOICES+=("$id" "[$typ] ${fp:+$fp }${cmt:+$cmt }— ${base}" "OFF") - done < <(ssh_extract_keys_from_file "$f") - done + ln=$((ln + 1)) + COUNT=$((COUNT + 1)) + id="K${COUNT}" + echo "${id}|${key}" >>"$MAPFILE" + CHOICES+=("$id" "[$typ] ${fp:+$fp }${cmt:+$cmt }— ${base}" "OFF") + done < <(ssh_extract_keys_from_file "$f") + done } # ------------------------------------------------------------------------------ @@ -2467,105 +2589,105 @@ ssh_build_choices_from_files() { # - Includes ~/.ssh/*.pub, /etc/ssh/authorized_keys, etc. 
# ------------------------------------------------------------------------------ ssh_discover_default_files() { - local -a cand=() - shopt -s nullglob - cand+=(/root/.ssh/authorized_keys /root/.ssh/authorized_keys2) - cand+=(/root/.ssh/*.pub) - cand+=(/etc/ssh/authorized_keys /etc/ssh/authorized_keys.d/*) - shopt -u nullglob - printf '%s\0' "${cand[@]}" + local -a cand=() + shopt -s nullglob + cand+=(/root/.ssh/authorized_keys /root/.ssh/authorized_keys2) + cand+=(/root/.ssh/*.pub) + cand+=(/etc/ssh/authorized_keys /etc/ssh/authorized_keys.d/*) + shopt -u nullglob + printf '%s\0' "${cand[@]}" } configure_ssh_settings() { - local step_info="${1:-}" - local backtitle="[dev] Proxmox VE Helper Scripts" - [[ -n "$step_info" ]] && backtitle="[dev] Proxmox VE Helper Scripts [${step_info}]" + local step_info="${1:-}" + local backtitle="[dev] Proxmox VE Helper Scripts" + [[ -n "$step_info" ]] && backtitle="[dev] Proxmox VE Helper Scripts [${step_info}]" - SSH_KEYS_FILE="$(mktemp)" - : >"$SSH_KEYS_FILE" + SSH_KEYS_FILE="$(mktemp)" + : >"$SSH_KEYS_FILE" - IFS=$'\0' read -r -d '' -a _def_files < <(ssh_discover_default_files && printf '\0') - ssh_build_choices_from_files "${_def_files[@]}" - local default_key_count="$COUNT" + IFS=$'\0' read -r -d '' -a _def_files < <(ssh_discover_default_files && printf '\0') + ssh_build_choices_from_files "${_def_files[@]}" + local default_key_count="$COUNT" - local ssh_key_mode - if [[ "$default_key_count" -gt 0 ]]; then - ssh_key_mode=$(whiptail --backtitle "$backtitle" --title "SSH KEY SOURCE" --menu \ - "Provision SSH keys for root:" 14 72 4 \ - "found" "Select from detected keys (${default_key_count})" \ - "manual" "Paste a single public key" \ - "folder" "Scan another folder (path or glob)" \ - "none" "No keys" 3>&1 1>&2 2>&3) || exit_script - else - ssh_key_mode=$(whiptail --backtitle "$backtitle" --title "SSH KEY SOURCE" --menu \ - "No host keys detected; choose manual/none:" 12 72 2 \ - "manual" "Paste a single public key" \ - "none" "No keys" 3>&1 1>&2 2>&3) || exit_script - fi + local ssh_key_mode + if [[ "$default_key_count" -gt 0 ]]; then + ssh_key_mode=$(whiptail --backtitle "$backtitle" --title "SSH KEY SOURCE" --menu \ + "Provision SSH keys for root:" 14 72 4 \ + "found" "Select from detected keys (${default_key_count})" \ + "manual" "Paste a single public key" \ + "folder" "Scan another folder (path or glob)" \ + "none" "No keys" 3>&1 1>&2 2>&3) || exit_script + else + ssh_key_mode=$(whiptail --backtitle "$backtitle" --title "SSH KEY SOURCE" --menu \ + "No host keys detected; choose manual/none:" 12 72 2 \ + "manual" "Paste a single public key" \ + "none" "No keys" 3>&1 1>&2 2>&3) || exit_script + fi - case "$ssh_key_mode" in - found) - local selection - selection=$(whiptail --backtitle "$backtitle" --title "SELECT HOST KEYS" \ - --checklist "Select one or more keys to import:" 20 140 10 "${CHOICES[@]}" 3>&1 1>&2 2>&3) || exit_script - for tag in $selection; do + case "$ssh_key_mode" in + found) + local selection + selection=$(whiptail --backtitle "$backtitle" --title "SELECT HOST KEYS" \ + --checklist "Select one or more keys to import:" 20 140 10 "${CHOICES[@]}" 3>&1 1>&2 2>&3) || exit_script + for tag in $selection; do + tag="${tag%\"}" + tag="${tag#\"}" + local line + line=$(grep -E "^${tag}\|" "$MAPFILE" | head -n1 | cut -d'|' -f2-) + [[ -n "$line" ]] && printf '%s\n' "$line" >>"$SSH_KEYS_FILE" + done + ;; + manual) + SSH_AUTHORIZED_KEY="$(whiptail --backtitle "$backtitle" \ + --inputbox "Paste one SSH public key line (ssh-ed25519/ssh-rsa/...)" 
10 72 --title "SSH Public Key" 3>&1 1>&2 2>&3)" + [[ -n "$SSH_AUTHORIZED_KEY" ]] && printf '%s\n' "$SSH_AUTHORIZED_KEY" >>"$SSH_KEYS_FILE" + ;; + folder) + local glob_path + glob_path=$(whiptail --backtitle "$backtitle" \ + --inputbox "Enter a folder or glob to scan (e.g. /root/.ssh/*.pub)" 10 72 --title "Scan Folder/Glob" 3>&1 1>&2 2>&3) + if [[ -n "$glob_path" ]]; then + shopt -s nullglob + read -r -a _scan_files <<<"$glob_path" + shopt -u nullglob + if [[ "${#_scan_files[@]}" -gt 0 ]]; then + ssh_build_choices_from_files "${_scan_files[@]}" + if [[ "$COUNT" -gt 0 ]]; then + local folder_selection + folder_selection=$(whiptail --backtitle "$backtitle" --title "SELECT FOLDER KEYS" \ + --checklist "Select key(s) to import:" 20 78 10 "${CHOICES[@]}" 3>&1 1>&2 2>&3) || exit_script + for tag in $folder_selection; do tag="${tag%\"}" tag="${tag#\"}" local line line=$(grep -E "^${tag}\|" "$MAPFILE" | head -n1 | cut -d'|' -f2-) [[ -n "$line" ]] && printf '%s\n' "$line" >>"$SSH_KEYS_FILE" - done - ;; - manual) - SSH_AUTHORIZED_KEY="$(whiptail --backtitle "$backtitle" \ - --inputbox "Paste one SSH public key line (ssh-ed25519/ssh-rsa/...)" 10 72 --title "SSH Public Key" 3>&1 1>&2 2>&3)" - [[ -n "$SSH_AUTHORIZED_KEY" ]] && printf '%s\n' "$SSH_AUTHORIZED_KEY" >>"$SSH_KEYS_FILE" - ;; - folder) - local glob_path - glob_path=$(whiptail --backtitle "$backtitle" \ - --inputbox "Enter a folder or glob to scan (e.g. /root/.ssh/*.pub)" 10 72 --title "Scan Folder/Glob" 3>&1 1>&2 2>&3) - if [[ -n "$glob_path" ]]; then - shopt -s nullglob - read -r -a _scan_files <<<"$glob_path" - shopt -u nullglob - if [[ "${#_scan_files[@]}" -gt 0 ]]; then - ssh_build_choices_from_files "${_scan_files[@]}" - if [[ "$COUNT" -gt 0 ]]; then - local folder_selection - folder_selection=$(whiptail --backtitle "$backtitle" --title "SELECT FOLDER KEYS" \ - --checklist "Select key(s) to import:" 20 78 10 "${CHOICES[@]}" 3>&1 1>&2 2>&3) || exit_script - for tag in $folder_selection; do - tag="${tag%\"}" - tag="${tag#\"}" - local line - line=$(grep -E "^${tag}\|" "$MAPFILE" | head -n1 | cut -d'|' -f2-) - [[ -n "$line" ]] && printf '%s\n' "$line" >>"$SSH_KEYS_FILE" - done - else - whiptail --backtitle "$backtitle" --msgbox "No keys found in: $glob_path" 8 60 - fi - else - whiptail --backtitle "$backtitle" --msgbox "Path/glob returned no files." 8 60 - fi + done + else + whiptail --backtitle "$backtitle" --msgbox "No keys found in: $glob_path" 8 60 fi - ;; - none) - : - ;; - esac - - if [[ -s "$SSH_KEYS_FILE" ]]; then - sort -u -o "$SSH_KEYS_FILE" "$SSH_KEYS_FILE" - printf '\n' >>"$SSH_KEYS_FILE" + else + whiptail --backtitle "$backtitle" --msgbox "Path/glob returned no files." 8 60 + fi fi + ;; + none) + : + ;; + esac - # Always show SSH access dialog - user should be able to enable SSH even without keys - if (whiptail --backtitle "$backtitle" --defaultno --title "SSH ACCESS" --yesno "Enable root SSH access?" 10 58); then - SSH="yes" - else - SSH="no" - fi + if [[ -s "$SSH_KEYS_FILE" ]]; then + sort -u -o "$SSH_KEYS_FILE" "$SSH_KEYS_FILE" + printf '\n' >>"$SSH_KEYS_FILE" + fi + + # Always show SSH access dialog - user should be able to enable SSH even without keys + if (whiptail --backtitle "$backtitle" --defaultno --title "SSH ACCESS" --yesno "Enable root SSH access?" 
10 58); then + SSH="yes" + else + SSH="no" + fi } # ------------------------------------------------------------------------------ @@ -2577,39 +2699,39 @@ configure_ssh_settings() { # - Otherwise: shows update/setting menu # ------------------------------------------------------------------------------ start() { - source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/tools.func) - if command -v pveversion >/dev/null 2>&1; then - install_script || return 0 - return 0 - elif [ ! -z ${PHS_SILENT+x} ] && [[ "${PHS_SILENT}" == "1" ]]; then - VERBOSE="no" - set_std_mode - update_script - else - CHOICE=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --title "${APP} LXC Update/Setting" --menu \ - "Support/Update functions for ${APP} LXC. Choose an option:" \ - 12 60 3 \ - "1" "YES (Silent Mode)" \ - "2" "YES (Verbose Mode)" \ - "3" "NO (Cancel Update)" --nocancel --default-item "1" 3>&1 1>&2 2>&3) + source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/tools.func) + if command -v pveversion >/dev/null 2>&1; then + install_script || return 0 + return 0 + elif [ ! -z ${PHS_SILENT+x} ] && [[ "${PHS_SILENT}" == "1" ]]; then + VERBOSE="no" + set_std_mode + update_script + else + CHOICE=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --title "${APP} LXC Update/Setting" --menu \ + "Support/Update functions for ${APP} LXC. Choose an option:" \ + 12 60 3 \ + "1" "YES (Silent Mode)" \ + "2" "YES (Verbose Mode)" \ + "3" "NO (Cancel Update)" --nocancel --default-item "1" 3>&1 1>&2 2>&3) - case "$CHOICE" in - 1) - VERBOSE="no" - set_std_mode - ;; - 2) - VERBOSE="yes" - set_std_mode - ;; - 3) - clear - exit_script - exit - ;; - esac - update_script - fi + case "$CHOICE" in + 1) + VERBOSE="no" + set_std_mode + ;; + 2) + VERBOSE="yes" + set_std_mode + ;; + 3) + clear + exit_script + exit + ;; + esac + update_script + fi } # ============================================================================== @@ -2630,256 +2752,256 @@ start() { # - Posts installation telemetry to API if diagnostics enabled # ------------------------------------------------------------------------------ build_container() { - # if [ "$VERBOSE" == "yes" ]; then set -x; fi + # if [ "$VERBOSE" == "yes" ]; then set -x; fi - NET_STRING="-net0 name=eth0,bridge=${BRG:-vmbr0}" + NET_STRING="-net0 name=eth0,bridge=${BRG:-vmbr0}" - # MAC - if [[ -n "$MAC" ]]; then - case "$MAC" in - ,hwaddr=*) NET_STRING+="$MAC" ;; - *) NET_STRING+=",hwaddr=$MAC" ;; - esac - fi - - # IP (immer zwingend, Standard dhcp) - NET_STRING+=",ip=${NET:-dhcp}" - - # Gateway - if [[ -n "$GATE" ]]; then - case "$GATE" in - ,gw=*) NET_STRING+="$GATE" ;; - *) NET_STRING+=",gw=$GATE" ;; - esac - fi - - # VLAN - if [[ -n "$VLAN" ]]; then - case "$VLAN" in - ,tag=*) NET_STRING+="$VLAN" ;; - *) NET_STRING+=",tag=$VLAN" ;; - esac - fi - - # MTU - if [[ -n "$MTU" ]]; then - case "$MTU" in - ,mtu=*) NET_STRING+="$MTU" ;; - *) NET_STRING+=",mtu=$MTU" ;; - esac - fi - - # IPv6 Handling - case "$IPV6_METHOD" in - auto) NET_STRING="$NET_STRING,ip6=auto" ;; - dhcp) NET_STRING="$NET_STRING,ip6=dhcp" ;; - static) - NET_STRING="$NET_STRING,ip6=$IPV6_ADDR" - [ -n "$IPV6_GATE" ] && NET_STRING="$NET_STRING,gw6=$IPV6_GATE" - ;; - none) ;; + # MAC + if [[ -n "$MAC" ]]; then + case "$MAC" in + ,hwaddr=*) NET_STRING+="$MAC" ;; + *) NET_STRING+=",hwaddr=$MAC" ;; esac + fi - # Build FEATURES string based on container type and user choices - FEATURES="" + # IP (immer zwingend, Standard 
dhcp) + NET_STRING+=",ip=${NET:-dhcp}" - # Nesting support (user configurable, default enabled) - if [ "${ENABLE_NESTING:-1}" == "1" ]; then - FEATURES="nesting=1" - fi + # Gateway + if [[ -n "$GATE" ]]; then + case "$GATE" in + ,gw=*) NET_STRING+="$GATE" ;; + *) NET_STRING+=",gw=$GATE" ;; + esac + fi - # Keyctl for unprivileged containers (needed for Docker) - if [ "$CT_TYPE" == "1" ]; then - [ -n "$FEATURES" ] && FEATURES="$FEATURES," - FEATURES="${FEATURES}keyctl=1" - fi + # VLAN + if [[ -n "$VLAN" ]]; then + case "$VLAN" in + ,tag=*) NET_STRING+="$VLAN" ;; + *) NET_STRING+=",tag=$VLAN" ;; + esac + fi - if [ "$ENABLE_FUSE" == "yes" ]; then - [ -n "$FEATURES" ] && FEATURES="$FEATURES," - FEATURES="${FEATURES}fuse=1" - fi + # MTU + if [[ -n "$MTU" ]]; then + case "$MTU" in + ,mtu=*) NET_STRING+="$MTU" ;; + *) NET_STRING+=",mtu=$MTU" ;; + esac + fi - # NEW IMPLEMENTATION (Fixed): Build PCT_OPTIONS properly - # Key insight: Bash cannot export arrays, so we build the options as a string + # IPv6 Handling + case "$IPV6_METHOD" in + auto) NET_STRING="$NET_STRING,ip6=auto" ;; + dhcp) NET_STRING="$NET_STRING,ip6=dhcp" ;; + static) + NET_STRING="$NET_STRING,ip6=$IPV6_ADDR" + [ -n "$IPV6_GATE" ] && NET_STRING="$NET_STRING,gw6=$IPV6_GATE" + ;; + none) ;; + esac - TEMP_DIR=$(mktemp -d) - pushd "$TEMP_DIR" >/dev/null + # Build FEATURES string based on container type and user choices + FEATURES="" - # Unified install.func automatically detects OS type (debian, alpine, fedora, etc.) - export FUNCTIONS_FILE_PATH="$(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/install.func)" + # Nesting support (user configurable, default enabled) + if [ "${ENABLE_NESTING:-1}" == "1" ]; then + FEATURES="nesting=1" + fi - # Core exports for install.func - export DIAGNOSTICS="$DIAGNOSTICS" - export RANDOM_UUID="$RANDOM_UUID" - export SESSION_ID="$SESSION_ID" - export CACHER="$APT_CACHER" - export CACHER_IP="$APT_CACHER_IP" - export tz="$timezone" - export APPLICATION="$APP" - export app="$NSAPP" - export PASSWORD="$PW" - export VERBOSE="$VERBOSE" - export SSH_ROOT="${SSH}" - export SSH_AUTHORIZED_KEY - export CTID="$CT_ID" - export CTTYPE="$CT_TYPE" - export ENABLE_FUSE="$ENABLE_FUSE" - export ENABLE_TUN="$ENABLE_TUN" - export PCT_OSTYPE="$var_os" - export PCT_OSVERSION="$var_version" - export PCT_DISK_SIZE="$DISK_SIZE" + # Keyctl for unprivileged containers (needed for Docker) + if [ "$CT_TYPE" == "1" ]; then + [ -n "$FEATURES" ] && FEATURES="$FEATURES," + FEATURES="${FEATURES}keyctl=1" + fi - # DEV_MODE exports (optional, for debugging) - export BUILD_LOG="$BUILD_LOG" - export INSTALL_LOG="/root/.install-${SESSION_ID}.log" - export dev_mode="${dev_mode:-}" - export DEV_MODE_MOTD="${DEV_MODE_MOTD:-false}" - export DEV_MODE_KEEP="${DEV_MODE_KEEP:-false}" - export DEV_MODE_TRACE="${DEV_MODE_TRACE:-false}" - export DEV_MODE_PAUSE="${DEV_MODE_PAUSE:-false}" - export DEV_MODE_BREAKPOINT="${DEV_MODE_BREAKPOINT:-false}" - export DEV_MODE_LOGS="${DEV_MODE_LOGS:-false}" - export DEV_MODE_DRYRUN="${DEV_MODE_DRYRUN:-false}" + if [ "$ENABLE_FUSE" == "yes" ]; then + [ -n "$FEATURES" ] && FEATURES="$FEATURES," + FEATURES="${FEATURES}fuse=1" + fi - # Build PCT_OPTIONS as multi-line string - PCT_OPTIONS_STRING=" -features $FEATURES + # NEW IMPLEMENTATION (Fixed): Build PCT_OPTIONS properly + # Key insight: Bash cannot export arrays, so we build the options as a string + + TEMP_DIR=$(mktemp -d) + pushd "$TEMP_DIR" >/dev/null + + # Unified install.func automatically detects OS type (debian, 
alpine, fedora, etc.) + export FUNCTIONS_FILE_PATH="$(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/install.func)" + + # Core exports for install.func + export DIAGNOSTICS="$DIAGNOSTICS" + export RANDOM_UUID="$RANDOM_UUID" + export SESSION_ID="$SESSION_ID" + export CACHER="$APT_CACHER" + export CACHER_IP="$APT_CACHER_IP" + export tz="$timezone" + export APPLICATION="$APP" + export app="$NSAPP" + export PASSWORD="$PW" + export VERBOSE="$VERBOSE" + export SSH_ROOT="${SSH}" + export SSH_AUTHORIZED_KEY + export CTID="$CT_ID" + export CTTYPE="$CT_TYPE" + export ENABLE_FUSE="$ENABLE_FUSE" + export ENABLE_TUN="$ENABLE_TUN" + export PCT_OSTYPE="$var_os" + export PCT_OSVERSION="$var_version" + export PCT_DISK_SIZE="$DISK_SIZE" + + # DEV_MODE exports (optional, for debugging) + export BUILD_LOG="$BUILD_LOG" + export INSTALL_LOG="/root/.install-${SESSION_ID}.log" + export dev_mode="${dev_mode:-}" + export DEV_MODE_MOTD="${DEV_MODE_MOTD:-false}" + export DEV_MODE_KEEP="${DEV_MODE_KEEP:-false}" + export DEV_MODE_TRACE="${DEV_MODE_TRACE:-false}" + export DEV_MODE_PAUSE="${DEV_MODE_PAUSE:-false}" + export DEV_MODE_BREAKPOINT="${DEV_MODE_BREAKPOINT:-false}" + export DEV_MODE_LOGS="${DEV_MODE_LOGS:-false}" + export DEV_MODE_DRYRUN="${DEV_MODE_DRYRUN:-false}" + + # Build PCT_OPTIONS as multi-line string + PCT_OPTIONS_STRING=" -features $FEATURES -hostname $HN -tags $TAGS" - # Add storage if specified - if [ -n "$SD" ]; then - PCT_OPTIONS_STRING="$PCT_OPTIONS_STRING - $SD" - fi - - # Add nameserver if specified - if [ -n "$NS" ]; then - PCT_OPTIONS_STRING="$PCT_OPTIONS_STRING - $NS" - fi - - # Network configuration + # Add storage if specified + if [ -n "$SD" ]; then PCT_OPTIONS_STRING="$PCT_OPTIONS_STRING + $SD" + fi + + # Add nameserver if specified + if [ -n "$NS" ]; then + PCT_OPTIONS_STRING="$PCT_OPTIONS_STRING + $NS" + fi + + # Network configuration + PCT_OPTIONS_STRING="$PCT_OPTIONS_STRING $NET_STRING -onboot 1 -cores $CORE_COUNT -memory $RAM_SIZE -unprivileged $CT_TYPE" - # Protection flag (if var_protection was set) - if [ "${PROTECT_CT:-}" == "1" ] || [ "${PROTECT_CT:-}" == "yes" ]; then - PCT_OPTIONS_STRING="$PCT_OPTIONS_STRING + # Protection flag (if var_protection was set) + if [ "${PROTECT_CT:-}" == "1" ] || [ "${PROTECT_CT:-}" == "yes" ]; then + PCT_OPTIONS_STRING="$PCT_OPTIONS_STRING -protection 1" - fi + fi - # Timezone flag (if var_timezone was set) - if [ -n "${CT_TIMEZONE:-}" ]; then - PCT_OPTIONS_STRING="$PCT_OPTIONS_STRING + # Timezone flag (if var_timezone was set) + if [ -n "${CT_TIMEZONE:-}" ]; then + PCT_OPTIONS_STRING="$PCT_OPTIONS_STRING -timezone $CT_TIMEZONE" - fi + fi - # Password (already formatted) - if [ -n "$PW" ]; then - PCT_OPTIONS_STRING="$PCT_OPTIONS_STRING + # Password (already formatted) + if [ -n "$PW" ]; then + PCT_OPTIONS_STRING="$PCT_OPTIONS_STRING $PW" + fi + + # Export as string (this works, unlike arrays!) + export PCT_OPTIONS="$PCT_OPTIONS_STRING" + export TEMPLATE_STORAGE="${var_template_storage:-}" + export CONTAINER_STORAGE="${var_container_storage:-}" + + # # DEBUG: Show final PCT_OPTIONS being exported + # echo "[DEBUG] PCT_OPTIONS to be exported:" + # echo "$PCT_OPTIONS" | sed 's/^/ /' + # echo "[DEBUG] Calling create_lxc_container..." + + create_lxc_container || exit $? 
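+
+  # Illustrative sketch only (an assumption, not the authoritative implementation):
+  # since bash cannot export arrays, create_lxc_container is expected to rely on
+  # plain word splitting of the exported multi-line $PCT_OPTIONS string when it
+  # invokes pct(1), roughly along the lines of:
+  #
+  #   pct create "$CTID" "${TEMPLATE_STORAGE}:vztmpl/${TEMPLATE}" \
+  #     -rootfs "${CONTAINER_STORAGE}:${PCT_DISK_SIZE}" \
+  #     $PCT_OPTIONS   # intentionally unquoted: newlines/spaces split into args
+  #
+  # TEMPLATE is a placeholder name here; the real template resolution, storage
+  # handling and error reporting live inside the downloaded create_lxc_container.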
+ + LXC_CONFIG="/etc/pve/lxc/${CTID}.conf" + + # ============================================================================ + # GPU/USB PASSTHROUGH CONFIGURATION + # ============================================================================ + + # Check if GPU passthrough is enabled + # Returns true only if var_gpu is explicitly set to "yes" + # Can be set via: + # - Environment variable: var_gpu=yes bash -c "..." + # - CT script default: var_gpu="${var_gpu:-no}" + # - Advanced settings wizard + # - App defaults file: /usr/local/community-scripts/defaults/.vars + is_gpu_app() { + [[ "${var_gpu:-no}" == "yes" ]] && return 0 + return 1 + } + + # Detect all available GPU devices + detect_gpu_devices() { + INTEL_DEVICES=() + AMD_DEVICES=() + NVIDIA_DEVICES=() + + # Store PCI info to avoid multiple calls + local pci_vga_info=$(lspci -nn 2>/dev/null | grep -E "VGA|Display|3D") + + # Check for Intel GPU - look for Intel vendor ID [8086] + if echo "$pci_vga_info" | grep -q "\[8086:"; then + msg_custom "🎮" "${BL}" "Detected Intel GPU" + if [[ -d /dev/dri ]]; then + for d in /dev/dri/renderD* /dev/dri/card*; do + [[ -e "$d" ]] && INTEL_DEVICES+=("$d") + done + fi fi - # Export as string (this works, unlike arrays!) - export PCT_OPTIONS="$PCT_OPTIONS_STRING" - export TEMPLATE_STORAGE="${var_template_storage:-}" - export CONTAINER_STORAGE="${var_container_storage:-}" - - # # DEBUG: Show final PCT_OPTIONS being exported - # echo "[DEBUG] PCT_OPTIONS to be exported:" - # echo "$PCT_OPTIONS" | sed 's/^/ /' - # echo "[DEBUG] Calling create_lxc_container..." - - create_lxc_container || exit $? - - LXC_CONFIG="/etc/pve/lxc/${CTID}.conf" - - # ============================================================================ - # GPU/USB PASSTHROUGH CONFIGURATION - # ============================================================================ - - # Check if GPU passthrough is enabled - # Returns true only if var_gpu is explicitly set to "yes" - # Can be set via: - # - Environment variable: var_gpu=yes bash -c "..." 
- # - CT script default: var_gpu="${var_gpu:-no}" - # - Advanced settings wizard - # - App defaults file: /usr/local/community-scripts/defaults/.vars - is_gpu_app() { - [[ "${var_gpu:-no}" == "yes" ]] && return 0 - return 1 - } - - # Detect all available GPU devices - detect_gpu_devices() { - INTEL_DEVICES=() - AMD_DEVICES=() - NVIDIA_DEVICES=() - - # Store PCI info to avoid multiple calls - local pci_vga_info=$(lspci -nn 2>/dev/null | grep -E "VGA|Display|3D") - - # Check for Intel GPU - look for Intel vendor ID [8086] - if echo "$pci_vga_info" | grep -q "\[8086:"; then - msg_custom "🎮" "${BL}" "Detected Intel GPU" - if [[ -d /dev/dri ]]; then - for d in /dev/dri/renderD* /dev/dri/card*; do - [[ -e "$d" ]] && INTEL_DEVICES+=("$d") - done - fi + # Check for AMD GPU - look for AMD vendor IDs [1002] (AMD/ATI) or [1022] (AMD) + if echo "$pci_vga_info" | grep -qE "\[1002:|\[1022:"; then + msg_custom "🎮" "${RD}" "Detected AMD GPU" + if [[ -d /dev/dri ]]; then + # Only add if not already claimed by Intel + if [[ ${#INTEL_DEVICES[@]} -eq 0 ]]; then + for d in /dev/dri/renderD* /dev/dri/card*; do + [[ -e "$d" ]] && AMD_DEVICES+=("$d") + done fi + fi + fi - # Check for AMD GPU - look for AMD vendor IDs [1002] (AMD/ATI) or [1022] (AMD) - if echo "$pci_vga_info" | grep -qE "\[1002:|\[1022:"; then - msg_custom "🎮" "${RD}" "Detected AMD GPU" - if [[ -d /dev/dri ]]; then - # Only add if not already claimed by Intel - if [[ ${#INTEL_DEVICES[@]} -eq 0 ]]; then - for d in /dev/dri/renderD* /dev/dri/card*; do - [[ -e "$d" ]] && AMD_DEVICES+=("$d") - done - fi - fi - fi + # Check for NVIDIA GPU - look for NVIDIA vendor ID [10de] + if echo "$pci_vga_info" | grep -q "\[10de:"; then + msg_custom "🎮" "${GN}" "Detected NVIDIA GPU" - # Check for NVIDIA GPU - look for NVIDIA vendor ID [10de] - if echo "$pci_vga_info" | grep -q "\[10de:"; then - msg_custom "🎮" "${GN}" "Detected NVIDIA GPU" + # Simple passthrough - just bind /dev/nvidia* devices if they exist + # Skip directories like /dev/nvidia-caps (they need special handling) + for d in /dev/nvidia*; do + [[ -e "$d" ]] || continue + [[ -d "$d" ]] && continue # Skip directories + NVIDIA_DEVICES+=("$d") + done - # Simple passthrough - just bind /dev/nvidia* devices if they exist - # Skip directories like /dev/nvidia-caps (they need special handling) - for d in /dev/nvidia*; do - [[ -e "$d" ]] || continue - [[ -d "$d" ]] && continue # Skip directories - NVIDIA_DEVICES+=("$d") - done + if [[ ${#NVIDIA_DEVICES[@]} -gt 0 ]]; then + msg_custom "🎮" "${GN}" "Found ${#NVIDIA_DEVICES[@]} NVIDIA device(s) for passthrough" + else + msg_warn "NVIDIA GPU detected via PCI but no /dev/nvidia* devices found" + msg_custom "ℹ️" "${YW}" "Skipping NVIDIA passthrough (host drivers may not be loaded)" + fi + fi - if [[ ${#NVIDIA_DEVICES[@]} -gt 0 ]]; then - msg_custom "🎮" "${GN}" "Found ${#NVIDIA_DEVICES[@]} NVIDIA device(s) for passthrough" - else - msg_warn "NVIDIA GPU detected via PCI but no /dev/nvidia* devices found" - msg_custom "ℹ️" "${YW}" "Skipping NVIDIA passthrough (host drivers may not be loaded)" - fi - fi + # Debug output + msg_debug "Intel devices: ${INTEL_DEVICES[*]}" + msg_debug "AMD devices: ${AMD_DEVICES[*]}" + msg_debug "NVIDIA devices: ${NVIDIA_DEVICES[*]}" + } - # Debug output - msg_debug "Intel devices: ${INTEL_DEVICES[*]}" - msg_debug "AMD devices: ${AMD_DEVICES[*]}" - msg_debug "NVIDIA devices: ${NVIDIA_DEVICES[*]}" - } + # Configure USB passthrough for privileged containers + configure_usb_passthrough() { + if [[ "$CT_TYPE" != "0" ]]; then + return 0 + fi - # 
Configure USB passthrough for privileged containers - configure_usb_passthrough() { - if [[ "$CT_TYPE" != "0" ]]; then - return 0 - fi - - msg_info "Configuring automatic USB passthrough (privileged container)" - cat <>"$LXC_CONFIG" + msg_info "Configuring automatic USB passthrough (privileged container)" + cat <>"$LXC_CONFIG" # Automatic USB passthrough (privileged container) lxc.cgroup2.devices.allow: a lxc.cap.drop: @@ -2891,454 +3013,552 @@ lxc.mount.entry: /dev/ttyUSB1 dev/ttyUSB1 none bind,optional,create= lxc.mount.entry: /dev/ttyACM0 dev/ttyACM0 none bind,optional,create=file lxc.mount.entry: /dev/ttyACM1 dev/ttyACM1 none bind,optional,create=file EOF - msg_ok "USB passthrough configured" - } + msg_ok "USB passthrough configured" + } - # Configure GPU passthrough - configure_gpu_passthrough() { - # Skip if: - # GPU passthrough is enabled when var_gpu="yes": - # - Set via environment variable: var_gpu=yes bash -c "..." - # - Set in CT script: var_gpu="${var_gpu:-no}" - # - Enabled in advanced_settings wizard - # - Configured in app defaults file - if ! is_gpu_app "$APP"; then - return 0 - fi + # Configure GPU passthrough + configure_gpu_passthrough() { + # Skip if: + # GPU passthrough is enabled when var_gpu="yes": + # - Set via environment variable: var_gpu=yes bash -c "..." + # - Set in CT script: var_gpu="${var_gpu:-no}" + # - Enabled in advanced_settings wizard + # - Configured in app defaults file + if ! is_gpu_app "$APP"; then + return 0 + fi - detect_gpu_devices + detect_gpu_devices - # Count available GPU types - local gpu_count=0 - local available_gpus=() + # Count available GPU types + local gpu_count=0 + local available_gpus=() - if [[ ${#INTEL_DEVICES[@]} -gt 0 ]]; then - available_gpus+=("INTEL") - gpu_count=$((gpu_count + 1)) - fi + if [[ ${#INTEL_DEVICES[@]} -gt 0 ]]; then + available_gpus+=("INTEL") + gpu_count=$((gpu_count + 1)) + fi - if [[ ${#AMD_DEVICES[@]} -gt 0 ]]; then - available_gpus+=("AMD") - gpu_count=$((gpu_count + 1)) - fi + if [[ ${#AMD_DEVICES[@]} -gt 0 ]]; then + available_gpus+=("AMD") + gpu_count=$((gpu_count + 1)) + fi - if [[ ${#NVIDIA_DEVICES[@]} -gt 0 ]]; then - available_gpus+=("NVIDIA") - gpu_count=$((gpu_count + 1)) - fi + if [[ ${#NVIDIA_DEVICES[@]} -gt 0 ]]; then + available_gpus+=("NVIDIA") + gpu_count=$((gpu_count + 1)) + fi - if [[ $gpu_count -eq 0 ]]; then - msg_custom "ℹ️" "${YW}" "No GPU devices found for passthrough" - return 0 - fi + if [[ $gpu_count -eq 0 ]]; then + msg_custom "ℹ️" "${YW}" "No GPU devices found for passthrough" + return 0 + fi - local selected_gpu="" + local selected_gpu="" - if [[ $gpu_count -eq 1 ]]; then - # Automatic selection for single GPU - selected_gpu="${available_gpus[0]}" - msg_ok "Automatically configuring ${selected_gpu} GPU passthrough" - else - # Multiple GPUs - ask user - echo -e "\n${INFO} Multiple GPU types detected:" - for gpu in "${available_gpus[@]}"; do - echo " - $gpu" - done - read -rp "Which GPU type to passthrough? (${available_gpus[*]}): " selected_gpu - selected_gpu="${selected_gpu^^}" + if [[ $gpu_count -eq 1 ]]; then + # Automatic selection for single GPU + selected_gpu="${available_gpus[0]}" + msg_ok "Automatically configuring ${selected_gpu} GPU passthrough" + else + # Multiple GPUs - ask user + echo -e "\n${INFO} Multiple GPU types detected:" + for gpu in "${available_gpus[@]}"; do + echo " - $gpu" + done + read -rp "Which GPU type to passthrough? 
(${available_gpus[*]}): " selected_gpu + selected_gpu="${selected_gpu^^}" - # Validate selection - local valid=0 - for gpu in "${available_gpus[@]}"; do - [[ "$selected_gpu" == "$gpu" ]] && valid=1 - done + # Validate selection + local valid=0 + for gpu in "${available_gpus[@]}"; do + [[ "$selected_gpu" == "$gpu" ]] && valid=1 + done - if [[ $valid -eq 0 ]]; then - msg_warn "Invalid selection. Skipping GPU passthrough." - return 0 - fi - fi + if [[ $valid -eq 0 ]]; then + msg_warn "Invalid selection. Skipping GPU passthrough." + return 0 + fi + fi - # Apply passthrough configuration based on selection - local dev_idx=0 + # Apply passthrough configuration based on selection + local dev_idx=0 - case "$selected_gpu" in - INTEL | AMD) - local devices=() - [[ "$selected_gpu" == "INTEL" ]] && devices=("${INTEL_DEVICES[@]}") - [[ "$selected_gpu" == "AMD" ]] && devices=("${AMD_DEVICES[@]}") + case "$selected_gpu" in + INTEL | AMD) + local devices=() + [[ "$selected_gpu" == "INTEL" ]] && devices=("${INTEL_DEVICES[@]}") + [[ "$selected_gpu" == "AMD" ]] && devices=("${AMD_DEVICES[@]}") - # Use pct set to add devices with proper dev0/dev1 format - # GIDs will be detected and set after container starts - local dev_index=0 - for dev in "${devices[@]}"; do - # Add to config using pct set (will be visible in GUI) - echo "dev${dev_index}: ${dev},gid=44" >>"$LXC_CONFIG" - dev_index=$((dev_index + 1)) - done + # Use pct set to add devices with proper dev0/dev1 format + # GIDs will be detected and set after container starts + local dev_index=0 + for dev in "${devices[@]}"; do + # Add to config using pct set (will be visible in GUI) + echo "dev${dev_index}: ${dev},gid=44" >>"$LXC_CONFIG" + dev_index=$((dev_index + 1)) + done - export GPU_TYPE="$selected_gpu" - msg_ok "${selected_gpu} GPU passthrough configured (${#devices[@]} devices)" - ;; + export GPU_TYPE="$selected_gpu" + msg_ok "${selected_gpu} GPU passthrough configured (${#devices[@]} devices)" + ;; - NVIDIA) - if [[ ${#NVIDIA_DEVICES[@]} -eq 0 ]]; then - msg_warn "No NVIDIA devices available for passthrough" - return 0 - fi + NVIDIA) + if [[ ${#NVIDIA_DEVICES[@]} -eq 0 ]]; then + msg_warn "No NVIDIA devices available for passthrough" + return 0 + fi - # Use pct set for NVIDIA devices - local dev_index=0 - for dev in "${NVIDIA_DEVICES[@]}"; do - echo "dev${dev_index}: ${dev},gid=44" >>"$LXC_CONFIG" - dev_index=$((dev_index + 1)) - done + # Use pct set for NVIDIA devices + local dev_index=0 + for dev in "${NVIDIA_DEVICES[@]}"; do + echo "dev${dev_index}: ${dev},gid=44" >>"$LXC_CONFIG" + dev_index=$((dev_index + 1)) + done - export GPU_TYPE="NVIDIA" - msg_ok "NVIDIA GPU passthrough configured (${#NVIDIA_DEVICES[@]} devices) - install drivers in container if needed" - ;; - esac - } + export GPU_TYPE="NVIDIA" + msg_ok "NVIDIA GPU passthrough configured (${#NVIDIA_DEVICES[@]} devices) - install drivers in container if needed" + ;; + esac + } - # Additional device passthrough - configure_additional_devices() { - # TUN device passthrough - if [ "$ENABLE_TUN" == "yes" ]; then - cat <>"$LXC_CONFIG" + # Additional device passthrough + configure_additional_devices() { + # TUN device passthrough + if [ "$ENABLE_TUN" == "yes" ]; then + cat <>"$LXC_CONFIG" lxc.cgroup2.devices.allow: c 10:200 rwm lxc.mount.entry: /dev/net/tun dev/net/tun none bind,create=file EOF - fi - - # Coral TPU passthrough - if [[ -e /dev/apex_0 ]]; then - msg_custom "🔌" "${BL}" "Detected Coral TPU - configuring passthrough" - echo "lxc.mount.entry: /dev/apex_0 dev/apex_0 none 
bind,optional,create=file" >>"$LXC_CONFIG" - fi - } - - # Execute pre-start configurations - configure_usb_passthrough - configure_gpu_passthrough - configure_additional_devices - - # ============================================================================ - # START CONTAINER AND INSTALL USERLAND - # ============================================================================ - - msg_info "Starting LXC Container" - pct start "$CTID" - - # Wait for container to be running - for i in {1..10}; do - if pct status "$CTID" | grep -q "status: running"; then - msg_ok "Started LXC Container" - break - fi - sleep 1 - if [ "$i" -eq 10 ]; then - msg_error "LXC Container did not reach running state" - exit 1 - fi - done - - # Wait for network (skip for Alpine initially) - if [ "$var_os" != "alpine" ]; then - msg_info "Waiting for network in LXC container" - - # Wait for IP assignment (IPv4 or IPv6) - local ip_in_lxc="" - for i in {1..20}; do - # Try IPv4 first - ip_in_lxc=$(pct exec "$CTID" -- ip -4 addr show dev eth0 2>/dev/null | awk '/inet / {print $2}' | cut -d/ -f1) - # Fallback to IPv6 if IPv4 not available - if [ -z "$ip_in_lxc" ]; then - ip_in_lxc=$(pct exec "$CTID" -- ip -6 addr show dev eth0 scope global 2>/dev/null | awk '/inet6 / {print $2}' | cut -d/ -f1 | head -n1) - fi - [ -n "$ip_in_lxc" ] && break - sleep 1 - done - - if [ -z "$ip_in_lxc" ]; then - msg_error "No IP assigned to CT $CTID after 20s" - echo -e "${YW}Troubleshooting:${CL}" - echo " • Verify bridge ${BRG} exists and has connectivity" - echo " • Check if DHCP server is reachable (if using DHCP)" - echo " • Verify static IP configuration (if using static IP)" - echo " • Check Proxmox firewall rules" - echo " • If using Tailscale: Disable MagicDNS temporarily" - exit 1 - fi - - # Verify basic connectivity (ping test) - local ping_success=false - for retry in {1..3}; do - if pct exec "$CTID" -- ping -c 1 -W 2 1.1.1.1 &>/dev/null || - pct exec "$CTID" -- ping -c 1 -W 2 8.8.8.8 &>/dev/null || - pct exec "$CTID" -- ping6 -c 1 -W 2 2606:4700:4700::1111 &>/dev/null; then - ping_success=true - break - fi - sleep 2 - done - - if [ "$ping_success" = false ]; then - msg_warn "Network configured (IP: $ip_in_lxc) but connectivity test failed" - echo -e "${YW}Container may have limited internet access. 
Installation will continue...${CL}" - else - msg_ok "Network in LXC is reachable (ping)" - fi fi - # Function to get correct GID inside container - get_container_gid() { - local group="$1" - local gid=$(pct exec "$CTID" -- getent group "$group" 2>/dev/null | cut -d: -f3) - echo "${gid:-44}" # Default to 44 if not found - } + # Coral TPU passthrough + if [[ -e /dev/apex_0 ]]; then + msg_custom "🔌" "${BL}" "Detected Coral TPU - configuring passthrough" + echo "lxc.mount.entry: /dev/apex_0 dev/apex_0 none bind,optional,create=file" >>"$LXC_CONFIG" + fi + } - fix_gpu_gids + # Execute pre-start configurations + configure_usb_passthrough + configure_gpu_passthrough + configure_additional_devices - # Continue with standard container setup - msg_info "Customizing LXC Container" + # ============================================================================ + # START CONTAINER AND INSTALL USERLAND + # ============================================================================ - # # Install GPU userland if configured - # if [[ "${ENABLE_VAAPI:-0}" == "1" ]]; then - # install_gpu_userland "VAAPI" - # fi + msg_info "Starting LXC Container" + pct start "$CTID" - # if [[ "${ENABLE_NVIDIA:-0}" == "1" ]]; then - # install_gpu_userland "NVIDIA" - # fi + # Wait for container to be running + for i in {1..10}; do + if pct status "$CTID" | grep -q "status: running"; then + msg_ok "Started LXC Container" + break + fi + sleep 1 + if [ "$i" -eq 10 ]; then + msg_error "LXC Container did not reach running state" + exit 1 + fi + done - # Continue with standard container setup - install core dependencies based on OS - sleep 3 + # Wait for network (skip for Alpine initially) + if [ "$var_os" != "alpine" ]; then + msg_info "Waiting for network in LXC container" - case "$var_os" in - alpine) - pct exec "$CTID" -- /bin/sh -c 'cat </etc/apk/repositories + # Wait for IP assignment (IPv4 or IPv6) + local ip_in_lxc="" + for i in {1..20}; do + # Try IPv4 first + ip_in_lxc=$(pct exec "$CTID" -- ip -4 addr show dev eth0 2>/dev/null | awk '/inet / {print $2}' | cut -d/ -f1) + # Fallback to IPv6 if IPv4 not available + if [ -z "$ip_in_lxc" ]; then + ip_in_lxc=$(pct exec "$CTID" -- ip -6 addr show dev eth0 scope global 2>/dev/null | awk '/inet6 / {print $2}' | cut -d/ -f1 | head -n1) + fi + [ -n "$ip_in_lxc" ] && break + sleep 1 + done + + if [ -z "$ip_in_lxc" ]; then + msg_error "No IP assigned to CT $CTID after 20s" + echo -e "${YW}Troubleshooting:${CL}" + echo " • Verify bridge ${BRG} exists and has connectivity" + echo " • Check if DHCP server is reachable (if using DHCP)" + echo " • Verify static IP configuration (if using static IP)" + echo " • Check Proxmox firewall rules" + echo " • If using Tailscale: Disable MagicDNS temporarily" + exit 1 + fi + + # Verify basic connectivity (ping test) + local ping_success=false + for retry in {1..3}; do + if pct exec "$CTID" -- ping -c 1 -W 2 1.1.1.1 &>/dev/null || + pct exec "$CTID" -- ping -c 1 -W 2 8.8.8.8 &>/dev/null || + pct exec "$CTID" -- ping6 -c 1 -W 2 2606:4700:4700::1111 &>/dev/null; then + ping_success=true + break + fi + sleep 2 + done + + if [ "$ping_success" = false ]; then + msg_warn "Network configured (IP: $ip_in_lxc) but connectivity test failed" + echo -e "${YW}Container may have limited internet access. 
Installation will continue...${CL}" + else + msg_ok "Network in LXC is reachable (ping)" + fi + fi + + # Function to get correct GID inside container + get_container_gid() { + local group="$1" + local gid=$(pct exec "$CTID" -- getent group "$group" 2>/dev/null | cut -d: -f3) + echo "${gid:-44}" # Default to 44 if not found + } + + fix_gpu_gids + + # Continue with standard container setup + msg_info "Customizing LXC Container" + + # # Install GPU userland if configured + # if [[ "${ENABLE_VAAPI:-0}" == "1" ]]; then + # install_gpu_userland "VAAPI" + # fi + + # if [[ "${ENABLE_NVIDIA:-0}" == "1" ]]; then + # install_gpu_userland "NVIDIA" + # fi + + # Continue with standard container setup - install core dependencies based on OS + sleep 3 + + case "$var_os" in + alpine) + pct exec "$CTID" -- /bin/sh -c 'cat </etc/apk/repositories http://dl-cdn.alpinelinux.org/alpine/latest-stable/main http://dl-cdn.alpinelinux.org/alpine/latest-stable/community EOF' - pct exec "$CTID" -- ash -c "apk add bash newt curl openssh nano mc ncurses jq >/dev/null" - ;; + pct exec "$CTID" -- ash -c "apk add bash newt curl openssh nano mc ncurses jq >/dev/null" + ;; - debian | ubuntu | devuan) - # Locale setup for Debian-based - pct exec "$CTID" -- bash -c "sed -i '/$LANG/ s/^# //' /etc/locale.gen 2>/dev/null || true" - pct exec "$CTID" -- bash -c "locale_line=\$(grep -v '^#' /etc/locale.gen 2>/dev/null | grep -E '^[a-zA-Z]' | awk '{print \$1}' | head -n 1) && \ + debian | ubuntu | devuan) + # First install locales package (required for locale-gen on minimal templates) + pct exec "$CTID" -- bash -c "apt-get update >/dev/null && apt-get install -y locales >/dev/null 2>&1 || true" + + # Locale setup for Debian-based + pct exec "$CTID" -- bash -c "sed -i '/$LANG/ s/^# //' /etc/locale.gen 2>/dev/null || true" + pct exec "$CTID" -- bash -c "locale_line=\$(grep -v '^#' /etc/locale.gen 2>/dev/null | grep -E '^[a-zA-Z]' | awk '{print \$1}' | head -n 1) && \ [[ -n \"\$locale_line\" ]] && echo LANG=\$locale_line >/etc/default/locale && \ locale-gen >/dev/null 2>&1 && \ export LANG=\$locale_line || true" - # Timezone setup - if [[ -z "${tz:-}" ]]; then - tz=$(timedatectl show --property=Timezone --value 2>/dev/null || echo "Etc/UTC") - fi - if pct exec "$CTID" -- test -e "/usr/share/zoneinfo/$tz"; then - pct exec "$CTID" -- bash -c "tz='$tz'; ln -sf \"/usr/share/zoneinfo/\$tz\" /etc/localtime && echo \"\$tz\" >/etc/timezone || true" - else - msg_warn "Skipping timezone setup – zone '$tz' not found in container" - fi - - # Core dependencies - pct exec "$CTID" -- bash -c "apt-get update >/dev/null && apt-get install -y sudo curl mc gnupg2 jq >/dev/null" || { - msg_error "apt-get base packages installation failed" - exit 1 - } - ;; - - fedora | rockylinux | almalinux | centos) - # RHEL-based: Fedora, Rocky, AlmaLinux, CentOS - pct exec "$CTID" -- bash -c "dnf install -y curl sudo mc jq procps-ng >/dev/null 2>&1 || yum install -y curl sudo mc jq procps-ng >/dev/null 2>&1" || { - msg_error "dnf/yum base packages installation failed" - exit 1 - } - ;; - - opensuse) - # openSUSE - pct exec "$CTID" -- bash -c "zypper --non-interactive install curl sudo mc jq >/dev/null" || { - msg_error "zypper base packages installation failed" - exit 1 - } - ;; - - gentoo) - # Gentoo - emerge is slow, only install essentials - pct exec "$CTID" -- bash -c "emerge --quiet app-misc/jq net-misc/curl app-misc/mc >/dev/null 2>&1" || { - msg_warn "Gentoo base packages installation incomplete - may need manual setup" - } - ;; - - openeuler) - # openEuler 
(RHEL-compatible) - pct exec "$CTID" -- bash -c "dnf install -y curl sudo mc jq >/dev/null" || { - msg_error "dnf base packages installation failed" - exit 1 - } - ;; - - *) - msg_warn "Unknown OS '$var_os' - skipping core dependency installation" - ;; - esac - - msg_ok "Customized LXC Container" - - # Install SSH keys - install_ssh_keys_into_ct - - # Run application installer - # NOTE: We disable error handling here because: - # 1. Container errors are caught by error_handler INSIDE container - # 2. Container creates flag file with exit code - # 3. We read flag file and handle cleanup manually below - # 4. We DON'T want host error_handler to fire for lxc-attach command itself - - set +Eeuo pipefail # Disable ALL error handling temporarily - trap - ERR # Remove ERR trap completely - - lxc-attach -n "$CTID" -- bash -c "$(curl -fsSL https://raw.githubusercontent.com/GoldenSpringness/ProxmoxVED/refs/heads/feature/sonobarr/install/${var_install}.sh)" - local lxc_exit=$? - - set -Eeuo pipefail # Re-enable error handling - trap 'error_handler' ERR # Restore ERR trap - - # Check for error flag file in container (more reliable than lxc-attach exit code) - local install_exit_code=0 - if [[ -n "${SESSION_ID:-}" ]]; then - local error_flag="/root/.install-${SESSION_ID}.failed" - if pct exec "$CTID" -- test -f "$error_flag" 2>/dev/null; then - install_exit_code=$(pct exec "$CTID" -- cat "$error_flag" 2>/dev/null || echo "1") - pct exec "$CTID" -- rm -f "$error_flag" 2>/dev/null || true - fi + # Timezone setup + if [[ -z "${tz:-}" ]]; then + tz=$(timedatectl show --property=Timezone --value 2>/dev/null || echo "Etc/UTC") + fi + if pct exec "$CTID" -- test -e "/usr/share/zoneinfo/$tz"; then + pct exec "$CTID" -- bash -c "tz='$tz'; ln -sf \"/usr/share/zoneinfo/\$tz\" /etc/localtime && echo \"\$tz\" >/etc/timezone || true" + else + msg_warn "Skipping timezone setup – zone '$tz' not found in container" fi - # Fallback to lxc-attach exit code if no flag file - if [[ $install_exit_code -eq 0 && $lxc_exit -ne 0 ]]; then - install_exit_code=$lxc_exit + # Core dependencies + pct exec "$CTID" -- bash -c "apt-get update >/dev/null && apt-get install -y sudo curl mc gnupg2 jq >/dev/null" || { + msg_error "apt-get base packages installation failed" + exit 1 + } + ;; + + fedora | rockylinux | almalinux | centos) + # RHEL-based: Fedora, Rocky, AlmaLinux, CentOS + # Detect OS major version for EL10+ compatibility (DNF 5, different packages) + local rhel_version + rhel_version=$(pct exec "$CTID" -- bash -c "grep -oP '(?<=VERSION_ID=\")[0-9]+' /etc/os-release 2>/dev/null || echo 9") + + # First run makecache to ensure repos are ready (critical for fresh templates) + msg_info "Initializing package manager (this may take a moment)..." + if ! pct exec "$CTID" -- bash -c "dnf makecache --refresh 2>&1 || yum makecache 2>&1" >/dev/null 2>&1; then + msg_warn "Package cache update had issues, continuing anyway..." fi - # Installation failed? 
- if [[ $install_exit_code -ne 0 ]]; then - msg_error "Installation failed in container ${CTID} (exit code: ${install_exit_code})" + # Build package list - EL10+ may not have glibc-langpack-en in same form + local rhel_packages="curl sudo mc jq which tar procps-ng ncurses" + if [[ "$rhel_version" -lt 10 ]]; then + rhel_packages="$rhel_packages glibc-langpack-en" + else + # EL10 uses glibc-all-langpacks or langpacks-en + rhel_packages="$rhel_packages langpacks-en glibc-all-langpacks" + fi - # Copy both logs from container before potential deletion - local build_log_copied=false - local install_log_copied=false - - if [[ -n "$CTID" && -n "${SESSION_ID:-}" ]]; then - # Copy BUILD_LOG (creation log) if it exists - if [[ -f "${BUILD_LOG}" ]]; then - cp "${BUILD_LOG}" "/tmp/create-lxc-${CTID}-${SESSION_ID}.log" 2>/dev/null && build_log_copied=true - fi - - # Copy INSTALL_LOG from container - if pct pull "$CTID" "/root/.install-${SESSION_ID}.log" "/tmp/install-lxc-${CTID}-${SESSION_ID}.log" 2>/dev/null; then - install_log_copied=true - fi - - # Show available logs - echo "" - [[ "$build_log_copied" == true ]] && echo -e "${GN}✔${CL} Container creation log: ${BL}/tmp/create-lxc-${CTID}-${SESSION_ID}.log${CL}" - [[ "$install_log_copied" == true ]] && echo -e "${GN}✔${CL} Installation log: ${BL}/tmp/install-lxc-${CTID}-${SESSION_ID}.log${CL}" + # Install base packages with better error handling + local install_log="/tmp/dnf_install_${CTID}.log" + if ! pct exec "$CTID" -- bash -c "dnf install -y $rhel_packages 2>&1 | tee $install_log; exit \${PIPESTATUS[0]}" >/dev/null 2>&1; then + # Check if it's just missing optional packages + if pct exec "$CTID" -- bash -c "rpm -q curl sudo mc jq which tar procps-ng" >/dev/null 2>&1; then + msg_warn "Some optional packages may have failed, but core packages installed" + else + # Real failure - try minimal install + msg_warn "Full package install failed, trying minimal set..." + if ! pct exec "$CTID" -- bash -c "dnf install -y curl sudo jq which tar 2>&1" >/dev/null 2>&1; then + msg_error "dnf/yum base packages installation failed" + pct exec "$CTID" -- bash -c "cat $install_log 2>/dev/null" || true + exit 1 fi + fi + fi - # Dev mode: Keep container or open breakpoint shell - if [[ "${DEV_MODE_KEEP:-false}" == "true" ]]; then - msg_dev "Keep mode active - container ${CTID} preserved" - return 0 - elif [[ "${DEV_MODE_BREAKPOINT:-false}" == "true" ]]; then - msg_dev "Breakpoint mode - opening shell in container ${CTID}" - echo -e "${YW}Type 'exit' to return to host${CL}" - pct enter "$CTID" - echo "" - echo -en "${YW}Container ${CTID} still running. Remove now? 
(y/N): ${CL}" - if read -r response && [[ "$response" =~ ^[Yy]$ ]]; then - pct stop "$CTID" &>/dev/null || true - pct destroy "$CTID" &>/dev/null || true - msg_ok "Container ${CTID} removed" - else - msg_dev "Container ${CTID} kept for debugging" - fi - exit $install_exit_code - fi + # Set locale for RHEL-based systems + pct exec "$CTID" -- bash -c "localectl set-locale LANG=en_US.UTF-8 2>/dev/null || echo 'LANG=en_US.UTF-8' > /etc/locale.conf" || true - # Report failure to API before container cleanup - post_update_to_api "failed" "$install_exit_code" + # Timezone setup for RHEL + if [[ -z "${tz:-}" ]]; then + tz=$(timedatectl show --property=Timezone --value 2>/dev/null || echo "Etc/UTC") + fi + [[ "${tz:-}" == Etc/* ]] && tz="UTC" + if pct exec "$CTID" -- test -e "/usr/share/zoneinfo/$tz"; then + pct exec "$CTID" -- bash -c "timedatectl set-timezone '$tz' 2>/dev/null || ln -sf '/usr/share/zoneinfo/$tz' /etc/localtime" || true + fi + ;; - # Prompt user for cleanup with 60s timeout (plain echo - no msg_info to avoid spinner) - echo "" - echo -en "${YW}Remove broken container ${CTID}? (Y/n) [auto-remove in 60s]: ${CL}" + opensuse) + # openSUSE - special handling for terminal/locale issues + # Use --gpg-auto-import-keys to avoid interactive prompts that cause hangs + msg_info "Initializing package manager for openSUSE..." + pct exec "$CTID" -- bash -c "zypper --gpg-auto-import-keys --non-interactive refresh 2>&1" >/dev/null 2>&1 || true - if read -t 60 -r response; then - if [[ -z "$response" || "$response" =~ ^[Yy]$ ]]; then - # Remove container - echo -e "\n${TAB}${HOLD}${YW}Removing container ${CTID}${CL}" - pct stop "$CTID" &>/dev/null || true - pct destroy "$CTID" &>/dev/null || true - echo -e "${BFR}${CM}${GN}Container ${CTID} removed${CL}" - elif [[ "$response" =~ ^[Nn]$ ]]; then - echo -e "\n${TAB}${YW}Container ${CTID} kept for debugging${CL}" + # Install packages - ncurses and terminfo are CRITICAL for terminal to work + if ! pct exec "$CTID" -- bash -c "zypper --gpg-auto-import-keys --non-interactive install -y curl sudo mc jq glibc-locale ncurses terminfo-base 2>&1" >/dev/null 2>&1; then + # Try without glibc-locale + if ! 
pct exec "$CTID" -- bash -c "zypper --gpg-auto-import-keys --non-interactive install -y curl sudo mc jq ncurses terminfo-base 2>&1" >/dev/null 2>&1; then + msg_error "zypper base packages installation failed" + exit 1 + fi + fi - # Dev mode: Setup MOTD/SSH for debugging access to broken container - if [[ "${DEV_MODE_MOTD:-false}" == "true" ]]; then - echo -e "${TAB}${HOLD}${DGN}Setting up MOTD and SSH for debugging...${CL}" - if pct exec "$CTID" -- bash -c " - source <(curl -fsSL https://raw.githubusercontent.com/GoldenSpringness/ProxmoxVED/refs/heads/feature/sonobarr/misc/install.func) + # Fix 'unknown terminal type' error - set TERM in multiple places + pct exec "$CTID" -- bash -c "localectl set-locale LANG=en_US.UTF-8 2>/dev/null || echo 'LANG=en_US.UTF-8' > /etc/locale.conf" || true + + # Set TERM globally for all users + pct exec "$CTID" -- bash -c "cat > /etc/profile.d/term.sh << 'EOFTERM' +# Fix terminal type for LXC containers +if [ -z \"\$TERM\" ] || [ \"\$TERM\" = \"dumb\" ] || [ \"\$TERM\" = \"-\" ]; then + export TERM=xterm-256color +fi +EOFTERM +chmod +x /etc/profile.d/term.sh" || true + + # Also set in /etc/environment for non-login shells + pct exec "$CTID" -- bash -c "grep -q '^TERM=' /etc/environment 2>/dev/null || echo 'TERM=xterm-256color' >> /etc/environment" || true + ;; + + gentoo) + # Gentoo - OpenRC based, emerge is slow + # Use emerge-webrsync (faster, uses http instead of rsync) + msg_info "Syncing Gentoo portage via webrsync (faster than rsync)..." + pct exec "$CTID" -- bash -c "emerge-webrsync 2>&1" >/dev/null 2>&1 || { + msg_warn "emerge-webrsync failed, trying emerge --sync..." + pct exec "$CTID" -- bash -c "emerge --sync 2>&1" >/dev/null 2>&1 || true + } + + # Install curl FIRST - it's required for install.func to work + msg_info "Installing essential packages for Gentoo..." + if ! pct exec "$CTID" -- bash -c "emerge --quiet --noreplace net-misc/curl 2>&1" >/dev/null 2>&1; then + msg_error "Failed to install curl on Gentoo - this is required" + exit 1 + fi + + # Install remaining packages + pct exec "$CTID" -- bash -c "emerge --quiet --noreplace app-misc/jq app-misc/mc sys-libs/ncurses 2>&1" >/dev/null 2>&1 || { + msg_warn "Some Gentoo packages may need manual setup" + } + + # Set TERM for Gentoo + pct exec "$CTID" -- bash -c "echo 'export TERM=xterm-256color' >> /etc/profile.d/term.sh && chmod +x /etc/profile.d/term.sh" || true + ;; + + openeuler) + # openEuler (RHEL-compatible, uses DNF) + # Note: Template was patched with /etc/redhat-release in create_container + msg_info "Initializing package manager for openEuler..." + pct exec "$CTID" -- bash -c "dnf makecache --refresh 2>&1" >/dev/null 2>&1 || true + + # openEuler package names may differ from RHEL + local euler_packages="curl sudo mc jq procps-ng ncurses" + if ! pct exec "$CTID" -- bash -c "dnf install -y $euler_packages 2>&1" >/dev/null 2>&1; then + # Try without procps-ng (might be just 'procps' in openEuler) + if ! pct exec "$CTID" -- bash -c "dnf install -y curl sudo mc jq ncurses 2>&1" >/dev/null 2>&1; then + msg_error "dnf base packages installation failed" + exit 1 + fi + fi + # Set locale + pct exec "$CTID" -- bash -c "echo 'LANG=en_US.UTF-8' > /etc/locale.conf" || true + ;; + + *) + msg_warn "Unknown OS '$var_os' - skipping core dependency installation" + ;; + esac + + msg_ok "Customized LXC Container" + + # Install SSH keys + install_ssh_keys_into_ct + + # Run application installer + # NOTE: We disable error handling here because: + # 1. 
Container errors are caught by error_handler INSIDE container + # 2. Container creates flag file with exit code + # 3. We read flag file and handle cleanup manually below + # 4. We DON'T want host error_handler to fire for lxc-attach command itself + + set +Eeuo pipefail # Disable ALL error handling temporarily + trap - ERR # Remove ERR trap completely + + lxc-attach -n "$CTID" -- bash -c "$(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/install/${var_install}.sh)" + local lxc_exit=$? + + set -Eeuo pipefail # Re-enable error handling + trap 'error_handler' ERR # Restore ERR trap + + # Check for error flag file in container (more reliable than lxc-attach exit code) + local install_exit_code=0 + if [[ -n "${SESSION_ID:-}" ]]; then + local error_flag="/root/.install-${SESSION_ID}.failed" + if pct exec "$CTID" -- test -f "$error_flag" 2>/dev/null; then + install_exit_code=$(pct exec "$CTID" -- cat "$error_flag" 2>/dev/null || echo "1") + pct exec "$CTID" -- rm -f "$error_flag" 2>/dev/null || true + fi + fi + + # Fallback to lxc-attach exit code if no flag file + if [[ $install_exit_code -eq 0 && $lxc_exit -ne 0 ]]; then + install_exit_code=$lxc_exit + fi + + # Installation failed? + if [[ $install_exit_code -ne 0 ]]; then + msg_error "Installation failed in container ${CTID} (exit code: ${install_exit_code})" + + # Copy both logs from container before potential deletion + local build_log_copied=false + local install_log_copied=false + + if [[ -n "$CTID" && -n "${SESSION_ID:-}" ]]; then + # Copy BUILD_LOG (creation log) if it exists + if [[ -f "${BUILD_LOG}" ]]; then + cp "${BUILD_LOG}" "/tmp/create-lxc-${CTID}-${SESSION_ID}.log" 2>/dev/null && build_log_copied=true + fi + + # Copy INSTALL_LOG from container + if pct pull "$CTID" "/root/.install-${SESSION_ID}.log" "/tmp/install-lxc-${CTID}-${SESSION_ID}.log" 2>/dev/null; then + install_log_copied=true + fi + + # Show available logs + echo "" + [[ "$build_log_copied" == true ]] && echo -e "${GN}✔${CL} Container creation log: ${BL}/tmp/create-lxc-${CTID}-${SESSION_ID}.log${CL}" + [[ "$install_log_copied" == true ]] && echo -e "${GN}✔${CL} Installation log: ${BL}/tmp/install-lxc-${CTID}-${SESSION_ID}.log${CL}" + fi + + # Dev mode: Keep container or open breakpoint shell + if [[ "${DEV_MODE_KEEP:-false}" == "true" ]]; then + msg_dev "Keep mode active - container ${CTID} preserved" + return 0 + elif [[ "${DEV_MODE_BREAKPOINT:-false}" == "true" ]]; then + msg_dev "Breakpoint mode - opening shell in container ${CTID}" + echo -e "${YW}Type 'exit' to return to host${CL}" + pct enter "$CTID" + echo "" + echo -en "${YW}Container ${CTID} still running. Remove now? (y/N): ${CL}" + if read -r response && [[ "$response" =~ ^[Yy]$ ]]; then + pct stop "$CTID" &>/dev/null || true + pct destroy "$CTID" &>/dev/null || true + msg_ok "Container ${CTID} removed" + else + msg_dev "Container ${CTID} kept for debugging" + fi + exit $install_exit_code + fi + + # Report failure to API before container cleanup + post_update_to_api "failed" "$install_exit_code" + + # Prompt user for cleanup with 60s timeout (plain echo - no msg_info to avoid spinner) + echo "" + echo -en "${YW}Remove broken container ${CTID}? 
(Y/n) [auto-remove in 60s]: ${CL}" + + if read -t 60 -r response; then + if [[ -z "$response" || "$response" =~ ^[Yy]$ ]]; then + # Remove container + echo -e "\n${TAB}${HOLD}${YW}Removing container ${CTID}${CL}" + pct stop "$CTID" &>/dev/null || true + pct destroy "$CTID" &>/dev/null || true + echo -e "${BFR}${CM}${GN}Container ${CTID} removed${CL}" + elif [[ "$response" =~ ^[Nn]$ ]]; then + echo -e "\n${TAB}${YW}Container ${CTID} kept for debugging${CL}" + + # Dev mode: Setup MOTD/SSH for debugging access to broken container + if [[ "${DEV_MODE_MOTD:-false}" == "true" ]]; then + echo -e "${TAB}${HOLD}${DGN}Setting up MOTD and SSH for debugging...${CL}" + if pct exec "$CTID" -- bash -c " + source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/install.func) declare -f motd_ssh >/dev/null 2>&1 && motd_ssh || true " >/dev/null 2>&1; then - local ct_ip=$(pct exec "$CTID" ip a s dev eth0 2>/dev/null | awk '/inet / {print $2}' | cut -d/ -f1) - echo -e "${BFR}${CM}${GN}MOTD/SSH ready - SSH into container: ssh root@${ct_ip}${CL}" - fi - fi - fi - else - # Timeout - auto-remove - echo -e "\n${YW}No response - auto-removing container${CL}" - echo -e "${TAB}${HOLD}${YW}Removing container ${CTID}${CL}" - pct stop "$CTID" &>/dev/null || true - pct destroy "$CTID" &>/dev/null || true - echo -e "${BFR}${CM}${GN}Container ${CTID} removed${CL}" + local ct_ip=$(pct exec "$CTID" ip a s dev eth0 2>/dev/null | awk '/inet / {print $2}' | cut -d/ -f1) + echo -e "${BFR}${CM}${GN}MOTD/SSH ready - SSH into container: ssh root@${ct_ip}${CL}" + fi fi - - exit $install_exit_code + fi + else + # Timeout - auto-remove + echo -e "\n${YW}No response - auto-removing container${CL}" + echo -e "${TAB}${HOLD}${YW}Removing container ${CTID}${CL}" + pct stop "$CTID" &>/dev/null || true + pct destroy "$CTID" &>/dev/null || true + echo -e "${BFR}${CM}${GN}Container ${CTID} removed${CL}" fi + + exit $install_exit_code + fi } destroy_lxc() { - if [[ -z "$CT_ID" ]]; then - msg_error "No CT_ID found. Nothing to remove." - return 1 + if [[ -z "$CT_ID" ]]; then + msg_error "No CT_ID found. Nothing to remove." + return 1 + fi + + # Abbruch bei Ctrl-C / Ctrl-D / ESC + trap 'echo; msg_error "Aborted by user (SIGINT/SIGQUIT)"; return 130' INT QUIT + + local prompt + if ! read -rp "Remove this Container? " prompt; then + # read gibt != 0 zurück bei Ctrl-D/ESC + msg_error "Aborted input (Ctrl-D/ESC)" + return 130 + fi + + case "${prompt,,}" in + y | yes) + if pct stop "$CT_ID" &>/dev/null && pct destroy "$CT_ID" &>/dev/null; then + msg_ok "Removed Container $CT_ID" + else + msg_error "Failed to remove Container $CT_ID" + return 1 fi - - # Abbruch bei Ctrl-C / Ctrl-D / ESC - trap 'echo; msg_error "Aborted by user (SIGINT/SIGQUIT)"; return 130' INT QUIT - - local prompt - if ! read -rp "Remove this Container? " prompt; then - # read gibt != 0 zurück bei Ctrl-D/ESC - msg_error "Aborted input (Ctrl-D/ESC)" - return 130 - fi - - case "${prompt,,}" in - y | yes) - if pct stop "$CT_ID" &>/dev/null && pct destroy "$CT_ID" &>/dev/null; then - msg_ok "Removed Container $CT_ID" - else - msg_error "Failed to remove Container $CT_ID" - return 1 - fi - ;; - "" | n | no) - msg_custom "ℹ️" "${BL}" "Container was not removed." - ;; - *) - msg_warn "Invalid response. Container was not removed." - ;; - esac + ;; + "" | n | no) + msg_custom "ℹ️" "${BL}" "Container was not removed." + ;; + *) + msg_warn "Invalid response. Container was not removed." 
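+    # any input other than y/yes or the explicit n/no/empty cases lands here and leaves the container untouched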
+ ;; + esac } # ------------------------------------------------------------------------------ @@ -3346,81 +3566,81 @@ destroy_lxc() { # ------------------------------------------------------------------------------ # ===== Storage discovery / selection helpers (ported from create_lxc.sh) ===== resolve_storage_preselect() { - local class="$1" preselect="$2" required_content="" - case "$class" in - template) required_content="vztmpl" ;; - container) required_content="rootdir" ;; - *) return 1 ;; - esac - [[ -z "$preselect" ]] && return 1 - if ! pvesm status -content "$required_content" | awk 'NR>1{print $1}' | grep -qx -- "$preselect"; then - msg_warn "Preselected storage '${preselect}' does not support content '${required_content}' (or not found)" - return 1 - fi + local class="$1" preselect="$2" required_content="" + case "$class" in + template) required_content="vztmpl" ;; + container) required_content="rootdir" ;; + *) return 1 ;; + esac + [[ -z "$preselect" ]] && return 1 + if ! pvesm status -content "$required_content" | awk 'NR>1{print $1}' | grep -qx -- "$preselect"; then + msg_warn "Preselected storage '${preselect}' does not support content '${required_content}' (or not found)" + return 1 + fi - local line total used free - line="$(pvesm status | awk -v s="$preselect" 'NR>1 && $1==s {print $0}')" - if [[ -z "$line" ]]; then - STORAGE_INFO="n/a" + local line total used free + line="$(pvesm status | awk -v s="$preselect" 'NR>1 && $1==s {print $0}')" + if [[ -z "$line" ]]; then + STORAGE_INFO="n/a" + else + total="$(awk '{print $4}' <<<"$line")" + used="$(awk '{print $5}' <<<"$line")" + free="$(awk '{print $6}' <<<"$line")" + local total_h used_h free_h + if command -v numfmt >/dev/null 2>&1; then + total_h="$(numfmt --to=iec --suffix=B --format %.1f "$total" 2>/dev/null || echo "$total")" + used_h="$(numfmt --to=iec --suffix=B --format %.1f "$used" 2>/dev/null || echo "$used")" + free_h="$(numfmt --to=iec --suffix=B --format %.1f "$free" 2>/dev/null || echo "$free")" + STORAGE_INFO="Free: ${free_h} Used: ${used_h}" else - total="$(awk '{print $4}' <<<"$line")" - used="$(awk '{print $5}' <<<"$line")" - free="$(awk '{print $6}' <<<"$line")" - local total_h used_h free_h - if command -v numfmt >/dev/null 2>&1; then - total_h="$(numfmt --to=iec --suffix=B --format %.1f "$total" 2>/dev/null || echo "$total")" - used_h="$(numfmt --to=iec --suffix=B --format %.1f "$used" 2>/dev/null || echo "$used")" - free_h="$(numfmt --to=iec --suffix=B --format %.1f "$free" 2>/dev/null || echo "$free")" - STORAGE_INFO="Free: ${free_h} Used: ${used_h}" - else - STORAGE_INFO="Free: ${free} Used: ${used}" - fi + STORAGE_INFO="Free: ${free} Used: ${used}" fi - STORAGE_RESULT="$preselect" - return 0 + fi + STORAGE_RESULT="$preselect" + return 0 } fix_gpu_gids() { - if [[ -z "${GPU_TYPE:-}" ]]; then - return 0 - fi + if [[ -z "${GPU_TYPE:-}" ]]; then + return 0 + fi - msg_info "Detecting and setting correct GPU group IDs" + msg_info "Detecting and setting correct GPU group IDs" - # Get actual GIDs from container - local video_gid=$(pct exec "$CTID" -- sh -c "getent group video 2>/dev/null | cut -d: -f3") - local render_gid=$(pct exec "$CTID" -- sh -c "getent group render 2>/dev/null | cut -d: -f3") + # Get actual GIDs from container + local video_gid=$(pct exec "$CTID" -- sh -c "getent group video 2>/dev/null | cut -d: -f3") + local render_gid=$(pct exec "$CTID" -- sh -c "getent group render 2>/dev/null | cut -d: -f3") - # Create groups if they don't exist - if [[ -z "$video_gid" ]]; then - pct exec "$CTID" 
-- sh -c "groupadd -r video 2>/dev/null || true" >/dev/null 2>&1 - video_gid=$(pct exec "$CTID" -- sh -c "getent group video 2>/dev/null | cut -d: -f3") - [[ -z "$video_gid" ]] && video_gid="44" - fi + # Create groups if they don't exist + if [[ -z "$video_gid" ]]; then + pct exec "$CTID" -- sh -c "groupadd -r video 2>/dev/null || true" >/dev/null 2>&1 + video_gid=$(pct exec "$CTID" -- sh -c "getent group video 2>/dev/null | cut -d: -f3") + [[ -z "$video_gid" ]] && video_gid="44" + fi - if [[ -z "$render_gid" ]]; then - pct exec "$CTID" -- sh -c "groupadd -r render 2>/dev/null || true" >/dev/null 2>&1 - render_gid=$(pct exec "$CTID" -- sh -c "getent group render 2>/dev/null | cut -d: -f3") - [[ -z "$render_gid" ]] && render_gid="104" - fi + if [[ -z "$render_gid" ]]; then + pct exec "$CTID" -- sh -c "groupadd -r render 2>/dev/null || true" >/dev/null 2>&1 + render_gid=$(pct exec "$CTID" -- sh -c "getent group render 2>/dev/null | cut -d: -f3") + [[ -z "$render_gid" ]] && render_gid="104" + fi - # Stop container to update config - pct stop "$CTID" >/dev/null 2>&1 - sleep 1 + # Stop container to update config + pct stop "$CTID" >/dev/null 2>&1 + sleep 1 - # Update dev entries with correct GIDs - sed -i.bak -E "s|(dev[0-9]+: /dev/dri/renderD[0-9]+),gid=[0-9]+|\1,gid=${render_gid}|g" "$LXC_CONFIG" - sed -i -E "s|(dev[0-9]+: /dev/dri/card[0-9]+),gid=[0-9]+|\1,gid=${video_gid}|g" "$LXC_CONFIG" + # Update dev entries with correct GIDs + sed -i.bak -E "s|(dev[0-9]+: /dev/dri/renderD[0-9]+),gid=[0-9]+|\1,gid=${render_gid}|g" "$LXC_CONFIG" + sed -i -E "s|(dev[0-9]+: /dev/dri/card[0-9]+),gid=[0-9]+|\1,gid=${video_gid}|g" "$LXC_CONFIG" - # Restart container - pct start "$CTID" >/dev/null 2>&1 - sleep 2 + # Restart container + pct start "$CTID" >/dev/null 2>&1 + sleep 2 - msg_ok "GPU passthrough configured (video:${video_gid}, render:${render_gid})" + msg_ok "GPU passthrough configured (video:${video_gid}, render:${render_gid})" - # For privileged containers: also fix permissions inside container - if [[ "$CT_TYPE" == "0" ]]; then - pct exec "$CTID" -- bash -c " + # For privileged containers: also fix permissions inside container + if [[ "$CT_TYPE" == "0" ]]; then + pct exec "$CTID" -- bash -c " if [ -d /dev/dri ]; then for dev in /dev/dri/*; do if [ -e \"\$dev\" ]; then @@ -3434,740 +3654,797 @@ fix_gpu_gids() { done fi " >/dev/null 2>&1 - fi + fi } check_storage_support() { - local CONTENT="$1" VALID=0 - while IFS= read -r line; do - local STORAGE_NAME - STORAGE_NAME=$(awk '{print $1}' <<<"$line") - [[ -n "$STORAGE_NAME" ]] && VALID=1 - done < <(pvesm status -content "$CONTENT" 2>/dev/null | awk 'NR>1') - [[ $VALID -eq 1 ]] + local CONTENT="$1" VALID=0 + while IFS= read -r line; do + local STORAGE_NAME + STORAGE_NAME=$(awk '{print $1}' <<<"$line") + [[ -n "$STORAGE_NAME" ]] && VALID=1 + done < <(pvesm status -content "$CONTENT" 2>/dev/null | awk 'NR>1') + [[ $VALID -eq 1 ]] } select_storage() { - local CLASS=$1 CONTENT CONTENT_LABEL - case $CLASS in - container) - CONTENT='rootdir' - CONTENT_LABEL='Container' - ;; - template) - CONTENT='vztmpl' - CONTENT_LABEL='Container template' - ;; - iso) - CONTENT='iso' - CONTENT_LABEL='ISO image' - ;; - images) - CONTENT='images' - CONTENT_LABEL='VM Disk image' - ;; - backup) - CONTENT='backup' - CONTENT_LABEL='Backup' - ;; - snippets) - CONTENT='snippets' - CONTENT_LABEL='Snippets' - ;; - *) - msg_error "Invalid storage class '$CLASS'" - return 1 - ;; - esac + local CLASS=$1 CONTENT CONTENT_LABEL + case $CLASS in + container) + CONTENT='rootdir' + 
CONTENT_LABEL='Container' + ;; + template) + CONTENT='vztmpl' + CONTENT_LABEL='Container template' + ;; + iso) + CONTENT='iso' + CONTENT_LABEL='ISO image' + ;; + images) + CONTENT='images' + CONTENT_LABEL='VM Disk image' + ;; + backup) + CONTENT='backup' + CONTENT_LABEL='Backup' + ;; + snippets) + CONTENT='snippets' + CONTENT_LABEL='Snippets' + ;; + *) + msg_error "Invalid storage class '$CLASS'" + return 1 + ;; + esac - declare -A STORAGE_MAP - local -a MENU=() - local COL_WIDTH=0 + declare -A STORAGE_MAP + local -a MENU=() + local COL_WIDTH=0 - while read -r TAG TYPE _ TOTAL USED FREE _; do - [[ -n "$TAG" && -n "$TYPE" ]] || continue - local DISPLAY="${TAG} (${TYPE})" - local USED_FMT=$(numfmt --to=iec --from-unit=K --format %.1f <<<"$USED") - local FREE_FMT=$(numfmt --to=iec --from-unit=K --format %.1f <<<"$FREE") - local INFO="Free: ${FREE_FMT}B Used: ${USED_FMT}B" - STORAGE_MAP["$DISPLAY"]="$TAG" - MENU+=("$DISPLAY" "$INFO" "OFF") - ((${#DISPLAY} > COL_WIDTH)) && COL_WIDTH=${#DISPLAY} - done < <(pvesm status -content "$CONTENT" | awk 'NR>1') + while read -r TAG TYPE _ TOTAL USED FREE _; do + [[ -n "$TAG" && -n "$TYPE" ]] || continue + local DISPLAY="${TAG} (${TYPE})" + local USED_FMT=$(numfmt --to=iec --from-unit=K --format %.1f <<<"$USED") + local FREE_FMT=$(numfmt --to=iec --from-unit=K --format %.1f <<<"$FREE") + local INFO="Free: ${FREE_FMT}B Used: ${USED_FMT}B" + STORAGE_MAP["$DISPLAY"]="$TAG" + MENU+=("$DISPLAY" "$INFO" "OFF") + ((${#DISPLAY} > COL_WIDTH)) && COL_WIDTH=${#DISPLAY} + done < <(pvesm status -content "$CONTENT" | awk 'NR>1') - if [[ ${#MENU[@]} -eq 0 ]]; then - msg_error "No storage found for content type '$CONTENT'." - return 2 + if [[ ${#MENU[@]} -eq 0 ]]; then + msg_error "No storage found for content type '$CONTENT'." + return 2 + fi + + if [[ $((${#MENU[@]} / 3)) -eq 1 ]]; then + STORAGE_RESULT="${STORAGE_MAP[${MENU[0]}]}" + STORAGE_INFO="${MENU[1]}" + return 0 + fi + + local WIDTH=$((COL_WIDTH + 42)) + while true; do + local DISPLAY_SELECTED + DISPLAY_SELECTED=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \ + --title "Storage Pools" \ + --radiolist "Which storage pool for ${CONTENT_LABEL,,}?\n(Spacebar to select)" \ + 16 "$WIDTH" 6 "${MENU[@]}" 3>&1 1>&2 2>&3) || { exit_script; } + + DISPLAY_SELECTED=$(sed 's/[[:space:]]*$//' <<<"$DISPLAY_SELECTED") + if [[ -z "$DISPLAY_SELECTED" || -z "${STORAGE_MAP[$DISPLAY_SELECTED]+_}" ]]; then + whiptail --msgbox "No valid storage selected. Please try again." 8 58 + continue fi - - if [[ $((${#MENU[@]} / 3)) -eq 1 ]]; then - STORAGE_RESULT="${STORAGE_MAP[${MENU[0]}]}" - STORAGE_INFO="${MENU[1]}" - return 0 - fi - - local WIDTH=$((COL_WIDTH + 42)) - while true; do - local DISPLAY_SELECTED - DISPLAY_SELECTED=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \ - --title "Storage Pools" \ - --radiolist "Which storage pool for ${CONTENT_LABEL,,}?\n(Spacebar to select)" \ - 16 "$WIDTH" 6 "${MENU[@]}" 3>&1 1>&2 2>&3) || { exit_script; } - - DISPLAY_SELECTED=$(sed 's/[[:space:]]*$//' <<<"$DISPLAY_SELECTED") - if [[ -z "$DISPLAY_SELECTED" || -z "${STORAGE_MAP[$DISPLAY_SELECTED]+_}" ]]; then - whiptail --msgbox "No valid storage selected. Please try again." 
8 58 - continue - fi - STORAGE_RESULT="${STORAGE_MAP[$DISPLAY_SELECTED]}" - for ((i = 0; i < ${#MENU[@]}; i += 3)); do - if [[ "${MENU[$i]}" == "$DISPLAY_SELECTED" ]]; then - STORAGE_INFO="${MENU[$i + 1]}" - break - fi - done - return 0 + STORAGE_RESULT="${STORAGE_MAP[$DISPLAY_SELECTED]}" + for ((i = 0; i < ${#MENU[@]}; i += 3)); do + if [[ "${MENU[$i]}" == "$DISPLAY_SELECTED" ]]; then + STORAGE_INFO="${MENU[$i + 1]}" + break + fi done + return 0 + done } create_lxc_container() { - # ------------------------------------------------------------------------------ - # Optional verbose mode (debug tracing) - # ------------------------------------------------------------------------------ - if [[ "${CREATE_LXC_VERBOSE:-no}" == "yes" ]]; then set -x; fi + # ------------------------------------------------------------------------------ + # Optional verbose mode (debug tracing) + # ------------------------------------------------------------------------------ + if [[ "${CREATE_LXC_VERBOSE:-no}" == "yes" ]]; then set -x; fi - # ------------------------------------------------------------------------------ - # Helpers (dynamic versioning / template parsing) - # ------------------------------------------------------------------------------ - pkg_ver() { dpkg-query -W -f='${Version}\n' "$1" 2>/dev/null || echo ""; } - pkg_cand() { apt-cache policy "$1" 2>/dev/null | awk '/Candidate:/ {print $2}'; } + # ------------------------------------------------------------------------------ + # Helpers (dynamic versioning / template parsing) + # ------------------------------------------------------------------------------ + pkg_ver() { dpkg-query -W -f='${Version}\n' "$1" 2>/dev/null || echo ""; } + pkg_cand() { apt-cache policy "$1" 2>/dev/null | awk '/Candidate:/ {print $2}'; } - ver_ge() { dpkg --compare-versions "$1" ge "$2"; } - ver_gt() { dpkg --compare-versions "$1" gt "$2"; } - ver_lt() { dpkg --compare-versions "$1" lt "$2"; } + ver_ge() { dpkg --compare-versions "$1" ge "$2"; } + ver_gt() { dpkg --compare-versions "$1" gt "$2"; } + ver_lt() { dpkg --compare-versions "$1" lt "$2"; } - # Extract Debian OS minor from template name: debian-13-standard_13.1-1_amd64.tar.zst => "13.1" - parse_template_osver() { sed -n 's/.*_\([0-9][0-9]*\(\.[0-9]\+\)\?\)-.*/\1/p' <<<"$1"; } + # Extract Debian OS minor from template name: debian-13-standard_13.1-1_amd64.tar.zst => "13.1" + parse_template_osver() { sed -n 's/.*_\([0-9][0-9]*\(\.[0-9]\+\)\?\)-.*/\1/p' <<<"$1"; } - # Offer upgrade for pve-container/lxc-pve if candidate > installed; optional auto-retry pct create - # Returns: - # 0 = no upgrade needed - # 1 = upgraded (and if do_retry=yes and retry succeeded, creation done) - # 2 = user declined - # 3 = upgrade attempted but failed OR retry failed - offer_lxc_stack_upgrade_and_maybe_retry() { - local do_retry="${1:-no}" # yes|no - local _pvec_i _pvec_c _lxcp_i _lxcp_c need=0 + # Offer upgrade for pve-container/lxc-pve if candidate > installed; optional auto-retry pct create + # Returns: + # 0 = no upgrade needed + # 1 = upgraded (and if do_retry=yes and retry succeeded, creation done) + # 2 = user declined + # 3 = upgrade attempted but failed OR retry failed + offer_lxc_stack_upgrade_and_maybe_retry() { + local do_retry="${1:-no}" # yes|no + local _pvec_i _pvec_c _lxcp_i _lxcp_c need=0 - _pvec_i="$(pkg_ver pve-container)" - _lxcp_i="$(pkg_ver lxc-pve)" - _pvec_c="$(pkg_cand pve-container)" - _lxcp_c="$(pkg_cand lxc-pve)" + _pvec_i="$(pkg_ver pve-container)" + _lxcp_i="$(pkg_ver lxc-pve)" + _pvec_c="$(pkg_cand 
pve-container)" + _lxcp_c="$(pkg_cand lxc-pve)" - if [[ -n "$_pvec_c" && "$_pvec_c" != "none" ]]; then - ver_gt "$_pvec_c" "${_pvec_i:-0}" && need=1 - fi - if [[ -n "$_lxcp_c" && "$_lxcp_c" != "none" ]]; then - ver_gt "$_lxcp_c" "${_lxcp_i:-0}" && need=1 - fi - if [[ $need -eq 0 ]]; then - msg_debug "No newer candidate for pve-container/lxc-pve (installed=$_pvec_i/$_lxcp_i, cand=$_pvec_c/$_lxcp_c)" + if [[ -n "$_pvec_c" && "$_pvec_c" != "none" ]]; then + ver_gt "$_pvec_c" "${_pvec_i:-0}" && need=1 + fi + if [[ -n "$_lxcp_c" && "$_lxcp_c" != "none" ]]; then + ver_gt "$_lxcp_c" "${_lxcp_i:-0}" && need=1 + fi + if [[ $need -eq 0 ]]; then + msg_debug "No newer candidate for pve-container/lxc-pve (installed=$_pvec_i/$_lxcp_i, cand=$_pvec_c/$_lxcp_c)" + return 0 + fi + + echo + echo "An update for the Proxmox LXC stack is available:" + echo " pve-container: installed=${_pvec_i:-n/a} candidate=${_pvec_c:-n/a}" + echo " lxc-pve : installed=${_lxcp_i:-n/a} candidate=${_lxcp_c:-n/a}" + echo + read -rp "Do you want to upgrade now? [y/N] " _ans + case "${_ans,,}" in + y | yes) + msg_info "Upgrading Proxmox LXC stack (pve-container, lxc-pve)" + if $STD apt-get update && $STD apt-get install -y --only-upgrade pve-container lxc-pve; then + msg_ok "LXC stack upgraded." + if [[ "$do_retry" == "yes" ]]; then + msg_info "Retrying container creation after upgrade" + if pct create "$CTID" "${TEMPLATE_STORAGE}:vztmpl/${TEMPLATE}" $PCT_OPTIONS >>"$LOGFILE" 2>&1; then + msg_ok "Container created successfully after upgrade." return 0 + else + msg_error "pct create still failed after upgrade. See $LOGFILE" + return 3 + fi fi + return 1 + else + msg_error "Upgrade failed. Please check APT output." + return 3 + fi + ;; + *) return 2 ;; + esac + } - echo - echo "An update for the Proxmox LXC stack is available:" - echo " pve-container: installed=${_pvec_i:-n/a} candidate=${_pvec_c:-n/a}" - echo " lxc-pve : installed=${_lxcp_i:-n/a} candidate=${_lxcp_c:-n/a}" - echo - read -rp "Do you want to upgrade now? [y/N] " _ans - case "${_ans,,}" in - y | yes) - msg_info "Upgrading Proxmox LXC stack (pve-container, lxc-pve)" - if $STD apt-get update && $STD apt-get install -y --only-upgrade pve-container lxc-pve; then - msg_ok "LXC stack upgraded." - if [[ "$do_retry" == "yes" ]]; then - msg_info "Retrying container creation after upgrade" - if pct create "$CTID" "${TEMPLATE_STORAGE}:vztmpl/${TEMPLATE}" $PCT_OPTIONS >>"$LOGFILE" 2>&1; then - msg_ok "Container created successfully after upgrade." - return 0 - else - msg_error "pct create still failed after upgrade. See $LOGFILE" - return 3 - fi - fi - return 1 - else - msg_error "Upgrade failed. Please check APT output." - return 3 - fi - ;; - *) return 2 ;; - esac - } + # ------------------------------------------------------------------------------ + # Required input variables + # ------------------------------------------------------------------------------ + [[ "${CTID:-}" ]] || { + msg_error "You need to set 'CTID' variable." + exit 203 + } + [[ "${PCT_OSTYPE:-}" ]] || { + msg_error "You need to set 'PCT_OSTYPE' variable." + exit 204 + } - # ------------------------------------------------------------------------------ - # Required input variables - # ------------------------------------------------------------------------------ - [[ "${CTID:-}" ]] || { - msg_error "You need to set 'CTID' variable." - exit 203 - } - [[ "${PCT_OSTYPE:-}" ]] || { - msg_error "You need to set 'PCT_OSTYPE' variable." 
- exit 204 - } + msg_debug "CTID=$CTID" + msg_debug "PCT_OSTYPE=$PCT_OSTYPE" + msg_debug "PCT_OSVERSION=${PCT_OSVERSION:-default}" - msg_debug "CTID=$CTID" - msg_debug "PCT_OSTYPE=$PCT_OSTYPE" - msg_debug "PCT_OSVERSION=${PCT_OSVERSION:-default}" + # ID checks + [[ "$CTID" -ge 100 ]] || { + msg_error "ID cannot be less than 100." + exit 205 + } + if qm status "$CTID" &>/dev/null || pct status "$CTID" &>/dev/null; then + echo -e "ID '$CTID' is already in use." + unset CTID + msg_error "Cannot use ID that is already in use." + exit 206 + fi - # ID checks - [[ "$CTID" -ge 100 ]] || { - msg_error "ID cannot be less than 100." - exit 205 - } - if qm status "$CTID" &>/dev/null || pct status "$CTID" &>/dev/null; then - echo -e "ID '$CTID' is already in use." - unset CTID - msg_error "Cannot use ID that is already in use." - exit 206 - fi + # Storage capability check + check_storage_support "rootdir" || { + msg_error "No valid storage found for 'rootdir' [Container]" + exit 1 + } + check_storage_support "vztmpl" || { + msg_error "No valid storage found for 'vztmpl' [Template]" + exit 1 + } - # Storage capability check - check_storage_support "rootdir" || { - msg_error "No valid storage found for 'rootdir' [Container]" - exit 1 - } - check_storage_support "vztmpl" || { - msg_error "No valid storage found for 'vztmpl' [Template]" - exit 1 - } + # Template storage selection + if resolve_storage_preselect template "${TEMPLATE_STORAGE:-}"; then + TEMPLATE_STORAGE="$STORAGE_RESULT" + TEMPLATE_STORAGE_INFO="$STORAGE_INFO" + msg_ok "Storage ${BL}${TEMPLATE_STORAGE}${CL} (${TEMPLATE_STORAGE_INFO}) [Template]" + else + while true; do + if [[ -z "${var_template_storage:-}" ]]; then + if select_storage template; then + TEMPLATE_STORAGE="$STORAGE_RESULT" + TEMPLATE_STORAGE_INFO="$STORAGE_INFO" + msg_ok "Storage ${BL}${TEMPLATE_STORAGE}${CL} (${TEMPLATE_STORAGE_INFO}) [Template]" + break + fi + fi + done + fi - # Template storage selection - if resolve_storage_preselect template "${TEMPLATE_STORAGE:-}"; then - TEMPLATE_STORAGE="$STORAGE_RESULT" - TEMPLATE_STORAGE_INFO="$STORAGE_INFO" - msg_ok "Storage ${BL}${TEMPLATE_STORAGE}${CL} (${TEMPLATE_STORAGE_INFO}) [Template]" - else - while true; do - if [[ -z "${var_template_storage:-}" ]]; then - if select_storage template; then - TEMPLATE_STORAGE="$STORAGE_RESULT" - TEMPLATE_STORAGE_INFO="$STORAGE_INFO" - msg_ok "Storage ${BL}${TEMPLATE_STORAGE}${CL} (${TEMPLATE_STORAGE_INFO}) [Template]" - break - fi - fi - done - fi - - # Container storage selection - if resolve_storage_preselect container "${CONTAINER_STORAGE:-}"; then + # Container storage selection + if resolve_storage_preselect container "${CONTAINER_STORAGE:-}"; then + CONTAINER_STORAGE="$STORAGE_RESULT" + CONTAINER_STORAGE_INFO="$STORAGE_INFO" + msg_ok "Storage ${BL}${CONTAINER_STORAGE}${CL} (${CONTAINER_STORAGE_INFO}) [Container]" + else + if [[ -z "${var_container_storage:-}" ]]; then + if select_storage container; then CONTAINER_STORAGE="$STORAGE_RESULT" CONTAINER_STORAGE_INFO="$STORAGE_INFO" msg_ok "Storage ${BL}${CONTAINER_STORAGE}${CL} (${CONTAINER_STORAGE_INFO}) [Container]" + fi + fi + fi + + msg_info "Validating storage '$CONTAINER_STORAGE'" + STORAGE_TYPE=$(grep -E "^[^:]+: $CONTAINER_STORAGE$" /etc/pve/storage.cfg | cut -d: -f1 | head -1) + + case "$STORAGE_TYPE" in + iscsidirect) exit 212 ;; + iscsi | zfs) exit 213 ;; + cephfs) exit 219 ;; + pbs) exit 224 ;; + linstor | rbd | nfs | cifs) + pvesm status -storage "$CONTAINER_STORAGE" &>/dev/null || exit 217 + ;; + esac + + pvesm status -content 
rootdir 2>/dev/null | awk 'NR>1{print $1}' | grep -qx "$CONTAINER_STORAGE" || exit 213 + msg_ok "Storage '$CONTAINER_STORAGE' ($STORAGE_TYPE) validated" + + msg_info "Validating template storage '$TEMPLATE_STORAGE'" + TEMPLATE_TYPE=$(grep -E "^[^:]+: $TEMPLATE_STORAGE$" /etc/pve/storage.cfg | cut -d: -f1) + + if ! pvesm status -content vztmpl 2>/dev/null | awk 'NR>1{print $1}' | grep -qx "$TEMPLATE_STORAGE"; then + msg_warn "Template storage '$TEMPLATE_STORAGE' may not support 'vztmpl'" + fi + msg_ok "Template storage '$TEMPLATE_STORAGE' validated" + + # Free space check + STORAGE_FREE=$(pvesm status | awk -v s="$CONTAINER_STORAGE" '$1 == s { print $6 }') + REQUIRED_KB=$((${PCT_DISK_SIZE:-8} * 1024 * 1024)) + [[ "$STORAGE_FREE" -ge "$REQUIRED_KB" ]] || { + msg_error "Not enough space on '$CONTAINER_STORAGE'. Needed: ${PCT_DISK_SIZE:-8}G." + exit 214 + } + + # Cluster quorum (if cluster) + if [[ -f /etc/pve/corosync.conf ]]; then + msg_info "Checking cluster quorum" + if ! pvecm status | awk -F':' '/^Quorate/ { exit ($2 ~ /Yes/) ? 0 : 1 }'; then + msg_error "Cluster is not quorate. Start all nodes or configure quorum device (QDevice)." + exit 210 + fi + msg_ok "Cluster is quorate" + fi + + # ------------------------------------------------------------------------------ + # Template discovery & validation + # Supported OS types (pveam available): alpine, almalinux, centos, debian, + # devuan, fedora, gentoo, openeuler, opensuse, rockylinux, ubuntu + # Template naming conventions: + # - Debian/Ubuntu/Devuan: --standard__.tar.zst + # - Alpine/Fedora/Rocky/CentOS/AlmaLinux/openEuler: --default__.tar.xz + # - Gentoo: gentoo-current-openrc__.tar.xz (note: underscore before date!) + # - openSUSE: opensuse--default__.tar.xz + # - CentOS: centos--stream-default__.tar.xz (note: stream in name) + # ------------------------------------------------------------------------------ + TEMPLATE_SEARCH="${PCT_OSTYPE}-${PCT_OSVERSION:-}" + case "$PCT_OSTYPE" in + debian | ubuntu | devuan) TEMPLATE_PATTERN="-standard_" ;; + alpine | fedora | rockylinux | almalinux | openeuler) TEMPLATE_PATTERN="-default_" ;; + centos) TEMPLATE_PATTERN="-stream-default_" ;; + gentoo) TEMPLATE_PATTERN="-openrc_" ;; # Pattern: gentoo-current-openrc_ (underscore!) + opensuse) TEMPLATE_PATTERN="-default_" ;; + *) TEMPLATE_PATTERN="" ;; + esac + + msg_info "Searching for template '$TEMPLATE_SEARCH'" + + # Build regex patterns outside awk/grep for clarity + SEARCH_PATTERN="^${TEMPLATE_SEARCH}" + + #echo "[DEBUG] TEMPLATE_SEARCH='$TEMPLATE_SEARCH'" + #echo "[DEBUG] SEARCH_PATTERN='$SEARCH_PATTERN'" + #echo "[DEBUG] TEMPLATE_PATTERN='$TEMPLATE_PATTERN'" + + mapfile -t LOCAL_TEMPLATES < <( + pveam list "$TEMPLATE_STORAGE" 2>/dev/null | + awk -v search="${SEARCH_PATTERN}" -v pattern="${TEMPLATE_PATTERN}" '$1 ~ search && $1 ~ pattern {print $1}' | + sed 's|.*/||' | sort -t - -k 2 -V + ) + + pveam update >/dev/null 2>&1 || msg_warn "Could not update template catalog (pveam update failed)." 
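+  # note: the 'pveam available' lookups below read the catalog refreshed by 'pveam update' above,
+  # so ONLINE_TEMPLATES reflects the newest published templates matching the search pattern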
+ + msg_ok "Template search completed" + + #echo "[DEBUG] pveam available output (first 5 lines with .tar files):" + #pveam available -section system 2>/dev/null | grep -E '\.(tar\.zst|tar\.xz|tar\.gz)$' | head -5 | sed 's/^/ /' + + set +u + mapfile -t ONLINE_TEMPLATES < <(pveam available -section system 2>/dev/null | grep -E '\.(tar\.zst|tar\.xz|tar\.gz)$' | awk '{print $2}' | grep -E "${SEARCH_PATTERN}.*${TEMPLATE_PATTERN}" | sort -t - -k 2 -V 2>/dev/null || true) + #echo "[DEBUG] After filtering: ${#ONLINE_TEMPLATES[@]} online templates found" + set -u + + ONLINE_TEMPLATE="" + [[ ${#ONLINE_TEMPLATES[@]} -gt 0 ]] && ONLINE_TEMPLATE="${ONLINE_TEMPLATES[-1]}" + + #msg_debug "SEARCH_PATTERN='${SEARCH_PATTERN}' TEMPLATE_PATTERN='${TEMPLATE_PATTERN}'" + #msg_debug "Found ${#LOCAL_TEMPLATES[@]} local templates, ${#ONLINE_TEMPLATES[@]} online templates" + if [[ ${#ONLINE_TEMPLATES[@]} -gt 0 ]]; then + #msg_debug "First 3 online templates:" + count=0 + for idx in "${!ONLINE_TEMPLATES[@]}"; do + #msg_debug " [$idx]: ${ONLINE_TEMPLATES[$idx]}" + ((count++)) + [[ $count -ge 3 ]] && break + done + fi + #msg_debug "ONLINE_TEMPLATE='$ONLINE_TEMPLATE'" + + if [[ ${#LOCAL_TEMPLATES[@]} -gt 0 ]]; then + TEMPLATE="${LOCAL_TEMPLATES[-1]}" + TEMPLATE_SOURCE="local" + else + TEMPLATE="$ONLINE_TEMPLATE" + TEMPLATE_SOURCE="online" + fi + + # If still no template, try to find alternatives + if [[ -z "$TEMPLATE" ]]; then + echo "" + echo "[DEBUG] No template found for ${PCT_OSTYPE} ${PCT_OSVERSION}, searching for alternatives..." + + # Get all available versions for this OS type + # Special handling for Gentoo which uses 'current' instead of numeric version + if [[ "$PCT_OSTYPE" == "gentoo" ]]; then + mapfile -t AVAILABLE_VERSIONS < <( + pveam available -section system 2>/dev/null | + grep -E '\.(tar\.zst|tar\.xz|tar\.gz)$' | + awk '{print $2}' | + grep "^gentoo-" | + sed -E 's/gentoo-([^-]+)-.*/\1/' | + sort -u 2>/dev/null || sort -u + ) else - if [[ -z "${var_container_storage:-}" ]]; then - if select_storage container; then - CONTAINER_STORAGE="$STORAGE_RESULT" - CONTAINER_STORAGE_INFO="$STORAGE_INFO" - msg_ok "Storage ${BL}${CONTAINER_STORAGE}${CL} (${CONTAINER_STORAGE_INFO}) [Container]" - fi - fi + mapfile -t AVAILABLE_VERSIONS < <( + pveam available -section system 2>/dev/null | + grep -E '\.(tar\.zst|tar\.xz|tar\.gz)$' | + awk '{print $2}' | + grep "^${PCT_OSTYPE}-" | + sed -E "s/${PCT_OSTYPE}-([0-9]+(\.[0-9]+)?).*/\1/" | + grep -E '^[0-9]' | + sort -u -V 2>/dev/null || sort -u + ) fi - msg_info "Validating storage '$CONTAINER_STORAGE'" - STORAGE_TYPE=$(grep -E "^[^:]+: $CONTAINER_STORAGE$" /etc/pve/storage.cfg | cut -d: -f1 | head -1) + if [[ ${#AVAILABLE_VERSIONS[@]} -gt 0 ]]; then + echo "" + echo "${BL}Available ${PCT_OSTYPE} versions:${CL}" + for i in "${!AVAILABLE_VERSIONS[@]}"; do + echo " [$((i + 1))] ${AVAILABLE_VERSIONS[$i]}" + done + echo "" + read -p "Select version [1-${#AVAILABLE_VERSIONS[@]}] or press Enter to cancel: " choice - case "$STORAGE_TYPE" in - iscsidirect) exit 212 ;; - iscsi | zfs) exit 213 ;; - cephfs) exit 219 ;; - pbs) exit 224 ;; - linstor | rbd | nfs | cifs) - pvesm status -storage "$CONTAINER_STORAGE" &>/dev/null || exit 217 - ;; - esac + if [[ "$choice" =~ ^[0-9]+$ ]] && [[ "$choice" -ge 1 ]] && [[ "$choice" -le ${#AVAILABLE_VERSIONS[@]} ]]; then + PCT_OSVERSION="${AVAILABLE_VERSIONS[$((choice - 1))]}" + TEMPLATE_SEARCH="${PCT_OSTYPE}-${PCT_OSVERSION}" + SEARCH_PATTERN="^${TEMPLATE_SEARCH}-" - pvesm status -content rootdir 2>/dev/null | awk 'NR>1{print $1}' | grep -qx 
"$CONTAINER_STORAGE" || exit 213 - msg_ok "Storage '$CONTAINER_STORAGE' ($STORAGE_TYPE) validated" + #echo "[DEBUG] Retrying with version: $PCT_OSVERSION" - msg_info "Validating template storage '$TEMPLATE_STORAGE'" - TEMPLATE_TYPE=$(grep -E "^[^:]+: $TEMPLATE_STORAGE$" /etc/pve/storage.cfg | cut -d: -f1) - - if ! pvesm status -content vztmpl 2>/dev/null | awk 'NR>1{print $1}' | grep -qx "$TEMPLATE_STORAGE"; then - msg_warn "Template storage '$TEMPLATE_STORAGE' may not support 'vztmpl'" - fi - msg_ok "Template storage '$TEMPLATE_STORAGE' validated" - - # Free space check - STORAGE_FREE=$(pvesm status | awk -v s="$CONTAINER_STORAGE" '$1 == s { print $6 }') - REQUIRED_KB=$((${PCT_DISK_SIZE:-8} * 1024 * 1024)) - [[ "$STORAGE_FREE" -ge "$REQUIRED_KB" ]] || { - msg_error "Not enough space on '$CONTAINER_STORAGE'. Needed: ${PCT_DISK_SIZE:-8}G." - exit 214 - } - - # Cluster quorum (if cluster) - if [[ -f /etc/pve/corosync.conf ]]; then - msg_info "Checking cluster quorum" - if ! pvecm status | awk -F':' '/^Quorate/ { exit ($2 ~ /Yes/) ? 0 : 1 }'; then - msg_error "Cluster is not quorate. Start all nodes or configure quorum device (QDevice)." - exit 210 - fi - msg_ok "Cluster is quorate" - fi - - # ------------------------------------------------------------------------------ - # Template discovery & validation - # Supported OS types (pveam available): alpine, almalinux, centos, debian, - # devuan, fedora, gentoo, openeuler, opensuse, rockylinux, ubuntu - # ------------------------------------------------------------------------------ - TEMPLATE_SEARCH="${PCT_OSTYPE}-${PCT_OSVERSION:-}" - case "$PCT_OSTYPE" in - debian | ubuntu | devuan) TEMPLATE_PATTERN="-standard_" ;; - alpine | fedora | rocky | rockylinux | centos | almalinux | openeuler) TEMPLATE_PATTERN="-default_" ;; - gentoo) TEMPLATE_PATTERN="-current-openrc" ;; - opensuse) TEMPLATE_PATTERN="-default_" ;; - *) TEMPLATE_PATTERN="" ;; - esac - - msg_info "Searching for template '$TEMPLATE_SEARCH'" - - # Build regex patterns outside awk/grep for clarity - SEARCH_PATTERN="^${TEMPLATE_SEARCH}" - - #echo "[DEBUG] TEMPLATE_SEARCH='$TEMPLATE_SEARCH'" - #echo "[DEBUG] SEARCH_PATTERN='$SEARCH_PATTERN'" - #echo "[DEBUG] TEMPLATE_PATTERN='$TEMPLATE_PATTERN'" - - mapfile -t LOCAL_TEMPLATES < <( - pveam list "$TEMPLATE_STORAGE" 2>/dev/null | - awk -v search="${SEARCH_PATTERN}" -v pattern="${TEMPLATE_PATTERN}" '$1 ~ search && $1 ~ pattern {print $1}' | - sed 's|.*/||' | sort -t - -k 2 -V - ) - - pveam update >/dev/null 2>&1 || msg_warn "Could not update template catalog (pveam update failed)." 
- - msg_ok "Template search completed" - - #echo "[DEBUG] pveam available output (first 5 lines with .tar files):" - #pveam available -section system 2>/dev/null | grep -E '\.(tar\.zst|tar\.xz|tar\.gz)$' | head -5 | sed 's/^/ /' - - set +u - mapfile -t ONLINE_TEMPLATES < <(pveam available -section system 2>/dev/null | grep -E '\.(tar\.zst|tar\.xz|tar\.gz)$' | awk '{print $2}' | grep -E "${SEARCH_PATTERN}.*${TEMPLATE_PATTERN}" | sort -t - -k 2 -V 2>/dev/null || true) - #echo "[DEBUG] After filtering: ${#ONLINE_TEMPLATES[@]} online templates found" - set -u - - ONLINE_TEMPLATE="" - [[ ${#ONLINE_TEMPLATES[@]} -gt 0 ]] && ONLINE_TEMPLATE="${ONLINE_TEMPLATES[-1]}" - - #msg_debug "SEARCH_PATTERN='${SEARCH_PATTERN}' TEMPLATE_PATTERN='${TEMPLATE_PATTERN}'" - #msg_debug "Found ${#LOCAL_TEMPLATES[@]} local templates, ${#ONLINE_TEMPLATES[@]} online templates" - if [[ ${#ONLINE_TEMPLATES[@]} -gt 0 ]]; then - #msg_debug "First 3 online templates:" - count=0 - for idx in "${!ONLINE_TEMPLATES[@]}"; do - #msg_debug " [$idx]: ${ONLINE_TEMPLATES[$idx]}" - ((count++)) - [[ $count -ge 3 ]] && break - done - fi - #msg_debug "ONLINE_TEMPLATE='$ONLINE_TEMPLATE'" - - if [[ ${#LOCAL_TEMPLATES[@]} -gt 0 ]]; then - TEMPLATE="${LOCAL_TEMPLATES[-1]}" - TEMPLATE_SOURCE="local" - else - TEMPLATE="$ONLINE_TEMPLATE" - TEMPLATE_SOURCE="online" - fi - - # If still no template, try to find alternatives - if [[ -z "$TEMPLATE" ]]; then - echo "" - echo "[DEBUG] No template found for ${PCT_OSTYPE} ${PCT_OSVERSION}, searching for alternatives..." - - # Get all available versions for this OS type - mapfile -t AVAILABLE_VERSIONS < <( - pveam available -section system 2>/dev/null | - grep -E '\.(tar\.zst|tar\.xz|tar\.gz)$' | - awk -F'\t' '{print $1}' | - grep "^${PCT_OSTYPE}-" | - sed -E "s/.*${PCT_OSTYPE}-([0-9]+(\.[0-9]+)?).*/\1/" | - sort -u -V 2>/dev/null + mapfile -t ONLINE_TEMPLATES < <( + pveam available -section system 2>/dev/null | + grep -E '\.(tar\.zst|tar\.xz|tar\.gz)$' | + awk -F'\t' '{print $1}' | + grep -E "${SEARCH_PATTERN}.*${TEMPLATE_PATTERN}" | + sort -t - -k 2 -V 2>/dev/null || true ) - if [[ ${#AVAILABLE_VERSIONS[@]} -gt 0 ]]; then - echo "" - echo "${BL}Available ${PCT_OSTYPE} versions:${CL}" - for i in "${!AVAILABLE_VERSIONS[@]}"; do - echo " [$((i + 1))] ${AVAILABLE_VERSIONS[$i]}" - done - echo "" - read -p "Select version [1-${#AVAILABLE_VERSIONS[@]}] or press Enter to cancel: " choice - - if [[ "$choice" =~ ^[0-9]+$ ]] && [[ "$choice" -ge 1 ]] && [[ "$choice" -le ${#AVAILABLE_VERSIONS[@]} ]]; then - PCT_OSVERSION="${AVAILABLE_VERSIONS[$((choice - 1))]}" - TEMPLATE_SEARCH="${PCT_OSTYPE}-${PCT_OSVERSION}" - SEARCH_PATTERN="^${TEMPLATE_SEARCH}-" - - #echo "[DEBUG] Retrying with version: $PCT_OSVERSION" - - mapfile -t ONLINE_TEMPLATES < <( - pveam available -section system 2>/dev/null | - grep -E '\.(tar\.zst|tar\.xz|tar\.gz)$' | - awk -F'\t' '{print $1}' | - grep -E "${SEARCH_PATTERN}.*${TEMPLATE_PATTERN}" | - sort -t - -k 2 -V 2>/dev/null || true - ) - - if [[ ${#ONLINE_TEMPLATES[@]} -gt 0 ]]; then - TEMPLATE="${ONLINE_TEMPLATES[-1]}" - TEMPLATE_SOURCE="online" - #echo "[DEBUG] Found alternative: $TEMPLATE" - else - msg_error "No templates available for ${PCT_OSTYPE} ${PCT_OSVERSION}" - exit 225 - fi - else - msg_custom "🚫" "${YW}" "Installation cancelled" - exit 0 - fi + if [[ ${#ONLINE_TEMPLATES[@]} -gt 0 ]]; then + TEMPLATE="${ONLINE_TEMPLATES[-1]}" + TEMPLATE_SOURCE="online" + #echo "[DEBUG] Found alternative: $TEMPLATE" else - msg_error "No ${PCT_OSTYPE} templates available at all" - exit 225 - fi - fi 
- - #echo "[DEBUG] Selected TEMPLATE='$TEMPLATE' SOURCE='$TEMPLATE_SOURCE'" - #msg_debug "Selected TEMPLATE='$TEMPLATE' SOURCE='$TEMPLATE_SOURCE'" - - TEMPLATE_PATH="$(pvesm path $TEMPLATE_STORAGE:vztmpl/$TEMPLATE 2>/dev/null || true)" - if [[ -z "$TEMPLATE_PATH" ]]; then - TEMPLATE_BASE=$(awk -v s="$TEMPLATE_STORAGE" '$1==s {f=1} f && /path/ {print $2; exit}' /etc/pve/storage.cfg) - [[ -n "$TEMPLATE_BASE" ]] && TEMPLATE_PATH="$TEMPLATE_BASE/template/cache/$TEMPLATE" - fi - - # If we still don't have a path but have a valid template name, construct it - if [[ -z "$TEMPLATE_PATH" && -n "$TEMPLATE" ]]; then - TEMPLATE_PATH="/var/lib/vz/template/cache/$TEMPLATE" - fi - - [[ -n "$TEMPLATE_PATH" ]] || { - if [[ -z "$TEMPLATE" ]]; then - msg_error "Template ${PCT_OSTYPE} ${PCT_OSVERSION} not available" - - # Get available versions - mapfile -t AVAILABLE_VERSIONS < <( - pveam available -section system 2>/dev/null | - grep "^${PCT_OSTYPE}-" | - sed -E 's/.*'"${PCT_OSTYPE}"'-([0-9]+\.[0-9]+).*/\1/' | - grep -E '^[0-9]+\.[0-9]+$' | - sort -u -V 2>/dev/null || sort -u - ) - - if [[ ${#AVAILABLE_VERSIONS[@]} -gt 0 ]]; then - echo -e "\n${BL}Available versions:${CL}" - for i in "${!AVAILABLE_VERSIONS[@]}"; do - echo " [$((i + 1))] ${AVAILABLE_VERSIONS[$i]}" - done - - echo "" - read -p "Select version [1-${#AVAILABLE_VERSIONS[@]}] or Enter to exit: " choice - - if [[ "$choice" =~ ^[0-9]+$ ]] && [[ "$choice" -ge 1 ]] && [[ "$choice" -le ${#AVAILABLE_VERSIONS[@]} ]]; then - export var_version="${AVAILABLE_VERSIONS[$((choice - 1))]}" - export PCT_OSVERSION="$var_version" - msg_ok "Switched to ${PCT_OSTYPE} ${var_version}" - - # Retry template search with new version - TEMPLATE_SEARCH="${PCT_OSTYPE}-${PCT_OSVERSION:-}" - SEARCH_PATTERN="^${TEMPLATE_SEARCH}-" - - mapfile -t LOCAL_TEMPLATES < <( - pveam list "$TEMPLATE_STORAGE" 2>/dev/null | - awk -v search="${SEARCH_PATTERN}" -v pattern="${TEMPLATE_PATTERN}" '$1 ~ search && $1 ~ pattern {print $1}' | - sed 's|.*/||' | sort -t - -k 2 -V - ) - mapfile -t ONLINE_TEMPLATES < <( - pveam available -section system 2>/dev/null | - grep -E '\.(tar\.zst|tar\.xz|tar\.gz)$' | - awk -F'\t' '{print $1}' | - grep -E "${SEARCH_PATTERN}.*${TEMPLATE_PATTERN}" | - sort -t - -k 2 -V 2>/dev/null || true - ) - ONLINE_TEMPLATE="" - [[ ${#ONLINE_TEMPLATES[@]} -gt 0 ]] && ONLINE_TEMPLATE="${ONLINE_TEMPLATES[-1]}" - - if [[ ${#LOCAL_TEMPLATES[@]} -gt 0 ]]; then - TEMPLATE="${LOCAL_TEMPLATES[-1]}" - TEMPLATE_SOURCE="local" - else - TEMPLATE="$ONLINE_TEMPLATE" - TEMPLATE_SOURCE="online" - fi - - TEMPLATE_PATH="$(pvesm path $TEMPLATE_STORAGE:vztmpl/$TEMPLATE 2>/dev/null || true)" - if [[ -z "$TEMPLATE_PATH" ]]; then - TEMPLATE_BASE=$(awk -v s="$TEMPLATE_STORAGE" '$1==s {f=1} f && /path/ {print $2; exit}' /etc/pve/storage.cfg) - [[ -n "$TEMPLATE_BASE" ]] && TEMPLATE_PATH="$TEMPLATE_BASE/template/cache/$TEMPLATE" - fi - - # If we still don't have a path but have a valid template name, construct it - if [[ -z "$TEMPLATE_PATH" && -n "$TEMPLATE" ]]; then - TEMPLATE_PATH="/var/lib/vz/template/cache/$TEMPLATE" - fi - - [[ -n "$TEMPLATE_PATH" ]] || { - msg_error "Template still not found after version change" - exit 220 - } - else - msg_custom "🚫" "${YW}" "Installation cancelled" - exit 1 - fi - else - msg_error "No ${PCT_OSTYPE} templates available" - exit 220 - fi - fi - } - - # Validate that we found a template - if [[ -z "$TEMPLATE" ]]; then - msg_error "No template found for ${PCT_OSTYPE} ${PCT_OSVERSION}" - msg_custom "ℹ️" "${YW}" "Please check:" - msg_custom " •" "${YW}" "Is pveam 
catalog available? (run: pveam available -section system)" - msg_custom " •" "${YW}" "Does the template exist for your OS version?" - exit 225 - fi - - msg_ok "Template ${BL}$TEMPLATE${CL} [$TEMPLATE_SOURCE]" - msg_debug "Resolved TEMPLATE_PATH=$TEMPLATE_PATH" - - NEED_DOWNLOAD=0 - if [[ ! -f "$TEMPLATE_PATH" ]]; then - msg_info "Template not present locally – will download." - NEED_DOWNLOAD=1 - elif [[ ! -r "$TEMPLATE_PATH" ]]; then - msg_error "Template file exists but is not readable – check permissions." - exit 221 - elif [[ "$(stat -c%s "$TEMPLATE_PATH")" -lt 1000000 ]]; then - if [[ -n "$ONLINE_TEMPLATE" ]]; then - msg_warn "Template file too small (<1MB) – re-downloading." - NEED_DOWNLOAD=1 - else - msg_warn "Template looks too small, but no online version exists. Keeping local file." - fi - elif ! tar -tf "$TEMPLATE_PATH" &>/dev/null; then - if [[ -n "$ONLINE_TEMPLATE" ]]; then - msg_warn "Template appears corrupted – re-downloading." - NEED_DOWNLOAD=1 - else - msg_warn "Template appears corrupted, but no online version exists. Keeping local file." + msg_error "No templates available for ${PCT_OSTYPE} ${PCT_OSVERSION}" + exit 225 fi + else + msg_custom "🚫" "${YW}" "Installation cancelled" + exit 0 + fi else - $STD msg_ok "Template $TEMPLATE is present and valid." + msg_error "No ${PCT_OSTYPE} templates available at all" + exit 225 fi + fi - if [[ "$TEMPLATE_SOURCE" == "local" && -n "$ONLINE_TEMPLATE" && "$TEMPLATE" != "$ONLINE_TEMPLATE" ]]; then - msg_warn "Local template is outdated: $TEMPLATE (latest available: $ONLINE_TEMPLATE)" - if whiptail --yesno "A newer template is available:\n$ONLINE_TEMPLATE\n\nDo you want to download and use it instead?" 12 70; then - TEMPLATE="$ONLINE_TEMPLATE" - NEED_DOWNLOAD=1 - else - msg_custom "ℹ️" "${BL}" "Continuing with local template $TEMPLATE" - fi - fi + #echo "[DEBUG] Selected TEMPLATE='$TEMPLATE' SOURCE='$TEMPLATE_SOURCE'" + #msg_debug "Selected TEMPLATE='$TEMPLATE' SOURCE='$TEMPLATE_SOURCE'" - if [[ "$NEED_DOWNLOAD" -eq 1 ]]; then - [[ -f "$TEMPLATE_PATH" ]] && rm -f "$TEMPLATE_PATH" - for attempt in {1..3}; do - msg_info "Attempt $attempt: Downloading template $TEMPLATE to $TEMPLATE_STORAGE" - if pveam download "$TEMPLATE_STORAGE" "$TEMPLATE" >/dev/null 2>&1; then - msg_ok "Template download successful." - break - fi - if [[ $attempt -eq 3 ]]; then - msg_error "Failed after 3 attempts. 
Please check network access, permissions, or manually run:\n pveam download $TEMPLATE_STORAGE $TEMPLATE" - exit 222 - fi - sleep $((attempt * 5)) + TEMPLATE_PATH="$(pvesm path $TEMPLATE_STORAGE:vztmpl/$TEMPLATE 2>/dev/null || true)" + if [[ -z "$TEMPLATE_PATH" ]]; then + TEMPLATE_BASE=$(awk -v s="$TEMPLATE_STORAGE" '$1==s {f=1} f && /path/ {print $2; exit}' /etc/pve/storage.cfg) + [[ -n "$TEMPLATE_BASE" ]] && TEMPLATE_PATH="$TEMPLATE_BASE/template/cache/$TEMPLATE" + fi + + # If we still don't have a path but have a valid template name, construct it + if [[ -z "$TEMPLATE_PATH" && -n "$TEMPLATE" ]]; then + TEMPLATE_PATH="/var/lib/vz/template/cache/$TEMPLATE" + fi + + [[ -n "$TEMPLATE_PATH" ]] || { + if [[ -z "$TEMPLATE" ]]; then + msg_error "Template ${PCT_OSTYPE} ${PCT_OSVERSION} not available" + + # Get available versions + mapfile -t AVAILABLE_VERSIONS < <( + pveam available -section system 2>/dev/null | + grep "^${PCT_OSTYPE}-" | + sed -E 's/.*'"${PCT_OSTYPE}"'-([0-9]+\.[0-9]+).*/\1/' | + grep -E '^[0-9]+\.[0-9]+$' | + sort -u -V 2>/dev/null || sort -u + ) + + if [[ ${#AVAILABLE_VERSIONS[@]} -gt 0 ]]; then + echo -e "\n${BL}Available versions:${CL}" + for i in "${!AVAILABLE_VERSIONS[@]}"; do + echo " [$((i + 1))] ${AVAILABLE_VERSIONS[$i]}" done - fi - if ! pveam list "$TEMPLATE_STORAGE" 2>/dev/null | grep -q "$TEMPLATE"; then - msg_error "Template $TEMPLATE not available in storage $TEMPLATE_STORAGE after download." - exit 223 - fi + echo "" + read -p "Select version [1-${#AVAILABLE_VERSIONS[@]}] or Enter to exit: " choice - # ------------------------------------------------------------------------------ - # Dynamic preflight for Debian 13.x: offer upgrade if available (no hard mins) - # ------------------------------------------------------------------------------ - if [[ "$PCT_OSTYPE" == "debian" ]]; then - OSVER="$(parse_template_osver "$TEMPLATE")" - if [[ -n "$OSVER" ]]; then - # Proactive, aber ohne Abbruch – nur Angebot - offer_lxc_stack_upgrade_and_maybe_retry "no" || true - fi - fi + if [[ "$choice" =~ ^[0-9]+$ ]] && [[ "$choice" -ge 1 ]] && [[ "$choice" -le ${#AVAILABLE_VERSIONS[@]} ]]; then + export var_version="${AVAILABLE_VERSIONS[$((choice - 1))]}" + export PCT_OSVERSION="$var_version" + msg_ok "Switched to ${PCT_OSTYPE} ${var_version}" - # ------------------------------------------------------------------------------ - # Create LXC Container - # ------------------------------------------------------------------------------ - msg_info "Creating LXC container" + # Retry template search with new version + TEMPLATE_SEARCH="${PCT_OSTYPE}-${PCT_OSVERSION:-}" + SEARCH_PATTERN="^${TEMPLATE_SEARCH}-" - # Ensure subuid/subgid entries exist - grep -q "root:100000:65536" /etc/subuid || echo "root:100000:65536" >>/etc/subuid - grep -q "root:100000:65536" /etc/subgid || echo "root:100000:65536" >>/etc/subgid + mapfile -t LOCAL_TEMPLATES < <( + pveam list "$TEMPLATE_STORAGE" 2>/dev/null | + awk -v search="${SEARCH_PATTERN}" -v pattern="${TEMPLATE_PATTERN}" '$1 ~ search && $1 ~ pattern {print $1}' | + sed 's|.*/||' | sort -t - -k 2 -V + ) + mapfile -t ONLINE_TEMPLATES < <( + pveam available -section system 2>/dev/null | + grep -E '\.(tar\.zst|tar\.xz|tar\.gz)$' | + awk -F'\t' '{print $1}' | + grep -E "${SEARCH_PATTERN}.*${TEMPLATE_PATTERN}" | + sort -t - -k 2 -V 2>/dev/null || true + ) + ONLINE_TEMPLATE="" + [[ ${#ONLINE_TEMPLATES[@]} -gt 0 ]] && ONLINE_TEMPLATE="${ONLINE_TEMPLATES[-1]}" - # PCT_OPTIONS is now a string (exported from build_container) - # Add rootfs if not already 
specified - if [[ ! "$PCT_OPTIONS" =~ "-rootfs" ]]; then - PCT_OPTIONS="$PCT_OPTIONS - -rootfs $CONTAINER_STORAGE:${PCT_DISK_SIZE:-8}" - fi + if [[ ${#LOCAL_TEMPLATES[@]} -gt 0 ]]; then + TEMPLATE="${LOCAL_TEMPLATES[-1]}" + TEMPLATE_SOURCE="local" + else + TEMPLATE="$ONLINE_TEMPLATE" + TEMPLATE_SOURCE="online" + fi - # Lock by template file (avoid concurrent downloads/creates) - lockfile="/tmp/template.${TEMPLATE}.lock" - exec 9>"$lockfile" || { - msg_error "Failed to create lock file '$lockfile'." - exit 200 - } - flock -w 60 9 || { - msg_error "Timeout while waiting for template lock." - exit 211 - } + TEMPLATE_PATH="$(pvesm path $TEMPLATE_STORAGE:vztmpl/$TEMPLATE 2>/dev/null || true)" + if [[ -z "$TEMPLATE_PATH" ]]; then + TEMPLATE_BASE=$(awk -v s="$TEMPLATE_STORAGE" '$1==s {f=1} f && /path/ {print $2; exit}' /etc/pve/storage.cfg) + [[ -n "$TEMPLATE_BASE" ]] && TEMPLATE_PATH="$TEMPLATE_BASE/template/cache/$TEMPLATE" + fi - LOGFILE="/tmp/pct_create_${CTID}_$(date +%Y%m%d_%H%M%S)_${SESSION_ID}.log" + # If we still don't have a path but have a valid template name, construct it + if [[ -z "$TEMPLATE_PATH" && -n "$TEMPLATE" ]]; then + TEMPLATE_PATH="/var/lib/vz/template/cache/$TEMPLATE" + fi - # # DEBUG: Show the actual command that will be executed - # echo "[DEBUG] ===== PCT CREATE COMMAND DETAILS =====" - # echo "[DEBUG] CTID: $CTID" - # echo "[DEBUG] Template: ${TEMPLATE_STORAGE}:vztmpl/${TEMPLATE}" - # echo "[DEBUG] PCT_OPTIONS (will be word-split):" - # echo "$PCT_OPTIONS" | sed 's/^/ /' - # echo "[DEBUG] Full command line:" - # echo " pct create $CTID ${TEMPLATE_STORAGE}:vztmpl/${TEMPLATE} $PCT_OPTIONS" - # echo "[DEBUG] ========================================" - - msg_debug "pct create command: pct create $CTID ${TEMPLATE_STORAGE}:vztmpl/${TEMPLATE} $PCT_OPTIONS" - msg_debug "Logfile: $LOGFILE" - - # First attempt (PCT_OPTIONS is a multi-line string, use it directly) - if ! pct create "$CTID" "${TEMPLATE_STORAGE}:vztmpl/${TEMPLATE}" $PCT_OPTIONS >"$LOGFILE" 2>&1; then - msg_debug "Container creation failed on ${TEMPLATE_STORAGE}. Validating template..." - - # Validate template file - if [[ ! -s "$TEMPLATE_PATH" || "$(stat -c%s "$TEMPLATE_PATH")" -lt 1000000 ]]; then - msg_warn "Template file too small or missing – re-downloading." - rm -f "$TEMPLATE_PATH" - pveam download "$TEMPLATE_STORAGE" "$TEMPLATE" - elif ! tar -tf "$TEMPLATE_PATH" &>/dev/null; then - if [[ -n "$ONLINE_TEMPLATE" ]]; then - msg_warn "Template appears corrupted – re-downloading." - rm -f "$TEMPLATE_PATH" - pveam download "$TEMPLATE_STORAGE" "$TEMPLATE" - else - msg_warn "Template appears corrupted, but no online version exists. Skipping re-download." - fi - fi - - # Retry after repair - if ! pct create "$CTID" "${TEMPLATE_STORAGE}:vztmpl/${TEMPLATE}" $PCT_OPTIONS >>"$LOGFILE" 2>&1; then - # Fallback to local storage if not already on local - if [[ "$TEMPLATE_STORAGE" != "local" ]]; then - msg_info "Retrying container creation with fallback to local storage..." - LOCAL_TEMPLATE_PATH="/var/lib/vz/template/cache/$TEMPLATE" - if [[ ! -f "$LOCAL_TEMPLATE_PATH" ]]; then - msg_info "Downloading template to local..." - pveam download local "$TEMPLATE" >/dev/null 2>&1 - fi - if ! pct create "$CTID" "local:vztmpl/${TEMPLATE}" $PCT_OPTIONS >>"$LOGFILE" 2>&1; then - # Local fallback also failed - check for LXC stack version issue - if grep -qiE 'unsupported .* version' "$LOGFILE"; then - echo - echo "pct reported 'unsupported ... version' – your LXC stack might be too old for this template." 
- echo "We can try to upgrade 'pve-container' and 'lxc-pve' now and retry automatically." - offer_lxc_stack_upgrade_and_maybe_retry "yes" - rc=$? - case $rc in - 0) : ;; # success - container created, continue - 2) - echo "Upgrade was declined. Please update and re-run: - apt update && apt install --only-upgrade pve-container lxc-pve" - exit 231 - ;; - 3) - echo "Upgrade and/or retry failed. Please inspect: $LOGFILE" - exit 231 - ;; - esac - else - msg_error "Container creation failed. See $LOGFILE" - if whiptail --yesno "pct create failed.\nDo you want to enable verbose debug mode and view detailed logs?" 12 70; then - set -x - pct create "$CTID" "local:vztmpl/${TEMPLATE}" $PCT_OPTIONS 2>&1 | tee -a "$LOGFILE" - set +x - fi - exit 209 - fi - else - msg_ok "Container successfully created using local fallback." - fi - else - # Already on local storage and still failed - check LXC stack version - if grep -qiE 'unsupported .* version' "$LOGFILE"; then - echo - echo "pct reported 'unsupported ... version' – your LXC stack might be too old for this template." - echo "We can try to upgrade 'pve-container' and 'lxc-pve' now and retry automatically." - offer_lxc_stack_upgrade_and_maybe_retry "yes" - rc=$? - case $rc in - 0) : ;; # success - container created, continue - 2) - echo "Upgrade was declined. Please update and re-run: - apt update && apt install --only-upgrade pve-container lxc-pve" - exit 231 - ;; - 3) - echo "Upgrade and/or retry failed. Please inspect: $LOGFILE" - exit 231 - ;; - esac - else - msg_error "Container creation failed. See $LOGFILE" - if whiptail --yesno "pct create failed.\nDo you want to enable verbose debug mode and view detailed logs?" 12 70; then - set -x - pct create "$CTID" "local:vztmpl/${TEMPLATE}" $PCT_OPTIONS 2>&1 | tee -a "$LOGFILE" - set +x - fi - exit 209 - fi - fi + [[ -n "$TEMPLATE_PATH" ]] || { + msg_error "Template still not found after version change" + exit 220 + } else - msg_ok "Container successfully created after template repair." + msg_custom "🚫" "${YW}" "Installation cancelled" + exit 1 fi + else + msg_error "No ${PCT_OSTYPE} templates available" + exit 220 + fi + fi + } + + # Validate that we found a template + if [[ -z "$TEMPLATE" ]]; then + msg_error "No template found for ${PCT_OSTYPE} ${PCT_OSVERSION}" + msg_custom "ℹ️" "${YW}" "Please check:" + msg_custom " •" "${YW}" "Is pveam catalog available? (run: pveam available -section system)" + msg_custom " •" "${YW}" "Does the template exist for your OS version?" + exit 225 + fi + + msg_ok "Template ${BL}$TEMPLATE${CL} [$TEMPLATE_SOURCE]" + msg_debug "Resolved TEMPLATE_PATH=$TEMPLATE_PATH" + + NEED_DOWNLOAD=0 + if [[ ! -f "$TEMPLATE_PATH" ]]; then + msg_info "Template not present locally – will download." + NEED_DOWNLOAD=1 + elif [[ ! -r "$TEMPLATE_PATH" ]]; then + msg_error "Template file exists but is not readable – check permissions." + exit 221 + elif [[ "$(stat -c%s "$TEMPLATE_PATH")" -lt 1000000 ]]; then + if [[ -n "$ONLINE_TEMPLATE" ]]; then + msg_warn "Template file too small (<1MB) – re-downloading." + NEED_DOWNLOAD=1 + else + msg_warn "Template looks too small, but no online version exists. Keeping local file." + fi + elif ! tar -tf "$TEMPLATE_PATH" &>/dev/null; then + if [[ -n "$ONLINE_TEMPLATE" ]]; then + msg_warn "Template appears corrupted – re-downloading." + NEED_DOWNLOAD=1 + else + msg_warn "Template appears corrupted, but no online version exists. Keeping local file." + fi + else + $STD msg_ok "Template $TEMPLATE is present and valid." 
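+    # reaching this branch means the local template archive passed both the size and tar integrity checks above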
+ fi + + if [[ "$TEMPLATE_SOURCE" == "local" && -n "$ONLINE_TEMPLATE" && "$TEMPLATE" != "$ONLINE_TEMPLATE" ]]; then + msg_warn "Local template is outdated: $TEMPLATE (latest available: $ONLINE_TEMPLATE)" + if whiptail --yesno "A newer template is available:\n$ONLINE_TEMPLATE\n\nDo you want to download and use it instead?" 12 70; then + TEMPLATE="$ONLINE_TEMPLATE" + NEED_DOWNLOAD=1 + else + msg_custom "ℹ️" "${BL}" "Continuing with local template $TEMPLATE" + fi + fi + + if [[ "$NEED_DOWNLOAD" -eq 1 ]]; then + [[ -f "$TEMPLATE_PATH" ]] && rm -f "$TEMPLATE_PATH" + for attempt in {1..3}; do + msg_info "Attempt $attempt: Downloading template $TEMPLATE to $TEMPLATE_STORAGE" + if pveam download "$TEMPLATE_STORAGE" "$TEMPLATE" >/dev/null 2>&1; then + msg_ok "Template download successful." + break + fi + if [[ $attempt -eq 3 ]]; then + msg_error "Failed after 3 attempts. Please check network access, permissions, or manually run:\n pveam download $TEMPLATE_STORAGE $TEMPLATE" + exit 222 + fi + sleep $((attempt * 5)) + done + fi + + if ! pveam list "$TEMPLATE_STORAGE" 2>/dev/null | grep -q "$TEMPLATE"; then + msg_error "Template $TEMPLATE not available in storage $TEMPLATE_STORAGE after download." + exit 223 + fi + + # ------------------------------------------------------------------------------ + # Dynamic preflight for Debian 13.x: offer upgrade if available (no hard mins) + # ------------------------------------------------------------------------------ + if [[ "$PCT_OSTYPE" == "debian" ]]; then + OSVER="$(parse_template_osver "$TEMPLATE")" + if [[ -n "$OSVER" ]]; then + # Proactive, aber ohne Abbruch – nur Angebot + offer_lxc_stack_upgrade_and_maybe_retry "no" || true + fi + fi + + # ------------------------------------------------------------------------------ + # Create LXC Container + # ------------------------------------------------------------------------------ + msg_info "Creating LXC container" + + # Ensure subuid/subgid entries exist + grep -q "root:100000:65536" /etc/subuid || echo "root:100000:65536" >>/etc/subuid + grep -q "root:100000:65536" /etc/subgid || echo "root:100000:65536" >>/etc/subgid + + # PCT_OPTIONS is now a string (exported from build_container) + # Add rootfs if not already specified + if [[ ! "$PCT_OPTIONS" =~ "-rootfs" ]]; then + PCT_OPTIONS="$PCT_OPTIONS + -rootfs $CONTAINER_STORAGE:${PCT_DISK_SIZE:-8}" + fi + + # Lock by template file (avoid concurrent downloads/creates) + lockfile="/tmp/template.${TEMPLATE}.lock" + exec 9>"$lockfile" || { + msg_error "Failed to create lock file '$lockfile'." + exit 200 + } + flock -w 60 9 || { + msg_error "Timeout while waiting for template lock." + exit 211 + } + + LOGFILE="/tmp/pct_create_${CTID}_$(date +%Y%m%d_%H%M%S)_${SESSION_ID}.log" + + # ------------------------------------------------------------------------------ + # openEuler Template Patch: Create /etc/redhat-release inside template + # PVE's post_create_hook expects this file for RHEL-family OS detection + # Without it, container creation fails with "error in setup task" + # ------------------------------------------------------------------------------ + if [[ "${var_os:-}" == "openeuler" ]]; then + msg_info "Patching openEuler template for PVE compatibility..." + local TEMP_EXTRACT_DIR="/tmp/openeuler_template_patch_$$" + local PATCHED_TEMPLATE="${TEMPLATE_PATH%.tar.xz}_patched.tar.xz" + + # Only patch if not already patched + if [[ ! 
-f "$PATCHED_TEMPLATE" ]]; then + mkdir -p "$TEMP_EXTRACT_DIR" + + # Extract template + if tar -xf "$TEMPLATE_PATH" -C "$TEMP_EXTRACT_DIR" 2>/dev/null; then + # Create /etc/redhat-release if it doesn't exist + if [[ ! -f "$TEMP_EXTRACT_DIR/etc/redhat-release" ]]; then + echo "openEuler release ${var_version:-25.03}" >"$TEMP_EXTRACT_DIR/etc/redhat-release" + fi + + # Repack template + if tar -cJf "$PATCHED_TEMPLATE" -C "$TEMP_EXTRACT_DIR" . 2>/dev/null; then + # Replace original with patched version + mv "$PATCHED_TEMPLATE" "$TEMPLATE_PATH" + msg_ok "openEuler template patched successfully" + else + msg_warn "Failed to repack template, trying without patch..." + fi + else + msg_warn "Failed to extract template for patching, trying without patch..." + fi + + rm -rf "$TEMP_EXTRACT_DIR" + fi + fi + + # # DEBUG: Show the actual command that will be executed + # echo "[DEBUG] ===== PCT CREATE COMMAND DETAILS =====" + # echo "[DEBUG] CTID: $CTID" + # echo "[DEBUG] Template: ${TEMPLATE_STORAGE}:vztmpl/${TEMPLATE}" + # echo "[DEBUG] PCT_OPTIONS (will be word-split):" + # echo "$PCT_OPTIONS" | sed 's/^/ /' + # echo "[DEBUG] Full command line:" + # echo " pct create $CTID ${TEMPLATE_STORAGE}:vztmpl/${TEMPLATE} $PCT_OPTIONS" + # echo "[DEBUG] ========================================" + + msg_debug "pct create command: pct create $CTID ${TEMPLATE_STORAGE}:vztmpl/${TEMPLATE} $PCT_OPTIONS" + msg_debug "Logfile: $LOGFILE" + + # First attempt (PCT_OPTIONS is a multi-line string, use it directly) + if ! pct create "$CTID" "${TEMPLATE_STORAGE}:vztmpl/${TEMPLATE}" $PCT_OPTIONS >"$LOGFILE" 2>&1; then + msg_debug "Container creation failed on ${TEMPLATE_STORAGE}. Validating template..." + + # Validate template file + if [[ ! -s "$TEMPLATE_PATH" || "$(stat -c%s "$TEMPLATE_PATH")" -lt 1000000 ]]; then + msg_warn "Template file too small or missing – re-downloading." + rm -f "$TEMPLATE_PATH" + pveam download "$TEMPLATE_STORAGE" "$TEMPLATE" + elif ! tar -tf "$TEMPLATE_PATH" &>/dev/null; then + if [[ -n "$ONLINE_TEMPLATE" ]]; then + msg_warn "Template appears corrupted – re-downloading." + rm -f "$TEMPLATE_PATH" + pveam download "$TEMPLATE_STORAGE" "$TEMPLATE" + else + msg_warn "Template appears corrupted, but no online version exists. Skipping re-download." + fi fi - # Verify container exists - pct list | awk '{print $1}' | grep -qx "$CTID" || { - msg_error "Container ID $CTID not listed in 'pct list'. See $LOGFILE" - exit 215 - } + # Retry after repair + if ! pct create "$CTID" "${TEMPLATE_STORAGE}:vztmpl/${TEMPLATE}" $PCT_OPTIONS >>"$LOGFILE" 2>&1; then + # Fallback to local storage if not already on local + if [[ "$TEMPLATE_STORAGE" != "local" ]]; then + msg_info "Retrying container creation with fallback to local storage..." + LOCAL_TEMPLATE_PATH="/var/lib/vz/template/cache/$TEMPLATE" + if [[ ! -f "$LOCAL_TEMPLATE_PATH" ]]; then + msg_info "Downloading template to local..." + pveam download local "$TEMPLATE" >/dev/null 2>&1 + fi + if ! pct create "$CTID" "local:vztmpl/${TEMPLATE}" $PCT_OPTIONS >>"$LOGFILE" 2>&1; then + # Local fallback also failed - check for LXC stack version issue + if grep -qiE 'unsupported .* version' "$LOGFILE"; then + echo + echo "pct reported 'unsupported ... version' – your LXC stack might be too old for this template." + echo "We can try to upgrade 'pve-container' and 'lxc-pve' now and retry automatically." + offer_lxc_stack_upgrade_and_maybe_retry "yes" + rc=$? + case $rc in + 0) : ;; # success - container created, continue + 2) + echo "Upgrade was declined. 
Please update and re-run: + apt update && apt install --only-upgrade pve-container lxc-pve" + exit 231 + ;; + 3) + echo "Upgrade and/or retry failed. Please inspect: $LOGFILE" + exit 231 + ;; + esac + else + msg_error "Container creation failed. See $LOGFILE" + if whiptail --yesno "pct create failed.\nDo you want to enable verbose debug mode and view detailed logs?" 12 70; then + set -x + pct create "$CTID" "local:vztmpl/${TEMPLATE}" $PCT_OPTIONS 2>&1 | tee -a "$LOGFILE" + set +x + fi + exit 209 + fi + else + msg_ok "Container successfully created using local fallback." + fi + else + # Already on local storage and still failed - check LXC stack version + if grep -qiE 'unsupported .* version' "$LOGFILE"; then + echo + echo "pct reported 'unsupported ... version' – your LXC stack might be too old for this template." + echo "We can try to upgrade 'pve-container' and 'lxc-pve' now and retry automatically." + offer_lxc_stack_upgrade_and_maybe_retry "yes" + rc=$? + case $rc in + 0) : ;; # success - container created, continue + 2) + echo "Upgrade was declined. Please update and re-run: + apt update && apt install --only-upgrade pve-container lxc-pve" + exit 231 + ;; + 3) + echo "Upgrade and/or retry failed. Please inspect: $LOGFILE" + exit 231 + ;; + esac + else + msg_error "Container creation failed. See $LOGFILE" + if whiptail --yesno "pct create failed.\nDo you want to enable verbose debug mode and view detailed logs?" 12 70; then + set -x + pct create "$CTID" "local:vztmpl/${TEMPLATE}" $PCT_OPTIONS 2>&1 | tee -a "$LOGFILE" + set +x + fi + exit 209 + fi + fi + else + msg_ok "Container successfully created after template repair." + fi + fi - # Verify config rootfs - grep -q '^rootfs:' "/etc/pve/lxc/$CTID.conf" || { - msg_error "RootFS entry missing in container config. See $LOGFILE" - exit 216 - } + # Verify container exists + pct list | awk '{print $1}' | grep -qx "$CTID" || { + msg_error "Container ID $CTID not listed in 'pct list'. See $LOGFILE" + exit 215 + } - msg_ok "LXC Container ${BL}$CTID${CL} ${GN}was successfully created." + # Verify config rootfs + grep -q '^rootfs:' "/etc/pve/lxc/$CTID.conf" || { + msg_error "RootFS entry missing in container config. See $LOGFILE" + exit 216 + } - # Report container creation to API - post_to_api + msg_ok "LXC Container ${BL}$CTID${CL} ${GN}was successfully created." 
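# Condensed sketch of the creation path above: validate, retry on the chosen
# storage, then fall back to local storage. It assumes the same variables
# ($CTID, $TEMPLATE, $PCT_OPTIONS, $LOGFILE, $TEMPLATE_STORAGE) and the real
# pct/pveam commands; try_pct_create and repair_template are hypothetical
# wrapper names used only to show the control flow.
try_pct_create() { pct create "$CTID" "$1:vztmpl/${TEMPLATE}" $PCT_OPTIONS >>"$LOGFILE" 2>&1; }

if ! try_pct_create "$TEMPLATE_STORAGE"; then
  repair_template                                   # re-download / re-validate as above (hypothetical helper)
  if ! try_pct_create "$TEMPLATE_STORAGE"; then
    pveam download local "$TEMPLATE" >/dev/null 2>&1 || true
    try_pct_create local || exit 209                # last resort: local storage, else abort
  fi
fi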
+ + # Report container creation to API + post_to_api } # ============================================================================== @@ -4187,11 +4464,11 @@ create_lxc_container() { # - Posts final "done" status to API telemetry # ------------------------------------------------------------------------------ description() { - IP=$(pct exec "$CTID" ip a s dev eth0 | awk '/inet / {print $2}' | cut -d/ -f1) + IP=$(pct exec "$CTID" ip a s dev eth0 | awk '/inet / {print $2}' | cut -d/ -f1) - # Generate LXC Description - DESCRIPTION=$( - cat < Logo @@ -4219,14 +4496,14 @@ description() { EOF - ) - pct set "$CTID" -description "$DESCRIPTION" + ) + pct set "$CTID" -description "$DESCRIPTION" - if [[ -f /etc/systemd/system/ping-instances.service ]]; then - systemctl start ping-instances.service - fi + if [[ -f /etc/systemd/system/ping-instances.service ]]; then + systemctl start ping-instances.service + fi - post_update_to_api "done" "none" + post_update_to_api "done" "none" } # ============================================================================== @@ -4243,14 +4520,14 @@ EOF # - Only executes on non-zero exit codes # ------------------------------------------------------------------------------ api_exit_script() { - exit_code=$? - if [ $exit_code -ne 0 ]; then - post_update_to_api "failed" "$exit_code" - fi + exit_code=$? + if [ $exit_code -ne 0 ]; then + post_update_to_api "failed" "$exit_code" + fi } if command -v pveversion >/dev/null 2>&1; then - trap 'api_exit_script' EXIT + trap 'api_exit_script' EXIT fi trap 'post_update_to_api "failed" "$BASH_COMMAND"' ERR trap 'post_update_to_api "failed" "INTERRUPTED"' SIGINT diff --git a/misc/install.func b/misc/install.func index 484a3a1fc..c8d6ae7e1 100644 --- a/misc/install.func +++ b/misc/install.func @@ -1,4 +1,4 @@ -# Copyright (c) 2021-2025 community-scripts ORG +# Copyright (c) 2021-2026 community-scripts ORG # Author: tteck (tteckster) # Co-Author: MickLesk # Co-Author: michelroegl-brunner @@ -49,134 +49,134 @@ INIT_SYSTEM="" # systemd, openrc, sysvinit # OS_TYPE, OS_FAMILY, OS_VERSION, PKG_MANAGER, INIT_SYSTEM # ------------------------------------------------------------------------------ detect_os() { - if [[ -f /etc/os-release ]]; then - # shellcheck disable=SC1091 - . /etc/os-release - OS_TYPE="${ID:-unknown}" - OS_VERSION="${VERSION_ID:-unknown}" - elif [[ -f /etc/alpine-release ]]; then - OS_TYPE="alpine" - OS_VERSION=$(cat /etc/alpine-release) - elif [[ -f /etc/debian_version ]]; then - OS_TYPE="debian" - OS_VERSION=$(cat /etc/debian_version) - elif [[ -f /etc/redhat-release ]]; then - OS_TYPE="centos" - OS_VERSION=$(grep -oE '[0-9]+\.[0-9]+' /etc/redhat-release | head -1) - elif [[ -f /etc/arch-release ]]; then - OS_TYPE="arch" - OS_VERSION="rolling" - elif [[ -f /etc/gentoo-release ]]; then - OS_TYPE="gentoo" - OS_VERSION=$(cat /etc/gentoo-release | grep -oE '[0-9.]+') - else - OS_TYPE="unknown" - OS_VERSION="unknown" - fi + if [[ -f /etc/os-release ]]; then + # shellcheck disable=SC1091 + . 
/etc/os-release + OS_TYPE="${ID:-unknown}" + OS_VERSION="${VERSION_ID:-unknown}" + elif [[ -f /etc/alpine-release ]]; then + OS_TYPE="alpine" + OS_VERSION=$(cat /etc/alpine-release) + elif [[ -f /etc/debian_version ]]; then + OS_TYPE="debian" + OS_VERSION=$(cat /etc/debian_version) + elif [[ -f /etc/redhat-release ]]; then + OS_TYPE="centos" + OS_VERSION=$(grep -oE '[0-9]+\.[0-9]+' /etc/redhat-release | head -1) + elif [[ -f /etc/arch-release ]]; then + OS_TYPE="arch" + OS_VERSION="rolling" + elif [[ -f /etc/gentoo-release ]]; then + OS_TYPE="gentoo" + OS_VERSION=$(cat /etc/gentoo-release | grep -oE '[0-9.]+') + else + OS_TYPE="unknown" + OS_VERSION="unknown" + fi - # Normalize OS type and determine family - case "$OS_TYPE" in - debian) - OS_FAMILY="debian" - PKG_MANAGER="apt" - ;; - ubuntu) - OS_FAMILY="debian" - PKG_MANAGER="apt" - ;; - devuan) - OS_FAMILY="debian" - PKG_MANAGER="apt" - ;; - alpine) - OS_FAMILY="alpine" - PKG_MANAGER="apk" - ;; - fedora) - OS_FAMILY="rhel" - PKG_MANAGER="dnf" - ;; - rocky | rockylinux) - OS_TYPE="rocky" - OS_FAMILY="rhel" - PKG_MANAGER="dnf" - ;; - alma | almalinux) - OS_TYPE="alma" - OS_FAMILY="rhel" - PKG_MANAGER="dnf" - ;; - centos) - OS_FAMILY="rhel" - # CentOS 7 uses yum, 8+ uses dnf - if [[ "${OS_VERSION%%.*}" -ge 8 ]]; then - PKG_MANAGER="dnf" - else - PKG_MANAGER="yum" - fi - ;; - rhel) - OS_FAMILY="rhel" - PKG_MANAGER="dnf" - ;; - openeuler) - OS_FAMILY="rhel" - PKG_MANAGER="dnf" - ;; - opensuse* | sles) - OS_TYPE="opensuse" - OS_FAMILY="suse" - PKG_MANAGER="zypper" - ;; - gentoo) - OS_FAMILY="gentoo" - PKG_MANAGER="emerge" - ;; - *) - OS_FAMILY="unknown" - PKG_MANAGER="unknown" - ;; - esac - - # Detect init system - if command -v systemctl &>/dev/null && [[ -d /run/systemd/system ]]; then - INIT_SYSTEM="systemd" - elif command -v rc-service &>/dev/null || [[ -d /etc/init.d && -f /sbin/openrc ]]; then - INIT_SYSTEM="openrc" - elif [[ -f /etc/inittab ]]; then - INIT_SYSTEM="sysvinit" + # Normalize OS type and determine family + case "$OS_TYPE" in + debian) + OS_FAMILY="debian" + PKG_MANAGER="apt" + ;; + ubuntu) + OS_FAMILY="debian" + PKG_MANAGER="apt" + ;; + devuan) + OS_FAMILY="debian" + PKG_MANAGER="apt" + ;; + alpine) + OS_FAMILY="alpine" + PKG_MANAGER="apk" + ;; + fedora) + OS_FAMILY="rhel" + PKG_MANAGER="dnf" + ;; + rocky | rockylinux) + OS_TYPE="rocky" + OS_FAMILY="rhel" + PKG_MANAGER="dnf" + ;; + alma | almalinux) + OS_TYPE="alma" + OS_FAMILY="rhel" + PKG_MANAGER="dnf" + ;; + centos) + OS_FAMILY="rhel" + # CentOS 7 uses yum, 8+ uses dnf + if [[ "${OS_VERSION%%.*}" -ge 8 ]]; then + PKG_MANAGER="dnf" else - INIT_SYSTEM="unknown" + PKG_MANAGER="yum" fi + ;; + rhel) + OS_FAMILY="rhel" + PKG_MANAGER="dnf" + ;; + openeuler) + OS_FAMILY="rhel" + PKG_MANAGER="dnf" + ;; + opensuse* | sles) + OS_TYPE="opensuse" + OS_FAMILY="suse" + PKG_MANAGER="zypper" + ;; + gentoo) + OS_FAMILY="gentoo" + PKG_MANAGER="emerge" + ;; + *) + OS_FAMILY="unknown" + PKG_MANAGER="unknown" + ;; + esac + + # Detect init system + if command -v systemctl &>/dev/null && [[ -d /run/systemd/system ]]; then + INIT_SYSTEM="systemd" + elif command -v rc-service &>/dev/null || [[ -d /etc/init.d && -f /sbin/openrc ]]; then + INIT_SYSTEM="openrc" + elif [[ -f /etc/inittab ]]; then + INIT_SYSTEM="sysvinit" + else + INIT_SYSTEM="unknown" + fi } # ------------------------------------------------------------------------------ # Bootstrap: Ensure curl is available and source core functions # ------------------------------------------------------------------------------ _bootstrap() { - # 
Minimal bootstrap to get curl installed - if ! command -v curl &>/dev/null; then - printf "\r\e[2K%b" '\033[93m Setup Source \033[m' >&2 - if command -v apt-get &>/dev/null; then - apt-get update &>/dev/null && apt-get install -y curl &>/dev/null - elif command -v apk &>/dev/null; then - apk update &>/dev/null && apk add curl &>/dev/null - elif command -v dnf &>/dev/null; then - dnf install -y curl &>/dev/null - elif command -v yum &>/dev/null; then - yum install -y curl &>/dev/null - elif command -v zypper &>/dev/null; then - zypper install -y curl &>/dev/null - elif command -v emerge &>/dev/null; then - emerge --quiet net-misc/curl &>/dev/null - fi + # Minimal bootstrap to get curl installed + if ! command -v curl &>/dev/null; then + printf "\r\e[2K%b" '\033[93m Setup Source \033[m' >&2 + if command -v apt-get &>/dev/null; then + apt-get update &>/dev/null && apt-get install -y curl &>/dev/null + elif command -v apk &>/dev/null; then + apk update &>/dev/null && apk add curl &>/dev/null + elif command -v dnf &>/dev/null; then + dnf install -y curl &>/dev/null + elif command -v yum &>/dev/null; then + yum install -y curl &>/dev/null + elif command -v zypper &>/dev/null; then + zypper install -y curl &>/dev/null + elif command -v emerge &>/dev/null; then + emerge --quiet net-misc/curl &>/dev/null fi + fi - # Source core functions - source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/core.func) - source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/error_handler.func) - load_functions - catch_errors + # Source core functions + source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/core.func) + source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/error_handler.func) + load_functions + catch_errors } # Run bootstrap and OS detection @@ -193,30 +193,30 @@ detect_os # Updates package manager cache/database # ------------------------------------------------------------------------------ pkg_update() { - case "$PKG_MANAGER" in - apt) - $STD apt-get update - ;; - apk) - $STD apk update - ;; - dnf) - $STD dnf makecache - ;; - yum) - $STD yum makecache - ;; - zypper) - $STD zypper refresh - ;; - emerge) - $STD emerge --sync - ;; - *) - msg_error "Unknown package manager: $PKG_MANAGER" - return 1 - ;; - esac + case "$PKG_MANAGER" in + apt) + $STD apt-get update + ;; + apk) + $STD apk update + ;; + dnf) + $STD dnf makecache + ;; + yum) + $STD yum makecache + ;; + zypper) + $STD zypper refresh + ;; + emerge) + $STD emerge --sync + ;; + *) + msg_error "Unknown package manager: $PKG_MANAGER" + return 1 + ;; + esac } # ------------------------------------------------------------------------------ @@ -225,30 +225,30 @@ pkg_update() { # Upgrades all installed packages # ------------------------------------------------------------------------------ pkg_upgrade() { - case "$PKG_MANAGER" in - apt) - $STD apt-get -o Dpkg::Options::="--force-confold" -y dist-upgrade - ;; - apk) - $STD apk -U upgrade - ;; - dnf) - $STD dnf -y upgrade - ;; - yum) - $STD yum -y update - ;; - zypper) - $STD zypper -n update - ;; - emerge) - $STD emerge --quiet --update --deep @world - ;; - *) - msg_error "Unknown package manager: $PKG_MANAGER" - return 1 - ;; - esac + case "$PKG_MANAGER" in + apt) + $STD apt-get -o Dpkg::Options::="--force-confold" -y dist-upgrade + ;; + apk) + $STD apk -U upgrade + ;; + dnf) + $STD dnf -y upgrade + 
;; + yum) + $STD yum -y update + ;; + zypper) + $STD zypper -n update + ;; + emerge) + $STD emerge --quiet --update --deep @world + ;; + *) + msg_error "Unknown package manager: $PKG_MANAGER" + return 1 + ;; + esac } # ------------------------------------------------------------------------------ @@ -259,33 +259,33 @@ pkg_upgrade() { # packages - List of packages to install # ------------------------------------------------------------------------------ pkg_install() { - local packages=("$@") - [[ ${#packages[@]} -eq 0 ]] && return 0 + local packages=("$@") + [[ ${#packages[@]} -eq 0 ]] && return 0 - case "$PKG_MANAGER" in - apt) - $STD apt-get install -y "${packages[@]}" - ;; - apk) - $STD apk add --no-cache "${packages[@]}" - ;; - dnf) - $STD dnf install -y "${packages[@]}" - ;; - yum) - $STD yum install -y "${packages[@]}" - ;; - zypper) - $STD zypper install -y "${packages[@]}" - ;; - emerge) - $STD emerge --quiet "${packages[@]}" - ;; - *) - msg_error "Unknown package manager: $PKG_MANAGER" - return 1 - ;; - esac + case "$PKG_MANAGER" in + apt) + $STD apt-get install -y "${packages[@]}" + ;; + apk) + $STD apk add --no-cache "${packages[@]}" + ;; + dnf) + $STD dnf install -y "${packages[@]}" + ;; + yum) + $STD yum install -y "${packages[@]}" + ;; + zypper) + $STD zypper install -y "${packages[@]}" + ;; + emerge) + $STD emerge --quiet "${packages[@]}" + ;; + *) + msg_error "Unknown package manager: $PKG_MANAGER" + return 1 + ;; + esac } # ------------------------------------------------------------------------------ @@ -294,33 +294,33 @@ pkg_install() { # Removes one or more packages # ------------------------------------------------------------------------------ pkg_remove() { - local packages=("$@") - [[ ${#packages[@]} -eq 0 ]] && return 0 + local packages=("$@") + [[ ${#packages[@]} -eq 0 ]] && return 0 - case "$PKG_MANAGER" in - apt) - $STD apt-get remove -y "${packages[@]}" - ;; - apk) - $STD apk del "${packages[@]}" - ;; - dnf) - $STD dnf remove -y "${packages[@]}" - ;; - yum) - $STD yum remove -y "${packages[@]}" - ;; - zypper) - $STD zypper remove -y "${packages[@]}" - ;; - emerge) - $STD emerge --quiet --unmerge "${packages[@]}" - ;; - *) - msg_error "Unknown package manager: $PKG_MANAGER" - return 1 - ;; - esac + case "$PKG_MANAGER" in + apt) + $STD apt-get remove -y "${packages[@]}" + ;; + apk) + $STD apk del "${packages[@]}" + ;; + dnf) + $STD dnf remove -y "${packages[@]}" + ;; + yum) + $STD yum remove -y "${packages[@]}" + ;; + zypper) + $STD zypper remove -y "${packages[@]}" + ;; + emerge) + $STD emerge --quiet --unmerge "${packages[@]}" + ;; + *) + msg_error "Unknown package manager: $PKG_MANAGER" + return 1 + ;; + esac } # ------------------------------------------------------------------------------ @@ -329,31 +329,31 @@ pkg_remove() { # Cleans package manager cache to free space # ------------------------------------------------------------------------------ pkg_clean() { - case "$PKG_MANAGER" in - apt) - $STD apt-get autoremove -y - $STD apt-get autoclean - ;; - apk) - $STD apk cache clean - ;; - dnf) - $STD dnf clean all - $STD dnf autoremove -y - ;; - yum) - $STD yum clean all - ;; - zypper) - $STD zypper clean - ;; - emerge) - $STD emerge --quiet --depclean - ;; - *) - return 0 - ;; - esac + case "$PKG_MANAGER" in + apt) + $STD apt-get autoremove -y + $STD apt-get autoclean + ;; + apk) + $STD apk cache clean + ;; + dnf) + $STD dnf clean all + $STD dnf autoremove -y + ;; + yum) + $STD yum clean all + ;; + zypper) + $STD zypper clean + ;; + emerge) + $STD emerge 
--quiet --depclean + ;; + *) + return 0 + ;; + esac } # ============================================================================== @@ -366,28 +366,28 @@ pkg_clean() { # Enables a service to start at boot # ------------------------------------------------------------------------------ svc_enable() { - local service="$1" - [[ -z "$service" ]] && return 1 + local service="$1" + [[ -z "$service" ]] && return 1 - case "$INIT_SYSTEM" in - systemd) - $STD systemctl enable "$service" - ;; - openrc) - $STD rc-update add "$service" default - ;; - sysvinit) - if command -v update-rc.d &>/dev/null; then - $STD update-rc.d "$service" defaults - elif command -v chkconfig &>/dev/null; then - $STD chkconfig "$service" on - fi - ;; - *) - msg_warn "Unknown init system, cannot enable $service" - return 1 - ;; - esac + case "$INIT_SYSTEM" in + systemd) + $STD systemctl enable "$service" + ;; + openrc) + $STD rc-update add "$service" default + ;; + sysvinit) + if command -v update-rc.d &>/dev/null; then + $STD update-rc.d "$service" defaults + elif command -v chkconfig &>/dev/null; then + $STD chkconfig "$service" on + fi + ;; + *) + msg_warn "Unknown init system, cannot enable $service" + return 1 + ;; + esac } # ------------------------------------------------------------------------------ @@ -396,27 +396,27 @@ svc_enable() { # Disables a service from starting at boot # ------------------------------------------------------------------------------ svc_disable() { - local service="$1" - [[ -z "$service" ]] && return 1 + local service="$1" + [[ -z "$service" ]] && return 1 - case "$INIT_SYSTEM" in - systemd) - $STD systemctl disable "$service" - ;; - openrc) - $STD rc-update del "$service" default 2>/dev/null || true - ;; - sysvinit) - if command -v update-rc.d &>/dev/null; then - $STD update-rc.d "$service" remove - elif command -v chkconfig &>/dev/null; then - $STD chkconfig "$service" off - fi - ;; - *) - return 1 - ;; - esac + case "$INIT_SYSTEM" in + systemd) + $STD systemctl disable "$service" + ;; + openrc) + $STD rc-update del "$service" default 2>/dev/null || true + ;; + sysvinit) + if command -v update-rc.d &>/dev/null; then + $STD update-rc.d "$service" remove + elif command -v chkconfig &>/dev/null; then + $STD chkconfig "$service" off + fi + ;; + *) + return 1 + ;; + esac } # ------------------------------------------------------------------------------ @@ -425,23 +425,23 @@ svc_disable() { # Starts a service immediately # ------------------------------------------------------------------------------ svc_start() { - local service="$1" - [[ -z "$service" ]] && return 1 + local service="$1" + [[ -z "$service" ]] && return 1 - case "$INIT_SYSTEM" in - systemd) - $STD systemctl start "$service" - ;; - openrc) - $STD rc-service "$service" start - ;; - sysvinit) - $STD /etc/init.d/"$service" start - ;; - *) - return 1 - ;; - esac + case "$INIT_SYSTEM" in + systemd) + $STD systemctl start "$service" + ;; + openrc) + $STD rc-service "$service" start + ;; + sysvinit) + $STD /etc/init.d/"$service" start + ;; + *) + return 1 + ;; + esac } # ------------------------------------------------------------------------------ @@ -450,23 +450,23 @@ svc_start() { # Stops a running service # ------------------------------------------------------------------------------ svc_stop() { - local service="$1" - [[ -z "$service" ]] && return 1 + local service="$1" + [[ -z "$service" ]] && return 1 - case "$INIT_SYSTEM" in - systemd) - $STD systemctl stop "$service" - ;; - openrc) - $STD rc-service "$service" stop - ;; - 
sysvinit) - $STD /etc/init.d/"$service" stop - ;; - *) - return 1 - ;; - esac + case "$INIT_SYSTEM" in + systemd) + $STD systemctl stop "$service" + ;; + openrc) + $STD rc-service "$service" stop + ;; + sysvinit) + $STD /etc/init.d/"$service" stop + ;; + *) + return 1 + ;; + esac } # ------------------------------------------------------------------------------ @@ -475,23 +475,23 @@ svc_stop() { # Restarts a service # ------------------------------------------------------------------------------ svc_restart() { - local service="$1" - [[ -z "$service" ]] && return 1 + local service="$1" + [[ -z "$service" ]] && return 1 - case "$INIT_SYSTEM" in - systemd) - $STD systemctl restart "$service" - ;; - openrc) - $STD rc-service "$service" restart - ;; - sysvinit) - $STD /etc/init.d/"$service" restart - ;; - *) - return 1 - ;; - esac + case "$INIT_SYSTEM" in + systemd) + $STD systemctl restart "$service" + ;; + openrc) + $STD rc-service "$service" restart + ;; + sysvinit) + $STD /etc/init.d/"$service" restart + ;; + *) + return 1 + ;; + esac } # ------------------------------------------------------------------------------ @@ -500,23 +500,23 @@ svc_restart() { # Gets service status (returns 0 if running) # ------------------------------------------------------------------------------ svc_status() { - local service="$1" - [[ -z "$service" ]] && return 1 + local service="$1" + [[ -z "$service" ]] && return 1 - case "$INIT_SYSTEM" in - systemd) - systemctl is-active --quiet "$service" - ;; - openrc) - rc-service "$service" status &>/dev/null - ;; - sysvinit) - /etc/init.d/"$service" status &>/dev/null - ;; - *) - return 1 - ;; - esac + case "$INIT_SYSTEM" in + systemd) + systemctl is-active --quiet "$service" + ;; + openrc) + rc-service "$service" status &>/dev/null + ;; + sysvinit) + /etc/init.d/"$service" status &>/dev/null + ;; + *) + return 1 + ;; + esac } # ------------------------------------------------------------------------------ @@ -525,15 +525,15 @@ svc_status() { # Reloads init system daemon configuration (for systemd) # ------------------------------------------------------------------------------ svc_reload_daemon() { - case "$INIT_SYSTEM" in - systemd) - $STD systemctl daemon-reload - ;; - *) - # Other init systems don't need this - return 0 - ;; - esac + case "$INIT_SYSTEM" in + systemd) + $STD systemctl daemon-reload + ;; + *) + # Other init systems don't need this + return 0 + ;; + esac } # ============================================================================== @@ -547,24 +547,24 @@ svc_reload_daemon() { # Returns: IP address string # ------------------------------------------------------------------------------ get_ip() { - local ip="" + local ip="" - # Try hostname -I first (most common) - if command -v hostname &>/dev/null; then - ip=$(hostname -I 2>/dev/null | awk '{print $1}') - fi + # Try hostname -I first (most common) + if command -v hostname &>/dev/null; then + ip=$(hostname -I 2>/dev/null | awk '{print $1}' || true) + fi - # Fallback to ip command - if [[ -z "$ip" ]] && command -v ip &>/dev/null; then - ip=$(ip -4 addr show scope global | grep -oP '(?<=inet\s)\d+(\.\d+){3}' | head -1) - fi + # Fallback to ip command + if [[ -z "$ip" ]] && command -v ip &>/dev/null; then + ip=$(ip -4 addr show scope global | awk '/inet /{print $2}' | cut -d/ -f1 | head -1) + fi - # Fallback to ifconfig - if [[ -z "$ip" ]] && command -v ifconfig &>/dev/null; then - ip=$(ifconfig | grep -Eo 'inet (addr:)?([0-9]*\.){3}[0-9]*' | grep -Eo '([0-9]*\.){3}[0-9]*' | grep -v '127.0.0.1' | 
head -1) - fi + # Fallback to ifconfig + if [[ -z "$ip" ]] && command -v ifconfig &>/dev/null; then + ip=$(ifconfig | grep -Eo 'inet (addr:)?([0-9]*\.){3}[0-9]*' | grep -Eo '([0-9]*\.){3}[0-9]*' | grep -v '127.0.0.1' | head -1) + fi - echo "$ip" + echo "$ip" } # ------------------------------------------------------------------------------ @@ -574,25 +574,25 @@ get_ip() { # If IPV6_METHOD=disable: disables IPv6 via sysctl # ------------------------------------------------------------------------------ verb_ip6() { - set_std_mode # Set STD mode based on VERBOSE + set_std_mode # Set STD mode based on VERBOSE - if [[ "${IPV6_METHOD:-}" == "disable" ]]; then - msg_info "Disabling IPv6 (this may affect some services)" - mkdir -p /etc/sysctl.d - cat >/etc/sysctl.d/99-disable-ipv6.conf </etc/sysctl.d/99-disable-ipv6.conf </dev/null || true - fi - msg_ok "Disabled IPv6" + # For OpenRC, ensure sysctl runs at boot + if [[ "$INIT_SYSTEM" == "openrc" ]]; then + $STD rc-update add sysctl default 2>/dev/null || true fi + msg_ok "Disabled IPv6" + fi } # ------------------------------------------------------------------------------ @@ -604,36 +604,36 @@ EOF # - Disables network wait services # ------------------------------------------------------------------------------ setting_up_container() { - msg_info "Setting up Container OS" + msg_info "Setting up Container OS" - # Wait for network - local i - for ((i = RETRY_NUM; i > 0; i--)); do - if [[ -n "$(get_ip)" ]]; then - break - fi - echo 1>&2 -en "${CROSS}${RD} No Network! " - sleep "$RETRY_EVERY" - done - - if [[ -z "$(get_ip)" ]]; then - echo 1>&2 -e "\n${CROSS}${RD} No Network After $RETRY_NUM Tries${CL}" - echo -e "${NETWORK}Check Network Settings" - exit 1 + # Wait for network + local i + for ((i = RETRY_NUM; i > 0; i--)); do + if [[ -n "$(get_ip)" ]]; then + break fi + echo 1>&2 -en "${CROSS}${RD} No Network! 
" + sleep "$RETRY_EVERY" + done - # Remove Python EXTERNALLY-MANAGED restriction (Debian 12+, Ubuntu 23.04+) - rm -rf /usr/lib/python3.*/EXTERNALLY-MANAGED 2>/dev/null || true + if [[ -z "$(get_ip)" ]]; then + echo 1>&2 -e "\n${CROSS}${RD} No Network After $RETRY_NUM Tries${CL}" + echo -e "${NETWORK}Check Network Settings" + exit 1 + fi - # Disable network wait services for faster boot - case "$INIT_SYSTEM" in - systemd) - systemctl disable -q --now systemd-networkd-wait-online.service 2>/dev/null || true - ;; - esac + # Remove Python EXTERNALLY-MANAGED restriction (Debian 12+, Ubuntu 23.04+) + rm -rf /usr/lib/python3.*/EXTERNALLY-MANAGED 2>/dev/null || true - msg_ok "Set up Container OS" - msg_ok "Network Connected: ${BL}$(get_ip)" + # Disable network wait services for faster boot + case "$INIT_SYSTEM" in + systemd) + systemctl disable -q --now systemd-networkd-wait-online.service 2>/dev/null || true + ;; + esac + + msg_ok "Set up Container OS" + msg_ok "Network Connected: ${BL}$(get_ip)" } # ------------------------------------------------------------------------------ @@ -643,65 +643,65 @@ setting_up_container() { # Tests connectivity to DNS servers and verifies DNS resolution # ------------------------------------------------------------------------------ network_check() { - set +e - trap - ERR - local ipv4_connected=false - local ipv6_connected=false - sleep 1 + set +e + trap - ERR + local ipv4_connected=false + local ipv6_connected=false + sleep 1 - # Check IPv4 connectivity - if ping -c 1 -W 1 1.1.1.1 &>/dev/null || ping -c 1 -W 1 8.8.8.8 &>/dev/null || ping -c 1 -W 1 9.9.9.9 &>/dev/null; then - msg_ok "IPv4 Internet Connected" - ipv4_connected=true + # Check IPv4 connectivity + if ping -c 1 -W 1 1.1.1.1 &>/dev/null || ping -c 1 -W 1 8.8.8.8 &>/dev/null || ping -c 1 -W 1 9.9.9.9 &>/dev/null; then + msg_ok "IPv4 Internet Connected" + ipv4_connected=true + else + msg_error "IPv4 Internet Not Connected" + fi + + # Check IPv6 connectivity (if ping6 exists) + if command -v ping6 &>/dev/null; then + if ping6 -c 1 -W 1 2606:4700:4700::1111 &>/dev/null || ping6 -c 1 -W 1 2001:4860:4860::8888 &>/dev/null; then + msg_ok "IPv6 Internet Connected" + ipv6_connected=true else - msg_error "IPv4 Internet Not Connected" + msg_error "IPv6 Internet Not Connected" fi + fi - # Check IPv6 connectivity (if ping6 exists) - if command -v ping6 &>/dev/null; then - if ping6 -c 1 -W 1 2606:4700:4700::1111 &>/dev/null || ping6 -c 1 -W 1 2001:4860:4860::8888 &>/dev/null; then - msg_ok "IPv6 Internet Connected" - ipv6_connected=true - else - msg_error "IPv6 Internet Not Connected" - fi - fi - - # Prompt if both fail - if [[ $ipv4_connected == false && $ipv6_connected == false ]]; then - read -r -p "No Internet detected, would you like to continue anyway? 
" prompt - if [[ "${prompt,,}" =~ ^(y|yes)$ ]]; then - echo -e "${INFO}${RD}Expect Issues Without Internet${CL}" - else - echo -e "${NETWORK}Check Network Settings" - exit 1 - fi - fi - - # DNS resolution checks - local GIT_HOSTS=("github.com" "raw.githubusercontent.com" "git.community-scripts.org") - local GIT_STATUS="Git DNS:" - local DNS_FAILED=false - - for HOST in "${GIT_HOSTS[@]}"; do - local RESOLVEDIP - RESOLVEDIP=$(getent hosts "$HOST" 2>/dev/null | awk '{ print $1 }' | head -n1) - if [[ -z "$RESOLVEDIP" ]]; then - GIT_STATUS+=" $HOST:(${DNSFAIL:-FAIL})" - DNS_FAILED=true - else - GIT_STATUS+=" $HOST:(${DNSOK:-OK})" - fi - done - - if [[ "$DNS_FAILED" == true ]]; then - fatal "$GIT_STATUS" + # Prompt if both fail + if [[ $ipv4_connected == false && $ipv6_connected == false ]]; then + read -r -p "No Internet detected, would you like to continue anyway? " prompt + if [[ "${prompt,,}" =~ ^(y|yes)$ ]]; then + echo -e "${INFO}${RD}Expect Issues Without Internet${CL}" else - msg_ok "$GIT_STATUS" + echo -e "${NETWORK}Check Network Settings" + exit 1 fi + fi - set -e - trap 'error_handler $LINENO "$BASH_COMMAND"' ERR + # DNS resolution checks + local GIT_HOSTS=("github.com" "raw.githubusercontent.com" "git.community-scripts.org") + local GIT_STATUS="Git DNS:" + local DNS_FAILED=false + + for HOST in "${GIT_HOSTS[@]}"; do + local RESOLVEDIP + RESOLVEDIP=$(getent hosts "$HOST" 2>/dev/null | awk '{ print $1 }' | head -n1) + if [[ -z "$RESOLVEDIP" ]]; then + GIT_STATUS+=" $HOST:(${DNSFAIL:-FAIL})" + DNS_FAILED=true + else + GIT_STATUS+=" $HOST:(${DNSOK:-OK})" + fi + done + + if [[ "$DNS_FAILED" == true ]]; then + fatal "$GIT_STATUS" + else + msg_ok "$GIT_STATUS" + fi + + set -e + trap 'error_handler $LINENO "$BASH_COMMAND"' ERR } # ============================================================================== @@ -714,12 +714,12 @@ network_check() { # Updates container OS and sources appropriate tools.func # ------------------------------------------------------------------------------ update_os() { - msg_info "Updating Container OS" + msg_info "Updating Container OS" - # Configure APT cacher proxy if enabled (Debian/Ubuntu only) - if [[ "$PKG_MANAGER" == "apt" && "${CACHER:-}" == "yes" ]]; then - echo 'Acquire::http::Proxy-Auto-Detect "/usr/local/bin/apt-proxy-detect.sh";' >/etc/apt/apt.conf.d/00aptproxy - cat </usr/local/bin/apt-proxy-detect.sh + # Configure APT cacher proxy if enabled (Debian/Ubuntu only) + if [[ "$PKG_MANAGER" == "apt" && "${CACHER:-}" == "yes" ]]; then + echo 'Acquire::http::Proxy-Auto-Detect "/usr/local/bin/apt-proxy-detect.sh";' >/etc/apt/apt.conf.d/00aptproxy + cat </usr/local/bin/apt-proxy-detect.sh #!/bin/bash if nc -w1 -z "${CACHER_IP}" 3142; then echo -n "http://${CACHER_IP}:3142" @@ -727,27 +727,27 @@ else echo -n "DIRECT" fi EOF - chmod +x /usr/local/bin/apt-proxy-detect.sh - fi + chmod +x /usr/local/bin/apt-proxy-detect.sh + fi - # Update and upgrade - pkg_update - pkg_upgrade + # Update and upgrade + pkg_update + pkg_upgrade - # Remove Python EXTERNALLY-MANAGED restriction - rm -rf /usr/lib/python3.*/EXTERNALLY-MANAGED 2>/dev/null || true + # Remove Python EXTERNALLY-MANAGED restriction + rm -rf /usr/lib/python3.*/EXTERNALLY-MANAGED 2>/dev/null || true - msg_ok "Updated Container OS" + msg_ok "Updated Container OS" - # Source appropriate tools.func based on OS - case "$OS_FAMILY" in - alpine) - source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/alpine-tools.func) - ;; - *) - source <(curl -fsSL 
https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/tools.func) - ;; - esac + # Source appropriate tools.func based on OS + case "$OS_FAMILY" in + alpine) + source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/alpine-tools.func) + ;; + *) + source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/tools.func) + ;; + esac } # ============================================================================== @@ -760,79 +760,79 @@ EOF # Configures Message of the Day and SSH settings # ------------------------------------------------------------------------------ motd_ssh() { - # Set terminal to 256-color mode - grep -qxF "export TERM='xterm-256color'" /root/.bashrc 2>/dev/null || echo "export TERM='xterm-256color'" >>/root/.bashrc + # Set terminal to 256-color mode + grep -qxF "export TERM='xterm-256color'" /root/.bashrc 2>/dev/null || echo "export TERM='xterm-256color'" >>/root/.bashrc - # Get OS information - local os_name="$OS_TYPE" - local os_version="$OS_VERSION" + # Get OS information + local os_name="$OS_TYPE" + local os_version="$OS_VERSION" - if [[ -f /etc/os-release ]]; then - os_name=$(grep ^NAME /etc/os-release | cut -d= -f2 | tr -d '"') - os_version=$(grep ^VERSION_ID /etc/os-release | cut -d= -f2 | tr -d '"') - fi + if [[ -f /etc/os-release ]]; then + os_name=$(grep ^NAME /etc/os-release | cut -d= -f2 | tr -d '"') + os_version=$(grep ^VERSION_ID /etc/os-release | cut -d= -f2 | tr -d '"') + fi - # Create MOTD profile script - local PROFILE_FILE="/etc/profile.d/00_lxc-details.sh" - cat >"$PROFILE_FILE" <"$PROFILE_FILE" </dev/null | awk '{print \$1}' || ip -4 addr show scope global | grep -oP '(?<=inet\s)\\d+(\\.\\d+){3}' | head -1)${CL:-}" +echo -e "${YW:-} IP Address: ${GN:-}\$(hostname -I 2>/dev/null | awk '{print \$1}' || ip -4 addr show scope global | awk '/inet /{print \$2}' | cut -d/ -f1 | head -1)${CL:-}" echo -e "${YW:-} Repository: ${GN:-}https://github.com/community-scripts/ProxmoxVED${CL:-}" echo "" EOF - # Disable default MOTD scripts (Debian/Ubuntu) - [[ -d /etc/update-motd.d ]] && chmod -x /etc/update-motd.d/* 2>/dev/null || true + # Disable default MOTD scripts (Debian/Ubuntu) + [[ -d /etc/update-motd.d ]] && chmod -x /etc/update-motd.d/* 2>/dev/null || true - # Configure SSH root access if requested - if [[ "${SSH_ROOT:-}" == "yes" ]]; then - # Ensure SSH server is installed - if [[ ! -f /etc/ssh/sshd_config ]]; then - msg_info "Installing SSH server" - case "$PKG_MANAGER" in - apt) - pkg_install openssh-server - ;; - apk) - pkg_install openssh - rc-update add sshd default 2>/dev/null || true - ;; - dnf | yum) - pkg_install openssh-server - ;; - zypper) - pkg_install openssh - ;; - emerge) - pkg_install net-misc/openssh - ;; - esac - msg_ok "Installed SSH server" - fi - - local sshd_config="/etc/ssh/sshd_config" - if [[ -f "$sshd_config" ]]; then - sed -i "s/#PermitRootLogin prohibit-password/PermitRootLogin yes/g" "$sshd_config" - sed -i "s/PermitRootLogin prohibit-password/PermitRootLogin yes/g" "$sshd_config" - - case "$INIT_SYSTEM" in - systemd) - svc_restart sshd 2>/dev/null || svc_restart ssh 2>/dev/null || true - ;; - openrc) - svc_enable sshd 2>/dev/null || true - svc_start sshd 2>/dev/null || true - ;; - *) - svc_restart sshd 2>/dev/null || true - ;; - esac - fi + # Configure SSH root access if requested + if [[ "${SSH_ROOT:-}" == "yes" ]]; then + # Ensure SSH server is installed + if [[ ! 
-f /etc/ssh/sshd_config ]]; then + msg_info "Installing SSH server" + case "$PKG_MANAGER" in + apt) + pkg_install openssh-server + ;; + apk) + pkg_install openssh + rc-update add sshd default 2>/dev/null || true + ;; + dnf | yum) + pkg_install openssh-server + ;; + zypper) + pkg_install openssh + ;; + emerge) + pkg_install net-misc/openssh + ;; + esac + msg_ok "Installed SSH server" fi + + local sshd_config="/etc/ssh/sshd_config" + if [[ -f "$sshd_config" ]]; then + sed -i "s/#PermitRootLogin prohibit-password/PermitRootLogin yes/g" "$sshd_config" + sed -i "s/PermitRootLogin prohibit-password/PermitRootLogin yes/g" "$sshd_config" + + case "$INIT_SYSTEM" in + systemd) + svc_restart sshd 2>/dev/null || svc_restart ssh 2>/dev/null || true + ;; + openrc) + svc_enable sshd 2>/dev/null || true + svc_start sshd 2>/dev/null || true + ;; + *) + svc_restart sshd 2>/dev/null || true + ;; + esac + fi + fi } # ============================================================================== @@ -845,83 +845,104 @@ EOF # Customizes container for passwordless login and creates update script # ------------------------------------------------------------------------------ customize() { - if [[ "${PASSWORD:-}" == "" ]]; then - msg_info "Customizing Container" + if [[ "${PASSWORD:-}" == "" ]]; then + msg_info "Customizing Container" - # Remove root password for auto-login - passwd -d root &>/dev/null || true + # Remove root password for auto-login + passwd -d root &>/dev/null || true - case "$INIT_SYSTEM" in - systemd) - # Mask services that block boot in LXC containers - # systemd-homed-firstboot.service hangs waiting for user input on Fedora - systemctl mask systemd-homed-firstboot.service &>/dev/null || true - systemctl mask systemd-homed.service &>/dev/null || true + case "$INIT_SYSTEM" in + systemd) + # Mask services that block boot in LXC containers + # systemd-homed-firstboot.service hangs waiting for user input on Fedora + systemctl mask systemd-homed-firstboot.service &>/dev/null || true + systemctl mask systemd-homed.service &>/dev/null || true - # Configure console-getty for auto-login in LXC containers - # console-getty.service is THE service that handles /dev/console in LXC - # It's present on all systemd distros but not enabled by default on Fedora/RHEL + # Configure console-getty for auto-login in LXC containers + # console-getty.service is THE service that handles /dev/console in LXC + # It's present on all systemd distros but not enabled by default on Fedora/RHEL - if [[ -f /usr/lib/systemd/system/console-getty.service ]]; then - mkdir -p /etc/systemd/system/console-getty.service.d - cat >/etc/systemd/system/console-getty.service.d/override.conf <<'EOF' + if [[ -f /usr/lib/systemd/system/console-getty.service ]]; then + mkdir -p /etc/systemd/system/console-getty.service.d + cat >/etc/systemd/system/console-getty.service.d/override.conf <<'EOF' [Service] ExecStart= ExecStart=-/sbin/agetty --autologin root --noclear --keep-baud 115200,38400,9600 - $TERM EOF - # Enable console-getty for LXC web console (required on Fedora/RHEL) - systemctl enable console-getty.service &>/dev/null || true - fi + # Enable console-getty for LXC web console (required on Fedora/RHEL) + systemctl enable console-getty.service &>/dev/null || true + fi - # Also configure container-getty@1 (Debian/Ubuntu default in LXC) - if [[ -f /usr/lib/systemd/system/container-getty@.service ]]; then - mkdir -p /etc/systemd/system/container-getty@1.service.d - cat >/etc/systemd/system/container-getty@1.service.d/override.conf <<'EOF' + # 
Also configure container-getty@1 (Debian/Ubuntu default in LXC) + if [[ -f /usr/lib/systemd/system/container-getty@.service ]]; then + mkdir -p /etc/systemd/system/container-getty@1.service.d + cat >/etc/systemd/system/container-getty@1.service.d/override.conf <<'EOF' [Service] ExecStart= ExecStart=-/sbin/agetty --autologin root --noclear --keep-baud tty%I 115200,38400,9600 - $TERM EOF - fi + fi - # Reload systemd and restart getty services to apply auto-login - systemctl daemon-reload - systemctl restart console-getty.service &>/dev/null || true - systemctl restart container-getty@1.service &>/dev/null || true - ;; + # Reload systemd and restart getty services to apply auto-login + systemctl daemon-reload + systemctl restart console-getty.service &>/dev/null || true + systemctl restart container-getty@1.service &>/dev/null || true + ;; - openrc) - # Alpine/Gentoo: modify inittab for auto-login - if [[ -f /etc/inittab ]]; then - sed -i 's|^tty1::respawn:.*|tty1::respawn:/sbin/agetty --autologin root --noclear tty1 38400 linux|' /etc/inittab - fi - touch /root/.hushlogin - ;; + openrc) + # Alpine/Gentoo: modify inittab for auto-login + if [[ -f /etc/inittab ]]; then + sed -i 's|^tty1::respawn:.*|tty1::respawn:/sbin/agetty --autologin root --noclear tty1 38400 linux|' /etc/inittab + fi + touch /root/.hushlogin + ;; - sysvinit) - # Devuan/older systems - modify inittab with flexible runlevel matching - if [[ -f /etc/inittab ]]; then - # Match various runlevel patterns (23, 2345, 12345, etc.) and both getty/agetty - sed -i 's|^1:[0-9]*:respawn:/sbin/a\?getty.*|1:2345:respawn:/sbin/agetty --autologin root tty1 38400 linux|' /etc/inittab - fi - ;; - esac + sysvinit) + # Devuan/older systems - modify inittab for auto-login + # Devuan 5 (daedalus) uses SysVinit with various inittab formats + # CRITICAL: LXC uses /dev/console, NOT tty1! pct console connects to console device + if [[ -f /etc/inittab ]]; then + # Backup original inittab + cp /etc/inittab /etc/inittab.bak 2>/dev/null || true - msg_ok "Customized Container" - fi + # First, enable autologin on tty1 (for direct access) + sed -i 's|^1:[0-9]*:respawn:.*/\(a\?getty\).*|1:2345:respawn:/sbin/agetty --autologin root --noclear tty1 38400 linux|' /etc/inittab - # Create update script - # Use var_os for OS-based containers, otherwise use app name - local update_script_name="${var_os:-$app}" - echo "bash -c \"\$(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/ct/${update_script_name}.sh)\"" >/usr/bin/update - chmod +x /usr/bin/update + # CRITICAL: Add console entry for LXC - this is what pct console uses! + # Check if there's already a console getty entry + if ! 
grep -qE '^[^#].*respawn.*console' /etc/inittab; then + # Add new console entry for LXC + echo "" >>/etc/inittab + echo "# LXC console autologin (added by community-scripts)" >>/etc/inittab + echo "co:2345:respawn:/sbin/agetty --autologin root --noclear console 115200,38400,9600 linux" >>/etc/inittab + else + # Enable autologin on existing console entry + sed -i 's|^[^#]*:[0-9]*:respawn:.*/\(a\?getty\).*console.*|co:2345:respawn:/sbin/agetty --autologin root --noclear console 115200,38400,9600 linux|' /etc/inittab + fi - # Inject SSH authorized keys if provided - if [[ -n "${SSH_AUTHORIZED_KEY:-}" ]]; then - mkdir -p /root/.ssh - echo "${SSH_AUTHORIZED_KEY}" >/root/.ssh/authorized_keys - chmod 700 /root/.ssh - chmod 600 /root/.ssh/authorized_keys - fi + # Force a reload of inittab - try multiple methods + telinit q &>/dev/null || init q &>/dev/null || kill -1 1 &>/dev/null || true + fi + touch /root/.hushlogin + ;; + esac + + msg_ok "Customized Container" + fi + + # Create update script + # Use var_os for OS-based containers, otherwise use app name + local update_script_name="${var_os:-$app}" + echo "bash -c \"\$(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/ct/${update_script_name}.sh)\"" >/usr/bin/update + chmod +x /usr/bin/update + + # Inject SSH authorized keys if provided + if [[ -n "${SSH_AUTHORIZED_KEY:-}" ]]; then + mkdir -p /root/.ssh + echo "${SSH_AUTHORIZED_KEY}" >/root/.ssh/authorized_keys + chmod 700 /root/.ssh + chmod 600 /root/.ssh/authorized_keys + fi } # ============================================================================== @@ -935,8 +956,8 @@ EOF # Returns: 0 if valid, 1 if invalid # ------------------------------------------------------------------------------ validate_tz() { - local tz="$1" - [[ -f "/usr/share/zoneinfo/$tz" ]] + local tz="$1" + [[ -f "/usr/share/zoneinfo/$tz" ]] } # ------------------------------------------------------------------------------ @@ -945,21 +966,21 @@ validate_tz() { # Sets container timezone # ------------------------------------------------------------------------------ set_timezone() { - local tz="$1" - if validate_tz "$tz"; then - ln -sf "/usr/share/zoneinfo/$tz" /etc/localtime - echo "$tz" >/etc/timezone 2>/dev/null || true + local tz="$1" + if validate_tz "$tz"; then + ln -sf "/usr/share/zoneinfo/$tz" /etc/localtime + echo "$tz" >/etc/timezone 2>/dev/null || true - # Update tzdata if available - case "$PKG_MANAGER" in - apt) - dpkg-reconfigure -f noninteractive tzdata 2>/dev/null || true - ;; - esac - msg_ok "Timezone set to $tz" - else - msg_warn "Invalid timezone: $tz" - fi + # Update tzdata if available + case "$PKG_MANAGER" in + apt) + dpkg-reconfigure -f noninteractive tzdata 2>/dev/null || true + ;; + esac + msg_ok "Timezone set to $tz" + else + msg_warn "Invalid timezone: $tz" + fi } # ------------------------------------------------------------------------------ @@ -968,9 +989,9 @@ set_timezone() { # Prints detected OS information (for debugging) # ------------------------------------------------------------------------------ os_info() { - echo "OS Type: $OS_TYPE" - echo "OS Family: $OS_FAMILY" - echo "OS Version: $OS_VERSION" - echo "Pkg Manager: $PKG_MANAGER" - echo "Init System: $INIT_SYSTEM" + echo "OS Type: $OS_TYPE" + echo "OS Family: $OS_FAMILY" + echo "OS Version: $OS_VERSION" + echo "Pkg Manager: $PKG_MANAGER" + echo "Init System: $INIT_SYSTEM" } From c55bda86490ce2272b6fd3ddda09eb2ca6642575 Mon Sep 17 00:00:00 2001 From: GoldenSpring Date: Wed, 21 Jan 2026 15:58:44 
+0300 Subject: [PATCH 022/228] changed requests from code review --- ct/sonobarr.sh | 5 ++++- frontend/public/json/sonobarr.json | 3 +-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/ct/sonobarr.sh b/ct/sonobarr.sh index d2c6b9a24..0e1ba7031 100644 --- a/ct/sonobarr.sh +++ b/ct/sonobarr.sh @@ -34,15 +34,18 @@ function update_script() { systemctl stop sonobarr msg_ok "Stopped sonobarr" - msg_info "Updating sonobarr" cp "/opt/sonobarr/.env" "/opt/.sonobarr-env" CLEAN_INSTALL=1 fetch_and_deploy_gh_release "sonobarr" "Dodelidoo-Labs/sonobarr" "tarball" + + msg_info "Updating sonobarr" cp "/opt/.sonobarr-env" "/opt/sonobarr/.env" msg_ok "Updated sonobarr" msg_info "Starting sonobarr" systemctl start sonobarr msg_ok "Started sonobarr" + + msg_ok "Updated successfully!" fi exit } diff --git a/frontend/public/json/sonobarr.json b/frontend/public/json/sonobarr.json index ee23ab442..6f23eff39 100644 --- a/frontend/public/json/sonobarr.json +++ b/frontend/public/json/sonobarr.json @@ -12,7 +12,7 @@ "documentation": "https://github.com/Dodelidoo-Labs/sonobarr", "config_path": "gunicorn_config.py", "website": "https://github.com/Dodelidoo-Labs/sonobarr", - "logo": "https://camo.githubusercontent.com/cc3f9cb9e157fbdee667130c50ba431fffc407a3d5c6cdf415574d652dd8484f/68747470733a2f2f696e756265732e6170702f617070732f66696c65735f73686172696e672f7075626c6963707265766965772f356a36574a597243476342696a646f3f66696c653d2f2666696c6549643d323731323226783d3338343026793d3231363026613d7472756526657461673d6535393833393032393962643532643062393863663835613464376161636565", + "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/sonobarr.webp", "description": "Sonobarr marries your existing Lidarr library with Last.fm’s discovery graph to surface artists you'll actually like. It runs as a Flask + Socket.IO application, ships with a polished Bootstrap UI, and includes admin tooling so folks can share a single instance safely.", "install_methods": [ { @@ -40,6 +40,5 @@ "text": "Change the default admin credentials", "type": "info" } - ] } From 852ac484ed90722d8dda39e1f7d151bc52713875 Mon Sep 17 00:00:00 2001 From: GoldenSpring Date: Wed, 21 Jan 2026 13:51:55 +0000 Subject: [PATCH 023/228] Apply suggestions from code review reptile suggestions merge Co-authored-by: greptile-apps[bot] <165735046+greptile-apps[bot]@users.noreply.github.com> --- ct/sonobarr.sh | 4 ++-- install/sonobarr-install.sh | 12 ++++++------ 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/ct/sonobarr.sh b/ct/sonobarr.sh index d2c6b9a24..8abd12a00 100644 --- a/ct/sonobarr.sh +++ b/ct/sonobarr.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -source <(curl -s https://raw.githubusercontent.com/GoldenSpringness/ProxmoxVED/refs/heads/feature/sonobarr/misc/build.func) +source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func) # Copyright (c) 2021-2025 community-scripts ORG # Author: GoldenSpringness # License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE @@ -24,7 +24,7 @@ function update_script() { check_container_storage check_container_resources - if [[ ! -f "/opt/sonobarr" ]]; then + if [[ ! -d "/opt/sonobarr" ]]; then msg_error "No sonobarr Installation Found!" 
exit fi diff --git a/install/sonobarr-install.sh b/install/sonobarr-install.sh index 5a247b656..da5cedb4e 100644 --- a/install/sonobarr-install.sh +++ b/install/sonobarr-install.sh @@ -14,19 +14,19 @@ network_check update_os fetch_and_deploy_gh_release "sonobarr" "Dodelidoo-Labs/sonobarr" "tarball" -cd /opt/sonobarr +msg_info "Setting up sonobarr" msg_info "Setting up sonobarr" -apt install python3.13-venv -y -python3 -m venv venv -source venv/bin/activate -pip install --no-cache-dir -r requirements.txt +$STD apt-get install -y python3.13-venv +$STD python3 -m venv /opt/sonobarr/venv +source /opt/sonobarr/venv/bin/activate +$STD pip install --no-cache-dir -r /opt/sonobarr/requirements.txt mv ".sample-env" ".env" sed -i "s/^secret_key=.*/secret_key=$(openssl rand -hex 16)/" .env msg_ok "Set up sonobarr" msg_info "Creating Service" -cat </etc/systemd/system/sonobarr.service +cat </etc/systemd/system/sonobarr.service [Unit] Description=sonobarr Service After=network.target From 946ae297a4d9d9e48ff2d29b3a5306825e768e66 Mon Sep 17 00:00:00 2001 From: GoldenSpring Date: Wed, 21 Jan 2026 16:04:24 +0300 Subject: [PATCH 024/228] changed config file and copyright years --- ct/sonobarr.sh | 2 +- frontend/public/json/sonobarr.json | 4 ++-- install/sonobarr-install.sh | 3 +-- 3 files changed, 4 insertions(+), 5 deletions(-) diff --git a/ct/sonobarr.sh b/ct/sonobarr.sh index 2ab8cc62c..616d63a67 100644 --- a/ct/sonobarr.sh +++ b/ct/sonobarr.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func) -# Copyright (c) 2021-2025 community-scripts ORG +# Copyright (c) 2021-2026 community-scripts ORG # Author: GoldenSpringness # License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE # Source: https://github.com/Dodelidoo-Labs/sonobarr diff --git a/frontend/public/json/sonobarr.json b/frontend/public/json/sonobarr.json index 6f23eff39..8847fc612 100644 --- a/frontend/public/json/sonobarr.json +++ b/frontend/public/json/sonobarr.json @@ -4,13 +4,13 @@ "categories": [ 14 ], - "date_created": "2025-12-27", + "date_created": "2026-01-21", "type": "ct", "updateable": true, "privileged": false, "interface_port": 5000, "documentation": "https://github.com/Dodelidoo-Labs/sonobarr", - "config_path": "gunicorn_config.py", + "config_path": ".env", "website": "https://github.com/Dodelidoo-Labs/sonobarr", "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/sonobarr.webp", "description": "Sonobarr marries your existing Lidarr library with Last.fm’s discovery graph to surface artists you'll actually like. 
It runs as a Flask + Socket.IO application, ships with a polished Bootstrap UI, and includes admin tooling so folks can share a single instance safely.", diff --git a/install/sonobarr-install.sh b/install/sonobarr-install.sh index da5cedb4e..32c988d65 100644 --- a/install/sonobarr-install.sh +++ b/install/sonobarr-install.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# Copyright (c) 2021-2025 community-scripts ORG +# Copyright (c) 2021-2026 community-scripts ORG # Author: GoldenSpringness # License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE # Source: https://github.com/Dodelidoo-Labs/sonobarr @@ -14,7 +14,6 @@ network_check update_os fetch_and_deploy_gh_release "sonobarr" "Dodelidoo-Labs/sonobarr" "tarball" -msg_info "Setting up sonobarr" msg_info "Setting up sonobarr" $STD apt-get install -y python3.13-venv From 6050a88ff9bef5ae6fc294d31c9ca0acdcb99b66 Mon Sep 17 00:00:00 2001 From: Chris Date: Fri, 23 Jan 2026 10:17:51 -0500 Subject: [PATCH 025/228] Update install/sonobarr-install.sh --- install/sonobarr-install.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/install/sonobarr-install.sh b/install/sonobarr-install.sh index 32c988d65..d9015d422 100644 --- a/install/sonobarr-install.sh +++ b/install/sonobarr-install.sh @@ -14,6 +14,7 @@ network_check update_os fetch_and_deploy_gh_release "sonobarr" "Dodelidoo-Labs/sonobarr" "tarball" +PYTHON_VERSION="3.12" setup_uv msg_info "Setting up sonobarr" $STD apt-get install -y python3.13-venv From a1c2cb84ab8c8cc71de97457772118570767f117 Mon Sep 17 00:00:00 2001 From: Chris Date: Fri, 23 Jan 2026 10:18:35 -0500 Subject: [PATCH 026/228] Update install/sonobarr-install.sh --- install/sonobarr-install.sh | 1 - 1 file changed, 1 deletion(-) diff --git a/install/sonobarr-install.sh b/install/sonobarr-install.sh index d9015d422..b2d385718 100644 --- a/install/sonobarr-install.sh +++ b/install/sonobarr-install.sh @@ -17,7 +17,6 @@ fetch_and_deploy_gh_release "sonobarr" "Dodelidoo-Labs/sonobarr" "tarball" PYTHON_VERSION="3.12" setup_uv msg_info "Setting up sonobarr" -$STD apt-get install -y python3.13-venv $STD python3 -m venv /opt/sonobarr/venv source /opt/sonobarr/venv/bin/activate $STD pip install --no-cache-dir -r /opt/sonobarr/requirements.txt From ec1f6dc5fd7b72ae512f79226b2e15578ce44513 Mon Sep 17 00:00:00 2001 From: Chris Date: Fri, 23 Jan 2026 10:18:54 -0500 Subject: [PATCH 027/228] Update install/sonobarr-install.sh --- install/sonobarr-install.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/install/sonobarr-install.sh b/install/sonobarr-install.sh index b2d385718..1ff2af96d 100644 --- a/install/sonobarr-install.sh +++ b/install/sonobarr-install.sh @@ -19,7 +19,7 @@ PYTHON_VERSION="3.12" setup_uv msg_info "Setting up sonobarr" $STD python3 -m venv /opt/sonobarr/venv source /opt/sonobarr/venv/bin/activate -$STD pip install --no-cache-dir -r /opt/sonobarr/requirements.txt +$STD uv pip install --no-cache-dir -r /opt/sonobarr/requirements.txt mv ".sample-env" ".env" sed -i "s/^secret_key=.*/secret_key=$(openssl rand -hex 16)/" .env msg_ok "Set up sonobarr" From e523f5bded6ee2b669ba83d75517a0ea67e6e6b8 Mon Sep 17 00:00:00 2001 From: Chris Date: Fri, 23 Jan 2026 10:19:13 -0500 Subject: [PATCH 028/228] Update install/sonobarr-install.sh --- install/sonobarr-install.sh | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/install/sonobarr-install.sh b/install/sonobarr-install.sh index 1ff2af96d..b64e6ca2e 100644 --- a/install/sonobarr-install.sh +++ b/install/sonobarr-install.sh @@ -20,8 
+20,11 @@ msg_info "Setting up sonobarr" $STD python3 -m venv /opt/sonobarr/venv source /opt/sonobarr/venv/bin/activate $STD uv pip install --no-cache-dir -r /opt/sonobarr/requirements.txt -mv ".sample-env" ".env" -sed -i "s/^secret_key=.*/secret_key=$(openssl rand -hex 16)/" .env +mkdir -p /etc/sonobarr +mv /opt/sonobarr/.sample-env /etc/sonobarr/.env +sed -i "s/^secret_key=.*/secret_key=$(openssl rand -hex 16)/" /etc/sonobarr/.env +echo "release_version=$(cat ~/.sonobarr)" >>/etc/sonobarr/.env +echo "sonobarr_config_dir=/etc/sonobarr" >>/etc/sonobarr.env msg_ok "Set up sonobarr" msg_info "Creating Service" From 7177d7f7feb323340706af104ef94048ed99fb55 Mon Sep 17 00:00:00 2001 From: Chris Date: Fri, 23 Jan 2026 10:19:29 -0500 Subject: [PATCH 029/228] Update install/sonobarr-install.sh --- install/sonobarr-install.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/install/sonobarr-install.sh b/install/sonobarr-install.sh index b64e6ca2e..ac735a149 100644 --- a/install/sonobarr-install.sh +++ b/install/sonobarr-install.sh @@ -35,7 +35,7 @@ After=network.target [Service] WorkingDirectory=/opt/sonobarr/src -EnvironmentFile=/opt/sonobarr/.env +EnvironmentFile=/etc/sonobarr/.env Environment="PATH=/opt/sonobarr/venv/bin" ExecStart=/bin/bash -c 'gunicorn Sonobarr:app -c ../gunicorn_config.py' Restart=always From 31d377960d422ed173ad7d41e761aea1cbc4b71b Mon Sep 17 00:00:00 2001 From: Chris Date: Fri, 23 Jan 2026 10:19:53 -0500 Subject: [PATCH 030/228] Update frontend/public/json/sonobarr.json --- frontend/public/json/sonobarr.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frontend/public/json/sonobarr.json b/frontend/public/json/sonobarr.json index 8847fc612..614ed36cd 100644 --- a/frontend/public/json/sonobarr.json +++ b/frontend/public/json/sonobarr.json @@ -10,7 +10,7 @@ "privileged": false, "interface_port": 5000, "documentation": "https://github.com/Dodelidoo-Labs/sonobarr", - "config_path": ".env", + "config_path": "/etc/sonobarr/.env", "website": "https://github.com/Dodelidoo-Labs/sonobarr", "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/sonobarr.webp", "description": "Sonobarr marries your existing Lidarr library with Last.fm’s discovery graph to surface artists you'll actually like. 
It runs as a Flask + Socket.IO application, ships with a polished Bootstrap UI, and includes admin tooling so folks can share a single instance safely.", From e30c2c3f423e0bd77d6bf0675f4a4eafe1e7b1ba Mon Sep 17 00:00:00 2001 From: Chris Date: Fri, 23 Jan 2026 10:20:10 -0500 Subject: [PATCH 031/228] Update ct/sonobarr.sh --- ct/sonobarr.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ct/sonobarr.sh b/ct/sonobarr.sh index 616d63a67..e46fe9640 100644 --- a/ct/sonobarr.sh +++ b/ct/sonobarr.sh @@ -29,6 +29,8 @@ function update_script() { exit fi +PYTHON_VERSION="3.12" setup_uv + if check_for_gh_release "sonobarr" "Dodelidoo-Labs/sonobarr"; then msg_info "Stopping sonobarr" systemctl stop sonobarr From 7a54babd8f69298d5e10079d90e196dac042b170 Mon Sep 17 00:00:00 2001 From: Chris Date: Fri, 23 Jan 2026 10:20:30 -0500 Subject: [PATCH 032/228] Update ct/sonobarr.sh --- ct/sonobarr.sh | 1 - 1 file changed, 1 deletion(-) diff --git a/ct/sonobarr.sh b/ct/sonobarr.sh index e46fe9640..d9efd674d 100644 --- a/ct/sonobarr.sh +++ b/ct/sonobarr.sh @@ -36,7 +36,6 @@ PYTHON_VERSION="3.12" setup_uv systemctl stop sonobarr msg_ok "Stopped sonobarr" - cp "/opt/sonobarr/.env" "/opt/.sonobarr-env" CLEAN_INSTALL=1 fetch_and_deploy_gh_release "sonobarr" "Dodelidoo-Labs/sonobarr" "tarball" msg_info "Updating sonobarr" From e34de15355d0a80888d016d01876feb437a2a3a2 Mon Sep 17 00:00:00 2001 From: Chris Date: Fri, 23 Jan 2026 10:20:58 -0500 Subject: [PATCH 033/228] Update ct/sonobarr.sh --- ct/sonobarr.sh | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/ct/sonobarr.sh b/ct/sonobarr.sh index d9efd674d..7e2634b25 100644 --- a/ct/sonobarr.sh +++ b/ct/sonobarr.sh @@ -39,7 +39,10 @@ PYTHON_VERSION="3.12" setup_uv CLEAN_INSTALL=1 fetch_and_deploy_gh_release "sonobarr" "Dodelidoo-Labs/sonobarr" "tarball" msg_info "Updating sonobarr" - cp "/opt/.sonobarr-env" "/opt/sonobarr/.env" + $STD uv venv -c /opt/sonobarr/venv + $STD source /opt/sonobarr/venv/bin/activate + $STD uv pip install --no-cache-dir -r /opt/sonobarr/requirements.txt + sed -i "/release_version/s/=.*/=$(cat ~/.sonobarr)/" /etc/sonobarr/.env msg_ok "Updated sonobarr" msg_info "Starting sonobarr" From 6f53fa7ceb65138116093ede7319fa7aef603cec Mon Sep 17 00:00:00 2001 From: Chris Date: Fri, 23 Jan 2026 10:23:58 -0500 Subject: [PATCH 034/228] Update ct/sonobarr.sh Co-authored-by: Tobias <96661824+CrazyWolf13@users.noreply.github.com> --- ct/sonobarr.sh | 1 - 1 file changed, 1 deletion(-) diff --git a/ct/sonobarr.sh b/ct/sonobarr.sh index 7e2634b25..eb90b0797 100644 --- a/ct/sonobarr.sh +++ b/ct/sonobarr.sh @@ -48,7 +48,6 @@ PYTHON_VERSION="3.12" setup_uv msg_info "Starting sonobarr" systemctl start sonobarr msg_ok "Started sonobarr" - msg_ok "Updated successfully!" 
fi exit From 1713d7071e671f940e7d21bf20131caf2b48b333 Mon Sep 17 00:00:00 2001 From: Chris Date: Fri, 23 Jan 2026 10:24:15 -0500 Subject: [PATCH 035/228] Update frontend/public/json/sonobarr.json Co-authored-by: Tobias <96661824+CrazyWolf13@users.noreply.github.com> --- frontend/public/json/sonobarr.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frontend/public/json/sonobarr.json b/frontend/public/json/sonobarr.json index 614ed36cd..102bcfdb6 100644 --- a/frontend/public/json/sonobarr.json +++ b/frontend/public/json/sonobarr.json @@ -20,7 +20,7 @@ "script": "ct/sonobarr.sh", "resources": { "cpu": 1, - "ram": 1, + "ram": 1024, "hdd": 20, "os": "Debian", "version": "13" From 1329c3cb5c582642f9dd812687c7f0ad40200b24 Mon Sep 17 00:00:00 2001 From: Chris Date: Fri, 23 Jan 2026 10:24:58 -0500 Subject: [PATCH 036/228] Update frontend/public/json/sonobarr.json Co-authored-by: Tobias <96661824+CrazyWolf13@users.noreply.github.com> --- frontend/public/json/sonobarr.json | 4 ---- 1 file changed, 4 deletions(-) diff --git a/frontend/public/json/sonobarr.json b/frontend/public/json/sonobarr.json index 102bcfdb6..607c53216 100644 --- a/frontend/public/json/sonobarr.json +++ b/frontend/public/json/sonobarr.json @@ -32,10 +32,6 @@ "password": null }, "notes": [ - { - "text": "secret_key in env is randomly generated at installation, feel free to change it", - "type": "info" - }, { "text": "Change the default admin credentials", "type": "info" From 3f2ed22f2075ce052ba161f0f58aa0965f3d4385 Mon Sep 17 00:00:00 2001 From: Chris Date: Fri, 23 Jan 2026 10:26:12 -0500 Subject: [PATCH 037/228] Update install/sonobarr-install.sh Co-authored-by: Tobias <96661824+CrazyWolf13@users.noreply.github.com> --- install/sonobarr-install.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/install/sonobarr-install.sh b/install/sonobarr-install.sh index ac735a149..b2bed6d0e 100644 --- a/install/sonobarr-install.sh +++ b/install/sonobarr-install.sh @@ -28,7 +28,7 @@ echo "sonobarr_config_dir=/etc/sonobarr" >>/etc/sonobarr.env msg_ok "Set up sonobarr" msg_info "Creating Service" -cat </etc/systemd/system/sonobarr.service +cat </etc/systemd/system/sonobarr.service [Unit] Description=sonobarr Service After=network.target From a4355c5617d0719d711b00037ac0ffb89d16f8a3 Mon Sep 17 00:00:00 2001 From: Chris Date: Fri, 23 Jan 2026 10:26:37 -0500 Subject: [PATCH 038/228] Update ct/sonobarr.sh --- ct/sonobarr.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ct/sonobarr.sh b/ct/sonobarr.sh index eb90b0797..0ed453596 100644 --- a/ct/sonobarr.sh +++ b/ct/sonobarr.sh @@ -29,7 +29,7 @@ function update_script() { exit fi -PYTHON_VERSION="3.12" setup_uv + PYTHON_VERSION="3.12" setup_uv if check_for_gh_release "sonobarr" "Dodelidoo-Labs/sonobarr"; then msg_info "Stopping sonobarr" From 57fe52595ae79ca59e25f8d6b75db53eb68c37fb Mon Sep 17 00:00:00 2001 From: Chris Date: Fri, 23 Jan 2026 10:26:49 -0500 Subject: [PATCH 039/228] Update install/sonobarr-install.sh --- install/sonobarr-install.sh | 1 - 1 file changed, 1 deletion(-) diff --git a/install/sonobarr-install.sh b/install/sonobarr-install.sh index b2bed6d0e..0bdb4c036 100644 --- a/install/sonobarr-install.sh +++ b/install/sonobarr-install.sh @@ -17,7 +17,6 @@ fetch_and_deploy_gh_release "sonobarr" "Dodelidoo-Labs/sonobarr" "tarball" PYTHON_VERSION="3.12" setup_uv msg_info "Setting up sonobarr" -$STD python3 -m venv /opt/sonobarr/venv source /opt/sonobarr/venv/bin/activate $STD uv pip install --no-cache-dir -r 
/opt/sonobarr/requirements.txt mkdir -p /etc/sonobarr From 9f35cc33caed382301996525f3fc4085042edd55 Mon Sep 17 00:00:00 2001 From: Tom Frenzel Date: Sun, 25 Jan 2026 14:43:56 +0100 Subject: [PATCH 040/228] fix: add version variable to shelfmark --- ct/shelfmark.sh | 2 ++ install/shelfmark-install.sh | 2 ++ 2 files changed, 4 insertions(+) diff --git a/ct/shelfmark.sh b/ct/shelfmark.sh index f69bca3f0..3c1d1dfab 100644 --- a/ct/shelfmark.sh +++ b/ct/shelfmark.sh @@ -39,8 +39,10 @@ function update_script() { cp /opt/shelfmark/start.sh /opt/start.sh.bak CLEAN_INSTALL=1 fetch_and_deploy_gh_release "shelfmark" "calibrain/shelfmark" "tarball" "latest" "/opt/shelfmark" + RELEASE_VERSION=$(cat "$HOME/.shelfmark") msg_info "Updating Shelfmark" + sed -i "s/^RELEASE_VERSION=.*/RELEASE_VERSION=$RELEASE_VERSION/" /etc/shelfmark/.env cd /opt/shelfmark/src/frontend $STD npm ci $STD npm run build diff --git a/install/shelfmark-install.sh b/install/shelfmark-install.sh index d38645bae..729cf7d7a 100644 --- a/install/shelfmark-install.sh +++ b/install/shelfmark-install.sh @@ -22,6 +22,7 @@ NODE_VERSION="22" setup_nodejs PYTHON_VERSION="3.12" setup_uv fetch_and_deploy_gh_release "shelfmark" "calibrain/shelfmark" "tarball" "latest" "/opt/shelfmark" +RELEASE_VERSION=$(cat "$HOME/.shelfmark") msg_info "Building Shelfmark frontend" cd /opt/shelfmark/src/frontend @@ -43,6 +44,7 @@ TMP_DIR=/tmp/shelfmark ENABLE_LOGGING=true FLASK_HOST=0.0.0.0 FLASK_PORT=8084 +RELEASE_VERSION=$RELEASE_VERSION # SESSION_COOKIES_SECURE=true # CWA_DB_PATH= # USE_CF_BYPASS=true From bdca2666fc81b0cadccb4a1282b2b4a23e653d45 Mon Sep 17 00:00:00 2001 From: Stellae <80097964+StellaeAlis@users.noreply.github.com> Date: Sun, 25 Jan 2026 13:59:49 +0000 Subject: [PATCH 041/228] feat: add WriteFreely --- ct/writefreely.sh | 91 +++++++++++++++++++++++++ frontend/public/json/writefreely.json | 40 +++++++++++ install/writefreely-install.sh | 96 +++++++++++++++++++++++++++ 3 files changed, 227 insertions(+) create mode 100644 ct/writefreely.sh create mode 100644 frontend/public/json/writefreely.json create mode 100644 install/writefreely-install.sh diff --git a/ct/writefreely.sh b/ct/writefreely.sh new file mode 100644 index 000000000..29f9501fa --- /dev/null +++ b/ct/writefreely.sh @@ -0,0 +1,91 @@ +#!/usr/bin/env bash +source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func) +# Copyright (c) 2021-2026 community-scripts ORG +# Author: StellaeAlis +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://github.com/writefreely/writefreely + +# App Default Values +APP="WriteFreely" +var_tags="${var_tags:-writing}" +var_cpu="${var_cpu:-2}" +var_ram="${var_ram:-1024}" +var_disk="${var_disk:-4}" +var_os="${var_os:-debian}" +var_version="${var_version:-13}" +var_unprivileged="${var_unprivileged:-1}" + +# ============================================================================= +# CONFIGURATION GUIDE +# ============================================================================= +# APP - Display name, title case (e.g. "Koel", "Wallabag", "Actual Budget") +# var_tags - Max 2 tags, semicolon separated (e.g. 
"music;streaming", "finance") +# var_cpu - CPU cores: 1-4 typical +# var_ram - RAM in MB: 512, 1024, 2048, 4096 typical +# var_disk - Disk in GB: 4, 6, 8, 10, 20 typical +# var_os - OS: debian, ubuntu, alpine +# var_version - OS version: 12/13 (debian), 22.04/24.04 (ubuntu), 3.20/3.21 (alpine) +# var_unprivileged - 1 = unprivileged (secure, default), 0 = privileged (for docker etc.) + +header_info "$APP" +variables +color +catch_errors + +function update_script() { + header_info + check_container_storage + check_container_resources + + # Check if installation exists + if [[ ! -d /opt/writefreely ]]; then + msg_error "No ${APP} Installation Found!" + exit + fi + + # check_for_gh_release returns 0 (true) if update available, 1 (false) if not + if check_for_gh_release "writefreely" "writefreely/writefreely"; then + msg_info "Stopping Services" + systemctl stop writefreely + msg_ok "Stopped Services" + + # Backup important data before update + msg_info "Creating Backup" + mkdir -p /tmp/writefreely_backup + cp /opt/writefreely/keys /tmp/writefreely_backup/ 2>/dev/null || true + cp /opt/writefreely/config.ini /tmp/writefreely_backup/ 2>/dev/null || true + msg_ok "Created Backup" + + # CLEAN_INSTALL=1 removes old directory before extracting new version + CLEAN_INSTALL=1 fetch_and_deploy_gh_release "writefreely" "writefreely/writefreely" "prebuild" "latest" "/opt/writefreely" "writefreely_*_linux_amd64.tar.gz" + + # Restore configuration and data + msg_info "Restoring Data" + cp /tmp/writefreely_backup/config.ini /opt/writefreely/ 2>/dev/null || true + cp /tmp/writefreely_backup/keys/* /opt/writefreely/keys/ 2>/dev/null || true + rm -rf /tmp/writefreely_backup + msg_ok "Restored Data" + + # Optional: Run any post-update commands + msg_info "Running Post-Update Tasks" + cd /opt/writefreely + $STD ./writefreely db migrate + msg_ok "Ran Post-Update Tasks" + + msg_info "Starting Services" + systemctl start writefreely + msg_ok "Started Services" + + msg_ok "Updated successfully!" + fi + exit +} + +start +build_container +description + +msg_ok "Completed successfully!\n" +echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" +echo -e "${INFO}${YW} Access it using the following URL:${CL}" +echo -e "${TAB}${GATEWAY}${BGN}http://${IP}${CL}" diff --git a/frontend/public/json/writefreely.json b/frontend/public/json/writefreely.json new file mode 100644 index 000000000..345df956c --- /dev/null +++ b/frontend/public/json/writefreely.json @@ -0,0 +1,40 @@ +{ + "name": "WriteFreely", + "slug": "writefreely", + "categories": [ + 12 + ], + "date_created": "2026-01-25", + "type": "ct", + "updateable": true, + "privileged": false, + "interface_port": 80, + "documentation": "https://writefreely.org/docs", + "config_path": "/opt/writefreely/config.ini", + "website": "https://writefreely.org/", + "logo": "https://github.com/writefreely/writefreely/blob/develop/static/img/wf-sq.png?raw=true", + "description": "WriteFreely is free and open source software for easily publishing writing on the web with support for the ActivityPub protocol. 
Use it to start a personal blog — or an entire community.", + "install_methods": [ + { + "type": "default", + "script": "ct/writefreely.sh", + "resources": { + "cpu": 2, + "ram": 1024, + "hdd": 4, + "os": "Debian", + "version": "13" + } + } + ], + "default_credentials": { + "username": null, + "password": null + }, + "notes": [ + { + "text": "After installation execute `./writefreely user create --admin username:password` in the `/opt/writefreely` directory to create your user.", + "type": "info" + } + ] +} diff --git a/install/writefreely-install.sh b/install/writefreely-install.sh new file mode 100644 index 000000000..96aa76662 --- /dev/null +++ b/install/writefreely-install.sh @@ -0,0 +1,96 @@ +#!/usr/bin/env bash + +# Copyright (c) 2021-2026 community-scripts ORG +# Author: StellaeAlis +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://github.com/writefreely/writefreely + +# Import Functions and Setup +source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" +color +verb_ip6 +catch_errors +setting_up_container +network_check +update_os + +# ============================================================================= +# DEPENDENCIES +# ============================================================================= +# Only install what's actually needed - curl/sudo/mc are already in the base image + +msg_info "Installing Dependencies" +$STD apt install -y \ + crudini +msg_ok "Installed Dependencies" + +# --- Setup database --- +setup_mariadb +MARIADB_DB_NAME="writefreely" MARIADB_DB_USER="writefreely" setup_mariadb_db + +get_lxc_ip + +# --- Download and install app --- +fetch_and_deploy_gh_release "writefreely" "writefreely/writefreely" "prebuild" "latest" "/opt/writefreely" "writefreely_*_linux_amd64.tar.gz" + +msg_info "Setting up WriteFreely" +cd /opt/writefreely +$STD ./writefreely config generate +$STD ./writefreely keys generate +msg_ok "Setup WriteFreely" + +# ============================================================================= +# CONFIGURATION +# ============================================================================= + +msg_info "Configuring WriteFreely" +$STD crudini --set config.ini server port 80 +$STD crudini --set config.ini server bind $LOCAL_IP + +$STD crudini --set config.ini database username $MARIADB_DB_USER +$STD crudini --set config.ini database password $MARIADB_DB_PASS +$STD crudini --set config.ini database database $MARIADB_DB_NAME + +$STD crudini --set config.ini app host http://$LOCAL_IP:80 + +$STD ./writefreely db init +msg_ok "Configured WriteFreely" + +# ============================================================================= +# SERVICE CREATION +# ============================================================================= + +msg_info "Creating Service" +cat </etc/systemd/system/writefreely.service +[Unit] +Description=WriteFreely Service +After=syslog.target network.target + +[Service] +Type=simple +User=root +WorkingDirectory=/opt/writefreely +ExecStart=/opt/writefreely/writefreely +Restart=on-failure +RestartSec=5 + +[Install] +WantedBy=multi-user.target +EOF +systemctl enable -q --now writefreely +msg_ok "Created Service" + +# ============================================================================= +# CLEANUP & FINALIZATION +# ============================================================================= + + +msg_info "Cleaning up" +$STD rm ~/writefreely.creds +msg_ok "Cleaned up" + +motd_ssh +customize + +# cleanup_lxc handles: apt autoremove, autoclean, temp files, bash history +cleanup_lxc 
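Note on the var_* defaults in ct/writefreely.sh above: each value is declared with the var_x="${var_x:-default}" pattern, so a variable that is already set in the environment takes precedence over the script default. A minimal sketch, assuming a local checkout and local invocation of the script; the numbers are placeholders for illustration, not recommended sizing:

  # override container resources for one run without editing the script
  var_cpu=4 var_ram=2048 var_disk=8 bash ct/writefreely.sh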
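For the first-login step referenced in the writefreely.json notes above, a minimal post-install sketch; the subcommand is the one quoted in that note, and admin:changeme is a placeholder, not a credential shipped by the installer:

  cd /opt/writefreely
  # create the initial admin account as described in the JSON note
  ./writefreely user create --admin admin:changeme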
From 2fcc38614a3d511dcffa1cae89aa4ce063f2b5b2 Mon Sep 17 00:00:00 2001 From: Stellae <80097964+StellaeAlis@users.noreply.github.com> Date: Sun, 25 Jan 2026 14:22:33 +0000 Subject: [PATCH 042/228] fix: Greptile errors --- ct/writefreely.sh | 26 ++++---------------------- frontend/public/json/writefreely.json | 2 +- install/writefreely-install.sh | 21 --------------------- 3 files changed, 5 insertions(+), 44 deletions(-) diff --git a/ct/writefreely.sh b/ct/writefreely.sh index 29f9501fa..48f93a535 100644 --- a/ct/writefreely.sh +++ b/ct/writefreely.sh @@ -15,18 +15,6 @@ var_os="${var_os:-debian}" var_version="${var_version:-13}" var_unprivileged="${var_unprivileged:-1}" -# ============================================================================= -# CONFIGURATION GUIDE -# ============================================================================= -# APP - Display name, title case (e.g. "Koel", "Wallabag", "Actual Budget") -# var_tags - Max 2 tags, semicolon separated (e.g. "music;streaming", "finance") -# var_cpu - CPU cores: 1-4 typical -# var_ram - RAM in MB: 512, 1024, 2048, 4096 typical -# var_disk - Disk in GB: 4, 6, 8, 10, 20 typical -# var_os - OS: debian, ubuntu, alpine -# var_version - OS version: 12/13 (debian), 22.04/24.04 (ubuntu), 3.20/3.21 (alpine) -# var_unprivileged - 1 = unprivileged (secure, default), 0 = privileged (for docker etc.) - header_info "$APP" variables color @@ -37,36 +25,30 @@ function update_script() { check_container_storage check_container_resources - # Check if installation exists if [[ ! -d /opt/writefreely ]]; then msg_error "No ${APP} Installation Found!" exit fi - # check_for_gh_release returns 0 (true) if update available, 1 (false) if not if check_for_gh_release "writefreely" "writefreely/writefreely"; then msg_info "Stopping Services" systemctl stop writefreely msg_ok "Stopped Services" - # Backup important data before update msg_info "Creating Backup" mkdir -p /tmp/writefreely_backup - cp /opt/writefreely/keys /tmp/writefreely_backup/ 2>/dev/null || true - cp /opt/writefreely/config.ini /tmp/writefreely_backup/ 2>/dev/null || true + cp /opt/writefreely/keys /tmp/writefreely_backup/ 2>/dev/null + cp /opt/writefreely/config.ini /tmp/writefreely_backup/ 2>/dev/null msg_ok "Created Backup" - # CLEAN_INSTALL=1 removes old directory before extracting new version CLEAN_INSTALL=1 fetch_and_deploy_gh_release "writefreely" "writefreely/writefreely" "prebuild" "latest" "/opt/writefreely" "writefreely_*_linux_amd64.tar.gz" - # Restore configuration and data msg_info "Restoring Data" - cp /tmp/writefreely_backup/config.ini /opt/writefreely/ 2>/dev/null || true - cp /tmp/writefreely_backup/keys/* /opt/writefreely/keys/ 2>/dev/null || true + cp /tmp/writefreely_backup/config.ini /opt/writefreely/ 2>/dev/null + cp /tmp/writefreely_backup/keys/* /opt/writefreely/keys/ 2>/dev/null rm -rf /tmp/writefreely_backup msg_ok "Restored Data" - # Optional: Run any post-update commands msg_info "Running Post-Update Tasks" cd /opt/writefreely $STD ./writefreely db migrate diff --git a/frontend/public/json/writefreely.json b/frontend/public/json/writefreely.json index 345df956c..eec8a77c2 100644 --- a/frontend/public/json/writefreely.json +++ b/frontend/public/json/writefreely.json @@ -12,7 +12,7 @@ "documentation": "https://writefreely.org/docs", "config_path": "/opt/writefreely/config.ini", "website": "https://writefreely.org/", - "logo": "https://github.com/writefreely/writefreely/blob/develop/static/img/wf-sq.png?raw=true", + "logo": 
"https://cdn.jsdelivr.net/gh/selfhst/icons@main/png/writefreely-light.png", "description": "WriteFreely is free and open source software for easily publishing writing on the web with support for the ActivityPub protocol. Use it to start a personal blog — or an entire community.", "install_methods": [ { diff --git a/install/writefreely-install.sh b/install/writefreely-install.sh index 96aa76662..98be776cf 100644 --- a/install/writefreely-install.sh +++ b/install/writefreely-install.sh @@ -14,23 +14,16 @@ setting_up_container network_check update_os -# ============================================================================= -# DEPENDENCIES -# ============================================================================= -# Only install what's actually needed - curl/sudo/mc are already in the base image - msg_info "Installing Dependencies" $STD apt install -y \ crudini msg_ok "Installed Dependencies" -# --- Setup database --- setup_mariadb MARIADB_DB_NAME="writefreely" MARIADB_DB_USER="writefreely" setup_mariadb_db get_lxc_ip -# --- Download and install app --- fetch_and_deploy_gh_release "writefreely" "writefreely/writefreely" "prebuild" "latest" "/opt/writefreely" "writefreely_*_linux_amd64.tar.gz" msg_info "Setting up WriteFreely" @@ -39,10 +32,6 @@ $STD ./writefreely config generate $STD ./writefreely keys generate msg_ok "Setup WriteFreely" -# ============================================================================= -# CONFIGURATION -# ============================================================================= - msg_info "Configuring WriteFreely" $STD crudini --set config.ini server port 80 $STD crudini --set config.ini server bind $LOCAL_IP @@ -56,10 +45,6 @@ $STD crudini --set config.ini app host http://$LOCAL_IP:80 $STD ./writefreely db init msg_ok "Configured WriteFreely" -# ============================================================================= -# SERVICE CREATION -# ============================================================================= - msg_info "Creating Service" cat </etc/systemd/system/writefreely.service [Unit] @@ -80,11 +65,6 @@ EOF systemctl enable -q --now writefreely msg_ok "Created Service" -# ============================================================================= -# CLEANUP & FINALIZATION -# ============================================================================= - - msg_info "Cleaning up" $STD rm ~/writefreely.creds msg_ok "Cleaned up" @@ -92,5 +72,4 @@ msg_ok "Cleaned up" motd_ssh customize -# cleanup_lxc handles: apt autoremove, autoclean, temp files, bash history cleanup_lxc From a89fbda0273adae692bff101bcdffcbcc553b9be Mon Sep 17 00:00:00 2001 From: Stellae <80097964+StellaeAlis@users.noreply.github.com> Date: Mon, 26 Jan 2026 10:29:20 +0000 Subject: [PATCH 043/228] fix: review requests --- ct/writefreely.sh | 1 - frontend/public/json/writefreely.json | 2 +- install/writefreely-install.sh | 6 +----- 3 files changed, 2 insertions(+), 7 deletions(-) diff --git a/ct/writefreely.sh b/ct/writefreely.sh index 48f93a535..bb0c64c58 100644 --- a/ct/writefreely.sh +++ b/ct/writefreely.sh @@ -57,7 +57,6 @@ function update_script() { msg_info "Starting Services" systemctl start writefreely msg_ok "Started Services" - msg_ok "Updated successfully!" 
fi exit diff --git a/frontend/public/json/writefreely.json b/frontend/public/json/writefreely.json index eec8a77c2..984243807 100644 --- a/frontend/public/json/writefreely.json +++ b/frontend/public/json/writefreely.json @@ -12,7 +12,7 @@ "documentation": "https://writefreely.org/docs", "config_path": "/opt/writefreely/config.ini", "website": "https://writefreely.org/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/png/writefreely-light.png", + "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/writefreely-light.webp", "description": "WriteFreely is free and open source software for easily publishing writing on the web with support for the ActivityPub protocol. Use it to start a personal blog — or an entire community.", "install_methods": [ { diff --git a/install/writefreely-install.sh b/install/writefreely-install.sh index 98be776cf..c3a1e21a7 100644 --- a/install/writefreely-install.sh +++ b/install/writefreely-install.sh @@ -15,8 +15,7 @@ network_check update_os msg_info "Installing Dependencies" -$STD apt install -y \ - crudini +$STD apt install -y crudini msg_ok "Installed Dependencies" setup_mariadb @@ -35,13 +34,10 @@ msg_ok "Setup WriteFreely" msg_info "Configuring WriteFreely" $STD crudini --set config.ini server port 80 $STD crudini --set config.ini server bind $LOCAL_IP - $STD crudini --set config.ini database username $MARIADB_DB_USER $STD crudini --set config.ini database password $MARIADB_DB_PASS $STD crudini --set config.ini database database $MARIADB_DB_NAME - $STD crudini --set config.ini app host http://$LOCAL_IP:80 - $STD ./writefreely db init msg_ok "Configured WriteFreely" From 27693b41a82f57f4ae4a2ded08a2b6b6068c2725 Mon Sep 17 00:00:00 2001 From: "CanbiZ (MickLesk)" <47820557+MickLesk@users.noreply.github.com> Date: Mon, 26 Jan 2026 13:09:09 +0100 Subject: [PATCH 044/228] test kitchenowl --- install/kitchenowl-install.sh | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/install/kitchenowl-install.sh b/install/kitchenowl-install.sh index eb90ba9a8..f60a81698 100644 --- a/install/kitchenowl-install.sh +++ b/install/kitchenowl-install.sh @@ -17,13 +17,28 @@ msg_info "Installing Dependencies" $STD apt install -y \ nginx \ build-essential \ + gfortran \ + pkg-config \ + ninja-build \ + autoconf \ + automake \ libpq-dev \ libffi-dev \ - libssl-dev + libssl-dev \ + libpcre2-dev \ + libre2-dev \ + libxml2-dev \ + libxslt-dev \ + libopenblas-dev \ + liblapack-dev \ + zlib1g-dev \ + libjpeg62-turbo-dev \ + libsqlite3-dev \ + libexpat1-dev \ + libicu-dev msg_ok "Installed Dependencies" PYTHON_VERSION="3.14" setup_uv -import_local_ip fetch_and_deploy_gh_release "kitchenowl" "TomBursch/kitchenowl" "tarball" "latest" "/opt/kitchenowl" rm -rf /opt/kitchenowl/web fetch_and_deploy_gh_release "kitchenowl-web" "TomBursch/kitchenowl" "prebuild" "latest" "/opt/kitchenowl/web" "kitchenowl_Web.tar.gz" From 0d382b9b6a1b83f4a773374d95e95b30668163c1 Mon Sep 17 00:00:00 2001 From: tremor021 Date: Mon, 26 Jan 2026 13:14:47 +0100 Subject: [PATCH 045/228] ghost test --- ct/ghost.sh | 55 ++++++++++++++++++++++++++++++++++++++++ install/ghost-install.sh | 44 ++++++++++++++++++++++++++++++++ 2 files changed, 99 insertions(+) create mode 100644 ct/ghost.sh create mode 100644 install/ghost-install.sh diff --git a/ct/ghost.sh b/ct/ghost.sh new file mode 100644 index 000000000..31fa44237 --- /dev/null +++ b/ct/ghost.sh @@ -0,0 +1,55 @@ +#!/usr/bin/env bash +source <(curl -fsSL 
https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func) +# Copyright (c) 2021-2026 community-scripts ORG +# Author: fabrice1236 +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://ghost.org/ + +APP="Ghost" +var_tags="${var_tags:-cms;blog}" +var_cpu="${var_cpu:-2}" +var_ram="${var_ram:-1024}" +var_disk="${var_disk:-5}" +var_os="${var_os:-debian}" +var_version="${var_version:-13}" +var_unprivileged="${var_unprivileged:-1}" + +header_info "$APP" +variables +color +catch_errors + +function update_script() { + header_info + check_container_storage + check_container_resources + + setup_mariadb + NODE_VERSION="22" setup_nodejs + + msg_info "Updating Ghost" + if command -v ghost &>/dev/null; then + current_version=$(ghost version | grep 'Ghost-CLI version' | awk '{print $3}') + latest_version=$(npm show ghost-cli version) + if [ "$current_version" != "$latest_version" ]; then + msg_info "Updating ${APP} from version v${current_version} to v${latest_version}" + $STD npm install -g ghost-cli@latest + msg_ok "Updated successfully!" + else + msg_ok "${APP} is already at v${current_version}" + fi + else + msg_error "No ${APP} Installation Found!" + exit + fi + exit +} + +start +build_container +description + +msg_ok "Completed successfully!\n" +echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" +echo -e "${INFO}${YW} Access it using the following URL:${CL}" +echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:2368${CL}" diff --git a/install/ghost-install.sh b/install/ghost-install.sh new file mode 100644 index 000000000..d97de4f31 --- /dev/null +++ b/install/ghost-install.sh @@ -0,0 +1,44 @@ +#!/usr/bin/env bash + +# Copyright (c) 2021-2026 community-scripts ORG +# Author: fabrice1236 +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://ghost.org/ + +source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" +color +verb_ip6 +catch_errors +setting_up_container +network_check +update_os + +msg_info "Installing Dependencies" +$STD apt install -y \ + nginx \ + ca-certificates \ + libjemalloc2 +msg_ok "Installed Dependencies" + +setup_mariadb +MARIADB_DB_NAME="ghost" MARIADB_DB_USER="ghostuser" setup_mariadb_db +NODE_VERSION="22" setup_nodejs + +msg_info "Installing Ghost CLI" +$STD npm install ghost-cli@latest -g +msg_ok "Installed Ghost CLI" + +msg_info "Creating Service" +$STD adduser --disabled-password --gecos "Ghost user" ghost-user +$STD usermod -aG sudo ghost-user +echo "ghost-user ALL=(ALL) NOPASSWD:ALL" | tee /etc/sudoers.d/ghost-user +mkdir -p /var/www/ghost +chown -R ghost-user:ghost-user /var/www/ghost +chmod 775 /var/www/ghost +$STD sudo -u ghost-user -H sh -c "cd /var/www/ghost && ghost install --db=mysql --dbhost=localhost --dbuser=$MARIADB_DB_USER --dbpass=$MARIADB_DB_PASS --dbname=$MARIADB_DB_NAME --url=http://localhost:2368 --no-prompt --no-setup-nginx --no-setup-ssl --no-setup-mysql --enable --start --ip 0.0.0.0" +rm /etc/sudoers.d/ghost-user +msg_ok "Creating Service" + +motd_ssh +customize +cleanup_lxc From ae10fac90b1f4b6364ef8d6514fb58060c5564b3 Mon Sep 17 00:00:00 2001 From: Michel Roegl-Brunner Date: Mon, 26 Jan 2026 13:20:23 +0100 Subject: [PATCH 046/228] First Clawdbot --- ct/clawdbot.sh | 71 ++++++++++++++++++++++++++++++ frontend/public/json/clawdbot.json | 36 +++++++++++++++ install/clawdbot-install.sh | 65 +++++++++++++++++++++++++++ 3 files changed, 172 insertions(+) create mode 100644 ct/clawdbot.sh create mode 100644 
frontend/public/json/clawdbot.json create mode 100644 install/clawdbot-install.sh diff --git a/ct/clawdbot.sh b/ct/clawdbot.sh new file mode 100644 index 000000000..09aca1f73 --- /dev/null +++ b/ct/clawdbot.sh @@ -0,0 +1,71 @@ +#!/usr/bin/env bash +source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func) +# Copyright (c) 2021-2026 community-scripts ORG +# Author: michelroegl-brunner +# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE +# Source: https://github.com/clawdbot/clawdbot + +APP="Clawdbot" +var_tags="${var_tags:-ai;assistant}" +var_cpu="${var_cpu:-2}" +var_ram="${var_ram:-2048}" +var_disk="${var_disk:-8}" +var_os="${var_os:-debian}" +var_version="${var_version:-13}" +var_unprivileged="${var_unprivileged:-1}" + +header_info "$APP" +variables +color +catch_errors + +function update_script() { + header_info + check_container_storage + check_container_resources + + if [[ ! -d /opt/clawdbot ]]; then + msg_error "No ${APP} Installation Found!" + exit + fi + + if check_for_gh_release "clawdbot" "clawdbot/clawdbot"; then + msg_info "Stopping Service" + systemctl stop clawdbot + msg_ok "Stopped Service" + + msg_info "Backing up Data" + cp -r /opt/clawdbot/data /opt/clawdbot_data_backup 2>/dev/null || true + cp -r /root/.clawdbot /root/.clawdbot_backup 2>/dev/null || true + msg_ok "Backed up Data" + + CLEAN_INSTALL=1 fetch_and_deploy_gh_release "clawdbot" "clawdbot/clawdbot" + + msg_info "Rebuilding Application" + cd /opt/clawdbot + $STD npm install + msg_ok "Rebuilt Application" + + msg_info "Restoring Data" + cp -r /opt/clawdbot_data_backup/. /opt/clawdbot/data 2>/dev/null || true + cp -r /root/.clawdbot_backup/. /root/.clawdbot 2>/dev/null || true + rm -rf /opt/clawdbot_data_backup /root/.clawdbot_backup + msg_ok "Restored Data" + + msg_info "Starting Service" + systemctl start clawdbot + msg_ok "Started Service" + msg_ok "Updated successfully!" + fi + exit +} + +start +build_container +description + +msg_ok "Completed Successfully!\n" +echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" +echo -e "${INFO}${YW} Access it using the following URL:${CL}" +echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:18791${CL}" + diff --git a/frontend/public/json/clawdbot.json b/frontend/public/json/clawdbot.json new file mode 100644 index 000000000..ca21f9043 --- /dev/null +++ b/frontend/public/json/clawdbot.json @@ -0,0 +1,36 @@ +{ + "name": "Clawdbot", + "slug": "clawdbot", + "categories": [ + 20 + ], + "date_created": "2026-01-26", + "type": "ct", + "updateable": true, + "privileged": false, + "interface_port": 18791, + "documentation": "https://docs.clawd.bot/", + "website": "https://clawd.bot/", + "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/clawdbot.webp", + "config_path": "/opt/clawdbot/.env", + "description": "Your own personal AI assistant. Any OS. Any Platform. The lobster way. 
Clawdbot is a powerful AI agent framework that can be configured to work across multiple platforms and channels.", + "install_methods": [ + { + "type": "default", + "script": "ct/clawdbot.sh", + "resources": { + "cpu": 2, + "ram": 2048, + "hdd": 8, + "os": "debian", + "version": "13" + } + } + ], + "default_credentials": { + "username": null, + "password": null + }, + "notes": [] +} + diff --git a/install/clawdbot-install.sh b/install/clawdbot-install.sh new file mode 100644 index 000000000..10dfb6126 --- /dev/null +++ b/install/clawdbot-install.sh @@ -0,0 +1,65 @@ +#!/usr/bin/env bash + +# Copyright (c) 2021-2026 community-scripts ORG +# Author: michelroegl-brunner +# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE +# Source: https://github.com/clawdbot/clawdbot + +source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" +color +verb_ip6 +catch_errors +setting_up_container +network_check +update_os + +msg_info "Installing Dependencies" +$STD apt-get install -y \ + build-essential \ + git +msg_ok "Installed Dependencies" + +NODE_VERSION="24" setup_nodejs +import_local_ip + +fetch_and_deploy_gh_release "clawdbot" "clawdbot/clawdbot" + +msg_info "Installing Clawdbot" +cd /opt/clawdbot +$STD npm install +msg_ok "Installed Clawdbot" + +msg_info "Configuring Clawdbot" +mkdir -p /opt/clawdbot/data +cat </opt/clawdbot/.env +NODE_ENV=production +GATEWAY_PORT=18791 +GATEWAY_HOST=0.0.0.0 +EOF +msg_ok "Configured Clawdbot" + +msg_info "Creating Service" +cat </etc/systemd/system/clawdbot.service +[Unit] +Description=Clawdbot Service +After=network.target + +[Service] +Type=simple +User=root +WorkingDirectory=/opt/clawdbot +EnvironmentFile=/opt/clawdbot/.env +ExecStart=/usr/bin/npm start +Restart=on-failure +RestartSec=5 + +[Install] +WantedBy=multi-user.target +EOF +systemctl enable -q --now clawdbot +msg_ok "Created Service" + +motd_ssh +customize +cleanup_lxc + From fa897ddc6736f1affa547df882784c7895d3fb6d Mon Sep 17 00:00:00 2001 From: "CanbiZ (MickLesk)" <47820557+MickLesk@users.noreply.github.com> Date: Mon, 26 Jan 2026 13:21:04 +0100 Subject: [PATCH 047/228] test --- install/kitchenowl-install.sh | 4 ++-- install/languagetool-install.sh | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/install/kitchenowl-install.sh b/install/kitchenowl-install.sh index f60a81698..72d88030a 100644 --- a/install/kitchenowl-install.sh +++ b/install/kitchenowl-install.sh @@ -46,10 +46,10 @@ fetch_and_deploy_gh_release "kitchenowl-web" "TomBursch/kitchenowl" "prebuild" " msg_info "Setting up KitchenOwl" cd /opt/kitchenowl/backend #rm -f uv.lock -$STD uv sync --frozen +$STD uv sync --no-dev sed -i 's/default=True/default=False/' /opt/kitchenowl/backend/wsgi.py mkdir -p /nltk_data -$STD uv run python -m nltk.downloader -d /nltk_data averaged_perceptron_tagger_eng punkt_tab +$STD uv run python -m nltk.downloader -d /nltk_data averaged_perceptron_tagger_eng JWT_SECRET=$(openssl rand -hex 32) mkdir -p /opt/kitchenowl/data cat </opt/kitchenowl/kitchenowl.env diff --git a/install/languagetool-install.sh b/install/languagetool-install.sh index 6a118e85f..2797151fc 100644 --- a/install/languagetool-install.sh +++ b/install/languagetool-install.sh @@ -25,6 +25,7 @@ download_file "https://languagetool.org/download/LanguageTool-stable.zip" /tmp/L unzip -q /tmp/LanguageTool-stable.zip -d /opt mv /opt/LanguageTool-*/ /opt/LanguageTool/ download_file "https://dl.fbaipublicfiles.com/fasttext/supervised-models/lid.176.bin" /opt/lid.176.bin +msg_ok "" read -r -p "${TAB3}Enter language code (en, 
de, es, fr, nl) to download ngrams or press ENTER to skip: " lang_code ngram_dir="" From 327b8b531faa0fdd7303c1501f913ec00ab24184 Mon Sep 17 00:00:00 2001 From: tremor021 Date: Mon, 26 Jan 2026 13:27:42 +0100 Subject: [PATCH 048/228] add git dep --- install/ghost-install.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/install/ghost-install.sh b/install/ghost-install.sh index d97de4f31..4cee724a6 100644 --- a/install/ghost-install.sh +++ b/install/ghost-install.sh @@ -17,7 +17,8 @@ msg_info "Installing Dependencies" $STD apt install -y \ nginx \ ca-certificates \ - libjemalloc2 + libjemalloc2 \ + git msg_ok "Installed Dependencies" setup_mariadb From ef7d1927c56383d56d0d8116d22311a9f9fa7e5a Mon Sep 17 00:00:00 2001 From: Michel Roegl-Brunner Date: Mon, 26 Jan 2026 13:29:32 +0100 Subject: [PATCH 049/228] Updated Install Flow --- ct/clawdbot.sh | 45 +++++++++++++++++-------------------- install/clawdbot-install.sh | 11 ++------- 2 files changed, 22 insertions(+), 34 deletions(-) diff --git a/ct/clawdbot.sh b/ct/clawdbot.sh index 09aca1f73..dadefee2f 100644 --- a/ct/clawdbot.sh +++ b/ct/clawdbot.sh @@ -24,39 +24,34 @@ function update_script() { check_container_storage check_container_resources - if [[ ! -d /opt/clawdbot ]]; then + if ! command -v clawdbot >/dev/null 2>&1; then msg_error "No ${APP} Installation Found!" exit fi - if check_for_gh_release "clawdbot" "clawdbot/clawdbot"; then - msg_info "Stopping Service" - systemctl stop clawdbot - msg_ok "Stopped Service" + msg_info "Stopping Service" + systemctl stop clawdbot + msg_ok "Stopped Service" - msg_info "Backing up Data" - cp -r /opt/clawdbot/data /opt/clawdbot_data_backup 2>/dev/null || true - cp -r /root/.clawdbot /root/.clawdbot_backup 2>/dev/null || true - msg_ok "Backed up Data" + msg_info "Backing up Data" + cp -r /opt/clawdbot/data /opt/clawdbot_data_backup 2>/dev/null || true + cp -r /root/.clawdbot /root/.clawdbot_backup 2>/dev/null || true + msg_ok "Backed up Data" - CLEAN_INSTALL=1 fetch_and_deploy_gh_release "clawdbot" "clawdbot/clawdbot" + msg_info "Updating Clawdbot" + $STD npm install -g clawdbot@latest + msg_ok "Updated Clawdbot" - msg_info "Rebuilding Application" - cd /opt/clawdbot - $STD npm install - msg_ok "Rebuilt Application" + msg_info "Restoring Data" + cp -r /opt/clawdbot_data_backup/. /opt/clawdbot/data 2>/dev/null || true + cp -r /root/.clawdbot_backup/. /root/.clawdbot 2>/dev/null || true + rm -rf /opt/clawdbot_data_backup /root/.clawdbot_backup + msg_ok "Restored Data" - msg_info "Restoring Data" - cp -r /opt/clawdbot_data_backup/. /opt/clawdbot/data 2>/dev/null || true - cp -r /root/.clawdbot_backup/. /root/.clawdbot 2>/dev/null || true - rm -rf /opt/clawdbot_data_backup /root/.clawdbot_backup - msg_ok "Restored Data" - - msg_info "Starting Service" - systemctl start clawdbot - msg_ok "Started Service" - msg_ok "Updated successfully!" - fi + msg_info "Starting Service" + systemctl start clawdbot + msg_ok "Started Service" + msg_ok "Updated successfully!" 
exit } diff --git a/install/clawdbot-install.sh b/install/clawdbot-install.sh index 10dfb6126..861a8570f 100644 --- a/install/clawdbot-install.sh +++ b/install/clawdbot-install.sh @@ -19,16 +19,9 @@ $STD apt-get install -y \ git msg_ok "Installed Dependencies" -NODE_VERSION="24" setup_nodejs +NODE_VERSION="24" NODE_MODULE="clawdbot@latest" setup_nodejs import_local_ip -fetch_and_deploy_gh_release "clawdbot" "clawdbot/clawdbot" - -msg_info "Installing Clawdbot" -cd /opt/clawdbot -$STD npm install -msg_ok "Installed Clawdbot" - msg_info "Configuring Clawdbot" mkdir -p /opt/clawdbot/data cat </opt/clawdbot/.env @@ -49,7 +42,7 @@ Type=simple User=root WorkingDirectory=/opt/clawdbot EnvironmentFile=/opt/clawdbot/.env -ExecStart=/usr/bin/npm start +ExecStart=/usr/bin/clawdbot Restart=on-failure RestartSec=5 From 9c439c150367c9b8408b7cd02788ed60a9c0c9c7 Mon Sep 17 00:00:00 2001 From: Michel Roegl-Brunner Date: Mon, 26 Jan 2026 13:31:24 +0100 Subject: [PATCH 050/228] Updated Install Flow --- install/clawdbot-install.sh | 6 ------ 1 file changed, 6 deletions(-) diff --git a/install/clawdbot-install.sh b/install/clawdbot-install.sh index 861a8570f..374067a77 100644 --- a/install/clawdbot-install.sh +++ b/install/clawdbot-install.sh @@ -13,12 +13,6 @@ setting_up_container network_check update_os -msg_info "Installing Dependencies" -$STD apt-get install -y \ - build-essential \ - git -msg_ok "Installed Dependencies" - NODE_VERSION="24" NODE_MODULE="clawdbot@latest" setup_nodejs import_local_ip From cb525b9ec2b01f8ae9d022927b0302d5fb14e6e7 Mon Sep 17 00:00:00 2001 From: tremor021 Date: Mon, 26 Jan 2026 13:32:04 +0100 Subject: [PATCH 051/228] remove ghost --- ct/ghost.sh | 55 ---------------------------------------- install/ghost-install.sh | 45 -------------------------------- 2 files changed, 100 deletions(-) delete mode 100644 ct/ghost.sh delete mode 100644 install/ghost-install.sh diff --git a/ct/ghost.sh b/ct/ghost.sh deleted file mode 100644 index 31fa44237..000000000 --- a/ct/ghost.sh +++ /dev/null @@ -1,55 +0,0 @@ -#!/usr/bin/env bash -source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func) -# Copyright (c) 2021-2026 community-scripts ORG -# Author: fabrice1236 -# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE -# Source: https://ghost.org/ - -APP="Ghost" -var_tags="${var_tags:-cms;blog}" -var_cpu="${var_cpu:-2}" -var_ram="${var_ram:-1024}" -var_disk="${var_disk:-5}" -var_os="${var_os:-debian}" -var_version="${var_version:-13}" -var_unprivileged="${var_unprivileged:-1}" - -header_info "$APP" -variables -color -catch_errors - -function update_script() { - header_info - check_container_storage - check_container_resources - - setup_mariadb - NODE_VERSION="22" setup_nodejs - - msg_info "Updating Ghost" - if command -v ghost &>/dev/null; then - current_version=$(ghost version | grep 'Ghost-CLI version' | awk '{print $3}') - latest_version=$(npm show ghost-cli version) - if [ "$current_version" != "$latest_version" ]; then - msg_info "Updating ${APP} from version v${current_version} to v${latest_version}" - $STD npm install -g ghost-cli@latest - msg_ok "Updated successfully!" - else - msg_ok "${APP} is already at v${current_version}" - fi - else - msg_error "No ${APP} Installation Found!" 
- exit - fi - exit -} - -start -build_container -description - -msg_ok "Completed successfully!\n" -echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" -echo -e "${INFO}${YW} Access it using the following URL:${CL}" -echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:2368${CL}" diff --git a/install/ghost-install.sh b/install/ghost-install.sh deleted file mode 100644 index 4cee724a6..000000000 --- a/install/ghost-install.sh +++ /dev/null @@ -1,45 +0,0 @@ -#!/usr/bin/env bash - -# Copyright (c) 2021-2026 community-scripts ORG -# Author: fabrice1236 -# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE -# Source: https://ghost.org/ - -source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" -color -verb_ip6 -catch_errors -setting_up_container -network_check -update_os - -msg_info "Installing Dependencies" -$STD apt install -y \ - nginx \ - ca-certificates \ - libjemalloc2 \ - git -msg_ok "Installed Dependencies" - -setup_mariadb -MARIADB_DB_NAME="ghost" MARIADB_DB_USER="ghostuser" setup_mariadb_db -NODE_VERSION="22" setup_nodejs - -msg_info "Installing Ghost CLI" -$STD npm install ghost-cli@latest -g -msg_ok "Installed Ghost CLI" - -msg_info "Creating Service" -$STD adduser --disabled-password --gecos "Ghost user" ghost-user -$STD usermod -aG sudo ghost-user -echo "ghost-user ALL=(ALL) NOPASSWD:ALL" | tee /etc/sudoers.d/ghost-user -mkdir -p /var/www/ghost -chown -R ghost-user:ghost-user /var/www/ghost -chmod 775 /var/www/ghost -$STD sudo -u ghost-user -H sh -c "cd /var/www/ghost && ghost install --db=mysql --dbhost=localhost --dbuser=$MARIADB_DB_USER --dbpass=$MARIADB_DB_PASS --dbname=$MARIADB_DB_NAME --url=http://localhost:2368 --no-prompt --no-setup-nginx --no-setup-ssl --no-setup-mysql --enable --start --ip 0.0.0.0" -rm /etc/sudoers.d/ghost-user -msg_ok "Creating Service" - -motd_ssh -customize -cleanup_lxc From 602f23f92a9bf82abf1776f3664bb8ba2122d2ce Mon Sep 17 00:00:00 2001 From: Michel Roegl-Brunner Date: Mon, 26 Jan 2026 13:43:25 +0100 Subject: [PATCH 052/228] Update install --- install/clawdbot-install.sh | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/install/clawdbot-install.sh b/install/clawdbot-install.sh index 374067a77..861a8570f 100644 --- a/install/clawdbot-install.sh +++ b/install/clawdbot-install.sh @@ -13,6 +13,12 @@ setting_up_container network_check update_os +msg_info "Installing Dependencies" +$STD apt-get install -y \ + build-essential \ + git +msg_ok "Installed Dependencies" + NODE_VERSION="24" NODE_MODULE="clawdbot@latest" setup_nodejs import_local_ip From d9a075139a63538a49846e59b52e36b251e74027 Mon Sep 17 00:00:00 2001 From: Michel Roegl-Brunner Date: Mon, 26 Jan 2026 14:11:03 +0100 Subject: [PATCH 053/228] Update install --- ct/clawdbot.sh | 2 +- frontend/public/json/clawdbot.json | 10 ++++-- install/clawdbot-install.sh | 58 +++++++++++++++--------------- 3 files changed, 37 insertions(+), 33 deletions(-) diff --git a/ct/clawdbot.sh b/ct/clawdbot.sh index dadefee2f..868da8c38 100644 --- a/ct/clawdbot.sh +++ b/ct/clawdbot.sh @@ -62,5 +62,5 @@ description msg_ok "Completed Successfully!\n" echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" echo -e "${INFO}${YW} Access it using the following URL:${CL}" -echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:18791${CL}" +echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:80${CL}" diff --git a/frontend/public/json/clawdbot.json b/frontend/public/json/clawdbot.json index ca21f9043..d444b60ca 100644 --- a/frontend/public/json/clawdbot.json +++ 
b/frontend/public/json/clawdbot.json @@ -8,7 +8,7 @@ "type": "ct", "updateable": true, "privileged": false, - "interface_port": 18791, + "interface_port": 80, "documentation": "https://docs.clawd.bot/", "website": "https://clawd.bot/", "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/clawdbot.webp", @@ -31,6 +31,10 @@ "username": null, "password": null }, - "notes": [] + "notes": [ + { + "type": "info", + "text": "After install run onboarding: clawdbot onboard --install-daemon" + } + ] } - diff --git a/install/clawdbot-install.sh b/install/clawdbot-install.sh index 861a8570f..ed44b0ef3 100644 --- a/install/clawdbot-install.sh +++ b/install/clawdbot-install.sh @@ -16,41 +16,41 @@ update_os msg_info "Installing Dependencies" $STD apt-get install -y \ build-essential \ - git + git \ + nginx msg_ok "Installed Dependencies" NODE_VERSION="24" NODE_MODULE="clawdbot@latest" setup_nodejs import_local_ip -msg_info "Configuring Clawdbot" -mkdir -p /opt/clawdbot/data -cat </opt/clawdbot/.env -NODE_ENV=production -GATEWAY_PORT=18791 -GATEWAY_HOST=0.0.0.0 + +msg_info "Configuring Nginx" +cat </etc/nginx/sites-available/clawdbot +server { + listen 80; + server_name _; + + location / { + proxy_pass http://127.0.0.1:18791; + proxy_http_version 1.1; + proxy_set_header Upgrade \$http_upgrade; + proxy_set_header Connection 'upgrade'; + proxy_set_header Host \$host; + proxy_set_header X-Real-IP \$remote_addr; + proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto \$scheme; + proxy_cache_bypass \$http_upgrade; + proxy_buffering off; + proxy_read_timeout 86400s; + proxy_send_timeout 86400s; + } +} EOF -msg_ok "Configured Clawdbot" - -msg_info "Creating Service" -cat </etc/systemd/system/clawdbot.service -[Unit] -Description=Clawdbot Service -After=network.target - -[Service] -Type=simple -User=root -WorkingDirectory=/opt/clawdbot -EnvironmentFile=/opt/clawdbot/.env -ExecStart=/usr/bin/clawdbot -Restart=on-failure -RestartSec=5 - -[Install] -WantedBy=multi-user.target -EOF -systemctl enable -q --now clawdbot -msg_ok "Created Service" +ln -sf /etc/nginx/sites-available/clawdbot /etc/nginx/sites-enabled/clawdbot +rm -f /etc/nginx/sites-enabled/default +$STD nginx -t +$STD systemctl enable -q --now nginx +msg_ok "Configured Nginx" motd_ssh customize From c1f3b262ff55b15268a79df63bcb3d13552348e8 Mon Sep 17 00:00:00 2001 From: "CanbiZ (MickLesk)" <47820557+MickLesk@users.noreply.github.com> Date: Mon, 26 Jan 2026 14:17:12 +0100 Subject: [PATCH 054/228] add gpu for nodecast --- ct/nodecast-tv.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/ct/nodecast-tv.sh b/ct/nodecast-tv.sh index 202e65ecc..53ea93dd1 100644 --- a/ct/nodecast-tv.sh +++ b/ct/nodecast-tv.sh @@ -13,6 +13,7 @@ var_disk="${var_disk:-4}" var_os="${var_os:-debian}" var_version="${var_version:-13}" var_unprivileged="${var_unprivileged:-1}" +var_gpu="${var_gpu:-yes}" header_info "$APP" variables From cb5e1e1524acdc9dd5301f6344f3fb2f54e95469 Mon Sep 17 00:00:00 2001 From: "CanbiZ (MickLesk)" <47820557+MickLesk@users.noreply.github.com> Date: Mon, 26 Jan 2026 14:22:43 +0100 Subject: [PATCH 055/228] add ffmpeg --- install/nodecast-tv-install.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/install/nodecast-tv-install.sh b/install/nodecast-tv-install.sh index a9d2e3a7b..77a57e8a1 100644 --- a/install/nodecast-tv-install.sh +++ b/install/nodecast-tv-install.sh @@ -16,6 +16,10 @@ update_os fetch_and_deploy_gh_release "nodecast-tv" "technomancer702/nodecast-tv" setup_nodejs +msg_info "Installing 
Dependencies" +$STD apt install -y ffmpeg +msg_ok "Installed Dependencies" + msg_info "Installing Modules" cd /opt/nodecast-tv $STD npm install From be9a7e2fb9195bc4bac8504c7e9c7ad6afef67ed Mon Sep 17 00:00:00 2001 From: "CanbiZ (MickLesk)" <47820557+MickLesk@users.noreply.github.com> Date: Mon, 26 Jan 2026 14:28:47 +0100 Subject: [PATCH 056/228] jotty prebuild --- install/jotty-install.sh | 34 ++++++---------------------------- 1 file changed, 6 insertions(+), 28 deletions(-) diff --git a/install/jotty-install.sh b/install/jotty-install.sh index a8ce9f634..67a37edae 100644 --- a/install/jotty-install.sh +++ b/install/jotty-install.sh @@ -1,7 +1,7 @@ #!/usr/bin/env bash # Copyright (c) 2021-2026 community-scripts ORG -# Author: vhsdream +# Author: vhsdream | MickLesk # License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE # Source: https://github.com/fccview/jotty @@ -14,37 +14,16 @@ network_check update_os NODE_VERSION="22" NODE_MODULE="yarn" setup_nodejs -#fetch_and_deploy_gh_release "jotty" "fccview/jotty" "tarball" "latest" "/opt/jotty" +fetch_and_deploy_gh_release "jotty" "fccview/jotty" "prebuild" "/opt/jotty" "jotty_*_prebuild.tar.gz" + msg_info "Setup jotty" -mkdir -p /opt/jotty -wget -q https://github.com/fccview/jotty/releases/download/develop/jotty-prebuild-develop.tar.gz -O /opt/jotty.tar.gz -cd /opt -tar -xzf jotty.tar.gz - -cd /opt/jotty -# unset NODE_OPTIONS -# export NODE_OPTIONS="--max-old-space-size=3072" -# # $STD yarn --frozen-lockfiled -# # $STD yarn next telemetry disable -# # $STD yarn build - -# [ -d "public" ] && cp -r public .next/standalone/ -# [ -d "howto" ] && cp -r howto .next/standalone/ -# mkdir -p .next/standalone/.next -# cp -r .next/static .next/standalone/.next/ - -# mv .next/standalone /tmp/jotty_standalone -# rm -rf ./* .next .git .gitignore .yarn -# mv /tmp/jotty_standalone/* . -# mv /tmp/jotty_standalone/.[!.]* . 2>/dev/null || true -# rm -rf /tmp/jotty_standalone - mkdir -p data/{users,checklists,notes} cat </opt/jotty/.env NODE_ENV=production - # --- Uncomment to enable +# APP_URL=https://your-jotty-domain.com +# INTERNAL_API_URL=http://localhost:3000 # HTTPS=true # SERVE_PUBLIC_IMAGES=yes # SERVE_PUBLIC_FILES=yes @@ -57,12 +36,11 @@ NODE_ENV=production # SSO_MODE=oidc # OIDC_ISSUER= # OIDC_CLIENT_ID= -# APP_URL= # SSO_FALLBACK_LOCAL=yes # OIDC_CLIENT_SECRET=your_client_secret # OIDC_ADMIN_GROUPS=admins EOF -msg_ok "Installed ${APPLICATION}" +msg_ok "Setup jotty" msg_info "Creating Service" cat </etc/systemd/system/jotty.service From 4b1aa41aec87e78cf19207f471f083113e9797fd Mon Sep 17 00:00:00 2001 From: Michel Roegl-Brunner Date: Mon, 26 Jan 2026 14:28:07 +0100 Subject: [PATCH 057/228] Update install --- ct/clawdbot.sh | 48 +++++++++++++++++------------- frontend/public/json/clawdbot.json | 2 +- install/clawdbot-install.sh | 41 +++++++------------------ 3 files changed, 39 insertions(+), 52 deletions(-) diff --git a/ct/clawdbot.sh b/ct/clawdbot.sh index 868da8c38..6c82efe1c 100644 --- a/ct/clawdbot.sh +++ b/ct/clawdbot.sh @@ -24,34 +24,40 @@ function update_script() { check_container_storage check_container_resources - if ! command -v clawdbot >/dev/null 2>&1; then + if [[ ! -d /opt/clawdbot ]]; then msg_error "No ${APP} Installation Found!" 
exit fi - msg_info "Stopping Service" - systemctl stop clawdbot - msg_ok "Stopped Service" + if check_for_gh_release "clawdbot" "clawdbot/clawdbot"; then + msg_info "Stopping Service" + systemctl stop clawdbot + msg_ok "Stopped Service" - msg_info "Backing up Data" - cp -r /opt/clawdbot/data /opt/clawdbot_data_backup 2>/dev/null || true - cp -r /root/.clawdbot /root/.clawdbot_backup 2>/dev/null || true - msg_ok "Backed up Data" + msg_info "Backing up Data" + cp -r /opt/clawdbot/data /opt/clawdbot_data_backup 2>/dev/null || true + cp -r /root/.clawdbot /root/.clawdbot_backup 2>/dev/null || true + msg_ok "Backed up Data" - msg_info "Updating Clawdbot" - $STD npm install -g clawdbot@latest - msg_ok "Updated Clawdbot" + CLEAN_INSTALL=1 fetch_and_deploy_gh_release "clawdbot" "clawdbot/clawdbot" - msg_info "Restoring Data" - cp -r /opt/clawdbot_data_backup/. /opt/clawdbot/data 2>/dev/null || true - cp -r /root/.clawdbot_backup/. /root/.clawdbot 2>/dev/null || true - rm -rf /opt/clawdbot_data_backup /root/.clawdbot_backup - msg_ok "Restored Data" + msg_info "Rebuilding Clawdbot" + cd /opt/clawdbot + $STD pnpm install --frozen-lockfile + $STD pnpm ui:build + msg_ok "Rebuilt Clawdbot" - msg_info "Starting Service" - systemctl start clawdbot - msg_ok "Started Service" - msg_ok "Updated successfully!" + msg_info "Restoring Data" + cp -r /opt/clawdbot_data_backup/. /opt/clawdbot/data 2>/dev/null || true + cp -r /root/.clawdbot_backup/. /root/.clawdbot 2>/dev/null || true + rm -rf /opt/clawdbot_data_backup /root/.clawdbot_backup + msg_ok "Restored Data" + + msg_info "Starting Service" + systemctl start clawdbot + msg_ok "Started Service" + msg_ok "Updated successfully!" + fi exit } @@ -62,5 +68,5 @@ description msg_ok "Completed Successfully!\n" echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" echo -e "${INFO}${YW} Access it using the following URL:${CL}" -echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:80${CL}" +echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:18791${CL}" diff --git a/frontend/public/json/clawdbot.json b/frontend/public/json/clawdbot.json index d444b60ca..cc5029067 100644 --- a/frontend/public/json/clawdbot.json +++ b/frontend/public/json/clawdbot.json @@ -8,7 +8,7 @@ "type": "ct", "updateable": true, "privileged": false, - "interface_port": 80, + "interface_port": 18791, "documentation": "https://docs.clawd.bot/", "website": "https://clawd.bot/", "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/clawdbot.webp", diff --git a/install/clawdbot-install.sh b/install/clawdbot-install.sh index ed44b0ef3..f1a50cd4a 100644 --- a/install/clawdbot-install.sh +++ b/install/clawdbot-install.sh @@ -16,41 +16,22 @@ update_os msg_info "Installing Dependencies" $STD apt-get install -y \ build-essential \ - git \ - nginx + git msg_ok "Installed Dependencies" -NODE_VERSION="24" NODE_MODULE="clawdbot@latest" setup_nodejs -import_local_ip +fetch_and_deploy_gh_release "clawdbot" "clawdbot/clawdbot" +pnpm_version=$(grep -oP '"packageManager":\s*"pnpm@\K[^"]+' /opt/clawdbot/package.json 2>/dev/null || echo "latest") +NODE_VERSION="24" NODE_MODULE="pnpm@${pnpm_version}" setup_nodejs -msg_info "Configuring Nginx" -cat </etc/nginx/sites-available/clawdbot -server { - listen 80; - server_name _; +msg_info "Installing Clawdbot Dependencies" +cd /opt/clawdbot +$STD pnpm install --frozen-lockfile +msg_ok "Installed Dependencies" - location / { - proxy_pass http://127.0.0.1:18791; - proxy_http_version 1.1; - proxy_set_header Upgrade \$http_upgrade; - proxy_set_header Connection 
'upgrade'; - proxy_set_header Host \$host; - proxy_set_header X-Real-IP \$remote_addr; - proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for; - proxy_set_header X-Forwarded-Proto \$scheme; - proxy_cache_bypass \$http_upgrade; - proxy_buffering off; - proxy_read_timeout 86400s; - proxy_send_timeout 86400s; - } -} -EOF -ln -sf /etc/nginx/sites-available/clawdbot /etc/nginx/sites-enabled/clawdbot -rm -f /etc/nginx/sites-enabled/default -$STD nginx -t -$STD systemctl enable -q --now nginx -msg_ok "Configured Nginx" +msg_info "Building Clawdbot UI" +$STD pnpm ui:build +msg_ok "Built Clawdbot UI" motd_ssh customize From 447aaf0ebca6c5425a484a865369c7d2b189933a Mon Sep 17 00:00:00 2001 From: tremor021 Date: Mon, 26 Jan 2026 14:31:27 +0100 Subject: [PATCH 058/228] Cleanup --- install/minthcm-install.sh | 50 +++++++++++++------------------------- 1 file changed, 17 insertions(+), 33 deletions(-) diff --git a/install/minthcm-install.sh b/install/minthcm-install.sh index 090a31135..a0e3b8310 100644 --- a/install/minthcm-install.sh +++ b/install/minthcm-install.sh @@ -12,14 +12,13 @@ catch_errors setting_up_container network_check update_os -PHP_VERSION="8.2" -PHP_APACHE="YES" PHP_MODULE="mysql,cli,redis" PHP_FPM="YES" setup_php -setup_composer -msg_info "Enabling Apache modules (rewrite, headers)" -$STD a2enmod rewrite -$STD a2enmod headers -msg_ok "Enabled Apache modules (rewrite, headers)" +PHP_VERSION="8.2" +PHP_APACHE="YES" PHP_MODULE="mysql,redis" PHP_FPM="YES" setup_php +setup_composer +setup_mariadb +MARIADB_DB_NAME="minthcm" MARIADB_DB_USER="minthcm" setup_mariadb_db +$STD mariadb -u root -e "SET GLOBAL sql_mode='STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION'"; fetch_and_deploy_gh_release "MintHCM" "minthcm/minthcm" "tarball" "latest" "/var/www/MintHCM" @@ -37,19 +36,17 @@ mkdir -p /var/www/script cp /var/www/MintHCM/docker/script/generate_config.php /var/www/script/generate_config.php cp /var/www/MintHCM/docker/.env /var/www/script/.env chown -R www-data:www-data /var/www/script -msg_ok "Configured MintHCM" - -msg_info "Restarting Apache2" +$STD a2enmod rewrite +$STD a2enmod headers $STD systemctl restart apache2 -msg_ok "Restarted Apache2" +msg_ok "Configured MintHCM" msg_info "Setting up Elasticsearch" setup_deb822_repo \ "elasticsearch" \ "https://artifacts.elastic.co/GPG-KEY-elasticsearch" \ "https://artifacts.elastic.co/packages/7.x/apt" \ - "stable" \ - "main" + "stable" $STD apt install -y elasticsearch echo "-Xms2g" >>/etc/elasticsearch/jvm.options echo "-Xmx2g" >>/etc/elasticsearch/jvm.options @@ -57,35 +54,22 @@ $STD /usr/share/elasticsearch/bin/elasticsearch-plugin install ingest-attachment systemctl enable -q --now elasticsearch msg_ok "Set up Elasticsearch" -setup_mariadb -msg_info "Setting up MariaDB" -$STD mariadb -u root -e "SET GLOBAL sql_mode='STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION'"; -msg_ok "Set up MariaDB" - msg_info "Configuring Database" -DB_PASS=$(openssl rand -base64 18 | tr -dc 'a-zA-Z0-9' | head -c13) -$STD mariadb -u root -e "CREATE USER 'minthcm'@'localhost' IDENTIFIED BY '${DB_PASS}';" -$STD mariadb -u root -e "GRANT ALL ON *.* TO 'minthcm'@'localhost'; FLUSH PRIVILEGES;" -sed -i 's/^DB_HOST=.*/DB_HOST=localhost/' /var/www/script/.env -sed -i 's/^DB_USER=.*/DB_USER=minthcm/' /var/www/script/.env -sed -i "s/^DB_PASS=.*/DB_PASS=${DB_PASS}/" /var/www/script/.env -sed -i 's/^ELASTICSEARCH_HOST=.*/ELASTICSEARCH_HOST=localhost/' 
/var/www/script/.env -msg_ok "Configured MariaDB" -{ - echo "MintHCM DB Credentials" - echo "MariaDB User: minthcm" - echo "MariaDB Password: $DB_PASS" -} >>~/minthcm.creds +sed -i "s/^DB_HOST=.*/DB_HOST=localhost/" /var/www/script/.env +sed -i "s/^DB_USER=.*/DB_USER=$MARIADB_DB_USER/" /var/www/script/.env +sed -i "s/^DB_PASS=.*/DB_PASS=$MARIADB_DB_PASS/" /var/www/script/.env +sed -i "s/^ELASTICSEARCH_HOST=.*/ELASTICSEARCH_HOST=localhost/" /var/www/script/.env +msg_ok "Configured Database" msg_info "Generating configuration file" set -a source /var/www/script/.env set +a -php /var/www/script/generate_config.php +$STD php /var/www/script/generate_config.php msg_ok "Generated configuration file" msg_info "Installing MintHCM" -cd /var/www/MintHCM && su -s /bin/bash -c 'php /var/www/MintHCM/MintCLI install < /var/www/MintHCM/configMint4' www-data +$STD sudo -u www-data php /var/www/MintHCM/MintCLI install < /var/www/MintHCM/configMint4 printf "* * * * * cd /var/www/MintHCM/legacy; php -f cron.php > /dev/null 2>&1\n" > /var/spool/cron/crontabs/www-data service cron start rm -f /var/www/MintHCM/configMint4 From ca0a11e87adffdd248936a2eebc48a157157a290 Mon Sep 17 00:00:00 2001 From: Michel Roegl-Brunner Date: Mon, 26 Jan 2026 14:34:21 +0100 Subject: [PATCH 059/228] Update install --- ct/clawdbot.sh | 6 ------ 1 file changed, 6 deletions(-) diff --git a/ct/clawdbot.sh b/ct/clawdbot.sh index 6c82efe1c..14683a3f2 100644 --- a/ct/clawdbot.sh +++ b/ct/clawdbot.sh @@ -30,9 +30,6 @@ function update_script() { fi if check_for_gh_release "clawdbot" "clawdbot/clawdbot"; then - msg_info "Stopping Service" - systemctl stop clawdbot - msg_ok "Stopped Service" msg_info "Backing up Data" cp -r /opt/clawdbot/data /opt/clawdbot_data_backup 2>/dev/null || true @@ -53,9 +50,6 @@ function update_script() { rm -rf /opt/clawdbot_data_backup /root/.clawdbot_backup msg_ok "Restored Data" - msg_info "Starting Service" - systemctl start clawdbot - msg_ok "Started Service" msg_ok "Updated successfully!" 
fi exit From c5dfa9d403f3e8c59a0505d82dcf6b9f3823493c Mon Sep 17 00:00:00 2001 From: "CanbiZ (MickLesk)" <47820557+MickLesk@users.noreply.github.com> Date: Mon, 26 Jan 2026 14:40:06 +0100 Subject: [PATCH 060/228] add latest --- install/jotty-install.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/install/jotty-install.sh b/install/jotty-install.sh index 67a37edae..e6b88fd59 100644 --- a/install/jotty-install.sh +++ b/install/jotty-install.sh @@ -1,7 +1,7 @@ #!/usr/bin/env bash # Copyright (c) 2021-2026 community-scripts ORG -# Author: vhsdream | MickLesk +# Author: vhsdream | MickLesk # License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE # Source: https://github.com/fccview/jotty @@ -14,7 +14,7 @@ network_check update_os NODE_VERSION="22" NODE_MODULE="yarn" setup_nodejs -fetch_and_deploy_gh_release "jotty" "fccview/jotty" "prebuild" "/opt/jotty" "jotty_*_prebuild.tar.gz" +fetch_and_deploy_gh_release "jotty" "fccview/jotty" "prebuild" "latest" "/opt/jotty" "jotty_*_prebuild.tar.gz" msg_info "Setup jotty" mkdir -p data/{users,checklists,notes} From 1e4864bb887940a8f7b50e1a13c92dc9274fe34e Mon Sep 17 00:00:00 2001 From: Michel Roegl-Brunner Date: Mon, 26 Jan 2026 14:43:57 +0100 Subject: [PATCH 061/228] Update install --- ct/clawdbot.sh | 36 ++++++++++++++---------------------- install/clawdbot-install.sh | 7 +------ 2 files changed, 15 insertions(+), 28 deletions(-) diff --git a/ct/clawdbot.sh b/ct/clawdbot.sh index 14683a3f2..7f147dff9 100644 --- a/ct/clawdbot.sh +++ b/ct/clawdbot.sh @@ -24,34 +24,26 @@ function update_script() { check_container_storage check_container_resources - if [[ ! -d /opt/clawdbot ]]; then + if ! command -v clawdbot >/dev/null 2>&1; then msg_error "No ${APP} Installation Found!" exit fi - if check_for_gh_release "clawdbot" "clawdbot/clawdbot"; then + msg_info "Backing up Data" + cp -r /opt/clawdbot/data /opt/clawdbot_data_backup 2>/dev/null || true + cp -r /root/.clawdbot /root/.clawdbot_backup 2>/dev/null || true + msg_ok "Backed up Data" - msg_info "Backing up Data" - cp -r /opt/clawdbot/data /opt/clawdbot_data_backup 2>/dev/null || true - cp -r /root/.clawdbot /root/.clawdbot_backup 2>/dev/null || true - msg_ok "Backed up Data" + msg_info "Updating Clawdbot" + $STD npm install -g clawdbot@latest + msg_ok "Updated Clawdbot" - CLEAN_INSTALL=1 fetch_and_deploy_gh_release "clawdbot" "clawdbot/clawdbot" - - msg_info "Rebuilding Clawdbot" - cd /opt/clawdbot - $STD pnpm install --frozen-lockfile - $STD pnpm ui:build - msg_ok "Rebuilt Clawdbot" - - msg_info "Restoring Data" - cp -r /opt/clawdbot_data_backup/. /opt/clawdbot/data 2>/dev/null || true - cp -r /root/.clawdbot_backup/. /root/.clawdbot 2>/dev/null || true - rm -rf /opt/clawdbot_data_backup /root/.clawdbot_backup - msg_ok "Restored Data" - - msg_ok "Updated successfully!" - fi + msg_info "Restoring Data" + cp -r /opt/clawdbot_data_backup/. /opt/clawdbot/data 2>/dev/null || true + cp -r /root/.clawdbot_backup/. /root/.clawdbot 2>/dev/null || true + rm -rf /opt/clawdbot_data_backup /root/.clawdbot_backup + msg_ok "Restored Data" + msg_ok "Updated successfully!" 
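
Since this update path manages Clawdbot as a global npm package rather than a checkout under /opt, a quick hand-run check after an update (not part of the script, just a sanity check) is to ask npm what it actually resolved:

  # show the globally installed clawdbot version
  npm ls -g clawdbot
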
exit } diff --git a/install/clawdbot-install.sh b/install/clawdbot-install.sh index f1a50cd4a..6769cc16a 100644 --- a/install/clawdbot-install.sh +++ b/install/clawdbot-install.sh @@ -22,12 +22,7 @@ msg_ok "Installed Dependencies" fetch_and_deploy_gh_release "clawdbot" "clawdbot/clawdbot" pnpm_version=$(grep -oP '"packageManager":\s*"pnpm@\K[^"]+' /opt/clawdbot/package.json 2>/dev/null || echo "latest") -NODE_VERSION="24" NODE_MODULE="pnpm@${pnpm_version}" setup_nodejs - -msg_info "Installing Clawdbot Dependencies" -cd /opt/clawdbot -$STD pnpm install --frozen-lockfile -msg_ok "Installed Dependencies" +NODE_VERSION="24" NODE_MODULE="pnpm@${pnpm_version},clawdbot@latest" setup_nodejs msg_info "Building Clawdbot UI" $STD pnpm ui:build From f9a464bdce09b0eca94eb32a43ad0080402913f5 Mon Sep 17 00:00:00 2001 From: tremor021 Date: Mon, 26 Jan 2026 14:45:10 +0100 Subject: [PATCH 062/228] update minth --- install/minthcm-install.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/install/minthcm-install.sh b/install/minthcm-install.sh index a0e3b8310..ed6c8ac23 100644 --- a/install/minthcm-install.sh +++ b/install/minthcm-install.sh @@ -69,7 +69,8 @@ $STD php /var/www/script/generate_config.php msg_ok "Generated configuration file" msg_info "Installing MintHCM" -$STD sudo -u www-data php /var/www/MintHCM/MintCLI install < /var/www/MintHCM/configMint4 +cd /var/www/MintHCM +$STD sudo -u www-data php MintCLI install < /var/www/MintHCM/configMint4 printf "* * * * * cd /var/www/MintHCM/legacy; php -f cron.php > /dev/null 2>&1\n" > /var/spool/cron/crontabs/www-data service cron start rm -f /var/www/MintHCM/configMint4 From 6aacad8e30f577c4c409a8d8a1447eecc3221692 Mon Sep 17 00:00:00 2001 From: "CanbiZ (MickLesk)" <47820557+MickLesk@users.noreply.github.com> Date: Mon, 26 Jan 2026 14:54:24 +0100 Subject: [PATCH 063/228] fixes --- tools/pve/update-apps.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/pve/update-apps.sh b/tools/pve/update-apps.sh index b4977e130..c4aa0d5ff 100644 --- a/tools/pve/update-apps.sh +++ b/tools/pve/update-apps.sh @@ -64,9 +64,10 @@ END { } header_info -msg_info "Loading all possible LXC containers from Proxmox VE. This may take a few seconds..." + whiptail --backtitle "Proxmox VE Helper Scripts" --title "LXC Container Update" --yesno "This will update LXC container. Proceed?" 10 58 || exit +msg_info "Loading all possible LXC containers from Proxmox VE. This may take a few seconds..." 
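
The "features" commit a little further down (66312f05) adds var_* overrides to tools/pve/update-apps.sh so a run can skip every Whiptail prompt. A minimal sketch of a fully unattended invocation, assuming the script is executed directly on the Proxmox host as a local file and using only the variables and values documented in that commit (the storage name "local" and the entry point are placeholder assumptions):

  # back up to the "local" storage, update every running community-scripts
  # container, and answer the confirmation/reboot questions up front
  var_skip_confirm=yes \
  var_backup=yes \
  var_backup_storage=local \
  var_container=all_running \
  var_unattended=yes \
  var_auto_reboot=no \
  bash tools/pve/update-apps.sh
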
NODE=$(hostname) containers=$(pct list | tail -n +2 | awk '{print $0 " " $4}') From f9fde3a8e0cc4c0df5581f2c3e764836da05ba63 Mon Sep 17 00:00:00 2001 From: tremor021 Date: Mon, 26 Jan 2026 14:55:36 +0100 Subject: [PATCH 064/228] update minth --- install/minthcm-install.sh | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/install/minthcm-install.sh b/install/minthcm-install.sh index ed6c8ac23..817c99e5b 100644 --- a/install/minthcm-install.sh +++ b/install/minthcm-install.sh @@ -17,7 +17,6 @@ PHP_VERSION="8.2" PHP_APACHE="YES" PHP_MODULE="mysql,redis" PHP_FPM="YES" setup_php setup_composer setup_mariadb -MARIADB_DB_NAME="minthcm" MARIADB_DB_USER="minthcm" setup_mariadb_db $STD mariadb -u root -e "SET GLOBAL sql_mode='STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION'"; fetch_and_deploy_gh_release "MintHCM" "minthcm/minthcm" "tarball" "latest" "/var/www/MintHCM" @@ -55,9 +54,12 @@ systemctl enable -q --now elasticsearch msg_ok "Set up Elasticsearch" msg_info "Configuring Database" +DB_PASS=$(openssl rand -base64 18 | tr -dc 'a-zA-Z0-9' | head -c13) +$STD mariadb -u root -e "CREATE USER 'minthcm'@'localhost' IDENTIFIED BY '${DB_PASS}';" +$STD mariadb -u root -e "GRANT ALL ON *.* TO 'minthcm'@'localhost'; FLUSH PRIVILEGES;" sed -i "s/^DB_HOST=.*/DB_HOST=localhost/" /var/www/script/.env -sed -i "s/^DB_USER=.*/DB_USER=$MARIADB_DB_USER/" /var/www/script/.env -sed -i "s/^DB_PASS=.*/DB_PASS=$MARIADB_DB_PASS/" /var/www/script/.env +sed -i "s/^DB_USER=.*/DB_USER=minthcm/" /var/www/script/.env +sed -i "s/^DB_PASS=.*/DB_PASS=$DB_PASS/" /var/www/script/.env sed -i "s/^ELASTICSEARCH_HOST=.*/ELASTICSEARCH_HOST=localhost/" /var/www/script/.env msg_ok "Configured Database" From 66312f05d2e2a50a0c8a385b576c61726a78f4d6 Mon Sep 17 00:00:00 2001 From: "CanbiZ (MickLesk)" <47820557+MickLesk@users.noreply.github.com> Date: Mon, 26 Jan 2026 14:56:55 +0100 Subject: [PATCH 065/228] features --- tools/pve/update-apps.sh | 236 ++++++++++++++++++++++++++++++++++----- 1 file changed, 209 insertions(+), 27 deletions(-) diff --git a/tools/pve/update-apps.sh b/tools/pve/update-apps.sh index c4aa0d5ff..b57e7e822 100644 --- a/tools/pve/update-apps.sh +++ b/tools/pve/update-apps.sh @@ -6,6 +6,105 @@ source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/refs/heads/main/misc/core.func) +# ============================================================================= +# CONFIGURATION VARIABLES +# Set these variables to skip interactive prompts (Whiptail dialogs) +# ============================================================================= +# var_backup: Enable/disable backup before update +# Options: "yes" | "no" | "" (empty = interactive prompt) +var_backup="${var_backup:-}" + +# var_backup_storage: Storage location for backups (only used if var_backup=yes) +# Options: Storage name from /etc/pve/storage.cfg (e.g., "local", "nas-backup") +# Leave empty for interactive selection +var_backup_storage="${var_backup_storage:-}" + +# var_container: Which containers to update +# Options: +# - "all" : All containers with community-scripts tags +# - "all_running" : Only running containers with community-scripts tags +# - "all_stopped" : Only stopped containers with community-scripts tags +# - "101,102,109" : Comma-separated list of specific container IDs +# - "" : Interactive selection via Whiptail +var_container="${var_container:-}" + +# var_unattended: Run updates without user interaction inside containers +# Options: "yes" | "no" | 
"" (empty = interactive prompt) +var_unattended="${var_unattended:-}" + +# var_skip_confirm: Skip initial confirmation dialog +# Options: "yes" | "no" (default: no) +var_skip_confirm="${var_skip_confirm:-no}" + +# var_auto_reboot: Automatically reboot containers that require it after update +# Options: "yes" | "no" | "" (empty = interactive prompt) +var_auto_reboot="${var_auto_reboot:-}" + +# ============================================================================= +# JSON CONFIG EXPORT +# Run with --export-config to output current configuration as JSON +# ============================================================================= + +function export_config_json() { + cat <&2 2>&1 1>&3 | tr -d '"') +# Determine container selection based on var_container +if [[ -n "$var_container" ]]; then + case "$var_container" in + all) + # Select all containers with matching tags + CHOICE="" + for ((i=0; i<${#menu_items[@]}; i+=3)); do + CHOICE="$CHOICE ${menu_items[$i]}" + done + CHOICE=$(echo "$CHOICE" | xargs) + ;; + all_running) + # Select only running containers with matching tags + CHOICE="" + for ((i=0; i<${#menu_items[@]}; i+=3)); do + cid="${menu_items[$i]}" + if pct status "$cid" 2>/dev/null | grep -q "running"; then + CHOICE="$CHOICE $cid" + fi + done + CHOICE=$(echo "$CHOICE" | xargs) + ;; + all_stopped) + # Select only stopped containers with matching tags + CHOICE="" + for ((i=0; i<${#menu_items[@]}; i+=3)); do + cid="${menu_items[$i]}" + if pct status "$cid" 2>/dev/null | grep -q "stopped"; then + CHOICE="$CHOICE $cid" + fi + done + CHOICE=$(echo "$CHOICE" | xargs) + ;; + *) + # Assume comma-separated list of container IDs + CHOICE=$(echo "$var_container" | tr ',' ' ') + ;; + esac -if [ -z "$CHOICE" ]; then - whiptail --title "LXC Container Update" \ - --msgbox "No containers selected!" 10 60 - exit 1 + if [[ -z "$CHOICE" ]]; then + msg_error "No containers matched the selection criteria: $var_container" + exit 1 + fi + msg_ok "Selected containers: $CHOICE" +else + CHOICE=$(whiptail --title "LXC Container Update" \ + --checklist "Select LXC containers to update:" 25 60 13 \ + "${menu_items[@]}" 3>&2 2>&1 1>&3 | tr -d '"') + + if [ -z "$CHOICE" ]; then + whiptail --title "LXC Container Update" \ + --msgbox "No containers selected!" 10 60 + exit 1 + fi fi header_info -BACKUP_CHOICE="no" -if (whiptail --backtitle "Proxmox VE Helper Scripts" --title "LXC Container Update" --yesno "Do you want to backup your containers before update?" 10 58); then - BACKUP_CHOICE="yes" + +# Determine backup choice based on var_backup +if [[ -n "$var_backup" ]]; then + BACKUP_CHOICE="$var_backup" +else + BACKUP_CHOICE="no" + if (whiptail --backtitle "Proxmox VE Helper Scripts" --title "LXC Container Update" --yesno "Do you want to backup your containers before update?" 10 58); then + BACKUP_CHOICE="yes" + fi fi -UNATTENDED_UPDATE="no" -if (whiptail --backtitle "Proxmox VE Helper Scripts" --title "LXC Container Update" --yesno "Run updates unattended?" 10 58); then - UNATTENDED_UPDATE="yes" +# Determine unattended update based on var_unattended +if [[ -n "$var_unattended" ]]; then + UNATTENDED_UPDATE="$var_unattended" +else + UNATTENDED_UPDATE="no" + if (whiptail --backtitle "Proxmox VE Helper Scripts" --title "LXC Container Update" --yesno "Run updates unattended?" 
10 58); then + UNATTENDED_UPDATE="yes" + fi fi if [ "$BACKUP_CHOICE" == "yes" ]; then - #STORAGES=$(awk '/^(\S+):/ {storage=$2} /content.*backup/ {print storage}' /etc/pve/storage.cfg) get_backup_storages if [ -z "$STORAGES" ]; then - whiptail --msgbox "No storage with 'backup' found!" 8 40 + msg_error "No storage with 'backup' support found!" exit 1 fi - MENU_ITEMS=() - for STORAGE in $STORAGES; do - MENU_ITEMS+=("$STORAGE" "") - done + # Determine storage based on var_backup_storage + if [[ -n "$var_backup_storage" ]]; then + # Validate that the specified storage exists and supports backups + if echo "$STORAGES" | grep -qw "$var_backup_storage"; then + STORAGE_CHOICE="$var_backup_storage" + msg_ok "Using backup storage: $STORAGE_CHOICE" + else + msg_error "Specified backup storage '$var_backup_storage' not found or doesn't support backups!" + msg_info "Available storages: $(echo $STORAGES | tr '\n' ' ')" + exit 1 + fi + else + MENU_ITEMS=() + for STORAGE in $STORAGES; do + MENU_ITEMS+=("$STORAGE" "") + done - STORAGE_CHOICE=$(whiptail --title "Select storage device" --menu "Select a storage device (Only storage devices with 'backup' support are listed):" 15 50 5 "${MENU_ITEMS[@]}" 3>&1 1>&2 2>&3) + STORAGE_CHOICE=$(whiptail --title "Select storage device" --menu "Select a storage device (Only storage devices with 'backup' support are listed):" 15 50 5 "${MENU_ITEMS[@]}" 3>&1 1>&2 2>&3) - if [ -z "$STORAGE_CHOICE" ]; then - msg_error "No storage selected!" - exit 1 + if [ -z "$STORAGE_CHOICE" ]; then + msg_error "No storage selected!" + exit 1 + fi fi fi @@ -271,9 +442,20 @@ if [ "${#containers_needing_reboot[@]}" -gt 0 ]; then for container_name in "${containers_needing_reboot[@]}"; do echo "$container_name" done - echo -ne "${INFO} Do you wish to reboot these containers? " - read -r prompt - if [[ ${prompt,,} =~ ^(yes)$ ]]; then + + # Determine reboot choice based on var_auto_reboot + REBOOT_CHOICE="no" + if [[ -n "$var_auto_reboot" ]]; then + REBOOT_CHOICE="$var_auto_reboot" + else + echo -ne "${INFO} Do you wish to reboot these containers? 
" + read -r prompt + if [[ ${prompt,,} =~ ^(yes)$ ]]; then + REBOOT_CHOICE="yes" + fi + fi + + if [[ "$REBOOT_CHOICE" == "yes" ]]; then echo -e "${CROSS}${HOLD} ${YWB}Rebooting containers.${CL}" for container_name in "${containers_needing_reboot[@]}"; do container=$(echo $container_name | cut -d " " -f 1) From 039121af88d1d2668932b035f5460067c271917c Mon Sep 17 00:00:00 2001 From: "CanbiZ (MickLesk)" <47820557+MickLesk@users.noreply.github.com> Date: Mon, 26 Jan 2026 15:00:53 +0100 Subject: [PATCH 066/228] increase json --- frontend/public/json/update-apps.json | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/frontend/public/json/update-apps.json b/frontend/public/json/update-apps.json index 0cde58439..c94e9a327 100644 --- a/frontend/public/json/update-apps.json +++ b/frontend/public/json/update-apps.json @@ -51,6 +51,30 @@ { "text": "At the end of the update, containers requiring a reboot will be listed, and you may choose to reboot them directly.", "type": "info" + }, + { + "text": "Use `var_backup=yes|no` to enable/disable backup (skip prompt).", + "type": "info" + }, + { + "text": "Use `var_backup_storage=` to set backup storage location.", + "type": "info" + }, + { + "text": "Use `var_container=all|all_running|all_stopped|101,102,...` to select containers.", + "type": "info" + }, + { + "text": "Use `var_unattended=yes|no` to run updates without interaction.", + "type": "info" + }, + { + "text": "Use `var_skip_confirm=yes` to skip initial confirmation dialog.", + "type": "info" + }, + { + "text": "Use `var_auto_reboot=yes|no` to auto-reboot containers after update.", + "type": "info" } ] } From 870dc3a42f0b14902cbc3a74ff3d19f04278c4a1 Mon Sep 17 00:00:00 2001 From: Michel Roegl-Brunner Date: Mon, 26 Jan 2026 15:14:07 +0100 Subject: [PATCH 067/228] Update install --- install/clawdbot-install.sh | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/install/clawdbot-install.sh b/install/clawdbot-install.sh index 6769cc16a..1ed3ed574 100644 --- a/install/clawdbot-install.sh +++ b/install/clawdbot-install.sh @@ -22,7 +22,12 @@ msg_ok "Installed Dependencies" fetch_and_deploy_gh_release "clawdbot" "clawdbot/clawdbot" pnpm_version=$(grep -oP '"packageManager":\s*"pnpm@\K[^"]+' /opt/clawdbot/package.json 2>/dev/null || echo "latest") -NODE_VERSION="24" NODE_MODULE="pnpm@${pnpm_version},clawdbot@latest" setup_nodejs +NODE_VERSION="24" NODE_MODULE="pnpm@${pnpm_version}" setup_nodejs + +msg_info "Installing Clawdbot" +$STD pnpm add -g clawdbot@latest +msg_ok "Installed Clawdbot" + msg_info "Building Clawdbot UI" $STD pnpm ui:build From 96e1adbf0112201e02261537340bc8df58b8e1b8 Mon Sep 17 00:00:00 2001 From: Michel Roegl-Brunner Date: Mon, 26 Jan 2026 15:24:18 +0100 Subject: [PATCH 068/228] Update install --- install/clawdbot-install.sh | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/install/clawdbot-install.sh b/install/clawdbot-install.sh index 1ed3ed574..38839017b 100644 --- a/install/clawdbot-install.sh +++ b/install/clawdbot-install.sh @@ -24,6 +24,14 @@ fetch_and_deploy_gh_release "clawdbot" "clawdbot/clawdbot" pnpm_version=$(grep -oP '"packageManager":\s*"pnpm@\K[^"]+' /opt/clawdbot/package.json 2>/dev/null || echo "latest") NODE_VERSION="24" NODE_MODULE="pnpm@${pnpm_version}" setup_nodejs +msg_info "Setting up pnpm" +$STD pnpm setup +export PNPM_HOME="$HOME/.local/share/pnpm" +export PATH="$PNPM_HOME:$PATH" +echo 'export PNPM_HOME="$HOME/.local/share/pnpm"' >> ~/.bashrc +echo 'export PATH="$PNPM_HOME:$PATH"' >> ~/.bashrc +msg_ok "Set up 
pnpm" + msg_info "Installing Clawdbot" $STD pnpm add -g clawdbot@latest msg_ok "Installed Clawdbot" From f91ba71b8a3d0e456fed2e1b851a1fa4fc4a8a35 Mon Sep 17 00:00:00 2001 From: Joerg Heinemann Date: Mon, 26 Jan 2026 15:31:48 +0100 Subject: [PATCH 069/228] Initial commit for ebusd New script for ebusd --- ct/ebusd.sh | 43 +++++++++++++++++++++++++++ ct/headers/ebusd | 6 ++++ frontend/public/json/ebusd.json | 52 +++++++++++++++++++++++++++++++++ install/ebusd-install.sh | 39 +++++++++++++++++++++++++ 4 files changed, 140 insertions(+) create mode 100644 ct/ebusd.sh create mode 100644 ct/headers/ebusd create mode 100644 frontend/public/json/ebusd.json create mode 100644 install/ebusd-install.sh diff --git a/ct/ebusd.sh b/ct/ebusd.sh new file mode 100644 index 000000000..5ce7fd284 --- /dev/null +++ b/ct/ebusd.sh @@ -0,0 +1,43 @@ +#!/usr/bin/env bash +source <(curl -fsSL https://raw.githubusercontent.com/heinemannj/ProxmoxVE/main/misc/build.func) +# Copyright (c) 2021-2026 community-scripts ORG +# Author: Joerg Heinemann (heinemannj) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://github.com/john30/ebusd + +APP="ebusd" +var_tags="${var_tags:-automation}" +var_cpu="${var_cpu:-1}" +var_ram="${var_ram:-512}" +var_disk="${var_disk:-2}" +var_os="${var_os:-debian}" +var_version="${var_version:-13}" +var_unprivileged="${var_unprivileged:-1}" + +header_info "$APP" +variables +color +catch_errors + +function update_script() { + header_info + check_container_storage + check_container_resources + if [[ ! -f /etc/apt/sources.list.d/ebusd.sources ]]; then + msg_error "No ${APP} Installation Found!" + exit + fi + msg_info "Updating $APP LXC" + $STD apt update + $STD apt -y upgrade + msg_ok "Updated $APP LXC" + msg_ok "Updated successfully!" + exit +} + +start +build_container +description + +msg_ok "Completed successfully!\n" +echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" diff --git a/ct/headers/ebusd b/ct/headers/ebusd new file mode 100644 index 000000000..fec79a3e3 --- /dev/null +++ b/ct/headers/ebusd @@ -0,0 +1,6 @@ + __ __ + ___ / /_ _ ___________/ / + / _ \/ __ \/ / / / ___/ __ / +/ __/ /_/ / /_/ (__ ) /_/ / +\___/_.___/\__,_/____/\__,_/ + diff --git a/frontend/public/json/ebusd.json b/frontend/public/json/ebusd.json new file mode 100644 index 000000000..3db2f8542 --- /dev/null +++ b/frontend/public/json/ebusd.json @@ -0,0 +1,52 @@ +{ + "name": "ebusd", + "slug": "ebusd", + "categories": [ + 16 + ], + "date_created": "2026-01-26", + "type": "ct", + "updateable": true, + "privileged": false, + "interface_port": null, + "documentation": "https://github.com/john30/ebusd/wiki", + "website": "https://github.com/john30/ebusd", + "logo": "", + "config_path": "/etc/default/ebusd", + "description": "ebusd is a daemon for handling communication with eBUS devices connected to a 2-wire bus system ("energy bus" used by numerous heating systems).", + "install_methods": [ + { + "type": "default", + "script": "ct/ebusd.sh", + "resources": { + "cpu": 1, + "ram": 512, + "hdd": 2, + "os": "debian", + "version": "13" + } + } + ], + "default_credentials": { + "username": null, + "password": null + }, + "notes": [ + { + "text": "Instructions: \ + 1. Edit `/etc/default/ebusd` if necessary (especially if your device is not `/dev/ttyUSB0`) \ + 2. Start the daemon with `systemctl start ebusd` \ + 3. Check the log file `/var/log/ebusd.log` \ + 4. 
Make the daemon autostart with `systemctl enable ebusd`", + "type": "info" + }, + { + "text": "Working `/etc/default/ebusd` options for `ebus adapter shield v5`: EBUSD_OPTS='--pidfile=/run/ebusd.pid --latency=100 --scanconfig --configpath=https://ebus.github.io/ --accesslevel=* --pollinterval=30 --device=ens:x.x.x.x:9999 --mqtthost=x.x.x.x --mqttport=1883 --mqttuser=ha-mqtt --mqttpass=xxxxxxxx! --mqttint=/etc/ebusd/mqtt-hassio.cfg --mqttjson --mqttlog --mqttretain --mqtttopic=ebusd --log=all:notice --log=main:notice --log=bus:notice --log=update:notice --log=network:notice --log=other:notice'", + "type": "info" + }, + { + "text": "Only tested with `ebusd-25.1_amd64-bookworm.deb`!", + "type": "warning" + } + ] +} diff --git a/install/ebusd-install.sh b/install/ebusd-install.sh new file mode 100644 index 000000000..32c2eb9a2 --- /dev/null +++ b/install/ebusd-install.sh @@ -0,0 +1,39 @@ +#!/usr/bin/env bash + +# Copyright (c) 2021-2026 community-scripts ORG +# Author: Joerg Heinemann (heinemannj) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://github.com/john30/ebusd + +source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" +color +verb_ip6 +catch_errors +setting_up_container +network_check +update_os + +msg_info "Setting up ebusd Repository" +setup_deb822_repo \ + "ebusd" \ + "https://raw.githubusercontent.com/john30/ebusd-debian/master/ebusd.gpg" \ + "https://repo.ebusd.eu/apt/default/bookworm/" \ + "bookworm" \ + "main" +$STD apt update +msg_ok "ebusd Repository setup sucessfully" + +msg_info "Installing ebusd" +$STD apt install -y ebusd +msg_ok "Installed ebusd" + +msg_info "Follow below instructions to make the daemon autostart:" +msg_info "1. Edit '/etc/default/ebusd' if necessary (especially if your device is not '/dev/ttyUSB0')" +msg_info "2. Start the daemon with 'systemctl start ebusd'" +msg_info "3. Check the daemon status with 'systemctl status ebusd'" +msg_info "4. Check the log file '/var/log/ebusd.log'" +msg_info "5. 
Make the daemon autostart with 'systemctl enable ebusd'" + +motd_ssh +customize +cleanup_lxc From 7f01db5f823f03dbcc51f21b99b70a2485f35f76 Mon Sep 17 00:00:00 2001 From: Michel Roegl-Brunner Date: Mon, 26 Jan 2026 15:33:30 +0100 Subject: [PATCH 070/228] Update install --- install/clawdbot-install.sh | 17 ++--------------- 1 file changed, 2 insertions(+), 15 deletions(-) diff --git a/install/clawdbot-install.sh b/install/clawdbot-install.sh index 38839017b..b9ab2ec4f 100644 --- a/install/clawdbot-install.sh +++ b/install/clawdbot-install.sh @@ -19,27 +19,14 @@ $STD apt-get install -y \ git msg_ok "Installed Dependencies" -fetch_and_deploy_gh_release "clawdbot" "clawdbot/clawdbot" -pnpm_version=$(grep -oP '"packageManager":\s*"pnpm@\K[^"]+' /opt/clawdbot/package.json 2>/dev/null || echo "latest") -NODE_VERSION="24" NODE_MODULE="pnpm@${pnpm_version}" setup_nodejs - -msg_info "Setting up pnpm" -$STD pnpm setup -export PNPM_HOME="$HOME/.local/share/pnpm" -export PATH="$PNPM_HOME:$PATH" -echo 'export PNPM_HOME="$HOME/.local/share/pnpm"' >> ~/.bashrc -echo 'export PATH="$PNPM_HOME:$PATH"' >> ~/.bashrc -msg_ok "Set up pnpm" +NODE_VERSION="24" NODE_MODULE="pnpm@latest" setup_nodejs msg_info "Installing Clawdbot" -$STD pnpm add -g clawdbot@latest +curl -fsSL https://clawd.bot/install.sh | bash msg_ok "Installed Clawdbot" -msg_info "Building Clawdbot UI" -$STD pnpm ui:build -msg_ok "Built Clawdbot UI" motd_ssh customize From d62da49b8afa502ae0f754dbf9fe5770137a7659 Mon Sep 17 00:00:00 2001 From: Joerg Heinemann Date: Mon, 26 Jan 2026 15:54:25 +0100 Subject: [PATCH 071/228] Update ct/ebusd.sh Co-authored-by: greptile-apps[bot] <165735046+greptile-apps[bot]@users.noreply.github.com> --- ct/ebusd.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ct/ebusd.sh b/ct/ebusd.sh index 5ce7fd284..732bc8179 100644 --- a/ct/ebusd.sh +++ b/ct/ebusd.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -source <(curl -fsSL https://raw.githubusercontent.com/heinemannj/ProxmoxVE/main/misc/build.func) +source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func) # Copyright (c) 2021-2026 community-scripts ORG # Author: Joerg Heinemann (heinemannj) # License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE From 4888dbca6c47b25d13ddcdc6c3bd9b8c0d9d850a Mon Sep 17 00:00:00 2001 From: Joerg Heinemann Date: Mon, 26 Jan 2026 15:55:17 +0100 Subject: [PATCH 072/228] Update ct/ebusd.sh Co-authored-by: greptile-apps[bot] <165735046+greptile-apps[bot]@users.noreply.github.com> --- ct/ebusd.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ct/ebusd.sh b/ct/ebusd.sh index 732bc8179..2791cb696 100644 --- a/ct/ebusd.sh +++ b/ct/ebusd.sh @@ -2,7 +2,7 @@ source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func) # Copyright (c) 2021-2026 community-scripts ORG # Author: Joerg Heinemann (heinemannj) -# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE # Source: https://github.com/john30/ebusd APP="ebusd" From b945cf7543467733676c6a40c21cfd1f7415b9f1 Mon Sep 17 00:00:00 2001 From: Joerg Heinemann Date: Mon, 26 Jan 2026 15:57:30 +0100 Subject: [PATCH 073/228] Update install/ebusd-install.sh Co-authored-by: greptile-apps[bot] <165735046+greptile-apps[bot]@users.noreply.github.com> --- install/ebusd-install.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/install/ebusd-install.sh 
b/install/ebusd-install.sh index 32c2eb9a2..1abc0b12b 100644 --- a/install/ebusd-install.sh +++ b/install/ebusd-install.sh @@ -2,7 +2,7 @@ # Copyright (c) 2021-2026 community-scripts ORG # Author: Joerg Heinemann (heinemannj) -# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE # Source: https://github.com/john30/ebusd source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" From 482681222aceb4541d464444492ae6f9bdbe2ad1 Mon Sep 17 00:00:00 2001 From: Joerg Heinemann Date: Mon, 26 Jan 2026 15:58:22 +0100 Subject: [PATCH 074/228] Update install/ebusd-install.sh Co-authored-by: greptile-apps[bot] <165735046+greptile-apps[bot]@users.noreply.github.com> --- install/ebusd-install.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/install/ebusd-install.sh b/install/ebusd-install.sh index 1abc0b12b..17c21b96a 100644 --- a/install/ebusd-install.sh +++ b/install/ebusd-install.sh @@ -21,7 +21,7 @@ setup_deb822_repo \ "bookworm" \ "main" $STD apt update -msg_ok "ebusd Repository setup sucessfully" +msg_ok "ebusd Repository setup successfully" msg_info "Installing ebusd" $STD apt install -y ebusd From 8a06452794e3d0bafe37566c2a4fc217f2c50fc9 Mon Sep 17 00:00:00 2001 From: Joerg Heinemann Date: Mon, 26 Jan 2026 15:58:59 +0100 Subject: [PATCH 075/228] Update frontend/public/json/ebusd.json Co-authored-by: greptile-apps[bot] <165735046+greptile-apps[bot]@users.noreply.github.com> --- frontend/public/json/ebusd.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frontend/public/json/ebusd.json b/frontend/public/json/ebusd.json index 3db2f8542..0be1da11b 100644 --- a/frontend/public/json/ebusd.json +++ b/frontend/public/json/ebusd.json @@ -28,7 +28,7 @@ } ], "default_credentials": { - "username": null, + "username": "root", "password": null }, "notes": [ From f8124fc6a4e91bdae167051cf00053aa4441e311 Mon Sep 17 00:00:00 2001 From: vhsdream Date: Mon, 26 Jan 2026 10:03:31 -0500 Subject: [PATCH 076/228] Shelfmark: source virtualenv during update --- ct/shelfmark.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/ct/shelfmark.sh b/ct/shelfmark.sh index 3c1d1dfab..2aad4a5d0 100644 --- a/ct/shelfmark.sh +++ b/ct/shelfmark.sh @@ -49,6 +49,7 @@ function update_script() { mv /opt/shelfmark/src/frontend/dist /opt/shelfmark/frontend-dist cd /opt/shelfmark $STD uv venv -c ./venv + $STD source ./venv/bin/activate $STD uv pip install -r requirements-base.txt mv /opt/start.sh.bak /opt/start.sh msg_ok "Updated Shelfmark" From 78ed40e5bd0c3634edbf51d482bd7e6b0f3fb948 Mon Sep 17 00:00:00 2001 From: vhsdream Date: Mon, 26 Jan 2026 10:06:07 -0500 Subject: [PATCH 077/228] Shelfmark: symlink unrar-free to unrar --- install/shelfmark-install.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/install/shelfmark-install.sh b/install/shelfmark-install.sh index 729cf7d7a..8a780d891 100644 --- a/install/shelfmark-install.sh +++ b/install/shelfmark-install.sh @@ -16,6 +16,7 @@ update_os msg_info "Installing Dependencies" $STD apt install -y \ unrar-free +ln -sf /usr/bin/unrar-free /usr/bin/unrar msg_ok "Installed Dependencies" NODE_VERSION="22" setup_nodejs From 02d6b368ef9b658bba3481ffb7deee8afd7734ed Mon Sep 17 00:00:00 2001 From: vhsdream Date: Mon, 26 Jan 2026 10:23:20 -0500 Subject: [PATCH 078/228] Shelfmark: prompt to install FlareSolverr --- install/shelfmark-install.sh | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/install/shelfmark-install.sh 
b/install/shelfmark-install.sh index 8a780d891..04ffb8dd0 100644 --- a/install/shelfmark-install.sh +++ b/install/shelfmark-install.sh @@ -25,6 +25,42 @@ PYTHON_VERSION="3.12" setup_uv fetch_and_deploy_gh_release "shelfmark" "calibrain/shelfmark" "tarball" "latest" "/opt/shelfmark" RELEASE_VERSION=$(cat "$HOME/.shelfmark") +read -r -p "${TAB3}Install FlareSolverr? Choose N|n if you have an external instance y/N " fs +if [[ ${fs,,} =~ ^(y|Y|yes)$ ]]; then + fetch_and_deploy_gh_release "flaresolverr" "FlareSolverr/FlareSolverr" "prebuild" "latest" "/opt/flaresolverr" "flaresolverr_linux_x64.tar.gz" + msg_info "Installing FlareSolverr" + $STD apt install -y xvfb + setup_deb822_repo \ + "google-chrome" \ + "https://dl.google.com/linux/linux_signing_key.pub" \ + "https://dl.google.com/linux/chrome/deb/" \ + "stable" + $STD apt update + $STD apt install -y google-chrome-stable + # remove google-chrome.list added by google-chrome-stable + rm /etc/apt/sources.list.d/google-chrome.list + + cat </etc/systemd/system/flaresolverr.service +[Unit] +Description=FlareSolverr +After=network.target +[Service] +SyslogIdentifier=flaresolverr +Restart=always +RestartSec=5 +Type=simple +Environment="LOG_LEVEL=info" +Environment="CAPTCHA_SOLVER=none" +WorkingDirectory=/opt/flaresolverr +ExecStart=/opt/flaresolverr/flaresolverr +TimeoutStopSec=30 +[Install] +WantedBy=multi-user.target +EOF + systemctl enable -q --now flaresolverr + msg_ok "Installed FlareSolverr" +fi + msg_info "Building Shelfmark frontend" cd /opt/shelfmark/src/frontend $STD npm ci From 05741195c2e46e6805a6b8cc5182efb4730a55e1 Mon Sep 17 00:00:00 2001 From: "CanbiZ (MickLesk)" <47820557+MickLesk@users.noreply.github.com> Date: Mon, 26 Jan 2026 18:14:22 +0100 Subject: [PATCH 079/228] Update clawdbot-install.sh --- install/clawdbot-install.sh | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/install/clawdbot-install.sh b/install/clawdbot-install.sh index b9ab2ec4f..63556fb45 100644 --- a/install/clawdbot-install.sh +++ b/install/clawdbot-install.sh @@ -14,7 +14,7 @@ network_check update_os msg_info "Installing Dependencies" -$STD apt-get install -y \ +$STD apt install -y \ build-essential \ git msg_ok "Installed Dependencies" @@ -22,10 +22,7 @@ msg_ok "Installed Dependencies" NODE_VERSION="24" NODE_MODULE="pnpm@latest" setup_nodejs -msg_info "Installing Clawdbot" curl -fsSL https://clawd.bot/install.sh | bash -msg_ok "Installed Clawdbot" - motd_ssh From e05e9e1a2407516409a57c146a9fce6feebee9f6 Mon Sep 17 00:00:00 2001 From: vhsdream Date: Mon, 26 Jan 2026 12:44:54 -0500 Subject: [PATCH 080/228] Shelfmark: CF Bypass options during install --- ct/shelfmark.sh | 5 +- install/shelfmark-install.sh | 114 +++++++++++++++++++++++++++-------- 2 files changed, 93 insertions(+), 26 deletions(-) diff --git a/ct/shelfmark.sh b/ct/shelfmark.sh index 2aad4a5d0..487860af1 100644 --- a/ct/shelfmark.sh +++ b/ct/shelfmark.sh @@ -50,7 +50,10 @@ function update_script() { cd /opt/shelfmark $STD uv venv -c ./venv $STD source ./venv/bin/activate - $STD uv pip install -r requirements-base.txt + $STD uv pip install -r ./requirements-base.txt + if [[ $(sed -n '/_BYPASS=/s/[^=]*=//p' /etc/shelfmark/.env) == "true" ]] && [[ $(sed -n '/BYPASSER=/s/[^=]*=//p' /etc/shelfmark/.env) == "false" ]]; then + $STD uv pip install -r ./requirements-shelfmark.txt + fi mv /opt/start.sh.bak /opt/start.sh msg_ok "Updated Shelfmark" diff --git a/install/shelfmark-install.sh b/install/shelfmark-install.sh index 04ffb8dd0..a50539f3c 100644 --- a/install/shelfmark-install.sh 
+++ b/install/shelfmark-install.sh @@ -19,16 +19,72 @@ $STD apt install -y \ ln -sf /usr/bin/unrar-free /usr/bin/unrar msg_ok "Installed Dependencies" -NODE_VERSION="22" setup_nodejs -PYTHON_VERSION="3.12" setup_uv +mkdir -p /etc/shelfmark +cat </etc/shelfmark/.env +DOCKERMODE=false +CONFIG_DIR=/etc/shelfmark +TMP_DIR=/tmp/shelfmark +ENABLE_LOGGING=true +FLASK_HOST=0.0.0.0 +FLASK_PORT=8084 +# SESSION_COOKIES_SECURE=true +# CWA_DB_PATH= +USE_CF_BYPASS=true +USING_EXTERNAL_BYPASSER=false +# EXT_BYPASSER_URL= +# EXT_BYPASSER_PATH=/v1 +EOF -fetch_and_deploy_gh_release "shelfmark" "calibrain/shelfmark" "tarball" "latest" "/opt/shelfmark" -RELEASE_VERSION=$(cat "$HOME/.shelfmark") +echo "" +echo "" +echo -e "${BL}Shelfmark Deployment Type${CL}" +echo "─────────────────────────────────────────" +echo "Please choose your deployment type:" +echo "" +echo " 1) Use Shelfmark's internal captcha bypasser (default)" +echo " 2) Install FlareSolverr in this LXC" +echo " 3) Use an existing Flaresolverr LXC" +echo " 4) Disable captcha bypassing altogether (not recommended)" +echo "" -read -r -p "${TAB3}Install FlareSolverr? Choose N|n if you have an external instance y/N " fs -if [[ ${fs,,} =~ ^(y|Y|yes)$ ]]; then +read -r -p "${TAB3}Select deployment type [1]: " DEPLOYMENT_TYPE +DEPLOYMENT_TYPE="${DEPLOYMENT_TYPE:-1}" + +case "$DEPLOYMENT_TYPE" in +1) + msg_ok "Using Shelfmark's internal captcha bypasser" + ;; +2) + msg_ok "Proceeding with FlareSolverr installation" + ;; +3) + echo "" + echo -e "${BL}Use existing FlareSolverr LXC${CL}" + echo "─────────────────────────────────────────" + echo "Enter the URL/IP address with port of your Flaresolverr instance" + echo "Example: http://flaresoverr.homelab.lan:8191 or" + echo "http://192.168.10.99:8191" + echo "" + read -r -p "FlareSolverr URL: " FLARESOLVERR_URL + + if [[ -z "$FLARESOLVERR_URL" ]]; then + msg_warn "No Flaresolverr URL provided. Falling back to Shelfmark's internal bypasser." + else + FLARESOLVERR_URL="${FLARESOLVERR_URL%/}" + msg_ok "FlareSolverr URL: ${FLARESOLVERR_URL}" + fi + ;; +4) + msg_warn "Disabling captcha bypass. This may cause the majority of searches and downloads to fail." + ;; +*) + msg_warn "Invalid selection. Reverting to default (internal bypasser)!" 
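
For orientation: choosing option 3 above is intended to leave /etc/shelfmark/.env roughly like the sketch below, once the sed edits that follow this case block (as corrected by the later "fix sed" commits) flip the bypasser flags and uncomment the EXT_ lines; the URL is whatever was entered at the prompt, shown here with the example address from the dialog text:

  DOCKERMODE=false
  CONFIG_DIR=/etc/shelfmark
  TMP_DIR=/tmp/shelfmark
  ENABLE_LOGGING=true
  FLASK_HOST=0.0.0.0
  FLASK_PORT=8084
  USE_CF_BYPASS=true
  USING_EXTERNAL_BYPASSER=true
  EXT_BYPASSER_URL=http://192.168.10.99:8191
  EXT_BYPASSER_PATH=/v1
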
+ ;; +esac + +if [[ "$DEPLOYMENT_TYPE" == "2" ]]; then fetch_and_deploy_gh_release "flaresolverr" "FlareSolverr/FlareSolverr" "prebuild" "latest" "/opt/flaresolverr" "flaresolverr_linux_x64.tar.gz" - msg_info "Installing FlareSolverr" + msg_info "Installing FlareSolverr (please wait)" $STD apt install -y xvfb setup_deb822_repo \ "google-chrome" \ @@ -39,7 +95,7 @@ if [[ ${fs,,} =~ ^(y|Y|yes)$ ]]; then $STD apt install -y google-chrome-stable # remove google-chrome.list added by google-chrome-stable rm /etc/apt/sources.list.d/google-chrome.list - + sed -i '/BYPASSER=/s/false/true' /etc/shelfmark/.env cat </etc/systemd/system/flaresolverr.service [Unit] Description=FlareSolverr @@ -59,8 +115,29 @@ WantedBy=multi-user.target EOF systemctl enable -q --now flaresolverr msg_ok "Installed FlareSolverr" +elif [[ "$DEPLOYMENT_TYPE" == "3" ]]; then + sed -i -e '/BYPASSER=/s/false/true/' \ + -e '/^# EXT_/EXT_/' \ + -e "s\|_URL=.*|${FLARESOLVERR_URL}|" /etc/shelfmark/.env +elif [[ "$DEPLOYMENT_TYPE" == "4" ]]; then + sed -i '/_BYPASS=/s/true/false' /etc/shelfmark/.env +else + msg_info "Installing internal bypasser dependencies" + $STD apt install -y \ + xfvb \ + ffmpeg \ + chromium=143.0.7499.169-1~deb13u1 \ + chromium-driver=143.0.7499.169-1~deb13u1 \ + python3-tk + msg_ok "Installed internal bypasser dependencies" fi +NODE_VERSION="22" setup_nodejs +PYTHON_VERSION="3.12" setup_uv + +fetch_and_deploy_gh_release "shelfmark" "calibrain/shelfmark" "tarball" "latest" "/opt/shelfmark" +RELEASE_VERSION=$(cat "$HOME/.shelfmark") + msg_info "Building Shelfmark frontend" cd /opt/shelfmark/src/frontend $STD npm ci @@ -72,23 +149,10 @@ msg_info "Configuring Shelfmark" cd /opt/shelfmark $STD uv venv ./venv $STD source ./venv/bin/activate -$STD uv pip install -r requirements-base.txt -mkdir -p {/var/log/shelfmark,/tmp/shelfmark,/etc/shelfmark} -cat </etc/shelfmark/.env -DOCKERMODE=false -CONFIG_DIR=/etc/shelfmark -TMP_DIR=/tmp/shelfmark -ENABLE_LOGGING=true -FLASK_HOST=0.0.0.0 -FLASK_PORT=8084 -RELEASE_VERSION=$RELEASE_VERSION -# SESSION_COOKIES_SECURE=true -# CWA_DB_PATH= -# USE_CF_BYPASS=true -# USING_EXTERNAL_BYPASSER=true -# EXT_BYPASSER_URL= -# EXT_BYPASSER_PATH= -EOF +$STD uv pip install -r ./requirements-base.txt +[[ "$DEPLOYMENT_TYPE" == "1" ]] && $STD uv pip install -r ./requirements-shelfmark.txt +mkdir -p {/var/log/shelfmark,/tmp/shelfmark} +echo "$RELEASE_VERSION" >>/etc/shelfmark/.env msg_ok "Configured Shelfmark" msg_info "Creating Service and start script" From f918c81c922999a18b1e4c020874df80983694f6 Mon Sep 17 00:00:00 2001 From: vhsdream Date: Mon, 26 Jan 2026 13:06:38 -0500 Subject: [PATCH 081/228] fix start script restore --- ct/shelfmark.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ct/shelfmark.sh b/ct/shelfmark.sh index 487860af1..f7ba86a0c 100644 --- a/ct/shelfmark.sh +++ b/ct/shelfmark.sh @@ -54,7 +54,7 @@ function update_script() { if [[ $(sed -n '/_BYPASS=/s/[^=]*=//p' /etc/shelfmark/.env) == "true" ]] && [[ $(sed -n '/BYPASSER=/s/[^=]*=//p' /etc/shelfmark/.env) == "false" ]]; then $STD uv pip install -r ./requirements-shelfmark.txt fi - mv /opt/start.sh.bak /opt/start.sh + mv /opt/start.sh.bak /opt/shelfmark/start.sh msg_ok "Updated Shelfmark" msg_info "Starting Service" From 57a3d711731568a46c77e03aeadb4d1ec1e1fe54 Mon Sep 17 00:00:00 2001 From: vhsdream Date: Mon, 26 Jan 2026 13:32:52 -0500 Subject: [PATCH 082/228] fix chromium deps installation --- install/shelfmark-install.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git 
a/install/shelfmark-install.sh b/install/shelfmark-install.sh index a50539f3c..f3824ec0c 100644 --- a/install/shelfmark-install.sh +++ b/install/shelfmark-install.sh @@ -123,9 +123,10 @@ elif [[ "$DEPLOYMENT_TYPE" == "4" ]]; then sed -i '/_BYPASS=/s/true/false' /etc/shelfmark/.env else msg_info "Installing internal bypasser dependencies" - $STD apt install -y \ + $STD apt install -y --no-install-recommends \ xfvb \ ffmpeg \ + chromium-common=143.0.7499.169-1~deb13u1 \ chromium=143.0.7499.169-1~deb13u1 \ chromium-driver=143.0.7499.169-1~deb13u1 \ python3-tk From 654ffc5a2c6918014790ffa9d2171d08b73cddec Mon Sep 17 00:00:00 2001 From: vhsdream Date: Mon, 26 Jan 2026 13:46:05 -0500 Subject: [PATCH 083/228] fix typo --- install/shelfmark-install.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/install/shelfmark-install.sh b/install/shelfmark-install.sh index f3824ec0c..775a4db70 100644 --- a/install/shelfmark-install.sh +++ b/install/shelfmark-install.sh @@ -124,7 +124,7 @@ elif [[ "$DEPLOYMENT_TYPE" == "4" ]]; then else msg_info "Installing internal bypasser dependencies" $STD apt install -y --no-install-recommends \ - xfvb \ + xvfb \ ffmpeg \ chromium-common=143.0.7499.169-1~deb13u1 \ chromium=143.0.7499.169-1~deb13u1 \ From 9c03a5dcd636e579f771c9ebc39468694380df34 Mon Sep 17 00:00:00 2001 From: "CanbiZ (MickLesk)" <47820557+MickLesk@users.noreply.github.com> Date: Mon, 26 Jan 2026 21:24:54 +0100 Subject: [PATCH 084/228] Update install/ebusd-install.sh Co-authored-by: Tobias <96661824+CrazyWolf13@users.noreply.github.com> --- install/ebusd-install.sh | 3 --- 1 file changed, 3 deletions(-) diff --git a/install/ebusd-install.sh b/install/ebusd-install.sh index 17c21b96a..c43be4708 100644 --- a/install/ebusd-install.sh +++ b/install/ebusd-install.sh @@ -13,15 +13,12 @@ setting_up_container network_check update_os -msg_info "Setting up ebusd Repository" setup_deb822_repo \ "ebusd" \ "https://raw.githubusercontent.com/john30/ebusd-debian/master/ebusd.gpg" \ "https://repo.ebusd.eu/apt/default/bookworm/" \ "bookworm" \ "main" -$STD apt update -msg_ok "ebusd Repository setup successfully" msg_info "Installing ebusd" $STD apt install -y ebusd From 979057a88835cf7b1563431517ae42ecff185293 Mon Sep 17 00:00:00 2001 From: vhsdream Date: Mon, 26 Jan 2026 15:37:31 -0500 Subject: [PATCH 085/228] Shelfmark: add chromium service file --- ct/shelfmark.sh | 4 +++- install/shelfmark-install.sh | 22 ++++++++++++++++++++-- 2 files changed, 23 insertions(+), 3 deletions(-) diff --git a/ct/shelfmark.sh b/ct/shelfmark.sh index f7ba86a0c..be53f2d9a 100644 --- a/ct/shelfmark.sh +++ b/ct/shelfmark.sh @@ -35,6 +35,7 @@ function update_script() { if check_for_gh_release "shelfmark" "calibrain/shelfmark"; then msg_info "Stopping Service" systemctl stop shelfmark + [[ -f /etc/systemd/system/chromium.service ]] && systemctl stop chromium msg_ok "Stopped Service" cp /opt/shelfmark/start.sh /opt/start.sh.bak @@ -51,7 +52,7 @@ function update_script() { $STD uv venv -c ./venv $STD source ./venv/bin/activate $STD uv pip install -r ./requirements-base.txt - if [[ $(sed -n '/_BYPASS=/s/[^=]*=//p' /etc/shelfmark/.env) == "true" ]] && [[ $(sed -n '/BYPASSER=/s/[^=]*=//p' /etc/shelfmark/.env) == "false" ]]; then + if [[ $(sed -n '/_BYPASS=/s/[^=]*=//p' /etc/shelfmark/.env) == "true" ]] && [[ $(sed -n '/BYPASSER=/s/[^=]*=//p' /etc/shelfmark/.env == "false") ]]; then $STD uv pip install -r ./requirements-shelfmark.txt fi mv /opt/start.sh.bak /opt/shelfmark/start.sh @@ -59,6 +60,7 @@ function update_script() { 
msg_info "Starting Service" systemctl start shelfmark + [[ -f /etc/systemd/system/chromium.service ]] && systemctl start chromium msg_ok "Started Service" msg_ok "Updated successfully!" fi diff --git a/install/shelfmark-install.sh b/install/shelfmark-install.sh index 775a4db70..c2f3727bb 100644 --- a/install/shelfmark-install.sh +++ b/install/shelfmark-install.sh @@ -122,6 +122,7 @@ elif [[ "$DEPLOYMENT_TYPE" == "3" ]]; then elif [[ "$DEPLOYMENT_TYPE" == "4" ]]; then sed -i '/_BYPASS=/s/true/false' /etc/shelfmark/.env else + DEPLOYMENT_TYPE="1" msg_info "Installing internal bypasser dependencies" $STD apt install -y --no-install-recommends \ xvfb \ @@ -141,6 +142,7 @@ RELEASE_VERSION=$(cat "$HOME/.shelfmark") msg_info "Building Shelfmark frontend" cd /opt/shelfmark/src/frontend +echo "$RELEASE_VERSION" >>/etc/shelfmark/.env $STD npm ci $STD npm run build mv /opt/shelfmark/src/frontend/dist /opt/shelfmark/frontend-dist @@ -153,10 +155,9 @@ $STD source ./venv/bin/activate $STD uv pip install -r ./requirements-base.txt [[ "$DEPLOYMENT_TYPE" == "1" ]] && $STD uv pip install -r ./requirements-shelfmark.txt mkdir -p {/var/log/shelfmark,/tmp/shelfmark} -echo "$RELEASE_VERSION" >>/etc/shelfmark/.env msg_ok "Configured Shelfmark" -msg_info "Creating Service and start script" +msg_info "Creating Services and start script" cat </etc/systemd/system/shelfmark.service [Unit] Description=Shelfmark server @@ -174,6 +175,23 @@ RestartSec=10 WantedBy=multi-user.target EOF +if [[ "$DEPLOYMENT_TYPE" == "1" ]]; then + cat </etc/systemd/system/chromium.service +[Unit] +Description=karakeep Headless Browser +After=network.target + +[Service] +User=root +ExecStart=/usr/bin/chromium --headless --no-sandbox --disable-gpu --disable-dev-shm-usage --remote-debugging-address=127.0.0.1 --remote-debugging-port=9222 --hide-scrollbars +Restart=always + +[Install] +WantedBy=multi-user.target +EOF + systemctl enable -q --now chromium +fi + cat </opt/shelfmark/start.sh #!/usr/bin/env bash From d97313ad7884b772d345f258d6ec7046075cd71a Mon Sep 17 00:00:00 2001 From: vhsdream Date: Mon, 26 Jan 2026 16:21:20 -0500 Subject: [PATCH 086/228] Shelfmark: move flaresolverr service file creation --- install/shelfmark-install.sh | 40 +++++++++++++++++++----------------- 1 file changed, 21 insertions(+), 19 deletions(-) diff --git a/install/shelfmark-install.sh b/install/shelfmark-install.sh index c2f3727bb..d5db9dc37 100644 --- a/install/shelfmark-install.sh +++ b/install/shelfmark-install.sh @@ -95,25 +95,7 @@ if [[ "$DEPLOYMENT_TYPE" == "2" ]]; then $STD apt install -y google-chrome-stable # remove google-chrome.list added by google-chrome-stable rm /etc/apt/sources.list.d/google-chrome.list - sed -i '/BYPASSER=/s/false/true' /etc/shelfmark/.env - cat </etc/systemd/system/flaresolverr.service -[Unit] -Description=FlareSolverr -After=network.target -[Service] -SyslogIdentifier=flaresolverr -Restart=always -RestartSec=5 -Type=simple -Environment="LOG_LEVEL=info" -Environment="CAPTCHA_SOLVER=none" -WorkingDirectory=/opt/flaresolverr -ExecStart=/opt/flaresolverr/flaresolverr -TimeoutStopSec=30 -[Install] -WantedBy=multi-user.target -EOF - systemctl enable -q --now flaresolverr + sed -i '/BYPASSER=/s/false/true/' /etc/shelfmark/.env msg_ok "Installed FlareSolverr" elif [[ "$DEPLOYMENT_TYPE" == "3" ]]; then sed -i -e '/BYPASSER=/s/false/true/' \ @@ -191,6 +173,26 @@ WantedBy=multi-user.target EOF systemctl enable -q --now chromium fi +if [[ "$DEPLOYMENT_TYPE" == "2" ]]; then + cat </etc/systemd/system/flaresolverr.service +[Unit] 
+Description=FlareSolverr +After=network.target +[Service] +SyslogIdentifier=flaresolverr +Restart=always +RestartSec=5 +Type=simple +Environment="LOG_LEVEL=info" +Environment="CAPTCHA_SOLVER=none" +WorkingDirectory=/opt/flaresolverr +ExecStart=/opt/flaresolverr/flaresolverr +TimeoutStopSec=30 +[Install] +WantedBy=multi-user.target +EOF + systemctl enable -q --now flaresolverr +fi cat </opt/shelfmark/start.sh #!/usr/bin/env bash From e68e3894c39c8b61e4cc095b4a06e592cf57738a Mon Sep 17 00:00:00 2001 From: vhsdream Date: Mon, 26 Jan 2026 17:34:20 -0500 Subject: [PATCH 087/228] fix sed --- install/shelfmark-install.sh | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/install/shelfmark-install.sh b/install/shelfmark-install.sh index d5db9dc37..bf4766d6f 100644 --- a/install/shelfmark-install.sh +++ b/install/shelfmark-install.sh @@ -43,7 +43,7 @@ echo "Please choose your deployment type:" echo "" echo " 1) Use Shelfmark's internal captcha bypasser (default)" echo " 2) Install FlareSolverr in this LXC" -echo " 3) Use an existing Flaresolverr LXC" +echo " 3) Use an existing Flaresolverr/Byparr LXC" echo " 4) Disable captcha bypassing altogether (not recommended)" echo "" @@ -95,14 +95,16 @@ if [[ "$DEPLOYMENT_TYPE" == "2" ]]; then $STD apt install -y google-chrome-stable # remove google-chrome.list added by google-chrome-stable rm /etc/apt/sources.list.d/google-chrome.list - sed -i '/BYPASSER=/s/false/true/' /etc/shelfmark/.env + sed -i -e '/BYPASSER=/s/false/true/' \ + -e 's/^# EXT_/EXT_/' \ + -e "s|_URL=.*|http://localhost:8191|" /etc/shelfmark/.env msg_ok "Installed FlareSolverr" elif [[ "$DEPLOYMENT_TYPE" == "3" ]]; then sed -i -e '/BYPASSER=/s/false/true/' \ - -e '/^# EXT_/EXT_/' \ - -e "s\|_URL=.*|${FLARESOLVERR_URL}|" /etc/shelfmark/.env + -e 's/^# EXT_/EXT_/' \ + -e "s|_URL=.*|${FLARESOLVERR_URL}|" /etc/shelfmark/.env elif [[ "$DEPLOYMENT_TYPE" == "4" ]]; then - sed -i '/_BYPASS=/s/true/false' /etc/shelfmark/.env + sed -i '/_BYPASS=/s/true/false/' /etc/shelfmark/.env else DEPLOYMENT_TYPE="1" msg_info "Installing internal bypasser dependencies" From f96c53fc6a32a603523f627115f607471698fa64 Mon Sep 17 00:00:00 2001 From: vhsdream Date: Mon, 26 Jan 2026 17:42:22 -0500 Subject: [PATCH 088/228] more sed fixes --- install/shelfmark-install.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/install/shelfmark-install.sh b/install/shelfmark-install.sh index bf4766d6f..beb825556 100644 --- a/install/shelfmark-install.sh +++ b/install/shelfmark-install.sh @@ -97,12 +97,12 @@ if [[ "$DEPLOYMENT_TYPE" == "2" ]]; then rm /etc/apt/sources.list.d/google-chrome.list sed -i -e '/BYPASSER=/s/false/true/' \ -e 's/^# EXT_/EXT_/' \ - -e "s|_URL=.*|http://localhost:8191|" /etc/shelfmark/.env + -e "s|_URL=.*|_URL=http://localhost:8191|" /etc/shelfmark/.env msg_ok "Installed FlareSolverr" elif [[ "$DEPLOYMENT_TYPE" == "3" ]]; then sed -i -e '/BYPASSER=/s/false/true/' \ -e 's/^# EXT_/EXT_/' \ - -e "s|_URL=.*|${FLARESOLVERR_URL}|" /etc/shelfmark/.env + -e "s|_URL=.*|_URL=${FLARESOLVERR_URL}|" /etc/shelfmark/.env elif [[ "$DEPLOYMENT_TYPE" == "4" ]]; then sed -i '/_BYPASS=/s/true/false/' /etc/shelfmark/.env else @@ -126,7 +126,7 @@ RELEASE_VERSION=$(cat "$HOME/.shelfmark") msg_info "Building Shelfmark frontend" cd /opt/shelfmark/src/frontend -echo "$RELEASE_VERSION" >>/etc/shelfmark/.env +echo "RELEASE_VERION=${RELEASE_VERSION}" >>/etc/shelfmark/.env $STD npm ci $STD npm run build mv /opt/shelfmark/src/frontend/dist /opt/shelfmark/frontend-dist From 
797c1f4ede8d47a78059105549256529e66705c6 Mon Sep 17 00:00:00 2001
From: vhsdream
Date: Mon, 26 Jan 2026 18:22:12 -0500
Subject: [PATCH 089/228] typo

---
 install/shelfmark-install.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/install/shelfmark-install.sh b/install/shelfmark-install.sh
index beb825556..2ac3a0c16 100644
--- a/install/shelfmark-install.sh
+++ b/install/shelfmark-install.sh
@@ -126,7 +126,7 @@ RELEASE_VERSION=$(cat "$HOME/.shelfmark")
 
 msg_info "Building Shelfmark frontend"
 cd /opt/shelfmark/src/frontend
-echo "RELEASE_VERION=${RELEASE_VERSION}" >>/etc/shelfmark/.env
+echo "RELEASE_VERSION=${RELEASE_VERSION}" >>/etc/shelfmark/.env
 $STD npm ci
 $STD npm run build
 mv /opt/shelfmark/src/frontend/dist /opt/shelfmark/frontend-dist

From 9dac0b03a318bf32746f88d272750db5bd9594df Mon Sep 17 00:00:00 2001
From: vhsdream
Date: Mon, 26 Jan 2026 18:38:14 -0500
Subject: [PATCH 090/228] Shelfmark: use `KillMode=mixed` to ensure speedy shutdown/restart

---
 install/shelfmark-install.sh | 1 +
 1 file changed, 1 insertion(+)

diff --git a/install/shelfmark-install.sh b/install/shelfmark-install.sh
index 2ac3a0c16..e327caf44 100644
--- a/install/shelfmark-install.sh
+++ b/install/shelfmark-install.sh
@@ -154,6 +154,7 @@ EnvironmentFile=/etc/shelfmark/.env
 ExecStart=/usr/bin/bash /opt/shelfmark/start.sh
 Restart=always
 RestartSec=10
+KillMode=mixed
 
 [Install]
 WantedBy=multi-user.target

From 7f68c5818195f071177220f8f6b6016df9b15c82 Mon Sep 17 00:00:00 2001
From: vhsdream
Date: Mon, 26 Jan 2026 19:22:04 -0500
Subject: [PATCH 091/228] Opencloud: bump version to 5.0.0

---
 ct/opencloud.sh              | 2 +-
 install/opencloud-install.sh | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/ct/opencloud.sh b/ct/opencloud.sh
index 6c047fa6d..2f1925421 100644
--- a/ct/opencloud.sh
+++ b/ct/opencloud.sh
@@ -29,7 +29,7 @@ function update_script() {
     exit
   fi
 
-  RELEASE="v4.1.0"
+  RELEASE="v5.0.0"
   if check_for_gh_release "opencloud" "opencloud-eu/opencloud" "${RELEASE}"; then
     msg_info "Stopping services"
     systemctl stop opencloud opencloud-wopi
diff --git a/install/opencloud-install.sh b/install/opencloud-install.sh
index 9159b42c5..39b579429 100644
--- a/install/opencloud-install.sh
+++ b/install/opencloud-install.sh
@@ -57,7 +57,7 @@ echo "$COOLPASS" >~/.coolpass
 msg_ok "Installed Collabora Online"
 
 # OpenCloud
-fetch_and_deploy_gh_release "opencloud" "opencloud-eu/opencloud" "singlefile" "v4.1.0" "/usr/bin" "opencloud-*-linux-amd64"
+fetch_and_deploy_gh_release "opencloud" "opencloud-eu/opencloud" "singlefile" "v5.0.0" "/usr/bin" "opencloud-*-linux-amd64"
 
 msg_info "Configuring OpenCloud"
 DATA_DIR="/var/lib/opencloud/"

From 2371ae3849efa2f9a2d0974424fa3c2de76d9718 Mon Sep 17 00:00:00 2001
From: Joerg Heinemann
Date: Tue, 27 Jan 2026 06:42:40 +0100
Subject: [PATCH 092/228] Update frontend/public/json/ebusd.json

Co-authored-by: Tobias <96661824+CrazyWolf13@users.noreply.github.com>
---
 frontend/public/json/ebusd.json | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/frontend/public/json/ebusd.json b/frontend/public/json/ebusd.json
index 0be1da11b..96d94b7a8 100644
--- a/frontend/public/json/ebusd.json
+++ b/frontend/public/json/ebusd.json
@@ -44,9 +44,5 @@
       "text": "Working `/etc/default/ebusd` options for `ebus adapter shield v5`: EBUSD_OPTS='--pidfile=/run/ebusd.pid --latency=100 --scanconfig --configpath=https://ebus.github.io/ --accesslevel=* --pollinterval=30 --device=ens:x.x.x.x:9999 --mqtthost=x.x.x.x --mqttport=1883 --mqttuser=ha-mqtt --mqttpass=xxxxxxxx! --mqttint=/etc/ebusd/mqtt-hassio.cfg --mqttjson --mqttlog --mqttretain --mqtttopic=ebusd --log=all:notice --log=main:notice --log=bus:notice --log=update:notice --log=network:notice --log=other:notice'",
       "type": "info"
     },
-    {
-      "text": "Only tested with `ebusd-25.1_amd64-bookworm.deb`!",
-      "type": "warning"
-    }
   ]
 }

From dfeb8a8e1fe84f3cfc3bc6b711c38d3cae4136f6 Mon Sep 17 00:00:00 2001
From: Joerg Heinemann
Date: Tue, 27 Jan 2026 06:47:34 +0100
Subject: [PATCH 093/228] Update ct/ebusd.sh

Co-authored-by: Tobias <96661824+CrazyWolf13@users.noreply.github.com>
---
 ct/ebusd.sh | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/ct/ebusd.sh b/ct/ebusd.sh
index 2791cb696..63f16f512 100644
--- a/ct/ebusd.sh
+++ b/ct/ebusd.sh
@@ -27,10 +27,10 @@ function update_script() {
     msg_error "No ${APP} Installation Found!"
     exit
   fi
-  msg_info "Updating $APP LXC"
   $STD apt update
-  $STD apt -y upgrade
-  msg_ok "Updated $APP LXC"
+  msg_info "Updating ebusd"
   $STD apt update
+  $STD apt -y --upgrade ebusd
+  msg_ok "Updated ebusd"
   msg_ok "Updated successfully!"
   exit
 }

From 0e87e5f76955a31d795d1857d5ae3e1feee41fe9 Mon Sep 17 00:00:00 2001
From: Joerg Heinemann
Date: Tue, 27 Jan 2026 06:54:24 +0100
Subject: [PATCH 094/228] Update description format in ebusd.json

---
 frontend/public/json/ebusd.json | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/frontend/public/json/ebusd.json b/frontend/public/json/ebusd.json
index 96d94b7a8..b902ea7a2 100644
--- a/frontend/public/json/ebusd.json
+++ b/frontend/public/json/ebusd.json
@@ -13,7 +13,7 @@
   "website": "https://github.com/john30/ebusd",
   "logo": "",
   "config_path": "/etc/default/ebusd",
-  "description": "ebusd is a daemon for handling communication with eBUS devices connected to a 2-wire bus system ("energy bus" used by numerous heating systems).",
+  "description": "ebusd is a daemon for handling communication with eBUS devices connected to a 2-wire `energy bus` used by numerous heating systems.",
   "install_methods": [
     {
       "type": "default",

From 6f9a1965f92dfa73637c37f60adc24ecfd8b7eb8 Mon Sep 17 00:00:00 2001
From: "CanbiZ (MickLesk)" <47820557+MickLesk@users.noreply.github.com>
Date: Tue, 27 Jan 2026 09:29:22 +0100
Subject: [PATCH 095/228] Merge from VE

---
 misc/api.func   | 155 ++++++++++++++++++++++++++++--------------------
 misc/build.func | 135 ++++++++--------------------------------
 misc/core.func  | 140 ++++++++++++++++++++++++-------------------
 3 files changed, 193 insertions(+), 237 deletions(-)

diff --git a/misc/api.func b/misc/api.func
index f6f284bec..657d786ba 100644
--- a/misc/api.func
+++ b/misc/api.func
@@ -1,6 +1,6 @@
 # Copyright (c) 2021-2026 community-scripts ORG
 # Author: michelroegl-brunner
-# License: MIT | https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/LICENSE
+# License: MIT | https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/LICENSE
 
 # ==============================================================================
 # API.FUNC - TELEMETRY & DIAGNOSTICS API
@@ -36,14 +36,16 @@
 #
 # - Maps numeric exit codes to human-readable error descriptions
 # - Supports:
-#   * Generic/Shell errors (1, 2, 126, 127, 128, 130, 137, 139, 143)
-#   * Package manager errors (APT, DPKG: 100, 101, 255)
-#   * Node.js/npm errors (243-249, 254)
-#   * Python/pip/uv errors (210-212)
-#   * PostgreSQL errors (231-234)
-#   * MySQL/MariaDB errors (241-244)
-#   * MongoDB errors (251-254)
+#   * Generic/Shell errors (1, 2, 124, 126-130, 134, 137, 139, 141, 143)
+#   * curl/wget errors (6, 7, 22, 28, 35)
+#   * Package manager errors 
(APT, DPKG: 100-102, 255) +# * Systemd/Service errors (150-154) +# * Python/pip/uv errors (160-162) +# * PostgreSQL errors (170-173) +# * MySQL/MariaDB errors (180-183) +# * MongoDB errors (190-193) # * Proxmox custom codes (200-231) +# * Node.js/npm errors (243, 245-249) # - Returns description string for given exit code # - Shared function with error_handler.func for consistency # ------------------------------------------------------------------------------ @@ -53,73 +55,98 @@ explain_exit_code() { # --- Generic / Shell --- 1) echo "General error / Operation not permitted" ;; 2) echo "Misuse of shell builtins (e.g. syntax error)" ;; - 126) echo "Command invoked cannot execute (permission problem?)" ;; - 127) echo "Command not found" ;; - 128) echo "Invalid argument to exit" ;; - 130) echo "Terminated by Ctrl+C (SIGINT)" ;; - 137) echo "Killed (SIGKILL / Out of memory?)" ;; - 139) echo "Segmentation fault (core dumped)" ;; - 143) echo "Terminated (SIGTERM)" ;; + + # --- curl / wget errors (commonly seen in downloads) --- + 6) echo "curl: DNS resolution failed (could not resolve host)" ;; + 7) echo "curl: Failed to connect (network unreachable / host down)" ;; + 22) echo "curl: HTTP error returned (404, 429, 500+)" ;; + 28) echo "curl: Operation timeout (network slow or server not responding)" ;; + 35) echo "curl: SSL/TLS handshake failed (certificate error)" ;; # --- Package manager / APT / DPKG --- 100) echo "APT: Package manager error (broken packages / dependency problems)" ;; 101) echo "APT: Configuration error (bad sources.list, malformed config)" ;; - 255) echo "DPKG: Fatal internal error" ;; + 102) echo "APT: Lock held by another process (dpkg/apt still running)" ;; - # --- Node.js / npm / pnpm / yarn --- + # --- Common shell/system errors --- + 124) echo "Command timed out (timeout command)" ;; + 126) echo "Command invoked cannot execute (permission problem?)" ;; + 127) echo "Command not found" ;; + 128) echo "Invalid argument to exit" ;; + 130) echo "Terminated by Ctrl+C (SIGINT)" ;; + 134) echo "Process aborted (SIGABRT - possibly Node.js heap overflow)" ;; + 137) echo "Killed (SIGKILL / Out of memory?)" ;; + 139) echo "Segmentation fault (core dumped)" ;; + 141) echo "Broken pipe (SIGPIPE - output closed prematurely)" ;; + 143) echo "Terminated (SIGTERM)" ;; + + # --- Systemd / Service errors (150-154) --- + 150) echo "Systemd: Service failed to start" ;; + 151) echo "Systemd: Service unit not found" ;; + 152) echo "Permission denied (EACCES)" ;; + 153) echo "Build/compile failed (make/gcc/cmake)" ;; + 154) echo "Node.js: Native addon build failed (node-gyp)" ;; + + # --- Python / pip / uv (160-162) --- + 160) echo "Python: Virtualenv / uv environment missing or broken" ;; + 161) echo "Python: Dependency resolution failed" ;; + 162) echo "Python: Installation aborted (permissions or EXTERNALLY-MANAGED)" ;; + + # --- PostgreSQL (170-173) --- + 170) echo "PostgreSQL: Connection failed (server not running / wrong socket)" ;; + 171) echo "PostgreSQL: Authentication failed (bad user/password)" ;; + 172) echo "PostgreSQL: Database does not exist" ;; + 173) echo "PostgreSQL: Fatal error in query / syntax" ;; + + # --- MySQL / MariaDB (180-183) --- + 180) echo "MySQL/MariaDB: Connection failed (server not running / wrong socket)" ;; + 181) echo "MySQL/MariaDB: Authentication failed (bad user/password)" ;; + 182) echo "MySQL/MariaDB: Database does not exist" ;; + 183) echo "MySQL/MariaDB: Fatal error in query / syntax" ;; + + # --- MongoDB (190-193) --- + 190) echo "MongoDB: Connection 
failed (server not running)" ;; + 191) echo "MongoDB: Authentication failed (bad user/password)" ;; + 192) echo "MongoDB: Database not found" ;; + 193) echo "MongoDB: Fatal query error" ;; + + # --- Proxmox Custom Codes (200-231) --- + 200) echo "Proxmox: Failed to create lock file" ;; + 203) echo "Proxmox: Missing CTID variable" ;; + 204) echo "Proxmox: Missing PCT_OSTYPE variable" ;; + 205) echo "Proxmox: Invalid CTID (<100)" ;; + 206) echo "Proxmox: CTID already in use" ;; + 207) echo "Proxmox: Password contains unescaped special characters" ;; + 208) echo "Proxmox: Invalid configuration (DNS/MAC/Network format)" ;; + 209) echo "Proxmox: Container creation failed" ;; + 210) echo "Proxmox: Cluster not quorate" ;; + 211) echo "Proxmox: Timeout waiting for template lock" ;; + 212) echo "Proxmox: Storage type 'iscsidirect' does not support containers (VMs only)" ;; + 213) echo "Proxmox: Storage type does not support 'rootdir' content" ;; + 214) echo "Proxmox: Not enough storage space" ;; + 215) echo "Proxmox: Container created but not listed (ghost state)" ;; + 216) echo "Proxmox: RootFS entry missing in config" ;; + 217) echo "Proxmox: Storage not accessible" ;; + 218) echo "Proxmox: Template file corrupted or incomplete" ;; + 219) echo "Proxmox: CephFS does not support containers - use RBD" ;; + 220) echo "Proxmox: Unable to resolve template path" ;; + 221) echo "Proxmox: Template file not readable" ;; + 222) echo "Proxmox: Template download failed" ;; + 223) echo "Proxmox: Template not available after download" ;; + 224) echo "Proxmox: PBS storage is for backups only" ;; + 225) echo "Proxmox: No template available for OS/Version" ;; + 231) echo "Proxmox: LXC stack upgrade failed" ;; + + # --- Node.js / npm / pnpm / yarn (243-249) --- 243) echo "Node.js: Out of memory (JavaScript heap out of memory)" ;; 245) echo "Node.js: Invalid command-line option" ;; 246) echo "Node.js: Internal JavaScript Parse Error" ;; 247) echo "Node.js: Fatal internal error" ;; 248) echo "Node.js: Invalid C++ addon / N-API failure" ;; - 249) echo "Node.js: Inspector error" ;; - 254) echo "npm/pnpm/yarn: Unknown fatal error" ;; + 249) echo "npm/pnpm/yarn: Unknown fatal error" ;; - # --- Python / pip / uv --- - 210) echo "Python: Virtualenv / uv environment missing or broken" ;; - 211) echo "Python: Dependency resolution failed" ;; - 212) echo "Python: Installation aborted (permissions or EXTERNALLY-MANAGED)" ;; - - # --- PostgreSQL --- - 231) echo "PostgreSQL: Connection failed (server not running / wrong socket)" ;; - 232) echo "PostgreSQL: Authentication failed (bad user/password)" ;; - 233) echo "PostgreSQL: Database does not exist" ;; - 234) echo "PostgreSQL: Fatal error in query / syntax" ;; - - # --- MySQL / MariaDB --- - 241) echo "MySQL/MariaDB: Connection failed (server not running / wrong socket)" ;; - 242) echo "MySQL/MariaDB: Authentication failed (bad user/password)" ;; - 243) echo "MySQL/MariaDB: Database does not exist" ;; - 244) echo "MySQL/MariaDB: Fatal error in query / syntax" ;; - - # --- MongoDB --- - 251) echo "MongoDB: Connection failed (server not running)" ;; - 252) echo "MongoDB: Authentication failed (bad user/password)" ;; - 253) echo "MongoDB: Database not found" ;; - 254) echo "MongoDB: Fatal query error" ;; - - # --- Proxmox Custom Codes --- - 200) echo "Custom: Failed to create lock file" ;; - 203) echo "Custom: Missing CTID variable" ;; - 204) echo "Custom: Missing PCT_OSTYPE variable" ;; - 205) echo "Custom: Invalid CTID (<100)" ;; - 206) echo "Custom: CTID already in use (check 
'pct list' and /etc/pve/lxc/)" ;; - 207) echo "Custom: Password contains unescaped special characters (-, /, \\, *, etc.)" ;; - 208) echo "Custom: Invalid configuration (DNS/MAC/Network format error)" ;; - 209) echo "Custom: Container creation failed (check logs for pct create output)" ;; - 210) echo "Custom: Cluster not quorate" ;; - 211) echo "Custom: Timeout waiting for template lock (concurrent download in progress)" ;; - 214) echo "Custom: Not enough storage space" ;; - 215) echo "Custom: Container created but not listed (ghost state - check /etc/pve/lxc/)" ;; - 216) echo "Custom: RootFS entry missing in config (incomplete creation)" ;; - 217) echo "Custom: Storage does not support rootdir (check storage capabilities)" ;; - 218) echo "Custom: Template file corrupted or incomplete download (size <1MB or invalid archive)" ;; - 220) echo "Custom: Unable to resolve template path" ;; - 221) echo "Custom: Template file exists but not readable (check file permissions)" ;; - 222) echo "Custom: Template download failed after 3 attempts (network/storage issue)" ;; - 223) echo "Custom: Template not available after download (storage sync issue)" ;; - 225) echo "Custom: No template available for OS/Version (check 'pveam available')" ;; - 231) echo "Custom: LXC stack upgrade/retry failed (outdated pve-container - check https://github.com/community-scripts/ProxmoxVE/discussions/8126)" ;; + # --- DPKG --- + 255) echo "DPKG: Fatal internal error" ;; # --- Default --- *) echo "Unknown error" ;; diff --git a/misc/build.func b/misc/build.func index b584767de..906d64022 100644 --- a/misc/build.func +++ b/misc/build.func @@ -1,8 +1,7 @@ #!/usr/bin/env bash # Copyright (c) 2021-2026 community-scripts ORG # Author: tteck (tteckster) | MickLesk | michelroegl-brunner -# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/branch/main/LICENSE -# Revision: 1 +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/branch/main/LICENSE # ============================================================================== # BUILD.FUNC - LXC CONTAINER BUILD & CONFIGURATION @@ -81,109 +80,6 @@ variables() { fi } -# ----------------------------------------------------------------------------- -# Community-Scripts bootstrap loader -# - Always sources build.func from remote -# - Updates local core files only if build.func changed -# - Local cache: /usr/local/community-scripts/core -# ----------------------------------------------------------------------------- - -# FUNC_DIR="/usr/local/community-scripts/core" -# mkdir -p "$FUNC_DIR" - -# BUILD_URL="https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func" -# BUILD_REV="$FUNC_DIR/build.rev" -# DEVMODE="${DEVMODE:-no}" - -# # --- Step 1: fetch build.func content once, compute hash --- -# build_content="$(curl -fsSL "$BUILD_URL")" || { -# echo "❌ Failed to fetch build.func" -# exit 1 -# } - -# newhash=$(printf "%s" "$build_content" | sha256sum | awk '{print $1}') -# oldhash=$(cat "$BUILD_REV" 2>/dev/null || echo "") - -# # --- Step 2: if build.func changed, offer update for core files --- -# if [ "$newhash" != "$oldhash" ]; then -# echo "⚠️ build.func changed!" - -# while true; do -# read -rp "Refresh local core files? [y/N/diff]: " ans -# case "$ans" in -# [Yy]*) -# echo "$newhash" >"$BUILD_REV" - -# update_func_file() { -# local file="$1" -# local url="https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/$file" -# local local_path="$FUNC_DIR/$file" - -# echo "⬇️ Downloading $file ..." 
-# curl -fsSL "$url" -o "$local_path" || { -# echo "❌ Failed to fetch $file" -# exit 1 -# } -# echo "✔️ Updated $file" -# } - -# update_func_file core.func -# update_func_file error_handler.func -# update_func_file tools.func -# break -# ;; -# [Dd]*) -# for file in core.func error_handler.func tools.func; do -# local_path="$FUNC_DIR/$file" -# url="https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/$file" -# remote_tmp="$(mktemp)" - -# curl -fsSL "$url" -o "$remote_tmp" || continue - -# if [ -f "$local_path" ]; then -# echo "🔍 Diff for $file:" -# diff -u "$local_path" "$remote_tmp" || echo "(no differences)" -# else -# echo "📦 New file $file will be installed" -# fi - -# rm -f "$remote_tmp" -# done -# ;; -# *) -# echo "❌ Skipped updating local core files" -# break -# ;; -# esac -# done -# else -# if [ "$DEVMODE" != "yes" ]; then -# echo "✔️ build.func unchanged → using existing local core files" -# fi -# fi - -# if [ -n "${_COMMUNITY_SCRIPTS_LOADER:-}" ]; then -# return 0 2>/dev/null || exit 0 -# fi -# _COMMUNITY_SCRIPTS_LOADER=1 - -# # --- Step 3: always source local versions of the core files --- -# source "$FUNC_DIR/core.func" -# source "$FUNC_DIR/error_handler.func" -# source "$FUNC_DIR/tools.func" - -# # --- Step 4: finally, source build.func directly from memory --- -# # (no tmp file needed) -# source <(printf "%s" "$build_content") - -# ------------------------------------------------------------------------------ -# Load core + error handler functions from community-scripts repo -# -# - Prefer curl if available, fallback to wget -# - Load: core.func, error_handler.func, api.func -# - Initialize error traps after loading -# ------------------------------------------------------------------------------ - source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/api.func) if command -v curl >/dev/null 2>&1; then @@ -191,13 +87,11 @@ if command -v curl >/dev/null 2>&1; then source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/error_handler.func) load_functions catch_errors - #echo "(build.func) Loaded core.func via curl" elif command -v wget >/dev/null 2>&1; then source <(wget -qO- https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/core.func) source <(wget -qO- https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/error_handler.func) load_functions catch_errors - #echo "(build.func) Loaded core.func via wget" fi # ============================================================================== @@ -266,17 +160,29 @@ maxkeys_check() { # # - Returns current container IP depending on OS type # - Debian/Ubuntu: uses `hostname -I` -# - Alpine: parses eth0 via `ip -4 addr` +# - Alpine: parses eth0 via `ip -4 addr` or `ip -6 addr` +# - Supports IPv6-only environments as fallback # - Returns "Unknown" if OS type cannot be determined # ------------------------------------------------------------------------------ get_current_ip() { + CURRENT_IP="" if [ -f /etc/os-release ]; then # Check for Debian/Ubuntu (uses hostname -I) if grep -qE 'ID=debian|ID=ubuntu' /etc/os-release; then - CURRENT_IP=$(hostname -I | awk '{print $1}') + # Try IPv4 first + CURRENT_IP=$(hostname -I 2>/dev/null | tr ' ' '\n' | grep -E '^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$' | head -n1) + # Fallback to IPv6 if no IPv4 + if [[ -z "$CURRENT_IP" ]]; then + CURRENT_IP=$(hostname -I 2>/dev/null | tr ' ' '\n' | grep -E ':' | head -n1) + fi # Check for Alpine 
(uses ip command) elif grep -q 'ID=alpine' /etc/os-release; then - CURRENT_IP=$(ip -4 addr show eth0 | awk '/inet / {print $2}' | cut -d/ -f1 | head -n 1) + # Try IPv4 first + CURRENT_IP=$(ip -4 addr show eth0 2>/dev/null | awk '/inet / {print $2}' | cut -d/ -f1 | head -n 1) + # Fallback to IPv6 if no IPv4 + if [[ -z "$CURRENT_IP" ]]; then + CURRENT_IP=$(ip -6 addr show eth0 scope global 2>/dev/null | awk '/inet6 / {print $2}' | cut -d/ -f1 | head -n 1) + fi else CURRENT_IP="Unknown" fi @@ -308,6 +214,7 @@ update_motd_ip() { # # - Installs SSH keys into container root account if SSH is enabled # - Uses pct push or direct input to authorized_keys +# - Supports both SSH_KEYS_FILE (from advanced settings) and SSH_AUTHORIZED_KEY (from user defaults) # - Falls back to warning if no keys provided # ------------------------------------------------------------------------------ install_ssh_keys_into_ct() { @@ -316,6 +223,13 @@ install_ssh_keys_into_ct() { # Ensure SSH_KEYS_FILE is defined (may not be set if advanced_settings was skipped) : "${SSH_KEYS_FILE:=}" + # If SSH_KEYS_FILE doesn't exist but SSH_AUTHORIZED_KEY is set (from user defaults), + # create a temporary SSH_KEYS_FILE with the key + if [[ -z "$SSH_KEYS_FILE" || ! -s "$SSH_KEYS_FILE" ]] && [[ -n "${SSH_AUTHORIZED_KEY:-}" ]]; then + SSH_KEYS_FILE="$(mktemp)" + printf '%s\n' "$SSH_AUTHORIZED_KEY" >"$SSH_KEYS_FILE" + fi + if [[ -n "$SSH_KEYS_FILE" && -s "$SSH_KEYS_FILE" ]]; then msg_info "Installing selected SSH keys into CT ${CTID}" pct exec "$CTID" -- sh -c 'mkdir -p /root/.ssh && chmod 700 /root/.ssh' || { @@ -1025,7 +939,6 @@ base_settings() { ENABLE_NESTING=${var_nesting:-"1"} ENABLE_KEYCTL=${var_keyctl:-"0"} ENABLE_MKNOD=${var_mknod:-"0"} - MOUNT_FS=${var_mount_fs:-""} PROTECT_CT=${var_protection:-"no"} CT_TIMEZONE=${var_timezone:-"$timezone"} [[ "${CT_TIMEZONE:-}" == Etc/* ]] && CT_TIMEZONE="host" # pct doesn't accept Etc/* zones diff --git a/misc/core.func b/misc/core.func index 4506b101b..cf564c8a2 100644 --- a/misc/core.func +++ b/misc/core.func @@ -1,6 +1,6 @@ #!/usr/bin/env bash # Copyright (c) 2021-2026 community-scripts ORG -# License: MIT | https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/LICENSE +# License: MIT | https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/LICENSE # ============================================================================== # CORE FUNCTIONS - LXC CONTAINER UTILITIES @@ -123,6 +123,7 @@ icons() { CREATING="${TAB}🚀${TAB}${CL}" ADVANCED="${TAB}🧩${TAB}${CL}" FUSE="${TAB}🗂️${TAB}${CL}" + GPU="${TAB}🎮${TAB}${CL}" HOURGLASS="${TAB}⏳${TAB}" } @@ -551,11 +552,8 @@ msg_info() { if ! declare -p MSG_INFO_SHOWN &>/dev/null || ! 
declare -A MSG_INFO_SHOWN &>/dev/null; then declare -gA MSG_INFO_SHOWN=() fi - # Sanitize message for use as associative array key (remove ANSI codes and special chars) - local sanitized_msg - sanitized_msg=$(printf '%s' "$msg" | sed 's/\x1b\[[0-9;]*m//g; s/[^a-zA-Z0-9_]/_/g') - [[ -n "${MSG_INFO_SHOWN["$sanitized_msg"]+x}" ]] && return - MSG_INFO_SHOWN["$sanitized_msg"]=1 + [[ -n "${MSG_INFO_SHOWN["$msg"]+x}" ]] && return + MSG_INFO_SHOWN["$msg"]=1 stop_spinner SPINNER_MSG="$msg" @@ -600,7 +598,6 @@ msg_ok() { stop_spinner clear_line echo -e "$CM ${GN}${msg}${CL}" - # Sanitize message for use as associative array key (remove ANSI codes and special chars) local sanitized_msg sanitized_msg=$(printf '%s' "$msg" | sed 's/\x1b\[[0-9;]*m//g; s/[^a-zA-Z0-9_]/_/g') unset 'MSG_INFO_SHOWN['"$sanitized_msg"']' 2>/dev/null || true @@ -717,7 +714,7 @@ exit_script() { # ------------------------------------------------------------------------------ get_header() { local app_name=$(echo "${APP,,}" | tr -d ' ') - local app_type=${APP_TYPE:-ct} # Default zu 'ct' falls nicht gesetzt + local app_type=${APP_TYPE:-ct} # Default to 'ct' if not set local header_url="https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/${app_type}/headers/${app_name}" local local_header_path="/usr/local/community-scripts/headers/${app_type}/${app_name}" @@ -820,71 +817,64 @@ is_verbose_mode() { # ------------------------------------------------------------------------------ # cleanup_lxc() # -# - Comprehensive cleanup of package managers, caches, and logs -# - Supports Alpine (apk), Debian/Ubuntu (apt), Fedora/Rocky/CentOS (dnf/yum), -# openSUSE (zypper), Gentoo (emerge), and language package managers -# - Cleans: Python (pip/uv), Node.js (npm/yarn/pnpm), Go, Rust, Ruby, PHP -# - Truncates log files and vacuums systemd journal -# - Run at end of container creation to minimize disk usage +# - Cleans package manager and language caches (safe for installs AND updates) +# - Supports Alpine (apk), Debian/Ubuntu (apt), Python, Node.js, Go, Rust, Ruby, PHP +# - Uses fallback error handling to prevent cleanup failures from breaking installs # ------------------------------------------------------------------------------ cleanup_lxc() { msg_info "Cleaning up" - # OS-specific package manager cleanup + if is_alpine; then - $STD apk cache clean 2>/dev/null || true + $STD apk cache clean || true rm -rf /var/cache/apk/* - elif command -v apt &>/dev/null; then - # Debian/Ubuntu/Devuan - $STD apt -y autoremove 2>/dev/null || true - $STD apt -y autoclean 2>/dev/null || true - $STD apt -y clean 2>/dev/null || true - elif command -v dnf &>/dev/null; then - # Fedora/Rocky/AlmaLinux/CentOS 8+ - $STD dnf clean all 2>/dev/null || true - $STD dnf autoremove -y 2>/dev/null || true - elif command -v yum &>/dev/null; then - # CentOS 7/older RHEL - $STD yum clean all 2>/dev/null || true - elif command -v zypper &>/dev/null; then - # openSUSE - $STD zypper clean --all 2>/dev/null || true - elif command -v emerge &>/dev/null; then - # Gentoo - $STD emerge --quiet --depclean 2>/dev/null || true - $STD eclean-dist -d 2>/dev/null || true - $STD eclean-pkg -d 2>/dev/null || true + else + $STD apt -y autoremove 2>/dev/null || msg_warn "apt autoremove failed (non-critical)" + $STD apt -y autoclean 2>/dev/null || msg_warn "apt autoclean failed (non-critical)" + $STD apt -y clean 2>/dev/null || msg_warn "apt clean failed (non-critical)" fi - # Clear temp artifacts (keep sockets/FIFOs; ignore errors) find /tmp /var/tmp -type f -name 'tmp*' 
-delete 2>/dev/null || true find /tmp /var/tmp -type f -name 'tempfile*' -delete 2>/dev/null || true - # Truncate writable log files silently (permission errors ignored) - if command -v truncate >/dev/null 2>&1; then - find /var/log -type f -writable -print0 2>/dev/null | - xargs -0 -n1 truncate -s 0 2>/dev/null || true + # Python + if command -v pip &>/dev/null; then + rm -rf /root/.cache/pip 2>/dev/null || true + fi + if command -v uv &>/dev/null; then + rm -rf /root/.cache/uv 2>/dev/null || true fi - # Node.js npm + # Node.js if command -v npm &>/dev/null; then rm -rf /root/.npm/_cacache /root/.npm/_logs 2>/dev/null || true fi - # Node.js yarn - #if command -v yarn &>/dev/null; then $STD yarn cache clean 2>/dev/null || true; fi - # Node.js pnpm - if command -v pnpm &>/dev/null; then $STD pnpm store prune 2>/dev/null || true; fi - # Go - if command -v go &>/dev/null; then $STD go clean -cache -modcache 2>/dev/null || true; fi - # Rust cargo - if command -v cargo &>/dev/null; then $STD cargo clean 2>/dev/null || true; fi - # Ruby gem - if command -v gem &>/dev/null; then $STD gem cleanup 2>/dev/null || true; fi - # Composer (PHP) - if command -v composer &>/dev/null; then $STD composer clear-cache 2>/dev/null || true; fi - - if command -v journalctl &>/dev/null; then - $STD journalctl --vacuum-time=10m 2>/dev/null || true + if command -v yarn &>/dev/null; then + rm -rf /root/.cache/yarn /root/.yarn/cache 2>/dev/null || true fi + if command -v pnpm &>/dev/null; then + pnpm store prune &>/dev/null || true + fi + + # Go (only build cache, not modules) + if command -v go &>/dev/null; then + $STD go clean -cache 2>/dev/null || true + fi + + # Rust (only registry cache, not build artifacts) + if command -v cargo &>/dev/null; then + rm -rf /root/.cargo/registry/cache /root/.cargo/.package-cache 2>/dev/null || true + fi + + # Ruby + if command -v gem &>/dev/null; then + rm -rf /root/.gem/cache 2>/dev/null || true + fi + + # PHP + if command -v composer &>/dev/null; then + rm -rf /root/.composer/cache 2>/dev/null || true + fi + msg_ok "Cleaned" } @@ -954,14 +944,14 @@ function get_lxc_ip() { get_current_ip() { local ip - # Try direct interface lookup for eth0 FIRST (most reliable for LXC) + # Try direct interface lookup for eth0 FIRST (most reliable for LXC) - IPv4 ip=$(ip -4 addr show eth0 2>/dev/null | awk '/inet / {print $2}' | cut -d/ -f1 | head -n1) if [[ -n "$ip" && "$ip" =~ ^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$ ]]; then echo "$ip" return 0 fi - # Fallback: Try hostname -I + # Fallback: Try hostname -I (returns IPv4 first if available) if command -v hostname >/dev/null 2>&1; then ip=$(hostname -I 2>/dev/null | awk '{print $1}') if [[ -n "$ip" && "$ip" =~ ^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$ ]]; then @@ -970,9 +960,9 @@ function get_lxc_ip() { fi fi - # Last resort: Use routing table - local targets=("8.8.8.8" "1.1.1.1" "default") - for target in "${targets[@]}"; do + # Try routing table with IPv4 targets + local ipv4_targets=("8.8.8.8" "1.1.1.1" "default") + for target in "${ipv4_targets[@]}"; do if [[ "$target" == "default" ]]; then ip=$(ip route get 1 2>/dev/null | awk '{for(i=1;i<=NF;i++) if ($i=="src") print $(i+1)}') else @@ -984,6 +974,32 @@ function get_lxc_ip() { fi done + # IPv6 fallback: Try direct interface lookup for eth0 + ip=$(ip -6 addr show eth0 scope global 2>/dev/null | awk '/inet6 / {print $2}' | cut -d/ -f1 | head -n1) + if [[ -n "$ip" && "$ip" =~ : ]]; then + echo "$ip" + return 0 + fi + + # IPv6 fallback: Try hostname -I for IPv6 + if command -v hostname >/dev/null 2>&1; then + 
ip=$(hostname -I 2>/dev/null | tr ' ' '\n' | grep -E ':' | head -n1) + if [[ -n "$ip" && "$ip" =~ : ]]; then + echo "$ip" + return 0 + fi + fi + + # IPv6 fallback: Use routing table with IPv6 targets + local ipv6_targets=("2001:4860:4860::8888" "2606:4700:4700::1111") + for target in "${ipv6_targets[@]}"; do + ip=$(ip -6 route get "$target" 2>/dev/null | awk '{for(i=1;i<=NF;i++) if ($i=="src") print $(i+1)}') + if [[ -n "$ip" && "$ip" =~ : ]]; then + echo "$ip" + return 0 + fi + done + return 1 } From 3b9ad58ce38eb9ca308750a00a135576b78daf1f Mon Sep 17 00:00:00 2001 From: "CanbiZ (MickLesk)" <47820557+MickLesk@users.noreply.github.com> Date: Tue, 27 Jan 2026 10:08:34 +0100 Subject: [PATCH 096/228] merge from Main --- misc/build.func | 1271 ++++++++++++++++++++++++----------------------- 1 file changed, 644 insertions(+), 627 deletions(-) diff --git a/misc/build.func b/misc/build.func index 906d64022..5ea15757a 100644 --- a/misc/build.func +++ b/misc/build.func @@ -1,7 +1,7 @@ #!/usr/bin/env bash # Copyright (c) 2021-2026 community-scripts ORG # Author: tteck (tteckster) | MickLesk | michelroegl-brunner -# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/branch/main/LICENSE +# License: MIT | https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/LICENSE # ============================================================================== # BUILD.FUNC - LXC CONTAINER BUILD & CONFIGURATION @@ -246,7 +246,7 @@ install_ssh_keys_into_ct() { return 0 fi - # Fallback: nichts ausgewählt + # Fallback msg_warn "No SSH keys to install (skipping)." return 0 } @@ -298,6 +298,53 @@ get_valid_container_id() { echo "$suggested_id" } +# ------------------------------------------------------------------------------ +# validate_container_id() +# +# - Validates if a container ID is available for use +# - Checks if ID is already used by VM or LXC container +# - Checks if ID is used in LVM logical volumes +# - Returns 0 if ID is available, 1 if already in use +# ------------------------------------------------------------------------------ +validate_container_id() { + local ctid="$1" + + # Check if ID is numeric + if ! [[ "$ctid" =~ ^[0-9]+$ ]]; then + return 1 + fi + + # Check if config file exists for VM or LXC + if [[ -f "/etc/pve/qemu-server/${ctid}.conf" ]] || [[ -f "/etc/pve/lxc/${ctid}.conf" ]]; then + return 1 + fi + + # Check if ID is used in LVM logical volumes + if lvs --noheadings -o lv_name 2>/dev/null | grep -qE "(^|[-_])${ctid}($|[-_])"; then + return 1 + fi + + return 0 +} + +# ------------------------------------------------------------------------------ +# get_valid_container_id() +# +# - Returns a valid, unused container ID +# - If provided ID is valid, returns it +# - Otherwise increments from suggested ID until a free one is found +# - Calls validate_container_id() to check availability +# ------------------------------------------------------------------------------ +get_valid_container_id() { + local suggested_id="${1:-$(pvesh get /cluster/nextid)}" + + while ! validate_container_id "$suggested_id"; do + suggested_id=$((suggested_id + 1)) + done + + echo "$suggested_id" +} + # ------------------------------------------------------------------------------ # validate_hostname() # @@ -316,7 +363,7 @@ validate_hostname() { # Split by dots and validate each label local IFS='.' 
- read -ra labels <<<"$hostname" + read -ra labels <<< "$hostname" for label in "${labels[@]}"; do # Each label: 1-63 chars, alphanumeric, hyphens allowed (not at start/end) if [[ -z "$label" ]] || [[ ${#label} -gt 63 ]]; then @@ -420,7 +467,7 @@ validate_ipv6_address() { # Check that no segment exceeds 4 hex chars local IFS=':' local -a segments - read -ra segments <<<"$addr" + read -ra segments <<< "$addr" for seg in "${segments[@]}"; do if [[ ${#seg} -gt 4 ]]; then return 1 @@ -470,14 +517,14 @@ validate_gateway_in_subnet() { # Convert IPs to integers local IFS='.' - read -r i1 i2 i3 i4 <<<"$ip" - read -r g1 g2 g3 g4 <<<"$gateway" + read -r i1 i2 i3 i4 <<< "$ip" + read -r g1 g2 g3 g4 <<< "$gateway" - local ip_int=$(((i1 << 24) + (i2 << 16) + (i3 << 8) + i4)) - local gw_int=$(((g1 << 24) + (g2 << 16) + (g3 << 8) + g4)) + local ip_int=$(( (i1 << 24) + (i2 << 16) + (i3 << 8) + i4 )) + local gw_int=$(( (g1 << 24) + (g2 << 16) + (g3 << 8) + g4 )) # Check if both are in same network - if (((ip_int & mask) != (gw_int & mask))); then + if (( (ip_int & mask) != (gw_int & mask) )); then return 1 fi @@ -849,8 +896,6 @@ base_settings() { CORE_COUNT="${final_cpu}" RAM_SIZE="${final_ram}" VERBOSE=${var_verbose:-"${1:-no}"} - - # Password sanitization - clean up dashes and format properly PW="" if [[ -n "${var_pw:-}" ]]; then local _pw_raw="${var_pw}" @@ -1012,113 +1057,113 @@ load_vars_file() { # Validate values before setting (skip empty values - they use defaults) if [[ -n "$var_val" ]]; then case "$var_key" in - var_mac) - if ! validate_mac_address "$var_val"; then - msg_warn "Invalid MAC address '$var_val' in $file, ignoring" - continue - fi - ;; - var_vlan) - if ! validate_vlan_tag "$var_val"; then - msg_warn "Invalid VLAN tag '$var_val' in $file (must be 1-4094), ignoring" - continue - fi - ;; - var_mtu) - if ! validate_mtu "$var_val"; then - msg_warn "Invalid MTU '$var_val' in $file (must be 576-65535), ignoring" - continue - fi - ;; - var_tags) - if ! validate_tags "$var_val"; then - msg_warn "Invalid tags '$var_val' in $file (alphanumeric, -, _, ; only), ignoring" - continue - fi - ;; - var_timezone) - if ! validate_timezone "$var_val"; then - msg_warn "Invalid timezone '$var_val' in $file, ignoring" - continue - fi - ;; - var_brg) - if ! validate_bridge "$var_val"; then - msg_warn "Bridge '$var_val' not found in $file, ignoring" - continue - fi - ;; - var_gateway) - if ! validate_gateway_ip "$var_val"; then - msg_warn "Invalid gateway IP '$var_val' in $file, ignoring" - continue - fi - ;; - var_hostname) - if ! validate_hostname "$var_val"; then - msg_warn "Invalid hostname '$var_val' in $file, ignoring" - continue - fi - ;; - var_cpu) - if ! [[ "$var_val" =~ ^[0-9]+$ ]] || ((var_val < 1 || var_val > 128)); then - msg_warn "Invalid CPU count '$var_val' in $file (must be 1-128), ignoring" - continue - fi - ;; - var_ram) - if ! [[ "$var_val" =~ ^[0-9]+$ ]] || ((var_val < 256)); then - msg_warn "Invalid RAM '$var_val' in $file (must be >= 256 MiB), ignoring" - continue - fi - ;; - var_disk) - if ! 
[[ "$var_val" =~ ^[0-9]+$ ]] || ((var_val < 1)); then - msg_warn "Invalid disk size '$var_val' in $file (must be >= 1 GB), ignoring" - continue - fi - ;; - var_unprivileged) - if [[ "$var_val" != "0" && "$var_val" != "1" ]]; then - msg_warn "Invalid unprivileged value '$var_val' in $file (must be 0 or 1), ignoring" - continue - fi - ;; - var_nesting) - if [[ "$var_val" != "0" && "$var_val" != "1" ]]; then - msg_warn "Invalid nesting value '$var_val' in $file (must be 0 or 1), ignoring" - continue - fi - ;; - var_keyctl) - if [[ "$var_val" != "0" && "$var_val" != "1" ]]; then - msg_warn "Invalid keyctl value '$var_val' in $file (must be 0 or 1), ignoring" - continue - fi - ;; - var_net) - # var_net can be: dhcp, static IP/CIDR, or IP range - if [[ "$var_val" != "dhcp" ]]; then - if is_ip_range "$var_val"; then - : # IP range is valid, will be resolved at runtime - elif ! validate_ip_address "$var_val"; then - msg_warn "Invalid network '$var_val' in $file (must be dhcp or IP/CIDR), ignoring" + var_mac) + if ! validate_mac_address "$var_val"; then + msg_warn "Invalid MAC address '$var_val' in $file, ignoring" continue fi - fi - ;; - var_fuse | var_tun | var_gpu | var_ssh | var_verbose | var_protection) - if [[ "$var_val" != "yes" && "$var_val" != "no" ]]; then - msg_warn "Invalid boolean '$var_val' for $var_key in $file (must be yes/no), ignoring" - continue - fi - ;; - var_ipv6_method) - if [[ "$var_val" != "auto" && "$var_val" != "dhcp" && "$var_val" != "static" && "$var_val" != "none" ]]; then - msg_warn "Invalid IPv6 method '$var_val' in $file (must be auto/dhcp/static/none), ignoring" - continue - fi - ;; + ;; + var_vlan) + if ! validate_vlan_tag "$var_val"; then + msg_warn "Invalid VLAN tag '$var_val' in $file (must be 1-4094), ignoring" + continue + fi + ;; + var_mtu) + if ! validate_mtu "$var_val"; then + msg_warn "Invalid MTU '$var_val' in $file (must be 576-65535), ignoring" + continue + fi + ;; + var_tags) + if ! validate_tags "$var_val"; then + msg_warn "Invalid tags '$var_val' in $file (alphanumeric, -, _, ; only), ignoring" + continue + fi + ;; + var_timezone) + if ! validate_timezone "$var_val"; then + msg_warn "Invalid timezone '$var_val' in $file, ignoring" + continue + fi + ;; + var_brg) + if ! validate_bridge "$var_val"; then + msg_warn "Bridge '$var_val' not found in $file, ignoring" + continue + fi + ;; + var_gateway) + if ! validate_gateway_ip "$var_val"; then + msg_warn "Invalid gateway IP '$var_val' in $file, ignoring" + continue + fi + ;; + var_hostname) + if ! validate_hostname "$var_val"; then + msg_warn "Invalid hostname '$var_val' in $file, ignoring" + continue + fi + ;; + var_cpu) + if ! [[ "$var_val" =~ ^[0-9]+$ ]] || ((var_val < 1 || var_val > 128)); then + msg_warn "Invalid CPU count '$var_val' in $file (must be 1-128), ignoring" + continue + fi + ;; + var_ram) + if ! [[ "$var_val" =~ ^[0-9]+$ ]] || ((var_val < 256)); then + msg_warn "Invalid RAM '$var_val' in $file (must be >= 256 MiB), ignoring" + continue + fi + ;; + var_disk) + if ! 
[[ "$var_val" =~ ^[0-9]+$ ]] || ((var_val < 1)); then + msg_warn "Invalid disk size '$var_val' in $file (must be >= 1 GB), ignoring" + continue + fi + ;; + var_unprivileged) + if [[ "$var_val" != "0" && "$var_val" != "1" ]]; then + msg_warn "Invalid unprivileged value '$var_val' in $file (must be 0 or 1), ignoring" + continue + fi + ;; + var_nesting) + if [[ "$var_val" != "0" && "$var_val" != "1" ]]; then + msg_warn "Invalid nesting value '$var_val' in $file (must be 0 or 1), ignoring" + continue + fi + ;; + var_keyctl) + if [[ "$var_val" != "0" && "$var_val" != "1" ]]; then + msg_warn "Invalid keyctl value '$var_val' in $file (must be 0 or 1), ignoring" + continue + fi + ;; + var_net) + # var_net can be: dhcp, static IP/CIDR, or IP range + if [[ "$var_val" != "dhcp" ]]; then + if is_ip_range "$var_val"; then + : # IP range is valid, will be resolved at runtime + elif ! validate_ip_address "$var_val"; then + msg_warn "Invalid network '$var_val' in $file (must be dhcp or IP/CIDR), ignoring" + continue + fi + fi + ;; + var_fuse|var_tun|var_gpu|var_ssh|var_verbose|var_protection) + if [[ "$var_val" != "yes" && "$var_val" != "no" ]]; then + msg_warn "Invalid boolean '$var_val' for $var_key in $file (must be yes/no), ignoring" + continue + fi + ;; + var_ipv6_method) + if [[ "$var_val" != "auto" && "$var_val" != "dhcp" && "$var_val" != "static" && "$var_val" != "none" ]]; then + msg_warn "Invalid IPv6 method '$var_val' in $file (must be auto/dhcp/static/none), ignoring" + continue + fi + ;; esac fi @@ -1228,18 +1273,12 @@ var_fuse=no var_tun=no # Advanced Settings (Proxmox-official features) -# var_nesting: Allow nesting (required for Docker/LXC in CT) -var_nesting=1 -# var_keyctl: Allow keyctl() - needed for Docker (systemd-networkd workaround) -var_keyctl=0 -# var_mknod: Allow device node creation (requires kernel 5.3+, experimental) -var_mknod=0 -# var_mount_fs: Allow specific filesystems: nfs,fuse,ext4,etc (leave empty for defaults) -var_mount_fs= -# var_protection: Prevent accidental deletion of container -var_protection=no -# var_timezone: Container timezone (e.g. Europe/Berlin, leave empty for host timezone) -var_timezone= +var_nesting=1 # Allow nesting (required for Docker/LXC in CT) +var_keyctl=0 # Allow keyctl() - needed for Docker (systemd-networkd workaround) +var_mknod=0 # Allow device node creation (requires kernel 5.3+, experimental) +var_mount_fs= # Allow specific filesystems: nfs,fuse,ext4,etc (leave empty for defaults) +var_protection=no # Prevent accidental deletion of container +var_timezone= # Container timezone (e.g. Europe/Berlin, leave empty for host timezone) var_tags=community-script var_verbose=no @@ -1551,7 +1590,7 @@ maybe_offer_save_app_defaults() { # 1) if no file → offer to create if [[ ! -f "$app_vars_path" ]]; then - if whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \ + if whiptail --backtitle "Proxmox VE Helper Scripts" \ --yesno "Save these advanced settings as defaults for ${APP}?\n\nThis will create:\n${app_vars_path}" 12 72; then mkdir -p "$(dirname "$app_vars_path")" install -m 0644 "$new_tmp" "$app_vars_path" @@ -1576,7 +1615,7 @@ maybe_offer_save_app_defaults() { while true; do local sel - sel="$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \ + sel="$(whiptail --backtitle "Proxmox VE Helper Scripts" \ --title "APP DEFAULTS – ${APP}" \ --menu "Differences detected. What do you want to do?" 
20 78 10 \ "Update Defaults" "Write new values to ${app_vars_file}" \ @@ -1597,7 +1636,7 @@ maybe_offer_save_app_defaults() { break ;; "View Diff") - whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \ + whiptail --backtitle "Proxmox VE Helper Scripts" \ --title "Diff – ${APP}" \ --scrolltext --textbox "$diff_tmp" 25 100 ;; @@ -1622,6 +1661,13 @@ ensure_storage_selection_for_vars_file() { if [[ -n "$tpl" && -n "$ct" ]]; then TEMPLATE_STORAGE="$tpl" CONTAINER_STORAGE="$ct" + + # Validate storage space for loaded container storage + if [[ -n "${DISK_SIZE:-}" ]]; then + validate_storage_space "$ct" "$DISK_SIZE" "yes" + # Continue even if validation fails - user was warned + fi + return 0 fi @@ -1702,7 +1748,7 @@ advanced_settings() { elif [ -f /etc/timezone ]; then _host_timezone=$(cat /etc/timezone 2>/dev/null || echo "") fi - # pct doesn't accept Etc/* zones - map to 'host' instead + # Map Etc/* timezones to "host" (pct doesn't accept Etc/* zones) [[ "${_host_timezone:-}" == Etc/* ]] && _host_timezone="host" local _ct_timezone="${var_timezone:-$_host_timezone}" [[ "${_ct_timezone:-}" == Etc/* ]] && _ct_timezone="host" @@ -1799,32 +1845,36 @@ advanced_settings() { elif [[ "$PW1" == *" "* ]]; then whiptail --msgbox "Password cannot contain spaces." 8 58 else - # Clean up leading dashes from password local _pw1_clean="$PW1" while [[ "$_pw1_clean" == -* ]]; do _pw1_clean="${_pw1_clean#-}" done if [[ -z "$_pw1_clean" ]]; then whiptail --msgbox "Password cannot be only '-' characters." 8 58 + continue elif ((${#_pw1_clean} < 5)); then whiptail --msgbox "Password must be at least 5 characters (after removing leading '-')." 8 70 - else - # Verify password - if PW2=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ - --title "PASSWORD VERIFICATION" \ - --ok-button "Confirm" --cancel-button "Back" \ - --passwordbox "\nVerify Root Password" 10 58 \ - 3>&1 1>&2 2>&3); then - if [[ "$PW1" == "$PW2" ]]; then - _pw="--password $_pw1_clean" - _pw_display="********" - ((STEP++)) - else - whiptail --msgbox "Passwords do not match. Please try again." 8 58 - fi + continue + fi + # Verify password + if PW2=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ + --title "PASSWORD VERIFICATION" \ + --ok-button "Confirm" --cancel-button "Back" \ + --passwordbox "\nVerify Root Password" 10 58 \ + 3>&1 1>&2 2>&3); then + local _pw2_clean="$PW2" + while [[ "$_pw2_clean" == -* ]]; do + _pw2_clean="${_pw2_clean#-}" + done + if [[ "$_pw1_clean" == "$_pw2_clean" ]]; then + _pw="--password $_pw1_clean" + _pw_display="********" + ((STEP++)) else - ((STEP--)) + whiptail --msgbox "Passwords do not match. Please try again." 8 58 fi + else + ((STEP--)) fi fi else @@ -1842,22 +1892,25 @@ advanced_settings() { --inputbox "\nSet Container ID" 10 58 "$_ct_id" \ 3>&1 1>&2 2>&3); then local input_id="${result:-$NEXTID}" - # Validate container ID is numeric + + # Validate that ID is numeric if ! [[ "$input_id" =~ ^[0-9]+$ ]]; then - whiptail --msgbox "Container ID must be numeric." 8 58 + whiptail --backtitle "Proxmox VE Helper Scripts" --title "Invalid ID" --msgbox "Container ID must be numeric." 8 58 continue fi - # Validate container ID is available + + # Check if ID is already in use if ! validate_container_id "$input_id"; then - if whiptail --yesno "Container/VM ID $input_id is already in use.\n\nWould you like to use the next available ID: $(get_valid_container_id "$input_id")?" 
10 58; then + if whiptail --backtitle "Proxmox VE Helper Scripts" --title "ID Already In Use" \ + --yesno "Container/VM ID $input_id is already in use.\n\nWould you like to use the next available ID ($(get_valid_container_id "$input_id"))?" 10 58; then _ct_id=$(get_valid_container_id "$input_id") - ((STEP++)) + else + continue fi - # else stay on this step else _ct_id="$input_id" - ((STEP++)) fi + ((STEP++)) else ((STEP--)) fi @@ -1870,15 +1923,16 @@ advanced_settings() { if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ --title "HOSTNAME" \ --ok-button "Next" --cancel-button "Back" \ - --inputbox "\nSet Hostname (lowercase, alphanumeric, hyphens only)" 10 58 "$_hostname" \ + --inputbox "\nSet Hostname (or FQDN, e.g. host.example.com)" 10 58 "$_hostname" \ 3>&1 1>&2 2>&3); then local hn_test="${result:-$NSAPP}" hn_test=$(echo "${hn_test,,}" | tr -d ' ') - if [[ "$hn_test" =~ ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ ]]; then + + if validate_hostname "$hn_test"; then _hostname="$hn_test" ((STEP++)) else - whiptail --msgbox "Invalid hostname: '$hn_test'\n\nOnly lowercase letters, digits and hyphens are allowed." 10 58 + whiptail --msgbox "Invalid hostname: '$hn_test'\n\nRules:\n- Only lowercase letters, digits, dots and hyphens\n- Labels separated by dots (max 63 chars each)\n- No leading/trailing hyphens or dots\n- No consecutive dots\n- Total max 253 characters" 14 60 fi else ((STEP--)) @@ -1953,8 +2007,14 @@ advanced_settings() { # ═══════════════════════════════════════════════════════════════════════════ 8) if [[ ${#BRIDGE_MENU_OPTIONS[@]} -eq 0 ]]; then - _bridge="vmbr0" - ((STEP++)) + # Validate default bridge exists + if validate_bridge "vmbr0"; then + _bridge="vmbr0" + ((STEP++)) + else + whiptail --msgbox "Default bridge 'vmbr0' not found!\n\nPlease configure a network bridge in Proxmox first." 10 58 + exit 1 + fi else if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ --title "NETWORK BRIDGE" \ @@ -1962,8 +2022,13 @@ advanced_settings() { --menu "\nSelect network bridge:" 16 58 6 \ "${BRIDGE_MENU_OPTIONS[@]}" \ 3>&1 1>&2 2>&3); then - _bridge="${result:-vmbr0}" - ((STEP++)) + local bridge_test="${result:-vmbr0}" + if validate_bridge "$bridge_test"; then + _bridge="$bridge_test" + ((STEP++)) + else + whiptail --msgbox "Bridge '$bridge_test' is not available or not active." 8 58 + fi else ((STEP--)) fi @@ -1977,9 +2042,10 @@ advanced_settings() { if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ --title "IPv4 CONFIGURATION" \ --ok-button "Next" --cancel-button "Back" \ - --menu "\nSelect IPv4 Address Assignment:" 14 60 2 \ + --menu "\nSelect IPv4 Address Assignment:" 16 65 3 \ "dhcp" "Automatic (DHCP, recommended)" \ "static" "Static (manual entry)" \ + "range" "IP Range Scan (find first free IP)" \ 3>&1 1>&2 2>&3); then if [[ "$result" == "static" ]]; then @@ -1990,7 +2056,7 @@ advanced_settings() { --ok-button "Next" --cancel-button "Back" \ --inputbox "\nEnter Static IPv4 CIDR Address\n(e.g. 
192.168.1.100/24)" 12 58 "" \ 3>&1 1>&2 2>&3); then - if [[ "$static_ip" =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}/([0-9]|[1-2][0-9]|3[0-2])$ ]]; then + if validate_ip_address "$static_ip"; then # Get gateway local gateway_ip if gateway_ip=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ @@ -1998,16 +2064,62 @@ advanced_settings() { --ok-button "Next" --cancel-button "Back" \ --inputbox "\nEnter Gateway IP address" 10 58 "" \ 3>&1 1>&2 2>&3); then - if [[ "$gateway_ip" =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}$ ]]; then - _net="$static_ip" - _gate=",gw=$gateway_ip" - ((STEP++)) + if validate_gateway_ip "$gateway_ip"; then + # Validate gateway is in same subnet + if validate_gateway_in_subnet "$static_ip" "$gateway_ip"; then + _net="$static_ip" + _gate=",gw=$gateway_ip" + ((STEP++)) + else + whiptail --msgbox "Gateway is not in the same subnet as the static IP.\n\nStatic IP: $static_ip\nGateway: $gateway_ip" 10 58 + fi else - whiptail --msgbox "Invalid Gateway IP format." 8 58 + whiptail --msgbox "Invalid Gateway IP format.\n\nEach octet must be 0-255.\nExample: 192.168.1.1" 10 58 fi fi else - whiptail --msgbox "Invalid IPv4 CIDR format.\nExample: 192.168.1.100/24" 8 58 + whiptail --msgbox "Invalid IPv4 CIDR format.\n\nEach octet must be 0-255.\nCIDR must be 1-32.\nExample: 192.168.1.100/24" 12 58 + fi + fi + elif [[ "$result" == "range" ]]; then + # IP Range Scan + local ip_range + if ip_range=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ + --title "IP RANGE SCAN" \ + --ok-button "Scan" --cancel-button "Back" \ + --inputbox "\nEnter IP range to scan for free address\n(e.g. 192.168.1.100/24-192.168.1.200/24)" 12 65 "" \ + 3>&1 1>&2 2>&3); then + if is_ip_range "$ip_range"; then + # Exit whiptail screen temporarily to show scan progress + clear + header_info + echo -e "${INFO}${BOLD}${DGN}Scanning IP range for free address...${CL}\n" + if resolve_ip_from_range "$ip_range"; then + # Get gateway + local gateway_ip + if gateway_ip=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ + --title "GATEWAY IP" \ + --ok-button "Next" --cancel-button "Back" \ + --inputbox "\nFound free IP: $NET_RESOLVED\n\nEnter Gateway IP address" 12 58 "" \ + 3>&1 1>&2 2>&3); then + if validate_gateway_ip "$gateway_ip"; then + # Validate gateway is in same subnet + if validate_gateway_in_subnet "$NET_RESOLVED" "$gateway_ip"; then + _net="$NET_RESOLVED" + _gate=",gw=$gateway_ip" + ((STEP++)) + else + whiptail --msgbox "Gateway is not in the same subnet as the IP.\n\nIP: $NET_RESOLVED\nGateway: $gateway_ip" 10 58 + fi + else + whiptail --msgbox "Invalid Gateway IP format.\n\nEach octet must be 0-255.\nExample: 192.168.1.1" 10 58 + fi + fi + else + whiptail --msgbox "No free IP found in the specified range.\nAll IPs responded to ping." 10 58 + fi + else + whiptail --msgbox "Invalid IP range format.\n\nExample: 192.168.1.100/24-192.168.1.200/24" 10 58 fi fi else @@ -2043,16 +2155,33 @@ advanced_settings() { --title "STATIC IPv6 ADDRESS" \ --inputbox "\nEnter IPv6 CIDR address\n(e.g. 
2001:db8::1/64)" 12 58 "" \ 3>&1 1>&2 2>&3); then - if [[ "$ipv6_addr" =~ ^([0-9a-fA-F:]+:+)+[0-9a-fA-F]+(/[0-9]{1,3})$ ]]; then + if validate_ipv6_address "$ipv6_addr"; then _ipv6_addr="$ipv6_addr" - # Optional gateway - _ipv6_gate=$(whiptail --backtitle "Proxmox VE Helper Scripts" \ - --title "IPv6 GATEWAY" \ - --inputbox "\nEnter IPv6 gateway (optional, leave blank for none)" 10 58 "" \ - 3>&1 1>&2 2>&3) || true - ((STEP++)) + # Optional gateway - loop until valid or empty + local ipv6_gw_valid=false + while [[ "$ipv6_gw_valid" == "false" ]]; do + local ipv6_gw + ipv6_gw=$(whiptail --backtitle "Proxmox VE Helper Scripts" \ + --title "IPv6 GATEWAY" \ + --inputbox "\nEnter IPv6 gateway (optional, leave blank for none)" 10 58 "" \ + 3>&1 1>&2 2>&3) || true + # Validate gateway if provided + if [[ -n "$ipv6_gw" ]]; then + if validate_ipv6_address "$ipv6_gw"; then + _ipv6_gate="$ipv6_gw" + ipv6_gw_valid=true + ((STEP++)) + else + whiptail --msgbox "Invalid IPv6 gateway format.\n\nExample: 2001:db8::1" 8 58 + fi + else + _ipv6_gate="" + ipv6_gw_valid=true + ((STEP++)) + fi + done else - whiptail --msgbox "Invalid IPv6 CIDR format." 8 58 + whiptail --msgbox "Invalid IPv6 CIDR format.\n\nExample: 2001:db8::1/64\nCIDR must be 1-128." 10 58 fi fi ;; @@ -2061,11 +2190,7 @@ advanced_settings() { _ipv6_gate="" ((STEP++)) ;; - disable) - _ipv6_addr="" - _ipv6_gate="" - ((STEP++)) - ;; + none) _ipv6_addr="none" _ipv6_gate="" @@ -2089,10 +2214,14 @@ advanced_settings() { if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ --title "MTU SIZE" \ --ok-button "Next" --cancel-button "Back" \ - --inputbox "\nSet Interface MTU Size\n(leave blank for default 1500)" 12 58 "" \ + --inputbox "\nSet Interface MTU Size\n(leave blank for default 1500, common values: 1500, 9000)" 12 62 "" \ 3>&1 1>&2 2>&3); then - _mtu="$result" - ((STEP++)) + if validate_mtu "$result"; then + _mtu="$result" + ((STEP++)) + else + whiptail --msgbox "Invalid MTU size.\n\nMTU must be between 576 and 65535.\nCommon values: 1500 (default), 9000 (jumbo frames)" 10 58 + fi else ((STEP--)) fi @@ -2137,10 +2266,14 @@ advanced_settings() { if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ --title "MAC ADDRESS" \ --ok-button "Next" --cancel-button "Back" \ - --inputbox "\nSet MAC Address\n(leave blank for auto-generated)" 12 58 "" \ + --inputbox "\nSet MAC Address\n(leave blank for auto-generated, format: XX:XX:XX:XX:XX:XX)" 12 62 "" \ 3>&1 1>&2 2>&3); then - _mac="$result" - ((STEP++)) + if validate_mac_address "$result"; then + _mac="$result" + ((STEP++)) + else + whiptail --msgbox "Invalid MAC address format.\n\nRequired format: XX:XX:XX:XX:XX:XX\nExample: 02:00:00:00:00:01" 10 58 + fi else ((STEP--)) fi @@ -2153,10 +2286,14 @@ advanced_settings() { if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ --title "VLAN TAG" \ --ok-button "Next" --cancel-button "Back" \ - --inputbox "\nSet VLAN Tag\n(leave blank for no VLAN)" 12 58 "" \ + --inputbox "\nSet VLAN Tag (1-4094)\n(leave blank for no VLAN)" 12 58 "" \ 3>&1 1>&2 2>&3); then - _vlan="$result" - ((STEP++)) + if validate_vlan_tag "$result"; then + _vlan="$result" + ((STEP++)) + else + whiptail --msgbox "Invalid VLAN tag.\n\nVLAN must be a number between 1 and 4094." 
8 58 + fi else ((STEP--)) fi @@ -2169,11 +2306,16 @@ advanced_settings() { if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ --title "CONTAINER TAGS" \ --ok-button "Next" --cancel-button "Back" \ - --inputbox "\nSet Custom Tags (semicolon-separated)\n(remove all for no tags)" 12 58 "$_tags" \ + --inputbox "\nSet Custom Tags (semicolon-separated)\n(alphanumeric, hyphens, underscores only)" 12 58 "$_tags" \ 3>&1 1>&2 2>&3); then - _tags="${result:-;}" - _tags=$(echo "$_tags" | tr -d '[:space:]') - ((STEP++)) + local tags_test="${result:-}" + tags_test=$(echo "$tags_test" | tr -d '[:space:]') + if validate_tags "$tags_test"; then + _tags="$tags_test" + ((STEP++)) + else + whiptail --msgbox "Invalid tag format.\n\nTags can only contain:\n- Letters (a-z, A-Z)\n- Numbers (0-9)\n- Hyphens (-)\n- Underscores (_)\n- Semicolons (;) as separator" 14 58 + fi else ((STEP--)) fi @@ -2352,8 +2494,14 @@ advanced_settings() { --ok-button "Next" --cancel-button "Back" \ --inputbox "\nSet container timezone.\n\nExamples: Europe/Berlin, America/New_York, Asia/Tokyo\n\nHost timezone: ${_host_timezone:-unknown}\n\nLeave empty to inherit from host." 16 62 "$_ct_timezone" \ 3>&1 1>&2 2>&3); then - _ct_timezone="$result" - ((STEP++)) + local tz_test="$result" + [[ "${tz_test:-}" == Etc/* ]] && tz_test="host" # pct doesn't accept Etc/* zones + if validate_timezone "$tz_test"; then + _ct_timezone="$tz_test" + ((STEP++)) + else + whiptail --msgbox "Invalid timezone: '$result'\n\nTimezone must exist in /usr/share/zoneinfo/\n\nExamples:\n- Europe/Berlin\n- America/New_York\n- Asia/Tokyo\n- UTC" 14 58 + fi else ((STEP--)) fi @@ -2553,10 +2701,9 @@ Advanced: export UDHCPC_FIX export SSH_KEYS_FILE - # Exit alternate screen buffer BEFORE displaying summary - # so the summary is visible in the main terminal + # Exit alternate screen buffer before showing summary (so output remains visible) tput rmcup 2>/dev/null || true - trap - RETURN # Remove the trap since we already called rmcup + trap - RETURN # Display final summary echo -e "\n${INFO}${BOLD}${DGN}PVE Version ${PVEVERSION} (Kernel: ${KERNEL_VERSION})${CL}" @@ -2571,14 +2718,14 @@ Advanced: echo -e "${BRIDGE}${BOLD}${DGN}Bridge: ${BGN}$BRG${CL}" echo -e "${NETWORK}${BOLD}${DGN}IPv4: ${BGN}$NET${CL}" echo -e "${NETWORK}${BOLD}${DGN}IPv6: ${BGN}$IPV6_METHOD${CL}" - echo -e "${FUSE}${BOLD}${DGN}FUSE Support: ${BGN}$ENABLE_FUSE${CL}" - [[ "$ENABLE_TUN" == "yes" ]] && echo -e "${NETWORK}${BOLD}${DGN}TUN/TAP Support: ${BGN}$ENABLE_TUN${CL}" - echo -e "${CONTAINERTYPE}${BOLD}${DGN}Nesting: ${BGN}$([ "$ENABLE_NESTING" == "1" ] && echo "Enabled" || echo "Disabled")${CL}" - [[ "$ENABLE_KEYCTL" == "1" ]] && echo -e "${CONTAINERTYPE}${BOLD}${DGN}Keyctl: ${BGN}Enabled${CL}" - echo -e "${GPU}${BOLD}${DGN}GPU Passthrough: ${BGN}$ENABLE_GPU${CL}" - [[ "$PROTECT_CT" == "yes" || "$PROTECT_CT" == "1" ]] && echo -e "${CONTAINERTYPE}${BOLD}${DGN}Protection: ${BGN}Enabled${CL}" - [[ -n "$CT_TIMEZONE" ]] && echo -e "${CONTAINERTYPE}${BOLD}${DGN}Timezone: ${BGN}$CT_TIMEZONE${CL}" - [[ "$APT_CACHER" == "yes" ]] && echo -e "${CONTAINERTYPE}${BOLD}${DGN}APT Cacher: ${BGN}$APT_CACHER_IP${CL}" + echo -e "${FUSE}${BOLD}${DGN}FUSE Support: ${BGN}${ENABLE_FUSE:-no}${CL}" + [[ "${ENABLE_TUN:-no}" == "yes" ]] && echo -e "${NETWORK}${BOLD}${DGN}TUN/TAP Support: ${BGN}$ENABLE_TUN${CL}" + echo -e "${CONTAINERTYPE}${BOLD}${DGN}Nesting: ${BGN}$([ "${ENABLE_NESTING:-1}" == "1" ] && echo "Enabled" || echo "Disabled")${CL}" + [[ "${ENABLE_KEYCTL:-0}" == "1" ]] && echo -e 
"${CONTAINERTYPE}${BOLD}${DGN}Keyctl: ${BGN}Enabled${CL}" + echo -e "${GPU}${BOLD}${DGN}GPU Passthrough: ${BGN}${ENABLE_GPU:-no}${CL}" + [[ "${PROTECT_CT:-no}" == "yes" || "${PROTECT_CT:-no}" == "1" ]] && echo -e "${CONTAINERTYPE}${BOLD}${DGN}Protection: ${BGN}Enabled${CL}" + [[ -n "${CT_TIMEZONE:-}" ]] && echo -e "${INFO}${BOLD}${DGN}Timezone: ${BGN}$CT_TIMEZONE${CL}" + [[ "$APT_CACHER" == "yes" ]] && echo -e "${INFO}${BOLD}${DGN}APT Cacher: ${BGN}$APT_CACHER_IP${CL}" echo -e "${SEARCH}${BOLD}${DGN}Verbose Mode: ${BGN}$VERBOSE${CL}" echo -e "${CREATING}${BOLD}${RD}Creating a ${APP} LXC using the above advanced settings${CL}" } @@ -2603,12 +2750,12 @@ diagnostics_check() { fi if ! [ -f "/usr/local/community-scripts/diagnostics" ]; then - if (whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --title "DIAGNOSTICS" --yesno "Send Diagnostics of LXC Installation?\n\n(This only transmits data without user data, just RAM, CPU, LXC name, ...)" 10 58); then + if (whiptail --backtitle "Proxmox VE Helper Scripts" --title "DIAGNOSTICS" --yesno "Send Diagnostics of LXC Installation?\n\n(This only transmits data without user data, just RAM, CPU, LXC name, ...)" 10 58); then cat </usr/local/community-scripts/diagnostics DIAGNOSTICS=yes #This file is used to store the diagnostics settings for the Community-Scripts API. -#https://github.com/community-scripts/ProxmoxVED/discussions/1836 +#https://git.community-scripts.org/community-scripts/ProxmoxVED/discussions/1836 #Your diagnostics will be sent to the Community-Scripts API for troubleshooting/statistical purposes. #You can review the data at https://community-scripts.github.io/ProxmoxVE/data #If you do not wish to send diagnostics, please set the variable 'DIAGNOSTICS' to "no" in /usr/local/community-scripts/diagnostics, or use the menue. @@ -2633,7 +2780,7 @@ EOF DIAGNOSTICS=no #This file is used to store the diagnostics settings for the Community-Scripts API. -#https://github.com/community-scripts/ProxmoxVED/discussions/1836 +#https://git.community-scripts.org/community-scripts/ProxmoxVED/discussions/1836 #Your diagnostics will be sent to the Community-Scripts API for troubleshooting/statistical purposes. #You can review the data at https://community-scripts.github.io/ProxmoxVE/data #If you do not wish to send diagnostics, please set the variable 'DIAGNOSTICS' to "no" in /usr/local/community-scripts/diagnostics, or use the menue. @@ -2662,7 +2809,7 @@ EOF diagnostics_menu() { if [ "${DIAGNOSTICS:-no}" = "yes" ]; then - if whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \ + if whiptail --backtitle "Proxmox VE Helper Scripts" \ --title "DIAGNOSTIC SETTINGS" \ --yesno "Send Diagnostics?\n\nCurrent: ${DIAGNOSTICS}" 10 58 \ --yes-button "No" --no-button "Back"; then @@ -2671,7 +2818,7 @@ diagnostics_menu() { whiptail --msgbox "Diagnostics set to ${DIAGNOSTICS}." 
8 58 fi else - if whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \ + if whiptail --backtitle "Proxmox VE Helper Scripts" \ --title "DIAGNOSTIC SETTINGS" \ --yesno "Send Diagnostics?\n\nCurrent: ${DIAGNOSTICS}" 10 58 \ --yes-button "Yes" --no-button "Back"; then @@ -2701,8 +2848,8 @@ echo_default() { echo -e "${DISKSIZE}${BOLD}${DGN}Disk Size: ${BGN}${DISK_SIZE} GB${CL}" echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}${CORE_COUNT}${CL}" echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}${RAM_SIZE} MiB${CL}" - if [ "${var_gpu:-no}" == "yes" ]; then - echo -e "🎮${BOLD}${DGN} GPU Passthrough: ${BGN}Enabled${CL}" + if [[ -n "${var_gpu:-}" && "${var_gpu}" == "yes" ]]; then + echo -e "${GPU}${BOLD}${DGN}GPU Passthrough: ${BGN}Enabled${CL}" fi if [ "$VERBOSE" == "yes" ]; then echo -e "${SEARCH}${BOLD}${DGN}Verbose Mode: ${BGN}Enabled${CL}" @@ -2743,8 +2890,7 @@ install_script() { else timezone="UTC" fi - # pct doesn't accept Etc/* zones - map to 'host' instead - [[ "${timezone:-}" == Etc/* ]] && timezone="host" + [[ "${timezone:-}" == Etc/* ]] && timezone="host" # pct doesn't accept Etc/* zones # Show APP Header header_info @@ -2808,7 +2954,6 @@ install_script() { 2 | advanced | ADVANCED) header_info echo -e "${ADVANCED}${BOLD}${RD}Using Advanced Install on node $PVEHOST_NAME${CL}" - echo -e "${INFO}${BOLD}${DGN}PVE Version ${PVEVERSION} (Kernel: ${KERNEL_VERSION})${CL}" METHOD="advanced" base_settings advanced_settings @@ -2829,8 +2974,8 @@ install_script() { header_info echo -e "${DEFAULT}${BOLD}${BL}Using App Defaults for ${APP} on node $PVEHOST_NAME${CL}" METHOD="appdefaults" + load_vars_file "$(get_app_defaults_path)" "yes" # Force override script defaults base_settings - load_vars_file "$(get_app_defaults_path)" echo_default defaults_target="$(get_app_defaults_path)" break @@ -2971,9 +3116,9 @@ ssh_extract_keys_from_file() { tr -d '\r' <"$f" | awk ' /^[[:space:]]*#/ {next} /^[[:space:]]*$/ {next} - # nackt: typ base64 [comment] + # bare format: type base64 [comment] /^(ssh-(rsa|ed25519)|ecdsa-sha2-nistp256|sk-(ssh-ed25519|ecdsa-sha2-nistp256))[[:space:]]+/ {print; next} - # mit Optionen: finde ab erstem Key-Typ + # with options: find from first key-type onward { match($0, /(ssh-(rsa|ed25519)|ecdsa-sha2-nistp256|sk-(ssh-ed25519|ecdsa-sha2-nistp256))[[:space:]]+/) if (RSTART>0) { print substr($0, RSTART) } @@ -3047,8 +3192,8 @@ ssh_discover_default_files() { configure_ssh_settings() { local step_info="${1:-}" - local backtitle="[dev] Proxmox VE Helper Scripts" - [[ -n "$step_info" ]] && backtitle="[dev] Proxmox VE Helper Scripts [${step_info}]" + local backtitle="Proxmox VE Helper Scripts" + [[ -n "$step_info" ]] && backtitle="Proxmox VE Helper Scripts [${step_info}]" SSH_KEYS_FILE="$(mktemp)" : >"$SSH_KEYS_FILE" @@ -3157,7 +3302,7 @@ start() { update_script cleanup_lxc else - CHOICE=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --title "${APP} LXC Update/Setting" --menu \ + CHOICE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "${APP} LXC Update/Setting" --menu \ "Support/Update functions for ${APP} LXC. 
Choose an option:" \ 12 60 3 \ "1" "YES (Silent Mode)" \ @@ -3215,7 +3360,7 @@ build_container() { esac fi - # IP (immer zwingend, Standard dhcp) + # IP (always required, default dhcp) NET_STRING+=",ip=${NET:-dhcp}" # Gateway @@ -3272,14 +3417,14 @@ build_container() { FEATURES="${FEATURES}fuse=1" fi - # NEW IMPLEMENTATION (Fixed): Build PCT_OPTIONS properly - # Key insight: Bash cannot export arrays, so we build the options as a string - + # Build PCT_OPTIONS as string for export TEMP_DIR=$(mktemp -d) pushd "$TEMP_DIR" >/dev/null - - # Unified install.func automatically detects OS type (debian, alpine, fedora, etc.) - export FUNCTIONS_FILE_PATH="$(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/install.func)" + if [ "$var_os" == "alpine" ]; then + export FUNCTIONS_FILE_PATH="$(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/alpine-install.func)" + else + export FUNCTIONS_FILE_PATH="$(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/install.func)" + fi # Core exports for install.func export DIAGNOSTICS="$DIAGNOSTICS" @@ -3298,11 +3443,11 @@ build_container() { export CTTYPE="$CT_TYPE" export ENABLE_FUSE="$ENABLE_FUSE" export ENABLE_TUN="$ENABLE_TUN" - export ENABLE_GPU="$ENABLE_GPU" - export IPV6_METHOD="$IPV6_METHOD" export PCT_OSTYPE="$var_os" export PCT_OSVERSION="$var_version" export PCT_DISK_SIZE="$DISK_SIZE" + export IPV6_METHOD="$IPV6_METHOD" + export ENABLE_GPU="$ENABLE_GPU" # DEV_MODE exports (optional, for debugging) export BUILD_LOG="$BUILD_LOG" @@ -3316,38 +3461,21 @@ build_container() { export DEV_MODE_LOGS="${DEV_MODE_LOGS:-false}" export DEV_MODE_DRYRUN="${DEV_MODE_DRYRUN:-false}" - # Validate storage space before container creation - if [[ -n "$CONTAINER_STORAGE" ]]; then - msg_info "Validating storage space" - if ! validate_storage_space "$CONTAINER_STORAGE" "$DISK_SIZE" "no"; then - local free_space - free_space=$(pvesm status 2>/dev/null | awk -v s="$CONTAINER_STORAGE" '$1 == s { print $6 }') - local free_fmt - free_fmt=$(numfmt --to=iec --from-unit=1024 --suffix=B --format %.1f "$free_space" 2>/dev/null || echo "${free_space}KB") - msg_error "Not enough space on '$CONTAINER_STORAGE'. 
Required: ${DISK_SIZE}GB, Available: ${free_fmt}" - exit 214 - fi - msg_ok "Storage space validated" - fi - # Build PCT_OPTIONS as multi-line string - PCT_OPTIONS_STRING="" + PCT_OPTIONS_STRING=" -hostname $HN" - # Add features if set - if [ -n "$FEATURES" ]; then - PCT_OPTIONS_STRING=" -features $FEATURES" - fi - - # Add hostname - PCT_OPTIONS_STRING="$PCT_OPTIONS_STRING - -hostname $HN" - - # Add tags if set + # Only add -tags if TAGS is not empty if [ -n "$TAGS" ]; then PCT_OPTIONS_STRING="$PCT_OPTIONS_STRING -tags $TAGS" fi + # Only add -features if FEATURES is not empty + if [ -n "$FEATURES" ]; then + PCT_OPTIONS_STRING=" -features $FEATURES +$PCT_OPTIONS_STRING" + fi + # Add storage if specified if [ -n "$SD" ]; then PCT_OPTIONS_STRING="$PCT_OPTIONS_STRING @@ -3374,10 +3502,12 @@ build_container() { -protection 1" fi - # Timezone flag (if var_timezone was set) + # Timezone (map Etc/* to "host" as pct doesn't accept them) if [ -n "${CT_TIMEZONE:-}" ]; then + local _pct_timezone="$CT_TIMEZONE" + [[ "$_pct_timezone" == Etc/* ]] && _pct_timezone="host" PCT_OPTIONS_STRING="$PCT_OPTIONS_STRING - -timezone $CT_TIMEZONE" + -timezone $_pct_timezone" fi # Password (already formatted) @@ -3391,10 +3521,20 @@ build_container() { export TEMPLATE_STORAGE="${var_template_storage:-}" export CONTAINER_STORAGE="${var_container_storage:-}" - # # DEBUG: Show final PCT_OPTIONS being exported - # echo "[DEBUG] PCT_OPTIONS to be exported:" - # echo "$PCT_OPTIONS" | sed 's/^/ /' - # echo "[DEBUG] Calling create_lxc_container..." + # Validate storage space only if CONTAINER_STORAGE is already set + # (Storage selection happens in create_lxc_container for some modes) + if [[ -n "$CONTAINER_STORAGE" ]]; then + msg_info "Validating storage space" + if ! validate_storage_space "$CONTAINER_STORAGE" "$DISK_SIZE" "no"; then + local free_space + free_space=$(pvesm status 2>/dev/null | awk -v s="$CONTAINER_STORAGE" '$1 == s { print $6 }') + local free_fmt + free_fmt=$(numfmt --to=iec --from-unit=1024 --suffix=B --format %.1f "$free_space" 2>/dev/null || echo "${free_space}KB") + msg_error "Not enough space on '$CONTAINER_STORAGE'. Required: ${DISK_SIZE}GB, Available: ${free_fmt}" + exit 214 + fi + msg_ok "Storage space validated" + fi create_lxc_container || exit $? 
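
For context on the relocated space check above: `pvesm status` reports its size columns in KiB, which is why the error path converts with `numfmt --from-unit=1024`. A minimal, standalone illustration of that parsing follows — the pool name and figures are placeholder examples, not values taken from this repo:

```bash
# Illustrative only — mirrors the awk/numfmt pipeline used in build.func.
# Column 6 of `pvesm status` is the available space (KiB) for the named pool.
STORAGE="local-lvm"   # example pool name (assumption)
free_kib=$(pvesm status 2>/dev/null | awk -v s="$STORAGE" '$1 == s { print $6 }')
numfmt --to=iec --from-unit=1024 --suffix=B --format %.1f "$free_kib"
# e.g. prints "50.0GB" when roughly 52428800 KiB are free
```
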
@@ -3453,12 +3593,16 @@ build_container() { msg_custom "🎮" "${GN}" "Detected NVIDIA GPU" # Simple passthrough - just bind /dev/nvidia* devices if they exist - # Skip directories like /dev/nvidia-caps (they need special handling) + # Only include character devices (-c), skip directories like /dev/nvidia-caps for d in /dev/nvidia*; do - [[ -e "$d" ]] || continue - [[ -d "$d" ]] && continue # Skip directories - NVIDIA_DEVICES+=("$d") + [[ -c "$d" ]] && NVIDIA_DEVICES+=("$d") done + # Also check for devices inside /dev/nvidia-caps/ directory + if [[ -d /dev/nvidia-caps ]]; then + for d in /dev/nvidia-caps/*; do + [[ -c "$d" ]] && NVIDIA_DEVICES+=("$d") + done + fi if [[ ${#NVIDIA_DEVICES[@]} -gt 0 ]]; then msg_custom "🎮" "${GN}" "Found ${#NVIDIA_DEVICES[@]} NVIDIA device(s) for passthrough" @@ -3691,7 +3835,6 @@ EOF msg_ok "Network in LXC is reachable (ping)" fi fi - # Function to get correct GID inside container get_container_gid() { local group="$1" @@ -3713,176 +3856,41 @@ EOF # install_gpu_userland "NVIDIA" # fi - # Continue with standard container setup - install core dependencies based on OS - sleep 3 - - case "$var_os" in - alpine) + # Continue with standard container setup + if [ "$var_os" == "alpine" ]; then + sleep 3 pct exec "$CTID" -- /bin/sh -c 'cat </etc/apk/repositories http://dl-cdn.alpinelinux.org/alpine/latest-stable/main http://dl-cdn.alpinelinux.org/alpine/latest-stable/community EOF' pct exec "$CTID" -- ash -c "apk add bash newt curl openssh nano mc ncurses jq >/dev/null" - ;; + else + sleep 3 + LANG=${LANG:-en_US.UTF-8} + pct exec "$CTID" -- bash -c "sed -i \"/$LANG/ s/^# //\" /etc/locale.gen" + pct exec "$CTID" -- bash -c "locale_line=\$(grep -v '^#' /etc/locale.gen | grep -E '^[a-zA-Z]' | awk '{print \$1}' | head -n 1) && \ + echo LANG=\$locale_line >/etc/default/locale && \ + locale-gen >/dev/null && \ + export LANG=\$locale_line" - debian | ubuntu | devuan) - # First install locales package (required for locale-gen on minimal templates) - pct exec "$CTID" -- bash -c "apt-get update >/dev/null && apt-get install -y locales >/dev/null 2>&1 || true" - - # Locale setup for Debian-based - pct exec "$CTID" -- bash -c "sed -i '/$LANG/ s/^# //' /etc/locale.gen 2>/dev/null || true" - pct exec "$CTID" -- bash -c "locale_line=\$(grep -v '^#' /etc/locale.gen 2>/dev/null | grep -E '^[a-zA-Z]' | awk '{print \$1}' | head -n 1) && \ - [[ -n \"\$locale_line\" ]] && echo LANG=\$locale_line >/etc/default/locale && \ - locale-gen >/dev/null 2>&1 && \ - export LANG=\$locale_line || true" - - # Timezone setup if [[ -z "${tz:-}" ]]; then - tz=$(timedatectl show --property=Timezone --value 2>/dev/null || echo "Etc/UTC") + tz=$(timedatectl show --property=Timezone --value 2>/dev/null || echo "UTC") fi + [[ "${tz:-}" == Etc/* ]] && tz="UTC" # Normalize Etc/* to UTC for container setup + if pct exec "$CTID" -- test -e "/usr/share/zoneinfo/$tz"; then + # Set timezone using symlink (Debian 13+ compatible) + # Create /etc/timezone for backwards compatibility with older scripts pct exec "$CTID" -- bash -c "tz='$tz'; ln -sf \"/usr/share/zoneinfo/\$tz\" /etc/localtime && echo \"\$tz\" >/etc/timezone || true" else msg_warn "Skipping timezone setup – zone '$tz' not found in container" fi - # Core dependencies pct exec "$CTID" -- bash -c "apt-get update >/dev/null && apt-get install -y sudo curl mc gnupg2 jq >/dev/null" || { msg_error "apt-get base packages installation failed" exit 1 } - ;; - - fedora | rockylinux | almalinux | centos) - # RHEL-based: Fedora, Rocky, AlmaLinux, CentOS - # Detect OS 
major version for EL10+ compatibility (DNF 5, different packages) - local rhel_version - rhel_version=$(pct exec "$CTID" -- bash -c "grep -oP '(?<=VERSION_ID=\")[0-9]+' /etc/os-release 2>/dev/null || echo 9") - - # First run makecache to ensure repos are ready (critical for fresh templates) - msg_info "Initializing package manager (this may take a moment)..." - if ! pct exec "$CTID" -- bash -c "dnf makecache --refresh 2>&1 || yum makecache 2>&1" >/dev/null 2>&1; then - msg_warn "Package cache update had issues, continuing anyway..." - fi - - # Build package list - EL10+ may not have glibc-langpack-en in same form - local rhel_packages="curl sudo mc jq which tar procps-ng ncurses" - if [[ "$rhel_version" -lt 10 ]]; then - rhel_packages="$rhel_packages glibc-langpack-en" - else - # EL10 uses glibc-all-langpacks or langpacks-en - rhel_packages="$rhel_packages langpacks-en glibc-all-langpacks" - fi - - # Install base packages with better error handling - local install_log="/tmp/dnf_install_${CTID}.log" - if ! pct exec "$CTID" -- bash -c "dnf install -y $rhel_packages 2>&1 | tee $install_log; exit \${PIPESTATUS[0]}" >/dev/null 2>&1; then - # Check if it's just missing optional packages - if pct exec "$CTID" -- bash -c "rpm -q curl sudo mc jq which tar procps-ng" >/dev/null 2>&1; then - msg_warn "Some optional packages may have failed, but core packages installed" - else - # Real failure - try minimal install - msg_warn "Full package install failed, trying minimal set..." - if ! pct exec "$CTID" -- bash -c "dnf install -y curl sudo jq which tar 2>&1" >/dev/null 2>&1; then - msg_error "dnf/yum base packages installation failed" - pct exec "$CTID" -- bash -c "cat $install_log 2>/dev/null" || true - exit 1 - fi - fi - fi - - # Set locale for RHEL-based systems - pct exec "$CTID" -- bash -c "localectl set-locale LANG=en_US.UTF-8 2>/dev/null || echo 'LANG=en_US.UTF-8' > /etc/locale.conf" || true - - # Timezone setup for RHEL - if [[ -z "${tz:-}" ]]; then - tz=$(timedatectl show --property=Timezone --value 2>/dev/null || echo "Etc/UTC") - fi - [[ "${tz:-}" == Etc/* ]] && tz="UTC" - if pct exec "$CTID" -- test -e "/usr/share/zoneinfo/$tz"; then - pct exec "$CTID" -- bash -c "timedatectl set-timezone '$tz' 2>/dev/null || ln -sf '/usr/share/zoneinfo/$tz' /etc/localtime" || true - fi - ;; - - opensuse) - # openSUSE - special handling for terminal/locale issues - # Use --gpg-auto-import-keys to avoid interactive prompts that cause hangs - msg_info "Initializing package manager for openSUSE..." - pct exec "$CTID" -- bash -c "zypper --gpg-auto-import-keys --non-interactive refresh 2>&1" >/dev/null 2>&1 || true - - # Install packages - ncurses and terminfo are CRITICAL for terminal to work - if ! pct exec "$CTID" -- bash -c "zypper --gpg-auto-import-keys --non-interactive install -y curl sudo mc jq glibc-locale ncurses terminfo-base 2>&1" >/dev/null 2>&1; then - # Try without glibc-locale - if ! 
pct exec "$CTID" -- bash -c "zypper --gpg-auto-import-keys --non-interactive install -y curl sudo mc jq ncurses terminfo-base 2>&1" >/dev/null 2>&1; then - msg_error "zypper base packages installation failed" - exit 1 - fi - fi - - # Fix 'unknown terminal type' error - set TERM in multiple places - pct exec "$CTID" -- bash -c "localectl set-locale LANG=en_US.UTF-8 2>/dev/null || echo 'LANG=en_US.UTF-8' > /etc/locale.conf" || true - - # Set TERM globally for all users - pct exec "$CTID" -- bash -c "cat > /etc/profile.d/term.sh << 'EOFTERM' -# Fix terminal type for LXC containers -if [ -z \"\$TERM\" ] || [ \"\$TERM\" = \"dumb\" ] || [ \"\$TERM\" = \"-\" ]; then - export TERM=xterm-256color -fi -EOFTERM -chmod +x /etc/profile.d/term.sh" || true - - # Also set in /etc/environment for non-login shells - pct exec "$CTID" -- bash -c "grep -q '^TERM=' /etc/environment 2>/dev/null || echo 'TERM=xterm-256color' >> /etc/environment" || true - ;; - - gentoo) - # Gentoo - OpenRC based, emerge is slow - # Use emerge-webrsync (faster, uses http instead of rsync) - msg_info "Syncing Gentoo portage via webrsync (faster than rsync)..." - pct exec "$CTID" -- bash -c "emerge-webrsync 2>&1" >/dev/null 2>&1 || { - msg_warn "emerge-webrsync failed, trying emerge --sync..." - pct exec "$CTID" -- bash -c "emerge --sync 2>&1" >/dev/null 2>&1 || true - } - - # Install curl FIRST - it's required for install.func to work - msg_info "Installing essential packages for Gentoo..." - if ! pct exec "$CTID" -- bash -c "emerge --quiet --noreplace net-misc/curl 2>&1" >/dev/null 2>&1; then - msg_error "Failed to install curl on Gentoo - this is required" - exit 1 - fi - - # Install remaining packages - pct exec "$CTID" -- bash -c "emerge --quiet --noreplace app-misc/jq app-misc/mc sys-libs/ncurses 2>&1" >/dev/null 2>&1 || { - msg_warn "Some Gentoo packages may need manual setup" - } - - # Set TERM for Gentoo - pct exec "$CTID" -- bash -c "echo 'export TERM=xterm-256color' >> /etc/profile.d/term.sh && chmod +x /etc/profile.d/term.sh" || true - ;; - - openeuler) - # openEuler (RHEL-compatible, uses DNF) - # Note: Template was patched with /etc/redhat-release in create_container - msg_info "Initializing package manager for openEuler..." - pct exec "$CTID" -- bash -c "dnf makecache --refresh 2>&1" >/dev/null 2>&1 || true - - # openEuler package names may differ from RHEL - local euler_packages="curl sudo mc jq procps-ng ncurses" - if ! pct exec "$CTID" -- bash -c "dnf install -y $euler_packages 2>&1" >/dev/null 2>&1; then - # Try without procps-ng (might be just 'procps' in openEuler) - if ! pct exec "$CTID" -- bash -c "dnf install -y curl sudo mc jq ncurses 2>&1" >/dev/null 2>&1; then - msg_error "dnf base packages installation failed" - exit 1 - fi - fi - # Set locale - pct exec "$CTID" -- bash -c "echo 'LANG=en_US.UTF-8' > /etc/locale.conf" || true - ;; - - *) - msg_warn "Unknown OS '$var_os' - skipping core dependency installation" - ;; - esac + fi msg_ok "Customized LXC Container" @@ -3890,12 +3898,7 @@ chmod +x /etc/profile.d/term.sh" || true install_ssh_keys_into_ct # Run application installer - # NOTE: We disable error handling here because: - # 1. Container errors are caught by error_handler INSIDE container - # 2. Container creates flag file with exit code - # 3. We read flag file and handle cleanup manually below - # 4. 
We DON'T want host error_handler to fire for lxc-attach command itself - + # Disable error trap - container errors are handled internally via flag file set +Eeuo pipefail # Disable ALL error handling temporarily trap - ERR # Remove ERR trap completely @@ -3965,35 +3968,129 @@ chmod +x /etc/profile.d/term.sh" || true exit $install_exit_code fi - # Report failure to API before container cleanup - post_update_to_api "failed" "$install_exit_code" - # Prompt user for cleanup with 60s timeout (plain echo - no msg_info to avoid spinner) echo "" - echo -en "${YW}Remove broken container ${CTID}? (Y/n) [auto-remove in 60s]: ${CL}" + + # Detect error type for smart recovery options + local is_oom=false + local error_explanation="" + if declare -f explain_exit_code >/dev/null 2>&1; then + error_explanation="$(explain_exit_code "$install_exit_code")" + fi + + # OOM detection: exit codes 134 (SIGABRT/heap), 137 (SIGKILL/OOM), 243 (Node.js heap) + if [[ $install_exit_code -eq 134 || $install_exit_code -eq 137 || $install_exit_code -eq 243 ]]; then + is_oom=true + fi + + # Show error explanation if available + if [[ -n "$error_explanation" ]]; then + echo -e "${TAB}${RD}Error: ${error_explanation}${CL}" + echo "" + fi + + # Build recovery menu based on error type + echo -e "${YW}What would you like to do?${CL}" + echo "" + echo -e " ${GN}1)${CL} Remove container and exit" + echo -e " ${GN}2)${CL} Keep container for debugging" + echo -e " ${GN}3)${CL} Retry with verbose mode" + if [[ "$is_oom" == true ]]; then + local new_ram=$((RAM_SIZE * 3 / 2)) + local new_cpu=$((CORE_COUNT + 1)) + echo -e " ${GN}4)${CL} Retry with more resources (RAM: ${RAM_SIZE}→${new_ram} MiB, CPU: ${CORE_COUNT}→${new_cpu} cores)" + fi + echo "" + echo -en "${YW}Select option [1-$([[ "$is_oom" == true ]] && echo "4" || echo "3")] (default: 1, auto-remove in 60s): ${CL}" if read -t 60 -r response; then - if [[ -z "$response" || "$response" =~ ^[Yy]$ ]]; then - # Remove container - echo -e "\n${TAB}${HOLD}${YW}Removing container ${CTID}${CL}" - pct stop "$CTID" &>/dev/null || true - pct destroy "$CTID" &>/dev/null || true - echo -e "${BFR}${CM}${GN}Container ${CTID} removed${CL}" - elif [[ "$response" =~ ^[Nn]$ ]]; then - echo -e "\n${TAB}${YW}Container ${CTID} kept for debugging${CL}" - - # Dev mode: Setup MOTD/SSH for debugging access to broken container - if [[ "${DEV_MODE_MOTD:-false}" == "true" ]]; then - echo -e "${TAB}${HOLD}${DGN}Setting up MOTD and SSH for debugging...${CL}" - if pct exec "$CTID" -- bash -c " - source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/install.func) - declare -f motd_ssh >/dev/null 2>&1 && motd_ssh || true - " >/dev/null 2>&1; then - local ct_ip=$(pct exec "$CTID" ip a s dev eth0 2>/dev/null | awk '/inet / {print $2}' | cut -d/ -f1) - echo -e "${BFR}${CM}${GN}MOTD/SSH ready - SSH into container: ssh root@${ct_ip}${CL}" + case "${response:-1}" in + 1) + # Remove container + echo -e "\n${TAB}${HOLD}${YW}Removing container ${CTID}${CL}" + pct stop "$CTID" &>/dev/null || true + pct destroy "$CTID" &>/dev/null || true + echo -e "${BFR}${CM}${GN}Container ${CTID} removed${CL}" + ;; + 2) + echo -e "\n${TAB}${YW}Container ${CTID} kept for debugging${CL}" + # Dev mode: Setup MOTD/SSH for debugging access to broken container + if [[ "${DEV_MODE_MOTD:-false}" == "true" ]]; then + echo -e "${TAB}${HOLD}${DGN}Setting up MOTD and SSH for debugging...${CL}" + if pct exec "$CTID" -- bash -c " + source <(curl -fsSL 
https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/install.func) + declare -f motd_ssh >/dev/null 2>&1 && motd_ssh || true + " >/dev/null 2>&1; then + local ct_ip=$(pct exec "$CTID" ip a s dev eth0 2>/dev/null | awk '/inet / {print $2}' | cut -d/ -f1) + echo -e "${BFR}${CM}${GN}MOTD/SSH ready - SSH into container: ssh root@${ct_ip}${CL}" + fi fi - fi - fi + exit $install_exit_code + ;; + 3) + # Retry with verbose mode + echo -e "\n${TAB}${HOLD}${YW}Removing container ${CTID} for rebuild...${CL}" + pct stop "$CTID" &>/dev/null || true + pct destroy "$CTID" &>/dev/null || true + echo -e "${BFR}${CM}${GN}Container ${CTID} removed${CL}" + echo "" + # Get new container ID + local old_ctid="$CTID" + export CTID=$(get_valid_container_id "$CTID") + export VERBOSE="yes" + export var_verbose="yes" + + # Show rebuild summary + echo -e "${YW}Rebuilding with preserved settings:${CL}" + echo -e " Container ID: ${old_ctid} → ${CTID}" + echo -e " RAM: ${RAM_SIZE} MiB | CPU: ${CORE_COUNT} cores | Disk: ${DISK_SIZE} GB" + echo -e " Network: ${NET:-dhcp} | Bridge: ${BRG:-vmbr0}" + echo -e " Verbose: ${GN}enabled${CL}" + echo "" + msg_info "Restarting installation..." + # Re-run build_container + build_container + return $? + ;; + 4) + if [[ "$is_oom" == true ]]; then + # Retry with more resources + echo -e "\n${TAB}${HOLD}${YW}Removing container ${CTID} for rebuild with more resources...${CL}" + pct stop "$CTID" &>/dev/null || true + pct destroy "$CTID" &>/dev/null || true + echo -e "${BFR}${CM}${GN}Container ${CTID} removed${CL}" + echo "" + # Get new container ID and increase resources + local old_ctid="$CTID" + local old_ram="$RAM_SIZE" + local old_cpu="$CORE_COUNT" + export CTID=$(get_valid_container_id "$CTID") + export RAM_SIZE=$((RAM_SIZE * 3 / 2)) + export CORE_COUNT=$((CORE_COUNT + 1)) + export var_ram="$RAM_SIZE" + export var_cpu="$CORE_COUNT" + + # Show rebuild summary + echo -e "${YW}Rebuilding with increased resources:${CL}" + echo -e " Container ID: ${old_ctid} → ${CTID}" + echo -e " RAM: ${old_ram} → ${GN}${RAM_SIZE}${CL} MiB (+50%)" + echo -e " CPU: ${old_cpu} → ${GN}${CORE_COUNT}${CL} cores (+1)" + echo -e " Disk: ${DISK_SIZE} GB | Network: ${NET:-dhcp} | Bridge: ${BRG:-vmbr0}" + echo "" + msg_info "Restarting installation..." + # Re-run build_container + build_container + return $? + else + echo -e "\n${TAB}${YW}Invalid option. Container ${CTID} kept.${CL}" + exit $install_exit_code + fi + ;; + *) + echo -e "\n${TAB}${YW}Invalid option. Container ${CTID} kept.${CL}" + exit $install_exit_code + ;; + esac else # Timeout - auto-remove echo -e "\n${YW}No response - auto-removing container${CL}" @@ -4013,12 +4110,12 @@ destroy_lxc() { return 1 fi - # Abbruch bei Ctrl-C / Ctrl-D / ESC + # Abort on Ctrl-C / Ctrl-D / ESC trap 'echo; msg_error "Aborted by user (SIGINT/SIGQUIT)"; return 130' INT QUIT local prompt if ! read -rp "Remove this Container? 
" prompt; then - # read gibt != 0 zurück bei Ctrl-D/ESC + # read returns non-zero on Ctrl-D/ESC msg_error "Aborted input (Ctrl-D/ESC)" return 130 fi @@ -4044,7 +4141,6 @@ destroy_lxc() { # ------------------------------------------------------------------------------ # Storage discovery / selection helpers # ------------------------------------------------------------------------------ -# ===== Storage discovery / selection helpers (ported from create_lxc.sh) ===== resolve_storage_preselect() { local class="$1" preselect="$2" required_content="" case "$class" in @@ -4120,15 +4216,14 @@ fix_gpu_gids() { # For privileged containers: also fix permissions inside container if [[ "$CT_TYPE" == "0" ]]; then - pct exec "$CTID" -- bash -c " + pct exec "$CTID" -- sh -c " if [ -d /dev/dri ]; then for dev in /dev/dri/*; do if [ -e \"\$dev\" ]; then - if [[ \"\$dev\" =~ renderD ]]; then - chgrp ${render_gid} \"\$dev\" 2>/dev/null || true - else - chgrp ${video_gid} \"\$dev\" 2>/dev/null || true - fi + case \"\$dev\" in + *renderD*) chgrp ${render_gid} \"\$dev\" 2>/dev/null || true ;; + *) chgrp ${video_gid} \"\$dev\" 2>/dev/null || true ;; + esac chmod 660 \"\$dev\" 2>/dev/null || true fi done @@ -4203,18 +4298,13 @@ select_storage() { if [[ $((${#MENU[@]} / 3)) -eq 1 ]]; then STORAGE_RESULT="${STORAGE_MAP[${MENU[0]}]}" STORAGE_INFO="${MENU[1]}" - - # Validate storage space for auto-picked container storage - if [[ "$CLASS" == "container" && -n "${DISK_SIZE:-}" ]]; then - validate_storage_space "$STORAGE_RESULT" "$DISK_SIZE" "yes" - fi return 0 fi local WIDTH=$((COL_WIDTH + 42)) while true; do local DISPLAY_SELECTED - DISPLAY_SELECTED=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \ + DISPLAY_SELECTED=$(whiptail --backtitle "Proxmox VE Helper Scripts" \ --title "Storage Pools" \ --radiolist "Which storage pool for ${CONTENT_LABEL,,}?\n(Spacebar to select)" \ 16 "$WIDTH" 6 "${MENU[@]}" 3>&1 1>&2 2>&3) || { exit_script; } @@ -4235,7 +4325,9 @@ select_storage() { # Validate storage space for container storage if [[ "$CLASS" == "container" && -n "${DISK_SIZE:-}" ]]; then validate_storage_space "$STORAGE_RESULT" "$DISK_SIZE" "yes" + # Continue even if validation fails - user was warned fi + return 0 done } @@ -4298,6 +4390,18 @@ validate_storage_space() { return 0 } +# ============================================================================== +# SECTION 8: CONTAINER CREATION +# ============================================================================== + +# ------------------------------------------------------------------------------ +# create_lxc_container() +# +# - Main function for creating LXC containers +# - Handles all phases: validation, template discovery, container creation, +# network config, storage, etc. +# - Extensive error checking with detailed exit codes +# ------------------------------------------------------------------------------ create_lxc_container() { # ------------------------------------------------------------------------------ # Optional verbose mode (debug tracing) @@ -4469,14 +4573,6 @@ create_lxc_container() { fi msg_ok "Template storage '$TEMPLATE_STORAGE' validated" - # Free space check - STORAGE_FREE=$(pvesm status | awk -v s="$CONTAINER_STORAGE" '$1 == s { print $6 }') - REQUIRED_KB=$((${PCT_DISK_SIZE:-8} * 1024 * 1024)) - [[ "$STORAGE_FREE" -ge "$REQUIRED_KB" ]] || { - msg_error "Not enough space on '$CONTAINER_STORAGE'. Needed: ${PCT_DISK_SIZE:-8}G." 
- exit 214 - } - # Cluster quorum (if cluster) if [[ -f /etc/pve/corosync.conf ]]; then msg_info "Checking cluster quorum" @@ -4489,81 +4585,52 @@ create_lxc_container() { # ------------------------------------------------------------------------------ # Template discovery & validation - # Supported OS types (pveam available): alpine, almalinux, centos, debian, - # devuan, fedora, gentoo, openeuler, opensuse, rockylinux, ubuntu - # Template naming conventions: - # - Debian/Ubuntu/Devuan: --standard__.tar.zst - # - Alpine/Fedora/Rocky/CentOS/AlmaLinux/openEuler: --default__.tar.xz - # - Gentoo: gentoo-current-openrc__.tar.xz (note: underscore before date!) - # - openSUSE: opensuse--default__.tar.xz - # - CentOS: centos--stream-default__.tar.xz (note: stream in name) # ------------------------------------------------------------------------------ TEMPLATE_SEARCH="${PCT_OSTYPE}-${PCT_OSVERSION:-}" case "$PCT_OSTYPE" in - debian | ubuntu | devuan) TEMPLATE_PATTERN="-standard_" ;; - alpine | fedora | rockylinux | almalinux | openeuler) TEMPLATE_PATTERN="-default_" ;; - centos) TEMPLATE_PATTERN="-stream-default_" ;; - gentoo) TEMPLATE_PATTERN="-openrc_" ;; # Pattern: gentoo-current-openrc_ (underscore!) - opensuse) TEMPLATE_PATTERN="-default_" ;; + debian | ubuntu) TEMPLATE_PATTERN="-standard_" ;; + alpine | fedora | rocky | centos) TEMPLATE_PATTERN="-default_" ;; *) TEMPLATE_PATTERN="" ;; esac msg_info "Searching for template '$TEMPLATE_SEARCH'" - # Build regex patterns outside awk/grep for clarity - SEARCH_PATTERN="^${TEMPLATE_SEARCH}" - - #echo "[DEBUG] TEMPLATE_SEARCH='$TEMPLATE_SEARCH'" - #echo "[DEBUG] SEARCH_PATTERN='$SEARCH_PATTERN'" - #echo "[DEBUG] TEMPLATE_PATTERN='$TEMPLATE_PATTERN'" + # Initialize variables + ONLINE_TEMPLATE="" + ONLINE_TEMPLATES=() + # Step 1: Check local templates first (instant) mapfile -t LOCAL_TEMPLATES < <( pveam list "$TEMPLATE_STORAGE" 2>/dev/null | - awk -v search="${SEARCH_PATTERN}" -v pattern="${TEMPLATE_PATTERN}" '$1 ~ search && $1 ~ pattern {print $1}' | + awk -v search="${TEMPLATE_SEARCH}" -v pattern="${TEMPLATE_PATTERN}" '$1 ~ search && $1 ~ pattern {print $1}' | sed 's|.*/||' | sort -t - -k 2 -V ) - # Update template catalog with timeout to prevent hangs on slow networks - if command -v timeout &>/dev/null; then - if ! timeout 30 pveam update >/dev/null 2>&1; then - msg_warn "Template catalog update timed out or failed (continuing with cached data)" - fi - else - pveam update >/dev/null 2>&1 || msg_warn "Could not update template catalog (pveam update failed)." 
- fi - - msg_ok "Template search completed" - - #echo "[DEBUG] pveam available output (first 5 lines with .tar files):" - #pveam available -section system 2>/dev/null | grep -E '\.(tar\.zst|tar\.xz|tar\.gz)$' | head -5 | sed 's/^/ /' - - set +u - mapfile -t ONLINE_TEMPLATES < <(pveam available -section system 2>/dev/null | grep -E '\.(tar\.zst|tar\.xz|tar\.gz)$' | awk '{print $2}' | grep -E "${SEARCH_PATTERN}.*${TEMPLATE_PATTERN}" | sort -t - -k 2 -V 2>/dev/null || true) - #echo "[DEBUG] After filtering: ${#ONLINE_TEMPLATES[@]} online templates found" - set -u - - ONLINE_TEMPLATE="" - [[ ${#ONLINE_TEMPLATES[@]} -gt 0 ]] && ONLINE_TEMPLATE="${ONLINE_TEMPLATES[-1]}" - - #msg_debug "SEARCH_PATTERN='${SEARCH_PATTERN}' TEMPLATE_PATTERN='${TEMPLATE_PATTERN}'" - #msg_debug "Found ${#LOCAL_TEMPLATES[@]} local templates, ${#ONLINE_TEMPLATES[@]} online templates" - if [[ ${#ONLINE_TEMPLATES[@]} -gt 0 ]]; then - #msg_debug "First 3 online templates:" - count=0 - for idx in "${!ONLINE_TEMPLATES[@]}"; do - #msg_debug " [$idx]: ${ONLINE_TEMPLATES[$idx]}" - ((count++)) - [[ $count -ge 3 ]] && break - done - fi - #msg_debug "ONLINE_TEMPLATE='$ONLINE_TEMPLATE'" - + # Step 2: If local template found, use it immediately (skip pveam update) if [[ ${#LOCAL_TEMPLATES[@]} -gt 0 ]]; then TEMPLATE="${LOCAL_TEMPLATES[-1]}" TEMPLATE_SOURCE="local" + msg_ok "Template search completed" else + # Step 3: No local template - need to check online (this may be slow) + msg_info "No local template found, checking online catalog..." + + # Update catalog with timeout to prevent long hangs + if command -v timeout &>/dev/null; then + if ! timeout 30 pveam update >/dev/null 2>&1; then + msg_warn "Template catalog update timed out (possible network/DNS issue). Run 'pveam update' manually to diagnose." + fi + else + pveam update >/dev/null 2>&1 || msg_warn "Could not update template catalog (pveam update failed)" + fi + + ONLINE_TEMPLATES=() + mapfile -t ONLINE_TEMPLATES < <(pveam available -section system 2>/dev/null | grep -E '\.(tar\.zst|tar\.xz|tar\.gz)$' | awk '{print $2}' | grep -E "^${TEMPLATE_SEARCH}.*${TEMPLATE_PATTERN}" | sort -t - -k 2 -V 2>/dev/null || true) + [[ ${#ONLINE_TEMPLATES[@]} -gt 0 ]] && ONLINE_TEMPLATE="${ONLINE_TEMPLATES[-1]}" + TEMPLATE="$ONLINE_TEMPLATE" TEMPLATE_SOURCE="online" + msg_ok "Template search completed" fi # If still no template, try to find alternatives @@ -4572,27 +4639,15 @@ create_lxc_container() { echo "[DEBUG] No template found for ${PCT_OSTYPE} ${PCT_OSVERSION}, searching for alternatives..." 
# Get all available versions for this OS type - # Special handling for Gentoo which uses 'current' instead of numeric version - if [[ "$PCT_OSTYPE" == "gentoo" ]]; then - mapfile -t AVAILABLE_VERSIONS < <( - pveam available -section system 2>/dev/null | - grep -E '\.(tar\.zst|tar\.xz|tar\.gz)$' | - awk '{print $2}' | - grep "^gentoo-" | - sed -E 's/gentoo-([^-]+)-.*/\1/' | - sort -u 2>/dev/null || sort -u - ) - else - mapfile -t AVAILABLE_VERSIONS < <( - pveam available -section system 2>/dev/null | - grep -E '\.(tar\.zst|tar\.xz|tar\.gz)$' | - awk '{print $2}' | - grep "^${PCT_OSTYPE}-" | - sed -E "s/${PCT_OSTYPE}-([0-9]+(\.[0-9]+)?).*/\1/" | - grep -E '^[0-9]' | - sort -u -V 2>/dev/null || sort -u - ) - fi + AVAILABLE_VERSIONS=() + mapfile -t AVAILABLE_VERSIONS < <( + pveam available -section system 2>/dev/null | + grep -E '\.(tar\.zst|tar\.xz|tar\.gz)$' | + awk -F'\t' '{print $1}' | + grep "^${PCT_OSTYPE}-" | + sed -E "s/.*${PCT_OSTYPE}-([0-9]+(\.[0-9]+)?).*/\1/" | + sort -u -V 2>/dev/null + ) if [[ ${#AVAILABLE_VERSIONS[@]} -gt 0 ]]; then echo "" @@ -4606,22 +4661,19 @@ create_lxc_container() { if [[ "$choice" =~ ^[0-9]+$ ]] && [[ "$choice" -ge 1 ]] && [[ "$choice" -le ${#AVAILABLE_VERSIONS[@]} ]]; then PCT_OSVERSION="${AVAILABLE_VERSIONS[$((choice - 1))]}" TEMPLATE_SEARCH="${PCT_OSTYPE}-${PCT_OSVERSION}" - SEARCH_PATTERN="^${TEMPLATE_SEARCH}-" - - #echo "[DEBUG] Retrying with version: $PCT_OSVERSION" + ONLINE_TEMPLATES=() mapfile -t ONLINE_TEMPLATES < <( pveam available -section system 2>/dev/null | grep -E '\.(tar\.zst|tar\.xz|tar\.gz)$' | - awk -F'\t' '{print $1}' | - grep -E "${SEARCH_PATTERN}.*${TEMPLATE_PATTERN}" | + awk '{print $2}' | + grep -E "^${TEMPLATE_SEARCH}-.*${TEMPLATE_PATTERN}" | sort -t - -k 2 -V 2>/dev/null || true ) if [[ ${#ONLINE_TEMPLATES[@]} -gt 0 ]]; then TEMPLATE="${ONLINE_TEMPLATES[-1]}" TEMPLATE_SOURCE="online" - #echo "[DEBUG] Found alternative: $TEMPLATE" else msg_error "No templates available for ${PCT_OSTYPE} ${PCT_OSVERSION}" exit 225 @@ -4636,9 +4688,6 @@ create_lxc_container() { fi fi - #echo "[DEBUG] Selected TEMPLATE='$TEMPLATE' SOURCE='$TEMPLATE_SOURCE'" - #msg_debug "Selected TEMPLATE='$TEMPLATE' SOURCE='$TEMPLATE_SOURCE'" - TEMPLATE_PATH="$(pvesm path $TEMPLATE_STORAGE:vztmpl/$TEMPLATE 2>/dev/null || true)" if [[ -z "$TEMPLATE_PATH" ]]; then TEMPLATE_BASE=$(awk -v s="$TEMPLATE_STORAGE" '$1==s {f=1} f && /path/ {print $2; exit}' /etc/pve/storage.cfg) @@ -4679,18 +4728,17 @@ create_lxc_container() { # Retry template search with new version TEMPLATE_SEARCH="${PCT_OSTYPE}-${PCT_OSVERSION:-}" - SEARCH_PATTERN="^${TEMPLATE_SEARCH}-" mapfile -t LOCAL_TEMPLATES < <( pveam list "$TEMPLATE_STORAGE" 2>/dev/null | - awk -v search="${SEARCH_PATTERN}" -v pattern="${TEMPLATE_PATTERN}" '$1 ~ search && $1 ~ pattern {print $1}' | + awk -v search="${TEMPLATE_SEARCH}-" -v pattern="${TEMPLATE_PATTERN}" '$1 ~ search && $1 ~ pattern {print $1}' | sed 's|.*/||' | sort -t - -k 2 -V ) mapfile -t ONLINE_TEMPLATES < <( pveam available -section system 2>/dev/null | grep -E '\.(tar\.zst|tar\.xz|tar\.gz)$' | - awk -F'\t' '{print $1}' | - grep -E "${SEARCH_PATTERN}.*${TEMPLATE_PATTERN}" | + awk '{print $2}' | + grep -E "^${TEMPLATE_SEARCH}-.*${TEMPLATE_PATTERN}" | sort -t - -k 2 -V 2>/dev/null || true ) ONLINE_TEMPLATE="" @@ -4804,7 +4852,6 @@ create_lxc_container() { if [[ "$PCT_OSTYPE" == "debian" ]]; then OSVER="$(parse_template_osver "$TEMPLATE")" if [[ -n "$OSVER" ]]; then - # Proactive, aber ohne Abbruch – nur Angebot offer_lxc_stack_upgrade_and_maybe_retry "no" || true 
fi fi @@ -4825,7 +4872,7 @@ create_lxc_container() { -rootfs $CONTAINER_STORAGE:${PCT_DISK_SIZE:-8}" fi - # Lock by template file (avoid concurrent downloads/creates) + # Lock by template file (avoid concurrent template downloads/validation) lockfile="/tmp/template.${TEMPLATE}.lock" # Cleanup stale lock files (older than 1 hour - likely from crashed processes) @@ -4859,84 +4906,54 @@ create_lxc_container() { LOGFILE="/tmp/pct_create_${CTID}_$(date +%Y%m%d_%H%M%S)_${SESSION_ID}.log" - # ------------------------------------------------------------------------------ - # openEuler Template Patch: Create /etc/redhat-release inside template - # PVE's post_create_hook expects this file for RHEL-family OS detection - # Without it, container creation fails with "error in setup task" - # ------------------------------------------------------------------------------ - if [[ "${var_os:-}" == "openeuler" ]]; then - msg_info "Patching openEuler template for PVE compatibility..." - local TEMP_EXTRACT_DIR="/tmp/openeuler_template_patch_$$" - local PATCHED_TEMPLATE="${TEMPLATE_PATH%.tar.xz}_patched.tar.xz" - - # Only patch if not already patched - if [[ ! -f "$PATCHED_TEMPLATE" ]]; then - mkdir -p "$TEMP_EXTRACT_DIR" - - # Extract template - if tar -xf "$TEMPLATE_PATH" -C "$TEMP_EXTRACT_DIR" 2>/dev/null; then - # Create /etc/redhat-release if it doesn't exist - if [[ ! -f "$TEMP_EXTRACT_DIR/etc/redhat-release" ]]; then - echo "openEuler release ${var_version:-25.03}" >"$TEMP_EXTRACT_DIR/etc/redhat-release" - fi - - # Repack template - if tar -cJf "$PATCHED_TEMPLATE" -C "$TEMP_EXTRACT_DIR" . 2>/dev/null; then - # Replace original with patched version - mv "$PATCHED_TEMPLATE" "$TEMPLATE_PATH" - msg_ok "openEuler template patched successfully" - else - msg_warn "Failed to repack template, trying without patch..." - fi - else - msg_warn "Failed to extract template for patching, trying without patch..." - fi - - rm -rf "$TEMP_EXTRACT_DIR" + # Validate template before pct create (while holding lock) + if [[ ! -s "$TEMPLATE_PATH" || "$(stat -c%s "$TEMPLATE_PATH" 2>/dev/null || echo 0)" -lt 1000000 ]]; then + msg_info "Template file missing or too small – downloading" + rm -f "$TEMPLATE_PATH" + pveam download "$TEMPLATE_STORAGE" "$TEMPLATE" >/dev/null 2>&1 + msg_ok "Template downloaded" + elif ! tar -tf "$TEMPLATE_PATH" &>/dev/null; then + if [[ -n "$ONLINE_TEMPLATE" ]]; then + msg_info "Template appears corrupted – re-downloading" + rm -f "$TEMPLATE_PATH" + pveam download "$TEMPLATE_STORAGE" "$TEMPLATE" >/dev/null 2>&1 + msg_ok "Template re-downloaded" + else + msg_warn "Template appears corrupted, but no online version exists. Skipping re-download." fi fi - # # DEBUG: Show the actual command that will be executed - # echo "[DEBUG] ===== PCT CREATE COMMAND DETAILS =====" - # echo "[DEBUG] CTID: $CTID" - # echo "[DEBUG] Template: ${TEMPLATE_STORAGE}:vztmpl/${TEMPLATE}" - # echo "[DEBUG] PCT_OPTIONS (will be word-split):" - # echo "$PCT_OPTIONS" | sed 's/^/ /' - # echo "[DEBUG] Full command line:" - # echo " pct create $CTID ${TEMPLATE_STORAGE}:vztmpl/${TEMPLATE} $PCT_OPTIONS" - # echo "[DEBUG] ========================================" + # Release lock after template validation - pct create has its own internal locking + exec 9>&- msg_debug "pct create command: pct create $CTID ${TEMPLATE_STORAGE}:vztmpl/${TEMPLATE} $PCT_OPTIONS" msg_debug "Logfile: $LOGFILE" # First attempt (PCT_OPTIONS is a multi-line string, use it directly) if ! 
pct create "$CTID" "${TEMPLATE_STORAGE}:vztmpl/${TEMPLATE}" $PCT_OPTIONS >"$LOGFILE" 2>&1; then - msg_debug "Container creation failed on ${TEMPLATE_STORAGE}. Validating template..." + msg_debug "Container creation failed on ${TEMPLATE_STORAGE}. Checking error..." - # Validate template file - if [[ ! -s "$TEMPLATE_PATH" || "$(stat -c%s "$TEMPLATE_PATH")" -lt 1000000 ]]; then - msg_warn "Template file too small or missing – re-downloading." + # Check if template issue - retry with fresh download + if grep -qiE 'unable to open|corrupt|invalid' "$LOGFILE"; then + msg_info "Template may be corrupted – re-downloading" rm -f "$TEMPLATE_PATH" - pveam download "$TEMPLATE_STORAGE" "$TEMPLATE" - elif ! tar -tf "$TEMPLATE_PATH" &>/dev/null; then - if [[ -n "$ONLINE_TEMPLATE" ]]; then - msg_warn "Template appears corrupted – re-downloading." - rm -f "$TEMPLATE_PATH" - pveam download "$TEMPLATE_STORAGE" "$TEMPLATE" - else - msg_warn "Template appears corrupted, but no online version exists. Skipping re-download." - fi + pveam download "$TEMPLATE_STORAGE" "$TEMPLATE" >/dev/null 2>&1 + msg_ok "Template re-downloaded" fi # Retry after repair if ! pct create "$CTID" "${TEMPLATE_STORAGE}:vztmpl/${TEMPLATE}" $PCT_OPTIONS >>"$LOGFILE" 2>&1; then # Fallback to local storage if not already on local if [[ "$TEMPLATE_STORAGE" != "local" ]]; then - msg_info "Retrying container creation with fallback to local storage..." + msg_info "Retrying container creation with fallback to local storage" LOCAL_TEMPLATE_PATH="/var/lib/vz/template/cache/$TEMPLATE" if [[ ! -f "$LOCAL_TEMPLATE_PATH" ]]; then - msg_info "Downloading template to local..." + msg_ok "Trying local storage fallback" + msg_info "Downloading template to local" pveam download local "$TEMPLATE" >/dev/null 2>&1 + msg_ok "Template downloaded to local" + else + msg_ok "Trying local storage fallback" fi if ! pct create "$CTID" "local:vztmpl/${TEMPLATE}" $PCT_OPTIONS >>"$LOGFILE" 2>&1; then # Local fallback also failed - check for LXC stack version issue @@ -5060,15 +5077,15 @@ description() { - GitHub + Git - Discussions + Discussions - Issues + Issues EOF From 310d0e54a6a491bb03b4867404f35ebeab65eb23 Mon Sep 17 00:00:00 2001 From: "CanbiZ (MickLesk)" <47820557+MickLesk@users.noreply.github.com> Date: Tue, 27 Jan 2026 13:26:31 +0100 Subject: [PATCH 097/228] Interactive Prompts --- .../core.func/CORE_FUNCTIONS_REFERENCE.md | 222 +++++++- install/forgejo-runner-install.sh | 27 +- install/garmin-grafana-install.sh | 9 +- misc/build.func | 69 +-- misc/core.func | 519 +++++++++++++++++- 5 files changed, 773 insertions(+), 73 deletions(-) diff --git a/docs/misc/core.func/CORE_FUNCTIONS_REFERENCE.md b/docs/misc/core.func/CORE_FUNCTIONS_REFERENCE.md index c89942083..8adf62a6f 100644 --- a/docs/misc/core.func/CORE_FUNCTIONS_REFERENCE.md +++ b/docs/misc/core.func/CORE_FUNCTIONS_REFERENCE.md @@ -634,4 +634,224 @@ silent() - `SILENT_LOGFILE`: Silent execution log file path - `SPINNER_PID`: Spinner process ID - `SPINNER_MSG`: Spinner message text -- `MSG_INFO_SHOWN`: Tracks shown info messages +- `MSG_INFO_SHOWN`: Tracks shown info messages- `PHS_SILENT`: Unattended mode flag (1 = silent) +- `var_unattended`: Unattended mode variable (yes/no) +- `UNATTENDED`: Alternative unattended mode variable + +## Unattended/Interactive Prompt Functions + +These functions provide a unified way to handle user prompts in both interactive and unattended modes. They automatically detect the execution context and either prompt the user (with timeout) or use default values silently. 
+ +### `is_unattended()` +**Purpose**: Detect if script is running in unattended/silent mode +**Parameters**: None +**Returns**: +- `0` (true): Running unattended +- `1` (false): Running interactively +**Side Effects**: None +**Dependencies**: None +**Environment Variables Used**: `PHS_SILENT`, `var_unattended`, `UNATTENDED` + +**Usage Example**: +```bash +if is_unattended; then + echo "Running in unattended mode" +else + echo "Running interactively" +fi +``` + +### `prompt_confirm()` +**Purpose**: Prompt user for yes/no confirmation with timeout and unattended support +**Parameters**: +- `$1` - Prompt message (required) +- `$2` - Default value: "y" or "n" (optional, default: "n") +- `$3` - Timeout in seconds (optional, default: 60) +**Returns**: +- `0`: User confirmed (yes) +- `1`: User declined (no) or timeout with default "n" +**Side Effects**: Displays prompt to terminal +**Dependencies**: `is_unattended()` +**Environment Variables Used**: Color variables (`YW`, `CL`) + +**Behavior**: +- **Unattended mode**: Immediately returns default value +- **Non-TTY**: Immediately returns default value +- **Interactive**: Displays prompt with timeout countdown +- **Timeout**: Auto-applies default value after specified seconds + +**Usage Examples**: +```bash +# Basic confirmation (default: no) +if prompt_confirm "Proceed with installation?"; then + install_package +fi + +# Default to yes, 30 second timeout +if prompt_confirm "Continue?" "y" 30; then + continue_operation +fi + +# In unattended mode (will use default immediately) +var_unattended=yes +prompt_confirm "Delete files?" "n" && echo "Deleting" || echo "Skipped" +``` + +### `prompt_input()` +**Purpose**: Prompt user for text input with timeout and unattended support +**Parameters**: +- `$1` - Prompt message (required) +- `$2` - Default value (optional, default: "") +- `$3` - Timeout in seconds (optional, default: 60) +**Output**: Prints the user input or default value to stdout +**Returns**: `0` always +**Side Effects**: Displays prompt to stderr (keeps stdout clean for value) +**Dependencies**: `is_unattended()` +**Environment Variables Used**: Color variables (`YW`, `CL`) + +**Behavior**: +- **Unattended mode**: Returns default value immediately +- **Non-TTY**: Returns default value immediately +- **Interactive**: Waits for user input with timeout +- **Empty input**: Returns default value +- **Timeout**: Returns default value + +**Usage Examples**: +```bash +# Get username with default +username=$(prompt_input "Enter username:" "admin" 30) +echo "Using username: $username" + +# With validation loop +while true; do + port=$(prompt_input "Enter port:" "8080" 30) + [[ "$port" =~ ^[0-9]+$ ]] && break + echo "Invalid port number" +done + +# Capture value in unattended mode +var_unattended=yes +db_name=$(prompt_input "Database name:" "myapp_db") +``` + +### `prompt_select()` +**Purpose**: Prompt user to select from a list of options with timeout support +**Parameters**: +- `$1` - Prompt message (required) +- `$2` - Default option number, 1-based (optional, default: 1) +- `$3` - Timeout in seconds (optional, default: 60) +- `$4+` - Options to display (required, at least 1) +**Output**: Prints the selected option value to stdout +**Returns**: +- `0`: Success +- `1`: No options provided +**Side Effects**: Displays numbered menu to stderr +**Dependencies**: `is_unattended()` +**Environment Variables Used**: Color variables (`YW`, `GN`, `CL`) + +**Behavior**: +- **Unattended mode**: Returns default selection immediately +- **Non-TTY**: Returns 
default selection immediately +- **Interactive**: Displays numbered menu with timeout +- **Invalid selection**: Uses default +- **Timeout**: Auto-selects default + +**Usage Examples**: +```bash +# Simple selection +choice=$(prompt_select "Select database:" 1 30 "PostgreSQL" "MySQL" "SQLite") +echo "Selected: $choice" + +# Using array +options=("Production" "Staging" "Development") +env=$(prompt_select "Select environment:" 2 60 "${options[@]}") + +# In automation scripts +var_unattended=yes +db=$(prompt_select "Database:" 1 30 "postgres" "mysql" "sqlite") +# Returns "postgres" immediately without menu +``` + +### `prompt_password()` +**Purpose**: Prompt user for password input with hidden characters and auto-generation +**Parameters**: +- `$1` - Prompt message (required) +- `$2` - Default value or "generate" for auto-generation (optional) +- `$3` - Timeout in seconds (optional, default: 60) +- `$4` - Minimum length for validation (optional, default: 0) +**Output**: Prints the password to stdout +**Returns**: `0` always +**Side Effects**: Displays prompt to stderr with hidden input +**Dependencies**: `is_unattended()`, `openssl` (for generation) +**Environment Variables Used**: Color variables (`YW`, `CL`) + +**Behavior**: +- **"generate" default**: Creates random 16-character password +- **Unattended mode**: Returns default/generated password immediately +- **Non-TTY**: Returns default/generated password immediately +- **Interactive**: Hidden input with timeout +- **Min length validation**: Falls back to default if too short +- **Timeout**: Returns default/generated password + +**Usage Examples**: +```bash +# Auto-generate password if user doesn't provide one +password=$(prompt_password "Enter password:" "generate" 30) +echo "Password has been set" + +# Require minimum length +db_pass=$(prompt_password "Database password:" "" 60 12) + +# With default password +admin_pass=$(prompt_password "Admin password:" "changeme123" 30) + +# In unattended mode with auto-generation +var_unattended=yes +password=$(prompt_password "Password:" "generate") +# Returns randomly generated password +``` + +## Prompt Function Decision Flow + +``` +prompt_confirm() / prompt_input() / prompt_select() / prompt_password() +│ +├── is_unattended()? ─────────────────────┐ +│ └── PHS_SILENT=1? │ +│ └── var_unattended=yes? ├── YES → Return default immediately +│ └── UNATTENDED=yes? │ +│ │ +├── TTY available? ─────────────── NO ────┘ +│ +└── Interactive Mode + ├── Display prompt with timeout hint + ├── read -t $timeout + │ ├── User input received → Validate and return + │ ├── Empty input → Return default + │ └── Timeout → Return default with message + └── Return value +``` + +## Migration Guide: Converting read Commands + +To make existing scripts unattended-compatible, replace `read` commands with the appropriate prompt function: + +### Before (blocking): +```bash +read -rp "Continue? [y/N]: " confirm +[[ "$confirm" =~ ^[Yy]$ ]] && do_something + +read -p "Enter port: " port +port="${port:-8080}" + +read -p "Select (1-3): " choice +``` + +### After (unattended-safe): +```bash +prompt_confirm "Continue?" 
"n" && do_something + +port=$(prompt_input "Enter port:" "8080") + +choice=$(prompt_select "Select option:" 1 60 "Option 1" "Option 2" "Option 3") +``` diff --git a/install/forgejo-runner-install.sh b/install/forgejo-runner-install.sh index 367b2b76e..78f0a6e5f 100644 --- a/install/forgejo-runner-install.sh +++ b/install/forgejo-runner-install.sh @@ -12,19 +12,19 @@ setting_up_container network_check update_os -if [[ -z "$var_forgejo_instance" ]]; then - read -rp "Forgejo Instance URL (e.g. https://code.forgejo.org): " var_forgejo_instance -fi +# Get required configuration with sensible fallbacks for unattended mode +# These will show a warning if defaults are used +var_forgejo_instance=$(prompt_input_required \ + "Forgejo Instance URL:" \ + "${var_forgejo_instance:-https://codeberg.org}" \ + 120 \ + "var_forgejo_instance") -if [[ -z "$var_forgejo_runner_token" ]]; then - read -rp "Forgejo Runner Registration Token: " var_forgejo_runner_token - echo -fi - -if [[ -z "$var_forgejo_instance" || -z "$var_forgejo_runner_token" ]]; then - echo "❌ Forgejo instance URL and runner token are required." - exit 1 -fi +var_forgejo_runner_token=$(prompt_input_required \ + "Forgejo Runner Registration Token:" \ + "${var_forgejo_runner_token:-REPLACE_WITH_YOUR_TOKEN}" \ + 120 \ + "var_forgejo_runner_token") export FORGEJO_INSTANCE="$var_forgejo_instance" export FORGEJO_RUNNER_TOKEN="$var_forgejo_runner_token" @@ -78,6 +78,9 @@ EOF systemctl enable -q --now forgejo-runner msg_ok "Created Services" +# Show warning if any required values used fallbacks +show_missing_values_warning + motd_ssh customize cleanup_lxc diff --git a/install/garmin-grafana-install.sh b/install/garmin-grafana-install.sh index 46853fae5..12dc4f8d7 100644 --- a/install/garmin-grafana-install.sh +++ b/install/garmin-grafana-install.sh @@ -110,8 +110,7 @@ msg_ok "Installed garmin-grafana" msg_info "Setting up garmin-grafana" # Check if using Chinese garmin servers -read -rp "Are you using Garmin in mainland China? (y/N): " prompt -if [[ "${prompt,,}" =~ ^(y|yes|Y)$ ]]; then +if prompt_confirm "Are you using Garmin in mainland China?" "n" 60; then GARMIN_CN="True" else GARMIN_CN="False" @@ -131,9 +130,9 @@ EOF # garmin-grafana usually prompts the user for email and password (and MFA) on first run, # then stores a refreshable token. We try to avoid storing user credentials in the env vars if [ -z "$(ls -A /opt/garmin-grafana/.garminconnect)" ]; then - read -r -p "Please enter your Garmin Connect Email: " GARMIN_EMAIL - read -r -p "Please enter your Garmin Connect Password (this is used to generate a token and NOT stored): " GARMIN_PASSWORD - read -r -p "Please enter your MFA Code (if applicable, leave blank if not): " GARMIN_MFA + GARMIN_EMAIL=$(prompt_input "Please enter your Garmin Connect Email:" "" 120) + GARMIN_PASSWORD=$(prompt_password "Please enter your Garmin Connect Password (used to generate token, NOT stored):" "" 120) + GARMIN_MFA=$(prompt_input "Please enter your MFA Code (leave blank if not applicable):" "" 60) # Run the script once to prompt for credential msg_info "Creating Garmin credentials, this will timeout in 60 seconds" timeout 60s uv run --env-file /opt/garmin-grafana/.env --project /opt/garmin-grafana/ /opt/garmin-grafana/src/garmin_grafana/garmin_fetch.py < " prompt; then - # read returns non-zero on Ctrl-D/ESC - msg_error "Aborted input (Ctrl-D/ESC)" - return 130 - fi - - case "${prompt,,}" in - y | yes) + if prompt_confirm "Remove this Container?" 
"n" 60; then if pct stop "$CT_ID" &>/dev/null && pct destroy "$CT_ID" &>/dev/null; then msg_ok "Removed Container $CT_ID" else msg_error "Failed to remove Container $CT_ID" return 1 fi - ;; - "" | n | no) + else msg_custom "ℹ️" "${BL}" "Container was not removed." - ;; - *) - msg_warn "Invalid response. Container was not removed." - ;; - esac + fi } # ------------------------------------------------------------------------------ @@ -4452,9 +4435,7 @@ create_lxc_container() { echo " pve-container: installed=${_pvec_i:-n/a} candidate=${_pvec_c:-n/a}" echo " lxc-pve : installed=${_lxcp_i:-n/a} candidate=${_lxcp_c:-n/a}" echo - read -rp "Do you want to upgrade now? [y/N] " _ans - case "${_ans,,}" in - y | yes) + if prompt_confirm "Do you want to upgrade now?" "n" 60; then msg_info "Upgrading Proxmox LXC stack (pve-container, lxc-pve)" if $STD apt-get update && $STD apt-get install -y --only-upgrade pve-container lxc-pve; then msg_ok "LXC stack upgraded." @@ -4473,9 +4454,9 @@ create_lxc_container() { msg_error "Upgrade failed. Please check APT output." return 3 fi - ;; - *) return 2 ;; - esac + else + return 2 + fi } # ------------------------------------------------------------------------------ @@ -4650,16 +4631,12 @@ create_lxc_container() { ) if [[ ${#AVAILABLE_VERSIONS[@]} -gt 0 ]]; then - echo "" - echo "${BL}Available ${PCT_OSTYPE} versions:${CL}" - for i in "${!AVAILABLE_VERSIONS[@]}"; do - echo " [$((i + 1))] ${AVAILABLE_VERSIONS[$i]}" - done - echo "" - read -p "Select version [1-${#AVAILABLE_VERSIONS[@]}] or press Enter to cancel: " choice + # Use prompt_select for version selection (supports unattended mode) + local selected_version + selected_version=$(prompt_select "Select ${PCT_OSTYPE} version:" 1 60 "${AVAILABLE_VERSIONS[@]}") - if [[ "$choice" =~ ^[0-9]+$ ]] && [[ "$choice" -ge 1 ]] && [[ "$choice" -le ${#AVAILABLE_VERSIONS[@]} ]]; then - PCT_OSVERSION="${AVAILABLE_VERSIONS[$((choice - 1))]}" + if [[ -n "$selected_version" ]]; then + PCT_OSVERSION="$selected_version" TEMPLATE_SEARCH="${PCT_OSTYPE}-${PCT_OSVERSION}" ONLINE_TEMPLATES=() @@ -4713,16 +4690,12 @@ create_lxc_container() { ) if [[ ${#AVAILABLE_VERSIONS[@]} -gt 0 ]]; then - echo -e "\n${BL}Available versions:${CL}" - for i in "${!AVAILABLE_VERSIONS[@]}"; do - echo " [$((i + 1))] ${AVAILABLE_VERSIONS[$i]}" - done + # Use prompt_select for version selection (supports unattended mode) + local selected_version + selected_version=$(prompt_select "Select ${PCT_OSTYPE} version:" 1 60 "${AVAILABLE_VERSIONS[@]}") - echo "" - read -p "Select version [1-${#AVAILABLE_VERSIONS[@]}] or Enter to exit: " choice - - if [[ "$choice" =~ ^[0-9]+$ ]] && [[ "$choice" -ge 1 ]] && [[ "$choice" -le ${#AVAILABLE_VERSIONS[@]} ]]; then - export var_version="${AVAILABLE_VERSIONS[$((choice - 1))]}" + if [[ -n "$selected_version" ]]; then + export var_version="$selected_version" export PCT_OSVERSION="$var_version" msg_ok "Switched to ${PCT_OSTYPE} ${var_version}" @@ -4767,10 +4740,6 @@ create_lxc_container() { msg_error "Template still not found after version change" exit 220 } - else - msg_custom "🚫" "${YW}" "Installation cancelled" - exit 1 - fi else msg_error "No ${PCT_OSTYPE} templates available" exit 220 diff --git a/misc/core.func b/misc/core.func index cf564c8a2..10e40a501 100644 --- a/misc/core.func +++ b/misc/core.func @@ -810,6 +810,517 @@ is_verbose_mode() { [[ "$verbose" != "no" || ! 
-t 2 ]] } +# ------------------------------------------------------------------------------ +# is_unattended() +# +# - Detects if script is running in unattended/silent mode +# - Checks PHS_SILENT, var_unattended, and UNATTENDED variables +# - Returns 0 (true) if unattended, 1 (false) otherwise +# - Used by prompt functions to auto-apply defaults +# ------------------------------------------------------------------------------ +is_unattended() { + [[ "${PHS_SILENT:-0}" == "1" ]] && return 0 + [[ "${var_unattended:-}" =~ ^(yes|true|1)$ ]] && return 0 + [[ "${UNATTENDED:-}" =~ ^(yes|true|1)$ ]] && return 0 + return 1 +} + +# ------------------------------------------------------------------------------ +# show_missing_values_warning() +# +# - Displays a summary of required values that used fallback defaults +# - Should be called at the end of install scripts +# - Only shows warning if MISSING_REQUIRED_VALUES array has entries +# - Provides clear guidance on what needs manual configuration +# +# Global: +# MISSING_REQUIRED_VALUES - Array of variable names that need configuration +# +# Example: +# # At end of install script: +# show_missing_values_warning +# ------------------------------------------------------------------------------ +show_missing_values_warning() { + if [[ ${#MISSING_REQUIRED_VALUES[@]} -gt 0 ]]; then + echo "" + echo -e "${YW}╔════════════════════════════════════════════════════════════╗${CL}" + echo -e "${YW}║ ⚠️ MANUAL CONFIGURATION REQUIRED ║${CL}" + echo -e "${YW}╠════════════════════════════════════════════════════════════╣${CL}" + echo -e "${YW}║ The following values were not provided and need to be ║${CL}" + echo -e "${YW}║ configured manually for the service to work properly: ║${CL}" + echo -e "${YW}╟────────────────────────────────────────────────────────────╢${CL}" + for val in "${MISSING_REQUIRED_VALUES[@]}"; do + printf "${YW}║${CL} • %-56s ${YW}║${CL}\n" "$val" + done + echo -e "${YW}╟────────────────────────────────────────────────────────────╢${CL}" + echo -e "${YW}║ Check the service configuration files or environment ║${CL}" + echo -e "${YW}║ variables and update the placeholder values. ║${CL}" + echo -e "${YW}╚════════════════════════════════════════════════════════════╝${CL}" + echo "" + return 1 + fi + return 0 +} + +# ------------------------------------------------------------------------------ +# prompt_confirm() +# +# - Prompts user for yes/no confirmation with timeout and unattended support +# - In unattended mode: immediately returns default value +# - In interactive mode: waits for user input with configurable timeout +# - After timeout: auto-applies default value +# +# Arguments: +# $1 - Prompt message (required) +# $2 - Default value: "y" or "n" (optional, default: "n") +# $3 - Timeout in seconds (optional, default: 60) +# +# Returns: +# 0 - User confirmed (yes) +# 1 - User declined (no) or timeout with default "n" +# +# Example: +# if prompt_confirm "Proceed with installation?" "y" 30; then +# echo "Installing..." +# fi +# +# # Unattended: prompt_confirm will use default without waiting +# var_unattended=yes +# prompt_confirm "Delete files?" 
"n" && echo "Deleting" || echo "Skipped" +# ------------------------------------------------------------------------------ +prompt_confirm() { + local message="${1:-Confirm?}" + local default="${2:-n}" + local timeout="${3:-60}" + local response + + # Normalize default to lowercase + default="${default,,}" + [[ "$default" != "y" ]] && default="n" + + # Build prompt hint + local hint + if [[ "$default" == "y" ]]; then + hint="[Y/n]" + else + hint="[y/N]" + fi + + # Unattended mode: apply default immediately + if is_unattended; then + if [[ "$default" == "y" ]]; then + return 0 + else + return 1 + fi + fi + + # Check if running in a TTY + if [[ ! -t 0 ]]; then + # Not a TTY, use default + if [[ "$default" == "y" ]]; then + return 0 + else + return 1 + fi + fi + + # Interactive prompt with timeout + echo -en "${YW}${message} ${hint} (auto-${default} in ${timeout}s): ${CL}" + + if read -t "$timeout" -r response; then + # User provided input + response="${response,,}" # lowercase + case "$response" in + y|yes) + return 0 + ;; + n|no) + return 1 + ;; + "") + # Empty response, use default + if [[ "$default" == "y" ]]; then + return 0 + else + return 1 + fi + ;; + *) + # Invalid input, use default + echo -e "${YW}Invalid response, using default: ${default}${CL}" + if [[ "$default" == "y" ]]; then + return 0 + else + return 1 + fi + ;; + esac + else + # Timeout occurred + echo "" # Newline after timeout + echo -e "${YW}Timeout - auto-selecting: ${default}${CL}" + if [[ "$default" == "y" ]]; then + return 0 + else + return 1 + fi + fi +} + +# ------------------------------------------------------------------------------ +# prompt_input() +# +# - Prompts user for text input with timeout and unattended support +# - In unattended mode: immediately returns default value +# - In interactive mode: waits for user input with configurable timeout +# - After timeout: auto-applies default value +# +# Arguments: +# $1 - Prompt message (required) +# $2 - Default value (optional, default: "") +# $3 - Timeout in seconds (optional, default: 60) +# +# Output: +# Prints the user input or default value to stdout +# +# Example: +# username=$(prompt_input "Enter username:" "admin" 30) +# echo "Using username: $username" +# +# # With validation +# while true; do +# port=$(prompt_input "Enter port:" "8080" 30) +# [[ "$port" =~ ^[0-9]+$ ]] && break +# echo "Invalid port number" +# done +# ------------------------------------------------------------------------------ +prompt_input() { + local message="${1:-Enter value:}" + local default="${2:-}" + local timeout="${3:-60}" + local response + + # Build display default hint + local hint="" + [[ -n "$default" ]] && hint=" (default: ${default})" + + # Unattended mode: return default immediately + if is_unattended; then + echo "$default" + return 0 + fi + + # Check if running in a TTY + if [[ ! 
-t 0 ]]; then + # Not a TTY, use default + echo "$default" + return 0 + fi + + # Interactive prompt with timeout + echo -en "${YW}${message}${hint} (auto-default in ${timeout}s): ${CL}" >&2 + + if read -t "$timeout" -r response; then + # User provided input (or pressed Enter for empty) + if [[ -n "$response" ]]; then + echo "$response" + else + echo "$default" + fi + else + # Timeout occurred + echo "" >&2 # Newline after timeout + echo -e "${YW}Timeout - using default: ${default}${CL}" >&2 + echo "$default" + fi +} + +# ------------------------------------------------------------------------------ +# prompt_input_required() +# +# - Prompts user for REQUIRED text input with fallback support +# - In unattended mode: Uses fallback value if no env var set (with warning) +# - In interactive mode: loops until user provides non-empty input +# - Tracks missing required values for end-of-script summary +# +# Arguments: +# $1 - Prompt message (required) +# $2 - Fallback/example value for unattended mode (optional) +# $3 - Timeout in seconds (optional, default: 120) +# $4 - Environment variable name hint for error messages (optional) +# +# Output: +# Prints the user input or fallback value to stdout +# +# Returns: +# 0 - Success (value provided or fallback used) +# 1 - Failed (interactive timeout without input) +# +# Global: +# MISSING_REQUIRED_VALUES - Array tracking fields that used fallbacks +# +# Example: +# # With fallback - script continues even in unattended mode +# token=$(prompt_input_required "Enter API Token:" "YOUR_TOKEN_HERE" 60 "var_api_token") +# +# # Check at end of script if any values need manual configuration +# if [[ ${#MISSING_REQUIRED_VALUES[@]} -gt 0 ]]; then +# msg_warn "Please configure: ${MISSING_REQUIRED_VALUES[*]}" +# fi +# ------------------------------------------------------------------------------ +# Global array to track missing required values +declare -g -a MISSING_REQUIRED_VALUES=() + +prompt_input_required() { + local message="${1:-Enter required value:}" + local fallback="${2:-CHANGE_ME}" + local timeout="${3:-120}" + local env_var_hint="${4:-}" + local response="" + + # Check if value is already set via environment variable (if hint provided) + if [[ -n "$env_var_hint" ]]; then + local env_value="${!env_var_hint:-}" + if [[ -n "$env_value" ]]; then + echo "$env_value" + return 0 + fi + fi + + # Unattended mode: use fallback with warning + if is_unattended; then + if [[ -n "$env_var_hint" ]]; then + echo -e "${YW}⚠ Required value '${env_var_hint}' not set - using fallback: ${fallback}${CL}" >&2 + MISSING_REQUIRED_VALUES+=("$env_var_hint") + else + echo -e "${YW}⚠ Required value not provided - using fallback: ${fallback}${CL}" >&2 + MISSING_REQUIRED_VALUES+=("(unnamed)") + fi + echo "$fallback" + return 0 + fi + + # Check if running in a TTY + if [[ ! 
-t 0 ]]; then + echo -e "${YW}⚠ Not interactive - using fallback: ${fallback}${CL}" >&2 + MISSING_REQUIRED_VALUES+=("${env_var_hint:-unnamed}") + echo "$fallback" + return 0 + fi + + # Interactive prompt - loop until non-empty input or use fallback on timeout + local attempts=0 + while [[ -z "$response" ]]; do + ((attempts++)) + + if [[ $attempts -gt 3 ]]; then + echo -e "${YW}Too many empty inputs - using fallback: ${fallback}${CL}" >&2 + MISSING_REQUIRED_VALUES+=("${env_var_hint:-manual_input}") + echo "$fallback" + return 0 + fi + + echo -en "${YW}${message} (required, timeout ${timeout}s): ${CL}" >&2 + + if read -t "$timeout" -r response; then + if [[ -z "$response" ]]; then + echo -e "${YW}This field is required. Please enter a value. (attempt ${attempts}/3)${CL}" >&2 + fi + else + # Timeout occurred - use fallback + echo "" >&2 + echo -e "${YW}Timeout - using fallback value: ${fallback}${CL}" >&2 + MISSING_REQUIRED_VALUES+=("${env_var_hint:-timeout}") + echo "$fallback" + return 0 + fi + done + + echo "$response" +} + +# ------------------------------------------------------------------------------ +# prompt_select() +# +# - Prompts user to select from a list of options with timeout support +# - In unattended mode: immediately returns default selection +# - In interactive mode: displays numbered menu and waits for choice +# - After timeout: auto-applies default selection +# +# Arguments: +# $1 - Prompt message (required) +# $2 - Default option number, 1-based (optional, default: 1) +# $3 - Timeout in seconds (optional, default: 60) +# $4+ - Options to display (required, at least 2) +# +# Output: +# Prints the selected option value to stdout +# +# Returns: +# 0 - Success +# 1 - No options provided or invalid state +# +# Example: +# choice=$(prompt_select "Select database:" 1 30 "PostgreSQL" "MySQL" "SQLite") +# echo "Selected: $choice" +# +# # With array +# options=("Option A" "Option B" "Option C") +# selected=$(prompt_select "Choose:" 2 60 "${options[@]}") +# ------------------------------------------------------------------------------ +prompt_select() { + local message="${1:-Select option:}" + local default="${2:-1}" + local timeout="${3:-60}" + shift 3 + + local options=("$@") + local num_options=${#options[@]} + + # Validate options + if [[ $num_options -eq 0 ]]; then + echo "" >&2 + return 1 + fi + + # Validate default + if [[ ! "$default" =~ ^[0-9]+$ ]] || [[ "$default" -lt 1 ]] || [[ "$default" -gt "$num_options" ]]; then + default=1 + fi + + # Unattended mode: return default immediately + if is_unattended; then + echo "${options[$((default - 1))]}" + return 0 + fi + + # Check if running in a TTY + if [[ ! 
-t 0 ]]; then + echo "${options[$((default - 1))]}" + return 0 + fi + + # Display menu + echo -e "${YW}${message}${CL}" >&2 + local i + for i in "${!options[@]}"; do + local num=$((i + 1)) + if [[ $num -eq $default ]]; then + echo -e " ${GN}${num})${CL} ${options[$i]} ${YW}(default)${CL}" >&2 + else + echo -e " ${GN}${num})${CL} ${options[$i]}" >&2 + fi + done + + # Interactive prompt with timeout + echo -en "${YW}Select [1-${num_options}] (auto-select ${default} in ${timeout}s): ${CL}" >&2 + + local response + if read -t "$timeout" -r response; then + if [[ -z "$response" ]]; then + # Empty response, use default + echo "${options[$((default - 1))]}" + elif [[ "$response" =~ ^[0-9]+$ ]] && [[ "$response" -ge 1 ]] && [[ "$response" -le "$num_options" ]]; then + # Valid selection + echo "${options[$((response - 1))]}" + else + # Invalid input, use default + echo -e "${YW}Invalid selection, using default: ${options[$((default - 1))]}${CL}" >&2 + echo "${options[$((default - 1))]}" + fi + else + # Timeout occurred + echo "" >&2 # Newline after timeout + echo -e "${YW}Timeout - auto-selecting: ${options[$((default - 1))]}${CL}" >&2 + echo "${options[$((default - 1))]}" + fi +} + +# ------------------------------------------------------------------------------ +# prompt_password() +# +# - Prompts user for password input with hidden characters +# - In unattended mode: returns default or generates random password +# - Supports auto-generation of secure passwords +# - After timeout: generates random password if allowed +# +# Arguments: +# $1 - Prompt message (required) +# $2 - Default value or "generate" for auto-generation (optional) +# $3 - Timeout in seconds (optional, default: 60) +# $4 - Minimum length for validation (optional, default: 0 = no minimum) +# +# Output: +# Prints the password to stdout +# +# Example: +# password=$(prompt_password "Enter password:" "generate" 30 8) +# echo "Password set" +# +# # Require user input (no default) +# db_pass=$(prompt_password "Database password:" "" 60 12) +# ------------------------------------------------------------------------------ +prompt_password() { + local message="${1:-Enter password:}" + local default="${2:-}" + local timeout="${3:-60}" + local min_length="${4:-0}" + local response + + # Generate random password if requested + local generated="" + if [[ "$default" == "generate" ]]; then + generated=$(openssl rand -base64 16 2>/dev/null | tr -dc 'a-zA-Z0-9' | head -c 16) + [[ -z "$generated" ]] && generated=$(head /dev/urandom | tr -dc 'a-zA-Z0-9' | head -c 16) + default="$generated" + fi + + # Unattended mode: return default immediately + if is_unattended; then + echo "$default" + return 0 + fi + + # Check if running in a TTY + if [[ ! 
-t 0 ]]; then + echo "$default" + return 0 + fi + + # Build hint + local hint="" + if [[ -n "$generated" ]]; then + hint=" (Enter for auto-generated)" + elif [[ -n "$default" ]]; then + hint=" (Enter for default)" + fi + [[ "$min_length" -gt 0 ]] && hint="${hint} [min ${min_length} chars]" + + # Interactive prompt with timeout (silent input) + echo -en "${YW}${message}${hint} (timeout ${timeout}s): ${CL}" >&2 + + if read -t "$timeout" -rs response; then + echo "" >&2 # Newline after hidden input + if [[ -n "$response" ]]; then + # Validate minimum length + if [[ "$min_length" -gt 0 ]] && [[ ${#response} -lt "$min_length" ]]; then + echo -e "${YW}Password too short (min ${min_length}), using default${CL}" >&2 + echo "$default" + else + echo "$response" + fi + else + echo "$default" + fi + else + # Timeout occurred + echo "" >&2 # Newline after timeout + echo -e "${YW}Timeout - using generated password${CL}" >&2 + echo "$default" + fi +} + # ============================================================================== # SECTION 6: CLEANUP & MAINTENANCE # ============================================================================== @@ -898,15 +1409,13 @@ check_or_create_swap() { msg_error "No active swap detected" - read -p "Do you want to create a swap file? [y/N]: " create_swap - create_swap="${create_swap,,}" # to lowercase - - if [[ "$create_swap" != "y" && "$create_swap" != "yes" ]]; then + if ! prompt_confirm "Do you want to create a swap file?" "n" 60; then msg_info "Skipping swap file creation" return 1 fi - read -p "Enter swap size in MB (e.g., 2048 for 2GB): " swap_size_mb + local swap_size_mb + swap_size_mb=$(prompt_input "Enter swap size in MB (e.g., 2048 for 2GB):" "2048" 60) if ! [[ "$swap_size_mb" =~ ^[0-9]+$ ]]; then msg_error "Invalid size input. Aborting." return 1 From a82d04982dd084287d31c824077c66524740d616 Mon Sep 17 00:00:00 2001 From: "CanbiZ (MickLesk)" <47820557+MickLesk@users.noreply.github.com> Date: Tue, 27 Jan 2026 13:38:49 +0100 Subject: [PATCH 098/228] Improve unattended mode detection and export MODE Enhanced the is_unattended() function to prioritize the MODE variable for detecting unattended/non-interactive mode, with detailed handling for various modes and legacy fallbacks. Also, export MODE in build_container() to inform install scripts about the current mode. Updated APP name in forgejo-runner.sh for consistency. 
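
For illustration, a rough sketch of how this is intended to resolve from an
install script's point of view (helper names as introduced in core.func and
build.func earlier in this series; the prompt texts and values below are made
up for the example):

    # MODE is exported by build_container() before the install script runs,
    # so the prompt helpers can decide whether to ask at all.
    export MODE=default
    prompt_confirm "Enable example feature?" "n" 30   # returns 1 immediately (default "n")
    port=$(prompt_input "Service port:" "8080" 30)    # prints "8080" immediately, no prompt

    export MODE=advanced
    # On the PVE host (wizard phase) the user is prompted with a timeout;
    # inside the container, where `pveversion` does not exist, the same
    # calls fall back to their defaults exactly as above.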
--- ct/forgejo-runner.sh | 2 +- misc/build.func | 4 ++++ misc/core.func | 49 ++++++++++++++++++++++++++++++++++++++++++-- 3 files changed, 52 insertions(+), 3 deletions(-) diff --git a/ct/forgejo-runner.sh b/ct/forgejo-runner.sh index 1dfe36ebc..c11dd19a0 100644 --- a/ct/forgejo-runner.sh +++ b/ct/forgejo-runner.sh @@ -6,7 +6,7 @@ source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxV # License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE # Source: https://forgejo.org/ -APP="Forgejo Runner" +APP="Forgejo-Runner" var_tags="${var_tags:-ci}" var_cpu="${var_cpu:-2}" var_ram="${var_ram:-2048}" diff --git a/misc/build.func b/misc/build.func index cdcb7f2cc..7c431dc99 100644 --- a/misc/build.func +++ b/misc/build.func @@ -3461,6 +3461,10 @@ build_container() { export DEV_MODE_LOGS="${DEV_MODE_LOGS:-false}" export DEV_MODE_DRYRUN="${DEV_MODE_DRYRUN:-false}" + # MODE export for unattended detection in install scripts + # This tells install scripts whether to prompt for input or use defaults + export MODE="${METHOD:-default}" + # Build PCT_OPTIONS as multi-line string PCT_OPTIONS_STRING=" -hostname $HN" diff --git a/misc/core.func b/misc/core.func index 10e40a501..1aa910c1c 100644 --- a/misc/core.func +++ b/misc/core.func @@ -813,15 +813,60 @@ is_verbose_mode() { # ------------------------------------------------------------------------------ # is_unattended() # -# - Detects if script is running in unattended/silent mode -# - Checks PHS_SILENT, var_unattended, and UNATTENDED variables +# - Detects if script is running in unattended/non-interactive mode +# - Checks MODE variable first (primary method) +# - Falls back to legacy flags (PHS_SILENT, var_unattended) # - Returns 0 (true) if unattended, 1 (false) otherwise # - Used by prompt functions to auto-apply defaults +# +# Modes that are unattended: +# - default (1) : Use script defaults, no prompts +# - mydefaults (3) : Use user's default.vars, no prompts +# - appdefaults (4) : Use app-specific defaults, no prompts +# +# Modes that are interactive: +# - advanced (2) : Full wizard with all options +# +# Note: Even in advanced mode, install scripts run unattended because +# all values are already collected during the wizard phase. # ------------------------------------------------------------------------------ is_unattended() { + # Primary: Check MODE variable (case-insensitive) + local mode="${MODE:-${mode:-}}" + mode="${mode,,}" # lowercase + + case "$mode" in + default|1) + return 0 + ;; + mydefaults|userdefaults|3) + return 0 + ;; + appdefaults|4) + return 0 + ;; + advanced|2) + # Advanced mode is interactive ONLY during wizard + # Inside container (install scripts), it should be unattended + # Check if we're inside a container (no pveversion command) + if ! command -v pveversion &>/dev/null; then + # We're inside the container - all values already collected + return 0 + fi + # On host during wizard - interactive + return 1 + ;; + esac + + # Legacy fallbacks for compatibility [[ "${PHS_SILENT:-0}" == "1" ]] && return 0 [[ "${var_unattended:-}" =~ ^(yes|true|1)$ ]] && return 0 [[ "${UNATTENDED:-}" =~ ^(yes|true|1)$ ]] && return 0 + + # No TTY available = unattended + [[ ! 
-t 0 ]] && return 0 + + # Default: interactive return 1 } From 1bb32bbc9409d00e250f4848e371812f4c57895d Mon Sep 17 00:00:00 2001 From: "CanbiZ (MickLesk)" <47820557+MickLesk@users.noreply.github.com> Date: Tue, 27 Jan 2026 13:39:57 +0100 Subject: [PATCH 099/228] fix syntax --- misc/build.func | 1 + misc/core.func | 8 ++++---- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/misc/build.func b/misc/build.func index 7c431dc99..3863a6d8a 100644 --- a/misc/build.func +++ b/misc/build.func @@ -4744,6 +4744,7 @@ create_lxc_container() { msg_error "Template still not found after version change" exit 220 } + fi else msg_error "No ${PCT_OSTYPE} templates available" exit 220 diff --git a/misc/core.func b/misc/core.func index 1aa910c1c..36356ca52 100644 --- a/misc/core.func +++ b/misc/core.func @@ -834,7 +834,7 @@ is_unattended() { # Primary: Check MODE variable (case-insensitive) local mode="${MODE:-${mode:-}}" mode="${mode,,}" # lowercase - + case "$mode" in default|1) return 0 @@ -857,15 +857,15 @@ is_unattended() { return 1 ;; esac - + # Legacy fallbacks for compatibility [[ "${PHS_SILENT:-0}" == "1" ]] && return 0 [[ "${var_unattended:-}" =~ ^(yes|true|1)$ ]] && return 0 [[ "${UNATTENDED:-}" =~ ^(yes|true|1)$ ]] && return 0 - + # No TTY available = unattended [[ ! -t 0 ]] && return 0 - + # Default: interactive return 1 } From a6134095d4fc1862bf582e0e9d8f48571a7aaa14 Mon Sep 17 00:00:00 2001 From: "CanbiZ (MickLesk)" <47820557+MickLesk@users.noreply.github.com> Date: Tue, 27 Jan 2026 13:45:38 +0100 Subject: [PATCH 100/228] fix: correct if/fi nesting in template selection blocks --- misc/build.func | 121 +++++++++++++++++++++++------------------------- 1 file changed, 58 insertions(+), 63 deletions(-) diff --git a/misc/build.func b/misc/build.func index 3863a6d8a..ffdd4223a 100644 --- a/misc/build.func +++ b/misc/build.func @@ -4639,29 +4639,25 @@ create_lxc_container() { local selected_version selected_version=$(prompt_select "Select ${PCT_OSTYPE} version:" 1 60 "${AVAILABLE_VERSIONS[@]}") - if [[ -n "$selected_version" ]]; then - PCT_OSVERSION="$selected_version" - TEMPLATE_SEARCH="${PCT_OSTYPE}-${PCT_OSVERSION}" + # prompt_select always returns a value (uses default in unattended mode) + PCT_OSVERSION="$selected_version" + TEMPLATE_SEARCH="${PCT_OSTYPE}-${PCT_OSVERSION}" - ONLINE_TEMPLATES=() - mapfile -t ONLINE_TEMPLATES < <( - pveam available -section system 2>/dev/null | - grep -E '\.(tar\.zst|tar\.xz|tar\.gz)$' | - awk '{print $2}' | - grep -E "^${TEMPLATE_SEARCH}-.*${TEMPLATE_PATTERN}" | - sort -t - -k 2 -V 2>/dev/null || true - ) + ONLINE_TEMPLATES=() + mapfile -t ONLINE_TEMPLATES < <( + pveam available -section system 2>/dev/null | + grep -E '\.(tar\.zst|tar\.xz|tar\.gz)$' | + awk '{print $2}' | + grep -E "^${TEMPLATE_SEARCH}-.*${TEMPLATE_PATTERN}" | + sort -t - -k 2 -V 2>/dev/null || true + ) - if [[ ${#ONLINE_TEMPLATES[@]} -gt 0 ]]; then - TEMPLATE="${ONLINE_TEMPLATES[-1]}" - TEMPLATE_SOURCE="online" - else - msg_error "No templates available for ${PCT_OSTYPE} ${PCT_OSVERSION}" - exit 225 - fi + if [[ ${#ONLINE_TEMPLATES[@]} -gt 0 ]]; then + TEMPLATE="${ONLINE_TEMPLATES[-1]}" + TEMPLATE_SOURCE="online" else - msg_custom "🚫" "${YW}" "Installation cancelled" - exit 0 + msg_error "No templates available for ${PCT_OSTYPE} ${PCT_OSVERSION}" + exit 225 fi else msg_error "No ${PCT_OSTYPE} templates available at all" @@ -4698,53 +4694,52 @@ create_lxc_container() { local selected_version selected_version=$(prompt_select "Select ${PCT_OSTYPE} version:" 1 60 
"${AVAILABLE_VERSIONS[@]}") - if [[ -n "$selected_version" ]]; then - export var_version="$selected_version" - export PCT_OSVERSION="$var_version" - msg_ok "Switched to ${PCT_OSTYPE} ${var_version}" + # prompt_select always returns a value (uses default in unattended mode) + export var_version="$selected_version" + export PCT_OSVERSION="$var_version" + msg_ok "Switched to ${PCT_OSTYPE} ${var_version}" - # Retry template search with new version - TEMPLATE_SEARCH="${PCT_OSTYPE}-${PCT_OSVERSION:-}" + # Retry template search with new version + TEMPLATE_SEARCH="${PCT_OSTYPE}-${PCT_OSVERSION:-}" - mapfile -t LOCAL_TEMPLATES < <( - pveam list "$TEMPLATE_STORAGE" 2>/dev/null | - awk -v search="${TEMPLATE_SEARCH}-" -v pattern="${TEMPLATE_PATTERN}" '$1 ~ search && $1 ~ pattern {print $1}' | - sed 's|.*/||' | sort -t - -k 2 -V - ) - mapfile -t ONLINE_TEMPLATES < <( - pveam available -section system 2>/dev/null | - grep -E '\.(tar\.zst|tar\.xz|tar\.gz)$' | - awk '{print $2}' | - grep -E "^${TEMPLATE_SEARCH}-.*${TEMPLATE_PATTERN}" | - sort -t - -k 2 -V 2>/dev/null || true - ) - ONLINE_TEMPLATE="" - [[ ${#ONLINE_TEMPLATES[@]} -gt 0 ]] && ONLINE_TEMPLATE="${ONLINE_TEMPLATES[-1]}" + mapfile -t LOCAL_TEMPLATES < <( + pveam list "$TEMPLATE_STORAGE" 2>/dev/null | + awk -v search="${TEMPLATE_SEARCH}-" -v pattern="${TEMPLATE_PATTERN}" '$1 ~ search && $1 ~ pattern {print $1}' | + sed 's|.*/||' | sort -t - -k 2 -V + ) + mapfile -t ONLINE_TEMPLATES < <( + pveam available -section system 2>/dev/null | + grep -E '\.(tar\.zst|tar\.xz|tar\.gz)$' | + awk '{print $2}' | + grep -E "^${TEMPLATE_SEARCH}-.*${TEMPLATE_PATTERN}" | + sort -t - -k 2 -V 2>/dev/null || true + ) + ONLINE_TEMPLATE="" + [[ ${#ONLINE_TEMPLATES[@]} -gt 0 ]] && ONLINE_TEMPLATE="${ONLINE_TEMPLATES[-1]}" - if [[ ${#LOCAL_TEMPLATES[@]} -gt 0 ]]; then - TEMPLATE="${LOCAL_TEMPLATES[-1]}" - TEMPLATE_SOURCE="local" - else - TEMPLATE="$ONLINE_TEMPLATE" - TEMPLATE_SOURCE="online" - fi - - TEMPLATE_PATH="$(pvesm path $TEMPLATE_STORAGE:vztmpl/$TEMPLATE 2>/dev/null || true)" - if [[ -z "$TEMPLATE_PATH" ]]; then - TEMPLATE_BASE=$(awk -v s="$TEMPLATE_STORAGE" '$1==s {f=1} f && /path/ {print $2; exit}' /etc/pve/storage.cfg) - [[ -n "$TEMPLATE_BASE" ]] && TEMPLATE_PATH="$TEMPLATE_BASE/template/cache/$TEMPLATE" - fi - - # If we still don't have a path but have a valid template name, construct it - if [[ -z "$TEMPLATE_PATH" && -n "$TEMPLATE" ]]; then - TEMPLATE_PATH="/var/lib/vz/template/cache/$TEMPLATE" - fi - - [[ -n "$TEMPLATE_PATH" ]] || { - msg_error "Template still not found after version change" - exit 220 - } + if [[ ${#LOCAL_TEMPLATES[@]} -gt 0 ]]; then + TEMPLATE="${LOCAL_TEMPLATES[-1]}" + TEMPLATE_SOURCE="local" + else + TEMPLATE="$ONLINE_TEMPLATE" + TEMPLATE_SOURCE="online" fi + + TEMPLATE_PATH="$(pvesm path $TEMPLATE_STORAGE:vztmpl/$TEMPLATE 2>/dev/null || true)" + if [[ -z "$TEMPLATE_PATH" ]]; then + TEMPLATE_BASE=$(awk -v s="$TEMPLATE_STORAGE" '$1==s {f=1} f && /path/ {print $2; exit}' /etc/pve/storage.cfg) + [[ -n "$TEMPLATE_BASE" ]] && TEMPLATE_PATH="$TEMPLATE_BASE/template/cache/$TEMPLATE" + fi + + # If we still don't have a path but have a valid template name, construct it + if [[ -z "$TEMPLATE_PATH" && -n "$TEMPLATE" ]]; then + TEMPLATE_PATH="/var/lib/vz/template/cache/$TEMPLATE" + fi + + [[ -n "$TEMPLATE_PATH" ]] || { + msg_error "Template still not found after version change" + exit 220 + } else msg_error "No ${PCT_OSTYPE} templates available" exit 220 From 6965e3e2ecb1cd40c8a8baa999f1f1cf3dc1daf2 Mon Sep 17 00:00:00 2001 From: "CanbiZ (MickLesk)" 
<47820557+MickLesk@users.noreply.github.com> Date: Tue, 27 Jan 2026 13:47:48 +0100 Subject: [PATCH 101/228] fix: use safe arithmetic to avoid exit code 1 from ((attempts++)) when attempts=0 --- misc/core.func | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/misc/core.func b/misc/core.func index 36356ca52..e14ba3c22 100644 --- a/misc/core.func +++ b/misc/core.func @@ -1157,7 +1157,7 @@ prompt_input_required() { # Interactive prompt - loop until non-empty input or use fallback on timeout local attempts=0 while [[ -z "$response" ]]; do - ((attempts++)) + attempts=$((attempts + 1)) if [[ $attempts -gt 3 ]]; then echo -e "${YW}Too many empty inputs - using fallback: ${fallback}${CL}" >&2 From 90718b440acac48b5b016ee64f8d22abcf4a834a Mon Sep 17 00:00:00 2001 From: Joerg Heinemann Date: Tue, 27 Jan 2026 16:19:14 +0100 Subject: [PATCH 102/228] Clean up installation script by removing autostart instructions Removed instructions for making the ebusd daemon autostart. --- install/ebusd-install.sh | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/install/ebusd-install.sh b/install/ebusd-install.sh index c43be4708..0ede2e9b8 100644 --- a/install/ebusd-install.sh +++ b/install/ebusd-install.sh @@ -22,15 +22,9 @@ setup_deb822_repo \ msg_info "Installing ebusd" $STD apt install -y ebusd +$STD systemctl enable ebusd msg_ok "Installed ebusd" -msg_info "Follow below instructions to make the daemon autostart:" -msg_info "1. Edit '/etc/default/ebusd' if necessary (especially if your device is not '/dev/ttyUSB0')" -msg_info "2. Start the daemon with 'systemctl start ebusd'" -msg_info "3. Check the daemon status with 'systemctl status ebusd'" -msg_info "4. Check the log file '/var/log/ebusd.log'" -msg_info "5. Make the daemon autostart with 'systemctl enable ebusd'" - motd_ssh customize cleanup_lxc From 9d0ed3090c6adea9e72deaf7272092b4af97715c Mon Sep 17 00:00:00 2001 From: vhsdream Date: Tue, 27 Jan 2026 10:39:04 -0500 Subject: [PATCH 103/228] Shelfmark: fix misplaced closing bracket --- ct/shelfmark.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ct/shelfmark.sh b/ct/shelfmark.sh index be53f2d9a..1e2106248 100644 --- a/ct/shelfmark.sh +++ b/ct/shelfmark.sh @@ -52,7 +52,7 @@ function update_script() { $STD uv venv -c ./venv $STD source ./venv/bin/activate $STD uv pip install -r ./requirements-base.txt - if [[ $(sed -n '/_BYPASS=/s/[^=]*=//p' /etc/shelfmark/.env) == "true" ]] && [[ $(sed -n '/BYPASSER=/s/[^=]*=//p' /etc/shelfmark/.env == "false") ]]; then + if [[ $(sed -n '/_BYPASS=/s/[^=]*=//p' /etc/shelfmark/.env) == "true" ]] && [[ $(sed -n '/BYPASSER=/s/[^=]*=//p' /etc/shelfmark/.env) == "false" ]]; then $STD uv pip install -r ./requirements-shelfmark.txt fi mv /opt/start.sh.bak /opt/shelfmark/start.sh From 6be45a03804cbe3cc5c1774260e4364f7f06e91d Mon Sep 17 00:00:00 2001 From: vhsdream Date: Tue, 27 Jan 2026 10:44:47 -0500 Subject: [PATCH 104/228] Shelfmark: small fixes --- install/shelfmark-install.sh | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/install/shelfmark-install.sh b/install/shelfmark-install.sh index e327caf44..9be897055 100644 --- a/install/shelfmark-install.sh +++ b/install/shelfmark-install.sh @@ -59,19 +59,19 @@ case "$DEPLOYMENT_TYPE" in ;; 3) echo "" - echo -e "${BL}Use existing FlareSolverr LXC${CL}" + echo -e "${BL}Use an existing FlareSolverr/Byparr LXC${CL}" echo "─────────────────────────────────────────" - echo "Enter the URL/IP address with port of your Flaresolverr instance" + echo 
"Enter the URL/IP address with port of your Flaresolverr/Byparr instance" echo "Example: http://flaresoverr.homelab.lan:8191 or" echo "http://192.168.10.99:8191" echo "" - read -r -p "FlareSolverr URL: " FLARESOLVERR_URL + read -r -p "FlareSolverr/Byparr URL: " BYPASSER_URL - if [[ -z "$FLARESOLVERR_URL" ]]; then - msg_warn "No Flaresolverr URL provided. Falling back to Shelfmark's internal bypasser." + if [[ -z "$BYPASSER_URL" ]]; then + msg_warn "No Flaresolverr/Byparr URL provided. Falling back to Shelfmark's internal bypasser." else - FLARESOLVERR_URL="${FLARESOLVERR_URL%/}" - msg_ok "FlareSolverr URL: ${FLARESOLVERR_URL}" + BYPASSER_URL="${BYPASSER_URL%/}" + msg_ok "FlareSolverr/Byparr URL: ${BYPASSER_URL}" fi ;; 4) @@ -102,7 +102,7 @@ if [[ "$DEPLOYMENT_TYPE" == "2" ]]; then elif [[ "$DEPLOYMENT_TYPE" == "3" ]]; then sed -i -e '/BYPASSER=/s/false/true/' \ -e 's/^# EXT_/EXT_/' \ - -e "s|_URL=.*|_URL=${FLARESOLVERR_URL}|" /etc/shelfmark/.env + -e "s|_URL=.*|_URL=${BYPASSER_URL}|" /etc/shelfmark/.env elif [[ "$DEPLOYMENT_TYPE" == "4" ]]; then sed -i '/_BYPASS=/s/true/false/' /etc/shelfmark/.env else From ad48c14cf3330b98f060b451ca44f929800257ce Mon Sep 17 00:00:00 2001 From: Matthew Stern Date: Sun, 25 Jan 2026 18:01:44 -0500 Subject: [PATCH 105/228] feat: add isponsorblocktv --- ct/isponsorblocktv.sh | 41 +++++++++ frontend/public/json/isponsorblocktv.json | 44 ++++++++++ install/isponsorblocktv-install.sh | 102 ++++++++++++++++++++++ 3 files changed, 187 insertions(+) create mode 100644 ct/isponsorblocktv.sh create mode 100644 frontend/public/json/isponsorblocktv.json create mode 100644 install/isponsorblocktv-install.sh diff --git a/ct/isponsorblocktv.sh b/ct/isponsorblocktv.sh new file mode 100644 index 000000000..c4b717339 --- /dev/null +++ b/ct/isponsorblocktv.sh @@ -0,0 +1,41 @@ +#!/usr/bin/env bash +source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func) +# Copyright (c) 2021-2026 community-scripts ORG +# Author: Matthew Stern +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://github.com/dmunozv04/iSponsorBlockTV + +APP="iSponsorBlockTV" +var_tags="${var_tags:-media;automation}" +var_cpu="${var_cpu:-1}" +var_ram="${var_ram:-1024}" +var_disk="${var_disk:-4}" +var_os="${var_os:-debian}" +var_version="${var_version:-12}" +var_unprivileged="${var_unprivileged:-1}" + +header_info "$APP" +variables +color +catch_errors + +function update_script() { + header_info + check_container_storage + check_container_resources + if [[ ! -d /opt/isponsorblocktv ]]; then + msg_error "No ${APP} Installation Found!" + exit + fi + msg_error "Currently we don't provide an update function for ${APP}." 
+ exit +} + +start +build_container +description + +msg_ok "Completed successfully!\n" +echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" +echo -e "${INFO}${YW} Run the setup wizard inside the container with:${CL}" +echo -e "${TAB}${GATEWAY}${BGN}iSponsorBlockTV setup${CL}" diff --git a/frontend/public/json/isponsorblocktv.json b/frontend/public/json/isponsorblocktv.json new file mode 100644 index 000000000..71f82fb99 --- /dev/null +++ b/frontend/public/json/isponsorblocktv.json @@ -0,0 +1,44 @@ +{ + "name": "iSponsorBlockTV", + "slug": "isponsorblocktv", + "categories": [ + 13 + ], + "date_created": "2026-01-25", + "type": "ct", + "updateable": false, + "privileged": false, + "interface_port": null, + "documentation": "https://github.com/dmunozv04/iSponsorBlockTV/wiki", + "website": "https://github.com/dmunozv04/iSponsorBlockTV", + "logo": null, + "config_path": "/var/lib/isponsorblocktv/config.json", + "description": "iSponsorBlockTV connects to YouTube TV clients and automatically skips SponsorBlock segments, mutes ads, and presses the Skip Ad button when available.", + "install_methods": [ + { + "type": "default", + "script": "ct/isponsorblocktv.sh", + "resources": { + "cpu": 1, + "ram": 1024, + "hdd": 4, + "os": "debian", + "version": "12" + } + } + ], + "default_credentials": { + "username": null, + "password": null + }, + "notes": [ + { + "text": "No web UI; run `iSponsorBlockTV setup` inside the container to configure.", + "type": "info" + }, + { + "text": "SSDP auto-discovery requires multicast on your bridge; manual pairing works without it.", + "type": "info" + } + ] +} diff --git a/install/isponsorblocktv-install.sh b/install/isponsorblocktv-install.sh new file mode 100644 index 000000000..17ebe5652 --- /dev/null +++ b/install/isponsorblocktv-install.sh @@ -0,0 +1,102 @@ +#!/usr/bin/env bash + +# Copyright (c) 2021-2026 community-scripts ORG +# Author: Matthew Stern +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://github.com/dmunozv04/iSponsorBlockTV + +source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" +color +verb_ip6 +catch_errors +setting_up_container +network_check +update_os + +INSTALL_DIR="/opt/isponsorblocktv" +DATA_DIR="/var/lib/isponsorblocktv" +SERVICE_USER="isponsorblocktv" + +msg_info "Installing Dependencies" +$STD apt-get install -y \ + python3 \ + python3-venv \ + python3-pip +msg_ok "Installed Dependencies" + +msg_info "Downloading iSponsorBlockTV" +fetch_and_deploy_gh_release "isponsorblocktv" "dmunozv04/iSponsorBlockTV" "tarball" "latest" "$INSTALL_DIR" +msg_ok "Downloaded iSponsorBlockTV" + +msg_info "Setting up iSponsorBlockTV" +python3 -m venv "$INSTALL_DIR/venv" +$STD "$INSTALL_DIR/venv/bin/pip" install --upgrade pip +$STD "$INSTALL_DIR/venv/bin/pip" install "$INSTALL_DIR" +msg_ok "Set up iSponsorBlockTV" + +msg_info "Creating service user and data directory" +if ! 
id "$SERVICE_USER" &>/dev/null; then + useradd --system --home "$DATA_DIR" --create-home "$SERVICE_USER" +fi +install -d -o "$SERVICE_USER" -g "$SERVICE_USER" "$DATA_DIR" +chown -R "$SERVICE_USER":"$SERVICE_USER" "$INSTALL_DIR" +msg_ok "Created service user and data directory" + +msg_info "Creating Service" +cat </etc/systemd/system/isponsorblocktv.service +[Unit] +Description=iSponsorBlockTV +After=network-online.target +Wants=network-online.target + +[Service] +Type=simple +User=$SERVICE_USER +Group=$SERVICE_USER +WorkingDirectory=$INSTALL_DIR +Environment=iSPBTV_data_dir=$DATA_DIR +ExecStart=$INSTALL_DIR/venv/bin/iSponsorBlockTV +Restart=on-failure +RestartSec=5 + +[Install] +WantedBy=multi-user.target +EOT +systemctl enable -q --now isponsorblocktv +msg_ok "Created Service" + +msg_info "Creating CLI wrapper" +install -d /usr/local/bin +cat <<'EOT' >/usr/local/bin/iSponsorBlockTV +#!/usr/bin/env bash +export iSPBTV_data_dir="/var/lib/isponsorblocktv" + +set +e +/opt/isponsorblocktv/venv/bin/iSponsorBlockTV "$@" +status=$? +set -e + +case "${1:-}" in + setup|setup-cli) + systemctl restart isponsorblocktv >/dev/null 2>&1 || true + ;; +esac + +exit $status +EOT +chmod +x /usr/local/bin/iSponsorBlockTV +ln -sf /usr/local/bin/iSponsorBlockTV /usr/bin/iSponsorBlockTV +msg_ok "Created CLI wrapper" + +msg_info "Setting default data dir for shells" +cat <<'EOT' >/etc/profile.d/isponsorblocktv.sh +export iSPBTV_data_dir="/var/lib/isponsorblocktv" +EOT +if ! grep -q '^iSPBTV_data_dir=' /etc/environment 2>/dev/null; then + echo 'iSPBTV_data_dir=/var/lib/isponsorblocktv' >>/etc/environment +fi +msg_ok "Set default data dir for shells" + +motd_ssh +customize +cleanup_lxc From d25f48f5be7036345a924a5aa6731ca5cf786f0e Mon Sep 17 00:00:00 2001 From: Matthew Stern Date: Sun, 25 Jan 2026 18:46:57 -0500 Subject: [PATCH 106/228] Align to code standards --- ct/isponsorblocktv.sh | 43 ++++++++++++++++++++--- frontend/public/json/isponsorblocktv.json | 6 ++-- install/isponsorblocktv-install.sh | 29 +++++++-------- 3 files changed, 53 insertions(+), 25 deletions(-) diff --git a/ct/isponsorblocktv.sh b/ct/isponsorblocktv.sh index c4b717339..b6bd4e18a 100644 --- a/ct/isponsorblocktv.sh +++ b/ct/isponsorblocktv.sh @@ -1,8 +1,8 @@ #!/usr/bin/env bash -source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func) +source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func) # Copyright (c) 2021-2026 community-scripts ORG -# Author: Matthew Stern -# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Author: Matthew Stern (sternma) +# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE # Source: https://github.com/dmunozv04/iSponsorBlockTV APP="iSponsorBlockTV" @@ -11,7 +11,7 @@ var_cpu="${var_cpu:-1}" var_ram="${var_ram:-1024}" var_disk="${var_disk:-4}" var_os="${var_os:-debian}" -var_version="${var_version:-12}" +var_version="${var_version:-13}" var_unprivileged="${var_unprivileged:-1}" header_info "$APP" @@ -23,11 +23,44 @@ function update_script() { header_info check_container_storage check_container_resources + if [[ ! -d /opt/isponsorblocktv ]]; then msg_error "No ${APP} Installation Found!" exit fi - msg_error "Currently we don't provide an update function for ${APP}." 
+ + if check_for_gh_release "isponsorblocktv" "dmunozv04/iSponsorBlockTV"; then + msg_info "Stopping Service" + systemctl stop isponsorblocktv + msg_ok "Stopped Service" + + if [[ -d /var/lib/isponsorblocktv ]]; then + msg_info "Backing up Data" + cp -r /var/lib/isponsorblocktv /var/lib/isponsorblocktv_data_backup + msg_ok "Backed up Data" + fi + + CLEAN_INSTALL=1 fetch_and_deploy_gh_release "isponsorblocktv" "dmunozv04/iSponsorBlockTV" + + msg_info "Setting up iSponsorBlockTV" + $STD python3 -m venv /opt/isponsorblocktv/venv + $STD /opt/isponsorblocktv/venv/bin/pip install --upgrade pip + $STD /opt/isponsorblocktv/venv/bin/pip install /opt/isponsorblocktv + msg_ok "Set up iSponsorBlockTV" + + if [[ -d /var/lib/isponsorblocktv_data_backup ]]; then + msg_info "Restoring Data" + rm -rf /var/lib/isponsorblocktv + cp -r /var/lib/isponsorblocktv_data_backup /var/lib/isponsorblocktv + rm -rf /var/lib/isponsorblocktv_data_backup + msg_ok "Restored Data" + fi + + msg_info "Starting Service" + systemctl start isponsorblocktv + msg_ok "Started Service" + msg_ok "Updated successfully!" + fi exit } diff --git a/frontend/public/json/isponsorblocktv.json b/frontend/public/json/isponsorblocktv.json index 71f82fb99..2b25f17e9 100644 --- a/frontend/public/json/isponsorblocktv.json +++ b/frontend/public/json/isponsorblocktv.json @@ -6,12 +6,12 @@ ], "date_created": "2026-01-25", "type": "ct", - "updateable": false, + "updateable": true, "privileged": false, "interface_port": null, "documentation": "https://github.com/dmunozv04/iSponsorBlockTV/wiki", "website": "https://github.com/dmunozv04/iSponsorBlockTV", - "logo": null, + "logo": "https://raw.githubusercontent.com/ajayyy/SponsorBlock/master/public/icons/IconSponsorBlocker512px.png", "config_path": "/var/lib/isponsorblocktv/config.json", "description": "iSponsorBlockTV connects to YouTube TV clients and automatically skips SponsorBlock segments, mutes ads, and presses the Skip Ad button when available.", "install_methods": [ @@ -23,7 +23,7 @@ "ram": 1024, "hdd": 4, "os": "debian", - "version": "12" + "version": "13" } } ], diff --git a/install/isponsorblocktv-install.sh b/install/isponsorblocktv-install.sh index 17ebe5652..307e4b41e 100644 --- a/install/isponsorblocktv-install.sh +++ b/install/isponsorblocktv-install.sh @@ -1,8 +1,8 @@ #!/usr/bin/env bash # Copyright (c) 2021-2026 community-scripts ORG -# Author: Matthew Stern -# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Author: Matthew Stern (sternma) +# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE # Source: https://github.com/dmunozv04/iSponsorBlockTV source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" @@ -15,7 +15,6 @@ update_os INSTALL_DIR="/opt/isponsorblocktv" DATA_DIR="/var/lib/isponsorblocktv" -SERVICE_USER="isponsorblocktv" msg_info "Installing Dependencies" $STD apt-get install -y \ @@ -24,23 +23,17 @@ $STD apt-get install -y \ python3-pip msg_ok "Installed Dependencies" -msg_info "Downloading iSponsorBlockTV" -fetch_and_deploy_gh_release "isponsorblocktv" "dmunozv04/iSponsorBlockTV" "tarball" "latest" "$INSTALL_DIR" -msg_ok "Downloaded iSponsorBlockTV" +fetch_and_deploy_gh_release "isponsorblocktv" "dmunozv04/iSponsorBlockTV" msg_info "Setting up iSponsorBlockTV" -python3 -m venv "$INSTALL_DIR/venv" +$STD python3 -m venv "$INSTALL_DIR/venv" $STD "$INSTALL_DIR/venv/bin/pip" install --upgrade pip $STD "$INSTALL_DIR/venv/bin/pip" install "$INSTALL_DIR" msg_ok "Set up iSponsorBlockTV" -msg_info "Creating service user and data 
directory" -if ! id "$SERVICE_USER" &>/dev/null; then - useradd --system --home "$DATA_DIR" --create-home "$SERVICE_USER" -fi -install -d -o "$SERVICE_USER" -g "$SERVICE_USER" "$DATA_DIR" -chown -R "$SERVICE_USER":"$SERVICE_USER" "$INSTALL_DIR" -msg_ok "Created service user and data directory" +msg_info "Creating data directory" +install -d "$DATA_DIR" +msg_ok "Created data directory" msg_info "Creating Service" cat </etc/systemd/system/isponsorblocktv.service @@ -51,8 +44,8 @@ Wants=network-online.target [Service] Type=simple -User=$SERVICE_USER -Group=$SERVICE_USER +User=root +Group=root WorkingDirectory=$INSTALL_DIR Environment=iSPBTV_data_dir=$DATA_DIR ExecStart=$INSTALL_DIR/venv/bin/iSponsorBlockTV @@ -93,7 +86,9 @@ cat <<'EOT' >/etc/profile.d/isponsorblocktv.sh export iSPBTV_data_dir="/var/lib/isponsorblocktv" EOT if ! grep -q '^iSPBTV_data_dir=' /etc/environment 2>/dev/null; then - echo 'iSPBTV_data_dir=/var/lib/isponsorblocktv' >>/etc/environment + cat <<'EOT' >>/etc/environment +iSPBTV_data_dir=/var/lib/isponsorblocktv +EOT fi msg_ok "Set default data dir for shells" From 741164de3e41fe5e7a806ff38943e398e0a3faef Mon Sep 17 00:00:00 2001 From: Matthew Stern Date: Sun, 25 Jan 2026 18:56:15 -0500 Subject: [PATCH 107/228] switch from apt-get to apt --- install/isponsorblocktv-install.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/install/isponsorblocktv-install.sh b/install/isponsorblocktv-install.sh index 307e4b41e..8cce9c019 100644 --- a/install/isponsorblocktv-install.sh +++ b/install/isponsorblocktv-install.sh @@ -17,7 +17,7 @@ INSTALL_DIR="/opt/isponsorblocktv" DATA_DIR="/var/lib/isponsorblocktv" msg_info "Installing Dependencies" -$STD apt-get install -y \ +$STD apt install -y \ python3 \ python3-venv \ python3-pip From 1658c13309a54c87b7fb5a976fcba5751ffdb13d Mon Sep 17 00:00:00 2001 From: Joerg Heinemann Date: Wed, 28 Jan 2026 11:09:09 +0100 Subject: [PATCH 108/228] Update logo and configuration instructions in ebusd.json --- frontend/public/json/ebusd.json | 14 +++----------- 1 file changed, 3 insertions(+), 11 deletions(-) diff --git a/frontend/public/json/ebusd.json b/frontend/public/json/ebusd.json index b902ea7a2..e946af464 100644 --- a/frontend/public/json/ebusd.json +++ b/frontend/public/json/ebusd.json @@ -11,7 +11,7 @@ "interface_port": null, "documentation": "https://github.com/john30/ebusd/wiki", "website": "https://github.com/john30/ebusd", - "logo": "", + "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/proxmox-helper-scripts.webp", "config_path": "/etc/default/ebusd", "description": "ebusd is a daemon for handling communication with eBUS devices connected to a 2-wire `energy bus` used by numerous heating systems.", "install_methods": [ @@ -33,16 +33,8 @@ }, "notes": [ { - "text": "Instructions: \ - 1. Edit `/etc/default/ebusd` if necessary (especially if your device is not `/dev/ttyUSB0`) \ - 2. Start the daemon with `systemctl start ebusd` \ - 3. Check the log file `/var/log/ebusd.log` \ - 4. Make the daemon autostart with `systemctl enable ebusd`", + "text": "Show configuration instructions: `cat ~/ebusd-configuation-instructions.txt`", "type": "info" - }, - { - "text": "Working `/etc/default/ebusd` options for `ebus adapter shield v5`: EBUSD_OPTS='--pidfile=/run/ebusd.pid --latency=100 --scanconfig --configpath=https://ebus.github.io/ --accesslevel=* --pollinterval=30 --device=ens:x.x.x.x:9999 --mqtthost=x.x.x.x --mqttport=1883 --mqttuser=ha-mqtt --mqttpass=xxxxxxxx! 
--mqttint=/etc/ebusd/mqtt-hassio.cfg --mqttjson --mqttlog --mqttretain --mqtttopic=ebusd --log=all:notice --log=main:notice --log=bus:notice --log=update:notice --log=network:notice --log=other:notice'", - "type": "info" - }, + } ] } From f8e6daba1440eb431edafdb0acac01ad2b8553c0 Mon Sep 17 00:00:00 2001 From: "CanbiZ (MickLesk)" <47820557+MickLesk@users.noreply.github.com> Date: Wed, 28 Jan 2026 13:14:48 +0100 Subject: [PATCH 109/228] Remove Dawarich app and related files Deleted Dawarich install script, container script, and JSON metadata, effectively removing support for the Dawarich application from the repository. Added a new GitHub Actions workflow to automate updating GitHub version information for remaining apps. --- .github/workflows/update-github-versions.yml | 232 +++++++++++++++++++ ct/dawarich.sh | 95 -------- frontend/public/json/dawarich.json | 40 ---- install/dawarich-install.sh | 173 -------------- 4 files changed, 232 insertions(+), 308 deletions(-) create mode 100644 .github/workflows/update-github-versions.yml delete mode 100644 ct/dawarich.sh delete mode 100644 frontend/public/json/dawarich.json delete mode 100644 install/dawarich-install.sh diff --git a/.github/workflows/update-github-versions.yml b/.github/workflows/update-github-versions.yml new file mode 100644 index 000000000..a31299b84 --- /dev/null +++ b/.github/workflows/update-github-versions.yml @@ -0,0 +1,232 @@ +name: Update GitHub Versions + +on: + workflow_dispatch: + schedule: + # Runs 4x daily: 00:00, 06:00, 12:00, 18:00 UTC + - cron: "0 0,6,12,18 * * *" + +permissions: + contents: write + pull-requests: write + +env: + VERSIONS_FILE: frontend/public/json/github-versions.json + +jobs: + update-github-versions: + if: github.repository == 'community-scripts/ProxmoxVE' + runs-on: ubuntu-latest + + steps: + - name: Checkout Repository + uses: actions/checkout@v4 + with: + ref: main + + - name: Generate GitHub App Token + id: generate-token + uses: actions/create-github-app-token@v1 + with: + app-id: ${{ vars.APP_ID }} + private-key: ${{ secrets.APP_PRIVATE_KEY }} + + - name: Extract GitHub versions from install scripts + env: + GH_TOKEN: ${{ steps.generate-token.outputs.token }} + run: | + set -euo pipefail + + echo "=========================================" + echo " Extracting GitHub versions from scripts" + echo "=========================================" + + # Initialize versions array + versions_json="[]" + + # Function to add a version entry + add_version() { + local slug="$1" + local repo="$2" + local version="$3" + local pinned="$4" + local date="$5" + + versions_json=$(echo "$versions_json" | jq \ + --arg slug "$slug" \ + --arg repo "$repo" \ + --arg version "$version" \ + --argjson pinned "$pinned" \ + --arg date "$date" \ + '. += [{"slug": $slug, "repo": $repo, "version": $version, "pinned": $pinned, "date": $date}]') + } + + # Get list of slugs from JSON files + echo "" + echo "=== Scanning JSON files for slugs ===" + + for json_file in frontend/public/json/*.json; do + [[ ! -f "$json_file" ]] && continue + + # Skip non-app JSON files + basename_file=$(basename "$json_file") + case "$basename_file" in + metadata.json|versions.json|github-versions.json|dependency-check.json|update-apps.json) + continue + ;; + esac + + # Extract slug from JSON + slug=$(jq -r '.slug // empty' "$json_file" 2>/dev/null) + [[ -z "$slug" ]] && continue + + # Find corresponding install script + install_script="install/${slug}-install.sh" + [[ ! 
-f "$install_script" ]] && continue + + echo -n "[$slug] " + + # Look for fetch_and_deploy_gh_release calls + # Pattern: fetch_and_deploy_gh_release "app" "owner/repo" ["mode"] ["version"] + while IFS= read -r line; do + # Skip commented lines + [[ "$line" =~ ^[[:space:]]*# ]] && continue + + # Extract repo and version from fetch_and_deploy_gh_release + if [[ "$line" =~ fetch_and_deploy_gh_release[[:space:]]+\"[^\"]*\"[[:space:]]+\"([^\"]+)\"([[:space:]]+\"([^\"]+)\")?([[:space:]]+\"([^\"]+)\")? ]]; then + repo="${BASH_REMATCH[1]}" + mode="${BASH_REMATCH[3]:-tarball}" + pinned_version="${BASH_REMATCH[5]:-latest}" + + # Check if version is pinned (not "latest" and not empty) + is_pinned=false + target_version="" + + if [[ -n "$pinned_version" && "$pinned_version" != "latest" ]]; then + is_pinned=true + target_version="$pinned_version" + echo -n "(pinned: $pinned_version) " + fi + + # Fetch version from GitHub + if [[ "$is_pinned" == "true" ]]; then + # For pinned versions, verify it exists and get date + response=$(gh api "repos/${repo}/releases/tags/${target_version}" 2>/dev/null || echo '{}') + if echo "$response" | jq -e '.tag_name' > /dev/null 2>&1; then + version=$(echo "$response" | jq -r '.tag_name') + date=$(echo "$response" | jq -r '.published_at // empty') + add_version "$slug" "$repo" "$version" "true" "$date" + echo "✓ $version (pinned)" + else + # Pinned version not found, skip + echo "⚠ pinned version $target_version not found" + fi + else + # Fetch latest release + response=$(gh api "repos/${repo}/releases/latest" 2>/dev/null || echo '{}') + if echo "$response" | jq -e '.tag_name' > /dev/null 2>&1; then + version=$(echo "$response" | jq -r '.tag_name') + date=$(echo "$response" | jq -r '.published_at // empty') + add_version "$slug" "$repo" "$version" "false" "$date" + echo "✓ $version" + else + # Try tags as fallback + version=$(gh api "repos/${repo}/tags" --jq '.[0].name // empty' 2>/dev/null || echo "") + if [[ -n "$version" ]]; then + add_version "$slug" "$repo" "$version" "false" "" + echo "✓ $version (from tags)" + else + echo "⚠ no version found" + fi + fi + fi + + break # Only first match per script + fi + done < <(grep 'fetch_and_deploy_gh_release' "$install_script" 2>/dev/null || true) + + done + + # Save versions file + echo "$versions_json" | jq --arg date "$(date -u +%Y-%m-%dT%H:%M:%SZ)" \ + '{generated: $date, versions: (. 
| sort_by(.slug))}' > "$VERSIONS_FILE" + + total=$(echo "$versions_json" | jq 'length') + echo "" + echo "=========================================" + echo " Total versions extracted: $total" + echo "=========================================" + + - name: Check for changes + id: check-changes + run: | + if git diff --quiet "$VERSIONS_FILE" 2>/dev/null; then + echo "changed=false" >> "$GITHUB_OUTPUT" + echo "No changes detected" + else + echo "changed=true" >> "$GITHUB_OUTPUT" + echo "Changes detected:" + git diff --stat "$VERSIONS_FILE" 2>/dev/null || true + fi + + - name: Create Pull Request + if: steps.check-changes.outputs.changed == 'true' + env: + GH_TOKEN: ${{ steps.generate-token.outputs.token }} + run: | + BRANCH_NAME="automated/update-github-versions-$(date +%Y%m%d)" + + git config --global user.email "github-actions[bot]@users.noreply.github.com" + git config --global user.name "GitHub Actions[bot]" + + # Check if branch exists and delete it + git push origin --delete "$BRANCH_NAME" 2>/dev/null || true + + git checkout -b "$BRANCH_NAME" + git add "$VERSIONS_FILE" + git commit -m "chore: update github-versions.json + + Total versions: $(jq '.versions | length' "$VERSIONS_FILE") + Pinned versions: $(jq '[.versions[] | select(.pinned == true)] | length' "$VERSIONS_FILE") + Generated: $(jq -r '.generated' "$VERSIONS_FILE")" + + git push origin "$BRANCH_NAME" --force + + # Check if PR already exists + existing_pr=$(gh pr list --head "$BRANCH_NAME" --state open --json number --jq '.[0].number // empty') + + if [[ -n "$existing_pr" ]]; then + echo "PR #$existing_pr already exists, updating..." + else + gh pr create \ + --title "[Automated] Update GitHub versions" \ + --body "This PR updates version information from GitHub releases. + + ## How it works + 1. Scans all JSON files in \`frontend/public/json/\` for slugs + 2. Finds corresponding \`install/{slug}-install.sh\` scripts + 3. Extracts \`fetch_and_deploy_gh_release\` calls + 4. 
Fetches latest (or pinned) version from GitHub + + ## Stats + - Total versions: $(jq '.versions | length' "$VERSIONS_FILE") + - Pinned versions: $(jq '[.versions[] | select(.pinned == true)] | length' "$VERSIONS_FILE") + - Latest versions: $(jq '[.versions[] | select(.pinned == false)] | length' "$VERSIONS_FILE") + + --- + *Automatically generated from install scripts*" \ + --base main \ + --head "$BRANCH_NAME" \ + --label "automated pr" + fi + + - name: Auto-approve PR + if: steps.check-changes.outputs.changed == 'true' + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + BRANCH_NAME="automated/update-github-versions-$(date +%Y%m%d)" + pr_number=$(gh pr list --head "$BRANCH_NAME" --state open --json number --jq '.[0].number') + if [[ -n "$pr_number" ]]; then + gh pr review "$pr_number" --approve + fi diff --git a/ct/dawarich.sh b/ct/dawarich.sh deleted file mode 100644 index e4ba70a28..000000000 --- a/ct/dawarich.sh +++ /dev/null @@ -1,95 +0,0 @@ -#!/usr/bin/env bash -source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func) -# Copyright (c) 2021-2026 community-scripts ORG -# Author: MickLesk (CanbiZ) -# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE -# Source: https://github.com/Freika/dawarich - -APP="Dawarich" -var_tags="${var_tags:-location;tracking;gps}" -var_cpu="${var_cpu:-4}" -var_ram="${var_ram:-4096}" -var_disk="${var_disk:-15}" -var_os="${var_os:-debian}" -var_version="${var_version:-13}" -var_unprivileged="${var_unprivileged:-1}" - -header_info "$APP" -variables -color -catch_errors - -function update_script() { - header_info - check_container_storage - check_container_resources - - if [[ ! -d /opt/dawarich ]]; then - msg_error "No ${APP} Installation Found!" - exit - fi - - if check_for_gh_release "dawarich" "Freika/dawarich"; then - msg_info "Stopping Services" - systemctl stop dawarich-web dawarich-worker - msg_ok "Stopped Services" - - msg_info "Backing up Data" - cp -r /opt/dawarich/app/storage /opt/dawarich_storage_backup 2>/dev/null || true - cp /opt/dawarich/app/config/master.key /opt/dawarich_master.key 2>/dev/null || true - cp /opt/dawarich/app/config/credentials.yml.enc /opt/dawarich_credentials.yml.enc 2>/dev/null || true - msg_ok "Backed up Data" - - CLEAN_INSTALL=1 fetch_and_deploy_gh_release "dawarich" "Freika/dawarich" "tarball" "latest" "/opt/dawarich/app" - - RUBY_VERSION=$(cat /opt/dawarich/app/.ruby-version 2>/dev/null || echo "3.4.6") - RUBY_VERSION=${RUBY_VERSION} RUBY_INSTALL_RAILS="false" setup_ruby - - msg_info "Running Migrations" - cd /opt/dawarich/app - source /root/.profile - export PATH="/root/.rbenv/shims:/root/.rbenv/bin:$PATH" - eval "$(/root/.rbenv/bin/rbenv init - bash)" - - set -a && source /opt/dawarich/.env && set +a - - $STD bundle config set --local deployment 'true' - $STD bundle config set --local without 'development test' - $STD bundle install - - if [[ -f /opt/dawarich/package.json ]]; then - cd /opt/dawarich - $STD npm install - cd /opt/dawarich/app - elif [[ -f /opt/dawarich/app/package.json ]]; then - $STD npm install - fi - - $STD bundle exec rake assets:precompile - $STD bundle exec rails db:migrate - $STD bundle exec rake data:migrate - msg_ok "Ran Migrations" - - msg_info "Restoring Data" - cp -r /opt/dawarich_storage_backup/. 
/opt/dawarich/app/storage/ 2>/dev/null || true - cp /opt/dawarich_master.key /opt/dawarich/app/config/master.key 2>/dev/null || true - cp /opt/dawarich_credentials.yml.enc /opt/dawarich/app/config/credentials.yml.enc 2>/dev/null || true - rm -rf /opt/dawarich_storage_backup /opt/dawarich_master.key /opt/dawarich_credentials.yml.enc - msg_ok "Restored Data" - - msg_info "Starting Services" - systemctl start dawarich-web dawarich-worker - msg_ok "Started Services" - msg_ok "Updated successfully!" - fi - exit -} - -start -build_container -description - -msg_ok "Completed Successfully!\n" -echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" -echo -e "${INFO}${YW} Access it using the following URL:${CL}" -echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:3000${CL}" diff --git a/frontend/public/json/dawarich.json b/frontend/public/json/dawarich.json deleted file mode 100644 index 85659bdaa..000000000 --- a/frontend/public/json/dawarich.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Dawarich", - "slug": "dawarich", - "categories": [ - 9 - ], - "date_created": "2026-01-16", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 3000, - "documentation": "https://dawarich.app/docs", - "website": "https://dawarich.app/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/dawarich.webp", - "config_path": "/opt/dawarich/.env", - "description": "Dawarich is a self-hosted alternative to Google Timeline (Google Maps Location History). It allows you to import your location history from Google Maps Timeline and Owntracks, view it on a map, and analyze your location data with statistics and visualizations.", - "install_methods": [ - { - "type": "default", - "script": "ct/dawarich.sh", - "resources": { - "cpu": 4, - "ram": 4096, - "hdd": 15, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": "demo@dawarich.app", - "password": "password" - }, - "notes": [ - { - "text": "Default credentials: demo@dawarich.app / password - Change after first login!", - "type": "warning" - } - ] -} diff --git a/install/dawarich-install.sh b/install/dawarich-install.sh deleted file mode 100644 index 0adc17a14..000000000 --- a/install/dawarich-install.sh +++ /dev/null @@ -1,173 +0,0 @@ -#!/usr/bin/env bash - -# Copyright (c) 2021-2026 community-scripts ORG -# Author: MickLesk (CanbiZ) -# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE -# Source: https://github.com/Freika/dawarich - -source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" -color -verb_ip6 -catch_errors -setting_up_container -network_check -update_os - -msg_info "Installing Dependencies" -$STD apt install -y \ - build-essential \ - git \ - libpq-dev \ - libgeos-dev \ - libyaml-dev \ - libffi-dev \ - libssl-dev \ - libjemalloc2 \ - imagemagick \ - libmagickwand-dev \ - libvips-dev \ - cmake \ - redis-server \ - nginx -msg_ok "Installed Dependencies" - -PG_VERSION="17" PG_MODULES="postgis-3" setup_postgresql -PG_DB_NAME="dawarich_db" PG_DB_USER="dawarich" PG_DB_EXTENSIONS="postgis" setup_postgresql_db - -fetch_and_deploy_gh_release "dawarich" "Freika/dawarich" "tarball" "latest" "/opt/dawarich/app" - -msg_info "Setting up Directories" -mkdir -p /opt/dawarich/app/{storage,log,tmp/pids,tmp/cache,tmp/sockets} -msg_ok "Set up Directories" - -msg_info "Configuring Environment" -SECRET_KEY_BASE=$(openssl rand -hex 64) -RELEASE=$(get_latest_github_release "Freika/dawarich") -cat </opt/dawarich/.env -RAILS_ENV=production -SECRET_KEY_BASE=${SECRET_KEY_BASE} 
-DATABASE_HOST=localhost -DATABASE_USERNAME=${PG_DB_USER} -DATABASE_PASSWORD=${PG_DB_PASS} -DATABASE_NAME=${PG_DB_NAME} -REDIS_URL=redis://127.0.0.1:6379/0 -BACKGROUND_PROCESSING_CONCURRENCY=10 -APPLICATION_HOST=${LOCAL_IP} -APPLICATION_HOSTS=${LOCAL_IP},localhost -TIME_ZONE=UTC -DISABLE_TELEMETRY=true -APP_VERSION=${RELEASE} -EOF -msg_ok "Configured Environment" - -NODE_VERSION="22" setup_nodejs -RUBY_VERSION=$(cat /opt/dawarich/app/.ruby-version 2>/dev/null || echo "3.4.6") -RUBY_VERSION=${RUBY_VERSION} RUBY_INSTALL_RAILS="false" setup_ruby - -msg_info "Installing Dawarich" -cd /opt/dawarich/app -source /root/.profile -export PATH="/root/.rbenv/shims:/root/.rbenv/bin:$PATH" -eval "$(/root/.rbenv/bin/rbenv init - bash)" -set -a && source /opt/dawarich/.env && set +a -$STD gem install bundler -$STD bundle config set --local deployment 'true' -$STD bundle config set --local without 'development test' -$STD bundle install -if [[ -f /opt/dawarich/package.json ]]; then - cd /opt/dawarich - $STD npm install - cd /opt/dawarich/app -elif [[ -f /opt/dawarich/app/package.json ]]; then - $STD npm install -fi -$STD bundle exec rake assets:precompile -$STD bundle exec rails db:prepare -$STD bundle exec rake data:migrate -msg_ok "Installed Dawarich" - -msg_info "Creating Services" -cat </etc/systemd/system/dawarich-web.service -[Unit] -Description=Dawarich Web Server -After=network.target postgresql.service redis-server.service -Requires=postgresql.service redis-server.service - -[Service] -Type=simple -WorkingDirectory=/opt/dawarich/app -EnvironmentFile=/opt/dawarich/.env -ExecStart=/root/.rbenv/shims/bundle exec puma -C config/puma.rb -Restart=always -RestartSec=5 - -[Install] -WantedBy=multi-user.target -EOF - -cat </etc/systemd/system/dawarich-worker.service -[Unit] -Description=Dawarich Sidekiq Worker -After=network.target postgresql.service redis-server.service -Requires=postgresql.service redis-server.service - -[Service] -Type=simple -WorkingDirectory=/opt/dawarich/app -EnvironmentFile=/opt/dawarich/.env -ExecStart=/root/.rbenv/shims/bundle exec sidekiq -C config/sidekiq.yml -Restart=always -RestartSec=5 - -[Install] -WantedBy=multi-user.target -EOF - -systemctl enable -q --now redis-server dawarich-web dawarich-worker -msg_ok "Created Services" - -msg_info "Configuring Nginx" -cat </etc/nginx/sites-available/dawarich.conf -upstream dawarich { - server 127.0.0.1:3000; -} - -server { - listen 80; - server_name _; - - root /opt/dawarich/app/public; - client_max_body_size 100M; - - location ~ ^/(assets|packs)/ { - expires max; - add_header Cache-Control "public, immutable"; - try_files \$uri =404; - } - - location / { - try_files \$uri @rails; - } - - location @rails { - proxy_pass http://dawarich; - proxy_http_version 1.1; - proxy_set_header Host \$host; - proxy_set_header X-Real-IP \$remote_addr; - proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for; - proxy_set_header X-Forwarded-Proto \$scheme; - proxy_set_header Upgrade \$http_upgrade; - proxy_set_header Connection "upgrade"; - proxy_redirect off; - proxy_buffering off; - } -} -EOF -ln -sf /etc/nginx/sites-available/dawarich.conf /etc/nginx/sites-enabled/ -rm -f /etc/nginx/sites-enabled/default -systemctl enable -q --now nginx -msg_ok "Configured Nginx" - -motd_ssh -customize -cleanup_lxc From 8224544bf0aabcd4537d5318cca7335a20138ec5 Mon Sep 17 00:00:00 2001 From: "CanbiZ (MickLesk)" <47820557+MickLesk@users.noreply.github.com> Date: Wed, 28 Jan 2026 13:16:31 +0100 Subject: [PATCH 110/228] Update update-github-versions.yml --- 
 .github/workflows/update-github-versions.yml | 11 ++---------
 1 file changed, 2 insertions(+), 9 deletions(-)

diff --git a/.github/workflows/update-github-versions.yml b/.github/workflows/update-github-versions.yml
index a31299b84..be3679f25 100644
--- a/.github/workflows/update-github-versions.yml
+++ b/.github/workflows/update-github-versions.yml
@@ -24,16 +24,9 @@ jobs:
         with:
           ref: main
 
-      - name: Generate GitHub App Token
-        id: generate-token
-        uses: actions/create-github-app-token@v1
-        with:
-          app-id: ${{ vars.APP_ID }}
-          private-key: ${{ secrets.APP_PRIVATE_KEY }}
-
       - name: Extract GitHub versions from install scripts
         env:
-          GH_TOKEN: ${{ steps.generate-token.outputs.token }}
+          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
         run: |
           set -euo pipefail
 
@@ -172,7 +165,7 @@ jobs:
       - name: Create Pull Request
         if: steps.check-changes.outputs.changed == 'true'
         env:
-          GH_TOKEN: ${{ steps.generate-token.outputs.token }}
+          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
         run: |
           BRANCH_NAME="automated/update-github-versions-$(date +%Y%m%d)"

From e1be251201474ad35cef44fbf15a5dce47327d88 Mon Sep 17 00:00:00 2001
From: Joerg Heinemann
Date: Wed, 28 Jan 2026 13:16:35 +0100
Subject: [PATCH 111/228] Add ebusd configuration instructions to a text file

Added configuration instructions for ebusd in a new text file.
---
 install/ebusd-install.sh | 35 +++++++++++++++++++++++++++++++++++
 1 file changed, 35 insertions(+)

diff --git a/install/ebusd-install.sh b/install/ebusd-install.sh
index 0ede2e9b8..560bf362a 100644
--- a/install/ebusd-install.sh
+++ b/install/ebusd-install.sh
@@ -25,6 +25,41 @@ $STD apt install -y ebusd
 $STD systemctl enable ebusd
 msg_ok "Installed ebusd"
 
+cat <<EOF >~/ebusd-configuation-instructions.txt
+Configuration instructions:
+
+ 1. Edit "/etc/default/ebusd" if necessary (especially if your device is not "/dev/ttyUSB0")
+ 2. Start the daemon with "systemctl start ebusd"
+ 3. Check the log file "/var/log/ebusd.log"
+ 4.
Make the daemon autostart with "systemctl enable ebusd" + +Working "/etc/default/ebusd" options for "ebus adapter shield v5": + +EBUSD_OPTS=" + --pidfile=/run/ebusd.pid + --latency=100 + --scanconfig + --configpath=https://cfg.ebusd.eu/ + --accesslevel=* + --pollinterval=30 + --device=ens:XXX.XXX.XXX.XXX:9999 + --mqtthost=XXX.XXX.XXX.XXX + --mqttport=1883 + --mqttuser=XXXXXX + --mqttpass=XXXXXX + --mqttint=/etc/ebusd/mqtt-hassio.cfg + --mqttjson + --mqttlog + --mqttretain + --mqtttopic=ebusd + --log=all:notice + --log=main:notice + --log=bus:notice + --log=update:notice + --log=network:notice + --log=other:notice" +EOF + motd_ssh customize cleanup_lxc From 416f915a62be2224cdcc591de2f39cc484a27d66 Mon Sep 17 00:00:00 2001 From: "CanbiZ (MickLesk)" <47820557+MickLesk@users.noreply.github.com> Date: Wed, 28 Jan 2026 13:17:33 +0100 Subject: [PATCH 112/228] Update update-versions-github.yml --- .github/workflows/update-versions-github.yml | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/.github/workflows/update-versions-github.yml b/.github/workflows/update-versions-github.yml index 5c18ef2e5..9e94b9686 100644 --- a/.github/workflows/update-versions-github.yml +++ b/.github/workflows/update-versions-github.yml @@ -25,13 +25,6 @@ jobs: with: ref: main - - name: Generate GitHub App Token - id: generate-token - uses: actions/create-github-app-token@v1 - with: - app-id: ${{ vars.APP_ID }} - private-key: ${{ secrets.APP_PRIVATE_KEY }} - - name: Extract version sources from install scripts run: | set -euo pipefail @@ -330,7 +323,7 @@ jobs: - name: Fetch versions for all sources env: - GH_TOKEN: ${{ steps.generate-token.outputs.token }} + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} run: | set -euo pipefail @@ -453,7 +446,7 @@ jobs: - name: Create Pull Request if: steps.check-changes.outputs.changed == 'true' env: - GH_TOKEN: ${{ steps.generate-token.outputs.token }} + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} run: | BRANCH_NAME="automated/update-versions-$(date +%Y%m%d)" From 498f3f93081175588ecc48ab27b18638d529f277 Mon Sep 17 00:00:00 2001 From: "CanbiZ (MickLesk)" <47820557+MickLesk@users.noreply.github.com> Date: Wed, 28 Jan 2026 13:18:29 +0100 Subject: [PATCH 113/228] chore: use GITHUB_TOKEN instead of App Token in version workflows --- .github/workflows/update-github-versions.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/update-github-versions.yml b/.github/workflows/update-github-versions.yml index be3679f25..924a4c3c1 100644 --- a/.github/workflows/update-github-versions.yml +++ b/.github/workflows/update-github-versions.yml @@ -57,10 +57,10 @@ jobs: # Get list of slugs from JSON files echo "" echo "=== Scanning JSON files for slugs ===" - + for json_file in frontend/public/json/*.json; do [[ ! 
-f "$json_file" ]] && continue - + # Skip non-app JSON files basename_file=$(basename "$json_file") case "$basename_file" in From a7a4c3457c77bacef7524a56e71e93322d9c4a5e Mon Sep 17 00:00:00 2001 From: "CanbiZ (MickLesk)" <47820557+MickLesk@users.noreply.github.com> Date: Wed, 28 Jan 2026 13:19:39 +0100 Subject: [PATCH 114/228] Update update-github-versions.yml --- .github/workflows/update-github-versions.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/update-github-versions.yml b/.github/workflows/update-github-versions.yml index 924a4c3c1..f907bc679 100644 --- a/.github/workflows/update-github-versions.yml +++ b/.github/workflows/update-github-versions.yml @@ -1,4 +1,4 @@ -name: Update GitHub Versions +name: Update GitHub Versions (New) on: workflow_dispatch: From 6269bc253a86a06f09d29960627ef7389a97b10e Mon Sep 17 00:00:00 2001 From: "CanbiZ (MickLesk)" <47820557+MickLesk@users.noreply.github.com> Date: Wed, 28 Jan 2026 13:20:52 +0100 Subject: [PATCH 115/228] fix: correct repository name in workflow condition --- .github/workflows/update-github-versions.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/update-github-versions.yml b/.github/workflows/update-github-versions.yml index f907bc679..f12fb4157 100644 --- a/.github/workflows/update-github-versions.yml +++ b/.github/workflows/update-github-versions.yml @@ -15,7 +15,7 @@ env: jobs: update-github-versions: - if: github.repository == 'community-scripts/ProxmoxVE' + if: github.repository == 'community-scripts/ProxmoxVED' runs-on: ubuntu-latest steps: From 12fa5e7d121d77d195e372173ab5464197067458 Mon Sep 17 00:00:00 2001 From: "CanbiZ (MickLesk)" <47820557+MickLesk@users.noreply.github.com> Date: Wed, 28 Jan 2026 13:22:47 +0100 Subject: [PATCH 116/228] fix: detect new untracked files in change check --- .github/workflows/update-github-versions.yml | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/.github/workflows/update-github-versions.yml b/.github/workflows/update-github-versions.yml index f12fb4157..c7011ab45 100644 --- a/.github/workflows/update-github-versions.yml +++ b/.github/workflows/update-github-versions.yml @@ -153,7 +153,15 @@ jobs: - name: Check for changes id: check-changes run: | - if git diff --quiet "$VERSIONS_FILE" 2>/dev/null; then + # Check if file is new (untracked) or has changes + if [[ ! -f "$VERSIONS_FILE" ]]; then + echo "changed=false" >> "$GITHUB_OUTPUT" + echo "Versions file was not created" + elif ! 
git ls-files --error-unmatch "$VERSIONS_FILE" &>/dev/null; then + # File exists but is not tracked - it's new + echo "changed=true" >> "$GITHUB_OUTPUT" + echo "New file created: $VERSIONS_FILE" + elif git diff --quiet "$VERSIONS_FILE" 2>/dev/null; then echo "changed=false" >> "$GITHUB_OUTPUT" echo "No changes detected" else From 332eab5fc2a22e7afaf6fcb83432319216df8326 Mon Sep 17 00:00:00 2001 From: "CanbiZ (MickLesk)" <47820557+MickLesk@users.noreply.github.com> Date: Wed, 28 Jan 2026 13:28:54 +0100 Subject: [PATCH 117/228] fix: improve output formatting and remove self-approve step --- .github/workflows/update-github-versions.yml | 25 ++++---------------- 1 file changed, 5 insertions(+), 20 deletions(-) diff --git a/.github/workflows/update-github-versions.yml b/.github/workflows/update-github-versions.yml index c7011ab45..9f3680a8c 100644 --- a/.github/workflows/update-github-versions.yml +++ b/.github/workflows/update-github-versions.yml @@ -77,8 +77,6 @@ jobs: install_script="install/${slug}-install.sh" [[ ! -f "$install_script" ]] && continue - echo -n "[$slug] " - # Look for fetch_and_deploy_gh_release calls # Pattern: fetch_and_deploy_gh_release "app" "owner/repo" ["mode"] ["version"] while IFS= read -r line; do @@ -98,7 +96,6 @@ jobs: if [[ -n "$pinned_version" && "$pinned_version" != "latest" ]]; then is_pinned=true target_version="$pinned_version" - echo -n "(pinned: $pinned_version) " fi # Fetch version from GitHub @@ -109,10 +106,9 @@ jobs: version=$(echo "$response" | jq -r '.tag_name') date=$(echo "$response" | jq -r '.published_at // empty') add_version "$slug" "$repo" "$version" "true" "$date" - echo "✓ $version (pinned)" + echo "[$slug] ✓ $version (pinned)" else - # Pinned version not found, skip - echo "⚠ pinned version $target_version not found" + echo "[$slug] ⚠ pinned version $target_version not found" fi else # Fetch latest release @@ -121,15 +117,15 @@ jobs: version=$(echo "$response" | jq -r '.tag_name') date=$(echo "$response" | jq -r '.published_at // empty') add_version "$slug" "$repo" "$version" "false" "$date" - echo "✓ $version" + echo "[$slug] ✓ $version" else # Try tags as fallback version=$(gh api "repos/${repo}/tags" --jq '.[0].name // empty' 2>/dev/null || echo "") if [[ -n "$version" ]]; then add_version "$slug" "$repo" "$version" "false" "" - echo "✓ $version (from tags)" + echo "[$slug] ✓ $version (from tags)" else - echo "⚠ no version found" + echo "[$slug] ⚠ no version found" fi fi fi @@ -220,14 +216,3 @@ jobs: --head "$BRANCH_NAME" \ --label "automated pr" fi - - - name: Auto-approve PR - if: steps.check-changes.outputs.changed == 'true' - env: - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - run: | - BRANCH_NAME="automated/update-github-versions-$(date +%Y%m%d)" - pr_number=$(gh pr list --head "$BRANCH_NAME" --state open --json number --jq '.[0].number') - if [[ -n "$pr_number" ]]; then - gh pr review "$pr_number" --approve - fi From b858cafad7e3047c1fd729ebe5c1b711dca7d0ec Mon Sep 17 00:00:00 2001 From: Joerg Heinemann Date: Wed, 28 Jan 2026 13:42:39 +0100 Subject: [PATCH 118/228] Update install/ebusd-install.sh Co-authored-by: Tobias <96661824+CrazyWolf13@users.noreply.github.com> --- install/ebusd-install.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/install/ebusd-install.sh b/install/ebusd-install.sh index 560bf362a..187cc2265 100644 --- a/install/ebusd-install.sh +++ b/install/ebusd-install.sh @@ -22,7 +22,7 @@ setup_deb822_repo \ msg_info "Installing ebusd" $STD apt install -y ebusd -$STD systemctl enable ebusd +systemctl 
enable -q --now ebusd msg_ok "Installed ebusd" cat <~/ebusd-configuation-instructions.txt From 4d349d371b8d2cd6f9d131a0bc70e7d7fb3a5200 Mon Sep 17 00:00:00 2001 From: Tobias <96661824+CrazyWolf13@users.noreply.github.com> Date: Wed, 28 Jan 2026 14:26:56 +0100 Subject: [PATCH 119/228] Create vikunja-install.sh --- install/vikunja-install.sh | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) create mode 100644 install/vikunja-install.sh diff --git a/install/vikunja-install.sh b/install/vikunja-install.sh new file mode 100644 index 000000000..e17608e96 --- /dev/null +++ b/install/vikunja-install.sh @@ -0,0 +1,31 @@ +#!/usr/bin/env bash + +# Copyright (c) 2021-2026 community-scripts ORG +# Author: MickLesk (Canbiz) | Co-Author: CrazyWolf13 +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://vikunja.io/ + +source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" +color +verb_ip6 +catch_errors +setting_up_container +network_check +update_os + +msg_info "Installing Dependencies" +# $STD apt install -y make +msg_ok "Installed Dependencies" + +fetch_and_deploy_gh_release "vikunja" "go-vikunja/vikunja" "binary" "latest" + +msg_info "Setup Vikunja" +sed -i 's|^ timezone: .*| timezone: UTC|' /etc/vikunja/config.yml +sed -i 's|"./vikunja.db"|"/etc/vikunja/vikunja.db"|' /etc/vikunja/config.yml +sed -i 's|./files|/etc/vikunja/files|' /etc/vikunja/config.yml +systemctl enable -q --now vikunja +msg_ok "Setting up Vikunja" + +motd_ssh +customize +cleanup_lxc From ae0f0680c4f0cb27d3f3d97a6b13f6f59dbd3031 Mon Sep 17 00:00:00 2001 From: Tobias <96661824+CrazyWolf13@users.noreply.github.com> Date: Wed, 28 Jan 2026 14:33:29 +0100 Subject: [PATCH 120/228] Create vikunja.sh --- ct/vikunja.sh | 53 +++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 53 insertions(+) create mode 100644 ct/vikunja.sh diff --git a/ct/vikunja.sh b/ct/vikunja.sh new file mode 100644 index 000000000..ab5287bbf --- /dev/null +++ b/ct/vikunja.sh @@ -0,0 +1,53 @@ +#!/usr/bin/env bash +source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func) +# Copyright (c) 2021-2026 community-scripts ORG +# Author: MickLesk (Canbiz) | Co-Author: CrazyWolf13 +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://vikunja.io/ + +APP="Vikunja" +var_tags="${var_tags:-todo-app}" +var_cpu="${var_cpu:-1}" +var_ram="${var_ram:-1024}" +var_disk="${var_disk:-4}" +var_os="${var_os:-debian}" +var_version="${var_version:-13}" +var_unprivileged="${var_unprivileged:-1}" + +header_info "$APP" +variables +color +catch_errors + +function update_script() { + header_info + check_container_storage + check_container_resources + if [[ ! -d /opt/vikunja ]]; then + msg_error "No ${APP} Installation Found!" + exit + fi + + if check_for_gh_release "vikunja" "go-vikunja/vikunja" "latest"; then + msg_info "Stopping Service" + systemctl stop vikunja + msg_ok "Stopped Service" + + fetch_and_deploy_gh_release "vikunja" "go-vikunja/vikunja" "binary" "latest" + + msg_info "Starting Service" + systemctl start vikunja + msg_ok "Started Service" + msg_ok "Updated successfully!" 
+ fi + exit 0 +} + +start +build_container +description + +msg_ok "Completed successfully!\n" +echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" +echo -e "${INFO}${YW} Access it using the following URL:${CL}" +echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:3456${CL}" From 454bd021af79503cce10875fa13e5e18b49a90e9 Mon Sep 17 00:00:00 2001 From: Tobias <96661824+CrazyWolf13@users.noreply.github.com> Date: Wed, 28 Jan 2026 14:51:47 +0100 Subject: [PATCH 121/228] Update vikunja-install.sh --- install/vikunja-install.sh | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/install/vikunja-install.sh b/install/vikunja-install.sh index e17608e96..a227e028b 100644 --- a/install/vikunja-install.sh +++ b/install/vikunja-install.sh @@ -13,13 +13,10 @@ setting_up_container network_check update_os -msg_info "Installing Dependencies" -# $STD apt install -y make -msg_ok "Installed Dependencies" - fetch_and_deploy_gh_release "vikunja" "go-vikunja/vikunja" "binary" "latest" msg_info "Setup Vikunja" +sed -i -E 's/^# cors:/cors:/; s/^[[:space:]]*# enable:[[:space:]]*true/ enable: false/' /etc/vikunja/config.yml sed -i 's|^ timezone: .*| timezone: UTC|' /etc/vikunja/config.yml sed -i 's|"./vikunja.db"|"/etc/vikunja/vikunja.db"|' /etc/vikunja/config.yml sed -i 's|./files|/etc/vikunja/files|' /etc/vikunja/config.yml From dc379e05ed5f1a133cdf04b3756252826ef035c8 Mon Sep 17 00:00:00 2001 From: Tobias <96661824+CrazyWolf13@users.noreply.github.com> Date: Wed, 28 Jan 2026 14:52:08 +0100 Subject: [PATCH 122/228] Update vikunja-install.sh --- install/vikunja-install.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/install/vikunja-install.sh b/install/vikunja-install.sh index a227e028b..5f25288e6 100644 --- a/install/vikunja-install.sh +++ b/install/vikunja-install.sh @@ -15,13 +15,13 @@ update_os fetch_and_deploy_gh_release "vikunja" "go-vikunja/vikunja" "binary" "latest" -msg_info "Setup Vikunja" +msg_info "Setting up Vikunja" sed -i -E 's/^# cors:/cors:/; s/^[[:space:]]*# enable:[[:space:]]*true/ enable: false/' /etc/vikunja/config.yml sed -i 's|^ timezone: .*| timezone: UTC|' /etc/vikunja/config.yml sed -i 's|"./vikunja.db"|"/etc/vikunja/vikunja.db"|' /etc/vikunja/config.yml sed -i 's|./files|/etc/vikunja/files|' /etc/vikunja/config.yml systemctl enable -q --now vikunja -msg_ok "Setting up Vikunja" +msg_ok "Set up Vikunja" motd_ssh customize From 293a5a8340bc4c6e8f04d3a6dfb326b1c331ebd9 Mon Sep 17 00:00:00 2001 From: "CanbiZ (MickLesk)" <47820557+MickLesk@users.noreply.github.com> Date: Wed, 28 Jan 2026 14:59:32 +0100 Subject: [PATCH 123/228] test --- ct/authelia.sh | 48 ++++++++++++++++ install/authelia-install.sh | 107 ++++++++++++++++++++++++++++++++++++ misc/build.func | 18 ++++++ 3 files changed, 173 insertions(+) create mode 100644 ct/authelia.sh create mode 100644 install/authelia-install.sh diff --git a/ct/authelia.sh b/ct/authelia.sh new file mode 100644 index 000000000..0831c4db1 --- /dev/null +++ b/ct/authelia.sh @@ -0,0 +1,48 @@ +#!/usr/bin/env bash +source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func) +# Copyright (c) 2021-2026 community-scripts ORG +# Author: thost96 (thost96) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://www.authelia.com/ + +APP="Authelia" +var_tags="${var_tags:-authenticator}" +var_cpu="${var_cpu:-1}" +var_ram="${var_ram:-512}" +var_disk="${var_disk:-2}" +var_os="${var_os:-debian}" 
+var_version="${var_version:-13}" +var_unprivileged="${var_unprivileged:-1}" + +header_info "$APP" +base_settings + +variables +color +catch_errors + +function update_script() { + header_info + check_container_storage + check_container_resources + if [[ ! -d /etc/authelia/ ]]; then + msg_error "No ${APP} Installation Found!" + exit + fi + + if check_for_gh_release "authelia" "authelia/authelia"; then + $STD apt update + $STD apt -y upgrade + fetch_and_deploy_gh_release "authelia" "authelia/authelia" "binary" + msg_ok "Updated successfully!" + fi + exit +} +start +build_container +description + +msg_ok "Completed successfully!\n" +echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" +echo -e "${INFO}${YW} Access it using the following URL:${CL}" +echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:9091 or https://auth.YOURDOMAIN ${CL}" diff --git a/install/authelia-install.sh b/install/authelia-install.sh new file mode 100644 index 000000000..010be6cab --- /dev/null +++ b/install/authelia-install.sh @@ -0,0 +1,107 @@ +#!/usr/bin/env bash + +# Copyright (c) 2021-2026 community-scripts ORG +# Author: thost96 (thost96) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://www.authelia.com/ + +source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" +color +verb_ip6 +catch_errors +setting_up_container +network_check +update_os + +fetch_and_deploy_gh_release "authelia" "authelia/authelia" "binary" + +MAX_ATTEMPTS=3 +attempt=0 +while true; do + attempt=$((attempt + 1)) + read -rp "${TAB3}Enter your domain or IP (ex. example.com or 192.168.1.100): " DOMAIN + if [[ -z "$DOMAIN" ]]; then + if ((attempt >= MAX_ATTEMPTS)); then + DOMAIN="${LOCAL_IP:-localhost}" + msg_warn "Using fallback: $DOMAIN" + break + fi + msg_warn "Domain cannot be empty! (Attempt $attempt/$MAX_ATTEMPTS)" + elif [[ "$DOMAIN" =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}$ ]]; then + valid_ip=true + IFS='.' read -ra octets <<< "$DOMAIN" + for octet in "${octets[@]}"; do + if ((octet > 255)); then + valid_ip=false + break + fi + done + if $valid_ip; then + break + else + msg_warn "Invalid IP address!" + fi + elif [[ "$DOMAIN" =~ ^[a-zA-Z0-9]([a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(\.[a-zA-Z0-9]([a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*\.[a-zA-Z]{2,}$ ]]; then + break + else + msg_warn "Invalid domain format!" 
+  fi
+done
+msg_info "Setting Authelia up"
+touch /etc/authelia/emails.txt
+JWT_SECRET=$(openssl rand -hex 64)
+SESSION_SECRET=$(openssl rand -hex 64)
+STORAGE_KEY=$(openssl rand -hex 64)
+
+if [[ "$DOMAIN" =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}$ ]]; then
+  AUTHELIA_URL="https://${DOMAIN}:9091"
+else
+  AUTHELIA_URL="https://auth.${DOMAIN}"
+fi
+echo "$AUTHELIA_URL" > /etc/authelia/.authelia_url
+
+cat <<EOF >/etc/authelia/users.yml
+users:
+  authelia:
+    disabled: false
+    displayname: "Authelia Admin"
+    password: "\$argon2id\$v=19\$m=65536,t=3,p=4\$ZBopMzXrzhHXPEZxRDVT2w\$SxWm96DwhOsZyn34DLocwQEIb4kCDsk632PuiMdZnig"
+    groups: []
+EOF
+cat <<EOF >/etc/authelia/configuration.yml
+authentication_backend:
+  file:
+    path: /etc/authelia/users.yml
+access_control:
+  default_policy: one_factor
+session:
+  secret: "${SESSION_SECRET}"
+  name: 'authelia_session'
+  same_site: 'lax'
+  inactivity: '5m'
+  expiration: '1h'
+  remember_me: '1M'
+  cookies:
+    - domain: "${DOMAIN}"
+      authelia_url: "${AUTHELIA_URL}"
+storage:
+  encryption_key: "${STORAGE_KEY}"
+  local:
+    path: /etc/authelia/db.sqlite
+identity_validation:
+  reset_password:
+    jwt_secret: "${JWT_SECRET}"
+    jwt_lifespan: '5 minutes'
+    jwt_algorithm: 'HS256'
+notifier:
+  filesystem:
+    filename: /etc/authelia/emails.txt
+EOF
+touch /etc/authelia/emails.txt
+chown -R authelia:authelia /etc/authelia
+systemctl enable -q --now authelia
+msg_ok "Authelia Setup completed"
+
+motd_ssh
+customize
+cleanup_lxc
diff --git a/misc/build.func b/misc/build.func
index ffdd4223a..051c5f4b8 100644
--- a/misc/build.func
+++ b/misc/build.func
@@ -3844,6 +3844,24 @@ EOF
 
   fix_gpu_gids
 
+  # Fix Debian 13 LXC template bug where / is owned by nobody:nogroup
+  # This causes systemd-tmpfiles to fail with "unsafe path transition" errors
+  # We need to fix this from the host before any package installation
+  if [[ "$var_os" == "debian" && "$var_version" == "13" ]]; then
+    # Stop container, fix ownership, restart
+    pct stop "$CTID" >/dev/null 2>&1 || true
+    sleep 1
+    # Get the actual rootfs path from pct mount
+    local rootfs_path
+    rootfs_path=$(pct mount "$CTID" 2>/dev/null | grep -oP 'mounted at \K.*' || echo "")
+    if [[ -n "$rootfs_path" && -d "$rootfs_path" ]]; then
+      chown root:root "$rootfs_path" 2>/dev/null || true
+    fi
+    pct unmount "$CTID" >/dev/null 2>&1 || true
+    pct start "$CTID" >/dev/null 2>&1
+    sleep 3
+  fi
+
   # Continue with standard container setup
   msg_info "Customizing LXC Container"

From ae8d5fa9e44ed89f2053d1f48664e5fd22ddc5cb Mon Sep 17 00:00:00 2001
From: tremor021
Date: Wed, 28 Jan 2026 15:02:33 +0100
Subject: [PATCH 124/228] LanguageTool: prevent input of multiple language codes

---
 install/languagetool-install.sh | 26 +++++++++++++++++++++++++-
 1 file changed, 25 insertions(+), 1 deletion(-)

diff --git a/install/languagetool-install.sh b/install/languagetool-install.sh
index 2797151fc..c3285a716 100644
--- a/install/languagetool-install.sh
+++ b/install/languagetool-install.sh
@@ -27,8 +27,32 @@ mv /opt/LanguageTool-*/ /opt/LanguageTool/
 download_file "https://dl.fbaipublicfiles.com/fasttext/supervised-models/lid.176.bin" /opt/lid.176.bin
 msg_ok ""
-read -r -p "${TAB3}Enter language code (en, de, es, fr, nl) to download ngrams or press ENTER to skip: " lang_code
 ngram_dir=""
+lang_code=""
+max_attempts=3
+attempt=0
+
+while [[ $attempt -lt $max_attempts ]]; do
+  read -r -p "${TAB3}Enter language code (en, de, es, fr, nl) to download ngrams or press ENTER to skip: " lang_code
+
+  if [[ -z "$lang_code" ]]; then
+    break
+  fi
+
+  if [[ "$lang_code" =~ [[:space:]] ]]; then
((attempt++)) + remaining=$((max_attempts - attempt)) + if [[ $remaining -gt 0 ]]; then + msg_error "Please enter only ONE language code. You have $remaining attempt(s) remaining." + else + msg_error "Maximum attempts reached. Continuing without ngrams." + lang_code="" + fi + continue + fi + break +done + if [[ -n "$lang_code" ]]; then if [[ "$lang_code" =~ ^(en|de|es|fr|nl)$ ]]; then msg_info "Searching for $lang_code ngrams..." From b72bb88f5abd68abd9d009dc6b827add4c9ecadd Mon Sep 17 00:00:00 2001 From: tremor021 Date: Wed, 28 Jan 2026 15:11:50 +0100 Subject: [PATCH 125/228] LanguageTool: fix msg block --- install/languagetool-install.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/install/languagetool-install.sh b/install/languagetool-install.sh index c3285a716..d4b486b6e 100644 --- a/install/languagetool-install.sh +++ b/install/languagetool-install.sh @@ -25,7 +25,7 @@ download_file "https://languagetool.org/download/LanguageTool-stable.zip" /tmp/L unzip -q /tmp/LanguageTool-stable.zip -d /opt mv /opt/LanguageTool-*/ /opt/LanguageTool/ download_file "https://dl.fbaipublicfiles.com/fasttext/supervised-models/lid.176.bin" /opt/lid.176.bin -msg_ok "" +msg_ok "Setup LanguageTool" ngram_dir="" lang_code="" From 834e0244a521158082e75753d850f881598d9045 Mon Sep 17 00:00:00 2001 From: Tobias <96661824+CrazyWolf13@users.noreply.github.com> Date: Wed, 28 Jan 2026 15:17:19 +0100 Subject: [PATCH 126/228] Update vikunja.sh --- ct/vikunja.sh | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/ct/vikunja.sh b/ct/vikunja.sh index ab5287bbf..7eb1f5cd1 100644 --- a/ct/vikunja.sh +++ b/ct/vikunja.sh @@ -28,6 +28,23 @@ function update_script() { exit fi + RELEASE="$(cat /opt/Vikunja_version 2>/dev/null || true)" + if [[ "$RELEASE" == "unstable" ]] || dpkg --compare-versions "$RELEASE" lt "1.0.0"; then + msg_warn "You are upgrading from Vikunja '$RELEASE'." + msg_warn "This requires MANUAL config changes in /etc/vikunja/config.yml." + msg_warn "See: https://vikunja.io/changelog/whats-new-in-vikunja-1.0.0/#config-changes" + + read -rp "Continue with update? (y/yes to proceed): " -t 30 CONFIRM1 || exit 1 + [[ "$CONFIRM1" =~ ^([yY]|[yY][eE][sS])$ ]] || exit 0 + + echo + msg_warn "Vikunja may not start after the update until you manually adjust the config." + msg_warn "Details: https://vikunja.io/changelog/whats-new-in-vikunja-1.0.0/#config-changes" + + read -rp "Acknowledge and continue? 
(y/yes): " -t 30 CONFIRM2 || exit 1 + [[ "$CONFIRM2" =~ ^([yY]|[yY][eE][sS])$ ]] || exit 0 + fi + if check_for_gh_release "vikunja" "go-vikunja/vikunja" "latest"; then msg_info "Stopping Service" systemctl stop vikunja From 3e1a942d27df97f14805c34a177ebc90e231e1a5 Mon Sep 17 00:00:00 2001 From: Tobias <96661824+CrazyWolf13@users.noreply.github.com> Date: Wed, 28 Jan 2026 15:40:51 +0100 Subject: [PATCH 127/228] Update vikunja.sh --- ct/vikunja.sh | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/ct/vikunja.sh b/ct/vikunja.sh index 7eb1f5cd1..fdd97dce9 100644 --- a/ct/vikunja.sh +++ b/ct/vikunja.sh @@ -28,21 +28,21 @@ function update_script() { exit fi - RELEASE="$(cat /opt/Vikunja_version 2>/dev/null || true)" - if [[ "$RELEASE" == "unstable" ]] || dpkg --compare-versions "$RELEASE" lt "1.0.0"; then + RELEASE="$( [[ -f "$HOME/.vikunja" ]] && cat "~/.vikunja" 2>/dev/null || [[ -f /opt/Vikunja_version ]] && cat /opt/Vikunja_version 2>/dev/null)" + if [[ "$RELEASE" == "unstable" ]] || { [[ -n "$RELEASE" ]] && dpkg --compare-versions "$RELEASE" lt "1.0.0"; }; then msg_warn "You are upgrading from Vikunja '$RELEASE'." msg_warn "This requires MANUAL config changes in /etc/vikunja/config.yml." msg_warn "See: https://vikunja.io/changelog/whats-new-in-vikunja-1.0.0/#config-changes" - read -rp "Continue with update? (y/yes to proceed): " -t 30 CONFIRM1 || exit 1 - [[ "$CONFIRM1" =~ ^([yY]|[yY][eE][sS])$ ]] || exit 0 + read -rp "Continue with update? (y to proceed): " -t 30 CONFIRM1 || exit 1 + [[ "$CONFIRM1" =~ ^[yY]$ ]] || exit 0 echo msg_warn "Vikunja may not start after the update until you manually adjust the config." msg_warn "Details: https://vikunja.io/changelog/whats-new-in-vikunja-1.0.0/#config-changes" - read -rp "Acknowledge and continue? (y/yes): " -t 30 CONFIRM2 || exit 1 - [[ "$CONFIRM2" =~ ^([yY]|[yY][eE][sS])$ ]] || exit 0 + read -rp "Acknowledge and continue? (y): " -t 30 CONFIRM2 || exit 1 + [[ "$CONFIRM2" =~ ^[yY]$ ]] || exit 0 fi if check_for_gh_release "vikunja" "go-vikunja/vikunja" "latest"; then From cc6e705614a3b64fa7d31ccb25e49fe5c8dc5ba5 Mon Sep 17 00:00:00 2001 From: Tobias <96661824+CrazyWolf13@users.noreply.github.com> Date: Wed, 28 Jan 2026 15:41:51 +0100 Subject: [PATCH 128/228] Update vikunja.sh --- ct/vikunja.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ct/vikunja.sh b/ct/vikunja.sh index fdd97dce9..4639ec678 100644 --- a/ct/vikunja.sh +++ b/ct/vikunja.sh @@ -28,7 +28,7 @@ function update_script() { exit fi - RELEASE="$( [[ -f "$HOME/.vikunja" ]] && cat "~/.vikunja" 2>/dev/null || [[ -f /opt/Vikunja_version ]] && cat /opt/Vikunja_version 2>/dev/null)" + RELEASE="$( [[ -f "$HOME/.vikunja" ]] && cat "$HOME/.vikunja" 2>/dev/null || [[ -f /opt/Vikunja_version ]] && cat /opt/Vikunja_version 2>/dev/null)" if [[ "$RELEASE" == "unstable" ]] || { [[ -n "$RELEASE" ]] && dpkg --compare-versions "$RELEASE" lt "1.0.0"; }; then msg_warn "You are upgrading from Vikunja '$RELEASE'." msg_warn "This requires MANUAL config changes in /etc/vikunja/config.yml." 
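The Vikunja patches above gate a breaking 1.0.0 upgrade on the previously recorded version, using dpkg --compare-versions against a version string cached on disk and treating an empty or missing record like a pre-1.0.0 install (the -z "$RELEASE" branch added in PATCH 138). A minimal standalone sketch of that guard pattern follows; the file path, threshold and variable names here are illustrative placeholders, not the script's actual values:

#!/usr/bin/env bash
# Sketch: warn before an upgrade that crosses a breaking-change threshold.
VERSION_FILE="$HOME/.myapp"   # placeholder path for the cached version string
THRESHOLD="1.0.0"             # first version with breaking config changes

RECORDED="$(cat "$VERSION_FILE" 2>/dev/null || true)"

# dpkg --compare-versions exits 0 when the relation holds (lt = strictly lower).
if [[ -z "$RECORDED" ]] || dpkg --compare-versions "${RECORDED:-0.0.0}" lt "$THRESHOLD"; then
  echo "Upgrading from '${RECORDED:-unknown}' crosses $THRESHOLD - manual config changes may be required."
  read -rp "Continue? (y): " -t 30 CONFIRM || exit 1
  [[ "$CONFIRM" =~ ^[yY]$ ]] || exit 0
fi

echo "Proceeding with upgrade..."

The timed read mirrors the patch's behaviour: no answer within 30 seconds aborts the update rather than silently proceeding.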
From b49ef1899f68cc88a1471a39f1268f6b45610762 Mon Sep 17 00:00:00 2001 From: Tobias <96661824+CrazyWolf13@users.noreply.github.com> Date: Wed, 28 Jan 2026 15:49:02 +0100 Subject: [PATCH 129/228] Update vikunja.sh --- ct/vikunja.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ct/vikunja.sh b/ct/vikunja.sh index 4639ec678..06b8c8613 100644 --- a/ct/vikunja.sh +++ b/ct/vikunja.sh @@ -28,7 +28,7 @@ function update_script() { exit fi - RELEASE="$( [[ -f "$HOME/.vikunja" ]] && cat "$HOME/.vikunja" 2>/dev/null || [[ -f /opt/Vikunja_version ]] && cat /opt/Vikunja_version 2>/dev/null)" + RELEASE="$( [[ -f "$HOME/.vikunja" ]] && cat "$HOME/.vikunja" 2>/dev/null || [[ -f /opt/Vikunja_version ]] && cat /opt/Vikunja_version 2>/dev/null || true)" if [[ "$RELEASE" == "unstable" ]] || { [[ -n "$RELEASE" ]] && dpkg --compare-versions "$RELEASE" lt "1.0.0"; }; then msg_warn "You are upgrading from Vikunja '$RELEASE'." msg_warn "This requires MANUAL config changes in /etc/vikunja/config.yml." From 2f5ed17953bfc5d350eef02b31e860d3423ce488 Mon Sep 17 00:00:00 2001 From: Tobias <96661824+CrazyWolf13@users.noreply.github.com> Date: Wed, 28 Jan 2026 16:08:12 +0100 Subject: [PATCH 130/228] fix --- ct/vikunja.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ct/vikunja.sh b/ct/vikunja.sh index 06b8c8613..4ddbd9a47 100644 --- a/ct/vikunja.sh +++ b/ct/vikunja.sh @@ -50,7 +50,7 @@ function update_script() { systemctl stop vikunja msg_ok "Stopped Service" - fetch_and_deploy_gh_release "vikunja" "go-vikunja/vikunja" "binary" "latest" + fetch_and_deploy_gh_release "vikunja" "go-vikunja/vikunja" "binary" msg_info "Starting Service" systemctl start vikunja From e8e98f3c678e390751283e4602d0a1bd11983ae2 Mon Sep 17 00:00:00 2001 From: Tobias <96661824+CrazyWolf13@users.noreply.github.com> Date: Wed, 28 Jan 2026 16:08:27 +0100 Subject: [PATCH 131/228] Update vikunja-install.sh --- install/vikunja-install.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/install/vikunja-install.sh b/install/vikunja-install.sh index 5f25288e6..44fc31999 100644 --- a/install/vikunja-install.sh +++ b/install/vikunja-install.sh @@ -13,7 +13,7 @@ setting_up_container network_check update_os -fetch_and_deploy_gh_release "vikunja" "go-vikunja/vikunja" "binary" "latest" +fetch_and_deploy_gh_release "vikunja" "go-vikunja/vikunja" "binary" msg_info "Setting up Vikunja" sed -i -E 's/^# cors:/cors:/; s/^[[:space:]]*# enable:[[:space:]]*true/ enable: false/' /etc/vikunja/config.yml From 6475e513c59d7faea97bde8f1676860c9264c841 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Wed, 28 Jan 2026 15:11:41 +0000 Subject: [PATCH 132/228] Delete nodecast-tv (ct) after migration to ProxmoxVE (#1350) Co-authored-by: github-actions[bot] --- ct/nodecast-tv.sh | 60 --------------------------- frontend/public/json/nodecast-tv.json | 35 ---------------- install/nodecast-tv-install.sh | 50 ---------------------- 3 files changed, 145 deletions(-) delete mode 100644 ct/nodecast-tv.sh delete mode 100644 frontend/public/json/nodecast-tv.json delete mode 100644 install/nodecast-tv-install.sh diff --git a/ct/nodecast-tv.sh b/ct/nodecast-tv.sh deleted file mode 100644 index 53ea93dd1..000000000 --- a/ct/nodecast-tv.sh +++ /dev/null @@ -1,60 +0,0 @@ -#!/usr/bin/env bash -source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func) -# Copyright (c) 2021-2026 community-scripts ORG -# Author: luismco -# License: MIT | 
https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE -# Source: https://github.com/technomancer702/nodecast-tv - -APP="nodecast-tv" -var_tags="${var_tags:-media}" -var_cpu="${var_cpu:-2}" -var_ram="${var_ram:-2048}" -var_disk="${var_disk:-4}" -var_os="${var_os:-debian}" -var_version="${var_version:-13}" -var_unprivileged="${var_unprivileged:-1}" -var_gpu="${var_gpu:-yes}" - -header_info "$APP" -variables -color -catch_errors - -function update_script() { - header_info - check_container_storage - check_container_resources - if [[ ! -d /opt/nodecast-tv ]]; then - msg_error "No ${APP} Installation Found!" - exit - fi - - if check_for_gh_release "nodecast-tv" "technomancer702/nodecast-tv"; then - msg_info "Stopping Service" - systemctl stop nodecast-tv - msg_ok "Stopped Service" - - fetch_and_deploy_gh_release "nodecast-tv" "technomancer702/nodecast-tv" - - msg_info "Updating Modules" - cd /opt/nodecast-tv - $STD npm install - msg_ok "Updated Modules" - - msg_info "Starting Service" - systemctl start nodecast-tv - msg_ok "Started Service" - msg_ok "Updated successfully!" - fi - exit -} - -start -build_container -description - -msg_ok "Completed successfully!\n" -echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" -echo -e "${INFO}${YW} Access it using the following URL:${CL}" -echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:3000${CL}" - diff --git a/frontend/public/json/nodecast-tv.json b/frontend/public/json/nodecast-tv.json deleted file mode 100644 index 8d3f0e1fb..000000000 --- a/frontend/public/json/nodecast-tv.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "nodecast-tv", - "slug": "nodecast-tv", - "categories": [ - 13 - ], - "date_created": "2026-01-14", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 3000, - "documentation": "https://github.com/technomancer702/nodecast-tv/blob/main/README.md", - "website": "https://github.com/technomancer702/nodecast-tv", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/nodecast-tv.webp", - "config_path": "", - "description": "nodecast-tv is a modern, web-based IPTV player featuring Live TV, EPG, Movies (VOD), and Series support. 
Built with performance and user experience in mind.", - "install_methods": [ - { - "type": "default", - "script": "ct/nodecast-tv.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/install/nodecast-tv-install.sh b/install/nodecast-tv-install.sh deleted file mode 100644 index 77a57e8a1..000000000 --- a/install/nodecast-tv-install.sh +++ /dev/null @@ -1,50 +0,0 @@ -#!/usr/bin/env bash - -# Copyright (c) 2021-2026 community-scripts ORG -# Author: luismco -# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE -# Source: https://github.com/technomancer702/nodecast-tv - -source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" -color -verb_ip6 -catch_errors -setting_up_container -network_check -update_os - -fetch_and_deploy_gh_release "nodecast-tv" "technomancer702/nodecast-tv" -setup_nodejs - -msg_info "Installing Dependencies" -$STD apt install -y ffmpeg -msg_ok "Installed Dependencies" - -msg_info "Installing Modules" -cd /opt/nodecast-tv -$STD npm install -msg_ok "Installed Modules" - -msg_info "Creating Service" -cat </etc/systemd/system/nodecast-tv.service -[Unit] -Description=nodecast-tv -After=network.target -Wants=network.target - -[Service] -Type=simple -WorkingDirectory=/opt/nodecast-tv -ExecStart=/bin/npm run dev -Restart=on-failure -RestartSec=10 - -[Install] -WantedBy=multi-user.target -EOF -systemctl enable -q --now nodecast-tv -msg_ok "Created Service" - -motd_ssh -customize -cleanup_lxc From e9d3f47336a0a02429413ae6108ee5b3b05cb5a6 Mon Sep 17 00:00:00 2001 From: Tobias <96661824+CrazyWolf13@users.noreply.github.com> Date: Wed, 28 Jan 2026 16:15:00 +0100 Subject: [PATCH 133/228] Update vikunja.sh --- ct/vikunja.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ct/vikunja.sh b/ct/vikunja.sh index 4ddbd9a47..596f45f88 100644 --- a/ct/vikunja.sh +++ b/ct/vikunja.sh @@ -45,7 +45,7 @@ function update_script() { [[ "$CONFIRM2" =~ ^[yY]$ ]] || exit 0 fi - if check_for_gh_release "vikunja" "go-vikunja/vikunja" "latest"; then + if check_for_gh_release "vikunja" "go-vikunja/vikunja"; then msg_info "Stopping Service" systemctl stop vikunja msg_ok "Stopped Service" From 6a5f9c8e2b71645624b86ca5b4b3ba36ea2f0334 Mon Sep 17 00:00:00 2001 From: "GitHub Actions[bot]" Date: Wed, 28 Jan 2026 18:48:06 +0000 Subject: [PATCH 134/228] chore: update github-versions.json Total versions: 19 Pinned versions: 1 Generated: 2026-01-28T18:48:06Z --- frontend/public/json/github-versions.json | 138 ++++++++++++++++++++++ 1 file changed, 138 insertions(+) create mode 100644 frontend/public/json/github-versions.json diff --git a/frontend/public/json/github-versions.json b/frontend/public/json/github-versions.json new file mode 100644 index 000000000..41eab263d --- /dev/null +++ b/frontend/public/json/github-versions.json @@ -0,0 +1,138 @@ +{ + "generated": "2026-01-28T18:48:06Z", + "versions": [ + { + "slug": "affine", + "repo": "toeverything/AFFiNE", + "version": "v0.25.7", + "pinned": false, + "date": "2025-12-09T04:34:14Z" + }, + { + "slug": "ampache", + "repo": "ampache/ampache", + "version": "7.8.0", + "pinned": false, + "date": "2025-12-22T04:23:45Z" + }, + { + "slug": "databasus", + "repo": "databasus/databasus", + "version": "v3.7.0", + "pinned": false, + "date": "2026-01-28T14:46:28Z" + }, + { + "slug": "ente", + "repo": "ente-io/ente", + "version": "photos-v1.3.0", + "pinned": false, + "date": 
"2026-01-12T06:33:12Z" + }, + { + "slug": "frigate", + "repo": "blakeblackshear/frigate", + "version": "v0.16.3", + "pinned": false, + "date": "2025-12-06T18:15:00Z" + }, + { + "slug": "hoodik", + "repo": "hudikhq/hoodik", + "version": "v1.8.1", + "pinned": false, + "date": "2025-12-22T20:32:27Z" + }, + { + "slug": "isponsorblocktv", + "repo": "dmunozv04/iSponsorBlockTV", + "version": "v2.6.1", + "pinned": false, + "date": "2025-10-19T17:43:10Z" + }, + { + "slug": "kitchenowl", + "repo": "TomBursch/kitchenowl", + "version": "v0.7.6", + "pinned": false, + "date": "2026-01-24T01:21:14Z" + }, + { + "slug": "minthcm", + "repo": "minthcm/minthcm", + "version": "4.2.2", + "pinned": false, + "date": "2025-10-10T09:37:21Z" + }, + { + "slug": "nextexplorer", + "repo": "vikramsoni2/nextExplorer", + "version": "v2.1.1", + "pinned": false, + "date": "2026-01-24T21:22:09Z" + }, + { + "slug": "nightscout", + "repo": "nightscout/cgm-remote-monitor", + "version": "15.0.3", + "pinned": false, + "date": "2025-05-08T22:12:34Z" + }, + { + "slug": "opencloud", + "repo": "opencloud-eu/opencloud", + "version": "v5.0.0", + "pinned": true, + "date": "2026-01-26T15:58:00Z" + }, + { + "slug": "piler", + "repo": "jsuto/piler", + "version": "piler-1.4.8", + "pinned": false, + "date": "2025-09-24T06:51:38Z" + }, + { + "slug": "pixelfed", + "repo": "pixelfed/pixelfed", + "version": "v0.12.6", + "pinned": false, + "date": "2025-09-03T12:12:04Z" + }, + { + "slug": "romm", + "repo": "RetroAchievements/RALibretro", + "version": "1.8.2", + "pinned": false, + "date": "2026-01-23T17:03:31Z" + }, + { + "slug": "rustypaste", + "repo": "orhun/rustypaste", + "version": "v0.16.1", + "pinned": false, + "date": "2025-03-21T20:44:47Z" + }, + { + "slug": "seer", + "repo": "seerr-team/seerr", + "version": "v2.7.3", + "pinned": false, + "date": "2025-08-14T20:43:46Z" + }, + { + "slug": "shelfmark", + "repo": "FlareSolverr/FlareSolverr", + "version": "v3.4.6", + "pinned": false, + "date": "2025-11-29T02:43:00Z" + }, + { + "slug": "wishlist", + "repo": "cmintey/wishlist", + "version": "v0.59.0", + "pinned": false, + "date": "2026-01-19T16:42:14Z" + } + ] +} From 80b30ba74a504488c6e07031fe2fa15d070eb5ad Mon Sep 17 00:00:00 2001 From: justin Date: Wed, 28 Jan 2026 13:56:38 -0500 Subject: [PATCH 135/228] Keep .vscode consistent with repo .editorconfig --- .editorconfig | 2 +- .vscode/.shellcheckrc | 1 - .vscode/extensions.json | 3 +-- .vscode/settings.json | 41 ++++++++++------------------------------- 4 files changed, 12 insertions(+), 35 deletions(-) delete mode 100644 .vscode/.shellcheckrc diff --git a/.editorconfig b/.editorconfig index f79a823d7..5886a1c76 100644 --- a/.editorconfig +++ b/.editorconfig @@ -10,7 +10,7 @@ indent_style = space insert_final_newline = true max_line_length = 120 tab_width = 2 -; trim_trailing_whitespace = true ; disabled until files are cleaned up +trim_trailing_whitespace = true [*.md] trim_trailing_whitespace = false diff --git a/.vscode/.shellcheckrc b/.vscode/.shellcheckrc deleted file mode 100644 index 4631bd3af..000000000 --- a/.vscode/.shellcheckrc +++ /dev/null @@ -1 +0,0 @@ -disable=SC2034,SC1091,SC2155,SC2086,SC2317,SC2181,SC2164 diff --git a/.vscode/extensions.json b/.vscode/extensions.json index 1949e6fc5..5749d3b1d 100644 --- a/.vscode/extensions.json +++ b/.vscode/extensions.json @@ -1,8 +1,7 @@ { "recommendations": [ - "bmalehorn.shell-syntax", "timonwong.shellcheck", - "foxundermoon.shell-format" + "mkhl.shfmt" ], "unwantedRecommendations": [] } diff --git a/.vscode/settings.json 
b/.vscode/settings.json index 8f17c7ff9..779bbdd17 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -1,33 +1,12 @@ { - "files.associations": { - "*.func": "shellscript" - }, - "files.eol": "\n", - "files.encoding": "utf8", - "files.trimTrailingWhitespace": true, - "files.insertFinalNewline": true, - "files.autoSave": "afterDelay", - "files.autoGuessEncoding": false, - "editor.detectIndentation": false, - "editor.tabSize": 4, - "editor.insertSpaces": true, - "editor.wordWrap": "off", - "editor.renderWhitespace": "boundary", - "editor.formatOnSave": true, - "editor.formatOnPaste": true, - "editor.minimap.enabled": false, - "terminal.integrated.scrollback": 10000, - "[shellscript]": { - "editor.defaultFormatter": "foxundermoon.shell-format", - "editor.tabSize": 4, - "editor.insertSpaces": true, - }, - "shellcheck.customArgs": [ - "--rcfile", - ".vscode/.shellcheckrc" - ], - "git.autofetch": true, - "git.confirmSync": false, - "git.enableSmartCommit": true, - "extensions.ignoreRecommendations": false + "files.associations": { + "*.func": "shellscript" + }, + "[shellscript]": { + "editor.defaultFormatter": "mkhl.shfmt", + }, + "editor.codeActionsOnSave": { + "source.fixAll": "never", + }, + "shellcheck.useWorkspaceRootAsCwd": true, } From 858b6d8931643a508d192c297eafe3acd35b7574 Mon Sep 17 00:00:00 2001 From: Tobias <96661824+CrazyWolf13@users.noreply.github.com> Date: Wed, 28 Jan 2026 21:24:50 +0100 Subject: [PATCH 136/228] Update vikunja.sh --- ct/vikunja.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ct/vikunja.sh b/ct/vikunja.sh index 4ddbd9a47..596f45f88 100644 --- a/ct/vikunja.sh +++ b/ct/vikunja.sh @@ -45,7 +45,7 @@ function update_script() { [[ "$CONFIRM2" =~ ^[yY]$ ]] || exit 0 fi - if check_for_gh_release "vikunja" "go-vikunja/vikunja" "latest"; then + if check_for_gh_release "vikunja" "go-vikunja/vikunja"; then msg_info "Stopping Service" systemctl stop vikunja msg_ok "Stopped Service" From 2fa14e30636102bc5d04070271c497fce595132e Mon Sep 17 00:00:00 2001 From: Tobias <96661824+CrazyWolf13@users.noreply.github.com> Date: Wed, 28 Jan 2026 22:05:19 +0100 Subject: [PATCH 137/228] Update vikunja-install.sh --- install/vikunja-install.sh | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/install/vikunja-install.sh b/install/vikunja-install.sh index 44fc31999..3307fd04c 100644 --- a/install/vikunja-install.sh +++ b/install/vikunja-install.sh @@ -16,10 +16,9 @@ update_os fetch_and_deploy_gh_release "vikunja" "go-vikunja/vikunja" "binary" msg_info "Setting up Vikunja" -sed -i -E 's/^# cors:/cors:/; s/^[[:space:]]*# enable:[[:space:]]*true/ enable: false/' /etc/vikunja/config.yml -sed -i 's|^ timezone: .*| timezone: UTC|' /etc/vikunja/config.yml -sed -i 's|"./vikunja.db"|"/etc/vikunja/vikunja.db"|' /etc/vikunja/config.yml -sed -i 's|./files|/etc/vikunja/files|' /etc/vikunja/config.yml +sed -i 's|^# \(service:\)|\1|' /etc/vikunja/config.yml +sed -i "s|^ # \(publicurl: \).*| \1\"http://$LOCAL_IP\"|" /etc/vikunja/config.yml +sed -i "0,/^ # \(timezone: \).*/s|| \1${tz}|" /etc/vikunja/config.yml systemctl enable -q --now vikunja msg_ok "Set up Vikunja" From 5aeaf91868fa5f28bf91bf6cbd9b83fa1ce833b2 Mon Sep 17 00:00:00 2001 From: Tobias <96661824+CrazyWolf13@users.noreply.github.com> Date: Wed, 28 Jan 2026 22:19:45 +0100 Subject: [PATCH 138/228] Update vikunja.sh --- ct/vikunja.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ct/vikunja.sh b/ct/vikunja.sh index 596f45f88..f199e7721 100644 --- a/ct/vikunja.sh +++ 
b/ct/vikunja.sh @@ -29,7 +29,7 @@ function update_script() { fi RELEASE="$( [[ -f "$HOME/.vikunja" ]] && cat "$HOME/.vikunja" 2>/dev/null || [[ -f /opt/Vikunja_version ]] && cat /opt/Vikunja_version 2>/dev/null || true)" - if [[ "$RELEASE" == "unstable" ]] || { [[ -n "$RELEASE" ]] && dpkg --compare-versions "$RELEASE" lt "1.0.0"; }; then + if [[ -z "$RELEASE" ]] || [[ "$RELEASE" == "unstable" ]] || dpkg --compare-versions "${RELEASE:-0.0.0}" lt "1.0.0"; then msg_warn "You are upgrading from Vikunja '$RELEASE'." msg_warn "This requires MANUAL config changes in /etc/vikunja/config.yml." msg_warn "See: https://vikunja.io/changelog/whats-new-in-vikunja-1.0.0/#config-changes" From b1e6e1452cd3d850124e7adce8f9ea340577d92d Mon Sep 17 00:00:00 2001 From: "GitHub Actions[bot]" Date: Thu, 29 Jan 2026 02:41:19 +0000 Subject: [PATCH 139/228] chore: update github-versions.json Total versions: 19 Pinned versions: 1 Generated: 2026-01-29T02:41:18Z --- frontend/public/json/github-versions.json | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/frontend/public/json/github-versions.json b/frontend/public/json/github-versions.json index 41eab263d..7364cdacf 100644 --- a/frontend/public/json/github-versions.json +++ b/frontend/public/json/github-versions.json @@ -1,5 +1,5 @@ { - "generated": "2026-01-28T18:48:06Z", + "generated": "2026-01-29T02:41:18Z", "versions": [ { "slug": "affine", @@ -32,9 +32,9 @@ { "slug": "frigate", "repo": "blakeblackshear/frigate", - "version": "v0.16.3", + "version": "v0.16.4", "pinned": false, - "date": "2025-12-06T18:15:00Z" + "date": "2026-01-29T00:42:14Z" }, { "slug": "hoodik", From 4aeca93c0b54ce9f5b3837ce1f5ae7d9fcccaf76 Mon Sep 17 00:00:00 2001 From: vhsdream Date: Wed, 28 Jan 2026 22:42:13 -0500 Subject: [PATCH 140/228] OpenCloud: bump version to 5.0.1 --- ct/opencloud.sh | 2 +- install/opencloud-install.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ct/opencloud.sh b/ct/opencloud.sh index 2f1925421..639ee67db 100644 --- a/ct/opencloud.sh +++ b/ct/opencloud.sh @@ -29,7 +29,7 @@ function update_script() { exit fi - RELEASE="v5.0.0" + RELEASE="v5.0.1" if check_for_gh_release "opencloud" "opencloud-eu/opencloud" "${RELEASE}"; then msg_info "Stopping services" systemctl stop opencloud opencloud-wopi diff --git a/install/opencloud-install.sh b/install/opencloud-install.sh index 39b579429..7d4c9a060 100644 --- a/install/opencloud-install.sh +++ b/install/opencloud-install.sh @@ -57,7 +57,7 @@ echo "$COOLPASS" >~/.coolpass msg_ok "Installed Collabora Online" # OpenCloud -fetch_and_deploy_gh_release "opencloud" "opencloud-eu/opencloud" "singlefile" "v5.0.0" "/usr/bin" "opencloud-*-linux-amd64" +fetch_and_deploy_gh_release "opencloud" "opencloud-eu/opencloud" "singlefile" "v5.0.1" "/usr/bin" "opencloud-*-linux-amd64" msg_info "Configuring OpenCloud" DATA_DIR="/var/lib/opencloud/" From 40124eae6dbb7599445f5e0a5fb3e6c286e8809f Mon Sep 17 00:00:00 2001 From: "CanbiZ (MickLesk)" <47820557+MickLesk@users.noreply.github.com> Date: Thu, 29 Jan 2026 08:39:55 +0100 Subject: [PATCH 141/228] immich dev --- ct/immich.sh | 400 +++++++++++++++++++++++++++++++ install/immich-install.sh | 488 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 888 insertions(+) create mode 100644 ct/immich.sh create mode 100644 install/immich-install.sh diff --git a/ct/immich.sh b/ct/immich.sh new file mode 100644 index 000000000..067c6f0bd --- /dev/null +++ b/ct/immich.sh @@ -0,0 +1,400 @@ +#!/usr/bin/env bash +source <(curl -fsSL 
https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)
+# Copyright (c) 2021-2026 community-scripts ORG
+# Author: vhsdream
+# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
+# Source: https://immich.app
+
+APP="immich"
+var_tags="${var_tags:-photos}"
+var_disk="${var_disk:-20}"
+var_cpu="${var_cpu:-4}"
+var_ram="${var_ram:-6144}"
+var_os="${var_os:-debian}"
+var_version="${var_version:-13}"
+var_unprivileged="${var_unprivileged:-1}"
+var_gpu="${var_gpu:-yes}"
+
+header_info "$APP"
+variables
+color
+catch_errors
+
+function update_script() {
+  header_info
+  check_container_storage
+  check_container_resources
+  if [[ ! -d /opt/immich ]]; then
+    msg_error "No ${APP} Installation Found!"
+    exit
+  fi
+  if [[ -f /etc/apt/sources.list.d/immich.list ]]; then
+    msg_error "Wrong Debian version detected!"
+    msg_error "You must upgrade your LXC to Debian Trixie before updating."
+    msg_error "Please visit https://github.com/community-scripts/ProxmoxVE/discussions/7726 for details."
+    echo "${TAB3} If you have upgraded your LXC to Trixie and you still see this message, please open an Issue in the Community-Scripts repo."
+    exit
+  fi
+
+  setup_uv
+  PNPM_VERSION="$(curl -fsSL "https://raw.githubusercontent.com/immich-app/immich/refs/heads/main/package.json" | jq -r '.packageManager | split("@")[1]')"
+  NODE_VERSION="24" NODE_MODULE="pnpm@${PNPM_VERSION}" setup_nodejs
+
+  if [[ ! -f /etc/apt/preferences.d/preferences ]]; then
+    msg_info "Adding Debian Testing repo"
+    sed -i 's/ trixie-updates/ trixie-updates testing/g' /etc/apt/sources.list.d/debian.sources
+    cat <<EOF >/etc/apt/preferences.d/preferences
+Package: *
+Pin: release a=unstable
+Pin-Priority: 450
+
+Package: *
+Pin:release a=testing
+Pin-Priority: 450
+EOF
+    if [[ -f /etc/apt/preferences.d/immich ]]; then
+      rm /etc/apt/preferences.d/immich
+    fi
+    $STD apt update
+    msg_ok "Added Debian Testing repo"
+  fi
+
+  if ! dpkg -l "libmimalloc3" | grep -q '3.1' || ! dpkg -l "libde265-dev" | grep -q '1.0.16'; then
+    msg_info "Installing/upgrading Testing repo packages"
+    $STD apt install -t testing libmimalloc3 libde265-dev -y
+    msg_ok "Installed/upgraded Testing repo packages"
+  fi
+
+  if [[ !
-f /etc/apt/sources.list.d/mise.list ]]; then + msg_info "Installing Mise" + curl -fSs https://mise.jdx.dev/gpg-key.pub | tee /etc/apt/keyrings/mise-archive-keyring.pub 1>/dev/null + echo "deb [signed-by=/etc/apt/keyrings/mise-archive-keyring.pub arch=amd64] https://mise.jdx.dev/deb stable main" >/etc/apt/sources.list.d/mise.list + $STD apt update + $STD apt install -y mise + msg_ok "Installed Mise" + fi + + STAGING_DIR=/opt/staging + BASE_DIR=${STAGING_DIR}/base-images + SOURCE_DIR=${STAGING_DIR}/image-source + cd /tmp + if [[ -f ~/.intel_version ]]; then + curl -fsSLO https://raw.githubusercontent.com/immich-app/base-images/refs/heads/main/server/Dockerfile + readarray -t INTEL_URLS < <( + sed -n "/intel-[igc|opencl]/p" ./Dockerfile | awk '{print $2}' + sed -n "/libigdgmm12/p" ./Dockerfile | awk '{print $3}' + ) + INTEL_RELEASE="$(grep "intel-opencl-icd_" ./Dockerfile | awk -F '_' '{print $2}')" + if [[ "$INTEL_RELEASE" != "$(cat ~/.intel_version)" ]]; then + msg_info "Updating Intel iGPU dependencies" + for url in "${INTEL_URLS[@]}"; do + curl -fsSLO "$url" + done + $STD apt-mark unhold libigdgmm12 + $STD apt install -y ./libigdgmm12*.deb + rm ./libigdgmm12*.deb + $STD apt install -y ./*.deb + rm ./*.deb + $STD apt-mark hold libigdgmm12 + dpkg-query -W -f='${Version}\n' intel-opencl-icd >~/.intel_version + msg_ok "Intel iGPU dependencies updated" + fi + rm ./Dockerfile + fi + if [[ -f ~/.immich_library_revisions ]]; then + libraries=("libjxl" "libheif" "libraw" "imagemagick" "libvips") + cd "$BASE_DIR" + msg_info "Checking for updates to custom image-processing libraries" + $STD git pull + for library in "${libraries[@]}"; do + compile_"$library" + done + msg_ok "Image-processing libraries up to date" + fi + + RELEASE="2.5.0" + if check_for_gh_release "immich" "immich-app/immich" "${RELEASE}"; then + msg_info "Stopping Services" + systemctl stop immich-web + systemctl stop immich-ml + msg_ok "Stopped Services" + VCHORD_RELEASE="0.5.3" + if [[ ! -f ~/.vchord_version ]] || [[ "$VCHORD_RELEASE" != "$(cat ~/.vchord_version)" ]]; then + msg_info "Upgrading VectorChord" + curl -fsSL "https://github.com/tensorchord/vectorchord/releases/download/${VCHORD_RELEASE}/postgresql-16-vchord_${VCHORD_RELEASE}-1_amd64.deb" -o vchord.deb + $STD apt install -y ./vchord.deb + systemctl restart postgresql + $STD sudo -u postgres psql -d immich -c "ALTER EXTENSION vector UPDATE;" + $STD sudo -u postgres psql -d immich -c "ALTER EXTENSION vchord UPDATE;" + $STD sudo -u postgres psql -d immich -c "REINDEX INDEX face_index;" + $STD sudo -u postgres psql -d immich -c "REINDEX INDEX clip_index;" + echo "$VCHORD_RELEASE" >~/.vchord_version + rm ./vchord.deb + msg_ok "Upgraded VectorChord to v${VCHORD_RELEASE}" + fi + if ! dpkg -l | grep -q ccache; then + $STD apt install -yqq ccache + fi + + INSTALL_DIR="/opt/${APP}" + UPLOAD_DIR="$(sed -n '/^IMMICH_MEDIA_LOCATION/s/[^=]*=//p' /opt/immich/.env)" + SRC_DIR="${INSTALL_DIR}/source" + APP_DIR="${INSTALL_DIR}/app" + PLUGIN_DIR="${APP_DIR}/corePlugin" + ML_DIR="${APP_DIR}/machine-learning" + GEO_DIR="${INSTALL_DIR}/geodata" + + cp "$ML_DIR"/ml_start.sh "$INSTALL_DIR" + if grep -qs "set -a" "$APP_DIR"/bin/start.sh; then + cp "$APP_DIR"/bin/start.sh "$INSTALL_DIR" + else + cat <"$INSTALL_DIR"/start.sh +#!/usr/bin/env bash + +set -a +. 
${INSTALL_DIR}/.env +set +a + +/usr/bin/node ${APP_DIR}/dist/main.js "\$@" +EOF + chmod +x "$INSTALL_DIR"/start.sh + fi + + ( + shopt -s dotglob + rm -rf "${APP_DIR:?}"/* + ) + + CLEAN_INSTALL=1 fetch_and_deploy_gh_release "immich" "immich-app/immich" "tarball" "v${RELEASE}" "$SRC_DIR" + + msg_info "Updating Immich web and microservices" + cd "$SRC_DIR"/server + export COREPACK_ENABLE_DOWNLOAD_PROMPT=0 + export CI=1 + corepack enable + + # server build + export SHARP_IGNORE_GLOBAL_LIBVIPS=true + $STD pnpm --filter immich --frozen-lockfile build + unset SHARP_IGNORE_GLOBAL_LIBVIPS + export SHARP_FORCE_GLOBAL_LIBVIPS=true + $STD pnpm --filter immich --frozen-lockfile --prod --no-optional deploy "$APP_DIR" + cp "$APP_DIR"/package.json "$APP_DIR"/bin + sed -i 's|^start|./start|' "$APP_DIR"/bin/immich-admin + + # openapi & web build + cd "$SRC_DIR" + echo "packageImportMethod: hardlink" >>./pnpm-workspace.yaml + $STD pnpm --filter @immich/sdk --filter immich-web --frozen-lockfile --force install + unset SHARP_FORCE_GLOBAL_LIBVIPS + export SHARP_IGNORE_GLOBAL_LIBVIPS=true + $STD pnpm --filter @immich/sdk --filter immich-web build + cp -a web/build "$APP_DIR"/www + cp LICENSE "$APP_DIR" + + # cli build + $STD pnpm --filter @immich/sdk --filter @immich/cli --frozen-lockfile install + $STD pnpm --filter @immich/sdk --filter @immich/cli build + $STD pnpm --filter @immich/cli --prod --no-optional deploy "$APP_DIR"/cli + cd "$APP_DIR" + mv "$INSTALL_DIR"/start.sh "$APP_DIR"/bin + + # plugins + cd "$SRC_DIR" + $STD mise trust --ignore ./mise.toml + $STD mise trust ./plugins/mise.toml + cd plugins + $STD mise install + $STD mise run build + mkdir -p "$PLUGIN_DIR" + cp -r ./dist "$PLUGIN_DIR"/dist + cp ./manifest.json "$PLUGIN_DIR" + msg_ok "Updated Immich server, web, cli and plugins" + + cd "$SRC_DIR"/machine-learning + mkdir -p "$ML_DIR" && chown -R immich:immich "$ML_DIR" + chown immich:immich ./uv.lock + export VIRTUAL_ENV="${ML_DIR}"/ml-venv + if [[ -f ~/.openvino ]]; then + msg_info "Updating HW-accelerated machine-learning" + $STD uv add --no-sync --optional openvino onnxruntime-openvino==1.20.0 --active -n -p python3.12 --managed-python + $STD sudo --preserve-env=VIRTUAL_ENV -nu immich uv sync --extra openvino --no-dev --active --link-mode copy -n -p python3.12 --managed-python + patchelf --clear-execstack "${VIRTUAL_ENV}/lib/python3.12/site-packages/onnxruntime/capi/onnxruntime_pybind11_state.cpython-312-x86_64-linux-gnu.so" + msg_ok "Updated HW-accelerated machine-learning" + else + msg_info "Updating machine-learning" + $STD sudo --preserve-env=VIRTUAL_ENV -nu immich uv sync --extra cpu --no-dev --active --link-mode copy -n -p python3.11 --managed-python + msg_ok "Updated machine-learning" + fi + cd "$SRC_DIR" + cp -a machine-learning/{ann,immich_ml} "$ML_DIR" + mv "$INSTALL_DIR"/ml_start.sh "$ML_DIR" + if [[ -f ~/.openvino ]]; then + sed -i "/intra_op/s/int = 0/int = os.cpu_count() or 0/" "$ML_DIR"/immich_ml/config.py + fi + ln -sf "$APP_DIR"/resources "$INSTALL_DIR" + cd "$APP_DIR" + grep -rl /usr/src | xargs -n1 sed -i "s|\/usr/src|$INSTALL_DIR|g" + grep -rlE "'/build'" | xargs -n1 sed -i "s|'/build'|'$APP_DIR'|g" + sed -i "s@\"/cache\"@\"$INSTALL_DIR/cache\"@g" "$ML_DIR"/immich_ml/config.py + ln -s "${UPLOAD_DIR:-/opt/immich/upload}" "$APP_DIR"/upload + ln -s "${UPLOAD_DIR:-/opt/immich/upload}" "$ML_DIR"/upload + ln -s "$GEO_DIR" "$APP_DIR" + + chown -R immich:immich "$INSTALL_DIR" + systemctl restart immich-ml immich-web + msg_ok "Updated successfully!" 
+ fi + exit +} + +function compile_libjxl() { + SOURCE=${SOURCE_DIR}/libjxl + JPEGLI_LIBJPEG_LIBRARY_SOVERSION="62" + JPEGLI_LIBJPEG_LIBRARY_VERSION="62.3.0" + : "${LIBJXL_REVISION:=$(jq -cr '.revision' "$BASE_DIR"/server/sources/libjxl.json)}" + if [[ "$LIBJXL_REVISION" != "$(grep 'libjxl' ~/.immich_library_revisions | awk '{print $2}')" ]]; then + msg_info "Recompiling libjxl" + if [[ -d "$SOURCE" ]]; then rm -rf "$SOURCE"; fi + $STD git clone https://github.com/libjxl/libjxl.git "$SOURCE" + cd "$SOURCE" + $STD git reset --hard "$LIBJXL_REVISION" + $STD git submodule update --init --recursive --depth 1 --recommend-shallow + $STD git apply "$BASE_DIR"/server/sources/libjxl-patches/jpegli-empty-dht-marker.patch + $STD git apply "$BASE_DIR"/server/sources/libjxl-patches/jpegli-icc-warning.patch + mkdir build + cd build + $STD cmake \ + -DCMAKE_BUILD_TYPE=Release \ + -DBUILD_TESTING=OFF \ + -DJPEGXL_ENABLE_DOXYGEN=OFF \ + -DJPEGXL_ENABLE_MANPAGES=OFF \ + -DJPEGXL_ENABLE_PLUGIN_GIMP210=OFF \ + -DJPEGXL_ENABLE_BENCHMARK=OFF \ + -DJPEGXL_ENABLE_EXAMPLES=OFF \ + -DJPEGXL_FORCE_SYSTEM_BROTLI=ON \ + -DJPEGXL_FORCE_SYSTEM_HWY=ON \ + -DJPEGXL_ENABLE_JPEGLI=ON \ + -DJPEGXL_ENABLE_JPEGLI_LIBJPEG=ON \ + -DJPEGXL_INSTALL_JPEGLI_LIBJPEG=ON \ + -DJPEGXL_ENABLE_PLUGINS=ON \ + -DJPEGLI_LIBJPEG_LIBRARY_SOVERSION="$JPEGLI_LIBJPEG_LIBRARY_SOVERSION" \ + -DJPEGLI_LIBJPEG_LIBRARY_VERSION="$JPEGLI_LIBJPEG_LIBRARY_VERSION" \ + -DLIBJPEG_TURBO_VERSION_NUMBER=2001005 \ + .. + $STD cmake --build . -- -j"$(nproc)" + $STD cmake --install . + ldconfig /usr/local/lib + $STD make clean + cd "$STAGING_DIR" + rm -rf "$SOURCE"/{build,third_party} + sed -i "s/libjxl: .*$/libjxl: $LIBJXL_REVISION/" ~/.immich_library_revisions + msg_ok "Recompiled libjxl" + fi +} + +function compile_libheif() { + SOURCE=${SOURCE_DIR}/libheif + if ! dpkg -l | grep -q libaom; then + $STD apt install -y libaom-dev + local update="required" + fi + : "${LIBHEIF_REVISION:=$(jq -cr '.revision' "$BASE_DIR"/server/sources/libheif.json)}" + if [[ "${update:-}" ]] || [[ "$LIBHEIF_REVISION" != "$(grep 'libheif' ~/.immich_library_revisions | awk '{print $2}')" ]]; then + msg_info "Recompiling libheif" + if [[ -d "$SOURCE" ]]; then rm -rf "$SOURCE"; fi + $STD git clone https://github.com/strukturag/libheif.git "$SOURCE" + cd "$SOURCE" + $STD git reset --hard "$LIBHEIF_REVISION" + mkdir build + cd build + $STD cmake --preset=release-noplugins \ + -DWITH_DAV1D=ON \ + -DENABLE_PARALLEL_TILE_DECODING=ON \ + -DWITH_LIBSHARPYUV=ON \ + -DWITH_LIBDE265=ON \ + -DWITH_AOM_DECODER=OFF \ + -DWITH_AOM_ENCODER=ON \ + -DWITH_X265=OFF \ + -DWITH_EXAMPLES=OFF \ + .. 
+ $STD make install -j "$(nproc)" + ldconfig /usr/local/lib + $STD make clean + cd "$STAGING_DIR" + rm -rf "$SOURCE"/build + sed -i "s/libheif: .*$/libheif: $LIBHEIF_REVISION/" ~/.immich_library_revisions + msg_ok "Recompiled libheif" + fi +} + +function compile_libraw() { + SOURCE=${SOURCE_DIR}/libraw + : "${LIBRAW_REVISION:=$(jq -cr '.revision' "$BASE_DIR"/server/sources/libraw.json)}" + if [[ "$LIBRAW_REVISION" != "$(grep 'libraw' ~/.immich_library_revisions | awk '{print $2}')" ]]; then + msg_info "Recompiling libraw" + if [[ -d "$SOURCE" ]]; then rm -rf "$SOURCE"; fi + $STD git clone https://github.com/libraw/libraw.git "$SOURCE" + cd "$SOURCE" + $STD git reset --hard "$LIBRAW_REVISION" + $STD autoreconf --install + $STD ./configure --disable-examples + $STD make -j"$(nproc)" + $STD make install + ldconfig /usr/local/lib + $STD make clean + cd "$STAGING_DIR" + sed -i "s/libraw: .*$/libraw: $LIBRAW_REVISION/" ~/.immich_library_revisions + msg_ok "Recompiled libraw" + fi +} + +function compile_imagemagick() { + SOURCE=$SOURCE_DIR/imagemagick + : "${IMAGEMAGICK_REVISION:=$(jq -cr '.revision' "$BASE_DIR"/server/sources/imagemagick.json)}" + if [[ "$IMAGEMAGICK_REVISION" != "$(grep 'imagemagick' ~/.immich_library_revisions | awk '{print $2}')" ]] || + ! grep -q 'DMAGICK_LIBRAW' /usr/local/lib/ImageMagick-7*/config-Q16HDRI/configure.xml; then + msg_info "Recompiling ImageMagick" + if [[ -d "$SOURCE" ]]; then rm -rf "$SOURCE"; fi + $STD git clone https://github.com/ImageMagick/ImageMagick.git "$SOURCE" + cd "$SOURCE" + $STD git reset --hard "$IMAGEMAGICK_REVISION" + $STD ./configure --with-modules CPPFLAGS="-DMAGICK_LIBRAW_VERSION_TAIL=202502" + $STD make -j"$(nproc)" + $STD make install + ldconfig /usr/local/lib + $STD make clean + cd "$STAGING_DIR" + sed -i "s/imagemagick: .*$/imagemagick: $IMAGEMAGICK_REVISION/" ~/.immich_library_revisions + msg_ok "Recompiled ImageMagick" + fi +} + +function compile_libvips() { + SOURCE=$SOURCE_DIR/libvips + : "${LIBVIPS_REVISION:=$(jq -cr '.revision' "$BASE_DIR"/server/sources/libvips.json)}" + if [[ "$LIBVIPS_REVISION" != "$(grep 'libvips' ~/.immich_library_revisions | awk '{print $2}')" ]]; then + msg_info "Recompiling libvips" + if [[ -d "$SOURCE" ]]; then rm -rf "$SOURCE"; fi + $STD git clone https://github.com/libvips/libvips.git "$SOURCE" + cd "$SOURCE" + $STD git reset --hard "$LIBVIPS_REVISION" + $STD meson setup build --buildtype=release --libdir=lib -Dintrospection=disabled -Dtiff=disabled + cd build + $STD ninja install + ldconfig /usr/local/lib + cd "$STAGING_DIR" + rm -rf "$SOURCE"/build + sed -i "s/libvips: .*$/libvips: $LIBVIPS_REVISION/" ~/.immich_library_revisions + msg_ok "Recompiled libvips" + fi +} + +start +build_container +description + +msg_ok "Completed successfully!\n" +echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" +echo -e "${INFO}${YW} Access it using the following URL:${CL}" +echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:2283${CL}" diff --git a/install/immich-install.sh b/install/immich-install.sh new file mode 100644 index 000000000..339f79c60 --- /dev/null +++ b/install/immich-install.sh @@ -0,0 +1,488 @@ +#!/usr/bin/env bash + +# Copyright (c) 2021-2026 community-scripts ORG +# Author: vhsdream +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://immich.app + +source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" +color +verb_ip6 +catch_errors +setting_up_container +network_check +update_os + +echo "" +echo "" +echo -e "🤖 ${BL}Immich Machine Learning 
Options${CL}" +echo "─────────────────────────────────────────" +echo "Please choose your machine-learning type:" +echo "" +echo " 1) CPU only (default)" +echo " 2) Intel OpenVINO (requires GPU passthrough)" +echo "" + +read -r -p "${TAB3}Select machine-learning type [1]: " ML_TYPE +ML_TYPE="${ML_TYPE:-1}" +if [[ "$ML_TYPE" == "2" ]]; then + msg_info "Installing OpenVINO dependencies" + touch ~/.openvino + $STD apt install -y --no-install-recommends patchelf + tmp_dir=$(mktemp -d) + $STD pushd "$tmp_dir" + curl -fsSLO https://raw.githubusercontent.com/immich-app/base-images/refs/heads/main/server/Dockerfile + readarray -t INTEL_URLS < <( + sed -n "/intel-[igc|opencl]/p" ./Dockerfile | awk '{print $2}' + sed -n "/libigdgmm12/p" ./Dockerfile | awk '{print $3}' + ) + for url in "${INTEL_URLS[@]}"; do + curl -fsSLO "$url" + done + $STD apt install -y ./libigdgmm12*.deb + rm ./libigdgmm12*.deb + $STD apt install -y ./*.deb + $STD apt-mark hold libigdgmm12 + $STD popd + rm -rf "$tmp_dir" + dpkg-query -W -f='${Version}\n' intel-opencl-icd >~/.intel_version + msg_ok "Installed OpenVINO dependencies" +fi + +setup_uv + +msg_info "Installing dependencies" +$STD apt install --no-install-recommends -y \ + git \ + redis \ + autoconf \ + build-essential \ + python3-dev \ + automake \ + cmake \ + jq \ + libtool \ + libltdl-dev \ + libgdk-pixbuf-2.0-dev \ + libbrotli-dev \ + libexif-dev \ + libexpat1-dev \ + libglib2.0-dev \ + libgsf-1-dev \ + libjpeg62-turbo-dev \ + libspng-dev \ + liblcms2-dev \ + libopenexr-dev \ + libgif-dev \ + librsvg2-dev \ + libexpat1 \ + libgcc-s1 \ + libgomp1 \ + liblqr-1-0 \ + libltdl7 \ + libopenjp2-7 \ + meson \ + ninja-build \ + pkg-config \ + mesa-utils \ + mesa-va-drivers \ + mesa-vulkan-drivers \ + ocl-icd-libopencl1 \ + tini \ + zlib1g \ + libio-compress-brotli-perl \ + libwebp7 \ + libwebpdemux2 \ + libwebpmux3 \ + libhwy1t64 \ + libdav1d-dev \ + libhwy-dev \ + libwebp-dev \ + libaom-dev \ + ccache + +setup_deb822_repo \ + "jellyfin" \ + "https://repo.jellyfin.org/jellyfin_team.gpg.key" \ + "https://repo.jellyfin.org/debian" \ + "$(get_os_info codename)" +$STD apt install -y jellyfin-ffmpeg7 +ln -sf /usr/lib/jellyfin-ffmpeg/ffmpeg /usr/bin/ffmpeg +ln -sf /usr/lib/jellyfin-ffmpeg/ffprobe /usr/bin/ffprobe + +# Set permissions for /dev/dri (only in privileged containers and if /dev/dri exists) +if [[ "$CTTYPE" == "0" && -d /dev/dri ]]; then + chgrp video /dev/dri 2>/dev/null || true + chmod 755 /dev/dri 2>/dev/null || true + chmod 660 /dev/dri/* 2>/dev/null || true + $STD adduser "$(id -u -n)" video 2>/dev/null || true + $STD adduser "$(id -u -n)" render 2>/dev/null || true +fi +msg_ok "Dependencies Installed" + +msg_info "Installing Mise" +curl -fSs https://mise.jdx.dev/gpg-key.pub | tee /etc/apt/keyrings/mise-archive-keyring.pub 1>/dev/null +echo "deb [signed-by=/etc/apt/keyrings/mise-archive-keyring.pub arch=amd64] https://mise.jdx.dev/deb stable main" >/etc/apt/sources.list.d/mise.list +$STD apt update +$STD apt install -y mise +msg_ok "Installed Mise" + +msg_info "Configuring Debian Testing Repo" +sed -i 's/ trixie-updates/ trixie-updates testing/g' /etc/apt/sources.list.d/debian.sources +cat </etc/apt/preferences.d/preferences +Package: * +Pin: release a=unstable +Pin-Priority: 450 + +Package: * +Pin:release a=testing +Pin-Priority: 450 +EOF +$STD apt update +msg_ok "Configured Debian Testing repo" +msg_info "Installing packages from Debian Testing repo" +$STD apt install -t testing --no-install-recommends -yqq libmimalloc3 libde265-dev +msg_ok "Installed packages 
from Debian Testing repo" + +PNPM_VERSION="$(curl -fsSL "https://raw.githubusercontent.com/immich-app/immich/refs/heads/main/package.json" | jq -r '.packageManager | split("@")[1]')" +NODE_VERSION="24" NODE_MODULE="pnpm@${PNPM_VERSION}" setup_nodejs +PG_VERSION="16" PG_MODULES="pgvector" setup_postgresql + +VCHORD_RELEASE="0.5.3" +msg_info "Installing Vectorchord v${VCHORD_RELEASE}" +curl -fsSL "https://github.com/tensorchord/VectorChord/releases/download/${VCHORD_RELEASE}/postgresql-16-vchord_${VCHORD_RELEASE}-1_amd64.deb" -o vchord.deb +$STD apt install -y ./vchord.deb +rm vchord.deb +echo "$VCHORD_RELEASE" >~/.vchord_version +msg_ok "Installed Vectorchord v${VCHORD_RELEASE}" + +sed -i -e "/^#shared_preload/s/^#//;/^shared_preload/s/''/'vchord.so'/" /etc/postgresql/16/main/postgresql.conf +systemctl restart postgresql.service +PG_DB_NAME="immich" PG_DB_USER="immich" PG_DB_GRANT_SUPERUSER="true" PG_DB_SKIP_ALTER_ROLE="true" setup_postgresql_db + +msg_info "Compiling Custom Photo-processing Library (extreme patience)" +LD_LIBRARY_PATH=/usr/local/lib +export LD_RUN_PATH=/usr/local/lib +STAGING_DIR=/opt/staging +BASE_REPO="https://github.com/immich-app/base-images" +BASE_DIR=${STAGING_DIR}/base-images +SOURCE_DIR=${STAGING_DIR}/image-source +$STD git clone -b main "$BASE_REPO" "$BASE_DIR" +mkdir -p "$SOURCE_DIR" + +msg_info "(1/5) Compiling libjxl" +cd "$STAGING_DIR" +SOURCE=${SOURCE_DIR}/libjxl +JPEGLI_LIBJPEG_LIBRARY_SOVERSION="62" +JPEGLI_LIBJPEG_LIBRARY_VERSION="62.3.0" +: "${LIBJXL_REVISION:=$(jq -cr '.revision' $BASE_DIR/server/sources/libjxl.json)}" +$STD git clone https://github.com/libjxl/libjxl.git "$SOURCE" +cd "$SOURCE" +$STD git reset --hard "$LIBJXL_REVISION" +$STD git submodule update --init --recursive --depth 1 --recommend-shallow +$STD git apply "$BASE_DIR"/server/sources/libjxl-patches/jpegli-empty-dht-marker.patch +$STD git apply "$BASE_DIR"/server/sources/libjxl-patches/jpegli-icc-warning.patch +mkdir build +cd build +$STD cmake \ + -DCMAKE_BUILD_TYPE=Release \ + -DBUILD_TESTING=OFF \ + -DJPEGXL_ENABLE_DOXYGEN=OFF \ + -DJPEGXL_ENABLE_MANPAGES=OFF \ + -DJPEGXL_ENABLE_PLUGIN_GIMP210=OFF \ + -DJPEGXL_ENABLE_BENCHMARK=OFF \ + -DJPEGXL_ENABLE_EXAMPLES=OFF \ + -DJPEGXL_FORCE_SYSTEM_BROTLI=ON \ + -DJPEGXL_FORCE_SYSTEM_HWY=ON \ + -DJPEGXL_ENABLE_JPEGLI=ON \ + -DJPEGXL_ENABLE_JPEGLI_LIBJPEG=ON \ + -DJPEGXL_INSTALL_JPEGLI_LIBJPEG=ON \ + -DJPEGXL_ENABLE_PLUGINS=ON \ + -DJPEGLI_LIBJPEG_LIBRARY_SOVERSION="$JPEGLI_LIBJPEG_LIBRARY_SOVERSION" \ + -DJPEGLI_LIBJPEG_LIBRARY_VERSION="$JPEGLI_LIBJPEG_LIBRARY_VERSION" \ + -DLIBJPEG_TURBO_VERSION_NUMBER=2001005 \ + .. +$STD cmake --build . -- -j"$(nproc)" +$STD cmake --install . +ldconfig /usr/local/lib +$STD make clean +cd "$STAGING_DIR" +rm -rf "$SOURCE"/{build,third_party} +msg_ok "(1/5) Compiled libjxl" + +msg_info "(2/5) Compiling libheif" +SOURCE=${SOURCE_DIR}/libheif +: "${LIBHEIF_REVISION:=$(jq -cr '.revision' $BASE_DIR/server/sources/libheif.json)}" +$STD git clone https://github.com/strukturag/libheif.git "$SOURCE" +cd "$SOURCE" +$STD git reset --hard "$LIBHEIF_REVISION" +mkdir build +cd build +$STD cmake --preset=release-noplugins \ + -DWITH_DAV1D=ON \ + -DENABLE_PARALLEL_TILE_DECODING=ON \ + -DWITH_LIBSHARPYUV=ON \ + -DWITH_LIBDE265=ON \ + -DWITH_AOM_DECODER=OFF \ + -DWITH_AOM_ENCODER=ON \ + -DWITH_X265=OFF \ + -DWITH_EXAMPLES=OFF \ + .. 
+$STD make install -j "$(nproc)" +ldconfig /usr/local/lib +$STD make clean +cd "$STAGING_DIR" +rm -rf "$SOURCE"/build +msg_ok "(2/5) Compiled libheif" + +msg_info "(3/5) Compiling libraw" +SOURCE=${SOURCE_DIR}/libraw +: "${LIBRAW_REVISION:=$(jq -cr '.revision' $BASE_DIR/server/sources/libraw.json)}" +$STD git clone https://github.com/libraw/libraw.git "$SOURCE" +cd "$SOURCE" +$STD git reset --hard "$LIBRAW_REVISION" +$STD autoreconf --install +$STD ./configure --disable-examples +$STD make -j"$(nproc)" +$STD make install +ldconfig /usr/local/lib +$STD make clean +cd "$STAGING_DIR" +msg_ok "(3/5) Compiled libraw" + +msg_info "(4/5) Compiling imagemagick" +SOURCE=$SOURCE_DIR/imagemagick +: "${IMAGEMAGICK_REVISION:=$(jq -cr '.revision' $BASE_DIR/server/sources/imagemagick.json)}" +$STD git clone https://github.com/ImageMagick/ImageMagick.git "$SOURCE" +cd "$SOURCE" +$STD git reset --hard "$IMAGEMAGICK_REVISION" +$STD ./configure --with-modules CPPFLAGS="-DMAGICK_LIBRAW_VERSION_TAIL=202502" +$STD make -j"$(nproc)" +$STD make install +ldconfig /usr/local/lib +$STD make clean +cd "$STAGING_DIR" +msg_ok "(4/5) Compiled imagemagick" + +msg_info "(5/5) Compiling libvips" +SOURCE=$SOURCE_DIR/libvips +: "${LIBVIPS_REVISION:=$(jq -cr '.revision' $BASE_DIR/server/sources/libvips.json)}" +$STD git clone https://github.com/libvips/libvips.git "$SOURCE" +cd "$SOURCE" +$STD git reset --hard "$LIBVIPS_REVISION" +$STD meson setup build --buildtype=release --libdir=lib -Dintrospection=disabled -Dtiff=disabled +cd build +$STD ninja install +ldconfig /usr/local/lib +cd "$STAGING_DIR" +rm -rf "$SOURCE"/build +msg_ok "(5/5) Compiled libvips" +{ + echo "imagemagick: $IMAGEMAGICK_REVISION" + echo "libheif: $LIBHEIF_REVISION" + echo "libjxl: $LIBJXL_REVISION" + echo "libraw: $LIBRAW_REVISION" + echo "libvips: $LIBVIPS_REVISION" +} >~/.immich_library_revisions +msg_ok "Custom Photo-processing Libraries Compiled Successfully" + +INSTALL_DIR="/opt/${APPLICATION}" +UPLOAD_DIR="${INSTALL_DIR}/upload" +SRC_DIR="${INSTALL_DIR}/source" +APP_DIR="${INSTALL_DIR}/app" +PLUGIN_DIR="${APP_DIR}/corePlugin" +ML_DIR="${APP_DIR}/machine-learning" +GEO_DIR="${INSTALL_DIR}/geodata" +mkdir -p "$INSTALL_DIR" +mkdir -p {"${APP_DIR}","${UPLOAD_DIR}","${GEO_DIR}","${INSTALL_DIR}"/cache} + +fetch_and_deploy_gh_release "immich" "immich-app/immich" "tarball" "v2.5.0" "$SRC_DIR" + +msg_info "Installing Immich (patience)" + +cd "$SRC_DIR"/server +export COREPACK_ENABLE_DOWNLOAD_PROMPT=0 +export CI=1 +corepack enable + +# server build +export SHARP_IGNORE_GLOBAL_LIBVIPS=true +$STD pnpm --filter immich --frozen-lockfile build +unset SHARP_IGNORE_GLOBAL_LIBVIPS +export SHARP_FORCE_GLOBAL_LIBVIPS=true +$STD pnpm --filter immich --frozen-lockfile --prod --no-optional deploy "$APP_DIR" +cp "$APP_DIR"/package.json "$APP_DIR"/bin +sed -i 's|^start|./start|' "$APP_DIR"/bin/immich-admin + +# openapi & web build +cd "$SRC_DIR" +echo "packageImportMethod: hardlink" >>./pnpm-workspace.yaml +$STD pnpm --filter @immich/sdk --filter immich-web --frozen-lockfile --force install +unset SHARP_FORCE_GLOBAL_LIBVIPS +export SHARP_IGNORE_GLOBAL_LIBVIPS=true +$STD pnpm --filter @immich/sdk --filter immich-web build +cp -a web/build "$APP_DIR"/www +cp LICENSE "$APP_DIR" + +# cli build +$STD pnpm --filter @immich/sdk --filter @immich/cli --frozen-lockfile install +$STD pnpm --filter @immich/sdk --filter @immich/cli build +$STD pnpm --filter @immich/cli --prod --no-optional deploy "$APP_DIR"/cli + +# plugins +cd "$SRC_DIR" +$STD mise trust --ignore ./mise.toml +$STD mise 
trust ./plugins/mise.toml +cd plugins +$STD mise install +$STD mise run build +mkdir -p "$PLUGIN_DIR" +cp -r ./dist "$PLUGIN_DIR"/dist +cp ./manifest.json "$PLUGIN_DIR" +msg_ok "Installed Immich Server, Web and Plugin Components" + +cd "$SRC_DIR"/machine-learning +$STD useradd -U -s /usr/sbin/nologin -r -M -d "$INSTALL_DIR" immich +mkdir -p "$ML_DIR" && chown -R immich:immich "$INSTALL_DIR" +export VIRTUAL_ENV="${ML_DIR}/ml-venv" +if [[ -f ~/.openvino ]]; then + msg_info "Installing HW-accelerated machine-learning" + $STD uv add --no-sync --optional openvino onnxruntime-openvino==1.20.0 --active -n -p python3.12 --managed-python + $STD sudo --preserve-env=VIRTUAL_ENV -nu immich uv sync --extra openvino --no-dev --active --link-mode copy -n -p python3.12 --managed-python + patchelf --clear-execstack "${VIRTUAL_ENV}/lib/python3.12/site-packages/onnxruntime/capi/onnxruntime_pybind11_state.cpython-312-x86_64-linux-gnu.so" + msg_ok "Installed HW-accelerated machine-learning" +else + msg_info "Installing machine-learning" + $STD sudo --preserve-env=VIRTUAL_ENV -nu immich uv sync --extra cpu --no-dev --active --link-mode copy -n -p python3.11 --managed-python + msg_ok "Installed machine-learning" +fi +cd "$SRC_DIR" +cp -a machine-learning/{ann,immich_ml} "$ML_DIR" +if [[ -f ~/.openvino ]]; then + sed -i "/intra_op/s/int = 0/int = os.cpu_count() or 0/" "$ML_DIR"/immich_ml/config.py +fi +ln -sf "$APP_DIR"/resources "$INSTALL_DIR" + +cd "$APP_DIR" +grep -rl /usr/src | xargs -n1 sed -i "s|\/usr/src|$INSTALL_DIR|g" +grep -rlE "'/build'" | xargs -n1 sed -i "s|'/build'|'$APP_DIR'|g" +sed -i "s@\"/cache\"@\"$INSTALL_DIR/cache\"@g" "$ML_DIR"/immich_ml/config.py +ln -s "$UPLOAD_DIR" "$APP_DIR"/upload +ln -s "$UPLOAD_DIR" "$ML_DIR"/upload + +msg_info "Installing GeoNames data" +cd "$GEO_DIR" +curl -fsSLZ -O "https://download.geonames.org/export/dump/admin1CodesASCII.txt" \ + -O "https://download.geonames.org/export/dump/admin2Codes.txt" \ + -O "https://download.geonames.org/export/dump/cities500.zip" \ + -O "https://raw.githubusercontent.com/nvkelso/natural-earth-vector/v5.1.2/geojson/ne_10m_admin_0_countries.geojson" +unzip -q cities500.zip +date --iso-8601=seconds | tr -d "\n" >geodata-date.txt +rm cities500.zip +cd "$INSTALL_DIR" +ln -s "$GEO_DIR" "$APP_DIR" +msg_ok "Installed GeoNames data" + +mkdir -p /var/log/immich +touch /var/log/immich/{web.log,ml.log} +msg_ok "Installed Immich" + +msg_info "Modifying user, creating env file, scripts & services" +usermod -aG video,render immich + +cat <"${INSTALL_DIR}"/.env +TZ=$(cat /etc/timezone) +IMMICH_VERSION=release +NODE_ENV=production +IMMICH_ALLOW_SETUP=true + +DB_HOSTNAME=127.0.0.1 +DB_USERNAME=${PG_DB_USER} +DB_PASSWORD=${PG_DB_PASS} +DB_DATABASE_NAME=${PG_DB_NAME} +DB_VECTOR_EXTENSION=vectorchord + +REDIS_HOSTNAME=127.0.0.1 +IMMICH_MACHINE_LEARNING_URL=http://127.0.0.1:3003 +MACHINE_LEARNING_CACHE_FOLDER=${INSTALL_DIR}/cache +## - For OpenVINO only - uncomment below to increase +## - inference speed while reducing accuracy +## - Default is FP32 +# MACHINE_LEARNING_OPENVINO_PRECISION=FP16 + +IMMICH_MEDIA_LOCATION=${UPLOAD_DIR} +EOF +cat <"${ML_DIR}"/ml_start.sh +#!/usr/bin/env bash + +cd ${ML_DIR} +. ${VIRTUAL_ENV}/bin/activate + +set -a +. ${INSTALL_DIR}/.env +set +a + +python3 -m immich_ml +EOF +cat <"$APP_DIR"/bin/start.sh +#!/usr/bin/env bash + +set -a +. 
${INSTALL_DIR}/.env +set +a + +/usr/bin/node ${APP_DIR}/dist/main.js "\$@" +EOF +chmod +x "$ML_DIR"/ml_start.sh "$APP_DIR"/bin/start.sh +cat </etc/systemd/system/"${APPLICATION}"-web.service +[Unit] +Description=${APPLICATION} Web Service +After=network.target +Requires=redis-server.service +Requires=postgresql.service +Requires=immich-ml.service + +[Service] +Type=simple +User=immich +Group=immich +UMask=0077 +WorkingDirectory=${APP_DIR} +EnvironmentFile=${INSTALL_DIR}/.env +ExecStart=/usr/bin/node ${APP_DIR}/dist/main +Restart=on-failure +SyslogIdentifier=immich-web +StandardOutput=append:/var/log/immich/web.log +StandardError=append:/var/log/immich/web.log + +[Install] +WantedBy=multi-user.target +EOF +cat </etc/systemd/system/"${APPLICATION}"-ml.service +[Unit] +Description=${APPLICATION} Machine-Learning +After=network.target + +[Service] +Type=simple +UMask=0077 +User=immich +Group=immich +WorkingDirectory=${APP_DIR} +EnvironmentFile=${INSTALL_DIR}/.env +ExecStart=${ML_DIR}/ml_start.sh +Restart=on-failure +SyslogIdentifier=immich-machine-learning +StandardOutput=append:/var/log/immich/ml.log +StandardError=append:/var/log/immich/ml.log + +[Install] +WantedBy=multi-user.target +EOF +chown -R immich:immich "$INSTALL_DIR" /var/log/immich +systemctl enable -q --now "$APPLICATION"-ml.service "$APPLICATION"-web.service +msg_ok "Modified user, created env file, scripts and services" + +motd_ssh +customize +cleanup_lxc From 36eaaea69b0ecd42dd0551eae29ba8a7a8b5e254 Mon Sep 17 00:00:00 2001 From: "CanbiZ (MickLesk)" <47820557+MickLesk@users.noreply.github.com> Date: Thu, 29 Jan 2026 08:50:12 +0100 Subject: [PATCH 142/228] add TAG mode in tools.func fetch_and_deploy --- ct/immich.sh | 6 +- install/immich-install.sh | 2 +- misc/tools.func | 151 +++++++++++++++++++++++++++++++++++++- 3 files changed, 154 insertions(+), 5 deletions(-) diff --git a/ct/immich.sh b/ct/immich.sh index 067c6f0bd..24a663b47 100644 --- a/ct/immich.sh +++ b/ct/immich.sh @@ -112,8 +112,8 @@ EOF msg_ok "Image-processing libraries up to date" fi - RELEASE="2.5.0" - if check_for_gh_release "immich" "immich-app/immich" "${RELEASE}"; then + RELEASE="v2.5.2" + if check_for_gh_tag "immich" "immich-app/immich" "${RELEASE}"; then msg_info "Stopping Services" systemctl stop immich-web systemctl stop immich-ml @@ -165,7 +165,7 @@ EOF rm -rf "${APP_DIR:?}"/* ) - CLEAN_INSTALL=1 fetch_and_deploy_gh_release "immich" "immich-app/immich" "tarball" "v${RELEASE}" "$SRC_DIR" + CLEAN_INSTALL=1 fetch_and_deploy_gh_release "immich" "immich-app/immich" "tag" "${RELEASE}" "$SRC_DIR" msg_info "Updating Immich web and microservices" cd "$SRC_DIR"/server diff --git a/install/immich-install.sh b/install/immich-install.sh index 339f79c60..6482c4c28 100644 --- a/install/immich-install.sh +++ b/install/immich-install.sh @@ -296,7 +296,7 @@ GEO_DIR="${INSTALL_DIR}/geodata" mkdir -p "$INSTALL_DIR" mkdir -p {"${APP_DIR}","${UPLOAD_DIR}","${GEO_DIR}","${INSTALL_DIR}"/cache} -fetch_and_deploy_gh_release "immich" "immich-app/immich" "tarball" "v2.5.0" "$SRC_DIR" +fetch_and_deploy_gh_release "immich" "immich-app/immich" "tag" "v2.5.2" "$SRC_DIR" msg_info "Installing Immich (patience)" diff --git a/misc/tools.func b/misc/tools.func index 1e1b86b5c..70e8c3fa9 100644 --- a/misc/tools.func +++ b/misc/tools.func @@ -1563,6 +1563,77 @@ check_for_gh_release() { return 1 } +# ------------------------------------------------------------------------------ +# Checks if a pinned GitHub tag exists and compares to local version. 
+# +# Description: +# - For tags that are NOT releases (e.g., hotfix tags) +# - Checks if the specified tag exists via Git refs API +# - Compares to local cached version (~/.) +# - If newer, sets global CHECK_UPDATE_RELEASE and returns 0 +# +# Usage: +# if check_for_gh_tag "immich" "immich-app/immich" "v2.5.2"; then +# # trigger update... +# fi +# +# Notes: +# - Requires explicit tag (no 'latest' support - use check_for_gh_release for that) +# - Sets CHECK_UPDATE_RELEASE to the tag name if update is needed +# ------------------------------------------------------------------------------ +check_for_gh_tag() { + local app="$1" + local source="$2" + local pinned_tag="$3" + local app_lc="${app,,}" + local current_file="$HOME/.${app_lc}" + + if [[ -z "$pinned_tag" ]]; then + msg_error "check_for_gh_tag requires a pinned tag version" + return 1 + fi + + msg_info "Checking for update: ${app} (tag: ${pinned_tag})" + + # DNS check + if ! getent hosts api.github.com >/dev/null 2>&1; then + msg_error "Network error: cannot resolve api.github.com" + return 1 + fi + + ensure_dependencies jq + + # Check if tag exists via Git refs API + local tag_check + tag_check=$(curl -fsSL --max-time 20 \ + -H 'Accept: application/vnd.github+json' \ + -H 'X-GitHub-Api-Version: 2022-11-28' \ + "https://api.github.com/repos/${source}/git/refs/tags/${pinned_tag}" 2>/dev/null) + + if [[ $? -ne 0 ]] || [[ -z "$tag_check" ]] || echo "$tag_check" | jq -e '.message' &>/dev/null; then + msg_error "Tag ${pinned_tag} not found in ${source}" + return 1 + fi + + local pin_clean="${pinned_tag#v}" + + # Current installed version + local current="" + if [[ -f "$current_file" ]]; then + current="$(<"$current_file")" + fi + current="${current#v}" + + if [[ "$current" != "$pin_clean" ]]; then + CHECK_UPDATE_RELEASE="$pinned_tag" + msg_ok "Update available: ${app} ${current:-not installed} → ${pin_clean}" + return 0 + fi + + msg_ok "No update available: ${app} is already on version (${current})" + return 1 +} + # ------------------------------------------------------------------------------ # Creates and installs self-signed certificates. # @@ -1694,12 +1765,15 @@ function ensure_usr_local_bin_persist() { # # # 4. Single binary (chmod +x) like Argus, Promtail etc. # fetch_and_deploy_gh_release "argus" "release-argus/Argus" "singlefile" "0.26.3" "/opt/argus" "Argus-.*linux-amd64" +# +# # 5. Git tag (not a release) - bypasses Release API, fetches tarball directly from tag +# fetch_and_deploy_gh_release "immich" "immich-app/immich" "tag" "v2.5.2" "/opt/immich/source" # ------------------------------------------------------------------------------ function fetch_and_deploy_gh_release() { local app="$1" local repo="$2" - local mode="${3:-tarball}" # tarball | binary | prebuild | singlefile + local mode="${3:-tarball}" # tarball | binary | prebuild | singlefile | tag local version="${4:-latest}" local target="${5:-/opt/$app}" local asset_pattern="${6:-}" @@ -1715,6 +1789,81 @@ function fetch_and_deploy_gh_release() { ensure_dependencies jq + ### Tag Mode (bypass Release API) ### + if [[ "$mode" == "tag" ]]; then + if [[ "$version" == "latest" ]]; then + msg_error "Mode 'tag' requires explicit version (not 'latest')" + return 1 + fi + + local tag_name="$version" + [[ "$tag_name" =~ ^v ]] && version="${tag_name:1}" || version="$tag_name" + + if [[ "$current_version" == "$version" ]]; then + $STD msg_ok "$app is already up-to-date (v$version)" + return 0 + fi + + # DNS check + if ! 
getent hosts "github.com" &>/dev/null; then + msg_error "DNS resolution failed for github.com – check /etc/resolv.conf or networking" + return 1 + fi + + local tmpdir + tmpdir=$(mktemp -d) || return 1 + + msg_info "Fetching GitHub tag: $app ($tag_name)" + + local safe_version="${version//@/_}" + safe_version="${safe_version//\//_}" + local filename="${app_lc}-${safe_version}.tar.gz" + local download_success=false + + # For tags with special characters (@, /), use codeload.github.com + if [[ "$tag_name" =~ [@/] ]]; then + local codeload_encoded="${tag_name//@/%40}" + local codeload_url="https://codeload.github.com/$repo/tar.gz/refs/tags/$codeload_encoded" + if curl $download_timeout -fsSL -o "$tmpdir/$filename" "$codeload_url"; then + download_success=true + fi + else + local direct_tarball_url="https://github.com/$repo/archive/refs/tags/${tag_name}.tar.gz" + if curl $download_timeout -fsSL -o "$tmpdir/$filename" "$direct_tarball_url"; then + download_success=true + fi + fi + + if [[ "$download_success" != "true" ]]; then + msg_error "Download failed for $app ($tag_name)" + rm -rf "$tmpdir" + return 1 + fi + + mkdir -p "$target" + if [[ "${CLEAN_INSTALL:-0}" == "1" ]]; then + rm -rf "${target:?}/"* + fi + + tar --no-same-owner -xzf "$tmpdir/$filename" -C "$tmpdir" || { + msg_error "Failed to extract tarball" + rm -rf "$tmpdir" + return 1 + } + + local unpack_dir + unpack_dir=$(find "$tmpdir" -mindepth 1 -maxdepth 1 -type d | head -n1) + + shopt -s dotglob nullglob + cp -r "$unpack_dir"/* "$target/" + shopt -u dotglob nullglob + + echo "$version" >"$version_file" + msg_ok "Deployed: $app ($version)" + rm -rf "$tmpdir" + return 0 + fi + local api_url="https://api.github.com/repos/$repo/releases" [[ "$version" != "latest" ]] && api_url="$api_url/tags/$version" || api_url="$api_url/latest" local header=() From 45f1e73ae9d052f3c797411ec8fde06601f74af9 Mon Sep 17 00:00:00 2001 From: "CanbiZ (MickLesk)" <47820557+MickLesk@users.noreply.github.com> Date: Thu, 29 Jan 2026 09:13:11 +0100 Subject: [PATCH 143/228] Update OpenVINO install for Python 3.13 and add workaround Switches HW-accelerated machine-learning setup to use Python 3.13 and updates related paths. Adds a workaround for onnxruntime-openvino 1.23.x crash by setting MACHINE_LEARNING_OPENVINO_NUM_THREADS to nproc, referencing Immich PR #11240. 
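A quick way to verify the workaround on an installed container, sketched against the paths and unit names this installer creates (/opt/immich/.env and the immich-ml service); adjust if your layout differs:

  grep -E 'OPENVINO' /opt/immich/.env        # the NUM_THREADS workaround variable should now be present
  systemctl restart immich-ml
  journalctl -u immich-ml -n 20 --no-pager   # the ML service should start without the onnxruntime-openvino crash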
--- install/immich-install.sh | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/install/immich-install.sh b/install/immich-install.sh index 6482c4c28..d0625e4cf 100644 --- a/install/immich-install.sh +++ b/install/immich-install.sh @@ -347,9 +347,8 @@ mkdir -p "$ML_DIR" && chown -R immich:immich "$INSTALL_DIR" export VIRTUAL_ENV="${ML_DIR}/ml-venv" if [[ -f ~/.openvino ]]; then msg_info "Installing HW-accelerated machine-learning" - $STD uv add --no-sync --optional openvino onnxruntime-openvino==1.20.0 --active -n -p python3.12 --managed-python - $STD sudo --preserve-env=VIRTUAL_ENV -nu immich uv sync --extra openvino --no-dev --active --link-mode copy -n -p python3.12 --managed-python - patchelf --clear-execstack "${VIRTUAL_ENV}/lib/python3.12/site-packages/onnxruntime/capi/onnxruntime_pybind11_state.cpython-312-x86_64-linux-gnu.so" + $STD sudo --preserve-env=VIRTUAL_ENV -nu immich uv sync --extra openvino --no-dev --active --link-mode copy -n -p python3.13 --managed-python + patchelf --clear-execstack "${VIRTUAL_ENV}/lib/python3.13/site-packages/onnxruntime/capi/onnxruntime_pybind11_state.cpython-313-x86_64-linux-gnu.so" msg_ok "Installed HW-accelerated machine-learning" else msg_info "Installing machine-learning" @@ -405,9 +404,10 @@ DB_VECTOR_EXTENSION=vectorchord REDIS_HOSTNAME=127.0.0.1 IMMICH_MACHINE_LEARNING_URL=http://127.0.0.1:3003 MACHINE_LEARNING_CACHE_FOLDER=${INSTALL_DIR}/cache -## - For OpenVINO only - uncomment below to increase -## - inference speed while reducing accuracy -## - Default is FP32 +## - For OpenVINO only - workaround for onnxruntime-openvino 1.23.x crash +## - See: https://github.com/immich-app/immich/pull/11240 +MACHINE_LEARNING_OPENVINO_NUM_THREADS=$(nproc) +## - Uncomment below to increase inference speed while reducing accuracy # MACHINE_LEARNING_OPENVINO_PRECISION=FP16 IMMICH_MEDIA_LOCATION=${UPLOAD_DIR} From 6e294d199e811a5a9a87d76df7c1d7ef9bac04d3 Mon Sep 17 00:00:00 2001 From: "CanbiZ (MickLesk)" <47820557+MickLesk@users.noreply.github.com> Date: Thu, 29 Jan 2026 09:15:05 +0100 Subject: [PATCH 144/228] Update OpenVINO sync to use Python 3.13 Changed the OpenVINO sync and related paths from Python 3.12 to Python 3.13 for HW-accelerated machine-learning updates. This ensures compatibility with the newer Python version. --- ct/immich.sh | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/ct/immich.sh b/ct/immich.sh index 24a663b47..d96bbd99d 100644 --- a/ct/immich.sh +++ b/ct/immich.sh @@ -217,9 +217,16 @@ EOF export VIRTUAL_ENV="${ML_DIR}"/ml-venv if [[ -f ~/.openvino ]]; then msg_info "Updating HW-accelerated machine-learning" - $STD uv add --no-sync --optional openvino onnxruntime-openvino==1.20.0 --active -n -p python3.12 --managed-python - $STD sudo --preserve-env=VIRTUAL_ENV -nu immich uv sync --extra openvino --no-dev --active --link-mode copy -n -p python3.12 --managed-python - patchelf --clear-execstack "${VIRTUAL_ENV}/lib/python3.12/site-packages/onnxruntime/capi/onnxruntime_pybind11_state.cpython-312-x86_64-linux-gnu.so" + # Remove old venv if Python version changed (3.12 -> 3.13) + if [[ -d "${VIRTUAL_ENV}" ]] && ! 
"${VIRTUAL_ENV}/bin/python3" --version 2>/dev/null | grep -q "3.13"; then + rm -rf "${VIRTUAL_ENV}" + fi + $STD sudo --preserve-env=VIRTUAL_ENV -nu immich uv sync --extra openvino --no-dev --active --link-mode copy -n -p python3.13 --managed-python + patchelf --clear-execstack "${VIRTUAL_ENV}/lib/python3.13/site-packages/onnxruntime/capi/onnxruntime_pybind11_state.cpython-313-x86_64-linux-gnu.so" + # Add workaround for onnxruntime-openvino 1.23.x crash if not present + if ! grep -q "MACHINE_LEARNING_OPENVINO_NUM_THREADS" "$INSTALL_DIR/.env" 2>/dev/null; then + sed -i '/MACHINE_LEARNING_CACHE_FOLDER/a ## - For OpenVINO only - workaround for onnxruntime-openvino 1.23.x crash\n## - See: https://github.com/immich-app/immich/pull/11240\nMACHINE_LEARNING_OPENVINO_NUM_THREADS=$(nproc)' "$INSTALL_DIR/.env" + fi msg_ok "Updated HW-accelerated machine-learning" else msg_info "Updating machine-learning" From 8a1f604080286c4cf6a2a8a4a42408533b552167 Mon Sep 17 00:00:00 2001 From: "CanbiZ (MickLesk)" <47820557+MickLesk@users.noreply.github.com> Date: Thu, 29 Jan 2026 09:37:45 +0100 Subject: [PATCH 145/228] Update AI.md --- docs/AI.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/AI.md b/docs/AI.md index 489a4b35d..9efebd689 100644 --- a/docs/AI.md +++ b/docs/AI.md @@ -224,9 +224,9 @@ CLEAN_INSTALL=1 fetch_and_deploy_gh_release "appname" "owner/repo" ### Helper Utilities -| Function | Description | Example | -|----------|-------------|----------| -| `import_local_ip` | Sets `$LOCAL_IP` variable | `import_local_ip` | +| Function/Variable | Description | Example | +|-------------------|-------------|----------| +| `$LOCAL_IP` | Always available - contains the container's IP address | `echo "Access: http://${LOCAL_IP}:3000"` | | `ensure_dependencies` | Checks/installs dependencies | `ensure_dependencies curl jq` | | `install_packages_with_retry` | APT install with retry | `install_packages_with_retry nginx redis` | From 646291e7c1012cb162daed35769fc0a5d30b1260 Mon Sep 17 00:00:00 2001 From: "CanbiZ (MickLesk)" <47820557+MickLesk@users.noreply.github.com> Date: Thu, 29 Jan 2026 09:38:07 +0100 Subject: [PATCH 146/228] Add localhost mode and conditional Collabora install Introduces a localhost/IP-based mode for OpenCloud installation, allowing setup without Collabora or TLS when no hostname is provided. Collabora Online and WOPI services are now only installed and configured if a valid hostname is entered, with environment and service files adjusted accordingly. This improves flexibility for local testing and simplifies configuration for non-production environments. 
--- install/opencloud-install copy.sh | 210 ++++++++++++++++++++++++++++++ install/opencloud-install.sh | 139 ++++++++++++-------- 2 files changed, 298 insertions(+), 51 deletions(-) create mode 100644 install/opencloud-install copy.sh diff --git a/install/opencloud-install copy.sh b/install/opencloud-install copy.sh new file mode 100644 index 000000000..7d4c9a060 --- /dev/null +++ b/install/opencloud-install copy.sh @@ -0,0 +1,210 @@ +#!/usr/bin/env bash + +# Copyright (c) 2021-2026 community-scripts ORG +# Author: vhsdream +# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE +# Source: https://opencloud.eu + +source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" +color +verb_ip6 +catch_errors +setting_up_container +network_check +update_os + +read -r -p "${TAB3}Enter the hostname of your OpenCloud server (eg cloud.domain.tld): " oc_host +if [[ "$oc_host" ]]; then + OC_HOST="$oc_host" +fi +read -r -p "${TAB3}Enter the hostname of your Collabora server (eg collabora.domain.tld): " collabora_host +if [[ "$collabora_host" ]]; then + COLLABORA_HOST="$collabora_host" +fi +read -r -p "${TAB3}Enter the hostname of your WOPI server (eg wopiserver.domain.tld): " wopi_host +if [[ "$wopi_host" ]]; then + WOPI_HOST="$wopi_host" +fi + +# Collabora online - this is broken because it adds the Component and apt doesn't like that +# setup_deb822_repo \ +# "collaboraonline" \ +# "https://collaboraoffice.com/downloads/gpg/collaboraonline-release-keyring.gpg" \ +# "https://www.collaboraoffice.com/repos/CollaboraOnline/CODE-deb/Release" \ +# "./" \ +# "main" + +msg_info "Installing Collabora Online" +curl -fsSL https://collaboraoffice.com/downloads/gpg/collaboraonline-release-keyring.gpg -o /etc/apt/keyrings/collaboraonline-release-keyring.gpg +cat </etc/apt/sources.list.d/colloboraonline.sources +Types: deb +URIs: https://www.collaboraoffice.com/repos/CollaboraOnline/CODE-deb +Suites: ./ +Signed-By: /etc/apt/keyrings/collaboraonline-release-keyring.gpg +EOF +$STD apt-get update +$STD apt-get install -y coolwsd code-brand +systemctl stop coolwsd +mkdir -p /etc/systemd/system/coolwsd.service.d +cat </etc/systemd/system/coolwsd.service.d/override.conf +[Unit] +Before=opencloud-wopi.service +EOF +systemctl daemon-reload +COOLPASS="$(openssl rand -base64 36)" +$STD sudo -u cool coolconfig set-admin-password --user=admin --password="$COOLPASS" +echo "$COOLPASS" >~/.coolpass +msg_ok "Installed Collabora Online" + +# OpenCloud +fetch_and_deploy_gh_release "opencloud" "opencloud-eu/opencloud" "singlefile" "v5.0.1" "/usr/bin" "opencloud-*-linux-amd64" + +msg_info "Configuring OpenCloud" +DATA_DIR="/var/lib/opencloud/" +CONFIG_DIR="/etc/opencloud" +ENV_FILE="${CONFIG_DIR}/opencloud.env" +mkdir -p "$DATA_DIR" "$CONFIG_DIR"/assets/apps + +curl -fsSL https://raw.githubusercontent.com/opencloud-eu/opencloud-compose/refs/heads/main/config/opencloud/csp.yaml -o "$CONFIG_DIR"/csp.yaml +curl -fsSL https://raw.githubusercontent.com/opencloud-eu/opencloud-compose/refs/heads/main/config/opencloud/proxy.yaml -o "$CONFIG_DIR"/proxy.yaml.bak + +cat <"$ENV_FILE" +OC_URL=https://${OC_HOST} +OC_INSECURE=false +IDM_CREATE_DEMO_USERS=false +OC_LOG_LEVEL=warning +OC_CONFIG_DIR=${CONFIG_DIR} +OC_BASE_DATA_PATH=${DATA_DIR} +STORAGE_SYSTEM_OC_ROOT=${DATA_DIR}/storage/metadata + +## Web +WEB_ASSET_CORE_PATH=${CONFIG_DIR}/web/assets +WEB_ASSET_APPS_PATH=${CONFIG_DIR}/web/assets/apps +WEB_UI_CONFIG_FILE=${CONFIG_DIR}/web/config.json +# WEB_ASSET_THEMES_PATH=${CONFIG_DIR}/web/assets/themes +# WEB_UI_THEME_PATH= + +## Frontend 
+FRONTEND_DISABLE_RADICALE=true +FRONTEND_GROUPWARE_ENABLED=false +GRAPH_INCLUDE_OCM_SHAREES=true + +## Proxy +PROXY_TLS=false +PROXY_CSP_CONFIG_FILE_LOCATION=${CONFIG_DIR}/csp.yaml + +## Collaboration - requires VALID TLS +COLLABORA_DOMAIN=${COLLABORA_HOST} +COLLABORATION_APP_NAME="CollaboraOnline" +COLLABORATION_APP_PRODUCT="Collabora" +COLLABORATION_APP_ADDR=https://${COLLABORA_HOST} +COLLABORATION_APP_INSECURE=false +COLLABORATION_HTTP_ADDR=0.0.0.0:9300 +COLLABORATION_WOPI_SRC=https://${WOPI_HOST} +COLLABORATION_JWT_SECRET= + +## Notifications - Email settings +# NOTIFICATIONS_SMTP_HOST= +# NOTIFICATIONS_SMTP_PORT= +# NOTIFICATIONS_SMTP_SENDER= +# NOTIFICATIONS_SMTP_USERNAME= +# NOTIFICATIONS_SMTP_PASSWORD= +# NOTIFICATIONS_SMTP_AUTHENTICATION=login +## Encryption method. Possible values are 'starttls', 'ssltls' and 'none' +# NOTIFICATIONS_SMTP_ENCRYPTION=starttls +## Allow insecure connections. Defaults to false. +# NOTIFICATIONS_SMTP_INSECURE=false + +## Start additional services at runtime +## Examples: notifications, antivirus etc. +## Do not uncomment unless configured above. +# OC_ADD_RUN_SERVICES="notifications" + +## OpenID - via web browser +## uncomment for OpenID in general +# OC_EXCLUDE_RUN_SERVICES=idp +# OC_OIDC_ISSUER= +# IDP_DOMAIN= +# PROXY_OIDC_ACCESS_TOKEN_VERIFY_METHOD=none +# PROXY_OIDC_REWRITE_WELLKNOWN=true +# PROXY_USER_OIDC_CLAIM=preferred_username +# PROXY_USER_CS3_CLAIM=username +## automatically create accounts +# PROXY_AUTOPROVISION_ACCOUNTS=true +# WEB_OIDC_SCOPE=openid profile email groups +# GRAPH_ASSIGN_DEFAULT_USER_ROLE=false +# +## uncomment below if using PocketID +# WEB_OIDC_CLIENT_ID= +# WEB_OIDC_METADATA_URL=/.well-known/openid-configuration + +## Full Text Search - Apache Tika +## Requires a separate install of Tika - see https://community-scripts.github.io/ProxmoxVE/scripts?id=apache-tika +# SEARCH_EXTRACTOR_TYPE=tika +# FRONTEND_FULL_TEXT_SEARCH_ENABLED=true +# SEARCH_EXTRACTOR_TIKA_TIKA_URL= + +## External storage test - Only NFS v4.2+ is supported +## User files +# STORAGE_USERS_POSIX_ROOT= +EOF + +cat </etc/systemd/system/opencloud.service +[Unit] +Description=OpenCloud server +After=network-online.target + +[Service] +Type=simple +User=opencloud +Group=opencloud +EnvironmentFile=${ENV_FILE} +ExecStart=/usr/bin/opencloud server +Restart=always + +[Install] +WantedBy=multi-user.target +EOF + +cat </etc/systemd/system/opencloud-wopi.service +[Unit] +Description=OpenCloud WOPI Server +Wants=coolwsd.service +After=opencloud.service coolwsd.service + +[Service] +Type=simple +User=opencloud +Group=opencloud +EnvironmentFile=${ENV_FILE} +ExecStartPre=/bin/sleep 10 +ExecStart=/usr/bin/opencloud collaboration server +Restart=always +KillSignal=SIGKILL +KillMode=mixed +TimeoutStopSec=10 + +[Install] +WantedBy=multi-user.target +EOF + +$STD sudo -u cool coolconfig set ssl.enable false +$STD sudo -u cool coolconfig set ssl.termination true +$STD sudo -u cool coolconfig set ssl.ssl_verification true +sed -i "s|CSP2\"/>|CSP2\">frame-ancestors https://${OC_HOST}|" /etc/coolwsd/coolwsd.xml +useradd -r -M -s /usr/sbin/nologin opencloud +chown -R opencloud:opencloud "$CONFIG_DIR" "$DATA_DIR" +sudo -u opencloud opencloud init --config-path "$CONFIG_DIR" --insecure no +OPENCLOUD_SECRET="$(sed -n '/jwt/p' "$CONFIG_DIR"/opencloud.yaml | awk '{print $2}')" +sed -i "s/JWT_SECRET=/&${OPENCLOUD_SECRET//&/\\&}/" "$ENV_FILE" +msg_ok "Configured OpenCloud" + +msg_info "Starting services" +systemctl enable -q --now coolwsd opencloud +sleep 5 +systemctl enable -q --now 
opencloud-wopi +msg_ok "Started services" + +motd_ssh +customize +cleanup_lxc diff --git a/install/opencloud-install.sh b/install/opencloud-install.sh index 7d4c9a060..943578598 100644 --- a/install/opencloud-install.sh +++ b/install/opencloud-install.sh @@ -13,48 +13,48 @@ setting_up_container network_check update_os +echo -e "${TAB3}${INFO}${YW} Leave empty to use IP-based localhost mode (no Collabora)${CL}" read -r -p "${TAB3}Enter the hostname of your OpenCloud server (eg cloud.domain.tld): " oc_host -if [[ "$oc_host" ]]; then + +if [[ -z "$oc_host" ]]; then + # Localhost/IP mode - no TLS, no Collabora + OC_HOST="${LOCAL_IP}" + LOCALHOST_MODE=true + msg_info "Using localhost mode with IP: ${LOCAL_IP}" + msg_warn "Collabora requires TLS and will be skipped in localhost mode" +else OC_HOST="$oc_host" -fi -read -r -p "${TAB3}Enter the hostname of your Collabora server (eg collabora.domain.tld): " collabora_host -if [[ "$collabora_host" ]]; then - COLLABORA_HOST="$collabora_host" -fi -read -r -p "${TAB3}Enter the hostname of your WOPI server (eg wopiserver.domain.tld): " wopi_host -if [[ "$wopi_host" ]]; then - WOPI_HOST="$wopi_host" + LOCALHOST_MODE=false + read -r -p "${TAB3}Enter the hostname of your Collabora server [collabora.${OC_HOST#*.}]: " collabora_host + COLLABORA_HOST="${collabora_host:-collabora.${OC_HOST#*.}}" + read -r -p "${TAB3}Enter the hostname of your WOPI server [wopiserver.${OC_HOST#*.}]: " wopi_host + WOPI_HOST="${wopi_host:-wopiserver.${OC_HOST#*.}}" fi -# Collabora online - this is broken because it adds the Component and apt doesn't like that -# setup_deb822_repo \ -# "collaboraonline" \ -# "https://collaboraoffice.com/downloads/gpg/collaboraonline-release-keyring.gpg" \ -# "https://www.collaboraoffice.com/repos/CollaboraOnline/CODE-deb/Release" \ -# "./" \ -# "main" - -msg_info "Installing Collabora Online" -curl -fsSL https://collaboraoffice.com/downloads/gpg/collaboraonline-release-keyring.gpg -o /etc/apt/keyrings/collaboraonline-release-keyring.gpg -cat </etc/apt/sources.list.d/colloboraonline.sources +# Collabora Online - only install if not in localhost mode (requires TLS) +if [[ "$LOCALHOST_MODE" != true ]]; then + msg_info "Installing Collabora Online" + curl -fsSL https://collaboraoffice.com/downloads/gpg/collaboraonline-release-keyring.gpg -o /etc/apt/keyrings/collaboraonline-release-keyring.gpg + cat </etc/apt/sources.list.d/collaboraonline.sources Types: deb URIs: https://www.collaboraoffice.com/repos/CollaboraOnline/CODE-deb Suites: ./ Signed-By: /etc/apt/keyrings/collaboraonline-release-keyring.gpg EOF -$STD apt-get update -$STD apt-get install -y coolwsd code-brand -systemctl stop coolwsd -mkdir -p /etc/systemd/system/coolwsd.service.d -cat </etc/systemd/system/coolwsd.service.d/override.conf + $STD apt-get update + $STD apt-get install -y coolwsd code-brand + systemctl stop coolwsd + mkdir -p /etc/systemd/system/coolwsd.service.d + cat </etc/systemd/system/coolwsd.service.d/override.conf [Unit] Before=opencloud-wopi.service EOF -systemctl daemon-reload -COOLPASS="$(openssl rand -base64 36)" -$STD sudo -u cool coolconfig set-admin-password --user=admin --password="$COOLPASS" -echo "$COOLPASS" >~/.coolpass -msg_ok "Installed Collabora Online" + systemctl daemon-reload + COOLPASS="$(openssl rand -base64 36)" + $STD runuser -u cool -- coolconfig set-admin-password --user=admin --password="$COOLPASS" + echo "$COOLPASS" >~/.coolpass + msg_ok "Installed Collabora Online" +fi # OpenCloud fetch_and_deploy_gh_release "opencloud" "opencloud-eu/opencloud" 
"singlefile" "v5.0.1" "/usr/bin" "opencloud-*-linux-amd64" @@ -68,9 +68,17 @@ mkdir -p "$DATA_DIR" "$CONFIG_DIR"/assets/apps curl -fsSL https://raw.githubusercontent.com/opencloud-eu/opencloud-compose/refs/heads/main/config/opencloud/csp.yaml -o "$CONFIG_DIR"/csp.yaml curl -fsSL https://raw.githubusercontent.com/opencloud-eu/opencloud-compose/refs/heads/main/config/opencloud/proxy.yaml -o "$CONFIG_DIR"/proxy.yaml.bak +if [[ "$LOCALHOST_MODE" == true ]]; then + OC_URL="http://${OC_HOST}:9200" + OC_INSECURE="true" +else + OC_URL="https://${OC_HOST}" + OC_INSECURE="false" +fi + cat <"$ENV_FILE" -OC_URL=https://${OC_HOST} -OC_INSECURE=false +OC_URL=${OC_URL} +OC_INSECURE=${OC_INSECURE} IDM_CREATE_DEMO_USERS=false OC_LOG_LEVEL=warning OC_CONFIG_DIR=${CONFIG_DIR} @@ -93,15 +101,15 @@ GRAPH_INCLUDE_OCM_SHAREES=true PROXY_TLS=false PROXY_CSP_CONFIG_FILE_LOCATION=${CONFIG_DIR}/csp.yaml -## Collaboration - requires VALID TLS -COLLABORA_DOMAIN=${COLLABORA_HOST} -COLLABORATION_APP_NAME="CollaboraOnline" -COLLABORATION_APP_PRODUCT="Collabora" -COLLABORATION_APP_ADDR=https://${COLLABORA_HOST} -COLLABORATION_APP_INSECURE=false -COLLABORATION_HTTP_ADDR=0.0.0.0:9300 -COLLABORATION_WOPI_SRC=https://${WOPI_HOST} -COLLABORATION_JWT_SECRET= +## Collaboration - requires VALID TLS (disabled in localhost mode) +# COLLABORA_DOMAIN= +# COLLABORATION_APP_NAME="CollaboraOnline" +# COLLABORATION_APP_PRODUCT="Collabora" +# COLLABORATION_APP_ADDR= +# COLLABORATION_APP_INSECURE=false +# COLLABORATION_HTTP_ADDR=0.0.0.0:9300 +# COLLABORATION_WOPI_SRC= +# COLLABORATION_JWT_SECRET= ## Notifications - Email settings # NOTIFICATIONS_SMTP_HOST= @@ -166,7 +174,8 @@ Restart=always WantedBy=multi-user.target EOF -cat </etc/systemd/system/opencloud-wopi.service +if [[ "$LOCALHOST_MODE" != true ]]; then + cat </etc/systemd/system/opencloud-wopi.service [Unit] Description=OpenCloud WOPI Server Wants=coolwsd.service @@ -188,21 +197,49 @@ TimeoutStopSec=10 WantedBy=multi-user.target EOF -$STD sudo -u cool coolconfig set ssl.enable false -$STD sudo -u cool coolconfig set ssl.termination true -$STD sudo -u cool coolconfig set ssl.ssl_verification true -sed -i "s|CSP2\"/>|CSP2\">frame-ancestors https://${OC_HOST}|" /etc/coolwsd/coolwsd.xml + # Append active Collabora config to env file + cat <>"$ENV_FILE" + +## Collaboration - active configuration +COLLABORA_DOMAIN=${COLLABORA_HOST} +COLLABORATION_APP_NAME="CollaboraOnline" +COLLABORATION_APP_PRODUCT="Collabora" +COLLABORATION_APP_ADDR=https://${COLLABORA_HOST} +COLLABORATION_APP_INSECURE=false +COLLABORATION_HTTP_ADDR=0.0.0.0:9300 +COLLABORATION_WOPI_SRC=https://${WOPI_HOST} +COLLABORATION_JWT_SECRET= +EOF + + $STD runuser -u cool -- coolconfig set ssl.enable false + $STD runuser -u cool -- coolconfig set ssl.termination true + $STD runuser -u cool -- coolconfig set ssl.ssl_verification true + sed -i "s|CSP2\"/>|CSP2\">frame-ancestors https://${OC_HOST}|" /etc/coolwsd/coolwsd.xml +fi + useradd -r -M -s /usr/sbin/nologin opencloud chown -R opencloud:opencloud "$CONFIG_DIR" "$DATA_DIR" -sudo -u opencloud opencloud init --config-path "$CONFIG_DIR" --insecure no + +if [[ "$LOCALHOST_MODE" == true ]]; then + $STD runuser -u opencloud -- opencloud init --config-path "$CONFIG_DIR" --insecure yes +else + $STD runuser -u opencloud -- opencloud init --config-path "$CONFIG_DIR" --insecure no +fi + OPENCLOUD_SECRET="$(sed -n '/jwt/p' "$CONFIG_DIR"/opencloud.yaml | awk '{print $2}')" -sed -i "s/JWT_SECRET=/&${OPENCLOUD_SECRET//&/\\&}/" "$ENV_FILE" +if [[ "$LOCALHOST_MODE" != true ]]; then + sed -i 
"s/COLLABORATION_JWT_SECRET=/&${OPENCLOUD_SECRET//&/\\&}/" "$ENV_FILE" +fi msg_ok "Configured OpenCloud" msg_info "Starting services" -systemctl enable -q --now coolwsd opencloud -sleep 5 -systemctl enable -q --now opencloud-wopi +if [[ "$LOCALHOST_MODE" == true ]]; then + systemctl enable -q --now opencloud +else + systemctl enable -q --now coolwsd opencloud + sleep 5 + systemctl enable -q --now opencloud-wopi +fi msg_ok "Started services" motd_ssh From a5096a5b622d6e89ab044d32fa25b8c7e96fc0e6 Mon Sep 17 00:00:00 2001 From: "CanbiZ (MickLesk)" <47820557+MickLesk@users.noreply.github.com> Date: Thu, 29 Jan 2026 09:41:55 +0100 Subject: [PATCH 147/228] test anytype --- ct/anytype.sh | 66 +++++++++++++++++++++++++++++++ frontend/public/json/anytype.json | 48 ++++++++++++++++++++++ install/anytype-install.sh | 59 +++++++++++++++++++++++++++ 3 files changed, 173 insertions(+) create mode 100644 ct/anytype.sh create mode 100644 frontend/public/json/anytype.json create mode 100644 install/anytype-install.sh diff --git a/ct/anytype.sh b/ct/anytype.sh new file mode 100644 index 000000000..b2af170e8 --- /dev/null +++ b/ct/anytype.sh @@ -0,0 +1,66 @@ +#!/usr/bin/env bash +source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func) +# Copyright (c) 2021-2026 community-scripts ORG +# Author: MickLesk (CanbiZ) +# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE +# Source: https://anytype.io + +APP="Anytype" +var_tags="${var_tags:-notes;productivity;sync}" +var_cpu="${var_cpu:-2}" +var_ram="${var_ram:-2048}" +var_disk="${var_disk:-10}" +var_os="${var_os:-debian}" +var_version="${var_version:-13}" +var_unprivileged="${var_unprivileged:-1}" + +header_info "$APP" +variables +color +catch_errors + +function update_script() { + header_info + check_container_storage + check_container_resources + + if [[ ! -f /opt/anytype/any-sync-bundle ]]; then + msg_error "No ${APP} Installation Found!" + exit + fi + + if check_for_gh_release "anytype" "grishy/any-sync-bundle"; then + msg_info "Stopping Service" + systemctl stop anytype + msg_ok "Stopped Service" + + msg_info "Backing up Data" + cp -r /opt/anytype/data /opt/anytype_data_backup + msg_ok "Backed up Data" + + CLEAN_INSTALL=1 fetch_and_deploy_gh_release "anytype" "grishy/any-sync-bundle" "prebuild" "latest" "/opt/anytype" "any-sync-bundle_*_linux_amd64.tar.gz" + chmod +x /opt/anytype/any-sync-bundle + + msg_info "Restoring Data" + cp -r /opt/anytype_data_backup/. /opt/anytype/data + rm -rf /opt/anytype_data_backup + msg_ok "Restored Data" + + msg_info "Starting Service" + systemctl start anytype + msg_ok "Started Service" + msg_ok "Updated successfully!" 
+ fi + exit +} + +start +build_container +description + +msg_ok "Completed Successfully!\n" +echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" +echo -e "${INFO}${YW} Access it using the following URL:${CL}" +echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:33010${CL}" +echo -e "${INFO}${YW} Client config file:${CL}" +echo -e "${TAB}${GATEWAY}${BGN}/opt/anytype/data/client-config.yml${CL}" diff --git a/frontend/public/json/anytype.json b/frontend/public/json/anytype.json new file mode 100644 index 000000000..88c1abe61 --- /dev/null +++ b/frontend/public/json/anytype.json @@ -0,0 +1,48 @@ +{ + "name": "Anytype", + "slug": "anytype", + "categories": [ + 12 + ], + "date_created": "2026-01-29", + "type": "ct", + "updateable": true, + "privileged": false, + "interface_port": 33010, + "documentation": "https://doc.anytype.io/", + "website": "https://anytype.io/", + "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/anytype.webp", + "config_path": "/opt/anytype/.env", + "description": "Anytype is a local-first, privacy-focused alternative to Notion. This script deploys the any-sync-bundle which provides a self-hosted sync server for Anytype clients with embedded MongoDB and Redis.", + "install_methods": [ + { + "type": "default", + "script": "ct/anytype.sh", + "resources": { + "cpu": 2, + "ram": 2048, + "hdd": 10, + "os": "Debian", + "version": "13" + } + } + ], + "default_credentials": { + "username": null, + "password": null + }, + "notes": [ + { + "text": "After installation, import /opt/anytype/data/client-config.yml into your Anytype apps.", + "type": "info" + }, + { + "text": "This uses the community any-sync-bundle by grishy, not the official Anytype deployment.", + "type": "warning" + }, + { + "text": "Firewall: Open TCP 33010 (DRPC) and UDP 33020 (QUIC) for external access.", + "type": "info" + } + ] +} diff --git a/install/anytype-install.sh b/install/anytype-install.sh new file mode 100644 index 000000000..458e70d3b --- /dev/null +++ b/install/anytype-install.sh @@ -0,0 +1,59 @@ +#!/usr/bin/env bash + +# Copyright (c) 2021-2026 community-scripts ORG +# Author: MickLesk (CanbiZ) +# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE +# Source: https://anytype.io + +source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" +color +verb_ip6 +catch_errors +setting_up_container +network_check +update_os + +msg_info "Installing Dependencies" +$STD apt install -y ca-certificates +msg_ok "Installed Dependencies" + +fetch_and_deploy_gh_release "anytype" "grishy/any-sync-bundle" "prebuild" "latest" "/opt/anytype" "any-sync-bundle_*_linux_amd64.tar.gz" +chmod +x /opt/anytype/any-sync-bundle + +msg_info "Configuring Anytype" +mkdir -p /opt/anytype/data + +cat </opt/anytype/.env +ANY_SYNC_BUNDLE_CONFIG=/opt/anytype/data/bundle-config.yml +ANY_SYNC_BUNDLE_CLIENT_CONFIG=/opt/anytype/data/client-config.yml +ANY_SYNC_BUNDLE_INIT_STORAGE=/opt/anytype/data/storage/ +ANY_SYNC_BUNDLE_INIT_EXTERNAL_ADDRS=${LOCAL_IP} +ANY_SYNC_BUNDLE_LOG_LEVEL=info +EOF +msg_ok "Configured Anytype" + +msg_info "Creating Service" +cat </etc/systemd/system/anytype.service +[Unit] +Description=Anytype Sync Server (any-sync-bundle) +After=network-online.target +Wants=network-online.target + +[Service] +Type=simple +User=root +WorkingDirectory=/opt/anytype +EnvironmentFile=/opt/anytype/.env +ExecStart=/opt/anytype/any-sync-bundle start-all-in-one +Restart=on-failure +RestartSec=10 + +[Install] +WantedBy=multi-user.target +EOF +systemctl enable -q --now anytype +msg_ok "Created 
Service" + +motd_ssh +customize +cleanup_lxc From 8c4f1ce5318f9a483bbada392b79f3db85b8e3c0 Mon Sep 17 00:00:00 2001 From: justin Date: Tue, 27 Jan 2026 19:31:02 -0500 Subject: [PATCH 148/228] Make check_container_storage() POSIX compliant --- misc/build.func | 27 +++++++++++++++++++-------- 1 file changed, 19 insertions(+), 8 deletions(-) diff --git a/misc/build.func b/misc/build.func index 051c5f4b8..386b37385 100644 --- a/misc/build.func +++ b/misc/build.func @@ -3086,21 +3086,32 @@ check_container_resources() { # ------------------------------------------------------------------------------ # check_container_storage() # -# - Checks /boot partition usage +# - Checks root (/) partition usage # - Warns if usage >80% and asks user confirmation before proceeding # ------------------------------------------------------------------------------ check_container_storage() { - total_size=$(df /boot --output=size | tail -n 1) - local used_size=$(df /boot --output=used | tail -n 1) - usage=$((100 * used_size / total_size)) - if ((usage > 80)); then + usage=$(df / -P | awk 'NR==2 {print $5}' | tr -d '%') + + # shellcheck disable=SC2181 + if [ $? -ne 0 ]; then + echo "Error: Failed to check disk usage." + exit 1 + fi + + if [ "$usage" -gt 80 ]; then echo -e "${INFO}${HOLD} ${YWB}Warning: Storage is dangerously low (${usage}%).${CL}" - echo -ne "Continue anyway? " + printf "Continue anyway? " read -r prompt - if [[ ! ${prompt,,} =~ ^(y|yes)$ ]]; then + + case "$prompt" in + [yY][eE][sS] | [yY]) + # User input is "yes" or "y"; continue + ;; + *) echo -e "${CROSS}${HOLD}${YWB}Exiting based on user input.${CL}" exit 1 - fi + ;; + esac fi } From 000a7a270166a005f09a53a0604aeb91c2362f90 Mon Sep 17 00:00:00 2001 From: justin Date: Tue, 27 Jan 2026 20:41:55 -0500 Subject: [PATCH 149/228] Fix Error check and error message style --- misc/build.func | 365 ++++++++++++++++++++++++------------------------ 1 file changed, 181 insertions(+), 184 deletions(-) diff --git a/misc/build.func b/misc/build.func index 386b37385..7e5e0a794 100644 --- a/misc/build.func +++ b/misc/build.func @@ -363,7 +363,7 @@ validate_hostname() { # Split by dots and validate each label local IFS='.' - read -ra labels <<< "$hostname" + read -ra labels <<<"$hostname" for label in "${labels[@]}"; do # Each label: 1-63 chars, alphanumeric, hyphens allowed (not at start/end) if [[ -z "$label" ]] || [[ ${#label} -gt 63 ]]; then @@ -467,7 +467,7 @@ validate_ipv6_address() { # Check that no segment exceeds 4 hex chars local IFS=':' local -a segments - read -ra segments <<< "$addr" + read -ra segments <<<"$addr" for seg in "${segments[@]}"; do if [[ ${#seg} -gt 4 ]]; then return 1 @@ -517,14 +517,14 @@ validate_gateway_in_subnet() { # Convert IPs to integers local IFS='.' - read -r i1 i2 i3 i4 <<< "$ip" - read -r g1 g2 g3 g4 <<< "$gateway" + read -r i1 i2 i3 i4 <<<"$ip" + read -r g1 g2 g3 g4 <<<"$gateway" - local ip_int=$(( (i1 << 24) + (i2 << 16) + (i3 << 8) + i4 )) - local gw_int=$(( (g1 << 24) + (g2 << 16) + (g3 << 8) + g4 )) + local ip_int=$(((i1 << 24) + (i2 << 16) + (i3 << 8) + i4)) + local gw_int=$(((g1 << 24) + (g2 << 16) + (g3 << 8) + g4)) # Check if both are in same network - if (( (ip_int & mask) != (gw_int & mask) )); then + if (((ip_int & mask) != (gw_int & mask))); then return 1 fi @@ -1057,113 +1057,113 @@ load_vars_file() { # Validate values before setting (skip empty values - they use defaults) if [[ -n "$var_val" ]]; then case "$var_key" in - var_mac) - if ! 
validate_mac_address "$var_val"; then - msg_warn "Invalid MAC address '$var_val' in $file, ignoring" + var_mac) + if ! validate_mac_address "$var_val"; then + msg_warn "Invalid MAC address '$var_val' in $file, ignoring" + continue + fi + ;; + var_vlan) + if ! validate_vlan_tag "$var_val"; then + msg_warn "Invalid VLAN tag '$var_val' in $file (must be 1-4094), ignoring" + continue + fi + ;; + var_mtu) + if ! validate_mtu "$var_val"; then + msg_warn "Invalid MTU '$var_val' in $file (must be 576-65535), ignoring" + continue + fi + ;; + var_tags) + if ! validate_tags "$var_val"; then + msg_warn "Invalid tags '$var_val' in $file (alphanumeric, -, _, ; only), ignoring" + continue + fi + ;; + var_timezone) + if ! validate_timezone "$var_val"; then + msg_warn "Invalid timezone '$var_val' in $file, ignoring" + continue + fi + ;; + var_brg) + if ! validate_bridge "$var_val"; then + msg_warn "Bridge '$var_val' not found in $file, ignoring" + continue + fi + ;; + var_gateway) + if ! validate_gateway_ip "$var_val"; then + msg_warn "Invalid gateway IP '$var_val' in $file, ignoring" + continue + fi + ;; + var_hostname) + if ! validate_hostname "$var_val"; then + msg_warn "Invalid hostname '$var_val' in $file, ignoring" + continue + fi + ;; + var_cpu) + if ! [[ "$var_val" =~ ^[0-9]+$ ]] || ((var_val < 1 || var_val > 128)); then + msg_warn "Invalid CPU count '$var_val' in $file (must be 1-128), ignoring" + continue + fi + ;; + var_ram) + if ! [[ "$var_val" =~ ^[0-9]+$ ]] || ((var_val < 256)); then + msg_warn "Invalid RAM '$var_val' in $file (must be >= 256 MiB), ignoring" + continue + fi + ;; + var_disk) + if ! [[ "$var_val" =~ ^[0-9]+$ ]] || ((var_val < 1)); then + msg_warn "Invalid disk size '$var_val' in $file (must be >= 1 GB), ignoring" + continue + fi + ;; + var_unprivileged) + if [[ "$var_val" != "0" && "$var_val" != "1" ]]; then + msg_warn "Invalid unprivileged value '$var_val' in $file (must be 0 or 1), ignoring" + continue + fi + ;; + var_nesting) + if [[ "$var_val" != "0" && "$var_val" != "1" ]]; then + msg_warn "Invalid nesting value '$var_val' in $file (must be 0 or 1), ignoring" + continue + fi + ;; + var_keyctl) + if [[ "$var_val" != "0" && "$var_val" != "1" ]]; then + msg_warn "Invalid keyctl value '$var_val' in $file (must be 0 or 1), ignoring" + continue + fi + ;; + var_net) + # var_net can be: dhcp, static IP/CIDR, or IP range + if [[ "$var_val" != "dhcp" ]]; then + if is_ip_range "$var_val"; then + : # IP range is valid, will be resolved at runtime + elif ! validate_ip_address "$var_val"; then + msg_warn "Invalid network '$var_val' in $file (must be dhcp or IP/CIDR), ignoring" continue fi - ;; - var_vlan) - if ! validate_vlan_tag "$var_val"; then - msg_warn "Invalid VLAN tag '$var_val' in $file (must be 1-4094), ignoring" - continue - fi - ;; - var_mtu) - if ! validate_mtu "$var_val"; then - msg_warn "Invalid MTU '$var_val' in $file (must be 576-65535), ignoring" - continue - fi - ;; - var_tags) - if ! validate_tags "$var_val"; then - msg_warn "Invalid tags '$var_val' in $file (alphanumeric, -, _, ; only), ignoring" - continue - fi - ;; - var_timezone) - if ! validate_timezone "$var_val"; then - msg_warn "Invalid timezone '$var_val' in $file, ignoring" - continue - fi - ;; - var_brg) - if ! validate_bridge "$var_val"; then - msg_warn "Bridge '$var_val' not found in $file, ignoring" - continue - fi - ;; - var_gateway) - if ! validate_gateway_ip "$var_val"; then - msg_warn "Invalid gateway IP '$var_val' in $file, ignoring" - continue - fi - ;; - var_hostname) - if ! 
validate_hostname "$var_val"; then - msg_warn "Invalid hostname '$var_val' in $file, ignoring" - continue - fi - ;; - var_cpu) - if ! [[ "$var_val" =~ ^[0-9]+$ ]] || ((var_val < 1 || var_val > 128)); then - msg_warn "Invalid CPU count '$var_val' in $file (must be 1-128), ignoring" - continue - fi - ;; - var_ram) - if ! [[ "$var_val" =~ ^[0-9]+$ ]] || ((var_val < 256)); then - msg_warn "Invalid RAM '$var_val' in $file (must be >= 256 MiB), ignoring" - continue - fi - ;; - var_disk) - if ! [[ "$var_val" =~ ^[0-9]+$ ]] || ((var_val < 1)); then - msg_warn "Invalid disk size '$var_val' in $file (must be >= 1 GB), ignoring" - continue - fi - ;; - var_unprivileged) - if [[ "$var_val" != "0" && "$var_val" != "1" ]]; then - msg_warn "Invalid unprivileged value '$var_val' in $file (must be 0 or 1), ignoring" - continue - fi - ;; - var_nesting) - if [[ "$var_val" != "0" && "$var_val" != "1" ]]; then - msg_warn "Invalid nesting value '$var_val' in $file (must be 0 or 1), ignoring" - continue - fi - ;; - var_keyctl) - if [[ "$var_val" != "0" && "$var_val" != "1" ]]; then - msg_warn "Invalid keyctl value '$var_val' in $file (must be 0 or 1), ignoring" - continue - fi - ;; - var_net) - # var_net can be: dhcp, static IP/CIDR, or IP range - if [[ "$var_val" != "dhcp" ]]; then - if is_ip_range "$var_val"; then - : # IP range is valid, will be resolved at runtime - elif ! validate_ip_address "$var_val"; then - msg_warn "Invalid network '$var_val' in $file (must be dhcp or IP/CIDR), ignoring" - continue - fi - fi - ;; - var_fuse|var_tun|var_gpu|var_ssh|var_verbose|var_protection) - if [[ "$var_val" != "yes" && "$var_val" != "no" ]]; then - msg_warn "Invalid boolean '$var_val' for $var_key in $file (must be yes/no), ignoring" - continue - fi - ;; - var_ipv6_method) - if [[ "$var_val" != "auto" && "$var_val" != "dhcp" && "$var_val" != "static" && "$var_val" != "none" ]]; then - msg_warn "Invalid IPv6 method '$var_val' in $file (must be auto/dhcp/static/none), ignoring" - continue - fi - ;; + fi + ;; + var_fuse | var_tun | var_gpu | var_ssh | var_verbose | var_protection) + if [[ "$var_val" != "yes" && "$var_val" != "no" ]]; then + msg_warn "Invalid boolean '$var_val' for $var_key in $file (must be yes/no), ignoring" + continue + fi + ;; + var_ipv6_method) + if [[ "$var_val" != "auto" && "$var_val" != "dhcp" && "$var_val" != "static" && "$var_val" != "none" ]]; then + msg_warn "Invalid IPv6 method '$var_val' in $file (must be auto/dhcp/static/none), ignoring" + continue + fi + ;; esac fi @@ -3092,21 +3092,18 @@ check_container_resources() { check_container_storage() { usage=$(df / -P | awk 'NR==2 {print $5}' | tr -d '%') - # shellcheck disable=SC2181 - if [ $? -ne 0 ]; then - echo "Error: Failed to check disk usage." + if [ -z "$usage" ] || [ "$usage" -lt 0 ]; then + echo -e "${CROSS}${HOLD}${RD}Error: Failed to check disk usage.${CL}" exit 1 fi if [ "$usage" -gt 80 ]; then - echo -e "${INFO}${HOLD} ${YWB}Warning: Storage is dangerously low (${usage}%).${CL}" + echo -e "${INFO}${HOLD}${YWB}Warning: Storage is dangerously low (${usage}%).${CL}" printf "Continue anyway? 
" read -r prompt case "$prompt" in - [yY][eE][sS] | [yY]) - # User input is "yes" or "y"; continue - ;; + [yY][eE][sS] | [yY]) ;; *) echo -e "${CROSS}${HOLD}${YWB}Exiting based on user input.${CL}" exit 1 @@ -4034,91 +4031,91 @@ EOF' if read -t 60 -r response; then case "${response:-1}" in - 1) - # Remove container - echo -e "\n${TAB}${HOLD}${YW}Removing container ${CTID}${CL}" - pct stop "$CTID" &>/dev/null || true - pct destroy "$CTID" &>/dev/null || true - echo -e "${BFR}${CM}${GN}Container ${CTID} removed${CL}" - ;; - 2) - echo -e "\n${TAB}${YW}Container ${CTID} kept for debugging${CL}" - # Dev mode: Setup MOTD/SSH for debugging access to broken container - if [[ "${DEV_MODE_MOTD:-false}" == "true" ]]; then - echo -e "${TAB}${HOLD}${DGN}Setting up MOTD and SSH for debugging...${CL}" - if pct exec "$CTID" -- bash -c " + 1) + # Remove container + echo -e "\n${TAB}${HOLD}${YW}Removing container ${CTID}${CL}" + pct stop "$CTID" &>/dev/null || true + pct destroy "$CTID" &>/dev/null || true + echo -e "${BFR}${CM}${GN}Container ${CTID} removed${CL}" + ;; + 2) + echo -e "\n${TAB}${YW}Container ${CTID} kept for debugging${CL}" + # Dev mode: Setup MOTD/SSH for debugging access to broken container + if [[ "${DEV_MODE_MOTD:-false}" == "true" ]]; then + echo -e "${TAB}${HOLD}${DGN}Setting up MOTD and SSH for debugging...${CL}" + if pct exec "$CTID" -- bash -c " source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/install.func) declare -f motd_ssh >/dev/null 2>&1 && motd_ssh || true " >/dev/null 2>&1; then - local ct_ip=$(pct exec "$CTID" ip a s dev eth0 2>/dev/null | awk '/inet / {print $2}' | cut -d/ -f1) - echo -e "${BFR}${CM}${GN}MOTD/SSH ready - SSH into container: ssh root@${ct_ip}${CL}" - fi + local ct_ip=$(pct exec "$CTID" ip a s dev eth0 2>/dev/null | awk '/inet / {print $2}' | cut -d/ -f1) + echo -e "${BFR}${CM}${GN}MOTD/SSH ready - SSH into container: ssh root@${ct_ip}${CL}" fi - exit $install_exit_code - ;; - 3) - # Retry with verbose mode - echo -e "\n${TAB}${HOLD}${YW}Removing container ${CTID} for rebuild...${CL}" + fi + exit $install_exit_code + ;; + 3) + # Retry with verbose mode + echo -e "\n${TAB}${HOLD}${YW}Removing container ${CTID} for rebuild...${CL}" + pct stop "$CTID" &>/dev/null || true + pct destroy "$CTID" &>/dev/null || true + echo -e "${BFR}${CM}${GN}Container ${CTID} removed${CL}" + echo "" + # Get new container ID + local old_ctid="$CTID" + export CTID=$(get_valid_container_id "$CTID") + export VERBOSE="yes" + export var_verbose="yes" + + # Show rebuild summary + echo -e "${YW}Rebuilding with preserved settings:${CL}" + echo -e " Container ID: ${old_ctid} → ${CTID}" + echo -e " RAM: ${RAM_SIZE} MiB | CPU: ${CORE_COUNT} cores | Disk: ${DISK_SIZE} GB" + echo -e " Network: ${NET:-dhcp} | Bridge: ${BRG:-vmbr0}" + echo -e " Verbose: ${GN}enabled${CL}" + echo "" + msg_info "Restarting installation..." + # Re-run build_container + build_container + return $? 
+ ;; + 4) + if [[ "$is_oom" == true ]]; then + # Retry with more resources + echo -e "\n${TAB}${HOLD}${YW}Removing container ${CTID} for rebuild with more resources...${CL}" pct stop "$CTID" &>/dev/null || true pct destroy "$CTID" &>/dev/null || true echo -e "${BFR}${CM}${GN}Container ${CTID} removed${CL}" echo "" - # Get new container ID + # Get new container ID and increase resources local old_ctid="$CTID" + local old_ram="$RAM_SIZE" + local old_cpu="$CORE_COUNT" export CTID=$(get_valid_container_id "$CTID") - export VERBOSE="yes" - export var_verbose="yes" + export RAM_SIZE=$((RAM_SIZE * 3 / 2)) + export CORE_COUNT=$((CORE_COUNT + 1)) + export var_ram="$RAM_SIZE" + export var_cpu="$CORE_COUNT" # Show rebuild summary - echo -e "${YW}Rebuilding with preserved settings:${CL}" + echo -e "${YW}Rebuilding with increased resources:${CL}" echo -e " Container ID: ${old_ctid} → ${CTID}" - echo -e " RAM: ${RAM_SIZE} MiB | CPU: ${CORE_COUNT} cores | Disk: ${DISK_SIZE} GB" - echo -e " Network: ${NET:-dhcp} | Bridge: ${BRG:-vmbr0}" - echo -e " Verbose: ${GN}enabled${CL}" + echo -e " RAM: ${old_ram} → ${GN}${RAM_SIZE}${CL} MiB (+50%)" + echo -e " CPU: ${old_cpu} → ${GN}${CORE_COUNT}${CL} cores (+1)" + echo -e " Disk: ${DISK_SIZE} GB | Network: ${NET:-dhcp} | Bridge: ${BRG:-vmbr0}" echo "" msg_info "Restarting installation..." # Re-run build_container build_container return $? - ;; - 4) - if [[ "$is_oom" == true ]]; then - # Retry with more resources - echo -e "\n${TAB}${HOLD}${YW}Removing container ${CTID} for rebuild with more resources...${CL}" - pct stop "$CTID" &>/dev/null || true - pct destroy "$CTID" &>/dev/null || true - echo -e "${BFR}${CM}${GN}Container ${CTID} removed${CL}" - echo "" - # Get new container ID and increase resources - local old_ctid="$CTID" - local old_ram="$RAM_SIZE" - local old_cpu="$CORE_COUNT" - export CTID=$(get_valid_container_id "$CTID") - export RAM_SIZE=$((RAM_SIZE * 3 / 2)) - export CORE_COUNT=$((CORE_COUNT + 1)) - export var_ram="$RAM_SIZE" - export var_cpu="$CORE_COUNT" - - # Show rebuild summary - echo -e "${YW}Rebuilding with increased resources:${CL}" - echo -e " Container ID: ${old_ctid} → ${CTID}" - echo -e " RAM: ${old_ram} → ${GN}${RAM_SIZE}${CL} MiB (+50%)" - echo -e " CPU: ${old_cpu} → ${GN}${CORE_COUNT}${CL} cores (+1)" - echo -e " Disk: ${DISK_SIZE} GB | Network: ${NET:-dhcp} | Bridge: ${BRG:-vmbr0}" - echo "" - msg_info "Restarting installation..." - # Re-run build_container - build_container - return $? - else - echo -e "\n${TAB}${YW}Invalid option. Container ${CTID} kept.${CL}" - exit $install_exit_code - fi - ;; - *) + else echo -e "\n${TAB}${YW}Invalid option. Container ${CTID} kept.${CL}" exit $install_exit_code - ;; + fi + ;; + *) + echo -e "\n${TAB}${YW}Invalid option. 
Container ${CTID} kept.${CL}" + exit $install_exit_code + ;; esac else # Timeout - auto-remove From d7d16543e571e48aef165aeea63d76798d9ebd2c Mon Sep 17 00:00:00 2001 From: justin Date: Tue, 27 Jan 2026 14:16:13 -0500 Subject: [PATCH 150/228] Fix url assignment in fetch_and_deploy_gh_release --- misc/alpine-tools.func | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/misc/alpine-tools.func b/misc/alpine-tools.func index 955554216..4f798c45a 100644 --- a/misc/alpine-tools.func +++ b/misc/alpine-tools.func @@ -317,9 +317,15 @@ fetch_and_deploy_gh_release() { return 1 } + # The command is the same for all cases other than tarball/source + get_url() { # $1 pattern + printf '%s' "$json" | jq -r '.assets[].browser_download_url' | + awk -v p="$1" 'BEGIN{IGNORECASE=1} $0 ~ p {print; exit}' + } + case "$mode" in tarball | source) - url="$(printf '%s' "$json" | jq -r '.tarball_url // empty')" + url=$(printf '%s' "$json" | jq -r '.tarball_url // empty') [ -z "$url" ] && url="https://github.com/$repo/archive/refs/tags/v$version.tar.gz" filename="${app_lc}-${version}.tar.gz" download_with_progress "$url" "$tmpd/$filename" || { @@ -342,7 +348,7 @@ fetch_and_deploy_gh_release() { ;; binary) [ -n "$pattern" ] || pattern="*.apk" - url="$(printf '%s' "$json" | jq -r '.assets[].browser_download_url' | awk -v p="$pattern" 'BEGIN{IGNORECASE=1} $0 ~ p {print; exit}')" + url=$(get_url "$pattern") [ -z "$url" ] && { msg_error "binary asset not found for pattern: $pattern" rm -rf "$tmpd" @@ -374,10 +380,7 @@ fetch_and_deploy_gh_release() { rm -rf "$tmpd" return 1 } - url="$(printf '%s' "$json" | jq -r '.assets[].browser_download_url' | awk -v p="$pattern" ' - BEGIN{IGNORECASE=1} - $0 ~ p {print; exit} - ')" + url=$(get_url "$pattern") [ -z "$url" ] && { msg_error "asset not found for pattern: $pattern" rm -rf "$tmpd" @@ -431,10 +434,7 @@ fetch_and_deploy_gh_release() { rm -rf "$tmpd" return 1 } - url="$(printf '%s' "$json" | jq -r '.assets[].browser_download_url' | awk -v p="$pattern" ' - BEGIN{IGNORECASE=1} - $0 ~ p {print; exit} - ')" + url=$(get_url "$pattern") [ -z "$url" ] && { msg_error "asset not found for pattern: $pattern" rm -rf "$tmpd" From bf97037ba52d00e25735f2c031b2fbc7f926a95f Mon Sep 17 00:00:00 2001 From: justin Date: Tue, 27 Jan 2026 14:22:39 -0500 Subject: [PATCH 151/228] Remove bad quotes --- misc/alpine-tools.func | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/misc/alpine-tools.func b/misc/alpine-tools.func index 4f798c45a..6dd06b659 100644 --- a/misc/alpine-tools.func +++ b/misc/alpine-tools.func @@ -277,7 +277,7 @@ fetch_and_deploy_gh_release() { # $1 app, $2 repo, [$3 mode], [$4 version], [$5 target], [$6 asset_pattern local app="$1" repo="$2" mode="${3:-tarball}" version="${4:-latest}" target="${5:-/opt/$1}" pattern="${6:-}" local app_lc - app_lc="$(lower "$app" | tr -d ' ')" + app_lc=$(lower "$app" | tr -d ' ') local vfile="$HOME/.${app_lc}" local json url filename tmpd unpack @@ -288,7 +288,7 @@ fetch_and_deploy_gh_release() { need_tool curl jq tar || return 1 [ "$mode" = "prebuild" ] || [ "$mode" = "singlefile" ] && need_tool unzip >/dev/null 2>&1 || true - tmpd="$(mktemp -d)" || return 1 + tmpd=$(mktemp -d) || return 1 mkdir -p "$target" # Release JSON (with token/rate-limit handling) @@ -305,10 +305,10 @@ fetch_and_deploy_gh_release() { return 1 } fi - json="$(cat "$tmpd/release.json")" + json=$(cat "$tmpd/release.json") # correct Version - version="$(printf '%s' "$json" | jq -r '.tag_name // empty')" + 
version=$(printf '%s' "$json" | jq -r '.tag_name // empty') version="${version#v}" [ -z "$version" ] && { @@ -337,7 +337,7 @@ fetch_and_deploy_gh_release() { rm -rf "$tmpd" return 1 } - unpack="$(find "$tmpd" -mindepth 1 -maxdepth 1 -type d | head -n1)" + unpack=$(find "$tmpd" -mindepth 1 -maxdepth 1 -type d | head -n1) [ "${CLEAN_INSTALL:-0}" = "1" ] && rm -rf "${target:?}/"* # copy content of unpack to target (cd "$unpack" && tar -cf - .) | (cd "$target" && tar -xf -) || { @@ -414,7 +414,7 @@ fetch_and_deploy_gh_release() { [ "${CLEAN_INSTALL:-0}" = "1" ] && rm -rf "${target:?}/"* # top-level folder strippen if [ "$(find "$tmpd/unp" -mindepth 1 -maxdepth 1 -type d | wc -l)" -eq 1 ] && [ -z "$(find "$tmpd/unp" -mindepth 1 -maxdepth 1 -type f | head -n1)" ]; then - unpack="$(find "$tmpd/unp" -mindepth 1 -maxdepth 1 -type d)" + unpack=$(find "$tmpd/unp" -mindepth 1 -maxdepth 1 -type d) (cd "$unpack" && tar -cf - .) | (cd "$target" && tar -xf -) || { msg_error "copy failed" rm -rf "$tmpd" From 16d0ffa19a880387eb4118324b21e7675ca68e2b Mon Sep 17 00:00:00 2001 From: justin Date: Tue, 27 Jan 2026 14:42:24 -0500 Subject: [PATCH 152/228] $pattern doesn't need to be a parameter --- misc/alpine-tools.func | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/misc/alpine-tools.func b/misc/alpine-tools.func index 6dd06b659..9aee5ca56 100644 --- a/misc/alpine-tools.func +++ b/misc/alpine-tools.func @@ -318,9 +318,9 @@ fetch_and_deploy_gh_release() { } # The command is the same for all cases other than tarball/source - get_url() { # $1 pattern + get_url() { printf '%s' "$json" | jq -r '.assets[].browser_download_url' | - awk -v p="$1" 'BEGIN{IGNORECASE=1} $0 ~ p {print; exit}' + awk -v p="$pattern" 'BEGIN{IGNORECASE=1} $0 ~ p {print; exit}' } case "$mode" in @@ -348,7 +348,7 @@ fetch_and_deploy_gh_release() { ;; binary) [ -n "$pattern" ] || pattern="*.apk" - url=$(get_url "$pattern") + url=$(get_url) [ -z "$url" ] && { msg_error "binary asset not found for pattern: $pattern" rm -rf "$tmpd" @@ -380,7 +380,7 @@ fetch_and_deploy_gh_release() { rm -rf "$tmpd" return 1 } - url=$(get_url "$pattern") + url=$(get_url) [ -z "$url" ] && { msg_error "asset not found for pattern: $pattern" rm -rf "$tmpd" @@ -434,7 +434,7 @@ fetch_and_deploy_gh_release() { rm -rf "$tmpd" return 1 } - url=$(get_url "$pattern") + url=$(get_url) [ -z "$url" ] && { msg_error "asset not found for pattern: $pattern" rm -rf "$tmpd" From f4495280cf8b2abb123f66eb1e36754c60197dfb Mon Sep 17 00:00:00 2001 From: justin Date: Tue, 27 Jan 2026 15:25:05 -0500 Subject: [PATCH 153/228] AWK output needs to be sanitized --- misc/alpine-tools.func | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/misc/alpine-tools.func b/misc/alpine-tools.func index 9aee5ca56..3ddc7fda8 100644 --- a/misc/alpine-tools.func +++ b/misc/alpine-tools.func @@ -320,7 +320,8 @@ fetch_and_deploy_gh_release() { # The command is the same for all cases other than tarball/source get_url() { printf '%s' "$json" | jq -r '.assets[].browser_download_url' | - awk -v p="$pattern" 'BEGIN{IGNORECASE=1} $0 ~ p {print; exit}' + awk -v p="$pattern" 'BEGIN{IGNORECASE=1} $0 ~ p {print; exit}' | + tr -d '[:cntrl:]' } case "$mode" in From 54bbdc180a22fdd6e43208debf422387ed7703c0 Mon Sep 17 00:00:00 2001 From: justin Date: Wed, 28 Jan 2026 11:00:41 -0500 Subject: [PATCH 154/228] Remove inline comment in misc/alpine-tools.func --- misc/alpine-tools.func | 1 - 1 file changed, 1 deletion(-) diff --git a/misc/alpine-tools.func b/misc/alpine-tools.func index 
3ddc7fda8..6d7ed215a 100644 --- a/misc/alpine-tools.func +++ b/misc/alpine-tools.func @@ -317,7 +317,6 @@ fetch_and_deploy_gh_release() { return 1 } - # The command is the same for all cases other than tarball/source get_url() { printf '%s' "$json" | jq -r '.assets[].browser_download_url' | awk -v p="$pattern" 'BEGIN{IGNORECASE=1} $0 ~ p {print; exit}' | From 622fda4b321a1f93ff12793bb1d8d174299b5742 Mon Sep 17 00:00:00 2001 From: justin Date: Mon, 26 Jan 2026 14:32:47 -0500 Subject: [PATCH 155/228] Fix download_with_progress() content_length calc --- misc/alpine-tools.func | 16 ++++++++++++---- misc/tools.func | 9 +++++++-- 2 files changed, 19 insertions(+), 6 deletions(-) diff --git a/misc/alpine-tools.func b/misc/alpine-tools.func index 6d7ed215a..a830c220c 100644 --- a/misc/alpine-tools.func +++ b/misc/alpine-tools.func @@ -196,11 +196,19 @@ ensure_usr_local_bin_persist() { download_with_progress() { # $1 url, $2 dest - local url="$1" out="$2" cl + local url="$1" out="$2" content_length need_tool curl pv || return 1 - cl=$(curl -fsSLI "$url" 2>/dev/null | awk 'tolower($0) ~ /^content-length:/ {print $2}' | tr -d '\r') - if [ -n "$cl" ]; then - curl -fsSL "$url" | pv -s "$cl" >"$out" || { + + content_length=$( + curl -fsSLI "$url" 2>/dev/null | + # May return multiple values on redirect. i.e., 0 and content-length + # Cast $2 to int by adding 0 to it + awk '(tolower($1) ~ /^content-length:/) && ($2 + 0 > 0) {print $2+0}' | + tr -cd '[:digit:]' || true + ) + + if [ -n "$content_length" ]; then + curl -fsSL "$url" | pv -s "$content_length" >"$out" || { msg_error "Download failed: $url" return 1 } diff --git a/misc/tools.func b/misc/tools.func index 70e8c3fa9..44484ae5b 100644 --- a/misc/tools.func +++ b/misc/tools.func @@ -1692,7 +1692,13 @@ function download_with_progress() { # Content-Length aus HTTP-Header holen local content_length - content_length=$(curl -fsSLI "$url" | awk '/Content-Length/ {print $2}' | tr -d '\r' || true) + content_length=$( + curl -fsSLI "$url" 2>/dev/null | + # May return multiple values on redirect. i.e., 0 and content_length + # Add 0 to $2 to cast it to int + awk '(tolower($1) ~ /^content-length:/) && ($2 + 0 > 0) {print $2+0}' | + tr -cd '[:digit:]' || true + ) if [[ -z "$content_length" ]]; then if ! curl -fL# -o "$output" "$url"; then @@ -6205,4 +6211,3 @@ function fetch_and_deploy_archive() { msg_ok "Successfully deployed archive to $directory" return 0 } - From 75898f498697d40e1ff4b90db3f9a3098758fd70 Mon Sep 17 00:00:00 2001 From: justin Date: Tue, 27 Jan 2026 11:30:26 -0500 Subject: [PATCH 156/228] Add tail -1 before tr to use only the last value --- misc/alpine-tools.func | 4 ++-- misc/tools.func | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/misc/alpine-tools.func b/misc/alpine-tools.func index a830c220c..035565194 100644 --- a/misc/alpine-tools.func +++ b/misc/alpine-tools.func @@ -201,10 +201,10 @@ download_with_progress() { content_length=$( curl -fsSLI "$url" 2>/dev/null | - # May return multiple values on redirect. i.e., 0 and content-length + # May return multiple values on redirect. 
i.e., 0 and actual content-length value # Cast $2 to int by adding 0 to it awk '(tolower($1) ~ /^content-length:/) && ($2 + 0 > 0) {print $2+0}' | - tr -cd '[:digit:]' || true + tail -1 | tr -cd '[:digit:]' || true ) if [ -n "$content_length" ]; then diff --git a/misc/tools.func b/misc/tools.func index 44484ae5b..18602dc85 100644 --- a/misc/tools.func +++ b/misc/tools.func @@ -1694,10 +1694,10 @@ function download_with_progress() { local content_length content_length=$( curl -fsSLI "$url" 2>/dev/null | - # May return multiple values on redirect. i.e., 0 and content_length + # May return multiple values on redirect. i.e., 0 and actual content-length value # Add 0 to $2 to cast it to int awk '(tolower($1) ~ /^content-length:/) && ($2 + 0 > 0) {print $2+0}' | - tr -cd '[:digit:]' || true + tail -1 | tr -cd '[:digit:]' || true ) if [[ -z "$content_length" ]]; then From f2a847ac81ffa5f5a670d30b055d4d4d47f79ed0 Mon Sep 17 00:00:00 2001 From: Justin Date: Wed, 28 Jan 2026 15:48:21 +0000 Subject: [PATCH 157/228] Remove inline comment in misc/alpine-tools.func Co-authored-by: Michel Roegl-Brunner <73236783+michelroegl-brunner@users.noreply.github.com> --- misc/alpine-tools.func | 2 -- 1 file changed, 2 deletions(-) diff --git a/misc/alpine-tools.func b/misc/alpine-tools.func index 035565194..9386f737a 100644 --- a/misc/alpine-tools.func +++ b/misc/alpine-tools.func @@ -201,8 +201,6 @@ download_with_progress() { content_length=$( curl -fsSLI "$url" 2>/dev/null | - # May return multiple values on redirect. i.e., 0 and actual content-length value - # Cast $2 to int by adding 0 to it awk '(tolower($1) ~ /^content-length:/) && ($2 + 0 > 0) {print $2+0}' | tail -1 | tr -cd '[:digit:]' || true ) From 62f68959c74b193bc6ab497262608f08240ed652 Mon Sep 17 00:00:00 2001 From: Justin Date: Wed, 28 Jan 2026 15:48:34 +0000 Subject: [PATCH 158/228] Remove inline comment in misc/tools.func Co-authored-by: Michel Roegl-Brunner <73236783+michelroegl-brunner@users.noreply.github.com> --- misc/tools.func | 2 -- 1 file changed, 2 deletions(-) diff --git a/misc/tools.func b/misc/tools.func index 18602dc85..1096b4a01 100644 --- a/misc/tools.func +++ b/misc/tools.func @@ -1694,8 +1694,6 @@ function download_with_progress() { local content_length content_length=$( curl -fsSLI "$url" 2>/dev/null | - # May return multiple values on redirect. i.e., 0 and actual content-length value - # Add 0 to $2 to cast it to int awk '(tolower($1) ~ /^content-length:/) && ($2 + 0 > 0) {print $2+0}' | tail -1 | tr -cd '[:digit:]' || true ) From 210b6080cd9ce40392baa4652c4a1569cc3cd1bc Mon Sep 17 00:00:00 2001 From: "CanbiZ (MickLesk)" <47820557+MickLesk@users.noreply.github.com> Date: Thu, 29 Jan 2026 10:36:57 +0100 Subject: [PATCH 159/228] Update Anytype to use external MongoDB and Redis Stack Switches Anytype deployment to use external MongoDB and Redis Stack instead of embedded databases. Updates default resources to 4GB RAM, 16GB disk, Ubuntu 24.04, and configures MongoDB replica set and Redis Stack installation. Adjusts service dependencies and environment variables accordingly. 
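A quick post-install sanity check for this setup, assuming the defaults written below (MongoDB replica set rs0 on 127.0.0.1:27017, Redis Stack on 6379, bundle ports 33010/33020) and that mongosh and a redis-cli client are on PATH; this is only an illustrative sketch, not part of the shipped scripts:

    # replica set should report set "rs0" with myState 1 (PRIMARY)
    mongosh --quiet --eval 'printjson({set: rs.status().set, state: rs.status().myState})'

    # Redis Stack should answer PONG on its default port
    redis-cli -h 127.0.0.1 -p 6379 ping

    # once the anytype service is running, the bundle should listen on 33010/tcp and 33020/udp
    ss -tulnp | grep -E ':33010|:33020'

If the replica set was not initiated (for example after restoring a backup), rerunning the rs.initiate() call from the installer against 127.0.0.1:27017 should bring it back.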
--- ct/anytype.sh | 8 +++---- frontend/public/json/anytype.json | 10 ++++----- install/anytype-install.sh | 37 +++++++++++++++++++++++++------ 3 files changed, 39 insertions(+), 16 deletions(-) diff --git a/ct/anytype.sh b/ct/anytype.sh index b2af170e8..7525e1eaf 100644 --- a/ct/anytype.sh +++ b/ct/anytype.sh @@ -8,10 +8,10 @@ source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxV APP="Anytype" var_tags="${var_tags:-notes;productivity;sync}" var_cpu="${var_cpu:-2}" -var_ram="${var_ram:-2048}" -var_disk="${var_disk:-10}" -var_os="${var_os:-debian}" -var_version="${var_version:-13}" +var_ram="${var_ram:-4096}" +var_disk="${var_disk:-16}" +var_os="${var_os:-ubuntu}" +var_version="${var_version:-24.04}" var_unprivileged="${var_unprivileged:-1}" header_info "$APP" diff --git a/frontend/public/json/anytype.json b/frontend/public/json/anytype.json index 88c1abe61..a89835cc9 100644 --- a/frontend/public/json/anytype.json +++ b/frontend/public/json/anytype.json @@ -13,17 +13,17 @@ "website": "https://anytype.io/", "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/anytype.webp", "config_path": "/opt/anytype/.env", - "description": "Anytype is a local-first, privacy-focused alternative to Notion. This script deploys the any-sync-bundle which provides a self-hosted sync server for Anytype clients with embedded MongoDB and Redis.", + "description": "Anytype is a local-first, privacy-focused alternative to Notion. This script deploys the any-sync-bundle which provides a self-hosted sync server for Anytype clients with external MongoDB and Redis Stack.", "install_methods": [ { "type": "default", "script": "ct/anytype.sh", "resources": { "cpu": 2, - "ram": 2048, - "hdd": 10, - "os": "Debian", - "version": "13" + "ram": 4096, + "hdd": 16, + "os": "Ubuntu", + "version": "24.04" } } ], diff --git a/install/anytype-install.sh b/install/anytype-install.sh index 458e70d3b..c6dc1e96b 100644 --- a/install/anytype-install.sh +++ b/install/anytype-install.sh @@ -13,21 +13,43 @@ setting_up_container network_check update_os -msg_info "Installing Dependencies" -$STD apt install -y ca-certificates -msg_ok "Installed Dependencies" +setup_mongodb + +msg_info "Configuring MongoDB Replica Set" +cat <>/etc/mongod.conf + +replication: + replSetName: "rs0" +EOF +systemctl restart mongod +sleep 3 +$STD mongosh --eval 'rs.initiate({_id: "rs0", members: [{_id: 0, host: "127.0.0.1:27017"}]})' +msg_ok "Configured MongoDB Replica Set" + +msg_info "Installing Redis Stack" +setup_deb822_repo \ + "redis-stack" \ + "https://packages.redis.io/gpg" \ + "https://packages.redis.io/deb" \ + "jammy" \ + "main" +$STD apt-get install -y \ + redis-stack-server +systemctl enable -q --now redis-stack-server +msg_ok "Installed Redis Stack" fetch_and_deploy_gh_release "anytype" "grishy/any-sync-bundle" "prebuild" "latest" "/opt/anytype" "any-sync-bundle_*_linux_amd64.tar.gz" chmod +x /opt/anytype/any-sync-bundle msg_info "Configuring Anytype" -mkdir -p /opt/anytype/data - +mkdir -p /opt/anytype/data/storage cat </opt/anytype/.env ANY_SYNC_BUNDLE_CONFIG=/opt/anytype/data/bundle-config.yml ANY_SYNC_BUNDLE_CLIENT_CONFIG=/opt/anytype/data/client-config.yml ANY_SYNC_BUNDLE_INIT_STORAGE=/opt/anytype/data/storage/ ANY_SYNC_BUNDLE_INIT_EXTERNAL_ADDRS=${LOCAL_IP} +ANY_SYNC_BUNDLE_INIT_MONGO_URI=mongodb://127.0.0.1:27017/ +ANY_SYNC_BUNDLE_INIT_REDIS_URI=redis://127.0.0.1:6379/ ANY_SYNC_BUNDLE_LOG_LEVEL=info EOF msg_ok "Configured Anytype" @@ -36,15 +58,16 @@ msg_info "Creating Service" cat 
</etc/systemd/system/anytype.service [Unit] Description=Anytype Sync Server (any-sync-bundle) -After=network-online.target +After=network-online.target mongod.service redis-stack-server.service Wants=network-online.target +Requires=mongod.service redis-stack-server.service [Service] Type=simple User=root WorkingDirectory=/opt/anytype EnvironmentFile=/opt/anytype/.env -ExecStart=/opt/anytype/any-sync-bundle start-all-in-one +ExecStart=/opt/anytype/any-sync-bundle start-bundle Restart=on-failure RestartSec=10 From 9d77145a7e02b9f4c9fe852ec12c4baca9fa4623 Mon Sep 17 00:00:00 2001 From: "CanbiZ (MickLesk)" <47820557+MickLesk@users.noreply.github.com> Date: Thu, 29 Jan 2026 11:22:49 +0100 Subject: [PATCH 160/228] Refactor OpenCloud installer to simplify Collabora handling Reworks opencloud-install.sh to remove localhost mode, always require FQDNs, and streamline Collabora and WOPI configuration. The script now always installs Collabora and WOPI, sets up environment variables accordingly, and simplifies service management. The previous localhost/IP-based mode is preserved in opencloud-install.sh.bak for reference. --- install/opencloud-install.sh | 139 ++++++---------- ...stall copy.sh => opencloud-install.sh.bak} | 155 ++++++++++++------ 2 files changed, 155 insertions(+), 139 deletions(-) rename install/{opencloud-install copy.sh => opencloud-install.sh.bak} (56%) diff --git a/install/opencloud-install.sh b/install/opencloud-install.sh index 943578598..7d4c9a060 100644 --- a/install/opencloud-install.sh +++ b/install/opencloud-install.sh @@ -13,48 +13,48 @@ setting_up_container network_check update_os -echo -e "${TAB3}${INFO}${YW} Leave empty to use IP-based localhost mode (no Collabora)${CL}" read -r -p "${TAB3}Enter the hostname of your OpenCloud server (eg cloud.domain.tld): " oc_host - -if [[ -z "$oc_host" ]]; then - # Localhost/IP mode - no TLS, no Collabora - OC_HOST="${LOCAL_IP}" - LOCALHOST_MODE=true - msg_info "Using localhost mode with IP: ${LOCAL_IP}" - msg_warn "Collabora requires TLS and will be skipped in localhost mode" -else +if [[ "$oc_host" ]]; then OC_HOST="$oc_host" - LOCALHOST_MODE=false - read -r -p "${TAB3}Enter the hostname of your Collabora server [collabora.${OC_HOST#*.}]: " collabora_host - COLLABORA_HOST="${collabora_host:-collabora.${OC_HOST#*.}}" - read -r -p "${TAB3}Enter the hostname of your WOPI server [wopiserver.${OC_HOST#*.}]: " wopi_host - WOPI_HOST="${wopi_host:-wopiserver.${OC_HOST#*.}}" +fi +read -r -p "${TAB3}Enter the hostname of your Collabora server (eg collabora.domain.tld): " collabora_host +if [[ "$collabora_host" ]]; then + COLLABORA_HOST="$collabora_host" +fi +read -r -p "${TAB3}Enter the hostname of your WOPI server (eg wopiserver.domain.tld): " wopi_host +if [[ "$wopi_host" ]]; then + WOPI_HOST="$wopi_host" fi -# Collabora Online - only install if not in localhost mode (requires TLS) -if [[ "$LOCALHOST_MODE" != true ]]; then - msg_info "Installing Collabora Online" - curl -fsSL https://collaboraoffice.com/downloads/gpg/collaboraonline-release-keyring.gpg -o /etc/apt/keyrings/collaboraonline-release-keyring.gpg - cat </etc/apt/sources.list.d/collaboraonline.sources +# Collabora online - this is broken because it adds the Component and apt doesn't like that +# setup_deb822_repo \ +# "collaboraonline" \ +# "https://collaboraoffice.com/downloads/gpg/collaboraonline-release-keyring.gpg" \ +# "https://www.collaboraoffice.com/repos/CollaboraOnline/CODE-deb/Release" \ +# "./" \ +# "main" + +msg_info "Installing Collabora Online" +curl -fsSL 
https://collaboraoffice.com/downloads/gpg/collaboraonline-release-keyring.gpg -o /etc/apt/keyrings/collaboraonline-release-keyring.gpg +cat </etc/apt/sources.list.d/colloboraonline.sources Types: deb URIs: https://www.collaboraoffice.com/repos/CollaboraOnline/CODE-deb Suites: ./ Signed-By: /etc/apt/keyrings/collaboraonline-release-keyring.gpg EOF - $STD apt-get update - $STD apt-get install -y coolwsd code-brand - systemctl stop coolwsd - mkdir -p /etc/systemd/system/coolwsd.service.d - cat </etc/systemd/system/coolwsd.service.d/override.conf +$STD apt-get update +$STD apt-get install -y coolwsd code-brand +systemctl stop coolwsd +mkdir -p /etc/systemd/system/coolwsd.service.d +cat </etc/systemd/system/coolwsd.service.d/override.conf [Unit] Before=opencloud-wopi.service EOF - systemctl daemon-reload - COOLPASS="$(openssl rand -base64 36)" - $STD runuser -u cool -- coolconfig set-admin-password --user=admin --password="$COOLPASS" - echo "$COOLPASS" >~/.coolpass - msg_ok "Installed Collabora Online" -fi +systemctl daemon-reload +COOLPASS="$(openssl rand -base64 36)" +$STD sudo -u cool coolconfig set-admin-password --user=admin --password="$COOLPASS" +echo "$COOLPASS" >~/.coolpass +msg_ok "Installed Collabora Online" # OpenCloud fetch_and_deploy_gh_release "opencloud" "opencloud-eu/opencloud" "singlefile" "v5.0.1" "/usr/bin" "opencloud-*-linux-amd64" @@ -68,17 +68,9 @@ mkdir -p "$DATA_DIR" "$CONFIG_DIR"/assets/apps curl -fsSL https://raw.githubusercontent.com/opencloud-eu/opencloud-compose/refs/heads/main/config/opencloud/csp.yaml -o "$CONFIG_DIR"/csp.yaml curl -fsSL https://raw.githubusercontent.com/opencloud-eu/opencloud-compose/refs/heads/main/config/opencloud/proxy.yaml -o "$CONFIG_DIR"/proxy.yaml.bak -if [[ "$LOCALHOST_MODE" == true ]]; then - OC_URL="http://${OC_HOST}:9200" - OC_INSECURE="true" -else - OC_URL="https://${OC_HOST}" - OC_INSECURE="false" -fi - cat <"$ENV_FILE" -OC_URL=${OC_URL} -OC_INSECURE=${OC_INSECURE} +OC_URL=https://${OC_HOST} +OC_INSECURE=false IDM_CREATE_DEMO_USERS=false OC_LOG_LEVEL=warning OC_CONFIG_DIR=${CONFIG_DIR} @@ -101,15 +93,15 @@ GRAPH_INCLUDE_OCM_SHAREES=true PROXY_TLS=false PROXY_CSP_CONFIG_FILE_LOCATION=${CONFIG_DIR}/csp.yaml -## Collaboration - requires VALID TLS (disabled in localhost mode) -# COLLABORA_DOMAIN= -# COLLABORATION_APP_NAME="CollaboraOnline" -# COLLABORATION_APP_PRODUCT="Collabora" -# COLLABORATION_APP_ADDR= -# COLLABORATION_APP_INSECURE=false -# COLLABORATION_HTTP_ADDR=0.0.0.0:9300 -# COLLABORATION_WOPI_SRC= -# COLLABORATION_JWT_SECRET= +## Collaboration - requires VALID TLS +COLLABORA_DOMAIN=${COLLABORA_HOST} +COLLABORATION_APP_NAME="CollaboraOnline" +COLLABORATION_APP_PRODUCT="Collabora" +COLLABORATION_APP_ADDR=https://${COLLABORA_HOST} +COLLABORATION_APP_INSECURE=false +COLLABORATION_HTTP_ADDR=0.0.0.0:9300 +COLLABORATION_WOPI_SRC=https://${WOPI_HOST} +COLLABORATION_JWT_SECRET= ## Notifications - Email settings # NOTIFICATIONS_SMTP_HOST= @@ -174,8 +166,7 @@ Restart=always WantedBy=multi-user.target EOF -if [[ "$LOCALHOST_MODE" != true ]]; then - cat </etc/systemd/system/opencloud-wopi.service +cat </etc/systemd/system/opencloud-wopi.service [Unit] Description=OpenCloud WOPI Server Wants=coolwsd.service @@ -197,49 +188,21 @@ TimeoutStopSec=10 WantedBy=multi-user.target EOF - # Append active Collabora config to env file - cat <>"$ENV_FILE" - -## Collaboration - active configuration -COLLABORA_DOMAIN=${COLLABORA_HOST} -COLLABORATION_APP_NAME="CollaboraOnline" -COLLABORATION_APP_PRODUCT="Collabora" 
-COLLABORATION_APP_ADDR=https://${COLLABORA_HOST} -COLLABORATION_APP_INSECURE=false -COLLABORATION_HTTP_ADDR=0.0.0.0:9300 -COLLABORATION_WOPI_SRC=https://${WOPI_HOST} -COLLABORATION_JWT_SECRET= -EOF - - $STD runuser -u cool -- coolconfig set ssl.enable false - $STD runuser -u cool -- coolconfig set ssl.termination true - $STD runuser -u cool -- coolconfig set ssl.ssl_verification true - sed -i "s|CSP2\"/>|CSP2\">frame-ancestors https://${OC_HOST}|" /etc/coolwsd/coolwsd.xml -fi - +$STD sudo -u cool coolconfig set ssl.enable false +$STD sudo -u cool coolconfig set ssl.termination true +$STD sudo -u cool coolconfig set ssl.ssl_verification true +sed -i "s|CSP2\"/>|CSP2\">frame-ancestors https://${OC_HOST}|" /etc/coolwsd/coolwsd.xml useradd -r -M -s /usr/sbin/nologin opencloud chown -R opencloud:opencloud "$CONFIG_DIR" "$DATA_DIR" - -if [[ "$LOCALHOST_MODE" == true ]]; then - $STD runuser -u opencloud -- opencloud init --config-path "$CONFIG_DIR" --insecure yes -else - $STD runuser -u opencloud -- opencloud init --config-path "$CONFIG_DIR" --insecure no -fi - +sudo -u opencloud opencloud init --config-path "$CONFIG_DIR" --insecure no OPENCLOUD_SECRET="$(sed -n '/jwt/p' "$CONFIG_DIR"/opencloud.yaml | awk '{print $2}')" -if [[ "$LOCALHOST_MODE" != true ]]; then - sed -i "s/COLLABORATION_JWT_SECRET=/&${OPENCLOUD_SECRET//&/\\&}/" "$ENV_FILE" -fi +sed -i "s/JWT_SECRET=/&${OPENCLOUD_SECRET//&/\\&}/" "$ENV_FILE" msg_ok "Configured OpenCloud" msg_info "Starting services" -if [[ "$LOCALHOST_MODE" == true ]]; then - systemctl enable -q --now opencloud -else - systemctl enable -q --now coolwsd opencloud - sleep 5 - systemctl enable -q --now opencloud-wopi -fi +systemctl enable -q --now coolwsd opencloud +sleep 5 +systemctl enable -q --now opencloud-wopi msg_ok "Started services" motd_ssh diff --git a/install/opencloud-install copy.sh b/install/opencloud-install.sh.bak similarity index 56% rename from install/opencloud-install copy.sh rename to install/opencloud-install.sh.bak index 7d4c9a060..0ed6cc619 100644 --- a/install/opencloud-install copy.sh +++ b/install/opencloud-install.sh.bak @@ -13,48 +13,48 @@ setting_up_container network_check update_os +echo -e "${TAB3}${INFO}${YW} Leave empty to use IP-based localhost mode (no Collabora)${CL}" read -r -p "${TAB3}Enter the hostname of your OpenCloud server (eg cloud.domain.tld): " oc_host -if [[ "$oc_host" ]]; then + +if [[ -z "$oc_host" ]]; then + # Localhost/IP mode - no TLS, no Collabora + OC_HOST="${LOCAL_IP}" + LOCALHOST_MODE=true + msg_info "Using localhost mode with IP: ${LOCAL_IP}" + msg_warn "Collabora requires TLS and will be skipped in localhost mode" +else OC_HOST="$oc_host" -fi -read -r -p "${TAB3}Enter the hostname of your Collabora server (eg collabora.domain.tld): " collabora_host -if [[ "$collabora_host" ]]; then - COLLABORA_HOST="$collabora_host" -fi -read -r -p "${TAB3}Enter the hostname of your WOPI server (eg wopiserver.domain.tld): " wopi_host -if [[ "$wopi_host" ]]; then - WOPI_HOST="$wopi_host" + LOCALHOST_MODE=false + read -r -p "${TAB3}Enter the hostname of your Collabora server [collabora.${OC_HOST#*.}]: " collabora_host + COLLABORA_HOST="${collabora_host:-collabora.${OC_HOST#*.}}" + read -r -p "${TAB3}Enter the hostname of your WOPI server [wopiserver.${OC_HOST#*.}]: " wopi_host + WOPI_HOST="${wopi_host:-wopiserver.${OC_HOST#*.}}" fi -# Collabora online - this is broken because it adds the Component and apt doesn't like that -# setup_deb822_repo \ -# "collaboraonline" \ -# 
"https://collaboraoffice.com/downloads/gpg/collaboraonline-release-keyring.gpg" \ -# "https://www.collaboraoffice.com/repos/CollaboraOnline/CODE-deb/Release" \ -# "./" \ -# "main" - -msg_info "Installing Collabora Online" -curl -fsSL https://collaboraoffice.com/downloads/gpg/collaboraonline-release-keyring.gpg -o /etc/apt/keyrings/collaboraonline-release-keyring.gpg -cat </etc/apt/sources.list.d/colloboraonline.sources +# Collabora Online - only install if not in localhost mode (requires TLS) +if [[ "$LOCALHOST_MODE" != true ]]; then + msg_info "Installing Collabora Online" + curl -fsSL https://collaboraoffice.com/downloads/gpg/collaboraonline-release-keyring.gpg -o /etc/apt/keyrings/collaboraonline-release-keyring.gpg + cat </etc/apt/sources.list.d/collaboraonline.sources Types: deb URIs: https://www.collaboraoffice.com/repos/CollaboraOnline/CODE-deb Suites: ./ Signed-By: /etc/apt/keyrings/collaboraonline-release-keyring.gpg EOF -$STD apt-get update -$STD apt-get install -y coolwsd code-brand -systemctl stop coolwsd -mkdir -p /etc/systemd/system/coolwsd.service.d -cat </etc/systemd/system/coolwsd.service.d/override.conf + $STD apt-get update + $STD apt-get install -y coolwsd code-brand + systemctl stop coolwsd + mkdir -p /etc/systemd/system/coolwsd.service.d + cat </etc/systemd/system/coolwsd.service.d/override.conf [Unit] Before=opencloud-wopi.service EOF -systemctl daemon-reload -COOLPASS="$(openssl rand -base64 36)" -$STD sudo -u cool coolconfig set-admin-password --user=admin --password="$COOLPASS" -echo "$COOLPASS" >~/.coolpass -msg_ok "Installed Collabora Online" + systemctl daemon-reload + COOLPASS="$(openssl rand -base64 36)" + $STD runuser -u cool -- coolconfig set-admin-password --user=admin --password="$COOLPASS" + echo "$COOLPASS" >~/.coolpass + msg_ok "Installed Collabora Online" +fi # OpenCloud fetch_and_deploy_gh_release "opencloud" "opencloud-eu/opencloud" "singlefile" "v5.0.1" "/usr/bin" "opencloud-*-linux-amd64" @@ -68,9 +68,33 @@ mkdir -p "$DATA_DIR" "$CONFIG_DIR"/assets/apps curl -fsSL https://raw.githubusercontent.com/opencloud-eu/opencloud-compose/refs/heads/main/config/opencloud/csp.yaml -o "$CONFIG_DIR"/csp.yaml curl -fsSL https://raw.githubusercontent.com/opencloud-eu/opencloud-compose/refs/heads/main/config/opencloud/proxy.yaml -o "$CONFIG_DIR"/proxy.yaml.bak +if [[ "$LOCALHOST_MODE" == true ]]; then + OC_URL="http://${OC_HOST}:9200" + OC_INSECURE="true" +else + OC_URL="https://${OC_HOST}" + OC_INSECURE="false" +fi + +# Create web config directory and config.json +mkdir -p "$CONFIG_DIR"/web +cat <"$CONFIG_DIR"/web/config.json +{ + "server": "${OC_URL}", + "theme": "https://raw.githubusercontent.com/opencloud-eu/opencloud-compose/refs/heads/main/config/opencloud/web/themes/opencloud/theme.json", + "openIdConnect": { + "metadata_url": "${OC_URL}/.well-known/openid-configuration", + "authority": "${OC_URL}", + "client_id": "web", + "response_type": "code", + "scope": "openid profile email" + } +} +EOF + cat <"$ENV_FILE" -OC_URL=https://${OC_HOST} -OC_INSECURE=false +OC_URL=${OC_URL} +OC_INSECURE=${OC_INSECURE} IDM_CREATE_DEMO_USERS=false OC_LOG_LEVEL=warning OC_CONFIG_DIR=${CONFIG_DIR} @@ -93,15 +117,15 @@ GRAPH_INCLUDE_OCM_SHAREES=true PROXY_TLS=false PROXY_CSP_CONFIG_FILE_LOCATION=${CONFIG_DIR}/csp.yaml -## Collaboration - requires VALID TLS -COLLABORA_DOMAIN=${COLLABORA_HOST} -COLLABORATION_APP_NAME="CollaboraOnline" -COLLABORATION_APP_PRODUCT="Collabora" -COLLABORATION_APP_ADDR=https://${COLLABORA_HOST} -COLLABORATION_APP_INSECURE=false 
-COLLABORATION_HTTP_ADDR=0.0.0.0:9300 -COLLABORATION_WOPI_SRC=https://${WOPI_HOST} -COLLABORATION_JWT_SECRET= +## Collaboration - requires VALID TLS (disabled in localhost mode) +# COLLABORA_DOMAIN= +# COLLABORATION_APP_NAME="CollaboraOnline" +# COLLABORATION_APP_PRODUCT="Collabora" +# COLLABORATION_APP_ADDR= +# COLLABORATION_APP_INSECURE=false +# COLLABORATION_HTTP_ADDR=0.0.0.0:9300 +# COLLABORATION_WOPI_SRC= +# COLLABORATION_JWT_SECRET= ## Notifications - Email settings # NOTIFICATIONS_SMTP_HOST= @@ -166,7 +190,8 @@ Restart=always WantedBy=multi-user.target EOF -cat </etc/systemd/system/opencloud-wopi.service +if [[ "$LOCALHOST_MODE" != true ]]; then + cat </etc/systemd/system/opencloud-wopi.service [Unit] Description=OpenCloud WOPI Server Wants=coolwsd.service @@ -188,21 +213,49 @@ TimeoutStopSec=10 WantedBy=multi-user.target EOF -$STD sudo -u cool coolconfig set ssl.enable false -$STD sudo -u cool coolconfig set ssl.termination true -$STD sudo -u cool coolconfig set ssl.ssl_verification true -sed -i "s|CSP2\"/>|CSP2\">frame-ancestors https://${OC_HOST}|" /etc/coolwsd/coolwsd.xml + # Append active Collabora config to env file + cat <>"$ENV_FILE" + +## Collaboration - active configuration +COLLABORA_DOMAIN=${COLLABORA_HOST} +COLLABORATION_APP_NAME="CollaboraOnline" +COLLABORATION_APP_PRODUCT="Collabora" +COLLABORATION_APP_ADDR=https://${COLLABORA_HOST} +COLLABORATION_APP_INSECURE=false +COLLABORATION_HTTP_ADDR=0.0.0.0:9300 +COLLABORATION_WOPI_SRC=https://${WOPI_HOST} +COLLABORATION_JWT_SECRET= +EOF + + $STD runuser -u cool -- coolconfig set ssl.enable false + $STD runuser -u cool -- coolconfig set ssl.termination true + $STD runuser -u cool -- coolconfig set ssl.ssl_verification true + sed -i "s|CSP2\"/>|CSP2\">frame-ancestors https://${OC_HOST}|" /etc/coolwsd/coolwsd.xml +fi + useradd -r -M -s /usr/sbin/nologin opencloud chown -R opencloud:opencloud "$CONFIG_DIR" "$DATA_DIR" -sudo -u opencloud opencloud init --config-path "$CONFIG_DIR" --insecure no + +if [[ "$LOCALHOST_MODE" == true ]]; then + $STD runuser -u opencloud -- opencloud init --config-path "$CONFIG_DIR" --insecure yes +else + $STD runuser -u opencloud -- opencloud init --config-path "$CONFIG_DIR" --insecure no +fi + OPENCLOUD_SECRET="$(sed -n '/jwt/p' "$CONFIG_DIR"/opencloud.yaml | awk '{print $2}')" -sed -i "s/JWT_SECRET=/&${OPENCLOUD_SECRET//&/\\&}/" "$ENV_FILE" +if [[ "$LOCALHOST_MODE" != true ]]; then + sed -i "s/COLLABORATION_JWT_SECRET=/&${OPENCLOUD_SECRET//&/\\&}/" "$ENV_FILE" +fi msg_ok "Configured OpenCloud" msg_info "Starting services" -systemctl enable -q --now coolwsd opencloud -sleep 5 -systemctl enable -q --now opencloud-wopi +if [[ "$LOCALHOST_MODE" == true ]]; then + systemctl enable -q --now opencloud +else + systemctl enable -q --now coolwsd opencloud + sleep 5 + systemctl enable -q --now opencloud-wopi +fi msg_ok "Started services" motd_ssh From b4af893e66a47a6a82b9d3186360a16d54651e33 Mon Sep 17 00:00:00 2001 From: "CanbiZ (MickLesk)" <47820557+MickLesk@users.noreply.github.com> Date: Thu, 29 Jan 2026 12:25:01 +0100 Subject: [PATCH 161/228] add features to valkey --- install/alpine-valkey-install.sh | 58 ++++++++++++++++++++++++++++++-- 1 file changed, 55 insertions(+), 3 deletions(-) diff --git a/install/alpine-valkey-install.sh b/install/alpine-valkey-install.sh index 66501b3eb..5e09e7d1c 100644 --- a/install/alpine-valkey-install.sh +++ b/install/alpine-valkey-install.sh @@ -14,11 +14,63 @@ network_check update_os msg_info "Installing Valkey" -$STD apk add valkey valkey-openrc valkey-cli -$STD sed -i 
's/^bind .*/bind 0.0.0.0/' /etc/valkey/valkey.conf +$STD apk add valkey valkey-openrc valkey-cli openssl +sed -i 's/^bind .*/bind 0.0.0.0/' /etc/valkey/valkey.conf + +PASS="$(openssl rand -base64 48 | tr -dc 'a-zA-Z0-9' | head -c32)" +echo "requirepass $PASS" >>/etc/valkey/valkey.conf +echo "$PASS" >~/valkey.creds +chmod 600 ~/valkey.creds + +MEMTOTAL_MB=$(free -m | grep ^Mem: | awk '{print $2}') +MAXMEMORY_MB=$((MEMTOTAL_MB * 75 / 100)) + +{ + echo "" + echo "# Memory-optimized settings for small-scale deployments" + echo "maxmemory ${MAXMEMORY_MB}mb" + echo "maxmemory-policy allkeys-lru" + echo "maxmemory-samples 10" +} >>/etc/valkey/valkey.conf +msg_ok "Installed Valkey" + +echo +read -r -p "${TAB3}Enable TLS for Valkey (Sentinel mode not supported)? [y/N]: " prompt +if [[ ${prompt,,} =~ ^(y|yes)$ ]]; then + read -r -p "${TAB3}Use TLS-only mode (disable TCP port 6379)? [y/N]: " tls_only + msg_info "Configuring TLS for Valkey..." + + create_self_signed_cert "Valkey" + TLS_DIR="/etc/ssl/valkey" + chown valkey:valkey "$TLS_DIR/valkey.crt" "$TLS_DIR/valkey.key" + + if [[ ${tls_only,,} =~ ^(y|yes)$ ]]; then + { + echo "" + echo "# TLS configuration generated by Proxmox VE Valkey helper-script" + echo "port 0" + echo "tls-port 6379" + echo "tls-cert-file $TLS_DIR/valkey.crt" + echo "tls-key-file $TLS_DIR/valkey.key" + echo "tls-auth-clients no" + } >>/etc/valkey/valkey.conf + msg_ok "Enabled TLS-only mode on port 6379" + else + { + echo "" + echo "# TLS configuration generated by Proxmox VE Valkey helper-script" + echo "tls-port 6380" + echo "tls-cert-file $TLS_DIR/valkey.crt" + echo "tls-key-file $TLS_DIR/valkey.key" + echo "tls-auth-clients no" + } >>/etc/valkey/valkey.conf + msg_ok "Enabled TLS on port 6380 and TCP on 6379" + fi +fi + $STD rc-update add valkey default $STD rc-service valkey start -msg_ok "Installed Valkey" motd_ssh customize +cleanup_lxc From a2bbf6c75f69e3f4b3fa0ee8a43fe69da201b2eb Mon Sep 17 00:00:00 2001 From: "CanbiZ (MickLesk)" <47820557+MickLesk@users.noreply.github.com> Date: Thu, 29 Jan 2026 12:26:20 +0100 Subject: [PATCH 162/228] ssl --- install/alpine-valkey-install.sh | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/install/alpine-valkey-install.sh b/install/alpine-valkey-install.sh index 5e09e7d1c..26571cb72 100644 --- a/install/alpine-valkey-install.sh +++ b/install/alpine-valkey-install.sh @@ -40,8 +40,15 @@ if [[ ${prompt,,} =~ ^(y|yes)$ ]]; then read -r -p "${TAB3}Use TLS-only mode (disable TCP port 6379)? [y/N]: " tls_only msg_info "Configuring TLS for Valkey..." 
- create_self_signed_cert "Valkey" TLS_DIR="/etc/ssl/valkey" + mkdir -p "$TLS_DIR" + $STD openssl req -new -newkey rsa:2048 -days 365 -nodes -x509 \ + -subj "/CN=Valkey" \ + -addext "subjectAltName=DNS:Valkey" \ + -keyout "$TLS_DIR/valkey.key" \ + -out "$TLS_DIR/valkey.crt" + chmod 600 "$TLS_DIR/valkey.key" + chmod 644 "$TLS_DIR/valkey.crt" chown valkey:valkey "$TLS_DIR/valkey.crt" "$TLS_DIR/valkey.key" if [[ ${tls_only,,} =~ ^(y|yes)$ ]]; then From 8d649bbb8c7e7e52f7af9884ae7100b9fed9255e Mon Sep 17 00:00:00 2001 From: "CanbiZ (MickLesk)" <47820557+MickLesk@users.noreply.github.com> Date: Thu, 29 Jan 2026 12:43:51 +0100 Subject: [PATCH 163/228] clear valkey --- install/alpine-valkey-install.sh | 46 +++----------------------------- 1 file changed, 4 insertions(+), 42 deletions(-) diff --git a/install/alpine-valkey-install.sh b/install/alpine-valkey-install.sh index 26571cb72..ea2d17451 100644 --- a/install/alpine-valkey-install.sh +++ b/install/alpine-valkey-install.sh @@ -14,10 +14,10 @@ network_check update_os msg_info "Installing Valkey" -$STD apk add valkey valkey-openrc valkey-cli openssl +$STD apk add valkey valkey-openrc valkey-cli sed -i 's/^bind .*/bind 0.0.0.0/' /etc/valkey/valkey.conf -PASS="$(openssl rand -base64 48 | tr -dc 'a-zA-Z0-9' | head -c32)" +PASS="$(tr -dc 'a-zA-Z0-9' >/etc/valkey/valkey.conf echo "$PASS" >~/valkey.creds chmod 600 ~/valkey.creds @@ -34,46 +34,8 @@ MAXMEMORY_MB=$((MEMTOTAL_MB * 75 / 100)) } >>/etc/valkey/valkey.conf msg_ok "Installed Valkey" -echo -read -r -p "${TAB3}Enable TLS for Valkey (Sentinel mode not supported)? [y/N]: " prompt -if [[ ${prompt,,} =~ ^(y|yes)$ ]]; then - read -r -p "${TAB3}Use TLS-only mode (disable TCP port 6379)? [y/N]: " tls_only - msg_info "Configuring TLS for Valkey..." - - TLS_DIR="/etc/ssl/valkey" - mkdir -p "$TLS_DIR" - $STD openssl req -new -newkey rsa:2048 -days 365 -nodes -x509 \ - -subj "/CN=Valkey" \ - -addext "subjectAltName=DNS:Valkey" \ - -keyout "$TLS_DIR/valkey.key" \ - -out "$TLS_DIR/valkey.crt" - chmod 600 "$TLS_DIR/valkey.key" - chmod 644 "$TLS_DIR/valkey.crt" - chown valkey:valkey "$TLS_DIR/valkey.crt" "$TLS_DIR/valkey.key" - - if [[ ${tls_only,,} =~ ^(y|yes)$ ]]; then - { - echo "" - echo "# TLS configuration generated by Proxmox VE Valkey helper-script" - echo "port 0" - echo "tls-port 6379" - echo "tls-cert-file $TLS_DIR/valkey.crt" - echo "tls-key-file $TLS_DIR/valkey.key" - echo "tls-auth-clients no" - } >>/etc/valkey/valkey.conf - msg_ok "Enabled TLS-only mode on port 6379" - else - { - echo "" - echo "# TLS configuration generated by Proxmox VE Valkey helper-script" - echo "tls-port 6380" - echo "tls-cert-file $TLS_DIR/valkey.crt" - echo "tls-key-file $TLS_DIR/valkey.key" - echo "tls-auth-clients no" - } >>/etc/valkey/valkey.conf - msg_ok "Enabled TLS on port 6380 and TCP on 6379" - fi -fi +# Note: Alpine's valkey package is compiled without TLS support +# For TLS, use the Debian-based valkey script instead $STD rc-update add valkey default $STD rc-service valkey start From bd5dd98b86df2da8f87f249e0c47748dfb9ba001 Mon Sep 17 00:00:00 2001 From: "CanbiZ (MickLesk)" <47820557+MickLesk@users.noreply.github.com> Date: Thu, 29 Jan 2026 12:50:15 +0100 Subject: [PATCH 164/228] Update alpine-valkey-install.sh --- install/alpine-valkey-install.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/install/alpine-valkey-install.sh b/install/alpine-valkey-install.sh index ea2d17451..5beda4860 100644 --- a/install/alpine-valkey-install.sh +++ b/install/alpine-valkey-install.sh @@ -17,7 +17,7 @@ 
msg_info "Installing Valkey" $STD apk add valkey valkey-openrc valkey-cli sed -i 's/^bind .*/bind 0.0.0.0/' /etc/valkey/valkey.conf -PASS="$(tr -dc 'a-zA-Z0-9' >/etc/valkey/valkey.conf echo "$PASS" >~/valkey.creds chmod 600 ~/valkey.creds From 669dfa52cf8bbc35faa46bbfda0065eddaca4a97 Mon Sep 17 00:00:00 2001 From: "CanbiZ (MickLesk)" <47820557+MickLesk@users.noreply.github.com> Date: Thu, 29 Jan 2026 12:53:19 +0100 Subject: [PATCH 165/228] remove valkey --- install/alpine-valkey-install.sh | 45 -------------------------------- 1 file changed, 45 deletions(-) delete mode 100644 install/alpine-valkey-install.sh diff --git a/install/alpine-valkey-install.sh b/install/alpine-valkey-install.sh deleted file mode 100644 index 5beda4860..000000000 --- a/install/alpine-valkey-install.sh +++ /dev/null @@ -1,45 +0,0 @@ -#!/usr/bin/env bash - -# Copyright (c) 2021-2026 community-scripts ORG -# Author: pshankinclarke (lazarillo) -# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE -# Source: https://valkey.io/ - -source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" -color -verb_ip6 -catch_errors -setting_up_container -network_check -update_os - -msg_info "Installing Valkey" -$STD apk add valkey valkey-openrc valkey-cli -sed -i 's/^bind .*/bind 0.0.0.0/' /etc/valkey/valkey.conf - -PASS="$(head -c 100 /dev/urandom | tr -dc 'a-zA-Z0-9' | head -c32)" -echo "requirepass $PASS" >>/etc/valkey/valkey.conf -echo "$PASS" >~/valkey.creds -chmod 600 ~/valkey.creds - -MEMTOTAL_MB=$(free -m | grep ^Mem: | awk '{print $2}') -MAXMEMORY_MB=$((MEMTOTAL_MB * 75 / 100)) - -{ - echo "" - echo "# Memory-optimized settings for small-scale deployments" - echo "maxmemory ${MAXMEMORY_MB}mb" - echo "maxmemory-policy allkeys-lru" - echo "maxmemory-samples 10" -} >>/etc/valkey/valkey.conf -msg_ok "Installed Valkey" - -# Note: Alpine's valkey package is compiled without TLS support -# For TLS, use the Debian-based valkey script instead - -$STD rc-update add valkey default -$STD rc-service valkey start - -motd_ssh -customize -cleanup_lxc From 448c1d6cafcf3ba98fdf71879a3f244907932b24 Mon Sep 17 00:00:00 2001 From: "CanbiZ (MickLesk)" <47820557+MickLesk@users.noreply.github.com> Date: Thu, 29 Jan 2026 12:53:27 +0100 Subject: [PATCH 166/228] remove valkey --- ct/alpine-valkey.sh | 73 --------------------------------------------- 1 file changed, 73 deletions(-) delete mode 100644 ct/alpine-valkey.sh diff --git a/ct/alpine-valkey.sh b/ct/alpine-valkey.sh deleted file mode 100644 index 2765aff7f..000000000 --- a/ct/alpine-valkey.sh +++ /dev/null @@ -1,73 +0,0 @@ -#!/usr/bin/env bash -source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func) -# Copyright (c) 2021-2026 community-scripts ORG -# Author: pshankinclarke (lazarillo) -# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE -# Source: https://valkey.io/ - -APP="Alpine-Valkey" -var_tags="${var_tags:-alpine;database}" -var_cpu="${var_cpu:-1}" -var_ram="${var_ram:-256}" -var_disk="${var_disk:-1}" -var_os="${var_os:-alpine}" -var_version="${var_version:-3.22}" -var_unprivileged="${var_unprivileged:-1}" - -header_info "$APP" -variables -color -catch_errors - -function update_script() { - if ! 
apk -e info newt >/dev/null 2>&1; then - apk add -q newt - fi - LXCIP=$(ip a s dev eth0 | awk '/inet / {print $2}' | cut -d/ -f1) - while true; do - CHOICE=$( - whiptail --backtitle "Proxmox VE Helper Scripts" --title "Valkey Management" --menu "Select option" 11 58 3 \ - "1" "Update Valkey" \ - "2" "Allow 0.0.0.0 for listening" \ - "3" "Allow only ${LXCIP} for listening" 3>&2 2>&1 1>&3 - ) - exit_status=$? - if [ $exit_status == 1 ]; then - clear - exit-script - fi - header_info - case $CHOICE in - 1) - msg_info "Updating Valkey" - apk update && apk upgrade valkey - rc-service valkey restart - msg_ok "Updated Valkey" - msg_ok "Updated successfully!" - exit - ;; - 2) - msg_info "Setting Valkey to listen on all interfaces" - sed -i 's/^bind .*/bind 0.0.0.0/' /etc/valkey/valkey.conf - rc-service valkey restart - msg_ok "Valkey now listens on all interfaces!" - exit - ;; - 3) - msg_info "Setting Valkey to listen only on ${LXCIP}" - sed -i "s/^bind .*/bind ${LXCIP}/" /etc/valkey/valkey.conf - rc-service valkey restart - msg_ok "Valkey now listens only on ${LXCIP}!" - exit - ;; - esac - done -} - -start -build_container -description - -msg_ok "Completed successfully!\n" -echo -e "${APP} should be reachable on port 6379. - ${BL}valkey-cli -h ${IP} -p 6379${CL} \n" From ff1cb4e9842d8d981e643c5be77d7b97b7bd11ca Mon Sep 17 00:00:00 2001 From: "CanbiZ (MickLesk)" <47820557+MickLesk@users.noreply.github.com> Date: Thu, 29 Jan 2026 13:43:45 +0100 Subject: [PATCH 167/228] Remove unused import_local_ip calls from install scripts Eliminated redundant or unused calls to import_local_ip across multiple install and update scripts to clean up the codebase. No functional changes were made to the installation or update processes. --- ct/ampache.sh | 2 +- install/affine-install.sh | 2 +- install/deferred/docspell-install.sh | 2 +- install/ente-install.sh | 2 +- install/linkwarden-install.sh | 2 +- install/piler-install.sh | 2 +- install/pixelfed-install.sh | 2 +- install/wishlist-install.sh | 2 +- 8 files changed, 8 insertions(+), 8 deletions(-) diff --git a/ct/ampache.sh b/ct/ampache.sh index ef6bb7903..1c4d6415e 100644 --- a/ct/ampache.sh +++ b/ct/ampache.sh @@ -29,7 +29,7 @@ function update_script() { exit fi if check_for_gh_release "Ampache" "ampache/ampache"; then - import_local_ip + msg_info "Stopping Apache" systemctl stop apache2 msg_ok "Stopped Apache" diff --git a/install/affine-install.sh b/install/affine-install.sh index ce3cb9fac..5cce12462 100644 --- a/install/affine-install.sh +++ b/install/affine-install.sh @@ -29,7 +29,7 @@ PG_VERSION="16" PG_MODULES="pgvector" setup_postgresql PG_DB_NAME="affine" PG_DB_USER="affine" setup_postgresql_db NODE_VERSION="22" setup_nodejs setup_rust -import_local_ip + fetch_and_deploy_gh_release "affine_app" "toeverything/AFFiNE" "tarball" "latest" "/opt/affine" diff --git a/install/deferred/docspell-install.sh b/install/deferred/docspell-install.sh index 794e4c9bb..3738c816b 100644 --- a/install/deferred/docspell-install.sh +++ b/install/deferred/docspell-install.sh @@ -14,7 +14,7 @@ update_os msg_info "Setup Functions" setup_local_ip_helper -import_local_ip + msg_ok "Setup Functions" msg_info "Installing Dependencies (Patience)" diff --git a/install/ente-install.sh b/install/ente-install.sh index a6028e3cb..e1b9298e7 100644 --- a/install/ente-install.sh +++ b/install/ente-install.sh @@ -28,7 +28,7 @@ setup_go NODE_VERSION="24" NODE_MODULE="yarn" setup_nodejs RUST_CRATES="wasm-pack" setup_rust $STD rustup target add wasm32-unknown-unknown -import_local_ip + 
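# (hedged aside on the context line below: the jq filter keeps only releases whose
#  tag starts with "cli-v" and takes the first match, relying on the GitHub API
#  returning releases newest-first. A self-contained sketch with made-up tags:)
#    echo '[{"tag_name":"photos-v1.3.7"},{"tag_name":"cli-v0.2.3"},{"tag_name":"cli-v0.2.2"}]' \
#      | jq -r '[.[] | select(.tag_name | startswith("cli-v"))][0].tag_name'    # -> cli-v0.2.3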
ENTE_CLI_VERSION=$(curl -s https://api.github.com/repos/ente-io/ente/releases | jq -r '[.[] | select(.tag_name | startswith("cli-v"))][0].tag_name') fetch_and_deploy_gh_release "ente-server" "ente-io/ente" "tarball" "latest" "/opt/ente" diff --git a/install/linkwarden-install.sh b/install/linkwarden-install.sh index 8f4b569a5..3808c8426 100644 --- a/install/linkwarden-install.sh +++ b/install/linkwarden-install.sh @@ -22,7 +22,7 @@ PG_VERSION="16" setup_postgresql PG_DB_NAME="linkwardendb" PG_DB_USER="linkwarden" setup_postgresql_db RUST_CRATES="monolith" setup_rust fetch_and_deploy_gh_release "linkwarden" "linkwarden/linkwarden" -import_local_ip + read -r -p "${TAB3}Would you like to add Adminer? " prompt if [[ "${prompt,,}" =~ ^(y|yes)$ ]]; then diff --git a/install/piler-install.sh b/install/piler-install.sh index 1eeadff04..0f9c92bc7 100644 --- a/install/piler-install.sh +++ b/install/piler-install.sh @@ -30,7 +30,7 @@ $STD apt install -y \ gnupg msg_ok "Installed Dependencies" -import_local_ip + setup_mariadb MARIADB_DB_NAME="piler" MARIADB_DB_USER="piler" setup_mariadb_db PHP_VERSION="8.3" PHP_FPM="YES" PHP_MODULE="ldap,gd,memcached,pdo,mysql,curl,zip" setup_php diff --git a/install/pixelfed-install.sh b/install/pixelfed-install.sh index f2b9ee78e..923068139 100644 --- a/install/pixelfed-install.sh +++ b/install/pixelfed-install.sh @@ -29,7 +29,7 @@ useradd -rU -s /bin/bash pixelfed usermod -aG redis pixelfed msg_ok "Created Pixelfed User" -import_local_ip + PG_VERSION="17" setup_postgresql PG_DB_NAME="pixelfed" PG_DB_USER="pixelfed" setup_postgresql_db PHP_VERSION="8.4" PHP_FPM="YES" PHP_MODULE="bcmath,ctype,exif,imagick,pgsql,redis,tokenizer" PHP_UPLOAD_MAX_FILESIZE="500M" PHP_POST_MAX_SIZE="500M" PHP_MAX_EXECUTION_TIME="600" setup_php diff --git a/install/wishlist-install.sh b/install/wishlist-install.sh index 5203d2731..659439f74 100644 --- a/install/wishlist-install.sh +++ b/install/wishlist-install.sh @@ -24,7 +24,7 @@ msg_ok "Installed dependencies" NODE_VERSION="24" NODE_MODULE="pnpm" setup_nodejs fetch_and_deploy_gh_release "wishlist" "cmintey/wishlist" "tarball" LATEST_APP_VERSION=$(get_latest_github_release "cmintey/wishlist" false) -import_local_ip + msg_info "Installing Wishlist" cd /opt/wishlist From 4cf56f38358169f0d834ee361d6fb9c9b85012b8 Mon Sep 17 00:00:00 2001 From: "CanbiZ (MickLesk)" <47820557+MickLesk@users.noreply.github.com> Date: Thu, 29 Jan 2026 13:45:17 +0100 Subject: [PATCH 168/228] test databasus --- install/databasus-install.sh | 71 ++++++++++++++++++++---------------- 1 file changed, 40 insertions(+), 31 deletions(-) diff --git a/install/databasus-install.sh b/install/databasus-install.sh index fbac56054..2d4f06743 100644 --- a/install/databasus-install.sh +++ b/install/databasus-install.sh @@ -17,12 +17,19 @@ msg_info "Installing Dependencies" $STD apt install -y nginx msg_ok "Installed Dependencies" -import_local_ip PG_VERSION="17" setup_postgresql -PG_DB_NAME="databasus" PG_DB_USER="databasus" setup_postgresql_db setup_go NODE_VERSION="24" setup_nodejs +msg_info "Installing Valkey" +setup_deb822_repo "valkey" \ + "https://greensec.github.io/valkey-debian/public.key" \ + "https://greensec.github.io/valkey-debian/repo" \ + "$(lsb_release -cs)" \ + "main" +$STD apt-get install -y valkey +msg_ok "Installed Valkey" + fetch_and_deploy_gh_release "databasus" "databasus/databasus" "tarball" "latest" "/opt/databasus" msg_info "Building Databasus (Patience)" @@ -36,19 +43,17 @@ $STD go install github.com/swaggo/swag/cmd/swag@latest $STD /root/go/bin/swag init 
-g cmd/main.go -o swagger $STD env CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o databasus ./cmd/main.go mv /opt/databasus/backend/databasus /opt/databasus/databasus -mkdir -p /opt/databasus_data/{data,backups,logs} -mkdir -p /databasus-data/temp +mkdir -p /databasus-data/{pgdata,temp,backups,data,logs} mkdir -p /opt/databasus/ui/build +mkdir -p /opt/databasus/migrations cp -r /opt/databasus/frontend/dist/* /opt/databasus/ui/build/ -cp -r /opt/databasus/backend/migrations /opt/databasus/ -chown -R postgres:postgres /opt/databasus -chown -R postgres:postgres /opt/databasus_data +cp -r /opt/databasus/backend/migrations/* /opt/databasus/migrations/ chown -R postgres:postgres /databasus-data msg_ok "Built Databasus" msg_info "Configuring Databasus" -ADMIN_PASS=$(openssl rand -base64 12) JWT_SECRET=$(openssl rand -hex 32) +ENCRYPTION_KEY=$(openssl rand -hex 32) # Create PostgreSQL version symlinks for compatibility for v in 12 13 14 15 16 18; do @@ -67,50 +72,54 @@ ENV_MODE=production SERVER_PORT=4005 SERVER_HOST=0.0.0.0 -# Database (Internal PostgreSQL for app data) -DATABASE_DSN=host=localhost user=${PG_DB_USER} password=${PG_DB_PASS} dbname=${PG_DB_NAME} port=5432 sslmode=disable -DATABASE_URL=postgres://${PG_DB_USER}:${PG_DB_PASS}@localhost:5432/${PG_DB_NAME}?sslmode=disable +# Database +DATABASE_DSN=host=localhost user=postgres password=postgres dbname=databasus port=5432 sslmode=disable +DATABASE_URL=postgres://postgres:postgres@localhost:5432/databasus?sslmode=disable # Migrations GOOSE_DRIVER=postgres -GOOSE_DBSTRING=postgres://${PG_DB_USER}:${PG_DB_PASS}@localhost:5432/${PG_DB_NAME}?sslmode=disable +GOOSE_DBSTRING=postgres://postgres:postgres@localhost:5432/databasus?sslmode=disable GOOSE_MIGRATION_DIR=/opt/databasus/migrations # Security JWT_SECRET=${JWT_SECRET} -ENCRYPTION_KEY=$(openssl rand -hex 32) - -# Admin User -ADMIN_EMAIL=admin@localhost -ADMIN_PASSWORD=${ADMIN_PASS} +ENCRYPTION_KEY=${ENCRYPTION_KEY} # Paths -DATA_DIR=/opt/databasus_data/data -BACKUP_DIR=/opt/databasus_data/backups -LOG_DIR=/opt/databasus_data/logs - -# PostgreSQL Tools (for creating backups) -PG_DUMP_PATH=/usr/lib/postgresql/17/bin/pg_dump -PG_RESTORE_PATH=/usr/lib/postgresql/17/bin/pg_restore -PSQL_PATH=/usr/lib/postgresql/17/bin/psql +DATA_DIR=/databasus-data/data +BACKUP_DIR=/databasus-data/backups +LOG_DIR=/databasus-data/logs EOF chown postgres:postgres /opt/databasus/.env chmod 600 /opt/databasus/.env msg_ok "Configured Databasus" +msg_info "Configuring Valkey" +cat >/etc/valkey/valkey.conf </dev/null || true +msg_ok "Created Database" + msg_info "Creating Databasus Service" cat </etc/systemd/system/databasus.service [Unit] -Description=Databasus - PostgreSQL Backup Management -After=network.target postgresql.service -Requires=postgresql.service +Description=Databasus - Database Backup Management +After=network.target postgresql.service valkey.service +Requires=postgresql.service valkey.service [Service] Type=simple -User=postgres -Group=postgres WorkingDirectory=/opt/databasus -Environment="PATH=/usr/local/bin:/usr/bin:/bin" EnvironmentFile=/opt/databasus/.env ExecStart=/opt/databasus/databasus Restart=always From 84b9763632e29075f75b5d6285ad266a72ad617d Mon Sep 17 00:00:00 2001 From: "CanbiZ (MickLesk)" <47820557+MickLesk@users.noreply.github.com> Date: Thu, 29 Jan 2026 14:26:07 +0100 Subject: [PATCH 169/228] valkey fix --- install/databasus-install.sh | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) diff --git a/install/databasus-install.sh b/install/databasus-install.sh index 
2d4f06743..82238035a 100644 --- a/install/databasus-install.sh +++ b/install/databasus-install.sh @@ -14,22 +14,15 @@ network_check update_os msg_info "Installing Dependencies" -$STD apt install -y nginx +$STD apt install -y \ + nginx \ + valkey msg_ok "Installed Dependencies" PG_VERSION="17" setup_postgresql setup_go NODE_VERSION="24" setup_nodejs -msg_info "Installing Valkey" -setup_deb822_repo "valkey" \ - "https://greensec.github.io/valkey-debian/public.key" \ - "https://greensec.github.io/valkey-debian/repo" \ - "$(lsb_release -cs)" \ - "main" -$STD apt-get install -y valkey -msg_ok "Installed Valkey" - fetch_and_deploy_gh_release "databasus" "databasus/databasus" "tarball" "latest" "/opt/databasus" msg_info "Building Databasus (Patience)" From 670e74b1367089bfb4417ea7c12733f81940dac4 Mon Sep 17 00:00:00 2001 From: "CanbiZ (MickLesk)" <47820557+MickLesk@users.noreply.github.com> Date: Thu, 29 Jan 2026 14:44:45 +0100 Subject: [PATCH 170/228] valkey fix --- install/databasus-install.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/install/databasus-install.sh b/install/databasus-install.sh index 82238035a..b2026cb1f 100644 --- a/install/databasus-install.sh +++ b/install/databasus-install.sh @@ -96,7 +96,8 @@ save "" maxmemory 256mb maxmemory-policy allkeys-lru EOF -$STD systemctl enable -q --now valkey +systemctl enable -q --now valkey-server +systemctl restart valkey-server msg_ok "Configured Valkey" msg_info "Creating Database" From aac028efda5436f2fce5ba75f11ff1e050e40ce1 Mon Sep 17 00:00:00 2001 From: Tobias <96661824+CrazyWolf13@users.noreply.github.com> Date: Thu, 29 Jan 2026 15:00:25 +0100 Subject: [PATCH 171/228] Update ebusd.sh --- ct/ebusd.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ct/ebusd.sh b/ct/ebusd.sh index 63f16f512..710c81c06 100644 --- a/ct/ebusd.sh +++ b/ct/ebusd.sh @@ -27,9 +27,10 @@ function update_script() { msg_error "No ${APP} Installation Found!" exit fi + msg_info "Updating ebusd" $STD apt update - $STD apt -y --upgrade ebusd + $STD apt --upgrade -y ebusd msg_ok "Updated ebusd" msg_ok "Updated successfully!" exit From 036c17647e6552344f258f15ee121c6cc48c7865 Mon Sep 17 00:00:00 2001 From: "CanbiZ (MickLesk)" <47820557+MickLesk@users.noreply.github.com> Date: Thu, 29 Jan 2026 15:53:16 +0100 Subject: [PATCH 172/228] fixes --- install/databasus-install.sh | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/install/databasus-install.sh b/install/databasus-install.sh index b2026cb1f..4ecf2d9fa 100644 --- a/install/databasus-install.sh +++ b/install/databasus-install.sh @@ -74,6 +74,10 @@ GOOSE_DRIVER=postgres GOOSE_DBSTRING=postgres://postgres:postgres@localhost:5432/databasus?sslmode=disable GOOSE_MIGRATION_DIR=/opt/databasus/migrations +# Valkey (Redis-compatible cache) +VALKEY_HOST=localhost +VALKEY_PORT=6379 + # Security JWT_SECRET=${JWT_SECRET} ENCRYPTION_KEY=${ENCRYPTION_KEY} @@ -101,6 +105,13 @@ systemctl restart valkey-server msg_ok "Configured Valkey" msg_info "Creating Database" +# Configure PostgreSQL to allow local password auth for databasus +PG_HBA="/etc/postgresql/17/main/pg_hba.conf" +if ! 
grep -q "databasus" "$PG_HBA"; then + sed -i '/^local\s*all\s*all/i local databasus postgres trust' "$PG_HBA" + sed -i '/^host\s*all\s*all\s*127/i host databasus postgres 127.0.0.1/32 trust' "$PG_HBA" + systemctl reload postgresql +fi $STD sudo -u postgres psql -c "CREATE DATABASE databasus;" 2>/dev/null || true msg_ok "Created Database" From eae154328f315fed9de4755f31e0730ee8a4d885 Mon Sep 17 00:00:00 2001 From: tremor021 Date: Thu, 29 Jan 2026 19:03:41 +0100 Subject: [PATCH 173/228] Add PowerDNS script --- ct/alpine-powerdns.sh | 50 ++++++++++++++++++++++++++++++++++++++ install/alpine-powerdns.sh | 34 ++++++++++++++++++++++++++ 2 files changed, 84 insertions(+) create mode 100644 ct/alpine-powerdns.sh create mode 100644 install/alpine-powerdns.sh diff --git a/ct/alpine-powerdns.sh b/ct/alpine-powerdns.sh new file mode 100644 index 000000000..470079396 --- /dev/null +++ b/ct/alpine-powerdns.sh @@ -0,0 +1,50 @@ +#!/usr/bin/env bash +source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func) +# Copyright (c) 2021-2026 community-scripts ORG +# Author: MickLesk (CanbiZ) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://github.com/orhun/rustypaste + +APP="Alpine-PowerDNS" +var_tags="${var_tags:-os;alpine;dns}" +var_cpu="${var_cpu:-1}" +var_ram="${var_ram:-256}" +var_disk="${var_disk:-4}" +var_os="${var_os:-alpine}" +var_version="${var_version:-3.23}" +var_unprivileged="${var_unprivileged:-1}" + +header_info "$APP" +variables +color +catch_errors + +function update_script() { + header_info + check_container_storage + check_container_resources + + if ! apk info -e pdns >/dev/null 2>&1; then + msg_error "No ${APP} Installation Found!" + exit + fi + + msg_info "Updating PowerDNS" + $STD apk -U upgrade + msg_ok "Updated PowerDNS" + + msg_info "Restarting Services" + $STD rc-service pdns restart + msg_ok "Restarted Services" + msg_ok "Updated successfully!" 
+ exit +} + +start +build_container +description + +msg_ok "Completed successfully!\n" +echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" +echo -e "${INFO}${YW} Access it using the following URL:${CL}" +echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:53${CL}" diff --git a/install/alpine-powerdns.sh b/install/alpine-powerdns.sh new file mode 100644 index 000000000..de78cf8a5 --- /dev/null +++ b/install/alpine-powerdns.sh @@ -0,0 +1,34 @@ +#!/usr/bin/env bash + +# Copyright (c) 2021-2026 community-scripts ORG +# Author: Slaviša Arežina (tremor021) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://www.powerdns.com/ + +source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" +color +verb_ip6 +catch_errors +setting_up_container +network_check +update_os + +msg_info "Installing PowerDNS" +$STD apk add --no-cache pdns pdns-backend-sqlite3 pdns-doc +msg_ok "Installed PowerDNS" + +msg_info "Configuring PowerDNS" +sed -i '/^# launch=$/c\launch=gsqlite3\ngsqlite3-database=/var/lib/powerdns/pdns.sqlite3' /etc/pdns/pdns.conf +mkdir /var/lib/powerdns +sqlite3 /var/lib/powerdns/pdns.sqlite3 < /usr/share/doc/pdns/schema.sqlite3.sql +chown -R pdns:pdns /var/lib/powerdns +msg_ok "Configured PowerDNS" + +msg_info "Creating Service" +$STD rc-update add pdns default +$STD rc-service pdns start +msg_ok "Created Service" + +motd_ssh +customize +cleanup_lxc From e3452d7d9ef2952fed80a9043c4b4c3b7649f9b3 Mon Sep 17 00:00:00 2001 From: tremor021 Date: Thu, 29 Jan 2026 19:26:14 +0100 Subject: [PATCH 174/228] Add PowerDNS script --- install/{alpine-powerdns.sh => alpine-powerdns-install.sh} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename install/{alpine-powerdns.sh => alpine-powerdns-install.sh} (100%) diff --git a/install/alpine-powerdns.sh b/install/alpine-powerdns-install.sh similarity index 100% rename from install/alpine-powerdns.sh rename to install/alpine-powerdns-install.sh From 302ef66520546d79c791f7305217ad484694c6a5 Mon Sep 17 00:00:00 2001 From: tremor021 Date: Thu, 29 Jan 2026 19:52:49 +0100 Subject: [PATCH 175/228] Add PowerDNS script --- ct/alpine-powerdns.sh | 6 +++--- install/alpine-powerdns-install.sh | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/ct/alpine-powerdns.sh b/ct/alpine-powerdns.sh index 470079396..21ce7f0b5 100644 --- a/ct/alpine-powerdns.sh +++ b/ct/alpine-powerdns.sh @@ -1,9 +1,9 @@ #!/usr/bin/env bash source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func) # Copyright (c) 2021-2026 community-scripts ORG -# Author: MickLesk (CanbiZ) -# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE -# Source: https://github.com/orhun/rustypaste +# Author: Slaviša Arežina (tremor021) +# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE +# Source: https://www.powerdns.com/ APP="Alpine-PowerDNS" var_tags="${var_tags:-os;alpine;dns}" diff --git a/install/alpine-powerdns-install.sh b/install/alpine-powerdns-install.sh index de78cf8a5..b3cbc6d65 100644 --- a/install/alpine-powerdns-install.sh +++ b/install/alpine-powerdns-install.sh @@ -2,7 +2,7 @@ # Copyright (c) 2021-2026 community-scripts ORG # Author: Slaviša Arežina (tremor021) -# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE # Source: https://www.powerdns.com/ source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" From 
d00e1176becfa380c968503faea4fc15c0b9defe Mon Sep 17 00:00:00 2001 From: "GitHub Actions[bot]" Date: Thu, 29 Jan 2026 18:58:55 +0000 Subject: [PATCH 176/228] chore: update github-versions.json Total versions: 20 Pinned versions: 1 Generated: 2026-01-29T18:58:55Z --- frontend/public/json/github-versions.json | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/frontend/public/json/github-versions.json b/frontend/public/json/github-versions.json index 7364cdacf..73deb25d9 100644 --- a/frontend/public/json/github-versions.json +++ b/frontend/public/json/github-versions.json @@ -1,5 +1,5 @@ { - "generated": "2026-01-29T02:41:18Z", + "generated": "2026-01-29T18:58:55Z", "versions": [ { "slug": "affine", @@ -15,6 +15,13 @@ "pinned": false, "date": "2025-12-22T04:23:45Z" }, + { + "slug": "anytype", + "repo": "grishy/any-sync-bundle", + "version": "v1.2.1-2025-12-10", + "pinned": false, + "date": "2025-12-24T20:40:15Z" + }, { "slug": "databasus", "repo": "databasus/databasus", @@ -25,9 +32,9 @@ { "slug": "ente", "repo": "ente-io/ente", - "version": "photos-v1.3.0", + "version": "photos-v1.3.7", "pinned": false, - "date": "2026-01-12T06:33:12Z" + "date": "2026-01-29T17:20:41Z" }, { "slug": "frigate", @@ -81,9 +88,9 @@ { "slug": "opencloud", "repo": "opencloud-eu/opencloud", - "version": "v5.0.0", + "version": "v5.0.1", "pinned": true, - "date": "2026-01-26T15:58:00Z" + "date": "2026-01-28T15:08:23Z" }, { "slug": "piler", From 2c9fb7b6f926212b6c14ef32ca5ea615b31b2fc7 Mon Sep 17 00:00:00 2001 From: juronja <101410098+juronja@users.noreply.github.com> Date: Thu, 29 Jan 2026 19:01:49 +0000 Subject: [PATCH 177/228] removed comments --- vm/truenas-vm.sh | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/vm/truenas-vm.sh b/vm/truenas-vm.sh index b81525ee2..bc9ab24b1 100644 --- a/vm/truenas-vm.sh +++ b/vm/truenas-vm.sh @@ -73,9 +73,6 @@ function error_handler() { cleanup_vmid } -# Scrapes the TrueNAS download portal for ISO paths from the current and previous year, -# filtering out nightlies/alphas and returning the latest stable releases for each major -# version along with any beta or RC pre-releases. function truenas_iso_lookup() { local BASE_URL="https://download.truenas.com" local current_year=$(date +%y) @@ -184,13 +181,10 @@ function check_root() { fi } -# This function checks the version of Proxmox Virtual Environment (PVE) and exits if the version is not supported. -# Supported: Proxmox VE 8.0.x – 8.9.x and 9.0 – 9.1 pve_check() { local PVE_VER PVE_VER="$(pveversion | awk -F'/' '{print $2}' | awk -F'-' '{print $1}')" - # Check for Proxmox VE 8.x: allow 8.0–8.9 if [[ "$PVE_VER" =~ ^8\.([0-9]+) ]]; then local MINOR="${BASH_REMATCH[1]}" if ((MINOR < 0 || MINOR > 9)); then @@ -201,7 +195,6 @@ pve_check() { return 0 fi - # Check for Proxmox VE 9.x: allow 9.0–9.1 if [[ "$PVE_VER" =~ ^9\.([0-9]+) ]]; then local MINOR="${BASH_REMATCH[1]}" if ((MINOR < 0 || MINOR > 1)); then @@ -212,7 +205,6 @@ pve_check() { return 0 fi - # All other unsupported versions msg_error "This version of Proxmox VE is not supported." msg_error "Supported versions: Proxmox VE 8.0 – 8.x or 9.0 – 9.1" exit 1 @@ -299,7 +291,6 @@ function advanced_settings() { fi done - # Fetching iso list from TrueNAS downloads for whiptail radiolist ISOARRAY=() while read -r ISOPATH; do FILENAME=$(basename "$ISOPATH") @@ -504,7 +495,6 @@ msg_ok "Using ${CL}${BL}$STORAGE${CL} ${GN}for Storage Location." msg_ok "Virtual Machine ID is ${CL}${BL}$VMID${CL}." 
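# (hedged aside on the fallback a few lines below: the pipeline drops pre-release
#  ISO paths and lets version sort pick the newest remaining one. Sketch with
#  hypothetical file names:)
#    printf '%s\n' TrueNAS-SCALE-24.10.2.iso TrueNAS-SCALE-25.04.1.iso TrueNAS-SCALE-25.10-BETA.1.iso \
#      | grep -vE 'RC|BETA' | sort -V | tail -n 1    # -> TrueNAS-SCALE-25.04.1.iso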
if [ -z "${SELECTED_ISO:-}" ]; then - # Fallback: Find the latest stable release only (excluding RC/BETA for safety) SELECTED_ISO=$(truenas_iso_lookup | grep -vE 'RC|BETA' | sort -V | tail -n 1) if [ -z "$SELECTED_ISO" ]; then @@ -535,7 +525,6 @@ qm create "$VMID" -machine q35 -bios ovmf -agent enabled=1 -tablet 0 -localtime -scsihw virtio-scsi-single -cdrom local:iso/$ISO_NAME -vga virtio >/dev/null msg_ok "Created VM shell" -# Optional step to import onboard disks if [ "$IMPORT_DISKS" == "yes" ]; then msg_info "Importing onboard disks" DISKARRAY=() From bf57d6693995c8b279ab2b99f736833d03388746 Mon Sep 17 00:00:00 2001 From: juronja <101410098+juronja@users.noreply.github.com> Date: Thu, 29 Jan 2026 20:01:47 +0000 Subject: [PATCH 178/228] Left only one notice with a link to the discussion --- frontend/public/json/truenas-vm.json | 18 +----------------- 1 file changed, 1 insertion(+), 17 deletions(-) diff --git a/frontend/public/json/truenas-vm.json b/frontend/public/json/truenas-vm.json index d4675b55f..7f11e7564 100644 --- a/frontend/public/json/truenas-vm.json +++ b/frontend/public/json/truenas-vm.json @@ -33,24 +33,8 @@ }, "notes": [ { - "text": "The default install uses the latest stable release. Please use advanced install for more options.", + "text": "Once the script finishes, proceed with the OS installation via the console. For more details, please refer to this discussion: `https://github.com/community-scripts/ProxmoxVE/discussions/11344`", "type": "info" - }, - { - "text": "8 GB of RAM is the minimum requirement, but 16 GB+ is recommended for optimal performance.", - "type": "info" - }, - { - "text": "After installation, you will be prompted to unmount the media. To do this: Go to the VM's Hardware tab > select the CD/DVD Drive > Edit > select 'Do not use any media'", - "type": "info" - }, - { - "text": "While you can import onboard disks during install, it is highly recommended to use an HBA to pass through disks for production environments.", - "type": "warning" - }, - { - "text": "ECC RAM is strongly recommended to ensure data integrity, as ZFS checksumming can be compromised by bad data in RAM before the data is written to the pool.", - "type": "warning" } ] } From ed1dc2f3635c41de46d6167b66f4fedee5e52f41 Mon Sep 17 00:00:00 2001 From: "GitHub Actions[bot]" Date: Fri, 30 Jan 2026 07:00:55 +0000 Subject: [PATCH 179/228] chore: update github-versions.json Total versions: 20 Pinned versions: 1 Generated: 2026-01-30T07:00:55Z --- frontend/public/json/github-versions.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frontend/public/json/github-versions.json b/frontend/public/json/github-versions.json index 73deb25d9..3be9decfb 100644 --- a/frontend/public/json/github-versions.json +++ b/frontend/public/json/github-versions.json @@ -1,5 +1,5 @@ { - "generated": "2026-01-29T18:58:55Z", + "generated": "2026-01-30T07:00:55Z", "versions": [ { "slug": "affine", From 1ff16293dee578a8bfa6f3b7cc6fd8ce3ca9938e Mon Sep 17 00:00:00 2001 From: Joerg Heinemann Date: Fri, 30 Jan 2026 08:07:54 +0100 Subject: [PATCH 180/228] Remove ebusd configuration instructions Removed configuration instructions for ebusd from the installation script. 
--- install/ebusd-install.sh | 35 ----------------------------------- 1 file changed, 35 deletions(-) diff --git a/install/ebusd-install.sh b/install/ebusd-install.sh index 187cc2265..f7e58010c 100644 --- a/install/ebusd-install.sh +++ b/install/ebusd-install.sh @@ -25,41 +25,6 @@ $STD apt install -y ebusd systemctl enable -q --now ebusd msg_ok "Installed ebusd" -cat <~/ebusd-configuation-instructions.txt -Configuration instructions: - - 1. Edit "/etc/default/ebusd" if necessary (especially if your device is not "/dev/ttyUSB0") - 2. Start the daemon with "systemctl start ebusd" - 3. Check the log file "/var/log/ebusd.log" - 4. Make the daemon autostart with "systemctl enable ebusd" - -Working "/etc/default/ebusd" options for "ebus adapter shield v5": - -EBUSD_OPTS=" - --pidfile=/run/ebusd.pid - --latency=100 - --scanconfig - --configpath=https://cfg.ebusd.eu/ - --accesslevel=* - --pollinterval=30 - --device=ens:XXX.XXX.XXX.XXX:9999 - --mqtthost=XXX.XXX.XXX.XXX - --mqttport=1883 - --mqttuser=XXXXXX - --mqttpass=XXXXXX - --mqttint=/etc/ebusd/mqtt-hassio.cfg - --mqttjson - --mqttlog - --mqttretain - --mqtttopic=ebusd - --log=all:notice - --log=main:notice - --log=bus:notice - --log=update:notice - --log=network:notice - --log=other:notice" -EOF - motd_ssh customize cleanup_lxc From c9da138a6980eaf7bc7738f23b9e5ad94583ec03 Mon Sep 17 00:00:00 2001 From: Joerg Heinemann Date: Fri, 30 Jan 2026 08:13:08 +0100 Subject: [PATCH 181/228] Update notes with new post installation link --- frontend/public/json/ebusd.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frontend/public/json/ebusd.json b/frontend/public/json/ebusd.json index e946af464..862d24153 100644 --- a/frontend/public/json/ebusd.json +++ b/frontend/public/json/ebusd.json @@ -33,7 +33,7 @@ }, "notes": [ { - "text": "Show configuration instructions: `cat ~/ebusd-configuation-instructions.txt`", + "text": "For required post installation actions, checkout: `https://github.com/community-scripts/ProxmoxVE/discussions/11352`", "type": "info" } ] From 738f536cbfa620e0f65e2167c100f87da67918af Mon Sep 17 00:00:00 2001 From: Michel Roegl-Brunner <73236783+michelroegl-brunner@users.noreply.github.com> Date: Fri, 30 Jan 2026 09:05:05 +0100 Subject: [PATCH 182/228] Apply suggestion from @greptile-apps[bot] Co-authored-by: greptile-apps[bot] <165735046+greptile-apps[bot]@users.noreply.github.com> --- .vscode/settings.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.vscode/settings.json b/.vscode/settings.json index 779bbdd17..91a1a8f4f 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -3,7 +3,7 @@ "*.func": "shellscript" }, "[shellscript]": { - "editor.defaultFormatter": "mkhl.shfmt", + "editor.defaultFormatter": "mkhl.shfmt" }, "editor.codeActionsOnSave": { "source.fixAll": "never", From ae7a410638a1ee100210f302fb416ea98c61427f Mon Sep 17 00:00:00 2001 From: Michel Roegl-Brunner <73236783+michelroegl-brunner@users.noreply.github.com> Date: Fri, 30 Jan 2026 09:05:18 +0100 Subject: [PATCH 183/228] Apply suggestion from @greptile-apps[bot] Co-authored-by: greptile-apps[bot] <165735046+greptile-apps[bot]@users.noreply.github.com> --- .vscode/settings.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.vscode/settings.json b/.vscode/settings.json index 91a1a8f4f..fed1b294f 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -6,7 +6,7 @@ "editor.defaultFormatter": "mkhl.shfmt" }, "editor.codeActionsOnSave": { - "source.fixAll": "never", + "source.fixAll": "never" 
}, "shellcheck.useWorkspaceRootAsCwd": true, } From 0dd93a4df029dfb23fbcc826480fd95728341f88 Mon Sep 17 00:00:00 2001 From: Michel Roegl-Brunner Date: Fri, 30 Jan 2026 09:12:11 +0100 Subject: [PATCH 184/228] Fix CI/CD --- frontend/src/__tests__/public/validate-json.test.ts | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/frontend/src/__tests__/public/validate-json.test.ts b/frontend/src/__tests__/public/validate-json.test.ts index 1ab52db68..f0ab7dde1 100644 --- a/frontend/src/__tests__/public/validate-json.test.ts +++ b/frontend/src/__tests__/public/validate-json.test.ts @@ -7,16 +7,17 @@ console.log('Current directory: ' + process.cwd()); const jsonDir = "public/json"; const metadataFileName = "metadata.json"; const versionsFileName = "versions.json"; +const githubVersionsFileName = "github-versions.json"; const encoding = "utf-8"; const fileNames = (await fs.readdir(jsonDir)) - .filter((fileName) => fileName !== metadataFileName && fileName !== versionsFileName); + .filter((fileName) => fileName !== metadataFileName && fileName !== versionsFileName && fileName !== githubVersionsFileName); describe.each(fileNames)("%s", async (fileName) => { let script: Script; beforeAll(async () => { - const filePath = path.resolve(jsonDir, fileName); + const filePath = path.resolve(jsonDir, fileName); const fileContent = await fs.readFile(filePath, encoding) script = JSON.parse(fileContent); }) @@ -40,7 +41,7 @@ describe(`${metadataFileName}`, async () => { let metadata: Metadata; beforeAll(async () => { - const filePath = path.resolve(jsonDir, metadataFileName); + const filePath = path.resolve(jsonDir, metadataFileName); const fileContent = await fs.readFile(filePath, encoding) metadata = JSON.parse(fileContent); }) @@ -48,9 +49,9 @@ describe(`${metadataFileName}`, async () => { // TODO: create zod schema for metadata. 
Move zod schemas to /lib/types.ts assert(metadata.categories.length > 0); metadata.categories.forEach((category) => { - assert.isString(category.name) - assert.isNumber(category.id) - assert.isNumber(category.sort_order) + assert.isString(category.name) + assert.isNumber(category.id) + assert.isNumber(category.sort_order) }); }); }) From ff2cd48038a8058c713f2ce2847769703b21d83f Mon Sep 17 00:00:00 2001 From: GoldenSpring Date: Fri, 30 Jan 2026 11:27:18 +0300 Subject: [PATCH 185/228] added the password generation for admin --- frontend/public/json/sonobarr.json | 2 +- install/sonobarr-install.sh | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/frontend/public/json/sonobarr.json b/frontend/public/json/sonobarr.json index 607c53216..965d6aee0 100644 --- a/frontend/public/json/sonobarr.json +++ b/frontend/public/json/sonobarr.json @@ -33,7 +33,7 @@ }, "notes": [ { - "text": "Change the default admin credentials", + "text": "Default generated admin password is in the env file (sonobarr_superadmin_password)", "type": "info" } ] diff --git a/install/sonobarr-install.sh b/install/sonobarr-install.sh index 0bdb4c036..7decf7c94 100644 --- a/install/sonobarr-install.sh +++ b/install/sonobarr-install.sh @@ -22,6 +22,7 @@ $STD uv pip install --no-cache-dir -r /opt/sonobarr/requirements.txt mkdir -p /etc/sonobarr mv /opt/sonobarr/.sample-env /etc/sonobarr/.env sed -i "s/^secret_key=.*/secret_key=$(openssl rand -hex 16)/" /etc/sonobarr/.env +sed -i "s/^sonobarr_superadmin_password=.*/secret_key=$(openssl rand -hex 16)/" /etc/sonobarr/.env echo "release_version=$(cat ~/.sonobarr)" >>/etc/sonobarr/.env echo "sonobarr_config_dir=/etc/sonobarr" >>/etc/sonobarr.env msg_ok "Set up sonobarr" From dc5c1d3ef06a0739bff2cb0706585f1a927abed5 Mon Sep 17 00:00:00 2001 From: "CanbiZ (MickLesk)" <47820557+MickLesk@users.noreply.github.com> Date: Fri, 30 Jan 2026 09:47:38 +0100 Subject: [PATCH 186/228] Improve locale setup and database user privileges Add command to grant superuser, createrole, and createdb privileges to the postgres user during Databasus installation. Enhance LXC container customization by installing locales for Devuan templates and only configuring locale if locale.gen exists, improving compatibility with minimal templates. --- install/databasus-install.sh | 1 + misc/build.func | 30 ++++++++++++++---------------- 2 files changed, 15 insertions(+), 16 deletions(-) diff --git a/install/databasus-install.sh b/install/databasus-install.sh index 4ecf2d9fa..0b068d865 100644 --- a/install/databasus-install.sh +++ b/install/databasus-install.sh @@ -113,6 +113,7 @@ if ! 
grep -q "databasus" "$PG_HBA"; then systemctl reload postgresql fi $STD sudo -u postgres psql -c "CREATE DATABASE databasus;" 2>/dev/null || true +$STD sudo -u postgres psql -c "ALTER USER postgres WITH SUPERUSER CREATEROLE CREATEDB;" 2>/dev/null || true msg_ok "Created Database" msg_info "Creating Databasus Service" diff --git a/misc/build.func b/misc/build.func index 7e5e0a794..20a20e985 100644 --- a/misc/build.func +++ b/misc/build.func @@ -3870,18 +3870,7 @@ EOF sleep 3 fi - # Continue with standard container setup msg_info "Customizing LXC Container" - - # # Install GPU userland if configured - # if [[ "${ENABLE_VAAPI:-0}" == "1" ]]; then - # install_gpu_userland "VAAPI" - # fi - - # if [[ "${ENABLE_NVIDIA:-0}" == "1" ]]; then - # install_gpu_userland "NVIDIA" - # fi - # Continue with standard container setup if [ "$var_os" == "alpine" ]; then sleep 3 @@ -3893,11 +3882,20 @@ EOF' else sleep 3 LANG=${LANG:-en_US.UTF-8} - pct exec "$CTID" -- bash -c "sed -i \"/$LANG/ s/^# //\" /etc/locale.gen" - pct exec "$CTID" -- bash -c "locale_line=\$(grep -v '^#' /etc/locale.gen | grep -E '^[a-zA-Z]' | awk '{print \$1}' | head -n 1) && \ - echo LANG=\$locale_line >/etc/default/locale && \ - locale-gen >/dev/null && \ - export LANG=\$locale_line" + + # Devuan templates don't include locales package by default - install it first + if [ "$var_os" == "devuan" ]; then + pct exec "$CTID" -- bash -c "apt-get update >/dev/null && apt-get install -y locales >/dev/null" || true + fi + + # Only configure locale if locale.gen exists (some minimal templates don't have it) + if pct exec "$CTID" -- test -f /etc/locale.gen 2>/dev/null; then + pct exec "$CTID" -- bash -c "sed -i \"/$LANG/ s/^# //\" /etc/locale.gen" + pct exec "$CTID" -- bash -c "locale_line=\$(grep -v '^#' /etc/locale.gen | grep -E '^[a-zA-Z]' | awk '{print \$1}' | head -n 1) && \ + echo LANG=\$locale_line >/etc/default/locale && \ + locale-gen >/dev/null && \ + export LANG=\$locale_line" + fi if [[ -z "${tz:-}" ]]; then tz=$(timedatectl show --property=Timezone --value 2>/dev/null || echo "UTC") From edac4a229334458e25ae6190db3de5ac750f9a4b Mon Sep 17 00:00:00 2001 From: "CanbiZ (MickLesk)" <47820557+MickLesk@users.noreply.github.com> Date: Fri, 30 Jan 2026 09:59:11 +0100 Subject: [PATCH 187/228] Update install.func --- misc/install.func | 34 +++++++++++++++++++--------------- 1 file changed, 19 insertions(+), 15 deletions(-) diff --git a/misc/install.func b/misc/install.func index 0a26d7d70..1841526a8 100644 --- a/misc/install.func +++ b/misc/install.func @@ -907,23 +907,27 @@ EOF # Backup original inittab cp /etc/inittab /etc/inittab.bak 2>/dev/null || true - # First, enable autologin on tty1 (for direct access) - sed -i 's|^1:[0-9]*:respawn:.*/\(a\?getty\).*|1:2345:respawn:/sbin/agetty --autologin root --noclear tty1 38400 linux|' /etc/inittab + # Enable autologin on tty1 (for direct access) - handle various formats + # Devuan uses format: 1:2345:respawn:/sbin/getty 38400 tty1 + sed -i 's|^\(1:[0-9]*:respawn:\).*getty.*tty1.*|1:2345:respawn:/sbin/agetty --autologin root --noclear tty1 38400 linux|' /etc/inittab - # CRITICAL: Add console entry for LXC - this is what pct console uses! - # Check if there's already a console getty entry - if ! 
grep -qE '^[^#].*respawn.*console' /etc/inittab; then - # Add new console entry for LXC - echo "" >>/etc/inittab - echo "# LXC console autologin (added by community-scripts)" >>/etc/inittab - echo "co:2345:respawn:/sbin/agetty --autologin root --noclear console 115200,38400,9600 linux" >>/etc/inittab - else - # Enable autologin on existing console entry - sed -i 's|^[^#]*:[0-9]*:respawn:.*/\(a\?getty\).*console.*|co:2345:respawn:/sbin/agetty --autologin root --noclear console 115200,38400,9600 linux|' /etc/inittab - fi + # CRITICAL: Add/replace console entry for LXC - this is what pct console uses! + # Remove any existing console entries first (commented or not) + sed -i '/^[^#]*:.*:respawn:.*getty.*console/d' /etc/inittab + sed -i '/^# LXC console autologin/d' /etc/inittab - # Force a reload of inittab - try multiple methods - telinit q &>/dev/null || init q &>/dev/null || kill -1 1 &>/dev/null || true + # Add new console entry for LXC at the end + echo "" >>/etc/inittab + echo "# LXC console autologin (added by community-scripts)" >>/etc/inittab + echo "co:2345:respawn:/sbin/agetty --autologin root --noclear console 115200 linux" >>/etc/inittab + + # Force a reload of inittab and respawn console getty + # Kill existing getty on console to force respawn with new settings + pkill -9 -f 'getty.*console' &>/dev/null || true + pkill -9 -f 'agetty.*console' &>/dev/null || true + + # Reload inittab - try multiple methods + telinit q &>/dev/null || init q &>/dev/null || kill -HUP 1 &>/dev/null || true fi touch /root/.hushlogin ;; From 773e748d95415570b408dfe62967987d22853bf3 Mon Sep 17 00:00:00 2001 From: juronja <101410098+juronja@users.noreply.github.com> Date: Fri, 30 Jan 2026 09:18:42 +0000 Subject: [PATCH 188/228] imported disks now have a serial ID. TrueNAS needs it. --- vm/truenas-vm.sh | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/vm/truenas-vm.sh b/vm/truenas-vm.sh index 8aee3a72e..4844527fa 100644 --- a/vm/truenas-vm.sh +++ b/vm/truenas-vm.sh @@ -539,7 +539,10 @@ if [ "$IMPORT_DISKS" == "yes" ]; then for SELECTION in $SELECTIONS; do ((++SCSI_NR)) - qm set $VMID --scsi$SCSI_NR /dev/disk/by-id/$SELECTION + + ID_SERIAL=$(echo "$SELECTION" | rev | cut -d'_' -f1 | rev) + + qm set $VMID --scsi$SCSI_NR /dev/disk/by-id/$SELECTION,serial=$ID_SERIAL done msg_ok "Disks imported successfully" fi From abb15a6ec8e00c647bbfbc10533e791884b95c1f Mon Sep 17 00:00:00 2001 From: "CanbiZ (MickLesk)" <47820557+MickLesk@users.noreply.github.com> Date: Fri, 30 Jan 2026 10:21:55 +0100 Subject: [PATCH 189/228] devuan fixes --- misc/install.func | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/misc/install.func b/misc/install.func index 1841526a8..9aa13be45 100644 --- a/misc/install.func +++ b/misc/install.func @@ -902,7 +902,7 @@ EOF sysvinit) # Devuan/older systems - modify inittab for auto-login # Devuan 5 (daedalus) uses SysVinit with various inittab formats - # CRITICAL: LXC uses /dev/console, NOT tty1! 
pct console connects to console device + # LXC can use /dev/console OR /dev/tty1 depending on how pct console connects if [[ -f /etc/inittab ]]; then # Backup original inittab cp /etc/inittab /etc/inittab.bak 2>/dev/null || true @@ -921,10 +921,12 @@ EOF echo "# LXC console autologin (added by community-scripts)" >>/etc/inittab echo "co:2345:respawn:/sbin/agetty --autologin root --noclear console 115200 linux" >>/etc/inittab - # Force a reload of inittab and respawn console getty - # Kill existing getty on console to force respawn with new settings - pkill -9 -f 'getty.*console' &>/dev/null || true - pkill -9 -f 'agetty.*console' &>/dev/null || true + # Force a reload of inittab and respawn ALL getty processes + # Kill ALL getty processes to force respawn with new autologin settings + pkill -9 -f '[ag]etty' &>/dev/null || true + + # Small delay to let init notice the dead processes + sleep 1 # Reload inittab - try multiple methods telinit q &>/dev/null || init q &>/dev/null || kill -HUP 1 &>/dev/null || true From e43427548eb4dd184a5140ba5a6d9138f58c64b0 Mon Sep 17 00:00:00 2001 From: juronja <101410098+juronja@users.noreply.github.com> Date: Fri, 30 Jan 2026 09:31:47 +0000 Subject: [PATCH 190/228] added disk size placeholder and changed boot diks to sata --- vm/truenas-vm.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/vm/truenas-vm.sh b/vm/truenas-vm.sh index 4844527fa..5e6d091e3 100644 --- a/vm/truenas-vm.sh +++ b/vm/truenas-vm.sh @@ -308,7 +308,7 @@ function advanced_settings() { exit-script fi - if DISK_SIZE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Disk Size in GiB (e.g., 10, 20)" 8 58 "$DISK_SIZE" --title "DISK SIZE" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if DISK_SIZE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Disk Size in GiB (e.g., 10, 20)" 8 58 16 "$DISK_SIZE" --title "DISK SIZE" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then DISK_SIZE=$(echo "$DISK_SIZE" | tr -d ' ') if [[ "$DISK_SIZE" =~ ^[0-9]+$ ]]; then echo -e "${DISKSIZE}${BOLD}${DGN}Disk Size: ${BGN}$DISK_SIZE${CL}" @@ -522,7 +522,7 @@ msg_info "Creating TrueNAS VM shell" qm create "$VMID" -machine q35 -bios ovmf -agent enabled=1 -tablet 0 -localtime 1 -cpu "$CPU_TYPE" \ -cores "$CORE_COUNT" -memory "$RAM_SIZE" -balloon 0 -name "$HN" -tags community-script \ -net0 "virtio,bridge=$BRG,macaddr=$MAC$VLAN$MTU" -onboot 1 -ostype l26 \ - -efidisk0 $STORAGE:1,efitype=4m,pre-enrolled-keys=0 -scsi0 $STORAGE:$DISK_SIZE,ssd=1,iothread=on \ + -efidisk0 $STORAGE:1,efitype=4m,pre-enrolled-keys=0 -sata0 $STORAGE:$DISK_SIZE,ssd=1,iothread=on \ -scsihw virtio-scsi-single -cdrom local:iso/$ISO_NAME -vga virtio >/dev/null msg_ok "Created VM shell" From 05416f9fe5bdc61db339209941f787d1bd2d8a30 Mon Sep 17 00:00:00 2001 From: juronja <101410098+juronja@users.noreply.github.com> Date: Fri, 30 Jan 2026 09:39:52 +0000 Subject: [PATCH 191/228] removing iothread and adding serial ID trim for robustness --- vm/truenas-vm.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/vm/truenas-vm.sh b/vm/truenas-vm.sh index 5e6d091e3..d7dcf6763 100644 --- a/vm/truenas-vm.sh +++ b/vm/truenas-vm.sh @@ -522,7 +522,7 @@ msg_info "Creating TrueNAS VM shell" qm create "$VMID" -machine q35 -bios ovmf -agent enabled=1 -tablet 0 -localtime 1 -cpu "$CPU_TYPE" \ -cores "$CORE_COUNT" -memory "$RAM_SIZE" -balloon 0 -name "$HN" -tags community-script \ -net0 "virtio,bridge=$BRG,macaddr=$MAC$VLAN$MTU" -onboot 1 -ostype l26 \ - -efidisk0 
$STORAGE:1,efitype=4m,pre-enrolled-keys=0 -sata0 $STORAGE:$DISK_SIZE,ssd=1,iothread=on \ + -efidisk0 $STORAGE:1,efitype=4m,pre-enrolled-keys=0 -sata0 $STORAGE:$DISK_SIZE,ssd=1 \ -scsihw virtio-scsi-single -cdrom local:iso/$ISO_NAME -vga virtio >/dev/null msg_ok "Created VM shell" @@ -541,6 +541,7 @@ if [ "$IMPORT_DISKS" == "yes" ]; then ((++SCSI_NR)) ID_SERIAL=$(echo "$SELECTION" | rev | cut -d'_' -f1 | rev) + ID_SERIAL=${ID_SERIAL:0:20} qm set $VMID --scsi$SCSI_NR /dev/disk/by-id/$SELECTION,serial=$ID_SERIAL done From 162f62daa22813b71e8b16b11486caa39f86a2c9 Mon Sep 17 00:00:00 2001 From: juronja <101410098+juronja@users.noreply.github.com> Date: Fri, 30 Jan 2026 10:07:10 +0000 Subject: [PATCH 192/228] reversed the disk placeholder size --- vm/truenas-vm.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vm/truenas-vm.sh b/vm/truenas-vm.sh index d7dcf6763..295dc6954 100644 --- a/vm/truenas-vm.sh +++ b/vm/truenas-vm.sh @@ -308,7 +308,7 @@ function advanced_settings() { exit-script fi - if DISK_SIZE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Disk Size in GiB (e.g., 10, 20)" 8 58 16 "$DISK_SIZE" --title "DISK SIZE" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if DISK_SIZE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Disk Size in GiB (e.g., 10, 20)" 8 58 "$DISK_SIZE" --title "DISK SIZE" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then DISK_SIZE=$(echo "$DISK_SIZE" | tr -d ' ') if [[ "$DISK_SIZE" =~ ^[0-9]+$ ]]; then echo -e "${DISKSIZE}${BOLD}${DGN}Disk Size: ${BGN}$DISK_SIZE${CL}" From 9d842ef9a3097875bf70f8ba7339dd644a76036a Mon Sep 17 00:00:00 2001 From: "CanbiZ (MickLesk)" <47820557+MickLesk@users.noreply.github.com> Date: Fri, 30 Jan 2026 14:52:51 +0100 Subject: [PATCH 193/228] Improve Pixelfed install script and env setup Added libvips42 to dependencies and expanded PHP modules for better compatibility. Replaced sequential .env modifications with a full .env template for clarity and completeness. Improved status messages for consistency and removed credential file generation for security. 
--- install/pixelfed-install.sh | 117 ++++++++++++++++++++++-------------- 1 file changed, 72 insertions(+), 45 deletions(-) diff --git a/install/pixelfed-install.sh b/install/pixelfed-install.sh index 923068139..58f480a50 100644 --- a/install/pixelfed-install.sh +++ b/install/pixelfed-install.sh @@ -21,7 +21,8 @@ $STD apt-get install -y \ jpegoptim \ optipng \ pngquant \ - gifsicle + gifsicle \ + libvips42 msg_ok "Installed Dependencies" msg_info "Creating Pixelfed User" @@ -32,7 +33,7 @@ msg_ok "Created Pixelfed User" PG_VERSION="17" setup_postgresql PG_DB_NAME="pixelfed" PG_DB_USER="pixelfed" setup_postgresql_db -PHP_VERSION="8.4" PHP_FPM="YES" PHP_MODULE="bcmath,ctype,exif,imagick,pgsql,redis,tokenizer" PHP_UPLOAD_MAX_FILESIZE="500M" PHP_POST_MAX_SIZE="500M" PHP_MAX_EXECUTION_TIME="600" setup_php +PHP_VERSION="8.4" PHP_FPM="YES" PHP_MODULE="bcmath,ctype,curl,exif,gd,imagick,intl,mbstring,pgsql,redis,xml,zip" PHP_UPLOAD_MAX_FILESIZE="500M" PHP_POST_MAX_SIZE="500M" PHP_MAX_EXECUTION_TIME="600" setup_php setup_composer msg_info "Configuring Redis" @@ -40,7 +41,7 @@ REDIS_PASS=$(openssl rand -base64 18 | tr -dc 'a-zA-Z0-9' | head -c13) sed -i "s/^# requirepass foobared/requirepass $REDIS_PASS/" /etc/redis/redis.conf sed -i "s/^requirepass .*/requirepass $REDIS_PASS/" /etc/redis/redis.conf systemctl restart redis-server -msg_ok "Redis configured" +msg_ok "Configured Redis" msg_info "Configuring PHP-FPM Pool" cp /etc/php/8.4/fpm/pool.d/www.conf /etc/php/8.4/fpm/pool.d/pixelfed.conf @@ -51,37 +52,82 @@ sed -i 's|^listen = .*|listen = /run/php/php8.4-fpm-pixelfed.sock|' /etc/php/8.4 sed -i 's/^listen.owner = .*/listen.owner = www-data/' /etc/php/8.4/fpm/pool.d/pixelfed.conf sed -i 's/^listen.group = .*/listen.group = www-data/' /etc/php/8.4/fpm/pool.d/pixelfed.conf systemctl restart php8.4-fpm -msg_ok "PHP-FPM Pool configured" +msg_ok "Configured PHP-FPM Pool" fetch_and_deploy_gh_release "pixelfed" "pixelfed/pixelfed" "tarball" "latest" "/opt/pixelfed" msg_info "Installing Pixelfed (Patience)" cd /opt/pixelfed -cp .env.example .env -sed -i "s|APP_URL=.*|APP_URL=http://${LOCAL_IP}|" .env -sed -i "s|APP_DOMAIN=.*|APP_DOMAIN=${LOCAL_IP}|" .env -sed -i "s|ADMIN_DOMAIN=.*|ADMIN_DOMAIN=${LOCAL_IP}|" .env -sed -i "s|SESSION_DOMAIN=.*|SESSION_DOMAIN=${LOCAL_IP}|" .env -sed -i "s|DB_CONNECTION=.*|DB_CONNECTION=pgsql|" .env -sed -i "s|DB_HOST=.*|DB_HOST=127.0.0.1|" .env -sed -i "s|DB_PORT=.*|DB_PORT=5432|" .env -sed -i "s|DB_DATABASE=.*|DB_DATABASE=${PG_DB_NAME}|" .env -sed -i "s|DB_USERNAME=.*|DB_USERNAME=${PG_DB_USER}|" .env -sed -i "s|DB_PASSWORD=.*|DB_PASSWORD=${PG_DB_PASS}|" .env -sed -i "s|REDIS_HOST=.*|REDIS_HOST=127.0.0.1|" .env -sed -i "s|REDIS_PASSWORD=.*|REDIS_PASSWORD=${REDIS_PASS}|" .env -sed -i "s|REDIS_PORT=.*|REDIS_PORT=6379|" .env -sed -i "s|ACTIVITY_PUB=.*|ACTIVITY_PUB=true|" .env -sed -i "s|AP_REMOTE_FOLLOW=.*|AP_REMOTE_FOLLOW=true|" .env -sed -i "s|OAUTH_ENABLED=.*|OAUTH_ENABLED=true|" .env -echo "SESSION_SECURE_COOKIE=false" >>.env +cat </opt/pixelfed/.env +APP_NAME="Pixelfed" +APP_ENV="production" +APP_DEBUG="false" +APP_URL=http://${LOCAL_IP} +APP_DOMAIN=${LOCAL_IP} +ADMIN_DOMAIN=${LOCAL_IP} +SESSION_DOMAIN=${LOCAL_IP} +TRUST_PROXIES="*" + +OPEN_REGISTRATION="false" +ENFORCE_EMAIL_VERIFICATION="false" +PF_MAX_USERS="1000" +OAUTH_ENABLED="true" +ENABLE_CONFIG_CACHE="true" +INSTANCE_DISCOVER_PUBLIC="true" + +PF_OPTIMIZE_IMAGES="true" +IMAGE_QUALITY="80" +MAX_PHOTO_SIZE="15000" +MAX_CAPTION_LENGTH="500" +MAX_ALBUM_LENGTH="4" + +DB_CONNECTION="pgsql" +DB_HOST="127.0.0.1" 
+DB_PORT="5432" +DB_DATABASE="${PG_DB_NAME}" +DB_USERNAME="${PG_DB_USER}" +DB_PASSWORD="${PG_DB_PASS}" + +REDIS_CLIENT="predis" +REDIS_SCHEME="tcp" +REDIS_HOST="127.0.0.1" +REDIS_PASSWORD="${REDIS_PASS}" +REDIS_PORT="6379" + +SESSION_DRIVER="database" +CACHE_DRIVER="redis" +QUEUE_DRIVER="redis" +BROADCAST_DRIVER="log" +LOG_CHANNEL="stack" +HORIZON_PREFIX="horizon-" + +ACTIVITY_PUB="true" +AP_REMOTE_FOLLOW="true" +AP_INBOX="true" +AP_OUTBOX="true" +AP_SHAREDINBOX="true" + +EXP_EMC="true" + +MAIL_DRIVER="log" +MAIL_HOST="smtp.mailtrap.io" +MAIL_PORT="2525" +MAIL_USERNAME="null" +MAIL_PASSWORD="null" +MAIL_ENCRYPTION="null" +MAIL_FROM_ADDRESS="pixelfed@example.com" +MAIL_FROM_NAME="Pixelfed" + +PF_ENABLE_CLOUD="false" +FILESYSTEM_CLOUD="s3" +SESSION_SECURE_COOKIE="false" +EOF chown -R pixelfed:pixelfed /opt/pixelfed chmod -R 755 /opt/pixelfed chmod -R 775 /opt/pixelfed/storage /opt/pixelfed/bootstrap/cache export COMPOSER_ALLOW_SUPERUSER=1 -cd /opt/pixelfed $STD composer install --no-dev --no-ansi --no-interaction --optimize-autoloader sudo -u pixelfed php artisan key:generate @@ -94,7 +140,7 @@ $STD sudo -u pixelfed php artisan view:cache $STD sudo -u pixelfed php artisan config:cache $STD sudo -u pixelfed php artisan instance:actor $STD sudo -u pixelfed php artisan horizon:install -msg_ok "Pixelfed installed" +msg_ok "Installed Pixelfed" msg_info "Configuring Nginx" cat <<'EOF' >/etc/nginx/sites-available/pixelfed @@ -137,7 +183,7 @@ ln -sf /etc/nginx/sites-available/pixelfed /etc/nginx/sites-enabled/pixelfed rm -f /etc/nginx/sites-enabled/default $STD nginx -t systemctl enable -q --now nginx -msg_ok "Nginx configured" +msg_ok "Configured Nginx" msg_info "Creating Services" cat <<'EOF' >/etc/systemd/system/pixelfed-horizon.service @@ -182,29 +228,10 @@ Persistent=true WantedBy=timers.target EOF -systemctl daemon-reload systemctl enable -q --now pixelfed-horizon systemctl enable -q --now pixelfed-scheduler.timer -msg_ok "Services created" +msg_ok "Created Services" -msg_info "Saving Credentials" -CREDS_FILE="/root/pixelfed.creds" -{ - echo "Pixelfed Credentials" - echo "" - echo "PostgreSQL" - echo " Database: ${PG_DB_NAME}" - echo " User: ${PG_DB_USER}" - echo " Password: ${PG_DB_PASS}" - echo "" - echo "Redis" - echo " Host: 127.0.0.1:6379" - echo " Password: ${REDIS_PASS}" - echo "" - echo "Web Interface: http://${LOCAL_IP}" - echo "Config: /opt/pixelfed/.env" -} >"$CREDS_FILE" -msg_ok "Credentials saved to ${CREDS_FILE}" motd_ssh customize From 13d82b5b3132ea0093155c679ec63d487eca725f Mon Sep 17 00:00:00 2001 From: "CanbiZ (MickLesk)" <47820557+MickLesk@users.noreply.github.com> Date: Fri, 30 Jan 2026 15:12:00 +0100 Subject: [PATCH 194/228] Update Pixelfed install script for HTTPS and key generation Added FORCE_HTTPS_URLS and HTTPS environment variables to .env generation, set APP_KEY to empty, and updated artisan key:generate to use --force. These changes improve initial configuration and support for non-HTTPS setups. 
--- install/pixelfed-install.sh | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/install/pixelfed-install.sh b/install/pixelfed-install.sh index 58f480a50..682392c82 100644 --- a/install/pixelfed-install.sh +++ b/install/pixelfed-install.sh @@ -61,12 +61,14 @@ cd /opt/pixelfed cat </opt/pixelfed/.env APP_NAME="Pixelfed" APP_ENV="production" +APP_KEY= APP_DEBUG="false" APP_URL=http://${LOCAL_IP} APP_DOMAIN=${LOCAL_IP} ADMIN_DOMAIN=${LOCAL_IP} SESSION_DOMAIN=${LOCAL_IP} TRUST_PROXIES="*" +FORCE_HTTPS_URLS="false" OPEN_REGISTRATION="false" ENFORCE_EMAIL_VERIFICATION="false" @@ -121,6 +123,7 @@ MAIL_FROM_NAME="Pixelfed" PF_ENABLE_CLOUD="false" FILESYSTEM_CLOUD="s3" SESSION_SECURE_COOKIE="false" +HTTPS="false" EOF chown -R pixelfed:pixelfed /opt/pixelfed @@ -130,8 +133,8 @@ chmod -R 775 /opt/pixelfed/storage /opt/pixelfed/bootstrap/cache export COMPOSER_ALLOW_SUPERUSER=1 $STD composer install --no-dev --no-ansi --no-interaction --optimize-autoloader -sudo -u pixelfed php artisan key:generate -sudo -u pixelfed php artisan storage:link +$STD sudo -u pixelfed php artisan key:generate --force +$STD sudo -u pixelfed php artisan storage:link $STD sudo -u pixelfed php artisan migrate --force $STD sudo -u pixelfed php artisan import:cities $STD sudo -u pixelfed php artisan passport:keys From e26b3204c4a4519394ccb62430c39619bfe9cf46 Mon Sep 17 00:00:00 2001 From: "CanbiZ (MickLesk)" <47820557+MickLesk@users.noreply.github.com> Date: Fri, 30 Jan 2026 15:12:42 +0100 Subject: [PATCH 195/228] test --- install/pixelfed-install.sh | 29 ++++++++++++++--------------- 1 file changed, 14 insertions(+), 15 deletions(-) diff --git a/install/pixelfed-install.sh b/install/pixelfed-install.sh index 682392c82..79fb03a72 100644 --- a/install/pixelfed-install.sh +++ b/install/pixelfed-install.sh @@ -14,7 +14,7 @@ network_check update_os msg_info "Installing Dependencies" -$STD apt-get install -y \ +$STD apt install -y \ nginx \ redis-server \ ffmpeg \ @@ -126,23 +126,24 @@ SESSION_SECURE_COOKIE="false" HTTPS="false" EOF -chown -R pixelfed:pixelfed /opt/pixelfed chmod -R 755 /opt/pixelfed chmod -R 775 /opt/pixelfed/storage /opt/pixelfed/bootstrap/cache export COMPOSER_ALLOW_SUPERUSER=1 $STD composer install --no-dev --no-ansi --no-interaction --optimize-autoloader -$STD sudo -u pixelfed php artisan key:generate --force -$STD sudo -u pixelfed php artisan storage:link -$STD sudo -u pixelfed php artisan migrate --force -$STD sudo -u pixelfed php artisan import:cities -$STD sudo -u pixelfed php artisan passport:keys -$STD sudo -u pixelfed php artisan route:cache -$STD sudo -u pixelfed php artisan view:cache -$STD sudo -u pixelfed php artisan config:cache -$STD sudo -u pixelfed php artisan instance:actor -$STD sudo -u pixelfed php artisan horizon:install +$STD php artisan key:generate --force +$STD php artisan storage:link +$STD php artisan migrate --force +$STD php artisan import:cities +$STD php artisan passport:keys +$STD php artisan route:cache +$STD php artisan view:cache +$STD php artisan config:cache +$STD php artisan instance:actor +$STD php artisan horizon:install + +chown -R pixelfed:pixelfed /opt/pixelfed msg_ok "Installed Pixelfed" msg_info "Configuring Nginx" @@ -230,9 +231,7 @@ Persistent=true [Install] WantedBy=timers.target EOF - -systemctl enable -q --now pixelfed-horizon -systemctl enable -q --now pixelfed-scheduler.timer +systemctl enable -q --now pixelfed-horizon pixelfed-scheduler.timer msg_ok "Created Services" From a35207bd4ddeb5c635769ab986940e6dc48583b1 Mon Sep 17 00:00:00 2001 
From: "CanbiZ (MickLesk)" <47820557+MickLesk@users.noreply.github.com> Date: Fri, 30 Jan 2026 15:23:33 +0100 Subject: [PATCH 196/228] fixes --- install/pixelfed-install.sh | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/install/pixelfed-install.sh b/install/pixelfed-install.sh index 79fb03a72..65a28cb38 100644 --- a/install/pixelfed-install.sh +++ b/install/pixelfed-install.sh @@ -125,13 +125,10 @@ FILESYSTEM_CLOUD="s3" SESSION_SECURE_COOKIE="false" HTTPS="false" EOF - chmod -R 755 /opt/pixelfed chmod -R 775 /opt/pixelfed/storage /opt/pixelfed/bootstrap/cache - export COMPOSER_ALLOW_SUPERUSER=1 $STD composer install --no-dev --no-ansi --no-interaction --optimize-autoloader - $STD php artisan key:generate --force $STD php artisan storage:link $STD php artisan migrate --force @@ -142,7 +139,6 @@ $STD php artisan view:cache $STD php artisan config:cache $STD php artisan instance:actor $STD php artisan horizon:install - chown -R pixelfed:pixelfed /opt/pixelfed msg_ok "Installed Pixelfed" @@ -186,7 +182,7 @@ EOF ln -sf /etc/nginx/sites-available/pixelfed /etc/nginx/sites-enabled/pixelfed rm -f /etc/nginx/sites-enabled/default $STD nginx -t -systemctl enable -q --now nginx +systemctl reload nginx msg_ok "Configured Nginx" msg_info "Creating Services" From 03f425ecb753f89f8cd13f316c3609e3fa633ace Mon Sep 17 00:00:00 2001 From: "CanbiZ (MickLesk)" <47820557+MickLesk@users.noreply.github.com> Date: Fri, 30 Jan 2026 15:40:18 +0100 Subject: [PATCH 197/228] friagte --- ct/frigate.sh | 1 + install/frigate-install.sh | 217 ++++++++++++++++++++----------------- 2 files changed, 117 insertions(+), 101 deletions(-) diff --git a/ct/frigate.sh b/ct/frigate.sh index ecdc17126..108d1bfbe 100644 --- a/ct/frigate.sh +++ b/ct/frigate.sh @@ -13,6 +13,7 @@ var_disk="${var_disk:-20}" var_os="${var_os:-debian}" var_version="${var_version:-12}" var_unprivileged="${var_unprivileged:-0}" +var_gpu="${var_gpu:-yes}" header_info "$APP" variables diff --git a/install/frigate-install.sh b/install/frigate-install.sh index 26822920c..357cc7130 100644 --- a/install/frigate-install.sh +++ b/install/frigate-install.sh @@ -7,7 +7,6 @@ # Source: https://frigate.video/ source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" -set +e color verb_ip6 catch_errors @@ -15,48 +14,100 @@ setting_up_container network_check update_os -cat <<'EOF' >/etc/apt/sources.list.d/debian.sources +source /etc/os-release +if [[ "$VERSION_ID" == "12" ]]; then + DEBIAN_SUITE="bookworm" +elif [[ "$VERSION_ID" == "13" ]]; then + DEBIAN_SUITE="trixie" +else + msg_error "Unsupported Debian version: $VERSION_ID" + exit 1 +fi + +msg_info "Configuring Debian $VERSION_ID ($DEBIAN_SUITE) Sources" +cat </etc/apt/sources.list.d/debian.sources Types: deb deb-src URIs: http://deb.debian.org/debian -Suites: bookworm +Suites: ${DEBIAN_SUITE} Components: main contrib non-free non-free-firmware Signed-By: /usr/share/keyrings/debian-archive-keyring.gpg Types: deb deb-src URIs: http://deb.debian.org/debian -Suites: bookworm-updates +Suites: ${DEBIAN_SUITE}-updates Components: main contrib non-free non-free-firmware Signed-By: /usr/share/keyrings/debian-archive-keyring.gpg Types: deb deb-src URIs: http://security.debian.org -Suites: bookworm-security +Suites: ${DEBIAN_SUITE}-security Components: main contrib non-free non-free-firmware Signed-By: /usr/share/keyrings/debian-archive-keyring.gpg EOF rm -f /etc/apt/sources.list +$STD apt-get update +msg_ok "Configured Debian $VERSION_ID Sources" -msg_info "Installing system dependencies" -$STD apt-get install -y jq wget 
xz-utils python3 python3-dev python3-pip gcc pkg-config libhdf5-dev unzip build-essential automake libtool ccache libusb-1.0-0-dev apt-transport-https cmake git libgtk-3-dev libavcodec-dev libavformat-dev libswscale-dev libv4l-dev libxvidcore-dev libx264-dev libjpeg-dev libpng-dev libtiff-dev gfortran openexr libssl-dev libtbbmalloc2 libtbb-dev libdc1394-dev libopenexr-dev libgstreamer-plugins-base1.0-dev libgstreamer1.0-dev tclsh libopenblas-dev liblapack-dev make moreutils -msg_ok "System dependencies installed" +msg_info "Installing Dependencies" +$STD apt-get install -y \ + jq \ + wget \ + xz-utils \ + python3 \ + python3-dev \ + python3-pip \ + gcc \ + pkg-config \ + libhdf5-dev \ + unzip \ + build-essential \ + automake \ + libtool \ + ccache \ + libusb-1.0-0-dev \ + apt-transport-https \ + cmake \ + git \ + libgtk-3-dev \ + libavcodec-dev \ + libavformat-dev \ + libswscale-dev \ + libv4l-dev \ + libxvidcore-dev \ + libx264-dev \ + libjpeg-dev \ + libpng-dev \ + libtiff-dev \ + gfortran \ + openexr \ + libssl-dev \ + libtbbmalloc2 \ + libtbb-dev \ + libdc1394-dev \ + libopenexr-dev \ + libgstreamer-plugins-base1.0-dev \ + libgstreamer1.0-dev \ + tclsh \ + libopenblas-dev \ + liblapack-dev \ + make \ + moreutils +msg_ok "Installed Dependencies" setup_hwaccel +msg_info "Configuring GPU Access" if [[ "$CTTYPE" == "0" ]]; then - msg_info "Configuring render group for privileged container" sed -i -e 's/^kvm:x:104:$/render:x:104:root,frigate/' -e 's/^render:x:105:root$/kvm:x:105:/' /etc/group - msg_ok "Privileged container GPU access configured" else - msg_info "Configuring render group for unprivileged container" sed -i -e 's/^kvm:x:104:$/render:x:104:frigate/' -e 's/^render:x:105:$/kvm:x:105:/' /etc/group - msg_ok "Unprivileged container GPU access configured" fi +msg_ok "Configured GPU Access" export TARGETARCH="amd64" export CCACHE_DIR=/root/.ccache export CCACHE_MAXSIZE=2G export APT_KEY_DONT_WARN_ON_DANGEROUS_USAGE=DontWarn -export DEBIAN_FRONTEND=noninteractive export PIP_BREAK_SYSTEM_PACKAGES=1 export NVIDIA_VISIBLE_DEVICES=all export NVIDIA_DRIVER_CAPABILITIES="compute,video,utility" @@ -67,91 +118,73 @@ export HAILORT_LOGGER_PATH=NONE fetch_and_deploy_gh_release "frigate" "blakeblackshear/frigate" "tarball" "latest" "/opt/frigate" -msg_info "Building Nginx with custom modules" -#sed -i 's|if.*"$VERSION_ID" == "12".*|if [[ "$VERSION_ID" =~ ^(12|13)$ ]]; then|g' /opt/frigate/docker/main/build_nginx.sh +msg_info "Building Nginx" $STD bash /opt/frigate/docker/main/build_nginx.sh sed -e '/s6-notifyoncheck/ s/^#*/#/' -i /opt/frigate/docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx/run ln -sf /usr/local/nginx/sbin/nginx /usr/local/bin/nginx -msg_ok "Nginx built successfully" +msg_ok "Built Nginx" -msg_info "Building SQLite with custom modules" -#sed -i 's|if.*"$VERSION_ID" == "12".*|if [[ "$VERSION_ID" =~ ^(12|13)$ ]]; then|g' /opt/frigate/docker/main/build_sqlite_vec.sh +msg_info "Building SQLite Extensions" $STD bash /opt/frigate/docker/main/build_sqlite_vec.sh -msg_ok "SQLite built successfully" +msg_ok "Built SQLite Extensions" fetch_and_deploy_gh_release "go2rtc" "AlexxIT/go2rtc" "singlefile" "latest" "/usr/local/go2rtc/bin" "go2rtc_linux_amd64" -msg_info "Installing tempio" -export TARGETARCH=amd64 +msg_info "Installing Tempio" sed -i 's|/rootfs/usr/local|/usr/local|g' /opt/frigate/docker/main/install_tempio.sh $STD bash /opt/frigate/docker/main/install_tempio.sh ln -sf /usr/local/tempio/bin/tempio /usr/local/bin/tempio -msg_ok "tempio installed" +msg_ok "Installed Tempio" 
-msg_info "Building libUSB without udev" +msg_info "Building libUSB" cd /opt -wget -q https://github.com/libusb/libusb/archive/v1.0.26.zip -O v1.0.26.zip -$STD unzip -q v1.0.26.zip +wget -q https://github.com/libusb/libusb/archive/v1.0.26.zip -O libusb.zip +$STD unzip -q libusb.zip cd libusb-1.0.26 $STD ./bootstrap.sh $STD ./configure CC='ccache gcc' CCX='ccache g++' --disable-udev --enable-shared -$STD make -j $(nproc --all) +$STD make -j "$(nproc)" cd /opt/libusb-1.0.26/libusb -mkdir -p '/usr/local/lib' -$STD bash ../libtool --mode=install /usr/bin/install -c libusb-1.0.la '/usr/local/lib' -mkdir -p '/usr/local/include/libusb-1.0' -$STD install -c -m 644 libusb.h '/usr/local/include/libusb-1.0' -mkdir -p '/usr/local/lib/pkgconfig' +mkdir -p /usr/local/lib /usr/local/include/libusb-1.0 /usr/local/lib/pkgconfig +$STD bash ../libtool --mode=install /usr/bin/install -c libusb-1.0.la /usr/local/lib +install -c -m 644 libusb.h /usr/local/include/libusb-1.0 cd /opt/libusb-1.0.26/ -$STD install -c -m 644 libusb-1.0.pc '/usr/local/lib/pkgconfig' +install -c -m 644 libusb-1.0.pc /usr/local/lib/pkgconfig ldconfig -msg_ok "libUSB built successfully" +msg_ok "Built libUSB" -#msg_info "Setting up Python" -#$STD update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3 1 -#msg_ok "Python configured" - -#msg_info "Initializing pip" -#wget -q https://bootstrap.pypa.io/get-pip.py -O /tmp/get-pip.py -#sed -i 's/args.append("setuptools")/args.append("setuptools==77.0.3")/' /tmp/get-pip.py -#$STD python3 /tmp/get-pip.py "pip" -#msg_ok "Pip initialized" - -msg_info "Installing Python dependencies from requirements" +msg_info "Installing Python Dependencies" $STD pip3 install -r /opt/frigate/docker/main/requirements.txt -msg_ok "Python dependencies installed" +msg_ok "Installed Python Dependencies" -msg_info "Building pysqlite3" +msg_info "Building Python Wheels (Patience)" sed -i 's|^SQLITE3_VERSION=.*|SQLITE3_VERSION="version-3.46.0"|g' /opt/frigate/docker/main/build_pysqlite3.sh $STD bash /opt/frigate/docker/main/build_pysqlite3.sh mkdir -p /wheels for i in {1..3}; do - msg_info "Building wheels (attempt $i/3)..." 
- pip3 wheel --wheel-dir=/wheels -r /opt/frigate/docker/main/requirements-wheels.txt --default-timeout=300 --retries=3 && break - if [[ $i -lt 3 ]]; then sleep 10; fi + $STD pip3 wheel --wheel-dir=/wheels -r /opt/frigate/docker/main/requirements-wheels.txt --default-timeout=300 --retries=3 && break + [[ $i -lt 3 ]] && sleep 10 done -msg_ok "pysqlite3 built successfully" +msg_ok "Built Python Wheels" NODE_VERSION="22" NODE_MODULE="yarn" setup_nodejs -msg_info "Downloading inference models" +msg_info "Downloading Inference Models" mkdir -p /models /openvino-model wget -q -O edgetpu_model.tflite https://github.com/google-coral/test_data/raw/release-frogfish/ssdlite_mobiledet_coco_qat_postprocess_edgetpu.tflite -cd /models -wget -q -O cpu_model.tflite https://github.com/google-coral/test_data/raw/release-frogfish/ssdlite_mobiledet_coco_qat_postprocess.tflite +wget -q -O /models/cpu_model.tflite https://github.com/google-coral/test_data/raw/release-frogfish/ssdlite_mobiledet_coco_qat_postprocess.tflite cp /opt/frigate/labelmap.txt /labelmap.txt -msg_ok "Inference models downloaded" +msg_ok "Downloaded Inference Models" -msg_info "Downloading audio classification model" -cd / -wget -q -O yamnet-tflite.tar.gz https://www.kaggle.com/api/v1/models/google/yamnet/tfLite/classification-tflite/1/download -$STD tar xzf yamnet-tflite.tar.gz -mv 1.tflite cpu_audio_model.tflite +msg_info "Downloading Audio Model" +wget -q -O /tmp/yamnet.tar.gz https://www.kaggle.com/api/v1/models/google/yamnet/tfLite/classification-tflite/1/download +$STD tar xzf /tmp/yamnet.tar.gz -C / +mv /1.tflite /cpu_audio_model.tflite cp /opt/frigate/audio-labelmap.txt /audio-labelmap.txt -rm -f yamnet-tflite.tar.gz -msg_ok "Audio model prepared" +rm -f /tmp/yamnet.tar.gz +msg_ok "Downloaded Audio Model" -msg_info "Building HailoRT runtime" +msg_info "Installing HailoRT Runtime" $STD bash /opt/frigate/docker/main/install_hailort.sh cp -a /opt/frigate/docker/main/rootfs/. 
/ sed -i '/^.*unset DEBIAN_FRONTEND.*$/d' /opt/frigate/docker/main/install_deps.sh @@ -160,25 +193,24 @@ echo "libedgetpu1-max libedgetpu/install-confirm-max boolean true" | debconf-set $STD bash /opt/frigate/docker/main/install_deps.sh $STD pip3 install -U /wheels/*.whl ldconfig -$STD pip3 install -U /wheels/*.whl -msg_ok "HailoRT runtime built" +msg_ok "Installed HailoRT Runtime" -msg_info "Installing OpenVino runtime and libraries" +msg_info "Installing OpenVino" $STD pip3 install -r /opt/frigate/docker/main/requirements-ov.txt -msg_ok "OpenVino installed" +msg_ok "Installed OpenVino" -msg_info "Preparing OpenVino inference model" +msg_info "Building OpenVino Model" cd /models wget -q http://download.tensorflow.org/models/object_detection/ssdlite_mobilenet_v2_coco_2018_05_09.tar.gz $STD tar -zxf ssdlite_mobilenet_v2_coco_2018_05_09.tar.gz --no-same-owner $STD python3 /opt/frigate/docker/main/build_ov_model.py -cp -r /models/ssdlite_mobilenet_v2.xml /openvino-model/ -cp -r /models/ssdlite_mobilenet_v2.bin /openvino-model/ +cp /models/ssdlite_mobilenet_v2.xml /openvino-model/ +cp /models/ssdlite_mobilenet_v2.bin /openvino-model/ wget -q https://github.com/openvinotoolkit/open_model_zoo/raw/master/data/dataset_classes/coco_91cl_bkgr.txt -O /openvino-model/coco_91cl_bkgr.txt sed -i 's/truck/car/g' /openvino-model/coco_91cl_bkgr.txt -msg_ok "OpenVino model prepared" +msg_ok "Built OpenVino Model" -msg_info "Building Frigate application" +msg_info "Building Frigate Application (Patience)" cd /opt/frigate $STD pip3 install -r /opt/frigate/docker/main/requirements-dev.txt $STD bash /opt/frigate/.devcontainer/initialize.sh @@ -187,31 +219,22 @@ cd /opt/frigate/web $STD npm install $STD npm run build cp -r /opt/frigate/web/dist/* /opt/frigate/web/ -cd /opt/frigate sed -i '/^s6-svc -O \.$/s/^/#/' /opt/frigate/docker/main/rootfs/etc/s6-overlay/s6-rc.d/frigate/run -msg_ok "Frigate application built" +msg_ok "Built Frigate Application" -msg_info "Preparing configuration directories" +msg_info "Configuring Frigate" mkdir -p /config /media/frigate cp -r /opt/frigate/config/. 
/config -msg_ok "Configuration directories prepared" -msg_info "Setting up sample video" curl -fsSL "https://github.com/intel-iot-devkit/sample-videos/raw/master/person-bicycle-car-detection.mp4" -o "/media/frigate/person-bicycle-car-detection.mp4" -msg_ok "Sample video downloaded" -msg_info "Configuring tmpfs cache" echo "tmpfs /tmp/cache tmpfs defaults 0 0" >>/etc/fstab -msg_ok "Cache tmpfs configured" -msg_info "Creating environment configuration" cat </etc/frigate.env DEFAULT_FFMPEG_VERSION="7.0" INCLUDED_FFMPEG_VERSIONS="7.0:5.0" EOF -msg_ok "Environment file created" -msg_info "Creating base Frigate configuration" cat </config/config.yml mqtt: enabled: false @@ -233,12 +256,8 @@ auth: detect: enabled: false EOF -msg_ok "Base Frigate configuration created" -msg_info "Configuring object detection model" -if grep -q -o -m1 -E 'avx[^ ]* | sse4_2' /proc/cpuinfo; then - msg_ok "AVX or SSE 4.2 support detected" - msg_info "Configuring hardware-accelerated OpenVino model" +if grep -q -o -m1 -E 'avx[^ ]*|sse4_2' /proc/cpuinfo; then cat <>/config/config.yml ffmpeg: hwaccel_args: auto @@ -253,19 +272,17 @@ model: path: /openvino-model/ssdlite_mobilenet_v2.xml labelmap_path: /openvino-model/coco_91cl_bkgr.txt EOF - msg_ok "OpenVino model configured" else - msg_info "Configuring CPU-only object detection model" cat <>/config/config.yml ffmpeg: hwaccel_args: auto model: path: /cpu_model.tflite EOF - msg_ok "CPU model configured" fi +msg_ok "Configured Frigate" -msg_info "Creating systemd services" +msg_info "Creating Services" cat </etc/systemd/system/create_directories.service [Unit] Description=Create necessary directories for Frigate logs @@ -291,7 +308,7 @@ Restart=always RestartSec=1 User=root EnvironmentFile=/etc/frigate.env -ExecStartPre=+rm /dev/shm/logs/go2rtc/current +ExecStartPre=+rm -f /dev/shm/logs/go2rtc/current ExecStart=/bin/bash -c "bash /opt/frigate/docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/run 2> >(/usr/bin/ts '%%Y-%%m-%%d %%H:%%M:%%.S ' >&2) | /usr/bin/ts '%%Y-%%m-%%d %%H:%%M:%%.S '" StandardOutput=file:/dev/shm/logs/go2rtc/current StandardError=file:/dev/shm/logs/go2rtc/current @@ -312,7 +329,7 @@ Restart=always RestartSec=1 User=root EnvironmentFile=/etc/frigate.env -ExecStartPre=+rm /dev/shm/logs/frigate/current +ExecStartPre=+rm -f /dev/shm/logs/frigate/current ExecStart=/bin/bash -c "bash /opt/frigate/docker/main/rootfs/etc/s6-overlay/s6-rc.d/frigate/run 2> >(/usr/bin/ts '%%Y-%%m-%%d %%H:%%M:%%.S ' >&2) | /usr/bin/ts '%%Y-%%m-%%d %%H:%%M:%%.S '" StandardOutput=file:/dev/shm/logs/frigate/current StandardError=file:/dev/shm/logs/frigate/current @@ -332,7 +349,7 @@ Type=simple Restart=always RestartSec=1 User=root -ExecStartPre=+rm /dev/shm/logs/nginx/current +ExecStartPre=+rm -f /dev/shm/logs/nginx/current ExecStart=/bin/bash -c "bash /opt/frigate/docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx/run 2> >(/usr/bin/ts '%%Y-%%m-%%d %%H:%%M:%%.S ' >&2) | /usr/bin/ts '%%Y-%%m-%%d %%H:%%M:%%.S '" StandardOutput=file:/dev/shm/logs/nginx/current StandardError=file:/dev/shm/logs/nginx/current @@ -341,7 +358,7 @@ StandardError=file:/dev/shm/logs/nginx/current WantedBy=multi-user.target EOF -$STD systemctl daemon-reload +systemctl daemon-reload systemctl enable -q --now create_directories sleep 2 systemctl enable -q --now go2rtc @@ -349,13 +366,11 @@ sleep 2 systemctl enable -q --now frigate sleep 2 systemctl enable -q --now nginx -msg_ok "Systemd services created and enabled" +msg_ok "Created Services" -msg_info "Cleaning up temporary files and caches" -rm -rf /opt/v*.zip 
/opt/libusb-1.0.26 /tmp/get-pip.py -$STD apt-get -y autoremove -$STD apt-get -y autoclean -msg_ok "Cleanup completed" +msg_info "Cleaning Up" +rm -rf /opt/libusb.zip /opt/libusb-1.0.26 /wheels /models/*.tar.gz +msg_ok "Cleaned Up" motd_ssh customize From 9c68e3c9bbf4e3eb5dc374aa404ab341d1cc1be7 Mon Sep 17 00:00:00 2001 From: "CanbiZ (MickLesk)" <47820557+MickLesk@users.noreply.github.com> Date: Fri, 30 Jan 2026 15:45:32 +0100 Subject: [PATCH 198/228] deb13 --- ct/frigate.sh | 2 +- install/frigate-install.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ct/frigate.sh b/ct/frigate.sh index 108d1bfbe..c1c3f3ba9 100644 --- a/ct/frigate.sh +++ b/ct/frigate.sh @@ -11,7 +11,7 @@ var_cpu="${var_cpu:-4}" var_ram="${var_ram:-4096}" var_disk="${var_disk:-20}" var_os="${var_os:-debian}" -var_version="${var_version:-12}" +var_version="${var_version:-13}" var_unprivileged="${var_unprivileged:-0}" var_gpu="${var_gpu:-yes}" diff --git a/install/frigate-install.sh b/install/frigate-install.sh index 357cc7130..7a7d1d9ae 100644 --- a/install/frigate-install.sh +++ b/install/frigate-install.sh @@ -25,6 +25,7 @@ else fi msg_info "Configuring Debian $VERSION_ID ($DEBIAN_SUITE) Sources" +rm -f /etc/apt/sources.list /etc/apt/sources.list.d/*.sources /etc/apt/sources.list.d/*.list cat </etc/apt/sources.list.d/debian.sources Types: deb deb-src URIs: http://deb.debian.org/debian @@ -44,7 +45,6 @@ Suites: ${DEBIAN_SUITE}-security Components: main contrib non-free non-free-firmware Signed-By: /usr/share/keyrings/debian-archive-keyring.gpg EOF -rm -f /etc/apt/sources.list $STD apt-get update msg_ok "Configured Debian $VERSION_ID Sources" From 07c2407158b03a265a9be69b86b92050e29caa87 Mon Sep 17 00:00:00 2001 From: "CanbiZ (MickLesk)" <47820557+MickLesk@users.noreply.github.com> Date: Fri, 30 Jan 2026 16:04:38 +0100 Subject: [PATCH 199/228] Update frigate-install.sh --- install/frigate-install.sh | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/install/frigate-install.sh b/install/frigate-install.sh index 7a7d1d9ae..727e67148 100644 --- a/install/frigate-install.sh +++ b/install/frigate-install.sh @@ -119,6 +119,13 @@ export HAILORT_LOGGER_PATH=NONE fetch_and_deploy_gh_release "frigate" "blakeblackshear/frigate" "tarball" "latest" "/opt/frigate" msg_info "Building Nginx" +# Patch build scripts for Debian 13 compatibility +if [[ "$VERSION_ID" == "13" ]]; then + sed -i 's/\[[ "$VERSION_ID" == "12" \]\]/[[ "$VERSION_ID" =~ ^(12|13)$ ]]/g' /opt/frigate/docker/main/build_nginx.sh + sed -i 's/\[[ "$VERSION_ID" == "12" \]\]/[[ "$VERSION_ID" =~ ^(12|13)$ ]]/g' /opt/frigate/docker/main/build_sqlite_vec.sh + # Create empty sources.list if build scripts expect it + touch /etc/apt/sources.list +fi $STD bash /opt/frigate/docker/main/build_nginx.sh sed -e '/s6-notifyoncheck/ s/^#*/#/' -i /opt/frigate/docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx/run ln -sf /usr/local/nginx/sbin/nginx /usr/local/bin/nginx From da9d6a91a3a6320219ecf7c650b30ea43c4612ff Mon Sep 17 00:00:00 2001 From: "CanbiZ (MickLesk)" <47820557+MickLesk@users.noreply.github.com> Date: Fri, 30 Jan 2026 16:20:37 +0100 Subject: [PATCH 200/228] Use pysqlite3-binary on Debian 13+ in install script Updated the installation script to install the pre-built pysqlite3-binary package on Debian 13 (Python 3.12+) instead of building pysqlite3 from source. This improves compatibility and reduces build time for newer Debian versions. 
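Condensed, the gate this adds around the pysqlite3 step is the following (VERSION_ID comes from /etc/os-release, which the script already sources earlier; $STD is the same wrapper used throughout these installers):

    if [[ "$VERSION_ID" == "13" ]]; then
        # Debian 13 ships Python 3.12+: install the pre-built wheel instead of compiling
        $STD pip3 install pysqlite3-binary
    else
        # Debian 12 / Python 3.11: keep building pysqlite3 against the pinned SQLite source
        sed -i 's|^SQLITE3_VERSION=.*|SQLITE3_VERSION="version-3.46.0"|g' /opt/frigate/docker/main/build_pysqlite3.sh
        $STD bash /opt/frigate/docker/main/build_pysqlite3.sh
    fi

Both branches leave a usable pysqlite3 module in place; only the Debian 13 branch skips the lengthy SQLite compile.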
--- install/frigate-install.sh | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/install/frigate-install.sh b/install/frigate-install.sh index 727e67148..33fd5c175 100644 --- a/install/frigate-install.sh +++ b/install/frigate-install.sh @@ -165,9 +165,14 @@ $STD pip3 install -r /opt/frigate/docker/main/requirements.txt msg_ok "Installed Python Dependencies" msg_info "Building Python Wheels (Patience)" -sed -i 's|^SQLITE3_VERSION=.*|SQLITE3_VERSION="version-3.46.0"|g' /opt/frigate/docker/main/build_pysqlite3.sh -$STD bash /opt/frigate/docker/main/build_pysqlite3.sh mkdir -p /wheels +if [[ "$VERSION_ID" == "13" ]]; then + # Debian 13 (Python 3.12+): Use pre-built pysqlite3-binary instead of building from source + $STD pip3 install pysqlite3-binary +else + sed -i 's|^SQLITE3_VERSION=.*|SQLITE3_VERSION="version-3.46.0"|g' /opt/frigate/docker/main/build_pysqlite3.sh + $STD bash /opt/frigate/docker/main/build_pysqlite3.sh +fi for i in {1..3}; do $STD pip3 wheel --wheel-dir=/wheels -r /opt/frigate/docker/main/requirements-wheels.txt --default-timeout=300 --retries=3 && break [[ $i -lt 3 ]] && sleep 10 From 3cb9377528d372730fa730d1be36c23856c6ccea Mon Sep 17 00:00:00 2001 From: "CanbiZ (MickLesk)" <47820557+MickLesk@users.noreply.github.com> Date: Fri, 30 Jan 2026 16:31:18 +0100 Subject: [PATCH 201/228] Fix wheel build for Debian 13 and Python 3.12 compatibility Updates the install script to filter out incompatible tflite_runtime wheels for Python 3.12 on Debian 13, installs tflite-runtime or ai-edge-litert via pip, and adjusts the wheel build process accordingly. This ensures successful installation on newer Debian versions with Python 3.12+. --- install/frigate-install.sh | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/install/frigate-install.sh b/install/frigate-install.sh index 33fd5c175..4bd8c546b 100644 --- a/install/frigate-install.sh +++ b/install/frigate-install.sh @@ -169,14 +169,22 @@ mkdir -p /wheels if [[ "$VERSION_ID" == "13" ]]; then # Debian 13 (Python 3.12+): Use pre-built pysqlite3-binary instead of building from source $STD pip3 install pysqlite3-binary + # Filter out incompatible wheels for Python 3.12 + grep -v 'tflite_runtime' /opt/frigate/docker/main/requirements-wheels.txt > /tmp/requirements-wheels-filtered.txt + for i in {1..3}; do + $STD pip3 wheel --wheel-dir=/wheels -r /tmp/requirements-wheels-filtered.txt --default-timeout=300 --retries=3 && break + [[ $i -lt 3 ]] && sleep 10 + done + # Install tflite-runtime from pip for Python 3.12 + $STD pip3 install tflite-runtime || $STD pip3 install ai-edge-litert || true else sed -i 's|^SQLITE3_VERSION=.*|SQLITE3_VERSION="version-3.46.0"|g' /opt/frigate/docker/main/build_pysqlite3.sh $STD bash /opt/frigate/docker/main/build_pysqlite3.sh + for i in {1..3}; do + $STD pip3 wheel --wheel-dir=/wheels -r /opt/frigate/docker/main/requirements-wheels.txt --default-timeout=300 --retries=3 && break + [[ $i -lt 3 ]] && sleep 10 + done fi -for i in {1..3}; do - $STD pip3 wheel --wheel-dir=/wheels -r /opt/frigate/docker/main/requirements-wheels.txt --default-timeout=300 --retries=3 && break - [[ $i -lt 3 ]] && sleep 10 -done msg_ok "Built Python Wheels" NODE_VERSION="22" NODE_MODULE="yarn" setup_nodejs From 4e20dfbd41e21df4ec2ec2f5d96ca211abcb4eea Mon Sep 17 00:00:00 2001 From: "CanbiZ (MickLesk)" <47820557+MickLesk@users.noreply.github.com> Date: Fri, 30 Jan 2026 16:47:29 +0100 Subject: [PATCH 202/228] deb12 --- ct/frigate.sh | 2 +- install/frigate-install.sh | 50 
++++++++++---------------------------- 2 files changed, 14 insertions(+), 38 deletions(-) diff --git a/ct/frigate.sh b/ct/frigate.sh index c1c3f3ba9..108d1bfbe 100644 --- a/ct/frigate.sh +++ b/ct/frigate.sh @@ -11,7 +11,7 @@ var_cpu="${var_cpu:-4}" var_ram="${var_ram:-4096}" var_disk="${var_disk:-20}" var_os="${var_os:-debian}" -var_version="${var_version:-13}" +var_version="${var_version:-12}" var_unprivileged="${var_unprivileged:-0}" var_gpu="${var_gpu:-yes}" diff --git a/install/frigate-install.sh b/install/frigate-install.sh index 4bd8c546b..5ea3de0ed 100644 --- a/install/frigate-install.sh +++ b/install/frigate-install.sh @@ -15,38 +15,34 @@ network_check update_os source /etc/os-release -if [[ "$VERSION_ID" == "12" ]]; then - DEBIAN_SUITE="bookworm" -elif [[ "$VERSION_ID" == "13" ]]; then - DEBIAN_SUITE="trixie" -else - msg_error "Unsupported Debian version: $VERSION_ID" +if [[ "$VERSION_ID" != "12" ]]; then + msg_error "Frigate requires Debian 12 (Bookworm) due to Python 3.11 dependencies" exit 1 fi -msg_info "Configuring Debian $VERSION_ID ($DEBIAN_SUITE) Sources" +msg_info "Configuring Debian Sources" rm -f /etc/apt/sources.list /etc/apt/sources.list.d/*.sources /etc/apt/sources.list.d/*.list cat </etc/apt/sources.list.d/debian.sources Types: deb deb-src URIs: http://deb.debian.org/debian -Suites: ${DEBIAN_SUITE} +Suites: bookworm Components: main contrib non-free non-free-firmware Signed-By: /usr/share/keyrings/debian-archive-keyring.gpg Types: deb deb-src URIs: http://deb.debian.org/debian -Suites: ${DEBIAN_SUITE}-updates +Suites: bookworm-updates Components: main contrib non-free non-free-firmware Signed-By: /usr/share/keyrings/debian-archive-keyring.gpg Types: deb deb-src URIs: http://security.debian.org -Suites: ${DEBIAN_SUITE}-security +Suites: bookworm-security Components: main contrib non-free non-free-firmware Signed-By: /usr/share/keyrings/debian-archive-keyring.gpg EOF $STD apt-get update -msg_ok "Configured Debian $VERSION_ID Sources" +msg_ok "Configured Debian Sources" msg_info "Installing Dependencies" $STD apt-get install -y \ @@ -119,13 +115,6 @@ export HAILORT_LOGGER_PATH=NONE fetch_and_deploy_gh_release "frigate" "blakeblackshear/frigate" "tarball" "latest" "/opt/frigate" msg_info "Building Nginx" -# Patch build scripts for Debian 13 compatibility -if [[ "$VERSION_ID" == "13" ]]; then - sed -i 's/\[[ "$VERSION_ID" == "12" \]\]/[[ "$VERSION_ID" =~ ^(12|13)$ ]]/g' /opt/frigate/docker/main/build_nginx.sh - sed -i 's/\[[ "$VERSION_ID" == "12" \]\]/[[ "$VERSION_ID" =~ ^(12|13)$ ]]/g' /opt/frigate/docker/main/build_sqlite_vec.sh - # Create empty sources.list if build scripts expect it - touch /etc/apt/sources.list -fi $STD bash /opt/frigate/docker/main/build_nginx.sh sed -e '/s6-notifyoncheck/ s/^#*/#/' -i /opt/frigate/docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx/run ln -sf /usr/local/nginx/sbin/nginx /usr/local/bin/nginx @@ -166,25 +155,12 @@ msg_ok "Installed Python Dependencies" msg_info "Building Python Wheels (Patience)" mkdir -p /wheels -if [[ "$VERSION_ID" == "13" ]]; then - # Debian 13 (Python 3.12+): Use pre-built pysqlite3-binary instead of building from source - $STD pip3 install pysqlite3-binary - # Filter out incompatible wheels for Python 3.12 - grep -v 'tflite_runtime' /opt/frigate/docker/main/requirements-wheels.txt > /tmp/requirements-wheels-filtered.txt - for i in {1..3}; do - $STD pip3 wheel --wheel-dir=/wheels -r /tmp/requirements-wheels-filtered.txt --default-timeout=300 --retries=3 && break - [[ $i -lt 3 ]] && sleep 10 - done - # Install 
tflite-runtime from pip for Python 3.12 - $STD pip3 install tflite-runtime || $STD pip3 install ai-edge-litert || true -else - sed -i 's|^SQLITE3_VERSION=.*|SQLITE3_VERSION="version-3.46.0"|g' /opt/frigate/docker/main/build_pysqlite3.sh - $STD bash /opt/frigate/docker/main/build_pysqlite3.sh - for i in {1..3}; do - $STD pip3 wheel --wheel-dir=/wheels -r /opt/frigate/docker/main/requirements-wheels.txt --default-timeout=300 --retries=3 && break - [[ $i -lt 3 ]] && sleep 10 - done -fi +sed -i 's|^SQLITE3_VERSION=.*|SQLITE3_VERSION="version-3.46.0"|g' /opt/frigate/docker/main/build_pysqlite3.sh +$STD bash /opt/frigate/docker/main/build_pysqlite3.sh +for i in {1..3}; do + $STD pip3 wheel --wheel-dir=/wheels -r /opt/frigate/docker/main/requirements-wheels.txt --default-timeout=300 --retries=3 && break + [[ $i -lt 3 ]] && sleep 10 +done msg_ok "Built Python Wheels" NODE_VERSION="22" NODE_MODULE="yarn" setup_nodejs From 248397f2f7bf1f1998c29aed52a449364d10a700 Mon Sep 17 00:00:00 2001 From: "CanbiZ (MickLesk)" <47820557+MickLesk@users.noreply.github.com> Date: Fri, 30 Jan 2026 17:05:57 +0100 Subject: [PATCH 203/228] Update frigate-install.sh --- install/frigate-install.sh | 24 ------------------------ 1 file changed, 24 deletions(-) diff --git a/install/frigate-install.sh b/install/frigate-install.sh index 5ea3de0ed..67125edf1 100644 --- a/install/frigate-install.sh +++ b/install/frigate-install.sh @@ -20,30 +20,6 @@ if [[ "$VERSION_ID" != "12" ]]; then exit 1 fi -msg_info "Configuring Debian Sources" -rm -f /etc/apt/sources.list /etc/apt/sources.list.d/*.sources /etc/apt/sources.list.d/*.list -cat </etc/apt/sources.list.d/debian.sources -Types: deb deb-src -URIs: http://deb.debian.org/debian -Suites: bookworm -Components: main contrib non-free non-free-firmware -Signed-By: /usr/share/keyrings/debian-archive-keyring.gpg - -Types: deb deb-src -URIs: http://deb.debian.org/debian -Suites: bookworm-updates -Components: main contrib non-free non-free-firmware -Signed-By: /usr/share/keyrings/debian-archive-keyring.gpg - -Types: deb deb-src -URIs: http://security.debian.org -Suites: bookworm-security -Components: main contrib non-free non-free-firmware -Signed-By: /usr/share/keyrings/debian-archive-keyring.gpg -EOF -$STD apt-get update -msg_ok "Configured Debian Sources" - msg_info "Installing Dependencies" $STD apt-get install -y \ jq \ From 6b195b0c375225a02253a745f24d176c701434f5 Mon Sep 17 00:00:00 2001 From: "CanbiZ (MickLesk)" <47820557+MickLesk@users.noreply.github.com> Date: Fri, 30 Jan 2026 17:18:21 +0100 Subject: [PATCH 204/228] Update frigate-install.sh --- install/frigate-install.sh | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/install/frigate-install.sh b/install/frigate-install.sh index 67125edf1..27bb74894 100644 --- a/install/frigate-install.sh +++ b/install/frigate-install.sh @@ -66,7 +66,15 @@ $STD apt-get install -y \ moreutils msg_ok "Installed Dependencies" -setup_hwaccel +msg_info "Setting Up Hardware Acceleration" +# Use Debian 12 native packages instead of setup_hwaccel (Intel Arc latest drivers require Debian 13) +$STD apt-get install -y \ + vainfo \ + intel-media-va-driver-non-free \ + intel-gpu-tools \ + mesa-va-drivers \ + mesa-vulkan-drivers || true +msg_ok "Set Up Hardware Acceleration" msg_info "Configuring GPU Access" if [[ "$CTTYPE" == "0" ]]; then From 79f91841e7ab36fdbc28532c4ea385ad01ed9920 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Fri, 30 Jan 2026 22:05:03 +0100 
Subject: [PATCH 205/228] chore: update github-versions.json (#1368) --- frontend/public/json/github-versions.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frontend/public/json/github-versions.json b/frontend/public/json/github-versions.json index 3be9decfb..43b04608d 100644 --- a/frontend/public/json/github-versions.json +++ b/frontend/public/json/github-versions.json @@ -1,5 +1,5 @@ { - "generated": "2026-01-30T07:00:55Z", + "generated": "2026-01-30T18:55:55Z", "versions": [ { "slug": "affine", From 7cf7a1d6c965902c588de00e67b8733c77a0c3b2 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Fri, 30 Jan 2026 22:49:21 +0000 Subject: [PATCH 206/228] Delete ampache (ct) after migration to ProxmoxVE (#1370) Co-authored-by: github-actions[bot] --- ct/ampache.sh | 78 ------------------------------- ct/headers/ampache | 6 --- frontend/public/json/ampache.json | 48 ------------------- install/ampache-install.sh | 68 --------------------------- 4 files changed, 200 deletions(-) delete mode 100644 ct/ampache.sh delete mode 100644 ct/headers/ampache delete mode 100644 frontend/public/json/ampache.json delete mode 100644 install/ampache-install.sh diff --git a/ct/ampache.sh b/ct/ampache.sh deleted file mode 100644 index 1c4d6415e..000000000 --- a/ct/ampache.sh +++ /dev/null @@ -1,78 +0,0 @@ -#!/usr/bin/env bash -source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/build.func) -# Copyright (c) 2021-2026 community-scripts ORG -# Author: MickLesk (Canbiz) -# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE -# Source: https://github.com/ampache/ampache - -APP="Ampache" -var_tags="${var_tags:-music}" -var_disk="${var_disk:-5}" -var_cpu="${var_cpu:-4}" -var_ram="${var_ram:-2048}" -var_os="${var_os:-debian}" -var_version="${var_version:-13}" -var_unprivileged="${var_unprivileged:-1}" - -header_info "$APP" -variables -color -catch_errors - -function update_script() { - header_info - check_container_storage - check_container_resources - - if [[ ! -d /opt/ampache ]]; then - msg_error "No ${APP} Installation Found!" - exit - fi - if check_for_gh_release "Ampache" "ampache/ampache"; then - - msg_info "Stopping Apache" - systemctl stop apache2 - msg_ok "Stopped Apache" - - msg_info "Backing up Configuration" - cp /opt/ampache/config/ampache.cfg.php /tmp/ampache.cfg.php.backup - cp /opt/ampache/public/rest/.htaccess /tmp/ampache_rest.htaccess.backup - cp /opt/ampache/public/play/.htaccess /tmp/ampache_play.htaccess.backup - msg_ok "Backed up Configuration" - - msg_info "Backup Ampache Folder" - rm -rf /opt/ampache_backup - mv /opt/ampache /opt/ampache_backup - msg_ok "Backed up Ampache" - - fetch_and_deploy_gh_release "Ampache" "ampache/ampache" "release" "latest" "/opt/ampache" "ampache-*_all_php8.4.zip" - - msg_info "Restoring Configuration" - cp /tmp/ampache.cfg.php.backup /opt/ampache/config/ampache.cfg.php - cp /tmp/ampache_rest.htaccess.backup /opt/ampache/public/rest/.htaccess - cp /tmp/ampache_play.htaccess.backup /opt/ampache/public/play/.htaccess - chmod 664 /opt/ampache/public/rest/.htaccess /opt/ampache/public/play/.htaccess - chown -R www-data:www-data /opt/ampache - msg_ok "Restored Configuration" - - msg_info "Cleaning up" - rm -f /tmp/ampache*.backup - msg_ok "Cleaned up" - - msg_info "Starting Apache" - systemctl start apache2 - msg_ok "Started Apache" - msg_ok "Updated successfully!" 
- msg_custom "⚠️" "${YW}" "Complete database update by visiting: http://${LOCAL_IP}/update.php" - fi - exit -} - -start -build_container -description - -msg_ok "Completed successfully!\n" -echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" -echo -e "${INFO}${YW} Access it using the following URL:${CL}" -echo -e "${TAB}${GATEWAY}${BGN}http://${IP}/install.php${CL}" diff --git a/ct/headers/ampache b/ct/headers/ampache deleted file mode 100644 index 68d2c033d..000000000 --- a/ct/headers/ampache +++ /dev/null @@ -1,6 +0,0 @@ - ___ __ - / | ____ ___ ____ ____ ______/ /_ ___ - / /| | / __ `__ \/ __ \/ __ `/ ___/ __ \/ _ \ - / ___ |/ / / / / / /_/ / /_/ / /__/ / / / __/ -/_/ |_/_/ /_/ /_/ .___/\__,_/\___/_/ /_/\___/ - /_/ diff --git a/frontend/public/json/ampache.json b/frontend/public/json/ampache.json deleted file mode 100644 index d16099011..000000000 --- a/frontend/public/json/ampache.json +++ /dev/null @@ -1,48 +0,0 @@ -{ - "name": "Ampache", - "slug": "ampache", - "categories": [ - 13 - ], - "date_created": "2026-01-13", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 80, - "documentation": "https://github.com/ampache/ampache/wiki", - "website": "https://ampache.org/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/ampache.webp", - "config_path": "/opt/ampache/config/ampache.cfg.php", - "description": "Ampache is a web-based audio streaming application and file manager that allows you to access your music & videos from anywhere. It features a powerful music catalog, multiple user support, transcoding, streaming, and more.", - "install_methods": [ - { - "type": "default", - "script": "ct/ampache.sh", - "resources": { - "cpu": 4, - "ram": 2048, - "hdd": 5, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Complete the web-based setup at http://IP/install.php", - "type": "info" - }, - { - "text": "Database credentials are stored in `~/ampache.creds` - use only the MySQL username and password from this file", - "type": "info" - }, - { - "text": "During installation, only check 'Create Tables' - leave 'Create Database' and 'Create Database User' unchecked", - "type": "info" - } - ] -} diff --git a/install/ampache-install.sh b/install/ampache-install.sh deleted file mode 100644 index 811548ed5..000000000 --- a/install/ampache-install.sh +++ /dev/null @@ -1,68 +0,0 @@ -#!/usr/bin/env bash - -# Copyright (c) 2021-2026 community-scripts ORG -# Author: MickLesk (Canbiz) -# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE -# Source: https://github.com/ampache/ampache - -source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" -color -verb_ip6 -catch_errors -setting_up_container -network_check -update_os - -msg_info "Installing Dependencies" -$STD apt install -y \ - flac \ - vorbis-tools \ - lame \ - ffmpeg \ - inotify-tools \ - libavcodec-extra \ - libmp3lame-dev \ - libtheora-dev \ - libvorbis-dev \ - libvpx-dev -msg_ok "Installed Dependencies" - -PHP_VERSION=8.4 PHP_MODULE=bcmath,bz2,curl,gd,imagick,intl,mbstring,mysql,sqlite3,xml,xmlrpc,zip PHP_APACHE=YES setup_php -setup_mariadb -MARIADB_DB_USER=ampache MARIADB_DB_NAME=ampache setup_mariadb_db - -fetch_and_deploy_gh_release "ampache" "ampache/ampache" "prebuild" "latest" "/opt/ampache" "ampache-*_all_php8.4.zip" - -msg_info "Setup Ampache" -rm -rf /var/www/html -ln -s /opt/ampache/public /var/www/html -mv /opt/ampache/public/rest/.htaccess.dist 
/opt/ampache/public/rest/.htaccess -mv /opt/ampache/public/play/.htaccess.dist /opt/ampache/public/play/.htaccess -cp /opt/ampache/config/ampache.cfg.php.dist /opt/ampache/config/ampache.cfg.php -chmod 664 /opt/ampache/public/rest/.htaccess /opt/ampache/public/play/.htaccess -msg_ok "Set up Ampache" - -msg_info "Configuring Database Connection" -sed -i 's|^database_hostname = .*|database_hostname = "localhost"|' /opt/ampache/config/ampache.cfg.php -sed -i 's|^database_name = .*|database_name = "ampache"|' /opt/ampache/config/ampache.cfg.php -sed -i 's|^database_username = .*|database_username = "ampache"|' /opt/ampache/config/ampache.cfg.php -sed -i "s|^database_password = .*|database_password = \"${MARIADB_DB_PASS}\"|" /opt/ampache/config/ampache.cfg.php -chown -R www-data:www-data /opt/ampache -msg_ok "Configured Database Connection" - -msg_info "Importing Database Schema" -mariadb -u ampache -p"${MARIADB_DB_PASS}" ampache Date: Fri, 30 Jan 2026 23:11:06 +0000 Subject: [PATCH 207/228] Delete languagetool (ct) after migration to ProxmoxVE (#1372) Co-authored-by: github-actions[bot] --- ct/languagetool.sh | 67 --------------- frontend/public/json/languagetool.json | 44 ---------- install/languagetool-install.sh | 109 ------------------------- 3 files changed, 220 deletions(-) delete mode 100644 ct/languagetool.sh delete mode 100644 frontend/public/json/languagetool.json delete mode 100644 install/languagetool-install.sh diff --git a/ct/languagetool.sh b/ct/languagetool.sh deleted file mode 100644 index 6f63178a1..000000000 --- a/ct/languagetool.sh +++ /dev/null @@ -1,67 +0,0 @@ -#!/usr/bin/env bash -source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/build.func) -# Copyright (c) 2021-2026 community-scripts ORG -# Author: Slaviša Arežina (tremor021) -# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE -# Source: https://languagetool.org/ - -APP="LanguageTool" -var_tags="${var_tags:-spellcheck}" -var_cpu="${var_cpu:-2}" -var_ram="${var_ram:-4096}" -var_disk="${var_disk:-8}" -var_os="${var_os:-debian}" -var_version="${var_version:-13}" -var_unprivileged="${var_unprivileged:-1}" - -header_info "$APP" -variables -color -catch_errors - -function update_script() { - header_info - check_container_storage - check_container_resources - if [[ ! -d /opt/LanguageTool ]]; then - msg_error "No ${APP} Installation Found!" - exit - fi - - RELEASE=$(curl -fsSL https://languagetool.org/download/ | grep -oP 'LanguageTool-\K[0-9]+\.[0-9]+(\.[0-9]+)?(?=\.zip)' | sort -V | tail -n1) - if [[ "${RELEASE}" != "$(cat ~/.languagetool 2>/dev/null)" ]] || [[ ! -f ~/.languagetool ]]; then - msg_info "Stopping LanguageTool" - systemctl stop language-tool - msg_ok "Stopped LanguageTool" - - msg_info "Creating Backup" - cp /opt/LanguageTool/server.properties /opt/server.properties - msg_ok "Backup Created" - - msg_info "Updating LanguageTool" - rm -rf /opt/LanguageTool - download_file "https://languagetool.org/download/LanguageTool-stable.zip" /tmp/LanguageTool-stable.zip - unzip -q /tmp/LanguageTool-stable.zip -d /opt - mv /opt/LanguageTool-*/ /opt/LanguageTool/ - mv /opt/server.properties /opt/LanguageTool/server.properties - echo "${RELEASE}" >~/.languagetool - msg_ok "Updated LanguageTool" - - msg_info "Starting LanguageTool" - systemctl start language-tool - msg_ok "Started LanguageTool" - msg_ok "Updated successfuly!" - else - msg_ok "No update required. 
${APP} is already at v${RELEASE}" - fi - exit -} - -start -build_container -description - -msg_ok "Completed successfully!" -echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" -echo -e "${INFO}${YW} Access it using the following URL:${CL}" -echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:8081/v2${CL}" diff --git a/frontend/public/json/languagetool.json b/frontend/public/json/languagetool.json deleted file mode 100644 index 03bb66f64..000000000 --- a/frontend/public/json/languagetool.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "name": "LanguageTool", - "slug": "languagetool", - "categories": [ - 0 - ], - "date_created": "2025-12-10", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8081, - "documentation": "https://dev.languagetool.org/", - "config_path": "/opt/LanguageTool/server.properties", - "website": "https://languagetool.org/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/languagetool.webp", - "description": "LanguageTool is an Open Source proofreading software for English, Spanish, French, German, Portuguese, Polish, Dutch, and more than 20 other languages. It finds many errors that a simple spell checker cannot detect.", - "install_methods": [ - { - "type": "default", - "script": "ct/languagetool.sh", - "resources": { - "cpu": 2, - "ram": 4096, - "hdd": 8, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "API is available at `http://:8081/v2`.", - "type": "info" - }, - { - "text": "Application doesn't come with n-gram data. If you wish to use that feature, please look at `https://dev.languagetool.org/finding-errors-using-n-gram-data.html`.", - "type": "info" - } - ] -} diff --git a/install/languagetool-install.sh b/install/languagetool-install.sh deleted file mode 100644 index d4b486b6e..000000000 --- a/install/languagetool-install.sh +++ /dev/null @@ -1,109 +0,0 @@ -#!/usr/bin/env bash - -# Copyright (c) 2021-2026 community-scripts ORG -# Author: Slaviša Arežina (tremor021) -# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE -# Source: https://languagetool.org/ - -source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" -color -verb_ip6 -catch_errors -setting_up_container -network_check -update_os - -msg_info "Installing dependencies" -$STD apt install -y fasttext -msg_ok "Installed dependencies" - -JAVA_VERSION="21" setup_java - -msg_info "Setting up LanguageTool" -RELEASE=$(curl -fsSL https://languagetool.org/download/ | grep -oP 'LanguageTool-\K[0-9]+\.[0-9]+(\.[0-9]+)?(?=\.zip)' | sort -V | tail -n1) -download_file "https://languagetool.org/download/LanguageTool-stable.zip" /tmp/LanguageTool-stable.zip -unzip -q /tmp/LanguageTool-stable.zip -d /opt -mv /opt/LanguageTool-*/ /opt/LanguageTool/ -download_file "https://dl.fbaipublicfiles.com/fasttext/supervised-models/lid.176.bin" /opt/lid.176.bin -msg_ok "Setup LanguageTool" - -ngram_dir="" -lang_code="" -max_attempts=3 -attempt=0 - -while [[ $attempt -lt $max_attempts ]]; do - read -r -p "${TAB3}Enter language code (en, de, es, fr, nl) to download ngrams or press ENTER to skip: " lang_code - - if [[ -z "$lang_code" ]]; then - break - fi - - if [[ "$lang_code" =~ [[:space:]] ]]; then - ((attempt++)) - remaining=$((max_attempts - attempt)) - if [[ $remaining -gt 0 ]]; then - msg_error "Please enter only ONE language code. You have $remaining attempt(s) remaining." - else - msg_error "Maximum attempts reached. Continuing without ngrams." 
- lang_code="" - fi - continue - fi - break -done - -if [[ -n "$lang_code" ]]; then - if [[ "$lang_code" =~ ^(en|de|es|fr|nl)$ ]]; then - msg_info "Searching for $lang_code ngrams..." - filename=$(curl -fsSL https://languagetool.org/download/ngram-data/ | grep -oP "ngrams-${lang_code}-[0-9]+\.zip" | sort -uV | tail -n1) - - if [[ -n "$filename" ]]; then - msg_info "Downloading $filename" - download_file "https://languagetool.org/download/ngram-data/${filename}" "/tmp/${filename}" - - mkdir -p /opt/ngrams - msg_info "Extracting $lang_code ngrams to /opt/ngrams" - unzip -q "/tmp/${filename}" -d /opt/ngrams - rm "/tmp/${filename}" - - ngram_dir="/opt/ngrams" - msg_ok "Installed $lang_code ngrams" - else - msg_info "No ngram file found for ${lang_code}" - fi - else - msg_error "Invalid language code: $lang_code" - fi -fi - -cat </opt/LanguageTool/server.properties -fasttextModel=/opt/lid.176.bin -fasttextBinary=/usr/bin/fasttext -EOF -if [[ -n "$ngram_dir" ]]; then - echo "languageModel=/opt/ngrams" >> /opt/LanguageTool/server.properties -fi -echo "${RELEASE}" >~/.languagetool -msg_ok "Setup LanguageTool" - -msg_info "Creating Service" -cat <<'EOF' >/etc/systemd/system/language-tool.service -[Unit] -Description=LanguageTool Service -After=network.target - -[Service] -WorkingDirectory=/opt/LanguageTool -ExecStart=java -cp languagetool-server.jar org.languagetool.server.HTTPServer --config server.properties --public --allow-origin "*" -Restart=always - -[Install] -WantedBy=multi-user.target -EOF -systemctl enable -q --now language-tool -msg_ok "Created Service" - -motd_ssh -customize -cleanup_lxc From 46509887015b14332b4bdc2d1964a9f5167e0719 Mon Sep 17 00:00:00 2001 From: Chris Date: Fri, 30 Jan 2026 21:12:09 -0500 Subject: [PATCH 208/228] Apply suggestion from @vhsdream --- install/sonobarr-install.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/install/sonobarr-install.sh b/install/sonobarr-install.sh index 7decf7c94..ee0ab0b36 100644 --- a/install/sonobarr-install.sh +++ b/install/sonobarr-install.sh @@ -22,7 +22,7 @@ $STD uv pip install --no-cache-dir -r /opt/sonobarr/requirements.txt mkdir -p /etc/sonobarr mv /opt/sonobarr/.sample-env /etc/sonobarr/.env sed -i "s/^secret_key=.*/secret_key=$(openssl rand -hex 16)/" /etc/sonobarr/.env -sed -i "s/^sonobarr_superadmin_password=.*/secret_key=$(openssl rand -hex 16)/" /etc/sonobarr/.env +sed -i "s/^sonobarr_superadmin_password=.*/sonobarr_superadmin_password=$(openssl rand -hex 16)/" /etc/sonobarr/.env echo "release_version=$(cat ~/.sonobarr)" >>/etc/sonobarr/.env echo "sonobarr_config_dir=/etc/sonobarr" >>/etc/sonobarr.env msg_ok "Set up sonobarr" From ef9fa9b36aaa726cc3875d4ca8446863d3ecbc7d Mon Sep 17 00:00:00 2001 From: Tobias <96661824+CrazyWolf13@users.noreply.github.com> Date: Sat, 31 Jan 2026 11:33:13 +0100 Subject: [PATCH 209/228] Update sonobarr.sh --- ct/sonobarr.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/ct/sonobarr.sh b/ct/sonobarr.sh index 0ed453596..57456fdc1 100644 --- a/ct/sonobarr.sh +++ b/ct/sonobarr.sh @@ -32,9 +32,9 @@ function update_script() { PYTHON_VERSION="3.12" setup_uv if check_for_gh_release "sonobarr" "Dodelidoo-Labs/sonobarr"; then - msg_info "Stopping sonobarr" + msg_info "Stopping Service" systemctl stop sonobarr - msg_ok "Stopped sonobarr" + msg_ok "Stopped Service" CLEAN_INSTALL=1 fetch_and_deploy_gh_release "sonobarr" "Dodelidoo-Labs/sonobarr" "tarball" @@ -45,9 +45,9 @@ function update_script() { sed -i "/release_version/s/=.*/=$(cat ~/.sonobarr)/" 
/etc/sonobarr/.env msg_ok "Updated sonobarr" - msg_info "Starting sonobarr" + msg_info "Starting Service" systemctl start sonobarr - msg_ok "Started sonobarr" + msg_ok "Started Service" msg_ok "Updated successfully!" fi exit From 39d5aa323afd75c8842359b24146bfe4d42eb0a5 Mon Sep 17 00:00:00 2001 From: Tobias <96661824+CrazyWolf13@users.noreply.github.com> Date: Sat, 31 Jan 2026 11:33:45 +0100 Subject: [PATCH 210/228] Update sonobarr.sh --- ct/sonobarr.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ct/sonobarr.sh b/ct/sonobarr.sh index 57456fdc1..b8279a24d 100644 --- a/ct/sonobarr.sh +++ b/ct/sonobarr.sh @@ -8,7 +8,7 @@ source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxV APP="sonobarr" var_tags="${var_tags:-storage}" var_cpu="${var_cpu:-1}" -var_ram="${var_ram:-512}" +var_ram="${var_ram:-1024}" var_disk="${var_disk:-20}" var_os="${var_os:-debian}" var_version="${var_version:-13}" From 12619abdff42e6c371b8611b3ddb3e84e2ccfc40 Mon Sep 17 00:00:00 2001 From: Joerg Heinemann Date: Sat, 31 Jan 2026 12:57:08 +0100 Subject: [PATCH 211/228] Modify ebusd installation script to enable service Change systemctl command to enable ebusd without starting it. --- install/ebusd-install.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/install/ebusd-install.sh b/install/ebusd-install.sh index f7e58010c..ab74a8312 100644 --- a/install/ebusd-install.sh +++ b/install/ebusd-install.sh @@ -22,7 +22,7 @@ setup_deb822_repo \ msg_info "Installing ebusd" $STD apt install -y ebusd -systemctl enable -q --now ebusd +systemctl enable -q ebusd msg_ok "Installed ebusd" motd_ssh From f552f7671a2d70f04f61e471cdb9da86d4383edc Mon Sep 17 00:00:00 2001 From: "GitHub Actions[bot]" Date: Sat, 31 Jan 2026 13:04:18 +0000 Subject: [PATCH 212/228] chore: update github-versions.json Total versions: 20 Pinned versions: 1 Generated: 2026-01-31T13:04:17Z --- frontend/public/json/github-versions.json | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/frontend/public/json/github-versions.json b/frontend/public/json/github-versions.json index 43b04608d..23daaeaab 100644 --- a/frontend/public/json/github-versions.json +++ b/frontend/public/json/github-versions.json @@ -1,5 +1,5 @@ { - "generated": "2026-01-30T18:55:55Z", + "generated": "2026-01-31T13:04:17Z", "versions": [ { "slug": "affine", @@ -8,13 +8,6 @@ "pinned": false, "date": "2025-12-09T04:34:14Z" }, - { - "slug": "ampache", - "repo": "ampache/ampache", - "version": "7.8.0", - "pinned": false, - "date": "2025-12-22T04:23:45Z" - }, { "slug": "anytype", "repo": "grishy/any-sync-bundle", @@ -74,9 +67,9 @@ { "slug": "nextexplorer", "repo": "vikramsoni2/nextExplorer", - "version": "v2.1.1", + "version": "v2.1.2a", "pinned": false, - "date": "2026-01-24T21:22:09Z" + "date": "2026-01-31T00:09:18Z" }, { "slug": "nightscout", @@ -134,6 +127,13 @@ "pinned": false, "date": "2025-11-29T02:43:00Z" }, + { + "slug": "sonobarr", + "repo": "Dodelidoo-Labs/sonobarr", + "version": "0.11.0", + "pinned": false, + "date": "2026-01-21T19:07:21Z" + }, { "slug": "wishlist", "repo": "cmintey/wishlist", From d0c31baaad9c5c647ee7eb933fbe2756bf1821d3 Mon Sep 17 00:00:00 2001 From: FutureCow Date: Sat, 31 Jan 2026 15:33:30 +0100 Subject: [PATCH 213/228] Update Papra installation script configuration Update typo in the INGESTION_FOLDER_ROOT_PATH env. Also add SERVER_BASE_URL for easier deplyment. 
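In effect the old INGESTION_FOLDER_ROOT key is replaced by INGESTION_FOLDER_ROOT_PATH, ingestion is enabled explicitly, and the server base URL now mirrors the client one. The tail of the environment file written by the installer then reads (values as in the diff below; ${LOCAL_IP} expands at install time):

    CLIENT_BASE_URL=http://${LOCAL_IP}:1221
    SERVER_BASE_URL=http://${LOCAL_IP}:1221
    EMAILS_DRY_RUN=true
    INGESTION_FOLDER_IS_ENABLED=true
    INGESTION_FOLDER_ROOT_PATH=/opt/papra_data/ingestion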
--- install/papra-install.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/install/papra-install.sh b/install/papra-install.sh index 1f1be0a74..530dbc408 100644 --- a/install/papra-install.sh +++ b/install/papra-install.sh @@ -48,8 +48,10 @@ AUTH_SECRET=$(cat /opt/papra_data/.secret) BETTER_AUTH_SECRET=$(cat /opt/papra_data/.secret) BETTER_AUTH_TELEMETRY=0 CLIENT_BASE_URL=http://${LOCAL_IP}:1221 +SERVER_BASE_URL=http://${LOCAL_IP}:1221 EMAILS_DRY_RUN=true -INGESTION_FOLDER_ROOT=/opt/papra_data/ingestion +INGESTION_FOLDER_IS_ENABLED=true +INGESTION_FOLDER_ROOT_PATH=/opt/papra_data/ingestion EOF msg_ok "Configured Papra" From 59bc2bf070594576db32ad8ae7b55f88a94fb1f4 Mon Sep 17 00:00:00 2001 From: "GitHub Actions[bot]" Date: Sat, 31 Jan 2026 18:42:50 +0000 Subject: [PATCH 214/228] chore: update github-versions.json Total versions: 20 Pinned versions: 1 Generated: 2026-01-31T18:42:49Z --- frontend/public/json/github-versions.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frontend/public/json/github-versions.json b/frontend/public/json/github-versions.json index 23daaeaab..a2c97554c 100644 --- a/frontend/public/json/github-versions.json +++ b/frontend/public/json/github-versions.json @@ -1,5 +1,5 @@ { - "generated": "2026-01-31T13:04:17Z", + "generated": "2026-01-31T18:42:49Z", "versions": [ { "slug": "affine", From 1e3c7caea8a6c5030b8ed573a29d8f567a2450a7 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Sat, 31 Jan 2026 23:16:29 +0000 Subject: [PATCH 215/228] Delete shelfmark (ct) after migration to ProxmoxVE (#1382) Co-authored-by: github-actions[bot] --- ct/shelfmark.sh | 77 ---------- frontend/public/json/shelfmark.json | 40 ----- install/shelfmark-install.sh | 217 ---------------------------- 3 files changed, 334 deletions(-) delete mode 100644 ct/shelfmark.sh delete mode 100644 frontend/public/json/shelfmark.json delete mode 100644 install/shelfmark-install.sh diff --git a/ct/shelfmark.sh b/ct/shelfmark.sh deleted file mode 100644 index 1e2106248..000000000 --- a/ct/shelfmark.sh +++ /dev/null @@ -1,77 +0,0 @@ -#!/usr/bin/env bash -source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func) -# Copyright (c) 2021-2026 community-scripts ORG -# Author: vhsdream -# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE -# Source: https://github.com/calibrain/shelfmark - -APP="shelfmark" -var_tags="${var_tags:-ebooks}" -var_cpu="${var_cpu:-2}" -var_ram="${var_ram:-2048}" -var_disk="${var_disk:-8}" -var_os="${var_os:-debian}" -var_version="${var_version:-13}" -var_unprivileged="${var_unprivileged:-1}" - -header_info "$APP" -variables -color -catch_errors - -function update_script() { - header_info - check_container_storage - check_container_resources - - if [[ ! -d /opt/shelfmark ]]; then - msg_error "No ${APP} Installation Found!" 
- exit - fi - - NODE_VERSION="22" setup_nodejs - PYTHON_VERSION="3.12" setup_uv - - if check_for_gh_release "shelfmark" "calibrain/shelfmark"; then - msg_info "Stopping Service" - systemctl stop shelfmark - [[ -f /etc/systemd/system/chromium.service ]] && systemctl stop chromium - msg_ok "Stopped Service" - - cp /opt/shelfmark/start.sh /opt/start.sh.bak - CLEAN_INSTALL=1 fetch_and_deploy_gh_release "shelfmark" "calibrain/shelfmark" "tarball" "latest" "/opt/shelfmark" - RELEASE_VERSION=$(cat "$HOME/.shelfmark") - - msg_info "Updating Shelfmark" - sed -i "s/^RELEASE_VERSION=.*/RELEASE_VERSION=$RELEASE_VERSION/" /etc/shelfmark/.env - cd /opt/shelfmark/src/frontend - $STD npm ci - $STD npm run build - mv /opt/shelfmark/src/frontend/dist /opt/shelfmark/frontend-dist - cd /opt/shelfmark - $STD uv venv -c ./venv - $STD source ./venv/bin/activate - $STD uv pip install -r ./requirements-base.txt - if [[ $(sed -n '/_BYPASS=/s/[^=]*=//p' /etc/shelfmark/.env) == "true" ]] && [[ $(sed -n '/BYPASSER=/s/[^=]*=//p' /etc/shelfmark/.env) == "false" ]]; then - $STD uv pip install -r ./requirements-shelfmark.txt - fi - mv /opt/start.sh.bak /opt/shelfmark/start.sh - msg_ok "Updated Shelfmark" - - msg_info "Starting Service" - systemctl start shelfmark - [[ -f /etc/systemd/system/chromium.service ]] && systemctl start chromium - msg_ok "Started Service" - msg_ok "Updated successfully!" - fi - exit -} - -start -build_container -description - -msg_ok "Completed successfully!\n" -echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" -echo -e "${INFO}${YW} Access it using the following URL:${CL}" -echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:8084${CL}" diff --git a/frontend/public/json/shelfmark.json b/frontend/public/json/shelfmark.json deleted file mode 100644 index 262dcd6f7..000000000 --- a/frontend/public/json/shelfmark.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Shelfmark", - "slug": "shelfmark", - "categories": [ - 13 - ], - "date_created": "2026-01-24", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8084, - "documentation": "https://github.com/calibrain/shelfmark/tree/main/docs", - "website": "https://github.com/calibrain/shelfmark", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/shelfmark.webp", - "config_path": "/etc/shelfmark", - "description": "Shelfmark is a unified web interface for searching and aggregating books and audiobook downloads from multiple sources - all in one place.", - "install_methods": [ - { - "type": "default", - "script": "ct/shelfmark.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 6, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "The configuration at `/etc/shelfmark/.env` is for bootstrapping the initial install. 
Customize the configuration via the Shelfmark UI.", - "type": "info" - } - ] -} diff --git a/install/shelfmark-install.sh b/install/shelfmark-install.sh deleted file mode 100644 index 9be897055..000000000 --- a/install/shelfmark-install.sh +++ /dev/null @@ -1,217 +0,0 @@ -#!/usr/bin/env bash - -# Copyright (c) 2021-2026 community-scripts ORG -# Author: vhsdream -# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE -# Source: https://github.com/calibrain/shelfmark - -source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" -color -verb_ip6 -catch_errors -setting_up_container -network_check -update_os - -msg_info "Installing Dependencies" -$STD apt install -y \ - unrar-free -ln -sf /usr/bin/unrar-free /usr/bin/unrar -msg_ok "Installed Dependencies" - -mkdir -p /etc/shelfmark -cat </etc/shelfmark/.env -DOCKERMODE=false -CONFIG_DIR=/etc/shelfmark -TMP_DIR=/tmp/shelfmark -ENABLE_LOGGING=true -FLASK_HOST=0.0.0.0 -FLASK_PORT=8084 -# SESSION_COOKIES_SECURE=true -# CWA_DB_PATH= -USE_CF_BYPASS=true -USING_EXTERNAL_BYPASSER=false -# EXT_BYPASSER_URL= -# EXT_BYPASSER_PATH=/v1 -EOF - -echo "" -echo "" -echo -e "${BL}Shelfmark Deployment Type${CL}" -echo "─────────────────────────────────────────" -echo "Please choose your deployment type:" -echo "" -echo " 1) Use Shelfmark's internal captcha bypasser (default)" -echo " 2) Install FlareSolverr in this LXC" -echo " 3) Use an existing Flaresolverr/Byparr LXC" -echo " 4) Disable captcha bypassing altogether (not recommended)" -echo "" - -read -r -p "${TAB3}Select deployment type [1]: " DEPLOYMENT_TYPE -DEPLOYMENT_TYPE="${DEPLOYMENT_TYPE:-1}" - -case "$DEPLOYMENT_TYPE" in -1) - msg_ok "Using Shelfmark's internal captcha bypasser" - ;; -2) - msg_ok "Proceeding with FlareSolverr installation" - ;; -3) - echo "" - echo -e "${BL}Use an existing FlareSolverr/Byparr LXC${CL}" - echo "─────────────────────────────────────────" - echo "Enter the URL/IP address with port of your Flaresolverr/Byparr instance" - echo "Example: http://flaresoverr.homelab.lan:8191 or" - echo "http://192.168.10.99:8191" - echo "" - read -r -p "FlareSolverr/Byparr URL: " BYPASSER_URL - - if [[ -z "$BYPASSER_URL" ]]; then - msg_warn "No Flaresolverr/Byparr URL provided. Falling back to Shelfmark's internal bypasser." - else - BYPASSER_URL="${BYPASSER_URL%/}" - msg_ok "FlareSolverr/Byparr URL: ${BYPASSER_URL}" - fi - ;; -4) - msg_warn "Disabling captcha bypass. This may cause the majority of searches and downloads to fail." - ;; -*) - msg_warn "Invalid selection. Reverting to default (internal bypasser)!" 
- ;; -esac - -if [[ "$DEPLOYMENT_TYPE" == "2" ]]; then - fetch_and_deploy_gh_release "flaresolverr" "FlareSolverr/FlareSolverr" "prebuild" "latest" "/opt/flaresolverr" "flaresolverr_linux_x64.tar.gz" - msg_info "Installing FlareSolverr (please wait)" - $STD apt install -y xvfb - setup_deb822_repo \ - "google-chrome" \ - "https://dl.google.com/linux/linux_signing_key.pub" \ - "https://dl.google.com/linux/chrome/deb/" \ - "stable" - $STD apt update - $STD apt install -y google-chrome-stable - # remove google-chrome.list added by google-chrome-stable - rm /etc/apt/sources.list.d/google-chrome.list - sed -i -e '/BYPASSER=/s/false/true/' \ - -e 's/^# EXT_/EXT_/' \ - -e "s|_URL=.*|_URL=http://localhost:8191|" /etc/shelfmark/.env - msg_ok "Installed FlareSolverr" -elif [[ "$DEPLOYMENT_TYPE" == "3" ]]; then - sed -i -e '/BYPASSER=/s/false/true/' \ - -e 's/^# EXT_/EXT_/' \ - -e "s|_URL=.*|_URL=${BYPASSER_URL}|" /etc/shelfmark/.env -elif [[ "$DEPLOYMENT_TYPE" == "4" ]]; then - sed -i '/_BYPASS=/s/true/false/' /etc/shelfmark/.env -else - DEPLOYMENT_TYPE="1" - msg_info "Installing internal bypasser dependencies" - $STD apt install -y --no-install-recommends \ - xvfb \ - ffmpeg \ - chromium-common=143.0.7499.169-1~deb13u1 \ - chromium=143.0.7499.169-1~deb13u1 \ - chromium-driver=143.0.7499.169-1~deb13u1 \ - python3-tk - msg_ok "Installed internal bypasser dependencies" -fi - -NODE_VERSION="22" setup_nodejs -PYTHON_VERSION="3.12" setup_uv - -fetch_and_deploy_gh_release "shelfmark" "calibrain/shelfmark" "tarball" "latest" "/opt/shelfmark" -RELEASE_VERSION=$(cat "$HOME/.shelfmark") - -msg_info "Building Shelfmark frontend" -cd /opt/shelfmark/src/frontend -echo "RELEASE_VERSION=${RELEASE_VERSION}" >>/etc/shelfmark/.env -$STD npm ci -$STD npm run build -mv /opt/shelfmark/src/frontend/dist /opt/shelfmark/frontend-dist -msg_ok "Built Shelfmark frontend" - -msg_info "Configuring Shelfmark" -cd /opt/shelfmark -$STD uv venv ./venv -$STD source ./venv/bin/activate -$STD uv pip install -r ./requirements-base.txt -[[ "$DEPLOYMENT_TYPE" == "1" ]] && $STD uv pip install -r ./requirements-shelfmark.txt -mkdir -p {/var/log/shelfmark,/tmp/shelfmark} -msg_ok "Configured Shelfmark" - -msg_info "Creating Services and start script" -cat </etc/systemd/system/shelfmark.service -[Unit] -Description=Shelfmark server -After=network.target - -[Service] -Type=simple -WorkingDirectory=/opt/shelfmark -EnvironmentFile=/etc/shelfmark/.env -ExecStart=/usr/bin/bash /opt/shelfmark/start.sh -Restart=always -RestartSec=10 -KillMode=mixed - -[Install] -WantedBy=multi-user.target -EOF - -if [[ "$DEPLOYMENT_TYPE" == "1" ]]; then - cat </etc/systemd/system/chromium.service -[Unit] -Description=karakeep Headless Browser -After=network.target - -[Service] -User=root -ExecStart=/usr/bin/chromium --headless --no-sandbox --disable-gpu --disable-dev-shm-usage --remote-debugging-address=127.0.0.1 --remote-debugging-port=9222 --hide-scrollbars -Restart=always - -[Install] -WantedBy=multi-user.target -EOF - systemctl enable -q --now chromium -fi -if [[ "$DEPLOYMENT_TYPE" == "2" ]]; then - cat </etc/systemd/system/flaresolverr.service -[Unit] -Description=FlareSolverr -After=network.target -[Service] -SyslogIdentifier=flaresolverr -Restart=always -RestartSec=5 -Type=simple -Environment="LOG_LEVEL=info" -Environment="CAPTCHA_SOLVER=none" -WorkingDirectory=/opt/flaresolverr -ExecStart=/opt/flaresolverr/flaresolverr -TimeoutStopSec=30 -[Install] -WantedBy=multi-user.target -EOF - systemctl enable -q --now flaresolverr -fi - -cat </opt/shelfmark/start.sh 
-#!/usr/bin/env bash - -source /opt/shelfmark/venv/bin/activate -set -a -source /etc/shelfmark/.env -set +a - -gunicorn --worker-class geventwebsocket.gunicorn.workers.GeventWebSocketWorker --workers 1 -t 300 -b 0.0.0.0:8084 shelfmark.main:app -EOF -chmod +x /opt/shelfmark/start.sh - -systemctl enable -q --now shelfmark -msg_ok "Created Services and start script" - -motd_ssh -customize -cleanup_lxc From 355b5637d40527a4cd519810d04c4a482079213e Mon Sep 17 00:00:00 2001 From: Joerg Heinemann Date: Sun, 1 Feb 2026 10:47:59 +0100 Subject: [PATCH 216/228] Fix upgrade command syntax for ebusd --- ct/ebusd.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ct/ebusd.sh b/ct/ebusd.sh index 710c81c06..5aa0f9076 100644 --- a/ct/ebusd.sh +++ b/ct/ebusd.sh @@ -30,7 +30,7 @@ function update_script() { msg_info "Updating ebusd" $STD apt update - $STD apt --upgrade -y ebusd + $STD apt upgrade -y ebusd msg_ok "Updated ebusd" msg_ok "Updated successfully!" exit From aef951129be176a590c15dfdda7f61153362cbb9 Mon Sep 17 00:00:00 2001 From: Tobias <96661824+CrazyWolf13@users.noreply.github.com> Date: Sun, 1 Feb 2026 14:41:46 +0100 Subject: [PATCH 217/228] Update forgejo-runner.sh --- ct/forgejo-runner.sh | 1 - 1 file changed, 1 deletion(-) diff --git a/ct/forgejo-runner.sh b/ct/forgejo-runner.sh index c11dd19a0..045ea256f 100644 --- a/ct/forgejo-runner.sh +++ b/ct/forgejo-runner.sh @@ -1,6 +1,5 @@ #!/usr/bin/env bash source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func) - # Copyright (c) 2021-2026 community-scripts ORG # Author: Simon Friedrich # License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE From 035527ea21f9766c17c245c3499f95031d8da0f8 Mon Sep 17 00:00:00 2001 From: Tobias <96661824+CrazyWolf13@users.noreply.github.com> Date: Sun, 1 Feb 2026 14:42:16 +0100 Subject: [PATCH 218/228] Update freepbx.sh --- ct/freepbx.sh | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/ct/freepbx.sh b/ct/freepbx.sh index e16a1c47a..5b141e78e 100644 --- a/ct/freepbx.sh +++ b/ct/freepbx.sh @@ -1,8 +1,7 @@ #!/usr/bin/env bash source <(curl -s https://raw.githubusercontent.com/community-scripts/ProxmoxVED/refs/heads/freepbx/misc/build.func) # Copyright (c) 2021-2026 community-scripts ORG -# Author: Arian Nasr (arian-nasr) -# Updated by: Javier Pastor (vsc55) +# Author: Arian Nasr (arian-nasr) | Co-Author: Javier Pastor (vsc55) # License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE # Source: https://www.freepbx.org/ From 2e1ee2d8537dde867f1c4f3000e9677eacbcf034 Mon Sep 17 00:00:00 2001 From: MickLesk Date: Sun, 1 Feb 2026 20:35:40 +0100 Subject: [PATCH 219/228] feat(tools): add fetch_and_deploy_codeberg_release function Add Codeberg equivalent of fetch_and_deploy_gh_release supporting: - tarball/source mode for source code archives - binary mode for .deb packages - prebuild mode for prebuilt archives - singlefile mode for standalone binaries - tag mode for direct tag downloads Uses Codeberg API and archive URL patterns: - API: https://codeberg.org/api/v1/repos/{owner}/{repo}/releases - Archive: https://codeberg.org/{owner}/{repo}/archive/{tag}.tar.gz --- misc/tools.func | 434 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 434 insertions(+) diff --git a/misc/tools.func b/misc/tools.func index 1096b4a01..40acc1abb 100644 --- a/misc/tools.func +++ b/misc/tools.func @@ -2180,6 +2180,440 @@ function fetch_and_deploy_gh_release() { rm -rf "$tmpdir" } +# 
------------------------------------------------------------------------------ +# Downloads and deploys latest Codeberg release (source, binary, tarball, asset). +# +# Description: +# - Fetches latest release metadata from Codeberg API +# - Supports the following modes: +# - tarball: Source code tarball (default if omitted) +# - source: Alias for tarball (same behavior) +# - binary: .deb package install (arch-dependent) +# - prebuild: Prebuilt .tar.gz archive (e.g. Go binaries) +# - singlefile: Standalone binary (no archive, direct chmod +x install) +# - tag: Direct tag download (bypasses Release API) +# - Handles download, extraction/installation and version tracking in ~/. +# +# Parameters: +# $1 APP - Application name (used for install path and version file) +# $2 REPO - Codeberg repository in form user/repo +# $3 MODE - Release type: +# tarball → source tarball (.tar.gz) +# binary → .deb file (auto-arch matched) +# prebuild → prebuilt archive (e.g. tar.gz) +# singlefile→ standalone binary (chmod +x) +# tag → direct tag (bypasses Release API) +# $4 VERSION - Optional release tag (default: latest) +# $5 TARGET_DIR - Optional install path (default: /opt/) +# $6 ASSET_FILENAME - Required for: +# - prebuild → archive filename or pattern +# - singlefile→ binary filename or pattern +# +# Examples: +# # 1. Minimal: Fetch and deploy source tarball +# fetch_and_deploy_codeberg_release "autocaliweb" "gelbphoenix/autocaliweb" +# +# # 2. Binary install via .deb asset (architecture auto-detected) +# fetch_and_deploy_codeberg_release "myapp" "myuser/myapp" "binary" +# +# # 3. Prebuilt archive (.tar.gz) with asset filename match +# fetch_and_deploy_codeberg_release "myapp" "myuser/myapp" "prebuild" "latest" "/opt/myapp" "myapp_Linux_x86_64.tar.gz" +# +# # 4. Single binary (chmod +x) +# fetch_and_deploy_codeberg_release "myapp" "myuser/myapp" "singlefile" "v1.0.0" "/opt/myapp" "myapp-linux-amd64" +# +# # 5. Explicit tag version +# fetch_and_deploy_codeberg_release "autocaliweb" "gelbphoenix/autocaliweb" "tag" "v0.11.3" "/opt/autocaliweb" +# ------------------------------------------------------------------------------ + +function fetch_and_deploy_codeberg_release() { + local app="$1" + local repo="$2" + local mode="${3:-tarball}" # tarball | binary | prebuild | singlefile | tag + local version="${4:-latest}" + local target="${5:-/opt/$app}" + local asset_pattern="${6:-}" + + local app_lc=$(echo "${app,,}" | tr -d ' ') + local version_file="$HOME/.${app_lc}" + + local api_timeout="--connect-timeout 10 --max-time 60" + local download_timeout="--connect-timeout 15 --max-time 900" + + local current_version="" + [[ -f "$version_file" ]] && current_version=$(<"$version_file") + + ensure_dependencies jq + + ### Tag Mode (bypass Release API) ### + if [[ "$mode" == "tag" ]]; then + if [[ "$version" == "latest" ]]; then + msg_error "Mode 'tag' requires explicit version (not 'latest')" + return 1 + fi + + local tag_name="$version" + [[ "$tag_name" =~ ^v ]] && version="${tag_name:1}" || version="$tag_name" + + if [[ "$current_version" == "$version" ]]; then + $STD msg_ok "$app is already up-to-date (v$version)" + return 0 + fi + + # DNS check + if ! 
getent hosts "codeberg.org" &>/dev/null; then + msg_error "DNS resolution failed for codeberg.org – check /etc/resolv.conf or networking" + return 1 + fi + + local tmpdir + tmpdir=$(mktemp -d) || return 1 + + msg_info "Fetching Codeberg tag: $app ($tag_name)" + + local safe_version="${version//@/_}" + safe_version="${safe_version//\//_}" + local filename="${app_lc}-${safe_version}.tar.gz" + local download_success=false + + # Codeberg archive URL format: https://codeberg.org/{owner}/{repo}/archive/{tag}.tar.gz + local archive_url="https://codeberg.org/$repo/archive/${tag_name}.tar.gz" + if curl $download_timeout -fsSL -o "$tmpdir/$filename" "$archive_url"; then + download_success=true + fi + + if [[ "$download_success" != "true" ]]; then + msg_error "Download failed for $app ($tag_name)" + rm -rf "$tmpdir" + return 1 + fi + + mkdir -p "$target" + if [[ "${CLEAN_INSTALL:-0}" == "1" ]]; then + rm -rf "${target:?}/"* + fi + + tar --no-same-owner -xzf "$tmpdir/$filename" -C "$tmpdir" || { + msg_error "Failed to extract tarball" + rm -rf "$tmpdir" + return 1 + } + + local unpack_dir + unpack_dir=$(find "$tmpdir" -mindepth 1 -maxdepth 1 -type d | head -n1) + + shopt -s dotglob nullglob + cp -r "$unpack_dir"/* "$target/" + shopt -u dotglob nullglob + + echo "$version" >"$version_file" + msg_ok "Deployed: $app ($version)" + rm -rf "$tmpdir" + return 0 + fi + + # Codeberg API: https://codeberg.org/api/v1/repos/{owner}/{repo}/releases + local api_url="https://codeberg.org/api/v1/repos/$repo/releases" + if [[ "$version" != "latest" ]]; then + # Get release by tag: /repos/{owner}/{repo}/releases/tags/{tag} + api_url="https://codeberg.org/api/v1/repos/$repo/releases/tags/$version" + fi + + # dns pre check + if ! getent hosts "codeberg.org" &>/dev/null; then + msg_error "DNS resolution failed for codeberg.org – check /etc/resolv.conf or networking" + return 1 + fi + + local max_retries=3 retry_delay=2 attempt=1 success=false resp http_code + + while ((attempt <= max_retries)); do + resp=$(curl $api_timeout -fsSL -w "%{http_code}" -o /tmp/codeberg_rel.json "$api_url") && success=true && break + sleep "$retry_delay" + ((attempt++)) + done + + if ! 
$success; then + msg_error "Failed to fetch release metadata from $api_url after $max_retries attempts" + return 1 + fi + + http_code="${resp:(-3)}" + [[ "$http_code" != "200" ]] && { + msg_error "Codeberg API returned HTTP $http_code" + return 1 + } + + local json tag_name + json=$(/dev/null || uname -m) + [[ "$arch" == "x86_64" ]] && arch="amd64" + [[ "$arch" == "aarch64" ]] && arch="arm64" + + local assets url_match="" + # Codeberg assets are in .assets[].browser_download_url + assets=$(echo "$json" | jq -r '.assets[].browser_download_url') + + # If explicit filename pattern is provided, match that first + if [[ -n "$asset_pattern" ]]; then + for u in $assets; do + case "${u##*/}" in + $asset_pattern) + url_match="$u" + break + ;; + esac + done + fi + + # Fall back to architecture heuristic + if [[ -z "$url_match" ]]; then + for u in $assets; do + if [[ "$u" =~ ($arch|amd64|x86_64|aarch64|arm64).*\.deb$ ]]; then + url_match="$u" + break + fi + done + fi + + # Fallback: any .deb file + if [[ -z "$url_match" ]]; then + for u in $assets; do + [[ "$u" =~ \.deb$ ]] && url_match="$u" && break + done + fi + + if [[ -z "$url_match" ]]; then + msg_error "No suitable .deb asset found for $app" + rm -rf "$tmpdir" + return 1 + fi + + filename="${url_match##*/}" + curl $download_timeout -fsSL -o "$tmpdir/$filename" "$url_match" || { + msg_error "Download failed: $url_match" + rm -rf "$tmpdir" + return 1 + } + + chmod 644 "$tmpdir/$filename" + $STD apt install -y "$tmpdir/$filename" || { + $STD dpkg -i "$tmpdir/$filename" || { + msg_error "Both apt and dpkg installation failed" + rm -rf "$tmpdir" + return 1 + } + } + + ### Prebuild Mode ### + elif [[ "$mode" == "prebuild" ]]; then + local pattern="${6%\"}" + pattern="${pattern#\"}" + [[ -z "$pattern" ]] && { + msg_error "Mode 'prebuild' requires 6th parameter (asset filename pattern)" + rm -rf "$tmpdir" + return 1 + } + + local asset_url="" + for u in $(echo "$json" | jq -r '.assets[].browser_download_url'); do + filename_candidate="${u##*/}" + case "$filename_candidate" in + $pattern) + asset_url="$u" + break + ;; + esac + done + + [[ -z "$asset_url" ]] && { + msg_error "No asset matching '$pattern' found" + rm -rf "$tmpdir" + return 1 + } + + filename="${asset_url##*/}" + curl $download_timeout -fsSL -o "$tmpdir/$filename" "$asset_url" || { + msg_error "Download failed: $asset_url" + rm -rf "$tmpdir" + return 1 + } + + local unpack_tmp + unpack_tmp=$(mktemp -d) + mkdir -p "$target" + if [[ "${CLEAN_INSTALL:-0}" == "1" ]]; then + rm -rf "${target:?}/"* + fi + + if [[ "$filename" == *.zip ]]; then + ensure_dependencies unzip + unzip -q "$tmpdir/$filename" -d "$unpack_tmp" || { + msg_error "Failed to extract ZIP archive" + rm -rf "$tmpdir" "$unpack_tmp" + return 1 + } + elif [[ "$filename" == *.tar.* || "$filename" == *.tgz ]]; then + tar --no-same-owner -xf "$tmpdir/$filename" -C "$unpack_tmp" || { + msg_error "Failed to extract TAR archive" + rm -rf "$tmpdir" "$unpack_tmp" + return 1 + } + else + msg_error "Unsupported archive format: $filename" + rm -rf "$tmpdir" "$unpack_tmp" + return 1 + fi + + local top_dirs + top_dirs=$(find "$unpack_tmp" -mindepth 1 -maxdepth 1 -type d | wc -l) + local top_entries inner_dir + top_entries=$(find "$unpack_tmp" -mindepth 1 -maxdepth 1) + if [[ "$(echo "$top_entries" | wc -l)" -eq 1 && -d "$top_entries" ]]; then + inner_dir="$top_entries" + shopt -s dotglob nullglob + if compgen -G "$inner_dir/*" >/dev/null; then + cp -r "$inner_dir"/* "$target/" || { + msg_error "Failed to copy contents from $inner_dir to 
$target" + rm -rf "$tmpdir" "$unpack_tmp" + return 1 + } + else + msg_error "Inner directory is empty: $inner_dir" + rm -rf "$tmpdir" "$unpack_tmp" + return 1 + fi + shopt -u dotglob nullglob + else + shopt -s dotglob nullglob + if compgen -G "$unpack_tmp/*" >/dev/null; then + cp -r "$unpack_tmp"/* "$target/" || { + msg_error "Failed to copy contents to $target" + rm -rf "$tmpdir" "$unpack_tmp" + return 1 + } + else + msg_error "Unpacked archive is empty" + rm -rf "$tmpdir" "$unpack_tmp" + return 1 + fi + shopt -u dotglob nullglob + fi + + ### Singlefile Mode ### + elif [[ "$mode" == "singlefile" ]]; then + local pattern="${6%\"}" + pattern="${pattern#\"}" + [[ -z "$pattern" ]] && { + msg_error "Mode 'singlefile' requires 6th parameter (asset filename pattern)" + rm -rf "$tmpdir" + return 1 + } + + local asset_url="" + for u in $(echo "$json" | jq -r '.assets[].browser_download_url'); do + filename_candidate="${u##*/}" + case "$filename_candidate" in + $pattern) + asset_url="$u" + break + ;; + esac + done + + [[ -z "$asset_url" ]] && { + msg_error "No asset matching '$pattern' found" + rm -rf "$tmpdir" + return 1 + } + + filename="${asset_url##*/}" + mkdir -p "$target" + + local use_filename="${USE_ORIGINAL_FILENAME:-false}" + local target_file="$app" + [[ "$use_filename" == "true" ]] && target_file="$filename" + + curl $download_timeout -fsSL -o "$target/$target_file" "$asset_url" || { + msg_error "Download failed: $asset_url" + rm -rf "$tmpdir" + return 1 + } + + if [[ "$target_file" != *.jar && -f "$target/$target_file" ]]; then + chmod +x "$target/$target_file" + fi + + else + msg_error "Unknown mode: $mode" + rm -rf "$tmpdir" + return 1 + fi + + echo "$version" >"$version_file" + msg_ok "Deployed: $app ($version)" + rm -rf "$tmpdir" +} + # ------------------------------------------------------------------------------ # Loads LOCAL_IP from persistent store or detects if missing. 
# From 649fe3420b269c1bc3f96174ef40d65fb5caa2f8 Mon Sep 17 00:00:00 2001 From: "CanbiZ (MickLesk)" <47820557+MickLesk@users.noreply.github.com> Date: Mon, 2 Feb 2026 08:42:19 +0100 Subject: [PATCH 220/228] Update alpine-rustypaste-install.sh --- install/alpine-rustypaste-install.sh | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/install/alpine-rustypaste-install.sh b/install/alpine-rustypaste-install.sh index 1f645c71e..c862405bb 100644 --- a/install/alpine-rustypaste-install.sh +++ b/install/alpine-rustypaste-install.sh @@ -23,6 +23,29 @@ sed -i 's|^address = ".*"|address = "0.0.0.0:8000"|' /etc/rustypaste/config.toml msg_ok "Configured RustyPaste" msg_info "Creating Service" +cat <<'EOF' >/etc/init.d/rustypaste +#!/sbin/openrc-run + +name="rustypaste" +description="RustyPaste - A minimal file upload/pastebin service" +command="/usr/bin/rustypaste" +command_args="" +command_user="root" +command_background=true +pidfile="/run/${RC_SVCNAME}.pid" +directory="/var/lib/rustypaste" + +depend() { + need net + after firewall +} + +start_pre() { + export CONFIG=/etc/rustypaste/config.toml + checkpath --directory --owner root:root --mode 0755 /var/lib/rustypaste +} +EOF +chmod +x /etc/init.d/rustypaste $STD rc-update add rustypaste default $STD rc-service rustypaste start msg_ok "Created Service" From c73a60ebe3e2dd6aa7755a441eddefebdea5bd80 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Mon, 2 Feb 2026 08:02:37 +0000 Subject: [PATCH 221/228] Delete kitchenowl (ct) after migration to ProxmoxVE (#1392) Co-authored-by: github-actions[bot] --- ct/headers/kitchenowl | 6 -- ct/kitchenowl.sh | 78 -------------- frontend/public/json/kitchenowl.json | 35 ------- install/kitchenowl-install.sh | 146 --------------------------- 4 files changed, 265 deletions(-) delete mode 100644 ct/headers/kitchenowl delete mode 100644 ct/kitchenowl.sh delete mode 100644 frontend/public/json/kitchenowl.json delete mode 100644 install/kitchenowl-install.sh diff --git a/ct/headers/kitchenowl b/ct/headers/kitchenowl deleted file mode 100644 index 6f20ad457..000000000 --- a/ct/headers/kitchenowl +++ /dev/null @@ -1,6 +0,0 @@ - __ __ _ __ __ ____ __ - / //_/(_) /______/ /_ ___ ____ / __ \_ __/ / - / ,< / / __/ ___/ __ \/ _ \/ __ \/ / / / | /| / / / - / /| |/ / /_/ /__/ / / / __/ / / / /_/ /| |/ |/ / / -/_/ |_/_/\__/\___/_/ /_/\___/_/ /_/\____/ |__/|__/_/ - diff --git a/ct/kitchenowl.sh b/ct/kitchenowl.sh deleted file mode 100644 index 7517980d0..000000000 --- a/ct/kitchenowl.sh +++ /dev/null @@ -1,78 +0,0 @@ -#!/usr/bin/env bash -source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func) -# Copyright (c) 2021-2025 community-scripts ORG -# Author: snazzybean -# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE -# Source: https://github.com/TomBursch/kitchenowl - -APP="KitchenOwl" -var_tags="${var_tags:-food;recipes}" -var_cpu="${var_cpu:-1}" -var_ram="${var_ram:-2048}" -var_disk="${var_disk:-6}" -var_os="${var_os:-debian}" -var_version="${var_version:-13}" -var_unprivileged="${var_unprivileged:-1}" - -header_info "$APP" -variables -color -catch_errors - -function update_script() { - header_info - check_container_storage - check_container_resources - - if [[ ! -d /opt/kitchenowl ]]; then - msg_error "No ${APP} Installation Found!" 
- exit - fi - - if check_for_gh_release "kitchenowl" "TomBursch/kitchenowl"; then - msg_info "Stopping Service" - systemctl stop kitchenowl - msg_ok "Stopped Service" - - msg_info "Backing up KitchenOwl" - mkdir -p /opt/kitchenowl_backup - cp -r /opt/kitchenowl/data /opt/kitchenowl_backup/ - cp -f /opt/kitchenowl/kitchenowl.env /opt/kitchenowl_backup/ - msg_ok "Backed up KitchenOwl" - - CLEAN_INSTALL=1 fetch_and_deploy_gh_release "kitchenowl" "TomBursch/kitchenowl" "tarball" "latest" "/opt/kitchenowl" - CLEAN_INSTALL=1 fetch_and_deploy_gh_release "kitchenowl-web" "TomBursch/kitchenowl" "prebuild" "latest" "/opt/kitchenowl/web" "kitchenowl_Web.tar.gz" - - msg_info "Restoring KitchenOwl data" - sed -i 's/default=True/default=False/' /opt/kitchenowl/backend/wsgi.py - cp -r /opt/kitchenowl_backup/data /opt/kitchenowl/ - cp -f /opt/kitchenowl_backup/kitchenowl.env /opt/kitchenowl/ - rm -rf /opt/kitchenowl_backup - msg_ok "Restored KitchenOwl data" - - msg_info "Updating KitchenOwl" - cd /opt/kitchenowl/backend - $STD uv sync --frozen - cd /opt/kitchenowl/backend - set -a - source /opt/kitchenowl/kitchenowl.env - set +a - $STD uv run flask db upgrade - msg_ok "Updated KitchenOwl" - - msg_info "Starting Service" - systemctl start kitchenowl - msg_ok "Started Service" - msg_ok "Updated successfully!" - fi - exit -} - -start -build_container -description - -msg_ok "Completed Successfully!\n" -echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" -echo -e "${INFO}${YW} Access it using the following URL:${CL}" -echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:80${CL}" diff --git a/frontend/public/json/kitchenowl.json b/frontend/public/json/kitchenowl.json deleted file mode 100644 index e6c150616..000000000 --- a/frontend/public/json/kitchenowl.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "KitchenOwl", - "slug": "kitchenowl", - "categories": [ - 13 - ], - "date_created": "2025-12-28", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 80, - "documentation": "https://docs.kitchenowl.org/", - "website": "https://kitchenowl.org/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/kitchenowl.webp", - "config_path": "/opt/kitchenowl/kitchenowl.env", - "description": "KitchenOwl is a smart self-hosted grocery list and recipe manager with real-time synchronization, recipe management, meal planning, and expense tracking.", - "install_methods": [ - { - "type": "default", - "script": "ct/kitchenowl.sh", - "resources": { - "cpu": 1, - "ram": 2048, - "hdd": 6, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/install/kitchenowl-install.sh b/install/kitchenowl-install.sh deleted file mode 100644 index 72d88030a..000000000 --- a/install/kitchenowl-install.sh +++ /dev/null @@ -1,146 +0,0 @@ -#!/usr/bin/env bash - -# Copyright (c) 2021-2025 community-scripts ORG -# Author: snazzybean -# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE -# Source: https://github.com/TomBursch/kitchenowl - -source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" -color -verb_ip6 -catch_errors -setting_up_container -network_check -update_os - -msg_info "Installing Dependencies" -$STD apt install -y \ - nginx \ - build-essential \ - gfortran \ - pkg-config \ - ninja-build \ - autoconf \ - automake \ - libpq-dev \ - libffi-dev \ - libssl-dev \ - libpcre2-dev \ - libre2-dev \ - libxml2-dev \ - libxslt-dev \ - libopenblas-dev \ - liblapack-dev \ - zlib1g-dev \ 
- libjpeg62-turbo-dev \ - libsqlite3-dev \ - libexpat1-dev \ - libicu-dev -msg_ok "Installed Dependencies" - -PYTHON_VERSION="3.14" setup_uv -fetch_and_deploy_gh_release "kitchenowl" "TomBursch/kitchenowl" "tarball" "latest" "/opt/kitchenowl" -rm -rf /opt/kitchenowl/web -fetch_and_deploy_gh_release "kitchenowl-web" "TomBursch/kitchenowl" "prebuild" "latest" "/opt/kitchenowl/web" "kitchenowl_Web.tar.gz" - -msg_info "Setting up KitchenOwl" -cd /opt/kitchenowl/backend -#rm -f uv.lock -$STD uv sync --no-dev -sed -i 's/default=True/default=False/' /opt/kitchenowl/backend/wsgi.py -mkdir -p /nltk_data -$STD uv run python -m nltk.downloader -d /nltk_data averaged_perceptron_tagger_eng -JWT_SECRET=$(openssl rand -hex 32) -mkdir -p /opt/kitchenowl/data -cat </opt/kitchenowl/kitchenowl.env -STORAGE_PATH=/opt/kitchenowl/data -JWT_SECRET_KEY=${JWT_SECRET} -NLTK_DATA=/nltk_data -FRONT_URL=http://${LOCAL_IP} -FLASK_APP=wsgi.py -FLASK_ENV=production -EOF -set -a -source /opt/kitchenowl/kitchenowl.env -set +a -$STD uv run flask db upgrade -msg_ok "Set up KitchenOwl" - -msg_info "Creating Systemd Service" -cat </etc/systemd/system/kitchenowl.service -[Unit] -Description=KitchenOwl Backend -After=network.target - -[Service] -Type=simple -User=root -WorkingDirectory=/opt/kitchenowl/backend -EnvironmentFile=/opt/kitchenowl/kitchenowl.env -ExecStart=/usr/local/bin/uv run wsgi.py -Restart=on-failure -RestartSec=5 - -[Install] -WantedBy=multi-user.target -EOF -systemctl enable -q --now kitchenowl -msg_ok "Created and Started Service" - -msg_info "Configuring Nginx" -rm -f /etc/nginx/sites-enabled/default -cat <<'EOF' >/etc/nginx/sites-available/kitchenowl.conf -server { - listen 80; - server_name _; - - root /opt/kitchenowl/web; - index index.html; - - client_max_body_size 100M; - - # Security Headers - add_header X-Frame-Options "SAMEORIGIN" always; - add_header X-Content-Type-Options "nosniff" always; - add_header X-XSS-Protection "1; mode=block" always; - add_header Referrer-Policy "strict-origin-when-cross-origin" always; - - location / { - try_files $uri $uri/ /index.html; - } - - location /api { - proxy_pass http://127.0.0.1:5000; - proxy_http_version 1.1; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header X-Forwarded-Proto $scheme; - proxy_connect_timeout 60s; - proxy_send_timeout 60s; - proxy_read_timeout 60s; - } - - location /socket.io { - proxy_pass http://127.0.0.1:5000; - proxy_http_version 1.1; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header X-Forwarded-Proto $scheme; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection "upgrade"; - # WebSocket Timeouts - allow long-lived connections - proxy_read_timeout 86400s; - proxy_send_timeout 86400s; - } -} -EOF -ln -sf /etc/nginx/sites-available/kitchenowl.conf /etc/nginx/sites-enabled/ -rm -f /etc/nginx/sites-enabled/default -$STD systemctl reload nginx -msg_ok "Configured Nginx" - -motd_ssh -customize -cleanup_lxc From 4d7ba22505e65943a6e814ad6e900f77c53d9266 Mon Sep 17 00:00:00 2001 From: "CanbiZ (MickLesk)" <47820557+MickLesk@users.noreply.github.com> Date: Mon, 2 Feb 2026 09:09:07 +0100 Subject: [PATCH 222/228] close rustypaste --- ct/alpine-rustypaste.sh | 51 -------------------- ct/rustypaste.sh | 69 ---------------------------- frontend/public/json/rustypaste.json | 51 -------------------- 
install/alpine-rustypaste-install.sh | 55 ---------------------- install/rustypaste-install.sh | 43 ----------------- 5 files changed, 269 deletions(-) delete mode 100644 ct/alpine-rustypaste.sh delete mode 100644 ct/rustypaste.sh delete mode 100644 frontend/public/json/rustypaste.json delete mode 100644 install/alpine-rustypaste-install.sh delete mode 100644 install/rustypaste-install.sh diff --git a/ct/alpine-rustypaste.sh b/ct/alpine-rustypaste.sh deleted file mode 100644 index 893d0d05d..000000000 --- a/ct/alpine-rustypaste.sh +++ /dev/null @@ -1,51 +0,0 @@ -#!/usr/bin/env bash -source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func) -# Copyright (c) 2021-2026 community-scripts ORG -# Author: MickLesk (CanbiZ) -# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE -# Source: https://github.com/orhun/rustypaste - -APP="Alpine-RustyPaste" -var_tags="${var_tags:-alpine;pastebin;storage}" -var_cpu="${var_cpu:-1}" -var_ram="${var_ram:-256}" -var_disk="${var_disk:-4}" -var_os="${var_os:-alpine}" -var_version="${var_version:-3.23}" -var_unprivileged="${var_unprivileged:-1}" - -header_info "$APP" -variables -color -catch_errors - -function update_script() { - header_info - check_container_storage - check_container_resources - - if ! apk info -e rustypaste >/dev/null 2>&1; then - msg_error "No ${APP} Installation Found!" - exit - fi - - msg_info "Updating RustyPaste" - $STD apk update - $STD apk upgrade rustypaste --repository=https://dl-cdn.alpinelinux.org/alpine/edge/community - msg_ok "Updated RustyPaste" - - msg_info "Restarting Services" - $STD rc-service rustypaste restart - msg_ok "Restarted Services" - msg_ok "Updated successfully!" - exit -} - -start -build_container -description - -msg_ok "Completed successfully!\n" -echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" -echo -e "${INFO}${YW} Access it using the following URL:${CL}" -echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:8000${CL}" diff --git a/ct/rustypaste.sh b/ct/rustypaste.sh deleted file mode 100644 index 2da17f16e..000000000 --- a/ct/rustypaste.sh +++ /dev/null @@ -1,69 +0,0 @@ -#!/usr/bin/env bash -source <(curl -s https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func) -# Copyright (c) 2021-2026 community-scripts ORG -# Author: GoldenSpringness -# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE -# Source: https://github.com/orhun/rustypaste - -APP="rustypaste" -var_tags="${var_tags:-pastebin;storage}" -var_cpu="${var_cpu:-1}" -var_ram="${var_ram:-1024}" -var_disk="${var_disk:-20}" -var_os="${var_os:-debian}" -var_version="${var_version:-13}" -var_unprivileged="${var_unprivileged:-1}" - -header_info "$APP" -variables -color -catch_errors - -function update_script() { - header_info - check_container_storage - check_container_resources - - if [[ ! -f /opt/rustypaste/rustypaste ]]; then - msg_error "No ${APP} Installation Found!" 
- exit - fi - - if check_for_gh_release "rustypaste" "orhun/rustypaste"; then - msg_info "Stopping Services" - systemctl stop rustypaste - msg_ok "Stopped Services" - - msg_info "Creating Backup" - tar -czf "/opt/rustypaste_backup_$(date +%F).tar.gz" /opt/rustypaste/upload 2>/dev/null || true - cp /opt/rustypaste/config.toml /tmp/rustypaste_config.toml.bak - msg_ok "Backup Created" - - CLEAN_INSTALL=1 fetch_and_deploy_gh_release "rustypaste" "orhun/rustypaste" "prebuild" "latest" "/opt/rustypaste" "*x86_64-unknown-linux-gnu.tar.gz" - - msg_info "Restoring Data" - mv /tmp/rustypaste_config.toml.bak /opt/rustypaste/config.toml - tar -xzf "/opt/rustypaste_backup_$(date +%F).tar.gz" -C /opt/rustypaste/upload 2>/dev/null || true - rm -rf /opt/rustypaste_backup_$(date +%F).tar.gz - msg_ok "Restored Data" - - msg_info "Starting Services" - systemctl start rustypaste - msg_ok "Started Services" - msg_ok "Updated successfully!" - fi - - if check_for_gh_release "rustypaste-cli" "orhun/rustypaste-cli"; then - fetch_and_deploy_gh_release "rustypaste-cli" "orhun/rustypaste-cli" "prebuild" "latest" "/usr/local/bin" "*x86_64-unknown-linux-gnu.tar.gz" - fi - exit -} - -start -build_container -description - -msg_ok "Completed successfully!\n" -echo -e "${CREATING}${GN}rustypaste setup has been successfully initialized!${CL}" -echo -e "${INFO}${YW} Access it using the following URL:${CL}" -echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:8000${CL}" diff --git a/frontend/public/json/rustypaste.json b/frontend/public/json/rustypaste.json deleted file mode 100644 index 2ac17c188..000000000 --- a/frontend/public/json/rustypaste.json +++ /dev/null @@ -1,51 +0,0 @@ -{ - "name": "RustyPaste", - "slug": "rustypaste", - "categories": [ - 12 - ], - "date_created": "2025-12-22", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8000, - "documentation": "https://github.com/orhun/rustypaste", - "config_path": "/opt/rustypaste/config.toml", - "website": "https://github.com/orhun/rustypaste", - "logo": "https://github.com/orhun/rustypaste/raw/master/img/rustypaste_logo.png", - "description": "Rustypaste is a minimal file upload/pastebin service.", - "install_methods": [ - { - "type": "default", - "script": "ct/rustypaste.sh", - "resources": { - "cpu": 1, - "ram": 1024, - "hdd": 20, - "os": "Debian", - "version": "13" - } - }, - { - "type": "alpine", - "script": "ct/alpine-rustypaste.sh", - "resources": { - "cpu": 1, - "ram": 256, - "hdd": 4, - "os": "Alpine", - "version": "3.22" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "When updating the script it will backup the whole project including all the uploaded files, make sure to extract it to a safe location or remove", - "type": "info" - } - ] -} diff --git a/install/alpine-rustypaste-install.sh b/install/alpine-rustypaste-install.sh deleted file mode 100644 index c862405bb..000000000 --- a/install/alpine-rustypaste-install.sh +++ /dev/null @@ -1,55 +0,0 @@ -#!/usr/bin/env bash - -# Copyright (c) 2021-2026 community-scripts ORG -# Author: MickLesk (CanbiZ) -# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE -# Source: https://github.com/orhun/rustypaste - -source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" -color -verb_ip6 -catch_errors -setting_up_container -network_check -update_os - -msg_info "Installing RustyPaste" -$STD apk add --no-cache rustypaste --repository=https://dl-cdn.alpinelinux.org/alpine/edge/community -msg_ok "Installed RustyPaste" - 
-msg_info "Configuring RustyPaste" -mkdir -p /var/lib/rustypaste -sed -i 's|^address = ".*"|address = "0.0.0.0:8000"|' /etc/rustypaste/config.toml -msg_ok "Configured RustyPaste" - -msg_info "Creating Service" -cat <<'EOF' >/etc/init.d/rustypaste -#!/sbin/openrc-run - -name="rustypaste" -description="RustyPaste - A minimal file upload/pastebin service" -command="/usr/bin/rustypaste" -command_args="" -command_user="root" -command_background=true -pidfile="/run/${RC_SVCNAME}.pid" -directory="/var/lib/rustypaste" - -depend() { - need net - after firewall -} - -start_pre() { - export CONFIG=/etc/rustypaste/config.toml - checkpath --directory --owner root:root --mode 0755 /var/lib/rustypaste -} -EOF -chmod +x /etc/init.d/rustypaste -$STD rc-update add rustypaste default -$STD rc-service rustypaste start -msg_ok "Created Service" - -motd_ssh -customize -cleanup_lxc diff --git a/install/rustypaste-install.sh b/install/rustypaste-install.sh deleted file mode 100644 index 53e2caf57..000000000 --- a/install/rustypaste-install.sh +++ /dev/null @@ -1,43 +0,0 @@ -#!/usr/bin/env bash - -# Copyright (c) 2021-2026 community-scripts ORG -# Author: GoldenSpringness | MickLesk (CanbiZ) -# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE -# Source: https://github.com/orhun/rustypaste - -source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" -color -verb_ip6 -catch_errors -setting_up_container -network_check -update_os - -fetch_and_deploy_gh_release "rustypaste" "orhun/rustypaste" "prebuild" "latest" "/opt/rustypaste" "*x86_64-unknown-linux-gnu.tar.gz" -fetch_and_deploy_gh_release "rustypaste-cli" "orhun/rustypaste-cli" "prebuild" "latest" "/usr/local/bin" "*x86_64-unknown-linux-gnu.tar.gz" - -msg_info "Setting up RustyPaste" -cd /opt/rustypaste -sed -i 's|^address = ".*"|address = "0.0.0.0:8000"|' config.toml -msg_ok "Set up RustyPaste" - -msg_info "Creating Service" -cat </etc/systemd/system/rustypaste.service -[Unit] -Description=rustypaste Service -After=network.target - -[Service] -WorkingDirectory=/opt/rustypaste -ExecStart=/opt/rustypaste/rustypaste -Restart=always - -[Install] -WantedBy=multi-user.target -EOF -systemctl enable -q --now rustypaste -msg_ok "Created Service" - -motd_ssh -customize -cleanup_lxc From 134dabe9c762d29cff3744d20291289e809abf99 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Mon, 2 Feb 2026 08:10:06 +0000 Subject: [PATCH 223/228] Delete rustypaste (ct) after migration to ProxmoxVE (#1394) Co-authored-by: github-actions[bot] --- ct/headers/rustypaste | 6 ------ 1 file changed, 6 deletions(-) delete mode 100644 ct/headers/rustypaste diff --git a/ct/headers/rustypaste b/ct/headers/rustypaste deleted file mode 100644 index c691b0a30..000000000 --- a/ct/headers/rustypaste +++ /dev/null @@ -1,6 +0,0 @@ - __ __ - _______ _______/ /___ ______ ____ ______/ /____ - / ___/ / / / ___/ __/ / / / __ \/ __ `/ ___/ __/ _ \ - / / / /_/ (__ ) /_/ /_/ / /_/ / /_/ (__ ) /_/ __/ -/_/ \__,_/____/\__/\__, / .___/\__,_/____/\__/\___/ - /____/_/ From 2b31e79a4b50970c02a0fafa95110cadcb580f23 Mon Sep 17 00:00:00 2001 From: "CanbiZ (MickLesk)" <47820557+MickLesk@users.noreply.github.com> Date: Mon, 2 Feb 2026 09:38:12 +0100 Subject: [PATCH 224/228] test --- ct/vaultwarden.sh | 115 ++++++++++++++ install/opencloud-install.sh.bak | 263 ------------------------------- install/vaultwarden-install.sh | 100 ++++++++++++ 3 files changed, 215 insertions(+), 263 deletions(-) create mode 100644 ct/vaultwarden.sh delete mode 
100644 install/opencloud-install.sh.bak create mode 100644 install/vaultwarden-install.sh diff --git a/ct/vaultwarden.sh b/ct/vaultwarden.sh new file mode 100644 index 000000000..fc8c5db45 --- /dev/null +++ b/ct/vaultwarden.sh @@ -0,0 +1,115 @@ +#!/usr/bin/env bash +source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func) +# Copyright (c) 2021-2026 tteck +# Author: tteck (tteckster) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://github.com/dani-garcia/vaultwarden + +APP="Vaultwarden" +var_tags="${var_tags:-password-manager}" +var_cpu="${var_cpu:-4}" +var_ram="${var_ram:-6144}" +var_disk="${var_disk:-20}" +var_os="${var_os:-debian}" +var_version="${var_version:-13}" +var_unprivileged="${var_unprivileged:-1}" + +header_info "$APP" +variables +color +catch_errors + +function update_script() { + header_info + check_container_storage + check_container_resources + if [[ ! -f /etc/systemd/system/vaultwarden.service ]]; then + msg_error "No ${APP} Installation Found!" + exit + fi + + VAULT=$(get_latest_github_release "dani-garcia/vaultwarden") + WVRELEASE=$(get_latest_github_release "dani-garcia/bw_web_builds") + + UPD=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "SUPPORT" --radiolist --cancel-button Exit-Script "Spacebar = Select" 11 58 3 \ + "1" "VaultWarden $VAULT" ON \ + "2" "Web-Vault $WVRELEASE" OFF \ + "3" "Set Admin Token" OFF \ + 3>&1 1>&2 2>&3) + + if [ "$UPD" == "1" ]; then + if check_for_gh_release "vaultwarden" "dani-garcia/vaultwarden"; then + msg_info "Stopping Service" + systemctl stop vaultwarden + msg_ok "Stopped Service" + + fetch_and_deploy_gh_release "vaultwarden" "dani-garcia/vaultwarden" "tarball" "latest" "/tmp/vaultwarden-src" + + msg_info "Updating VaultWarden to $VAULT (Patience)" + cd /tmp/vaultwarden-src + $STD cargo build --features "sqlite,mysql,postgresql" --release + if [[ -f /usr/bin/vaultwarden ]]; then + cp target/release/vaultwarden /usr/bin/ + else + cp target/release/vaultwarden /opt/vaultwarden/bin/ + fi + cd ~ && rm -rf /tmp/vaultwarden-src + msg_ok "Updated VaultWarden to ${VAULT}" + + msg_info "Starting Service" + systemctl start vaultwarden + msg_ok "Started Service" + msg_ok "Updated successfully!" + else + msg_ok "VaultWarden is already up-to-date" + fi + exit + fi + + if [ "$UPD" == "2" ]; then + if check_for_gh_release "vaultwarden_webvault" "dani-garcia/bw_web_builds"; then + msg_info "Stopping Service" + systemctl stop vaultwarden + msg_ok "Stopped Service" + + fetch_and_deploy_gh_release "vaultwarden_webvault" "dani-garcia/bw_web_builds" "prebuild" "latest" "/opt/vaultwarden" "bw_web_*.tar.gz" + + msg_info "Updating Web-Vault to $WVRELEASE" + rm -rf /opt/vaultwarden/web-vault + chown -R root:root /opt/vaultwarden/web-vault/ + msg_ok "Updated Web-Vault to ${WVRELEASE}" + + msg_info "Starting Service" + systemctl start vaultwarden + msg_ok "Started Service" + msg_ok "Updated successfully!" 
+ else + msg_ok "Web-Vault is already up-to-date" + fi + exit + fi + + if [ "$UPD" == "3" ]; then + if NEWTOKEN=$(whiptail --backtitle "Proxmox VE Helper Scripts" --passwordbox "Set the ADMIN_TOKEN" 10 58 3>&1 1>&2 2>&3); then + if [[ -z "$NEWTOKEN" ]]; then exit; fi + ensure_dependencies argon2 + TOKEN=$(echo -n "${NEWTOKEN}" | argon2 "$(openssl rand -base64 32)" -t 2 -m 16 -p 4 -l 64 -e) + sed -i "s|ADMIN_TOKEN=.*|ADMIN_TOKEN='${TOKEN}'|" /opt/vaultwarden/.env + if [[ -f /opt/vaultwarden/data/config.json ]]; then + sed -i "s|\"admin_token\":.*|\"admin_token\": \"${TOKEN}\"|" /opt/vaultwarden/data/config.json + fi + systemctl restart vaultwarden + msg_ok "Admin token updated" + fi + exit + fi +} + +start +build_container +description + +msg_ok "Completed successfully!\n" +echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" +echo -e "${INFO}${YW} Access it using the following URL:${CL}" +echo -e "${TAB}${GATEWAY}${BGN}https://${IP}:8000${CL}" diff --git a/install/opencloud-install.sh.bak b/install/opencloud-install.sh.bak deleted file mode 100644 index 0ed6cc619..000000000 --- a/install/opencloud-install.sh.bak +++ /dev/null @@ -1,263 +0,0 @@ -#!/usr/bin/env bash - -# Copyright (c) 2021-2026 community-scripts ORG -# Author: vhsdream -# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE -# Source: https://opencloud.eu - -source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" -color -verb_ip6 -catch_errors -setting_up_container -network_check -update_os - -echo -e "${TAB3}${INFO}${YW} Leave empty to use IP-based localhost mode (no Collabora)${CL}" -read -r -p "${TAB3}Enter the hostname of your OpenCloud server (eg cloud.domain.tld): " oc_host - -if [[ -z "$oc_host" ]]; then - # Localhost/IP mode - no TLS, no Collabora - OC_HOST="${LOCAL_IP}" - LOCALHOST_MODE=true - msg_info "Using localhost mode with IP: ${LOCAL_IP}" - msg_warn "Collabora requires TLS and will be skipped in localhost mode" -else - OC_HOST="$oc_host" - LOCALHOST_MODE=false - read -r -p "${TAB3}Enter the hostname of your Collabora server [collabora.${OC_HOST#*.}]: " collabora_host - COLLABORA_HOST="${collabora_host:-collabora.${OC_HOST#*.}}" - read -r -p "${TAB3}Enter the hostname of your WOPI server [wopiserver.${OC_HOST#*.}]: " wopi_host - WOPI_HOST="${wopi_host:-wopiserver.${OC_HOST#*.}}" -fi - -# Collabora Online - only install if not in localhost mode (requires TLS) -if [[ "$LOCALHOST_MODE" != true ]]; then - msg_info "Installing Collabora Online" - curl -fsSL https://collaboraoffice.com/downloads/gpg/collaboraonline-release-keyring.gpg -o /etc/apt/keyrings/collaboraonline-release-keyring.gpg - cat </etc/apt/sources.list.d/collaboraonline.sources -Types: deb -URIs: https://www.collaboraoffice.com/repos/CollaboraOnline/CODE-deb -Suites: ./ -Signed-By: /etc/apt/keyrings/collaboraonline-release-keyring.gpg -EOF - $STD apt-get update - $STD apt-get install -y coolwsd code-brand - systemctl stop coolwsd - mkdir -p /etc/systemd/system/coolwsd.service.d - cat </etc/systemd/system/coolwsd.service.d/override.conf -[Unit] -Before=opencloud-wopi.service -EOF - systemctl daemon-reload - COOLPASS="$(openssl rand -base64 36)" - $STD runuser -u cool -- coolconfig set-admin-password --user=admin --password="$COOLPASS" - echo "$COOLPASS" >~/.coolpass - msg_ok "Installed Collabora Online" -fi - -# OpenCloud -fetch_and_deploy_gh_release "opencloud" "opencloud-eu/opencloud" "singlefile" "v5.0.1" "/usr/bin" "opencloud-*-linux-amd64" - -msg_info "Configuring OpenCloud" 
-DATA_DIR="/var/lib/opencloud/" -CONFIG_DIR="/etc/opencloud" -ENV_FILE="${CONFIG_DIR}/opencloud.env" -mkdir -p "$DATA_DIR" "$CONFIG_DIR"/assets/apps - -curl -fsSL https://raw.githubusercontent.com/opencloud-eu/opencloud-compose/refs/heads/main/config/opencloud/csp.yaml -o "$CONFIG_DIR"/csp.yaml -curl -fsSL https://raw.githubusercontent.com/opencloud-eu/opencloud-compose/refs/heads/main/config/opencloud/proxy.yaml -o "$CONFIG_DIR"/proxy.yaml.bak - -if [[ "$LOCALHOST_MODE" == true ]]; then - OC_URL="http://${OC_HOST}:9200" - OC_INSECURE="true" -else - OC_URL="https://${OC_HOST}" - OC_INSECURE="false" -fi - -# Create web config directory and config.json -mkdir -p "$CONFIG_DIR"/web -cat <"$CONFIG_DIR"/web/config.json -{ - "server": "${OC_URL}", - "theme": "https://raw.githubusercontent.com/opencloud-eu/opencloud-compose/refs/heads/main/config/opencloud/web/themes/opencloud/theme.json", - "openIdConnect": { - "metadata_url": "${OC_URL}/.well-known/openid-configuration", - "authority": "${OC_URL}", - "client_id": "web", - "response_type": "code", - "scope": "openid profile email" - } -} -EOF - -cat <"$ENV_FILE" -OC_URL=${OC_URL} -OC_INSECURE=${OC_INSECURE} -IDM_CREATE_DEMO_USERS=false -OC_LOG_LEVEL=warning -OC_CONFIG_DIR=${CONFIG_DIR} -OC_BASE_DATA_PATH=${DATA_DIR} -STORAGE_SYSTEM_OC_ROOT=${DATA_DIR}/storage/metadata - -## Web -WEB_ASSET_CORE_PATH=${CONFIG_DIR}/web/assets -WEB_ASSET_APPS_PATH=${CONFIG_DIR}/web/assets/apps -WEB_UI_CONFIG_FILE=${CONFIG_DIR}/web/config.json -# WEB_ASSET_THEMES_PATH=${CONFIG_DIR}/web/assets/themes -# WEB_UI_THEME_PATH= - -## Frontend -FRONTEND_DISABLE_RADICALE=true -FRONTEND_GROUPWARE_ENABLED=false -GRAPH_INCLUDE_OCM_SHAREES=true - -## Proxy -PROXY_TLS=false -PROXY_CSP_CONFIG_FILE_LOCATION=${CONFIG_DIR}/csp.yaml - -## Collaboration - requires VALID TLS (disabled in localhost mode) -# COLLABORA_DOMAIN= -# COLLABORATION_APP_NAME="CollaboraOnline" -# COLLABORATION_APP_PRODUCT="Collabora" -# COLLABORATION_APP_ADDR= -# COLLABORATION_APP_INSECURE=false -# COLLABORATION_HTTP_ADDR=0.0.0.0:9300 -# COLLABORATION_WOPI_SRC= -# COLLABORATION_JWT_SECRET= - -## Notifications - Email settings -# NOTIFICATIONS_SMTP_HOST= -# NOTIFICATIONS_SMTP_PORT= -# NOTIFICATIONS_SMTP_SENDER= -# NOTIFICATIONS_SMTP_USERNAME= -# NOTIFICATIONS_SMTP_PASSWORD= -# NOTIFICATIONS_SMTP_AUTHENTICATION=login -## Encryption method. Possible values are 'starttls', 'ssltls' and 'none' -# NOTIFICATIONS_SMTP_ENCRYPTION=starttls -## Allow insecure connections. Defaults to false. -# NOTIFICATIONS_SMTP_INSECURE=false - -## Start additional services at runtime -## Examples: notifications, antivirus etc. -## Do not uncomment unless configured above. 
-# OC_ADD_RUN_SERVICES="notifications" - -## OpenID - via web browser -## uncomment for OpenID in general -# OC_EXCLUDE_RUN_SERVICES=idp -# OC_OIDC_ISSUER= -# IDP_DOMAIN= -# PROXY_OIDC_ACCESS_TOKEN_VERIFY_METHOD=none -# PROXY_OIDC_REWRITE_WELLKNOWN=true -# PROXY_USER_OIDC_CLAIM=preferred_username -# PROXY_USER_CS3_CLAIM=username -## automatically create accounts -# PROXY_AUTOPROVISION_ACCOUNTS=true -# WEB_OIDC_SCOPE=openid profile email groups -# GRAPH_ASSIGN_DEFAULT_USER_ROLE=false -# -## uncomment below if using PocketID -# WEB_OIDC_CLIENT_ID= -# WEB_OIDC_METADATA_URL=/.well-known/openid-configuration - -## Full Text Search - Apache Tika -## Requires a separate install of Tika - see https://community-scripts.github.io/ProxmoxVE/scripts?id=apache-tika -# SEARCH_EXTRACTOR_TYPE=tika -# FRONTEND_FULL_TEXT_SEARCH_ENABLED=true -# SEARCH_EXTRACTOR_TIKA_TIKA_URL= - -## External storage test - Only NFS v4.2+ is supported -## User files -# STORAGE_USERS_POSIX_ROOT= -EOF - -cat </etc/systemd/system/opencloud.service -[Unit] -Description=OpenCloud server -After=network-online.target - -[Service] -Type=simple -User=opencloud -Group=opencloud -EnvironmentFile=${ENV_FILE} -ExecStart=/usr/bin/opencloud server -Restart=always - -[Install] -WantedBy=multi-user.target -EOF - -if [[ "$LOCALHOST_MODE" != true ]]; then - cat </etc/systemd/system/opencloud-wopi.service -[Unit] -Description=OpenCloud WOPI Server -Wants=coolwsd.service -After=opencloud.service coolwsd.service - -[Service] -Type=simple -User=opencloud -Group=opencloud -EnvironmentFile=${ENV_FILE} -ExecStartPre=/bin/sleep 10 -ExecStart=/usr/bin/opencloud collaboration server -Restart=always -KillSignal=SIGKILL -KillMode=mixed -TimeoutStopSec=10 - -[Install] -WantedBy=multi-user.target -EOF - - # Append active Collabora config to env file - cat <>"$ENV_FILE" - -## Collaboration - active configuration -COLLABORA_DOMAIN=${COLLABORA_HOST} -COLLABORATION_APP_NAME="CollaboraOnline" -COLLABORATION_APP_PRODUCT="Collabora" -COLLABORATION_APP_ADDR=https://${COLLABORA_HOST} -COLLABORATION_APP_INSECURE=false -COLLABORATION_HTTP_ADDR=0.0.0.0:9300 -COLLABORATION_WOPI_SRC=https://${WOPI_HOST} -COLLABORATION_JWT_SECRET= -EOF - - $STD runuser -u cool -- coolconfig set ssl.enable false - $STD runuser -u cool -- coolconfig set ssl.termination true - $STD runuser -u cool -- coolconfig set ssl.ssl_verification true - sed -i "s|CSP2\"/>|CSP2\">frame-ancestors https://${OC_HOST}|" /etc/coolwsd/coolwsd.xml -fi - -useradd -r -M -s /usr/sbin/nologin opencloud -chown -R opencloud:opencloud "$CONFIG_DIR" "$DATA_DIR" - -if [[ "$LOCALHOST_MODE" == true ]]; then - $STD runuser -u opencloud -- opencloud init --config-path "$CONFIG_DIR" --insecure yes -else - $STD runuser -u opencloud -- opencloud init --config-path "$CONFIG_DIR" --insecure no -fi - -OPENCLOUD_SECRET="$(sed -n '/jwt/p' "$CONFIG_DIR"/opencloud.yaml | awk '{print $2}')" -if [[ "$LOCALHOST_MODE" != true ]]; then - sed -i "s/COLLABORATION_JWT_SECRET=/&${OPENCLOUD_SECRET//&/\\&}/" "$ENV_FILE" -fi -msg_ok "Configured OpenCloud" - -msg_info "Starting services" -if [[ "$LOCALHOST_MODE" == true ]]; then - systemctl enable -q --now opencloud -else - systemctl enable -q --now coolwsd opencloud - sleep 5 - systemctl enable -q --now opencloud-wopi -fi -msg_ok "Started services" - -motd_ssh -customize -cleanup_lxc diff --git a/install/vaultwarden-install.sh b/install/vaultwarden-install.sh new file mode 100644 index 000000000..4b7f0e6d0 --- /dev/null +++ b/install/vaultwarden-install.sh @@ -0,0 +1,100 @@ +#!/usr/bin/env bash + 
+# Copyright (c) 2021-2026 tteck
+# Author: tteck (tteckster)
+# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
+# Source: https://github.com/dani-garcia/vaultwarden
+
+source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
+color
+verb_ip6
+catch_errors
+setting_up_container
+network_check
+update_os
+
+msg_info "Installing Dependencies"
+$STD apt install -y \
+  build-essential \
+  pkgconf \
+  libssl-dev \
+  libmariadb-dev-compat \
+  libpq-dev \
+  argon2 \
+  ssl-cert
+msg_ok "Installed Dependencies"
+
+setup_rust
+fetch_and_deploy_gh_release "vaultwarden" "dani-garcia/vaultwarden" "tarball" "latest" "/tmp/vaultwarden-src"
+
+msg_info "Building Vaultwarden (Patience)"
+cd /tmp/vaultwarden-src
+$STD cargo build --features "sqlite,mysql,postgresql" --release
+msg_ok "Built Vaultwarden"
+
+$STD addgroup --system vaultwarden
+$STD adduser --system --home /opt/vaultwarden --shell /usr/sbin/nologin --no-create-home --gecos 'vaultwarden' --ingroup vaultwarden --disabled-login --disabled-password vaultwarden
+mkdir -p /opt/vaultwarden/{bin,data}
+cp target/release/vaultwarden /opt/vaultwarden/bin/
+cd ~ && rm -rf /tmp/vaultwarden-src
+
+fetch_and_deploy_gh_release "vaultwarden_webvault" "dani-garcia/bw_web_builds" "prebuild" "latest" "/opt/vaultwarden" "bw_web_*.tar.gz"
+
+cat <<EOF >/opt/vaultwarden/.env
+ADMIN_TOKEN=''
+ROCKET_ADDRESS=0.0.0.0
+ROCKET_TLS='{certs="/opt/vaultwarden/ssl-cert-snakeoil.pem",key="/opt/vaultwarden/ssl-cert-snakeoil.key"}'
+DATA_FOLDER=/opt/vaultwarden/data
+DATABASE_MAX_CONNS=10
+WEB_VAULT_FOLDER=/opt/vaultwarden/web-vault
+WEB_VAULT_ENABLED=true
+EOF
+
+mv /etc/ssl/certs/ssl-cert-snakeoil.pem /opt/vaultwarden/
+mv /etc/ssl/private/ssl-cert-snakeoil.key /opt/vaultwarden/
+
+msg_info "Creating Service"
+chown -R vaultwarden:vaultwarden /opt/vaultwarden/
+chown root:root /opt/vaultwarden/bin/vaultwarden
+chmod +x /opt/vaultwarden/bin/vaultwarden
+chown -R root:root /opt/vaultwarden/web-vault/
+chmod +r /opt/vaultwarden/.env
+
+cat <<'EOF' >/etc/systemd/system/vaultwarden.service
+[Unit]
+Description=Bitwarden Server (Powered by Vaultwarden)
+Documentation=https://github.com/dani-garcia/vaultwarden
+After=network.target
+
+[Service]
+User=vaultwarden
+Group=vaultwarden
+EnvironmentFile=-/opt/vaultwarden/.env
+ExecStart=/opt/vaultwarden/bin/vaultwarden
+LimitNOFILE=65535
+LimitNPROC=4096
+PrivateTmp=true
+PrivateDevices=true
+ProtectHome=true
+ProtectSystem=strict
+DevicePolicy=closed
+ProtectControlGroups=yes
+ProtectKernelModules=yes
+ProtectKernelTunables=yes
+RestrictNamespaces=yes
+RestrictRealtime=yes
+MemoryDenyWriteExecute=yes
+LockPersonality=yes
+WorkingDirectory=/opt/vaultwarden
+ReadWriteDirectories=/opt/vaultwarden/data
+AmbientCapabilities=CAP_NET_BIND_SERVICE
+
+[Install]
+WantedBy=multi-user.target
+EOF
+systemctl enable -q --now vaultwarden
+msg_ok "Created Service"
+
+motd_ssh
+customize
+cleanup_lxc

From 77c531683e2fc370075dad26b8a6f4b8c0a66761 Mon Sep 17 00:00:00 2001
From: "CanbiZ (MickLesk)" <47820557+MickLesk@users.noreply.github.com>
Date: Mon, 2 Feb 2026 09:55:40 +0100
Subject: [PATCH 225/228] checkmate

---
 ct/checkmate.sh                     | 72 ++++++++++++++++++++++
 frontend/public/json/checkmate.json | 48 +++++++++++++++
 install/checkmate-install.sh        | 93 +++++++++++++++++++++++++++++
 3 files changed, 213 insertions(+)
 create mode 100644 ct/checkmate.sh
 create mode 100644 frontend/public/json/checkmate.json
 create mode 100644 install/checkmate-install.sh

diff --git a/ct/checkmate.sh b/ct/checkmate.sh
new file mode 100644
index 000000000..a9fb4c442

From 77c531683e2fc370075dad26b8a6f4b8c0a66761 Mon Sep 17 00:00:00 2001
From: "CanbiZ (MickLesk)" <47820557+MickLesk@users.noreply.github.com>
Date: Mon, 2 Feb 2026 09:55:40 +0100
Subject: [PATCH 225/228] checkmate

---
 ct/checkmate.sh                     | 72 ++++++++++++++++++++++
 frontend/public/json/checkmate.json | 48 +++++++++++++++
 install/checkmate-install.sh        | 93 +++++++++++++++++++++++++++++
 3 files changed, 213 insertions(+)
 create mode 100644 ct/checkmate.sh
 create mode 100644 frontend/public/json/checkmate.json
 create mode 100644 install/checkmate-install.sh

diff --git a/ct/checkmate.sh b/ct/checkmate.sh
new file mode 100644
index 000000000..a9fb4c442
--- /dev/null
+++ b/ct/checkmate.sh
@@ -0,0 +1,72 @@
+#!/usr/bin/env bash
+source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)
+# Copyright (c) 2021-2026 community-scripts ORG
+# Author: MickLesk (CanbiZ)
+# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
+# Source: https://github.com/bluewave-labs/Checkmate
+
+APP="Checkmate"
+var_tags="${var_tags:-monitoring;uptime}"
+var_cpu="${var_cpu:-2}"
+var_ram="${var_ram:-4096}"
+var_disk="${var_disk:-10}"
+var_os="${var_os:-debian}"
+var_version="${var_version:-13}"
+var_unprivileged="${var_unprivileged:-1}"
+
+header_info "$APP"
+variables
+color
+catch_errors
+
+function update_script() {
+  header_info
+  check_container_storage
+  check_container_resources
+
+  if [[ ! -d /opt/checkmate ]]; then
+    msg_error "No ${APP} Installation Found!"
+    exit
+  fi
+
+  if check_for_gh_release "checkmate" "bluewave-labs/Checkmate"; then
+    msg_info "Stopping Services"
+    systemctl stop checkmate-server checkmate-client
+    msg_ok "Stopped Services"
+
+    msg_info "Backing up Data"
+    cp /opt/checkmate/server/.env /opt/checkmate_server.env.bak
+    cp /opt/checkmate/client/.env /opt/checkmate_client.env.bak
+    msg_ok "Backed up Data"
+
+    CLEAN_INSTALL=1 fetch_and_deploy_gh_release "checkmate" "bluewave-labs/Checkmate"
+
+    msg_info "Updating Checkmate"
+    cd /opt/checkmate/server
+    $STD npm install
+    cd /opt/checkmate/client
+    $STD npm install
+    $STD npm run build
+    msg_ok "Updated Checkmate"
+
+    msg_info "Restoring Data"
+    mv /opt/checkmate_server.env.bak /opt/checkmate/server/.env
+    mv /opt/checkmate_client.env.bak /opt/checkmate/client/.env
+    msg_ok "Restored Data"
+
+    msg_info "Starting Services"
+    systemctl start checkmate-server checkmate-client
+    msg_ok "Started Services"
+    msg_ok "Updated successfully!"
+  fi
+  exit
+}
+
+start
+build_container
+description
+
+msg_ok "Completed Successfully!\n"
+echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
+echo -e "${INFO}${YW} Access it using the following URL:${CL}"
+echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:5173${CL}"
diff --git a/frontend/public/json/checkmate.json b/frontend/public/json/checkmate.json
new file mode 100644
index 000000000..5a9edd336
--- /dev/null
+++ b/frontend/public/json/checkmate.json
@@ -0,0 +1,48 @@
+{
+  "name": "Checkmate",
+  "slug": "checkmate",
+  "categories": [
+    9
+  ],
+  "date_created": "2026-02-02",
+  "type": "ct",
+  "updateable": true,
+  "privileged": false,
+  "interface_port": 5173,
+  "documentation": "https://github.com/bluewave-labs/Checkmate#readme",
+  "website": "https://github.com/bluewave-labs/Checkmate",
+  "logo": "https://raw.githubusercontent.com/bluewave-labs/Checkmate/develop/client/public/checkmate-logo-light.png",
+  "config_path": "/opt/checkmate/server/.env",
+  "description": "Checkmate is an open source uptime and infrastructure monitoring application that helps you track the availability and performance of your services.",
+  "install_methods": [
+    {
+      "type": "default",
+      "script": "ct/checkmate.sh",
+      "resources": {
+        "cpu": 2,
+        "ram": 4096,
+        "hdd": 10,
+        "os": "Debian",
+        "version": "13"
+      }
+    }
+  ],
+  "default_credentials": {
+    "username": null,
+    "password": null
+  },
+  "notes": [
+    {
+      "text": "Create your admin account on first login via the web interface.",
+      "type": "info"
+    },
+    {
+      "text": "Server API runs on port 52345, Client UI on port 5173.",
+      "type": "info"
+    },
+    {
+      "text": "For PageSpeed monitoring, add a Google PageSpeed API key to the server .env file.",
+      "type": "info"
+    }
+  ]
+}
diff --git a/install/checkmate-install.sh b/install/checkmate-install.sh
new file mode 100644
index 000000000..614533829
--- /dev/null
+++ b/install/checkmate-install.sh
@@ -0,0 +1,93 @@
+#!/usr/bin/env bash
+
+# Copyright (c) 2021-2026 community-scripts ORG
+# Author: MickLesk (CanbiZ)
+# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
+# Source: https://github.com/bluewave-labs/Checkmate
+
+source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
+color
+verb_ip6
+catch_errors
+setting_up_container
+network_check
+update_os
+
+msg_info "Installing Dependencies"
+$STD apt-get install -y \
+  build-essential \
+  openssl
+msg_ok "Installed Dependencies"
+
+MONGO_VERSION="8.0" setup_mongodb
+NODE_VERSION="22" setup_nodejs
+fetch_and_deploy_gh_release "checkmate" "bluewave-labs/Checkmate"
+
+msg_info "Installing Checkmate Server"
+cd /opt/checkmate/server
+$STD npm install
+msg_ok "Installed Checkmate Server"
+
+msg_info "Installing Checkmate Client"
+cd /opt/checkmate/client
+$STD npm install
+$STD npm run build
+msg_ok "Installed Checkmate Client"
+
+msg_info "Configuring Checkmate"
+JWT_SECRET="$(openssl rand -hex 32)"
+cat <<EOF >/opt/checkmate/server/.env
+CLIENT_HOST="http://${LOCAL_IP}:5173"
+JWT_SECRET="${JWT_SECRET}"
+DB_CONNECTION_STRING="mongodb://localhost:27017/checkmate_db"
+TOKEN_TTL="99d"
+ORIGIN="${LOCAL_IP}"
+LOG_LEVEL="info"
+EOF
+
+cat <<EOF >/opt/checkmate/client/.env
+VITE_APP_API_BASE_URL="http://${LOCAL_IP}:52345/api/v1"
+VITE_APP_LOG_LEVEL="warn"
+EOF
+msg_ok "Configured Checkmate"
+
+msg_info "Creating Services"
+cat <<EOF >/etc/systemd/system/checkmate-server.service
+[Unit]
+Description=Checkmate Server
+After=network.target mongod.service
+
+[Service]
+Type=simple
+WorkingDirectory=/opt/checkmate/server
+EnvironmentFile=/opt/checkmate/server/.env
+ExecStart=/usr/bin/npm start
+Restart=on-failure
+RestartSec=5
+
+[Install]
+WantedBy=multi-user.target
+EOF
+
+cat <<EOF >/etc/systemd/system/checkmate-client.service
+[Unit]
+Description=Checkmate Client
+After=network.target checkmate-server.service
+
+[Service]
+Type=simple
+WorkingDirectory=/opt/checkmate/client
+EnvironmentFile=/opt/checkmate/client/.env
+ExecStart=/usr/bin/npm run preview -- --host 0.0.0.0 --port 5173
+Restart=on-failure
+RestartSec=5
+
+[Install]
+WantedBy=multi-user.target
+EOF
+systemctl enable -q --now checkmate-server checkmate-client
+msg_ok "Created Services"
+
+motd_ssh
+customize
+cleanup_lxc
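
None of the following is part of the patch; it is only a quick, generic way to sanity-check the stack inside the container once the installer has finished. The unit names and ports are the ones defined above, and mongosh is assumed to be available from the MongoDB 8.0 installation.

  # Are the services running?
  systemctl --no-pager status mongod checkmate-server checkmate-client
  # Does MongoDB answer?
  mongosh --quiet --eval 'db.runCommand({ ping: 1 })'
  # Are the server API (52345) and the client UI (5173) listening?
  ss -tlnp | grep -E ':(52345|5173)'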

From 660f451a6e0cb866798156b75dabdae9f01b9c3d Mon Sep 17 00:00:00 2001
From: "CanbiZ (MickLesk)" <47820557+MickLesk@users.noreply.github.com>
Date: Mon, 2 Feb 2026 09:59:41 +0100
Subject: [PATCH 226/228] Fix Vaultwarden web vault deployment path

Updated the Vaultwarden install script to extract the web vault files into
/opt/vaultwarden/web-vault instead of the root Vaultwarden directory. Also
added creation of the web-vault directory.

Reformatted checkmate.json for consistent indentation; no functional changes.
---
 frontend/public/json/checkmate.json | 90 ++++++++++++++---------
 install/vaultwarden-install.sh      |  4 +-
 2 files changed, 47 insertions(+), 47 deletions(-)

diff --git a/frontend/public/json/checkmate.json b/frontend/public/json/checkmate.json
index 5a9edd336..1febf47bc 100644
--- a/frontend/public/json/checkmate.json
+++ b/frontend/public/json/checkmate.json
@@ -1,48 +1,48 @@
 {
-  "name": "Checkmate",
-  "slug": "checkmate",
-  "categories": [
-    9
-  ],
-  "date_created": "2026-02-02",
-  "type": "ct",
-  "updateable": true,
-  "privileged": false,
-  "interface_port": 5173,
-  "documentation": "https://github.com/bluewave-labs/Checkmate#readme",
-  "website": "https://github.com/bluewave-labs/Checkmate",
-  "logo": "https://raw.githubusercontent.com/bluewave-labs/Checkmate/develop/client/public/checkmate-logo-light.png",
-  "config_path": "/opt/checkmate/server/.env",
-  "description": "Checkmate is an open source uptime and infrastructure monitoring application that helps you track the availability and performance of your services.",
-  "install_methods": [
-    {
-      "type": "default",
-      "script": "ct/checkmate.sh",
-      "resources": {
-        "cpu": 2,
-        "ram": 4096,
-        "hdd": 10,
-        "os": "Debian",
-        "version": "13"
-      }
-    }
-  ],
-  "default_credentials": {
-    "username": null,
-    "password": null
-  },
-  "notes": [
-    {
-      "text": "Create your admin account on first login via the web interface.",
-      "type": "info"
+    "name": "Checkmate",
+    "slug": "checkmate",
+    "categories": [
+        9
+    ],
+    "date_created": "2026-02-02",
+    "type": "ct",
+    "updateable": true,
+    "privileged": false,
+    "interface_port": 5173,
+    "documentation": "https://github.com/bluewave-labs/Checkmate#readme",
+    "website": "https://github.com/bluewave-labs/Checkmate",
+    "logo": "https://raw.githubusercontent.com/bluewave-labs/Checkmate/develop/client/public/checkmate-logo-light.png",
+    "config_path": "/opt/checkmate/server/.env",
+    "description": "Checkmate is an open source uptime and infrastructure monitoring application that helps you track the availability and performance of your services.",
+    "install_methods": [
+        {
+            "type": "default",
+            "script": "ct/checkmate.sh",
+            "resources": {
+                "cpu": 2,
+                "ram": 4096,
+                "hdd": 10,
+                "os": "Debian",
+                "version": "13"
+            }
+        }
+    ],
+    "default_credentials": {
+        "username": null,
+        "password": null
     },
-    {
-      "text": "Server API runs on port 52345, Client UI on port 5173.",
-      "type": "info"
-    },
-    {
-      "text": "For PageSpeed monitoring, add a Google PageSpeed API key to the server .env file.",
-      "type": "info"
-    }
-  ]
+    "notes": [
+        {
+            "text": "Create your admin account on first login via the web interface.",
+            "type": "info"
+        },
+        {
+            "text": "Server API runs on port 52345, Client UI on port 5173.",
+            "type": "info"
+        },
+        {
+            "text": "For PageSpeed monitoring, add a Google PageSpeed API key to the server .env file.",
+            "type": "info"
+        }
+    ]
 }
diff --git a/install/vaultwarden-install.sh b/install/vaultwarden-install.sh
index 4b7f0e6d0..9440b1163 100644
--- a/install/vaultwarden-install.sh
+++ b/install/vaultwarden-install.sh
@@ -34,11 +34,11 @@ msg_ok "Built Vaultwarden"
 
 $STD addgroup --system vaultwarden
 $STD adduser --system --home /opt/vaultwarden --shell /usr/sbin/nologin --no-create-home --gecos 'vaultwarden' --ingroup vaultwarden --disabled-login --disabled-password vaultwarden
-mkdir -p /opt/vaultwarden/{bin,data}
+mkdir -p /opt/vaultwarden/{bin,data,web-vault}
 cp target/release/vaultwarden /opt/vaultwarden/bin/
 cd ~ && rm -rf /tmp/vaultwarden-src
 
-fetch_and_deploy_gh_release "vaultwarden_webvault" "dani-garcia/bw_web_builds" "prebuild" "latest" "/opt/vaultwarden" "bw_web_*.tar.gz"
+fetch_and_deploy_gh_release "vaultwarden_webvault" "dani-garcia/bw_web_builds" "prebuild" "latest" "/opt/vaultwarden/web-vault" "bw_web_*.tar.gz"
 
 cat <<EOF >/opt/vaultwarden/.env
 ADMIN_TOKEN=''
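
A quick check that the change does what the commit message says; this is illustrative and not part of the patch, and the port and TLS defaults come from the .env written earlier in the series.

  # The prebuilt vault should now land under web-vault/ rather than directly in /opt/vaultwarden.
  ls /opt/vaultwarden/web-vault/
  # With the service running, the vault should be served over the snakeoil TLS certificate.
  curl -ksI https://127.0.0.1:8000/ | head -n 1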

From 7c8c81d3d15c12674e5f56fe377478195f69aea6 Mon Sep 17 00:00:00 2001
From: "CanbiZ (MickLesk)" <47820557+MickLesk@users.noreply.github.com>
Date: Mon, 2 Feb 2026 10:02:17 +0100
Subject: [PATCH 227/228] test

---
 install/piler-install.sh | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/install/piler-install.sh b/install/piler-install.sh
index 0f9c92bc7..2ac56fd1c 100644
--- a/install/piler-install.sh
+++ b/install/piler-install.sh
@@ -179,8 +179,8 @@ msg_ok "Configured PHP-FPM Pool"
 
 msg_info "Configuring Piler Web GUI"
 # Check if config-site.php already exists (created by .deb package)
-if [ ! -f /var/www/piler/config-site.php ]; then
-  cat <<EOF >/var/www/piler/config-site.php
+if [ ! -f /var/piler/www/config-site.php ]; then
+  cat <<EOF >/var/piler/www/config-site.php
 /etc/nginx/sites-available/piler
 server {
     listen 80;
     server_name _;
-    root /var/www/piler;
+    root /var/piler/www;
     index index.php;
     access_log /var/log/nginx/piler-access.log;

From f3ef5345eefad22d39304c06d8310a664c2908c5 Mon Sep 17 00:00:00 2001
From: "CanbiZ (MickLesk)" <47820557+MickLesk@users.noreply.github.com>
Date: Mon, 2 Feb 2026 10:06:41 +0100
Subject: [PATCH 228/228] cleanup

---
 .github/workflows/{ => bak}/auto-update-app-headers.yml.bak    | 0
 .github/workflows/{ => bak}/get-versions-from-gh.yaml          | 0
 .github/workflows/{ => bak}/get-versions-from-newreleases.yaml | 0
 .github/workflows/{ => bak}/revision-bump.yml.bak              | 0
 .github/workflows/{ => bak}/script-test.yaml                   | 0
 .github/workflows/{ => bak}/update-versions-github.yml         | 0
 .github/workflows/{ => bak}/update_issue.yml                   | 0
 7 files changed, 0 insertions(+), 0 deletions(-)
 rename .github/workflows/{ => bak}/auto-update-app-headers.yml.bak (100%)
 rename .github/workflows/{ => bak}/get-versions-from-gh.yaml (100%)
 rename .github/workflows/{ => bak}/get-versions-from-newreleases.yaml (100%)
 rename .github/workflows/{ => bak}/revision-bump.yml.bak (100%)
 rename .github/workflows/{ => bak}/script-test.yaml (100%)
 rename .github/workflows/{ => bak}/update-versions-github.yml (100%)
 rename .github/workflows/{ => bak}/update_issue.yml (100%)

diff --git a/.github/workflows/auto-update-app-headers.yml.bak b/.github/workflows/bak/auto-update-app-headers.yml.bak
similarity index 100%
rename from .github/workflows/auto-update-app-headers.yml.bak
rename to .github/workflows/bak/auto-update-app-headers.yml.bak
diff --git a/.github/workflows/get-versions-from-gh.yaml b/.github/workflows/bak/get-versions-from-gh.yaml
similarity index 100%
rename from .github/workflows/get-versions-from-gh.yaml
rename to .github/workflows/bak/get-versions-from-gh.yaml
diff --git a/.github/workflows/get-versions-from-newreleases.yaml b/.github/workflows/bak/get-versions-from-newreleases.yaml
similarity index 100%
rename from .github/workflows/get-versions-from-newreleases.yaml
rename to .github/workflows/bak/get-versions-from-newreleases.yaml
diff --git a/.github/workflows/revision-bump.yml.bak b/.github/workflows/bak/revision-bump.yml.bak
similarity index 100%
rename from .github/workflows/revision-bump.yml.bak
rename to .github/workflows/bak/revision-bump.yml.bak
diff --git a/.github/workflows/script-test.yaml b/.github/workflows/bak/script-test.yaml
similarity index 100%
rename from .github/workflows/script-test.yaml
rename to .github/workflows/bak/script-test.yaml
diff --git a/.github/workflows/update-versions-github.yml b/.github/workflows/bak/update-versions-github.yml
similarity index 100%
rename from .github/workflows/update-versions-github.yml
rename to .github/workflows/bak/update-versions-github.yml
diff --git a/.github/workflows/update_issue.yml b/.github/workflows/bak/update_issue.yml
similarity index 100%
rename from .github/workflows/update_issue.yml
rename to .github/workflows/bak/update_issue.yml