Merge branch 'community-scripts:main' into feature/kutt

Tom Frenzel 2026-01-03 14:49:05 +01:00 committed by GitHub
commit b366e08ecf
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
46 changed files with 6746 additions and 5661 deletions

ct/alpine-valkey.sh (new file, 72 lines)

@ -0,0 +1,72 @@
#!/usr/bin/env bash
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)
# Copyright (c) 2021-2025 community-scripts ORG
# Author: pshankinclarke (lazarillo)
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://valkey.io/
APP="Alpine-Valkey"
var_tags="${var_tags:-alpine;database}"
var_cpu="${var_cpu:-1}"
var_ram="${var_ram:-256}"
var_disk="${var_disk:-1}"
var_os="${var_os:-alpine}"
var_version="${var_version:-3.22}"
var_unprivileged="${var_unprivileged:-1}"
header_info "$APP"
variables
color
catch_errors
function update_script() {
if ! apk -e info newt >/dev/null 2>&1; then
apk add -q newt
fi
LXCIP=$(ip a s dev eth0 | awk '/inet / {print $2}' | cut -d/ -f1)
while true; do
CHOICE=$(
whiptail --backtitle "Proxmox VE Helper Scripts" --title "Valkey Management" --menu "Select option" 11 58 3 \
"1" "Update Valkey" \
"2" "Allow 0.0.0.0 for listening" \
"3" "Allow only ${LXCIP} for listening" 3>&2 2>&1 1>&3
)
exit_status=$?
if [ $exit_status == 1 ]; then
clear
exit-script
fi
header_info
case $CHOICE in
1)
msg_info "Updating Valkey"
apk update && apk upgrade valkey
rc-service valkey restart
msg_ok "Updated successfully!"
exit
;;
2)
msg_info "Setting Valkey to listen on all interfaces"
sed -i 's/^bind .*/bind 0.0.0.0/' /etc/valkey/valkey.conf
rc-service valkey restart
msg_ok "Valkey now listens on all interfaces!"
exit
;;
3)
msg_info "Setting Valkey to listen only on ${LXCIP}"
sed -i "s/^bind .*/bind ${LXCIP}/" /etc/valkey/valkey.conf
rc-service valkey restart
msg_ok "Valkey now listens only on ${LXCIP}!"
exit
;;
esac
done
}
start
build_container
description
msg_ok "Completed Successfully!\n"
echo -e "${APP} should be reachable on port 6379.
${BL}valkey-cli -h ${IP} -p 6379${CL} \n"
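A quick way to confirm which address Valkey actually ended up bound to after using menu options 2 or 3 (a sketch, run inside the container; assumes the BusyBox netstat that Alpine ships by default and the valkey-cli binary installed by the install script):

grep '^bind' /etc/valkey/valkey.conf                  # configured bind address
netstat -tln | grep 6379                              # address the daemon is actually listening on
valkey-cli -h "$(awk '/^bind/ {print $2}' /etc/valkey/valkey.conf)" -p 6379 ping   # expects PONG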

ct/heimdall-dashboard.sh (new file, 80 lines)

@ -0,0 +1,80 @@
#!/usr/bin/env bash
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)
# Copyright (c) 2021-2025 tteck
# Author: tteck (tteckster)
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://heimdall.site/
APP="Heimdall-Dashboard"
var_tags="${var_tags:-dashboard}"
var_cpu="${var_cpu:-1}"
var_ram="${var_ram:-512}"
var_disk="${var_disk:-2}"
var_os="${var_os:-debian}"
var_version="${var_version:-13}"
var_unprivileged="${var_unprivileged:-1}"
header_info "$APP"
variables
color
catch_errors
function update_script() {
header_info
check_container_storage
check_container_resources
if [[ ! -d /opt/Heimdall ]]; then
msg_error "No ${APP} Installation Found!"
exit
fi
if check_for_gh_release "Heimdall" "linuxserver/Heimdall"; then
msg_info "Stopping Service"
systemctl stop heimdall
sleep 1
msg_ok "Stopped Service"
msg_info "Backing up Data"
cp -R /opt/Heimdall/database database-backup
cp -R /opt/Heimdall/public public-backup
sleep 1
msg_ok "Backed up Data"
setup_composer
fetch_and_deploy_gh_release "Heimdall" "linuxserver/Heimdall" "tarball"
msg_info "Updating Heimdall-Dashboard"
cd /opt/Heimdall
export COMPOSER_ALLOW_SUPERUSER=1
$STD composer dump-autoload
msg_ok "Updated Heimdall-Dashboard"
msg_info "Restoring Data"
cd ~
cp -R database-backup/* /opt/Heimdall/database
cp -R public-backup/* /opt/Heimdall/public
sleep 1
msg_ok "Restored Data"
msg_info "Cleaning Up"
rm -rf {public-backup,database-backup}
sleep 1
msg_ok "Cleaned Up"
msg_info "Starting Service"
systemctl start heimdall.service
sleep 2
msg_ok "Started Service"
msg_ok "Updated successfully!"
fi
exit
}
start
build_container
description
msg_ok "Completed Successfully!\n"
echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
echo -e "${INFO}${YW} Access it using the following URL:${CL}"
echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:7990${CL}"

View File

@ -29,7 +29,7 @@ function update_script() {
exit
fi
if check_for_gh_release "homarr" "homarr-labs/homarr"; then
if check_for_gh_release "homarr" "Meierschlumpf/homarr"; then
msg_info "Stopping Services (Patience)"
systemctl stop homarr
systemctl stop redis-server
@ -38,7 +38,12 @@ function update_script() {
if ! { grep -q '^REDIS_IS_EXTERNAL=' /opt/homarr/.env 2>/dev/null || grep -q '^REDIS_IS_EXTERNAL=' /opt/homarr.env 2>/dev/null; }; then
msg_info "Fixing old structure"
systemctl disable -q --now nginx
$STD apt install -y musl-dev
# Error: Dec 15 21:05:23 homarr run.sh[330]: Error: libc.musl-x86_64.so.1: cannot open shared object file: No such file or di>
# Dec 15 21:05:23 homarr run.sh[330]: at ignore-listed frames {
# Dec 15 21:05:23 homarr run.sh[330]: code: 'ERR_DLOPEN_FAILED'
# Dec 15 21:05:23 homarr run.sh[330]: }
ln -s /usr/lib/x86_64-linux-musl/libc.so /lib/libc.musl-x86_64.so.1
cp /opt/homarr/.env /opt/homarr.env
echo "REDIS_IS_EXTERNAL='true'" >> /opt/homarr.env
@ -51,7 +56,6 @@ function update_script() {
[Service]
ReadWritePaths=-/appdata/redis -/var/lib/redis -/var/log/redis -/var/run/redis -/etc/redis
EOF
# TODO: change in json
systemctl daemon-reload
rm /opt/run_homarr.sh
msg_ok "Fixed old structure"
@ -62,9 +66,9 @@ EOF
$STD apt upgrade nodejs -y
msg_ok "Updated Nodejs"
NODE_VERSION=$(curl -s https://raw.githubusercontent.com/homarr-labs/homarr/dev/package.json | jq -r '.engines.node | split(">=")[1] | split(".")[0]')
NODE_VERSION=$(curl -s https://raw.githubusercontent.com/Meierschlumpf/homarr/dev/package.json | jq -r '.engines.node | split(">=")[1] | split(".")[0]')
setup_nodejs
CLEAN_INSTALL=1 fetch_and_deploy_gh_release "homarr" "homarr-labs/homarr" "prebuild" "latest" "/opt/homarr" "build-amd64.tar.gz"
CLEAN_INSTALL=1 fetch_and_deploy_gh_release "homarr" "Meierschlumpf/homarr" "prebuild" "latest" "/opt/homarr" "source-amd64.tar.gz"
msg_info "Updating Homarr"
cp /opt/homarr/redis.conf /etc/redis/redis.conf

View File

@ -1,82 +0,0 @@
#!/usr/bin/env bash
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)
# Copyright (c) 2021-2025 community-scripts ORG
# Author: MickLesk (CanbiZ)
# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
# Source: https://koel.dev/
APP="Koel"
var_tags="${var_tags:-music;streaming}"
var_cpu="${var_cpu:-2}"
var_ram="${var_ram:-2048}"
var_disk="${var_disk:-8}"
var_os="${var_os:-debian}"
var_version="${var_version:-13}"
var_unprivileged="${var_unprivileged:-1}"
header_info "$APP"
variables
color
catch_errors
function update_script() {
header_info
check_container_storage
check_container_resources
if [[ ! -d /opt/koel ]]; then
msg_error "No ${APP} Installation Found!"
exit
fi
if check_for_gh_release "koel" "koel/koel"; then
msg_info "Stopping Services"
systemctl stop nginx php8.4-fpm
msg_ok "Stopped Services"
msg_info "Creating Backup"
mkdir -p /tmp/koel_backup
cp /opt/koel/.env /tmp/koel_backup/
cp -r /opt/koel/storage /tmp/koel_backup/ 2>/dev/null || true
cp -r /opt/koel/public/img /tmp/koel_backup/ 2>/dev/null || true
msg_ok "Created Backup"
CLEAN_INSTALL=1 fetch_and_deploy_gh_release "koel" "koel/koel" "prebuild" "latest" "/opt/koel" "koel-*.tar.gz"
msg_info "Restoring Data"
cp /tmp/koel_backup/.env /opt/koel/
cp -r /tmp/koel_backup/storage/* /opt/koel/storage/ 2>/dev/null || true
cp -r /tmp/koel_backup/img/* /opt/koel/public/img/ 2>/dev/null || true
rm -rf /tmp/koel_backup
msg_ok "Restored Data"
msg_info "Running Migrations"
cd /opt/koel
export COMPOSER_ALLOW_SUPERUSER=1
$STD composer install --no-interaction --no-dev --optimize-autoloader
$STD php artisan migrate --force
$STD php artisan config:clear
$STD php artisan cache:clear
$STD php artisan view:clear
$STD php artisan koel:init --no-assets --no-interaction
chown -R www-data:www-data /opt/koel
chmod -R 775 /opt/koel/storage
msg_ok "Ran Migrations"
msg_info "Starting Services"
systemctl start php8.4-fpm nginx
msg_ok "Started Services"
msg_ok "Updated Successfully"
fi
exit
}
start
build_container
description
msg_ok "Completed Successfully!\n"
echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
echo -e "${INFO}${YW} Access it using the following URL:${CL}"
echo -e "${TAB}${GATEWAY}${BGN}http://${IP}${CL}"

ct/linkwarden.sh (new file, 75 lines)

@ -0,0 +1,75 @@
#!/usr/bin/env bash
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)
# Copyright (c) 2021-2025 community-scripts ORG
# Author: MickLesk (CanbiZ)
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://linkwarden.app/
APP="Linkwarden"
var_tags="${var_tags:-bookmark}"
var_cpu="${var_cpu:-2}"
var_ram="${var_ram:-4096}"
var_disk="${var_disk:-12}"
var_os="${var_os:-ubuntu}"
var_version="${var_version:-24.04}"
header_info "$APP"
variables
color
catch_errors
function update_script() {
header_info
check_container_storage
check_container_resources
if [[ ! -d /opt/linkwarden ]]; then
msg_error "No ${APP} Installation Found!"
exit
fi
if check_for_gh_release "linkwarden" "linkwarden/linkwarden"; then
NODE_VERSION="22" NODE_MODULE="yarn@latest" setup_nodejs
msg_info "Stopping Service"
systemctl stop linkwarden
msg_ok "Stopped Service"
RUST_CRATES="monolith" setup_rust
msg_info "Backing up data"
mv /opt/linkwarden/.env /opt/.env
[ -d /opt/linkwarden/data ] && mv /opt/linkwarden/data /opt/data.bak
rm -rf /opt/linkwarden
msg_ok "Backed up data"
fetch_and_deploy_gh_release "linkwarden" "linkwarden/linkwarden"
msg_info "Updating ${APP}"
cd /opt/linkwarden
$STD yarn
$STD npx playwright install-deps
$STD yarn playwright install
mv /opt/.env /opt/linkwarden/.env
$STD yarn prisma:generate
$STD yarn web:build
$STD yarn prisma:deploy
[ -d /opt/data.bak ] && mv /opt/data.bak /opt/linkwarden/data
rm -rf ~/.cargo/registry ~/.cargo/git ~/.cargo/.package-cache
rm -rf /root/.cache/yarn
rm -rf /opt/linkwarden/.next/cache
msg_ok "Updated ${APP}"
msg_info "Starting Service"
systemctl start linkwarden
msg_ok "Started Service"
msg_ok "Updated successfully!"
fi
exit
}
start
build_container
description
msg_ok "Completed Successfully!\n"
echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
echo -e "${INFO}${YW} Access it using the following URL:${CL}"
echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:3000${CL}"

View File

@ -29,7 +29,7 @@ function update_script() {
exit
fi
RELEASE="v4.0.0"
RELEASE="v4.1.0"
if check_for_gh_release "opencloud" "opencloud-eu/opencloud" "${RELEASE}"; then
msg_info "Stopping services"
systemctl stop opencloud opencloud-wopi
@ -37,7 +37,7 @@ function update_script() {
msg_info "Updating packages"
$STD apt-get update
$STD apt-get dist-upgrade
$STD apt-get dist-upgrade -y
msg_ok "Updated packages"
CLEAN_INSTALL=1 fetch_and_deploy_gh_release "opencloud" "opencloud-eu/opencloud" "singlefile" "${RELEASE}" "/usr/bin" "opencloud-*-linux-amd64"

ct/papra.sh (new file, 48 lines)

@ -0,0 +1,48 @@
#!/usr/bin/env bash
source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/build.func)
# Copyright (c) 2021-2025 community-scripts ORG
# Author: MickLesk (CanbiZ)
# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
# Source: https://github.com/CorentinTh/papra
APP="Papra"
var_tags="${var_tags:-document-management}"
var_cpu="${var_cpu:-2}"
var_ram="${var_ram:-2048}"
var_disk="${var_disk:-10}"
var_os="${var_os:-debian}"
var_version="${var_version:-13}"
var_unprivileged="${var_unprivileged:-1}"
header_info "$APP"
variables
color
catch_errors
function update_script() {
header_info
check_container_storage
check_container_resources
if [[ ! -d /opt/papra ]]; then
msg_error "No ${APP} Installation Found!"
exit
fi
msg_info "Updating $APP LXC"
systemctl stop papra
cd /opt/papra
git fetch
git pull
$STD pnpm install --frozen-lockfile
$STD pnpm --filter "@papra/app-client..." run build
$STD pnpm --filter "@papra/app-server..." run build
systemctl start papra
msg_ok "Updated $APP LXC"
exit
}
start
build_container
description
msg_ok "Completed Successfully!"
msg_custom "🚀" "${GN}" "${APP} setup has been successfully initialized!"

ct/piler.sh (new file, 88 lines)

@ -0,0 +1,88 @@
#!/usr/bin/env bash
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)
# Copyright (c) 2021-2025 community-scripts ORG
# Author: MickLesk (CanbiZ)
# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
# Source: https://www.mailpiler.org/
APP="Piler"
var_tags="${var_tags:-email;archive;smtp}"
var_cpu="${var_cpu:-4}"
var_ram="${var_ram:-4096}"
var_disk="${var_disk:-20}"
var_os="${var_os:-debian}"
var_version="${var_version:-12}"
var_unprivileged="${var_unprivileged:-1}"
header_info "$APP"
variables
color
catch_errors
function update_script() {
header_info
check_container_storage
check_container_resources
if [[ ! -f /etc/piler/piler.conf ]]; then
msg_error "No ${APP} Installation Found!"
exit
fi
RELEASE_NEW=$(curl -fsSL https://www.mailpiler.org/download.php | grep -oP 'piler-\K[0-9]+\.[0-9]+\.[0-9]+' | head -1)
RELEASE_OLD=$(pilerd -v 2>/dev/null | grep -oP 'version \K[0-9]+\.[0-9]+\.[0-9]+' || echo "unknown")
if [[ "${RELEASE_NEW}" != "${RELEASE_OLD}" ]]; then
msg_info "Stopping Piler Services"
$STD systemctl stop piler
$STD systemctl stop manticore
msg_ok "Stopped Piler Services"
msg_info "Backing up Configuration"
cp /etc/piler/piler.conf /tmp/piler.conf.bak
msg_ok "Backed up Configuration"
msg_info "Updating to v${RELEASE_NEW}"
cd /tmp
curl -fsSL "https://bitbucket.org/jsuto/piler/downloads/piler-${RELEASE_NEW}.tar.gz" -o piler.tar.gz
tar -xzf piler.tar.gz
cd "piler-${RELEASE_NEW}"
$STD ./configure \
--localstatedir=/var \
--with-database=mysql \
--sysconfdir=/etc/piler \
--enable-memcached
$STD make
$STD make install
$STD ldconfig
cd /tmp && rm -rf "piler-${RELEASE_NEW}" piler.tar.gz
msg_ok "Updated to v${RELEASE_NEW}"
msg_info "Restoring Configuration"
cp /tmp/piler.conf.bak /etc/piler/piler.conf
rm -f /tmp/piler.conf.bak
chown piler:piler /etc/piler/piler.conf
msg_ok "Restored Configuration"
msg_info "Starting Piler Services"
$STD systemctl start manticore
$STD systemctl start piler
msg_ok "Started Piler Services"
msg_ok "Updated Successfully to v${RELEASE_NEW}"
else
msg_ok "No update available (current: v${RELEASE_OLD})"
fi
exit
}
start
build_container
description
msg_ok "Completed Successfully!\n"
echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
echo -e "${INFO}${YW} Access it using the following URL:${CL}"
echo -e "${TAB}${GATEWAY}${BGN}http://${IP}${CL}"

View File

@ -2,6 +2,7 @@
source <(curl -s https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)
# Copyright (c) 2021-2025 community-scripts ORG
# Author: MickLesk (CanbiZ)
# Co-author: AlphaLawless
# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
# Source: https://romm.app
@ -10,10 +11,9 @@ var_tags="${var_tags:-emulation}"
var_cpu="${var_cpu:-2}"
var_ram="${var_ram:-4096}"
var_disk="${var_disk:-20}"
var_os="${var_os:-ubuntu}"
var_version="${var_version:-24.04}"
var_os="${var_os:-debian}"
var_version="${var_version:-13}"
var_unprivileged="${var_unprivileged:-1}"
var_fuse="${var_fuse:-1}"
header_info "$APP"
variables
@ -30,35 +30,44 @@ function update_script() {
exit
fi
msg_info "Stopping $APP"
systemctl stop romm
systemctl stop nginx
msg_ok "Stopped $APP"
if check_for_gh_release "romm" "rommapp/romm"; then
msg_info "Stopping ${APP} services"
systemctl stop romm-backend romm-worker romm-scheduler romm-watcher
msg_ok "Stopped ${APP} services"
msg_info "Updating $APP"
cd /opt/romm/app
git pull
msg_info "Backing up configuration"
cp /opt/romm/.env /opt/romm/.env.backup
msg_ok "Backed up configuration"
# Update backend
cd /opt/romm/app
source /opt/romm/venv/bin/activate
pip install --upgrade pip
pip install poetry
poetry install
msg_info "Updating ${APP}"
fetch_and_deploy_gh_release "romm" "rommapp/romm" "tarball" "latest" "/opt/romm"
# Update frontend
cd /opt/romm/app/frontend
npm install
npm run build
cp /opt/romm/.env.backup /opt/romm/.env
echo "Updated on $(date)" >/opt/romm/version.txt
msg_ok "Updated $APP"
cd /opt/romm
$STD uv sync --all-extras
msg_info "Starting $APP"
systemctl start romm
systemctl start nginx
msg_ok "Started $APP"
msg_ok "Update Successful"
cd /opt/romm/backend
$STD uv run alembic upgrade head
cd /opt/romm/frontend
$STD npm install
$STD npm run build
# Merge static assets into dist folder
cp -rf /opt/romm/frontend/assets/* /opt/romm/frontend/dist/assets/
mkdir -p /opt/romm/frontend/dist/assets/romm
ln -sfn /var/lib/romm/resources /opt/romm/frontend/dist/assets/romm/resources
ln -sfn /var/lib/romm/assets /opt/romm/frontend/dist/assets/romm/assets
msg_ok "Updated ${APP}"
msg_info "Starting ${APP} services"
systemctl start romm-backend romm-worker romm-scheduler romm-watcher
msg_ok "Started ${APP} services"
msg_ok "Update Successful"
fi
exit
}
@ -69,4 +78,4 @@ description
msg_ok "Completed Successfully!\n"
echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
echo -e "${INFO}${YW} Access it using the following URL:${CL}"
echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:8080${CL}"
echo -e "${TAB}${GATEWAY}${BGN}http://${IP}${CL}"

ct/rustypaste.sh (new file, 64 lines)

@ -0,0 +1,64 @@
#!/usr/bin/env bash
source <(curl -s https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)
# Copyright (c) 2021-2025 community-scripts ORG
# Author: GoldenSpringness
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://github.com/orhun/rustypaste
APP="rustypaste"
var_tags="${var_tags:-pastebin;storage}"
var_cpu="${var_cpu:-1}"
var_ram="${var_ram:-512}"
var_disk="${var_disk:-20}"
var_os="${var_os:-debian}"
var_version="${var_version:-13}"
var_unprivileged="${var_unprivileged:-1}"
header_info "$APP"
variables
color
catch_errors
function update_script() {
header_info
check_container_storage
check_container_resources
if [[ ! -f "/opt/rustypaste/target/release/rustypaste" ]]; then
msg_error "No rustypaste Installation Found!"
exit
fi
if check_for_gh_release "rustypaste" "orhun/rustypaste"; then
msg_info "Stopping rustypaste"
systemctl stop rustypaste
msg_ok "Stopped rustypaste"
msg_info "Creating Backup"
tar -czf "/opt/rustypaste_backup_$(date +%F).tar.gz" "/opt/rustypaste/upload"
msg_ok "Backup Created"
CLEAN_INSTALL=1 fetch_and_deploy_gh_release "rustypaste" "orhun/rustypaste" "tarball" "latest" "/opt/rustypaste"
msg_info "Updating rustypaste"
cd /opt/rustypaste
sed -i 's|^address = ".*"|address = "0.0.0.0:8000"|' config.toml
$STD cargo build --locked --release
msg_ok "Updated rustypaste"
msg_info "Starting rustypaste"
systemctl start rustypaste
msg_ok "Started rustypaste"
msg_ok "Update Successful"
fi
exit
}
start
build_container
description
msg_ok "Completed Successfully!\n"
echo -e "${CREATING}${GN}rustypaste setup has been successfully initialized!${CL}"
echo -e "${INFO}${YW} Access it using the following URL:${CL}"
echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:8000${CL}"

ct/sportarr.sh (new file, 53 lines)

@ -0,0 +1,53 @@
#!/usr/bin/env bash
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)
# Copyright (c) 2021-2025 community-scripts ORG
# Author: Slaviša Arežina (tremor021)
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://github.com/Sportarr/Sportarr
APP="Sportarr"
var_tags="${var_tags:-arr}"
var_cpu="${var_cpu:-2}"
var_ram="${var_ram:-2048}"
var_disk="${var_disk:-8}"
var_os="${var_os:-debian}"
var_version="${var_version:-13}"
var_unprivileged="${var_unprivileged:-1}"
header_info "$APP"
variables
color
catch_errors
function update_script() {
header_info
check_container_storage
check_container_resources
if [[ ! -d /opt/sportarr ]]; then
msg_error "No ${APP} Installation Found!"
exit
fi
if check_for_gh_release "sportarr" "Sportarr/Sportarr"; then
msg_info "Stopping Sportarr Service"
systemctl stop sportarr
msg_ok "Stopped Sportarr Service"
fetch_and_deploy_gh_release "sportarr" "Sportarr/Sportarr" "prebuild" "latest" "/opt/sportarr" "Sportarr-linux-x64-*.tar.gz"
msg_info "Starting Sportarr Service"
systemctl start sportarr
msg_ok "Started Sportarr Service"
msg_ok "Updated successfully!"
fi
exit
}
start
build_container
description
msg_ok "Completed Successfully!\n"
echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
echo -e "${INFO}${YW} Access it using the following URL:${CL}"
echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:1867${CL}"

View File

@ -1,48 +0,0 @@
{
"name": "Koel",
"slug": "koel",
"categories": [
13
],
"date_created": "2025-12-10",
"type": "ct",
"updateable": true,
"privileged": false,
"interface_port": 80,
"documentation": "https://docs.koel.dev/",
"config_path": "/opt/koel/.env",
"website": "https://koel.dev/",
"logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/koel-light.webp",
"description": "Koel is a simple web-based personal audio streaming service written in Vue and Laravel. It supports multiple users, audio visualization, smart playlists, YouTube integration, and Last.fm scrobbling.",
"install_methods": [
{
"type": "default",
"script": "ct/koel.sh",
"resources": {
"cpu": 2,
"ram": 2048,
"hdd": 8,
"os": "Debian",
"version": "13"
}
}
],
"default_credentials": {
"username": "admin@koel.dev",
"password": "KoelIsCool"
},
"notes": [
{
"text": "Media files should be placed in /opt/koel_media",
"type": "info"
},
{
"text": "Database credentials are stored in ~/koel.creds",
"type": "info"
},
{
"text": "Music library is scanned hourly via cron job",
"type": "info"
}
]
}

View File

@ -1,37 +1,41 @@
{
"name": "nextExplorer",
"slug": "nextexplorer",
"categories": [
11,
12
],
"date_created": "2025-12-11",
"type": "ct",
"updateable": true,
"privileged": false,
"interface_port": 3000,
"documentation": "https://explorer.nxz.ai",
"website": "https://github.com/vikramsoni2/nextExplorer",
"logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/nextexplorer.webp",
"config_path": "/etc/nextExplorer/.env",
"description": "",
"install_methods": [
{
"type": "default",
"script": "ct/nextexplorer.sh",
"resources": {
"cpu": 2,
"ram": 3072,
"hdd": 8,
"os": "Debian",
"version": "13"
}
}
],
"notes": [
{
"text": "Bind mount your volume(s) in the `/mnt` directory",
"type": "info"
}
]
"name": "nextExplorer",
"slug": "nextexplorer",
"categories": [
11,
12
],
"date_created": "2025-12-11",
"type": "ct",
"updateable": true,
"privileged": false,
"interface_port": 3000,
"documentation": "https://explorer.nxz.ai",
"website": "https://github.com/vikramsoni2/nextExplorer",
"logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/nextexplorer.webp",
"config_path": "/etc/nextExplorer/.env",
"description": "Self-hosted file access for teams, homelabs, and agencies with a refined interface.",
"install_methods": [
{
"type": "default",
"script": "ct/nextexplorer.sh",
"resources": {
"cpu": 2,
"ram": 3072,
"hdd": 8,
"os": "Debian",
"version": "13"
}
}
],
"default_credentials": {
"username": null,
"password": null
},
"notes": [
{
"text": "Bind mount your volume(s) in the `/mnt` directory",
"type": "info"
}
]
}

View File

@ -0,0 +1,56 @@
{
"name": "Papra",
"slug": "papra",
"categories": [
12
],
"date_created": "2025-12-30",
"type": "ct",
"updateable": true,
"privileged": false,
"interface_port": 1221,
"documentation": "https://github.com/CorentinTh/papra",
"website": "https://github.com/CorentinTh/papra",
"logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/papra.webp",
"config_path": "/opt/papra/.env",
"description": "Papra is a modern, self-hosted document management system with full-text search, OCR support, and automatic document processing. Built with Node.js and featuring a clean web interface for organizing and managing your documents.",
"install_methods": [
{
"type": "default",
"script": "ct/papra.sh",
"resources": {
"cpu": 2,
"ram": 2048,
"hdd": 10,
"os": "debian",
"version": "13"
}
}
],
"default_credentials": {
"username": null,
"password": null
},
"notes": [
{
"text": "First visit will prompt you to create an account",
"type": "info"
},
{
"text": "Tesseract OCR is pre-installed for all languages",
"type": "info"
},
{
"text": "Documents are stored in /opt/papra/app-data/documents",
"type": "info"
},
{
"text": "Ingestion folder available at /opt/papra/ingestion for automatic document import",
"type": "info"
},
{
"text": "Email functionality runs in dry-run mode by default",
"type": "warning"
}
]
}

View File

@ -1,46 +0,0 @@
{
"name": "Pi-Hole Exporter",
"slug": "pihole-exporter",
"categories": [
9
],
"date_created": "2025-12-08",
"type": "addon",
"updateable": true,
"privileged": false,
"interface_port": 9617,
"documentation": "https://github.com/eko/pihole-exporter",
"website": "https://github.com/eko/pihole-exporter",
"logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/pi-hole.webp",
"config_path": "/opt/pihole-exporter.env",
"description": "A Prometheus exporter for PI-Hole's Raspberry PI ad blocker",
"install_methods": [
{
"type": "default",
"script": "tools/addon/pihole-exporter.sh",
"resources": {
"cpu": null,
"ram": null,
"hdd": null,
"os": null,
"version": null
}
},
{
"type": "alpine",
"script": "tools/addon/pihole-exporter.sh",
"resources": {
"cpu": null,
"ram": null,
"hdd": null,
"os": null,
"version": null
}
}
],
"default_credentials": {
"username": null,
"password": null
},
"notes": []
}

View File

@ -0,0 +1,36 @@
{
"name": "Piler",
"slug": "piler",
"categories": [
7,
18
],
"date_created": "2025-12-15",
"type": "ct",
"updateable": true,
"privileged": false,
"interface_port": 80,
"documentation": "https://www.mailpiler.org/",
"config_path": "",
"website": "https://www.mailpiler.org/",
"logo": "https://www.mailpiler.org/piler-logo.png",
"description": "Piler is a feature rich open source email archiving solution with support for legal hold, deduplication, full text search, and many more features.",
"install_methods": [
{
"type": "default",
"script": "ct/piler.sh",
"resources": {
"cpu": 4,
"ram": 4096,
"hdd": 20,
"os": "Debian",
"version": "12"
}
}
],
"default_credentials": {
"username": null,
"password": null
},
"notes": []
}

View File

@ -1,46 +0,0 @@
{
"name": "qbittorrent Exporter",
"slug": "qbittorrent-exporter",
"categories": [
9
],
"date_created": "2025-11-21",
"type": "addon",
"updateable": true,
"privileged": false,
"interface_port": 8090,
"documentation": "https://github.com/martabal/qbittorrent-exporter",
"website": "https://github.com/martabal/qbittorrent-exporter",
"logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/qbittorrent.webp",
"config_path": "/opt/qbittorrent-exporter.env",
"description": "A fast and lightweight prometheus exporter for qBittorrent ",
"install_methods": [
{
"type": "default",
"script": "tools/addon/qbittorrent-exporter.sh",
"resources": {
"cpu": null,
"ram": null,
"hdd": null,
"os": null,
"version": null
}
},
{
"type": "alpine",
"script": "tools/addon/qbittorrent-exporter.sh",
"resources": {
"cpu": null,
"ram": null,
"hdd": null,
"os": null,
"version": null
}
}
],
"default_credentials": {
"username": null,
"password": null
},
"notes": []
}

View File

@ -8,10 +8,10 @@
"type": "ct",
"updateable": true,
"privileged": false,
"interface_port": 8080,
"interface_port": 80,
"documentation": "https://docs.romm.app/latest/",
"website": "https://romm.app/",
"config_path": "/opt",
"config_path": "/opt/romm/.env",
"logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/romm.webp",
"description": "RomM (ROM Manager) allows you to scan, enrich, browse and play your game collection with a clean and responsive interface. Support for multiple platforms, various naming schemes, and custom tags.",
"install_methods": [
@ -22,14 +22,14 @@
"cpu": 2,
"ram": 4096,
"hdd": 20,
"os": "ubuntu",
"version": "24.04"
"os": "debian",
"version": "13"
}
}
],
"default_credentials": {
"username": "romm",
"password": "changeme"
"username": null,
"password": null
},
"notes": []
}

View File

@ -0,0 +1,40 @@
{
"name": "RustyPaste",
"slug": "rustypaste",
"categories": [
12
],
"date_created": "2025-12-22",
"type": "ct",
"updateable": true,
"privileged": false,
"interface_port": 8000,
"documentation": "https://github.com/orhun/rustypaste",
"config_path": "/opt/rustypaste/config.toml",
"website": "https://github.com/orhun/rustypaste",
"logo": "https://github.com/orhun/rustypaste/raw/master/img/rustypaste_logo.png",
"description": "Rustypaste is a minimal file upload/pastebin service.",
"install_methods": [
{
"type": "default",
"script": "ct/rustypaste.sh",
"resources": {
"cpu": 1,
"ram": 512,
"hdd": 20,
"os": "Debian",
"version": "13"
}
}
],
"default_credentials": {
"username": null,
"password": null
},
"notes": [
{
"text": "When updating the script it will backup the whole project including all the uploaded files, make sure to extract it to a safe location or remove",
"type": "info"
}
]
}
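The backup note above refers to the archive that ct/rustypaste.sh writes to /opt/rustypaste_backup_<date>.tar.gz before the clean reinstall. A minimal follow-up, assuming a single archive is present and using an example destination path:

ls -1 /opt/rustypaste_backup_*.tar.gz                      # archives left behind by the updater
tar -tzf /opt/rustypaste_backup_*.tar.gz | head            # inspect contents (uploads from /opt/rustypaste/upload)
mv /opt/rustypaste_backup_*.tar.gz /mnt/backups/           # example destination; or rm the archive once verified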

View File

@ -0,0 +1,40 @@
{
"name": "Sportarr",
"slug": "sportarr",
"categories": [
14
],
"date_created": "2025-12-12",
"type": "ct",
"updateable": true,
"privileged": false,
"interface_port": 1867,
"documentation": "https://sportarr.net/docs",
"config_path": "/opt/sportarr/.env, /opt/sportarr-data/config/config.xml",
"website": "https://sportarr.net/",
"logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/sportarr.webp",
"description": "Sportarr is an automated media management application for all sports. It works similar to Sonarr and Radarr but specifically designed for combat sports, basketball, football, hockey, motorsports, and hundreds of other sports worldwide.",
"install_methods": [
{
"type": "default",
"script": "ct/sportarr.sh",
"resources": {
"cpu": 2,
"ram": 2048,
"hdd": 8,
"os": "Debian",
"version": "13"
}
}
],
"default_credentials": {
"username": null,
"password": null
},
"notes": [
{
"text": "The resources assigned to LXC are considered baseline. Please adjust to match your workload.",
"type": "info"
}
]
}

View File

@ -0,0 +1,24 @@
#!/usr/bin/env bash
# Copyright (c) 2021-2025 community-scripts ORG
# Author: pshankinclarke (lazarillo)
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://valkey.io/
source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
color
verb_ip6
catch_errors
setting_up_container
network_check
update_os
msg_info "Installing Valkey"
$STD apk add valkey valkey-openrc valkey-cli
$STD sed -i 's/^bind .*/bind 0.0.0.0/' /etc/valkey/valkey.conf
$STD rc-update add valkey default
$STD rc-service valkey start
msg_ok "Installed Valkey"
motd_ssh
customize

View File

@ -12,6 +12,7 @@ catch_errors
setting_up_container
network_check
update_os
setup_hwaccel
msg_info "Installing dependencies"
$STD apt install -y pciutils

View File

@ -13,6 +13,8 @@ setting_up_container
network_check
update_os
setup_hwaccel
msg_info "Installing Base Dependencies"
$STD apt-get install -y curl wget ca-certificates
msg_ok "Installed Base Dependencies"

View File

@ -1,107 +0,0 @@
#!/usr/bin/env bash
# Copyright (c) 2021-2025 tteck
# Author: tteck
# Co-Author: havardthom
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://openwebui.com/
source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
color
verb_ip6
catch_errors
setting_up_container
network_check
update_os
msg_info "Installing Dependencies"
$STD apt-get install -y \
ffmpeg
msg_ok "Installed Dependencies"
msg_info "Setup Python3"
$STD apt-get install -y --no-install-recommends \
python3 \
python3-pip
msg_ok "Setup Python3"
setup_nodejs
msg_info "Installing Open WebUI (Patience)"
fetch_and_deploy_gh_release "open-webui/open-webui"
cd /opt/openwebui/backend
$STD pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu
$STD pip3 install -r requirements.txt -U
cd /opt/openwebui
cat <<EOF >/opt/openwebui/.env
# Ollama URL for the backend to connect
# The path '/ollama' will be redirected to the specified backend URL
OLLAMA_BASE_URL=http://0.0.0.0:11434
OPENAI_API_BASE_URL=''
OPENAI_API_KEY=''
# AUTOMATIC1111_BASE_URL="http://localhost:7860"
# DO NOT TRACK
SCARF_NO_ANALYTICS=true
DO_NOT_TRACK=true
ANONYMIZED_TELEMETRY=false
ENV=prod
ENABLE_OLLAMA_API=false
EOF
$STD npm install
export NODE_OPTIONS="--max-old-space-size=3584"
sed -i "s/git rev-parse HEAD/openssl rand -hex 20/g" /opt/openwebui/svelte.config.js
$STD npm run build
msg_ok "Installed Open WebUI"
read -r -p "${TAB3}Would you like to add Ollama? <y/N> " prompt
if [[ ${prompt,,} =~ ^(y|yes)$ ]]; then
msg_info "Installing Ollama"
curl -fsSLO https://ollama.com/download/ollama-linux-amd64.tgz
tar -C /usr -xzf ollama-linux-amd64.tgz
rm -rf ollama-linux-amd64.tgz
cat <<EOF >/etc/systemd/system/ollama.service
[Unit]
Description=Ollama Service
After=network-online.target
[Service]
Type=exec
ExecStart=/usr/bin/ollama serve
Environment=HOME=$HOME
Environment=OLLAMA_HOST=0.0.0.0
Restart=always
RestartSec=3
[Install]
WantedBy=multi-user.target
EOF
systemctl enable -q --now ollama
sed -i 's/ENABLE_OLLAMA_API=false/ENABLE_OLLAMA_API=true/g' /opt/openwebui/.env
msg_ok "Installed Ollama"
fi
msg_info "Creating Service"
cat <<EOF >/etc/systemd/system/open-webui.service
[Unit]
Description=Open WebUI Service
After=network.target
[Service]
Type=exec
WorkingDirectory=/opt/openwebui
EnvironmentFile=/opt/openwebui/.env
ExecStart=/opt/openwebui/backend/start.sh
[Install]
WantedBy=multi-user.target
EOF
systemctl enable -q --now open-webui
msg_ok "Created Service"
motd_ssh
customize
msg_info "Cleaning up"
$STD apt-get -y autoremove
$STD apt-get -y autoclean
msg_ok "Cleaned"

View File

@ -0,0 +1,57 @@
#!/usr/bin/env bash
# Copyright (c) 2021-2025 tteck
# Author: tteck (tteckster)
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://heimdall.site/
source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
color
verb_ip6
catch_errors
setting_up_container
network_check
update_os
msg_info "Installing Dependencies"
$STD apt install -y apt-transport-https
msg_ok "Installed Dependencies"
PHP_VERSION="8.4" PHP_MODULE="bz2,sqlite3" PHP_FPM="YES" setup_php
setup_composer
fetch_and_deploy_gh_release "Heimdall" "linuxserver/Heimdall" "tarball"
msg_info "Setting up Heimdall-Dashboard"
cd /opt/Heimdall
cp .env.example .env
$STD php artisan key:generate
msg_ok "Setup Heimdall-Dashboard"
msg_info "Creating Service"
cat <<EOF >/etc/systemd/system/heimdall.service
[Unit]
Description=Heimdall
After=network.target
[Service]
Restart=always
RestartSec=5
Type=simple
User=root
WorkingDirectory=/opt/Heimdall
ExecStart=/usr/bin/php artisan serve --port 7990 --host 0.0.0.0
TimeoutStopSec=30
[Install]
WantedBy=multi-user.target
EOF
systemctl enable -q --now heimdall
cd /opt/Heimdall
export COMPOSER_ALLOW_SUPERUSER=1
$STD composer dump-autoload
systemctl restart heimdall.service
msg_ok "Created Service"
motd_ssh
customize
cleanup_lxc
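A quick way to validate the generated unit file and confirm the dashboard responds after install (a sketch; assumes curl is present in the container and uses port 7990 as configured in the service above):

systemd-analyze verify /etc/systemd/system/heimdall.service   # should report nothing if the unit parses cleanly
systemctl --no-pager status heimdall
curl -sI http://127.0.0.1:7990 | head -n 1                    # expect an HTTP status line from artisan serve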

View File

@ -18,16 +18,14 @@ $STD apt install -y \
redis-server \
nginx \
gettext \
openssl \
musl-dev
openssl
msg_ok "Installed Dependencies"
NODE_VERSION=$(curl -s https://raw.githubusercontent.com/homarr-labs/homarr/dev/package.json | jq -r '.engines.node | split(">=")[1] | split(".")[0]')
NODE_VERSION=$(curl -s https://raw.githubusercontent.com/Meierschlumpf/homarr/dev/package.json | jq -r '.engines.node | split(">=")[1] | split(".")[0]')
setup_nodejs
fetch_and_deploy_gh_release "homarr" "homarr-labs/homarr" "prebuild" "latest" "/opt/homarr" "build-amd64.tar.gz"
fetch_and_deploy_gh_release "homarr" "Meierschlumpf/homarr" "prebuild" "latest" "/opt/homarr" "source-debian-amd64.tar.gz"
msg_info "Installing Homarr"
ln -s /usr/lib/x86_64-linux-musl/libc.so /lib/libc.musl-x86_64.so.1
mkdir -p /opt/homarr_db
touch /opt/homarr_db/db.sqlite
SECRET_ENCRYPTION_KEY="$(openssl rand -hex 32)"
@ -47,7 +45,7 @@ msg_ok "Installed Homarr"
msg_info "Copying config files"
mkdir -p /appdata/redis
chown -R redis:redis /appdata/redis
chmod 755 /appdata/redis
chmod 744 /appdata/redis
cp /opt/homarr/redis.conf /etc/redis/redis.conf
rm /etc/nginx/nginx.conf
mkdir -p /etc/nginx/templates
@ -64,6 +62,8 @@ ReadWritePaths=-/appdata/redis -/var/lib/redis -/var/log/redis -/var/run/redis -
EOF
cat <<EOF >/etc/systemd/system/homarr.service
[Unit]
Requires=redis-server.service
After=redis-server.service
Description=Homarr Service
After=network.target
@ -80,6 +80,7 @@ chmod +x /opt/homarr/run.sh
systemctl daemon-reload
systemctl enable -q --now redis-server && sleep 5
systemctl enable -q --now homarr
systemctl disable -q --now nginx
msg_ok "Created Services"
motd_ssh

View File

@ -13,46 +13,47 @@ setting_up_container
network_check
update_os
msg_info "Installing Dependencies"
$STD apt-get install -y \
pkg-config \
libssl-dev \
libc6-dev \
libpq-dev \
clang \
llvm \
nettle-dev \
build-essential \
make
msg_ok "Installed Dependencies"
#msg_info "Installing Dependencies"
#$STD apt-get install -y \
# pkg-config \
# libssl-dev \
# libc6-dev \
# libpq-dev \
# clang \
# llvm \
# nettle-dev \
# build-essential \
# make
#msg_ok "Installed Dependencies"
setup_rust
NODE_VERSION="22" NODE_MODULE="yarn" setup_nodejs
fetch_and_deploy_gh_release "hoodik" "hudikhq/hoodik" "tarball" "latest" "/opt/hoodik"
#setup_rust
#NODE_VERSION="22" NODE_MODULE="yarn" setup_nodejs
#fetch_and_deploy_gh_release "hoodik" "hudikhq/hoodik" "tarball" "latest" "/opt/hoodik"
fetch_and_deploy_gh_release "hoodik" "hudikhq/hoodik" "prebuild" "latest" "/opt/hoodik" "*x86_64.tar.gz"
msg_info "Installing wasm-pack"
$STD cargo install wasm-pack
msg_ok "Installed wasm-pack"
#msg_info "Installing wasm-pack"
#$STD cargo install wasm-pack
#msg_ok "Installed wasm-pack"
msg_info "Building Hoodik Frontend"
cd /opt/hoodik
$STD yarn install --frozen-lockfile
$STD yarn wasm-pack
$STD yarn web:build
msg_ok "Built Hoodik Frontend"
#msg_info "Building Hoodik Frontend"
#cd /opt/hoodik
#$STD yarn install --frozen-lockfile
#$STD yarn wasm-pack
#$STD yarn web:build
#msg_ok "Built Hoodik Frontend"
msg_info "Building Hoodik Backend"
cd /opt/hoodik
$STD cargo build --release
cp /opt/hoodik/target/release/hoodik /usr/local/bin/hoodik
chmod +x /usr/local/bin/hoodik
msg_ok "Built Hoodik Backend"
#msg_info "Building Hoodik Backend"
#cd /opt/hoodik
#$STD cargo build --release
#cp /opt/hoodik/target/release/hoodik /usr/local/bin/hoodik
#chmod +x /usr/local/bin/hoodik
#msg_ok "Built Hoodik Backend"
msg_info "Cleaning up build artifacts"
rm -rf /opt/hoodik/target
rm -rf /root/.cargo/registry
rm -rf /opt/hoodik/node_modules
msg_ok "Cleaned up build artifacts"
#msg_info "Cleaning up build artifacts"
#rm -rf /opt/hoodik/target
#rm -rf /root/.cargo/registry
#rm -rf /opt/hoodik/node_modules
#msg_ok "Cleaned up build artifacts"
msg_info "Configuring Hoodik"
mkdir -p /opt/hoodik_data
@ -80,7 +81,8 @@ Type=simple
User=root
WorkingDirectory=/opt/hoodik_data
EnvironmentFile=/opt/hoodik/.env
ExecStart=/usr/local/bin/hoodik
#ExecStart=/usr/local/bin/hoodik
ExecStart=/opt/hoodik
Restart=always
RestartSec=5

View File

@ -1,189 +0,0 @@
#!/usr/bin/env bash
# Copyright (c) 2021-2025 community-scripts ORG
# Author: MickLesk (CanbiZ)
# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
# Source: https://koel.dev/
source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
color
verb_ip6
catch_errors
setting_up_container
network_check
update_os
msg_info "Installing Dependencies"
$STD apt install -y \
nginx \
ffmpeg \
cron \
locales
msg_ok "Installed Dependencies"
import_local_ip
PG_VERSION="16" setup_postgresql
PG_DB_NAME="koel" PG_DB_USER="koel" setup_postgresql_db
PHP_VERSION="8.4" PHP_FPM="YES" PHP_MODULE="bz2,exif,imagick,pgsql,sqlite3" setup_php
NODE_VERSION="22" NODE_MODULE="pnpm" setup_nodejs
setup_composer
fetch_and_deploy_gh_release "koel" "koel/koel" "prebuild" "latest" "/opt/koel" "koel-*.tar.gz"
msg_info "Configuring Koel"
mkdir -p /opt/koel_media /opt/koel_sync
cd /opt/koel
cat <<EOF >/opt/koel/.env
APP_NAME=Koel
APP_ENV=production
APP_DEBUG=false
APP_URL=http://${LOCAL_IP}
APP_KEY=
TRUSTED_HOSTS=
DB_CONNECTION=pgsql
DB_HOST=127.0.0.1
DB_PORT=5432
DB_DATABASE=${PG_DB_NAME}
DB_USERNAME=${PG_DB_USER}
DB_PASSWORD=${PG_DB_PASS}
STORAGE_DRIVER=local
MEDIA_PATH=/opt/koel_media
ARTIFACTS_PATH=
IGNORE_DOT_FILES=true
APP_MAX_SCAN_TIME=600
MEMORY_LIMIT=
STREAMING_METHOD=php
SCOUT_DRIVER=tntsearch
USE_MUSICBRAINZ=true
MUSICBRAINZ_USER_AGENT=
LASTFM_API_KEY=
LASTFM_API_SECRET=
SPOTIFY_CLIENT_ID=
SPOTIFY_CLIENT_SECRET=
YOUTUBE_API_KEY=
CDN_URL=
TRANSCODE_FLAC=false
FFMPEG_PATH=/usr/bin/ffmpeg
TRANSCODE_BIT_RATE=128
ALLOW_DOWNLOAD=true
BACKUP_ON_DELETE=true
MEDIA_BROWSER_ENABLED=false
PROXY_AUTH_ENABLED=false
SYNC_LOG_LEVEL=error
FORCE_HTTPS=
MAIL_FROM_ADDRESS="noreply@localhost"
MAIL_FROM_NAME="Koel"
MAIL_MAILER=log
MAIL_HOST=null
MAIL_PORT=null
MAIL_USERNAME=null
MAIL_PASSWORD=null
MAIL_ENCRYPTION=null
BROADCAST_CONNECTION=log
CACHE_DRIVER=file
FILESYSTEM_DISK=local
QUEUE_CONNECTION=sync
SESSION_DRIVER=file
SESSION_LIFETIME=120
EOF
mkdir -p /opt/koel/storage/{app/public,framework/{cache/data,sessions,views},logs}
chown -R www-data:www-data /opt/koel /opt/koel_media /opt/koel_sync
chmod -R 775 /opt/koel/storage /opt/koel/bootstrap/cache
msg_ok "Configured Koel"
msg_info "Installing Koel (Patience)"
export COMPOSER_ALLOW_SUPERUSER=1
cd /opt/koel
$STD composer install --no-interaction --no-dev --optimize-autoloader
$STD php artisan key:generate --force
$STD php artisan config:clear
$STD php artisan cache:clear
$STD php artisan koel:init --no-assets --no-interaction
chown -R www-data:www-data /opt/koel
msg_ok "Installed Koel"
msg_info "Tuning PHP-FPM"
PHP_FPM_CONF="/etc/php/8.4/fpm/pool.d/www.conf"
sed -i 's/^pm.max_children = .*/pm.max_children = 15/' "$PHP_FPM_CONF"
sed -i 's/^pm.start_servers = .*/pm.start_servers = 4/' "$PHP_FPM_CONF"
sed -i 's/^pm.min_spare_servers = .*/pm.min_spare_servers = 2/' "$PHP_FPM_CONF"
sed -i 's/^pm.max_spare_servers = .*/pm.max_spare_servers = 8/' "$PHP_FPM_CONF"
$STD systemctl restart php8.4-fpm
msg_ok "Tuned PHP-FPM"
msg_info "Configuring Nginx"
cat <<'EOF' >/etc/nginx/sites-available/koel
server {
listen 80;
server_name _;
root /opt/koel/public;
index index.php;
client_max_body_size 50M;
charset utf-8;
gzip on;
gzip_types text/plain text/css application/x-javascript text/xml application/xml application/xml+rss text/javascript application/json;
gzip_comp_level 9;
send_timeout 3600;
location / {
try_files $uri $uri/ /index.php?$args;
}
location /media/ {
internal;
alias $upstream_http_x_media_root;
}
location ~ \.php$ {
try_files $uri $uri/ /index.php?$args;
fastcgi_pass unix:/run/php/php8.4-fpm.sock;
fastcgi_index index.php;
fastcgi_split_path_info ^(.+\.php)(/.+)$;
fastcgi_intercept_errors on;
fastcgi_param PATH_INFO $fastcgi_path_info;
fastcgi_param PATH_TRANSLATED $document_root$fastcgi_path_info;
fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
include fastcgi_params;
}
location ~ /\.(?!well-known).* {
deny all;
}
}
EOF
rm -f /etc/nginx/sites-enabled/default
ln -sf /etc/nginx/sites-available/koel /etc/nginx/sites-enabled/koel
$STD systemctl reload nginx
msg_ok "Configured Nginx"
msg_info "Setting up Cron Job"
cat <<'EOF' >/etc/cron.d/koel
0 * * * * www-data cd /opt/koel && /usr/bin/php artisan koel:scan >/dev/null 2>&1
EOF
chmod 644 /etc/cron.d/koel
msg_ok "Set up Cron Job"
motd_ssh
customize
cleanup_lxc

View File

@ -0,0 +1,76 @@
#!/usr/bin/env bash
# Copyright (c) 2021-2025 community-scripts ORG
# Author: MickLesk (Canbiz)
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://linkwarden.app/
source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
color
verb_ip6
catch_errors
setting_up_container
network_check
update_os
msg_info "Installing Dependencies"
$STD apt install -y build-essential
msg_ok "Installed Dependencies"
NODE_VERSION="22" setup_nodejs
PG_VERSION="16" setup_postgresql
PG_DB_NAME="linkwardendb" PG_DB_USER="linkwarden" setup_postgresql_db
RUST_CRATES="monolith" setup_rust
fetch_and_deploy_gh_release "linkwarden" "linkwarden/linkwarden"
import_local_ip
read -r -p "${TAB3}Would you like to add Adminer? <y/N> " prompt
if [[ "${prompt,,}" =~ ^(y|yes)$ ]]; then
setup_adminer
fi
msg_info "Installing Linkwarden (Patience)"
export COREPACK_ENABLE_DOWNLOAD_PROMPT=0
export PRISMA_HIDE_UPDATE_MESSAGE=1
export DEBIAN_FRONTEND=noninteractive
corepack enable
SECRET_KEY="$(head /dev/urandom | tr -dc A-Za-z0-9 | head -c 32)"
cd /opt/linkwarden
$STD yarn workspaces focus linkwarden @linkwarden/web @linkwarden/worker
# $STD npx playwright install-deps
# $STD yarn playwright install
cat <<EOF >/opt/linkwarden/.env
NEXTAUTH_SECRET=${SECRET_KEY}
NEXTAUTH_URL=http://${LOCAL_IP}:3000
DATABASE_URL=postgresql://${PG_DB_USER}:${PG_DB_PASS}@localhost:5432/${PG_DB_NAME}
EOF
$STD yarn prisma:generate
$STD yarn web:build
$STD yarn prisma:deploy
rm -rf ~/.cargo/registry ~/.cargo/git ~/.cargo/.package-cache
rm -rf /root/.cache/yarn
rm -rf /opt/linkwarden/.next/cache
msg_ok "Installed Linkwarden"
msg_info "Creating Service"
cat <<EOF >/etc/systemd/system/linkwarden.service
[Unit]
Description=Linkwarden Service
After=network.target
[Service]
Type=exec
Environment=PATH=$PATH
WorkingDirectory=/opt/linkwarden
ExecStart=/usr/bin/yarn concurrently:start
[Install]
WantedBy=multi-user.target
EOF
systemctl enable -q --now linkwarden
msg_ok "Created Service"
motd_ssh
customize
cleanup_lxc

View File

@ -79,6 +79,7 @@ SESSION_SECRET="${SECRET}"
# OIDC_CLIENT_SECRET=
# OIDC_CALLBACK_URL=
# OIDC_SCOPES=
# OIDC_AUTO_CREATE_USERS=true
# SEARCH_DEEP=
# SEARCH_RIPGREP=

View File

@ -57,7 +57,7 @@ echo "$COOLPASS" >~/.coolpass
msg_ok "Installed Collabora Online"
# OpenCloud
fetch_and_deploy_gh_release "opencloud" "opencloud-eu/opencloud" "singlefile" "v4.0.0" "/usr/bin" "opencloud-*-linux-amd64"
fetch_and_deploy_gh_release "opencloud" "opencloud-eu/opencloud" "singlefile" "v4.1.0" "/usr/bin" "opencloud-*-linux-amd64"
msg_info "Configuring OpenCloud"
DATA_DIR="/var/lib/opencloud/"

install/papra-install.sh (new file, 104 lines)

@ -0,0 +1,104 @@
#!/usr/bin/env bash
# Copyright (c) 2021-2025 community-scripts ORG
# Author: MickLesk (CanbiZ)
# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
# Source: https://github.com/CorentinTh/papra
source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
color
verb_ip6
catch_errors
setting_up_container
network_check
update_os
msg_info "Installing Dependencies"
$STD apt install -y \
git \
build-essential \
tesseract-ocr \
tesseract-ocr-all
msg_ok "Installed Dependencies"
NODE_VERSION="24" setup_nodejs
msg_info "Cloning Papra Repository"
cd /opt
RELEASE=$(curl -s https://api.github.com/repos/papra-hq/papra/releases/latest | grep "tag_name" | awk '{print substr($2, 2, length($2)-3)}')
$STD git clone --depth=1 --branch ${RELEASE} https://github.com/papra-hq/papra.git
cd papra
msg_ok "Cloned Papra Repository"
msg_info "Setup Papra"
export COREPACK_ENABLE_NETWORK=1
$STD corepack enable
$STD corepack prepare pnpm@10.19.0 --activate
$STD pnpm install --frozen-lockfile --ignore-scripts
$STD pnpm --filter "@papra/app-client..." run build
$STD pnpm --filter "@papra/app-server..." run build
msg_ok "Set up Papra"
msg_info "Configuring Papra"
CONTAINER_IP=$(hostname -I | awk '{print $1}')
BETTER_AUTH_SECRET=$(openssl rand -hex 32)
mkdir -p /opt/papra/app-data/db
mkdir -p /opt/papra/app-data/documents
cat >/opt/papra/.env <<EOF
NODE_ENV=production
SERVER_SERVE_PUBLIC_DIR=true
PORT=1221
# Database Configuration
DATABASE_URL=file:./app-data/db/db.sqlite
# Storage Configuration
DOCUMENT_STORAGE_FILESYSTEM_ROOT=./app-data/documents
PAPRA_CONFIG_DIR=./app-data
# Authentication
BETTER_AUTH_SECRET=${BETTER_AUTH_SECRET}
BETTER_AUTH_TELEMETRY=0
# Application Configuration
CLIENT_BASE_URL=http://${CONTAINER_IP}:1221
# Email Configuration (dry-run mode)
EMAILS_DRY_RUN=true
# Ingestion Folder
INGESTION_FOLDER_ROOT=./ingestion
EOF
mkdir -p /opt/papra/ingestion
chown -R root:root /opt/papra
msg_ok "Configured Papra"
msg_info "Creating Papra Service"
cat >/etc/systemd/system/papra.service <<EOF
[Unit]
Description=Papra Document Management
After=network.target
[Service]
Type=simple
User=root
WorkingDirectory=/opt/papra/apps/papra-server
EnvironmentFile=/opt/papra/.env
ExecStartPre=/usr/bin/pnpm --silent run migration:apply
ExecStart=/usr/bin/pnpm --silent run start
Restart=on-failure
RestartSec=10
[Install]
WantedBy=multi-user.target
EOF
systemctl enable -q --now papra
msg_ok "Created and Started Papra Service"
motd_ssh
customize
cleanup_lxc
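Once the service is up, a minimal smoke test (a sketch; the unit name, port 1221, and the migration step come from the .env and service file defined above, and curl is assumed to be available in the container):

systemctl --no-pager status papra
journalctl -u papra -n 20 --no-pager                             # migration:apply and server startup output
curl -s -o /dev/null -w '%{http_code}\n' http://127.0.0.1:1221   # expect 200 once the server is listening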

install/piler-install.sh (new file, 298 lines)

@ -0,0 +1,298 @@
#!/usr/bin/env bash
# Copyright (c) 2021-2025 community-scripts ORG
# Author: MickLesk (CanbiZ)
# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
# Source: https://www.mailpiler.org/
source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
color
verb_ip6
catch_errors
setting_up_container
network_check
update_os
msg_info "Installing Dependencies"
$STD apt install -y \
nginx \
openssl \
libtre5 \
catdoc \
poppler-utils \
unrtf \
tnef \
clamav \
clamav-daemon \
memcached \
sysstat \
python3 \
python3-mysqldb
msg_ok "Installed Dependencies"
import_local_ip
setup_mariadb
MARIADB_DB_NAME="piler" MARIADB_DB_USER="piler" setup_mysql_db
PHP_VERSION="8.4" PHP_FPM="YES" PHP_MODULE="ldap,gd,memcached,pdo,mysql,curl,zip" setup_php
msg_info "Installing Manticore Search"
curl -fsSL https://repo.manticoresearch.com/manticore-repo.noarch.deb -o /tmp/manticore-repo.deb
$STD dpkg -i /tmp/manticore-repo.deb
$STD apt update
$STD apt install -y manticore manticore-extra
rm -f /tmp/manticore-repo.deb
msg_ok "Installed Manticore Search"
msg_info "Installing Piler"
VERSION="1.4.8"
cd /tmp
curl -fsSL "https://github.com/jsuto/piler/releases/download/piler-${VERSION}/piler_${VERSION}-bookworm_amd64.deb" -o piler.deb
curl -fsSL "https://github.com/jsuto/piler/releases/download/piler-${VERSION}/piler-webui_${VERSION}-bookworm_amd64.deb" -o piler-webui.deb
$STD dpkg -i piler.deb
$STD apt-get -f install -y
$STD dpkg -i piler-webui.deb
$STD apt-get -f install -y
rm -f piler.deb piler-webui.deb
msg_ok "Installed Piler v${VERSION}"
msg_info "Configuring Piler Database"
cd /usr/local/share/piler
mysql -u root "${MARIADB_DB_NAME}" <db-mysql.sql
msg_ok "Configured Piler Database"
msg_info "Configuring Piler"
PILER_KEY=$(openssl rand -hex 16)
cat <<EOF >/etc/piler/piler.conf
hostid=piler.${LOCAL_IP}.nip.io
update_counters_to_memcached=1
mysql_hostname=localhost
mysql_database=${MARIADB_DB_NAME}
mysql_username=${MARIADB_DB_USER}
mysql_password=${MARIADB_DB_PASS}
mysql_socket=/var/run/mysqld/mysqld.sock
archive_dir=/var/piler/store
data_dir=/var/piler
tmp_dir=/var/piler/tmp
listen_addr=0.0.0.0
listen_port=25
encrypt_messages=1
key=${PILER_KEY}
iv=0123456789ABCDEF
memcached_servers=127.0.0.1
enable_clamav=1
clamd_socket=/var/run/clamav/clamd.ctl
spam_header_line=X-Spam-Status: Yes
verbosity=1
EOF
chown piler:piler /etc/piler/piler.conf
chmod 640 /etc/piler/piler.conf
chown -R piler:piler /var/piler
chmod 750 /var/piler
msg_ok "Configured Piler"
msg_info "Configuring Manticore Search"
cat <<EOF >/etc/manticoresearch/manticore.conf
searchd {
listen = 9306:mysql
listen = 9312
listen = 9308:http
log = /var/log/manticore/searchd.log
query_log = /var/log/manticore/query.log
pid_file = /var/run/manticore/searchd.pid
binlog_path = /var/lib/manticore/data
}
source piler1 {
type = mysql
sql_host = localhost
sql_user = ${MARIADB_DB_USER}
sql_pass = ${MARIADB_DB_PASS}
sql_db = ${MARIADB_DB_NAME}
sql_port = 3306
sql_query = SELECT id, from_addr, to_addr, subject, body, sent FROM metadata
sql_attr_timestamp = sent
}
index piler1 {
source = piler1
path = /var/piler/manticore/piler1
min_word_len = 1
charset_table = 0..9, A..Z->a..z, a..z, U+00E1, U+00E9
}
index tag1 {
type = rt
path = /var/piler/manticore/tag1
rt_field = tag
rt_attr_uint = uid
}
index note1 {
type = rt
path = /var/piler/manticore/note1
rt_field = note
rt_attr_uint = uid
}
EOF
mkdir -p /var/log/manticore
chown -R manticore:manticore /var/log/manticore
chown -R piler:piler /var/piler/manticore
msg_ok "Configured Manticore Search"
msg_info "Creating Piler Service"
cat <<EOF >/etc/systemd/system/piler.service
[Unit]
Description=Piler Email Archiving
After=network.target mysql.service manticore.service
Requires=mysql.service manticore.service
[Service]
Type=forking
User=piler
Group=piler
ExecStart=/usr/local/sbin/pilerd -c /etc/piler/piler.conf
PIDFile=/var/piler/pilerd.pid
Restart=always
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
$STD systemctl daemon-reload
$STD systemctl enable --now manticore
$STD systemctl enable --now memcached
$STD systemctl enable --now clamav-daemon
$STD systemctl enable --now piler
msg_ok "Created Piler Service"
msg_info "Configuring PHP-FPM Pool"
cp /etc/php/8.4/fpm/pool.d/www.conf /etc/php/8.4/fpm/pool.d/piler.conf
sed -i 's/\[www\]/[piler]/' /etc/php/8.4/fpm/pool.d/piler.conf
sed -i 's/^user = www-data/user = piler/' /etc/php/8.4/fpm/pool.d/piler.conf
sed -i 's/^group = www-data/group = piler/' /etc/php/8.4/fpm/pool.d/piler.conf
sed -i 's|^listen = .*|listen = /run/php/php8.4-fpm-piler.sock|' /etc/php/8.4/fpm/pool.d/piler.conf
$STD systemctl restart php8.4-fpm
msg_ok "Configured PHP-FPM Pool"
msg_info "Configuring Piler Web GUI"
cd /var/www/piler
cat <<EOF >/var/www/piler/config-site.php
<?php
\$config['SITE_NAME'] = 'Piler Email Archive';
\$config['SITE_URL'] = 'http://${LOCAL_IP}';
\$config['DB_DRIVER'] = 'mysql';
\$config['DB_HOSTNAME'] = 'localhost';
\$config['DB_DATABASE'] = '${MARIADB_DB_NAME}';
\$config['DB_USERNAME'] = '${MARIADB_DB_USER}';
\$config['DB_PASSWORD'] = '${MARIADB_DB_PASS}';
\$config['SPHINX_DATABASE'] = 'mysql:host=127.0.0.1;port=9306;charset=utf8mb4';
\$config['ENABLE_SAAS'] = 0;
\$config['SESSION_NAME'] = 'piler_session';
\$config['SITE_KEYWORDS'] = 'piler, email archive';
\$config['SITE_DESCRIPTION'] = 'Piler email archiving';
\$config['SMTP_DOMAIN'] = '${LOCAL_IP}';
\$config['SMTP_FROMADDR'] = 'no-reply@${LOCAL_IP}';
\$config['ADMIN_EMAIL'] = 'admin@local';
\$config['ADMIN_PASSWORD'] = '\$1\$PXDhp7Bo\$KlEEURhLLphAEa4w.lj1N0';
\$config['MEMCACHED_ENABLED'] = 1;
\$config['MEMCACHED_PREFIX'] = 'piler';
\$config['MEMCACHED_TTL'] = 3600;
\$config['DIR_BASE'] = '/var/www/piler';
\$config['DIR_ATTACHMENT'] = '/var/piler/store';
\$config['ENCRYPTION_KEY'] = '${PILER_KEY}';
\$config['ENCRYPTION_IV'] = '0123456789ABCDEF';
\$config['DEFAULT_RETENTION_DAYS'] = 2557;
\$config['RESTRICTED_AUDITOR'] = 0;
\$config['ENABLE_LDAP_AUTH'] = 0;
\$config['ENABLE_IMAP_AUTH'] = 0;
\$config['ENABLE_POP3_AUTH'] = 0;
\$config['ENABLE_SSO_AUTH'] = 0;
\$config['HEADER_LINE_TO_HIDE'] = 'X-Envelope-To:';
?>
EOF
chown -R piler:piler /var/www/piler
chmod 755 /var/www/piler
msg_ok "Installed Piler Web GUI"
msg_info "Configuring Nginx"
cat <<EOF >/etc/nginx/sites-available/piler
server {
listen 80;
server_name _;
root /var/www/piler;
index index.php;
access_log /var/log/nginx/piler-access.log;
error_log /var/log/nginx/piler-error.log;
charset utf-8;
location / {
try_files \$uri \$uri/ /index.php?\$args;
}
location ~ \.php$ {
fastcgi_pass unix:/run/php/php8.4-fpm-piler.sock;
fastcgi_index index.php;
fastcgi_param SCRIPT_FILENAME \$document_root\$fastcgi_script_name;
include fastcgi_params;
}
location ~* \.(jpg|jpeg|gif|css|png|js|ico|html|woff|woff2)$ {
access_log off;
expires max;
}
location ~ /\.ht {
deny all;
}
}
EOF
ln -sf /etc/nginx/sites-available/piler /etc/nginx/sites-enabled/piler
rm -f /etc/nginx/sites-enabled/default
$STD nginx -t
$STD systemctl enable --now nginx
msg_ok "Configured Nginx"
msg_info "Setting Up Cron Jobs"
cat <<EOF >/etc/cron.d/piler
30 6 * * * piler /usr/local/libexec/piler/indexer.delta.sh
30 7 * * * piler /usr/local/libexec/piler/indexer.main.sh
*/15 * * * * piler /usr/local/bin/pilerstat
30 2 * * * piler /usr/local/bin/pilerpurge
3 * * * * piler /usr/local/bin/pilerconf
EOF
msg_ok "Set Up Cron Jobs"
motd_ssh
customize
cleanup_lxc
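To confirm the archive is accepting mail and the web GUI is being served (a sketch; port 25 for pilerd and port 80 for nginx come from the configuration above, and ss/curl are assumed to be present in the Debian container):

ss -tlnp | grep -E ':(25|80) '                 # pilerd should hold :25, nginx :80
systemctl --no-pager status piler manticore memcached
curl -sI http://127.0.0.1/ | head -n 1         # expect an HTTP status line from the Piler web GUI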

View File

@ -43,13 +43,12 @@ systemctl restart redis-server
msg_ok "Redis configured"
msg_info "Configuring PHP-FPM Pool"
mkdir -p /run/php-fpm
cp /etc/php/8.4/fpm/pool.d/www.conf /etc/php/8.4/fpm/pool.d/pixelfed.conf
sed -i 's/\[www\]/[pixelfed]/' /etc/php/8.4/fpm/pool.d/pixelfed.conf
sed -i 's/^user = www-data/user = pixelfed/' /etc/php/8.4/fpm/pool.d/pixelfed.conf
sed -i 's/^group = www-data/group = pixelfed/' /etc/php/8.4/fpm/pool.d/pixelfed.conf
sed -i 's|^listen = .*|listen = /run/php-fpm/pixelfed.sock|' /etc/php/8.4/fpm/pool.d/pixelfed.conf
sed -i 's/^listen.owner = .*/listen.owner = pixelfed/' /etc/php/8.4/fpm/pool.d/pixelfed.conf
sed -i 's|^listen = .*|listen = /run/php/php8.4-fpm-pixelfed.sock|' /etc/php/8.4/fpm/pool.d/pixelfed.conf
sed -i 's/^listen.owner = .*/listen.owner = www-data/' /etc/php/8.4/fpm/pool.d/pixelfed.conf
sed -i 's/^listen.group = .*/listen.group = www-data/' /etc/php/8.4/fpm/pool.d/pixelfed.conf
systemctl restart php8.4-fpm
msg_ok "PHP-FPM Pool configured"
@ -75,6 +74,7 @@ sed -i "s|REDIS_PORT=.*|REDIS_PORT=6379|" .env
sed -i "s|ACTIVITY_PUB=.*|ACTIVITY_PUB=true|" .env
sed -i "s|AP_REMOTE_FOLLOW=.*|AP_REMOTE_FOLLOW=true|" .env
sed -i "s|OAUTH_ENABLED=.*|OAUTH_ENABLED=true|" .env
echo "SESSION_SECURE_COOKIE=false" >>.env
chown -R pixelfed:pixelfed /opt/pixelfed
chmod -R 755 /opt/pixelfed
@ -122,7 +122,7 @@ server {
location ~ \.php$ {
fastcgi_split_path_info ^(.+\.php)(/.+)$;
fastcgi_pass unix:/run/php-fpm/pixelfed.sock;
fastcgi_pass unix:/run/php/php8.4-fpm-pixelfed.sock;
fastcgi_index index.php;
include fastcgi_params;
fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;

View File

@ -26,25 +26,39 @@ NODE_VERSION="24" setup_nodejs
fetch_and_deploy_gh_release "postgresus" "RostislavDugin/postgresus" "tarball" "latest" "/opt/postgresus"
msg_info "Building Postgresus (Patience)"
# Build frontend
cd /opt/postgresus/frontend
$STD npm ci
$STD npm run build
# Build backend
cd /opt/postgresus/backend
$STD go mod tidy
$STD go mod download
$STD CGO_ENABLED=0 go build -o /opt/postgresus/postgresus ./cmd/main.go
mkdir -p /opt/postgresus/{data,backups,logs}
cp -r /opt/postgresus/frontend/dist /opt/postgresus/ui
$STD go install github.com/swaggo/swag/cmd/swag@latest
$STD /root/go/bin/swag init -g cmd/main.go -o swagger
$STD env CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o postgresus ./cmd/main.go
mv /opt/postgresus/backend/postgresus /opt/postgresus/postgresus
mkdir -p /opt/postgresus_data/{data,backups,logs}
mkdir -p /postgresus-data/temp
mkdir -p /opt/postgresus/ui/build
cp -r /opt/postgresus/frontend/dist/* /opt/postgresus/ui/build/
cp -r /opt/postgresus/backend/migrations /opt/postgresus/
chown -R postgres:postgres /opt/postgresus
chown -R postgres:postgres /opt/postgresus_data
chown -R postgres:postgres /postgresus-data
msg_ok "Built Postgresus"
msg_info "Configuring Postgresus"
ADMIN_PASS=$(openssl rand -base64 12)
JWT_SECRET=$(openssl rand -hex 32)
# Create PostgreSQL version symlinks for compatibility
for v in 12 13 14 15 16 18; do
ln -sf /usr/lib/postgresql/17 /usr/lib/postgresql/$v
done
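# Illustrative effect: /usr/lib/postgresql/16/bin/pg_dump now resolves to the installed
# PostgreSQL 17 binaries, so version-specific tool lookups keep working.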
# Install goose for migrations
$STD go install github.com/pressly/goose/v3/cmd/goose@latest
ln -sf /root/go/bin/goose /usr/local/bin/goose
cat <<EOF >/opt/postgresus/.env
# Environment
ENV_MODE=production
@ -54,8 +68,14 @@ SERVER_PORT=4005
SERVER_HOST=0.0.0.0
# Database (Internal PostgreSQL for app data)
DATABASE_DSN=host=localhost user=${PG_DB_USER} password=${PG_DB_PASS} dbname=${PG_DB_NAME} port=5432 sslmode=disable
DATABASE_URL=postgres://${PG_DB_USER}:${PG_DB_PASS}@localhost:5432/${PG_DB_NAME}?sslmode=disable
# Migrations
GOOSE_DRIVER=postgres
GOOSE_DBSTRING=postgres://${PG_DB_USER}:${PG_DB_PASS}@localhost:5432/${PG_DB_NAME}?sslmode=disable
GOOSE_MIGRATION_DIR=/opt/postgresus/migrations
# Security
JWT_SECRET=${JWT_SECRET}
ENCRYPTION_KEY=$(openssl rand -hex 32)
@ -65,15 +85,16 @@ ADMIN_EMAIL=admin@localhost
ADMIN_PASSWORD=${ADMIN_PASS}
# Paths
DATA_DIR=/opt/postgresus/data
BACKUP_DIR=/opt/postgresus/backups
LOG_DIR=/opt/postgresus/logs
DATA_DIR=/opt/postgresus_data/data
BACKUP_DIR=/opt/postgresus_data/backups
LOG_DIR=/opt/postgresus_data/logs
# PostgreSQL Tools (for creating backups)
PG_DUMP_PATH=/usr/bin/pg_dump
PG_RESTORE_PATH=/usr/bin/pg_restore
PSQL_PATH=/usr/bin/psql
PG_DUMP_PATH=/usr/lib/postgresql/17/bin/pg_dump
PG_RESTORE_PATH=/usr/lib/postgresql/17/bin/pg_restore
PSQL_PATH=/usr/lib/postgresql/17/bin/psql
EOF
chown postgres:postgres /opt/postgresus/.env
chmod 600 /opt/postgresus/.env
msg_ok "Configured Postgresus"
@ -89,6 +110,7 @@ Type=simple
User=postgres
Group=postgres
WorkingDirectory=/opt/postgresus
Environment="PATH=/usr/local/bin:/usr/bin:/bin"
EnvironmentFile=/opt/postgresus/.env
ExecStart=/opt/postgresus/postgresus
Restart=always
@ -99,6 +121,7 @@ StandardError=journal
[Install]
WantedBy=multi-user.target
EOF
$STD systemctl daemon-reload
$STD systemctl enable -q --now postgresus
msg_ok "Created Postgresus Service"

View File

@ -2,9 +2,10 @@
# Copyright (c) 2021-2025 community-scripts ORG
# Author: DevelopmentCats
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Co-author: AlphaLawless
# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
# Source: https://romm.app
# Updated: 03/10/2025
# Updated: 25/12/2025
source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
color
@ -18,65 +19,126 @@ msg_info "Installing dependencies"
$STD apt-get install -y \
acl \
build-essential \
gcc \
g++ \
make \
git \
curl \
libssl-dev \
libffi-dev \
libmagic-dev \
python3-dev \
python3-pip \
python3-venv \
libmariadb3 \
libmariadb-dev \
libpq-dev \
libbz2-dev \
libreadline-dev \
libsqlite3-dev \
zlib1g-dev \
liblzma-dev \
libncurses5-dev \
libncursesw5-dev \
redis-server \
redis-tools \
p7zip \
p7zip-full \
tzdata \
jq
msg_ok "Installed core dependencies"
jq \
nginx
msg_ok "Installed dependencies"
PYTHON_VERSION="3.12" setup_uv
NODE_VERSION="22" NODE_MODULE="serve" setup_nodejs
UV_VERSION="0.7.19" PYTHON_VERSION="3.13" setup_uv
NODE_VERSION="22" setup_nodejs
setup_mariadb
MARIADB_DB_NAME="romm" MARIADB_DB_USER="romm" setup_mariadb_db
msg_info "Configuring Database"
DB_NAME=romm
DB_USER=romm
DB_PASS=$(openssl rand -base64 18 | tr -dc 'a-zA-Z0-9' | head -c13)
$STD mariadb -u root -e "CREATE DATABASE IF NOT EXISTS $DB_NAME CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;"
$STD mariadb -u root -e "CREATE USER IF NOT EXISTS '$DB_USER'@'localhost' IDENTIFIED BY '$DB_PASS';"
$STD mariadb -u root -e "GRANT ALL ON $DB_NAME.* TO '$DB_USER'@'localhost'; FLUSH PRIVILEGES;"
{
echo "RomM-Credentials"
echo "RomM Database User: $DB_USER"
echo "RomM Database Password: $DB_PASS"
echo "RomM Database Name: $DB_NAME"
} >~/romm.creds
chmod 600 ~/romm.creds
msg_ok "Configured Database"
msg_info "Creating romm user and directories"
id -u romm &>/dev/null || useradd -r -m -d /var/lib/romm -s /bin/bash romm
msg_info "Creating directories"
mkdir -p /opt/romm \
/var/lib/romm/config \
/var/lib/romm/resources \
/var/lib/romm/assets/{saves,states,screenshots} \
/var/lib/romm/library/roms/{gba,gbc,ps} \
/var/lib/romm/library/bios/{gba,ps}
chown -R romm:romm /opt/romm /var/lib/romm
msg_ok "Created romm user and directories"
/var/lib/romm/library/roms \
/var/lib/romm/library/bios
msg_ok "Created directories"
msg_info "Configuring Database"
DB_NAME=romm
DB_USER=romm
DB_PASS=$(openssl rand -base64 18 | tr -dc 'a-zA-Z0-9' | head -c13)
$STD mariadb -u root -e "CREATE DATABASE $DB_NAME CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;"
$STD mariadb -u root -e "CREATE USER '$DB_USER'@'localhost' IDENTIFIED BY '$DB_PASS';"
$STD mariadb -u root -e "GRANT ALL ON $DB_NAME.* TO '$DB_USER'@'localhost'; FLUSH PRIVILEGES;"
{
echo "RomM-Credentials"
echo "RomM Database User: $DB_USER"
echo "RomM Database Password: $DB_PASS"
echo "RomM Database Name: $DB_NAME"
} >~/romm.creds
msg_ok "Configured Database"
msg_info "Creating configuration file"
cat >/var/lib/romm/config/config.yml <<'CONFIGEOF'
# RomM Configuration File
# Documentation: https://docs.romm.app/latest/Getting-Started/Configuration-File/
# Only uncomment the lines you want to use/modify
# exclude:
# platforms:
# - excluded_folder_a
# roms:
# single_file:
# extensions:
# - xml
# - txt
# names:
# - '._*'
# - '*.nfo'
# multi_file:
# names:
# - downloaded_media
# - media
# system:
# platforms:
# gc: ngc
# ps1: psx
# The folder name where your roms are located (relative to library path)
# filesystem:
# roms_folder: 'roms'
# scan:
# priority:
# metadata:
# - "igdb"
# - "moby"
# - "ss"
# - "ra"
# artwork:
# - "igdb"
# - "moby"
# - "ss"
# region:
# - "us"
# - "eu"
# - "jp"
# language:
# - "en"
# media:
# - box2d
# - box3d
# - screenshot
# - manual
# emulatorjs:
# debug: false
# cache_limit: null
CONFIGEOF
chmod 644 /var/lib/romm/config/config.yml
msg_ok "Created configuration file"
msg_info "Building RAHasher (RetroAchievements)"
RAHASHER_VERSION="1.8.1"
cd /tmp
git clone --recursive --branch "$RAHASHER_VERSION" --depth 1 https://github.com/RetroAchievements/RALibretro.git
cd RALibretro
sed -i '22a #include <ctime>' ./src/Util.h
sed -i '6a #include <unistd.h>' \
./src/libchdr/deps/zlib-1.3.1/gzlib.c \
./src/libchdr/deps/zlib-1.3.1/gzread.c \
./src/libchdr/deps/zlib-1.3.1/gzwrite.c
$STD make HAVE_CHD=1 -f ./Makefile.RAHasher
cp ./bin64/RAHasher /usr/bin/RAHasher
chmod +x /usr/bin/RAHasher
cd /tmp
rm -rf /tmp/RALibretro
msg_ok "Built RAHasher"
fetch_and_deploy_gh_release "romm" "rommapp/romm"
@ -88,13 +150,14 @@ AUTH_SECRET_KEY=$(openssl rand -hex 32)
cat >/opt/romm/.env <<EOF
ROMM_BASE_PATH=/var/lib/romm
ROMM_CONFIG_PATH=/var/lib/romm/config/config.yml
WEB_CONCURRENCY=4
DB_HOST=127.0.0.1
DB_PORT=3306
DB_NAME=$DB_NAME
DB_USER=$DB_USER
DB_PASSWD=$DB_PASS
DB_NAME=$MARIADB_DB_NAME
DB_USER=$MARIADB_DB_USER
DB_PASSWD=$MARIADB_DB_PASS
REDIS_HOST=127.0.0.1
REDIS_PORT=6379
@ -114,29 +177,101 @@ SCHEDULED_UPDATE_SWITCH_TITLEDB_CRON=0 4 * * *
LOGLEVEL=INFO
EOF
chown romm:romm /opt/romm/.env
chmod 600 /opt/romm/.env
msg_ok "Created environment file"
msg_info "Installing backend"
cd /opt/romm
uv pip install --all-extras .
# Limit concurrent downloads to avoid DNS resolution failures in LXC containers
# See: https://github.com/astral-sh/uv/issues/12054
export UV_CONCURRENT_DOWNLOADS=1
$STD uv sync --all-extras
cd /opt/romm/backend
uv run alembic upgrade head
chown -R romm:romm /opt/romm
$STD uv run alembic upgrade head
msg_ok "Installed backend"
msg_info "Installing frontend"
cd /opt/romm/frontend
npm install
npm run build
ln -sfn /var/lib/romm/resources /opt/romm/frontend/assets/romm/resources
ln -sfn /var/lib/romm/assets /opt/romm/frontend/assets/romm/assets
chown -R romm:romm /opt/romm
$STD npm install
$STD npm run build
mkdir -p /opt/romm/frontend/dist/assets/romm
ln -sfn /var/lib/romm/resources /opt/romm/frontend/dist/assets/romm/resources
ln -sfn /var/lib/romm/assets /opt/romm/frontend/dist/assets/romm/assets
msg_ok "Installed frontend"
msg_info "Creating services"
msg_info "Configuring nginx"
cat >/etc/nginx/sites-available/romm <<'EOF'
upstream romm_backend {
server 127.0.0.1:5000;
}
map $http_upgrade $connection_upgrade {
default upgrade;
'' close;
}
server {
listen 80;
server_name _;
root /opt/romm/frontend/dist;
client_max_body_size 0;
# Frontend SPA
location / {
try_files $uri $uri/ /index.html;
}
# EmulatorJS player - requires COOP/COEP headers for SharedArrayBuffer
location ~ ^/rom/.*/ejs$ {
add_header Cross-Origin-Embedder-Policy "require-corp";
add_header Cross-Origin-Opener-Policy "same-origin";
try_files $uri /index.html;
}
# Backend API
location /api {
proxy_pass http://romm_backend;
proxy_buffering off;
proxy_request_buffering off;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
# WebSocket and Netplay
location ~ ^/(ws|netplay) {
proxy_pass http://romm_backend;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
proxy_set_header Host $host;
proxy_read_timeout 86400;
}
# OpenAPI docs
location = /openapi.json {
proxy_pass http://romm_backend;
}
# Internal library file serving
location /library/ {
internal;
alias /var/lib/romm/library/;
}
}
EOF
rm -f /etc/nginx/sites-enabled/default
ln -sf /etc/nginx/sites-available/romm /etc/nginx/sites-enabled/romm
$STD nginx -t
systemctl restart nginx
systemctl enable -q nginx
msg_ok "Configured nginx"
msg_info "Creating services"
cat >/etc/systemd/system/romm-backend.service <<EOF
[Unit]
Description=RomM Backend
@ -145,27 +280,12 @@ Requires=mariadb.service redis-server.service
[Service]
Type=simple
User=romm
WorkingDirectory=/opt/romm/backend
EnvironmentFile=/opt/romm/.env
Environment="PYTHONPATH=/opt/romm"
ExecStart=/opt/romm/.venv/bin/uv run gunicorn main:app --workers 4 --worker-class uvicorn.workers.UvicornWorker --bind 0.0.0.0:5000
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF
cat >/etc/systemd/system/romm-frontend.service <<EOF
[Unit]
Description=RomM Frontend
After=network.target
[Service]
Type=simple
User=romm
WorkingDirectory=/opt/romm/frontend
ExecStart=$(which serve) -s dist -l 8080
ExecStart=/opt/romm/.venv/bin/python main.py
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
@ -173,17 +293,18 @@ EOF
cat >/etc/systemd/system/romm-worker.service <<EOF
[Unit]
Description=RomM Worker
Description=RomM RQ Worker
After=network.target mariadb.service redis-server.service romm-backend.service
Requires=mariadb.service redis-server.service
[Service]
Type=simple
User=romm
WorkingDirectory=/opt/romm/backend
Environment="PYTHONPATH=/opt/romm"
ExecStart=/opt/romm/.venv/bin/uv run python3 worker.py
EnvironmentFile=/opt/romm/.env
Environment="PYTHONPATH=/opt/romm/backend"
ExecStart=/opt/romm/.venv/bin/rq worker --path /opt/romm/backend --url redis://127.0.0.1:6379/0 high default low
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
@ -191,33 +312,48 @@ EOF
cat >/etc/systemd/system/romm-scheduler.service <<EOF
[Unit]
Description=RomM Scheduler
Description=RomM RQ Scheduler
After=network.target mariadb.service redis-server.service romm-backend.service
Requires=mariadb.service redis-server.service
[Service]
Type=simple
User=romm
WorkingDirectory=/opt/romm/backend
Environment="PYTHONPATH=/opt/romm"
ExecStart=/opt/romm/.venv/bin/uv run python3 scheduler.py
EnvironmentFile=/opt/romm/.env
Environment="PYTHONPATH=/opt/romm/backend"
Environment="RQ_REDIS_HOST=127.0.0.1"
Environment="RQ_REDIS_PORT=6379"
ExecStart=/opt/romm/.venv/bin/rqscheduler --path /opt/romm/backend
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
systemctl enable -q --now romm-backend romm-frontend romm-worker romm-scheduler
msg_ok "Created services"
cat >/etc/systemd/system/romm-watcher.service <<EOF
[Unit]
Description=RomM Filesystem Watcher
After=network.target romm-backend.service
Requires=romm-backend.service
# Install serve globally
su - ${ROMM_USER} -c "npm install -g serve"
[Service]
Type=simple
WorkingDirectory=/opt/romm/backend
EnvironmentFile=/opt/romm/.env
Environment="PYTHONPATH=/opt/romm/backend"
ExecStart=/opt/romm/.venv/bin/watchfiles --target-type command '/opt/romm/.venv/bin/python watcher.py' /var/lib/romm/library
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl enable -q --now romm-backend romm-worker romm-scheduler romm-watcher
msg_ok "Created services"
motd_ssh
customize
msg_info "Cleaning up"
$STD apt-get -y autoremove
$STD apt-get -y autoclean
$STD apt-get -y clean
msg_ok "Cleaned up"
cleanup_lxc

View File

@ -0,0 +1,48 @@
#!/usr/bin/env bash
# Copyright (c) 2021-2025 community-scripts ORG
# Author: GoldenSpringness
# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
# Source: https://github.com/orhun/rustypaste
source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
color
verb_ip6
catch_errors
setting_up_container
network_check
update_os
msg_info "Installing Dependencies"
$STD apt install -y build-essential
msg_ok "Dependencies Installed Successfully"
RUST_VERSION="1.92.0" setup_rust
fetch_and_deploy_gh_release "rustypaste" "orhun/rustypaste" "tarball"
msg_info "Setting up rustypaste"
cd /opt/rustypaste
sed -i 's|^address = ".*"|address = "0.0.0.0:8000"|' config.toml
$STD cargo build --locked --release
msg_ok "Set up rustypaste"
msg_info "Creating Service"
cat <<EOF >/etc/systemd/system/rustypaste.service
[Unit]
Description=rustypaste Service
After=network.target
[Service]
WorkingDirectory=/opt/rustypaste
ExecStart=/opt/rustypaste/target/release/rustypaste
Restart=always
[Install]
WantedBy=multi-user.target
EOF
systemctl enable -q --now rustypaste
msg_ok "Created Service"
motd_ssh
customize
cleanup_lxc

View File

@ -0,0 +1,57 @@
#!/usr/bin/env bash
# Copyright (c) 2021-2025 community-scripts ORG
# Author: Slaviša Arežina (tremor021)
# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
# Source: https://github.com/Sportarr/Sportarr
source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
color
verb_ip6
catch_errors
setting_up_container
network_check
update_os
setup_hwaccel
msg_info "Installing Dependencies"
$STD apt install -y \
ffmpeg \
gosu \
sqlite3
msg_ok "Installed Dependencies"
fetch_and_deploy_gh_release "sportarr" "Sportarr/Sportarr" "prebuild" "latest" "/opt/sportarr" "Sportarr-linux-x64-*.tar.gz"
msg_info "Setting up Sportarr"
cat <<EOF >/opt/sportarr/.env
Sportarr__DataPath="/opt/sportarr-data/config"
ASPNETCORE_URLS="http://*:1867"
ASPNETCORE_ENVIRONMENT="Production"
DOTNET_CLI_TELEMETRY_OPTOUT=1
DOTNET_SYSTEM_GLOBALIZATION_INVARIANT=false
LIBVA_DRIVER_NAME=iHD
EOF
msg_ok "Setup Sportarr"
msg_info "Creating Service"
cat <<EOF >/etc/systemd/system/sportarr.service
[Unit]
Description=Sportarr Service
After=network.target
[Service]
EnvironmentFile=/opt/sportarr/.env
WorkingDirectory=/opt/sportarr
ExecStart=/opt/sportarr/Sportarr
Restart=always
[Install]
WantedBy=multi-user.target
EOF
systemctl enable -q --now sportarr
msg_info "Created Service"
motd_ssh
customize
cleanup_lxc

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -72,15 +72,19 @@ stop_all_services() {
local service_patterns=("$@")
for pattern in "${service_patterns[@]}"; do
# Find all matching services
systemctl list-units --type=service --all 2>/dev/null |
grep -oE "${pattern}[^ ]*\.service" |
sort -u |
# Find all matching services (grep || true to handle no matches)
local services
services=$(systemctl list-units --type=service --all 2>/dev/null |
grep -oE "${pattern}[^ ]*\.service" 2>/dev/null | sort -u) || true
if [[ -n "$services" ]]; then
while read -r service; do
$STD systemctl stop "$service" 2>/dev/null || true
$STD systemctl disable "$service" 2>/dev/null || true
done
done <<<"$services"
fi
done
}
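# Usage sketch (pattern is treated as a grep ERE prefix; "mariadb" here is an illustrative
# second argument): stop_all_services "php.*-fpm" "mariadb" stops and disables every matching unit.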
# ------------------------------------------------------------------------------
@ -188,6 +192,8 @@ install_packages_with_retry() {
if [[ $retry -le $max_retries ]]; then
msg_warn "Package installation failed, retrying ($retry/$max_retries)..."
sleep 2
# Fix any interrupted dpkg operations before retry
$STD dpkg --configure -a 2>/dev/null || true
$STD apt update 2>/dev/null || true
fi
done
@ -213,6 +219,8 @@ upgrade_packages_with_retry() {
if [[ $retry -le $max_retries ]]; then
msg_warn "Package upgrade failed, retrying ($retry/$max_retries)..."
sleep 2
# Fix any interrupted dpkg operations before retry
$STD dpkg --configure -a 2>/dev/null || true
$STD apt update 2>/dev/null || true
fi
done
@ -1178,6 +1186,12 @@ cleanup_orphaned_sources() {
# This should be called at the start of any setup function
# ------------------------------------------------------------------------------
ensure_apt_working() {
# Fix interrupted dpkg operations first
# This can happen if a previous installation was interrupted (e.g., by script error)
if [[ -f /var/lib/dpkg/lock-frontend ]] || dpkg --audit 2>&1 | grep -q "interrupted"; then
$STD dpkg --configure -a 2>/dev/null || true
fi
# Clean up orphaned sources first
cleanup_orphaned_sources
@ -1208,6 +1222,7 @@ setup_deb822_repo() {
local suite="$4"
local component="${5:-main}"
local architectures="${6-}" # optional
local enabled="${7-}" # optional: "true" or "false"
# Validate required parameters
if [[ -z "$name" || -z "$gpg_url" || -z "$repo_url" || -z "$suite" ]]; then
@ -1235,9 +1250,13 @@ setup_deb822_repo() {
echo "Types: deb"
echo "URIs: $repo_url"
echo "Suites: $suite"
echo "Components: $component"
# Flat repositories (suite="./" or absolute path) must not have Components
if [[ "$suite" != "./" && -n "$component" ]]; then
echo "Components: $component"
fi
[[ -n "$architectures" ]] && echo "Architectures: $architectures"
echo "Signed-By: /etc/apt/keyrings/${name}.gpg"
[[ -n "$enabled" ]] && echo "Enabled: $enabled"
} >/etc/apt/sources.list.d/${name}.sources
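# For reference, a flat repository entry (hypothetical example, not produced by this function)
# would use Suites: ./ and carry no Components line:
#   Types: deb
#   URIs: https://example.com/debian/
#   Suites: ./
#   Signed-By: /etc/apt/keyrings/example.gpg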
$STD apt update
@ -1439,15 +1458,32 @@ check_for_gh_release() {
ensure_dependencies jq
# Fetch releases and exclude drafts/prereleases
local releases_json
releases_json=$(curl -fsSL --max-time 20 \
-H 'Accept: application/vnd.github+json' \
-H 'X-GitHub-Api-Version: 2022-11-28' \
"https://api.github.com/repos/${source}/releases") || {
msg_error "Unable to fetch releases for ${app}"
return 1
}
# Try /latest endpoint for non-pinned versions (most efficient)
local releases_json=""
if [[ -z "$pinned_version_in" ]]; then
releases_json=$(curl -fsSL --max-time 20 \
-H 'Accept: application/vnd.github+json' \
-H 'X-GitHub-Api-Version: 2022-11-28' \
"https://api.github.com/repos/${source}/releases/latest" 2>/dev/null)
if [[ $? -eq 0 ]] && [[ -n "$releases_json" ]]; then
# Wrap single release in array for consistent processing
releases_json="[$releases_json]"
fi
fi
# If no releases yet (pinned version OR /latest failed), fetch up to 100
if [[ -z "$releases_json" ]]; then
# Fetch releases and exclude drafts/prereleases
releases_json=$(curl -fsSL --max-time 20 \
-H 'Accept: application/vnd.github+json' \
-H 'X-GitHub-Api-Version: 2022-11-28' \
"https://api.github.com/repos/${source}/releases?per_page=100") || {
msg_error "Unable to fetch releases for ${app}"
return 1
}
fi
mapfile -t raw_tags < <(jq -r '.[] | select(.draft==false and .prerelease==false) | .tag_name' <<<"$releases_json")
if ((${#raw_tags[@]} == 0)); then
@ -1721,12 +1757,13 @@ function fetch_and_deploy_gh_release() {
### Tarball Mode ###
if [[ "$mode" == "tarball" || "$mode" == "source" ]]; then
url=$(echo "$json" | jq -r '.tarball_url // empty')
[[ -z "$url" ]] && url="https://github.com/$repo/archive/refs/tags/v$version.tar.gz"
# GitHub API's tarball_url/zipball_url can return HTTP 300 Multiple Choices
# when a branch and tag share the same name. Use explicit refs/tags/ URL instead.
local direct_tarball_url="https://github.com/$repo/archive/refs/tags/$tag_name.tar.gz"
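# e.g. https://github.com/owner/repo/archive/refs/tags/v1.2.3.tar.gz (illustrative owner/repo/tag)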
filename="${app_lc}-${version}.tar.gz"
curl $download_timeout -fsSL -o "$tmpdir/$filename" "$url" || {
msg_error "Download failed: $url"
curl $download_timeout -fsSL -o "$tmpdir/$filename" "$direct_tarball_url" || {
msg_error "Download failed: $direct_tarball_url"
rm -rf "$tmpdir"
return 1
}
@ -2548,93 +2585,203 @@ function setup_hwaccel() {
fi
# Detect GPU vendor (Intel, AMD, NVIDIA)
local gpu_vendor
gpu_vendor=$(lspci 2>/dev/null | grep -Ei 'vga|3d|display' | grep -Eo 'Intel|AMD|NVIDIA' | head -n1 || echo "")
local gpu_vendor gpu_info
gpu_info=$(lspci 2>/dev/null | grep -Ei 'vga|3d|display' || echo "")
gpu_vendor=$(echo "$gpu_info" | grep -Eo 'Intel|AMD|NVIDIA' | head -n1 || echo "")
# Detect CPU vendor (relevant for AMD APUs)
local cpu_vendor
cpu_vendor=$(lscpu 2>/dev/null | grep -i 'Vendor ID' | awk '{print $3}' || echo "")
if [[ -z "$gpu_vendor" && -z "$cpu_vendor" ]]; then
msg_error "No GPU or CPU vendor detected (missing lspci/lscpu output)"
return 1
msg_warn "No GPU or CPU vendor detected - skipping hardware acceleration setup"
msg_ok "Setup Hardware Acceleration (skipped - no GPU detected)"
return 0
fi
# Detect OS with fallbacks
local os_id os_codename
os_id=$(grep -oP '(?<=^ID=).+' /etc/os-release 2>/dev/null | tr -d '"' || grep '^ID=' /etc/os-release 2>/dev/null | cut -d'=' -f2 | tr -d '"' || echo "debian")
os_codename=$(grep -oP '(?<=^VERSION_CODENAME=).+' /etc/os-release 2>/dev/null | tr -d '"' || grep '^VERSION_CODENAME=' /etc/os-release 2>/dev/null | cut -d'=' -f2 | tr -d '"' || echo "unknown")
local os_id os_codename os_version
os_id=$(grep -oP '(?<=^ID=).+' /etc/os-release 2>/dev/null | tr -d '"' || echo "debian")
os_codename=$(grep -oP '(?<=^VERSION_CODENAME=).+' /etc/os-release 2>/dev/null | tr -d '"' || echo "unknown")
os_version=$(grep -oP '(?<=^VERSION_ID=).+' /etc/os-release 2>/dev/null | tr -d '"' || echo "")
# Validate os_id
if [[ -z "$os_id" ]]; then
os_id="debian"
fi
[[ -z "$os_id" ]] && os_id="debian"
# Determine if we are on a VM or LXC
# Determine if we are in a privileged LXC container
local in_ct="${CTTYPE:-0}"
case "$gpu_vendor" in
Intel)
if [[ "$os_id" == "ubuntu" ]]; then
$STD apt -y install intel-opencl-icd || {
msg_error "Failed to install intel-opencl-icd"
return 1
}
else
# For Debian: fetch Intel GPU drivers from GitHub
fetch_and_deploy_gh_release "" "intel/intel-graphics-compiler" "binary" "latest" "" "intel-igc-core-2_*_amd64.deb" || {
msg_warn "Failed to deploy Intel IGC core 2"
}
fetch_and_deploy_gh_release "" "intel/intel-graphics-compiler" "binary" "latest" "" "intel-igc-opencl-2_*_amd64.deb" || {
msg_warn "Failed to deploy Intel IGC OpenCL 2"
}
fetch_and_deploy_gh_release "" "intel/compute-runtime" "binary" "latest" "" "libigdgmm12_*_amd64.deb" || {
msg_warn "Failed to deploy Intel GDGMM12"
}
fetch_and_deploy_gh_release "" "intel/compute-runtime" "binary" "latest" "" "intel-opencl-icd_*_amd64.deb" || {
msg_warn "Failed to deploy Intel OpenCL ICD"
}
# Detect Intel GPU generation for driver selection
# Gen 9+ (Skylake and newer) benefit from non-free drivers
local intel_gen=""
local needs_nonfree=false
# Check for specific Intel GPU models that need non-free drivers
if echo "$gpu_info" | grep -Ei 'HD Graphics [56][0-9]{2}|UHD Graphics|Iris|Arc|DG[12]' &>/dev/null; then
needs_nonfree=true
intel_gen="gen9+"
fi
$STD apt -y install va-driver-all ocl-icd-libopencl1 vainfo intel-gpu-tools || {
msg_error "Failed to install Intel GPU dependencies"
return 1
}
if [[ "$os_id" == "ubuntu" ]]; then
# Ubuntu: Use packages from Ubuntu repos
$STD apt -y install \
va-driver-all \
ocl-icd-libopencl1 \
intel-opencl-icd \
vainfo \
libmfx-gen1.2 \
intel-gpu-tools || {
msg_error "Failed to install Intel GPU dependencies"
return 1
}
# Try to install intel-media-va-driver for newer GPUs
$STD apt -y install intel-media-va-driver 2>/dev/null || true
elif [[ "$os_id" == "debian" ]]; then
# Debian: Check version and install appropriate drivers
if [[ "$needs_nonfree" == true ]]; then
# Add non-free repo for intel-media-va-driver-non-free
if [[ "$os_codename" == "bookworm" ]]; then
# Debian 12 Bookworm
if [[ ! -f /etc/apt/sources.list.d/non-free.list && ! -f /etc/apt/sources.list.d/non-free.sources ]]; then
cat <<EOF >/etc/apt/sources.list.d/non-free.sources
Types: deb
URIs: http://deb.debian.org/debian
Suites: bookworm bookworm-updates
Components: non-free non-free-firmware
EOF
$STD apt update
fi
$STD apt -y install \
intel-media-va-driver-non-free \
ocl-icd-libopencl1 \
intel-opencl-icd \
vainfo \
libmfx-gen1.2 \
intel-gpu-tools || {
msg_warn "Non-free driver install failed, falling back to open drivers"
needs_nonfree=false
}
elif [[ "$os_codename" == "trixie" || "$os_codename" == "sid" ]]; then
# Debian 13 Trixie / Sid
if [[ ! -f /etc/apt/sources.list.d/non-free.sources ]]; then
cat <<'EOF' >/etc/apt/sources.list.d/non-free.sources
Types: deb
URIs: http://deb.debian.org/debian
Suites: trixie trixie-updates
Components: non-free non-free-firmware
Types: deb
URIs: http://deb.debian.org/debian-security
Suites: trixie-security
Components: non-free non-free-firmware
EOF
$STD apt update
fi
$STD apt -y install \
intel-media-va-driver-non-free \
ocl-icd-libopencl1 \
mesa-opencl-icd \
mesa-va-drivers \
libvpl2 \
vainfo \
libmfx-gen1.2 \
intel-gpu-tools 2>/dev/null || {
msg_warn "Non-free driver install failed, falling back to open drivers"
needs_nonfree=false
}
fi
fi
# Fall back to open drivers for older Intel GPUs or when the non-free install failed
if [[ "$needs_nonfree" == false ]]; then
# Fetch latest Intel drivers from GitHub for Debian
fetch_and_deploy_gh_release "" "intel/intel-graphics-compiler" "binary" "latest" "" "intel-igc-core-2_*_amd64.deb" || {
msg_warn "Failed to deploy Intel IGC core 2"
}
fetch_and_deploy_gh_release "" "intel/intel-graphics-compiler" "binary" "latest" "" "intel-igc-opencl-2_*_amd64.deb" || {
msg_warn "Failed to deploy Intel IGC OpenCL 2"
}
fetch_and_deploy_gh_release "" "intel/compute-runtime" "binary" "latest" "" "libigdgmm12_*_amd64.deb" || {
msg_warn "Failed to deploy Intel GDGMM12"
}
fetch_and_deploy_gh_release "" "intel/compute-runtime" "binary" "latest" "" "intel-opencl-icd_*_amd64.deb" || {
msg_warn "Failed to deploy Intel OpenCL ICD"
}
$STD apt -y install \
va-driver-all \
ocl-icd-libopencl1 \
mesa-opencl-icd \
mesa-va-drivers \
vainfo \
intel-gpu-tools || {
msg_error "Failed to install Intel GPU dependencies"
return 1
}
fi
fi
;;
AMD)
$STD apt -y install mesa-va-drivers mesa-vdpau-drivers mesa-opencl-icd vainfo clinfo || {
$STD apt -y install \
mesa-va-drivers \
mesa-vdpau-drivers \
mesa-opencl-icd \
ocl-icd-libopencl1 \
vainfo \
clinfo 2>/dev/null || {
msg_error "Failed to install AMD GPU dependencies"
return 1
}
# For AMD CPUs without discrete GPU (APUs)
if [[ "$cpu_vendor" == "AuthenticAMD" && -n "$gpu_vendor" ]]; then
$STD apt -y install libdrm-amdgpu1 firmware-amd-graphics || true
# AMD firmware for better GPU support
if [[ "$os_id" == "debian" ]]; then
$STD apt -y install firmware-amd-graphics 2>/dev/null || true
fi
$STD apt -y install libdrm-amdgpu1 2>/dev/null || true
;;
NVIDIA)
# NVIDIA needs manual driver setup - skip for now
msg_info "NVIDIA GPU detected - manual driver setup required"
# NVIDIA needs manual driver setup or passthrough from host
msg_warn "NVIDIA GPU detected - driver must be installed manually or passed through from host"
# Install basic VA-API support for potential hybrid setups
$STD apt -y install va-driver-all vainfo 2>/dev/null || true
;;
*)
# If no discrete GPU, but AMD CPU (e.g., Ryzen APU)
# No discrete GPU detected - check for AMD APU
if [[ "$cpu_vendor" == "AuthenticAMD" ]]; then
$STD apt -y install mesa-opencl-icd ocl-icd-libopencl1 clinfo || {
msg_error "Failed to install Mesa OpenCL stack"
return 1
}
$STD apt -y install \
mesa-va-drivers \
mesa-vdpau-drivers \
mesa-opencl-icd \
ocl-icd-libopencl1 \
vainfo 2>/dev/null || true
else
msg_warn "No supported GPU vendor detected - skipping GPU acceleration"
msg_warn "No supported GPU vendor detected - skipping GPU driver installation"
fi
;;
esac
if [[ -d /dev/dri ]]; then
# Set permissions for /dev/dri (only in privileged containers and if /dev/dri exists)
if [[ "$in_ct" == "0" && -d /dev/dri ]]; then
chgrp video /dev/dri 2>/dev/null || true
chmod 755 /dev/dri 2>/dev/null || true
chmod 660 /dev/dri/* 2>/dev/null || true
$STD adduser "$(id -u -n)" video
$STD adduser "$(id -u -n)" render
$STD adduser "$(id -u -n)" video 2>/dev/null || true
$STD adduser "$(id -u -n)" render 2>/dev/null || true
# Sync GID for video/render groups between host and container
local host_video_gid host_render_gid
host_video_gid=$(getent group video | cut -d: -f3)
host_render_gid=$(getent group render | cut -d: -f3)
if [[ -n "$host_video_gid" && -n "$host_render_gid" ]]; then
sed -i "s/^video:x:[0-9]*:/video:x:$host_video_gid:/" /etc/group 2>/dev/null || true
sed -i "s/^render:x:[0-9]*:/render:x:$host_render_gid:/" /etc/group 2>/dev/null || true
fi
fi
cache_installed_version "hwaccel" "1.0"
@ -2780,12 +2927,19 @@ function setup_java() {
INSTALLED_VERSION=$(dpkg -l 2>/dev/null | awk '/temurin-.*-jdk/{print $2}' | grep -oP 'temurin-\K[0-9]+' | head -n1 || echo "")
fi
# Validate INSTALLED_VERSION is not empty if matched
# Validate INSTALLED_VERSION is not empty if JDK package found
local JDK_COUNT=0
JDK_COUNT=$(dpkg -l 2>/dev/null | grep -c "temurin-.*-jdk" || true)
if [[ -z "$INSTALLED_VERSION" && "${JDK_COUNT:-0}" -gt 0 ]]; then
msg_warn "Found Temurin JDK but cannot determine version"
INSTALLED_VERSION="0"
msg_warn "Found Temurin JDK but cannot determine version - attempting reinstall"
# Try to get actual package name for purge
local OLD_PACKAGE
OLD_PACKAGE=$(dpkg -l 2>/dev/null | awk '/temurin-.*-jdk/{print $2}' | head -n1 || echo "")
if [[ -n "$OLD_PACKAGE" ]]; then
msg_info "Removing existing package: $OLD_PACKAGE"
$STD apt purge -y "$OLD_PACKAGE" || true
fi
INSTALLED_VERSION="" # Reset to trigger fresh install
fi
# Scenario 1: Already at correct version
@ -3234,7 +3388,6 @@ function setup_mongodb() {
return 1
}
# Verify MongoDB was installed correctly
if ! command -v mongod >/dev/null 2>&1; then
msg_error "MongoDB binary not found after installation"
return 1
@ -3410,12 +3563,12 @@ EOF
# - Optionally installs or updates global npm modules
#
# Variables:
# NODE_VERSION - Node.js version to install (default: 22)
# NODE_VERSION - Node.js version to install (default: 24 LTS)
# NODE_MODULE - Comma-separated list of global modules (e.g. "yarn,@vue/cli@5.0.0")
# ------------------------------------------------------------------------------
function setup_nodejs() {
local NODE_VERSION="${NODE_VERSION:-22}"
local NODE_VERSION="${NODE_VERSION:-24}"
local NODE_MODULE="${NODE_MODULE:-}"
# ALWAYS clean up legacy installations first (nvm, etc.) to prevent conflicts
@ -3477,14 +3630,11 @@ function setup_nodejs() {
return 1
}
# CRITICAL: Force APT cache refresh AFTER repository setup
# This ensures NodeSource is the only nodejs source in APT cache
# Force APT cache refresh after repository setup
$STD apt update
# Install dependencies (NodeSource is now the only nodejs source)
ensure_dependencies curl ca-certificates gnupg
# Install Node.js from NodeSource
install_packages_with_retry "nodejs" || {
msg_error "Failed to install Node.js ${NODE_VERSION} from NodeSource"
return 1
@ -3635,7 +3785,7 @@ function setup_php() {
local CURRENT_PHP=""
CURRENT_PHP=$(is_tool_installed "php" 2>/dev/null) || true
# CRITICAL: If wrong version is installed, remove it FIRST before any pinning
# Remove conflicting PHP version before pinning
if [[ -n "$CURRENT_PHP" && "$CURRENT_PHP" != "$PHP_VERSION" ]]; then
msg_info "Removing conflicting PHP ${CURRENT_PHP} (need ${PHP_VERSION})"
stop_all_services "php.*-fpm"
@ -3782,7 +3932,6 @@ EOF
local INSTALLED_VERSION=$(php -v 2>/dev/null | awk '/^PHP/{print $2}' | cut -d. -f1,2)
# Critical: if major.minor doesn't match, fail and cleanup
if [[ "$INSTALLED_VERSION" != "$PHP_VERSION" ]]; then
msg_error "PHP version mismatch: requested ${PHP_VERSION} but got ${INSTALLED_VERSION}"
msg_error "This indicates a critical package installation issue"
@ -3862,11 +4011,14 @@ function setup_postgresql() {
local SUITE
case "$DISTRO_CODENAME" in
trixie | forky | sid)
if verify_repo_available "https://apt.postgresql.org/pub/repos/apt" "trixie-pgdg"; then
SUITE="trixie-pgdg"
else
SUITE="bookworm-pgdg"
fi
;;
*)
SUITE=$(get_fallback_suite "$DISTRO_ID" "$DISTRO_CODENAME" "https://apt.postgresql.org/pub/repos/apt")
@ -4387,7 +4539,7 @@ function setup_rust() {
# Get currently installed version
local CURRENT_VERSION=""
if command -v rustc &>/dev/null; then
CURRENT_VERSION=$(rustc --version 2>/dev/null | awk '{print $2}')
CURRENT_VERSION=$(rustc --version 2>/dev/null | awk '{print $2}' 2>/dev/null) || true
fi
# Scenario 1: Rustup not installed - fresh install
@ -4406,7 +4558,8 @@ function setup_rust() {
return 1
fi
local RUST_VERSION=$(rustc --version 2>/dev/null | awk '{print $2}')
local RUST_VERSION
RUST_VERSION=$(rustc --version 2>/dev/null | awk '{print $2}' 2>/dev/null) || true
if [[ -z "$RUST_VERSION" ]]; then
msg_error "Failed to determine Rust version"
return 1
@ -4437,7 +4590,8 @@ function setup_rust() {
# Ensure PATH is updated for current shell session
export PATH="$CARGO_BIN:$PATH"
local RUST_VERSION=$(rustc --version 2>/dev/null | awk '{print $2}')
local RUST_VERSION
RUST_VERSION=$(rustc --version 2>/dev/null | awk '{print $2}' 2>/dev/null) || true
if [[ -z "$RUST_VERSION" ]]; then
msg_error "Failed to determine Rust version after update"
return 1
@ -4524,6 +4678,8 @@ function setup_uv() {
local UVX_BIN="/usr/local/bin/uvx"
local TMP_DIR=$(mktemp -d)
local CACHED_VERSION
local TARGET_VERSION=""
local USE_PINNED_VERSION=false
# trap for TMP Cleanup
trap "rm -rf '$TMP_DIR'" EXIT
@ -4559,22 +4715,27 @@ function setup_uv() {
ensure_dependencies jq
# Fetch latest version
local releases_json
releases_json=$(curl -fsSL --max-time 15 \
"https://api.github.com/repos/astral-sh/uv/releases/latest" 2>/dev/null || echo "")
# Check if specific version is requested via UV_VERSION environment variable
if [[ -n "${UV_VERSION:-}" ]]; then
TARGET_VERSION="${UV_VERSION}"
USE_PINNED_VERSION=true
else
# Fetch latest version from GitHub API
local releases_json
releases_json=$(curl -fsSL --max-time 15 \
"https://api.github.com/repos/astral-sh/uv/releases/latest" 2>/dev/null || echo "")
if [[ -z "$releases_json" ]]; then
msg_error "Could not fetch latest uv version from GitHub API"
return 1
fi
if [[ -z "$releases_json" ]]; then
msg_error "Could not fetch latest uv version from GitHub API"
return 1
fi
local LATEST_VERSION
LATEST_VERSION=$(echo "$releases_json" | jq -r '.tag_name' 2>/dev/null | sed 's/^v//')
TARGET_VERSION=$(echo "$releases_json" | jq -r '.tag_name' 2>/dev/null | sed 's/^v//')
if [[ -z "$LATEST_VERSION" ]]; then
msg_error "Could not parse uv version from GitHub API response"
return 1
if [[ -z "$TARGET_VERSION" ]]; then
msg_error "Could not parse uv version from GitHub API response"
return 1
fi
fi
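# Callers can pin a release, e.g. UV_VERSION="0.7.19" setup_uv (as the RomM installer above does);
# without UV_VERSION the latest GitHub release is installed.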
# Get currently installed version
@ -4583,9 +4744,9 @@ function setup_uv() {
INSTALLED_VERSION=$("$UV_BIN" --version 2>/dev/null | awk '{print $2}')
fi
# Scenario 1: Already at latest version
if [[ -n "$INSTALLED_VERSION" && "$INSTALLED_VERSION" == "$LATEST_VERSION" ]]; then
cache_installed_version "uv" "$LATEST_VERSION"
# Scenario 1: Already at target version
if [[ -n "$INSTALLED_VERSION" && "$INSTALLED_VERSION" == "$TARGET_VERSION" ]]; then
cache_installed_version "uv" "$TARGET_VERSION"
# Check if uvx is needed and missing
if [[ "${USE_UVX:-NO}" == "YES" ]] && [[ ! -x "$UVX_BIN" ]]; then
@ -4597,14 +4758,22 @@ function setup_uv() {
return 0
fi
# Scenario 2: New install or upgrade
if [[ -n "$INSTALLED_VERSION" && "$INSTALLED_VERSION" != "$LATEST_VERSION" ]]; then
msg_info "Upgrade uv from $INSTALLED_VERSION to $LATEST_VERSION"
# Scenario 2: New install or upgrade/downgrade
if [[ -n "$INSTALLED_VERSION" ]]; then
if [[ "$USE_PINNED_VERSION" == true ]]; then
msg_info "Switching uv from $INSTALLED_VERSION to pinned version $TARGET_VERSION"
else
msg_info "Upgrade uv from $INSTALLED_VERSION to $TARGET_VERSION"
fi
else
msg_info "Setup uv $LATEST_VERSION"
if [[ "$USE_PINNED_VERSION" == true ]]; then
msg_info "Setup uv $TARGET_VERSION (pinned)"
else
msg_info "Setup uv $TARGET_VERSION"
fi
fi
local UV_URL="https://github.com/astral-sh/uv/releases/download/${LATEST_VERSION}/${UV_TAR}"
local UV_URL="https://github.com/astral-sh/uv/releases/download/${TARGET_VERSION}/${UV_TAR}"
$STD curl -fsSL "$UV_URL" -o "$TMP_DIR/uv.tar.gz" || {
msg_error "Failed to download uv from $UV_URL"
@ -4647,6 +4816,7 @@ function setup_uv() {
if [[ -d /usr/share/zsh/site-functions ]]; then
$STD uv generate-shell-completion zsh >/usr/share/zsh/site-functions/_uv 2>/dev/null || true
fi
# Optional: Install specific Python version if requested
if [[ -n "${PYTHON_VERSION:-}" ]]; then
msg_info "Installing Python $PYTHON_VERSION via uv"
@ -4657,8 +4827,8 @@ function setup_uv() {
msg_ok "Python $PYTHON_VERSION installed"
fi
cache_installed_version "uv" "$LATEST_VERSION"
msg_ok "Setup uv $LATEST_VERSION"
cache_installed_version "uv" "$TARGET_VERSION"
msg_ok "Setup uv $TARGET_VERSION"
}
# Helper function to install uvx wrapper
@ -4805,6 +4975,7 @@ function setup_docker() {
# Cleanup old repository configurations
if [ -f /etc/apt/sources.list.d/docker.list ]; then
msg_info "Migrating from old Docker repository format"
rm -f /etc/apt/sources.list.d/docker.list
rm -f /etc/apt/keyrings/docker.asc
fi
@ -4818,7 +4989,6 @@ function setup_docker() {
"$(get_os_info codename)" \
"stable" \
"$(dpkg --print-architecture)"
msg_ok "Set up Docker Repository"
# Install or upgrade Docker
if [ "$docker_installed" = true ]; then
@ -4826,8 +4996,7 @@ function setup_docker() {
DOCKER_LATEST_VERSION=$(apt-cache policy docker-ce | grep Candidate | awk '{print $2}' | cut -d':' -f2 | cut -d'-' -f1)
if [ "$DOCKER_CURRENT_VERSION" != "$DOCKER_LATEST_VERSION" ]; then
msg_ok "Docker update available ($DOCKER_CURRENT_VERSION → $DOCKER_LATEST_VERSION)"
msg_info "Updating Docker"
msg_info "Updating Docker $DOCKER_CURRENT_VERSION → $DOCKER_LATEST_VERSION"
$STD apt install -y --only-upgrade \
docker-ce \
docker-ce-cli \
@ -4873,8 +5042,7 @@ EOF
PORTAINER_LATEST=$(curl -fsSL https://registry.hub.docker.com/v2/repositories/portainer/portainer-ce/tags?page_size=100 | grep -oP '"name":"\K[0-9]+\.[0-9]+\.[0-9]+"' | head -1 | tr -d '"')
if [ "$PORTAINER_CURRENT" != "$PORTAINER_LATEST" ]; then
msg_ok "Portainer update available ($PORTAINER_CURRENT → $PORTAINER_LATEST)"
read -r -p "${TAB3}Update Portainer? <y/N> " prompt
read -r -p "${TAB3}Update Portainer $PORTAINER_CURRENT → $PORTAINER_LATEST? <y/N> " prompt
if [[ ${prompt,,} =~ ^(y|yes)$ ]]; then
msg_info "Updating Portainer"
docker stop portainer
@ -4889,8 +5057,6 @@ EOF
-v portainer_data:/data \
portainer/portainer-ce:latest
msg_ok "Updated Portainer to $PORTAINER_LATEST"
else
msg_ok "Skipped Portainer update"
fi
else
msg_ok "Portainer is up-to-date ($PORTAINER_CURRENT)"
@ -4938,7 +5104,6 @@ EOF
done < <(docker ps --format '{{.Names}} {{.Image}}')
if [ ${#containers_with_updates[@]} -gt 0 ]; then
msg_ok "Found ${#containers_with_updates[@]} container(s) with updates"
echo ""
echo "${TAB3}Container updates available:"
for info in "${container_info[@]}"; do
@ -4967,8 +5132,6 @@ EOF
msg_ok "Stopped and removed $container (please recreate with updated image)"
fi
done
else
msg_ok "Skipped container updates"
fi
else
msg_ok "All containers are up-to-date"

View File

@ -1,249 +0,0 @@
#!/usr/bin/env bash
# Copyright (c) 2021-2025 community-scripts ORG
# Author: CrazyWolf13
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://github.com/eko/pihole-exporter/
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/core.func)
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/tools.func)
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/error_handler.func)
load_functions
# Enable error handling
set -Eeuo pipefail
trap 'error_handler' ERR
# ==============================================================================
# CONFIGURATION
# ==============================================================================
VERBOSE=${var_verbose:-no}
APP="pihole-exporter"
APP_TYPE="tools"
INSTALL_PATH="/opt/pihole-exporter"
CONFIG_PATH="/opt/pihole-exporter.env"
header_info
ensure_usr_local_bin_persist
get_current_ip &>/dev/null
# ==============================================================================
# OS DETECTION
# ==============================================================================
if [[ -f "/etc/alpine-release" ]]; then
OS="Alpine"
SERVICE_PATH="/etc/init.d/pihole-exporter"
elif grep -qE 'ID=debian|ID=ubuntu' /etc/os-release; then
OS="Debian"
SERVICE_PATH="/etc/systemd/system/pihole-exporter.service"
else
echo -e "${CROSS} Unsupported OS detected. Exiting."
exit 1
fi
# ==============================================================================
# UNINSTALL
# ==============================================================================
function uninstall() {
msg_info "Uninstalling Pihole-Exporter"
if [[ "$OS" == "Alpine" ]]; then
rc-service pihole-exporter stop &>/dev/null
rc-update del pihole-exporter &>/dev/null
rm -f "$SERVICE_PATH"
else
systemctl disable -q --now pihole-exporter
rm -f "$SERVICE_PATH"
fi
rm -rf "$INSTALL_PATH" "$CONFIG_PATH"
rm -f "/usr/local/bin/update_pihole-exporter"
rm -f "$HOME/.pihole-exporter"
msg_ok "Pihole-Exporter has been uninstalled"
}
# ==============================================================================
# UPDATE
# ==============================================================================
function update() {
if check_for_gh_release "pihole-exporter" "eko/pihole-exporter"; then
msg_info "Stopping service"
if [[ "$OS" == "Alpine" ]]; then
rc-service pihole-exporter stop &>/dev/null
else
systemctl stop pihole-exporter
fi
msg_ok "Stopped service"
fetch_and_deploy_gh_release "pihole-exporter" "eko/pihole-exporter" "tarball" "latest"
setup_go
msg_info "Building Pihole-Exporter"
cd /opt/pihole-exporter/
$STD /usr/local/bin/go build -o ./pihole-exporter
msg_ok "Built Pihole-Exporter"
msg_info "Starting service"
if [[ "$OS" == "Alpine" ]]; then
rc-service pihole-exporter start
else
systemctl start pihole-exporter
fi
msg_ok "Started service"
msg_ok "Updated successfully"
exit
fi
}
# ==============================================================================
# INSTALL
# ==============================================================================
function install() {
read -erp "Enter the protocol to use (http/https), default https: " pihole_PROTOCOL
read -erp "Enter the hostname of Pihole, example: (127.0.0.1): " pihole_HOSTNAME
read -erp "Enter the port of Pihole, default 443: " pihole_PORT
read -rsp "Enter Pihole password: " pihole_PASSWORD
printf "\n"
read -erp "Do you want to skip TLS-Verification (if using a self-signed Certificate on Pi-Hole) [y/N]: " SKIP_TLS
if [[ "${SKIP_TLS,,}" =~ ^(y|yes)$ ]]; then
pihole_SKIP_TLS="true"
fi
fetch_and_deploy_gh_release "pihole-exporter" "eko/pihole-exporter" "tarball" "latest"
setup_go
msg_info "Building Pihole-Exporter on ${OS}"
cd /opt/pihole-exporter/
$STD /usr/local/bin/go build -o ./pihole-exporter
msg_ok "Built Pihole-Exporter"
msg_info "Creating configuration"
cat <<EOF >"$CONFIG_PATH"
# https://github.com/eko/pihole-exporter/?tab=readme-ov-file#available-cli-options
PIHOLE_PASSWORD="${pihole_PASSWORD}"
PIHOLE_HOSTNAME="${pihole_HOSTNAME}"
PIHOLE_PORT="${pihole_PORT:-443}"
SKIP_TLS_VERIFICATION="${pihole_SKIP_TLS:-false}"
PIHOLE_PROTOCOL="${pihole_PROTOCOL:-https}"
EOF
msg_ok "Created configuration"
msg_info "Creating service"
if [[ "$OS" == "Debian" ]]; then
cat <<EOF >"$SERVICE_PATH"
[Unit]
Description=pihole-exporter
After=network.target
[Service]
User=root
WorkingDirectory=/opt/pihole-exporter
EnvironmentFile=$CONFIG_PATH
ExecStart=/opt/pihole-exporter/pihole-exporter
Restart=always
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl enable -q --now pihole-exporter
else
cat <<EOF >"$SERVICE_PATH"
#!/sbin/openrc-run
name="pihole-exporter"
description="Pi-hole Exporter for Prometheus"
command="${INSTALL_PATH}/pihole-exporter"
command_background=true
directory="/opt/pihole-exporter"
pidfile="/run/\${RC_SVCNAME}.pid"
output_log="/var/log/pihole-exporter.log"
error_log="/var/log/pihole-exporter.log"
depend() {
need net
after firewall
}
start_pre() {
if [ -f "$CONFIG_PATH" ]; then
export \$(grep -v '^#' $CONFIG_PATH | xargs)
fi
}
EOF
chmod +x "$SERVICE_PATH"
rc-update add pihole-exporter default
rc-service pihole-exporter start
fi
msg_ok "Created and started service"
# Create update script
msg_info "Creating update script"
cat <<'UPDATEEOF' >/usr/local/bin/update_pihole-exporter
#!/usr/bin/env bash
# pihole-exporter Update Script
type=update bash -c "$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/tools/addon/pihole-exporter.sh)"
UPDATEEOF
chmod +x /usr/local/bin/update_pihole-exporter
msg_ok "Created update script (/usr/local/bin/update_pihole-exporter)"
echo ""
msg_ok "Pihole-Exporter installed successfully"
msg_ok "Metrics: ${BL}http://${CURRENT_IP}:${DEFAULT_PORT}/metrics${CL}"
msg_ok "Config: ${BL}${CONFIG_PATH}${CL}"
}
# ==============================================================================
# MAIN
# ==============================================================================
header_info
ensure_usr_local_bin_persist
get_current_ip &>/dev/null
# Handle type=update (called from update script)
if [[ "${type:-}" == "update" ]]; then
if [[ -d "$INSTALL_PATH" && -f "$INSTALL_PATH/pihole-exporter" ]]; then
update
else
msg_error "Pihole-Exporter is not installed. Nothing to update."
exit 1
fi
exit 0
fi
# Check if already installed
if [[ -d "$INSTALL_PATH" && -f "$INSTALL_PATH/pihole-exporter" ]]; then
msg_warn "Pihole-Exporter is already installed."
echo ""
echo -n "${TAB}Uninstall Pihole-Exporter? (y/N): "
read -r uninstall_prompt
if [[ "${uninstall_prompt,,}" =~ ^(y|yes)$ ]]; then
uninstall
exit 0
fi
echo -n "${TAB}Update Pihole-Exporter? (y/N): "
read -r update_prompt
if [[ "${update_prompt,,}" =~ ^(y|yes)$ ]]; then
update
exit 0
fi
msg_warn "No action selected. Exiting."
exit 0
fi
# Fresh installation
msg_warn "Pihole-Exporter is not installed."
echo ""
echo -e "${TAB}${INFO} This will install:"
echo -e "${TAB} - Pi-hole Exporter (Go binary)"
echo -e "${TAB} - Systemd/OpenRC service"
echo ""
echo -n "${TAB}Install Pihole-Exporter? (y/N): "
read -r install_prompt
if [[ "${install_prompt,,}" =~ ^(y|yes)$ ]]; then
install
else
msg_warn "Installation cancelled. Exiting."
exit 0
fi

View File

@ -1,149 +0,0 @@
#!/usr/bin/env bash
# Copyright (c) 2021-2025 community-scripts ORG
# Author: CrazyWolf13
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://github.com/martabal/qbittorrent-exporter
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/core.func)
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/tools.func)
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)
VERBOSE=${var_verbose:-no}
APP="qbittorrent-exporter"
APP_TYPE="tools"
INSTALL_PATH="/opt/qbittorrent-exporter/src/qbittorrent-exporter"
CONFIG_PATH="/opt/qbittorrent-exporter.env"
header_info
ensure_usr_local_bin_persist
get_current_ip &>/dev/null
# OS Detection
if [[ -f "/etc/alpine-release" ]]; then
OS="Alpine"
SERVICE_PATH="/etc/init.d/qbittorrent-exporter"
elif grep -qE 'ID=debian|ID=ubuntu' /etc/os-release; then
OS="Debian"
SERVICE_PATH="/etc/systemd/system/qbittorrent-exporter.service"
else
echo -e "${CROSS} Unsupported OS detected. Exiting."
exit 1
fi
# Existing installation
if [[ -f "$INSTALL_PATH" ]]; then
echo -e "${YW}⚠️ qbittorrent-exporter is already installed.${CL}"
echo -n "Uninstall ${APP}? (y/N): "
read -r uninstall_prompt
if [[ "${uninstall_prompt,,}" =~ ^(y|yes)$ ]]; then
msg_info "Uninstalling qbittorrent-exporter"
if [[ "$OS" == "Debian" ]]; then
systemctl disable --now qbittorrent-exporter.service &>/dev/null
rm -f "$SERVICE_PATH"
else
rc-service qbittorrent-exporter stop &>/dev/null
rc-update del qbittorrent-exporter &>/dev/null
rm -f "$SERVICE_PATH"
fi
rm -f "$INSTALL_PATH" "$CONFIG_PATH" ~/.qbittorrent-exporter
msg_ok "${APP} has been uninstalled."
exit 0
fi
echo -n "Update qbittorrent-exporter? (y/N): "
read -r update_prompt
if [[ "${update_prompt,,}" =~ ^(y|yes)$ ]]; then
if check_for_gh_release "qbittorrent-exporter" "martabal/qbittorrent-exporter"; then
fetch_and_deploy_gh_release "qbittorrent-exporter" "martabal/qbittorrent-exporter"
setup_go
msg_info "Updating qbittorrent-exporter"
cd /opt/qbittorrent-exporter/src
$STD /usr/local/bin/go build -o ./qbittorrent-exporter
msg_ok "Updated Successfully!"
fi
exit 0
else
echo -e "${YW}⚠️ Update skipped. Exiting.${CL}"
exit 0
fi
fi
echo -e "${YW}⚠️ qbittorrent-exporter is not installed.${CL}"
echo -n "Enter URL of qbittorrent, example: (http://127.0.0.1:8080): "
read -er QBITTORRENT_BASE_URL
echo -n "Enter qbittorrent username: "
read -er QBITTORRENT_USERNAME
echo -n "Enter qbittorrent password: "
read -rs QBITTORRENT_PASSWORD
echo
echo -n "Install qbittorrent-exporter? (y/n): "
read -r install_prompt
if ! [[ "${install_prompt,,}" =~ ^(y|yes)$ ]]; then
echo -e "${YW}⚠️ Installation skipped. Exiting.${CL}"
exit 0
fi
fetch_and_deploy_gh_release "qbittorrent-exporter" "martabal/qbittorrent-exporter" "tarball" "latest"
setup_go
msg_info "Installing qbittorrent-exporter on ${OS}"
cd /opt/qbittorrent-exporter/src
$STD /usr/local/bin/go build -o ./qbittorrent-exporter
msg_ok "Installed qbittorrent-exporter"
msg_info "Creating configuration"
cat <<EOF >"$CONFIG_PATH"
# https://github.com/martabal/qbittorrent-exporter?tab=readme-ov-file#parameters
QBITTORRENT_BASE_URL="${QBITTORRENT_BASE_URL}"
QBITTORRENT_USERNAME="${QBITTORRENT_USERNAME}"
QBITTORRENT_PASSWORD="${QBITTORRENT_PASSWORD}"
EOF
msg_ok "Created configuration"
msg_info "Creating service"
if [[ "$OS" == "Debian" ]]; then
cat <<EOF >"$SERVICE_PATH"
[Unit]
Description=qbittorrent-exporter
After=network.target
[Service]
User=root
WorkingDirectory=/opt/qbittorrent-exporter/src
EnvironmentFile=$CONFIG_PATH
ExecStart=/opt/qbittorrent-exporter/src/qbittorrent-exporter
Restart=always
[Install]
WantedBy=multi-user.target
EOF
systemctl enable -q --now qbittorrent-exporter
else
cat <<EOF >"$SERVICE_PATH"
#!/sbin/openrc-run
command="$INSTALL_PATH"
command_args=""
command_background=true
directory="/opt/qbittorrent-exporter/src"
pidfile="/opt/qbittorrent-exporter/src/pidfile"
depend() {
need net
}
start_pre() {
if [ -f "$CONFIG_PATH" ]; then
export \$(grep -v '^#' $CONFIG_PATH | xargs)
fi
}
EOF
chmod +x "$SERVICE_PATH"
rc-update add qbittorrent-exporter default &>/dev/null
rc-service qbittorrent-exporter start &>/dev/null
fi
msg_ok "Service created successfully"
echo -e "${CM} ${GN}${APP} is reachable at: ${BL}http://$CURRENT_IP:8090/metrics${CL}"

352
tools/pve/oci-deploy.sh Normal file
View File

@ -0,0 +1,352 @@
#!/usr/bin/env bash
# Copyright (c) 2021-2025 community-scripts ORG
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://www.proxmox.com/
function header_info {
clear
cat <<"EOF"
____ ________ ______ __ _
/ __ \/ ____/ / / ____/___ ____ / /_____ _(_)___ ___ _____
/ / / / / / / / / / __ \/ __ \/ __/ __ `/ / __ \/ _ \/ ___/
/ /_/ / /___/ / / /___/ /_/ / / / / /_/ /_/ / / / / / __/ /
\____/\____/_/ \____/\____/_/ /_/\__/\__,_/_/_/ /_/\___/_/
EOF
}
YW=$(echo "\033[33m")
GN=$(echo "\033[1;92m")
RD=$(echo "\033[01;31m")
BL=$(echo "\033[36m")
CL=$(echo "\033[m")
CM="${GN}✔️${CL}"
CROSS="${RD}✖️${CL}"
INFO="${BL}${CL}"
APP="OCI-Container"
header_info
function msg_info() {
local msg="$1"
echo -e "${INFO} ${YW}${msg}...${CL}"
}
function msg_ok() {
local msg="$1"
echo -e "${CM} ${GN}${msg}${CL}"
}
function msg_error() {
local msg="$1"
echo -e "${CROSS} ${RD}${msg}${CL}"
}
# Check Proxmox version
if ! command -v pveversion &>/dev/null; then
msg_error "This script must be run on Proxmox VE"
exit 1
fi
PVE_VER=$(pveversion | grep -oP 'pve-manager/\K[0-9.]+' | cut -d. -f1,2)
MAJOR=$(echo "$PVE_VER" | cut -d. -f1)
MINOR=$(echo "$PVE_VER" | cut -d. -f2)
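# Example parse (illustrative output): "pve-manager/9.1.4/..." -> PVE_VER=9.1, MAJOR=9, MINOR=1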
if [[ "$MAJOR" -lt 9 ]] || { [[ "$MAJOR" -eq 9 ]] && [[ "$MINOR" -lt 1 ]]; }; then
msg_error "Proxmox VE 9.1+ required (current: $PVE_VER)"
exit 1
fi
msg_ok "Proxmox VE $PVE_VER detected"
# Parse OCI image
parse_image() {
local input="$1"
if [[ "$input" =~ ^([^/]+\.[^/]+)/ ]]; then
echo "$input"
elif [[ "$input" =~ / ]]; then
echo "docker.io/$input"
else
echo "docker.io/library/$input"
fi
}
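# Examples of the normalization above (illustrative images):
#   "nginx"               -> docker.io/library/nginx
#   "linuxserver/plex"    -> docker.io/linuxserver/plex
#   "ghcr.io/user/app:1"  -> ghcr.io/user/app:1 (registry host already present)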
# Interactive image selection
if [[ -z "${OCI_IMAGE:-}" ]]; then
echo ""
echo -e "${YW}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${CL}"
echo -e "${BL}Select OCI Image:${CL}"
echo -e "${YW}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${CL}"
echo -e " ${BL}1)${CL} nginx:alpine - Lightweight web server"
echo -e " ${BL}2)${CL} postgres:16-alpine - PostgreSQL database"
echo -e " ${BL}3)${CL} redis:alpine - Redis cache"
echo -e " ${BL}4)${CL} mariadb:latest - MariaDB database"
echo -e " ${BL}5)${CL} ghcr.io/linkwarden/linkwarden:latest"
echo -e " ${BL}6)${CL} Custom image"
echo -e "${YW}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${CL}"
echo ""
read -r -p "Select option (1-6): " IMAGE_CHOICE
case $IMAGE_CHOICE in
1) OCI_IMAGE="nginx:alpine" ;;
2) OCI_IMAGE="postgres:16-alpine" ;;
3) OCI_IMAGE="redis:alpine" ;;
4) OCI_IMAGE="mariadb:latest" ;;
5) OCI_IMAGE="ghcr.io/linkwarden/linkwarden:latest" ;;
6)
read -r -p "Enter OCI image (e.g., ghcr.io/user/repo:tag): " OCI_IMAGE
[[ -z "$OCI_IMAGE" ]] && {
msg_error "No image specified"
exit 1
}
;;
*)
msg_error "Invalid choice"
exit 1
;;
esac
fi
FULL_IMAGE=$(parse_image "$OCI_IMAGE")
msg_ok "Selected: $FULL_IMAGE"
# Derive container name
if [[ -z "${CT_NAME:-}" ]]; then
DEFAULT_NAME=$(echo "$OCI_IMAGE" | sed 's|.*/||; s/:.*//; s/[^a-zA-Z0-9-]/-/g' | cut -c1-60)
read -r -p "Container name [${DEFAULT_NAME}]: " CT_NAME
CT_NAME=${CT_NAME:-$DEFAULT_NAME}
fi
# Get next VMID
if [[ -z "${VMID:-}" ]]; then
NEXT_ID=$(pvesh get /cluster/nextid)
read -r -p "Container ID [${NEXT_ID}]: " VMID
VMID=${VMID:-$NEXT_ID}
fi
# Resources
if [[ -z "${CORES:-}" ]]; then
read -r -p "CPU cores [2]: " CORES
CORES=${CORES:-2}
fi
if [[ -z "${MEMORY:-}" ]]; then
read -r -p "Memory in MB [2048]: " MEMORY
MEMORY=${MEMORY:-2048}
fi
if [[ -z "${DISK:-}" ]]; then
read -r -p "Disk size in GB [8]: " DISK
DISK=${DISK:-8}
fi
# Storage
if [[ -z "${STORAGE:-}" ]]; then
AVAIL_STORAGE=$(pvesm status | awk '/^local-(zfs|lvm)/ {print $1; exit}')
[[ -z "$AVAIL_STORAGE" ]] && AVAIL_STORAGE="local"
read -r -p "Storage [${AVAIL_STORAGE}]: " STORAGE
STORAGE=${STORAGE:-$AVAIL_STORAGE}
fi
# Network
if [[ -z "${BRIDGE:-}" ]]; then
read -r -p "Network bridge [vmbr0]: " BRIDGE
BRIDGE=${BRIDGE:-vmbr0}
fi
if [[ -z "${IP_MODE:-}" ]]; then
read -r -p "IP mode (dhcp/static) [dhcp]: " IP_MODE
IP_MODE=${IP_MODE:-dhcp}
fi
if [[ "$IP_MODE" == "static" ]]; then
read -r -p "Static IP (CIDR, e.g., 192.168.1.100/24): " STATIC_IP
read -r -p "Gateway IP: " GATEWAY
fi
# Environment variables
declare -a ENV_VARS=()
case "$OCI_IMAGE" in
postgres* | postgresql*)
echo ""
msg_info "PostgreSQL requires environment variables"
read -r -p "PostgreSQL password: " -s PG_PASS
echo ""
ENV_VARS+=("POSTGRES_PASSWORD=$PG_PASS")
read -r -p "Create database (optional): " PG_DB
[[ -n "$PG_DB" ]] && ENV_VARS+=("POSTGRES_DB=$PG_DB")
read -r -p "PostgreSQL user (optional): " PG_USER
[[ -n "$PG_USER" ]] && ENV_VARS+=("POSTGRES_USER=$PG_USER")
;;
mariadb* | mysql*)
echo ""
msg_info "MariaDB/MySQL requires environment variables"
read -r -p "Root password: " -s MYSQL_PASS
echo ""
ENV_VARS+=("MYSQL_ROOT_PASSWORD=$MYSQL_PASS")
read -r -p "Create database (optional): " MYSQL_DB
[[ -n "$MYSQL_DB" ]] && ENV_VARS+=("MYSQL_DATABASE=$MYSQL_DB")
read -r -p "Create user (optional): " MYSQL_USER
if [[ -n "$MYSQL_USER" ]]; then
ENV_VARS+=("MYSQL_USER=$MYSQL_USER")
read -r -p "User password: " -s MYSQL_USER_PASS
echo ""
ENV_VARS+=("MYSQL_PASSWORD=$MYSQL_USER_PASS")
fi
;;
*linkwarden*)
echo ""
msg_info "Linkwarden configuration"
read -r -p "NEXTAUTH_SECRET (press Enter to generate): " NEXTAUTH_SECRET
if [[ -z "$NEXTAUTH_SECRET" ]]; then
NEXTAUTH_SECRET=$(openssl rand -base64 32)
fi
ENV_VARS+=("NEXTAUTH_SECRET=$NEXTAUTH_SECRET")
read -r -p "NEXTAUTH_URL [http://localhost:3000]: " NEXTAUTH_URL
NEXTAUTH_URL=${NEXTAUTH_URL:-http://localhost:3000}
ENV_VARS+=("NEXTAUTH_URL=$NEXTAUTH_URL")
read -r -p "DATABASE_URL (PostgreSQL connection string): " DATABASE_URL
[[ -n "$DATABASE_URL" ]] && ENV_VARS+=("DATABASE_URL=$DATABASE_URL")
;;
esac
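# Example of the resulting array for postgres:16-alpine with every prompt answered
# (placeholder values, not defaults):
#   ENV_VARS=("POSTGRES_PASSWORD=changeme" "POSTGRES_DB=app" "POSTGRES_USER=app")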
# Additional env vars
read -r -p "Add custom environment variables? (y/N): " ADD_ENV
if [[ "${ADD_ENV,,}" =~ ^(y|yes)$ ]]; then
while true; do
read -r -p "Enter KEY=VALUE (or press Enter to finish): " CUSTOM_ENV
[[ -z "$CUSTOM_ENV" ]] && break
ENV_VARS+=("$CUSTOM_ENV")
done
fi
# Privileged mode
read -r -p "Run as privileged container? (y/N): " PRIV_MODE
if [[ "${PRIV_MODE,,}" =~ ^(y|yes)$ ]]; then
UNPRIVILEGED="0"
else
UNPRIVILEGED="1"
fi
# Auto-start
read -r -p "Start container after creation? (Y/n): " AUTO_START
if [[ "${AUTO_START,,}" =~ ^(n|no)$ ]]; then
START_AFTER="no"
else
START_AFTER="yes"
fi
# Summary
echo ""
echo -e "${YW}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${CL}"
echo -e "${BL}Container Configuration Summary:${CL}"
echo -e "${YW}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${CL}"
echo -e " Image: $FULL_IMAGE"
echo -e " ID: $VMID"
echo -e " Name: $CT_NAME"
echo -e " CPUs: $CORES"
echo -e " Memory: ${MEMORY}MB"
echo -e " Disk: ${DISK}GB"
echo -e " Storage: $STORAGE"
echo -e " Network: $BRIDGE ($IP_MODE)"
[[ ${#ENV_VARS[@]} -gt 0 ]] && echo -e " Env vars: ${#ENV_VARS[@]} configured"
echo -e "${YW}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${CL}"
echo ""
read -r -p "Proceed with creation? (Y/n): " CONFIRM
if [[ "${CONFIRM,,}" =~ ^(n|no)$ ]]; then
msg_error "Cancelled by user"
exit 0
fi
# Create container
msg_info "Creating container $VMID"
# Build pct create command
PCT_CMD="pct create $VMID"
PCT_CMD+=" --hostname $CT_NAME"
PCT_CMD+=" --cores $CORES"
PCT_CMD+=" --memory $MEMORY"
PCT_CMD+=" --rootfs ${STORAGE}:${DISK},oci=${FULL_IMAGE}"
PCT_CMD+=" --unprivileged $UNPRIVILEGED"
if [[ "$IP_MODE" == "static" && -n "$STATIC_IP" ]]; then
PCT_CMD+=" --net0 name=eth0,bridge=$BRIDGE,ip=$STATIC_IP"
[[ -n "$GATEWAY" ]] && PCT_CMD+=",gw=$GATEWAY"
else
PCT_CMD+=" --net0 name=eth0,bridge=$BRIDGE,ip=dhcp"
fi
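# Illustrative shape of the assembled command (example values):
#   pct create 120 --hostname nginx --cores 2 --memory 2048 \
#     --rootfs local-zfs:8,oci=docker.io/library/nginx:alpine \
#     --unprivileged 1 --net0 name=eth0,bridge=vmbr0,ip=dhcp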
if eval "$PCT_CMD" 2>&1; then
msg_ok "Container created"
else
msg_error "Failed to create container"
exit 1
fi
# Set environment variables
if [[ ${#ENV_VARS[@]} -gt 0 ]]; then
msg_info "Configuring environment variables"
for env_var in "${ENV_VARS[@]}"; do
if ! pct set "$VMID" -env "$env_var" &>/dev/null; then
msg_error "Failed to set: $env_var"
fi
done
msg_ok "Environment variables configured (${#ENV_VARS[@]} variables)"
fi
# Start container
if [[ "$START_AFTER" == "yes" ]]; then
msg_info "Starting container"
if pct start "$VMID" 2>&1; then
msg_ok "Container started"
# Wait for network
sleep 3
CT_IP=$(pct exec "$VMID" -- hostname -I 2>/dev/null | awk '{print $1}' || echo "N/A")
echo ""
echo -e "${GN}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${CL}"
echo -e "${BL}Container Information:${CL}"
echo -e "${GN}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${CL}"
echo -e " ID: ${GN}$VMID${CL}"
echo -e " Name: ${GN}$CT_NAME${CL}"
echo -e " Image: ${GN}$FULL_IMAGE${CL}"
echo -e " IP: ${GN}$CT_IP${CL}"
echo -e " Status: ${GN}Running${CL}"
echo -e "${GN}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${CL}"
echo ""
echo -e "${INFO} ${YW}Access console:${CL} pct console $VMID"
echo -e "${INFO} ${YW}View logs:${CL} pct logs $VMID"
echo ""
else
msg_error "Failed to start container"
fi
else
echo ""
echo -e "${GN}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${CL}"
echo -e "${BL}Container Information:${CL}"
echo -e "${GN}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${CL}"
echo -e " ID: ${GN}$VMID${CL}"
echo -e " Name: ${GN}$CT_NAME${CL}"
echo -e " Image: ${GN}$FULL_IMAGE${CL}"
echo -e " Status: ${YW}Stopped${CL}"
echo -e "${GN}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${CL}"
echo ""
echo -e "${INFO} ${YW}Start with:${CL} pct start $VMID"
echo ""
fi
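# Usage sketch: the prompts above only fire when their variables are unset, so most
# answers can be pre-seeded via the environment (illustrative values; the yes/no and
# confirmation prompts remain interactive):
#   OCI_IMAGE="redis:alpine" CT_NAME="redis-cache" VMID=135 \
#   CORES=1 MEMORY=512 DISK=4 STORAGE="local-lvm" BRIDGE="vmbr0" IP_MODE="dhcp" \
#   bash tools/pve/oci-deploy.sh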

View File

@ -588,96 +588,17 @@ echo -en "\e[1A\e[0K"
FILE=$(basename $URL)
msg_ok "Downloaded ${CL}${BL}${FILE}${CL}"
# --- Inject UniFi Installer via Cloud-Init ---
msg_info "Preparing ${OS_DISPLAY} Cloud Image for UniFi OS"
msg_ok "Downloaded ${OS_DISPLAY} Cloud Image"
# Install virt-customize if not available
if ! command -v virt-customize &>/dev/null; then
# Expand root partition to use full disk space
msg_info "Expanding disk image to ${DISK_SIZE}"
# Install virt-resize if not available
if ! command -v virt-resize &>/dev/null; then
apt-get -qq update >/dev/null
apt-get -qq install libguestfs-tools -y >/dev/null
fi
# Create UniFi OS installation script and inject it into the image
virt-customize -a "${FILE}" --run-command "cat > /root/install-unifi-os.sh << 'INSTALLSCRIPT'
#!/bin/bash
set -x
exec > /var/log/unifi-install.log 2>&1
echo \"=== UniFi OS Installation Started at \$(date) ===\"
# Wait for cloud-init to complete
if command -v cloud-init >/dev/null 2>&1; then
echo \"Waiting for cloud-init to complete...\"
cloud-init status --wait 2>/dev/null || true
fi
# Install required packages
echo \"Installing required packages...\"
export DEBIAN_FRONTEND=noninteractive
apt-get update
apt-get install -y curl wget ca-certificates podman uidmap slirp4netns iptables
# Configure Podman
echo \"Configuring Podman...\"
loginctl enable-linger root
# Download UniFi OS Server
echo \"Downloading UniFi OS Server ${UOS_VERSION}...\"
cd /root
curl -fsSL '${UOS_URL}' -o unifi-installer.bin
chmod +x unifi-installer.bin
# Install UniFi OS Server
echo \"Installing UniFi OS Server (this takes 3-5 minutes)...\"
./unifi-installer.bin install
echo \"Waiting for services to start...\"
sleep 15
# Start UniFi OS Server
if systemctl list-unit-files | grep -q unifi-os-server; then
echo \"Starting UniFi OS Server service...\"
systemctl enable unifi-os-server
systemctl start unifi-os-server
sleep 10
if systemctl is-active --quiet unifi-os-server; then
echo \"SUCCESS: UniFi OS Server is running\"
else
echo \"WARNING: Checking service status...\"
systemctl status unifi-os-server --no-pager
fi
fi
touch /root/.unifi-installed
echo \"=== Installation completed at \$(date) ===\"
INSTALLSCRIPT" >/dev/null 2>&1
virt-customize -a "${FILE}" --chmod 0755:/root/install-unifi-os.sh >/dev/null 2>&1
# Create systemd service for first-boot installation
virt-customize -a "${FILE}" --run-command "cat > /etc/systemd/system/unifi-firstboot.service << 'SVCFILE'
[Unit]
Description=UniFi OS First Boot Installation
After=cloud-init.service network-online.target
Wants=network-online.target
ConditionPathExists=!/root/.unifi-installed
[Service]
Type=oneshot
ExecStart=/root/install-unifi-os.sh
RemainAfterExit=yes
[Install]
WantedBy=multi-user.target
SVCFILE" >/dev/null 2>&1
virt-customize -a "${FILE}" --run-command "systemctl enable unifi-firstboot.service" >/dev/null 2>&1
msg_ok "Prepared ${OS_DISPLAY} image with UniFi OS installer"
# Expand root partition to use full disk space
msg_info "Expanding disk image to ${DISK_SIZE}"
qemu-img create -f qcow2 expanded.qcow2 ${DISK_SIZE} >/dev/null 2>&1
# Detect partition device (sda1 for Ubuntu, vda1 for Debian)
@ -712,21 +633,10 @@ qm set "$VMID" \
qm resize "$VMID" scsi0 "$DISK_SIZE" >/dev/null
qm set "$VMID" --agent enabled=1 >/dev/null
# Add Cloud-Init drive (standard Cloud-Init, no custom user-data)
# Add Cloud-Init drive
msg_info "Configuring Cloud-Init"
setup_cloud_init "$VMID" "$STORAGE" "$HN" "yes" >/dev/null 2>&1
msg_ok "Cloud-Init configured (UniFi OS installs via systemd service on first boot)"
# Display credentials immediately so user can login
if [ -n "$CLOUDINIT_CRED_FILE" ] && [ -f "$CLOUDINIT_CRED_FILE" ]; then
echo ""
echo -e "${INFO}${BOLD}${GN}Cloud-Init Credentials (save these now!):${CL}"
echo -e "${TAB}${DGN}User: ${BGN}${CLOUDINIT_USER:-root}${CL}"
echo -e "${TAB}${DGN}Password: ${BGN}${CLOUDINIT_PASSWORD}${CL}"
echo -e "${TAB}${RD}⚠️ UniFi OS installation runs automatically on first boot${CL}"
echo -e "${TAB}${INFO}Monitor: ${BL}tail -f /var/log/unifi-install.log${CL}"
echo ""
fi
msg_ok "Cloud-Init configured"
DESCRIPTION=$(
cat <<EOF
@ -769,69 +679,139 @@ if [ "$START_VM" == "yes" ]; then
qm start $VMID
msg_ok "Started UniFi OS VM"
msg_info "Waiting for VM to boot (30 seconds)"
sleep 30
msg_ok "VM should be booting now"
msg_info "Waiting for VM to boot and Cloud-Init to complete (60-90 seconds)"
sleep 60
msg_ok "VM boot complete"
msg_info "Detecting VM IP address (may take up to 60 seconds)"
msg_info "Detecting VM IP address"
VM_IP=""
for i in {1..30}; do
VM_IP=$(qm guest cmd $VMID network-get-interfaces 2>/dev/null | jq -r '.[1]["ip-addresses"][]? | select(.["ip-address-type"] == "ipv4") | .["ip-address"]' 2>/dev/null | grep -v "127.0.0.1" | head -1 || echo "")
if [ -n "$VM_IP" ]; then
msg_ok "VM IP Address detected: ${VM_IP}"
msg_ok "VM IP Address: ${VM_IP}"
break
fi
sleep 2
done
if [ -n "$VM_IP" ]; then
msg_info "Waiting for UniFi OS installation (via Cloud-Init, takes 5-8 minutes)"
WAIT_COUNT=0
MAX_WAIT=600 # 10 minutes max for Cloud-Init installation
PORT_OPEN=0
LAST_MSG_TIME=0
while [ $WAIT_COUNT -lt $MAX_WAIT ]; do
if timeout 2 bash -c ">/dev/tcp/${VM_IP}/11443" 2>/dev/null; then
PORT_OPEN=1
msg_ok "UniFi OS Server is ready!"
break
fi
sleep 10
WAIT_COUNT=$((WAIT_COUNT + 10))
# Update message every 30 seconds
if [ $((WAIT_COUNT - LAST_MSG_TIME)) -ge 30 ]; then
echo -e "${BFR}${TAB}${YW}${HOLD}Installation in progress... ${WAIT_COUNT}s elapsed${CL}"
echo -e "${TAB}${INFO}${YW}Monitor: ${BL}ssh ${CLOUDINIT_USER:-root}@${VM_IP} 'tail -f /var/log/unifi-install.log'${CL}"
LAST_MSG_TIME=$WAIT_COUNT
fi
done
if [ $PORT_OPEN -eq 1 ]; then
echo -e "\n${TAB}${GATEWAY}${BOLD}${GN}✓ UniFi OS Server is ready!${CL}"
echo -e "${TAB}${GATEWAY}${BOLD}${GN}✓ Access at: ${BGN}https://${VM_IP}:11443${CL}\n"
else
msg_ok "VM is running, UniFi OS installation in progress"
echo -e "${TAB}${INFO}${YW}Installation runs via systemd service on first boot${CL}"
echo -e "${TAB}${INFO}${YW}This takes 5-8 minutes${CL}"
if [ "$USE_CLOUD_INIT" = "yes" ]; then
echo -e "${TAB}${INFO}${YW}SSH: ${BL}ssh ${CLOUDINIT_USER:-root}@${VM_IP}${CL}"
echo -e "${TAB}${INFO}${YW}Password: ${BGN}${CLOUDINIT_PASSWORD}${CL}"
echo -e "${TAB}${INFO}${YW}Monitor: ${BL}tail -f /var/log/unifi-install.log${CL}"
fi
echo -e "${TAB}${INFO}${YW}UniFi OS will be at: ${BGN}https://${VM_IP}:11443${CL}"
fi
else
msg_ok "VM is running (ID: ${VMID})"
echo -e "${TAB}${INFO}${YW}Could not auto-detect IP address${CL}"
if [ -z "$VM_IP" ]; then
msg_error "Could not detect VM IP address"
echo -e "${TAB}${INFO}${YW}Use Proxmox Console to login with Cloud-Init credentials${CL}"
echo -e "${TAB}${INFO}${YW}User: ${BGN}${CLOUDINIT_USER:-root}${CL} / Password: ${BGN}${CLOUDINIT_PASSWORD}${CL}"
echo -e "${TAB}${INFO}${YW}Monitor installation: ${BL}tail -f /var/log/unifi-install.log${CL}"
exit 1
fi
# Wait for SSH to be ready
msg_info "Waiting for SSH to be ready"
SSH_READY=0
for i in {1..30}; do
if timeout 5 sshpass -p "${CLOUDINIT_PASSWORD}" ssh -o ConnectTimeout=3 -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null \
"${CLOUDINIT_USER:-root}@${VM_IP}" "echo 'SSH Ready'" >/dev/null 2>&1; then
SSH_READY=1
msg_ok "SSH connection ready"
break
fi
sleep 2
done
if [ $SSH_READY -eq 0 ]; then
msg_error "SSH connection failed"
echo -e "${TAB}${INFO}${YW}Manual login - User: ${BGN}${CLOUDINIT_USER:-root}${CL} / Password: ${BGN}${CLOUDINIT_PASSWORD}${CL}"
exit 1
fi
# Check if sshpass is installed
if ! command -v sshpass &>/dev/null; then
msg_info "Installing sshpass for automated SSH"
apt-get update -qq >/dev/null 2>&1
apt-get install -y sshpass -qq >/dev/null 2>&1
fi
# Execute UniFi OS installation directly via SSH
msg_info "Installing UniFi OS Server ${UOS_VERSION} (takes 4-6 minutes)"
# Create installation script
INSTALL_SCRIPT=$(
cat <<'EOFINSTALL'
#!/bin/bash
set -e
export DEBIAN_FRONTEND=noninteractive
echo "[1/5] Updating system packages..."
apt-get update -qq
apt-get install -y curl wget ca-certificates podman uidmap slirp4netns iptables -qq
echo "[2/5] Configuring Podman..."
loginctl enable-linger root
echo "[3/5] Downloading UniFi OS Server installer..."
cd /root
curl -fsSL "UNIFI_URL" -o unifi-installer.bin
chmod +x unifi-installer.bin
echo "[4/5] Installing UniFi OS Server (this takes 3-5 minutes)..."
./unifi-installer.bin install
echo "[5/5] Starting UniFi OS Server..."
sleep 15
if systemctl list-unit-files | grep -q unifi-os-server; then
systemctl enable unifi-os-server
systemctl start unifi-os-server
sleep 10
if systemctl is-active --quiet unifi-os-server; then
echo "✓ UniFi OS Server is running"
else
echo "⚠ Service status:"
systemctl status unifi-os-server --no-pager || true
fi
fi
echo "Installation completed!"
EOFINSTALL
)
# Replace URL placeholder
INSTALL_SCRIPT="${INSTALL_SCRIPT//UNIFI_URL/$UOS_URL}"
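# The ${INSTALL_SCRIPT//UNIFI_URL/$UOS_URL} expansion swaps every literal UNIFI_URL
# token in the quoted heredoc for the real download URL before the script is shipped over SSH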
# Execute installation via SSH (with output streaming)
sshpass -p "${CLOUDINIT_PASSWORD}" ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null \
"${CLOUDINIT_USER:-root}@${VM_IP}" "bash -s" <<<"$INSTALL_SCRIPT" 2>&1 | while IFS= read -r line; do
echo -e "${TAB}${DGN}${line}${CL}"
done
# PIPESTATUS[0] is the exit code of the ssh command; testing the pipeline directly
# would only report the status of the while loop and mask installation failures
if [ "${PIPESTATUS[0]}" -eq 0 ]; then
msg_ok "UniFi OS Server installed successfully"
else
msg_error "Installation failed"
echo -e "${TAB}${INFO}${YW}Check logs: ${BL}ssh ${CLOUDINIT_USER:-root}@${VM_IP}${CL}"
exit 1
fi
# Wait for UniFi OS web interface
msg_info "Waiting for UniFi OS web interface (port 11443)"
PORT_OPEN=0
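# Bash's >/dev/tcp/<host>/<port> redirection doubles as a lightweight TCP probe;
# with timeout 2 and sleep 2 per iteration the loop gives the web interface a few minutes to come up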
for i in {1..60}; do
if timeout 2 bash -c ">/dev/tcp/${VM_IP}/11443" 2>/dev/null; then
PORT_OPEN=1
msg_ok "UniFi OS Server web interface is ready"
break
fi
sleep 2
done
echo ""
if [ $PORT_OPEN -eq 1 ]; then
echo -e "${TAB}${GATEWAY}${BOLD}${GN}✓ UniFi OS Server is ready!${CL}"
echo -e "${TAB}${GATEWAY}${BOLD}${GN}✓ Access at: ${BGN}https://${VM_IP}:11443${CL}"
else
echo -e "${TAB}${INFO}${YW}UniFi OS is installed but web interface not yet available${CL}"
echo -e "${TAB}${INFO}${YW}Access at: ${BGN}https://${VM_IP}:11443${CL} ${YW}(may take 1-2 more minutes)${CL}"
fi
echo -e "${TAB}${INFO}${DGN}SSH Access: ${BL}ssh ${CLOUDINIT_USER:-root}@${VM_IP}${CL}"
echo -e "${TAB}${INFO}${DGN}Password: ${BGN}${CLOUDINIT_PASSWORD}${CL}"
echo ""
fi
post_update_to_api "done" "none"