Merge branch 'community-scripts:main' into step-ca

This commit is contained in:
Joerg Heinemann 2026-02-17 09:04:17 +01:00 committed by GitHub
commit 4d91b2812f
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
58 changed files with 5072 additions and 2067 deletions

69
ct/calibre-web.sh Normal file
View File

@ -0,0 +1,69 @@
#!/usr/bin/env bash
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)
# Copyright (c) 2021-2026 community-scripts ORG
# Author: mikolaj92
# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
# Source: https://github.com/janeczku/calibre-web

# Container defaults; each may be overridden via the environment before launch.
APP="calibre-web"
var_tags="${var_tags:-media;books}"
var_cpu="${var_cpu:-2}"
var_ram="${var_ram:-2048}"
var_disk="${var_disk:-8}"
var_os="${var_os:-debian}"
var_version="${var_version:-13}"
var_unprivileged="${var_unprivileged:-1}"

# header_info/variables/color/catch_errors are provided by the sourced build.func.
header_info "$APP"
variables
color
catch_errors

# In-place upgrade path, invoked when the user re-runs the script on an
# existing container.
function update_script() {
  header_info
  check_container_storage
  check_container_resources
  if [[ ! -d /opt/calibre-web ]]; then
    msg_error "No Calibre-Web Installation Found!"
    exit
  fi
  if check_for_gh_release "calibre-web" "janeczku/calibre-web"; then
    msg_info "Stopping Service"
    systemctl stop calibre-web
    msg_ok "Stopped Service"
    msg_info "Backing up Data"
    # Keep the backup OUTSIDE /opt/calibre-web: CLEAN_INSTALL=1 below wipes
    # the application directory, which would destroy an in-tree backup.
    # Best-effort (2>/dev/null): app.db may not exist on a fresh install.
    cp /opt/calibre-web/app.db /opt/calibre-web_app.db_backup 2>/dev/null
    msg_ok "Backed up Data"
    CLEAN_INSTALL=1 fetch_and_deploy_gh_release "calibre-web" "janeczku/calibre-web" "tarball" "latest" "/opt/calibre-web"
    setup_uv
    msg_info "Installing Dependencies"
    cd /opt/calibre-web
    $STD uv sync --no-dev
    msg_ok "Installed Dependencies"
    msg_info "Restoring Data"
    cp /opt/calibre-web_app.db_backup /opt/calibre-web/app.db 2>/dev/null
    rm -f /opt/calibre-web_app.db_backup
    msg_ok "Restored Data"
    msg_info "Starting Service"
    systemctl start calibre-web
    msg_ok "Started Service"
    msg_ok "Updated successfully!"
  fi
  exit
}

start
build_container
description

msg_ok "Completed successfully!\n"
echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
echo -e "${INFO}${YW} Access it using the following URL:${CL}"
echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:8083${CL}"

View File

@ -1,58 +0,0 @@
#!/usr/bin/env bash
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)
# Copyright (c) 2021-2026 community-scripts ORG
# Author: Slaviša Arežina (tremor021)
# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
# Source: https://www.drawio.com/
# Container defaults; each may be overridden via the environment before launch.
APP="DrawIO"
var_tags="${var_tags:-diagrams}"
var_cpu="${var_cpu:-1}"
var_ram="${var_ram:-2048}"
var_disk="${var_disk:-4}"
var_os="${var_os:-debian}"
var_version="${var_version:-13}"
var_unprivileged="${var_unprivileged:-1}"
# header_info/variables/color/catch_errors are provided by the sourced build.func.
header_info "$APP"
variables
color
catch_errors
# In-place upgrade path: redeploys draw.war into Tomcat when a newer GitHub
# release is available.
function update_script() {
header_info
check_container_storage
check_container_resources
# draw.war inside the Tomcat webapps dir is the marker for an existing install.
if [[ ! -f /var/lib/tomcat11/webapps/draw.war ]]; then
msg_error "No ${APP} Installation Found!"
exit
fi
if check_for_gh_release "drawio" "jgraph/drawio"; then
msg_info "Stopping service"
systemctl stop tomcat11
msg_ok "Service stopped"
# Also brings the base OS current while the service is down.
msg_info "Updating Debian LXC"
$STD apt update
$STD apt upgrade -y
msg_ok "Updated Debian LXC"
# USE_ORIGINAL_FILENAME keeps the asset name so Tomcat deploys it as draw.war.
USE_ORIGINAL_FILENAME=true fetch_and_deploy_gh_release "drawio" "jgraph/drawio" "singlefile" "latest" "/var/lib/tomcat11/webapps" "draw.war"
msg_info "Starting service"
systemctl start tomcat11
msg_ok "Service started"
msg_ok "Updated successfully!"
fi
exit
}
start
build_container
description
msg_ok "Completed Successfully!\n"
echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
echo -e "${INFO}${YW} Access it using the following URL:${CL}"
echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:8080/draw${CL}"

View File

@ -1,50 +0,0 @@
#!/usr/bin/env bash
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)
# Copyright (c) 2021-2026 community-scripts ORG
# Author: Joerg Heinemann (heinemannj)
# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
# Source: https://github.com/john30/ebusd
# Container defaults; each may be overridden via the environment before launch.
APP="ebusd"
var_tags="${var_tags:-automation}"
var_cpu="${var_cpu:-1}"
var_ram="${var_ram:-512}"
var_disk="${var_disk:-2}"
var_os="${var_os:-debian}"
var_version="${var_version:-13}"
var_unprivileged="${var_unprivileged:-1}"
# header_info/variables/color/catch_errors are provided by the sourced build.func.
header_info "$APP"
variables
color
catch_errors
# In-place upgrade path: reinstalls the ebusd .deb when a newer GitHub
# release is available.
function update_script() {
header_info
check_container_storage
check_container_resources
# /etc/default/ebusd is the config file installed by the package; its
# presence marks an existing installation.
if [[ ! -f /etc/default/ebusd ]]; then
msg_error "No ${APP} Installation Found!"
exit
fi
if check_for_gh_release "ebusd" "john30/ebusd"; then
msg_info "Stopping Services"
systemctl stop ebusd.service
msg_ok "Stopped Services"
# Glob selects the amd64/trixie/mqtt1 build of the release .deb asset.
fetch_and_deploy_gh_release "ebusd" "john30/ebusd" "binary" "latest" "/opt/ebusd" "ebusd-*_amd64-trixie_mqtt1.deb"
msg_info "Starting Services"
systemctl start ebusd.service
msg_ok "Started Services"
msg_ok "Updated successfully!"
fi
exit
}
start
build_container
description
msg_ok "Completed successfully!\n"
echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"

118
ct/gramps-web.sh Normal file
View File

@ -0,0 +1,118 @@
#!/usr/bin/env bash
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)
# Copyright (c) 2021-2026 community-scripts ORG
# Author: GitHub Copilot
# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
# Source: https://www.grampsweb.org/
# Container defaults; each may be overridden via the environment before launch.
APP="gramps-web"
var_tags="${var_tags:-genealogy;family;collaboration}"
var_cpu="${var_cpu:-2}"
var_ram="${var_ram:-4096}"
var_disk="${var_disk:-20}"
var_os="${var_os:-debian}"
var_version="${var_version:-13}"
var_unprivileged="${var_unprivileged:-1}"
# header_info/variables/color/catch_errors are provided by the sourced build.func.
header_info "$APP"
variables
color
catch_errors
# In-place upgrade path: updates the Gramps Web API (Python/uv) and the
# frontend (Node) together when either repo has a newer release, then runs
# the user DB migration.
function update_script() {
header_info
check_container_storage
check_container_resources
# All three install markers must exist: API checkout, frontend checkout,
# and the config file referenced by the migration step below.
if [[ ! -d /opt/gramps-web-api ]]; then
msg_error "No Gramps Web API Installation Found!"
exit
fi
if [[ ! -d /opt/gramps-web/frontend ]]; then
msg_error "No Gramps Web Frontend Installation Found!"
exit
fi
if [[ ! -f /opt/gramps-web/config/config.cfg ]]; then
msg_error "No Gramps Web Configuration Found!"
exit
fi
# NOTE(review): toolchains are (re)installed before the release check —
# presumably intentional so the pinned versions stay current even when no
# app update is available; confirm.
PYTHON_VERSION="3.12" setup_uv
NODE_VERSION="22" setup_nodejs
# Two upstream repos are versioned independently; update if EITHER is newer.
UPDATE_AVAILABLE=0
if check_for_gh_release "gramps-web-api" "gramps-project/gramps-web-api"; then
UPDATE_AVAILABLE=1
fi
if check_for_gh_release "gramps-web" "gramps-project/gramps-web"; then
UPDATE_AVAILABLE=1
fi
if [[ "$UPDATE_AVAILABLE" == "1" ]]; then
msg_info "Stopping Service"
systemctl stop gramps-web
msg_ok "Stopped Service"
# The girepository dev package was renamed between Debian releases; pick
# whichever one this distro ships.
if apt-cache show libgirepository1.0-dev >/dev/null 2>&1; then
GI_DEV_PACKAGE="libgirepository1.0-dev"
elif apt-cache show libgirepository-2.0-dev >/dev/null 2>&1; then
GI_DEV_PACKAGE="libgirepository-2.0-dev"
else
msg_error "No supported girepository development package found!"
exit
fi
msg_info "Ensuring Build Dependencies"
$STD apt install -y \
gobject-introspection \
libcairo2-dev \
libglib2.0-dev \
pkg-config \
"$GI_DEV_PACKAGE"
msg_ok "Ensured Build Dependencies"
# CLEAN_INSTALL wipes the target dirs; data/config live outside them
# (/opt/gramps-web/data, /opt/gramps-web/config) and survive.
CLEAN_INSTALL=1 fetch_and_deploy_gh_release "gramps-web-api" "gramps-project/gramps-web-api" "tarball" "latest" "/opt/gramps-web-api"
CLEAN_INSTALL=1 fetch_and_deploy_gh_release "gramps-web" "gramps-project/gramps-web" "tarball" "latest" "/opt/gramps-web/frontend"
msg_info "Updating Gramps Web API"
# Rebuild the venv (-c clears it) and install the API from the fresh checkout.
$STD uv venv -c -p python3.12 /opt/gramps-web/venv
source /opt/gramps-web/venv/bin/activate
$STD uv pip install --no-cache-dir --upgrade pip setuptools wheel
$STD uv pip install --no-cache-dir gunicorn
$STD uv pip install --no-cache-dir /opt/gramps-web-api
msg_ok "Updated Gramps Web API"
msg_info "Updating Gramps Web Frontend"
cd /opt/gramps-web/frontend
# Suppress corepack's interactive download prompt in this non-interactive run.
export COREPACK_ENABLE_DOWNLOAD_PROMPT=0
corepack enable
$STD npm install
$STD npm run build
msg_ok "Updated Gramps Web Frontend"
msg_info "Applying Database Migration"
# Env vars scope the migration to this install's config and database paths.
GRAMPS_API_CONFIG=/opt/gramps-web/config/config.cfg \
GRAMPSHOME=/opt/gramps-web/data/gramps \
GRAMPS_DATABASE_PATH=/opt/gramps-web/data/gramps/grampsdb \
$STD /opt/gramps-web/venv/bin/python3 -m gramps_webapi user migrate
msg_ok "Applied Database Migration"
msg_info "Starting Service"
systemctl start gramps-web
msg_ok "Started Service"
msg_ok "Updated successfully!"
fi
exit
}
start
build_container
description
msg_ok "Completed successfully!\n"
echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
echo -e "${INFO}${YW} Access it using the following URL:${CL}"
echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:5000${CL}"

View File

@ -1,6 +0,0 @@
__ __
___ / /_ _ ___________/ /
/ _ \/ __ \/ / / / ___/ __ /
/ __/ /_/ / /_/ (__ ) /_/ /
\___/_.___/\__,_/____/\__,_/

View File

@ -1,6 +0,0 @@
 ____ __ ___
/ __ \____ ____ ___ / |/ /
/ /_/ / __ \/ __ `__ \/ /|_/ /
/ _, _/ /_/ / / / / / / / / /
/_/ |_|\____/_/ /_/ /_/_/ /_/

View File

@ -1,79 +0,0 @@
#!/usr/bin/env bash
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)
# Copyright (c) 2021-2026 community-scripts ORG
# Author: MickLesk (MickLesk)
# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
# Source: https://linkding.link/
# Container defaults; each may be overridden via the environment before launch.
APP="linkding"
var_tags="${var_tags:-bookmarks;management}"
var_cpu="${var_cpu:-2}"
var_ram="${var_ram:-1024}"
var_disk="${var_disk:-4}"
var_os="${var_os:-debian}"
var_version="${var_version:-13}"
var_unprivileged="${var_unprivileged:-1}"
# header_info/variables/color/catch_errors are provided by the sourced build.func.
header_info "$APP"
variables
color
catch_errors
# In-place upgrade path: redeploys linkding, preserving data/ and .env across
# the clean reinstall, then rebuilds frontend assets and runs Django migrations.
function update_script() {
header_info
check_container_storage
check_container_resources
if [[ ! -d /opt/linkding ]]; then
msg_error "No ${APP} Installation Found!"
exit
fi
if check_for_gh_release "linkding" "sissbruecker/linkding"; then
msg_info "Stopping Services"
systemctl stop nginx linkding linkding-tasks
msg_ok "Stopped Services"
# Backups go OUTSIDE /opt/linkding so the CLEAN_INSTALL wipe below cannot
# destroy them.
msg_info "Backing up Data"
cp -r /opt/linkding/data /opt/linkding_data_backup
cp /opt/linkding/.env /opt/linkding_env_backup
msg_ok "Backed up Data"
CLEAN_INSTALL=1 fetch_and_deploy_gh_release "linkding" "sissbruecker/linkding"
msg_info "Restoring Data"
cp -r /opt/linkding_data_backup/. /opt/linkding/data
cp /opt/linkding_env_backup /opt/linkding/.env
rm -rf /opt/linkding_data_backup /opt/linkding_env_backup
# NOTE(review): symlink source is mod_icu.so but the link is named
# libicu.so — confirm this library path/name is correct for the target OS.
ln -sf /usr/lib/x86_64-linux-gnu/mod_icu.so /opt/linkding/libicu.so
msg_ok "Restored Data"
msg_info "Updating ${APP}"
cd /opt/linkding
# dev.py must not ship in production; custom.py is the expected override hook.
rm -f bookmarks/settings/dev.py
touch bookmarks/settings/custom.py
$STD npm ci
$STD npm run build
$STD uv sync --no-dev --frozen
$STD uv pip install gunicorn
# Export every variable from .env into the environment for manage.py.
set -a && source /opt/linkding/.env && set +a
$STD /opt/linkding/.venv/bin/python manage.py migrate
$STD /opt/linkding/.venv/bin/python manage.py collectstatic --no-input
msg_ok "Updated ${APP}"
msg_info "Starting Services"
systemctl start nginx linkding linkding-tasks
msg_ok "Started Services"
msg_ok "Updated Successfully"
fi
exit
}
start
build_container
description
msg_ok "Completed Successfully!\n"
echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
echo -e "${INFO}${YW} Access it using the following URL:${CL}"
echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:9090${CL}"

View File

@ -27,7 +27,7 @@ function update_script() {
msg_error "No ${APP} Installation Found!"
exit
fi
RELEASE=$(curl -fsSL https://api.github.com/repos/papra-hq/papra/releases | grep -oP '"tag_name":\s*"\K@papra/docker@[^"]+' | head -n1)
RELEASE=$(curl -fsSL https://api.github.com/repos/papra-hq/papra/releases | grep -oP '"tag_name":\s*"\K@papra/app@[^"]+' | head -n1)
if [[ "${RELEASE}" != "$(cat /opt/${APP}_version.txt 2>/dev/null)" ]] || [[ ! -f /opt/${APP}_version.txt ]]; then
msg_info "Stopping Service"
systemctl stop papra

View File

@ -1,80 +0,0 @@
#!/usr/bin/env bash
# Use -fsSL (not bare -s): -f makes curl fail on HTTP errors instead of
# sourcing an error page as bash; -L follows redirects. Matches all sibling
# scripts in this repo.
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)
# Copyright (c) 2021-2026 community-scripts ORG
# Author: MickLesk (CanbiZ)
# Co-author: AlphaLawless
# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
# Source: https://romm.app

# Container defaults; each may be overridden via the environment before launch.
APP="RomM"
var_tags="${var_tags:-emulation}"
var_cpu="${var_cpu:-2}"
var_ram="${var_ram:-4096}"
var_disk="${var_disk:-20}"
var_os="${var_os:-debian}"
var_version="${var_version:-13}"
var_unprivileged="${var_unprivileged:-1}"

# header_info/variables/color/catch_errors are provided by the sourced build.func.
header_info "$APP"
variables
color
catch_errors

# In-place upgrade path: redeploys RomM, preserves .env, runs Alembic
# migrations, and rebuilds the frontend.
function update_script() {
  header_info
  check_container_storage
  check_container_resources
  if [[ ! -d /opt/romm ]]; then
    msg_error "No ${APP} Installation Found!"
    exit
  fi
  if check_for_gh_release "romm" "rommapp/romm"; then
    msg_info "Stopping Services"
    systemctl stop romm-backend romm-worker romm-scheduler romm-watcher
    msg_ok "Stopped Services"
    msg_info "Backing up configuration"
    # NOTE(review): the backup lives inside /opt/romm; safe only because the
    # deploy below does not use CLEAN_INSTALL — confirm the tarball extraction
    # cannot clobber .env.backup.
    cp /opt/romm/.env /opt/romm/.env.backup
    msg_ok "Backed up configuration"
    msg_info "Updating ROMM"
    fetch_and_deploy_gh_release "romm" "rommapp/romm" "tarball" "latest" "/opt/romm"
    cp /opt/romm/.env.backup /opt/romm/.env
    cd /opt/romm
    $STD uv sync --all-extras
    cd /opt/romm/backend
    $STD uv run alembic upgrade head
    cd /opt/romm/frontend
    $STD npm install
    $STD npm run build
    # Merge static assets into dist folder
    cp -rf /opt/romm/frontend/assets/* /opt/romm/frontend/dist/assets/
    mkdir -p /opt/romm/frontend/dist/assets/romm
    # Serve persistent library data from /var/lib/romm via symlinks.
    ln -sfn /var/lib/romm/resources /opt/romm/frontend/dist/assets/romm/resources
    ln -sfn /var/lib/romm/assets /opt/romm/frontend/dist/assets/romm/assets
    msg_ok "Updated ROMM"
    msg_info "Starting Services"
    systemctl start romm-backend romm-worker romm-scheduler romm-watcher
    msg_ok "Started Services"
    msg_ok "Updated successfully"
  fi
  exit
}

start
build_container
description

msg_ok "Completed successfully!\n"
echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
echo -e "${INFO}${YW} Access it using the following URL:${CL}"
echo -e "${TAB}${GATEWAY}${BGN}http://${IP}${CL}"

View File

@ -1,79 +0,0 @@
#!/usr/bin/env bash
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)
# Copyright (c) 2021-2026 community-scripts ORG
# Author: CrazyWolf13
# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
# Source: https://docs.seerr.dev/

# Container defaults; each may be overridden via the environment before launch.
APP="Seer"
var_tags="${var_tags:-media}"
var_cpu="${var_cpu:-4}"
var_ram="${var_ram:-4096}"
var_disk="${var_disk:-8}"
var_os="${var_os:-debian}"
var_version="${var_version:-13}"
var_unprivileged="${var_unprivileged:-1}"

# header_info/variables/color/catch_errors are provided by the sourced build.func.
header_info "$APP"
variables
color
catch_errors

# In-place upgrade path: redeploys seer, rebuilds with pnpm, and rewrites the
# systemd unit before restarting.
function update_script() {
  header_info
  check_container_storage
  check_container_resources
  if [[ ! -d /opt/seer ]]; then
    msg_error "No ${APP} Installation Found!"
    exit
  fi
  if check_for_gh_release "seer" "seerr-team/seerr"; then
    msg_info "Stopping Service"
    systemctl stop seer
    msg_ok "Stopped Service"
    # Pin pnpm to the version declared in package.json before reinstalling.
    pnpm_desired=$(grep -Po '"pnpm":\s*"\K[^"]+' /opt/seer/package.json)
    NODE_VERSION="22" NODE_MODULE="pnpm@$pnpm_desired" setup_nodejs
    CLEAN_INSTALL=1 fetch_and_deploy_gh_release "seer" "seerr-team/seerr" "tarball" "latest"
    cd /opt/seer
    # Skip the Cypress binary — not needed for a production build.
    export CYPRESS_INSTALL_BINARY=0
    $STD pnpm install --frozen-lockfile
    # Raise the Node heap limit for the memory-hungry build step.
    export NODE_OPTIONS="--max-old-space-size=3072"
    $STD pnpm build
    # Rewrite the unit each update so service definition changes propagate.
    cat <<EOF >/etc/systemd/system/seer.service
[Unit]
Description=Seer Service
After=network.target

[Service]
EnvironmentFile=/etc/seer/seer.conf
Environment=NODE_ENV=production
Type=exec
WorkingDirectory=/opt/seer
ExecStart=/usr/bin/node dist/index.js

[Install]
WantedBy=multi-user.target
EOF
    # Fix: pair msg_info with the msg_ok below (every sibling script does);
    # the original emitted msg_ok "Started Service" with no opening msg_info.
    msg_info "Starting Service"
    systemctl daemon-reload
    systemctl start seer
    msg_ok "Started Service"
    msg_ok "Updated successfully!"
  fi
  exit
}

start
build_container
description

msg_ok "Completed successfully!\n"
echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
echo -e "${INFO}${YW} Access it using the following URL:${CL}"
echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:5055${CL}"

66
ct/split-pro.sh Normal file
View File

@ -0,0 +1,66 @@
#!/usr/bin/env bash
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)
# Copyright (c) 2021-2026 community-scripts ORG
# Author: johanngrobe
# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
# Source: https://github.com/oss-apps/split-pro

# Container defaults; each may be overridden via the environment before launch.
APP="Split-Pro"
var_tags="${var_tags:-finance;expense-sharing}"
var_cpu="${var_cpu:-2}"
var_ram="${var_ram:-4096}"
var_disk="${var_disk:-6}"
var_os="${var_os:-debian}"
var_version="${var_version:-13}"
var_unprivileged="${var_unprivileged:-1}"

# Fix: header_info "$APP" was missing here; every sibling script calls it
# before variables/color/catch_errors (all provided by the sourced build.func).
header_info "$APP"
variables
color
catch_errors

# In-place upgrade path: redeploys Split-Pro, preserves .env across the clean
# reinstall, rebuilds, and applies Prisma migrations.
function update_script() {
  header_info
  check_container_storage
  check_container_resources
  if [[ ! -d /opt/split-pro ]]; then
    msg_error "No Split Pro Installation Found!"
    exit
  fi
  if check_for_gh_release "split-pro" "oss-apps/split-pro"; then
    msg_info "Stopping Service"
    systemctl stop split-pro
    msg_ok "Stopped Service"
    msg_info "Backing up Data"
    # Backup lives OUTSIDE /opt/split-pro so the CLEAN_INSTALL wipe below
    # cannot destroy it.
    cp /opt/split-pro/.env /opt/split-pro.env
    msg_ok "Backed up Data"
    CLEAN_INSTALL=1 fetch_and_deploy_gh_release "split-pro" "oss-apps/split-pro" "tarball" "latest" "/opt/split-pro"
    msg_info "Building Application"
    cd /opt/split-pro
    $STD pnpm install --frozen-lockfile
    $STD pnpm build
    cp /opt/split-pro.env /opt/split-pro/.env
    rm -f /opt/split-pro.env
    # Uploads persist in /opt/split-pro_data and are re-linked after each wipe.
    ln -sf /opt/split-pro_data/uploads /opt/split-pro/uploads
    $STD pnpm exec prisma migrate deploy
    msg_ok "Built Application"
    msg_info "Starting Service"
    systemctl start split-pro
    msg_ok "Started Service"
    msg_ok "Updated successfully!"
  fi
  exit
}

start
build_container
description

msg_ok "Completed successfully!\n"
echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
echo -e "${INFO}${YW} Access it using the following URL:${CL}"
echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:3000${CL}"

68
ct/sure.sh Normal file
View File

@ -0,0 +1,68 @@
#!/usr/bin/env bash
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)
# Copyright (c) 2021-2026 community-scripts ORG
# Author: vhsdream
# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
# Source: https://sure.am
# Container defaults; each may be overridden via the environment before launch.
APP="Sure"
var_tags="${var_tags:-finance}"
var_cpu="${var_cpu:-2}"
var_ram="${var_ram:-2048}"
var_disk="${var_disk:-6}"
var_os="${var_os:-debian}"
var_version="${var_version:-13}"
var_unprivileged="${var_unprivileged:-1}"
# header_info/variables/color/catch_errors are provided by the sourced build.func.
header_info "$APP"
variables
color
catch_errors
# In-place upgrade path: redeploys Sure (Rails app), pins Ruby to the repo's
# .ruby-version, reinstalls gems and precompiles assets.
function update_script() {
header_info
check_container_storage
check_container_resources
if [[ ! -d /opt/sure ]]; then
msg_error "No ${APP} Installation Found!"
exit
fi
if check_for_gh_release "Sure" "we-promise/sure"; then
msg_info "Stopping Sure"
$STD systemctl stop sure
msg_ok "Stopped Sure"
CLEAN_INSTALL=1 fetch_and_deploy_gh_release "Sure" "we-promise/sure" "tarball" "latest" "/opt/sure"
# Pin Ruby to the exact version the release declares; skip Rails install.
RUBY_VERSION="$(cat /opt/sure/.ruby-version)" RUBY_INSTALL_RAILS=false setup_ruby
msg_info "Updating Sure"
# Pick up the Ruby toolchain PATH written by setup_ruby.
source ~/.profile
cd /opt/sure
export RAILS_ENV=production
export BUNDLE_DEPLOYMENT=1
export BUNDLE_WITHOUT=development
$STD ./bin/bundle install
$STD ./bin/bundle exec bootsnap precompile --gemfile -j 0
$STD ./bin/bundle exec bootsnap precompile -j 0 app/ lib/
# SECRET_KEY_BASE_DUMMY lets assets:precompile run without the real secret.
export SECRET_KEY_BASE_DUMMY=1 && $STD ./bin/rails assets:precompile
unset SECRET_KEY_BASE_DUMMY
msg_ok "Updated Sure"
msg_info "Starting Sure"
$STD systemctl start sure
msg_ok "Started Sure"
msg_ok "Updated successfully!"
fi
exit
}
start
build_container
description
msg_ok "Completed successfully!\n"
echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
echo -e "${INFO}${YW} Access it using the following URL:${CL}"
echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:3000${CL}"

View File

@ -0,0 +1,44 @@
{
"name": "Calibre-Web",
"slug": "calibre-web",
"categories": [
4
],
"date_created": "2026-02-09",
"type": "ct",
"updateable": true,
"privileged": false,
"interface_port": 8083,
"documentation": "https://github.com/janeczku/calibre-web/wiki",
"website": "https://github.com/janeczku/calibre-web",
"logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/flat/calibre-web.webp",
"config_path": "/opt/calibre-web/app.db",
"description": "Web app for browsing, reading and downloading eBooks from a Calibre database. Provides an attractive interface with mobile support, user management, and eBook conversion capabilities.",
"install_methods": [
{
"type": "default",
"script": "ct/calibre-web.sh",
"resources": {
"cpu": 2,
"ram": 2048,
"hdd": 8,
"os": "Debian",
"version": "13"
}
}
],
"default_credentials": {
"username": null,
"password": null
},
"notes": [
{
"text": "No credentials are set by this script. Complete setup and create credentials in the first-run wizard.",
"type": "info"
},
{
"text": "Upload your Calibre library metadata.db during first setup wizard.",
"type": "info"
}
]
}

View File

@ -1,35 +0,0 @@
{
"name": "Draw.IO",
"slug": "drawio",
"categories": [
12
],
"date_created": "2026-01-29",
"type": "ct",
"updateable": true,
"privileged": false,
"interface_port": 8080,
"documentation": "https://www.drawio.com/doc/",
"website": "https://www.drawio.com/",
"logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/draw-io.webp",
"config_path": "",
"description": "draw.io is a configurable diagramming and whiteboarding application, jointly owned and developed by draw.io Ltd (previously named JGraph) and draw.io AG.",
"install_methods": [
{
"type": "default",
"script": "ct/drawio.sh",
"resources": {
"cpu": 1,
"ram": 2048,
"hdd": 4,
"os": "Debian",
"version": "13"
}
}
],
"default_credentials": {
"username": null,
"password": null
},
"notes": []
}

View File

@ -1,40 +0,0 @@
{
"name": "ebusd",
"slug": "ebusd",
"categories": [
16
],
"date_created": "2026-01-26",
"type": "ct",
"updateable": true,
"privileged": false,
"interface_port": null,
"documentation": "https://github.com/john30/ebusd/wiki",
"website": "https://github.com/john30/ebusd",
"logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/ebusd.webp",
"config_path": "/etc/default/ebusd",
"description": "ebusd is a daemon for handling communication with eBUS devices connected to a 2-wire `energy bus` used by numerous heating systems.",
"install_methods": [
{
"type": "default",
"script": "ct/ebusd.sh",
"resources": {
"cpu": 1,
"ram": 512,
"hdd": 2,
"os": "debian",
"version": "13"
}
}
],
"default_credentials": {
"username": "root",
"password": null
},
"notes": [
{
"text": "For required post installation actions, checkout: `https://github.com/community-scripts/ProxmoxVE/discussions/11352`",
"type": "info"
}
]
}

View File

@ -0,0 +1,44 @@
{
"name": "Gramps Web",
"slug": "gramps-web",
"categories": [
12
],
"date_created": "2026-02-16",
"type": "ct",
"updateable": true,
"privileged": false,
"interface_port": 5000,
"documentation": "https://www.grampsweb.org/install_setup/setup/",
"website": "https://www.grampsweb.org/",
"logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/gramps.webp",
"config_path": "/opt/gramps-web/config/config.cfg",
"description": "Gramps Web is a collaborative genealogy platform for browsing, editing and sharing family trees through a modern web interface.",
"install_methods": [
{
"type": "default",
"script": "ct/gramps-web.sh",
"resources": {
"cpu": 2,
"ram": 4096,
"hdd": 20,
"os": "Debian",
"version": "13"
}
}
],
"default_credentials": {
"username": null,
"password": null
},
"notes": [
{
"text": "On first access, create the owner account via the built-in onboarding wizard.",
"type": "info"
},
{
"text": "The initial deployment compiles the frontend and can take several minutes.",
"type": "warning"
}
]
}

View File

@ -1,40 +0,0 @@
{
"name": "linkding",
"slug": "linkding",
"categories": [
12
],
"date_created": "2026-02-09",
"type": "ct",
"updateable": true,
"privileged": false,
"interface_port": 9090,
"documentation": "https://linkding.link/",
"website": "https://linkding.link/",
"logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/linkding.webp",
"config_path": "/opt/linkding/.env",
"description": "linkding is a self-hosted bookmark manager that is designed to be minimal, fast, and easy to set up. It features a clean UI, tag-based organization, bulk editing, Markdown notes, read it later functionality, sharing, REST API, and browser extensions for Firefox and Chrome.",
"install_methods": [
{
"type": "default",
"script": "ct/linkding.sh",
"resources": {
"cpu": 2,
"ram": 1024,
"hdd": 4,
"os": "Debian",
"version": "13"
}
}
],
"default_credentials": {
"username": "admin",
"password": null
},
"notes": [
{
"text": "Admin credentials are stored in /opt/linkding/.env",
"type": "info"
}
]
}

View File

@ -1,35 +0,0 @@
{
"name": "RomM",
"slug": "romm",
"categories": [
24
],
"date_created": "2025-03-10",
"type": "ct",
"updateable": true,
"privileged": false,
"interface_port": 80,
"documentation": "https://docs.romm.app/latest/",
"website": "https://romm.app/",
"config_path": "/opt/romm/.env",
"logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/romm.webp",
"description": "RomM (ROM Manager) allows you to scan, enrich, browse and play your game collection with a clean and responsive interface. Support for multiple platforms, various naming schemes, and custom tags.",
"install_methods": [
{
"type": "default",
"script": "ct/romm.sh",
"resources": {
"cpu": 2,
"ram": 4096,
"hdd": 20,
"os": "debian",
"version": "13"
}
}
],
"default_credentials": {
"username": null,
"password": null
},
"notes": []
}

View File

@ -1,44 +0,0 @@
{
"name": "Seer",
"slug": "seer",
"categories": [
13
],
"date_created": "2026-01-19",
"type": "ct",
"updateable": true,
"privileged": false,
"interface_port": 5055,
"documentation": "https://docs.seerr.dev/",
"website": "https://seerr.dev/",
"logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/seerr.webp",
"config_path": "/etc/seer/seer.conf",
"description": "Open-source media request and discovery manager for Jellyfin, Plex, and Emby. Unified version of Overseerr and Jellyseerr.",
"install_methods": [
{
"type": "default",
"script": "ct/seer.sh",
"resources": {
"cpu": 4,
"ram": 4096,
"hdd": 8,
"os": "Debian",
"version": "13"
}
}
],
"default_credentials": {
"username": null,
"password": null
},
"notes": [
{
"text": "Seer migration is currently in beta.",
"type": "warning"
},
{
"text": "First release is essentially Jellyseerr rebranded with Overseerr migration support.",
"type": "info"
}
]
}

View File

@ -0,0 +1,44 @@
{
"name": "Split Pro",
"slug": "split-pro",
"categories": [
12
],
"date_created": "2026-02-12",
"type": "ct",
"updateable": true,
"privileged": false,
"interface_port": 3000,
"documentation": "https://github.com/oss-apps/split-pro/blob/main/docker/README.md",
"website": "https://github.com/oss-apps/split-pro",
"logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/splitpro.webp",
"config_path": "/opt/split-pro/.env",
"description": "SplitPro is a self-hosted, open source way to share expenses with friends. It is designed as a replacement for Splitwise.",
"install_methods": [
{
"type": "default",
"script": "ct/split-pro.sh",
"resources": {
"cpu": 2,
"ram": 4096,
"hdd": 6,
"os": "debian",
"version": "13"
}
}
],
"default_credentials": {
"username": null,
"password": null
},
"notes": [
{
"text": "Before first use you must configure email credentials or authentication (OAuth/OIDC) provider in `/opt/split-pro/.env` and restart the service `systemctl restart split-pro`.",
"type": "warning"
},
{
"text": "Receipt uploads are stored in `/opt/split-pro_data/uploads`",
"type": "info"
}
]
}

View File

@ -0,0 +1,35 @@
{
"name": "Sure",
"slug": "sure",
"categories": [
23
],
"date_created": "2026-02-18",
"type": "ct",
"updateable": true,
"privileged": false,
"interface_port": 3000,
"documentation": "https://github.com/we-promise/sure",
"website": "https://sure.am",
"logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/sure-finance.webp",
"config_path": "/etc/sure/.env",
"description": "The personal finance app for everyone. NOT affiliated with or endorsed by Maybe Finance Inc..",
"install_methods": [
{
"type": "default",
"script": "ct/sure.sh",
"resources": {
"cpu": 2,
"ram": 2048,
"hdd": 6,
"os": "Debian",
"version": "13"
}
}
],
"default_credentials": {
"username": null,
"password": null
},
"notes": []
}

View File

@ -6,7 +6,7 @@
],
"date_created": "2026-01-16",
"type": "vm",
"updateable": true,
"updateable": false,
"privileged": false,
"interface_port": null,
"documentation": "https://www.truenas.com/docs/",
@ -22,7 +22,7 @@
"cpu": 2,
"ram": 8192,
"hdd": 16,
"os": null,
"os": "Debian",
"version": null
}
}

View File

@ -0,0 +1,61 @@
#!/usr/bin/env bash
# Copyright (c) 2021-2026 community-scripts ORG
# Author: mikolaj92
# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
# Source: https://github.com/janeczku/calibre-web
# First-boot install script; build.func helpers arrive via FUNCTIONS_FILE_PATH.
source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
color
verb_ip6
catch_errors
setting_up_container
network_check
update_os
msg_info "Installing Dependencies"
$STD apt install -y \
python3 \
imagemagick \
libpango-1.0-0 \
libharfbuzz0b \
libpangoft2-1.0-0 \
fonts-liberation
msg_ok "Installed Dependencies"
msg_info "Installing Calibre (for eBook conversion)"
$STD apt install -y calibre
msg_ok "Installed Calibre"
fetch_and_deploy_gh_release "calibre-web" "janeczku/calibre-web" "tarball" "latest" "/opt/calibre-web"
setup_uv
msg_info "Installing Python Dependencies"
cd /opt/calibre-web
$STD uv sync --no-dev
msg_ok "Installed Python Dependencies"
msg_info "Creating Service"
mkdir -p /opt/calibre-web/data
# Run cps.py from the uv-managed venv created by 'uv sync' above.
cat <<EOF >/etc/systemd/system/calibre-web.service
[Unit]
Description=Calibre-Web Service
After=network.target

[Service]
Type=simple
User=root
WorkingDirectory=/opt/calibre-web
ExecStart=/opt/calibre-web/.venv/bin/python /opt/calibre-web/cps.py
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF
systemctl enable -q --now calibre-web
msg_ok "Created Service"
motd_ssh
customize
cleanup_lxc

View File

@ -47,16 +47,13 @@ msg_ok "Built Databasus"
msg_info "Configuring Databasus"
JWT_SECRET=$(openssl rand -hex 32)
ENCRYPTION_KEY=$(openssl rand -hex 32)
# Create PostgreSQL version symlinks for compatibility
for v in 12 13 14 15 16 18; do
ln -sf /usr/lib/postgresql/17 /usr/lib/postgresql/$v
done
# Install goose for migrations
$STD go install github.com/pressly/goose/v3/cmd/goose@latest
ln -sf /root/go/bin/goose /usr/local/bin/goose
cat <<EOF >/opt/databasus/.env
# Environment
ENV_MODE=production
@ -166,6 +163,7 @@ ln -sf /etc/nginx/sites-available/databasus /etc/nginx/sites-enabled/databasus
rm -f /etc/nginx/sites-enabled/default
$STD nginx -t
$STD systemctl enable -q --now nginx
$STD systemctl reload nginx
msg_ok "Configured Nginx"
motd_ssh

View File

@ -30,7 +30,7 @@ msg_ok "Installed Dependencies"
PG_VERSION="16" setup_postgresql
NODE_VERSION="22" setup_nodejs
RUBY_VERSION="3.3.6" setup_ruby
RUBY_VERSION="3.4.4" setup_ruby
msg_info "Configuring PostgreSQL for Discourse"
DISCOURSE_DB_PASS=$(openssl rand -base64 18 | tr -dc 'a-zA-Z0-9' | head -c13)
@ -38,8 +38,8 @@ DISCOURSE_DB_PASS=$(openssl rand -base64 18 | tr -dc 'a-zA-Z0-9' | head -c13)
PG_HBA="/etc/postgresql/16/main/pg_hba.conf"
sed -i 's/^local\s\+all\s\+all\s\+peer$/local all all md5/' "$PG_HBA"
$STD systemctl restart postgresql
# Create user with CREATEDB permission - Rails will create the database
$STD sudo -u postgres psql -c "CREATE ROLE discourse WITH LOGIN PASSWORD '$DISCOURSE_DB_PASS' CREATEDB;"
# Create user + database explicitly for reliable bootstrap
PG_DB_NAME="discourse" PG_DB_USER="discourse" PG_DB_PASS="$DISCOURSE_DB_PASS" setup_postgresql_db
msg_ok "Configured PostgreSQL for Discourse"
msg_info "Configuring Discourse"
@ -90,7 +90,9 @@ cd /opt/discourse
export PATH="$HOME/.rbenv/bin:$HOME/.rbenv/shims:$PATH"
eval "$(rbenv init - bash)" 2>/dev/null || true
export RAILS_ENV=production
$STD bundle exec rails db:create
set -a
source /opt/discourse/.env
set +a
$STD bundle exec rails db:migrate
msg_ok "Set Up Database"
@ -99,6 +101,9 @@ cd /opt/discourse
export PATH="$HOME/.rbenv/bin:$HOME/.rbenv/shims:$PATH"
eval "$(rbenv init - bash)" 2>/dev/null || true
export RAILS_ENV=production
set -a
source /opt/discourse/.env
set +a
$STD bundle exec rails assets:precompile
msg_ok "Built Discourse Assets"
@ -107,6 +112,9 @@ cd /opt/discourse
export PATH="$HOME/.rbenv/bin:$HOME/.rbenv/shims:$PATH"
eval "$(rbenv init - bash)" 2>/dev/null || true
export RAILS_ENV=production
set -a
source /opt/discourse/.env
set +a
$STD bundle exec rails runner "User.create!(email: 'admin@local', username: 'admin', password: '${DISCOURSE_DB_PASS}', admin: true)" || true
msg_ok "Created Discourse Admin User"
@ -121,7 +129,8 @@ Type=simple
User=root
WorkingDirectory=/opt/discourse
Environment=RAILS_ENV=production
ExecStart=/usr/local/bin/bundle exec puma -w 2
Environment=PATH=/root/.rbenv/shims:/root/.rbenv/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
ExecStart=/root/.rbenv/shims/bundle exec puma -w 2
Restart=on-failure
RestartSec=5

View File

@ -1,25 +0,0 @@
#!/usr/bin/env bash
# Copyright (c) 2021-2026 community-scripts ORG
# Author: Slaviša Arežina (tremor021)
# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
# Source: https://www.drawio.com/
source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
color
verb_ip6
catch_errors
setting_up_container
network_check
update_os
setup_hwaccel
msg_info "Installing Dependencies"
$STD apt install -y tomcat11
msg_ok "Installed Dependencies"
USE_ORIGINAL_FILENAME=true fetch_and_deploy_gh_release "drawio" "jgraph/drawio" "singlefile" "latest" "/var/lib/tomcat11/webapps" "draw.war"
motd_ssh
customize
cleanup_lxc

View File

@ -1,23 +0,0 @@
#!/usr/bin/env bash
# Copyright (c) 2021-2026 community-scripts ORG
# Author: Joerg Heinemann (heinemannj)
# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
# Source: https://github.com/john30/ebusd
source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
color
verb_ip6
catch_errors
setting_up_container
network_check
update_os
msg_info "Installing ebusd"
fetch_and_deploy_gh_release "ebusd" "john30/ebusd" "binary" "latest" "" "ebusd-*_amd64-trixie_mqtt1.deb"
systemctl enable -q ebusd.service
msg_ok "Installed ebusd"
motd_ssh
customize
cleanup_lxc

View File

@ -30,14 +30,18 @@ RUST_CRATES="wasm-pack" setup_rust
$STD rustup target add wasm32-unknown-unknown
ENTE_CLI_VERSION=$(curl -s https://api.github.com/repos/ente-io/ente/releases | jq -r '[.[] | select(.tag_name | startswith("cli-v"))][0].tag_name')
fetch_and_deploy_gh_release "ente-server" "ente-io/ente" "tarball" "latest" "/opt/ente"
fetch_and_deploy_gh_release "ente-cli" "ente-io/ente" "prebuild" "$ENTE_CLI_VERSION" "/usr/local/bin" "ente-$ENTE_CLI_VERSION-linux-amd64.tar.gz"
$STD mkdir -p /opt/ente/cli
msg_info "Building Ente CLI"
cd /opt/ente/cli
$STD go build -o /usr/local/bin/ente .
chmod +x /usr/local/bin/ente
msg_ok "Built Ente CLI"
$STD mkdir -p /opt/ente/cli-config
msg_info "Configuring Ente CLI"
cat <<EOF >>~/.bashrc
export ENTE_CLI_SECRETS_PATH=/opt/ente/cli/secrets.txt
export ENTE_CLI_SECRETS_PATH=/opt/ente/cli-config/secrets.txt
export PATH="/usr/local/bin:$PATH"
EOF
$STD source ~/.bashrc

View File

@ -0,0 +1,129 @@
#!/usr/bin/env bash
# Copyright (c) 2021-2026 community-scripts ORG
# Author: GitHub Copilot
# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
# Source: https://www.grampsweb.org/
# Installs Gramps Web inside an LXC: the gramps-web-api backend in a uv-managed
# Python 3.12 venv plus the gramps-web frontend (built with npm), served by
# gunicorn on port 5000 via a systemd unit.
# All helper functions used below (color, catch_errors, msg_*, $STD, setup_*,
# fetch_and_deploy_gh_release, LOCAL_IP, ...) come from the framework script
# streamed in via FUNCTIONS_FILE_PATH — see that file for their semantics.
source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
color
verb_ip6
catch_errors
setting_up_container
network_check
update_os
# The GObject introspection dev package was renamed between Debian releases;
# probe apt for whichever variant this release actually ships.
if apt-cache show libgirepository1.0-dev >/dev/null 2>&1; then
GI_DEV_PACKAGE="libgirepository1.0-dev"
elif apt-cache show libgirepository-2.0-dev >/dev/null 2>&1; then
GI_DEV_PACKAGE="libgirepository-2.0-dev"
else
msg_error "No supported girepository development package found!"
exit
fi
msg_info "Installing Dependencies"
# GTK/GObject introspection stack for Gramps, C build tooling for Python
# wheels, plus media (ffmpeg), OCR (tesseract) and PDF (poppler) tools.
$STD apt install -y \
appstream \
build-essential \
ffmpeg \
gettext \
gobject-introspection \
gir1.2-gexiv2-0.10 \
gir1.2-gtk-3.0 \
gir1.2-osmgpsmap-1.0 \
gir1.2-pango-1.0 \
git \
graphviz \
libcairo2-dev \
libglib2.0-dev \
libicu-dev \
libopencv-dev \
pkg-config \
poppler-utils \
python3-dev \
tesseract-ocr \
"$GI_DEV_PACKAGE"
msg_ok "Installed Dependencies"
PYTHON_VERSION="3.12" setup_uv
NODE_VERSION="22" setup_nodejs
# Backend (API) and frontend are separate upstream repos, deployed side by side.
fetch_and_deploy_gh_release "gramps-web-api" "gramps-project/gramps-web-api" "tarball" "latest" "/opt/gramps-web-api"
fetch_and_deploy_gh_release "gramps-web" "gramps-project/gramps-web" "tarball" "latest" "/opt/gramps-web/frontend"
msg_info "Setting up Gramps Web"
# Data layout mirrors the paths referenced in config.cfg below.
mkdir -p \
/opt/gramps-web/config \
/opt/gramps-web/data/cache/export \
/opt/gramps-web/data/cache/persistent \
/opt/gramps-web/data/cache/report \
/opt/gramps-web/data/cache/request \
/opt/gramps-web/data/cache/thumbnail \
/opt/gramps-web/data/gramps/grampsdb \
/opt/gramps-web/data/indexdir \
/opt/gramps-web/data/media \
/opt/gramps-web/data/users
# Random token used by the app for signing; written into config.cfg below.
SECRET_KEY="$(python3 -c "import secrets; print(secrets.token_urlsafe(32))")"
cat <<EOF >/opt/gramps-web/config/config.cfg
TREE="Gramps Web"
SECRET_KEY="${SECRET_KEY}"
BASE_URL="http://${LOCAL_IP}:5000"
USER_DB_URI="sqlite:////opt/gramps-web/data/users/users.sqlite"
SEARCH_INDEX_DB_URI="sqlite:////opt/gramps-web/data/indexdir/search_index.db"
MEDIA_BASE_DIR="/opt/gramps-web/data/media"
STATIC_PATH="/opt/gramps-web/frontend/dist"
THUMBNAIL_CACHE_CONFIG={"CACHE_TYPE":"FileSystemCache","CACHE_DIR":"/opt/gramps-web/data/cache/thumbnail","CACHE_THRESHOLD":1000,"CACHE_DEFAULT_TIMEOUT":0}
REQUEST_CACHE_CONFIG={"CACHE_TYPE":"FileSystemCache","CACHE_DIR":"/opt/gramps-web/data/cache/request","CACHE_THRESHOLD":1000,"CACHE_DEFAULT_TIMEOUT":0}
PERSISTENT_CACHE_CONFIG={"CACHE_TYPE":"FileSystemCache","CACHE_DIR":"/opt/gramps-web/data/cache/persistent","CACHE_THRESHOLD":0,"CACHE_DEFAULT_TIMEOUT":0}
REPORT_DIR="/opt/gramps-web/data/cache/report"
EXPORT_DIR="/opt/gramps-web/data/cache/export"
EOF
# NOTE(review): confirm the '-c' flag to 'uv venv' against the uv version
# installed by setup_uv.
$STD uv venv -c -p python3.12 /opt/gramps-web/venv
source /opt/gramps-web/venv/bin/activate
$STD uv pip install --no-cache-dir --upgrade pip setuptools wheel
$STD uv pip install --no-cache-dir gunicorn
$STD uv pip install --no-cache-dir /opt/gramps-web-api
# Build the frontend bundle into frontend/dist (STATIC_PATH above).
cd /opt/gramps-web/frontend
export COREPACK_ENABLE_DOWNLOAD_PROMPT=0
corepack enable
$STD npm install
$STD npm run build
# Initialize/migrate the user database with the same env the service will use.
GRAMPS_API_CONFIG=/opt/gramps-web/config/config.cfg \
GRAMPSHOME=/opt/gramps-web/data/gramps \
GRAMPS_DATABASE_PATH=/opt/gramps-web/data/gramps/grampsdb \
$STD /opt/gramps-web/venv/bin/python3 -m gramps_webapi user migrate
msg_ok "Set up Gramps Web"
msg_info "Creating Service"
cat <<EOF >/etc/systemd/system/gramps-web.service
[Unit]
Description=Gramps Web Service
After=network.target
[Service]
Type=simple
User=root
WorkingDirectory=/opt/gramps-web-api
Environment=GRAMPS_API_CONFIG=/opt/gramps-web/config/config.cfg
Environment=GRAMPSHOME=/opt/gramps-web/data/gramps
Environment=GRAMPS_DATABASE_PATH=/opt/gramps-web/data/gramps/grampsdb
Environment=PATH=/opt/gramps-web/venv/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
ExecStart=/opt/gramps-web/venv/bin/gunicorn -w 2 -b 0.0.0.0:5000 gramps_webapi.wsgi:app --timeout 120 --limit-request-line 8190
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
systemctl enable -q --now gramps-web
msg_ok "Created Service"
motd_ssh
customize
cleanup_lxc

View File

@ -1,126 +0,0 @@
#!/usr/bin/env bash
# Copyright (c) 2021-2026 community-scripts ORG
# Author: MickLesk (MickLesk)
# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
# Source: https://linkding.link/
source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
color
verb_ip6
catch_errors
setting_up_container
network_check
update_os
msg_info "Installing Dependencies"
$STD apt install -y \
build-essential \
pkg-config \
python3-dev \
nginx \
libpq-dev \
libicu-dev \
libsqlite3-dev \
libffi-dev
msg_ok "Installed Dependencies"
NODE_VERSION="22" setup_nodejs
setup_uv
fetch_and_deploy_gh_release "linkding" "sissbruecker/linkding"
msg_info "Building Frontend"
cd /opt/linkding
$STD npm ci
$STD npm run build
ln -sf /usr/lib/x86_64-linux-gnu/mod_icu.so /opt/linkding/libicu.so
msg_ok "Built Frontend"
msg_info "Setting up linkding"
rm -f bookmarks/settings/dev.py
touch bookmarks/settings/custom.py
$STD uv sync --no-dev --frozen
$STD uv pip install gunicorn
mkdir -p data/{favicons,previews,assets}
ADMIN_PASS=$(openssl rand -base64 18 | tr -dc 'a-zA-Z0-9' | cut -c1-13)
cat <<EOF >/opt/linkding/.env
LD_SUPERUSER_NAME=admin
LD_SUPERUSER_PASSWORD=${ADMIN_PASS}
LD_CSRF_TRUSTED_ORIGINS=http://${LOCAL_IP}:9090
EOF
set -a && source /opt/linkding/.env && set +a
$STD /opt/linkding/.venv/bin/python manage.py generate_secret_key
$STD /opt/linkding/.venv/bin/python manage.py migrate
$STD /opt/linkding/.venv/bin/python manage.py enable_wal
$STD /opt/linkding/.venv/bin/python manage.py create_initial_superuser
$STD /opt/linkding/.venv/bin/python manage.py collectstatic --no-input
msg_ok "Set up linkding"
msg_info "Creating Services"
cat <<EOF >/etc/systemd/system/linkding.service
[Unit]
Description=linkding Bookmark Manager
After=network.target
[Service]
User=root
WorkingDirectory=/opt/linkding
EnvironmentFile=/opt/linkding/.env
ExecStart=/opt/linkding/.venv/bin/gunicorn \
--bind 127.0.0.1:8000 \
--workers 3 \
--threads 2 \
--timeout 120 \
bookmarks.wsgi:application
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
cat <<EOF >/etc/systemd/system/linkding-tasks.service
[Unit]
Description=linkding Background Tasks
After=network.target
[Service]
User=root
WorkingDirectory=/opt/linkding
EnvironmentFile=/opt/linkding/.env
ExecStart=/opt/linkding/.venv/bin/python manage.py run_huey
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
cat <<'EOF' >/etc/nginx/sites-available/linkding
server {
listen 9090;
server_name _;
client_max_body_size 20M;
location /static/ {
alias /opt/linkding/static/;
expires 30d;
}
location / {
proxy_pass http://127.0.0.1:8000;
proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_redirect off;
}
}
EOF
$STD rm -f /etc/nginx/sites-enabled/default
$STD ln -sf /etc/nginx/sites-available/linkding /etc/nginx/sites-enabled/linkding
systemctl enable -q --now nginx linkding linkding-tasks
systemctl restart nginx
msg_ok "Created Services"
motd_ssh
customize
cleanup_lxc

View File

@ -63,6 +63,8 @@ PUBLIC_URL=${LOCAL_IP}:3000
# TRUST_PROXY=
# CORS_ORIGINS=
TERMINAL_ENABLED=false
LOG_LEVEL=info
DEBUG=false
ENABLE_HTTP_LOGGING=false

View File

@ -20,7 +20,7 @@ $STD apt-get install -y \
tesseract-ocr-all
msg_ok "Installed Dependencies"
RELEASE=$(curl -fsSL https://api.github.com/repos/papra-hq/papra/releases | grep -oP '"tag_name":\s*"\K@papra/docker@[^"]+' | head -n1)
RELEASE=$(curl -fsSL https://api.github.com/repos/papra-hq/papra/releases | grep -oP '"tag_name":\s*"\K@papra/app@[^"]+' | head -n1)
fetch_and_deploy_gh_release "papra" "papra-hq/papra" "tarball" "${RELEASE}" "/opt/papra"
pnpm_version=$(grep -oP '"packageManager":\s*"pnpm@\K[^"]+' /opt/papra/package.json)

View File

@ -1,346 +0,0 @@
#!/usr/bin/env bash
# Copyright (c) 2021-2026 community-scripts ORG
# Author: DevelopmentCats
# Co-author: AlphaLawless
# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
# Source: https://romm.app
# Updated: 25/12/2025
source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
color
verb_ip6
catch_errors
setting_up_container
network_check
update_os
msg_info "Installing Dependencies"
$STD apt install -y \
acl \
git \
build-essential \
libssl-dev \
libffi-dev \
libmagic-dev \
python3-dev \
python3-pip \
python3-venv \
libmariadb3 \
libmariadb-dev \
libpq-dev \
libbz2-dev \
libreadline-dev \
libsqlite3-dev \
zlib1g-dev \
liblzma-dev \
libncurses5-dev \
libncursesw5-dev \
redis-server \
redis-tools \
p7zip-full \
tzdata \
nginx
msg_ok "Installed Dependencies"
PYTHON_VERSION="3.13" setup_uv
NODE_VERSION="22" setup_nodejs
setup_mariadb
MARIADB_DB_NAME="romm" MARIADB_DB_USER="romm" setup_mariadb_db
msg_info "Creating directories"
mkdir -p /opt/romm \
/var/lib/romm/config \
/var/lib/romm/resources \
/var/lib/romm/assets/{saves,states,screenshots} \
/var/lib/romm/library/roms \
/var/lib/romm/library/bios
msg_ok "Created directories"
msg_info "Creating configuration file"
cat <<'EOF' >/var/lib/romm/config/config.yml
# RomM Configuration File
# Documentation: https://docs.romm.app/latest/Getting-Started/Configuration-File/
# Only uncomment the lines you want to use/modify
# exclude:
# platforms:
# - excluded_folder_a
# roms:
# single_file:
# extensions:
# - xml
# - txt
# names:
# - '._*'
# - '*.nfo'
# multi_file:
# names:
# - downloaded_media
# - media
# system:
# platforms:
# gc: ngc
# ps1: psx
# The folder name where your roms are located (relative to library path)
# filesystem:
# roms_folder: 'roms'
# scan:
# priority:
# metadata:
# - "igdb"
# - "moby"
# - "ss"
# - "ra"
# artwork:
# - "igdb"
# - "moby"
# - "ss"
# region:
# - "us"
# - "eu"
# - "jp"
# language:
# - "en"
# media:
# - box2d
# - box3d
# - screenshot
# - manual
# emulatorjs:
# debug: false
# cache_limit: null
EOF
chmod 644 /var/lib/romm/config/config.yml
msg_ok "Created configuration file"
fetch_and_deploy_gh_release "RAHasher" "RetroAchievements/RALibretro" "prebuild" "latest" "/opt/RALibretro" "RAHasher-x64-Linux-*.zip"
cp /opt/RALibretro/RAHasher /usr/bin/RAHasher
chmod +x /usr/bin/RAHasher
fetch_and_deploy_gh_release "romm" "rommapp/romm"
msg_info "Creating environment file"
sed -i 's/^supervised no/supervised systemd/' /etc/redis/redis.conf
systemctl restart redis-server
systemctl enable -q --now redis-server
AUTH_SECRET_KEY=$(openssl rand -hex 32)
cat <<EOF >/opt/romm/.env
ROMM_BASE_PATH=/var/lib/romm
ROMM_CONFIG_PATH=/var/lib/romm/config/config.yml
WEB_CONCURRENCY=4
DB_HOST=127.0.0.1
DB_PORT=3306
DB_NAME=$MARIADB_DB_NAME
DB_USER=$MARIADB_DB_USER
DB_PASSWD=$MARIADB_DB_PASS
REDIS_HOST=127.0.0.1
REDIS_PORT=6379
ROMM_AUTH_SECRET_KEY=$AUTH_SECRET_KEY
DISABLE_DOWNLOAD_ENDPOINT_AUTH=false
DISABLE_CSRF_PROTECTION=false
ENABLE_RESCAN_ON_FILESYSTEM_CHANGE=true
RESCAN_ON_FILESYSTEM_CHANGE_DELAY=5
ENABLE_SCHEDULED_RESCAN=true
SCHEDULED_RESCAN_CRON=0 3 * * *
ENABLE_SCHEDULED_UPDATE_SWITCH_TITLEDB=true
SCHEDULED_UPDATE_SWITCH_TITLEDB_CRON=0 4 * * *
LOGLEVEL=INFO
EOF
chmod 600 /opt/romm/.env
msg_ok "Created environment file"
msg_info "Setting up RomM Backend"
cd /opt/romm
export UV_CONCURRENT_DOWNLOADS=1
$STD uv sync --all-extras
cd /opt/romm/backend
$STD uv run alembic upgrade head
msg_ok "Set up RomM Backend"
msg_info "Setting up RomM Frontend"
cd /opt/romm/frontend
$STD npm install
$STD npm run build
cp -rf /opt/romm/frontend/assets/* /opt/romm/frontend/dist/assets/
mkdir -p /opt/romm/frontend/dist/assets/romm
ln -sfn /var/lib/romm/resources /opt/romm/frontend/dist/assets/romm/resources
ln -sfn /var/lib/romm/assets /opt/romm/frontend/dist/assets/romm/assets
msg_ok "Set up RomM Frontend"
msg_info "Configuring Nginx"
cat <<'EOF' >/etc/nginx/sites-available/romm
upstream romm_backend {
server 127.0.0.1:5000;
}
map $http_upgrade $connection_upgrade {
default upgrade;
'' close;
}
server {
listen 80;
server_name _;
root /opt/romm/frontend/dist;
client_max_body_size 0;
# Frontend SPA
location / {
try_files $uri $uri/ /index.html;
}
# Static assets
location /assets {
alias /opt/romm/frontend/dist/assets;
try_files $uri $uri/ =404;
expires 1y;
add_header Cache-Control "public, immutable";
}
# EmulatorJS player - requires COOP/COEP headers for SharedArrayBuffer
location ~ ^/rom/.*/ejs$ {
add_header Cross-Origin-Embedder-Policy "require-corp";
add_header Cross-Origin-Opener-Policy "same-origin";
try_files $uri /index.html;
}
# Backend API
location /api {
proxy_pass http://romm_backend;
proxy_buffering off;
proxy_request_buffering off;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
# WebSocket and Netplay
location ~ ^/(ws|netplay) {
proxy_pass http://romm_backend;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
proxy_set_header Host $host;
proxy_read_timeout 86400;
}
# OpenAPI docs
location = /openapi.json {
proxy_pass http://romm_backend;
}
# Internal library file serving
location /library/ {
internal;
alias /var/lib/romm/library/;
}
}
EOF
rm -f /etc/nginx/sites-enabled/default
ln -sf /etc/nginx/sites-available/romm /etc/nginx/sites-enabled/romm
systemctl restart nginx
systemctl enable -q --now nginx
msg_ok "Configured Nginx"
msg_info "Creating Services"
cat <<EOF >/etc/systemd/system/romm-backend.service
[Unit]
Description=RomM Backend
After=network.target mariadb.service redis-server.service
Requires=mariadb.service redis-server.service
[Service]
Type=simple
WorkingDirectory=/opt/romm/backend
EnvironmentFile=/opt/romm/.env
Environment="PYTHONPATH=/opt/romm"
ExecStart=/opt/romm/.venv/bin/python main.py
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
cat <<EOF >/etc/systemd/system/romm-worker.service
[Unit]
Description=RomM RQ Worker
After=network.target mariadb.service redis-server.service romm-backend.service
Requires=mariadb.service redis-server.service
[Service]
Type=simple
WorkingDirectory=/opt/romm/backend
EnvironmentFile=/opt/romm/.env
Environment="PYTHONPATH=/opt/romm/backend"
ExecStart=/opt/romm/.venv/bin/rq worker --path /opt/romm/backend --url redis://127.0.0.1:6379/0 high default low
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
cat <<EOF >/etc/systemd/system/romm-scheduler.service
[Unit]
Description=RomM RQ Scheduler
After=network.target mariadb.service redis-server.service romm-backend.service
Requires=mariadb.service redis-server.service
[Service]
Type=simple
WorkingDirectory=/opt/romm/backend
EnvironmentFile=/opt/romm/.env
Environment="PYTHONPATH=/opt/romm/backend"
Environment="RQ_REDIS_HOST=127.0.0.1"
Environment="RQ_REDIS_PORT=6379"
ExecStart=/opt/romm/.venv/bin/rqscheduler --path /opt/romm/backend
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
cat <<EOF >/etc/systemd/system/romm-watcher.service
[Unit]
Description=RomM Filesystem Watcher
After=network.target romm-backend.service
Requires=romm-backend.service
[Service]
Type=simple
WorkingDirectory=/opt/romm/backend
EnvironmentFile=/opt/romm/.env
Environment="PYTHONPATH=/opt/romm/backend"
ExecStart=/opt/romm/.venv/bin/watchfiles --target-type command '/opt/romm/.venv/bin/python watcher.py' /var/lib/romm/library
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
systemctl enable -q --now romm-backend romm-worker romm-scheduler romm-watcher
msg_ok "Created Services"
motd_ssh
customize
cleanup_lxc

View File

@ -1,66 +0,0 @@
#!/usr/bin/env bash
# Copyright (c) 2021-2026 community-scripts ORG
# Author: CrazyWolf13
# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
# Source: https://docs.seerr.dev/
source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
color
verb_ip6
catch_errors
setting_up_container
network_check
update_os
msg_info "Installing Dependencies"
$STD apt-get install -y build-essential
msg_ok "Installed Dependencies"
fetch_and_deploy_gh_release "seer" "seerr-team/seerr" "tarball" "latest"
pnpm_desired=$(grep -Po '"pnpm":\s*"\K[^"]+' /opt/seer/package.json)
NODE_VERSION="22" NODE_MODULE="pnpm@$pnpm_desired" setup_nodejs
msg_info "Installing Seer (Patience)"
export CYPRESS_INSTALL_BINARY=0
cd /opt/seer
$STD pnpm install --frozen-lockfile
export NODE_OPTIONS="--max-old-space-size=3072"
$STD pnpm build
mkdir -p /etc/seer/
cat <<EOF >/etc/seer/seer.conf
## Seer's default port is 5055, if you want to use both, change this.
## specify on which port to listen
PORT=5055
## specify on which interface to listen, by default seer listens on all interfaces
HOST=0.0.0.0
## Uncomment if you want to force Node.js to resolve IPv4 before IPv6 (advanced users only)
# FORCE_IPV4_FIRST=true
EOF
msg_ok "Installed Seer"
msg_info "Creating Service"
cat <<EOF >/etc/systemd/system/seer.service
[Unit]
Description=Seer Service
After=network.target
[Service]
EnvironmentFile=/etc/seer/seer.conf
Environment=NODE_ENV=production
Type=exec
WorkingDirectory=/opt/seer
ExecStart=/usr/bin/node dist/index.js
[Install]
WantedBy=multi-user.target
EOF
systemctl enable -q --now seer
msg_ok "Created Service"
motd_ssh
customize
cleanup_lxc

View File

@ -0,0 +1,89 @@
#!/usr/bin/env bash
# Copyright (c) 2021-2026 community-scripts ORG
# Author: johanngrobe
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://github.com/oss-apps/split-pro
# Installs Split Pro (Next.js expense-sharing app) with PostgreSQL 17 + pg_cron,
# built with pnpm and run as a systemd service on port 3000.
# Helper functions (msg_*, $STD, setup_*, fetch_and_deploy_gh_release,
# LOCAL_IP, PG_DB_PASS, ...) are provided by the framework script streamed in
# via FUNCTIONS_FILE_PATH.
source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
color
verb_ip6
catch_errors
setting_up_container
network_check
update_os
NODE_VERSION="22" NODE_MODULE="pnpm" setup_nodejs
PG_VERSION="17" setup_postgresql
msg_info "Installing Dependencies"
$STD apt install -y \
openssl \
postgresql-17-cron
msg_ok "Installed Dependencies"
# Creates DB + role; the generated password is exposed as PG_DB_PASS below.
PG_DB_NAME="splitpro" PG_DB_USER="splitpro" setup_postgresql_db
msg_info "Setting up pg_cron"
# Enable the pg_cron extension: uncomment shared_preload_libraries (assumes
# the stock "#shared_preload_libraries = ''" default line) and set it to
# pg_cron, then restart so the library is actually preloaded.
sed -i "/^#shared_preload_libraries/s/^#//" /etc/postgresql/17/main/postgresql.conf
sed -i "/^shared_preload_libraries/s/''/pg_cron/" /etc/postgresql/17/main/postgresql.conf
systemctl restart postgresql
# pg_cron runs its scheduler against exactly one database; point it at splitpro.
$STD sudo -u postgres psql -c "ALTER SYSTEM SET cron.database_name = 'splitpro'"
$STD sudo -u postgres psql -c "ALTER SYSTEM SET cron.timezone = 'UTC'"
systemctl restart postgresql
$STD sudo -u postgres psql -d splitpro -c "CREATE EXTENSION IF NOT EXISTS pg_cron"
# Allow the app role to manage its own scheduled jobs.
$STD sudo -u postgres psql -d splitpro -c "GRANT USAGE ON SCHEMA cron TO splitpro"
$STD sudo -u postgres psql -d splitpro -c "GRANT ALL ON ALL TABLES IN SCHEMA cron TO splitpro"
msg_ok "Setup pg_cron complete"
fetch_and_deploy_gh_release "split-pro" "oss-apps/split-pro" "tarball" "latest" "/opt/split-pro"
msg_info "Installing Dependencies"
cd /opt/split-pro
$STD pnpm install --frozen-lockfile
msg_ok "Installed Dependencies"
msg_info "Building Split Pro"
cd /opt/split-pro
# Keep uploads outside the release tree so re-deploys don't wipe user data.
mkdir -p /opt/split-pro_data/uploads
ln -sf /opt/split-pro_data/uploads /opt/split-pro/uploads
NEXTAUTH_SECRET=$(openssl rand -base64 32)
# Start from upstream's example env, then rewrite the values this LXC needs.
cp .env.example .env
sed -i "s|^DATABASE_URL=.*|DATABASE_URL=\"postgresql://${PG_DB_USER}:${PG_DB_PASS}@localhost:5432/${PG_DB_NAME}\"|" .env
sed -i "s|^NEXTAUTH_SECRET=.*|NEXTAUTH_SECRET=\"${NEXTAUTH_SECRET}\"|" .env
sed -i "s|^NEXTAUTH_URL=.*|NEXTAUTH_URL=\"http://${LOCAL_IP}:3000\"|" .env
sed -i "s|^NEXTAUTH_URL_INTERNAL=.*|NEXTAUTH_URL_INTERNAL=\"http://localhost:3000\"|" .env
# Drop docker-compose-only variables that have no meaning in a bare install.
sed -i "/^POSTGRES_CONTAINER_NAME=/d" .env
sed -i "/^POSTGRES_USER=/d" .env
sed -i "/^POSTGRES_PASSWORD=/d" .env
sed -i "/^POSTGRES_DB=/d" .env
sed -i "/^POSTGRES_PORT=/d" .env
$STD pnpm build
# Apply the schema before first start so the service boots against a ready DB.
$STD pnpm exec prisma migrate deploy
msg_ok "Built Split Pro"
msg_info "Creating Service"
cat <<EOF >/etc/systemd/system/split-pro.service
[Unit]
Description=Split Pro
After=network.target postgresql.service
Requires=postgresql.service
[Service]
Type=simple
User=root
WorkingDirectory=/opt/split-pro
EnvironmentFile=/opt/split-pro/.env
ExecStart=/usr/bin/pnpm start
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
systemctl enable -q --now split-pro
msg_ok "Created Service"
motd_ssh
customize
cleanup_lxc

87
install/sure-install.sh Normal file
View File

@ -0,0 +1,87 @@
#!/usr/bin/env bash
# Copyright (c) 2021-2026 community-scripts ORG
# Author: vhsdream
# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
# Source: https://sure.am
# Installs Sure (Rails app): PostgreSQL + Redis backend, rbenv-managed Ruby,
# precompiled assets, and a systemd unit that runs `rails db:prepare` before
# each start.
# Helper functions (msg_*, $STD, setup_*, fetch_and_deploy_gh_release,
# LOCAL_IP, PG_DB_PASS, ...) are provided by the framework script streamed in
# via FUNCTIONS_FILE_PATH.
source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
color
verb_ip6
catch_errors
setting_up_container
network_check
update_os
msg_info "Installing Dependencies"
$STD apt install -y \
build-essential \
redis-server \
pkg-config \
libpq-dev \
libvips
msg_ok "Installed Dependencies"
fetch_and_deploy_gh_release "Sure" "we-promise/sure" "tarball" "latest" "/opt/sure"
# Derive the PostgreSQL major version from the postgres image tag in the
# upstream compose example, so this install tracks what upstream targets.
PG_VERSION="$(sed -n '/postgres:/s/[^[:digit:]]*//p' /opt/sure/compose.example.yml)" setup_postgresql
PG_DB_NAME=sure_production PG_DB_USER=sure_user setup_postgresql_db
# Pin Ruby to the repo's .ruby-version; Rails comes from the Gemfile, so the
# helper's own Rails install is disabled.
RUBY_VERSION="$(cat /opt/sure/.ruby-version)" RUBY_INSTALL_RAILS=false setup_ruby
msg_info "Building Sure"
cd /opt/sure
export RAILS_ENV=production
export BUNDLE_DEPLOYMENT=1
export BUNDLE_WITHOUT=development
$STD ./bin/bundle install
# Warm bootsnap's gem and app/lib caches for faster boots.
$STD ./bin/bundle exec bootsnap precompile --gemfile -j 0
$STD ./bin/bundle exec bootsnap precompile -j 0 app/ lib/
# SECRET_KEY_BASE_DUMMY lets assets:precompile run before the real secret
# exists; it is unset right after.
export SECRET_KEY_BASE_DUMMY=1 && $STD ./bin/rails assets:precompile
unset SECRET_KEY_BASE_DUMMY
msg_ok "Built Sure"
msg_info "Configuring Sure"
KEY="$(openssl rand -hex 64)"
mkdir -p /etc/sure
mv /opt/sure/.env.example /etc/sure/.env
# Single multi-expression sed pass over the example env file:
#  1. inject the generated SECRET_KEY_BASE,
#  2. append RAILS_FORCE_SSL/RAILS_ASSUME_SSL=false after it (reverse-proxy
#     users flip RAILS_ASSUME_SSL to true — the '#' line below is written
#     into the env file, it is not a shell comment),
#  3. swap the default 'postgres' credentials for the generated DB
#     user/password and add a POSTGRES_DB line,
#  4. point APP_DOMAIN at this container's IP.
sed -i -e "/^SECRET_KEY_BASE=/s/secret-value/${KEY}/" \
-e 's/_KEY_BASE=.*$/&\n\nRAILS_FORCE_SSL=false \
\
# Change to true when using a reverse proxy \
RAILS_ASSUME_SSL=false/' \
-e "/POSTGRES_PASSWORD=/s/postgres/${PG_DB_PASS}/" \
-e "/POSTGRES_USER=/s/postgres/${PG_DB_USER}\\
POSTGRES_DB=${PG_DB_NAME}/" \
-e "s|^APP_DOMAIN=|&${LOCAL_IP}|" /etc/sure/.env
msg_ok "Configured Sure"
msg_info "Creating Service"
cat <<EOF >/etc/systemd/system/sure.service
[Unit]
Description=Sure Service
After=network.target
[Service]
Type=simple
WorkingDirectory=/opt/sure
Environment=RAILS_ENV=production
Environment=BUNDLE_DEPLOYMENT=1
Environment=BUNDLE_WITHOUT=development
Environment=PATH=/root/.rbenv/shims:/root/.rbenv/bin:/usr/bin:\$PATH
EnvironmentFile=/etc/sure/.env
ExecStartPre=/opt/sure/bin/rails db:prepare
ExecStart=/opt/sure/bin/rails server
Restart=always
RestartSec=5
StandardOutput=journal
StandardError=journal
[Install]
WantedBy=multi-user.target
EOF
$STD systemctl enable -q --now sure
msg_ok "Created Service"
motd_ssh
customize
cleanup_lxc

View File

@ -1,6 +1,6 @@
# Copyright (c) 2021-2026 community-scripts ORG
# Author: michelroegl-brunner
# License: MIT | https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/LICENSE
# Author: michelroegl-brunner | MickLesk
# License: MIT | https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/LICENSE
# ==============================================================================
# API.FUNC - TELEMETRY & DIAGNOSTICS API
@ -37,9 +37,74 @@ TELEMETRY_URL="https://telemetry.community-scripts.org/telemetry"
# Timeout for telemetry requests (seconds)
TELEMETRY_TIMEOUT=5
# Repository source identifier (auto-transformed by CI on promotion to ProxmoxVE)
# DO NOT CHANGE - this is used by the telemetry service to route data to the correct collection
REPO_SOURCE="community-scripts/ProxmoxVED"
# ==============================================================================
# SECTION 0: REPOSITORY SOURCE DETECTION
# ==============================================================================
# ------------------------------------------------------------------------------
# detect_repo_source()
#
# - Dynamically detects which GitHub/Gitea repo the scripts were loaded from
# - Inspects /proc/$$/cmdline and $0 to find the source URL
# - Maps detected repo to one of three canonical values:
# * "ProxmoxVE" — official community-scripts/ProxmoxVE (production)
# * "ProxmoxVED" — official community-scripts/ProxmoxVED (development)
# * "external" — any fork or unknown source
# - Fallback: "ProxmoxVED" (CI sed transforms ProxmoxVED → ProxmoxVE on promotion)
# - Sets and exports REPO_SOURCE global variable
# - Skips detection if REPO_SOURCE is already set (e.g., by environment)
# ------------------------------------------------------------------------------
detect_repo_source() {
  # Respect an existing value (environment override or a previous detection).
  if [[ -n "${REPO_SOURCE:-}" ]]; then
    return 0
  fi

  local script_text="" detected=""

  # Source 1: the process command line. When invoked as
  # bash -c "$(curl -fsSL .../ct/app.sh)", the full script body — including
  # its source URLs — is visible in /proc/$$/cmdline.
  if [[ -r /proc/$$/cmdline ]]; then
    script_text=$(tr '\0' ' ' </proc/$$/cmdline 2>/dev/null) || true
  fi

  # Source 2: the on-disk script (bash ct/app.sh). Only consulted when the
  # command line yielded nothing URL-like.
  if [[ -z "$script_text" ]] || ! echo "$script_text" | grep -qE 'githubusercontent\.com|community-scripts\.org' 2>/dev/null; then
    if [[ -f "$0" ]] && [[ "$0" != *bash* ]]; then
      script_text=$(head -10 "$0" 2>/dev/null) || true
    fi
  fi

  # Extract "owner/repo": try a GitHub raw URL first, then the
  # community-scripts Gitea mirror.
  if [[ -n "$script_text" ]]; then
    detected=$(echo "$script_text" | grep -oE 'raw\.githubusercontent\.com/[^/]+/[^/]+' | head -1 | sed 's|raw\.githubusercontent\.com/||') || true
    if [[ -z "$detected" ]]; then
      detected=$(echo "$script_text" | grep -oE 'git\.community-scripts\.org/[^/]+/[^/]+' | head -1 | sed 's|git\.community-scripts\.org/||') || true
    fi
  fi

  # Canonicalize: official repos map to short names, anything unrecognized is
  # a fork ("external"), and no detection at all falls back to the dev repo
  # (CI rewrites ProxmoxVED -> ProxmoxVE on promotion).
  case "$detected" in
    community-scripts/ProxmoxVE) REPO_SOURCE="ProxmoxVE" ;;
    community-scripts/ProxmoxVED) REPO_SOURCE="ProxmoxVED" ;;
    "") REPO_SOURCE="ProxmoxVED" ;;
    *) REPO_SOURCE="external" ;;
  esac
  export REPO_SOURCE
}
# Run detection immediately when api.func is sourced
detect_repo_source
# ==============================================================================
# SECTION 1: ERROR CODE DESCRIPTIONS
@ -88,7 +153,7 @@ explain_exit_code() {
126) echo "Command invoked cannot execute (permission problem?)" ;;
127) echo "Command not found" ;;
128) echo "Invalid argument to exit" ;;
130) echo "Terminated by Ctrl+C (SIGINT)" ;;
130) echo "Aborted by user (SIGINT)" ;;
134) echo "Process aborted (SIGABRT - possibly Node.js heap overflow)" ;;
137) echo "Killed (SIGKILL / Out of memory?)" ;;
139) echo "Segmentation fault (core dumped)" ;;
@ -168,6 +233,60 @@ explain_exit_code() {
esac
}
# ------------------------------------------------------------------------------
# json_escape()
#
# - Escapes a string for safe JSON embedding
# - Handles backslashes, quotes, newlines, tabs, and carriage returns
# ------------------------------------------------------------------------------
json_escape() {
  # Escape a string so it can be embedded inside a double-quoted JSON value.
  # Order matters: backslashes first, so later escapes are not double-escaped.
  # Newlines become \n, carriage returns are stripped, tabs become \t.
  local s="$1"
  s=${s//\\/\\\\}
  # BUG FIX: the previous pattern ${s//"/\\"/} never escaped double quotes —
  # bash parsed the quoted pattern as '/\' with an empty replacement, so it
  # silently deleted '/\' sequences and left '"' unescaped, which breaks the
  # surrounding JSON payload. This is the correct quote escape:
  s=${s//\"/\\\"}
  s=${s//$'\n'/\\n}
  s=${s//$'\r'/}
  s=${s//$'\t'/\\t}
  # printf instead of echo: echo misparses values that start with '-n'/'-e'.
  printf '%s\n' "$s"
}
# ------------------------------------------------------------------------------
# get_error_text()
#
# - Returns last 20 lines of the active log (INSTALL_LOG or BUILD_LOG)
# - Falls back to combined log or BUILD_LOG if primary is not accessible
# - Handles container paths that don't exist on the host
# ------------------------------------------------------------------------------
get_error_text() {
  # Emit the last 20 lines (CR-stripped) of the most relevant log file so the
  # telemetry payload can carry a short error excerpt. Prints nothing when no
  # usable log is found.
  local log=""

  # Preferred source: the framework's active-log resolver if it is defined,
  # otherwise the install log, otherwise the build log.
  if declare -f get_active_logfile >/dev/null 2>&1; then
    log=$(get_active_logfile)
  elif [[ -n "${INSTALL_LOG:-}" ]]; then
    log="$INSTALL_LOG"
  elif [[ -n "${BUILD_LOG:-}" ]]; then
    log="$BUILD_LOG"
  fi

  # The chosen path may live inside the container (e.g. /root/.install-*) and
  # be missing/empty on the host; fall back to the host-side combined log
  # /tmp/<app>-<CTID>-<SESSION_ID>.log when it has content.
  if [[ -n "$log" && ! -s "$log" ]]; then
    if [[ -n "${CTID:-}" && -n "${SESSION_ID:-}" ]]; then
      local merged="/tmp/${NSAPP:-lxc}-${CTID}-${SESSION_ID}.log"
      [[ -s "$merged" ]] && log="$merged"
    fi
  fi

  # Last resort: a non-empty BUILD_LOG.
  if [[ -z "$log" || ! -s "$log" ]] && [[ -n "${BUILD_LOG:-}" && -s "${BUILD_LOG}" ]]; then
    log="$BUILD_LOG"
  fi

  if [[ -n "$log" && -s "$log" ]]; then
    tail -n 20 "$log" 2>/dev/null | sed 's/\r$//'
  fi
}
# ==============================================================================
# SECTION 2: TELEMETRY FUNCTIONS
# ==============================================================================
@ -288,6 +407,9 @@ detect_ram() {
# - Never blocks or fails script execution
# ------------------------------------------------------------------------------
post_to_api() {
# Prevent duplicate submissions (post_to_api is called from multiple places)
[[ "${POST_TO_API_DONE:-}" == "true" ]] && return 0
# Silent fail - telemetry should never break scripts
command -v curl &>/dev/null || {
[[ "${DEV_MODE:-}" == "true" ]] && echo "[DEBUG] curl not found, skipping" >&2
@ -317,7 +439,8 @@ post_to_api() {
detect_gpu
fi
local gpu_vendor="${GPU_VENDOR:-unknown}"
local gpu_model="${GPU_MODEL:-}"
local gpu_model
gpu_model=$(json_escape "${GPU_MODEL:-}")
local gpu_passthrough="${GPU_PASSTHROUGH:-unknown}"
# Detect CPU if not already set
@ -325,7 +448,8 @@ post_to_api() {
detect_cpu
fi
local cpu_vendor="${CPU_VENDOR:-unknown}"
local cpu_model="${CPU_MODEL:-}"
local cpu_model
cpu_model=$(json_escape "${CPU_MODEL:-}")
# Detect RAM if not already set
if [[ -z "${RAM_SPEED:-}" ]]; then
@ -375,6 +499,8 @@ EOF
-H "Content-Type: application/json" \
-d "$JSON_PAYLOAD" &>/dev/null || true
fi
POST_TO_API_DONE=true
}
# ------------------------------------------------------------------------------
@ -386,6 +512,7 @@ EOF
# * ct_type=2 (VM instead of LXC)
# * type="vm"
# * Disk size without 'G' suffix
# - Includes hardware detection: CPU, GPU, RAM speed
# - Only executes if DIAGNOSTICS=yes and RANDOM_UUID is set
# - Never blocks or fails script execution
# ------------------------------------------------------------------------------
@ -408,6 +535,29 @@ post_to_api_vm() {
pve_version=$(pveversion 2>/dev/null | awk -F'[/ ]' '{print $2}') || true
fi
# Detect GPU if not already set
if [[ -z "${GPU_VENDOR:-}" ]]; then
detect_gpu
fi
local gpu_vendor="${GPU_VENDOR:-unknown}"
local gpu_model
gpu_model=$(json_escape "${GPU_MODEL:-}")
local gpu_passthrough="${GPU_PASSTHROUGH:-unknown}"
# Detect CPU if not already set
if [[ -z "${CPU_VENDOR:-}" ]]; then
detect_cpu
fi
local cpu_vendor="${CPU_VENDOR:-unknown}"
local cpu_model
cpu_model=$(json_escape "${CPU_MODEL:-}")
# Detect RAM if not already set
if [[ -z "${RAM_SPEED:-}" ]]; then
detect_ram
fi
local ram_speed="${RAM_SPEED:-}"
# Remove 'G' suffix from disk size
local DISK_SIZE_API="${DISK_SIZE%G}"
@ -427,6 +577,12 @@ post_to_api_vm() {
"os_version": "${var_version:-}",
"pve_version": "${pve_version}",
"method": "${METHOD:-default}",
"cpu_vendor": "${cpu_vendor}",
"cpu_model": "${cpu_model}",
"gpu_vendor": "${gpu_vendor}",
"gpu_model": "${gpu_model}",
"gpu_passthrough": "${gpu_passthrough}",
"ram_speed": "${ram_speed}",
"repo_source": "${REPO_SOURCE}"
}
EOF
@ -457,9 +613,12 @@ post_update_to_api() {
# Silent fail - telemetry should never break scripts
command -v curl &>/dev/null || return 0
# Prevent duplicate submissions
# Support "force" mode (3rd arg) to bypass duplicate check for retries after cleanup
local force="${3:-}"
POST_UPDATE_DONE=${POST_UPDATE_DONE:-false}
[[ "$POST_UPDATE_DONE" == "true" ]] && return 0
if [[ "$POST_UPDATE_DONE" == "true" && "$force" != "force" ]]; then
return 0
fi
[[ "${DIAGNOSTICS:-no}" == "no" ]] && return 0
[[ -z "${RANDOM_UUID:-}" ]] && return 0
@ -470,12 +629,14 @@ post_update_to_api() {
# Get GPU info (if detected)
local gpu_vendor="${GPU_VENDOR:-unknown}"
local gpu_model="${GPU_MODEL:-}"
local gpu_model
gpu_model=$(json_escape "${GPU_MODEL:-}")
local gpu_passthrough="${GPU_PASSTHROUGH:-unknown}"
# Get CPU info (if detected)
local cpu_vendor="${CPU_VENDOR:-unknown}"
local cpu_model="${CPU_MODEL:-}"
local cpu_model
cpu_model=$(json_escape "${CPU_MODEL:-}")
# Get RAM info (if detected)
local ram_speed="${RAM_SPEED:-}"
@ -497,13 +658,21 @@ post_update_to_api() {
esac
# For failed/unknown status, resolve exit code and error description
local short_error=""
if [[ "$pb_status" == "failed" ]] || [[ "$pb_status" == "unknown" ]]; then
if [[ "$raw_exit_code" =~ ^[0-9]+$ ]]; then
exit_code="$raw_exit_code"
else
exit_code=1
fi
error=$(explain_exit_code "$exit_code")
local error_text=""
error_text=$(get_error_text)
if [[ -n "$error_text" ]]; then
error=$(json_escape "$error_text")
else
error=$(json_escape "$(explain_exit_code "$exit_code")")
fi
short_error=$(json_escape "$(explain_exit_code "$exit_code")")
error_category=$(categorize_error "$exit_code")
[[ -z "$error" ]] && error="Unknown error"
fi
@ -520,8 +689,9 @@ post_update_to_api() {
pve_version=$(pveversion 2>/dev/null | awk -F'[/ ]' '{print $2}') || true
fi
# Full payload including all fields - allows record creation if initial call failed
# The Go service will find the record by random_id and PATCH, or create if not found
local http_code=""
# ── Attempt 1: Full payload with complete error text ──
local JSON_PAYLOAD
JSON_PAYLOAD=$(
cat <<EOF
@ -553,11 +723,80 @@ post_update_to_api() {
EOF
)
# Fire-and-forget: never block, never fail
http_code=$(curl -sS -w "%{http_code}" -m "${TELEMETRY_TIMEOUT}" -X POST "${TELEMETRY_URL}" \
-H "Content-Type: application/json" \
-d "$JSON_PAYLOAD" -o /dev/null 2>/dev/null) || http_code="000"
if [[ "$http_code" =~ ^2[0-9]{2}$ ]]; then
POST_UPDATE_DONE=true
return 0
fi
# ── Attempt 2: Short error text (no full log) ──
sleep 1
local RETRY_PAYLOAD
RETRY_PAYLOAD=$(
cat <<EOF
{
"random_id": "${RANDOM_UUID}",
"type": "${TELEMETRY_TYPE:-lxc}",
"nsapp": "${NSAPP:-unknown}",
"status": "${pb_status}",
"ct_type": ${CT_TYPE:-1},
"disk_size": ${DISK_SIZE:-0},
"core_count": ${CORE_COUNT:-0},
"ram_size": ${RAM_SIZE:-0},
"os_type": "${var_os:-}",
"os_version": "${var_version:-}",
"pve_version": "${pve_version}",
"method": "${METHOD:-default}",
"exit_code": ${exit_code},
"error": "${short_error}",
"error_category": "${error_category}",
"install_duration": ${duration},
"cpu_vendor": "${cpu_vendor}",
"cpu_model": "${cpu_model}",
"gpu_vendor": "${gpu_vendor}",
"gpu_model": "${gpu_model}",
"gpu_passthrough": "${gpu_passthrough}",
"ram_speed": "${ram_speed}",
"repo_source": "${REPO_SOURCE}"
}
EOF
)
http_code=$(curl -sS -w "%{http_code}" -m "${TELEMETRY_TIMEOUT}" -X POST "${TELEMETRY_URL}" \
-H "Content-Type: application/json" \
-d "$RETRY_PAYLOAD" -o /dev/null 2>/dev/null) || http_code="000"
if [[ "$http_code" =~ ^2[0-9]{2}$ ]]; then
POST_UPDATE_DONE=true
return 0
fi
# ── Attempt 3: Minimal payload (bare minimum to set status) ──
sleep 2
local MINIMAL_PAYLOAD
MINIMAL_PAYLOAD=$(
cat <<EOF
{
"random_id": "${RANDOM_UUID}",
"type": "${TELEMETRY_TYPE:-lxc}",
"nsapp": "${NSAPP:-unknown}",
"status": "${pb_status}",
"exit_code": ${exit_code},
"error": "${short_error}",
"error_category": "${error_category}",
"install_duration": ${duration}
}
EOF
)
curl -sS -w "%{http_code}" -m "${TELEMETRY_TIMEOUT}" -X POST "${TELEMETRY_URL}" \
-H "Content-Type: application/json" \
-d "$JSON_PAYLOAD" -o /dev/null 2>&1 || true
-d "$MINIMAL_PAYLOAD" -o /dev/null 2>/dev/null || true
# Tried 3 times - mark as done regardless to prevent infinite loops
POST_UPDATE_DONE=true
}
@ -593,6 +832,9 @@ categorize_error() {
# Configuration errors
203 | 204 | 205 | 206 | 207 | 208) echo "config" ;;
# Aborted by user
130) echo "aborted" ;;
# Resource errors (OOM, etc)
137 | 134) echo "resource" ;;
@ -657,7 +899,13 @@ post_tool_to_api() {
if [[ "$status" == "failed" ]]; then
[[ ! "$exit_code" =~ ^[0-9]+$ ]] && exit_code=1
error=$(explain_exit_code "$exit_code")
local error_text=""
error_text=$(get_error_text)
if [[ -n "$error_text" ]]; then
error=$(json_escape "$error_text")
else
error=$(json_escape "$(explain_exit_code "$exit_code")")
fi
error_category=$(categorize_error "$exit_code")
fi
@ -718,7 +966,13 @@ post_addon_to_api() {
if [[ "$status" == "failed" ]]; then
[[ ! "$exit_code" =~ ^[0-9]+$ ]] && exit_code=1
error=$(explain_exit_code "$exit_code")
local error_text=""
error_text=$(get_error_text)
if [[ -n "$error_text" ]]; then
error=$(json_escape "$error_text")
else
error=$(json_escape "$(explain_exit_code "$exit_code")")
fi
error_category=$(categorize_error "$exit_code")
fi
@ -811,7 +1065,13 @@ post_update_to_api_extended() {
else
exit_code=1
fi
error=$(explain_exit_code "$exit_code")
local error_text=""
error_text=$(get_error_text)
if [[ -n "$error_text" ]]; then
error=$(json_escape "$error_text")
else
error=$(json_escape "$(explain_exit_code "$exit_code")")
fi
error_category=$(categorize_error "$exit_code")
[[ -z "$error" ]] && error="Unknown error"
fi

34
misc/data/.gitignore vendored Normal file
View File

@ -0,0 +1,34 @@
# If you prefer the allow list template instead of the deny list, see community template:
# https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore
#
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib
telemetry-service
migration/migrate
# Test binary, built with `go test -c`
*.test
# Code coverage profiles and other test artifacts
*.out
coverage.*
*.coverprofile
profile.cov
# Dependency directories (remove the comment below to include it)
# vendor/
# Go workspace file
go.work
go.work.sum
# env file
.env
# Editor/IDE
# .idea/
# .vscode/

View File

@ -24,7 +24,7 @@ ENV ENABLE_REQUEST_LOGGING="false"
# Cache config (optional)
ENV ENABLE_CACHE="true"
ENV CACHE_TTL_SECONDS="60"
ENV CACHE_TTL_SECONDS="300"
ENV ENABLE_REDIS="false"
# ENV REDIS_URL="redis://localhost:6379"

21
misc/data/LICENSE Normal file
View File

@ -0,0 +1,21 @@
MIT License
Copyright (c) 2026 Community Scripts
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

81
misc/data/README.md Normal file
View File

@ -0,0 +1,81 @@
# Telemetry Service
A standalone Go microservice that collects anonymous telemetry data from [ProxmoxVE](https://github.com/community-scripts/ProxmoxVE) and [ProxmoxVED](https://github.com/community-scripts/ProxmoxVED) script installations.
## Overview
This service acts as a telemetry ingestion layer between the bash installation scripts and a PocketBase backend. When users run scripts from the ProxmoxVE/ProxmoxVED repositories, optional anonymous usage data is sent here for aggregation and analysis.
**What gets collected:**
- Script name and installation status (success/failed)
- Container/VM type and resource allocation (CPU, RAM, disk)
- OS type and version
- Proxmox VE version
- Anonymous session ID (randomly generated UUID)
**What is NOT collected:**
- IP addresses (not logged, not stored)
- Hostnames or domain names
- User credentials or personal information
- Hardware identifiers (MAC addresses, serial numbers)
- Network configuration or internal IPs
- Any data that could identify a person or system
**What this enables:**
- Understanding which scripts are most popular
- Identifying scripts with high failure rates
- Tracking resource allocation trends
- Improving script quality based on real-world data
## Features
- **Telemetry Ingestion** - Receives and validates telemetry data from bash scripts
- **PocketBase Integration** - Stores data in PocketBase collections
- **Rate Limiting** - Configurable per-IP rate limiting to prevent abuse
- **Caching** - In-memory or Redis-backed caching support
- **Email Alerts** - SMTP-based alerts when failure rates exceed thresholds
- **Dashboard** - Built-in HTML dashboard for telemetry visualization
- **Migration Tool** - Migrate data from external sources to PocketBase
## Architecture
```
┌─────────────────┐ ┌───────────────────┐ ┌────────────┐
│ Bash Scripts │────▶│ Telemetry Service │────▶│ PocketBase │
│ (ProxmoxVE/VED) │ │ (this repo) │ │ Database │
└─────────────────┘ └───────────────────┘ └────────────┘
```
## Project Structure
```
├── service.go # Main service, HTTP handlers, rate limiting
├── cache.go # In-memory and Redis caching
├── alerts.go # SMTP alert system
├── dashboard.go # Dashboard HTML generation
├── migration/
│ ├── migrate.go # Data migration tool
│ └── migrate.sh # Migration shell script
├── Dockerfile # Container build
├── entrypoint.sh # Container entrypoint with migration support
└── go.mod # Go module definition
```
## Related Projects
- [ProxmoxVE](https://github.com/community-scripts/ProxmoxVE) - Proxmox VE Helper Scripts
- [ProxmoxVED](https://github.com/community-scripts/ProxmoxVED) - Proxmox VE Helper Scripts (Dev)
## Privacy & Compliance
This service is designed with privacy in mind and is **GDPR/DSGVO compliant**:
- ✅ **No personal data** - Only anonymous technical metrics are collected
- ✅ **No IP logging** - Request logging is disabled by default, IPs are never stored
- ✅ **Transparent** - All collected fields are documented and the code is open source
- ✅ **No tracking** - Session IDs are randomly generated and cannot be linked to users
- ✅ **No third parties** - Data is only stored in our self-hosted PocketBase instance
## License
MIT License - see [LICENSE](LICENSE) file.

View File

@ -134,7 +134,7 @@ func (a *Alerter) checkAndAlert() {
defer cancel()
// Fetch last hour's data
data, err := a.pb.FetchDashboardData(ctx, 1)
data, err := a.pb.FetchDashboardData(ctx, 1, "ProxmoxVE")
if err != nil {
log.Printf("WARN: alert check failed: %v", err)
return
@ -410,13 +410,13 @@ func (a *Alerter) fetchWeeklyReportData(ctx context.Context) (*WeeklyReportData,
year, week := lastMonday.ISOWeek()
// Fetch current week's data (7 days)
currentData, err := a.pb.FetchDashboardData(ctx, 7)
currentData, err := a.pb.FetchDashboardData(ctx, 7, "ProxmoxVE")
if err != nil {
return nil, fmt.Errorf("failed to fetch current week data: %w", err)
}
// Fetch previous week's data for comparison (14 days, we'll compare)
prevData, err := a.pb.FetchDashboardData(ctx, 14)
prevData, err := a.pb.FetchDashboardData(ctx, 14, "ProxmoxVE")
if err != nil {
// Non-fatal, just log
log.Printf("WARN: could not fetch previous week data: %v", err)

View File

@ -108,7 +108,7 @@ func (c *Cleaner) findStuckInstallations(ctx context.Context) ([]StuckRecord, er
req, err := http.NewRequestWithContext(ctx, http.MethodGet,
fmt.Sprintf("%s/api/collections/%s/records?filter=%s&perPage=100",
c.pb.baseURL, c.pb.devColl, filter),
c.pb.baseURL, c.pb.targetColl, filter),
nil,
)
if err != nil {

File diff suppressed because it is too large Load Diff

View File

@ -12,43 +12,43 @@ export POCKETBASE_COLLECTION="${POCKETBASE_COLLECTION:-$PB_TARGET_COLLECTION}"
# Run migration if enabled
if [ "$RUN_MIGRATION" = "true" ]; then
echo ""
echo "🔄 Migration mode enabled"
echo " Source: $MIGRATION_SOURCE_URL"
echo " Target: $POCKETBASE_URL"
echo " Collection: $POCKETBASE_COLLECTION"
echo ""
# Wait for PocketBase to be ready
echo "⏳ Waiting for PocketBase to be ready..."
RETRIES=30
until wget -q --spider "$POCKETBASE_URL/api/health" 2>/dev/null; do
RETRIES=$((RETRIES - 1))
if [ $RETRIES -le 0 ]; then
echo "❌ PocketBase not reachable after 30 attempts"
if [ "$MIGRATION_REQUIRED" = "true" ]; then
exit 1
fi
echo "⚠️ Continuing without migration..."
break
echo ""
echo "🔄 Migration mode enabled"
echo " Source: $MIGRATION_SOURCE_URL"
echo " Target: $POCKETBASE_URL"
echo " Collection: $POCKETBASE_COLLECTION"
echo ""
# Wait for PocketBase to be ready
echo "⏳ Waiting for PocketBase to be ready..."
RETRIES=30
until wget -q --spider "$POCKETBASE_URL/api/health" 2>/dev/null; do
RETRIES=$((RETRIES - 1))
if [ $RETRIES -le 0 ]; then
echo "❌ PocketBase not reachable after 30 attempts"
if [ "$MIGRATION_REQUIRED" = "true" ]; then
exit 1
fi
echo "⚠️ Continuing without migration..."
break
fi
echo " Waiting... ($RETRIES attempts left)"
sleep 2
done
if wget -q --spider "$POCKETBASE_URL/api/health" 2>/dev/null; then
echo "✅ PocketBase is ready"
echo ""
echo "🚀 Starting migration..."
/app/migrate || {
if [ "$MIGRATION_REQUIRED" = "true" ]; then
echo "❌ Migration failed!"
exit 1
fi
echo "⚠️ Migration failed, but continuing..."
}
echo ""
fi
echo " Waiting... ($RETRIES attempts left)"
sleep 2
done
if wget -q --spider "$POCKETBASE_URL/api/health" 2>/dev/null; then
echo "✅ PocketBase is ready"
echo ""
echo "🚀 Starting migration..."
/app/migrate || {
if [ "$MIGRATION_REQUIRED" = "true" ]; then
echo "❌ Migration failed!"
exit 1
fi
echo "⚠️ Migration failed, but continuing..."
}
echo ""
fi
fi
echo "🚀 Starting telemetry service..."

View File

@ -93,13 +93,13 @@ func main() {
pbCollection = os.Getenv("PB_TARGET_COLLECTION")
}
if pbCollection == "" {
pbCollection = "_dev_telemetry_data"
pbCollection = "telemetry"
}
// Auth collection
authCollection := os.Getenv("PB_AUTH_COLLECTION")
if authCollection == "" {
authCollection = "_dev_telemetry_service"
authCollection = "telemetry_service_user"
}
// Credentials

View File

@ -13,7 +13,7 @@ SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# Default values
POCKETBASE_URL="${1:-http://localhost:8090}"
POCKETBASE_COLLECTION="${2:-_dev_telemetry_data}"
POCKETBASE_COLLECTION="${2:-telemetry}"
echo "============================================="
echo " ProxmoxVED Data Migration Tool"
@ -27,10 +27,10 @@ echo ""
# Check if PocketBase is reachable
echo "🔍 Checking PocketBase connection..."
if ! curl -sf "$POCKETBASE_URL/api/health" > /dev/null 2>&1; then
echo "❌ Cannot reach PocketBase at $POCKETBASE_URL"
echo " Make sure PocketBase is running and the URL is correct."
exit 1
if ! curl -sf "$POCKETBASE_URL/api/health" >/dev/null 2>&1; then
echo "❌ Cannot reach PocketBase at $POCKETBASE_URL"
echo " Make sure PocketBase is running and the URL is correct."
exit 1
fi
echo "✅ PocketBase is reachable"
echo ""
@ -39,8 +39,8 @@ echo ""
echo "🔍 Checking source API..."
SUMMARY=$(curl -sf "https://api.htl-braunau.at/dev/data/summary" 2>/dev/null || echo "")
if [ -z "$SUMMARY" ]; then
echo "❌ Cannot reach source API"
exit 1
echo "❌ Cannot reach source API"
exit 1
fi
TOTAL=$(echo "$SUMMARY" | grep -o '"total_entries":[0-9]*' | cut -d: -f2)
@ -51,8 +51,8 @@ echo ""
read -p "⚠️ Do you want to start the migration? [y/N] " -n 1 -r
echo ""
if [[ ! $REPLY =~ ^[Yy]$ ]]; then
echo "Migration cancelled."
exit 0
echo "Migration cancelled."
exit 0
fi
echo ""

View File

@ -0,0 +1,106 @@
#!/bin/bash
# Post-migration script to fix timestamps in PocketBase
# Run this INSIDE the PocketBase container after migration completes
#
# The migration tool preserves each record's original creation time in an
# 'old_created' column; this script copies it back into PocketBase's own
# created/updated columns via direct SQLite access.
#
# Usage: ./fix-timestamps.sh
set -e

DB_PATH="/app/pb_data/data.db"

echo "==========================================================="
echo " Fix Timestamps in PocketBase"
echo "==========================================================="
echo ""

# Ensure the sqlite3 CLI is available (Alpine first, then Debian/Ubuntu).
# Braces are required: without them, 'apk ... || apt-get update && apt-get install'
# runs the Debian install step even after a successful 'apk add'.
if ! command -v sqlite3 &> /dev/null; then
  echo "sqlite3 not found. Installing..."
  apk add sqlite 2>/dev/null || { apt-get update && apt-get install -y sqlite3; }
fi

# Locate the PocketBase database: try common mount points before a full scan.
if [ ! -f "$DB_PATH" ]; then
  echo "Database not found at $DB_PATH"
  echo "Trying alternative paths..."
  if [ -f "/pb_data/data.db" ]; then
    DB_PATH="/pb_data/data.db"
  elif [ -f "/pb/pb_data/data.db" ]; then
    DB_PATH="/pb/pb_data/data.db"
  else
    DB_PATH=$(find / -name "data.db" 2>/dev/null | head -1)
  fi
  if [ -z "$DB_PATH" ] || [ ! -f "$DB_PATH" ]; then
    echo "Could not find PocketBase database!"
    exit 1
  fi
fi

echo "Database: $DB_PATH"
echo ""

# List tables
echo "Tables in database:"
sqlite3 "$DB_PATH" ".tables"
echo ""

# Auto-detect the telemetry table (falls back to asking the user).
echo "Looking for telemetry/installations table..."
TABLE_NAME=$(sqlite3 "$DB_PATH" ".tables" | tr ' ' '\n' | grep -E "telemetry|installations" | head -1)
if [ -z "$TABLE_NAME" ]; then
  echo "Could not auto-detect table. Available tables:"
  sqlite3 "$DB_PATH" ".tables"
  echo ""
  read -p "Enter table name: " TABLE_NAME
fi
echo "Using table: $TABLE_NAME"
echo ""

# Verify the 'old_created' column exists. '|| true' keeps 'set -e' happy when
# grep finds no match; 'grep -c' itself already prints 0 in that case (the
# previous '|| echo "0"' appended a second line and broke the -eq test below).
HAS_OLD_CREATED=$(sqlite3 "$DB_PATH" "PRAGMA table_info($TABLE_NAME);" | grep -c "old_created" || true)
if [ "$HAS_OLD_CREATED" -eq "0" ]; then
  echo "Column 'old_created' not found in table $TABLE_NAME"
  echo "Migration may not have been run with timestamp preservation."
  exit 1
fi

# Show sample data before update
echo "Sample data BEFORE update:"
sqlite3 "$DB_PATH" "SELECT id, created, old_created FROM $TABLE_NAME WHERE old_created IS NOT NULL AND old_created != '' LIMIT 3;"
echo ""

# Count records to update
COUNT=$(sqlite3 "$DB_PATH" "SELECT COUNT(*) FROM $TABLE_NAME WHERE old_created IS NOT NULL AND old_created != '';")
echo "Records to update: $COUNT"
echo ""

read -p "Proceed with timestamp update? [y/N] " -n 1 -r
echo ""
if [[ ! $REPLY =~ ^[Yy]$ ]]; then
  echo "Aborted."
  exit 0
fi

# Perform the update
echo "Updating timestamps..."
sqlite3 "$DB_PATH" "UPDATE $TABLE_NAME SET created = old_created, updated = old_created WHERE old_created IS NOT NULL AND old_created != '';"

# Show sample data after update
echo ""
echo "Sample data AFTER update:"
sqlite3 "$DB_PATH" "SELECT id, created, old_created FROM $TABLE_NAME LIMIT 3;"
echo ""
echo "==========================================================="
echo " Timestamp Update Complete!"
echo "==========================================================="
echo ""
echo "Next steps:"
echo "1. Verify data in PocketBase Admin UI"
echo "2. Remove the 'old_created' field from the collection schema"
echo ""

View File

@ -0,0 +1,77 @@
#!/bin/sh
# Direct SQLite Import - Pure Shell, FAST batch mode!
# Imports MongoDB Extended JSON directly into PocketBase SQLite
#
# Usage:
#   docker cp import-direct.sh pocketbase:/tmp/
#   docker cp data.json pocketbase:/tmp/
#   docker exec -it pocketbase sh -c "cd /tmp && chmod +x import-direct.sh && ./import-direct.sh"
set -e

JSON_FILE="${1:-/tmp/data.json}"
TABLE="${2:-telemetry}"
REPO="${3:-Proxmox VE}"
DB="${4:-/app/pb_data/data.db}"
BATCH=5000

echo "========================================================="
echo " Direct SQLite Import (Batch Mode)"
echo "========================================================="
echo "JSON: $JSON_FILE"
echo "Table: $TABLE"
echo "Repo: $REPO"
echo "Batch: $BATCH"
echo "---------------------------------------------------------"

# Install jq if missing
command -v jq >/dev/null || apk add --no-cache jq

# Optimize SQLite for bulk
sqlite3 "$DB" "PRAGMA journal_mode=WAL; PRAGMA synchronous=OFF; PRAGMA cache_size=100000;"

SQL_FILE="/tmp/batch.sql"
echo "[INFO] Converting JSON to SQL..."
START=$(date +%s)

# repo_source is loop-invariant: escape it once, not once per record.
REPO_ESC=$(echo "$REPO" | sed "s/'/''/g")

# Convert entire JSON to SQL file (much faster than line-by-line sqlite3 calls)
{
  echo "BEGIN TRANSACTION;"
  jq -r '.[] | @json' "$JSON_FILE" | while IFS= read -r r; do
    # Numeric fields may arrive as MongoDB Extended JSON {"$numberLong": "..."}.
    CT=$(echo "$r" | jq -r 'if .ct_type|type=="object" then .ct_type["$numberLong"] else .ct_type end // 0')
    DISK=$(echo "$r" | jq -r 'if .disk_size|type=="object" then .disk_size["$numberLong"] else .disk_size end // 0')
    CORE=$(echo "$r" | jq -r 'if .core_count|type=="object" then .core_count["$numberLong"] else .core_count end // 0')
    RAM=$(echo "$r" | jq -r 'if .ram_size|type=="object" then .ram_size["$numberLong"] else .ram_size end // 0')
    # String fields: double single quotes so they are safe inside SQL literals.
    OS=$(echo "$r" | jq -r '.os_type // ""' | sed "s/'/''/g")
    OSVER=$(echo "$r" | jq -r '.os_version // ""' | sed "s/'/''/g")
    DIS6=$(echo "$r" | jq -r '.disable_ip6 // "no"' | sed "s/'/''/g")
    APP=$(echo "$r" | jq -r '.nsapp // "unknown"' | sed "s/'/''/g")
    METH=$(echo "$r" | jq -r '.method // ""' | sed "s/'/''/g")
    PVE=$(echo "$r" | jq -r '.pveversion // ""' | sed "s/'/''/g")
    STAT=$(echo "$r" | jq -r '.status // "unknown"')
    # Legacy source used "done" where the new schema uses "success".
    [ "$STAT" = "done" ] && STAT="success"
    RID=$(echo "$r" | jq -r '.random_id // ""' | sed "s/'/''/g")
    TYPE=$(echo "$r" | jq -r '.type // "lxc"' | sed "s/'/''/g")
    ERR=$(echo "$r" | jq -r '.error // ""' | sed "s/'/''/g")
    DATE=$(echo "$r" | jq -r 'if .created_at|type=="object" then .created_at["$date"] else .created_at end // ""')
    # 15-char PocketBase-style record id. Stream urandom until tr has emitted
    # 15 matching chars: a fixed 100-byte read averages only ~14 usable chars
    # (100 * 36/256) and regularly produced short, invalid ids.
    ID=$(tr -dc 'a-z0-9' </dev/urandom | head -c 15)
    echo "INSERT OR IGNORE INTO $TABLE (id,created,updated,ct_type,disk_size,core_count,ram_size,os_type,os_version,disableip6,nsapp,method,pve_version,status,random_id,type,error,repo_source) VALUES ('$ID','$DATE','$DATE',$CT,$DISK,$CORE,$RAM,'$OS','$OSVER','$DIS6','$APP','$METH','$PVE','$STAT','$RID','$TYPE','$ERR','$REPO_ESC');"
  done
  echo "COMMIT;"
} > "$SQL_FILE"

MID=$(date +%s)
echo "[INFO] SQL generated in $((MID - START))s"
echo "[INFO] Importing into SQLite..."
sqlite3 "$DB" < "$SQL_FILE"
END=$(date +%s)

# Line count includes the BEGIN/COMMIT wrapper lines, hence the "- 2" below.
COUNT=$(wc -l < "$SQL_FILE")
rm -f "$SQL_FILE"

echo "========================================================="
echo "Done! ~$((COUNT - 2)) records in $((END - START)) seconds"
echo "========================================================="

View File

@ -0,0 +1,89 @@
#!/bin/bash
# Migration script for Proxmox VE data
# Run directly on the server machine
#
# Usage: ./migrate-linux.sh
#
# Prerequisites:
# - Go installed (apt install golang-go)
# - Network access to source API and PocketBase
set -e

echo "==========================================================="
echo " Proxmox VE Data Migration to PocketBase"
echo "==========================================================="

# Configuration - EDIT THESE VALUES
export MIGRATION_SOURCE_URL="https://api.htl-braunau.at/data"
export POCKETBASE_URL="http://db.community-scripts.org"
export POCKETBASE_COLLECTION="telemetry"
export PB_AUTH_COLLECTION="_superusers"
export PB_IDENTITY="db_admin@community-scripts.org"
export PB_PASSWORD="YOUR_PASSWORD_HERE" # <-- CHANGE THIS!
export REPO_SOURCE="Proxmox VE"
export DATE_UNTIL="2026-02-10"
export BATCH_SIZE="500"

# Optional: Resume from specific page
# export START_PAGE="100"
# Optional: Only import records after this date
# export DATE_FROM="2020-01-01"

# Fail fast if the placeholder credential was not replaced; otherwise the
# migration tool only errors out much later with a PocketBase auth failure.
if [ "$PB_PASSWORD" = "YOUR_PASSWORD_HERE" ]; then
  echo "ERROR: PB_PASSWORD still contains the placeholder value."
  echo "Edit this script and set your real PocketBase password first."
  exit 1
fi

echo ""
echo "Configuration:"
echo " Source: $MIGRATION_SOURCE_URL"
echo " Target: $POCKETBASE_URL"
echo " Collection: $POCKETBASE_COLLECTION"
echo " Repo: $REPO_SOURCE"
echo " Until: $DATE_UNTIL"
echo " Batch: $BATCH_SIZE"
echo ""

# Check if Go is installed
if ! command -v go &> /dev/null; then
  echo "Go is not installed. Installing..."
  apt-get update && apt-get install -y golang-go
fi

# migrate.go must sit next to this script; we build it locally.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
MIGRATE_GO="$SCRIPT_DIR/migrate.go"
if [ ! -f "$MIGRATE_GO" ]; then
  echo "migrate.go not found in $SCRIPT_DIR"
  echo "Please copy migrate.go to this directory first."
  exit 1
fi

echo "Building migration tool..."
cd "$SCRIPT_DIR"
go build -o migrate migrate.go

echo ""
echo "Starting migration..."
echo "Press Ctrl+C to stop (you can resume later with START_PAGE)"
echo ""
./migrate

echo ""
echo "==========================================================="
echo " Post-Migration Steps"
echo "==========================================================="
echo ""
echo "1. Connect to PocketBase container:"
echo " docker exec -it <pocketbase-container> sh"
echo ""
echo "2. Find the table name:"
echo " sqlite3 /app/pb_data/data.db '.tables'"
echo ""
echo "3. Update timestamps (replace <table> with actual name):"
echo " sqlite3 /app/pb_data/data.db \"UPDATE <table> SET created = old_created, updated = old_created WHERE old_created IS NOT NULL AND old_created != ''\""
echo ""
echo "4. Verify timestamps:"
echo " sqlite3 /app/pb_data/data.db \"SELECT created, old_created FROM <table> LIMIT 5\""
echo ""
echo "5. Remove old_created field in PocketBase Admin UI"
echo ""

File diff suppressed because it is too large Load Diff

View File

@ -13,7 +13,7 @@ SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# Default values
POCKETBASE_URL="${1:-http://localhost:8090}"
POCKETBASE_COLLECTION="${2:-_dev_telemetry_data}"
POCKETBASE_COLLECTION="${2:-telemetry}"
echo "============================================="
echo " ProxmoxVED Data Migration Tool"

View File

@ -95,13 +95,13 @@ func main() {
pbCollection = os.Getenv("PB_TARGET_COLLECTION")
}
if pbCollection == "" {
pbCollection = "_dev_telemetry_data"
pbCollection = "telemetry"
}
// Auth collection
authCollection := os.Getenv("PB_AUTH_COLLECTION")
if authCollection == "" {
authCollection = "_dev_telemetry_service"
authCollection = "telemetry_service_user"
}
// Credentials - prefer admin auth for timestamp preservation

View File

@ -24,11 +24,10 @@ type Config struct {
// PocketBase
PBBaseURL string
PBAuthCollection string // "_dev_telemetry_service"
PBAuthCollection string // PB auth collection name (from env)
PBIdentity string // email
PBPassword string
PBTargetColl string // "_dev_telemetry_data" (dev default)
PBLiveTargetColl string // "_live_telemetry_data" (production)
PBTargetColl string // PB data collection name (from env)
// Limits
MaxBodyBytes int64
@ -104,10 +103,10 @@ type TelemetryIn struct {
ErrorCategory string `json:"error_category,omitempty"` // "network", "storage", "dependency", "permission", "timeout", "unknown"
// Repository source for collection routing
RepoSource string `json:"repo_source,omitempty"` // "community-scripts/ProxmoxVE" or "community-scripts/ProxmoxVED"
RepoSource string `json:"repo_source,omitempty"` // "ProxmoxVE", "ProxmoxVED", or "external"
}
// TelemetryOut is sent to PocketBase (matches _dev_telemetry_data collection)
// TelemetryOut is sent to PocketBase (matches telemetry collection)
type TelemetryOut struct {
RandomID string `json:"random_id"`
Type string `json:"type"`
@ -133,6 +132,9 @@ type TelemetryOut struct {
RAMSpeed string `json:"ram_speed,omitempty"`
InstallDuration int `json:"install_duration,omitempty"`
ErrorCategory string `json:"error_category,omitempty"`
// Repository source: "ProxmoxVE", "ProxmoxVED", or "external"
RepoSource string `json:"repo_source,omitempty"`
}
// TelemetryStatusUpdate contains only fields needed for status updates
@ -150,10 +152,11 @@ type TelemetryStatusUpdate struct {
RAMSpeed string `json:"ram_speed,omitempty"`
}
// Allowed values for 'repo_source' field — controls collection routing
// Allowed values for 'repo_source' field
var allowedRepoSource = map[string]bool{
"community-scripts/ProxmoxVE": true,
"community-scripts/ProxmoxVED": true,
"ProxmoxVE": true,
"ProxmoxVED": true,
"external": true,
}
type PBClient struct {
@ -161,8 +164,7 @@ type PBClient struct {
authCollection string
identity string
password string
devColl string // "_dev_telemetry_data"
liveColl string // "_live_telemetry_data"
targetColl string // single collection for all telemetry data
mu sync.Mutex
token string
@ -176,25 +178,13 @@ func NewPBClient(cfg Config) *PBClient {
authCollection: cfg.PBAuthCollection,
identity: cfg.PBIdentity,
password: cfg.PBPassword,
devColl: cfg.PBTargetColl,
liveColl: cfg.PBLiveTargetColl,
targetColl: cfg.PBTargetColl,
http: &http.Client{
Timeout: cfg.RequestTimeout,
},
}
}
// resolveCollection maps a repo_source value to the correct PocketBase collection.
// - "community-scripts/ProxmoxVE" → live collection
// - "community-scripts/ProxmoxVED" → dev collection
// - empty / unknown → dev collection (safe default)
func (p *PBClient) resolveCollection(repoSource string) string {
if repoSource == "community-scripts/ProxmoxVE" && p.liveColl != "" {
return p.liveColl
}
return p.devColl
}
func (p *PBClient) ensureAuth(ctx context.Context) error {
p.mu.Lock()
defer p.mu.Unlock()
@ -246,8 +236,8 @@ func (p *PBClient) ensureAuth(ctx context.Context) error {
return nil
}
// FindRecordByRandomID searches for an existing record by random_id in the given collection
func (p *PBClient) FindRecordByRandomID(ctx context.Context, coll, randomID string) (string, error) {
// FindRecordByRandomID searches for an existing record by random_id
func (p *PBClient) FindRecordByRandomID(ctx context.Context, randomID string) (string, error) {
if err := p.ensureAuth(ctx); err != nil {
return "", err
}
@ -256,7 +246,7 @@ func (p *PBClient) FindRecordByRandomID(ctx context.Context, coll, randomID stri
filter := fmt.Sprintf("random_id='%s'", randomID)
req, err := http.NewRequestWithContext(ctx, http.MethodGet,
fmt.Sprintf("%s/api/collections/%s/records?filter=%s&fields=id&perPage=1",
p.baseURL, coll, filter),
p.baseURL, p.targetColl, filter),
nil,
)
if err != nil {
@ -290,14 +280,14 @@ func (p *PBClient) FindRecordByRandomID(ctx context.Context, coll, randomID stri
}
// UpdateTelemetryStatus updates only status, error, and exit_code of an existing record
func (p *PBClient) UpdateTelemetryStatus(ctx context.Context, coll, recordID string, update TelemetryStatusUpdate) error {
func (p *PBClient) UpdateTelemetryStatus(ctx context.Context, recordID string, update TelemetryStatusUpdate) error {
if err := p.ensureAuth(ctx); err != nil {
return err
}
b, _ := json.Marshal(update)
req, err := http.NewRequestWithContext(ctx, http.MethodPatch,
fmt.Sprintf("%s/api/collections/%s/records/%s", p.baseURL, coll, recordID),
fmt.Sprintf("%s/api/collections/%s/records/%s", p.baseURL, p.targetColl, recordID),
bytes.NewReader(b),
)
if err != nil {
@ -319,8 +309,7 @@ func (p *PBClient) UpdateTelemetryStatus(ctx context.Context, coll, recordID str
}
// FetchRecordsPaginated retrieves records with pagination and optional filters.
// Uses devColl by default (dashboard shows dev data); for live data, use separate endpoint if needed.
func (p *PBClient) FetchRecordsPaginated(ctx context.Context, page, limit int, status, app, osType, sortField string) ([]TelemetryRecord, int, error) {
func (p *PBClient) FetchRecordsPaginated(ctx context.Context, page, limit int, status, app, osType, typeFilter, sortField, repoSource string) ([]TelemetryRecord, int, error) {
if err := p.ensureAuth(ctx); err != nil {
return nil, 0, err
}
@ -336,6 +325,12 @@ func (p *PBClient) FetchRecordsPaginated(ctx context.Context, page, limit int, s
if osType != "" {
filters = append(filters, fmt.Sprintf("os_type='%s'", osType))
}
if typeFilter != "" {
filters = append(filters, fmt.Sprintf("type='%s'", typeFilter))
}
if repoSource != "" {
filters = append(filters, fmt.Sprintf("repo_source='%s'", repoSource))
}
filterStr := ""
if len(filters) > 0 {
@ -361,7 +356,7 @@ func (p *PBClient) FetchRecordsPaginated(ctx context.Context, page, limit int, s
}
reqURL := fmt.Sprintf("%s/api/collections/%s/records?sort=%s&page=%d&perPage=%d%s",
p.baseURL, p.devColl, sort, page, limit, filterStr)
p.baseURL, p.targetColl, sort, page, limit, filterStr)
req, err := http.NewRequestWithContext(ctx, http.MethodGet, reqURL, nil)
if err != nil {
@ -391,22 +386,18 @@ func (p *PBClient) FetchRecordsPaginated(ctx context.Context, page, limit int, s
}
// UpsertTelemetry handles both creation and updates intelligently.
// Routes to the correct PocketBase collection based on repoSource:
// - "community-scripts/ProxmoxVE" → _live_telemetry_data
// - "community-scripts/ProxmoxVED" → _dev_telemetry_data
// All records go to the same collection; repo_source is stored as a field.
//
// For status="installing": always creates a new record.
// For status!="installing": updates existing record (found by random_id).
func (p *PBClient) UpsertTelemetry(ctx context.Context, payload TelemetryOut, repoSource string) error {
coll := p.resolveCollection(repoSource)
func (p *PBClient) UpsertTelemetry(ctx context.Context, payload TelemetryOut) error {
// For "installing" status, always create new record
if payload.Status == "installing" {
return p.CreateTelemetry(ctx, coll, payload)
return p.CreateTelemetry(ctx, payload)
}
// For status updates (success/failed/unknown), find and update existing record
recordID, err := p.FindRecordByRandomID(ctx, coll, payload.RandomID)
recordID, err := p.FindRecordByRandomID(ctx, payload.RandomID)
if err != nil {
// Search failed, log and return error
return fmt.Errorf("cannot find record to update: %w", err)
@ -415,7 +406,7 @@ func (p *PBClient) UpsertTelemetry(ctx context.Context, payload TelemetryOut, re
if recordID == "" {
// Record not found - this shouldn't happen normally
// Create a full record as fallback
return p.CreateTelemetry(ctx, coll, payload)
return p.CreateTelemetry(ctx, payload)
}
// Update only status, error, exit_code, and new metrics fields
@ -432,17 +423,17 @@ func (p *PBClient) UpsertTelemetry(ctx context.Context, payload TelemetryOut, re
CPUModel: payload.CPUModel,
RAMSpeed: payload.RAMSpeed,
}
return p.UpdateTelemetryStatus(ctx, coll, recordID, update)
return p.UpdateTelemetryStatus(ctx, recordID, update)
}
func (p *PBClient) CreateTelemetry(ctx context.Context, coll string, payload TelemetryOut) error {
func (p *PBClient) CreateTelemetry(ctx context.Context, payload TelemetryOut) error {
if err := p.ensureAuth(ctx); err != nil {
return err
}
b, _ := json.Marshal(payload)
req, err := http.NewRequestWithContext(ctx, http.MethodPost,
fmt.Sprintf("%s/api/collections/%s/records", p.baseURL, coll),
fmt.Sprintf("%s/api/collections/%s/records", p.baseURL, p.targetColl),
bytes.NewReader(b),
)
if err != nil {
@ -730,9 +721,9 @@ func validate(in *TelemetryIn) error {
return errors.New("invalid install_duration (max 24h)")
}
// Validate repo_source: must be an allowed repository or empty
// Validate repo_source: must be a known value or empty
if in.RepoSource != "" && !allowedRepoSource[in.RepoSource] {
return errors.New("invalid repo_source (must be 'community-scripts/ProxmoxVE' or 'community-scripts/ProxmoxVED')")
return fmt.Errorf("rejected repo_source '%s' (must be 'ProxmoxVE', 'ProxmoxVED', or 'external')", in.RepoSource)
}
return nil
@ -755,11 +746,10 @@ func main() {
TrustedProxiesCIDR: splitCSV(env("TRUSTED_PROXIES_CIDR", "")),
PBBaseURL: mustEnv("PB_URL"),
PBAuthCollection: env("PB_AUTH_COLLECTION", "_dev_telemetry_service"),
PBAuthCollection: mustEnv("PB_AUTH_COLLECTION"),
PBIdentity: mustEnv("PB_IDENTITY"),
PBPassword: mustEnv("PB_PASSWORD"),
PBTargetColl: env("PB_TARGET_COLLECTION", "_dev_telemetry_data"),
PBLiveTargetColl: env("PB_LIVE_TARGET_COLLECTION", "_live_telemetry_data"),
PBTargetColl: mustEnv("PB_TARGET_COLLECTION"),
MaxBodyBytes: envInt64("MAX_BODY_BYTES", 1024),
RateLimitRPM: envInt("RATE_LIMIT_RPM", 60),
@ -772,7 +762,7 @@ func main() {
// Cache config
RedisURL: env("REDIS_URL", ""),
EnableRedis: envBool("ENABLE_REDIS", false),
CacheTTL: time.Duration(envInt("CACHE_TTL_SECONDS", 60)) * time.Second,
CacheTTL: time.Duration(envInt("CACHE_TTL_SECONDS", 300)) * time.Second,
CacheEnabled: envBool("ENABLE_CACHE", true),
// Alert config
@ -870,7 +860,7 @@ func main() {
ctx, cancel := context.WithTimeout(r.Context(), 5*time.Second)
defer cancel()
data, err := pb.FetchDashboardData(ctx, 1) // Last 24h only for metrics
data, err := pb.FetchDashboardData(ctx, 1, "ProxmoxVE") // Last 24h, production only for metrics
if err != nil {
http.Error(w, "failed to fetch metrics", http.StatusInternalServerError)
return
@ -896,22 +886,31 @@ func main() {
// Dashboard API endpoint (with caching)
mux.HandleFunc("/api/dashboard", func(w http.ResponseWriter, r *http.Request) {
days := 30
days := 7 // Default: 7 days
if d := r.URL.Query().Get("days"); d != "" {
fmt.Sscanf(d, "%d", &days)
if days < 1 {
days = 1
}
if days > 365 {
days = 365
// days=0 means "all entries", negative values are invalid
if days < 0 {
days = 7
}
}
ctx, cancel := context.WithTimeout(r.Context(), 10*time.Second)
// repo_source filter (default: ProxmoxVE)
repoSource := r.URL.Query().Get("repo")
if repoSource == "" {
repoSource = "ProxmoxVE"
}
// "all" means no filter
if repoSource == "all" {
repoSource = ""
}
// Increase timeout for large datasets (dashboard aggregation takes time)
ctx, cancel := context.WithTimeout(r.Context(), 120*time.Second)
defer cancel()
// Try cache first
cacheKey := fmt.Sprintf("dashboard:%d", days)
cacheKey := fmt.Sprintf("dashboard:%d:%s", days, repoSource)
var data *DashboardData
if cfg.CacheEnabled && cache.Get(ctx, cacheKey, &data) {
w.Header().Set("Content-Type", "application/json")
@ -920,7 +919,7 @@ func main() {
return
}
data, err := pb.FetchDashboardData(ctx, days)
data, err := pb.FetchDashboardData(ctx, days, repoSource)
if err != nil {
log.Printf("dashboard fetch failed: %v", err)
http.Error(w, "failed to fetch data", http.StatusInternalServerError)
@ -944,7 +943,12 @@ func main() {
status := r.URL.Query().Get("status")
app := r.URL.Query().Get("app")
osType := r.URL.Query().Get("os")
typeFilter := r.URL.Query().Get("type")
sort := r.URL.Query().Get("sort")
repoSource := r.URL.Query().Get("repo")
if repoSource == "" {
repoSource = "ProxmoxVE" // Default filter: production data
}
if p := r.URL.Query().Get("page"); p != "" {
fmt.Sscanf(p, "%d", &page)
@ -965,7 +969,7 @@ func main() {
ctx, cancel := context.WithTimeout(r.Context(), 10*time.Second)
defer cancel()
records, total, err := pb.FetchRecordsPaginated(ctx, page, limit, status, app, osType, sort)
records, total, err := pb.FetchRecordsPaginated(ctx, page, limit, status, app, osType, typeFilter, sort, repoSource)
if err != nil {
log.Printf("records fetch failed: %v", err)
http.Error(w, "failed to fetch records", http.StatusInternalServerError)
@ -1052,6 +1056,9 @@ func main() {
return
}
if err := validate(&in); err != nil {
if cfg.EnableReqLogging {
log.Printf("telemetry rejected: %v", err)
}
http.Error(w, "invalid payload", http.StatusBadRequest)
return
}
@ -1080,6 +1087,7 @@ func main() {
RAMSpeed: in.RAMSpeed,
InstallDuration: in.InstallDuration,
ErrorCategory: in.ErrorCategory,
RepoSource: in.RepoSource,
}
_ = computeHash(out) // For future deduplication
@ -1087,8 +1095,8 @@ func main() {
defer cancel()
// Upsert: Creates new record if random_id doesn't exist, updates if it does
// Routes to correct collection based on repo_source
if err := pb.UpsertTelemetry(ctx, out, in.RepoSource); err != nil {
// repo_source is stored as a field on the record for filtering
if err := pb.UpsertTelemetry(ctx, out); err != nil {
// GDPR: don't log raw payload, don't log IPs; log only generic error
log.Printf("pocketbase write failed: %v", err)
http.Error(w, "upstream error", http.StatusBadGateway)
@ -1096,7 +1104,7 @@ func main() {
}
if cfg.EnableReqLogging {
log.Printf("telemetry accepted nsapp=%s status=%s", out.NSAPP, out.Status)
log.Printf("telemetry accepted nsapp=%s status=%s repo=%s", out.NSAPP, out.Status, in.RepoSource)
}
w.WriteHeader(http.StatusAccepted)
@ -1109,6 +1117,22 @@ func main() {
ReadHeaderTimeout: 3 * time.Second,
}
// Background cache warmup job - pre-populates cache for common dashboard queries
if cfg.CacheEnabled {
go func() {
// Initial warmup after startup
time.Sleep(10 * time.Second)
warmupDashboardCache(pb, cache, cfg)
// Periodic refresh (every 4 minutes, before 5-minute TTL expires)
ticker := time.NewTicker(4 * time.Minute)
for range ticker.C {
warmupDashboardCache(pb, cache, cfg)
}
}()
log.Println("background cache warmup enabled")
}
log.Printf("telemetry-ingest listening on %s", cfg.ListenAddr)
log.Fatal(srv.ListenAndServe())
}
@ -1194,4 +1218,44 @@ func splitCSV(s string) []string {
}
}
return out
}
// warmupDashboardCache pre-populates the cache with common dashboard queries
// so interactive /api/dashboard requests hit warm entries instead of paying
// the full PocketBase aggregation cost on every miss.
//
// NOTE: the cache key format "dashboard:<days>:<repo>" must stay in sync
// with the key built inside the /api/dashboard handler.
func warmupDashboardCache(pb *PBClient, cache *Cache, cfg Config) {
	log.Println("[CACHE] Starting dashboard cache warmup...")
	// Common day ranges and repos to pre-cache.
	// An empty repo string means "all repositories" (no repo_source filter).
	dayRanges := []int{7, 30, 90}
	repos := []string{"ProxmoxVE", ""} // ProxmoxVE and "all"
	warmed := 0
	for _, days := range dayRanges {
		for _, repo := range repos {
			// Generous per-query timeout: dashboard aggregation over large
			// datasets can be slow (matches the handler's 120s budget).
			ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second)
			cacheKey := fmt.Sprintf("dashboard:%d:%s", days, repo)
			// Check if already cached
			var existing *DashboardData
			if cache.Get(ctx, cacheKey, &existing) {
				cancel()
				continue // Already cached, skip
			}
			// Fetch and cache
			data, err := pb.FetchDashboardData(ctx, days, repo)
			cancel()
			if err != nil {
				log.Printf("[CACHE] Warmup failed for days=%d repo=%s: %v", days, repo, err)
				continue
			}
			// Cache write errors are deliberately ignored: warmup is best-effort
			// and a failed Set only means the next request repopulates the entry.
			_ = cache.Set(context.Background(), cacheKey, data, cfg.CacheTTL)
			warmed++
			log.Printf("[CACHE] Warmed cache for days=%d repo=%s (%d installs)", days, repo, data.TotalAllTime)
		}
	}
	log.Printf("[CACHE] Dashboard cache warmup complete (%d entries)", warmed)
}

View File

@ -1778,7 +1778,7 @@ function fetch_and_deploy_gh_release() {
local app="$1"
local repo="$2"
local mode="${3:-tarball}" # tarball | binary | prebuild | singlefile | tag
local version="${APPLICATION_VERSION:-${4:-latest}}"
local version="${var_appversion:-${4:-latest}}"
local target="${5:-/opt/$app}"
local asset_pattern="${6:-}"
@ -2246,7 +2246,7 @@ function fetch_and_deploy_codeberg_release() {
local app="$1"
local repo="$2"
local mode="${3:-tarball}" # tarball | binary | prebuild | singlefile | tag
local version="${APPLICATION_VERSION:-${4:-latest}}"
local version="${var_appversion:-${4:-latest}}"
local target="${5:-/opt/$app}"
local asset_pattern="${6:-}"

588
vm/cachyos-vm.sh Normal file
View File

@ -0,0 +1,588 @@
#!/usr/bin/env bash
# Copyright (c) 2021-2026 community-scripts ORG
# Author: MickLesk (CanbiZ)
# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
# ==============================================================================
# CachyOS VM - Creates a CachyOS Virtual Machine
# CachyOS is a performance-focused Arch Linux distribution with optimized
# packages, custom kernels, and various desktop environment options.
# ==============================================================================
source /dev/stdin <<<$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/api.func)
# Clear the terminal and print the CachyOS ASCII-art banner.
# The quoted heredoc delimiter prevents any expansion inside the art.
function header_info {
  clear
  cat <<"EOF"
______ __ ____ _____
/ ____/___ ______/ /_ __ __/ __ \/ ___/
/ / / __ `/ ___/ __ \/ / / / / / /\__ \
/ /___/ /_/ / /__/ / / / /_/ / /_/ /___/ /
\____/\__,_/\___/_/ /_/\__, /\____//____/
/____/
 Performance-Optimized Arch Linux
EOF
}
header_info
echo -e "\n Loading..."
RANDOM_UUID="$(cat /proc/sys/kernel/random/uuid)"
METHOD=""
NSAPP="cachyos-vm"
var_os="cachyos"
var_version=" "
GEN_MAC=02:$(openssl rand -hex 5 | awk '{print toupper($0)}' | sed 's/\(..\)/\1:/g; s/.$//')
YW=$(echo "\033[33m")
BL=$(echo "\033[36m")
RD=$(echo "\033[01;31m")
BGN=$(echo "\033[4;92m")
GN=$(echo "\033[1;92m")
DGN=$(echo "\033[32m")
CL=$(echo "\033[m")
BOLD=$(echo "\033[1m")
BFR="\\r\\033[K"
HOLD=" "
TAB=" "
CM="${TAB}✔️${TAB}${CL}"
CROSS="${TAB}✖️${TAB}${CL}"
INFO="${TAB}💡${TAB}${CL}"
OS="${TAB}🖥️${TAB}${CL}"
CONTAINERTYPE="${TAB}📦${TAB}${CL}"
DISKSIZE="${TAB}💾${TAB}${CL}"
CPUCORE="${TAB}🧠${TAB}${CL}"
RAMSIZE="${TAB}🛠️${TAB}${CL}"
CONTAINERID="${TAB}🆔${TAB}${CL}"
HOSTNAME="${TAB}🏠${TAB}${CL}"
BRIDGE="${TAB}🌉${TAB}${CL}"
GATEWAY="${TAB}🌐${TAB}${CL}"
DEFAULT="${TAB}⚙️${TAB}${CL}"
MACADDRESS="${TAB}🔗${TAB}${CL}"
VLANTAG="${TAB}🏷️${TAB}${CL}"
CREATING="${TAB}🚀${TAB}${CL}"
ADVANCED="${TAB}🧩${TAB}${CL}"
THIN="discard=on,ssd=1,"
set -e
trap 'error_handler $LINENO "$BASH_COMMAND"' ERR
trap cleanup EXIT
trap 'post_update_to_api "failed" "INTERRUPTED"' SIGINT
trap 'post_update_to_api "failed" "TERMINATED"' SIGTERM
# ERR-trap handler: reports the failing command to the telemetry API,
# prints a formatted diagnostic, and tears down any partially-created VM.
# Arguments: $1 - line number of the failure, $2 - the failing command.
function error_handler() {
  local rc=$?
  local failed_line=$1
  local failed_cmd=$2
  post_update_to_api "failed" "${failed_cmd}"
  printf '\n%b\n\n' "${RD}[ERROR]${CL} in line ${RD}${failed_line}${CL}: exit code ${RD}${rc}${CL}: while executing command ${YW}${failed_cmd}${CL}"
  cleanup_vmid
}
# Return the first VM/CT ID >= pvesh's suggested next id that is not used
# by an existing QEMU/LXC config file or referenced by a leftover LVM
# logical volume.
function get_valid_nextid() {
  local candidate
  candidate=$(pvesh get /cluster/nextid)
  while [ -f "/etc/pve/qemu-server/${candidate}.conf" ] ||
    [ -f "/etc/pve/lxc/${candidate}.conf" ] ||
    lvs --noheadings -o lv_name | grep -qE "(^|[-_])${candidate}($|[-_])"; do
    candidate=$((candidate + 1))
  done
  echo "$candidate"
}
# Stop and destroy the VM identified by the global $VMID, if it exists.
# Best-effort: all qm output/errors are suppressed so this is safe to call
# from the ERR trap handler.
# Fix: quote $VMID (SC2086) so an empty/unset VMID cannot silently turn
# "qm status" into a different command line.
function cleanup_vmid() {
  if qm status "$VMID" &>/dev/null; then
    qm stop "$VMID" &>/dev/null
    qm destroy "$VMID" &>/dev/null
  fi
}
# EXIT-trap handler: leave the temp working directory and remove it.
# Fixes over the original:
#  - `popd` is guarded: if the script exits before pushd ran (the trap is
#    installed earlier), a failing popd under `set -e` would previously
#    abort the handler and leak the temp dir.
#  - `rm -rf` is quoted, uses `--`, and is skipped when TEMP_DIR is unset
#    or empty, preventing an accidental argument-less / glob-expanded rm.
function cleanup() {
  popd >/dev/null 2>&1 || true
  if [ -n "${TEMP_DIR:-}" ]; then
    rm -rf -- "$TEMP_DIR"
  fi
}
# Working directory for any transient artifacts; removed by the EXIT trap.
TEMP_DIR=$(mktemp -d)
pushd $TEMP_DIR >/dev/null

# Opening confirmation dialog; declining aborts before anything is created.
if whiptail --backtitle "Proxmox VE Helper Scripts" --title "CachyOS VM" --yesno "This will create a New CachyOS VM.\n\nCachyOS is a performance-optimized Arch Linux distribution with:\n• Custom kernels tuned for performance\n• Optimized packages with LTO/PGO\n• Multiple desktop environments (KDE, GNOME, XFCE, etc.)\n• BORE/EEVDF/sched-ext CPU schedulers\n\nYou will need to complete the installation via the graphical Calamares installer.\n\nProceed?" 18 68; then
  :
else
  header_info && echo -e "${CROSS}${RD}User exited script${CL}\n" && exit
fi
# Print an in-progress status line without a trailing newline so a later
# msg_ok/msg_error can overwrite it on the same terminal row.
function msg_info() {
  printf '%b' "${TAB}${YW}${HOLD}${1}${HOLD}"
}
# Overwrite the current status line with a green check mark and the message.
function msg_ok() {
  printf '%b\n' "${BFR}${CM}${GN}${1}${CL}"
}
# Overwrite the current status line with a red cross and the message.
# NOTE(review): writes to stdout like the original, not stderr.
function msg_error() {
  printf '%b\n' "${BFR}${CROSS}${RD}${1}${CL}"
}
# Abort unless running as the real root user; a sudo wrapper is rejected
# because later dialogs/variable gathering can misbehave under sudo.
function check_root() {
  if [[ "$(id -u)" -eq 0 && "$(ps -o comm= -p "$PPID")" != "sudo" ]]; then
    return
  fi
  clear
  msg_error "Please run this script as root."
  printf '\nExiting...\n'
  sleep 2
  exit
}
# Ensure the host runs a supported Proxmox VE release (8.1-8.4 or 9.0-9.1);
# any other version aborts the script.
function pve_check() {
  local supported='pve-manager/(8\.[1-4]|9\.[0-1])(\.[0-9]+)*'
  if pveversion | grep -Eq "$supported"; then
    return
  fi
  msg_error "${CROSS}${RD}This version of Proxmox Virtual Environment is not supported"
  printf '%b\n' "Requires Proxmox Virtual Environment Version 8.1 - 8.4 or 9.0 - 9.1."
  printf '%b\n' "Exiting..."
  sleep 2
  exit
}
# Refuse to run on non-amd64 hosts (e.g. PiMox / ARM64 ports of Proxmox).
function arch_check() {
  local host_arch
  host_arch=$(dpkg --print-architecture)
  if [[ "$host_arch" == "amd64" ]]; then
    return
  fi
  printf '%b\n' "\n ${INFO}${YW}This script will not work with PiMox! \n"
  printf '%b\n' "\n ${YW}Visit https://github.com/asylumexp/Proxmox for ARM64 support. \n"
  printf '%b\n' "Exiting..."
  sleep 2
  exit
}
# Warn when the script runs over SSH (variable gathering can misbehave);
# the user may proceed anyway or abort. No-op on non-Proxmox hosts or
# local (non-SSH) sessions.
function ssh_check() {
  command -v pveversion >/dev/null 2>&1 || return 0
  [ -n "${SSH_CLIENT:+x}" ] || return 0
  if whiptail --backtitle "Proxmox VE Helper Scripts" --defaultno --title "SSH DETECTED" --yesno "It's suggested to use the Proxmox shell instead of SSH, since SSH can create issues while gathering variables. Would you like to proceed with using SSH?" 10 62; then
    echo "you've been warned"
  else
    clear
    exit
  fi
}
# Clear the screen, announce the user abort, and terminate the script.
function exit-script() {
  clear
  printf '\n%b\n\n' "${CROSS}${RD}User exited script${CL}"
  exit
}
# ==============================================================================
# DEFAULT SETTINGS - Optimized for desktop usage with GUI
# ==============================================================================
# Populate the global VM configuration with desktop-oriented defaults
# (q35, host CPU, 4 cores, 8 GiB RAM, 40 GiB disk) and print a summary.
function default_settings() {
  VMID=$(get_valid_nextid)
  FORMAT=""
  MACHINE=" -machine q35"
  DISK_SIZE="40G"
  DISK_CACHE=""
  HN="cachyos"
  CPU_TYPE=" -cpu host"
  CORE_COUNT="4"
  RAM_SIZE="8192"
  BRG="vmbr0"
  MAC="$GEN_MAC"
  VLAN=""
  MTU=""
  START_VM="yes"
  METHOD="default"
  # Echo the chosen configuration back to the operator.
  printf '%b\n' "${CONTAINERID}${BOLD}${DGN}Virtual Machine ID: ${BGN}${VMID}${CL}"
  printf '%b\n' "${CONTAINERTYPE}${BOLD}${DGN}Machine Type: ${BGN}Q35 (Modern)${CL}"
  printf '%b\n' "${DISKSIZE}${BOLD}${DGN}Disk Size: ${BGN}${DISK_SIZE}${CL}"
  printf '%b\n' "${DISKSIZE}${BOLD}${DGN}Disk Cache: ${BGN}None${CL}"
  printf '%b\n' "${HOSTNAME}${BOLD}${DGN}Hostname: ${BGN}${HN}${CL}"
  printf '%b\n' "${OS}${BOLD}${DGN}CPU Model: ${BGN}Host (Recommended for CachyOS optimizations)${CL}"
  printf '%b\n' "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}${CORE_COUNT}${CL}"
  printf '%b\n' "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}${RAM_SIZE} MiB${CL}"
  printf '%b\n' "${BRIDGE}${BOLD}${DGN}Bridge: ${BGN}${BRG}${CL}"
  printf '%b\n' "${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}${MAC}${CL}"
  printf '%b\n' "${VLANTAG}${BOLD}${DGN}VLAN: ${BGN}Default${CL}"
  printf '%b\n' "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}Default${CL}"
  printf '%b\n' "${GATEWAY}${BOLD}${DGN}Start VM when completed: ${BGN}yes${CL}"
  printf '%b\n' "${CREATING}${BOLD}${DGN}Creating a CachyOS VM using the above default settings${CL}"
}
# ==============================================================================
# ADVANCED SETTINGS
# ==============================================================================
# Walk the user through every VM setting with whiptail dialogs, filling the
# same globals as default_settings (VMID, MACHINE/FORMAT, DISK_SIZE,
# DISK_CACHE, HN, CPU_TYPE, CORE_COUNT, RAM_SIZE, BRG, MAC, VLAN, MTU,
# START_VM).  Cancelling any dialog calls exit-script; answering "Do-Over"
# at the final confirmation restarts the whole wizard recursively.
function advanced_settings() {
  METHOD="advanced"
  [ -z "${VMID:-}" ] && VMID=$(get_valid_nextid)
  # VM ID: loop until an unused ID is entered (empty input = auto-pick)
  while true; do
    if VMID=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Virtual Machine ID" 8 58 $VMID --title "VIRTUAL MACHINE ID" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then
      if [ -z "$VMID" ]; then
        VMID=$(get_valid_nextid)
      fi
      if pct status "$VMID" &>/dev/null || qm status "$VMID" &>/dev/null; then
        echo -e "${CROSS}${RD} ID $VMID is already in use${CL}"
        sleep 2
        continue
      fi
      echo -e "${CONTAINERID}${BOLD}${DGN}Virtual Machine ID: ${BGN}$VMID${CL}"
      break
    else
      exit-script
    fi
  done
  # Machine Type: q35 (PCIe) or legacy i440fx; i440fx also adds the EFI
  # disk format suffix via FORMAT
  if MACH=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "MACHINE TYPE" --radiolist --cancel-button Exit-Script "Choose Type" 10 58 2 \
    "q35" "Q35 (Modern, PCIe - Recommended)" ON \
    "i440fx" "i440fx (Legacy, PCI)" OFF \
    3>&1 1>&2 2>&3); then
    if [ $MACH = q35 ]; then
      echo -e "${CONTAINERTYPE}${BOLD}${DGN}Machine Type: ${BGN}Q35 (Modern)${CL}"
      FORMAT=""
      MACHINE=" -machine q35"
    else
      echo -e "${CONTAINERTYPE}${BOLD}${DGN}Machine Type: ${BGN}i440fx (Legacy)${CL}"
      FORMAT=",efitype=4m"
      MACHINE=""
    fi
  else
    exit-script
  fi
  # Disk Size: accepts "40" (G appended) or "40G"; anything else aborts
  if DISK_SIZE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Disk Size in GiB (Recommended: 40+ for desktop)" 8 58 "40" --title "DISK SIZE" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then
    DISK_SIZE=$(echo "$DISK_SIZE" | tr -d ' ')
    if [[ "$DISK_SIZE" =~ ^[0-9]+$ ]]; then
      DISK_SIZE="${DISK_SIZE}G"
      echo -e "${DISKSIZE}${BOLD}${DGN}Disk Size: ${BGN}$DISK_SIZE${CL}"
    elif [[ "$DISK_SIZE" =~ ^[0-9]+G$ ]]; then
      echo -e "${DISKSIZE}${BOLD}${DGN}Disk Size: ${BGN}$DISK_SIZE${CL}"
    else
      echo -e "${DISKSIZE}${BOLD}${RD}Invalid Disk Size. Please use a number (e.g., 40 or 40G).${CL}"
      exit-script
    fi
  else
    exit-script
  fi
  # Disk Cache: none (default) or writethrough; stored pre-formatted for -scsi0
  if DISK_CACHE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "DISK CACHE" --radiolist "Choose" --cancel-button Exit-Script 10 58 2 \
    "0" "None (Default)" ON \
    "1" "Write Through" OFF \
    3>&1 1>&2 2>&3); then
    if [ $DISK_CACHE = "1" ]; then
      echo -e "${DISKSIZE}${BOLD}${DGN}Disk Cache: ${BGN}Write Through${CL}"
      DISK_CACHE="cache=writethrough,"
    else
      echo -e "${DISKSIZE}${BOLD}${DGN}Disk Cache: ${BGN}None${CL}"
      DISK_CACHE=""
    fi
  else
    exit-script
  fi
  # Hostname: lower-cased, spaces stripped; empty input falls back to "cachyos"
  if VM_NAME=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Hostname" 8 58 cachyos --title "HOSTNAME" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then
    if [ -z $VM_NAME ]; then
      HN="cachyos"
    else
      HN=$(echo ${VM_NAME,,} | tr -d ' ')
    fi
    echo -e "${HOSTNAME}${BOLD}${DGN}Hostname: ${BGN}$HN${CL}"
  else
    exit-script
  fi
  # CPU Model: host passthrough exposes x86-64-v3/v4 features to the guest
  if CPU_TYPE1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "CPU MODEL" --radiolist "Choose (Host recommended for CachyOS optimizations)" --cancel-button Exit-Script 10 68 2 \
    "1" "Host (Recommended - enables x86-64-v3/v4)" ON \
    "0" "KVM64 (Compatible)" OFF \
    3>&1 1>&2 2>&3); then
    if [ $CPU_TYPE1 = "1" ]; then
      echo -e "${OS}${BOLD}${DGN}CPU Model: ${BGN}Host${CL}"
      CPU_TYPE=" -cpu host"
    else
      echo -e "${OS}${BOLD}${DGN}CPU Model: ${BGN}KVM64${CL}"
      CPU_TYPE=""
    fi
  else
    exit-script
  fi
  # CPU Cores (empty input = 4)
  if CORE_COUNT=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate CPU Cores (Recommended: 4+ for desktop)" 8 58 4 --title "CORE COUNT" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then
    if [ -z $CORE_COUNT ]; then
      CORE_COUNT="4"
    fi
    echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}$CORE_COUNT${CL}"
  else
    exit-script
  fi
  # RAM in MiB (empty input = 8192)
  if RAM_SIZE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate RAM in MiB (Recommended: 8192+ for desktop)" 8 58 8192 --title "RAM" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then
    if [ -z $RAM_SIZE ]; then
      RAM_SIZE="8192"
    fi
    echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}$RAM_SIZE MiB${CL}"
  else
    exit-script
  fi
  # Network bridge (empty input = vmbr0)
  if BRG=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a Bridge" 8 58 vmbr0 --title "BRIDGE" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then
    if [ -z $BRG ]; then
      BRG="vmbr0"
    fi
    echo -e "${BRIDGE}${BOLD}${DGN}Bridge: ${BGN}$BRG${CL}"
  else
    exit-script
  fi
  # MAC address (empty input = the generated locally-administered MAC)
  if MAC1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a MAC Address" 8 58 $GEN_MAC --title "MAC ADDRESS" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then
    if [ -z $MAC1 ]; then
      MAC="$GEN_MAC"
    else
      MAC="$MAC1"
    fi
    echo -e "${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}$MAC${CL}"
  else
    exit-script
  fi
  # VLAN: stored pre-formatted as ",tag=<id>" for the -net0 argument
  if VLAN1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a Vlan (leave blank for default)" 8 58 --title "VLAN" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then
    if [ -z $VLAN1 ]; then
      VLAN1="Default"
      VLAN=""
    else
      VLAN=",tag=$VLAN1"
    fi
    echo -e "${VLANTAG}${BOLD}${DGN}VLAN: ${BGN}$VLAN1${CL}"
  else
    exit-script
  fi
  # MTU: stored pre-formatted as ",mtu=<size>" for the -net0 argument
  if MTU1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Interface MTU Size (leave blank for default)" 8 58 --title "MTU SIZE" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then
    if [ -z $MTU1 ]; then
      MTU1="Default"
      MTU=""
    else
      MTU=",mtu=$MTU1"
    fi
    echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}$MTU1${CL}"
  else
    exit-script
  fi
  # Start the VM automatically once creation is finished?
  if (whiptail --backtitle "Proxmox VE Helper Scripts" --title "START VIRTUAL MACHINE" --yesno "Start VM when completed?" 10 58); then
    echo -e "${GATEWAY}${BOLD}${DGN}Start VM when completed: ${BGN}yes${CL}"
    START_VM="yes"
  else
    echo -e "${GATEWAY}${BOLD}${DGN}Start VM when completed: ${BGN}no${CL}"
    START_VM="no"
  fi
  # Final confirmation; "Do-Over" restarts the wizard from the top
  if (whiptail --backtitle "Proxmox VE Helper Scripts" --title "ADVANCED SETTINGS COMPLETE" --yesno "Ready to create a CachyOS VM?" --no-button Do-Over 10 58); then
    echo -e "${CREATING}${BOLD}${DGN}Creating a CachyOS VM using the above advanced settings${CL}"
  else
    header_info
    echo -e "${ADVANCED}${BOLD}${RD}Using Advanced Settings${CL}"
    advanced_settings
  fi
}
# Entry dialog: choose between the optimized defaults and the step-by-step
# advanced configuration wizard.
function start_script() {
  if whiptail --backtitle "Proxmox VE Helper Scripts" --title "SETTINGS" --yesno "Use Default Settings?\n\nDefaults are optimized for desktop usage:\n• 4 CPU Cores (Host model)\n• 8 GB RAM\n• 40 GB Disk\n• Q35 Machine Type" --no-button Advanced 14 58; then
    header_info
    printf '%b\n' "${DEFAULT}${BOLD}${BL}Using Default Settings${CL}"
    default_settings
  else
    header_info
    printf '%b\n' "${ADVANCED}${BOLD}${RD}Using Advanced Settings${CL}"
    advanced_settings
  fi
}
# ==============================================================================
# MAIN EXECUTION
# ==============================================================================
check_root
arch_check
pve_check
ssh_check
start_script
post_to_api_vm

# ==============================================================================
# STORAGE SELECTION
# ==============================================================================
# Build a whiptail radiolist from every storage that can hold VM images.
# STORAGE_MENU holds (tag, label, state) triplets; MSG_MAX_LENGTH tracks the
# widest label so the dialog can be sized to fit.
msg_info "Validating Storage"
while read -r line; do
  # NOTE(review): $line is intentionally unquoted here so awk receives the
  # whitespace-split pvesm columns.
  TAG=$(echo $line | awk '{print $1}')
  TYPE=$(echo $line | awk '{printf "%-10s", $2}')
  FREE=$(echo $line | numfmt --field 4-6 --from-unit=K --to=iec --format %.2f | awk '{printf( "%9sB", $6)}')
  ITEM=" Type: $TYPE Free: $FREE "
  OFFSET=2
  if [[ $((${#ITEM} + $OFFSET)) -gt ${MSG_MAX_LENGTH:-} ]]; then
    MSG_MAX_LENGTH=$((${#ITEM} + $OFFSET))
  fi
  STORAGE_MENU+=("$TAG" "$ITEM" "OFF")
done < <(pvesm status -content images | awk 'NR>1')
# Second pvesm call only verifies at least one image-capable storage exists.
VALID=$(pvesm status -content images | awk 'NR>1')
if [ -z "$VALID" ]; then
  msg_error "Unable to detect a valid storage location."
  exit
elif [ $((${#STORAGE_MENU[@]} / 3)) -eq 1 ]; then
  # Exactly one storage: select it without prompting.
  STORAGE=${STORAGE_MENU[0]}
else
  # Multiple storages: prompt until the user picks one.
  while [ -z "${STORAGE:+x}" ]; do
    STORAGE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "Storage Pools" --radiolist \
      "Which storage pool would you like to use for ${HN}?\nTo make a selection, use the Spacebar.\n" \
      16 $(($MSG_MAX_LENGTH + 23)) 6 \
      "${STORAGE_MENU[@]}" 3>&1 1>&2 2>&3)
  done
fi
msg_ok "Using ${CL}${BL}$STORAGE${CL} ${GN}for Storage Location."
msg_ok "Virtual Machine ID is ${CL}${BL}$VMID${CL}."
# ==============================================================================
# ISO DOWNLOAD
# ==============================================================================
msg_info "Retrieving the URL for the CachyOS Desktop ISO"
# Get latest release version from SourceForge (format: YYMMDD in folder links).
# NOTE(review): numeric sort on YYMMDD is only correct within the same century.
CACHYOS_VERSION=$(curl -fsSL "https://sourceforge.net/projects/cachyos-arch/files/gui-installer/desktop/" 2>/dev/null | grep -oP 'desktop/\K[0-9]{6}(?=/)' | sort -rn | head -1)
if [ -z "$CACHYOS_VERSION" ]; then
  # Fallback to a known-good pinned release if scraping fails.
  CACHYOS_VERSION="260124"
fi
# SourceForge download URL with mirror redirect
URL="https://sourceforge.net/projects/cachyos-arch/files/gui-installer/desktop/${CACHYOS_VERSION}/cachyos-desktop-linux-${CACHYOS_VERSION}.iso/download"
FILENAME="cachyos-desktop-linux-${CACHYOS_VERSION}.iso"
# Proxmox "local" storage ISO directory; caching here lets the VM attach the
# image via local:iso/<name> and skips re-downloading on later runs.
CACHE_DIR="/var/lib/vz/template/iso"
CACHE_FILE="${CACHE_DIR}/${FILENAME}"
mkdir -p "$CACHE_DIR"
msg_ok "${CL}${BL}CachyOS Desktop ISO (Release: ${CACHYOS_VERSION})${CL}"
if [[ -s "$CACHE_FILE" ]]; then
  msg_ok "Using cached ISO ${CL}${BL}${FILENAME}${CL}"
else
  msg_info "Downloading CachyOS ISO (approximately 3.1 GB, this may take a while)"
  if curl -fSL -o "$CACHE_FILE" -L "$URL"; then
    # Move the cursor up one line and clear it to replace the progress line.
    echo -en "\e[1A\e[0K"
    msg_ok "Downloaded ${CL}${BL}${FILENAME}${CL}"
  else
    msg_error "Failed to download CachyOS ISO"
    exit 1
  fi
fi
# ==============================================================================
# VM CREATION
# ==============================================================================
msg_info "Creating a CachyOS VM"
# OVMF (UEFI) firmware, virtio networking/SCSI, and the cached ISO attached
# from local storage.  ${THIN%,} strips the trailing comma from the thin-
# provisioning options so the -scsi0 argument stays well-formed.
qm create $VMID -agent 1${MACHINE} -tablet 0 -localtime 1 -bios ovmf${CPU_TYPE} -cores $CORE_COUNT -memory $RAM_SIZE \
  -name $HN -tags community-script -net0 virtio,bridge=$BRG,macaddr=$MAC$VLAN$MTU -onboot 0 -ostype l26 -scsihw virtio-scsi-pci \
  -efidisk0 ${STORAGE}:1,efitype=4m,pre-enrolled-keys=0 -scsi0 ${STORAGE}:${DISK_SIZE},${DISK_CACHE}${THIN%,} \
  -cdrom local:iso/${FILENAME} -vga qxl -serial0 socket >/dev/null
msg_ok "Created a CachyOS VM ${CL}${BL}(${HN})"

# ==============================================================================
# VM DESCRIPTION
# ==============================================================================
# HTML blurb rendered in the Proxmox GUI "Notes" panel for the VM.
DESCRIPTION=$(
  cat <<EOF
<div align='center'>
<a href='https://Helper-Scripts.com' target='_blank' rel='noopener noreferrer'>
<img src='https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/images/logo-81x112.png' alt='Logo' style='width:81px;height:112px;'/>
</a>
<h2 style='font-size: 24px; margin: 20px 0;'>CachyOS VM</h2>
<p style='font-size: 14px; margin: 10px 0;'>
Performance-optimized Arch Linux with custom kernels,<br/>
BORE/EEVDF schedulers, and x86-64-v3/v4 optimizations.
</p>
<p style='margin: 16px 0;'>
<a href='https://ko-fi.com/community_scripts' target='_blank' rel='noopener noreferrer'>
<img src='https://img.shields.io/badge/&#x2615;-Buy us a coffee-blue' alt='Buy us a coffee' />
</a>
</p>
<span style='margin: 0 10px;'>
<i class="fa fa-globe fa-fw" style="color: #f5f5f5;"></i>
<a href='https://cachyos.org/' target='_blank' rel='noopener noreferrer' style='text-decoration: none; color: #00617f;'>CachyOS Website</a>
</span>
<span style='margin: 0 10px;'>
<i class="fa fa-book fa-fw" style="color: #f5f5f5;"></i>
<a href='https://wiki.cachyos.org/' target='_blank' rel='noopener noreferrer' style='text-decoration: none; color: #00617f;'>Wiki</a>
</span>
<span style='margin: 0 10px;'>
<i class="fa fa-github fa-fw" style="color: #f5f5f5;"></i>
<a href='https://github.com/CachyOS' target='_blank' rel='noopener noreferrer' style='text-decoration: none; color: #00617f;'>GitHub</a>
</span>
</div>
EOF
)
qm set "$VMID" -description "$DESCRIPTION" >/dev/null
msg_ok "Created a CachyOS VM ${CL}${BL}(${HN})"
# ==============================================================================
# START VM
# ==============================================================================
# Honour the START_VM choice made in default_settings/advanced_settings.
if [ "$START_VM" == "yes" ]; then
  msg_info "Starting CachyOS VM"
  qm start $VMID
  msg_ok "Started CachyOS VM"
fi
# Report success to the telemetry API.
post_update_to_api "done" "none"
# ==============================================================================
# FINAL OUTPUT
# ==============================================================================
# Summarize the created VM and walk the user through the manual Calamares
# installation steps that remain.
echo -e "\n${INFO}${BOLD}${GN}CachyOS VM Configuration Summary:${CL}"
echo -e "${TAB}${DGN}VM ID: ${BGN}${VMID}${CL}"
echo -e "${TAB}${DGN}Hostname: ${BGN}${HN}${CL}"
echo -e "${TAB}${DGN}Disk Size: ${BGN}${DISK_SIZE}${CL}"
echo -e "${TAB}${DGN}RAM: ${BGN}${RAM_SIZE} MiB${CL}"
echo -e "${TAB}${DGN}CPU Cores: ${BGN}${CORE_COUNT}${CL}"
echo -e "\n${INFO}${BOLD}${YW}Next Steps:${CL}"
echo -e "${TAB}1. Open the VM Console in Proxmox (noVNC or SPICE)"
echo -e "${TAB}2. Boot from the CachyOS ISO"
echo -e "${TAB}3. Use the Calamares installer to complete installation"
echo -e "${TAB}4. Choose your preferred desktop environment during setup:"
echo -e "${TAB} ${BL}KDE Plasma, GNOME, XFCE, Hyprland, i3, and more${CL}"
echo -e "${TAB}5. After installation, remove the ISO from VM settings"
echo -e "${TAB}6. Change boot order to boot from disk (scsi0)"
echo -e "\n${INFO}${BOLD}${GN}CachyOS Features:${CL}"
echo -e "${TAB}• Custom linux-cachyos kernel with BORE scheduler"
echo -e "${TAB}• x86-64-v3/v4 optimized packages (auto-detected)"
echo -e "${TAB}• LTO/PGO optimized applications"
echo -e "${TAB}• Multiple filesystem options: btrfs, ext4, xfs, f2fs, zfs"
msg_ok "Completed successfully!\n"

8
vm/headers/cachyos-vm Normal file
View File

@ -0,0 +1,8 @@
______ __ ____ _____
/ ____/___ ______/ /_ __ __/ __ \/ ___/ _ ______ ___
/ / / __ `/ ___/ __ \/ / / / / / /\__ \______| | / / __ `__ \
/ /___/ /_/ / /__/ / / / /_/ / /_/ /___/ /_____/| |/ / / / / / /
\____/\__,_/\___/_/ /_/\__, /\____//____/ |___/_/ /_/ /_/
/____/
Performance-Optimized Arch Linux Distribution

View File

@ -182,7 +182,7 @@ function check_root() {
fi
}
pve_check() {
function pve_check() {
local PVE_VER
PVE_VER="$(pveversion | awk -F'/' '{print $2}' | awk -F'-' '{print $1}')"
@ -273,6 +273,12 @@ function default_settings() {
}
function advanced_settings() {
DISK_SIZE="16"
HN="truenas"
CORE_COUNT="2"
RAM_SIZE="8192"
BRG="vmbr0"
METHOD="advanced"
[ -z "${VMID:-}" ] && VMID=$(get_valid_nextid)
while true; do
@ -293,16 +299,39 @@ function advanced_settings() {
done
ISOARRAY=()
while read -r ISOPATH; do
FILENAME=$(basename "$ISOPATH")
ISOARRAY+=("$ISOPATH" "$FILENAME" "OFF")
done < <(truenas_iso_lookup | sort -V)
if [ ${#ISOARRAY[@]} -eq 0 ]; then
mapfile -t ALL_ISOS < <(truenas_iso_lookup | sort -V)
ISO_COUNT=${#ALL_ISOS[@]}
if [ $ISO_COUNT -eq 0 ]; then
echo "No ISOs found."
exit 1
fi
if SELECTED_ISO=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "SELECT ISO TO INSTALL" --notags --radiolist "\nSelect version (BETA/RC + Latest stables):" 20 58 12 "${ISOARRAY[@]}" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then
# Identify the index of the last stable release
LAST_STABLE_INDEX=-1
for i in "${!ALL_ISOS[@]}"; do
if [[ ! "${ALL_ISOS[$i]}" =~ (BETA|RC) ]]; then
LAST_STABLE_INDEX=$i
fi
done
# Build the whiptail array
for i in "${!ALL_ISOS[@]}"; do
ISOPATH="${ALL_ISOS[$i]}"
FILENAME=$(basename "$ISOPATH")
# Select ON if it's the last stable found, OR fallback to last item if no stable exists
if [[ "$i" -eq "$LAST_STABLE_INDEX" ]]; then
ISOARRAY+=("$ISOPATH" "$FILENAME" "ON")
elif [[ "$LAST_STABLE_INDEX" -eq -1 && "$i" -eq "$((ISO_COUNT - 1))" ]]; then
# Fallback: if somehow no stable is found, select the very last item
ISOARRAY+=("$ISOPATH" "$FILENAME" "ON")
else
ISOARRAY+=("$ISOPATH" "$FILENAME" "OFF")
fi
done
if SELECTED_ISO=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "SELECT ISO TO INSTALL" --notags --radiolist "\nSelect version (BETA/RC/Latest stable):" 20 58 12 "${ISOARRAY[@]}" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then
echo -e "${ISO}${BOLD}${DGN}ISO Chosen: ${BGN}$(basename "$SELECTED_ISO")${CL}"
else
exit-script
@ -320,9 +349,8 @@ function advanced_settings() {
exit-script
fi
if VM_NAME=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Hostname" 8 58 truenas --title "HOSTNAME" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then
if VM_NAME=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Hostname" 8 58 "$HN" --title "HOSTNAME" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then
if [ -z $VM_NAME ]; then
HN="truenas"
echo -e "${HOSTNAME}${BOLD}${DGN}Hostname: ${BGN}$HN${CL}"
else
HN=$(echo ${VM_NAME,,} | tr -d ' ')
@ -350,7 +378,7 @@ function advanced_settings() {
exit-script
fi
if CORE_COUNT=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate CPU Cores" 8 58 2 --title "CORE COUNT" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then
if CORE_COUNT=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate CPU Cores" 8 58 "$CORE_COUNT" --title "CORE COUNT" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then
if [ -z $CORE_COUNT ]; then
CORE_COUNT="2"
echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}$CORE_COUNT${CL}"
@ -361,7 +389,7 @@ function advanced_settings() {
exit-script
fi
if RAM_SIZE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate RAM in MiB" 8 58 8192 --title "RAM" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then
if RAM_SIZE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate RAM in MiB" 8 58 "$RAM_SIZE" --title "RAM" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then
if [ -z $RAM_SIZE ]; then
RAM_SIZE="8192"
echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}$RAM_SIZE${CL}"
@ -372,7 +400,7 @@ function advanced_settings() {
exit-script
fi
if BRG=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a Bridge" 8 58 vmbr0 --title "BRIDGE" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then
if BRG=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a Bridge" 8 58 "$BRG" --title "BRIDGE" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then
if [ -z $BRG ]; then
BRG="vmbr0"
echo -e "${BRIDGE}${BOLD}${DGN}Bridge: ${BGN}$BRG${CL}"
@ -533,14 +561,14 @@ if [ "$IMPORT_DISKS" == "yes" ]; then
while read -r LSOUTPUT; do
DISKARRAY+=("$LSOUTPUT" "" "OFF")
done < <(ls /dev/disk/by-id | grep -E '^ata-|^nvme-' | grep -v 'part')
done < <(ls /dev/disk/by-id | grep -E '^ata-|^nvme-|^usb-' | grep -v 'part')
SELECTIONS=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "SELECT DISKS TO IMPORT" --checklist "\nSelect disk IDs to import. (Use Spacebar to select)\n" --cancel-button "Exit Script" 20 58 10 "${DISKARRAY[@]}" 3>&1 1>&2 2>&3 | tr -d '"') || exit
for SELECTION in $SELECTIONS; do
((++SCSI_NR))
ID_SERIAL=$(echo "$SELECTION" | rev | cut -d'_' -f1 | rev)
ID_SERIAL=$(udevadm info --query=property --value --property=ID_SERIAL_SHORT "/dev/disk/by-id/$SELECTION")
ID_SERIAL=${ID_SERIAL:0:20}
qm set $VMID --scsi$SCSI_NR /dev/disk/by-id/$SELECTION,serial=$ID_SERIAL