Merge branch 'community-scripts:main' into main

elvito 2025-04-14 20:18:02 +02:00 committed by GitHub
commit 3c61735cac
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
58 changed files with 3940 additions and 1953 deletions


@ -1,72 +0,0 @@
#!/usr/bin/env bash
source <(curl -s https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)
# Copyright (c) 2021-2025 community-scripts ORG
# Author: MickLesk (Canbiz)
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source:
APP="Documenso"
var_tags="${var_tags:-document}"
var_disk="${var_disk:-12}"
var_cpu="${var_cpu:-6}"
var_ram="${var_ram:-6144}"
var_os="${var_os:-debian}"
var_version="${var_version:-12}"
var_unprivileged="${var_unprivileged:-1}"
header_info "$APP"
variables
color
catch_errors
function update_script() {
header_info
check_container_storage
check_container_resources
if [[ ! -d /opt/documenso ]]; then
msg_error "No ${APP} Installation Found!"
exit
fi
RELEASE=$(curl -s https://api.github.com/repos/documenso/documenso/releases/latest | grep "tag_name" | awk '{print substr($2, 3, length($2)-4) }')
if [[ ! -f /opt/${APP}_version.txt ]] || [[ "${RELEASE}" != "$(cat /opt/${APP}_version.txt)" ]]; then
whiptail --backtitle "Proxmox VE Helper Scripts" --msgbox --title "SET RESOURCES" "Please set the resources in your ${APP} LXC to ${var_cpu}vCPU and ${var_ram}RAM for the build process before continuing" 10 75
msg_info "Stopping ${APP}"
systemctl stop documenso
msg_ok "${APP} Stopped"
msg_info "Updating ${APP} to ${RELEASE}"
cp /opt/documenso/.env /opt/
rm -R /opt/documenso
curl -fsSL "https://github.com/documenso/documenso/archive/refs/tags/v${RELEASE}.zip"
unzip -q v${RELEASE}.zip
mv documenso-${RELEASE} /opt/documenso
cd /opt/documenso
mv /opt/.env /opt/documenso/.env
npm install &>/dev/null
npm run build:web &>/dev/null
npm run prisma:migrate-deploy &>/dev/null
echo "${RELEASE}" >/opt/${APP}_version.txt
msg_ok "Updated ${APP}"
msg_info "Starting ${APP}"
systemctl start documenso
msg_ok "Started ${APP}"
msg_info "Cleaning Up"
rm -rf v${RELEASE}.zip
msg_ok "Cleaned"
msg_ok "Updated Successfully"
else
msg_ok "No update required. ${APP} is already at ${RELEASE}"
fi
exit
}
start
build_container
description
msg_ok "Completed Successfully!\n"
echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
echo -e "${INFO}${YW} Access it using the following URL:${CL}"
echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:9000${CL}"


@ -1,46 +0,0 @@
#!/usr/bin/env bash
source <(curl -s https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)
# Copyright (c) 2021-2025 community-scripts ORG
# Author: MickLesk (CanbiZ)
# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
# Source: https://adguardhome.com/
APP="Alpine-AdGuard"
var_tags="${var_tags:-alpine;networking}"
var_cpu="${var_cpu:-1}"
var_ram="${var_ram:-256}"
var_disk="${var_disk:-1}"
var_os="${var_os:-alpine}"
var_version="${var_version:-3.21}"
var_unprivileged="${var_unprivileged:-1}"
header_info "$APP"
variables
color
catch_errors
function update_script() {
msg_info "Updating Alpine Packages"
$STD apk update
$STD apk upgrade
msg_ok "Updated Alpine Packages"
msg_info "Updating AdGuard Home"
$STD /opt/adguardhome/AdGuardHome --update
msg_ok "Updated AdGuard Home"
msg_info "Restarting AdGuard Home"
$STD rc-service adguardhome restart
msg_ok "Restarted AdGuard Home"
exit 0
}
start
build_container
description
msg_ok "Completed Successfully!\n"
echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
echo -e "${INFO}${YW} Access it using the following URL:${CL}"
echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:3000${CL}"


@ -1,41 +0,0 @@
#!/usr/bin/env bash
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func)
APP="Alpine-Duplicati"
var_tags="${var_tags:-alpine}"
var_cpu="${var_cpu:-1}"
var_ram="${var_ram:-256}"
var_disk="${var_disk:-1}"
var_os="${var_os:-alpine}"
var_version="${var_version:-3.21}"
var_unprivileged="${var_unprivileged:-1}"
header_info "$APP"
variables
color
catch_errors
function update_script() {
msg_info "Updating Alpine Packages"
$STD apk update
$STD apk upgrade
msg_ok "Updated Alpine Packages"
msg_info "Updating Duplicati"
$STD apk upgrade duplicati
msg_ok "Updated Duplicati"
msg_info "Restarting Duplicati"
$STD rc-service duplicati restart || true
msg_ok "Restarted Duplicati"
exit 0
}
start
build_container
description
msg_ok "Completed Successfully!\n"
echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
echo -e "${INFO}${YW} Access info will vary based on service config. CLI access likely available. ${CL}"

47
ct/alpine-rclone.sh Normal file

@ -0,0 +1,47 @@
#!/usr/bin/env bash
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)
# Copyright (c) 2021-2025 community-scripts ORG
# Author: Slaviša Arežina (tremor021)
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://github.com/rclone/rclone
APP="Alpine-rclone"
var_tags="${var_tags:-alpine;backup}"
var_cpu="${var_cpu:-1}"
var_ram="${var_ram:-256}"
var_disk="${var_disk:-1}"
var_os="${var_os:-alpine}"
var_version="${var_version:-3.21}"
var_unprivileged="${var_unprivileged:-1}"
header_info "$APP"
variables
color
catch_errors
function update_script() {
header_info
msg_info "Updating Alpine Packages"
$STD apk update
$STD apk upgrade
msg_ok "Updated Alpine Packages"
msg_info "Updating Rclone"
$STD apk upgrade rclone
msg_ok "Updated Rclone"
msg_info "Restarting Rclone"
$STD rc-service rclone restart || true
msg_ok "Restarted Rclone"
exit 0
}
start
build_container
description
msg_ok "Completed Successfully!\n"
echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
echo -e "${INFO}${YW} Access it using the following IP:${CL}"
echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:3000${CL}"

@ -33,7 +33,7 @@ function update_script() {
$STD rc-service syncthing restart
msg_ok "Restarted Syncthing"
- exit 0
+ exit 1
}
start

72
ct/documenso.sh Normal file

@ -0,0 +1,72 @@
#!/usr/bin/env bash
source <(curl -s https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)
# Copyright (c) 2021-2025 community-scripts ORG
# Author: MickLesk (Canbiz)
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source:
APP="Documenso"
var_tags="${var_tags:-document}"
var_disk="${var_disk:-12}"
var_cpu="${var_cpu:-6}"
var_ram="${var_ram:-6144}"
var_os="${var_os:-debian}"
var_version="${var_version:-12}"
var_unprivileged="${var_unprivileged:-1}"
header_info "$APP"
variables
color
catch_errors
function update_script() {
header_info
check_container_storage
check_container_resources
if [[ ! -d /opt/documenso ]]; then
msg_error "No ${APP} Installation Found!"
exit
fi
RELEASE=$(curl -s https://api.github.com/repos/documenso/documenso/releases/latest | grep "tag_name" | awk '{print substr($2, 3, length($2)-4) }')
if [[ ! -f /opt/${APP}_version.txt ]] || [[ "${RELEASE}" != "$(cat /opt/${APP}_version.txt)" ]]; then
whiptail --backtitle "Proxmox VE Helper Scripts" --msgbox --title "SET RESOURCES" "Please set the resources in your ${APP} LXC to ${var_cpu}vCPU and ${var_ram}RAM for the build process before continuing" 10 75
msg_info "Stopping ${APP}"
systemctl stop documenso
msg_ok "${APP} Stopped"
msg_info "Updating ${APP} to ${RELEASE}"
cp /opt/documenso/.env /opt/
rm -R /opt/documenso
curl -fsSL "https://github.com/documenso/documenso/archive/refs/tags/v${RELEASE}.zip" -o v${RELEASE}.zip
unzip -q v${RELEASE}.zip
mv documenso-${RELEASE} /opt/documenso
cd /opt/documenso
mv /opt/.env /opt/documenso/.env
npm install &>/dev/null
npm run build:web &>/dev/null
npm run prisma:migrate-deploy &>/dev/null
echo "${RELEASE}" >/opt/${APP}_version.txt
msg_ok "Updated ${APP}"
msg_info "Starting ${APP}"
systemctl start documenso
msg_ok "Started ${APP}"
msg_info "Cleaning Up"
rm -rf v${RELEASE}.zip
msg_ok "Cleaned"
msg_ok "Updated Successfully"
else
msg_ok "No update required. ${APP} is already at ${RELEASE}"
fi
exit
}
start
build_container
description
msg_ok "Completed Successfully!\n"
echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
echo -e "${INFO}${YW} Access it using the following URL:${CL}"
echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:3000${CL}"


@ -1,6 +0,0 @@
___ __ _ ___ ________ __
/ | / /___ (_)___ ___ / | ____/ / ____/_ ______ __________/ /
/ /| | / / __ \/ / __ \/ _ \______/ /| |/ __ / / __/ / / / __ `/ ___/ __ /
/ ___ |/ / /_/ / / / / / __/_____/ ___ / /_/ / /_/ / /_/ / /_/ / / / /_/ /
/_/ |_/_/ .___/_/_/ /_/\___/ /_/ |_\__,_/\____/\__,_/\__,_/_/ \__,_/
/_/


@ -1,6 +0,0 @@
___ __ _ ____ ___ __ _
/ | / /___ (_)___ ___ / __ \__ ______ / (_)________ _/ /_(_)
/ /| | / / __ \/ / __ \/ _ \______/ / / / / / / __ \/ / / ___/ __ `/ __/ /
/ ___ |/ / /_/ / / / / / __/_____/ /_/ / /_/ / /_/ / / / /__/ /_/ / /_/ /
/_/ |_/_/ .___/_/_/ /_/\___/ /_____/\__,_/ .___/_/_/\___/\__,_/\__/_/
/_/ /_/

6
ct/headers/alpine-rclone Normal file

@ -0,0 +1,6 @@
___ __ _ __
/ | / /___ (_)___ ___ __________/ /___ ____ ___
/ /| | / / __ \/ / __ \/ _ \______/ ___/ ___/ / __ \/ __ \/ _ \
/ ___ |/ / /_/ / / / / / __/_____/ / / /__/ / /_/ / / / / __/
/_/ |_/_/ .___/_/_/ /_/\___/ /_/ \___/_/\____/_/ /_/\___/
/_/


@ -1,6 +0,0 @@
_ __ _ __ ____
____ ____ ___ ____ ____ (_) /_(_) _________ ____ / /__________ / / /__ _____
/ __ \/ __ \/ _ \/ __ \/_ / / / __/ /_____/ ___/ __ \/ __ \/ __/ ___/ __ \/ / / _ \/ ___/
/ /_/ / /_/ / __/ / / / / /_/ / /_/ /_____/ /__/ /_/ / / / / /_/ / / /_/ / / / __/ /
\____/ .___/\___/_/ /_/ /___/_/\__/_/ \___/\____/_/ /_/\__/_/ \____/_/_/\___/_/
/_/

@ -20,14 +20,33 @@ color
catch_errors
function update_script() {
header_info
check_container_storage
check_container_resources
if [[ ! -d /opt/musicassistant ]]; then
- msg_error "No Installation Found!"
- exit
+ msg_error "No existing installation found!"
+ exit 1
fi
+ msg_info "Stopping Music Assistant service"
+ systemctl stop musicassistant
+ msg_ok "Service stopped"
+ msg_info "Updating Music Assistant files"
+ cd /opt/musicassistant || exit 1
+ $STD fetch_and_deploy_gh_release music-assistant/server
+ msg_ok "Music Assistant files updated"
+ msg_info "Updating Python virtual environment"
+ source .venv/bin/activate || exit 1
+ pip install --upgrade pip uv
+ uv pip install .
+ msg_ok "Python environment updated"
+ msg_info "Restarting Music Assistant service"
+ systemctl restart musicassistant
+ msg_ok "Service restarted"
}
start


@ -1,44 +0,0 @@
#!/usr/bin/env bash
source <(curl -s https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)
# Copyright (c) 2021-2025 community-scripts ORG
# Author: emoscardini
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://github.com/openziti/ziti
APP="openziti-controller"
var_tags="network;openziti-controller"
var_cpu="2"
var_ram="1024"
var_disk="8"
var_os="debian"
var_version="12"
var_unprivileged="1"
header_info "$APP"
variables
color
catch_errors
function update_script() {
header_info
check_container_storage
check_container_resources
if [[ ! -d /opt/openziti ]]; then
msg_error "No ${APP} Installation Found!"
exit
fi
msg_info "Updating $APP LXC"
$STD apt-get update
$STD apt-get -y upgrade
msg_ok "Updated $APP LXC"
exit
}
start
build_container
description
msg_ok "Completed Successfully!\n"
echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
echo -e "${INFO}${YW} Access it using the following URL:${CL}"
echo -e "${TAB}${GATEWAY}${BGN}https://${IP}:<port>/zac${CL}"

@ -20,81 +20,80 @@ color
catch_errors
function update_script() {
header_info
check_container_storage
check_container_resources
if [[ ! -f /etc/systemd/system/Reactive-Resume.service ]]; then
msg_error "No ${APP} Installation Found!"
+ exit
+ fi
+ RELEASE=$(curl -s https://api.github.com/repos/AmruthPillai/Reactive-Resume/releases/latest | grep "tag_name" | awk '{print substr($2, 3, length($2)-4) }')
+ if [[ "${RELEASE}" != "$(cat /opt/${APP}_version.txt)" ]] || [[ ! -f /opt/${APP}_version.txt ]]; then
+ msg_info "Stopping services"
+ systemctl stop Reactive-Resume
+ msg_ok "Stopped services"
+ msg_info "Updating $APP to v${RELEASE}"
+ cp /opt/${APP}/.env /opt/rxresume.env
+ res_tmp=$(mktemp)
+ rm -rf /opt/${APP}
+ curl -fsSL "https://github.com/AmruthPillai/Reactive-Resume/archive/refs/tags/v${RELEASE}.zip" -O $res_tmp
+ unzip -q $res_tmp
+ mv ${APP}-${RELEASE}/ /opt/${APP}
+ cd /opt/${APP}
+ export PUPPETEER_SKIP_DOWNLOAD="true"
+ export NEXT_TELEMETRY_DISABLED=1
+ export CI="true"
+ export NODE_ENV="production"
+ $STD pnpm install --frozen-lockfile
+ $STD pnpm run build
+ $STD pnpm run prisma:generate
+ mv /opt/rxresume.env /opt/${APP}/.env
+ msg_ok "Updated $APP to v${RELEASE}"
+ msg_info "Updating Minio"
+ systemctl stop minio
+ cd /tmp
+ curl -fsSL https://dl.min.io/server/minio/release/linux-amd64/minio.deb -o minio.deb
+ $STD dpkg -i minio.deb
+ msg_ok "Updated Minio"
+ msg_info "Updating Browserless (Patience)"
+ systemctl stop browserless
+ cp /opt/browserless/.env /opt/browserless.env
+ rm -rf browserless
+ brwsr_tmp=$(mktemp)
+ TAG=$(curl -fsSL https://api.github.com/repos/browserless/browserless/tags?per_page=1 | grep "name" | awk '{print substr($2, 3, length($2)-4) }')
+ curl -fsSL https://github.com/browserless/browserless/archive/refs/tags/v${TAG}.zip -O $brwsr_tmp
+ unzip -q $brwsr_tmp
+ mv browserless-${TAG}/ /opt/browserless
+ cd /opt/browserless
+ $STD npm install
+ rm -rf src/routes/{chrome,edge,firefox,webkit}
+ $STD node_modules/playwright-core/cli.js install --with-deps chromium
+ $STD npm run build
+ $STD npm run build:function
+ $STD npm prune production
+ mv /opt/browserless.env /opt/browserless/.env
+ msg_ok "Updated Browserless"
+ msg_info "Restarting services"
+ systemctl start minio Reactive-Resume browserless
+ msg_ok "Restarted services"
+ msg_info "Cleaning Up"
+ rm -f /tmp/minio.deb
+ rm -f $brwsr_tmp
+ rm -f $res_tmp
+ msg_ok "Cleanup Completed"
+ echo "${RELEASE}" >/opt/${APP}_version.txt
+ msg_ok "Update Successful"
+ else
+ msg_ok "No update required. ${APP} is already at v${RELEASE}"
+ fi
exit
- fi
- RELEASE=$(curl -s https://api.github.com/repos/AmruthPillai/Reactive-Resume/releases/latest | grep "tag_name" | awk '{print substr($2, 3, length($2)-4) }')
- if [[ "${RELEASE}" != "$(cat /opt/${APP}_version.txt)" ]] || [[ ! -f /opt/${APP}_version.txt ]]; then
- msg_info "Stopping services"
- systemctl stop Reactive-Resume
- msg_ok "Stopped services"
- msg_info "Updating $APP to v${RELEASE}"
- cp /opt/${APP}/.env /opt/rxresume.env
- res_tmp=$(mktemp)
- rm -rf /opt/${APP}
- wget -q "https://github.com/AmruthPillai/Reactive-Resume/archive/refs/tags/v${RELEASE}.zip" -O $res_tmp
- unzip -q $res_tmp
- mv ${APP}-${RELEASE}/ /opt/${APP}
- cd /opt/${APP}
- export PUPPETEER_SKIP_DOWNLOAD="true"
- export NEXT_TELEMETRY_DISABLED=1
- export CI="true"
- export NODE_ENV="production"
- $STD pnpm install --frozen-lockfile
- $STD pnpm run build
- $STD pnpm run prisma:generate
- mv /opt/rxresume.env /opt/${APP}/.env
- msg_ok "Updated $APP to v${RELEASE}"
- msg_info "Updating Minio"
- systemctl stop minio
- cd /tmp
- wget -q https://dl.min.io/server/minio/release/linux-amd64/minio.deb
- $STD dpkg -i minio.deb
- msg_ok "Updated Minio"
- msg_info "Updating Browserless (Patience)"
- systemctl stop browserless
- cp /opt/browserless/.env /opt/browserless.env
- rm -rf browserless
- brwsr_tmp=$(mktemp)
- TAG=$(curl -s https://api.github.com/repos/browserless/browserless/tags?per_page=1 | grep "name" | awk '{print substr($2, 3, length($2)-4) }')
- wget -q https://github.com/browserless/browserless/archive/refs/tags/v${TAG}.zip -O $brwsr_tmp
- unzip -q $brwsr_tmp
- mv browserless-${TAG}/ /opt/browserless
- cd /opt/browserless
- $STD npm install
- rm -rf src/routes/{chrome,edge,firefox,webkit}
- $STD node_modules/playwright-core/cli.js install --with-deps chromium
- $STD npm run build
- $STD npm run build:function
- $STD npm prune production
- mv /opt/browserless.env /opt/browserless/.env
- msg_ok "Updated Browserless"
- msg_info "Restarting services"
- systemctl start minio Reactive-Resume browserless
- msg_ok "Restarted services"
- msg_info "Cleaning Up"
- rm -f /tmp/minio.deb
- rm -f $brwsr_tmp
- rm -f $res_tmp
- msg_ok "Cleanup Completed"
- echo "${RELEASE}" >/opt/${APP}_version.txt
- msg_ok "Update Successful"
- else
- msg_ok "No update required. ${APP} is already at v${RELEASE}"
- fi
- exit
}
start

@ -9665,9 +9665,9 @@
"license": "MIT"
},
"node_modules/vite": {
- "version": "6.2.5",
- "resolved": "https://registry.npmjs.org/vite/-/vite-6.2.5.tgz",
- "integrity": "sha512-j023J/hCAa4pRIUH6J9HemwYfjB5llR2Ps0CWeikOtdR8+pAURAk0DoJC5/mm9kd+UgdnIy7d6HE4EAvlYhPhA==",
+ "version": "6.2.6",
+ "resolved": "https://registry.npmjs.org/vite/-/vite-6.2.6.tgz",
+ "integrity": "sha512-9xpjNl3kR4rVDZgPNdTL0/c6ao4km69a/2ihNQbcANz8RuCOK3hQBmLSJf3bRKVQjVMda+YvizNE8AwvogcPbw==",
"dev": true,
"license": "MIT",
"dependencies": {


@ -0,0 +1,54 @@
{
"name": "Alpine-rclone",
"slug": "alpine-rclone",
"categories": [
20
],
"date_created": "2025-04-11",
"type": "ct",
"updateable": true,
"privileged": false,
"interface_port": 3000,
"documentation": "https://rclone.org/docs/",
"website": "https://rclone.org/",
"logo": "https://cdn.jsdelivr.net/gh/selfhst/icons/svg/rclone.svg",
"description": "Rclone is a command-line program to manage files on cloud storage. It is a feature-rich alternative to cloud vendors' web storage interfaces",
"install_methods": [
{
"type": "default",
"script": "ct/alpine-rclone.sh",
"resources": {
"cpu": 1,
"ram": 256,
"hdd": 0.2,
"os": "alpine",
"version": "3.21"
}
},
{
"type": "alpine",
"script": "ct/alpine-rclone.sh",
"resources": {
"cpu": 1,
"ram": 256,
"hdd": 0.2,
"os": "alpine",
"version": "3.21"
}
}
],
"default_credentials": {
"username": null,
"password": null
},
"notes": [
{
"type": "info",
"text": "`cat ~/rclone.creds` to view login credentials"
},
{
"type": "info",
"text": "`htpasswd -b -B /opt/rclone/login.pwd newuser newuserpassword` to add more users."
}
]
}
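A minimal sketch of how the two notes above fit together inside the container, assuming the default paths (~/rclone.creds, /opt/rclone/login.pwd) and the OpenRC service name rclone created by the alpine-rclone install script later in this diff:

cat ~/rclone.creds                                             # show the generated admin login
htpasswd -b -B /opt/rclone/login.pwd newuser newuserpassword   # add another Web-GUI user (bcrypt hash)
rc-service rclone restart                                      # restart the service afterwards, just to be safe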


@ -0,0 +1,34 @@
{
"name": "Documenso",
"slug": "documenso",
"categories": [
12
],
"date_created": "2025-04-14",
"type": "ct",
"updateable": false,
"privileged": false,
"interface_port": 3000,
"documentation": "https://documenso.com/",
"website": "https://documenso.com/",
"logo": "https://raw.githubusercontent.com/selfhst/icons/refs/heads/main/svg/documenso.svg",
"description": "Signing documents digitally should be fast and easy and should be the best practice for every document signed worldwide. This is technically quite easy today, but it also introduces a new party to every signature: The signing tool providers. While this is not a problem in itself, it should make us think about how we want these providers of trust to work. Documenso aims to be the world's most trusted document-signing tool. This trust is built by empowering you to self-host Documenso and review how it works under the hood.",
"install_methods": [
{
"type": "default",
"script": "ct/documenso.sh",
"resources": {
"cpu": 4,
"ram": 6144,
"hdd": 10,
"os": "Debian",
"version": "12"
}
}
],
"default_credentials": {
"username": "helper-scripts@local.com",
"password": "helper-scripts"
},
"notes": []
}


@ -1,39 +0,0 @@
{
"name": "openziti-controller",
"slug": "openziti-controller",
"categories": [
4
],
"date_created": "2025-03-20",
"type": "ct",
"updateable": true,
"privileged": false,
"interface_port": null,
"documentation": "https://openziti.io/docs/reference/tunnelers/docker/",
"website": "https://www.openziti.io/",
"logo": "https://raw.githubusercontent.com/openziti/ziti-doc/main/docusaurus/static/img/ziti-logo-dark.svg",
"description": "OpenZiti is an open-source, zero trust networking platform that enables secure connectivity between applications, services, and devices. It provides secure, encrypted connections between clients and services, and can be used to create secure, zero trust networks.",
"install_methods": [
{
"type": "default",
"script": "ct/openziti-controller.sh",
"resources": {
"cpu": 2,
"ram": 1024,
"hdd": 8,
"os": "debian",
"version": "12"
}
}
],
"default_credentials": {
"username": null,
"password": null
},
"notes": [
{
"text": "The Openziti Controller installation will prompt for configuration settings during installation.",
"type": "info"
}
]
}

@ -1,8 +1,238 @@
[
{
"name": "evcc-io/evcc",
"version": "0.203.0",
"date": "2025-04-13T11:49:39Z"
},
{
"name": "fhem/fhem-mirror",
"version": "6.2",
"date": "2025-04-13T10:32:39Z"
},
{
"name": "syncthing/syncthing",
"version": "v2.0.0-beta.9",
"date": "2025-04-12T13:58:29Z"
},
{
"name": "Lidarr/Lidarr",
"version": "v2.10.3.4602",
"date": "2025-03-23T11:00:37Z"
},
{
"name": "Readarr/Readarr",
"version": "v2.0.0.4645",
"date": "2017-03-07T18:56:06Z"
},
{
"name": "Prowlarr/Prowlarr",
"version": "v1.33.3.5008",
"date": "2025-04-09T17:58:37Z"
},
{
"name": "Radarr/Radarr",
"version": "v5.21.1.9799",
"date": "2025-03-24T15:52:12Z"
},
{
"name": "morpheus65535/bazarr",
"version": "v1.5.1",
"date": "2025-01-01T16:15:52Z"
},
{
"name": "open-webui/open-webui",
"version": "v0.6.4",
"date": "2025-04-13T06:01:32Z"
},
{
"name": "Jackett/Jackett",
"version": "v0.22.1773",
"date": "2025-04-13T05:56:23Z"
},
{
"name": "slskd/slskd",
"version": "0.22.4",
"date": "2025-04-13T00:14:13Z"
},
{
"name": "Tautulli/Tautulli",
"version": "v2.15.2",
"date": "2025-04-12T23:27:51Z"
},
{
"name": "StarFleetCPTN/GoMFT",
"version": "v0.2.11",
"date": "2025-04-12T21:13:08Z"
},
{
"name": "semaphoreui/semaphore",
"version": "v2.14.0-beta1",
"date": "2025-04-12T20:14:09Z"
},
{
"name": "karakeep-app/karakeep",
"version": "sdk/v0.23.2",
"date": "2025-04-12T18:37:40Z"
},
{
"name": "MediaBrowser/Emby.Releases",
"version": "4.8.11.0",
"date": "2025-03-10T06:39:11Z"
},
{
"name": "rogerfar/rdt-client",
"version": "v2.0.104",
"date": "2025-04-12T14:24:39Z"
},
{
"name": "home-assistant/core",
"version": "2025.4.2",
"date": "2025-04-12T09:46:22Z"
},
{
"name": "readeck/readeck",
"version": "0.18.0",
"date": "2025-04-12T08:55:32Z"
},
{
"name": "runtipi/runtipi",
"version": "v3.10.0",
"date": "2025-03-15T14:38:16Z"
},
{
"name": "Bubka/2FAuth",
"version": "v5.5.2",
"date": "2025-04-11T22:00:06Z"
},
{
"name": "homarr-labs/homarr",
"version": "v1.16.0",
"date": "2025-04-11T19:15:24Z"
},
{
"name": "rabbitmq/rabbitmq-server",
"version": "v4.0.8",
"date": "2025-04-03T05:11:15Z"
},
{
"name": "firefly-iii/firefly-iii",
"version": "v6.2.10",
"date": "2025-03-22T13:02:26Z"
},
{
"name": "tailscale/tailscale",
"version": "v1.82.4",
"date": "2025-04-11T17:58:09Z"
},
{
"name": "coder/code-server",
"version": "v4.99.2",
"date": "2025-04-11T17:57:47Z"
},
{
"name": "keycloak/keycloak",
"version": "26.2.0",
"date": "2025-04-11T12:48:27Z"
},
{
"name": "duplicati/duplicati",
"version": "v2.1.0.113-2.1.0.113_canary_2025-04-11",
"date": "2025-04-11T16:57:07Z"
},
{
"name": "TriliumNext/Notes",
"version": "v0.0.0",
"date": "2025-04-11T14:18:00Z"
},
{
"name": "emqx/emqx",
"version": "e5.9.0-beta.3",
"date": "2025-04-11T14:17:53Z"
},
{
"name": "NLnetLabs/unbound",
"version": "release-1.23.0rc2",
"date": "2025-04-11T13:24:25Z"
},
{
"name": "docmost/docmost",
"version": "v0.10.1",
"date": "2025-04-11T12:42:08Z"
},
{
"name": "dgtlmoon/changedetection.io",
"version": "0.49.13",
"date": "2025-04-11T11:48:06Z"
},
{
"name": "sabnzbd/sabnzbd",
"version": "4.5.1",
"date": "2025-04-11T09:57:47Z"
},
{
"name": "Stirling-Tools/Stirling-PDF",
"version": "v0.45.4",
"date": "2025-04-11T08:45:04Z"
},
{
"name": "home-assistant/operating-system",
"version": "15.1",
"date": "2025-03-31T13:42:20Z"
},
{
"name": "outline/outline",
- "version": "v0.82.1-19",
- "date": "2025-04-10T01:03:03Z"
+ "version": "v0.83.0",
+ "date": "2025-04-11T03:53:10Z"
},
{
"name": "openobserve/openobserve",
"version": "v0.14.6-rc3",
"date": "2025-04-11T03:08:28Z"
},
{
"name": "Luligu/matterbridge",
"version": "2.2.8",
"date": "2025-04-10T20:30:49Z"
},
{
"name": "netbox-community/netbox",
"version": "v4.2.7",
"date": "2025-04-10T20:08:13Z"
},
{
"name": "gristlabs/grist-core",
"version": "v1.5.1",
"date": "2025-04-10T19:48:43Z"
},
{
"name": "apache/cassandra",
"version": "cassandra-5.0.4",
"date": "2025-04-10T16:32:00Z"
},
{
"name": "VictoriaMetrics/VictoriaMetrics",
"version": "v1.18.0-victorialogs",
"date": "2025-04-10T15:05:20Z"
},
{
"name": "NodeBB/NodeBB",
"version": "v4.2.1",
"date": "2025-04-10T14:03:47Z"
},
{
"name": "Paymenter/Paymenter",
"version": "v1.0.3",
"date": "2025-04-10T13:57:39Z"
},
{
"name": "docker/compose",
"version": "v2.35.0",
"date": "2025-04-10T13:45:22Z"
},
{
"name": "n8n-io/n8n",
"version": "n8n@1.86.1",
"date": "2025-04-09T09:20:55Z"
},
{
"name": "glanceapp/glance",
@ -19,46 +249,11 @@
"version": "v2.15.1",
"date": "2025-04-09T22:26:39Z"
},
{
"name": "fhem/fhem-mirror",
"version": "6.2",
"date": "2025-04-09T20:29:25Z"
},
{
"name": "syncthing/syncthing",
"version": "v2.0.0-beta.7",
"date": "2025-04-09T13:41:32Z"
},
{
"name": "NodeBB/NodeBB",
"version": "v4.3.0-beta.1",
"date": "2025-04-09T19:06:10Z"
},
{
"name": "docmost/docmost",
"version": "v0.10.0",
"date": "2025-04-09T18:58:20Z"
},
{
"name": "Prowlarr/Prowlarr",
"version": "v1.33.3.5008",
"date": "2025-04-09T17:58:37Z"
},
{
"name": "jupyter/notebook",
"version": "v7.4.0",
"date": "2025-04-09T17:36:14Z"
},
{
"name": "MediaBrowser/Emby.Releases",
"version": "4.8.11.0",
"date": "2025-03-10T06:39:11Z"
},
{
"name": "Stirling-Tools/Stirling-PDF",
"version": "v0.45.3",
"date": "2025-04-09T17:17:58Z"
},
{
"name": "Brandawg93/PeaNUT",
"version": "v5.7.1",
@ -69,21 +264,11 @@
"version": "10.0.18",
"date": "2025-02-12T11:07:02Z"
},
{
"name": "slskd/slskd",
"version": "0.22.3",
"date": "2025-04-09T14:02:12Z"
},
{
"name": "HabitRPG/habitica",
"version": "v5.35.2",
"date": "2025-04-09T13:46:58Z"
},
{
"name": "homarr-labs/homarr",
"version": "v1.15.0",
"date": "2025-04-09T13:27:14Z"
},
{
"name": "zitadel/zitadel",
"version": "v2.69.10",
@ -94,51 +279,16 @@
"version": "debian/12.0.14",
"date": "2025-04-09T10:09:00Z"
},
{
"name": "n8n-io/n8n",
"version": "n8n@1.86.1",
"date": "2025-04-09T09:20:55Z"
},
{
"name": "esphome/esphome",
"version": "2025.3.3",
"date": "2025-03-31T22:07:05Z"
},
{
"name": "runtipi/runtipi",
"version": "nightly",
"date": "2025-04-08T07:12:33Z"
},
{
"name": "morpheus65535/bazarr",
"version": "v1.5.1",
"date": "2025-01-01T16:15:52Z"
},
{
"name": "Jackett/Jackett",
"version": "v0.22.1751",
"date": "2025-04-09T06:00:36Z"
},
{
"name": "StarFleetCPTN/GoMFT",
"version": "v0.2.7",
"date": "2025-04-09T04:53:30Z"
},
{
"name": "OliveTin/OliveTin",
"version": "2025.4.8",
"date": "2025-04-08T22:02:50Z"
},
{
"name": "coder/code-server",
"version": "v4.99.1",
"date": "2025-04-08T21:35:21Z"
},
{
"name": "semaphoreui/semaphore",
"version": "v2.13.12",
"date": "2025-04-08T20:54:58Z"
},
{
"name": "minio/minio",
"version": "RELEASE.2025-04-08T15-41-24Z",
@ -149,11 +299,6 @@
"version": "version/2025.2.4",
"date": "2025-04-08T18:39:57Z"
},
{
"name": "keycloak/keycloak",
"version": "26.1.4",
"date": "2025-03-13T15:41:42Z"
},
{
"name": "influxdata/influxdb",
"version": "v1.12.0rc1",
@ -169,11 +314,6 @@
"version": "v0.107.59",
"date": "2025-03-21T11:11:39Z"
},
{
"name": "openobserve/openobserve",
"version": "v0.14.6-rc2",
"date": "2025-04-08T14:42:54Z"
},
{
"name": "element-hq/synapse",
"version": "v1.128.0",
@ -189,11 +329,6 @@
"version": "v0.303.0-rc.1",
"date": "2025-04-07T04:39:38Z"
},
{
"name": "NLnetLabs/unbound",
"version": "release-1.23.0rc1",
"date": "2025-04-08T06:39:46Z"
},
{
"name": "pelican-dev/panel",
"version": "v1.0.0-beta19",
@ -229,21 +364,11 @@
"version": "v1.10.0",
"date": "2025-04-07T14:32:15Z"
},
{
"name": "apache/cassandra",
"version": "5.0.4-tentative",
"date": "2025-04-07T12:05:05Z"
},
{
"name": "Graylog2/graylog2-server",
"version": "6.2.0-beta.4",
"date": "2025-04-07T11:28:13Z"
},
{
"name": "VictoriaMetrics/VictoriaMetrics",
"version": "pmm-6401-v1.115.0",
"date": "2025-04-07T11:15:53Z"
},
{
"name": "fallenbagel/jellyseerr",
"version": "preview-back-to-axios",
@ -274,21 +399,6 @@
"version": "server/public/v0.1.11",
"date": "2025-03-28T14:04:31Z"
},
{
"name": "open-webui/open-webui",
"version": "v0.6.2",
"date": "2025-04-07T03:41:23Z"
},
{
"name": "firefly-iii/firefly-iii",
"version": "v6.2.10",
"date": "2025-03-22T13:02:26Z"
},
{
"name": "Luligu/matterbridge",
"version": "2.2.7",
"date": "2025-04-06T20:00:53Z"
},
{
"name": "Dolibarr/dolibarr",
"version": "21.0.1",
@ -319,26 +429,11 @@
"version": "v1.19.7",
"date": "2025-04-06T14:22:44Z"
},
{
"name": "Radarr/Radarr",
"version": "v5.21.1.9799",
"date": "2025-03-24T15:52:12Z"
},
{
"name": "karakeep-app/karakeep",
"version": "extension/v1.2.4",
"date": "2025-04-06T11:56:18Z"
},
{
"name": "TechnitiumSoftware/DnsServer",
"version": "v13.5.0",
"date": "2025-04-06T11:24:50Z"
},
{
"name": "TriliumNext/Notes",
"version": "v0.92.6",
"date": "2025-04-06T10:38:54Z"
},
{
"name": "kimai/kimai",
"version": "2.32.0",
@ -369,11 +464,6 @@
"version": "2.3",
"date": "2025-04-05T18:05:36Z"
},
{
"name": "Paymenter/Paymenter",
"version": "v1.0.2",
"date": "2025-04-05T17:40:25Z"
},
{
"name": "bastienwirtz/homer",
"version": "v25.04.1",
@ -384,104 +474,9 @@
"version": "v0.55.2",
"date": "2025-04-05T12:07:32Z"
},
{
"name": "Kozea/Radicale",
"version": "v3.5.1",
"date": "2025-04-05T06:20:18Z"
},
{
"name": "actualbudget/actual",
"version": "v25.4.0",
"date": "2025-04-05T04:14:57Z"
},
{
"name": "home-assistant/core",
"version": "2025.4.1",
"date": "2025-04-04T20:59:57Z"
},
{
"name": "Koenkk/zigbee2mqtt",
"version": "2.2.1",
"date": "2025-04-04T20:15:48Z"
},
{
"name": "Athou/commafeed",
"version": "5.7.0",
"date": "2025-04-04T18:10:16Z"
},
{
"name": "icereed/paperless-gpt",
"version": "v0.14.4",
"date": "2025-04-04T14:18:53Z"
},
{
"name": "apache/tomcat",
"version": "9.0.104",
"date": "2025-04-04T12:58:11Z"
},
{
"name": "sabnzbd/sabnzbd",
"version": "4.5.0",
"date": "2025-03-30T16:17:11Z"
},
{
"name": "dgtlmoon/changedetection.io",
"version": "0.49.12",
"date": "2025-04-04T07:31:08Z"
},
{
"name": "OctoPrint/OctoPrint",
"version": "1.10.3",
"date": "2024-11-05T09:20:50Z"
},
{
"name": "rabbitmq/rabbitmq-server",
"version": "v4.0.8",
"date": "2025-04-03T05:11:15Z"
},
{
"name": "BookStackApp/BookStack",
"version": "v25.02.2",
"date": "2025-04-02T16:39:50Z"
},
{
"name": "cloudflare/cloudflared",
"version": "2025.4.0",
"date": "2025-04-02T15:38:53Z"
},
{
"name": "hargata/lubelog",
"version": "v1.4.6",
"date": "2025-04-02T14:07:12Z"
},
{
"name": "immich-app/immich",
"version": "v1.131.3",
"date": "2025-04-01T22:48:22Z"
},
{
"name": "MagicMirrorOrg/MagicMirror",
"version": "v2.31.0",
"date": "2025-04-01T18:12:45Z"
},
{
"name": "theonedev/onedev",
"version": "v11.8.6",
"date": "2025-04-01T13:52:03Z"
},
{
"name": "neo4j/neo4j",
"version": "5.26.5",
"date": "2025-04-01T09:32:48Z"
},
{
"name": "diced/zipline",
"version": "v4.0.2",
"date": "2025-04-01T04:51:05Z"
},
{
"name": "Kometa-Team/Kometa",
"version": "v2.2.0",
"date": "2025-03-31T21:31:48Z"
}
]


@ -1,47 +0,0 @@
#!/usr/bin/env bash
# Copyright (c) 2021-2025 community-scripts ORG
# Author: MickLesk (CanbiZ)
# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
# Source: https://adguardhome.com/
source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
color
verb_ip6
catch_errors
setting_up_container
network_check
update_os
msg_info "Downloading AdGuard Home"
$STD curl -fsSL -o /tmp/AdGuardHome_linux_amd64.tar.gz \
"https://github.com/AdguardTeam/AdGuardHome/releases/latest/download/AdGuardHome_linux_amd64.tar.gz"
msg_ok "Downloaded AdGuard Home"
msg_info "Installing AdGuard Home"
$STD tar -xzf /tmp/AdGuardHome_linux_amd64.tar.gz -C /opt
$STD rm /tmp/AdGuardHome_linux_amd64.tar.gz
msg_ok "Installed AdGuard Home"
msg_info "Creating AdGuard Home Service"
cat <<EOF >/etc/init.d/adguardhome
#!/sbin/openrc-run
name="AdGuardHome"
description="AdGuard Home Service"
command="/opt/AdGuardHome/AdGuardHome"
command_background="yes"
pidfile="/run/adguardhome.pid"
EOF
$STD chmod +x /etc/init.d/adguardhome
msg_ok "Created AdGuard Home Service"
msg_info "Enabling AdGuard Home Service"
$STD rc-update add adguardhome default
msg_ok "Enabled AdGuard Home Service"
msg_info "Starting AdGuard Home"
$STD rc-service adguardhome start
msg_ok "Started AdGuard Home"
motd_ssh
customize


@ -1,24 +0,0 @@
#!/usr/bin/env bash
source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
color
verb_ip6
catch_errors
setting_up_container
network_check
update_os
msg_info "Installing Dependencies"
$STD apk add --no-cache duplicati
msg_ok "Installed duplicati"
msg_info "Enabling duplicati Service"
$STD rc-update add duplicati default || true
msg_ok "Enabled duplicati Service"
msg_info "Starting duplicati"
$STD rc-service duplicati start || true
msg_ok "Started duplicati"
motd_ssh
customize


@ -0,0 +1,63 @@
#!/usr/bin/env bash
# Copyright (c) 2021-2025 community-scripts ORG
# Author: Slaviša Arežina (tremor021)
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://github.com/rclone/rclone
source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
color
verb_ip6
catch_errors
setting_up_container
network_check
update_os
msg_info "Installing dependencies"
$STD apk add --no-cache \
unzip \
apache2-utils
msg_ok "Installed dependencies"
msg_info "Installing rclone"
temp_file=$(mktemp)
mkdir -p /opt/rclone
RELEASE=$(curl -s https://api.github.com/repos/rclone/rclone/releases/latest | grep "tag_name" | awk '{print substr($2, 3, length($2)-4) }')
curl -fsSL "https://github.com/rclone/rclone/releases/download/v${RELEASE}/rclone-v${RELEASE}-linux-amd64.zip" -o $temp_file
$STD unzip -j $temp_file '*/**' -d /opt/rclone
cd /opt/rclone
PASSWORD=$(head -c 16 /dev/urandom | xxd -p -c 16)
$STD htpasswd -cb -B login.pwd admin $PASSWORD
{
echo "rclone-Credentials"
echo "rclone User Name: admin"
echo "rclone Password: $PASSWORD"
} >>~/rclone.creds
echo "${RELEASE}" >/opt/${APPLICATION}_version.txt
rm -f $temp_file
msg_ok "Installed rclone"
msg_info "Enabling rclone Service"
cat <<EOF >/etc/init.d/rclone
#!/sbin/openrc-run
description="rclone Service"
command="/opt/rclone/rclone"
command_args="rcd --rc-web-gui --rc-web-gui-no-open-browser --rc-addr :3000 --rc-htpasswd /opt/rclone/login.pwd"
command_background="true"
command_user="root"
pidfile="/var/run/rclone.pid"
depend() {
use net
}
EOF
chmod +x /etc/init.d/rclone
$STD rc-update add rclone default
msg_ok "Enabled rclone Service"
msg_info "Starting rclone"
$STD service rclone start
msg_ok "Started rclone"
motd_ssh
customize
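The service started above runs rclone rcd with the remote-control Web GUI on port 3000, protected by the htpasswd file created a few lines earlier. A quick smoke test from inside the container might look like this (assuming the admin password written to ~/rclone.creds; core/version is a standard rclone rc endpoint and rc calls are POST requests):

PASSWORD=$(awk '/rclone Password/ {print $3}' ~/rclone.creds)
curl -s -u admin:"$PASSWORD" -X POST http://localhost:3000/core/version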


@ -0,0 +1,91 @@
#!/usr/bin/env bash
# Copyright (c) 2021-2024 tteck
# Author: MickLesk (Canbiz)
# License: MIT
# https://github.com/tteck/Proxmox/raw/main/LICENSE
source /dev/stdin <<< "$FUNCTIONS_FILE_PATH"
color
verb_ip6
catch_errors
setting_up_container
network_check
update_os
msg_info "Installing Dependencies (Patience)"
apt-get install -y \
mariadb-server \
apache2 \
cron \
flac \
vorbis-tools \
lame \
ffmpeg \
lsb-release \
gosu \
wget \
curl \
git \
zip \
unzip \
sudo \
make \
mc
msg_ok "Installed Dependencies"
msg_info "Setting up PHP"
sudo curl -sSLo /usr/share/keyrings/deb.sury.org-php.gpg https://packages.sury.org/php/apt.gpg
sudo sh -c 'echo "deb [signed-by=/usr/share/keyrings/deb.sury.org-php.gpg] https://packages.sury.org/php/ $(lsb_release -sc) main" > /etc/apt/sources.list.d/php.list'
sudo apt update
sudo apt install -y php8.3 php8.3-{bcmath,bz2,cli,common,curl,fpm,gd,imagick,intl,mbstring,mysql,sqlite3,xml,xmlrpc,zip}
apt-get install -y \
libapache2-mod-php \
inotify-tools \
libavcodec-extra \
libev-libevent-dev \
libmp3lame-dev \
libtheora-dev \
libvorbis-dev \
libvpx-dev
msg_ok "PHP successfully setup"
msg_info "Setting up Database"
DB_NAME=ampache2
DB_USER=ampache2
DB_PASS=$(openssl rand -base64 18 | tr -dc 'a-zA-Z0-9' | head -c13)
sudo mysql -u root -e "CREATE DATABASE $DB_NAME;"
sudo mysql -u root -e "CREATE USER '$DB_USER'@'localhost' IDENTIFIED WITH mysql_native_password AS PASSWORD('$DB_PASS');"
sudo mysql -u root -e "GRANT ALL ON $DB_NAME.* TO '$DB_USER'@'localhost'; FLUSH PRIVILEGES;"
echo "" >>~/ampache.creds
echo -e "Ampache Database User: \e $DB_USER\e" >>~/ampache.creds
echo -e "Ampache Database Password: \e$DB_PASS\e" >>~/ampache.creds
echo -e "Ampache Database Name: \e$DB_NAME\e" >>~/ampache.creds
msg_ok "Set up database"
msg_info "Installing Ampache(Patience)"
cd /opt
AMPACHE_VERSION=$(wget -q https://github.com/ampache/ampache/releases/latest -O - | grep "title>Release" | cut -d " " -f 4)
wget https://github.com/ampache/ampache/releases/download/${AMPACHE_VERSION}/ampache-${AMPACHE_VERSION}_all_php8.3.zip
unzip -q ampache-${AMPACHE_VERSION}_all_php8.3.zip -d ampache
rm -rf /var/www/html
ln -s /opt/ampache/public /var/www/html
sudo mv /opt/ampache/rest/.htaccess.dist /opt/ampache/rest/.htaccess
sudo mv /opt/ampache/play/.htaccess.dist /opt/ampache/play/.htaccess
sudo mv /opt/ampache/channel/.htaccess.dist /opt/ampache/channel/.htaccess
sudo cp /opt/ampache/config/ampache.cfg.php.dist /opt/ampache/config/ampache.cfg.php
sudo chmod 664 /opt/ampache/rest/.htaccess /opt/ampache/play/.htaccess
sudo sed -i 's/upload_max_filesize = .*/upload_max_filesize = 50M/' /etc/php/8.3/apache2/php.ini \
&& sudo sed -i 's/post_max_size = .*/post_max_size = 50M/' /etc/php/8.3/apache2/php.ini \
&& sudo sed -i 's/max_execution_time = .*/max_execution_time = 300/' /etc/php/8.3/apache2/php.ini \
&& sudo sed -i 's/memory_limit = .*/memory_limit = 256M/' /etc/php/8.3/apache2/php.ini \
&& sudo systemctl restart apache2
msg_ok "Installed Ampache"
motd_ssh
customize
msg_info "Cleaning up"
$STD apt-get autoremove
$STD apt-get autoclean
msg_ok "Cleaned"


@ -0,0 +1,152 @@
#!/usr/bin/env bash
# Copyright (c) 2021-2024 tteck
# Author: tteck
# Co-Author: MickLesk (Canbiz)
# License: MIT
# https://github.com/tteck/Proxmox/raw/main/LICENSE
# Source: https://github.com/AnalogJ/scrutiny
source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
color
verb_ip6
catch_errors
setting_up_container
network_check
update_os
# Install required packages
msg_info "Installing Dependencies"
$STD apt-get install -y \
sudo \
curl \
uwsgi \
uwsgi-plugin-python3 \
libopenjp2-7-dev \
libpq-dev \
git \
nginx \
python3 \
python3-pip \
python3-venv \
pipx
msg_ok "Installed Dependencies"
# Install Python3 and pipx
#msg_info "Installing Python3 & PipX"
#$STD apt-get install -y python3 python3-dev python3-dotenv python3-pip
#source /opt/babybuddy/.venv/bin/activate
#msg_ok "Installed Python3 & PipX"
# Variables
INSTALL_DIR="/opt/babybuddy"
APP_DIR="$INSTALL_DIR"
DATA_DIR="$INSTALL_DIR/data"
DOMAIN="babybuddy.example.com" # Ändern, falls benötigt
SECRET_KEY=$(openssl rand -base64 32 | tr -dc 'a-zA-Z0-9' | cut -c1-32)
# Install the Babybuddy repository
msg_info "Installing Babybuddy"
cd /opt
RELEASE=$(curl -s https://api.github.com/repos/babybuddy/babybuddy/releases/latest | grep "tag_name" | awk '{print substr($2, 3, length($2)-4) }')
wget -q "https://github.com/babybuddy/babybuddy/archive/refs/tags/v${RELEASE}.zip"
unzip -q v${RELEASE}.zip
mv babybuddy-${RELEASE} /opt/babybuddy
cd /opt/babybuddy
source /opt/babybuddy/.venv/bin/activate
export PIPENV_VENV_IN_PROJECT=1
pipenv install
pipenv shell
cp babybuddy/settings/production.example.py babybuddy/settings/production.py
# Configure production settings
SECRET_KEY=$(openssl rand -base64 32 | tr -dc 'a-zA-Z0-9' | cut -c1-32)
ALLOWED_HOSTS=$(hostname -I | tr ' ' ',' | sed 's/,$//')",127.0.0.1,localhost"
sed -i \
-e "s/^SECRET_KEY = \"\"/SECRET_KEY = \"$SECRET_KEY\"/" \
-e "s/^ALLOWED_HOSTS = \[\"\"\]/ALLOWED_HOSTS = \[$(echo \"$ALLOWED_HOSTS\" | sed 's/,/\",\"/g')\]/" \
babybuddy/settings/production.py
# Run Django migrations
export DJANGO_SETTINGS_MODULE=babybuddy.settings.production
python manage.py migrate
# Set permissions
sudo chown -R www-data:www-data /opt/babybuddy/data
sudo chmod 640 /opt/babybuddy/data/db.sqlite3
sudo chmod 750 /opt/babybuddy/data
msg_ok "Installed BabyBuddy WebApp"
# Django Admin Setup
DJANGO_ADMIN_USER=admin
DJANGO_ADMIN_PASS=$(openssl rand -base64 18 | tr -dc 'a-zA-Z0-9' | cut -c1-13)
source /opt/babybuddy/bin/activate
$STD python3 /opt/babybuddy/manage.py shell << EOF
from django.contrib.auth import get_user_model
UserModel = get_user_model()
user = UserModel.objects.create_user('$DJANGO_ADMIN_USER', password='$DJANGO_ADMIN_PASS')
user.is_superuser = True
user.is_staff = True
user.save()
EOF
{
echo ""
echo "Django-Credentials"
echo "Django Admin User: $DJANGO_ADMIN_USER"
echo "Django Admin Password: $DJANGO_ADMIN_PASS"
} >> ~/babybuddy.creds
msg_ok "Setup Django Admin"
# Configure uWSGI
msg_info "Configuring uWSGI"
sudo bash -c "cat > /etc/uwsgi/apps-available/babybuddy.ini" <<EOF
[uwsgi]
plugins = python3
project = babybuddy
base_dir = $INSTALL_DIR
chdir = %(base_dir)/public
virtualenv = %(chdir)/.venv
module = %(project).wsgi:application
env = DJANGO_SETTINGS_MODULE=%(project).settings.production
master = True
vacuum = True
EOF
sudo ln -sf /etc/uwsgi/apps-available/babybuddy.ini /etc/uwsgi/apps-enabled/babybuddy.ini
sudo service uwsgi restart
# Configure NGINX
msg_info "Configuring NGINX"
sudo bash -c "cat > /etc/nginx/sites-available/babybuddy" <<EOF
upstream babybuddy {
server unix:///var/run/uwsgi/app/babybuddy/socket;
}
server {
listen 80;
server_name $DOMAIN;
location / {
uwsgi_pass babybuddy;
include uwsgi_params;
}
location /media {
alias $DATA_DIR/media;
}
}
EOF
sudo ln -sf /etc/nginx/sites-available/babybuddy /etc/nginx/sites-enabled/babybuddy
sudo service nginx restart
# Final message
echo "Deployment complete! Visit http://$DOMAIN"
# Cleanup
msg_info "Cleaning up"
$STD apt-get -y autoremove
$STD apt-get -y autoclean
msg_ok "Cleaned"

@ -1,9 +1,8 @@
#!/usr/bin/env bash
- # Copyright (c) 2021-2024 tteck
+ # Copyright (c) 2021-2025 community-scripts ORG
# Author: MickLesk (Canbiz)
- # License: MIT
- # https://github.com/tteck/Proxmox/raw/main/LICENSE
+ # License: MIT |
source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
color
@ -13,27 +12,26 @@ setting_up_container
network_check
update_os
+ msg_info "Setup Functions"
+ setup_local_ip_helper
+ import_local_ip
+ msg_ok "Setup Functions"
msg_info "Installing Dependencies (Patience)"
$STD apt-get install -y \
- unzip \
htop \
gnupg2 \
ca-certificates \
default-jdk \
apt-transport-https \
ghostscript \
tesseract-ocr \
tesseract-ocr-deu \
tesseract-ocr-eng \
unpaper \
unoconv \
wkhtmltopdf \
- ocrmypdf \
- zip \
- curl \
- sudo \
- make \
- mc
+ ocrmypdf
msg_ok "Installed Dependencies"
@ -53,10 +51,10 @@ $STD sudo -u postgres psql -c "ALTER ROLE $DB_USER SET client_encoding TO 'utf8'
$STD sudo -u postgres psql -c "ALTER ROLE $DB_USER SET default_transaction_isolation TO 'read committed';"
$STD sudo -u postgres psql -c "ALTER ROLE $DB_USER SET timezone TO 'UTC';"
{
echo "Docspell-Credentials"
echo "Docspell Database Name: $DB_NAME"
echo "Docspell Database User: $DB_USER"
echo "Docspell Database Password: $DB_PASS"
} >>~/docspell.creds
msg_ok "Set up PostgreSQL Database"
@ -65,46 +63,66 @@ mkdir -p /opt/docspell
Docspell=$(curl -fsSL https://github.com/eikek/docspell/releases/latest -o - | grep "title>Release" | cut -d " " -f 5)
DocspellDSC=$(curl -fsSL https://github.com/docspell/dsc/releases/latest -o - | grep "title>Release" | cut -d " " -f 4 | sed 's/^v//')
cd /opt
- curl -fsSL https://github.com/eikek/docspell/releases/download/v${Docspell}/docspell-joex_${Docspell}_all.deb
- curl -fsSL https://github.com/eikek/docspell/releases/download/v${Docspell}/docspell-restserver_${Docspell}_all.deb
+ curl -fsSL https://github.com/eikek/docspell/releases/download/v${Docspell}/docspell-joex_${Docspell}_all.deb -o docspell-joex_${Docspell}_all.deb
+ curl -fsSL https://github.com/eikek/docspell/releases/download/v${Docspell}/docspell-restserver_${Docspell}_all.deb -o docspell-restserver_${Docspell}_all.deb
$STD dpkg -i docspell-*.deb
- curl -fsSL https://github.com/docspell/dsc/releases/download/v${DocspellDSC}/dsc_amd64-musl-${DocspellDSC}
+ curl -fsSL https://github.com/docspell/dsc/releases/download/v${DocspellDSC}/dsc_amd64-musl-${DocspellDSC} -o dsc_amd64-musl-${DocspellDSC}
mv dsc_amd* dsc
chmod +x dsc
mv dsc /usr/bin
ln -s /etc/docspell-joex /opt/docspell/docspell-joex && ln -s /etc/docspell-restserver /opt/docspell/docspell-restserver && ln -s /usr/bin/dsc /opt/docspell/dsc
curl -fsSL https://github.com/mikefarah/yq/releases/latest/download/yq_linux_amd64 -o /usr/bin/yq
chmod +x /usr/bin/yq
- JOEX_CONF="/usr/share/docspell-joex/conf/docspell-joex.conf"
- SERVER_CONF="/usr/share/docspell-restserver/conf/docspell-server.conf"
- sed -i 's|address = "localhost"|address = "0.0.0.0"|' "$JOEX_CONF" "$SERVER_CONF"
- sed -i -E '/backend\s*\{/,/\}/ {
- /jdbc\s*\{/,/\}/ {
- s|(url\s*=\s*).*|\1"jdbc:postgresql://localhost:5432/'"$DB_NAME"'"|;
- s|(user\s*=\s*).*|\1"'"$DB_USER"'"|;
- s|(password\s*=\s*).*|\1"'"$DB_PASS"'"|;
- }
- }' "$SERVER_CONF"
- sed -i -E '/postgresql\s*\{/,/\}/ {
- /jdbc\s*\{/,/\}/ {
- s|(url\s*=\s*).*|\1"jdbc:postgresql://localhost:5432/'"$DB_NAME"'"|;
- s|(user\s*=\s*).*|\1"'"$DB_USER"'"|;
- s|(password\s*=\s*).*|\1"'"$DB_PASS"'"|;
- }
- }' "$SERVER_CONF"
- sed -i -E '/jdbc\s*\{/,/\}/ {
- s|(url\s*=\s*).*|\1"jdbc:postgresql://localhost:5432/'"$DB_NAME"'"|;
- s|(user\s*=\s*).*|\1"'"$DB_USER"'"|;
- s|(password\s*=\s*).*|\1"'"$DB_PASS"'"|;
- }' "$JOEX_CONF"
+ #JOEX_CONF="/usr/share/docspell-joex/conf/docspell-joex.conf"
+ #SERVER_CONF="/usr/share/docspell-restserver/conf/docspell-server.conf"
+ sed -i \
+ -e '11s|localhost|'"$LOCAL_IP"'|' \
+ -e '17s|localhost|'"$LOCAL_IP"'|' \
+ -e '49s|url = .*|url = "jdbc:postgresql://localhost:5432/'"$DB_NAME"'"|' \
+ -e '52s|user = .*|user = "'"$DB_USER"'"|' \
+ -e '55s|password = .*|password = "'"$DB_PASS"'"|' \
+ -e '827s|url = .*|url = "jdbc:postgresql://localhost:5432/'"$DB_NAME"'"|' \
+ -e '828s|user = .*|user = "'"$DB_USER"'"|' \
+ -e '829s|password = .*|password = "'"$DB_PASS"'"|' \
+ /usr/share/docspell-joex/conf/docspell-joex.conf
+
+ sed -i \
+ -e '16s|http://localhost:7880|http://'"$LOCAL_IP"':7880|' \
+ -e '22s|http://localhost:7880|http://'"$LOCAL_IP"':7880|' \
+ -e '356s|url = .*|url = "jdbc:postgresql://localhost:5432/'"$DB_NAME"'"|' \
+ -e '357s|user = .*|user = "'"$DB_USER"'"|' \
+ -e '358s|password = .*|password = "'"$DB_PASS"'"|' \
+ -e '401s|url = .*|url = "jdbc:postgresql://localhost:5432/'"$DB_NAME"'"|' \
+ /usr/share/docspell-restserver/conf/docspell-server.conf
+
+ # sed -i 's|address = "localhost"|address = "0.0.0.0"|' "$JOEX_CONF" "$SERVER_CONF"
+ # sed -i -E '/backend\s*\{/,/\}/ {
+ # /jdbc\s*\{/,/\}/ {
+ # s|(url\s*=\s*).*|\1"jdbc:postgresql://localhost:5432/'"$DB_NAME"'"|;
+ # s|(user\s*=\s*).*|\1"'"$DB_USER"'"|;
+ # s|(password\s*=\s*).*|\1"'"$DB_PASS"'"|;
+ # }
+ # }' "$SERVER_CONF"
+ # sed -i -E '/postgresql\s*\{/,/\}/ {
+ # /jdbc\s*\{/,/\}/ {
+ # s|(url\s*=\s*).*|\1"jdbc:postgresql://localhost:5432/'"$DB_NAME"'"|;
+ # s|(user\s*=\s*).*|\1"'"$DB_USER"'"|;
+ # s|(password\s*=\s*).*|\1"'"$DB_PASS"'"|;
+ # }
+ # }' "$SERVER_CONF"
+ # sed -i -E '/jdbc\s*\{/,/\}/ {
+ # s|(url\s*=\s*).*|\1"jdbc:postgresql://localhost:5432/'"$DB_NAME"'"|;
+ # s|(user\s*=\s*).*|\1"'"$DB_USER"'"|;
+ # s|(password\s*=\s*).*|\1"'"$DB_PASS"'"|;
+ # }' "$JOEX_CONF"
msg_ok "Setup Docspell"
msg_info "Setup Apache Solr"
cd /opt/docspell
SOLR_DOWNLOAD_URL="https://downloads.apache.org/lucene/solr/"
- latest_version=$(curl -s "$SOLR_DOWNLOAD_URL" | grep -oP '(?<=<a href=")[^"]+(?=/">[0-9])' | head -n 1)
+ latest_version=$(curl -fsSL "$SOLR_DOWNLOAD_URL" | grep -oP '(?<=<a href=")[^"]+(?=/">[0-9])' | head -n 1)
download_url="${SOLR_DOWNLOAD_URL}${latest_version}/solr-${latest_version}.tgz"
- curl -fsSL "$download_url"
+ curl -fsSL "$download_url" -o "solr-$latest_version.tgz"
tar xzf "solr-$latest_version.tgz"
$STD bash "/opt/docspell/solr-$latest_version/bin/install_solr_service.sh" "solr-$latest_version.tgz"
mv /opt/solr /opt/docspell/solr
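The rewritten configuration block above replaces the old block-matching sed expressions with line-addressed ones: each -e 'Ns|pattern|replacement|' rewrites only line N of the target config file. A generic illustration of that sed form (hypothetical file and values, not taken from the Docspell configs):

printf 'url = localhost\nuser = nobody\n' > /tmp/example.conf
sed -i \
  -e '1s|url = .*|url = "jdbc:postgresql://localhost:5432/mydb"|' \
  -e '2s|user = .*|user = "myuser"|' \
  /tmp/example.conf
cat /tmp/example.conf   # both lines now carry the substituted values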


@ -0,0 +1,126 @@
#!/usr/bin/env bash
# Copyright (c) 2021-2025 community-scripts ORG
# Author: MickLesk (Canbiz)
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://github.com/documenso/documenso
source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
color
verb_ip6
catch_errors
setting_up_container
network_check
update_os
msg_info "Setup Functions"
setup_local_ip_helper
import_local_ip
msg_ok "Setup Functions"
msg_info "Installing Dependencies"
$STD apt-get install -y \
gpg \
libc6 \
make \
cmake \
jq \
postgresql \
python3 \
python3-bcrypt
msg_ok "Installed Dependencies"
msg_info "Setting up Node.js Repository"
mkdir -p /etc/apt/keyrings
curl -fsSL https://deb.nodesource.com/gpgkey/nodesource-repo.gpg.key | gpg --dearmor -o /etc/apt/keyrings/nodesource.gpg
echo "deb [signed-by=/etc/apt/keyrings/nodesource.gpg] https://deb.nodesource.com/node_22.x nodistro main" >/etc/apt/sources.list.d/nodesource.list
msg_ok "Set up Node.js Repository"
msg_info "Installing Node.js"
$STD apt-get update
$STD apt-get install -y nodejs
$STD npm install -g turbo@1.9.3
msg_ok "Installed Node.js"
msg_info "Setting up PostgreSQL"
DB_NAME="documenso_db"
DB_USER="documenso_user"
DB_PASS="$(openssl rand -base64 18 | tr -dc 'a-zA-Z0-9' | cut -c1-13)"
$STD sudo -u postgres psql -c "CREATE ROLE $DB_USER WITH LOGIN PASSWORD '$DB_PASS';"
$STD sudo -u postgres psql -c "CREATE DATABASE $DB_NAME WITH OWNER $DB_USER ENCODING 'UTF8' TEMPLATE template0;"
$STD sudo -u postgres psql -c "ALTER ROLE $DB_USER SET client_encoding TO 'utf8';"
$STD sudo -u postgres psql -c "ALTER ROLE $DB_USER SET default_transaction_isolation TO 'read committed';"
$STD sudo -u postgres psql -c "ALTER ROLE $DB_USER SET timezone TO 'UTC'"
{
echo "Documenso-Credentials"
echo "Database Name: $DB_NAME"
echo "Database User: $DB_USER"
echo "Database Password: $DB_PASS"
} >>~/documenso.creds
msg_ok "Set up PostgreSQL"
msg_info "Installing Documenso (Patience)"
cd /opt
RELEASE=$(curl -fsSL https://api.github.com/repos/documenso/documenso/releases/latest | grep "tag_name" | awk '{print substr($2, 3, length($2)-4) }')
wget -q "https://github.com/documenso/documenso/archive/refs/tags/v${RELEASE}.zip"
unzip -q v${RELEASE}.zip
mv documenso-${RELEASE} /opt/documenso
cd /opt/documenso
mv .env.example /opt/documenso/.env
sed -i \
-e "s|^NEXTAUTH_SECRET=.*|NEXTAUTH_SECRET='$(openssl rand -base64 32 | tr -dc 'a-zA-Z0-9' | cut -c1-32)'|" \
-e "s|^NEXT_PRIVATE_ENCRYPTION_KEY=.*|NEXT_PRIVATE_ENCRYPTION_KEY='$(openssl rand -base64 32 | tr -dc 'a-zA-Z0-9' | cut -c1-32)'|" \
-e "s|^NEXT_PRIVATE_ENCRYPTION_SECONDARY_KEY=.*|NEXT_PRIVATE_ENCRYPTION_SECONDARY_KEY='$(openssl rand -base64 32 | tr -dc 'a-zA-Z0-9' | cut -c1-32)'|" \
-e "s|^DOCUMENSO_ENCRYPTION_KEY=.*|DOCUMENSO_ENCRYPTION_KEY='$(openssl rand -base64 32 | tr -dc 'a-zA-Z0-9' | cut -c1-32)'|" \
-e "s|^DOCUMENSO_ENCRYPTION_SECONDARY_KEY=.*|DOCUMENSO_ENCRYPTION_SECONDARY_KEY='$(openssl rand -base64 32 | tr -dc 'a-zA-Z0-9' | cut -c1-32)'|" \
-e "s|^NEXTAUTH_URL=.*|NEXTAUTH_URL=\"http://${LOCAL_IP}:3000\"|" \
-e "s|^NEXT_PUBLIC_WEBAPP_URL=.*|NEXT_PUBLIC_WEBAPP_URL='http://${LOCAL_IP}:9000'|" \
-e "s|^NEXT_PUBLIC_MARKETING_URL=.*|NEXT_PUBLIC_MARKETING_URL=\"http://${LOCAL_IP}:3001\"|" \
-e "s|^NEXT_PRIVATE_INTERNAL_WEBAPP_URL=.*|NEXT_PRIVATE_INTERNAL_WEBAPP_URL=\"http://${LOCAL_IP}:3000\"|" \
-e "s|^NEXT_PRIVATE_DATABASE_URL=.*|NEXT_PRIVATE_DATABASE_URL=\"postgres://$DB_USER:$DB_PASS@localhost:5432/$DB_NAME\"|" \
-e "s|^NEXT_PRIVATE_DIRECT_DATABASE_URL=.*|NEXT_PRIVATE_DIRECT_DATABASE_URL=\"postgres://$DB_USER:$DB_PASS@localhost:5432/$DB_NAME\"|" \
/opt/documenso/.env
export TURBO_CACHE=1
export NEXT_TELEMETRY_DISABLED=1
export CYPRESS_INSTALL_BINARY=0
export NODE_OPTIONS="--max-old-space-size=4096"
# $STD npm ci --cache ~/.npm-cache --maxsockets=5
# $STD npm run build
# $STD npx prisma migrate deploy --schema ./packages/prisma/schema.prisma
$STD npm ci
$STD npm run build:web
$STD npm run prisma:migrate-deploy
echo "${RELEASE}" >"/opt/${APPLICATION}_version.txt"
msg_ok "Installed Documenso"
msg_info "Create User"
PASSWORD_HASH=$(python3 -c "import bcrypt; print(bcrypt.hashpw(b'helper-scripts', bcrypt.gensalt(rounds=12)).decode())")
sudo -u postgres psql -d documenso_db -c "INSERT INTO \"User\" (name, email, \"emailVerified\", password, \"identityProvider\", roles, \"createdAt\", \"lastSignedIn\", \"updatedAt\", \"customerId\") VALUES ('helper-scripts', 'helper-scripts@local.com', '2025-01-20 17:14:45.058', '$PASSWORD_HASH', 'DOCUMENSO', ARRAY['USER', 'ADMIN']::\"Role\"[], '2025-01-20 16:04:05.543', '2025-01-20 16:14:55.249', '2025-01-20 16:14:55.25', NULL) RETURNING id;"
$STD npm run prisma:migrate-deploy
msg_ok "User created"
msg_info "Creating Service"
cat <<EOF >/etc/systemd/system/documenso.service
[Unit]
Description=Documenso Service
After=network.target postgresql.service
[Service]
WorkingDirectory=/opt/documenso/apps/web
ExecStart=/usr/bin/npm start
Restart=always
EnvironmentFile=/opt/documenso/.env
[Install]
WantedBy=multi-user.target
EOF
systemctl enable -q --now documenso
msg_ok "Created Service"
motd_ssh
customize
msg_info "Cleaning up"
$STD apt-get -y autoremove
$STD apt-get -y autoclean
msg_ok "Cleaned"

View File

@ -0,0 +1,161 @@
#!/usr/bin/env bash
# Copyright (c) 2021-2024 tteck
# Author: tteck (tteckster)
# License: MIT
# https://github.com/tteck/Proxmox/raw/main/LICENSE
source /dev/stdin <<< "$FUNCTIONS_FILE_PATH"
color
verb_ip6
catch_errors
setting_up_container
network_check
update_os
msg_info "Installing Python3"
$STD apt-get install -y --no-install-recommends \
python3 \
python3-dev \
python3-setuptools \
python3-venv
msg_ok "Installed Python3"
msg_info "Installing Dependencies (Patience)"
$STD apt-get install -y --no-install-recommends \
redis \
postgresql \
postgresql-contrib \
postgresql-client \
build-essential \
gnupg \
ffmpeg \
libjpeg-dev \
libpq-dev \
libmagic-dev \
libzbar0 \
poppler-utils \
automake \
libtool \
pkg-config \
curl \
libtiff-dev \
libpng-dev \
libleptonica-dev \
sudo \
make \
mc
msg_ok "Installed Dependencies"
msg_info "Setup Funkwhale Dependencies (Patience)"
export FUNKWHALE_VERSION=1.4.0
$STD sudo apt install -y --no-install-recommends $(curl https://dev.funkwhale.audio/funkwhale/funkwhale/-/raw/$FUNKWHALE_VERSION/deploy/requirements.apt)
$STD sudo useradd --system --shell /bin/bash --create-home --home-dir /opt/funkwhale funkwhale
cd /opt/funkwhale
$STD sudo mkdir -p config api data/static data/media data/music front
$STD sudo chown -R funkwhale:funkwhale data
msg_ok "Initial Setup complete"
msg_ok "Download Funkwhale API"
$STD sudo curl -L -o "api-$FUNKWHALE_VERSION.zip" "https://dev.funkwhale.audio/funkwhale/funkwhale/-/jobs/artifacts/$FUNKWHALE_VERSION/download?job=build_api"
$STD sudo unzip -q "api-$FUNKWHALE_VERSION.zip" -d extracted
$STD sudo mv extracted/api/* api/
$STD sudo rm -rf extracted api-$FUNKWHALE_VERSION.zip
msg_ok "Downloaded and extracted Funkwhale API"
msg_info "Download Funkwhale Frontend"
$STD sudo curl -L -o "front-$FUNKWHALE_VERSION.zip" "https://dev.funkwhale.audio/funkwhale/funkwhale/-/jobs/artifacts/$FUNKWHALE_VERSION/download?job=build_front"
$STD sudo unzip -q "front-$FUNKWHALE_VERSION.zip" -d extracted
$STD sudo mv extracted/front .
$STD sudo rm -rf extracted front-$FUNKWHALE_VERSION.zip
msg_ok "Downloaded and extracted Funkwhale Frontend"
msg_info "Install Funkwhale API and DJANGO"
cd /opt/funkwhale
$STD sudo python3 -m venv venv
$STD sudo venv/bin/pip install --upgrade pip wheel
$STD sudo venv/bin/pip install --editable ./api
$STD sudo curl -L -o /opt/funkwhale/config/.env "https://dev.funkwhale.audio/funkwhale/funkwhale/raw/$FUNKWHALE_VERSION/deploy/env.prod.sample"
secret_key=$(openssl rand -base64 45 | sed 's/\//\\\//g')
sudo sed -i "s/DJANGO_SECRET_KEY=.*/DJANGO_SECRET_KEY=$secret_key/" /opt/funkwhale/config/.env
sudo sed -i 's/# CACHE_URL=redis:\/\/127.0.0.1:6379\/0/CACHE_URL=redis:\/\/127.0.0.1:6379\/0/' /opt/funkwhale/config/.env # uncomment the Debian default CACHE_URL
sudo sed -i 's/# DATABASE_URL=postgresql:\/\/funkwhale@:5432\/funkwhale/DATABASE_URL=postgresql:\/\/funkwhale@:5432\/funkwhale/' /opt/funkwhale/config/.env # uncomment the Debian default DATABASE_URL
# set the paths to /opt instead of /srv
sudo sed -i 's/MEDIA_ROOT=\/srv\/funkwhale\/data\/media/MEDIA_ROOT=\/opt\/funkwhale\/data\/media/' /opt/funkwhale/config/.env
sudo sed -i 's/STATIC_ROOT=\/srv\/funkwhale\/data\/static/STATIC_ROOT=\/opt\/funkwhale\/data\/static/' /opt/funkwhale/config/.env
sudo sed -i 's/MUSIC_DIRECTORY_PATH=\/srv\/funkwhale\/data\/music/MUSIC_DIRECTORY_PATH=\/opt\/funkwhale\/data\/music/' /opt/funkwhale/config/.env
sudo sed -i 's/MUSIC_DIRECTORY_SERVE_PATH=\/srv\/funkwhale\/data\/music/MUSIC_DIRECTORY_SERVE_PATH=\/opt\/funkwhale\/data\/music/' /opt/funkwhale/config/.env
sudo sed -i 's/FUNKWHALE_FRONTEND_PATH=\/srv\/funkwhale\/front\/dist/FUNKWHALE_FRONTEND_PATH=\/opt\/funkwhale\/front\/dist/' /opt/funkwhale/config/.env
sudo chown funkwhale:funkwhale /opt/funkwhale/config/.env
sudo chmod 600 /opt/funkwhale/config/.env
msg_ok "Environment successfully set up"
msg_info "Setting up Database"
DB_NAME=funkwhale
DB_USER=funkwhale
DB_EXTENSION_UNACCENT=unaccent
DB_EXTENSION_CITEXT=citext
DB_PASS="$(openssl rand -base64 18 | cut -c1-13)"
SECRET_KEY="$(head /dev/urandom | tr -dc A-Za-z0-9 | head -c 32)"
$STD sudo -u postgres psql -c "CREATE ROLE $DB_USER WITH LOGIN PASSWORD '$DB_PASS';"
$STD sudo -u postgres psql -c "CREATE DATABASE $DB_NAME WITH OWNER $DB_USER TEMPLATE template0;"
$STD sudo -u postgres psql -c "GRANT ALL PRIVILEGES ON DATABASE $DB_NAME TO $DB_USER;"
echo "" >>~/funkwhale.creds
echo -e "Funkwhale Database User: \e[32m$DB_USER\e[0m" >>~/funkwhale.creds
echo -e "Funkwhale Database Password: \e[32m$DB_PASS\e[0m" >>~/funkwhale.creds
echo -e "Funkwhale Database Name: \e[32m$DB_NAME\e[0m" >>~/funkwhale.creds
$STD sudo -u postgres psql -c "CREATE EXTENSION $DB_EXTENSION_UNACCENT;"
$STD sudo -u postgres psql -c "CREATE EXTENSION $DB_EXTENSION_CITEXT;"
cd /opt/funkwhale
$STD sudo -u funkwhale venv/bin/funkwhale-manage migrate
msg_ok "Set up PostgreSQL database"
msg_info "Setting up Funkwhale and systemd"
FUNKWHALE_USER=funkwhale_su
FUNKWHALE_MAIL=mail@example.com
FUNKWHALE_PASS="$(openssl rand -base64 18 | cut -c1-13)"
echo -e "Funkwhale Superuser: \e[32m$FUNKWHALE_USER\e[0m" >>~/funkwhale.creds
echo -e "Funkwhale Mail: \e[32m$FUNKWHALE_MAIL\e[0m" >>~/funkwhale.creds
echo -e "Funkwhale Superuser Password: \e[32m$FUNKWHALE_PASS\e[0m" >>~/funkwhale.creds
$STD sudo -u funkwhale venv/bin/funkwhale-manage fw users create --superuser --username $FUNKWHALE_USER --email $FUNKWHALE_MAIL --password $FUNKWHALE_PASS
$STD sudo -u funkwhale venv/bin/funkwhale-manage collectstatic --noinput
$STD sudo curl -L -o "/etc/systemd/system/funkwhale.target" "https://dev.funkwhale.audio/funkwhale/funkwhale/raw/$FUNKWHALE_VERSION/deploy/funkwhale.target"
$STD sudo curl -L -o "/etc/systemd/system/funkwhale-server.service" "https://dev.funkwhale.audio/funkwhale/funkwhale/raw/$FUNKWHALE_VERSION/deploy/funkwhale-server.service"
$STD sudo curl -L -o "/etc/systemd/system/funkwhale-worker.service" "https://dev.funkwhale.audio/funkwhale/funkwhale/raw/$FUNKWHALE_VERSION/deploy/funkwhale-worker.service"
$STD sudo curl -L -o "/etc/systemd/system/funkwhale-beat.service" "https://dev.funkwhale.audio/funkwhale/funkwhale/raw/$FUNKWHALE_VERSION/deploy/funkwhale-beat.service"
$STD sudo systemctl daemon-reload
$STD sudo systemctl start funkwhale.target
$STD sudo systemctl enable --now funkwhale.target
msg_ok "Funkwhale successfully set up"
read -r -p "Would you like to Setup Reverse Proxy (Nginx)? <y/N> " prompt
if [[ "${prompt,,}" =~ ^(y|yes)$ ]]; then
msg_info "Installing NGINX"
$STD apt install -y nginx
$STD curl -L -o /etc/nginx/funkwhale_proxy.conf "https://dev.funkwhale.audio/funkwhale/funkwhale/raw/$FUNKWHALE_VERSION/deploy/funkwhale_proxy.conf"
$STD curl -L -o /etc/nginx/sites-available/funkwhale.template "https://dev.funkwhale.audio/funkwhale/funkwhale/raw/$FUNKWHALE_VERSION/deploy/nginx.template"
set -a && source /opt/funkwhale/config/.env && set +a
envsubst "$(env | awk -F= '{printf " $%s", $1}')" \
  < /etc/nginx/sites-available/funkwhale.template \
  > /etc/nginx/sites-available/funkwhale.conf
if grep -q '\${' /etc/nginx/sites-available/funkwhale.conf; then
  msg_error "Unsubstituted variables remain in /etc/nginx/sites-available/funkwhale.conf"
  exit 1
fi
$STD ln -s /etc/nginx/sites-available/funkwhale.conf /etc/nginx/sites-enabled/
$STD systemctl reload nginx
msg_ok "Installed Nginx"
fi
read -r -p "Would you like to Setup TLS (Certbot)? <y/N> " prompt
if [[ "${prompt,,}" =~ ^(y|yes)$ ]]; then
msg_info "Installing Certbot"
$STD apt install -y certbot python3-certbot-nginx
$STD sudo certbot --nginx -d $FUNKWHALE_HOSTNAME
msg_ok "Installed Certbot"
fi
motd_ssh
customize
msg_info "Cleaning up"
$STD apt-get autoremove
$STD apt-get autoclean
msg_ok "Cleaned"

View File

@ -0,0 +1,179 @@
#!/usr/bin/env bash
# Copyright (c) 2021-2024 tteck
# Author: tteck (tteckster)
# Co-Author: jcantosz
# License: MIT
# https://github.com/tteck/Proxmox/raw/main/LICENSE
source /dev/stdin <<< "$FUNCTIONS_FILE_PATH"
color
verb_ip6
catch_errors
setting_up_container
network_check
update_os
msg_info "Installing Dependencies"
$STD apt-get update
$STD apt-get install -y \
curl \
lsb-release \
gpg \
g++ \
git \
make \
openssl \
python3 \
postgresql-15 \
redis
msg_ok "Installed Dependencies"
msg_info "Setting up Node.js Repository"
mkdir -p /etc/apt/keyrings
curl -fsSL https://deb.nodesource.com/gpgkey/nodesource-repo.gpg.key | gpg --dearmor -o /etc/apt/keyrings/nodesource.gpg
echo "deb [signed-by=/etc/apt/keyrings/nodesource.gpg] https://deb.nodesource.com/node_20.x nodistro main" >/etc/apt/sources.list.d/nodesource.list
msg_ok "Set up Node.js Repository"
msg_info "Installing Node.js"
$STD apt-get update
$STD apt-get install -y --no-install-suggests nodejs
msg_info "Installed Node.js"
msg_info "Installing Postgresql"
POSTGRES_HOST=localhost
POSTGRES_PORT=5432
POSTGRES_DB=ghostfolio-db
POSTGRES_USER='postgres'
POSTGRES_PASSWORD="$(head /dev/urandom | tr -dc A-Za-z0-9 | head -c 32)"
ACCESS_TOKEN_SALT="$(head /dev/urandom | tr -dc A-Za-z0-9 | head -c 32)"
DATABASE_URL="postgresql://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}?connect_timeout=300&sslmode=prefer"
JWT_SECRET_KEY="$(head /dev/urandom | tr -dc A-Za-z0-9 | head -c 32)"
$STD su postgres <<EOSU
psql -c "create database \"$POSTGRES_DB\";"
psql -c "ALTER DATABASE \"$POSTGRES_DB\" OWNER TO \"$POSTGRES_USER\";"
psql -c "GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO \"$POSTGRES_USER\";"
psql -c "ALTER USER \"$POSTGRES_USER\" WITH PASSWORD '$POSTGRES_PASSWORD';"
EOSU
echo "" >~/ghostfolio.creds
echo "Ghostfolio Database Credentials" >>~/ghostfolio.creds
echo "" >>~/ghostfolio.creds
echo -e "Ghostfolio Database User: \e[32m$POSTGRES_USER\e[0m" >>~/ghostfolio.creds
echo -e "Ghostfolio Database Password: \e[32m$POSTGRES_PASSWORD\e[0m" >>~/ghostfolio.creds
echo -e "Ghostfolio Database Name: \e[32m$POSTGRES_DB\e[0m" >>~/ghostfolio.creds
msg_ok "Installed Postgresql"
msg_info "Installing Redis"
REDIS_HOST=localhost
REDIS_PORT=6379
REDIS_PASSWORD="$(head /dev/urandom | tr -dc A-Za-z0-9 | head -c 32)"
$STD redis-cli CONFIG SET requirepass "$REDIS_PASSWORD"
$STD redis-cli -a "$REDIS_PASSWORD" CONFIG REWRITE
$STD systemctl restart redis
echo "" >>~/ghostfolio.creds
echo "Ghostfolio Redis Credentials" >>~/ghostfolio.creds
echo "" >>~/ghostfolio.creds
echo -e "Ghostfolio Redis Password: \e[32m$REDIS_PASSWORD\e[0m" >>~/ghostfolio.creds
msg_ok "Installed Redis"
msg_info "Installing Ghostfolio (Patience)"
RELEASE=$(curl -sL https://api.github.com/repos/ghostfolio/ghostfolio/releases/latest | grep '"tag_name":' | cut -d'"' -f4)
echo "${RELEASE}" >/opt/${APPLICATION}_version.txt
cd /opt/
$STD curl -Ls -o ghostfolio-$RELEASE.tgz https://github.com/ghostfolio/ghostfolio/archive/refs/tags/$RELEASE.tar.gz
$STD tar xzf ghostfolio-$RELEASE.tgz
$STD rm ghostfolio-$RELEASE.tgz
cp /opt/ghostfolio-$RELEASE/package.json /opt/package.json
cp /opt/ghostfolio-$RELEASE/package-lock.json /opt/package-lock.json
cd /opt/ghostfolio-$RELEASE
$STD npm install
$STD npm run build:production
mv /opt/package-lock.json /opt/ghostfolio-$RELEASE/package-lock.json
cd /opt/ghostfolio-$RELEASE/dist/apps/api/
$STD npm install
cp -r /opt/ghostfolio-$RELEASE/prisma .
mv /opt/package.json /opt/ghostfolio-$RELEASE/dist/apps/api/package.json
$STD npm run database:generate-typings
cd /opt
mv /opt/ghostfolio-$RELEASE/dist/apps /opt/ghostfolio
mv /opt/ghostfolio-$RELEASE/docker/entrypoint.sh /opt/ghostfolio/
rm -rf /opt/ghostfolio-$RELEASE
msg_ok "Installed Ghostfolio"
msg_info "Creating Service"
cat <<EOF >/opt/ghostfolio/api/.env
# CACHE
REDIS_HOST=$REDIS_HOST
REDIS_PORT=$REDIS_PORT
REDIS_PASSWORD=$REDIS_PASSWORD
# POSTGRES
POSTGRES_DB=$POSTGRES_DB
POSTGRES_USER=$POSTGRES_USER
POSTGRES_PASSWORD=$POSTGRES_PASSWORD
# VARIOUS
ACCESS_TOKEN_SALT=$ACCESS_TOKEN_SALT
DATABASE_URL="$DATABASE_URL"
JWT_SECRET_KEY=$JWT_SECRET_KEY
EOF
cat <<EOF >/opt/ghostfolio/start.sh
#!/bin/bash
# Source the environment vars and export them otherwise it wont get them properly
set -a
. /opt/ghostfolio/api/.env
set +a
# Run the docker entrypoint
/opt/ghostfolio/entrypoint.sh
EOF
chmod +x /opt/ghostfolio/start.sh
msg_info "Setup Service"
cat <<EOF >/etc/systemd/system/ghostfolio.service
[Unit]
Description=ghostfolio
After=postgresql.service redis.service
Requires=postgresql.service redis.service
[Service]
# Start Service
ExecStart=/opt/ghostfolio/start.sh
WorkingDirectory=/opt/ghostfolio/api/
# Restart service after 10 seconds if node service crashes
RestartSec=10
Restart=always
# Output to syslog
StandardOutput=syslog
StandardError=syslog
SyslogIdentifier=ghostfolio
[Install]
WantedBy=multi-user.target
EOF
systemctl enable -q --now ghostfolio
msg_ok "Created Service"
motd_ssh
customize
msg_info "Cleaning up"
$STD apt-get -y autoremove
$STD apt-get -y autoclean
msg_ok "Cleaned"

72
install/hoodik-install.sh Normal file
View File

@ -0,0 +1,72 @@
#!/usr/bin/env bash
# Copyright (c) 2021-2024 community-scripts ORG
# Author: MickLesk (Canbiz)
# License: MIT
# https://github.com/tteck/Proxmox/raw/main/LICENSE
# Source: https://github.com/hudikhq/hoodik
source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
color
verb_ip6
catch_errors
setting_up_container
network_check
update_os
msg_info "Installing Dependencies (Patience)"
$STD apt-get install -y \
pkg-config \
libssl-dev \
libc6-dev \
libpq-dev \
clang \
llvm \
nettle-dev \
build-essential \
curl \
sudo \
make \
mc
msg_ok "Installed Dependencies"
msg_info "Installing Rust (Patience)"
$STD bash <(curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs) -y
source ~/.cargo/env
msg_ok "Installed Rust"
msg_info "Installing Hoodik (Patience)"
cd /opt
RELEASE=$(curl -s https://api.github.com/repos/hudikhq/hoodik/releases/latest | grep "tag_name" | awk '{print substr($2, 2, length($2)-3) }')
wget -q "https://github.com/hudikhq/hoodik/archive/refs/tags/${RELEASE}.zip"
unzip -q ${RELEASE}.zip
mv "hoodik-${RELEASE:1}" hoodik
cd hoodik
cargo build -q --release
msg_ok "Installed hoodik"
msg_info "Creating Service"
cat <<EOF >/etc/systemd/system/hoodik.service
[Unit]
Description=Start Hoodik Service
After=network.target
[Service]
User=root
WorkingDirectory=/opt/hoodik
ExecStart=/root/.cargo/bin/cargo run -q --release
[Install]
WantedBy=multi-user.target
EOF
systemctl enable -q --now hoodik.service
msg_ok "Created Service"
motd_ssh
customize
msg_info "Cleaning up"
rm -rf /opt/${RELEASE}.zip
$STD apt-get autoremove
$STD apt-get autoclean
msg_ok "Cleaned"

158
install/koel-install.sh Normal file
View File

@ -0,0 +1,158 @@
#!/usr/bin/env bash
# Copyright (c) 2021-2024 communtiy-scripts ORG
# Author: MickLesk (Canbiz)
# License: MIT
# https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
source /dev/stdin <<< "$FUNCTIONS_FILE_PATH"
color
verb_ip6
catch_errors
setting_up_container
network_check
update_os
msg_info "Installing Dependencies (Patience)"
$STD apt-get install -y \
postgresql \
nginx \
apt-transport-https \
gnupg2 \
lsb-release \
ffmpeg \
curl \
unzip \
sudo \
mc \
cron \
libapache2-mod-xsendfile \
libzip-dev \
locales \
libpng-dev \
libjpeg62-turbo-dev \
libpq-dev \
libwebp-dev \
libapache2-mod-php \
composer
msg_ok "Installed Dependencies"
msg_info "Setting up PSql Database"
DB_NAME=koel_db
DB_USER=koel
DB_PASS="$(openssl rand -base64 18 | cut -c1-13)"
$STD sudo -u postgres psql -c "CREATE ROLE $DB_USER WITH LOGIN PASSWORD '$DB_PASS';"
$STD sudo -u postgres psql -c "CREATE DATABASE $DB_NAME WITH OWNER $DB_USER ENCODING 'UTF8' TEMPLATE template0;"
$STD sudo -u postgres psql -c "ALTER ROLE $DB_USER SET client_encoding TO 'utf8';"
$STD sudo -u postgres psql -c "ALTER ROLE $DB_USER SET default_transaction_isolation TO 'read committed';"
$STD sudo -u postgres psql -c "ALTER ROLE $DB_USER SET timezone TO 'UTC'"
{
echo "Koel-Credentials"
echo "Koel Database User: $DB_USER"
echo "Koel Database Password: $DB_PASS"
echo "Koel Database Name: $DB_NAME"
} >> ~/koel.creds
msg_ok "Set up PostgreSQL database"
msg_info "Setting up Node.js/Yarn"
mkdir -p /etc/apt/keyrings
$STD curl -fsSL https://deb.nodesource.com/gpgkey/nodesource-repo.gpg.key | gpg --dearmor -o /etc/apt/keyrings/nodesource.gpg
$STD echo "deb [signed-by=/etc/apt/keyrings/nodesource.gpg] https://deb.nodesource.com/node_20.x nodistro main" >/etc/apt/sources.list.d/nodesource.list
$STD apt-get update
$STD apt-get install -y nodejs
$STD npm install -g npm@latest
$STD npm install -g yarn
msg_ok "Installed Node.js/Yarn"
msg_info "Setting up PHP"
$STD curl -sSLo /usr/share/keyrings/deb.sury.org-php.gpg https://packages.sury.org/php/apt.gpg
$STD sh -c 'echo "deb [signed-by=/usr/share/keyrings/deb.sury.org-php.gpg] https://packages.sury.org/php/ $(lsb_release -sc) main" > /etc/apt/sources.list.d/php.list'
$STD apt update
$STD apt install -y php8.3 php8.3-{bcmath,exif,bz2,cli,common,curl,fpm,gd,intl,sqlite3,mbstring,xml,zip,pgsql}
msg_ok "PHP successfully setup"
msg_info "Installing Koel(Patience)"
RELEASE=$(wget -q https://github.com/koel/koel/releases/latest -O - | grep "title>Release" | cut -d " " -f 4)
cd /opt
mkdir -p /opt/koel_{media,sync}
wget -q https://github.com/koel/koel/releases/download/${RELEASE}/koel-${RELEASE}.zip
unzip -q koel-${RELEASE}.zip
chown -R :www-data /opt/*
chmod -R g+r /opt/*
chmod -R g+rw /opt/*
chown -R www-data:www-data /opt/*
chmod -R 755 /opt/*
cd /opt/koel
echo "export COMPOSER_ALLOW_SUPERUSER=1" >> ~/.bashrc
source ~/.bashrc
$STD composer update --no-interaction
$STD composer install --no-interaction
sudo sed -i -e "s/DB_CONNECTION=.*/DB_CONNECTION=pgsql/" \
-e "s/DB_HOST=.*/DB_HOST=localhost/" \
-e "s/DB_DATABASE=.*/DB_DATABASE=$DB_NAME/" \
-e "s/DB_PORT=.*/DB_PORT=5432/" \
-e "s/DB_USERNAME=.*/DB_USERNAME=$DB_USER/" \
-e "s|DB_PASSWORD=.*|DB_PASSWORD=$DB_PASS|" \
-e "s|MEDIA_PATH=.*|MEDIA_PATH=/opt/koel_media|" \
-e "s|FFMPEG_PATH=/usr/local/bin/ffmpeg|FFMPEG_PATH=/usr/bin/ffmpeg|" /opt/koel/.env
sed -i -e "s/^upload_max_filesize = .*/upload_max_filesize = 200M/" \
-e "s/^post_max_size = .*/post_max_size = 200M/" \
-e "s/^memory_limit = .*/memory_limit = 200M/" /etc/php/8.3/fpm/php.ini
msg_ok "Installed Koel"
msg_info "Set up web services"
cat <<EOF >/etc/nginx/sites-available/koel
server {
listen 6767;
server_name koel.local;
root /opt/koel/public;
index index.php;
gzip on;
gzip_types text/plain text/css application/x-javascript text/xml application/xml application/xml+rss text/javascript application/json;
gzip_comp_level 9;
send_timeout 3600;
client_max_body_size 200M;
location / {
try_files \$uri \$uri/ /index.php?\$args;
}
location /media/ {
internal;
alias /opt/koel_media;
}
location ~ \.php$ {
try_files \$uri =404;
fastcgi_split_path_info ^(.+\.php)(/.+)$;
fastcgi_pass unix:/run/php/php8.3-fpm.sock;
fastcgi_index index.php;
include fastcgi_params;
fastcgi_param SCRIPT_FILENAME \$document_root\$fastcgi_script_name;
fastcgi_param PATH_INFO \$fastcgi_path_info;
}
}
EOF
ln -s /etc/nginx/sites-available/koel /etc/nginx/sites-enabled/koel
systemctl restart php8.3-fpm
systemctl reload nginx
msg_ok "Created Services"
msg_info "Adding Cronjob (Daily Midnight)"
cat <<EOF >/opt/koel_sync/koel_sync.cron
0 0 * * * cd /opt/koel/ && /usr/bin/php artisan koel:sync >/opt/koel_sync/koel_sync.log 2>&1
EOF
crontab /opt/koel_sync/koel_sync.cron
msg_ok "Cronjob successfully added"
motd_ssh
customize
msg_info "Cleaning up"
rm -rf /opt/koel-${RELEASE}.zip
$STD apt-get autoremove
$STD apt-get autoclean
msg_ok "Cleaned"

View File

@ -0,0 +1,57 @@
#!/usr/bin/env bash
# Copyright (c) 2021-2023 tteck
# Author: tteck (tteckster)
# License: MIT
# https://github.com/tteck/Proxmox/raw/main/LICENSE
source /dev/stdin <<< "$FUNCTIONS_FILE_PATH"
color
verb_ip6
catch_errors
setting_up_container
network_check
update_os
msg_info "Installing Dependencies"
$STD apt-get install -y ansible git apache2
msg_ok "Installed Dependencies"
RELEASE=$(curl -sX GET "https://api.github.com/repos/netbootxyz/netboot.xyz/releases/latest" | awk '/tag_name/{print $4;exit}' FS='[""]')
msg_info "Installing netboot.xyz ${RELEASE}"
$STD curl --silent -o ${RELEASE}.tar.gz -L "https://github.com/netbootxyz/netboot.xyz/archive/${RELEASE}.tar.gz"
$STD tar xvzf ${RELEASE}.tar.gz
VER=$(curl -s https://api.github.com/repos/netbootxyz/netboot.xyz/releases/latest |
grep "tag_name" |
awk '{print substr($2, 2, length($2)-3) }')
rm -rf ${RELEASE}.tar.gz
mv netboot.xyz-${VER} /opt/netboot.xyz
msg_ok "Installed netboot.xyz ${RELEASE}"
msg_info "Creating Service"
service_path="/etc/systemd/system/netbootxyz.service"
echo "[Unit]
Description=netboot.xyz
After=network.target
[Service]
Restart=always
RestartSec=5
Type=simple
User=root
WorkingDirectory=/opt/netboot.xyz
ExecStart="ansible-playbook" -i inventory site.yml
TimeoutStopSec=30
[Install]
WantedBy=multi-user.target" >$service_path
$STD sudo systemctl enable --now netbootxyz.service
msg_ok "Created Service"
motd_ssh
customize
msg_info "Cleaning up"
$STD apt-get autoremove
$STD apt-get autoclean
msg_ok "Cleaned"

View File

@ -0,0 +1,187 @@
#!/usr/bin/env bash
# Copyright (c) 2021-2025 tteck
# Author: tteck (tteckster)
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://nginxproxymanager.com/
source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
color
verb_ip6
catch_errors
setting_up_container
network_check
update_os
msg_info "Installing Dependencies"
$STD apt-get update
$STD apt-get -y install \
sudo \
mc \
curl \
gnupg \
make \
gcc \
g++ \
ca-certificates \
apache2-utils \
logrotate \
build-essential \
git
msg_ok "Installed Dependencies"
msg_info "Installing Python3"
$STD apt-get install -y \
python3 \
python3-dev \
python3-pip \
python3-venv \
python3-cffi \
python3-certbot \
python3-certbot-dns-cloudflare
$STD pip3 install certbot-dns-multi
$STD python3 -m venv /opt/certbot/
rm -rf /usr/lib/python3.*/EXTERNALLY-MANAGED
msg_ok "Installed Python3"
msg_info "Installing Openresty"
VERSION="$(awk -F'=' '/^VERSION_CODENAME=/{ print $NF }' /etc/os-release)"
wget -qO - https://openresty.org/package/pubkey.gpg | gpg --dearmor -o /etc/apt/trusted.gpg.d/openresty.gpg
echo -e "deb http://openresty.org/package/debian $VERSION openresty" >/etc/apt/sources.list.d/openresty.list
$STD apt-get update
$STD apt-get -y install openresty
msg_ok "Installed Openresty"
msg_info "Installing Node.js"
$STD bash <(curl -fsSL https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.7/install.sh)
source ~/.bashrc
$STD nvm install 16.20.2
ln -sf /root/.nvm/versions/node/v16.20.2/bin/node /usr/bin/node
msg_ok "Installed Node.js"
msg_info "Installing pnpm"
$STD npm install -g pnpm@8.15
msg_ok "Installed pnpm"
msg_info "Setup Nginx Proxy Manager"
RELEASE=$(curl -s https://api.github.com/repos/NginxProxyManager/nginx-proxy-manager/releases/latest | grep "tag_name" | awk '{print substr($2, 3, length($2)-4) }')
wget -q https://codeload.github.com/NginxProxyManager/nginx-proxy-manager/tar.gz/v${RELEASE} -O - | tar -xz
cd ./nginx-proxy-manager-${RELEASE}
ln -sf /usr/bin/python3 /usr/bin/python
ln -sf /usr/bin/certbot /opt/certbot/bin/certbot
ln -sf /usr/local/openresty/nginx/sbin/nginx /usr/sbin/nginx
ln -sf /usr/local/openresty/nginx/ /etc/nginx
sed -i "s|\"version\": \"0.0.0\"|\"version\": \"$RELEASE\"|" backend/package.json
sed -i "s|\"version\": \"0.0.0\"|\"version\": \"$RELEASE\"|" frontend/package.json
sed -i 's+^daemon+#daemon+g' docker/rootfs/etc/nginx/nginx.conf
NGINX_CONFS=$(find "$(pwd)" -type f -name "*.conf")
for NGINX_CONF in $NGINX_CONFS; do
sed -i 's+include conf.d+include /etc/nginx/conf.d+g' "$NGINX_CONF"
done
mkdir -p /var/www/html /etc/nginx/logs
cp -r docker/rootfs/var/www/html/* /var/www/html/
cp -r docker/rootfs/etc/nginx/* /etc/nginx/
cp docker/rootfs/etc/letsencrypt.ini /etc/letsencrypt.ini
cp docker/rootfs/etc/logrotate.d/nginx-proxy-manager /etc/logrotate.d/nginx-proxy-manager
ln -sf /etc/nginx/nginx.conf /etc/nginx/conf/nginx.conf
rm -f /etc/nginx/conf.d/dev.conf
mkdir -p /tmp/nginx/body \
/run/nginx \
/data/nginx \
/data/custom_ssl \
/data/logs \
/data/access \
/data/nginx/default_host \
/data/nginx/default_www \
/data/nginx/proxy_host \
/data/nginx/redirection_host \
/data/nginx/stream \
/data/nginx/dead_host \
/data/nginx/temp \
/var/lib/nginx/cache/public \
/var/lib/nginx/cache/private \
/var/cache/nginx/proxy_temp
chmod -R 777 /var/cache/nginx
chown root /tmp/nginx
echo resolver "$(awk 'BEGIN{ORS=" "} $1=="nameserver" {print ($2 ~ ":")? "["$2"]": $2}' /etc/resolv.conf);" >/etc/nginx/conf.d/include/resolvers.conf
if [ ! -f /data/nginx/dummycert.pem ] || [ ! -f /data/nginx/dummykey.pem ]; then
openssl req -new -newkey rsa:2048 -days 3650 -nodes -x509 -subj "/O=Nginx Proxy Manager/OU=Dummy Certificate/CN=localhost" -keyout /data/nginx/dummykey.pem -out /data/nginx/dummycert.pem &>/dev/null
fi
mkdir -p /app/global /app/frontend/images
cp -r backend/* /app
cp -r global/* /app/global
msg_ok "Setup Nginx Proxy Manager"
msg_info "Building Frontend"
cd ./frontend
$STD pnpm install
$STD pnpm upgrade
$STD pnpm run build
cp -r dist/* /app/frontend
cp -r app-images/* /app/frontend/images
msg_ok "Built Frontend"
msg_info "Initializing Backend"
rm -rf /app/config/default.json
if [ ! -f /app/config/production.json ]; then
cat <<'EOF' >/app/config/production.json
{
"database": {
"engine": "knex-native",
"knex": {
"client": "sqlite3",
"connection": {
"filename": "/data/database.sqlite"
}
}
}
}
EOF
fi
cd /app
$STD pnpm install
msg_ok "Initialized Backend"
msg_info "Creating Service"
cat <<'EOF' >/lib/systemd/system/npm.service
[Unit]
Description=Nginx Proxy Manager
After=network.target
Wants=openresty.service
[Service]
Type=simple
Environment=NODE_ENV=production
ExecStartPre=-mkdir -p /tmp/nginx/body /data/letsencrypt-acme-challenge
ExecStart=/usr/bin/node index.js --abort_on_uncaught_exception --max_old_space_size=250
WorkingDirectory=/app
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF
msg_ok "Created Service"
motd_ssh
customize
msg_info "Starting Services"
sed -i 's/user npm/user root/g; s/^pid/#pid/g' /usr/local/openresty/nginx/conf/nginx.conf
sed -r -i 's/^([[:space:]]*)su npm npm/\1#su npm npm/g;' /etc/logrotate.d/nginx-proxy-manager
sed -i 's/include-system-site-packages = false/include-system-site-packages = true/g' /opt/certbot/pyvenv.cfg
systemctl enable -q --now openresty
systemctl enable -q --now npm
msg_ok "Started Services"
msg_info "Cleaning up"
rm -rf ../nginx-proxy-manager-*
systemctl restart openresty
$STD apt-get -y autoremove
$STD apt-get -y autoclean
msg_ok "Cleaned"

View File

@ -1,60 +0,0 @@
#!/usr/bin/env bash
# Copyright (c) 2021-2025 community-scripts ORG
# Author: emoscardini
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://github.com/openziti/ziti
source /dev/stdin <<< "$FUNCTIONS_FILE_PATH"
color
verb_ip6
catch_errors
setting_up_container
network_check
update_os
msg_info "Installing Dependencies"
$STD apt-get install -y gpg
msg_ok "Installed Dependencies"
msg_info "Installing openziti"
mkdir -p --mode=0755 /usr/share/keyrings
curl -sSLf https://get.openziti.io/tun/package-repos.gpg | gpg --dearmor -o /usr/share/keyrings/openziti.gpg
echo "deb [signed-by=/usr/share/keyrings/openziti.gpg] https://packages.openziti.org/zitipax-openziti-deb-stable debian main" >/etc/apt/sources.list.d/openziti.list
$STD apt-get update
$STD apt-get install -y openziti-controller openziti-console
msg_ok "Installed openziti"
read -r -p "Would you like to go through the auto configuration now? <y/N>" prompt
if [[ ${prompt,,} =~ ^(y|yes)$ ]]; then
IPADDRESS=$(hostname -I | awk '{print $1}')
GEN_FQDN="controller.${IPADDRESS}.sslip.io"
read -r -p "Please enter the controller FQDN [${GEN_FQDN}]: " ZITI_CTRL_ADVERTISED_ADDRESS
ZITI_CTRL_ADVERTISED_ADDRESS=${ZITI_CTRL_ADVERTISED_ADDRESS:-$GEN_FQDN}
read -r -p "Please enter the controller port [1280]: " ZITI_CTRL_ADVERTISED_PORT
ZITI_CTRL_ADVERTISED_PORT=${ZITI_CTRL_ADVERTISED_PORT:-1280}
read -r -p "Please enter the controller admin user [admin]: " ZITI_USER
ZITI_USER=${ZITI_USER:-admin}
GEN_PWD=$(head -c128 /dev/urandom | LC_ALL=C tr -dc 'A-Za-z0-9!@#$%^*_+~' | cut -c 1-12)
read -r -p "Please enter the controller admin password [${GEN_PWD}]:" ZITI_PWD
ZITI_PWD=${ZITI_PWD:-$GEN_PWD}
CONFIG_FILE="/opt/openziti/etc/controller/bootstrap.env"
sed -i "s|^ZITI_CTRL_ADVERTISED_ADDRESS=.*|ZITI_CTRL_ADVERTISED_ADDRESS='${ZITI_CTRL_ADVERTISED_ADDRESS}'|" "$CONFIG_FILE"
sed -i "s|^ZITI_CTRL_ADVERTISED_PORT=.*|ZITI_CTRL_ADVERTISED_PORT='${ZITI_CTRL_ADVERTISED_PORT}'|" "$CONFIG_FILE"
sed -i "s|^ZITI_USER=.*|ZITI_USER='${ZITI_USER}'|" "$CONFIG_FILE"
sed -i "s|^ZITI_PWD=.*|ZITI_PWD='${ZITI_PWD}'|" "$CONFIG_FILE"
env VERBOSE=0 bash /opt/openziti/etc/controller/bootstrap.bash
msg_ok "Configuration Completed"
systemctl enable -q --now ziti-controller
else
systemctl enable -q ziti-controller
msg_error "Configration not provided; Please run /opt/openziti/etc/controller/bootstrap.bash to configure the controller and restart the container"
fi
motd_ssh
customize
msg_info "Cleaning up"
$STD apt-get -y autoremove
$STD apt-get -y autoclean
msg_ok "Cleaned"

191
install/pixelfed-install.sh Normal file
View File

@ -0,0 +1,191 @@
#!/usr/bin/env bash
# Copyright (c) 2021-2024 tteck
# Author: tteck
# Co-Author: MickLesk (Canbiz)
# License: MIT
# https://github.com/tteck/Proxmox/raw/main/LICENSE
source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
color
verb_ip6
catch_errors
setting_up_container
network_check
update_os
msg_info "Installing Dependencies (Patience)"
$STD apt-get install -y \
build-essential \
gpg \
curl \
sudo \
git \
gnupg2 \
ca-certificates \
lsb-release \
php8.3-{fpm,bcmath,ctype,curl,exif,gd,iconv,intl,mbstring,redis,tokenizer,xml,zip,pgsql,pdo-pgsql,bz2,sqlite3} \
composer \
redis \
ffmpeg \
jpegoptim \
optipng \
pngquant \
make \
mc
msg_ok "Installed Dependencies"
msg_info "Configure Redis Socket"
REDIS_PASS="$(openssl rand -base64 18 | cut -c1-13)"
sed -i 's/^port .*/port 0/' /etc/redis/redis.conf
sed -i "s/^# requirepass foobared/requirepass $REDIS_PASS/" /etc/redis/redis.conf
sed -i 's|^# unixsocket .*|unixsocket /run/redis/redis.sock|' /etc/redis/redis.conf
sed -i 's/^# unixsocketperm .*/unixsocketperm 770/' /etc/redis/redis.conf
systemctl restart redis
msg_ok "Redis Socket configured"
msg_info "Add pixelfed user"
useradd -rU -s /bin/bash pixelfed
msg_ok "Pixelfed User Added"
msg_info "Configure PHP-FPM for Pixelfed"
cp /etc/php/8.3/fpm/pool.d/www.conf /etc/php/8.3/fpm/pool.d/pixelfed.conf
sed -i 's/\[www\]/\[pixelfed\]/' /etc/php/8.3/fpm/pool.d/pixelfed.conf
sed -i 's/^user = www-data/user = pixelfed/' /etc/php/8.3/fpm/pool.d/pixelfed.conf
sed -i 's/^group = www-data/group = pixelfed/' /etc/php/8.3/fpm/pool.d/pixelfed.conf
sed -i 's|^listen = .*|listen = /run/php-fpm/pixelfed.sock|' /etc/php/8.3/fpm/pool.d/pixelfed.conf
systemctl restart php8.3-fpm
msg_ok "successfully configured PHP-FPM"
msg_info "Setup Postgres Database"
DB_NAME=pixelfed_db
DB_USER=pixelfed_user
DB_PASS="$(openssl rand -base64 18 | cut -c1-13)"
curl -fsSL https://www.postgresql.org/media/keys/ACCC4CF8.asc | gpg --dearmor -o /etc/apt/trusted.gpg.d/postgresql.gpg
echo "deb https://apt.postgresql.org/pub/repos/apt bookworm-pgdg main" >/etc/apt/sources.list.d/pgdg.list
apt-get update
apt-get install -y postgresql-17
sudo -u postgres psql -c "CREATE ROLE $DB_USER WITH LOGIN PASSWORD '$DB_PASS';"
sudo -u postgres psql -c "CREATE DATABASE $DB_NAME WITH OWNER $DB_USER TEMPLATE template0;"
sudo -u postgres psql -c "ALTER DATABASE $DB_NAME OWNER TO $DB_USER;"
sudo -u postgres psql -c "GRANT CREATE ON SCHEMA public TO $DB_USER;"
echo "" >>~/pixelfed.creds
echo -e "Pixelfed Database Name: $DB_NAME" >>~/pixelfed.creds
echo -e "Pixelfed Database User: $DB_USER" >>~/pixelfed.creds
echo -e "Pixelfed Database Password: $DB_PASS" >>~/pixelfed.creds
#export $(cat /opt/pixelfed/.env |grep "^[^#]" | xargs)
msg_ok "Set up PostgreSQL Database successfully"
msg_info "Installing Pixelfed (Patience)"
RELEASE=$(curl -s https://api.github.com/repos/pixelfed/pixelfed/releases/latest | grep "tag_name" | awk '{print substr($2, 2, length($2)-3) }')
wget -q "https://github.com/pixelfed/pixelfed/archive/refs/tags/${RELEASE}.zip"
unzip -q ${RELEASE}.zip
mv pixelfed-${RELEASE:1} /opt/pixelfed
rm -R ${RELEASE}.zip
cd /opt/pixelfed
chown -R www-data:www-data /opt/pixelfed/storage
chmod -R 775 /opt/pixelfed/storage
chown -R pixelfed:pixelfed /opt/pixelfed/storage
chmod -R 775 /opt/pixelfed/storage
chown -R www-data:www-data /opt/pixelfed
chmod -R 755 /opt/pixelfed
COMPOSER_ALLOW_SUPERUSER=1 composer install --no-dev --no-ansi --no-interaction --optimize-autoloader
msg_ok "Installed Pixelfed"
msg_info "Setting up environment & running database migrations"
cp .env.example .env
sed -i "s/DB_CONNECTION=.*/DB_CONNECTION=pgsql/" .env
sed -i "s/DB_PORT=.*/DB_PORT=5432/" .env
sed -i "s/DB_DATABASE=.*/DB_DATABASE=$DB_NAME/" .env
sed -i "s/DB_USERNAME=.*/DB_USERNAME=$DB_USER/" .env
sed -i "s/DB_PASSWORD=.*/DB_PASSWORD=$DB_PASS/" .env
sed -i "s/REDIS_HOST=.*/REDIS_HOST=127.0.0.1/" .env
sed -i "s/REDIS_PASSWORD=.*/REDIS_PASSWORD=$REDIS_PASS/" .env
sed -i "s/APP_URL=.*/APP_URL=http:\/\/localhost/" .env # localhost URL
php artisan key:generate
php artisan storage:link
php artisan migrate --force
php artisan import:cities
php artisan instance:actor
php artisan passport:keys
php artisan route:cache
php artisan view:cache
sed -i 's/^post_max_size = .*/post_max_size = 100M/' /etc/php/8.3/fpm/php.ini
sed -i 's/^upload_max_filesize = .*/upload_max_filesize = 100M/' /etc/php/8.3/fpm/php.ini
sed -i 's/^max_execution_time = .*/max_execution_time = 600/' /etc/php/8.3/fpm/php.ini
systemctl restart php8.3-fpm
msg_ok "Pixelfed successfully set up"
msg_info "Creating Services"
cat <<EOF >/etc/nginx/sites-available/pixelfed.conf
server {
listen 80;
server_name localhost; # served on localhost
root /opt/pixelfed/public;
index index.php;
location / {
try_files \$uri \$uri/ /index.php?\$query_string;
}
location ~ \.php$ {
include snippets/fastcgi-php.conf;
fastcgi_pass unix:/run/php-fpm/pixelfed.sock;
fastcgi_param SCRIPT_FILENAME \$document_root\$fastcgi_script_name;
include fastcgi_params;
}
location ~ /\.(?!well-known).* {
deny all;
}
client_max_body_size 20M;
}
EOF
ln -s /etc/nginx/sites-available/pixelfed.conf /etc/nginx/sites-enabled/
nginx -t && systemctl reload nginx
cat <<EOF >/etc/systemd/system/pixelfed-horizon.service
[Unit]
Description=Pixelfed Horizon
After=network.target
Requires=php8.3-fpm
Requires=redis
[Service]
User=www-data
WorkingDirectory=/opt/pixelfed
ExecStart=/usr/bin/php /opt/pixelfed/artisan horizon
Restart=always
[Install]
WantedBy=multi-user.target
EOF
cat <<EOF >/etc/systemd/system/pixelfed-scheduler.service
[Unit]
Description=Pixelfed Scheduler
After=network.target
[Service]
User=www-data
ExecStart=/usr/bin/php /opt/pixelfed/artisan schedule:run
Restart=always
[Install]
WantedBy=multi-user.target
EOF
systemctl enable --now pixelfed-scheduler
systemctl enable --now pixelfed-horizon
msg_ok "Created Services"
motd_ssh
customize
msg_info "Cleaning up"
$STD apt-get -y autoremove
$STD apt-get -y autoclean
msg_ok "Cleaned"

View File

@ -15,9 +15,9 @@ update_os
msg_info "Installing Dependencies"
$STD apt-get install -y \
gnupg \
unzip \
postgresql-common
msg_ok "Installed Dependencies"
msg_info "Installing Additional Dependencies"
@ -27,7 +27,7 @@ echo "deb [signed-by=/etc/apt/keyrings/nodesource.gpg] https://deb.nodesource.co
echo "YES" | /usr/share/postgresql-common/pgdg/apt.postgresql.org.sh &>/dev/null
$STD apt-get install -y postgresql-16 nodejs
cd /tmp
-wget -q https://dl.min.io/server/minio/release/linux-amd64/minio.deb
+curl -fsSL https://dl.min.io/server/minio/release/linux-amd64/minio.deb -o minio.deb
$STD dpkg -i minio.deb
msg_info "Setting up Database"
@ -46,9 +46,9 @@ ACCESS_TOKEN=$(openssl rand -base64 48)
REFRESH_TOKEN=$(openssl rand -base64 48)
CHROME_TOKEN=$(openssl rand -hex 32)
LOCAL_IP=$(hostname -I | awk '{print $1}')
-TAG=$(curl -s https://api.github.com/repos/browserless/browserless/tags?per_page=1 | grep "name" | awk '{print substr($2, 3, length($2)-4) }')
+TAG=$(curl -fsSL https://api.github.com/repos/browserless/browserless/tags?per_page=1 | grep "name" | awk '{print substr($2, 3, length($2)-4) }')
-RELEASE=$(curl -s https://api.github.com/repos/AmruthPillai/Reactive-Resume/releases/latest | grep "tag_name" | awk '{print substr($2, 3, length($2)-4) }')
+RELEASE=$(curl -fsSL https://api.github.com/repos/AmruthPillai/Reactive-Resume/releases/latest | grep "tag_name" | awk '{print substr($2, 3, length($2)-4) }')
-wget -q "https://github.com/AmruthPillai/Reactive-Resume/archive/refs/tags/v${RELEASE}.zip"
+curl -fsSL "https://github.com/AmruthPillai/Reactive-Resume/archive/refs/tags/v${RELEASE}.zip" -o v${RELEASE}.zip
unzip -q v${RELEASE}.zip
mv ${APPLICATION}-${RELEASE}/ /opt/${APPLICATION}
cd /opt/${APPLICATION}
@ -65,7 +65,7 @@ msg_ok "Installed ${APPLICATION}"
msg_info "Installing Browserless (Patience)"
cd /tmp
-wget -q https://github.com/browserless/browserless/archive/refs/tags/v${TAG}.zip
+curl -fsSL https://github.com/browserless/browserless/archive/refs/tags/v${TAG}.zip -o v${TAG}.zip
unzip -q v${TAG}.zip
mv browserless-${TAG} /opt/browserless
cd /opt/browserless
@ -126,11 +126,11 @@ TOKEN=${CHROME_TOKEN}
EOF
echo "${RELEASE}" >/opt/${APPLICATION}_version.txt
{
echo "${APPLICATION} Credentials"
echo "Database User: $DB_USER"
echo "Database Password: $DB_PASS"
echo "Database Name: $DB_NAME"
echo "Minio Root Password: ${MINIO_PASS}"
} >>~/${APPLICATION}.creds
msg_ok "Configured applications"

View File

@ -0,0 +1,88 @@
#!/usr/bin/env bash
# Copyright (c) 2021-2024 tteck
# Author: tteck
# Co-Author: MickLesk (Canbiz)
# License: MIT
# https://github.com/tteck/Proxmox/raw/main/LICENSE
# Source: https://github.com/roundcube/roundcubemail
source /dev/stdin <<< "$FUNCTIONS_FILE_PATH"
color
verb_ip6
catch_errors
setting_up_container
network_check
update_os
msg_info "Installing Dependencies"
$STD apt-get install -y \
curl \
sudo \
mc \
postgresql \
apache2 \
libapache2-mod-php \
composer \
php8.2-{mbstring,gd,imap,mysql,ldap,curl,intl,imagick,bz2,sqlite3,zip,xml}
msg_ok "Installed Dependencies"
msg_info "Setting up PostgreSQL"
DB_NAME=roundcube_db
DB_USER=roundcube_user
DB_PASS="$(openssl rand -base64 18 | cut -c1-13)"
$STD sudo -u postgres psql -c "CREATE DATABASE $DB_NAME WITH ENCODING 'UTF8';"
$STD sudo -u postgres psql -c "CREATE USER $DB_USER WITH PASSWORD '$DB_PASS';"
$STD sudo -u postgres psql -c "GRANT ALL PRIVILEGES ON DATABASE $DB_NAME TO $DB_USER;"
$STD sudo -u postgres psql -c "ALTER DATABASE $DB_NAME OWNER TO $DB_USER;"
$STD sudo -u postgres psql -c "ALTER USER $DB_USER WITH SUPERUSER;"
echo "" >>~/roundcubemail.creds
echo -e "Roundcubemail Database User: $DB_USER" >>~/roundcubemail.creds
echo -e "Roundcubemail Database Password: $DB_PASS" >>~/roundcubemail.creds
echo -e "Roundcubemail Database Name: $DB_NAME" >>~/roundcubemail.creds
msg_ok "Set up PostgreSQL"
msg_info "Installing Roundcubemail (Patience)"
cd /opt
RELEASE=$(curl -s https://api.github.com/repos/roundcube/roundcubemail/releases/latest | grep "tag_name" | awk '{print substr($2, 2, length($2)-3) }')
wget -q "https://github.com/roundcube/roundcubemail/releases/download/${RELEASE}/roundcubemail-${RELEASE}-complete.tar.gz"
tar -xf roundcubemail-${RELEASE}-complete.tar.gz
mv roundcubemail-${RELEASE} /opt/roundcubemail
cd /opt/roundcubemail
COMPOSER_ALLOW_SUPERUSER=1 composer install --no-dev
cp /opt/roundcubemail/config/config.inc.php.sample /opt/roundcubemail/config/config.inc.php
sed -i "s|^\\\$config\\['db_dsnw'\\] = 'mysql://.*';|\\\$config\\['db_dsnw'\\] = 'pgsql://$DB_USER:$DB_PASS@localhost/$DB_NAME';|" /opt/roundcubemail/config/config.inc.php
chown -R www-data:www-data temp/ logs/
echo "${RELEASE}" >"/opt/${APPLICATION}_version.txt"
cat <<EOF >/etc/apache2/sites-available/roundcubemail.conf
<VirtualHost *:80>
ServerAdmin webmaster@localhost
DocumentRoot /opt/roundcubemail/public_html
<Directory /opt/roundcubemail/public_html>
Options -Indexes +FollowSymLinks
AllowOverride All
Require all granted
</Directory>
ErrorLog \${APACHE_LOG_DIR}/roundcubemail_error.log
CustomLog \${APACHE_LOG_DIR}/roundcubemail_access.log combined
</VirtualHost>
EOF
$STD sudo a2enmod deflate
$STD sudo a2enmod expires
$STD sudo a2enmod headers
$STD a2ensite roundcubemail.conf
$STD a2dissite 000-default.conf
$STD systemctl reload apache2
msg_ok "Installed Wallos"
motd_ssh
customize
msg_info "Cleaning up"
rm -rf /opt/roundcubemail-${RELEASE}-complete.tar.gz
$STD apt-get -y autoremove
$STD apt-get -y autoclean
msg_ok "Cleaned"

View File

@ -0,0 +1,163 @@
#!/usr/bin/env bash
# Copyright (c) 2021-2024 tteck
# Author: tteck (tteckster)
# License: MIT
# https://github.com/tteck/Proxmox/raw/main/LICENSE
source /dev/stdin <<< "$FUNCTIONS_FILE_PATH"
color
verb_ip6
catch_errors
setting_up_container
network_check
update_os
# Generate a random string
generate_random_string() {
local LENGTH=$1
tr -dc A-Za-z0-9 </dev/urandom | head -c ${LENGTH} 2>/dev/null || true
}
msg_info "Installing Dependencies"
$STD apk add git
$STD apk add nodejs
$STD apk add npm
$STD apk add ansible
$STD apk add nmap
$STD apk add sudo
$STD apk add openssh
$STD apk add sshpass
$STD apk add py3-pip
$STD apk add expect
$STD apk add libcurl
$STD apk add gcompat
$STD apk add curl
$STD apk add newt
$STD git --version
$STD node --version
$STD npm --version
msg_ok "Installed Dependencies"
msg_info "Installing Redis"
$STD apk add redis
msg_ok "Installed Redis"
msg_info "Installing Nginx"
$STD apk add nginx
rm -rf /etc/nginx/http.d/default.conf
cat <<'EOF'> /etc/nginx/http.d/default.conf
server {
listen 80;
server_name localhost;
access_log off;
error_log off;
location /api/socket.io/ {
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Host $host;
proxy_pass http://127.0.0.1:3000/socket.io/;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
}
location /api/ {
proxy_pass http://127.0.0.1:3000/;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
}
location / {
proxy_pass http://127.0.0.1:8000/;
# WebSocket support
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
error_page 501 502 503 404 /custom.html;
location = /custom.html {
root /usr/share/nginx/html;
}
}
}
EOF
msg_ok "Installed Nginx"
msg_info "Installing MongoDB Database"
DB_NAME=ssm
DB_PORT=27017
echo 'http://dl-cdn.alpinelinux.org/alpine/v3.9/main' >> /etc/apk/repositories
echo 'http://dl-cdn.alpinelinux.org/alpine/v3.9/community' >> /etc/apk/repositories
$STD apk update
$STD apk add mongodb mongodb-tools
msg_ok "Installed MongoDB Database"
msg_info "Starting Services"
$STD rc-service redis start
$STD rc-update add redis default
$STD rc-service mongodb start
$STD rc-update add mongodb default
msg_ok "Started Services"
msg_info "Setting Up Squirrel Servers Manager"
$STD git clone https://github.com/SquirrelCorporation/SquirrelServersManager.git /opt/squirrelserversmanager
SECRET=$(generate_random_string 32)
SALT=$(generate_random_string 16)
VAULT_PWD=$(generate_random_string 32)
cat <<EOF > /opt/squirrelserversmanager/.env
# SECRETS
SECRET=$SECRET
SALT=$SALT
VAULT_PWD=$VAULT_PWD
# MONGO
DB_HOST=127.0.0.1
DB_NAME=ssm
DB_PORT=27017
# REDIS
REDIS_HOST=127.0.0.1
REDIS_PORT=6379
EOF
export NODE_ENV=production
export $(grep -v '^#' /opt/squirrelserversmanager/.env | xargs)
$STD npm install -g npm@latest
$STD npm install -g @umijs/max
$STD npm install -g typescript
$STD npm install pm2 -g
msg_ok "Squirrel Servers Manager Has Been Setup"
msg_info "Building Squirrel Servers Manager Lib"
cd /opt/squirrelserversmanager/shared-lib
$STD npm ci
$STD npm run build
msg_ok "Squirrel Servers Manager Lib built"
msg_info "Building & Running Squirrel Servers Manager Client"
cd /opt/squirrelserversmanager/client
$STD npm ci
$STD npm run build
$STD pm2 start --name="squirrelserversmanager-frontend" npm -- run serve
msg_ok "Squirrel Servers Manager Client Built & Ran"
msg_info "Building & Running Squirrel Servers Manager Server"
cd /opt/squirrelserversmanager/server
$STD npm ci
$STD npm run build
$STD pm2 start --name="squirrelserversmanager-backend" node -- ./dist/src/index.js
msg_ok "Squirrel Servers Manager Server Built & Ran"
msg_info "Starting Squirrel Servers Manager"
$STD pm2 startup
$STD pm2 save
mkdir -p /usr/share/nginx/html/
cp /opt/squirrelserversmanager/proxy/www/index.html /usr/share/nginx/html/custom.html
$STD rc-service nginx start
$STD rc-update add nginx default
msg_ok "Squirrel Servers Manager Started"
motd_ssh
customize

View File

@ -0,0 +1,163 @@
#!/usr/bin/env bash
# Copyright (c) 2021-2025 community-scripts ORG
# Author: MickLesk (CanbiZ)
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
source /dev/stdin <<< "$FUNCTIONS_FILE_PATH"
color
verb_ip6
catch_errors
setting_up_container
network_check
update_os
msg_info "Installing Dependencies"
$STD apt-get install -y \
curl \
sudo \
mc \
gnupg \
apt-transport-https \
lsb-release
msg_ok "Installed Dependencies"
msg_info "Setting up PostgreSQL Repository"
VERSION="$(awk -F'=' '/^VERSION_CODENAME=/{ print $NF }' /etc/os-release)"
echo "deb http://apt.postgresql.org/pub/repos/apt ${VERSION}-pgdg main" >/etc/apt/sources.list.d/pgdg.list
curl -sSL https://www.postgresql.org/media/keys/ACCC4CF8.asc | gpg --dearmor --output /etc/apt/trusted.gpg.d/postgresql.gpg
msg_ok "Setup PostgreSQL Repository"
msg_info "Installing PostgreSQL"
$STD apt-get update
$STD apt-get install -y postgresql postgresql-common
cat <<EOF >/etc/postgresql/17/main/pg_hba.conf
# PostgreSQL Client Authentication Configuration File
local all postgres peer
# TYPE DATABASE USER ADDRESS METHOD
# "local" is for Unix domain socket connections only
local all all md5
# IPv4 local connections:
host all all 127.0.0.1/32 scram-sha-256
host all all 0.0.0.0/0 md5
# IPv6 local connections:
host all all ::1/128 scram-sha-256
host all all ::/0 md5
# Allow replication connections from localhost, by a user with the
# replication privilege.
local replication all peer
host replication all 127.0.0.1/32 scram-sha-256
host replication all ::1/128 scram-sha-256
EOF
cat <<EOF >/etc/postgresql/17/main/postgresql.conf
# -----------------------------
# PostgreSQL configuration file
# -----------------------------
#------------------------------------------------------------------------------
# FILE LOCATIONS
#------------------------------------------------------------------------------
data_directory = '/var/lib/postgresql/17/main'
hba_file = '/etc/postgresql/17/main/pg_hba.conf'
ident_file = '/etc/postgresql/17/main/pg_ident.conf'
external_pid_file = '/var/run/postgresql/17-main.pid'
#------------------------------------------------------------------------------
# CONNECTIONS AND AUTHENTICATION
#------------------------------------------------------------------------------
# - Connection Settings -
listen_addresses = '*'
port = 5432
max_connections = 100
unix_socket_directories = '/var/run/postgresql'
# - SSL -
ssl = on
ssl_cert_file = '/etc/ssl/certs/ssl-cert-snakeoil.pem'
ssl_key_file = '/etc/ssl/private/ssl-cert-snakeoil.key'
#------------------------------------------------------------------------------
# RESOURCE USAGE (except WAL)
#------------------------------------------------------------------------------
shared_buffers = 128MB
dynamic_shared_memory_type = posix
#------------------------------------------------------------------------------
# WRITE-AHEAD LOG
#------------------------------------------------------------------------------
max_wal_size = 1GB
min_wal_size = 80MB
#------------------------------------------------------------------------------
# REPORTING AND LOGGING
#------------------------------------------------------------------------------
# - What to Log -
log_line_prefix = '%m [%p] %q%u@%d '
log_timezone = 'Etc/UTC'
#------------------------------------------------------------------------------
# PROCESS TITLE
#------------------------------------------------------------------------------
cluster_name = '17/main'
#------------------------------------------------------------------------------
# CLIENT CONNECTION DEFAULTS
#------------------------------------------------------------------------------
# - Locale and Formatting -
datestyle = 'iso, mdy'
timezone = 'Etc/UTC'
lc_messages = 'C'
lc_monetary = 'C'
lc_numeric = 'C'
lc_time = 'C'
default_text_search_config = 'pg_catalog.english'
#------------------------------------------------------------------------------
# CONFIG FILE INCLUDES
#------------------------------------------------------------------------------
include_dir = 'conf.d'
EOF
systemctl restart postgresql
msg_ok "Installed PostgreSQL"
msg_info "Setup TimescaleDB"
echo "deb https://packagecloud.io/timescale/timescaledb/debian/ $(lsb_release -c -s) main" | sudo tee /etc/apt/sources.list.d/timescaledb.list
wget --quiet -O - https://packagecloud.io/timescale/timescaledb/gpgkey | sudo gpg --dearmor -o /etc/apt/trusted.gpg.d/timescaledb.gpg
$STD apt-get update
$STD apt-get install -y timescaledb-2-postgresql-17 postgresql-client-17
$STD timescaledb-tune --quiet --yes
$STD systemctl restart postgresql
msg_ok "Setup TimescaleDB"
read -r -p "Would you like to add Adminer? <y/N> " prompt
if [[ "${prompt,,}" =~ ^(y|yes)$ ]]; then
msg_info "Installing Adminer"
$STD apt install -y adminer
$STD a2enconf adminer
systemctl reload apache2
msg_ok "Installed Adminer"
fi
motd_ssh
customize
msg_info "Cleaning up"
$STD apt-get -y autoremove
$STD apt-get -y autoclean
msg_ok "Cleaned"

133
install/wallabag-install.sh Normal file
View File

@ -0,0 +1,133 @@
#!/usr/bin/env bash
# Copyright (c) 2021-2025 community-scripts ORG
# Author: MickLesk (Canbiz)
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
color
verb_ip6
catch_errors
setting_up_container
network_check
update_os
msg_info "Installing Dependencies (Patience)"
$STD apt-get install -y \
curl \
git \
unzip \
sudo \
make \
php8.2 \
php8.2-{cli,common,bcmath,intl,fpm,tidy,xml,mysql,mbstring,zip,gd,curl} \
composer \
apache2 \
libapache2-mod-php \
redis \
mariadb-server
msg_ok "Installed Dependencies"
msg_info "Setting up Database"
DB_NAME=wallabag_db
DB_USER=wallabag
DB_PASS=$(openssl rand -base64 18 | tr -dc 'a-zA-Z0-9' | head -c13)
SECRET_KEY="$(openssl rand -base64 32 | tr -dc 'a-zA-Z0-9' | cut -c1-32)"
$STD mysql -u root -e "CREATE DATABASE $DB_NAME;"
$STD mysql -u root -e "CREATE USER '$DB_USER'@'localhost' IDENTIFIED BY '$DB_PASS';"
$STD mysql -u root -e "GRANT ALL PRIVILEGES ON $DB_NAME.* TO '$DB_USER'@'localhost'; FLUSH PRIVILEGES;"
{
echo "Wallabag Credentials"
echo "Database User: $DB_USER"
echo "Database Password: $DB_PASS"
echo "Database Name: $DB_NAME"
} >> ~/wallabag.creds
msg_ok "Set up Database"
msg_info "Installing Wallabag (Patience)"
RELEASE=$(curl -s https://api.github.com/repos/wallabag/wallabag/releases/latest | grep "tag_name" | awk '{print substr($2, 2, length($2)-3) }')
wget -q "https://github.com/wallabag/wallabag/archive/refs/tags/${RELEASE}.zip"
unzip -q ${RELEASE}.zip
mv wallabag-${RELEASE} /opt/wallabag
cd /opt/wallabag
useradd -d /opt/wallabag -s /bin/bash -M wallabag
chown -R wallabag:wallabag /opt/wallabag
mv /opt/wallabag/app/config/parameters.yml.dist /opt/wallabag/app/config/parameters.yml
sed -i \
-e 's|database_name: wallabag|database_name: wallabag_db|' \
-e 's|database_port: ~|database_port: 3306|' \
-e 's|database_user: root|database_user: wallabag|' \
-e 's|database_password: ~|database_password: '"$DB_PASS"'|' \
-e 's|secret: .*|secret: '"$SECRET_KEY"'|' \
/opt/wallabag/app/config/parameters.yml
export COMPOSER_ALLOW_SUPERUSER=1
sudo -u wallabag make install --no-interaction
export COMPOSER_ALLOW_SUPERUSER=1
composer install --no-dev --prefer-dist --optimize-autoloader --no-interaction
msg_ok "Installed Wallabag"
msg_info "Setting up Virtual Host"
cat <<EOF >/etc/apache2/sites-available/wallabag.conf
<VirtualHost *:80>
    ServerAdmin webmaster@localhost
    DocumentRoot /opt/wallabag/web
    <Directory /opt/wallabag/web>
        AllowOverride None
        Require all granted
        <IfModule mod_rewrite.c>
            Options -MultiViews
            RewriteEngine On
            RewriteCond %{REQUEST_FILENAME} !-f
            RewriteRule ^(.*)\$ app.php [QSA,L]
        </IfModule>
    </Directory>
    <Directory /opt/wallabag/web/bundles>
        <IfModule mod_rewrite.c>
            RewriteEngine Off
        </IfModule>
    </Directory>
    ErrorLog \${APACHE_LOG_DIR}/wallabag_error.log
    CustomLog \${APACHE_LOG_DIR}/wallabag_access.log combined
</VirtualHost>
EOF
$STD a2enmod rewrite
$STD a2ensite wallabag.conf
$STD a2dissite 000-default.conf
systemctl reload apache2
msg_ok "Configured Virtual Host"
msg_info "Setting Permissions"
chown -R www-data:www-data /opt/wallabag/{bin,app/config,vendor,data,var,web}
msg_ok "Set Permissions"
msg_info "Running Wallabag Installation"
php bin/console wallabag:install --env=prod
msg_ok "Wallabag Installed"
motd_ssh
customize
msg_info "Cleaning up"
$STD apt-get autoremove
$STD apt-get autoclean
msg_ok "Cleaned"

View File

@ -13,61 +13,13 @@ variables() {
METHOD="default" # sets the METHOD variable to "default", used for the API call.
RANDOM_UUID="$(cat /proc/sys/kernel/random/uuid)" # generates a random UUID and sets it to the RANDOM_UUID variable.
}
source <(curl -s https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/api.func)
source <(curl -s https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/core.func)
# This function sets various color variables using ANSI escape codes for formatting text in the terminal.
color() {
# Colors
YW=$(echo "\033[33m")
YWB=$(echo "\033[93m")
BL=$(echo "\033[36m")
RD=$(echo "\033[01;31m")
BGN=$(echo "\033[4;92m")
GN=$(echo "\033[1;92m")
DGN=$(echo "\033[32m")
# Formatting
CL=$(echo "\033[m")
BOLD=$(echo "\033[1m")
HOLD=" "
TAB=" "
# Icons
CM="${TAB}✔️${TAB}"
CROSS="${TAB}✖️${TAB}${CL}"
INFO="${TAB}💡${TAB}${CL}"
OS="${TAB}🖥️${TAB}${CL}"
OSVERSION="${TAB}🌟${TAB}${CL}"
CONTAINERTYPE="${TAB}📦${TAB}${CL}"
DISKSIZE="${TAB}💾${TAB}${CL}"
CPUCORE="${TAB}🧠${TAB}${CL}"
RAMSIZE="${TAB}🛠️${TAB}${CL}"
SEARCH="${TAB}🔍${TAB}${CL}"
VERBOSE_CROPPED="🔍${TAB}"
VERIFYPW="${TAB}🔐${TAB}${CL}"
CONTAINERID="${TAB}🆔${TAB}${CL}"
HOSTNAME="${TAB}🏠${TAB}${CL}"
BRIDGE="${TAB}🌉${TAB}${CL}"
NETWORK="${TAB}📡${TAB}${CL}"
GATEWAY="${TAB}🌐${TAB}${CL}"
DISABLEIPV6="${TAB}🚫${TAB}${CL}"
DEFAULT="${TAB}⚙️${TAB}${CL}"
MACADDRESS="${TAB}🔗${TAB}${CL}"
VLANTAG="${TAB}🏷️${TAB}${CL}"
ROOTSSH="${TAB}🔑${TAB}${CL}"
CREATING="${TAB}🚀${TAB}${CL}"
ADVANCED="${TAB}🧩${TAB}${CL}"
FUSE="${TAB}🔧${TAB}${CL}"
}
# This function enables error handling in the script by setting options and defining a trap for the ERR signal.
catch_errors() {
set -Eeuo pipefail
trap 'error_handler $LINENO "$BASH_COMMAND"' ERR
}
# This function is called when an error occurs. It receives the exit code, line number, and command that caused the error, and displays an error message.
error_handler() {
source /dev/stdin <<<$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/api.func)
if [ -n "$SPINNER_PID" ] && ps -p $SPINNER_PID >/dev/null; then kill $SPINNER_PID >/dev/null; fi
@ -84,127 +36,8 @@ error_handler() {
echo -e "\n$error_message\n"
}
# This function displays an informational message with logging support.
declare -A MSG_INFO_SHOWN
SPINNER_ACTIVE=0
SPINNER_PID=""
SPINNER_MSG=""
trap 'stop_spinner' EXIT INT TERM HUP
start_spinner() {
local msg="$1"
local frames=(⠋ ⠙ ⠹ ⠸ ⠼ ⠴ ⠦ ⠧ ⠇ ⠏)
local spin_i=0
local interval=0.1
SPINNER_MSG="$msg"
printf "\r\e[2K" >&2
{
while [[ "$SPINNER_ACTIVE" -eq 1 ]]; do
printf "\r\e[2K%s %b" "${frames[spin_i]}" "${YW}${SPINNER_MSG}${CL}" >&2
spin_i=$(((spin_i + 1) % ${#frames[@]}))
sleep "$interval"
done
} &
SPINNER_PID=$!
disown "$SPINNER_PID"
}
stop_spinner() {
if [[ ${SPINNER_PID+v} && -n "$SPINNER_PID" ]] && kill -0 "$SPINNER_PID" 2>/dev/null; then
kill "$SPINNER_PID" 2>/dev/null
sleep 0.1
kill -0 "$SPINNER_PID" 2>/dev/null && kill -9 "$SPINNER_PID" 2>/dev/null
wait "$SPINNER_PID" 2>/dev/null || true
fi
SPINNER_ACTIVE=0
unset SPINNER_PID
}
spinner_guard() {
if [[ "$SPINNER_ACTIVE" -eq 1 ]] && [[ -n "$SPINNER_PID" ]]; then
kill "$SPINNER_PID" 2>/dev/null
wait "$SPINNER_PID" 2>/dev/null || true
SPINNER_ACTIVE=0
unset SPINNER_PID
fi
}
msg_info() {
local msg="$1"
[[ -n "${MSG_INFO_SHOWN["$msg"]+x}" ]] && return
MSG_INFO_SHOWN["$msg"]=1
spinner_guard
SPINNER_ACTIVE=1
start_spinner "$msg"
}
msg_ok() {
local msg="$1"
stop_spinner
printf "\r\e[2K%s %b\n" "${CM}" "${GN}${msg}${CL}" >&2
unset MSG_INFO_SHOWN["$msg"]
}
msg_error() {
stop_spinner
local msg="$1"
printf "\r\e[2K%s %b\n" "${CROSS}" "${RD}${msg}${CL}" >&2
log_message "ERROR" "$msg"
}
log_message() {
local level="$1"
local message="$2"
local timestamp
local logdate
timestamp=$(date '+%Y-%m-%d %H:%M:%S')
logdate=$(date '+%Y-%m-%d')
LOGDIR="/usr/local/community-scripts/logs"
mkdir -p "$LOGDIR"
LOGFILE="${LOGDIR}/${logdate}_${NSAPP}.log"
echo "$timestamp - $level: $message" >>"$LOGFILE"
}
# Check if the shell is using bash
shell_check() {
if [[ "$(basename "$SHELL")" != "bash" ]]; then
clear
msg_error "Your default shell is currently not set to Bash. To use these scripts, please switch to the Bash shell."
echo -e "\nExiting..."
sleep 2
exit
fi
}
# Run as root only
root_check() {
if [[ "$(id -u)" -ne 0 || $(ps -o comm= -p $PPID) == "sudo" ]]; then
clear
msg_error "Please run this script as root."
echo -e "\nExiting..."
sleep 2
exit
fi
}
# This function checks the version of Proxmox Virtual Environment (PVE) and exits if the version is not supported.
pve_check() {
if ! pveversion | grep -Eq "pve-manager/8\.[1-9](\.[0-9]+)*"; then
msg_error "${CROSS}${RD}This version of Proxmox Virtual Environment is not supported"
echo -e "Requires Proxmox Virtual Environment Version 8.1 or later."
echo -e "Exiting..."
sleep 2
exit
fi
}
# When a node is running tens of containers, it's possible to exceed the kernel's cryptographic key storage allocations.
# These are tuneable, so verify if the current deployment is approaching the limits, advise the user on how to tune the limits, and exit the script.
# https://cleveruptime.com/docs/files/proc-key-users | https://docs.kernel.org/security/keys/core.html
@ -251,17 +84,6 @@ maxkeys_check() {
echo -e "${CM}${GN} All kernel key limits are within safe thresholds.${CL}"
}
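# Illustrative sketch (not part of build.func): the limits maxkeys_check evaluates can also be
# inspected by hand. /proc/key-users lines read "uid: usage nkeys/nikeys qnkeys/maxkeys qnbytes/maxbytes";
# UID 100000 (the first unprivileged container) is only an assumption for this example.
#   awk '$1 == "100000:" {print "keys:", $4, "bytes:", $5}' /proc/key-users
#   sysctl -w kernel.keys.maxkeys=2000   # example of raising the per-user key limit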
# This function checks the system architecture and exits if it's not "amd64".
arch_check() {
if [ "$(dpkg --print-architecture)" != "amd64" ]; then
echo -e "\n ${INFO}${YWB}This script will not work with PiMox! \n"
echo -e "\n ${YWB}Visit https://github.com/asylumexp/Proxmox for ARM64 support. \n"
echo -e "Exiting..."
sleep 2
exit
fi
}
# Function to get the current IP address based on the distribution
get_current_ip() {
if [ -f /etc/os-release ]; then
@ -328,29 +150,6 @@ header_info() {
echo "$header_content"
}
# This function checks if the script is running through SSH and prompts the user to confirm if they want to proceed or exit.
ssh_check() {
if [[ -n "${SSH_CLIENT:+x}" ]]; then
dialog --backtitle "[dev] Proxmox VE Helper Scripts" \
--title "SSH DETECTED" \
--yesno "It's advisable to utilize the Proxmox shell rather than SSH,\nas there may be potential complications with variable retrieval.\n\nProceed using SSH?" 12 72 \
--ok-label "Yes" --cancel-label "No"
case $? in
0)
dialog --backtitle "[dev] Proxmox VE Helper Scripts" \
--title "Proceed using SSH" \
--msgbox "You've chosen to proceed using SSH.\n\nIf any issues arise, please run the script in the Proxmox shell before creating a repository issue." 10 72
;;
1 | 255)
clear
printf "%s\n" "Exiting due to SSH usage. Please consider using the Proxmox shell."
exit
;;
esac
fi
}
base_settings() {
# Default Settings
CT_TYPE="1"
@ -423,13 +222,6 @@ echo_default() {
echo -e " "
}
# This function is called when the user decides to exit the script. It clears the screen and displays an exit message.
exit_script() {
clear
echo -e "\n${CROSS}${RD}User exited script${CL}\n"
exit
}
# This function allows the user to configure advanced settings for the script.
advanced_settings() {
whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --msgbox --title "Here is an instructional tip:" "To make a selection, use the Spacebar." 8 58
@ -1342,7 +1134,7 @@ get_gh_release() {
}
start() {
#source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/github.func)
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/tools.func)
LOGDIR="/usr/local/community-scripts/logs"
mkdir -p "$LOGDIR"
@ -1558,22 +1350,7 @@ EOF
post_update_to_api "done" "none"
}
set_std_mode() {
if [ "$VERB" = "yes" ]; then
STD=""
else
STD="silent"
fi
}
# Silent execution function
silent() {
if [ "$VERB" = "no" ]; then
"$@" >>"$LOGFILE" 2>&1
else
"$@" 2>&1 | tee -a "$LOGFILE"
fi
}
# 26.03.2025 disabled
#exit_script() {
@ -1614,6 +1391,3 @@ trap 'exit_script' EXIT
trap 'post_update_to_api "failed" "$BASH_COMMAND"' ERR
trap 'post_update_to_api "failed" "INTERRUPTED"' SIGINT
trap 'post_update_to_api "failed" "TERMINATED"' SIGTERM
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/helper.func)

220
misc/core.func Normal file
View File

@ -0,0 +1,220 @@
# Copyright (c) 2021-2025 community-scripts ORG
# Author: michelroegl-brunner
# License: MIT | https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/LICENSE
color() {
# Colors
YW=$(echo "\033[33m")
YWB=$(echo "\033[93m")
BL=$(echo "\033[36m")
RD=$(echo "\033[01;31m")
BGN=$(echo "\033[4;92m")
GN=$(echo "\033[1;92m")
DGN=$(echo "\033[32m")
# Formatting
CL=$(echo "\033[m")
BOLD=$(echo "\033[1m")
HOLD=" "
TAB=" "
# Icons
CM="${TAB}✔️${TAB}"
CROSS="${TAB}✖️${TAB}${CL}"
INFO="${TAB}💡${TAB}${CL}"
OS="${TAB}🖥️${TAB}${CL}"
OSVERSION="${TAB}🌟${TAB}${CL}"
CONTAINERTYPE="${TAB}📦${TAB}${CL}"
DISKSIZE="${TAB}💾${TAB}${CL}"
CPUCORE="${TAB}🧠${TAB}${CL}"
RAMSIZE="${TAB}🛠️${TAB}${CL}"
SEARCH="${TAB}🔍${TAB}${CL}"
VERBOSE_CROPPED="🔍${TAB}"
VERIFYPW="${TAB}🔐${TAB}${CL}"
CONTAINERID="${TAB}🆔${TAB}${CL}"
HOSTNAME="${TAB}🏠${TAB}${CL}"
BRIDGE="${TAB}🌉${TAB}${CL}"
NETWORK="${TAB}📡${TAB}${CL}"
GATEWAY="${TAB}🌐${TAB}${CL}"
DISABLEIPV6="${TAB}🚫${TAB}${CL}"
DEFAULT="${TAB}⚙️${TAB}${CL}"
MACADDRESS="${TAB}🔗${TAB}${CL}"
VLANTAG="${TAB}🏷️${TAB}${CL}"
ROOTSSH="${TAB}🔑${TAB}${CL}"
CREATING="${TAB}🚀${TAB}${CL}"
ADVANCED="${TAB}🧩${TAB}${CL}"
FUSE="${TAB}🔧${TAB}${CL}"
}
declare -A MSG_INFO_SHOWN
SPINNER_ACTIVE=0
SPINNER_PID=""
SPINNER_MSG=""
start_spinner() {
local msg="$1"
local frames=(⠋ ⠙ ⠹ ⠸ ⠼ ⠴ ⠦ ⠧ ⠇ ⠏)
local spin_i=0
local interval=0.1
SPINNER_MSG="$msg"
printf "\r\e[2K" >&2
{
while [[ "$SPINNER_ACTIVE" -eq 1 ]]; do
printf "\r\e[2K%s %b" "${frames[spin_i]}" "${YW}${SPINNER_MSG}${CL}" >&2
spin_i=$(((spin_i + 1) % ${#frames[@]}))
sleep "$interval"
done
} &
SPINNER_PID=$!
disown "$SPINNER_PID"
}
stop_spinner() {
if [[ ${SPINNER_PID+v} && -n "$SPINNER_PID" ]] && kill -0 "$SPINNER_PID" 2>/dev/null; then
kill "$SPINNER_PID" 2>/dev/null
sleep 0.1
kill -0 "$SPINNER_PID" 2>/dev/null && kill -9 "$SPINNER_PID" 2>/dev/null
wait "$SPINNER_PID" 2>/dev/null || true
fi
SPINNER_ACTIVE=0
unset SPINNER_PID
}
spinner_guard() {
if [[ "$SPINNER_ACTIVE" -eq 1 ]] && [[ -n "$SPINNER_PID" ]]; then
kill "$SPINNER_PID" 2>/dev/null
wait "$SPINNER_PID" 2>/dev/null || true
SPINNER_ACTIVE=0
unset SPINNER_PID
fi
}
log_message() {
local level="$1"
local message="$2"
local timestamp
local logdate
timestamp=$(date '+%Y-%m-%d %H:%M:%S')
logdate=$(date '+%Y-%m-%d')
LOGDIR="/usr/local/community-scripts/logs"
mkdir -p "$LOGDIR"
LOGFILE="${LOGDIR}/${logdate}_${NSAPP}.log"
echo "$timestamp - $level: $message" >>"$LOGFILE"
}
msg_info() {
local msg="$1"
if [ "${SPINNER_ACTIVE:-0}" -eq 1 ]; then
return
fi
SPINNER_ACTIVE=1
start_spinner "$msg"
}
msg_ok() {
if [ -n "${SPINNER_PID:-}" ] && ps -p "$SPINNER_PID" >/dev/null 2>&1; then
kill "$SPINNER_PID" >/dev/null 2>&1
wait "$SPINNER_PID" 2>/dev/null || true
fi
local msg="$1"
printf "\r\e[2K${CM}${GN}%b${CL}\n" "$msg" >&2
unset SPINNER_PID
SPINNER_ACTIVE=0
log_message "OK" "$msg"
}
msg_error() {
if [ -n "${SPINNER_PID:-}" ] && ps -p "$SPINNER_PID" >/dev/null 2>&1; then
kill "$SPINNER_PID" >/dev/null 2>&1
wait "$SPINNER_PID" 2>/dev/null || true
fi
local msg="$1"
printf "\r\e[2K${CROSS}${RD}%b${CL}\n" "$msg" >&2
unset SPINNER_PID
SPINNER_ACTIVE=0
log_message "ERROR" "$msg"
}
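# Illustrative usage sketch (not part of core.func): the helpers above are meant to bracket
# long-running steps, with msg_error reserved for failures (it also logs via log_message):
#   msg_info "Installing Dependencies"
#   $STD apt-get install -y curl sudo mc
#   msg_ok "Installed Dependencies"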
shell_check() {
if [[ "$(basename "$SHELL")" != "bash" ]]; then
clear
msg_error "Your default shell is currently not set to Bash. To use these scripts, please switch to the Bash shell."
echo -e "\nExiting..."
sleep 2
exit
fi
}
root_check() {
if [[ "$(id -u)" -ne 0 || $(ps -o comm= -p $PPID) == "sudo" ]]; then
clear
msg_error "Please run this script as root."
echo -e "\nExiting..."
sleep 2
exit
fi
}
pve_check() {
if ! pveversion | grep -Eq "pve-manager/8\.[1-9](\.[0-9]+)*"; then
msg_error "${CROSS}${RD}This version of Proxmox Virtual Environment is not supported"
echo -e "Requires Proxmox Virtual Environment Version 8.1 or later."
echo -e "Exiting..."
sleep 2
exit
fi
}
arch_check() {
if [ "$(dpkg --print-architecture)" != "amd64" ]; then
echo -e "\n ${INFO}${YWB}This script will not work with PiMox! \n"
echo -e "\n ${YWB}Visit https://github.com/asylumexp/Proxmox for ARM64 support. \n"
echo -e "Exiting..."
sleep 2
exit
fi
}
ssh_check() {
if command -v pveversion >/dev/null 2>&1; then
if [ -n "${SSH_CLIENT:+x}" ]; then
if whiptail --backtitle "Proxmox VE Helper Scripts" --defaultno --title "SSH DETECTED" --yesno "It's suggested to use the Proxmox shell instead of SSH, since SSH can create issues while gathering variables. Would you like to proceed with using SSH?" 10 62; then
echo "you've been warned"
else
clear
exit
fi
fi
fi
}
exit-script() {
clear
echo -e "\n${CROSS}${RD}User exited script${CL}\n"
exit
}
set_std_mode() {
if [ "$VERB" = "yes" ]; then
STD=""
else
STD="silent"
fi
}
silent() {
if [ "$VERB" = "no" ]; then
"$@" >>"$LOGFILE" 2>&1
else
"$@" 2>&1 | tee -a "$LOGFILE"
fi
}
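# Illustrative sketch (not part of core.func): with VERB="no", set_std_mode resolves STD to
# "silent", so "$STD apt-get update" expands to "silent apt-get update" and output lands in
# $LOGFILE; with VERB="yes" the command runs verbosely and is tee'd to the same log.
#   VERB="no" LOGFILE="/tmp/example.log"   # hypothetical values for demonstration
#   set_std_mode
#   $STD apt-get update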

View File

@ -1,380 +0,0 @@
install_node_and_modules() {
local NODE_VERSION="${NODE_VERSION:-22}"
local NODE_MODULE="${NODE_MODULE:-}"
local CURRENT_NODE_VERSION=""
local NEED_NODE_INSTALL=false
# Check if Node.js is already installed
if command -v node >/dev/null; then
CURRENT_NODE_VERSION="$(node -v | grep -oP '^v\K[0-9]+')"
if [[ "$CURRENT_NODE_VERSION" != "$NODE_VERSION" ]]; then
msg_info "Node.js version $CURRENT_NODE_VERSION found, replacing with $NODE_VERSION"
NEED_NODE_INSTALL=true
else
msg_ok "Node.js $NODE_VERSION already installed"
fi
else
msg_info "Node.js not found, installing version $NODE_VERSION"
NEED_NODE_INSTALL=true
fi
# Install Node.js if required
if [[ "$NEED_NODE_INSTALL" == true ]]; then
$STD apt-get purge -y nodejs
rm -f /etc/apt/sources.list.d/nodesource.list /etc/apt/keyrings/nodesource.gpg
mkdir -p /etc/apt/keyrings
if ! curl -fsSL https://deb.nodesource.com/gpgkey/nodesource-repo.gpg.key | \
gpg --dearmor -o /etc/apt/keyrings/nodesource.gpg; then
msg_error "Failed to download or import NodeSource GPG key"
exit 1
fi
echo "deb [signed-by=/etc/apt/keyrings/nodesource.gpg] https://deb.nodesource.com/node_${NODE_VERSION}.x nodistro main" \
> /etc/apt/sources.list.d/nodesource.list
if ! apt-get update >/dev/null 2>&1; then
msg_error "Failed to update APT repositories after adding NodeSource"
exit 1
fi
if ! apt-get install -y nodejs >/dev/null 2>&1; then
msg_error "Failed to install Node.js ${NODE_VERSION} from NodeSource"
exit 1
fi
msg_ok "Installed Node.js ${NODE_VERSION}"
fi
export NODE_OPTIONS="--max_old_space_size=4096"
# Install global Node modules
if [[ -n "$NODE_MODULE" ]]; then
IFS=',' read -ra MODULES <<< "$NODE_MODULE"
for mod in "${MODULES[@]}"; do
local MODULE_NAME MODULE_REQ_VERSION MODULE_INSTALLED_VERSION
if [[ "$mod" == *"@"* ]]; then
MODULE_NAME="${mod%@*}"
MODULE_REQ_VERSION="${mod#*@}"
else
MODULE_NAME="$mod"
MODULE_REQ_VERSION="latest"
fi
# Check if the module is already installed
if npm list -g --depth=0 "$MODULE_NAME" >/dev/null 2>&1; then
MODULE_INSTALLED_VERSION="$(npm list -g --depth=0 "$MODULE_NAME" | grep "$MODULE_NAME@" | awk -F@ '{print $2}' | tr -d '[:space:]')"
if [[ "$MODULE_REQ_VERSION" != "latest" && "$MODULE_REQ_VERSION" != "$MODULE_INSTALLED_VERSION" ]]; then
msg_info "Updating $MODULE_NAME from v$MODULE_INSTALLED_VERSION to v$MODULE_REQ_VERSION"
if ! $STD npm install -g "${MODULE_NAME}@${MODULE_REQ_VERSION}"; then
msg_error "Failed to update $MODULE_NAME to version $MODULE_REQ_VERSION"
exit 1
fi
elif [[ "$MODULE_REQ_VERSION" == "latest" ]]; then
msg_info "Updating $MODULE_NAME to latest version"
if ! $STD npm install -g "${MODULE_NAME}@latest"; then
msg_error "Failed to update $MODULE_NAME to latest version"
exit 1
fi
else
msg_ok "$MODULE_NAME@$MODULE_INSTALLED_VERSION already installed"
fi
else
msg_info "Installing $MODULE_NAME@$MODULE_REQ_VERSION"
if ! $STD npm install -g "${MODULE_NAME}@${MODULE_REQ_VERSION}"; then
msg_error "Failed to install $MODULE_NAME@$MODULE_REQ_VERSION"
exit 1
fi
fi
done
msg_ok "All requested Node modules have been processed"
fi
}
function install_postgresql() {
local PG_VERSION="${PG_VERSION:-15}"
local CURRENT_PG_VERSION=""
local DISTRO
local NEED_PG_INSTALL=false
DISTRO="$(awk -F'=' '/^VERSION_CODENAME=/{ print $NF }' /etc/os-release)"
if command -v psql >/dev/null; then
CURRENT_PG_VERSION="$(psql -V | grep -oP '\s\K[0-9]+(?=\.)')"
if [[ "$CURRENT_PG_VERSION" != "$PG_VERSION" ]]; then
msg_info "PostgreSQL Version $CURRENT_PG_VERSION found, replacing with $PG_VERSION"
NEED_PG_INSTALL=true
fi
else
msg_info "PostgreSQL not found, installing version $PG_VERSION"
NEED_PG_INSTALL=true
fi
if [[ "$NEED_PG_INSTALL" == true ]]; then
msg_info "Stopping PostgreSQL if running"
systemctl stop postgresql >/dev/null 2>&1 || true
msg_info "Removing conflicting PostgreSQL packages"
$STD apt-get purge -y "postgresql*"
rm -f /etc/apt/sources.list.d/pgdg.list /etc/apt/trusted.gpg.d/postgresql.gpg
msg_info "Setting up PostgreSQL Repository"
curl -fsSL https://www.postgresql.org/media/keys/ACCC4CF8.asc | \
gpg --dearmor -o /etc/apt/trusted.gpg.d/postgresql.gpg
echo "deb https://apt.postgresql.org/pub/repos/apt ${DISTRO}-pgdg main" \
> /etc/apt/sources.list.d/pgdg.list
$STD apt-get update
$STD apt-get install -y "postgresql-${PG_VERSION}"
msg_ok "Installed PostgreSQL ${PG_VERSION}"
fi
}
function install_mariadb() {
local MARIADB_VERSION="${MARIADB_VERSION:-10.11}"
local CURRENT_VERSION=""
local NEED_INSTALL=false
if command -v mariadb >/dev/null; then
CURRENT_VERSION="$(mariadb --version | grep -oP 'Ver\s+\K[0-9]+\.[0-9]+')"
if [[ "$CURRENT_VERSION" != "$MARIADB_VERSION" ]]; then
msg_info "MariaDB $CURRENT_VERSION found, replacing with $MARIADB_VERSION"
NEED_INSTALL=true
else
msg_ok "MariaDB $MARIADB_VERSION already installed"
fi
else
msg_info "MariaDB not found, installing version $MARIADB_VERSION"
NEED_INSTALL=true
fi
if [[ "$NEED_INSTALL" == true ]]; then
msg_info "Removing conflicting MariaDB packages"
$STD systemctl stop mariadb >/dev/null 2>&1 || true
$STD apt-get purge -y 'mariadb*'
rm -f /etc/apt/sources.list.d/mariadb.list /etc/apt/trusted.gpg.d/mariadb.gpg
msg_info "Setting up MariaDB Repository"
curl -fsSL "https://mariadb.org/mariadb_release_signing_key.asc" | gpg --dearmor -o /etc/apt/trusted.gpg.d/mariadb.gpg
DISTRO_CODENAME="$(awk -F= '/VERSION_CODENAME/ { print $2 }' /etc/os-release)"
echo "deb [signed-by=/etc/apt/trusted.gpg.d/mariadb.gpg] http://mirror.mariadb.org/repo/${MARIADB_VERSION}/debian ${DISTRO_CODENAME} main" \
> /etc/apt/sources.list.d/mariadb.list
$STD apt-get update
$STD apt-get install -y mariadb-server mariadb-client
msg_ok "Installed MariaDB $MARIADB_VERSION"
fi
}
function install_mysql() {
local MYSQL_VERSION="${MYSQL_VERSION:-8.0}"
local CURRENT_VERSION=""
local NEED_INSTALL=false
if command -v mysql >/dev/null; then
CURRENT_VERSION="$(mysql --version | grep -oP 'Distrib\s+\K[0-9]+\.[0-9]+')"
if [[ "$CURRENT_VERSION" != "$MYSQL_VERSION" ]]; then
msg_info "MySQL $CURRENT_VERSION found, replacing with $MYSQL_VERSION"
NEED_INSTALL=true
else
msg_ok "MySQL $MYSQL_VERSION already installed"
fi
else
msg_info "MySQL not found, installing version $MYSQL_VERSION"
NEED_INSTALL=true
fi
if [[ "$NEED_INSTALL" == true ]]; then
msg_info "Removing conflicting MySQL packages"
$STD systemctl stop mysql >/dev/null 2>&1 || true
$STD apt-get purge -y 'mysql*'
rm -f /etc/apt/sources.list.d/mysql.list /etc/apt/trusted.gpg.d/mysql.gpg
msg_info "Setting up MySQL APT Repository"
DISTRO_CODENAME="$(awk -F= '/VERSION_CODENAME/ { print $2 }' /etc/os-release)"
curl -fsSL https://repo.mysql.com/RPM-GPG-KEY-mysql-2022 | gpg --dearmor -o /etc/apt/trusted.gpg.d/mysql.gpg
echo "deb [signed-by=/etc/apt/trusted.gpg.d/mysql.gpg] https://repo.mysql.com/apt/debian/ ${DISTRO_CODENAME} mysql-${MYSQL_VERSION}" \
> /etc/apt/sources.list.d/mysql.list
$STD apt-get update
$STD apt-get install -y mysql-server
msg_ok "Installed MySQL $MYSQL_VERSION"
fi
}
fetch_and_deploy_gh_release() {
local repo="$1"
local app=$(echo ${APPLICATION,,} | tr -d ' ')
local api_url="https://api.github.com/repos/$repo/releases/latest"
local header=()
local attempt=0
local max_attempts=3
local api_response tag http_code
local current_version=""
local curl_timeout="--connect-timeout 10 --max-time 30"
# Check if the app directory exists and if there's a version file
if [[ -f "/opt/${app}_version.txt" ]]; then
current_version=$(cat "/opt/${app}_version.txt")
$STD msg_info "Current version: $current_version"
fi
# ensure that jq is installed
if ! command -v jq &>/dev/null; then
$STD msg_info "Installing jq..."
apt-get update -qq &>/dev/null
apt-get install -y jq &>/dev/null || {
msg_error "Failed to install jq"
return 1
}
fi
[[ -n "${GITHUB_TOKEN:-}" ]] && header=(-H "Authorization: token $GITHUB_TOKEN")
until [[ $attempt -ge $max_attempts ]]; do
((attempt++)) || true
$STD msg_info "[$attempt/$max_attempts] Fetching GitHub release for $repo...\n"
api_response=$(curl $curl_timeout -fsSL -w "%{http_code}" -o /tmp/gh_resp.json "${header[@]}" "$api_url")
http_code="${api_response:(-3)}"
if [[ "$http_code" == "404" ]]; then
msg_error "Repository $repo has no Release candidate (404)"
return 1
fi
if [[ "$http_code" != "200" ]]; then
$STD msg_info "Request failed with HTTP $http_code, retrying...\n"
sleep $((attempt * 2))
continue
fi
api_response=$(</tmp/gh_resp.json)
if echo "$api_response" | grep -q "API rate limit exceeded"; then
msg_error "GitHub API rate limit exceeded."
return 1
fi
if echo "$api_response" | jq -e '.message == "Not Found"' &>/dev/null; then
msg_error "Repository not found: $repo"
return 1
fi
tag=$(echo "$api_response" | jq -r '.tag_name // .name // empty')
[[ "$tag" =~ ^v[0-9] ]] && tag="${tag:1}"
if [[ -z "$tag" ]]; then
$STD msg_info "Empty tag received, retrying...\n"
sleep $((attempt * 2))
continue
fi
$STD msg_ok "Found release: $tag for $repo"
break
done
if [[ -z "$tag" ]]; then
msg_error "Failed to fetch release for $repo after $max_attempts attempts."
exit 1
fi
# Version comparison (if we already have this version, skip)
if [[ "$current_version" == "$tag" ]]; then
$STD msg_info "Already running the latest version ($tag). Skipping update."
return 0
fi
local version="$tag"
local base_url="https://github.com/$repo/releases/download/v$tag"
local tmpdir
tmpdir=$(mktemp -d) || return 1
# Extract list of assets from the Release API
local assets urls
assets=$(echo "$api_response" | jq -r '.assets[].browser_download_url') || true
# Detect current architecture
local arch
if command -v dpkg &>/dev/null; then
arch=$(dpkg --print-architecture)
elif command -v uname &>/dev/null; then
case "$(uname -m)" in
x86_64) arch="amd64" ;;
aarch64) arch="arm64" ;;
armv7l) arch="armv7" ;;
armv6l) arch="armv6" ;;
*) arch="unknown" ;;
esac
else
arch="unknown"
fi
$STD msg_info "Detected system architecture: $arch"
# Try to find a matching asset for our architecture
local url=""
for u in $assets; do
if [[ "$u" =~ $arch.*\.tar\.gz$ ]]; then
url="$u"
$STD msg_info "Found matching architecture asset: $url"
break
fi
done
# Fallback to other architectures if our specific one isn't found
if [[ -z "$url" ]]; then
for u in $assets; do
if [[ "$u" =~ (x86_64|amd64|arm64|armv7|armv6).*\.tar\.gz$ ]]; then
url="$u"
$STD msg_info "Architecture-specific asset not found, using: $url"
break
fi
done
fi
# Fallback to any tar.gz
if [[ -z "$url" ]]; then
for u in $assets; do
if [[ "$u" =~ \.tar\.gz$ ]]; then
url="$u"
$STD msg_info "Using generic tarball: $url"
break
fi
done
fi
# Final fallback to GitHub source tarball
if [[ -z "$url" ]]; then
url="https://github.com/$repo/archive/refs/tags/$version.tar.gz"
$STD msg_info "Trying GitHub source tarball fallback: $url"
fi
local filename="${url##*/}"
$STD msg_info "Downloading $url"
if ! curl $curl_timeout -fsSL -o "$tmpdir/$filename" "$url"; then
msg_error "Failed to download release asset from $url"
rm -rf "$tmpdir"
return 1
fi
mkdir -p "/opt/$app"
tar -xzf "$tmpdir/$filename" -C "$tmpdir"
local content_root
content_root=$(find "$tmpdir" -mindepth 1 -maxdepth 1 -type d)
if [[ $(echo "$content_root" | wc -l) -eq 1 ]]; then
cp -r "$content_root"/* "/opt/$app/"
else
cp -r "$tmpdir"/* "/opt/$app/"
fi
echo "$version" >"/opt/${app}_version.txt"
$STD msg_ok "Deployed $app v$version to /opt/$app"
rm -rf "$tmpdir"
}
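# Illustrative usage sketch (not part of this file): an install script would set APPLICATION
# (and optionally GITHUB_TOKEN to avoid rate limits) and pass an "owner/repo" slug; the repo
# below is a placeholder, not a real target.
#   APPLICATION="MyApp"
#   fetch_and_deploy_gh_release "owner/myapp"   # deploys to /opt/myapp, writes /opt/myapp_version.txt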

View File

@ -1,155 +0,0 @@
# Copyright (c) 2021-2025 community-scripts ORG
# Author: michelroegl-brunner
# License: MIT | https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/LICENSE
colors() {
YW=$(echo "\033[33m")
BL=$(echo "\033[36m")
RD=$(echo "\033[01;31m")
BGN=$(echo "\033[4;92m")
GN=$(echo "\033[1;92m")
DGN=$(echo "\033[32m")
CL=$(echo "\033[m")
CL=$(echo "\033[m")
BOLD=$(echo "\033[1m")
BFR="\\r\\033[K"
HOLD=" "
TAB=" "
CM="${TAB}✔️${TAB}${CL}"
CROSS="${TAB}✖️${TAB}${CL}"
INFO="${TAB}💡${TAB}${CL}"
OS="${TAB}🖥️${TAB}${CL}"
CONTAINERTYPE="${TAB}📦${TAB}${CL}"
DISKSIZE="${TAB}💾${TAB}${CL}"
CPUCORE="${TAB}🧠${TAB}${CL}"
RAMSIZE="${TAB}🛠️${TAB}${CL}"
CONTAINERID="${TAB}🆔${TAB}${CL}"
HOSTNAME="${TAB}🏠${TAB}${CL}"
BRIDGE="${TAB}🌉${TAB}${CL}"
GATEWAY="${TAB}🌐${TAB}${CL}"
DEFAULT="${TAB}⚙️${TAB}${CL}"
MACADDRESS="${TAB}🔗${TAB}${CL}"
VLANTAG="${TAB}🏷️${TAB}${CL}"
CREATING="${TAB}🚀${TAB}${CL}"
ADVANCED="${TAB}🧩${TAB}${CL}"
}
start_spinner() {
local msg="$1"
local frames=('⠋' '⠙' '⠹' '⠸' '⠼' '⠴' '⠦' '⠧' '⠇' '⠏')
local spin_i=0
local interval=0.1
local term_width=$(tput cols)
{
while [ "${SPINNER_ACTIVE:-1}" -eq 1 ]; do
printf "\r\e[2K${frames[spin_i]} ${YW}%b${CL}" "$msg" >&2
spin_i=$(((spin_i + 1) % ${#frames[@]}))
sleep "$interval"
done
} &
SPINNER_PID=$!
}
msg_info() {
local msg="$1"
if [ "${SPINNER_ACTIVE:-0}" -eq 1 ]; then
return
fi
SPINNER_ACTIVE=1
start_spinner "$msg"
}
msg_ok() {
if [ -n "${SPINNER_PID:-}" ] && ps -p "$SPINNER_PID" >/dev/null 2>&1; then
kill "$SPINNER_PID" >/dev/null 2>&1
wait "$SPINNER_PID" 2>/dev/null || true
fi
local msg="$1"
printf "\r\e[2K${CM}${GN}%b${CL}\n" "$msg" >&2
unset SPINNER_PID
SPINNER_ACTIVE=0
log_message "OK" "$msg"
}
msg_error() {
if [ -n "${SPINNER_PID:-}" ] && ps -p "$SPINNER_PID" >/dev/null 2>&1; then
kill "$SPINNER_PID" >/dev/null 2>&1
wait "$SPINNER_PID" 2>/dev/null || true
fi
local msg="$1"
printf "\r\e[2K${CROSS}${RD}%b${CL}\n" "$msg" >&2
unset SPINNER_PID
SPINNER_ACTIVE=0
log_message "ERROR" "$msg"
}
root_check() {
if [[ "$(id -u)" -ne 0 || $(ps -o comm= -p $PPID) == "sudo" ]]; then
clear
msg_error "Please run this script as root."
echo -e "\nExiting..."
sleep 2
exit
fi
}
pve_check() {
if ! pveversion | grep -Eq "pve-manager/8\.[1-3](\.[0-9]+)*"; then
msg_error "${CROSS}${RD}This version of Proxmox Virtual Environment is not supported"
echo -e "Requires Proxmox Virtual Environment Version 8.1 or later."
echo -e "Exiting..."
sleep 2
exit
fi
}
arch_check() {
if [ "$(dpkg --print-architecture)" != "amd64" ]; then
echo -e "\n ${INFO}${YWB}This script will not work with PiMox! \n"
echo -e "\n ${YWB}Visit https://github.com/asylumexp/Proxmox for ARM64 support. \n"
echo -e "Exiting..."
sleep 2
exit
fi
}
ssh_check() {
if command -v pveversion >/dev/null 2>&1; then
if [ -n "${SSH_CLIENT:+x}" ]; then
if whiptail --backtitle "Proxmox VE Helper Scripts" --defaultno --title "SSH DETECTED" --yesno "It's suggested to use the Proxmox shell instead of SSH, since SSH can create issues while gathering variables. Would you like to proceed with using SSH?" 10 62; then
echo "you've been warned"
else
clear
exit
fi
fi
fi
}
start_script() {
if (whiptail --backtitle "Proxmox VE Helper Scripts" --title "SETTINGS" --yesno "Use Default Settings?" --no-button Advanced 10 58); then
header_info
echo -e "${DEFAULT}${BOLD}${BL}Using Default Settings${CL}"
default_settings
else
header_info
echo -e "${ADVANCED}${BOLD}${RD}Using Advanced Settings${CL}"
advanced_settings
fi
}
exit-script() {
clear
echo -e "\n${CROSS}${RD}User exited script${CL}\n"
exit
}

View File

@ -1,205 +1,202 @@
# Copyright (c) 2021-2025 tteck
# Author: tteck (tteckster)
# Co-Author: MickLesk
# Co-Author: michelroegl-brunner
# License: MIT
# https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
# This function sets color variables for formatting output in the terminal
color() {
# Colors
YW=$(echo "\033[33m")
YWB=$(echo "\033[93m")
BL=$(echo "\033[36m")
RD=$(echo "\033[01;31m")
GN=$(echo "\033[1;92m")
# Formatting
CL=$(echo "\033[m")
BFR="\\r\\033[K"
BOLD=$(echo "\033[1m")
HOLD=" "
TAB=" "
# System
RETRY_NUM=10
RETRY_EVERY=3
# Icons
CM="${TAB}✔️${TAB}${CL}"
CROSS="${TAB}✖️${TAB}${CL}"
INFO="${TAB}💡${TAB}${CL}"
NETWORK="${TAB}📡${TAB}${CL}"
OS="${TAB}🖥️${TAB}${CL}"
OSVERSION="${TAB}🌟${TAB}${CL}"
HOSTNAME="${TAB}🏠${TAB}${CL}"
GATEWAY="${TAB}🌐${TAB}${CL}"
DEFAULT="${TAB}⚙️${TAB}${CL}"
}
# Function to set STD mode based on verbosity
set_std_mode() {
if [ "$VERBOSE" = "yes" ]; then
STD=""
else
STD="silent"
fi
}
# Silent execution function
silent() {
"$@" >/dev/null 2>&1
}
# This function enables IPv6 if it's not disabled and sets verbose mode
verb_ip6() {
set_std_mode # Set STD mode based on VERBOSE
if [ "$DISABLEIPV6" == "yes" ]; then
echo "net.ipv6.conf.all.disable_ipv6 = 1" >>/etc/sysctl.conf
$STD sysctl -p
fi
}
# This function sets error handling options and defines the error_handler function to handle errors
catch_errors() {
set -Eeuo pipefail
trap 'error_handler $LINENO "$BASH_COMMAND"' ERR
}
# This function handles errors
error_handler() {
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/api.func)
if [ -n "$SPINNER_PID" ] && ps -p "$SPINNER_PID" >/dev/null; then kill "$SPINNER_PID" >/dev/null; fi
printf "\e[?25h"
local exit_code="$?"
local line_number="$1"
local command="$2"
local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
echo -e "\n$error_message"
if [[ "$line_number" -eq 50 ]]; then
echo -e "The silent function has suppressed the error, run the script with verbose mode enabled, which will provide more detailed output.\n"
post_update_to_api "failed" "No error message, script ran in silent mode"
else
post_update_to_api "failed" "${command}"
fi
}
# This function displays a spinner.
spinner() {
local frames=('⠋' '⠙' '⠹' '⠸' '⠼' '⠴' '⠦' '⠧' '⠇' '⠏')
local spin_i=0
local interval=0.1
printf "\e[?25l"
local color="${YWB}"
while true; do
printf "\r ${color}%s${CL}" "${frames[spin_i]}"
spin_i=$(((spin_i + 1) % ${#frames[@]}))
sleep "$interval"
done
}
# This function displays an informational message with a yellow color.
msg_info() {
local msg="$1"
echo -ne "${TAB}${YW}${HOLD}${msg}${HOLD}"
spinner &
SPINNER_PID=$!
}
# This function displays a success message with a green color.
msg_ok() {
if [ -n "$SPINNER_PID" ] && ps -p "$SPINNER_PID" >/dev/null; then kill "$SPINNER_PID" >/dev/null; fi
printf "\e[?25h"
local msg="$1"
echo -e "${BFR}${CM}${GN}${msg}${CL}"
}
# This function displays an error message with a red color.
msg_error() {
if [ -n "$SPINNER_PID" ] && ps -p "$SPINNER_PID" >/dev/null; then kill "$SPINNER_PID" >/dev/null; fi
printf "\e[?25h"
local msg="$1"
echo -e "${BFR}${CROSS}${RD}${msg}${CL}"
}
# This function sets up the Container OS by generating the locale, setting the timezone, and checking the network connection
setting_up_container() {
msg_info "Setting up Container OS"
sed -i "/$LANG/ s/\(^# \)//" /etc/locale.gen
locale_line=$(grep -v '^#' /etc/locale.gen | grep -E '^[a-zA-Z]' | awk '{print $1}' | head -n 1)
echo "LANG=${locale_line}" >/etc/default/locale
locale-gen >/dev/null
export LANG=${locale_line}
echo $tz >/etc/timezone
ln -sf /usr/share/zoneinfo/$tz /etc/localtime
for ((i = RETRY_NUM; i > 0; i--)); do
if [ "$(hostname -I)" != "" ]; then
break
fi
echo 1>&2 -en "${CROSS}${RD} No Network! "
sleep $RETRY_EVERY
done
if [ "$(hostname -I)" = "" ]; then
echo 1>&2 -e "\n${CROSS}${RD} No Network After $RETRY_NUM Tries${CL}"
echo -e "${NETWORK}Check Network Settings"
exit 1
fi
rm -rf /usr/lib/python3.*/EXTERNALLY-MANAGED
systemctl disable -q --now systemd-networkd-wait-online.service
msg_ok "Set up Container OS"
msg_ok "Network Connected: ${BL}$(hostname -I)"
}
exit_script() {
clear
echo -e "\n${CROSS}${RD}Script aborted.${CL}\n"
exit
}
# This function checks the network connection by pinging a known IP address and prompts the user to continue if the internet is not connected
network_check() {
set +e
trap - ERR
ipv4_connected=false
ipv6_connected=false
sleep 1
# Check IPv4 connectivity to Google, Cloudflare & Quad9 DNS servers.
if ping -c 1 -W 1 1.1.1.1 &>/dev/null || ping -c 1 -W 1 8.8.8.8 &>/dev/null || ping -c 1 -W 1 9.9.9.9 &>/dev/null; then
msg_ok "IPv4 Internet Connected"
ipv4_connected=true
else
msg_error "IPv4 Internet Not Connected"
fi
# Check IPv6 connectivity to Google, Cloudflare & Quad9 DNS servers.
if ping6 -c 1 -W 1 2606:4700:4700::1111 &>/dev/null || ping6 -c 1 -W 1 2001:4860:4860::8888 &>/dev/null || ping6 -c 1 -W 1 2620:fe::fe &>/dev/null; then
msg_ok "IPv6 Internet Connected"
ipv6_connected=true
else
msg_error "IPv6 Internet Not Connected"
fi
# If both IPv4 and IPv6 checks fail, prompt the user
if [[ $ipv4_connected == false && $ipv6_connected == false ]]; then
read -r -p "No Internet detected, would you like to continue anyway? <y/N> " prompt
if [[ "${prompt,,}" =~ ^(y|yes)$ ]]; then
echo -e "${INFO}${RD}Expect Issues Without Internet${CL}"
else
echo -e "${NETWORK}Check Network Settings"
exit 1
fi
fi
RESOLVEDIP=$(getent hosts github.com | awk '{ print $1 }')
if [[ -z "$RESOLVEDIP" ]]; then msg_error "DNS Lookup Failure"; else msg_ok "DNS Resolved github.com to ${BL}$RESOLVEDIP${CL}"; fi
set -e
trap 'error_handler $LINENO "$BASH_COMMAND"' ERR
}
# This function updates the Container OS by running apt-get update and upgrade
update_os() {
msg_info "Updating Container OS"
if [[ "$CACHER" == "yes" ]]; then
echo "Acquire::http::Proxy-Auto-Detect \"/usr/local/bin/apt-proxy-detect.sh\";" >/etc/apt/apt.conf.d/00aptproxy
cat <<EOF >/usr/local/bin/apt-proxy-detect.sh
#!/bin/bash
if nc -w1 -z "${CACHER_IP}" 3142; then
echo -n "http://${CACHER_IP}:3142"
@ -207,72 +204,72 @@ else
echo -n "DIRECT"
fi
EOF
chmod +x /usr/local/bin/apt-proxy-detect.sh
fi
$STD apt-get update
$STD apt-get -o Dpkg::Options::="--force-confold" -y dist-upgrade
rm -rf /usr/lib/python3.*/EXTERNALLY-MANAGED
msg_ok "Updated Container OS"
msg_info "Installing core dependencies"
$STD apt-get update
$STD apt-get install -y sudo curl mc
msg_ok "Core dependencies installed"
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/helper.func)
}
# This function modifies the message of the day (motd) and SSH settings
motd_ssh() {
grep -qxF "export TERM='xterm-256color'" /root/.bashrc || echo "export TERM='xterm-256color'" >>/root/.bashrc
if [ -f "/etc/os-release" ]; then
OS_NAME=$(grep ^NAME /etc/os-release | cut -d= -f2 | tr -d '"')
OS_VERSION=$(grep ^VERSION_ID /etc/os-release | cut -d= -f2 | tr -d '"')
elif [ -f "/etc/debian_version" ]; then
OS_NAME="Debian"
OS_VERSION=$(cat /etc/debian_version)
fi
PROFILE_FILE="/etc/profile.d/00_lxc-details.sh"
echo "echo -e \"\"" >"$PROFILE_FILE"
echo -e "echo -e \"${BOLD}${YW}${APPLICATION} LXC Container - DEV Repository${CL}\"" >>"$PROFILE_FILE"
echo -e "echo -e \"${RD}WARNING: This is a DEVELOPMENT version (ProxmoxVED). Do NOT use in production!${CL}\"" >>"$PROFILE_FILE"
echo -e "echo -e \"${YW} OS: ${GN}${OS_NAME} - Version: ${OS_VERSION}${CL}\"" >>"$PROFILE_FILE"
echo -e "echo -e \"${YW} Hostname: ${GN}\$(hostname)${CL}\"" >>"$PROFILE_FILE"
echo -e "echo -e \"${YW} IP Address: ${GN}\$(hostname -I | awk '{print \$1}')${CL}\"" >>"$PROFILE_FILE"
echo -e "echo -e \"${YW} Repository: ${GN}https://github.com/community-scripts/ProxmoxVED${CL}\"" >>"$PROFILE_FILE"
echo "echo \"\"" >>"$PROFILE_FILE"
chmod -x /etc/update-motd.d/*
if [[ "${SSH_ROOT}" == "yes" ]]; then
sed -i "s/#PermitRootLogin prohibit-password/PermitRootLogin yes/g" /etc/ssh/sshd_config
systemctl restart sshd
fi
}
# This function customizes the container by modifying the getty service and enabling auto-login for the root user
customize() {
if [[ "$PASSWORD" == "" ]]; then
msg_info "Customizing Container"
GETTY_OVERRIDE="/etc/systemd/system/container-getty@1.service.d/override.conf"
mkdir -p $(dirname $GETTY_OVERRIDE)
cat <<EOF >$GETTY_OVERRIDE
[Service]
ExecStart=
ExecStart=-/sbin/agetty --autologin root --noclear --keep-baud tty%I 115200,38400,9600 \$TERM
EOF
systemctl daemon-reload
systemctl restart $(basename $(dirname $GETTY_OVERRIDE) | sed 's/\.d//')
msg_ok "Customized Container"
fi
echo "bash -c \"\$(curl -fsSL https://github.com/community-scripts/ProxmoxVED/raw/main/ct/${app}.sh)\"" >/usr/bin/update
chmod +x /usr/bin/update
if [[ -n "${SSH_AUTHORIZED_KEY}" ]]; then
mkdir -p /root/.ssh
echo "${SSH_AUTHORIZED_KEY}" >/root/.ssh/authorized_keys
chmod 700 /root/.ssh
chmod 600 /root/.ssh/authorized_keys
fi
}

739
misc/tools.func Normal file
View File

@ -0,0 +1,739 @@
install_node_and_modules() {
local NODE_VERSION="${NODE_VERSION:-22}"
local NODE_MODULE="${NODE_MODULE:-}"
local CURRENT_NODE_VERSION=""
local NEED_NODE_INSTALL=false
# Check if Node.js is already installed
if command -v node >/dev/null; then
CURRENT_NODE_VERSION="$(node -v | grep -oP '^v\K[0-9]+')"
if [[ "$CURRENT_NODE_VERSION" != "$NODE_VERSION" ]]; then
msg_info "Node.js version $CURRENT_NODE_VERSION found, replacing with $NODE_VERSION"
NEED_NODE_INSTALL=true
else
msg_ok "Node.js $NODE_VERSION already installed"
fi
else
msg_info "Node.js not found, installing version $NODE_VERSION"
NEED_NODE_INSTALL=true
fi
# Install Node.js if required
if [[ "$NEED_NODE_INSTALL" == true ]]; then
$STD apt-get purge -y nodejs
rm -f /etc/apt/sources.list.d/nodesource.list /etc/apt/keyrings/nodesource.gpg
mkdir -p /etc/apt/keyrings
if ! curl -fsSL https://deb.nodesource.com/gpgkey/nodesource-repo.gpg.key |
gpg --dearmor -o /etc/apt/keyrings/nodesource.gpg; then
msg_error "Failed to download or import NodeSource GPG key"
exit 1
fi
echo "deb [signed-by=/etc/apt/keyrings/nodesource.gpg] https://deb.nodesource.com/node_${NODE_VERSION}.x nodistro main" \
>/etc/apt/sources.list.d/nodesource.list
if ! apt-get update >/dev/null 2>&1; then
msg_error "Failed to update APT repositories after adding NodeSource"
exit 1
fi
if ! apt-get install -y nodejs >/dev/null 2>&1; then
msg_error "Failed to install Node.js ${NODE_VERSION} from NodeSource"
exit 1
fi
msg_ok "Installed Node.js ${NODE_VERSION}"
fi
export NODE_OPTIONS="--max_old_space_size=4096"
# Install global Node modules
if [[ -n "$NODE_MODULE" ]]; then
IFS=',' read -ra MODULES <<<"$NODE_MODULE"
for mod in "${MODULES[@]}"; do
local MODULE_NAME MODULE_REQ_VERSION MODULE_INSTALLED_VERSION
if [[ "$mod" == *"@"* ]]; then
MODULE_NAME="${mod%@*}"
MODULE_REQ_VERSION="${mod#*@}"
else
MODULE_NAME="$mod"
MODULE_REQ_VERSION="latest"
fi
# Check if the module is already installed
if npm list -g --depth=0 "$MODULE_NAME" >/dev/null 2>&1; then
MODULE_INSTALLED_VERSION="$(npm list -g --depth=0 "$MODULE_NAME" | grep "$MODULE_NAME@" | awk -F@ '{print $2}' | tr -d '[:space:]')"
if [[ "$MODULE_REQ_VERSION" != "latest" && "$MODULE_REQ_VERSION" != "$MODULE_INSTALLED_VERSION" ]]; then
msg_info "Updating $MODULE_NAME from v$MODULE_INSTALLED_VERSION to v$MODULE_REQ_VERSION"
if ! $STD npm install -g "${MODULE_NAME}@${MODULE_REQ_VERSION}"; then
msg_error "Failed to update $MODULE_NAME to version $MODULE_REQ_VERSION"
exit 1
fi
elif [[ "$MODULE_REQ_VERSION" == "latest" ]]; then
msg_info "Updating $MODULE_NAME to latest version"
if ! $STD npm install -g "${MODULE_NAME}@latest"; then
msg_error "Failed to update $MODULE_NAME to latest version"
exit 1
fi
else
msg_ok "$MODULE_NAME@$MODULE_INSTALLED_VERSION already installed"
fi
else
msg_info "Installing $MODULE_NAME@$MODULE_REQ_VERSION"
if ! $STD npm install -g "${MODULE_NAME}@${MODULE_REQ_VERSION}"; then
msg_error "Failed to install $MODULE_NAME@$MODULE_REQ_VERSION"
exit 1
fi
fi
done
msg_ok "All requested Node modules have been processed"
fi
}
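# Illustrative usage sketch (not part of tools.func): callers pin versions via environment
# variables before the call; the values below are examples only.
#   NODE_VERSION="20" NODE_MODULE="yarn,pnpm@9.0.0" install_node_and_modules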
install_postgresql() {
local PG_VERSION="${PG_VERSION:-16}"
local CURRENT_PG_VERSION=""
local DISTRO
local NEED_PG_INSTALL=false
DISTRO="$(awk -F'=' '/^VERSION_CODENAME=/{ print $NF }' /etc/os-release)"
if command -v psql >/dev/null; then
CURRENT_PG_VERSION="$(psql -V | grep -oP '\s\K[0-9]+(?=\.)')"
if [[ "$CURRENT_PG_VERSION" != "$PG_VERSION" ]]; then
msg_info "PostgreSQL Version $CURRENT_PG_VERSION found, replacing with $PG_VERSION"
NEED_PG_INSTALL=true
fi
else
msg_info "PostgreSQL not found, installing version $PG_VERSION"
NEED_PG_INSTALL=true
fi
if [[ "$NEED_PG_INSTALL" == true ]]; then
msg_info "Stopping PostgreSQL if running"
systemctl stop postgresql >/dev/null 2>&1 || true
msg_info "Removing conflicting PostgreSQL packages"
$STD apt-get purge -y "postgresql*"
rm -f /etc/apt/sources.list.d/pgdg.list /etc/apt/trusted.gpg.d/postgresql.gpg
msg_info "Setting up PostgreSQL Repository"
curl -fsSL https://www.postgresql.org/media/keys/ACCC4CF8.asc |
gpg --dearmor -o /etc/apt/trusted.gpg.d/postgresql.gpg
echo "deb https://apt.postgresql.org/pub/repos/apt ${DISTRO}-pgdg main" \
>/etc/apt/sources.list.d/pgdg.list
$STD apt-get update
$STD apt-get install -y "postgresql-${PG_VERSION}"
msg_ok "Installed PostgreSQL ${PG_VERSION}"
fi
}
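# Illustrative usage sketch (not part of tools.func): PG_VERSION defaults to 16; a hypothetical
# caller needing a different major release would simply override it.
#   PG_VERSION="17" install_postgresql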
install_mariadb() {
local MARIADB_VERSION="${MARIADB_VERSION:-latest}"
local DISTRO_CODENAME
DISTRO_CODENAME="$(awk -F= '/^VERSION_CODENAME=/{print $2}' /etc/os-release)"
# grab dynamic latest LTS version
if [[ "$MARIADB_VERSION" == "latest" ]]; then
msg_info "Resolving latest MariaDB version"
MARIADB_VERSION=$(curl -fsSL https://mariadb.org | grep -oP 'MariaDB \K10\.[0-9]+' | head -n1)
if [[ -z "$MARIADB_VERSION" ]]; then
msg_error "Could not determine latest MariaDB version"
return 1
fi
msg_ok "Latest MariaDB version is $MARIADB_VERSION"
fi
local CURRENT_VERSION=""
if command -v mariadb >/dev/null; then
CURRENT_VERSION="$(mariadb --version | grep -oP 'Ver\s+\K[0-9]+\.[0-9]+')"
fi
if [[ "$CURRENT_VERSION" == "$MARIADB_VERSION" ]]; then
msg_info "MariaDB $MARIADB_VERSION already installed, checking for upgrade"
$STD apt-get update
$STD apt-get install --only-upgrade -y mariadb-server mariadb-client
msg_ok "MariaDB $MARIADB_VERSION upgraded if applicable"
return 0
fi
if [[ -n "$CURRENT_VERSION" ]]; then
msg_info "Replacing MariaDB $CURRENT_VERSION with $MARIADB_VERSION (data will be preserved)"
$STD systemctl stop mariadb >/dev/null 2>&1 || true
$STD apt-get purge -y 'mariadb*' || true
rm -f /etc/apt/sources.list.d/mariadb.list /etc/apt/trusted.gpg.d/mariadb.gpg
else
msg_info "Installing MariaDB $MARIADB_VERSION"
fi
msg_info "Setting up MariaDB Repository"
curl -fsSL "https://mariadb.org/mariadb_release_signing_key.asc" |
gpg --dearmor -o /etc/apt/trusted.gpg.d/mariadb.gpg
echo "deb [signed-by=/etc/apt/trusted.gpg.d/mariadb.gpg] http://mirror.mariadb.org/repo/${MARIADB_VERSION}/debian ${DISTRO_CODENAME} main" \
>/etc/apt/sources.list.d/mariadb.list
$STD apt-get update
$STD apt-get install -y mariadb-server mariadb-client
msg_ok "Installed MariaDB $MARIADB_VERSION"
}
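# Illustrative usage sketch (not part of tools.func): with MARIADB_VERSION unset or "latest" the
# function resolves the newest 10.x series from mariadb.org; pinning a series is an override.
#   MARIADB_VERSION="10.11" install_mariadb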
install_mysql() {
local MYSQL_VERSION="${MYSQL_VERSION:-8.0}"
local CURRENT_VERSION=""
local NEED_INSTALL=false
if command -v mysql >/dev/null; then
CURRENT_VERSION="$(mysql --version | grep -oP 'Distrib\s+\K[0-9]+\.[0-9]+')"
if [[ "$CURRENT_VERSION" != "$MYSQL_VERSION" ]]; then
msg_info "MySQL $CURRENT_VERSION found, replacing with $MYSQL_VERSION"
NEED_INSTALL=true
else
msg_ok "MySQL $MYSQL_VERSION already installed"
fi
else
msg_info "MySQL not found, installing version $MYSQL_VERSION"
NEED_INSTALL=true
fi
if [[ "$NEED_INSTALL" == true ]]; then
msg_info "Removing conflicting MySQL packages"
$STD systemctl stop mysql >/dev/null 2>&1 || true
$STD apt-get purge -y 'mysql*'
rm -f /etc/apt/sources.list.d/mysql.list /etc/apt/trusted.gpg.d/mysql.gpg
msg_info "Setting up MySQL APT Repository"
DISTRO_CODENAME="$(awk -F= '/VERSION_CODENAME/ { print $2 }' /etc/os-release)"
curl -fsSL https://repo.mysql.com/RPM-GPG-KEY-mysql-2022 | gpg --dearmor -o /etc/apt/trusted.gpg.d/mysql.gpg
echo "deb [signed-by=/etc/apt/trusted.gpg.d/mysql.gpg] https://repo.mysql.com/apt/debian/ ${DISTRO_CODENAME} mysql-${MYSQL_VERSION}" \
>/etc/apt/sources.list.d/mysql.list
$STD apt-get update
$STD apt-get install -y mysql-server
msg_ok "Installed MySQL $MYSQL_VERSION"
fi
}
install_php() {
local PHP_VERSION="${PHP_VERSION:-8.4}"
local PHP_MODULE="${PHP_MODULE:-}"
local PHP_APACHE="${PHP_APACHE:-NO}"
local PHP_FPM="${PHP_FPM:-NO}"
local DEFAULT_MODULES="bcmath,cli,curl,gd,intl,mbstring,opcache,readline,xml,zip"
local COMBINED_MODULES
local PHP_MEMORY_LIMIT="${PHP_MEMORY_LIMIT:-512M}"
local PHP_UPLOAD_MAX_FILESIZE="${PHP_UPLOAD_MAX_FILESIZE:-128M}"
local PHP_POST_MAX_SIZE="${PHP_POST_MAX_SIZE:-128M}"
local PHP_MAX_EXECUTION_TIME="${PHP_MAX_EXECUTION_TIME:-300}"
# Merge default + user-defined modules
if [[ -n "$PHP_MODULE" ]]; then
COMBINED_MODULES="${DEFAULT_MODULES},${PHP_MODULE}"
else
COMBINED_MODULES="${DEFAULT_MODULES}"
fi
# Deduplicate modules
COMBINED_MODULES=$(echo "$COMBINED_MODULES" | tr ',' '\n' | awk '!seen[$0]++' | paste -sd, -)
local CURRENT_PHP
CURRENT_PHP=$(php -v 2>/dev/null | awk '/^PHP/{print $2}' | cut -d. -f1,2)
if [[ "$CURRENT_PHP" != "$PHP_VERSION" ]]; then
$STD echo "PHP $CURRENT_PHP detected, migrating to PHP $PHP_VERSION"
if [[ ! -f /etc/apt/sources.list.d/php.list ]]; then
$STD curl -fsSLo /tmp/debsuryorg-archive-keyring.deb https://packages.sury.org/debsuryorg-archive-keyring.deb
$STD dpkg -i /tmp/debsuryorg-archive-keyring.deb
echo "deb [signed-by=/usr/share/keyrings/deb.sury.org-php.gpg] https://packages.sury.org/php/ $(lsb_release -sc) main" \
>/etc/apt/sources.list.d/php.list
$STD apt-get update
fi
$STD apt-get purge -y "php${CURRENT_PHP//./}"* || true
fi
local MODULE_LIST="php${PHP_VERSION}"
IFS=',' read -ra MODULES <<<"$COMBINED_MODULES"
for mod in "${MODULES[@]}"; do
MODULE_LIST+=" php${PHP_VERSION}-${mod}"
done
if [[ "$PHP_APACHE" == "YES" ]]; then
# Optionally disable old Apache PHP module
if [[ -f /etc/apache2/mods-enabled/php${CURRENT_PHP}.load ]]; then
$STD a2dismod php${CURRENT_PHP} || true
fi
fi
if [[ "$PHP_FPM" == "YES" ]]; then
$STD systemctl stop php${CURRENT_PHP}-fpm || true
$STD systemctl disable php${CURRENT_PHP}-fpm || true
fi
$STD apt-get install -y $MODULE_LIST
msg_ok "Installed PHP $PHP_VERSION with selected modules"
if [[ "$PHP_APACHE" == "YES" ]]; then
$STD systemctl restart apache2 || true
fi
if [[ "$PHP_FPM" == "YES" ]]; then
$STD systemctl enable php${PHP_VERSION}-fpm
$STD systemctl restart php${PHP_VERSION}-fpm
fi
# Patch all relevant php.ini files
local PHP_INI_PATHS=()
PHP_INI_PATHS+=("/etc/php/${PHP_VERSION}/cli/php.ini")
[[ "$PHP_FPM" == "YES" ]] && PHP_INI_PATHS+=("/etc/php/${PHP_VERSION}/fpm/php.ini")
[[ "$PHP_APACHE" == "YES" ]] && PHP_INI_PATHS+=("/etc/php/${PHP_VERSION}/apache2/php.ini")
for ini in "${PHP_INI_PATHS[@]}"; do
if [[ -f "$ini" ]]; then
msg_info "Patching $ini"
sed -i "s|^memory_limit = .*|memory_limit = ${PHP_MEMORY_LIMIT}|" "$ini"
sed -i "s|^upload_max_filesize = .*|upload_max_filesize = ${PHP_UPLOAD_MAX_FILESIZE}|" "$ini"
sed -i "s|^post_max_size = .*|post_max_size = ${PHP_POST_MAX_SIZE}|" "$ini"
sed -i "s|^max_execution_time = .*|max_execution_time = ${PHP_MAX_EXECUTION_TIME}|" "$ini"
msg_ok "Patched $ini"
fi
done
}
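# Illustrative usage sketch (not part of tools.func): the defaults install the PHP 8.4 CLI with
# DEFAULT_MODULES; a hypothetical FPM setup with extra modules and a larger memory limit:
#   PHP_VERSION="8.3" PHP_FPM="YES" PHP_MODULE="mysql,redis" PHP_MEMORY_LIMIT="1024M" install_php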
install_composer() {
local COMPOSER_BIN="/usr/local/bin/composer"
export COMPOSER_ALLOW_SUPERUSER=1
# Check if composer is already installed
if [[ -x "$COMPOSER_BIN" ]]; then
local CURRENT_VERSION
CURRENT_VERSION=$("$COMPOSER_BIN" --version | awk '{print $3}')
msg_info "Composer $CURRENT_VERSION found, updating to latest"
else
msg_info "Composer not found, installing latest version"
fi
# Download and install latest composer
curl -fsSL https://getcomposer.org/installer -o /tmp/composer-setup.php
php /tmp/composer-setup.php --install-dir=/usr/local/bin --filename=composer >/dev/null 2>&1
if [[ $? -ne 0 ]]; then
msg_error "Failed to install Composer"
return 1
fi
chmod +x "$COMPOSER_BIN"
msg_ok "Installed Composer $($COMPOSER_BIN --version | awk '{print $3}')"
}
install_go() {
local ARCH
case "$(uname -m)" in
x86_64) ARCH="amd64" ;;
aarch64) ARCH="arm64" ;;
*)
msg_error "Unsupported architecture: $(uname -m)"
return 1
;;
esac
# Determine version
if [[ -z "$GO_VERSION" || "$GO_VERSION" == "latest" ]]; then
GO_VERSION=$(curl -fsSL https://go.dev/VERSION?m=text | head -n1 | sed 's/^go//')
if [[ -z "$GO_VERSION" ]]; then
msg_error "Could not determine latest Go version"
return 1
fi
msg_info "Detected latest Go version: $GO_VERSION"
fi
local GO_BIN="/usr/local/bin/go"
local GO_INSTALL_DIR="/usr/local/go"
if [[ -x "$GO_BIN" ]]; then
local CURRENT_VERSION
CURRENT_VERSION=$("$GO_BIN" version | awk '{print $3}' | sed 's/go//')
if [[ "$CURRENT_VERSION" == "$GO_VERSION" ]]; then
msg_ok "Go $GO_VERSION already installed"
return 0
else
msg_info "Go $CURRENT_VERSION found, upgrading to $GO_VERSION"
rm -rf "$GO_INSTALL_DIR"
fi
else
msg_info "Installing Go $GO_VERSION"
fi
local TARBALL="go${GO_VERSION}.linux-${ARCH}.tar.gz"
local URL="https://go.dev/dl/${TARBALL}"
local TMP_TAR=$(mktemp)
curl -fsSL "$URL" -o "$TMP_TAR" || {
msg_error "Failed to download $TARBALL"
return 1
}
tar -C /usr/local -xzf "$TMP_TAR"
ln -sf /usr/local/go/bin/go /usr/local/bin/go
ln -sf /usr/local/go/bin/gofmt /usr/local/bin/gofmt
rm -f "$TMP_TAR"
msg_ok "Installed Go $GO_VERSION"
}
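# Illustrative usage sketch (not executed; the pinned version is an assumption):
#   GO_VERSION="latest" install_go   # resolve and install the newest release
#   GO_VERSION="1.22.1" install_go   # or pin a specific toolchain
#   go version                       # verify the symlinked binary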
install_java() {
local JAVA_VERSION="${JAVA_VERSION:-17}"
local DISTRO_CODENAME
DISTRO_CODENAME=$(awk -F= '/VERSION_CODENAME/ { print $2 }' /etc/os-release)
local DESIRED_PACKAGE="temurin-${JAVA_VERSION}-jdk"
# Add Adoptium repo if missing
if [[ ! -f /etc/apt/sources.list.d/adoptium.list ]]; then
msg_info "Setting up Adoptium Repository"
mkdir -p /etc/apt/keyrings
curl -fsSL "https://packages.adoptium.net/artifactory/api/gpg/key/public" | gpg --dearmor -o /etc/apt/trusted.gpg.d/adoptium.gpg
echo "deb [signed-by=/etc/apt/trusted.gpg.d/adoptium.gpg] https://packages.adoptium.net/artifactory/deb ${DISTRO_CODENAME} main" \
>/etc/apt/sources.list.d/adoptium.list
$STD apt-get update
msg_ok "Set up Adoptium Repository"
fi
# Detect currently installed temurin version
local INSTALLED_VERSION=""
if dpkg -l | grep -q "temurin-.*-jdk"; then
INSTALLED_VERSION=$(dpkg -l | awk '/temurin-.*-jdk/{print $2}' | grep -oP 'temurin-\K[0-9]+')
fi
if [[ "$INSTALLED_VERSION" == "$JAVA_VERSION" ]]; then
msg_info "Temurin JDK $JAVA_VERSION already installed, updating if needed"
$STD apt-get update
$STD apt-get install --only-upgrade -y "$DESIRED_PACKAGE"
msg_ok "Updated Temurin JDK $JAVA_VERSION (if applicable)"
else
if [[ -n "$INSTALLED_VERSION" ]]; then
msg_info "Removing Temurin JDK $INSTALLED_VERSION"
$STD apt-get purge -y "temurin-${INSTALLED_VERSION}-jdk"
fi
msg_info "Installing Temurin JDK $JAVA_VERSION"
$STD apt-get install -y "$DESIRED_PACKAGE"
msg_ok "Installed Temurin JDK $JAVA_VERSION"
fi
}
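# Illustrative usage sketch (not executed; the version is an assumption, any
# Temurin release published for the detected Debian codename should work):
#   JAVA_VERSION="21" install_java
#   java -version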
install_mongodb() {
local MONGO_VERSION="${MONGO_VERSION:-8.0}"
local DISTRO_CODENAME
DISTRO_CODENAME=$(awk -F= '/VERSION_CODENAME/ { print $2 }' /etc/os-release)
local REPO_LIST="/etc/apt/sources.list.d/mongodb-org-${MONGO_VERSION}.list"
# Determine the currently installed major version
local INSTALLED_VERSION=""
if command -v mongod >/dev/null; then
INSTALLED_VERSION=$(mongod --version | awk '/db version/{print $3}' | cut -d. -f1,2)
fi
if [[ "$INSTALLED_VERSION" == "$MONGO_VERSION" ]]; then
msg_info "MongoDB $MONGO_VERSION already installed, checking for upgrade"
$STD apt-get update
$STD apt-get install --only-upgrade -y mongodb-org
msg_ok "MongoDB $MONGO_VERSION upgraded if needed"
return 0
fi
# Remove the older packages only (data is preserved)
if [[ -n "$INSTALLED_VERSION" ]]; then
msg_info "Replacing MongoDB $INSTALLED_VERSION with $MONGO_VERSION (data will be preserved)"
$STD systemctl stop mongod || true
$STD apt-get purge -y mongodb-org || true
rm -f /etc/apt/sources.list.d/mongodb-org-*.list
rm -f /etc/apt/trusted.gpg.d/mongodb-*.gpg
else
msg_info "Installing MongoDB $MONGO_VERSION"
fi
# Add the MongoDB repository
curl -fsSL "https://pgp.mongodb.com/server-${MONGO_VERSION}.asc" | gpg --dearmor -o "/etc/apt/trusted.gpg.d/mongodb-${MONGO_VERSION}.gpg"
echo "deb [signed-by=/etc/apt/trusted.gpg.d/mongodb-${MONGO_VERSION}.gpg] https://repo.mongodb.org/apt/debian ${DISTRO_CODENAME}/mongodb-org/${MONGO_VERSION} main" \
>"$REPO_LIST"
$STD apt-get update
$STD apt-get install -y mongodb-org
# Make sure the data directory stays intact
mkdir -p /var/lib/mongodb
chown -R mongodb:mongodb /var/lib/mongodb
$STD systemctl enable mongod
$STD systemctl start mongod
msg_ok "MongoDB $MONGO_VERSION installed and started"
}
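# Illustrative usage sketch (not executed; 8.0 mirrors the function default):
#   MONGO_VERSION="8.0" install_mongodb
#   systemctl status mongod --no-pager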
fetch_and_deploy_gh_release() {
local repo="$1"
local app=$(echo ${APPLICATION,,} | tr -d ' ')
local api_url="https://api.github.com/repos/$repo/releases/latest"
local header=()
local attempt=0
local max_attempts=3
local api_response tag http_code
local current_version=""
local curl_timeout="--connect-timeout 10 --max-time 30"
# Check if the app directory exists and if there's a version file
if [[ -f "/opt/${app}_version.txt" ]]; then
current_version=$(cat "/opt/${app}_version.txt")
$STD msg_info "Current version: $current_version"
fi
# ensure that jq is installed
if ! command -v jq &>/dev/null; then
$STD msg_info "Installing jq..."
$STD apt-get update -qq &>/dev/null
$STD apt-get install -y jq &>/dev/null || {
msg_error "Failed to install jq"
return 1
}
fi
[[ -n "${GITHUB_TOKEN:-}" ]] && header=(-H "Authorization: token $GITHUB_TOKEN")
until [[ $attempt -ge $max_attempts ]]; do
((attempt++)) || true
$STD msg_info "[$attempt/$max_attempts] Fetching GitHub release for $repo...\n"
api_response=$(curl $curl_timeout -fsSL -w "%{http_code}" -o /tmp/gh_resp.json "${header[@]}" "$api_url")
http_code="${api_response:(-3)}"
if [[ "$http_code" == "404" ]]; then
msg_error "Repository $repo has no Release candidate (404)"
return 1
fi
if [[ "$http_code" != "200" ]]; then
$STD msg_info "Request failed with HTTP $http_code, retrying...\n"
sleep $((attempt * 2))
continue
fi
api_response=$(</tmp/gh_resp.json)
if echo "$api_response" | grep -q "API rate limit exceeded"; then
msg_error "GitHub API rate limit exceeded."
return 1
fi
if echo "$api_response" | jq -e '.message == "Not Found"' &>/dev/null; then
msg_error "Repository not found: $repo"
return 1
fi
tag=$(echo "$api_response" | jq -r '.tag_name // .name // empty')
[[ "$tag" =~ ^v[0-9] ]] && tag="${tag:1}"
if [[ -z "$tag" ]]; then
$STD msg_info "Empty tag received, retrying...\n"
sleep $((attempt * 2))
continue
fi
$STD msg_ok "Found release: $tag for $repo"
break
done
if [[ -z "$tag" ]]; then
msg_error "Failed to fetch release for $repo after $max_attempts attempts."
exit 1
fi
# Version comparison (if we already have this version, skip)
if [[ "$current_version" == "$tag" ]]; then
$STD msg_info "Already running the latest version ($tag). Skipping update."
return 0
fi
local version="$tag"
local base_url="https://github.com/$repo/releases/download/v$tag"
local tmpdir
tmpdir=$(mktemp -d) || return 1
# Extract list of assets from the Release API
local assets urls
assets=$(echo "$api_response" | jq -r '.assets[].browser_download_url') || true
# Detect current architecture
local arch
if command -v dpkg &>/dev/null; then
arch=$(dpkg --print-architecture)
elif command -v uname &>/dev/null; then
case "$(uname -m)" in
x86_64) arch="amd64" ;;
aarch64) arch="arm64" ;;
armv7l) arch="armv7" ;;
armv6l) arch="armv6" ;;
*) arch="unknown" ;;
esac
else
arch="unknown"
fi
$STD msg_info "Detected system architecture: $arch"
# Try to find a matching asset for our architecture
local url=""
for u in $assets; do
if [[ "$u" =~ $arch.*\.tar\.gz$ ]]; then
url="$u"
$STD msg_info "Found matching architecture asset: $url"
break
fi
done
# Fallback to other architectures if our specific one isn't found
if [[ -z "$url" ]]; then
for u in $assets; do
if [[ "$u" =~ (x86_64|amd64|arm64|armv7|armv6).*\.tar\.gz$ ]]; then
url="$u"
$STD msg_info "Architecture-specific asset not found, using: $url"
break
fi
done
fi
# Fallback to any tar.gz
if [[ -z "$url" ]]; then
for u in $assets; do
if [[ "$u" =~ \.tar\.gz$ ]]; then
url="$u"
$STD msg_info "Using generic tarball: $url"
break
fi
done
fi
# Final fallback to GitHub source tarball
if [[ -z "$url" ]]; then
url="https://github.com/$repo/archive/refs/tags/$version.tar.gz"
$STD msg_info "Trying GitHub source tarball fallback: $url"
fi
local filename="${url##*/}"
$STD msg_info "Downloading $url"
if ! curl $curl_timeout -fsSL -o "$tmpdir/$filename" "$url"; then
msg_error "Failed to download release asset from $url"
rm -rf "$tmpdir"
return 1
fi
mkdir -p "/opt/$app"
tar -xzf "$tmpdir/$filename" -C "$tmpdir"
local content_root
content_root=$(find "$tmpdir" -mindepth 1 -maxdepth 1 -type d)
if [[ $(echo "$content_root" | wc -l) -eq 1 ]]; then
cp -r "$content_root"/* "/opt/$app/"
else
cp -r "$tmpdir"/* "/opt/$app/"
fi
echo "$version" >"/opt/${app}_version.txt"
$STD msg_ok "Deployed $app v$version to /opt/$app"
rm -rf "$tmpdir"
}
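# Illustrative usage sketch (not executed; "owner/repo" is a placeholder and
# GITHUB_TOKEN is optional, used only to raise the GitHub API rate limit):
#   APPLICATION="MyApp"
#   GITHUB_TOKEN="ghp_xxx" fetch_and_deploy_gh_release "owner/repo"
#   cat /opt/myapp_version.txt   # records the deployed tag for later update checks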
setup_local_ip_helper() {
local BASE_DIR="/usr/local/community-scripts/ip-management"
local SCRIPT_PATH="$BASE_DIR/update_local_ip.sh"
local SERVICE_PATH="/etc/systemd/system/update-local-ip.service"
local TIMER_PATH="/etc/systemd/system/update-local-ip.timer"
mkdir -p "$BASE_DIR"
# Create update script
cat <<'EOF' >"$SCRIPT_PATH"
#!/bin/bash
set -e
IP_FILE="/run/local-ip.env"
mkdir -p "$(dirname "$IP_FILE")"
get_current_ip() {
ip route get 1 | awk '{print $7; exit}' 2>/dev/null
}
current_ip="$(get_current_ip)"
if [[ -z "$current_ip" ]]; then
echo "[ERROR] Could not detect local IP" >&2
exit 1
fi
if [[ -f "$IP_FILE" ]]; then
source "$IP_FILE"
if [[ "$LOCAL_IP" == "$current_ip" ]]; then
exit 0
fi
fi
echo "LOCAL_IP=$current_ip" > "$IP_FILE"
echo "[INFO] LOCAL_IP updated to $current_ip"
EOF
chmod +x "$SCRIPT_PATH"
# Create systemd service
cat <<EOF >"$SERVICE_PATH"
[Unit]
Description=Update LOCAL_IP file
After=network-online.target
[Service]
Type=oneshot
ExecStart=$SCRIPT_PATH
EOF
# Create systemd timer
cat <<EOF >"$TIMER_PATH"
[Unit]
Description=Periodic LOCAL_IP update
[Timer]
OnBootSec=15
OnUnitActiveSec=60
Persistent=true
[Install]
WantedBy=timers.target
EOF
systemctl daemon-reexec
systemctl daemon-reload
systemctl enable -q --now update-local-ip.timer
$STD msg_ok "Setup LOCAL_IP helper in $BASE_DIR with systemd integration"
}
import_local_ip() {
local IP_FILE="/run/local-ip.env"
if [[ -f "$IP_FILE" ]]; then
# shellcheck disable=SC1090
source "$IP_FILE"
fi
if [[ -z "${LOCAL_IP:-}" ]]; then
LOCAL_IP="$(ip route get 1 | awk '{print $7; exit}' 2>/dev/null)"
if [[ -z "$LOCAL_IP" ]]; then
msg_error "Could not determine LOCAL_IP"
return 1
fi
fi
export LOCAL_IP
}
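# Illustrative usage sketch (not executed; the assumed flow is: install the helper
# once, then import the cached address wherever a config file needs it):
#   setup_local_ip_helper   # installs the update script, service and timer
#   import_local_ip         # sources /run/local-ip.env and exports LOCAL_IP
#   echo "Reachable at http://${LOCAL_IP}"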

View File

@ -137,100 +137,76 @@ update_installation() {
cat <<'EOF' >/opt/iptag/iptag
#!/bin/bash
# =============== CONFIGURATION =============== #
readonly CONFIG_FILE="/opt/iptag/iptag.conf"
readonly DEFAULT_TAG_FORMAT="full"
readonly DEFAULT_CHECK_INTERVAL=60

# Load the configuration file if it exists
if [ -f "$CONFIG_FILE" ]; then
  # shellcheck source=./iptag.conf
  source "$CONFIG_FILE"
fi

# Convert IP to integer for comparison
ip_to_int() {
  local ip="$1"
  local a b c d
  IFS=. read -r a b c d <<< "${ip}"
  echo "$((a << 24 | b << 16 | c << 8 | d))"
}

# Check if IP is in CIDR
ip_in_cidr() {
  local ip="$1" cidr="$2"
  ipcalc -c "$ip" "$cidr" >/dev/null 2>&1 || return 1
  local network prefix ip_parts net_parts
  network=$(echo "$cidr" | cut -d/ -f1)
  prefix=$(echo "$cidr" | cut -d/ -f2)
  IFS=. read -r -a ip_parts <<< "$ip"
  IFS=. read -r -a net_parts <<< "$network"
  case $prefix in
    8)  [[ "${ip_parts[0]}" == "${net_parts[0]}" ]] ;;
    16) [[ "${ip_parts[0]}.${ip_parts[1]}" == "${net_parts[0]}.${net_parts[1]}" ]] ;;
    24) [[ "${ip_parts[0]}.${ip_parts[1]}.${ip_parts[2]}" == "${net_parts[0]}.${net_parts[1]}.${net_parts[2]}" ]] ;;
    32) [[ "$ip" == "$network" ]] ;;
    *)  return 1 ;;
  esac
}

# Format IP address according to the configuration
format_ip_tag() {
  local ip="$1"
  local format="${TAG_FORMAT:-$DEFAULT_TAG_FORMAT}"
  case "$format" in
    "last_octet") echo "${ip##*.}" ;;
    "last_two_octets") echo "${ip#*.*.}" ;;
    *) echo "$ip" ;;
  esac
}

# Check if IP is in any CIDRs
ip_in_cidrs() {
  local ip="$1" cidrs="$2"
  [[ -z "$cidrs" ]] && return 1
  local IFS=' '
  for cidr in $cidrs; do ip_in_cidr "$ip" "$cidr" && return 0; done
  return 1
}

# Check if IP is valid
is_valid_ipv4() {
  local ip="$1"
  [[ "$ip" =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}$ ]] || return 1

  local IFS='.' parts
  read -ra parts <<< "$ip"
  for part in "${parts[@]}"; do
    (( part >= 0 && part <= 255 )) || return 1
  done
  return 0
}

lxc_status_changed() {
@ -265,193 +241,172 @@ fw_net_interface_changed() {
# Get VM IPs using MAC addresses and ARP table
get_vm_ips() {
  local vmid=$1 ips="" macs found_ip=false
  qm status "$vmid" 2>/dev/null | grep -q "status: running" || return
  macs=$(qm config "$vmid" 2>/dev/null | grep -E 'net[0-9]+' | grep -oE '[a-fA-F0-9]{2}(:[a-fA-F0-9]{2}){5}')
  [[ -z "$macs" ]] && return

  for mac in $macs; do
    local ip
    ip=$(arp -an 2>/dev/null | grep -i "$mac" | grep -oE '([0-9]{1,3}\.){3}[0-9]{1,3}')
    [[ -n "$ip" ]] && { ips+="$ip "; found_ip=true; }
  done

  if ! $found_ip; then
    local agent_ip
    agent_ip=$(qm agent "$vmid" network-get-interfaces 2>/dev/null | grep -oE '([0-9]{1,3}\.){3}[0-9]{1,3}' || true)
    [[ -n "$agent_ip" ]] && ips+="$agent_ip "
  fi

  echo "${ips% }"
}

# Update tags
update_tags() {
  local type="$1" vmid="$2" config_cmd="pct"
  [[ "$type" == "vm" ]] && config_cmd="qm"

  local current_ips_full
  if [[ "$type" == "lxc" ]]; then
    current_ips_full=$(lxc-info -n "${vmid}" -i 2>/dev/null | grep -E "^IP:" | awk '{print $2}')
  else
    current_ips_full=$(get_vm_ips "${vmid}")
  fi
  [[ -z "$current_ips_full" ]] && return

  local current_tags=() next_tags=() current_ip_tags=()
  mapfile -t current_tags < <($config_cmd config "${vmid}" 2>/dev/null | grep tags | awk '{print $2}' | sed 's/;/\n/g')

  # Separate IP and non-IP tags
  for tag in "${current_tags[@]}"; do
    if is_valid_ipv4 "${tag}" || [[ "$tag" =~ ^[0-9]+(\.[0-9]+)*$ ]]; then
      current_ip_tags+=("${tag}")
    else
      next_tags+=("${tag}")
    fi
  done

  local formatted_ips=() needs_update=false added_ips=()
  for ip in ${current_ips_full}; do
    if is_valid_ipv4 "$ip" && ip_in_cidrs "$ip" "${CIDR_LIST[*]}"; then
      local formatted_ip=$(format_ip_tag "$ip")
      formatted_ips+=("$formatted_ip")
      if [[ ! " ${current_ip_tags[*]} " =~ " ${formatted_ip} " ]]; then
        needs_update=true
        added_ips+=("$formatted_ip")
        next_tags+=("$formatted_ip")
      fi
    fi
  done

  [[ ${#formatted_ips[@]} -eq 0 ]] && return

  # Add existing IP tags that are still valid
  for tag in "${current_ip_tags[@]}"; do
    if [[ " ${formatted_ips[*]} " =~ " ${tag} " ]]; then
      if [[ ! " ${next_tags[*]} " =~ " ${tag} " ]]; then
        next_tags+=("$tag")
      fi
    fi
  done

  if [[ "$needs_update" == true ]]; then
    echo "${type^} ${vmid}: adding IP tags: ${added_ips[*]}"
    $config_cmd set "${vmid}" -tags "$(IFS=';'; echo "${next_tags[*]}")" &>/dev/null
  elif [[ ${#current_ip_tags[@]} -gt 0 ]]; then
    echo "${type^} ${vmid}: IP tags already set: ${current_ip_tags[*]}"
  else
    echo "${type^} ${vmid}: setting initial IP tags: ${formatted_ips[*]}"
    $config_cmd set "${vmid}" -tags "$(IFS=';'; echo "${formatted_ips[*]}")" &>/dev/null
  fi
}

# Check if status changed
check_status() {
  local type="$1" current
  case "$type" in
    "lxc") current=$(pct list 2>/dev/null | grep -v VMID) ;;
    "vm") current=$(qm list 2>/dev/null | grep -v VMID) ;;
    "fw") current=$(ifconfig 2>/dev/null | grep "^fw") ;;
  esac
  local last_var="last_${type}_status"
  [[ "${!last_var}" == "$current" ]] && return 1
  eval "$last_var='$current'"
  return 0
}

# Update all instances
update_all() {
  local type="$1" list_cmd="pct" vmids count=0
  [[ "$type" == "vm" ]] && list_cmd="qm"
  vmids=$($list_cmd list 2>/dev/null | grep -v VMID | awk '{print $1}')
  for vmid in $vmids; do ((count++)); done

  echo "Found ${count} running ${type}s"
  [[ $count -eq 0 ]] && return

  for vmid in $vmids; do
    update_tags "$type" "$vmid"
  done
}

check() {
  local current_time changes_detected=false
  current_time=$(date +%s)

  for type in "lxc" "vm"; do
    local interval_var="${type^^}_STATUS_CHECK_INTERVAL"
    local last_check_var="last_${type}_check_time"
    local last_update_var="last_update_${type}_time"

    if [[ "${!interval_var}" -gt 0 ]] && (( current_time - ${!last_check_var} >= ${!interval_var} )); then
      echo "Checking ${type^^} status..."
      eval "${last_check_var}=\$current_time"
      if check_status "$type"; then
        changes_detected=true
        update_all "$type"
        eval "${last_update_var}=\$current_time"
      fi
    fi

    if (( current_time - ${!last_update_var} >= FORCE_UPDATE_INTERVAL )); then
      echo "Force updating ${type} tags..."
      changes_detected=true
      update_all "$type"
      eval "${last_update_var}=\$current_time"
    fi
  done

  if [[ "${FW_NET_INTERFACE_CHECK_INTERVAL}" -gt 0 ]] && \
     (( current_time - last_fw_check_time >= FW_NET_INTERFACE_CHECK_INTERVAL )); then
    echo "Checking network interfaces..."
    last_fw_check_time=$current_time
    if check_status "fw"; then
      changes_detected=true
      update_all "lxc"
      update_all "vm"
      last_update_lxc_time=$current_time
      last_update_vm_time=$current_time
    fi
  fi

  $changes_detected || echo "No changes detected in system status"
}

# Initialize time variables
declare -g last_lxc_status="" last_vm_status="" last_fw_status=""
declare -g last_lxc_check_time=0 last_vm_check_time=0 last_fw_check_time=0
declare -g last_update_lxc_time=0 last_update_vm_time=0

# Main loop
main() {
  while true; do
    check
    sleep "${LOOP_INTERVAL:-$DEFAULT_CHECK_INTERVAL}"
  done
}

main