This commit is contained in:
CanbiZ (MickLesk)
2026-02-24 09:32:56 +01:00
parent 900463f260
commit 6881dc9de1
32 changed files with 0 additions and 21997 deletions

File diff suppressed because it is too large Load Diff

View File

@@ -1,277 +0,0 @@
# Copyright (c) 2021-2026 tteck
# Author: tteck (tteckster)
# Co-Author: MickLesk
# Co-Author: michelroegl-brunner
# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
# ==============================================================================
# INSTALL.FUNC - CONTAINER INSTALLATION & SETUP
# ==============================================================================
#
# This file provides installation functions executed inside LXC containers
# after creation. Handles:
#
# - Network connectivity verification (IPv4/IPv6)
# - OS updates and package installation
# - DNS resolution checks
# - MOTD and SSH configuration
# - Container customization and auto-login
#
# Usage:
# - Sourced by <app>-install.sh scripts
# - Executes via pct exec inside container
# - Requires internet connectivity
#
# ==============================================================================
# ==============================================================================
# SECTION 1: INITIALIZATION
# ==============================================================================
# Bootstrap: curl is required to fetch the shared helper libraries below.
# Install it quietly via apt if missing (a Debian/Ubuntu base is assumed here).
if ! command -v curl >/dev/null 2>&1; then
printf "\r\e[2K%b" '\033[93m Setup Source \033[m' >&2
apt-get update >/dev/null 2>&1
apt-get install -y curl >/dev/null 2>&1
fi
# Pull in the shared UI helpers and the ERR-trap error handler, then
# initialise them (load_functions/catch_errors come from the sourced files —
# presumably they define the msg_* helpers and arm the ERR trap; confirm in core.func).
source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/core.func)
source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/error_handler.func)
load_functions
catch_errors
# ==============================================================================
# SECTION 2: NETWORK & CONNECTIVITY
# ==============================================================================
# ------------------------------------------------------------------------------
# verb_ip6()
#
# - Configures IPv6 based on DISABLEIPV6 variable
# - If DISABLEIPV6=yes: disables IPv6 via sysctl
# - Sets verbose mode via set_std_mode()
# ------------------------------------------------------------------------------
# verb_ip6: set verbosity mode and optionally disable IPv6 system-wide.
# Reads: DISABLEIPV6 ("yes" disables IPv6 via sysctl), VERBOSE (via set_std_mode).
verb_ip6() {
  set_std_mode # Set STD mode based on VERBOSE
  if [[ "$DISABLEIPV6" == "yes" ]]; then
    # Bug fix: guard the append so repeated runs don't stack duplicate
    # entries in /etc/sysctl.conf (the original appended unconditionally).
    grep -qxF 'net.ipv6.conf.all.disable_ipv6 = 1' /etc/sysctl.conf ||
      echo "net.ipv6.conf.all.disable_ipv6 = 1" >>/etc/sysctl.conf
    $STD sysctl -p
  fi
}
# ------------------------------------------------------------------------------
# setting_up_container()
#
# - Verifies network connectivity via hostname -I
# - Retries up to RETRY_NUM times with RETRY_EVERY seconds delay
# - Removes Python EXTERNALLY-MANAGED restrictions
# - Disables systemd-networkd-wait-online.service for faster boot
# - Exits with error if network unavailable after retries
# ------------------------------------------------------------------------------
# setting_up_container: wait for network connectivity, then prepare the base OS.
# Reads: RETRY_NUM, RETRY_EVERY, plus color/glyph vars (CROSS, RD, CL, NETWORK, BL).
# Exits 1 if the container never obtains an IP address.
setting_up_container() {
  msg_info "Setting up Container OS"
  # Poll for an IP address; DHCP may lag behind container start.
  for ((i = RETRY_NUM; i > 0; i--)); do
    if [[ -n "$(hostname -I)" ]]; then
      break
    fi
    echo 1>&2 -en "${CROSS}${RD} No Network! "
    sleep "$RETRY_EVERY"
  done
  # Still offline after all retries: abort with a hint.
  if [[ -z "$(hostname -I)" ]]; then
    echo 1>&2 -e "\n${CROSS}${RD} No Network After $RETRY_NUM Tries${CL}"
    echo -e "${NETWORK}Check Network Settings"
    exit 1
  fi
  # Remove the PEP 668 marker so pip may install system-wide in the container.
  rm -rf /usr/lib/python3.*/EXTERNALLY-MANAGED
  # Network is already verified above; skip the boot-time wait unit.
  systemctl disable -q --now systemd-networkd-wait-online.service
  msg_ok "Set up Container OS"
  msg_ok "Network Connected: ${BL}$(hostname -I)"
}
# ------------------------------------------------------------------------------
# network_check()
#
# - Comprehensive network connectivity check for IPv4 and IPv6
# - Tests connectivity to multiple DNS servers:
# * IPv4: 1.1.1.1 (Cloudflare), 8.8.8.8 (Google), 9.9.9.9 (Quad9)
# * IPv6: 2606:4700:4700::1111, 2001:4860:4860::8888, 2620:fe::fe
# - Verifies DNS resolution for GitHub and Community-Scripts domains
# - Prompts user to continue if no internet detected
# - Uses fatal() on DNS resolution failure for critical hosts
# ------------------------------------------------------------------------------
network_check() {
  # Relax strict error handling: every probe below is allowed to fail.
  set +e
  trap - ERR
  ipv4_connected=false
  ipv6_connected=false
  sleep 1
  # IPv4 reachability: Cloudflare, Google & Quad9 anycast resolvers.
  if ping -c 1 -W 1 1.1.1.1 &>/dev/null || ping -c 1 -W 1 8.8.8.8 &>/dev/null || ping -c 1 -W 1 9.9.9.9 &>/dev/null; then
    msg_ok "IPv4 Internet Connected"
    ipv4_connected=true
  else
    msg_error "IPv4 Internet Not Connected"
  fi
  # IPv6 reachability: same providers.
  if ping6 -c 1 -W 1 2606:4700:4700::1111 &>/dev/null || ping6 -c 1 -W 1 2001:4860:4860::8888 &>/dev/null || ping6 -c 1 -W 1 2620:fe::fe &>/dev/null; then
    msg_ok "IPv6 Internet Connected"
    ipv6_connected=true
  else
    msg_error "IPv6 Internet Not Connected"
  fi
  # Neither stack reachable: let the user opt in to an offline install.
  if [[ $ipv4_connected == false && $ipv6_connected == false ]]; then
    read -r -p "No Internet detected, would you like to continue anyway? <y/N> " prompt
    if [[ "${prompt,,}" =~ ^(y|yes)$ ]]; then
      echo -e "${INFO}${RD}Expect Issues Without Internet${CL}"
    else
      echo -e "${NETWORK}Check Network Settings"
      exit 1
    fi
  fi
  # DNS resolution for every host the installer must reach.
  GIT_HOSTS=("github.com" "raw.githubusercontent.com" "api.github.com" "git.community-scripts.org")
  GIT_STATUS="Git DNS:"
  DNS_FAILED=false
  for HOST in "${GIT_HOSTS[@]}"; do
    RESOLVEDIP=$(getent hosts "$HOST" | awk '{ print $1 }' | grep -E '(^([0-9]{1,3}\.){3}[0-9]{1,3}$)|(^[a-fA-F0-9:]+$)' | head -n1)
    if [[ -z "$RESOLVEDIP" ]]; then
      # Bug fix: include the leading space (the OK branch already had it) so
      # host entries don't run together in the status report.
      GIT_STATUS+=" $HOST:($DNSFAIL)"
      DNS_FAILED=true
    else
      GIT_STATUS+=" $HOST:($DNSOK)"
    fi
  done
  if [[ "$DNS_FAILED" == true ]]; then
    fatal "$GIT_STATUS"
  else
    msg_ok "$GIT_STATUS"
  fi
  # Restore strict error handling.
  set -e
  trap 'error_handler $LINENO "$BASH_COMMAND"' ERR
}
# ==============================================================================
# SECTION 3: OS UPDATE & PACKAGE MANAGEMENT
# ==============================================================================
# ------------------------------------------------------------------------------
# update_os()
#
# - Updates container OS via apt-get update and dist-upgrade
# - Configures APT cacher proxy if CACHER=yes (accelerates package downloads)
# - Removes Python EXTERNALLY-MANAGED restrictions for pip
# - Sources tools.func for additional setup functions after update
# - Uses $STD wrapper to suppress output unless VERBOSE=yes
# ------------------------------------------------------------------------------
update_os() {
  msg_info "Updating Container OS"
  # Optional apt-cacher-ng support: apt asks this hook for a proxy per request.
  if [[ "$CACHER" == "yes" ]]; then
    echo "Acquire::http::Proxy-Auto-Detect \"/usr/local/bin/apt-proxy-detect.sh\";" >/etc/apt/apt.conf.d/00aptproxy
    # Bug fix: use an UNQUOTED heredoc delimiter so ${CACHER_IP} is expanded
    # NOW, while it is set. The original quoted 'EOF' wrote the literal text
    # ${CACHER_IP} into the script; apt later runs it without that variable,
    # so the nc probe always failed and the cacher was never used.
    cat <<EOF >/usr/local/bin/apt-proxy-detect.sh
#!/bin/bash
if nc -w1 -z "${CACHER_IP}" 3142; then
echo -n "http://${CACHER_IP}:3142"
else
echo -n "DIRECT"
fi
EOF
    chmod +x /usr/local/bin/apt-proxy-detect.sh
  fi
  $STD apt-get update
  # Keep existing config files on upgrade (non-interactive).
  $STD apt-get -o Dpkg::Options::="--force-confold" -y dist-upgrade
  # Remove the PEP 668 marker so pip may install system-wide.
  rm -rf /usr/lib/python3.*/EXTERNALLY-MANAGED
  msg_ok "Updated Container OS"
  # Pull in the shared tooling helpers used by app install scripts.
  source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/tools.func)
}
# ==============================================================================
# SECTION 4: MOTD & SSH CONFIGURATION
# ==============================================================================
# ------------------------------------------------------------------------------
# motd_ssh()
#
# - Configures Message of the Day (MOTD) with container information
# - Creates /etc/profile.d/00_lxc-details.sh with:
# * Application name
# * Warning banner (DEV repository)
# * OS name and version
# * Hostname and IP address
# * GitHub repository link
# - Disables executable flag on /etc/update-motd.d/* scripts
# - Enables root SSH access if SSH_ROOT=yes
# - Configures TERM environment variable for better terminal support
# ------------------------------------------------------------------------------
# motd_ssh: build the login banner script and optionally enable root SSH login.
# Reads: APPLICATION, SSH_ROOT, plus color vars (BOLD, YW, RD, GN, CL).
motd_ssh() {
# Force a 256-color TERM; the grep guard keeps repeated runs idempotent.
grep -qxF "export TERM='xterm-256color'" /root/.bashrc || echo "export TERM='xterm-256color'" >>/root/.bashrc
# Collect OS name/version for the banner (os-release preferred, Debian fallback).
if [ -f "/etc/os-release" ]; then
OS_NAME=$(grep ^NAME /etc/os-release | cut -d= -f2 | tr -d '"')
OS_VERSION=$(grep ^VERSION_ID /etc/os-release | cut -d= -f2 | tr -d '"')
elif [ -f "/etc/debian_version" ]; then
OS_NAME="Debian"
OS_VERSION=$(cat /etc/debian_version)
fi
# Generate a profile.d script that echoes container details at every login.
# Escaped $(...) below are evaluated at LOGIN time, unescaped vars at WRITE time.
PROFILE_FILE="/etc/profile.d/00_lxc-details.sh"
echo "echo -e \"\"" >"$PROFILE_FILE"
echo -e "echo -e \"${BOLD}${YW}${APPLICATION} LXC Container - DEV Repository${CL}\"" >>"$PROFILE_FILE"
echo -e "echo -e \"${RD}WARNING: This is a DEVELOPMENT version (ProxmoxVED). Do NOT use in production!${CL}\"" >>"$PROFILE_FILE"
echo -e "echo -e \"${YW} OS: ${GN}${OS_NAME} - Version: ${OS_VERSION}${CL}\"" >>"$PROFILE_FILE"
echo -e "echo -e \"${YW} Hostname: ${GN}\$(hostname)${CL}\"" >>"$PROFILE_FILE"
echo -e "echo -e \"${YW} IP Address: ${GN}\$(hostname -I | awk '{print \$1}')${CL}\"" >>"$PROFILE_FILE"
echo -e "echo -e \"${YW} Repository: ${GN}https://github.com/community-scripts/ProxmoxVED${CL}\"" >>"$PROFILE_FILE"
echo "echo \"\"" >>"$PROFILE_FILE"
# Disable stock MOTD scripts so only the banner above is shown.
chmod -x /etc/update-motd.d/*
# Optionally permit root password logins over SSH.
if [[ "${SSH_ROOT}" == "yes" ]]; then
sed -i "s/#PermitRootLogin prohibit-password/PermitRootLogin yes/g" /etc/ssh/sshd_config
systemctl restart sshd
fi
}
# ==============================================================================
# SECTION 5: CONTAINER CUSTOMIZATION
# ==============================================================================
# ------------------------------------------------------------------------------
# customize()
#
# - Customizes container for passwordless root login if PASSWORD is empty
# - Configures getty for auto-login via /etc/systemd/system/container-getty@1.service.d/override.conf
# - Creates /usr/bin/update script for easy application updates
# - Injects SSH authorized keys if SSH_AUTHORIZED_KEY variable is set
# - Sets proper permissions on SSH directories and key files
# ------------------------------------------------------------------------------
# customize: post-install container tweaks.
# - Auto-login on the container console when no root PASSWORD was set.
# - Installs an /usr/bin/update convenience wrapper.
# - Installs SSH_AUTHORIZED_KEY (if provided) with strict permissions.
customize() {
  if [[ -z "$PASSWORD" ]]; then
    msg_info "Customizing Container"
    GETTY_OVERRIDE="/etc/systemd/system/container-getty@1.service.d/override.conf"
    # Quote all expansions (the original left them unquoted).
    mkdir -p "$(dirname "$GETTY_OVERRIDE")"
    cat <<EOF >"$GETTY_OVERRIDE"
[Service]
ExecStart=
ExecStart=-/sbin/agetty --autologin root --noclear --keep-baud tty%I 115200,38400,9600 \$TERM
EOF
    systemctl daemon-reload
    # Derive the unit name from the override dir ("...getty@1.service.d" -> "...getty@1.service").
    systemctl restart "$(basename "$(dirname "$GETTY_OVERRIDE")" | sed 's/\.d//')"
    msg_ok "Customized Container"
  fi
  # 'update' re-runs the app's ct script; ${app} expands at write time.
  echo "bash -c \"\$(curl -fsSL https://github.com/community-scripts/ProxmoxVED/raw/main/ct/${app}.sh)\"" >/usr/bin/update
  chmod +x /usr/bin/update
  # Install the provided SSH public key with correct ownership-safe modes.
  if [[ -n "${SSH_AUTHORIZED_KEY}" ]]; then
    mkdir -p /root/.ssh
    echo "${SSH_AUTHORIZED_KEY}" >/root/.ssh/authorized_keys
    chmod 700 /root/.ssh
    chmod 600 /root/.ssh/authorized_keys
  fi
}

View File

@@ -1,287 +0,0 @@
# Copyright (c) 2021-2026 tteck
# Author: tteck (tteckster)
# Co-Author: MickLesk
# Co-Author: michelroegl-brunner
# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
# ==============================================================================
# INSTALL.FUNC - CONTAINER INSTALLATION & SETUP
# ==============================================================================
#
# This file provides installation functions executed inside LXC containers
# after creation. Handles:
#
# - Network connectivity verification (IPv4/IPv6)
# - OS updates and package installation
# - DNS resolution checks
# - MOTD and SSH configuration
# - Container customization and auto-login
#
# Usage:
# - Sourced by <app>-install.sh scripts
# - Executes via pct exec inside container
# - Requires internet connectivity
#
# ==============================================================================
# ==============================================================================
# SECTION 1: INITIALIZATION
# ==============================================================================
# Bootstrap: curl is required to fetch the shared helper libraries below.
# Install it quietly via apt if missing (a Debian/Ubuntu base is assumed here).
if ! command -v curl >/dev/null 2>&1; then
printf "\r\e[2K%b" '\033[93m Setup Source \033[m' >&2
apt-get update >/dev/null 2>&1
apt-get install -y curl >/dev/null 2>&1
fi
# Pull in the shared UI helpers and the ERR-trap error handler, then
# initialise them (presumably load_functions defines the msg_* helpers and
# catch_errors arms the ERR trap — confirm in core.func / error_handler.func).
source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/core.func)
source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/error_handler.func)
load_functions
catch_errors
# ==============================================================================
# SECTION 2: NETWORK & CONNECTIVITY
# ==============================================================================
# ------------------------------------------------------------------------------
# verb_ip6()
#
# - Configures IPv6 based on IPV6_METHOD variable
# - If IPV6_METHOD=disable: disables IPv6 via sysctl
# - Sets verbose mode via set_std_mode()
# ------------------------------------------------------------------------------
# verb_ip6: set verbosity mode and honour IPV6_METHOD=disable by persisting
# sysctl knobs. Writing a dedicated drop-in under /etc/sysctl.d keeps
# /etc/sysctl.conf untouched and makes repeated runs idempotent.
verb_ip6() {
set_std_mode # Set STD mode based on VERBOSE
if [ "${IPV6_METHOD:-}" = "disable" ]; then
msg_info "Disabling IPv6 (this may affect some services)"
mkdir -p /etc/sysctl.d
# Overwrites (not appends) the drop-in, so re-runs never duplicate entries.
$STD tee /etc/sysctl.d/99-disable-ipv6.conf >/dev/null <<EOF
# Disable IPv6 (set by community-scripts)
net.ipv6.conf.all.disable_ipv6 = 1
net.ipv6.conf.default.disable_ipv6 = 1
net.ipv6.conf.lo.disable_ipv6 = 1
EOF
$STD sysctl -p /etc/sysctl.d/99-disable-ipv6.conf
msg_ok "Disabled IPv6"
fi
}
# ------------------------------------------------------------------------------
# setting_up_container()
#
# - Verifies network connectivity via hostname -I
# - Retries up to RETRY_NUM times with RETRY_EVERY seconds delay
# - Removes Python EXTERNALLY-MANAGED restrictions
# - Disables systemd-networkd-wait-online.service for faster boot
# - Exits with error if network unavailable after retries
# ------------------------------------------------------------------------------
# setting_up_container: wait for network connectivity, then prepare the base OS.
# Retries up to RETRY_NUM times, sleeping RETRY_EVERY seconds between attempts;
# exits 1 if the container never obtains an IP address.
setting_up_container() {
  msg_info "Setting up Container OS"
  # Poll for an IP address; DHCP can lag a freshly started container.
  tries=$RETRY_NUM
  while ((tries > 0)) && [ -z "$(hostname -I)" ]; do
    echo 1>&2 -en "${CROSS}${RD} No Network! "
    sleep $RETRY_EVERY
    tries=$((tries - 1))
  done
  # Still offline after all retries: abort with a hint.
  if [ -z "$(hostname -I)" ]; then
    echo 1>&2 -e "\n${CROSS}${RD} No Network After $RETRY_NUM Tries${CL}"
    echo -e "${NETWORK}Check Network Settings"
    exit 1
  fi
  # Remove the PEP 668 marker so pip may install system-wide.
  rm -rf /usr/lib/python3.*/EXTERNALLY-MANAGED
  # Network was verified above; skip the boot-time wait unit for faster boots.
  systemctl disable -q --now systemd-networkd-wait-online.service
  msg_ok "Set up Container OS"
  msg_ok "Network Connected: ${BL}$(hostname -I)"
}
# ------------------------------------------------------------------------------
# network_check()
#
# - Comprehensive network connectivity check for IPv4 and IPv6
# - Tests connectivity to multiple DNS servers:
# * IPv4: 1.1.1.1 (Cloudflare), 8.8.8.8 (Google), 9.9.9.9 (Quad9)
# * IPv6: 2606:4700:4700::1111, 2001:4860:4860::8888, 2620:fe::fe
# - Verifies DNS resolution for GitHub and Community-Scripts domains
# - Prompts user to continue if no internet detected
# - Uses fatal() on DNS resolution failure for critical hosts
# ------------------------------------------------------------------------------
network_check() {
  # Relax strict error handling: every probe below is allowed to fail.
  set +e
  trap - ERR
  ipv4_connected=false
  ipv6_connected=false
  sleep 1
  # IPv4 reachability: Cloudflare, Google & Quad9 anycast resolvers.
  if ping -c 1 -W 1 1.1.1.1 &>/dev/null || ping -c 1 -W 1 8.8.8.8 &>/dev/null || ping -c 1 -W 1 9.9.9.9 &>/dev/null; then
    msg_ok "IPv4 Internet Connected"
    ipv4_connected=true
  else
    msg_error "IPv4 Internet Not Connected"
  fi
  # IPv6 reachability: same providers.
  if ping6 -c 1 -W 1 2606:4700:4700::1111 &>/dev/null || ping6 -c 1 -W 1 2001:4860:4860::8888 &>/dev/null || ping6 -c 1 -W 1 2620:fe::fe &>/dev/null; then
    msg_ok "IPv6 Internet Connected"
    ipv6_connected=true
  else
    msg_error "IPv6 Internet Not Connected"
  fi
  # Neither stack reachable: let the user opt in to an offline install.
  if [[ $ipv4_connected == false && $ipv6_connected == false ]]; then
    read -r -p "No Internet detected, would you like to continue anyway? <y/N> " prompt
    if [[ "${prompt,,}" =~ ^(y|yes)$ ]]; then
      echo -e "${INFO}${RD}Expect Issues Without Internet${CL}"
    else
      echo -e "${NETWORK}Check Network Settings"
      exit 1
    fi
  fi
  # DNS resolution for every host the installer must reach.
  GIT_HOSTS=("github.com" "raw.githubusercontent.com" "api.github.com" "git.community-scripts.org")
  GIT_STATUS="Git DNS:"
  DNS_FAILED=false
  for HOST in "${GIT_HOSTS[@]}"; do
    RESOLVEDIP=$(getent hosts "$HOST" | awk '{ print $1 }' | grep -E '(^([0-9]{1,3}\.){3}[0-9]{1,3}$)|(^[a-fA-F0-9:]+$)' | head -n1)
    if [[ -z "$RESOLVEDIP" ]]; then
      # Bug fix: include the leading space (the OK branch already had it) so
      # host entries don't run together in the status report.
      GIT_STATUS+=" $HOST:($DNSFAIL)"
      DNS_FAILED=true
    else
      GIT_STATUS+=" $HOST:($DNSOK)"
    fi
  done
  if [[ "$DNS_FAILED" == true ]]; then
    fatal "$GIT_STATUS"
  else
    msg_ok "$GIT_STATUS"
  fi
  # Restore strict error handling.
  set -e
  trap 'error_handler $LINENO "$BASH_COMMAND"' ERR
}
# ==============================================================================
# SECTION 3: OS UPDATE & PACKAGE MANAGEMENT
# ==============================================================================
# ------------------------------------------------------------------------------
# update_os()
#
# - Updates container OS via apt-get update and dist-upgrade
# - Configures APT cacher proxy if CACHER=yes (accelerates package downloads)
# - Removes Python EXTERNALLY-MANAGED restrictions for pip
# - Sources tools.func for additional setup functions after update
# - Uses $STD wrapper to suppress output unless VERBOSE=yes
# ------------------------------------------------------------------------------
# update_os: refresh the package index and dist-upgrade the container OS.
# Reads: CACHER ("yes" enables apt-cacher-ng auto-detection), CACHER_IP, STD.
update_os() {
msg_info "Updating Container OS"
# Optional apt-cacher-ng support: apt asks the hook below for a proxy per request.
if [[ "$CACHER" == "yes" ]]; then
echo 'Acquire::http::Proxy-Auto-Detect "/usr/local/bin/apt-proxy-detect.sh";' >/etc/apt/apt.conf.d/00aptproxy
# Unquoted delimiter: ${CACHER_IP} is expanded NOW, while it is still set;
# the generated script runs later under apt where CACHER_IP is not exported.
cat <<EOF >/usr/local/bin/apt-proxy-detect.sh
#!/bin/bash
if nc -w1 -z "${CACHER_IP}" 3142; then
echo -n "http://${CACHER_IP}:3142"
else
echo -n "DIRECT"
fi
EOF
chmod +x /usr/local/bin/apt-proxy-detect.sh
fi
$STD apt-get update
# Keep existing config files on upgrade (non-interactive).
$STD apt-get -o Dpkg::Options::="--force-confold" -y dist-upgrade
# Remove the PEP 668 marker so pip may install system-wide.
rm -rf /usr/lib/python3.*/EXTERNALLY-MANAGED
msg_ok "Updated Container OS"
# Pull in the shared tooling helpers used by app install scripts.
source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/tools.func)
}
# ==============================================================================
# SECTION 4: MOTD & SSH CONFIGURATION
# ==============================================================================
# ------------------------------------------------------------------------------
# motd_ssh()
#
# - Configures Message of the Day (MOTD) with container information
# - Creates /etc/profile.d/00_lxc-details.sh with:
# * Application name
# * Warning banner (DEV repository)
# * OS name and version
# * Hostname and IP address
# * GitHub repository link
# - Disables executable flag on /etc/update-motd.d/* scripts
# - Enables root SSH access if SSH_ROOT=yes
# - Configures TERM environment variable for better terminal support
# ------------------------------------------------------------------------------
# motd_ssh: build the login banner script and optionally enable root SSH login.
# Reads: APPLICATION, SSH_ROOT, plus color vars (BOLD, YW, RD, GN, CL).
motd_ssh() {
# Set terminal to 256-color mode; the grep guard keeps repeated runs idempotent.
grep -qxF "export TERM='xterm-256color'" /root/.bashrc || echo "export TERM='xterm-256color'" >>/root/.bashrc
# Get OS information (Debian / Ubuntu); os-release preferred, Debian fallback.
if [ -f "/etc/os-release" ]; then
OS_NAME=$(grep ^NAME /etc/os-release | cut -d= -f2 | tr -d '"')
OS_VERSION=$(grep ^VERSION_ID /etc/os-release | cut -d= -f2 | tr -d '"')
elif [ -f "/etc/debian_version" ]; then
OS_NAME="Debian"
OS_VERSION=$(cat /etc/debian_version)
fi
# Generate a profile.d script that echoes container details at every login.
# Escaped $(...) below are evaluated at LOGIN time, unescaped vars at WRITE time.
PROFILE_FILE="/etc/profile.d/00_lxc-details.sh"
echo "echo -e \"\"" >"$PROFILE_FILE"
echo -e "echo -e \"${BOLD}${YW}${APPLICATION} LXC Container - DEV Repository${CL}\"" >>"$PROFILE_FILE"
echo -e "echo -e \"${RD}WARNING: This is a DEVELOPMENT version (ProxmoxVED). Do NOT use in production!${CL}\"" >>"$PROFILE_FILE"
echo -e "echo -e \"${YW} OS: ${GN}${OS_NAME} - Version: ${OS_VERSION}${CL}\"" >>"$PROFILE_FILE"
echo -e "echo -e \"${YW} Hostname: ${GN}\$(hostname)${CL}\"" >>"$PROFILE_FILE"
echo -e "echo -e \"${YW} IP Address: ${GN}\$(hostname -I | awk '{print \$1}')${CL}\"" >>"$PROFILE_FILE"
echo -e "echo -e \"${YW} Repository: ${GN}https://github.com/community-scripts/ProxmoxVED${CL}\"" >>"$PROFILE_FILE"
echo "echo \"\"" >>"$PROFILE_FILE"
# Disable stock MOTD scripts so only the banner above is shown.
chmod -x /etc/update-motd.d/*
# Optionally permit root password logins over SSH.
if [[ "${SSH_ROOT}" == "yes" ]]; then
sed -i "s/#PermitRootLogin prohibit-password/PermitRootLogin yes/g" /etc/ssh/sshd_config
systemctl restart sshd
fi
}
# ==============================================================================
# SECTION 5: CONTAINER CUSTOMIZATION
# ==============================================================================
# ------------------------------------------------------------------------------
# customize()
#
# - Customizes container for passwordless root login if PASSWORD is empty
# - Configures getty for auto-login via /etc/systemd/system/container-getty@1.service.d/override.conf
# - Creates /usr/bin/update script for easy application updates
# - Injects SSH authorized keys if SSH_AUTHORIZED_KEY variable is set
# - Sets proper permissions on SSH directories and key files
# ------------------------------------------------------------------------------
# customize: post-install container tweaks.
# - Auto-login on the container console when no root PASSWORD was set.
# - Installs an /usr/bin/update convenience wrapper.
# - Installs SSH_AUTHORIZED_KEY (if provided) with strict permissions.
customize() {
  if [[ -z "$PASSWORD" ]]; then
    msg_info "Customizing Container"
    GETTY_OVERRIDE="/etc/systemd/system/container-getty@1.service.d/override.conf"
    # Quote all expansions (the original left them unquoted).
    mkdir -p "$(dirname "$GETTY_OVERRIDE")"
    cat <<EOF >"$GETTY_OVERRIDE"
[Service]
ExecStart=
ExecStart=-/sbin/agetty --autologin root --noclear --keep-baud tty%I 115200,38400,9600 \$TERM
EOF
    systemctl daemon-reload
    # Derive the unit name from the override dir ("...getty@1.service.d" -> "...getty@1.service").
    systemctl restart "$(basename "$(dirname "$GETTY_OVERRIDE")" | sed 's/\.d//')"
    msg_ok "Customized Container"
  fi
  # 'update' re-runs the app's ct script; ${app} expands at write time.
  echo "bash -c \"\$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/ct/${app}.sh)\"" >/usr/bin/update
  chmod +x /usr/bin/update
  # Install the provided SSH public key with correct ownership-safe modes.
  if [[ -n "${SSH_AUTHORIZED_KEY}" ]]; then
    mkdir -p /root/.ssh
    echo "${SSH_AUTHORIZED_KEY}" >/root/.ssh/authorized_keys
    chmod 700 /root/.ssh
    chmod 600 /root/.ssh/authorized_keys
  fi
}

View File

@@ -1,992 +0,0 @@
# Copyright (c) 2021-2026 community-scripts ORG
# Author: tteck (tteckster)
# Co-Author: MickLesk
# Co-Author: michelroegl-brunner
# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
# ==============================================================================
# INSTALL.FUNC - UNIFIED CONTAINER INSTALLATION & SETUP
# ==============================================================================
#
# All-in-One install.func supporting multiple Linux distributions:
# - Debian, Ubuntu, Devuan (apt, systemd/sysvinit)
# - Alpine (apk, OpenRC)
# - Fedora, Rocky, AlmaLinux, CentOS (dnf/yum, systemd)
# - Arch Linux (pacman, systemd)
# - openSUSE (zypper, systemd)
# - Gentoo (emerge, OpenRC)
# - NixOS (nix, systemd)
#
# Features:
# - Automatic OS detection
# - Unified package manager abstraction
# - Init system abstraction (systemd/OpenRC/runit/sysvinit)
# - Network connectivity verification
# - MOTD and SSH configuration
# - Container customization
#
# ==============================================================================
# ==============================================================================
# SECTION 1: INITIALIZATION & OS DETECTION
# ==============================================================================
# Global variables for OS detection.
# All are empty until detect_os() populates them; consumers must call it first.
OS_TYPE="" # debian, ubuntu, alpine, fedora, arch, opensuse, gentoo, nixos, devuan, rocky, alma, centos
OS_FAMILY="" # debian, alpine, rhel, arch, suse, gentoo, nixos
OS_VERSION="" # Version number
PKG_MANAGER="" # apt, apk, dnf, yum, pacman, zypper, emerge, nix-env
INIT_SYSTEM="" # systemd, openrc, runit, sysvinit
# ------------------------------------------------------------------------------
# detect_os()
#
# Detects the operating system and sets global variables:
# OS_TYPE, OS_FAMILY, OS_VERSION, PKG_MANAGER, INIT_SYSTEM
# ------------------------------------------------------------------------------
detect_os() {
  # --- Identify distribution and version ------------------------------------
  if [[ -f /etc/os-release ]]; then
    # shellcheck disable=SC1091
    # NOTE: sourcing os-release also sets ID/NAME/VERSION_ID etc. in scope.
    . /etc/os-release
    OS_TYPE="${ID:-unknown}"
    OS_VERSION="${VERSION_ID:-unknown}"
  elif [[ -f /etc/alpine-release ]]; then
    OS_TYPE="alpine"
    OS_VERSION=$(cat /etc/alpine-release)
  elif [[ -f /etc/debian_version ]]; then
    OS_TYPE="debian"
    OS_VERSION=$(cat /etc/debian_version)
  elif [[ -f /etc/redhat-release ]]; then
    OS_TYPE="centos"
    OS_VERSION=$(grep -oE '[0-9]+\.[0-9]+' /etc/redhat-release | head -1)
  elif [[ -f /etc/arch-release ]]; then
    OS_TYPE="arch"
    OS_VERSION="rolling"
  elif [[ -f /etc/gentoo-release ]]; then
    OS_TYPE="gentoo"
    # Read the file directly instead of cat | grep (useless use of cat).
    OS_VERSION=$(grep -oE '[0-9.]+' /etc/gentoo-release)
  else
    OS_TYPE="unknown"
    OS_VERSION="unknown"
  fi
  # --- Normalize OS type and map to family / package manager ----------------
  case "$OS_TYPE" in
  debian | ubuntu | devuan)
    OS_FAMILY="debian"
    PKG_MANAGER="apt"
    ;;
  alpine)
    OS_FAMILY="alpine"
    PKG_MANAGER="apk"
    ;;
  fedora | rhel)
    OS_FAMILY="rhel"
    PKG_MANAGER="dnf"
    ;;
  rocky | rockylinux)
    OS_TYPE="rocky"
    OS_FAMILY="rhel"
    PKG_MANAGER="dnf"
    ;;
  alma | almalinux)
    OS_TYPE="alma"
    OS_FAMILY="rhel"
    PKG_MANAGER="dnf"
    ;;
  centos)
    OS_FAMILY="rhel"
    # CentOS 7 uses yum, 8+ uses dnf. Guard the comparison: a non-numeric
    # version ("unknown") must not be evaluated arithmetically.
    if [[ "${OS_VERSION%%.*}" =~ ^[0-9]+$ ]] && ((${OS_VERSION%%.*} >= 8)); then
      PKG_MANAGER="dnf"
    else
      PKG_MANAGER="yum"
    fi
    ;;
  arch | archlinux)
    OS_TYPE="arch"
    OS_FAMILY="arch"
    PKG_MANAGER="pacman"
    ;;
  opensuse* | sles)
    OS_TYPE="opensuse"
    OS_FAMILY="suse"
    PKG_MANAGER="zypper"
    ;;
  gentoo)
    OS_FAMILY="gentoo"
    PKG_MANAGER="emerge"
    ;;
  nixos)
    OS_FAMILY="nixos"
    PKG_MANAGER="nix-env"
    ;;
  *)
    OS_FAMILY="unknown"
    PKG_MANAGER="unknown"
    ;;
  esac
  # --- Detect init system ---------------------------------------------------
  if command -v systemctl &>/dev/null && [[ -d /run/systemd/system ]]; then
    INIT_SYSTEM="systemd"
  elif command -v rc-service &>/dev/null || [[ -d /etc/init.d && -f /sbin/openrc ]]; then
    INIT_SYSTEM="openrc"
  elif command -v sv &>/dev/null && [[ -d /etc/sv ]]; then
    INIT_SYSTEM="runit"
  elif [[ -f /etc/inittab ]]; then
    INIT_SYSTEM="sysvinit"
  else
    INIT_SYSTEM="unknown"
  fi
}
# ------------------------------------------------------------------------------
# Bootstrap: Ensure curl is available and source core functions
# ------------------------------------------------------------------------------
# _bootstrap: minimal self-setup so the rest of the library can run.
# Installs curl via whichever package manager is present, then sources the
# shared helper libraries over HTTPS and initialises them.
_bootstrap() {
# Minimal bootstrap to get curl installed
if ! command -v curl &>/dev/null; then
printf "\r\e[2K%b" '\033[93m Setup Source \033[m' >&2
# Try each known package manager in turn; first hit wins.
if command -v apt-get &>/dev/null; then
apt-get update &>/dev/null && apt-get install -y curl &>/dev/null
elif command -v apk &>/dev/null; then
apk update &>/dev/null && apk add curl &>/dev/null
elif command -v dnf &>/dev/null; then
dnf install -y curl &>/dev/null
elif command -v yum &>/dev/null; then
yum install -y curl &>/dev/null
elif command -v pacman &>/dev/null; then
pacman -Sy --noconfirm curl &>/dev/null
elif command -v zypper &>/dev/null; then
zypper install -y curl &>/dev/null
elif command -v emerge &>/dev/null; then
emerge --quiet net-misc/curl &>/dev/null
fi
fi
# Source core functions (presumably load_functions defines the msg_* helpers
# and catch_errors arms the ERR trap — confirm in core.func / error_handler.func)
source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/core.func)
source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/error_handler.func)
load_functions
catch_errors
}
# Run bootstrap and OS detection at source time so every later helper can rely
# on curl, the msg_* helpers, and the OS_* / PKG_MANAGER / INIT_SYSTEM globals.
_bootstrap
detect_os
# ==============================================================================
# SECTION 2: PACKAGE MANAGER ABSTRACTION
# ==============================================================================
# ------------------------------------------------------------------------------
# pkg_update()
#
# Updates package manager cache/database
# ------------------------------------------------------------------------------
# pkg_update: refresh the package index/database for the detected manager.
# Reads: PKG_MANAGER, STD. Returns 1 (after msg_error) for unknown managers.
pkg_update() {
  case "$PKG_MANAGER" in
  apt) $STD apt-get update ;;
  apk) $STD apk update ;;
  dnf) $STD dnf makecache ;;
  yum) $STD yum makecache ;;
  pacman) $STD pacman -Sy ;;
  zypper) $STD zypper refresh ;;
  emerge) $STD emerge --sync ;;
  nix-env) $STD nix-channel --update ;;
  *)
    msg_error "Unknown package manager: $PKG_MANAGER"
    return 1
    ;;
  esac
}
# ------------------------------------------------------------------------------
# pkg_upgrade()
#
# Upgrades all installed packages
# ------------------------------------------------------------------------------
# pkg_upgrade: upgrade every installed package via the detected manager.
# Reads: PKG_MANAGER, STD. Returns 1 (after msg_error) for unknown managers.
pkg_upgrade() {
  case "$PKG_MANAGER" in
  apt) $STD apt-get -o Dpkg::Options::="--force-confold" -y dist-upgrade ;;
  apk) $STD apk -U upgrade ;;
  dnf) $STD dnf -y upgrade ;;
  yum) $STD yum -y update ;;
  pacman) $STD pacman -Syu --noconfirm ;;
  zypper) $STD zypper -n update ;;
  emerge) $STD emerge --quiet --update --deep @world ;;
  nix-env) $STD nix-env -u ;;
  *)
    msg_error "Unknown package manager: $PKG_MANAGER"
    return 1
    ;;
  esac
}
# ------------------------------------------------------------------------------
# pkg_install(packages...)
#
# Installs one or more packages
# Arguments:
# packages - List of packages to install
# ------------------------------------------------------------------------------
# pkg_install: install one or more packages via the detected manager.
# Arguments: list of package names; a no-op (status 0) when none are given.
# Reads: PKG_MANAGER, STD. Returns 1 (after msg_error) for unknown managers.
pkg_install() {
  local pkgs=("$@")
  [[ ${#pkgs[@]} -eq 0 ]] && return 0
  case "$PKG_MANAGER" in
  apt) $STD apt-get install -y "${pkgs[@]}" ;;
  apk) $STD apk add --no-cache "${pkgs[@]}" ;;
  dnf) $STD dnf install -y "${pkgs[@]}" ;;
  yum) $STD yum install -y "${pkgs[@]}" ;;
  pacman) $STD pacman -S --noconfirm "${pkgs[@]}" ;;
  zypper) $STD zypper install -y "${pkgs[@]}" ;;
  emerge) $STD emerge --quiet "${pkgs[@]}" ;;
  nix-env)
    # nix-env installs by attribute path, one package at a time.
    local p
    for p in "${pkgs[@]}"; do
      $STD nix-env -iA "nixos.$p"
    done
    ;;
  *)
    msg_error "Unknown package manager: $PKG_MANAGER"
    return 1
    ;;
  esac
}
# ------------------------------------------------------------------------------
# pkg_remove(packages...)
#
# Removes one or more packages
# ------------------------------------------------------------------------------
# pkg_remove: remove one or more packages via the detected manager.
# Arguments: list of package names; a no-op (status 0) when none are given.
# Reads: PKG_MANAGER, STD. Returns 1 (after msg_error) for unknown managers.
pkg_remove() {
  local pkgs=("$@")
  [[ ${#pkgs[@]} -eq 0 ]] && return 0
  case "$PKG_MANAGER" in
  apt) $STD apt-get remove -y "${pkgs[@]}" ;;
  apk) $STD apk del "${pkgs[@]}" ;;
  dnf) $STD dnf remove -y "${pkgs[@]}" ;;
  yum) $STD yum remove -y "${pkgs[@]}" ;;
  pacman) $STD pacman -Rs --noconfirm "${pkgs[@]}" ;;
  zypper) $STD zypper remove -y "${pkgs[@]}" ;;
  emerge) $STD emerge --quiet --unmerge "${pkgs[@]}" ;;
  nix-env)
    # nix-env uninstalls one package at a time.
    local p
    for p in "${pkgs[@]}"; do
      $STD nix-env -e "$p"
    done
    ;;
  *)
    msg_error "Unknown package manager: $PKG_MANAGER"
    return 1
    ;;
  esac
}
# ------------------------------------------------------------------------------
# pkg_clean()
#
# Cleans package manager cache to free space
# ------------------------------------------------------------------------------
# pkg_clean: drop package caches / orphans to reclaim disk space.
# Reads: PKG_MANAGER, STD. Silently succeeds for unknown managers.
pkg_clean() {
  case "$PKG_MANAGER" in
  apt)
    $STD apt-get autoremove -y
    $STD apt-get autoclean
    ;;
  apk) $STD apk cache clean ;;
  dnf)
    $STD dnf clean all
    $STD dnf autoremove -y
    ;;
  yum) $STD yum clean all ;;
  pacman) $STD pacman -Scc --noconfirm ;;
  zypper) $STD zypper clean ;;
  emerge) $STD emerge --quiet --depclean ;;
  nix-env) $STD nix-collect-garbage -d ;;
  *) return 0 ;;
  esac
}
# ==============================================================================
# SECTION 3: SERVICE/INIT SYSTEM ABSTRACTION
# ==============================================================================
# ------------------------------------------------------------------------------
# svc_enable(service)
#
# Enables a service to start at boot
# ------------------------------------------------------------------------------
# svc_enable: mark a service to start at boot under the detected init system.
# Arguments: $1 - service name (returns 1 if empty).
# Reads: INIT_SYSTEM, STD.
svc_enable() {
  local svc="$1"
  [[ -z "$svc" ]] && return 1
  case "$INIT_SYSTEM" in
  systemd) $STD systemctl enable "$svc" ;;
  openrc) $STD rc-update add "$svc" default ;;
  runit) [[ -d "/etc/sv/$svc" ]] && ln -sf "/etc/sv/$svc" "/var/service/" ;;
  sysvinit)
    # Debian-style first, Red Hat-style second.
    if command -v update-rc.d &>/dev/null; then
      $STD update-rc.d "$svc" defaults
    elif command -v chkconfig &>/dev/null; then
      $STD chkconfig "$svc" on
    fi
    ;;
  *)
    msg_warn "Unknown init system, cannot enable $svc"
    return 1
    ;;
  esac
}
# ------------------------------------------------------------------------------
# svc_disable(service)
#
# Disables a service from starting at boot
# ------------------------------------------------------------------------------
svc_disable() {
  # Prevent a service from starting at boot on the detected init system.
  # Returns 1 for a missing service name or an unknown init system.
  local unit="$1"
  [[ -n "$unit" ]] || return 1
  case "$INIT_SYSTEM" in
  systemd) $STD systemctl disable "$unit" ;;
  openrc) $STD rc-update del "$unit" default 2>/dev/null || true ;;
  runit)
    # Removing the symlink from the supervised dir is runit's "disable"
    rm -f "/var/service/$unit"
    ;;
  sysvinit)
    if command -v update-rc.d &>/dev/null; then
      $STD update-rc.d "$unit" remove
    elif command -v chkconfig &>/dev/null; then
      $STD chkconfig "$unit" off
    fi
    ;;
  *) return 1 ;;
  esac
}
# ------------------------------------------------------------------------------
# svc_start(service)
#
# Starts a service immediately
# ------------------------------------------------------------------------------
svc_start() {
  # Start a service immediately via the detected init system.
  # Returns 1 for a missing service name or an unknown init system.
  local unit="$1"
  [[ -n "$unit" ]] || return 1
  case "$INIT_SYSTEM" in
  systemd) $STD systemctl start "$unit" ;;
  openrc) $STD rc-service "$unit" start ;;
  runit) $STD sv start "$unit" ;;
  sysvinit) $STD /etc/init.d/"$unit" start ;;
  *) return 1 ;;
  esac
}
# ------------------------------------------------------------------------------
# svc_stop(service)
#
# Stops a running service
# ------------------------------------------------------------------------------
svc_stop() {
  # Stop a running service via the detected init system.
  # Returns 1 for a missing service name or an unknown init system.
  local unit="$1"
  [[ -n "$unit" ]] || return 1
  case "$INIT_SYSTEM" in
  systemd) $STD systemctl stop "$unit" ;;
  openrc) $STD rc-service "$unit" stop ;;
  runit) $STD sv stop "$unit" ;;
  sysvinit) $STD /etc/init.d/"$unit" stop ;;
  *) return 1 ;;
  esac
}
# ------------------------------------------------------------------------------
# svc_restart(service)
#
# Restarts a service
# ------------------------------------------------------------------------------
svc_restart() {
  # Restart a service via the detected init system.
  # Returns 1 for a missing service name or an unknown init system.
  local unit="$1"
  [[ -n "$unit" ]] || return 1
  case "$INIT_SYSTEM" in
  systemd) $STD systemctl restart "$unit" ;;
  openrc) $STD rc-service "$unit" restart ;;
  runit) $STD sv restart "$unit" ;;
  sysvinit) $STD /etc/init.d/"$unit" restart ;;
  *) return 1 ;;
  esac
}
# ------------------------------------------------------------------------------
# svc_status(service)
#
# Gets service status (returns 0 if running)
# ------------------------------------------------------------------------------
svc_status() {
  # Probe whether a service is currently running (exit 0 = running).
  # Unlike the other svc_* helpers this never goes through $STD, since
  # only the exit status matters, not the output.
  local unit="$1"
  [[ -n "$unit" ]] || return 1
  case "$INIT_SYSTEM" in
  systemd) systemctl is-active --quiet "$unit" ;;
  openrc) rc-service "$unit" status &>/dev/null ;;
  runit) sv status "$unit" | grep -q "^run:" ;;
  sysvinit) /etc/init.d/"$unit" status &>/dev/null ;;
  *) return 1 ;;
  esac
}
# ------------------------------------------------------------------------------
# svc_reload_daemon()
#
# Reloads init system daemon configuration (for systemd)
# ------------------------------------------------------------------------------
svc_reload_daemon() {
  # Reload the init daemon's unit definitions. Only systemd needs this
  # (after writing unit files or drop-ins); every other init system is
  # a successful no-op.
  if [[ "$INIT_SYSTEM" == "systemd" ]]; then
    $STD systemctl daemon-reload
  else
    return 0
  fi
}
# ==============================================================================
# SECTION 4: NETWORK & CONNECTIVITY
# ==============================================================================
# ------------------------------------------------------------------------------
# get_ip()
#
# Gets the primary IPv4 address of the container
# Returns: IP address string
# ------------------------------------------------------------------------------
get_ip() {
  # Print the container's primary IPv4 address on stdout (an empty line
  # when none could be determined). Tries hostname -I first, then
  # iproute2, then legacy ifconfig as a last resort.
  local addr=""
  if command -v hostname &>/dev/null; then
    addr=$(hostname -I 2>/dev/null | awk '{print $1}')
  fi
  if [[ -z "$addr" ]] && command -v ip &>/dev/null; then
    addr=$(ip -4 addr show scope global | grep -oP '(?<=inet\s)\d+(\.\d+){3}' | head -1)
  fi
  if [[ -z "$addr" ]] && command -v ifconfig &>/dev/null; then
    # Matches both GNU ("inet addr:1.2.3.4") and BSD ("inet 1.2.3.4")
    # output shapes; loopback is filtered out
    addr=$(ifconfig | grep -Eo 'inet (addr:)?([0-9]*\.){3}[0-9]*' | grep -Eo '([0-9]*\.){3}[0-9]*' | grep -v '127.0.0.1' | head -1)
  fi
  printf '%s\n' "$addr"
}
# ------------------------------------------------------------------------------
# verb_ip6()
#
# Configures IPv6 based on IPV6_METHOD variable
# If IPV6_METHOD=disable: disables IPv6 via sysctl
# ------------------------------------------------------------------------------
verb_ip6() {
  set_std_mode # Set STD mode based on VERBOSE
  # Only the value "disable" is acted upon; any other IPV6_METHOD
  # (or unset) leaves the kernel defaults alone.
  if [[ "${IPV6_METHOD:-}" == "disable" ]]; then
    msg_info "Disabling IPv6 (this may affect some services)"
    mkdir -p /etc/sysctl.d
    # Persistent drop-in so the setting survives reboots
    cat >/etc/sysctl.d/99-disable-ipv6.conf <<EOF
# Disable IPv6 (set by community-scripts)
net.ipv6.conf.all.disable_ipv6 = 1
net.ipv6.conf.default.disable_ipv6 = 1
net.ipv6.conf.lo.disable_ipv6 = 1
EOF
    # Apply immediately for the current boot
    $STD sysctl -p /etc/sysctl.d/99-disable-ipv6.conf
    # For OpenRC, ensure sysctl runs at boot (it does not read
    # sysctl.d automatically unless the sysctl service is enabled)
    if [[ "$INIT_SYSTEM" == "openrc" ]]; then
      $STD rc-update add sysctl default 2>/dev/null || true
    fi
    msg_ok "Disabled IPv6"
  fi
}
# ------------------------------------------------------------------------------
# setting_up_container()
#
# Initial container setup:
# - Verifies network connectivity
# - Removes Python EXTERNALLY-MANAGED restrictions
# - Disables network wait services
# ------------------------------------------------------------------------------
setting_up_container() {
  # Initial container setup: wait (with retries) until the container has
  # an IPv4 address, abort when it never gets one, then apply baseline
  # OS tweaks (PEP 668 marker removal, faster boot).
  msg_info "Setting up Container OS"
  local tries="$RETRY_NUM"
  while ((tries > 0)); do
    [[ -n "$(get_ip)" ]] && break
    echo 1>&2 -en "${CROSS}${RD} No Network! "
    sleep "$RETRY_EVERY"
    ((tries--))
  done
  if [[ -z "$(get_ip)" ]]; then
    echo 1>&2 -e "\n${CROSS}${RD} No Network After $RETRY_NUM Tries${CL}"
    echo -e "${NETWORK}Check Network Settings"
    exit 1
  fi
  # Remove Python EXTERNALLY-MANAGED restriction (PEP 668) so pip may
  # install system-wide (Debian 12+, Ubuntu 23.04+)
  rm -rf /usr/lib/python3.*/EXTERNALLY-MANAGED 2>/dev/null || true
  # Disable network wait services for faster boot
  if [[ "$INIT_SYSTEM" == "systemd" ]]; then
    systemctl disable -q --now systemd-networkd-wait-online.service 2>/dev/null || true
  fi
  msg_ok "Set up Container OS"
  msg_ok "Network Connected: ${BL}$(get_ip)"
}
# ------------------------------------------------------------------------------
# network_check()
#
# Comprehensive network connectivity check for IPv4 and IPv6
# Tests connectivity to DNS servers and verifies DNS resolution
# ------------------------------------------------------------------------------
network_check() {
  # Comprehensive connectivity check: probes well-known public resolvers
  # over IPv4 and IPv6, then verifies DNS resolution for the Git hosts
  # the installer depends on. Prompts the user when fully offline and
  # aborts when the Git hosts cannot be resolved.
  set +e      # run every probe even when some fail
  trap - ERR  # temporarily drop the global error trap
  local ipv4_connected=false
  local ipv6_connected=false
  sleep 1
  # IPv4: any one of Cloudflare, Google or Quad9 answering is enough
  if ping -c 1 -W 1 1.1.1.1 &>/dev/null || ping -c 1 -W 1 8.8.8.8 &>/dev/null || ping -c 1 -W 1 9.9.9.9 &>/dev/null; then
    msg_ok "IPv4 Internet Connected"
    ipv4_connected=true
  else
    msg_error "IPv4 Internet Not Connected"
  fi
  # IPv6: only tested when a ping6 binary exists on this image
  if command -v ping6 &>/dev/null; then
    if ping6 -c 1 -W 1 2606:4700:4700::1111 &>/dev/null || ping6 -c 1 -W 1 2001:4860:4860::8888 &>/dev/null; then
      msg_ok "IPv6 Internet Connected"
      ipv6_connected=true
    else
      msg_error "IPv6 Internet Not Connected"
    fi
  fi
  # With no connectivity at all, let the user decide whether to continue
  if [[ $ipv4_connected == false && $ipv6_connected == false ]]; then
    read -r -p "No Internet detected, would you like to continue anyway? <y/N> " prompt
    if [[ "${prompt,,}" =~ ^(y|yes)$ ]]; then
      echo -e "${INFO}${RD}Expect Issues Without Internet${CL}"
    else
      echo -e "${NETWORK}Check Network Settings"
      exit 1
    fi
  fi
  # DNS resolution for the hosts scripts are fetched from; failure here
  # is fatal because installation cannot proceed without them
  local GIT_HOSTS=("github.com" "raw.githubusercontent.com" "git.community-scripts.org")
  local GIT_STATUS="Git DNS:"
  local DNS_FAILED=false
  for HOST in "${GIT_HOSTS[@]}"; do
    local RESOLVEDIP
    RESOLVEDIP=$(getent hosts "$HOST" 2>/dev/null | awk '{ print $1 }' | head -n1)
    if [[ -z "$RESOLVEDIP" ]]; then
      GIT_STATUS+=" $HOST:(${DNSFAIL:-FAIL})"
      DNS_FAILED=true
    else
      GIT_STATUS+=" $HOST:(${DNSOK:-OK})"
    fi
  done
  if [[ "$DNS_FAILED" == true ]]; then
    fatal "$GIT_STATUS"
  else
    msg_ok "$GIT_STATUS"
  fi
  # Restore the strict error handling disabled at the top of the function
  set -e
  trap 'error_handler $LINENO "$BASH_COMMAND"' ERR
}
# ==============================================================================
# SECTION 5: OS UPDATE & PACKAGE MANAGEMENT
# ==============================================================================
# ------------------------------------------------------------------------------
# update_os()
#
# Updates container OS and sources appropriate tools.func
# ------------------------------------------------------------------------------
update_os() {
  # Update the container OS via the package-manager abstraction and then
  # source the OS-appropriate tools.func helper library.
  msg_info "Updating Container OS"
  # Configure APT cacher proxy if enabled (Debian/Ubuntu only).
  # The generated helper probes the apt-cacher-ng port (3142) at APT
  # runtime and falls back to DIRECT when the cacher is unreachable.
  if [[ "$PKG_MANAGER" == "apt" && "${CACHER:-}" == "yes" ]]; then
    echo 'Acquire::http::Proxy-Auto-Detect "/usr/local/bin/apt-proxy-detect.sh";' >/etc/apt/apt.conf.d/00aptproxy
    cat <<EOF >/usr/local/bin/apt-proxy-detect.sh
#!/bin/bash
if nc -w1 -z "${CACHER_IP}" 3142; then
echo -n "http://${CACHER_IP}:3142"
else
echo -n "DIRECT"
fi
EOF
    chmod +x /usr/local/bin/apt-proxy-detect.sh
  fi
  # Update and upgrade via the package-manager abstraction helpers
  pkg_update
  pkg_upgrade
  # Remove Python EXTERNALLY-MANAGED restriction (PEP 668) so pip may
  # install system-wide (Debian 12+, Ubuntu 23.04+)
  rm -rf /usr/lib/python3.*/EXTERNALLY-MANAGED 2>/dev/null || true
  msg_ok "Updated Container OS"
  # Source the matching tools library: Alpine uses its own variant
  case "$OS_FAMILY" in
  alpine)
    source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/alpine-tools.func)
    ;;
  *)
    source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/tools.func)
    ;;
  esac
}
# ==============================================================================
# SECTION 6: MOTD & SSH CONFIGURATION
# ==============================================================================
# ------------------------------------------------------------------------------
# motd_ssh()
#
# Configures Message of the Day and SSH settings
# ------------------------------------------------------------------------------
motd_ssh() {
  # Configure the login banner (MOTD) and, when SSH_ROOT=yes, enable
  # root login over SSH and (re)start the SSH daemon.
  # Set terminal to 256-color mode (idempotent append to /root/.bashrc)
  grep -qxF "export TERM='xterm-256color'" /root/.bashrc 2>/dev/null || echo "export TERM='xterm-256color'" >>/root/.bashrc
  # Get OS information; /etc/os-release overrides the detected globals
  local os_name="$OS_TYPE"
  local os_version="$OS_VERSION"
  if [[ -f /etc/os-release ]]; then
    os_name=$(grep ^NAME /etc/os-release | cut -d= -f2 | tr -d '"')
    os_version=$(grep ^VERSION_ID /etc/os-release | cut -d= -f2 | tr -d '"')
  fi
  # Create MOTD profile script. Color variables and os_name/os_version
  # expand NOW (at write time); the hostname/IP lookups are escaped so
  # they run at each login instead.
  local PROFILE_FILE="/etc/profile.d/00_lxc-details.sh"
  cat >"$PROFILE_FILE" <<EOF
echo ""
echo -e "${BOLD:-}${YW:-}${APPLICATION:-Container} LXC Container - DEV Repository${CL:-}"
echo -e "${RD:-}WARNING: This is a DEVELOPMENT version (ProxmoxVED). Do NOT use in production!${CL:-}"
echo -e "${YW:-} OS: ${GN:-}${os_name} - Version: ${os_version}${CL:-}"
echo -e "${YW:-} Hostname: ${GN:-}\$(hostname)${CL:-}"
echo -e "${YW:-} IP Address: ${GN:-}\$(hostname -I 2>/dev/null | awk '{print \$1}' || ip -4 addr show scope global | grep -oP '(?<=inet\s)\\d+(\\.\\d+){3}' | head -1)${CL:-}"
echo -e "${YW:-} Repository: ${GN:-}https://github.com/community-scripts/ProxmoxVED${CL:-}"
echo ""
EOF
  # Disable default MOTD scripts (Debian/Ubuntu) so only ours prints
  [[ -d /etc/update-motd.d ]] && chmod -x /etc/update-motd.d/* 2>/dev/null || true
  # Configure SSH root access if requested
  if [[ "${SSH_ROOT:-}" == "yes" ]]; then
    local sshd_config="/etc/ssh/sshd_config"
    if [[ -f "$sshd_config" ]]; then
      # Both the commented and the active default are rewritten
      sed -i "s/#PermitRootLogin prohibit-password/PermitRootLogin yes/g" "$sshd_config"
      sed -i "s/PermitRootLogin prohibit-password/PermitRootLogin yes/g" "$sshd_config"
      # The daemon's service name differs per distro (sshd vs ssh)
      case "$INIT_SYSTEM" in
      systemd)
        svc_restart sshd 2>/dev/null || svc_restart ssh 2>/dev/null || true
        ;;
      openrc)
        svc_enable sshd 2>/dev/null || true
        svc_start sshd 2>/dev/null || true
        ;;
      *)
        svc_restart sshd 2>/dev/null || true
        ;;
      esac
    fi
  fi
}
# ==============================================================================
# SECTION 7: CONTAINER CUSTOMIZATION
# ==============================================================================
# ------------------------------------------------------------------------------
# customize()
#
# Customizes container for passwordless login and creates update script
# ------------------------------------------------------------------------------
customize() {
  # Customize the container: passwordless root auto-login on the console
  # (only when no root password was configured), an /usr/bin/update
  # helper, and optional SSH authorized_keys injection.
  if [[ "${PASSWORD:-}" == "" ]]; then
    msg_info "Customizing Container"
    # Remove root password for auto-login
    passwd -d root &>/dev/null || true
    case "$INIT_SYSTEM" in
    systemd)
      # Configure getty for auto-login via a unit drop-in override
      local GETTY_OVERRIDE="/etc/systemd/system/container-getty@1.service.d/override.conf"
      mkdir -p "$(dirname "$GETTY_OVERRIDE")"
      # The empty ExecStart= line clears the unit's original command
      # before the replacement is set
      cat >"$GETTY_OVERRIDE" <<EOF
[Service]
ExecStart=
ExecStart=-/sbin/agetty --autologin root --noclear --keep-baud tty%I 115200,38400,9600 \$TERM
EOF
      svc_reload_daemon
      # Derive the unit name (container-getty@1.service) from the .d dir
      systemctl restart "$(basename "$(dirname "$GETTY_OVERRIDE")" | sed 's/\.d//')" 2>/dev/null || true
      ;;
    openrc)
      # Alpine/Gentoo: use inittab for auto-login
      pkg_install util-linux 2>/dev/null || true
      # Create persistent autologin boot script; /etc/local.d scripts run
      # on every boot, re-applying the inittab change
      mkdir -p /etc/local.d
      cat <<'EOFSCRIPT' >/etc/local.d/autologin.start
#!/bin/sh
sed -i 's|^tty1::respawn:.*|tty1::respawn:/sbin/agetty --autologin root --noclear tty1 38400 linux|' /etc/inittab
kill -HUP 1
EOFSCRIPT
      chmod +x /etc/local.d/autologin.start
      rc-update add local 2>/dev/null || true
      # Apply immediately for the current boot as well
      /etc/local.d/autologin.start 2>/dev/null || true
      touch /root/.hushlogin
      ;;
    sysvinit)
      # Devuan/older systems: rewrite the tty1 getty line directly and
      # ask init to re-read inittab
      if [[ -f /etc/inittab ]]; then
        sed -i 's|^1:2345:respawn:/sbin/getty.*|1:2345:respawn:/sbin/agetty --autologin root tty1 38400 linux|' /etc/inittab
        telinit q 2>/dev/null || true
      fi
      ;;
    esac
    msg_ok "Customized Container"
  fi
  # Create update script that re-runs this app's ct script from the repo
  echo "bash -c \"\$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/ct/${app}.sh)\"" >/usr/bin/update
  chmod +x /usr/bin/update
  # Inject SSH authorized keys if provided
  if [[ -n "${SSH_AUTHORIZED_KEY:-}" ]]; then
    mkdir -p /root/.ssh
    echo "${SSH_AUTHORIZED_KEY}" >/root/.ssh/authorized_keys
    chmod 700 /root/.ssh
    chmod 600 /root/.ssh/authorized_keys
  fi
}
# ==============================================================================
# SECTION 8: UTILITY FUNCTIONS
# ==============================================================================
# ------------------------------------------------------------------------------
# validate_tz(timezone)
#
# Validates if a timezone is valid
# Returns: 0 if valid, 1 if invalid
# ------------------------------------------------------------------------------
validate_tz() {
  # Validate a timezone identifier against the system tzdata database.
  # Returns 0 when valid, 1 otherwise.
  #
  # Rejects empty values, absolute paths and any ".." component before
  # the filesystem check: without this, a value like "../../../etc/passwd"
  # resolves to a regular file under /usr/share/zoneinfo/ and would pass,
  # letting set_timezone() symlink /etc/localtime outside the tzdata tree.
  local tz="$1"
  [[ -n "$tz" && "$tz" != /* && "$tz" != *..* ]] || return 1
  [[ -f "/usr/share/zoneinfo/$tz" ]]
}
# ------------------------------------------------------------------------------
# set_timezone(timezone)
#
# Sets container timezone
# ------------------------------------------------------------------------------
set_timezone() {
  # Apply a timezone container-wide after validating it; warns and does
  # nothing for an invalid zone name.
  local zone="$1"
  if ! validate_tz "$zone"; then
    msg_warn "Invalid timezone: $zone"
    return
  fi
  ln -sf "/usr/share/zoneinfo/$zone" /etc/localtime
  echo "$zone" >/etc/timezone 2>/dev/null || true
  # Debian-family keeps additional tzdata state; refresh when possible
  if [[ "$PKG_MANAGER" == "apt" ]]; then
    dpkg-reconfigure -f noninteractive tzdata 2>/dev/null || true
  fi
  msg_ok "Timezone set to $zone"
}
# ------------------------------------------------------------------------------
# os_info()
#
# Prints detected OS information (for debugging)
# ------------------------------------------------------------------------------
os_info() {
  # Dump the detected OS/environment globals, one per line (debug aid).
  printf '%s\n' \
    "OS Type: $OS_TYPE" \
    "OS Family: $OS_FAMILY" \
    "OS Version: $OS_VERSION" \
    "Pkg Manager: $PKG_MANAGER" \
    "Init System: $INIT_SYSTEM"
}

34
misc/data/.gitignore vendored
View File

@@ -1,34 +0,0 @@
# If you prefer the allow list template instead of the deny list, see community template:
# https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore
#
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib
telemetry-service
migration/migrate
# Test binary, built with `go test -c`
*.test
# Code coverage profiles and other test artifacts
*.out
coverage.*
*.coverprofile
profile.cov
# Dependency directories (remove the comment below to include it)
# vendor/
# Go workspace file
go.work
go.work.sum
# env file
.env
# Editor/IDE
# .idea/
# .vscode/

View File

@@ -1,52 +0,0 @@
FROM golang:1.25-alpine AS build
WORKDIR /src
COPY go.mod go.sum* ./
RUN go mod download 2>/dev/null || true
COPY . .
RUN go build -trimpath -ldflags "-s -w" -o /out/telemetry-service .
RUN go build -trimpath -ldflags "-s -w" -o /out/migrate ./migration/migrate.go
FROM alpine:3.23
RUN apk add --no-cache ca-certificates tzdata
WORKDIR /app
COPY --from=build /out/telemetry-service /app/telemetry-service
COPY --from=build /out/migrate /app/migrate
COPY entrypoint.sh /app/entrypoint.sh
RUN chmod +x /app/entrypoint.sh /app/migrate
# Service config
ENV LISTEN_ADDR=":8080"
ENV MAX_BODY_BYTES="1024"
ENV RATE_LIMIT_RPM="60"
ENV RATE_BURST="20"
ENV UPSTREAM_TIMEOUT_MS="4000"
ENV ENABLE_REQUEST_LOGGING="false"
# Cache config (optional)
ENV ENABLE_CACHE="true"
ENV CACHE_TTL_SECONDS="300"
ENV ENABLE_REDIS="false"
# ENV REDIS_URL="redis://localhost:6379"
# Alert config (optional)
ENV ALERT_ENABLED="false"
# ENV SMTP_HOST=""
# ENV SMTP_PORT="587"
# ENV SMTP_USER=""
# ENV SMTP_PASSWORD=""
# ENV SMTP_FROM="telemetry@proxmoxved.local"
# ENV SMTP_TO=""
# ENV SMTP_USE_TLS="false"
ENV ALERT_FAILURE_THRESHOLD="20.0"
ENV ALERT_CHECK_INTERVAL_MIN="15"
ENV ALERT_COOLDOWN_MIN="60"
# Migration config (optional)
ENV RUN_MIGRATION="false"
ENV MIGRATION_REQUIRED="false"
ENV MIGRATION_SOURCE_URL="https://api.htl-braunau.at/dev/data"
EXPOSE 8080
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s \
CMD wget -q --spider http://localhost:8080/healthz || exit 1
ENTRYPOINT ["/app/entrypoint.sh"]

View File

@@ -1,21 +0,0 @@
MIT License
Copyright (c) 2026 Community Scripts
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@@ -1,81 +0,0 @@
# Telemetry Service
A standalone Go microservice that collects anonymous telemetry data from [ProxmoxVE](https://github.com/community-scripts/ProxmoxVE) and [ProxmoxVED](https://github.com/community-scripts/ProxmoxVED) script installations.
## Overview
This service acts as a telemetry ingestion layer between the bash installation scripts and a PocketBase backend. When users run scripts from the ProxmoxVE/ProxmoxVED repositories, optional anonymous usage data is sent here for aggregation and analysis.
**What gets collected:**
- Script name and installation status (success/failed)
- Container/VM type and resource allocation (CPU, RAM, disk)
- OS type and version
- Proxmox VE version
- Anonymous session ID (randomly generated UUID)
**What is NOT collected:**
- IP addresses (not logged, not stored)
- Hostnames or domain names
- User credentials or personal information
- Hardware identifiers (MAC addresses, serial numbers)
- Network configuration or internal IPs
- Any data that could identify a person or system
**What this enables:**
- Understanding which scripts are most popular
- Identifying scripts with high failure rates
- Tracking resource allocation trends
- Improving script quality based on real-world data
## Features
- **Telemetry Ingestion** - Receives and validates telemetry data from bash scripts
- **PocketBase Integration** - Stores data in PocketBase collections
- **Rate Limiting** - Configurable per-IP rate limiting to prevent abuse
- **Caching** - In-memory or Redis-backed caching support
- **Email Alerts** - SMTP-based alerts when failure rates exceed thresholds
- **Dashboard** - Built-in HTML dashboard for telemetry visualization
- **Migration Tool** - Migrate data from external sources to PocketBase
## Architecture
```
┌─────────────────┐ ┌───────────────────┐ ┌────────────┐
│ Bash Scripts │────▶│ Telemetry Service │────▶│ PocketBase │
│ (ProxmoxVE/VED) │ │ (this repo) │ │ Database │
└─────────────────┘ └───────────────────┘ └────────────┘
```
## Project Structure
```
├── service.go # Main service, HTTP handlers, rate limiting
├── cache.go # In-memory and Redis caching
├── alerts.go # SMTP alert system
├── dashboard.go # Dashboard HTML generation
├── migration/
│ ├── migrate.go # Data migration tool
│ └── migrate.sh # Migration shell script
├── Dockerfile # Container build
├── entrypoint.sh # Container entrypoint with migration support
└── go.mod # Go module definition
```
## Related Projects
- [ProxmoxVE](https://github.com/community-scripts/ProxmoxVE) - Proxmox VE Helper Scripts
- [ProxmoxVED](https://github.com/community-scripts/ProxmoxVED) - Proxmox VE Helper Scripts (Dev)
## Privacy & Compliance
This service is designed with privacy in mind and is **GDPR/DSGVO compliant**:
-**No personal data** - Only anonymous technical metrics are collected
-**No IP logging** - Request logging is disabled by default, IPs are never stored
-**Transparent** - All collected fields are documented and the code is open source
-**No tracking** - Session IDs are randomly generated and cannot be linked to users
-**No third parties** - Data is only stored in our self-hosted PocketBase instance
## License
MIT License - see [LICENSE](LICENSE) file.

View File

@@ -1,853 +0,0 @@
package main
import (
"bytes"
"context"
"crypto/tls"
"fmt"
"log"
"net/smtp"
"strings"
"sync"
"time"
)
// AlertConfig holds SMTP alert configuration
type AlertConfig struct {
Enabled bool
SMTPHost string
SMTPPort int
SMTPUser string
SMTPPassword string
SMTPFrom string
SMTPTo []string
UseTLS bool
FailureThreshold float64 // Alert when failure rate exceeds this (e.g., 20.0 = 20%)
CheckInterval time.Duration // How often to check
Cooldown time.Duration // Minimum time between alerts
// Weekly Report settings
WeeklyReportEnabled bool // Enable weekly summary reports
WeeklyReportDay time.Weekday // Day to send report (0=Sunday, 1=Monday, etc.)
WeeklyReportHour int // Hour to send report (0-23)
}
// WeeklyReportData contains aggregated weekly statistics
type WeeklyReportData struct {
CalendarWeek int
Year int
StartDate time.Time
EndDate time.Time
TotalInstalls int
SuccessCount int
FailedCount int
SuccessRate float64
TopApps []AppStat
TopFailedApps []AppStat
ComparedToPrev WeekComparison
OsDistribution map[string]int
TypeDistribution map[string]int
}
// AppStat represents statistics for a single app
type AppStat struct {
Name string
Total int
Failed int
FailureRate float64
}
// WeekComparison shows changes compared to previous week
type WeekComparison struct {
InstallsChange int // Difference in total installs
InstallsPercent float64 // Percentage change
FailRateChange float64 // Change in failure rate (percentage points)
}
// Alerter handles alerting functionality
type Alerter struct {
cfg AlertConfig
lastAlertAt time.Time
lastWeeklyReport time.Time
mu sync.Mutex
pb *PBClient
lastStats alertStats
alertHistory []AlertEvent
}
type alertStats struct {
successCount int
failedCount int
checkedAt time.Time
}
// AlertEvent records an alert that was sent
type AlertEvent struct {
Timestamp time.Time `json:"timestamp"`
Type string `json:"type"`
Message string `json:"message"`
FailureRate float64 `json:"failure_rate,omitempty"`
}
// NewAlerter creates a new alerter instance
func NewAlerter(cfg AlertConfig, pb *PBClient) *Alerter {
return &Alerter{
cfg: cfg,
pb: pb,
alertHistory: make([]AlertEvent, 0),
}
}
// Start begins the alert monitoring loop
func (a *Alerter) Start() {
if !a.cfg.Enabled {
log.Println("INFO: alerting disabled")
return
}
if a.cfg.SMTPHost == "" || len(a.cfg.SMTPTo) == 0 {
log.Println("WARN: alerting enabled but SMTP not configured")
return
}
go a.monitorLoop()
log.Printf("INFO: alert monitoring started (threshold: %.1f%%, interval: %v)", a.cfg.FailureThreshold, a.cfg.CheckInterval)
// Start weekly report scheduler if enabled
if a.cfg.WeeklyReportEnabled {
go a.weeklyReportLoop()
log.Printf("INFO: weekly report scheduler started (day: %s, hour: %02d:00)", a.cfg.WeeklyReportDay, a.cfg.WeeklyReportHour)
}
}
func (a *Alerter) monitorLoop() {
ticker := time.NewTicker(a.cfg.CheckInterval)
defer ticker.Stop()
for range ticker.C {
a.checkAndAlert()
}
}
func (a *Alerter) checkAndAlert() {
ctx, cancel := newTimeoutContext(10 * time.Second)
defer cancel()
// Fetch last hour's data
data, err := a.pb.FetchDashboardData(ctx, 1, "ProxmoxVE")
if err != nil {
log.Printf("WARN: alert check failed: %v", err)
return
}
// Calculate current failure rate
total := data.SuccessCount + data.FailedCount
if total < 10 {
// Not enough data to determine rate
return
}
failureRate := float64(data.FailedCount) / float64(total) * 100
// Check if we should alert
if failureRate >= a.cfg.FailureThreshold {
a.maybeSendAlert(failureRate, data.FailedCount, total)
}
}
func (a *Alerter) maybeSendAlert(rate float64, failed, total int) {
a.mu.Lock()
defer a.mu.Unlock()
// Check cooldown
if time.Since(a.lastAlertAt) < a.cfg.Cooldown {
return
}
// Send alert
subject := fmt.Sprintf("[ProxmoxVED Alert] High Failure Rate: %.1f%%", rate)
body := fmt.Sprintf(`ProxmoxVE Helper Scripts - Telemetry Alert
⚠️ High installation failure rate detected!
Current Statistics (last 24h):
- Failure Rate: %.1f%%
- Failed Installations: %d
- Total Installations: %d
- Threshold: %.1f%%
Time: %s
Please check the dashboard for more details.
---
This is an automated alert from the telemetry service.
`, rate, failed, total, a.cfg.FailureThreshold, time.Now().Format(time.RFC1123))
if err := a.sendEmail(subject, body); err != nil {
log.Printf("ERROR: failed to send alert email: %v", err)
return
}
a.lastAlertAt = time.Now()
a.alertHistory = append(a.alertHistory, AlertEvent{
Timestamp: time.Now(),
Type: "high_failure_rate",
Message: fmt.Sprintf("Failure rate %.1f%% exceeded threshold %.1f%%", rate, a.cfg.FailureThreshold),
FailureRate: rate,
})
// Keep only last 100 alerts
if len(a.alertHistory) > 100 {
a.alertHistory = a.alertHistory[len(a.alertHistory)-100:]
}
log.Printf("ALERT: sent high failure rate alert (%.1f%%)", rate)
}
func (a *Alerter) sendEmail(subject, body string) error {
return a.sendEmailWithType(subject, body, "text/plain")
}
func (a *Alerter) sendHTMLEmail(subject, body string) error {
return a.sendEmailWithType(subject, body, "text/html")
}
func (a *Alerter) sendEmailWithType(subject, body, contentType string) error {
// Build message
var msg bytes.Buffer
msg.WriteString(fmt.Sprintf("From: %s\r\n", a.cfg.SMTPFrom))
msg.WriteString(fmt.Sprintf("To: %s\r\n", strings.Join(a.cfg.SMTPTo, ", ")))
msg.WriteString(fmt.Sprintf("Subject: %s\r\n", subject))
msg.WriteString("MIME-Version: 1.0\r\n")
msg.WriteString(fmt.Sprintf("Content-Type: %s; charset=UTF-8\r\n", contentType))
msg.WriteString("\r\n")
msg.WriteString(body)
addr := fmt.Sprintf("%s:%d", a.cfg.SMTPHost, a.cfg.SMTPPort)
var auth smtp.Auth
if a.cfg.SMTPUser != "" && a.cfg.SMTPPassword != "" {
auth = smtp.PlainAuth("", a.cfg.SMTPUser, a.cfg.SMTPPassword, a.cfg.SMTPHost)
}
if a.cfg.UseTLS {
// TLS connection
tlsConfig := &tls.Config{
ServerName: a.cfg.SMTPHost,
}
conn, err := tls.Dial("tcp", addr, tlsConfig)
if err != nil {
return fmt.Errorf("TLS dial failed: %w", err)
}
defer conn.Close()
client, err := smtp.NewClient(conn, a.cfg.SMTPHost)
if err != nil {
return fmt.Errorf("SMTP client failed: %w", err)
}
defer client.Close()
if auth != nil {
if err := client.Auth(auth); err != nil {
return fmt.Errorf("SMTP auth failed: %w", err)
}
}
if err := client.Mail(a.cfg.SMTPFrom); err != nil {
return fmt.Errorf("SMTP MAIL failed: %w", err)
}
for _, to := range a.cfg.SMTPTo {
if err := client.Rcpt(to); err != nil {
return fmt.Errorf("SMTP RCPT failed: %w", err)
}
}
w, err := client.Data()
if err != nil {
return fmt.Errorf("SMTP DATA failed: %w", err)
}
_, err = w.Write(msg.Bytes())
if err != nil {
return fmt.Errorf("SMTP write failed: %w", err)
}
return w.Close()
}
// Non-TLS (STARTTLS)
return smtp.SendMail(addr, auth, a.cfg.SMTPFrom, a.cfg.SMTPTo, msg.Bytes())
}
// GetAlertHistory returns recent alert events
func (a *Alerter) GetAlertHistory() []AlertEvent {
a.mu.Lock()
defer a.mu.Unlock()
result := make([]AlertEvent, len(a.alertHistory))
copy(result, a.alertHistory)
return result
}
// TestAlert sends a test alert email
func (a *Alerter) TestAlert() error {
if !a.cfg.Enabled || a.cfg.SMTPHost == "" {
return fmt.Errorf("alerting not configured")
}
subject := "[ProxmoxVED] Test Alert"
body := fmt.Sprintf(`This is a test alert from ProxmoxVE Helper Scripts telemetry service.
If you received this email, your alert configuration is working correctly.
Time: %s
SMTP Host: %s
Recipients: %s
---
This is an automated test message.
`, time.Now().Format(time.RFC1123), a.cfg.SMTPHost, strings.Join(a.cfg.SMTPTo, ", "))
return a.sendEmail(subject, body)
}
// Helper for timeout context
func newTimeoutContext(d time.Duration) (context.Context, context.CancelFunc) {
return context.WithTimeout(context.Background(), d)
}
// weeklyReportLoop checks periodically if it's time to send the weekly report
func (a *Alerter) weeklyReportLoop() {
// Check every hour
ticker := time.NewTicker(1 * time.Hour)
defer ticker.Stop()
for range ticker.C {
a.checkAndSendWeeklyReport()
}
}
// checkAndSendWeeklyReport sends the weekly report if it's the right time
func (a *Alerter) checkAndSendWeeklyReport() {
now := time.Now()
// Check if it's the right day and hour
if now.Weekday() != a.cfg.WeeklyReportDay || now.Hour() != a.cfg.WeeklyReportHour {
return
}
a.mu.Lock()
// Check if we already sent a report this week
_, lastWeek := a.lastWeeklyReport.ISOWeek()
_, currentWeek := now.ISOWeek()
if a.lastWeeklyReport.Year() == now.Year() && lastWeek == currentWeek {
a.mu.Unlock()
return
}
a.mu.Unlock()
// Send the weekly report
if err := a.SendWeeklyReport(); err != nil {
log.Printf("ERROR: failed to send weekly report: %v", err)
}
}
// SendWeeklyReport generates and sends the weekly summary email
func (a *Alerter) SendWeeklyReport() error {
if !a.cfg.Enabled || a.cfg.SMTPHost == "" {
return fmt.Errorf("alerting not configured")
}
ctx, cancel := newTimeoutContext(30 * time.Second)
defer cancel()
// Get data for the past week
reportData, err := a.fetchWeeklyReportData(ctx)
if err != nil {
return fmt.Errorf("failed to fetch weekly data: %w", err)
}
// Generate email content
subject := fmt.Sprintf("[ProxmoxVED] Weekly Report - Week %d, %d", reportData.CalendarWeek, reportData.Year)
body := a.generateWeeklyReportHTML(reportData)
if err := a.sendHTMLEmail(subject, body); err != nil {
return fmt.Errorf("failed to send email: %w", err)
}
a.mu.Lock()
a.lastWeeklyReport = time.Now()
a.alertHistory = append(a.alertHistory, AlertEvent{
Timestamp: time.Now(),
Type: "weekly_report",
Message: fmt.Sprintf("Weekly report KW %d/%d sent", reportData.CalendarWeek, reportData.Year),
})
a.mu.Unlock()
log.Printf("INFO: weekly report KW %d/%d sent successfully", reportData.CalendarWeek, reportData.Year)
return nil
}
// fetchWeeklyReportData collects data for the weekly report.
//
// It computes the previous ISO calendar week (Monday 00:00:00 through
// Sunday 23:59:59) for labelling, pulls the last 7 days of dashboard data
// for the report body, and a 14-day window to derive a week-over-week
// comparison (previous week = 14-day totals minus the most recent 7 days).
//
// NOTE(review): the dashboard queries are rolling "last N days" windows,
// not the exact Mon-Sun range reported in StartDate/EndDate — confirm this
// approximation is intended.
func (a *Alerter) fetchWeeklyReportData(ctx context.Context) (*WeeklyReportData, error) {
	now := time.Now()
	// Find the most recent Monday. time.Weekday numbers Sunday as 0, so a
	// Sunday yields -1 here and is corrected to 6 days back.
	daysToLastMonday := int(now.Weekday() - time.Monday)
	if daysToLastMonday < 0 {
		daysToLastMonday += 7
	}
	// Go back to the Monday of LAST week, normalized to midnight.
	lastMonday := now.AddDate(0, 0, -daysToLastMonday-7)
	lastMonday = time.Date(lastMonday.Year(), lastMonday.Month(), lastMonday.Day(), 0, 0, 0, 0, lastMonday.Location())
	// ...and its Sunday at 23:59:59.
	lastSunday := lastMonday.AddDate(0, 0, 6)
	lastSunday = time.Date(lastSunday.Year(), lastSunday.Month(), lastSunday.Day(), 23, 59, 59, 0, lastSunday.Location())
	// ISO calendar week of the reported Monday.
	year, week := lastMonday.ISOWeek()
	// Fetch current week's data (7 days)
	currentData, err := a.pb.FetchDashboardData(ctx, 7, "ProxmoxVE")
	if err != nil {
		return nil, fmt.Errorf("failed to fetch current week data: %w", err)
	}
	// Fetch a 14-day window for comparison; failure here is non-fatal.
	prevData, err := a.pb.FetchDashboardData(ctx, 14, "ProxmoxVE")
	if err != nil {
		log.Printf("WARN: could not fetch previous week data: %v", err)
		prevData = nil
	}
	// Build report data
	report := &WeeklyReportData{
		CalendarWeek:     week,
		Year:             year,
		StartDate:        lastMonday,
		EndDate:          lastSunday,
		TotalInstalls:    currentData.TotalInstalls,
		SuccessCount:     currentData.SuccessCount,
		FailedCount:      currentData.FailedCount,
		OsDistribution:   make(map[string]int),
		TypeDistribution: make(map[string]int),
	}
	// Success rate, guarded against division by zero.
	if report.TotalInstalls > 0 {
		report.SuccessRate = float64(report.SuccessCount) / float64(report.TotalInstalls) * 100
	}
	// Top 5 installed apps
	for i, app := range currentData.TopApps {
		if i >= 5 {
			break
		}
		report.TopApps = append(report.TopApps, AppStat{
			Name:  app.App,
			Total: app.Count,
		})
	}
	// Top 5 failed apps
	for i, app := range currentData.FailedApps {
		if i >= 5 {
			break
		}
		report.TopFailedApps = append(report.TopFailedApps, AppStat{
			Name:        app.App,
			Total:       app.TotalCount,
			Failed:      app.FailedCount,
			FailureRate: app.FailureRate,
		})
	}
	// OS distribution
	for _, os := range currentData.OsDistribution {
		report.OsDistribution[os.Os] = os.Count
	}
	// Type distribution (LXC vs VM)
	for _, t := range currentData.TypeStats {
		report.TypeDistribution[t.Type] = t.Count
	}
	// Week-over-week comparison. Cleaned up: the original had a redundant
	// nested `if prevInstalls > 0` and computed an unused prevSuccess value
	// that was suppressed with `_ = prevSuccess`.
	if prevData != nil {
		prevInstalls := prevData.TotalInstalls - currentData.TotalInstalls
		prevFailed := prevData.FailedCount - currentData.FailedCount
		if prevInstalls > 0 {
			prevFailRate := float64(prevFailed) / float64(prevInstalls) * 100
			currentFailRate := 100 - report.SuccessRate
			report.ComparedToPrev.InstallsChange = report.TotalInstalls - prevInstalls
			report.ComparedToPrev.InstallsPercent = float64(report.TotalInstalls-prevInstalls) / float64(prevInstalls) * 100
			report.ComparedToPrev.FailRateChange = currentFailRate - prevFailRate
		}
	}
	return report, nil
}
// generateWeeklyReportHTML creates the HTML email body for the weekly report.
//
// The markup is table-based with inline styles for broad email-client
// compatibility. Sections, in order: gradient header, calendar-week/period
// info, a four-cell stats grid, an optional week-over-week comparison row,
// top-5 installed scripts, top-5 failing scripts, optional type and OS
// distribution sections, and a footer with the generation timestamp.
func (a *Alerter) generateWeeklyReportHTML(data *WeeklyReportData) string {
	var b strings.Builder
	// HTML Email Template
	b.WriteString(`<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
</head>
<body style="margin:0;padding:0;background-color:#f6f9fc;font-family:-apple-system,BlinkMacSystemFont,'Segoe UI',Roboto,Helvetica,Arial,sans-serif;">
<table width="100%" cellpadding="0" cellspacing="0" style="background-color:#f6f9fc;padding:40px 20px;">
<tr><td align="center">
<table width="600" cellpadding="0" cellspacing="0" style="background-color:#ffffff;border-radius:12px;box-shadow:0 4px 6px rgba(0,0,0,0.07);">
<!-- Header -->
<tr>
<td style="background:linear-gradient(135deg,#667eea 0%,#764ba2 100%);padding:32px 40px;border-radius:12px 12px 0 0;">
<h1 style="margin:0;color:#ffffff;font-size:24px;font-weight:600;">📊 Weekly Telemetry Report</h1>
<p style="margin:8px 0 0;color:rgba(255,255,255,0.85);font-size:14px;">ProxmoxVE Helper Scripts</p>
</td>
</tr>
<!-- Week Info -->
<tr>
<td style="padding:24px 40px 0;">
<table width="100%" style="background:#f8fafc;border-radius:8px;padding:16px;">
<tr>
<td style="padding:12px 16px;">
<span style="color:#64748b;font-size:12px;text-transform:uppercase;letter-spacing:0.5px;">Calendar Week</span><br>
<span style="color:#1e293b;font-size:20px;font-weight:600;">Week `)
	// Interpolate calendar week and period into the week-info card.
	b.WriteString(fmt.Sprintf("%d, %d", data.CalendarWeek, data.Year))
	b.WriteString(`</span>
</td>
<td style="padding:12px 16px;text-align:right;">
<span style="color:#64748b;font-size:12px;text-transform:uppercase;letter-spacing:0.5px;">Period</span><br>
<span style="color:#1e293b;font-size:14px;">`)
	b.WriteString(fmt.Sprintf("%s %s", data.StartDate.Format("Jan 02"), data.EndDate.Format("Jan 02, 2006")))
	b.WriteString(`</span>
</td>
</tr>
</table>
</td>
</tr>
<!-- Stats Grid -->
<tr>
<td style="padding:24px 40px;">
<table width="100%" cellpadding="0" cellspacing="0">
<tr>
<td width="25%" style="padding:8px;">
<div style="background:#f0fdf4;border-radius:8px;padding:16px;text-align:center;">
<div style="color:#16a34a;font-size:28px;font-weight:700;">`)
	// Stats grid: total / successful / failed / success-rate cells.
	b.WriteString(fmt.Sprintf("%d", data.TotalInstalls))
	b.WriteString(`</div>
<div style="color:#166534;font-size:11px;text-transform:uppercase;letter-spacing:0.5px;margin-top:4px;">Total</div>
</div>
</td>
<td width="25%" style="padding:8px;">
<div style="background:#f0fdf4;border-radius:8px;padding:16px;text-align:center;">
<div style="color:#16a34a;font-size:28px;font-weight:700;">`)
	b.WriteString(fmt.Sprintf("%d", data.SuccessCount))
	b.WriteString(`</div>
<div style="color:#166534;font-size:11px;text-transform:uppercase;letter-spacing:0.5px;margin-top:4px;">Successful</div>
</div>
</td>
<td width="25%" style="padding:8px;">
<div style="background:#fef2f2;border-radius:8px;padding:16px;text-align:center;">
<div style="color:#dc2626;font-size:28px;font-weight:700;">`)
	b.WriteString(fmt.Sprintf("%d", data.FailedCount))
	b.WriteString(`</div>
<div style="color:#991b1b;font-size:11px;text-transform:uppercase;letter-spacing:0.5px;margin-top:4px;">Failed</div>
</div>
</td>
<td width="25%" style="padding:8px;">
<div style="background:#eff6ff;border-radius:8px;padding:16px;text-align:center;">
<div style="color:#2563eb;font-size:28px;font-weight:700;">`)
	b.WriteString(fmt.Sprintf("%.1f%%", data.SuccessRate))
	b.WriteString(`</div>
<div style="color:#1e40af;font-size:11px;text-transform:uppercase;letter-spacing:0.5px;margin-top:4px;">Success Rate</div>
</div>
</td>
</tr>
</table>
</td>
</tr>
`)
	// Week comparison (only rendered when there is any change to show).
	if data.ComparedToPrev.InstallsChange != 0 || data.ComparedToPrev.FailRateChange != 0 {
		// Pick icon/color by direction: green/up for more installs,
		// red/warning for a rising failure rate.
		installIcon := "📈"
		installColor := "#16a34a"
		if data.ComparedToPrev.InstallsChange < 0 {
			installIcon = "📉"
			installColor = "#dc2626"
		}
		failIcon := "✅"
		failColor := "#16a34a"
		if data.ComparedToPrev.FailRateChange > 0 {
			failIcon = "⚠️"
			failColor = "#dc2626"
		}
		b.WriteString(`<tr>
<td style="padding:0 40px 24px;">
<table width="100%" style="background:#fafafa;border-radius:8px;">
<tr>
<td style="padding:16px;border-right:1px solid #e5e7eb;">
<span style="font-size:12px;color:#64748b;">vs. Previous Week</span><br>
<span style="font-size:16px;color:`)
		b.WriteString(installColor)
		b.WriteString(`;">`)
		b.WriteString(installIcon)
		b.WriteString(fmt.Sprintf(" %+d installations (%.1f%%)", data.ComparedToPrev.InstallsChange, data.ComparedToPrev.InstallsPercent))
		b.WriteString(`</span>
</td>
<td style="padding:16px;">
<span style="font-size:12px;color:#64748b;">Failure Rate Change</span><br>
<span style="font-size:16px;color:`)
		b.WriteString(failColor)
		b.WriteString(`;">`)
		b.WriteString(failIcon)
		b.WriteString(fmt.Sprintf(" %+.1f percentage points", data.ComparedToPrev.FailRateChange))
		b.WriteString(`</span>
</td>
</tr>
</table>
</td>
</tr>
`)
	}
	// Top 5 Installed Scripts
	b.WriteString(`<tr>
<td style="padding:0 40px 24px;">
<h2 style="margin:0 0 16px;font-size:16px;color:#1e293b;border-bottom:2px solid #e2e8f0;padding-bottom:8px;">🏆 Top 5 Installed Scripts</h2>
<table width="100%" cellpadding="0" cellspacing="0" style="font-size:14px;">
`)
	if len(data.TopApps) > 0 {
		for i, app := range data.TopApps {
			// Zebra-stripe alternate rows.
			bgColor := "#ffffff"
			if i%2 == 0 {
				bgColor = "#f8fafc"
			}
			b.WriteString(fmt.Sprintf(`<tr style="background:%s;">
<td style="padding:12px 16px;border-radius:4px 0 0 4px;">
<span style="background:#e0e7ff;color:#4338ca;padding:2px 8px;border-radius:4px;font-size:12px;font-weight:600;">%d</span>
<span style="margin-left:12px;font-weight:500;color:#1e293b;">%s</span>
</td>
<td style="padding:12px 16px;text-align:right;border-radius:0 4px 4px 0;color:#64748b;">%d installs</td>
</tr>`, bgColor, i+1, app.Name, app.Total))
		}
	} else {
		b.WriteString(`<tr><td style="padding:12px 16px;color:#64748b;">No data available</td></tr>`)
	}
	b.WriteString(`</table>
</td>
</tr>
`)
	// Top 5 Failed Scripts
	b.WriteString(`<tr>
<td style="padding:0 40px 24px;">
<h2 style="margin:0 0 16px;font-size:16px;color:#1e293b;border-bottom:2px solid #e2e8f0;padding-bottom:8px;">⚠️ Top 5 Scripts with Highest Failure Rates</h2>
<table width="100%" cellpadding="0" cellspacing="0" style="font-size:14px;">
`)
	if len(data.TopFailedApps) > 0 {
		for i, app := range data.TopFailedApps {
			bgColor := "#ffffff"
			if i%2 == 0 {
				bgColor = "#fef2f2"
			}
			// Badge color scales with severity: red >= 20%, orange >= 10%,
			// yellow below 10%.
			rateColor := "#dc2626"
			if app.FailureRate < 20 {
				rateColor = "#ea580c"
			}
			if app.FailureRate < 10 {
				rateColor = "#ca8a04"
			}
			b.WriteString(fmt.Sprintf(`<tr style="background:%s;">
<td style="padding:12px 16px;border-radius:4px 0 0 4px;">
<span style="font-weight:500;color:#1e293b;">%s</span>
</td>
<td style="padding:12px 16px;text-align:center;color:#64748b;">%d / %d failed</td>
<td style="padding:12px 16px;text-align:right;border-radius:0 4px 4px 0;">
<span style="background:%s;color:#ffffff;padding:4px 10px;border-radius:12px;font-size:12px;font-weight:600;">%.1f%%</span>
</td>
</tr>`, bgColor, app.Name, app.Failed, app.Total, rateColor, app.FailureRate))
		}
	} else {
		b.WriteString(`<tr><td style="padding:12px 16px;color:#16a34a;">🎉 No failures this week!</td></tr>`)
	}
	b.WriteString(`</table>
</td>
</tr>
`)
	// Type Distribution
	if len(data.TypeDistribution) > 0 {
		b.WriteString(`<tr>
<td style="padding:0 40px 24px;">
<h2 style="margin:0 0 16px;font-size:16px;color:#1e293b;border-bottom:2px solid #e2e8f0;padding-bottom:8px;">📦 Distribution by Type</h2>
<table width="100%" cellpadding="0" cellspacing="0">
<tr>
`)
		// NOTE(review): map iteration order is randomized in Go, so the
		// type cells may appear in any order — confirm that is acceptable.
		for t, count := range data.TypeDistribution {
			percent := float64(count) / float64(data.TotalInstalls) * 100
			b.WriteString(fmt.Sprintf(`<td style="padding:8px;">
<div style="background:#f1f5f9;border-radius:8px;padding:16px;text-align:center;">
<div style="font-size:24px;font-weight:700;color:#475569;">%d</div>
<div style="font-size:12px;color:#64748b;margin-top:4px;">%s (%.1f%%)</div>
</div>
</td>`, count, strings.ToUpper(t), percent))
		}
		b.WriteString(`</tr>
</table>
</td>
</tr>
`)
	}
	// OS Distribution
	if len(data.OsDistribution) > 0 {
		b.WriteString(`<tr>
<td style="padding:0 40px 24px;">
<h2 style="margin:0 0 16px;font-size:16px;color:#1e293b;border-bottom:2px solid #e2e8f0;padding-bottom:8px;">🐧 Top Operating Systems</h2>
<table width="100%" cellpadding="0" cellspacing="0" style="font-size:14px;">
`)
		// Sort OS by count (descending) via a simple in-place exchange
		// sort; the list is small so O(n^2) is fine here.
		type osEntry struct {
			name  string
			count int
		}
		var osList []osEntry
		for name, count := range data.OsDistribution {
			osList = append(osList, osEntry{name, count})
		}
		for i := 0; i < len(osList); i++ {
			for j := i + 1; j < len(osList); j++ {
				if osList[j].count > osList[i].count {
					osList[i], osList[j] = osList[j], osList[i]
				}
			}
		}
		// Render at most the top 5 entries as horizontal bars.
		for i, os := range osList {
			if i >= 5 {
				break
			}
			percent := float64(os.count) / float64(data.TotalInstalls) * 100
			barWidth := int(percent * 2) // Scale for visual
			if barWidth > 100 {
				barWidth = 100
			}
			b.WriteString(fmt.Sprintf(`<tr>
<td style="padding:8px 16px;width:100px;">%s</td>
<td style="padding:8px 16px;">
<div style="background:#e2e8f0;border-radius:4px;height:20px;width:100%%;">
<div style="background:linear-gradient(90deg,#667eea,#764ba2);border-radius:4px;height:20px;width:%d%%;"></div>
</div>
</td>
<td style="padding:8px 16px;text-align:right;width:80px;color:#64748b;">%d (%.1f%%)</td>
</tr>`, os.name, barWidth, os.count, percent))
		}
		b.WriteString(`</table>
</td>
</tr>
`)
	}
	// Footer
	b.WriteString(`<tr>
<td style="padding:24px 40px;background:#f8fafc;border-radius:0 0 12px 12px;border-top:1px solid #e2e8f0;">
<p style="margin:0;font-size:12px;color:#64748b;text-align:center;">
Generated `)
	b.WriteString(time.Now().Format("Jan 02, 2006 at 15:04 MST"))
	b.WriteString(`<br>
<a href="https://github.com/community-scripts/ProxmoxVE" style="color:#667eea;text-decoration:none;">ProxmoxVE Helper Scripts</a> —
This is an automated report from the telemetry service.
</p>
</td>
</tr>
</table>
</td></tr>
</table>
</body>
</html>`)
	return b.String()
}
// generateWeeklyReportEmail creates the plain text email body (kept for compatibility)
// with the HTML report. Output is a fixed-width summary of the same data.
func (a *Alerter) generateWeeklyReportEmail(data *WeeklyReportData) string {
	var out strings.Builder
	out.WriteString("ProxmoxVE Helper Scripts - Weekly Telemetry Report\n")
	out.WriteString("==================================================\n\n")
	fmt.Fprintf(&out, "Calendar Week: %d, %d\n", data.CalendarWeek, data.Year)
	fmt.Fprintf(&out, "Period: %s - %s\n\n",
		data.StartDate.Format("Jan 02, 2006"),
		data.EndDate.Format("Jan 02, 2006"))
	out.WriteString("OVERVIEW\n")
	out.WriteString("--------\n")
	fmt.Fprintf(&out, "Total Installations: %d\n", data.TotalInstalls)
	fmt.Fprintf(&out, "Successful: %d\n", data.SuccessCount)
	fmt.Fprintf(&out, "Failed: %d\n", data.FailedCount)
	fmt.Fprintf(&out, "Success Rate: %.1f%%\n\n", data.SuccessRate)
	// Comparison section is only emitted when something actually changed.
	if data.ComparedToPrev.InstallsChange != 0 || data.ComparedToPrev.FailRateChange != 0 {
		out.WriteString("vs. Previous Week:\n")
		fmt.Fprintf(&out, " Installations: %+d (%.1f%%)\n", data.ComparedToPrev.InstallsChange, data.ComparedToPrev.InstallsPercent)
		fmt.Fprintf(&out, " Failure Rate: %+.1f pp\n\n", data.ComparedToPrev.FailRateChange)
	}
	out.WriteString("TOP 5 INSTALLED SCRIPTS\n")
	out.WriteString("-----------------------\n")
	for rank, app := range data.TopApps {
		if rank >= 5 {
			break
		}
		fmt.Fprintf(&out, "%d. %-25s %5d installs\n", rank+1, app.Name, app.Total)
	}
	out.WriteString("\n")
	out.WriteString("TOP 5 FAILED SCRIPTS\n")
	out.WriteString("--------------------\n")
	if len(data.TopFailedApps) == 0 {
		out.WriteString("No failures this week!\n")
	} else {
		for rank, app := range data.TopFailedApps {
			if rank >= 5 {
				break
			}
			fmt.Fprintf(&out, "%d. %-20s %3d/%3d failed (%.1f%%)\n",
				rank+1, app.Name, app.Failed, app.Total, app.FailureRate)
		}
	}
	out.WriteString("\n")
	out.WriteString("---\n")
	fmt.Fprintf(&out, "Generated: %s\n", time.Now().Format("Jan 02, 2006 15:04 MST"))
	out.WriteString("This is an automated report from the telemetry service.\n")
	return out.String()
}
// TestWeeklyReport sends a test weekly report email.
// It is a thin alias for SendWeeklyReport, exposed so a report can be
// triggered manually without waiting for the scheduler.
func (a *Alerter) TestWeeklyReport() error {
	return a.SendWeeklyReport()
}

View File

@@ -1,158 +0,0 @@
package main
import (
	"context"
	"encoding/json"
	"log"
	"strconv"
	"sync"
	"time"

	"github.com/redis/go-redis/v9"
)
// CacheConfig holds cache configuration.
type CacheConfig struct {
	RedisURL    string        // Redis connection URL, parsed by redis.ParseURL
	EnableRedis bool          // attempt a Redis connection when true and RedisURL is set
	DefaultTTL  time.Duration // TTL applied when Set is called with ttl == 0
}
// Cache provides caching functionality with Redis or in-memory fallback.
// When useRedis is false, entries live in memData (guarded by mu) and
// expiry is enforced on read plus a periodic sweep in cleanupLoop.
type Cache struct {
	redis      *redis.Client // nil unless a Redis connection was established
	useRedis   bool          // true when redis is connected and should be used
	defaultTTL time.Duration // fallback TTL for Set calls with ttl == 0
	// In-memory fallback
	mu      sync.RWMutex
	memData map[string]cacheEntry
}
// cacheEntry is one slot of the in-memory fallback cache.
type cacheEntry struct {
	data      []byte    // JSON-encoded value
	expiresAt time.Time // absolute expiry; the entry is dead after this instant
}
// NewCache creates a new cache instance.
//
// When Redis is enabled and reachable it is used as the backing store;
// otherwise the cache transparently falls back to an in-memory map with a
// periodic cleanup goroutine.
//
// BUGFIX: the original returned early when redis.ParseURL or Ping failed,
// which skipped starting the in-memory cleanup goroutine — expired entries
// were then never swept and memory grew unbounded. The cleanup loop is now
// started on every in-memory path.
func NewCache(cfg CacheConfig) *Cache {
	c := &Cache{
		defaultTTL: cfg.DefaultTTL,
		memData:    make(map[string]cacheEntry),
	}
	if cfg.EnableRedis && cfg.RedisURL != "" {
		opts, err := redis.ParseURL(cfg.RedisURL)
		if err != nil {
			log.Printf("WARN: invalid redis URL, using in-memory cache: %v", err)
		} else {
			client := redis.NewClient(opts)
			// Short bounded ping so startup never hangs on a dead Redis.
			ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
			err = client.Ping(ctx).Err()
			cancel()
			if err != nil {
				log.Printf("WARN: redis connection failed, using in-memory cache: %v", err)
			} else {
				c.redis = client
				c.useRedis = true
				log.Printf("INFO: connected to Redis for caching")
			}
		}
	}
	// Start cleanup goroutine for the in-memory cache (any fallback path).
	if !c.useRedis {
		go c.cleanupLoop()
	}
	return c
}
// cleanupLoop periodically sweeps expired entries out of the in-memory map.
// It runs for the lifetime of the process and is only started when Redis is
// not in use.
func (c *Cache) cleanupLoop() {
	ticker := time.NewTicker(5 * time.Minute)
	defer ticker.Stop()
	for range ticker.C {
		cutoff := time.Now()
		c.mu.Lock()
		for key, entry := range c.memData {
			if cutoff.After(entry.expiresAt) {
				delete(c.memData, key)
			}
		}
		c.mu.Unlock()
	}
}
// Get retrieves a cached value into dest and reports whether a live entry
// was found. Expired in-memory entries are treated as misses (they are
// removed later by cleanupLoop).
func (c *Cache) Get(ctx context.Context, key string, dest interface{}) bool {
	if c.useRedis {
		raw, err := c.redis.Get(ctx, key).Bytes()
		if err != nil {
			return false
		}
		return json.Unmarshal(raw, dest) == nil
	}
	// In-memory fallback
	c.mu.RLock()
	item, found := c.memData[key]
	c.mu.RUnlock()
	if !found {
		return false
	}
	if time.Now().After(item.expiresAt) {
		return false
	}
	return json.Unmarshal(item.data, dest) == nil
}
// Set stores value under key with the given TTL; a zero TTL falls back to
// the cache's default. The value is JSON-encoded before storage.
func (c *Cache) Set(ctx context.Context, key string, value interface{}, ttl time.Duration) error {
	if ttl == 0 {
		ttl = c.defaultTTL
	}
	payload, err := json.Marshal(value)
	if err != nil {
		return err
	}
	if c.useRedis {
		return c.redis.Set(ctx, key, payload, ttl).Err()
	}
	// In-memory fallback
	entry := cacheEntry{
		data:      payload,
		expiresAt: time.Now().Add(ttl),
	}
	c.mu.Lock()
	defer c.mu.Unlock()
	c.memData[key] = entry
	return nil
}
// Delete removes a key from the cache; deleting an absent key is a no-op.
func (c *Cache) Delete(ctx context.Context, key string) error {
	if c.useRedis {
		return c.redis.Del(ctx, key).Err()
	}
	c.mu.Lock()
	defer c.mu.Unlock()
	delete(c.memData, key)
	return nil
}
// InvalidateDashboard clears dashboard cache.
// It brute-force deletes the key for every supported day range (1..365);
// deletes for keys that were never set are harmless no-ops, and individual
// delete errors are intentionally ignored (best-effort invalidation).
func (c *Cache) InvalidateDashboard(ctx context.Context) {
	// Delete all dashboard cache keys
	for days := 1; days <= 365; days++ {
		_ = c.Delete(ctx, dashboardCacheKey(days))
	}
}
// dashboardCacheKey builds the cache key for a dashboard query over the
// given number of days, e.g. "dashboard:30".
//
// BUGFIX: the original used string(rune(days)), which converts the int to
// the Unicode code point at that value (days=65 -> "dashboard:A", small
// values -> control characters) rather than its decimal representation,
// producing unreadable and potentially colliding keys.
func dashboardCacheKey(days int) string {
	return "dashboard:" + strconv.Itoa(days)
}

View File

@@ -1,173 +0,0 @@
package main
import (
"context"
"encoding/json"
"fmt"
"log"
"net/http"
"net/url"
"time"
)
// CleanupConfig holds configuration for the cleanup job.
type CleanupConfig struct {
	Enabled         bool          // master switch; Start is a no-op when false
	CheckInterval   time.Duration // How often to run cleanup
	StuckAfterHours int           // Consider "installing" as stuck after X hours
}
// Cleaner handles cleanup of stuck installations: telemetry records left in
// "installing" status longer than the configured threshold are marked
// "unknown".
type Cleaner struct {
	cfg CleanupConfig // job configuration
	pb  *PBClient     // PocketBase client used to query and update records
}
// NewCleaner creates a new cleaner instance bound to the given PocketBase
// client. Call Start to begin the background loop.
func NewCleaner(cfg CleanupConfig, pb *PBClient) *Cleaner {
	return &Cleaner{
		cfg: cfg,
		pb:  pb,
	}
}
// Start begins the cleanup loop in a background goroutine.
// It returns immediately; when the job is disabled it only logs and exits.
func (c *Cleaner) Start() {
	if !c.cfg.Enabled {
		log.Println("INFO: cleanup job disabled")
		return
	}
	go c.cleanupLoop()
	log.Printf("INFO: cleanup job started (interval: %v, stuck after: %d hours)", c.cfg.CheckInterval, c.cfg.StuckAfterHours)
}
// cleanupLoop runs one cleanup pass immediately, then repeats on every tick
// of cfg.CheckInterval for the lifetime of the process.
func (c *Cleaner) cleanupLoop() {
	// Run immediately on start
	c.runCleanup()
	ticker := time.NewTicker(c.cfg.CheckInterval)
	defer ticker.Stop()
	for range ticker.C {
		c.runCleanup()
	}
}
// runCleanup performs one pass: it finds installations stuck in
// "installing" and flips each to "unknown", logging (but not aborting on)
// per-record failures. The whole pass is bounded to 60 seconds.
func (c *Cleaner) runCleanup() {
	ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
	defer cancel()
	stuck, err := c.findStuckInstallations(ctx)
	if err != nil {
		log.Printf("WARN: cleanup - failed to find stuck installations: %v", err)
		return
	}
	if len(stuck) == 0 {
		log.Printf("INFO: cleanup - no stuck installations found")
		return
	}
	log.Printf("INFO: cleanup - found %d stuck installations", len(stuck))
	fixed := 0
	for _, rec := range stuck {
		if err := c.markAsUnknown(ctx, rec.ID); err != nil {
			log.Printf("WARN: cleanup - failed to update record %s: %v", rec.ID, err)
			continue
		}
		fixed++
	}
	log.Printf("INFO: cleanup - updated %d stuck installations to 'unknown'", fixed)
}
// StuckRecord represents a minimal record for cleanup — only the fields
// needed to identify and report on a stuck installation.
type StuckRecord struct {
	ID      string `json:"id"`      // PocketBase record id
	NSAPP   string `json:"nsapp"`   // app/script name
	Created string `json:"created"` // creation timestamp as returned by PocketBase
}
// findStuckInstallations finds records that are stuck in "installing"
// status, i.e. created more than cfg.StuckAfterHours ago (capped at 100
// records per call by the perPage parameter).
func (c *Cleaner) findStuckInstallations(ctx context.Context) ([]StuckRecord, error) {
	if err := c.pb.ensureAuth(ctx); err != nil {
		return nil, err
	}
	// Calculate cutoff time in PocketBase's timestamp layout.
	cutoff := time.Now().Add(-time.Duration(c.cfg.StuckAfterHours) * time.Hour)
	cutoffStr := cutoff.Format("2006-01-02 15:04:05")
	// Build filter: status='installing' AND created < cutoff
	filter := url.QueryEscape(fmt.Sprintf("status='installing' && created<'%s'", cutoffStr))
	req, err := http.NewRequestWithContext(ctx, http.MethodGet,
		fmt.Sprintf("%s/api/collections/%s/records?filter=%s&perPage=100",
			c.pb.baseURL, c.pb.targetColl, filter),
		nil,
	)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Authorization", "Bearer "+c.pb.token)
	resp, err := c.pb.http.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	// BUGFIX: previously the body was decoded without checking the HTTP
	// status, so an auth/permission error (e.g. 401/403 JSON error body)
	// silently produced an empty result set.
	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
		return nil, fmt.Errorf("unexpected HTTP status %d when listing stuck records", resp.StatusCode)
	}
	var result struct {
		Items []StuckRecord `json:"items"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
		return nil, err
	}
	return result.Items, nil
}
// markAsUnknown updates a record's status to "unknown" with an explanatory
// error message, via the shared PocketBase client.
func (c *Cleaner) markAsUnknown(ctx context.Context, recordID string) error {
	update := TelemetryStatusUpdate{
		Status: "unknown",
		Error:  "Installation timed out - no completion status received",
	}
	return c.pb.UpdateTelemetryStatus(ctx, recordID, update)
}
// RunNow triggers an immediate cleanup run (for testing/manual trigger) and
// returns how many records were flipped to "unknown". Per-record update
// failures are logged and skipped rather than aborting the run.
func (c *Cleaner) RunNow() (int, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
	defer cancel()
	stuck, err := c.findStuckInstallations(ctx)
	if err != nil {
		return 0, fmt.Errorf("failed to find stuck installations: %w", err)
	}
	fixed := 0
	for _, rec := range stuck {
		if err := c.markAsUnknown(ctx, rec.ID); err != nil {
			log.Printf("WARN: cleanup - failed to update record %s: %v", rec.ID, err)
			continue
		}
		fixed++
	}
	return fixed, nil
}
// GetStuckCount returns the current number of stuck installations without
// modifying any records.
func (c *Cleaner) GetStuckCount(ctx context.Context) (int, error) {
	stuck, err := c.findStuckInstallations(ctx)
	if err != nil {
		return 0, err
	}
	return len(stuck), nil
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,55 +0,0 @@
#!/bin/sh
# Container entrypoint: optionally migrates legacy telemetry data into
# PocketBase, then execs the telemetry service binary (PID 1 handoff).
#
# Environment:
#   RUN_MIGRATION        - "true" to run the migration step
#   MIGRATION_REQUIRED   - "true" to abort the container if migration fails
#   MIGRATION_SOURCE_URL - legacy API base URL (used by /app/migrate)
#   PB_URL / POCKETBASE_URL, PB_TARGET_COLLECTION / POCKETBASE_COLLECTION
#
# POSIX sh: only `set -e` is used; pipefail is not portable here.
set -e
echo "============================================="
echo " ProxmoxVED Telemetry Service"
echo "============================================="
# Map Coolify ENV names to migration script names
# Coolify uses PB_URL, PB_TARGET_COLLECTION
export POCKETBASE_URL="${POCKETBASE_URL:-$PB_URL}"
export POCKETBASE_COLLECTION="${POCKETBASE_COLLECTION:-$PB_TARGET_COLLECTION}"
# Run migration if enabled
if [ "$RUN_MIGRATION" = "true" ]; then
echo ""
echo "🔄 Migration mode enabled"
echo " Source: $MIGRATION_SOURCE_URL"
echo " Target: $POCKETBASE_URL"
echo " Collection: $POCKETBASE_COLLECTION"
echo ""
# Wait for PocketBase to be ready (up to 30 attempts, 2s apart = ~60s).
echo "⏳ Waiting for PocketBase to be ready..."
RETRIES=30
until wget -q --spider "$POCKETBASE_URL/api/health" 2>/dev/null; do
RETRIES=$((RETRIES - 1))
if [ $RETRIES -le 0 ]; then
echo "❌ PocketBase not reachable after 30 attempts"
# Only a hard failure when the deployment declares migration mandatory.
if [ "$MIGRATION_REQUIRED" = "true" ]; then
exit 1
fi
echo "⚠️ Continuing without migration..."
break
fi
echo " Waiting... ($RETRIES attempts left)"
sleep 2
done
# Re-check health: we may have broken out of the loop without success.
if wget -q --spider "$POCKETBASE_URL/api/health" 2>/dev/null; then
echo "✅ PocketBase is ready"
echo ""
echo "🚀 Starting migration..."
# Migration failure is fatal only when MIGRATION_REQUIRED=true.
/app/migrate || {
if [ "$MIGRATION_REQUIRED" = "true" ]; then
echo "❌ Migration failed!"
exit 1
fi
echo "⚠️ Migration failed, but continuing..."
}
echo ""
fi
fi
echo "🚀 Starting telemetry service..."
# exec replaces the shell so the service receives signals directly.
exec /app/telemetry-service

View File

@@ -1,10 +0,0 @@
module github.com/community-scripts/telemetry-service
go 1.25.5
require github.com/redis/go-redis/v9 v9.17.3
require (
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
)

View File

@@ -1,10 +0,0 @@
github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs=
github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c=
github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA=
github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/redis/go-redis/v9 v9.17.3 h1:fN29NdNrE17KttK5Ndf20buqfDZwGNgoUr9qjl1DQx4=
github.com/redis/go-redis/v9 v9.17.3/go.mod h1:u410H11HMLoB+TP67dz8rL9s6QW2j76l0//kSOd3370=

View File

@@ -1,366 +0,0 @@
// +build ignore
// Migration script to import data from the old API to PocketBase
// Run with: go run migrate.go
package main
import (
"bytes"
"encoding/json"
"fmt"
"io"
"net/http"
"os"
"time"
)
const (
defaultSourceAPI = "https://api.htl-braunau.at/dev/data"
defaultPBURL = "http://localhost:8090"
batchSize = 100
)
var (
sourceAPI string
summaryAPI string
authToken string // PocketBase auth token
)
// OldDataModel represents the data structure from the old API.
// Field names mirror the legacy JSON payload exactly; values are mapped
// into PBRecord by importRecord.
type OldDataModel struct {
	ID         string `json:"id"`
	CtType     int    `json:"ct_type"`
	DiskSize   int    `json:"disk_size"`
	CoreCount  int    `json:"core_count"`
	RamSize    int    `json:"ram_size"`
	OsType     string `json:"os_type"`
	OsVersion  string `json:"os_version"`
	DisableIP6 string `json:"disableip6"`
	NsApp      string `json:"nsapp"`
	Method     string `json:"method"`
	CreatedAt  string `json:"created_at"`
	PveVersion string `json:"pve_version"`
	Status     string `json:"status"` // legacy values include "done"; normalized on import
	RandomID   string `json:"random_id"`
	Type       string `json:"type"` // e.g. "lxc"; defaulted on import when empty
	Error      string `json:"error"`
}
// PBRecord represents the PocketBase record format posted to the target
// collection. It intentionally omits ID and CreatedAt: PocketBase assigns
// both server-side.
type PBRecord struct {
	CtType     int    `json:"ct_type"`
	DiskSize   int    `json:"disk_size"`
	CoreCount  int    `json:"core_count"`
	RamSize    int    `json:"ram_size"`
	OsType     string `json:"os_type"`
	OsVersion  string `json:"os_version"`
	DisableIP6 string `json:"disableip6"`
	NsApp      string `json:"nsapp"`
	Method     string `json:"method"`
	PveVersion string `json:"pve_version"`
	Status     string `json:"status"`
	RandomID   string `json:"random_id"`
	Type       string `json:"type"`
	Error      string `json:"error"`
	// created_at will be set automatically by PocketBase
}
// Summary is the shape of the source API's /summary response; only the
// total entry count is needed to size the paginated fetch.
type Summary struct {
	TotalEntries int `json:"total_entries"`
}
// main drives the migration: it reads source/target configuration from the
// environment, optionally authenticates against PocketBase, then pages
// through the old API importing each record while tallying migrated,
// skipped (duplicate) and failed counts.
func main() {
	// Setup source URLs
	baseURL := os.Getenv("MIGRATION_SOURCE_URL")
	if baseURL == "" {
		baseURL = defaultSourceAPI
	}
	sourceAPI = baseURL + "/paginated"
	summaryAPI = baseURL + "/summary"
	// Support both POCKETBASE_URL and PB_URL (Coolify uses PB_URL)
	pbURL := os.Getenv("POCKETBASE_URL")
	if pbURL == "" {
		pbURL = os.Getenv("PB_URL")
	}
	if pbURL == "" {
		pbURL = defaultPBURL
	}
	// Support both POCKETBASE_COLLECTION and PB_TARGET_COLLECTION
	pbCollection := os.Getenv("POCKETBASE_COLLECTION")
	if pbCollection == "" {
		pbCollection = os.Getenv("PB_TARGET_COLLECTION")
	}
	if pbCollection == "" {
		pbCollection = "telemetry"
	}
	// Auth collection
	authCollection := os.Getenv("PB_AUTH_COLLECTION")
	if authCollection == "" {
		authCollection = "telemetry_service_user"
	}
	// Credentials
	pbIdentity := os.Getenv("PB_IDENTITY")
	pbPassword := os.Getenv("PB_PASSWORD")
	fmt.Println("===========================================")
	fmt.Println(" Data Migration to PocketBase")
	fmt.Println("===========================================")
	fmt.Printf("Source API: %s\n", baseURL)
	fmt.Printf("PocketBase URL: %s\n", pbURL)
	fmt.Printf("Collection: %s\n", pbCollection)
	fmt.Printf("Auth Collection: %s\n", authCollection)
	fmt.Println("-------------------------------------------")
	// Authenticate with PocketBase (optional — some instances allow
	// unauthenticated writes).
	if pbIdentity != "" && pbPassword != "" {
		fmt.Println("🔐 Authenticating with PocketBase...")
		if err := authenticate(pbURL, authCollection, pbIdentity, pbPassword); err != nil {
			fmt.Printf("❌ Authentication failed: %v\n", err)
			os.Exit(1)
		}
		fmt.Println("✅ Authentication successful")
	} else {
		fmt.Println("⚠️ No credentials provided, trying without auth...")
	}
	fmt.Println("-------------------------------------------")
	// Get total count
	summary, err := getSummary()
	if err != nil {
		fmt.Printf("❌ Failed to get summary: %v\n", err)
		os.Exit(1)
	}
	fmt.Printf("📊 Total entries to migrate: %d\n", summary.TotalEntries)
	fmt.Println("-------------------------------------------")
	// Calculate pages (ceiling division)
	totalPages := (summary.TotalEntries + batchSize - 1) / batchSize
	var totalMigrated, totalFailed, totalSkipped int
	for page := 1; page <= totalPages; page++ {
		firstItem := (page-1)*batchSize + 1
		lastItem := min(page*batchSize, summary.TotalEntries)
		fmt.Printf("📦 Fetching page %d/%d (items %d-%d)...\n",
			page, totalPages, firstItem, lastItem)
		data, err := fetchPage(page, batchSize)
		if err != nil {
			fmt.Printf(" ❌ Failed to fetch page %d: %v\n", page, err)
			// BUGFIX: count only the items actually on this page — the last
			// page may be partial, so adding a full batchSize overcounted.
			totalFailed += lastItem - firstItem + 1
			continue
		}
		// Per-page tallies, folded into the totals below.
		pageMigrated, pageSkipped, pageFailed := 0, 0, 0
		for i, record := range data {
			if err := importRecord(pbURL, pbCollection, record); err != nil {
				if isUniqueViolation(err) {
					pageSkipped++
					continue
				}
				fmt.Printf(" ❌ Failed to import record %d: %v\n", (page-1)*batchSize+i+1, err)
				pageFailed++
				continue
			}
			pageMigrated++
		}
		totalMigrated += pageMigrated
		totalSkipped += pageSkipped
		totalFailed += pageFailed
		// BUGFIX: report per-page counts. The old code printed len(data) as
		// "migrated" (which included skipped/failed items) alongside
		// cumulative skip/fail totals, making the line internally
		// inconsistent.
		fmt.Printf(" ✅ Page %d complete (migrated: %d, skipped: %d, failed: %d)\n",
			page, pageMigrated, pageSkipped, pageFailed)
		// Small delay to avoid overwhelming the server
		time.Sleep(100 * time.Millisecond)
	}
	fmt.Println("===========================================")
	fmt.Println(" Migration Complete")
	fmt.Println("===========================================")
	fmt.Printf("✅ Successfully migrated: %d\n", totalMigrated)
	fmt.Printf("⏭️ Skipped (duplicates): %d\n", totalSkipped)
	fmt.Printf("❌ Failed: %d\n", totalFailed)
	fmt.Println("===========================================")
}
// getSummary fetches the source API's entry count used to size the
// paginated migration.
//
// Improvements over the original: uses a client with an explicit timeout
// (plain http.Get could hang forever on a stalled server) and rejects
// non-200 responses instead of silently decoding an error body into a
// zero-valued Summary.
func getSummary() (*Summary, error) {
	client := &http.Client{Timeout: 10 * time.Second}
	resp, err := client.Get(summaryAPI)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		body, _ := io.ReadAll(resp.Body)
		return nil, fmt.Errorf("HTTP %d: %s", resp.StatusCode, string(body))
	}
	var summary Summary
	if err := json.NewDecoder(resp.Body).Decode(&summary); err != nil {
		return nil, err
	}
	return &summary, nil
}
// authenticate logs into a PocketBase auth collection with the given
// identity/password and stores the returned bearer token in the
// package-level authToken for subsequent imports.
func authenticate(pbURL, authCollection, identity, password string) error {
	creds := map[string]string{
		"identity": identity,
		"password": password,
	}
	payload, _ := json.Marshal(creds)
	endpoint := fmt.Sprintf("%s/api/collections/%s/auth-with-password", pbURL, authCollection)
	req, err := http.NewRequest("POST", endpoint, bytes.NewBuffer(payload))
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/json")
	httpClient := &http.Client{Timeout: 10 * time.Second}
	resp, err := httpClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
		raw, _ := io.ReadAll(resp.Body)
		return fmt.Errorf("HTTP %d: %s", resp.StatusCode, string(raw))
	}
	var authResp struct {
		Token string `json:"token"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&authResp); err != nil {
		return err
	}
	if authResp.Token == "" {
		return fmt.Errorf("no token in response")
	}
	authToken = authResp.Token
	return nil
}
// fetchPage retrieves one page of records from the paginated source API.
//
// Improvement: uses a client with an explicit timeout, consistent with
// authenticate and importRecord — plain http.Get could hang indefinitely
// on a stalled server. A generous 30s allows for large pages.
func fetchPage(page, limit int) ([]OldDataModel, error) {
	url := fmt.Sprintf("%s?page=%d&limit=%d", sourceAPI, page, limit)
	client := &http.Client{Timeout: 30 * time.Second}
	resp, err := client.Get(url)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		body, _ := io.ReadAll(resp.Body)
		return nil, fmt.Errorf("HTTP %d: %s", resp.StatusCode, string(body))
	}
	var data []OldDataModel
	if err := json.NewDecoder(resp.Body).Decode(&data); err != nil {
		return nil, err
	}
	return data, nil
}
// importRecord converts one legacy record into the PocketBase schema and
// POSTs it to the target collection. Any non-200/201 response is returned
// as an error; duplicate-key errors are surfaced to the caller, which
// skips them via isUniqueViolation.
func importRecord(pbURL, collection string, old OldDataModel) error {
	// Map status: "done" -> "success"
	status := old.Status
	switch status {
	case "done":
		status = "success"
	case "installing", "failed", "unknown", "success":
		// keep as-is
	default:
		// Anything unrecognized is normalized so the target schema's
		// status values stay closed.
		status = "unknown"
	}
	// Ensure ct_type is not 0 (required field)
	ctType := old.CtType
	if ctType == 0 {
		ctType = 1 // default to unprivileged
	}
	// Ensure type is set
	recordType := old.Type
	if recordType == "" {
		recordType = "lxc"
	}
	record := PBRecord{
		CtType:     ctType,
		DiskSize:   old.DiskSize,
		CoreCount:  old.CoreCount,
		RamSize:    old.RamSize,
		OsType:     old.OsType,
		OsVersion:  old.OsVersion,
		DisableIP6: old.DisableIP6,
		NsApp:      old.NsApp,
		Method:     old.Method,
		PveVersion: old.PveVersion,
		Status:     status,
		RandomID:   old.RandomID,
		Type:       recordType,
		Error:      old.Error,
	}
	jsonData, err := json.Marshal(record)
	if err != nil {
		return err
	}
	url := fmt.Sprintf("%s/api/collections/%s/records", pbURL, collection)
	req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonData))
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/json")
	// Attach the bearer token when authenticate() ran earlier.
	if authToken != "" {
		req.Header.Set("Authorization", "Bearer "+authToken)
	}
	client := &http.Client{Timeout: 10 * time.Second}
	resp, err := client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusCreated {
		body, _ := io.ReadAll(resp.Body)
		return fmt.Errorf("HTTP %d: %s", resp.StatusCode, string(body))
	}
	return nil
}
// isUniqueViolation reports whether err looks like a duplicate-record
// rejection: an SQLite UNIQUE constraint or a PocketBase uniqueness
// validation error. Used to skip records that were already imported.
func isUniqueViolation(err error) bool {
	if err == nil {
		return false
	}
	needles := []string{
		"UNIQUE constraint failed",
		"duplicate",
		"already exists",
		"validation_not_unique",
	}
	msg := err.Error()
	for _, needle := range needles {
		if contains(msg, needle) {
			return true
		}
	}
	return false
}
// contains reports whether substr occurs anywhere within s. An empty
// substr is always contained. Behaves like strings.Contains but is kept
// dependency-free. The previous implementation combined a length guard,
// an equality check and a `len(s) > 0 &&` clause that were all redundant
// with the scan itself; this is the equivalent direct form.
func contains(s, substr string) bool {
	for i := 0; i+len(substr) <= len(s); i++ {
		if s[i:i+len(substr)] == substr {
			return true
		}
	}
	return false
}
// containsHelper performs a naive substring scan: every window of
// len(substr) bytes in s is compared against substr. Returns false when
// substr is longer than s (the loop bound is then negative).
func containsHelper(s, substr string) bool {
	limit := len(s) - len(substr)
	for start := 0; start <= limit; start++ {
		if s[start:start+len(substr)] == substr {
			return true
		}
	}
	return false
}
// min returns the smaller of two ints.
func min(a, b int) int {
	if b < a {
		return b
	}
	return a
}

View File

@@ -1,67 +0,0 @@
#!/bin/bash
# Migration script to import data from the old API to PocketBase
# Usage: ./migrate.sh [POCKETBASE_URL] [COLLECTION_NAME]
#
# Examples:
# ./migrate.sh # Uses defaults
# ./migrate.sh http://localhost:8090 # Custom PB URL
# ./migrate.sh http://localhost:8090 my_telemetry # Custom URL and collection
#
# Flow: verify both endpoints are reachable, ask for confirmation, then
# delegate the actual record transfer to migrate.go in the same directory.
set -e
# Resolve the directory this script lives in, so migrate.go is found even
# when the script is invoked from another working directory.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# Default values
POCKETBASE_URL="${1:-http://localhost:8090}"
POCKETBASE_COLLECTION="${2:-telemetry}"
echo "============================================="
echo " ProxmoxVED Data Migration Tool"
echo "============================================="
echo ""
echo "This script will migrate telemetry data from:"
echo " Source: https://api.htl-braunau.at/dev/data"
echo " Target: $POCKETBASE_URL"
echo " Collection: $POCKETBASE_COLLECTION"
echo ""
# Check if PocketBase is reachable
echo "🔍 Checking PocketBase connection..."
if ! curl -sf "$POCKETBASE_URL/api/health" >/dev/null 2>&1; then
echo "❌ Cannot reach PocketBase at $POCKETBASE_URL"
echo " Make sure PocketBase is running and the URL is correct."
exit 1
fi
echo "✅ PocketBase is reachable"
echo ""
# Check source API
echo "🔍 Checking source API..."
SUMMARY=$(curl -sf "https://api.htl-braunau.at/dev/data/summary" 2>/dev/null || echo "")
if [ -z "$SUMMARY" ]; then
echo "❌ Cannot reach source API"
exit 1
fi
# Extract total_entries from the JSON summary without requiring jq.
TOTAL=$(echo "$SUMMARY" | grep -o '"total_entries":[0-9]*' | cut -d: -f2)
echo "✅ Source API is reachable ($TOTAL entries available)"
echo ""
# Confirm migration
read -p "⚠️ Do you want to start the migration? [y/N] " -n 1 -r
echo ""
if [[ ! $REPLY =~ ^[Yy]$ ]]; then
echo "Migration cancelled."
exit 0
fi
echo ""
echo "Starting migration..."
echo ""
# Run the Go migration script
# Configuration is handed over via environment variables read by migrate.go.
cd "$SCRIPT_DIR"
POCKETBASE_URL="$POCKETBASE_URL" POCKETBASE_COLLECTION="$POCKETBASE_COLLECTION" go run migrate.go
echo ""
echo "Migration complete!"

View File

@@ -1,106 +0,0 @@
#!/bin/bash
# Post-migration script to fix timestamps in PocketBase.
#
# The Go migration stores each record's original creation time in a
# temporary "old_created" column (PocketBase does not allow setting
# created/updated via its API). This script copies old_created into the
# real created/updated columns directly in the SQLite database.
#
# Run this INSIDE the PocketBase container after migration completes
#
# Usage: ./fix-timestamps.sh
set -e
DB_PATH="/app/pb_data/data.db"
echo "==========================================================="
echo " Fix Timestamps in PocketBase"
echo "==========================================================="
echo ""
# Check if sqlite3 is available.
# The braces are required: without them `a || b && c` parses as
# `(a || b) && c`, so a *successful* apk add would still fall through
# into apt-get (which does not exist on Alpine and aborts under set -e).
if ! command -v sqlite3 &> /dev/null; then
echo "sqlite3 not found. Installing..."
apk add sqlite 2>/dev/null || { apt-get update && apt-get install -y sqlite3; }
fi
# Check if database exists; fall back to known alternative layouts, then a
# whole-filesystem search as a last resort.
if [ ! -f "$DB_PATH" ]; then
echo "Database not found at $DB_PATH"
echo "Trying alternative paths..."
if [ -f "/pb_data/data.db" ]; then
DB_PATH="/pb_data/data.db"
elif [ -f "/pb/pb_data/data.db" ]; then
DB_PATH="/pb/pb_data/data.db"
else
DB_PATH=$(find / -name "data.db" 2>/dev/null | head -1)
fi
if [ -z "$DB_PATH" ] || [ ! -f "$DB_PATH" ]; then
echo "Could not find PocketBase database!"
exit 1
fi
fi
echo "Database: $DB_PATH"
echo ""
# List tables
echo "Tables in database:"
sqlite3 "$DB_PATH" ".tables"
echo ""
# Find the telemetry table (usually matches collection name)
echo "Looking for telemetry/installations table..."
TABLE_NAME=$(sqlite3 "$DB_PATH" ".tables" | tr ' ' '\n' | grep -E "telemetry|installations" | head -1)
if [ -z "$TABLE_NAME" ]; then
echo "Could not auto-detect table. Available tables:"
sqlite3 "$DB_PATH" ".tables"
echo ""
read -p "Enter table name: " TABLE_NAME
fi
echo "Using table: $TABLE_NAME"
echo ""
# Check if old_created column exists.
# grep -c always prints a count (0 on no match) but exits non-zero then;
# `|| true` keeps set -e happy. The previous `|| echo "0"` appended a
# SECOND line ("0\n0"), which broke the numeric comparison below.
HAS_OLD_CREATED=$(sqlite3 "$DB_PATH" "PRAGMA table_info($TABLE_NAME);" | grep -c "old_created" || true)
if [ "$HAS_OLD_CREATED" -eq "0" ]; then
echo "Column 'old_created' not found in table $TABLE_NAME"
echo "Migration may not have been run with timestamp preservation."
exit 1
fi
# Show sample data before update
echo "Sample data BEFORE update:"
sqlite3 "$DB_PATH" "SELECT id, created, old_created FROM $TABLE_NAME WHERE old_created IS NOT NULL AND old_created != '' LIMIT 3;"
echo ""
# Count records to update
COUNT=$(sqlite3 "$DB_PATH" "SELECT COUNT(*) FROM $TABLE_NAME WHERE old_created IS NOT NULL AND old_created != '';")
echo "Records to update: $COUNT"
echo ""
read -p "Proceed with timestamp update? [y/N] " -n 1 -r
echo ""
if [[ ! $REPLY =~ ^[Yy]$ ]]; then
echo "Aborted."
exit 0
fi
# Perform the update
echo "Updating timestamps..."
sqlite3 "$DB_PATH" "UPDATE $TABLE_NAME SET created = old_created, updated = old_created WHERE old_created IS NOT NULL AND old_created != '';"
# Show sample data after update
echo ""
echo "Sample data AFTER update:"
sqlite3 "$DB_PATH" "SELECT id, created, old_created FROM $TABLE_NAME LIMIT 3;"
echo ""
echo "==========================================================="
echo " Timestamp Update Complete!"
echo "==========================================================="
echo ""
echo "Next steps:"
echo "1. Verify data in PocketBase Admin UI"
echo "2. Remove the 'old_created' field from the collection schema"
echo ""

View File

@@ -1,77 +0,0 @@
#!/bin/sh
# Direct SQLite Import - Pure Shell, FAST batch mode!
# Imports MongoDB Extended JSON directly into PocketBase SQLite
#
# Usage:
# docker cp import-direct.sh pocketbase:/tmp/
# docker cp data.json pocketbase:/tmp/
# docker exec -it pocketbase sh -c "cd /tmp && chmod +x import-direct.sh && ./import-direct.sh"
set -e
# Positional overrides: JSON file, target table, repo label, database path.
JSON_FILE="${1:-/tmp/data.json}"
TABLE="${2:-telemetry}"
REPO="${3:-Proxmox VE}"
DB="${4:-/app/pb_data/data.db}"
BATCH=5000
echo "========================================================="
echo " Direct SQLite Import (Batch Mode)"
echo "========================================================="
echo "JSON: $JSON_FILE"
echo "Table: $TABLE"
echo "Repo: $REPO"
echo "Batch: $BATCH"
echo "---------------------------------------------------------"
# Install jq if missing
command -v jq >/dev/null || apk add --no-cache jq
# Optimize SQLite for bulk
sqlite3 "$DB" "PRAGMA journal_mode=WAL; PRAGMA synchronous=OFF; PRAGMA cache_size=100000;"
SQL_FILE="/tmp/batch.sql"
echo "[INFO] Converting JSON to SQL..."
START=$(date +%s)
# Convert entire JSON to SQL file (much faster than line-by-line sqlite3 calls)
# Each field is extracted with jq; MongoDB Extended JSON wrappers
# ({"$numberLong": ...}, {"$date": ...}) are unwrapped, and single quotes
# are doubled for SQL-literal safety.
# NOTE(review): this still spawns ~16 jq processes per record; acceptable
# for one-off migrations but a single jq program would be far faster.
{
echo "BEGIN TRANSACTION;"
jq -r '.[] | @json' "$JSON_FILE" | while read -r r; do
CT=$(echo "$r" | jq -r 'if .ct_type|type=="object" then .ct_type["$numberLong"] else .ct_type end // 0')
DISK=$(echo "$r" | jq -r 'if .disk_size|type=="object" then .disk_size["$numberLong"] else .disk_size end // 0')
CORE=$(echo "$r" | jq -r 'if .core_count|type=="object" then .core_count["$numberLong"] else .core_count end // 0')
RAM=$(echo "$r" | jq -r 'if .ram_size|type=="object" then .ram_size["$numberLong"] else .ram_size end // 0')
OS=$(echo "$r" | jq -r '.os_type // ""' | sed "s/'/''/g")
OSVER=$(echo "$r" | jq -r '.os_version // ""' | sed "s/'/''/g")
DIS6=$(echo "$r" | jq -r '.disable_ip6 // "no"' | sed "s/'/''/g")
APP=$(echo "$r" | jq -r '.nsapp // "unknown"' | sed "s/'/''/g")
METH=$(echo "$r" | jq -r '.method // ""' | sed "s/'/''/g")
PVE=$(echo "$r" | jq -r '.pveversion // ""' | sed "s/'/''/g")
STAT=$(echo "$r" | jq -r '.status // "unknown"')
# Legacy "done" status maps to "success" (same rule as migrate.go).
[ "$STAT" = "done" ] && STAT="success"
RID=$(echo "$r" | jq -r '.random_id // ""' | sed "s/'/''/g")
TYPE=$(echo "$r" | jq -r '.type // "lxc"' | sed "s/'/''/g")
ERR=$(echo "$r" | jq -r '.error // ""' | sed "s/'/''/g")
DATE=$(echo "$r" | jq -r 'if .created_at|type=="object" then .created_at["$date"] else .created_at end // ""')
# PocketBase-style 15-char random record id.
ID=$(head -c 100 /dev/urandom | tr -dc 'a-z0-9' | head -c 15)
REPO_ESC=$(echo "$REPO" | sed "s/'/''/g")
echo "INSERT OR IGNORE INTO $TABLE (id,created,updated,ct_type,disk_size,core_count,ram_size,os_type,os_version,disableip6,nsapp,method,pve_version,status,random_id,type,error,repo_source) VALUES ('$ID','$DATE','$DATE',$CT,$DISK,$CORE,$RAM,'$OS','$OSVER','$DIS6','$APP','$METH','$PVE','$STAT','$RID','$TYPE','$ERR','$REPO_ESC');"
done
echo "COMMIT;"
} > "$SQL_FILE"
MID=$(date +%s)
echo "[INFO] SQL generated in $((MID - START))s"
echo "[INFO] Importing into SQLite..."
sqlite3 "$DB" < "$SQL_FILE"
END=$(date +%s)
# Line count minus BEGIN/COMMIT approximates the number of inserts.
COUNT=$(wc -l < "$SQL_FILE")
rm -f "$SQL_FILE"
echo "========================================================="
echo "Done! ~$((COUNT - 2)) records in $((END - START)) seconds"
echo "========================================================="

View File

@@ -1,89 +0,0 @@
#!/bin/bash
# Migration script for Proxmox VE data
# Run directly on the server machine
#
# Usage: ./migrate-linux.sh
#
# Prerequisites:
# - Go installed (apt install golang-go)
# - Network access to source API and PocketBase
#
# Builds and runs migrate.go with a fixed environment, then prints the
# manual SQL steps needed to restore original timestamps.
set -e
echo "==========================================================="
echo " Proxmox VE Data Migration to PocketBase"
echo "==========================================================="
# Configuration - EDIT THESE VALUES
# NOTE(review): PB_PASSWORD is exported as a plain environment variable and
# must be edited in-place here — consider reading it from a prompt or a
# protected file instead so it does not end up in shell history/process env.
export MIGRATION_SOURCE_URL="https://api.htl-braunau.at/data"
export POCKETBASE_URL="http://db.community-scripts.org"
export POCKETBASE_COLLECTION="telemetry"
export PB_AUTH_COLLECTION="_superusers"
export PB_IDENTITY="db_admin@community-scripts.org"
export PB_PASSWORD="YOUR_PASSWORD_HERE" # <-- CHANGE THIS!
export REPO_SOURCE="Proxmox VE"
export DATE_UNTIL="2026-02-10"
export BATCH_SIZE="500"
# Optional: Resume from specific page
# export START_PAGE="100"
# Optional: Only import records after this date
# export DATE_FROM="2020-01-01"
echo ""
echo "Configuration:"
echo " Source: $MIGRATION_SOURCE_URL"
echo " Target: $POCKETBASE_URL"
echo " Collection: $POCKETBASE_COLLECTION"
echo " Repo: $REPO_SOURCE"
echo " Until: $DATE_UNTIL"
echo " Batch: $BATCH_SIZE"
echo ""
# Check if Go is installed
if ! command -v go &> /dev/null; then
echo "Go is not installed. Installing..."
apt-get update && apt-get install -y golang-go
fi
# Download migrate.go if not present
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
MIGRATE_GO="$SCRIPT_DIR/migrate.go"
if [ ! -f "$MIGRATE_GO" ]; then
echo "migrate.go not found in $SCRIPT_DIR"
echo "Please copy migrate.go to this directory first."
exit 1
fi
echo "Building migration tool..."
cd "$SCRIPT_DIR"
go build -o migrate migrate.go
echo ""
echo "Starting migration..."
echo "Press Ctrl+C to stop (you can resume later with START_PAGE)"
echo ""
./migrate
echo ""
echo "==========================================================="
echo " Post-Migration Steps"
echo "==========================================================="
echo ""
echo "1. Connect to PocketBase container:"
echo " docker exec -it <pocketbase-container> sh"
echo ""
echo "2. Find the table name:"
echo " sqlite3 /app/pb_data/data.db '.tables'"
echo ""
echo "3. Update timestamps (replace <table> with actual name):"
echo " sqlite3 /app/pb_data/data.db \"UPDATE <table> SET created = old_created, updated = old_created WHERE old_created IS NOT NULL AND old_created != ''\""
echo ""
echo "4. Verify timestamps:"
echo " sqlite3 /app/pb_data/data.db \"SELECT created, old_created FROM <table> LIMIT 5\""
echo ""
echo "5. Remove old_created field in PocketBase Admin UI"
echo ""

File diff suppressed because it is too large Load Diff

View File

@@ -1,67 +0,0 @@
#!/bin/bash
# Migration script to import data from the old API to PocketBase
# Usage: ./migrate.sh [POCKETBASE_URL] [COLLECTION_NAME]
#
# Examples:
# ./migrate.sh # Uses defaults
# ./migrate.sh http://localhost:8090 # Custom PB URL
# ./migrate.sh http://localhost:8090 my_telemetry # Custom URL and collection
#
# Flow: verify both endpoints are reachable, ask for confirmation, then
# delegate the actual record transfer to migrate.go in the same directory.
set -e
# Resolve the directory this script lives in, so migrate.go is found even
# when the script is invoked from another working directory.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# Default values
POCKETBASE_URL="${1:-http://localhost:8090}"
POCKETBASE_COLLECTION="${2:-telemetry}"
echo "============================================="
echo " ProxmoxVED Data Migration Tool"
echo "============================================="
echo ""
echo "This script will migrate telemetry data from:"
echo " Source: https://api.htl-braunau.at/dev/data"
echo " Target: $POCKETBASE_URL"
echo " Collection: $POCKETBASE_COLLECTION"
echo ""
# Check if PocketBase is reachable
echo "🔍 Checking PocketBase connection..."
if ! curl -sf "$POCKETBASE_URL/api/health" >/dev/null 2>&1; then
echo "❌ Cannot reach PocketBase at $POCKETBASE_URL"
echo " Make sure PocketBase is running and the URL is correct."
exit 1
fi
echo "✅ PocketBase is reachable"
echo ""
# Check source API
echo "🔍 Checking source API..."
SUMMARY=$(curl -sf "https://api.htl-braunau.at/dev/data/summary" 2>/dev/null || echo "")
if [ -z "$SUMMARY" ]; then
echo "❌ Cannot reach source API"
exit 1
fi
# Extract total_entries from the JSON summary without requiring jq.
TOTAL=$(echo "$SUMMARY" | grep -o '"total_entries":[0-9]*' | cut -d: -f2)
echo "✅ Source API is reachable ($TOTAL entries available)"
echo ""
# Confirm migration
read -p "⚠️ Do you want to start the migration? [y/N] " -n 1 -r
echo ""
if [[ ! $REPLY =~ ^[Yy]$ ]]; then
echo "Migration cancelled."
exit 0
fi
echo ""
echo "Starting migration..."
echo ""
# Run the Go migration script
# Configuration is handed over via environment variables read by migrate.go.
cd "$SCRIPT_DIR"
POCKETBASE_URL="$POCKETBASE_URL" POCKETBASE_COLLECTION="$POCKETBASE_COLLECTION" go run migrate.go
echo ""
echo "Migration complete!"

View File

@@ -1,492 +0,0 @@
// +build ignore
// Migration script to import data from the old API to PocketBase
// Run with: go run migrate.go
package main
import (
"bytes"
"encoding/json"
"fmt"
"io"
"net/http"
"os"
"time"
)
const (
defaultSourceAPI = "https://api.htl-braunau.at/dev/data"
defaultPBURL = "http://localhost:8090"
batchSize = 100
)
var (
sourceAPI string
summaryAPI string
authToken string // PocketBase auth token
)
// OldDataModel represents the data structure from the old API.
// Field names mirror the legacy JSON payload; note the "disableip6" tag
// has no underscore (legacy spelling).
type OldDataModel struct {
ID string `json:"id"`
CtType int `json:"ct_type"`
DiskSize int `json:"disk_size"`
CoreCount int `json:"core_count"`
RamSize int `json:"ram_size"`
OsType string `json:"os_type"`
OsVersion string `json:"os_version"`
DisableIP6 string `json:"disableip6"`
NsApp string `json:"nsapp"`
Method string `json:"method"`
CreatedAt string `json:"created_at"`
PveVersion string `json:"pve_version"`
Status string `json:"status"`
RandomID string `json:"random_id"`
Type string `json:"type"`
Error string `json:"error"`
}
// PBRecord represents the PocketBase record format.
// It is OldDataModel minus the legacy ID/CreatedAt fields, plus OldCreated.
type PBRecord struct {
CtType int `json:"ct_type"`
DiskSize int `json:"disk_size"`
CoreCount int `json:"core_count"`
RamSize int `json:"ram_size"`
OsType string `json:"os_type"`
OsVersion string `json:"os_version"`
DisableIP6 string `json:"disableip6"`
NsApp string `json:"nsapp"`
Method string `json:"method"`
PveVersion string `json:"pve_version"`
Status string `json:"status"`
RandomID string `json:"random_id"`
Type string `json:"type"`
Error string `json:"error"`
// Temporary field for timestamp migration (PocketBase doesn't allow setting created/updated via API)
// After migration, run SQL: UPDATE installations SET created = old_created, updated = old_created
OldCreated string `json:"old_created,omitempty"`
}
// Summary is the source API's /summary response (total record count).
type Summary struct {
TotalEntries int `json:"total_entries"`
}
// main drives the migration: read configuration from environment variables,
// authenticate against PocketBase (admin preferred so timestamps can be
// preserved, collection auth as fallback), then page through the source API
// and import every record, tallying migrated/skipped/failed counts.
func main() {
// Setup source URLs
baseURL := os.Getenv("MIGRATION_SOURCE_URL")
if baseURL == "" {
baseURL = defaultSourceAPI
}
sourceAPI = baseURL + "/paginated"
summaryAPI = baseURL + "/summary"
// Support both POCKETBASE_URL and PB_URL (Coolify uses PB_URL)
pbURL := os.Getenv("POCKETBASE_URL")
if pbURL == "" {
pbURL = os.Getenv("PB_URL")
}
if pbURL == "" {
pbURL = defaultPBURL
}
// Support both POCKETBASE_COLLECTION and PB_TARGET_COLLECTION
pbCollection := os.Getenv("POCKETBASE_COLLECTION")
if pbCollection == "" {
pbCollection = os.Getenv("PB_TARGET_COLLECTION")
}
if pbCollection == "" {
pbCollection = "telemetry"
}
// Auth collection
authCollection := os.Getenv("PB_AUTH_COLLECTION")
if authCollection == "" {
authCollection = "telemetry_service_user"
}
// Credentials - prefer admin auth for timestamp preservation
pbAdminEmail := os.Getenv("PB_ADMIN_EMAIL")
pbAdminPassword := os.Getenv("PB_ADMIN_PASSWORD")
pbIdentity := os.Getenv("PB_IDENTITY")
pbPassword := os.Getenv("PB_PASSWORD")
fmt.Println("===========================================")
fmt.Println(" Data Migration to PocketBase")
fmt.Println("===========================================")
fmt.Printf("Source API: %s\n", baseURL)
fmt.Printf("PocketBase URL: %s\n", pbURL)
fmt.Printf("Collection: %s\n", pbCollection)
fmt.Println("-------------------------------------------")
// Authenticate with PocketBase - prefer Admin auth for timestamp support
if pbAdminEmail != "" && pbAdminPassword != "" {
fmt.Println("🔐 Authenticating as PocketBase Admin...")
err := authenticateAdmin(pbURL, pbAdminEmail, pbAdminPassword)
if err != nil {
fmt.Printf("❌ Admin authentication failed: %v\n", err)
os.Exit(1)
}
fmt.Println("✅ Admin authentication successful (timestamps will be preserved)")
} else if pbIdentity != "" && pbPassword != "" {
fmt.Println("🔐 Authenticating with PocketBase (collection auth)...")
fmt.Println("⚠️ Note: Timestamps may not be preserved without admin auth")
err := authenticate(pbURL, authCollection, pbIdentity, pbPassword)
if err != nil {
fmt.Printf("❌ Authentication failed: %v\n", err)
os.Exit(1)
}
fmt.Println("✅ Authentication successful")
} else {
fmt.Println("⚠️ No credentials provided, trying without auth...")
}
fmt.Println("-------------------------------------------")
// Get total count
summary, err := getSummary()
if err != nil {
fmt.Printf("❌ Failed to get summary: %v\n", err)
os.Exit(1)
}
fmt.Printf("📊 Total entries to migrate: %d\n", summary.TotalEntries)
fmt.Println("-------------------------------------------")
// Calculate pages
totalPages := (summary.TotalEntries + batchSize - 1) / batchSize
var totalMigrated, totalFailed, totalSkipped int
for page := 1; page <= totalPages; page++ {
fmt.Printf("📦 Fetching page %d/%d (items %d-%d)...\n",
page, totalPages,
(page-1)*batchSize+1,
min(page*batchSize, summary.TotalEntries))
data, err := fetchPage(page, batchSize)
if err != nil {
fmt.Printf(" ❌ Failed to fetch page %d: %v\n", page, err)
// NOTE(review): a failed last page may hold fewer than batchSize
// items, so this can overcount failures slightly.
totalFailed += batchSize
continue
}
for i, record := range data {
err := importRecord(pbURL, pbCollection, record)
if err != nil {
if isUniqueViolation(err) {
totalSkipped++
continue
}
fmt.Printf(" ❌ Failed to import record %d: %v\n", (page-1)*batchSize+i+1, err)
totalFailed++
continue
}
totalMigrated++
}
// NOTE(review): len(data) here is the number of records *fetched* on
// this page, not the number actually migrated — the "migrated" label
// overstates on partial failures.
fmt.Printf(" ✅ Page %d complete (migrated: %d, skipped: %d, failed: %d)\n",
page, len(data), totalSkipped, totalFailed)
// Small delay to avoid overwhelming the server
time.Sleep(100 * time.Millisecond)
}
fmt.Println("===========================================")
fmt.Println(" Migration Complete")
fmt.Println("===========================================")
fmt.Printf("✅ Successfully migrated: %d\n", totalMigrated)
fmt.Printf("⏭️ Skipped (duplicates): %d\n", totalSkipped)
fmt.Printf("❌ Failed: %d\n", totalFailed)
fmt.Println("===========================================")
}
// getSummary fetches the source API's summary endpoint (package-level
// summaryAPI) and returns the decoded total record count.
// Returns an error for transport failures or any non-200 response.
func getSummary() (*Summary, error) {
	resp, err := http.Get(summaryAPI)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	// Fail fast on non-200 so an HTML error page is not handed to the
	// JSON decoder (mirrors the status handling in fetchPage).
	if resp.StatusCode != http.StatusOK {
		body, _ := io.ReadAll(resp.Body)
		return nil, fmt.Errorf("HTTP %d: %s", resp.StatusCode, string(body))
	}
	var summary Summary
	if err := json.NewDecoder(resp.Body).Decode(&summary); err != nil {
		return nil, err
	}
	return &summary, nil
}
// authenticateAdmin authenticates as PocketBase admin (required for setting timestamps).
// It tries the PocketBase v0.23+ _superusers endpoint first and falls back to
// the legacy /api/admins endpoint on 404. On success the token is stored in
// the package-level authToken; on failure the last endpoint error is returned.
func authenticateAdmin(pbURL, email, password string) error {
body := map[string]string{
"identity": email,
"password": password,
}
jsonData, _ := json.Marshal(body)
// Try new PocketBase v0.23+ endpoint first (_superusers collection)
endpoints := []string{
fmt.Sprintf("%s/api/collections/_superusers/auth-with-password", pbURL),
fmt.Sprintf("%s/api/admins/auth-with-password", pbURL), // Legacy endpoint
}
client := &http.Client{Timeout: 10 * time.Second}
var lastErr error
for _, url := range endpoints {
req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonData))
if err != nil {
lastErr = err
continue
}
req.Header.Set("Content-Type", "application/json")
resp, err := client.Do(req)
if err != nil {
lastErr = err
continue
}
// 404 means this PocketBase version doesn't expose the endpoint —
// silently move on to the next candidate.
if resp.StatusCode == 404 {
resp.Body.Close()
continue // Try next endpoint
}
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
body, _ := io.ReadAll(resp.Body)
resp.Body.Close()
lastErr = fmt.Errorf("HTTP %d: %s", resp.StatusCode, string(body))
continue
}
var result struct {
Token string `json:"token"`
}
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
resp.Body.Close()
lastErr = err
continue
}
resp.Body.Close()
if result.Token == "" {
lastErr = fmt.Errorf("no token in response")
continue
}
authToken = result.Token
return nil
}
return fmt.Errorf("all auth endpoints failed: %v", lastErr)
}
// authenticate performs a password login against the given PocketBase auth
// collection and stores the resulting token in the package-level authToken.
// Any non-2xx response, decode failure or empty token is returned as an error.
func authenticate(pbURL, authCollection, identity, password string) error {
	credentials := map[string]string{
		"identity": identity,
		"password": password,
	}
	payload, _ := json.Marshal(credentials)
	endpoint := fmt.Sprintf("%s/api/collections/%s/auth-with-password", pbURL, authCollection)
	req, err := http.NewRequest("POST", endpoint, bytes.NewBuffer(payload))
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/json")
	httpClient := &http.Client{Timeout: 10 * time.Second}
	resp, err := httpClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
		raw, _ := io.ReadAll(resp.Body)
		return fmt.Errorf("HTTP %d: %s", resp.StatusCode, string(raw))
	}
	var authResponse struct {
		Token string `json:"token"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&authResponse); err != nil {
		return err
	}
	if authResponse.Token == "" {
		return fmt.Errorf("no token in response")
	}
	authToken = authResponse.Token
	return nil
}
// fetchPage downloads a single page of legacy records from the paginated
// source endpoint (package-level sourceAPI). A non-200 status or a JSON
// decode failure is reported as an error.
func fetchPage(page, limit int) ([]OldDataModel, error) {
	pageURL := fmt.Sprintf("%s?page=%d&limit=%d", sourceAPI, page, limit)
	resp, err := http.Get(pageURL)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		msg, _ := io.ReadAll(resp.Body)
		return nil, fmt.Errorf("HTTP %d: %s", resp.StatusCode, string(msg))
	}
	var entries []OldDataModel
	if decodeErr := json.NewDecoder(resp.Body).Decode(&entries); decodeErr != nil {
		return nil, decodeErr
	}
	return entries, nil
}
// importRecord maps one legacy API entry onto the PocketBase schema (including
// the temporary OldCreated timestamp field) and POSTs it to the collection.
// The package-level authToken, when set, is sent as a Bearer token. Any
// response other than 200/201 is returned as an error.
func importRecord(pbURL, collection string, old OldDataModel) error {
// Map status: "done" -> "success"
status := old.Status
switch status {
case "done":
status = "success"
case "installing", "failed", "unknown", "success":
// keep as-is
default:
status = "unknown"
}
// ct_type: 1=unprivileged, 2=privileged in old data
// PocketBase might expect 0/1, so normalize to 0 (unprivileged) or 1 (privileged)
// NOTE(review): this maps legacy 1 -> 0 and >1 -> 1, which differs from the
// other importer variant in this repo (which defaults 0 -> 1); confirm the
// target collection's expected encoding before running both.
ctType := old.CtType
if ctType <= 1 {
ctType = 0 // unprivileged (default)
} else {
ctType = 1 // privileged/VM
}
// Ensure type is set
recordType := old.Type
if recordType == "" {
recordType = "lxc"
}
// Ensure nsapp is set (required field)
nsapp := old.NsApp
if nsapp == "" {
nsapp = "unknown"
}
record := PBRecord{
CtType: ctType,
DiskSize: old.DiskSize,
CoreCount: old.CoreCount,
RamSize: old.RamSize,
OsType: old.OsType,
OsVersion: old.OsVersion,
DisableIP6: old.DisableIP6,
NsApp: nsapp,
Method: old.Method,
PveVersion: old.PveVersion,
Status: status,
RandomID: old.RandomID,
Type: recordType,
Error: old.Error,
// Original creation time, normalized; copied into created/updated by
// the post-migration SQL step.
OldCreated: convertTimestamp(old.CreatedAt),
}
jsonData, err := json.Marshal(record)
if err != nil {
return err
}
url := fmt.Sprintf("%s/api/collections/%s/records", pbURL, collection)
req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonData))
if err != nil {
return err
}
req.Header.Set("Content-Type", "application/json")
if authToken != "" {
req.Header.Set("Authorization", "Bearer "+authToken)
}
client := &http.Client{Timeout: 10 * time.Second}
resp, err := client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusCreated {
body, _ := io.ReadAll(resp.Body)
return fmt.Errorf("HTTP %d: %s", resp.StatusCode, string(body))
}
return nil
}
// isUniqueViolation reports whether err represents a duplicate-record
// rejection (SQLite UNIQUE constraint or PocketBase uniqueness validation),
// so callers can count it as a skip rather than a failure.
func isUniqueViolation(err error) bool {
	if err == nil {
		return false
	}
	message := err.Error()
	for _, marker := range []string{
		"UNIQUE constraint failed",
		"duplicate",
		"already exists",
		"validation_not_unique",
	} {
		if contains(message, marker) {
			return true
		}
	}
	return false
}
// contains reports whether substr occurs anywhere within s. An empty
// substr is always contained. Behaves like strings.Contains but is kept
// dependency-free. The previous implementation combined a length guard,
// an equality check and a `len(s) > 0 &&` clause that were all redundant
// with the scan itself; this is the equivalent direct form.
func contains(s, substr string) bool {
	for i := 0; i+len(substr) <= len(s); i++ {
		if s[i:i+len(substr)] == substr {
			return true
		}
	}
	return false
}
// containsHelper does a naive substring scan, comparing every window of
// len(substr) bytes in s against substr; false when substr is longer than s.
func containsHelper(s, substr string) bool {
	last := len(s) - len(substr)
	for pos := 0; pos <= last; pos++ {
		if s[pos:pos+len(substr)] == substr {
			return true
		}
	}
	return false
}
// min returns the smaller of two ints.
func min(a, b int) int {
	if b < a {
		return b
	}
	return a
}
// convertTimestamp normalizes a timestamp string in any of the known source
// formats to the layout PocketBase stores ("2006-01-02 15:04:05.000Z", UTC).
// Empty or unparseable input yields "" (PocketBase then falls back to the
// current time); unparseable values are logged as warnings.
func convertTimestamp(ts string) string {
	if ts == "" {
		return ""
	}
	// Candidate layouts, tried in order; the first successful parse wins.
	layouts := []string{
		time.RFC3339,                      // "2006-01-02T15:04:05Z07:00"
		time.RFC3339Nano,                  // "2006-01-02T15:04:05.999999999Z07:00"
		"2006-01-02T15:04:05.000Z",        // ISO with milliseconds
		"2006-01-02T15:04:05Z",            // ISO without milliseconds
		"2006-01-02T15:04:05",             // ISO without timezone
		"2006-01-02 15:04:05",             // SQL format
		"2006-01-02 15:04:05.000",         // SQL with ms
		"2006-01-02 15:04:05.000 UTC",     // SQL with UTC
		"2006-01-02T15:04:05.000+00:00",   // ISO with offset
	}
	for _, layout := range layouts {
		if parsed, parseErr := time.Parse(layout, ts); parseErr == nil {
			// Emit in PocketBase's stored format (UTC).
			return parsed.UTC().Format("2006-01-02 15:04:05.000Z")
		}
	}
	// All layouts failed; warn and let PocketBase assign the current time.
	fmt.Printf(" ⚠️ Could not parse timestamp: %s\n", ts)
	return ""
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,172 +0,0 @@
# Copyright (c) 2021-2026 community-scripts ORG
# Author: tteck (tteckster)
# Co-Author: MickLesk
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Bootstrap: ensure curl exists (Alpine base image may lack it), then pull in
# the shared helper functions (msg_info/msg_ok, color vars, $STD, ...) from
# the upstream core.func and initialize them.
if ! command -v curl >/dev/null 2>&1; then
apk update && apk add curl >/dev/null 2>&1
fi
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/core.func)
load_functions
# Disables IPv6 (when IPV6_METHOD=disable) and sets verbose mode.
# The sysctl change is applied immediately, persisted via /etc/sysctl.d, and
# re-applied on boot through the OpenRC sysctl service.
verb_ip6() {
set_std_mode # Set STD mode based on VERBOSE
if [ "$IPV6_METHOD" == "disable" ]; then
msg_info "Disabling IPv6 (this may affect some services)"
$STD sysctl -w net.ipv6.conf.all.disable_ipv6=1
$STD sysctl -w net.ipv6.conf.default.disable_ipv6=1
$STD sysctl -w net.ipv6.conf.lo.disable_ipv6=1
mkdir -p /etc/sysctl.d
$STD tee /etc/sysctl.d/99-disable-ipv6.conf >/dev/null <<EOF
net.ipv6.conf.all.disable_ipv6 = 1
net.ipv6.conf.default.disable_ipv6 = 1
net.ipv6.conf.lo.disable_ipv6 = 1
EOF
$STD rc-update add sysctl default
msg_ok "Disabled IPv6"
fi
}
# This function catches errors and handles them with the error handler function.
# Enables strict mode (-E so the ERR trap is inherited by functions/subshells)
# and routes every failing command to error_handler with its line and text.
catch_errors() {
set -Eeuo pipefail
trap 'error_handler $LINENO "$BASH_COMMAND"' ERR
}
# This function handles errors.
# Args: $1 = line number, $2 = failing command (from the ERR trap).
# $? must be captured first, before any other command overwrites it.
# Color variables (RD/CL/YW) come from the sourced core.func.
error_handler() {
local exit_code="$?"
local line_number="$1"
local command="$2"
local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
echo -e "\n$error_message\n"
}
# This function sets up the Container OS by generating the locale, setting the timezone, and checking the network connection.
# Waits (polling) until the container has a non-loopback IPv4 address, then
# reports success; exits 1 if no address appears within the retry budget.
# NOTE(review): $i, $RETRY_EVERY and $RETRY_NUM are expected to be provided
# by the sourced core.func / caller — confirm they are set before this runs.
setting_up_container() {
msg_info "Setting up Container OS"
while [ $i -gt 0 ]; do
if [ "$(ip addr show | grep 'inet ' | grep -v '127.0.0.1' | awk '{print $2}' | cut -d'/' -f1)" != "" ]; then
break
fi
echo 1>&2 -en "${CROSS}${RD} No Network! "
sleep $RETRY_EVERY
i=$((i - 1))
done
if [ "$(ip addr show | grep 'inet ' | grep -v '127.0.0.1' | awk '{print $2}' | cut -d'/' -f1)" = "" ]; then
echo 1>&2 -e "\n${CROSS}${RD} No Network After $RETRY_NUM Tries${CL}"
echo -e "${NETWORK}Check Network Settings"
exit 1
fi
msg_ok "Set up Container OS"
# NOTE(review): unlike the checks above, this display line does not exclude
# 127.0.0.1 and takes the *last* inet entry — confirm intended.
msg_ok "Network Connected: ${BL}$(ip addr show | grep 'inet ' | awk '{print $2}' | cut -d'/' -f1 | tail -n1)${CL}"
}
# This function checks the network connection by pinging a known IP address and prompts the user to continue if the internet is not connected.
# Temporarily disables errexit/ERR trap so failed pings don't abort the
# script, then restores both before returning. Also verifies DNS by
# resolving github.com.
network_check() {
set +e
trap - ERR
# Any one of Cloudflare, Google or Quad9 answering counts as "online".
if ping -c 1 -W 1 1.1.1.1 &>/dev/null || ping -c 1 -W 1 8.8.8.8 &>/dev/null || ping -c 1 -W 1 9.9.9.9 &>/dev/null; then
msg_ok "Internet Connected"
else
msg_error "Internet NOT Connected"
read -r -p "Would you like to continue anyway? <y/N> " prompt
if [[ "${prompt,,}" =~ ^(y|yes)$ ]]; then
echo -e "${INFO}${RD}Expect Issues Without Internet${CL}"
else
echo -e "${NETWORK}Check Network Settings"
exit 1
fi
fi
# DNS sanity check: resolve github.com via the system resolver.
RESOLVEDIP=$(getent hosts github.com | awk '{ print $1 }')
if [[ -z "$RESOLVEDIP" ]]; then msg_error "DNS Lookup Failure"; else msg_ok "DNS Resolved github.com to ${BL}$RESOLVEDIP${CL}"; fi
set -e
trap 'error_handler $LINENO "$BASH_COMMAND"' ERR
}
# This function updates the Container OS by running apk upgrade (Alpine),
# then sources the shared tools.func helpers from upstream for later use.
update_os() {
msg_info "Updating Container OS"
$STD apk -U upgrade
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/tools.func)
msg_ok "Updated Container OS"
}
# This function modifies the message of the day (motd) and SSH settings.
# Writes a login banner to /etc/profile.d (re-evaluated each login so
# hostname/IP stay current) and optionally enables root SSH login.
motd_ssh() {
echo "export TERM='xterm-256color'" >>/root/.bashrc
IP=$(ip -4 addr show eth0 | awk '/inet / {print $2}' | cut -d/ -f1 | head -n 1)
if [ -f "/etc/os-release" ]; then
OS_NAME=$(grep ^NAME /etc/os-release | cut -d= -f2 | tr -d '"')
OS_VERSION=$(grep ^VERSION_ID /etc/os-release | cut -d= -f2 | tr -d '"')
else
OS_NAME="Alpine Linux"
OS_VERSION="Unknown"
fi
PROFILE_FILE="/etc/profile.d/00_lxc-details.sh"
echo "echo -e \"\"" >"$PROFILE_FILE"
# NOTE(review): the quote placement here (closing quote before \") differs
# from the lines below; the generated line is still a valid echo — confirm
# the emitted banner renders as intended.
echo -e "echo -e \"${BOLD}${APPLICATION} LXC Container${CL}"\" >>"$PROFILE_FILE"
echo -e "echo -e \"${TAB}${GATEWAY}${YW} Provided by: ${GN}community-scripts ORG ${YW}| GitHub: ${GN}https://github.com/community-scripts/ProxmoxVE${CL}\"" >>"$PROFILE_FILE"
echo "echo \"\"" >>"$PROFILE_FILE"
echo -e "echo -e \"${TAB}${OS}${YW} OS: ${GN}${OS_NAME} - Version: ${OS_VERSION}${CL}\"" >>"$PROFILE_FILE"
echo -e "echo -e \"${TAB}${HOSTNAME}${YW} Hostname: ${GN}\$(hostname)${CL}\"" >>"$PROFILE_FILE"
echo -e "echo -e \"${TAB}${INFO}${YW} IP Address: ${GN}\$(ip -4 addr show eth0 | awk '/inet / {print \$2}' | cut -d/ -f1 | head -n 1)${CL}\"" >>"$PROFILE_FILE"
# Configure SSH if enabled
if [[ "${SSH_ROOT}" == "yes" ]]; then
# Enable sshd service
$STD rc-update add sshd
# Allow root login via SSH
sed -i "s/#PermitRootLogin prohibit-password/PermitRootLogin yes/g" /etc/ssh/sshd_config
# Start the sshd service
$STD /etc/init.d/sshd start
fi
}
# Validate a timezone name: succeeds (0) only when "$1" names an existing
# entry under /usr/share/zoneinfo, e.g. "Europe/Vienna".
validate_tz() {
  test -f "/usr/share/zoneinfo/${1}"
}
# This function customizes the container and enables passwordless login for the root user.
# When no password was chosen: clears root's password and installs an OpenRC
# boot hook that rewrites tty1 in /etc/inittab for agetty autologin.
# Always installs the /usr/bin/update helper and, if provided, the SSH key.
customize() {
if [[ "$PASSWORD" == "" ]]; then
msg_info "Customizing Container"
passwd -d root >/dev/null 2>&1
# Ensure agetty is available
apk add --no-cache --force-broken-world util-linux >/dev/null 2>&1
# Create persistent autologin boot script
mkdir -p /etc/local.d
cat <<'EOF' >/etc/local.d/autologin.start
#!/bin/sh
sed -i 's|^tty1::respawn:.*|tty1::respawn:/sbin/agetty --autologin root --noclear tty1 38400 linux|' /etc/inittab
kill -HUP 1
EOF
touch /root/.hushlogin
chmod +x /etc/local.d/autologin.start
rc-update add local >/dev/null 2>&1
# Apply autologin immediately for current session
/etc/local.d/autologin.start
msg_ok "Customized Container"
fi
# 'update' helper re-runs the app's ct script from upstream.
echo "bash -c \"\$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/ct/${app}.sh)\"" >/usr/bin/update
chmod +x /usr/bin/update
if [[ -n "${SSH_AUTHORIZED_KEY}" ]]; then
mkdir -p /root/.ssh
echo "${SSH_AUTHORIZED_KEY}" >/root/.ssh/authorized_keys
chmod 700 /root/.ssh
chmod 600 /root/.ssh/authorized_keys
fi
}

View File

@@ -1,130 +0,0 @@
# Copyright (c) 2021-2026 community-scripts ORG
# Author: michelroegl-brunner
# License: MIT | https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/LICENSE
# Send anonymous LXC install telemetry to the community-scripts API.
# Silently no-ops when curl is missing, diagnostics are disabled, or no
# install UUID was generated.
# Globals read: DIAGNOSTICS, RANDOM_UUID, CT_TYPE, DISK_SIZE, CORE_COUNT,
#   RAM_SIZE, var_os, var_version, NSAPP, METHOD.
# Globals written: JSON_PAYLOAD, RESPONSE.
post_to_api() {
  if ! command -v curl &>/dev/null; then
    return
  fi
  if [ "$DIAGNOSTICS" = "no" ]; then
    return
  fi
  if [ -z "$RANDOM_UUID" ]; then
    return
  fi
  local API_URL="http://api.community-scripts.org/upload"
  local pve_version="not found"
  pve_version=$(pveversion | awk -F'[/ ]' '{print $2}')
  # NOTE(review): numeric fields are interpolated unquoted — assumes
  # CT_TYPE/DISK_SIZE/CORE_COUNT/RAM_SIZE are non-empty integers, otherwise
  # the payload is invalid JSON. TODO confirm callers guarantee this.
  JSON_PAYLOAD=$(
    cat <<EOF
{
"ct_type": $CT_TYPE,
"type":"lxc",
"disk_size": $DISK_SIZE,
"core_count": $CORE_COUNT,
"ram_size": $RAM_SIZE,
"os_type": "$var_os",
"os_version": "$var_version",
"nsapp": "$NSAPP",
"method": "$METHOD",
"pve_version": "$pve_version",
"status": "installing",
"random_id": "$RANDOM_UUID"
}
EOF
  )
  if [[ "$DIAGNOSTICS" == "yes" ]]; then
    # Best effort: never fail the install because telemetry failed.
    RESPONSE=$(curl -s -w "%{http_code}" -L -X POST "$API_URL" --post301 --post302 \
      -H "Content-Type: application/json" \
      -d "$JSON_PAYLOAD") || true
  fi
}
# VM variant of post_to_api: reads the diagnostics opt-in from the host's
# settings file, then posts anonymous VM install telemetry.
# Silently no-ops when the settings file or curl is missing, diagnostics are
# disabled, or no install UUID was generated.
# Globals read: RANDOM_UUID, DISK_SIZE, CORE_COUNT, RAM_SIZE, var_os,
#   var_version, NSAPP, METHOD.
# Globals written: DIAGNOSTICS, DISK_SIZE_API, JSON_PAYLOAD, RESPONSE.
post_to_api_vm() {
  if [[ ! -f /usr/local/community-scripts/diagnostics ]]; then
    return
  fi
  DIAGNOSTICS=$(grep -i "^DIAGNOSTICS=" /usr/local/community-scripts/diagnostics | awk -F'=' '{print $2}')
  if ! command -v curl &>/dev/null; then
    return
  fi
  if [ "$DIAGNOSTICS" = "no" ]; then
    return
  fi
  if [ -z "$RANDOM_UUID" ]; then
    return
  fi
  local API_URL="http://api.community-scripts.org/upload"
  local pve_version="not found"
  pve_version=$(pveversion | awk -F'[/ ]' '{print $2}')
  # VM disk sizes carry a trailing "G" (e.g. "8G"); strip it for the API.
  DISK_SIZE_API=${DISK_SIZE%G}
  JSON_PAYLOAD=$(
    cat <<EOF
{
"ct_type": 2,
"type":"vm",
"disk_size": $DISK_SIZE_API,
"core_count": $CORE_COUNT,
"ram_size": $RAM_SIZE,
"os_type": "$var_os",
"os_version": "$var_version",
"nsapp": "$NSAPP",
"method": "$METHOD",
"pve_version": "$pve_version",
"status": "installing",
"random_id": "$RANDOM_UUID"
}
EOF
  )
  if [[ "$DIAGNOSTICS" == "yes" ]]; then
    # Best effort: never fail the install because telemetry failed.
    RESPONSE=$(curl -s -w "%{http_code}" -L -X POST "$API_URL" --post301 --post302 \
      -H "Content-Type: application/json" \
      -d "$JSON_PAYLOAD") || true
  fi
}
# One-shot latch so the final status is reported at most once per run.
POST_UPDATE_DONE=false

# Report the final install status ("success"/"failed" + error text) to the
# community-scripts API. Runs at most once (POST_UPDATE_DONE latch).
# Arguments:
#   $1 - status string (default "failed")
#   $2 - error message (default "No error message")
# Globals read: DIAGNOSTICS, RANDOM_UUID. Globals written: JSON_PAYLOAD,
#   RESPONSE, POST_UPDATE_DONE.
post_update_to_api() {
  if ! command -v curl &>/dev/null; then
    return
  fi
  # Consistent with post_to_api/post_to_api_vm: without a UUID the record
  # cannot be correlated, so skip (also keeps set -u safe).
  if [ -z "${RANDOM_UUID:-}" ]; then
    return
  fi
  if [ "$POST_UPDATE_DONE" = true ]; then
    return 0
  fi
  local API_URL="http://api.community-scripts.org/upload/updatestatus"
  local status="${1:-failed}"
  local error="${2:-No error message}"
  # NOTE(review): $error is interpolated raw — quotes/newlines in the message
  # would break the JSON. TODO confirm callers pass sanitized text.
  JSON_PAYLOAD=$(
    cat <<EOF
{
"status": "$status",
"error": "$error",
"random_id": "$RANDOM_UUID"
}
EOF
  )
  if [[ "${DIAGNOSTICS:-}" == "yes" ]]; then
    # Best effort: telemetry failure must never abort the caller.
    RESPONSE=$(curl -s -w "%{http_code}" -L -X POST "$API_URL" --post301 --post302 \
      -H "Content-Type: application/json" \
      -d "$JSON_PAYLOAD") || true
  fi
  POST_UPDATE_DONE=true
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,699 +0,0 @@
# Interactive/config-file driven LXC creation settings collector.
# Loads a .conf/.settings file, then for every setting either validates the
# preset value or prompts via whiptail. Populates the many globals consumed
# by the container-creation code (CT_ID, CT_TYPE, PW, HN, DISK_SIZE,
# CORE_COUNT, RAM_SIZE, BRG, NET, GATE, MTU, IP6, SD, NS, MAC, VLAN, TAGS,
# SSH, ENABLE_FUSE, ENABLE_TUN, VERBOSE, ...).
config_file() {
  # --- Select and source the config file -----------------------------------
  CONFIG_FILE="/opt/community-scripts/.settings"
  if [[ -f "/opt/community-scripts/${NSAPP}.conf" ]]; then
    CONFIG_FILE="/opt/community-scripts/${NSAPP}.conf"
  fi
  if CONFIG_FILE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set absolute path to config file" 8 58 "$CONFIG_FILE" --title "CONFIG FILE" 3>&1 1>&2 2>&3); then
    if [[ ! -f "$CONFIG_FILE" ]]; then
      echo -e "${CROSS}${RD}Config file not found, exiting script!.${CL}"
      exit
    else
      echo -e "${INFO}${BOLD}${DGN}Using config File: ${BGN}$CONFIG_FILE${CL}"
      source "$CONFIG_FILE"
    fi
  fi
  # --- Container ID: accepts a single ID or a "min-max" range --------------
  if [[ -n "${CT_ID-}" ]]; then
    if [[ "$CT_ID" =~ ^([0-9]{3,4})-([0-9]{3,4})$ ]]; then
      MIN_ID=${BASH_REMATCH[1]}
      MAX_ID=${BASH_REMATCH[2]}
      if ((MIN_ID >= MAX_ID)); then
        msg_error "Invalid Container ID range. The first number must be smaller than the second number, was ${CT_ID}"
        exit
      fi
      # Pick the first ID in the range not already used by any VM/CT.
      LIST_OF_IDS=$(pvesh get /cluster/resources --type vm --output-format json 2>/dev/null | grep -oP '"vmid":\s*\K\d+') || true
      if [[ -n "$LIST_OF_IDS" ]]; then
        for ((ID = MIN_ID; ID <= MAX_ID; ID++)); do
          if ! grep -q "^$ID$" <<<"$LIST_OF_IDS"; then
            CT_ID=$ID
            break
          fi
        done
      fi
      echo -e "${CONTAINERID}${BOLD}${DGN}Container ID: ${BGN}$CT_ID${CL}"
    elif [[ "$CT_ID" =~ ^[0-9]+$ ]]; then
      # Single fixed ID: refuse if it is already taken.
      LIST_OF_IDS=$(pvesh get /cluster/resources --type vm --output-format json 2>/dev/null | grep -oP '"vmid":\s*\K\d+') || true
      if [[ -n "$LIST_OF_IDS" ]]; then
        if ! grep -q "^$CT_ID$" <<<"$LIST_OF_IDS"; then
          echo -e "${CONTAINERID}${BOLD}${DGN}Container ID: ${BGN}$CT_ID${CL}"
        else
          msg_error "Container ID $CT_ID already exists"
          exit
        fi
      else
        echo -e "${CONTAINERID}${BOLD}${DGN}Container ID: ${BGN}$CT_ID${CL}"
      fi
    else
      msg_error "Invalid Container ID format. Needs to be 0000-9999 or 0-9999, was ${CT_ID}"
      exit
    fi
  else
    if CT_ID=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Container ID" 8 58 "$NEXTID" --title "CONTAINER ID" 3>&1 1>&2 2>&3); then
      if [ -z "$CT_ID" ]; then
        CT_ID="$NEXTID"
        echo -e "${CONTAINERID}${BOLD}${DGN}Container ID: ${BGN}$CT_ID${CL}"
      else
        echo -e "${CONTAINERID}${BOLD}${DGN}Container ID: ${BGN}$CT_ID${CL}"
      fi
    else
      exit_script
    fi
  fi
  # --- Container type: 0 = privileged, 1 = unprivileged --------------------
  if [[ -n "${CT_TYPE-}" ]]; then
    if [[ "$CT_TYPE" -eq 0 ]]; then
      CT_TYPE_DESC="Privileged"
    elif [[ "$CT_TYPE" -eq 1 ]]; then
      CT_TYPE_DESC="Unprivileged"
    else
      msg_error "Unknown setting for CT_TYPE, should be 1 or 0, was ${CT_TYPE}"
      exit
    fi
    echo -e "${CONTAINERTYPE}${BOLD}${DGN}Container Type: ${BGN}$CT_TYPE_DESC${CL}"
  else
    if CT_TYPE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "CONTAINER TYPE" --radiolist "Choose Type" 10 58 2 \
      "1" "Unprivileged" ON \
      "0" "Privileged" OFF \
      3>&1 1>&2 2>&3); then
      if [ -n "$CT_TYPE" ]; then
        CT_TYPE_DESC="Unprivileged"
        if [ "$CT_TYPE" -eq 0 ]; then
          CT_TYPE_DESC="Privileged"
        fi
        echo -e "${CONTAINERTYPE}${BOLD}${DGN}Container Type: ${BGN}$CT_TYPE_DESC${CL}"
      fi
    else
      exit_script
    fi
  fi
  # --- Root password: "none" => autologin; otherwise min 5 chars, no spaces
  if [[ -n "${PW-}" ]]; then
    if [[ "$PW" == "none" ]]; then
      PW=""
    else
      if [[ "$PW" == *" "* ]]; then
        msg_error "Password cannot be empty"
        exit
      elif [[ ${#PW} -lt 5 ]]; then
        msg_error "Password must be at least 5 characters long"
        exit
      else
        echo -e "${VERIFYPW}${BOLD}${DGN}Root Password: ${BGN}********${CL}"
      fi
      # Stored in pct-option form ("-password <pw>") for the create call.
      PW="-password $PW"
    fi
  else
    while true; do
      if PW1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --passwordbox "\nSet Root Password (needed for root ssh access)" 9 58 --title "PASSWORD (leave blank for automatic login)" 3>&1 1>&2 2>&3); then
        if [[ -n "$PW1" ]]; then
          if [[ "$PW1" == *" "* ]]; then
            whiptail --msgbox "Password cannot contain spaces. Please try again." 8 58
          elif [ ${#PW1} -lt 5 ]; then
            whiptail --msgbox "Password must be at least 5 characters long. Please try again." 8 58
          else
            if PW2=$(whiptail --backtitle "Proxmox VE Helper Scripts" --passwordbox "\nVerify Root Password" 9 58 --title "PASSWORD VERIFICATION" 3>&1 1>&2 2>&3); then
              if [[ "$PW1" == "$PW2" ]]; then
                PW="-password $PW1"
                echo -e "${VERIFYPW}${BOLD}${DGN}Root Password: ${BGN}********${CL}"
                break
              else
                whiptail --msgbox "Passwords do not match. Please try again." 8 58
              fi
            else
              exit_script
            fi
          fi
        else
          PW1="Automatic Login"
          PW=""
          echo -e "${VERIFYPW}${BOLD}${DGN}Root Password: ${BGN}$PW1${CL}"
          break
        fi
      else
        exit_script
      fi
    done
  fi
  # --- Hostname (lowercased, spaces stripped) ------------------------------
  if [[ -n "${HN-}" ]]; then
    echo -e "${HOSTNAME}${BOLD}${DGN}Hostname: ${BGN}$HN${CL}"
  else
    if CT_NAME=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Hostname" 8 58 "$NSAPP" --title "HOSTNAME" 3>&1 1>&2 2>&3); then
      if [ -z "$CT_NAME" ]; then
        HN="$NSAPP"
      else
        HN=$(echo "${CT_NAME,,}" | tr -d ' ')
      fi
      echo -e "${HOSTNAME}${BOLD}${DGN}Hostname: ${BGN}$HN${CL}"
    else
      exit_script
    fi
  fi
  # --- Disk size (GB, integer) ---------------------------------------------
  if [[ -n "${DISK_SIZE-}" ]]; then
    if [[ "$DISK_SIZE" =~ ^-?[0-9]+$ ]]; then
      echo -e "${DISKSIZE}${BOLD}${DGN}Disk Size: ${BGN}${DISK_SIZE} GB${CL}"
    else
      msg_error "DISK_SIZE must be an integer, was ${DISK_SIZE}"
      exit
    fi
  else
    if DISK_SIZE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Disk Size in GB" 8 58 "$var_disk" --title "DISK SIZE" 3>&1 1>&2 2>&3); then
      if [ -z "$DISK_SIZE" ]; then
        DISK_SIZE="$var_disk"
        echo -e "${DISKSIZE}${BOLD}${DGN}Disk Size: ${BGN}${DISK_SIZE} GB${CL}"
      else
        if ! [[ $DISK_SIZE =~ $INTEGER ]]; then
          # NOTE(review): "{INFO}" is missing a "$" — prints literally. TODO confirm intent.
          echo -e "{INFO}${HOLD}${RD} DISK SIZE MUST BE AN INTEGER NUMBER!${CL}"
          advanced_settings
        fi
        echo -e "${DISKSIZE}${BOLD}${DGN}Disk Size: ${BGN}${DISK_SIZE} GB${CL}"
      fi
    else
      exit_script
    fi
  fi
  # --- CPU cores (integer) -------------------------------------------------
  if [[ -n "${CORE_COUNT-}" ]]; then
    if [[ "$CORE_COUNT" =~ ^-?[0-9]+$ ]]; then
      echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}${CORE_COUNT}${CL}"
    else
      msg_error "CORE_COUNT must be an integer, was ${CORE_COUNT}"
      exit
    fi
  else
    if CORE_COUNT=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate CPU Cores" 8 58 "$var_cpu" --title "CORE COUNT" 3>&1 1>&2 2>&3); then
      if [ -z "$CORE_COUNT" ]; then
        CORE_COUNT="$var_cpu"
        echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}$CORE_COUNT${CL}"
      else
        echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}$CORE_COUNT${CL}"
      fi
    else
      exit_script
    fi
  fi
  # --- RAM (MiB, integer) --------------------------------------------------
  if [[ -n "${RAM_SIZE-}" ]]; then
    if [[ "$RAM_SIZE" =~ ^-?[0-9]+$ ]]; then
      echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}${RAM_SIZE} MiB${CL}"
    else
      msg_error "RAM_SIZE must be an integer, was ${RAM_SIZE}"
      exit
    fi
  else
    if RAM_SIZE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate RAM in MiB" 8 58 "$var_ram" --title "RAM" 3>&1 1>&2 2>&3); then
      if [ -z "$RAM_SIZE" ]; then
        RAM_SIZE="$var_ram"
        echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}${RAM_SIZE} MiB${CL}"
      else
        echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}${RAM_SIZE} MiB${CL}"
      fi
    else
      exit_script
    fi
  fi
  # --- Discover Linux/OVS bridges from /etc/network/interfaces(.d) ---------
  IFACE_FILEPATH_LIST="/etc/network/interfaces"$'\n'$(find "/etc/network/interfaces.d/" -type f)
  BRIDGES=""
  OLD_IFS=$IFS
  IFS=$'\n'
  for iface_filepath in ${IFACE_FILEPATH_LIST}; do
    # Build "start:end" line-range pairs, one per iface stanza in the file.
    iface_indexes_tmpfile=$(mktemp -q -u '.iface-XXXX')
    (grep -Pn '^\s*iface' "${iface_filepath}" | cut -d':' -f1 && wc -l "${iface_filepath}" | cut -d' ' -f1) | awk 'FNR==1 {line=$0; next} {print line":"$0-1; line=$0}' >"${iface_indexes_tmpfile}" || true
    if [ -f "${iface_indexes_tmpfile}" ]; then
      while read -r pair; do
        start=$(echo "${pair}" | cut -d':' -f1)
        end=$(echo "${pair}" | cut -d':' -f2)
        # A stanza is a bridge if it carries bridge-* or OVSBridge options.
        if awk "NR >= ${start} && NR <= ${end}" "${iface_filepath}" | grep -qP '^\s*(bridge[-_](ports|stp|fd|vlan-aware|vids)|ovs_type\s+OVSBridge)\b'; then
          iface_name=$(sed "${start}q;d" "${iface_filepath}" | awk '{print $2}')
          BRIDGES="${iface_name}"$'\n'"${BRIDGES}"
        fi
      done <"${iface_indexes_tmpfile}"
      rm -f "${iface_indexes_tmpfile}"
    fi
  done
  IFS=$OLD_IFS
  BRIDGES=$(echo "$BRIDGES" | grep -v '^\s*$' | sort | uniq)
  # --- Bridge selection ----------------------------------------------------
  if [[ -n "${BRG-}" ]]; then
    if echo "$BRIDGES" | grep -q "${BRG}"; then
      echo -e "${BRIDGE}${BOLD}${DGN}Bridge: ${BGN}$BRG${CL}"
    else
      msg_error "Bridge '${BRG}' does not exist in /etc/network/interfaces or /etc/network/interfaces.d/sdn"
      exit
    fi
  else
    BRG=$(whiptail --backtitle "Proxmox VE Helper Scripts" --menu "Select network bridge:" 15 40 6 $(echo "$BRIDGES" | awk '{print $0, "Bridge"}') 3>&1 1>&2 2>&3)
    if [ -z "$BRG" ]; then
      exit_script
    else
      echo -e "${BRIDGE}${BOLD}${DGN}Bridge: ${BGN}$BRG${CL}"
    fi
  fi
  # --- IPv4: dhcp | CIDR | CIDR range (first free IP is probed via ping) ---
  local ip_cidr_regex='^([0-9]{1,3})\.([0-9]{1,3})\.([0-9]{1,3})\.([0-9]{1,3})/([0-9]{1,2})$'
  local ip_regex='^([0-9]{1,3})\.([0-9]{1,3})\.([0-9]{1,3})\.([0-9]{1,3})$'
  if [[ -n ${NET-} ]]; then
    if [ "$NET" == "dhcp" ]; then
      echo -e "${NETWORK}${BOLD}${DGN}IP Address: ${BGN}DHCP${CL}"
      echo -e "${GATEWAY}${BOLD}${DGN}Gateway IP Address: ${BGN}Default${CL}"
      GATE=""
    elif [[ "$NET" =~ $ip_cidr_regex ]]; then
      echo -e "${NETWORK}${BOLD}${DGN}IP Address: ${BGN}$NET${CL}"
      if [[ -n "$GATE" ]]; then
        # Accept either a bare IP or an already-prefixed ",gw=IP" value.
        [[ "$GATE" =~ ",gw=" ]] && GATE="${GATE##,gw=}"
        if [[ "$GATE" =~ $ip_regex ]]; then
          echo -e "${GATEWAY}${BOLD}${DGN}Gateway IP Address: ${BGN}$GATE${CL}"
          GATE=",gw=$GATE"
        else
          msg_error "Invalid IP Address format for Gateway. Needs to be 0.0.0.0, was ${GATE}"
          exit
        fi
      else
        while true; do
          GATE1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Enter gateway IP address" 8 58 --title "Gateway IP" 3>&1 1>&2 2>&3)
          if [ -z "$GATE1" ]; then
            whiptail --backtitle "Proxmox VE Helper Scripts" --msgbox "Gateway IP address cannot be empty" 8 58
          elif [[ ! "$GATE1" =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}$ ]]; then
            whiptail --backtitle "Proxmox VE Helper Scripts" --msgbox "Invalid IP address format" 8 58
          else
            GATE=",gw=$GATE1"
            echo -e "${GATEWAY}${BOLD}${DGN}Gateway IP Address: ${BGN}$GATE1${CL}"
            break
          fi
        done
      fi
    elif [[ "$NET" == *-* ]]; then
      # Range form "a.b.c.d/nn-a.b.c.e/nn": scan for the first unused IP.
      IFS="-" read -r ip_start ip_end <<<"$NET"
      if [[ ! "$ip_start" =~ $ip_cidr_regex ]] || [[ ! "$ip_end" =~ $ip_cidr_regex ]]; then
        msg_error "Invalid IP range format, was $NET should be 0.0.0.0/0-0.0.0.0/0"
        exit 1
      fi
      ip1="${ip_start%%/*}"
      ip2="${ip_end%%/*}"
      cidr="${ip_start##*/}"
      # Convert dotted quad <-> 32-bit int for iteration over the range.
      ip_to_int() {
        local IFS=.
        read -r i1 i2 i3 i4 <<<"$1"
        echo $(((i1 << 24) + (i2 << 16) + (i3 << 8) + i4))
      }
      int_to_ip() {
        local ip=$1
        echo "$(((ip >> 24) & 0xFF)).$(((ip >> 16) & 0xFF)).$(((ip >> 8) & 0xFF)).$((ip & 0xFF))"
      }
      start_int=$(ip_to_int "$ip1")
      end_int=$(ip_to_int "$ip2")
      for ((ip_int = start_int; ip_int <= end_int; ip_int++)); do
        ip=$(int_to_ip $ip_int)
        msg_info "Checking IP: $ip"
        # No ping reply => treat the address as free.
        if ! ping -c 2 -W 1 "$ip" >/dev/null 2>&1; then
          NET="$ip/$cidr"
          msg_ok "Using free IP Address: ${BGN}$NET${CL}"
          sleep 3
          break
        fi
      done
      # NET still containing "-" means no candidate was free.
      if [[ "$NET" == *-* ]]; then
        msg_error "No free IP found in range"
        exit 1
      fi
      if [ -n "$GATE" ]; then
        if [[ "$GATE" =~ $ip_regex ]]; then
          echo -e "${GATEWAY}${BOLD}${DGN}Gateway IP Address: ${BGN}$GATE${CL}"
          GATE=",gw=$GATE"
        else
          msg_error "Invalid IP Address format for Gateway. Needs to be 0.0.0.0, was ${GATE}"
          exit
        fi
      else
        while true; do
          GATE1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Enter gateway IP address" 8 58 --title "Gateway IP" 3>&1 1>&2 2>&3)
          if [ -z "$GATE1" ]; then
            whiptail --backtitle "Proxmox VE Helper Scripts" --msgbox "Gateway IP address cannot be empty" 8 58
          elif [[ ! "$GATE1" =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}$ ]]; then
            whiptail --backtitle "Proxmox VE Helper Scripts" --msgbox "Invalid IP address format" 8 58
          else
            GATE=",gw=$GATE1"
            echo -e "${GATEWAY}${BOLD}${DGN}Gateway IP Address: ${BGN}$GATE1${CL}"
            break
          fi
        done
      fi
    else
      msg_error "Invalid IP Address format. Needs to be 0.0.0.0/0 or a range like 10.0.0.1/24-10.0.0.10/24, was ${NET}"
      exit
    fi
  else
    while true; do
      NET=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a Static IPv4 CIDR Address (/24)" 8 58 dhcp --title "IP ADDRESS" 3>&1 1>&2 2>&3)
      exit_status=$?
      if [ $exit_status -eq 0 ]; then
        if [ "$NET" = "dhcp" ]; then
          echo -e "${NETWORK}${BOLD}${DGN}IP Address: ${BGN}$NET${CL}"
          break
        else
          if [[ "$NET" =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}/([0-9]|[1-2][0-9]|3[0-2])$ ]]; then
            echo -e "${NETWORK}${BOLD}${DGN}IP Address: ${BGN}$NET${CL}"
            break
          else
            whiptail --backtitle "Proxmox VE Helper Scripts" --msgbox "$NET is an invalid IPv4 CIDR address. Please enter a valid IPv4 CIDR address or 'dhcp'" 8 58
          fi
        fi
      else
        exit_script
      fi
    done
    if [ "$NET" != "dhcp" ]; then
      while true; do
        GATE1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Enter gateway IP address" 8 58 --title "Gateway IP" 3>&1 1>&2 2>&3)
        if [ -z "$GATE1" ]; then
          whiptail --backtitle "Proxmox VE Helper Scripts" --msgbox "Gateway IP address cannot be empty" 8 58
        elif [[ ! "$GATE1" =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}$ ]]; then
          whiptail --backtitle "Proxmox VE Helper Scripts" --msgbox "Invalid IP address format" 8 58
        else
          GATE=",gw=$GATE1"
          echo -e "${GATEWAY}${BOLD}${DGN}Gateway IP Address: ${BGN}$GATE1${CL}"
          break
        fi
      done
    else
      GATE=""
      echo -e "${GATEWAY}${BOLD}${DGN}Gateway IP Address: ${BGN}Default${CL}"
    fi
  fi
  # --- APT cacher (not applicable to Alpine) -------------------------------
  if [ "$var_os" == "alpine" ]; then
    APT_CACHER=""
    APT_CACHER_IP=""
  else
    if [[ -n "${APT_CACHER_IP-}" ]]; then
      if [[ ! $APT_CACHER_IP == "none" ]]; then
        APT_CACHER="yes"
        echo -e "${NETWORK}${BOLD}${DGN}APT-CACHER IP Address: ${BGN}$APT_CACHER_IP${CL}"
      else
        APT_CACHER=""
        echo -e "${NETWORK}${BOLD}${DGN}APT-Cacher IP Address: ${BGN}No${CL}"
      fi
    else
      if APT_CACHER_IP=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set APT-Cacher IP (leave blank for none)" 8 58 --title "APT-Cacher IP" 3>&1 1>&2 2>&3); then
        APT_CACHER="${APT_CACHER_IP:+yes}"
        echo -e "${NETWORK}${BOLD}${DGN}APT-Cacher IP Address: ${BGN}${APT_CACHER_IP:-Default}${CL}"
        # NOTE(review): this overwrites a *non-empty* user-entered IP with
        # "none" — the condition looks inverted. TODO confirm against callers.
        if [[ -n $APT_CACHER_IP ]]; then
          APT_CACHER_IP="none"
        fi
      else
        exit_script
      fi
    fi
  fi
  # --- MTU (stored in pct option form ",mtu=N") ----------------------------
  if [[ -n "${MTU-}" ]]; then
    if [[ "$MTU" =~ ^-?[0-9]+$ ]]; then
      echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}$MTU${CL}"
      MTU=",mtu=$MTU"
    else
      msg_error "MTU must be an integer, was ${MTU}"
      exit
    fi
  else
    if MTU1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Interface MTU Size (leave blank for default [The MTU of your selected vmbr, default is 1500])" 8 58 --title "MTU SIZE" 3>&1 1>&2 2>&3); then
      if [ -z "$MTU1" ]; then
        MTU1="Default"
        MTU=""
      else
        MTU=",mtu=$MTU1"
      fi
      echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}$MTU1${CL}"
    else
      exit_script
    fi
  fi
  # --- IPv6: static | auto | none (IP6 holds the ",ip6=..." suffix) --------
  if [[ "$IPV6_METHOD" == "static" ]]; then
    if [[ -n "$IPV6STATIC" ]]; then
      IP6=",ip6=${IPV6STATIC}"
      echo -e "${NETWORK}${BOLD}${DGN}IPv6 Address: ${BGN}${IPV6STATIC}${CL}"
    else
      msg_error "IPV6_METHOD is set to static but IPV6STATIC is empty"
      exit
    fi
  elif [[ "$IPV6_METHOD" == "auto" ]]; then
    IP6=",ip6=auto"
    echo -e "${NETWORK}${BOLD}${DGN}IPv6 Address: ${BGN}auto${CL}"
  else
    IP6=""
    echo -e "${NETWORK}${BOLD}${DGN}IPv6 Address: ${BGN}none${CL}"
  fi
  # --- DNS search domain (stored as "-searchdomain=...") -------------------
  if [[ -n "${SD-}" ]]; then
    if [[ "$SD" == "none" ]]; then
      SD=""
      echo -e "${SEARCH}${BOLD}${DGN}DNS Search Domain: ${BGN}Host${CL}"
    else
      # Strip prefix if present for config file storage
      local SD_VALUE="$SD"
      [[ "$SD" =~ ^-searchdomain= ]] && SD_VALUE="${SD#-searchdomain=}"
      echo -e "${SEARCH}${BOLD}${DGN}DNS Search Domain: ${BGN}$SD_VALUE${CL}"
      SD="-searchdomain=$SD_VALUE"
    fi
  else
    if SD=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a DNS Search Domain (leave blank for HOST)" 8 58 --title "DNS Search Domain" 3>&1 1>&2 2>&3); then
      if [ -z "$SD" ]; then
        SX=Host
        SD=""
      else
        SX=$SD
        SD="-searchdomain=$SD"
      fi
      echo -e "${SEARCH}${BOLD}${DGN}DNS Search Domain: ${BGN}$SX${CL}"
    else
      exit_script
    fi
  fi
  # --- DNS server (stored as "-nameserver=...") ----------------------------
  if [[ -n "${NS-}" ]]; then
    if [[ $NS == "none" ]]; then
      NS=""
      echo -e "${NETWORK}${BOLD}${DGN}DNS Server IP Address: ${BGN}Host${CL}"
    else
      # Strip prefix if present for config file storage
      local NS_VALUE="$NS"
      [[ "$NS" =~ ^-nameserver= ]] && NS_VALUE="${NS#-nameserver=}"
      if [[ "$NS_VALUE" =~ $ip_regex ]]; then
        echo -e "${NETWORK}${BOLD}${DGN}DNS Server IP Address: ${BGN}$NS_VALUE${CL}"
        NS="-nameserver=$NS_VALUE"
      else
        msg_error "Invalid IP Address format for DNS Server. Needs to be 0.0.0.0, was ${NS_VALUE}"
        exit
      fi
    fi
  else
    if NX=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a DNS Server IP (leave blank for HOST)" 8 58 --title "DNS SERVER IP" 3>&1 1>&2 2>&3); then
      if [ -z "$NX" ]; then
        NX=Host
        NS=""
      else
        NS="-nameserver=$NX"
      fi
      echo -e "${NETWORK}${BOLD}${DGN}DNS Server IP Address: ${BGN}$NX${CL}"
    else
      exit_script
    fi
  fi
  # --- MAC address (stored as ",hwaddr=...") -------------------------------
  if [[ -n "${MAC-}" ]]; then
    if [[ "$MAC" == "none" ]]; then
      MAC=""
      echo -e "${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}Host${CL}"
    else
      # Strip prefix if present for config file storage
      local MAC_VALUE="$MAC"
      [[ "$MAC" =~ ^,hwaddr= ]] && MAC_VALUE="${MAC#,hwaddr=}"
      if [[ "$MAC_VALUE" =~ ^([A-Fa-f0-9]{2}:){5}[A-Fa-f0-9]{2}$ ]]; then
        echo -e "${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}$MAC_VALUE${CL}"
        MAC=",hwaddr=$MAC_VALUE"
      else
        msg_error "MAC Address must be in the format xx:xx:xx:xx:xx:xx, was ${MAC_VALUE}"
        exit
      fi
    fi
  else
    if MAC1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a MAC Address(leave blank for generated MAC)" 8 58 --title "MAC ADDRESS" 3>&1 1>&2 2>&3); then
      if [ -z "$MAC1" ]; then
        MAC1="Default"
        MAC=""
      else
        MAC=",hwaddr=$MAC1"
        echo -e "${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}$MAC1${CL}"
      fi
    else
      exit_script
    fi
  fi
  # --- VLAN tag (stored as ",tag=N") ---------------------------------------
  if [[ -n "${VLAN-}" ]]; then
    if [[ "$VLAN" == "none" ]]; then
      VLAN=""
      echo -e "${VLANTAG}${BOLD}${DGN}Vlan: ${BGN}Host${CL}"
    else
      # Strip prefix if present for config file storage
      local VLAN_VALUE="$VLAN"
      [[ "$VLAN" =~ ^,tag= ]] && VLAN_VALUE="${VLAN#,tag=}"
      if [[ "$VLAN_VALUE" =~ ^-?[0-9]+$ ]]; then
        echo -e "${VLANTAG}${BOLD}${DGN}Vlan: ${BGN}$VLAN_VALUE${CL}"
        VLAN=",tag=$VLAN_VALUE"
      else
        msg_error "VLAN must be an integer, was ${VLAN_VALUE}"
        exit
      fi
    fi
  else
    if VLAN1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a Vlan(leave blank for no VLAN)" 8 58 --title "VLAN" 3>&1 1>&2 2>&3); then
      if [ -z "$VLAN1" ]; then
        VLAN1="Default"
        VLAN=""
      else
        VLAN=",tag=$VLAN1"
      fi
      echo -e "${VLANTAG}${BOLD}${DGN}Vlan: ${BGN}$VLAN1${CL}"
    else
      exit_script
    fi
  fi
  # --- Proxmox tags ("DEFAULT" expands to the app's var_tags) --------------
  if [[ -n "${TAGS-}" ]]; then
    if [[ "$TAGS" == *"DEFAULT"* ]]; then
      TAGS="${TAGS//DEFAULT/}"
      TAGS="${TAGS//;/}"
      TAGS="$TAGS;${var_tags:-}"
      echo -e "${NETWORK}${BOLD}${DGN}Tags: ${BGN}$TAGS${CL}"
    fi
  else
    TAGS="community-scripts;"
    if ADV_TAGS=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Custom Tags?[If you remove all, there will be no tags!]" 8 58 "${TAGS}" --title "Advanced Tags" 3>&1 1>&2 2>&3); then
      if [ -n "${ADV_TAGS}" ]; then
        ADV_TAGS=$(echo "$ADV_TAGS" | tr -d '[:space:]')
        TAGS="${ADV_TAGS}"
      else
        TAGS=";"
      fi
      echo -e "${NETWORK}${BOLD}${DGN}Tags: ${BGN}$TAGS${CL}"
    else
      exit_script
    fi
  fi
  # --- Root SSH access + authorized key ------------------------------------
  if [[ -n "${SSH-}" ]]; then
    if [[ "$SSH" == "yes" ]]; then
      echo -e "${ROOTSSH}${BOLD}${DGN}Root SSH Access: ${BGN}$SSH${CL}"
      if [[ ! -z "$SSH_AUTHORIZED_KEY" ]]; then
        echo -e "${ROOTSSH}${BOLD}${DGN}SSH Authorized Key: ${BGN}********************${CL}"
      else
        echo -e "${ROOTSSH}${BOLD}${DGN}SSH Authorized Key: ${BGN}None${CL}"
      fi
    elif [[ "$SSH" == "no" ]]; then
      echo -e "${ROOTSSH}${BOLD}${DGN}Root SSH Access: ${BGN}$SSH${CL}"
    else
      msg_error "SSH needs to be 'yes' or 'no', was ${SSH}"
      exit
    fi
  else
    SSH_AUTHORIZED_KEY="$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "SSH Authorized key for root (leave empty for none)" 8 58 --title "SSH Key" 3>&1 1>&2 2>&3)"
    if [[ -z "${SSH_AUTHORIZED_KEY}" ]]; then
      SSH_AUTHORIZED_KEY=""
    fi
    # SSH access only makes sense with a password or a key to log in with.
    if [[ "$PW" == -password* || -n "$SSH_AUTHORIZED_KEY" ]]; then
      if (whiptail --backtitle "Proxmox VE Helper Scripts" --defaultno --title "SSH ACCESS" --yesno "Enable Root SSH Access?" 10 58); then
        SSH="yes"
      else
        SSH="no"
      fi
      echo -e "${ROOTSSH}${BOLD}${DGN}Root SSH Access: ${BGN}$SSH${CL}"
    else
      SSH="no"
      echo -e "${ROOTSSH}${BOLD}${DGN}Root SSH Access: ${BGN}$SSH${CL}"
    fi
  fi
  # --- FUSE support --------------------------------------------------------
  if [[ -n "$ENABLE_FUSE" ]]; then
    if [[ "$ENABLE_FUSE" == "yes" ]]; then
      echo -e "${FUSE}${BOLD}${DGN}Enable FUSE: ${BGN}Yes${CL}"
    elif [[ "$ENABLE_FUSE" == "no" ]]; then
      echo -e "${FUSE}${BOLD}${DGN}Enable FUSE: ${BGN}No${CL}"
    else
      msg_error "Enable FUSE needs to be 'yes' or 'no', was ${ENABLE_FUSE}"
      exit
    fi
  else
    if (whiptail --backtitle "Proxmox VE Helper Scripts" --defaultno --title "FUSE" --yesno "Enable FUSE?" 10 58); then
      ENABLE_FUSE="yes"
    else
      ENABLE_FUSE="no"
    fi
    echo -e "${FUSE}${BOLD}${DGN}Enable FUSE: ${BGN}$ENABLE_FUSE${CL}"
  fi
  # --- TUN device support --------------------------------------------------
  if [[ -n "$ENABLE_TUN" ]]; then
    if [[ "$ENABLE_TUN" == "yes" ]]; then
      echo -e "${FUSE}${BOLD}${DGN}Enable TUN: ${BGN}Yes${CL}"
    elif [[ "$ENABLE_TUN" == "no" ]]; then
      echo -e "${FUSE}${BOLD}${DGN}Enable TUN: ${BGN}No${CL}"
    else
      msg_error "Enable TUN needs to be 'yes' or 'no', was ${ENABLE_TUN}"
      exit
    fi
  else
    if (whiptail --backtitle "Proxmox VE Helper Scripts" --defaultno --title "TUN" --yesno "Enable TUN?" 10 58); then
      ENABLE_TUN="yes"
    else
      ENABLE_TUN="no"
    fi
    echo -e "${FUSE}${BOLD}${DGN}Enable TUN: ${BGN}$ENABLE_TUN${CL}"
  fi
  # --- Verbose mode --------------------------------------------------------
  if [[ -n "${VERBOSE-}" ]]; then
    if [[ "$VERBOSE" == "yes" ]]; then
      echo -e "${SEARCH}${BOLD}${DGN}Verbose Mode: ${BGN}$VERBOSE${CL}"
    elif [[ "$VERBOSE" == "no" ]]; then
      echo -e "${SEARCH}${BOLD}${DGN}Verbose Mode: ${BGN}No${CL}"
    else
      msg_error "Verbose Mode needs to be 'yes' or 'no', was ${VERBOSE}"
      exit
    fi
  else
    if (whiptail --backtitle "Proxmox VE Helper Scripts" --defaultno --title "VERBOSE MODE" --yesno "Enable Verbose Mode?" 10 58); then
      VERBOSE="yes"
    else
      VERBOSE="no"
    fi
    echo -e "${SEARCH}${BOLD}${DGN}Verbose Mode: ${BGN}$VERBOSE${CL}"
  fi
  # --- Final confirmation: decline restarts the whole wizard ---------------
  if (whiptail --backtitle "Proxmox VE Helper Scripts" --title "ADVANCED SETTINGS WITH CONFIG FILE COMPLETE" --yesno "Ready to create ${APP} LXC?" 10 58); then
    echo -e "${CREATING}${BOLD}${RD}Creating a ${APP} LXC using the above settings${CL}"
  else
    clear
    header_info
    echo -e "${INFO}${HOLD} ${GN}Using Config File on node $PVEHOST_NAME${CL}"
    config_file
  fi
}

View File

@@ -1,452 +0,0 @@
# Copyright (c) 2021-2026 community-scripts ORG
# License: MIT | https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/LICENSE
# ------------------------------------------------------------------------------
# Loads core utility groups once (colors, formatting, icons, defaults).
# ------------------------------------------------------------------------------
[[ -n "${_CORE_FUNC_LOADED:-}" ]] && return
_CORE_FUNC_LOADED=1
# Initialize all shared helper state exactly once per process:
# colors, formatting strings, icons, retry defaults, and the $STD wrapper.
# Safe to call repeatedly; a module-level flag makes subsequent calls no-ops.
load_functions() {
  [[ -n "${__FUNCTIONS_LOADED:-}" ]] && return
  __FUNCTIONS_LOADED=1
  color
  formatting
  icons
  default_vars
  set_std_mode
  # add more
}
# ============================================================================
# Error & Signal Handling robust, universal, subshell-safe
# ============================================================================
# Translate a failed command name + exit code into a one-line human hint.
# Arguments:
#   $1 - name of the command that failed
#   $2 - its exit code (only consulted for curl)
# Outputs: the hint on stdout; an empty line for unrecognized commands.
_tool_error_hint() {
  local failed_cmd="$1" rc="$2" hint=""
  case "$failed_cmd" in
  curl)
    # curl exit codes have well-defined meanings; map the common ones.
    case "$rc" in
    6) hint="Curl: Could not resolve host (DNS problem)" ;;
    7) hint="Curl: Failed to connect to host (connection refused)" ;;
    22) hint="Curl: HTTP error (404/403 etc)" ;;
    28) hint="Curl: Operation timeout" ;;
    *) hint="Curl: Unknown error ($rc)" ;;
    esac
    ;;
  wget) hint="Wget failed URL unreachable or permission denied" ;;
  systemctl) hint="Systemd unit failure check service name and permissions" ;;
  jq) hint="jq parse error malformed JSON or missing key" ;;
  mariadb | mysql) hint="MySQL/MariaDB command failed check credentials or DB" ;;
  unzip) hint="unzip failed corrupt file or missing permission" ;;
  tar) hint="tar failed invalid format or missing binary" ;;
  node | npm | pnpm | yarn) hint="Node tool failed check version compatibility or package.json" ;;
  esac
  echo "$hint"
}
# Enable strict mode (-E propagates the ERR trap into functions/subshells)
# and route every unhandled failure through error_handler with line/command.
catch_errors() {
  set -Eeuo pipefail
  trap 'error_handler $LINENO "$BASH_COMMAND"' ERR
}
# ------------------------------------------------------------------------------
# Sets ANSI color codes used for styled terminal output.
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# Sets ANSI color codes used for styled terminal output.
# NOTE: most values are the *literal* text "\033[..m" (bash's echo does not
# interpret escapes by default), expanded later by `echo -e` at print time.
# The original wrapped each in a useless `$(echo ...)` subshell; plain
# assignment yields byte-identical values without forking.
# ------------------------------------------------------------------------------
color() {
  YW="\033[33m"
  YWB=$'\e[93m' # real escape byte, usable with printf/plain echo
  BL="\033[36m"
  RD="\033[01;31m"
  BGN="\033[4;92m"
  GN="\033[1;92m"
  DGN="\033[32m"
  CL="\033[m"
}
# Special for spinner and colorized output via printf
# Special for spinner and colorized output via printf
# Unlike color(), these hold *real* escape bytes ($'...'), since the spinner
# prints them with printf %s rather than echo -e.
color_spinner() {
  CS_YW=$'\033[33m'
  CS_YWB=$'\033[93m'
  CS_CL=$'\033[m'
}
# ------------------------------------------------------------------------------
# Defines formatting helpers like tab, bold, and line reset sequences.
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# Defines formatting helpers like tab, bold, and line reset sequences.
# BFR = carriage return + erase-to-EOL (used to overwrite spinner lines);
# HOLD/TAB/TAB3 are fixed whitespace paddings used by the icon strings.
# ------------------------------------------------------------------------------
formatting() {
  BFR="\\r\\033[K"
  BOLD=$(echo "\033[1m")
  HOLD=" "
  TAB="  "
  TAB3="      "
}
# ------------------------------------------------------------------------------
# Sets symbolic icons used throughout user feedback and prompts.
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# Sets symbolic icons used throughout user feedback and prompts.
# Depends on TAB/CL being initialized first (formatting() and color()).
# ------------------------------------------------------------------------------
icons() {
  CM="${TAB}✔️${TAB}"
  CROSS="${TAB}✖️${TAB}"
  DNSOK="✔️ "
  DNSFAIL="${TAB}✖️${TAB}"
  INFO="${TAB}💡${TAB}${CL}"
  OS="${TAB}🖥️${TAB}${CL}"
  OSVERSION="${TAB}🌟${TAB}${CL}"
  CONTAINERTYPE="${TAB}📦${TAB}${CL}"
  DISKSIZE="${TAB}💾${TAB}${CL}"
  CPUCORE="${TAB}🧠${TAB}${CL}"
  RAMSIZE="${TAB}🛠️${TAB}${CL}"
  SEARCH="${TAB}🔍${TAB}${CL}"
  VERBOSE_CROPPED="🔍${TAB}"
  VERIFYPW="${TAB}🔐${TAB}${CL}"
  CONTAINERID="${TAB}🆔${TAB}${CL}"
  HOSTNAME="${TAB}🏠${TAB}${CL}"
  BRIDGE="${TAB}🌉${TAB}${CL}"
  NETWORK="${TAB}📡${TAB}${CL}"
  GATEWAY="${TAB}🌐${TAB}${CL}"
  DISABLEIPV6="${TAB}🚫${TAB}${CL}"
  DEFAULT="${TAB}⚙️${TAB}${CL}"
  MACADDRESS="${TAB}🔗${TAB}${CL}"
  VLANTAG="${TAB}🏷️${TAB}${CL}"
  ROOTSSH="${TAB}🔑${TAB}${CL}"
  CREATING="${TAB}🚀${TAB}${CL}"
  ADVANCED="${TAB}🧩${TAB}${CL}"
  FUSE="${TAB}🗂️${TAB}${CL}"
  HOURGLASS="${TAB}${TAB}"
}
# ------------------------------------------------------------------------------
# Sets default retry and wait variables used for system actions.
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# Sets default retry and wait variables used for system actions.
# RETRY_NUM attempts, RETRY_EVERY seconds apart; `i` is the live counter.
# ------------------------------------------------------------------------------
default_vars() {
  RETRY_NUM=10
  RETRY_EVERY=3
  i=$RETRY_NUM
  #[[ "${VAR_OS:-}" == "unknown" ]]
}
# ------------------------------------------------------------------------------
# Sets default verbose mode for script and os execution.
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# Configure the $STD command prefix from $VERBOSE: empty (show output) in
# verbose mode, the "silent" wrapper function otherwise.
# ------------------------------------------------------------------------------
set_std_mode() {
  case "${VERBOSE:-no}" in
  yes) STD="" ;;
  *) STD="silent" ;;
  esac
}
# Silent execution function
# Silent execution function
# Runs its arguments as a command with all output discarded; preserves the
# command's exit status. Used as the value of $STD in non-verbose mode.
silent() {
  "$@" >/dev/null 2>&1
}
# Function to download & save header files
# Function to download & save header files
# Fetches the ASCII-art banner for $APP (cached under
# /usr/local/community-scripts/headers) and prints it to stdout.
# Globals read: APP, APP_TYPE (defaults to "ct").
# Returns: 1 if the banner is missing and cannot be downloaded.
get_header() {
  local app_name=$(echo "${APP,,}" | tr -d ' ')
  local app_type=${APP_TYPE:-ct}
  local header_url="https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/${app_type}/headers/${app_name}"
  local local_header_path="/usr/local/community-scripts/headers/${app_type}/${app_name}"
  mkdir -p "$(dirname "$local_header_path")"
  # Only hit the network when there is no non-empty cached copy.
  if [ ! -s "$local_header_path" ]; then
    if ! curl -fsSL "$header_url" -o "$local_header_path"; then
      return 1
    fi
  fi
  cat "$local_header_path" 2>/dev/null || true
}
# Render the app's banner (if any) on a cleared screen.
# Fix: the original computed `term_width` via tput and never used it —
# dead code removed. Also split local declaration from command substitution
# so a failing substitution isn't masked by `local`'s exit status.
header_info() {
  local app_name
  app_name=$(echo "${APP,,}" | tr -d ' ')
  local header_content
  # NOTE(review): get_header derives the name from $APP itself; the argument
  # is accepted but unused — kept for call-site compatibility.
  header_content=$(get_header "$app_name") || header_content=""
  clear
  if [ -n "$header_content" ]; then
    echo "$header_content"
  fi
}
# Install ncurses (provides `tput`) when it is missing; no-op otherwise.
# Fix: guard the distro check — /etc/os-release may be absent in minimal
# chroots, and an unguarded grep would print an error (and trip set -e in
# callers using strict mode).
ensure_tput() {
  if ! command -v tput >/dev/null 2>&1; then
    if grep -qi 'alpine' /etc/os-release 2>/dev/null; then
      apk add --no-cache ncurses >/dev/null 2>&1
    elif command -v apt-get >/dev/null 2>&1; then
      apt-get update -qq >/dev/null
      apt-get install -y -qq ncurses-bin >/dev/null 2>&1
    fi
  fi
}
# Return 0 when the target OS is Alpine.
# Prefers the explicit $var_os / $PCT_OSTYPE globals; falls back to the ID
# field of /etc/os-release (sourced in a subshell so nothing leaks).
is_alpine() {
  local detected="${var_os:-${PCT_OSTYPE:-}}"
  if [[ -z "$detected" && -f /etc/os-release ]]; then
    detected="$(
      . /etc/os-release 2>/dev/null
      echo "${ID:-}"
    )"
  fi
  [[ "$detected" == "alpine" ]]
}
# Return 0 when output should be shown verbosely: either verbose mode is
# requested (VERBOSE/var_verbose != "no") or stderr is not a terminal
# (no TTY means the spinner would garble logs).
# Fix: the original computed a `tty_status` local that was never used —
# dead code removed; the -t 2 check in the return expression is kept.
is_verbose_mode() {
  local verbose="${VERBOSE:-${var_verbose:-no}}"
  [[ "$verbose" != "no" || ! -t 2 ]]
}
# ------------------------------------------------------------------------------
# Handles specific curl error codes and displays descriptive messages.
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# Handles specific curl error codes and displays descriptive messages.
# Arguments: $1 curl exit code, $2 target URL, $3 optional raw curl output.
# Always terminates the script with exit 1 after reporting.
# ------------------------------------------------------------------------------
__curl_err_handler() {
  local rc="$1" target="$2" raw_output="$3"
  local reason
  case $rc in
  1) reason="Unsupported protocol" ;;
  2) reason="Curl init failed" ;;
  3) reason="Malformed URL" ;;
  5) reason="Proxy resolution failed" ;;
  6) reason="Host resolution failed" ;;
  7) reason="Connection failed" ;;
  9) reason="Access denied" ;;
  18) reason="Partial file transfer" ;;
  22) reason="HTTP error (e.g. 400/404)" ;;
  23) reason="Write error on local system" ;;
  26) reason="Read error from local file" ;;
  28) reason="Timeout" ;;
  35) reason="SSL connect error" ;;
  47) reason="Too many redirects" ;;
  51) reason="SSL cert verify failed" ;;
  52) reason="Empty server response" ;;
  55) reason="Send error" ;;
  56) reason="Receive error" ;;
  60) reason="SSL CA not trusted" ;;
  67) reason="Login denied by server" ;;
  78) reason="Remote file not found (404)" ;;
  *) reason="Curl failed with code $rc" ;;
  esac
  msg_error "$reason: $target"
  # Pass through curl's own diagnostics when available.
  [[ -n "$raw_output" ]] && printf "%s\n" "$raw_output" >&2
  exit 1
}
# Report a fatal error and abort the whole script by signalling SIGINT to
# the current shell ($$), which triggers the INT handler/cleanup traps.
fatal() {
  msg_error "$1"
  kill -INT $$
}
# Braille-dot spinner animation; loops forever redrawing the current line
# with $SPINNER_MSG. Intended to run in the background — the caller records
# its PID and kills it via stop_spinner.
spinner() {
  local chars=(⠋ ⠙ ⠹ ⠸ ⠼ ⠴ ⠦ ⠧ ⠇ ⠏)
  local i=0
  while true; do
    local index=$((i++ % ${#chars[@]}))
    # \r\033[2K: return to column 0 and erase the line before redrawing.
    printf "\r\033[2K%s %b" "${CS_YWB}${chars[$index]}${CS_CL}" "${CS_YWB}${SPINNER_MSG:-}${CS_CL}"
    sleep 0.1
  done
}
# Erase the current terminal line; falls back to raw ANSI sequences when
# tput is unavailable.
clear_line() {
  tput cr 2>/dev/null || echo -en "\r"
  tput el 2>/dev/null || echo -en "\033[K"
}
# Stop the background spinner, if any. Looks up the PID from $SPINNER_PID or
# the /tmp/.spinner.pid fallback file, terminates it (TERM then KILL), and
# restores sane terminal modes. Safe to call when no spinner is running.
stop_spinner() {
  local pid="${SPINNER_PID:-}"
  [[ -z "$pid" && -f /tmp/.spinner.pid ]] && pid=$(</tmp/.spinner.pid)
  if [[ -n "$pid" && "$pid" =~ ^[0-9]+$ ]]; then
    if kill "$pid" 2>/dev/null; then
      sleep 0.05
      # Escalate in case the process ignored the first signal.
      kill -9 "$pid" 2>/dev/null || true
      wait "$pid" 2>/dev/null || true
    fi
    rm -f /tmp/.spinner.pid
  fi
  unset SPINNER_PID SPINNER_MSG
  # The spinner may leave the terminal in an odd state; reset it.
  stty sane 2>/dev/null || true
}
# Show an in-progress message. Each distinct message is shown only once per
# run (deduplicated via the MSG_INFO_SHOWN map). In verbose/Alpine/non-TTY
# mode it prints a static line; otherwise it starts the animated spinner in
# the background and records its PID for stop_spinner.
msg_info() {
  local msg="$1"
  [[ -z "$msg" ]] && return
  # Lazily (re)create the dedup map if it is missing or not associative.
  if ! declare -p MSG_INFO_SHOWN &>/dev/null || ! declare -A MSG_INFO_SHOWN &>/dev/null; then
    declare -gA MSG_INFO_SHOWN=()
  fi
  [[ -n "${MSG_INFO_SHOWN["$msg"]+x}" ]] && return
  MSG_INFO_SHOWN["$msg"]=1
  stop_spinner
  SPINNER_MSG="$msg"
  if is_verbose_mode || is_alpine; then
    local HOURGLASS="${TAB}${TAB}"
    printf "\r\e[2K%s %b" "$HOURGLASS" "${YW}${msg}${CL}" >&2
    return
  fi
  color_spinner
  spinner &
  SPINNER_PID=$!
  # Persist the PID so stop_spinner can find it even from a fresh scope.
  echo "$SPINNER_PID" >/tmp/.spinner.pid
  disown "$SPINNER_PID" 2>/dev/null || true
}
# Finish an in-progress message with a green check mark. Also clears the
# message's dedup entry so it could be shown again later.
msg_ok() {
  local msg="$1"
  [[ -z "$msg" ]] && return
  stop_spinner
  clear_line
  printf "%s %b\n" "$CM" "${GN}${msg}${CL}" >&2
  unset MSG_INFO_SHOWN["$msg"]
}
# Print an error line (red, with cross icon), stopping any active spinner.
msg_error() {
  stop_spinner
  local msg="$1"
  echo -e "${BFR:-} ${CROSS:-✖️} ${RD}${msg}${CL}"
}
# Print a warning line (bright yellow, info icon), stopping any active spinner.
msg_warn() {
  stop_spinner
  local msg="$1"
  echo -e "${BFR:-} ${INFO:-} ${YWB}${msg}${CL}"
}
# Print a message with a caller-supplied symbol and color.
# Arguments: $1 symbol (default "[*]"), $2 ANSI color (default cyan),
#            $3 message text (required; empty => no output).
msg_custom() {
  local symbol="${1:-"[*]"}"
  local color="${2:-"\e[36m"}"
  local msg="${3:-}"
  [[ -z "$msg" ]] && return
  stop_spinner
  echo -e "${BFR:-} ${symbol} ${color}${msg}${CL:-\e[0m}"
}
# Run a command inside container $1 via lxc-attach under bash strict mode.
# A trap inside the container turns SIGINT/SIGTERM into exit 130 so aborts
# are visible; any failure is routed to __handle_general_error.
# NOTE(review): the command is interpolated into the bash -c string, so
# arguments must be pre-quoted by the caller — verify at call sites.
run_container_safe() {
  local ct="$1"
  shift
  local cmd="$*"
  lxc-attach -n "$ct" -- bash -euo pipefail -c "
    trap 'echo Aborted in container; exit 130' SIGINT SIGTERM
    $cmd
  " || __handle_general_error "lxc-attach to CT $ct"
}
# Best-effort post-install cleanup inside an LXC: package caches (apk/apt),
# temp files, logs, and the caches of any detected language toolchains.
# Every step tolerates failure (|| true) — cleanup must never abort installs.
cleanup_lxc() {
  msg_info "Cleaning up"
  if is_alpine; then
    $STD apk cache clean || true
    rm -rf /var/cache/apk/*
  else
    $STD apt -y autoremove || true
    $STD apt -y autoclean || true
    $STD apt -y clean || true
  fi
  # Clear temp artifacts (keep sockets/FIFOs; ignore errors)
  find /tmp /var/tmp -type f -name 'tmp*' -delete 2>/dev/null || true
  find /tmp /var/tmp -type f -name 'tempfile*' -delete 2>/dev/null || true
  # Truncate writable log files silently (permission errors ignored)
  if command -v truncate >/dev/null 2>&1; then
    find /var/log -type f -writable -print0 2>/dev/null |
      xargs -0 -n1 truncate -s 0 2>/dev/null || true
  fi
  # Node.js npm
  if command -v npm &>/dev/null; then $STD npm cache clean --force || true; fi
  # Node.js yarn
  if command -v yarn &>/dev/null; then $STD yarn cache clean || true; fi
  # Node.js pnpm
  if command -v pnpm &>/dev/null; then $STD pnpm store prune || true; fi
  # Go
  if command -v go &>/dev/null; then $STD go clean -cache -modcache || true; fi
  # Rust cargo
  if command -v cargo &>/dev/null; then $STD cargo clean || true; fi
  # Ruby gem
  if command -v gem &>/dev/null; then $STD gem cleanup || true; fi
  # Composer (PHP)
  if command -v composer &>/dev/null; then $STD composer clear-cache || true; fi
  # Keep only the last 10 minutes of journal entries.
  if command -v journalctl &>/dev/null; then
    $STD journalctl --vacuum-time=10m || true
  fi
  msg_ok "Cleaned"
}
# Verify swap is active; if not, interactively offer to create and enable a
# /swapfile of a user-chosen size (MB). Returns 0 when swap is (now) active,
# 1 when the user declines, gives invalid input, or creation fails.
check_or_create_swap() {
  msg_info "Checking for active swap"
  if swapon --noheadings --show | grep -q 'swap'; then
    msg_ok "Swap is active"
    return 0
  fi
  msg_error "No active swap detected"
  read -p "Do you want to create a swap file? [y/N]: " create_swap
  create_swap="${create_swap,,}" # to lowercase
  if [[ "$create_swap" != "y" && "$create_swap" != "yes" ]]; then
    msg_info "Skipping swap file creation"
    return 1
  fi
  read -p "Enter swap size in MB (e.g., 2048 for 2GB): " swap_size_mb
  if ! [[ "$swap_size_mb" =~ ^[0-9]+$ ]]; then
    msg_error "Invalid size input. Aborting."
    return 1
  fi
  local swap_file="/swapfile"
  msg_info "Creating ${swap_size_mb}MB swap file at $swap_file"
  # chmod 600 before mkswap/swapon: world-readable swap is a security risk.
  if dd if=/dev/zero of="$swap_file" bs=1M count="$swap_size_mb" status=progress &&
    chmod 600 "$swap_file" &&
    mkswap "$swap_file" &&
    swapon "$swap_file"; then
    msg_ok "Swap file created and activated successfully"
  else
    msg_error "Failed to create or activate swap"
    return 1
  fi
}
trap 'stop_spinner' EXIT INT TERM

View File

@@ -1,385 +0,0 @@
#!/usr/bin/env bash
# Copyright (c) 2021-2026 tteck
# Author: tteck (tteckster)
# Co-Author: MickLesk
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# This sets verbose mode if the global variable is set to "yes"
# if [ "$VERBOSE" == "yes" ]; then set -x; fi

# Bootstrap the shared helpers (msg_*, colors, ...) from core.func,
# preferring curl and falling back to wget.
if command -v curl >/dev/null 2>&1; then
  source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/core.func)
  load_functions
  #echo "(create-lxc.sh) Loaded core.func via curl"
elif command -v wget >/dev/null 2>&1; then
  source <(wget -qO- https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/core.func)
  load_functions
  #echo "(create-lxc.sh) Loaded core.func via wget"
else
  # BUGFIX: previously the script continued without core.func and crashed
  # later with a cryptic "load_functions: command not found". Fail fast.
  echo "Error: neither curl nor wget is available to download core.func" >&2
  exit 1
fi
# This sets error handling options and defines the error_handler function to handle errors
# -E: ERR trap is inherited by functions/subshells; -e: exit on error;
# -u: unset variables are errors; -o pipefail: a pipeline fails if any stage fails.
set -Eeuo pipefail
trap 'error_handler $LINENO "$BASH_COMMAND"' ERR
# Lifecycle traps: cleanup on any exit, distinct exit codes for INT/TERM.
trap on_exit EXIT
trap on_interrupt INT
trap on_terminate TERM
# EXIT trap: preserve the script's exit status while removing the template
# lock file, if one was created.
on_exit() {
  local rc=$?
  if [[ -n "${lockfile:-}" && -e "$lockfile" ]]; then
    rm -f "$lockfile"
  fi
  exit "$rc"
}
# ERR trap target: report the failing line, exit code, and command, then
# propagate the same exit code.
error_handler() {
  local rc=$?
  local lineno=$1
  local cmd=$2
  # Re-show the cursor (spinner output hides it) before printing diagnostics.
  printf "\e[?25h"
  echo -e "\n${RD}[ERROR]${CL} in line ${RD}${lineno}${CL}: exit code ${RD}${rc}${CL}: while executing command ${YW}${cmd}${CL}\n"
  exit "$rc"
}
# SIGINT handler: report Ctrl-C and exit with the conventional 130 (128+SIGINT).
on_interrupt() {
  echo -e "\n${RD}Interrupted by user (SIGINT)${CL}"
  exit 130
}
# SIGTERM handler: report termination and exit with the conventional 143 (128+SIGTERM).
on_terminate() {
  echo -e "\n${RD}Terminated by signal (SIGTERM)${CL}"
  exit 143
}
# Abort on user request: clear the screen, restore the cursor, and take the
# whole process group down (kill 0 also reaches background helpers).
exit_script() {
  clear
  printf "\e[?25h"
  echo -e "\n${CROSS}${RD}User exited script${CL}\n"
  kill 0
  exit 1
}
# Return 0 when at least one storage pool supports the given content type
# (e.g. rootdir, vztmpl), 1 otherwise. Errors from pvesm are suppressed.
check_storage_support() {
  local content=$1
  local found=0
  local name
  while read -r name _; do
    [[ -n "$name" ]] && found=1
  done < <(pvesm status -content "$content" 2>/dev/null | awk 'NR>1')
  ((found))
}
# This function selects a storage pool for a given content type (e.g., rootdir, vztmpl).
# On success it sets STORAGE_RESULT (pool name) and STORAGE_INFO (free/used summary).
# Returns: 0 on success, 1 on invalid class, 2 when no/invalid storage exists.
function select_storage() {
  local CLASS=$1 CONTENT CONTENT_LABEL
  # Map the abstract class onto the pvesm content type plus a human label.
  case $CLASS in
  container)
    CONTENT='rootdir'
    CONTENT_LABEL='Container'
    ;;
  template)
    CONTENT='vztmpl'
    CONTENT_LABEL='Container template'
    ;;
  iso)
    CONTENT='iso'
    CONTENT_LABEL='ISO image'
    ;;
  images)
    CONTENT='images'
    CONTENT_LABEL='VM Disk image'
    ;;
  backup)
    CONTENT='backup'
    CONTENT_LABEL='Backup'
    ;;
  snippets)
    CONTENT='snippets'
    CONTENT_LABEL='Snippets'
    ;;
  *)
    msg_error "Invalid storage class '$CLASS'"
    return 1
    ;;
  esac
  # Check for preset STORAGE variable (only honored for the container rootfs)
  if [ "$CONTENT" = "rootdir" ] && [ -n "${STORAGE:-}" ]; then
    if pvesm status -content "$CONTENT" | awk 'NR>1 {print $1}' | grep -qx "$STORAGE"; then
      STORAGE_RESULT="$STORAGE"
      msg_info "Using preset storage: $STORAGE_RESULT for $CONTENT_LABEL"
      return 0
    else
      msg_error "Preset storage '$STORAGE' is not valid for content type '$CONTENT'."
      return 2
    fi
  fi
  # Build the whiptail radiolist: display string -> pool name, plus a
  # free/used column. COL_WIDTH tracks the widest entry for dialog layout.
  local -A STORAGE_MAP
  local -a MENU
  local COL_WIDTH=0
  while read -r TAG TYPE _ TOTAL USED FREE _; do
    [[ -n "$TAG" && -n "$TYPE" ]] || continue
    local STORAGE_NAME="$TAG"
    local DISPLAY="${STORAGE_NAME} (${TYPE})"
    # pvesm reports sizes in KiB; format them human-readable (IEC units).
    local USED_FMT=$(numfmt --to=iec --from-unit=K --format %.1f <<<"$USED")
    local FREE_FMT=$(numfmt --to=iec --from-unit=K --format %.1f <<<"$FREE")
    local INFO="Free: ${FREE_FMT}B Used: ${USED_FMT}B"
    STORAGE_MAP["$DISPLAY"]="$STORAGE_NAME"
    # Each radiolist entry consumes 3 MENU slots: tag, description, state.
    MENU+=("$DISPLAY" "$INFO" "OFF")
    ((${#DISPLAY} > COL_WIDTH)) && COL_WIDTH=${#DISPLAY}
  done < <(pvesm status -content "$CONTENT" | awk 'NR>1')
  if [ ${#MENU[@]} -eq 0 ]; then
    msg_error "No storage found for content type '$CONTENT'."
    return 2
  fi
  # Exactly one candidate: select it without prompting.
  if [ $((${#MENU[@]} / 3)) -eq 1 ]; then
    STORAGE_RESULT="${STORAGE_MAP[${MENU[0]}]}"
    STORAGE_INFO="${MENU[1]}"
    return 0
  fi
  local WIDTH=$((COL_WIDTH + 42))
  while true; do
    local DISPLAY_SELECTED
    # fd swap (3>&1 1>&2 2>&3) captures whiptail's stderr result on stdout.
    DISPLAY_SELECTED=$(whiptail --backtitle "Proxmox VE Helper Scripts" \
      --title "Storage Pools" \
      --radiolist "Which storage pool for ${CONTENT_LABEL,,}?\n(Spacebar to select)" \
      16 "$WIDTH" 6 "${MENU[@]}" 3>&1 1>&2 2>&3)
    # Cancel or ESC
    [[ $? -ne 0 ]] && exit_script
    # Strip trailing whitespace or newline (important for storages like "storage (dir)")
    DISPLAY_SELECTED=$(sed 's/[[:space:]]*$//' <<<"$DISPLAY_SELECTED")
    if [[ -z "$DISPLAY_SELECTED" || -z "${STORAGE_MAP[$DISPLAY_SELECTED]+_}" ]]; then
      whiptail --msgbox "No valid storage selected. Please try again." 8 58
      continue
    fi
    STORAGE_RESULT="${STORAGE_MAP[$DISPLAY_SELECTED]}"
    # Recover the matching info column for the chosen display entry.
    for ((i = 0; i < ${#MENU[@]}; i += 3)); do
      if [[ "${MENU[$i]}" == "$DISPLAY_SELECTED" ]]; then
        STORAGE_INFO="${MENU[$i + 1]}"
        break
      fi
    done
    return 0
  done
}
# ------------------------------------------------------------------------------
# Preflight checks: required variables, valid & unused CTID, storage support.
# Exit codes 203-206 distinguish the individual failures for the caller.
# ------------------------------------------------------------------------------
# Test if required variables are set
[[ "${CTID:-}" ]] || {
  msg_error "You need to set 'CTID' variable."
  exit 203
}
[[ "${PCT_OSTYPE:-}" ]] || {
  msg_error "You need to set 'PCT_OSTYPE' variable."
  exit 204
}
# Test if ID is valid (Proxmox reserves IDs below 100)
[ "$CTID" -ge "100" ] || {
  msg_error "ID cannot be less than 100."
  exit 205
}
# Test if ID is in use by either a VM (qm) or a container (pct)
if qm status "$CTID" &>/dev/null || pct status "$CTID" &>/dev/null; then
  echo -e "ID '$CTID' is already in use."
  unset CTID
  msg_error "Cannot use ID that is already in use."
  exit 206
fi
# This checks for the presence of valid Container Storage and Template Storage locations
if ! check_storage_support "rootdir"; then
  msg_error "No valid storage found for 'rootdir' [Container]"
  exit 1
fi
if ! check_storage_support "vztmpl"; then
  msg_error "No valid storage found for 'vztmpl' [Template]"
  exit 1
fi
# Choose (or auto-pick) storage for the template and the container rootfs.
# select_storage handles invalid picks internally and calls exit_script on
# cancel. NOTE(review): if it persistently returns non-zero (e.g. status 2,
# "no storage"), these loops spin forever — confirm that cannot happen after
# the check_storage_support preflight above.
while true; do
  if select_storage template; then
    TEMPLATE_STORAGE="$STORAGE_RESULT"
    TEMPLATE_STORAGE_INFO="$STORAGE_INFO"
    msg_ok "Storage ${BL}$TEMPLATE_STORAGE${CL} ($TEMPLATE_STORAGE_INFO) [Template]"
    break
  fi
done
while true; do
  if select_storage container; then
    CONTAINER_STORAGE="$STORAGE_RESULT"
    CONTAINER_STORAGE_INFO="$STORAGE_INFO"
    msg_ok "Storage ${BL}$CONTAINER_STORAGE${CL} ($CONTAINER_STORAGE_INFO) [Container]"
    break
  fi
done
# Check free space on selected container storage
# NOTE(review): assumes column $6 of `pvesm status` is the available space in
# KiB; if the pool name is absent from the output, STORAGE_FREE is empty and
# the -lt test below aborts via the ERR trap — confirm that failure mode.
STORAGE_FREE=$(pvesm status | awk -v s="$CONTAINER_STORAGE" '$1 == s { print $6 }')
REQUIRED_KB=$((${PCT_DISK_SIZE:-8} * 1024 * 1024))
if [ "$STORAGE_FREE" -lt "$REQUIRED_KB" ]; then
  msg_error "Not enough space on '$CONTAINER_STORAGE'. Needed: ${PCT_DISK_SIZE:-8}G."
  exit 214
fi
# Check Cluster Quorum if in Cluster (corosync.conf only exists on cluster nodes)
if [ -f /etc/pve/corosync.conf ]; then
  msg_info "Checking cluster quorum"
  if ! pvecm status | awk -F':' '/^Quorate/ { exit ($2 ~ /Yes/) ? 0 : 1 }'; then
    msg_error "Cluster is not quorate. Start all nodes or configure quorum device (QDevice)."
    exit 210
  fi
  msg_ok "Cluster is quorate"
fi
# ------------------------------------------------------------------------------
# Locate an LXC template matching PCT_OSTYPE/PCT_OSVERSION: first locally on
# the chosen template storage, then in the online repository. Sets TEMPLATE,
# TEMPLATE_SOURCE, and TEMPLATE_PATH.
# ------------------------------------------------------------------------------
TEMPLATE_SEARCH="${PCT_OSTYPE}-${PCT_OSVERSION:-}"
# Debian/Ubuntu ship "-standard_" templates; Alpine/Fedora/Rocky/CentOS ship
# "-default_" ones; anything else gets no pattern filter.
case "$PCT_OSTYPE" in
debian | ubuntu)
  TEMPLATE_PATTERN="-standard_"
  ;;
alpine | fedora | rocky | centos)
  TEMPLATE_PATTERN="-default_"
  ;;
*)
  TEMPLATE_PATTERN=""
  ;;
esac
# 1. Check local templates first (sorted by version so [-1] is the newest)
msg_info "Searching for template '$TEMPLATE_SEARCH'"
mapfile -t TEMPLATES < <(
  pveam list "$TEMPLATE_STORAGE" |
    awk -v s="$TEMPLATE_SEARCH" -v p="$TEMPLATE_PATTERN" '$1 ~ s && $1 ~ p {print $1}' |
    sed 's/.*\///' | sort -t - -k 2 -V
)
if [ ${#TEMPLATES[@]} -gt 0 ]; then
  TEMPLATE_SOURCE="local"
else
  # 2. Fall back to the online repository. (BUGFIX: `pveam update` previously
  # ran twice — once standalone and once inside the substitution.)
  msg_info "No local template found, checking online repository"
  mapfile -t TEMPLATES < <(
    pveam update >/dev/null 2>&1 &&
      pveam available -section system |
      sed -n "s/.*\($TEMPLATE_SEARCH.*$TEMPLATE_PATTERN.*\)/\1/p" |
      sort -t - -k 2 -V
  )
  TEMPLATE_SOURCE="online"
fi
# BUGFIX: guard against an empty result — ${TEMPLATES[-1]} on an empty array
# is a fatal "bad array subscript" under set -u with a cryptic error message.
if [ ${#TEMPLATES[@]} -eq 0 ]; then
  msg_error "No template found for '${TEMPLATE_SEARCH}' (local or online). Check PCT_OSTYPE/PCT_OSVERSION."
  exit 207
fi
TEMPLATE="${TEMPLATES[-1]}"
# Resolve the on-disk path; fall back to the default cache directory.
TEMPLATE_PATH="$(pvesm path "$TEMPLATE_STORAGE:vztmpl/$TEMPLATE" 2>/dev/null ||
  echo "/var/lib/vz/template/cache/$TEMPLATE")"
msg_ok "Template ${BL}$TEMPLATE${CL} [$TEMPLATE_SOURCE]"
# 4. Validate template (exists & not corrupted)
TEMPLATE_VALID=1
if [ ! -s "$TEMPLATE_PATH" ]; then
  # Missing or zero-byte file
  TEMPLATE_VALID=0
elif ! tar --use-compress-program=zstdcat -tf "$TEMPLATE_PATH" >/dev/null 2>&1; then
  # Listing the zstd-compressed archive failed -> corrupted download
  TEMPLATE_VALID=0
fi
if [ "$TEMPLATE_VALID" -eq 0 ]; then
  msg_warn "Template $TEMPLATE is missing or corrupted. Re-downloading."
  [[ -f "$TEMPLATE_PATH" ]] && rm -f "$TEMPLATE_PATH"
  # Up to three attempts with linear backoff (5s, 10s between tries)
  for attempt in {1..3}; do
    msg_info "Attempt $attempt: Downloading LXC template..."
    if pveam download "$TEMPLATE_STORAGE" "$TEMPLATE" >/dev/null 2>&1; then
      msg_ok "Template download successful."
      break
    fi
    if [ $attempt -eq 3 ]; then
      msg_error "Failed after 3 attempts. Please check network access or manually run:\n pveam download $TEMPLATE_STORAGE $TEMPLATE"
      exit 208
    fi
    sleep $((attempt * 5))
  done
fi
msg_info "Creating LXC Container"
# Check and fix subuid/subgid (UID/GID mapping required for unprivileged containers)
grep -q "root:100000:65536" /etc/subuid || echo "root:100000:65536" >>/etc/subuid
grep -q "root:100000:65536" /etc/subgid || echo "root:100000:65536" >>/etc/subgid
# Combine all options
# NOTE: PCT_OPTIONS is intentionally expanded unquoted so a flat option
# string word-splits into separate pct arguments.
PCT_OPTIONS=(${PCT_OPTIONS[@]:-${DEFAULT_PCT_OPTIONS[@]}})
[[ " ${PCT_OPTIONS[@]} " =~ " -rootfs " ]] || PCT_OPTIONS+=(-rootfs "$CONTAINER_STORAGE:${PCT_DISK_SIZE:-8}")
# Secure creation of the LXC container with lock and template check.
# fd 9 + flock serializes concurrent scripts using the same template; the
# lock file itself is removed by the on_exit trap.
lockfile="/tmp/template.${TEMPLATE}.lock"
exec 9>"$lockfile" || {
  msg_error "Failed to create lock file '$lockfile'."
  exit 200
}
flock -w 60 9 || {
  msg_error "Timeout while waiting for template lock"
  exit 211
}
if ! pct create "$CTID" "${TEMPLATE_STORAGE}:vztmpl/${TEMPLATE}" "${PCT_OPTIONS[@]}" &>/dev/null; then
  msg_error "Container creation failed. Checking if template is corrupted or incomplete."
  # Heuristic: any valid template should be well over 1 MB.
  if [[ ! -s "$TEMPLATE_PATH" || "$(stat -c%s "$TEMPLATE_PATH")" -lt 1000000 ]]; then
    msg_error "Template file too small or missing re-downloading."
    rm -f "$TEMPLATE_PATH"
  elif ! zstdcat "$TEMPLATE_PATH" | tar -tf - &>/dev/null; then
    msg_error "Template appears to be corrupted re-downloading."
    rm -f "$TEMPLATE_PATH"
  else
    msg_error "Template is valid, but container creation failed. Update your whole Proxmox System (pve-container) first or check https://github.com/community-scripts/ProxmoxVE/discussions/8126"
    exit 209
  fi
  # Retry download
  # NOTE(review): after a successful re-download, `pct create` is NOT retried
  # here — the pct-list check below will then fail with exit 215. Confirm
  # whether a creation retry was intended after this loop.
  for attempt in {1..3}; do
    msg_info "Attempt $attempt: Re-downloading template..."
    if timeout 120 pveam download "$TEMPLATE_STORAGE" "$TEMPLATE" >/dev/null; then
      msg_ok "Template re-download successful."
      break
    fi
    if [ "$attempt" -eq 3 ]; then
      msg_error "Three failed attempts. Aborting."
      exit 208
    fi
    sleep $((attempt * 5))
  done
  sleep 1 # I/O-Sync-Delay
  msg_ok "Re-downloaded LXC Template"
fi
# Post-creation sanity checks: container listed and rootfs assigned.
if ! pct list | awk '{print $1}' | grep -qx "$CTID"; then
  msg_error "Container ID $CTID not listed in 'pct list' unexpected failure."
  exit 215
fi
if ! grep -q '^rootfs:' "/etc/pve/lxc/$CTID.conf"; then
  msg_error "RootFS entry missing in container config storage not correctly assigned."
  exit 216
fi
# Warn on hostnames violating DNS label rules (lowercase alphanumerics and '-').
if grep -q '^hostname:' "/etc/pve/lxc/$CTID.conf"; then
  CT_HOSTNAME=$(grep '^hostname:' "/etc/pve/lxc/$CTID.conf" | awk '{print $2}')
  if [[ ! "$CT_HOSTNAME" =~ ^[a-z0-9-]+$ ]]; then
    msg_warn "Hostname '$CT_HOSTNAME' contains invalid characters may cause issues with networking or DNS."
  fi
fi
msg_ok "LXC Container ${BL}$CTID${CL} ${GN}was successfully created."

View File

@@ -1,217 +0,0 @@
# Copyright (c) 2021-2026 tteck
# Author: tteck (tteckster)
# Co-Author: MickLesk
# License: MIT
# https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE

# Bootstrap: curl is required to fetch the shared helpers; install it via apt
# if missing (assumes a Debian/Ubuntu-based container), then load core.func.
if ! command -v curl >/dev/null 2>&1; then
  printf "\r\e[2K%b" '\033[93m Setup Source \033[m' >&2
  apt-get update >/dev/null 2>&1
  apt-get install -y curl >/dev/null 2>&1
fi
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/core.func)
load_functions
# Configure verbosity and, when IPV6_METHOD=disable, turn IPv6 off via a
# persistent sysctl drop-in that is also applied immediately.
verb_ip6() {
  set_std_mode # choose $STD redirection based on VERBOSE
  # Guard clause: nothing to do unless IPv6 was explicitly disabled.
  if [ "$IPV6_METHOD" != "disable" ]; then
    return 0
  fi
  msg_info "Disabling IPv6 (this may affect some services)"
  mkdir -p /etc/sysctl.d
  $STD tee /etc/sysctl.d/99-disable-ipv6.conf >/dev/null <<EOF
# Disable IPv6 (set by community-scripts)
net.ipv6.conf.all.disable_ipv6 = 1
net.ipv6.conf.default.disable_ipv6 = 1
net.ipv6.conf.lo.disable_ipv6 = 1
EOF
  $STD sysctl -p /etc/sysctl.d/99-disable-ipv6.conf
  msg_ok "Disabled IPv6"
}
# Install strict error handling (errexit, nounset, pipefail, inherited ERR
# trap) and route any failure to error_handler with line and command info.
catch_errors() {
  set -E -e -u -o pipefail
  trap 'error_handler $LINENO "$BASH_COMMAND"' ERR
}
# This function handles errors: prints diagnostics and reports the failure
# to the community-scripts API (api.func provides post_update_to_api).
error_handler() {
  # BUGFIX: capture the exit status FIRST. The original captured $? after
  # running `source <(curl ...)` and `printf`, so it always reflected those
  # commands (typically 0), never the command that actually failed.
  local exit_code="$?"
  local line_number="$1"
  local command="$2"
  source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/api.func)
  # Re-show the cursor hidden by spinner output.
  printf "\e[?25h"
  local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
  echo -e "\n$error_message"
  # Line 51 corresponds to the silent() helper swallowing output.
  if [[ "$line_number" -eq 51 ]]; then
    echo -e "The silent function has suppressed the error, run the script with verbose mode enabled, which will provide more detailed output.\n"
    post_update_to_api "failed" "No error message, script ran in silent mode"
  else
    post_update_to_api "failed" "${command}"
  fi
}
# Prepare the container OS: wait for networking to come up (retrying
# RETRY_NUM times, RETRY_EVERY seconds apart), then remove the Python
# EXTERNALLY-MANAGED marker and disable the networkd wait-online unit.
setting_up_container() {
  msg_info "Setting up Container OS"
  local attempt
  for ((attempt = RETRY_NUM; attempt > 0; attempt--)); do
    [ -n "$(hostname -I)" ] && break
    echo 1>&2 -en "${CROSS}${RD} No Network! "
    sleep $RETRY_EVERY
  done
  if [ -z "$(hostname -I)" ]; then
    echo 1>&2 -e "\n${CROSS}${RD} No Network After $RETRY_NUM Tries${CL}"
    echo -e "${NETWORK}Check Network Settings"
    exit 1
  fi
  rm -rf /usr/lib/python3.*/EXTERNALLY-MANAGED
  systemctl disable -q --now systemd-networkd-wait-online.service
  msg_ok "Set up Container OS"
  msg_ok "Network Connected: ${BL}$(hostname -I)"
}
# This function checks the network connection by pinging well-known IPv4/IPv6
# DNS servers and prompts the user to continue if the internet is not connected.
network_check() {
  # Temporarily disable errexit / ERR trap: ping failures here are expected
  # and handled explicitly; both are restored at the end of the function.
  set +e
  trap - ERR
  ipv4_connected=false
  ipv6_connected=false
  sleep 1
  # Check IPv4 connectivity to Google, Cloudflare & Quad9 DNS servers.
  if ping -c 1 -W 1 1.1.1.1 &>/dev/null || ping -c 1 -W 1 8.8.8.8 &>/dev/null || ping -c 1 -W 1 9.9.9.9 &>/dev/null; then
    msg_ok "IPv4 Internet Connected"
    ipv4_connected=true
  else
    msg_error "IPv4 Internet Not Connected"
  fi
  # Check IPv6 connectivity to Google, Cloudflare & Quad9 DNS servers.
  if ping6 -c 1 -W 1 2606:4700:4700::1111 &>/dev/null || ping6 -c 1 -W 1 2001:4860:4860::8888 &>/dev/null || ping6 -c 1 -W 1 2620:fe::fe &>/dev/null; then
    msg_ok "IPv6 Internet Connected"
    ipv6_connected=true
  else
    msg_error "IPv6 Internet Not Connected"
  fi
  # If both IPv4 and IPv6 checks fail, prompt the user
  if [[ $ipv4_connected == false && $ipv6_connected == false ]]; then
    read -r -p "No Internet detected, would you like to continue anyway? <y/N> " prompt
    if [[ "${prompt,,}" =~ ^(y|yes)$ ]]; then
      echo -e "${INFO}${RD}Expect Issues Without Internet${CL}"
    else
      echo -e "${NETWORK}Check Network Settings"
      exit 1
    fi
  fi
  # DNS resolution checks for GitHub-related domains (IPv4 and/or IPv6)
  GIT_HOSTS=("github.com" "raw.githubusercontent.com" "api.github.com" "git.community-scripts.org")
  GIT_STATUS="Git DNS:"
  DNS_FAILED=false
  for HOST in "${GIT_HOSTS[@]}"; do
    # Accept the first IPv4 or IPv6 address getent resolves for the host.
    RESOLVEDIP=$(getent hosts "$HOST" | awk '{ print $1 }' | grep -E '(^([0-9]{1,3}\.){3}[0-9]{1,3}$)|(^[a-fA-F0-9:]+$)' | head -n1)
    if [[ -z "$RESOLVEDIP" ]]; then
      # NOTE(review): $DNSFAIL / $DNSOK are presumably status glyphs defined
      # in core.func — confirm they are in scope when this runs.
      GIT_STATUS+="$HOST:($DNSFAIL)"
      DNS_FAILED=true
    else
      GIT_STATUS+=" $HOST:($DNSOK)"
    fi
  done
  if [[ "$DNS_FAILED" == true ]]; then
    fatal "$GIT_STATUS"
  else
    msg_ok "$GIT_STATUS"
  fi
  # Restore strict error handling.
  set -e
  trap 'error_handler $LINENO "$BASH_COMMAND"' ERR
}
# This function updates the Container OS by running apt-get update and upgrade
update_os() {
  msg_info "Updating Container OS"
  if [[ "$CACHER" == "yes" ]]; then
    # Route apt through an apt-cacher-ng proxy. The generated helper probes
    # the cacher at apt runtime and falls back to DIRECT when unreachable.
    # The heredoc is unquoted on purpose: ${CACHER_IP} is baked in at write time.
    echo 'Acquire::http::Proxy-Auto-Detect "/usr/local/bin/apt-proxy-detect.sh";' >/etc/apt/apt.conf.d/00aptproxy
    cat <<EOF >/usr/local/bin/apt-proxy-detect.sh
#!/bin/bash
if nc -w1 -z "${CACHER_IP}" 3142; then
echo -n "http://${CACHER_IP}:3142"
else
echo -n "DIRECT"
fi
EOF
    chmod +x /usr/local/bin/apt-proxy-detect.sh
  fi
  $STD apt-get update
  # --force-confold: keep existing config files during package upgrades
  $STD apt-get -o Dpkg::Options::="--force-confold" -y dist-upgrade
  rm -rf /usr/lib/python3.*/EXTERNALLY-MANAGED
  msg_ok "Updated Container OS"
  # Load the shared tooling helpers used by the app install steps.
  source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/tools.func)
}
# This function modifies the message of the day (motd) and SSH settings:
# builds a login banner in /etc/profile.d, disables stock MOTD scripts, and
# optionally enables root SSH login.
motd_ssh() {
  # Set terminal to 256-color mode (idempotent append to root's bashrc)
  grep -qxF "export TERM='xterm-256color'" /root/.bashrc || echo "export TERM='xterm-256color'" >>/root/.bashrc
  # Get OS information (Debian / Ubuntu)
  if [ -f "/etc/os-release" ]; then
    OS_NAME=$(grep ^NAME /etc/os-release | cut -d= -f2 | tr -d '"')
    OS_VERSION=$(grep ^VERSION_ID /etc/os-release | cut -d= -f2 | tr -d '"')
  elif [ -f "/etc/debian_version" ]; then
    OS_NAME="Debian"
    OS_VERSION=$(cat /etc/debian_version)
  fi
  # Generate the login banner sourced from /etc/profile.d on login.
  PROFILE_FILE="/etc/profile.d/00_lxc-details.sh"
  echo "echo -e \"\"" >"$PROFILE_FILE"
  # Consistency fix: the closing quote on this line was previously escaped
  # *outside* the double-quoted string (…${CL}"\"); output is byte-identical
  # but the quoting now matches the sibling lines below.
  echo -e "echo -e \"${BOLD}${APPLICATION} LXC Container${CL}\"" >>"$PROFILE_FILE"
  echo -e "echo -e \"${TAB}${GATEWAY}${YW} Provided by: ${GN}community-scripts ORG ${YW}| GitHub: ${GN}https://github.com/community-scripts/ProxmoxVE${CL}\"" >>"$PROFILE_FILE"
  echo "echo \"\"" >>"$PROFILE_FILE"
  echo -e "echo -e \"${TAB}${OS}${YW} OS: ${GN}${OS_NAME} - Version: ${OS_VERSION}${CL}\"" >>"$PROFILE_FILE"
  echo -e "echo -e \"${TAB}${HOSTNAME}${YW} Hostname: ${GN}\$(hostname)${CL}\"" >>"$PROFILE_FILE"
  echo -e "echo -e \"${TAB}${INFO}${YW} IP Address: ${GN}\$(hostname -I | awk '{print \$1}')${CL}\"" >>"$PROFILE_FILE"
  # Disable default MOTD scripts
  chmod -x /etc/update-motd.d/*
  # Optionally allow root password logins over SSH.
  if [[ "${SSH_ROOT}" == "yes" ]]; then
    sed -i "s/#PermitRootLogin prohibit-password/PermitRootLogin yes/g" /etc/ssh/sshd_config
    systemctl restart sshd
  fi
}
# This function customizes the container: enables root auto-login on the
# console when no password was set, installs the /usr/bin/update helper, and
# provisions an optional SSH authorized key for root.
customize() {
  if [[ "$PASSWORD" == "" ]]; then
    msg_info "Customizing Container"
    GETTY_OVERRIDE="/etc/systemd/system/container-getty@1.service.d/override.conf"
    # BUGFIX: quote the command substitutions — the unquoted
    # $(dirname $GETTY_OVERRIDE) / $(basename ...) word-split on whitespace.
    mkdir -p "$(dirname "$GETTY_OVERRIDE")"
    cat <<EOF >"$GETTY_OVERRIDE"
[Service]
ExecStart=
ExecStart=-/sbin/agetty --autologin root --noclear --keep-baud tty%I 115200,38400,9600 \$TERM
EOF
    systemctl daemon-reload
    # Restart the unit derived from the drop-in directory name
    # (container-getty@1.service.d -> container-getty@1.service).
    systemctl restart "$(basename "$(dirname "$GETTY_OVERRIDE")" | sed 's/\.d//')"
    msg_ok "Customized Container"
  fi
  # Install an `update` command that re-runs the app's ct script.
  echo "bash -c \"\$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/ct/${app}.sh)\"" >/usr/bin/update
  chmod +x /usr/bin/update
  # Provision the user's SSH public key for root, if one was supplied.
  if [[ -n "${SSH_AUTHORIZED_KEY}" ]]; then
    mkdir -p /root/.ssh
    echo "${SSH_AUTHORIZED_KEY}" >/root/.ssh/authorized_keys
    chmod 700 /root/.ssh
    chmod 600 /root/.ssh/authorized_keys
  fi
}

File diff suppressed because it is too large Load Diff