From 43dd7aacc85c8823affa1fd2edf0cad475e4ae5c Mon Sep 17 00:00:00 2001 From: CanbiZ <47820557+MickLesk@users.noreply.github.com> Date: Mon, 1 Dec 2025 10:19:04 +0100 Subject: [PATCH] Cleanup --- {misc => bak}/core_bak/build copy 2.func | 0 .../core_bak/build refactor_menu.func | 0 ...unc - advanced-backup-20251127-154005.func | 0 {misc => bak}/core_bak/test-tools-func.sh | 0 {misc => bak}/core_bak/tools.func.bak | 0 .../core_ref/newtemplateavailable.png | Bin .../core_ref/pve-container-upgrader.png | Bin .../build.func.backup-20251029-123804 | 0 .../build.func.backup-20251029-124205 | 0 .../build.func.backup-20251029-124307 | 0 .../build.func.backup-20251029-124334 | 0 ...ld.func.backup-refactoring-20251029-125644 | 0 {misc => bak}/deferred/config-file.func | 0 {misc => bak}/deferred/create_lxc.sh | 0 {misc => bak}/deferred/github.func | 0 docs/CHANGELOG_MISC.md | 504 ++ docs/DEFAULTS_SYSTEM_GUIDE.md | 748 +++ docs/TECHNICAL_REFERENCE.md | 881 +++ misc/{cloud-init.sh => cloud-init.func} | 0 misc/old_misc/alpine-install.func | 172 + misc/old_misc/api.func | 130 + misc/old_misc/build.func | 1439 +++++ misc/old_misc/config-file.func | 699 +++ misc/old_misc/core.func | 452 ++ misc/old_misc/create_lxc.sh | 385 ++ misc/old_misc/install.func | 217 + misc/old_misc/tools.func | 4818 +++++++++++++++++ misc/tools.func.md | 1283 +++++ 28 files changed, 11728 insertions(+) rename {misc => bak}/core_bak/build copy 2.func (100%) rename {misc => bak}/core_bak/build refactor_menu.func (100%) rename {misc => bak}/core_bak/build.func - advanced-backup-20251127-154005.func (100%) rename {misc => bak}/core_bak/test-tools-func.sh (100%) rename {misc => bak}/core_bak/tools.func.bak (100%) rename {misc => bak}/core_ref/newtemplateavailable.png (100%) rename {misc => bak}/core_ref/pve-container-upgrader.png (100%) rename {misc => bak}/deferred/build.func.backup-20251029-123804 (100%) rename {misc => bak}/deferred/build.func.backup-20251029-124205 (100%) rename {misc => 
bak}/deferred/build.func.backup-20251029-124307 (100%) rename {misc => bak}/deferred/build.func.backup-20251029-124334 (100%) rename {misc => bak}/deferred/build.func.backup-refactoring-20251029-125644 (100%) rename {misc => bak}/deferred/config-file.func (100%) rename {misc => bak}/deferred/create_lxc.sh (100%) rename {misc => bak}/deferred/github.func (100%) create mode 100644 docs/CHANGELOG_MISC.md create mode 100644 docs/DEFAULTS_SYSTEM_GUIDE.md create mode 100644 docs/TECHNICAL_REFERENCE.md rename misc/{cloud-init.sh => cloud-init.func} (100%) create mode 100644 misc/old_misc/alpine-install.func create mode 100644 misc/old_misc/api.func create mode 100644 misc/old_misc/build.func create mode 100644 misc/old_misc/config-file.func create mode 100644 misc/old_misc/core.func create mode 100644 misc/old_misc/create_lxc.sh create mode 100644 misc/old_misc/install.func create mode 100644 misc/old_misc/tools.func create mode 100644 misc/tools.func.md diff --git a/misc/core_bak/build copy 2.func b/bak/core_bak/build copy 2.func similarity index 100% rename from misc/core_bak/build copy 2.func rename to bak/core_bak/build copy 2.func diff --git a/misc/core_bak/build refactor_menu.func b/bak/core_bak/build refactor_menu.func similarity index 100% rename from misc/core_bak/build refactor_menu.func rename to bak/core_bak/build refactor_menu.func diff --git a/misc/core_bak/build.func - advanced-backup-20251127-154005.func b/bak/core_bak/build.func - advanced-backup-20251127-154005.func similarity index 100% rename from misc/core_bak/build.func - advanced-backup-20251127-154005.func rename to bak/core_bak/build.func - advanced-backup-20251127-154005.func diff --git a/misc/core_bak/test-tools-func.sh b/bak/core_bak/test-tools-func.sh similarity index 100% rename from misc/core_bak/test-tools-func.sh rename to bak/core_bak/test-tools-func.sh diff --git a/misc/core_bak/tools.func.bak b/bak/core_bak/tools.func.bak similarity index 100% rename from misc/core_bak/tools.func.bak 
rename to bak/core_bak/tools.func.bak diff --git a/misc/core_ref/newtemplateavailable.png b/bak/core_ref/newtemplateavailable.png similarity index 100% rename from misc/core_ref/newtemplateavailable.png rename to bak/core_ref/newtemplateavailable.png diff --git a/misc/core_ref/pve-container-upgrader.png b/bak/core_ref/pve-container-upgrader.png similarity index 100% rename from misc/core_ref/pve-container-upgrader.png rename to bak/core_ref/pve-container-upgrader.png diff --git a/misc/deferred/build.func.backup-20251029-123804 b/bak/deferred/build.func.backup-20251029-123804 similarity index 100% rename from misc/deferred/build.func.backup-20251029-123804 rename to bak/deferred/build.func.backup-20251029-123804 diff --git a/misc/deferred/build.func.backup-20251029-124205 b/bak/deferred/build.func.backup-20251029-124205 similarity index 100% rename from misc/deferred/build.func.backup-20251029-124205 rename to bak/deferred/build.func.backup-20251029-124205 diff --git a/misc/deferred/build.func.backup-20251029-124307 b/bak/deferred/build.func.backup-20251029-124307 similarity index 100% rename from misc/deferred/build.func.backup-20251029-124307 rename to bak/deferred/build.func.backup-20251029-124307 diff --git a/misc/deferred/build.func.backup-20251029-124334 b/bak/deferred/build.func.backup-20251029-124334 similarity index 100% rename from misc/deferred/build.func.backup-20251029-124334 rename to bak/deferred/build.func.backup-20251029-124334 diff --git a/misc/deferred/build.func.backup-refactoring-20251029-125644 b/bak/deferred/build.func.backup-refactoring-20251029-125644 similarity index 100% rename from misc/deferred/build.func.backup-refactoring-20251029-125644 rename to bak/deferred/build.func.backup-refactoring-20251029-125644 diff --git a/misc/deferred/config-file.func b/bak/deferred/config-file.func similarity index 100% rename from misc/deferred/config-file.func rename to bak/deferred/config-file.func diff --git a/misc/deferred/create_lxc.sh 
b/bak/deferred/create_lxc.sh similarity index 100% rename from misc/deferred/create_lxc.sh rename to bak/deferred/create_lxc.sh diff --git a/misc/deferred/github.func b/bak/deferred/github.func similarity index 100% rename from misc/deferred/github.func rename to bak/deferred/github.func diff --git a/docs/CHANGELOG_MISC.md b/docs/CHANGELOG_MISC.md new file mode 100644 index 000000000..c2598b723 --- /dev/null +++ b/docs/CHANGELOG_MISC.md @@ -0,0 +1,504 @@ +# Changelog: /misc Directory Refactoring + +> **Last Updated**: November 28, 2025 +> **Status**: Major Refactoring Complete + +## Overview + +The `/misc` directory has undergone significant refactoring to improve maintainability, security, and functionality. This document tracks all changes, removed files, and new patterns. + +--- + +## File Status Summary + +| File | Status | Notes | +|------|--------|-------| +| `api.func` | ✅ Active | API integration & reporting | +| `build.func` | ✅ Refactored | Core build orchestration (Major changes) | +| `cloud-init.sh` | ✅ Active | Cloud-Init VM configuration | +| `core.func` | ✅ Active | Core utilities & functions | +| `error_handler.func` | ✅ Active | Centralized error handling | +| `install.func` | ✅ Active | Container installation orchestration | +| `passthrough.func` | ✅ Active | Hardware passthrough utilities | +| `tools.func` | ✅ Active | Utility functions & repository setup | +| `vm-core.func` | ✅ Active | VM-specific core functions | +| `config-file.func` | ❌ **REMOVED** | Replaced by defaults system | +| `create_lxc.sh` | ❌ **REMOVED** | Replaced by install.func workflow | + +--- + +## Major Changes in build.func + +### 1. **Configuration System Overhaul** + +#### ❌ Removed +- **`config-file.func` dependency**: Old configuration file format no longer used +- **Static configuration approach**: Replaced with dynamic variable-based system + +#### ✅ New System: Three-Tier Defaults Architecture + +``` +Priority Hierarchy (Highest to Lowest): +1. 
Environment Variables (var_*) ← Highest Priority +2. App-Specific Defaults (<app>.vars files) +3. User Defaults (default.vars) +4. Built-in Defaults ← Fallback +``` + +### 2. **Variable Whitelisting System** + +A new security layer has been introduced to control which variables can be persisted: + +```bash +# Allowed configurable variables +VAR_WHITELIST=( + var_apt_cacher var_apt_cacher_ip var_brg var_cpu var_disk var_fuse + var_gateway var_hostname var_ipv6_method var_mac var_mknod var_mount_fs var_mtu + var_net var_nesting var_ns var_protection var_pw var_ram var_tags var_timezone + var_tun var_unprivileged var_verbose var_vlan var_ssh var_ssh_authorized_key + var_container_storage var_template_storage +) +``` + +**Changes from Previous**: +- ❌ Removed: `var_ctid` (unique per container, cannot be shared) +- ❌ Removed: `var_ipv6_static` (static IPs are container-specific) + +### 3. **Default Settings Management Functions** + +#### `default_var_settings()` +- Creates/updates global user defaults at `/usr/local/community-scripts/default.vars` +- Loads existing defaults and merges with current settings +- Respects environment variable precedence +- Sanitizes values to prevent injection attacks + +#### `get_app_defaults_path()` +- Returns app-specific defaults path: `/usr/local/community-scripts/defaults/<app>.vars` +- Example: `/usr/local/community-scripts/defaults/pihole.vars` + +#### `maybe_offer_save_app_defaults()` +- Called after advanced installation +- Offers to save current settings as app-specific defaults +- Provides diff view when updating existing defaults +- Validates against whitelist before saving + +### 4. 
**Load Variables File Function** + +#### `load_vars_file()` +- Safely loads variables from `.vars` files +- **Key Security Feature**: Does NOT use `source` or `eval` +- Manual parsing with whitelist validation +- Handles escaping and special characters +- Returns 0 on success, 1 on failure + +**Example Usage**: +```bash +load_vars_file "/usr/local/community-scripts/defaults/pihole.vars" +``` + +### 5. **Removed Functions** + +- ❌ `create_lxc()` - Replaced by install.func workflow +- ❌ `read_config()` - Replaced by load_vars_file() +- ❌ `write_config()` - Replaced by direct file generation with sanitization + +--- + +## Installation Modes & Workflows + +### Mode 1: **Default Settings** +``` +Quick installation with pre-defined values +├── User selects OS/Version +├── Uses built-in defaults +└── Creates container immediately +``` + +**Use Case**: First-time users, basic deployments + +### Mode 2: **Advanced Settings** +``` +Full control over all parameters +├── User prompted for each setting +├── 19-step configuration wizard +├── Shows summary before confirmation +└── Offers to save as app defaults +``` + +**Use Case**: Custom configurations, experienced users + +### Mode 3: **User Defaults** (formerly "My Defaults") +``` +Installation using saved user defaults +├── Loads: /usr/local/community-scripts/default.vars +├── Shows loaded settings summary +└── Creates container +``` + +**Use Case**: Consistent deployments across multiple containers + +### Mode 4: **App Defaults** +``` +Installation using app-specific defaults (if available) +├── Loads: /usr/local/community-scripts/defaults/.vars +├── Shows loaded settings summary +└── Creates container +``` + +**Use Case**: Repeat installations with saved configurations + +### Mode 5: **Settings Menu** +``` +Manage configuration files +├── View current settings +├── Edit storage selections +├── Manage defaults location +└── Reset to built-ins +``` + +**Use Case**: Configuration management + +--- + +## Configurable Variables 
Reference + +### Resource Allocation + +| Variable | Type | Default | Example | +|----------|------|---------|---------| +| `var_cpu` | Integer | App-dependent | `4` | +| `var_ram` | Integer (MB) | App-dependent | `2048` | +| `var_disk` | Integer (GB) | App-dependent | `20` | +| `var_unprivileged` | Boolean (0/1) | `1` | `1` | + +### Network Configuration + +| Variable | Type | Default | Example | +|----------|------|---------|---------| +| `var_net` | String | Auto | `veth` | +| `var_brg` | String | `vmbr0` | `vmbr100` | +| `var_gateway` | IP Address | Auto-detected | `192.168.1.1` | +| `var_mtu` | Integer | `1500` | `9000` | +| `var_vlan` | Integer | None | `100` | + +### Identity & Access + +| Variable | Type | Default | Example | +|----------|------|---------|---------| +| `var_hostname` | String | App name | `mypihole` | +| `var_pw` | String | Random | `MySecurePass123!` | +| `var_ssh` | Boolean (yes/no) | `no` | `yes` | +| `var_ssh_authorized_key` | String | None | `ssh-rsa AAAA...` | + +### Container Features + +| Variable | Type | Default | Example | +|----------|------|---------|---------| +| `var_fuse` | Boolean (0/1) | `0` | `1` | +| `var_tun` | Boolean (0/1) | `0` | `1` | +| `var_nesting` | Boolean (0/1) | `0` | `1` | +| `var_keyctl` | Boolean (0/1) | `0` | `1` | +| `var_mknod` | Boolean (0/1) | `0` | `1` | +| `var_mount_fs` | String | None | `ext4` | +| `var_protection` | Boolean (0/1) | `0` | `1` | + +### System Configuration + +| Variable | Type | Default | Example | +|----------|------|---------|---------| +| `var_timezone` | String | System | `Europe/Berlin` | +| `var_searchdomain` | String | None | `example.com` | +| `var_apt_cacher` | String | None | `apt-cacher-ng` | +| `var_apt_cacher_ip` | IP Address | None | `192.168.1.100` | +| `var_tags` | String | App name | `docker,production` | +| `var_verbose` | Boolean (yes/no) | `no` | `yes` | + +### Storage Configuration + +| Variable | Type | Default | Example | 
+|----------|------|---------|---------| +| `var_container_storage` | String | Auto-detected | `local` | +| `var_template_storage` | String | Auto-detected | `local` | + +--- + +## File Formats + +### User Defaults: `/usr/local/community-scripts/default.vars` + +```bash +# User Global Defaults +# Generated by ProxmoxVED Scripts +# Date: 2024-11-28 + +var_cpu=4 +var_ram=2048 +var_disk=20 +var_unprivileged=1 +var_brg=vmbr0 +var_gateway=192.168.1.1 +var_vlan=100 +var_mtu=1500 +var_hostname=mydefaults +var_timezone=Europe/Berlin +var_ssh=yes +var_ssh_authorized_key=ssh-rsa AAAAB3NzaC1... +var_container_storage=local +var_template_storage=local +``` + +### App Defaults: `/usr/local/community-scripts/defaults/.vars` + +```bash +# App-specific defaults for PiHole (pihole) +# Generated on 2024-11-28T15:32:00Z + +var_unprivileged=1 +var_cpu=2 +var_ram=1024 +var_disk=10 +var_brg=vmbr0 +var_net=veth +var_gateway=192.168.1.1 +var_mtu=1500 +var_vlan=100 +var_hostname=pihole +var_timezone=Europe/Berlin +var_container_storage=local +var_template_storage=local +var_tags=dns,pihole +var_verbose=no +``` + +--- + +## Usage Examples + +### Example 1: Set Global User Defaults + +1. Run any app installation script +2. Select **Advanced Settings** +3. Configure all parameters +4. When prompted: **"Save as User Defaults?"** → Select **Yes** +5. File saved to: `/usr/local/community-scripts/default.vars` + +**Future Installations**: Select **User Defaults** mode to reuse settings + +### Example 2: Create & Use App Defaults + +1. Run app installation (e.g., `pihole-install.sh`) +2. Select **Advanced Settings** +3. Fine-tune all parameters for PiHole +4. When prompted: **"Save as App Defaults for PiHole?"** → Select **Yes** +5. 
File saved to: `/usr/local/community-scripts/defaults/pihole.vars` + +**Next Time**: +- Run `pihole-install.sh` again +- Select **App Defaults** +- Same settings automatically applied + +### Example 3: Override via Environment Variables + +```bash +# Set custom values before running script +export var_cpu=8 +export var_ram=4096 +export var_hostname=custom-pihole + +bash pihole-install.sh +``` + +**Priority**: Environment variables override all defaults + +### Example 4: Manual File Editing + +```bash +# Edit User Defaults +sudo nano /usr/local/community-scripts/default.vars + +# Edit App-Specific Defaults +sudo nano /usr/local/community-scripts/defaults/pihole.vars + +# Verify syntax (no source/eval, safe to read) +cat /usr/local/community-scripts/default.vars +``` + +--- + +## Security Improvements + +### 1. **No `source` or `eval` Used** +- ❌ OLD: `source config_file` (Dangerous - executes arbitrary code) +- ✅ NEW: `load_vars_file()` (Safe - manual parsing with validation) + +### 2. **Variable Whitelisting** +- Only explicitly allowed variables can be persisted +- Prevents accidental storage of sensitive values +- Protects against injection attacks + +### 3. **Value Sanitization** +```bash +# Prevents command injection +_sanitize_value() { + case "$1" in + *'$('* | *'`'* | *';'* | *'&'* | *'<('*) + return 1 # Reject dangerous values + ;; + esac + echo "$1" +} +``` + +### 4. **File Permissions** +```bash +# Default vars accessible only to root +-rw-r--r-- root root /usr/local/community-scripts/default.vars +-rw-r--r-- root root /usr/local/community-scripts/defaults/pihole.vars +``` + +--- + +## Migration Guide + +### For Users + +**OLD Workflow**: Manual config file editing +**NEW Workflow**: +1. Run installation script +2. Select "Advanced Settings" +3. Answer prompts +4. 
Save as defaults when offered + +### For Script Developers + +**OLD Pattern**: +```bash +source /path/to/config-file.conf +``` + +**NEW Pattern**: +```bash +# User defaults are automatically loaded in build.func +# No manual intervention needed +# Just use the variables directly +``` + +--- + +## Removed Components + +### `config-file.func` (Deprecated) + +**Reason**: Replaced by three-tier defaults system +- Static configuration was inflexible +- Manual editing error-prone +- No validation or sanitization + +**Migration Path**: Use app/user defaults system + +### `create_lxc.sh` (Deprecated) + +**Reason**: Workflow integrated into install.func +- Centralized container creation logic +- Better error handling +- Unified with VM creation + +**Migration Path**: Use install.func directly + +--- + +## Future Enhancements + +### Planned Features + +1. **Configuration UI**: Web-based settings editor +2. **Configuration Sync**: Push defaults to multiple nodes +3. **Configuration History**: Track changes and diffs +4. **Batch Operations**: Apply defaults to multiple containers +5. **Configuration Templates**: Pre-built setting templates per app + +--- + +## Troubleshooting + +### Issue: Defaults not loading + +**Solution**: +```bash +# Check if defaults file exists +ls -la /usr/local/community-scripts/default.vars + +# Verify syntax +cat /usr/local/community-scripts/default.vars + +# Check file permissions +sudo chown root:root /usr/local/community-scripts/default.vars +sudo chmod 644 /usr/local/community-scripts/default.vars +``` + +### Issue: Variable not being applied + +**Solution**: +1. Check if variable is in `VAR_WHITELIST` +2. Verify variable name starts with `var_` +3. Check syntax in .vars file (no spaces around `=`) +4. 
Use `cat` not `source` to read files + +### Issue: "Invalid option" in defaults menu + +**Solution**: +- Ensure defaults directory exists: `/usr/local/community-scripts/defaults/` +- Create if missing: `sudo mkdir -p /usr/local/community-scripts/defaults/` + +--- + +## Technical Reference + +### Variable Loading Precedence + +``` +1. parse ARGV +2. capture ENV variables (hard environment) +3. load defaults file if exists +4. load app-specific defaults if exists +5. parse command line flags (lowest priority for overrides) + +Precedence (Highest to Lowest): + ENV var_* > AppDefaults.vars > UserDefaults.vars > Built-ins +``` + +### State Machine: Installation Modes + +``` +┌─────────────────┐ +│ Start Script │ +└────────┬────────┘ + │ + ┌────v────────────────┐ + │ Display Mode Menu │ + └────┬─────────────────┘ + │ + ┌────────────────────────────────────┐ + │ User Selects Mode │ + ├──────────┬──────────┬──────────┬──────────┐ + │ │ │ │ │ + v v v v v +┌─────┐ ┌────────┐ ┌──────────┐ ┌─────────┐ ┌───────┐ +│Def. │ │Adv. │ │User │ │App │ │Setting│ +│Set. │ │Set. │ │Default │ │Default │ │Menu │ +└─────┘ └────────┘ └──────────┘ └─────────┘ └───────┘ +``` + +--- + +## Document Versions + +| Version | Date | Changes | +|---------|------|---------| +| 1.0 | 2024-11-28 | Initial comprehensive documentation | + +--- + +**Last Updated**: November 28, 2025 +**Maintainers**: community-scripts Team +**License**: MIT diff --git a/docs/DEFAULTS_SYSTEM_GUIDE.md b/docs/DEFAULTS_SYSTEM_GUIDE.md new file mode 100644 index 000000000..ce5a424de --- /dev/null +++ b/docs/DEFAULTS_SYSTEM_GUIDE.md @@ -0,0 +1,748 @@ +# Configuration & Defaults System - User Guide + +> **Complete Guide to App Defaults and User Defaults** +> +> *Learn how to configure, save, and reuse your installation settings* + +--- + +## Table of Contents + +1. [Quick Start](#quick-start) +2. [Understanding the Defaults System](#understanding-the-defaults-system) +3. [Installation Modes](#installation-modes) +4. 
[How to Save Defaults](#how-to-save-defaults) +5. [How to Use Saved Defaults](#how-to-use-saved-defaults) +6. [Managing Your Defaults](#managing-your-defaults) +7. [Advanced Configuration](#advanced-configuration) +8. [Troubleshooting](#troubleshooting) + +--- + +## Quick Start + +### 30-Second Setup + +```bash +# 1. Run any container installation script +bash pihole-install.sh + +# 2. When prompted, select: "Advanced Settings" +# (This allows you to customize everything) + +# 3. Answer all configuration questions + +# 4. At the end, when asked "Save as App Defaults?" +# Select: YES + +# 5. Done! Your settings are now saved +``` + +**Next Time**: Run the same script again, select **"App Defaults"** and your settings will be applied automatically! + +--- + +## Understanding the Defaults System + +### The Three-Tier System + +Your installation settings are managed through three layers: + +#### 🔷 **Tier 1: Built-in Defaults** (Fallback) +``` +These are hardcoded in the scripts +Provide sensible defaults for each application +Example: PiHole uses 2 CPU cores by default +``` + +#### 🔶 **Tier 2: User Defaults** (Global) +``` +Your personal global defaults +Applied to ALL container installations +Location: /usr/local/community-scripts/default.vars +Example: "I always want 4 CPU cores and 2GB RAM" +``` + +#### 🔴 **Tier 3: App Defaults** (Specific) +``` +Application-specific saved settings +Only applied when installing that specific app +Location: /usr/local/community-scripts/defaults/.vars +Example: "Whenever I install PiHole, use these exact settings" +``` + +### Priority System + +When installing a container, settings are applied in this order: + +``` +┌─────────────────────────────────────┐ +│ 1. Environment Variables (HIGHEST) │ Set in shell: export var_cpu=8 +│ (these override everything) │ +├─────────────────────────────────────┤ +│ 2. App Defaults │ From: defaults/pihole.vars +│ (app-specific saved settings) │ +├─────────────────────────────────────┤ +│ 3. 
User Defaults │ From: default.vars +│ (your global defaults) │ +├─────────────────────────────────────┤ +│ 4. Built-in Defaults (LOWEST) │ Hardcoded in script +│ (failsafe, always available) │ +└─────────────────────────────────────┘ +``` + +**In Plain English**: +- If you set an environment variable → it wins +- Otherwise, if you have app-specific defaults → use those +- Otherwise, if you have user defaults → use those +- Otherwise, use the hardcoded defaults + +--- + +## Installation Modes + +When you run any installation script, you'll be presented with a menu: + +### Option 1️⃣ : **Default Settings** + +``` +Quick installation with standard settings +├─ Best for: First-time users, quick deployments +├─ What happens: +│ 1. Script uses built-in defaults +│ 2. Container created immediately +│ 3. No questions asked +└─ Time: ~2 minutes +``` + +**When to use**: You want a standard installation, don't need customization + +--- + +### Option 2️⃣ : **Advanced Settings** + +``` +Full customization with 19 configuration steps +├─ Best for: Power users, custom requirements +├─ What happens: +│ 1. Script asks for EVERY setting +│ 2. You control: CPU, RAM, Disk, Network, SSH, etc. +│ 3. Shows summary before creating +│ 4. Offers to save as App Defaults +└─ Time: ~5-10 minutes +``` + +**When to use**: You want full control over the configuration + +**Available Settings**: +- CPU cores, RAM amount, Disk size +- Container name, network settings +- SSH access, API access, Features +- Password, SSH keys, Tags + +--- + +### Option 3️⃣ : **User Defaults** + +``` +Use your saved global defaults +├─ Best for: Consistent deployments across many containers +├─ Requires: You've previously saved User Defaults +├─ What happens: +│ 1. Loads settings from: /usr/local/community-scripts/default.vars +│ 2. Shows you the loaded settings +│ 3. 
Creates container immediately +└─ Time: ~2 minutes +``` + +**When to use**: You have preferred defaults you want to use for every app + +--- + +### Option 4️⃣ : **App Defaults** (if available) + +``` +Use previously saved app-specific defaults +├─ Best for: Repeating the same configuration multiple times +├─ Requires: You've previously saved App Defaults for this app +├─ What happens: +│ 1. Loads settings from: /usr/local/community-scripts/defaults/.vars +│ 2. Shows you the loaded settings +│ 3. Creates container immediately +└─ Time: ~2 minutes +``` + +**When to use**: You've installed this app before and want identical settings + +--- + +### Option 5️⃣ : **Settings Menu** + +``` +Manage your saved configurations +├─ Functions: +│ • View current settings +│ • Edit storage selections +│ • Manage defaults location +│ • See what's currently configured +└─ Time: ~1 minute +``` + +**When to use**: You want to review or modify saved settings + +--- + +## How to Save Defaults + +### Method 1: Save While Installing + +This is the easiest way: + +#### Step-by-Step: Create App Defaults + +```bash +# 1. Run the installation script +bash pihole-install.sh + +# 2. Choose installation mode +# ┌─────────────────────────┐ +# │ Select installation mode:│ +# │ 1) Default Settings │ +# │ 2) Advanced Settings │ +# │ 3) User Defaults │ +# │ 4) App Defaults │ +# │ 5) Settings Menu │ +# └─────────────────────────┘ +# +# Enter: 2 (Advanced Settings) + +# 3. Answer all configuration questions +# • Container name? → my-pihole +# • CPU cores? → 4 +# • RAM amount? → 2048 +# • Disk size? → 20 +# • SSH access? → yes +# ... (more options) + +# 4. Review summary (shown before creation) +# ✓ Confirm to proceed + +# 5. After creation completes, you'll see: +# ┌──────────────────────────────────┐ +# │ Save as App Defaults for PiHole? │ +# │ (Yes/No) │ +# └──────────────────────────────────┘ +# +# Select: Yes + +# 6. Done! 
Settings saved to: +# /usr/local/community-scripts/defaults/pihole.vars +``` + +#### Step-by-Step: Create User Defaults + +```bash +# Same as App Defaults, but: +# When you select "Advanced Settings" +# FIRST app you run with this selection will offer +# to save as "User Defaults" additionally + +# This saves to: /usr/local/community-scripts/default.vars +``` + +--- + +### Method 2: Manual File Creation + +For advanced users who want to create defaults without running installation: + +```bash +# Create User Defaults manually +sudo tee /usr/local/community-scripts/default.vars > /dev/null << 'EOF' +# Global User Defaults +var_cpu=4 +var_ram=2048 +var_disk=20 +var_unprivileged=1 +var_brg=vmbr0 +var_gateway=192.168.1.1 +var_timezone=Europe/Berlin +var_ssh=yes +var_container_storage=local +var_template_storage=local +EOF + +# Create App Defaults manually +sudo tee /usr/local/community-scripts/defaults/pihole.vars > /dev/null << 'EOF' +# App-specific defaults for PiHole +var_unprivileged=1 +var_cpu=2 +var_ram=1024 +var_disk=10 +var_brg=vmbr0 +var_gateway=192.168.1.1 +var_hostname=pihole +var_container_storage=local +var_template_storage=local +EOF +``` + +--- + +### Method 3: Using Environment Variables + +Set defaults via environment before running: + +```bash +# Set as environment variables +export var_cpu=4 +export var_ram=2048 +export var_disk=20 +export var_hostname=my-container + +# Run installation +bash pihole-install.sh + +# These settings will be used +# (Can still be overridden by saved defaults) +``` + +--- + +## How to Use Saved Defaults + +### Using User Defaults + +```bash +# 1. Run any installation script +bash pihole-install.sh + +# 2. When asked for mode, select: +# Option: 3 (User Defaults) + +# 3. Your settings from default.vars are applied +# 4. Container created with your saved settings +``` + +### Using App Defaults + +```bash +# 1. Run the app you configured before +bash pihole-install.sh + +# 2. 
When asked for mode, select: +# Option: 4 (App Defaults) + +# 3. Your settings from defaults/pihole.vars are applied +# 4. Container created with exact same settings +``` + +### Overriding Saved Defaults + +```bash +# Even if you have defaults saved, +# you can override them with environment variables + +export var_cpu=8 # Override saved defaults +export var_hostname=custom-name + +bash pihole-install.sh +# Installation will use these values instead of saved defaults +``` + +--- + +## Managing Your Defaults + +### View Your Settings + +#### View User Defaults +```bash +cat /usr/local/community-scripts/default.vars +``` + +#### View App Defaults +```bash +cat /usr/local/community-scripts/defaults/pihole.vars +``` + +#### List All Saved App Defaults +```bash +ls -la /usr/local/community-scripts/defaults/ +``` + +### Edit Your Settings + +#### Edit User Defaults +```bash +sudo nano /usr/local/community-scripts/default.vars +``` + +#### Edit App Defaults +```bash +sudo nano /usr/local/community-scripts/defaults/pihole.vars +``` + +### Update Existing Defaults + +```bash +# Run installation again with your app +bash pihole-install.sh + +# Select: Advanced Settings +# Make desired changes +# At end, when asked to save: +# "Defaults already exist, Update?" 
+# Select: Yes + +# Your saved defaults are updated +``` + +### Delete Defaults + +#### Delete User Defaults +```bash +sudo rm /usr/local/community-scripts/default.vars +``` + +#### Delete App Defaults +```bash +sudo rm /usr/local/community-scripts/defaults/pihole.vars +``` + +#### Delete All App Defaults +```bash +sudo rm /usr/local/community-scripts/defaults/* +``` + +--- + +## Advanced Configuration + +### Available Variables + +All configurable variables start with `var_`: + +#### Resource Allocation +```bash +var_cpu=4 # CPU cores +var_ram=2048 # RAM in MB +var_disk=20 # Disk in GB +var_unprivileged=1 # 0=privileged, 1=unprivileged +``` + +#### Network +```bash +var_brg=vmbr0 # Bridge interface +var_net=veth # Network driver +var_gateway=192.168.1.1 # Default gateway +var_mtu=1500 # MTU size +var_vlan=100 # VLAN ID +``` + +#### System +```bash +var_hostname=pihole # Container name +var_timezone=Europe/Berlin # Timezone +var_pw=SecurePass123 # Root password +var_tags=dns,pihole # Tags for organization +var_verbose=yes # Enable verbose output +``` + +#### Security & Access +```bash +var_ssh=yes # Enable SSH +var_ssh_authorized_key="ssh-rsa AA..." 
# SSH public key +var_protection=1 # Enable protection flag +``` + +#### Features +```bash +var_fuse=1 # FUSE filesystem support +var_tun=1 # TUN device support +var_nesting=1 # Nesting (Docker in LXC) +var_keyctl=1 # Keyctl syscall +var_mknod=1 # Device node creation +``` + +#### Storage +```bash +var_container_storage=local # Where to store container +var_template_storage=local # Where to store templates +``` + +### Example Configuration Files + +#### Gaming Server Defaults +```bash +# High performance for gaming containers +var_cpu=8 +var_ram=4096 +var_disk=50 +var_unprivileged=0 +var_fuse=1 +var_nesting=1 +var_tags=gaming +``` + +#### Development Server +```bash +# Development with Docker support +var_cpu=4 +var_ram=2048 +var_disk=30 +var_unprivileged=1 +var_nesting=1 +var_ssh=yes +var_tags=development +``` + +#### IoT/Monitoring +```bash +# Low-resource, always-on containers +var_cpu=2 +var_ram=512 +var_disk=10 +var_unprivileged=1 +var_nesting=0 +var_fuse=0 +var_tun=0 +var_tags=iot,monitoring +``` + +--- + +## Troubleshooting + +### "App Defaults not available" Message + +**Problem**: You want to use App Defaults, but option says they're not available + +**Solution**: +1. You haven't created App Defaults yet for this app +2. Run the app with "Advanced Settings" +3. When finished, save as App Defaults +4. Next time, App Defaults will be available + +--- + +### "Settings not being applied" + +**Problem**: You saved defaults, but they're not being used + +**Checklist**: +```bash +# 1. Verify files exist +ls -la /usr/local/community-scripts/default.vars +ls -la /usr/local/community-scripts/defaults/.vars + +# 2. Check file permissions (should be readable) +stat /usr/local/community-scripts/default.vars + +# 3. Verify correct mode selected +# (Make sure you selected "User Defaults" or "App Defaults") + +# 4. 
Check for environment variable override +env | grep var_ +# If you have var_* set in environment, +# those override your saved defaults +``` + +--- + +### "Cannot write to defaults location" + +**Problem**: Permission denied when saving defaults + +**Solution**: +```bash +# Create the defaults directory if missing +sudo mkdir -p /usr/local/community-scripts/defaults + +# Fix permissions +sudo chmod 755 /usr/local/community-scripts +sudo chmod 755 /usr/local/community-scripts/defaults + +# Make sure you're running as root +sudo bash pihole-install.sh +``` + +--- + +### "Defaults directory doesn't exist" + +**Problem**: Script can't find where to save defaults + +**Solution**: +```bash +# Create the directory +sudo mkdir -p /usr/local/community-scripts/defaults + +# Verify +ls -la /usr/local/community-scripts/ +``` + +--- + +### Settings seem random or wrong + +**Problem**: Container gets different settings than expected + +**Possible Causes & Solutions**: + +```bash +# 1. Check if environment variables are set +env | grep var_ +# If you see var_* entries, those override your defaults +# Clear them: unset var_cpu var_ram (etc) + +# 2. Verify correct defaults are in files +cat /usr/local/community-scripts/default.vars +cat /usr/local/community-scripts/defaults/pihole.vars + +# 3. Check which mode you actually selected +# (Script output shows which defaults were applied) + +# 4. 
Check Proxmox logs for errors +sudo journalctl -u pve-daemon -n 50 +``` + +--- + +### "Variable not recognized" + +**Problem**: You set a variable that doesn't work + +**Solution**: +Only certain variables are allowed (security whitelist): + +``` +Allowed variables (starting with var_): +✓ var_cpu, var_ram, var_disk, var_unprivileged +✓ var_brg, var_gateway, var_mtu, var_vlan, var_net +✓ var_hostname, var_pw, var_timezone +✓ var_ssh, var_ssh_authorized_key +✓ var_fuse, var_tun, var_nesting, var_keyctl +✓ var_container_storage, var_template_storage +✓ var_tags, var_verbose +✓ var_apt_cacher, var_apt_cacher_ip +✓ var_protection, var_mount_fs + +✗ Other variables are NOT supported +``` + +--- + +## Best Practices + +### ✅ Do's + +✓ Use **App Defaults** when you want app-specific settings +✓ Use **User Defaults** for your global preferences +✓ Edit defaults files directly with `nano` (safe) +✓ Keep separate App Defaults for each app +✓ Back up your defaults regularly +✓ Use environment variables for temporary overrides + +### ❌ Don'ts + +✗ Don't use `source` on defaults files (security risk) +✗ Don't put sensitive passwords in defaults (use SSH keys) +✗ Don't modify defaults while installation is running +✗ Don't delete defaults.d while containers are being created +✗ Don't use special characters without escaping + +--- + +## Quick Reference + +### Defaults Locations + +| Type | Location | Example | +|------|----------|---------| +| User Defaults | `/usr/local/community-scripts/default.vars` | Global settings | +| App Defaults | `/usr/local/community-scripts/defaults/.vars` | PiHole-specific | +| Backup Dir | `/usr/local/community-scripts/defaults/` | All app defaults | + +### File Format + +```bash +# Comments start with # +var_name=value + +# No spaces around = +✓ var_cpu=4 +✗ var_cpu = 4 + +# String values don't need quotes +✓ var_hostname=mycontainer +✓ var_hostname='mycontainer' + +# Values with spaces need quotes +✓ var_tags="docker,production,testing" +✗ 
var_tags=docker,production,testing +``` + +### Command Reference + +```bash +# View defaults +cat /usr/local/community-scripts/default.vars + +# Edit defaults +sudo nano /usr/local/community-scripts/default.vars + +# List all app defaults +ls /usr/local/community-scripts/defaults/ + +# Backup your defaults +cp -r /usr/local/community-scripts/defaults/ ~/defaults-backup/ + +# Set temporary override +export var_cpu=8 +bash pihole-install.sh + +# Create custom defaults +sudo tee /usr/local/community-scripts/defaults/custom.vars << 'EOF' +var_cpu=4 +var_ram=2048 +EOF +``` + +--- + +## Getting Help + +### Need More Information? + +- 📖 [Main Documentation](../../docs/) +- 🐛 [Report Issues](https://github.com/community-scripts/ProxmoxVED/issues) +- 💬 [Discussions](https://github.com/community-scripts/ProxmoxVED/discussions) + +### Useful Commands + +```bash +# Check what variables are available +grep "var_" /path/to/app-install.sh | head -20 + +# Verify defaults syntax +cat /usr/local/community-scripts/default.vars + +# Monitor installation with defaults +bash pihole-install.sh 2>&1 | tee installation.log +``` + +--- + +## Document Information + +| Field | Value | +|-------|-------| +| Version | 1.0 | +| Last Updated | November 28, 2025 | +| Status | Current | +| License | MIT | + +--- + +**Happy configuring! 🚀** diff --git a/docs/TECHNICAL_REFERENCE.md b/docs/TECHNICAL_REFERENCE.md new file mode 100644 index 000000000..aea5148c9 --- /dev/null +++ b/docs/TECHNICAL_REFERENCE.md @@ -0,0 +1,881 @@ +# Technical Reference: Configuration System Architecture + +> **For Developers and Advanced Users** +> +> *Deep dive into how the defaults and configuration system works* + +--- + +## Table of Contents + +1. [System Architecture](#system-architecture) +2. [File Format Specifications](#file-format-specifications) +3. [Function Reference](#function-reference) +4. [Variable Precedence](#variable-precedence) +5. [Data Flow Diagrams](#data-flow-diagrams) +6. 
[Security Model](#security-model) +7. [Implementation Details](#implementation-details) + +--- + +## System Architecture + +### Component Overview + +``` +┌─────────────────────────────────────────────────────────────┐ +│ Installation Script │ +│ (pihole-install.sh, docker-install.sh, etc.) │ +└────────────────────┬────────────────────────────────────────┘ + │ + v +┌─────────────────────────────────────────────────────────────┐ +│ build.func Library │ +│ ┌──────────────────────────────────────────────────────┐ │ +│ │ variables() │ │ +│ │ - Initialize NSAPP, var_install, etc. │ │ +│ └──────────────────────────────────────────────────────┘ │ +│ ┌──────────────────────────────────────────────────────┐ │ +│ │ install_script() │ │ +│ │ - Display mode menu │ │ +│ │ - Route to appropriate workflow │ │ +│ └──────────────────────────────────────────────────────┘ │ +│ ┌──────────────────────────────────────────────────────┐ │ +│ │ base_settings() │ │ +│ │ - Apply built-in defaults │ │ +│ │ - Read environment variables (var_*) │ │ +│ └──────────────────────────────────────────────────────┘ │ +│ ┌──────────────────────────────────────────────────────┐ │ +│ │ load_vars_file() │ │ +│ │ - Safe file parsing (NO source/eval) │ │ +│ │ - Whitelist validation │ │ +│ │ - Value sanitization │ │ +│ └──────────────────────────────────────────────────────┘ │ +│ ┌──────────────────────────────────────────────────────┐ │ +│ │ default_var_settings() │ │ +│ │ - Load user defaults │ │ +│ │ - Display summary │ │ +│ └──────────────────────────────────────────────────────┘ │ +│ ┌──────────────────────────────────────────────────────┐ │ +│ │ maybe_offer_save_app_defaults() │ │ +│ │ - Offer to save current settings │ │ +│ │ - Handle updates vs. 
new saves │ │ +│ └──────────────────────────────────────────────────────┘ │ +└─────────────────────────────────────────────────────────────┘ + │ + v +┌─────────────────────────────────────────────────────────────┐ +│ Configuration Files (on Disk) │ +│ ┌──────────────────────────────────────────────────────┐ │ +│ │ /usr/local/community-scripts/default.vars │ │ +│ │ (User global defaults) │ │ +│ └──────────────────────────────────────────────────────┘ │ +│ ┌──────────────────────────────────────────────────────┐ │ +│ │ /usr/local/community-scripts/defaults/*.vars │ │ +│ │ (App-specific defaults) │ │ +│ └──────────────────────────────────────────────────────┘ │ +└─────────────────────────────────────────────────────────────┘ +``` + +--- + +## File Format Specifications + +### User Defaults: `default.vars` + +**Location**: `/usr/local/community-scripts/default.vars` + +**MIME Type**: `text/plain` + +**Encoding**: UTF-8 (no BOM) + +**Format Specification**: + +``` +# File Format: Simple key=value pairs +# Purpose: Store global user defaults +# Security: Sanitized values, whitelist validation + +# Comments and blank lines are ignored +# Line format: var_name=value +# No spaces around the equals sign +# String values do not need quoting (but may be quoted) + +[CONTENT] +var_cpu=4 +var_ram=2048 +var_disk=20 +var_hostname=mydefault +var_brg=vmbr0 +var_gateway=192.168.1.1 +``` + +**Formal Grammar**: + +``` +FILE := (BLANK_LINE | COMMENT_LINE | VAR_LINE)* +BLANK_LINE := \n +COMMENT_LINE := '#' [^\n]* \n +VAR_LINE := VAR_NAME '=' VAR_VALUE \n +VAR_NAME := 'var_' [a-z_]+ +VAR_VALUE := [^\n]* # Any printable characters except newline +``` + +**Constraints**: + +| Constraint | Value | +|-----------|-------| +| Max file size | 64 KB | +| Max line length | 1024 bytes | +| Max variables | 100 | +| Allowed var names | `var_[a-z_]+` | +| Value validation | Whitelist + Sanitization | + +**Example Valid File**: + +```bash +# Global User Defaults +# Created: 2024-11-28 + +# Resource 
defaults +var_cpu=4 +var_ram=2048 +var_disk=20 + +# Network defaults +var_brg=vmbr0 +var_gateway=192.168.1.1 +var_mtu=1500 +var_vlan=100 + +# System defaults +var_timezone=Europe/Berlin +var_hostname=default-container + +# Storage +var_container_storage=local +var_template_storage=local + +# Security +var_ssh=yes +var_protection=0 +var_unprivileged=1 +``` + +### App Defaults: `.vars` + +**Location**: `/usr/local/community-scripts/defaults/.vars` + +**Format**: Identical to `default.vars` + +**Naming Convention**: `.vars` + +- `nsapp` = lowercase app name with spaces removed +- Examples: + - `pihole` → `pihole.vars` + - `opnsense` → `opnsense.vars` + - `docker compose` → `dockercompose.vars` + +**Example App Defaults**: + +```bash +# App-specific defaults for PiHole (pihole) +# Generated on 2024-11-28T15:32:00Z +# These override user defaults when installing pihole + +var_unprivileged=1 +var_cpu=2 +var_ram=1024 +var_disk=10 +var_brg=vmbr0 +var_net=veth +var_gateway=192.168.1.1 +var_hostname=pihole +var_timezone=Europe/Berlin +var_container_storage=local +var_template_storage=local +var_tags=dns,pihole +``` + +--- + +## Function Reference + +### `load_vars_file()` + +**Purpose**: Safely load variables from .vars files without using `source` or `eval` + +**Signature**: +```bash +load_vars_file(filepath) +``` + +**Parameters**: + +| Param | Type | Required | Example | +|-------|------|----------|---------| +| filepath | String | Yes | `/usr/local/community-scripts/default.vars` | + +**Returns**: +- `0` on success +- `1` on error (file missing, parse error, etc.) 
+ +**Environment Side Effects**: +- Sets all parsed `var_*` variables as shell variables +- Does NOT unset variables if file missing (safe) +- Does NOT affect other variables + +**Implementation Pattern**: + +```bash +load_vars_file() { + local file="$1" + + # File must exist + [ -f "$file" ] || return 0 + + # Parse line by line (not with source/eval) + local line key val + while IFS='=' read -r key val || [ -n "$key" ]; do + # Skip comments and empty lines + [[ "$key" =~ ^[[:space:]]*# ]] && continue + [[ -z "$key" ]] && continue + + # Validate key is in whitelist + _is_whitelisted_key "$key" || continue + + # Sanitize and export value + val="$(_sanitize_value "$val")" + [ $? -eq 0 ] && export "$key=$val" + done < "$file" + + return 0 +} +``` + +**Usage Examples**: + +```bash +# Load user defaults +load_vars_file "/usr/local/community-scripts/default.vars" + +# Load app-specific defaults +load_vars_file "$(get_app_defaults_path)" + +# Check if successful +if load_vars_file "$vars_path"; then + echo "Settings loaded successfully" +else + echo "Failed to load settings" +fi + +# Values are now available as variables +echo "Using $var_cpu cores" +echo "Allocating ${var_ram} MB RAM" +``` + +--- + +### `get_app_defaults_path()` + +**Purpose**: Get the full path for app-specific defaults file + +**Signature**: +```bash +get_app_defaults_path() +``` + +**Parameters**: None + +**Returns**: +- String: Full path to app defaults file + +**Implementation**: + +```bash +get_app_defaults_path() { + local n="${NSAPP:-${APP,,}}" + echo "/usr/local/community-scripts/defaults/${n}.vars" +} +``` + +**Usage Examples**: + +```bash +# Get app defaults path +app_defaults="$(get_app_defaults_path)" +echo "App defaults at: $app_defaults" + +# Check if app defaults exist +if [ -f "$(get_app_defaults_path)" ]; then + echo "App defaults available" +fi + +# Load app defaults +load_vars_file "$(get_app_defaults_path)" +``` + +--- + +### `default_var_settings()` + +**Purpose**: Load and display 
user global defaults + +**Signature**: +```bash +default_var_settings() +``` + +**Parameters**: None + +**Returns**: +- `0` on success +- `1` on error + +**Workflow**: + +``` +1. Find default.vars location + (usually /usr/local/community-scripts/default.vars) + +2. Create if missing + +3. Load variables from file + +4. Map var_verbose → VERBOSE variable + +5. Call base_settings (apply to container config) + +6. Call echo_default (display summary) +``` + +**Implementation Pattern**: + +```bash +default_var_settings() { + local VAR_WHITELIST=( + var_apt_cacher var_apt_cacher_ip var_brg var_cpu var_disk var_fuse + var_gateway var_hostname var_ipv6_method var_mac var_mtu + var_net var_ns var_pw var_ram var_tags var_tun var_unprivileged + var_verbose var_vlan var_ssh var_ssh_authorized_key + var_container_storage var_template_storage + ) + + # Ensure file exists + _ensure_default_vars + + # Find and load + local dv="$(_find_default_vars)" + load_vars_file "$dv" + + # Map verbose flag + if [[ -n "${var_verbose:-}" ]]; then + case "${var_verbose,,}" in + 1 | yes | true | on) VERBOSE="yes" ;; + *) VERBOSE="${var_verbose}" ;; + esac + fi + + # Apply and display + base_settings "$VERBOSE" + echo_default +} +``` + +--- + +### `maybe_offer_save_app_defaults()` + +**Purpose**: Offer to save current settings as app-specific defaults + +**Signature**: +```bash +maybe_offer_save_app_defaults() +``` + +**Parameters**: None + +**Returns**: None (side effects only) + +**Behavior**: + +1. After advanced installation completes +2. Offers user: "Save as App Defaults for ?" +3. If yes: + - Saves to `/usr/local/community-scripts/defaults/.vars` + - Only whitelisted variables included + - Previous defaults backed up (if exists) +4. 
If no: + - No action taken + +**Flow**: + +```bash +maybe_offer_save_app_defaults() { + local app_vars_path="$(get_app_defaults_path)" + + # Build current settings from memory + local new_tmp="$(_build_current_app_vars_tmp)" + + # Check if already exists + if [ -f "$app_vars_path" ]; then + # Show diff and ask: Update? Keep? View Diff? + _show_app_defaults_diff_menu "$new_tmp" "$app_vars_path" + else + # New defaults - just save + if whiptail --yesno "Save as App Defaults for $APP?" 10 60; then + mv "$new_tmp" "$app_vars_path" + chmod 644 "$app_vars_path" + fi + fi +} +``` + +--- + +### `_sanitize_value()` + +**Purpose**: Remove dangerous characters/patterns from configuration values + +**Signature**: +```bash +_sanitize_value(value) +``` + +**Parameters**: + +| Param | Type | Required | +|-------|------|----------| +| value | String | Yes | + +**Returns**: +- `0` (success) + sanitized value on stdout +- `1` (failure) + nothing if dangerous + +**Dangerous Patterns**: + +| Pattern | Threat | Example | +|---------|--------|---------| +| `$(...)` | Command substitution | `$(rm -rf /)` | +| `` ` ` `` | Command substitution | `` `whoami` `` | +| `;` | Command separator | `value; rm -rf /` | +| `&` | Background execution | `value & malicious` | +| `<(` | Process substitution | `<(cat /etc/passwd)` | + +**Implementation**: + +```bash +_sanitize_value() { + case "$1" in + *'$('* | *'`'* | *';'* | *'&'* | *'<('*) + echo "" + return 1 # Reject dangerous value + ;; + esac + echo "$1" + return 0 +} +``` + +**Usage Examples**: + +```bash +# Safe value +_sanitize_value "192.168.1.1" # Returns: 192.168.1.1 (status: 0) + +# Dangerous value +_sanitize_value "$(whoami)" # Returns: (empty) (status: 1) + +# Usage in code +if val="$(_sanitize_value "$user_input")"; then + export var_hostname="$val" +else + msg_error "Invalid value: contains dangerous characters" +fi +``` + +--- + +### `_is_whitelisted_key()` + +**Purpose**: Check if variable name is in allowed whitelist + 
+**Signature**: +```bash +_is_whitelisted_key(key) +``` + +**Parameters**: + +| Param | Type | Required | Example | +|-------|------|----------|---------| +| key | String | Yes | `var_cpu` | + +**Returns**: +- `0` if key is whitelisted +- `1` if key is NOT whitelisted + +**Implementation**: + +```bash +_is_whitelisted_key() { + local k="$1" + local w + for w in "${VAR_WHITELIST[@]}"; do + [ "$k" = "$w" ] && return 0 + done + return 1 +} +``` + +**Usage Examples**: + +```bash +# Check if variable can be saved +if _is_whitelisted_key "var_cpu"; then + echo "var_cpu can be saved" +fi + +# Reject unknown variables +if ! _is_whitelisted_key "var_custom"; then + msg_error "var_custom is not supported" +fi +``` + +--- + +## Variable Precedence + +### Loading Order + +When a container is being created, variables are resolved in this order: + +``` +Step 1: Read ENVIRONMENT VARIABLES + ├─ Check if var_cpu is already set in shell environment + ├─ Check if var_ram is already set + └─ ...all var_* variables + +Step 2: Load APP-SPECIFIC DEFAULTS + ├─ Check if /usr/local/community-scripts/defaults/pihole.vars exists + ├─ Load all var_* from that file + └─ These override built-ins but NOT environment variables + +Step 3: Load USER GLOBAL DEFAULTS + ├─ Check if /usr/local/community-scripts/default.vars exists + ├─ Load all var_* from that file + └─ These override built-ins but NOT app-specific + +Step 4: Use BUILT-IN DEFAULTS + └─ Hardcoded in script (lowest priority) +``` + +### Precedence Examples + +**Example 1: Environment Variable Wins** +```bash +# Shell environment has highest priority +$ export var_cpu=16 +$ bash pihole-install.sh + +# Result: Container gets 16 cores +# (ignores app defaults, user defaults, built-ins) +``` + +**Example 2: App Defaults Override User Defaults** +```bash +# User Defaults: var_cpu=4 +# App Defaults: var_cpu=2 +$ bash pihole-install.sh + +# Result: Container gets 2 cores +# (app-specific setting takes precedence) +``` + +**Example 3: All 
Defaults Missing (Built-ins Used)** +```bash +# No environment variables set +# No app defaults file +# No user defaults file +$ bash pihole-install.sh + +# Result: Uses built-in defaults +# (var_cpu might be 2 by default) +``` + +### Implementation in Code + +```bash +# Typical pattern in build.func + +base_settings() { + # Priority 1: Environment variables (already set if export used) + CT_TYPE=${var_unprivileged:-"1"} # Use existing or default + + # Priority 2: Load app defaults (may override above) + if [ -f "$(get_app_defaults_path)" ]; then + load_vars_file "$(get_app_defaults_path)" + fi + + # Priority 3: Load user defaults + if [ -f "/usr/local/community-scripts/default.vars" ]; then + load_vars_file "/usr/local/community-scripts/default.vars" + fi + + # Priority 4: Apply built-in defaults (lowest) + CORE_COUNT=${var_cpu:-"${APP_CPU_DEFAULT:-2}"} + RAM_SIZE=${var_ram:-"${APP_RAM_DEFAULT:-1024}"} + + # Result: var_cpu has been set through precedence chain +} +``` + +--- + +## Data Flow Diagrams + +### Installation Flow: Advanced Settings + +``` +┌──────────────┐ +│ Start Script│ +└──────┬───────┘ + │ + v +┌──────────────────────────────┐ +│ Display Installation Mode │ +│ Menu (5 options) │ +└──────┬───────────────────────┘ + │ User selects "Advanced Settings" + v +┌──────────────────────────────────┐ +│ Call: base_settings() │ +│ (Apply built-in defaults) │ +└──────┬───────────────────────────┘ + │ + v +┌──────────────────────────────────┐ +│ Call: advanced_settings() │ +│ (Show 19-step wizard) │ +│ - Ask CPU, RAM, Disk, Network... 
│ +└──────┬───────────────────────────┘ + │ + v +┌──────────────────────────────────┐ +│ Show Summary │ +│ Review all chosen values │ +└──────┬───────────────────────────┘ + │ User confirms + v +┌──────────────────────────────────┐ +│ Create Container │ +│ Using current variable values │ +└──────┬───────────────────────────┘ + │ + v +┌──────────────────────────────────┐ +│ Installation Complete │ +└──────┬───────────────────────────┘ + │ + v +┌──────────────────────────────────────┐ +│ Offer: Save as App Defaults? │ +│ (Save current settings) │ +└──────┬───────────────────────────────┘ + │ + ├─ YES → Save to defaults/.vars + │ + └─ NO → Exit +``` + +### Variable Resolution Flow + +``` +CONTAINER CREATION STARTED + │ + v + ┌─────────────────────┐ + │ Check ENVIRONMENT │ + │ for var_cpu, var_..│ + └──────┬──────────────┘ + │ Found? Use them (Priority 1) + │ Not found? Continue... + v + ┌──────────────────────────┐ + │ Load App Defaults │ + │ /defaults/.vars │ + └──────┬───────────────────┘ + │ File exists? Parse & load (Priority 2) + │ Not found? Continue... + v + ┌──────────────────────────┐ + │ Load User Defaults │ + │ /default.vars │ + └──────┬───────────────────┘ + │ File exists? Parse & load (Priority 3) + │ Not found? Continue... + v + ┌──────────────────────────┐ + │ Use Built-in Defaults │ + │ (Hardcoded values) │ + └──────┬───────────────────┘ + │ + v + ┌──────────────────────────┐ + │ All Variables Resolved │ + │ Ready for container │ + │ creation │ + └──────────────────────────┘ +``` + +--- + +## Security Model + +### Threat Model + +| Threat | Mitigation | +|--------|-----------| +| **Arbitrary Code Execution** | No `source` or `eval`; manual parsing only | +| **Variable Injection** | Whitelist of allowed variable names | +| **Command Substitution** | `_sanitize_value()` blocks `$()`, backticks, etc. 
| +| **Path Traversal** | Files locked to `/usr/local/community-scripts/` | +| **Permission Escalation** | Files created with restricted permissions | +| **Information Disclosure** | Sensitive variables not logged | + +### Security Controls + +#### 1. Input Validation + +```bash +# Only specific variables allowed +if ! _is_whitelisted_key "$key"; then + skip_this_variable +fi + +# Values sanitized +if ! val="$(_sanitize_value "$value")"; then + reject_entire_line +fi +``` + +#### 2. Safe File Parsing + +```bash +# ❌ DANGEROUS (OLD) +source /path/to/config.conf +# Could execute: rm -rf / or any code + +# ✅ SAFE (NEW) +load_vars_file "/path/to/config.conf" +# Only reads var_name=value pairs, no execution +``` + +#### 3. Whitelisting + +```bash +# Only these variables can be configured +var_cpu, var_ram, var_disk, var_brg, ... +var_hostname, var_pw, var_ssh, ... + +# NOT allowed: +var_malicious, var_hack, custom_var, ... +``` + +#### 4. Value Constraints + +```bash +# No command injection patterns +if [[ "$value" =~ ($|`|;|&|<\() ]]; then + reject_value +fi +``` + +--- + +## Implementation Details + +### Module: `build.func` + +**Load Order** (in actual scripts): +1. `#!/usr/bin/env bash` - Shebang +2. `source /dev/stdin <<<$(curl ... api.func)` - API functions +3. `source /dev/stdin <<<$(curl ... build.func)` - Build functions +4. `variables()` - Initialize variables +5. `check_root()` - Security check +6. `install_script()` - Main flow + +**Key Sections**: + +```bash +# Section 1: Initialization & Variables +- variables() +- NSAPP, var_install, INTEGER pattern, etc. 
+ +# Section 2: Storage Management +- storage_selector() +- ensure_storage_selection_for_vars_file() + +# Section 3: Base Settings +- base_settings() # Apply defaults to all var_* +- echo_default() # Display current settings + +# Section 4: Variable Loading +- load_vars_file() # Safe parsing +- _is_whitelisted_key() # Validation +- _sanitize_value() # Threat mitigation + +# Section 5: Defaults Management +- default_var_settings() # Load user defaults +- get_app_defaults_path() # Get app defaults path +- maybe_offer_save_app_defaults() # Save option + +# Section 6: Installation Flow +- install_script() # Main entry point +- advanced_settings() # 19-step wizard +``` + +### Regex Patterns Used + +| Pattern | Purpose | Example Match | +|---------|---------|---| +| `^[0-9]+([.][0-9]+)?$` | Integer validation | `4`, `192.168` | +| `^var_[a-z_]+$` | Variable name | `var_cpu`, `var_ssh` | +| `*'$('*` | Command substitution | `$(whoami)` | +| `*\`*` | Backtick substitution | `` `cat /etc/passwd` `` | + +--- + +## Appendix: Migration Reference + +### Old Pattern (Deprecated) + +```bash +# ❌ OLD: config-file.func +source config-file.conf # Executes arbitrary code +if [ "$USE_DEFAULTS" = "yes" ]; then + apply_settings_directly +fi +``` + +### New Pattern (Current) + +```bash +# ✅ NEW: load_vars_file() +if load_vars_file "$(get_app_defaults_path)"; then + echo "Settings loaded securely" +fi +``` + +### Function Mapping + +| Old | New | Location | +|-----|-----|----------| +| `read_config()` | `load_vars_file()` | build.func | +| `write_config()` | `_build_current_app_vars_tmp()` | build.func | +| None | `maybe_offer_save_app_defaults()` | build.func | +| None | `get_app_defaults_path()` | build.func | + +--- + +**End of Technical Reference** diff --git a/misc/cloud-init.sh b/misc/cloud-init.func similarity index 100% rename from misc/cloud-init.sh rename to misc/cloud-init.func diff --git a/misc/old_misc/alpine-install.func b/misc/old_misc/alpine-install.func new file mode 
100644 index 000000000..ddea81ecc --- /dev/null +++ b/misc/old_misc/alpine-install.func @@ -0,0 +1,172 @@ +# Copyright (c) 2021-2025 community-scripts ORG +# Author: tteck (tteckster) +# Co-Author: MickLesk +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE + +if ! command -v curl >/dev/null 2>&1; then + apk update && apk add curl >/dev/null 2>&1 +fi +source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/core.func) +load_functions + +# This function enables IPv6 if it's not disabled and sets verbose mode +verb_ip6() { + set_std_mode # Set STD mode based on VERBOSE + + if [ "$IPV6_METHOD" == "disable" ]; then + msg_info "Disabling IPv6 (this may affect some services)" + $STD sysctl -w net.ipv6.conf.all.disable_ipv6=1 + $STD sysctl -w net.ipv6.conf.default.disable_ipv6=1 + $STD sysctl -w net.ipv6.conf.lo.disable_ipv6=1 + mkdir -p /etc/sysctl.d + $STD tee /etc/sysctl.d/99-disable-ipv6.conf >/dev/null <&2 -en "${CROSS}${RD} No Network! " + sleep $RETRY_EVERY + i=$((i - 1)) + done + + if [ "$(ip addr show | grep 'inet ' | grep -v '127.0.0.1' | awk '{print $2}' | cut -d'/' -f1)" = "" ]; then + echo 1>&2 -e "\n${CROSS}${RD} No Network After $RETRY_NUM Tries${CL}" + echo -e "${NETWORK}Check Network Settings" + exit 1 + fi + msg_ok "Set up Container OS" + msg_ok "Network Connected: ${BL}$(ip addr show | grep 'inet ' | awk '{print $2}' | cut -d'/' -f1 | tail -n1)${CL}" +} + +# This function checks the network connection by pinging a known IP address and prompts the user to continue if the internet is not connected +network_check() { + set +e + trap - ERR + if ping -c 1 -W 1 1.1.1.1 &>/dev/null || ping -c 1 -W 1 8.8.8.8 &>/dev/null || ping -c 1 -W 1 9.9.9.9 &>/dev/null; then + msg_ok "Internet Connected" + else + msg_error "Internet NOT Connected" + read -r -p "Would you like to continue anyway? 
" prompt + if [[ "${prompt,,}" =~ ^(y|yes)$ ]]; then + echo -e "${INFO}${RD}Expect Issues Without Internet${CL}" + else + echo -e "${NETWORK}Check Network Settings" + exit 1 + fi + fi + RESOLVEDIP=$(getent hosts github.com | awk '{ print $1 }') + if [[ -z "$RESOLVEDIP" ]]; then msg_error "DNS Lookup Failure"; else msg_ok "DNS Resolved github.com to ${BL}$RESOLVEDIP${CL}"; fi + set -e + trap 'error_handler $LINENO "$BASH_COMMAND"' ERR +} + +# This function updates the Container OS by running apt-get update and upgrade +update_os() { + msg_info "Updating Container OS" + $STD apk -U upgrade + source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/tools.func) + msg_ok "Updated Container OS" +} + +# This function modifies the message of the day (motd) and SSH settings +motd_ssh() { + echo "export TERM='xterm-256color'" >>/root/.bashrc + IP=$(ip -4 addr show eth0 | awk '/inet / {print $2}' | cut -d/ -f1 | head -n 1) + + if [ -f "/etc/os-release" ]; then + OS_NAME=$(grep ^NAME /etc/os-release | cut -d= -f2 | tr -d '"') + OS_VERSION=$(grep ^VERSION_ID /etc/os-release | cut -d= -f2 | tr -d '"') + else + OS_NAME="Alpine Linux" + OS_VERSION="Unknown" + fi + + PROFILE_FILE="/etc/profile.d/00_lxc-details.sh" + echo "echo -e \"\"" >"$PROFILE_FILE" + echo -e "echo -e \"${BOLD}${APPLICATION} LXC Container${CL}"\" >>"$PROFILE_FILE" + echo -e "echo -e \"${TAB}${GATEWAY}${YW} Provided by: ${GN}community-scripts ORG ${YW}| GitHub: ${GN}https://github.com/community-scripts/ProxmoxVE${CL}\"" >>"$PROFILE_FILE" + echo "echo \"\"" >>"$PROFILE_FILE" + echo -e "echo -e \"${TAB}${OS}${YW} OS: ${GN}${OS_NAME} - Version: ${OS_VERSION}${CL}\"" >>"$PROFILE_FILE" + echo -e "echo -e \"${TAB}${HOSTNAME}${YW} Hostname: ${GN}\$(hostname)${CL}\"" >>"$PROFILE_FILE" + echo -e "echo -e \"${TAB}${INFO}${YW} IP Address: ${GN}\$(ip -4 addr show eth0 | awk '/inet / {print \$2}' | cut -d/ -f1 | head -n 1)${CL}\"" >>"$PROFILE_FILE" + + # Configure SSH if enabled + if [[ 
"${SSH_ROOT}" == "yes" ]]; then + # Enable sshd service + $STD rc-update add sshd + # Allow root login via SSH + sed -i "s/#PermitRootLogin prohibit-password/PermitRootLogin yes/g" /etc/ssh/sshd_config + # Start the sshd service + $STD /etc/init.d/sshd start + fi +} + +# Validate Timezone for some LXC's +validate_tz() { + [[ -f "/usr/share/zoneinfo/$1" ]] +} + +# This function customizes the container and enables passwordless login for the root user +customize() { + if [[ "$PASSWORD" == "" ]]; then + msg_info "Customizing Container" + passwd -d root >/dev/null 2>&1 + + # Ensure agetty is available + apk add --no-cache --force-broken-world util-linux >/dev/null 2>&1 + + # Create persistent autologin boot script + mkdir -p /etc/local.d + cat <<'EOF' >/etc/local.d/autologin.start +#!/bin/sh +sed -i 's|^tty1::respawn:.*|tty1::respawn:/sbin/agetty --autologin root --noclear tty1 38400 linux|' /etc/inittab +kill -HUP 1 +EOF + touch /root/.hushlogin + + chmod +x /etc/local.d/autologin.start + rc-update add local >/dev/null 2>&1 + + # Apply autologin immediately for current session + /etc/local.d/autologin.start + + msg_ok "Customized Container" + fi + + echo "bash -c \"\$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/ct/${app}.sh)\"" >/usr/bin/update + chmod +x /usr/bin/update + + if [[ -n "${SSH_AUTHORIZED_KEY}" ]]; then + mkdir -p /root/.ssh + echo "${SSH_AUTHORIZED_KEY}" >/root/.ssh/authorized_keys + chmod 700 /root/.ssh + chmod 600 /root/.ssh/authorized_keys + fi +} diff --git a/misc/old_misc/api.func b/misc/old_misc/api.func new file mode 100644 index 000000000..d42f919fc --- /dev/null +++ b/misc/old_misc/api.func @@ -0,0 +1,130 @@ +# Copyright (c) 2021-2025 community-scripts ORG +# Author: michelroegl-brunner +# License: MIT | https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/LICENSE + +post_to_api() { + + if ! 
command -v curl &>/dev/null; then + return + fi + + if [ "$DIAGNOSTICS" = "no" ]; then + return + fi + + if [ -z "$RANDOM_UUID" ]; then + return + fi + + local API_URL="http://api.community-scripts.org/upload" + local pve_version="not found" + pve_version=$(pveversion | awk -F'[/ ]' '{print $2}') + + JSON_PAYLOAD=$( + cat </dev/null; then + return + fi + + if [ "$POST_UPDATE_DONE" = true ]; then + return 0 + fi + local API_URL="http://api.community-scripts.org/upload/updatestatus" + local status="${1:-failed}" + local error="${2:-No error message}" + + JSON_PAYLOAD=$( + cat </dev/null 2>&1; then + source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/core.func) + load_functions +elif command -v wget >/dev/null 2>&1; then + source <(wget -qO- https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/core.func) + load_functions +fi +# This function enables error handling in the script by setting options and defining a trap for the ERR signal. +catch_errors() { + set -Eeo pipefail + trap 'error_handler $LINENO "$BASH_COMMAND"' ERR +} + +# This function is called when an error occurs. It receives the exit code, line number, and command that caused the error, and displays an error message. +error_handler() { + source /dev/stdin <<<$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/api.func) + printf "\e[?25h" + local exit_code="$?" + local line_number="$1" + local command="$2" + local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}" + post_update_to_api "failed" "${command}" + echo -e "\n$error_message\n" +} + +# Check if the current shell is using bash +shell_check() { + if [[ "$(ps -p $$ -o comm=)" != "bash" ]]; then + clear + msg_error "Your default shell is not bash. Please report this to our github issues or discord." + echo -e "\nExiting..." 
+ sleep 2 + exit + fi +} + +# Run as root only +root_check() { + if [[ "$(id -u)" -ne 0 || $(ps -o comm= -p $PPID) == "sudo" ]]; then + clear + msg_error "Please run this script as root." + echo -e "\nExiting..." + sleep 2 + exit + fi +} + +# This function checks the version of Proxmox Virtual Environment (PVE) and exits if the version is not supported. +# Supported: Proxmox VE 8.0.x – 8.9.x, 9.0 and 9.1 +pve_check() { + local PVE_VER + PVE_VER="$(pveversion | awk -F'/' '{print $2}' | awk -F'-' '{print $1}')" + + # Check for Proxmox VE 8.x: allow 8.0–8.9 + if [[ "$PVE_VER" =~ ^8\.([0-9]+) ]]; then + local MINOR="${BASH_REMATCH[1]}" + if ((MINOR < 0 || MINOR > 9)); then + msg_error "This version of Proxmox VE is not supported." + msg_error "Supported: Proxmox VE version 8.0 – 8.9" + exit 1 + fi + return 0 + fi + + # Check for Proxmox VE 9.x: allow 9.0 and 9.1 + if [[ "$PVE_VER" =~ ^9\.([0-9]+) ]]; then + local MINOR="${BASH_REMATCH[1]}" + if ((MINOR < 0 || MINOR > 1)); then + msg_error "This version of Proxmox VE is not supported." + msg_error "Supported: Proxmox VE version 9.0 – 9.1" + exit 1 + fi + return 0 + fi + + # All other unsupported versions + msg_error "This version of Proxmox VE is not supported." + msg_error "Supported versions: Proxmox VE 8.0 – 8.x or 9.0 – 9.1" + exit 1 +} + +# When a node is running tens of containers, it's possible to exceed the kernel's cryptographic key storage allocations. +# These are tuneable, so verify if the currently deployment is approaching the limits, advise the user on how to tune the limits, and exit the script. 
+# https://cleveruptime.com/docs/files/proc-key-users | https://docs.kernel.org/security/keys/core.html +maxkeys_check() { + # Read kernel parameters + per_user_maxkeys=$(cat /proc/sys/kernel/keys/maxkeys 2>/dev/null || echo 0) + per_user_maxbytes=$(cat /proc/sys/kernel/keys/maxbytes 2>/dev/null || echo 0) + + # Exit if kernel parameters are unavailable + if [[ "$per_user_maxkeys" -eq 0 || "$per_user_maxbytes" -eq 0 ]]; then + echo -e "${CROSS}${RD} Error: Unable to read kernel parameters. Ensure proper permissions.${CL}" + exit 1 + fi + + # Fetch key usage for user ID 100000 (typical for containers) + used_lxc_keys=$(awk '/100000:/ {print $2}' /proc/key-users 2>/dev/null || echo 0) + used_lxc_bytes=$(awk '/100000:/ {split($5, a, "/"); print a[1]}' /proc/key-users 2>/dev/null || echo 0) + + # Calculate thresholds and suggested new limits + threshold_keys=$((per_user_maxkeys - 100)) + threshold_bytes=$((per_user_maxbytes - 1000)) + new_limit_keys=$((per_user_maxkeys * 2)) + new_limit_bytes=$((per_user_maxbytes * 2)) + + # Check if key or byte usage is near limits + failure=0 + if [[ "$used_lxc_keys" -gt "$threshold_keys" ]]; then + echo -e "${CROSS}${RD} Warning: Key usage is near the limit (${used_lxc_keys}/${per_user_maxkeys}).${CL}" + echo -e "${INFO} Suggested action: Set ${GN}kernel.keys.maxkeys=${new_limit_keys}${CL} in ${BOLD}/etc/sysctl.d/98-community-scripts.conf${CL}." + failure=1 + fi + if [[ "$used_lxc_bytes" -gt "$threshold_bytes" ]]; then + echo -e "${CROSS}${RD} Warning: Key byte usage is near the limit (${used_lxc_bytes}/${per_user_maxbytes}).${CL}" + echo -e "${INFO} Suggested action: Set ${GN}kernel.keys.maxbytes=${new_limit_bytes}${CL} in ${BOLD}/etc/sysctl.d/98-community-scripts.conf${CL}." 
+ failure=1 + fi + + # Provide next steps if issues are detected + if [[ "$failure" -eq 1 ]]; then + echo -e "${INFO} To apply changes, run: ${BOLD}service procps force-reload${CL}" + exit 1 + fi + + echo -e "${CM}${GN} All kernel key limits are within safe thresholds.${CL}" +} + +# This function checks the system architecture and exits if it's not "amd64". +arch_check() { + if [ "$(dpkg --print-architecture)" != "amd64" ]; then + echo -e "\n ${INFO}${YWB}This script will not work with PiMox! \n" + echo -e "\n ${YWB}Visit https://github.com/asylumexp/Proxmox for ARM64 support. \n" + echo -e "Exiting..." + sleep 2 + exit + fi +} + +# Function to get the current IP address based on the distribution +get_current_ip() { + if [ -f /etc/os-release ]; then + # Check for Debian/Ubuntu (uses hostname -I) + if grep -qE 'ID=debian|ID=ubuntu' /etc/os-release; then + CURRENT_IP=$(hostname -I | awk '{print $1}') + # Check for Alpine (uses ip command) + elif grep -q 'ID=alpine' /etc/os-release; then + CURRENT_IP=$(ip -4 addr show eth0 | awk '/inet / {print $2}' | cut -d/ -f1 | head -n 1) + else + CURRENT_IP="Unknown" + fi + fi + echo "$CURRENT_IP" +} + +# Function to update the IP address in the MOTD file +update_motd_ip() { + MOTD_FILE="/etc/motd" + + if [ -f "$MOTD_FILE" ]; then + # Remove existing IP Address lines to prevent duplication + sed -i '/IP Address:/d' "$MOTD_FILE" + + IP=$(get_current_ip) + # Add the new IP address + echo -e "${TAB}${NETWORK}${YW} IP Address: ${GN}${IP}${CL}" >>"$MOTD_FILE" + fi +} + +# This function checks if the script is running through SSH and prompts the user to confirm if they want to proceed or exit. +ssh_check() { + if [ -n "${SSH_CLIENT:+x}" ]; then + if whiptail --backtitle "Proxmox VE Helper Scripts" --defaultno --title "SSH DETECTED" --yesno "It's advisable to utilize the Proxmox shell rather than SSH, as there may be potential complications with variable retrieval. Proceed using SSH?" 
10 72; then + whiptail --backtitle "Proxmox VE Helper Scripts" --msgbox --title "Proceed using SSH" "You've chosen to proceed using SSH. If any issues arise, please run the script in the Proxmox shell before creating a repository issue." 10 72 + else + clear + echo "Exiting due to SSH usage. Please consider using the Proxmox shell." + exit + fi + fi +} + +base_settings() { + # Default Settings + CT_TYPE=${var_unprivileged:-"1"} + DISK_SIZE=${var_disk:-"4"} + CORE_COUNT=${var_cpu:-"1"} + RAM_SIZE=${var_ram:-"1024"} + VERBOSE=${var_verbose:-"${1:-no}"} + PW=${var_pw:-""} + CT_ID=${var_ctid:-$NEXTID} + HN=${var_hostname:-$NSAPP} + BRG=${var_brg:-"vmbr0"} + NET=${var_net:-"dhcp"} + IPV6_METHOD=${var_ipv6_method:-"none"} + IPV6_STATIC=${var_ipv6_static:-""} + GATE=${var_gateway:-""} + APT_CACHER=${var_apt_cacher:-""} + APT_CACHER_IP=${var_apt_cacher_ip:-""} + MTU=${var_mtu:-""} + SD=${var_storage:-""} + NS=${var_ns:-""} + MAC=${var_mac:-""} + VLAN=${var_vlan:-""} + SSH=${var_ssh:-"no"} + SSH_AUTHORIZED_KEY=${var_ssh_authorized_key:-""} + UDHCPC_FIX=${var_udhcpc_fix:-""} + TAGS="community-script;${var_tags:-}" + ENABLE_FUSE=${var_fuse:-"${1:-no}"} + ENABLE_TUN=${var_tun:-"${1:-no}"} + + # Since these 2 are only defined outside of default_settings function, we add a temporary fallback. TODO: To align everything, we should add these as constant variables (e.g. OSTYPE and OSVERSION), but that would currently require updating the default_settings function for all existing scripts + if [ -z "$var_os" ]; then + var_os="debian" + fi + if [ -z "$var_version" ]; then + var_version="12" + fi +} + +write_config() { + mkdir -p /opt/community-scripts + # This function writes the configuration to a file. + if whiptail --backtitle "Proxmox VE Helper Scripts" --defaultno --title "Write configfile" --yesno "Do you want to write the selections to a config file?" 
10 60; then + FILEPATH="/opt/community-scripts/${NSAPP}.conf" + [[ "$GATE" =~ ",gw=" ]] && local GATE="${GATE##,gw=}" + + # Strip prefixes from parameters for config file storage + local SD_VALUE="${SD}" + local NS_VALUE="${NS}" + local MAC_VALUE="${MAC}" + local VLAN_VALUE="${VLAN}" + [[ "$SD" =~ ^-searchdomain= ]] && SD_VALUE="${SD#-searchdomain=}" + [[ "$NS" =~ ^-nameserver= ]] && NS_VALUE="${NS#-nameserver=}" + [[ "$MAC" =~ ^,hwaddr= ]] && MAC_VALUE="${MAC#,hwaddr=}" + [[ "$VLAN" =~ ^,tag= ]] && VLAN_VALUE="${VLAN#,tag=}" + + if [[ ! -f $FILEPATH ]]; then + cat <"$FILEPATH" +# ${NSAPP} Configuration File +# Generated on $(date) + +CT_TYPE="${CT_TYPE}" +DISK_SIZE="${DISK_SIZE}" +CORE_COUNT="${CORE_COUNT}" +RAM_SIZE="${RAM_SIZE}" +VERBOSE="${VERBOSE}" +PW="${PW##-password }" +#CT_ID=$NEXTID +HN="${HN}" +BRG="${BRG}" +NET="${NET}" +IPV6_METHOD="${IPV6_METHOD:-none}" +# Set this only if using "IPV6_METHOD=static" +#IPV6STATIC="fd00::1234/64" + +GATE="${GATE:-none}" +APT_CACHER_IP="${APT_CACHER_IP:-none}" +MTU="${MTU:-1500}" +SD="${SD_VALUE:-none}" +NS="${NS_VALUE:-none}" +MAC="${MAC_VALUE:-none}" +VLAN="${VLAN_VALUE:-none}" +SSH="${SSH}" +SSH_AUTHORIZED_KEY="${SSH_AUTHORIZED_KEY}" +TAGS="${TAGS:-none}" +ENABLE_FUSE="$ENABLE_FUSE" +ENABLE_TUN="$ENABLE_TUN" +EOF + + echo -e "${INFO}${BOLD}${GN}Writing configuration to ${FILEPATH}${CL}" + else + echo -e "${INFO}${BOLD}${RD}Configuration file already exists at ${FILEPATH}${CL}" + if whiptail --backtitle "Proxmox VE Helper Scripts" --defaultno --title "Overwrite configfile" --yesno "Do you want to overwrite the existing config file?" 
10 60; then + rm -f "$FILEPATH" + cat <"$FILEPATH" +# ${NSAPP} Configuration File +# Generated on $(date) + +CT_TYPE="${CT_TYPE}" +DISK_SIZE="${DISK_SIZE}" +CORE_COUNT="${CORE_COUNT}" +RAM_SIZE="${RAM_SIZE}" +VERBOSE="${VERBOSE}" +PW="${PW##-password }" +#CT_ID=$NEXTID +HN="${HN}" +BRG="${BRG}" +NET="${NET}" +IPV6_METHOD="${IPV6_METHOD:-none}" + +# Set this only if using "IPV6_METHOD=static" +#IPV6STATIC="fd00::1234/64" + +GATE="${GATE:-none}" +APT_CACHER_IP="${APT_CACHER_IP:-none}" +MTU="${MTU:-1500}" +SD="${SD_VALUE:-none}" +NS="${NS_VALUE:-none}" +MAC="${MAC_VALUE:-none}" +VLAN="${VLAN_VALUE:-none}" +SSH="${SSH}" +SSH_AUTHORIZED_KEY="${SSH_AUTHORIZED_KEY}" +TAGS="${TAGS:-none}" +ENABLE_FUSE="$ENABLE_FUSE" +ENABLE_TUN="$ENABLE_TUN" +EOF + echo -e "${INFO}${BOLD}${GN}Writing configuration to ${FILEPATH}${CL}" + else + echo -e "${INFO}${BOLD}${RD}Configuration file not overwritten${CL}" + fi + fi + fi +} + +# This function displays the default values for various settings. +echo_default() { + # Convert CT_TYPE to description + CT_TYPE_DESC="Unprivileged" + if [ "$CT_TYPE" -eq 0 ]; then + CT_TYPE_DESC="Privileged" + fi + + # Output the selected values with icons + echo -e "${CONTAINERID}${BOLD}${DGN}Container ID: ${BGN}${CT_ID}${CL}" + echo -e "${OS}${BOLD}${DGN}Operating System: ${BGN}$var_os ($var_version)${CL}" + echo -e "${CONTAINERTYPE}${BOLD}${DGN}Container Type: ${BGN}$CT_TYPE_DESC${CL}" + echo -e "${DISKSIZE}${BOLD}${DGN}Disk Size: ${BGN}${DISK_SIZE} GB${CL}" + echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}${CORE_COUNT}${CL}" + echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}${RAM_SIZE} MiB${CL}" + if [ "$VERBOSE" == "yes" ]; then + echo -e "${SEARCH}${BOLD}${DGN}Verbose Mode: ${BGN}Enabled${CL}" + fi + echo -e "${CREATING}${BOLD}${BL}Creating a ${APP} LXC using the above default settings${CL}" + echo -e " " +} + +# This function is called when the user decides to exit the script. It clears the screen and displays an exit message. 
+exit_script() { + clear + echo -e "\n${CROSS}${RD}User exited script${CL}\n" + exit +} + +# This function allows the user to configure advanced settings for the script. +advanced_settings() { + whiptail --backtitle "Proxmox VE Helper Scripts" --msgbox --title "Here is an instructional tip:" "To make a selection, use the Spacebar." 8 58 + # Setting Default Tag for Advanced Settings + TAGS="community-script;${var_tags:-}" + CT_DEFAULT_TYPE="${CT_TYPE}" + CT_TYPE="" + while [ -z "$CT_TYPE" ]; do + if [ "$CT_DEFAULT_TYPE" == "1" ]; then + if CT_TYPE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "CONTAINER TYPE" --radiolist "Choose Type" 10 58 2 \ + "1" "Unprivileged" ON \ + "0" "Privileged" OFF \ + 3>&1 1>&2 2>&3); then + if [ -n "$CT_TYPE" ]; then + CT_TYPE_DESC="Unprivileged" + if [ "$CT_TYPE" -eq 0 ]; then + CT_TYPE_DESC="Privileged" + fi + echo -e "${OS}${BOLD}${DGN}Operating System: ${BGN}$var_os${CL}" + echo -e "${OSVERSION}${BOLD}${DGN}Version: ${BGN}$var_version${CL}" + echo -e "${CONTAINERTYPE}${BOLD}${DGN}Container Type: ${BGN}$CT_TYPE_DESC${CL}" + fi + else + exit_script + fi + fi + if [ "$CT_DEFAULT_TYPE" == "0" ]; then + if CT_TYPE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "CONTAINER TYPE" --radiolist "Choose Type" 10 58 2 \ + "1" "Unprivileged" OFF \ + "0" "Privileged" ON \ + 3>&1 1>&2 2>&3); then + if [ -n "$CT_TYPE" ]; then + CT_TYPE_DESC="Unprivileged" + if [ "$CT_TYPE" -eq 0 ]; then + CT_TYPE_DESC="Privileged" + fi + echo -e "${OS}${BOLD}${DGN}Operating System: ${BGN}$var_os${CL}" + echo -e "${OSVERSION}${BOLD}${DGN}Version: ${BGN}$var_version${CL}" + echo -e "${CONTAINERTYPE}${BOLD}${DGN}Container Type: ${BGN}$CT_TYPE_DESC${CL}" + fi + else + exit_script + fi + fi + done + + while true; do + if PW1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --passwordbox "\nSet Root Password (needed for root ssh access)" 9 58 --title "PASSWORD (leave blank for automatic login)" 3>&1 1>&2 2>&3); then + # Empty = Autologin + if [[ 
-z "$PW1" ]]; then + PW="" + PW1="Automatic Login" + echo -e "${VERIFYPW}${BOLD}${DGN}Root Password: ${BGN}$PW1${CL}" + break + fi + + # Invalid: contains spaces + if [[ "$PW1" == *" "* ]]; then + whiptail --msgbox "Password cannot contain spaces." 8 58 + continue + fi + + # Invalid: too short + if ((${#PW1} < 5)); then + whiptail --msgbox "Password must be at least 5 characters." 8 58 + continue + fi + + # Confirm password + if PW2=$(whiptail --backtitle "Proxmox VE Helper Scripts" --passwordbox "\nVerify Root Password" 9 58 --title "PASSWORD VERIFICATION" 3>&1 1>&2 2>&3); then + if [[ "$PW1" == "$PW2" ]]; then + PW="-password $PW1" + echo -e "${VERIFYPW}${BOLD}${DGN}Root Password: ${BGN}********${CL}" + break + else + whiptail --msgbox "Passwords do not match. Please try again." 8 58 + fi + else + exit_script + fi + else + exit_script + fi + done + + if CT_ID=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Container ID" 8 58 "$NEXTID" --title "CONTAINER ID" 3>&1 1>&2 2>&3); then + if [ -z "$CT_ID" ]; then + CT_ID="$NEXTID" + echo -e "${CONTAINERID}${BOLD}${DGN}Container ID: ${BGN}$CT_ID${CL}" + else + echo -e "${CONTAINERID}${BOLD}${DGN}Container ID: ${BGN}$CT_ID${CL}" + fi + else + exit_script + fi + + while true; do + if CT_NAME=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Hostname" 8 58 "$NSAPP" --title "HOSTNAME" 3>&1 1>&2 2>&3); then + if [ -z "$CT_NAME" ]; then + HN="$NSAPP" + else + HN=$(echo "${CT_NAME,,}" | tr -d ' ') + fi + # Hostname validate (RFC 1123) + if [[ "$HN" =~ ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ ]]; then + echo -e "${HOSTNAME}${BOLD}${DGN}Hostname: ${BGN}$HN${CL}" + break + else + whiptail --backtitle "Proxmox VE Helper Scripts" \ + --msgbox "❌ Invalid hostname: '$HN'\n\nOnly lowercase letters, digits and hyphens (-) are allowed.\nUnderscores (_) or other characters are not permitted!" 
10 70 + fi + else + exit_script + fi + done + + while true; do + DISK_SIZE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Disk Size in GB" 8 58 "$var_disk" --title "DISK SIZE" 3>&1 1>&2 2>&3) || exit_script + + if [ -z "$DISK_SIZE" ]; then + DISK_SIZE="$var_disk" + fi + + if [[ "$DISK_SIZE" =~ ^[1-9][0-9]*$ ]]; then + echo -e "${DISKSIZE}${BOLD}${DGN}Disk Size: ${BGN}${DISK_SIZE} GB${CL}" + break + else + whiptail --msgbox "Disk size must be a positive integer!" 8 58 + fi + done + + while true; do + CORE_COUNT=$(whiptail --backtitle "Proxmox VE Helper Scripts" \ + --inputbox "Allocate CPU Cores" 8 58 "$var_cpu" --title "CORE COUNT" 3>&1 1>&2 2>&3) || exit_script + + if [ -z "$CORE_COUNT" ]; then + CORE_COUNT="$var_cpu" + fi + + if [[ "$CORE_COUNT" =~ ^[1-9][0-9]*$ ]]; then + echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}$CORE_COUNT${CL}" + break + else + whiptail --msgbox "CPU core count must be a positive integer!" 8 58 + fi + done + + while true; do + RAM_SIZE=$(whiptail --backtitle "Proxmox VE Helper Scripts" \ + --inputbox "Allocate RAM in MiB" 8 58 "$var_ram" --title "RAM" 3>&1 1>&2 2>&3) || exit_script + + if [ -z "$RAM_SIZE" ]; then + RAM_SIZE="$var_ram" + fi + + if [[ "$RAM_SIZE" =~ ^[1-9][0-9]*$ ]]; then + echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}${RAM_SIZE} MiB${CL}" + break + else + whiptail --msgbox "RAM size must be a positive integer!" 
8 58 + fi + done + + BRIDGES="" + IFACE_FILEPATH_LIST="/etc/network/interfaces"$'\n'$(find "/etc/network/interfaces.d/" -type f) + OLD_IFS=$IFS + IFS=$'\n' + + for iface_filepath in ${IFACE_FILEPATH_LIST}; do + iface_indexes_tmpfile=$(mktemp -q -u '.iface-XXXX') + + (grep -Pn '^\s*iface' "${iface_filepath}" | cut -d':' -f1 && wc -l "${iface_filepath}" | cut -d' ' -f1) | + awk 'FNR==1 {line=$0; next} {print line":"$0-1; line=$0}' >"${iface_indexes_tmpfile}" || true + + if [ -f "${iface_indexes_tmpfile}" ]; then + while read -r pair; do + start=$(echo "${pair}" | cut -d':' -f1) + end=$(echo "${pair}" | cut -d':' -f2) + + if awk "NR >= ${start} && NR <= ${end}" "${iface_filepath}" | grep -qP '^\s*(bridge[-_](ports|stp|fd|vlan-aware|vids)|ovs_type\s+OVSBridge)\b'; then + iface_name=$(sed "${start}q;d" "${iface_filepath}" | awk '{print $2}') + BRIDGES="${iface_name}"$'\n'"${BRIDGES}" + fi + + done <"${iface_indexes_tmpfile}" + rm -f "${iface_indexes_tmpfile}" + fi + + done + + IFS=$OLD_IFS + + BRIDGES=$(echo "$BRIDGES" | grep -v '^\s*$' | sort | uniq) + + if [[ -z "$BRIDGES" ]]; then + BRG="vmbr0" + echo -e "${BRIDGE}${BOLD}${DGN}Bridge: ${BGN}$BRG${CL}" + else + BRG=$(whiptail --backtitle "Proxmox VE Helper Scripts" --menu "Select network bridge:" 15 40 6 $(echo "$BRIDGES" | awk '{print $0, "Bridge"}') 3>&1 1>&2 2>&3) + if [ -z "$BRG" ]; then + exit_script + else + echo -e "${BRIDGE}${BOLD}${DGN}Bridge: ${BGN}$BRG${CL}" + fi + fi + + # IPv4 methods: dhcp, static, none + while true; do + IPV4_METHOD=$(whiptail --backtitle "Proxmox VE Helper Scripts" \ + --title "IPv4 Address Management" \ + --menu "Select IPv4 Address Assignment Method:" 12 60 2 \ + "dhcp" "Automatic (DHCP, recommended)" \ + "static" "Static (manual entry)" \ + 3>&1 1>&2 2>&3) + + exit_status=$? 
+ if [ $exit_status -ne 0 ]; then + exit_script + fi + + case "$IPV4_METHOD" in + dhcp) + NET="dhcp" + GATE="" + echo -e "${NETWORK}${BOLD}${DGN}IPv4: DHCP${CL}" + break + ;; + static) + # Static: call and validate CIDR address + while true; do + NET=$(whiptail --backtitle "Proxmox VE Helper Scripts" \ + --inputbox "Enter Static IPv4 CIDR Address (e.g. 192.168.100.50/24)" 8 58 "" \ + --title "IPv4 ADDRESS" 3>&1 1>&2 2>&3) + if [ -z "$NET" ]; then + whiptail --msgbox "IPv4 address must not be empty." 8 58 + continue + elif [[ "$NET" =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}/([0-9]|[1-2][0-9]|3[0-2])$ ]]; then + echo -e "${NETWORK}${BOLD}${DGN}IPv4 Address: ${BGN}$NET${CL}" + break + else + whiptail --msgbox "$NET is not a valid IPv4 CIDR address. Please enter a correct value!" 8 58 + fi + done + + # call and validate Gateway + while true; do + GATE1=$(whiptail --backtitle "Proxmox VE Helper Scripts" \ + --inputbox "Enter Gateway IP address for static IPv4" 8 58 "" \ + --title "Gateway IP" 3>&1 1>&2 2>&3) + if [ -z "$GATE1" ]; then + whiptail --msgbox "Gateway IP address cannot be empty." 8 58 + elif [[ ! "$GATE1" =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}$ ]]; then + whiptail --msgbox "Invalid Gateway IP address format." 8 58 + else + GATE=",gw=$GATE1" + echo -e "${GATEWAY}${BOLD}${DGN}Gateway IP Address: ${BGN}$GATE1${CL}" + break + fi + done + break + ;; + esac + done + + # IPv6 Address Management selection + while true; do + IPV6_METHOD=$(whiptail --backtitle "Proxmox VE Helper Scripts" --menu \ + "Select IPv6 Address Management Type:" 16 70 5 \ + "auto" "SLAAC/AUTO (recommended) - Dynamic IPv6 from network" \ + "dhcp" "DHCPv6 - DHCP-assigned IPv6 address" \ + "static" "Static - Manual IPv6 address configuration" \ + "none" "None - No IPv6 assignment (most containers)" \ + "disable" "Fully Disabled - (breaks some services)" \ + --default-item "auto" 3>&1 1>&2 2>&3) + [ $? 
-ne 0 ] && exit_script + + case "$IPV6_METHOD" in + auto) + echo -e "${NETWORK}${BOLD}${DGN}IPv6: ${BGN}SLAAC/AUTO${CL}" + IPV6_ADDR="" + IPV6_GATE="" + break + ;; + dhcp) + echo -e "${NETWORK}${BOLD}${DGN}IPv6: ${BGN}DHCPv6${CL}" + IPV6_ADDR="dhcp" + IPV6_GATE="" + break + ;; + static) + # Ask for static IPv6 address (CIDR notation, e.g., 2001:db8::1234/64) + while true; do + IPV6_ADDR=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox \ + "Set a static IPv6 CIDR address (e.g., 2001:db8::1234/64)" 8 58 "" \ + --title "IPv6 STATIC ADDRESS" 3>&1 1>&2 2>&3) || exit_script + if [[ "$IPV6_ADDR" =~ ^([0-9a-fA-F:]+:+)+[0-9a-fA-F]+(/[0-9]{1,3})$ ]]; then + echo -e "${NETWORK}${BOLD}${DGN}IPv6 Address: ${BGN}$IPV6_ADDR${CL}" + break + else + whiptail --backtitle "Proxmox VE Helper Scripts" --msgbox \ + "$IPV6_ADDR is an invalid IPv6 CIDR address. Please enter a valid IPv6 CIDR address (e.g., 2001:db8::1234/64)" 8 58 + fi + done + # Optional: ask for IPv6 gateway for static config + while true; do + IPV6_GATE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox \ + "Enter IPv6 gateway address (optional, leave blank for none)" 8 58 "" --title "IPv6 GATEWAY" 3>&1 1>&2 2>&3) + if [ -z "$IPV6_GATE" ]; then + IPV6_GATE="" + break + elif [[ "$IPV6_GATE" =~ ^([0-9a-fA-F:]+:+)+[0-9a-fA-F]+$ ]]; then + break + else + whiptail --backtitle "Proxmox VE Helper Scripts" --msgbox \ + "Invalid IPv6 gateway format." 
8 58 + + fi + done + break + ;; + none) + echo -e "${NETWORK}${BOLD}${DGN}IPv6: ${BGN}None${CL}" + IPV6_ADDR="none" + IPV6_GATE="" + break + ;; + disable) + whiptail --backtitle "Proxmox VE Helper Scripts" --msgbox \ + "⚠️ WARNING - FULLY DISABLE IPv6:\n\nThis will completely disable IPv6 inside the container via sysctl.\n\nSide Effects:\n • Services requiring IPv6 will fail\n • Localhost IPv6 (::1) will not work\n • Some applications may not start\n\nOnly use if you have a specific reason to completely disable IPv6.\n\nFor most use cases, select 'None' instead." 14 70 + echo -e "${NETWORK}${BOLD}${DGN}IPv6: ${BGN}Fully Disabled (IPv6 disabled via sysctl)${CL}" + IPV6_ADDR="none" + IPV6_GATE="" + break + ;; + *) + exit_script + ;; + esac + done + + if [ "$var_os" == "alpine" ]; then + APT_CACHER="" + APT_CACHER_IP="" + else + if APT_CACHER_IP=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set APT-Cacher IP (leave blank for none)" 8 58 --title "APT-Cacher IP" 3>&1 1>&2 2>&3); then + APT_CACHER="${APT_CACHER_IP:+yes}" + echo -e "${NETWORK}${BOLD}${DGN}APT-Cacher IP Address: ${BGN}${APT_CACHER_IP:-Default}${CL}" + else + exit_script + fi + fi + + if MTU1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Interface MTU Size (leave blank for default [The MTU of your selected vmbr, default is 1500])" 8 58 --title "MTU SIZE" 3>&1 1>&2 2>&3); then + if [ -z "$MTU1" ]; then + MTU1="Default" + MTU="" + else + MTU=",mtu=$MTU1" + fi + echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}$MTU1${CL}" + else + exit_script + fi + + if SD=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a DNS Search Domain (leave blank for HOST)" 8 58 --title "DNS Search Domain" 3>&1 1>&2 2>&3); then + if [ -z "$SD" ]; then + SX=Host + SD="" + else + SX=$SD + SD="-searchdomain=$SD" + fi + echo -e "${SEARCH}${BOLD}${DGN}DNS Search Domain: ${BGN}$SX${CL}" + else + exit_script + fi + + if NX=$(whiptail --backtitle "Proxmox VE Helper Scripts" 
--inputbox "Set a DNS Server IP (leave blank for HOST)" 8 58 --title "DNS SERVER IP" 3>&1 1>&2 2>&3); then + if [ -z "$NX" ]; then + NX=Host + NS="" + else + NS="-nameserver=$NX" + fi + echo -e "${NETWORK}${BOLD}${DGN}DNS Server IP Address: ${BGN}$NX${CL}" + else + exit_script + fi + + if MAC1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a MAC Address(leave blank for generated MAC)" 8 58 --title "MAC ADDRESS" 3>&1 1>&2 2>&3); then + if [ -z "$MAC1" ]; then + MAC1="Default" + MAC="" + else + MAC=",hwaddr=$MAC1" + echo -e "${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}$MAC1${CL}" + fi + else + exit_script + fi + + if VLAN1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a Vlan(leave blank for no VLAN)" 8 58 --title "VLAN" 3>&1 1>&2 2>&3); then + if [ -z "$VLAN1" ]; then + VLAN1="Default" + VLAN="" + else + VLAN=",tag=$VLAN1" + fi + echo -e "${VLANTAG}${BOLD}${DGN}Vlan: ${BGN}$VLAN1${CL}" + else + exit_script + fi + + if ADV_TAGS=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Custom Tags?[If you remove all, there will be no tags!]" 8 58 "${TAGS}" --title "Advanced Tags" 3>&1 1>&2 2>&3); then + if [ -n "${ADV_TAGS}" ]; then + ADV_TAGS=$(echo "$ADV_TAGS" | tr -d '[:space:]') + TAGS="${ADV_TAGS}" + else + TAGS=";" + fi + echo -e "${NETWORK}${BOLD}${DGN}Tags: ${BGN}$TAGS${CL}" + else + exit_script + fi + + SSH_AUTHORIZED_KEY="$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "SSH Authorized key for root (leave empty for none)" 8 58 --title "SSH Key" 3>&1 1>&2 2>&3)" + + if [[ -z "${SSH_AUTHORIZED_KEY}" ]]; then + SSH_AUTHORIZED_KEY="" + fi + + if [[ "$PW" == -password* || -n "$SSH_AUTHORIZED_KEY" ]]; then + if (whiptail --backtitle "Proxmox VE Helper Scripts" --defaultno --title "SSH ACCESS" --yesno "Enable Root SSH Access?" 
10 58); then + SSH="yes" + else + SSH="no" + fi + echo -e "${ROOTSSH}${BOLD}${DGN}Root SSH Access: ${BGN}$SSH${CL}" + else + SSH="no" + echo -e "${ROOTSSH}${BOLD}${DGN}Root SSH Access: ${BGN}$SSH${CL}" + fi + + if (whiptail --backtitle "Proxmox VE Helper Scripts" --defaultno --title "FUSE Support" --yesno "Enable FUSE support?\nRequired for tools like rclone, mergerfs, AppImage, etc." 10 58); then + ENABLE_FUSE="yes" + else + ENABLE_FUSE="no" + fi + echo -e "${FUSE}${BOLD}${DGN}Enable FUSE Support: ${BGN}$ENABLE_FUSE${CL}" + + if (whiptail --backtitle "Proxmox VE Helper Scripts" --defaultno --title "VERBOSE MODE" --yesno "Enable Verbose Mode?" 10 58); then + VERBOSE="yes" + else + VERBOSE="no" + fi + echo -e "${SEARCH}${BOLD}${DGN}Verbose Mode: ${BGN}$VERBOSE${CL}" + + if (whiptail --backtitle "Proxmox VE Helper Scripts" --title "ADVANCED SETTINGS COMPLETE" --yesno "Ready to create ${APP} LXC?" 10 58); then + echo -e "${CREATING}${BOLD}${RD}Creating a ${APP} LXC using the above advanced settings${CL}" + + # Strip prefixes from DNS parameters for config file storage + local SD_VALUE="$SD" + local NS_VALUE="$NS" + local MAC_VALUE="$MAC" + local VLAN_VALUE="$VLAN" + [[ "$SD" =~ ^-searchdomain= ]] && SD_VALUE="${SD#-searchdomain=}" + [[ "$NS" =~ ^-nameserver= ]] && NS_VALUE="${NS#-nameserver=}" + [[ "$MAC" =~ ^,hwaddr= ]] && MAC_VALUE="${MAC#,hwaddr=}" + [[ "$VLAN" =~ ^,tag= ]] && VLAN_VALUE="${VLAN#,tag=}" + + # Temporarily store original values + local SD_ORIG="$SD" + local NS_ORIG="$NS" + local MAC_ORIG="$MAC" + local VLAN_ORIG="$VLAN" + + # Set clean values for config file writing + SD="$SD_VALUE" + NS="$NS_VALUE" + MAC="$MAC_VALUE" + VLAN="$VLAN_VALUE" + + write_config + + # Restore original formatted values for container creation + SD="$SD_ORIG" + NS="$NS_ORIG" + MAC="$MAC_ORIG" + VLAN="$VLAN_ORIG" + else + clear + header_info + echo -e "${ADVANCED}${BOLD}${RD}Using Advanced Settings on node $PVEHOST_NAME${CL}" + advanced_settings + fi +} + +diagnostics_check() { 
+ if ! [ -d "/usr/local/community-scripts" ]; then + mkdir -p /usr/local/community-scripts + fi + + if ! [ -f "/usr/local/community-scripts/diagnostics" ]; then + if (whiptail --backtitle "Proxmox VE Helper Scripts" --title "DIAGNOSTICS" --yesno "Send Diagnostics of LXC Installation?\n\n(This only transmits data without user data, just RAM, CPU, LXC name, ...)" 10 58); then + cat </usr/local/community-scripts/diagnostics +DIAGNOSTICS=yes + +#This file is used to store the diagnostics settings for the Community-Scripts API. +#https://github.com/community-scripts/ProxmoxVE/discussions/1836 +#Your diagnostics will be sent to the Community-Scripts API for troubleshooting/statistical purposes. +#You can review the data at https://community-scripts.github.io/ProxmoxVE/data +#If you do not wish to send diagnostics, please set the variable 'DIAGNOSTICS' to "no" in /usr/local/community-scripts/diagnostics, or use the menue. +#This will disable the diagnostics feature. +#To send diagnostics, set the variable 'DIAGNOSTICS' to "yes" in /usr/local/community-scripts/diagnostics, or use the menue. +#This will enable the diagnostics feature. +#The following information will be sent: +#"ct_type" +#"disk_size" +#"core_count" +#"ram_size" +#"os_type" +#"os_version" +#"nsapp" +#"method" +#"pve_version" +#"status" +#If you have any concerns, please review the source code at /misc/build.func +EOF + DIAGNOSTICS="yes" + else + cat </usr/local/community-scripts/diagnostics +DIAGNOSTICS=no + +#This file is used to store the diagnostics settings for the Community-Scripts API. +#https://github.com/community-scripts/ProxmoxVE/discussions/1836 +#Your diagnostics will be sent to the Community-Scripts API for troubleshooting/statistical purposes. +#You can review the data at https://community-scripts.github.io/ProxmoxVE/data +#If you do not wish to send diagnostics, please set the variable 'DIAGNOSTICS' to "no" in /usr/local/community-scripts/diagnostics, or use the menue. 
+#This will disable the diagnostics feature. +#To send diagnostics, set the variable 'DIAGNOSTICS' to "yes" in /usr/local/community-scripts/diagnostics, or use the menue. +#This will enable the diagnostics feature. +#The following information will be sent: +#"ct_type" +#"disk_size" +#"core_count" +#"ram_size" +#"os_type" +#"os_version" +#"nsapp" +#"method" +#"pve_version" +#"status" +#If you have any concerns, please review the source code at /misc/build.func +EOF + DIAGNOSTICS="no" + fi + else + DIAGNOSTICS=$(awk -F '=' '/^DIAGNOSTICS/ {print $2}' /usr/local/community-scripts/diagnostics) + + fi + +} + +install_script() { + pve_check + shell_check + root_check + arch_check + ssh_check + maxkeys_check + diagnostics_check + + if systemctl is-active -q ping-instances.service; then + systemctl -q stop ping-instances.service + fi + NEXTID=$(pvesh get /cluster/nextid) + # Read timezone - fallback for Debian 13/Proxmox 9+ where /etc/timezone doesn't exist + if [[ -f /etc/timezone ]]; then + timezone=$(cat /etc/timezone) + else + timezone=$(timedatectl show --value --property=Timezone 2>/dev/null || echo "UTC") + fi + header_info + while true; do + + TMP_CHOICE=$(whiptail --backtitle "Proxmox VE Helper Scripts" \ + --title "SETTINGS" \ + --menu "Choose an option:" 20 60 6 \ + "1" "Default Settings" \ + "2" "Default Settings (with verbose)" \ + "3" "Advanced Settings" \ + "4" "Use Config File" \ + "5" "Diagnostic Settings" \ + "6" "Exit" \ + --default-item "1" 3>&1 1>&2 2>&3) || true + + if [ -z "$TMP_CHOICE" ]; then + echo -e "\n${CROSS}${RD}Menu canceled. 
Exiting script.${CL}\n" + exit 0 + fi + + CHOICE="$TMP_CHOICE" + + case $CHOICE in + 1) + header_info + echo -e "${DEFAULT}${BOLD}${BL}Using Default Settings on node $PVEHOST_NAME${CL}" + VERBOSE="no" + METHOD="default" + base_settings "$VERBOSE" + echo_default + break + ;; + 2) + header_info + echo -e "${DEFAULT}${BOLD}${BL}Using Default Settings on node $PVEHOST_NAME (${VERBOSE_CROPPED}Verbose)${CL}" + VERBOSE="yes" + METHOD="default" + base_settings "$VERBOSE" + echo_default + break + ;; + 3) + header_info + echo -e "${ADVANCED}${BOLD}${RD}Using Advanced Settings on node $PVEHOST_NAME${CL}" + METHOD="advanced" + base_settings + advanced_settings + break + ;; + 4) + header_info + echo -e "${INFO}${HOLD} ${GN}Using Config File on node $PVEHOST_NAME${CL}" + METHOD="config_file" + source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/config-file.func) + config_file + break + ;; + 5) + if [[ $DIAGNOSTICS == "yes" ]]; then + if whiptail --backtitle "Proxmox VE Helper Scripts" --title "DIAGNOSTICS SETTINGS" --yesno "Send Diagnostics of LXC Installation?\n\nCurrent setting: ${DIAGNOSTICS}" 10 58 \ + --yes-button "No" --no-button "Back"; then + DIAGNOSTICS="no" + sed -i 's/^DIAGNOSTICS=.*/DIAGNOSTICS=no/' /usr/local/community-scripts/diagnostics + whiptail --backtitle "Proxmox VE Helper Scripts" --title "DIAGNOSTICS SETTINGS" --msgbox "Diagnostics settings changed to ${DIAGNOSTICS}." 8 58 + fi + else + if whiptail --backtitle "Proxmox VE Helper Scripts" --title "DIAGNOSTICS SETTINGS" --yesno "Send Diagnostics of LXC Installation?\n\nCurrent setting: ${DIAGNOSTICS}" 10 58 \ + --yes-button "Yes" --no-button "Back"; then + DIAGNOSTICS="yes" + sed -i 's/^DIAGNOSTICS=.*/DIAGNOSTICS=yes/' /usr/local/community-scripts/diagnostics + whiptail --backtitle "Proxmox VE Helper Scripts" --title "DIAGNOSTICS SETTINGS" --msgbox "Diagnostics settings changed to ${DIAGNOSTICS}." 8 58 + fi + fi + + ;; + 6) + echo -e "\n${CROSS}${RD}Script terminated. 
Have a great day!${CL}\n" + exit 0 + ;; + *) + echo -e "\n${CROSS}${RD}Invalid option, please try again.${CL}\n" + ;; + esac + done +} + +check_container_resources() { + # Check actual RAM & Cores + current_ram=$(free -m | awk 'NR==2{print $2}') + current_cpu=$(nproc) + + # Check whether the current RAM is less than the required RAM or the CPU cores are less than required + if [[ "$current_ram" -lt "$var_ram" ]] || [[ "$current_cpu" -lt "$var_cpu" ]]; then + echo -e "\n${INFO}${HOLD} ${GN}Required: ${var_cpu} CPU, ${var_ram}MB RAM ${CL}| ${RD}Current: ${current_cpu} CPU, ${current_ram}MB RAM${CL}" + echo -e "${YWB}Please ensure that the ${APP} LXC is configured with at least ${var_cpu} vCPU and ${var_ram} MB RAM for the build process.${CL}\n" + echo -ne "${INFO}${HOLD} May cause data loss! ${INFO} Continue update with under-provisioned LXC? [y/N] " + read -r prompt + if [[ ! "${prompt,,}" =~ ^(y|yes)$ ]]; then + echo -e "${CROSS}${HOLD} ${YWB}Exiting based on user input.${CL}" + exit 1 + fi + else + echo -e "" + fi +} + +check_container_storage() { + # Check if the /boot partition is more than 80% full + total_size=$(df /boot --output=size | tail -n 1) + local used_size=$(df /boot --output=used | tail -n 1) + usage=$((100 * used_size / total_size)) + if ((usage > 80)); then + # Prompt the user for confirmation to continue + echo -e "${INFO}${HOLD} ${YWB}Warning: Storage is dangerously low (${usage}%).${CL}" + echo -ne "Continue anyway? [y/N] " + read -r prompt + if [[ ! "${prompt,,}" =~ ^(y|yes)$ ]]; then + echo -e "${CROSS}${HOLD}${YWB}Exiting based on user input.${CL}" + exit 1 + fi + fi +} + +start() { + source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/tools.func) + if command -v pveversion >/dev/null 2>&1; then + install_script + else + CHOICE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "${APP} LXC Update/Setting" --menu \ + "Support/Update functions for ${APP} LXC. 
Choose an option:" \ + 12 60 3 \ + "1" "YES (Silent Mode)" \ + "2" "YES (Verbose Mode)" \ + "3" "NO (Cancel Update)" --nocancel --default-item "1" 3>&1 1>&2 2>&3) + + case "$CHOICE" in + 1) + VERBOSE="no" + set_std_mode + ;; + 2) + VERBOSE="yes" + set_std_mode + ;; + 3) + clear + exit_script + exit + ;; + esac + update_script + cleanup_lxc + fi +} + +# This function collects user settings and integrates all the collected information. +build_container() { + # if [ "$VERBOSE" == "yes" ]; then set -x; fi + + NET_STRING="-net0 name=eth0,bridge=$BRG$MAC,ip=$NET$GATE$VLAN$MTU" + case "$IPV6_METHOD" in + auto) NET_STRING="$NET_STRING,ip6=auto" ;; + dhcp) NET_STRING="$NET_STRING,ip6=dhcp" ;; + static) + NET_STRING="$NET_STRING,ip6=$IPV6_ADDR" + [ -n "$IPV6_GATE" ] && NET_STRING="$NET_STRING,gw6=$IPV6_GATE" + ;; + none) ;; + esac + if [ "$CT_TYPE" == "1" ]; then + FEATURES="keyctl=1,nesting=1" + else + FEATURES="nesting=1" + fi + + if [ "$ENABLE_FUSE" == "yes" ]; then + FEATURES="$FEATURES,fuse=1" + fi + + if [[ $DIAGNOSTICS == "yes" ]]; then + post_to_api + fi + + TEMP_DIR=$(mktemp -d) + pushd "$TEMP_DIR" >/dev/null + if [ "$var_os" == "alpine" ]; then + export FUNCTIONS_FILE_PATH="$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/alpine-install.func)" + else + export FUNCTIONS_FILE_PATH="$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/install.func)" + fi + + export DIAGNOSTICS="$DIAGNOSTICS" + export RANDOM_UUID="$RANDOM_UUID" + export CACHER="$APT_CACHER" + export CACHER_IP="$APT_CACHER_IP" + export tz="$timezone" + export APPLICATION="$APP" + export app="$NSAPP" + export PASSWORD="$PW" + export VERBOSE="$VERBOSE" + export SSH_ROOT="${SSH}" + export SSH_AUTHORIZED_KEY + export CTID="$CT_ID" + export CTTYPE="$CT_TYPE" + export ENABLE_FUSE="$ENABLE_FUSE" + export ENABLE_TUN="$ENABLE_TUN" + export PCT_OSTYPE="$var_os" + export PCT_OSVERSION="$var_version" + export PCT_DISK_SIZE="$DISK_SIZE" + export 
PCT_OPTIONS=" + -features $FEATURES + -hostname $HN + -tags $TAGS + $SD + $NS + $NET_STRING + -onboot 1 + -cores $CORE_COUNT + -memory $RAM_SIZE + -unprivileged $CT_TYPE + $PW + " + # This executes create_lxc.sh and creates the container and .conf file + bash -c "$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/create_lxc.sh)" $? + + LXC_CONFIG="/etc/pve/lxc/${CTID}.conf" + + # USB passthrough for privileged LXC (CT_TYPE=0) + if [ "$CT_TYPE" == "0" ]; then + cat <>"$LXC_CONFIG" +# USB passthrough +lxc.cgroup2.devices.allow: a +lxc.cap.drop: +lxc.cgroup2.devices.allow: c 188:* rwm +lxc.cgroup2.devices.allow: c 189:* rwm +lxc.mount.entry: /dev/serial/by-id dev/serial/by-id none bind,optional,create=dir +lxc.mount.entry: /dev/ttyUSB0 dev/ttyUSB0 none bind,optional,create=file +lxc.mount.entry: /dev/ttyUSB1 dev/ttyUSB1 none bind,optional,create=file +lxc.mount.entry: /dev/ttyACM0 dev/ttyACM0 none bind,optional,create=file +lxc.mount.entry: /dev/ttyACM1 dev/ttyACM1 none bind,optional,create=file +EOF + fi + + # VAAPI passthrough for privileged containers or known apps + VAAPI_APPS=( + "immich" + "Channels" + "Emby" + "ErsatzTV" + "Frigate" + "Jellyfin" + "Plex" + "Scrypted" + "Tdarr" + "Unmanic" + "Ollama" + "FileFlows" + "Open WebUI" + ) + + is_vaapi_app=false + for vaapi_app in "${VAAPI_APPS[@]}"; do + if [[ "$APP" == "$vaapi_app" ]]; then + is_vaapi_app=true + break + fi + done + + if ([ "$CT_TYPE" == "0" ] || [ "$is_vaapi_app" == "true" ]) && + ([[ -e /dev/dri/renderD128 ]] || [[ -e /dev/dri/card0 ]] || [[ -e /dev/fb0 ]]); then + + echo "" + msg_custom "⚙️ " "\e[96m" "Configuring VAAPI passthrough for LXC container" + if [ "$CT_TYPE" != "0" ]; then + msg_custom "⚠️ " "\e[33m" "Container is unprivileged – VAAPI passthrough may not work without additional host configuration (e.g., idmap)." + fi + msg_custom "ℹ️ " "\e[96m" "VAAPI enables GPU hardware acceleration (e.g., for video transcoding in Jellyfin or Plex)." 
+ echo "" + read -rp "➤ Automatically mount all available VAAPI devices? [Y/n]: " VAAPI_ALL + + if [[ "$VAAPI_ALL" =~ ^[Yy]$|^$ ]]; then + if [ "$CT_TYPE" == "0" ]; then + # PRV Container → alles zulässig + [[ -e /dev/dri/renderD128 ]] && { + echo "lxc.cgroup2.devices.allow: c 226:128 rwm" >>"$LXC_CONFIG" + echo "lxc.mount.entry: /dev/dri/renderD128 dev/dri/renderD128 none bind,optional,create=file" >>"$LXC_CONFIG" + } + [[ -e /dev/dri/card0 ]] && { + echo "lxc.cgroup2.devices.allow: c 226:0 rwm" >>"$LXC_CONFIG" + echo "lxc.mount.entry: /dev/dri/card0 dev/dri/card0 none bind,optional,create=file" >>"$LXC_CONFIG" + } + [[ -e /dev/fb0 ]] && { + echo "lxc.cgroup2.devices.allow: c 29:0 rwm" >>"$LXC_CONFIG" + echo "lxc.mount.entry: /dev/fb0 dev/fb0 none bind,optional,create=file" >>"$LXC_CONFIG" + } + [[ -d /dev/dri ]] && { + echo "lxc.mount.entry: /dev/dri dev/dri none bind,optional,create=dir" >>"$LXC_CONFIG" + } + else + # UNPRV Container → nur devX für UI + [[ -e /dev/dri/card0 ]] && echo "dev0: /dev/dri/card0,gid=44" >>"$LXC_CONFIG" + [[ -e /dev/dri/card1 ]] && echo "dev0: /dev/dri/card1,gid=44" >>"$LXC_CONFIG" + [[ -e /dev/dri/renderD128 ]] && echo "dev1: /dev/dri/renderD128,gid=104" >>"$LXC_CONFIG" + fi + fi + + fi + if [ "$CT_TYPE" == "1" ] && [ "$is_vaapi_app" == "true" ]; then + if [[ -e /dev/dri/card0 ]]; then + echo "dev0: /dev/dri/card0,gid=44" >>"$LXC_CONFIG" + elif [[ -e /dev/dri/card1 ]]; then + echo "dev0: /dev/dri/card1,gid=44" >>"$LXC_CONFIG" + fi + if [[ -e /dev/dri/renderD128 ]]; then + echo "dev1: /dev/dri/renderD128,gid=104" >>"$LXC_CONFIG" + fi + fi + + # TUN device passthrough + if [ "$ENABLE_TUN" == "yes" ]; then + cat <>"$LXC_CONFIG" +lxc.cgroup2.devices.allow: c 10:200 rwm +lxc.mount.entry: /dev/net/tun dev/net/tun none bind,create=file +EOF + fi + + # This starts the container and executes -install.sh + msg_info "Starting LXC Container" + pct start "$CTID" + + # wait for status 'running' + for i in {1..10}; do + if pct status "$CTID" | grep 
-q "status: running"; then + msg_ok "Started LXC Container" + break + fi + sleep 1 + if [ "$i" -eq 10 ]; then + msg_error "LXC Container did not reach running state" + exit 1 + fi + done + + if [ "$var_os" != "alpine" ]; then + msg_info "Waiting for network in LXC container" + for i in {1..10}; do + # 1. Primary check: ICMP ping (fastest, but may be blocked by ISP/firewall) + if pct exec "$CTID" -- ping -c1 -W1 deb.debian.org >/dev/null 2>&1; then + msg_ok "Network in LXC is reachable (ping)" + break + fi + # Wait and retry if not reachable yet + if [ "$i" -lt 10 ]; then + msg_warn "No network in LXC yet (try $i/10) – waiting..." + sleep 3 + else + # After 10 unsuccessful ping attempts, try HTTP connectivity via wget as fallback + msg_warn "Ping failed 10 times. Trying HTTP connectivity check (wget) as fallback..." + if pct exec "$CTID" -- wget -q --spider http://deb.debian.org; then + msg_ok "Network in LXC is reachable (wget fallback)" + else + msg_error "No network in LXC after all checks." + read -r -p "Set fallback DNS (1.1.1.1/8.8.8.8)? [y/N]: " choice + case "$choice" in + [yY]*) + pct set "$CTID" --nameserver 1.1.1.1 + pct set "$CTID" --nameserver 8.8.8.8 + # Final attempt with wget after DNS change + if pct exec "$CTID" -- wget -q --spider http://deb.debian.org; then + msg_ok "Network reachable after DNS fallback" + else + msg_error "Still no network/DNS in LXC! Aborting customization." + exit_script + fi + ;; + *) + msg_error "Aborted by user – no DNS fallback set." 
+ exit_script + ;; + esac + fi + break + fi + done + fi + + msg_info "Customizing LXC Container" + : "${tz:=Etc/UTC}" + if [ "$var_os" == "alpine" ]; then + sleep 3 + pct exec "$CTID" -- /bin/sh -c 'cat </etc/apk/repositories +http://dl-cdn.alpinelinux.org/alpine/latest-stable/main +http://dl-cdn.alpinelinux.org/alpine/latest-stable/community +EOF' + pct exec "$CTID" -- ash -c "apk add bash newt curl openssh nano mc ncurses jq >/dev/null" + else + sleep 3 + LANG=${LANG:-en_US.UTF-8} + pct exec "$CTID" -- bash -c "sed -i \"/$LANG/ s/^# //\" /etc/locale.gen" + pct exec "$CTID" -- bash -c "locale_line=\$(grep -v '^#' /etc/locale.gen | grep -E '^[a-zA-Z]' | awk '{print \$1}' | head -n 1) && \ + echo LANG=\$locale_line >/etc/default/locale && \ + locale-gen >/dev/null && \ + export LANG=\$locale_line" + + if [[ -z "${tz:-}" ]]; then + tz=$(timedatectl show --property=Timezone --value 2>/dev/null || echo "Etc/UTC") + fi + if pct exec "$CTID" -- test -e "/usr/share/zoneinfo/$tz"; then + pct exec "$CTID" -- bash -c "tz='$tz'; echo \"\$tz\" >/etc/timezone && ln -sf \"/usr/share/zoneinfo/\$tz\" /etc/localtime" + else + msg_warn "Skipping timezone setup – zone '$tz' not found in container" + fi + + pct exec "$CTID" -- bash -c "apt-get update >/dev/null && apt-get install -y sudo curl mc gnupg2 jq >/dev/null" + fi + msg_ok "Customized LXC Container" + + lxc-attach -n "$CTID" -- bash -c "$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/install/${var_install}.sh)" +} + +# This function sets the description of the container. +description() { + IP=$(pct exec "$CTID" ip a s dev eth0 | awk '/inet / {print $2}' | cut -d/ -f1) + + # Generate LXC Description + DESCRIPTION=$( + cat < + + Logo + + +

${APP} LXC

+ +

+ + spend Coffee + +

+ + + + GitHub + + + + Discussions + + + + Issues + + +EOF + ) + + # Set Description in LXC + pct set "$CTID" -description "$DESCRIPTION" + + if [[ -f /etc/systemd/system/ping-instances.service ]]; then + systemctl start ping-instances.service + fi + + post_update_to_api "done" "none" +} + +api_exit_script() { + exit_code=$? # Capture the exit status of the last executed command + #200 exit codes indicate error in create_lxc.sh + #100 exit codes indicate error in install.func + + if [ $exit_code -ne 0 ]; then + case $exit_code in + 100) post_update_to_api "failed" "100: Unexpected error in create_lxc.sh" ;; + 101) post_update_to_api "failed" "101: No network connection detected in create_lxc.sh" ;; + 200) post_update_to_api "failed" "200: LXC creation failed in create_lxc.sh" ;; + 201) post_update_to_api "failed" "201: Invalid Storage class in create_lxc.sh" ;; + 202) post_update_to_api "failed" "202: User aborted menu in create_lxc.sh" ;; + 203) post_update_to_api "failed" "203: CTID not set in create_lxc.sh" ;; + 204) post_update_to_api "failed" "204: PCT_OSTYPE not set in create_lxc.sh" ;; + 205) post_update_to_api "failed" "205: CTID cannot be less than 100 in create_lxc.sh" ;; + 206) post_update_to_api "failed" "206: CTID already in use in create_lxc.sh" ;; + 207) post_update_to_api "failed" "207: Template not found in create_lxc.sh" ;; + 208) post_update_to_api "failed" "208: Error downloading template in create_lxc.sh" ;; + 209) post_update_to_api "failed" "209: Container creation failed, but template is intact in create_lxc.sh" ;; + *) post_update_to_api "failed" "Unknown error, exit code: $exit_code in create_lxc.sh" ;; + esac + fi +} + +if command -v pveversion >/dev/null 2>&1; then + trap 'api_exit_script' EXIT +fi +trap 'post_update_to_api "failed" "$BASH_COMMAND"' ERR +trap 'post_update_to_api "failed" "INTERRUPTED"' SIGINT +trap 'post_update_to_api "failed" "TERMINATED"' SIGTERM diff --git a/misc/old_misc/config-file.func 
b/misc/old_misc/config-file.func new file mode 100644 index 000000000..9799b4a4b --- /dev/null +++ b/misc/old_misc/config-file.func @@ -0,0 +1,699 @@ +config_file() { + CONFIG_FILE="/opt/community-scripts/.settings" + + if [[ -f "/opt/community-scripts/${NSAPP}.conf" ]]; then + CONFIG_FILE="/opt/community-scripts/${NSAPP}.conf" + fi + + if CONFIG_FILE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set absolute path to config file" 8 58 "$CONFIG_FILE" --title "CONFIG FILE" 3>&1 1>&2 2>&3); then + if [[ ! -f "$CONFIG_FILE" ]]; then + echo -e "${CROSS}${RD}Config file not found, exiting script!.${CL}" + exit + else + echo -e "${INFO}${BOLD}${DGN}Using config File: ${BGN}$CONFIG_FILE${CL}" + source "$CONFIG_FILE" + fi + fi + if [[ -n "${CT_ID-}" ]]; then + if [[ "$CT_ID" =~ ^([0-9]{3,4})-([0-9]{3,4})$ ]]; then + MIN_ID=${BASH_REMATCH[1]} + MAX_ID=${BASH_REMATCH[2]} + if ((MIN_ID >= MAX_ID)); then + msg_error "Invalid Container ID range. The first number must be smaller than the second number, was ${CT_ID}" + exit + fi + + LIST_OF_IDS=$(pvesh get /cluster/resources --type vm --output-format json 2>/dev/null | grep -oP '"vmid":\s*\K\d+') || true + if [[ -n "$LIST_OF_IDS" ]]; then + for ((ID = MIN_ID; ID <= MAX_ID; ID++)); do + if ! grep -q "^$ID$" <<<"$LIST_OF_IDS"; then + CT_ID=$ID + break + fi + done + fi + echo -e "${CONTAINERID}${BOLD}${DGN}Container ID: ${BGN}$CT_ID${CL}" + + elif [[ "$CT_ID" =~ ^[0-9]+$ ]]; then + LIST_OF_IDS=$(pvesh get /cluster/resources --type vm --output-format json 2>/dev/null | grep -oP '"vmid":\s*\K\d+') || true + if [[ -n "$LIST_OF_IDS" ]]; then + + if ! grep -q "^$CT_ID$" <<<"$LIST_OF_IDS"; then + echo -e "${CONTAINERID}${BOLD}${DGN}Container ID: ${BGN}$CT_ID${CL}" + else + msg_error "Container ID $CT_ID already exists" + exit + fi + else + echo -e "${CONTAINERID}${BOLD}${DGN}Container ID: ${BGN}$CT_ID${CL}" + fi + else + msg_error "Invalid Container ID format. 
Needs to be 0000-9999 or 0-9999, was ${CT_ID}" + exit + fi + else + if CT_ID=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Container ID" 8 58 "$NEXTID" --title "CONTAINER ID" 3>&1 1>&2 2>&3); then + if [ -z "$CT_ID" ]; then + CT_ID="$NEXTID" + echo -e "${CONTAINERID}${BOLD}${DGN}Container ID: ${BGN}$CT_ID${CL}" + else + echo -e "${CONTAINERID}${BOLD}${DGN}Container ID: ${BGN}$CT_ID${CL}" + fi + else + exit_script + fi + + fi + if [[ -n "${CT_TYPE-}" ]]; then + if [[ "$CT_TYPE" -eq 0 ]]; then + CT_TYPE_DESC="Privileged" + elif [[ "$CT_TYPE" -eq 1 ]]; then + CT_TYPE_DESC="Unprivileged" + else + msg_error "Unknown setting for CT_TYPE, should be 1 or 0, was ${CT_TYPE}" + exit + fi + echo -e "${CONTAINERTYPE}${BOLD}${DGN}Container Type: ${BGN}$CT_TYPE_DESC${CL}" + else + if CT_TYPE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "CONTAINER TYPE" --radiolist "Choose Type" 10 58 2 \ + "1" "Unprivileged" ON \ + "0" "Privileged" OFF \ + 3>&1 1>&2 2>&3); then + if [ -n "$CT_TYPE" ]; then + CT_TYPE_DESC="Unprivileged" + if [ "$CT_TYPE" -eq 0 ]; then + CT_TYPE_DESC="Privileged" + fi + echo -e "${CONTAINERTYPE}${BOLD}${DGN}Container Type: ${BGN}$CT_TYPE_DESC${CL}" + fi + else + exit_script + fi + fi + + if [[ -n "${PW-}" ]]; then + if [[ "$PW" == "none" ]]; then + PW="" + else + if [[ "$PW" == *" "* ]]; then + msg_error "Password cannot be empty" + exit + elif [[ ${#PW} -lt 5 ]]; then + msg_error "Password must be at least 5 characters long" + exit + else + echo -e "${VERIFYPW}${BOLD}${DGN}Root Password: ${BGN}********${CL}" + fi + PW="-password $PW" + fi + else + while true; do + if PW1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --passwordbox "\nSet Root Password (needed for root ssh access)" 9 58 --title "PASSWORD (leave blank for automatic login)" 3>&1 1>&2 2>&3); then + if [[ -n "$PW1" ]]; then + if [[ "$PW1" == *" "* ]]; then + whiptail --msgbox "Password cannot contain spaces. Please try again." 
8 58 + elif [ ${#PW1} -lt 5 ]; then + whiptail --msgbox "Password must be at least 5 characters long. Please try again." 8 58 + else + if PW2=$(whiptail --backtitle "Proxmox VE Helper Scripts" --passwordbox "\nVerify Root Password" 9 58 --title "PASSWORD VERIFICATION" 3>&1 1>&2 2>&3); then + if [[ "$PW1" == "$PW2" ]]; then + PW="-password $PW1" + echo -e "${VERIFYPW}${BOLD}${DGN}Root Password: ${BGN}********${CL}" + break + else + whiptail --msgbox "Passwords do not match. Please try again." 8 58 + fi + else + exit_script + fi + fi + else + PW1="Automatic Login" + PW="" + echo -e "${VERIFYPW}${BOLD}${DGN}Root Password: ${BGN}$PW1${CL}" + break + fi + else + exit_script + fi + done + fi + + if [[ -n "${HN-}" ]]; then + echo -e "${HOSTNAME}${BOLD}${DGN}Hostname: ${BGN}$HN${CL}" + else + if CT_NAME=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Hostname" 8 58 "$NSAPP" --title "HOSTNAME" 3>&1 1>&2 2>&3); then + if [ -z "$CT_NAME" ]; then + HN="$NSAPP" + else + HN=$(echo "${CT_NAME,,}" | tr -d ' ') + fi + echo -e "${HOSTNAME}${BOLD}${DGN}Hostname: ${BGN}$HN${CL}" + else + exit_script + fi + fi + + if [[ -n "${DISK_SIZE-}" ]]; then + if [[ "$DISK_SIZE" =~ ^-?[0-9]+$ ]]; then + echo -e "${DISKSIZE}${BOLD}${DGN}Disk Size: ${BGN}${DISK_SIZE} GB${CL}" + else + msg_error "DISK_SIZE must be an integer, was ${DISK_SIZE}" + exit + fi + else + if DISK_SIZE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Disk Size in GB" 8 58 "$var_disk" --title "DISK SIZE" 3>&1 1>&2 2>&3); then + if [ -z "$DISK_SIZE" ]; then + DISK_SIZE="$var_disk" + echo -e "${DISKSIZE}${BOLD}${DGN}Disk Size: ${BGN}${DISK_SIZE} GB${CL}" + else + if ! 
[[ $DISK_SIZE =~ $INTEGER ]]; then + echo -e "{INFO}${HOLD}${RD} DISK SIZE MUST BE AN INTEGER NUMBER!${CL}" + advanced_settings + fi + echo -e "${DISKSIZE}${BOLD}${DGN}Disk Size: ${BGN}${DISK_SIZE} GB${CL}" + fi + else + exit_script + fi + fi + + if [[ -n "${CORE_COUNT-}" ]]; then + if [[ "$CORE_COUNT" =~ ^-?[0-9]+$ ]]; then + echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}${CORE_COUNT}${CL}" + else + msg_error "CORE_COUNT must be an integer, was ${CORE_COUNT}" + exit + fi + else + if CORE_COUNT=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate CPU Cores" 8 58 "$var_cpu" --title "CORE COUNT" 3>&1 1>&2 2>&3); then + if [ -z "$CORE_COUNT" ]; then + CORE_COUNT="$var_cpu" + echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}$CORE_COUNT${CL}" + else + echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}$CORE_COUNT${CL}" + fi + else + exit_script + fi + fi + + if [[ -n "${RAM_SIZE-}" ]]; then + if [[ "$RAM_SIZE" =~ ^-?[0-9]+$ ]]; then + echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}${RAM_SIZE} MiB${CL}" + else + msg_error "RAM_SIZE must be an integer, was ${RAM_SIZE}" + exit + fi + else + if RAM_SIZE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate RAM in MiB" 8 58 "$var_ram" --title "RAM" 3>&1 1>&2 2>&3); then + if [ -z "$RAM_SIZE" ]; then + RAM_SIZE="$var_ram" + echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}${RAM_SIZE} MiB${CL}" + else + echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}${RAM_SIZE} MiB${CL}" + fi + else + exit_script + fi + fi + + IFACE_FILEPATH_LIST="/etc/network/interfaces"$'\n'$(find "/etc/network/interfaces.d/" -type f) + BRIDGES="" + OLD_IFS=$IFS + IFS=$'\n' + + for iface_filepath in ${IFACE_FILEPATH_LIST}; do + + iface_indexes_tmpfile=$(mktemp -q -u '.iface-XXXX') + (grep -Pn '^\s*iface' "${iface_filepath}" | cut -d':' -f1 && wc -l "${iface_filepath}" | cut -d' ' -f1) | awk 'FNR==1 {line=$0; next} {print line":"$0-1; line=$0}' >"${iface_indexes_tmpfile}" || true + + if [ -f "${iface_indexes_tmpfile}" ]; 
then + + while read -r pair; do + start=$(echo "${pair}" | cut -d':' -f1) + end=$(echo "${pair}" | cut -d':' -f2) + if awk "NR >= ${start} && NR <= ${end}" "${iface_filepath}" | grep -qP '^\s*(bridge[-_](ports|stp|fd|vlan-aware|vids)|ovs_type\s+OVSBridge)\b'; then + iface_name=$(sed "${start}q;d" "${iface_filepath}" | awk '{print $2}') + BRIDGES="${iface_name}"$'\n'"${BRIDGES}" + fi + + done <"${iface_indexes_tmpfile}" + rm -f "${iface_indexes_tmpfile}" + fi + + done + IFS=$OLD_IFS + BRIDGES=$(echo "$BRIDGES" | grep -v '^\s*$' | sort | uniq) + + if [[ -n "${BRG-}" ]]; then + if echo "$BRIDGES" | grep -q "${BRG}"; then + echo -e "${BRIDGE}${BOLD}${DGN}Bridge: ${BGN}$BRG${CL}" + else + msg_error "Bridge '${BRG}' does not exist in /etc/network/interfaces or /etc/network/interfaces.d/sdn" + exit + fi + else + BRG=$(whiptail --backtitle "Proxmox VE Helper Scripts" --menu "Select network bridge:" 15 40 6 $(echo "$BRIDGES" | awk '{print $0, "Bridge"}') 3>&1 1>&2 2>&3) + if [ -z "$BRG" ]; then + exit_script + else + echo -e "${BRIDGE}${BOLD}${DGN}Bridge: ${BGN}$BRG${CL}" + fi + fi + + local ip_cidr_regex='^([0-9]{1,3})\.([0-9]{1,3})\.([0-9]{1,3})\.([0-9]{1,3})/([0-9]{1,2})$' + local ip_regex='^([0-9]{1,3})\.([0-9]{1,3})\.([0-9]{1,3})\.([0-9]{1,3})$' + + if [[ -n ${NET-} ]]; then + if [ "$NET" == "dhcp" ]; then + echo -e "${NETWORK}${BOLD}${DGN}IP Address: ${BGN}DHCP${CL}" + echo -e "${GATEWAY}${BOLD}${DGN}Gateway IP Address: ${BGN}Default${CL}" + GATE="" + elif [[ "$NET" =~ $ip_cidr_regex ]]; then + echo -e "${NETWORK}${BOLD}${DGN}IP Address: ${BGN}$NET${CL}" + if [[ -n "$GATE" ]]; then + [[ "$GATE" =~ ",gw=" ]] && GATE="${GATE##,gw=}" + if [[ "$GATE" =~ $ip_regex ]]; then + echo -e "${GATEWAY}${BOLD}${DGN}Gateway IP Address: ${BGN}$GATE${CL}" + GATE=",gw=$GATE" + else + msg_error "Invalid IP Address format for Gateway. 
Needs to be 0.0.0.0, was ${GATE}" + exit + fi + + else + while true; do + GATE1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Enter gateway IP address" 8 58 --title "Gateway IP" 3>&1 1>&2 2>&3) + if [ -z "$GATE1" ]; then + whiptail --backtitle "Proxmox VE Helper Scripts" --msgbox "Gateway IP address cannot be empty" 8 58 + elif [[ ! "$GATE1" =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}$ ]]; then + whiptail --backtitle "Proxmox VE Helper Scripts" --msgbox "Invalid IP address format" 8 58 + else + GATE=",gw=$GATE1" + echo -e "${GATEWAY}${BOLD}${DGN}Gateway IP Address: ${BGN}$GATE1${CL}" + break + fi + done + fi + elif [[ "$NET" == *-* ]]; then + IFS="-" read -r ip_start ip_end <<<"$NET" + + if [[ ! "$ip_start" =~ $ip_cidr_regex ]] || [[ ! "$ip_end" =~ $ip_cidr_regex ]]; then + msg_error "Invalid IP range format, was $NET should be 0.0.0.0/0-0.0.0.0/0" + exit 1 + fi + + ip1="${ip_start%%/*}" + ip2="${ip_end%%/*}" + cidr="${ip_start##*/}" + + ip_to_int() { + local IFS=. + read -r i1 i2 i3 i4 <<<"$1" + echo $(((i1 << 24) + (i2 << 16) + (i3 << 8) + i4)) + } + + int_to_ip() { + local ip=$1 + echo "$(((ip >> 24) & 0xFF)).$(((ip >> 16) & 0xFF)).$(((ip >> 8) & 0xFF)).$((ip & 0xFF))" + } + + start_int=$(ip_to_int "$ip1") + end_int=$(ip_to_int "$ip2") + + for ((ip_int = start_int; ip_int <= end_int; ip_int++)); do + ip=$(int_to_ip $ip_int) + msg_info "Checking IP: $ip" + if ! ping -c 2 -W 1 "$ip" >/dev/null 2>&1; then + NET="$ip/$cidr" + msg_ok "Using free IP Address: ${BGN}$NET${CL}" + sleep 3 + break + fi + done + if [[ "$NET" == *-* ]]; then + msg_error "No free IP found in range" + exit 1 + fi + if [ -n "$GATE" ]; then + if [[ "$GATE" =~ $ip_regex ]]; then + echo -e "${GATEWAY}${BOLD}${DGN}Gateway IP Address: ${BGN}$GATE${CL}" + GATE=",gw=$GATE" + else + msg_error "Invalid IP Address format for Gateway. 
Needs to be 0.0.0.0, was ${GATE}" + exit + fi + else + while true; do + GATE1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Enter gateway IP address" 8 58 --title "Gateway IP" 3>&1 1>&2 2>&3) + if [ -z "$GATE1" ]; then + whiptail --backtitle "Proxmox VE Helper Scripts" --msgbox "Gateway IP address cannot be empty" 8 58 + elif [[ ! "$GATE1" =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}$ ]]; then + whiptail --backtitle "Proxmox VE Helper Scripts" --msgbox "Invalid IP address format" 8 58 + else + GATE=",gw=$GATE1" + echo -e "${GATEWAY}${BOLD}${DGN}Gateway IP Address: ${BGN}$GATE1${CL}" + break + fi + done + fi + else + msg_error "Invalid IP Address format. Needs to be 0.0.0.0/0 or a range like 10.0.0.1/24-10.0.0.10/24, was ${NET}" + exit + fi + else + while true; do + NET=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a Static IPv4 CIDR Address (/24)" 8 58 dhcp --title "IP ADDRESS" 3>&1 1>&2 2>&3) + exit_status=$? + if [ $exit_status -eq 0 ]; then + if [ "$NET" = "dhcp" ]; then + echo -e "${NETWORK}${BOLD}${DGN}IP Address: ${BGN}$NET${CL}" + break + else + if [[ "$NET" =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}/([0-9]|[1-2][0-9]|3[0-2])$ ]]; then + echo -e "${NETWORK}${BOLD}${DGN}IP Address: ${BGN}$NET${CL}" + break + else + whiptail --backtitle "Proxmox VE Helper Scripts" --msgbox "$NET is an invalid IPv4 CIDR address. Please enter a valid IPv4 CIDR address or 'dhcp'" 8 58 + fi + fi + else + exit_script + fi + done + if [ "$NET" != "dhcp" ]; then + while true; do + GATE1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Enter gateway IP address" 8 58 --title "Gateway IP" 3>&1 1>&2 2>&3) + if [ -z "$GATE1" ]; then + whiptail --backtitle "Proxmox VE Helper Scripts" --msgbox "Gateway IP address cannot be empty" 8 58 + elif [[ ! 
"$GATE1" =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}$ ]]; then + whiptail --backtitle "Proxmox VE Helper Scripts" --msgbox "Invalid IP address format" 8 58 + else + GATE=",gw=$GATE1" + echo -e "${GATEWAY}${BOLD}${DGN}Gateway IP Address: ${BGN}$GATE1${CL}" + break + fi + done + else + GATE="" + echo -e "${GATEWAY}${BOLD}${DGN}Gateway IP Address: ${BGN}Default${CL}" + fi + fi + + if [ "$var_os" == "alpine" ]; then + APT_CACHER="" + APT_CACHER_IP="" + else + if [[ -n "${APT_CACHER_IP-}" ]]; then + if [[ ! $APT_CACHER_IP == "none" ]]; then + APT_CACHER="yes" + echo -e "${NETWORK}${BOLD}${DGN}APT-CACHER IP Address: ${BGN}$APT_CACHER_IP${CL}" + else + APT_CACHER="" + echo -e "${NETWORK}${BOLD}${DGN}APT-Cacher IP Address: ${BGN}No${CL}" + fi + else + if APT_CACHER_IP=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set APT-Cacher IP (leave blank for none)" 8 58 --title "APT-Cacher IP" 3>&1 1>&2 2>&3); then + APT_CACHER="${APT_CACHER_IP:+yes}" + echo -e "${NETWORK}${BOLD}${DGN}APT-Cacher IP Address: ${BGN}${APT_CACHER_IP:-Default}${CL}" + if [[ -n $APT_CACHER_IP ]]; then + APT_CACHER_IP="none" + fi + else + exit_script + fi + fi + fi + + if [[ -n "${MTU-}" ]]; then + if [[ "$MTU" =~ ^-?[0-9]+$ ]]; then + echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}$MTU${CL}" + MTU=",mtu=$MTU" + else + msg_error "MTU must be an integer, was ${MTU}" + exit + fi + else + if MTU1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Interface MTU Size (leave blank for default [The MTU of your selected vmbr, default is 1500])" 8 58 --title "MTU SIZE" 3>&1 1>&2 2>&3); then + if [ -z "$MTU1" ]; then + MTU1="Default" + MTU="" + else + MTU=",mtu=$MTU1" + fi + echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}$MTU1${CL}" + else + exit_script + fi + fi + + if [[ "$IPV6_METHOD" == "static" ]]; then + if [[ -n "$IPV6STATIC" ]]; then + IP6=",ip6=${IPV6STATIC}" + echo -e "${NETWORK}${BOLD}${DGN}IPv6 Address: ${BGN}${IPV6STATIC}${CL}" + else + msg_error "IPV6_METHOD is 
set to static but IPV6STATIC is empty" + exit + fi + elif [[ "$IPV6_METHOD" == "auto" ]]; then + IP6=",ip6=auto" + echo -e "${NETWORK}${BOLD}${DGN}IPv6 Address: ${BGN}auto${CL}" + else + IP6="" + echo -e "${NETWORK}${BOLD}${DGN}IPv6 Address: ${BGN}none${CL}" + fi + + if [[ -n "${SD-}" ]]; then + if [[ "$SD" == "none" ]]; then + SD="" + echo -e "${SEARCH}${BOLD}${DGN}DNS Search Domain: ${BGN}Host${CL}" + else + # Strip prefix if present for config file storage + local SD_VALUE="$SD" + [[ "$SD" =~ ^-searchdomain= ]] && SD_VALUE="${SD#-searchdomain=}" + echo -e "${SEARCH}${BOLD}${DGN}DNS Search Domain: ${BGN}$SD_VALUE${CL}" + SD="-searchdomain=$SD_VALUE" + fi + else + if SD=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a DNS Search Domain (leave blank for HOST)" 8 58 --title "DNS Search Domain" 3>&1 1>&2 2>&3); then + if [ -z "$SD" ]; then + SX=Host + SD="" + else + SX=$SD + SD="-searchdomain=$SD" + fi + echo -e "${SEARCH}${BOLD}${DGN}DNS Search Domain: ${BGN}$SX${CL}" + else + exit_script + fi + fi + + if [[ -n "${NS-}" ]]; then + if [[ $NS == "none" ]]; then + NS="" + echo -e "${NETWORK}${BOLD}${DGN}DNS Server IP Address: ${BGN}Host${CL}" + else + # Strip prefix if present for config file storage + local NS_VALUE="$NS" + [[ "$NS" =~ ^-nameserver= ]] && NS_VALUE="${NS#-nameserver=}" + if [[ "$NS_VALUE" =~ $ip_regex ]]; then + echo -e "${NETWORK}${BOLD}${DGN}DNS Server IP Address: ${BGN}$NS_VALUE${CL}" + NS="-nameserver=$NS_VALUE" + else + msg_error "Invalid IP Address format for DNS Server. 
Needs to be 0.0.0.0, was ${NS_VALUE}" + exit + fi + fi + else + if NX=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a DNS Server IP (leave blank for HOST)" 8 58 --title "DNS SERVER IP" 3>&1 1>&2 2>&3); then + if [ -z "$NX" ]; then + NX=Host + NS="" + else + NS="-nameserver=$NX" + fi + echo -e "${NETWORK}${BOLD}${DGN}DNS Server IP Address: ${BGN}$NX${CL}" + else + exit_script + fi + fi + + if [[ -n "${MAC-}" ]]; then + if [[ "$MAC" == "none" ]]; then + MAC="" + echo -e "${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}Host${CL}" + else + # Strip prefix if present for config file storage + local MAC_VALUE="$MAC" + [[ "$MAC" =~ ^,hwaddr= ]] && MAC_VALUE="${MAC#,hwaddr=}" + if [[ "$MAC_VALUE" =~ ^([A-Fa-f0-9]{2}:){5}[A-Fa-f0-9]{2}$ ]]; then + echo -e "${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}$MAC_VALUE${CL}" + MAC=",hwaddr=$MAC_VALUE" + else + msg_error "MAC Address must be in the format xx:xx:xx:xx:xx:xx, was ${MAC_VALUE}" + exit + fi + fi + else + if MAC1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a MAC Address(leave blank for generated MAC)" 8 58 --title "MAC ADDRESS" 3>&1 1>&2 2>&3); then + if [ -z "$MAC1" ]; then + MAC1="Default" + MAC="" + else + MAC=",hwaddr=$MAC1" + echo -e "${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}$MAC1${CL}" + fi + else + exit_script + fi + fi + + if [[ -n "${VLAN-}" ]]; then + if [[ "$VLAN" == "none" ]]; then + VLAN="" + echo -e "${VLANTAG}${BOLD}${DGN}Vlan: ${BGN}Host${CL}" + else + # Strip prefix if present for config file storage + local VLAN_VALUE="$VLAN" + [[ "$VLAN" =~ ^,tag= ]] && VLAN_VALUE="${VLAN#,tag=}" + if [[ "$VLAN_VALUE" =~ ^-?[0-9]+$ ]]; then + echo -e "${VLANTAG}${BOLD}${DGN}Vlan: ${BGN}$VLAN_VALUE${CL}" + VLAN=",tag=$VLAN_VALUE" + else + msg_error "VLAN must be an integer, was ${VLAN_VALUE}" + exit + fi + fi + else + if VLAN1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a Vlan(leave blank for no VLAN)" 8 58 --title "VLAN" 3>&1 1>&2 2>&3); then + if [ -z 
"$VLAN1" ]; then + VLAN1="Default" + VLAN="" + else + VLAN=",tag=$VLAN1" + fi + echo -e "${VLANTAG}${BOLD}${DGN}Vlan: ${BGN}$VLAN1${CL}" + else + exit_script + fi + fi + + if [[ -n "${TAGS-}" ]]; then + if [[ "$TAGS" == *"DEFAULT"* ]]; then + TAGS="${TAGS//DEFAULT/}" + TAGS="${TAGS//;/}" + TAGS="$TAGS;${var_tags:-}" + echo -e "${NETWORK}${BOLD}${DGN}Tags: ${BGN}$TAGS${CL}" + fi + else + TAGS="community-scripts;" + if ADV_TAGS=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Custom Tags?[If you remove all, there will be no tags!]" 8 58 "${TAGS}" --title "Advanced Tags" 3>&1 1>&2 2>&3); then + if [ -n "${ADV_TAGS}" ]; then + ADV_TAGS=$(echo "$ADV_TAGS" | tr -d '[:space:]') + TAGS="${ADV_TAGS}" + else + TAGS=";" + fi + echo -e "${NETWORK}${BOLD}${DGN}Tags: ${BGN}$TAGS${CL}" + else + exit_script + fi + fi + + if [[ -n "${SSH-}" ]]; then + if [[ "$SSH" == "yes" ]]; then + echo -e "${ROOTSSH}${BOLD}${DGN}Root SSH Access: ${BGN}$SSH${CL}" + if [[ ! -z "$SSH_AUTHORIZED_KEY" ]]; then + echo -e "${ROOTSSH}${BOLD}${DGN}SSH Authorized Key: ${BGN}********************${CL}" + else + echo -e "${ROOTSSH}${BOLD}${DGN}SSH Authorized Key: ${BGN}None${CL}" + fi + elif [[ "$SSH" == "no" ]]; then + echo -e "${ROOTSSH}${BOLD}${DGN}Root SSH Access: ${BGN}$SSH${CL}" + else + msg_error "SSH needs to be 'yes' or 'no', was ${SSH}" + exit + fi + else + SSH_AUTHORIZED_KEY="$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "SSH Authorized key for root (leave empty for none)" 8 58 --title "SSH Key" 3>&1 1>&2 2>&3)" + if [[ -z "${SSH_AUTHORIZED_KEY}" ]]; then + SSH_AUTHORIZED_KEY="" + fi + if [[ "$PW" == -password* || -n "$SSH_AUTHORIZED_KEY" ]]; then + if (whiptail --backtitle "Proxmox VE Helper Scripts" --defaultno --title "SSH ACCESS" --yesno "Enable Root SSH Access?" 
10 58); then + SSH="yes" + else + SSH="no" + fi + echo -e "${ROOTSSH}${BOLD}${DGN}Root SSH Access: ${BGN}$SSH${CL}" + else + SSH="no" + echo -e "${ROOTSSH}${BOLD}${DGN}Root SSH Access: ${BGN}$SSH${CL}" + fi + fi + + if [[ -n "$ENABLE_FUSE" ]]; then + if [[ "$ENABLE_FUSE" == "yes" ]]; then + echo -e "${FUSE}${BOLD}${DGN}Enable FUSE: ${BGN}Yes${CL}" + elif [[ "$ENABLE_FUSE" == "no" ]]; then + echo -e "${FUSE}${BOLD}${DGN}Enable FUSE: ${BGN}No${CL}" + else + msg_error "Enable FUSE needs to be 'yes' or 'no', was ${ENABLE_FUSE}" + exit + fi + else + if (whiptail --backtitle "Proxmox VE Helper Scripts" --defaultno --title "FUSE" --yesno "Enable FUSE?" 10 58); then + ENABLE_FUSE="yes" + else + ENABLE_FUSE="no" + fi + echo -e "${FUSE}${BOLD}${DGN}Enable FUSE: ${BGN}$ENABLE_FUSE${CL}" + fi + + if [[ -n "$ENABLE_TUN" ]]; then + if [[ "$ENABLE_TUN" == "yes" ]]; then + echo -e "${FUSE}${BOLD}${DGN}Enable TUN: ${BGN}Yes${CL}" + elif [[ "$ENABLE_TUN" == "no" ]]; then + echo -e "${FUSE}${BOLD}${DGN}Enable TUN: ${BGN}No${CL}" + else + msg_error "Enable TUN needs to be 'yes' or 'no', was ${ENABLE_TUN}" + exit + fi + else + if (whiptail --backtitle "Proxmox VE Helper Scripts" --defaultno --title "TUN" --yesno "Enable TUN?" 10 58); then + ENABLE_TUN="yes" + else + ENABLE_TUN="no" + fi + echo -e "${FUSE}${BOLD}${DGN}Enable TUN: ${BGN}$ENABLE_TUN${CL}" + fi + + if [[ -n "${VERBOSE-}" ]]; then + if [[ "$VERBOSE" == "yes" ]]; then + echo -e "${SEARCH}${BOLD}${DGN}Verbose Mode: ${BGN}$VERBOSE${CL}" + elif [[ "$VERBOSE" == "no" ]]; then + echo -e "${SEARCH}${BOLD}${DGN}Verbose Mode: ${BGN}No${CL}" + else + msg_error "Verbose Mode needs to be 'yes' or 'no', was ${VERBOSE}" + exit + fi + else + if (whiptail --backtitle "Proxmox VE Helper Scripts" --defaultno --title "VERBOSE MODE" --yesno "Enable Verbose Mode?" 
10 58); then + VERBOSE="yes" + else + VERBOSE="no" + fi + echo -e "${SEARCH}${BOLD}${DGN}Verbose Mode: ${BGN}$VERBOSE${CL}" + fi + + if (whiptail --backtitle "Proxmox VE Helper Scripts" --title "ADVANCED SETTINGS WITH CONFIG FILE COMPLETE" --yesno "Ready to create ${APP} LXC?" 10 58); then + echo -e "${CREATING}${BOLD}${RD}Creating a ${APP} LXC using the above settings${CL}" + else + clear + header_info + echo -e "${INFO}${HOLD} ${GN}Using Config File on node $PVEHOST_NAME${CL}" + config_file + fi +} diff --git a/misc/old_misc/core.func b/misc/old_misc/core.func new file mode 100644 index 000000000..1faba7296 --- /dev/null +++ b/misc/old_misc/core.func @@ -0,0 +1,452 @@ +# Copyright (c) 2021-2025 community-scripts ORG +# License: MIT | https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/LICENSE + +# ------------------------------------------------------------------------------ +# Loads core utility groups once (colors, formatting, icons, defaults). +# ------------------------------------------------------------------------------ + +[[ -n "${_CORE_FUNC_LOADED:-}" ]] && return +_CORE_FUNC_LOADED=1 + +load_functions() { + [[ -n "${__FUNCTIONS_LOADED:-}" ]] && return + __FUNCTIONS_LOADED=1 + color + formatting + icons + default_vars + set_std_mode + # add more +} + +# ============================================================================ +# Error & Signal Handling – robust, universal, subshell-safe +# ============================================================================ + +_tool_error_hint() { + local cmd="$1" + local code="$2" + case "$cmd" in + curl) + case "$code" in + 6) echo "Curl: Could not resolve host (DNS problem)" ;; + 7) echo "Curl: Failed to connect to host (connection refused)" ;; + 22) echo "Curl: HTTP error (404/403 etc)" ;; + 28) echo "Curl: Operation timeout" ;; + *) echo "Curl: Unknown error ($code)" ;; + esac + ;; + wget) + echo "Wget failed – URL unreachable or permission denied" + ;; + systemctl) + echo "Systemd unit 
failure – check service name and permissions" + ;; + jq) + echo "jq parse error – malformed JSON or missing key" + ;; + mariadb | mysql) + echo "MySQL/MariaDB command failed – check credentials or DB" + ;; + unzip) + echo "unzip failed – corrupt file or missing permission" + ;; + tar) + echo "tar failed – invalid format or missing binary" + ;; + node | npm | pnpm | yarn) + echo "Node tool failed – check version compatibility or package.json" + ;; + *) echo "" ;; + esac +} + +catch_errors() { + set -Eeuo pipefail + trap 'error_handler $LINENO "$BASH_COMMAND"' ERR +} + +# ------------------------------------------------------------------------------ +# Sets ANSI color codes used for styled terminal output. +# ------------------------------------------------------------------------------ +color() { + YW=$(echo "\033[33m") + YWB=$'\e[93m' + BL=$(echo "\033[36m") + RD=$(echo "\033[01;31m") + BGN=$(echo "\033[4;92m") + GN=$(echo "\033[1;92m") + DGN=$(echo "\033[32m") + CL=$(echo "\033[m") +} + +# Special for spinner and colorized output via printf +color_spinner() { + CS_YW=$'\033[33m' + CS_YWB=$'\033[93m' + CS_CL=$'\033[m' +} + +# ------------------------------------------------------------------------------ +# Defines formatting helpers like tab, bold, and line reset sequences. +# ------------------------------------------------------------------------------ +formatting() { + BFR="\\r\\033[K" + BOLD=$(echo "\033[1m") + HOLD=" " + TAB=" " + TAB3=" " +} + +# ------------------------------------------------------------------------------ +# Sets symbolic icons used throughout user feedback and prompts. 
+# ------------------------------------------------------------------------------ +icons() { + CM="${TAB}✔️${TAB}" + CROSS="${TAB}✖️${TAB}" + DNSOK="✔️ " + DNSFAIL="${TAB}✖️${TAB}" + INFO="${TAB}💡${TAB}${CL}" + OS="${TAB}🖥️${TAB}${CL}" + OSVERSION="${TAB}🌟${TAB}${CL}" + CONTAINERTYPE="${TAB}📦${TAB}${CL}" + DISKSIZE="${TAB}💾${TAB}${CL}" + CPUCORE="${TAB}🧠${TAB}${CL}" + RAMSIZE="${TAB}🛠️${TAB}${CL}" + SEARCH="${TAB}🔍${TAB}${CL}" + VERBOSE_CROPPED="🔍${TAB}" + VERIFYPW="${TAB}🔐${TAB}${CL}" + CONTAINERID="${TAB}🆔${TAB}${CL}" + HOSTNAME="${TAB}🏠${TAB}${CL}" + BRIDGE="${TAB}🌉${TAB}${CL}" + NETWORK="${TAB}📡${TAB}${CL}" + GATEWAY="${TAB}🌐${TAB}${CL}" + DISABLEIPV6="${TAB}🚫${TAB}${CL}" + DEFAULT="${TAB}⚙️${TAB}${CL}" + MACADDRESS="${TAB}🔗${TAB}${CL}" + VLANTAG="${TAB}🏷️${TAB}${CL}" + ROOTSSH="${TAB}🔑${TAB}${CL}" + CREATING="${TAB}🚀${TAB}${CL}" + ADVANCED="${TAB}🧩${TAB}${CL}" + FUSE="${TAB}🗂️${TAB}${CL}" + HOURGLASS="${TAB}⏳${TAB}" + +} + +# ------------------------------------------------------------------------------ +# Sets default retry and wait variables used for system actions. +# ------------------------------------------------------------------------------ +default_vars() { + RETRY_NUM=10 + RETRY_EVERY=3 + i=$RETRY_NUM + #[[ "${VAR_OS:-}" == "unknown" ]] +} + +# ------------------------------------------------------------------------------ +# Sets default verbose mode for script and os execution. 
+# ------------------------------------------------------------------------------ +set_std_mode() { + if [ "${VERBOSE:-no}" = "yes" ]; then + STD="" + else + STD="silent" + fi +} + +# Silent execution function +silent() { + "$@" >/dev/null 2>&1 +} + +# Function to download & save header files +get_header() { + local app_name=$(echo "${APP,,}" | tr -d ' ') + local app_type=${APP_TYPE:-ct} + local header_url="https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/${app_type}/headers/${app_name}" + local local_header_path="/usr/local/community-scripts/headers/${app_type}/${app_name}" + + mkdir -p "$(dirname "$local_header_path")" + + if [ ! -s "$local_header_path" ]; then + if ! curl -fsSL "$header_url" -o "$local_header_path"; then + return 1 + fi + fi + + cat "$local_header_path" 2>/dev/null || true +} + +header_info() { + local app_name=$(echo "${APP,,}" | tr -d ' ') + local header_content + + header_content=$(get_header "$app_name") || header_content="" + + clear + local term_width + term_width=$(tput cols 2>/dev/null || echo 120) + + if [ -n "$header_content" ]; then + echo "$header_content" + fi +} + +ensure_tput() { + if ! command -v tput >/dev/null 2>&1; then + if grep -qi 'alpine' /etc/os-release; then + apk add --no-cache ncurses >/dev/null 2>&1 + elif command -v apt-get >/dev/null 2>&1; then + apt-get update -qq >/dev/null + apt-get install -y -qq ncurses-bin >/dev/null 2>&1 + fi + fi +} + +is_alpine() { + local os_id="${var_os:-${PCT_OSTYPE:-}}" + + if [[ -z "$os_id" && -f /etc/os-release ]]; then + os_id="$( + . /etc/os-release 2>/dev/null + echo "${ID:-}" + )" + fi + + [[ "$os_id" == "alpine" ]] +} + +is_verbose_mode() { + local verbose="${VERBOSE:-${var_verbose:-no}}" + local tty_status + if [[ -t 2 ]]; then + tty_status="interactive" + else + tty_status="not-a-tty" + fi + [[ "$verbose" != "no" || ! 
-t 2 ]] +} + +# ------------------------------------------------------------------------------ +# Handles specific curl error codes and displays descriptive messages. +# ------------------------------------------------------------------------------ +__curl_err_handler() { + local exit_code="$1" + local target="$2" + local curl_msg="$3" + + case $exit_code in + 1) msg_error "Unsupported protocol: $target" ;; + 2) msg_error "Curl init failed: $target" ;; + 3) msg_error "Malformed URL: $target" ;; + 5) msg_error "Proxy resolution failed: $target" ;; + 6) msg_error "Host resolution failed: $target" ;; + 7) msg_error "Connection failed: $target" ;; + 9) msg_error "Access denied: $target" ;; + 18) msg_error "Partial file transfer: $target" ;; + 22) msg_error "HTTP error (e.g. 400/404): $target" ;; + 23) msg_error "Write error on local system: $target" ;; + 26) msg_error "Read error from local file: $target" ;; + 28) msg_error "Timeout: $target" ;; + 35) msg_error "SSL connect error: $target" ;; + 47) msg_error "Too many redirects: $target" ;; + 51) msg_error "SSL cert verify failed: $target" ;; + 52) msg_error "Empty server response: $target" ;; + 55) msg_error "Send error: $target" ;; + 56) msg_error "Receive error: $target" ;; + 60) msg_error "SSL CA not trusted: $target" ;; + 67) msg_error "Login denied by server: $target" ;; + 78) msg_error "Remote file not found (404): $target" ;; + *) msg_error "Curl failed with code $exit_code: $target" ;; + esac + + [[ -n "$curl_msg" ]] && printf "%s\n" "$curl_msg" >&2 + exit 1 +} + +fatal() { + msg_error "$1" + kill -INT $$ +} + +spinner() { + local chars=(⠋ ⠙ ⠹ ⠸ ⠼ ⠴ ⠦ ⠧ ⠇ ⠏) + local i=0 + while true; do + local index=$((i++ % ${#chars[@]})) + printf "\r\033[2K%s %b" "${CS_YWB}${chars[$index]}${CS_CL}" "${CS_YWB}${SPINNER_MSG:-}${CS_CL}" + sleep 0.1 + done +} + +clear_line() { + tput cr 2>/dev/null || echo -en "\r" + tput el 2>/dev/null || echo -en "\033[K" +} + +stop_spinner() { + local pid="${SPINNER_PID:-}" + [[ -z "$pid" && 
-f /tmp/.spinner.pid ]] && pid=$(/dev/null; then + sleep 0.05 + kill -9 "$pid" 2>/dev/null || true + wait "$pid" 2>/dev/null || true + fi + rm -f /tmp/.spinner.pid + fi + + unset SPINNER_PID SPINNER_MSG + stty sane 2>/dev/null || true +} + +msg_info() { + local msg="$1" + [[ -z "$msg" ]] && return + + if ! declare -p MSG_INFO_SHOWN &>/dev/null || ! declare -A MSG_INFO_SHOWN &>/dev/null; then + declare -gA MSG_INFO_SHOWN=() + fi + [[ -n "${MSG_INFO_SHOWN["$msg"]+x}" ]] && return + MSG_INFO_SHOWN["$msg"]=1 + + stop_spinner + SPINNER_MSG="$msg" + + if is_verbose_mode || is_alpine; then + local HOURGLASS="${TAB}⏳${TAB}" + printf "\r\e[2K%s %b" "$HOURGLASS" "${YW}${msg}${CL}" >&2 + return + fi + + color_spinner + spinner & + SPINNER_PID=$! + echo "$SPINNER_PID" >/tmp/.spinner.pid + disown "$SPINNER_PID" 2>/dev/null || true +} + +msg_ok() { + local msg="$1" + [[ -z "$msg" ]] && return + stop_spinner + clear_line + printf "%s %b\n" "$CM" "${GN}${msg}${CL}" >&2 + unset MSG_INFO_SHOWN["$msg"] +} + +msg_error() { + stop_spinner + local msg="$1" + echo -e "${BFR:-} ${CROSS:-✖️} ${RD}${msg}${CL}" +} + +msg_warn() { + stop_spinner + local msg="$1" + echo -e "${BFR:-} ${INFO:-ℹ️} ${YWB}${msg}${CL}" +} + +msg_custom() { + local symbol="${1:-"[*]"}" + local color="${2:-"\e[36m"}" + local msg="${3:-}" + [[ -z "$msg" ]] && return + stop_spinner + echo -e "${BFR:-} ${symbol} ${color}${msg}${CL:-\e[0m}" +} + +run_container_safe() { + local ct="$1" + shift + local cmd="$*" + + lxc-attach -n "$ct" -- bash -euo pipefail -c " + trap 'echo Aborted in container; exit 130' SIGINT SIGTERM + $cmd + " || __handle_general_error "lxc-attach to CT $ct" +} + +cleanup_lxc() { + msg_info "Cleaning up" + + if is_alpine; then + $STD apk cache clean || true + rm -rf /var/cache/apk/* + else + $STD apt -y autoremove || true + $STD apt -y autoclean || true + $STD apt -y clean || true + fi + + # Clear temp artifacts (keep sockets/FIFOs; ignore errors) + find /tmp /var/tmp -type f -name 'tmp*' -delete 
2>/dev/null || true + find /tmp /var/tmp -type f -name 'tempfile*' -delete 2>/dev/null || true + + # Truncate writable log files silently (permission errors ignored) + if command -v truncate >/dev/null 2>&1; then + find /var/log -type f -writable -print0 2>/dev/null | + xargs -0 -n1 truncate -s 0 2>/dev/null || true + fi + + # Node.js npm + if command -v npm &>/dev/null; then $STD npm cache clean --force || true; fi + # Node.js yarn + if command -v yarn &>/dev/null; then $STD yarn cache clean || true; fi + # Node.js pnpm + if command -v pnpm &>/dev/null; then $STD pnpm store prune || true; fi + # Go + if command -v go &>/dev/null; then $STD go clean -cache -modcache || true; fi + # Rust cargo + if command -v cargo &>/dev/null; then $STD cargo clean || true; fi + # Ruby gem + if command -v gem &>/dev/null; then $STD gem cleanup || true; fi + # Composer (PHP) + if command -v composer &>/dev/null; then $STD composer clear-cache || true; fi + + if command -v journalctl &>/dev/null; then + $STD journalctl --vacuum-time=10m || true + fi + msg_ok "Cleaned" +} + +check_or_create_swap() { + msg_info "Checking for active swap" + + if swapon --noheadings --show | grep -q 'swap'; then + msg_ok "Swap is active" + return 0 + fi + + msg_error "No active swap detected" + + read -p "Do you want to create a swap file? [y/N]: " create_swap + create_swap="${create_swap,,}" # to lowercase + + if [[ "$create_swap" != "y" && "$create_swap" != "yes" ]]; then + msg_info "Skipping swap file creation" + return 1 + fi + + read -p "Enter swap size in MB (e.g., 2048 for 2GB): " swap_size_mb + if ! [[ "$swap_size_mb" =~ ^[0-9]+$ ]]; then + msg_error "Invalid size input. Aborting." 
+ return 1 + fi + + local swap_file="/swapfile" + + msg_info "Creating ${swap_size_mb}MB swap file at $swap_file" + if dd if=/dev/zero of="$swap_file" bs=1M count="$swap_size_mb" status=progress && + chmod 600 "$swap_file" && + mkswap "$swap_file" && + swapon "$swap_file"; then + msg_ok "Swap file created and activated successfully" + else + msg_error "Failed to create or activate swap" + return 1 + fi +} + +trap 'stop_spinner' EXIT INT TERM diff --git a/misc/old_misc/create_lxc.sh b/misc/old_misc/create_lxc.sh new file mode 100644 index 000000000..975311f6a --- /dev/null +++ b/misc/old_misc/create_lxc.sh @@ -0,0 +1,385 @@ +#!/usr/bin/env bash + +# Copyright (c) 2021-2025 tteck +# Author: tteck (tteckster) +# Co-Author: MickLesk +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE + +# This sets verbose mode if the global variable is set to "yes" +# if [ "$VERBOSE" == "yes" ]; then set -x; fi + +if command -v curl >/dev/null 2>&1; then + source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/core.func) + load_functions + #echo "(create-lxc.sh) Loaded core.func via curl" +elif command -v wget >/dev/null 2>&1; then + source <(wget -qO- https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/core.func) + load_functions + #echo "(create-lxc.sh) Loaded core.func via wget" +fi + +# This sets error handling options and defines the error_handler function to handle errors +set -Eeuo pipefail +trap 'error_handler $LINENO "$BASH_COMMAND"' ERR +trap on_exit EXIT +trap on_interrupt INT +trap on_terminate TERM + +function on_exit() { + local exit_code="$?" + [[ -n "${lockfile:-}" && -e "$lockfile" ]] && rm -f "$lockfile" + exit "$exit_code" +} + +function error_handler() { + local exit_code="$?" 
+ local line_number="$1" + local command="$2" + printf "\e[?25h" + echo -e "\n${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}\n" + exit "$exit_code" +} + +function on_interrupt() { + echo -e "\n${RD}Interrupted by user (SIGINT)${CL}" + exit 130 +} + +function on_terminate() { + echo -e "\n${RD}Terminated by signal (SIGTERM)${CL}" + exit 143 +} + +function exit_script() { + clear + printf "\e[?25h" + echo -e "\n${CROSS}${RD}User exited script${CL}\n" + kill 0 + exit 1 +} + +function check_storage_support() { + local CONTENT="$1" + local -a VALID_STORAGES=() + while IFS= read -r line; do + local STORAGE_NAME + STORAGE_NAME=$(awk '{print $1}' <<<"$line") + [[ -z "$STORAGE_NAME" ]] && continue + VALID_STORAGES+=("$STORAGE_NAME") + done < <(pvesm status -content "$CONTENT" 2>/dev/null | awk 'NR>1') + + [[ ${#VALID_STORAGES[@]} -gt 0 ]] +} + +# This function selects a storage pool for a given content type (e.g., rootdir, vztmpl). +function select_storage() { + local CLASS=$1 CONTENT CONTENT_LABEL + + case $CLASS in + container) + CONTENT='rootdir' + CONTENT_LABEL='Container' + ;; + template) + CONTENT='vztmpl' + CONTENT_LABEL='Container template' + ;; + iso) + CONTENT='iso' + CONTENT_LABEL='ISO image' + ;; + images) + CONTENT='images' + CONTENT_LABEL='VM Disk image' + ;; + backup) + CONTENT='backup' + CONTENT_LABEL='Backup' + ;; + snippets) + CONTENT='snippets' + CONTENT_LABEL='Snippets' + ;; + *) + msg_error "Invalid storage class '$CLASS'" + return 1 + ;; + esac + + # Check for preset STORAGE variable + if [ "$CONTENT" = "rootdir" ] && [ -n "${STORAGE:-}" ]; then + if pvesm status -content "$CONTENT" | awk 'NR>1 {print $1}' | grep -qx "$STORAGE"; then + STORAGE_RESULT="$STORAGE" + msg_info "Using preset storage: $STORAGE_RESULT for $CONTENT_LABEL" + return 0 + else + msg_error "Preset storage '$STORAGE' is not valid for content type '$CONTENT'." 
+ return 2 + fi + fi + + local -A STORAGE_MAP + local -a MENU + local COL_WIDTH=0 + + while read -r TAG TYPE _ TOTAL USED FREE _; do + [[ -n "$TAG" && -n "$TYPE" ]] || continue + local STORAGE_NAME="$TAG" + local DISPLAY="${STORAGE_NAME} (${TYPE})" + local USED_FMT=$(numfmt --to=iec --from-unit=K --format %.1f <<<"$USED") + local FREE_FMT=$(numfmt --to=iec --from-unit=K --format %.1f <<<"$FREE") + local INFO="Free: ${FREE_FMT}B Used: ${USED_FMT}B" + STORAGE_MAP["$DISPLAY"]="$STORAGE_NAME" + MENU+=("$DISPLAY" "$INFO" "OFF") + ((${#DISPLAY} > COL_WIDTH)) && COL_WIDTH=${#DISPLAY} + done < <(pvesm status -content "$CONTENT" | awk 'NR>1') + + if [ ${#MENU[@]} -eq 0 ]; then + msg_error "No storage found for content type '$CONTENT'." + return 2 + fi + + if [ $((${#MENU[@]} / 3)) -eq 1 ]; then + STORAGE_RESULT="${STORAGE_MAP[${MENU[0]}]}" + STORAGE_INFO="${MENU[1]}" + return 0 + fi + + local WIDTH=$((COL_WIDTH + 42)) + while true; do + local DISPLAY_SELECTED + DISPLAY_SELECTED=$(whiptail --backtitle "Proxmox VE Helper Scripts" \ + --title "Storage Pools" \ + --radiolist "Which storage pool for ${CONTENT_LABEL,,}?\n(Spacebar to select)" \ + 16 "$WIDTH" 6 "${MENU[@]}" 3>&1 1>&2 2>&3) + + # Cancel or ESC + [[ $? -ne 0 ]] && exit_script + + # Strip trailing whitespace or newline (important for storages like "storage (dir)") + DISPLAY_SELECTED=$(sed 's/[[:space:]]*$//' <<<"$DISPLAY_SELECTED") + + if [[ -z "$DISPLAY_SELECTED" || -z "${STORAGE_MAP[$DISPLAY_SELECTED]+_}" ]]; then + whiptail --msgbox "No valid storage selected. Please try again." 8 58 + continue + fi + + STORAGE_RESULT="${STORAGE_MAP[$DISPLAY_SELECTED]}" + for ((i = 0; i < ${#MENU[@]}; i += 3)); do + if [[ "${MENU[$i]}" == "$DISPLAY_SELECTED" ]]; then + STORAGE_INFO="${MENU[$i + 1]}" + break + fi + done + return 0 + done +} + +# Test if required variables are set +[[ "${CTID:-}" ]] || { + msg_error "You need to set 'CTID' variable." 
+ exit 203 +} +[[ "${PCT_OSTYPE:-}" ]] || { + msg_error "You need to set 'PCT_OSTYPE' variable." + exit 204 +} + +# Test if ID is valid +[ "$CTID" -ge "100" ] || { + msg_error "ID cannot be less than 100." + exit 205 +} + +# Test if ID is in use +if qm status "$CTID" &>/dev/null || pct status "$CTID" &>/dev/null; then + echo -e "ID '$CTID' is already in use." + unset CTID + msg_error "Cannot use ID that is already in use." + exit 206 +fi + +# This checks for the presence of valid Container Storage and Template Storage locations +if ! check_storage_support "rootdir"; then + msg_error "No valid storage found for 'rootdir' [Container]" + exit 1 +fi +if ! check_storage_support "vztmpl"; then + msg_error "No valid storage found for 'vztmpl' [Template]" + exit 1 +fi + +while true; do + if select_storage template; then + TEMPLATE_STORAGE="$STORAGE_RESULT" + TEMPLATE_STORAGE_INFO="$STORAGE_INFO" + msg_ok "Storage ${BL}$TEMPLATE_STORAGE${CL} ($TEMPLATE_STORAGE_INFO) [Template]" + break + fi +done + +while true; do + if select_storage container; then + CONTAINER_STORAGE="$STORAGE_RESULT" + CONTAINER_STORAGE_INFO="$STORAGE_INFO" + msg_ok "Storage ${BL}$CONTAINER_STORAGE${CL} ($CONTAINER_STORAGE_INFO) [Container]" + break + fi +done + +# Check free space on selected container storage +STORAGE_FREE=$(pvesm status | awk -v s="$CONTAINER_STORAGE" '$1 == s { print $6 }') +REQUIRED_KB=$((${PCT_DISK_SIZE:-8} * 1024 * 1024)) +if [ "$STORAGE_FREE" -lt "$REQUIRED_KB" ]; then + msg_error "Not enough space on '$CONTAINER_STORAGE'. Needed: ${PCT_DISK_SIZE:-8}G." + exit 214 +fi + +# Check Cluster Quorum if in Cluster +if [ -f /etc/pve/corosync.conf ]; then + msg_info "Checking cluster quorum" + if ! pvecm status | awk -F':' '/^Quorate/ { exit ($2 ~ /Yes/) ? 0 : 1 }'; then + + msg_error "Cluster is not quorate. Start all nodes or configure quorum device (QDevice)." 
+ exit 210 + fi + msg_ok "Cluster is quorate" +fi + +# Update LXC template list +TEMPLATE_SEARCH="${PCT_OSTYPE}-${PCT_OSVERSION:-}" +case "$PCT_OSTYPE" in +debian | ubuntu) + TEMPLATE_PATTERN="-standard_" + ;; +alpine | fedora | rocky | centos) + TEMPLATE_PATTERN="-default_" + ;; +*) + TEMPLATE_PATTERN="" + ;; +esac + +# 1. Check local templates first +msg_info "Searching for template '$TEMPLATE_SEARCH'" +mapfile -t TEMPLATES < <( + pveam list "$TEMPLATE_STORAGE" | + awk -v s="$TEMPLATE_SEARCH" -v p="$TEMPLATE_PATTERN" '$1 ~ s && $1 ~ p {print $1}' | + sed 's/.*\///' | sort -t - -k 2 -V +) + +if [ ${#TEMPLATES[@]} -gt 0 ]; then + TEMPLATE_SOURCE="local" +else + msg_info "No local template found, checking online repository" + pveam update >/dev/null 2>&1 + mapfile -t TEMPLATES < <( + pveam update >/dev/null 2>&1 && + pveam available -section system | + sed -n "s/.*\($TEMPLATE_SEARCH.*$TEMPLATE_PATTERN.*\)/\1/p" | + sort -t - -k 2 -V + ) + TEMPLATE_SOURCE="online" +fi + +TEMPLATE="${TEMPLATES[-1]}" +TEMPLATE_PATH="$(pvesm path $TEMPLATE_STORAGE:vztmpl/$TEMPLATE 2>/dev/null || + echo "/var/lib/vz/template/cache/$TEMPLATE")" +msg_ok "Template ${BL}$TEMPLATE${CL} [$TEMPLATE_SOURCE]" + +# 4. Validate template (exists & not corrupted) +TEMPLATE_VALID=1 + +if [ ! -s "$TEMPLATE_PATH" ]; then + TEMPLATE_VALID=0 +elif ! tar --use-compress-program=zstdcat -tf "$TEMPLATE_PATH" >/dev/null 2>&1; then + TEMPLATE_VALID=0 +fi + +if [ "$TEMPLATE_VALID" -eq 0 ]; then + msg_warn "Template $TEMPLATE is missing or corrupted. Re-downloading." + [[ -f "$TEMPLATE_PATH" ]] && rm -f "$TEMPLATE_PATH" + for attempt in {1..3}; do + msg_info "Attempt $attempt: Downloading LXC template..." + if pveam download "$TEMPLATE_STORAGE" "$TEMPLATE" >/dev/null 2>&1; then + msg_ok "Template download successful." + break + fi + if [ $attempt -eq 3 ]; then + msg_error "Failed after 3 attempts. 
Please check network access or manually run:\n pveam download $TEMPLATE_STORAGE $TEMPLATE" + exit 208 + fi + sleep $((attempt * 5)) + done +fi + +msg_info "Creating LXC Container" +# Check and fix subuid/subgid +grep -q "root:100000:65536" /etc/subuid || echo "root:100000:65536" >>/etc/subuid +grep -q "root:100000:65536" /etc/subgid || echo "root:100000:65536" >>/etc/subgid + +# Combine all options +PCT_OPTIONS=(${PCT_OPTIONS[@]:-${DEFAULT_PCT_OPTIONS[@]}}) +[[ " ${PCT_OPTIONS[@]} " =~ " -rootfs " ]] || PCT_OPTIONS+=(-rootfs "$CONTAINER_STORAGE:${PCT_DISK_SIZE:-8}") + +# Secure creation of the LXC container with lock and template check +lockfile="/tmp/template.${TEMPLATE}.lock" +exec 9>"$lockfile" || { + msg_error "Failed to create lock file '$lockfile'." + exit 200 +} +flock -w 60 9 || { + msg_error "Timeout while waiting for template lock" + exit 211 +} + +if ! pct create "$CTID" "${TEMPLATE_STORAGE}:vztmpl/${TEMPLATE}" "${PCT_OPTIONS[@]}" &>/dev/null; then + msg_error "Container creation failed. Checking if template is corrupted or incomplete." + + if [[ ! -s "$TEMPLATE_PATH" || "$(stat -c%s "$TEMPLATE_PATH")" -lt 1000000 ]]; then + msg_error "Template file too small or missing – re-downloading." + rm -f "$TEMPLATE_PATH" + elif ! zstdcat "$TEMPLATE_PATH" | tar -tf - &>/dev/null; then + msg_error "Template appears to be corrupted – re-downloading." + rm -f "$TEMPLATE_PATH" + else + msg_error "Template is valid, but container creation failed. Update your whole Proxmox System (pve-container) first or check https://github.com/community-scripts/ProxmoxVE/discussions/8126" + exit 209 + fi + + # Retry download + for attempt in {1..3}; do + msg_info "Attempt $attempt: Re-downloading template..." + if timeout 120 pveam download "$TEMPLATE_STORAGE" "$TEMPLATE" >/dev/null; then + msg_ok "Template re-download successful." + break + fi + if [ "$attempt" -eq 3 ]; then + msg_error "Three failed attempts. Aborting." 
+ exit 208 + fi + sleep $((attempt * 5)) + done + + sleep 1 # I/O-Sync-Delay + msg_ok "Re-downloaded LXC Template" +fi + +if ! pct list | awk '{print $1}' | grep -qx "$CTID"; then + msg_error "Container ID $CTID not listed in 'pct list' – unexpected failure." + exit 215 +fi + +if ! grep -q '^rootfs:' "/etc/pve/lxc/$CTID.conf"; then + msg_error "RootFS entry missing in container config – storage not correctly assigned." + exit 216 +fi + +if grep -q '^hostname:' "/etc/pve/lxc/$CTID.conf"; then + CT_HOSTNAME=$(grep '^hostname:' "/etc/pve/lxc/$CTID.conf" | awk '{print $2}') + if [[ ! "$CT_HOSTNAME" =~ ^[a-z0-9-]+$ ]]; then + msg_warn "Hostname '$CT_HOSTNAME' contains invalid characters – may cause issues with networking or DNS." + fi +fi + +msg_ok "LXC Container ${BL}$CTID${CL} ${GN}was successfully created." diff --git a/misc/old_misc/install.func b/misc/old_misc/install.func new file mode 100644 index 000000000..bd51cde15 --- /dev/null +++ b/misc/old_misc/install.func @@ -0,0 +1,217 @@ +# Copyright (c) 2021-2025 tteck +# Author: tteck (tteckster) +# Co-Author: MickLesk +# License: MIT +# https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE + +if ! command -v curl >/dev/null 2>&1; then + printf "\r\e[2K%b" '\033[93m Setup Source \033[m' >&2 + apt-get update >/dev/null 2>&1 + apt-get install -y curl >/dev/null 2>&1 +fi +source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/core.func) +load_functions +# This function enables IPv6 if it's not disabled and sets verbose mode +verb_ip6() { + set_std_mode # Set STD mode based on VERBOSE + + if [ "$IPV6_METHOD" == "disable" ]; then + msg_info "Disabling IPv6 (this may affect some services)" + mkdir -p /etc/sysctl.d + $STD tee /etc/sysctl.d/99-disable-ipv6.conf >/dev/null < 0; i--)); do + if [ "$(hostname -I)" != "" ]; then + break + fi + echo 1>&2 -en "${CROSS}${RD} No Network! 
" + sleep $RETRY_EVERY + done + if [ "$(hostname -I)" = "" ]; then + echo 1>&2 -e "\n${CROSS}${RD} No Network After $RETRY_NUM Tries${CL}" + echo -e "${NETWORK}Check Network Settings" + exit 1 + fi + rm -rf /usr/lib/python3.*/EXTERNALLY-MANAGED + systemctl disable -q --now systemd-networkd-wait-online.service + msg_ok "Set up Container OS" + #msg_custom "${CM}" "${GN}" "Network Connected: ${BL}$(hostname -I)" + msg_ok "Network Connected: ${BL}$(hostname -I)" +} + +# This function checks the network connection by pinging a known IP address and prompts the user to continue if the internet is not connected +# This function checks the network connection by pinging a known IP address and prompts the user to continue if the internet is not connected +network_check() { + set +e + trap - ERR + ipv4_connected=false + ipv6_connected=false + sleep 1 + + # Check IPv4 connectivity to Google, Cloudflare & Quad9 DNS servers. + if ping -c 1 -W 1 1.1.1.1 &>/dev/null || ping -c 1 -W 1 8.8.8.8 &>/dev/null || ping -c 1 -W 1 9.9.9.9 &>/dev/null; then + msg_ok "IPv4 Internet Connected" + ipv4_connected=true + else + msg_error "IPv4 Internet Not Connected" + fi + + # Check IPv6 connectivity to Google, Cloudflare & Quad9 DNS servers. + if ping6 -c 1 -W 1 2606:4700:4700::1111 &>/dev/null || ping6 -c 1 -W 1 2001:4860:4860::8888 &>/dev/null || ping6 -c 1 -W 1 2620:fe::fe &>/dev/null; then + msg_ok "IPv6 Internet Connected" + ipv6_connected=true + else + msg_error "IPv6 Internet Not Connected" + fi + + # If both IPv4 and IPv6 checks fail, prompt the user + if [[ $ipv4_connected == false && $ipv6_connected == false ]]; then + read -r -p "No Internet detected, would you like to continue anyway? 
" prompt + if [[ "${prompt,,}" =~ ^(y|yes)$ ]]; then + echo -e "${INFO}${RD}Expect Issues Without Internet${CL}" + else + echo -e "${NETWORK}Check Network Settings" + exit 1 + fi + fi + + # DNS resolution checks for GitHub-related domains (IPv4 and/or IPv6) + GIT_HOSTS=("github.com" "raw.githubusercontent.com" "api.github.com" "git.community-scripts.org") + GIT_STATUS="Git DNS:" + DNS_FAILED=false + + for HOST in "${GIT_HOSTS[@]}"; do + RESOLVEDIP=$(getent hosts "$HOST" | awk '{ print $1 }' | grep -E '(^([0-9]{1,3}\.){3}[0-9]{1,3}$)|(^[a-fA-F0-9:]+$)' | head -n1) + if [[ -z "$RESOLVEDIP" ]]; then + GIT_STATUS+="$HOST:($DNSFAIL)" + DNS_FAILED=true + else + GIT_STATUS+=" $HOST:($DNSOK)" + fi + done + + if [[ "$DNS_FAILED" == true ]]; then + fatal "$GIT_STATUS" + else + msg_ok "$GIT_STATUS" + fi + + set -e + trap 'error_handler $LINENO "$BASH_COMMAND"' ERR +} + +# This function updates the Container OS by running apt-get update and upgrade +update_os() { + msg_info "Updating Container OS" + if [[ "$CACHER" == "yes" ]]; then + echo 'Acquire::http::Proxy-Auto-Detect "/usr/local/bin/apt-proxy-detect.sh";' >/etc/apt/apt.conf.d/00aptproxy + cat </usr/local/bin/apt-proxy-detect.sh +#!/bin/bash +if nc -w1 -z "${CACHER_IP}" 3142; then + echo -n "http://${CACHER_IP}:3142" +else + echo -n "DIRECT" +fi +EOF + chmod +x /usr/local/bin/apt-proxy-detect.sh + fi + $STD apt-get update + $STD apt-get -o Dpkg::Options::="--force-confold" -y dist-upgrade + rm -rf /usr/lib/python3.*/EXTERNALLY-MANAGED + msg_ok "Updated Container OS" + + source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/tools.func) +} + +# This function modifies the message of the day (motd) and SSH settings +motd_ssh() { + # Set terminal to 256-color mode + grep -qxF "export TERM='xterm-256color'" /root/.bashrc || echo "export TERM='xterm-256color'" >>/root/.bashrc + + # Get OS information (Debian / Ubuntu) + if [ -f "/etc/os-release" ]; then + OS_NAME=$(grep ^NAME /etc/os-release 
| cut -d= -f2 | tr -d '"') + OS_VERSION=$(grep ^VERSION_ID /etc/os-release | cut -d= -f2 | tr -d '"') + elif [ -f "/etc/debian_version" ]; then + OS_NAME="Debian" + OS_VERSION=$(cat /etc/debian_version) + fi + + PROFILE_FILE="/etc/profile.d/00_lxc-details.sh" + echo "echo -e \"\"" >"$PROFILE_FILE" + echo -e "echo -e \"${BOLD}${APPLICATION} LXC Container${CL}"\" >>"$PROFILE_FILE" + echo -e "echo -e \"${TAB}${GATEWAY}${YW} Provided by: ${GN}community-scripts ORG ${YW}| GitHub: ${GN}https://github.com/community-scripts/ProxmoxVE${CL}\"" >>"$PROFILE_FILE" + echo "echo \"\"" >>"$PROFILE_FILE" + echo -e "echo -e \"${TAB}${OS}${YW} OS: ${GN}${OS_NAME} - Version: ${OS_VERSION}${CL}\"" >>"$PROFILE_FILE" + echo -e "echo -e \"${TAB}${HOSTNAME}${YW} Hostname: ${GN}\$(hostname)${CL}\"" >>"$PROFILE_FILE" + echo -e "echo -e \"${TAB}${INFO}${YW} IP Address: ${GN}\$(hostname -I | awk '{print \$1}')${CL}\"" >>"$PROFILE_FILE" + + # Disable default MOTD scripts + chmod -x /etc/update-motd.d/* + + if [[ "${SSH_ROOT}" == "yes" ]]; then + sed -i "s/#PermitRootLogin prohibit-password/PermitRootLogin yes/g" /etc/ssh/sshd_config + systemctl restart sshd + fi +} + +# This function customizes the container by modifying the getty service and enabling auto-login for the root user +customize() { + if [[ "$PASSWORD" == "" ]]; then + msg_info "Customizing Container" + GETTY_OVERRIDE="/etc/systemd/system/container-getty@1.service.d/override.conf" + mkdir -p $(dirname $GETTY_OVERRIDE) + cat <$GETTY_OVERRIDE + [Service] + ExecStart= + ExecStart=-/sbin/agetty --autologin root --noclear --keep-baud tty%I 115200,38400,9600 \$TERM +EOF + systemctl daemon-reload + systemctl restart $(basename $(dirname $GETTY_OVERRIDE) | sed 's/\.d//') + msg_ok "Customized Container" + fi + echo "bash -c \"\$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/ct/${app}.sh)\"" >/usr/bin/update + chmod +x /usr/bin/update + + if [[ -n "${SSH_AUTHORIZED_KEY}" ]]; then + mkdir -p /root/.ssh + echo 
"${SSH_AUTHORIZED_KEY}" >/root/.ssh/authorized_keys + chmod 700 /root/.ssh + chmod 600 /root/.ssh/authorized_keys + fi +} diff --git a/misc/old_misc/tools.func b/misc/old_misc/tools.func new file mode 100644 index 000000000..91e4a1440 --- /dev/null +++ b/misc/old_misc/tools.func @@ -0,0 +1,4818 @@ +#!/bin/bash + +# ============================================================================== +# HELPER FUNCTIONS FOR PACKAGE MANAGEMENT +# ============================================================================== +# +# This file provides unified helper functions for robust package installation +# and repository management across Debian/Ubuntu OS upgrades. +# +# Key Features: +# - Automatic retry logic for transient APT/network failures +# - Unified keyring cleanup from all 3 locations +# - Legacy installation cleanup (nvm, rbenv, rustup) +# - OS-upgrade-safe repository preparation +# - Service pattern matching for multi-version tools +# +# Usage in install scripts: +# source /dev/stdin <<< "$FUNCTIONS" # Load from build.func +# prepare_repository_setup "mysql" +# install_packages_with_retry "mysql-server" "mysql-client" +# +# Quick Reference (Core Helpers): +# cleanup_tool_keyrings() - Remove keyrings from all 3 locations +# stop_all_services() - Stop services by pattern (e.g. "php*-fpm") +# verify_tool_version() - Validate installed version matches expected +# cleanup_legacy_install() - Remove nvm, rbenv, rustup, etc. 
# prepare_repository_setup()    - Cleanup repos + keyrings + validate APT
# install_packages_with_retry() - Install with 3 retries and APT refresh
# upgrade_packages_with_retry() - Upgrade with 3 retries and APT refresh
#
# ==============================================================================

# ------------------------------------------------------------------------------
# Cache installed version to avoid repeated checks
# Arguments: $1 - app name, $2 - version string to record
# ------------------------------------------------------------------------------
cache_installed_version() {
  local tool="$1"
  local ver="$2"
  mkdir -p /var/cache/app-versions
  printf '%s\n' "$ver" >"/var/cache/app-versions/${tool}_version.txt"
}

# ------------------------------------------------------------------------------
# Print the cached version for an app (empty output when nothing was cached).
# Always returns 0 so callers can capture via $(...) without special handling.
# ------------------------------------------------------------------------------
get_cached_version() {
  local tool="$1"
  local cache_file="/var/cache/app-versions/${tool}_version.txt"
  mkdir -p /var/cache/app-versions
  if [[ -f "$cache_file" ]]; then
    cat "$cache_file"
  fi
  return 0
}

# ------------------------------------------------------------------------------
# Clean up ALL keyring locations for a tool (unified helper)
# Usage: cleanup_tool_keyrings "mariadb" "mysql" "postgresql"
# ------------------------------------------------------------------------------
cleanup_tool_keyrings() {
  local name_glob
  for name_glob in "$@"; do
    # Globs are intentionally unquoted so every matching key file is removed
    # from all three historical keyring locations.
    rm -f /usr/share/keyrings/${name_glob}*.gpg \
      /etc/apt/keyrings/${name_glob}*.gpg \
      /etc/apt/trusted.gpg.d/${name_glob}*.gpg 2>/dev/null || true
  done
}

# ------------------------------------------------------------------------------
# Stop and disable all service instances matching a pattern
# Usage: stop_all_services "php*-fpm" "mysql" "mariadb"
# ------------------------------------------------------------------------------
stop_all_services() {
  local unit_glob matched_units unit

  for unit_glob in "$@"; do
    # Collect matching unit names; '|| true' keeps pipeline failures from
    # propagating when nothing matches.
    matched_units=$(systemctl list-units --type=service --all 2>/dev/null |
      grep -oE "${unit_glob}[^ ]*\.service" 2>/dev/null |
      sort -u 2>/dev/null || true)

    if [[ -n "$matched_units" ]]; then
      while IFS= read -r unit; do
        [[ -z "$unit" ]] && continue
        $STD systemctl stop "$unit" 2>/dev/null || true
        $STD systemctl disable "$unit" 2>/dev/null || true
      done <<<"$matched_units"
    fi
  done

  return 0
}

# ------------------------------------------------------------------------------
# Verify installed tool version matches expected version (major component only)
# Returns: 0 if match, 1 if mismatch (with warning)
# Usage: verify_tool_version "nodejs" "22" "$(node -v | grep -oP '^v\K[0-9]+')"
# ------------------------------------------------------------------------------
verify_tool_version() {
  local tool="$1"
  local want="$2"
  local have="$3"

  # Only the major versions are compared, e.g. "22.1" vs "22.9" is a match.
  if [[ "${have%%.*}" == "${want%%.*}" ]]; then
    return 0
  fi

  msg_warn "$tool version mismatch: expected $want, got $have"
  return 1
}

# ------------------------------------------------------------------------------
# Clean up legacy installation methods (nvm, rbenv, rustup, etc.)
# Usage: cleanup_legacy_install "nodejs" -> removes nvm
# ------------------------------------------------------------------------------
cleanup_legacy_install() {
  local tool="$1"

  case "$tool" in
  nodejs | node)
    # nvm-managed Node installs live under $HOME; remove them plus shell hooks.
    if [[ -d "$HOME/.nvm" ]]; then
      msg_info "Removing legacy nvm installation"
      rm -rf "$HOME/.nvm" "$HOME/.npm" "$HOME/.bower" "$HOME/.config/yarn" 2>/dev/null || true
      sed -i '/NVM_DIR/d' "$HOME/.bashrc" "$HOME/.profile" 2>/dev/null || true
      msg_ok "Legacy nvm installation removed"
    fi
    ;;
  ruby)
    if [[ -d "$HOME/.rbenv" ]]; then
      msg_info "Removing legacy rbenv installation"
      rm -rf "$HOME/.rbenv" 2>/dev/null || true
      sed -i '/rbenv/d' "$HOME/.bashrc" "$HOME/.profile" 2>/dev/null || true
      msg_ok "Legacy rbenv installation removed"
    fi
    ;;
  rust)
    if [[ -d "$HOME/.cargo" ]] || [[ -d "$HOME/.rustup" ]]; then
      msg_info "Removing legacy rustup installation"
      rm -rf "$HOME/.cargo" "$HOME/.rustup" 2>/dev/null || true
      sed -i '/cargo/d' "$HOME/.bashrc" "$HOME/.profile" 2>/dev/null || true
      msg_ok "Legacy rustup installation removed"
    fi
    ;;
  go | golang)
    if [[ -d "$HOME/go" ]]; then
      msg_info "Removing legacy Go workspace"
      # Keep user code, just remove GOPATH env
      sed -i '/GOPATH/d' "$HOME/.bashrc" "$HOME/.profile" 2>/dev/null || true
      msg_ok "Legacy Go workspace cleaned"
    fi
    ;;
  esac
}

# ------------------------------------------------------------------------------
# Unified repository preparation before setup
# Cleans up old repos, keyrings, and ensures APT is working
# Usage: prepare_repository_setup "mariadb" "mysql"
# ------------------------------------------------------------------------------
prepare_repository_setup() {
  local repos=("$@")
  local repo

  # Drop any stale repository definitions for each name...
  for repo in "${repos[@]}"; do
    cleanup_old_repo_files "$repo"
  done

  # ...and their GPG keys from every keyring location.
  cleanup_tool_keyrings "${repos[@]}"

  # Finally make sure APT itself is usable.
  ensure_apt_working || return 1

  return 0
}

# ------------------------------------------------------------------------------
# Install packages with retry logic (3 attempts, APT refresh between tries)
# Usage: install_packages_with_retry "mysql-server" "mysql-client"
# ------------------------------------------------------------------------------
install_packages_with_retry() {
  local pkgs=("$@")
  local max_retries=2
  local attempt

  for attempt in 0 1 2; do
    if $STD apt install -y "${pkgs[@]}" 2>/dev/null; then
      return 0
    fi
    if ((attempt < max_retries)); then
      msg_warn "Package installation failed, retrying ($((attempt + 1))/$max_retries)..."
      sleep 2
      $STD apt update 2>/dev/null || true
    fi
  done

  return 1
}

# ------------------------------------------------------------------------------
# Upgrade specific packages with retry logic (3 attempts)
# Usage: upgrade_packages_with_retry "mariadb-server" "mariadb-client"
# ------------------------------------------------------------------------------
upgrade_packages_with_retry() {
  local pkgs=("$@")
  local max_retries=2
  local attempt

  for attempt in 0 1 2; do
    if $STD apt install --only-upgrade -y "${pkgs[@]}" 2>/dev/null; then
      return 0
    fi
    if ((attempt < max_retries)); then
      msg_warn "Package upgrade failed, retrying ($((attempt + 1))/$max_retries)..."
      sleep 2
      $STD apt update 2>/dev/null || true
    fi
  done

  return 1
}

# ------------------------------------------------------------------------------
# Check if tool is already installed and optionally verify exact version
# Prints the installed version (when detected) on stdout.
# Returns: 0 if installed (with optional version match), 1 if not installed
# Usage: is_tool_installed "mariadb" "11.4" || echo "Not installed"
# ------------------------------------------------------------------------------
is_tool_installed() {
  local tool="$1"
  local want="${2:-}"
  local found=""

  case "$tool" in
  mariadb)
    command -v mariadb >/dev/null 2>&1 &&
      found=$(mariadb --version 2>/dev/null | grep -oE '[0-9]+\.[0-9]+\.[0-9]+' | head -1)
    ;;
  mysql)
    command -v mysql >/dev/null 2>&1 &&
      found=$(mysql --version 2>/dev/null | grep -oE '[0-9]+\.[0-9]+\.[0-9]+' | head -1)
    ;;
  mongodb | mongod)
    command -v mongod >/dev/null 2>&1 &&
      found=$(mongod --version 2>/dev/null | awk '/db version/{print $3}' | cut -d. -f1,2)
    ;;
  node | nodejs)
    command -v node >/dev/null 2>&1 &&
      found=$(node -v 2>/dev/null | grep -oP '^v\K[0-9]+')
    ;;
  php)
    command -v php >/dev/null 2>&1 &&
      found=$(php -v 2>/dev/null | awk '/^PHP/{print $2}' | cut -d. -f1,2)
    ;;
  postgres | postgresql)
    command -v psql >/dev/null 2>&1 &&
      found=$(psql --version 2>/dev/null | awk '{print $3}' | cut -d. -f1)
    ;;
  ruby)
    command -v ruby >/dev/null 2>&1 &&
      found=$(ruby --version 2>/dev/null | awk '{print $2}' | cut -d. -f1,2)
    ;;
  rust | rustc)
    command -v rustc >/dev/null 2>&1 &&
      found=$(rustc --version 2>/dev/null | awk '{print $2}')
    ;;
  go | golang)
    command -v go >/dev/null 2>&1 &&
      found=$(go version 2>/dev/null | awk '{print $3}' | sed 's/go//')
    ;;
  clickhouse)
    command -v clickhouse >/dev/null 2>&1 &&
      found=$(clickhouse --version 2>/dev/null | awk '{print $2}')
    ;;
  esac

  # Nothing detected -> not installed.
  [[ -z "$found" ]] && return 1

  # Installed but not the requested version: still print what we found.
  if [[ -n "$want" && "$found" != "$want" ]]; then
    echo "$found"
    return 1
  fi

  echo "$found"
  return 0
}

# ------------------------------------------------------------------------------
# Remove old tool version completely (purge + cleanup repos)
# Usage: remove_old_tool_version "mariadb" "repository-name"
# ------------------------------------------------------------------------------
remove_old_tool_version() {
  local tool="$1"
  local repo="${2:-$1}"

  case "$tool" in
  mariadb)
    stop_all_services "mariadb"
    $STD apt purge -y 'mariadb*' >/dev/null 2>&1 || true
    cleanup_tool_keyrings "mariadb"
    ;;
  mysql)
    stop_all_services "mysql"
    $STD apt purge -y 'mysql*' >/dev/null 2>&1 || true
    rm -rf /var/lib/mysql 2>/dev/null || true
    cleanup_tool_keyrings "mysql"
    ;;
  mongodb)
    stop_all_services "mongod"
    $STD apt purge -y 'mongodb*' >/dev/null 2>&1 || true
    rm -rf /var/lib/mongodb 2>/dev/null || true
    cleanup_tool_keyrings "mongodb"
    ;;
  node | nodejs)
    $STD apt purge -y nodejs npm >/dev/null 2>&1 || true
    # Clean up npm global modules
    if command -v npm >/dev/null 2>&1; then
      npm list -g 2>/dev/null | grep -oE '^ \S+' | awk '{print $1}' | while read -r module; do
        npm uninstall -g "$module" >/dev/null 2>&1 || true
      done
    fi
    cleanup_legacy_install "nodejs"
    cleanup_tool_keyrings "nodesource"
    ;;
  php)
    stop_all_services "php.*-fpm"
    $STD apt purge -y 'php*' >/dev/null 2>&1 || true
    rm -rf /etc/php 2>/dev/null || true
    cleanup_tool_keyrings "deb.sury.org-php" "php"
    ;;
  postgresql)
    stop_all_services "postgresql"
    $STD apt purge -y 'postgresql*' >/dev/null 2>&1 || true
    # Keep data directory for safety (can be removed manually if needed)
    # rm -rf /var/lib/postgresql 2>/dev/null || true
    cleanup_tool_keyrings "postgresql" "pgdg"
    ;;
  java)
    $STD apt purge -y 'temurin*' 'adoptium*' 'openjdk*' >/dev/null 2>&1 || true
    cleanup_tool_keyrings "adoptium"
    ;;
  ruby)
    cleanup_legacy_install "ruby"
    $STD apt purge -y 'ruby*' >/dev/null 2>&1 || true
    ;;
  rust)
    cleanup_legacy_install "rust"
    ;;
  go | golang)
    rm -rf /usr/local/go 2>/dev/null || true
    cleanup_legacy_install "golang"
    ;;
  clickhouse)
    stop_all_services "clickhouse-server"
    $STD apt purge -y 'clickhouse*' >/dev/null 2>&1 || true
    rm -rf /var/lib/clickhouse 2>/dev/null || true
    cleanup_tool_keyrings "clickhouse"
    ;;
  esac

  # Clean up old repository files (both .list and .sources)
  cleanup_old_repo_files "$repo"

  return 0
}

# ------------------------------------------------------------------------------
# Determine if tool update/upgrade is needed
# Returns: 0 (update needed), 1 (already up-to-date)
# Usage: if should_update_tool "mariadb" "11.4"; then ...
# ------------------------------------------------------------------------------
# Determine if tool update/upgrade is needed
# Returns: 0 (update needed), 1 (already up-to-date)
# Usage: if should_update_tool "mariadb" "11.4"; then ... fi
# ------------------------------------------------------------------------------
should_update_tool() {
  local tool_name="$1"
  local target_version="$2"
  local current_version=""

  # Not installed at all -> a fresh install counts as "update needed".
  current_version=$(is_tool_installed "$tool_name" 2>/dev/null) || return 0

  # If versions are identical, no update needed
  if [[ "$current_version" == "$target_version" ]]; then
    return 1
  fi

  return 0
}

# ------------------------------------------------------------------------------
# Unified repository management for tools
# Handles adding, updating, and verifying tool repositories
# Usage: manage_tool_repository "mariadb" "11.4" "https://repo..." "GPG_key_url"
# Supports: mariadb, mongodb, nodejs, postgresql, php
# ------------------------------------------------------------------------------
manage_tool_repository() {
  local tool_name="$1"
  local version="$2"
  local repo_url="$3"
  local gpg_key_url="${4:-}"
  local distro_id repo_component suite

  distro_id=$(awk -F= '/^ID=/{print $2}' /etc/os-release | tr -d '"')

  case "$tool_name" in
  mariadb)
    if [[ -z "$repo_url" || -z "$gpg_key_url" ]]; then
      msg_error "MariaDB repository requires repo_url and gpg_key_url"
      return 1
    fi

    # Clean old repos first
    cleanup_old_repo_files "mariadb"

    # Get suite for fallback handling
    local distro_codename
    distro_codename=$(awk -F= '/^VERSION_CODENAME=/{print $2}' /etc/os-release)
    suite=$(get_fallback_suite "$distro_id" "$distro_codename" "$repo_url/$distro_id")

    # Setup new repository using deb822 format
    setup_deb822_repo \
      "mariadb" \
      "$gpg_key_url" \
      "$repo_url/$distro_id" \
      "$suite" \
      "main"
    return 0
    ;;

  mongodb)
    if [[ -z "$repo_url" || -z "$gpg_key_url" ]]; then
      msg_error "MongoDB repository requires repo_url and gpg_key_url"
      return 1
    fi

    # Clean old repos first
    cleanup_old_repo_files "mongodb"

    # Import GPG key
    mkdir -p /etc/apt/keyrings
    if ! curl -fsSL "$gpg_key_url" | gpg --dearmor --yes -o "/etc/apt/keyrings/mongodb-server-${version}.gpg" 2>/dev/null; then
      msg_error "Failed to download MongoDB GPG key"
      return 1
    fi

    # Setup repository
    local distro_codename
    distro_codename=$(awk -F= '/^VERSION_CODENAME=/{print $2}' /etc/os-release)

    # Suite mapping with fallback for newer releases not yet supported upstream
    if [[ "$distro_id" == "debian" ]]; then
      case "$distro_codename" in
      trixie | forky | sid)
        # Testing/unstable releases fall back to latest stable suite
        suite="bookworm"
        ;;
      bookworm)
        suite="bookworm"
        ;;
      bullseye)
        suite="bullseye"
        ;;
      *)
        # Unknown release: fall back to latest stable suite
        msg_warn "Unknown Debian release '${distro_codename}', using bookworm"
        suite="bookworm"
        ;;
      esac
    elif [[ "$distro_id" == "ubuntu" ]]; then
      case "$distro_codename" in
      oracular | plucky)
        # Newer releases fall back to latest LTS
        suite="noble"
        ;;
      noble)
        suite="noble"
        ;;
      jammy)
        suite="jammy"
        ;;
      focal)
        suite="focal"
        ;;
      *)
        # Unknown release: fall back to latest LTS
        msg_warn "Unknown Ubuntu release '${distro_codename}', using noble"
        suite="noble"
        ;;
      esac
    else
      # For other distros, try the generic fallback
      suite=$(get_fallback_suite "$distro_id" "$distro_codename" "$repo_url")
    fi

    repo_component="main"
    [[ "$distro_id" == "ubuntu" ]] && repo_component="multiverse"

    # FIX: heredoc must be written TO the .sources file ('cat <<EOF >file');
    # the corrupted form 'cat <file' would read the target instead.
    cat <<EOF >/etc/apt/sources.list.d/mongodb-org-${version}.sources
Types: deb
URIs: ${repo_url}
Suites: ${suite}/mongodb-org/${version}
Components: ${repo_component}
Architectures: $(dpkg --print-architecture)
Signed-By: /etc/apt/keyrings/mongodb-server-${version}.gpg
EOF
    return 0
    ;;

  nodejs)
    if [[ -z "$repo_url" || -z "$gpg_key_url" ]]; then
      msg_error "Node.js repository requires repo_url and gpg_key_url"
      return 1
    fi

    cleanup_old_repo_files "nodesource"

    # NodeSource uses deb822 format with GPG from repo
    local distro_codename
    distro_codename=$(awk -F= '/^VERSION_CODENAME=/{print $2}' /etc/os-release)

    # Create keyring directory first
    mkdir -p /etc/apt/keyrings

    # Download GPG key from NodeSource
    curl -fsSL "$gpg_key_url" | gpg --dearmor -o /etc/apt/keyrings/nodesource.gpg || {
      msg_error "Failed to import NodeSource GPG key"
      return 1
    }

    cat <<EOF >/etc/apt/sources.list.d/nodesource.sources
Types: deb
URIs: $repo_url
Suites: nodistro
Components: main
Architectures: $(dpkg --print-architecture)
Signed-By: /etc/apt/keyrings/nodesource.gpg
EOF
    return 0
    ;;

  php)
    if [[ -z "$gpg_key_url" ]]; then
      msg_error "PHP repository requires gpg_key_url"
      return 1
    fi

    cleanup_old_repo_files "php"

    # Download and install keyring package (ships deb.sury.org-php.gpg)
    curl -fsSLo /tmp/debsuryorg-archive-keyring.deb "$gpg_key_url" || {
      msg_error "Failed to download PHP keyring"
      return 1
    }
    dpkg -i /tmp/debsuryorg-archive-keyring.deb >/dev/null 2>&1 || {
      msg_error "Failed to install PHP keyring"
      rm -f /tmp/debsuryorg-archive-keyring.deb
      return 1
    }
    rm -f /tmp/debsuryorg-archive-keyring.deb

    # Setup repository
    local distro_codename
    distro_codename=$(awk -F= '/^VERSION_CODENAME=/{print $2}' /etc/os-release)
    cat <<EOF >/etc/apt/sources.list.d/php.sources
Types: deb
URIs: https://packages.sury.org/php
Suites: $distro_codename
Components: main
Architectures: $(dpkg --print-architecture)
Signed-By: /usr/share/keyrings/deb.sury.org-php.gpg
EOF
    return 0
    ;;

  postgresql)
    if [[ -z "$gpg_key_url" ]]; then
      msg_error "PostgreSQL repository requires gpg_key_url"
      return 1
    fi

    cleanup_old_repo_files "postgresql"

    # Create keyring directory first
    mkdir -p /etc/apt/keyrings

    # Import PostgreSQL key
    curl -fsSL "$gpg_key_url" | gpg --dearmor -o /etc/apt/keyrings/postgresql.gpg || {
      msg_error "Failed to import PostgreSQL GPG key"
      return 1
    }

    # Setup repository
    local distro_codename
    distro_codename=$(awk -F= '/^VERSION_CODENAME=/{print $2}' /etc/os-release)
    cat <<EOF >/etc/apt/sources.list.d/postgresql.sources
Types: deb
URIs: http://apt.postgresql.org/pub/repos/apt
Suites: $distro_codename-pgdg
Components: main
Architectures: $(dpkg --print-architecture)
Signed-By: /etc/apt/keyrings/postgresql.gpg
EOF
    return 0
    ;;

  *)
    msg_error "Unknown tool repository: $tool_name"
    return 1
    ;;
  esac

  return 0
}

# ------------------------------------------------------------------------------
# Unified package upgrade function (with apt update caching)
# ------------------------------------------------------------------------------
upgrade_package() {
  local package="$1"

  # Use same caching logic as ensure_dependencies: skip 'apt update' when it
  # ran within the last 5 minutes.
  local apt_cache_file="/var/cache/apt-update-timestamp"
  local current_time last_update=0
  current_time=$(date +%s)

  if [[ -f "$apt_cache_file" ]]; then
    last_update=$(cat "$apt_cache_file" 2>/dev/null || echo 0)
  fi

  if ((current_time - last_update > 300)); then
    $STD apt update || {
      msg_warn "APT update failed in upgrade_package - continuing with cached packages"
    }
    echo "$current_time" >"$apt_cache_file"
  fi

  $STD apt install --only-upgrade -y "$package" || {
    msg_warn "Failed to upgrade $package"
    return 1
  }
}

# ------------------------------------------------------------------------------
# Repository availability check
# Returns: 0 when the suite's Release file is reachable, 1 otherwise
# ------------------------------------------------------------------------------
verify_repo_available() {
  local repo_url="$1"
  local suite="$2"

  if curl -fsSL --max-time 10 "${repo_url}/dists/${suite}/Release" &>/dev/null; then
    return 0
  fi
  return 1
}

# ------------------------------------------------------------------------------
# Ensure dependencies are installed (with apt update caching)
# Arguments: package/command names; installs any that are missing
# ------------------------------------------------------------------------------
ensure_dependencies() {
  local deps=("$@")
  local missing=()
  local dep

  for dep in "${deps[@]}"; do
    if ! command -v "$dep" &>/dev/null && ! is_package_installed "$dep"; then
      missing+=("$dep")
    fi
  done

  if [[ ${#missing[@]} -gt 0 ]]; then
    # Only run apt update if not done recently (within last 5 minutes)
    local apt_cache_file="/var/cache/apt-update-timestamp"
    local current_time last_update=0
    current_time=$(date +%s)

    if [[ -f "$apt_cache_file" ]]; then
      last_update=$(cat "$apt_cache_file" 2>/dev/null || echo 0)
    fi

    if ((current_time - last_update > 300)); then
      # Ensure orphaned sources are cleaned before updating
      cleanup_orphaned_sources 2>/dev/null || true

      if ! $STD apt update; then
        ensure_apt_working || return 1
      fi
      echo "$current_time" >"$apt_cache_file"
    fi

    $STD apt install -y "${missing[@]}" || {
      msg_error "Failed to install dependencies: ${missing[*]}"
      return 1
    }
  fi
}

# ------------------------------------------------------------------------------
# Smart version comparison: returns 0 when $1 is strictly greater than $2
# (per 'sort -V' semantics)
# ------------------------------------------------------------------------------
version_gt() {
  test "$(printf '%s\n' "$@" | sort -V | head -n 1)" != "$1"
}

# ------------------------------------------------------------------------------
# Get system architecture (normalized to dpkg names: amd64/arm64)
# Arguments: $1 - source: "dpkg", "uname", or "both" (default: dpkg)
# ------------------------------------------------------------------------------
get_system_arch() {
  local arch_type="${1:-dpkg}"
  local arch

  case "$arch_type" in
  dpkg)
    arch=$(dpkg --print-architecture 2>/dev/null)
    ;;
  uname)
    arch=$(uname -m)
    [[ "$arch" == "x86_64" ]] && arch="amd64"
    [[ "$arch" == "aarch64" ]] && arch="arm64"
    ;;
  both | *)
    arch=$(dpkg --print-architecture 2>/dev/null || uname -m)
    [[ "$arch" == "x86_64" ]] && arch="amd64"
    [[ "$arch" == "aarch64" ]] && arch="arm64"
    ;;
  esac

  echo "$arch"
}

# ------------------------------------------------------------------------------
# Create temporary directory with automatic cleanup
------------------------------------------------------------------------------ +create_temp_dir() { + local tmp_dir=$(mktemp -d) + # Set trap to cleanup on EXIT, ERR, INT, TERM + trap "rm -rf '$tmp_dir'" EXIT ERR INT TERM + echo "$tmp_dir" +} + +# ------------------------------------------------------------------------------ +# Check if package is installed (faster than dpkg -l | grep) +# ------------------------------------------------------------------------------ +is_package_installed() { + local package="$1" + dpkg-query -W -f='${Status}' "$package" 2>/dev/null | grep -q "^install ok installed$" +} + +# ------------------------------------------------------------------------------ +# GitHub API call with authentication and rate limit handling +# ------------------------------------------------------------------------------ +github_api_call() { + local url="$1" + local output_file="${2:-/dev/stdout}" + local max_retries=3 + local retry_delay=2 + + local header_args=() + [[ -n "${GITHUB_TOKEN:-}" ]] && header_args=(-H "Authorization: Bearer $GITHUB_TOKEN") + + for attempt in $(seq 1 $max_retries); do + local http_code + http_code=$(curl -fsSL -w "%{http_code}" -o "$output_file" \ + -H "Accept: application/vnd.github+json" \ + -H "X-GitHub-Api-Version: 2022-11-28" \ + "${header_args[@]}" \ + "$url" 2>/dev/null || echo "000") + + case "$http_code" in + 200) + return 0 + ;; + 403) + # Rate limit - check if we can retry + if [[ $attempt -lt $max_retries ]]; then + msg_warn "GitHub API rate limit, waiting ${retry_delay}s... (attempt $attempt/$max_retries)" + sleep "$retry_delay" + retry_delay=$((retry_delay * 2)) + continue + fi + msg_error "GitHub API rate limit exceeded. Set GITHUB_TOKEN to increase limits." 
+ return 1 + ;; + 404) + msg_error "GitHub API endpoint not found: $url" + return 1 + ;; + *) + if [[ $attempt -lt $max_retries ]]; then + sleep "$retry_delay" + continue + fi + msg_error "GitHub API call failed with HTTP $http_code" + return 1 + ;; + esac + done + + return 1 +} + +should_upgrade() { + local current="$1" + local target="$2" + + [[ -z "$current" ]] && return 0 + version_gt "$target" "$current" && return 0 + return 1 +} + +# ------------------------------------------------------------------------------ +# Get OS information (cached for performance) +# ------------------------------------------------------------------------------ +get_os_info() { + local field="${1:-all}" # id, codename, version, version_id, all + + # Cache OS info to avoid repeated file reads + if [[ -z "${_OS_ID:-}" ]]; then + export _OS_ID=$(awk -F= '/^ID=/{gsub(/"/,"",$2); print $2}' /etc/os-release) + export _OS_CODENAME=$(awk -F= '/^VERSION_CODENAME=/{gsub(/"/,"",$2); print $2}' /etc/os-release) + export _OS_VERSION=$(awk -F= '/^VERSION_ID=/{gsub(/"/,"",$2); print $2}' /etc/os-release) + export _OS_VERSION_FULL=$(awk -F= '/^VERSION=/{gsub(/"/,"",$2); print $2}' /etc/os-release) + fi + + case "$field" in + id) echo "$_OS_ID" ;; + codename) echo "$_OS_CODENAME" ;; + version) echo "$_OS_VERSION" ;; + version_id) echo "$_OS_VERSION" ;; + version_full) echo "$_OS_VERSION_FULL" ;; + all) echo "ID=$_OS_ID CODENAME=$_OS_CODENAME VERSION=$_OS_VERSION" ;; + *) echo "$_OS_ID" ;; + esac +} + +# ------------------------------------------------------------------------------ +# Check if running on specific OS +# ------------------------------------------------------------------------------ +is_debian() { + [[ "$(get_os_info id)" == "debian" ]] +} + +is_ubuntu() { + [[ "$(get_os_info id)" == "ubuntu" ]] +} + +is_alpine() { + [[ "$(get_os_info id)" == "alpine" ]] +} + +# ------------------------------------------------------------------------------ +# Get Debian/Ubuntu major version +# 
# ------------------------------------------------------------------------------
get_os_version_major() {
  local full_version
  full_version=$(get_os_info version)
  echo "${full_version%%.*}"
}

# ------------------------------------------------------------------------------
# Download file with retry logic and optional progress bar
# Arguments: $1 - url, $2 - output path, $3 - max retries (default 3),
#            $4 - "true" to show progress
# ------------------------------------------------------------------------------
download_file() {
  local url="$1"
  local output="$2"
  local max_retries="${3:-3}"
  local show_progress="${4:-false}"
  local attempt

  local fetch_opts=(-fsSL)
  [[ "$show_progress" == "true" ]] && fetch_opts=(-fL#)

  for ((attempt = 1; attempt <= max_retries; attempt++)); do
    if curl "${fetch_opts[@]}" -o "$output" "$url"; then
      return 0
    fi
    if ((attempt < max_retries)); then
      msg_warn "Download failed, retrying... (attempt $attempt/$max_retries)"
      sleep 2
    fi
  done

  msg_error "Failed to download: $url"
  return 1
}

# ------------------------------------------------------------------------------
# Get fallback suite for repository (comprehensive mapping)
# Prefers the real codename when the repo serves it; otherwise maps
# testing/unknown releases onto the nearest supported stable/LTS suite.
# ------------------------------------------------------------------------------
get_fallback_suite() {
  local distro_id="$1"
  local distro_codename="$2"
  local repo_base_url="$3"

  # If the repo already serves this codename, use it as-is.
  if verify_repo_available "$repo_base_url" "$distro_codename"; then
    echo "$distro_codename"
    return 0
  fi

  case "$distro_id" in
  debian)
    case "$distro_codename" in
    bullseye) echo "bullseye" ;;
    # trixie/forky/sid and anything unknown fall back to latest stable;
    # bookworm maps to itself.
    *) echo "bookworm" ;;
    esac
    ;;
  ubuntu)
    case "$distro_codename" in
    # 24.10/25.04 fall back to 24.04 LTS; noble maps to itself.
    noble | oracular | plucky) echo "noble" ;;
    focal) echo "focal" ;;
    # mantic/lunar and anything unknown fall back to 22.04 LTS;
    # jammy maps to itself.
    *) echo "jammy" ;;
    esac
    ;;
  *)
    echo "$distro_codename"
    ;;
  esac
}

# ------------------------------------------------------------------------------
# Verify package source and version
# Returns: 0 when apt-cache policy output mentions the expected version
# ------------------------------------------------------------------------------
verify_package_source() {
  local package="$1"
  local expected_version="$2"
  apt-cache policy "$package" 2>/dev/null | grep -q "$expected_version"
}

# ------------------------------------------------------------------------------
# Check if running on an LTS (Ubuntu) or stable (Debian) release
# ------------------------------------------------------------------------------
is_lts_version() {
  local os_id codename
  os_id=$(get_os_info id)
  codename=$(get_os_info codename)

  case "$os_id" in
  ubuntu)
    case "$codename" in
    focal | jammy | noble) return 0 ;; # 20.04, 22.04, 24.04
    esac
    ;;
  debian)
    # Debian releases are all "stable"
    case "$codename" in
    bullseye | bookworm | trixie) return 0 ;;
    esac
    ;;
  esac

  return 1
}

# ------------------------------------------------------------------------------
# Get optimal number of parallel jobs (cached in _PARALLEL_JOBS)
# Capped by both CPU count and available memory (~1 GB per job)
# ------------------------------------------------------------------------------
get_parallel_jobs() {
  if [[ -z "${_PARALLEL_JOBS:-}" ]]; then
    local cpus mem_gb mem_cap jobs
    cpus=$(nproc 2>/dev/null || echo 1)
    mem_gb=$(free -g | awk '/^Mem:/{print $2}')

    # Assume roughly 1 GB per compile job; never go below 1.
    mem_cap=$((mem_gb > 0 ? mem_gb : 1))
    jobs=$((cpus < mem_cap ? cpus : mem_cap))

    export _PARALLEL_JOBS=$((jobs > 0 ? jobs : 1))
  fi
  echo "$_PARALLEL_JOBS"
}

# ------------------------------------------------------------------------------
# Get default PHP version for OS
# ------------------------------------------------------------------------------
get_default_php_version() {
  local os_id os_version
  os_id=$(get_os_info id)
  os_version=$(get_os_version_major)

  case "$os_id" in
  debian)
    case "$os_version" in
    13) echo "8.3" ;; # Debian 13 (Trixie)
    12) echo "8.2" ;; # Debian 12 (Bookworm)
    11) echo "7.4" ;; # Debian 11 (Bullseye)
    *) echo "8.2" ;;
    esac
    ;;
  ubuntu)
    case "$os_version" in
    24) echo "8.3" ;; # Ubuntu 24.04 LTS (Noble)
    22) echo "8.1" ;; # Ubuntu 22.04 LTS (Jammy)
    20) echo "7.4" ;; # Ubuntu 20.04 LTS (Focal)
    *) echo "8.1" ;;
    esac
    ;;
  *)
    echo "8.2"
    ;;
  esac
}

# ------------------------------------------------------------------------------
# Get default Python version for OS
# ------------------------------------------------------------------------------
get_default_python_version() {
  local os_id os_version
  os_id=$(get_os_info id)
  os_version=$(get_os_version_major)

  case "$os_id" in
  debian)
    case "$os_version" in
    13) echo "3.12" ;; # Debian 13 (Trixie)
    12) echo "3.11" ;; # Debian 12 (Bookworm)
    11) echo "3.9" ;;  # Debian 11 (Bullseye)
    *) echo "3.11" ;;
    esac
    ;;
  ubuntu)
    case "$os_version" in
    24) echo "3.12" ;; # Ubuntu 24.04 LTS
    22) echo "3.10" ;; # Ubuntu 22.04 LTS
    20) echo "3.8" ;;  # Ubuntu 20.04 LTS
    *) echo "3.10" ;;
    esac
    ;;
  *)
    echo "3.11"
    ;;
  esac
}

# ------------------------------------------------------------------------------
# Get default Node.js LTS version
# ------------------------------------------------------------------------------
get_default_nodejs_version() {
  # Always return current LTS (as of 2025)
  echo "22"
}

# ------------------------------------------------------------------------------
# Check if package manager is locked
------------------------------------------------------------------------------ +is_apt_locked() { + if fuser /var/lib/dpkg/lock-frontend &>/dev/null || + fuser /var/lib/apt/lists/lock &>/dev/null || + fuser /var/cache/apt/archives/lock &>/dev/null; then + return 0 + fi + return 1 +} + +# ------------------------------------------------------------------------------ +# Wait for apt to be available +# ------------------------------------------------------------------------------ +wait_for_apt() { + local max_wait="${1:-300}" # 5 minutes default + local waited=0 + + while is_apt_locked; do + if [[ $waited -ge $max_wait ]]; then + msg_error "Timeout waiting for apt to be available" + return 1 + fi + + sleep 5 + waited=$((waited + 5)) + done + + return 0 +} + +# ------------------------------------------------------------------------------ +# Cleanup old repository files (migration helper) +# ------------------------------------------------------------------------------ +cleanup_old_repo_files() { + local app="$1" + + # Remove old-style .list files (including backups) + rm -f /etc/apt/sources.list.d/"${app}"*.list + rm -f /etc/apt/sources.list.d/"${app}"*.list.save + rm -f /etc/apt/sources.list.d/"${app}"*.list.distUpgrade + rm -f /etc/apt/sources.list.d/"${app}"*.list.dpkg-* + + # Remove old GPG keys from trusted.gpg.d + rm -f /etc/apt/trusted.gpg.d/"${app}"*.gpg + + # Remove keyrings from /etc/apt/keyrings + rm -f /etc/apt/keyrings/"${app}"*.gpg + + # Remove ALL .sources files for this app (including the main one) + # This ensures no orphaned .sources files reference deleted keyrings + rm -f /etc/apt/sources.list.d/"${app}"*.sources +} + +# ------------------------------------------------------------------------------ +# Cleanup orphaned .sources files that reference missing keyrings +# This prevents APT signature verification errors +# Call this at the start of any setup function to ensure APT is in a clean state +# 
# ------------------------------------------------------------------------------
cleanup_orphaned_sources() {
  local sources_dir="/etc/apt/sources.list.d"
  local keyrings_dir="/etc/apt/keyrings"

  [[ ! -d "$sources_dir" ]] && return 0

  while IFS= read -r -d '' sources_file; do
    local basename_file
    basename_file=$(basename "$sources_file")

    # NEVER remove the distro's own stock repo definitions
    case "$basename_file" in
    debian.sources | ubuntu.sources) continue ;;
    esac

    # Extract Signed-By path from the .sources file
    local keyring_path
    keyring_path=$(grep -E '^Signed-By:' "$sources_file" 2>/dev/null | awk '{print $2}')

    # If the referenced keyring is gone the repo can no longer be verified;
    # drop the .sources file so 'apt update' keeps working
    if [[ -n "$keyring_path" ]] && [[ ! -f "$keyring_path" ]]; then
      rm -f "$sources_file"
    fi
  done < <(find "$sources_dir" -name "*.sources" -print0 2>/dev/null)

  # Also remove broken symlinks in the keyrings directory
  if [[ -d "$keyrings_dir" ]]; then
    find "$keyrings_dir" -type l ! -exec test -e {} \; -delete 2>/dev/null || true
  fi
}

# ------------------------------------------------------------------------------
# Ensure APT is in a working state before installing packages
# This should be called at the start of any setup function
# ------------------------------------------------------------------------------
ensure_apt_working() {
  # Clean up orphaned sources first
  cleanup_orphaned_sources

  # Try to update package lists
  if ! $STD apt update; then
    # Aggressive fallback: drop third-party .sources files, but keep the
    # distro's own repo files. (The previous blanket 'rm *.sources' also
    # deleted debian.sources, which cleanup_orphaned_sources explicitly
    # protects — removing it leaves the system without any usable repo.)
    find /etc/apt/sources.list.d -maxdepth 1 -name "*.sources" \
      ! -name "debian.sources" ! -name "ubuntu.sources" -delete 2>/dev/null || true
    cleanup_orphaned_sources

    # Try again
    if ! $STD apt update; then
      msg_error "Cannot update package lists - APT is critically broken"
      return 1
    fi
  fi

  return 0
}

# ------------------------------------------------------------------------------
# Standardized deb822 repository setup (with optional Architectures)
# Always runs apt update after repo creation to ensure package availability
# ------------------------------------------------------------------------------
setup_deb822_repo() {
  local name="$1"
  local gpg_url="$2"
  local repo_url="$3"
  local suite="$4"
  local component="${5-main}"
  local architectures="${6-}" # optional

  # Validate required parameters
  if [[ -z "$name" || -z "$gpg_url" || -z "$repo_url" || -z "$suite" ]]; then
    msg_error "setup_deb822_repo: missing required parameters (name=$name repo=$repo_url suite=$suite)"
    return 1
  fi

  # Start from a clean slate for this repo
  cleanup_old_repo_files "$name"
  cleanup_orphaned_sources

  mkdir -p /etc/apt/keyrings || {
    msg_error "Failed to create /etc/apt/keyrings"
    return 1
  }

  # Import GPG key (dearmored so deb822 Signed-By can reference it directly)
  curl -fsSL "$gpg_url" | gpg --dearmor --yes -o "/etc/apt/keyrings/${name}.gpg" || {
    msg_error "Failed to import GPG key for ${name}"
    return 1
  }

  # Write deb822 .sources file (quoted target: ${name} must not be split)
  {
    echo "Types: deb"
    echo "URIs: $repo_url"
    echo "Suites: $suite"
    echo "Components: $component"
    [[ -n "$architectures" ]] && echo "Architectures: $architectures"
    echo "Signed-By: /etc/apt/keyrings/${name}.gpg"
  } >"/etc/apt/sources.list.d/${name}.sources"

  $STD apt update
}

# ------------------------------------------------------------------------------
# Package version hold/unhold helpers
# ------------------------------------------------------------------------------
hold_package_version() {
  local package="$1"
  $STD apt-mark hold "$package"
}

unhold_package_version() {
  local package="$1"
  $STD apt-mark unhold "$package"
}

# ------------------------------------------------------------------------------
# Safe service restart with verification
# ------------------------------------------------------------------------------
safe_service_restart() {
  local service="$1"

  # Restart when already running, otherwise a plain start
  if systemctl is-active --quiet "$service"; then
    $STD systemctl restart "$service"
  else
    $STD systemctl start "$service"
  fi

  # Verify the unit actually came up
  if systemctl is-active --quiet "$service"; then
    return 0
  fi
  msg_error "Failed to start $service"
  systemctl status "$service" --no-pager
  return 1
}

# ------------------------------------------------------------------------------
# Enable and start service (with error handling)
# ------------------------------------------------------------------------------
enable_and_start_service() {
  local service="$1"

  systemctl enable "$service" &>/dev/null || return 1

  if ! systemctl start "$service" &>/dev/null; then
    msg_error "Failed to start $service"
    systemctl status "$service" --no-pager
    return 1
  fi

  return 0
}

# ------------------------------------------------------------------------------
# Check if service is enabled
# ------------------------------------------------------------------------------
is_service_enabled() {
  systemctl is-enabled --quiet "$1" 2>/dev/null
}

# ------------------------------------------------------------------------------
# Check if service is running
# ------------------------------------------------------------------------------
is_service_running() {
  systemctl is-active --quiet "$1" 2>/dev/null
}

# ------------------------------------------------------------------------------
# Extract version from JSON (GitHub releases)
# ------------------------------------------------------------------------------
extract_version_from_json() {
  local json="$1"
  local field="${2:-tag_name}"
  local strip_v="${3:-true}"

  ensure_dependencies jq

  local version
  version=$(jq -r ".${field} // empty" <<<"$json")
  [[ -n "$version" ]] || return 1

  if [[ "$strip_v" == "true" ]]; then
    echo "${version#v}"
  else
    echo "$version"
  fi
}

# ------------------------------------------------------------------------------
# Get latest GitHub release version
# ------------------------------------------------------------------------------
get_latest_github_release() {
  local repo="$1"
  local strip_v="${2:-true}"
  local tmp_json
  tmp_json=$(mktemp)

  if ! github_api_call "https://api.github.com/repos/${repo}/releases/latest" "$tmp_json"; then
    rm -f "$tmp_json"
    return 1
  fi

  local version
  version=$(extract_version_from_json "$(cat "$tmp_json")" "tag_name" "$strip_v")
  rm -f "$tmp_json"

  [[ -n "$version" ]] || return 1
  echo "$version"
}

# ------------------------------------------------------------------------------
# Debug logging (only if DEBUG=1); returns non-zero when DEBUG is off
# ------------------------------------------------------------------------------
debug_log() {
  [[ "${DEBUG:-0}" == "1" ]] && echo "[DEBUG] $*" >&2
}

# ------------------------------------------------------------------------------
# Performance timing helpers: start_timer emits an epoch timestamp,
# end_timer computes the elapsed seconds.
# ------------------------------------------------------------------------------
start_timer() {
  date +%s
}

end_timer() {
  local started="$1"
  local label="${2:-Operation}"
  local finished elapsed
  finished=$(date +%s)
  elapsed=$((finished - started))
  # NOTE(review): elapsed/label are computed but never printed or logged —
  # the original behaved the same way; confirm before emitting any output,
  # since callers may capture stdout.
  : "$elapsed" "$label"
}

# ------------------------------------------------------------------------------
# GPG key fingerprint verification
# ------------------------------------------------------------------------------
verify_gpg_fingerprint() {
  local key_file="$1"
  local expected_fingerprint="$2"

  # First fpr: record carries the primary key fingerprint (colon format, field 10)
  local actual_fingerprint
  actual_fingerprint=$(gpg --show-keys --with-fingerprint --with-colons "$key_file" 2>&1 | grep -m1 '^fpr:' | cut -d: -f10)

  if [[ "$actual_fingerprint" == "$expected_fingerprint" ]]; then
    return 0
  fi

  msg_error "GPG fingerprint mismatch! Expected: $expected_fingerprint, Got: $actual_fingerprint"
  return 1
}

# ==============================================================================
# INSTALL FUNCTIONS
# ==============================================================================

# ------------------------------------------------------------------------------
# Checks for new GitHub release (latest tag).
#
# Description:
# - Queries the GitHub API for the latest stable release tag
# - Compares it to a local cached version (~/.<app>)
# - If newer, sets global CHECK_UPDATE_RELEASE and returns 0
#
# Usage:
#   if check_for_gh_release "flaresolverr" "FlareSolverr/FlareSolverr" [optional] "v1.1.1"; then
#     # trigger update...
#   fi
#
# Notes:
# - Requires `jq` (auto-installed if missing)
# - Does not modify anything, only checks version state
# - Does not support pre-releases
# ------------------------------------------------------------------------------
check_for_gh_release() {
  local app="$1"
  local source="$2"
  local pinned_version_in="${3:-}" # optional
  local app_lc="${app,,}"
  local current_file="$HOME/.${app_lc}"

  msg_info "Checking for update: ${app}"

  # Bail out early when GitHub cannot be resolved
  if ! getent hosts api.github.com >/dev/null 2>&1; then
    msg_error "Network error: cannot resolve api.github.com"
    return 1
  fi

  ensure_dependencies jq

  # Fetch the release list; drafts and prereleases are filtered below
  local releases_json
  releases_json=$(curl -fsSL --max-time 20 \
    -H 'Accept: application/vnd.github+json' \
    -H 'X-GitHub-Api-Version: 2022-11-28' \
    "https://api.github.com/repos/${source}/releases") || {
    msg_error "Unable to fetch releases for ${app}"
    return 1
  }

  local raw_tags=()
  mapfile -t raw_tags < <(jq -r '.[] | select(.draft==false and .prerelease==false) | .tag_name' <<<"$releases_json")
  if ((${#raw_tags[@]} == 0)); then
    msg_error "No stable releases found for ${app}"
    return 1
  fi

  # Keep both raw tags (with 'v') and cleaned ones (without) in parallel arrays
  local clean_tags=() t
  for t in "${raw_tags[@]}"; do
    clean_tags+=("${t#v}")
  done

  local latest_raw="${raw_tags[0]}"
  local latest_clean="${clean_tags[0]}"

  # Currently installed version (stored without leading 'v')
  local current=""
  if [[ -f "$current_file" ]]; then
    current="$(<"$current_file")"
  else
    # Migration: adopt a single legacy /opt/*_version.txt file if present
    local legacy_files=()
    mapfile -t legacy_files < <(find /opt -maxdepth 1 -type f -name "*_version.txt" 2>/dev/null)
    if ((${#legacy_files[@]} == 1)); then
      current="$(<"${legacy_files[0]}")"
      echo "${current#v}" >"$current_file"
      rm -f "${legacy_files[0]}"
    fi
  fi
  current="${current#v}"

  # Pinned version handling
  if [[ -n "$pinned_version_in" ]]; then
    local pin_clean="${pinned_version_in#v}"
    local match_raw="" idx
    for idx in "${!clean_tags[@]}"; do
      if [[ "${clean_tags[$idx]}" == "$pin_clean" ]]; then
        match_raw="${raw_tags[$idx]}"
        break
      fi
    done

    if [[ -z "$match_raw" ]]; then
      msg_error "Pinned version ${pinned_version_in} not found upstream"
      return 1
    fi

    if [[ "$current" != "$pin_clean" ]]; then
      CHECK_UPDATE_RELEASE="$match_raw"
      msg_ok "Update available: ${app} ${current:-not installed} → ${pin_clean}"
      return 0
    fi

    msg_ok "No update available: ${app} is already on pinned version (${current})"
    return 1
  fi

  # No pin → compare against latest stable
  if [[ -z "$current" || "$current" != "$latest_clean" ]]; then
    CHECK_UPDATE_RELEASE="$latest_raw"
    msg_ok "Update available: ${app} ${current:-not installed} → ${latest_clean}"
    return 0
  fi

  msg_ok "No update available: ${app} (${latest_clean})"
  return 1
}

# ------------------------------------------------------------------------------
# Creates and installs self-signed certificates.
#
# Description:
# - Create a self-signed certificate with option to override application name
#
# Variables:
#   APP - Application name (default: $APPLICATION variable)
# ------------------------------------------------------------------------------
create_self_signed_cert() {
  local APP_NAME="${1:-${APPLICATION}}"
  local CERT_DIR="/etc/ssl/${APP_NAME}"
  local CERT_KEY="${CERT_DIR}/${APP_NAME}.key"
  local CERT_CRT="${CERT_DIR}/${APP_NAME}.crt"

  # Idempotent: keep an existing cert/key pair untouched
  if [[ -f "$CERT_CRT" && -f "$CERT_KEY" ]]; then
    return 0
  fi

  ensure_dependencies openssl || {
    msg_error "Failed to install OpenSSL"
    return 1
  }

  mkdir -p "$CERT_DIR"
  $STD openssl req -new -newkey rsa:2048 -days 365 -nodes -x509 \
    -subj "/C=US/ST=State/L=City/O=Organization/CN=${APP_NAME}" \
    -keyout "$CERT_KEY" \
    -out "$CERT_CRT" || {
    msg_error "Failed to create self-signed certificate"
    return 1
  }

  # Key is secret, certificate is public
  chmod 600 "$CERT_KEY"
  chmod 644 "$CERT_CRT"
}

# ------------------------------------------------------------------------------
# Downloads file with optional progress indicator using pv.
+# +# Arguments: +# $1 - URL +# $2 - Destination path +# ------------------------------------------------------------------------------ + +function download_with_progress() { + local url="$1" + local output="$2" + if [ -n "$SPINNER_PID" ] && ps -p "$SPINNER_PID" >/dev/null; then kill "$SPINNER_PID" >/dev/null; fi + + ensure_dependencies pv + set -o pipefail + + # Content-Length aus HTTP-Header holen + local content_length + content_length=$(curl -fsSLI "$url" | awk '/Content-Length/ {print $2}' | tr -d '\r' || true) + + if [[ -z "$content_length" ]]; then + if ! curl -fL# -o "$output" "$url"; then + msg_error "Download failed" + return 1 + fi + else + if ! curl -fsSL "$url" | pv -s "$content_length" >"$output"; then + msg_error "Download failed" + return 1 + fi + fi +} + +# ------------------------------------------------------------------------------ +# Ensures /usr/local/bin is permanently in system PATH. +# +# Description: +# - Adds to /etc/profile.d if not present +# ------------------------------------------------------------------------------ + +function ensure_usr_local_bin_persist() { + local PROFILE_FILE="/etc/profile.d/custom_path.sh" + + if [[ ! -f "$PROFILE_FILE" ]] && ! command -v pveversion &>/dev/null; then + echo 'export PATH="/usr/local/bin:$PATH"' >"$PROFILE_FILE" + chmod +x "$PROFILE_FILE" + fi +} + +# ------------------------------------------------------------------------------ +# Downloads and deploys latest GitHub release (source, binary, tarball, asset). +# +# Description: +# - Fetches latest release metadata from GitHub API +# - Supports the following modes: +# - tarball: Source code tarball (default if omitted) +# - source: Alias for tarball (same behavior) +# - binary: .deb package install (arch-dependent) +# - prebuild: Prebuilt .tar.gz archive (e.g. Go binaries) +# - singlefile: Standalone binary (no archive, direct chmod +x install) +# - Handles download, extraction/installation and version tracking in ~/. 
+# +# Parameters: +# $1 APP - Application name (used for install path and version file) +# $2 REPO - GitHub repository in form user/repo +# $3 MODE - Release type: +# tarball → source tarball (.tar.gz) +# binary → .deb file (auto-arch matched) +# prebuild → prebuilt archive (e.g. tar.gz) +# singlefile→ standalone binary (chmod +x) +# $4 VERSION - Optional release tag (default: latest) +# $5 TARGET_DIR - Optional install path (default: /opt/) +# $6 ASSET_FILENAME - Required for: +# - prebuild → archive filename or pattern +# - singlefile→ binary filename or pattern +# +# Optional: +# - Set GITHUB_TOKEN env var to increase API rate limit (recommended for CI/CD). +# +# Examples: +# # 1. Minimal: Fetch and deploy source tarball +# fetch_and_deploy_gh_release "myapp" "myuser/myapp" +# +# # 2. Binary install via .deb asset (architecture auto-detected) +# fetch_and_deploy_gh_release "myapp" "myuser/myapp" "binary" +# +# # 3. Prebuilt archive (.tar.gz) with asset filename match +# fetch_and_deploy_gh_release "hanko" "teamhanko/hanko" "prebuild" "latest" "/opt/hanko" "hanko_Linux_x86_64.tar.gz" +# +# # 4. Single binary (chmod +x) like Argus, Promtail etc. 
+# fetch_and_deploy_gh_release "argus" "release-argus/Argus" "singlefile" "0.26.3" "/opt/argus" "Argus-.*linux-amd64" +# ------------------------------------------------------------------------------ + +function fetch_and_deploy_gh_release() { + local app="$1" + local repo="$2" + local mode="${3:-tarball}" # tarball | binary | prebuild | singlefile + local version="${4:-latest}" + local target="${5:-/opt/$app}" + local asset_pattern="${6:-}" + + local app_lc=$(echo "${app,,}" | tr -d ' ') + local version_file="$HOME/.${app_lc}" + + local api_timeout="--connect-timeout 10 --max-time 60" + local download_timeout="--connect-timeout 15 --max-time 900" + + local current_version="" + [[ -f "$version_file" ]] && current_version=$(<"$version_file") + + ensure_dependencies jq + + local api_url="https://api.github.com/repos/$repo/releases" + [[ "$version" != "latest" ]] && api_url="$api_url/tags/$version" || api_url="$api_url/latest" + local header=() + [[ -n "${GITHUB_TOKEN:-}" ]] && header=(-H "Authorization: token $GITHUB_TOKEN") + + # dns pre check + local gh_host + gh_host=$(awk -F/ '{print $3}' <<<"$api_url") + if ! getent hosts "$gh_host" &>/dev/null; then + msg_error "DNS resolution failed for $gh_host – check /etc/resolv.conf or networking" + return 1 + fi + + local max_retries=3 retry_delay=2 attempt=1 success=false resp http_code + + while ((attempt <= max_retries)); do + resp=$(curl $api_timeout -fsSL -w "%{http_code}" -o /tmp/gh_rel.json "${header[@]}" "$api_url") && success=true && break + sleep "$retry_delay" + ((attempt++)) + done + + if ! 
$success; then + msg_error "Failed to fetch release metadata from $api_url after $max_retries attempts" + return 1 + fi + + http_code="${resp:(-3)}" + [[ "$http_code" != "200" ]] && { + msg_error "GitHub API returned HTTP $http_code" + return 1 + } + + local json tag_name + json=$(/dev/null || uname -m) + [[ "$arch" == "x86_64" ]] && arch="amd64" + [[ "$arch" == "aarch64" ]] && arch="arm64" + + local assets url_match="" + assets=$(echo "$json" | jq -r '.assets[].browser_download_url') + + # If explicit filename pattern is provided (param $6), match that first + if [[ -n "$asset_pattern" ]]; then + for u in $assets; do + case "${u##*/}" in + $asset_pattern) + url_match="$u" + break + ;; + esac + done + fi + + # If no match via explicit pattern, fall back to architecture heuristic + if [[ -z "$url_match" ]]; then + for u in $assets; do + if [[ "$u" =~ ($arch|amd64|x86_64|aarch64|arm64).*\.deb$ ]]; then + url_match="$u" + break + fi + done + fi + + # Fallback: any .deb file + if [[ -z "$url_match" ]]; then + for u in $assets; do + [[ "$u" =~ \.deb$ ]] && url_match="$u" && break + done + fi + + if [[ -z "$url_match" ]]; then + msg_error "No suitable .deb asset found for $app" + rm -rf "$tmpdir" + return 1 + fi + + filename="${url_match##*/}" + curl $download_timeout -fsSL -o "$tmpdir/$filename" "$url_match" || { + msg_error "Download failed: $url_match" + rm -rf "$tmpdir" + return 1 + } + + chmod 644 "$tmpdir/$filename" + $STD apt install -y "$tmpdir/$filename" || { + $STD dpkg -i "$tmpdir/$filename" || { + msg_error "Both apt and dpkg installation failed" + rm -rf "$tmpdir" + return 1 + } + } + + ### Prebuild Mode ### + elif [[ "$mode" == "prebuild" ]]; then + local pattern="${6%\"}" + pattern="${pattern#\"}" + [[ -z "$pattern" ]] && { + msg_error "Mode 'prebuild' requires 6th parameter (asset filename pattern)" + rm -rf "$tmpdir" + return 1 + } + + local asset_url="" + for u in $(echo "$json" | jq -r '.assets[].browser_download_url'); do + 
filename_candidate="${u##*/}" + case "$filename_candidate" in + $pattern) + asset_url="$u" + break + ;; + esac + done + + [[ -z "$asset_url" ]] && { + msg_error "No asset matching '$pattern' found" + rm -rf "$tmpdir" + return 1 + } + + filename="${asset_url##*/}" + curl $download_timeout -fsSL -o "$tmpdir/$filename" "$asset_url" || { + msg_error "Download failed: $asset_url" + rm -rf "$tmpdir" + return 1 + } + + local unpack_tmp + unpack_tmp=$(mktemp -d) + mkdir -p "$target" + if [[ "${CLEAN_INSTALL:-0}" == "1" ]]; then + rm -rf "${target:?}/"* + fi + + if [[ "$filename" == *.zip ]]; then + ensure_dependencies unzip + unzip -q "$tmpdir/$filename" -d "$unpack_tmp" || { + msg_error "Failed to extract ZIP archive" + rm -rf "$tmpdir" "$unpack_tmp" + return 1 + } + elif [[ "$filename" == *.tar.* || "$filename" == *.tgz ]]; then + tar --no-same-owner -xf "$tmpdir/$filename" -C "$unpack_tmp" || { + msg_error "Failed to extract TAR archive" + rm -rf "$tmpdir" "$unpack_tmp" + return 1 + } + else + msg_error "Unsupported archive format: $filename" + rm -rf "$tmpdir" "$unpack_tmp" + return 1 + fi + + local top_dirs + top_dirs=$(find "$unpack_tmp" -mindepth 1 -maxdepth 1 -type d | wc -l) + local top_entries inner_dir + top_entries=$(find "$unpack_tmp" -mindepth 1 -maxdepth 1) + if [[ "$(echo "$top_entries" | wc -l)" -eq 1 && -d "$top_entries" ]]; then + # Strip leading folder + inner_dir="$top_entries" + shopt -s dotglob nullglob + if compgen -G "$inner_dir/*" >/dev/null; then + cp -r "$inner_dir"/* "$target/" || { + msg_error "Failed to copy contents from $inner_dir to $target" + rm -rf "$tmpdir" "$unpack_tmp" + return 1 + } + else + msg_error "Inner directory is empty: $inner_dir" + rm -rf "$tmpdir" "$unpack_tmp" + return 1 + fi + shopt -u dotglob nullglob + else + # Copy all contents + shopt -s dotglob nullglob + if compgen -G "$unpack_tmp/*" >/dev/null; then + cp -r "$unpack_tmp"/* "$target/" || { + msg_error "Failed to copy contents to $target" + rm -rf "$tmpdir" 
"$unpack_tmp" + return 1 + } + else + msg_error "Unpacked archive is empty" + rm -rf "$tmpdir" "$unpack_tmp" + return 1 + fi + shopt -u dotglob nullglob + fi + + ### Singlefile Mode ### + elif [[ "$mode" == "singlefile" ]]; then + local pattern="${6%\"}" + pattern="${pattern#\"}" + [[ -z "$pattern" ]] && { + msg_error "Mode 'singlefile' requires 6th parameter (asset filename pattern)" + rm -rf "$tmpdir" + return 1 + } + + local asset_url="" + for u in $(echo "$json" | jq -r '.assets[].browser_download_url'); do + filename_candidate="${u##*/}" + case "$filename_candidate" in + $pattern) + asset_url="$u" + break + ;; + esac + done + + [[ -z "$asset_url" ]] && { + msg_error "No asset matching '$pattern' found" + rm -rf "$tmpdir" + return 1 + } + + filename="${asset_url##*/}" + mkdir -p "$target" + + local use_filename="${USE_ORIGINAL_FILENAME:-false}" + local target_file="$app" + [[ "$use_filename" == "true" ]] && target_file="$filename" + + curl $download_timeout -fsSL -o "$target/$target_file" "$asset_url" || { + msg_error "Download failed: $asset_url" + rm -rf "$tmpdir" + return 1 + } + + if [[ "$target_file" != *.jar && -f "$target/$target_file" ]]; then + chmod +x "$target/$target_file" + fi + + else + msg_error "Unknown mode: $mode" + rm -rf "$tmpdir" + return 1 + fi + + echo "$version" >"$version_file" + msg_ok "Deployed: $app ($version)" + rm -rf "$tmpdir" +} + +# ------------------------------------------------------------------------------ +# Loads LOCAL_IP from persistent store or detects if missing. 
+# +# Description: +# - Loads from /run/local-ip.env or performs runtime lookup +# ------------------------------------------------------------------------------ + +function import_local_ip() { + local IP_FILE="/run/local-ip.env" + if [[ -f "$IP_FILE" ]]; then + # shellcheck disable=SC1090 + source "$IP_FILE" + fi + + if [[ -z "${LOCAL_IP:-}" ]]; then + get_current_ip() { + local targets=("8.8.8.8" "1.1.1.1" "192.168.1.1" "10.0.0.1" "172.16.0.1" "default") + local ip + + for target in "${targets[@]}"; do + if [[ "$target" == "default" ]]; then + ip=$(ip route get 1 2>/dev/null | awk '{for(i=1;i<=NF;i++) if ($i=="src") print $(i+1)}') + else + ip=$(ip route get "$target" 2>/dev/null | awk '{for(i=1;i<=NF;i++) if ($i=="src") print $(i+1)}') + fi + if [[ -n "$ip" ]]; then + echo "$ip" + return 0 + fi + done + + return 1 + } + + LOCAL_IP="$(get_current_ip || true)" + if [[ -z "$LOCAL_IP" ]]; then + msg_error "Could not determine LOCAL_IP" + return 1 + fi + fi + + export LOCAL_IP +} + +# ------------------------------------------------------------------------------ +# Installs Adminer (Debian/Ubuntu via APT, Alpine via direct download). 
+# +# Description: +# - Adds Adminer to Apache or web root +# - Supports Alpine and Debian-based systems +# ------------------------------------------------------------------------------ + +function setup_adminer() { + if grep -qi alpine /etc/os-release; then + msg_info "Setup Adminer (Alpine)" + mkdir -p /var/www/localhost/htdocs/adminer + curl -fsSL https://github.com/vrana/adminer/releases/latest/download/adminer.php \ + -o /var/www/localhost/htdocs/adminer/index.php || { + msg_error "Failed to download Adminer" + return 1 + } + cache_installed_version "adminer" "latest-alpine" + msg_ok "Setup Adminer (Alpine)" + else + msg_info "Setup Adminer (Debian/Ubuntu)" + ensure_dependencies adminer + $STD a2enconf adminer || { + msg_error "Failed to enable Adminer Apache config" + return 1 + } + $STD systemctl reload apache2 || { + msg_error "Failed to reload Apache" + return 1 + } + local VERSION + VERSION=$(dpkg -s adminer 2>/dev/null | grep '^Version:' | awk '{print $2}') + cache_installed_version "adminer" "${VERSION:-unknown}" + msg_ok "Setup Adminer (Debian/Ubuntu)" + fi +} + +# ------------------------------------------------------------------------------ +# Installs or updates Composer globally (robust, idempotent). +# +# - Installs to /usr/local/bin/composer +# - Removes old binaries/symlinks in /usr/bin, /bin, /root/.composer, etc. 
+# - Ensures /usr/local/bin is in PATH (permanent) +# - Auto-updates to latest version +# ------------------------------------------------------------------------------ + +function setup_composer() { + local COMPOSER_BIN="/usr/local/bin/composer" + export COMPOSER_ALLOW_SUPERUSER=1 + + # Get currently installed version + local INSTALLED_VERSION="" + if [[ -x "$COMPOSER_BIN" ]]; then + INSTALLED_VERSION=$("$COMPOSER_BIN" --version 2>/dev/null | awk '{print $3}') + fi + + # Scenario 1: Already installed - just self-update + if [[ -n "$INSTALLED_VERSION" ]]; then + msg_info "Update Composer $INSTALLED_VERSION" + $STD "$COMPOSER_BIN" self-update --no-interaction || true + local UPDATED_VERSION + UPDATED_VERSION=$("$COMPOSER_BIN" --version 2>/dev/null | awk '{print $3}') + cache_installed_version "composer" "$UPDATED_VERSION" + msg_ok "Update Composer $UPDATED_VERSION" + return 0 + fi + + # Scenario 2: Fresh install + msg_info "Setup Composer" + + for old in /usr/bin/composer /bin/composer /root/.composer/vendor/bin/composer; do + [[ -e "$old" && "$old" != "$COMPOSER_BIN" ]] && rm -f "$old" + done + + ensure_usr_local_bin_persist + export PATH="/usr/local/bin:$PATH" + + curl -fsSL https://getcomposer.org/installer -o /tmp/composer-setup.php || { + msg_error "Failed to download Composer installer" + return 1 + } + + $STD php /tmp/composer-setup.php --install-dir=/usr/local/bin --filename=composer || { + msg_error "Failed to install Composer" + rm -f /tmp/composer-setup.php + return 1 + } + rm -f /tmp/composer-setup.php + + if [[ ! 
-x "$COMPOSER_BIN" ]]; then + msg_error "Composer installation failed" + return 1 + fi + + chmod +x "$COMPOSER_BIN" + $STD "$COMPOSER_BIN" self-update --no-interaction || true + + local FINAL_VERSION + FINAL_VERSION=$("$COMPOSER_BIN" --version 2>/dev/null | awk '{print $3}') + cache_installed_version "composer" "$FINAL_VERSION" + msg_ok "Setup Composer" +} + +# ------------------------------------------------------------------------------ +# Installs FFmpeg from source or prebuilt binary (Debian/Ubuntu only). +# +# Description: +# - Downloads and builds FFmpeg from GitHub (https://github.com/FFmpeg/FFmpeg) +# - Supports specific version override via FFMPEG_VERSION (e.g. n7.1.1) +# - Supports build profile via FFMPEG_TYPE: +# - minimal : x264, vpx, mp3 only +# - medium : adds subtitles, fonts, opus, vorbis +# - full : adds dav1d, svt-av1, zlib, numa +# - binary : downloads static build (johnvansickle.com) +# - Defaults to latest stable version and full feature set +# +# Notes: +# - Requires: curl, jq, build-essential, and matching codec libraries +# - Result is installed to /usr/local/bin/ffmpeg +# ------------------------------------------------------------------------------ + +function setup_ffmpeg() { + local TMP_DIR=$(mktemp -d) + local GITHUB_REPO="FFmpeg/FFmpeg" + local VERSION="${FFMPEG_VERSION:-latest}" + local TYPE="${FFMPEG_TYPE:-full}" + local BIN_PATH="/usr/local/bin/ffmpeg" + + # Get currently installed version + local INSTALLED_VERSION="" + if command -v ffmpeg &>/dev/null; then + INSTALLED_VERSION=$(ffmpeg -version 2>/dev/null | head -n1 | awk '{print $3}') + fi + + msg_info "Setup FFmpeg ${VERSION} ($TYPE)" + + # Binary fallback mode + if [[ "$TYPE" == "binary" ]]; then + curl -fsSL https://johnvansickle.com/ffmpeg/releases/ffmpeg-release-amd64-static.tar.xz -o "$TMP_DIR/ffmpeg.tar.xz" || { + msg_error "Failed to download FFmpeg binary" + rm -rf "$TMP_DIR" + return 1 + } + tar -xf "$TMP_DIR/ffmpeg.tar.xz" -C "$TMP_DIR" || { + msg_error "Failed to 
extract FFmpeg binary" + rm -rf "$TMP_DIR" + return 1 + } + local EXTRACTED_DIR + EXTRACTED_DIR=$(find "$TMP_DIR" -maxdepth 1 -type d -name "ffmpeg-*") + cp "$EXTRACTED_DIR/ffmpeg" "$BIN_PATH" + cp "$EXTRACTED_DIR/ffprobe" /usr/local/bin/ffprobe + chmod +x "$BIN_PATH" /usr/local/bin/ffprobe + local FINAL_VERSION=$($BIN_PATH -version 2>/dev/null | head -n1 | awk '{print $3}') + rm -rf "$TMP_DIR" + cache_installed_version "ffmpeg" "$FINAL_VERSION" + ensure_usr_local_bin_persist + [[ -n "$INSTALLED_VERSION" ]] && msg_ok "Upgrade FFmpeg $INSTALLED_VERSION → $FINAL_VERSION" || msg_ok "Setup FFmpeg $FINAL_VERSION" + return 0 + fi + + ensure_dependencies jq + + # Auto-detect latest stable version if none specified + if [[ "$VERSION" == "latest" || -z "$VERSION" ]]; then + local ffmpeg_tags + ffmpeg_tags=$(curl -fsSL --max-time 15 "https://api.github.com/repos/${GITHUB_REPO}/tags" 2>/dev/null || echo "") + + if [[ -z "$ffmpeg_tags" ]]; then + msg_warn "Could not fetch FFmpeg versions from GitHub, trying binary fallback" + VERSION="" # Will trigger binary fallback below + else + VERSION=$(echo "$ffmpeg_tags" | jq -r '.[].name' 2>/dev/null | + grep -E '^n[0-9]+\.[0-9]+\.[0-9]+$' | + sort -V | tail -n1 || echo "") + fi + fi + + if [[ -z "$VERSION" ]]; then + msg_info "Could not determine FFmpeg source version, using pre-built binary" + VERSION="" # Will use binary fallback + fi + + # Dependency selection + local DEPS=(build-essential yasm nasm pkg-config) + case "$TYPE" in + minimal) + DEPS+=(libx264-dev libvpx-dev libmp3lame-dev) + ;; + medium) + DEPS+=(libx264-dev libvpx-dev libmp3lame-dev libfreetype6-dev libass-dev libopus-dev libvorbis-dev) + ;; + full) + DEPS+=( + libx264-dev libx265-dev libvpx-dev libmp3lame-dev + libfreetype6-dev libass-dev libopus-dev libvorbis-dev + libdav1d-dev libsvtav1-dev zlib1g-dev libnuma-dev + libva-dev libdrm-dev + ) + ;; + *) + msg_error "Invalid FFMPEG_TYPE: $TYPE" + rm -rf "$TMP_DIR" + return 1 + ;; + esac + + ensure_dependencies 
"${DEPS[@]}" + + # Try to download source if VERSION is set + if [[ -n "$VERSION" ]]; then + curl -fsSL "https://github.com/${GITHUB_REPO}/archive/refs/tags/${VERSION}.tar.gz" -o "$TMP_DIR/ffmpeg.tar.gz" || { + msg_warn "Failed to download FFmpeg source ${VERSION}, falling back to pre-built binary" + VERSION="" + } + fi + + # If no source download (either VERSION empty or download failed), use binary + if [[ -z "$VERSION" ]]; then + msg_info "Setup FFmpeg from pre-built binary" + curl -fsSL https://johnvansickle.com/ffmpeg/releases/ffmpeg-release-amd64-static.tar.xz -o "$TMP_DIR/ffmpeg.tar.xz" || { + msg_error "Failed to download FFmpeg pre-built binary" + rm -rf "$TMP_DIR" + return 1 + } + + tar -xJf "$TMP_DIR/ffmpeg.tar.xz" -C "$TMP_DIR" || { + msg_error "Failed to extract FFmpeg binary archive" + rm -rf "$TMP_DIR" + return 1 + } + + if ! cp "$TMP_DIR/ffmpeg-"*/ffmpeg /usr/local/bin/ffmpeg 2>/dev/null; then + msg_error "Failed to install FFmpeg binary" + rm -rf "$TMP_DIR" + return 1 + fi + + cache_installed_version "ffmpeg" "static" + rm -rf "$TMP_DIR" + msg_ok "Setup FFmpeg from pre-built binary" + return 0 + fi + + tar -xzf "$TMP_DIR/ffmpeg.tar.gz" -C "$TMP_DIR" || { + msg_error "Failed to extract FFmpeg source" + rm -rf "$TMP_DIR" + return 1 + } + + cd "$TMP_DIR/FFmpeg-"* || { + msg_error "Source extraction failed" + rm -rf "$TMP_DIR" + return 1 + } + + local args=( + --enable-gpl + --enable-shared + --enable-nonfree + --disable-static + --enable-libx264 + --enable-libvpx + --enable-libmp3lame + ) + + if [[ "$TYPE" != "minimal" ]]; then + args+=(--enable-libfreetype --enable-libass --enable-libopus --enable-libvorbis) + fi + + if [[ "$TYPE" == "full" ]]; then + args+=(--enable-libx265 --enable-libdav1d --enable-zlib) + args+=(--enable-vaapi --enable-libdrm) + fi + + if [[ ${#args[@]} -eq 0 ]]; then + msg_error "FFmpeg configure args array is empty" + rm -rf "$TMP_DIR" + return 1 + fi + + $STD ./configure "${args[@]}" || { + msg_error "FFmpeg configure failed" 
+ rm -rf "$TMP_DIR" + return 1 + } + $STD make -j"$(nproc)" || { + msg_error "FFmpeg compilation failed" + rm -rf "$TMP_DIR" + return 1 + } + $STD make install || { + msg_error "FFmpeg installation failed" + rm -rf "$TMP_DIR" + return 1 + } + echo "/usr/local/lib" >/etc/ld.so.conf.d/ffmpeg.conf + $STD ldconfig + + ldconfig -p 2>/dev/null | grep libavdevice >/dev/null || { + msg_error "libavdevice not registered with dynamic linker" + rm -rf "$TMP_DIR" + return 1 + } + + if ! command -v ffmpeg &>/dev/null; then + msg_error "FFmpeg installation failed" + rm -rf "$TMP_DIR" + return 1 + fi + + local FINAL_VERSION + FINAL_VERSION=$(ffmpeg -version 2>/dev/null | head -n1 | awk '{print $3}') + rm -rf "$TMP_DIR" + cache_installed_version "ffmpeg" "$FINAL_VERSION" + ensure_usr_local_bin_persist + [[ -n "$INSTALLED_VERSION" ]] && msg_ok "Upgrade FFmpeg $INSTALLED_VERSION → $FINAL_VERSION" || msg_ok "Setup FFmpeg $FINAL_VERSION" +} + +# ------------------------------------------------------------------------------ +# Installs Go (Golang) from official tarball. +# +# Description: +# - Determines system architecture +# - Downloads latest version if GO_VERSION not set +# +# Variables: +# GO_VERSION - Version to install (e.g. 
1.22.2 or latest) +# ------------------------------------------------------------------------------ + +function setup_go() { + local ARCH + case "$(uname -m)" in + x86_64) ARCH="amd64" ;; + aarch64) ARCH="arm64" ;; + *) + msg_error "Unsupported architecture: $(uname -m)" + return 1 + ;; + esac + + # Resolve "latest" version + local GO_VERSION="${GO_VERSION:-latest}" + if [[ "$GO_VERSION" == "latest" ]]; then + GO_VERSION=$(curl -fsSL https://go.dev/VERSION?m=text 2>/dev/null | head -n1 | sed 's/^go//') || { + msg_error "Could not determine latest Go version" + return 1 + } + [[ -z "$GO_VERSION" ]] && { + msg_error "Latest Go version is empty" + return 1 + } + fi + + local GO_BIN="/usr/local/bin/go" + local GO_INSTALL_DIR="/usr/local/go" + + # Get currently installed version + local CURRENT_VERSION="" + if [[ -x "$GO_BIN" ]]; then + CURRENT_VERSION=$("$GO_BIN" version 2>/dev/null | awk '{print $3}' | sed 's/go//') + fi + + # Scenario 1: Already at target version + if [[ -n "$CURRENT_VERSION" && "$CURRENT_VERSION" == "$GO_VERSION" ]]; then + cache_installed_version "go" "$GO_VERSION" + return 0 + fi + + # Scenario 2: Different version or not installed + if [[ -n "$CURRENT_VERSION" && "$CURRENT_VERSION" != "$GO_VERSION" ]]; then + msg_info "Upgrade Go from $CURRENT_VERSION to $GO_VERSION" + remove_old_tool_version "go" + else + msg_info "Setup Go $GO_VERSION" + fi + + local TARBALL="go${GO_VERSION}.linux-${ARCH}.tar.gz" + local URL="https://go.dev/dl/${TARBALL}" + local TMP_TAR=$(mktemp) + + curl -fsSL "$URL" -o "$TMP_TAR" || { + msg_error "Failed to download Go $GO_VERSION" + rm -f "$TMP_TAR" + return 1 + } + + $STD tar -C /usr/local -xzf "$TMP_TAR" || { + msg_error "Failed to extract Go tarball" + rm -f "$TMP_TAR" + return 1 + } + + ln -sf /usr/local/go/bin/go /usr/local/bin/go + ln -sf /usr/local/go/bin/gofmt /usr/local/bin/gofmt + rm -f "$TMP_TAR" + + cache_installed_version "go" "$GO_VERSION" + ensure_usr_local_bin_persist + msg_ok "Setup Go $GO_VERSION" +} + +# 
------------------------------------------------------------------------------ +# Installs or updates Ghostscript (gs) from source. +# +# Description: +# - Fetches latest release +# - Builds and installs system-wide +# ------------------------------------------------------------------------------ + +function setup_gs() { + local TMP_DIR=$(mktemp -d) + local CURRENT_VERSION=$(gs --version 2>/dev/null || echo "0") + + ensure_dependencies jq + + local RELEASE_JSON + RELEASE_JSON=$(curl -fsSL --max-time 15 https://api.github.com/repos/ArtifexSoftware/ghostpdl-downloads/releases/latest 2>/dev/null || echo "") + + if [[ -z "$RELEASE_JSON" ]]; then + msg_warn "Cannot fetch latest Ghostscript version from GitHub API" + # Try to get from current version + if command -v gs &>/dev/null; then + gs --version | head -n1 + cache_installed_version "ghostscript" "$CURRENT_VERSION" + return 0 + fi + msg_error "Cannot determine Ghostscript version and no existing installation found" + return 1 + fi + local LATEST_VERSION + LATEST_VERSION=$(echo "$RELEASE_JSON" | jq -r '.tag_name' | sed 's/^gs//') + local LATEST_VERSION_DOTTED + LATEST_VERSION_DOTTED=$(echo "$RELEASE_JSON" | jq -r '.name' | grep -o '[0-9]\+\.[0-9]\+\.[0-9]\+') + + if [[ -z "$LATEST_VERSION" || -z "$LATEST_VERSION_DOTTED" ]]; then + msg_warn "Could not determine latest Ghostscript version from GitHub - checking system" + # Fallback: try to use system version or return error + if [[ "$CURRENT_VERSION" == "0" ]]; then + msg_error "Ghostscript not installed and cannot determine latest version" + rm -rf "$TMP_DIR" + return 1 + fi + rm -rf "$TMP_DIR" + return 0 + fi + + # Scenario 1: Already at latest version + if [[ -n "$LATEST_VERSION_DOTTED" ]] && dpkg --compare-versions "$CURRENT_VERSION" ge "$LATEST_VERSION_DOTTED" 2>/dev/null; then + cache_installed_version "ghostscript" "$LATEST_VERSION_DOTTED" + rm -rf "$TMP_DIR" + return 0 + fi + + # Scenario 2: New install or upgrade + if [[ "$CURRENT_VERSION" != "0" && 
"$CURRENT_VERSION" != "$LATEST_VERSION_DOTTED" ]]; then + msg_info "Upgrade Ghostscript from $CURRENT_VERSION to $LATEST_VERSION_DOTTED" + else + msg_info "Setup Ghostscript $LATEST_VERSION_DOTTED" + fi + + curl -fsSL "https://github.com/ArtifexSoftware/ghostpdl-downloads/releases/download/gs${LATEST_VERSION}/ghostscript-${LATEST_VERSION_DOTTED}.tar.gz" -o "$TMP_DIR/ghostscript.tar.gz" || { + msg_error "Failed to download Ghostscript" + rm -rf "$TMP_DIR" + return 1 + } + + if ! tar -xzf "$TMP_DIR/ghostscript.tar.gz" -C "$TMP_DIR"; then + msg_error "Failed to extract Ghostscript archive" + rm -rf "$TMP_DIR" + return 1 + fi + + # Verify directory exists before cd + if [[ ! -d "$TMP_DIR/ghostscript-${LATEST_VERSION_DOTTED}" ]]; then + msg_error "Ghostscript source directory not found: $TMP_DIR/ghostscript-${LATEST_VERSION_DOTTED}" + rm -rf "$TMP_DIR" + return 1 + fi + + cd "$TMP_DIR/ghostscript-${LATEST_VERSION_DOTTED}" || { + msg_error "Failed to enter Ghostscript source directory" + rm -rf "$TMP_DIR" + return 1 + } + + ensure_dependencies build-essential libpng-dev zlib1g-dev + + $STD ./configure || { + msg_error "Ghostscript configure failed" + rm -rf "$TMP_DIR" + return 1 + } + $STD make -j"$(nproc)" || { + msg_error "Ghostscript compilation failed" + rm -rf "$TMP_DIR" + return 1 + } + $STD make install || { + msg_error "Ghostscript installation failed" + rm -rf "$TMP_DIR" + return 1 + } + + hash -r + if [[ ! -x "$(command -v gs)" ]]; then + if [[ -x /usr/local/bin/gs ]]; then + ln -sf /usr/local/bin/gs /usr/bin/gs + fi + fi + + rm -rf "$TMP_DIR" + cache_installed_version "ghostscript" "$LATEST_VERSION_DOTTED" + ensure_usr_local_bin_persist + msg_ok "Setup Ghostscript $LATEST_VERSION_DOTTED" +} + +# ------------------------------------------------------------------------------ +# Sets up Hardware Acceleration on debian or ubuntu. 
+# +# Description: +# - Determines CPU/GPU/APU Vendor +# - Installs the correct libraries and packages +# - Sets up Hardware Acceleration +# +# Notes: +# - Some things are fetched from intel repositories due to not being in debian repositories. +# ------------------------------------------------------------------------------ +function setup_hwaccel() { + msg_info "Setup Hardware Acceleration" + + if ! command -v lspci &>/dev/null; then + $STD apt -y update || { + msg_error "Failed to update package list" + return 1 + } + $STD apt -y install pciutils || { + msg_error "Failed to install pciutils" + return 1 + } + fi + + # Detect GPU vendor (Intel, AMD, NVIDIA) + local gpu_vendor + gpu_vendor=$(lspci 2>/dev/null | grep -Ei 'vga|3d|display' | grep -Eo 'Intel|AMD|NVIDIA' | head -n1 || echo "") + + # Detect CPU vendor (relevant for AMD APUs) + local cpu_vendor + cpu_vendor=$(lscpu 2>/dev/null | grep -i 'Vendor ID' | awk '{print $3}' || echo "") + + if [[ -z "$gpu_vendor" && -z "$cpu_vendor" ]]; then + msg_error "No GPU or CPU vendor detected (missing lspci/lscpu output)" + return 1 + fi + + # Detect OS with fallbacks + local os_id os_codename + os_id=$(grep -oP '(?<=^ID=).+' /etc/os-release 2>/dev/null | tr -d '"' || grep '^ID=' /etc/os-release 2>/dev/null | cut -d'=' -f2 | tr -d '"' || echo "debian") + os_codename=$(grep -oP '(?<=^VERSION_CODENAME=).+' /etc/os-release 2>/dev/null | tr -d '"' || grep '^VERSION_CODENAME=' /etc/os-release 2>/dev/null | cut -d'=' -f2 | tr -d '"' || echo "unknown") + + # Validate os_id + if [[ -z "$os_id" ]]; then + os_id="debian" + fi + + # Determine if we are on a VM or LXC + local in_ct="${CTTYPE:-0}" + + case "$gpu_vendor" in + Intel) + if [[ "$os_id" == "ubuntu" ]]; then + $STD apt -y install intel-opencl-icd || { + msg_error "Failed to install intel-opencl-icd" + return 1 + } + else + # For Debian: fetch Intel GPU drivers from GitHub + fetch_and_deploy_gh_release "" "intel/intel-graphics-compiler" "binary" "latest" "" 
"intel-igc-core-2_*_amd64.deb" || { + msg_warn "Failed to deploy Intel IGC core 2" + } + fetch_and_deploy_gh_release "" "intel/intel-graphics-compiler" "binary" "latest" "" "intel-igc-opencl-2_*_amd64.deb" || { + msg_warn "Failed to deploy Intel IGC OpenCL 2" + } + fetch_and_deploy_gh_release "" "intel/compute-runtime" "binary" "latest" "" "libigdgmm12_*_amd64.deb" || { + msg_warn "Failed to deploy Intel GDGMM12" + } + fetch_and_deploy_gh_release "" "intel/compute-runtime" "binary" "latest" "" "intel-opencl-icd_*_amd64.deb" || { + msg_warn "Failed to deploy Intel OpenCL ICD" + } + fi + + $STD apt -y install va-driver-all ocl-icd-libopencl1 vainfo intel-gpu-tools || { + msg_error "Failed to install Intel GPU dependencies" + return 1 + } + ;; + AMD) + $STD apt -y install mesa-va-drivers mesa-vdpau-drivers mesa-opencl-icd vainfo clinfo || { + msg_error "Failed to install AMD GPU dependencies" + return 1 + } + + # For AMD CPUs without discrete GPU (APUs) + if [[ "$cpu_vendor" == "AuthenticAMD" && -n "$gpu_vendor" ]]; then + $STD apt -y install libdrm-amdgpu1 firmware-amd-graphics || true + fi + ;; + NVIDIA) + # NVIDIA needs manual driver setup - skip for now + msg_info "NVIDIA GPU detected - manual driver setup required" + ;; + *) + # If no discrete GPU, but AMD CPU (e.g., Ryzen APU) + if [[ "$cpu_vendor" == "AuthenticAMD" ]]; then + $STD apt -y install mesa-opencl-icd ocl-icd-libopencl1 clinfo || { + msg_error "Failed to install Mesa OpenCL stack" + return 1 + } + else + msg_warn "No supported GPU vendor detected - skipping GPU acceleration" + fi + ;; + esac + + if [[ "$in_ct" == "0" ]]; then + chgrp video /dev/dri 2>/dev/null || true + chmod 755 /dev/dri 2>/dev/null || true + chmod 660 /dev/dri/* 2>/dev/null || true + $STD adduser "$(id -u -n)" video + $STD adduser "$(id -u -n)" render + fi + + cache_installed_version "hwaccel" "1.0" + msg_ok "Setup Hardware Acceleration" +} + +# ------------------------------------------------------------------------------ +# 
Installs ImageMagick 7 from source (Debian/Ubuntu only). +# +# Description: +# - Downloads the latest ImageMagick source tarball +# - Builds and installs ImageMagick to /usr/local +# - Configures dynamic linker (ldconfig) +# +# Notes: +# - Requires: build-essential, libtool, libjpeg-dev, libpng-dev, etc. +# ------------------------------------------------------------------------------ +function setup_imagemagick() { + local TMP_DIR=$(mktemp -d) + local BINARY_PATH="/usr/local/bin/magick" + + # Get currently installed version + local INSTALLED_VERSION="" + if command -v magick &>/dev/null; then + INSTALLED_VERSION=$(magick -version | awk '/^Version/ {print $3}') + fi + + msg_info "Setup ImageMagick" + + ensure_dependencies \ + build-essential \ + libtool \ + libjpeg-dev \ + libpng-dev \ + libtiff-dev \ + libwebp-dev \ + libheif-dev \ + libde265-dev \ + libopenjp2-7-dev \ + libxml2-dev \ + liblcms2-dev \ + libfreetype6-dev \ + libraw-dev \ + libfftw3-dev \ + liblqr-1-0-dev \ + libgsl-dev \ + pkg-config \ + ghostscript + + curl -fsSL https://imagemagick.org/archive/ImageMagick.tar.gz -o "$TMP_DIR/ImageMagick.tar.gz" || { + msg_error "Failed to download ImageMagick" + rm -rf "$TMP_DIR" + return 1 + } + + tar -xzf "$TMP_DIR/ImageMagick.tar.gz" -C "$TMP_DIR" || { + msg_error "Failed to extract ImageMagick" + rm -rf "$TMP_DIR" + return 1 + } + + cd "$TMP_DIR"/ImageMagick-* || { + msg_error "Source extraction failed" + rm -rf "$TMP_DIR" + return 1 + } + + $STD ./configure --disable-static || { + msg_error "ImageMagick configure failed" + rm -rf "$TMP_DIR" + return 1 + } + $STD make -j"$(nproc)" || { + msg_error "ImageMagick compilation failed" + rm -rf "$TMP_DIR" + return 1 + } + $STD make install || { + msg_error "ImageMagick installation failed" + rm -rf "$TMP_DIR" + return 1 + } + $STD ldconfig /usr/local/lib + + if [[ ! 
-x "$BINARY_PATH" ]]; then + msg_error "ImageMagick installation failed" + rm -rf "$TMP_DIR" + return 1 + fi + + local FINAL_VERSION + FINAL_VERSION=$("$BINARY_PATH" -version | awk '/^Version/ {print $3}') + rm -rf "$TMP_DIR" + cache_installed_version "imagemagick" "$FINAL_VERSION" + ensure_usr_local_bin_persist + + if [[ -n "$INSTALLED_VERSION" ]]; then + msg_ok "Upgrade ImageMagick $INSTALLED_VERSION → $FINAL_VERSION" + else + msg_ok "Setup ImageMagick $FINAL_VERSION" + fi +} + +# ------------------------------------------------------------------------------ +# Installs Temurin JDK via Adoptium APT repository. +# +# Description: +# - Removes previous JDK if version mismatch +# - Installs or upgrades to specified JAVA_VERSION +# +# Variables: +# JAVA_VERSION - Temurin JDK version to install (e.g. 17, 21) +# ------------------------------------------------------------------------------ + +function setup_java() { + local JAVA_VERSION="${JAVA_VERSION:-21}" + local DISTRO_ID DISTRO_CODENAME + DISTRO_ID=$(awk -F= '/^ID=/{print $2}' /etc/os-release | tr -d '"') + DISTRO_CODENAME=$(awk -F= '/VERSION_CODENAME/ { print $2 }' /etc/os-release) + local DESIRED_PACKAGE="temurin-${JAVA_VERSION}-jdk" + + # Prepare repository (cleanup + validation) + prepare_repository_setup "adoptium" || { + msg_error "Failed to prepare Adoptium repository" + return 1 + } + + # Add repo if needed + if [[ ! 
-f /etc/apt/sources.list.d/adoptium.sources ]]; then + local SUITE + SUITE=$(get_fallback_suite "$DISTRO_ID" "$DISTRO_CODENAME" "https://packages.adoptium.net/artifactory/deb") + setup_deb822_repo \ + "adoptium" \ + "https://packages.adoptium.net/artifactory/api/gpg/key/public" \ + "https://packages.adoptium.net/artifactory/deb" \ + "$SUITE" \ + "main" + fi + + # Get currently installed version + local INSTALLED_VERSION="" + if dpkg -l | grep -q "temurin-.*-jdk" 2>/dev/null; then + INSTALLED_VERSION=$(dpkg -l 2>/dev/null | awk '/temurin-.*-jdk/{print $2}' | grep -oP 'temurin-\K[0-9]+' | head -n1 || echo "") + fi + + # Validate INSTALLED_VERSION is not empty if matched + local JDK_COUNT=0 + JDK_COUNT=$(dpkg -l 2>/dev/null | grep -c "temurin-.*-jdk" || true) + if [[ -z "$INSTALLED_VERSION" && "${JDK_COUNT:-0}" -gt 0 ]]; then + msg_warn "Found Temurin JDK but cannot determine version" + INSTALLED_VERSION="0" + fi + + # Scenario 1: Already at correct version + if [[ "$INSTALLED_VERSION" == "$JAVA_VERSION" ]]; then + msg_info "Update Temurin JDK $JAVA_VERSION" + ensure_apt_working || return 1 + upgrade_packages_with_retry "$DESIRED_PACKAGE" || { + msg_error "Failed to update Temurin JDK" + return 1 + } + cache_installed_version "temurin-jdk" "$JAVA_VERSION" + msg_ok "Update Temurin JDK $JAVA_VERSION" + return 0 + fi + + # Scenario 2: Different version - remove old and install new + if [[ -n "$INSTALLED_VERSION" ]]; then + msg_info "Upgrade Temurin JDK from $INSTALLED_VERSION to $JAVA_VERSION" + $STD apt purge -y "temurin-${INSTALLED_VERSION}-jdk" || true + else + msg_info "Setup Temurin JDK $JAVA_VERSION" + fi + + ensure_apt_working || return 1 + + # Install with retry logic + install_packages_with_retry "$DESIRED_PACKAGE" || { + msg_error "Failed to install Temurin JDK $JAVA_VERSION" + return 1 + } + + cache_installed_version "temurin-jdk" "$JAVA_VERSION" + msg_ok "Setup Temurin JDK $JAVA_VERSION" +} + +# 
------------------------------------------------------------------------------ +# Installs a local IP updater script using networkd-dispatcher. +# +# Description: +# - Stores current IP in /run/local-ip.env +# - Automatically runs on network changes +# ------------------------------------------------------------------------------ + +function setup_local_ip_helper() { + local BASE_DIR="/usr/local/community-scripts/ip-management" + local SCRIPT_PATH="$BASE_DIR/update_local_ip.sh" + local IP_FILE="/run/local-ip.env" + local DISPATCHER_SCRIPT="/etc/networkd-dispatcher/routable.d/10-update-local-ip.sh" + + # Check if already set up + if [[ -f "$SCRIPT_PATH" && -f "$DISPATCHER_SCRIPT" ]]; then + msg_info "Update Local IP Helper" + cache_installed_version "local-ip-helper" "1.0" + msg_ok "Update Local IP Helper" + else + msg_info "Setup Local IP Helper" + fi + + mkdir -p "$BASE_DIR" + + # Install networkd-dispatcher if not present + if ! dpkg -s networkd-dispatcher >/dev/null 2>&1; then + ensure_dependencies networkd-dispatcher || { + msg_error "Failed to install networkd-dispatcher" + return 1 + } + fi + + # Write update_local_ip.sh + cat <<'EOF' >"$SCRIPT_PATH" +#!/bin/bash +set -euo pipefail + +IP_FILE="/run/local-ip.env" +mkdir -p "$(dirname "$IP_FILE")" + +get_current_ip() { + local targets=("8.8.8.8" "1.1.1.1" "192.168.1.1" "10.0.0.1" "172.16.0.1" "default") + local ip + + for target in "${targets[@]}"; do + if [[ "$target" == "default" ]]; then + ip=$(ip route get 1 2>/dev/null | awk '{for(i=1;i<=NF;i++) if ($i=="src") print $(i+1)}') + else + ip=$(ip route get "$target" 2>/dev/null | awk '{for(i=1;i<=NF;i++) if ($i=="src") print $(i+1)}') + fi + if [[ -n "$ip" ]]; then + echo "$ip" + return 0 + fi + done + + return 1 +} + +current_ip="$(get_current_ip)" + +if [[ -z "$current_ip" ]]; then + echo "[ERROR] Could not detect local IP" >&2 + exit 1 +fi + +if [[ -f "$IP_FILE" ]]; then + source "$IP_FILE" + [[ "$LOCAL_IP" == "$current_ip" ]] && exit 0 +fi + +echo 
"LOCAL_IP=$current_ip" > "$IP_FILE" +echo "[INFO] LOCAL_IP updated to $current_ip" +EOF + + chmod +x "$SCRIPT_PATH" + + # Install dispatcher hook + mkdir -p "$(dirname "$DISPATCHER_SCRIPT")" + cat <<EOF >"$DISPATCHER_SCRIPT" +#!/bin/bash +$SCRIPT_PATH +EOF + + chmod +x "$DISPATCHER_SCRIPT" + systemctl enable -q --now networkd-dispatcher.service || { + msg_warn "Failed to enable networkd-dispatcher service" + } + + cache_installed_version "local-ip-helper" "1.0" + msg_ok "Setup Local IP Helper" +} + +# ------------------------------------------------------------------------------ +# Installs or updates MariaDB from official repo. +# +# Description: +# - Detects current MariaDB version and replaces it if necessary +# - Preserves existing database data +# - Dynamically determines latest GA version if "latest" is given +# +# Variables: +# MARIADB_VERSION - MariaDB version to install (e.g. 10.11, latest) (default: latest) +# ------------------------------------------------------------------------------ + +setup_mariadb() { + local MARIADB_VERSION="${MARIADB_VERSION:-latest}" + + # Resolve "latest" to actual version + if [[ "$MARIADB_VERSION" == "latest" ]]; then + if !
curl -fsI --max-time 10 http://mirror.mariadb.org/repo/ >/dev/null 2>&1; then + msg_warn "MariaDB mirror not reachable - trying cached package list fallback" + # Fallback: try to use a known stable version + MARIADB_VERSION="12.0" + else + MARIADB_VERSION=$(curl -fsSL --max-time 15 http://mirror.mariadb.org/repo/ 2>/dev/null | + grep -Eo '[0-9]+\.[0-9]+\.[0-9]+/' | + grep -vE 'rc/|rolling/' | + sed 's|/||' | + sort -Vr | + head -n1 || echo "") + + if [[ -z "$MARIADB_VERSION" ]]; then + msg_warn "Could not parse latest GA MariaDB version from mirror - using fallback" + MARIADB_VERSION="12.0" + fi + fi + fi + + # Get currently installed version + local CURRENT_VERSION="" + CURRENT_VERSION=$(is_tool_installed "mariadb" 2>/dev/null) || true + + # Scenario 1: Already installed at target version - just update packages + if [[ -n "$CURRENT_VERSION" && "$CURRENT_VERSION" == "$MARIADB_VERSION" ]]; then + msg_info "Update MariaDB $MARIADB_VERSION" + + # Ensure APT is working + ensure_apt_working || return 1 + + # Check if repository needs to be refreshed + if [[ -f /etc/apt/sources.list.d/mariadb.sources ]]; then + local REPO_VERSION="" + REPO_VERSION=$(grep -oP 'repo/\K[0-9]+\.[0-9]+' /etc/apt/sources.list.d/mariadb.sources 2>/dev/null || echo "") + if [[ -n "$REPO_VERSION" && "$REPO_VERSION" != "${MARIADB_VERSION%.*}" ]]; then + msg_warn "Repository version mismatch, updating..." 
+ manage_tool_repository "mariadb" "$MARIADB_VERSION" "http://mirror.mariadb.org/repo/$MARIADB_VERSION" \ + "https://mariadb.org/mariadb_release_signing_key.asc" || { + msg_error "Failed to update MariaDB repository" + return 1 + } + fi + fi + + # Perform upgrade with retry logic + ensure_apt_working || return 1 + upgrade_packages_with_retry "mariadb-server" "mariadb-client" || { + msg_error "Failed to upgrade MariaDB packages" + return 1 + } + cache_installed_version "mariadb" "$MARIADB_VERSION" + msg_ok "Update MariaDB $MARIADB_VERSION" + return 0 + fi + + # Scenario 2: Different version installed - clean upgrade + if [[ -n "$CURRENT_VERSION" && "$CURRENT_VERSION" != "$MARIADB_VERSION" ]]; then + msg_info "Upgrade MariaDB from $CURRENT_VERSION to $MARIADB_VERSION" + remove_old_tool_version "mariadb" + fi + + # Scenario 3: Fresh install or version change + msg_info "Setup MariaDB $MARIADB_VERSION" + + # Prepare repository (cleanup + validation) + prepare_repository_setup "mariadb" || { + msg_error "Failed to prepare MariaDB repository" + return 1 + } + + # Install required dependencies first + local mariadb_deps=() + for dep in gawk rsync socat libdbi-perl pv; do + if apt-cache search "^${dep}$" 2>/dev/null | grep -q .; then + mariadb_deps+=("$dep") + fi + done + + if [[ ${#mariadb_deps[@]} -gt 0 ]]; then + $STD apt install -y "${mariadb_deps[@]}" 2>/dev/null || true + fi + + # Setup repository + manage_tool_repository "mariadb" "$MARIADB_VERSION" "http://mirror.mariadb.org/repo/$MARIADB_VERSION" \ + "https://mariadb.org/mariadb_release_signing_key.asc" || { + msg_error "Failed to setup MariaDB repository" + return 1 + } + + # Set debconf selections for all potential versions + local MARIADB_MAJOR_MINOR + MARIADB_MAJOR_MINOR=$(echo "$MARIADB_VERSION" | awk -F. 
'{print $1"."$2}') + if [[ -n "$MARIADB_MAJOR_MINOR" ]]; then + echo "mariadb-server-$MARIADB_MAJOR_MINOR mariadb-server/feedback boolean false" | debconf-set-selections + fi + + # Install packages with retry logic + export DEBIAN_FRONTEND=noninteractive + if ! install_packages_with_retry "mariadb-server" "mariadb-client"; then + # Fallback: try without specific version + msg_warn "Failed to install MariaDB packages from upstream repo, trying distro fallback..." + cleanup_old_repo_files "mariadb" + $STD apt update || { + msg_warn "APT update also failed, continuing with cache" + } + install_packages_with_retry "mariadb-server" "mariadb-client" || { + msg_error "Failed to install MariaDB packages (both upstream and distro)" + return 1 + } + fi + + cache_installed_version "mariadb" "$MARIADB_VERSION" + msg_ok "Setup MariaDB $MARIADB_VERSION" +} + +# ------------------------------------------------------------------------------ +# Creates MariaDB database with user, charset and optional extra grants/modes +# +# Description: +# - Generates password if empty +# - Creates database with utf8mb4_unicode_ci +# - Creates local user with password +# - Grants full access to this DB +# - Optional: apply extra GRANT statements (comma-separated) +# - Optional: apply custom GLOBAL sql_mode +# - Saves credentials to file +# - Exports variables for use in calling script +# +# Usage: +# MARIADB_DB_NAME="myapp_db" MARIADB_DB_USER="myapp_user" setup_mariadb_db +# MARIADB_DB_NAME="domain_monitor" MARIADB_DB_USER="domainmonitor" setup_mariadb_db +# MARIADB_DB_NAME="myapp" MARIADB_DB_USER="myapp" MARIADB_DB_EXTRA_GRANTS="GRANT SELECT ON \`mysql\`.\`time_zone_name\`" setup_mariadb_db +# MARIADB_DB_NAME="ghostfolio" MARIADB_DB_USER="ghostfolio" MARIADB_DB_SQL_MODE="" setup_mariadb_db +# +# Variables: +# MARIADB_DB_NAME - Database name (required) +# MARIADB_DB_USER - Database user (required) +# MARIADB_DB_PASS - User password (optional, auto-generated if empty) +# MARIADB_DB_EXTRA_GRANTS - 
Comma-separated GRANT statements (optional) +# Example: "GRANT SELECT ON \`mysql\`.\`time_zone_name\`" +# MARIADB_DB_SQL_MODE - Optional global sql_mode override (e.g. "", "STRICT_TRANS_TABLES") +# MARIADB_DB_CREDS_FILE - Credentials file path (optional, default: ~/${APPLICATION}.creds) +# +# Exports: +# MARIADB_DB_NAME, MARIADB_DB_USER, MARIADB_DB_PASS +# ------------------------------------------------------------------------------ + +function setup_mariadb_db() { + if [[ -z "${MARIADB_DB_NAME:-}" || -z "${MARIADB_DB_USER:-}" ]]; then + msg_error "MARIADB_DB_NAME and MARIADB_DB_USER must be set before calling setup_mariadb_db" + return 1 + fi + + if [[ -z "${MARIADB_DB_PASS:-}" ]]; then + MARIADB_DB_PASS=$(openssl rand -base64 18 | tr -dc 'a-zA-Z0-9' | head -c13) + fi + + msg_info "Setting up MariaDB Database" + + $STD mariadb -u root -e "CREATE DATABASE \`$MARIADB_DB_NAME\` CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;" + $STD mariadb -u root -e "CREATE USER '$MARIADB_DB_USER'@'localhost' IDENTIFIED BY '$MARIADB_DB_PASS';" + $STD mariadb -u root -e "GRANT ALL ON \`$MARIADB_DB_NAME\`.* TO '$MARIADB_DB_USER'@'localhost';" + + # Optional extra grants + if [[ -n "${MARIADB_DB_EXTRA_GRANTS:-}" ]]; then + IFS=',' read -ra G_LIST <<<"${MARIADB_DB_EXTRA_GRANTS:-}" + for g in "${G_LIST[@]}"; do + g=$(echo "$g" | xargs) + $STD mariadb -u root -e "$g TO '$MARIADB_DB_USER'@'localhost';" + done + fi + + # Optional sql_mode override + if [[ -n "${MARIADB_DB_SQL_MODE:-}" ]]; then + $STD mariadb -u root -e "SET GLOBAL sql_mode='${MARIADB_DB_SQL_MODE:-}';" + fi + + $STD mariadb -u root -e "FLUSH PRIVILEGES;" + + local app_name="${APPLICATION,,}" + local CREDS_FILE="${MARIADB_DB_CREDS_FILE:-${HOME}/${app_name}.creds}" + { + echo "MariaDB Credentials" + echo "Database: $MARIADB_DB_NAME" + echo "User: $MARIADB_DB_USER" + echo "Password: $MARIADB_DB_PASS" + } >>"$CREDS_FILE" + + msg_ok "Set up MariaDB Database" + + export MARIADB_DB_NAME + export MARIADB_DB_USER + export 
MARIADB_DB_PASS +} + +# ------------------------------------------------------------------------------ +# Installs or updates MongoDB to specified major version. +# +# Description: +# - Preserves data across installations +# - Adds official MongoDB repo +# +# Variables: +# MONGO_VERSION - MongoDB major version to install (e.g. 7.0, 8.0) +# ------------------------------------------------------------------------------ + +function setup_mongodb() { + local MONGO_VERSION="${MONGO_VERSION:-8.0}" + local DISTRO_ID DISTRO_CODENAME + DISTRO_ID=$(get_os_info id) + DISTRO_CODENAME=$(get_os_info codename) + + # Check AVX support + if ! grep -qm1 'avx[^ ]*' /proc/cpuinfo; then + local major="${MONGO_VERSION%%.*}" + if ((major > 5)); then + msg_error "MongoDB ${MONGO_VERSION} requires AVX support, which is not available on this system." + return 1 + fi + fi + + case "$DISTRO_ID" in + ubuntu) + MONGO_BASE_URL="https://repo.mongodb.org/apt/ubuntu" + ;; + debian) + MONGO_BASE_URL="https://repo.mongodb.org/apt/debian" + ;; + *) + msg_error "Unsupported distribution: $DISTRO_ID" + return 1 + ;; + esac + + # Get currently installed version + local INSTALLED_VERSION="" + INSTALLED_VERSION=$(is_tool_installed "mongodb" 2>/dev/null) || true + + # Scenario 1: Already at target version - just update packages + if [[ -n "$INSTALLED_VERSION" && "$INSTALLED_VERSION" == "$MONGO_VERSION" ]]; then + msg_info "Update MongoDB $MONGO_VERSION" + + ensure_apt_working || return 1 + + # Perform upgrade with retry logic + upgrade_packages_with_retry "mongodb-org" || { + msg_error "Failed to upgrade MongoDB" + return 1 + } + cache_installed_version "mongodb" "$MONGO_VERSION" + msg_ok "Update MongoDB $MONGO_VERSION" + return 0 + fi + + # Scenario 2: Different version installed - clean upgrade + if [[ -n "$INSTALLED_VERSION" && "$INSTALLED_VERSION" != "$MONGO_VERSION" ]]; then + msg_info "Upgrade MongoDB from $INSTALLED_VERSION to $MONGO_VERSION" + remove_old_tool_version "mongodb" + else + msg_info 
"Setup MongoDB $MONGO_VERSION" + fi + + cleanup_orphaned_sources + + # Prepare repository (cleanup + validation) + prepare_repository_setup "mongodb" || { + msg_error "Failed to prepare MongoDB repository" + return 1 + } + + # Setup repository + manage_tool_repository "mongodb" "$MONGO_VERSION" "$MONGO_BASE_URL" \ + "https://www.mongodb.org/static/pgp/server-${MONGO_VERSION}.asc" || { + msg_error "Failed to setup MongoDB repository" + return 1 + } + + # Wait for repo to settle + $STD apt update || { + msg_error "APT update failed — invalid MongoDB repo for ${DISTRO_ID}-${DISTRO_CODENAME}?" + return 1 + } + + # Install MongoDB with retry logic + install_packages_with_retry "mongodb-org" || { + msg_error "Failed to install MongoDB packages" + return 1 + } + + # Verify MongoDB was installed correctly + if ! command -v mongod >/dev/null 2>&1; then + msg_error "MongoDB binary not found after installation" + return 1 + fi + + mkdir -p /var/lib/mongodb + chown -R mongodb:mongodb /var/lib/mongodb + + $STD systemctl enable mongod || { + msg_warn "Failed to enable mongod service" + } + safe_service_restart mongod + + # Verify MongoDB version + local INSTALLED_VERSION + INSTALLED_VERSION=$(mongod --version 2>/dev/null | grep -oP 'db version v\K[0-9]+\.[0-9]+' | head -n1 || echo "0.0") + verify_tool_version "MongoDB" "$MONGO_VERSION" "$INSTALLED_VERSION" || true + + cache_installed_version "mongodb" "$MONGO_VERSION" + msg_ok "Setup MongoDB $MONGO_VERSION" +} + +# ------------------------------------------------------------------------------ +# Installs or upgrades MySQL and configures APT repo. +# +# Description: +# - Detects existing MySQL installation +# - Purges conflicting packages before installation +# - Supports clean upgrade +# - Handles Debian Trixie libaio1t64 transition +# +# Variables: +# MYSQL_VERSION - MySQL version to install (e.g. 
5.7, 8.0) (default: 8.0) +# ------------------------------------------------------------------------------ + +function setup_mysql() { + local MYSQL_VERSION="${MYSQL_VERSION:-8.0}" + local DISTRO_ID DISTRO_CODENAME + DISTRO_ID=$(awk -F= '/^ID=/{print $2}' /etc/os-release | tr -d '"') + DISTRO_CODENAME=$(awk -F= '/^VERSION_CODENAME=/{print $2}' /etc/os-release) + + # Get currently installed version + local CURRENT_VERSION="" + CURRENT_VERSION=$(is_tool_installed "mysql" 2>/dev/null) || true + + # Scenario 1: Already at target version - just update packages + if [[ -n "$CURRENT_VERSION" && "$CURRENT_VERSION" == "$MYSQL_VERSION" ]]; then + msg_info "Update MySQL $MYSQL_VERSION" + + ensure_apt_working || return 1 + + # Perform upgrade with retry logic (non-fatal if fails) + upgrade_packages_with_retry "mysql-server" "mysql-client" || true + + cache_installed_version "mysql" "$MYSQL_VERSION" + msg_ok "Update MySQL $MYSQL_VERSION" + return 0 + fi + + # Scenario 2: Different version installed - clean upgrade + if [[ -n "$CURRENT_VERSION" && "$CURRENT_VERSION" != "$MYSQL_VERSION" ]]; then + msg_info "Upgrade MySQL from $CURRENT_VERSION to $MYSQL_VERSION" + remove_old_tool_version "mysql" + else + msg_info "Setup MySQL $MYSQL_VERSION" + fi + + # Prepare repository (cleanup + validation) + prepare_repository_setup "mysql" || { + msg_error "Failed to prepare MySQL repository" + return 1 + } + + # Debian 13+ Fix: MySQL 8.0 incompatible with libaio1t64, use 8.4 LTS + if [[ "$DISTRO_ID" == "debian" && "$DISTRO_CODENAME" =~ ^(trixie|forky|sid)$ ]]; then + msg_info "Debian ${DISTRO_CODENAME} detected → using MySQL 8.4 LTS (libaio1t64 compatible)" + + if ! curl -fsSL https://repo.mysql.com/RPM-GPG-KEY-mysql-2023 | gpg --dearmor -o /etc/apt/keyrings/mysql.gpg 2>/dev/null; then + msg_error "Failed to import MySQL GPG key" + return 1 + fi + + cat >/etc/apt/sources.list.d/mysql.sources </dev/null | grep -q . 
&& + install_packages_with_retry "mysql-server" "mysql-client"; then + mysql_install_success=true + elif apt-cache search "^mysql-community-server$" 2>/dev/null | grep -q . && + install_packages_with_retry "mysql-community-server" "mysql-community-client"; then + mysql_install_success=true + elif apt-cache search "^mysql$" 2>/dev/null | grep -q . && + install_packages_with_retry "mysql"; then + mysql_install_success=true + fi + + if [[ "$mysql_install_success" == false ]]; then + msg_error "MySQL ${MYSQL_VERSION} package not available for suite ${SUITE}" + return 1 + fi + + # Verify mysql command is accessible + if ! command -v mysql >/dev/null 2>&1; then + hash -r + if ! command -v mysql >/dev/null 2>&1; then + msg_error "MySQL installed but mysql command still not found" + return 1 + fi + fi + + cache_installed_version "mysql" "$MYSQL_VERSION" + msg_ok "Setup MySQL $MYSQL_VERSION" +} + +# ------------------------------------------------------------------------------ +# Installs Node.js and optional global modules. +# +# Description: +# - Installs specified Node.js version using NodeSource APT repo +# - Optionally installs or updates global npm modules +# +# Variables: +# NODE_VERSION - Node.js version to install (default: 22) +# NODE_MODULE - Comma-separated list of global modules (e.g. "yarn,@vue/cli@5.0.0") +# ------------------------------------------------------------------------------ + +function setup_nodejs() { + local NODE_VERSION="${NODE_VERSION:-22}" + local NODE_MODULE="${NODE_MODULE:-}" + + # ALWAYS clean up legacy installations first (nvm, etc.) to prevent conflicts + cleanup_legacy_install "nodejs" + + # Get currently installed version + local CURRENT_NODE_VERSION="" + CURRENT_NODE_VERSION=$(is_tool_installed "nodejs" 2>/dev/null) || true + + # Ensure jq is available for JSON parsing + if ! 
command -v jq &>/dev/null; then + $STD apt update + $STD apt install -y jq || { + msg_error "Failed to install jq" + return 1 + } + fi + + # Scenario 1: Already installed at target version - just update packages/modules + if [[ -n "$CURRENT_NODE_VERSION" && "$CURRENT_NODE_VERSION" == "$NODE_VERSION" ]]; then + msg_info "Update Node.js $NODE_VERSION" + + ensure_apt_working || return 1 + + # Just update npm to latest + $STD npm install -g npm@latest 2>/dev/null || true + + cache_installed_version "nodejs" "$NODE_VERSION" + msg_ok "Update Node.js $NODE_VERSION" + else + # Scenario 2: Different version installed - clean upgrade + if [[ -n "$CURRENT_NODE_VERSION" && "$CURRENT_NODE_VERSION" != "$NODE_VERSION" ]]; then + msg_info "Upgrade Node.js from $CURRENT_NODE_VERSION to $NODE_VERSION" + remove_old_tool_version "nodejs" + else + msg_info "Setup Node.js $NODE_VERSION" + fi + + # Remove ALL Debian nodejs packages BEFORE adding NodeSource repo + if dpkg -l 2>/dev/null | grep -qE "^ii.*(nodejs|libnode|node-cjs|node-acorn|node-balanced|node-brace|node-minimatch|node-undici|node-xtend|node-corepack)"; then + msg_info "Removing Debian-packaged Node.js and dependencies" + $STD apt purge -y nodejs nodejs-doc libnode* node-* 2>/dev/null || true + $STD apt autoremove -y 2>/dev/null || true + $STD apt clean 2>/dev/null || true + fi + + # Remove any APT pinning (not needed) + rm -f /etc/apt/preferences.d/nodesource 2>/dev/null || true + + # Prepare repository (cleanup + validation) + prepare_repository_setup "nodesource" || { + msg_error "Failed to prepare Node.js repository" + return 1 + } + + # Setup NodeSource repository + manage_tool_repository "nodejs" "$NODE_VERSION" "https://deb.nodesource.com/node_${NODE_VERSION}.x" "https://deb.nodesource.com/gpgkey/nodesource-repo.gpg.key" || { + msg_error "Failed to setup Node.js repository" + return 1 + } + + # CRITICAL: Force APT cache refresh AFTER repository setup + # This ensures NodeSource is the only nodejs source in APT cache + 
$STD apt update + + # Install dependencies (NodeSource is now the only nodejs source) + ensure_dependencies curl ca-certificates gnupg + + # Install Node.js from NodeSource + install_packages_with_retry "nodejs" || { + msg_error "Failed to install Node.js ${NODE_VERSION} from NodeSource" + return 1 + } + + # Verify Node.js was installed correctly + if ! command -v node >/dev/null 2>&1; then + msg_error "Node.js binary not found after installation" + return 1 + fi + + local INSTALLED_NODE_VERSION + INSTALLED_NODE_VERSION=$(node -v 2>/dev/null | grep -oP '^v\K[0-9]+' || echo "0") + verify_tool_version "Node.js" "$NODE_VERSION" "$INSTALLED_NODE_VERSION" || true + + # Verify npm is available (should come with NodeSource nodejs) + if ! command -v npm >/dev/null 2>&1; then + msg_error "npm not found after Node.js installation - repository issue?" + return 1 + fi + + # Update to latest npm (with version check to avoid incompatibility) + local NPM_VERSION + NPM_VERSION=$(npm -v 2>/dev/null || echo "0") + if [[ "$NPM_VERSION" != "0" ]]; then + $STD npm install -g npm@latest 2>/dev/null || { + msg_warn "Failed to update npm to latest version (continuing with bundled npm $NPM_VERSION)" + } + fi + + cache_installed_version "nodejs" "$NODE_VERSION" + msg_ok "Setup Node.js $NODE_VERSION" + fi + + export NODE_OPTIONS="--max-old-space-size=4096" + + # Ensure valid working directory for npm (avoids uv_cwd error) + if [[ ! -d /opt ]]; then + mkdir -p /opt + fi + cd /opt || { + msg_error "Failed to set safe working directory before npm install" + return 1 + } + + # Install global Node modules + if [[ -n "$NODE_MODULE" ]]; then + IFS=',' read -ra MODULES <<<"$NODE_MODULE" + local failed_modules=0 + for mod in "${MODULES[@]}"; do + local MODULE_NAME MODULE_REQ_VERSION MODULE_INSTALLED_VERSION + if [[ "$mod" == @*/*@* ]]; then + # Scoped package with version, e.g. 
@vue/cli-service@latest + MODULE_NAME="${mod%@*}" + MODULE_REQ_VERSION="${mod##*@}" + elif [[ "$mod" == *"@"* ]]; then + # Unscoped package with version, e.g. yarn@latest + MODULE_NAME="${mod%@*}" + MODULE_REQ_VERSION="${mod##*@}" + else + # No version specified + MODULE_NAME="$mod" + MODULE_REQ_VERSION="latest" + fi + + # Check if the module is already installed + if $STD npm list -g --depth=0 "$MODULE_NAME" 2>&1 | grep -q "$MODULE_NAME@"; then + MODULE_INSTALLED_VERSION="$($STD npm list -g --depth=0 "$MODULE_NAME" 2>&1 | grep "$MODULE_NAME@" | awk -F@ '{print $2}' | tr -d '[:space:]')" + if [[ "$MODULE_REQ_VERSION" != "latest" && "$MODULE_REQ_VERSION" != "$MODULE_INSTALLED_VERSION" ]]; then + msg_info "Updating $MODULE_NAME from v$MODULE_INSTALLED_VERSION to v$MODULE_REQ_VERSION" + if ! $STD npm install -g "${MODULE_NAME}@${MODULE_REQ_VERSION}" 2>/dev/null; then + msg_warn "Failed to update $MODULE_NAME to version $MODULE_REQ_VERSION" + ((failed_modules++)) + continue + fi + elif [[ "$MODULE_REQ_VERSION" == "latest" ]]; then + msg_info "Updating $MODULE_NAME to latest version" + if ! $STD npm install -g "${MODULE_NAME}@latest" 2>/dev/null; then + msg_warn "Failed to update $MODULE_NAME to latest version" + ((failed_modules++)) + continue + fi + fi + else + msg_info "Installing $MODULE_NAME@$MODULE_REQ_VERSION" + if ! $STD npm install -g "${MODULE_NAME}@${MODULE_REQ_VERSION}" 2>/dev/null; then + msg_warn "Failed to install $MODULE_NAME@$MODULE_REQ_VERSION" + ((failed_modules++)) + continue + fi + fi + done + if [[ $failed_modules -eq 0 ]]; then + msg_ok "Installed Node.js modules: $NODE_MODULE" + else + msg_warn "Installed Node.js modules with $failed_modules failure(s): $NODE_MODULE" + fi + fi +} + +# ------------------------------------------------------------------------------ +# Installs PHP with selected modules and configures Apache/FPM support. 
+# +# Description: +# - Adds Sury PHP repo if needed +# - Installs default and user-defined modules +# - Patches php.ini for CLI, Apache, and FPM as needed +# +# Variables: +# PHP_VERSION - PHP version to install (default: 8.4) +# PHP_MODULE - Additional comma-separated modules +# PHP_APACHE - Set YES to enable PHP with Apache +# PHP_FPM - Set YES to enable PHP-FPM +# PHP_MEMORY_LIMIT - (default: 512M) +# PHP_UPLOAD_MAX_FILESIZE - (default: 128M) +# PHP_POST_MAX_SIZE - (default: 128M) +# PHP_MAX_EXECUTION_TIME - (default: 300) +# ------------------------------------------------------------------------------ + +function setup_php() { + local PHP_VERSION="${PHP_VERSION:-8.4}" + local PHP_MODULE="${PHP_MODULE:-}" + local PHP_APACHE="${PHP_APACHE:-NO}" + local PHP_FPM="${PHP_FPM:-NO}" + local DISTRO_ID DISTRO_CODENAME + DISTRO_ID=$(awk -F= '/^ID=/{print $2}' /etc/os-release | tr -d '"') + DISTRO_CODENAME=$(awk -F= '/^VERSION_CODENAME=/{print $2}' /etc/os-release) + + local DEFAULT_MODULES="bcmath,cli,curl,gd,intl,mbstring,opcache,readline,xml,zip" + local COMBINED_MODULES + + local PHP_MEMORY_LIMIT="${PHP_MEMORY_LIMIT:-512M}" + local PHP_UPLOAD_MAX_FILESIZE="${PHP_UPLOAD_MAX_FILESIZE:-128M}" + local PHP_POST_MAX_SIZE="${PHP_POST_MAX_SIZE:-128M}" + local PHP_MAX_EXECUTION_TIME="${PHP_MAX_EXECUTION_TIME:-300}" + + # Merge default + user-defined modules + if [[ -n "$PHP_MODULE" ]]; then + COMBINED_MODULES="${DEFAULT_MODULES},${PHP_MODULE}" + else + COMBINED_MODULES="${DEFAULT_MODULES}" + fi + + # Deduplicate + COMBINED_MODULES=$(echo "$COMBINED_MODULES" | tr ',' '\n' | awk '!seen[$0]++' | paste -sd, -) + + # Get current PHP-CLI version + local CURRENT_PHP="" + CURRENT_PHP=$(is_tool_installed "php" 2>/dev/null) || true + + # CRITICAL: If wrong version is installed, remove it FIRST before any pinning + if [[ -n "$CURRENT_PHP" && "$CURRENT_PHP" != "$PHP_VERSION" ]]; then + msg_info "Removing conflicting PHP ${CURRENT_PHP} (need ${PHP_VERSION})" + stop_all_services 
"php.*-fpm" + $STD apt-get purge -y "php*" 2>/dev/null || true + $STD apt-get autoremove -y 2>/dev/null || true + fi + + # NOW create pinning for the desired version + mkdir -p /etc/apt/preferences.d + cat </etc/apt/preferences.d/php-pin +Package: php${PHP_VERSION}* +Pin: version ${PHP_VERSION}.* +Pin-Priority: 1001 + +Package: php[0-9].* +Pin: release o=packages.sury.org-php +Pin-Priority: -1 +EOF + + # Setup repository + prepare_repository_setup "php" "deb.sury.org-php" || { + msg_error "Failed to prepare PHP repository" + return 1 + } + + manage_tool_repository "php" "$PHP_VERSION" "" "https://packages.sury.org/debsuryorg-archive-keyring.deb" || { + msg_error "Failed to setup PHP repository" + return 1 + } + + ensure_apt_working || return 1 + $STD apt-get update + + # Get available PHP version from repository + local AVAILABLE_PHP_VERSION="" + AVAILABLE_PHP_VERSION=$(apt-cache show "php${PHP_VERSION}" 2>/dev/null | grep -m1 "^Version:" | awk '{print $2}' | cut -d- -f1) || true + + if [[ -z "$AVAILABLE_PHP_VERSION" ]]; then + msg_error "PHP ${PHP_VERSION} not found in configured repositories" + return 1 + fi + + # Build module list - without version pinning (preferences.d handles it) + local MODULE_LIST="php${PHP_VERSION}" + + IFS=',' read -ra MODULES <<<"$COMBINED_MODULES" + for mod in "${MODULES[@]}"; do + MODULE_LIST+=" php${PHP_VERSION}-${mod}" + done + + if [[ "$PHP_FPM" == "YES" ]]; then + MODULE_LIST+=" php${PHP_VERSION}-fpm" + fi + + # install apache2 with PHP support if requested + if [[ "$PHP_APACHE" == "YES" ]]; then + if ! 
dpkg -l 2>/dev/null | grep -q "libapache2-mod-php${PHP_VERSION}"; then + msg_info "Installing Apache with PHP ${PHP_VERSION} module" + install_packages_with_retry "apache2" || { + msg_error "Failed to install Apache" + return 1 + } + install_packages_with_retry "libapache2-mod-php${PHP_VERSION}" || { + msg_warn "Failed to install libapache2-mod-php${PHP_VERSION}, continuing without Apache module" + } + fi + fi + + # Install PHP packages (pinning via preferences.d ensures correct version) + msg_info "Installing PHP ${PHP_VERSION} packages" + if ! install_packages_with_retry $MODULE_LIST; then + msg_warn "Failed to install PHP packages, attempting individual installation" + + # Install main package first (critical) + install_packages_with_retry "php${PHP_VERSION}" || { + msg_error "Failed to install php${PHP_VERSION}" + return 1 + } + + # Try to install Apache module individually if requested + if [[ "$PHP_APACHE" == "YES" ]]; then + install_packages_with_retry "libapache2-mod-php${PHP_VERSION}" || { + msg_warn "Could not install libapache2-mod-php${PHP_VERSION}" + } + fi + + # Try to install modules individually - skip those that don't exist + for pkg in "${MODULES[@]}"; do + if apt-cache search "^php${PHP_VERSION}-${pkg}\$" 2>/dev/null | grep -q "^php${PHP_VERSION}-${pkg}"; then + install_packages_with_retry "php${PHP_VERSION}-${pkg}" || { + msg_warn "Could not install php${PHP_VERSION}-${pkg}" + } + fi + done + + if [[ "$PHP_FPM" == "YES" ]]; then + if apt-cache search "^php${PHP_VERSION}-fpm\$" 2>/dev/null | grep -q "^php${PHP_VERSION}-fpm"; then + install_packages_with_retry "php${PHP_VERSION}-fpm" || { + msg_warn "Could not install php${PHP_VERSION}-fpm" + } + fi + fi + fi + cache_installed_version "php" "$PHP_VERSION" + + # Patch all relevant php.ini files + local PHP_INI_PATHS=("/etc/php/${PHP_VERSION}/cli/php.ini") + [[ "$PHP_FPM" == "YES" ]] && PHP_INI_PATHS+=("/etc/php/${PHP_VERSION}/fpm/php.ini") + [[ "$PHP_APACHE" == "YES" ]] && 
PHP_INI_PATHS+=("/etc/php/${PHP_VERSION}/apache2/php.ini") + for ini in "${PHP_INI_PATHS[@]}"; do + if [[ -f "$ini" ]]; then + $STD sed -i "s|^memory_limit = .*|memory_limit = ${PHP_MEMORY_LIMIT}|" "$ini" + $STD sed -i "s|^upload_max_filesize = .*|upload_max_filesize = ${PHP_UPLOAD_MAX_FILESIZE}|" "$ini" + $STD sed -i "s|^post_max_size = .*|post_max_size = ${PHP_POST_MAX_SIZE}|" "$ini" + $STD sed -i "s|^max_execution_time = .*|max_execution_time = ${PHP_MAX_EXECUTION_TIME}|" "$ini" + fi + done + + # Patch Apache configuration if needed + if [[ "$PHP_APACHE" == "YES" ]]; then + for mod in $(ls /etc/apache2/mods-enabled/ 2>/dev/null | grep -E '^php[0-9]\.[0-9]\.conf$' | sed 's/\.conf//'); do + if [[ "$mod" != "php${PHP_VERSION}" ]]; then + $STD a2dismod "$mod" || true + fi + done + $STD a2enmod mpm_prefork + $STD a2enmod "php${PHP_VERSION}" + safe_service_restart apache2 || true + fi + + # Enable and restart PHP-FPM if requested + if [[ "$PHP_FPM" == "YES" ]]; then + if systemctl list-unit-files | grep -q "php${PHP_VERSION}-fpm.service"; then + $STD systemctl enable php${PHP_VERSION}-fpm + safe_service_restart php${PHP_VERSION}-fpm + fi + fi + + # Verify PHP installation - critical check + if ! command -v php >/dev/null 2>&1; then + msg_error "PHP installation verification failed - php command not found" + return 1 + fi + + local INSTALLED_VERSION=$(php -v 2>/dev/null | awk '/^PHP/{print $2}' | cut -d. 
-f1,2) + + # Critical: if major.minor doesn't match, fail and cleanup + if [[ "$INSTALLED_VERSION" != "$PHP_VERSION" ]]; then + msg_error "PHP version mismatch: requested ${PHP_VERSION} but got ${INSTALLED_VERSION}" + msg_error "This indicates a critical package installation issue" + # Don't cache wrong version + return 1 + fi + + cache_installed_version "php" "$INSTALLED_VERSION" + msg_ok "Setup PHP ${INSTALLED_VERSION}" +} + +# ------------------------------------------------------------------------------ +# Installs or upgrades PostgreSQL and optional extensions/modules. +# +# Description: +# - Detects existing PostgreSQL version +# - Dumps all databases before upgrade +# - Adds PGDG repo and installs specified version +# - Installs optional PG_MODULES (e.g. postgis, contrib) +# - Restores dumped data post-upgrade +# +# Variables: +# PG_VERSION - Major PostgreSQL version (e.g. 15, 16) (default: 16) +function setup_postgresql() { + local PG_VERSION="${PG_VERSION:-16}" + local PG_MODULES="${PG_MODULES:-}" + local DISTRO_ID DISTRO_CODENAME + DISTRO_ID=$(awk -F= '/^ID=/{print $2}' /etc/os-release | tr -d '"') + DISTRO_CODENAME=$(awk -F= '/^VERSION_CODENAME=/{print $2}' /etc/os-release) + + # Get currently installed version + local CURRENT_PG_VERSION="" + if command -v psql >/dev/null; then + CURRENT_PG_VERSION="$(psql -V 2>/dev/null | awk '{print $3}' | cut -d. 
-f1)" + fi + + # Scenario 1: Already at correct version + if [[ "$CURRENT_PG_VERSION" == "$PG_VERSION" ]]; then + msg_info "Update PostgreSQL $PG_VERSION" + ensure_apt_working || return 1 + + # Perform upgrade with retry logic (non-fatal if fails) + upgrade_packages_with_retry "postgresql-${PG_VERSION}" "postgresql-client-${PG_VERSION}" 2>/dev/null || true + cache_installed_version "postgresql" "$PG_VERSION" + msg_ok "Update PostgreSQL $PG_VERSION" + + # Still install modules if specified + if [[ -n "$PG_MODULES" ]]; then + IFS=',' read -ra MODULES <<<"$PG_MODULES" + for module in "${MODULES[@]}"; do + $STD apt install -y "postgresql-${PG_VERSION}-${module}" 2>/dev/null || true + done + fi + return 0 + fi + + # Scenario 2: Different version - backup, remove old, install new + if [[ -n "$CURRENT_PG_VERSION" ]]; then + msg_info "Upgrade PostgreSQL from $CURRENT_PG_VERSION to $PG_VERSION" + msg_info "Creating backup of PostgreSQL $CURRENT_PG_VERSION databases..." + $STD runuser -u postgres -- pg_dumpall >/var/lib/postgresql/backup_$(date +%F)_v${CURRENT_PG_VERSION}.sql || { + msg_error "Failed to backup PostgreSQL databases" + return 1 + } + $STD systemctl stop postgresql || true + $STD apt purge -y "postgresql-${CURRENT_PG_VERSION}" "postgresql-client-${CURRENT_PG_VERSION}" 2>/dev/null || true + else + msg_info "Setup PostgreSQL $PG_VERSION" + fi + + # Scenario 3: Fresh install or after removal - setup repo and install + prepare_repository_setup "pgdg" "postgresql" || { + msg_error "Failed to prepare PostgreSQL repository" + return 1 + } + + local SUITE + case "$DISTRO_CODENAME" in + trixie | forky | sid) + # For Debian Testing/Unstable, try PostgreSQL repo first, fallback to native packages + if verify_repo_available "https://apt.postgresql.org/pub/repos/apt" "trixie-pgdg"; then + SUITE="trixie-pgdg" + + setup_deb822_repo \ + "pgdg" \ + "https://www.postgresql.org/media/keys/ACCC4CF8.asc" \ + "https://apt.postgresql.org/pub/repos/apt" \ + "$SUITE" \ + "main" + + if 
! $STD apt update; then + msg_warn "Failed to update PostgreSQL repository, falling back to native packages" + SUITE="" + fi + else + SUITE="" + fi + + # If no repo or packages not installable, use native Debian packages + if [[ -z "$SUITE" ]] || ! apt-cache show "postgresql-${PG_VERSION}" 2>/dev/null | grep -q "Version:"; then + msg_info "Using native Debian packages for $DISTRO_CODENAME" + + # Install ssl-cert dependency if available + if apt-cache search "^ssl-cert$" 2>/dev/null | grep -q .; then + $STD apt install -y ssl-cert 2>/dev/null || true + fi + + if ! $STD apt install -y postgresql postgresql-client 2>/dev/null; then + msg_error "Failed to install native PostgreSQL packages" + return 1 + fi + + if ! command -v psql >/dev/null 2>&1; then + msg_error "PostgreSQL installed but psql command not found" + return 1 + fi + + # Restore database backup if we upgraded from previous version + if [[ -n "$CURRENT_PG_VERSION" ]]; then + msg_info "Restoring PostgreSQL databases from backup..." + $STD runuser -u postgres -- psql /dev/null || { + msg_warn "Failed to restore database backup - this may be expected for major version upgrades" + } + fi + + $STD systemctl enable --now postgresql 2>/dev/null || true + + # Get actual installed version + INSTALLED_VERSION="$(psql -V 2>/dev/null | awk '{print $3}' | cut -d. -f1)" + + # Add PostgreSQL binaries to PATH + if ! 
grep -q '/usr/lib/postgresql' /etc/environment 2>/dev/null; then + echo 'PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/lib/postgresql/'"${INSTALLED_VERSION}"'/bin"' >/etc/environment + fi + + cache_installed_version "postgresql" "$INSTALLED_VERSION" + msg_ok "Setup PostgreSQL $INSTALLED_VERSION (native)" + + # Install optional modules if specified + if [[ -n "$PG_MODULES" ]]; then + IFS=',' read -ra MODULES <<<"$PG_MODULES" + for module in "${MODULES[@]}"; do + $STD apt install -y "postgresql-${INSTALLED_VERSION}-${module}" 2>/dev/null || true + done + fi + return 0 + fi + ;; + *) + SUITE=$(get_fallback_suite "$DISTRO_ID" "$DISTRO_CODENAME" "https://apt.postgresql.org/pub/repos/apt") + SUITE="${SUITE}-pgdg" + ;; + esac + + setup_deb822_repo \ + "pgdg" \ + "https://www.postgresql.org/media/keys/ACCC4CF8.asc" \ + "https://apt.postgresql.org/pub/repos/apt" \ + "$SUITE" \ + "main" + + if ! $STD apt update; then + msg_error "APT update failed for PostgreSQL repository" + return 1 + fi + + # Install ssl-cert dependency if available + if apt-cache search "^ssl-cert$" 2>/dev/null | grep -q .; then + $STD apt install -y ssl-cert 2>/dev/null || true + fi + + # Try multiple PostgreSQL package patterns with retry logic + local pg_install_success=false + + if apt-cache search "^postgresql-${PG_VERSION}$" 2>/dev/null | grep -q . && + install_packages_with_retry "postgresql-${PG_VERSION}" "postgresql-client-${PG_VERSION}"; then + pg_install_success=true + fi + + if [[ "$pg_install_success" == false ]] && + apt-cache search "^postgresql-server-${PG_VERSION}$" 2>/dev/null | grep -q . && + $STD apt install -y "postgresql-server-${PG_VERSION}" "postgresql-client-${PG_VERSION}" 2>/dev/null; then + pg_install_success=true + fi + + if [[ "$pg_install_success" == false ]] && + apt-cache search "^postgresql$" 2>/dev/null | grep -q . 
&& + $STD apt install -y postgresql postgresql-client 2>/dev/null; then + pg_install_success=true + fi + + if [[ "$pg_install_success" == false ]]; then + msg_error "PostgreSQL package not available for suite ${SUITE}" + return 1 + fi + + if ! command -v psql >/dev/null 2>&1; then + msg_error "PostgreSQL installed but psql command not found" + return 1 + fi + + # Restore database backup if we upgraded from previous version + if [[ -n "$CURRENT_PG_VERSION" ]]; then + msg_info "Restoring PostgreSQL databases from backup..." + $STD runuser -u postgres -- psql /dev/null || { + msg_warn "Failed to restore database backup - this may be expected for major version upgrades" + } + fi + + $STD systemctl enable --now postgresql 2>/dev/null || true + + # Add PostgreSQL binaries to PATH + if ! grep -q '/usr/lib/postgresql' /etc/environment 2>/dev/null; then + echo 'PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/lib/postgresql/'"${PG_VERSION}"'/bin"' >/etc/environment + fi + + cache_installed_version "postgresql" "$PG_VERSION" + msg_ok "Setup PostgreSQL $PG_VERSION" + + # Install optional modules + if [[ -n "$PG_MODULES" ]]; then + IFS=',' read -ra MODULES <<<"$PG_MODULES" + for module in "${MODULES[@]}"; do + $STD apt install -y "postgresql-${PG_VERSION}-${module}" 2>/dev/null || true + done + fi +} + +# ------------------------------------------------------------------------------ +# Creates PostgreSQL database with user and optional extensions +# +# Description: +# - Creates PostgreSQL role with login and password +# - Creates database with UTF8 encoding and template0 +# - Installs optional extensions (postgis, pgvector, etc.) 
+# - Configures ALTER ROLE settings for Django/Rails compatibility +# - Saves credentials to file +# - Exports variables for use in calling script +# +# Usage: +# PG_DB_NAME="myapp_db" PG_DB_USER="myapp_user" setup_postgresql_db +# PG_DB_NAME="immich" PG_DB_USER="immich" PG_DB_EXTENSIONS="pgvector" setup_postgresql_db +# PG_DB_NAME="ghostfolio" PG_DB_USER="ghostfolio" PG_DB_GRANT_SUPERUSER="true" setup_postgresql_db +# PG_DB_NAME="adventurelog" PG_DB_USER="adventurelog" PG_DB_EXTENSIONS="postgis" setup_postgresql_db +# +# Variables: +# PG_DB_NAME - Database name (required) +# PG_DB_USER - Database user (required) +# PG_DB_PASS - Database password (optional, auto-generated if empty) +# PG_DB_EXTENSIONS - Comma-separated list of extensions (optional, e.g. "postgis,pgvector") +# PG_DB_GRANT_SUPERUSER - Grant SUPERUSER privilege (optional, "true" to enable, security risk!) +# PG_DB_SCHEMA_PERMS - Grant schema-level permissions (optional, "true" to enable) +# PG_DB_SKIP_ALTER_ROLE - Skip ALTER ROLE settings (optional, "true" to skip) +# PG_DB_CREDS_FILE - Credentials file path (optional, default: ~/${APPLICATION}.creds) +# +# Exports: +# PG_DB_NAME, PG_DB_USER, PG_DB_PASS - For use in calling script +# ------------------------------------------------------------------------------ + +function setup_postgresql_db() { + # Validation + if [[ -z "${PG_DB_NAME:-}" || -z "${PG_DB_USER:-}" ]]; then + msg_error "PG_DB_NAME and PG_DB_USER must be set before calling setup_postgresql_db" + return 1 + fi + + # Generate password if not provided + if [[ -z "${PG_DB_PASS:-}" ]]; then + PG_DB_PASS=$(openssl rand -base64 18 | tr -dc 'a-zA-Z0-9' | head -c13) + fi + + msg_info "Setting up PostgreSQL Database" + $STD sudo -u postgres psql -c "CREATE ROLE $PG_DB_USER WITH LOGIN PASSWORD '$PG_DB_PASS';" + $STD sudo -u postgres psql -c "CREATE DATABASE $PG_DB_NAME WITH OWNER $PG_DB_USER ENCODING 'UTF8' TEMPLATE template0;" + + # Install extensions (comma-separated) + if [[ -n 
"${PG_DB_EXTENSIONS:-}" ]]; then + IFS=',' read -ra EXT_LIST <<<"${PG_DB_EXTENSIONS:-}" + for ext in "${EXT_LIST[@]}"; do + ext=$(echo "$ext" | xargs) # Trim whitespace + $STD sudo -u postgres psql -d "$PG_DB_NAME" -c "CREATE EXTENSION IF NOT EXISTS $ext;" + done + fi + + # ALTER ROLE settings for Django/Rails compatibility (unless skipped) + if [[ "${PG_DB_SKIP_ALTER_ROLE:-}" != "true" ]]; then + $STD sudo -u postgres psql -c "ALTER ROLE $PG_DB_USER SET client_encoding TO 'utf8';" + $STD sudo -u postgres psql -c "ALTER ROLE $PG_DB_USER SET default_transaction_isolation TO 'read committed';" + $STD sudo -u postgres psql -c "ALTER ROLE $PG_DB_USER SET timezone TO 'UTC';" + fi + + # Schema permissions (if requested) + if [[ "${PG_DB_SCHEMA_PERMS:-}" == "true" ]]; then + $STD sudo -u postgres psql -c "GRANT ALL PRIVILEGES ON DATABASE $PG_DB_NAME TO $PG_DB_USER;" + $STD sudo -u postgres psql -c "ALTER USER $PG_DB_USER CREATEDB;" + $STD sudo -u postgres psql -d "$PG_DB_NAME" -c "GRANT ALL ON SCHEMA public TO $PG_DB_USER;" + $STD sudo -u postgres psql -d "$PG_DB_NAME" -c "GRANT CREATE ON SCHEMA public TO $PG_DB_USER;" + $STD sudo -u postgres psql -d "$PG_DB_NAME" -c "ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON TABLES TO $PG_DB_USER;" + $STD sudo -u postgres psql -d "$PG_DB_NAME" -c "ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON SEQUENCES TO $PG_DB_USER;" + fi + + # Superuser grant (if requested - WARNING!) 
+ if [[ "${PG_DB_GRANT_SUPERUSER:-}" == "true" ]]; then + msg_warn "Granting SUPERUSER privilege (security risk!)" + $STD sudo -u postgres psql -c "GRANT ALL PRIVILEGES ON DATABASE $PG_DB_NAME to $PG_DB_USER;" + $STD sudo -u postgres psql -c "ALTER USER $PG_DB_USER WITH SUPERUSER;" + fi + + # Save credentials + local app_name="${APPLICATION,,}" + local CREDS_FILE="${PG_DB_CREDS_FILE:-${HOME}/${app_name}.creds}" + { + echo "PostgreSQL Credentials" + echo "Database: $PG_DB_NAME" + echo "User: $PG_DB_USER" + echo "Password: $PG_DB_PASS" + } >>"$CREDS_FILE" + + msg_ok "Set up PostgreSQL Database" + + # Export for use in calling script + export PG_DB_NAME + export PG_DB_USER + export PG_DB_PASS +} + +# ------------------------------------------------------------------------------ +# Installs rbenv and ruby-build, installs Ruby and optionally Rails. +# +# Description: +# - Downloads rbenv and ruby-build from GitHub +# - Compiles and installs target Ruby version +# - Optionally installs Rails via gem +# +# Variables: +# RUBY_VERSION - Ruby version to install (default: 3.4.4) +# RUBY_INSTALL_RAILS - true/false to install Rails (default: true) +# ------------------------------------------------------------------------------ + +function setup_ruby() { + local RUBY_VERSION="${RUBY_VERSION:-3.4.4}" + local RUBY_INSTALL_RAILS="${RUBY_INSTALL_RAILS:-true}" + local RBENV_DIR="$HOME/.rbenv" + local RBENV_BIN="$RBENV_DIR/bin/rbenv" + local PROFILE_FILE="$HOME/.profile" + local TMP_DIR=$(mktemp -d) + + # Get currently installed Ruby version + local CURRENT_RUBY_VERSION="" + if [[ -x "$RBENV_BIN" ]]; then + CURRENT_RUBY_VERSION=$("$RBENV_BIN" global 2>/dev/null || echo "") + fi + + # Scenario 1: Already at correct Ruby version + if [[ "$CURRENT_RUBY_VERSION" == "$RUBY_VERSION" ]]; then + msg_info "Update Ruby $RUBY_VERSION" + cache_installed_version "ruby" "$RUBY_VERSION" + msg_ok "Update Ruby $RUBY_VERSION" + return 0 + fi + + # Scenario 2: Different version - reinstall + if [[ -n 
"$CURRENT_RUBY_VERSION" ]]; then + msg_info "Upgrade Ruby from $CURRENT_RUBY_VERSION to $RUBY_VERSION" + else + msg_info "Setup Ruby $RUBY_VERSION" + fi + + ensure_apt_working || return 1 + + # Install build dependencies with fallbacks + local ruby_deps=() + local dep_variations=( + "jq" + "autoconf" + "patch" + "build-essential" + "libssl-dev" + "libyaml-dev" + "libreadline-dev|libreadline6-dev" + "zlib1g-dev" + "libgmp-dev" + "libncurses-dev|libncurses5-dev" + "libffi-dev" + "libgdbm-dev" + "libdb-dev" + "uuid-dev" + ) + + for dep_pattern in "${dep_variations[@]}"; do + if [[ "$dep_pattern" == *"|"* ]]; then + IFS='|' read -ra variations <<<"$dep_pattern" + for var in "${variations[@]}"; do + if apt-cache search "^${var}$" 2>/dev/null | grep -q .; then + ruby_deps+=("$var") + break + fi + done + else + if apt-cache search "^${dep_pattern}$" 2>/dev/null | grep -q .; then + ruby_deps+=("$dep_pattern") + fi + fi + done + + if [[ ${#ruby_deps[@]} -gt 0 ]]; then + $STD apt install -y "${ruby_deps[@]}" 2>/dev/null || true + else + msg_error "No Ruby build dependencies available" + rm -rf "$TMP_DIR" + return 1 + fi + + # Download and build rbenv if needed + if [[ ! 
-x "$RBENV_BIN" ]]; then + local RBENV_RELEASE + local rbenv_json + rbenv_json=$(curl -fsSL --max-time 15 https://api.github.com/repos/rbenv/rbenv/releases/latest 2>/dev/null || echo "") + + if [[ -z "$rbenv_json" ]]; then + msg_error "Failed to fetch latest rbenv version from GitHub" + rm -rf "$TMP_DIR" + return 1 + fi + + RBENV_RELEASE=$(echo "$rbenv_json" | jq -r '.tag_name' 2>/dev/null | sed 's/^v//' || echo "") + + if [[ -z "$RBENV_RELEASE" ]]; then + msg_error "Could not parse rbenv version from GitHub response" + rm -rf "$TMP_DIR" + return 1 + fi + + curl -fsSL "https://github.com/rbenv/rbenv/archive/refs/tags/v${RBENV_RELEASE}.tar.gz" -o "$TMP_DIR/rbenv.tar.gz" || { + msg_error "Failed to download rbenv" + rm -rf "$TMP_DIR" + return 1 + } + + tar -xzf "$TMP_DIR/rbenv.tar.gz" -C "$TMP_DIR" || { + msg_error "Failed to extract rbenv" + rm -rf "$TMP_DIR" + return 1 + } + + mkdir -p "$RBENV_DIR" + cp -r "$TMP_DIR/rbenv-${RBENV_RELEASE}/." "$RBENV_DIR/" + (cd "$RBENV_DIR" && src/configure && $STD make -C src) || { + msg_error "Failed to build rbenv" + rm -rf "$TMP_DIR" + return 1 + } + + # Setup profile + if ! grep -q 'rbenv init' "$PROFILE_FILE" 2>/dev/null; then + echo 'export PATH="$HOME/.rbenv/bin:$PATH"' >>"$PROFILE_FILE" + echo 'eval "$(rbenv init -)"' >>"$PROFILE_FILE" + fi + fi + + # Install ruby-build plugin + if [[ ! 
-d "$RBENV_DIR/plugins/ruby-build" ]]; then + local RUBY_BUILD_RELEASE + local ruby_build_json + ruby_build_json=$(curl -fsSL --max-time 15 https://api.github.com/repos/rbenv/ruby-build/releases/latest 2>/dev/null || echo "") + + if [[ -z "$ruby_build_json" ]]; then + msg_error "Failed to fetch latest ruby-build version from GitHub" + rm -rf "$TMP_DIR" + return 1 + fi + + RUBY_BUILD_RELEASE=$(echo "$ruby_build_json" | jq -r '.tag_name' 2>/dev/null | sed 's/^v//' || echo "") + + if [[ -z "$RUBY_BUILD_RELEASE" ]]; then + msg_error "Could not parse ruby-build version from GitHub response" + rm -rf "$TMP_DIR" + return 1 + fi + + curl -fsSL "https://github.com/rbenv/ruby-build/archive/refs/tags/v${RUBY_BUILD_RELEASE}.tar.gz" -o "$TMP_DIR/ruby-build.tar.gz" || { + msg_error "Failed to download ruby-build" + rm -rf "$TMP_DIR" + return 1 + } + + tar -xzf "$TMP_DIR/ruby-build.tar.gz" -C "$TMP_DIR" || { + msg_error "Failed to extract ruby-build" + rm -rf "$TMP_DIR" + return 1 + } + + mkdir -p "$RBENV_DIR/plugins/ruby-build" + cp -r "$TMP_DIR/ruby-build-${RUBY_BUILD_RELEASE}/." "$RBENV_DIR/plugins/ruby-build/" + fi + + # Setup PATH and install Ruby version + export PATH="$RBENV_DIR/bin:$PATH" + eval "$("$RBENV_BIN" init - bash)" 2>/dev/null || true + + if ! 
"$RBENV_BIN" versions --bare 2>/dev/null | grep -qx "$RUBY_VERSION"; then + $STD "$RBENV_BIN" install "$RUBY_VERSION" || { + msg_error "Failed to install Ruby $RUBY_VERSION" + rm -rf "$TMP_DIR" + return 1 + } + fi + + "$RBENV_BIN" global "$RUBY_VERSION" || { + msg_error "Failed to set Ruby $RUBY_VERSION as global version" + rm -rf "$TMP_DIR" + return 1 + } + + hash -r + + # Install Rails if requested + if [[ "$RUBY_INSTALL_RAILS" == "true" ]]; then + $STD gem install rails || { + msg_warn "Failed to install Rails - Ruby installation successful" + } + fi + + rm -rf "$TMP_DIR" + cache_installed_version "ruby" "$RUBY_VERSION" + msg_ok "Setup Ruby $RUBY_VERSION" +} + +# ------------------------------------------------------------------------------ +# Installs or upgrades ClickHouse database server. +# +# Description: +# - Adds ClickHouse official repository +# - Installs specified version +# - Configures systemd service +# - Supports Debian/Ubuntu with fallback mechanism +# +# Variables: +# CLICKHOUSE_VERSION - ClickHouse version to install (default: latest) +# ------------------------------------------------------------------------------ + +function setup_clickhouse() { + local CLICKHOUSE_VERSION="${CLICKHOUSE_VERSION:-latest}" + local DISTRO_ID DISTRO_CODENAME + DISTRO_ID=$(awk -F= '/^ID=/{print $2}' /etc/os-release | tr -d '"') + DISTRO_CODENAME=$(awk -F= '/^VERSION_CODENAME=/{print $2}' /etc/os-release) + + # Resolve "latest" version + if [[ "$CLICKHOUSE_VERSION" == "latest" ]]; then + CLICKHOUSE_VERSION=$(curl -fsSL --max-time 15 https://packages.clickhouse.com/tgz/stable/ 2>/dev/null | + grep -oP 'clickhouse-common-static-\K[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+' | + sort -V | tail -n1 || echo "") + + # Fallback to GitHub API if package server failed + if [[ -z "$CLICKHOUSE_VERSION" ]]; then + CLICKHOUSE_VERSION=$(curl -fsSL --max-time 15 https://api.github.com/repos/ClickHouse/ClickHouse/releases/latest 2>/dev/null | + grep -oP 
'"tag_name":\s*"v\K[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+' | head -n1 || echo "") + fi + + [[ -z "$CLICKHOUSE_VERSION" ]] && { + msg_error "Could not determine latest ClickHouse version from any source" + return 1 + } + fi + + # Get currently installed version + local CURRENT_VERSION="" + if command -v clickhouse-server >/dev/null 2>&1; then + CURRENT_VERSION=$(clickhouse-server --version 2>/dev/null | grep -oP 'version \K[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+' | head -n1) + fi + + # Scenario 1: Already at target version - just update packages + if [[ -n "$CURRENT_VERSION" && "$CURRENT_VERSION" == "$CLICKHOUSE_VERSION" ]]; then + msg_info "Update ClickHouse $CLICKHOUSE_VERSION" + ensure_apt_working || return 1 + + # Perform upgrade with retry logic (non-fatal if fails) + upgrade_packages_with_retry "clickhouse-server" "clickhouse-client" || true + cache_installed_version "clickhouse" "$CLICKHOUSE_VERSION" + msg_ok "Update ClickHouse $CLICKHOUSE_VERSION" + return 0 + fi + + # Scenario 2: Different version - clean upgrade + if [[ -n "$CURRENT_VERSION" && "$CURRENT_VERSION" != "$CLICKHOUSE_VERSION" ]]; then + msg_info "Upgrade ClickHouse from $CURRENT_VERSION to $CLICKHOUSE_VERSION" + stop_all_services "clickhouse-server" + remove_old_tool_version "clickhouse" + else + msg_info "Setup ClickHouse $CLICKHOUSE_VERSION" + fi + + ensure_dependencies apt-transport-https ca-certificates dirmngr gnupg + + # Prepare repository (cleanup + validation) + prepare_repository_setup "clickhouse" || { + msg_error "Failed to prepare ClickHouse repository" + return 1 + } + + # Setup repository (ClickHouse uses 'stable' suite) + setup_deb822_repo \ + "clickhouse" \ + "https://packages.clickhouse.com/rpm/lts/repodata/repomd.xml.key" \ + "https://packages.clickhouse.com/deb" \ + "stable" \ + "main" + + # Install packages with retry logic + export DEBIAN_FRONTEND=noninteractive + $STD apt update || { + msg_error "APT update failed for ClickHouse repository" + return 1 + } + + install_packages_with_retry 
"clickhouse-server" "clickhouse-client" || { + msg_error "Failed to install ClickHouse packages" + return 1 + } + + # Verify installation + if ! command -v clickhouse-server >/dev/null 2>&1; then + msg_error "ClickHouse installation completed but clickhouse-server command not found" + return 1 + fi + + # Setup data directory + mkdir -p /var/lib/clickhouse + if id clickhouse >/dev/null 2>&1; then + chown -R clickhouse:clickhouse /var/lib/clickhouse + fi + + # Enable and start service + $STD systemctl enable clickhouse-server || { + msg_warn "Failed to enable clickhouse-server service" + } + safe_service_restart clickhouse-server || true + + cache_installed_version "clickhouse" "$CLICKHOUSE_VERSION" + msg_ok "Setup ClickHouse $CLICKHOUSE_VERSION" +} + +# ------------------------------------------------------------------------------ +# Installs Rust toolchain and optional global crates via cargo. +# +# Description: +# - Installs rustup (if missing) +# - Installs or updates desired Rust toolchain (stable, nightly, or versioned) +# - Installs or updates specified global crates using `cargo install` +# +# Notes: +# - Skips crate install if exact version is already present +# - Updates crate if newer version or different version is requested +# +# Variables: +# RUST_TOOLCHAIN - Rust toolchain to install (default: stable) +# RUST_CRATES - Comma-separated list of crates (e.g. "cargo-edit,wasm-pack@0.12.1") +# ------------------------------------------------------------------------------ + +function setup_rust() { + local RUST_TOOLCHAIN="${RUST_TOOLCHAIN:-stable}" + local RUST_CRATES="${RUST_CRATES:-}" + local CARGO_BIN="${HOME}/.cargo/bin" + + # Get currently installed version + local CURRENT_VERSION="" + if command -v rustc &>/dev/null; then + CURRENT_VERSION=$(rustc --version 2>/dev/null | awk '{print $2}') + fi + + # Scenario 1: Rustup not installed - fresh install + if ! 
command -v rustup &>/dev/null; then + msg_info "Setup Rust ($RUST_TOOLCHAIN)" + curl -fsSL https://sh.rustup.rs | $STD sh -s -- -y --default-toolchain "$RUST_TOOLCHAIN" || { + msg_error "Failed to install Rust" + return 1 + } + export PATH="$CARGO_BIN:$PATH" + echo 'export PATH="$HOME/.cargo/bin:$PATH"' >>"$HOME/.profile" + + # Verify installation + if ! command -v rustc >/dev/null 2>&1; then + msg_error "Rust binary not found after installation" + return 1 + fi + + local RUST_VERSION=$(rustc --version 2>/dev/null | awk '{print $2}') + if [[ -z "$RUST_VERSION" ]]; then + msg_error "Failed to determine Rust version" + return 1 + fi + + cache_installed_version "rust" "$RUST_VERSION" + msg_ok "Setup Rust $RUST_VERSION" + else + # Scenario 2: Rustup already installed - update/maintain + msg_info "Update Rust ($RUST_TOOLCHAIN)" + + # Ensure default toolchain is set + $STD rustup default "$RUST_TOOLCHAIN" 2>/dev/null || { + # If default fails, install the toolchain first + $STD rustup install "$RUST_TOOLCHAIN" || { + msg_error "Failed to install Rust toolchain $RUST_TOOLCHAIN" + return 1 + } + $STD rustup default "$RUST_TOOLCHAIN" || { + msg_error "Failed to set default Rust toolchain" + return 1 + } + } + + # Update to latest patch version + $STD rustup update "$RUST_TOOLCHAIN" /dev/null | awk '{print $2}') + if [[ -z "$RUST_VERSION" ]]; then + msg_error "Failed to determine Rust version after update" + return 1 + fi + + cache_installed_version "rust" "$RUST_VERSION" + msg_ok "Update Rust $RUST_VERSION" + fi + + # Install global crates + if [[ -n "$RUST_CRATES" ]]; then + msg_info "Processing Rust crates: $RUST_CRATES" + IFS=',' read -ra CRATES <<<"$RUST_CRATES" + for crate in "${CRATES[@]}"; do + crate=$(echo "$crate" | xargs) # trim whitespace + [[ -z "$crate" ]] && continue # skip empty entries + + local NAME VER INSTALLED_VER CRATE_LIST + if [[ "$crate" == *"@"* ]]; then + NAME="${crate%@*}" + VER="${crate##*@}" + else + NAME="$crate" + VER="" + fi + + # Get list of 
installed crates once + CRATE_LIST=$(cargo install --list 2>/dev/null || echo "") + + # Check if already installed + if echo "$CRATE_LIST" | grep -q "^${NAME} "; then + INSTALLED_VER=$(echo "$CRATE_LIST" | grep "^${NAME} " | head -1 | awk '{print $2}' | tr -d 'v:') + + if [[ -n "$VER" && "$VER" != "$INSTALLED_VER" ]]; then + msg_info "Upgrading $NAME from v$INSTALLED_VER to v$VER" + $STD cargo install "$NAME" --version "$VER" --force || { + msg_error "Failed to install $NAME@$VER" + return 1 + } + msg_ok "Upgraded $NAME to v$VER" + elif [[ -z "$VER" ]]; then + msg_info "Upgrading $NAME to latest" + $STD cargo install "$NAME" --force || { + msg_error "Failed to upgrade $NAME" + return 1 + } + local NEW_VER=$(cargo install --list 2>/dev/null | grep "^${NAME} " | head -1 | awk '{print $2}' | tr -d 'v:') + msg_ok "Upgraded $NAME to v$NEW_VER" + else + msg_ok "$NAME v$INSTALLED_VER already installed" + fi + else + msg_info "Installing $NAME${VER:+@$VER}" + if [[ -n "$VER" ]]; then + $STD cargo install "$NAME" --version "$VER" || { + msg_error "Failed to install $NAME@$VER" + return 1 + } + msg_ok "Installed $NAME v$VER" + else + $STD cargo install "$NAME" || { + msg_error "Failed to install $NAME" + return 1 + } + local NEW_VER=$(cargo install --list 2>/dev/null | grep "^${NAME} " | head -1 | awk '{print $2}' | tr -d 'v:') + msg_ok "Installed $NAME v$NEW_VER" + fi + fi + done + msg_ok "Processed Rust crates" + fi +} + +# ------------------------------------------------------------------------------ +# Installs or upgrades uv (Python package manager) from GitHub releases. +# - Downloads platform-specific tarball (no install.sh!) 
+# - Extracts uv binary +# - Places it in /usr/local/bin +# - Optionally installs a specific Python version via uv +# ------------------------------------------------------------------------------ + +function setup_uv() { + local UV_BIN="/usr/local/bin/uv" + local UVX_BIN="/usr/local/bin/uvx" + local TMP_DIR=$(mktemp -d) + local CACHED_VERSION + + # trap for TMP Cleanup + trap "rm -rf '$TMP_DIR'" EXIT + + CACHED_VERSION=$(get_cached_version "uv") + + # Architecture Detection + local ARCH=$(uname -m) + local OS_TYPE="" + local UV_TAR="" + + if grep -qi "alpine" /etc/os-release; then + OS_TYPE="musl" + else + OS_TYPE="gnu" + fi + + case "$ARCH" in + x86_64) + UV_TAR="uv-x86_64-unknown-linux-${OS_TYPE}.tar.gz" + ;; + aarch64) + UV_TAR="uv-aarch64-unknown-linux-${OS_TYPE}.tar.gz" + ;; + i686) + UV_TAR="uv-i686-unknown-linux-${OS_TYPE}.tar.gz" + ;; + *) + msg_error "Unsupported architecture: $ARCH (supported: x86_64, aarch64, i686)" + return 1 + ;; + esac + + ensure_dependencies jq + + # Fetch latest version + local releases_json + releases_json=$(curl -fsSL --max-time 15 \ + "https://api.github.com/repos/astral-sh/uv/releases/latest" 2>/dev/null || echo "") + + if [[ -z "$releases_json" ]]; then + msg_error "Could not fetch latest uv version from GitHub API" + return 1 + fi + + local LATEST_VERSION + LATEST_VERSION=$(echo "$releases_json" | jq -r '.tag_name' 2>/dev/null | sed 's/^v//') + + if [[ -z "$LATEST_VERSION" ]]; then + msg_error "Could not parse uv version from GitHub API response" + return 1 + fi + + # Get currently installed version + local INSTALLED_VERSION="" + if [[ -x "$UV_BIN" ]]; then + INSTALLED_VERSION=$("$UV_BIN" --version 2>/dev/null | awk '{print $2}') + fi + + # Scenario 1: Already at latest version + if [[ -n "$INSTALLED_VERSION" && "$INSTALLED_VERSION" == "$LATEST_VERSION" ]]; then + cache_installed_version "uv" "$LATEST_VERSION" + + # Check if uvx is needed and missing + if [[ "${USE_UVX:-NO}" == "YES" ]] && [[ ! 
-x "$UVX_BIN" ]]; then + msg_info "Installing uvx wrapper" + _install_uvx_wrapper || return 1 + msg_ok "uvx wrapper installed" + fi + + return 0 + fi + + # Scenario 2: New install or upgrade + if [[ -n "$INSTALLED_VERSION" && "$INSTALLED_VERSION" != "$LATEST_VERSION" ]]; then + msg_info "Upgrade uv from $INSTALLED_VERSION to $LATEST_VERSION" + else + msg_info "Setup uv $LATEST_VERSION" + fi + + local UV_URL="https://github.com/astral-sh/uv/releases/download/${LATEST_VERSION}/${UV_TAR}" + + $STD curl -fsSL "$UV_URL" -o "$TMP_DIR/uv.tar.gz" || { + msg_error "Failed to download uv from $UV_URL" + return 1 + } + + # Extract + $STD tar -xzf "$TMP_DIR/uv.tar.gz" -C "$TMP_DIR" || { + msg_error "Failed to extract uv" + return 1 + } + + # Find and install uv binary (tarball extracts to uv-VERSION-ARCH/ directory) + local UV_BINARY=$(find "$TMP_DIR" -name "uv" -type f -executable | head -n1) + if [[ ! -f "$UV_BINARY" ]]; then + msg_error "Could not find uv binary in extracted tarball" + return 1 + fi + + $STD install -m 755 "$UV_BINARY" "$UV_BIN" || { + msg_error "Failed to install uv binary" + return 1 + } + + ensure_usr_local_bin_persist + export PATH="/usr/local/bin:$PATH" + + # Optional: Install uvx wrapper + if [[ "${USE_UVX:-NO}" == "YES" ]]; then + msg_info "Installing uvx wrapper" + _install_uvx_wrapper || { + msg_error "Failed to install uvx wrapper" + return 1 + } + msg_ok "uvx wrapper installed" + fi + + # Optional: Generate shell completions + $STD uv generate-shell-completion bash >/etc/bash_completion.d/uv 2>/dev/null || true + if [[ -d /usr/share/zsh/site-functions ]]; then + $STD uv generate-shell-completion zsh >/usr/share/zsh/site-functions/_uv 2>/dev/null || true + fi + + # Optional: Install specific Python version if requested + if [[ -n "${PYTHON_VERSION:-}" ]]; then + msg_info "Installing Python $PYTHON_VERSION via uv" + $STD uv python install "$PYTHON_VERSION" || { + msg_error "Failed to install Python $PYTHON_VERSION" + return 1 + } + msg_ok "Python 
$PYTHON_VERSION installed" + fi + + cache_installed_version "uv" "$LATEST_VERSION" + msg_ok "Setup uv $LATEST_VERSION" +} + +# Helper function to install uvx wrapper +_install_uvx_wrapper() { + local UVX_BIN="/usr/local/bin/uvx" + + cat >"$UVX_BIN" <<'EOF' +#!/bin/bash +# uvx - Run Python applications from PyPI as command-line tools +# Wrapper for: uv tool run +exec /usr/local/bin/uv tool run "$@" +EOF + + chmod +x "$UVX_BIN" + return 0 +} + +# ------------------------------------------------------------------------------ +# Installs or updates yq (mikefarah/yq - Go version). +# +# Description: +# - Checks if yq is installed and from correct source +# - Compares with latest release on GitHub +# - Updates if outdated or wrong implementation +# ------------------------------------------------------------------------------ + +function setup_yq() { + local TMP_DIR=$(mktemp -d) + local BINARY_PATH="/usr/local/bin/yq" + local GITHUB_REPO="mikefarah/yq" + + ensure_dependencies jq + ensure_usr_local_bin_persist + + # Remove non-mikefarah implementations + if command -v yq &>/dev/null; then + if ! 
yq --version 2>&1 | grep -q 'mikefarah'; then + rm -f "$(command -v yq)" + fi + fi + + local LATEST_VERSION + local releases_json + releases_json=$(curl -fsSL --max-time 15 "https://api.github.com/repos/${GITHUB_REPO}/releases/latest" 2>/dev/null || echo "") + + if [[ -z "$releases_json" ]]; then + msg_error "Could not fetch latest yq version from GitHub API" + rm -rf "$TMP_DIR" + return 1 + fi + + LATEST_VERSION=$(echo "$releases_json" | jq -r '.tag_name' 2>/dev/null | sed 's/^v//' || echo "") + + if [[ -z "$LATEST_VERSION" ]]; then + msg_error "Could not parse yq version from GitHub API response" + rm -rf "$TMP_DIR" + return 1 + fi + + # Get currently installed version + local INSTALLED_VERSION="" + if command -v yq &>/dev/null && yq --version 2>&1 | grep -q 'mikefarah'; then + INSTALLED_VERSION=$(yq --version 2>/dev/null | awk '{print $NF}' | sed 's/^v//') + fi + + # Scenario 1: Already at latest version + if [[ -n "$INSTALLED_VERSION" && "$INSTALLED_VERSION" == "$LATEST_VERSION" ]]; then + cache_installed_version "yq" "$LATEST_VERSION" + rm -rf "$TMP_DIR" + return 0 + fi + + # Scenario 2: New install or upgrade + if [[ -n "$INSTALLED_VERSION" && "$INSTALLED_VERSION" != "$LATEST_VERSION" ]]; then + msg_info "Upgrade yq from $INSTALLED_VERSION to $LATEST_VERSION" + else + msg_info "Setup yq $LATEST_VERSION" + fi + + curl -fsSL "https://github.com/${GITHUB_REPO}/releases/download/v${LATEST_VERSION}/yq_linux_amd64" -o "$TMP_DIR/yq" || { + msg_error "Failed to download yq" + rm -rf "$TMP_DIR" + return 1 + } + + chmod +x "$TMP_DIR/yq" + mv "$TMP_DIR/yq" "$BINARY_PATH" || { + msg_error "Failed to install yq" + rm -rf "$TMP_DIR" + return 1 + } + + rm -rf "$TMP_DIR" + hash -r + + local FINAL_VERSION + FINAL_VERSION=$("$BINARY_PATH" --version 2>/dev/null | awk '{print $NF}' | sed 's/^v//') + cache_installed_version "yq" "$FINAL_VERSION" + msg_ok "Setup yq $FINAL_VERSION" +} diff --git a/misc/tools.func.md b/misc/tools.func.md new file mode 100644 index 
000000000..6a0b53734 --- /dev/null +++ b/misc/tools.func.md @@ -0,0 +1,1283 @@ +# Tools.func Wiki + +A comprehensive collection of helper functions for robust package management and repository management in Debian/Ubuntu-based systems. + +--- + +## 📋 Table of Contents + +- [Overview](#overview) +- [Core Helper Functions](#core-helper-functions) +- [Repository Management](#repository-management) +- [Package Management](#package-management) +- [Tool Installation Functions](#tool-installation-functions) +- [GitHub Integration](#github-integration) +- [System Utilities](#system-utilities) +- [Container Setup Functions](#container-setup-functions) + +--- + +## Overview + +This function library provides: + +- ✅ Automatic retry logic for APT/network failures +- ✅ Unified keyring cleanup from all 3 locations +- ✅ Legacy installation cleanup (nvm, rbenv, rustup) +- ✅ OS-upgrade-safe repository preparation +- ✅ Service pattern matching for multi-version tools + +### Usage in Install Scripts + +```bash +source /dev/stdin <<< "$FUNCTIONS" # Load from build.func +prepare_repository_setup "mysql" +install_packages_with_retry "mysql-server" "mysql-client" +``` + +--- + +## Core Helper Functions + +### `cache_installed_version()` + +**Purpose**: Caches installed version to avoid repeated checks. + +**Parameters**: +- `$1` - Application name +- `$2` - Version string + +**Example**: +```bash +cache_installed_version "nodejs" "22.0.0" +``` + +--- + +### `get_cached_version()` + +**Purpose**: Retrieves cached version of an application. + +**Parameters**: +- `$1` - Application name + +**Returns**: Version string or empty if not cached + +**Example**: +```bash +version=$(get_cached_version "nodejs") +``` + +--- + +### `cleanup_tool_keyrings()` + +**Purpose**: Removes ALL keyring files for specified tools from all 3 locations. 
+ +**Parameters**: +- `$@` - Tool name patterns (supports wildcards) + +**Example**: +```bash +cleanup_tool_keyrings "mariadb" "mysql" "postgresql" +``` + +--- + +### `stop_all_services()` + +**Purpose**: Stops and disables all service instances matching a pattern. + +**Parameters**: +- `$@` - Service name patterns (supports wildcards) + +**Example**: +```bash +stop_all_services "php*-fpm" "mysql" "mariadb" +``` + +--- + +### `verify_tool_version()` + +**Purpose**: Verifies installed tool version matches expected version. + +**Parameters**: +- `$1` - Tool name +- `$2` - Expected version +- `$3` - Installed version + +**Returns**: 0 if match, 1 if mismatch + +**Example**: +```bash +verify_tool_version "nodejs" "22" "$(node -v | grep -oP '^v\K[0-9]+')" +``` + +--- + +### `cleanup_legacy_install()` + +**Purpose**: Removes legacy installation methods (nvm, rbenv, rustup, etc.). + +**Parameters**: +- `$1` - Tool name (nodejs, ruby, rust, go) + +**Example**: +```bash +cleanup_legacy_install "nodejs" # Removes nvm +``` + +--- + +## Repository Management + +### `prepare_repository_setup()` + +**Purpose**: Unified repository preparation before setup. Cleans up old repos, keyrings, and ensures APT is working. + +**Parameters**: +- `$@` - Repository names + +**Example**: +```bash +prepare_repository_setup "mariadb" "mysql" +``` + +--- + +### `manage_tool_repository()` + +**Purpose**: Unified repository management for tools. Handles adding, updating, and verifying tool repositories. 
+ +**Parameters**: +- `$1` - Tool name (mariadb, mongodb, nodejs, postgresql, php, mysql) +- `$2` - Version +- `$3` - Repository URL +- `$4` - GPG key URL (optional) + +**Supported Tools**: mariadb, mongodb, nodejs, postgresql, php, mysql + +**Example**: +```bash +manage_tool_repository "mariadb" "11.4" \ + "http://mirror.mariadb.org/repo/11.4" \ + "https://mariadb.org/mariadb_release_signing_key.asc" +``` + +--- + +### `setup_deb822_repo()` + +**Purpose**: Standardized deb822 repository setup with optional architectures. Always runs apt update after repo creation. + +**Parameters**: +- `$1` - Repository name +- `$2` - GPG key URL +- `$3` - Repository URL +- `$4` - Suite +- `$5` - Component (default: main) +- `$6` - Architectures (optional) + +**Example**: +```bash +setup_deb822_repo "adoptium" \ + "https://packages.adoptium.net/artifactory/api/gpg/key/public" \ + "https://packages.adoptium.net/artifactory/deb" \ + "bookworm" \ + "main" +``` + +--- + +### `cleanup_old_repo_files()` + +**Purpose**: Cleanup old repository files (migration helper for OS upgrades). + +**Parameters**: +- `$1` - Application name + +**Example**: +```bash +cleanup_old_repo_files "mariadb" +``` + +--- + +### `cleanup_orphaned_sources()` + +**Purpose**: Cleanup orphaned .sources files that reference missing keyrings. Prevents APT signature verification errors. + +**Example**: +```bash +cleanup_orphaned_sources +``` + +--- + +### `ensure_apt_working()` + +**Purpose**: Ensures APT is in a working state before installing packages. + +**Returns**: 0 if APT is working, 1 if critically broken + +**Example**: +```bash +ensure_apt_working || return 1 +``` + +--- + +### `get_fallback_suite()` + +**Purpose**: Get fallback suite for repository with comprehensive mapping. 
+ +**Parameters**: +- `$1` - Distribution ID (debian, ubuntu) +- `$2` - Distribution codename +- `$3` - Repository base URL + +**Returns**: Appropriate suite name + +**Example**: +```bash +suite=$(get_fallback_suite "debian" "trixie" "https://repo.example.com") +``` + +--- + +## Package Management + +### `install_packages_with_retry()` + +**Purpose**: Install packages with retry logic (3 attempts with APT refresh). + +**Parameters**: +- `$@` - Package names + +**Example**: +```bash +install_packages_with_retry "mysql-server" "mysql-client" +``` + +--- + +### `upgrade_packages_with_retry()` + +**Purpose**: Upgrade specific packages with retry logic. + +**Parameters**: +- `$@` - Package names + +**Example**: +```bash +upgrade_packages_with_retry "mariadb-server" "mariadb-client" +``` + +--- + +### `ensure_dependencies()` + +**Purpose**: Ensures dependencies are installed (with apt update caching). + +**Parameters**: +- `$@` - Dependency names + +**Example**: +```bash +ensure_dependencies "curl" "jq" "git" +``` + +--- + +### `is_package_installed()` + +**Purpose**: Check if package is installed (faster than dpkg -l | grep). + +**Parameters**: +- `$1` - Package name + +**Returns**: 0 if installed, 1 if not + +**Example**: +```bash +if is_package_installed "nginx"; then + echo "Nginx is installed" +fi +``` + +--- + +### `hold_package_version()` + +**Purpose**: Hold package version to prevent upgrades. + +**Parameters**: +- `$1` - Package name + +**Example**: +```bash +hold_package_version "mysql-server" +``` + +--- + +### `unhold_package_version()` + +**Purpose**: Unhold package version to allow upgrades. + +**Parameters**: +- `$1` - Package name + +**Example**: +```bash +unhold_package_version "mysql-server" +``` + +--- + +## Tool Installation Functions + +### `is_tool_installed()` + +**Purpose**: Check if tool is already installed and optionally verify exact version. 
+ +**Parameters**: +- `$1` - Tool name +- `$2` - Required version (optional) + +**Returns**: 0 if installed (with optional version match), 1 if not installed + +**Supported Tools**: mariadb, mysql, mongodb, node, php, postgres, ruby, rust, go, clickhouse + +**Example**: +```bash +is_tool_installed "mariadb" "11.4" || echo "Not installed" +``` + +--- + +### `remove_old_tool_version()` + +**Purpose**: Remove old tool version completely (purge + cleanup repos). + +**Parameters**: +- `$1` - Tool name +- `$2` - Repository name (optional, defaults to tool name) + +**Example**: +```bash +remove_old_tool_version "mariadb" "repository-name" +``` + +--- + +### `should_update_tool()` + +**Purpose**: Determine if tool update/upgrade is needed. + +**Parameters**: +- `$1` - Tool name +- `$2` - Target version + +**Returns**: 0 (update needed), 1 (already up-to-date) + +**Example**: +```bash +if should_update_tool "mariadb" "11.4"; then + echo "Update needed" +fi +``` + +--- + +### `setup_mariadb()` + +**Purpose**: Installs or updates MariaDB from official repo. + +**Variables**: +- `MARIADB_VERSION` - MariaDB version to install (default: latest) + +**Example**: +```bash +MARIADB_VERSION="11.4" setup_mariadb +``` + +--- + +### `setup_mysql()` + +**Purpose**: Installs or upgrades MySQL and configures APT repo. + +**Variables**: +- `MYSQL_VERSION` - MySQL version to install (default: 8.0) + +**Features**: +- Handles Debian Trixie libaio1t64 transition +- Auto-fallback to MariaDB if MySQL 8.0 unavailable + +**Example**: +```bash +MYSQL_VERSION="8.0" setup_mysql +``` + +--- + +### `setup_mongodb()` + +**Purpose**: Installs or updates MongoDB to specified major version. + +**Variables**: +- `MONGO_VERSION` - MongoDB major version (default: 8.0) + +**Example**: +```bash +MONGO_VERSION="7.0" setup_mongodb +``` + +--- + +### `setup_postgresql()` + +**Purpose**: Installs or upgrades PostgreSQL and optional extensions. 
+ +**Variables**: +- `PG_VERSION` - PostgreSQL major version (default: 16) +- `PG_MODULES` - Comma-separated list of extensions + +**Example**: +```bash +PG_VERSION="16" PG_MODULES="postgis,contrib" setup_postgresql +``` + +--- + +### `setup_nodejs()` + +**Purpose**: Installs Node.js and optional global modules. + +**Variables**: +- `NODE_VERSION` - Node.js version (default: 22) +- `NODE_MODULE` - Comma-separated list of global modules + +**Example**: +```bash +NODE_VERSION="22" NODE_MODULE="yarn,@vue/cli@5.0.0" setup_nodejs +``` + +--- + +### `setup_php()` + +**Purpose**: Installs PHP with selected modules and configures Apache/FPM support. + +**Variables**: +- `PHP_VERSION` - PHP version (default: 8.4) +- `PHP_MODULE` - Additional comma-separated modules +- `PHP_APACHE` - Set YES to enable PHP with Apache +- `PHP_FPM` - Set YES to enable PHP-FPM +- `PHP_MEMORY_LIMIT` - Memory limit (default: 512M) +- `PHP_UPLOAD_MAX_FILESIZE` - Upload max filesize (default: 128M) +- `PHP_POST_MAX_SIZE` - Post max size (default: 128M) +- `PHP_MAX_EXECUTION_TIME` - Max execution time (default: 300) + +**Example**: +```bash +PHP_VERSION="8.4" PHP_MODULE="redis,imagick" PHP_FPM="YES" setup_php +``` + +--- + +### `setup_java()` + +**Purpose**: Installs Temurin JDK via Adoptium APT repository. + +**Variables**: +- `JAVA_VERSION` - Temurin JDK version (default: 21) + +**Example**: +```bash +JAVA_VERSION="21" setup_java +``` + +--- + +### `setup_ruby()` + +**Purpose**: Installs rbenv and ruby-build, installs Ruby and optionally Rails. + +**Variables**: +- `RUBY_VERSION` - Ruby version (default: 3.4.4) +- `RUBY_INSTALL_RAILS` - true/false to install Rails (default: true) + +**Example**: +```bash +RUBY_VERSION="3.4.4" RUBY_INSTALL_RAILS="true" setup_ruby +``` + +--- + +### `setup_rust()` + +**Purpose**: Installs Rust toolchain and optional global crates. 
+ +**Variables**: +- `RUST_TOOLCHAIN` - Rust toolchain (default: stable) +- `RUST_CRATES` - Comma-separated list of crates + +**Example**: +```bash +RUST_TOOLCHAIN="stable" RUST_CRATES="cargo-edit,wasm-pack@0.12.1" setup_rust +``` + +--- + +### `setup_go()` + +**Purpose**: Installs Go (Golang) from official tarball. + +**Variables**: +- `GO_VERSION` - Go version (default: latest) + +**Example**: +```bash +GO_VERSION="1.22.2" setup_go +``` + +--- + +### `setup_composer()` + +**Purpose**: Installs or updates Composer globally (robust, idempotent). + +**Features**: +- Installs to /usr/local/bin/composer +- Removes old binaries/symlinks +- Ensures /usr/local/bin is in PATH +- Auto-updates to latest version + +**Example**: +```bash +setup_composer +``` + +--- + +### `setup_uv()` + +**Purpose**: Installs or upgrades uv (Python package manager) from GitHub releases. + +**Variables**: +- `USE_UVX` - Set YES to install uvx wrapper (default: NO) +- `PYTHON_VERSION` - Optional Python version to install via uv + +**Example**: +```bash +USE_UVX="YES" PYTHON_VERSION="3.12" setup_uv +``` + +--- + +### `setup_yq()` + +**Purpose**: Installs or updates yq (mikefarah/yq - Go version). + +**Example**: +```bash +setup_yq +``` + +--- + +### `setup_ffmpeg()` + +**Purpose**: Installs FFmpeg from source or prebuilt binary. + +**Variables**: +- `FFMPEG_VERSION` - FFmpeg version (default: latest) +- `FFMPEG_TYPE` - Build profile: minimal, medium, full, binary (default: full) + +**Example**: +```bash +FFMPEG_VERSION="n7.1.1" FFMPEG_TYPE="full" setup_ffmpeg +``` + +--- + +### `setup_imagemagick()` + +**Purpose**: Installs ImageMagick 7 from source. + +**Example**: +```bash +setup_imagemagick +``` + +--- + +### `setup_gs()` + +**Purpose**: Installs or updates Ghostscript (gs) from source. + +**Example**: +```bash +setup_gs +``` + +--- + +### `setup_hwaccel()` + +**Purpose**: Sets up Hardware Acceleration for Intel/AMD/NVIDIA GPUs. 
+ +**Example**: +```bash +setup_hwaccel +``` + +--- + +### `setup_clickhouse()` + +**Purpose**: Installs or upgrades ClickHouse database server. + +**Variables**: +- `CLICKHOUSE_VERSION` - ClickHouse version (default: latest) + +**Example**: +```bash +CLICKHOUSE_VERSION="latest" setup_clickhouse +``` + +--- + +### `setup_adminer()` + +**Purpose**: Installs Adminer (supports Debian/Ubuntu and Alpine). + +**Example**: +```bash +setup_adminer +``` + +--- + +## GitHub Integration + +### `check_for_gh_release()` + +**Purpose**: Checks for new GitHub release (latest tag). + +**Parameters**: +- `$1` - Application name +- `$2` - GitHub repository (user/repo) +- `$3` - Optional pinned version + +**Returns**: 0 if update available, 1 if up-to-date + +**Global Variables Set**: +- `CHECK_UPDATE_RELEASE` - Latest release tag + +**Example**: +```bash +if check_for_gh_release "flaresolverr" "FlareSolverr/FlareSolverr"; then + echo "Update available: $CHECK_UPDATE_RELEASE" +fi +``` + +--- + +### `fetch_and_deploy_gh_release()` + +**Purpose**: Downloads and deploys latest GitHub release. 
+ +**Parameters**: +- `$1` - Application name +- `$2` - GitHub repository (user/repo) +- `$3` - Mode: tarball, binary, prebuild, singlefile (default: tarball) +- `$4` - Version (default: latest) +- `$5` - Target directory (default: /opt/app) +- `$6` - Asset filename/pattern (required for prebuild/singlefile) + +**Modes**: +- `tarball` - Source code tarball (.tar.gz) +- `binary` - .deb package install (arch-dependent) +- `prebuild` - Prebuilt .tar.gz archive +- `singlefile` - Standalone binary (chmod +x) + +**Example**: +```bash +# Source tarball +fetch_and_deploy_gh_release "myapp" "myuser/myapp" + +# Binary .deb +fetch_and_deploy_gh_release "myapp" "myuser/myapp" "binary" + +# Prebuilt archive +fetch_and_deploy_gh_release "hanko" "teamhanko/hanko" "prebuild" \ + "latest" "/opt/hanko" "hanko_Linux_x86_64.tar.gz" + +# Single binary +fetch_and_deploy_gh_release "argus" "release-argus/Argus" "singlefile" \ + "0.26.3" "/opt/argus" "Argus-.*linux-amd64" +``` + +--- + +### `github_api_call()` + +**Purpose**: GitHub API call with authentication and rate limit handling. + +**Parameters**: +- `$1` - API URL +- `$2` - Output file (default: /dev/stdout) + +**Environment Variables**: +- `GITHUB_TOKEN` - Optional GitHub token for higher rate limits + +**Example**: +```bash +github_api_call "https://api.github.com/repos/user/repo/releases/latest" "/tmp/release.json" +``` + +--- + +### `get_latest_github_release()` + +**Purpose**: Get latest GitHub release version. + +**Parameters**: +- `$1` - GitHub repository (user/repo) +- `$2` - Strip 'v' prefix (default: true) + +**Returns**: Version string + +**Example**: +```bash +version=$(get_latest_github_release "nodejs/node") +``` + +--- + +## System Utilities + +### `get_os_info()` + +**Purpose**: Get OS information (cached for performance). 
+ +**Parameters**: +- `$1` - Field: id, codename, version, version_id, all (default: all) + +**Returns**: Requested OS information + +**Example**: +```bash +os_id=$(get_os_info id) +os_codename=$(get_os_info codename) +``` + +--- + +### `is_debian()`, `is_ubuntu()`, `is_alpine()` + +**Purpose**: Check if running on specific OS. + +**Returns**: 0 if match, 1 if not + +**Example**: +```bash +if is_debian; then + echo "Running on Debian" +fi +``` + +--- + +### `get_os_version_major()` + +**Purpose**: Get Debian/Ubuntu major version. + +**Returns**: Major version number + +**Example**: +```bash +major_version=$(get_os_version_major) +``` + +--- + +### `get_system_arch()` + +**Purpose**: Get system architecture (normalized). + +**Parameters**: +- `$1` - Architecture type: dpkg, uname, both (default: both) + +**Returns**: Architecture string (amd64, arm64) + +**Example**: +```bash +arch=$(get_system_arch) +``` + +--- + +### `version_gt()` + +**Purpose**: Smart version comparison. + +**Parameters**: +- `$1` - Version 1 +- `$2` - Version 2 + +**Returns**: 0 if version 1 > version 2 + +**Example**: +```bash +if version_gt "2.0.0" "1.5.0"; then + echo "Version 2.0.0 is greater" +fi +``` + +--- + +### `is_lts_version()` + +**Purpose**: Check if running on LTS version. + +**Returns**: 0 if LTS, 1 if not + +**Example**: +```bash +if is_lts_version; then + echo "Running on LTS" +fi +``` + +--- + +### `get_parallel_jobs()` + +**Purpose**: Get optimal number of parallel jobs (cached). + +**Returns**: Number of parallel jobs based on CPU and memory + +**Example**: +```bash +jobs=$(get_parallel_jobs) +make -j"$jobs" +``` + +--- + +### `is_apt_locked()` + +**Purpose**: Check if package manager is locked. + +**Returns**: 0 if locked, 1 if not + +**Example**: +```bash +if is_apt_locked; then + echo "APT is locked" +fi +``` + +--- + +### `wait_for_apt()` + +**Purpose**: Wait for apt to be available. 
+ +**Parameters**: +- `$1` - Max wait time in seconds (default: 300) + +**Example**: +```bash +wait_for_apt 600 # Wait up to 10 minutes +``` + +--- + +### `download_file()` + +**Purpose**: Download file with retry logic and progress. + +**Parameters**: +- `$1` - URL +- `$2` - Output path +- `$3` - Max retries (default: 3) +- `$4` - Show progress (default: false) + +**Example**: +```bash +download_file "https://example.com/file.tar.gz" "/tmp/file.tar.gz" 3 true +``` + +--- + +### `create_temp_dir()` + +**Purpose**: Create temporary directory with automatic cleanup. + +**Returns**: Temporary directory path + +**Example**: +```bash +tmp_dir=$(create_temp_dir) +# Directory is automatically cleaned up on exit +``` + +--- + +### `safe_service_restart()` + +**Purpose**: Safe service restart with verification. + +**Parameters**: +- `$1` - Service name + +**Example**: +```bash +safe_service_restart "nginx" +``` + +--- + +### `enable_and_start_service()` + +**Purpose**: Enable and start service (with error handling). + +**Parameters**: +- `$1` - Service name + +**Example**: +```bash +enable_and_start_service "postgresql" +``` + +--- + +### `is_service_enabled()`, `is_service_running()` + +**Purpose**: Check if service is enabled/running. + +**Parameters**: +- `$1` - Service name + +**Returns**: 0 if yes, 1 if no + +**Example**: +```bash +if is_service_running "nginx"; then + echo "Nginx is running" +fi +``` + +--- + +### `create_self_signed_cert()` + +**Purpose**: Creates and installs self-signed certificates. + +**Parameters**: +- `$1` - Application name (optional, defaults to $APPLICATION) + +**Example**: +```bash +create_self_signed_cert "myapp" +``` + +--- + +### `import_local_ip()` + +**Purpose**: Loads LOCAL_IP from persistent store or detects if missing. 
+ +**Global Variables Set**: +- `LOCAL_IP` - Local IP address + +**Example**: +```bash +import_local_ip +echo "Local IP: $LOCAL_IP" +``` + +--- + +### `setup_local_ip_helper()` + +**Purpose**: Installs a local IP updater script using networkd-dispatcher. + +**Example**: +```bash +setup_local_ip_helper +``` + +--- + +### `ensure_usr_local_bin_persist()` + +**Purpose**: Ensures /usr/local/bin is permanently in system PATH. + +**Example**: +```bash +ensure_usr_local_bin_persist +``` + +--- + +### `download_with_progress()` + +**Purpose**: Downloads file with optional progress indicator using pv. + +**Parameters**: +- `$1` - URL +- `$2` - Destination path + +**Example**: +```bash +download_with_progress "https://example.com/file.tar.gz" "/tmp/file.tar.gz" +``` + +--- + +### `verify_gpg_fingerprint()` + +**Purpose**: GPG key fingerprint verification. + +**Parameters**: +- `$1` - Key file path +- `$2` - Expected fingerprint + +**Example**: +```bash +verify_gpg_fingerprint "/tmp/key.gpg" "ABCD1234..." +``` + +--- + +### `debug_log()` + +**Purpose**: Debug logging (only if DEBUG=1). + +**Parameters**: +- `$@` - Message to log + +**Example**: +```bash +DEBUG=1 debug_log "This is a debug message" +``` + +--- + +### `start_timer()`, `end_timer()` + +**Purpose**: Performance timing helpers. + +**Example**: +```bash +start_time=$(start_timer) +# ... do something ... +end_timer "$start_time" "Operation" +``` + +--- + +## Container Setup Functions + +### `color()` + +**Purpose**: Sets up color and formatting variables for terminal output. + +**Example**: +```bash +color +echo -e "${GN}Success${CL}" +``` + +--- + +### `verb_ip6()` + +**Purpose**: Enables or disables IPv6 based on DISABLEIPV6 variable. + +**Variables**: +- `DISABLEIPV6` - Set "yes" to disable IPv6 + +**Example**: +```bash +DISABLEIPV6="yes" verb_ip6 +``` + +--- + +### `catch_errors()` + +**Purpose**: Sets up error handling for the script. 
+ +**Example**: +```bash +catch_errors +``` + +--- + +### `error_handler()` + +**Purpose**: Handles errors that occur during script execution. + +**Parameters**: +- `$1` - Line number +- `$2` - Command that failed + +**Example**: +```bash +error_handler 42 "ls non_existent_file" +``` + +--- + +### `spinner()` + +**Purpose**: Displays a rotating spinner animation. + +**Example**: +```bash +spinner & +SPINNER_PID=$! +``` + +--- + +### `msg_info()`, `msg_ok()`, `msg_error()` + +**Purpose**: Display messages with different statuses. + +**Parameters**: +- `$1` - Message text + +**Example**: +```bash +msg_info "Installing packages..." +msg_ok "Installation complete" +msg_error "Installation failed" +``` + +--- + +### `setting_up_container()` + +**Purpose**: Sets up container OS, configures locale, timezone, and network. + +**Example**: +```bash +setting_up_container +``` + +--- + +### `network_check()` + +**Purpose**: Verifies internet connectivity via IPv4 and IPv6. + +**Example**: +```bash +network_check +``` + +--- + +### `update_os()` + +**Purpose**: Updates the container's OS using apt-get. + +**Variables**: +- `CACHER` - Enable package caching proxy + +**Example**: +```bash +update_os +``` + +--- + +### `motd_ssh()` + +**Purpose**: Modifies message of the day (MOTD) and SSH settings. + +**Example**: +```bash +motd_ssh +``` + +--- + +### `customize()` + +**Purpose**: Customizes the container by enabling auto-login and setting up SSH keys. + +**Example**: +```bash +customize +``` + +--- + +## Best Practices + +### Version Management + +Always cache versions after installation: +```bash +setup_nodejs +cache_installed_version "nodejs" "$NODE_VERSION" +``` + +### Error Handling + +Always check return codes: +```bash +if ! 
install_packages_with_retry "nginx"; then + msg_error "Failed to install nginx" + return 1 +fi +``` + +### Repository Setup + +Always prepare repositories before installation: +```bash +prepare_repository_setup "mariadb" || return 1 +manage_tool_repository "mariadb" "11.4" "$REPO_URL" "$GPG_URL" || return 1 +``` + +### APT Safety + +Always ensure APT is working before operations: +```bash +ensure_apt_working || return 1 +install_packages_with_retry "package-name" +``` + +--- + +## Notes + +- All functions use the `$STD` variable for silent execution +- Functions support both fresh installs and upgrades +- Automatic fallback mechanisms for newer OS versions +- Version caching prevents redundant installations +- Comprehensive error handling and retry logic + +--- + +## License + +This documentation is part of the community-scripts project. + +--- + +## Contributing + +Contributions are welcome! Please follow the existing code style and add appropriate documentation for new functions.