diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md
index 0f3ff54ac..b1a6ee56f 100644
--- a/.github/pull_request_template.md
+++ b/.github/pull_request_template.md
@@ -1,7 +1,4 @@
-🛑 **New scripts must first be submitted to [ProxmoxVED](https://github.com/community-scripts/ProxmoxVED) for testing.**
-PRs for new scripts that skip this process will be closed.
-
----
+## **Scripts which are clearly AI generated and not further reviewed by the Author of this PR (in terms of Coding Standards and Script Layout) may be closed without review.**
## ✍️ Description
diff --git a/.github/workflows/auto-update-app-headers.yml b/.github/workflows/auto-update-app-headers.yml
index 18941379c..30bc5f429 100644
--- a/.github/workflows/auto-update-app-headers.yml
+++ b/.github/workflows/auto-update-app-headers.yml
@@ -20,17 +20,22 @@ jobs:
steps:
- name: Generate a token
id: generate-token
- uses: actions/create-github-app-token@v1
+ uses: actions/create-github-app-token@v2
with:
app-id: ${{ vars.APP_ID }}
private-key: ${{ secrets.APP_PRIVATE_KEY }}
+ owner: community-scripts
+ repositories: ProxmoxVED
+
- name: Generate a token for PR approval and merge
id: generate-token-merge
- uses: actions/create-github-app-token@v1
+ uses: actions/create-github-app-token@v2
with:
- app-id: ${{ secrets.APP_ID_APPROVE_AND_MERGE }}
- private-key: ${{ secrets.APP_KEY_APPROVE_AND_MERGE }}
+ app-id: ${{ vars.APP_ID }}
+ private-key: ${{ secrets.APP_PRIVATE_KEY }}
+ owner: community-scripts
+ repositories: ProxmoxVED
# Step 1: Checkout repository
- name: Checkout repository
diff --git a/.github/workflows/backup/check_and_update_json_date.yml b/.github/workflows/backup/check_and_update_json_date.yml
deleted file mode 100644
index cde3cbbad..000000000
--- a/.github/workflows/backup/check_and_update_json_date.yml
+++ /dev/null
@@ -1,60 +0,0 @@
-name: Update date_created in JSON files
-
-on:
- # Dieser Trigger wird für das Öffnen von PRs sowie für das Aktualisieren von offenen PRs verwendet
- pull_request:
- types: [opened, synchronize]
- schedule:
- # Dieser Trigger wird 4x am Tag ausgelöst, um sicherzustellen, dass das Datum aktualisiert wird
- - cron: "0 0,6,12,18 * * *" # Führt alle 6 Stunden aus
- workflow_dispatch: # Manuelle Ausführung des Workflows möglich
-
-jobs:
- update-date:
- runs-on: ubuntu-latest
-
- steps:
- - name: Checkout repository
- uses: actions/checkout@v4
-
- - name: Install yq
- run: |
- sudo apt-get update
- sudo apt-get install -y yq
-
- - name: Set the current date
- id: set_date
- run: echo "TODAY=$(date -u +%Y-%m-%d)" >> $GITHUB_ENV
-
- - name: Check for changes in PR
- run: |
- # Hole den PR-Branch
- PR_BRANCH="refs/pull/${{ github.event.pull_request.number }}/merge"
- git fetch origin $PR_BRANCH
-
- # Liste alle JSON-Dateien im PR auf, die geändert wurden
- CHANGED_JSON_FILES=$(git diff --name-only origin/main...$PR_BRANCH | grep '.json')
-
- if [ -z "$CHANGED_JSON_FILES" ]; then
- echo "No JSON files changed in this PR."
- exit 0
- fi
-
- # Gehe alle geänderten JSON-Dateien durch und aktualisiere das Datum
- for file in $CHANGED_JSON_FILES; do
- echo "Updating date_created in $file"
- # Setze das aktuelle Datum
- yq eval ".date_created = \"${{ env.TODAY }}\"" -i "$file"
- git add "$file"
- done
-
- - name: Commit and push changes
- run: |
- # Prüfe, ob es Änderungen gibt und committe sie
- git config user.name "json-updater-bot"
- git config user.email "github-actions[bot]@users.noreply.github.com"
-
- git commit -m "Update date_created to ${{ env.TODAY }}" || echo "No changes to commit"
-
- # Push zurück in den PR-Branch
- git push origin $PR_BRANCH
diff --git a/.github/workflows/backup/shellcheck.yml b/.github/workflows/backup/shellcheck.yml
deleted file mode 100644
index 4385fc8e9..000000000
--- a/.github/workflows/backup/shellcheck.yml
+++ /dev/null
@@ -1,60 +0,0 @@
-name: Shellcheck
-
-on:
- push:
- branches:
- - main
- pull_request:
- workflow_dispatch:
- schedule:
- - cron: "5 1 * * *"
-
-jobs:
- shellcheck:
- name: Shellcheck
- runs-on: ubuntu-latest
-
- steps:
- - uses: actions/checkout@v4
-
- - name: Get changed files
- id: changed-files
- uses: tj-actions/changed-files@v45
- with:
- files: |
- **.sh
-
- - name: Download ShellCheck
- shell: bash
- env:
- INPUT_VERSION: "v0.10.0"
- run: |
- set -euo pipefail
- if [[ "${{ runner.os }}" == "macOS" ]]; then
- osvariant="darwin"
- else
- osvariant="linux"
- fi
-
- baseurl="https://github.com/koalaman/shellcheck/releases/download"
- curl -Lso "${{ github.workspace }}/sc.tar.xz" \
- "${baseurl}/${INPUT_VERSION}/shellcheck-${INPUT_VERSION}.${osvariant}.x86_64.tar.xz"
-
- tar -xf "${{ github.workspace }}/sc.tar.xz" -C "${{ github.workspace }}"
- mv "${{ github.workspace }}/shellcheck-${INPUT_VERSION}/shellcheck" \
- "${{ github.workspace }}/shellcheck"
-
- - name: Verify ShellCheck binary
- run: |
- ls -l "${{ github.workspace }}/shellcheck"
-
- - name: Display ShellCheck version
- run: |
- "${{ github.workspace }}/shellcheck" --version
-
- - name: Run ShellCheck
- if: steps.changed-files.outputs.any_changed == 'true'
- env:
- ALL_CHANGED_FILES: ${{ steps.changed-files.outputs.all_changed_files }}
- run: |
- echo "${ALL_CHANGED_FILES}" | xargs "${{ github.workspace }}/shellcheck"
diff --git a/.github/workflows/backup/update_json_date.yml.bak b/.github/workflows/backup/update_json_date.yml.bak
deleted file mode 100644
index cb9bc8559..000000000
--- a/.github/workflows/backup/update_json_date.yml.bak
+++ /dev/null
@@ -1,88 +0,0 @@
-name: Auto Update JSON-Date
-
-on:
- push:
- branches:
- - main
- workflow_dispatch:
-
-jobs:
- update-json-dates:
- runs-on: ubuntu-latest
-
- permissions:
- contents: write
- pull-requests: write
-
- steps:
- - name: Generate a token
- id: generate-token
- uses: actions/create-github-app-token@v1
- with:
- app-id: ${{ vars.APP_ID }}
- private-key: ${{ secrets.APP_PRIVATE_KEY }}
-
- - name: Checkout repository
- uses: actions/checkout@v4
- with:
- fetch-depth: 0 # Full history for proper detection
-
- - name: Set up Git
- run: |
- git config --global user.name "GitHub Actions"
- git config --global user.email "github-actions[bot]@users.noreply.github.com"
-
- - name: Find JSON files with incorrect date_created
- id: find_wrong_json
- run: |
- TODAY=$(date -u +"%Y-%m-%d")
- > incorrect_json_files.txt
-
- for FILE in json/*.json; do
- if [[ -f "$FILE" ]]; then
- DATE_IN_JSON=$(jq -r '.date_created' "$FILE" 2>/dev/null || echo "")
-
- if [[ "$DATE_IN_JSON" != "$TODAY" ]]; then
- echo "$FILE" >> incorrect_json_files.txt
- fi
- fi
- done
-
- if [[ -s incorrect_json_files.txt ]]; then
- echo "CHANGED=true" >> $GITHUB_ENV
- else
- echo "CHANGED=false" >> $GITHUB_ENV
- fi
-
- - name: Run update script
- if: env.CHANGED == 'true'
- run: |
- chmod +x .github/workflows/scripts/update-json.sh
- while read -r FILE; do
- .github/workflows/scripts/update-json.sh "$FILE"
- done < incorrect_json_files.txt
-
- - name: Commit and create PR if changes exist
- if: env.CHANGED == 'true'
- run: |
- git add json/*.json
- git commit -m "Auto-update date_created in incorrect JSON files"
- git checkout -b pr-fix-json-dates
- git push origin pr-fix-json-dates --force
- gh pr create --title "[core] Fix incorrect JSON date_created fields" \
- --body "This PR is auto-generated to fix incorrect `date_created` fields in JSON files." \
- --head pr-fix-json-dates \
- --base main \
- --label "automated pr"
- env:
- GH_TOKEN: ${{ steps.generate-token.outputs.token }}
-
- - name: Approve pull request
- if: env.CHANGED == 'true'
- env:
- GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- run: |
- PR_NUMBER=$(gh pr list --head "pr-fix-json-dates" --json number --jq '.[].number')
- if [ -n "$PR_NUMBER" ]; then
- gh pr review $PR_NUMBER --approve
- fi
diff --git a/.github/workflows/backup/validate-formatting.yaml.bak b/.github/workflows/backup/validate-formatting.yaml.bak
deleted file mode 100644
index 8eadd0acf..000000000
--- a/.github/workflows/backup/validate-formatting.yaml.bak
+++ /dev/null
@@ -1,133 +0,0 @@
-name: Validate script formatting
-
-on:
- push:
- branches:
- - main
- pull_request_target:
- paths:
- - "**/*.sh"
- - "**/*.func"
-
-jobs:
- shfmt:
- name: Check changed files
- runs-on: ubuntu-latest
- permissions:
-
- pull-requests: write
-
- steps:
- - name: Get pull request information
- if: github.event_name == 'pull_request_target'
- uses: actions/github-script@v7
- id: pr
- with:
- script: |
- const { data: pullRequest } = await github.rest.pulls.get({
- ...context.repo,
- pull_number: context.payload.pull_request.number,
- });
- return pullRequest;
-
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- fetch-depth: 0 # Ensure the full history is fetched for accurate diffing
- ref: ${{ github.event_name == 'pull_request_target' && fromJSON(steps.pr.outputs.result).merge_commit_sha || '' }}
-
- - name: Get changed files
- id: changed-files
- run: |
- if ${{ github.event_name == 'pull_request_target' }}; then
- echo "files=$(git diff --name-only ${{ github.event.pull_request.base.sha }} ${{ steps.pr.outputs.result && fromJSON(steps.pr.outputs.result).merge_commit_sha }} | grep -E '\.(sh|func)$' | xargs)" >> $GITHUB_OUTPUT
- else
- echo "files=$(git diff --name-only ${{ github.event.before }} ${{ github.event.after }} | grep -E '\.(sh|func)$' | xargs)" >> $GITHUB_OUTPUT
- fi
-
- - name: Set up Go
- if: steps.changed-files.outputs.files != ''
- uses: actions/setup-go@v5
-
- - name: Install shfmt
- if: steps.changed-files.outputs.files != ''
- run: |
- go install mvdan.cc/sh/v3/cmd/shfmt@latest
- echo "$GOPATH/bin" >> $GITHUB_PATH
-
- - name: Run shfmt
- if: steps.changed-files.outputs.files != ''
- id: shfmt
- run: |
- set +e
-
-
- shfmt_output=$(shfmt -d ${{ steps.changed-files.outputs.files }})
- if [[ $? -eq 0 ]]; then
- exit 0
- else
- echo "diff=\"$(echo -n "$shfmt_output" | base64 -w 0)\"" >> $GITHUB_OUTPUT
- printf "%s" "$shfmt_output"
- exit 1
- fi
-
- - name: Post comment with results
- if: always() && steps.changed-files.outputs.files != '' && github.event_name == 'pull_request_target'
- uses: actions/github-script@v7
- with:
- script: |
- const result = "${{ job.status }}" === "success" ? "success" : "failure";
- const diff = Buffer.from(
- ${{ steps.shfmt.outputs.diff }},
- "base64",
- ).toString();
- const issueNumber = context.payload.pull_request
- ? context.payload.pull_request.number
- : null;
- const commentIdentifier = "validate-formatting";
- let newCommentBody = `\n### Script formatting\n\n`;
-
- if (result === "failure") {
- newCommentBody +=
- `:x: We found issues in the formatting of the following changed files:\n\n\`\`\`diff\n${diff}\n\`\`\`\n`;
- } else {
- newCommentBody += `:rocket: All changed shell scripts are formatted correctly!\n`;
- }
-
- newCommentBody += `\n\n`;
-
- if (issueNumber) {
- const { data: comments } = await github.rest.issues.listComments({
- ...context.repo,
- issue_number: issueNumber,
- });
-
- const existingComment = comments.find(
- (comment) => comment.user.login === "github-actions[bot]",
-
- );
-
- if (existingComment) {
- if (existingComment.body.includes(commentIdentifier)) {
- const re = new RegExp(
- String.raw`[\s\S]*?`,
- "",
- );
- newCommentBody = existingComment.body.replace(re, newCommentBody);
- } else {
- newCommentBody = existingComment.body + "\n\n---\n\n" + newCommentBody;
- }
-
- await github.rest.issues.updateComment({
- ...context.repo,
- comment_id: existingComment.id,
- body: newCommentBody,
- });
- } else {
- await github.rest.issues.createComment({
- ...context.repo,
- issue_number: issueNumber,
- body: newCommentBody,
- });
- }
- }
diff --git a/.github/workflows/backup/validate-scripts.yml.bak b/.github/workflows/backup/validate-scripts.yml.bak
deleted file mode 100644
index acb86132f..000000000
--- a/.github/workflows/backup/validate-scripts.yml.bak
+++ /dev/null
@@ -1,234 +0,0 @@
-name: Validate scripts
-on:
- push:
- branches:
- - main
- pull_request_target:
- paths:
- - "ct/*.sh"
- - "install/*.sh"
-
-jobs:
- check-scripts:
- name: Check changed files
- runs-on: ubuntu-latest
- permissions:
- pull-requests: write
-
- steps:
- - name: Debug event payload
- run: |
- echo "Event name: ${{ github.event_name }}"
- echo "Payload: $(cat $GITHUB_EVENT_PATH)"
-
- - name: Get pull request information
- if: github.event_name == 'pull_request_target'
- uses: actions/github-script@v7
- id: pr
- with:
- script: |
- const { data: pullRequest } = await github.rest.pulls.get({
- ...context.repo,
- pull_number: context.payload.pull_request.number,
- });
- return pullRequest;
-
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- fetch-depth: 0
- ref: ${{ github.event_name == 'pull_request_target' && fromJSON(steps.pr.outputs.result).merge_commit_sha || '' }}
-
- - name: Get changed files
- id: changed-files
- run: |
- if [ "${{ github.event_name }}" == "pull_request_target" ]; then
- echo "files=$(git diff --name-only ${{ github.event.pull_request.base.sha }} ${{ steps.pr.outputs.result && fromJSON(steps.pr.outputs.result).merge_commit_sha }} | grep -E '\.(sh|func)$' | xargs)" >> $GITHUB_OUTPUT
- else
- echo "files=$(git diff --name-only ${{ github.event.before }} ${{ github.event.after }} | grep -E '\.(sh|func)$' | xargs)" >> $GITHUB_OUTPUT
- fi
-
- - name: Check build.func line
- if: always() && steps.changed-files.outputs.files != ''
- id: build-func
- run: |
- NON_COMPLIANT_FILES=""
- for FILE in ${{ steps.changed-files.outputs.files }}; do
- if [[ "$FILE" == ct/* ]] && [[ $(sed -n '2p' "$FILE") != "source <(curl -s https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)" ]]; then
- NON_COMPLIANT_FILES="$NON_COMPLIANT_FILES $FILE"
- fi
- done
-
- if [ -n "$NON_COMPLIANT_FILES" ]; then
- echo "files=$NON_COMPLIANT_FILES" >> $GITHUB_OUTPUT
- echo "Build.func line missing or incorrect in files:"
- for FILE in $NON_COMPLIANT_FILES; do
- echo "$FILE"
- done
- exit 1
- fi
-
- - name: Check executable permissions
- if: always() && steps.changed-files.outputs.files != ''
- id: check-executable
- run: |
- NON_COMPLIANT_FILES=""
- for FILE in ${{ steps.changed-files.outputs.files }}; do
- if [[ ! -x "$FILE" ]]; then
- NON_COMPLIANT_FILES="$NON_COMPLIANT_FILES $FILE"
- fi
- done
-
- if [ -n "$NON_COMPLIANT_FILES" ]; then
- echo "files=$NON_COMPLIANT_FILES" >> $GITHUB_OUTPUT
- echo "Files not executable:"
- for FILE in $NON_COMPLIANT_FILES; do
- echo "$FILE"
- done
- exit 1
- fi
-
- - name: Check copyright
- if: always() && steps.changed-files.outputs.files != ''
- id: check-copyright
- run: |
- NON_COMPLIANT_FILES=""
- for FILE in ${{ steps.changed-files.outputs.files }}; do
- if ! sed -n '3p' "$FILE" | grep -qE "^# Copyright \(c\) [0-9]{4}(-[0-9]{4})? (tteck \| community-scripts ORG|community-scripts ORG|tteck)$"; then
- NON_COMPLIANT_FILES="$NON_COMPLIANT_FILES $FILE"
- fi
- done
-
- if [ -n "$NON_COMPLIANT_FILES" ]; then
- echo "files=$NON_COMPLIANT_FILES" >> $GITHUB_OUTPUT
- echo "Copyright header missing or not on line 3 in files:"
- for FILE in $NON_COMPLIANT_FILES; do
- echo "$FILE"
- done
- exit 1
- fi
-
- - name: Check author
- if: always() && steps.changed-files.outputs.files != ''
- id: check-author
- run: |
- NON_COMPLIANT_FILES=""
- for FILE in ${{ steps.changed-files.outputs.files }}; do
- if ! sed -n '4p' "$FILE" | grep -qE "^# Author: .+"; then
- NON_COMPLIANT_FILES="$NON_COMPLIANT_FILES $FILE"
- fi
- done
-
- if [ -n "$NON_COMPLIANT_FILES" ]; then
- echo "files=$NON_COMPLIANT_FILES" >> $GITHUB_OUTPUT
- echo "Author header missing or invalid on line 4 in files:"
- for FILE in $NON_COMPLIANT_FILES; do
- echo "$FILE"
- done
- exit 1
- fi
-
- - name: Check license
- if: always() && steps.changed-files.outputs.files != ''
- id: check-license
- run: |
- NON_COMPLIANT_FILES=""
- for FILE in ${{ steps.changed-files.outputs.files }}; do
- if [[ "$(sed -n '5p' "$FILE")" != "# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE" ]]; then
- NON_COMPLIANT_FILES="$NON_COMPLIANT_FILES $FILE"
- fi
- done
-
- if [ -n "$NON_COMPLIANT_FILES" ]; then
- echo "files=$NON_COMPLIANT_FILES" >> $GITHUB_OUTPUT
- echo "License header missing or not on line 5 in files:"
- for FILE in $NON_COMPLIANT_FILES; do
- echo "$FILE"
- done
- exit 1
- fi
-
- - name: Check source
- if: always() && steps.changed-files.outputs.files != ''
- id: check-source
- run: |
- NON_COMPLIANT_FILES=""
- for FILE in ${{ steps.changed-files.outputs.files }}; do
- if ! sed -n '6p' "$FILE" | grep -qE "^# Source: .+"; then
- NON_COMPLIANT_FILES="$NON_COMPLIANT_FILES $FILE"
- fi
- done
-
- if [ -n "$NON_COMPLIANT_FILES" ]; then
- echo "files=$NON_COMPLIANT_FILES" >> $GITHUB_OUTPUT
- echo "Source header missing or not on line 6 in files:"
- for FILE in $NON_COMPLIANT_FILES; do
- echo "$FILE"
- done
- exit 1
- fi
-
- - name: Post results and comment
- if: always() && steps.changed-files.outputs.files != '' && github.event_name == 'pull_request_target'
- uses: actions/github-script@v7
- with:
- script: |
- const result = '${{ job.status }}' === 'success' ? 'success' : 'failure';
- const nonCompliantFiles = {
- 'Invalid build.func source': "${{ steps.build-func.outputs.files || '' }}",
- 'Not executable': "${{ steps.check-executable.outputs.files || '' }}",
- 'Copyright header line missing or invalid': "${{ steps.check-copyright.outputs.files || '' }}",
- 'Author header line missing or invalid': "${{ steps.check-author.outputs.files || '' }}",
- 'License header line missing or invalid': "${{ steps.check-license.outputs.files || '' }}",
- 'Source header line missing or invalid': "${{ steps.check-source.outputs.files || '' }}"
- };
-
- const issueNumber = context.payload.pull_request ? context.payload.pull_request.number : null;
- const commentIdentifier = 'validate-scripts';
- let newCommentBody = `\n### Script validation\n\n`;
-
- if (result === 'failure') {
- newCommentBody += ':x: We found issues in the following changed files:\n\n';
- for (const [check, files] of Object.entries(nonCompliantFiles)) {
- if (files) {
- newCommentBody += `**${check}:**\n`;
- files.trim().split(' ').forEach(file => {
- newCommentBody += `- ${file}: ${check}\n`;
- });
- newCommentBody += `\n`;
- }
- }
- } else {
- newCommentBody += `:rocket: All changed shell scripts passed validation!\n`;
- }
-
- newCommentBody += `\n\n`;
-
- if (issueNumber) {
- const { data: comments } = await github.rest.issues.listComments({
- ...context.repo,
- issue_number: issueNumber
- });
-
- const existingComment = comments.find(comment =>
- comment.body.includes(``) &&
- comment.user.login === 'github-actions[bot]'
- );
-
- if (existingComment) {
- const re = new RegExp(String.raw`[\\s\\S]*?`, "m");
- newCommentBody = existingComment.body.replace(re, newCommentBody);
-
- await github.rest.issues.updateComment({
- ...context.repo,
- comment_id: existingComment.id,
- body: newCommentBody
- });
- } else {
- await github.rest.issues.createComment({
- ...context.repo,
- issue_number: issueNumber,
- body: newCommentBody
- });
- }
- }
diff --git a/.github/workflows/changelog-pr.yaml b/.github/workflows/changelog-pr.yaml
deleted file mode 100644
index ca187dd5c..000000000
--- a/.github/workflows/changelog-pr.yaml
+++ /dev/null
@@ -1,282 +0,0 @@
-name: Create Changelog Pull Request
-
-on:
- push:
- branches: ["main"]
- workflow_dispatch:
-
-jobs:
- update-changelog-pull-request:
- if: github.repository == 'community-scripts/ProxmoxVED'
- runs-on: ubuntu-latest
- env:
- CONFIG_PATH: .github/changelog-pr-config.json
- BRANCH_NAME: github-action-update-changelog
- AUTOMATED_PR_LABEL: "automated pr"
- permissions:
- contents: write
- pull-requests: write
- steps:
- - name: Generate a token for PR creation
- id: generate-token
- uses: actions/create-github-app-token@v1
- with:
- app-id: ${{ vars.APP_ID }}
- private-key: ${{ secrets.APP_PRIVATE_KEY }}
-
- - name: Generate a token for PR approval and merge
- id: generate-token-merge
- uses: actions/create-github-app-token@v1
- with:
- app-id: ${{ secrets.APP_ID_APPROVE_AND_MERGE }}
- private-key: ${{ secrets.APP_KEY_APPROVE_AND_MERGE }}
-
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- fetch-depth: 0
-
- - name: Get latest dates in changelog
- run: |
- DATES=$(grep -E '^## [0-9]{4}-[0-9]{2}-[0-9]{2}' CHANGELOG.md | head -n 2 | awk '{print $2}')
-
- LATEST_DATE=$(echo "$DATES" | sed -n '1p')
- SECOND_LATEST_DATE=$(echo "$DATES" | sed -n '2p')
- TODAY=$(date -u +%Y-%m-%d)
-
- echo "TODAY=$TODAY" >> $GITHUB_ENV
- if [[ "$LATEST_DATE" == "$TODAY" ]]; then
- echo "LATEST_DATE=$SECOND_LATEST_DATE" >> $GITHUB_ENV
- else
- echo "LATEST_DATE=$LATEST_DATE" >> $GITHUB_ENV
- fi
-
- - name: Get categorized pull requests
- id: get-categorized-prs
- uses: actions/github-script@v7
- with:
- script: |
- async function main() {
- const fs = require('fs').promises;
- const path = require('path');
-
- const configPath = path.resolve(process.env.CONFIG_PATH);
- const fileContent = await fs.readFile(configPath, 'utf-8');
- const changelogConfig = JSON.parse(fileContent);
-
- const categorizedPRs = changelogConfig.map(obj => ({
- ...obj,
- notes: [],
- subCategories: obj.subCategories ?? (
- obj.labels.includes("update script") ? [
- { title: "🐞 Bug Fixes", labels: ["bugfix"], notes: [] },
- { title: "✨ New Features", labels: ["feature"], notes: [] },
- { title: "💥 Breaking Changes", labels: ["breaking change"], notes: [] },
- { title: "🔧 Refactor", labels: ["refactor"], notes: [] },
- ] :
- obj.labels.includes("maintenance") ? [
- { title: "🐞 Bug Fixes", labels: ["bugfix"], notes: [] },
- { title: "✨ New Features", labels: ["feature"], notes: [] },
- { title: "💥 Breaking Changes", labels: ["breaking change"], notes: [] },
- { title: "📡 API", labels: ["api"], notes: [] },
- { title: "Github", labels: ["github"], notes: [] },
- { title: "📝 Documentation", labels: ["documentation"], notes: [] },
- { title: "🔧 Refactor", labels: ["refactor"], notes: [] }
- ] :
- obj.labels.includes("website") ? [
- { title: "🐞 Bug Fixes", labels: ["bugfix"], notes: [] },
- { title: "✨ New Features", labels: ["feature"], notes: [] },
- { title: "💥 Breaking Changes", labels: ["breaking change"], notes: [] },
- { title: "Script Information", labels: ["json"], notes: [] }
- ] : []
- )
- }));
-
- const latestDateInChangelog = new Date(process.env.LATEST_DATE);
- latestDateInChangelog.setUTCHours(23, 59, 59, 999);
-
- const { data: pulls } = await github.rest.pulls.list({
- owner: context.repo.owner,
- repo: "ProxmoxVE",
- base: "main",
- state: "closed",
- sort: "updated",
- direction: "desc",
- per_page: 100,
- });
-
- const filteredPRs = pulls.filter(pr =>
- pr.merged_at &&
- new Date(pr.merged_at) > latestDateInChangelog &&
- !pr.labels.some(label =>
- ["invalid", "wontdo", process.env.AUTOMATED_PR_LABEL].includes(label.name.toLowerCase())
- )
- );
-
- for (const pr of filteredPRs) {
- const prLabels = pr.labels.map(label => label.name.toLowerCase());
- if (pr.user.login.includes("push-app-to-main[bot]")) {
-
- const scriptName = pr.title;
- try {
- const { data: relatedIssues } = await github.rest.issues.listForRepo({
- owner: context.repo.owner,
- repo: "ProxmoxVED",
- state: "all",
- labels: ["Started Migration To ProxmoxVE"]
- });
-
- const matchingIssue = relatedIssues.find(issue =>
- issue.title.toLowerCase().includes(scriptName.toLowerCase())
- );
-
- if (matchingIssue) {
- const issueAuthor = matchingIssue.user.login;
- const issueAuthorUrl = `https://github.com/${issueAuthor}`;
- prNote = `- ${pr.title} [@${issueAuthor}](${issueAuthorUrl}) ([#${pr.number}](${pr.html_url}))`;
- }
- else {
- prNote = `- ${pr.title} ([#${pr.number}](${pr.html_url}))`;
- }
- } catch (error) {
- console.error(`Error fetching related issues: ${error}`);
- prNote = `- ${pr.title} ([#${pr.number}](${pr.html_url}))`;
- }
- }else{
- prNote = `- ${pr.title} [@${pr.user.login}](https://github.com/${pr.user.login}) ([#${pr.number}](${pr.html_url}))`;
- }
-
-
- if (prLabels.includes("new script")) {
- const newScriptCategory = categorizedPRs.find(category =>
- category.title === "New Scripts" || category.labels.includes("new script"));
- if (newScriptCategory) {
- newScriptCategory.notes.push(prNote);
- }
- } else {
-
- let categorized = false;
- const priorityCategories = categorizedPRs.slice();
- for (const category of priorityCategories) {
- if (categorized) break;
- if (category.labels.some(label => prLabels.includes(label))) {
- if (category.subCategories && category.subCategories.length > 0) {
- const subCategory = category.subCategories.find(sub =>
- sub.labels.some(label => prLabels.includes(label))
- );
-
- if (subCategory) {
- subCategory.notes.push(prNote);
- } else {
- category.notes.push(prNote);
- }
- } else {
- category.notes.push(prNote);
- }
- categorized = true;
- }
- }
- }
-
- }
-
- return categorizedPRs;
- }
-
- return await main();
-
- - name: Update CHANGELOG.md
- uses: actions/github-script@v7
- with:
- script: |
- const fs = require('fs').promises;
- const path = require('path');
-
- const today = process.env.TODAY;
- const latestDateInChangelog = process.env.LATEST_DATE;
- const changelogPath = path.resolve('CHANGELOG.md');
- const categorizedPRs = ${{ steps.get-categorized-prs.outputs.result }};
-
- let newReleaseNotes = `## ${today}\n\n`;
- for (const { title, notes, subCategories } of categorizedPRs) {
- const hasSubcategories = subCategories && subCategories.length > 0;
- const hasMainNotes = notes.length > 0;
- const hasSubNotes = hasSubcategories && subCategories.some(sub => sub.notes && sub.notes.length > 0);
-
- if (hasMainNotes || hasSubNotes) {
- newReleaseNotes += `### ${title}\n\n`;
- }
-
- if (hasMainNotes) {
- newReleaseNotes += ` ${notes.join("\n")}\n\n`;
- }
- if (hasSubcategories) {
- for (const { title: subTitle, notes: subNotes } of subCategories) {
- if (subNotes && subNotes.length > 0) {
- newReleaseNotes += ` - #### ${subTitle}\n\n`;
- newReleaseNotes += ` ${subNotes.join("\n ")}\n\n`;
- }
- }
- }
- }
- const changelogContent = await fs.readFile(changelogPath, 'utf-8');
- const changelogIncludesTodaysReleaseNotes = changelogContent.includes(`\n## ${today}`);
-
- const regex = changelogIncludesTodaysReleaseNotes
- ? new RegExp(`## ${today}.*(?=## ${latestDateInChangelog})`, "gs")
- : new RegExp(`(?=## ${latestDateInChangelog})`, "gs");
-
- const newChangelogContent = changelogContent.replace(regex, newReleaseNotes);
- await fs.writeFile(changelogPath, newChangelogContent);
-
- - name: Check for changes
- id: verify-diff
- run: |
- git diff --quiet . || echo "changed=true" >> $GITHUB_ENV
-
- - name: Commit and push changes
- if: env.changed == 'true'
- run: |
- git config --global user.name "github-actions[bot]"
- git config --global user.email "github-actions[bot]@users.noreply.github.com"
- git add CHANGELOG.md
- git commit -m "Update CHANGELOG.md"
- git checkout -b $BRANCH_NAME || git checkout $BRANCH_NAME
- git push origin $BRANCH_NAME --force
-
- - name: Create pull request if not exists
- if: env.changed == 'true'
- env:
- GH_TOKEN: ${{ steps.generate-token.outputs.token }}
- run: |
- PR_EXISTS=$(gh pr list --head "${BRANCH_NAME}" --json number --jq '.[].number')
- if [ -z "$PR_EXISTS" ]; then
- gh pr create --title "[Github Action] Update CHANGELOG.md" \
- --body "This PR is auto-generated by a Github Action to update the CHANGELOG.md file." \
- --head $BRANCH_NAME \
- --base main \
- --label "$AUTOMATED_PR_LABEL"
- fi
-
- - name: Approve pull request
- if: env.changed == 'true'
- env:
- GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- run: |
- PR_NUMBER=$(gh pr list --head "${BRANCH_NAME}" --json number --jq '.[].number')
- if [ -n "$PR_NUMBER" ]; then
- gh pr review $PR_NUMBER --approve
- fi
-
- - name: Approve pull request and merge
- if: env.changed == 'true'
- env:
- GH_TOKEN: ${{ steps.generate-token-merge.outputs.token }}
- run: |
- git config --global user.name "github-actions-automege[bot]"
- git config --global user.email "github-actions-automege[bot]@users.noreply.github.com"
- PR_NUMBER=$(gh pr list --head "${BRANCH_NAME}" --json number --jq '.[].number')
- if [ -n "$PR_NUMBER" ]; then
- gh pr review $PR_NUMBER --approve
- gh pr merge $PR_NUMBER --squash --admin
- fi
diff --git a/.github/workflows/close-discussion.yaml b/.github/workflows/close-discussion.yaml
deleted file mode 100644
index 9b0352f43..000000000
--- a/.github/workflows/close-discussion.yaml
+++ /dev/null
@@ -1,164 +0,0 @@
-name: Close Discussion on PR Merge
-
-on:
- push:
- branches:
- - main
-
-permissions:
- contents: read
- discussions: write
-
-jobs:
- close-discussion:
- if: github.repository == 'community-scripts/ProxmoxVED'
- runs-on: ubuntu-latest
-
- steps:
- - name: Checkout Repository
- uses: actions/checkout@v4
-
- - name: Set Up Node.js
- uses: actions/setup-node@v4
- with:
- node-version: "20"
-
- - name: Install Dependencies
- run: npm install zx @octokit/graphql
-
- - name: Close Discussion
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- GITHUB_SHA: ${{ github.sha }}
- GITHUB_REPOSITORY: ${{ github.repository }}
- run: |
- npx zx << 'EOF'
- import { graphql } from "@octokit/graphql";
-
- (async function () {
- try {
- const token = process.env.GITHUB_TOKEN;
- const commitSha = process.env.GITHUB_SHA;
- const [owner, repo] = process.env.GITHUB_REPOSITORY.split("/");
-
- if (!token || !commitSha || !owner || !repo) {
- console.log("Missing required environment variables.");
- process.exit(1);
- }
-
- const graphqlWithAuth = graphql.defaults({
- headers: { authorization: `Bearer ${token}` },
- });
-
- // Find PR from commit SHA
- const searchQuery = `
- query($owner: String!, $repo: String!, $sha: GitObjectID!) {
- repository(owner: $owner, name: $repo) {
- object(oid: $sha) {
- ... on Commit {
- associatedPullRequests(first: 1) {
- nodes {
- number
- body
- }
- }
- }
- }
- }
- }
- `;
-
- const prResult = await graphqlWithAuth(searchQuery, {
- owner,
- repo,
- sha: commitSha,
- });
-
- const pr = prResult.repository.object.associatedPullRequests.nodes[0];
- if (!pr) {
- console.log("No PR found for this commit.");
- return;
- }
-
- const prNumber = pr.number;
- const prBody = pr.body;
-
- const match = prBody.match(/#(\d+)/);
- if (!match) {
- console.log("No discussion ID found in PR body.");
- return;
- }
-
- const discussionNumber = match[1];
- console.log(`Extracted Discussion Number: ${discussionNumber}`);
-
- // Fetch GraphQL discussion ID
- const discussionQuery = `
- query($owner: String!, $repo: String!, $number: Int!) {
- repository(owner: $owner, name: $repo) {
- discussion(number: $number) {
- id
- }
- }
- }
- `;
-
- //
- try {
- const discussionResponse = await graphqlWithAuth(discussionQuery, {
- owner,
- repo,
- number: parseInt(discussionNumber, 10),
- });
-
- const discussionQLId = discussionResponse.repository.discussion.id;
- if (!discussionQLId) {
- console.log("Failed to fetch discussion GraphQL ID.");
- return;
- }
- } catch (error) {
- console.error("Discussion not found or error occurred while fetching discussion:", error);
- return;
- }
-
- // Post comment
- const commentMutation = `
- mutation($discussionId: ID!, $body: String!) {
- addDiscussionComment(input: { discussionId: $discussionId, body: $body }) {
- comment { id body }
- }
- }
- `;
-
- const commentResponse = await graphqlWithAuth(commentMutation, {
- discussionId: discussionQLId,
- body: `Merged with PR #${prNumber}`,
- });
-
- const commentId = commentResponse.addDiscussionComment.comment.id;
- if (!commentId) {
- console.log("Failed to post the comment.");
- return;
- }
-
- console.log(`Comment Posted Successfully! Comment ID: ${commentId}`);
-
- // Mark comment as answer
- const markAnswerMutation = `
- mutation($id: ID!) {
- markDiscussionCommentAsAnswer(input: { id: $id }) {
- discussion { id title }
- }
- }
- `;
-
- await graphqlWithAuth(markAnswerMutation, { id: commentId });
-
- console.log("Comment marked as answer successfully!");
-
- } catch (error) {
- console.error("Error:", error);
- process.exit(1);
- }
- })();
- EOF
diff --git a/.github/workflows/close-ttek-issue.yaml b/.github/workflows/close_template_issue.yml
similarity index 64%
rename from .github/workflows/close-ttek-issue.yaml
rename to .github/workflows/close_template_issue.yml
index 037d60757..d5a5cbc6d 100644
--- a/.github/workflows/close-ttek-issue.yaml
+++ b/.github/workflows/close_template_issue.yml
@@ -1,5 +1,4 @@
-name: Auto-Close tteck Issues
-
+name: Auto-Close Wrong Template Issues
on:
issues:
types: [opened]
@@ -9,7 +8,7 @@ jobs:
if: github.repository == 'community-scripts/ProxmoxVED'
runs-on: ubuntu-latest
steps:
- - name: Auto-close if tteck script detected
+ - name: Auto-close if wrong Template issue detected
uses: actions/github-script@v7
with:
script: |
@@ -18,18 +17,13 @@ jobs:
const issueNumber = issue.number;
// Check for tteck script mention
- if (content.includes("tteck") || content.includes("tteck/Proxmox")) {
- const message = `Hello, it looks like you are referencing the **old tteck repo**.
+ if (content.includes("Template debian-13-standard_13.1-2_amd64.tar.zst [local]") || content.includes("Container creation failed. Checking if template is corrupted or incomplete.") || content.includes("Template is valid, but container creation still failed.")){
+ const message = `Hello, it looks like you are referencing a container creation issue!
- This repository is no longer used for active scripts.
- **Please update your bookmarks** and use: [https://helper-scripts.com](https://helper-scripts.com)
+ We get many similar issues about this topic, so please check this discussion: #8126.
+ If this does not solve your problem, please reopen this issue.
- Also make sure your Bash command starts with:
- \`\`\`bash
- bash <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/ct/...)
- \`\`\`
-
- This issue is being closed automatically.`;
+ This issue is being closed automatically.`;
await github.rest.issues.createComment({
...context.repo,
diff --git a/.github/workflows/delete_new_script.yaml b/.github/workflows/delete_new_script.yaml
index 9d9f97d61..d538437a5 100644
--- a/.github/workflows/delete_new_script.yaml
+++ b/.github/workflows/delete_new_script.yaml
@@ -16,10 +16,12 @@ jobs:
- name: Generate a token for PR approval and merge
id: generate-token-merge
- uses: actions/create-github-app-token@v1
+ uses: actions/create-github-app-token@v2
with:
app-id: ${{ secrets.APP_ID_APPROVE_AND_MERGE }}
private-key: ${{ secrets.APP_KEY_APPROVE_AND_MERGE }}
+ owner: community-scripts
+ repositories: ProxmoxVED
- name: Extract Issue Title (Lowercase & Underscores)
id: extract_title
diff --git a/.github/workflows/get-versions-from-gh.yaml b/.github/workflows/get-versions-from-gh.yaml
index d3cd7e366..0cf632d11 100644
--- a/.github/workflows/get-versions-from-gh.yaml
+++ b/.github/workflows/get-versions-from-gh.yaml
@@ -24,10 +24,12 @@ jobs:
- name: Generate a token
id: generate-token
- uses: actions/create-github-app-token@v1
+ uses: actions/create-github-app-token@v2
with:
app-id: ${{ vars.APP_ID }}
private-key: ${{ secrets.APP_PRIVATE_KEY }}
+ owner: community-scripts
+ repositories: ProxmoxVED
- name: Crawl from Github API
env:
diff --git a/.github/workflows/get-versions-from-newreleases.yaml b/.github/workflows/get-versions-from-newreleases.yaml
index aabfae35e..634a0c1b4 100644
--- a/.github/workflows/get-versions-from-newreleases.yaml
+++ b/.github/workflows/get-versions-from-newreleases.yaml
@@ -24,17 +24,21 @@ jobs:
- name: Generate a token
id: generate-token
- uses: actions/create-github-app-token@v1
+ uses: actions/create-github-app-token@v2
with:
app-id: ${{ vars.APP_ID }}
private-key: ${{ secrets.APP_PRIVATE_KEY }}
+ owner: community-scripts
+ repositories: ProxmoxVED
- name: Generate a token for PR approval and merge
id: generate-token-merge
- uses: actions/create-github-app-token@v1
+ uses: actions/create-github-app-token@v2
with:
app-id: ${{ secrets.APP_ID_APPROVE_AND_MERGE }}
private-key: ${{ secrets.APP_KEY_APPROVE_AND_MERGE }}
+ owner: community-scripts
+ repositories: ProxmoxVED
- name: Crawl from newreleases.io
env:
diff --git a/.github/workflows/live/changelog-pr.yml b/.github/workflows/live/changelog-pr.yml
deleted file mode 100644
index dc5bcd3d3..000000000
--- a/.github/workflows/live/changelog-pr.yml
+++ /dev/null
@@ -1,226 +0,0 @@
-name: Create Changelog Pull Request
-
-on:
- push:
- branches: ["main"]
- workflow_dispatch:
-
-jobs:
- update-changelog-pull-request:
- runs-on: ubuntu-latest
- env:
- CONFIG_PATH: .github/changelog-pr-config.json
- BRANCH_NAME: github-action-update-changelog
- AUTOMATED_PR_LABEL: "automated pr"
- permissions:
- contents: write
- pull-requests: write
- steps:
- - name: Generate a token
- id: generate-token
- uses: actions/create-github-app-token@v1
- with:
- app-id: ${{ vars.APP_ID }}
- private-key: ${{ secrets.APP_PRIVATE_KEY }}
-
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- fetch-depth: 0
-
- - name: Get latest dates in changelog
- run: |
- DATES=$(grep -E '^## [0-9]{4}-[0-9]{2}-[0-9]{2}' CHANGELOG.md | head -n 2 | awk '{print $2}')
-
- LATEST_DATE=$(echo "$DATES" | sed -n '1p')
- SECOND_LATEST_DATE=$(echo "$DATES" | sed -n '2p')
- TODAY=$(date -u +%Y-%m-%d)
-
- echo "TODAY=$TODAY" >> $GITHUB_ENV
- if [[ "$LATEST_DATE" == "$TODAY" ]]; then
- echo "LATEST_DATE=$SECOND_LATEST_DATE" >> $GITHUB_ENV
- else
- echo "LATEST_DATE=$LATEST_DATE" >> $GITHUB_ENV
- fi
-
- - name: Get categorized pull requests
- id: get-categorized-prs
- uses: actions/github-script@v7
- with:
- script: |
- const fs = require('fs').promises;
- const path = require('path');
-
- const configPath = path.resolve(process.env.CONFIG_PATH);
- const fileContent = await fs.readFile(configPath, 'utf-8');
- const changelogConfig = JSON.parse(fileContent);
-
- const categorizedPRs = changelogConfig.map(obj => ({
- ...obj,
- notes: [],
- subCategories: obj.subCategories ?? (
- obj.labels.includes("update script") ? [
- { title: "🐞 Bug Fixes", labels: ["bugfix"], notes: [] },
- { title: "✨ New Features", labels: ["feature"], notes: [] },
- { title: "💥 Breaking Changes", labels: ["breaking change"], notes: [] }
- ] :
- obj.labels.includes("maintenance") ? [
- { title: "🐞 Bug Fixes", labels: ["bugfix"], notes: [] },
- { title: "✨ New Features", labels: ["feature"], notes: [] },
- { title: "💥 Breaking Changes", labels: ["breaking change"], notes: [] },
- { title: "📡 API", labels: ["api"], notes: [] },
- { title: "Github", labels: ["github"], notes: [] }
- ] :
- obj.labels.includes("website") ? [
- { title: "🐞 Bug Fixes", labels: ["bugfix"], notes: [] },
- { title: "✨ New Features", labels: ["feature"], notes: [] },
- { title: "💥 Breaking Changes", labels: ["breaking change"], notes: [] },
- { title: "Script Information", labels: ["json"], notes: [] }
- ] : []
- )
- }));
-
- const latestDateInChangelog = new Date(process.env.LATEST_DATE);
- latestDateInChangelog.setUTCHours(23, 59, 59, 999);
-
- const { data: pulls } = await github.rest.pulls.list({
- owner: context.repo.owner,
- repo: context.repo.repo,
- base: "main",
- state: "closed",
- sort: "updated",
- direction: "desc",
- per_page: 100,
- });
-
- pulls.filter(pr =>
- pr.merged_at &&
- new Date(pr.merged_at) > latestDateInChangelog &&
- !pr.labels.some(label =>
- ["invalid", "wontdo", process.env.AUTOMATED_PR_LABEL].includes(label.name.toLowerCase())
- )
- ).forEach(pr => {
-
- const prLabels = pr.labels.map(label => label.name.toLowerCase());
- const prNote = `- ${pr.title} [@${pr.user.login}](https://github.com/${pr.user.login}) ([#${pr.number}](${pr.html_url}))`;
-
- const updateScriptsCategory = categorizedPRs.find(category =>
- category.labels.some(label => prLabels.includes(label))
- );
-
- if (updateScriptsCategory) {
-
- const subCategory = updateScriptsCategory.subCategories.find(sub =>
- sub.labels.some(label => prLabels.includes(label))
- );
-
- if (subCategory) {
- subCategory.notes.push(prNote);
- } else {
- updateScriptsCategory.notes.push(prNote);
- }
- }
- });
-
- console.log(JSON.stringify(categorizedPRs, null, 2));
-
- return categorizedPRs;
-
-
- - name: Update CHANGELOG.md
- uses: actions/github-script@v7
- with:
- script: |
- const fs = require('fs').promises;
- const path = require('path');
-
- const today = process.env.TODAY;
- const latestDateInChangelog = process.env.LATEST_DATE;
- const changelogPath = path.resolve('CHANGELOG.md');
- const categorizedPRs = ${{ steps.get-categorized-prs.outputs.result }};
-
- console.log(JSON.stringify(categorizedPRs, null, 2));
-
-
- let newReleaseNotes = `## ${today}\n\n`;
- for (const { title, notes, subCategories } of categorizedPRs) {
- const hasSubcategories = subCategories && subCategories.length > 0;
- const hasMainNotes = notes.length > 0;
- const hasSubNotes = hasSubcategories && subCategories.some(sub => sub.notes && sub.notes.length > 0);
-
-
- if (hasMainNotes || hasSubNotes) {
- newReleaseNotes += `### ${title}\n\n`;
- }
-
- if (hasMainNotes) {
- newReleaseNotes += ` ${notes.join("\n")}\n\n`;
- }
- if (hasSubcategories) {
- for (const { title: subTitle, notes: subNotes } of subCategories) {
- if (subNotes && subNotes.length > 0) {
- newReleaseNotes += ` - #### ${subTitle}\n\n`;
- newReleaseNotes += ` ${subNotes.join("\n ")}\n\n`;
- }
- }
- }
- }
-
- const changelogContent = await fs.readFile(changelogPath, 'utf-8');
- const changelogIncludesTodaysReleaseNotes = changelogContent.includes(`\n## ${today}`);
-
- const regex = changelogIncludesTodaysReleaseNotes
- ? new RegExp(`## ${today}.*(?=## ${latestDateInChangelog})`, "gs")
- : new RegExp(`(?=## ${latestDateInChangelog})`, "gs");
-
- const newChangelogContent = changelogContent.replace(regex, newReleaseNotes);
- await fs.writeFile(changelogPath, newChangelogContent);
-
- - name: Check for changes
- id: verify-diff
- run: |
- git diff --quiet . || echo "changed=true" >> $GITHUB_ENV
-
- - name: Commit and push changes
- if: env.changed == 'true'
- run: |
- git config --global user.name "github-actions[bot]"
- git config --global user.email "github-actions[bot]@users.noreply.github.com"
- git add CHANGELOG.md
- git commit -m "Update CHANGELOG.md"
- git checkout -b $BRANCH_NAME || git checkout $BRANCH_NAME
- git push origin $BRANCH_NAME --force
-
- - name: Create pull request if not exists
- if: env.changed == 'true'
- env:
- GH_TOKEN: ${{ steps.generate-token.outputs.token }}
- run: |
- PR_EXISTS=$(gh pr list --head "${BRANCH_NAME}" --json number --jq '.[].number')
- if [ -z "$PR_EXISTS" ]; then
- gh pr create --title "[Github Action] Update CHANGELOG.md" \
- --body "This PR is auto-generated by a Github Action to update the CHANGELOG.md file." \
- --head $BRANCH_NAME \
- --base main \
- --label "$AUTOMATED_PR_LABEL"
- fi
-
- - name: Approve pull request
- if: env.changed == 'true'
- env:
- GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- run: |
- PR_NUMBER=$(gh pr list --head "${BRANCH_NAME}" --json number --jq '.[].number')
- if [ -n "$PR_NUMBER" ]; then
- gh pr review $PR_NUMBER --approve
- fi
-
- - name: Re-approve pull request after update
- if: env.changed == 'true'
- env:
- GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- run: |
- PR_NUMBER=$(gh pr list --head "${BRANCH_NAME}" --json number --jq '.[].number')
- if [ -n "$PR_NUMBER" ]; then
- gh pr review $PR_NUMBER --approve
- fi
\ No newline at end of file
diff --git a/.github/workflows/live/close-discussion.yml b/.github/workflows/live/close-discussion.yml
deleted file mode 100644
index 4b39fbf96..000000000
--- a/.github/workflows/live/close-discussion.yml
+++ /dev/null
@@ -1,122 +0,0 @@
-name: Close Discussion on PR Merge
-
-on:
- pull_request:
- types: [closed]
-
-jobs:
- close-discussion:
- runs-on: ubuntu-latest
-
- steps:
- - name: Checkout Repository
- uses: actions/checkout@v4
-
- - name: Set Up Node.js
- uses: actions/setup-node@v4
- with:
- node-version: "20"
- - name: Install Dependencies
- run: npm install zx @octokit/graphql
-
- - name: Close Discussion
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- PR_BODY: ${{ github.event.pull_request.body }}
- PR_NUMBER: ${{ github.event.pull_request.number }}
- REPO_OWNER: ${{ github.repository_owner }}
- REPO_NAME: ${{ github.event.repository.name }}
- run: |
- npx zx << 'EOF'
- import { graphql } from "@octokit/graphql";
- (async function() {
- try {
- const token = process.env.GITHUB_TOKEN;
- const prBody = process.env.PR_BODY;
- const prNumber = process.env.PR_NUMBER;
- const owner = process.env.REPO_OWNER;
- const repo = process.env.REPO_NAME;
-
- if (!token || !prBody || !prNumber || !owner || !repo) {
- console.log("Missing required environment variables.");
- process.exit(1);
- }
-
- const match = prBody.match(/#(\d+)/);
- if (!match) {
- console.log("No discussion ID found in PR body.");
- return;
- }
- const discussionNumber = match[1];
-
- console.log(`Extracted Discussion Number: ${discussionNumber}`);
- console.log(`PR Number: ${prNumber}`);
- console.log(`Repository: ${owner}/${repo}`);
-
- const graphqlWithAuth = graphql.defaults({
- headers: { authorization: `Bearer ${token}` },
- });
-
- const discussionQuery = `
- query($owner: String!, $repo: String!, $number: Int!) {
- repository(owner: $owner, name: $repo) {
- discussion(number: $number) {
- id
- }
- }
- }
- `;
-
- const discussionResponse = await graphqlWithAuth(discussionQuery, {
- owner,
- repo,
- number: parseInt(discussionNumber, 10),
- });
-
- const discussionQLId = discussionResponse.repository.discussion.id;
- if (!discussionQLId) {
- console.log("Failed to fetch discussion GraphQL ID.");
- return;
- }
-
- console.log(`GraphQL Discussion ID: ${discussionQLId}`);
-
- const commentMutation = `
- mutation($discussionId: ID!, $body: String!) {
- addDiscussionComment(input: { discussionId: $discussionId, body: $body }) {
- comment { id body }
- }
- }
- `;
-
- const commentResponse = await graphqlWithAuth(commentMutation, {
- discussionId: discussionQLId,
- body: `Merged with PR #${prNumber}`,
- });
-
- const commentId = commentResponse.addDiscussionComment.comment.id;
- if (!commentId) {
- console.log("Failed to post the comment.");
- return;
- }
-
- console.log(`Comment Posted Successfully! Comment ID: ${commentId}`);
-
- const markAnswerMutation = `
- mutation($id: ID!) {
- markDiscussionCommentAsAnswer(input: { id: $id }) {
- discussion { id title }
- }
- }
- `;
-
- await graphqlWithAuth(markAnswerMutation, { id: commentId });
-
- console.log("Comment marked as answer successfully!");
-
- } catch (error) {
- console.error("Error:", error);
- return;
- }
- })();
- EOF
\ No newline at end of file
diff --git a/.github/workflows/live/create-docker-for-runner.yml b/.github/workflows/live/create-docker-for-runner.yml
deleted file mode 100644
index c9fef0a5c..000000000
--- a/.github/workflows/live/create-docker-for-runner.yml
+++ /dev/null
@@ -1,37 +0,0 @@
-name: Build and Publish Docker Image
-
-on:
- push:
- branches:
- - main
- paths:
- - '.github/runner/docker/**'
- schedule:
- - cron: '0 0 * * *'
-
-jobs:
- build:
- runs-on: ubuntu-latest #To ensure it always builds we use the github runner with all the right tooling
-
- steps:
- - name: Checkout code
- uses: actions/checkout@v3
-
- - name: Log in to GHCR
- uses: docker/login-action@v2
- with:
- registry: ghcr.io
- username: ${{ github.actor }}
- password: ${{ secrets.GITHUB_TOKEN }}
-
- - name: Build Docker image
- run: |
- repo_name=${{ github.repository }} # Get repository name
- repo_name_lower=$(echo $repo_name | tr '[:upper:]' '[:lower:]') # Convert to lowercase
- docker build -t ghcr.io/$repo_name_lower/gh-runner-self:latest -f .github/runner/docker/gh-runner-self.dockerfile .
-
- - name: Push Docker image to GHCR
- run: |
- repo_name=${{ github.repository }} # Get repository name
- repo_name_lower=$(echo $repo_name | tr '[:upper:]' '[:lower:]') # Convert to lowercase
- docker push ghcr.io/$repo_name_lower/gh-runner-self:latest
diff --git a/.github/workflows/live/delete-json-branch.yml b/.github/workflows/live/delete-json-branch.yml
deleted file mode 100644
index e4cdcf24f..000000000
--- a/.github/workflows/live/delete-json-branch.yml
+++ /dev/null
@@ -1,28 +0,0 @@
-
-name: Delete JSON date PR Branch
-
-on:
- pull_request:
- types: [closed]
- branches:
- - main
-
-jobs:
- delete_branch:
- runs-on: ubuntu-latest
- steps:
- - name: Checkout the code
- uses: actions/checkout@v3
-
- - name: Delete PR Update Branch
- if: github.event.pull_request.merged == true && startsWith(github.event.pull_request.head.ref, 'pr-update-json-')
- run: |
- PR_BRANCH="${{ github.event.pull_request.head.ref }}"
- echo "Deleting branch $PR_BRANCH..."
-
- # Avoid deleting the default branch (e.g., main)
- if [[ "$PR_BRANCH" != "main" ]]; then
- git push origin --delete "$PR_BRANCH"
- else
- echo "Skipping deletion of the main branch"
- fi
\ No newline at end of file
diff --git a/.github/workflows/live/github-release.yml b/.github/workflows/live/github-release.yml
deleted file mode 100644
index 482d88a09..000000000
--- a/.github/workflows/live/github-release.yml
+++ /dev/null
@@ -1,57 +0,0 @@
-name: Create Daily Release
-
-on:
- schedule:
- - cron: '1 0 * * *' # Runs daily at 00:01 UTC
- workflow_dispatch:
-
-jobs:
- create-daily-release:
- runs-on: ubuntu-latest
- permissions:
- contents: write
- steps:
- - name: Checkout repository
- uses: actions/checkout@v4
-
- - name: Extract first 5000 characters from CHANGELOG.md
- run: head -c 5000 CHANGELOG.md > changelog_cropped.md
-
- - name: Debugging - Show extracted changelog
- run: |
- echo "=== CHANGELOG EXCERPT ==="
- cat changelog_cropped.md
- echo "========================="
-
- - name: Parse CHANGELOG.md and create release
- env:
- GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- run: |
- YESTERDAY=$(date -u --date="yesterday" +%Y-%m-%d)
- echo "Checking for changes on: $YESTERDAY"
-
- # Ensure yesterday's date exists in the changelog
- if ! grep -q "## $YESTERDAY" changelog_cropped.md; then
- echo "No entry found for $YESTERDAY, skipping release."
- exit 0
- fi
-
- # Extract section for yesterday's date
- awk -v date="## $YESTERDAY" '
- $0 ~ date {found=1; next}
- found && /^## [0-9]{4}-[0-9]{2}-[0-9]{2}/ {exit}
- found
- ' changelog_cropped.md > changelog_tmp.md
-
- echo "=== Extracted Changelog ==="
- cat changelog_tmp.md
- echo "==========================="
-
- # Skip if no content was found
- if [ ! -s changelog_tmp.md ]; then
- echo "No changes found for $YESTERDAY, skipping release."
- exit 0
- fi
-
- # Create GitHub release
- gh release create "$YESTERDAY" -t "$YESTERDAY" -F changelog_tmp.md
diff --git a/.github/workflows/live/script-test.yml b/.github/workflows/live/script-test.yml
deleted file mode 100644
index 272a12724..000000000
--- a/.github/workflows/live/script-test.yml
+++ /dev/null
@@ -1,177 +0,0 @@
-name: Run Scripts on PVE Node for testing
-permissions:
- pull-requests: write
-on:
- pull_request_target:
- branches:
- - main
- paths:
- - 'install/**.sh'
- - 'ct/**.sh'
-
-jobs:
- run-install-script:
- runs-on: pvenode
- steps:
- - name: Checkout PR branch
- uses: actions/checkout@v4
- with:
- ref: ${{ github.event.pull_request.head.ref }}
- repository: ${{ github.event.pull_request.head.repo.full_name }}
- fetch-depth: 0
-
- - name: Add Git safe directory
- run: |
- git config --global --add safe.directory /__w/ProxmoxVED/ProxmoxVE
-
- - name: Set up GH_TOKEN
- env:
- GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- run: |
- echo "GH_TOKEN=${GH_TOKEN}" >> $GITHUB_ENV
-
- - name: Get Changed Files
- run: |
- CHANGED_FILES=$(gh pr diff ${{ github.event.pull_request.number }} --repo ${{ github.repository }} --name-only)
- CHANGED_FILES=$(echo "$CHANGED_FILES" | tr '\n' ' ')
- echo "Changed files: $CHANGED_FILES"
- echo "SCRIPT=$CHANGED_FILES" >> $GITHUB_ENV
- env:
- GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-
-
- - name: Get scripts
- id: check-install-script
- run: |
- ALL_FILES=()
- ADDED_FILES=()
- for FILE in ${{ env.SCRIPT }}; do
- if [[ $FILE =~ ^install/.*-install\.sh$ ]] || [[ $FILE =~ ^ct/.*\.sh$ ]]; then
- STRIPPED_NAME=$(basename "$FILE" | sed 's/-install//' | sed 's/\.sh$//')
- if [[ ! " ${ADDED_FILES[@]} " =~ " $STRIPPED_NAME " ]]; then
- ALL_FILES+=("$FILE")
- ADDED_FILES+=("$STRIPPED_NAME") # Mark this base file as added (without the path)
- fi
- fi
- done
- ALL_FILES=$(echo "${ALL_FILES[@]}" | xargs)
- echo "$ALL_FILES"
- echo "ALL_FILES=$ALL_FILES" >> $GITHUB_ENV
-
- - name: Run scripts
- id: run-install
- continue-on-error: true
- run: |
- set +e
- #run for each files in /ct
- for FILE in ${{ env.ALL_FILES }}; do
- STRIPPED_NAME=$(basename "$FILE" | sed 's/-install//' | sed 's/\.sh$//')
- echo "Running Test for: $STRIPPED_NAME"
- if grep -E -q 'read\s+-r\s+-p\s+".*"\s+\w+' "$FILE"; then
- echo "The script contains an interactive prompt. Skipping execution."
- continue
- fi
- if [[ $FILE =~ ^install/.*-install\.sh$ ]]; then
- CT_SCRIPT="ct/$STRIPPED_NAME.sh"
- if [[ ! -f $CT_SCRIPT ]]; then
- echo "No CT script found for $STRIPPED_NAME"
- ERROR_MSG="No CT script found for $FILE"
- echo "$ERROR_MSG" > result_$STRIPPED_NAME.log
- continue
- fi
- if grep -E -q 'read\s+-r\s+-p\s+".*"\s+\w+' "install/$STRIPPED_NAME-install.sh"; then
- echo "The script contains an interactive prompt. Skipping execution."
- continue
- fi
- echo "Found CT script for $STRIPPED_NAME"
- chmod +x "$CT_SCRIPT"
- RUNNING_FILE=$CT_SCRIPT
- elif [[ $FILE =~ ^ct/.*\.sh$ ]]; then
- INSTALL_SCRIPT="install/$STRIPPED_NAME-install.sh"
- if [[ ! -f $INSTALL_SCRIPT ]]; then
- echo "No install script found for $STRIPPED_NAME"
- ERROR_MSG="No install script found for $FILE"
- echo "$ERROR_MSG" > result_$STRIPPED_NAME.log
- continue
- fi
- echo "Found install script for $STRIPPED_NAME"
- chmod +x "$INSTALL_SCRIPT"
- RUNNING_FILE=$FILE
- if grep -E -q 'read\s+-r\s+-p\s+".*"\s+\w+' "ct/$STRIPPED_NAME.sh"; then
- echo "The script contains an interactive prompt. Skipping execution."
- continue
- fi
- fi
- git remote add community-scripts https://github.com/community-scripts/ProxmoxVE.git
- git fetch community-scripts
- rm -f .github/workflows/scripts/app-test/pr-build.func || true
- rm -f .github/workflows/scripts/app-test/pr-install.func || true
- rm -f .github/workflows/scripts/app-test/pr-alpine-install.func || true
- rm -f .github/workflows/scripts/app-test/pr-create-lxc.sh || true
- git checkout community-scripts/main -- .github/workflows/scripts/app-test/pr-build.func
- git checkout community-scripts/main -- .github/workflows/scripts/app-test/pr-install.func
- git checkout community-scripts/main -- .github/workflows/scripts/app-test/pr-alpine-install.func
- git checkout community-scripts/main -- .github/workflows/scripts/app-test/pr-create-lxc.sh
- chmod +x $RUNNING_FILE
- chmod +x .github/workflows/scripts/app-test/pr-create-lxc.sh
- chmod +x .github/workflows/scripts/app-test/pr-install.func
- chmod +x .github/workflows/scripts/app-test/pr-alpine-install.func
- chmod +x .github/workflows/scripts/app-test/pr-build.func
- sed -i 's|source <(curl -s https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)|source .github/workflows/scripts/app-test/pr-build.func|g' "$RUNNING_FILE"
- echo "Executing $RUNNING_FILE"
- ERROR_MSG=$(./$RUNNING_FILE 2>&1 > /dev/null)
- echo "Finished running $FILE"
- if [ -n "$ERROR_MSG" ]; then
- echo "ERROR in $STRIPPED_NAME: $ERROR_MSG"
- echo "$ERROR_MSG" > result_$STRIPPED_NAME.log
- fi
- done
- set -e # Restore exit-on-error
-
- - name: Cleanup PVE Node
- run: |
- containers=$(pct list | tail -n +2 | awk '{print $0 " " $4}' | awk '{print $1}')
-
- for container_id in $containers; do
- status=$(pct status $container_id | awk '{print $2}')
- if [[ $status == "running" ]]; then
- pct stop $container_id
- pct destroy $container_id
- fi
- done
-
- - name: Post error comments
- run: |
- ERROR="false"
- SEARCH_LINE=".github/workflows/scripts/app-test/pr-build.func: line 255:"
-
- # Get all existing comments on the PR
- EXISTING_COMMENTS=$(gh pr view ${{ github.event.pull_request.number }} --repo ${{ github.repository }} --json comments --jq '.comments[].body')
-
- for FILE in ${{ env.ALL_FILES }}; do
- STRIPPED_NAME=$(basename "$FILE" | sed 's/-install//' | sed 's/\.sh$//')
- if [[ ! -f result_$STRIPPED_NAME.log ]]; then
- continue
- fi
- ERROR_MSG=$(cat result_$STRIPPED_NAME.log)
-
- if [ -n "$ERROR_MSG" ]; then
- CLEANED_ERROR_MSG=$(echo "$ERROR_MSG" | sed "s|$SEARCH_LINE.*||")
- COMMENT_BODY=":warning: The script _**$FILE**_ failed with the following message:
${CLEANED_ERROR_MSG}
"
-
- # Check if the comment already exists
- if echo "$EXISTING_COMMENTS" | grep -qF "$COMMENT_BODY"; then
- echo "Skipping duplicate comment for $FILE"
- else
- echo "Posting error message for $FILE"
- gh pr comment ${{ github.event.pull_request.number }} \
- --repo ${{ github.repository }} \
- --body "$COMMENT_BODY"
- ERROR="true"
- fi
- fi
- done
-
- echo "ERROR=$ERROR" >> $GITHUB_ENV
-
-
diff --git a/.github/workflows/live/script_format.yml b/.github/workflows/live/script_format.yml
deleted file mode 100644
index c8ea7a4de..000000000
--- a/.github/workflows/live/script_format.yml
+++ /dev/null
@@ -1,243 +0,0 @@
-name: Script Format Check
-permissions:
- pull-requests: write
-on:
- pull_request_target:
- branches:
- - main
- paths:
- - 'install/*.sh'
- - 'ct/*.sh'
-
-jobs:
- run-install-script:
- runs-on: pvenode
- steps:
- - name: Checkout PR branch (supports forks)
- uses: actions/checkout@v4
- with:
- ref: ${{ github.event.pull_request.head.ref }}
- repository: ${{ github.event.pull_request.head.repo.full_name }}
- fetch-depth: 0
-
- - name: Add Git safe directory
- run: |
- git config --global --add safe.directory /__w/ProxmoxVED/ProxmoxVE
-
- - name: Set up GH_TOKEN
- env:
- GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- run: |
- echo "GH_TOKEN=${GH_TOKEN}" >> $GITHUB_ENV
-
- - name: Get Changed Files
- run: |
- CHANGED_FILES=$(gh pr diff ${{ github.event.pull_request.number }} --repo ${{ github.repository }} --name-only)
- CHANGED_FILES=$(echo "$CHANGED_FILES" | tr '\n' ' ')
- echo "Changed files: $CHANGED_FILES"
- echo "SCRIPT=$CHANGED_FILES" >> $GITHUB_ENV
- env:
- GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-
- - name: Check scripts
- id: run-install
- continue-on-error: true
- run: |
- for FILE in ${{ env.SCRIPT }}; do
- STRIPPED_NAME=$(basename "$FILE" | sed 's/-install//' | sed 's/\.sh$//')
- echo "Running Test for: $STRIPPED_NAME"
- FILE_STRIPPED="${FILE##*/}"
- LOG_FILE="result_$FILE_STRIPPED.log"
-
- if [[ $FILE =~ ^ct/.*\.sh$ ]]; then
-
- FIRST_LINE=$(sed -n '1p' "$FILE")
- [[ "$FIRST_LINE" != "#!/usr/bin/env bash" ]] && echo "Line 1 was $FIRST_LINE | Should be: #!/usr/bin/env bash" >> "$LOG_FILE"
- SECOND_LINE=$(sed -n '2p' "$FILE")
- [[ "$SECOND_LINE" != "source <(curl -s https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)" ]] &&
- echo "Line 2 was $SECOND_LINE | Should be: source <(curl -s https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)" >> "$LOG_FILE"
- THIRD_LINE=$(sed -n '3p' "$FILE")
- if ! [[ "$THIRD_LINE" =~ ^#\ Copyright\ \(c\)\ [0-9]{4}-[0-9]{4}\ community-scripts\ ORG$ || "$THIRD_LINE" =~ ^Copyright\ \(c\)\ [0-9]{4}-[0-9]{4}\ tteck$ ]]; then
- echo "Line 3 was $THIRD_LINE | Should be: # Copyright (c) 2021-2025 community-scripts ORG" >> "$LOG_FILE"
- fi
-
- EXPECTED_AUTHOR="# Author:"
- EXPECTED_LICENSE="# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE"
- EXPECTED_SOURCE="# Source:"
- EXPECTED_EMPTY=""
-
- for i in {4..7}; do
- LINE=$(sed -n "${i}p" "$FILE")
-
- case $i in
- 4)
- [[ $LINE == $EXPECTED_AUTHOR* ]] || printf "Line %d was: '%s' | Should start with: '%s'\n" "$i" "$LINE" "$EXPECTED_AUTHOR" >> $LOG_FILE
- ;;
- 5)
- [[ "$LINE" == "$EXPECTED_LICENSE" ]] || printf "Line %d was: '%s' | Should be: '%s'\n" "$i" "$LINE" "$EXPECTED_LICENSE" >> $LOG_FILE
- ;;
- 6)
- [[ $LINE == $EXPECTED_SOURCE* ]] || printf "Line %d was: '%s' | Should start with: '%s'\n" "$i" "$LINE" "$EXPECTED_SOURCE" >> $LOG_FILE
- ;;
- 7)
- [[ -z $LINE ]] || printf "Line %d was: '%s' | Should be empty\n" "$i" "$LINE" >> $LOG_FILE
- ;;
- esac
- done
-
-
- EXPECTED_PREFIXES=(
- "APP="
- "var_tags="
- "var_cpu=" # Must be a number
- "var_ram=" # Must be a number
- "var_disk=" # Must be a number
- "var_os=" # Must be debian, alpine, or ubuntu
- "var_version="
- "var_unprivileged=" # Must be 0 or 1
- )
-
-
- for i in {8..15}; do
- LINE=$(sed -n "${i}p" "$FILE")
- INDEX=$((i - 8))
-
- case $INDEX in
- 2|3|4) # var_cpu, var_ram, var_disk (must be numbers)
- if [[ "$LINE" =~ ^${EXPECTED_PREFIXES[$INDEX]}([0-9]+)$ ]]; then
- continue # Valid
- else
- echo "Line $i was '$LINE' | Should be: '${EXPECTED_PREFIXES[$INDEX]}'" >> "$LOG_FILE"
- fi
- ;;
- 5) # var_os (must be debian, alpine, or ubuntu)
- if [[ "$LINE" =~ ^var_os=(debian|alpine|ubuntu)$ ]]; then
- continue # Valid
- else
- echo "Line $i was '$LINE' | Should be: 'var_os=[debian|alpine|ubuntu]'" >> "$LOG_FILE"
- fi
- ;;
- 7) # var_unprivileged (must be 0 or 1)
- if [[ "$LINE" =~ ^var_unprivileged=[01]$ ]]; then
- continue # Valid
- else
- echo "Line $i was '$LINE' | Should be: 'var_unprivileged=[0|1]'" >> "$LOG_FILE"
- fi
- ;;
- *) # Other lines (must start with expected prefix)
- if [[ "$LINE" == ${EXPECTED_PREFIXES[$INDEX]}* ]]; then
- continue # Valid
- else
- echo "Line $i was '$LINE' | Should start with '${EXPECTED_PREFIXES[$INDEX]}'" >> "$LOG_FILE"
- fi
- ;;
- esac
- done
-
- for i in {16..20}; do
- LINE=$(sed -n "${i}p" "$FILE")
- EXPECTED=(
- "header_info \"$APP\""
- "variables"
- "color"
- "catch_errors"
- "function update_script() {"
- )
- [[ "$LINE" != "${EXPECTED[$((i-16))]}" ]] && echo "Line $i was $LINE | Should be: ${EXPECTED[$((i-16))]}" >> "$LOG_FILE"
- done
- cat "$LOG_FILE"
- elif [[ $FILE =~ ^install/.*-install\.sh$ ]]; then
-
- FIRST_LINE=$(sed -n '1p' "$FILE")
- [[ "$FIRST_LINE" != "#!/usr/bin/env bash" ]] && echo "Line 1 was $FIRST_LINE | Should be: #!/usr/bin/env bash" >> "$LOG_FILE"
-
- SECOND_LINE=$(sed -n '2p' "$FILE")
- [[ -n "$SECOND_LINE" ]] && echo "Line 2 should be empty" >> "$LOG_FILE"
-
- THIRD_LINE=$(sed -n '3p' "$FILE")
- if ! [[ "$THIRD_LINE" =~ ^#\ Copyright\ \(c\)\ [0-9]{4}-[0-9]{4}\ community-scripts\ ORG$ || "$THIRD_LINE" =~ ^Copyright\ \(c\)\ [0-9]{4}-[0-9]{4}\ tteck$ ]]; then
- echo "Line 3 was $THIRD_LINE | Should be: # Copyright (c) 2021-2025 community-scripts ORG" >> "$LOG_FILE"
- fi
-
- EXPECTED_AUTHOR="# Author:"
- EXPECTED_LICENSE="# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE"
- EXPECTED_SOURCE="# Source:"
- EXPECTED_EMPTY=""
-
- for i in {4..7}; do
- LINE=$(sed -n "${i}p" "$FILE")
-
- case $i in
- 4)
- [[ $LINE == $EXPECTED_AUTHOR* ]] || printf "Line %d was: '%s' | Should start with: '%s'\n" "$i" "$LINE" "$EXPECTED_AUTHOR" >> $LOG_FILE
- ;;
- 5)
- [[ "$LINE" == "$EXPECTED_LICENSE" ]] || printf "Line %d was: '%s' | Should be: '%s'\n" "$i" "$LINE" "$EXPECTED_LICENSE" >> $LOG_FILE
- ;;
- 6)
- [[ $LINE == $EXPECTED_SOURCE* ]] || printf "Line %d was: '%s' | Should start with: '%s'\n" "$i" "$LINE" "$EXPECTED_SOURCE" >> $LOG_FILE
- ;;
- 7)
- [[ -z $LINE ]] || printf "Line %d was: '%s' | Should be empty\n" "$i" "$LINE" >> $LOG_FILE
- ;;
- esac
- done
-
- [[ "$(sed -n '8p' "$FILE")" != 'source /dev/stdin <<< "$FUNCTIONS_FILE_PATH"' ]] && echo 'Line 8 should be: source /dev/stdin <<< "$FUNCTIONS_FILE_PATH"' >> "$LOG_FILE"
-
- for i in {9..14}; do
- LINE=$(sed -n "${i}p" "$FILE")
- EXPECTED=(
- "color"
- "verb_ip6"
- "catch_errors"
- "setting_up_container"
- "network_check"
- "update_os"
- )
- [[ "$LINE" != "${EXPECTED[$((i-9))]}" ]] && echo "Line $i was $LINE | Should be: ${EXPECTED[$((i-9))]}" >> "$LOG_FILE"
- done
-
- [[ -n "$(sed -n '15p' "$FILE")" ]] && echo "Line 15 should be empty" >> "$LOG_FILE"
- [[ "$(sed -n '16p' "$FILE")" != 'msg_info "Installing Dependencies"' ]] && echo 'Line 16 should be: msg_info "Installing Dependencies"' >> "$LOG_FILE"
-
- LAST_3_LINES=$(tail -n 3 "$FILE")
- [[ "$LAST_3_LINES" != *"$STD apt-get -y autoremove"* ]] && echo 'Third to last line should be: $STD apt-get -y autoremove' >> "$LOG_FILE"
- [[ "$LAST_3_LINES" != *"$STD apt-get -y autoclean"* ]] && echo 'Second to last line should be: $STD apt-get -y clean' >> "$LOG_FILE"
- [[ "$LAST_3_LINES" != *'msg_ok "Cleaned"'* ]] && echo 'Last line should be: msg_ok "Cleaned"' >> "$LOG_FILE"
- cat "$LOG_FILE"
- fi
-
- done
-
-
- - name: Post error comments
- run: |
- ERROR="false"
- for FILE in ${{ env.SCRIPT }}; do
- FILE_STRIPPED="${FILE##*/}"
- LOG_FILE="result_$FILE_STRIPPED.log"
- echo $LOG_FILE
- if [[ ! -f $LOG_FILE ]]; then
- continue
- fi
- ERROR_MSG=$(cat $LOG_FILE)
-
- if [ -n "$ERROR_MSG" ]; then
- echo "Posting error message for $FILE"
- echo ${ERROR_MSG}
- gh pr comment ${{ github.event.pull_request.number }} \
- --repo ${{ github.repository }} \
- --body ":warning: The script _**$FILE**_ has the following formatting errors:
${ERROR_MSG}
"
-
-
- ERROR="true"
- fi
- done
- echo "ERROR=$ERROR" >> $GITHUB_ENV
- env:
- GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-
- - name: Fail if error
- if: ${{ env.ERROR == 'true' }}
- run: exit 1
diff --git a/.github/workflows/live/update-json-date.yml b/.github/workflows/live/update-json-date.yml
deleted file mode 100644
index 7e9c24973..000000000
--- a/.github/workflows/live/update-json-date.yml
+++ /dev/null
@@ -1,131 +0,0 @@
-name: Update JSON Date
-
-on:
- push:
- branches:
- - main
- paths:
- - 'json/**.json'
- workflow_dispatch:
-
-jobs:
- update-app-files:
- runs-on: ubuntu-latest
-
- permissions:
- contents: write
- pull-requests: write
-
- steps:
- - name: Generate a token
- id: generate-token
- uses: actions/create-github-app-token@v1
- with:
- app-id: ${{ vars.APP_ID }}
- private-key: ${{ secrets.APP_PRIVATE_KEY }}
-
- - name: Generate dynamic branch name
- id: timestamp
- run: echo "BRANCH_NAME=pr-update-json-$(date +'%Y%m%d%H%M%S')" >> $GITHUB_ENV
-
- - name: Set up GH_TOKEN
- env:
- GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- run: |
- echo "GH_TOKEN=${GH_TOKEN}" >> $GITHUB_ENV
-
- - name: Checkout Repository
- uses: actions/checkout@v4
- with:
- fetch-depth: 2 # Ensure we have the last two commits
-
- - name: Get Previous Commit
- id: prev_commit
- run: |
- PREV_COMMIT=$(git rev-parse HEAD^)
- echo "Previous commit: $PREV_COMMIT"
- echo "prev_commit=$PREV_COMMIT" >> $GITHUB_ENV
-
- - name: Get Newly Added JSON Files
- id: new_json_files
- run: |
- git diff --name-only --diff-filter=A ${{ env.prev_commit }} HEAD | grep '^json/.*\.json$' > new_files.txt || true
- echo "New files detected:"
- cat new_files.txt || echo "No new files."
-
- - name: Disable file mode changes
- run: git config core.fileMode false
-
- - name: Set up Git
- run: |
- git config --global user.name "GitHub Actions"
- git config --global user.email "github-actions[bot]@users.noreply.github.com"
-
- - name: Change JSON Date
- id: change-json-date
- run: |
- current_date=$(date +"%Y-%m-%d")
- while IFS= read -r file; do
- # Skip empty lines
- [[ -z "$file" ]] && continue
-
- if [[ -f "$file" ]]; then
- echo "Processing $file..."
- current_json_date=$(jq -r '.date_created // empty' "$file")
- if [[ -z "$current_json_date" || "$current_json_date" != "$current_date" ]]; then
- echo "Updating $file with date $current_date"
- jq --arg date "$current_date" '.date_created = $date' "$file" > temp.json && mv temp.json "$file"
- else
- echo "Date in $file is already up to date."
- fi
- else
- echo "Warning: File $file not found!"
- fi
- done < new_files.txt
- rm new_files.txt
-
- - name: Check if there are any changes
- run: |
- echo "Checking for changes..."
- git add -A # Untracked Dateien aufnehmen
- git status
- if git diff --cached --quiet; then
- echo "No changes detected."
- echo "changed=false" >> "$GITHUB_ENV"
- else
- echo "Changes detected:"
- git diff --stat --cached
- echo "changed=true" >> "$GITHUB_ENV"
- fi
-
- # Step 7: Commit and create PR if changes exist
- - name: Commit and create PR if changes exist
- if: env.changed == 'true'
- run: |
-
-
- git commit -m "Update date in json"
- git checkout -b ${{ env.BRANCH_NAME }}
- git push origin ${{ env.BRANCH_NAME }}
-
- gh pr create --title "[core] update date in json" \
- --body "This PR is auto-generated by a GitHub Action to update the date in json." \
- --head ${{ env.BRANCH_NAME }} \
- --base main \
- --label "automated pr"
- env:
- GH_TOKEN: ${{ steps.generate-token.outputs.token }}
-
- - name: Approve pull request
- if: env.changed == 'true'
- env:
- GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- run: |
- PR_NUMBER=$(gh pr list --head "${{ env.BRANCH_NAME }}" --json number --jq '.[].number')
- if [ -n "$PR_NUMBER" ]; then
- gh pr review $PR_NUMBER --approve
- fi
-
- - name: No changes detected
- if: env.changed == 'false'
- run: echo "No changes to commit. Workflow completed successfully."
diff --git a/.github/workflows/live/validate-filenames.yml b/.github/workflows/live/validate-filenames.yml
deleted file mode 100644
index 16f2f7106..000000000
--- a/.github/workflows/live/validate-filenames.yml
+++ /dev/null
@@ -1,161 +0,0 @@
-name: Validate filenames
-
-on:
- pull_request_target:
- paths:
- - "ct/*.sh"
- - "install/*.sh"
- - "json/*.json"
-
-jobs:
- check-files:
- name: Check changed files
- runs-on: ubuntu-latest
- permissions:
- pull-requests: write
-
- steps:
- - name: Get pull request information
- if: github.event_name == 'pull_request_target'
- uses: actions/github-script@v7
- id: pr
- with:
- script: |
- const { data: pullRequest } = await github.rest.pulls.get({
- ...context.repo,
- pull_number: context.payload.pull_request.number,
- });
- return pullRequest;
-
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- fetch-depth: 0 # Ensure the full history is fetched for accurate diffing
- ref: ${{ github.event_name == 'pull_request_target' && fromJSON(steps.pr.outputs.result).merge_commit_sha || '' }}
-
- - name: Get changed files
- id: changed-files
- run: |
- if ${{ github.event_name == 'pull_request_target' }}; then
- echo "files=$(git diff --name-only ${{ github.event.pull_request.base.sha }} ${{ steps.pr.outputs.result && fromJSON(steps.pr.outputs.result).merge_commit_sha }} | xargs)" >> $GITHUB_OUTPUT
- else
- echo "files=$(git diff --name-only ${{ github.event.before }} ${{ github.event.after }} | xargs)" >> $GITHUB_OUTPUT
- fi
-
- - name: "Validate filenames in ct and install directory"
- if: always() && steps.changed-files.outputs.files != ''
- id: check-scripts
- run: |
- CHANGED_FILES=$(printf "%s\n" ${{ steps.changed-files.outputs.files }} | { grep -E '^(ct|install)/.*\.sh$' || true; })
-
- NON_COMPLIANT_FILES=""
- for FILE in $CHANGED_FILES; do
- # Datei "misc/create_lxc.sh" explizit überspringen
- if [[ "$FILE" == "misc/create_lxc.sh" ]]; then
- continue
- fi
- BASENAME=$(echo "$(basename "${FILE%.*}")")
- if [[ ! "$BASENAME" =~ ^[a-z0-9-]+$ ]]; then
- NON_COMPLIANT_FILES="$NON_COMPLIANT_FILES $FILE"
- fi
- done
-
- if [ -n "$NON_COMPLIANT_FILES" ]; then
- echo "files=$NON_COMPLIANT_FILES" >> $GITHUB_OUTPUT
- echo "Non-compliant filenames found, change to lowercase:"
- for FILE in $NON_COMPLIANT_FILES; do
- echo "$FILE"
- done
- exit 1
- fi
-
- - name: "Validate filenames in json directory."
- if: always() && steps.changed-files.outputs.files != ''
- id: check-json
- run: |
- CHANGED_FILES=$(printf "%s\n" ${{ steps.changed-files.outputs.files }} | { grep -E '^json/.*\.json$' || true; })
-
- NON_COMPLIANT_FILES=""
- for FILE in $CHANGED_FILES; do
- BASENAME=$(echo "$(basename "${FILE%.*}")")
- if [[ ! "$BASENAME" =~ ^[a-z0-9-]+$ ]]; then
- NON_COMPLIANT_FILES="$NON_COMPLIANT_FILES $FILE"
- fi
- done
-
- if [ -n "$NON_COMPLIANT_FILES" ]; then
- echo "files=$NON_COMPLIANT_FILES" >> $GITHUB_OUTPUT
- echo "Non-compliant filenames found, change to lowercase:"
- for FILE in $NON_COMPLIANT_FILES; do
- echo "$FILE"
- done
- exit 1
- fi
-
- - name: Post results and comment
- if: always() && steps.check-scripts.outputs.files != '' && steps.check-json.outputs.files != '' && github.event_name == 'pull_request_target'
- uses: actions/github-script@v7
- with:
- script: |
- const result = "${{ job.status }}" === "success" ? "success" : "failure";
- const nonCompliantFiles = {
- script: "${{ steps.check-scripts.outputs.files }}",
- JSON: "${{ steps.check-json.outputs.files }}",
- };
-
- const issueNumber = context.payload.pull_request
- ? context.payload.pull_request.number
- : null;
- const commentIdentifier = "validate-filenames";
- let newCommentBody = `\n### Filename validation\n\n`;
-
- if (result === "failure") {
- newCommentBody += ":x: We found issues in the following changed files:\n\n";
- for (const [check, files] of Object.entries(nonCompliantFiles)) {
- if (files) {
- newCommentBody += `**${check.charAt(0).toUpperCase() + check.slice(1)} filename invalid:**\n${files
- .trim()
- .split(" ")
- .map((file) => `- ${file}`)
- .join("\n")}\n\n`;
- }
- }
- newCommentBody +=
- "Please change the filenames to lowercase and use only alphanumeric characters and dashes.\n";
- } else {
- newCommentBody += `:rocket: All files passed filename validation!\n`;
- }
-
- newCommentBody += `\n\n`;
-
- if (issueNumber) {
- const { data: comments } = await github.rest.issues.listComments({
- ...context.repo,
- issue_number: issueNumber,
- });
-
- const existingComment = comments.find(
- (comment) => comment.user.login === "github-actions[bot]",
- );
-
- if (existingComment) {
- if (existingComment.body.includes(commentIdentifier)) {
- const re = new RegExp(String.raw`[\s\S]*?`, "");
- newCommentBody = existingComment.body.replace(re, newCommentBody);
- } else {
- newCommentBody = existingComment.body + '\n\n---\n\n' + newCommentBody;
- }
-
- await github.rest.issues.updateComment({
- ...context.repo,
- comment_id: existingComment.id,
- body: newCommentBody,
- });
- } else {
- await github.rest.issues.createComment({
- ...context.repo,
- issue_number: issueNumber,
- body: newCommentBody,
- });
- }
- }
diff --git a/.github/workflows/move-to-main-repo.yaml b/.github/workflows/move-to-main-repo.yaml
index 9c9fb65dd..dfddeff73 100644
--- a/.github/workflows/move-to-main-repo.yaml
+++ b/.github/workflows/move-to-main-repo.yaml
@@ -18,7 +18,7 @@ jobs:
steps:
- name: Generate a token
id: app-token
- uses: actions/create-github-app-token@v1
+ uses: actions/create-github-app-token@v2
with:
app-id: ${{ vars.PUSH_MAIN_APP_ID }}
private-key: ${{ secrets.PUSH_MAIN_APP_SECRET }}
diff --git a/.github/workflows/push-to-gitea.yml b/.github/workflows/push-to-gitea.yml
index 76268ffc7..c1fb72d51 100644
--- a/.github/workflows/push-to-gitea.yml
+++ b/.github/workflows/push-to-gitea.yml
@@ -11,17 +11,29 @@ jobs:
runs-on: ubuntu-latest
steps:
- - name: Checkout source repo
- uses: actions/checkout@v4
- with:
- fetch-depth: 0
-
- - name: Push to Gitea
- run: |
- git config --global user.name "Push From Github"
- git config --global user.email "actions@github.com"
- git remote add gitea https://$GITEA_USER:$GITEA_TOKEN@git.community-scripts.org/community-scripts/ProxmoxVED.git
- git push gitea --all
- env:
- GITEA_USER: ${{ secrets.GITEA_USERNAME }}
- GITEA_TOKEN: ${{ secrets.GITEA_TOKEN }}
+ - name: Checkout source repo
+ uses: actions/checkout@v4
+ with:
+ fetch-depth: 0
+ - name: Set Git identity for actions
+ run: |
+ git config --global user.name "Push From Github"
+ git config --global user.email "actions@github.com"
+ - name: Add Gitea remote
+ run: git remote add gitea https://$GITEA_USER:$GITEA_TOKEN@git.community-scripts.org/community-scripts/ProxmoxVED.git
+ env:
+ GITEA_USER: ${{ secrets.GITEA_USERNAME }}
+ GITEA_TOKEN: ${{ secrets.GITEA_TOKEN }}
+ - name: Pull Gitea changes
+ run: |
+ git fetch gitea
+ git merge --strategy=ours gitea/main
+ env:
+ GITEA_USER: ${{ secrets.GITEA_USERNAME }}
+ GITEA_TOKEN: ${{ secrets.GITEA_TOKEN }}
+
+ - name: Push to Gitea
+ run: git push gitea main --force
+ env:
+ GITEA_USER: ${{ secrets.GITEA_USERNAME }}
+ GITEA_TOKEN: ${{ secrets.GITEA_TOKEN }}
diff --git a/.github/workflows/revision-bump.yml.bak b/.github/workflows/revision-bump.yml.bak
new file mode 100644
index 000000000..65242968a
--- /dev/null
+++ b/.github/workflows/revision-bump.yml.bak
@@ -0,0 +1,106 @@
+name: Bump build.func Revision
+
+on:
+ push:
+ branches:
+ - main
+ paths:
+ - "misc/**"
+ workflow_dispatch:
+
+jobs:
+ bump-revision:
+ if: github.repository == 'community-scripts/ProxmoxVED'
+ runs-on: ubuntu-latest
+
+ permissions:
+ contents: write
+ pull-requests: write
+
+ steps:
+ - name: Generate token for PR
+ id: generate-token
+ uses: actions/create-github-app-token@v1
+ with:
+ app-id: ${{ vars.APP_ID }}
+ private-key: ${{ secrets.APP_PRIVATE_KEY }}
+
+ - name: Generate token for auto-merge
+ id: generate-token-merge
+ uses: actions/create-github-app-token@v1
+ with:
+ app-id: ${{ secrets.APP_ID_APPROVE_AND_MERGE }}
+ private-key: ${{ secrets.APP_KEY_APPROVE_AND_MERGE }}
+
+ - name: Checkout repository
+ uses: actions/checkout@v4
+ with:
+ fetch-depth: 2
+
+ - name: Get changed files
+ id: changes
+ run: |
+ git diff --name-only HEAD^ HEAD > changed_files.txt
+ echo "Changed files:"
+ cat changed_files.txt
+
+ - name: Skip if only build.func changed
+ id: skipcheck
+ run: |
+ if grep -q "^misc/build.func$" changed_files.txt && [ $(wc -l < changed_files.txt) -eq 1 ]; then
+ echo "skip=true" >> $GITHUB_ENV
+ else
+ echo "skip=false" >> $GITHUB_ENV
+ fi
+
+ - name: Disable file mode changes
+ run: git config core.fileMode false
+
+ - name: Bump build.func revision
+ if: env.skip == 'false'
+ run: |
+ REV_FILE=".build-revision"
+ if [ ! -f "$REV_FILE" ]; then echo 0 > "$REV_FILE"; fi
+ REV_NUM=$(($(cat $REV_FILE) + 1))
+ echo $REV_NUM > $REV_FILE
+ SHORT_SHA=$(git rev-parse --short HEAD)
+ REV_STR="Revision: r${REV_NUM} (git-${SHORT_SHA})"
+
+ echo "Updating build.func with $REV_STR"
+ sed -i "s/^# Revision:.*/# $REV_STR/" misc/build.func
+ echo "REV_STR=$REV_STR" >> $GITHUB_ENV
+
+ git config --global user.name "GitHub Actions"
+ git config --global user.email "github-actions[bot]@users.noreply.github.com"
+ git add misc/build.func .build-revision
+ git commit -m "chore: bump build.func to $REV_STR"
+
+ - name: Create PR
+ if: env.skip == 'false'
+ run: |
+ BRANCH_NAME="pr-build-revision-$(date +'%Y%m%d%H%M%S')"
+ git checkout -b $BRANCH_NAME
+ git push origin $BRANCH_NAME
+
+ gh pr create --title "[core] bump build.func to $REV_STR" \
+ --body "This PR bumps build.func revision because files in misc/ changed." \
+ --head $BRANCH_NAME \
+ --base main \
+ --label "automated pr"
+ env:
+ GH_TOKEN: ${{ steps.generate-token.outputs.token }}
+
+ - name: Approve PR and merge
+ if: env.skip == 'false'
+ env:
+ GH_TOKEN: ${{ steps.generate-token-merge.outputs.token }}
+ run: |
+ PR_NUMBER=$(gh pr list --head "${BRANCH_NAME}" --json number --jq '.[].number')
+ if [ -n "$PR_NUMBER" ]; then
+ gh pr review $PR_NUMBER --approve
+ gh pr merge $PR_NUMBER --squash --admin
+ fi
+
+ - name: Skip log
+ if: env.skip == 'true'
+ run: echo "Only build.func changed – nothing to do."
diff --git a/ct/alpine-ntfy.sh b/ct/alpine-ntfy.sh
new file mode 100644
index 000000000..4fc65b5d1
--- /dev/null
+++ b/ct/alpine-ntfy.sh
@@ -0,0 +1,48 @@
+#!/usr/bin/env bash
+source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)
+# Copyright (c) 2021-2025 community-scripts ORG
+# Author: cobalt (cobaltgit)
+# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
+# Source: https://ntfy.sh/
+
+APP="Alpine-ntfy"
+var_tags="${var_tags:-notification}"
+var_cpu="${var_cpu:-1}"
+var_ram="${var_ram:-256}"
+var_disk="${var_disk:-2}"
+var_os="${var_os:-alpine}"
+var_version="${var_version:-3.22}"
+var_unprivileged="${var_unprivileged:-1}"
+
+header_info "$APP"
+variables
+color
+catch_errors
+
+function update_script() {
+ header_info
+ check_container_storage
+ check_container_resources
+ if [[ ! -d /etc/ntfy ]]; then
+ msg_error "No ${APP} Installation Found!"
+ exit
+ fi
+ msg_info "Updating $APP LXC"
+ $STD apk -U upgrade
+ setcap 'cap_net_bind_service=+ep' /usr/bin/ntfy
+ msg_ok "Updated $APP LXC"
+
+ msg_info "Restarting ntfy"
+ rc-service ntfy restart
+ msg_ok "Restarted ntfy"
+ exit
+}
+
+start
+build_container
+description
+
+msg_ok "Completed Successfully!\n"
+echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
+echo -e "${INFO}${YW} Access it using the following URL:${CL}"
+echo -e "${TAB}${GATEWAY}${BGN}http://${IP}${CL}"
diff --git a/ct/alpine.sh b/ct/alpine.sh
index 11972a3c5..51767cb01 100644
--- a/ct/alpine.sh
+++ b/ct/alpine.sh
@@ -1,5 +1,5 @@
#!/usr/bin/env bash
-source <(curl -s https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/build.func)
+source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/build.func)
# Copyright (c) 2021-2025 tteck
# Author: tteck (tteckster)
# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
@@ -7,9 +7,9 @@ source <(curl -s https://git.community-scripts.org/community-scripts/ProxmoxVED/
APP="Alpine"
var_tags="${var_tags:-os;alpine}"
-var_cpu="${var_cpu:-1}"
-var_ram="${var_ram:-512}"
-var_disk="${var_disk:-1}"
+var_cpu="${var_cpu:-4}"
+var_ram="${var_ram:-4096}"
+var_disk="${var_disk:-5}"
var_os="${var_os:-alpine}"
var_version="${var_version:-3.22}"
var_unprivileged="${var_unprivileged:-1}"
@@ -20,18 +20,18 @@ color
catch_errors
function update_script() {
- header_info
- #check_container_storage
- #check_container_resources
- UPD=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "SUPPORT" --radiolist --cancel-button Exit-Script "Spacebar = Select" 11 58 1 \
- "1" "Check for Alpine Updates" ON \
- 3>&1 1>&2 2>&3)
+ header_info
+ #check_container_storage
+ #check_container_resources
+ UPD=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "SUPPORT" --radiolist --cancel-button Exit-Script "Spacebar = Select" 11 58 1 \
+ "1" "Check for Alpine Updates" ON \
+ 3>&1 1>&2 2>&3)
- header_info
- if [ "$UPD" == "1" ]; then
- apk update && apk upgrade
- exit
- fi
+ header_info
+ if [ "$UPD" == "1" ]; then
+ apk update && apk upgrade
+ exit
+ fi
}
start
diff --git a/ct/jeedom.sh b/ct/asterisk.sh
similarity index 55%
rename from ct/jeedom.sh
rename to ct/asterisk.sh
index b64d2685c..02f2506cb 100644
--- a/ct/jeedom.sh
+++ b/ct/asterisk.sh
@@ -1,17 +1,17 @@
#!/usr/bin/env bash
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)
# Copyright (c) 2021-2025 community-scripts ORG
-# Author: Mips2648
+# Author: michelroegl-brunner
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
-# Source: https://jeedom.com/
+# Source: https://asterisk.org/
-APP="Jeedom"
-var_tags="${var_tags:-automation;smarthome}"
+APP="Asterisk"
+var_tags="${var_tags:-telephone;pbx}"
var_cpu="${var_cpu:-2}"
var_ram="${var_ram:-2048}"
-var_disk="${var_disk:-16}"
+var_disk="${var_disk:-4}"
var_os="${var_os:-debian}"
-var_version="${var_version:-11}"
+var_version="${var_version:-12}"
var_unprivileged="${var_unprivileged:-1}"
header_info "$APP"
@@ -23,16 +23,7 @@ function update_script() {
header_info
check_container_storage
check_container_resources
-
- if [[ ! -f /var/www/html/core/config/version ]]; then
- msg_error "No ${APP} Installation Found!"
- exit
- fi
-
- msg_info "Updating OS"
- $STD apt-get update
- $STD apt-get -y upgrade
- msg_ok "OS updated, you can now update Jeedom from the Web UI."
+ msg_error "No Update function provided for ${APP} LXC"
exit
}
@@ -42,5 +33,3 @@ description
msg_ok "Completed Successfully!\n"
echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
-echo -e "${INFO}${YW} Access it using the following URL:${CL}"
-echo -e "${TAB}${GATEWAY}${BGN}http://${IP}${CL}"
diff --git a/ct/cloudreve.sh b/ct/cloudreve.sh
deleted file mode 100644
index f64fea7a3..000000000
--- a/ct/cloudreve.sh
+++ /dev/null
@@ -1,44 +0,0 @@
-#!/usr/bin/env bash
-source <(curl -s https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/build.func)
-# Copyright (c) 2021-2025 community-scripts ORG
-# Author: MickLesk (CanbiZ)
-# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
-# Source: https://cloudreve.org/
-
-APP="Cloudreve"
-var_tags="${var_tags:-cloud}"
-var_cpu="${var_cpu:-1}"
-var_ram="${var_ram:-1024}"
-var_disk="${var_disk:-10}"
-var_os="${var_os:-debian}"
-var_version="${var_version:-12}"
-var_unprivileged="${var_unprivileged:-1}"
-
-header_info "$APP"
-variables
-color
-catch_errors
-
-function update_script() {
- header_info
- check_container_storage
- check_container_resources
- if [[ ! -d /var ]]; then
- msg_error "No ${APP} Installation Found!"
- exit
- fi
- msg_info "Updating $APP LXC"
- $STD apt-get update
- $STD apt-get -y upgrade
- msg_ok "Updated $APP LXC"
- exit
-}
-
-start
-build_container
-description
-
-msg_ok "Completed Successfully!\n"
-echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
-echo -e "${INFO}${YW} Access it using the following URL:${CL}"
-echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:5212${CL}"
diff --git a/ct/debian.sh b/ct/debian.sh
index c525e5e22..198a0bf01 100644
--- a/ct/debian.sh
+++ b/ct/debian.sh
@@ -1,20 +1,20 @@
#!/usr/bin/env bash
-source <(curl -s https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/build.func)
-# Copyright (c) 2021-2025 tteck
-# Author: tteck (tteckster)
+source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/build.func)
+# Copyright (c) 2021-2025 community-scripts ORG
+# Author: MickLesk (CanbiZ)
# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
-# Source: https://www.debian.org/
+# Source: https://www.debian.org/
APP="Debian"
-var_tags="${var_tags:-os}"
-var_cpu="${var_cpu:-1}"
-var_ram="${var_ram:-512}"
-var_disk="${var_disk:-2}"
+var_tags="${var_tags:-}"
+var_cpu="${var_cpu:-4}"
+var_ram="${var_ram:-8192}"
+var_disk="${var_disk:-20}"
var_os="${var_os:-debian}"
-var_version="${var_version:-12}"
+var_version="${var_version:-13}"
var_unprivileged="${var_unprivileged:-1}"
-var_fuse="${var_fuse:-no}"
-var_tun="${var_tun:-no}"
+#var_fuse="${var_fuse:-no}"
+#var_tun="${var_tun:-no}"
header_info "$APP"
variables
@@ -30,9 +30,10 @@ function update_script() {
exit
fi
msg_info "Updating $APP LXC"
- $STD apt-get update
- $STD apt-get -y upgrade
+ $STD apt update
+ $STD apt upgrade -y
msg_ok "Updated $APP LXC"
+ cleanup_lxc
exit
}
diff --git a/ct/deferred/alpine-homarr.sh b/ct/deferred/alpine-homarr.sh
index fc87c9075..c78398d65 100644
--- a/ct/deferred/alpine-homarr.sh
+++ b/ct/deferred/alpine-homarr.sh
@@ -21,22 +21,22 @@ color
catch_errors
function update_script() {
- header_info
- RELEASE=$(curl -fsSL https://api.github.com/repos/homarr-labs/homarr/releases/latest | grep "tag_name" | awk '{print substr($2, 3, length($2)-4) }')
- if [[ ! -f /opt/${APP}_version.txt ]] || [[ "${RELEASE}" != "$(cat /opt/${APP}_version.txt)" ]]; then
+ header_info
+ RELEASE=$(curl -fsSL https://api.github.com/repos/homarr-labs/homarr/releases/latest | grep "tag_name" | awk '{print substr($2, 3, length($2)-4) }')
+ if [[ ! -f /opt/${APP}_version.txt ]] || [[ "${RELEASE}" != "$(cat /opt/${APP}_version.txt)" ]]; then
- msg_info "Stopping Services (Patience)"
- systemctl stop homarr
- msg_ok "Services Stopped"
+ msg_info "Stopping Services (Patience)"
+ systemctl stop homarr
+ msg_ok "Services Stopped"
- msg_info "Backup Data"
- mkdir -p /opt/homarr-data-backup
- cp /opt/homarr/.env /opt/homarr-data-backup/.env
- msg_ok "Backup Data"
+ msg_info "Backup Data"
+ mkdir -p /opt/homarr-data-backup
+ cp /opt/homarr/.env /opt/homarr-data-backup/.env
+ msg_ok "Backup Data"
- msg_info "Updating and rebuilding ${APP} to v${RELEASE} (Patience)"
- rm /opt/run_homarr.sh
- cat <<'EOF' >/opt/run_homarr.sh
+ msg_info "Updating and rebuilding ${APP} to v${RELEASE} (Patience)"
+ rm /opt/run_homarr.sh
+ cat <<'EOF' >/opt/run_homarr.sh
#!/bin/bash
set -a
source /opt/homarr/.env
@@ -58,50 +58,50 @@ node apps/websocket/wssServer.cjs &
node apps/nextjs/server.js & PID=$!
wait $PID
EOF
- chmod +x /opt/run_homarr.sh
- NODE_VERSION=$(curl -s https://raw.githubusercontent.com/homarr-labs/homarr/dev/package.json | jq -r '.engines.node | split(">=")[1] | split(".")[0]')
- NODE_MODULE="pnpm@$(curl -s https://raw.githubusercontent.com/homarr-labs/homarr/dev/package.json | jq -r '.packageManager | split("@")[1]')"
- install_node_and_modules
- rm -rf /opt/homarr
- fetch_and_deploy_gh_release "homarr-labs/homarr"
- mv /opt/homarr-data-backup/.env /opt/homarr/.env
- cd /opt/homarr
- echo "test2"
- export NODE_ENV=""
- $STD pnpm install --recursive --frozen-lockfile --shamefully-hoist
- $STD pnpm build
- cp /opt/homarr/apps/nextjs/next.config.ts .
- cp /opt/homarr/apps/nextjs/package.json .
- cp -r /opt/homarr/packages/db/migrations /opt/homarr_db/migrations
- cp -r /opt/homarr/apps/nextjs/.next/standalone/* /opt/homarr
- mkdir -p /appdata/redis
- cp /opt/homarr/packages/redis/redis.conf /opt/homarr/redis.conf
- rm /etc/nginx/nginx.conf
- mkdir -p /etc/nginx/templates
- cp /opt/homarr/nginx.conf /etc/nginx/templates/nginx.conf
+ chmod +x /opt/run_homarr.sh
+ NODE_VERSION=$(curl -fsSL https://raw.githubusercontent.com/homarr-labs/homarr/dev/package.json | jq -r '.engines.node | split(">=")[1] | split(".")[0]')
+ NODE_MODULE="pnpm@$(curl -fsSL https://raw.githubusercontent.com/homarr-labs/homarr/dev/package.json | jq -r '.packageManager | split("@")[1]')"
+ install_node_and_modules
+ rm -rf /opt/homarr
+ fetch_and_deploy_gh_release "homarr-labs/homarr"
+ mv /opt/homarr-data-backup/.env /opt/homarr/.env
+ cd /opt/homarr
+ echo "test2"
+ export NODE_ENV=""
+ $STD pnpm install --recursive --frozen-lockfile --shamefully-hoist
+ $STD pnpm build
+ cp /opt/homarr/apps/nextjs/next.config.ts .
+ cp /opt/homarr/apps/nextjs/package.json .
+ cp -r /opt/homarr/packages/db/migrations /opt/homarr_db/migrations
+ cp -r /opt/homarr/apps/nextjs/.next/standalone/* /opt/homarr
+ mkdir -p /appdata/redis
+ cp /opt/homarr/packages/redis/redis.conf /opt/homarr/redis.conf
+ rm /etc/nginx/nginx.conf
+ mkdir -p /etc/nginx/templates
+ cp /opt/homarr/nginx.conf /etc/nginx/templates/nginx.conf
- mkdir -p /opt/homarr/apps/cli
- cp /opt/homarr/packages/cli/cli.cjs /opt/homarr/apps/cli/cli.cjs
- echo $'#!/bin/bash\ncd /opt/homarr/apps/cli && node ./cli.cjs "$@"' >/usr/bin/homarr
- chmod +x /usr/bin/homarr
+ mkdir -p /opt/homarr/apps/cli
+ cp /opt/homarr/packages/cli/cli.cjs /opt/homarr/apps/cli/cli.cjs
+ echo $'#!/bin/bash\ncd /opt/homarr/apps/cli && node ./cli.cjs "$@"' >/usr/bin/homarr
+ chmod +x /usr/bin/homarr
- mkdir /opt/homarr/build
- cp ./node_modules/better-sqlite3/build/Release/better_sqlite3.node ./build/better_sqlite3.node
- echo "${RELEASE}" >/opt/${APP}_version.txt
- msg_ok "Updated ${APP}"
+ mkdir /opt/homarr/build
+ cp ./node_modules/better-sqlite3/build/Release/better_sqlite3.node ./build/better_sqlite3.node
+ echo "${RELEASE}" >/opt/${APP}_version.txt
+ msg_ok "Updated ${APP}"
- msg_info "Starting Services"
- systemctl start homarr
- msg_ok "Started Services"
- msg_ok "Updated Successfully"
- read -p "It's recommended to reboot the LXC after an update, would you like to reboot the LXC now ? (y/n): " choice
- if [[ "$choice" =~ ^[Yy]$ ]]; then
- reboot
+ msg_info "Starting Services"
+ systemctl start homarr
+ msg_ok "Started Services"
+ msg_ok "Updated Successfully"
+ read -p "It's recommended to reboot the LXC after an update, would you like to reboot the LXC now ? (y/n): " choice
+ if [[ "$choice" =~ ^[Yy]$ ]]; then
+ reboot
+ fi
+ else
+ msg_ok "No update required. ${APP} is already at v${RELEASE}"
fi
- else
- msg_ok "No update required. ${APP} is already at v${RELEASE}"
- fi
- exit
+ exit
}
start
diff --git a/ct/deferred/ampache.sh b/ct/deferred/ampache.sh
index 89bca190b..ada18f098 100644
--- a/ct/deferred/ampache.sh
+++ b/ct/deferred/ampache.sh
@@ -1,5 +1,5 @@
#!/usr/bin/env bash
-source <(curl -s https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/build.func)
+source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/build.func)
# Copyright (c) 2021-2025 community-scripts ORG
# Author: MickLesk (Canbiz)
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
diff --git a/frontend/public/json/docspell.json b/ct/deferred/docspell.json
similarity index 100%
rename from frontend/public/json/docspell.json
rename to ct/deferred/docspell.json
diff --git a/ct/docspell.sh b/ct/deferred/docspell.sh
similarity index 89%
rename from ct/docspell.sh
rename to ct/deferred/docspell.sh
index 7fcdc0142..93eb6806f 100644
--- a/ct/docspell.sh
+++ b/ct/deferred/docspell.sh
@@ -1,9 +1,9 @@
#!/usr/bin/env bash
-source <(curl -s https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/build.func)
+source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/build.func)
# Copyright (c) 2021-2025 community-scripts ORG
# Author: MickLesk (Canbiz)
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
-# Source:
+# Source: https://github.com/community-scripts/ProxmoxVE
APP="Docspell"
var_tags="${var_tags:-document}"
diff --git a/ct/deferred/ghostfolio.sh b/ct/deferred/ghostfolio.sh
index 184841a14..56002d405 100644
--- a/ct/deferred/ghostfolio.sh
+++ b/ct/deferred/ghostfolio.sh
@@ -1,5 +1,5 @@
#!/usr/bin/env bash
-source <(curl -s https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/build.func)
+source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/build.func)
# Copyright (c) 2021-2025 community-scripts ORG
# Author: MickLesk (Canbiz)
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
diff --git a/ct/deferred/hoodik.sh b/ct/deferred/hoodik.sh
index 0f84b5138..0e7597b14 100644
--- a/ct/deferred/hoodik.sh
+++ b/ct/deferred/hoodik.sh
@@ -1,5 +1,5 @@
#!/usr/bin/env bash
-source <(curl -s https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/build.func)
+source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/build.func)
# Copyright (c) 2021-2025 community-scripts ORG
# Author: MickLesk (CanbiZ)
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
@@ -21,46 +21,46 @@ color
catch_errors
function update_script() {
- header_info
- check_container_storage
- check_container_resources
- if [[ ! -d /opt/hoodik ]]; then
- msg_error "No ${APP} Installation Found!"
- exit
- fi
- RELEASE=$(curl -s https://api.github.com/repos/hudikhq/hoodik/releases/latest | grep "tag_name" | awk '{print substr($2, 2, length($2)-3) }')
- if [[ ! -f /opt/${APP}_version.txt ]] || [[ "${RELEASE}" != "$(cat /opt/${APP}_version.txt)" ]]; then
- msg_info "Stopping Services"
- systemctl stop hoodik
- msg_ok "Services Stopped"
-
- msg_info "Updating ${APP} to ${RELEASE}"
- cd /opt
- if [ -d hoodik_bak ]; then
- rm -rf hoodik_bak
+ header_info
+ check_container_storage
+ check_container_resources
+ if [[ ! -d /opt/hoodik ]]; then
+ msg_error "No ${APP} Installation Found!"
+ exit
fi
- mv hoodik hoodik_bak
- curl -fsSL "https://github.com/hudikhq/hoodik/archive/refs/tags/${RELEASE}.zip"
- unzip -q ${RELEASE}.zip
- mv hoodik-${RELEASE} /opt/hoodik
- cd /opt/hoodik
- cargo update -q
- cargo build -q --release
- msg_ok "Updated Hoodik"
+ RELEASE=$(curl -fsSL https://api.github.com/repos/hudikhq/hoodik/releases/latest | grep "tag_name" | awk '{print substr($2, 2, length($2)-3) }')
+ if [[ ! -f /opt/${APP}_version.txt ]] || [[ "${RELEASE}" != "$(cat /opt/${APP}_version.txt)" ]]; then
+ msg_info "Stopping Services"
+ systemctl stop hoodik
+ msg_ok "Services Stopped"
- msg_info "Starting Services"
- systemctl start hoodik
- msg_ok "Started Services"
+ msg_info "Updating ${APP} to ${RELEASE}"
+ cd /opt
+ if [ -d hoodik_bak ]; then
+ rm -rf hoodik_bak
+ fi
+ mv hoodik hoodik_bak
+ curl -fsSL "https://github.com/hudikhq/hoodik/archive/refs/tags/${RELEASE}.zip"
+ unzip -q ${RELEASE}.zip
+ mv hoodik-${RELEASE} /opt/hoodik
+ cd /opt/hoodik
+ cargo update -q
+ cargo build -q --release
+ msg_ok "Updated Hoodik"
- msg_info "Cleaning Up"
- rm -R /opt/${RELEASE}.zip
- rm -R /opt/hoodik_bak
- msg_ok "Cleaned"
- msg_ok "Updated Successfully"
- else
- msg_ok "No update required. ${APP} is already at ${RELEASE}"
- fi
- exit
+ msg_info "Starting Services"
+ systemctl start hoodik
+ msg_ok "Started Services"
+
+ msg_info "Cleaning Up"
+ rm -R /opt/${RELEASE}.zip
+ rm -R /opt/hoodik_bak
+ msg_ok "Cleaned"
+ msg_ok "Updated Successfully"
+ else
+ msg_ok "No update required. ${APP} is already at ${RELEASE}"
+ fi
+ exit
}
start
diff --git a/ct/deferred/jumpserver.sh b/ct/deferred/jumpserver.sh
index a87847336..f512024c9 100644
--- a/ct/deferred/jumpserver.sh
+++ b/ct/deferred/jumpserver.sh
@@ -20,39 +20,39 @@ color
catch_errors
function update_script() {
- header_info
- check_container_storage
- check_container_resources
+ header_info
+ check_container_storage
+ check_container_resources
- if [[ ! -d /opt/jumpserver ]]; then
- msg_error "No ${APP} Installation Found!"
+ if [[ ! -d /opt/jumpserver ]]; then
+ msg_error "No ${APP} Installation Found!"
+ exit
+ fi
+ RELEASE=$(curl -fsSL https://api.github.com/repos/jumpserver/installer/releases/latest | grep '"tag_name"' | sed -E 's/.*"tag_name": "([^"]+)".*/\1/')
+ if [[ ! -f /opt/${APP}_version.txt ]] || [[ "${RELEASE}" != "$(cat /opt/${APP}_version.txt)" ]]; then
+ msg_info "Updating ${APP} to ${RELEASE}"
+ if [[ -d /opt/jumpserver/config ]]; then
+ cp -r /opt/jumpserver/config /opt/jumpserver_config_backup
+ fi
+ echo "${RELEASE}" >/opt/${APP}_version.txt
+ rm -rf /opt/jumpserver
+ cd /opt
+ curl -fsSL "https://github.com/jumpserver/installer/releases/download/${RELEASE}/jumpserver-installer-${RELEASE}.tar.gz" -o jumpserver-installer-${RELEASE}.tar.gz
+ mkdir -p /opt/jumpserver
+ $STD tar -xzvf jumpserver-installer-${RELEASE}.tar.gz -C /opt/jumpserver --strip-components=1
+ if [[ -d /opt/jumpserver_config_backup ]]; then
+ cp -r /opt/jumpserver_config_backup /opt/jumpserver/config
+ rm -rf /opt/jumpserver_config_backup
+ fi
+ cd /opt/jumpserver
+ yes y | head -n 3 | $STD ./jmsctl.sh upgrade
+ $STD ./jmsctl.sh start
+ rm -rf /opt/jumpserver-installer-${RELEASE}.tar.gz
+ msg_ok "Updated Successfully"
+ else
+ msg_ok "No update required. ${APP} is already at ${RELEASE}."
+ fi
exit
- fi
- RELEASE=$(curl -fsSL https://api.github.com/repos/jumpserver/installer/releases/latest | grep '"tag_name"' | sed -E 's/.*"tag_name": "([^"]+)".*/\1/')
- if [[ ! -f /opt/${APP}_version.txt ]] || [[ "${RELEASE}" != "$(cat /opt/${APP}_version.txt)" ]]; then
- msg_info "Updating ${APP} to ${RELEASE}"
- if [[ -d /opt/jumpserver/config ]]; then
- cp -r /opt/jumpserver/config /opt/jumpserver_config_backup
- fi
- echo "${RELEASE}" >/opt/${APP}_version.txt
- rm -rf /opt/jumpserver
- cd /opt
- curl -fsSL "https://github.com/jumpserver/installer/releases/download/${RELEASE}/jumpserver-installer-${RELEASE}.tar.gz" -o jumpserver-installer-${RELEASE}.tar.gz
- mkdir -p /opt/jumpserver
- $STD tar -xzvf jumpserver-installer-${RELEASE}.tar.gz -C /opt/jumpserver --strip-components=1
- if [[ -d /opt/jumpserver_config_backup ]]; then
- cp -r /opt/jumpserver_config_backup /opt/jumpserver/config
- rm -rf /opt/jumpserver_config_backup
- fi
- cd /opt/jumpserver
- yes y | head -n 3 | $STD ./jmsctl.sh upgrade
- $STD ./jmsctl.sh start
- rm -rf /opt/jumpserver-installer-${RELEASE}.tar.gz
- msg_ok "Updated Successfully"
- else
- msg_ok "No update required. ${APP} is already at ${RELEASE}."
- fi
- exit
}
start
diff --git a/ct/deferred/kasm.sh b/ct/deferred/kasm.sh
index 6ee3bb931..565c71fbd 100644
--- a/ct/deferred/kasm.sh
+++ b/ct/deferred/kasm.sh
@@ -22,38 +22,38 @@ color
catch_errors
function update_script() {
- header_info
- check_container_storage
- check_container_resources
+ header_info
+ check_container_storage
+ check_container_resources
- if [[ ! -d /opt/kasm ]]; then
- msg_error "No ${APP} Installation Found!"
+ if [[ ! -d /opt/kasm ]]; then
+ msg_error "No ${APP} Installation Found!"
+ exit
+ fi
+ RELEASE=$(curl -fsSL 'https://www.kasmweb.com/downloads' | grep -o 'https://kasm-static-content.s3.amazonaws.com/kasm_release_[^"]*\.tar\.gz' | head -n 1 | sed -E 's/.*release_(.*)\.tar\.gz/\1/')
+ if [[ "${RELEASE}" != "$(cat /opt/${APP}_version.txt)" ]] || [[ ! -f /opt/${APP}_version.txt ]]; then
+ msg_info "Updating ${APP} to v${RELEASE}"
+ temp_file=$(mktemp)
+ curl -fsSL "https://kasm-static-content.s3.amazonaws.com/kasm_release_${RELEASE}.tar.gz" -o "$temp_file"
+ tar zxf "$temp_file"
+ mkdir -p /opt/kasm/backups/
+ chmod 777 /opt/kasm/backups/
+ mv /opt/kasm/1.*/certs/kasm_nginx.crt /opt/kasm/kasm_nginx.crt_bak
+ printf 'y\n' | $STD sudo bash /tmp/kasm_release/upgrade.sh
+ $STD sudo bash /tmp/kasm_release/upgrade.sh
+ echo "${RELEASE}" >/opt/${APP}_version.txt
+ msg_ok "Updated ${APP} to v${RELEASE}"
+
+ msg_info "Cleaning up"
+ rm -f "$temp_file"
+ rm -rf /tmp/kasm_release
+ $STD apt-get -y autoremove
+ $STD apt-get -y autoclean
+ msg_ok "Cleaned"
+ else
+ msg_ok "No update required. ${APP} is already at v${RELEASE}"
+ fi
exit
- fi
- RELEASE=$(curl -fsSL 'https://www.kasmweb.com/downloads' | grep -o 'https://kasm-static-content.s3.amazonaws.com/kasm_release_[^"]*\.tar\.gz' | head -n 1 | sed -E 's/.*release_(.*)\.tar\.gz/\1/')
- if [[ "${RELEASE}" != "$(cat /opt/${APP}_version.txt)" ]] || [[ ! -f /opt/${APP}_version.txt ]]; then
- msg_info "Updating ${APP} to v${RELEASE}"
- temp_file=$(mktemp)
- curl -fsSL "https://kasm-static-content.s3.amazonaws.com/kasm_release_${RELEASE}.tar.gz" -o "$temp_file"
- tar zxf "$temp_file"
- mkdir -p /opt/kasm/backups/
- chmod 777 /opt/kasm/backups/
- mv /opt/kasm/1.*/certs/kasm_nginx.crt /opt/kasm/kasm_nginx.crt_bak
- printf 'y\n' | $STD sudo bash /tmp/kasm_release/upgrade.sh
- $STD sudo bash /tmp/kasm_release/upgrade.sh
- echo "${RELEASE}" >/opt/${APP}_version.txt
- msg_ok "Updated ${APP} to v${RELEASE}"
-
- msg_info "Cleaning up"
- rm -f "$temp_file"
- rm -rf /tmp/kasm_release
- $STD apt-get -y autoremove
- $STD apt-get -y autoclean
- msg_ok "Cleaned"
- else
- msg_ok "No update required. ${APP} is already at v${RELEASE}"
- fi
- exit
}
start
diff --git a/ct/deferred/koel.sh b/ct/deferred/koel.sh
index 839ac5365..a4b6ea7e8 100644
--- a/ct/deferred/koel.sh
+++ b/ct/deferred/koel.sh
@@ -1,5 +1,5 @@
#!/usr/bin/env bash
-source <(curl -s https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/build.func)
+source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/build.func)
# Copyright (c) 2021-2025 community-scripts ORG
# Author: MickLesk (Canbiz)
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
@@ -20,45 +20,45 @@ color
catch_errors
function update_script() {
- header_info
- check_container_storage
- check_container_resources
- if [[ ! -d /opt/koel ]]; then
- msg_error "No ${APP} Installation Found!"
+ header_info
+ check_container_storage
+ check_container_resources
+ if [[ ! -d /opt/koel ]]; then
+ msg_error "No ${APP} Installation Found!"
+ exit
+ fi
+ RELEASE=$(curl -fsSL https://api.github.com/repos/koel/koel/releases/latest | grep "tag_name" | awk '{print substr($2, 2, length($2)-3) }')
+ if [[ ! -f /opt/${APP}_version.txt ]] || [[ "${RELEASE}" != "$(cat /opt/${APP}_version.txt)" ]]; then
+ msg_info "Stopping ${APP} Service"
+ systemctl stop nginx
+ msg_ok "Stopped ${APP} Service"
+
+ msg_info "Updating ${APP} to v${RELEASE}"
+ cd /opt
+ curl -fsSL https://github.com/koel/koel/releases/download/${RELEASE}/koel-${RELEASE}.zip -o koel-${RELEASE}.zip
+ unzip -q koel-${RELEASE}.zip
+ cd /opt/koel
+ composer update --no-interaction >/dev/null 2>&1
+ composer install --no-interaction >/dev/null 2>&1
+ php artisan migrate --force >/dev/null 2>&1
+ php artisan cache:clear >/dev/null 2>&1
+ php artisan config:clear >/dev/null 2>&1
+ php artisan view:clear >/dev/null 2>&1
+ php artisan koel:init --no-interaction >/dev/null 2>&1
+ msg_ok "Updated ${APP} to v${RELEASE}"
+
+ msg_info "Starting ${APP} Service"
+ systemctl start nginx
+ msg_ok "Started ${APP} Service"
+
+ msg_info "Cleaning up"
+ rm -rf /opt/koel-${RELEASE}.zip
+ msg_ok "Cleaned"
+ msg_ok "Updated Successfully!\n"
+ else
+ msg_ok "No update required. ${APP} is already at v${RELEASE}"
+ fi
exit
- fi
- RELEASE=$(curl -s https://api.github.com/repos/koel/koel/releases/latest | grep "tag_name" | awk '{print substr($2, 2, length($2)-3) }')
- if [[ ! -f /opt/${APP}_version.txt ]] || [[ "${RELEASE}" != "$(cat /opt/${APP}_version.txt)" ]]; then
- msg_info "Stopping ${APP} Service"
- systemctl stop nginx
- msg_ok "Stopped ${APP} Service"
-
- msg_info "Updating ${APP} to v${RELEASE}"
- cd /opt
- curl -fsSL https://github.com/koel/koel/releases/download/${RELEASE}/koel-${RELEASE}.zip
- unzip -q koel-${RELEASE}.zip
- cd /opt/koel
- composer update --no-interaction >/dev/null 2>&1
- composer install --no-interaction >/dev/null 2>&1
- php artisan migrate --force >/dev/null 2>&1
- php artisan cache:clear >/dev/null 2>&1
- php artisan config:clear >/dev/null 2>&1
- php artisan view:clear >/dev/null 2>&1
- php artisan koel:init --no-interaction >/dev/null 2>&1
- msg_ok "Updated ${APP} to v${RELEASE}"
-
- msg_info "Starting ${APP} Service"
- systemctl start nginx
- msg_ok "Started ${APP} Service"
-
- msg_info "Cleaning up"
- rm -rf /opt/koel-${RELEASE}.zip
- msg_ok "Cleaned"
- msg_ok "Updated Successfully!\n"
- else
- msg_ok "No update required. ${APP} is already at v${RELEASE}"
- fi
- exit
}
start
diff --git a/ct/deferred/librespeed.sh b/ct/deferred/librespeed.sh
new file mode 100644
index 000000000..95650828b
--- /dev/null
+++ b/ct/deferred/librespeed.sh
@@ -0,0 +1,56 @@
+#!/usr/bin/env bash
+source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/refs/heads/main/misc/build.func)
+# Copyright (c) 2021-2025 community-scripts ORG
+# Author: elvito
+# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
+# Source: https://github.com/librespeed/speedtest
+
+APP="librespeed"
+var_tags="speedtest"
+var_cpu="1"
+var_ram="512"
+var_disk="4"
+var_os="debian"
+var_version="12"
+var_unprivileged="1"
+
+header_info "$APP"
+variables
+color
+catch_errors
+
+function update_script() {
+ header_info
+ check_container_storage
+ check_container_resources
+
+ if [[ ! -f /opt/librespeed/index.html ]]; then
+ msg_error "No ${APP} Installation Found!"
+ exit
+ fi
+ RELEASE=$(curl -fsSL https://api.github.com/repos/librespeed/speedtest/releases/latest | grep "tag_name" | awk '{print substr($2, 2, length($2)-3)}')
+ if [[ ! -f /opt/librespeed/${APP}_version.txt ]] || [[ "${RELEASE}" != "$(cat /opt/librespeed/${APP}_version.txt)" ]]; then
+ msg_info "Updating $APP..."
+ temp_file=$(mktemp)
+ curl -fsSL "https://github.com/librespeed/speedtest/archive/refs/tags/${RELEASE}.zip" -o "$temp_file"
+ mkdir -p /temp
+ unzip -qu "$temp_file" -d /temp
+ cd /temp/speedtest-"${RELEASE}"
+ cp -u favicon.ico index.html speedtest.js speedtest_worker.js /opt/librespeed/
+ cp -ru backend /opt/librespeed/
+ echo "${RELEASE}" >/opt/librespeed/"${APP}"_version.txt
+ systemctl restart caddy
+ msg_ok "$APP has been updated."
+ else
+ msg_ok "No update required. ${APP} is already at ${RELEASE}"
+ fi
+ exit
+}
+start
+build_container
+description
+
+msg_ok "Completed Successfully!\n"
+echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
+echo -e "${INFO}${YW} Access it using the following URL:${CL}"
+echo -e "${TAB}${GATEWAY}${BGN}http://${IP}${CL}"
diff --git a/ct/manyfold.sh b/ct/deferred/manyfold.sh
similarity index 86%
rename from ct/manyfold.sh
rename to ct/deferred/manyfold.sh
index b54ee9b15..2d782d551 100644
--- a/ct/manyfold.sh
+++ b/ct/deferred/manyfold.sh
@@ -1,5 +1,5 @@
#!/usr/bin/env bash
-source <(curl -s https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/build.func)
+source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/build.func)
# Copyright (c) 2021-2025 community-scripts ORG
# Author: bvdberg01
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
@@ -27,7 +27,7 @@ function update_script() {
msg_error "No ${APP} Installation Found!"
exit
fi
- RELEASE=$(curl -s https://api.github.com/repos/benjaminjonard/manyfold/releases/latest | grep "tag_name" | awk '{print substr($2, 2, length($2)-3) }')
+ RELEASE=$(curl -fsSL https://api.github.com/repos/benjaminjonard/manyfold/releases/latest | grep "tag_name" | awk '{print substr($2, 2, length($2)-3) }')
if [[ ! -f /opt/${APP}_version.txt ]] || [[ "${RELEASE}" != "$(cat /opt/${APP}_version.txt)" ]]; then
msg_info "Stopping Service"
systemctl stop apache2
diff --git a/frontend/public/json/maxun.json b/ct/deferred/maxun.json
similarity index 100%
rename from frontend/public/json/maxun.json
rename to ct/deferred/maxun.json
diff --git a/ct/deferred/maxun.sh b/ct/deferred/maxun.sh
new file mode 100644
index 000000000..a5561dc56
--- /dev/null
+++ b/ct/deferred/maxun.sh
@@ -0,0 +1,73 @@
+#!/usr/bin/env bash
+source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/build.func)
+# Copyright (c) 2021-2025 community-scripts ORG
+# Author: MickLesk (Canbiz)
+# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
+# Source: https://github.com/getmaxun/maxun
+
+APP="Maxun"
+var_tags="${var_tags:-scraper}"
+var_disk="${var_disk:-7}"
+var_cpu="${var_cpu:-2}"
+var_ram="${var_ram:-3072}"
+var_os="${var_os:-debian}"
+var_version="${var_version:-12}"
+var_unprivileged="${var_unprivileged:-1}"
+
+header_info "$APP"
+variables
+color
+catch_errors
+
+function update_script() {
+ header_info
+ check_container_storage
+ check_container_resources
+ if [[ ! -d /opt/maxun ]]; then
+ msg_error "No ${APP} Installation Found!"
+ exit
+ fi
+ RELEASE=$(curl -fsSL https://api.github.com/repos/getmaxun/maxun/releases/latest | grep "tag_name" | awk '{print substr($2, 3, length($2)-4) }')
+ if [[ ! -f /opt/${APP}_version.txt ]] || [[ "${RELEASE}" != "$(cat /opt/${APP}_version.txt)" ]]; then
+ msg_info "Stopping Services"
+ systemctl stop maxun minio redis
+ msg_ok "Services Stopped"
+
+ msg_info "Updating ${APP} to v${RELEASE}"
+ mv /opt/maxun /opt/maxun_bak
+ cd /opt
+ curl -fsSL "https://github.com/getmaxun/maxun/archive/refs/tags/v${RELEASE}.zip" -o v${RELEASE}.zip
+ unzip -q v${RELEASE}.zip
+ mv maxun-${RELEASE} /opt/maxun
+ mv /opt/maxun_bak/.env /opt/maxun/
+ cd /opt/maxun
+ npm install --legacy-peer-deps
+ cd /opt/maxun/maxun-core
+ npm install --legacy-peer-deps
+ cd /opt/maxun
+ npx playwright install --with-deps chromium
+ npx playwright install-deps
+ echo "${RELEASE}" >/opt/${APP}_version.txt
+
+ msg_info "Starting Services"
+ systemctl start minio redis maxun
+ msg_ok "Started Services"
+
+ msg_info "Cleaning Up"
+ rm -rf /opt/v${RELEASE}.zip
+ msg_ok "Cleaned"
+ msg_ok "Updated Successfully"
+ else
+ msg_ok "No update required. ${APP} is already at v${RELEASE}"
+ fi
+ exit
+}
+
+start
+build_container
+description
+
+msg_ok "Completed Successfully!\n"
+echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
+echo -e "${INFO}${YW} Access it using the following URL:${CL}"
+echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:5173${CL}"
diff --git a/ct/deferred/netbootxyz.sh b/ct/deferred/netbootxyz.sh
index 4a3c009db..201073af9 100644
--- a/ct/deferred/netbootxyz.sh
+++ b/ct/deferred/netbootxyz.sh
@@ -1,13 +1,13 @@
#!/usr/bin/env bash
-source <(curl -s https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/build.func)
+source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/build.func)
# Copyright (c) 2021-2023 tteck
# Author: tteck (tteckster)
# License: MIT
# https://github.com/tteck/Proxmox/raw/main/LICENSE
function header_info {
-clear
-cat <<"EOF"
+ clear
+ cat <<"EOF"
__ __ __
____ ___ / /_/ /_ ____ ____ / /_ _ ____ ______
/ __ \/ _ \/ __/ __ \/ __ \/ __ \/ __/ | |/_/ / / /_ /
@@ -29,58 +29,61 @@ color
catch_errors
function default_settings() {
- CT_TYPE="1"
- PW=""
- CT_ID=$NEXTID
- HN=$NSAPP
- DISK_SIZE="$var_disk"
- CORE_COUNT="$var_cpu"
- RAM_SIZE="$var_ram"
- BRG="vmbr0"
- NET="dhcp"
- GATE=""
- DISABLEIP6="no"
- MTU=""
- SD=""
- NS=""
- MAC=""
- VLAN=""
- SSH="no"
- VERB="no"
- echo_default
+ CT_TYPE="1"
+ PW=""
+ CT_ID=$NEXTID
+ HN=$NSAPP
+ DISK_SIZE="$var_disk"
+ CORE_COUNT="$var_cpu"
+ RAM_SIZE="$var_ram"
+ BRG="vmbr0"
+ NET="dhcp"
+ GATE=""
+ DISABLEIP6="no"
+ MTU=""
+ SD=""
+ NS=""
+ MAC=""
+ VLAN=""
+ SSH="no"
+ VERB="no"
+ echo_default
}
function update_script() {
-header_info
-if [[ ! -d /opt/netboot.xyz ]]; then msg_error "No ${APP} Installation Found!"; exit; fi
-msg_info "Stopping ${APP}"
-systemctl disable netbootxyz.service &>/dev/null
-systemctl stop netbootxyz
-sleep 1
-msg_ok "Stopped ${APP}"
+ header_info
+ if [[ ! -d /opt/netboot.xyz ]]; then
+ msg_error "No ${APP} Installation Found!"
+ exit
+ fi
+ msg_info "Stopping ${APP}"
+ systemctl disable netbootxyz.service &>/dev/null
+ systemctl stop netbootxyz
+ sleep 1
+ msg_ok "Stopped ${APP}"
-msg_info "Backing up Data"
-cp -R /opt/netboot.xyz/config config-backup
-cp -R /opt/netboot.xyz/assets assets-backup
-sleep 1
-msg_ok "Backed up Data"
+ msg_info "Backing up Data"
+ cp -R /opt/netboot.xyz/config config-backup
+ cp -R /opt/netboot.xyz/assets assets-backup
+ sleep 1
+ msg_ok "Backed up Data"
-RELEASE=$(curl -sX GET "https://api.github.com/repos/netbootxyz/netboot.xyz/releases/latest" | awk '/tag_name/{print $4;exit}' FS='[""]')
-msg_info "Updating netboot.xyz to ${RELEASE}"
-curl --silent -o ${RELEASE}.tar.gz -L "https://github.com/netbootxyz/netboot.xyz/archive/${RELEASE}.tar.gz" &>/dev/null
-tar xvzf ${RELEASE}.tar.gz &>/dev/null
-VER=$(curl -s https://api.github.com/repos/netbootxyz/netboot.xyz/releases/latest |
- grep "tag_name" |
- awk '{print substr($2, 2, length($2)-3) }')
+ RELEASE=$(curl -fsSLX GET "https://api.github.com/repos/netbootxyz/netboot.xyz/releases/latest" | awk '/tag_name/{print $4;exit}' FS='[""]')
+ msg_info "Updating netboot.xyz to ${RELEASE}"
+ curl --silent -o ${RELEASE}.tar.gz -L "https://github.com/netbootxyz/netboot.xyz/archive/${RELEASE}.tar.gz" &>/dev/null
+ tar xvzf ${RELEASE}.tar.gz &>/dev/null
+ VER=$(curl -fsSL https://api.github.com/repos/netbootxyz/netboot.xyz/releases/latest |
+ grep "tag_name" |
+ awk '{print substr($2, 2, length($2)-3) }')
-if [ ! -d "/opt/netboot.xyz" ]; then
- mv netboot.xyz-${VER} /opt/netboot.xyz
-else
- cp -R netboot.xyz-${VER}/* /opt/netboot.xyz
-fi
+ if [ ! -d "/opt/netboot.xyz" ]; then
+ mv netboot.xyz-${VER} /opt/netboot.xyz
+ else
+ cp -R netboot.xyz-${VER}/* /opt/netboot.xyz
+ fi
-service_path="/etc/systemd/system/netbootxyz.service"
-echo "[Unit]
+ service_path="/etc/systemd/system/netbootxyz.service"
+ echo "[Unit]
Description=netboot.xyz
After=network.target
[Service]
@@ -93,28 +96,28 @@ ExecStart="ansible-playbook" -i inventory site.yml
TimeoutStopSec=30
[Install]
WantedBy=multi-user.target" >$service_path
-msg_ok "Updated netboot.xyz to ${RELEASE}"
+ msg_ok "Updated netboot.xyz to ${RELEASE}"
-msg_info "Restoring Data"
-cp -R config-backup/* /opt/netboot.xyz/config
-cp -R assets-backup/* /opt/netboot.xyz/assets
-sleep 1
-msg_ok "Restored Data"
+ msg_info "Restoring Data"
+ cp -R config-backup/* /opt/netboot.xyz/config
+ cp -R assets-backup/* /opt/netboot.xyz/assets
+ sleep 1
+ msg_ok "Restored Data"
-msg_info "Cleanup"
-rm -rf ${RELEASE}.tar.gz
-rm -rf netboot.xyz-${VER}
-rm -rf config-backup
-rm -rf assets-backup
-sleep 1
-msg_ok "Cleaned"
+ msg_info "Cleanup"
+ rm -rf ${RELEASE}.tar.gz
+ rm -rf netboot.xyz-${VER}
+ rm -rf config-backup
+ rm -rf assets-backup
+ sleep 1
+ msg_ok "Cleaned"
-msg_info "Starting ${APP}"
-systemctl enable --now netbootxyz.service &>/dev/null
-sleep 2
-msg_ok "Started ${APP}"
-msg_ok "Updated Successfully"
-exit
+ msg_info "Starting ${APP}"
+ systemctl enable --now netbootxyz.service &>/dev/null
+ sleep 2
+ msg_ok "Started ${APP}"
+ msg_ok "Updated Successfully"
+ exit
}
start
diff --git a/ct/deferred/nginxproxymanager.sh b/ct/deferred/nginxproxymanager.sh
index 6b75c62c2..b1ff024d7 100644
--- a/ct/deferred/nginxproxymanager.sh
+++ b/ct/deferred/nginxproxymanager.sh
@@ -1,5 +1,5 @@
#!/usr/bin/env bash
-source <(curl -s https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/build.func)
+source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/build.func)
# Copyright (c) 2021-2025 tteck
# Author: tteck (tteckster)
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
@@ -20,103 +20,103 @@ color
catch_errors
function update_script() {
- header_info
- check_container_storage
- check_container_resources
- if [[ ! -f /lib/systemd/system/npm.service ]]; then
- msg_error "No ${APP} Installation Found!"
- exit
- fi
- if ! command -v pnpm &>/dev/null; then
- msg_info "Installing pnpm"
- #export NODE_OPTIONS=--openssl-legacy-provider
- $STD npm install -g pnpm@8.15
- msg_ok "Installed pnpm"
- fi
- RELEASE=$(curl -s https://api.github.com/repos/NginxProxyManager/nginx-proxy-manager/releases/latest |
- grep "tag_name" |
- awk '{print substr($2, 3, length($2)-4) }')
- msg_info "Stopping Services"
- systemctl stop openresty
- systemctl stop npm
- msg_ok "Stopped Services"
+ header_info
+ check_container_storage
+ check_container_resources
+ if [[ ! -f /lib/systemd/system/npm.service ]]; then
+ msg_error "No ${APP} Installation Found!"
+ exit
+ fi
+ if ! command -v pnpm &>/dev/null; then
+ msg_info "Installing pnpm"
+ #export NODE_OPTIONS=--openssl-legacy-provider
+ $STD npm install -g pnpm@8.15
+ msg_ok "Installed pnpm"
+ fi
+ RELEASE=$(curl -fsSL https://api.github.com/repos/NginxProxyManager/nginx-proxy-manager/releases/latest |
+ grep "tag_name" |
+ awk '{print substr($2, 3, length($2)-4) }')
+ msg_info "Stopping Services"
+ systemctl stop openresty
+ systemctl stop npm
+ msg_ok "Stopped Services"
- msg_info "Cleaning Old Files"
- rm -rf /app \
- /var/www/html \
- /etc/nginx \
- /var/log/nginx \
- /var/lib/nginx \
- $STD /var/cache/nginx
- msg_ok "Cleaned Old Files"
+ msg_info "Cleaning Old Files"
+ rm -rf /app \
+ /var/www/html \
+ /etc/nginx \
+ /var/log/nginx \
+ /var/lib/nginx \
+ $STD /var/cache/nginx
+ msg_ok "Cleaned Old Files"
- msg_info "Downloading NPM v${RELEASE}"
- curl -fsSL https://codeload.github.com/NginxProxyManager/nginx-proxy-manager/tar.gz/v${RELEASE} -o - | tar -xz
- cd nginx-proxy-manager-${RELEASE}
- msg_ok "Downloaded NPM v${RELEASE}"
+ msg_info "Downloading NPM v${RELEASE}"
+ curl -fsSL https://codeload.github.com/NginxProxyManager/nginx-proxy-manager/tar.gz/v${RELEASE} -o - | tar -xz
+ cd nginx-proxy-manager-${RELEASE}
+ msg_ok "Downloaded NPM v${RELEASE}"
- msg_info "Setting up Enviroment"
- ln -sf /usr/bin/python3 /usr/bin/python
- ln -sf /usr/bin/certbot /opt/certbot/bin/certbot
- ln -sf /usr/local/openresty/nginx/sbin/nginx /usr/sbin/nginx
- ln -sf /usr/local/openresty/nginx/ /etc/nginx
- sed -i "s|\"version\": \"0.0.0\"|\"version\": \"$RELEASE\"|" backend/package.json
- sed -i "s|\"version\": \"0.0.0\"|\"version\": \"$RELEASE\"|" frontend/package.json
- sed -i 's|"fork-me": ".*"|"fork-me": "Proxmox VE Helper-Scripts"|' frontend/js/i18n/messages.json
- sed -i "s|https://github.com.*source=nginx-proxy-manager|https://helper-scripts.com|g" frontend/js/app/ui/footer/main.ejs
- sed -i 's+^daemon+#daemon+g' docker/rootfs/etc/nginx/nginx.conf
- NGINX_CONFS=$(find "$(pwd)" -type f -name "*.conf")
- for NGINX_CONF in $NGINX_CONFS; do
- sed -i 's+include conf.d+include /etc/nginx/conf.d+g' "$NGINX_CONF"
- done
- mkdir -p /var/www/html /etc/nginx/logs
- cp -r docker/rootfs/var/www/html/* /var/www/html/
- cp -r docker/rootfs/etc/nginx/* /etc/nginx/
- cp docker/rootfs/etc/letsencrypt.ini /etc/letsencrypt.ini
- cp docker/rootfs/etc/logrotate.d/nginx-proxy-manager /etc/logrotate.d/nginx-proxy-manager
- ln -sf /etc/nginx/nginx.conf /etc/nginx/conf/nginx.conf
- rm -f /etc/nginx/conf.d/dev.conf
- mkdir -p /tmp/nginx/body \
- /run/nginx \
- /data/nginx \
- /data/custom_ssl \
- /data/logs \
- /data/access \
- /data/nginx/default_host \
- /data/nginx/default_www \
- /data/nginx/proxy_host \
- /data/nginx/redirection_host \
- /data/nginx/stream \
- /data/nginx/dead_host \
- /data/nginx/temp \
- /var/lib/nginx/cache/public \
- /var/lib/nginx/cache/private \
- /var/cache/nginx/proxy_temp
- chmod -R 777 /var/cache/nginx
- chown root /tmp/nginx
- echo resolver "$(awk 'BEGIN{ORS=" "} $1=="nameserver" {print ($2 ~ ":")? "["$2"]": $2}' /etc/resolv.conf);" >/etc/nginx/conf.d/include/resolvers.conf
- if [ ! -f /data/nginx/dummycert.pem ] || [ ! -f /data/nginx/dummykey.pem ]; then
- $STD openssl req -new -newkey rsa:2048 -days 3650 -nodes -x509 -subj "/O=Nginx Proxy Manager/OU=Dummy Certificate/CN=localhost" -keyout /data/nginx/dummykey.pem -out /data/nginx/dummycert.pem
- fi
- mkdir -p /app/global /app/frontend/images
- cp -r backend/* /app
- cp -r global/* /app/global
- $STD python3 -m pip install --no-cache-dir certbot-dns-cloudflare
- msg_ok "Setup Enviroment"
+ msg_info "Setting up Enviroment"
+ ln -sf /usr/bin/python3 /usr/bin/python
+ ln -sf /usr/bin/certbot /opt/certbot/bin/certbot
+ ln -sf /usr/local/openresty/nginx/sbin/nginx /usr/sbin/nginx
+ ln -sf /usr/local/openresty/nginx/ /etc/nginx
+ sed -i "s|\"version\": \"0.0.0\"|\"version\": \"$RELEASE\"|" backend/package.json
+ sed -i "s|\"version\": \"0.0.0\"|\"version\": \"$RELEASE\"|" frontend/package.json
+ sed -i 's|"fork-me": ".*"|"fork-me": "Proxmox VE Helper-Scripts"|' frontend/js/i18n/messages.json
+ sed -i "s|https://github.com.*source=nginx-proxy-manager|https://helper-scripts.com|g" frontend/js/app/ui/footer/main.ejs
+ sed -i 's+^daemon+#daemon+g' docker/rootfs/etc/nginx/nginx.conf
+ NGINX_CONFS=$(find "$(pwd)" -type f -name "*.conf")
+ for NGINX_CONF in $NGINX_CONFS; do
+ sed -i 's+include conf.d+include /etc/nginx/conf.d+g' "$NGINX_CONF"
+ done
+ mkdir -p /var/www/html /etc/nginx/logs
+ cp -r docker/rootfs/var/www/html/* /var/www/html/
+ cp -r docker/rootfs/etc/nginx/* /etc/nginx/
+ cp docker/rootfs/etc/letsencrypt.ini /etc/letsencrypt.ini
+ cp docker/rootfs/etc/logrotate.d/nginx-proxy-manager /etc/logrotate.d/nginx-proxy-manager
+ ln -sf /etc/nginx/nginx.conf /etc/nginx/conf/nginx.conf
+ rm -f /etc/nginx/conf.d/dev.conf
+ mkdir -p /tmp/nginx/body \
+ /run/nginx \
+ /data/nginx \
+ /data/custom_ssl \
+ /data/logs \
+ /data/access \
+ /data/nginx/default_host \
+ /data/nginx/default_www \
+ /data/nginx/proxy_host \
+ /data/nginx/redirection_host \
+ /data/nginx/stream \
+ /data/nginx/dead_host \
+ /data/nginx/temp \
+ /var/lib/nginx/cache/public \
+ /var/lib/nginx/cache/private \
+ /var/cache/nginx/proxy_temp
+ chmod -R 777 /var/cache/nginx
+ chown root /tmp/nginx
+ echo resolver "$(awk 'BEGIN{ORS=" "} $1=="nameserver" {print ($2 ~ ":")? "["$2"]": $2}' /etc/resolv.conf);" >/etc/nginx/conf.d/include/resolvers.conf
+ if [ ! -f /data/nginx/dummycert.pem ] || [ ! -f /data/nginx/dummykey.pem ]; then
+ $STD openssl req -new -newkey rsa:2048 -days 3650 -nodes -x509 -subj "/O=Nginx Proxy Manager/OU=Dummy Certificate/CN=localhost" -keyout /data/nginx/dummykey.pem -out /data/nginx/dummycert.pem
+ fi
+ mkdir -p /app/global /app/frontend/images
+ cp -r backend/* /app
+ cp -r global/* /app/global
+ $STD python3 -m pip install --no-cache-dir certbot-dns-cloudflare
+ msg_ok "Setup Enviroment"
- msg_info "Building Frontend"
- cd ./frontend
- $STD pnpm install
- $STD pnpm upgrade
- $STD pnpm run build
- cp -r dist/* /app/frontend
- cp -r app-images/* /app/frontend/images
- msg_ok "Built Frontend"
+ msg_info "Building Frontend"
+ cd ./frontend
+ $STD pnpm install
+ $STD pnpm upgrade
+ $STD pnpm run build
+ cp -r dist/* /app/frontend
+ cp -r app-images/* /app/frontend/images
+ msg_ok "Built Frontend"
- msg_info "Initializing Backend"
- $STD rm -rf /app/config/default.json
- if [ ! -f /app/config/production.json ]; then
- cat <<'EOF' >/app/config/production.json
+ msg_info "Initializing Backend"
+ $STD rm -rf /app/config/default.json
+ if [ ! -f /app/config/production.json ]; then
+ cat <<'EOF' >/app/config/production.json
{
"database": {
"engine": "knex-native",
@@ -129,25 +129,25 @@ function update_script() {
}
}
EOF
- fi
- cd /app
- $STD pnpm install
- msg_ok "Initialized Backend"
+ fi
+ cd /app
+ $STD pnpm install
+ msg_ok "Initialized Backend"
- msg_info "Starting Services"
- sed -i 's/user npm/user root/g; s/^pid/#pid/g' /usr/local/openresty/nginx/conf/nginx.conf
- sed -i 's/su npm npm/su root root/g' /etc/logrotate.d/nginx-proxy-manager
- sed -i 's/include-system-site-packages = false/include-system-site-packages = true/g' /opt/certbot/pyvenv.cfg
- systemctl enable -q --now openresty
- systemctl enable -q --now npm
- msg_ok "Started Services"
+ msg_info "Starting Services"
+ sed -i 's/user npm/user root/g; s/^pid/#pid/g' /usr/local/openresty/nginx/conf/nginx.conf
+ sed -i 's/su npm npm/su root root/g' /etc/logrotate.d/nginx-proxy-manager
+ sed -i 's/include-system-site-packages = false/include-system-site-packages = true/g' /opt/certbot/pyvenv.cfg
+ systemctl enable -q --now openresty
+ systemctl enable -q --now npm
+ msg_ok "Started Services"
- msg_info "Cleaning up"
- rm -rf ~/nginx-proxy-manager-*
- msg_ok "Cleaned"
+ msg_info "Cleaning up"
+ rm -rf ~/nginx-proxy-manager-*
+ msg_ok "Cleaned"
- msg_ok "Updated Successfully"
- exit
+ msg_ok "Updated Successfully"
+ exit
}
start
diff --git a/ct/deferred/ocis.sh b/ct/deferred/ocis.sh
index b28a1a36d..167b4593d 100644
--- a/ct/deferred/ocis.sh
+++ b/ct/deferred/ocis.sh
@@ -1,5 +1,5 @@
#!/usr/bin/env bash
-source <(curl -s https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/build.func)
+source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/build.func)
# Copyright (c) 2021-2025 tteck
# Author: tteck (tteckster)
# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
@@ -20,18 +20,18 @@ color
catch_errors
function update_script() {
- header_info
- check_container_storage
- check_container_resources
- if [[ ! -d /var ]]; then
- msg_error "No ${APP} Installation Found!"
+ header_info
+ check_container_storage
+ check_container_resources
+ if [[ ! -d /var ]]; then
+ msg_error "No ${APP} Installation Found!"
+ exit
+ fi
+ msg_info "Updating $APP LXC"
+ $STD apt-get update
+ $STD apt-get -y upgrade
+ msg_ok "Updated $APP LXC"
exit
- fi
- msg_info "Updating $APP LXC"
- $STD apt-get update
- $STD apt-get -y upgrade
- msg_ok "Updated $APP LXC"
- exit
}
start
@@ -42,4 +42,3 @@ msg_ok "Completed Successfully!\n"
echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
echo -e "${INFO}${YW} Access it using the following URL:${CL}"
echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:9200${CL}"
-
diff --git a/frontend/public/json/opencloud.json b/ct/deferred/opencloud.json
similarity index 100%
rename from frontend/public/json/opencloud.json
rename to ct/deferred/opencloud.json
diff --git a/ct/deferred/opencloud.sh b/ct/deferred/opencloud.sh
new file mode 100644
index 000000000..2c428f93c
--- /dev/null
+++ b/ct/deferred/opencloud.sh
@@ -0,0 +1,62 @@
+#!/usr/bin/env bash
+source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)
+# Copyright (c) 2021-2025 community-scripts ORG
+# Author: vhsdream
+# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
+# Source: https://opencloud.eu
+
+APP="OpenCloud"
+var_tags="${var_tags:-files;cloud}"
+var_cpu="${var_cpu:-2}"
+var_ram="${var_ram:-2048}"
+var_disk="${var_disk:-6}"
+var_os="${var_os:-debian}"
+var_version="${var_version:-12}"
+var_unprivileged="${var_unprivileged:-1}"
+
+header_info "$APP"
+variables
+color
+catch_errors
+
+function update_script() {
+ header_info
+ check_container_storage
+ check_container_resources
+
+ if [[ ! -d /etc/opencloud ]]; then
+ msg_error "No ${APP} Installation Found!"
+ exit
+ fi
+
+ RELEASE=$(curl -fsSL https://api.github.com/repos/opencloud-eu/opencloud/releases/latest | grep "tag_name" | awk '{print substr($2, 3, length($2)-4) }')
+ if [[ "${RELEASE}" != "$(cat /etc/opencloud/version)" ]] || [[ ! -f /etc/opencloud/version ]]; then
+ msg_info "Stopping $APP"
+ systemctl stop opencloud opencloud-wopi
+ msg_ok "Stopped $APP"
+
+ msg_info "Updating $APP to v${RELEASE}"
+ curl -fsSL "https://github.com/opencloud-eu/opencloud/releases/download/v${RELEASE}/opencloud-${RELEASE}-linux-amd64" -o /usr/bin/opencloud
+ chmod +x /usr/bin/opencloud
+ msg_ok "Updated $APP to v${RELEASE}"
+
+ msg_info "Starting $APP"
+ systemctl start opencloud opencloud-wopi
+ msg_ok "Started $APP"
+
+ echo "${RELEASE}" >/etc/opencloud/version
+ msg_ok "Update Successful"
+ else
+ msg_ok "No update required. ${APP} is already at v${RELEASE}"
+ fi
+ exit
+}
+
+start
+build_container
+description
+
+msg_ok "Completed Successfully!\n"
+echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
+echo -e "${INFO}${YW} Access it using the following URL:${CL}"
+echo -e "${TAB}${GATEWAY}${BGN}https://${CL}"
diff --git a/ct/deferred/openwebui.sh b/ct/deferred/openwebui.sh
index 96647a974..85341bd9b 100644
--- a/ct/deferred/openwebui.sh
+++ b/ct/deferred/openwebui.sh
@@ -20,47 +20,47 @@ color
catch_errors
function update_script() {
- header_info
- check_container_storage
- check_container_resources
- if [[ ! -d /opt/open-webui ]]; then
- msg_error "No ${APP} Installation Found!"
- exit
- fi
-
- if [ -x "/usr/bin/ollama" ]; then
- msg_info "Updating Ollama"
- OLLAMA_VERSION=$(ollama -v | awk '{print $NF}')
- RELEASE=$(curl -s https://api.github.com/repos/ollama/ollama/releases/latest | grep "tag_name" | awk '{print substr($2, 3, length($2)-4)}')
- if [ "$OLLAMA_VERSION" != "$RELEASE" ]; then
- curl -fsSLO https://ollama.com/download/ollama-linux-amd64.tgz
- tar -C /usr -xzf ollama-linux-amd64.tgz
- rm -rf ollama-linux-amd64.tgz
- msg_ok "Ollama updated to version $RELEASE"
- else
- msg_ok "Ollama is already up to date."
+ header_info
+ check_container_storage
+ check_container_resources
+ if [[ ! -d /opt/open-webui ]]; then
+ msg_error "No ${APP} Installation Found!"
+ exit
fi
- fi
- msg_info "Updating ${APP} (Patience)"
- systemctl stop open-webui.service
- mkdir -p /opt/openwebui-backup
- cp -rf /opt/openwebui/backend/data /opt/openwebui-backup
- cp /opt/openwebui/.env /opt
- rm -rf /opt/openwebui
- fetch_and_deploy_gh_release "open-webui/open-webui"
- cd /opt/openwebui
- $STD npm install
- export NODE_OPTIONS="--max-old-space-size=3584"
- sed -i "s/git rev-parse HEAD/openssl rand -hex 20/g" /opt/openwebui/svelte.config.js
- $STD npm run build
- cd ./backend
- $STD pip install -r requirements.txt -U
- cp -rf /opt/openwebui-backup/* /opt/openwebui/backend
- mv /opt/.env /opt/openwebui/
- systemctl start open-webui.service
- msg_ok "Updated Successfully"
- exit
+ if [ -x "/usr/bin/ollama" ]; then
+ msg_info "Updating Ollama"
+ OLLAMA_VERSION=$(ollama -v | awk '{print $NF}')
+ RELEASE=$(curl -fsSL https://api.github.com/repos/ollama/ollama/releases/latest | grep "tag_name" | awk '{print substr($2, 3, length($2)-4)}')
+ if [ "$OLLAMA_VERSION" != "$RELEASE" ]; then
+ curl -fsSLO https://ollama.com/download/ollama-linux-amd64.tgz
+ tar -C /usr -xzf ollama-linux-amd64.tgz
+ rm -rf ollama-linux-amd64.tgz
+ msg_ok "Ollama updated to version $RELEASE"
+ else
+ msg_ok "Ollama is already up to date."
+ fi
+ fi
+
+ msg_info "Updating ${APP} (Patience)"
+ systemctl stop open-webui.service
+ mkdir -p /opt/openwebui-backup
+ cp -rf /opt/openwebui/backend/data /opt/openwebui-backup
+ cp /opt/openwebui/.env /opt
+ rm -rf /opt/openwebui
+ fetch_and_deploy_gh_release "open-webui/open-webui"
+ cd /opt/openwebui
+ $STD npm install
+ export NODE_OPTIONS="--max-old-space-size=3584"
+ sed -i "s/git rev-parse HEAD/openssl rand -hex 20/g" /opt/openwebui/svelte.config.js
+ $STD npm run build
+ cd ./backend
+ $STD pip install -r requirements.txt -U
+ cp -rf /opt/openwebui-backup/* /opt/openwebui/backend
+ mv /opt/.env /opt/openwebui/
+ systemctl start open-webui.service
+ msg_ok "Updated Successfully"
+ exit
}
start
diff --git a/ct/deferred/pixelfed.sh b/ct/deferred/pixelfed.sh
index c053e0795..8ddab519c 100644
--- a/ct/deferred/pixelfed.sh
+++ b/ct/deferred/pixelfed.sh
@@ -1,5 +1,5 @@
#!/usr/bin/env bash
-source <(curl -s https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/build.func)
+source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/build.func)
# Copyright (c) 2021-2025 community-scripts ORG
# Author: MickLesk (Canbiz)
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
@@ -19,21 +19,21 @@ color
catch_errors
function update_script() {
- header_info
- check_container_storage
- check_container_resources
- if [[ ! -d /opt/pixelfed ]]; then
- msg_error "No ${APP} Installation Found!"
+ header_info
+ check_container_storage
+ check_container_resources
+ if [[ ! -d /opt/pixelfed ]]; then
+ msg_error "No ${APP} Installation Found!"
+ exit
+ fi
+ RELEASE=$(curl -fsSL https://api.github.com/repos/xxxx/xxxx/releases/latest | grep "tag_name" | awk '{print substr($2, 2, length($2)-3) }')
+ if [[ ! -f /opt/${APP}_version.txt ]] || [[ "${RELEASE}" != "$(cat /opt/${APP}_version.txt)" ]]; then
+ msg_info "Updating ${APP} to ${RELEASE}"
+ cd /opt
+ else
+ msg_ok "No update required. ${APP} is already at ${RELEASE}"
+ fi
exit
- fi
- RELEASE=$(curl -s https://api.github.com/repos/xxxx/xxxx/releases/latest | grep "tag_name" | awk '{print substr($2, 2, length($2)-3) }')
- if [[ ! -f /opt/${APP}_version.txt ]] || [[ "${RELEASE}" != "$(cat /opt/${APP}_version.txt)" ]]; then
- msg_info "Updating ${APP} to ${RELEASE}"
- cd /opt
- else
- msg_ok "No update required. ${APP} is already at ${RELEASE}"
- fi
- exit
}
start
diff --git a/ct/deferred/roundcubemail.sh b/ct/deferred/roundcubemail.sh
index 4423aea54..75d0e3bcd 100644
--- a/ct/deferred/roundcubemail.sh
+++ b/ct/deferred/roundcubemail.sh
@@ -1,5 +1,5 @@
#!/usr/bin/env bash
-source <(curl -s https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/build.func)
+source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/build.func)
# Copyright (c) 2021-2025 community-scripts ORG
# Author: MickLesk (CanbiZ)
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
@@ -20,39 +20,39 @@ color
catch_errors
function update_script() {
- header_info
- if [[ ! -d /opt/roundcubemail ]]; then
- msg_error "No ${APP} Installation Found!"
+ header_info
+ if [[ ! -d /opt/roundcubemail ]]; then
+ msg_error "No ${APP} Installation Found!"
+ exit
+ fi
+ if (($(df /boot | awk 'NR==2{gsub("%","",$5); print $5}') > 80)); then
+ read -r -p "Warning: Storage is dangerously low, continue anyway? " prompt
+ [[ ${prompt,,} =~ ^(y|yes)$ ]] || exit
+ fi
+ RELEASE=$(curl -fsSL https://api.github.com/repos/roundcube/roundcubemail/releases/latest | grep "tag_name" | awk '{print substr($2, 2, length($2)-3) }')
+ if [[ ! -f /opt/${APP}_version.txt ]] || [[ "${RELEASE}" != "$(cat /opt/${APP}_version.txt)" ]]; then
+ msg_info "Updating ${APP} to ${RELEASE}"
+ cd /opt
+ curl -fsSL "https://github.com/roundcube/roundcubemail/releases/download/${RELEASE}/roundcubemail-${RELEASE}-complete.tar.gz" -o "roundcubemail-${RELEASE}-complete.tar.gz"
+ tar -xf roundcubemail-${RELEASE}-complete.tar.gz
+ mv roundcubemail-${RELEASE} /opt/roundcubemail
+ cd /opt/roundcubemail
+ COMPOSER_ALLOW_SUPERUSER=1 composer install --no-dev
+ chown -R www-data:www-data temp/ logs/
+ msg_ok "Updated ${APP}"
+
+ msg_info "Reload Apache2"
+ systemctl reload apache2
+ msg_ok "Apache2 Reloaded"
+
+ msg_info "Cleaning Up"
+ rm -rf /opt/roundcubemail-${RELEASE}-complete.tar.gz
+ msg_ok "Cleaned"
+ msg_ok "Updated Successfully"
+ else
+ msg_ok "No update required. ${APP} is already at ${RELEASE}"
+ fi
exit
- fi
- if (($(df /boot | awk 'NR==2{gsub("%","",$5); print $5}') > 80)); then
- read -r -p "Warning: Storage is dangerously low, continue anyway? " prompt
- [[ ${prompt,,} =~ ^(y|yes)$ ]] || exit
- fi
- RELEASE=$(curl -s https://api.github.com/repos/roundcube/roundcubemail/releases/latest | grep "tag_name" | awk '{print substr($2, 2, length($2)-3) }')
- if [[ ! -f /opt/${APP}_version.txt ]] || [[ "${RELEASE}" != "$(cat /opt/${APP}_version.txt)" ]]; then
- msg_info "Updating ${APP} to ${RELEASE}"
- cd /opt
- curl -fsSL "https://github.com/roundcube/roundcubemail/releases/download/${RELEASE}/roundcubemail-${RELEASE}-complete.tar.gz"
- tar -xf roundcubemail-${RELEASE}-complete.tar.gz
- mv roundcubemail-${RELEASE} /opt/roundcubemail
- cd /opt/roundcubemail
- COMPOSER_ALLOW_SUPERUSER=1 composer install --no-dev
- chown -R www-data:www-data temp/ logs/
- msg_ok "Updated ${APP}"
-
- msg_info "Reload Apache2"
- systemctl reload apache2
- msg_ok "Apache2 Reloaded"
-
- msg_info "Cleaning Up"
- rm -rf /opt/roundcubemail-${RELEASE}-complete.tar.gz
- msg_ok "Cleaned"
- msg_ok "Updated Successfully"
- else
- msg_ok "No update required. ${APP} is already at ${RELEASE}"
- fi
- exit
}
start
diff --git a/ct/deferred/squirrelserversmanager.sh b/ct/deferred/squirrelserversmanager.sh
index 9927d23c7..4403ab5f0 100644
--- a/ct/deferred/squirrelserversmanager.sh
+++ b/ct/deferred/squirrelserversmanager.sh
@@ -1,5 +1,5 @@
#!/usr/bin/env bash
-source <(curl -s https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/build.func)
+source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/build.func)
# Copyright (c) 2021-2025 community-scripts ORG
# Author: MickLesk (Canbiz)
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
diff --git a/ct/deferred/vikunja.sh b/ct/deferred/vikunja.sh
new file mode 100644
index 000000000..fd2483512
--- /dev/null
+++ b/ct/deferred/vikunja.sh
@@ -0,0 +1,77 @@
+#!/usr/bin/env bash
+source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)
+# Copyright (c) 2021-2025 community-scripts ORG
+# Author: MickLesk (Canbiz)
+# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
+# Source: https://vikunja.io/
+
+APP="Vikunja"
+var_tags="${var_tags:-todo-app}"
+var_cpu="${var_cpu:-1}"
+var_ram="${var_ram:-1024}"
+var_disk="${var_disk:-4}"
+var_os="${var_os:-debian}"
+var_version="${var_version:-12}"
+var_unprivileged="${var_unprivileged:-1}"
+
+header_info "$APP"
+variables
+color
+catch_errors
+
+function update_script() {
+ header_info
+ check_container_storage
+ check_container_resources
+ if [[ ! -d /opt/vikunja ]]; then
+ msg_error "No ${APP} Installation Found!"
+ exit
+ fi
+
+ if whiptail --backtitle "Vikunja Update" --title "🔄 VERSION SELECTION" --yesno \
+ "Choose the version type to update to:\n\n• STABLE: Recommended for production use\n• UNSTABLE: Latest development version\n\n⚠️ WARNING: Unstable versions may contain bugs,\nbe incomplete, or cause system instability.\nOnly use for testing purposes.\n\nDo you want to use the UNSTABLE version?\n(No = Stable, Yes = Unstable)" 16 70 --defaultno; then
+ msg_info "Selecting version"
+ RELEASE="unstable"
+ FILENAME="vikunja-${RELEASE}-x86_64.deb"
+ msg_ok "Selected UNSTABLE version"
+ else
+ msg_info "Selecting version"
+ RELEASE=$(curl -fsSL https://dl.vikunja.io/vikunja/ | grep -oP 'href="/vikunja/\K[0-9]+\.[0-9]+\.[0-9]+' | sort -V | tail -n 1)
+ FILENAME="vikunja-${RELEASE}-amd64.deb"
+ msg_ok "Selected STABLE version: ${RELEASE}"
+ fi
+
+ if [[ ! -f /opt/${APP}_version.txt ]] || [[ "${RELEASE}" != "$(cat /opt/${APP}_version.txt)" ]]; then
+ msg_info "Stopping ${APP}"
+ systemctl stop vikunja
+ msg_ok "Stopped ${APP}"
+ msg_info "Updating ${APP} to ${RELEASE}"
+ cd /opt
+ rm -rf /opt/vikunja/vikunja
+ rm -rf "/opt/$FILENAME"
+ curl -fsSL "https://dl.vikunja.io/vikunja/$RELEASE/$FILENAME" -o "$FILENAME"
+ export DEBIAN_FRONTEND=noninteractive
+ $STD dpkg -i $FILENAME
+ echo "${RELEASE}" >/opt/${APP}_version.txt
+ msg_ok "Updated ${APP}"
+ msg_info "Starting ${APP}"
+ systemctl start vikunja
+ msg_ok "Started ${APP}"
+ msg_info "Cleaning Up"
+ rm -rf /opt/$FILENAME
+ msg_ok "Cleaned"
+ msg_ok "Updated Successfully"
+ else
+ msg_ok "No update required. ${APP} is already at ${RELEASE}"
+ fi
+ exit
+}
+
+start
+build_container
+description
+
+msg_ok "Completed Successfully!\n"
+echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
+echo -e "${INFO}${YW} Access it using the following URL:${CL}"
+echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:3456${CL}"
diff --git a/ct/docker.sh b/ct/docker.sh
deleted file mode 100644
index 123d0c7c2..000000000
--- a/ct/docker.sh
+++ /dev/null
@@ -1,92 +0,0 @@
-#!/usr/bin/env bash
-source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)
-# Copyright (c) 2021-2025 tteck
-# Author: tteck (tteckster)
-# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
-# Source: https://www.docker.com/
-
-APP="Docker"
-var_tags="${var_tags:-docker}"
-var_cpu="${var_cpu:-2}"
-var_ram="${var_ram:-2048}"
-var_disk="${var_disk:-4}"
-var_os="${var_os:-debian}"
-var_version="${var_version:-12}"
-var_unprivileged="${var_unprivileged:-1}"
-
-header_info "$APP"
-variables
-color
-catch_errors
-
-function update_script() {
- header_info
- check_container_storage
- check_container_resources
-
- get_latest_release() {
- curl -fsSL https://api.github.com/repos/"$1"/releases/latest | grep '"tag_name":' | cut -d'"' -f4
- }
-
- msg_info "Updating base system"
- $STD apt-get update
- $STD apt-get -y upgrade
- msg_ok "Base system updated"
-
- msg_info "Updating Docker Engine"
- $STD apt-get install --only-upgrade -y docker-ce docker-ce-cli containerd.io
- msg_ok "Docker Engine updated"
-
- if [[ -f /usr/local/lib/docker/cli-plugins/docker-compose ]]; then
- COMPOSE_BIN="/usr/local/lib/docker/cli-plugins/docker-compose"
- COMPOSE_NEW_VERSION=$(get_latest_release "docker/compose")
- msg_info "Updating Docker Compose to $COMPOSE_NEW_VERSION"
- curl -fsSL "https://github.com/docker/compose/releases/download/${COMPOSE_NEW_VERSION}/docker-compose-$(uname -s)-$(uname -m)" \
- -o "$COMPOSE_BIN"
- chmod +x "$COMPOSE_BIN"
- msg_ok "Docker Compose updated"
- fi
-
- if docker ps -a --format '{{.Names}}' | grep -q '^portainer$'; then
- msg_info "Updating Portainer"
- docker pull portainer/portainer-ce:latest
- docker stop portainer && docker rm portainer
- docker volume create portainer_data >/dev/null 2>&1
- $STD docker run -d \
- -p 8000:8000 \
- -p 9443:9443 \
- --name=portainer \
- --restart=always \
- -v /var/run/docker.sock:/var/run/docker.sock \
- -v portainer_data:/data \
- portainer/portainer-ce:latest
- msg_ok "Updated Portainer"
- fi
-
- if docker ps -a --format '{{.Names}}' | grep -q '^portainer_agent$'; then
- msg_info "Updating Portainer Agent"
- docker pull portainer/agent:latest
- docker stop portainer_agent && docker rm portainer_agent
- $STD docker run -d \
- -p 9001:9001 \
- --name=portainer_agent \
- --restart=always \
- -v /var/run/docker.sock:/var/run/docker.sock \
- -v /var/lib/docker/volumes:/var/lib/docker/volumes \
- portainer/agent
- msg_ok "Updated Portainer Agent"
- fi
-
- msg_info "Cleaning up"
- $STD apt-get -y autoremove
- $STD apt-get -y autoclean
- msg_ok "Cleanup complete"
- exit
-}
-
-start
-build_container
-description
-
-msg_ok "Completed Successfully!\n"
-echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
diff --git a/ct/donetick.sh b/ct/donetick.sh
new file mode 100644
index 000000000..c27d820c9
--- /dev/null
+++ b/ct/donetick.sh
@@ -0,0 +1,63 @@
+#!/usr/bin/env bash
+source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)
+# Copyright (c) 2021-2025 community-scripts ORG
+# Author: fstof
+# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
+# Source: https://github.com/donetick/donetick
+
+APP="donetick"
+var_tags="${var_tags:-productivity;tasks}"
+var_cpu="${var_cpu:-1}"
+var_ram="${var_ram:-512}"
+var_disk="${var_disk:-2}"
+var_os="${var_os:-debian}"
+var_version="${var_version:-13}"
+var_unprivileged="${var_unprivileged:-1}"
+
+header_info "$APP"
+variables
+color
+catch_errors
+
+function update_script() {
+ header_info
+ check_container_storage
+ check_container_resources
+
+ if [[ ! -d /opt/donetick ]]; then
+ msg_error "No ${APP} Installation Found!"
+ exit
+ fi
+
+ if check_for_gh_release "donetick" "donetick/donetick"; then
+ msg_info "Stopping Service"
+ systemctl stop donetick
+ msg_ok "Stopped Service"
+
+ msg_info "Backing Up Configurations"
+ mv /opt/donetick/config/selfhosted.yml /opt/donetick/donetick.db /opt
+ msg_ok "Backed Up Configurations"
+
+ CLEAN_INSTALL=1 fetch_and_deploy_gh_release "donetick" "donetick/donetick" "prebuild" "latest" "/opt/donetick" "donetick_Linux_x86_64.tar.gz"
+
+ msg_info "Restoring Configurations"
+ mv /opt/selfhosted.yml /opt/donetick/config
+ mv /opt/donetick.db /opt/donetick
+ msg_ok "Restored Configurations"
+
+ msg_info "Starting Service"
+ systemctl start donetick
+ msg_ok "Started Service"
+ msg_ok "Updated successfully!"
+ fi
+ exit
+}
+
+start
+build_container
+description
+
+msg_ok "Completed Successfully!\n"
+echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
+echo -e "${INFO}${YW} Access it using the following URL:${CL}"
+echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:2021${CL}"
diff --git a/ct/ente.sh b/ct/ente.sh
new file mode 100644
index 000000000..5733886ca
--- /dev/null
+++ b/ct/ente.sh
@@ -0,0 +1,42 @@
+#!/usr/bin/env bash
+source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/build.func)
+# Copyright (c) 2021-2025 community-scripts ORG
+# Author: MickLesk (CanbiZ)
+# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
+# Source: https://www.debian.org/
+
+APP="Ente"
+var_tags="${var_tags:-photos}"
+var_cpu="${var_cpu:-4}"
+var_ram="${var_ram:-4096}"
+var_disk="${var_disk:-10}"
+var_os="${var_os:-debian}"
+var_version="${var_version:-12}"
+var_unprivileged="${var_unprivileged:-1}"
+
+header_info "$APP"
+variables
+color
+catch_errors
+
+function update_script() {
+ header_info
+ check_container_storage
+ check_container_resources
+ if [[ ! -d /var ]]; then
+ msg_error "No ${APP} Installation Found!"
+ exit
+ fi
+ msg_info "Updating $APP LXC"
+ $STD apt-get update
+ $STD apt-get -y upgrade
+ msg_ok "Updated $APP LXC"
+ exit
+}
+
+start
+build_container
+description
+
+msg_ok "Completed Successfully!"
+msg_custom "🚀" "${GN}" "${APP} setup has been successfully initialized!"
diff --git a/ct/freepbx.sh b/ct/freepbx.sh
new file mode 100644
index 000000000..0552674bf
--- /dev/null
+++ b/ct/freepbx.sh
@@ -0,0 +1,67 @@
+#!/usr/bin/env bash
+source <(curl -fsSL https://raw.githubusercontent.com/vsc55/community-scripts-ProxmoxVED/refs/heads/freepbx/misc/build.func)
+# Copyright (c) 2021-2025 community-scripts ORG
+# Author: Arian Nasr (arian-nasr)
+# Updated by: Javier Pastor (vsc55)
+# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
+# Source: https://www.freepbx.org/
+
+APP="FreePBX"
+var_tags="pbx;voip;telephony"
+var_cpu="${var_cpu:-2}"
+var_ram="${var_ram:-2048}"
+var_disk="${var_disk:-10}"
+var_os="${var_os:-debian}"
+var_version="${var_version:-12}"
+var_unprivileged="${var_unprivileged:-1}"
+
+header_info "$APP"
+variables
+color
+catch_errors
+
+function update_script() {
+ header_info
+ check_container_storage
+ check_container_resources
+
+ if [[ ! -f /lib/systemd/system/freepbx.service ]]; then
+ msg_error "No ${APP} Installation Found!"
+ exit
+ fi
+
+ msg_info "Updating $APP LXC"
+ $STD apt-get update
+ $STD apt-get -y upgrade
+ msg_ok "Updated $APP LXC"
+
+ msg_info "Updating $APP Modules"
+ $STD fwconsole ma updateall
+ $STD fwconsole reload
+ msg_ok "Updated $APP Modules"
+
+ exit
+}
+
+start
+
+if whiptail --title "Commercial Modules" --yesno "Remove Commercial modules?" --defaultno 10 50; then
+ export ONLY_OPENSOURCE="yes"
+
+ if whiptail --title "Firewall Module" --yesno "Do you want to KEEP the Firewall module (and sysadmin)?" 10 50; then
+ export REMOVE_FIREWALL="no"
+ else
+ export REMOVE_FIREWALL="yes"
+ fi
+else
+ export ONLY_OPENSOURCE="no"
+ export REMOVE_FIREWALL="no"
+fi
+
+build_container
+description
+
+msg_ok "Completed Successfully!\n"
+echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
+echo -e "${INFO}${YW} Access it using the following URL:${CL}"
+echo -e "${TAB}${GATEWAY}${BGN}http://${IP}${CL}"
diff --git a/ct/frigate.sh b/ct/frigate.sh
index 2b498b794..68f529c6d 100644
--- a/ct/frigate.sh
+++ b/ct/frigate.sh
@@ -1,38 +1,34 @@
#!/usr/bin/env bash
-source <(curl -s https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/build.func)
+source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/build.func)
# Copyright (c) 2021-2025 community-scripts ORG
-# Authors: MickLesk (CanbiZ)
+# Authors: MickLesk (CanbiZ) | Co-Author: remz1337
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://frigate.video/
-# App Default Values
APP="Frigate"
var_tags="${var_tags:-nvr}"
var_cpu="${var_cpu:-4}"
var_ram="${var_ram:-4096}"
var_disk="${var_disk:-20}"
var_os="${var_os:-debian}"
-var_version="${var_version:-11}"
+var_version="${var_version:-13}"
var_unprivileged="${var_unprivileged:-0}"
-# App Output
header_info "$APP"
-
-# Core
variables
color
catch_errors
function update_script() {
- header_info
- check_container_storage
- check_container_resources
- if [[ ! -f /etc/systemd/system/frigate.service ]]; then
- msg_error "No ${APP} Installation Found!"
+ header_info
+ check_container_storage
+ check_container_resources
+ if [[ ! -f /etc/systemd/system/frigate.service ]]; then
+ msg_error "No ${APP} Installation Found!"
+ exit
+ fi
+ msg_error "To update Frigate, create a new container and transfer your configuration."
exit
- fi
- msg_error "To update Frigate, create a new container and transfer your configuration."
- exit
}
start
diff --git a/ct/garmin-grafana.sh b/ct/garmin-grafana.sh
index 7351ad005..f0742dd82 100644
--- a/ct/garmin-grafana.sh
+++ b/ct/garmin-grafana.sh
@@ -21,74 +21,74 @@ catch_errors
# this only updates garmin-grafana, not influxdb or grafana, which are upgraded with apt
function update_script() {
- header_info
- check_container_storage
- check_container_resources
+ header_info
+ check_container_storage
+ check_container_resources
- if [[ ! -d /opt/garmin-grafana/ ]]; then
- msg_error "No ${APP} Installation Found!"
+ if [[ ! -d /opt/garmin-grafana/ ]]; then
+ msg_error "No ${APP} Installation Found!"
+ exit
+ fi
+
+ RELEASE=$(curl -fsSL https://api.github.com/repos/arpanghosh8453/garmin-grafana/releases/latest | grep "tag_name" | awk '{print substr($2, 2, length($2)-3) }')
+ if [[ ! -d /opt/garmin-grafana/ ]] || [[ "${RELEASE}" != "$(cat /opt/${APP}_version.txt)" ]] || [[ ! -f /opt/${APP}_version.txt ]]; then
+ msg_info "Stopping $APP"
+ systemctl stop garmin-grafana
+ systemctl stop grafana-server
+ systemctl stop influxdb
+ msg_ok "Stopped $APP"
+
+ if [[ ! -f /opt/garmin-grafana/.env ]]; then
+ msg_error "No .env file found in /opt/garmin-grafana/.env"
+ exit
+ fi
+ source /opt/garmin-grafana/.env
+ if [[ -z "${INFLUXDB_USER}" || -z "${INFLUXDB_PASSWORD}" || -z "${INFLUXDB_NAME}" ]]; then
+ msg_error "INFLUXDB_USER, INFLUXDB_PASSWORD, or INFLUXDB_NAME not set in .env file"
+ exit
+ fi
+
+ msg_info "Creating Backup"
+ tar -czf "/opt/${APP}_backup_$(date +%F).tar.gz" /opt/garmin-grafana/.garminconnect /opt/garmin-grafana/.env
+ mv /opt/garmin-grafana/ /opt/garmin-grafana-backup/
+ msg_ok "Backup Created"
+
+ msg_info "Updating $APP to v${RELEASE}"
+ curl -fsSL -o "${RELEASE}.zip" "https://github.com/arpanghosh8453/garmin-grafana/archive/refs/tags/${RELEASE}.zip"
+ unzip -q "${RELEASE}.zip"
+ mv "garmin-grafana-${RELEASE}/" "/opt/garmin-grafana"
+ rm -f "${RELEASE}.zip"
+ $STD uv sync --locked --project /opt/garmin-grafana/
+ # shellcheck disable=SC2016
+ sed -i 's/\${DS_GARMIN_STATS}/garmin_influxdb/g' /opt/garmin-grafana/Grafana_Dashboard/Garmin-Grafana-Dashboard.json
+ sed -i 's/influxdb:8086/localhost:8086/' /opt/garmin-grafana/Grafana_Datasource/influxdb.yaml
+ sed -i "s/influxdb_user/${INFLUXDB_USER}/" /opt/garmin-grafana/Grafana_Datasource/influxdb.yaml
+ sed -i "s/influxdb_secret_password/${INFLUXDB_PASSWORD}/" /opt/garmin-grafana/Grafana_Datasource/influxdb.yaml
+ sed -i "s/GarminStats/${INFLUXDB_NAME}/" /opt/garmin-grafana/Grafana_Datasource/influxdb.yaml
+ # Copy across grafana data
+ cp -r /opt/garmin-grafana/Grafana_Datasource/* /etc/grafana/provisioning/datasources
+ cp -r /opt/garmin-grafana/Grafana_Dashboard/* /etc/grafana/provisioning/dashboards
+ # Copy back the env and token files
+ cp /opt/garmin-grafana-backup/.env /opt/garmin-grafana/.env
+ cp -r /opt/garmin-grafana-backup/.garminconnect /opt/garmin-grafana/.garminconnect
+ msg_ok "Updated $APP to v${RELEASE}"
+
+ msg_info "Starting $APP"
+ systemctl start garmin-grafana
+ systemctl start grafana-server
+ systemctl start influxdb
+ msg_ok "Started $APP"
+
+ msg_info "Cleaning Up"
+ rm -rf /opt/garmin-grafana-backup
+ msg_ok "Cleanup Completed"
+
+ echo "${RELEASE}" >/opt/${APP}_version.txt
+ msg_ok "Update Successful"
+ else
+ msg_ok "No update required. ${APP} is already at v${RELEASE}"
+ fi
exit
- fi
-
- RELEASE=$(curl -fsSL https://api.github.com/repos/arpanghosh8453/garmin-grafana/releases/latest | grep "tag_name" | awk '{print substr($2, 2, length($2)-3) }')
- if [[ ! -d /opt/garmin-grafana/ ]] || [[ "${RELEASE}" != "$(cat /opt/${APP}_version.txt)" ]] || [[ ! -f /opt/${APP}_version.txt ]]; then
- msg_info "Stopping $APP"
- systemctl stop garmin-grafana
- systemctl stop grafana-server
- systemctl stop influxdb
- msg_ok "Stopped $APP"
-
- if [[ ! -f /opt/garmin-grafana/.env ]]; then
- msg_error "No .env file found in /opt/garmin-grafana/.env"
- exit
- fi
- source /opt/garmin-grafana/.env
- if [[ -z "${INFLUXDB_USER}" || -z "${INFLUXDB_PASSWORD}" || -z "${INFLUXDB_NAME}" ]]; then
- msg_error "INFLUXDB_USER, INFLUXDB_PASSWORD, or INFLUXDB_NAME not set in .env file"
- exit
- fi
-
- msg_info "Creating Backup"
- tar -czf "/opt/${APP}_backup_$(date +%F).tar.gz" /opt/garmin-grafana/.garminconnect /opt/garmin-grafana/.env
- mv /opt/garmin-grafana/ /opt/garmin-grafana-backup/
- msg_ok "Backup Created"
-
- msg_info "Updating $APP to v${RELEASE}"
- curl -fsSL -o "${RELEASE}.zip" "https://github.com/arpanghosh8453/garmin-grafana/archive/refs/tags/${RELEASE}.zip"
- unzip -q "${RELEASE}.zip"
- mv "garmin-grafana-${RELEASE}/" "/opt/garmin-grafana"
- rm -f "${RELEASE}.zip"
- $STD uv sync --locked --project /opt/garmin-grafana/
- # shellcheck disable=SC2016
- sed -i 's/\${DS_GARMIN_STATS}/garmin_influxdb/g' /opt/garmin-grafana/Grafana_Dashboard/Garmin-Grafana-Dashboard.json
- sed -i 's/influxdb:8086/localhost:8086/' /opt/garmin-grafana/Grafana_Datasource/influxdb.yaml
- sed -i "s/influxdb_user/${INFLUXDB_USER}/" /opt/garmin-grafana/Grafana_Datasource/influxdb.yaml
- sed -i "s/influxdb_secret_password/${INFLUXDB_PASSWORD}/" /opt/garmin-grafana/Grafana_Datasource/influxdb.yaml
- sed -i "s/GarminStats/${INFLUXDB_NAME}/" /opt/garmin-grafana/Grafana_Datasource/influxdb.yaml
- # Copy across grafana data
- cp -r /opt/garmin-grafana/Grafana_Datasource/* /etc/grafana/provisioning/datasources
- cp -r /opt/garmin-grafana/Grafana_Dashboard/* /etc/grafana/provisioning/dashboards
- # Copy back the env and token files
- cp /opt/garmin-grafana-backup/.env /opt/garmin-grafana/.env
- cp -r /opt/garmin-grafana-backup/.garminconnect /opt/garmin-grafana/.garminconnect
- msg_ok "Updated $APP to v${RELEASE}"
-
- msg_info "Starting $APP"
- systemctl start garmin-grafana
- systemctl start grafana-server
- systemctl start influxdb
- msg_ok "Started $APP"
-
- msg_info "Cleaning Up"
- rm -rf /opt/garmin-grafana-backup
- msg_ok "Cleanup Completed"
-
- echo "${RELEASE}" >/opt/${APP}_version.txt
- msg_ok "Update Successful"
- else
- msg_ok "No update required. ${APP} is already at v${RELEASE}"
- fi
- exit
}
start
diff --git a/ct/gitea-mirror.sh b/ct/gitea-mirror.sh
index ce0e21ee7..4c5fca198 100644
--- a/ct/gitea-mirror.sh
+++ b/ct/gitea-mirror.sh
@@ -1,5 +1,5 @@
#!/usr/bin/env bash
-source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)
+source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func)
# Copyright (c) 2021-2025 community-scripts ORG
# Author: CrazyWolf13
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
@@ -13,6 +13,7 @@ var_disk="${var_disk:-6}"
var_os="${var_os:-debian}"
var_version="${var_version:-12}"
var_unprivileged="${var_unprivileged:-1}"
+var_app_version="${var_app_version:-latest}"
header_info "$APP"
@@ -24,7 +25,6 @@ function update_script() {
header_info
check_container_storage
check_container_resources
-
if [[ ! -d /opt/gitea-mirror ]]; then
msg_error "No ${APP} Installation Found!"
exit
@@ -33,35 +33,62 @@ function update_script() {
APP_VERSION=$(grep -o '"version": *"[^"]*"' /opt/gitea-mirror/package.json | cut -d'"' -f4)
if [[ $APP_VERSION =~ ^2\. ]]; then
if ! whiptail --backtitle "Gitea Mirror Update" --title "⚠️ VERSION 2.x DETECTED" --yesno \
- "WARNING: Version $APP_VERSION detected!\n\nUpdating from version 2.x will CLEAR ALL CONFIGURATION.\n\nThis includes:\n• API tokens\n• User settings\n• Repository configurations\n• All custom settings\n\nDo you want to continue with the update process?" 15 70 --defaultno
- then
+ "WARNING: Version $APP_VERSION detected!\n\nUpdating from version 2.x will CLEAR ALL CONFIGURATION.\n\nThis includes:\n• API tokens\n• User settings\n• Repository configurations\n• All custom settings\n\nDo you want to continue with the update process?" 15 70 --defaultno; then
exit 0
fi
if ! whiptail --backtitle "Gitea Mirror Update" --title "⚠️ FINAL CONFIRMATION" --yesno \
- "FINAL WARNING: This update WILL clear all configuration!\n\nBEFORE PROCEEDING, please:\n\n• Copy API tokens to a safe location\n• Backup any custom configurations\n• Note down repository settings\n\nThis action CANNOT be undone!" 18 70 --defaultno
- then
- whiptail --backtitle "Gitea Mirror Update" --title "Update Cancelled" --msgbox "Update process cancelled. Please backup your configuration before proceeding." 8 60
- exit 0
+ "FINAL WARNING: This update WILL clear all configuration!\n\nBEFORE PROCEEDING, please:\n\n• Copy API tokens to a safe location\n• Backup any custom configurations\n• Note down repository settings\n\nThis action CANNOT be undone!" 18 70 --defaultno; then
+ whiptail --backtitle "Gitea Mirror Update" --title "Update Cancelled" --msgbox "Update process cancelled. Please backup your configuration before proceeding." 8 60
+ exit 0
fi
whiptail --backtitle "Gitea Mirror Update" --title "Proceeding with Update" --msgbox \
- "Proceeding with version $APP_VERSION update.\n\nAll configuration will be cleared as warned." 8 50
+ "Proceeding with version $APP_VERSION update.\n\nAll configuration will be cleared as warned." 8 50
rm -rf /opt/gitea-mirror
fi
- RELEASE=$(curl -fsSL https://api.github.com/repos/RayLabsHQ/gitea-mirror/releases/latest | grep "tag_name" | awk '{print substr($2, 3, length($2)-4) }')
- if [[ "${RELEASE}" != "$(cat ~/.${APP} 2>/dev/null || cat /opt/${APP}_version.txt 2>/dev/null)" ]]; then
+ if [[ ! -f /opt/gitea-mirror.env ]]; then
+ msg_info "Detected old Environment, updating files"
+ APP_SECRET=$(openssl rand -base64 32)
+ HOST_IP=$(hostname -I | awk '{print $1}')
+ cat <<EOF >/opt/gitea-mirror.env
+# See here for config options: https://github.com/RayLabsHQ/gitea-mirror/blob/main/docs/ENVIRONMENT_VARIABLES.md
+NODE_ENV=production
+HOST=0.0.0.0
+PORT=4321
+DATABASE_URL=sqlite://data/gitea-mirror.db
+BETTER_AUTH_URL=http://${HOST_IP}:4321
+BETTER_AUTH_SECRET=${APP_SECRET}
+npm_package_version=${APP_VERSION}
+EOF
+ rm /etc/systemd/system/gitea-mirror.service
+ cat <<EOF >/etc/systemd/system/gitea-mirror.service
+[Unit]
+Description=Gitea Mirror
+After=network.target
+[Service]
+Type=simple
+WorkingDirectory=/opt/gitea-mirror
+ExecStart=/usr/local/bin/bun dist/server/entry.mjs
+Restart=on-failure
+RestartSec=10
+EnvironmentFile=/opt/gitea-mirror.env
+[Install]
+WantedBy=multi-user.target
+EOF
+ systemctl daemon-reload
+ msg_ok "Old Environment fixed"
+fi
+ if check_for_gh_release "gitea-mirror" "RayLabsHQ/gitea-mirror"; then
msg_info "Stopping Services"
systemctl stop gitea-mirror
msg_ok "Services Stopped"
- if [[ -d /opt/gitea-mirror/data ]]; then
- msg_info "Backing up Data"
- mkdir -p /opt/gitea-mirror-backup/data
- cp /opt/gitea-mirror/data/* /opt/gitea-mirror-backup/data/
- msg_ok "Backed up Data"
- fi
+ msg_info "Backup Data"
+ mkdir -p /opt/gitea-mirror-backup/data
+ cp /opt/gitea-mirror/data/* /opt/gitea-mirror-backup/data/
+ msg_ok "Backup Data"
msg_info "Installing Bun"
export BUN_INSTALL=/opt/bun
@@ -71,26 +98,25 @@ function update_script() {
msg_ok "Installed Bun"
rm -rf /opt/gitea-mirror
- fetch_and_deploy_gh_release "gitea-mirror" "RayLabsHQ/gitea-mirror" "tarball" "v3.0.2"
+ fetch_and_deploy_gh_release "gitea-mirror" "RayLabsHQ/gitea-mirror" "tarball" $var_app_version
- msg_info "Updating and rebuilding ${APP} to v${RELEASE}"
+ msg_info "Updating and rebuilding ${APP}"
cd /opt/gitea-mirror
$STD bun run setup
$STD bun run build
APP_VERSION=$(grep -o '"version": *"[^"]*"' package.json | cut -d'"' -f4)
- sudo sed -i.bak "s|^Environment=npm_package_version=.*|Environment=npm_package_version=${APP_VERSION}|" /etc/systemd/system/gitea-mirror.service
- msg_ok "Updated and rebuilt ${APP} to v${RELEASE}"
+
+ sudo sed -i.bak "s|^npm_package_version=.*|npm_package_version=${APP_VERSION}|" /opt/gitea-mirror.env
+ msg_ok "Updated and rebuilt ${APP}"
msg_info "Restoring Data"
- cp /opt/gitea-mirror-backup/data/* /opt/gitea-mirror/data || true
+ cp /opt/gitea-mirror-backup/data/* /opt/gitea-mirror/data
msg_ok "Restored Data"
msg_info "Starting Service"
- systemctl daemon-reload
systemctl start gitea-mirror
msg_ok "Service Started"
- else
- msg_ok "No update required. ${APP} is already at v${RELEASE}"
+ msg_ok "Updated Successfully"
fi
exit
}
diff --git a/ct/hanko.sh b/ct/hanko.sh
deleted file mode 100644
index 2a8dda15b..000000000
--- a/ct/hanko.sh
+++ /dev/null
@@ -1,44 +0,0 @@
-#!/usr/bin/env bash
-source <(curl -s https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/build.func)
-# Copyright (c) 2021-2025 tteck
-# Author: tteck (tteckster)
-# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
-# Source: https://www.debian.org/
-
-APP="Hanko"
-var_tags="${var_tags:-os}"
-var_cpu="${var_cpu:-1}"
-var_ram="${var_ram:-1024}"
-var_disk="${var_disk:-4}"
-var_os="${var_os:-debian}"
-var_version="${var_version:-12}"
-var_unprivileged="${var_unprivileged:-1}"
-
-header_info "$APP"
-variables
-color
-catch_errors
-
-function update_script() {
- header_info
- check_container_storage
- check_container_resources
- if [[ ! -d /var ]]; then
- msg_error "No ${APP} Installation Found!"
- exit
- fi
- msg_info "Updating $APP LXC"
- $STD apt-get update
- $STD apt-get -y upgrade
- msg_ok "Updated $APP LXC"
- exit
-}
-
-start
-build_container
-description
-
-msg_ok "Completed Successfully!\n"
-echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
-echo -e "${INFO}${YW} Access it using the following URL:${CL}"
-echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:8000${CL}"
diff --git a/ct/headers/alpine-ntfy b/ct/headers/alpine-ntfy
new file mode 100644
index 000000000..bc4164342
--- /dev/null
+++ b/ct/headers/alpine-ntfy
@@ -0,0 +1,6 @@
+ ___ __ _ __ ____
+ / | / /___ (_)___ ___ ____ / /_/ __/_ __
+ / /| | / / __ \/ / __ \/ _ \______/ __ \/ __/ /_/ / / /
+ / ___ |/ / /_/ / / / / / __/_____/ / / / /_/ __/ /_/ /
+/_/ |_/_/ .___/_/_/ /_/\___/ /_/ /_/\__/_/ \__, /
+ /_/ /____/
diff --git a/ct/headers/asterisk b/ct/headers/asterisk
new file mode 100644
index 000000000..ed4356862
--- /dev/null
+++ b/ct/headers/asterisk
@@ -0,0 +1,6 @@
+ ___ __ _ __
+ / | _____/ /____ _____(_)____/ /__
+ / /| | / ___/ __/ _ \/ ___/ / ___/ //_/
+ / ___ |(__ ) /_/ __/ / / (__ ) ,<
+/_/ |_/____/\__/\___/_/ /_/____/_/|_|
+
diff --git a/ct/headers/docker b/ct/headers/docker
deleted file mode 100644
index 907ffbaef..000000000
--- a/ct/headers/docker
+++ /dev/null
@@ -1,6 +0,0 @@
- ____ __
- / __ \____ _____/ /_____ _____
- / / / / __ \/ ___/ //_/ _ \/ ___/
- / /_/ / /_/ / /__/ ,< / __/ /
-/_____/\____/\___/_/|_|\___/_/
-
diff --git a/ct/headers/docspell b/ct/headers/docspell
deleted file mode 100644
index 51277d47b..000000000
--- a/ct/headers/docspell
+++ /dev/null
@@ -1,6 +0,0 @@
- ____ ____
- / __ \____ ______________ ___ / / /
- / / / / __ \/ ___/ ___/ __ \/ _ \/ / /
- / /_/ / /_/ / /__(__ ) /_/ / __/ / /
-/_____/\____/\___/____/ .___/\___/_/_/
- /_/
diff --git a/ct/headers/donetick b/ct/headers/donetick
new file mode 100644
index 000000000..7bcb7f3f3
--- /dev/null
+++ b/ct/headers/donetick
@@ -0,0 +1,6 @@
+ __ __ _ __
+ ____/ /___ ____ ___ / /_(_)____/ /__
+ / __ / __ \/ __ \/ _ \/ __/ / ___/ //_/
+/ /_/ / /_/ / / / / __/ /_/ / /__/ ,<
+\__,_/\____/_/ /_/\___/\__/_/\___/_/|_|
+
diff --git a/ct/headers/ente b/ct/headers/ente
new file mode 100644
index 000000000..f700a1f53
--- /dev/null
+++ b/ct/headers/ente
@@ -0,0 +1,6 @@
+ ______ __
+ / ____/___ / /____
+ / __/ / __ \/ __/ _ \
+ / /___/ / / / /_/ __/
+/_____/_/ /_/\__/\___/
+
diff --git a/ct/headers/freepbx b/ct/headers/freepbx
new file mode 100644
index 000000000..25541c2ed
--- /dev/null
+++ b/ct/headers/freepbx
@@ -0,0 +1,6 @@
+ ______ ____ ____ _ __
+ / ____/_______ ___ / __ \/ __ ) |/ /
+ / /_ / ___/ _ \/ _ \/ /_/ / __ | /
+ / __/ / / / __/ __/ ____/ /_/ / |
+/_/ /_/ \___/\___/_/ /_____/_/|_|
+
diff --git a/ct/headers/hanko b/ct/headers/hanko
deleted file mode 100644
index e823d45dc..000000000
--- a/ct/headers/hanko
+++ /dev/null
@@ -1,6 +0,0 @@
- __ __ __
- / / / /___ _____ / /______
- / /_/ / __ `/ __ \/ //_/ __ \
- / __ / /_/ / / / / ,< / /_/ /
-/_/ /_/\__,_/_/ /_/_/|_|\____/
-
diff --git a/ct/headers/healthchecks b/ct/headers/healthchecks
deleted file mode 100644
index 8f61c8776..000000000
--- a/ct/headers/healthchecks
+++ /dev/null
@@ -1,6 +0,0 @@
- __ ____ __ __ __
- / /_ ___ ____ _/ / /_/ /_ _____/ /_ ___ _____/ /_______
- / __ \/ _ \/ __ `/ / __/ __ \/ ___/ __ \/ _ \/ ___/ //_/ ___/
- / / / / __/ /_/ / / /_/ / / / /__/ / / / __/ /__/ ,< (__ )
-/_/ /_/\___/\__,_/_/\__/_/ /_/\___/_/ /_/\___/\___/_/|_/____/
-
diff --git a/ct/headers/infisical b/ct/headers/infisical
new file mode 100644
index 000000000..d378f9dcb
--- /dev/null
+++ b/ct/headers/infisical
@@ -0,0 +1,6 @@
+ ____ _____ _ __
+ / _/___ / __(_)____(_)________ _/ /
+ / // __ \/ /_/ / ___/ / ___/ __ `/ /
+ _/ // / / / __/ (__ ) / /__/ /_/ / /
+/___/_/ /_/_/ /_/____/_/\___/\__,_/_/
+
diff --git a/ct/headers/jeedom b/ct/headers/jeedom
deleted file mode 100644
index 47ff4dfb6..000000000
--- a/ct/headers/jeedom
+++ /dev/null
@@ -1,6 +0,0 @@
- __ __
- / /__ ___ ____/ /___ ____ ___
- __ / / _ \/ _ \/ __ / __ \/ __ `__ \
-/ /_/ / __/ __/ /_/ / /_/ / / / / / /
-\____/\___/\___/\__,_/\____/_/ /_/ /_/
-
diff --git a/ct/headers/kanba b/ct/headers/kanba
new file mode 100644
index 000000000..40c351f8b
--- /dev/null
+++ b/ct/headers/kanba
@@ -0,0 +1,6 @@
+ __ __ __
+ / //_/___ _____ / /_ ____ _
+ / ,< / __ `/ __ \/ __ \/ __ `/
+ / /| / /_/ / / / / /_/ / /_/ /
+/_/ |_\__,_/_/ /_/_.___/\__,_/
+
diff --git a/ct/headers/librespeed b/ct/headers/librespeed
deleted file mode 100644
index b75b5cec5..000000000
--- a/ct/headers/librespeed
+++ /dev/null
@@ -1,6 +0,0 @@
- ___ __ __
- / (_) /_ ________ _________ ___ ___ ____/ /
- / / / __ \/ ___/ _ \/ ___/ __ \/ _ \/ _ \/ __ /
- / / / /_/ / / / __(__ ) /_/ / __/ __/ /_/ /
-/_/_/_.___/_/ \___/____/ .___/\___/\___/\__,_/
- /_/
diff --git a/ct/headers/linkstack b/ct/headers/linkstack
deleted file mode 100644
index c3413d299..000000000
--- a/ct/headers/linkstack
+++ /dev/null
@@ -1,6 +0,0 @@
- __ _ __ _____ __ __
- / / (_)___ / /__/ ___// /_____ ______/ /__
- / / / / __ \/ //_/\__ \/ __/ __ `/ ___/ //_/
- / /___/ / / / / ,< ___/ / /_/ /_/ / /__/ ,<
-/_____/_/_/ /_/_/|_|/____/\__/\__,_/\___/_/|_|
-
diff --git a/ct/headers/manyfold b/ct/headers/manyfold
deleted file mode 100644
index 1f6a66f2d..000000000
--- a/ct/headers/manyfold
+++ /dev/null
@@ -1,6 +0,0 @@
- __ ___ ____ __ __
- / |/ /___ _____ __ __/ __/___ / /___/ /
- / /|_/ / __ `/ __ \/ / / / /_/ __ \/ / __ /
- / / / / /_/ / / / / /_/ / __/ /_/ / / /_/ /
-/_/ /_/\__,_/_/ /_/\__, /_/ \____/_/\__,_/
- /____/
diff --git a/ct/headers/maxun b/ct/headers/maxun
deleted file mode 100644
index 056b0ab51..000000000
--- a/ct/headers/maxun
+++ /dev/null
@@ -1,6 +0,0 @@
- __ ___
- / |/ /___ __ ____ ______
- / /|_/ / __ `/ |/_/ / / / __ \
- / / / / /_/ /> /_/ / / / /
-/_/ /_/\__,_/_/|_|\__,_/_/ /_/
-
diff --git a/ct/headers/maybefinance b/ct/headers/maybefinance
deleted file mode 100644
index 834c0fb4d..000000000
--- a/ct/headers/maybefinance
+++ /dev/null
@@ -1,6 +0,0 @@
- __ ___ __ _______
- / |/ /___ ___ __/ /_ ___ / ____(_)___ ____ _____ ________
- / /|_/ / __ `/ / / / __ \/ _ \ / /_ / / __ \/ __ `/ __ \/ ___/ _ \
- / / / / /_/ / /_/ / /_/ / __/ / __/ / / / / / /_/ / / / / /__/ __/
-/_/ /_/\__,_/\__, /_.___/\___/ /_/ /_/_/ /_/\__,_/_/ /_/\___/\___/
- /____/
diff --git a/ct/headers/miniflux b/ct/headers/miniflux
new file mode 100644
index 000000000..cb3195ae2
--- /dev/null
+++ b/ct/headers/miniflux
@@ -0,0 +1,6 @@
+ __ ____ _ ______
+ / |/ (_)___ (_) __/ /_ ___ __
+ / /|_/ / / __ \/ / /_/ / / / / |/_/
+ / / / / / / / / / __/ / /_/ /> <
+/_/ /_/_/_/ /_/_/_/ /_/\__,_/_/|_|
+
diff --git a/ct/headers/nginxproxymanager b/ct/headers/nginxproxymanager
new file mode 100644
index 000000000..d68d0c9d8
--- /dev/null
+++ b/ct/headers/nginxproxymanager
@@ -0,0 +1,6 @@
+ _ __ _ ____ __ ___
+ / | / /___ _(_)___ _ __ / __ \_________ _ ____ __ / |/ /___ _____ ____ _____ ____ _____
+ / |/ / __ `/ / __ \| |/_/ / /_/ / ___/ __ \| |/_/ / / / / /|_/ / __ `/ __ \/ __ `/ __ `/ _ \/ ___/
+ / /| / /_/ / / / / /> < / ____/ / / /_/ /> /_/ / / / / / /_/ / / / / /_/ / /_/ / __/ /
+/_/ |_/\__, /_/_/ /_/_/|_| /_/ /_/ \____/_/|_|\__, / /_/ /_/\__,_/_/ /_/\__,_/\__, /\___/_/
+ /____/ /____/ /____/
diff --git a/ct/headers/notesnook b/ct/headers/notesnook
deleted file mode 100644
index c5dc22073..000000000
--- a/ct/headers/notesnook
+++ /dev/null
@@ -1,6 +0,0 @@
- __ __
- ____ ____ / /____ _________ ____ ____ / /__
- / __ \/ __ \/ __/ _ \/ ___/ __ \/ __ \/ __ \/ //_/
- / / / / /_/ / /_/ __(__ ) / / / /_/ / /_/ / ,<
-/_/ /_/\____/\__/\___/____/_/ /_/\____/\____/_/|_|
-
diff --git a/ct/headers/opencloud b/ct/headers/opencloud
deleted file mode 100644
index 6a32b1fae..000000000
--- a/ct/headers/opencloud
+++ /dev/null
@@ -1,6 +0,0 @@
- ____ ________ __
- / __ \____ ___ ____ / ____/ /___ __ ______/ /
- / / / / __ \/ _ \/ __ \/ / / / __ \/ / / / __ /
-/ /_/ / /_/ / __/ / / / /___/ / /_/ / /_/ / /_/ /
-\____/ .___/\___/_/ /_/\____/_/\____/\__,_/\__,_/
- /_/
diff --git a/ct/headers/openwebui b/ct/headers/openwebui
new file mode 100644
index 000000000..0097a279b
--- /dev/null
+++ b/ct/headers/openwebui
@@ -0,0 +1,6 @@
+ ____ _ __ __ __ ______
+ / __ \____ ___ ____ | | / /__ / /_ / / / / _/
+ / / / / __ \/ _ \/ __ \ | | /| / / _ \/ __ \/ / / // /
+/ /_/ / /_/ / __/ / / / | |/ |/ / __/ /_/ / /_/ // /
+\____/ .___/\___/_/ /_/ |__/|__/\___/_.___/\____/___/
+ /_/
diff --git a/ct/headers/ots b/ct/headers/ots
deleted file mode 100644
index 64a5fc431..000000000
--- a/ct/headers/ots
+++ /dev/null
@@ -1,6 +0,0 @@
- ____ ___________
- / __ \/_ __/ ___/
- / / / / / / \__ \
-/ /_/ / / / ___/ /
-\____/ /_/ /____/
-
diff --git a/ct/headers/pangolin b/ct/headers/pangolin
new file mode 100644
index 000000000..0a2d42304
--- /dev/null
+++ b/ct/headers/pangolin
@@ -0,0 +1,6 @@
+ ____ ___
+ / __ \____ _____ ____ _____ / (_)___
+ / /_/ / __ `/ __ \/ __ `/ __ \/ / / __ \
+ / ____/ /_/ / / / / /_/ / /_/ / / / / / /
+/_/ \__,_/_/ /_/\__, /\____/_/_/_/ /_/
+ /____/
diff --git a/ct/headers/petio b/ct/headers/petio
new file mode 100644
index 000000000..d8deb745a
--- /dev/null
+++ b/ct/headers/petio
@@ -0,0 +1,6 @@
+ ____ __ _
+ / __ \___ / /_(_)___
+ / /_/ / _ \/ __/ / __ \
+ / ____/ __/ /_/ / /_/ /
+/_/ \___/\__/_/\____/
+
diff --git a/ct/headers/postiz b/ct/headers/postiz
deleted file mode 100644
index 2c5c0ba81..000000000
--- a/ct/headers/postiz
+++ /dev/null
@@ -1,6 +0,0 @@
- ____ __ _
- / __ \____ _____/ /_(_)___
- / /_/ / __ \/ ___/ __/ /_ /
- / ____/ /_/ (__ ) /_/ / / /_
-/_/ \____/____/\__/_/ /___/
-
diff --git a/ct/headers/reactive-resume b/ct/headers/reactive-resume
deleted file mode 100644
index e897791a2..000000000
--- a/ct/headers/reactive-resume
+++ /dev/null
@@ -1,6 +0,0 @@
- ____ __ _ ____
- / __ \___ ____ ______/ /_(_) _____ / __ \___ _______ ______ ___ ___
- / /_/ / _ \/ __ `/ ___/ __/ / | / / _ \______/ /_/ / _ \/ ___/ / / / __ `__ \/ _ \
- / _, _/ __/ /_/ / /__/ /_/ /| |/ / __/_____/ _, _/ __(__ ) /_/ / / / / / / __/
-/_/ |_|\___/\__,_/\___/\__/_/ |___/\___/ /_/ |_|\___/____/\__,_/_/ /_/ /_/\___/
-
diff --git a/ct/headers/romm b/ct/headers/romm
new file mode 100644
index 000000000..7f214d48e
--- /dev/null
+++ b/ct/headers/romm
@@ -0,0 +1,6 @@
+ ____ __ ___
+ / __ \____ ____ ___ / |/ /
+ / /_/ / __ \/ __ `__ \/ /|_/ /
+ / _, _/ /_/ / / / / / / / / /
+/_/ |_|\____/_/ /_/ /_/_/ /_/
+
diff --git a/ct/headers/saltmaster b/ct/headers/saltmaster
deleted file mode 100644
index 9b7334683..000000000
--- a/ct/headers/saltmaster
+++ /dev/null
@@ -1,6 +0,0 @@
- ____ __
- _________ _/ / /_____ ___ ____ ______/ /____ _____
- / ___/ __ `/ / __/ __ `__ \/ __ `/ ___/ __/ _ \/ ___/
- (__ ) /_/ / / /_/ / / / / / /_/ (__ ) /_/ __/ /
-/____/\__,_/_/\__/_/ /_/ /_/\__,_/____/\__/\___/_/
-
diff --git a/ct/headers/scraparr b/ct/headers/scraparr
deleted file mode 100644
index 5e5b3f949..000000000
--- a/ct/headers/scraparr
+++ /dev/null
@@ -1,6 +0,0 @@
- _____
- / ___/______________ _____ ____ ___________
- \__ \/ ___/ ___/ __ `/ __ \/ __ `/ ___/ ___/
- ___/ / /__/ / / /_/ / /_/ / /_/ / / / /
-/____/\___/_/ \__,_/ .___/\__,_/_/ /_/
- /_/
diff --git a/ct/headers/snowshare b/ct/headers/snowshare
new file mode 100644
index 000000000..160614e0c
--- /dev/null
+++ b/ct/headers/snowshare
@@ -0,0 +1,6 @@
+ _____ _____ __
+ / ___/____ ____ _ __/ ___// /_ ____ _________
+ \__ \/ __ \/ __ \ | /| / /\__ \/ __ \/ __ `/ ___/ _ \
+ ___/ / / / / /_/ / |/ |/ /___/ / / / / /_/ / / / __/
+/____/_/ /_/\____/|__/|__//____/_/ /_/\__,_/_/ \___/
+
diff --git a/ct/headers/tracktor b/ct/headers/tracktor
new file mode 100644
index 000000000..d4802c5aa
--- /dev/null
+++ b/ct/headers/tracktor
@@ -0,0 +1,6 @@
+ __ __ __
+ / /__________ ______/ /__/ /_____ _____
+ / __/ ___/ __ `/ ___/ //_/ __/ __ \/ ___/
+/ /_/ / / /_/ / /__/ ,< / /_/ /_/ / /
+\__/_/ \__,_/\___/_/|_|\__/\____/_/
+
diff --git a/ct/headers/transmission-openvpn b/ct/headers/transmission-openvpn
new file mode 100644
index 000000000..3c6830b05
--- /dev/null
+++ b/ct/headers/transmission-openvpn
@@ -0,0 +1,6 @@
+ __ _ _
+ / /__________ _____ _________ ___ (_)_________(_)___ ____ ____ ____ ___ ____ _ ______ ____
+ / __/ ___/ __ `/ __ \/ ___/ __ `__ \/ / ___/ ___/ / __ \/ __ \______/ __ \/ __ \/ _ \/ __ \ | / / __ \/ __ \
+/ /_/ / / /_/ / / / (__ ) / / / / / (__ |__ ) / /_/ / / / /_____/ /_/ / /_/ / __/ / / / |/ / /_/ / / / /
+\__/_/ \__,_/_/ /_/____/_/ /_/ /_/_/____/____/_/\____/_/ /_/ \____/ .___/\___/_/ /_/|___/ .___/_/ /_/
+ /_/ /_/
diff --git a/ct/headers/vikunja b/ct/headers/vikunja
deleted file mode 100644
index afd006f1c..000000000
--- a/ct/headers/vikunja
+++ /dev/null
@@ -1,6 +0,0 @@
- _ ___ __ _
-| | / (_) /____ ______ (_)___ _
-| | / / / //_/ / / / __ \ / / __ `/
-| |/ / / ,< / /_/ / / / / / / /_/ /
-|___/_/_/|_|\__,_/_/ /_/_/ /\__,_/
- /___/
diff --git a/ct/infisical.sh b/ct/infisical.sh
new file mode 100644
index 000000000..389e69d79
--- /dev/null
+++ b/ct/infisical.sh
@@ -0,0 +1,60 @@
+#!/usr/bin/env bash
+source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)
+# Copyright (c) 2021-2025 community-scripts ORG
+# Author: Slaviša Arežina (tremor021)
+# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
+# Source: https://infisical.com/
+
+APP="Infisical"
+var_tags="${var_tags:-auth}"
+var_cpu="${var_cpu:-2}"
+var_ram="${var_ram:-2048}"
+var_disk="${var_disk:-6}"
+var_os="${var_os:-debian}"
+var_version="${var_version:-13}"
+var_unprivileged="${var_unprivileged:-1}"
+
+header_info "$APP"
+variables
+color
+catch_errors
+
+function update_script() {
+ header_info
+ check_container_storage
+ check_container_resources
+ if [[ ! -d /etc/infisical ]]; then
+ msg_error "No ${APP} Installation Found!"
+ exit
+ fi
+
+ msg_info "Stopping service"
+ $STD infisical-ctl stop
+ msg_ok "Service stopped"
+
+ msg_info "Creating backup"
+ DB_PASS=$(grep -Po '(?<=^Database Password:\s).*' ~/infisical.creds | head -n1)
+ PGPASSWORD=$DB_PASS pg_dump -U infisical -h localhost -d infisical_db > /opt/infisical_backup.sql
+ msg_ok "Created backup"
+
+ msg_info "Updating Infisical"
+ $STD apt update
+ $STD apt install -y infisical-core
+ $STD infisical-ctl reconfigure
+ msg_ok "Updated Infisical"
+
+ msg_info "Starting service"
+ infisical-ctl start
+ msg_ok "Started service"
+ msg_ok "Updated successfully"
+ exit
+}
+
+start
+build_container
+description
+
+msg_ok "Completed Successfully!\n"
+echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
+echo -e "${INFO}${YW} Access it using the following URL:${CL}"
+echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:8080${CL}"
diff --git a/ct/healthchecks.sh b/ct/kanba.sh
similarity index 53%
rename from ct/healthchecks.sh
rename to ct/kanba.sh
index 9c6f404c3..270f5e00f 100644
--- a/ct/healthchecks.sh
+++ b/ct/kanba.sh
@@ -1,15 +1,15 @@
#!/usr/bin/env bash
-source <(curl -s https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/build.func)
+source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/build.func)
# Copyright (c) 2021-2025 community-scripts ORG
# Author: MickLesk (CanbiZ)
-# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
-# Source:
+# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
+# Source: https://github.com/Kanba-co/kanba
-APP="healthchecks"
-var_tags="${var_tags:-monitoring}"
-var_cpu="${var_cpu:-4}"
-var_ram="${var_ram:-4096}"
-var_disk="${var_disk:-20}"
+APP="Kanba"
+var_tags="${var_tags:-kanban}"
+var_cpu="${var_cpu:-2}"
+var_ram="${var_ram:-2048}"
+var_disk="${var_disk:-5}"
var_os="${var_os:-debian}"
var_version="${var_version:-12}"
var_unprivileged="${var_unprivileged:-1}"
@@ -23,11 +23,14 @@ function update_script() {
header_info
check_container_storage
check_container_resources
- if [[ ! -f /etc/systemd/system/healthchecks.service ]]; then
+ if [[ ! -d /var ]]; then
msg_error "No ${APP} Installation Found!"
exit
fi
- msg_error "No Update."
+ msg_info "Updating $APP LXC"
+ $STD apt-get update
+ $STD apt-get -y upgrade
+ msg_ok "Updated $APP LXC"
exit
}
@@ -38,4 +41,4 @@ description
msg_ok "Completed Successfully!\n"
echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
echo -e "${INFO}${YW} Access it using the following URL:${CL}"
-echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:8000${CL}"
+echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:3000${CL}"
diff --git a/ct/librenms.sh b/ct/librenms.sh
index 84eb74b40..aaa71a1af 100644
--- a/ct/librenms.sh
+++ b/ct/librenms.sh
@@ -1,5 +1,5 @@
#!/usr/bin/env bash
-source <(curl -s https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/build.func)
+source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/build.func)
# Copyright (c) 2021-2025 community-scripts ORG
# Author: michelroegl-brunner
# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
@@ -20,20 +20,20 @@ color
catch_errors
function update_script() {
- header_info
- check_container_storage
- check_container_resources
- if [ ! -d /opt/librenms ]; then
- msg_error "No ${APP} Installation Found!"
- exit
- fi
- msg_info "Updating ${APP} Installation"
- su librenms
- cd /opt/librenms
- ./daily.sh
- msg_ok "Updated ${APP} Installation"
+ header_info
+ check_container_storage
+ check_container_resources
+ if [ ! -d /opt/librenms ]; then
+ msg_error "No ${APP} Installation Found!"
+ exit
+ fi
+ msg_info "Updating ${APP} Installation"
+ su librenms
+ cd /opt/librenms
+ ./daily.sh
+ msg_ok "Updated ${APP} Installation"
- exit
+ exit
}
start
diff --git a/ct/librespeed.sh b/ct/librespeed.sh
deleted file mode 100644
index 931248cd3..000000000
--- a/ct/librespeed.sh
+++ /dev/null
@@ -1,56 +0,0 @@
-#!/usr/bin/env bash
-source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/refs/heads/main/misc/build.func)
-# Copyright (c) 2021-2025 community-scripts ORG
-# Author: elvito
-# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
-# Source: https://github.com/librespeed/speedtest
-
-APP="librespeed"
-var_tags="speedtest"
-var_cpu="1"
-var_ram="512"
-var_disk="4"
-var_os="debian"
-var_version="12"
-var_unprivileged="1"
-
-header_info "$APP"
-variables
-color
-catch_errors
-
-function update_script() {
- header_info
- check_container_storage
- check_container_resources
-
- if [[ ! -f /opt/librespeed/index.html ]]; then
- msg_error "No ${APP} Installation Found!"
- exit
- fi
- RELEASE=$(curl -fsSL https://api.github.com/repos/librespeed/speedtest/releases/latest | grep "tag_name" | awk '{print substr($2, 2, length($2)-3)}')
- if [[ ! -f /opt/librespeed/${APP}_version.txt ]] || [[ "${RELEASE}" != "$(cat /opt//librespeed/${APP}_version.txt)" ]]; then
- msg_info "Updating $APP..."
- temp_file=$(mktemp)
- curl -fsSL "https://github.com/librespeed/speedtest/archive/refs/tags/${RELEASE}.zip" -o "$temp_file"
- mkdir -p /temp
- unzip -qu "$temp_file" -d /temp
- cd /temp/speedtest-"${RELEASE}"
- cp -u favicon.ico index.html speedtest.js speedtest_worker.js /opt/librespeed/
- cp -ru backend /opt/librespeed/
- echo "${RELEASE}" >/opt/"${APP}"_version.txt
- systemctl restart caddy
- msg_ok "$APP has been updated."
- else
- msg_ok "No update required. ${APP} is already at ${RELEASE}"
- fi
- exit
-}
-start
-build_container
-description
-
-msg_ok "Completed Successfully!\n"
-echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
-echo -e "${INFO}${YW} Access it using the following URL:${CL}"
-echo -e "${TAB}${GATEWAY}${BGN}http://${IP}${CL}"
diff --git a/ct/linkstack.sh b/ct/linkstack.sh
deleted file mode 100644
index 2a88277d7..000000000
--- a/ct/linkstack.sh
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/usr/bin/env bash
-source <(curl -s https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)
-# Copyright (c) 2021-2025 community-scripts ORG
-# Author: Omar Minaya
-# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
-# Source: https://linkstack.org/
-
-APP="LinkStack"
-var_tags="${var_tags:-os}"
-var_cpu="${var_cpu:-1}"
-var_ram="${var_ram:-2048}"
-var_disk="${var_disk:-10}"
-var_os="${var_os:-debian}"
-var_version="${var_version:-12}"
-var_unprivileged="${var_unprivileged:-1}"
-
-header_info "$APP"
-variables
-color
-catch_errors
-
-function update_script() {
- header_info
- check_container_storage
- check_container_resources
-
- if [[ ! -d /var ]]; then
- msg_error "No ${APP} Installation Found!"
- exit
- fi
-
- msg_info "Updating $APP LXC"
- $STD apt-get update
- $STD apt-get -y upgrade
- msg_ok "Updated $APP LXC"
-
- exit
-}
-
-start
-build_container
-description
-
-msg_ok "Completed Successfully!\n"
-echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
-echo -e "${INFO}${YW} Complete setup at:${CL}"
-echo -e "${TAB}${GATEWAY}${BGN}http://${IP}${CL}"
diff --git a/ct/maxun.sh b/ct/maxun.sh
deleted file mode 100644
index ee0b3819e..000000000
--- a/ct/maxun.sh
+++ /dev/null
@@ -1,73 +0,0 @@
-#!/usr/bin/env bash
-source <(curl -s https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/build.func)
-# Copyright (c) 2021-2025 community-scripts ORG
-# Author: MickLesk (Canbiz)
-# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
-# Source: https://github.com/getmaxun/maxun
-
-APP="Maxun"
-var_tags="${var_tags:-scraper}"
-var_disk="${var_disk:-7}"
-var_cpu="${var_cpu:-2}"
-var_ram="${var_ram:-3072}"
-var_os="${var_os:-debian}"
-var_version="${var_version:-12}"
-var_unprivileged="${var_unprivileged:-1}"
-
-header_info "$APP"
-variables
-color
-catch_errors
-
-function update_script() {
- header_info
- check_container_storage
- check_container_resources
- if [[ ! -d /opt/maxun ]]; then
- msg_error "No ${APP} Installation Found!"
- exit
- fi
- RELEASE=$(curl -s https://api.github.com/repos/getmaxun/maxun/releases/latest | grep "tag_name" | awk '{print substr($2, 3, length($2)-4) }')
- if [[ ! -f /opt/${APP}_version.txt ]] || [[ "${RELEASE}" != "$(cat /opt/${APP}_version.txt)" ]]; then
- msg_info "Stopping Services"
- systemctl stop maxun minio redis
- msg_ok "Services Stopped"
-
- msg_info "Updating ${APP} to v${RELEASE}"
- mv /opt/maxun /opt/maxun_bak
- cd /opt
- curl -fsSL "https://github.com/getmaxun/maxun/archive/refs/tags/v${RELEASE}.zip"
- unzip -q v${RELEASE}.zip
- mv maxun-${RELEASE} /opt/maxun
- mv /opt/maxun_bak/.env /opt/maxun/
- cd /opt/maxun
- npm install --legacy-peer-deps
- cd /opt/maxun/maxun-core
- npm install --legacy-peer-deps
- cd /opt/maxun
- npx playwright install --with-deps chromium
- npx playwright install-deps
- "${RELEASE}" >/opt/${APP}_version.txt
-
- msg_info "Starting Services"
- systemctl start minio redis maxun
- msg_ok "Started Services"
-
- msg_info "Cleaning Up"
- rm -rf /opt/v${RELEASE}.zip
- msg_ok "Cleaned"
- msg_ok "Updated Successfully"
- else
- msg_ok "No update required. ${APP} is already at v${RELEASE}"
- fi
- exit
-}
-
-start
-build_container
-description
-
-msg_ok "Completed Successfully!\n"
-echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
-echo -e "${INFO}${YW} Access it using the following URL:${CL}"
-echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:5173${CL}"
diff --git a/ct/maybefinance.sh b/ct/maybefinance.sh
deleted file mode 100644
index 38a52ad3d..000000000
--- a/ct/maybefinance.sh
+++ /dev/null
@@ -1,85 +0,0 @@
-#!/usr/bin/env bash
-source <(curl -s https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)
-# Copyright (c) 2021-2025 community-scripts ORG
-# Author: vhsdream
-# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
-# Source: https://maybefinance.com
-
-APP="Maybe Finance"
-var_tags="${var_tags:-finance;budget}"
-var_cpu="${var_cpu:-2}"
-var_ram="${var_ram:-2048}"
-var_disk="${var_disk:-4}"
-var_os="${var_os:-debian}"
-var_version="${var_version:-12}"
-var_unprivileged="${var_unprivileged:-1}"
-
-header_info "$APP"
-variables
-color
-catch_errors
-
-function update_script() {
- header_info
- check_container_storage
- check_container_resources
-
- if [[ ! -d /opt/maybe ]]; then
- msg_error "No ${APP} Installation Found!"
- exit
- fi
-
- RELEASE=$(curl -s https://api.github.com/repos/maybe-finance/maybe/releases/latest | grep "tag_name" | awk '{print substr($2, 3, length($2)-4) }')
- if [[ "${RELEASE}" != "$(cat /opt/maybe_version.txt)" ]] || [[ ! -f /opt/maybe_version.txt ]]; then
- msg_info "Stopping $APP"
- systemctl stop maybe-web maybe-worker
- msg_ok "Stopped $APP"
-
- msg_info "Creating Backup"
- BACKUP_FILE="/opt/maybe_backup_$(date +%F).tar.gz"
- $STD tar -czf "$BACKUP_FILE" /opt/maybe/{.env,storage/} &>/dev/null
- msg_ok "Backup Created"
-
- msg_info "Updating $APP to v${RELEASE}"
- rm -rf /opt/maybe
- curl -fsSL "https://github.com/maybe-finance/maybe/archive/refs/tags/v${RELEASE}.zip" -o /tmp/v"$RELEASE".zip
- unzip -q /tmp/v"$RELEASE".zip
- mv maybe-"$RELEASE" /opt/maybe
- RUBY_VERSION="$(cat /opt/maybe/.ruby-version)" RUBY_INSTALL_RAILS=false setup_rbenv_stack
- cd /opt/maybe
- rm ./config/credentials.yml.enc
- source ~/.profile
- $STD tar -xf "$BACKUP_FILE" --directory=/
- $STD ./bin/bundle install
- $STD ./bin/bundle exec bootsnap precompile --gemfile -j 0
- $STD ./bin/bundle exec bootsnap precompile -j 0 app/ lib/
- export SECRET_KEY_BASE_DUMMY=1
- $STD dotenv -f ./.env ./bin/rails assets:precompile
- $STD dotenv -f ./.env ./bin/rails db:prepare
- msg_ok "Updated $APP to v${RELEASE}"
-
- msg_info "Starting $APP"
- systemctl start maybe-worker maybe-web
- msg_ok "Started $APP"
-
- msg_info "Cleaning Up"
- rm /tmp/v"$RELEASE".zip
- rm -f "$BACKUP_FILE"
- msg_ok "Cleanup Completed"
-
- echo "${RELEASE}" >/opt/maybe_version.txt
- msg_ok "Update Successful"
- else
- msg_ok "No update required. ${APP} is already at v${RELEASE}"
- fi
- exit
-}
-
-start
-build_container
-description
-
-msg_ok "Completed Successfully!\n"
-echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
-echo -e "${INFO}${YW} Access it using the following URL:${CL}"
-echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:3000${CL}"
diff --git a/ct/miniflux.sh b/ct/miniflux.sh
new file mode 100644
index 000000000..bb79d9a8e
--- /dev/null
+++ b/ct/miniflux.sh
@@ -0,0 +1,48 @@
+#!/usr/bin/env bash
+source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)
+# Copyright (c) 2021-2025 community-scripts ORG
+# Author: omernaveedxyz
+# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
+# Source: https://miniflux.app/
+
+APP="Miniflux"
+var_tags="${var_tags:-media}"
+var_cpu="${var_cpu:-2}"
+var_ram="${var_ram:-2048}"
+var_disk="${var_disk:-8}"
+var_os="${var_os:-debian}"
+var_version="${var_version:-13}"
+var_unprivileged="${var_unprivileged:-1}"
+
+header_info "$APP"
+variables
+color
+catch_errors
+
+function update_script() {
+ header_info
+ check_container_storage
+ check_container_resources
+ if [[ ! -f /etc/systemd/system/miniflux.service ]]; then
+ msg_error "No ${APP} Installation Found!"
+ exit
+ fi
+
+ msg_info "Updating ${APP} LXC"
+ $STD miniflux -flush-sessions -config-file /etc/miniflux.conf
+ $STD systemctl stop miniflux
+ fetch_and_deploy_gh_release "miniflux" "miniflux/v2" "binary" "latest"
+ $STD miniflux -migrate -config-file /etc/miniflux.conf
+ $STD systemctl start miniflux
+ msg_ok "Updated Successfully"
+ exit
+}
+
+start
+build_container
+description
+
+msg_ok "Completed Successfully!\n"
+echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
+echo -e "${INFO}${YW} Access it using the following URL:${CL}"
+echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:8080${CL}"
diff --git a/ct/nginxproxymanager.sh b/ct/nginxproxymanager.sh
index eb7259dee..6eb627a4d 100644
--- a/ct/nginxproxymanager.sh
+++ b/ct/nginxproxymanager.sh
@@ -1,17 +1,17 @@
#!/usr/bin/env bash
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)
-# Copyright (c) 2021-2025 tteck
-# Author: tteck (tteckster)
+# Copyright (c) 2021-2025 Community-Script ORG
+# Author: tteck (tteckster) | Co-Author: CrazyWolf13
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://nginxproxymanager.com/
APP="Nginx Proxy Manager"
var_tags="${var_tags:-proxy}"
var_cpu="${var_cpu:-2}"
-var_ram="${var_ram:-1024}"
-var_disk="${var_disk:-4}"
+var_ram="${var_ram:-2048}"
+var_disk="${var_disk:-8}"
var_os="${var_os:-debian}"
-var_version="${var_version:-12}"
+var_version="${var_version:-13}"
var_unprivileged="${var_unprivileged:-1}"
header_info "$APP"
@@ -27,53 +27,62 @@ function update_script() {
msg_error "No ${APP} Installation Found!"
exit
fi
- if ! command -v pnpm &>/dev/null; then
- msg_info "Installing pnpm"
- #export NODE_OPTIONS=--openssl-legacy-provider
- $STD npm install -g pnpm@8.15
- msg_ok "Installed pnpm"
+
+ if command -v node &>/dev/null; then
+ CURRENT_NODE_VERSION=$(node --version | cut -d'v' -f2 | cut -d'.' -f1)
+ if [[ "$CURRENT_NODE_VERSION" != "22" ]]; then
+ systemctl stop openresty
+ apt-get purge -y nodejs npm
+ apt-get autoremove -y
+ rm -rf /usr/local/bin/node /usr/local/bin/npm
+ rm -rf /usr/local/lib/node_modules
+ rm -rf ~/.npm
+ rm -rf /root/.npm
+ fi
fi
+
+ NODE_VERSION="22" NODE_MODULE="yarn" setup_nodejs
+
RELEASE=$(curl -fsSL https://api.github.com/repos/NginxProxyManager/nginx-proxy-manager/releases/latest |
grep "tag_name" |
awk '{print substr($2, 3, length($2)-4) }')
+
+ fetch_and_deploy_gh_release "nginxproxymanager" "NginxProxyManager/nginx-proxy-manager"
+
msg_info "Stopping Services"
systemctl stop openresty
systemctl stop npm
msg_ok "Stopped Services"
- msg_info "Cleaning Old Files"
- rm -rf /app \
+ msg_info "Cleaning old files"
+ $STD rm -rf /app \
/var/www/html \
/etc/nginx \
/var/log/nginx \
/var/lib/nginx \
- "$STD" /var/cache/nginx
- msg_ok "Cleaned Old Files"
+ /var/cache/nginx
+ msg_ok "Cleaned old files"
- msg_info "Downloading NPM v${RELEASE}"
- curl -fsSL "https://codeload.github.com/NginxProxyManager/nginx-proxy-manager/tar.gz/v${RELEASE}" | tar -xz
- cd nginx-proxy-manager-"${RELEASE}"
- msg_ok "Downloaded NPM v${RELEASE}"
-
- msg_info "Setting up Enviroment"
+ msg_info "Setting up Environment"
ln -sf /usr/bin/python3 /usr/bin/python
- ln -sf /usr/bin/certbot /opt/certbot/bin/certbot
ln -sf /usr/local/openresty/nginx/sbin/nginx /usr/sbin/nginx
ln -sf /usr/local/openresty/nginx/ /etc/nginx
- sed -i "s|\"version\": \"0.0.0\"|\"version\": \"$RELEASE\"|" backend/package.json
- sed -i "s|\"version\": \"0.0.0\"|\"version\": \"$RELEASE\"|" frontend/package.json
- sed -i 's+^daemon+#daemon+g' docker/rootfs/etc/nginx/nginx.conf
- NGINX_CONFS=$(find "$(pwd)" -type f -name "*.conf")
+ sed -i "s|\"version\": \"2.0.0\"|\"version\": \"$RELEASE\"|" /opt/nginxproxymanager/backend/package.json
+ sed -i "s|\"version\": \"2.0.0\"|\"version\": \"$RELEASE\"|" /opt/nginxproxymanager/frontend/package.json
+ sed -i 's+^daemon+#daemon+g' /opt/nginxproxymanager/docker/rootfs/etc/nginx/nginx.conf
+ NGINX_CONFS=$(find /opt/nginxproxymanager -type f -name "*.conf")
for NGINX_CONF in $NGINX_CONFS; do
sed -i 's+include conf.d+include /etc/nginx/conf.d+g' "$NGINX_CONF"
done
+
mkdir -p /var/www/html /etc/nginx/logs
- cp -r docker/rootfs/var/www/html/* /var/www/html/
- cp -r docker/rootfs/etc/nginx/* /etc/nginx/
- cp docker/rootfs/etc/letsencrypt.ini /etc/letsencrypt.ini
- cp docker/rootfs/etc/logrotate.d/nginx-proxy-manager /etc/logrotate.d/nginx-proxy-manager
+ cp -r /opt/nginxproxymanager/docker/rootfs/var/www/html/* /var/www/html/
+ cp -r /opt/nginxproxymanager/docker/rootfs/etc/nginx/* /etc/nginx/
+ cp /opt/nginxproxymanager/docker/rootfs/etc/letsencrypt.ini /etc/letsencrypt.ini
+ cp /opt/nginxproxymanager/docker/rootfs/etc/logrotate.d/nginx-proxy-manager /etc/logrotate.d/nginx-proxy-manager
ln -sf /etc/nginx/nginx.conf /etc/nginx/conf/nginx.conf
rm -f /etc/nginx/conf.d/dev.conf
+
mkdir -p /tmp/nginx/body \
/run/nginx \
/data/nginx \
@@ -90,29 +99,33 @@ function update_script() {
/var/lib/nginx/cache/public \
/var/lib/nginx/cache/private \
/var/cache/nginx/proxy_temp
+
chmod -R 777 /var/cache/nginx
chown root /tmp/nginx
+
echo resolver "$(awk 'BEGIN{ORS=" "} $1=="nameserver" {print ($2 ~ ":")? "["$2"]": $2}' /etc/resolv.conf);" >/etc/nginx/conf.d/include/resolvers.conf
+
if [ ! -f /data/nginx/dummycert.pem ] || [ ! -f /data/nginx/dummykey.pem ]; then
- $STD openssl req -new -newkey rsa:2048 -days 3650 -nodes -x509 -subj "/O=Nginx Proxy Manager/OU=Dummy Certificate/CN=localhost" -keyout /data/nginx/dummykey.pem -out /data/nginx/dummycert.pem
+ openssl req -new -newkey rsa:2048 -days 3650 -nodes -x509 -subj "/O=Nginx Proxy Manager/OU=Dummy Certificate/CN=localhost" -keyout /data/nginx/dummykey.pem -out /data/nginx/dummycert.pem &>/dev/null
fi
- mkdir -p /app/global /app/frontend/images
- cp -r backend/* /app
- cp -r global/* /app/global
- $STD python3 -m pip install --no-cache-dir --break-system-packages certbot-dns-cloudflare
- msg_ok "Setup Enviroment"
+
+ mkdir -p /app/frontend/images
+ cp -r /opt/nginxproxymanager/backend/* /app
+ msg_ok "Set up Environment"
msg_info "Building Frontend"
- cd ./frontend
- $STD pnpm install
- $STD pnpm upgrade
- $STD pnpm run build
- cp -r dist/* /app/frontend
- cp -r app-images/* /app/frontend/images
+ export NODE_OPTIONS="--max_old_space_size=2048 --openssl-legacy-provider"
+ cd /opt/nginxproxymanager/frontend
+ # Replace node-sass with sass in package.json before installation
+ sed -E -i 's/"node-sass" *: *"([^"]*)"/"sass": "\1"/g' package.json
+ $STD yarn install --network-timeout 600000
+ $STD yarn build
+ cp -r /opt/nginxproxymanager/frontend/dist/* /app/frontend
+ cp -r /opt/nginxproxymanager/frontend/public/images/* /app/frontend/images
msg_ok "Built Frontend"
msg_info "Initializing Backend"
- $STD rm -rf /app/config/default.json
+ rm -rf /app/config/default.json
if [ ! -f /app/config/production.json ]; then
cat <<'EOF' >/app/config/production.json
{
@@ -129,22 +142,37 @@ function update_script() {
EOF
fi
cd /app
- $STD pnpm install
+ $STD yarn install --network-timeout 600000
msg_ok "Initialized Backend"
+
+ msg_info "Updating Certbot"
+ [ -f /etc/apt/trusted.gpg.d/openresty-archive-keyring.gpg ] && rm -f /etc/apt/trusted.gpg.d/openresty-archive-keyring.gpg
+ [ -f /etc/apt/sources.list.d/openresty.list ] && rm -f /etc/apt/sources.list.d/openresty.list
+ [ ! -f /etc/apt/trusted.gpg.d/openresty.gpg ] && curl -fsSL https://openresty.org/package/pubkey.gpg | gpg --dearmor --yes -o /etc/apt/trusted.gpg.d/openresty.gpg
+ [ ! -f /etc/apt/sources.list.d/openresty.sources ] && cat <<'EOF' >/etc/apt/sources.list.d/openresty.sources
+Types: deb
+URIs: http://openresty.org/package/debian/
+Suites: bookworm
+Components: openresty
+Signed-By: /etc/apt/trusted.gpg.d/openresty.gpg
+EOF
+ $STD apt update
+ $STD apt -y install openresty
+ if [ -d /opt/certbot ]; then
+ $STD /opt/certbot/bin/pip install --upgrade pip setuptools wheel
+ $STD /opt/certbot/bin/pip install --upgrade certbot certbot-dns-cloudflare
+ fi
+ msg_ok "Updated Certbot"
msg_info "Starting Services"
sed -i 's/user npm/user root/g; s/^pid/#pid/g' /usr/local/openresty/nginx/conf/nginx.conf
- sed -i 's/su npm npm/su root root/g' /etc/logrotate.d/nginx-proxy-manager
- sed -i 's/include-system-site-packages = false/include-system-site-packages = true/g' /opt/certbot/pyvenv.cfg
+ sed -r -i 's/^([[:space:]]*)su npm npm/\1#su npm npm/g;' /etc/logrotate.d/nginx-proxy-manager
systemctl enable -q --now openresty
systemctl enable -q --now npm
+ systemctl restart openresty
msg_ok "Started Services"
- msg_info "Cleaning up"
- rm -rf ~/nginx-proxy-manager-*
- msg_ok "Cleaned"
-
- msg_ok "Updated Successfully"
+ msg_ok "Updated successfully!"
exit
}
diff --git a/ct/notesnook.sh b/ct/notesnook.sh
deleted file mode 100644
index 942db0c32..000000000
--- a/ct/notesnook.sh
+++ /dev/null
@@ -1,59 +0,0 @@
-#!/usr/bin/env bash
-source <(curl -s https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)
-# Copyright (c) 2021-2025 community-scripts ORG
-# Author: Slaviša Arežina (tremor021)
-# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
-# Source: https://github.com/streetwriters/notesnook
-
-APP="notesnook"
-var_tags="${var_tags:-os}"
-var_cpu="${var_cpu:-2}"
-var_ram="${var_ram:-3072}"
-var_disk="${var_disk:-10}"
-var_os="${var_os:-debian}"
-var_version="${var_version:-12}"
-var_unprivileged="${var_unprivileged:-1}"
-
-header_info "$APP"
-variables
-color
-catch_errors
-
-function update_script() {
- header_info
- check_container_storage
- check_container_resources
- if [[ ! -d /opt/notesnook ]]; then
- msg_error "No ${APP} Installation Found!"
- exit
- fi
-
- msg_info "Stopping Service"
- systemctl stop notesnook
- msg_ok "Stopped Service"
-
- msg_info "Updating ${APP} (Patience)"
- rm -rf /opt/notesnook
- fetch_and_deploy_gh_release "notesnook" "streetwriters/notesnook" "tarball"
- cd /opt/notesnook
- export NODE_OPTIONS="--max-old-space-size=2560"
- $STD npm install
- $STD npm run build:web
- msg_ok "Updated $APP"
-
- msg_info "Starting Service"
- systemctl start notesnook
- msg_ok "Started Service"
-
- msg_ok "Updated Successfully"
- exit
-}
-
-start
-build_container
-description
-
-msg_ok "Completed Successfully!\n"
-echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
-echo -e "${INFO}${YW} Access it using the following URL:${CL}"
-echo -e "${TAB}${GATEWAY}${BGN}https://${IP}${CL}"
diff --git a/ct/omada.sh b/ct/omada.sh
new file mode 100644
index 000000000..576797b71
--- /dev/null
+++ b/ct/omada.sh
@@ -0,0 +1,73 @@
+#!/usr/bin/env bash
+source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)
+# Copyright (c) 2021-2025 tteck
+# Author: tteck (tteckster)
+# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
+# Source: https://www.tp-link.com/us/support/download/omada-software-controller/
+
+APP="Omada"
+var_tags="${var_tags:-tp-link;controller}"
+var_cpu="${var_cpu:-2}"
+var_ram="${var_ram:-3072}"
+var_disk="${var_disk:-8}"
+var_os="${var_os:-debian}"
+var_version="${var_version:-13}"
+var_unprivileged="${var_unprivileged:-1}"
+
+header_info "$APP"
+variables
+color
+catch_errors
+
+function update_script() {
+ header_info
+ check_container_storage
+ check_container_resources
+ if [[ ! -d /opt/tplink ]]; then
+ msg_error "No ${APP} Installation Found!"
+ exit
+ fi
+
+ msg_info "Updating MongoDB"
+ if lscpu | grep -q 'avx'; then
+ MONGO_VERSION="8.0" setup_mongodb
+ else
+ msg_warn "No AVX detected: Using older MongoDB 4.4"
+ MONGO_VERSION="4.4" setup_mongodb
+ fi
+
+ msg_info "Checking if right Azul Zulu Java is installed"
+ java_version=$(java -version 2>&1 | awk -F[\"_] '/version/ {print $2}')
+ if [[ "$java_version" =~ ^1\.8\.* ]]; then
+ $STD apt remove --purge -y zulu8-jdk
+ $STD apt -y install zulu21-jre-headless
+ msg_ok "Updated Azul Zulu Java to 21"
+ else
+ msg_ok "Azul Zulu Java 21 already installed"
+ fi
+
+ msg_info "Updating Omada Controller"
+ OMADA_URL=$(curl -fsSL "https://support.omadanetworks.com/en/download/software/omada-controller/" |
+ grep -o 'https://static\.tp-link\.com/upload/software/[^"]*linux_x64[^"]*\.deb' |
+ head -n1)
+ OMADA_PKG=$(basename "$OMADA_URL")
+ if [ -z "$OMADA_PKG" ]; then
+ msg_error "Could not retrieve Omada package – server may be down."
+ exit
+ fi
+ curl -fsSL "$OMADA_URL" -o "$OMADA_PKG"
+ export DEBIAN_FRONTEND=noninteractive
+ $STD dpkg -i "$OMADA_PKG"
+ rm -f "$OMADA_PKG"
+ msg_ok "Updated successfully!"
+ exit
+}
+
+start
+build_container
+description
+
+msg_ok "Completed Successfully!\n"
+echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
+echo -e "${INFO}${YW} Access it using the following URL:${CL}"
+echo -e "${TAB}${GATEWAY}${BGN}https://${IP}:8043${CL}"
diff --git a/ct/opencloud.sh b/ct/opencloud.sh
deleted file mode 100644
index dd11f29a9..000000000
--- a/ct/opencloud.sh
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/usr/bin/env bash
-source <(curl -s https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)
-# Copyright (c) 2021-2025 community-scripts ORG
-# Author: vhsdream
-# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
-# Source: https://opencloud.eu
-
-APP="OpenCloud"
-var_tags="${var_tags:-files;cloud}"
-var_cpu="${var_cpu:-2}"
-var_ram="${var_ram:-2048}"
-var_disk="${var_disk:-6}"
-var_os="${var_os:-debian}"
-var_version="${var_version:-12}"
-var_unprivileged="${var_unprivileged:-1}"
-
-header_info "$APP"
-variables
-color
-catch_errors
-
-function update_script() {
- header_info
- check_container_storage
- check_container_resources
-
- if [[ ! -d /etc/opencloud ]]; then
- msg_error "No ${APP} Installation Found!"
- exit
- fi
-
- RELEASE=$(curl -s https://api.github.com/repos/opencloud-eu/opencloud/releases/latest | grep "tag_name" | awk '{print substr($2, 3, length($2)-4) }')
- if [[ "${RELEASE}" != "$(cat /etc/opencloud/version)" ]] || [[ ! -f /etc/opencloud/version ]]; then
- msg_info "Stopping $APP"
- systemctl stop opencloud opencloud-wopi
- msg_ok "Stopped $APP"
-
- msg_info "Updating $APP to v${RELEASE}"
- curl -fsSL "https://github.com/opencloud-eu/opencloud/releases/download/v${RELEASE}/opencloud-${RELEASE}-linux-amd64" -o /usr/bin/opencloud
- chmod +x /usr/bin/opencloud
- msg_ok "Updated $APP to v${RELEASE}"
-
- msg_info "Starting $APP"
- systemctl start opencloud opencloud-wopi
- msg_ok "Started $APP"
-
- echo "${RELEASE}" >/etc/opencloud/version
- msg_ok "Update Successful"
- else
- msg_ok "No update required. ${APP} is already at v${RELEASE}"
- fi
- exit
-}
-
-start
-build_container
-description
-
-msg_ok "Completed Successfully!\n"
-echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
-echo -e "${INFO}${YW} Access it using the following URL:${CL}"
-echo -e "${TAB}${GATEWAY}${BGN}https://${CL}"
diff --git a/ct/openwebui.sh b/ct/openwebui.sh
new file mode 100644
index 000000000..33b315297
--- /dev/null
+++ b/ct/openwebui.sh
@@ -0,0 +1,66 @@
+#!/usr/bin/env bash
+source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)
+# Copyright (c) 2021-2025 tteck
+# Author: tteck | Co-Author: havardthom | Co-Author: Slaviša Arežina (tremor021)
+# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
+# Source: https://openwebui.com/
+
+APP="Open WebUI"
+var_tags="${var_tags:-ai;interface}"
+var_cpu="${var_cpu:-4}"
+var_ram="${var_ram:-8192}"
+var_disk="${var_disk:-25}"
+var_os="${var_os:-debian}"
+var_version="${var_version:-13}"
+var_unprivileged="${var_unprivileged:-1}"
+
+header_info "$APP"
+variables
+color
+catch_errors
+
+function update_script() {
+ header_info
+ check_container_storage
+ check_container_resources
+ if [[ ! -d /root/.open-webui ]]; then
+ msg_error "No ${APP} Installation Found!"
+ exit
+ fi
+
+ if [ -x "/usr/bin/ollama" ]; then
+ msg_info "Updating Ollama"
+ OLLAMA_VERSION=$(ollama -v | awk '{print $NF}')
+    RELEASE=$(curl -fsSL https://api.github.com/repos/ollama/ollama/releases/latest | grep "tag_name" | awk '{print substr($2, 3, length($2)-4)}')
+ if [ "$OLLAMA_VERSION" != "$RELEASE" ]; then
+ msg_info "Stopping Service"
+ systemctl stop ollama
+ msg_ok "Stopped Service"
+ curl -fsSLO -C - https://ollama.com/download/ollama-linux-amd64.tgz
+ rm -rf /usr/lib/ollama
+ rm -rf /usr/bin/ollama
+ tar -C /usr -xzf ollama-linux-amd64.tgz
+ rm -rf ollama-linux-amd64.tgz
+ msg_info "Starting Service"
+ systemctl start ollama
+      msg_ok "Started Service"
+ msg_ok "Ollama updated to version $RELEASE"
+ else
+ msg_ok "Ollama is already up to date."
+ fi
+ fi
+
+ msg_info "Restarting Open WebUI to initiate update"
+ systemctl restart open-webui
+ msg_ok "Updated successfully!"
+ exit
+}
+
+start
+build_container
+description
+
+msg_ok "Completed Successfully!\n"
+echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
+echo -e "${INFO}${YW} Access it using the following URL:${CL}"
+echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:8080${CL}"
diff --git a/ct/ots.sh b/ct/ots.sh
deleted file mode 100644
index d089bd804..000000000
--- a/ct/ots.sh
+++ /dev/null
@@ -1,44 +0,0 @@
-#!/usr/bin/env bash
-source <(curl -s https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/build.func)
-# Copyright (c) 2021-2025 community-scripts ORG
-# Author: BvdBerg01
-# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
-# Source: https://github.com/Luzifer/ots
-
-APP="OTS"
-var_tags="${var_tags:-analytics}"
-var_cpu="${var_cpu:-1}"
-var_ram="${var_ram:-512}"
-var_disk="${var_disk:-3}"
-var_os="${var_os:-debian}"
-var_version="${var_version:-12}"
-var_unprivileged="${var_unprivileged:-1}"
-
-header_info "$APP"
-variables
-color
-catch_errors
-
-function update_script() {
- header_info
- check_container_storage
- check_container_resources
- if [[ ! -d /opt/ots ]]; then
- msg_error "No ${APP} Installation Found!"
- exit
- fi
- msg_info "Updating $APP LXC"
- $STD apt-get update
- $STD apt-get -y upgrade
- msg_ok "Updated $APP LXC"
- exit
-}
-
-start
-build_container
-description
-
-msg_ok "Completed Successfully!\n"
-echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
-echo -e "${INFO}${YW} Access it using the following URL:${CL}"
-echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:3000${CL}"
diff --git a/ct/pangolin.sh b/ct/pangolin.sh
new file mode 100644
index 000000000..87b3e4856
--- /dev/null
+++ b/ct/pangolin.sh
@@ -0,0 +1,82 @@
+#!/usr/bin/env bash
+source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/build.func)
+# Copyright (c) 2021-2025 community-scripts ORG
+# Author: Slaviša Arežina (tremor021)
+# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
+# Source: https://pangolin.net/
+
+APP="Pangolin"
+var_tags="${var_tags:-proxy}"
+var_cpu="${var_cpu:-2}"
+var_ram="${var_ram:-4096}"
+var_disk="${var_disk:-5}"
+var_os="${var_os:-debian}"
+var_version="${var_version:-13}"
+var_unprivileged="${var_unprivileged:-1}"
+
+header_info "$APP"
+variables
+color
+catch_errors
+
+function update_script() {
+ header_info
+ check_container_storage
+ check_container_resources
+ if [[ ! -d /opt/pangolin ]]; then
+ msg_error "No ${APP} Installation Found!"
+ exit
+ fi
+
+ if check_for_gh_release "pangolin" "fosrl/pangolin"; then
+ msg_info "Stopping ${APP}"
+ systemctl stop pangolin
+    msg_ok "Stopped ${APP}"
+
+ msg_info "Creating backup"
+ tar -czf /opt/pangolin_config_backup.tar.gz -C /opt/pangolin config
+ msg_ok "Created backup"
+
+ fetch_and_deploy_gh_release "pangolin" "fosrl/pangolin" "tarball"
+ fetch_and_deploy_gh_release "gerbil" "fosrl/gerbil" "singlefile" "latest" "/usr/bin" "gerbil_linux_amd64"
+
+ msg_info "Updating ${APP}"
+ export BUILD=oss
+ export DATABASE=sqlite
+ cd /opt/pangolin
+ $STD npm ci
+ echo "export * from \"./$DATABASE\";" > server/db/index.ts
+ echo "export const build = \"$BUILD\" as any;" > server/build.ts
+ cp tsconfig.oss.json tsconfig.json
+ $STD npm run next:build
+ $STD node esbuild.mjs -e server/index.ts -o dist/server.mjs -b $BUILD
+ $STD node esbuild.mjs -e server/setup/migrationsSqlite.ts -o dist/migrations.mjs
+ $STD npm run build:cli
+ cp -R .next/standalone ./
+
+    cat <<'EOF' >/usr/local/bin/pangctl
+#!/bin/sh
+cd /opt/pangolin
+./dist/cli.mjs "$@"
+EOF
+ chmod +x /usr/local/bin/pangctl ./dist/cli.mjs
+ cp server/db/names.json ./dist/names.json
+ msg_ok "Updated ${APP}"
+
+ msg_info "Restoring config"
+ tar -xzf /opt/pangolin_config_backup.tar.gz -C /opt/pangolin --overwrite
+ rm -f /opt/pangolin_config_backup.tar.gz
+ msg_ok "Restored config"
+ msg_ok "Updated successfully!"
+ fi
+ exit
+}
+
+start
+build_container
+description
+
+msg_ok "Completed Successfully!\n"
+echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
+echo -e "${INFO}${YW} Access it using the following URL:${CL}"
+echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:3002${CL}"
diff --git a/ct/petio.sh b/ct/petio.sh
new file mode 100644
index 000000000..f9c4d1370
--- /dev/null
+++ b/ct/petio.sh
@@ -0,0 +1,45 @@
+#!/usr/bin/env bash
+source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)
+# Copyright (c) 2021-2025 tteck
+# Author: tteck (tteckster)
+# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
+# Source: https://petio.tv/
+
+APP="Petio"
+var_tags="${var_tags:-media}"
+var_cpu="${var_cpu:-2}"
+var_ram="${var_ram:-1024}"
+var_disk="${var_disk:-4}"
+var_os="${var_os:-ubuntu}"
+var_version="${var_version:-24.04}"
+
+header_info "$APP"
+variables
+color
+catch_errors
+
+function update_script() {
+ header_info
+ check_container_storage
+ check_container_resources
+ if [[ ! -d /opt/Petio ]]; then
+ msg_error "No ${APP} Installation Found!"
+ exit
+ fi
+ msg_info "Updating $APP"
+ systemctl stop petio.service
+ curl -fsSL https://petio.tv/releases/latest -o petio-latest.zip
+ $STD unzip petio-latest.zip -d /opt/Petio
+ systemctl start petio.service
+ msg_ok "Updated $APP"
+ exit
+}
+
+start
+build_container
+description
+
+msg_ok "Completed Successfully!\n"
+echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
+echo -e "${INFO}${YW} Access it using the following URL:${CL}"
+echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:7777${CL}"
diff --git a/ct/postiz.sh b/ct/postiz.sh
deleted file mode 100644
index c647b4fb5..000000000
--- a/ct/postiz.sh
+++ /dev/null
@@ -1,64 +0,0 @@
-#!/usr/bin/env bash
-source <(curl -s https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/build.func)
-# Copyright (c) 2021-2025 community-scripts ORG
-# Author: Slaviša Arežina (tremor021)
-# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
-# Source: https://github.com/Casvt/Kapowarr
-
-APP="Postiz"
-var_tags="${var_tags:-Arr}"
-var_cpu="${var_cpu:-2}"
-var_ram="${var_ram:-3072}"
-var_disk="${var_disk:-8}"
-var_os="${var_os:-debian}"
-var_version="${var_version:-12}"
-var_unprivileged="${var_unprivileged:-1}"
-
-header_info "$APP"
-variables
-color
-catch_errors
-
-function update_script() {
- header_info
- check_container_storage
- check_container_resources
-
- if [[ ! -f /etc/systemd/system/postiz.service ]]; then
- msg_error "No ${APP} Installation Found!"
- exit
- fi
- RELEASE=$(curl -s https://api.github.com/repos/Casvt/Kapowarr/releases/latest | grep "tag_name" | awk '{print substr($2, 2, length($2)-3) }')
- if [[ "${RELEASE}" != "$(cat $HOME/.kapowarr)" ]] || [[ ! -f $HOME/.kapowarr ]]; then
- msg_info "Stopping $APP"
- systemctl stop kapowarr
- msg_ok "Stopped $APP"
-
- msg_info "Creating Backup"
- mv /opt/kapowarr/db /opt/
- msg_ok "Backup Created"
-
- msg_info "Updating $APP to ${RELEASE}"
- fetch_and_deploy_gh_release "kapowarr" "Casvt/Kapowarr"
- mv /opt/db /opt/kapowarr
- msg_ok "Updated $APP to ${RELEASE}"
-
- msg_info "Starting $APP"
- systemctl start kapowarr
- msg_ok "Started $APP"
-
- msg_ok "Update Successful"
- else
- msg_ok "No update required. ${APP} is already at ${RELEASE}"
- fi
- exit
-}
-
-start
-build_container
-description
-
-msg_ok "Completed Successfully!\n"
-echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
-echo -e "${INFO}${YW} Access it using the following URL:${CL}"
-echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:5656${CL}"
diff --git a/ct/reactive-resume.sh b/ct/reactive-resume.sh
deleted file mode 100644
index 52a2fdceb..000000000
--- a/ct/reactive-resume.sh
+++ /dev/null
@@ -1,100 +0,0 @@
-#!/usr/bin/env bash
-source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)
-# Copyright (c) 2021-2025 community-scripts ORG
-# Author: vhsdream
-# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
-# Source: https://rxresume.org
-
-APP="Reactive-Resume"
-var_tags="${var_tags:-documents}"
-var_cpu="${var_cpu:-2}"
-var_ram="${var_ram:-3072}"
-var_disk="${var_disk:-8}"
-var_os="${var_os:-debian}"
-var_version="${var_version:-12}"
-var_unprivileged="${var_unprivileged:-1}"
-
-header_info "$APP"
-variables
-color
-catch_errors
-
-function update_script() {
- header_info
- check_container_storage
- check_container_resources
-
- if [[ ! -f /etc/systemd/system/Reactive-Resume.service ]]; then
- msg_error "No $APP Installation Found!"
- exit
- fi
- RELEASE=$(curl -fsSL https://api.github.com/repos/lazy-media/Reactive-Resume/releases/latest | grep "tag_name" | awk '{print substr($2, 3, length($2)-4) }')
- if [[ ! -f "$HOME"/.reactive-resume ]] || [[ "$RELEASE" != "$(cat "$HOME"/.reactive-resume)" ]]; then
- msg_info "Stopping services"
- systemctl stop Reactive-Resume
- msg_ok "Stopped services"
-
- msg_info "Updating $APP to v${RELEASE}"
- cp /opt/"$APP"/.env /opt/rxresume.env
- fetch_and_deploy_gh_release "Reactive-Resume" "lazy-media/Reactive-Resume"
- cd /opt/"$APP"
- export PUPPETEER_SKIP_DOWNLOAD="true"
- export NEXT_TELEMETRY_DISABLED=1
- export CI="true"
- export NODE_ENV="production"
- $STD pnpm install --frozen-lockfile
- $STD pnpm run build
- $STD pnpm run prisma:generate
- mv /opt/rxresume.env /opt/"$APP"/.env
- msg_ok "Updated $APP to v$RELEASE"
-
- msg_info "Updating Minio"
- systemctl stop minio
- cd /tmp
- curl -fsSL https://dl.min.io/server/minio/release/linux-amd64/minio.deb -o minio.deb
- $STD dpkg -i minio.deb
- msg_ok "Updated Minio"
-
- msg_info "Updating Browserless (Patience)"
- systemctl stop browserless
- cp /opt/browserless/.env /opt/browserless.env
- rm -rf browserless
- brwsr_tmp=$(mktemp)
- TAG=$(curl -fsSL https://api.github.com/repos/browserless/browserless/tags?per_page=1 | grep "name" | awk '{print substr($2, 3, length($2)-4) }')
- curl -fsSL https://github.com/browserless/browserless/archive/refs/tags/v"$TAG".zip -O "$brwsr_tmp"
- $STD unzip "$brwsr_tmp"
- mv browserless-"$TAG"/ /opt/browserless
- cd /opt/browserless
- $STD npm install
- rm -rf src/routes/{chrome,edge,firefox,webkit}
- $STD node_modules/playwright-core/cli.js install --with-deps chromium
- $STD npm run build
- $STD npm run build:function
- $STD npm prune production
- mv /opt/browserless.env /opt/browserless/.env
- msg_ok "Updated Browserless"
-
- msg_info "Restarting services"
- systemctl start minio Reactive-Resume browserless
- msg_ok "Restarted services"
-
- msg_info "Cleaning Up"
- rm -f /tmp/minio.deb
- rm -f "$brwsr_tmp"
- msg_ok "Cleanup Completed"
-
- msg_ok "Update Successful"
- else
- msg_ok "No update required. $APP is already at v$RELEASE"
- fi
- exit
-}
-
-start
-build_container
-description
-
-msg_ok "Completed Successfully!\n"
-echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
-echo -e "${INFO}${YW} Access it using the following URL:${CL}"
-echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:3000${CL}"
diff --git a/ct/romm.sh b/ct/romm.sh
new file mode 100644
index 000000000..129da9a34
--- /dev/null
+++ b/ct/romm.sh
@@ -0,0 +1,72 @@
+#!/usr/bin/env bash
+source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)
+# Copyright (c) 2021-2025 community-scripts ORG
+# Author: MickLesk (CanbiZ)
+# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
+# Source: https://romm.app
+
+APP="RomM"
+var_tags="${var_tags:-emulation}"
+var_cpu="${var_cpu:-2}"
+var_ram="${var_ram:-4096}"
+var_disk="${var_disk:-20}"
+var_os="${var_os:-ubuntu}"
+var_version="${var_version:-24.04}"
+var_unprivileged="${var_unprivileged:-1}"
+var_fuse="${var_fuse:-1}"
+
+header_info "$APP"
+variables
+color
+catch_errors
+
+function update_script() {
+ header_info
+ check_container_storage
+ check_container_resources
+
+ if [[ ! -d /opt/romm ]]; then
+ msg_error "No ${APP} Installation Found!"
+ exit
+ fi
+
+ msg_info "Stopping $APP"
+ systemctl stop romm
+ systemctl stop nginx
+ msg_ok "Stopped $APP"
+
+ msg_info "Updating $APP"
+ cd /opt/romm/app
+ git pull
+
+ # Update backend
+ cd /opt/romm/app
+ source /opt/romm/venv/bin/activate
+ pip install --upgrade pip
+ pip install poetry
+ poetry install
+
+ # Update frontend
+ cd /opt/romm/app/frontend
+ npm install
+ npm run build
+
+ echo "Updated on $(date)" >/opt/romm/version.txt
+ msg_ok "Updated $APP"
+
+ msg_info "Starting $APP"
+ systemctl start romm
+ systemctl start nginx
+ msg_ok "Started $APP"
+ msg_ok "Update Successful"
+ exit
+}
+
+start
+build_container
+description
+
+msg_ok "Completed Successfully!\n"
+echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
+echo -e "${INFO}${YW} Access it using the following URL:${CL}"
+echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:8080${CL}"
diff --git a/ct/rybbit.sh b/ct/rybbit.sh
index e9164be40..62447664c 100644
--- a/ct/rybbit.sh
+++ b/ct/rybbit.sh
@@ -1,5 +1,5 @@
#!/usr/bin/env bash
-source <(curl -s https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/build.func)
+source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/build.func)
# Copyright (c) 2021-2025 community-scripts ORG
# Author: MickLesk (CanbiZ)
# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
@@ -11,7 +11,7 @@ var_cpu="${var_cpu:-2}"
var_ram="${var_ram:-2048}"
var_disk="${var_disk:-5}"
var_os="${var_os:-debian}"
-var_version="${var_version:-12}"
+var_version="${var_version:-13}"
var_unprivileged="${var_unprivileged:-1}"
header_info "$APP"
diff --git a/ct/salt.sh b/ct/salt.sh
deleted file mode 100644
index 5efed33c6..000000000
--- a/ct/salt.sh
+++ /dev/null
@@ -1,50 +0,0 @@
-#!/usr/bin/env bash
-source <(curl -s https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/build.func)
-# Copyright (c) 2021-2025 community-scripts ORG
-# Author: bvdberg01
-# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
-# Source: https://github.com/saltstack/salt
-
-APP="Salt"
-var_tags="${var_tags:-automations}"
-var_cpu="${var_cpu:-1}"
-var_ram="${var_ram:-1024}"
-var_disk="${var_disk:-3}"
-var_os="${var_os:-debian}"
-var_version="${var_version:-12}"
-var_unprivileged="${var_unprivileged:-1}"
-
-header_info "$APP"
-variables
-color
-catch_errors
-
-function update_script() {
- header_info
- check_container_storage
- check_container_resources
-
- if [[ ! -d /etc/salt ]]; then
- msg_error "No ${APP} Installation Found!"
- exit
- fi
-
- RELEASE=$(curl -fsSL https://api.github.com/repos/saltstack/salt/releases/latest | jq -r .tag_name | sed 's/^v//')
- if [[ ! -f /opt/${APP}_version.txt ]] || [[ "${RELEASE}" != "$(cat /opt/${APP}_version.txt)" ]]; then
- msg_info "Updating $APP to ${RELEASE}"
- sed -i "s/^\(Pin: version \).*/\1${RELEASE}/" /etc/apt/preferences.d/salt-pin-1001
- $STD apt-get update
- $STD apt-get upgrade -y
- echo "${RELEASE}" >/opt/${APP}_version.txt
- msg_ok "Updated ${APP} to ${RELEASE}"
- else
- msg_ok "${APP} is already up to date (${RELEASE})"
- fi
-}
-
-start
-build_container
-description
-
-msg_ok "Completed Successfully!\n"
-echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
diff --git a/ct/scraparr.sh b/ct/scraparr.sh
deleted file mode 100644
index b7eeee7e5..000000000
--- a/ct/scraparr.sh
+++ /dev/null
@@ -1,66 +0,0 @@
-#!/usr/bin/env bash
-source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)
-# Copyright (c) 2021-2025 community-scripts ORG
-# Author: JasonGreenC
-# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
-# Source: https://github.com/thecfu/scraparr
-
-APP="Scraparr"
-var_tags="${var_tags:-arr;monitoring}"
-var_cpu="${var_cpu:-2}"
-var_ram="${var_ram:-1024}"
-var_disk="${var_disk:-4}"
-var_os="${var_os:-debian}"
-var_version="${var_version:-12}"
-var_unprivileged="${var_unprivileged:-1}"
-
-header_info "$APP"
-variables
-color
-catch_errors
-
-function update_script() {
- header_info
- check_container_storage
- check_container_resources
-
- if [[ ! -d /opt/scraparr/ ]]; then
- msg_error "No ${APP} Installation Found!"
- exit
- fi
-
- RELEASE=$(curl -fsSL https://api.github.com/repos/thecfu/scraparr/releases/latest | grep "tag_name" | awk '{print substr($2, 3, length($2)-4) }')
- if [[ ! -f "${HOME}/.scrappar" ]] || [[ "${RELEASE}" != "$(cat "${HOME}"/.scrappar)" ]]; then
- msg_info "Stopping Services"
- systemctl stop scraparr
- msg_ok "Services Stopped"
-
- PYTHON_VERSION="3.12" setup_uv
- fetch_and_deploy_gh_release "scrappar" "thecfu/scraparr" "tarball" "latest" "/opt/scraparr"
-
- msg_info "Updating ${APP} to ${RELEASE}"
- cd /opt/scraparr || exit
- $STD uv venv /opt/scraparr/.venv
- $STD /opt/scraparr/.venv/bin/python -m ensurepip --upgrade
- $STD /opt/scraparr/.venv/bin/python -m pip install --upgrade pip
- $STD /opt/scraparr/.venv/bin/python -m pip install -r /opt/scraparr/src/scraparr/requirements.txt
- chmod -R 755 /opt/scraparr
- msg_ok "Updated ${APP} to v${RELEASE}"
-
- msg_info "Starting Services"
- systemctl start scraparr
- msg_ok "Services Started"
- else
- msg_ok "No update required. ${APP} is already at v${RELEASE}"
- fi
- exit
-}
-
-start
-build_container
-description
-
-msg_ok "Completed Successfully!\n"
-echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
-echo -e "${INFO}${YW} Access it using the following URL:${CL}"
-echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:7100${CL}"
diff --git a/ct/snowshare.sh b/ct/snowshare.sh
new file mode 100644
index 000000000..9241f44db
--- /dev/null
+++ b/ct/snowshare.sh
@@ -0,0 +1,60 @@
+#!/usr/bin/env bash
+source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)
+# Copyright (c) 2021-2025 community-scripts ORG
+# Author: TuroYT
+# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
+# Source: https://github.com/TuroYT/snowshare
+
+APP="SnowShare"
+var_tags="${var_tags:-file-sharing}"
+var_cpu="${var_cpu:-1}"
+var_ram="${var_ram:-1024}"
+var_disk="${var_disk:-5}"
+var_os="${var_os:-debian}"
+var_version="${var_version:-13}"
+var_unprivileged="${var_unprivileged:-1}"
+
+header_info "$APP"
+variables
+color
+catch_errors
+
+function update_script() {
+ header_info
+ check_container_storage
+ check_container_resources
+ if [[ ! -d /opt/snowshare ]]; then
+ msg_error "No ${APP} Installation Found!"
+ exit
+ fi
+
+ if check_for_gh_release "snowshare" "TuroYT/snowshare"; then
+ msg_info "Stopping Service"
+ systemctl stop snowshare
+ msg_ok "Stopped Service"
+
+ fetch_and_deploy_gh_release "snowshare" "TuroYT/snowshare"
+
+ msg_info "Updating Snowshare"
+ cd /opt/snowshare
+ $STD npm ci
+ $STD npx prisma generate
+ $STD npm run build
+ msg_ok "Updated Snowshare"
+
+ msg_info "Starting Service"
+ systemctl start snowshare
+ msg_ok "Started Service"
+ msg_ok "Updated successfully!"
+ fi
+ exit
+}
+
+start
+build_container
+description
+
+msg_ok "Completed Successfully!\n"
+echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
+echo -e "${INFO}${YW} Access it using the following URL:${CL}"
+echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:3000${CL}"
diff --git a/ct/tracktor.sh b/ct/tracktor.sh
new file mode 100644
index 000000000..53c37c1c1
--- /dev/null
+++ b/ct/tracktor.sh
@@ -0,0 +1,92 @@
+#!/usr/bin/env bash
+source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)
+# Copyright (c) 2021-2025 community-scripts ORG
+# Author: CrazyWolf13
+# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
+# Source: https://tracktor.bytedge.in/
+
+APP="tracktor"
+var_tags="${var_tags:-car;monitoring}"
+var_cpu="${var_cpu:-2}"
+var_ram="${var_ram:-4096}"
+var_disk="${var_disk:-6}"
+var_os="${var_os:-debian}"
+var_version="${var_version:-13}"
+var_unprivileged="${var_unprivileged:-1}"
+
+header_info "$APP"
+variables
+color
+catch_errors
+
+function update_script() {
+ header_info
+ check_container_storage
+ check_container_resources
+ if [[ ! -d /opt/tracktor ]]; then
+ msg_error "No ${APP} Installation Found!"
+ exit
+ fi
+
+ if check_for_gh_release "tracktor" "javedh-dev/tracktor"; then
+ msg_info "Stopping Service"
+ systemctl stop tracktor
+ msg_ok "Stopped Service"
+
+ msg_info "Correcting Services"
+ if [ -f /opt/tracktor/app/backend/.env ]; then
+ mv /opt/tracktor/app/backend/.env /opt/tracktor.env
+ echo 'AUTH_PIN=123456' >> /opt/tracktor.env
+ sed -i 's|^EnvironmentFile=.*|EnvironmentFile=/opt/tracktor.env|' /etc/systemd/system/tracktor.service
+ systemctl daemon-reload
+ fi
+ if [ ! -d "/opt/tracktor-data/uploads" ]; then
+ mkdir -p /opt/tracktor-data/{uploads,logs}
+ EXISTING_AUTH_PIN=$(grep '^AUTH_PIN=' /opt/tracktor.env 2>/dev/null | cut -d'=' -f2)
+ AUTH_PIN=${EXISTING_AUTH_PIN:-123456}
+      cat <<EOF >/opt/tracktor.env
+NODE_ENV=production
+DB_PATH=/opt/tracktor-data/tracktor.db
+UPLOADS_DIR="/opt/tracktor-data/uploads"
+LOG_DIR="/opt/tracktor-data/logs"
+# If server host is not set by default it will run on all interfaces - 0.0.0.0
+# SERVER_HOST=""
+SERVER_PORT=3000
+# Set this if you want to secure your endpoints otherwise default will be "*"
+CORS_ORIGINS="*"
+# Set this if you are using backend and frontend separately.
+# PUBLIC_API_BASE_URL=""
+LOG_REQUESTS=true
+LOG_LEVEL="info"
+AUTH_PIN=${AUTH_PIN}
+# PUBLIC_DEMO_MODE=false
+# FORCE_DATA_SEED=false
+EOF
+ fi
+ msg_ok "Corrected Services"
+
+ setup_nodejs
+ CLEAN_INSTALL=1 fetch_and_deploy_gh_release "tracktor" "javedh-dev/tracktor" "tarball" "latest" "/opt/tracktor"
+
+ msg_info "Updating tracktor"
+ cd /opt/tracktor
+ $STD npm install
+ $STD npm run build
+ msg_ok "Updated tracktor"
+
+ msg_info "Starting Service"
+ systemctl start tracktor
+ msg_ok "Started Service"
+ msg_ok "Updated Successfully"
+ fi
+ exit
+}
+
+start
+build_container
+description
+
+msg_ok "Completed Successfully!\n"
+echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
+echo -e "${INFO}${YW} Access it using the following URL:${CL}"
+echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:3000${CL}"
diff --git a/ct/transmission-openvpn.sh b/ct/transmission-openvpn.sh
new file mode 100644
index 000000000..647339241
--- /dev/null
+++ b/ct/transmission-openvpn.sh
@@ -0,0 +1,86 @@
+#!/usr/bin/env bash
+source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)
+# Copyright (c) 2021-2025 community-scripts ORG
+# Author: SunFlowerOwl
+# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
+# Source: https://github.com/haugene/docker-transmission-openvpn
+
+APP="transmission-openvpn"
+var_tags="${var_tags:-torrent;vpn}"
+var_cpu="${var_cpu:-1}"
+var_ram="${var_ram:-512}"
+var_disk="${var_disk:-8}"
+var_os="${var_os:-debian}"
+var_version="${var_version:-13}"
+var_unprivileged="${var_unprivileged:-1}"
+var_tun="${var_tun:-yes}"
+
+header_info "$APP"
+variables
+color
+catch_errors
+
+function update_script() {
+ header_info
+ check_container_storage
+ check_container_resources
+ if [[ ! -d /opt/transmission-openvpn/ ]]; then
+ msg_error "No ${APP} Installation Found!"
+ exit
+ fi
+
+ msg_info "Updating Dependencies"
+ $STD apt-get update
+ $STD apt-get -y upgrade
+ msg_ok "Updated Dependencies"
+
+ if check_for_gh_release "docker-transmission-openvpn" "haugene/docker-transmission-openvpn"; then
+ msg_info "Stopping Service"
+ systemctl stop openvpn-custom
+ msg_ok "Stopped Service"
+
+ msg_info "Creating Backup"
+ mv /etc/openvpn/custom /opt/transmission-openvpn/
+ rm -f /opt/transmission-openvpn/config-failure.sh
+ msg_ok "Created Backup"
+
+ fetch_and_deploy_gh_release "docker-transmission-openvpn" "haugene/docker-transmission-openvpn" "tarball" "latest" "/opt/docker-transmission-openvpn"
+
+ msg_info "Configuring transmission-openvpn"
+ rm -rf /etc/openvpn/* /etc/transmission/* /etc/scripts/* /opt/privoxy/*
+ cp -r /opt/docker-transmission-openvpn/openvpn/* /etc/openvpn/
+ cp -r /opt/docker-transmission-openvpn/transmission/* /etc/transmission/
+ cp -r /opt/docker-transmission-openvpn/scripts/* /etc/scripts/
+ cp -r /opt/docker-transmission-openvpn/privoxy/scripts/* /opt/privoxy/
+ chmod +x /etc/openvpn/*.sh
+ chmod +x /etc/scripts/*.sh
+ chmod +x /opt/privoxy/*.sh
+ msg_ok "Configured transmission-openvpn"
+
+ msg_info "Restoring Backup"
+ cp -r /opt/transmission-openvpn/custom/* /etc/openvpn/custom/
+ msg_ok "Restored Backup"
+
+ msg_info "Starting Service"
+ systemctl start openvpn-custom
+ msg_ok "Started Service"
+ fi
+
+ msg_info "Cleaning up"
+ $STD apt -y autoremove
+ $STD apt -y autoclean
+ $STD apt -y clean
+ rm -rf /opt/docker-transmission-openvpn
+ msg_ok "Cleaned"
+
+ exit
+}
+
+start
+build_container
+description
+
+msg_ok "Completed Successfully!\n"
+echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
+echo -e "${INFO}${YW} Access it using the following URL:${CL}"
+echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:9091${CL}"
diff --git a/ct/ubuntu.sh b/ct/ubuntu.sh
index 112ab19de..f208d7d09 100644
--- a/ct/ubuntu.sh
+++ b/ct/ubuntu.sh
@@ -1,5 +1,5 @@
#!/usr/bin/env bash
-source <(curl -s https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/build.func)
+source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/build.func)
# source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/github.func)
# Copyright (c) 2021-2025 tteck
# Author: tteck (tteckster)
@@ -14,6 +14,7 @@ var_ram="${var_ram:-512}"
var_disk="${var_disk:-2}"
var_os="${var_os:-ubuntu}"
var_version="${var_version:-24.04}"
+var_unprivileged="${var_unprivileged:-0}"
header_info "$APP"
variables
@@ -21,18 +22,18 @@ color
catch_errors
function update_script() {
- header_info
- check_container_storage
- check_container_resources
- if [[ ! -d /var ]]; then
- msg_error "No ${APP} Installation Found!"
+ header_info
+ check_container_storage
+ check_container_resources
+ if [[ ! -d /var ]]; then
+ msg_error "No ${APP} Installation Found!"
+ exit
+ fi
+ msg_info "Updating ${APP} LXC"
+ $STD apt-get update
+ $STD apt-get -y upgrade
+ msg_ok "Updated ${APP} LXC"
exit
- fi
- msg_info "Updating ${APP} LXC"
- $STD apt-get update
- $STD apt-get -y upgrade
- msg_ok "Updated ${APP} LXC"
- exit
}
start
diff --git a/ct/vikunja.sh b/ct/vikunja.sh
deleted file mode 100644
index 792e3b1e5..000000000
--- a/ct/vikunja.sh
+++ /dev/null
@@ -1,78 +0,0 @@
-#!/usr/bin/env bash
-source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)
-# Copyright (c) 2021-2025 community-scripts ORG
-# Author: MickLesk (Canbiz)
-# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
-# Source: https://vikunja.io/
-
-APP="Vikunja"
-var_tags="${var_tags:-todo-app}"
-var_cpu="${var_cpu:-1}"
-var_ram="${var_ram:-1024}"
-var_disk="${var_disk:-4}"
-var_os="${var_os:-debian}"
-var_version="${var_version:-12}"
-var_unprivileged="${var_unprivileged:-1}"
-
-header_info "$APP"
-variables
-color
-catch_errors
-
-function update_script() {
- header_info
- check_container_storage
- check_container_resources
- if [[ ! -d /opt/vikunja ]]; then
- msg_error "No ${APP} Installation Found!"
- exit
- fi
-
- if whiptail --backtitle "Vikunja Update" --title "🔄 VERSION SELECTION" --yesno \
- "Choose the version type to update to:\n\n• STABLE: Recommended for production use\n• UNSTABLE: Latest development version\n\n⚠️ WARNING: Unstable versions may contain bugs,\nbe incomplete, or cause system instability.\nOnly use for testing purposes.\n\nDo you want to use the UNSTABLE version?\n(No = Stable, Yes = Unstable)" 16 70 --defaultno
- then
- msg_info "Selecting version"
- RELEASE="unstable"
- FILENAME="vikunja-${RELEASE}-x86_64.deb"
- msg_ok "Selected UNSTABLE version"
- else
- msg_info "Selecting version"
- RELEASE=$(curl -fsSL https://dl.vikunja.io/vikunja/ | grep -oP 'href="/vikunja/\K[0-9]+\.[0-9]+\.[0-9]+' | sort -V | tail -n 1)
- FILENAME="vikunja-${RELEASE}-amd64.deb"
- msg_ok "Selected STABLE version: ${RELEASE}"
- fi
-
- if [[ ! -f /opt/${APP}_version.txt ]] || [[ "${RELEASE}" != "$(cat /opt/${APP}_version.txt)" ]]; then
- msg_info "Stopping ${APP}"
- systemctl stop vikunja
- msg_ok "Stopped ${APP}"
- msg_info "Updating ${APP} to ${RELEASE}"
- cd /opt
- rm -rf /opt/vikunja/vikunja
- rm -rf "/opt/$FILENAME"
- curl -fsSL "https://dl.vikunja.io/vikunja/$RELEASE/$FILENAME" -o $(basename "https://dl.vikunja.io/vikunja/$RELEASE/$FILENAME")
- export DEBIAN_FRONTEND=noninteractive
- $STD dpkg -i $FILENAME
- echo "${RELEASE}" >/opt/${APP}_version.txt
- msg_ok "Updated ${APP}"
- msg_info "Starting ${APP}"
- systemctl start vikunja
- msg_ok "Started ${APP}"
- msg_info "Cleaning Up"
- rm -rf /opt/$FILENAME
- msg_ok "Cleaned"
- msg_ok "Updated Successfully"
- else
- msg_ok "No update required. ${APP} is already at ${RELEASE}"
- fi
- exit
-}
-
-start
-build_container
-description
-
-msg_ok "Completed Successfully!\n"
-echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
-echo -e "${INFO}${YW} Access it using the following URL:${CL}"
-echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:3456${CL}"
diff --git a/ct/viseron.sh b/ct/viseron.sh
index 8c12645ad..be4377e55 100644
--- a/ct/viseron.sh
+++ b/ct/viseron.sh
@@ -1,13 +1,13 @@
#!/usr/bin/env bash
-source <(curl -s https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/build.func)
+source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/build.func)
APP="Viseron"
var_tags="${var_tags:-nvr}"
var_cpu="${var_cpu:-2}"
var_ram="${var_ram:-2048}"
-var_disk="${var_disk:-10}"
+var_disk="${var_disk:-25}"
var_os="${var_os:-debian}"
-var_version="${var_version:-12}"
+var_version="${var_version:-13}"
var_unprivileged="${var_unprivileged:-0}"
header_info "$APP"
diff --git a/ct/wallabag.sh b/ct/wallabag.sh
index aded3f3ea..9325189a7 100644
--- a/ct/wallabag.sh
+++ b/ct/wallabag.sh
@@ -1,5 +1,5 @@
#!/usr/bin/env bash
-source <(curl -s https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/build.func)
+source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/build.func)
# Copyright (c) 2021-2025 community-scripts ORG
# Author: MickLesk (CanbiZ)
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
@@ -25,36 +25,36 @@ color
catch_errors
function update_script() {
- header_info
- check_container_storage
- check_container_resources
- if [[ ! -d /opt/wallabag ]]; then
- msg_error "No ${APP} Installation Found!"
+ header_info
+ check_container_storage
+ check_container_resources
+ if [[ ! -d /opt/wallabag ]]; then
+ msg_error "No ${APP} Installation Found!"
+ exit
+ fi
+ RELEASE=$(curl -fsSL https://api.github.com/repos/wallabag/wallabag/releases/latest | grep "tag_name" | awk '{print substr($2, 3, length($2)-4) }')
+ if [[ "${RELEASE}" != "$(cat /opt/${APP}_version.txt)" ]] || [[ ! -f /opt/${APP}_version.txt ]]; then
+ msg_info "Stopping $APP"
+
+ msg_ok "Stopped $APP"
+
+ msg_info "Updating $APP to v${RELEASE}"
+
+ msg_ok "Updated $APP to v${RELEASE}"
+
+ msg_info "Starting $APP"
+
+ msg_ok "Started $APP"
+
+ msg_info "Cleaning Up"
+ rm -rf /opt/v${RELEASE}.zip
+ rm -rf /opt/paperless-ai_bak
+ msg_ok "Cleanup Completed"
+ msg_ok "Update Successful"
+ else
+ msg_ok "No update required. ${APP} is already at v${RELEASE}"
+ fi
exit
- fi
- RELEASE=$(curl -s https://api.github.com/repos/wallabag/wallabag/releases/latest | grep "tag_name" | awk '{print substr($2, 3, length($2)-4) }')
- if [[ "${RELEASE}" != "$(cat /opt/${APP}_version.txt)" ]] || [[ ! -f /opt/${APP}_version.txt ]]; then
- msg_info "Stopping $APP"
-
- msg_ok "Stopped $APP"
-
- msg_info "Updating $APP to v${RELEASE}"
-
- msg_ok "Updated $APP to v${RELEASE}"
-
- msg_info "Starting $APP"
-
- msg_ok "Started $APP"
-
- msg_info "Cleaning Up"
- rm -rf /opt/v${RELEASE}.zip
- rm -rf /opt/paperless-ai_bak
- msg_ok "Cleanup Completed"
- msg_ok "Update Successful"
- else
- msg_ok "No update required. ${APP} is already at v${RELEASE}"
- fi
- exit
}
start
build_container
diff --git a/docs/misc/README.md b/docs/misc/README.md
new file mode 100644
index 000000000..9cf5d9333
--- /dev/null
+++ b/docs/misc/README.md
@@ -0,0 +1,51 @@
+# Misc Documentation
+
+This directory contains miscellaneous documentation for various components of the Proxmox Community Scripts project.
+
+## Documentation Categories
+
+### 📁 [build.func/](./build.func/)
+Comprehensive documentation for the `build.func` script - the core orchestration script for Proxmox LXC container creation.
+
+**Contents:**
+- Visual execution flowcharts
+- Complete environment variables reference
+- Function documentation
+- Detailed execution flows
+- System architecture guide
+- Practical usage examples
+
+### 📁 [core.func/](./core.func/)
+Fundamental utility functions and system checks that form the foundation for all other scripts.
+
+**Contents:**
+- Visual execution flowcharts
+- Complete function reference
+- Practical usage examples
+- Integration with other components
+
+### 📁 [error_handler.func/](./error_handler.func/)
+Comprehensive error handling and signal management for Proxmox Community Scripts.
+
+**Contents:**
+- Visual execution flowcharts
+- Complete function reference
+- Practical usage examples
+- Integration with other components
+
+### 📁 [api.func/](./api.func/)
+Proxmox API integration and diagnostic reporting functionality for Community Scripts.
+
+**Contents:**
+- Visual execution flowcharts
+- Complete function reference
+- Practical usage examples
+- Integration with other components
+
+## Other Documentation
+
+Additional miscellaneous documentation may be added here as the project grows.
+
+---
+
+*This directory contains specialized documentation for specific components of the Proxmox Community Scripts project.*
diff --git a/docs/misc/api.func/API_FLOWCHART.md b/docs/misc/api.func/API_FLOWCHART.md
new file mode 100644
index 000000000..a46cd56e9
--- /dev/null
+++ b/docs/misc/api.func/API_FLOWCHART.md
@@ -0,0 +1,342 @@
+# api.func Execution Flowchart
+
+## Main API Communication Flow
+
+```
+┌─────────────────────────────────────────────────────────────────────────────────┐
+│ API Communication Initialization │
+│ Entry point when api.func functions are called by installation scripts │
+└─────────────────────┬───────────────────────────────────────────────────────────┘
+ │
+ ▼
+┌─────────────────────────────────────────────────────────────────────────────────┐
+│ Prerequisites Check │
+│ │
+│ ┌─────────────────────────────────────────────────────────────────────────────┐ │
+│ │ Prerequisites Validation │ │
+│ │ │ │
+│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │
+│ │ │ Check curl │ │ Check │ │ Check │ │ │
+│ │ │ Availability │ │ Diagnostics │ │ Random UUID │ │ │
+│ │ │ │ │ Setting │ │ │ │
+│ │ │ • command -v │ │ • DIAGNOSTICS │ │ • RANDOM_UUID │ │
+│ │ │ curl │ │ = "yes" │ │ not empty │ │
+│ │ │ • Return if │ │ • Return if │ │ • Return if │ │
+│ │ │ not found │ │ disabled │ │ not set │ │
+│ │ │ │ │ │ │ │ │
+│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │
+│ └─────────────────────────────────────────────────────────────────────────────┘ │
+└─────────────────────────────────────────────────────────────────────────────────┘
+ │
+ ▼
+┌─────────────────────────────────────────────────────────────────────────────────┐
+│ Data Collection │
+│ │
+│ ┌─────────────────────────────────────────────────────────────────────────────┐ │
+│ │ System Information Gathering │ │
+│ │ │ │
+│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │
+│ │ │ Get PVE │ │ Collect │ │ Prepare JSON │ │ │
+│ │ │ Version │ │ Environment │ │ Payload │ │
+│ │ │ │ │ Variables │ │ │ │
+│ │ │ • pveversion │ │ • CT_TYPE │ │ • Create JSON │ │
+│ │ │ command │ │ • DISK_SIZE │ │ structure │ │
+│ │ │ • Parse version │ │ • CORE_COUNT │ │ • Include all │ │
+│ │ │ • Extract │ │ • RAM_SIZE │ │ variables │ │
+│ │ │ major.minor │ │ • var_os │ │ • Format for API │ │
+│ │ │ │ │ • var_version │ │ │ │
+│ │ │ │ │ • NSAPP │ │ │ │
+│ │ │ │ │ • METHOD │ │ │ │
+│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │
+│ └─────────────────────────────────────────────────────────────────────────────┘ │
+└─────────────────────────────────────────────────────────────────────────────────┘
+ │
+ ▼
+┌─────────────────────────────────────────────────────────────────────────────────┐
+│ API Request Execution │
+│ │
+│ ┌─────────────────────────────────────────────────────────────────────────────┐ │
+│ │ HTTP Request Processing │ │
+│ │ │ │
+│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │
+│ │ │ Prepare │ │ Execute │ │ Handle │ │ │
+│ │ │ Request │ │ HTTP Request │ │ Response │ │
+│ │ │ │ │ │ │ │ │
+│ │ │ • Set API URL │ │ • curl -s -w │ │ • Capture HTTP │ │
+│ │ │ • Set headers │ │ "%{http_code}" │ │ status code │ │
+│ │ │ • Set payload │ │ • POST request │ │ • Store response │ │
+│ │ │ • Content-Type │ │ • JSON data │ │ • Handle errors │ │
+│ │ │ application/ │ │ • Follow │ │ gracefully │ │
+│ │ │ json │ │ redirects │ │ │ │
+│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │
+│ └─────────────────────────────────────────────────────────────────────────────┘ │
+└─────────────────────────────────────────────────────────────────────────────────┘
+```
+
+## LXC API Reporting Flow
+
+```
+┌─────────────────────────────────────────────────────────────────────────────────┐
+│ POST_TO_API() Flow │
+│ Send LXC container installation data to API │
+└─────────────────────┬───────────────────────────────────────────────────────────┘
+ │
+ ▼
+┌─────────────────────────────────────────────────────────────────────────────────┐
+│ LXC Data Preparation │
+│ │
+│ ┌─────────────────────────────────────────────────────────────────────────────┐ │
+│ │ LXC-Specific Data Collection │ │
+│ │ │ │
+│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │
+│ │ │ Set LXC │ │ Include LXC │ │ Set Status │ │ │
+│ │ │ Type │ │ Variables │ │ Information │ │
+│ │ │ │ │ │ │ │ │
+│ │ │ • ct_type: 1 │ │ • DISK_SIZE │ │ • status: │ │
+│ │ │ • type: "lxc" │ │ • CORE_COUNT │ │ "installing" │ │
+│ │ │ • Include all │ │ • RAM_SIZE │ │ • Include all │ │
+│ │ │ LXC data │ │ • var_os │ │ tracking data │ │
+│ │ │ │ │ • var_version │ │ │ │
+│ │ │ │ │ • DISABLEIP6 │ │ │ │
+│ │ │ │ │ • NSAPP │ │ │ │
+│ │ │ │ │ • METHOD │ │ │ │
+│ │ │ │ │ • pve_version │ │ │ │
+│ │ │ │ │ • random_id │ │ │ │
+│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │
+│ └─────────────────────────────────────────────────────────────────────────────┘ │
+└─────────────────────────────────────────────────────────────────────────────────┘
+ │
+ ▼
+┌─────────────────────────────────────────────────────────────────────────────────┐
+│ JSON Payload Creation │
+│ │
+│ ┌─────────────────────────────────────────────────────────────────────────────┐ │
+│ │ JSON Structure Generation │ │
+│ │ │ │
+│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │
+│ │ │ Create JSON │ │ Validate │ │ Format for │ │ │
+│ │ │ Structure │ │ Data │ │ API Request │ │
+│ │ │ │ │ │ │ │ │
+│ │ │ • Use heredoc │ │ • Check all │ │ • Ensure proper │ │
+│ │ │ syntax │ │ variables │ │ JSON format │ │
+│ │ │ • Include all │ │ are set │ │ • Escape special │ │
+│ │ │ required │ │ • Validate │ │ characters │ │
+│ │ │ fields │ │ data types │ │ • Set content │ │
+│ │ │ • Format │ │ • Handle │ │ type │ │
+│ │ │ properly │ │ missing │ │ │ │
+│ │ │ │ │ values │ │ │ │
+│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │
+│ └─────────────────────────────────────────────────────────────────────────────┘ │
+└─────────────────────────────────────────────────────────────────────────────────┘
+```
+
+## VM API Reporting Flow
+
+```
+┌─────────────────────────────────────────────────────────────────────────────────┐
+│ POST_TO_API_VM() Flow │
+│ Send VM installation data to API │
+└─────────────────────┬───────────────────────────────────────────────────────────┘
+ │
+ ▼
+┌─────────────────────────────────────────────────────────────────────────────────┐
+│ VM Data Preparation │
+│ │
+│ ┌─────────────────────────────────────────────────────────────────────────────┐ │
+│ │ VM-Specific Data Collection │ │
+│ │ │ │
+│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │
+│ │ │ Check │ │ Set VM │ │ Process Disk │ │ │
+│ │ │ Diagnostics │ │ Type │ │ Size │ │
+│ │ │ File │ │ │ │ │ │
+│ │ │ │ │ • ct_type: 2 │ │ • Remove 'G' │ │
+│ │ │ • Check file │ │ • type: "vm" │ │ suffix │ │
+│ │ │ existence │ │ • Include all │ │ • Convert to │ │
+│ │ │ • Read │ │ VM data │ │ numeric value │ │
+│ │ │ DIAGNOSTICS │ │ │ │ • Store in │ │
+│ │ │ setting │ │ │ │ DISK_SIZE_API │ │
+│ │ │ • Parse value │ │ │ │ │ │
+│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │
+│ └─────────────────────────────────────────────────────────────────────────────┘ │
+└─────────────────────────────────────────────────────────────────────────────────┘
+ │
+ ▼
+┌─────────────────────────────────────────────────────────────────────────────────┐
+│ VM JSON Payload Creation │
+│ │
+│ ┌─────────────────────────────────────────────────────────────────────────────┐ │
+│ │ VM-Specific JSON Structure │ │
+│ │ │ │
+│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │
+│ │ │ Include VM │ │ Set VM │ │ Format VM │ │ │
+│ │ │ Variables │ │ Status │ │ Data for API │ │
+│ │ │ │ │ │ │ │ │
+│ │ │ • DISK_SIZE_API │ │ • status: │ │ • Ensure proper │ │
+│ │ │ • CORE_COUNT │ │ "installing" │ │ JSON format │ │
+│ │ │ • RAM_SIZE │ │ • Include all │ │ • Handle VM- │ │
+│ │ │ • var_os │ │ tracking │ │ specific data │ │
+│ │ │ • var_version │ │ information │ │ • Set appropriate │ │
+│ │ │ • NSAPP │ │ │ │ content type │ │
+│ │ │ • METHOD │ │ │ │ │ │
+│ │ │ • pve_version │ │ │ │ │ │
+│ │ │ • random_id │ │ │ │ │ │
+│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │
+│ └─────────────────────────────────────────────────────────────────────────────┘ │
+└─────────────────────────────────────────────────────────────────────────────────┘
+```
+
+## Status Update Flow
+
+```
+┌─────────────────────────────────────────────────────────────────────────────────┐
+│ POST_UPDATE_TO_API() Flow │
+│ Send installation completion status to API │
+└─────────────────────┬───────────────────────────────────────────────────────────┘
+ │
+ ▼
+┌─────────────────────────────────────────────────────────────────────────────────┐
+│ Update Prevention Check │
+│ │
+│ ┌─────────────────────────────────────────────────────────────────────────────┐ │
+│ │ Duplicate Update Prevention │ │
+│ │ │ │
+│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │
+│ │ │ Check │ │ Set Flag │ │ Return Early │ │ │
+│ │ │ POST_UPDATE_ │ │ if First │ │ if Already │ │
+│ │ │ DONE │ │ Update │ │ Updated │ │
+│ │ │ │ │ │ │ │ │
+│ │ │ • Check if │ │ • Set │ │ • Return 0 │ │
+│ │ │ already │ │ POST_UPDATE_ │ │ • Skip API call │ │
+│ │ │ updated │ │ DONE=true │ │ • Prevent │ │
+│ │ │ • Prevent │ │ • Continue │ │ duplicate │ │
+│ │ │ duplicate │ │ with update │ │ requests │ │
+│ │ │ requests │ │ │ │ │ │
+│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │
+│ └─────────────────────────────────────────────────────────────────────────────┘ │
+└─────────────────────────────────────────────────────────────────────────────────┘
+ │
+ ▼
+┌─────────────────────────────────────────────────────────────────────────────────┐
+│ Status and Error Processing │
+│ │
+│ ┌─────────────────────────────────────────────────────────────────────────────┐ │
+│ │ Status Determination │ │
+│ │ │ │
+│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │
+│ │ │ Determine │ │ Get Error │ │ Prepare Status │ │ │
+│ │ │ Status │ │ Description │ │ Data │ │
+│ │ │ │ │ │ │ │ │
+│ │ │ • status: │ │ • Call │ │ • Include status │ │
+│ │ │ "success" or │ │ get_error_ │ │ • Include error │ │
+│ │ │ "failed" │ │ description() │ │ description │ │
+│ │ │ • Set exit │ │ • Get human- │ │ • Include random │ │
+│ │ │ code based │ │ readable │ │ ID for tracking │ │
+│ │ │ on status │ │ error message │ │ │ │
+│ │ │ • Default to │ │ • Handle │ │ │ │
+│ │ │ error if │ │ unknown │ │ │ │
+│ │ │ not set │ │ errors │ │ │ │
+│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │
+│ └─────────────────────────────────────────────────────────────────────────────┘ │
+└─────────────────────────────────────────────────────────────────────────────────┘
+ │
+ ▼
+┌─────────────────────────────────────────────────────────────────────────────────┐
+│ Status Update API Request │
+│ │
+│ ┌─────────────────────────────────────────────────────────────────────────────┐ │
+│ │ Status Update Payload Creation │ │
+│ │ │ │
+│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │
+│ │ │ Create │ │ Send Status │ │ Mark Update │ │ │
+│ │ │ Status JSON │ │ Update │ │ Complete │ │
+│ │ │ │ │ │ │ │ │
+│ │ │ • Include │ │ • POST to │ │ • Set │ │
+│ │ │ status │ │ updatestatus │ │ POST_UPDATE_ │ │
+│ │ │ • Include │ │ endpoint │ │ DONE=true │ │
+│ │ │ error │ │ • Include JSON │ │ • Prevent further │ │
+│ │ │ description │ │ payload │ │ updates │ │
+│ │ │ • Include │ │ • Handle │ │ • Complete │ │
+│ │ │ random_id │ │ response │ │ process │ │
+│ │ │ │ │ gracefully │ │ │ │
+│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │
+│ └─────────────────────────────────────────────────────────────────────────────┘ │
+└─────────────────────────────────────────────────────────────────────────────────┘
+```
+
+## Error Description Flow
+
+```
+┌─────────────────────────────────────────────────────────────────────────────────┐
+│ GET_ERROR_DESCRIPTION() Flow │
+│ Convert numeric exit codes to human-readable explanations │
+└─────────────────────┬───────────────────────────────────────────────────────────┘
+ │
+ ▼
+┌─────────────────────────────────────────────────────────────────────────────────┐
+│ Error Code Classification │
+│ │
+│ ┌─────────────────────────────────────────────────────────────────────────────┐ │
+│ │ Error Code Categories │ │
+│ │ │ │
+│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │
+│ │ │ General │ │ Network │ │ LXC-Specific │ │ │
+│ │ │ System │ │ Errors │ │ Errors │ │
+│ │ │ Errors │ │ │ │ │ │
+│ │ │ │ │ • 18: Connection│ │ • 100-101: LXC │ │
+│ │ │ • 0-9: Basic │ │ failed │ │ install errors │ │
+│ │ │ errors │ │ • 22: Invalid │ │ • 200-209: LXC │ │
+│ │ │ • 126-128: │ │ argument │ │ creation errors │ │
+│ │ │ Command │ │ • 28: No space │ │ │ │
+│ │ │ errors │ │ • 35: Timeout │ │ │ │
+│ │ │ • 129-143: │ │ • 56: TLS error │ │ │ │
+│ │ │ Signal │ │ • 60: SSL cert │ │ │ │
+│ │ │ errors │ │ error │ │ │ │
+│ │ │ • 152: Resource │ │ │ │ │ │
+│ │ │ limit │ │ │ │ │ │
+│ │ │ • 255: Unknown │ │ │ │ │ │
+│ │ │ critical │ │ │ │ │ │
+│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │
+│ └─────────────────────────────────────────────────────────────────────────────┘ │
+└─────────────────────────────────────────────────────────────────────────────────┘
+ │
+ ▼
+┌─────────────────────────────────────────────────────────────────────────────────┐
+│ Error Message Return │
+│ │
+│ ┌─────────────────────────────────────────────────────────────────────────────┐ │
+│ │ Error Message Formatting │ │
+│ │ │ │
+│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │
+│ │ │ Match Error │ │ Return │ │ Default Case │ │ │
+│ │ │ Code │ │ Description │ │ │ │
+│ │ │ │ │ │ │ │ │
+│ │ │ • Use case │ │ • Return │ │ • Return "Unknown │ │
+│ │ │ statement │ │ human- │ │ error code │ │
+│ │ │ • Match │ │ readable │ │ (exit_code)" │ │
+│ │ │ specific │ │ message │ │ • Handle │ │
+│ │ │ codes │ │ • Include │ │ unrecognized │ │
+│ │ │ • Handle │ │ context │ │ codes │ │
+│ │ │ ranges │ │ information │ │ • Provide fallback │ │
+│ │ │ │ │ │ │ message │ │
+│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │
+│ └─────────────────────────────────────────────────────────────────────────────┘ │
+└─────────────────────────────────────────────────────────────────────────────────┘
+```
+
+## Integration Points
+
+### With Installation Scripts
+- **build.func**: Sends LXC installation data
+- **vm-core.func**: Sends VM installation data
+- **install.func**: Reports installation status
+- **alpine-install.func**: Reports Alpine installation data
+
+### With Error Handling
+- **error_handler.func**: Provides error explanations
+- **core.func**: Uses error descriptions in silent execution
+- **Diagnostic reporting**: Tracks error patterns
+
+### External Dependencies
+- **curl**: HTTP client for API communication
+- **Community Scripts API**: External API endpoint
+- **Network connectivity**: Required for API communication
diff --git a/docs/misc/api.func/API_FUNCTIONS_REFERENCE.md b/docs/misc/api.func/API_FUNCTIONS_REFERENCE.md
new file mode 100644
index 000000000..732261f49
--- /dev/null
+++ b/docs/misc/api.func/API_FUNCTIONS_REFERENCE.md
@@ -0,0 +1,433 @@
+# api.func Functions Reference
+
+## Overview
+
+This document provides a comprehensive alphabetical reference of all functions in `api.func`, including parameters, dependencies, usage examples, and error handling.
+
+## Function Categories
+
+### Error Description Functions
+
+#### `get_error_description()`
+**Purpose**: Convert numeric exit codes to human-readable explanations
+**Parameters**:
+- `$1` - Exit code to explain
+**Returns**: Human-readable error explanation string
+**Side Effects**: None
+**Dependencies**: None
+**Environment Variables Used**: None
+
+**Supported Exit Codes**:
+- **General System**: 0-9, 18, 22, 28, 35, 56, 60, 125-128, 129-143, 152, 255
+- **LXC-Specific**: 100-101, 200-209
+- **Docker**: 125
+
+**Usage Example**:
+```bash
+error_msg=$(get_error_description 127)
+echo "Error 127: $error_msg"
+# Output: Error 127: Command not found: Incorrect path or missing dependency.
+```
+
+**Error Code Examples**:
+```bash
+get_error_description 0 # " " (space)
+get_error_description 1 # "General error: An unspecified error occurred."
+get_error_description 127 # "Command not found: Incorrect path or missing dependency."
+get_error_description 200 # "LXC creation failed."
+get_error_description 255 # "Unknown critical error, often due to missing permissions or broken scripts."
+```
+
+### API Communication Functions
+
+#### `post_to_api()`
+**Purpose**: Send LXC container installation data to community-scripts.org API
+**Parameters**: None (uses environment variables)
+**Returns**: None
+**Side Effects**:
+- Sends HTTP POST request to API
+- Stores response in RESPONSE variable
+- Requires curl command and network connectivity
+**Dependencies**: `curl` command
+**Environment Variables Used**: `DIAGNOSTICS`, `RANDOM_UUID`, `CT_TYPE`, `DISK_SIZE`, `CORE_COUNT`, `RAM_SIZE`, `var_os`, `var_version`, `DISABLEIP6`, `NSAPP`, `METHOD`
+
+**Prerequisites**:
+- `curl` command must be available
+- `DIAGNOSTICS` must be set to "yes"
+- `RANDOM_UUID` must be set and not empty
+
+**API Endpoint**: `http://api.community-scripts.org/dev/upload`
+
+**JSON Payload Structure**:
+```json
+{
+ "ct_type": 1,
+ "type": "lxc",
+ "disk_size": 8,
+ "core_count": 2,
+ "ram_size": 2048,
+ "os_type": "debian",
+ "os_version": "12",
+ "disableip6": "true",
+ "nsapp": "plex",
+ "method": "install",
+ "pve_version": "8.0",
+ "status": "installing",
+ "random_id": "uuid-string"
+}
+```
+
+**Usage Example**:
+```bash
+export DIAGNOSTICS="yes"
+export RANDOM_UUID="$(uuidgen)"
+export CT_TYPE=1
+export DISK_SIZE=8
+export CORE_COUNT=2
+export RAM_SIZE=2048
+export var_os="debian"
+export var_version="12"
+export NSAPP="plex"
+export METHOD="install"
+
+post_to_api
+```
+
+#### `post_to_api_vm()`
+**Purpose**: Send VM installation data to community-scripts.org API
+**Parameters**: None (uses environment variables)
+**Returns**: None
+**Side Effects**:
+- Sends HTTP POST request to API
+- Stores response in RESPONSE variable
+- Requires curl command and network connectivity
+**Dependencies**: `curl` command, diagnostics file
+**Environment Variables Used**: `DIAGNOSTICS`, `RANDOM_UUID`, `DISK_SIZE`, `CORE_COUNT`, `RAM_SIZE`, `var_os`, `var_version`, `NSAPP`, `METHOD`
+
+**Prerequisites**:
+- `/usr/local/community-scripts/diagnostics` file must exist
+- `DIAGNOSTICS` must be set to "yes" in diagnostics file
+- `curl` command must be available
+- `RANDOM_UUID` must be set and not empty
+
+**API Endpoint**: `http://api.community-scripts.org/dev/upload`
+
+**JSON Payload Structure**:
+```json
+{
+ "ct_type": 2,
+ "type": "vm",
+ "disk_size": 8,
+ "core_count": 2,
+ "ram_size": 2048,
+ "os_type": "debian",
+ "os_version": "12",
+ "disableip6": "",
+ "nsapp": "plex",
+ "method": "install",
+ "pve_version": "8.0",
+ "status": "installing",
+ "random_id": "uuid-string"
+}
+```
+
+**Usage Example**:
+```bash
+# Create diagnostics file
+echo "DIAGNOSTICS=yes" > /usr/local/community-scripts/diagnostics
+
+export RANDOM_UUID="$(uuidgen)"
+export DISK_SIZE="8G"
+export CORE_COUNT=2
+export RAM_SIZE=2048
+export var_os="debian"
+export var_version="12"
+export NSAPP="plex"
+export METHOD="install"
+
+post_to_api_vm
+```
+
+#### `post_update_to_api()`
+**Purpose**: Send installation completion status to community-scripts.org API
+**Parameters**:
+- `$1` - Status ("success" or "failed", default: "failed")
+- `$2` - Exit code (default: 1)
+**Returns**: None
+**Side Effects**:
+- Sends HTTP POST request to API
+- Sets POST_UPDATE_DONE=true to prevent duplicates
+- Stores response in RESPONSE variable
+**Dependencies**: `curl` command, `get_error_description()`
+**Environment Variables Used**: `DIAGNOSTICS`, `RANDOM_UUID`
+
+**Prerequisites**:
+- `curl` command must be available
+- `DIAGNOSTICS` must be set to "yes"
+- `RANDOM_UUID` must be set and not empty
+- POST_UPDATE_DONE must be false (prevents duplicates)
+
+**API Endpoint**: `http://api.community-scripts.org/dev/upload/updatestatus`
+
+**JSON Payload Structure**:
+```json
+{
+ "status": "success",
+ "error": "Error description from get_error_description()",
+ "random_id": "uuid-string"
+}
+```
+
+**Usage Example**:
+```bash
+export DIAGNOSTICS="yes"
+export RANDOM_UUID="$(uuidgen)"
+
+# Report successful installation
+post_update_to_api "success" 0
+
+# Report failed installation
+post_update_to_api "failed" 127
+```
+
+## Function Call Hierarchy
+
+### API Communication Flow
+```
+post_to_api()
+├── Check curl availability
+├── Check DIAGNOSTICS setting
+├── Check RANDOM_UUID
+├── Get PVE version
+├── Create JSON payload
+└── Send HTTP POST request
+
+post_to_api_vm()
+├── Check diagnostics file
+├── Check curl availability
+├── Check DIAGNOSTICS setting
+├── Check RANDOM_UUID
+├── Process disk size
+├── Get PVE version
+├── Create JSON payload
+└── Send HTTP POST request
+
+post_update_to_api()
+├── Check POST_UPDATE_DONE flag
+├── Check curl availability
+├── Check DIAGNOSTICS setting
+├── Check RANDOM_UUID
+├── Determine status and exit code
+├── Get error description
+├── Create JSON payload
+├── Send HTTP POST request
+└── Set POST_UPDATE_DONE=true
+```
+
+### Error Description Flow
+```
+get_error_description()
+├── Match exit code
+├── Return appropriate description
+└── Handle unknown codes
+```
+
+## Error Code Reference
+
+### General System Errors
+| Code | Description |
+|------|-------------|
+| 0 | (space) |
+| 1 | General error: An unspecified error occurred. |
+| 2 | Incorrect shell usage or invalid command arguments. |
+| 3 | Unexecuted function or invalid shell condition. |
+| 4 | Error opening a file or invalid path. |
+| 5 | I/O error: An input/output failure occurred. |
+| 6 | No such device or address. |
+| 7 | Insufficient memory or resource exhaustion. |
+| 8 | Non-executable file or invalid file format. |
+| 9 | Failed child process execution. |
+| 18 | Connection to a remote server failed. |
+| 22 | Invalid argument or faulty network connection. |
+| 28 | No space left on device. |
+| 35 | Timeout while establishing a connection. |
+| 56 | Faulty TLS connection. |
+| 60 | SSL certificate error. |
+
+### Command Execution Errors
+| Code | Description |
+|------|-------------|
+| 125 | Docker error: Container could not start. |
+| 126 | Command not executable: Incorrect permissions or missing dependencies. |
+| 127 | Command not found: Incorrect path or missing dependency. |
+| 128 | Invalid exit signal, e.g., incorrect Git command. |
+
+### Signal Errors
+| Code | Description |
+|------|-------------|
+| 129 | Signal 1 (SIGHUP): Process terminated due to hangup. |
+| 130 | Signal 2 (SIGINT): Manual termination via Ctrl+C. |
+| 132 | Signal 4 (SIGILL): Illegal machine instruction. |
+| 133 | Signal 5 (SIGTRAP): Debugging error or invalid breakpoint signal. |
+| 134 | Signal 6 (SIGABRT): Program aborted itself. |
+| 135 | Signal 7 (SIGBUS): Memory error, invalid memory address. |
+| 137 | Signal 9 (SIGKILL): Process forcibly terminated (OOM-killer or 'kill -9'). |
+| 139 | Signal 11 (SIGSEGV): Segmentation fault, possibly due to invalid pointer access. |
+| 141 | Signal 13 (SIGPIPE): Pipe closed unexpectedly. |
+| 143 | Signal 15 (SIGTERM): Process terminated normally. |
+| 152 | Signal 24 (SIGXCPU): CPU time limit exceeded. |
+
+### LXC-Specific Errors
+| Code | Description |
+|------|-------------|
+| 100 | LXC install error: Unexpected error in create_lxc.sh. |
+| 101 | LXC install error: No network connection detected. |
+| 200 | LXC creation failed. |
+| 201 | LXC error: Invalid Storage class. |
+| 202 | User aborted menu in create_lxc.sh. |
+| 203 | CTID not set in create_lxc.sh. |
+| 204 | PCT_OSTYPE not set in create_lxc.sh. |
+| 205 | CTID cannot be less than 100 in create_lxc.sh. |
+| 206 | CTID already in use in create_lxc.sh. |
+| 207 | Template not found in create_lxc.sh. |
+| 208 | Error downloading template in create_lxc.sh. |
+| 209 | Container creation failed, but template is intact in create_lxc.sh. |
+
+### Other Errors
+| Code | Description |
+|------|-------------|
+| 255 | Unknown critical error, often due to missing permissions or broken scripts. |
+| * | Unknown error code (exit_code). |
+
+## Environment Variable Dependencies
+
+### Required Variables
+- **`DIAGNOSTICS`**: Enable/disable diagnostic reporting ("yes"/"no")
+- **`RANDOM_UUID`**: Unique identifier for tracking
+
+### Optional Variables
+- **`CT_TYPE`**: Container type (1 for LXC, 2 for VM)
+- **`DISK_SIZE`**: Disk size — a plain number of GB for LXC (e.g. `8`); for VMs it carries a `G` suffix (e.g. `"8G"`), which is stripped into `DISK_SIZE_API` before sending
+- **`CORE_COUNT`**: Number of CPU cores
+- **`RAM_SIZE`**: RAM size in MB
+- **`var_os`**: Operating system type
+- **`var_version`**: OS version
+- **`DISABLEIP6`**: IPv6 disable setting
+- **`NSAPP`**: Namespace application name
+- **`METHOD`**: Installation method
+
+### Internal Variables
+- **`POST_UPDATE_DONE`**: Prevents duplicate status updates
+- **`API_URL`**: Community scripts API endpoint
+- **`JSON_PAYLOAD`**: API request payload
+- **`RESPONSE`**: API response
+- **`DISK_SIZE_API`**: Processed disk size for VM API
+
+## Error Handling Patterns
+
+### API Communication Errors
+- All API functions handle curl failures gracefully
+- Network errors don't block installation process
+- Missing prerequisites cause early return
+- Duplicate updates are prevented
+
+### Error Description Errors
+- Unknown error codes return generic message
+- All error codes are handled with case statement
+- Fallback message includes the actual error code
+
+### Prerequisites Validation
+- Check curl availability before API calls
+- Validate DIAGNOSTICS setting
+- Ensure RANDOM_UUID is set
+- Check for duplicate updates
+
+## Integration Examples
+
+### With build.func
+```bash
+#!/usr/bin/env bash
+source core.func
+source api.func
+source build.func
+
+# Set up API reporting
+export DIAGNOSTICS="yes"
+export RANDOM_UUID="$(uuidgen)"
+
+# Report installation start
+post_to_api
+
+# Container creation...
+# ... build.func code ...
+
+# Report completion
+if [[ $? -eq 0 ]]; then
+ post_update_to_api "success" 0
+else
+ post_update_to_api "failed" $?
+fi
+```
+
+### With vm-core.func
+```bash
+#!/usr/bin/env bash
+source core.func
+source api.func
+source vm-core.func
+
+# Set up API reporting
+export DIAGNOSTICS="yes"
+export RANDOM_UUID="$(uuidgen)"
+
+# Report VM installation start
+post_to_api_vm
+
+# VM creation...
+# ... vm-core.func code ...
+
+# Report completion
+post_update_to_api "success" 0
+```
+
+### With error_handler.func
+```bash
+#!/usr/bin/env bash
+source core.func
+source error_handler.func
+source api.func
+
+# Use error descriptions
+error_code=127
+error_msg=$(get_error_description $error_code)
+echo "Error $error_code: $error_msg"
+
+# Report error to API
+post_update_to_api "failed" $error_code
+```
+
+## Best Practices
+
+### API Usage
+1. Always check prerequisites before API calls
+2. Use unique identifiers for tracking
+3. Handle API failures gracefully
+4. Don't block installation on API failures
+
+### Error Reporting
+1. Use appropriate error codes
+2. Provide meaningful error descriptions
+3. Report both success and failure cases
+4. Prevent duplicate status updates
+
+### Diagnostic Reporting
+1. Respect user privacy settings
+2. Only send data when diagnostics enabled
+3. Use anonymous tracking identifiers
+4. Include relevant system information
+
+### Error Handling
+1. Handle unknown error codes gracefully
+2. Provide fallback error messages
+3. Include error code in unknown error messages
+4. Use consistent error message format
diff --git a/docs/misc/api.func/API_INTEGRATION.md b/docs/misc/api.func/API_INTEGRATION.md
new file mode 100644
index 000000000..f325dace2
--- /dev/null
+++ b/docs/misc/api.func/API_INTEGRATION.md
@@ -0,0 +1,643 @@
+# api.func Integration Guide
+
+## Overview
+
+This document describes how `api.func` integrates with other components in the Proxmox Community Scripts project, including dependencies, data flow, and API surface.
+
+## Dependencies
+
+### External Dependencies
+
+#### Required Commands
+- **`curl`**: HTTP client for API communication
+- **`uuidgen`**: Generate unique identifiers (optional, can use other methods)
+
+#### Optional Commands
+- **None**: No other external command dependencies
+
+### Internal Dependencies
+
+#### Environment Variables from Other Scripts
+- **build.func**: Provides container creation variables
+- **vm-core.func**: Provides VM creation variables
+- **core.func**: Provides system information variables
+- **Installation scripts**: Provide application-specific variables
+
+## Integration Points
+
+### With build.func
+
+#### LXC Container Reporting
+```bash
+# build.func uses api.func for container reporting
+source core.func
+source api.func
+source build.func
+
+# Set up API reporting
+export DIAGNOSTICS="yes"
+export RANDOM_UUID="$(uuidgen)"
+
+# Container creation with API reporting
+create_container() {
+ # Set container parameters
+ export CT_TYPE=1
+ export DISK_SIZE="$var_disk"
+ export CORE_COUNT="$var_cpu"
+ export RAM_SIZE="$var_ram"
+ export var_os="$var_os"
+ export var_version="$var_version"
+ export NSAPP="$APP"
+ export METHOD="install"
+
+ # Report installation start
+ post_to_api
+
+ # Container creation using build.func
+ # ... build.func container creation logic ...
+
+ # Report completion
+ if [[ $? -eq 0 ]]; then
+ post_update_to_api "success" 0
+ else
+ post_update_to_api "failed" $?
+ fi
+}
+```
+
+#### Error Reporting Integration
+```bash
+# build.func uses api.func for error reporting
+handle_container_error() {
+ local exit_code=$1
+ local error_msg=$(get_error_description $exit_code)
+
+ echo "Container creation failed: $error_msg"
+ post_update_to_api "failed" $exit_code
+}
+```
+
+### With vm-core.func
+
+#### VM Installation Reporting
+```bash
+# vm-core.func uses api.func for VM reporting
+source core.func
+source api.func
+source vm-core.func
+
+# Set up VM API reporting
+mkdir -p /usr/local/community-scripts
+echo "DIAGNOSTICS=yes" > /usr/local/community-scripts/diagnostics
+
+export RANDOM_UUID="$(uuidgen)"
+
+# VM creation with API reporting
+create_vm() {
+ # Set VM parameters
+ export DISK_SIZE="${var_disk}G"
+ export CORE_COUNT="$var_cpu"
+ export RAM_SIZE="$var_ram"
+ export var_os="$var_os"
+ export var_version="$var_version"
+ export NSAPP="$APP"
+ export METHOD="install"
+
+ # Report VM installation start
+ post_to_api_vm
+
+ # VM creation using vm-core.func
+ # ... vm-core.func VM creation logic ...
+
+ # Report completion
+ post_update_to_api "success" 0
+}
+```
+
+### With core.func
+
+#### System Information Integration
+```bash
+# core.func provides system information for api.func
+source core.func
+source api.func
+
+# Get system information for API reporting
+get_system_info_for_api() {
+ # Get PVE version using core.func utilities
+ local pve_version=$(pveversion | awk -F'[/ ]' '{print $2}')
+
+ # Set API parameters
+ export var_os="$var_os"
+ export var_version="$var_version"
+
+ # Use core.func error handling with api.func reporting
+ if silent apt-get update; then
+ post_update_to_api "success" 0
+ else
+ post_update_to_api "failed" $?
+ fi
+}
+```
+
+### With error_handler.func
+
+#### Error Description Integration
+```bash
+# error_handler.func uses api.func for error descriptions
+source core.func
+source error_handler.func
+source api.func
+
+# Enhanced error handler with API reporting
+enhanced_error_handler() {
+ local exit_code=${1:-$?}
+ local command=${2:-${BASH_COMMAND:-unknown}}
+
+ # Get error description from api.func
+ local error_msg=$(get_error_description $exit_code)
+
+ # Display error information
+ echo "Error $exit_code: $error_msg"
+ echo "Command: $command"
+
+ # Report error to API
+ export DIAGNOSTICS="yes"
+ export RANDOM_UUID="$(uuidgen)"
+ post_update_to_api "failed" $exit_code
+
+ # Use standard error handler
+ error_handler $exit_code $command
+}
+```
+
+### With install.func
+
+#### Installation Process Reporting
+```bash
+# install.func uses api.func for installation reporting
+source core.func
+source api.func
+source install.func
+
+# Installation with API reporting
+install_package_with_reporting() {
+ local package="$1"
+
+ # Set up API reporting
+ export DIAGNOSTICS="yes"
+ export RANDOM_UUID="$(uuidgen)"
+ export NSAPP="$package"
+ export METHOD="install"
+
+ # Report installation start
+ post_to_api
+
+ # Package installation using install.func
+ if install_package "$package"; then
+ echo "$package installed successfully"
+ post_update_to_api "success" 0
+ return 0
+ else
+ local exit_code=$?
+ local error_msg=$(get_error_description $exit_code)
+ echo "$package installation failed: $error_msg"
+ post_update_to_api "failed" $exit_code
+ return $exit_code
+ fi
+}
+```
+
+### With alpine-install.func
+
+#### Alpine Installation Reporting
+```bash
+# alpine-install.func uses api.func for Alpine reporting
+source core.func
+source api.func
+source alpine-install.func
+
+# Alpine installation with API reporting
+install_alpine_with_reporting() {
+ local app="$1"
+
+ # Set up API reporting
+ export DIAGNOSTICS="yes"
+ export RANDOM_UUID="$(uuidgen)"
+ export NSAPP="$app"
+ export METHOD="install"
+ export var_os="alpine"
+
+ # Report Alpine installation start
+ post_to_api
+
+ # Alpine installation using alpine-install.func
+ if install_alpine_app "$app"; then
+ echo "Alpine $app installed successfully"
+ post_update_to_api "success" 0
+ return 0
+ else
+ local exit_code=$?
+ local error_msg=$(get_error_description $exit_code)
+ echo "Alpine $app installation failed: $error_msg"
+ post_update_to_api "failed" $exit_code
+ return $exit_code
+ fi
+}
+```
+
+### With alpine-tools.func
+
+#### Alpine Tools Reporting
+```bash
+# alpine-tools.func uses api.func for Alpine tools reporting
+source core.func
+source api.func
+source alpine-tools.func
+
+# Alpine tools with API reporting
+run_alpine_tool_with_reporting() {
+ local tool="$1"
+
+ # Set up API reporting
+ export DIAGNOSTICS="yes"
+ export RANDOM_UUID="$(uuidgen)"
+ export NSAPP="alpine-tools"
+ export METHOD="tool"
+
+ # Report tool execution start
+ post_to_api
+
+ # Run Alpine tool using alpine-tools.func
+ if run_alpine_tool "$tool"; then
+ echo "Alpine tool $tool executed successfully"
+ post_update_to_api "success" 0
+ return 0
+ else
+ local exit_code=$?
+ local error_msg=$(get_error_description $exit_code)
+ echo "Alpine tool $tool failed: $error_msg"
+ post_update_to_api "failed" $exit_code
+ return $exit_code
+ fi
+}
+```
+
+### With passthrough.func
+
+#### Hardware Passthrough Reporting
+```bash
+# passthrough.func uses api.func for hardware reporting
+source core.func
+source api.func
+source passthrough.func
+
+# Hardware passthrough with API reporting
+configure_passthrough_with_reporting() {
+ local hardware_type="$1"
+
+ # Set up API reporting
+ export DIAGNOSTICS="yes"
+ export RANDOM_UUID="$(uuidgen)"
+ export NSAPP="passthrough"
+ export METHOD="hardware"
+
+ # Report passthrough configuration start
+ post_to_api
+
+ # Configure passthrough using passthrough.func
+ if configure_passthrough "$hardware_type"; then
+ echo "Hardware passthrough configured successfully"
+ post_update_to_api "success" 0
+ return 0
+ else
+ local exit_code=$?
+ local error_msg=$(get_error_description $exit_code)
+ echo "Hardware passthrough failed: $error_msg"
+ post_update_to_api "failed" $exit_code
+ return $exit_code
+ fi
+}
+```
+
+### With tools.func
+
+#### Maintenance Operations Reporting
+```bash
+# tools.func uses api.func for maintenance reporting
+source core.func
+source api.func
+source tools.func
+
+# Maintenance operations with API reporting
+run_maintenance_with_reporting() {
+ local operation="$1"
+
+ # Set up API reporting
+ export DIAGNOSTICS="yes"
+ export RANDOM_UUID="$(uuidgen)"
+ export NSAPP="maintenance"
+ export METHOD="tool"
+
+ # Report maintenance start
+ post_to_api
+
+ # Run maintenance using tools.func
+ if run_maintenance_operation "$operation"; then
+ echo "Maintenance operation $operation completed successfully"
+ post_update_to_api "success" 0
+ return 0
+ else
+ local exit_code=$?
+ local error_msg=$(get_error_description $exit_code)
+ echo "Maintenance operation $operation failed: $error_msg"
+ post_update_to_api "failed" $exit_code
+ return $exit_code
+ fi
+}
+```
+
+## Data Flow
+
+### Input Data
+
+#### Environment Variables from Other Scripts
+- **`CT_TYPE`**: Container type (1 for LXC, 2 for VM)
+- **`DISK_SIZE`**: Disk size in GB
+- **`CORE_COUNT`**: Number of CPU cores
+- **`RAM_SIZE`**: RAM size in MB
+- **`var_os`**: Operating system type
+- **`var_version`**: OS version
+- **`DISABLEIP6`**: IPv6 disable setting
+- **`NSAPP`**: Namespace application name
+- **`METHOD`**: Installation method
+- **`DIAGNOSTICS`**: Enable/disable diagnostic reporting
+- **`RANDOM_UUID`**: Unique identifier for tracking
+
+#### Function Parameters
+- **Exit codes**: Passed to `get_error_description()` and `post_update_to_api()`
+- **Status information**: Passed to `post_update_to_api()`
+- **API endpoints**: Hardcoded in functions
+
+#### System Information
+- **PVE version**: Retrieved from `pveversion` command
+- **Disk size processing**: Processed for VM API (removes 'G' suffix)
+- **Error codes**: Retrieved from command exit codes
+
+### Processing Data
+
+#### API Request Preparation
+- **JSON payload creation**: Format data for API consumption
+- **Data validation**: Ensure required fields are present
+- **Error handling**: Handle missing or invalid data
+- **Content type setting**: Set appropriate HTTP headers
+
+#### Error Processing
+- **Error code mapping**: Map numeric codes to descriptions
+- **Error message formatting**: Format error descriptions
+- **Unknown error handling**: Handle unrecognized error codes
+- **Fallback messages**: Provide default error messages
+
+#### API Communication
+- **HTTP request preparation**: Prepare curl commands
+- **Response handling**: Capture HTTP response codes
+- **Error handling**: Handle network and API errors
+- **Duplicate prevention**: Prevent duplicate status updates
+
+### Output Data
+
+#### API Communication
+- **HTTP requests**: Sent to community-scripts.org API
+- **Response codes**: Captured from API responses
+- **Error information**: Reported to API
+- **Status updates**: Sent to API
+
+#### Error Information
+- **Error descriptions**: Human-readable error messages
+- **Error codes**: Mapped to descriptions
+- **Context information**: Error context and details
+- **Fallback messages**: Default error messages
+
+#### System State
+- **POST_UPDATE_DONE**: Prevents duplicate updates
+- **RESPONSE**: Stores API response
+- **JSON_PAYLOAD**: Stores formatted API data
+- **API_URL**: Stores API endpoint
+
+## API Surface
+
+### Public Functions
+
+#### Error Description
+- **`get_error_description()`**: Convert exit codes to explanations
+- **Parameters**: Exit code to explain
+- **Returns**: Human-readable explanation string
+- **Usage**: Called by other functions and scripts
+
+#### API Communication
+- **`post_to_api()`**: Send LXC installation data
+- **`post_to_api_vm()`**: Send VM installation data
+- **`post_update_to_api()`**: Send status updates
+- **Parameters**: Status and exit code (for updates)
+- **Returns**: None
+- **Usage**: Called by installation scripts
+
+### Internal Functions
+
+#### None
+- All functions in api.func are public
+- No internal helper functions
+- Direct implementation of all functionality
+
+### Global Variables
+
+#### Configuration Variables
+- **`DIAGNOSTICS`**: Diagnostic reporting setting
+- **`RANDOM_UUID`**: Unique tracking identifier
+- **`POST_UPDATE_DONE`**: Duplicate update prevention
+
+#### Data Variables
+- **`CT_TYPE`**: Container type
+- **`DISK_SIZE`**: Disk size
+- **`CORE_COUNT`**: CPU core count
+- **`RAM_SIZE`**: RAM size
+- **`var_os`**: Operating system
+- **`var_version`**: OS version
+- **`DISABLEIP6`**: IPv6 setting
+- **`NSAPP`**: Application namespace
+- **`METHOD`**: Installation method
+
+#### Internal Variables
+- **`API_URL`**: API endpoint URL
+- **`JSON_PAYLOAD`**: API request payload
+- **`RESPONSE`**: API response
+- **`DISK_SIZE_API`**: Processed disk size for VM API
+
+## Integration Patterns
+
+### Standard Integration Pattern
+
+```bash
+#!/usr/bin/env bash
+# Standard integration pattern
+
+# 1. Source core.func first
+source core.func
+
+# 2. Source api.func
+source api.func
+
+# 3. Set up API reporting
+export DIAGNOSTICS="yes"
+export RANDOM_UUID="$(uuidgen)"
+
+# 4. Set application parameters
+export NSAPP="$APP"
+export METHOD="install"
+
+# 5. Report installation start
+post_to_api
+
+# 6. Perform installation
+# ... installation logic ...
+
+# 7. Report completion
+post_update_to_api "success" 0
+```
+
+### Minimal Integration Pattern
+
+```bash
+#!/usr/bin/env bash
+# Minimal integration pattern
+
+source api.func
+
+# Basic error reporting
+export DIAGNOSTICS="yes"
+export RANDOM_UUID="$(uuidgen)"
+
+# Report failure
+post_update_to_api "failed" 127
+```
+
+### Advanced Integration Pattern
+
+```bash
+#!/usr/bin/env bash
+# Advanced integration pattern
+
+source core.func
+source api.func
+source error_handler.func
+
+# Set up comprehensive API reporting
+export DIAGNOSTICS="yes"
+export RANDOM_UUID="$(uuidgen)"
+export CT_TYPE=1
+export DISK_SIZE=8
+export CORE_COUNT=2
+export RAM_SIZE=2048
+export var_os="debian"
+export var_version="12"
+export METHOD="install"
+
+# Enhanced error handling with API reporting
+enhanced_error_handler() {
+ local exit_code=${1:-$?}
+ local command=${2:-${BASH_COMMAND:-unknown}}
+
+ local error_msg=$(get_error_description $exit_code)
+ echo "Error $exit_code: $error_msg"
+
+ post_update_to_api "failed" $exit_code
+ error_handler $exit_code $command
+}
+
+trap 'enhanced_error_handler' ERR
+
+# Advanced operations with API reporting
+post_to_api
+# ... operations ...
+post_update_to_api "success" 0
+```
+
+## Error Handling Integration
+
+### Automatic Error Reporting
+- **Error Descriptions**: Provides human-readable error messages
+- **API Integration**: Reports errors to community-scripts.org API
+- **Error Tracking**: Tracks error patterns for project improvement
+- **Diagnostic Data**: Contributes to anonymous usage analytics
+
+### Manual Error Reporting
+- **Custom Error Codes**: Use appropriate error codes for different scenarios
+- **Error Context**: Provide context information for errors
+- **Status Updates**: Report both success and failure cases
+- **Error Analysis**: Analyze error patterns and trends
+
+### API Communication Errors
+- **Network Failures**: Handle API communication failures gracefully
+- **Missing Prerequisites**: Check prerequisites before API calls
+- **Duplicate Prevention**: Prevent duplicate status updates
+- **Error Recovery**: Handle API errors without blocking installation
+
+## Performance Considerations
+
+### API Communication Overhead
+- **Minimal Impact**: API calls add minimal overhead
+- **Fire-and-forget**: API calls are plain `curl` requests whose results are never waited on for installation logic, so they do not hold up the installation
+- **Error Handling**: API failures don't affect installation
+- **Optional**: API reporting is optional and can be disabled
+
+### Memory Usage
+- **Minimal Footprint**: API functions use minimal memory
+- **Variable Reuse**: Global variables reused across functions
+- **No Memory Leaks**: Proper cleanup prevents memory leaks
+- **Efficient Processing**: Efficient JSON payload creation
+
+### Execution Speed
+- **Fast API Calls**: Quick API communication
+- **Efficient Error Processing**: Fast error code processing
+- **Minimal Delay**: Minimal delay in API operations
+- **Non-blocking**: API calls don't block installation
+
+## Security Considerations
+
+### Data Privacy
+- **Anonymous Reporting**: Only anonymous data is sent
+- **No Sensitive Data**: No sensitive information is transmitted
+- **User Control**: Users can disable diagnostic reporting
+- **Data Minimization**: Only necessary data is sent
+
+### API Security
+- **Transport**: The documented endpoints currently use plain HTTP (`http://api.community-scripts.org`); only anonymous, non-sensitive data is ever sent over them
+- **Data Validation**: API data is validated before sending
+- **Error Handling**: API errors are handled securely
+- **No Credentials**: No authentication credentials are sent
+
+### Network Security
+- **Transport Caveat**: Because the endpoints use plain HTTP, no secrets or credentials may ever be included in payloads
+- **Error Handling**: Network errors are handled gracefully
+- **No Data Leakage**: No sensitive data is leaked
+- **Secure Endpoints**: Uses trusted API endpoints
+
+## Future Integration Considerations
+
+### Extensibility
+- **New API Endpoints**: Easy to add new API endpoints
+- **Additional Data**: Easy to add new data fields
+- **Error Codes**: Easy to add new error code descriptions
+- **API Versions**: Easy to support new API versions
+
+### Compatibility
+- **API Versioning**: Compatible with different API versions
+- **Data Format**: Compatible with different data formats
+- **Error Codes**: Compatible with different error code systems
+- **Network Protocols**: Compatible with different network protocols
+
+### Performance
+- **Optimization**: API communication can be optimized
+- **Caching**: API responses can be cached
+- **Batch Operations**: Multiple operations can be batched
+- **Async Processing**: API calls can be made asynchronous
diff --git a/docs/misc/api.func/API_USAGE_EXAMPLES.md b/docs/misc/api.func/API_USAGE_EXAMPLES.md
new file mode 100644
index 000000000..616ebc927
--- /dev/null
+++ b/docs/misc/api.func/API_USAGE_EXAMPLES.md
@@ -0,0 +1,794 @@
+# api.func Usage Examples
+
+## Overview
+
+This document provides practical usage examples for `api.func` functions, covering common scenarios, integration patterns, and best practices.
+
+## Basic API Setup
+
+### Standard API Initialization
+
+```bash
+#!/usr/bin/env bash
+# Standard API setup for LXC containers
+
+source api.func
+
+# Set up diagnostic reporting
+export DIAGNOSTICS="yes"
+export RANDOM_UUID="$(uuidgen)"
+
+# Set container parameters
+export CT_TYPE=1
+export DISK_SIZE=8
+export CORE_COUNT=2
+export RAM_SIZE=2048
+export var_os="debian"
+export var_version="12"
+export NSAPP="plex"
+export METHOD="install"
+
+# Report installation start
+post_to_api
+
+# Your installation code here
+# ... installation logic ...
+
+# Report completion
+rc=$?; if [[ $rc -eq 0 ]]; then
+    post_update_to_api "success" 0
+else
+    post_update_to_api "failed" $rc
+fi
+```
+
+### VM API Setup
+
+```bash
+#!/usr/bin/env bash
+# API setup for VMs
+
+source api.func
+
+# Create diagnostics file for VM
+mkdir -p /usr/local/community-scripts
+echo "DIAGNOSTICS=yes" > /usr/local/community-scripts/diagnostics
+
+# Set up VM parameters
+export RANDOM_UUID="$(uuidgen)"
+export DISK_SIZE="20G"
+export CORE_COUNT=4
+export RAM_SIZE=4096
+export var_os="ubuntu"
+export var_version="22.04"
+export NSAPP="nextcloud"
+export METHOD="install"
+
+# Report VM installation start
+post_to_api_vm
+
+# Your VM installation code here
+# ... VM creation logic ...
+
+# Report completion
+post_update_to_api "success" 0
+```
+
+## Error Description Examples
+
+### Basic Error Explanation
+
+```bash
+#!/usr/bin/env bash
+source api.func
+
+# Explain common error codes
+echo "Error 0: $(get_error_description 0)"
+echo "Error 1: $(get_error_description 1)"
+echo "Error 127: $(get_error_description 127)"
+echo "Error 200: $(get_error_description 200)"
+echo "Error 255: $(get_error_description 255)"
+```
+
+### Error Code Testing
+
+```bash
+#!/usr/bin/env bash
+source api.func
+
+# Test all error codes
+test_error_codes() {
+ local codes=(0 1 2 127 128 130 137 139 143 200 203 205 255)
+
+ for code in "${codes[@]}"; do
+ echo "Code $code: $(get_error_description $code)"
+ done
+}
+
+test_error_codes
+```
+
+### Error Handling with Descriptions
+
+```bash
+#!/usr/bin/env bash
+source api.func
+
+# Function with error handling
+run_command_with_error_handling() {
+ local command="$1"
+ local description="$2"
+
+ echo "Running: $description"
+
+ if $command; then
+ echo "Success: $description"
+ return 0
+ else
+ local exit_code=$?
+ local error_msg=$(get_error_description $exit_code)
+ echo "Error $exit_code: $error_msg"
+ return $exit_code
+ fi
+}
+
+# Usage
+run_command_with_error_handling "apt-get update" "Package list update"
+run_command_with_error_handling "nonexistent_command" "Test command"
+```
+
+## API Communication Examples
+
+### LXC Installation Reporting
+
+```bash
+#!/usr/bin/env bash
+source api.func
+
+# Complete LXC installation with API reporting
+install_lxc_with_reporting() {
+ local app="$1"
+ local ctid="$2"
+
+ # Set up API reporting
+ export DIAGNOSTICS="yes"
+ export RANDOM_UUID="$(uuidgen)"
+ export CT_TYPE=1
+ export DISK_SIZE=10
+ export CORE_COUNT=2
+ export RAM_SIZE=2048
+ export var_os="debian"
+ export var_version="12"
+ export NSAPP="$app"
+ export METHOD="install"
+
+ # Report installation start
+ post_to_api
+
+ # Installation process
+ echo "Installing $app container (ID: $ctid)..."
+
+ # Simulate installation
+ sleep 2
+
+ # Check if installation succeeded
+    rc=$?; if [[ $rc -eq 0 ]]; then
+        echo "Installation completed successfully"
+        post_update_to_api "success" 0
+        return 0
+    else
+        echo "Installation failed"
+        post_update_to_api "failed" $rc
+        return 1
+    fi
+}
+
+# Install multiple containers
+install_lxc_with_reporting "plex" "100"
+install_lxc_with_reporting "nextcloud" "101"
+install_lxc_with_reporting "nginx" "102"
+```
+
+### VM Installation Reporting
+
+```bash
+#!/usr/bin/env bash
+source api.func
+
+# Complete VM installation with API reporting
+install_vm_with_reporting() {
+ local app="$1"
+ local vmid="$2"
+
+ # Create diagnostics file
+ mkdir -p /usr/local/community-scripts
+ echo "DIAGNOSTICS=yes" > /usr/local/community-scripts/diagnostics
+
+ # Set up API reporting
+ export RANDOM_UUID="$(uuidgen)"
+ export DISK_SIZE="20G"
+ export CORE_COUNT=4
+ export RAM_SIZE=4096
+ export var_os="ubuntu"
+ export var_version="22.04"
+ export NSAPP="$app"
+ export METHOD="install"
+
+ # Report VM installation start
+ post_to_api_vm
+
+ # VM installation process
+ echo "Installing $app VM (ID: $vmid)..."
+
+ # Simulate VM creation
+ sleep 3
+
+ # Check if VM creation succeeded
+    rc=$?; if [[ $rc -eq 0 ]]; then
+        echo "VM installation completed successfully"
+        post_update_to_api "success" 0
+        return 0
+    else
+        echo "VM installation failed"
+        post_update_to_api "failed" $rc
+        return 1
+    fi
+}
+
+# Install multiple VMs
+install_vm_with_reporting "nextcloud" "200"
+install_vm_with_reporting "wordpress" "201"
+```
+
+## Status Update Examples
+
+### Success Reporting
+
+```bash
+#!/usr/bin/env bash
+source api.func
+
+# Report successful installation
+report_success() {
+ local operation="$1"
+
+ export DIAGNOSTICS="yes"
+ export RANDOM_UUID="$(uuidgen)"
+
+ echo "Reporting successful $operation"
+ post_update_to_api "success" 0
+}
+
+# Usage
+report_success "container installation"
+report_success "package installation"
+report_success "service configuration"
+```
+
+### Failure Reporting
+
+```bash
+#!/usr/bin/env bash
+source api.func
+
+# Report failed installation
+report_failure() {
+ local operation="$1"
+ local exit_code="$2"
+
+ export DIAGNOSTICS="yes"
+ export RANDOM_UUID="$(uuidgen)"
+
+ local error_msg=$(get_error_description $exit_code)
+ echo "Reporting failed $operation: $error_msg"
+ post_update_to_api "failed" $exit_code
+}
+
+# Usage
+report_failure "container creation" 200
+report_failure "package installation" 127
+report_failure "service start" 1
+```
+
+### Conditional Status Reporting
+
+```bash
+#!/usr/bin/env bash
+source api.func
+
+# Conditional status reporting
+report_installation_status() {
+ local operation="$1"
+ local exit_code="$2"
+
+ export DIAGNOSTICS="yes"
+ export RANDOM_UUID="$(uuidgen)"
+
+ if [[ $exit_code -eq 0 ]]; then
+ echo "Reporting successful $operation"
+ post_update_to_api "success" 0
+ else
+ local error_msg=$(get_error_description $exit_code)
+ echo "Reporting failed $operation: $error_msg"
+ post_update_to_api "failed" $exit_code
+ fi
+}
+
+# Usage
+report_installation_status "container creation" 0
+report_installation_status "package installation" 127
+```
+
+## Advanced Usage Examples
+
+### Batch Installation with API Reporting
+
+```bash
+#!/usr/bin/env bash
+source api.func
+
+# Batch installation with comprehensive API reporting
+batch_install_with_reporting() {
+ local apps=("plex" "nextcloud" "nginx" "mysql")
+ local ctids=(100 101 102 103)
+
+ # Set up API reporting
+ export DIAGNOSTICS="yes"
+ export RANDOM_UUID="$(uuidgen)"
+ export CT_TYPE=1
+ export DISK_SIZE=8
+ export CORE_COUNT=2
+ export RAM_SIZE=2048
+ export var_os="debian"
+ export var_version="12"
+ export METHOD="install"
+
+ local success_count=0
+ local failure_count=0
+
+ for i in "${!apps[@]}"; do
+ local app="${apps[$i]}"
+ local ctid="${ctids[$i]}"
+
+ echo "Installing $app (ID: $ctid)..."
+
+ # Set app-specific parameters
+ export NSAPP="$app"
+
+ # Report installation start
+ post_to_api
+
+ # Simulate installation
+ if install_app "$app" "$ctid"; then
+ echo "$app installed successfully"
+ post_update_to_api "success" 0
+ ((success_count++))
+ else
+ echo "$app installation failed"
+ post_update_to_api "failed" $?
+ ((failure_count++))
+ fi
+
+ echo "---"
+ done
+
+ echo "Batch installation completed: $success_count successful, $failure_count failed"
+}
+
+# Mock installation function
+install_app() {
+ local app="$1"
+ local ctid="$2"
+
+ # Simulate installation
+ sleep 1
+
+ # Simulate occasional failures
+ if [[ $((RANDOM % 10)) -eq 0 ]]; then
+ return 1
+ fi
+
+ return 0
+}
+
+batch_install_with_reporting
+```
+
+### Error Analysis and Reporting
+
+```bash
+#!/usr/bin/env bash
+source api.func
+
+# Analyze and report errors
+analyze_and_report_errors() {
+ local log_file="$1"
+
+ export DIAGNOSTICS="yes"
+ export RANDOM_UUID="$(uuidgen)"
+
+ if [[ ! -f "$log_file" ]]; then
+ echo "Log file not found: $log_file"
+ return 1
+ fi
+
+ # Extract error codes from log
+ local error_codes=$(grep -o 'exit code [0-9]\+' "$log_file" | grep -o '[0-9]\+' | sort -u)
+
+ if [[ -z "$error_codes" ]]; then
+ echo "No errors found in log"
+ post_update_to_api "success" 0
+ return 0
+ fi
+
+ echo "Found error codes: $error_codes"
+
+ # Report each unique error
+ for code in $error_codes; do
+ local error_msg=$(get_error_description $code)
+ echo "Error $code: $error_msg"
+ post_update_to_api "failed" $code
+ done
+}
+
+# Usage
+analyze_and_report_errors "/var/log/installation.log"
+```
+
+### API Health Check
+
+```bash
+#!/usr/bin/env bash
+source api.func
+
+# Check API connectivity and functionality
+check_api_health() {
+ echo "Checking API health..."
+
+ # Test prerequisites
+ if ! command -v curl >/dev/null 2>&1; then
+ echo "ERROR: curl not available"
+ return 1
+ fi
+
+ # Test error description function
+ local test_error=$(get_error_description 127)
+ if [[ -z "$test_error" ]]; then
+ echo "ERROR: Error description function not working"
+ return 1
+ fi
+
+ echo "Error description test: $test_error"
+
+ # Test API connectivity (without sending data)
+    local api_url="https://api.community-scripts.org/dev/upload"
+ if curl -s --head "$api_url" >/dev/null 2>&1; then
+ echo "API endpoint is reachable"
+ else
+ echo "WARNING: API endpoint not reachable"
+ fi
+
+ echo "API health check completed"
+}
+
+check_api_health
+```
+
+## Integration Examples
+
+### With build.func
+
+```bash
+#!/usr/bin/env bash
+# Integration with build.func
+
+source core.func
+source api.func
+source build.func
+
+# Set up API reporting
+export DIAGNOSTICS="yes"
+export RANDOM_UUID="$(uuidgen)"
+
+# Container creation with API reporting
+create_container_with_reporting() {
+ local app="$1"
+ local ctid="$2"
+
+ # Set container parameters
+ export APP="$app"
+ export CTID="$ctid"
+ export var_hostname="${app}-server"
+ export var_os="debian"
+ export var_version="12"
+ export var_cpu="2"
+ export var_ram="2048"
+ export var_disk="10"
+ export var_net="vmbr0"
+ export var_gateway="192.168.1.1"
+ export var_ip="192.168.1.$ctid"
+ export var_template_storage="local"
+ export var_container_storage="local"
+
+ # Report installation start
+ post_to_api
+
+ # Create container using build.func
+ if source build.func; then
+ echo "Container $app created successfully"
+ post_update_to_api "success" 0
+ return 0
+ else
+ echo "Container $app creation failed"
+ post_update_to_api "failed" $?
+ return 1
+ fi
+}
+
+# Create containers
+create_container_with_reporting "plex" "100"
+create_container_with_reporting "nextcloud" "101"
+```
+
+### With vm-core.func
+
+```bash
+#!/usr/bin/env bash
+# Integration with vm-core.func
+
+source core.func
+source api.func
+source vm-core.func
+
+# Set up VM API reporting
+mkdir -p /usr/local/community-scripts
+echo "DIAGNOSTICS=yes" > /usr/local/community-scripts/diagnostics
+
+export RANDOM_UUID="$(uuidgen)"
+
+# VM creation with API reporting
+create_vm_with_reporting() {
+ local app="$1"
+ local vmid="$2"
+
+ # Set VM parameters
+ export APP="$app"
+ export VMID="$vmid"
+ export var_hostname="${app}-vm"
+ export var_os="ubuntu"
+ export var_version="22.04"
+ export var_cpu="4"
+ export var_ram="4096"
+ export var_disk="20"
+
+ # Report VM installation start
+ post_to_api_vm
+
+ # Create VM using vm-core.func
+ if source vm-core.func; then
+ echo "VM $app created successfully"
+ post_update_to_api "success" 0
+ return 0
+ else
+ echo "VM $app creation failed"
+ post_update_to_api "failed" $?
+ return 1
+ fi
+}
+
+# Create VMs
+create_vm_with_reporting "nextcloud" "200"
+create_vm_with_reporting "wordpress" "201"
+```
+
+### With error_handler.func
+
+```bash
+#!/usr/bin/env bash
+# Integration with error_handler.func
+
+source core.func
+source error_handler.func
+source api.func
+
+# Enhanced error handling with API reporting
+enhanced_error_handler() {
+ local exit_code=${1:-$?}
+ local command=${2:-${BASH_COMMAND:-unknown}}
+
+ # Get error description from api.func
+ local error_msg=$(get_error_description $exit_code)
+
+ # Display error information
+ echo "Error $exit_code: $error_msg"
+ echo "Command: $command"
+
+ # Report error to API
+ export DIAGNOSTICS="yes"
+ export RANDOM_UUID="$(uuidgen)"
+ post_update_to_api "failed" $exit_code
+
+ # Use standard error handler
+ error_handler $exit_code $command
+}
+
+# Set up enhanced error handling
+trap 'enhanced_error_handler' ERR
+
+# Test enhanced error handling
+nonexistent_command
+```
+
+## Best Practices Examples
+
+### Comprehensive API Integration
+
+```bash
+#!/usr/bin/env bash
+# Comprehensive API integration example
+
+source core.func
+source api.func
+
+# Set up comprehensive API reporting
+setup_api_reporting() {
+ # Enable diagnostics
+ export DIAGNOSTICS="yes"
+ export RANDOM_UUID="$(uuidgen)"
+
+ # Set common parameters
+ export CT_TYPE=1
+ export DISK_SIZE=8
+ export CORE_COUNT=2
+ export RAM_SIZE=2048
+ export var_os="debian"
+ export var_version="12"
+ export METHOD="install"
+
+ echo "API reporting configured"
+}
+
+# Installation with comprehensive reporting
+install_with_comprehensive_reporting() {
+ local app="$1"
+ local ctid="$2"
+
+ # Set up API reporting
+ setup_api_reporting
+ export NSAPP="$app"
+
+ # Report installation start
+ post_to_api
+
+ # Installation process
+ echo "Installing $app..."
+
+ # Simulate installation steps
+ local steps=("Downloading" "Installing" "Configuring" "Starting")
+ for step in "${steps[@]}"; do
+ echo "$step $app..."
+ sleep 1
+ done
+
+ # Check installation result
+    rc=$?; if [[ $rc -eq 0 ]]; then
+        echo "$app installation completed successfully"
+        post_update_to_api "success" 0
+        return 0
+    else
+        echo "$app installation failed"
+        post_update_to_api "failed" $rc
+        return 1
+    fi
+}
+
+# Install multiple applications
+apps=("plex" "nextcloud" "nginx" "mysql")
+ctids=(100 101 102 103)
+
+for i in "${!apps[@]}"; do
+ install_with_comprehensive_reporting "${apps[$i]}" "${ctids[$i]}"
+ echo "---"
+done
+```
+
+### Error Recovery with API Reporting
+
+```bash
+#!/usr/bin/env bash
+source api.func
+
+# Error recovery with API reporting
+retry_with_api_reporting() {
+ local operation="$1"
+ local max_attempts=3
+ local attempt=1
+
+ export DIAGNOSTICS="yes"
+ export RANDOM_UUID="$(uuidgen)"
+
+ while [[ $attempt -le $max_attempts ]]; do
+ echo "Attempt $attempt of $max_attempts: $operation"
+
+ if $operation; then
+ echo "Operation succeeded on attempt $attempt"
+ post_update_to_api "success" 0
+ return 0
+ else
+ local exit_code=$?
+ local error_msg=$(get_error_description $exit_code)
+ echo "Attempt $attempt failed: $error_msg"
+
+ post_update_to_api "failed" $exit_code
+
+ ((attempt++))
+
+ if [[ $attempt -le $max_attempts ]]; then
+ echo "Retrying in 5 seconds..."
+ sleep 5
+ fi
+ fi
+ done
+
+ echo "Operation failed after $max_attempts attempts"
+ return 1
+}
+
+# Usage
+retry_with_api_reporting "apt-get update"
+retry_with_api_reporting "apt-get install -y package"
+```
+
+### API Reporting with Logging
+
+```bash
+#!/usr/bin/env bash
+source api.func
+
+# API reporting with detailed logging
+install_with_logging_and_api() {
+ local app="$1"
+ local log_file="/var/log/${app}_installation.log"
+
+ # Set up API reporting
+ export DIAGNOSTICS="yes"
+ export RANDOM_UUID="$(uuidgen)"
+ export NSAPP="$app"
+
+ # Start logging
+ exec > >(tee -a "$log_file")
+ exec 2>&1
+
+ echo "Starting $app installation at $(date)"
+
+ # Report installation start
+ post_to_api
+
+ # Installation process
+ echo "Installing $app..."
+
+ # Simulate installation
+ if install_app "$app"; then
+ echo "$app installation completed successfully at $(date)"
+ post_update_to_api "success" 0
+ return 0
+ else
+ local exit_code=$?
+ local error_msg=$(get_error_description $exit_code)
+ echo "$app installation failed at $(date): $error_msg"
+ post_update_to_api "failed" $exit_code
+ return $exit_code
+ fi
+}
+
+# Mock installation function
+install_app() {
+ local app="$1"
+ echo "Installing $app..."
+ sleep 2
+ return 0
+}
+
+# Install with logging and API reporting
+install_with_logging_and_api "plex"
+```
diff --git a/docs/misc/api.func/README.md b/docs/misc/api.func/README.md
new file mode 100644
index 000000000..6cf90d23d
--- /dev/null
+++ b/docs/misc/api.func/README.md
@@ -0,0 +1,199 @@
+# api.func Documentation
+
+## Overview
+
+The `api.func` file provides Proxmox API integration and diagnostic reporting functionality for the Community Scripts project. It handles API communication, error reporting, and status updates to the community-scripts.org API.
+
+## Purpose and Use Cases
+
+- **API Communication**: Send installation and status data to community-scripts.org API
+- **Diagnostic Reporting**: Report installation progress and errors for analytics
+- **Error Description**: Provide detailed error code explanations
+- **Status Updates**: Track installation success/failure status
+- **Analytics**: Contribute anonymous usage data for project improvement
+
+## Quick Reference
+
+### Key Function Groups
+- **Error Handling**: `get_error_description()` - Convert exit codes to human-readable messages
+- **API Communication**: `post_to_api()`, `post_to_api_vm()` - Send installation data
+- **Status Updates**: `post_update_to_api()` - Report installation completion status
+
+### Dependencies
+- **External**: `curl` command for HTTP requests
+- **Internal**: Uses environment variables from other scripts
+
+### Integration Points
+- Used by: All installation scripts for diagnostic reporting
+- Uses: Environment variables from build.func and other scripts
+- Provides: API communication and error reporting services
+
+## Documentation Files
+
+### 📊 [API_FLOWCHART.md](./API_FLOWCHART.md)
+Visual execution flows showing API communication processes and error handling.
+
+### 📚 [API_FUNCTIONS_REFERENCE.md](./API_FUNCTIONS_REFERENCE.md)
+Complete alphabetical reference of all functions with parameters, dependencies, and usage details.
+
+### 💡 [API_USAGE_EXAMPLES.md](./API_USAGE_EXAMPLES.md)
+Practical examples showing how to use API functions and common patterns.
+
+### 🔗 [API_INTEGRATION.md](./API_INTEGRATION.md)
+How api.func integrates with other components and provides API services.
+
+## Key Features
+
+### Error Code Descriptions
+- **Comprehensive Coverage**: 50+ error codes with detailed explanations
+- **LXC-Specific Errors**: Container creation and management errors
+- **System Errors**: General system and network errors
+- **Signal Errors**: Process termination and signal errors
+
+### API Communication
+- **LXC Reporting**: Send LXC container installation data
+- **VM Reporting**: Send VM installation data
+- **Status Updates**: Report installation success/failure
+- **Diagnostic Data**: Anonymous usage analytics
+
+### Diagnostic Integration
+- **Optional Reporting**: Only sends data when diagnostics enabled
+- **Privacy Respect**: Respects user privacy settings
+- **Error Tracking**: Tracks installation errors for improvement
+- **Usage Analytics**: Contributes to project statistics
+
+## Common Usage Patterns
+
+### Basic API Setup
+```bash
+#!/usr/bin/env bash
+# Basic API setup
+
+source api.func
+
+# Set up diagnostic reporting
+export DIAGNOSTICS="yes"
+export RANDOM_UUID="$(uuidgen)"
+
+# Report installation start
+post_to_api
+```
+
+### Error Reporting
+```bash
+#!/usr/bin/env bash
+source api.func
+
+# Get error description
+error_msg=$(get_error_description 127)
+echo "Error 127: $error_msg"
+# Output: Error 127: Command not found: Incorrect path or missing dependency.
+```
+
+### Status Updates
+```bash
+#!/usr/bin/env bash
+source api.func
+
+# Report successful installation
+post_update_to_api "success" 0
+
+# Report failed installation
+post_update_to_api "failed" 127
+```
+
+## Environment Variables
+
+### Required Variables
+- `DIAGNOSTICS`: Enable/disable diagnostic reporting ("yes"/"no")
+- `RANDOM_UUID`: Unique identifier for tracking
+
+### Optional Variables
+- `CT_TYPE`: Container type (1 for LXC, 2 for VM)
+- `DISK_SIZE`: Disk size in GB
+- `CORE_COUNT`: Number of CPU cores
+- `RAM_SIZE`: RAM size in MB
+- `var_os`: Operating system type
+- `var_version`: OS version
+- `DISABLEIP6`: IPv6 disable setting
+- `NSAPP`: Namespace application name
+- `METHOD`: Installation method
+
+### Internal Variables
+- `POST_UPDATE_DONE`: Prevents duplicate status updates
+- `API_URL`: Community scripts API endpoint
+- `JSON_PAYLOAD`: API request payload
+- `RESPONSE`: API response
+
+## Error Code Categories
+
+### General System Errors
+- **0-9**: Basic system errors
+- **18, 22, 28, 35**: Network and I/O errors
+- **56, 60**: TLS/SSL errors
+- **125-128**: Command execution errors
+- **129-143**: Signal errors
+- **152**: Resource limit errors
+- **255**: Unknown critical errors
+
+### LXC-Specific Errors
+- **100-101**: LXC installation errors
+- **200-209**: LXC creation and management errors
+
+### Docker Errors
+- **125**: Docker container start errors
+
+## Best Practices
+
+### Diagnostic Reporting
+1. Always check if diagnostics are enabled
+2. Respect user privacy settings
+3. Use unique identifiers for tracking
+4. Report both success and failure cases
+
+### Error Handling
+1. Use appropriate error codes
+2. Provide meaningful error descriptions
+3. Handle API communication failures gracefully
+4. Don't block installation on API failures
+
+### API Usage
+1. Check for curl availability
+2. Handle network failures gracefully
+3. Use appropriate HTTP methods
+4. Include all required data
+
+## Troubleshooting
+
+### Common Issues
+1. **API Communication Fails**: Check network connectivity and curl availability
+2. **Diagnostics Not Working**: Verify DIAGNOSTICS setting and RANDOM_UUID
+3. **Missing Error Descriptions**: Check error code coverage
+4. **Duplicate Updates**: POST_UPDATE_DONE prevents duplicates
+
+### Debug Mode
+Enable diagnostic reporting for debugging:
+```bash
+export DIAGNOSTICS="yes"
+export RANDOM_UUID="$(uuidgen)"
+```
+
+### API Testing
+Test API communication:
+```bash
+source api.func
+export DIAGNOSTICS="yes"
+export RANDOM_UUID="test-$(date +%s)"
+post_to_api
+```
+
+## Related Documentation
+
+- [core.func](../core.func/) - Core utilities and error handling
+- [error_handler.func](../error_handler.func/) - Error handling utilities
+- [build.func](../build.func/) - Container creation with API integration
+- [tools.func](../tools.func/) - Extended utilities with API integration
+
+---
+
+*This documentation covers the api.func file which provides API communication and diagnostic reporting for all Proxmox Community Scripts.*
diff --git a/docs/misc/build.func/BUILD_FUNC_ARCHITECTURE.md b/docs/misc/build.func/BUILD_FUNC_ARCHITECTURE.md
new file mode 100644
index 000000000..1d9c5ed22
--- /dev/null
+++ b/docs/misc/build.func/BUILD_FUNC_ARCHITECTURE.md
@@ -0,0 +1,410 @@
+# build.func Architecture Guide
+
+## Overview
+
+This document provides a high-level architectural overview of `build.func`, including module dependencies, data flow, integration points, and system architecture.
+
+## High-Level Architecture
+
+```
+┌─────────────────────────────────────────────────────────────────────────────────┐
+│ Proxmox Host System │
+│ │
+│ ┌─────────────────────────────────────────────────────────────────────────────┐ │
+│ │ build.func │ │
+│ │ │ │
+│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────────────┐ │ │
+│ │ │ Entry Point │ │ Configuration │ │ Container Creation │ │ │
+│ │ │ │ │ │ │ │ │ │
+│ │ │ • start() │ │ • variables() │ │ • build_container() │ │ │
+│ │ │ • install_ │ │ • base_ │ │ • create_lxc_container() │ │ │
+│ │ │ script() │ │ settings() │ │ • configure_gpu_ │ │ │
+│ │ │ • advanced_ │ │ • select_ │ │ passthrough() │ │ │
+│ │ │ settings() │ │ storage() │ │ • fix_gpu_gids() │ │ │
+│ │ └─────────────────┘ └─────────────────┘ └─────────────────────────────┘ │ │
+│ └─────────────────────────────────────────────────────────────────────────────┘ │
+│ │
+│ ┌─────────────────────────────────────────────────────────────────────────────┐ │
+│ │ Module Dependencies │ │
+│ │ │ │
+│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────────────┐ │ │
+│ │ │ core.func │ │ error_handler. │ │ api.func │ │ │
+│ │ │ │ │ func │ │ │ │ │
+│ │ │ • Basic │ │ • Error │ │ • Proxmox API │ │ │
+│ │ │ utilities │ │ handling │ │ interactions │ │ │
+│ │ │ • Common │ │ • Error │ │ • Container │ │ │
+│ │ │ functions │ │ recovery │ │ management │ │ │
+│ │ │ • System │ │ • Cleanup │ │ • Status │ │ │
+│ │ │ utilities │ │ functions │ │ monitoring │ │ │
+│ │ └─────────────────┘ └─────────────────┘ └─────────────────────────────┘ │ │
+│ │ │ │
+│ │ ┌─────────────────────────────────────────────────────────────────────────┐ │ │
+│ │ │ tools.func │ │ │
+│ │ │ │ │ │
+│ │ │ • Additional utilities │ │ │
+│ │ │ • Helper functions │ │ │
+│ │ │ • System tools │ │ │
+│ │ └─────────────────────────────────────────────────────────────────────────┘ │ │
+│ └─────────────────────────────────────────────────────────────────────────────┘ │
+└─────────────────────────────────────────────────────────────────────────────────┘
+```
+
+## Module Dependencies
+
+### Core Dependencies
+
+```
+build.func Dependencies:
+├── core.func
+│ ├── Basic system utilities
+│ ├── Common functions
+│ ├── System information
+│ └── File operations
+├── error_handler.func
+│ ├── Error handling
+│ ├── Error recovery
+│ ├── Cleanup functions
+│ └── Error logging
+├── api.func
+│ ├── Proxmox API interactions
+│ ├── Container management
+│ ├── Status monitoring
+│ └── Configuration updates
+└── tools.func
+ ├── Additional utilities
+ ├── Helper functions
+ ├── System tools
+ └── Custom functions
+```
+
+### Dependency Flow
+
+```
+Dependency Flow:
+├── build.func
+│ ├── Sources core.func
+│ ├── Sources error_handler.func
+│ ├── Sources api.func
+│ └── Sources tools.func
+├── core.func
+│ ├── Basic utilities
+│ └── System functions
+├── error_handler.func
+│ ├── Error management
+│ └── Recovery functions
+├── api.func
+│ ├── Proxmox integration
+│ └── Container operations
+└── tools.func
+ ├── Additional tools
+ └── Helper functions
+```
+
+## Data Flow Architecture
+
+### Configuration Data Flow
+
+```
+Configuration Data Flow:
+├── Environment Variables
+│ ├── Hard environment variables
+│ ├── App-specific .vars
+│ ├── Global default.vars
+│ └── Built-in defaults
+├── Variable Resolution
+│ ├── Apply precedence chain
+│ ├── Validate settings
+│ └── Resolve conflicts
+├── Configuration Storage
+│ ├── Memory variables
+│ ├── Temporary files
+│ └── Persistent storage
+└── Configuration Usage
+ ├── Container creation
+ ├── Feature configuration
+ └── Settings persistence
+```
+
+### Container Data Flow
+
+```
+Container Data Flow:
+├── Input Data
+│ ├── Configuration variables
+│ ├── Resource specifications
+│ ├── Network settings
+│ └── Storage requirements
+├── Processing
+│ ├── Validation
+│ ├── Conflict resolution
+│ ├── Resource allocation
+│ └── Configuration generation
+├── Container Creation
+│ ├── LXC container creation
+│ ├── Network configuration
+│ ├── Storage setup
+│ └── Feature configuration
+└── Output
+ ├── Container status
+ ├── Access information
+ ├── Configuration files
+ └── Log files
+```
+
+## Integration Architecture
+
+### With Proxmox System
+
+```
+Proxmox Integration:
+├── Proxmox Host
+│ ├── LXC container management
+│ ├── Storage management
+│ ├── Network management
+│ └── Resource management
+├── Proxmox API
+│ ├── Container operations
+│ ├── Configuration updates
+│ ├── Status monitoring
+│ └── Error handling
+├── Proxmox Configuration
+│   ├── /etc/pve/lxc/<ctid>.conf
+│ ├── Storage configuration
+│ ├── Network configuration
+│ └── Resource configuration
+└── Proxmox Services
+ ├── Container services
+ ├── Network services
+ ├── Storage services
+ └── Monitoring services
+```
+
+### With Install Scripts
+
+```
+Install Script Integration:
+├── build.func
+│ ├── Creates container
+│ ├── Configures basic settings
+│ ├── Starts container
+│ └── Provides access
+├── Install Scripts
+│   ├── <app>-install.sh
+│ ├── Downloads application
+│ ├── Configures application
+│ └── Sets up services
+├── Container
+│ ├── Running application
+│ ├── Configured services
+│ ├── Network access
+│ └── Storage access
+└── Integration Points
+ ├── Container creation
+ ├── Network configuration
+ ├── Storage setup
+ └── Service configuration
+```
+
+## System Architecture Components
+
+### Core Components
+
+```
+System Components:
+├── Entry Point
+│ ├── start() function
+│ ├── Context detection
+│ ├── Environment capture
+│ └── Workflow routing
+├── Configuration Management
+│ ├── Variable resolution
+│ ├── Settings persistence
+│ ├── Default management
+│ └── Validation
+├── Container Creation
+│ ├── LXC container creation
+│ ├── Network configuration
+│ ├── Storage setup
+│ └── Feature configuration
+├── Hardware Integration
+│ ├── GPU passthrough
+│ ├── USB passthrough
+│ ├── Storage management
+│ └── Network management
+└── Error Handling
+ ├── Error detection
+ ├── Error recovery
+ ├── Cleanup functions
+ └── User notification
+```
+
+### User Interface Components
+
+```
+UI Components:
+├── Menu System
+│ ├── Installation mode selection
+│ ├── Configuration menus
+│ ├── Storage selection
+│ └── GPU configuration
+├── Interactive Elements
+│ ├── Whiptail menus
+│ ├── User prompts
+│ ├── Confirmation dialogs
+│ └── Error messages
+├── Non-Interactive Mode
+│ ├── Environment variable driven
+│ ├── Silent execution
+│ ├── Automated configuration
+│ └── Error handling
+└── Output
+ ├── Status messages
+ ├── Progress indicators
+ ├── Completion information
+ └── Access details
+```
+
+## Security Architecture
+
+### Security Considerations
+
+```
+Security Architecture:
+├── Container Security
+│ ├── Unprivileged containers (default)
+│ ├── Privileged containers (when needed)
+│ ├── Resource limits
+│ └── Access controls
+├── Network Security
+│ ├── Network isolation
+│ ├── VLAN support
+│ ├── Firewall integration
+│ └── Access controls
+├── Storage Security
+│ ├── Storage isolation
+│ ├── Access controls
+│ ├── Encryption support
+│ └── Backup integration
+├── GPU Security
+│ ├── Device isolation
+│ ├── Permission management
+│ ├── Access controls
+│ └── Security validation
+└── API Security
+ ├── Authentication
+ ├── Authorization
+ ├── Input validation
+ └── Error handling
+```
+
+## Performance Architecture
+
+### Performance Considerations
+
+```
+Performance Architecture:
+├── Execution Optimization
+│ ├── Parallel operations
+│ ├── Efficient algorithms
+│ ├── Minimal user interaction
+│ └── Optimized validation
+├── Resource Management
+│ ├── Memory efficiency
+│ ├── CPU optimization
+│ ├── Disk usage optimization
+│ └── Network efficiency
+├── Caching
+│ ├── Configuration caching
+│ ├── Template caching
+│ ├── Storage caching
+│ └── GPU detection caching
+└── Monitoring
+ ├── Performance monitoring
+ ├── Resource monitoring
+ ├── Error monitoring
+ └── Status monitoring
+```
+
+## Deployment Architecture
+
+### Deployment Scenarios
+
+```
+Deployment Scenarios:
+├── Single Container
+│ ├── Individual application
+│ ├── Standard configuration
+│ ├── Basic networking
+│ └── Standard storage
+├── Multiple Containers
+│ ├── Application stack
+│ ├── Shared networking
+│ ├── Shared storage
+│ └── Coordinated deployment
+├── High Availability
+│ ├── Redundant containers
+│ ├── Load balancing
+│ ├── Failover support
+│ └── Monitoring integration
+└── Development Environment
+ ├── Development containers
+ ├── Testing containers
+ ├── Staging containers
+ └── Production containers
+```
+
+## Maintenance Architecture
+
+### Maintenance Components
+
+```
+Maintenance Architecture:
+├── Updates
+│ ├── Container updates
+│ ├── Application updates
+│ ├── Configuration updates
+│ └── Security updates
+├── Monitoring
+│ ├── Container monitoring
+│ ├── Resource monitoring
+│ ├── Performance monitoring
+│ └── Error monitoring
+├── Backup
+│ ├── Configuration backup
+│ ├── Container backup
+│ ├── Storage backup
+│ └── Recovery procedures
+└── Troubleshooting
+ ├── Error diagnosis
+ ├── Log analysis
+ ├── Performance analysis
+ └── Recovery procedures
+```
+
+## Future Architecture Considerations
+
+### Scalability
+
+```
+Scalability Considerations:
+├── Horizontal Scaling
+│ ├── Multiple containers
+│ ├── Load balancing
+│ ├── Distributed deployment
+│ └── Resource distribution
+├── Vertical Scaling
+│ ├── Resource scaling
+│ ├── Performance optimization
+│ ├── Capacity planning
+│ └── Resource management
+├── Automation
+│ ├── Automated deployment
+│ ├── Automated scaling
+│ ├── Automated monitoring
+│ └── Automated recovery
+└── Integration
+ ├── External systems
+ ├── Cloud integration
+ ├── Container orchestration
+ └── Service mesh
+```
diff --git a/docs/misc/build.func/BUILD_FUNC_ENVIRONMENT_VARIABLES.md b/docs/misc/build.func/BUILD_FUNC_ENVIRONMENT_VARIABLES.md
new file mode 100644
index 000000000..b116d3106
--- /dev/null
+++ b/docs/misc/build.func/BUILD_FUNC_ENVIRONMENT_VARIABLES.md
@@ -0,0 +1,248 @@
+# build.func Environment Variables Reference
+
+## Overview
+
+This document provides a comprehensive reference of all environment variables used in `build.func`, organized by category and usage context.
+
+## Variable Categories
+
+### Core Container Variables
+
+| Variable | Description | Default | Set In | Used In |
+|----------|-------------|---------|---------|---------|
+| `APP` | Application name (e.g., "plex", "nextcloud") | - | Environment | Throughout |
+| `NSAPP` | Namespace application name | `$APP` | Environment | Throughout |
+| `CTID` | Container ID | - | Environment | Container creation |
+| `CT_TYPE` | Container type ("install" or "update") | "install" | Environment | Entry point |
+| `CT_NAME` | Container name | `$APP` | Environment | Container creation |
+
+### Operating System Variables
+
+| Variable | Description | Default | Set In | Used In |
+|----------|-------------|---------|---------|---------|
+| `var_os` | Operating system selection | "debian" | base_settings() | OS selection |
+| `var_version` | OS version | "12" | base_settings() | Template selection |
+| `var_template` | Template name | Auto-generated | base_settings() | Template download |
+
+### Resource Configuration Variables
+
+| Variable | Description | Default | Set In | Used In |
+|----------|-------------|---------|---------|---------|
+| `var_cpu` | CPU cores | "2" | base_settings() | Container creation |
+| `var_ram` | RAM in MB | "2048" | base_settings() | Container creation |
+| `var_disk` | Disk size in GB | "8" | base_settings() | Container creation |
+| `DISK_SIZE` | Disk size (alternative) | `$var_disk` | Environment | Container creation |
+| `CORE_COUNT` | CPU cores (alternative) | `$var_cpu` | Environment | Container creation |
+| `RAM_SIZE` | RAM size (alternative) | `$var_ram` | Environment | Container creation |
+
+### Network Configuration Variables
+
+| Variable | Description | Default | Set In | Used In |
+|----------|-------------|---------|---------|---------|
+| `var_net` | Network interface | "vmbr0" | base_settings() | Network config |
+| `var_bridge` | Bridge interface | "vmbr0" | base_settings() | Network config |
+| `var_gateway` | Gateway IP | "192.168.1.1" | base_settings() | Network config |
+| `var_ip` | Container IP address | - | User input | Network config |
+| `var_ipv6` | IPv6 address | - | User input | Network config |
+| `var_vlan` | VLAN ID | - | User input | Network config |
+| `var_mtu` | MTU size | "1500" | base_settings() | Network config |
+| `var_mac` | MAC address | Auto-generated | base_settings() | Network config |
+| `NET` | Network interface (alternative) | `$var_net` | Environment | Network config |
+| `BRG` | Bridge interface (alternative) | `$var_bridge` | Environment | Network config |
+| `GATE` | Gateway IP (alternative) | `$var_gateway` | Environment | Network config |
+| `IPV6_METHOD` | IPv6 configuration method | "none" | Environment | Network config |
+| `VLAN` | VLAN ID (alternative) | `$var_vlan` | Environment | Network config |
+| `MTU` | MTU size (alternative) | `$var_mtu` | Environment | Network config |
+| `MAC` | MAC address (alternative) | `$var_mac` | Environment | Network config |
+
+### Storage Configuration Variables
+
+| Variable | Description | Default | Set In | Used In |
+|----------|-------------|---------|---------|---------|
+| `var_template_storage` | Storage for templates | - | select_storage() | Template storage |
+| `var_container_storage` | Storage for container disks | - | select_storage() | Container storage |
+| `TEMPLATE_STORAGE` | Template storage (alternative) | `$var_template_storage` | Environment | Template storage |
+| `CONTAINER_STORAGE` | Container storage (alternative) | `$var_container_storage` | Environment | Container storage |
+
+### Feature Flags
+
+| Variable | Description | Default | Set In | Used In |
+|----------|-------------|---------|---------|---------|
+| `ENABLE_FUSE` | Enable FUSE support | "true" | base_settings() | Container features |
+| `ENABLE_TUN` | Enable TUN/TAP support | "true" | base_settings() | Container features |
+| `ENABLE_KEYCTL` | Enable keyctl support | "true" | base_settings() | Container features |
+| `ENABLE_MOUNT` | Enable mount support | "true" | base_settings() | Container features |
+| `ENABLE_NESTING` | Enable nesting support | "false" | base_settings() | Container features |
+| `ENABLE_PRIVILEGED` | Enable privileged mode | "false" | base_settings() | Container features |
+| `ENABLE_UNPRIVILEGED` | Enable unprivileged mode | "true" | base_settings() | Container features |
+| `VERBOSE` | Enable verbose output | "false" | Environment | Logging |
+| `SSH` | Enable SSH key provisioning | "true" | base_settings() | SSH setup |
+
+### GPU Passthrough Variables
+
+| Variable | Description | Default | Set In | Used In |
+|----------|-------------|---------|---------|---------|
+| `GPU_APPS` | List of apps that support GPU | - | Environment | GPU detection |
+| `var_gpu` | GPU selection | - | User input | GPU passthrough |
+| `var_gpu_type` | GPU type (intel/amd/nvidia) | - | detect_gpu_devices() | GPU passthrough |
+| `var_gpu_devices` | GPU device list | - | detect_gpu_devices() | GPU passthrough |
+
+### API and Diagnostics Variables
+
+| Variable | Description | Default | Set In | Used In |
+|----------|-------------|---------|---------|---------|
+| `DIAGNOSTICS` | Enable diagnostics mode | "false" | Environment | Diagnostics |
+| `METHOD` | Installation method | "install" | Environment | Installation flow |
+| `RANDOM_UUID` | Random UUID for tracking | - | Environment | Logging |
+| `API_TOKEN` | Proxmox API token | - | Environment | API calls |
+| `API_USER` | Proxmox API user | - | Environment | API calls |
+
+### Settings Persistence Variables
+
+| Variable | Description | Default | Set In | Used In |
+|----------|-------------|---------|---------|---------|
+| `SAVE_DEFAULTS` | Save settings as defaults | "false" | User input | Settings persistence |
+| `SAVE_APP_DEFAULTS` | Save app-specific defaults | "false" | User input | Settings persistence |
+| `DEFAULT_VARS_FILE` | Path to default.vars | "/usr/local/community-scripts/default.vars" | Environment | Settings persistence |
+| `APP_DEFAULTS_FILE` | Path to app.vars | "/usr/local/community-scripts/defaults/$APP.vars" | Environment | Settings persistence |
+
+## Variable Precedence Chain
+
+Variables are resolved in the following order (highest to lowest priority):
+
+1. **Hard Environment Variables**: Set before script execution
+2. **App-specific .vars file**: `/usr/local/community-scripts/defaults/$APP.vars`
+3. **Global default.vars file**: `/usr/local/community-scripts/default.vars`
+4. **Built-in defaults**: Set in `base_settings()` function
+
+## Critical Variables for Non-Interactive Use
+
+For silent/non-interactive execution, these variables must be set:
+
+```bash
+# Core container settings
+export APP="plex"
+export CTID="100"
+export var_hostname="plex-server"
+
+# OS selection
+export var_os="debian"
+export var_version="12"
+
+# Resource allocation
+export var_cpu="4"
+export var_ram="4096"
+export var_disk="20"
+
+# Network configuration
+export var_net="vmbr0"
+export var_gateway="192.168.1.1"
+export var_ip="192.168.1.100"
+
+# Storage selection
+export var_template_storage="local"
+export var_container_storage="local"
+
+# Feature flags
+export ENABLE_FUSE="true"
+export ENABLE_TUN="true"
+export SSH="true"
+```
+
+## Environment Variable Usage Patterns
+
+### 1. Container Creation
+```bash
+# Basic container creation
+export APP="nextcloud"
+export CTID="101"
+export var_hostname="nextcloud-server"
+export var_os="debian"
+export var_version="12"
+export var_cpu="2"
+export var_ram="2048"
+export var_disk="10"
+export var_net="vmbr0"
+export var_gateway="192.168.1.1"
+export var_ip="192.168.1.101"
+export var_template_storage="local"
+export var_container_storage="local"
+```
+
+### 2. GPU Passthrough
+```bash
+# Enable GPU passthrough
+export GPU_APPS="plex,jellyfin,emby"
+export var_gpu="intel"
+export ENABLE_PRIVILEGED="true"
+```
+
+### 3. Advanced Network Configuration
+```bash
+# VLAN and IPv6 configuration
+export var_vlan="100"
+export var_ipv6="2001:db8::100"
+export IPV6_METHOD="static"
+export var_mtu="9000"
+```
+
+### 4. Storage Configuration
+```bash
+# Custom storage locations
+export var_template_storage="nfs-storage"
+export var_container_storage="ssd-storage"
+```
+
+## Variable Validation
+
+The script validates variables at several points:
+
+1. **Container ID validation**: Must be unique and within valid range
+2. **IP address validation**: Must be valid IPv4/IPv6 format
+3. **Storage validation**: Must exist and support required content types
+4. **Resource validation**: Must be within reasonable limits
+5. **Network validation**: Must be valid network configuration
+
+## Common Variable Combinations
+
+### Development Container
+```bash
+export APP="dev-container"
+export CTID="200"
+export var_hostname="dev-server"
+export var_os="ubuntu"
+export var_version="22.04"
+export var_cpu="4"
+export var_ram="4096"
+export var_disk="20"
+export ENABLE_NESTING="true"
+export ENABLE_PRIVILEGED="true"
+```
+
+### Media Server with GPU
+```bash
+export APP="plex"
+export CTID="300"
+export var_hostname="plex-server"
+export var_os="debian"
+export var_version="12"
+export var_cpu="6"
+export var_ram="8192"
+export var_disk="50"
+export GPU_APPS="plex"
+export var_gpu="nvidia"
+export ENABLE_PRIVILEGED="true"
+```
+
+### Lightweight Service
+```bash
+export APP="nginx"
+export CTID="400"
+export var_hostname="nginx-proxy"
+export var_os="alpine"
+export var_version="3.18"
+export var_cpu="1"
+export var_ram="512"
+export var_disk="2"
+export ENABLE_UNPRIVILEGED="true"
+```
diff --git a/docs/misc/build.func/BUILD_FUNC_EXECUTION_FLOWS.md b/docs/misc/build.func/BUILD_FUNC_EXECUTION_FLOWS.md
new file mode 100644
index 000000000..47a0035e2
--- /dev/null
+++ b/docs/misc/build.func/BUILD_FUNC_EXECUTION_FLOWS.md
@@ -0,0 +1,413 @@
+# build.func Execution Flows
+
+## Overview
+
+This document details the execution flows for different installation modes and scenarios in `build.func`, including variable precedence, decision trees, and workflow patterns.
+
+## Installation Modes
+
+### 1. Default Install Flow
+
+**Purpose**: Uses built-in defaults with minimal user interaction
+**Use Case**: Quick container creation with standard settings
+
+```
+Default Install Flow:
+├── start()
+│ ├── Detect execution context
+│ ├── Capture hard environment variables
+│ └── Set CT_TYPE="install"
+├── install_script()
+│ ├── Display installation mode menu
+│ ├── User selects "Default Install"
+│ └── Proceed with defaults
+├── variables()
+│ ├── base_settings() # Set built-in defaults
+│ ├── Load app.vars (if exists)
+│ ├── Load default.vars (if exists)
+│ └── Apply variable precedence
+├── build_container()
+│ ├── validate_settings()
+│ ├── check_conflicts()
+│ └── create_lxc_container()
+└── default_var_settings()
+ └── Offer to save as defaults
+```
+
+**Key Characteristics**:
+- Minimal user prompts
+- Uses built-in defaults
+- Fast execution
+- Suitable for standard deployments
+
+### 2. Advanced Install Flow
+
+**Purpose**: Full interactive configuration via whiptail menus
+**Use Case**: Custom container configuration with full control
+
+```
+Advanced Install Flow:
+├── start()
+│ ├── Detect execution context
+│ ├── Capture hard environment variables
+│ └── Set CT_TYPE="install"
+├── install_script()
+│ ├── Display installation mode menu
+│ ├── User selects "Advanced Install"
+│ └── Proceed with advanced configuration
+├── variables()
+│ ├── base_settings() # Set built-in defaults
+│ ├── Load app.vars (if exists)
+│ ├── Load default.vars (if exists)
+│ └── Apply variable precedence
+├── advanced_settings()
+│ ├── OS Selection Menu
+│ ├── Resource Configuration Menu
+│ ├── Network Configuration Menu
+│ ├── select_storage()
+│ │ ├── resolve_storage_preselect()
+│ │ └── choose_and_set_storage_for_file()
+│ ├── GPU Configuration Menu
+│ │ └── detect_gpu_devices()
+│ └── Feature Flags Menu
+├── build_container()
+│ ├── validate_settings()
+│ ├── check_conflicts()
+│ └── create_lxc_container()
+└── default_var_settings()
+ └── Offer to save as defaults
+```
+
+**Key Characteristics**:
+- Full interactive configuration
+- Whiptail menus for all options
+- Complete control over settings
+- Suitable for custom deployments
+
+### 3. My Defaults Flow
+
+**Purpose**: Loads settings from global default.vars file
+**Use Case**: Using previously saved global defaults
+
+```
+My Defaults Flow:
+├── start()
+│ ├── Detect execution context
+│ ├── Capture hard environment variables
+│ └── Set CT_TYPE="install"
+├── install_script()
+│ ├── Display installation mode menu
+│ ├── User selects "My Defaults"
+│ └── Proceed with loaded defaults
+├── variables()
+│ ├── base_settings() # Set built-in defaults
+│ ├── Load app.vars (if exists)
+│ ├── Load default.vars # Load global defaults
+│ └── Apply variable precedence
+├── build_container()
+│ ├── validate_settings()
+│ ├── check_conflicts()
+│ └── create_lxc_container()
+└── default_var_settings()
+ └── Offer to save as defaults
+```
+
+**Key Characteristics**:
+- Uses global default.vars file
+- Minimal user interaction
+- Consistent with previous settings
+- Suitable for repeated deployments
+
+### 4. App Defaults Flow
+
+**Purpose**: Loads settings from app-specific .vars file
+**Use Case**: Using previously saved app-specific defaults
+
+```
+App Defaults Flow:
+├── start()
+│ ├── Detect execution context
+│ ├── Capture hard environment variables
+│ └── Set CT_TYPE="install"
+├── install_script()
+│ ├── Display installation mode menu
+│ ├── User selects "App Defaults"
+│ └── Proceed with app-specific defaults
+├── variables()
+│ ├── base_settings() # Set built-in defaults
+│ ├── Load app.vars # Load app-specific defaults
+│ ├── Load default.vars (if exists)
+│ └── Apply variable precedence
+├── build_container()
+│ ├── validate_settings()
+│ ├── check_conflicts()
+│ └── create_lxc_container()
+└── default_var_settings()
+ └── Offer to save as defaults
+```
+
+**Key Characteristics**:
+- Uses app-specific .vars file
+- Minimal user interaction
+- App-optimized settings
+- Suitable for app-specific deployments
+
+## Variable Precedence Chain
+
+### Precedence Order (Highest to Lowest)
+
+1. **Hard Environment Variables**: Set before script execution
+2. **App-specific .vars file**: `/usr/local/community-scripts/defaults/$APP.vars`
+3. **Global default.vars file**: `/usr/local/community-scripts/default.vars`
+4. **Built-in defaults**: Set in `base_settings()` function
+
+### Variable Resolution Process
+
+```
+Variable Resolution:
+├── Capture hard environment variables at start()
+├── Load built-in defaults in base_settings()
+├── Load global default.vars (if exists)
+├── Load app-specific .vars (if exists)
+└── Apply precedence chain
+ ├── Hard env vars override all
+ ├── App.vars override default.vars and built-ins
+ ├── Default.vars override built-ins
+ └── Built-ins are fallback defaults
+```
+
+## Storage Selection Logic
+
+### Storage Resolution Flow
+
+```
+Storage Selection:
+├── Check if storage is preselected
+│ ├── var_template_storage set? → Validate and use
+│ └── var_container_storage set? → Validate and use
+├── Count available storage options
+│ ├── Only 1 option → Auto-select
+│ └── Multiple options → Prompt user
+├── User selection via whiptail
+│ ├── Template storage selection
+│ └── Container storage selection
+└── Validate selected storage
+ ├── Check availability
+ ├── Check content type support
+ └── Proceed with selection
+```
+
+### Storage Validation
+
+```
+Storage Validation:
+├── Check storage exists
+├── Check storage is online
+├── Check content type support
+│ ├── Template storage: vztmpl support
+│ └── Container storage: rootdir support
+├── Check available space
+└── Validate permissions
+```
+
+## GPU Passthrough Flow
+
+### GPU Detection and Configuration
+
+```
+GPU Passthrough Flow:
+├── detect_gpu_devices()
+│ ├── Scan for Intel GPUs
+│ │ ├── Check i915 driver
+│ │ └── Detect devices
+│ ├── Scan for AMD GPUs
+│ │ ├── Check AMDGPU driver
+│ │ └── Detect devices
+│ └── Scan for NVIDIA GPUs
+│ ├── Check NVIDIA driver
+│ ├── Detect devices
+│ └── Check CUDA support
+├── Check GPU passthrough eligibility
+│ ├── Is app in GPU_APPS list?
+│ ├── Is container privileged?
+│ └── Proceed if eligible
+├── GPU selection logic
+│ ├── Single GPU type → Auto-select
+│ └── Multiple GPU types → Prompt user
+├── configure_gpu_passthrough()
+│ ├── Add GPU device entries
+│ ├── Configure permissions
+│ └── Update container config
+└── fix_gpu_gids()
+ ├── Update GPU group IDs
+ └── Configure access permissions
+```
+
+### GPU Eligibility Check
+
+```
+GPU Eligibility:
+├── Check app support
+│ ├── Is APP in GPU_APPS list?
+│ └── Proceed if supported
+├── Check container privileges
+│ ├── Is ENABLE_PRIVILEGED="true"?
+│ └── Proceed if privileged
+└── Check hardware availability
+ ├── Are GPUs detected?
+ └── Proceed if available
+```
+
+## Network Configuration Flow
+
+### Network Setup Process
+
+```
+Network Configuration:
+├── Basic network settings
+│ ├── var_net (network interface)
+│ ├── var_bridge (bridge interface)
+│ └── var_gateway (gateway IP)
+├── IP configuration
+│ ├── var_ip (IPv4 address)
+│ ├── var_ipv6 (IPv6 address)
+│ └── IPV6_METHOD (IPv6 method)
+├── Advanced network settings
+│ ├── var_vlan (VLAN ID)
+│ ├── var_mtu (MTU size)
+│ └── var_mac (MAC address)
+└── Network validation
+ ├── Check IP format
+ ├── Check gateway reachability
+ └── Validate network configuration
+```
+
+## Container Creation Flow
+
+### LXC Container Creation Process
+
+```
+Container Creation:
+├── create_lxc_container()
+│ ├── Create basic container
+│ ├── Configure network
+│ ├── Set up storage
+│ ├── Configure features
+│ ├── Set resource limits
+│ ├── Configure startup
+│ └── Start container
+├── Post-creation configuration
+│ ├── Wait for network
+│ ├── Configure GPU (if enabled)
+│ ├── Set up SSH keys
+│ └── Run post-install scripts
+└── Finalization
+ ├── Display container info
+ ├── Show access details
+ └── Provide next steps
+```
+
+## Error Handling Flows
+
+### Validation Error Flow
+
+```
+Validation Error Flow:
+├── validate_settings()
+│ ├── Check configuration validity
+│ └── Return error if invalid
+├── check_conflicts()
+│ ├── Check for conflicts
+│ └── Return error if conflicts found
+├── Error handling
+│ ├── Display error message
+│ ├── cleanup_on_error()
+│ └── Exit with error code
+└── User notification
+ ├── Show error details
+ └── Suggest fixes
+```
+
+### Storage Error Flow
+
+```
+Storage Error Flow:
+├── Storage selection fails
+├── Retry storage selection
+│ ├── Show available options
+│ └── Allow user to retry
+├── Storage validation fails
+│ ├── Show validation errors
+│ └── Allow user to fix
+└── Fallback to default storage
+ ├── Use fallback storage
+ └── Continue with creation
+```
+
+### GPU Error Flow
+
+```
+GPU Error Flow:
+├── GPU detection fails
+├── Fall back to no GPU
+│ ├── Disable GPU passthrough
+│ └── Continue without GPU
+├── GPU configuration fails
+│ ├── Show configuration errors
+│ └── Allow user to retry
+└── GPU permission errors
+ ├── Fix GPU permissions
+ └── Retry configuration
+```
+
+## Integration Flows
+
+### With Install Scripts
+
+```
+Install Script Integration:
+├── build.func creates container
+├── Container starts successfully
+├── Install script execution
+│ ├── Download and install app
+│ ├── Configure app settings
+│ └── Set up services
+└── Post-installation configuration
+ ├── Verify installation
+ ├── Configure access
+ └── Display completion info
+```
+
+### With Proxmox API
+
+```
+Proxmox API Integration:
+├── API authentication
+├── Container creation via API
+├── Configuration updates via API
+├── Status monitoring via API
+└── Error handling via API
+```
+
+## Performance Considerations
+
+### Execution Time Optimization
+
+```
+Performance Optimization:
+├── Parallel operations where possible
+├── Minimal user interaction in default mode
+├── Efficient storage selection
+├── Optimized GPU detection
+└── Streamlined validation
+```
+
+### Resource Usage
+
+```
+Resource Usage:
+├── Minimal memory footprint
+├── Efficient disk usage
+├── Optimized network usage
+└── Minimal CPU overhead
+```
diff --git a/docs/misc/build.func/BUILD_FUNC_FLOWCHART.md b/docs/misc/build.func/BUILD_FUNC_FLOWCHART.md
new file mode 100644
index 000000000..e406f46fd
--- /dev/null
+++ b/docs/misc/build.func/BUILD_FUNC_FLOWCHART.md
@@ -0,0 +1,244 @@
+# build.func Execution Flowchart
+
+## Main Execution Flow
+
+```
+┌─────────────────────────────────────────────────────────────────────────────────┐
+│ START() │
+│ Entry point when build.func is sourced or executed │
+└─────────────────────┬───────────────────────────────────────────────────────────┘
+ │
+ ▼
+┌─────────────────────────────────────────────────────────────────────────────────┐
+│ Check Environment │
+│ • Detect if running on Proxmox host vs inside container │
+│ • Capture hard environment variables │
+│ • Set CT_TYPE based on context │
+└─────────────────────┬───────────────────────────────────────────────────────────┘
+ │
+ ▼
+┌─────────────────────────────────────────────────────────────────────────────────┐
+│ Determine Action │
+│ • If CT_TYPE="update" → update_script() │
+│ • If CT_TYPE="install" → install_script() │
+└─────────────────────┬───────────────────────────────────────────────────────────┘
+ │
+ ▼
+┌─────────────────────────────────────────────────────────────────────────────────┐
+│ INSTALL_SCRIPT() │
+│ Main container creation workflow │
+└─────────────────────┬───────────────────────────────────────────────────────────┘
+ │
+ ▼
+┌─────────────────────────────────────────────────────────────────────────────────┐
+│ Installation Mode Selection │
+│ │
+│ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────┐ │
+│ │ Default │ │ Advanced │ │ My Defaults │ │ App Defaults│ │
+│ │ Install │ │ Install │ │ │ │ │ │
+│ │ │ │ │ │ │ │ │ │
+│ │ • Use built-in │ │ • Full whiptail │ │ • Load from │ │ • Load from │ │
+│ │ defaults │ │ menus │ │ default.vars │ │ app.vars │ │
+│ │ • Minimal │ │ • Interactive │ │ • Override │ │ • App- │ │
+│ │ prompts │ │ configuration │ │ built-ins │ │ specific │ │
+│ └─────────────────┘ └─────────────────┘ └─────────────────┘ └─────────────┘ │
+└─────────────────────┬───────────────────────────────────────────────────────────┘
+ │
+ ▼
+┌─────────────────────────────────────────────────────────────────────────────────┐
+│ VARIABLES() │
+│ • Load variable precedence chain: │
+│ 1. Hard environment variables │
+│ 2. App-specific .vars file │
+│ 3. Global default.vars file │
+│ 4. Built-in defaults in base_settings() │
+└─────────────────────┬───────────────────────────────────────────────────────────┘
+ │
+ ▼
+┌─────────────────────────────────────────────────────────────────────────────────┐
+│ BASE_SETTINGS() │
+│ • Set core container parameters │
+│ • Configure OS selection │
+│ • Set resource defaults (CPU, RAM, Disk) │
+│ • Configure network defaults │
+└─────────────────────┬───────────────────────────────────────────────────────────┘
+ │
+ ▼
+┌─────────────────────────────────────────────────────────────────────────────────┐
+│ Storage Selection Logic │
+│ │
+│ ┌─────────────────────────────────────────────────────────────────────────────┐ │
+│ │ SELECT_STORAGE() │ │
+│ │ │ │
+│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────────┐ │ │
+│ │ │ Template │ │ Container │ │ Resolution │ │ │
+│ │ │ Storage │ │ Storage │ │ Logic │ │ │
+│ │ │ │ │ │ │ │ │ │
+│ │ │ • Check if │ │ • Check if │ │ 1. Only 1 storage │ │ │
+│ │ │ preselected │ │ preselected │ │ → Auto-select │ │ │
+│ │ │ • Validate │ │ • Validate │ │ 2. Preselected │ │ │
+│ │ │ availability │ │ availability │ │ → Validate & use │ │ │
+│ │ │ • Prompt if │ │ • Prompt if │ │ 3. Multiple options │ │ │
+│ │ │ needed │ │ needed │ │ → Prompt user │ │ │
+│ │ └─────────────────┘ └─────────────────┘ └─────────────────────────┘ │ │
+│ └─────────────────────────────────────────────────────────────────────────────┘ │
+└─────────────────────┬───────────────────────────────────────────────────────────┘
+ │
+ ▼
+┌─────────────────────────────────────────────────────────────────────────────────┐
+│ BUILD_CONTAINER() │
+│ • Validate all settings │
+│ • Check for conflicts │
+│ • Prepare container configuration │
+└─────────────────────┬───────────────────────────────────────────────────────────┘
+ │
+ ▼
+┌─────────────────────────────────────────────────────────────────────────────────┐
+│ CREATE_LXC_CONTAINER() │
+│ │
+│ ┌─────────────────────────────────────────────────────────────────────────────┐ │
+│ │ Container Creation Process │ │
+│ │ │ │
+│ │ 1. Create LXC container with basic configuration │ │
+│ │ 2. Configure network settings │ │
+│ │ 3. Set up storage and mount points │ │
+│ │ 4. Configure features (FUSE, TUN, etc.) │ │
+│ │ 5. Set resource limits │ │
+│ │ 6. Configure startup options │ │
+│ └─────────────────────────────────────────────────────────────────────────────┘ │
+└─────────────────────┬───────────────────────────────────────────────────────────┘
+ │
+ ▼
+┌─────────────────────────────────────────────────────────────────────────────────┐
+│ GPU Passthrough Decision Tree │
+│ │
+│ ┌─────────────────────────────────────────────────────────────────────────────┐ │
+│ │ DETECT_GPU_DEVICES() │ │
+│ │ │ │
+│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────────┐ │ │
+│ │ │ Intel GPU │ │ AMD GPU │ │ NVIDIA GPU │ │ │
+│ │ │ │ │ │ │ │ │ │
+│ │ │ • Check i915 │ │ • Check AMDGPU │ │ • Check NVIDIA │ │ │
+│ │ │ driver │ │ driver │ │ driver │ │ │
+│ │ │ • Detect │ │ • Detect │ │ • Detect devices │ │ │
+│ │ │ devices │ │ devices │ │ • Check CUDA support │ │ │
+│ │ └─────────────────┘ └─────────────────┘ └─────────────────────────┘ │ │
+│ └─────────────────────────────────────────────────────────────────────────────┘ │
+│ │
+│ ┌─────────────────────────────────────────────────────────────────────────────┐ │
+│ │ GPU Selection Logic │ │
+│ │ │ │
+│ │ • Is app in GPU_APPS list? OR Is container privileged? │ │
+│ │ └─ YES → Proceed with GPU configuration │ │
+│ │ └─ NO → Skip GPU passthrough │ │
+│ │ │ │
+│ │ • Single GPU type detected? │ │
+│ │ └─ YES → Auto-select and configure │ │
+│ │ └─ NO → Prompt user for selection │ │
+│ └─────────────────────────────────────────────────────────────────────────────┘ │
+└─────────────────────┬───────────────────────────────────────────────────────────┘
+ │
+ ▼
+┌─────────────────────────────────────────────────────────────────────────────────┐
+│ CONFIGURE_GPU_PASSTHROUGH() │
+│ • Add GPU device entries to /etc/pve/lxc/.conf │
+│ • Configure proper device permissions │
+│ • Set up device mapping │
+└─────────────────────┬───────────────────────────────────────────────────────────┘
+ │
+ ▼
+┌─────────────────────────────────────────────────────────────────────────────────┐
+│ Container Finalization │
+│ • Start container │
+│ • Wait for network connectivity │
+│ • Fix GPU GIDs (if GPU passthrough enabled) │
+│ • Configure SSH keys (if enabled) │
+│ • Run post-installation scripts │
+└─────────────────────┬───────────────────────────────────────────────────────────┘
+ │
+ ▼
+┌─────────────────────────────────────────────────────────────────────────────────┐
+│ Settings Persistence │
+│ │
+│ ┌─────────────────────────────────────────────────────────────────────────────┐ │
+│ │ DEFAULT_VAR_SETTINGS() │ │
+│ │ │ │
+│ │ • Offer to save current settings as defaults │ │
+│ │ • Save to /usr/local/community-scripts/default.vars │ │
+│ │ • Save to /usr/local/community-scripts/defaults/.vars │ │
+│ └─────────────────────────────────────────────────────────────────────────────┘ │
+└─────────────────────┬───────────────────────────────────────────────────────────┘
+ │
+ ▼
+┌─────────────────────────────────────────────────────────────────────────────────┐
+│ COMPLETION │
+│ • Display container information │
+│ • Show access details │
+│ • Provide next steps │
+└─────────────────────────────────────────────────────────────────────────────────┘
+```
+
+## Key Decision Points
+
+### 1. Installation Mode Selection
+- **Default**: Uses built-in defaults, minimal user interaction
+- **Advanced**: Full interactive configuration via whiptail menus
+- **My Defaults**: Loads settings from global default.vars file
+- **App Defaults**: Loads settings from app-specific .vars file
+
+### 2. Storage Selection Logic
+```
+Storage Selection Flow:
+├── Check if storage is preselected via environment variables
+│ ├── YES → Validate availability and use
+│ └── NO → Continue to resolution logic
+├── Count available storage options for content type
+│ ├── Only 1 option → Auto-select
+│ └── Multiple options → Prompt user via whiptail
+└── Validate selected storage and proceed
+```
+
+### 3. GPU Passthrough Decision Tree
+```
+GPU Passthrough Flow:
+├── Detect available GPU hardware
+│ ├── Intel GPU detected
+│ ├── AMD GPU detected
+│ └── NVIDIA GPU detected
+├── Check if GPU passthrough should be enabled
+│ ├── App is in GPU_APPS list? → YES
+│ ├── Container is privileged? → YES
+│ └── Neither? → Skip GPU passthrough
+├── Configure GPU passthrough
+│ ├── Single GPU type → Auto-configure
+│ └── Multiple GPU types → Prompt user
+└── Fix GPU GIDs post-creation
+```
+
+### 4. Variable Precedence Chain
+```
+Variable Resolution Order:
+1. Hard environment variables (captured at start)
+2. App-specific .vars file (/usr/local/community-scripts/defaults/$APP.vars)
+3. Global default.vars file (/usr/local/community-scripts/default.vars)
+4. Built-in defaults in base_settings() function
+```
+
+## Error Handling Flow
+
+```
+Error Handling:
+├── Validation errors → Display error message and exit
+├── Storage errors → Retry storage selection
+├── Network errors → Retry network configuration
+├── GPU errors → Fall back to no GPU passthrough
+└── Container creation errors → Cleanup and exit
+```
+
+## Integration Points
+
+- **Core Functions**: Depends on core.func for basic utilities
+- **Error Handling**: Uses error_handler.func for error management
+- **API Functions**: Uses api.func for Proxmox API interactions
+- **Tools**: Uses tools.func for additional utilities
+- **Install Scripts**: Integrates with `<app>`-install.sh scripts
diff --git a/docs/misc/build.func/BUILD_FUNC_FUNCTIONS_REFERENCE.md b/docs/misc/build.func/BUILD_FUNC_FUNCTIONS_REFERENCE.md
new file mode 100644
index 000000000..a8128d4e9
--- /dev/null
+++ b/docs/misc/build.func/BUILD_FUNC_FUNCTIONS_REFERENCE.md
@@ -0,0 +1,361 @@
+# build.func Functions Reference
+
+## Overview
+
+This document provides a comprehensive reference of all functions in `build.func`, organized alphabetically with detailed descriptions, parameters, and usage information.
+
+## Function Categories
+
+### Initialization Functions
+
+#### `start()`
+**Purpose**: Main entry point when build.func is sourced or executed
+**Parameters**: None
+**Returns**: None
+**Side Effects**:
+- Detects execution context (Proxmox host vs container)
+- Captures hard environment variables
+- Sets CT_TYPE based on context
+- Routes to appropriate workflow (install_script or update_script)
+**Dependencies**: None
+**Environment Variables Used**: `CT_TYPE`, `APP`, `CTID`
+
+#### `variables()`
+**Purpose**: Load and resolve all configuration variables using precedence chain
+**Parameters**: None
+**Returns**: None
+**Side Effects**:
+- Loads app-specific .vars file
+- Loads global default.vars file
+- Applies variable precedence chain
+- Sets all configuration variables
+**Dependencies**: `base_settings()`
+**Environment Variables Used**: All configuration variables
+
+#### `base_settings()`
+**Purpose**: Set built-in default values for all configuration variables
+**Parameters**: None
+**Returns**: None
+**Side Effects**: Sets default values for all variables
+**Dependencies**: None
+**Environment Variables Used**: All configuration variables
+
+### UI and Menu Functions
+
+#### `install_script()`
+**Purpose**: Main installation workflow coordinator
+**Parameters**: None
+**Returns**: None
+**Side Effects**:
+- Displays installation mode selection menu
+- Coordinates the entire installation process
+- Handles user interaction and validation
+**Dependencies**: `variables()`, `build_container()`, `default_var_settings()`
+**Environment Variables Used**: `APP`, `CTID`, `var_hostname`
+
+#### `advanced_settings()`
+**Purpose**: Provide advanced configuration options via whiptail menus
+**Parameters**: None
+**Returns**: None
+**Side Effects**:
+- Displays whiptail menus for configuration
+- Updates configuration variables based on user input
+- Validates user selections
+**Dependencies**: `select_storage()`, `detect_gpu_devices()`
+**Environment Variables Used**: All configuration variables
+
+#### `settings_menu()`
+**Purpose**: Display and handle settings configuration menu
+**Parameters**: None
+**Returns**: None
+**Side Effects**: Updates configuration variables
+**Dependencies**: `advanced_settings()`
+**Environment Variables Used**: All configuration variables
+
+### Storage Functions
+
+#### `select_storage()`
+**Purpose**: Handle storage selection for templates and containers
+**Parameters**: None
+**Returns**: None
+**Side Effects**:
+- Resolves storage preselection
+- Prompts user for storage selection if needed
+- Validates storage availability
+- Sets var_template_storage and var_container_storage
+**Dependencies**: `resolve_storage_preselect()`, `choose_and_set_storage_for_file()`
+**Environment Variables Used**: `var_template_storage`, `var_container_storage`, `TEMPLATE_STORAGE`, `CONTAINER_STORAGE`
+
+#### `resolve_storage_preselect()`
+**Purpose**: Resolve preselected storage options
+**Parameters**:
+- `storage_type`: Type of storage (template or container)
+**Returns**: Storage name if valid, empty if invalid
+**Side Effects**: Validates storage availability
+**Dependencies**: None
+**Environment Variables Used**: `var_template_storage`, `var_container_storage`
+
+#### `choose_and_set_storage_for_file()`
+**Purpose**: Interactive storage selection via whiptail
+**Parameters**:
+- `storage_type`: Type of storage (template or container)
+- `content_type`: Content type (vztmpl or rootdir)
+**Returns**: None
+**Side Effects**:
+- Displays whiptail menu
+- Updates storage variables
+- Validates selection
+**Dependencies**: None
+**Environment Variables Used**: `var_template_storage`, `var_container_storage`
+
+### Container Creation Functions
+
+#### `build_container()`
+**Purpose**: Validate settings and prepare container creation
+**Parameters**: None
+**Returns**: None
+**Side Effects**:
+- Validates all configuration
+- Checks for conflicts
+- Prepares container configuration
+- Calls create_lxc_container()
+**Dependencies**: `create_lxc_container()`
+**Environment Variables Used**: All configuration variables
+
+#### `create_lxc_container()`
+**Purpose**: Create the actual LXC container
+**Parameters**: None
+**Returns**: None
+**Side Effects**:
+- Creates LXC container with basic configuration
+- Configures network settings
+- Sets up storage and mount points
+- Configures features (FUSE, TUN, etc.)
+- Sets resource limits
+- Configures startup options
+- Starts container
+**Dependencies**: `configure_gpu_passthrough()`, `fix_gpu_gids()`
+**Environment Variables Used**: All configuration variables
+
+### GPU and Hardware Functions
+
+#### `detect_gpu_devices()`
+**Purpose**: Detect available GPU hardware on the system
+**Parameters**: None
+**Returns**: None
+**Side Effects**:
+- Scans for Intel, AMD, and NVIDIA GPUs
+- Updates var_gpu_type and var_gpu_devices
+- Determines GPU capabilities
+**Dependencies**: None
+**Environment Variables Used**: `var_gpu_type`, `var_gpu_devices`, `GPU_APPS`
+
+#### `configure_gpu_passthrough()`
+**Purpose**: Configure GPU passthrough for the container
+**Parameters**: None
+**Returns**: None
+**Side Effects**:
+- Adds GPU device entries to container config
+- Configures proper device permissions
+- Sets up device mapping
+- Updates /etc/pve/lxc/<CTID>.conf
+**Dependencies**: `detect_gpu_devices()`
+**Environment Variables Used**: `var_gpu`, `var_gpu_type`, `var_gpu_devices`, `CTID`
+
+#### `fix_gpu_gids()`
+**Purpose**: Fix GPU group IDs after container creation
+**Parameters**: None
+**Returns**: None
+**Side Effects**:
+- Updates GPU group IDs in container
+- Ensures proper GPU access permissions
+- Configures video and render groups
+**Dependencies**: `configure_gpu_passthrough()`
+**Environment Variables Used**: `CTID`, `var_gpu_type`
+
+### Settings Persistence Functions
+
+#### `default_var_settings()`
+**Purpose**: Offer to save current settings as defaults
+**Parameters**: None
+**Returns**: None
+**Side Effects**:
+- Prompts user to save settings
+- Saves to default.vars file
+- Saves to app-specific .vars file
+**Dependencies**: `maybe_offer_save_app_defaults()`
+**Environment Variables Used**: All configuration variables
+
+#### `maybe_offer_save_app_defaults()`
+**Purpose**: Offer to save app-specific defaults
+**Parameters**: None
+**Returns**: None
+**Side Effects**:
+- Prompts user to save app-specific settings
+- Saves to app.vars file
+- Updates app-specific configuration
+**Dependencies**: None
+**Environment Variables Used**: `APP`, `SAVE_APP_DEFAULTS`
+
+### Utility Functions
+
+#### `validate_settings()`
+**Purpose**: Validate all configuration settings
+**Parameters**: None
+**Returns**: 0 if valid, 1 if invalid
+**Side Effects**:
+- Checks for configuration conflicts
+- Validates resource limits
+- Validates network configuration
+- Validates storage configuration
+**Dependencies**: None
+**Environment Variables Used**: All configuration variables
+
+#### `check_conflicts()`
+**Purpose**: Check for configuration conflicts
+**Parameters**: None
+**Returns**: 0 if no conflicts, 1 if conflicts found
+**Side Effects**:
+- Checks for conflicting settings
+- Validates resource allocation
+- Checks network configuration
+**Dependencies**: None
+**Environment Variables Used**: All configuration variables
+
+#### `cleanup_on_error()`
+**Purpose**: Clean up resources on error
+**Parameters**: None
+**Returns**: None
+**Side Effects**:
+- Removes partially created containers
+- Cleans up temporary files
+- Resets configuration
+**Dependencies**: None
+**Environment Variables Used**: `CTID`
+
+## Function Call Flow
+
+### Main Installation Flow
+```
+start()
+├── variables()
+│ ├── base_settings()
+│ ├── Load app.vars
+│ └── Load default.vars
+├── install_script()
+│ ├── advanced_settings()
+│ │ ├── select_storage()
+│ │ │ ├── resolve_storage_preselect()
+│ │ │ └── choose_and_set_storage_for_file()
+│ │ └── detect_gpu_devices()
+│ ├── build_container()
+│ │ ├── validate_settings()
+│ │ ├── check_conflicts()
+│ │ └── create_lxc_container()
+│ │ ├── configure_gpu_passthrough()
+│ │ └── fix_gpu_gids()
+│ └── default_var_settings()
+│ └── maybe_offer_save_app_defaults()
+```
+
+### Error Handling Flow
+```
+Error Detection
+├── validate_settings()
+│ └── check_conflicts()
+├── Error Handling
+│ └── cleanup_on_error()
+└── Exit with error code
+```
+
+## Function Dependencies
+
+### Core Dependencies
+- `start()` → `install_script()` → `build_container()` → `create_lxc_container()`
+- `variables()` → `base_settings()`
+- `advanced_settings()` → `select_storage()` → `detect_gpu_devices()`
+
+### Storage Dependencies
+- `select_storage()` → `resolve_storage_preselect()`
+- `select_storage()` → `choose_and_set_storage_for_file()`
+
+### GPU Dependencies
+- `configure_gpu_passthrough()` → `detect_gpu_devices()`
+- `fix_gpu_gids()` → `configure_gpu_passthrough()`
+
+### Settings Dependencies
+- `default_var_settings()` → `maybe_offer_save_app_defaults()`
+
+## Function Usage Examples
+
+### Basic Container Creation
+```bash
+# Set required variables
+export APP="plex"
+export CTID="100"
+export var_hostname="plex-server"
+
+# Call main functions
+start() # Entry point
+# → variables() # Load configuration
+# → install_script() # Main workflow
+# → build_container() # Create container
+# → create_lxc_container() # Actual creation
+```
+
+### Advanced Configuration
+```bash
+# Set advanced variables
+export var_os="debian"
+export var_version="12"
+export var_cpu="4"
+export var_ram="4096"
+export var_disk="20"
+
+# Call advanced functions
+advanced_settings() # Interactive configuration
+# → select_storage() # Storage selection
+# → detect_gpu_devices() # GPU detection
+```
+
+### GPU Passthrough
+```bash
+# Enable GPU passthrough
+export GPU_APPS="plex"
+export var_gpu="nvidia"
+
+# Call GPU functions
+detect_gpu_devices() # Detect hardware
+configure_gpu_passthrough() # Configure passthrough
+fix_gpu_gids() # Fix permissions
+```
+
+### Settings Persistence
+```bash
+# Save settings as defaults
+export SAVE_DEFAULTS="true"
+export SAVE_APP_DEFAULTS="true"
+
+# Call persistence functions
+default_var_settings() # Save global defaults
+maybe_offer_save_app_defaults() # Save app defaults
+```
+
+## Function Error Handling
+
+### Validation Functions
+- `validate_settings()`: Returns 0 for valid, 1 for invalid
+- `check_conflicts()`: Returns 0 for no conflicts, 1 for conflicts
+
+### Error Recovery
+- `cleanup_on_error()`: Cleans up on any error
+- Error codes are propagated up the call stack
+- Critical errors cause script termination
+
+### Error Types
+1. **Configuration Errors**: Invalid settings or conflicts
+2. **Resource Errors**: Insufficient resources or conflicts
+3. **Network Errors**: Invalid network configuration
+4. **Storage Errors**: Storage not available or invalid
+5. **GPU Errors**: GPU configuration failures
+6. **Container Creation Errors**: LXC creation failures
diff --git a/docs/misc/build.func/BUILD_FUNC_USAGE_EXAMPLES.md b/docs/misc/build.func/BUILD_FUNC_USAGE_EXAMPLES.md
new file mode 100644
index 000000000..b5ad83d6e
--- /dev/null
+++ b/docs/misc/build.func/BUILD_FUNC_USAGE_EXAMPLES.md
@@ -0,0 +1,600 @@
+# build.func Usage Examples
+
+## Overview
+
+This document provides practical usage examples for `build.func`, covering common scenarios, CLI examples, and environment variable combinations.
+
+## Basic Usage Examples
+
+### 1. Simple Container Creation
+
+**Scenario**: Create a basic Plex media server container
+
+```bash
+# Set basic environment variables
+export APP="plex"
+export CTID="100"
+export var_hostname="plex-server"
+export var_os="debian"
+export var_version="12"
+export var_cpu="4"
+export var_ram="4096"
+export var_disk="20"
+export var_net="vmbr0"
+export var_gateway="192.168.1.1"
+export var_ip="192.168.1.100"
+export var_template_storage="local"
+export var_container_storage="local"
+
+# Execute build.func
+source build.func
+```
+
+**Expected Output**:
+```
+Creating Plex container...
+Container ID: 100
+Hostname: plex-server
+OS: Debian 12
+Resources: 4 CPU, 4GB RAM, 20GB Disk
+Network: 192.168.1.100/24
+Container created successfully!
+```
+
+### 2. Advanced Configuration
+
+**Scenario**: Create a Nextcloud container with custom settings
+
+```bash
+# Set advanced environment variables
+export APP="nextcloud"
+export CTID="101"
+export var_hostname="nextcloud-server"
+export var_os="ubuntu"
+export var_version="22.04"
+export var_cpu="6"
+export var_ram="8192"
+export var_disk="50"
+export var_net="vmbr0"
+export var_gateway="192.168.1.1"
+export var_ip="192.168.1.101"
+export var_vlan="100"
+export var_mtu="9000"
+export var_template_storage="nfs-storage"
+export var_container_storage="ssd-storage"
+export ENABLE_FUSE="true"
+export ENABLE_TUN="true"
+export SSH="true"
+
+# Execute build.func
+source build.func
+```
+
+### 3. GPU Passthrough Configuration
+
+**Scenario**: Create a Jellyfin container with NVIDIA GPU passthrough
+
+```bash
+# Set GPU passthrough variables
+export APP="jellyfin"
+export CTID="102"
+export var_hostname="jellyfin-server"
+export var_os="debian"
+export var_version="12"
+export var_cpu="8"
+export var_ram="16384"
+export var_disk="30"
+export var_net="vmbr0"
+export var_gateway="192.168.1.1"
+export var_ip="192.168.1.102"
+export var_template_storage="local"
+export var_container_storage="local"
+export GPU_APPS="jellyfin"
+export var_gpu="nvidia"
+export ENABLE_PRIVILEGED="true"
+export ENABLE_FUSE="true"
+export ENABLE_TUN="true"
+
+# Execute build.func
+source build.func
+```
+
+## Silent/Non-Interactive Examples
+
+### 1. Automated Deployment
+
+**Scenario**: Deploy multiple containers without user interaction
+
+```bash
+#!/bin/bash
+# Automated deployment script
+
+# Function to create container
+create_container() {
+ local app=$1
+ local ctid=$2
+ local ip=$3
+
+ export APP="$app"
+ export CTID="$ctid"
+ export var_hostname="${app}-server"
+ export var_os="debian"
+ export var_version="12"
+ export var_cpu="2"
+ export var_ram="2048"
+ export var_disk="10"
+ export var_net="vmbr0"
+ export var_gateway="192.168.1.1"
+ export var_ip="$ip"
+ export var_template_storage="local"
+ export var_container_storage="local"
+ export ENABLE_FUSE="true"
+ export ENABLE_TUN="true"
+ export SSH="true"
+
+ source build.func
+}
+
+# Create multiple containers
+create_container "plex" "100" "192.168.1.100"
+create_container "nextcloud" "101" "192.168.1.101"
+create_container "nginx" "102" "192.168.1.102"
+```
+
+### 2. Development Environment Setup
+
+**Scenario**: Create development containers with specific configurations
+
+```bash
+#!/bin/bash
+# Development environment setup
+
+# Development container configuration
+export APP="dev-container"
+export CTID="200"
+export var_hostname="dev-server"
+export var_os="ubuntu"
+export var_version="22.04"
+export var_cpu="4"
+export var_ram="4096"
+export var_disk="20"
+export var_net="vmbr0"
+export var_gateway="192.168.1.1"
+export var_ip="192.168.1.200"
+export var_template_storage="local"
+export var_container_storage="local"
+export ENABLE_NESTING="true"
+export ENABLE_PRIVILEGED="true"
+export ENABLE_FUSE="true"
+export ENABLE_TUN="true"
+export SSH="true"
+
+# Execute build.func
+source build.func
+```
+
+## Network Configuration Examples
+
+### 1. VLAN Configuration
+
+**Scenario**: Create container with VLAN support
+
+```bash
+# VLAN configuration
+export APP="web-server"
+export CTID="300"
+export var_hostname="web-server"
+export var_os="debian"
+export var_version="12"
+export var_cpu="2"
+export var_ram="2048"
+export var_disk="10"
+export var_net="vmbr0"
+export var_gateway="192.168.100.1"
+export var_ip="192.168.100.100"
+export var_vlan="100"
+export var_mtu="1500"
+export var_template_storage="local"
+export var_container_storage="local"
+
+source build.func
+```
+
+### 2. IPv6 Configuration
+
+**Scenario**: Create container with IPv6 support
+
+```bash
+# IPv6 configuration
+export APP="ipv6-server"
+export CTID="301"
+export var_hostname="ipv6-server"
+export var_os="debian"
+export var_version="12"
+export var_cpu="2"
+export var_ram="2048"
+export var_disk="10"
+export var_net="vmbr0"
+export var_gateway="192.168.1.1"
+export var_ip="192.168.1.101"
+export var_ipv6="2001:db8::101"
+export IPV6_METHOD="static"
+export var_template_storage="local"
+export var_container_storage="local"
+
+source build.func
+```
+
+## Storage Configuration Examples
+
+### 1. Custom Storage Locations
+
+**Scenario**: Use different storage for templates and containers
+
+```bash
+# Custom storage configuration
+export APP="storage-test"
+export CTID="400"
+export var_hostname="storage-test"
+export var_os="debian"
+export var_version="12"
+export var_cpu="2"
+export var_ram="2048"
+export var_disk="10"
+export var_net="vmbr0"
+export var_gateway="192.168.1.1"
+export var_ip="192.168.1.140"
+export var_template_storage="nfs-storage"
+export var_container_storage="ssd-storage"
+
+source build.func
+```
+
+### 2. High-Performance Storage
+
+**Scenario**: Use high-performance storage for resource-intensive applications
+
+```bash
+# High-performance storage configuration
+export APP="database-server"
+export CTID="401"
+export var_hostname="database-server"
+export var_os="debian"
+export var_version="12"
+export var_cpu="8"
+export var_ram="16384"
+export var_disk="100"
+export var_net="vmbr0"
+export var_gateway="192.168.1.1"
+export var_ip="192.168.1.141"
+export var_template_storage="nvme-storage"
+export var_container_storage="nvme-storage"
+
+source build.func
+```
+
+## Feature Configuration Examples
+
+### 1. Privileged Container
+
+**Scenario**: Create privileged container for system-level access
+
+```bash
+# Privileged container configuration
+export APP="system-container"
+export CTID="500"
+export var_hostname="system-container"
+export var_os="debian"
+export var_version="12"
+export var_cpu="4"
+export var_ram="4096"
+export var_disk="20"
+export var_net="vmbr0"
+export var_gateway="192.168.1.1"
+export var_ip="192.168.1.150"
+export var_template_storage="local"
+export var_container_storage="local"
+export ENABLE_PRIVILEGED="true"
+export ENABLE_FUSE="true"
+export ENABLE_TUN="true"
+export ENABLE_KEYCTL="true"
+export ENABLE_MOUNT="true"
+
+source build.func
+```
+
+### 2. Unprivileged Container
+
+**Scenario**: Create secure unprivileged container
+
+```bash
+# Unprivileged container configuration
+export APP="secure-container"
+export CTID="501"
+export var_hostname="secure-container"
+export var_os="debian"
+export var_version="12"
+export var_cpu="2"
+export var_ram="2048"
+export var_disk="10"
+export var_net="vmbr0"
+export var_gateway="192.168.1.1"
+export var_ip="192.168.1.151"
+export var_template_storage="local"
+export var_container_storage="local"
+export ENABLE_UNPRIVILEGED="true"
+export ENABLE_FUSE="true"
+export ENABLE_TUN="true"
+
+source build.func
+```
+
+## Settings Persistence Examples
+
+### 1. Save Global Defaults
+
+**Scenario**: Save current settings as global defaults
+
+```bash
+# Save global defaults
+export APP="default-test"
+export CTID="600"
+export var_hostname="default-test"
+export var_os="debian"
+export var_version="12"
+export var_cpu="2"
+export var_ram="2048"
+export var_disk="10"
+export var_net="vmbr0"
+export var_gateway="192.168.1.1"
+export var_ip="192.168.1.160"
+export var_template_storage="local"
+export var_container_storage="local"
+export SAVE_DEFAULTS="true"
+
+source build.func
+```
+
+### 2. Save App-Specific Defaults
+
+**Scenario**: Save settings as app-specific defaults
+
+```bash
+# Save app-specific defaults
+export APP="plex"
+export CTID="601"
+export var_hostname="plex-server"
+export var_os="debian"
+export var_version="12"
+export var_cpu="4"
+export var_ram="4096"
+export var_disk="20"
+export var_net="vmbr0"
+export var_gateway="192.168.1.1"
+export var_ip="192.168.1.161"
+export var_template_storage="local"
+export var_container_storage="local"
+export SAVE_APP_DEFAULTS="true"
+
+source build.func
+```
+
+## Error Handling Examples
+
+### 1. Validation Error Handling
+
+**Scenario**: Handle configuration validation errors
+
+```bash
+#!/bin/bash
+# Error handling example
+
+# Set invalid configuration
+export APP="error-test"
+export CTID="700"
+export var_hostname="error-test"
+export var_os="invalid-os"
+export var_version="invalid-version"
+export var_cpu="invalid-cpu"
+export var_ram="invalid-ram"
+export var_disk="invalid-disk"
+export var_net="invalid-network"
+export var_gateway="invalid-gateway"
+export var_ip="invalid-ip"
+
+# Execute with error handling
+if source build.func; then
+ echo "Container created successfully!"
+else
+ echo "Error: Container creation failed!"
+ echo "Please check your configuration and try again."
+fi
+```
+
+### 2. Storage Error Handling
+
+**Scenario**: Handle storage selection errors
+
+```bash
+#!/bin/bash
+# Storage error handling
+
+# Set invalid storage
+export APP="storage-error-test"
+export CTID="701"
+export var_hostname="storage-error-test"
+export var_os="debian"
+export var_version="12"
+export var_cpu="2"
+export var_ram="2048"
+export var_disk="10"
+export var_net="vmbr0"
+export var_gateway="192.168.1.1"
+export var_ip="192.168.1.170"
+export var_template_storage="nonexistent-storage"
+export var_container_storage="nonexistent-storage"
+
+# Execute with error handling
+if source build.func; then
+ echo "Container created successfully!"
+else
+ echo "Error: Storage not available!"
+ echo "Please check available storage and try again."
+fi
+```
+
+## Integration Examples
+
+### 1. With Install Scripts
+
+**Scenario**: Integrate with application install scripts
+
+```bash
+#!/bin/bash
+# Integration with install scripts
+
+# Create container
+export APP="plex"
+export CTID="800"
+export var_hostname="plex-server"
+export var_os="debian"
+export var_version="12"
+export var_cpu="4"
+export var_ram="4096"
+export var_disk="20"
+export var_net="vmbr0"
+export var_gateway="192.168.1.1"
+export var_ip="192.168.1.180"
+export var_template_storage="local"
+export var_container_storage="local"
+
+# Create container
+source build.func
+
+# Run install script
+if [ -f "plex-install.sh" ]; then
+ source plex-install.sh
+else
+ echo "Install script not found!"
+fi
+```
+
+### 2. With Monitoring
+
+**Scenario**: Integrate with monitoring systems
+
+```bash
+#!/bin/bash
+# Monitoring integration
+
+# Create container with monitoring
+export APP="monitored-app"
+export CTID="801"
+export var_hostname="monitored-app"
+export var_os="debian"
+export var_version="12"
+export var_cpu="2"
+export var_ram="2048"
+export var_disk="10"
+export var_net="vmbr0"
+export var_gateway="192.168.1.1"
+export var_ip="192.168.1.181"
+export var_template_storage="local"
+export var_container_storage="local"
+export DIAGNOSTICS="true"
+
+# Create container
+source build.func
+
+# Set up monitoring
+if [ -f "monitoring-setup.sh" ]; then
+ source monitoring-setup.sh
+fi
+```
+
+## Best Practices
+
+### 1. Environment Variable Management
+
+```bash
+#!/bin/bash
+# Best practice: Environment variable management
+
+# Set configuration file
+CONFIG_FILE="/etc/build.func.conf"
+
+# Load configuration if exists
+if [ -f "$CONFIG_FILE" ]; then
+ source "$CONFIG_FILE"
+fi
+
+# Set required variables
+export APP="${APP:-plex}"
+export CTID="${CTID:-100}"
+export var_hostname="${var_hostname:-plex-server}"
+export var_os="${var_os:-debian}"
+export var_version="${var_version:-12}"
+export var_cpu="${var_cpu:-2}"
+export var_ram="${var_ram:-2048}"
+export var_disk="${var_disk:-10}"
+export var_net="${var_net:-vmbr0}"
+export var_gateway="${var_gateway:-192.168.1.1}"
+export var_ip="${var_ip:-192.168.1.100}"
+export var_template_storage="${var_template_storage:-local}"
+export var_container_storage="${var_container_storage:-local}"
+
+# Execute build.func
+source build.func
+```
+
+### 2. Error Handling and Logging
+
+```bash
+#!/bin/bash
+# Best practice: Error handling and logging
+
+# Set log file
+LOG_FILE="/var/log/build.func.log"
+
+# Function to log messages
+log_message() {
+ echo "$(date): $1" >> "$LOG_FILE"
+}
+
+# Function to create container with error handling
+create_container() {
+ local app=$1
+ local ctid=$2
+
+ log_message "Starting container creation for $app (ID: $ctid)"
+
+ # Set variables
+ export APP="$app"
+ export CTID="$ctid"
+ export var_hostname="${app}-server"
+ export var_os="debian"
+ export var_version="12"
+ export var_cpu="2"
+ export var_ram="2048"
+ export var_disk="10"
+ export var_net="vmbr0"
+ export var_gateway="192.168.1.1"
+ export var_ip="192.168.1.$ctid"
+ export var_template_storage="local"
+ export var_container_storage="local"
+
+ # Create container
+ if source build.func; then
+ log_message "Container $app created successfully (ID: $ctid)"
+ return 0
+ else
+ log_message "Error: Failed to create container $app (ID: $ctid)"
+ return 1
+ fi
+}
+
+# Create containers
+create_container "plex" "100"
+create_container "nextcloud" "101"
+create_container "nginx" "102"
+```
diff --git a/docs/misc/build.func/README.md b/docs/misc/build.func/README.md
new file mode 100644
index 000000000..c7ede47c7
--- /dev/null
+++ b/docs/misc/build.func/README.md
@@ -0,0 +1,260 @@
+# build.func Documentation
+
+## Overview
+
+This directory contains comprehensive documentation for the `build.func` script, which is the core orchestration script for Proxmox LXC container creation in the Community Scripts project.
+
+## Documentation Files
+
+### 📊 [BUILD_FUNC_FLOWCHART.md](./BUILD_FUNC_FLOWCHART.md)
+Visual ASCII flowchart showing the main execution flow, decision trees, and key decision points in the build.func script.
+
+**Contents:**
+- Main execution flow diagram
+- Installation mode selection flows
+- Storage selection workflow
+- GPU passthrough decision logic
+- Variable precedence chain
+- Error handling flow
+- Integration points
+
+### 🔧 [BUILD_FUNC_ENVIRONMENT_VARIABLES.md](./BUILD_FUNC_ENVIRONMENT_VARIABLES.md)
+Complete reference of all environment variables used in build.func, organized by category and usage context.
+
+**Contents:**
+- Core container variables
+- Operating system variables
+- Resource configuration variables
+- Network configuration variables
+- Storage configuration variables
+- Feature flags
+- GPU passthrough variables
+- API and diagnostics variables
+- Settings persistence variables
+- Variable precedence chain
+- Critical variables for non-interactive use
+- Common variable combinations
+
+### 📚 [BUILD_FUNC_FUNCTIONS_REFERENCE.md](./BUILD_FUNC_FUNCTIONS_REFERENCE.md)
+Alphabetical function reference with detailed descriptions, parameters, dependencies, and usage information.
+
+**Contents:**
+- Initialization functions
+- UI and menu functions
+- Storage functions
+- Container creation functions
+- GPU and hardware functions
+- Settings persistence functions
+- Utility functions
+- Function call flow
+- Function dependencies
+- Function usage examples
+- Function error handling
+
+### 🔄 [BUILD_FUNC_EXECUTION_FLOWS.md](./BUILD_FUNC_EXECUTION_FLOWS.md)
+Detailed execution flows for different installation modes and scenarios, including variable precedence and decision trees.
+
+**Contents:**
+- Default install flow
+- Advanced install flow
+- My defaults flow
+- App defaults flow
+- Variable precedence chain
+- Storage selection logic
+- GPU passthrough flow
+- Network configuration flow
+- Container creation flow
+- Error handling flows
+- Integration flows
+- Performance considerations
+
+### 🏗️ [BUILD_FUNC_ARCHITECTURE.md](./BUILD_FUNC_ARCHITECTURE.md)
+High-level architectural overview including module dependencies, data flow, integration points, and system architecture.
+
+**Contents:**
+- High-level architecture diagram
+- Module dependencies
+- Data flow architecture
+- Integration architecture
+- System architecture components
+- User interface components
+- Security architecture
+- Performance architecture
+- Deployment architecture
+- Maintenance architecture
+- Future architecture considerations
+
+### 💡 [BUILD_FUNC_USAGE_EXAMPLES.md](./BUILD_FUNC_USAGE_EXAMPLES.md)
+Practical usage examples covering common scenarios, CLI examples, and environment variable combinations.
+
+**Contents:**
+- Basic usage examples
+- Silent/non-interactive examples
+- Network configuration examples
+- Storage configuration examples
+- Feature configuration examples
+- Settings persistence examples
+- Error handling examples
+- Integration examples
+- Best practices
+
+## Quick Start Guide
+
+### For New Users
+1. Start with [BUILD_FUNC_FLOWCHART.md](./BUILD_FUNC_FLOWCHART.md) to understand the overall flow
+2. Review [BUILD_FUNC_ENVIRONMENT_VARIABLES.md](./BUILD_FUNC_ENVIRONMENT_VARIABLES.md) for configuration options
+3. Follow examples in [BUILD_FUNC_USAGE_EXAMPLES.md](./BUILD_FUNC_USAGE_EXAMPLES.md)
+
+### For Developers
+1. Read [BUILD_FUNC_ARCHITECTURE.md](./BUILD_FUNC_ARCHITECTURE.md) for system overview
+2. Study [BUILD_FUNC_FUNCTIONS_REFERENCE.md](./BUILD_FUNC_FUNCTIONS_REFERENCE.md) for function details
+3. Review [BUILD_FUNC_EXECUTION_FLOWS.md](./BUILD_FUNC_EXECUTION_FLOWS.md) for implementation details
+
+### For System Administrators
+1. Focus on [BUILD_FUNC_USAGE_EXAMPLES.md](./BUILD_FUNC_USAGE_EXAMPLES.md) for deployment scenarios
+2. Review [BUILD_FUNC_ENVIRONMENT_VARIABLES.md](./BUILD_FUNC_ENVIRONMENT_VARIABLES.md) for configuration management
+3. Check [BUILD_FUNC_ARCHITECTURE.md](./BUILD_FUNC_ARCHITECTURE.md) for security and performance considerations
+
+## Key Concepts
+
+### Variable Precedence
+Variables are resolved in this order (highest to lowest priority):
+1. Hard environment variables (set before script execution)
+2. App-specific .vars file (`/usr/local/community-scripts/defaults/<app>.vars`)
+3. Global default.vars file (`/usr/local/community-scripts/default.vars`)
+4. Built-in defaults (set in `base_settings()` function)
+
+### Installation Modes
+- **Default Install**: Uses built-in defaults, minimal prompts
+- **Advanced Install**: Full interactive configuration via whiptail
+- **My Defaults**: Loads from global default.vars file
+- **App Defaults**: Loads from app-specific .vars file
+
+### Storage Selection Logic
+1. If only 1 storage exists for content type → auto-select
+2. If preselected via environment variables → validate and use
+3. Otherwise → prompt user via whiptail
+
+### GPU Passthrough Flow
+1. Detect hardware (Intel/AMD/NVIDIA)
+2. Check if app is in GPU_APPS list OR container is privileged
+3. Auto-select if single GPU type, prompt if multiple
+4. Configure `/etc/pve/lxc/<CTID>.conf` with proper device entries
+5. Fix GIDs post-creation to match container's video/render groups
+
+## Common Use Cases
+
+### Basic Container Creation
+```bash
+export APP="plex"
+export CTID="100"
+export var_hostname="plex-server"
+export var_os="debian"
+export var_version="12"
+export var_cpu="4"
+export var_ram="4096"
+export var_disk="20"
+export var_net="vmbr0"
+export var_gateway="192.168.1.1"
+export var_ip="192.168.1.100"
+export var_template_storage="local"
+export var_container_storage="local"
+
+source build.func
+```
+
+### GPU Passthrough
+```bash
+export APP="jellyfin"
+export CTID="101"
+export var_hostname="jellyfin-server"
+export var_os="debian"
+export var_version="12"
+export var_cpu="8"
+export var_ram="16384"
+export var_disk="30"
+export var_net="vmbr0"
+export var_gateway="192.168.1.1"
+export var_ip="192.168.1.101"
+export var_template_storage="local"
+export var_container_storage="local"
+export GPU_APPS="jellyfin"
+export var_gpu="nvidia"
+export ENABLE_PRIVILEGED="true"
+
+source build.func
+```
+
+### Silent/Non-Interactive Deployment
+```bash
+#!/bin/bash
+# Automated deployment
+export APP="nginx"
+export CTID="102"
+export var_hostname="nginx-proxy"
+export var_os="alpine"
+export var_version="3.18"
+export var_cpu="1"
+export var_ram="512"
+export var_disk="2"
+export var_net="vmbr0"
+export var_gateway="192.168.1.1"
+export var_ip="192.168.1.102"
+export var_template_storage="local"
+export var_container_storage="local"
+export ENABLE_UNPRIVILEGED="true"
+
+source build.func
+```
+
+## Troubleshooting
+
+### Common Issues
+1. **Container creation fails**: Check resource availability and configuration validity
+2. **Storage errors**: Verify storage exists and supports required content types
+3. **Network errors**: Validate network configuration and IP address availability
+4. **GPU passthrough issues**: Check hardware detection and container privileges
+5. **Permission errors**: Verify user permissions and container privileges
+
+### Debug Mode
+Enable verbose output for debugging:
+```bash
+export VERBOSE="true"
+export DIAGNOSTICS="true"
+source build.func
+```
+
+### Log Files
+Check system logs for detailed error information:
+- `/var/log/syslog`
+- `/var/log/pve/lxc/<CTID>.log`
+- Container-specific logs
+
+## Contributing
+
+When contributing to build.func documentation:
+1. Update relevant documentation files
+2. Add examples for new features
+3. Update architecture diagrams if needed
+4. Test all examples before submitting
+5. Follow the existing documentation style
+
+## Related Documentation
+
+- [Main README](../../README.md) - Project overview
+- [Installation Guide](../../install/) - Installation scripts
+- [Container Templates](../../ct/) - Container templates
+- [Tools](../../tools/) - Additional tools and utilities
+
+## Support
+
+For issues and questions:
+1. Check this documentation first
+2. Review the [troubleshooting section](#troubleshooting)
+3. Check existing issues in the project repository
+4. Create a new issue with detailed information
+
+---
+
+*Last updated: see repository commit history*
+*Documentation version: 1.0*
diff --git a/docs/misc/core.func/CORE_FLOWCHART.md b/docs/misc/core.func/CORE_FLOWCHART.md
new file mode 100644
index 000000000..2b9dd98ce
--- /dev/null
+++ b/docs/misc/core.func/CORE_FLOWCHART.md
@@ -0,0 +1,316 @@
+# core.func Execution Flowchart
+
+## Main Execution Flow
+
+```
+┌─────────────────────────────────────────────────────────────────────────────────┐
+│ core.func Loading │
+│ Entry point when core.func is sourced by other scripts │
+└─────────────────────┬───────────────────────────────────────────────────────────┘
+ │
+ ▼
+┌─────────────────────────────────────────────────────────────────────────────────┐
+│ Load Prevention Check │
+│ • Check if _CORE_FUNC_LOADED is set │
+│ • Return early if already loaded │
+│ • Set _CORE_FUNC_LOADED=1 to prevent reloading │
+└─────────────────────┬───────────────────────────────────────────────────────────┘
+ │
+ ▼
+┌─────────────────────────────────────────────────────────────────────────────────┐
+│ LOAD_FUNCTIONS() │
+│ Main function loader - sets up all core utilities │
+└─────────────────────┬───────────────────────────────────────────────────────────┘
+ │
+ ▼
+┌─────────────────────────────────────────────────────────────────────────────────┐
+│ Core Function Loading Sequence │
+│ │
+│ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────────────┐ │
+│ │ color() │ │ formatting() │ │ icons() │ │
+│ │ │ │ │ │ │ │
+│ │ • Set ANSI │ │ • Set format │ │ • Set symbolic icons │ │
+│ │ color codes │ │ helpers │ │ • Define message │ │
+│ │ • Define │ │ • Tab, bold, │ │ symbols │ │
+│ │ colors │ │ line reset │ │ • Status indicators │ │
+│ └─────────────────┘ └─────────────────┘ └─────────────────────────────┘ │
+│ │
+│ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────────────┐ │
+│ │ default_vars() │ │ set_std_mode() │ │ Additional Functions │ │
+│ │ │ │ │ │ │ │
+│ │ • Set retry │ │ • Set verbose │ │ • Add more functions │ │
+│ │ variables │ │ mode │ │ as needed │ │
+│ │ • Initialize │ │ • Configure │ │ │ │
+│ │ counters │ │ STD variable │ │ │ │
+│ └─────────────────┘ └─────────────────┘ └─────────────────────────────┘ │
+└─────────────────────────────────────────────────────────────────────────────────┘
+```
+
+## System Check Functions Flow
+
+```
+┌─────────────────────────────────────────────────────────────────────────────────┐
+│ System Validation Flow │
+│ │
+│ ┌─────────────────────────────────────────────────────────────────────────────┐ │
+│ │ PVE_CHECK() │ │
+│ │ │ │
+│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │
+│ │ │ Get PVE │ │ Check PVE │ │ Check PVE │ │ │
+│ │ │ Version │ │ 8.x Support │ │ 9.x Support │ │ │
+│ │ │ │ │ │ │ │ │ │
+│ │ │ • pveversion │ │ • Allow 8.0-8.9│ │ • Allow ONLY 9.0 │ │ │
+│ │ │ • Parse version │ │ • Reject others │ │ • Reject 9.1+ │ │ │
+│ │ │ • Extract │ │ • Exit if │ │ • Exit if │ │ │
+│ │ │ major.minor │ │ unsupported │ │ unsupported │ │ │
+│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │
+│ └─────────────────────────────────────────────────────────────────────────────┘ │
+│ │
+│ ┌─────────────────────────────────────────────────────────────────────────────┐ │
+│ │ ARCH_CHECK() │ │
+│ │ │ │
+│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │
+│ │ │ Check │ │ AMD64 Check │ │ PiMox Warning │ │ │
+│ │ │ Architecture │ │ │ │ │ │ │
+│ │ │ │ │ • dpkg --print- │ │ • Show PiMox │ │ │
+│ │ │ • Get system │ │ architecture │ │ message │ │ │
+│ │ │ architecture │ │ • Must be │ │ • Point to ARM64 │ │ │
+│ │ │ • Compare with │ │ "amd64" │ │ support │ │ │
+│ │ │ "amd64" │ │ • Exit if not │ │ • Exit script │ │ │
+│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │
+│ └─────────────────────────────────────────────────────────────────────────────┘ │
+│ │
+│ ┌─────────────────────────────────────────────────────────────────────────────┐ │
+│ │ SHELL_CHECK() │ │
+│ │ │ │
+│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │
+│ │ │ Check │ │ Bash Check │ │ Error Handling │ │ │
+│ │ │ Shell Type │ │ │ │ │ │ │
+│ │ │ │ │ • ps -p $$ -o │ │ • Clear screen │ │ │
+│ │ │ • Get current │ │ comm= │ │ • Show error │ │ │
+│ │ │ shell │ │ • Must be │ │ • Sleep and exit │ │ │
+│ │ │ • Compare with │ │ "bash" │ │ │ │ │
+│ │ │ "bash" │ │ • Exit if not │ │ │ │ │
+│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │
+│ └─────────────────────────────────────────────────────────────────────────────┘ │
+│ │
+│ ┌─────────────────────────────────────────────────────────────────────────────┐ │
+│ │ ROOT_CHECK() │ │
+│ │ │ │
+│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │
+│ │ │ Check │ │ Root Check │ │ Sudo Check │ │ │
+│ │ │ User ID │ │ │ │ │ │ │
+│ │ │ │ │ • id -u │ │ • Check parent │ │ │
+│ │ │ • Get user ID │ │ • Must be 0 │ │ process │ │ │
+│ │ │ • Check if │ │ • Exit if not │ │ • Detect sudo │ │ │
+│ │ │ root (0) │ │ root │ │ usage │ │ │
+│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │
+│ └─────────────────────────────────────────────────────────────────────────────┘ │
+└─────────────────────────────────────────────────────────────────────────────────┘
+```
+
+## Message System Flow
+
+```
+┌─────────────────────────────────────────────────────────────────────────────────┐
+│ Message System Flow │
+│ │
+│ ┌─────────────────────────────────────────────────────────────────────────────┐ │
+│ │ MSG_INFO() │ │
+│ │ │ │
+│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │
+│ │ │ Message │ │ Duplicate │ │ Display Mode │ │ │
+│ │ │ Validation │ │ Check │ │ Selection │ │ │
+│ │ │ │ │ │ │ │ │ │
+│ │ │ • Check if │ │ • Track shown │ │ • Verbose mode: │ │ │
+│ │ │ message │ │ messages │ │ Show directly │ │ │
+│ │ │ exists │ │ • Skip if │ │ • Normal mode: │ │ │
+│ │ │ • Return if │ │ already │ │ Start spinner │ │ │
+│ │ │ empty │ │ shown │ │ │ │ │
+│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │
+│ └─────────────────────────────────────────────────────────────────────────────┘ │
+│ │
+│ ┌─────────────────────────────────────────────────────────────────────────────┐ │
+│ │ SPINNER() │ │
+│ │ │ │
+│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │
+│ │ │ Spinner │ │ Animation │ │ Display │ │ │
+│ │ │ Initialization│ │ Loop │ │ Control │ │ │
+│ │ │ │ │ │ │ │ │ │
+│ │ │ • Define │ │ • Cycle through │ │ • Print spinner │ │ │
+│ │ │ characters │ │ characters │ │ character │ │ │
+│ │ │ • Set index │ │ • Sleep 0.1s │ │ • Print message │ │ │
+│ │ │ • Start loop │ │ • Increment │ │ • Clear line │ │ │
+│ │ │ │ │ index │ │ │ │ │
+│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │
+│ └─────────────────────────────────────────────────────────────────────────────┘ │
+│ │
+│ ┌─────────────────────────────────────────────────────────────────────────────┐ │
+│ │ STOP_SPINNER() │ │
+│ │ │ │
+│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │
+│ │ │ Get Spinner │ │ Kill Process │ │ Cleanup │ │ │
+│ │ │ PID │ │ │ │ │ │ │
+│ │ │ │ │ • Send TERM │ │ • Remove PID file │ │ │
+│ │ │ • From │ │ • Wait for │ │ • Unset variables │ │ │
+│ │ │ SPINNER_PID │ │ termination │ │ • Reset terminal │ │ │
+│ │ │ • From PID │ │ • Force kill │ │ settings │ │ │
+│ │ │ file │ │ if needed │ │ │ │ │
+│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │
+│ └─────────────────────────────────────────────────────────────────────────────┘ │
+└─────────────────────────────────────────────────────────────────────────────────┘
+```
+
+## Silent Execution Flow
+
+```
+┌─────────────────────────────────────────────────────────────────────────────────┐
+│ SILENT() Execution Flow │
+│ │
+│ ┌─────────────────────────────────────────────────────────────────────────────┐ │
+│ │ Command Execution │ │
+│ │ │ │
+│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │
+│ │ │ Setup │ │ Execute │ │ Capture Output │ │ │
+│ │ │ Environment │ │ Command │ │ │ │ │
+│ │ │ │ │ │ │ • Redirect stdout │ │ │
+│ │ │ • Disable │ │ • Run command │ │ to log file │ │ │
+│ │ │ error │ │ • Capture │ │ • Redirect stderr │ │ │
+│ │ │ handling │ │ return code │ │ to log file │ │ │
+│ │ │ • Remove │ │ • Store exit │ │ • Log all output │ │ │
+│ │ │ traps │ │ code │ │ │ │ │
+│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │
+│ └─────────────────────────────────────────────────────────────────────────────┘ │
+│ │
+│ ┌─────────────────────────────────────────────────────────────────────────────┐ │
+│ │ Error Handling │ │
+│ │ │ │
+│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │
+│ │ │ Check Exit │ │ Load Error │ │ Display Error │ │ │
+│ │ │ Code │ │ Handler │ │ Information │ │ │
+│ │ │ │ │ │ │ │ │ │
+│ │ │ • If exit code │ │ • Source │ │ • Show error code │ │ │
+│ │ │ != 0 │ │ error_handler │ │ • Show explanation │ │ │
+│ │ │ • Proceed to │ │ if needed │ │ • Show command │ │ │
+│ │ │ error │ │ • Get error │ │ • Show log lines │ │ │
+│ │ │ handling │ │ explanation │ │ • Show full log │ │ │
+│ │ │ │ │ │ │ command │ │ │
+│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │
+│ └─────────────────────────────────────────────────────────────────────────────┘ │
+│ │
+│ ┌─────────────────────────────────────────────────────────────────────────────┐ │
+│ │ Log Management │ │
+│ │ │ │
+│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │
+│ │ │ Log File │ │ Log Display │ │ Log Access │ │ │
+│ │ │ Management │ │ │ │ │ │ │
+│ │ │ │ │ • Show last 10 │ │ • Provide command │ │ │
+│ │ │ • Create log │ │ lines │ │ to view full log │ │ │
+│ │ │ file path │ │ • Count total │ │ • Show line count │ │ │
+│ │ │ • Use process │ │ lines │ │ • Enable debugging │ │ │
+│ │ │ ID in name │ │ • Format │ │ │ │ │
+│ │ │ │ │ output │ │ │ │ │
+│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │
+│ └─────────────────────────────────────────────────────────────────────────────┘ │
+└─────────────────────────────────────────────────────────────────────────────────┘
+```
+
+## Header Management Flow
+
+```
+┌─────────────────────────────────────────────────────────────────────────────────┐
+│ Header Management Flow │
+│ │
+│ ┌─────────────────────────────────────────────────────────────────────────────┐ │
+│ │ GET_HEADER() │ │
+│ │ │ │
+│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │
+│ │ │ Prepare │ │ Check Local │ │ Download Header │ │ │
+│ │ │ Parameters │ │ File │ │ │ │ │
+│ │ │ │ │ │ │ • Construct URL │ │ │
+│ │ │ • Get app name │ │ • Check if │ │ • Download file │ │ │
+│ │ │ from APP │ │ file exists │ │ • Save to local │ │ │
+│ │ │ • Get app type │ │ • Check if │ │ path │ │ │
+│ │ │ from APP_TYPE │ │ file has │ │ • Return success │ │ │
+│ │ │ • Construct │ │ content │ │ status │ │ │
+│ │ │ paths │ │ • Return if │ │ │ │ │
+│ │ │ │ │ available │ │ │ │ │
+│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │
+│ └─────────────────────────────────────────────────────────────────────────────┘ │
+│ │
+│ ┌─────────────────────────────────────────────────────────────────────────────┐ │
+│ │ HEADER_INFO() │ │
+│ │ │ │
+│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │
+│ │ │ Get Header │ │ Clear Screen │ │ Display Header │ │ │
+│ │ │ Content │ │ │ │ │ │ │
+│ │ │ │ │ • Clear │ │ • Show header │ │ │
+│ │ │ • Call │ │ terminal │ │ content if │ │ │
+│ │ │ get_header() │ │ • Get terminal │ │ available │ │ │
+│ │ │ • Handle │ │ width │ │ • Format output │ │ │
+│ │ │ errors │ │ • Set default │ │ • Center content │ │ │
+│ │ │ • Return │ │ width if │ │ if possible │ │ │
+│ │ │ content │ │ needed │ │ │ │ │
+│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │
+│ └─────────────────────────────────────────────────────────────────────────────┘ │
+└─────────────────────────────────────────────────────────────────────────────────┘
+```
+
+## Swap Management Flow
+
+```
+┌─────────────────────────────────────────────────────────────────────────────────┐
+│ CHECK_OR_CREATE_SWAP() Flow │
+│ │
+│ ┌─────────────────────────────────────────────────────────────────────────────┐ │
+│ │ Swap Detection │ │
+│ │ │ │
+│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │
+│ │ │ Check Active │ │ Swap Found │ │ No Swap Found │ │ │
+│ │ │ Swap │ │ │ │ │ │ │
+│ │ │ │ │ • Show success │ │ • Show error │ │ │
+│ │ │ • Use swapon │ │ message │ │ message │ │ │
+│ │ │ command │ │ • Return 0 │ │ • Ask user for │ │ │
+│ │ │ • Check for │ │ │ │ creation │ │ │
+│ │ │ swap devices │ │ │ │ • Proceed to │ │ │
+│ │ │ • Return │ │ │ │ creation flow │ │ │
+│ │ │ status │ │ │ │ │ │ │
+│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │
+│ └─────────────────────────────────────────────────────────────────────────────┘ │
+│ │
+│ ┌─────────────────────────────────────────────────────────────────────────────┐ │
+│ │ Swap Creation │ │
+│ │ │ │
+│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │
+│ │ │ User Input │ │ Size │ │ File Creation │ │ │
+│ │ │ Collection │ │ Validation │ │ │ │ │
+│ │ │ │ │ │ │ • Create swap file │ │ │
+│ │ │ • Ask for │ │ • Validate │ │ with dd │ │ │
+│ │ │ confirmation │ │ numeric input │ │ • Set permissions │ │ │
+│ │ │ • Convert to │ │ • Check range │ │ • Format swap │ │ │
+│ │ │ lowercase │ │ • Abort if │ │ • Activate swap │ │ │
+│ │ │ • Check for │ │ invalid │ │ • Show success │ │ │
+│ │ │ y/yes │ │ │ │ message │ │ │
+│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │
+│ └─────────────────────────────────────────────────────────────────────────────┘ │
+└─────────────────────────────────────────────────────────────────────────────────┘
+```
+
+## Integration Points
+
+### With Other Scripts
+- **build.func**: Provides system checks and UI functions
+- **tools.func**: Uses core utilities for extended operations
+- **api.func**: Uses system checks and error handling
+- **error_handler.func**: Provides error explanations for silent execution
+
+### External Dependencies
+- **curl**: For downloading header files
+- **tput**: For terminal control (installed if missing)
+- **swapon/mkswap**: For swap management
+- **pveversion**: For Proxmox version checking
+
+### Data Flow
+- **Input**: Environment variables, command parameters
+- **Processing**: System validation, UI rendering, command execution
+- **Output**: Messages, log files, exit codes, system state changes
diff --git a/docs/misc/core.func/CORE_FUNCTIONS_REFERENCE.md b/docs/misc/core.func/CORE_FUNCTIONS_REFERENCE.md
new file mode 100644
index 000000000..1dacb1609
--- /dev/null
+++ b/docs/misc/core.func/CORE_FUNCTIONS_REFERENCE.md
@@ -0,0 +1,637 @@
+# core.func Functions Reference
+
+## Overview
+
+This document provides a comprehensive alphabetical reference of all functions in `core.func`, including parameters, dependencies, usage examples, and error handling.
+
+## Function Categories
+
+### Initialization Functions
+
+#### `load_functions()`
+**Purpose**: Main function loader that initializes all core utilities
+**Parameters**: None
+**Returns**: None
+**Side Effects**:
+- Sets `__FUNCTIONS_LOADED=1` to prevent reloading
+- Calls all core function groups in sequence
+- Initializes color, formatting, icons, defaults, and standard mode
+**Dependencies**: None
+**Environment Variables Used**: `__FUNCTIONS_LOADED`
+
+**Usage Example**:
+```bash
+# Automatically called when core.func is sourced
+source core.func
+# load_functions() is called automatically
+```
+
+### Color and Formatting Functions
+
+#### `color()`
+**Purpose**: Set ANSI color codes for styled terminal output
+**Parameters**: None
+**Returns**: None
+**Side Effects**: Sets global color variables
+**Dependencies**: None
+**Environment Variables Used**: None
+
+**Sets Variables**:
+- `YW`: Yellow
+- `YWB`: Bright yellow
+- `BL`: Blue
+- `RD`: Red
+- `BGN`: Bright green
+- `GN`: Green
+- `DGN`: Dark green
+- `CL`: Clear/reset
+
+**Usage Example**:
+```bash
+color
+echo -e "${GN}Success message${CL}"
+echo -e "${RD}Error message${CL}"
+```
+
+#### `color_spinner()`
+**Purpose**: Set color codes specifically for spinner output
+**Parameters**: None
+**Returns**: None
+**Side Effects**: Sets spinner-specific color variables
+**Dependencies**: None
+**Environment Variables Used**: None
+
+**Sets Variables**:
+- `CS_YW`: Yellow for spinner
+- `CS_YWB`: Bright yellow for spinner
+- `CS_CL`: Clear for spinner
+
+#### `formatting()`
+**Purpose**: Define formatting helpers for terminal output
+**Parameters**: None
+**Returns**: None
+**Side Effects**: Sets global formatting variables
+**Dependencies**: None
+**Environment Variables Used**: None
+
+**Sets Variables**:
+- `BFR`: Back and forward reset
+- `BOLD`: Bold text
+- `HOLD`: Space character
+- `TAB`: Two spaces
+- `TAB3`: Six spaces
+
+### Icon Functions
+
+#### `icons()`
+**Purpose**: Set symbolic icons used throughout user feedback and prompts
+**Parameters**: None
+**Returns**: None
+**Side Effects**: Sets global icon variables
+**Dependencies**: `formatting()` (for TAB variable)
+**Environment Variables Used**: `TAB`, `CL`
+
+**Sets Variables**:
+- `CM`: Check mark
+- `CROSS`: Cross mark
+- `DNSOK`: DNS success
+- `DNSFAIL`: DNS failure
+- `INFO`: Information icon
+- `OS`: Operating system icon
+- `OSVERSION`: OS version icon
+- `CONTAINERTYPE`: Container type icon
+- `DISKSIZE`: Disk size icon
+- `CPUCORE`: CPU core icon
+- `RAMSIZE`: RAM size icon
+- `SEARCH`: Search icon
+- `VERBOSE_CROPPED`: Verbose mode icon
+- `VERIFYPW`: Password verification icon
+- `CONTAINERID`: Container ID icon
+- `HOSTNAME`: Hostname icon
+- `BRIDGE`: Bridge icon
+- `NETWORK`: Network icon
+- `GATEWAY`: Gateway icon
+- `DISABLEIPV6`: IPv6 disable icon
+- `DEFAULT`: Default settings icon
+- `MACADDRESS`: MAC address icon
+- `VLANTAG`: VLAN tag icon
+- `ROOTSSH`: SSH key icon
+- `CREATING`: Creating icon
+- `ADVANCED`: Advanced settings icon
+- `FUSE`: FUSE icon
+- `HOURGLASS`: Hourglass icon
+
+### Default Variables Functions
+
+#### `default_vars()`
+**Purpose**: Set default retry and wait variables for system actions
+**Parameters**: None
+**Returns**: None
+**Side Effects**: Sets retry configuration variables
+**Dependencies**: None
+**Environment Variables Used**: None
+
+**Sets Variables**:
+- `RETRY_NUM`: Number of retry attempts (default: 10)
+- `RETRY_EVERY`: Seconds between retries (default: 3)
+- `i`: Retry counter initialized to RETRY_NUM
+
+#### `set_std_mode()`
+**Purpose**: Set default verbose mode for script execution
+**Parameters**: None
+**Returns**: None
+**Side Effects**: Sets STD variable based on VERBOSE setting
+**Dependencies**: None
+**Environment Variables Used**: `VERBOSE`
+
+**Sets Variables**:
+- `STD`: "silent" if VERBOSE != "yes", empty string if VERBOSE = "yes"
+
+### Silent Execution Functions
+
+#### `silent()`
+**Purpose**: Execute commands silently with detailed error reporting
+**Parameters**: `$*` - Command and arguments to execute
+**Returns**: None (exits on error)
+**Side Effects**:
+- Executes command with output redirected to log file
+- On error, displays detailed error information
+- Exits with command's exit code
+**Dependencies**: `error_handler.func` (for error explanations)
+**Environment Variables Used**: `SILENT_LOGFILE`
+
+**Usage Example**:
+```bash
+silent apt-get update
+silent apt-get install -y package-name
+```
+
+**Error Handling**:
+- Captures command output to `/tmp/silent.$$.log`
+- Shows error code explanation
+- Displays last 10 lines of log
+- Provides command to view full log
+
+### System Check Functions
+
+#### `shell_check()`
+**Purpose**: Verify that the script is running in Bash shell
+**Parameters**: None
+**Returns**: None (exits if not Bash)
+**Side Effects**:
+- Checks current shell process
+- Exits with error message if not Bash
+**Dependencies**: None
+**Environment Variables Used**: None
+
+**Usage Example**:
+```bash
+shell_check
+# Script continues if Bash, exits if not
+```
+
+#### `root_check()`
+**Purpose**: Ensure script is running as root user
+**Parameters**: None
+**Returns**: None (exits if not root)
+**Side Effects**:
+- Checks user ID and parent process
+- Exits with error message if not root
+**Dependencies**: None
+**Environment Variables Used**: None
+
+**Usage Example**:
+```bash
+root_check
+# Script continues if root, exits if not
+```
+
+#### `pve_check()`
+**Purpose**: Verify Proxmox VE version compatibility
+**Parameters**: None
+**Returns**: None (exits if unsupported version)
+**Side Effects**:
+- Checks PVE version using pveversion command
+- Exits with error message if unsupported
+**Dependencies**: `pveversion` command
+**Environment Variables Used**: None
+
+**Supported Versions**:
+- Proxmox VE 8.0 - 8.9
+- Proxmox VE 9.0 (only)
+
+**Usage Example**:
+```bash
+pve_check
+# Script continues if supported version, exits if not
+```
+
+#### `arch_check()`
+**Purpose**: Verify system architecture is AMD64
+**Parameters**: None
+**Returns**: None (exits if not AMD64)
+**Side Effects**:
+- Checks system architecture
+- Exits with PiMox warning if not AMD64
+**Dependencies**: `dpkg` command
+**Environment Variables Used**: None
+
+**Usage Example**:
+```bash
+arch_check
+# Script continues if AMD64, exits if not
+```
+
+#### `ssh_check()`
+**Purpose**: Detect and warn about external SSH usage
+**Parameters**: None
+**Returns**: None
+**Side Effects**:
+- Checks SSH_CLIENT environment variable
+- Warns if connecting from external IP
+- Allows local connections (127.0.0.1 or host IP)
+**Dependencies**: None
+**Environment Variables Used**: `SSH_CLIENT`
+
+**Usage Example**:
+```bash
+ssh_check
+# Shows warning if external SSH, continues anyway
+```
+
+### Header Management Functions
+
+#### `get_header()`
+**Purpose**: Download and cache application header files
+**Parameters**: None (uses APP and APP_TYPE variables)
+**Returns**: Header content on success, empty on failure
+**Side Effects**:
+- Downloads header from remote URL
+- Caches header locally
+- Creates directory structure if needed
+**Dependencies**: `curl` command
+**Environment Variables Used**: `APP`, `APP_TYPE`
+
+**Usage Example**:
+```bash
+export APP="plex"
+export APP_TYPE="ct"
+header_content=$(get_header)
+```
+
+#### `header_info()`
+**Purpose**: Display application header information
+**Parameters**: None (uses APP variable)
+**Returns**: None
+**Side Effects**:
+- Clears screen
+- Displays header content
+- Gets terminal width for formatting
+**Dependencies**: `get_header()`, `tput` command
+**Environment Variables Used**: `APP`
+
+**Usage Example**:
+```bash
+export APP="plex"
+header_info
+# Displays Plex header information
+```
+
+### Utility Functions
+
+#### `ensure_tput()`
+**Purpose**: Ensure tput command is available for terminal control
+**Parameters**: None
+**Returns**: None
+**Side Effects**:
+- Installs ncurses package if tput missing
+- Works on Alpine and Debian-based systems
+**Dependencies**: `apk` or `apt-get` package managers
+**Environment Variables Used**: None
+
+**Usage Example**:
+```bash
+ensure_tput
+# Installs ncurses if needed, continues if already available
+```
+
+#### `is_alpine()`
+**Purpose**: Detect if running on Alpine Linux
+**Parameters**: None
+**Returns**: 0 if Alpine, 1 if not Alpine
+**Side Effects**: None
+**Dependencies**: None
+**Environment Variables Used**: `var_os`, `PCT_OSTYPE`
+
+**Usage Example**:
+```bash
+if is_alpine; then
+ echo "Running on Alpine Linux"
+else
+ echo "Not running on Alpine Linux"
+fi
+```
+
+#### `is_verbose_mode()`
+**Purpose**: Check if verbose mode is enabled
+**Parameters**: None
+**Returns**: 0 if verbose mode, 1 if not verbose
+**Side Effects**: None
+**Dependencies**: None
+**Environment Variables Used**: `VERBOSE`, `var_verbose`
+
+**Usage Example**:
+```bash
+if is_verbose_mode; then
+ echo "Verbose mode enabled"
+else
+ echo "Verbose mode disabled"
+fi
+```
+
+#### `fatal()`
+**Purpose**: Display fatal error and terminate script
+**Parameters**: `$1` - Error message
+**Returns**: None (terminates script)
+**Side Effects**:
+- Displays error message
+- Sends INT signal to current process
+**Dependencies**: `msg_error()`
+**Environment Variables Used**: None
+
+**Usage Example**:
+```bash
+fatal "Critical error occurred"
+# Script terminates after displaying error
+```
+
+### Spinner Functions
+
+#### `spinner()`
+**Purpose**: Display animated spinner for progress indication
+**Parameters**: None (uses SPINNER_MSG variable)
+**Returns**: None (runs indefinitely)
+**Side Effects**:
+- Displays rotating spinner characters
+- Uses terminal control sequences
+**Dependencies**: `color_spinner()`
+**Environment Variables Used**: `SPINNER_MSG`
+
+**Usage Example**:
+```bash
+SPINNER_MSG="Processing..."
+spinner &
+SPINNER_PID=$!
+# Spinner runs in background
+```
+
+#### `clear_line()`
+**Purpose**: Clear current terminal line
+**Parameters**: None
+**Returns**: None
+**Side Effects**: Clears current line using terminal control
+**Dependencies**: `tput` command
+**Environment Variables Used**: None
+
+#### `stop_spinner()`
+**Purpose**: Stop running spinner and cleanup
+**Parameters**: None
+**Returns**: None
+**Side Effects**:
+- Kills spinner process
+- Removes PID file
+- Resets terminal settings
+- Unsets spinner variables
+**Dependencies**: None
+**Environment Variables Used**: `SPINNER_PID`, `SPINNER_MSG`
+
+**Usage Example**:
+```bash
+stop_spinner
+# Stops spinner and cleans up
+```
+
+### Message Functions
+
+#### `msg_info()`
+**Purpose**: Display informational message with spinner
+**Parameters**: `$1` - Message text
+**Returns**: None
+**Side Effects**:
+- Starts spinner if not in verbose mode
+- Tracks shown messages to prevent duplicates
+- Displays message with hourglass icon in verbose mode
+**Dependencies**: `spinner()`, `is_verbose_mode()`, `is_alpine()`
+**Environment Variables Used**: `MSG_INFO_SHOWN`
+
+**Usage Example**:
+```bash
+msg_info "Installing package..."
+# Shows spinner with message
+```
+
+#### `msg_ok()`
+**Purpose**: Display success message
+**Parameters**: `$1` - Success message text
+**Returns**: None
+**Side Effects**:
+- Stops spinner
+- Displays green checkmark with message
+- Removes message from shown tracking
+**Dependencies**: `stop_spinner()`
+**Environment Variables Used**: `MSG_INFO_SHOWN`
+
+**Usage Example**:
+```bash
+msg_ok "Package installed successfully"
+# Shows green checkmark with message
+```
+
+#### `msg_error()`
+**Purpose**: Display error message
+**Parameters**: `$1` - Error message text
+**Returns**: None
+**Side Effects**:
+- Stops spinner
+- Displays red cross with message
+**Dependencies**: `stop_spinner()`
+**Environment Variables Used**: None
+
+**Usage Example**:
+```bash
+msg_error "Installation failed"
+# Shows red cross with message
+```
+
+#### `msg_warn()`
+**Purpose**: Display warning message
+**Parameters**: `$1` - Warning message text
+**Returns**: None
+**Side Effects**:
+- Stops spinner
+- Displays yellow info icon with message
+**Dependencies**: `stop_spinner()`
+**Environment Variables Used**: None
+
+**Usage Example**:
+```bash
+msg_warn "This operation may take some time"
+# Shows yellow info icon with message
+```
+
+#### `msg_custom()`
+**Purpose**: Display custom message with specified symbol and color
+**Parameters**:
+- `$1` - Custom symbol (default: "[*]")
+- `$2` - Color code (default: "\e[36m")
+- `$3` - Message text
+**Returns**: None
+**Side Effects**:
+- Stops spinner
+- Displays custom formatted message
+**Dependencies**: `stop_spinner()`
+**Environment Variables Used**: None
+
+**Usage Example**:
+```bash
+msg_custom "⚡" "\e[33m" "Custom warning message"
+# Shows custom symbol and color with message
+```
+
+#### `msg_debug()`
+**Purpose**: Display debug message if debug mode enabled
+**Parameters**: `$*` - Debug message text
+**Returns**: None
+**Side Effects**:
+- Only displays if var_full_verbose is set
+- Shows timestamp and debug prefix
+**Dependencies**: None
+**Environment Variables Used**: `var_full_verbose`, `var_verbose`
+
+**Usage Example**:
+```bash
+export var_full_verbose=1
+msg_debug "Debug information here"
+# Shows debug message with timestamp
+```
+
+### System Management Functions
+
+#### `check_or_create_swap()`
+**Purpose**: Check for active swap and optionally create swap file
+**Parameters**: None
+**Returns**: 0 if swap exists or created, 1 if skipped
+**Side Effects**:
+- Checks for active swap
+- Prompts user to create swap if none found
+- Creates swap file if user confirms
+**Dependencies**: `swapon`, `dd`, `mkswap` commands
+**Environment Variables Used**: None
+
+**Usage Example**:
+```bash
+if check_or_create_swap; then
+ echo "Swap is available"
+else
+ echo "No swap available"
+fi
+```
+
+## Function Call Hierarchy
+
+### Initialization Flow
+```
+load_functions()
+├── color()
+├── formatting()
+├── icons()
+├── default_vars()
+└── set_std_mode()
+```
+
+### Message System Flow
+```
+msg_info()
+├── is_verbose_mode()
+├── is_alpine()
+├── spinner()
+└── color_spinner()
+
+msg_ok()
+├── stop_spinner()
+└── clear_line()
+
+msg_error()
+└── stop_spinner()
+
+msg_warn()
+└── stop_spinner()
+```
+
+### System Check Flow
+```
+pve_check()
+├── pveversion command
+└── version parsing
+
+arch_check()
+├── dpkg command
+└── architecture check
+
+shell_check()
+├── ps command
+└── shell detection
+
+root_check()
+├── id command
+└── parent process check
+```
+
+### Silent Execution Flow
+```
+silent()
+├── Command execution
+├── Output redirection
+├── Error handling
+├── error_handler.func loading
+└── Log management
+```
+
+## Error Handling Patterns
+
+### System Check Errors
+- All system check functions exit with appropriate error messages
+- Clear indication of what's wrong and how to fix it
+- Graceful exit with sleep delay for user to read message
+
+### Silent Execution Errors
+- Commands executed via `silent()` capture output to log file
+- On failure, displays error code explanation
+- Shows last 10 lines of log output
+- Provides command to view full log
+
+### Spinner Errors
+- Spinner functions handle process cleanup on exit
+- Trap handlers ensure spinners are stopped
+- Terminal settings are restored on error
+
+## Environment Variable Dependencies
+
+### Required Variables
+- `APP`: Application name for header display
+- `APP_TYPE`: Application type (ct/vm) for header paths
+- `VERBOSE`: Verbose mode setting
+
+### Optional Variables
+- `var_os`: OS type for Alpine detection
+- `PCT_OSTYPE`: Alternative OS type variable
+- `var_verbose`: Alternative verbose setting
+- `var_full_verbose`: Debug mode setting
+
+### Internal Variables
+- `_CORE_FUNC_LOADED`: Prevents multiple loading
+- `__FUNCTIONS_LOADED`: Prevents multiple function loading
+- `SILENT_LOGFILE`: Silent execution log file path
+- `SPINNER_PID`: Spinner process ID
+- `SPINNER_MSG`: Spinner message text
+- `MSG_INFO_SHOWN`: Tracks shown info messages
diff --git a/docs/misc/core.func/CORE_INTEGRATION.md b/docs/misc/core.func/CORE_INTEGRATION.md
new file mode 100644
index 000000000..b203f1c73
--- /dev/null
+++ b/docs/misc/core.func/CORE_INTEGRATION.md
@@ -0,0 +1,517 @@
+# core.func Integration Guide
+
+## Overview
+
+This document describes how `core.func` integrates with other components in the Proxmox Community Scripts project, including dependencies, data flow, and API surface.
+
+## Dependencies
+
+### External Dependencies
+
+#### Required Commands
+- **`pveversion`**: Proxmox VE version checking
+- **`dpkg`**: Architecture detection
+- **`ps`**: Process and shell detection
+- **`id`**: User ID checking
+- **`curl`**: Header file downloading
+- **`swapon`**: Swap status checking
+- **`dd`**: Swap file creation
+- **`mkswap`**: Swap file formatting
+
+#### Optional Commands
+- **`tput`**: Terminal control (installed if missing)
+- **`apk`**: Alpine package manager
+- **`apt-get`**: Debian package manager
+
+### Internal Dependencies
+
+#### error_handler.func
+- **Purpose**: Provides error code explanations for silent execution
+- **Usage**: Automatically loaded when `silent()` encounters errors
+- **Integration**: Called via `explain_exit_code()` function
+- **Data Flow**: Error code → explanation → user display
+
+## Integration Points
+
+### With build.func
+
+#### System Validation
+```bash
+# build.func uses core.func for system checks
+source core.func
+pve_check
+arch_check
+shell_check
+root_check
+```
+
+#### User Interface
+```bash
+# build.func uses core.func for UI elements
+msg_info "Creating container..."
+msg_ok "Container created successfully"
+msg_error "Container creation failed"
+```
+
+#### Silent Execution
+```bash
+# build.func uses core.func for command execution
+silent pct create "$CTID" "$TEMPLATE" \
+ --hostname "$HOSTNAME" \
+ --memory "$MEMORY" \
+ --cores "$CORES"
+```
+
+### With tools.func
+
+#### Utility Functions
+```bash
+# tools.func uses core.func utilities
+source core.func
+
+# System checks
+pve_check
+root_check
+
+# UI elements
+msg_info "Running maintenance tasks..."
+msg_ok "Maintenance completed"
+```
+
+#### Error Handling
+```bash
+# tools.func uses core.func for error handling
+if silent systemctl restart service; then
+ msg_ok "Service restarted"
+else
+ msg_error "Service restart failed"
+fi
+```
+
+### With api.func
+
+#### System Validation
+```bash
+# api.func uses core.func for system checks
+source core.func
+pve_check
+root_check
+```
+
+#### API Operations
+```bash
+# api.func uses core.func for API calls
+msg_info "Connecting to Proxmox API..."
+if silent curl -k -H "Authorization: PVEAPIToken=$API_TOKEN" \
+ "$API_URL/api2/json/nodes/$NODE/lxc"; then
+ msg_ok "API connection successful"
+else
+ msg_error "API connection failed"
+fi
+```
+
+### With error_handler.func
+
+#### Error Explanations
+```bash
+# error_handler.func provides explanations for core.func
+explain_exit_code() {
+ local code="$1"
+ case "$code" in
+ 1) echo "General error" ;;
+ 2) echo "Misuse of shell builtins" ;;
+ 126) echo "Command invoked cannot execute" ;;
+ 127) echo "Command not found" ;;
+ 128) echo "Invalid argument to exit" ;;
+ *) echo "Unknown error code" ;;
+ esac
+}
+```
+
+### With install.func
+
+#### Installation Process
+```bash
+# install.func uses core.func for installation
+source core.func
+
+# System checks
+pve_check
+root_check
+
+# Installation steps
+msg_info "Installing packages..."
+silent apt-get update
+silent apt-get install -y package
+
+msg_ok "Installation completed"
+```
+
+### With alpine-install.func
+
+#### Alpine-Specific Operations
+```bash
+# alpine-install.func uses core.func for Alpine operations
+source core.func
+
+# Alpine detection
+if is_alpine; then
+ msg_info "Detected Alpine Linux"
+ silent apk add --no-cache package
+else
+ msg_info "Detected Debian-based system"
+ silent apt-get install -y package
+fi
+```
+
+### With alpine-tools.func
+
+#### Alpine Utilities
+```bash
+# alpine-tools.func uses core.func for Alpine tools
+source core.func
+
+# Alpine-specific operations
+if is_alpine; then
+ msg_info "Running Alpine-specific operations..."
+ # Alpine tools logic
+ msg_ok "Alpine operations completed"
+fi
+```
+
+### With passthrough.func
+
+#### Hardware Passthrough
+```bash
+# passthrough.func uses core.func for hardware operations
+source core.func
+
+# System checks
+pve_check
+root_check
+
+# Hardware operations
+msg_info "Configuring GPU passthrough..."
+if silent lspci | grep -i nvidia; then
+ msg_ok "NVIDIA GPU detected"
+else
+ msg_warn "No NVIDIA GPU found"
+fi
+```
+
+### With vm-core.func
+
+#### VM Operations
+```bash
+# vm-core.func uses core.func for VM management
+source core.func
+
+# System checks
+pve_check
+root_check
+
+# VM operations
+msg_info "Creating virtual machine..."
+silent qm create "$VMID" \
+ --name "$VMNAME" \
+ --memory "$MEMORY" \
+ --cores "$CORES"
+
+msg_ok "Virtual machine created"
+```
+
+## Data Flow
+
+### Input Data
+
+#### Environment Variables
+- **`APP`**: Application name for header display
+- **`APP_TYPE`**: Application type (ct/vm) for header paths
+- **`VERBOSE`**: Verbose mode setting
+- **`var_os`**: OS type for Alpine detection
+- **`PCT_OSTYPE`**: Alternative OS type variable
+- **`var_verbose`**: Alternative verbose setting
+- **`var_full_verbose`**: Debug mode setting
+
+#### Command Parameters
+- **Function arguments**: Passed to individual functions
+- **Command arguments**: Passed to `silent()` function
+- **User input**: Collected via `read` commands
+
+### Processing Data
+
+#### System Information
+- **Proxmox version**: Parsed from `pveversion` output
+- **Architecture**: Retrieved from `dpkg --print-architecture`
+- **Shell type**: Detected from process information
+- **User ID**: Retrieved from `id -u`
+- **SSH connection**: Detected from `SSH_CLIENT` environment
+
+#### UI State
+- **Message tracking**: `MSG_INFO_SHOWN` associative array
+- **Spinner state**: `SPINNER_PID` and `SPINNER_MSG` variables
+- **Terminal state**: Cursor position and display mode
+
+#### Error Information
+- **Exit codes**: Captured from command execution
+- **Log output**: Redirected to temporary log files
+- **Error explanations**: Retrieved from error_handler.func
+
+### Output Data
+
+#### User Interface
+- **Colored messages**: ANSI color codes for terminal output
+- **Icons**: Symbolic representations for different message types
+- **Spinners**: Animated progress indicators
+- **Formatted text**: Consistent message formatting
+
+#### System State
+- **Exit codes**: Returned from functions
+- **Log files**: Created for silent execution
+- **Configuration**: Modified system settings
+- **Process state**: Spinner processes and cleanup
+
+## API Surface
+
+### Public Functions
+
+#### System Validation
+- **`pve_check()`**: Proxmox VE version validation
+- **`arch_check()`**: Architecture validation
+- **`shell_check()`**: Shell validation
+- **`root_check()`**: Privilege validation
+- **`ssh_check()`**: SSH connection warning
+
+#### User Interface
+- **`msg_info()`**: Informational messages
+- **`msg_ok()`**: Success messages
+- **`msg_error()`**: Error messages
+- **`msg_warn()`**: Warning messages
+- **`msg_custom()`**: Custom messages
+- **`msg_debug()`**: Debug messages
+
+#### Spinner Control
+- **`spinner()`**: Start spinner animation
+- **`stop_spinner()`**: Stop spinner and cleanup
+- **`clear_line()`**: Clear current terminal line
+
+#### Silent Execution
+- **`silent()`**: Execute commands with error handling
+
+#### Utility Functions
+- **`is_alpine()`**: Alpine Linux detection
+- **`is_verbose_mode()`**: Verbose mode detection
+- **`fatal()`**: Fatal error handling
+- **`ensure_tput()`**: Terminal control setup
+
+#### Header Management
+- **`get_header()`**: Download application headers
+- **`header_info()`**: Display header information
+
+#### System Management
+- **`check_or_create_swap()`**: Swap file management
+
+### Internal Functions
+
+#### Initialization
+- **`load_functions()`**: Function loader
+- **`color()`**: Color setup
+- **`formatting()`**: Formatting setup
+- **`icons()`**: Icon setup
+- **`default_vars()`**: Default variables
+- **`set_std_mode()`**: Standard mode setup
+
+#### Color Management
+- **`color_spinner()`**: Spinner colors
+
+### Global Variables
+
+#### Color Variables
+- **`YW`**, **`YWB`**, **`BL`**, **`RD`**, **`BGN`**, **`GN`**, **`DGN`**, **`CL`**: Color codes
+- **`CS_YW`**, **`CS_YWB`**, **`CS_CL`**: Spinner colors
+
+#### Formatting Variables
+- **`BFR`**, **`BOLD`**, **`HOLD`**, **`TAB`**, **`TAB3`**: Formatting helpers
+
+#### Icon Variables
+- **`CM`**, **`CROSS`**, **`INFO`**, **`OS`**, **`OSVERSION`**, etc.: Message icons
+
+#### Configuration Variables
+- **`RETRY_NUM`**, **`RETRY_EVERY`**: Retry settings
+- **`STD`**: Standard mode setting
+- **`SILENT_LOGFILE`**: Log file path
+
+#### State Variables
+- **`_CORE_FUNC_LOADED`**: Loading prevention
+- **`__FUNCTIONS_LOADED`**: Function loading prevention
+- **`SPINNER_PID`**, **`SPINNER_MSG`**: Spinner state
+- **`MSG_INFO_SHOWN`**: Message tracking
+
+## Integration Patterns
+
+### Standard Integration Pattern
+
+```bash
+#!/usr/bin/env bash
+# Standard integration pattern
+
+# 1. Source core.func first
+source core.func
+
+# 2. Run system checks
+pve_check
+arch_check
+shell_check
+root_check
+
+# 3. Set up error handling
+trap 'stop_spinner' EXIT INT TERM
+
+# 4. Use UI functions
+msg_info "Starting operation..."
+
+# 5. Use silent execution
+silent command
+
+# 6. Show completion
+msg_ok "Operation completed"
+```
+
+### Minimal Integration Pattern
+
+```bash
+#!/usr/bin/env bash
+# Minimal integration pattern
+
+source core.func
+pve_check
+root_check
+
+msg_info "Running operation..."
+silent command
+msg_ok "Operation completed"
+```
+
+### Advanced Integration Pattern
+
+```bash
+#!/usr/bin/env bash
+# Advanced integration pattern
+
+source core.func
+
+# System validation
+pve_check
+arch_check
+shell_check
+root_check
+ssh_check
+
+# Error handling
+trap 'stop_spinner' EXIT INT TERM
+
+# Verbose mode handling
+if is_verbose_mode; then
+ msg_info "Verbose mode enabled"
+fi
+
+# OS-specific operations
+if is_alpine; then
+ msg_info "Alpine Linux detected"
+ # Alpine-specific logic
+else
+ msg_info "Debian-based system detected"
+ # Debian-specific logic
+fi
+
+# Operation execution
+msg_info "Starting operation..."
+if silent command; then
+ msg_ok "Operation succeeded"
+else
+ msg_error "Operation failed"
+ exit 1
+fi
+```
+
+## Error Handling Integration
+
+### Silent Execution Error Flow
+
+```
+silent() command
+├── Execute command
+├── Capture output to log
+├── Check exit code
+├── If error:
+│ ├── Load error_handler.func
+│ ├── Get error explanation
+│ ├── Display error details
+│ ├── Show log excerpt
+│ └── Exit with error code
+└── If success: Continue
+```
+
+### System Check Error Flow
+
+```
+System Check Function
+├── Check system state
+├── If valid: Return 0
+└── If invalid:
+ ├── Display error message
+ ├── Show fix instructions
+ ├── Sleep for user to read
+ └── Exit with error code
+```
+
+## Performance Considerations
+
+### Loading Optimization
+- **Single Loading**: `_CORE_FUNC_LOADED` prevents multiple loading
+- **Function Loading**: `__FUNCTIONS_LOADED` prevents multiple function loading
+- **Lazy Loading**: Functions loaded only when needed
+
+### Memory Usage
+- **Minimal Footprint**: Core functions use minimal memory
+- **Variable Reuse**: Global variables reused across functions
+- **Cleanup**: Spinner processes cleaned up on exit
+
+### Execution Speed
+- **Fast Checks**: System checks are optimized for speed
+- **Efficient Spinners**: Spinner animation uses minimal CPU
+- **Quick Messages**: Message functions optimized for performance
+
+## Security Considerations
+
+### Privilege Escalation
+- **Root Check**: Ensures script runs with sufficient privileges
+- **Shell Check**: Validates shell environment
+- **Process Validation**: Checks parent process for sudo usage
+
+### Input Validation
+- **Parameter Checking**: Functions validate input parameters
+- **Error Handling**: Proper error handling prevents crashes
+- **Safe Execution**: Silent execution with proper error handling
+
+### System Protection
+- **Version Validation**: Ensures compatible Proxmox version
+- **Architecture Check**: Prevents execution on unsupported systems
+- **SSH Warning**: Warns about external SSH usage
+
+## Future Integration Considerations
+
+### Extensibility
+- **Function Groups**: Easy to add new function groups
+- **Message Types**: Easy to add new message types
+- **System Checks**: Easy to add new system checks
+
+### Compatibility
+- **Version Support**: Easy to add new Proxmox versions
+- **OS Support**: Easy to add new operating systems
+- **Architecture Support**: Easy to add new architectures
+
+### Performance
+- **Optimization**: Functions can be optimized for better performance
+- **Caching**: Results can be cached for repeated operations
+- **Parallelization**: Operations can be parallelized where appropriate
diff --git a/docs/misc/core.func/CORE_USAGE_EXAMPLES.md b/docs/misc/core.func/CORE_USAGE_EXAMPLES.md
new file mode 100644
index 000000000..c702bd2ed
--- /dev/null
+++ b/docs/misc/core.func/CORE_USAGE_EXAMPLES.md
@@ -0,0 +1,728 @@
+# core.func Usage Examples
+
+## Overview
+
+This document provides practical usage examples for `core.func` functions, covering common scenarios, integration patterns, and best practices.
+
+## Basic Script Setup
+
+### Standard Script Initialization
+
+```bash
+#!/usr/bin/env bash
+# Standard script setup using core.func
+
+# Source core functions
+source core.func
+
+# Run system checks
+pve_check
+arch_check
+shell_check
+root_check
+
+# Optional: Check SSH connection
+ssh_check
+
+# Set up error handling
+trap 'stop_spinner' EXIT INT TERM
+
+# Your script logic here
+msg_info "Starting script execution"
+# ... script code ...
+msg_ok "Script completed successfully"
+```
+
+### Minimal Script Setup
+
+```bash
+#!/usr/bin/env bash
+# Minimal setup for simple scripts
+
+source core.func
+
+# Basic checks only
+pve_check
+root_check
+
+# Simple execution
+msg_info "Running operation"
+# ... your code ...
+msg_ok "Operation completed"
+```
+
+## Message Display Examples
+
+### Progress Indication
+
+```bash
+#!/usr/bin/env bash
+source core.func
+
+# Show progress with spinner
+msg_info "Downloading package..."
+sleep 2
+msg_ok "Download completed"
+
+msg_info "Installing package..."
+sleep 3
+msg_ok "Installation completed"
+
+msg_info "Configuring service..."
+sleep 1
+msg_ok "Configuration completed"
+```
+
+### Error Handling
+
+```bash
+#!/usr/bin/env bash
+source core.func
+
+# Function with error handling
+install_package() {
+ local package="$1"
+
+ msg_info "Installing $package..."
+
+ if silent apt-get install -y "$package"; then
+ msg_ok "$package installed successfully"
+ return 0
+ else
+ msg_error "Failed to install $package"
+ return 1
+ fi
+}
+
+# Usage
+if install_package "nginx"; then
+ msg_ok "Nginx installation completed"
+else
+ msg_error "Nginx installation failed"
+ exit 1
+fi
+```
+
+### Warning Messages
+
+```bash
+#!/usr/bin/env bash
+source core.func
+
+# Show warnings for potentially dangerous operations
+msg_warn "This will modify system configuration"
+read -p "Continue? [y/N]: " confirm
+
+if [[ "$confirm" =~ ^[yY]$ ]]; then
+ msg_info "Proceeding with modification..."
+ # ... dangerous operation ...
+ msg_ok "Modification completed"
+else
+ msg_info "Operation cancelled"
+fi
+```
+
+### Custom Messages
+
+```bash
+#!/usr/bin/env bash
+source core.func
+
+# Custom message with specific icon and color
+msg_custom "🚀" "\e[32m" "Launching application"
+msg_custom "⚡" "\e[33m" "High performance mode enabled"
+msg_custom "🔒" "\e[31m" "Security mode activated"
+```
+
+### Debug Messages
+
+```bash
+#!/usr/bin/env bash
+source core.func
+
+# Enable debug mode
+export var_full_verbose=1
+
+# Debug messages
+msg_debug "Variable value: $some_variable"
+msg_debug "Function called: $FUNCNAME"
+msg_debug "Current directory: $(pwd)"
+```
+
+## Silent Execution Examples
+
+### Package Management
+
+```bash
+#!/usr/bin/env bash
+source core.func
+
+# Update package lists
+msg_info "Updating package lists..."
+silent apt-get update
+
+# Install packages
+msg_info "Installing required packages..."
+silent apt-get install -y curl wget git
+
+# Upgrade packages
+msg_info "Upgrading packages..."
+silent apt-get upgrade -y
+
+msg_ok "Package management completed"
+```
+
+### File Operations
+
+```bash
+#!/usr/bin/env bash
+source core.func
+
+# Create directories
+msg_info "Creating directory structure..."
+silent mkdir -p /opt/myapp/{config,logs,data}
+
+# Set permissions
+msg_info "Setting permissions..."
+silent chmod 755 /opt/myapp
+silent chmod 644 /opt/myapp/config/*
+
+# Copy files
+msg_info "Copying configuration files..."
+silent cp config/* /opt/myapp/config/
+
+msg_ok "File operations completed"
+```
+
+### Service Management
+
+```bash
+#!/usr/bin/env bash
+source core.func
+
+# Start service
+msg_info "Starting service..."
+silent systemctl start myservice
+
+# Enable service
+msg_info "Enabling service..."
+silent systemctl enable myservice
+
+# Check service status
+msg_info "Checking service status..."
+if silent systemctl is-active --quiet myservice; then
+ msg_ok "Service is running"
+else
+ msg_error "Service failed to start"
+fi
+```
+
+### Network Operations
+
+```bash
+#!/usr/bin/env bash
+source core.func
+
+# Test network connectivity
+msg_info "Testing network connectivity..."
+if silent ping -c 1 8.8.8.8; then
+ msg_ok "Network connectivity confirmed"
+else
+ msg_error "Network connectivity failed"
+fi
+
+# Download files
+msg_info "Downloading configuration..."
+silent curl -fsSL https://example.com/config -o /tmp/config
+
+# Extract archives
+msg_info "Extracting archive..."
+silent tar -xzf /tmp/archive.tar.gz -C /opt/
+```
+
+## System Check Examples
+
+### Comprehensive System Validation
+
+```bash
+#!/usr/bin/env bash
+source core.func
+
+# Complete system validation
+validate_system() {
+ msg_info "Validating system requirements..."
+
+ # Check Proxmox version
+ if pve_check; then
+ msg_ok "Proxmox VE version is supported"
+ fi
+
+ # Check architecture
+ if arch_check; then
+ msg_ok "System architecture is supported"
+ fi
+
+ # Check shell
+ if shell_check; then
+ msg_ok "Shell environment is correct"
+ fi
+
+ # Check privileges
+ if root_check; then
+ msg_ok "Running with sufficient privileges"
+ fi
+
+ # Check SSH connection
+ ssh_check
+
+ msg_ok "System validation completed"
+}
+
+# Run validation
+validate_system
+```
+
+### Conditional System Checks
+
+```bash
+#!/usr/bin/env bash
+source core.func
+
+# Check if running in container
+if [[ -f /.dockerenv ]] || [[ -f /run/.containerenv ]]; then
+ msg_warn "Running inside container"
+ # Skip some checks
+else
+ # Full system checks
+ pve_check
+ arch_check
+fi
+
+# Always check shell and privileges
+shell_check
+root_check
+```
+
+## Header Management Examples
+
+### Application Header Display
+
+```bash
+#!/usr/bin/env bash
+source core.func
+
+# Set application information
+export APP="plex"
+export APP_TYPE="ct"
+
+# Display header
+header_info
+
+# Continue with application setup
+msg_info "Setting up Plex Media Server..."
+```
+
+### Custom Header Handling
+
+```bash
+#!/usr/bin/env bash
+source core.func
+
+# Get header content
+export APP="nextcloud"
+export APP_TYPE="ct"
+
+header_content=$(get_header)
+if [[ -n "$header_content" ]]; then
+ echo "Header found:"
+ echo "$header_content"
+else
+ msg_warn "No header found for $APP"
+fi
+```
+
+## Swap Management Examples
+
+### Interactive Swap Creation
+
+```bash
+#!/usr/bin/env bash
+source core.func
+
+# Check and create swap
+if check_or_create_swap; then
+ msg_ok "Swap is available"
+else
+ msg_warn "No swap available - continuing without swap"
+fi
+```
+
+### Automated Swap Check
+
+```bash
+#!/usr/bin/env bash
+source core.func
+
+# Check swap without prompting
+check_swap_quiet() {
+    # swapon --show prints nothing when no swap is active; device names and
+    # types need not contain the word "swap", so test for any output instead
+    if [[ -n "$(swapon --noheadings --show)" ]]; then
+ msg_ok "Swap is active"
+ return 0
+ else
+ msg_warn "No active swap detected"
+ return 1
+ fi
+}
+
+if check_swap_quiet; then
+ msg_info "System has sufficient swap"
+else
+ msg_warn "Consider adding swap for better performance"
+fi
+```
+
+## Spinner Usage Examples
+
+### Long-Running Operations
+
+```bash
+#!/usr/bin/env bash
+source core.func
+
+# Long-running operation with spinner
+long_operation() {
+ msg_info "Processing large dataset..."
+
+ # Simulate long operation
+ for i in {1..100}; do
+ sleep 0.1
+ # Update spinner message periodically
+ if (( i % 20 == 0 )); then
+ SPINNER_MSG="Processing... $i%"
+ fi
+ done
+
+ msg_ok "Dataset processing completed"
+}
+
+long_operation
+```
+
+### Background Operations
+
+```bash
+#!/usr/bin/env bash
+source core.func
+
+# Background operation with spinner
+background_operation() {
+ msg_info "Starting background process..."
+
+ # Start spinner
+ SPINNER_MSG="Processing in background..."
+ spinner &
+ SPINNER_PID=$!
+
+ # Do background work
+ sleep 5
+
+ # Stop spinner
+ stop_spinner
+ msg_ok "Background process completed"
+}
+
+background_operation
+```
+
+## Integration Examples
+
+### With build.func
+
+```bash
+#!/usr/bin/env bash
+# Integration with build.func
+
+source core.func
+source build.func
+
+# Use core functions for system validation
+pve_check
+arch_check
+root_check
+
+# Use build.func for container creation
+export APP="plex"
+export CTID="100"
+# ... container creation ...
+```
+
+### With tools.func
+
+```bash
+#!/usr/bin/env bash
+# Integration with tools.func
+
+source core.func
+source tools.func
+
+# Use core functions for UI
+msg_info "Starting maintenance tasks..."
+
+# Use tools.func for maintenance
+update_system
+cleanup_logs
+optimize_storage
+
+msg_ok "Maintenance completed"
+```
+
+### With error_handler.func
+
+```bash
+#!/usr/bin/env bash
+# Integration with error_handler.func
+
+source core.func
+source error_handler.func
+
+# Use core functions for execution
+msg_info "Running operation..."
+
+# Silent execution will use error_handler for explanations
+silent apt-get install -y package
+
+msg_ok "Operation completed"
+```
+
+## Best Practices Examples
+
+### Error Handling Pattern
+
+```bash
+#!/usr/bin/env bash
+source core.func
+
+# Robust error handling
+run_with_error_handling() {
+    local description="$1"
+    shift  # remaining arguments are the command and its arguments
+
+    msg_info "$description"
+
+    if silent "$@"; then
+        msg_ok "$description completed successfully"
+        return 0
+    else
+        msg_error "$description failed"
+        return 1
+    fi
+}
+
+# Usage — pass the command as separate arguments, not one quoted string
+run_with_error_handling "Package list update" apt-get update
+run_with_error_handling "Nginx installation" apt-get install -y nginx
+```
+
+### Verbose Mode Handling
+
+```bash
+#!/usr/bin/env bash
+source core.func
+
+# Handle verbose mode
+if is_verbose_mode; then
+ msg_info "Verbose mode enabled - showing detailed output"
+ # Show more information
+else
+ msg_info "Normal mode - showing minimal output"
+ # Show less information
+fi
+```
+
+### Alpine Linux Detection
+
+```bash
+#!/usr/bin/env bash
+source core.func
+
+# Handle different OS types
+if is_alpine; then
+ msg_info "Detected Alpine Linux"
+ # Use Alpine-specific commands
+ silent apk add --no-cache package
+else
+ msg_info "Detected Debian-based system"
+ # Use Debian-specific commands
+ silent apt-get install -y package
+fi
+```
+
+### Conditional Execution
+
+```bash
+#!/usr/bin/env bash
+source core.func
+
+# Conditional execution based on system state
+if [[ -f /etc/nginx/nginx.conf ]]; then
+ msg_warn "Nginx configuration already exists"
+ read -p "Overwrite? [y/N]: " overwrite
+ if [[ "$overwrite" =~ ^[yY]$ ]]; then
+ msg_info "Overwriting configuration..."
+ # ... overwrite logic ...
+ else
+ msg_info "Skipping configuration"
+ fi
+else
+ msg_info "Creating new Nginx configuration..."
+ # ... create logic ...
+fi
+```
+
+## Advanced Usage Examples
+
+### Custom Spinner Messages
+
+```bash
+#!/usr/bin/env bash
+source core.func
+
+# Custom spinner with progress
+download_with_progress() {
+ local url="$1"
+ local file="$2"
+
+ msg_info "Starting download..."
+
+ # Start spinner
+ SPINNER_MSG="Downloading..."
+ spinner &
+ SPINNER_PID=$!
+
+ # Download with progress
+ curl -L "$url" -o "$file" --progress-bar
+
+ # Stop spinner
+ stop_spinner
+ msg_ok "Download completed"
+}
+
+download_with_progress "https://example.com/file.tar.gz" "/tmp/file.tar.gz"
+```
+
+### Message Deduplication
+
+```bash
+#!/usr/bin/env bash
+source core.func
+
+# Identical messages are automatically deduplicated
+for i in {1..5}; do
+  msg_info "Processing items"
+  # This identical message will only show once across all iterations
+done
+
+# Different messages will show separately
+msg_info "Starting phase 1"
+msg_info "Starting phase 2"
+msg_info "Starting phase 3"
+```
+
+### Terminal Control
+
+```bash
+#!/usr/bin/env bash
+source core.func
+
+# Ensure terminal control is available
+ensure_tput
+
+# Use terminal control
+clear_line
+echo "This line will be cleared"
+clear_line
+echo "New content"
+```
+
+## Troubleshooting Examples
+
+### Debug Mode
+
+```bash
+#!/usr/bin/env bash
+source core.func
+
+# Enable debug mode
+export var_full_verbose=1
+export VERBOSE="yes"
+
+# Debug information
+msg_debug "Script started"
+msg_debug "Current user: $(whoami)"
+msg_debug "Current directory: $(pwd)"
+msg_debug "Environment variables: $(env | grep -E '^(APP|CTID|VERBOSE)')"
+```
+
+### Silent Execution Debugging
+
+```bash
+#!/usr/bin/env bash
+source core.func
+
+# Debug silent execution
+debug_silent() {
+    local log_file="/tmp/debug.$$.log"
+
+    echo "Command: $*" > "$log_file"
+    echo "Timestamp: $(date)" >> "$log_file"
+    echo "Working directory: $(pwd)" >> "$log_file"
+    echo "Environment:" >> "$log_file"
+    env >> "$log_file"
+    echo "--- Command Output ---" >> "$log_file"
+
+    # Pass the command as separate arguments so word splitting works correctly
+    if silent "$@"; then
+        msg_ok "Command succeeded"
+    else
+        msg_error "Command failed - check $log_file for details"
+    fi
+}
+
+debug_silent apt-get update
+```
+
+### Error Recovery
+
+```bash
+#!/usr/bin/env bash
+source core.func
+
+# Error recovery pattern
+retry_operation() {
+ local max_attempts=3
+ local attempt=1
+
+ while [[ $attempt -le $max_attempts ]]; do
+ msg_info "Attempt $attempt of $max_attempts"
+
+ if silent "$@"; then
+ msg_ok "Operation succeeded on attempt $attempt"
+ return 0
+ else
+ msg_warn "Attempt $attempt failed"
+ ((attempt++))
+
+ if [[ $attempt -le $max_attempts ]]; then
+ msg_info "Retrying in 5 seconds..."
+ sleep 5
+ fi
+ fi
+ done
+
+ msg_error "Operation failed after $max_attempts attempts"
+ return 1
+}
+
+# Usage
+retry_operation apt-get install -y package
+```
diff --git a/docs/misc/core.func/README.md b/docs/misc/core.func/README.md
new file mode 100644
index 000000000..52c62af6e
--- /dev/null
+++ b/docs/misc/core.func/README.md
@@ -0,0 +1,181 @@
+# core.func Documentation
+
+## Overview
+
+The `core.func` file provides fundamental utility functions and system checks that form the foundation for all other scripts in the Proxmox Community Scripts project. It handles basic system operations, user interface elements, validation, and core infrastructure.
+
+## Purpose and Use Cases
+
+- **System Validation**: Checks for Proxmox VE compatibility, architecture, shell requirements
+- **User Interface**: Provides colored output, icons, spinners, and formatted messages
+- **Core Utilities**: Basic functions used across all scripts
+- **Error Handling**: Silent execution with detailed error reporting
+- **System Information**: OS detection, verbose mode handling, swap management
+
+## Quick Reference
+
+### Key Function Groups
+- **System Checks**: `pve_check()`, `arch_check()`, `shell_check()`, `root_check()`
+- **User Interface**: `msg_info()`, `msg_ok()`, `msg_error()`, `msg_warn()`, `spinner()`
+- **Core Utilities**: `silent()`, `is_alpine()`, `is_verbose_mode()`, `get_header()`
+- **System Management**: `check_or_create_swap()`, `ensure_tput()`
+
+### Dependencies
+- **External**: `curl` for downloading headers, `tput` for terminal control
+- **Internal**: `error_handler.func` for error explanations
+
+### Integration Points
+- Used by: All other `.func` files and installation scripts
+- Uses: `error_handler.func` for error explanations
+- Provides: Core utilities for `build.func`, `tools.func`, `api.func`
+
+## Documentation Files
+
+### 📊 [CORE_FLOWCHART.md](./CORE_FLOWCHART.md)
+Visual execution flows showing how core functions interact and the system validation process.
+
+### 📚 [CORE_FUNCTIONS_REFERENCE.md](./CORE_FUNCTIONS_REFERENCE.md)
+Complete alphabetical reference of all functions with parameters, dependencies, and usage details.
+
+### 💡 [CORE_USAGE_EXAMPLES.md](./CORE_USAGE_EXAMPLES.md)
+Practical examples showing how to use core functions in scripts and common patterns.
+
+### 🔗 [CORE_INTEGRATION.md](./CORE_INTEGRATION.md)
+How core.func integrates with other components and provides foundational services.
+
+## Key Features
+
+### System Validation
+- **Proxmox VE Version Check**: Supports PVE 8.0-8.9 and 9.0
+- **Architecture Check**: Ensures AMD64 architecture (excludes PiMox)
+- **Shell Check**: Validates Bash shell usage
+- **Root Check**: Ensures root privileges
+- **SSH Check**: Warns about external SSH usage
+
+### User Interface
+- **Colored Output**: ANSI color codes for styled terminal output
+- **Icons**: Symbolic icons for different message types
+- **Spinners**: Animated progress indicators
+- **Formatted Messages**: Consistent message formatting across scripts
+
+### Core Utilities
+- **Silent Execution**: Execute commands with detailed error reporting
+- **OS Detection**: Alpine Linux detection
+- **Verbose Mode**: Handle verbose output settings
+- **Header Management**: Download and display application headers
+- **Swap Management**: Check and create swap files
+
+## Common Usage Patterns
+
+### Basic Script Setup
+```bash
+# Source core functions
+source core.func
+
+# Run system checks
+pve_check
+arch_check
+shell_check
+root_check
+```
+
+### Message Display
+```bash
+# Show progress
+msg_info "Installing package..."
+
+# Show success
+msg_ok "Package installed successfully"
+
+# Show error
+msg_error "Installation failed"
+
+# Show warning
+msg_warn "This operation may take some time"
+```
+
+### Silent Command Execution
+```bash
+# Execute command silently with error handling
+silent apt-get update
+silent apt-get install -y package-name
+```
+
+## Environment Variables
+
+### Core Variables
+- `VERBOSE`: Enable verbose output mode
+- `SILENT_LOGFILE`: Path to silent execution log file
+- `APP`: Application name for header display
+- `APP_TYPE`: Application type (ct/vm) for header paths
+
+### Internal Variables
+- `_CORE_FUNC_LOADED`: Prevents multiple loading
+- `__FUNCTIONS_LOADED`: Prevents multiple function loading
+- `RETRY_NUM`: Number of retry attempts (default: 10)
+- `RETRY_EVERY`: Seconds between retries (default: 3)
+
+## Error Handling
+
+### Silent Execution Errors
+- Commands executed via `silent()` capture output to log file
+- On failure, displays error code explanation
+- Shows last 10 lines of log output
+- Provides command to view full log
+
+### System Check Failures
+- Each system check function exits with appropriate error message
+- Clear indication of what's wrong and how to fix it
+- Graceful exit with sleep delay for user to read message
+
+## Best Practices
+
+### Script Initialization
+1. Source `core.func` first
+2. Run system checks early
+3. Set up error handling
+4. Use appropriate message functions
+
+### Message Usage
+1. Use `msg_info()` for progress updates
+2. Use `msg_ok()` for successful completions
+3. Use `msg_error()` for failures
+4. Use `msg_warn()` for warnings
+
+### Silent Execution
+1. Use `silent()` for commands that might fail
+2. Check return codes after silent execution
+3. Provide meaningful error messages
+
+## Troubleshooting
+
+### Common Issues
+1. **Proxmox Version**: Ensure running supported PVE version
+2. **Architecture**: Script only works on AMD64 systems
+3. **Shell**: Must use Bash shell
+4. **Permissions**: Must run as root
+5. **Network**: SSH warnings for external connections
+
+### Debug Mode
+Enable verbose output for debugging:
+```bash
+export VERBOSE="yes"
+source core.func
+```
+
+### Log Files
+Check silent execution logs (the filename contains the PID of the script that wrote it, so list the most recent one):
+```bash
+ls -t /tmp/silent.*.log | head -n 1
+```
+
+## Related Documentation
+
+- [build.func](../build.func/) - Main container creation script
+- [error_handler.func](../error_handler.func/) - Error handling utilities
+- [tools.func](../tools.func/) - Extended utility functions
+- [api.func](../api.func/) - Proxmox API interactions
+
+---
+
+*This documentation covers the core.func file which provides fundamental utilities for all Proxmox Community Scripts.*
diff --git a/docs/misc/error_handler.func/ERROR_HANDLER_FLOWCHART.md b/docs/misc/error_handler.func/ERROR_HANDLER_FLOWCHART.md
new file mode 100644
index 000000000..984596d7f
--- /dev/null
+++ b/docs/misc/error_handler.func/ERROR_HANDLER_FLOWCHART.md
@@ -0,0 +1,347 @@
+# error_handler.func Execution Flowchart
+
+## Main Error Handling Flow
+
+```
+┌─────────────────────────────────────────────────────────────────────────────────┐
+│ Error Handler Initialization │
+│ Entry point when error_handler.func is sourced by other scripts │
+└─────────────────────┬───────────────────────────────────────────────────────────┘
+ │
+ ▼
+┌─────────────────────────────────────────────────────────────────────────────────┐
+│ CATCH_ERRORS() │
+│ Initialize error handling traps and strict mode │
+└─────────────────────┬───────────────────────────────────────────────────────────┘
+ │
+ ▼
+┌─────────────────────────────────────────────────────────────────────────────────┐
+│ Trap Setup Sequence │
+│ │
+│ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────────────┐ │
+│ │ Set Strict │ │ Set Error │ │ Set Signal │ │
+│ │ Mode │ │ Trap │ │ Traps │ │
+│ │ │ │ │ │ │ │
+│ │ • -Ee │ │ • ERR trap │ │ • EXIT trap │ │
+│ │ • -o pipefail │ │ • error_handler │ │ • INT trap │ │
+│ │ • -u (if │ │ function │ │ • TERM trap │ │
+│ │ STRICT_UNSET) │ │ │ │ │ │
+│ └─────────────────┘ └─────────────────┘ └─────────────────────────────┘ │
+└─────────────────────────────────────────────────────────────────────────────────┘
+```
+
+## Error Handler Flow
+
+```
+┌─────────────────────────────────────────────────────────────────────────────────┐
+│ ERROR_HANDLER() Flow │
+│ Main error handler triggered by ERR trap or manual call │
+└─────────────────────┬───────────────────────────────────────────────────────────┘
+ │
+ ▼
+┌─────────────────────────────────────────────────────────────────────────────────┐
+│ Error Detection │
+│ │
+│ ┌─────────────────────────────────────────────────────────────────────────────┐ │
+│ │ Error Information Collection │ │
+│ │ │ │
+│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │
+│ │ │ Get Exit │ │ Get Command │ │ Get Line │ │ │
+│ │ │ Code │ │ Information │ │ Number │ │ │
+│ │ │ │ │ │ │ │ │ │
+│ │ │ • From $? or │ │ • From │ │ • From │ │ │
+│ │ │ parameter │ │ BASH_COMMAND │ │ BASH_LINENO[0] │ │ │
+│ │ │ • Store in │ │ • Clean $STD │ │ • Default to │ │ │
+│ │ │ exit_code │ │ references │ │ "unknown" │ │ │
+│ │ │ │ │ • Store in │ │ • Store in │ │ │
+│ │ │ │ │ command │ │ line_number │ │ │
+│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │
+│ └─────────────────────────────────────────────────────────────────────────────┘ │
+└─────────────────────────────────────────────────────────────────────────────────┘
+ │
+ ▼
+┌─────────────────────────────────────────────────────────────────────────────────┐
+│ Success Check │
+│ │
+│ ┌─────────────────────────────────────────────────────────────────────────────┐ │
+│ │ Exit Code Validation │ │
+│ │ │ │
+│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │
+│ │ │ Check Exit │ │ Success │ │ Error │ │
+│ │ │ Code │ │ Path │ │ Path │ │
+│ │ │ │ │ │ │ │ │
+│ │ │ • If exit_code │ │ • Return 0 │ │ • Continue to │ │
+│ │ │ == 0 │ │ • No error │ │ error handling │ │
+│ │ │ • Success │ │ processing │ │ • Process error │ │
+│ │ │ • No error │ │ │ │ information │ │
+│ │ │ handling │ │ │ │ │ │
+│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │
+│ └─────────────────────────────────────────────────────────────────────────────┘ │
+└─────────────────────────────────────────────────────────────────────────────────┘
+ │
+ ▼
+┌─────────────────────────────────────────────────────────────────────────────────┐
+│ Error Processing │
+│ │
+│ ┌─────────────────────────────────────────────────────────────────────────────┐ │
+│ │ Error Explanation │ │
+│ │ │ │
+│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │
+│ │ │ Get Error │ │ Display Error │ │ Log Error │ │ │
+│ │ │ Explanation │ │ Information │ │ Information │ │ │
+│ │ │ │ │ │ │ │ │
+│ │ │ • Call │ │ • Show error │ │ • Write to debug │ │
+│ │ │ explain_exit_ │ │ message │ │ log if enabled │ │
+│ │ │ code() │ │ • Show line │ │ • Include │ │
+│ │ │ • Get human- │ │ number │ │ timestamp │ │
+│ │ │ readable │ │ • Show command │ │ • Include exit │ │
+│ │ │ message │ │ • Show exit │ │ code │ │
+│ │ │ │ │ code │ │ • Include command │ │
+│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │
+│ └─────────────────────────────────────────────────────────────────────────────┘ │
+└─────────────────────────────────────────────────────────────────────────────────┘
+ │
+ ▼
+┌─────────────────────────────────────────────────────────────────────────────────┐
+│ Silent Log Integration │
+│ │
+│ ┌─────────────────────────────────────────────────────────────────────────────┐ │
+│ │ Silent Log Display │ │
+│ │ │ │
+│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │
+│ │ │ Check Silent │ │ Display Log │ │ Exit with │ │
+│ │ │ Log File │ │ Content │ │ Error Code │ │
+│ │ │ │ │ │ │ │ │
+│ │ │ • Check if │ │ • Show last 20 │ │ • Exit with │ │
+│ │ │ SILENT_ │ │ lines │ │ original exit │ │
+│ │ │ LOGFILE set │ │ • Show file │ │ code │ │
+│ │ │ • Check if │ │ path │ │ • Terminate script │ │
+│ │ │ file exists │ │ • Format │ │ execution │ │
+│ │ │ • Check if │ │ output │ │ │ │
+│ │ │ file has │ │ │ │ │ │
+│ │ │ content │ │ │ │ │ │
+│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │
+│ └─────────────────────────────────────────────────────────────────────────────┘ │
+└─────────────────────────────────────────────────────────────────────────────────┘
+```
+
+## Signal Handling Flow
+
+```
+┌─────────────────────────────────────────────────────────────────────────────────┐
+│ Signal Handler Flow │
+│ │
+│ ┌─────────────────────────────────────────────────────────────────────────────┐ │
+│ │ Signal Detection │ │
+│ │ │ │
+│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │
+│ │ │ SIGINT │ │ SIGTERM │ │ EXIT │ │ │
+│ │ │ (Ctrl+C) │ │ (Termination) │ │ (Script End) │ │ │
+│ │ │ │ │ │ │ │ │
+│ │ │ • User │ │ • System │ │ • Normal script │ │
+│ │ │ interruption │ │ termination │ │ completion │ │
+│ │ │ • Graceful │ │ • Graceful │ │ • Error exit │ │
+│ │ │ handling │ │ handling │ │ • Signal exit │ │
+│ │ │ • Exit code │ │ • Exit code │ │ • Cleanup │ │
+│ │ │ 130 │ │ 143 │ │ operations │ │
+│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │
+│ └─────────────────────────────────────────────────────────────────────────────┘ │
+└─────────────────────────────────────────────────────────────────────────────────┘
+ │
+ ▼
+┌─────────────────────────────────────────────────────────────────────────────────┐
+│ ON_INTERRUPT() Flow │
+│ Handles SIGINT (Ctrl+C) signals │
+└─────────────────────┬───────────────────────────────────────────────────────────┘
+ │
+ ▼
+┌─────────────────────────────────────────────────────────────────────────────────┐
+│ Interrupt Processing │
+│ │
+│ ┌─────────────────────────────────────────────────────────────────────────────┐ │
+│ │ User Interruption Handling │ │
+│ │ │ │
+│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │
+│ │ │ Display │ │ Cleanup │ │ Exit with │ │ │
+│ │ │ Message │ │ Operations │ │ Code 130 │ │ │
+│ │ │ │ │ │ │ │ │
+│ │ │ • Show │ │ • Stop │ │ • Exit with │ │
+│ │ │ interruption │ │ processes │ │ SIGINT code │ │
+│ │ │ message │ │ • Clean up │ │ • Terminate script │ │
+│ │ │ • Use red │ │ temporary │ │ execution │ │
+│ │ │ color │ │ files │ │ │ │
+│ │ │ • Clear │ │ • Remove lock │ │ │ │
+│ │ │ terminal │ │ files │ │ │ │
+│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │
+│ └─────────────────────────────────────────────────────────────────────────────┘ │
+└─────────────────────────────────────────────────────────────────────────────────┘
+```
+
+## Exit Handler Flow
+
+```
+┌─────────────────────────────────────────────────────────────────────────────────┐
+│ ON_EXIT() Flow │
+│ Handles script exit cleanup │
+└─────────────────────┬───────────────────────────────────────────────────────────┘
+ │
+ ▼
+┌─────────────────────────────────────────────────────────────────────────────────┐
+│ Exit Cleanup │
+│ │
+│ ┌─────────────────────────────────────────────────────────────────────────────┐ │
+│ │ Cleanup Operations │ │
+│ │ │ │
+│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │
+│ │ │ Lock File │ │ Temporary │ │ Exit with │ │ │
+│ │ │ Cleanup │ │ File │ │ Original Code │ │ │
+│ │ │ │ │ Cleanup │ │ │ │
+│ │ │ • Check if │ │ • Remove │ │ • Exit with │ │
+│ │ │ lockfile │ │ temporary │ │ original exit │ │
+│ │ │ variable set │ │ files │ │ code │ │
+│ │ │ • Check if │ │ • Clean up │ │ • Preserve exit │ │
+│ │ │ lockfile │ │ process │ │ status │ │
+│ │ │ exists │ │ state │ │ • Terminate │ │
+│ │ │ • Remove │ │ │ │ execution │ │
+│ │ │ lockfile │ │ │ │ │ │
+│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │
+│ └─────────────────────────────────────────────────────────────────────────────┘ │
+└─────────────────────────────────────────────────────────────────────────────────┘
+```
+
+## Error Code Explanation Flow
+
+```
+┌─────────────────────────────────────────────────────────────────────────────────┐
+│ EXPLAIN_EXIT_CODE() Flow │
+│ Converts numeric exit codes to human-readable explanations │
+└─────────────────────┬───────────────────────────────────────────────────────────┘
+ │
+ ▼
+┌─────────────────────────────────────────────────────────────────────────────────┐
+│ Error Code Classification │
+│ │
+│ ┌─────────────────────────────────────────────────────────────────────────────┐ │
+│ │ Error Code Categories │ │
+│ │ │ │
+│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │
+│ │ │ Generic/ │ │ Package │ │ Node.js │ │ │
+│ │ │ Shell │ │ Manager │ │ Errors │ │ │
+│ │ │ Errors │ │ Errors │ │ │ │
+│ │ │ │ │ │ │ • 243: Out of │ │
+│ │ │ • 1: General │ │ • 100: APT │ │ memory │ │
+│ │ │ error │ │ package │ │ • 245: Invalid │ │
+│ │ │ • 2: Shell │ │ error │ │ option │ │
+│ │ │ builtin │ │ • 101: APT │ │ • 246: Parse │ │
+│ │ │ misuse │ │ config error │ │ error │ │
+│ │ │ • 126: Cannot │ │ • 255: DPKG │ │ • 247: Fatal │ │
+│ │ │ execute │ │ fatal error │ │ error │ │
+│ │ │ • 127: Command │ │ │ │ • 248: Addon │ │
+│ │ │ not found │ │ │ │ failure │ │
+│ │ │ • 128: Invalid │ │ │ │ • 249: Inspector │ │
+│ │ │ exit │ │ │ │ error │ │
+│ │ │ • 130: SIGINT │ │ │ │ • 254: Unknown │ │
+│ │ │ • 137: SIGKILL │ │ │ │ fatal error │ │
+│ │ │ • 139: Segfault │ │ │ │ │ │
+│ │ │ • 143: SIGTERM │ │ │ │ │ │
+│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │
+│ │ │ │
+│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │
+│ │ │ Python │ │ Database │ │ Proxmox │ │ │
+│ │ │ Errors │ │ Errors │ │ Custom │ │ │
+│ │ │ │ │ │ │ Errors │ │
+│ │ │ • 210: Virtual │ │ • PostgreSQL: │ │ • 200: Lock file │ │
+│ │ │ env missing │ │ 231-234 │ │ failed │ │
+│ │ │ • 211: Dep │ │ • MySQL: 241- │ │ • 203: Missing │ │
+│ │ │ resolution │ │ 244 │ │ CTID │ │
+│ │ │ • 212: Install │ │ • MongoDB: 251- │ │ • 204: Missing │ │
+│ │ │ aborted │ │ 254 │ │ PCT_OSTYPE │ │
+│ │ │ │ │ │ │ • 205: Invalid │ │
+│ │ │ │ │ │ │ CTID │ │
+│ │ │ │ │ │ │ • 209: Container │ │
+│ │ │ │ │ │ │ creation failed │ │
+│ │ │ │ │ │ │ • 210: Cluster │ │
+│ │ │ │ │ │ │ not quorate │ │
+│ │ │ │ │ │ │ • 214: No storage │ │
+│ │ │ │ │ │ │ space │ │
+│ │ │ │ │ │ │ • 215: CTID not │ │
+│ │ │ │ │ │ │ listed │ │
+│ │ │ │ │ │ │ • 216: RootFS │ │
+│ │ │ │ │ │ │ missing │ │
+│ │ │ │ │ │ │ • 217: Storage │ │
+│ │ │ │ │ │ │ not supported │ │
+│ │ │ │ │ │ │ • 220: Template │ │
+│ │ │ │ │ │ │ path error │ │
+│ │ │ │ │ │ │ • 222: Template │ │
+│ │ │ │ │ │ │ download failed │ │
+│ │ │ │ │ │ │ • 223: Template │ │
+│ │ │ │ │ │ │ not available │ │
+│ │ │ │ │ │ │ • 231: LXC stack │ │
+│ │ │ │ │ │ │ upgrade failed │ │
+│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │
+│ └─────────────────────────────────────────────────────────────────────────────┘ │
+└─────────────────────────────────────────────────────────────────────────────────┘
+ │
+ ▼
+┌─────────────────────────────────────────────────────────────────────────────────┐
+│ Default Case │
+│ │
+│ ┌─────────────────────────────────────────────────────────────────────────────┐ │
+│ │ Unknown Error Handling │ │
+│ │ │ │
+│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │
+│ │ │ Check for │ │ Return │ │ Log Unknown │ │ │
+│ │ │ Unknown │ │ Generic │ │ Error │ │ │
+│ │ │ Code │ │ Message │ │ │ │
+│ │ │ │ │ │ │ • Log to debug │ │
+│ │ │ • If no match │ │ • "Unknown │ │ file if enabled │ │
+│ │ │ found │ │ error" │ │ • Include error │ │
+│ │ │ • Use default │ │ • Return to │ │ code │ │
+│ │ │ case │ │ caller │ │ • Include │ │
+│ │ │ │ │ │ │ timestamp │ │
+│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │
+│ └─────────────────────────────────────────────────────────────────────────────┘ │
+└─────────────────────────────────────────────────────────────────────────────────┘
+```
+
+## Debug Logging Flow
+
+```
+┌─────────────────────────────────────────────────────────────────────────────────┐
+│ Debug Log Integration │
+│ │
+│ ┌─────────────────────────────────────────────────────────────────────────────┐ │
+│ │ Debug Log Writing │ │
+│ │ │ │
+│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │
+│ │ │ Check Debug │ │ Write Error │ │ Format Log │ │ │
+│ │ │ Log File │ │ Information │ │ Entry │ │ │
+│ │ │ │ │ │ │ │ │
+│ │ │ • Check if │ │ • Timestamp │ │ • Error separator │ │
+│ │ │ DEBUG_LOGFILE │ │ • Exit code │ │ • Structured │ │
+│ │ │ set │ │ • Explanation │ │ format │ │
+│ │ │ • Check if │ │ • Line number │ │ • Easy to parse │ │
+│ │ │ file exists │ │ • Command │ │ • Easy to read │ │
+│ │ │ • Check if │ │ • Append to │ │ │ │
+│ │ │ file writable │ │ file │ │ │ │
+│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │
+│ └─────────────────────────────────────────────────────────────────────────────┘ │
+└─────────────────────────────────────────────────────────────────────────────────┘
+```
+
+## Integration Points
+
+### With core.func
+- **Silent Execution**: Provides error explanations for silent() function
+- **Color Variables**: Uses color variables for error display
+- **Log Integration**: Integrates with SILENT_LOGFILE
+
+### With Other Scripts
+- **Error Traps**: Sets up ERR trap for automatic error handling
+- **Signal Traps**: Handles SIGINT, SIGTERM, and EXIT signals
+- **Cleanup**: Provides cleanup on script exit
+
+### External Dependencies
+- **None**: Pure Bash implementation
+- **Color Support**: Requires color variables from core.func
+- **Log Files**: Uses standard file operations
diff --git a/docs/misc/error_handler.func/ERROR_HANDLER_FUNCTIONS_REFERENCE.md b/docs/misc/error_handler.func/ERROR_HANDLER_FUNCTIONS_REFERENCE.md
new file mode 100644
index 000000000..2270ffb76
--- /dev/null
+++ b/docs/misc/error_handler.func/ERROR_HANDLER_FUNCTIONS_REFERENCE.md
@@ -0,0 +1,424 @@
+# error_handler.func Functions Reference
+
+## Overview
+
+This document provides a comprehensive alphabetical reference of all functions in `error_handler.func`, including parameters, dependencies, usage examples, and error handling.
+
+## Function Categories
+
+### Error Explanation Functions
+
+#### `explain_exit_code()`
+**Purpose**: Convert numeric exit codes to human-readable explanations
+**Parameters**:
+- `$1` - Exit code to explain
+**Returns**: Human-readable error explanation string
+**Side Effects**: None
+**Dependencies**: None
+**Environment Variables Used**: None
+
+**Supported Exit Codes**:
+- **Generic/Shell**: 1, 2, 126, 127, 128, 130, 137, 139, 143
+- **Package Manager**: 100, 101, 255
+- **Node.js**: 243, 245, 246, 247, 248, 249, 254
+- **Python**: 210, 211, 212
+- **PostgreSQL**: 231, 232, 233, 234
+- **MySQL/MariaDB**: 241, 242, 243, 244
+- **MongoDB**: 251, 252, 253, 254
+- **Proxmox Custom**: 200, 203, 204, 205, 209, 210, 214, 215, 216, 217, 220, 222, 223, 231
+
+**Usage Example**:
+```bash
+explanation=$(explain_exit_code 127)
+echo "Error 127: $explanation"
+# Output: Error 127: Command not found
+```
+
+**Error Code Examples**:
+```bash
+explain_exit_code 1 # "General error / Operation not permitted"
+explain_exit_code 126 # "Command invoked cannot execute (permission problem?)"
+explain_exit_code 127 # "Command not found"
+explain_exit_code 130 # "Terminated by Ctrl+C (SIGINT)"
+explain_exit_code 200 # "Custom: Failed to create lock file"
+explain_exit_code 999 # "Unknown error"
+```
+
+### Error Handling Functions
+
+#### `error_handler()`
+**Purpose**: Main error handler triggered by ERR trap or manual call
+**Parameters**:
+- `$1` - Exit code (optional, defaults to $?)
+- `$2` - Command that failed (optional, defaults to BASH_COMMAND)
+**Returns**: None (exits with error code)
+**Side Effects**:
+- Displays detailed error information
+- Logs error to debug file if enabled
+- Shows silent log content if available
+- Exits with original error code
+**Dependencies**: `explain_exit_code()`
+**Environment Variables Used**: `DEBUG_LOGFILE`, `SILENT_LOGFILE`
+
+**Usage Example**:
+```bash
+# Automatic error handling via ERR trap
+set -e
+trap 'error_handler' ERR
+
+# Manual error handling
+error_handler 127 "command_not_found"
+```
+
+**Error Information Displayed**:
+- Error message with color coding
+- Line number where error occurred
+- Exit code with explanation
+- Command that failed
+- Silent log content (last 20 lines)
+- Debug log entry (if enabled)
+
+### Signal Handling Functions
+
+#### `on_interrupt()`
+**Purpose**: Handle SIGINT (Ctrl+C) signals gracefully
+**Parameters**: None
+**Returns**: None (exits with code 130)
+**Side Effects**:
+- Displays interruption message
+- Exits with SIGINT code (130)
+**Dependencies**: None
+**Environment Variables Used**: None
+
+**Usage Example**:
+```bash
+# Set up interrupt handler
+trap on_interrupt INT
+
+# User presses Ctrl+C
+# Handler displays: "Interrupted by user (SIGINT)"
+# Script exits with code 130
+```
+
+#### `on_terminate()`
+**Purpose**: Handle SIGTERM signals gracefully
+**Parameters**: None
+**Returns**: None (exits with code 143)
+**Side Effects**:
+- Displays termination message
+- Exits with SIGTERM code (143)
+**Dependencies**: None
+**Environment Variables Used**: None
+
+**Usage Example**:
+```bash
+# Set up termination handler
+trap on_terminate TERM
+
+# System sends SIGTERM
+# Handler displays: "Terminated by signal (SIGTERM)"
+# Script exits with code 143
+```
+
+### Cleanup Functions
+
+#### `on_exit()`
+**Purpose**: Handle script exit cleanup
+**Parameters**: None
+**Returns**: None (exits with original exit code)
+**Side Effects**:
+- Removes lock file if set
+- Exits with original exit code
+**Dependencies**: None
+**Environment Variables Used**: `lockfile`
+
+**Usage Example**:
+```bash
+# Set up exit handler
+trap on_exit EXIT
+
+# Set lock file
+lockfile="/tmp/my_script.lock"
+
+# Script exits normally or with error
+# Handler removes lock file and exits
+```
+
+### Initialization Functions
+
+#### `catch_errors()`
+**Purpose**: Initialize error handling traps and strict mode
+**Parameters**: None
+**Returns**: None
+**Side Effects**:
+- Sets strict error handling mode
+- Sets up error traps
+- Sets up signal traps
+- Sets up exit trap
+**Dependencies**: None
+**Environment Variables Used**: `STRICT_UNSET`
+
+**Strict Mode Settings**:
+- `-E`: ERR trap is inherited by shell functions, command substitutions, and subshells
+- `-e`: Exit immediately when any command fails
+- `-o pipefail`: A pipeline fails if any command in it fails (not just the last one)
+- `-u`: Treat expansion of unset variables as an error (if STRICT_UNSET=1)
+
+**Trap Setup**:
+- `ERR`: Calls `error_handler` on command failure
+- `EXIT`: Calls `on_exit` on script exit
+- `INT`: Calls `on_interrupt` on SIGINT
+- `TERM`: Calls `on_terminate` on SIGTERM
+
+**Usage Example**:
+```bash
+# Initialize error handling
+catch_errors
+
+# Script now has full error handling
+# All errors will be caught and handled
+```
+
+## Function Call Hierarchy
+
+### Error Handling Flow
+```
+Command Failure
+├── ERR trap triggered
+├── error_handler() called
+│ ├── Get exit code
+│ ├── Get command info
+│ ├── Get line number
+│ ├── explain_exit_code()
+│ ├── Display error info
+│ ├── Log to debug file
+│ ├── Show silent log
+│ └── Exit with error code
+```
+
+### Signal Handling Flow
+```
+Signal Received
+├── Signal trap triggered
+├── Appropriate handler called
+│ ├── on_interrupt() for SIGINT
+│ ├── on_terminate() for SIGTERM
+│ └── on_exit() for EXIT
+└── Exit with signal code
+```
+
+### Initialization Flow
+```
+catch_errors()
+├── Set strict mode
+│   ├── -E (ERR trap inherited by functions/subshells)
+│   ├── -e (exit on error)
+│   ├── -o pipefail (pipeline failure propagates)
+│   └── -u (unset variables, if enabled)
+└── Set up traps
+ ├── ERR → error_handler
+ ├── EXIT → on_exit
+ ├── INT → on_interrupt
+ └── TERM → on_terminate
+```
+
+## Error Code Reference
+
+### Generic/Shell Errors
+| Code | Description |
+|------|-------------|
+| 1 | General error / Operation not permitted |
+| 2 | Misuse of shell builtins (e.g. syntax error) |
+| 126 | Command invoked cannot execute (permission problem?) |
+| 127 | Command not found |
+| 128 | Invalid argument to exit |
+| 130 | Terminated by Ctrl+C (SIGINT) |
+| 137 | Killed (SIGKILL / Out of memory?) |
+| 139 | Segmentation fault (core dumped) |
+| 143 | Terminated (SIGTERM) |
+
+### Package Manager Errors
+| Code | Description |
+|------|-------------|
+| 100 | APT: Package manager error (broken packages / dependency problems) |
+| 101 | APT: Configuration error (bad sources.list, malformed config) |
+| 255 | DPKG: Fatal internal error |
+
+### Node.js Errors
+| Code | Description |
+|------|-------------|
+| 243 | Node.js: Out of memory (JavaScript heap out of memory) |
+| 245 | Node.js: Invalid command-line option |
+| 246 | Node.js: Internal JavaScript Parse Error |
+| 247 | Node.js: Fatal internal error |
+| 248 | Node.js: Invalid C++ addon / N-API failure |
+| 249 | Node.js: Inspector error |
+| 254 | npm/pnpm/yarn: Unknown fatal error |
+
+### Python Errors
+| Code | Description |
+|------|-------------|
+| 210 | Python: Virtualenv / uv environment missing or broken |
+| 211 | Python: Dependency resolution failed |
+| 212 | Python: Installation aborted (permissions or EXTERNALLY-MANAGED) |
+
+### Database Errors
+| Code | Description |
+|------|-------------|
+| 231 | PostgreSQL: Connection failed (server not running / wrong socket) |
+| 232 | PostgreSQL: Authentication failed (bad user/password) |
+| 233 | PostgreSQL: Database does not exist |
+| 234 | PostgreSQL: Fatal error in query / syntax |
+| 241 | MySQL/MariaDB: Connection failed (server not running / wrong socket) |
+| 242 | MySQL/MariaDB: Authentication failed (bad user/password) |
+| 243 | MySQL/MariaDB: Database does not exist |
+| 244 | MySQL/MariaDB: Fatal error in query / syntax |
+| 251 | MongoDB: Connection failed (server not running) |
+| 252 | MongoDB: Authentication failed (bad user/password) |
+| 253 | MongoDB: Database not found |
+| 254 | MongoDB: Fatal query error |
+
+### Proxmox Custom Errors
+| Code | Description |
+|------|-------------|
+| 200 | Custom: Failed to create lock file |
+| 203 | Custom: Missing CTID variable |
+| 204 | Custom: Missing PCT_OSTYPE variable |
+| 205 | Custom: Invalid CTID (<100) |
+| 209 | Custom: Container creation failed |
+| 210 | Custom: Cluster not quorate |
+| 214 | Custom: Not enough storage space |
+| 215 | Custom: Container ID not listed |
+| 216 | Custom: RootFS entry missing in config |
+| 217 | Custom: Storage does not support rootdir |
+| 220 | Custom: Unable to resolve template path |
+| 222 | Custom: Template download failed after 3 attempts |
+| 223 | Custom: Template not available after download |
+| 231 | Custom: LXC stack upgrade/retry failed |
+
+> **Note**: Several numeric codes overlap across categories (e.g. 210, 231, 243, 254). The correct interpretation depends on which component produced the exit code.
+
+## Environment Variable Dependencies
+
+### Required Variables
+- **`lockfile`**: Lock file path for cleanup (set by calling script)
+
+### Optional Variables
+- **`DEBUG_LOGFILE`**: Path to debug log file for error logging
+- **`SILENT_LOGFILE`**: Path to silent execution log file
+- **`STRICT_UNSET`**: Enable strict unset variable checking (0/1)
+
+### Internal Variables
+- **`exit_code`**: Current exit code
+- **`command`**: Failed command
+- **`line_number`**: Line number where error occurred
+- **`explanation`**: Error explanation text
+
+## Error Handling Patterns
+
+### Automatic Error Handling
+```bash
+#!/usr/bin/env bash
+source error_handler.func
+
+# Initialize error handling
+catch_errors
+
+# All commands are now monitored
+# Errors will be automatically caught and handled
+```
+
+### Manual Error Handling
+```bash
+#!/usr/bin/env bash
+source error_handler.func
+
+# Manual error handling
+if ! command -v required_tool >/dev/null 2>&1; then
+ error_handler 127 "required_tool not found"
+fi
+```
+
+### Custom Error Codes
+```bash
+#!/usr/bin/env bash
+source error_handler.func
+
+# Use custom error codes
+if [[ ! -f /required/file ]]; then
+ echo "Error: Required file missing"
+ exit 200 # Custom error code
+fi
+```
+
+### Signal Handling
+```bash
+#!/usr/bin/env bash
+source error_handler.func
+
+# Set up signal handling
+trap on_interrupt INT
+trap on_terminate TERM
+trap on_exit EXIT
+
+# Script handles signals gracefully
+```
+
+## Integration Examples
+
+### With core.func
+```bash
+#!/usr/bin/env bash
+source core.func
+source error_handler.func
+
+# Silent execution uses error_handler for explanations
+silent apt-get install -y package
+# If command fails, error_handler provides explanation
+```
+
+### With build.func
+```bash
+#!/usr/bin/env bash
+source core.func
+source error_handler.func
+source build.func
+
+# Container creation with error handling
+# Errors are caught and explained
+```
+
+### With tools.func
+```bash
+#!/usr/bin/env bash
+source core.func
+source error_handler.func
+source tools.func
+
+# Tool operations with error handling
+# All errors are properly handled and explained
+```
+
+## Best Practices
+
+### Error Handling Setup
+1. Source error_handler.func early in script
+2. Call catch_errors() to initialize traps
+3. Use appropriate exit codes for different error types
+4. Provide meaningful error messages
+
+### Signal Handling
+1. Always set up signal traps
+2. Provide graceful cleanup on interruption
+3. Use appropriate exit codes for signals
+4. Clean up temporary files and processes
+
+### Error Reporting
+1. Use explain_exit_code() for user-friendly messages
+2. Log errors to debug files when needed
+3. Provide context information (line numbers, commands)
+4. Integrate with silent execution logging
+
+### Custom Error Codes
+1. Use Proxmox custom error codes (200-231) for container/VM errors
+2. Use standard error codes for common operations
+3. Document custom error codes in script comments
+4. Provide clear error messages for custom codes
diff --git a/docs/misc/error_handler.func/ERROR_HANDLER_INTEGRATION.md b/docs/misc/error_handler.func/ERROR_HANDLER_INTEGRATION.md
new file mode 100644
index 000000000..c3cf3b5c9
--- /dev/null
+++ b/docs/misc/error_handler.func/ERROR_HANDLER_INTEGRATION.md
@@ -0,0 +1,512 @@
+# error_handler.func Integration Guide
+
+## Overview
+
+This document describes how `error_handler.func` integrates with other components in the Proxmox Community Scripts project, including dependencies, data flow, and API surface.
+
+## Dependencies
+
+### External Dependencies
+
+#### Required Commands
+- **None**: Pure Bash implementation
+
+#### Optional Commands
+- **None**: No external command dependencies
+
+### Internal Dependencies
+
+#### core.func
+- **Purpose**: Provides color variables for error display
+- **Usage**: Uses `RD`, `CL`, `YWB` color variables
+- **Integration**: Called automatically when core.func is sourced
+- **Data Flow**: Color variables → error display formatting
+
+## Integration Points
+
+### With core.func
+
+#### Silent Execution Integration
+```bash
+# core.func silent() function uses error_handler.func
+silent() {
+ local cmd="$*"
+ local caller_line="${BASH_LINENO[0]:-unknown}"
+
+ # Execute command
+ "$@" >>"$SILENT_LOGFILE" 2>&1
+ local rc=$?
+
+ if [[ $rc -ne 0 ]]; then
+ # Load error_handler.func if needed
+ if ! declare -f explain_exit_code >/dev/null 2>&1; then
+ source error_handler.func
+ fi
+
+ # Get error explanation
+ local explanation
+ explanation="$(explain_exit_code "$rc")"
+
+ # Display error with explanation
+ printf "\e[?25h"
+ echo -e "\n${RD}[ERROR]${CL} in line ${RD}${caller_line}${CL}: exit code ${RD}${rc}${CL} (${explanation})"
+ echo -e "${RD}Command:${CL} ${YWB}${cmd}${CL}\n"
+
+ exit "$rc"
+ fi
+}
+```
+
+#### Color Variable Usage
+```bash
+# error_handler.func uses color variables from core.func
+error_handler() {
+ # ... error handling logic ...
+
+ # Use color variables for error display
+ echo -e "\n${RD}[ERROR]${CL} in line ${RD}${line_number}${CL}: exit code ${RD}${exit_code}${CL} (${explanation}): while executing command ${YWB}${command}${CL}\n"
+}
+
+on_interrupt() {
+ echo -e "\n${RD}Interrupted by user (SIGINT)${CL}"
+ exit 130
+}
+
+on_terminate() {
+ echo -e "\n${RD}Terminated by signal (SIGTERM)${CL}"
+ exit 143
+}
+```
+
+### With build.func
+
+#### Container Creation Error Handling
+```bash
+# build.func uses error_handler.func for container operations
+source core.func
+source error_handler.func
+
+# Container creation with error handling
+create_container() {
+ # Set up error handling
+ catch_errors
+
+ # Container creation operations
+ silent pct create "$CTID" "$TEMPLATE" \
+ --hostname "$HOSTNAME" \
+ --memory "$MEMORY" \
+ --cores "$CORES"
+
+ # If creation fails, error_handler provides explanation
+}
+```
+
+#### Template Download Error Handling
+```bash
+# build.func uses error_handler.func for template operations
+download_template() {
+ # Template download with error handling
+ if ! silent curl -fsSL "$TEMPLATE_URL" -o "$TEMPLATE_FILE"; then
+ # error_handler provides detailed explanation
+ exit 222 # Template download failed
+ fi
+}
+```
+
+### With tools.func
+
+#### Maintenance Operations Error Handling
+```bash
+# tools.func uses error_handler.func for maintenance operations
+source core.func
+source error_handler.func
+
+# Maintenance operations with error handling
+update_system() {
+ catch_errors
+
+ # System update operations
+ silent apt-get update
+ silent apt-get upgrade -y
+
+ # Error handling provides explanations for failures
+}
+
+cleanup_logs() {
+ catch_errors
+
+ # Log cleanup operations
+ silent find /var/log -name "*.log" -mtime +30 -delete
+
+ # Error handling provides explanations for permission issues
+}
+```
+
+### With api.func
+
+#### API Operations Error Handling
+```bash
+# api.func uses error_handler.func for API operations
+source core.func
+source error_handler.func
+
+# API operations with error handling
+api_call() {
+ catch_errors
+
+ # API call with error handling
+ if ! silent curl -k -H "Authorization: PVEAPIToken=$API_TOKEN" \
+ "$API_URL/api2/json/nodes/$NODE/lxc"; then
+ # error_handler provides explanation for API failures
+ exit 1
+ fi
+}
+```
+
+### With install.func
+
+#### Installation Process Error Handling
+```bash
+# install.func uses error_handler.func for installation operations
+source core.func
+source error_handler.func
+
+# Installation with error handling
+install_package() {
+ local package="$1"
+
+ catch_errors
+
+ # Package installation
+ silent apt-get install -y "$package"
+
+ # Error handling provides explanations for installation failures
+}
+```
+
+### With alpine-install.func
+
+#### Alpine Installation Error Handling
+```bash
+# alpine-install.func uses error_handler.func for Alpine operations
+source core.func
+source error_handler.func
+
+# Alpine installation with error handling
+install_alpine_package() {
+ local package="$1"
+
+ catch_errors
+
+ # Alpine package installation
+ silent apk add --no-cache "$package"
+
+ # Error handling provides explanations for Alpine-specific failures
+}
+```
+
+### With alpine-tools.func
+
+#### Alpine Tools Error Handling
+```bash
+# alpine-tools.func uses error_handler.func for Alpine tools
+source core.func
+source error_handler.func
+
+# Alpine tools with error handling
+alpine_tool_operation() {
+ catch_errors
+
+ # Alpine-specific tool operations
+ silent alpine_command
+
+ # Error handling provides explanations for Alpine tool failures
+}
+```
+
+### With passthrough.func
+
+#### Hardware Passthrough Error Handling
+```bash
+# passthrough.func uses error_handler.func for hardware operations
+source core.func
+source error_handler.func
+
+# Hardware passthrough with error handling
+configure_gpu_passthrough() {
+ catch_errors
+
+ # GPU passthrough operations
+ silent lspci | grep -i nvidia
+
+ # Error handling provides explanations for hardware failures
+}
+```
+
+### With vm-core.func
+
+#### VM Operations Error Handling
+```bash
+# vm-core.func uses error_handler.func for VM operations
+source core.func
+source error_handler.func
+
+# VM operations with error handling
+create_vm() {
+ catch_errors
+
+ # VM creation operations
+ silent qm create "$VMID" \
+ --name "$VMNAME" \
+ --memory "$MEMORY" \
+ --cores "$CORES"
+
+ # Error handling provides explanations for VM creation failures
+}
+```
+
+## Data Flow
+
+### Input Data
+
+#### Environment Variables
+- **`DEBUG_LOGFILE`**: Path to debug log file for error logging
+- **`SILENT_LOGFILE`**: Path to silent execution log file
+- **`STRICT_UNSET`**: Enable strict unset variable checking (0/1)
+- **`lockfile`**: Lock file path for cleanup (set by calling script)
+
+#### Function Parameters
+- **Exit codes**: Passed to `explain_exit_code()` and `error_handler()`
+- **Command information**: Passed to `error_handler()` for context
+- **Signal information**: Passed to signal handlers
+
+#### System Information
+- **Exit codes**: Retrieved from `$?` variable
+- **Command information**: Retrieved from `BASH_COMMAND` variable
+- **Line numbers**: Retrieved from `BASH_LINENO[0]` variable
+- **Process information**: Retrieved from system calls
+
+### Processing Data
+
+#### Error Code Processing
+- **Code classification**: Categorize exit codes by type
+- **Explanation lookup**: Map codes to human-readable messages
+- **Context collection**: Gather command and line information
+- **Log preparation**: Format error information for logging
+
+#### Signal Processing
+- **Signal detection**: Identify received signals
+- **Handler selection**: Choose appropriate signal handler
+- **Cleanup operations**: Perform necessary cleanup
+- **Exit code setting**: Set appropriate exit codes
+
+#### Log Processing
+- **Debug logging**: Write error information to debug log
+- **Silent log integration**: Display silent log content
+- **Log formatting**: Format log entries for readability
+- **Log analysis**: Provide log analysis capabilities
+
+### Output Data
+
+#### Error Information
+- **Error messages**: Human-readable error explanations
+- **Context information**: Line numbers, commands, timestamps
+- **Color formatting**: ANSI color codes for terminal display
+- **Log content**: Silent log excerpts and debug information
+
+#### System State
+- **Exit codes**: Returned from functions
+- **Log files**: Created and updated for error tracking
+- **Cleanup status**: Lock file removal and process cleanup
+- **Signal handling**: Graceful signal processing
+
+## API Surface
+
+### Public Functions
+
+#### Error Explanation
+- **`explain_exit_code()`**: Convert exit codes to explanations
+- **Parameters**: Exit code to explain
+- **Returns**: Human-readable explanation string
+- **Usage**: Called by error_handler() and other functions
+
+#### Error Handling
+- **`error_handler()`**: Main error handler function
+- **Parameters**: Exit code (optional), command (optional)
+- **Returns**: None (exits with error code)
+- **Usage**: Called by ERR trap or manually
+
+#### Signal Handling
+- **`on_interrupt()`**: Handle SIGINT signals
+- **`on_terminate()`**: Handle SIGTERM signals
+- **`on_exit()`**: Handle script exit cleanup
+- **Parameters**: None
+- **Returns**: None (exits with signal code)
+- **Usage**: Called by signal traps
+
+#### Initialization
+- **`catch_errors()`**: Initialize error handling
+- **Parameters**: None
+- **Returns**: None
+- **Usage**: Called to set up error handling traps
+
+### Internal Functions
+
+#### None
+- All functions in error_handler.func are public
+- No internal helper functions
+- Direct implementation of all functionality
+
+### Global Variables
+
+#### Configuration Variables
+- **`DEBUG_LOGFILE`**: Debug log file path
+- **`SILENT_LOGFILE`**: Silent log file path
+- **`STRICT_UNSET`**: Strict mode setting
+- **`lockfile`**: Lock file path
+
+#### State Variables
+- **`exit_code`**: Current exit code
+- **`command`**: Failed command
+- **`line_number`**: Line number where error occurred
+- **`explanation`**: Error explanation text
+
+## Integration Patterns
+
+### Standard Integration Pattern
+
+```bash
+#!/usr/bin/env bash
+# Standard integration pattern
+
+# 1. Source core.func first
+source core.func
+
+# 2. Source error_handler.func
+source error_handler.func
+
+# 3. Initialize error handling
+catch_errors
+
+# 4. Use silent execution
+silent command
+
+# 5. Errors are automatically handled
+```
+
+### Minimal Integration Pattern
+
+```bash
+#!/usr/bin/env bash
+# Minimal integration pattern
+
+source error_handler.func
+catch_errors
+
+# Basic error handling
+command
+```
+
+### Advanced Integration Pattern
+
+```bash
+#!/usr/bin/env bash
+# Advanced integration pattern
+
+source core.func
+source error_handler.func
+
+# Set up comprehensive error handling
+export DEBUG_LOGFILE="/tmp/debug.log"
+export SILENT_LOGFILE="/tmp/silent.log"
+lockfile="/tmp/script.lock"
+touch "$lockfile"
+
+catch_errors
+trap on_interrupt INT
+trap on_terminate TERM
+trap on_exit EXIT
+
+# Advanced error handling
+silent command
+```
+
+## Error Handling Integration
+
+### Automatic Error Handling
+- **ERR Trap**: Automatically catches command failures
+- **Error Explanation**: Provides human-readable error messages
+- **Context Information**: Shows line numbers and commands
+- **Log Integration**: Displays silent log content
+
+### Manual Error Handling
+- **Custom Error Codes**: Use Proxmox custom error codes
+- **Error Recovery**: Implement retry logic with error handling
+- **Conditional Handling**: Different handling for different error types
+- **Error Analysis**: Analyze error patterns and trends
+
+### Signal Handling Integration
+- **Graceful Interruption**: Handle Ctrl+C gracefully
+- **Clean Termination**: Handle SIGTERM signals
+- **Exit Cleanup**: Clean up resources on script exit
+- **Lock File Management**: Remove lock files on exit
+
+## Performance Considerations
+
+### Error Handling Overhead
+- **Minimal Impact**: Error handling adds minimal overhead
+- **Trap Setup**: Trap setup is done once during initialization
+- **Error Processing**: Error processing is only done on failures
+- **Log Writing**: Log writing is only done when enabled
+
+### Memory Usage
+- **Minimal Footprint**: Error handler uses minimal memory
+- **Variable Reuse**: Global variables reused across functions
+- **No Memory Leaks**: Proper cleanup prevents memory leaks
+- **Efficient Processing**: Efficient error code processing
+
+### Execution Speed
+- **Fast Error Detection**: Quick error detection and handling
+- **Efficient Explanation**: Fast error code explanation lookup
+- **Minimal Delay**: Minimal delay in error handling
+- **Quick Exit**: Fast exit on error conditions
+
+## Security Considerations
+
+### Error Information Disclosure
+- **Controlled Disclosure**: Only necessary error information is shown
+- **Log Security**: Log files have appropriate permissions
+- **Sensitive Data**: Sensitive data is not logged
+- **Error Sanitization**: Error messages are sanitized
+
+### Signal Handling Security
+- **Signal Validation**: Only expected signals are handled
+- **Cleanup Security**: Secure cleanup of temporary files
+- **Lock File Security**: Secure lock file management
+- **Process Security**: Secure process termination
+
+### Log File Security
+- **File Permissions**: Log files have appropriate permissions
+- **Log Rotation**: Log files are rotated to prevent disk filling
+- **Log Cleanup**: Old log files are cleaned up
+- **Log Access**: Log access is controlled
+
+## Future Integration Considerations
+
+### Extensibility
+- **New Error Codes**: Easy to add new error code explanations
+- **Custom Handlers**: Easy to add custom error handlers
+- **Signal Extensions**: Easy to add new signal handlers
+- **Log Formats**: Easy to add new log formats
+
+### Compatibility
+- **Bash Version**: Compatible with different Bash versions
+- **System Compatibility**: Compatible with different systems
+- **Script Compatibility**: Compatible with different script types
+- **Error Code Compatibility**: Compatible with different error codes
+
+### Performance
+- **Optimization**: Error handling can be optimized for better performance
+- **Caching**: Error explanations can be cached for faster lookup
+- **Parallel Processing**: Error handling can be parallelized
+- **Resource Management**: Better resource management for error handling
diff --git a/docs/misc/error_handler.func/ERROR_HANDLER_USAGE_EXAMPLES.md b/docs/misc/error_handler.func/ERROR_HANDLER_USAGE_EXAMPLES.md
new file mode 100644
index 000000000..cfb668711
--- /dev/null
+++ b/docs/misc/error_handler.func/ERROR_HANDLER_USAGE_EXAMPLES.md
@@ -0,0 +1,625 @@
+# error_handler.func Usage Examples
+
+## Overview
+
+This document provides practical usage examples for `error_handler.func` functions, covering common scenarios, integration patterns, and best practices.
+
+## Basic Error Handling Setup
+
+### Standard Script Initialization
+
+```bash
+#!/usr/bin/env bash
+# Standard error handling setup
+
+# Source error handler
+source error_handler.func
+
+# Initialize error handling
+catch_errors
+
+# Your script code here
+# All errors will be automatically caught and handled
+echo "Script running..."
+apt-get update
+apt-get install -y package
+echo "Script completed successfully"
+```
+
+### Minimal Error Handling
+
+```bash
+#!/usr/bin/env bash
+# Minimal error handling setup
+
+source error_handler.func
+catch_errors
+
+# Simple script with error handling
+echo "Starting operation..."
+command_that_might_fail
+echo "Operation completed"
+```
+
+## Error Code Explanation Examples
+
+### Basic Error Explanation
+
+```bash
+#!/usr/bin/env bash
+source error_handler.func
+
+# Explain common error codes
+echo "Error 1: $(explain_exit_code 1)"
+echo "Error 127: $(explain_exit_code 127)"
+echo "Error 130: $(explain_exit_code 130)"
+echo "Error 200: $(explain_exit_code 200)"
+```
+
+### Error Code Testing
+
+```bash
+#!/usr/bin/env bash
+source error_handler.func
+
+# Test all error codes
+test_error_codes() {
+ local codes=(1 2 126 127 128 130 137 139 143 100 101 255 200 203 204 205)
+
+ for code in "${codes[@]}"; do
+ echo "Code $code: $(explain_exit_code $code)"
+ done
+}
+
+test_error_codes
+```
+
+### Custom Error Code Usage
+
+```bash
+#!/usr/bin/env bash
+source error_handler.func
+
+# Use custom error codes
+check_requirements() {
+ if [[ ! -f /required/file ]]; then
+ echo "Error: Required file missing"
+ exit 200 # Custom error code
+ fi
+
+ if [[ -z "$CTID" ]]; then
+ echo "Error: CTID not set"
+ exit 203 # Custom error code
+ fi
+
+ if [[ $CTID -lt 100 ]]; then
+ echo "Error: Invalid CTID"
+ exit 205 # Custom error code
+ fi
+}
+
+check_requirements
+```
+
+## Signal Handling Examples
+
+### Interrupt Handling
+
+```bash
+#!/usr/bin/env bash
+source error_handler.func
+
+# Set up interrupt handler
+trap on_interrupt INT
+
+echo "Script running... Press Ctrl+C to interrupt"
+sleep 10
+echo "Script completed normally"
+```
+
+### Termination Handling
+
+```bash
+#!/usr/bin/env bash
+source error_handler.func
+
+# Set up termination handler
+trap on_terminate TERM
+
+echo "Script running... Send SIGTERM to terminate"
+sleep 10
+echo "Script completed normally"
+```
+
+### Complete Signal Handling
+
+```bash
+#!/usr/bin/env bash
+source error_handler.func
+
+# Set up all signal handlers
+trap on_interrupt INT
+trap on_terminate TERM
+trap on_exit EXIT
+
+echo "Script running with full signal handling"
+sleep 10
+echo "Script completed normally"
+```
+
+## Cleanup Examples
+
+### Lock File Cleanup
+
+```bash
+#!/usr/bin/env bash
+source error_handler.func
+
+# Set up lock file
+lockfile="/tmp/my_script.lock"
+touch "$lockfile"
+
+# Set up exit handler
+trap on_exit EXIT
+
+echo "Script running with lock file..."
+sleep 5
+echo "Script completed - lock file will be removed"
+```
+
+### Temporary File Cleanup
+
+```bash
+#!/usr/bin/env bash
+source error_handler.func
+
+# Create temporary files
+temp_file1="/tmp/temp1.$$"
+temp_file2="/tmp/temp2.$$"
+touch "$temp_file1" "$temp_file2"
+
+# Set up cleanup
+cleanup() {
+ rm -f "$temp_file1" "$temp_file2"
+ echo "Temporary files cleaned up"
+}
+
+trap cleanup EXIT
+
+echo "Script running with temporary files..."
+sleep 5
+echo "Script completed - temporary files will be cleaned up"
+```
+
+## Debug Logging Examples
+
+### Basic Debug Logging
+
+```bash
+#!/usr/bin/env bash
+source error_handler.func
+
+# Enable debug logging
+export DEBUG_LOGFILE="/tmp/debug.log"
+catch_errors
+
+echo "Script with debug logging"
+apt-get update
+apt-get install -y package
+```
+
+### Debug Log Analysis
+
+```bash
+#!/usr/bin/env bash
+source error_handler.func
+
+# Enable debug logging
+export DEBUG_LOGFILE="/tmp/debug.log"
+catch_errors
+
+# Function to analyze debug log
+analyze_debug_log() {
+ if [[ -f "$DEBUG_LOGFILE" ]]; then
+ echo "Debug log analysis:"
+ echo "Total errors: $(grep -c "ERROR" "$DEBUG_LOGFILE")"
+ echo "Recent errors:"
+ tail -n 5 "$DEBUG_LOGFILE"
+ else
+ echo "No debug log found"
+ fi
+}
+
+# Run script
+echo "Running script..."
+apt-get update
+
+# Analyze results
+analyze_debug_log
+```
+
+## Silent Execution Integration
+
+### With core.func Silent Execution
+
+```bash
+#!/usr/bin/env bash
+source core.func
+source error_handler.func
+
+# Silent execution with error handling
+echo "Installing packages..."
+silent apt-get update
+silent apt-get install -y nginx
+
+echo "Configuring service..."
+silent systemctl enable nginx
+silent systemctl start nginx
+
+echo "Installation completed"
+```
+
+### Silent Execution Error Handling
+
+```bash
+#!/usr/bin/env bash
+source core.func
+source error_handler.func
+
+# Function with silent execution and error handling
+install_package() {
+ local package="$1"
+
+ echo "Installing $package..."
+ if silent apt-get install -y "$package"; then
+ echo "$package installed successfully"
+ return 0
+ else
+ echo "Failed to install $package"
+ return 1
+ fi
+}
+
+# Install multiple packages
+packages=("nginx" "apache2" "mysql-server")
+for package in "${packages[@]}"; do
+ if ! install_package "$package"; then
+ echo "Stopping installation due to error"
+ exit 1
+ fi
+done
+```
+
+## Advanced Error Handling Examples
+
+### Conditional Error Handling
+
+```bash
+#!/usr/bin/env bash
+source error_handler.func
+
+# Conditional error handling based on environment
+setup_error_handling() {
+ if [[ "${STRICT_MODE:-0}" == "1" ]]; then
+ echo "Enabling strict mode"
+ export STRICT_UNSET=1
+ fi
+
+ catch_errors
+ echo "Error handling configured"
+}
+
+setup_error_handling
+```
+
+### Error Recovery
+
+```bash
+#!/usr/bin/env bash
+source error_handler.func
+
+# Error recovery pattern
+retry_operation() {
+ local max_attempts=3
+ local attempt=1
+
+ while [[ $attempt -le $max_attempts ]]; do
+ echo "Attempt $attempt of $max_attempts"
+
+ if silent "$@"; then
+ echo "Operation succeeded on attempt $attempt"
+ return 0
+ else
+ echo "Attempt $attempt failed"
+ ((attempt++))
+
+ if [[ $attempt -le $max_attempts ]]; then
+ echo "Retrying in 5 seconds..."
+ sleep 5
+ fi
+ fi
+ done
+
+ echo "Operation failed after $max_attempts attempts"
+ return 1
+}
+
+# Use retry pattern
+retry_operation apt-get update
+retry_operation apt-get install -y package
+```
+
+### Custom Error Handler
+
+```bash
+#!/usr/bin/env bash
+source error_handler.func
+
+# Custom error handler for specific operations
+custom_error_handler() {
+ local exit_code=${1:-$?}
+ local command=${2:-${BASH_COMMAND:-unknown}}
+
+ case "$exit_code" in
+ 127)
+ echo "Custom handling: Command not found - $command"
+ echo "Suggestions:"
+ echo "1. Check if the command is installed"
+ echo "2. Check if the command is in PATH"
+ echo "3. Check spelling"
+ ;;
+ 126)
+ echo "Custom handling: Permission denied - $command"
+ echo "Suggestions:"
+ echo "1. Check file permissions"
+ echo "2. Run with appropriate privileges"
+ echo "3. Check if file is executable"
+ ;;
+ *)
+ # Use default error handler
+ error_handler "$exit_code" "$command"
+ ;;
+ esac
+}
+
+# Set up custom error handler
+trap 'custom_error_handler' ERR
+
+# Test custom error handling
+nonexistent_command
+```
+
+## Integration Examples
+
+### With build.func
+
+```bash
+#!/usr/bin/env bash
+# Integration with build.func
+
+source core.func
+source error_handler.func
+source build.func
+
+# Container creation with error handling
+export APP="plex"
+export CTID="100"
+
+# Errors will be caught and explained
+# Silent execution will use error_handler for explanations
+```
+
+### With tools.func
+
+```bash
+#!/usr/bin/env bash
+# Integration with tools.func
+
+source core.func
+source error_handler.func
+source tools.func
+
+# Tool operations with error handling
+# All errors are properly handled and explained
+```
+
+### With api.func
+
+```bash
+#!/usr/bin/env bash
+# Integration with api.func
+
+source core.func
+source error_handler.func
+source api.func
+
+# API operations with error handling
+# Network errors and API errors are properly handled
+```
+
+## Best Practices Examples
+
+### Comprehensive Error Handling
+
+```bash
+#!/usr/bin/env bash
+# Comprehensive error handling example
+
+source error_handler.func
+
+# Set up comprehensive error handling
+setup_comprehensive_error_handling() {
+ # Enable debug logging
+ export DEBUG_LOGFILE="/tmp/script_debug.log"
+
+ # Set up lock file
+ lockfile="/tmp/script.lock"
+ touch "$lockfile"
+
+ # Initialize error handling
+ catch_errors
+
+ # Set up signal handlers
+ trap on_interrupt INT
+ trap on_terminate TERM
+ trap on_exit EXIT
+
+ echo "Comprehensive error handling configured"
+}
+
+setup_comprehensive_error_handling
+
+# Script operations
+echo "Starting script operations..."
+# ... script code ...
+echo "Script operations completed"
+```
+
+### Error Handling for Different Scenarios
+
+```bash
+#!/usr/bin/env bash
+source error_handler.func
+
+# Different error handling for different scenarios
+handle_package_errors() {
+ local exit_code=$1
+ case "$exit_code" in
+ 100)
+ echo "Package manager error - trying to fix..."
+ apt-get --fix-broken install
+ ;;
+ 101)
+ echo "Configuration error - checking sources..."
+ apt-get update
+ ;;
+ *)
+ error_handler "$exit_code"
+ ;;
+ esac
+}
+
+handle_network_errors() {
+ local exit_code=$1
+ case "$exit_code" in
+ 127)
+ echo "Network command not found - checking connectivity..."
+ ping -c 1 8.8.8.8
+ ;;
+ *)
+ error_handler "$exit_code"
+ ;;
+ esac
+}
+
+# Use appropriate error handler
+if [[ "$1" == "package" ]]; then
+ trap 'handle_package_errors $?' ERR
+elif [[ "$1" == "network" ]]; then
+ trap 'handle_network_errors $?' ERR
+else
+ catch_errors
+fi
+```
+
+### Error Handling with Logging
+
+```bash
+#!/usr/bin/env bash
+source error_handler.func
+
+# Error handling with detailed logging
+setup_logging_error_handling() {
+ # Create log directory
+ mkdir -p /var/log/script_errors
+
+ # Set up debug logging
+ export DEBUG_LOGFILE="/var/log/script_errors/debug.log"
+
+ # Set up silent logging
+ export SILENT_LOGFILE="/var/log/script_errors/silent.log"
+
+ # Initialize error handling
+ catch_errors
+
+ echo "Logging error handling configured"
+}
+
+setup_logging_error_handling
+
+# Script operations with logging
+echo "Starting logged operations..."
+# ... script code ...
+echo "Logged operations completed"
+```
+
+## Troubleshooting Examples
+
+### Debug Mode
+
+```bash
+#!/usr/bin/env bash
+source error_handler.func
+
+# Enable debug mode
+export DEBUG_LOGFILE="/tmp/debug.log"
+export STRICT_UNSET=1
+
+catch_errors
+
+echo "Debug mode enabled"
+# Script operations
+```
+
+### Error Analysis
+
+```bash
+#!/usr/bin/env bash
+source error_handler.func
+
+# Function to analyze errors
+analyze_errors() {
+ local log_file="${1:-$DEBUG_LOGFILE}"
+
+ if [[ -f "$log_file" ]]; then
+ echo "Error Analysis:"
+ echo "Total errors: $(grep -c "ERROR" "$log_file")"
+ echo "Error types:"
+ grep "ERROR" "$log_file" | awk '{print $NF}' | sort | uniq -c
+ echo "Recent errors:"
+ tail -n 10 "$log_file"
+ else
+ echo "No error log found"
+ fi
+}
+
+# Run script with error analysis
+analyze_errors
+```
+
+### Error Recovery Testing
+
+```bash
+#!/usr/bin/env bash
+source error_handler.func
+
+# Test error recovery
+test_error_recovery() {
+ local test_cases=(
+ "nonexistent_command"
+ "apt-get install nonexistent_package"
+ "systemctl start nonexistent_service"
+ )
+
+ for test_case in "${test_cases[@]}"; do
+ echo "Testing: $test_case"
+ if silent $test_case; then
+ echo "Unexpected success"
+ else
+ echo "Expected failure handled"
+ fi
+ done
+}
+
+test_error_recovery
+```
diff --git a/docs/misc/error_handler.func/README.md b/docs/misc/error_handler.func/README.md
new file mode 100644
index 000000000..3c4448184
--- /dev/null
+++ b/docs/misc/error_handler.func/README.md
@@ -0,0 +1,228 @@
+# error_handler.func Documentation
+
+## Overview
+
+The `error_handler.func` file provides comprehensive error handling and signal management for Proxmox Community Scripts. It offers detailed error code explanations, graceful error recovery, and proper cleanup mechanisms.
+
+## Purpose and Use Cases
+
+- **Error Code Explanation**: Provides human-readable explanations for exit codes
+- **Signal Handling**: Manages SIGINT, SIGTERM, and other signals gracefully
+- **Error Recovery**: Implements proper cleanup and error reporting
+- **Debug Logging**: Records error information for troubleshooting
+- **Silent Execution Support**: Integrates with core.func silent execution
+
+## Quick Reference
+
+### Key Function Groups
+- **Error Explanation**: `explain_exit_code()` - Convert exit codes to human-readable messages
+- **Error Handling**: `error_handler()` - Main error handler with detailed reporting
+- **Signal Handlers**: `on_interrupt()`, `on_terminate()` - Graceful signal handling
+- **Cleanup**: `on_exit()` - Cleanup on script exit
+- **Trap Setup**: `catch_errors()` - Initialize error handling traps
+
+### Dependencies
+- **External**: None (pure Bash implementation)
+- **Internal**: Uses color variables from core.func
+
+### Integration Points
+- Used by: All scripts via core.func silent execution
+- Uses: Color variables from core.func
+- Provides: Error explanations for core.func silent function
+
+## Documentation Files
+
+### 📊 [ERROR_HANDLER_FLOWCHART.md](./ERROR_HANDLER_FLOWCHART.md)
+Visual execution flows showing error handling processes and signal management.
+
+### 📚 [ERROR_HANDLER_FUNCTIONS_REFERENCE.md](./ERROR_HANDLER_FUNCTIONS_REFERENCE.md)
+Complete alphabetical reference of all functions with parameters, dependencies, and usage details.
+
+### 💡 [ERROR_HANDLER_USAGE_EXAMPLES.md](./ERROR_HANDLER_USAGE_EXAMPLES.md)
+Practical examples showing how to use error handling functions and common patterns.
+
+### 🔗 [ERROR_HANDLER_INTEGRATION.md](./ERROR_HANDLER_INTEGRATION.md)
+How error_handler.func integrates with other components and provides error handling services.
+
+## Key Features
+
+### Error Code Categories
+- **Generic/Shell Errors**: Exit codes 1, 2, 126, 127, 128, 130, 137, 139, 143
+- **Package Manager Errors**: APT/DPKG errors (100, 101, 255)
+- **Node.js Errors**: JavaScript runtime errors (243, 245-249, 254)
+- **Python Errors**: Python environment and dependency errors (210-212)
+- **Database Errors**: PostgreSQL, MySQL, MongoDB errors (231-234, 241-244, 251-254)
+- **Proxmox Custom Errors**: Container and VM specific errors (200-231; note: codes 210 and 231 overlap with the Python and PostgreSQL ranges — the handler resolves them by context)
+
+### Signal Handling
+- **SIGINT (Ctrl+C)**: Graceful interruption handling
+- **SIGTERM**: Graceful termination handling
+- **EXIT**: Cleanup on script exit
+- **ERR**: Error trap for command failures
+
+### Error Reporting
+- **Detailed Messages**: Human-readable error explanations
+- **Context Information**: Line numbers, commands, timestamps
+- **Log Integration**: Silent log file integration
+- **Debug Logging**: Optional debug log file support
+
+## Common Usage Patterns
+
+### Basic Error Handling Setup
+```bash
+#!/usr/bin/env bash
+# Basic error handling setup
+
+source error_handler.func
+
+# Initialize error handling
+catch_errors
+
+# Your script code here
+# Errors will be automatically handled
+```
+
+### Manual Error Explanation
+```bash
+#!/usr/bin/env bash
+source error_handler.func
+
+# Get error explanation
+explanation=$(explain_exit_code 127)
+echo "Error 127: $explanation"
+# Output: Error 127: Command not found
+```
+
+### Custom Error Handling
+```bash
+#!/usr/bin/env bash
+source error_handler.func
+
+# Custom error handling
+if ! command -v required_tool >/dev/null 2>&1; then
+ echo "Error: required_tool not found"
+ exit 127
+fi
+```
+
+## Environment Variables
+
+### Debug Variables
+- `DEBUG_LOGFILE`: Path to debug log file for error logging
+- `SILENT_LOGFILE`: Path to silent execution log file
+- `STRICT_UNSET`: Enable strict unset variable checking (0/1)
+
+### Internal Variables
+- `lockfile`: Lock file path for cleanup (set by calling script)
+- `exit_code`: Current exit code
+- `command`: Failed command
+- `line_number`: Line number where error occurred
+
+## Error Categories
+
+### Generic/Shell Errors
+- **1**: General error / Operation not permitted
+- **2**: Misuse of shell builtins (syntax error)
+- **126**: Command invoked cannot execute (permission problem)
+- **127**: Command not found
+- **128**: Invalid argument to exit
+- **130**: Terminated by Ctrl+C (SIGINT)
+- **137**: Killed (SIGKILL / Out of memory)
+- **139**: Segmentation fault (core dumped)
+- **143**: Terminated (SIGTERM)
+
+### Package Manager Errors
+- **100**: APT package manager error (broken packages)
+- **101**: APT configuration error (bad sources.list)
+- **255**: DPKG fatal internal error
+
+### Node.js Errors
+- **243**: JavaScript heap out of memory
+- **245**: Invalid command-line option
+- **246**: Internal JavaScript parse error
+- **247**: Fatal internal error
+- **248**: Invalid C++ addon / N-API failure
+- **249**: Inspector error
+- **254**: npm/pnpm/yarn unknown fatal error
+
+### Python Errors
+- **210**: Virtualenv/uv environment missing or broken
+- **211**: Dependency resolution failed
+- **212**: Installation aborted (permissions or EXTERNALLY-MANAGED)
+
+### Database Errors
+- **PostgreSQL (231-234)**: Connection, authentication, database, query errors
+- **MySQL/MariaDB (241-244)**: Connection, authentication, database, query errors
+- **MongoDB (251-254)**: Connection, authentication, database, query errors
+
+### Proxmox Custom Errors
+- **200**: Failed to create lock file
+- **203**: Missing CTID variable
+- **204**: Missing PCT_OSTYPE variable
+- **205**: Invalid CTID (<100)
+- **209**: Container creation failed
+- **210**: Cluster not quorate
+- **214**: Not enough storage space
+- **215**: Container ID not listed
+- **216**: RootFS entry missing in config
+- **217**: Storage does not support rootdir
+- **220**: Unable to resolve template path
+- **222**: Template download failed after 3 attempts
+- **223**: Template not available after download
+- **231**: LXC stack upgrade/retry failed
+
+## Best Practices
+
+### Error Handling Setup
+1. Source error_handler.func early in script
+2. Call catch_errors() to initialize traps
+3. Use proper exit codes for different error types
+4. Provide meaningful error messages
+
+### Signal Handling
+1. Always set up signal traps
+2. Provide graceful cleanup on interruption
+3. Use appropriate exit codes for signals
+4. Clean up temporary files and processes
+
+### Error Reporting
+1. Use explain_exit_code() for user-friendly messages
+2. Log errors to debug files when needed
+3. Provide context information (line numbers, commands)
+4. Integrate with silent execution logging
+
+## Troubleshooting
+
+### Common Issues
+1. **Missing Error Handler**: Ensure error_handler.func is sourced
+2. **Trap Not Set**: Call catch_errors() to initialize traps
+3. **Color Variables**: Ensure core.func is sourced for colors
+4. **Lock Files**: Clean up lock files in on_exit()
+
+### Debug Mode
+Enable debug logging for detailed error information:
+```bash
+export DEBUG_LOGFILE="/tmp/debug.log"
+source error_handler.func
+catch_errors
+```
+
+### Error Code Testing
+Test error explanations:
+```bash
+source error_handler.func
+for code in 1 2 126 127 128 130 137 139 143; do
+ echo "Code $code: $(explain_exit_code $code)"
+done
+```
+
+## Related Documentation
+
+- [core.func](../core.func/) - Core utilities and silent execution
+- [build.func](../build.func/) - Container creation with error handling
+- [tools.func](../tools.func/) - Extended utilities with error handling
+- [api.func](../api.func/) - API operations with error handling
+
+---
+
+*This documentation covers the error_handler.func file which provides comprehensive error handling for all Proxmox Community Scripts.*
diff --git a/frontend/package-lock.json b/frontend/package-lock.json
index 9c8b7c97a..132da5f05 100644
--- a/frontend/package-lock.json
+++ b/frontend/package-lock.json
@@ -33,7 +33,7 @@
"fuse.js": "^7.1.0",
"lucide-react": "^0.453.0",
"mini-svg-data-uri": "^1.4.4",
- "next": "15.2.4",
+ "next": "15.5.2",
"next-themes": "^0.3.0",
"nuqs": "^2.4.1",
"pocketbase": "^0.21.5",
@@ -441,9 +441,9 @@
}
},
"node_modules/@emnapi/runtime": {
- "version": "1.3.1",
- "resolved": "https://registry.npmjs.org/@emnapi/runtime/-/runtime-1.3.1.tgz",
- "integrity": "sha512-kEBmG8KyqtxJZv+ygbEim+KCGtIq1fC22Ms3S4ziXmYKm8uyoLX0MHONVKwp+9opg390VaKRNt4a7A9NwmpNhw==",
+ "version": "1.5.0",
+ "resolved": "https://registry.npmjs.org/@emnapi/runtime/-/runtime-1.5.0.tgz",
+ "integrity": "sha512-97/BJ3iXHww3djw6hYIfErCZFee7qCtrneuLa20UXFCOTCfBM2cvQHjWJ2EG0s0MtdNwInarqCTz35i4wWXHsQ==",
"license": "MIT",
"optional": true,
"dependencies": {
@@ -1267,6 +1267,22 @@
"url": "https://opencollective.com/libvips"
}
},
+ "node_modules/@img/sharp-libvips-linux-ppc64": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-ppc64/-/sharp-libvips-linux-ppc64-1.2.0.tgz",
+ "integrity": "sha512-Xod/7KaDDHkYu2phxxfeEPXfVXFKx70EAFZ0qyUdOjCcxbjqyJOEUpDe6RIyaunGxT34Anf9ue/wuWOqBW2WcQ==",
+ "cpu": [
+ "ppc64"
+ ],
+ "license": "LGPL-3.0-or-later",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ }
+ },
"node_modules/@img/sharp-libvips-linux-s390x": {
"version": "1.0.4",
"resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-s390x/-/sharp-libvips-linux-s390x-1.0.4.tgz",
@@ -1375,6 +1391,28 @@
"@img/sharp-libvips-linux-arm64": "1.0.4"
}
},
+ "node_modules/@img/sharp-linux-ppc64": {
+ "version": "0.34.3",
+ "resolved": "https://registry.npmjs.org/@img/sharp-linux-ppc64/-/sharp-linux-ppc64-0.34.3.tgz",
+ "integrity": "sha512-GLtbLQMCNC5nxuImPR2+RgrviwKwVql28FWZIW1zWruy6zLgA5/x2ZXk3mxj58X/tszVF69KK0Is83V8YgWhLA==",
+ "cpu": [
+ "ppc64"
+ ],
+ "license": "Apache-2.0",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": "^18.17.0 || ^20.3.0 || >=21.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ },
+ "optionalDependencies": {
+ "@img/sharp-libvips-linux-ppc64": "1.2.0"
+ }
+ },
"node_modules/@img/sharp-linux-s390x": {
"version": "0.33.5",
"resolved": "https://registry.npmjs.org/@img/sharp-linux-s390x/-/sharp-linux-s390x-0.33.5.tgz",
@@ -1482,6 +1520,25 @@
"url": "https://opencollective.com/libvips"
}
},
+ "node_modules/@img/sharp-win32-arm64": {
+ "version": "0.34.3",
+ "resolved": "https://registry.npmjs.org/@img/sharp-win32-arm64/-/sharp-win32-arm64-0.34.3.tgz",
+ "integrity": "sha512-MjnHPnbqMXNC2UgeLJtX4XqoVHHlZNd+nPt1kRPmj63wURegwBhZlApELdtxM2OIZDRv/DFtLcNhVbd1z8GYXQ==",
+ "cpu": [
+ "arm64"
+ ],
+ "license": "Apache-2.0 AND LGPL-3.0-or-later",
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": "^18.17.0 || ^20.3.0 || >=21.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ }
+ },
"node_modules/@img/sharp-win32-ia32": {
"version": "0.33.5",
"resolved": "https://registry.npmjs.org/@img/sharp-win32-ia32/-/sharp-win32-ia32-0.33.5.tgz",
@@ -1598,9 +1655,9 @@
"license": "MIT"
},
"node_modules/@next/env": {
- "version": "15.2.4",
- "resolved": "https://registry.npmjs.org/@next/env/-/env-15.2.4.tgz",
- "integrity": "sha512-+SFtMgoiYP3WoSswuNmxJOCwi06TdWE733D+WPjpXIe4LXGULwEaofiiAy6kbS0+XjM5xF5n3lKuBwN2SnqD9g==",
+ "version": "15.5.2",
+ "resolved": "https://registry.npmjs.org/@next/env/-/env-15.5.2.tgz",
+ "integrity": "sha512-Qe06ew4zt12LeO6N7j8/nULSOe3fMXE4dM6xgpBQNvdzyK1sv5y4oAP3bq4LamrvGCZtmRYnW8URFCeX5nFgGg==",
"license": "MIT"
},
"node_modules/@next/eslint-plugin-next": {
@@ -1644,9 +1701,9 @@
}
},
"node_modules/@next/swc-darwin-arm64": {
- "version": "15.2.4",
- "resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-15.2.4.tgz",
- "integrity": "sha512-1AnMfs655ipJEDC/FHkSr0r3lXBgpqKo4K1kiwfUf3iE68rDFXZ1TtHdMvf7D0hMItgDZ7Vuq3JgNMbt/+3bYw==",
+ "version": "15.5.2",
+ "resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-15.5.2.tgz",
+ "integrity": "sha512-8bGt577BXGSd4iqFygmzIfTYizHb0LGWqH+qgIF/2EDxS5JsSdERJKA8WgwDyNBZgTIIA4D8qUtoQHmxIIquoQ==",
"cpu": [
"arm64"
],
@@ -1660,9 +1717,9 @@
}
},
"node_modules/@next/swc-darwin-x64": {
- "version": "15.2.4",
- "resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-15.2.4.tgz",
- "integrity": "sha512-3qK2zb5EwCwxnO2HeO+TRqCubeI/NgCe+kL5dTJlPldV/uwCnUgC7VbEzgmxbfrkbjehL4H9BPztWOEtsoMwew==",
+ "version": "15.5.2",
+ "resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-15.5.2.tgz",
+ "integrity": "sha512-2DjnmR6JHK4X+dgTXt5/sOCu/7yPtqpYt8s8hLkHFK3MGkka2snTv3yRMdHvuRtJVkPwCGsvBSwmoQCHatauFQ==",
"cpu": [
"x64"
],
@@ -1676,9 +1733,9 @@
}
},
"node_modules/@next/swc-linux-arm64-gnu": {
- "version": "15.2.4",
- "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-15.2.4.tgz",
- "integrity": "sha512-HFN6GKUcrTWvem8AZN7tT95zPb0GUGv9v0d0iyuTb303vbXkkbHDp/DxufB04jNVD+IN9yHy7y/6Mqq0h0YVaQ==",
+ "version": "15.5.2",
+ "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-15.5.2.tgz",
+ "integrity": "sha512-3j7SWDBS2Wov/L9q0mFJtEvQ5miIqfO4l7d2m9Mo06ddsgUK8gWfHGgbjdFlCp2Ek7MmMQZSxpGFqcC8zGh2AA==",
"cpu": [
"arm64"
],
@@ -1692,9 +1749,9 @@
}
},
"node_modules/@next/swc-linux-arm64-musl": {
- "version": "15.2.4",
- "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-15.2.4.tgz",
- "integrity": "sha512-Oioa0SORWLwi35/kVB8aCk5Uq+5/ZIumMK1kJV+jSdazFm2NzPDztsefzdmzzpx5oGCJ6FkUC7vkaUseNTStNA==",
+ "version": "15.5.2",
+ "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-15.5.2.tgz",
+ "integrity": "sha512-s6N8k8dF9YGc5T01UPQ08yxsK6fUow5gG1/axWc1HVVBYQBgOjca4oUZF7s4p+kwhkB1bDSGR8QznWrFZ/Rt5g==",
"cpu": [
"arm64"
],
@@ -1708,9 +1765,9 @@
}
},
"node_modules/@next/swc-linux-x64-gnu": {
- "version": "15.2.4",
- "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-15.2.4.tgz",
- "integrity": "sha512-yb5WTRaHdkgOqFOZiu6rHV1fAEK0flVpaIN2HB6kxHVSy/dIajWbThS7qON3W9/SNOH2JWkVCyulgGYekMePuw==",
+ "version": "15.5.2",
+ "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-15.5.2.tgz",
+ "integrity": "sha512-o1RV/KOODQh6dM6ZRJGZbc+MOAHww33Vbs5JC9Mp1gDk8cpEO+cYC/l7rweiEalkSm5/1WGa4zY7xrNwObN4+Q==",
"cpu": [
"x64"
],
@@ -1724,9 +1781,9 @@
}
},
"node_modules/@next/swc-linux-x64-musl": {
- "version": "15.2.4",
- "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-15.2.4.tgz",
- "integrity": "sha512-Dcdv/ix6srhkM25fgXiyOieFUkz+fOYkHlydWCtB0xMST6X9XYI3yPDKBZt1xuhOytONsIFJFB08xXYsxUwJLw==",
+ "version": "15.5.2",
+ "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-15.5.2.tgz",
+ "integrity": "sha512-/VUnh7w8RElYZ0IV83nUcP/J4KJ6LLYliiBIri3p3aW2giF+PAVgZb6mk8jbQSB3WlTai8gEmCAr7kptFa1H6g==",
"cpu": [
"x64"
],
@@ -1740,9 +1797,9 @@
}
},
"node_modules/@next/swc-win32-arm64-msvc": {
- "version": "15.2.4",
- "resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-15.2.4.tgz",
- "integrity": "sha512-dW0i7eukvDxtIhCYkMrZNQfNicPDExt2jPb9AZPpL7cfyUo7QSNl1DjsHjmmKp6qNAqUESyT8YFl/Aw91cNJJg==",
+ "version": "15.5.2",
+ "resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-15.5.2.tgz",
+ "integrity": "sha512-sMPyTvRcNKXseNQ/7qRfVRLa0VhR0esmQ29DD6pqvG71+JdVnESJaHPA8t7bc67KD5spP3+DOCNLhqlEI2ZgQg==",
"cpu": [
"arm64"
],
@@ -1756,9 +1813,9 @@
}
},
"node_modules/@next/swc-win32-x64-msvc": {
- "version": "15.2.4",
- "resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-15.2.4.tgz",
- "integrity": "sha512-SbnWkJmkS7Xl3kre8SdMF6F/XDh1DTFEhp0jRTj/uB8iPKoU2bb2NDfcu+iifv1+mxQEd1g2vvSxcZbXSKyWiQ==",
+ "version": "15.5.2",
+ "resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-15.5.2.tgz",
+ "integrity": "sha512-W5VvyZHnxG/2ukhZF/9Ikdra5fdNftxI6ybeVKYvBPDtyx7x4jPPSNduUkfH5fo3zG0JQ0bPxgy41af2JX5D4Q==",
"cpu": [
"x64"
],
@@ -3043,12 +3100,6 @@
"dev": true,
"license": "MIT"
},
- "node_modules/@swc/counter": {
- "version": "0.1.3",
- "resolved": "https://registry.npmjs.org/@swc/counter/-/counter-0.1.3.tgz",
- "integrity": "sha512-e2BR4lsJkkRlKZ/qCHPw9ZaSxc0MVUd7gtbtaB7aMvHeJVYe8sOB8DBZkP2DtISHGSku9sCK6T6cnY0CtXrOCQ==",
- "license": "Apache-2.0"
- },
"node_modules/@swc/helpers": {
"version": "0.5.15",
"resolved": "https://registry.npmjs.org/@swc/helpers/-/helpers-0.5.15.tgz",
@@ -4041,17 +4092,6 @@
"node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7"
}
},
- "node_modules/busboy": {
- "version": "1.6.0",
- "resolved": "https://registry.npmjs.org/busboy/-/busboy-1.6.0.tgz",
- "integrity": "sha512-8SFQbg/0hQ9xy3UNTB0YEnsNBbWfhf7RtnzpL7TkBiTBRfrQ9Fxcnz7VJsleJpyp6rVLvXiuORqjlHi5q+PYuA==",
- "dependencies": {
- "streamsearch": "^1.1.0"
- },
- "engines": {
- "node": ">=10.16.0"
- }
- },
"node_modules/cac": {
"version": "6.7.14",
"resolved": "https://registry.npmjs.org/cac/-/cac-6.7.14.tgz",
@@ -4082,6 +4122,20 @@
"url": "https://github.com/sponsors/ljharb"
}
},
+ "node_modules/call-bind-apply-helpers": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz",
+ "integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "es-errors": "^1.3.0",
+ "function-bind": "^1.1.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
"node_modules/callsites": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz",
@@ -4646,9 +4700,9 @@
}
},
"node_modules/detect-libc": {
- "version": "2.0.3",
- "resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-2.0.3.tgz",
- "integrity": "sha512-bwy0MGW55bG41VqxxypOsdSdGqLwXPI/focwgTYCFMbdUiBAxLg9CFzG08sz2aqzknwiX7Hkl0bQENjg8iLByw==",
+ "version": "2.0.4",
+ "resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-2.0.4.tgz",
+ "integrity": "sha512-3UDv+G9CsCKO1WKMGw9fwq/SWJYbI0c5Y7LU1AXYoDdbhE2AHQ6N6Nb34sG8Fj7T5APy8qXDCKuuIHd1BR0tVA==",
"license": "Apache-2.0",
"engines": {
"node": ">=8"
@@ -4694,6 +4748,21 @@
"dev": true,
"license": "MIT"
},
+ "node_modules/dunder-proto": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz",
+ "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "call-bind-apply-helpers": "^1.0.1",
+ "es-errors": "^1.3.0",
+ "gopd": "^1.2.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
"node_modules/eastasianwidth": {
"version": "0.2.0",
"resolved": "https://registry.npmjs.org/eastasianwidth/-/eastasianwidth-0.2.0.tgz",
@@ -4804,14 +4873,11 @@
}
},
"node_modules/es-define-property": {
- "version": "1.0.0",
- "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.0.tgz",
- "integrity": "sha512-jxayLKShrEqqzJ0eumQbVhTYQM27CfT1T35+gCgDFoL82JLsXqTJ76zv6A0YLOgEnLUMvLzsDsGIrl8NFpT2gQ==",
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz",
+ "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==",
"dev": true,
"license": "MIT",
- "dependencies": {
- "get-intrinsic": "^1.2.4"
- },
"engines": {
"node": ">= 0.4"
}
@@ -4860,9 +4926,9 @@
"license": "MIT"
},
"node_modules/es-object-atoms": {
- "version": "1.0.0",
- "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.0.0.tgz",
- "integrity": "sha512-MZ4iQ6JwHOBQjahnjwaC1ZtIBH+2ohjamzAO3oaHcXYup7qxjF2fixyH+Q71voWHeOkI2q/TnJao/KfXYIZWbw==",
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz",
+ "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==",
"dev": true,
"license": "MIT",
"dependencies": {
@@ -4873,15 +4939,16 @@
}
},
"node_modules/es-set-tostringtag": {
- "version": "2.0.3",
- "resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.0.3.tgz",
- "integrity": "sha512-3T8uNMC3OQTHkFUsFq8r/BwAXLHvU/9O9mE0fBc/MY5iq/8H7ncvO947LmYA6ldWw9Uh8Yhf25zu6n7nML5QWQ==",
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz",
+ "integrity": "sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==",
"dev": true,
"license": "MIT",
"dependencies": {
- "get-intrinsic": "^1.2.4",
+ "es-errors": "^1.3.0",
+ "get-intrinsic": "^1.2.6",
"has-tostringtag": "^1.0.2",
- "hasown": "^2.0.1"
+ "hasown": "^2.0.2"
},
"engines": {
"node": ">= 0.4"
@@ -5728,14 +5795,16 @@
}
},
"node_modules/form-data": {
- "version": "4.0.1",
- "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.1.tgz",
- "integrity": "sha512-tzN8e4TX8+kkxGPK8D5u0FNmjPUjw3lwC9lSLxxoB/+GtsJG91CO8bSWy73APlgAZzZbXEYZJuxjkHH2w+Ezhw==",
+ "version": "4.0.4",
+ "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.4.tgz",
+ "integrity": "sha512-KrGhL9Q4zjj0kiUt5OO4Mr/A/jlI2jDYs5eHBpYHPcBEVSiipAvn2Ko2HnPe20rmcuuvMHNdZFp+4IlGTMF0Ow==",
"dev": true,
"license": "MIT",
"dependencies": {
"asynckit": "^0.4.0",
"combined-stream": "^1.0.8",
+ "es-set-tostringtag": "^2.1.0",
+ "hasown": "^2.0.2",
"mime-types": "^2.1.12"
},
"engines": {
@@ -5851,17 +5920,22 @@
}
},
"node_modules/get-intrinsic": {
- "version": "1.2.4",
- "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.4.tgz",
- "integrity": "sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ==",
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz",
+ "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==",
"dev": true,
"license": "MIT",
"dependencies": {
+ "call-bind-apply-helpers": "^1.0.2",
+ "es-define-property": "^1.0.1",
"es-errors": "^1.3.0",
+ "es-object-atoms": "^1.1.1",
"function-bind": "^1.1.2",
- "has-proto": "^1.0.1",
- "has-symbols": "^1.0.3",
- "hasown": "^2.0.0"
+ "get-proto": "^1.0.1",
+ "gopd": "^1.2.0",
+ "has-symbols": "^1.1.0",
+ "hasown": "^2.0.2",
+ "math-intrinsics": "^1.1.0"
},
"engines": {
"node": ">= 0.4"
@@ -5879,6 +5953,20 @@
"node": ">=6"
}
},
+ "node_modules/get-proto": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz",
+ "integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "dunder-proto": "^1.0.1",
+ "es-object-atoms": "^1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
"node_modules/get-symbol-description": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/get-symbol-description/-/get-symbol-description-1.0.2.tgz",
@@ -5982,13 +6070,13 @@
"license": "MIT"
},
"node_modules/gopd": {
- "version": "1.0.1",
- "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.0.1.tgz",
- "integrity": "sha512-d65bNlIadxvpb/A2abVdlqKqV563juRnZ1Wtk6s1sIR8uNsXR70xqIzVqxVf1eTqDunwT2MkczEeaezCKTZhwA==",
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz",
+ "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==",
"dev": true,
"license": "MIT",
- "dependencies": {
- "get-intrinsic": "^1.1.3"
+ "engines": {
+ "node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
@@ -6055,9 +6143,9 @@
}
},
"node_modules/has-symbols": {
- "version": "1.0.3",
- "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz",
- "integrity": "sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==",
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz",
+ "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==",
"dev": true,
"license": "MIT",
"engines": {
@@ -7037,6 +7125,16 @@
"@jridgewell/sourcemap-codec": "^1.5.0"
}
},
+ "node_modules/math-intrinsics": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz",
+ "integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
"node_modules/merge2": {
"version": "1.4.1",
"resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz",
@@ -7194,15 +7292,13 @@
"license": "MIT"
},
"node_modules/next": {
- "version": "15.2.4",
- "resolved": "https://registry.npmjs.org/next/-/next-15.2.4.tgz",
- "integrity": "sha512-VwL+LAaPSxEkd3lU2xWbgEOtrM8oedmyhBqaVNmgKB+GvZlCy9rgaEc+y2on0wv+l0oSFqLtYD6dcC1eAedUaQ==",
+ "version": "15.5.2",
+ "resolved": "https://registry.npmjs.org/next/-/next-15.5.2.tgz",
+ "integrity": "sha512-H8Otr7abj1glFhbGnvUt3gz++0AF1+QoCXEBmd/6aKbfdFwrn0LpA836Ed5+00va/7HQSDD+mOoVhn3tNy3e/Q==",
"license": "MIT",
"dependencies": {
- "@next/env": "15.2.4",
- "@swc/counter": "0.1.3",
+ "@next/env": "15.5.2",
"@swc/helpers": "0.5.15",
- "busboy": "1.6.0",
"caniuse-lite": "^1.0.30001579",
"postcss": "8.4.31",
"styled-jsx": "5.1.6"
@@ -7214,19 +7310,19 @@
"node": "^18.18.0 || ^19.8.0 || >= 20.0.0"
},
"optionalDependencies": {
- "@next/swc-darwin-arm64": "15.2.4",
- "@next/swc-darwin-x64": "15.2.4",
- "@next/swc-linux-arm64-gnu": "15.2.4",
- "@next/swc-linux-arm64-musl": "15.2.4",
- "@next/swc-linux-x64-gnu": "15.2.4",
- "@next/swc-linux-x64-musl": "15.2.4",
- "@next/swc-win32-arm64-msvc": "15.2.4",
- "@next/swc-win32-x64-msvc": "15.2.4",
- "sharp": "^0.33.5"
+ "@next/swc-darwin-arm64": "15.5.2",
+ "@next/swc-darwin-x64": "15.5.2",
+ "@next/swc-linux-arm64-gnu": "15.5.2",
+ "@next/swc-linux-arm64-musl": "15.5.2",
+ "@next/swc-linux-x64-gnu": "15.5.2",
+ "@next/swc-linux-x64-musl": "15.5.2",
+ "@next/swc-win32-arm64-msvc": "15.5.2",
+ "@next/swc-win32-x64-msvc": "15.5.2",
+ "sharp": "^0.34.3"
},
"peerDependencies": {
"@opentelemetry/api": "^1.1.0",
- "@playwright/test": "^1.41.2",
+ "@playwright/test": "^1.51.1",
"babel-plugin-react-compiler": "*",
"react": "^18.2.0 || 19.0.0-rc-de68d2f4-20241204 || ^19.0.0",
"react-dom": "^18.2.0 || 19.0.0-rc-de68d2f4-20241204 || ^19.0.0",
@@ -7257,6 +7353,367 @@
"react-dom": "^16.8 || ^17 || ^18"
}
},
+ "node_modules/next/node_modules/@img/sharp-darwin-arm64": {
+ "version": "0.34.3",
+ "resolved": "https://registry.npmjs.org/@img/sharp-darwin-arm64/-/sharp-darwin-arm64-0.34.3.tgz",
+ "integrity": "sha512-ryFMfvxxpQRsgZJqBd4wsttYQbCxsJksrv9Lw/v798JcQ8+w84mBWuXwl+TT0WJ/WrYOLaYpwQXi3sA9nTIaIg==",
+ "cpu": [
+ "arm64"
+ ],
+ "license": "Apache-2.0",
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "engines": {
+ "node": "^18.17.0 || ^20.3.0 || >=21.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ },
+ "optionalDependencies": {
+ "@img/sharp-libvips-darwin-arm64": "1.2.0"
+ }
+ },
+ "node_modules/next/node_modules/@img/sharp-darwin-x64": {
+ "version": "0.34.3",
+ "resolved": "https://registry.npmjs.org/@img/sharp-darwin-x64/-/sharp-darwin-x64-0.34.3.tgz",
+ "integrity": "sha512-yHpJYynROAj12TA6qil58hmPmAwxKKC7reUqtGLzsOHfP7/rniNGTL8tjWX6L3CTV4+5P4ypcS7Pp+7OB+8ihA==",
+ "cpu": [
+ "x64"
+ ],
+ "license": "Apache-2.0",
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "engines": {
+ "node": "^18.17.0 || ^20.3.0 || >=21.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ },
+ "optionalDependencies": {
+ "@img/sharp-libvips-darwin-x64": "1.2.0"
+ }
+ },
+ "node_modules/next/node_modules/@img/sharp-libvips-darwin-arm64": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/@img/sharp-libvips-darwin-arm64/-/sharp-libvips-darwin-arm64-1.2.0.tgz",
+ "integrity": "sha512-sBZmpwmxqwlqG9ueWFXtockhsxefaV6O84BMOrhtg/YqbTaRdqDE7hxraVE3y6gVM4eExmfzW4a8el9ArLeEiQ==",
+ "cpu": [
+ "arm64"
+ ],
+ "license": "LGPL-3.0-or-later",
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ }
+ },
+ "node_modules/next/node_modules/@img/sharp-libvips-darwin-x64": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/@img/sharp-libvips-darwin-x64/-/sharp-libvips-darwin-x64-1.2.0.tgz",
+ "integrity": "sha512-M64XVuL94OgiNHa5/m2YvEQI5q2cl9d/wk0qFTDVXcYzi43lxuiFTftMR1tOnFQovVXNZJ5TURSDK2pNe9Yzqg==",
+ "cpu": [
+ "x64"
+ ],
+ "license": "LGPL-3.0-or-later",
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ }
+ },
+ "node_modules/next/node_modules/@img/sharp-libvips-linux-arm": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-arm/-/sharp-libvips-linux-arm-1.2.0.tgz",
+ "integrity": "sha512-mWd2uWvDtL/nvIzThLq3fr2nnGfyr/XMXlq8ZJ9WMR6PXijHlC3ksp0IpuhK6bougvQrchUAfzRLnbsen0Cqvw==",
+ "cpu": [
+ "arm"
+ ],
+ "license": "LGPL-3.0-or-later",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ }
+ },
+ "node_modules/next/node_modules/@img/sharp-libvips-linux-arm64": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-arm64/-/sharp-libvips-linux-arm64-1.2.0.tgz",
+ "integrity": "sha512-RXwd0CgG+uPRX5YYrkzKyalt2OJYRiJQ8ED/fi1tq9WQW2jsQIn0tqrlR5l5dr/rjqq6AHAxURhj2DVjyQWSOA==",
+ "cpu": [
+ "arm64"
+ ],
+ "license": "LGPL-3.0-or-later",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ }
+ },
+ "node_modules/next/node_modules/@img/sharp-libvips-linux-s390x": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-s390x/-/sharp-libvips-linux-s390x-1.2.0.tgz",
+ "integrity": "sha512-eMKfzDxLGT8mnmPJTNMcjfO33fLiTDsrMlUVcp6b96ETbnJmd4uvZxVJSKPQfS+odwfVaGifhsB07J1LynFehw==",
+ "cpu": [
+ "s390x"
+ ],
+ "license": "LGPL-3.0-or-later",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ }
+ },
+ "node_modules/next/node_modules/@img/sharp-libvips-linux-x64": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-x64/-/sharp-libvips-linux-x64-1.2.0.tgz",
+ "integrity": "sha512-ZW3FPWIc7K1sH9E3nxIGB3y3dZkpJlMnkk7z5tu1nSkBoCgw2nSRTFHI5pB/3CQaJM0pdzMF3paf9ckKMSE9Tg==",
+ "cpu": [
+ "x64"
+ ],
+ "license": "LGPL-3.0-or-later",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ }
+ },
+ "node_modules/next/node_modules/@img/sharp-libvips-linuxmusl-arm64": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linuxmusl-arm64/-/sharp-libvips-linuxmusl-arm64-1.2.0.tgz",
+ "integrity": "sha512-UG+LqQJbf5VJ8NWJ5Z3tdIe/HXjuIdo4JeVNADXBFuG7z9zjoegpzzGIyV5zQKi4zaJjnAd2+g2nna8TZvuW9Q==",
+ "cpu": [
+ "arm64"
+ ],
+ "license": "LGPL-3.0-or-later",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ }
+ },
+ "node_modules/next/node_modules/@img/sharp-libvips-linuxmusl-x64": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linuxmusl-x64/-/sharp-libvips-linuxmusl-x64-1.2.0.tgz",
+ "integrity": "sha512-SRYOLR7CXPgNze8akZwjoGBoN1ThNZoqpOgfnOxmWsklTGVfJiGJoC/Lod7aNMGA1jSsKWM1+HRX43OP6p9+6Q==",
+ "cpu": [
+ "x64"
+ ],
+ "license": "LGPL-3.0-or-later",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ }
+ },
+ "node_modules/next/node_modules/@img/sharp-linux-arm": {
+ "version": "0.34.3",
+ "resolved": "https://registry.npmjs.org/@img/sharp-linux-arm/-/sharp-linux-arm-0.34.3.tgz",
+ "integrity": "sha512-oBK9l+h6KBN0i3dC8rYntLiVfW8D8wH+NPNT3O/WBHeW0OQWCjfWksLUaPidsrDKpJgXp3G3/hkmhptAW0I3+A==",
+ "cpu": [
+ "arm"
+ ],
+ "license": "Apache-2.0",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": "^18.17.0 || ^20.3.0 || >=21.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ },
+ "optionalDependencies": {
+ "@img/sharp-libvips-linux-arm": "1.2.0"
+ }
+ },
+ "node_modules/next/node_modules/@img/sharp-linux-arm64": {
+ "version": "0.34.3",
+ "resolved": "https://registry.npmjs.org/@img/sharp-linux-arm64/-/sharp-linux-arm64-0.34.3.tgz",
+ "integrity": "sha512-QdrKe3EvQrqwkDrtuTIjI0bu6YEJHTgEeqdzI3uWJOH6G1O8Nl1iEeVYRGdj1h5I21CqxSvQp1Yv7xeU3ZewbA==",
+ "cpu": [
+ "arm64"
+ ],
+ "license": "Apache-2.0",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": "^18.17.0 || ^20.3.0 || >=21.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ },
+ "optionalDependencies": {
+ "@img/sharp-libvips-linux-arm64": "1.2.0"
+ }
+ },
+ "node_modules/next/node_modules/@img/sharp-linux-s390x": {
+ "version": "0.34.3",
+ "resolved": "https://registry.npmjs.org/@img/sharp-linux-s390x/-/sharp-linux-s390x-0.34.3.tgz",
+ "integrity": "sha512-3gahT+A6c4cdc2edhsLHmIOXMb17ltffJlxR0aC2VPZfwKoTGZec6u5GrFgdR7ciJSsHT27BD3TIuGcuRT0KmQ==",
+ "cpu": [
+ "s390x"
+ ],
+ "license": "Apache-2.0",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": "^18.17.0 || ^20.3.0 || >=21.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ },
+ "optionalDependencies": {
+ "@img/sharp-libvips-linux-s390x": "1.2.0"
+ }
+ },
+ "node_modules/next/node_modules/@img/sharp-linux-x64": {
+ "version": "0.34.3",
+ "resolved": "https://registry.npmjs.org/@img/sharp-linux-x64/-/sharp-linux-x64-0.34.3.tgz",
+ "integrity": "sha512-8kYso8d806ypnSq3/Ly0QEw90V5ZoHh10yH0HnrzOCr6DKAPI6QVHvwleqMkVQ0m+fc7EH8ah0BB0QPuWY6zJQ==",
+ "cpu": [
+ "x64"
+ ],
+ "license": "Apache-2.0",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": "^18.17.0 || ^20.3.0 || >=21.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ },
+ "optionalDependencies": {
+ "@img/sharp-libvips-linux-x64": "1.2.0"
+ }
+ },
+ "node_modules/next/node_modules/@img/sharp-linuxmusl-arm64": {
+ "version": "0.34.3",
+ "resolved": "https://registry.npmjs.org/@img/sharp-linuxmusl-arm64/-/sharp-linuxmusl-arm64-0.34.3.tgz",
+ "integrity": "sha512-vAjbHDlr4izEiXM1OTggpCcPg9tn4YriK5vAjowJsHwdBIdx0fYRsURkxLG2RLm9gyBq66gwtWI8Gx0/ov+JKQ==",
+ "cpu": [
+ "arm64"
+ ],
+ "license": "Apache-2.0",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": "^18.17.0 || ^20.3.0 || >=21.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ },
+ "optionalDependencies": {
+ "@img/sharp-libvips-linuxmusl-arm64": "1.2.0"
+ }
+ },
+ "node_modules/next/node_modules/@img/sharp-linuxmusl-x64": {
+ "version": "0.34.3",
+ "resolved": "https://registry.npmjs.org/@img/sharp-linuxmusl-x64/-/sharp-linuxmusl-x64-0.34.3.tgz",
+ "integrity": "sha512-gCWUn9547K5bwvOn9l5XGAEjVTTRji4aPTqLzGXHvIr6bIDZKNTA34seMPgM0WmSf+RYBH411VavCejp3PkOeQ==",
+ "cpu": [
+ "x64"
+ ],
+ "license": "Apache-2.0",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": "^18.17.0 || ^20.3.0 || >=21.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ },
+ "optionalDependencies": {
+ "@img/sharp-libvips-linuxmusl-x64": "1.2.0"
+ }
+ },
+ "node_modules/next/node_modules/@img/sharp-wasm32": {
+ "version": "0.34.3",
+ "resolved": "https://registry.npmjs.org/@img/sharp-wasm32/-/sharp-wasm32-0.34.3.tgz",
+ "integrity": "sha512-+CyRcpagHMGteySaWos8IbnXcHgfDn7pO2fiC2slJxvNq9gDipYBN42/RagzctVRKgxATmfqOSulgZv5e1RdMg==",
+ "cpu": [
+ "wasm32"
+ ],
+ "license": "Apache-2.0 AND LGPL-3.0-or-later AND MIT",
+ "optional": true,
+ "dependencies": {
+ "@emnapi/runtime": "^1.4.4"
+ },
+ "engines": {
+ "node": "^18.17.0 || ^20.3.0 || >=21.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ }
+ },
+ "node_modules/next/node_modules/@img/sharp-win32-ia32": {
+ "version": "0.34.3",
+ "resolved": "https://registry.npmjs.org/@img/sharp-win32-ia32/-/sharp-win32-ia32-0.34.3.tgz",
+ "integrity": "sha512-xuCdhH44WxuXgOM714hn4amodJMZl3OEvf0GVTm0BEyMeA2to+8HEdRPShH0SLYptJY1uBw+SCFP9WVQi1Q/cw==",
+ "cpu": [
+ "ia32"
+ ],
+ "license": "Apache-2.0 AND LGPL-3.0-or-later",
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": "^18.17.0 || ^20.3.0 || >=21.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ }
+ },
+ "node_modules/next/node_modules/@img/sharp-win32-x64": {
+ "version": "0.34.3",
+ "resolved": "https://registry.npmjs.org/@img/sharp-win32-x64/-/sharp-win32-x64-0.34.3.tgz",
+ "integrity": "sha512-OWwz05d++TxzLEv4VnsTz5CmZ6mI6S05sfQGEMrNrQcOEERbX46332IvE7pO/EUiw7jUrrS40z/M7kPyjfl04g==",
+ "cpu": [
+ "x64"
+ ],
+ "license": "Apache-2.0 AND LGPL-3.0-or-later",
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": "^18.17.0 || ^20.3.0 || >=21.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ }
+ },
"node_modules/next/node_modules/postcss": {
"version": "8.4.31",
"resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.31.tgz",
@@ -7285,6 +7742,49 @@
"node": "^10 || ^12 || >=14"
}
},
+ "node_modules/next/node_modules/sharp": {
+ "version": "0.34.3",
+ "resolved": "https://registry.npmjs.org/sharp/-/sharp-0.34.3.tgz",
+ "integrity": "sha512-eX2IQ6nFohW4DbvHIOLRB3MHFpYqaqvXd3Tp5e/T/dSH83fxaNJQRvDMhASmkNTsNTVF2/OOopzRCt7xokgPfg==",
+ "hasInstallScript": true,
+ "license": "Apache-2.0",
+ "optional": true,
+ "dependencies": {
+ "color": "^4.2.3",
+ "detect-libc": "^2.0.4",
+ "semver": "^7.7.2"
+ },
+ "engines": {
+ "node": "^18.17.0 || ^20.3.0 || >=21.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ },
+ "optionalDependencies": {
+ "@img/sharp-darwin-arm64": "0.34.3",
+ "@img/sharp-darwin-x64": "0.34.3",
+ "@img/sharp-libvips-darwin-arm64": "1.2.0",
+ "@img/sharp-libvips-darwin-x64": "1.2.0",
+ "@img/sharp-libvips-linux-arm": "1.2.0",
+ "@img/sharp-libvips-linux-arm64": "1.2.0",
+ "@img/sharp-libvips-linux-ppc64": "1.2.0",
+ "@img/sharp-libvips-linux-s390x": "1.2.0",
+ "@img/sharp-libvips-linux-x64": "1.2.0",
+ "@img/sharp-libvips-linuxmusl-arm64": "1.2.0",
+ "@img/sharp-libvips-linuxmusl-x64": "1.2.0",
+ "@img/sharp-linux-arm": "0.34.3",
+ "@img/sharp-linux-arm64": "0.34.3",
+ "@img/sharp-linux-ppc64": "0.34.3",
+ "@img/sharp-linux-s390x": "0.34.3",
+ "@img/sharp-linux-x64": "0.34.3",
+ "@img/sharp-linuxmusl-arm64": "0.34.3",
+ "@img/sharp-linuxmusl-x64": "0.34.3",
+ "@img/sharp-wasm32": "0.34.3",
+ "@img/sharp-win32-arm64": "0.34.3",
+ "@img/sharp-win32-ia32": "0.34.3",
+ "@img/sharp-win32-x64": "0.34.3"
+ }
+ },
"node_modules/node-releases": {
"version": "2.0.18",
"resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.18.tgz",
@@ -8573,9 +9073,9 @@
"license": "MIT"
},
"node_modules/semver": {
- "version": "7.6.3",
- "resolved": "https://registry.npmjs.org/semver/-/semver-7.6.3.tgz",
- "integrity": "sha512-oVekP1cKtI+CTDvHWYFUcMtsK/00wmAEfyqKfNdARm8u1wNVhSgaX7A8d4UuIlUI5e84iEwOhs7ZPYRmzU9U6A==",
+ "version": "7.7.2",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.2.tgz",
+ "integrity": "sha512-RF0Fw+rO5AMf9MAyaRXI4AV0Ulj5lMHqVxxdSgiVbixSCXoEmmX/jk0CuJw4+3SqroYO9VoUh+HcuJivvtJemA==",
"license": "ISC",
"bin": {
"semver": "bin/semver.js"
@@ -8790,14 +9290,6 @@
"dev": true,
"license": "MIT"
},
- "node_modules/streamsearch": {
- "version": "1.1.0",
- "resolved": "https://registry.npmjs.org/streamsearch/-/streamsearch-1.1.0.tgz",
- "integrity": "sha512-Mcc5wHehp9aXz1ax6bZUyY5afg9u2rv5cqQI3mRrYkGC8rW2hM02jWuwjtL++LS5qinSyhj2QfLyNsuc+VsExg==",
- "engines": {
- "node": ">=10.0.0"
- }
- },
"node_modules/string-width": {
"version": "5.1.2",
"resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz",
@@ -9710,9 +10202,9 @@
"license": "MIT"
},
"node_modules/vite": {
- "version": "6.3.4",
- "resolved": "https://registry.npmjs.org/vite/-/vite-6.3.4.tgz",
- "integrity": "sha512-BiReIiMS2fyFqbqNT/Qqt4CVITDU9M9vE+DKcVAsB+ZV0wvTKd+3hMbkpxz1b+NmEDMegpVbisKiAZOnvO92Sw==",
+ "version": "6.4.1",
+ "resolved": "https://registry.npmjs.org/vite/-/vite-6.4.1.tgz",
+ "integrity": "sha512-+Oxm7q9hDoLMyJOYfUYBuHQo+dkAloi33apOPP56pzj+vsdJDzr+j1NISE5pyaAuKL4A3UD34qd0lx5+kfKp2g==",
"dev": true,
"license": "MIT",
"dependencies": {
diff --git a/frontend/package.json b/frontend/package.json
index 50379a320..27421f713 100644
--- a/frontend/package.json
+++ b/frontend/package.json
@@ -44,7 +44,7 @@
"fuse.js": "^7.1.0",
"lucide-react": "^0.453.0",
"mini-svg-data-uri": "^1.4.4",
- "next": "15.2.4",
+ "next": "15.5.2",
"next-themes": "^0.3.0",
"nuqs": "^2.4.1",
"pocketbase": "^0.21.5",
diff --git a/frontend/public/json/add-iptag.json b/frontend/public/json/add-iptag.json
deleted file mode 100644
index da84c8509..000000000
--- a/frontend/public/json/add-iptag.json
+++ /dev/null
@@ -1,49 +0,0 @@
-{
- "name": "Proxmox VE LXC IP-Tag",
- "slug": "add-lxc-iptag",
- "categories": [
- 1
- ],
- "date_created": "2025-04-02",
- "type": "addon",
- "updateable": false,
- "privileged": false,
- "interface_port": null,
- "documentation": null,
- "website": null,
- "logo": "https://raw.githubusercontent.com/selfhst/icons/refs/heads/main/svg/proxmox.svg",
- "config_path": "",
- "description": "This script automatically adds IP address as tags to LXC containers using a Systemd service. The service also updates the tags if a LXC IP address is changed.",
- "install_methods": [
- {
- "type": "default",
- "script": "tools/addon/add-iptag.sh",
- "resources": {
- "cpu": null,
- "ram": null,
- "hdd": null,
- "os": null,
- "version": null
- }
- }
- ],
- "default_credentials": {
- "username": null,
- "password": null
- },
- "notes": [
- {
- "text": "Execute within the Proxmox shell",
- "type": "info"
- },
- {
- "text": "Configuration: `nano /opt/iptag/iptag.conf`. iptag.service must be restarted after change.",
- "type": "info"
- },
- {
- "text": "The Proxmox Node must contain ipcalc and net-tools. `apt-get install -y ipcalc net-tools`",
- "type": "warning"
- }
- ]
-}
-
diff --git a/frontend/public/json/cloudreve.json b/frontend/public/json/cloudreve.json
deleted file mode 100644
index ff754a105..000000000
--- a/frontend/public/json/cloudreve.json
+++ /dev/null
@@ -1,35 +0,0 @@
-{
- "name": "LibreNMS",
- "slug": "librenms",
- "categories": [
- 9
- ],
- "date_created": "2025-03-24",
- "type": "ct",
- "updateable": false,
- "privileged": false,
- "interface_port": 80,
- "documentation": "https://docs.librenms.org/",
- "website": "https://librenms.org/",
- "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons/webp/cloudreve.webp",
- "config_path": "/opt/librenms/config.php and /opt/librenms/.env",
- "description": "LibreNMS is an open-source, community-driven network monitoring system that provides automatic discovery, alerting, and performance tracking for network devices. It supports a wide range of hardware and integrates with various notification and logging platforms.",
- "install_methods": [
- {
- "type": "default",
- "script": "ct/librenms.sh",
- "resources": {
- "cpu": 2,
- "ram": 2048,
- "hdd": 4,
- "os": "Debian",
- "version": "12"
- }
- }
- ],
- "default_credentials": {
- "username": "admin",
- "password": "admin"
- },
- "notes": []
-}
diff --git a/frontend/public/json/dependency-check.json b/frontend/public/json/dependency-check.json
new file mode 100644
index 000000000..cb0612bd8
--- /dev/null
+++ b/frontend/public/json/dependency-check.json
@@ -0,0 +1,48 @@
+{
+ "name": "PVE Startup Dependency Check",
+ "slug": "dependency-check",
+ "categories": [
+ 1
+ ],
+ "date_created": "2025-08-12",
+ "type": "pve",
+ "updateable": false,
+ "privileged": false,
+ "interface_port": null,
+ "documentation": null,
+ "website": null,
+ "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons/webp/proxmox.webp",
+ "config_path": "/etc/default/pve-auto-hook",
+ "description": "This script checks for the presence of required dependencies before starting a VM or LXC container in Proxmox. It ensures that all referenced storages are available and, additionally, supports the usage of tags to check for specific dependencies. If any required dependency is missing, the VM or container will not start until the issue is resolved. This script is designed to be used as a Proxmox hookscript, which can be applied to both QEMU VMs and LXC containers.",
+ "install_methods": [
+ {
+ "type": "default",
+ "script": "tools/pve/dependency-check.sh",
+ "resources": {
+ "cpu": null,
+ "ram": null,
+ "hdd": null,
+ "os": null,
+ "version": null
+ }
+ }
+ ],
+ "default_credentials": {
+ "username": null,
+ "password": null
+ },
+ "notes": [
+ {
+ "text": "Execute within the Proxmox shell",
+ "type": "info"
+ },
+ {
+ "text": "To wait until a certain host is available, tag the VM or container with `dep_ping_<host>` where `<host>` is the name or IP of the host to ping. The script will wait until the host is reachable before proceeding with the startup.",
+ "type": "info"
+ },
+ {
+ "text": "To wait until a certain TCP port is open, tag the VM or container with `dep_tcp_<host>_<port>` where `<host>` is the name or IP of the host and `<port>` is the TCP port number. The script will wait until the port is open before proceeding with the startup.",
+ "type": "info"
+ }
+ ]
+}
diff --git a/frontend/public/json/donetick.json b/frontend/public/json/donetick.json
new file mode 100644
index 000000000..6cb64737a
--- /dev/null
+++ b/frontend/public/json/donetick.json
@@ -0,0 +1,35 @@
+{
+ "name": "Donetick",
+ "slug": "donetick",
+ "categories": [
+ 19
+ ],
+ "date_created": "2025-11-01",
+ "type": "ct",
+ "updateable": true,
+ "privileged": false,
+ "interface_port": 2021,
+ "documentation": "https://docs.donetick.com/getting-started/",
+ "config_path": "/opt/donetick/selfhosted.yml",
+ "website": "https://donetick.com",
+ "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons/webp/donetick.webp",
+ "description": "Donetick is an open-source, user-friendly app for managing tasks and chores, featuring customizable options to help you and others stay organized",
+ "install_methods": [
+ {
+ "type": "default",
+ "script": "ct/donetick.sh",
+ "resources": {
+ "cpu": 1,
+ "ram": 512,
+ "hdd": 2,
+ "os": "Debian",
+ "version": "13"
+ }
+ }
+ ],
+ "default_credentials": {
+ "username": null,
+ "password": null
+ },
+ "notes": []
+}
diff --git a/frontend/public/json/gitea-mirror.json b/frontend/public/json/gitea-mirror.json
deleted file mode 100644
index 5be5cc905..000000000
--- a/frontend/public/json/gitea-mirror.json
+++ /dev/null
@@ -1,35 +0,0 @@
-{
- "name": "Gitea-Mirror",
- "slug": "gitea-mirror",
- "categories": [
- 7
- ],
- "date_created": "2025-06-05",
- "type": "ct",
- "updateable": true,
- "privileged": false,
- "interface_port": 4321,
- "documentation": "https://github.com/arunavo4/gitea-mirror/",
- "config_path": "/etc/systemd/system/gitea-mirror.service",
- "website": "https://github.com/arunavo4/gitea-mirror/",
- "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons/webp/gitea-mirror.webp",
- "description": "Gitea Mirror auto-syncs GitHub repos to your self-hosted Gitea, with a sleek Web UI and easy Docker deployment. ",
- "install_methods": [
- {
- "type": "default",
- "script": "ct/gitea-mirror.sh",
- "resources": {
- "cpu": 2,
- "ram": 2048,
- "hdd": 6,
- "os": "Debian",
- "version": "12"
- }
- }
- ],
- "default_credentials": {
- "username": null,
- "password": null
- },
- "notes": []
-}
diff --git a/frontend/public/json/hanko.json b/frontend/public/json/hanko.json
deleted file mode 100644
index d8628ad87..000000000
--- a/frontend/public/json/hanko.json
+++ /dev/null
@@ -1,35 +0,0 @@
-{
- "name": "Hanko",
- "slug": "hanko",
- "categories": [
- 21
- ],
- "date_created": "2025-07-02",
- "type": "ct",
- "updateable": true,
- "privileged": false,
- "config_path": "/opt/hanko/.env",
- "interface_port": 3000,
- "documentation": "https://docs.hanko.io/",
- "website": "https://hanko.io/",
- "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons/svg/hanko.svg",
- "description": "Hanko is an open-source authentication solution providing passkey-first login with support for WebAuthn/FIDO2, biometrics and modern identity flows. Easy to self-host and integrate via API or widget.",
- "install_methods": [
- {
- "type": "default",
- "script": "ct/hanko.sh",
- "resources": {
- "cpu": 1,
- "ram": 1024,
- "hdd": 2,
- "os": "Debian",
- "version": "12"
- }
- }
- ],
- "default_credentials": {
- "username": null,
- "password": null
- },
- "notes": []
-}
diff --git a/frontend/public/json/healthchecks.json b/frontend/public/json/healthchecks.json
deleted file mode 100644
index ac5f3fe20..000000000
--- a/frontend/public/json/healthchecks.json
+++ /dev/null
@@ -1,35 +0,0 @@
-{
- "name": "Healthchecks",
- "slug": "healthchecks",
- "categories": [
- 9
- ],
- "date_created": "2025-07-02",
- "type": "ct",
- "updateable": true,
- "privileged": false,
- "config_path": "/opt/healthchecks/.env",
- "interface_port": 3000,
- "documentation": "https://healthchecks.io/",
- "website": "https://healthchecks.io/",
- "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons/svg/healthchecks.svg",
- "description": "Healthchecks is an open-source self-hosted application.",
- "install_methods": [
- {
- "type": "default",
- "script": "ct/healthchecks.sh",
- "resources": {
- "cpu": 1,
- "ram": 1024,
- "hdd": 2,
- "os": "Debian",
- "version": "12"
- }
- }
- ],
- "default_credentials": {
- "username": null,
- "password": null
- },
- "notes": []
-}
\ No newline at end of file
diff --git a/frontend/public/json/infisical.json b/frontend/public/json/infisical.json
new file mode 100644
index 000000000..8bb58ba87
--- /dev/null
+++ b/frontend/public/json/infisical.json
@@ -0,0 +1,35 @@
+{
+ "name": "Infisical",
+ "slug": "infisical",
+ "categories": [
+ 6
+ ],
+ "date_created": "2025-09-04",
+ "type": "ct",
+ "updateable": true,
+ "privileged": false,
+ "interface_port": 8080,
+ "documentation": "https://infisical.com/docs/documentation/getting-started/overview",
+ "config_path": "/etc/infisical/infisical.rb",
+ "website": "https://infisical.com/",
+ "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons/webp/infisical.webp",
+ "description": "Secrets, certificates, and access management on autopilot. All-in-one platform to securely manage application secrets, certificates, SSH keys, and configurations across your team and infrastructure.",
+ "install_methods": [
+ {
+ "type": "default",
+ "script": "ct/infisical.sh",
+ "resources": {
+ "cpu": 2,
+ "ram": 2048,
+ "hdd": 6,
+ "os": "Debian",
+ "version": "13"
+ }
+ }
+ ],
+ "default_credentials": {
+ "username": null,
+ "password": null
+ },
+ "notes": []
+}
diff --git a/frontend/public/json/jeedom.json b/frontend/public/json/jeedom.json
deleted file mode 100644
index 35fe87b09..000000000
--- a/frontend/public/json/jeedom.json
+++ /dev/null
@@ -1,51 +0,0 @@
-{
- "name": "Jeedom",
- "slug": "jeedom",
- "categories": [
- 16
- ],
- "date_created": "2025-03-06",
- "type": "ct",
- "updateable": false,
- "privileged": false,
- "interface_port": 80,
- "documentation": "https://doc.jeedom.com",
- "config_path": "",
- "website": "https://jeedom.com/",
- "logo": "https://jeedom.com/_next/image?url=%2Fassets%2Fimg%2Flogo.png&w=256&q=75",
- "description": "From individual homes to IoT infrastructures\n\nJeedom: the tailor-made open source solution",
- "install_methods": [
- {
- "type": "default",
- "script": "ct/jeedom.sh",
- "resources": {
- "cpu": 2,
- "ram": 2048,
- "hdd": 16,
- "os": "Debian",
- "version": "11"
- }
- },
- {
- "type": "default",
- "script": "ct/jeedom.sh",
- "resources": {
- "cpu": 2,
- "ram": 2048,
- "hdd": 16,
- "os": "Debian",
- "version": "12"
- }
- }
- ],
- "default_credentials": {
- "username": "admin",
- "password": "admin"
- },
- "notes": [
- {
- "text": "Only OS packages are updateable. To update Jeedom, please use the web interface.",
- "type": "info"
- }
- ]
-}
diff --git a/frontend/public/json/librespeed.json b/frontend/public/json/librespeed.json
deleted file mode 100644
index 713b2fe81..000000000
--- a/frontend/public/json/librespeed.json
+++ /dev/null
@@ -1,35 +0,0 @@
-{
- "name": "Librespeed",
- "slug": "librespeed",
- "categories": [
- 4
- ],
- "date_created": "2025-04-26",
- "type": "ct",
- "updateable": true,
- "privileged": false,
- "interface_port": 80,
- "documentation": "https://github.com/librespeed/speedtest/blob/master/doc.md",
- "config_path": "",
- "website": "https://librespeed.org",
- "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons/webp/librespeed.webp",
- "description": "No Flash, No Java, No Websocket, No Bullshit. This is a very lightweight speed test implemented in Javascript, using XMLHttpRequest and Web Workers.",
- "install_methods": [
- {
- "type": "default",
- "script": "ct/librespeed.sh",
- "resources": {
- "cpu": 1,
- "ram": 512,
- "hdd": 4,
- "os": "Debian",
- "version": "12"
- }
- }
- ],
- "default_credentials": {
- "username": "root",
- "password": null
- },
- "notes": []
-}
diff --git a/frontend/public/json/linkstack.json b/frontend/public/json/linkstack.json
deleted file mode 100644
index 7c7063451..000000000
--- a/frontend/public/json/linkstack.json
+++ /dev/null
@@ -1,44 +0,0 @@
-{
- "name": "LinkStack",
- "slug": "linkstack",
- "categories": [
- 9
- ],
- "date_created": "2025-05-21",
- "type": "ct",
- "updateable": false,
- "privileged": false,
- "config_path": "/var/www/html/linkstack/linkstack/.env",
- "interface_port": 80,
- "documentation": "https://docs.linkstack.org/",
- "website": "https://linkstack.org/",
- "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons/webp/linkstack.webp",
- "description": "LinkStack is an open-source, self-hosted alternative to Linktree, allowing users to create a customizable profile page to share multiple links, hosted on their own server.",
- "install_methods": [
- {
- "type": "default",
- "script": "ct/linkstack.sh",
- "resources": {
- "cpu": 1,
- "ram": 2048,
- "hdd": 10,
- "os": "Debian",
- "version": "12"
- }
- }
- ],
- "default_credentials": {
- "username": null,
- "password": null
- },
- "notes": [
- {
- "text": "LinkStack can be updated via the user interface.",
- "type": "info"
- },
- {
- "text": "Complete setup via the web interface at http:///. Check installation logs: `cat ~/linkstack-install.log`",
- "type": "info"
- }
- ]
-}
diff --git a/frontend/public/json/manyfold.json b/frontend/public/json/manyfold.json
deleted file mode 100644
index bb232666c..000000000
--- a/frontend/public/json/manyfold.json
+++ /dev/null
@@ -1,35 +0,0 @@
-{
- "name": "Manyfold",
- "slug": "manyfold",
- "categories": [
- 24
- ],
- "date_created": "2025-03-18",
- "type": "ct",
- "updateable": false,
- "privileged": false,
- "interface_port": 80,
- "documentation": "https://manyfold.app/sysadmin/",
- "website": "https://manyfold.app/",
- "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons/webp/manyfold.webp",
- "config_path": "",
- "description": "Manyfold is an open source, self-hosted web application for managing a collection of 3d models, particularly focused on 3d printing.",
- "install_methods": [
- {
- "type": "default",
- "script": "ct/manyfold.sh",
- "resources": {
- "cpu": 2,
- "ram": 1024,
- "hdd": 15,
- "os": "debian",
- "version": "12"
- }
- }
- ],
- "default_credentials": {
- "username": null,
- "password": null
- },
- "notes": []
-}
diff --git a/frontend/public/json/maybefinance.json b/frontend/public/json/maybefinance.json
deleted file mode 100644
index 9b35c3721..000000000
--- a/frontend/public/json/maybefinance.json
+++ /dev/null
@@ -1,35 +0,0 @@
-{
- "name": "Maybe Finance",
- "slug": "maybefinance",
- "categories": [
- 23
- ],
- "date_created": "2025-06-08",
- "type": "ct",
- "updateable": true,
- "privileged": false,
- "interface_port": 3000,
- "documentation": "https://github.com/maybe-finance/maybe/",
- "website": "https://maybefinance.com/",
- "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons/webp/maybe.webp",
- "config_path": "/opt/maybe/.env",
- "description": "Maybe is an all-in-one personal finance platform. Track, optimize, grow, and manage your money through every stage of life.",
- "install_methods": [
- {
- "type": "default",
- "script": "ct/maybefinance.sh",
- "resources": {
- "cpu": 2,
- "ram": 2048,
- "hdd": 4,
- "os": "debian",
- "version": "12"
- }
- }
- ],
- "default_credentials": {
- "username": null,
- "password": null
- },
- "notes": []
-}
diff --git a/frontend/public/json/miniflux.json b/frontend/public/json/miniflux.json
new file mode 100644
index 000000000..75a6f2551
--- /dev/null
+++ b/frontend/public/json/miniflux.json
@@ -0,0 +1,40 @@
+{
+ "name": "Miniflux",
+ "slug": "miniflux",
+ "categories": [
+ 13
+ ],
+ "date_created": "2025-09-24",
+ "type": "ct",
+ "updateable": true,
+ "privileged": false,
+ "config_path": "/etc/miniflux.conf",
+ "interface_port": 8080,
+ "documentation": "https://miniflux.app/docs/index.html",
+ "website": "https://miniflux.app/",
+ "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons/webp/miniflux-light.webp",
+ "description": "Miniflux is a minimalist and opinionated feed reader.",
+ "install_methods": [
+ {
+ "type": "default",
+ "script": "ct/miniflux.sh",
+ "resources": {
+ "cpu": 2,
+ "ram": 2048,
+ "hdd": 8,
+ "os": "Debian",
+ "version": "13"
+ }
+ }
+ ],
+ "default_credentials": {
+ "username": "admin",
+ "password": "randomly generated during installation process"
+ },
+ "notes": [
+ {
+ "text": "Admin password available as `ADMIN_PASSWORD` in `~/miniflux.creds`",
+ "type": "info"
+ }
+ ]
+}
diff --git a/frontend/public/json/notesnook.json b/frontend/public/json/notesnook.json
deleted file mode 100644
index 335520b05..000000000
--- a/frontend/public/json/notesnook.json
+++ /dev/null
@@ -1,40 +0,0 @@
-{
- "name": "Notesnook",
- "slug": "notesnook",
- "categories": [
- 12
- ],
- "date_created": "2025-05-27",
- "type": "ct",
- "updateable": true,
- "privileged": false,
- "interface_port": 443,
- "documentation": null,
- "config_path": "/",
- "website": "https://notesnook.com/",
- "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons/webp/notesnook.webp",
- "description": "Notesnook is a free (as in speech) & open-source note-taking app focused on user privacy & ease of use. To ensure zero knowledge principles, Notesnook encrypts everything on your device using XChaCha20-Poly1305 & Argon2.",
- "install_methods": [
- {
- "type": "default",
- "script": "ct/notesnook.sh",
- "resources": {
- "cpu": 2,
- "ram": 3072,
- "hdd": 10,
- "os": "Debian",
- "version": "12"
- }
- }
- ],
- "default_credentials": {
- "username": null,
- "password": null
- },
- "notes": [
- {
- "text": "Before doing update of the app, please make a backup in the application Web UI. You will need to restore this backup after update finishes!",
- "type": "warning"
- }
- ]
-}
diff --git a/frontend/public/json/openwebui.json b/frontend/public/json/openwebui.json
new file mode 100644
index 000000000..a7c5891fb
--- /dev/null
+++ b/frontend/public/json/openwebui.json
@@ -0,0 +1,44 @@
+{
+ "name": "Open WebUI",
+ "slug": "openwebui",
+ "categories": [
+ 20
+ ],
+ "date_created": "2024-10-24",
+ "type": "ct",
+ "updateable": true,
+ "privileged": false,
+ "interface_port": 8080,
+ "documentation": "https://docs.openwebui.com/",
+ "website": "https://openwebui.com/",
+ "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons/webp/open-webui.webp",
+ "config_path": "/root/.env",
+ "description": "OpenWebUI is a self-hosted, web-based interface that allows you to run AI models entirely offline. It integrates with various LLM runners, such as OpenAI and Ollama, and supports features like markdown and LaTeX rendering, model management, and voice/video calls. It also offers multilingual support and the ability to generate images using APIs like DALL-E or ComfyUI",
+ "install_methods": [
+ {
+ "type": "default",
+ "script": "ct/openwebui.sh",
+ "resources": {
+ "cpu": 4,
+ "ram": 8192,
+ "hdd": 25,
+ "os": "debian",
+ "version": "13"
+ }
+ }
+ ],
+ "default_credentials": {
+ "username": null,
+ "password": null
+ },
+ "notes": [
+ {
+ "text": "Script contains optional installation of Ollama.",
+ "type": "info"
+ },
+ {
+ "text": "Initial run of the application/container can take some time, depending on your host speed, as the application is installed/updated at runtime. Please be patient!",
+ "type": "warning"
+ }
+ ]
+}
diff --git a/frontend/public/json/pangolin.json b/frontend/public/json/pangolin.json
new file mode 100644
index 000000000..e197b36b5
--- /dev/null
+++ b/frontend/public/json/pangolin.json
@@ -0,0 +1,48 @@
+{
+ "name": "Pangolin",
+ "slug": "pangolin",
+ "categories": [
+ 21
+ ],
+ "date_created": "2025-09-04",
+ "type": "ct",
+ "updateable": true,
+ "privileged": false,
+ "interface_port": 3002,
+ "documentation": "https://docs.pangolin.net/",
+ "config_path": "/opt/pangolin/config/config.yml",
+ "website": "https://pangolin.net/",
+ "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons/webp/pangolin.webp",
+ "description": "Pangolin securely routes traffic over WireGuard tunnels to any private network. It works like a reverse proxy that spans multiple networks — no public IPs, DNS setup, or certificates required.",
+ "install_methods": [
+ {
+ "type": "default",
+ "script": "ct/pangolin.sh",
+ "resources": {
+ "cpu": 2,
+ "ram": 4096,
+ "hdd": 5,
+ "os": "Debian",
+ "version": "13"
+ }
+ }
+ ],
+ "default_credentials": {
+ "username": null,
+ "password": null
+ },
+ "notes": [
+ {
+ "text": "Type `journalctl -u pangolin | grep -oP 'Token:\\s*\\K\\w+'` into LXC console to get admin token which you will use to create admin account.",
+ "type": "info"
+ },
+ {
+ "text": "LXC has 4GB of RAM set initially for the build stage. After installation finishes, you can decrease the RAM allocated to 1024MB or 512MB even.",
+ "type": "info"
+ },
+ {
+ "text": "Make sure you edit `/opt/pangolin/config/config.yml` and change it to match your needs",
+ "type": "warning"
+ }
+ ]
+}
diff --git a/frontend/public/json/phpmyadmin.json b/frontend/public/json/phpmyadmin.json
deleted file mode 100644
index 6639b1c1e..000000000
--- a/frontend/public/json/phpmyadmin.json
+++ /dev/null
@@ -1,40 +0,0 @@
-{
- "name": "PhpMyAdmin",
- "slug": "phpmyadmin",
- "categories": [
- 8
- ],
- "date_created": "2025-04-29",
- "type": "addon",
- "updateable": true,
- "privileged": false,
- "interface_port": null,
- "documentation": "https://www.phpmyadmin.net/docs/",
- "config_path": "Debian/Ubuntu: /var/www/html/phpMyAdmin | Alpine: /usr/share/phpmyadmin",
- "website": "https://www.phpmyadmin.net/",
- "logo": "https://raw.githubusercontent.com/selfhst/icons/refs/heads/main/webp/phpmyadmin-light.webp",
- "description": "phpMyAdmin is a free software tool written in PHP, intended to handle the administration of MySQL over the Web. phpMyAdmin supports a wide range of operations on MySQL and MariaDB. Frequently used operations (managing databases, tables, columns, relations, indexes, users, permissions, etc) can be performed via the user interface, while you still have the ability to directly execute any SQL statement.",
- "install_methods": [
- {
- "type": "default",
- "script": "tools/addon/phpmyadmin.sh",
- "resources": {
- "cpu": null,
- "ram": null,
- "hdd": null,
- "os": null,
- "version": null
- }
- }
- ],
- "default_credentials": {
- "username": null,
- "password": null
- },
- "notes": [
- {
- "text": "Execute within an existing LXC Console",
- "type": "warning"
- }
- ]
-}
diff --git a/frontend/public/json/postiz.json b/frontend/public/json/postiz.json
deleted file mode 100644
index dfcb0f045..000000000
--- a/frontend/public/json/postiz.json
+++ /dev/null
@@ -1,35 +0,0 @@
-{
- "name": "Postiz",
- "slug": "postiz",
- "categories": [
- 20
- ],
- "date_created": "2025-07-02",
- "type": "ct",
- "updateable": true,
- "privileged": false,
- "config_path": "/opt/postiz/.env",
- "interface_port": 3000,
- "documentation": "https://postiz.io/",
- "website": "https://postiz.io/",
- "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons/svg/postiz.svg",
- "description": "Postiz is an open-source self-hosted application.",
- "install_methods": [
- {
- "type": "default",
- "script": "ct/postiz.sh",
- "resources": {
- "cpu": 1,
- "ram": 1024,
- "hdd": 2,
- "os": "Debian",
- "version": "12"
- }
- }
- ],
- "default_credentials": {
- "username": null,
- "password": null
- },
- "notes": []
-}
\ No newline at end of file
diff --git a/frontend/public/json/reactive-resume.json b/frontend/public/json/reactive-resume.json
deleted file mode 100644
index 5c99acfb9..000000000
--- a/frontend/public/json/reactive-resume.json
+++ /dev/null
@@ -1,36 +0,0 @@
-{
- "name": "Reactive Resume",
- "slug": "reactive-resume",
- "categories": [
- 12
- ],
- "date_created": "2025-04-22",
- "type": "ct",
- "updateable": true,
- "privileged": false,
- "interface_port": 3000,
- "documentation": "https://docs.rxresume.org/",
- "website": "https://rxresume.org",
- "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons/png/reactive-resume-light.png",
- "config_path": "/opt/reactive-resume/.env",
- "description": "A one-of-a-kind resume builder that keeps your privacy in mind. Completely secure, customizable, portable, open-source and free forever.",
- "install_methods": [
- {
- "type": "default",
- "script": "ct/reactive-resume.sh",
- "resources": {
- "cpu": 2,
- "ram": 3072,
- "hdd": 8,
- "os": "Debian",
- "version": "12"
- }
- }
- ],
- "default_credentials": {
- "username": null,
- "password": null
- },
- "notes": []
-}
-
diff --git a/frontend/public/json/romm.json b/frontend/public/json/romm.json
new file mode 100644
index 000000000..364a7ea2d
--- /dev/null
+++ b/frontend/public/json/romm.json
@@ -0,0 +1,35 @@
+{
+ "name": "RomM",
+ "slug": "romm",
+ "categories": [
+ 21
+ ],
+ "date_created": "2025-03-10",
+ "type": "ct",
+ "updateable": true,
+ "privileged": false,
+ "interface_port": 8080,
+ "documentation": "https://docs.romm.app/latest/",
+ "website": "https://romm.app/",
+ "config_path": "/opt",
+ "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons/webp/romm.webp",
+ "description": "RomM (ROM Manager) allows you to scan, enrich, browse and play your game collection with a clean and responsive interface. Support for multiple platforms, various naming schemes, and custom tags.",
+ "install_methods": [
+ {
+ "type": "default",
+ "script": "ct/romm.sh",
+ "resources": {
+ "cpu": 2,
+ "ram": 4096,
+ "hdd": 20,
+ "os": "ubuntu",
+ "version": "24.04"
+ }
+ }
+ ],
+ "default_credentials": {
+ "username": "romm",
+ "password": "changeme"
+ },
+ "notes": []
+}
diff --git a/frontend/public/json/salt.json b/frontend/public/json/salt.json
deleted file mode 100644
index 4c162abab..000000000
--- a/frontend/public/json/salt.json
+++ /dev/null
@@ -1,35 +0,0 @@
-{
- "name": "Salt",
- "slug": "salt",
- "categories": [
- 1
- ],
- "date_created": "2025-07-02",
- "type": "ct",
- "updateable": true,
- "privileged": false,
- "config_path": "/opt/salt/.env",
- "interface_port": 3000,
- "documentation": "https://docs.saltproject.io/salt/install-guide/en/latest/",
- "website": "https://saltproject.io/",
- "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons/svg/saltmaster.svg",
- "description": "Saltmaster is an open-source self-hosted application.",
- "install_methods": [
- {
- "type": "default",
- "script": "ct/salt.sh",
- "resources": {
- "cpu": 1,
- "ram": 1024,
- "hdd": 2,
- "os": "Debian",
- "version": "12"
- }
- }
- ],
- "default_credentials": {
- "username": null,
- "password": null
- },
- "notes": []
-}
diff --git a/frontend/public/json/scraparr.json b/frontend/public/json/scraparr.json
deleted file mode 100644
index 731618e39..000000000
--- a/frontend/public/json/scraparr.json
+++ /dev/null
@@ -1,40 +0,0 @@
-{
- "name": "Scraparr",
- "slug": "scraparr",
- "categories": [
- 14
- ],
- "date_created": "2024-05-02",
- "type": "ct",
- "updateable": true,
- "privileged": false,
- "interface_port": 7100,
- "documentation": "https://github.com/thecfu/scraparr/blob/main/README.md",
- "website": "https://github.com/thecfu/scraparr",
- "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons/svg/scraparr-dark.svg",
- "config_path": "/scraparr/config/config.yaml",
- "description": "Scraparr is a Prometheus exporter for the *arr suite (Sonarr, Radarr, Lidarr, etc.). It provides metrics that can be scraped by Prometheus to monitor and visualize the health and performance of your *arr applications.",
- "install_methods": [
- {
- "type": "default",
- "script": "ct/scraparr.sh",
- "resources": {
- "cpu": 2,
- "ram": 1024,
- "hdd": 4,
- "os": "debian",
- "version": "12"
- }
- }
- ],
- "default_credentials": {
- "username": null,
- "password": null
- },
- "notes": [
- {
- "text": "Edit config file then restart the scraparr service: `systemctl restart scraparr`",
- "type": "info"
- }
- ]
-}
diff --git a/frontend/public/json/snowshare.json b/frontend/public/json/snowshare.json
new file mode 100644
index 000000000..40433298c
--- /dev/null
+++ b/frontend/public/json/snowshare.json
@@ -0,0 +1,35 @@
+{
+ "name": "SnowShare",
+ "slug": "snowshare",
+ "categories": [
+ 11
+ ],
+ "date_created": "2025-09-24",
+ "type": "ct",
+ "updateable": true,
+ "privileged": false,
+ "interface_port": 3000,
+ "documentation": "https://github.com/TuroYT/snowshare",
+ "config_path": "/opt/snowshare/.env",
+ "website": "https://github.com/TuroYT/snowshare",
+ "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons/png/snowshare.png",
+ "description": "A modern, secure file and link sharing platform built with Next.js, Prisma, and NextAuth. Share URLs, code snippets, and files with customizable expiration, privacy, and QR codes.",
+ "install_methods": [
+ {
+ "type": "default",
+ "script": "ct/snowshare.sh",
+ "resources": {
+ "cpu": 1,
+ "ram": 1024,
+ "hdd": 5,
+ "os": "Debian",
+ "version": "13"
+ }
+ }
+ ],
+ "default_credentials": {
+ "username": null,
+ "password": null
+ },
+ "notes": []
+}
diff --git a/frontend/public/json/transmission-openvpn.json b/frontend/public/json/transmission-openvpn.json
new file mode 100644
index 000000000..965122f8e
--- /dev/null
+++ b/frontend/public/json/transmission-openvpn.json
@@ -0,0 +1,40 @@
+{
+ "name": "Transmission-Openvpn",
+ "slug": "transmission-openvpn",
+ "categories": [
+ 11
+ ],
+ "date_created": "2025-09-04",
+ "type": "ct",
+ "updateable": true,
+ "privileged": false,
+ "interface_port": 9091,
+ "documentation": "https://haugene.github.io/docker-transmission-openvpn/",
+ "config_path": "/opt/transmission-openvpn/",
+ "website": "https://github.com/haugene/docker-transmission-openvpn",
+ "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons/webp/transmission.webp",
+ "description": "This project runs Transmission + OpenVPN natively in an LXC container, using the popular docker-transmission-openvpn image as a base. It ensures all torrent traffic is securely routed through a VPN tunnel, supports a wide range of VPN providers, and offers flexible configuration options",
+ "install_methods": [
+ {
+ "type": "default",
+ "script": "ct/transmission-openvpn.sh",
+ "resources": {
+ "cpu": 1,
+ "ram": 512,
+ "hdd": 8,
+ "os": "Debian",
+ "version": "13"
+ }
+ }
+ ],
+ "default_credentials": {
+ "username": null,
+ "password": null
+ },
+ "notes": [
+ {
+ "text": "This application requires a VPN provider to work. Please refer to your VPN provider’s documentation for setting up OpenVPN.",
+ "type": "warning"
+ }
+ ]
+}
diff --git a/frontend/public/json/update-apps.json b/frontend/public/json/update-apps.json
new file mode 100644
index 000000000..62104d038
--- /dev/null
+++ b/frontend/public/json/update-apps.json
@@ -0,0 +1,56 @@
+{
+ "name": "PVE LXC Apps Update",
+ "slug": "update-apps",
+ "categories": [
+ 1
+ ],
+ "date_created": "2025-09-15",
+ "type": "pve",
+ "updateable": true,
+ "privileged": false,
+ "interface_port": null,
+ "documentation": null,
+ "website": null,
+ "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons/webp/proxmox.webp",
+ "config_path": "",
+ "description": "This script updates community-scripts managed LXC containers on a Proxmox VE node. It detects the installed service, verifies available update scripts, and applies updates interactively or unattended. Optionally, containers can be backed up before the update process. If additional build resources (CPU/RAM) are required, the script adjusts container resources temporarily and restores them after the update. Containers requiring a reboot will be listed at the end of the process.",
+ "install_methods": [
+ {
+ "type": "default",
+ "script": "tools/pve/update-apps.sh",
+ "resources": {
+ "cpu": null,
+ "ram": null,
+ "hdd": null,
+ "os": null,
+ "version": null
+ }
+ }
+ ],
+ "default_credentials": {
+ "username": null,
+ "password": null
+ },
+ "notes": [
+ {
+ "text": "Execute within the Proxmox shell.",
+ "type": "info"
+ },
+ {
+ "text": "Only containers with `community-script` or `proxmox-helper-scripts` tags are listed for update.",
+ "type": "info"
+ },
+ {
+ "text": "Optionally performs a vzdump backup before updating containers.",
+ "type": "warning"
+ },
+ {
+ "text": "If required, the script will temporarily increase container CPU/RAM resources for the build process and restore them after completion.",
+ "type": "info"
+ },
+ {
+ "text": "At the end of the update, containers requiring a reboot will be listed, and you may choose to reboot them directly.",
+ "type": "info"
+ }
+ ]
+}
diff --git a/frontend/public/json/vikunja.json b/frontend/public/json/vikunja.json
deleted file mode 100644
index ea1711406..000000000
--- a/frontend/public/json/vikunja.json
+++ /dev/null
@@ -1,35 +0,0 @@
-{
- "name": "Vikunja",
- "slug": "vikunja",
- "categories": [
- 12
- ],
- "date_created": "2024-11-05",
- "type": "ct",
- "updateable": true,
- "privileged": false,
- "interface_port": 3456,
- "documentation": null,
- "website": "https://vikunja.io/",
- "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons/webp/vikunja.webp",
- "config_path": "/etc/vikunja/config.yml",
- "description": "Vikunja is a powerful self-hosted todo app. It allows you to create and manage to-do lists. You can plan tasks, set priorities and collaborate with others. The best part is that your data is safe with you and you can customize the app to your liking. It's like a personal assistant that helps you stay organized.",
- "install_methods": [
- {
- "type": "default",
- "script": "ct/vikunja.sh",
- "resources": {
- "cpu": 1,
- "ram": 1024,
- "hdd": 4,
- "os": "debian",
- "version": "12"
- }
- }
- ],
- "default_credentials": {
- "username": null,
- "password": null
- },
- "notes": []
-}
diff --git a/frontend/src/app/data-dev/page.tsx b/frontend/src/app/data-dev/page.tsx
index 8b53cade8..0559e5bb1 100644
--- a/frontend/src/app/data-dev/page.tsx
+++ b/frontend/src/app/data-dev/page.tsx
@@ -29,7 +29,7 @@ const DataFetcher: React.FC = () => {
const fetchPaginatedData = async () => {
setLoading(true);
try {
- const response = await fetch(`https://api.htl-braunau.at/dev/data/paginated?page=${currentPage}&limit=${itemsPerPage === 0 ? '' : itemsPerPage}`);
+ const response = await fetch(`https://api.htl-braunau.at/data/paginated?page=${currentPage}&limit=${itemsPerPage === 0 ? '' : itemsPerPage}`);
if (!response.ok) throw new Error(`Failed to fetch data: ${response.statusText}`);
const result: DataModel[] = await response.json();
setData(result);
diff --git a/frontend/src/config/siteConfig.tsx b/frontend/src/config/siteConfig.tsx
index fabc0ed20..6ee43d1b0 100644
--- a/frontend/src/config/siteConfig.tsx
+++ b/frontend/src/config/siteConfig.tsx
@@ -58,8 +58,8 @@ export const OperatingSystems: OperatingSystem[] = [
{
name: "Debian",
versions: [
- { name: "11", slug: "bullseye" },
{ name: "12", slug: "bookworm" },
+ { name: "13", slug: "trixie" },
],
},
{
diff --git a/install/alpine-install.sh b/install/alpine-install.sh
index 2916be18f..c8c95c5e0 100644
--- a/install/alpine-install.sh
+++ b/install/alpine-install.sh
@@ -21,5 +21,6 @@ $STD apk add nano
$STD apk add mc
msg_ok "Installed Dependencies"
+
motd_ssh
customize
diff --git a/install/alpine-ntfy-install.sh b/install/alpine-ntfy-install.sh
new file mode 100644
index 000000000..04aeddc07
--- /dev/null
+++ b/install/alpine-ntfy-install.sh
@@ -0,0 +1,26 @@
+#!/usr/bin/env bash
+
+# Copyright (c) 2021-2025 community-scripts ORG
+# Author: cobalt (cobaltgit)
+# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
+# Source: https://ntfy.sh/
+
+source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
+color
+verb_ip6
+catch_errors
+setting_up_container
+network_check
+update_os
+
+msg_info "Installing ntfy"
+$STD apk add --no-cache ntfy ntfy-openrc libcap
+sed -i '/^listen-http/s/^\(.*\)$/#\1\n/' /etc/ntfy/server.yml
+setcap 'cap_net_bind_service=+ep' /usr/bin/ntfy
+$STD rc-update add ntfy default
+$STD service ntfy start
+msg_ok "Installed ntfy"
+
+motd_ssh
+customize
+
diff --git a/install/asterisk-install.sh b/install/asterisk-install.sh
new file mode 100644
index 000000000..202c12e10
--- /dev/null
+++ b/install/asterisk-install.sh
@@ -0,0 +1,114 @@
+#!/usr/bin/env bash
+
+# Copyright (c) 2021-2025 community-scripts ORG
+# Author: michelroegl-brunner
+# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
+# Source: https://asterisk.org
+
+source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
+color
+verb_ip6
+catch_errors
+setting_up_container
+network_check
+update_os
+
+ASTERISK_VERSIONS_URL="https://www.asterisk.org/downloads/asterisk/all-asterisk-versions/"
+html=$(curl -fsSL "$ASTERISK_VERSIONS_URL")
+
+LTS_VERSION=""
+for major in 20 22 24 26; do
+ block=$(echo "$html" | awk "/Asterisk $major - LTS/,//" || true)
+ ver=$(echo "$block" | grep -oE 'Download Latest - [0-9]+\.[0-9]+(\.[0-9]+)?' | head -n1 | sed -E 's/.* - //' || true)
+ if [ -n "$ver" ]; then
+ LTS_VERSION="$LTS_VERSION $ver"
+ fi
+ unset ver block
+done
+LTS_VERSION=$(echo "$LTS_VERSION" | xargs | tr ' ' '\n' | sort -V | tail -n1)
+
+STD_VERSION=""
+for major in 21 23 25 27; do
+ block=$(echo "$html" | grep -A 20 "Asterisk $major" | head -n 20 || true)
+ ver=$(echo "$block" | grep -oE 'Download (Latest - )?'"$major"'\.[0-9]+\.[0-9]+' | head -n1 | sed -E 's/Download (Latest - )?//' || true)
+ if [ -n "$ver" ]; then
+ STD_VERSION="$STD_VERSION $ver"
+ fi
+ unset ver block
+done
+STD_VERSION=$(echo "$STD_VERSION" | xargs | tr ' ' '\n' | sort -V | tail -n1)
+
+cert_block=$(echo "$html" | awk '/Certified Asterisk/,//')
+CERT_VERSION=$(echo "$cert_block" | grep -oE 'Download Latest - [0-9]+\.[0-9]+-cert[0-9]+' | head -n1 | sed -E 's/.* - //' || true)
+
-cat <<EOF >/etc/systemd/system/cloudreve.service
-[Unit]
-Description=Cloudreve Service
-After=network.target
-
-[Service]
-Type=simple
-ExecStart=/opt/cloudreve/cloudreve
-Restart=on-failure
-RestartSec=5
-
-[Install]
-WantedBy=multi-user.target
-EOF
-
-systemctl enable -q --now cloudreve
-msg_ok "Service Setup"
-
-motd_ssh
-customize
-
-msg_info "Cleaning up"
-$STD apt-get -y autoremove
-$STD apt-get -y autoclean
-msg_ok "Cleaned"
diff --git a/install/debian-install.sh b/install/debian-install.sh
index e62591849..aedf72fc0 100644
--- a/install/debian-install.sh
+++ b/install/debian-install.sh
@@ -1,9 +1,9 @@
#!/usr/bin/env bash
-# Copyright (c) 2021-2025 tteck
-# Author: tteck (tteckster)
-# License: MIT
-# https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
+# Copyright (c) 2021-2025 community-scripts ORG
+# Author: MickLesk (CanbiZ)
+# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
+# Source:
source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
color
@@ -13,34 +13,14 @@ setting_up_container
network_check
update_os
-msg_info "Installing Dependencies"
-$STD apt-get install -y gpg
-msg_ok "Installed Dependencies"
+msg_info "Installing Base Dependencies"
+$STD apt-get install -y curl wget ca-certificates
+msg_ok "Installed Base Dependencies"
-#setup_mariadb
-
-#FFMPEG_VERSION="n7.1.1" FFMPEG_TYPE="full" setup_ffmpeg
-
-#fetch_and_deploy_gh_release "argus" "release-argus/Argus" "singlefile" "latest" "/opt/argus" "Argus-.*linux-amd64"
-#fetch_and_deploy_gh_release "planka" "plankanban/planka" "prebuild" "latest" "/opt/planka" "planka-prebuild.zip"
-
-#PYTHON_VERSION="3.12" setup_uv
-
-#PHP_VERSION=8.2 PHP_FPM=YES setup_php
-#setup_composer
-
-# Example Setting for Test
-#NODE_MODULE="pnpm@10.1,yarn"
-#RELEASE=$(curl_handler -fsSL https://api.github.com/repos/babybuddy/babybuddy/releases/latest | grep "tag_name" | awk '{print substr($2, 3, length($2)-4) }')
-#msg_ok "Get Release $RELEASE"
-#NODE_VERSION="24" NODE_MODULE="yarn" setup_nodejs
-
-#PG_VERSION="16" setup_postgresql
+# msg_info "Downloading and executing tools.func test suite"
+# bash <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/test-tools-func.sh)
+# msg_ok "Test suite completed"
motd_ssh
customize
-
-msg_info "Cleaning up"
-$STD apt-get -y autoremove
-$STD apt-get -y autoclean
-msg_ok "Cleaned"
+cleanup_lxc
diff --git a/install/docspell-install.sh b/install/deferred/docspell-install.sh
similarity index 59%
rename from install/docspell-install.sh
rename to install/deferred/docspell-install.sh
index b9bb74ad8..43534d727 100644
--- a/install/docspell-install.sh
+++ b/install/deferred/docspell-install.sh
@@ -20,27 +20,23 @@ msg_ok "Setup Functions"
msg_info "Installing Dependencies (Patience)"
$STD apt-get install -y \
htop \
- gnupg2 \
ca-certificates \
- default-jdk \
apt-transport-https \
- ghostscript \
- tesseract-ocr \
- tesseract-ocr-deu \
- tesseract-ocr-eng \
- unpaper \
- unoconv \
- wkhtmltopdf \
- ocrmypdf
+ tesseract-ocr
+#tesseract-ocr-deu \
+#tesseract-ocr-eng \
+#unpaper \
+#unoconv \
+#wkhtmltopdf \
+#ocrmypdf
msg_ok "Installed Dependencies"
-msg_info "Setting up PostgreSQL Repository"
-curl -fsSL https://www.postgresql.org/media/keys/ACCC4CF8.asc | gpg --dearmor -o /etc/apt/trusted.gpg.d/postgresql.gpg
-echo "deb https://apt.postgresql.org/pub/repos/apt bookworm-pgdg main" >/etc/apt/sources.list.d/pgdg.list
-$STD apt-get update
-msg_ok "Set up PostgreSQL Repository"
+setup_gs
+JAVA_VERSION="21" setup_java
+POSTGRES_VERSION="16" setup_postgresql
+setup_yq
-msg_info "Install/Set up PostgreSQL Database"
+msg_info "Set up PostgreSQL Database"
$STD apt-get install -y postgresql-16
DB_NAME=docspell_db
DB_USER=docspell_usr
@@ -58,23 +54,13 @@ $STD sudo -u postgres psql -c "ALTER ROLE $DB_USER SET timezone TO 'UTC';"
} >>~/docspell.creds
msg_ok "Set up PostgreSQL Database"
-msg_info "Setup Docspell (Patience)"
-mkdir -p /opt/docspell
-Docspell=$(curl -fsSL https://github.com/eikek/docspell/releases/latest -o - | grep "title>Release" | cut -d " " -f 5)
-DocspellDSC=$(curl -fsSL https://github.com/docspell/dsc/releases/latest -o - | grep "title>Release" | cut -d " " -f 4 | sed 's/^v//')
-cd /opt
-curl -fsSL https://github.com/eikek/docspell/releases/download/v${Docspell}/docspell-joex_${Docspell}_all.deb -o docspell-joex_${Docspell}_all.deb
-curl -fsSL https://github.com/eikek/docspell/releases/download/v${Docspell}/docspell-restserver_${Docspell}_all.deb -o docspell-restserver_${Docspell}_all.deb
-$STD dpkg -i docspell-*.deb
-curl -fsSL https://github.com/docspell/dsc/releases/download/v${DocspellDSC}/dsc_amd64-musl-${DocspellDSC} -o dsc_amd64-musl-${DocspellDSC}
-mv dsc_amd* dsc
-chmod +x dsc
-mv dsc /usr/bin
+fetch_and_deploy_gh_release "docspell-joex" "eikek/docspell" "binary" "latest" "/opt/docspell-joex" "docspell-joex_*all.deb"
+fetch_and_deploy_gh_release "docspell-restserver" "eikek/docspell" "binary" "latest" "/opt/docspell-restserver" "docspell-restserver_*all.deb"
+fetch_and_deploy_gh_release "docspell-dsc" "docspell/dsc" "singlefile" "latest" "/usr/bin" "dsc"
+fetch_and_deploy_gh_release "apache-solr" "apache/solr" "tarball" "latest" "/opt/docspell"
+
+msg_info "Setup Docspell"
ln -s /etc/docspell-joex /opt/docspell/docspell-joex && ln -s /etc/docspell-restserver /opt/docspell/docspell-restserver && ln -s /usr/bin/dsc /opt/docspell/dsc
-curl -fsSL https://github.com/mikefarah/yq/releases/latest/download/yq_linux_amd64 -o /usr/bin/yq
-chmod +x /usr/bin/yq
-#JOEX_CONF="/usr/share/docspell-joex/conf/docspell-joex.conf"
-#SERVER_CONF="/usr/share/docspell-restserver/conf/docspell-server.conf"
sed -i \
-e '11s|localhost|'"$LOCAL_IP"'|' \
-e '17s|localhost|'"$LOCAL_IP"'|' \
@@ -94,27 +80,6 @@ sed -i \
-e '358s|password = .*|password = "'"$DB_PASS"'"|' \
-e '401s|url = .*|url = "jdbc:postgresql://localhost:5432/'"$DB_NAME"'"|' \
/usr/share/docspell-restserver/conf/docspell-server.conf
-
-# sed -i 's|address = "localhost"|address = "0.0.0.0"|' "$JOEX_CONF" "$SERVER_CONF"
-# sed -i -E '/backend\s*\{/,/\}/ {
-# /jdbc\s*\{/,/\}/ {
-# s|(url\s*=\s*).*|\1"jdbc:postgresql://localhost:5432/'"$DB_NAME"'"|;
-# s|(user\s*=\s*).*|\1"'"$DB_USER"'"|;
-# s|(password\s*=\s*).*|\1"'"$DB_PASS"'"|;
-# }
-# }' "$SERVER_CONF"
-# sed -i -E '/postgresql\s*\{/,/\}/ {
-# /jdbc\s*\{/,/\}/ {
-# s|(url\s*=\s*).*|\1"jdbc:postgresql://localhost:5432/'"$DB_NAME"'"|;
-# s|(user\s*=\s*).*|\1"'"$DB_USER"'"|;
-# s|(password\s*=\s*).*|\1"'"$DB_PASS"'"|;
-# }
-# }' "$SERVER_CONF"
-# sed -i -E '/jdbc\s*\{/,/\}/ {
-# s|(url\s*=\s*).*|\1"jdbc:postgresql://localhost:5432/'"$DB_NAME"'"|;
-# s|(user\s*=\s*).*|\1"'"$DB_USER"'"|;
-# s|(password\s*=\s*).*|\1"'"$DB_PASS"'"|;
-# }' "$JOEX_CONF"
msg_ok "Setup Docspell"
msg_info "Setup Apache Solr"
diff --git a/install/deferred/freepbx-install_backup.sh b/install/deferred/freepbx-install_backup.sh
index b151a7bb8..a5d41953c 100644
--- a/install/deferred/freepbx-install_backup.sh
+++ b/install/deferred/freepbx-install_backup.sh
@@ -15,62 +15,62 @@ update_os
msg_info "Installing Dependencies"
$STD apt-get install -y \
- build-essential \
- git \
- libnewt-dev \
- libssl-dev \
- libncurses5-dev \
- subversion \
- libsqlite3-dev \
- libjansson-dev \
- libxml2-dev \
- uuid-dev \
- default-libmysqlclient-dev \
- htop \
- sngrep \
- lame \
- ffmpeg \
- mpg123 \
- vim \
- expect \
- openssh-server \
- apache2 \
- mariadb-server \
- mariadb-client \
- bison \
- flex \
- php8.2 \
- php8.2-{curl,cli,common,mysql,gd,mbstring,intl,xml} \
- php-pear \
- sox \
- sqlite3 \
- pkg-config \
- automake \
- libtool \
- autoconf \
- unixodbc-dev \
- uuid \
- libasound2-dev \
- libogg-dev \
- libvorbis-dev \
- libicu-dev \
- libcurl4-openssl-dev \
- odbc-mariadb \
- libical-dev \
- libneon27-dev \
- libsrtp2-dev \
- libspandsp-dev \
- subversion \
- libtool-bin \
- python-dev-is-python3 \
- unixodbc \
- software-properties-common \
- nodejs \
- npm \
- ipset \
- iptables \
- fail2ban \
- php-soap
+ build-essential \
+ git \
+ libnewt-dev \
+ libssl-dev \
+ libncurses5-dev \
+ subversion \
+ libsqlite3-dev \
+ libjansson-dev \
+ libxml2-dev \
+ uuid-dev \
+ default-libmysqlclient-dev \
+ htop \
+ sngrep \
+ lame \
+ ffmpeg \
+ mpg123 \
+ vim \
+ expect \
+ openssh-server \
+ apache2 \
+ mariadb-server \
+ mariadb-client \
+ bison \
+ flex \
+ php8.2 \
+ php8.2-{curl,cli,common,mysql,gd,mbstring,intl,xml} \
+ php-pear \
+ sox \
+ sqlite3 \
+ pkg-config \
+ automake \
+ libtool \
+ autoconf \
+ unixodbc-dev \
+ uuid \
+ libasound2-dev \
+ libogg-dev \
+ libvorbis-dev \
+ libicu-dev \
+ libcurl4-openssl-dev \
+ odbc-mariadb \
+ libical-dev \
+ libneon27-dev \
+ libsrtp2-dev \
+ libspandsp-dev \
+ subversion \
+ libtool-bin \
+ python-dev-is-python3 \
+ unixodbc \
+ software-properties-common \
+ nodejs \
+ npm \
+ ipset \
+ iptables \
+ fail2ban \
+ php-soap
msg_ok "Installed Dependencies"
msg_info "Installing Asterisk (Patience)"
diff --git a/install/deferred/funkwhale-install.sh b/install/deferred/funkwhale-install.sh
index 78f12de7f..573abe1ea 100644
--- a/install/deferred/funkwhale-install.sh
+++ b/install/deferred/funkwhale-install.sh
@@ -5,7 +5,7 @@
# License: MIT
# https://github.com/tteck/Proxmox/raw/main/LICENSE
-source /dev/stdin <<< "$FUNCTIONS_FILE_PATH"
+source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
color
verb_ip6
catch_errors
@@ -15,36 +15,36 @@ update_os
msg_info "Installing Python3"
$STD apt-get install -y --no-install-recommends \
- python3 \
- python3-dev \
- python3-setuptools \
- python3-venv
+ python3 \
+ python3-dev \
+ python3-setuptools \
+ python3-venv
msg_ok "Installed Python3"
msg_info "Installing Dependencies (Patience)"
$STD apt-get install -y --no-install-recommends \
- redis \
- postgresql \
- postgresql-contrib \
- postgresql-client \
- build-essential \
- gnupg \
- ffmpeg \
- libjpeg-dev \
- libpq-dev \
- libmagic-dev \
- libzbar0 \
- poppler-utils \
- automake \
- libtool \
- pkg-config \
- curl \
- libtiff-dev \
- libpng-dev \
- libleptonica-dev \
- sudo \
- make \
- mc
+ redis \
+ postgresql \
+ postgresql-contrib \
+ postgresql-client \
+ build-essential \
+ gnupg \
+ ffmpeg \
+ libjpeg-dev \
+ libpq-dev \
+ libmagic-dev \
+ libzbar0 \
+ poppler-utils \
+ automake \
+ libtool \
+ pkg-config \
+ curl \
+ libtiff-dev \
+ libpng-dev \
+ libleptonica-dev \
+ sudo \
+ make \
+ mc
msg_ok "Installed Dependencies"
msg_info "Setup Funkwhale Dependencies (Patience)"
@@ -78,7 +78,7 @@ $STD sudo venv/bin/pip install --editable ./api
$STD sudo curl -L -o /opt/funkwhale/config/.env "https://dev.funkwhale.audio/funkwhale/funkwhale/raw/$FUNKWHALE_VERSION/deploy/env.prod.sample"
secret_key=$(openssl rand -base64 45 | sed 's/\//\\\//g')
sudo sed -i "s/DJANGO_SECRET_KEY=.*/DJANGO_SECRET_KEY=$secret_key/" /opt/funkwhale/config/.env
-sudo sed -i 's/# CACHE_URL=redis:\/\/127.0.0.1:6379\/0/CACHE_URL=redis:\/\/127.0.0.1:6379\/0/' /opt/funkwhale/config/.env #Remove #Hashtag From Config for Debian
+sudo sed -i 's/# CACHE_URL=redis:\/\/127.0.0.1:6379\/0/CACHE_URL=redis:\/\/127.0.0.1:6379\/0/' /opt/funkwhale/config/.env #Remove #Hashtag From Config for Debian
sudo sed -i 's/# DATABASE_URL=postgresql:\/\/funkwhale@:5432\/funkwhale/DATABASE_URL=postgresql:\/\/funkwhale@:5432\/funkwhale/' /opt/funkwhale/config/.env #Remove #Hashtag From Config for Debian
# set the paths to /opt instead of /srv
sudo sed -i 's/MEDIA_ROOT=\/srv\/funkwhale\/data\/media/MEDIA_ROOT=\/opt\/funkwhale\/data\/media/' /opt/funkwhale/config/.env
@@ -99,7 +99,7 @@ DB_PASS="$(openssl rand -base64 18 | cut -c1-13)"
SECRET_KEY="$(head /dev/urandom | tr -dc A-Za-z0-9 | head -c 32)"
$STD sudo -u postgres psql -c "CREATE ROLE $DB_USER WITH LOGIN PASSWORD '$DB_PASS';"
$STD sudo -u postgres psql -c "CREATE DATABASE $DB_NAME WITH OWNER $DB_USER TEMPLATE template0;"
-$STD sudo -u postgres psql -c "GRANT ALL PRIVILEGES ON DATABASE $DB_NAME TO $DB_USER;"
+$STD sudo -u postgres psql -c "GRANT ALL PRIVILEGES ON DATABASE $DB_NAME TO $DB_USER;"
echo "" >>~/funkwhale.creds
echo -e "Funkwhale Database User: \e[32m$DB_USER\e[0m" >>~/funkwhale.creds
echo -e "Funkwhale Database Password: \e[32m$DB_PASS\e[0m" >>~/funkwhale.creds
@@ -130,26 +130,26 @@ msg_ok "Funkwhale successfully set up"
read -r -p "Would you like to Setup Reverse Proxy (Nginx)? " prompt
if [[ "${prompt,,}" =~ ^(y|yes)$ ]]; then
- msg_info "Installing NGINX"
- $STD apt install -y nginx
- sudo su
- $STD curl -L -o /etc/nginx/funkwhale_proxy.conf "https://dev.funkwhale.audio/funkwhale/funkwhale/raw/$FUNKWHALE_VERSION/deploy/funkwhale_proxy.conf"
- $STD curl -L -o /etc/nginx/sites-available/funkwhale.template "https://dev.funkwhale.audio/funkwhale/funkwhale/raw/$FUNKWHALE_VERSION/deploy/nginx.template"
- $STD set -a && source /opt/funkwhale/config/.env && set +a envsubst "`env | awk -F = '{printf \" $%s\", $$1}'`" \
- < /etc/nginx/sites-available/funkwhale.template \
- > /etc/nginx/sites-available/funkwhale.conf
- $STD grep '${' /etc/nginx/sites-available/funkwhale.conf
- $STD ln -s /etc/nginx/sites-available/funkwhale.conf /etc/nginx/sites-enabled/
- $STD systemctl reload nginx
- msg_ok "Installed Nginx"
+ msg_info "Installing NGINX"
+ $STD apt install -y nginx
+ sudo su
+ $STD curl -L -o /etc/nginx/funkwhale_proxy.conf "https://dev.funkwhale.audio/funkwhale/funkwhale/raw/$FUNKWHALE_VERSION/deploy/funkwhale_proxy.conf"
+ $STD curl -L -o /etc/nginx/sites-available/funkwhale.template "https://dev.funkwhale.audio/funkwhale/funkwhale/raw/$FUNKWHALE_VERSION/deploy/nginx.template"
+ $STD set -a && source /opt/funkwhale/config/.env && set +a envsubst "$(env | awk -F = '{printf \" $%s\", $$1}')" \
+  < /etc/nginx/sites-available/funkwhale.template \
+  > /etc/nginx/sites-available/funkwhale.conf
+ $STD grep '${' /etc/nginx/sites-available/funkwhale.conf
+ $STD ln -s /etc/nginx/sites-available/funkwhale.conf /etc/nginx/sites-enabled/
+ $STD systemctl reload nginx
+ msg_ok "Installed Nginx"
fi
read -r -p "Would you like to Setup TLS (Certbot)? " prompt
if [[ "${prompt,,}" =~ ^(y|yes)$ ]]; then
- msg_info "Installing Certbot"
- $STD apt install -y certbot python3-certbot-nginx
- $STD sudo certbot --nginx -d $FUNKWHALE_HOSTNAME
- msg_ok "Installed Certbot"
+ msg_info "Installing Certbot"
+ $STD apt install -y certbot python3-certbot-nginx
+ $STD sudo certbot --nginx -d $FUNKWHALE_HOSTNAME
+ msg_ok "Installed Certbot"
fi
motd_ssh
diff --git a/install/deferred/ghostfolio-install.sh b/install/deferred/ghostfolio-install.sh
index e834d734f..9a5d30e43 100644
--- a/install/deferred/ghostfolio-install.sh
+++ b/install/deferred/ghostfolio-install.sh
@@ -6,7 +6,7 @@
# License: MIT
# https://github.com/tteck/Proxmox/raw/main/LICENSE
-source /dev/stdin <<< "$FUNCTIONS_FILE_PATH"
+source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
color
verb_ip6
catch_errors
@@ -71,7 +71,7 @@ REDIS_PORT=6379
REDIS_PASSWORD="$(head /dev/urandom | tr -dc A-Za-z0-9 | head -c 32)"
$STD redis-cli CONFIG SET requirepass "$REDIS_PASSWORD"
-$STD redis-cli -a "$REDIS_PASSWORD" CONFIG REWRITE
+$STD redis-cli -a "$REDIS_PASSWORD" CONFIG REWRITE
$STD systemctl restart redis
echo "" >>~/ghostfolio.creds
echo "Ghostfolio Redis Credentials" >>~/ghostfolio.creds
@@ -176,4 +176,4 @@ customize
msg_info "Cleaning up"
$STD apt-get -y autoremove
$STD apt-get -y autoclean
-msg_ok "Cleaned"
\ No newline at end of file
+msg_ok "Cleaned"
diff --git a/install/deferred/hoodik-install.sh b/install/deferred/hoodik-install.sh
index bf40b47f0..d6505fd46 100644
--- a/install/deferred/hoodik-install.sh
+++ b/install/deferred/hoodik-install.sh
@@ -16,18 +16,18 @@ update_os
msg_info "Installing Dependencies (Patience)"
$STD apt-get install -y \
- pkg-config \
- libssl-dev \
- libc6-dev \
- libpq-dev \
- clang \
- llvm \
- nettle-dev \
- build-essential \
- curl \
- sudo \
- make \
- mc
+ pkg-config \
+ libssl-dev \
+ libc6-dev \
+ libpq-dev \
+ clang \
+ llvm \
+ nettle-dev \
+ build-essential \
+ curl \
+ sudo \
+ make \
+ mc
msg_ok "Installed Dependencies"
msg_info "Installing Rust (Patience)"
diff --git a/install/deferred/jumpserver-install.sh b/install/deferred/jumpserver-install.sh
index 119bad3b8..4ccfb09eb 100644
--- a/install/deferred/jumpserver-install.sh
+++ b/install/deferred/jumpserver-install.sh
@@ -15,7 +15,7 @@ update_os
msg_info "Installing Dependencies"
$STD apt-get install -y \
- iptables
+ iptables
msg_ok "Installed Dependencies"
msg_info "Installing JumpServer"
diff --git a/install/deferred/kanba-install.sh b/install/deferred/kanba-install.sh
new file mode 100644
index 000000000..10a306ddc
--- /dev/null
+++ b/install/deferred/kanba-install.sh
@@ -0,0 +1,85 @@
+#!/usr/bin/env bash
+
+# Copyright (c) 2021-2025 community-scripts ORG
+# Author: MickLesk (CanbiZ)
+# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
+# Source: https://github.com/Kanba-co/kanba
+
+source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
+color
+verb_ip6
+catch_errors
+setting_up_container
+network_check
+update_os
+
+NODE_VERSION="22" setup_nodejs
+fetch_and_deploy_gh_release "kanba" "Kanba-co/kanba" "tarball" "latest" "/opt/kanba"
+fetch_and_deploy_gh_release "supabase" "supabase/cli" "binary" "latest" "/opt/supabase" "supabase-linux-x64"
+POSTGRES_VERSION="16" setup_postgresql
+
+msg_info "Set up PostgreSQL Database"
+DB_NAME=kanba_db
+DB_USER=kanba_usr
+DB_PASS="$(openssl rand -base64 18 | tr -dc 'a-zA-Z0-9' | cut -c1-13)"
+DB_URL="postgresql://${DB_USER}:${DB_PASS}@localhost:5432/${DB_NAME}"
+$STD sudo -u postgres psql -c "CREATE ROLE $DB_USER WITH LOGIN PASSWORD '$DB_PASS';"
+$STD sudo -u postgres psql -c "CREATE DATABASE $DB_NAME WITH OWNER $DB_USER ENCODING 'UTF8' TEMPLATE template0;"
+$STD sudo -u postgres psql -c "ALTER ROLE $DB_USER SET client_encoding TO 'utf8';"
+$STD sudo -u postgres psql -c "ALTER ROLE $DB_USER SET default_transaction_isolation TO 'read committed';"
+$STD sudo -u postgres psql -c "ALTER ROLE $DB_USER SET timezone TO 'UTC';"
+{
+ echo "Kanba-Credentials"
+ echo "Kanba Database Name: $DB_NAME"
+ echo "Kanba Database User: $DB_USER"
+ echo "Kanba Database Password: $DB_PASS"
+} >>~/kanba.creds
+msg_ok "Set up PostgreSQL Database"
+
+msg_info "Preparing .env.local"
+cd /opt/kanba
+cp .env.example .env.local
+sed -i "s|^DATABASE_PROVIDER=.*|DATABASE_PROVIDER=postgresql|" .env.local
+sed -i "s|^DATABASE_URL=.*|DATABASE_URL=${DB_URL}|" .env.local
+sed -i "s|^DIRECT_URL=.*|DIRECT_URL=${DB_URL}|" .env.local
+sed -i "s|^NEXT_PUBLIC_SITE_URL=.*|NEXT_PUBLIC_SITE_URL=http://localhost:3000|" .env.local
+sed -i "s|^NEXTAUTH_URL=.*|NEXTAUTH_URL=http://localhost:3000|" .env.local
+sed -i "s|^NEXTAUTH_SECRET=.*|NEXTAUTH_SECRET=$(openssl rand -hex 32)|" .env.local
+sed -i "s|^NEXT_PUBLIC_SUPABASE_URL=.*|NEXT_PUBLIC_SUPABASE_URL=http://localhost:54321|" .env.local
+sed -i "s|^NEXT_PUBLIC_SUPABASE_ANON_KEY=.*|NEXT_PUBLIC_SUPABASE_ANON_KEY=dummy-key|" .env.local
+msg_ok "Prepared .env.local"
+
+msg_info "Installing Kanba"
+export $(grep -v '^#' .env.local | xargs)
+$STD npm install
+$STD npx prisma generate
+$STD npx prisma migrate deploy
+$STD npm run build
+msg_ok "Installed Kanba"
+
+msg_info "Creating systemd Service"
+cat <<EOF >/etc/systemd/system/kanba.service
+[Unit]
+Description=Kanba - Lightweight Trello Alternative
+After=network.target postgresql.service
+
+[Service]
+Type=simple
+WorkingDirectory=/opt/kanba
+EnvironmentFile=/opt/kanba/.env.local
+ExecStart=/usr/bin/npx next start -p 3000
+Restart=always
+
+[Install]
+WantedBy=multi-user.target
+EOF
+systemctl enable -q --now kanba
+msg_ok "Created systemd Service"
+
+motd_ssh
+customize
+
+msg_info "Cleaning up"
+$STD apt-get -y autoremove
+$STD apt-get -y autoclean
+msg_ok "Cleaned"
diff --git a/install/manyfold-install.sh b/install/deferred/manyfold-install.sh
similarity index 70%
rename from install/manyfold-install.sh
rename to install/deferred/manyfold-install.sh
index e4dcbf06a..f7bb7c2f6 100644
--- a/install/manyfold-install.sh
+++ b/install/deferred/manyfold-install.sh
@@ -14,10 +14,6 @@ update_os
msg_info "Installing Dependencies"
$STD apt-get install -y \
- curl \
- sudo \
- mc \
- gnupg2 postgresql \
lsb-release \
rbenv \
libpq-dev \
@@ -30,12 +26,17 @@ $STD apt-get install -y \
libyaml-dev
msg_ok "Installed Dependencies"
+PG_VERSION="16" setup_postgresql
+
msg_info "Setting up PostgreSQL"
DB_NAME=manyfold
DB_USER=manyfold
DB_PASS=$(openssl rand -base64 18 | tr -dc 'a-zA-Z0-9' | cut -c1-13)
$STD sudo -u postgres psql -c "CREATE ROLE $DB_USER WITH LOGIN PASSWORD '$DB_PASS';"
$STD sudo -u postgres psql -c "CREATE DATABASE $DB_NAME WITH OWNER $DB_USER TEMPLATE template0;"
+$STD sudo -u postgres psql -c "ALTER ROLE $DB_USER SET client_encoding TO 'utf8';"
+$STD sudo -u postgres psql -c "ALTER ROLE $DB_USER SET default_transaction_isolation TO 'read committed';"
+$STD sudo -u postgres psql -c "ALTER ROLE $DB_USER SET timezone TO 'UTC';"
{
echo "Manyfold Credentials"
echo "Manyfold Database User: $DB_USER"
@@ -44,36 +45,13 @@ $STD sudo -u postgres psql -c "CREATE DATABASE $DB_NAME WITH OWNER $DB_USER TEMP
} >>~/manyfold.creds
msg_ok "Set up PostgreSQL"
-msg_info "Downloading Manyfold"
-RELEASE=$(curl -fsSL https://api.github.com/repos/manyfold3d/manyfold/releases/latest | grep "tag_name" | awk '{print substr($2, 3, length($2)-4) }')
-cd /opt
-curl -fsSL "https://github.com/manyfold3d/manyfold/archive/refs/tags/v${RELEASE}.zip" -o manyfold.zip
-unzip -q manyfold.zip
-mv /opt/manyfold-${RELEASE}/ /opt/manyfold
+fetch_and_deploy_gh_release "manyfold" "manyfold3d/manyfold" "tarball" "latest" "/opt/manyfold"
+
RUBY_INSTALL_VERSION=$(cat /opt/manyfold/.ruby-version)
YARN_VERSION=$(grep '"packageManager":' /opt/manyfold/package.json | sed -E 's/.*"(yarn@[0-9\.]+)".*/\1/')
-msg_ok "Downloaded Manyfold"
-
-NODE_VERSION="22" NODE_MODULE="npm@latest,${YARN_VERSION}" setup_nodejs
-RUBY_VERSION=${RUBY_INSTALL_VERSION} RUBY_INSTALL_RAILS="true" setup_rbenv_stack
-
-# msg_info "Add ruby-build"
-# mkdir -p ~/.rbenv/plugins
-# cd ~/.rbenv/plugins
-# RUBY_BUILD_RELEASE=$(curl -s https://api.github.com/repos/rbenv/ruby-build/releases/latest | grep "tag_name" | awk '{print substr($2, 3, length($2)-4) }')
-# curl -fsSL "https://github.com/rbenv/ruby-build/archive/refs/tags/v${RUBY_BUILD_RELEASE}.zip" -o ruby-build.zip
-# unzip -q ruby-build.zip
-# mv ruby-build-* ~/.rbenv/plugins/ruby-build
-# echo "${RUBY_BUILD_RELEASE}" >~/.rbenv/plugins/RUBY_BUILD_version.txt
-# msg_ok "Added ruby-build"
-
-# msg_info "Installing ruby ${RUBY_VERSION}"
-# $STD rbenv install $RUBY_VERSION
-# echo 'export PATH="$HOME/.rbenv/bin:$PATH"' >>~/.bashrc
-# echo 'eval "$(rbenv init -)"' >>~/.bashrc
-# source ~/.bashrc
-# msg_ok "Installed ruby ${RUBY_VERSION}"
+NODE_VERSION="22" NODE_MODULE="yarn" setup_nodejs
+RUBY_VERSION=${RUBY_INSTALL_VERSION} RUBY_INSTALL_RAILS="true" setup_ruby
msg_info "Adding manyfold user"
useradd -m -s /usr/bin/bash manyfold
@@ -81,7 +59,7 @@ msg_ok "Added manyfold user"
msg_info "Setting .env file"
cat <<EOF >/opt/.env
-export APP_VERSION=${RELEASE}
+export APP_VERSION=12345
export GUID=1002
export PUID=1001
export PUBLIC_HOSTNAME=subdomain.somehost.org
@@ -108,15 +86,14 @@ $STD rbenv global $RUBY_INSTALL_VERSION
$STD bundle install
$STD gem install sidekiq
$STD npm install --global corepack
-corepack enable
-$STD corepack prepare $YARN_VERSION --activate
-$STD corepack use $YARN_VERSION
+corepack enable yarn
+# $STD corepack prepare $YARN_VERSION --activate
+# $STD corepack use $YARN_VERSION
chown manyfold:manyfold /opt/.env
rm /opt/manyfold/config/credentials.yml.enc
$STD bin/rails credentials:edit
$STD bin/rails db:migrate
$STD bin/rails assets:precompile
-echo "${RELEASE}" >/opt/${APPLICATION}_version.txt
msg_ok "Installed manyfold"
msg_info "Creating Service"
@@ -168,8 +145,6 @@ motd_ssh
customize
msg_info "Cleaning up"
-rm -rf "/opt/manyfold.zip"
-rm -rf "~/.rbenv/plugins/ruby-build.zip"
$STD apt-get -y autoremove
$STD apt-get -y autoclean
msg_ok "Cleaned"
diff --git a/install/maxun-install.sh b/install/deferred/maxun-install.sh
similarity index 90%
rename from install/maxun-install.sh
rename to install/deferred/maxun-install.sh
index f68515a1d..004546bc5 100644
--- a/install/maxun-install.sh
+++ b/install/deferred/maxun-install.sh
@@ -15,7 +15,6 @@ update_os
msg_info "Installing Dependencies"
$STD apt-get install -y \
- gpg \
openssl \
redis \
libgbm1 \
@@ -42,10 +41,8 @@ $STD apt-get install -y \
nginx
msg_ok "Installed Dependencies"
-#configure_lxc "Semantic Search requires a dedicated GPU and at least 16GB RAM. Would you like to install it?" 100 "memory" "16000"
-
PG_VERSION=17 setup_postgresql
-NODE_VERSION="22" setup_nodejs
+NODE_VERSION="18" setup_nodejs
msg_info "Setup Variables"
DB_NAME=maxun_db
@@ -56,6 +53,7 @@ MINIO_PASS=$(openssl rand -base64 18 | tr -dc 'a-zA-Z0-9' | cut -c1-13)
JWT_SECRET=$(openssl rand -base64 32 | tr -dc 'a-zA-Z0-9' | cut -c1-32)
ENCRYPTION_KEY=$(openssl rand -base64 32 | tr -dc 'a-zA-Z0-9' | cut -c1-32)
LOCAL_IP=$(hostname -I | awk '{print $1}')
+SESSION_SECRET=$(openssl rand -base64 32 | tr -dc 'a-zA-Z0-9' | cut -c1-32)
msg_ok "Set up Variables"
msg_info "Setup Database"
@@ -71,6 +69,7 @@ $STD sudo -u postgres psql -c "ALTER ROLE $DB_USER SET timezone TO 'UTC'"
echo "Maxun Database Name: $DB_NAME"
echo "Maxun JWT Secret: $JWT_SECRET"
echo "Maxun Encryption Key: $ENCRYPTION_KEY"
+ echo "Maxun Session Secret: $SESSION_SECRET"
} >>~/maxun.creds
msg_ok "Set up Database"
@@ -110,8 +109,9 @@ EOF
systemctl enable -q --now minio
msg_ok "Setup MinIO"
-msg_info "Installing Maxun (Patience)"
fetch_and_deploy_gh_release "maxun" "getmaxun/maxun" "source"
+
+msg_info "Installing Maxun (Patience)"
cat <<EOF >/opt/maxun/.env
NODE_ENV=development
JWT_SECRET=${JWT_SECRET}
@@ -137,6 +137,7 @@ VITE_BACKEND_URL=http://${LOCAL_IP}:8080
VITE_PUBLIC_URL=http://${LOCAL_IP}:5173
MAXUN_TELEMETRY=false
+SESSION_SECRET=${SESSION_SECRET}
EOF
cat <<'EOF' >/usr/local/bin/update-env-ip.sh
@@ -162,19 +163,27 @@ msg_info "Setting up nginx with CORS Proxy"
cat <<'EOF' >/etc/nginx/sites-available/maxun
server {
listen 80;
+ server_name _;
+ # Frontend ausliefern
+ root /usr/share/nginx/html;
+ index index.html;
location / {
- root /usr/share/nginx/html;
try_files $uri $uri/ /index.html;
}
- location ~ ^/(api|record|workflow|storage|auth|integration|proxy|api-docs) {
- proxy_pass http://localhost:8080;
- proxy_set_header Host $host;
+ # Backend Proxy
+ location ~ ^/(auth|storage|record|workflow|robot|proxy|api-docs|api|webhook)(/|$) {
+ proxy_pass http://127.0.0.1:8080;
+ proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection 'upgrade';
- proxy_http_version 1.1;
+ proxy_set_header Host $host;
+ proxy_set_header X-Real-IP $remote_addr;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header X-Forwarded-Proto $scheme;
+ # CORS
add_header Access-Control-Allow-Origin "$http_origin" always;
add_header Access-Control-Allow-Credentials true always;
add_header Access-Control-Allow-Methods GET,POST,PUT,DELETE,OPTIONS always;
@@ -193,7 +202,6 @@ server {
}
}
EOF
-
ln -sf /etc/nginx/sites-available/maxun /etc/nginx/sites-enabled/maxun
rm -f /etc/nginx/sites-enabled/default
msg_ok "nginx with CORS Proxy set up"
diff --git a/install/deferred/netbootxyz-install.sh b/install/deferred/netbootxyz-install.sh
index 933167c35..1a7f71f7e 100644
--- a/install/deferred/netbootxyz-install.sh
+++ b/install/deferred/netbootxyz-install.sh
@@ -5,7 +5,7 @@
# License: MIT
# https://github.com/tteck/Proxmox/raw/main/LICENSE
-source /dev/stdin <<< "$FUNCTIONS_FILE_PATH"
+source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
color
verb_ip6
catch_errors
@@ -22,8 +22,8 @@ msg_info "Installing netboot.xyz ${RELEASE}"
$STD curl --silent -o ${RELEASE}.tar.gz -L "https://github.com/netbootxyz/netboot.xyz/archive/${RELEASE}.tar.gz"
$STD tar xvzf ${RELEASE}.tar.gz
VER=$(curl -s https://api.github.com/repos/netbootxyz/netboot.xyz/releases/latest |
- grep "tag_name" |
- awk '{print substr($2, 2, length($2)-3) }')
+ grep "tag_name" |
+ awk '{print substr($2, 2, length($2)-3) }')
rm -rf ${RELEASE}.tar.gz
mv netboot.xyz-${VER} /opt/netboot.xyz
msg_ok "Installed netboot.xyz ${RELEASE}"
diff --git a/install/deferred/nginxproxymanager-install.sh b/install/deferred/nginxproxymanager-install.sh
index 0169c5d04..34db0d6a1 100644
--- a/install/deferred/nginxproxymanager-install.sh
+++ b/install/deferred/nginxproxymanager-install.sh
@@ -16,29 +16,29 @@ update_os
msg_info "Installing Dependencies"
$STD apt-get update
$STD apt-get -y install \
- sudo \
- mc \
- curl \
- gnupg \
- make \
- gcc \
- g++ \
- ca-certificates \
- apache2-utils \
- logrotate \
- build-essential \
- git
+ sudo \
+ mc \
+ curl \
+ gnupg \
+ make \
+ gcc \
+ g++ \
+ ca-certificates \
+ apache2-utils \
+ logrotate \
+ build-essential \
+ git
msg_ok "Installed Dependencies"
msg_info "Installing Python3"
$STD apt-get install -y \
- python3 \
- python3-dev \
- python3-pip \
- python3-venv \
- python3-cffi \
- python3-certbot \
- python3-certbot-dns-cloudflare
+ python3 \
+ python3-dev \
+ python3-pip \
+ python3-venv \
+ python3-cffi \
+ python3-certbot \
+ python3-certbot-dns-cloudflare
$STD pip3 install certbot-dns-multi
$STD python3 -m venv /opt/certbot/
rm -rf /usr/lib/python3.*/EXTERNALLY-MANAGED
@@ -76,7 +76,7 @@ sed -i "s|\"version\": \"0.0.0\"|\"version\": \"$RELEASE\"|" frontend/package.js
sed -i 's+^daemon+#daemon+g' docker/rootfs/etc/nginx/nginx.conf
NGINX_CONFS=$(find "$(pwd)" -type f -name "*.conf")
for NGINX_CONF in $NGINX_CONFS; do
- sed -i 's+include conf.d+include /etc/nginx/conf.d+g' "$NGINX_CONF"
+ sed -i 's+include conf.d+include /etc/nginx/conf.d+g' "$NGINX_CONF"
done
mkdir -p /var/www/html /etc/nginx/logs
@@ -88,21 +88,21 @@ ln -sf /etc/nginx/nginx.conf /etc/nginx/conf/nginx.conf
rm -f /etc/nginx/conf.d/dev.conf
mkdir -p /tmp/nginx/body \
- /run/nginx \
- /data/nginx \
- /data/custom_ssl \
- /data/logs \
- /data/access \
- /data/nginx/default_host \
- /data/nginx/default_www \
- /data/nginx/proxy_host \
- /data/nginx/redirection_host \
- /data/nginx/stream \
- /data/nginx/dead_host \
- /data/nginx/temp \
- /var/lib/nginx/cache/public \
- /var/lib/nginx/cache/private \
- /var/cache/nginx/proxy_temp
+ /run/nginx \
+ /data/nginx \
+ /data/custom_ssl \
+ /data/logs \
+ /data/access \
+ /data/nginx/default_host \
+ /data/nginx/default_www \
+ /data/nginx/proxy_host \
+ /data/nginx/redirection_host \
+ /data/nginx/stream \
+ /data/nginx/dead_host \
+ /data/nginx/temp \
+ /var/lib/nginx/cache/public \
+ /var/lib/nginx/cache/private \
+ /var/cache/nginx/proxy_temp
chmod -R 777 /var/cache/nginx
chown root /tmp/nginx
@@ -110,7 +110,7 @@ chown root /tmp/nginx
echo resolver "$(awk 'BEGIN{ORS=" "} $1=="nameserver" {print ($2 ~ ":")? "["$2"]": $2}' /etc/resolv.conf);" >/etc/nginx/conf.d/include/resolvers.conf
if [ ! -f /data/nginx/dummycert.pem ] || [ ! -f /data/nginx/dummykey.pem ]; then
- openssl req -new -newkey rsa:2048 -days 3650 -nodes -x509 -subj "/O=Nginx Proxy Manager/OU=Dummy Certificate/CN=localhost" -keyout /data/nginx/dummykey.pem -out /data/nginx/dummycert.pem &>/dev/null
+ openssl req -new -newkey rsa:2048 -days 3650 -nodes -x509 -subj "/O=Nginx Proxy Manager/OU=Dummy Certificate/CN=localhost" -keyout /data/nginx/dummykey.pem -out /data/nginx/dummycert.pem &>/dev/null
fi
mkdir -p /app/global /app/frontend/images
@@ -130,7 +130,7 @@ msg_ok "Built Frontend"
msg_info "Initializing Backend"
rm -rf /app/config/default.json
if [ ! -f /app/config/production.json ]; then
- cat <<'EOF' >/app/config/production.json
+ cat <<'EOF' >/app/config/production.json
{
"database": {
"engine": "knex-native",
diff --git a/install/deferred/nimbus-install.sh b/install/deferred/nimbus-install.sh
index 9cbeead47..c49430481 100644
--- a/install/deferred/nimbus-install.sh
+++ b/install/deferred/nimbus-install.sh
@@ -15,9 +15,9 @@ update_os
msg_info "Installing dependencies"
$STD apt-get install -y \
- build-essential \
- openssl \
- git
+ build-essential \
+ openssl \
+ git
msg_ok "Installed Dependencies"
msg_info "Installing Bun"
@@ -38,10 +38,10 @@ $STD sudo -u postgres psql -c "ALTER ROLE $DB_USER SET client_encoding TO 'utf8'
$STD sudo -u postgres psql -c "ALTER ROLE $DB_USER SET default_transaction_isolation TO 'read committed';"
$STD sudo -u postgres psql -c "ALTER ROLE $DB_USER SET timezone TO 'UTC'"
{
- echo "Nimbus-Credentials"
- echo "Nimbus Database User: $DB_USER"
- echo "Nimbus Database Password: $DB_PASS"
- echo "Nimbus Database Name: $DB_NAME"
+ echo "Nimbus-Credentials"
+ echo "Nimbus Database User: $DB_USER"
+ echo "Nimbus Database Password: $DB_PASS"
+ echo "Nimbus Database Name: $DB_NAME"
} >>~/nimbus.creds
msg_ok "Set up PostgreSQL Database"
diff --git a/install/opencloud-install.sh b/install/deferred/opencloud-install.sh
similarity index 93%
rename from install/opencloud-install.sh
rename to install/deferred/opencloud-install.sh
index 22ee79387..f369eee85 100644
--- a/install/opencloud-install.sh
+++ b/install/deferred/opencloud-install.sh
@@ -15,15 +15,15 @@ update_os
read -r -p "Enter the hostname of your OpenCloud server (eg cloud.domain.tld): " oc_host
if [[ "$oc_host" ]]; then
- OC_HOST="$oc_host"
+ OC_HOST="$oc_host"
fi
read -r -p "Enter the hostname of your Collabora server (eg collabora.domain.tld): " collabora_host
if [[ "$collabora_host" ]]; then
- COLLABORA_HOST="$collabora_host"
+ COLLABORA_HOST="$collabora_host"
fi
read -r -p "Enter the hostname of your WOPI server (eg wopiserver.domain.tld): " wopi_host
if [[ "$wopi_host" ]]; then
- WOPI_HOST="$wopi_host"
+ WOPI_HOST="$wopi_host"
fi
msg_info "Installing Collabora Online"
@@ -56,7 +56,7 @@ msg_ok "Installed ${APPLICATION}"
msg_info "Configuring ${APPLICATION}"
curl -fsSL https://raw.githubusercontent.com/opencloud-eu/opencloud-compose/refs/heads/main/config/opencloud/csp.yaml -o "$CONFIG_DIR"/csp.yaml
-curl -fsSL https://github.com/opencloud-eu/opencloud/raw/refs/heads/main/deployments/examples/opencloud_full/config/opencloud/proxy.yaml -o "$CONFIG_DIR"/proxy.yaml.bak
+curl -fsSL https://raw.githubusercontent.com/opencloud-eu/opencloud-compose/refs/heads/main/config/opencloud/proxy.yaml -o "$CONFIG_DIR"/proxy.yaml.bak
cat <<EOF >"$ENV_FILE"
OC_URL=https://${OC_HOST}
@@ -125,9 +125,8 @@ WEB_ASSET_APPS_PATH=${CONFIG_DIR}/assets/apps
# SEARCH_EXTRACTOR_TIKA_TIKA_URL=
# External storage test - currently not working
-# STORAGE_USERS_POSIX_ROOT=
-# STORAGE_USERS_DECOMPOSED_ROOT=
-# STORAGE_SYSTEM_OC_ROOT= # this definitely breaks shit, wouldn't ever change it
+# STORAGE_USERS_POSIX_ROOT=
+# STORAGE_USERS_ID_CACHE_STORE=nats-js-kv
EOF
cat <<EOF >/etc/systemd/system/opencloud.service
diff --git a/install/deferred/polaris-install.sh b/install/deferred/polaris-install.sh
index 76577e367..4f4e77277 100644
--- a/install/deferred/polaris-install.sh
+++ b/install/deferred/polaris-install.sh
@@ -15,13 +15,13 @@ update_os
msg_info "Installing Dependencies"
$STD apt-get install -y \
- make \
- git \
- build-essential \
- binutils \
- pkg-config \
- libsqlite3-dev \
- libssl-dev
+ make \
+ git \
+ build-essential \
+ binutils \
+ pkg-config \
+ libsqlite3-dev \
+ libssl-dev
msg_ok "Installed Dependencies"
msg_info "Installing Rust"
diff --git a/install/deferred/roundcubemail-install.sh b/install/deferred/roundcubemail-install.sh
index 16c4ce65f..cfa37f067 100644
--- a/install/deferred/roundcubemail-install.sh
+++ b/install/deferred/roundcubemail-install.sh
@@ -7,7 +7,7 @@
# https://github.com/tteck/Proxmox/raw/main/LICENSE
# Source: https://github.com/roundcube/roundcubemail
-source /dev/stdin <<< "$FUNCTIONS_FILE_PATH"
+source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
color
verb_ip6
catch_errors
@@ -17,14 +17,14 @@ update_os
msg_info "Installing Dependencies"
$STD apt-get install -y \
- curl \
- sudo \
- mc \
- postgresql \
- apache2 \
- libapache2-mod-php \
- composer \
- php8.2-{mbstring,gd,imap,mysql,ldap,curl,intl,imagick,bz2,sqlite3,zip,xml}
+ curl \
+ sudo \
+ mc \
+ postgresql \
+ apache2 \
+ libapache2-mod-php \
+ composer \
+ php8.2-{mbstring,gd,imap,mysql,ldap,curl,intl,imagick,bz2,sqlite3,zip,xml}
msg_ok "Installed Dependencies"
msg_info "Setting up PostgreSQL"
@@ -74,7 +74,7 @@ $STD sudo a2enmod deflate
$STD sudo a2enmod expires
$STD sudo a2enmod headers
$STD a2ensite roundcubemail.conf
-$STD a2dissite 000-default.conf
+$STD a2dissite 000-default.conf
$STD systemctl reload apache2
msg_ok "Installed Wallos"
@@ -85,4 +85,4 @@ msg_info "Cleaning up"
rm -rf /opt/roundcubemail-${RELEASE}-complete.tar.gz
$STD apt-get -y autoremove
$STD apt-get -y autoclean
-msg_ok "Cleaned"
\ No newline at end of file
+msg_ok "Cleaned"
diff --git a/install/deferred/squirrelserversmanager-install.sh b/install/deferred/squirrelserversmanager-install.sh
index 2bbda264a..9e6cedd03 100644
--- a/install/deferred/squirrelserversmanager-install.sh
+++ b/install/deferred/squirrelserversmanager-install.sh
@@ -4,7 +4,7 @@
# Author: tteck (tteckster)
# License: MIT
# https://github.com/tteck/Proxmox/raw/main/LICENSE
-source /dev/stdin <<< "$FUNCTIONS_FILE_PATH"
+source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
color
verb_ip6
@@ -16,7 +16,7 @@ update_os
# Generate a random string
generate_random_string() {
local LENGTH=$1
- tr -dc A-Za-z0-9 </dev/urandom | head -c $LENGTH 2>/dev/null || true
+ tr -dc A-Za-z0-9 </dev/urandom | head -c $LENGTH 2>/dev/null || true
}
msg_info "Installing Dependencies"
@@ -46,7 +46,7 @@ msg_ok "Installed Redis"
msg_info "Installing Nginx"
$STD apk add nginx
rm -rf /etc/nginx/http.d/default.conf
-cat <<'EOF'> /etc/nginx/http.d/default.conf
+cat <<'EOF' >/etc/nginx/http.d/default.conf
server {
listen 80;
server_name localhost;
@@ -90,8 +90,8 @@ msg_ok "Installed Nginx"
msg_info "Installing MongoDB Database"
DB_NAME=ssm
DB_PORT=27017
-echo 'http://dl-cdn.alpinelinux.org/alpine/v3.9/main' >> /etc/apk/repositories
-echo 'http://dl-cdn.alpinelinux.org/alpine/v3.9/community' >> /etc/apk/repositories
+echo 'http://dl-cdn.alpinelinux.org/alpine/v3.9/main' >>/etc/apk/repositories
+echo 'http://dl-cdn.alpinelinux.org/alpine/v3.9/community' >>/etc/apk/repositories
$STD apk update
$STD apk add mongodb mongodb-tools
msg_ok "Installed MongoDB Database"
@@ -108,7 +108,7 @@ $STD git clone https://github.com/SquirrelCorporation/SquirrelServersManager.git
SECRET=$(generate_random_string 32)
SALT=$(generate_random_string 16)
VAULT_PWD=$(generate_random_string 32)
-cat <<EOF > /opt/squirrelserversmanager/.env
+cat <<EOF >/opt/squirrelserversmanager/.env
# SECRETS
SECRET=$SECRET
SALT=$SALT
diff --git a/install/deferred/timescaledb-install.sh b/install/deferred/timescaledb-install.sh
index 7e01a7586..edcb20faa 100644
--- a/install/deferred/timescaledb-install.sh
+++ b/install/deferred/timescaledb-install.sh
@@ -4,7 +4,7 @@
# Author: MickLesk (CanbiZ)
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
-source /dev/stdin <<< "$FUNCTIONS_FILE_PATH"
+source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
color
verb_ip6
catch_errors
@@ -14,12 +14,12 @@ update_os
msg_info "Installing Dependencies"
$STD apt-get install -y \
- curl \
- sudo \
- mc \
- gnupg \
- apt-transport-https \
- lsb-release
+ curl \
+ sudo \
+ mc \
+ gnupg \
+ apt-transport-https \
+ lsb-release
msg_ok "Installed Dependencies"
msg_info "Setting up PostgreSQL Repository"
@@ -60,10 +60,10 @@ cat <<EOF >/etc/postgresql/17/main/postgresql.conf
# FILE LOCATIONS
#------------------------------------------------------------------------------
-data_directory = '/var/lib/postgresql/17/main'
-hba_file = '/etc/postgresql/17/main/pg_hba.conf'
-ident_file = '/etc/postgresql/17/main/pg_ident.conf'
-external_pid_file = '/var/run/postgresql/17-main.pid'
+data_directory = '/var/lib/postgresql/17/main'
+hba_file = '/etc/postgresql/17/main/pg_hba.conf'
+ident_file = '/etc/postgresql/17/main/pg_ident.conf'
+external_pid_file = '/var/run/postgresql/17-main.pid'
#------------------------------------------------------------------------------
# CONNECTIONS AND AUTHENTICATION
@@ -71,10 +71,10 @@ external_pid_file = '/var/run/postgresql/17-main.pid'
# - Connection Settings -
-listen_addresses = '*'
-port = 5432
-max_connections = 100
-unix_socket_directories = '/var/run/postgresql'
+listen_addresses = '*'
+port = 5432
+max_connections = 100
+unix_socket_directories = '/var/run/postgresql'
# - SSL -
@@ -86,8 +86,8 @@ ssl_key_file = '/etc/ssl/private/ssl-cert-snakeoil.key'
# RESOURCE USAGE (except WAL)
#------------------------------------------------------------------------------
-shared_buffers = 128MB
-dynamic_shared_memory_type = posix
+shared_buffers = 128MB
+dynamic_shared_memory_type = posix
#------------------------------------------------------------------------------
# WRITE-AHEAD LOG
@@ -102,14 +102,14 @@ min_wal_size = 80MB
# - What to Log -
-log_line_prefix = '%m [%p] %q%u@%d '
+log_line_prefix = '%m [%p] %q%u@%d '
log_timezone = 'Etc/UTC'
#------------------------------------------------------------------------------
# PROCESS TITLE
#------------------------------------------------------------------------------
-cluster_name = '17/main'
+cluster_name = '17/main'
#------------------------------------------------------------------------------
# CLIENT CONNECTION DEFAULTS
@@ -119,23 +119,22 @@ cluster_name = '17/main'
datestyle = 'iso, mdy'
timezone = 'Etc/UTC'
-lc_messages = 'C'
-lc_monetary = 'C'
-lc_numeric = 'C'
-lc_time = 'C'
+lc_messages = 'C'
+lc_monetary = 'C'
+lc_numeric = 'C'
+lc_time = 'C'
default_text_search_config = 'pg_catalog.english'
#------------------------------------------------------------------------------
# CONFIG FILE INCLUDES
#------------------------------------------------------------------------------
-include_dir = 'conf.d'
+include_dir = 'conf.d'
EOF
systemctl restart postgresql
msg_ok "Installed PostgreSQL"
-
msg_info "Setup TimescaleDB"
echo "deb https://packagecloud.io/timescale/timescaledb/debian/ $(lsb_release -c -s) main" | sudo tee /etc/apt/sources.list.d/timescaledb.list
wget --quiet -O - https://packagecloud.io/timescale/timescaledb/gpgkey | sudo gpg --dearmor -o /etc/apt/trusted.gpg.d/timescaledb.gpg
@@ -147,11 +146,11 @@ msg_ok "Setup TimescaleDB"
read -r -p "Would you like to add Adminer? " prompt
if [[ "${prompt,,}" =~ ^(y|yes)$ ]]; then
- msg_info "Installing Adminer"
- $STD apt install -y adminer
- $STD a2enconf adminer
- systemctl reload apache2
- msg_ok "Installed Adminer"
+ msg_info "Installing Adminer"
+ $STD apt install -y adminer
+ $STD a2enconf adminer
+ systemctl reload apache2
+ msg_ok "Installed Adminer"
fi
motd_ssh
diff --git a/install/vikunja-install.sh b/install/deferred/vikunja-install.sh
similarity index 100%
rename from install/vikunja-install.sh
rename to install/deferred/vikunja-install.sh
diff --git a/install/docker-install.sh b/install/docker-install.sh
deleted file mode 100644
index 238a84046..000000000
--- a/install/docker-install.sh
+++ /dev/null
@@ -1,114 +0,0 @@
-#!/usr/bin/env bash
-
-# Copyright (c) 2021-2025 tteck
-# Author: tteck (tteckster)
-# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
-# Source: https://www.docker.com/
-
-source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
-color
-verb_ip6
-catch_errors
-setting_up_container
-network_check
-update_os
-
-get_latest_release() {
- curl -fsSL https://api.github.com/repos/"$1"/releases/latest | grep '"tag_name":' | cut -d'"' -f4
-}
-
-DOCKER_LATEST_VERSION=$(get_latest_release "moby/moby")
-PORTAINER_LATEST_VERSION=$(get_latest_release "portainer/portainer")
-PORTAINER_AGENT_LATEST_VERSION=$(get_latest_release "portainer/agent")
-DOCKER_COMPOSE_LATEST_VERSION=$(get_latest_release "docker/compose")
-
-msg_info "Installing Docker $DOCKER_LATEST_VERSION"
-DOCKER_CONFIG_PATH='/etc/docker/daemon.json'
-mkdir -p $(dirname $DOCKER_CONFIG_PATH)
-echo -e '{\n "log-driver": "journald"\n}' >/etc/docker/daemon.json
-$STD sh <(curl -fsSL https://get.docker.com)
-msg_ok "Installed Docker $DOCKER_LATEST_VERSION"
-
-read -r -p "${TAB3}Install Docker Compose v2 plugin? " prompt_compose
-if [[ ${prompt_compose,,} =~ ^(y|yes)$ ]]; then
- msg_info "Installing Docker Compose $DOCKER_COMPOSE_LATEST_VERSION"
- mkdir -p /usr/local/lib/docker/cli-plugins
- curl -fsSL "https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE_LATEST_VERSION}/docker-compose-$(uname -s)-$(uname -m)" \
- -o /usr/local/lib/docker/cli-plugins/docker-compose
- chmod +x /usr/local/lib/docker/cli-plugins/docker-compose
- msg_ok "Installed Docker Compose $DOCKER_COMPOSE_LATEST_VERSION"
-fi
-
-read -r -p "${TAB3}Would you like to add Portainer (UI)? " prompt
-if [[ ${prompt,,} =~ ^(y|yes)$ ]]; then
- msg_info "Installing Portainer $PORTAINER_LATEST_VERSION"
- docker volume create portainer_data >/dev/null
- $STD docker run -d \
- -p 8000:8000 \
- -p 9443:9443 \
- --name=portainer \
- --restart=always \
- -v /var/run/docker.sock:/var/run/docker.sock \
- -v portainer_data:/data \
- portainer/portainer-ce:latest
- msg_ok "Installed Portainer $PORTAINER_LATEST_VERSION"
-else
- read -r -p "${TAB3}Would you like to install the Portainer Agent (for remote management)? " prompt_agent
- if [[ ${prompt_agent,,} =~ ^(y|yes)$ ]]; then
- msg_info "Installing Portainer Agent $PORTAINER_AGENT_LATEST_VERSION"
- $STD docker run -d \
- -p 9001:9001 \
- --name portainer_agent \
- --restart=always \
- -v /var/run/docker.sock:/var/run/docker.sock \
- -v /var/lib/docker/volumes:/var/lib/docker/volumes \
- portainer/agent
- msg_ok "Installed Portainer Agent $PORTAINER_AGENT_LATEST_VERSION"
- fi
-fi
-
-read -r -p "${TAB3}Expose Docker TCP socket (insecure) ? [n = No, l = Local only (127.0.0.1), a = All interfaces (0.0.0.0)] : " socket_choice
-case "${socket_choice,,}" in
- l)
- socket="tcp://127.0.0.1:2375"
- ;;
- a)
- socket="tcp://0.0.0.0:2375"
- ;;
- *)
- socket=""
- ;;
-esac
-
-if [[ -n "$socket" ]]; then
- msg_info "Enabling Docker TCP socket on $socket"
- $STD apt-get install -y jq
-
- tmpfile=$(mktemp)
- jq --arg sock "$socket" '. + { "hosts": ["unix:///var/run/docker.sock", $sock] }' /etc/docker/daemon.json > "$tmpfile" && mv "$tmpfile" /etc/docker/daemon.json
-
- mkdir -p /etc/systemd/system/docker.service.d
- cat <<EOF > /etc/systemd/system/docker.service.d/override.conf
-[Service]
-ExecStart=
-ExecStart=/usr/bin/dockerd
-EOF
-
- $STD systemctl daemon-reexec
- $STD systemctl daemon-reload
-
- if systemctl restart docker; then
- msg_ok "Docker TCP socket available on $socket"
- else
- msg_error "Docker failed to restart. Check journalctl -xeu docker.service"
- exit 1
- fi
-fi
-
-motd_ssh
-customize
-
-msg_info "Cleaning up"
-$STD apt-get -y autoremove
-$STD apt-get -y autoclean
-msg_ok "Cleaned"
diff --git a/install/donetick-install.sh b/install/donetick-install.sh
new file mode 100644
index 000000000..2ee9da715
--- /dev/null
+++ b/install/donetick-install.sh
@@ -0,0 +1,54 @@
+#!/usr/bin/env bash
+
+# Copyright (c) 2021-2025 community-scripts ORG
+# Author: fstof
+# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
+# Source: https://github.com/donetick/donetick
+
+source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
+color
+verb_ip6
+catch_errors
+setting_up_container
+network_check
+update_os
+
+msg_info "Installing Dependencies"
+$STD apt install -y ca-certificates
+msg_ok "Installed Dependencies"
+
+fetch_and_deploy_gh_release "donetick" "donetick/donetick" "prebuild" "latest" "/opt/donetick" "donetick_Linux_x86_64.tar.gz"
+
+msg_info "Setup donetick"
+cd /opt/donetick
+TOKEN=$(openssl rand -hex 16)
+sed -i -e "s/change_this_to_a_secure_random_string_32_characters_long/${TOKEN}/g" config/selfhosted.yaml
+msg_ok "Setup donetick"
+
+msg_info "Creating Service"
+cat <<EOF >/etc/systemd/system/donetick.service
+[Unit]
+Description=donetick Service
+After=network.target
+
+[Service]
+Environment="DT_ENV=selfhosted"
+WorkingDirectory=/opt/donetick
+ExecStart=/opt/donetick/donetick
+Restart=always
+
+[Install]
+WantedBy=multi-user.target
+EOF
+systemctl enable -q --now donetick
+msg_ok "Created Service"
+
+motd_ssh
+customize
+
+msg_info "Cleaning up"
+$STD apt -y autoremove
+$STD apt -y autoclean
+$STD apt -y clean
+msg_ok "Cleaned"
+
diff --git a/install/ente-install.sh b/install/ente-install.sh
new file mode 100644
index 000000000..c0fe3697f
--- /dev/null
+++ b/install/ente-install.sh
@@ -0,0 +1,338 @@
+#!/usr/bin/env bash
+
+# Copyright (c) 2021-2025 community-scripts ORG
+# Author: MickLesk
+# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
+# Source: https://github.com/ente-io/ente
+
+source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
+color
+verb_ip6
+catch_errors
+setting_up_container
+network_check
+update_os
+
+msg_info "Installing Dependencies"
+$STD apt-get install -y \
+ libsodium23 \
+ libsodium-dev \
+ pkg-config \
+ caddy \
+ gcc \
+ curl \
+ jq
+msg_ok "Installed Dependencies"
+
+PG_VERSION="17" setup_postgresql
+setup_go
+NODE_VERSION="24" NODE_MODULE="yarn" setup_nodejs
+ENTE_CLI_VERSION=$(curl -s https://api.github.com/repos/ente-io/ente/releases | jq -r '[.[] | select(.tag_name | startswith("cli-v"))][0].tag_name')
+fetch_and_deploy_gh_release "ente" "ente-io/ente" "tarball" "latest" "/opt/ente"
+fetch_and_deploy_gh_release "ente" "ente-io/ente" "tarball" "$ENTE_CLI_VERSION" "/usr/local/bin/ente" "ente-cli-$ENTE_CLI_VERSION-linux-amd64.tar.gz"
+
+msg_info "Setting up PostgreSQL"
+DB_NAME="ente_db"
+DB_USER="ente"
+DB_PASS="$(openssl rand -base64 18 | tr -dc 'a-zA-Z0-9' | cut -c1-13)"
+$STD sudo -u postgres psql -c "CREATE ROLE $DB_USER WITH LOGIN PASSWORD '$DB_PASS';"
+$STD sudo -u postgres psql -c "CREATE DATABASE $DB_NAME WITH OWNER $DB_USER ENCODING 'UTF8' TEMPLATE template0;"
+$STD sudo -u postgres psql -c "ALTER ROLE $DB_USER SET client_encoding TO 'utf8';"
+$STD sudo -u postgres psql -c "ALTER ROLE $DB_USER SET default_transaction_isolation TO 'read committed';"
+$STD sudo -u postgres psql -c "ALTER ROLE $DB_USER SET timezone TO 'UTC';"
+{
+ echo "Ente Credentials"
+ echo "Database Name: $DB_NAME"
+ echo "Database User: $DB_USER"
+ echo "Database Password: $DB_PASS"
+ echo ""
+ echo "Important Configuration Notes:"
+ echo "- Frontend is built with IP: $(hostname -I | awk '{print $1}')"
+ echo "- If IP changes, run: /opt/ente/rebuild-frontend.sh"
+ echo "- Museum API: http://$(hostname -I | awk '{print $1}'):8080"
+ echo "- Photos UI: http://$(hostname -I | awk '{print $1}'):3000"
+ echo "- Accounts UI: http://$(hostname -I | awk '{print $1}'):3001"
+ echo "- Auth UI: http://$(hostname -I | awk '{print $1}'):3003"
+ echo ""
+ echo "Post-Installation Steps Required:"
+ echo "1. Create your first user account via the web UI"
+ echo "2. Check museum logs for email verification code:"
+ echo " journalctl -u ente-museum -n 100 | grep -i 'verification'"
+ echo "3. Use verification code to complete account setup"
+ echo "4. Remove subscription limit (replace with your account):"
+ echo " ente admin update-subscription -a <admin-email> -u <user-email> --no-limit"
+ echo ""
+ echo "Note: Email verification requires manual intervention since SMTP is not configured"
+} >>~/ente.creds
+msg_ok "Set up PostgreSQL"
+
+msg_info "Building Museum (server)"
+cd /opt/ente/server
+$STD corepack enable
+$STD go mod tidy
+export CGO_ENABLED=1
+CGO_CFLAGS="$(pkg-config --cflags libsodium || true)"
+CGO_LDFLAGS="$(pkg-config --libs libsodium || true)"
+if [ -z "$CGO_CFLAGS" ]; then
+ CGO_CFLAGS="-I/usr/include"
+fi
+if [ -z "$CGO_LDFLAGS" ]; then
+ CGO_LDFLAGS="-lsodium"
+fi
+export CGO_CFLAGS
+export CGO_LDFLAGS
+$STD go build cmd/museum/main.go
+msg_ok "Built Museum"
+
+msg_info "Generating Secrets"
+SECRET_ENC=$(go run tools/gen-random-keys/main.go 2>/dev/null | grep "encryption" | awk '{print $2}')
+SECRET_HASH=$(go run tools/gen-random-keys/main.go 2>/dev/null | grep "hash" | awk '{print $2}')
+SECRET_JWT=$(go run tools/gen-random-keys/main.go 2>/dev/null | grep "jwt" | awk '{print $2}')
+msg_ok "Generated Secrets"
+
+msg_info "Creating museum.yaml"
+CONTAINER_IP=$(hostname -I | awk '{print $1}')
+cat <<EOF >/opt/ente/server/museum.yaml
+db:
+ host: 127.0.0.1
+ port: 5432
+ name: $DB_NAME
+ user: $DB_USER
+ password: $DB_PASS
+
+s3:
+ are_local_buckets: true
+ use_path_style_urls: true
+ local-dev:
+ key: dummy
+ secret: dummy
+ endpoint: localhost:3200
+ region: eu-central-2
+ bucket: ente-dev
+
+apps:
+ public-albums: http://${CONTAINER_IP}:3002
+ cast: http://${CONTAINER_IP}:3004
+ accounts: http://${CONTAINER_IP}:3001
+
+key:
+ encryption: $SECRET_ENC
+ hash: $SECRET_HASH
+
+jwt:
+ secret: $SECRET_JWT
+
+# SMTP not configured - verification codes will appear in logs
+# To configure SMTP, add:
+# smtp:
+# host: your-smtp-server
+# port: 587
+# username: your-username
+# password: your-password
+# email: noreply@yourdomain.com
+EOF
+msg_ok "Created museum.yaml"
+
+msg_info "Building Web Applications"
+# Get container IP address
+CONTAINER_IP=$(hostname -I | awk '{print $1}')
+cd /opt/ente/web
+$STD yarn install
+export NEXT_PUBLIC_ENTE_ENDPOINT=http://${CONTAINER_IP}:8080
+export NEXT_PUBLIC_ENTE_ALBUMS_ENDPOINT=http://${CONTAINER_IP}:3002
+$STD yarn build
+$STD yarn build:accounts
+$STD yarn build:auth
+$STD yarn build:cast
+mkdir -p /var/www/ente/apps
+cp -r apps/photos/out /var/www/ente/apps/photos
+cp -r apps/accounts/out /var/www/ente/apps/accounts
+cp -r apps/auth/out /var/www/ente/apps/auth
+cp -r apps/cast/out /var/www/ente/apps/cast
+
+# Save build configuration for future rebuilds
+cat <<REBUILD_EOF >/opt/ente/rebuild-frontend.sh
+#!/usr/bin/env bash
+# Rebuild Ente frontend with current IP
+CONTAINER_IP=\$(hostname -I | awk '{print \$1}')
+echo "Building frontend with IP: \$CONTAINER_IP"
+cd /opt/ente/web
+export NEXT_PUBLIC_ENTE_ENDPOINT=http://\${CONTAINER_IP}:8080
+export NEXT_PUBLIC_ENTE_ALBUMS_ENDPOINT=http://\${CONTAINER_IP}:3002
+yarn build
+yarn build:accounts
+yarn build:auth
+yarn build:cast
+rm -rf /var/www/ente/apps/*
+cp -r apps/photos/out /var/www/ente/apps/photos
+cp -r apps/accounts/out /var/www/ente/apps/accounts
+cp -r apps/auth/out /var/www/ente/apps/auth
+cp -r apps/cast/out /var/www/ente/apps/cast
+systemctl reload caddy
+echo "Frontend rebuilt successfully!"
+REBUILD_EOF
+chmod +x /opt/ente/rebuild-frontend.sh
+msg_ok "Built Web Applications"
+
+msg_info "Creating Museum Service"
+cat <<EOF >/etc/systemd/system/ente-museum.service
+[Unit]
+Description=Ente Museum Server
+After=network.target postgresql.service
+
+[Service]
+WorkingDirectory=/opt/ente/server
+ExecStart=/opt/ente/server/main -config /opt/ente/server/museum.yaml
+Restart=always
+
+[Install]
+WantedBy=multi-user.target
+EOF
+systemctl enable -q --now ente-museum
+msg_ok "Created Museum Service"
+
+msg_info "Configuring Caddy"
+CONTAINER_IP=$(hostname -I | awk '{print $1}')
+cat <<EOF >/etc/caddy/Caddyfile
+# Ente Photos - Main Application
+:3000 {
+ root * /var/www/ente/apps/photos
+ file_server
+ try_files {path} {path}.html /index.html
+
+ header {
+ Access-Control-Allow-Origin *
+ Access-Control-Allow-Methods "GET, POST, PUT, DELETE, OPTIONS"
+ Access-Control-Allow-Headers *
+ }
+}
+
+# Ente Accounts
+:3001 {
+ root * /var/www/ente/apps/accounts
+ file_server
+ try_files {path} {path}.html /index.html
+
+ header {
+ Access-Control-Allow-Origin *
+ Access-Control-Allow-Methods "GET, POST, PUT, DELETE, OPTIONS"
+ Access-Control-Allow-Headers *
+ }
+}
+
+# Public Albums
+:3002 {
+ root * /var/www/ente/apps/photos
+ file_server
+ try_files {path} {path}.html /index.html
+
+ header {
+ Access-Control-Allow-Origin *
+ Access-Control-Allow-Methods "GET, POST, PUT, DELETE, OPTIONS"
+ Access-Control-Allow-Headers *
+ }
+}
+
+# Auth
+:3003 {
+ root * /var/www/ente/apps/auth
+ file_server
+ try_files {path} {path}.html /index.html
+
+ header {
+ Access-Control-Allow-Origin *
+ Access-Control-Allow-Methods "GET, POST, PUT, DELETE, OPTIONS"
+ Access-Control-Allow-Headers *
+ }
+}
+
+# Cast
+:3004 {
+ root * /var/www/ente/apps/cast
+ file_server
+ try_files {path} {path}.html /index.html
+
+ header {
+ Access-Control-Allow-Origin *
+ Access-Control-Allow-Methods "GET, POST, PUT, DELETE, OPTIONS"
+ Access-Control-Allow-Headers *
+ }
+}
+
+# Museum API Proxy
+:8080 {
+ reverse_proxy localhost:8080
+
+ header {
+ Access-Control-Allow-Origin *
+ Access-Control-Allow-Methods "GET, POST, PUT, DELETE, OPTIONS"
+ Access-Control-Allow-Headers *
+ }
+}
+EOF
+systemctl reload caddy
+msg_ok "Configured Caddy"
+
+motd_ssh
+customize
+
+msg_info "Creating helper scripts"
+cat <<'HELPER_EOF' >/usr/local/bin/ente-get-verification
+#!/usr/bin/env bash
+echo "Searching for verification codes in museum logs..."
+journalctl -u ente-museum --no-pager | grep -i "verification\|verify\|code" | tail -20
+HELPER_EOF
+chmod +x /usr/local/bin/ente-get-verification
+
+cat <<'HELPER_EOF' >/usr/local/bin/ente-upgrade-subscription
+#!/usr/bin/env bash
+if [ -z "$1" ]; then
+ echo "Usage: ente-upgrade-subscription "
+ echo "Usage: ente-upgrade-subscription <email>"
+ exit 1
+fi
+EMAIL="$1"
+echo "Upgrading subscription for: $EMAIL"
+ente admin update-subscription -a "$EMAIL" -u "$EMAIL" --no-limit
+HELPER_EOF
+chmod +x /usr/local/bin/ente-upgrade-subscription
+
+msg_ok "Created helper scripts"
+
+msg_info "Cleaning up"
+$STD apt -y autoremove
+$STD apt -y autoclean
+msg_ok "Cleaned"
+
+# Final setup summary
+CONTAINER_IP=$(hostname -I | awk '{print $1}')
+echo -e "\n━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
+echo -e " ${GN}Ente Installation Complete!${CL}"
+echo -e "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
+echo -e "\n${BL}Access URLs:${CL}"
+echo -e " Photos: http://${CONTAINER_IP}:3000"
+echo -e " Accounts: http://${CONTAINER_IP}:3001"
+echo -e " Auth: http://${CONTAINER_IP}:3003"
+echo -e " API: http://${CONTAINER_IP}:8080"
+echo -e "\n${YW}⚠️ Important Post-Installation Steps:${CL}"
+echo -e "\n${BL}1. Create your first account:${CL}"
+echo -e " • Open http://${CONTAINER_IP}:3000 in your browser"
+echo -e " • Click 'Sign Up' and create an account"
+echo -e "\n${BL}2. Verify your email (required):${CL}"
+echo -e " • Run: ${GN}ente-get-verification${CL}"
+echo -e " • Look for the verification code in the output"
+echo -e " • Enter the code in the web UI to complete registration"
+echo -e "\n${BL}3. Remove storage limit:${CL}"
+echo -e " • After email verification is complete"
+echo -e " • Run: ${GN}ente-upgrade-subscription your@email.com${CL}"
+echo -e " • This removes the 10GB limit"
+echo -e "\n${BL}4. If IP changes:${CL}"
+echo -e " • Run: ${GN}/opt/ente/rebuild-frontend.sh${CL}"
+echo -e " • This rebuilds the frontend with the new IP"
+echo -e "\n${YW}Known Limitations:${CL}"
+echo -e " • Email verification requires checking logs (no SMTP configured)"
+echo -e " • Account creation must be done manually via web UI"
+echo -e " • Subscription upgrade requires CLI after account creation"
+echo -e " • Frontend must be rebuilt if container IP changes"
+echo -e "\n${BL}Credentials saved to:${CL} ~/ente.creds"
+echo -e "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n"
diff --git a/install/freepbx-install.sh b/install/freepbx-install.sh
new file mode 100644
index 000000000..4a4bd2715
--- /dev/null
+++ b/install/freepbx-install.sh
@@ -0,0 +1,111 @@
+#!/usr/bin/env bash
+
+# Copyright (c) 2021-2025 community-scripts ORG
+# Author: Arian Nasr (arian-nasr)
+# Updated by: Javier Pastor (vsc55)
+# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
+# Source: https://www.freepbx.org/
+
+INSTALL_URL="https://github.com/FreePBX/sng_freepbx_debian_install/raw/master/sng_freepbx_debian_install.sh"
+INSTALL_PATH="/opt/sng_freepbx_debian_install.sh"
+
+source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
+color
+verb_ip6
+catch_errors
+setting_up_container
+network_check
+update_os
+
+ONLY_OPENSOURCE="${ONLY_OPENSOURCE:-no}"
+REMOVE_FIREWALL="${REMOVE_FIREWALL:-no}"
+msg_ok "Remove Commercial modules is set to: $ONLY_OPENSOURCE"
+msg_ok "Remove Firewall module is set to: $REMOVE_FIREWALL"
+
+msg_info "Downloading FreePBX installation script..."
+if curl -fsSL "$INSTALL_URL" -o "$INSTALL_PATH"; then
+ msg_ok "Download completed successfully"
+else
+ curl_exit_code=$?
+ msg_error "Error downloading FreePBX installation script (curl exit code: $curl_exit_code)"
+ msg_error "Aborting!"
+ exit 1
+fi
+
+if [[ "$VERBOSE" == "yes" ]]; then
+ msg_info "Installing FreePBX (Verbose)\n"
+else
+ msg_info "Installing FreePBX, be patient, this takes time..."
+fi
+$STD bash "$INSTALL_PATH"
+
+if [[ $ONLY_OPENSOURCE == "yes" ]]; then
+ msg_info "Removing Commercial modules..."
+
+ end_count=0
+ max=5
+ count=0
+ while fwconsole ma list | awk '/Commercial/ {found=1} END {exit !found}'; do
+ count=$((count + 1))
+ while read -r module; do
+ msg_info "Removing module: $module"
+
+ if [[ "$REMOVE_FIREWALL" == "no" ]] && [[ "$module" == "sysadmin" ]]; then
+ msg_warn "Skipping sysadmin module removal, it is required for Firewall!"
+ continue
+ fi
+
+ code=0
+ $STD fwconsole ma -f remove $module || code=$?
+ if [[ $code -ne 0 ]]; then
+ msg_error "Module $module could not be removed - error code $code"
+ else
+ msg_ok "Module $module removed successfully"
+ fi
+ done < <(fwconsole ma list | awk '/Commercial/ {print $2}')
+
+ [[ $count -ge $max ]] && break
+
+ com_list=$(fwconsole ma list)
+ end_count=$(awk '/Commercial/ {count++} END {print count + 0}' <<<"$com_list")
+ awk '/Commercial/ {found=1} END {exit !found}' <<<"$com_list" || break
+ if [[ "$REMOVE_FIREWALL" == "no" ]] &&
+ [[ $end_count -eq 1 ]] &&
+ [[ $(awk '/Commercial/ {print $2}' <<<"$com_list") == "sysadmin" ]]; then
+ break
+ fi
+
+ msg_warn "Not all commercial modules could be removed, retrying (attempt $count of $max)..."
+ done
+
+ if [[ $REMOVE_FIREWALL == "yes" ]] && [[ $end_count -gt 0 ]]; then
+ msg_info "Removing Firewall module..."
+ if $STD fwconsole ma -f remove firewall; then
+ msg_ok "Firewall module removed successfully"
+ else
+ msg_error "Firewall module could not be removed, please check manually!"
+ fi
+ fi
+
+ if [[ $end_count -eq 0 ]]; then
+ msg_ok "All commercial modules removed successfully"
+ elif [[ $end_count -eq 1 ]] && [[ $REMOVE_FIREWALL == "no" ]] && [[ $(fwconsole ma list | awk '/Commercial/ {print $2}') == "sysadmin" ]]; then
+ msg_ok "Only sysadmin module left, which is required for Firewall, skipping removal"
+ else
+ msg_warn "Some commercial modules could not be removed, please check the web interface for removal manually!"
+ fi
+
+ msg_info "Reloading FreePBX..."
+ $STD fwconsole reload
+ msg_ok "FreePBX reloaded completely"
+fi
+msg_ok "FreePBX installation finished"
+
+motd_ssh
+customize
+
+msg_info "Cleaning up"
+rm -f "$INSTALL_PATH"
+$STD apt-get -y autoremove
+$STD apt-get -y autoclean
+msg_ok "Cleaned"
diff --git a/install/frigate-install.sh b/install/frigate-install.sh
index 557fd9d39..7f40bc750 100644
--- a/install/frigate-install.sh
+++ b/install/frigate-install.sh
@@ -2,6 +2,7 @@
# Copyright (c) 2021-2025 community-scripts ORG
# Authors: MickLesk (CanbiZ)
+# Co-Authors: remz1337
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://frigate.video/
@@ -14,81 +15,192 @@ network_check
update_os
msg_info "Installing Dependencies (Patience)"
-$STD apt-get install -y \
- git gpg ca-certificates automake build-essential xz-utils libtool ccache pkg-config \
- libgtk-3-dev libavcodec-dev libavformat-dev libswscale-dev libv4l-dev libxvidcore-dev libx264-dev \
- libjpeg-dev libpng-dev libtiff-dev gfortran openexr libatlas-base-dev libssl-dev libtbb-dev \
- libopenexr-dev libgstreamer-plugins-base1.0-dev libgstreamer1.0-dev gcc gfortran \
- libopenblas-dev liblapack-dev libusb-1.0-0-dev jq moreutils tclsh libhdf5-dev libopenexr-dev
+$STD apt-get install -y {jq,wget,xz-utils,python3,python3-dev,python3-distutils,gcc,pkg-config,libhdf5-dev,unzip,build-essential,automake,libtool,ccache,libusb-1.0-0-dev,apt-transport-https,python3.11,python3.11-dev,cmake,git,libgtk-3-dev,libavcodec-dev,libavformat-dev,libswscale-dev,libv4l-dev,libxvidcore-dev,libx264-dev,libjpeg-dev,libpng-dev,libtiff-dev,gfortran,openexr,libatlas-base-dev,libssl-dev,libtbbmalloc2,libtbb-dev,libdc1394-dev,libopenexr-dev,libgstreamer-plugins-base1.0-dev,libgstreamer1.0-dev,tclsh,libopenblas-dev,liblapack-dev,make,moreutils}
msg_ok "Installed Dependencies"
-msg_info "Setup Python3"
-$STD apt-get install -y \
- python3 python3-dev python3-setuptools python3-distutils python3-pip
-$STD pip install --upgrade pip
-msg_ok "Setup Python3"
-
-msg_info "Installing Node.js"
-mkdir -p /etc/apt/keyrings
-curl -fsSL https://deb.nodesource.com/gpgkey/nodesource-repo.gpg.key | gpg --dearmor -o /etc/apt/keyrings/nodesource.gpg
-echo "deb [signed-by=/etc/apt/keyrings/nodesource.gpg] https://deb.nodesource.com/node_20.x nodistro main" >/etc/apt/sources.list.d/nodesource.list
-$STD apt-get update
-$STD apt-get install -y nodejs
-msg_ok "Installed Node.js"
-
-msg_info "Installing go2rtc"
-mkdir -p /usr/local/go2rtc/bin
-cd /usr/local/go2rtc/bin
-curl -fsSL "https://github.com/AlexxIT/go2rtc/releases/latest/download/go2rtc_linux_amd64" -o go2rtc
-chmod +x go2rtc
-ln -sf /usr/local/go2rtc/bin/go2rtc /usr/local/bin/go2rtc
-msg_ok "Installed go2rtc"
-
msg_info "Setting Up Hardware Acceleration"
$STD apt-get -y install {va-driver-all,ocl-icd-libopencl1,intel-opencl-icd,vainfo,intel-gpu-tools}
if [[ "$CTTYPE" == "0" ]]; then
chgrp video /dev/dri
chmod 755 /dev/dri
chmod 660 /dev/dri/*
+ sed -i -e 's/^kvm:x:104:$/render:x:104:root,frigate/' -e 's/^render:x:105:root$/kvm:x:105:/' /etc/group
+else
+ sed -i -e 's/^kvm:x:104:$/render:x:104:frigate/' -e 's/^render:x:105:$/kvm:x:105:/' /etc/group
fi
msg_ok "Set Up Hardware Acceleration"
-msg_info "Setup Frigate"
-RELEASE=$(curl -s https://api.github.com/repos/blakeblackshear/frigate/releases/latest | jq -r '.tag_name')
-mkdir -p /opt/frigate/models
-curl -fsSL https://github.com/blakeblackshear/frigate/archive/refs/tags/${RELEASE}.tar.gz -o frigate.tar.gz
-tar -xzf frigate.tar.gz -C /opt/frigate --strip-components 1
-rm -rf frigate.tar.gz
-cd /opt/frigate
-$STD pip install -r /opt/frigate/docker/main/requirements.txt --break-system-packages
-$STD pip install -r /opt/frigate/docker/main/requirements-ov.txt --break-system-packages
-$STD pip3 wheel --wheel-dir=/wheels -r /opt/frigate/docker/main/requirements-wheels.txt
-pip3 install -U /wheels/*.whl
-cp -a /opt/frigate/docker/main/rootfs/. /
+msg_info "Setting up environment"
+#cd ~ && echo "export PATH=$PATH:/usr/local/bin" >> .bashrc
+#source .bashrc
export TARGETARCH="amd64"
-echo 'libc6 libraries/restart-without-asking boolean true' | debconf-set-selections
-$STD /opt/frigate/docker/main/install_deps.sh
-$STD apt update
-$STD ln -svf /usr/lib/btbn-ffmpeg/bin/ffmpeg /usr/local/bin/ffmpeg
-$STD ln -svf /usr/lib/btbn-ffmpeg/bin/ffprobe /usr/local/bin/ffprobe
+export CCACHE_DIR=/root/.ccache
+export CCACHE_MAXSIZE=2G
+# http://stackoverflow.com/questions/48162574/ddg#49462622
+export APT_KEY_DONT_WARN_ON_DANGEROUS_USAGE=DontWarn
+# https://askubuntu.com/questions/972516/debian-frontend-environment-variable
+export DEBIAN_FRONTEND=noninteractive
+# Globally set pip break-system-packages option to avoid having to specify it every time
+export PIP_BREAK_SYSTEM_PACKAGES=1
+# https://github.com/NVIDIA/nvidia-docker/wiki/Installation-(Native-GPU-Support)
+export NVIDIA_VISIBLE_DEVICES=all
+export NVIDIA_DRIVER_CAPABILITIES="compute,video,utility"
+# Disable tokenizer parallelism warning
+# https://stackoverflow.com/questions/62691279/how-to-disable-tokenizers-parallelism-true-false-warning/72926996#72926996
+export TOKENIZERS_PARALLELISM=true
+# https://github.com/huggingface/transformers/issues/27214
+export TRANSFORMERS_NO_ADVISORY_WARNINGS=1
+# Set OpenCV ffmpeg loglevel to fatal: https://ffmpeg.org/doxygen/trunk/log_8h.html
+export OPENCV_FFMPEG_LOGLEVEL=8
+# Set HailoRT to disable logging
+export HAILORT_LOGGER_PATH=NONE
+msg_ok "Setup environment"
+
+msg_info "Downloading Frigate source"
+fetch_and_deploy_gh_release "frigate" "blakeblackshear/frigate" "tarball" "latest" "/opt/frigate"
+msg_ok "Downloaded Frigate source"
+
+msg_info "Building Nginx with Custom Modules"
+#Overwrite version check as debian 12 LXC doesn't have the debian.list file for some reason
+sed -i 's|if.*"$VERSION_ID" == "12".*|if \[\[ "$VERSION_ID" == "12" \]\] \&\& \[\[ -f /etc/apt/sources.list.d/debian.list \]\]; then|g' /opt/frigate/docker/main/build_nginx.sh
+$STD bash /opt/frigate/docker/main/build_nginx.sh
+sed -e '/s6-notifyoncheck/ s/^#*/#/' -i /opt/frigate/docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx/run
+ln -sf /usr/local/nginx/sbin/nginx /usr/local/bin/nginx
+msg_ok "Built Nginx"
+
+msg_info "Building SQLite with Custom Modules"
+sed -i 's|if.*"$VERSION_ID" == "12".*|if \[\[ "$VERSION_ID" == "12" \]\] \&\& \[\[ -f /etc/apt/sources.list.d/debian.list \]\]; then|g' /opt/frigate/docker/main/build_sqlite_vec.sh
+$STD bash /opt/frigate/docker/main/build_sqlite_vec.sh
+msg_ok "Built SQLite"
+
+msg_info "Installing go2rtc"
+fetch_and_deploy_gh_release "go2rtc" "AlexxIT/go2rtc" "singlefile" "latest" "/usr/local/go2rtc/bin" "go2rtc_linux_amd64"
+msg_ok "Installed go2rtc"
+
+msg_info "Installing Tempio"
+sed -i 's|/rootfs/usr/local|/usr/local|g' /opt/frigate/docker/main/install_tempio.sh
+$STD bash /opt/frigate/docker/main/install_tempio.sh
+ln -sf /usr/local/tempio/bin/tempio /usr/local/bin/tempio
+msg_ok "Installed Tempio"
+
+msg_info "Building libUSB without udev"
+cd /opt
+wget -q https://github.com/libusb/libusb/archive/v1.0.26.zip -O v1.0.26.zip
+$STD unzip v1.0.26.zip
+cd libusb-1.0.26
+$STD ./bootstrap.sh
+$STD ./configure CC='ccache gcc' CCX='ccache g++' --disable-udev --enable-shared
+$STD make -j $(nproc --all)
+cd /opt/libusb-1.0.26/libusb
+mkdir -p '/usr/local/lib'
+$STD bash ../libtool --mode=install /usr/bin/install -c libusb-1.0.la '/usr/local/lib'
+mkdir -p '/usr/local/include/libusb-1.0'
+$STD install -c -m 644 libusb.h '/usr/local/include/libusb-1.0'
+mkdir -p '/usr/local/lib/pkgconfig'
+cd /opt/libusb-1.0.26/
+$STD install -c -m 644 libusb-1.0.pc '/usr/local/lib/pkgconfig'
+ldconfig
+msg_ok "Built libUSB"
+
+msg_info "Installing Pip"
+wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py
+sed -i 's/args.append("setuptools")/args.append("setuptools==77.0.3")/' get-pip.py
+$STD python3 get-pip.py "pip"
+msg_ok "Installed Pip"
+
+msg_info "Installing Frigate Dependencies"
+$STD update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.11 1
+$STD pip3 install -r /opt/frigate/docker/main/requirements.txt
+msg_ok "Installed Frigate Dependencies"
+
+msg_info "Building pysqlite3"
+sed -i 's|^SQLITE3_VERSION=.*|SQLITE3_VERSION="version-3.46.0"|g' /opt/frigate/docker/main/build_pysqlite3.sh
+$STD bash /opt/frigate/docker/main/build_pysqlite3.sh
+$STD pip3 wheel --wheel-dir=/wheels -r /opt/frigate/docker/main/requirements-wheels.txt
+msg_ok "Built pysqlite3"
+
+msg_info "Installing NodeJS"
+NODE_VERSION="22" NODE_MODULE="yarn" setup_nodejs
+msg_ok "Installed NodeJS"
+
+# This should be moved to conditional block, only needed if Coral TPU is detected
+msg_info "Downloading Coral TPU Model"
+cd /
+wget -qO edgetpu_model.tflite https://github.com/google-coral/test_data/raw/release-frogfish/ssdlite_mobiledet_coco_qat_postprocess_edgetpu.tflite
+msg_ok "Downloaded Coral TPU Model"
+
+msg_info "Downloading CPU Model"
+mkdir -p /models
+cd /models
+wget -qO cpu_model.tflite https://github.com/google-coral/test_data/raw/release-frogfish/ssdlite_mobiledet_coco_qat_postprocess.tflite
+cp /opt/frigate/labelmap.txt /labelmap.txt
+msg_ok "Downloaded CPU Model"
+
+msg_info "Building Audio Models"
+# Get Audio Model and labels
+wget -qO yamnet-tflite-classification-tflite-v1.tar.gz https://www.kaggle.com/api/v1/models/google/yamnet/tfLite/classification-tflite/1/download
+$STD tar xzf yamnet-tflite-classification-tflite-v1.tar.gz
+rm -rf yamnet-tflite-classification-tflite-v1.tar.gz
+mv 1.tflite cpu_audio_model.tflite
+cp /opt/frigate/audio-labelmap.txt /audio-labelmap.txt
+msg_ok "Built Audio Models"
+
+# This should be moved to conditional block, only needed if Hailo AI module is detected
+msg_info "Building HailoRT"
+$STD bash /opt/frigate/docker/main/install_hailort.sh
+cp -a /opt/frigate/docker/main/rootfs/. /
+sed -i '/^.*unset DEBIAN_FRONTEND.*$/d' /opt/frigate/docker/main/install_deps.sh
+echo "libedgetpu1-max libedgetpu/accepted-eula boolean true" | debconf-set-selections
+echo "libedgetpu1-max libedgetpu/install-confirm-max boolean true" | debconf-set-selections
+$STD bash /opt/frigate/docker/main/install_deps.sh
$STD pip3 install -U /wheels/*.whl
ldconfig
+#Run twice to fix dependency conflict
+$STD pip3 install -U /wheels/*.whl
+msg_ok "Built HailoRT"
+
+msg_info "Installing OpenVino Runtime and Dev library"
+$STD pip3 install -r /opt/frigate/docker/main/requirements-ov.txt
+msg_ok "Installed OpenVino Runtime and Dev library"
+
+msg_info "Downloading OpenVino Model"
+mkdir -p /models
+cd /models
+wget -q http://download.tensorflow.org/models/object_detection/ssdlite_mobilenet_v2_coco_2018_05_09.tar.gz
+$STD tar -zxvf ssdlite_mobilenet_v2_coco_2018_05_09.tar.gz --no-same-owner
+$STD python3 /opt/frigate/docker/main/build_ov_model.py
+mkdir -p /openvino-model
+cp -r /models/ssdlite_mobilenet_v2.xml /openvino-model/
+cp -r /models/ssdlite_mobilenet_v2.bin /openvino-model/
+wget -q https://github.com/openvinotoolkit/open_model_zoo/raw/master/data/dataset_classes/coco_91cl_bkgr.txt -O /openvino-model/coco_91cl_bkgr.txt
+sed -i 's/truck/car/g' /openvino-model/coco_91cl_bkgr.txt
+msg_ok "Downloaded OpenVino Model"
+
+msg_info "Installing Frigate"
+cd /opt/frigate
$STD pip3 install -r /opt/frigate/docker/main/requirements-dev.txt
-$STD /opt/frigate/.devcontainer/initialize.sh
+$STD bash /opt/frigate/.devcontainer/initialize.sh
$STD make version
cd /opt/frigate/web
$STD npm install
$STD npm run build
cp -r /opt/frigate/web/dist/* /opt/frigate/web/
-cp -r /opt/frigate/config/. /config
+cd /opt/frigate/
sed -i '/^s6-svc -O \.$/s/^/#/' /opt/frigate/docker/main/rootfs/etc/s6-overlay/s6-rc.d/frigate/run
+cp -r /opt/frigate/config/. /config
+mkdir -p /media/frigate
+curl -fsSL "https://github.com/intel-iot-devkit/sample-videos/raw/master/person-bicycle-car-detection.mp4" -o "/media/frigate/person-bicycle-car-detection.mp4"
+echo "tmpfs /tmp/cache tmpfs defaults 0 0" >>/etc/fstab
+cat <<EOF >/etc/frigate.env
+DEFAULT_FFMPEG_VERSION="7.0"
+INCLUDED_FFMPEG_VERSIONS="7.0:5.0"
+EOF
cat <<EOF >/config/config.yml
mqtt:
enabled: false
cameras:
test:
ffmpeg:
- #hwaccel_args: preset-vaapi
inputs:
- path: /media/frigate/person-bicycle-car-detection.mp4
input_args: -re -stream_loop -1 -fflags +genpts
@@ -99,79 +211,43 @@ cameras:
height: 1080
width: 1920
fps: 5
+# Optional: Authentication configuration
+auth:
+ # Optional: Enable authentication
+ enabled: false
+detect:
+ enabled: false
EOF
-ln -sf /config/config.yml /opt/frigate/config/config.yml
-if [[ "$CTTYPE" == "0" ]]; then
- sed -i -e 's/^kvm:x:104:$/render:x:104:root,frigate/' -e 's/^render:x:105:root$/kvm:x:105:/' /etc/group
+msg_ok "Installed Frigate"
+
+if grep -q -o -m1 -E 'avx[^ ]*|sse4_2' /proc/cpuinfo; then
+ msg_ok "AVX or SSE 4.2 Support Detected"
+ msg_info "Configuring Openvino Object Detection Model"
+ cat <<EOF >>/config/config.yml
+ffmpeg:
+ hwaccel_args: auto
+detectors:
+ detector01:
+ type: openvino
+model:
+ width: 300
+ height: 300
+ input_tensor: nhwc
+ input_pixel_format: bgr
+ path: /openvino-model/ssdlite_mobilenet_v2.xml
+ labelmap_path: /openvino-model/coco_91cl_bkgr.txt
+EOF
+ msg_ok "Configured Openvino Object Detection Model"
else
- sed -i -e 's/^kvm:x:104:$/render:x:104:frigate/' -e 's/^render:x:105:$/kvm:x:105:/' /etc/group
+ msg_info "Configuring CPU Object Detection Model"
+ cat <<EOF >>/config/config.yml
+ffmpeg:
+ hwaccel_args: auto
+model:
+ path: /cpu_model.tflite
+EOF
+ msg_ok "Configured CPU Object Detection Model"
fi
-echo "tmpfs /tmp/cache tmpfs defaults 0 0" >>/etc/fstab
-msg_ok "Installed Frigate $RELEASE"
-
-read -p "Semantic Search requires a dedicated GPU and at least 16GB RAM. Would you like to install it? (y/n): " semantic_choice
-if [[ "$semantic_choice" == "y" ]]; then
- msg_info "Configuring Semantic Search & AI Models"
- mkdir -p /opt/frigate/models/semantic_search
- curl -fsSL -o /opt/frigate/models/semantic_search/clip_model.pt https://huggingface.co/openai/clip-vit-base-patch32/resolve/main/pytorch_model.bin
- msg_ok "Semantic Search Models Installed"
-else
- msg_ok "Skipped Semantic Search Setup"
-fi
-msg_info "Building and Installing libUSB without udev"
-wget -qO /tmp/libusb.zip https://github.com/libusb/libusb/archive/v1.0.26.zip
-unzip -q /tmp/libusb.zip -d /tmp/
-cd /tmp/libusb-1.0.26
-./bootstrap.sh
-./configure --disable-udev --enable-shared
-make -j$(nproc --all)
-make install
-ldconfig
-rm -rf /tmp/libusb.zip /tmp/libusb-1.0.26
-msg_ok "Installed libUSB without udev"
-
-msg_info "Installing Coral Object Detection Model (Patience)"
-cd /opt/frigate
-export CCACHE_DIR=/root/.ccache
-export CCACHE_MAXSIZE=2G
-curl -fsSL https://github.com/libusb/libusb/archive/v1.0.26.zip
-unzip -q v1.0.26.zip
-rm v1.0.26.zip
-cd libusb-1.0.26
-$STD ./bootstrap.sh
-$STD ./configure --disable-udev --enable-shared
-$STD make -j $(nproc --all)
-cd /opt/frigate/libusb-1.0.26/libusb
-mkdir -p /usr/local/lib
-$STD /bin/bash ../libtool --mode=install /usr/bin/install -c libusb-1.0.la '/usr/local/lib'
-mkdir -p /usr/local/include/libusb-1.0
-$STD /usr/bin/install -c -m 644 libusb.h '/usr/local/include/libusb-1.0'
-ldconfig
-cd /
-wget -qO edgetpu_model.tflite https://github.com/google-coral/test_data/raw/release-frogfish/ssdlite_mobiledet_coco_qat_postprocess_edgetpu.tflite
-wget -qO cpu_model.tflite https://github.com/google-coral/test_data/raw/release-frogfish/ssdlite_mobiledet_coco_qat_postprocess.tflite
-cp /opt/frigate/labelmap.txt /labelmap.txt
-wget -qO yamnet-tflite-classification-tflite-v1.tar.gz https://www.kaggle.com/api/v1/models/google/yamnet/tfLite/classification-tflite/1/download
-tar xzf yamnet-tflite-classification-tflite-v1.tar.gz
-rm -rf yamnet-tflite-classification-tflite-v1.tar.gz
-mv 1.tflite cpu_audio_model.tflite
-cp /opt/frigate/audio-labelmap.txt /audio-labelmap.txt
-mkdir -p /media/frigate
-wget -qO /media/frigate/person-bicycle-car-detection.mp4 https://github.com/intel-iot-devkit/sample-videos/raw/master/person-bicycle-car-detection.mp4
-msg_ok "Installed Coral Object Detection Model"
-
-msg_info "Building Nginx with Custom Modules"
-$STD /opt/frigate/docker/main/build_nginx.sh
-sed -e '/s6-notifyoncheck/ s/^#*/#/' -i /opt/frigate/docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx/run
-ln -sf /usr/local/nginx/sbin/nginx /usr/local/bin/nginx
-msg_ok "Built Nginx"
-
-msg_info "Installing Tempio"
-sed -i 's|/rootfs/usr/local|/usr/local|g' /opt/frigate/docker/main/install_tempio.sh
-$STD /opt/frigate/docker/main/install_tempio.sh
-chmod +x /usr/local/tempio/bin/tempio
-ln -sf /usr/local/tempio/bin/tempio /usr/local/bin/tempio
-msg_ok "Installed Tempio"
msg_info "Creating Services"
cat <<EOF >/etc/systemd/system/create_directories.service
@@ -199,6 +275,7 @@ Type=simple
Restart=always
RestartSec=1
User=root
+EnvironmentFile=/etc/frigate.env
ExecStartPre=+rm /dev/shm/logs/go2rtc/current
ExecStart=/bin/bash -c "bash /opt/frigate/docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/run 2> >(/usr/bin/ts '%%Y-%%m-%%d %%H:%%M:%%.S ' >&2) | /usr/bin/ts '%%Y-%%m-%%d %%H:%%M:%%.S '"
StandardOutput=file:/dev/shm/logs/go2rtc/current
@@ -221,6 +298,7 @@ Type=simple
Restart=always
RestartSec=1
User=root
+EnvironmentFile=/etc/frigate.env
# Environment=PLUS_API_KEY=
ExecStartPre=+rm /dev/shm/logs/frigate/current
ExecStart=/bin/bash -c "bash /opt/frigate/docker/main/rootfs/etc/s6-overlay/s6-rc.d/frigate/run 2> >(/usr/bin/ts '%%Y-%%m-%%d %%H:%%M:%%.S ' >&2) | /usr/bin/ts '%%Y-%%m-%%d %%H:%%M:%%.S '"
@@ -253,12 +331,14 @@ StandardError=file:/dev/shm/logs/nginx/current
WantedBy=multi-user.target
EOF
systemctl enable -q --now nginx
-msg_ok "Configured Services"
+msg_ok "Created Services"
motd_ssh
customize
msg_info "Cleaning up"
-$STD apt-get -y autoremove
-$STD apt-get -y autoclean
+$STD apt-get -y autoremove
+$STD apt-get -y autoclean
msg_ok "Cleaned"
+
+echo -e "Don't forget to edit the Frigate config file (${GN}/config/config.yml${CL}) and reboot. Example configuration at https://docs.frigate.video/configuration/"
\ No newline at end of file
diff --git a/install/garmin-grafana-install.sh b/install/garmin-grafana-install.sh
index fb201f8ec..70070f8c1 100644
--- a/install/garmin-grafana-install.sh
+++ b/install/garmin-grafana-install.sh
@@ -77,9 +77,9 @@ $STD grafana-cli plugins install marcusolsson-hourly-heatmap-panel
$STD systemctl restart grafana-server
# Output credentials to file
{
- echo "Grafana Credentials"
- echo "Grafana User: ${GRAFANA_USER}"
- echo "Grafana Password: ${GRAFANA_PASS}"
+ echo "Grafana Credentials"
+ echo "Grafana User: ${GRAFANA_USER}"
+ echo "Grafana Password: ${GRAFANA_PASS}"
} >>~/garmin-grafana.creds
msg_ok "Set up Grafana"
@@ -90,7 +90,7 @@ curl -fsSL -o "${RELEASE}.zip" "https://github.com/arpanghosh8453/garmin-grafana
unzip -q "${RELEASE}.zip"
# Remove the v prefix to RELEASE if it exists
if [[ "${RELEASE}" == v* ]]; then
- RELEASE="${RELEASE:1}"
+ RELEASE="${RELEASE:1}"
fi
mv "garmin-grafana-${RELEASE}/" "/opt/garmin-grafana"
mkdir -p /opt/garmin-grafana/.garminconnect
@@ -112,9 +112,9 @@ msg_info "Setting up garmin-grafana"
# Check if using Chinese garmin servers
read -rp "Are you using Garmin in mainland China? (y/N): " prompt
if [[ "${prompt,,}" =~ ^(y|yes|Y)$ ]]; then
- GARMIN_CN="True"
+ GARMIN_CN="True"
else
- GARMIN_CN="False"
+ GARMIN_CN="False"
fi
cat <<EOF >/opt/garmin-grafana/.env
@@ -131,24 +131,24 @@ EOF
# garmin-grafana usually prompts the user for email and password (and MFA) on first run,
# then stores a refreshable token. We try to avoid storing user credentials in the env vars
if [ -z "$(ls -A /opt/garmin-grafana/.garminconnect)" ]; then
- read -r -p "Please enter your Garmin Connect Email: " GARMIN_EMAIL
- read -r -p "Please enter your Garmin Connect Password (this is used to generate a token and NOT stored): " GARMIN_PASSWORD
- read -r -p "Please enter your MFA Code (if applicable, leave blank if not): " GARMIN_MFA
- # Run the script once to prompt for credential
- msg_info "Creating Garmin credentials, this will timeout in 60 seconds"
- timeout 60s uv run --env-file /opt/garmin-grafana/.env --project /opt/garmin-grafana/ /opt/garmin-grafana/src/garmin_grafana/garmin_fetch.py </opt/gitea-mirror.env
+# See here for config options: https://github.com/RayLabsHQ/gitea-mirror/blob/main/docs/ENVIRONMENT_VARIABLES.md
+NODE_ENV=production
+HOST=0.0.0.0
+PORT=4321
+DATABASE_URL=sqlite://data/gitea-mirror.db
+BETTER_AUTH_URL=http://${HOST_IP}:4321
+BETTER_AUTH_SECRET=${APP_SECRET}
+npm_package_version=${APP_VERSION}
+EOF
+
cat <<EOF >/etc/systemd/system/gitea-mirror.service
[Unit]
Description=Gitea Mirror
@@ -49,12 +61,7 @@ WorkingDirectory=/opt/gitea-mirror
ExecStart=/usr/local/bin/bun dist/server/entry.mjs
Restart=on-failure
RestartSec=10
-Environment=NODE_ENV=production
-Environment=HOST=0.0.0.0
-Environment=PORT=4321
-Environment=DATABASE_URL=file:/opt/gitea-mirror/data/gitea-mirror.db
-Environment=JWT_SECRET=${JWT_SECRET}
-Environment=npm_package_version=${APP_VERSION}
+EnvironmentFile=/opt/gitea-mirror.env
[Install]
WantedBy=multi-user.target
EOF
diff --git a/install/hanko-install.sh b/install/hanko-install.sh
deleted file mode 100644
index 96f2308c8..000000000
--- a/install/hanko-install.sh
+++ /dev/null
@@ -1,79 +0,0 @@
-#!/usr/bin/env bash
-
-# Copyright (c) 2021-2025 community-scripts ORG
-# Author: MickLesk (CanbiZ)
-# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
-# Source: https://hanko.io/
-
-source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
-color
-verb_ip6
-catch_errors
-setting_up_container
-network_check
-update_os
-
-setup_yq
-PG_VERSION="16" setup_postgresql
-NODE_VERSION=22 NODE_MODULE="yarn@latest,npm@latest" setup_nodejs
-
-msg_info "Setting up PostgreSQL Database"
-DB_NAME=hanko
-DB_USER=hanko
-DB_PASS="$(openssl rand -base64 18 | cut -c1-13)"
-APP_SECRET=$(openssl rand -base64 32)
-$STD sudo -u postgres psql -c "CREATE ROLE $DB_USER WITH LOGIN PASSWORD '$DB_PASS';"
-$STD sudo -u postgres psql -c "CREATE DATABASE $DB_NAME WITH OWNER $DB_USER ENCODING 'UTF8' TEMPLATE template0;"
-$STD sudo -u postgres psql -c "ALTER ROLE $DB_USER SET client_encoding TO 'utf8';"
-$STD sudo -u postgres psql -c "ALTER ROLE $DB_USER SET default_transaction_isolation TO 'read committed';"
-$STD sudo -u postgres psql -c "ALTER ROLE $DB_USER SET timezone TO 'UTC'"
-{
- echo "Hanko-Credentials"
- echo "Hanko Database User: $DB_USER"
- echo "Hanko Database Password: $DB_PASS"
- echo "Hanko Database Name: $DB_NAME"
-} >>~/hanko.creds
-msg_ok "Set up PostgreSQL Database"
-
-msg_info "Setup Hanko"
-fetch_and_deploy_gh_release "hanko" "teamhanko/hanko" "prebuild" "latest" "/opt/hanko" "hanko_Linux_x86_64.tar.gz"
-curl -fsSL https://raw.githubusercontent.com/teamhanko/hanko/refs/heads/main/backend/config/config.yaml -o /opt/hanko/config.yaml
-env DB_USER="$DB_USER" DB_PASS="$DB_PASS" APP_SECRET="$APP_SECRET" \
- yq eval '
- .database.user = strenv(DB_USER) |
- .database.password = strenv(DB_PASS) |
- .database.host = "localhost" |
- .database.port = "5432" |
- .database.dialect = "postgres" |
- .app.secret = strenv(APP_SECRET)
-' -i /opt/hanko/config.yaml
-$STD /opt/hanko/hanko --config /opt/hanko/config.yaml migrate up
-yarn add @teamhanko/hanko-elements
-msg_ok "Setup Hanko"
-
-msg_info "Setup Service"
-cat <<EOF >/etc/systemd/system/hanko.service
-[Unit]
-Description=Hanko Service
-After=network.target
-
-[Service]
-Type=simple
-ExecStart=/opt/hanko/hanko serve all --config /opt/hanko/config.yaml
-Restart=on-failure
-RestartSec=5
-
-[Install]
-WantedBy=multi-user.target
-EOF
-
-systemctl enable -q --now hanko
-msg_ok "Service Setup"
-
-motd_ssh
-customize
-
-msg_info "Cleaning up"
-$STD apt-get -y autoremove
-$STD apt-get -y autoclean
-msg_ok "Cleaned"
diff --git a/install/healthchecks-install.sh b/install/healthchecks-install.sh
deleted file mode 100644
index 5ce285fe7..000000000
--- a/install/healthchecks-install.sh
+++ /dev/null
@@ -1,132 +0,0 @@
-#!/usr/bin/env bash
-
-# Copyright (c) 2021-2025 community-scripts ORG
-# Author: MickLesk (Canbiz)
-# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
-# Source: https://github.com/getmaxun/maxun
-
-source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
-color
-verb_ip6
-catch_errors
-setting_up_container
-network_check
-update_os
-
-msg_info "Installing Dependencies"
-$STD apt-get install -y \
- gcc \
- libpq-dev \
- libcurl4-openssl-dev \
- libssl-dev
-msg_ok "Installed Dependencies"
-
-msg_info "Setup Python3"
-$STD apt-get install -y \
- python3 python3-dev python3-pip
-$STD pip install --upgrade pip
-msg_ok "Setup Python3"
-
-setup_uv
-PG_VERSION=16 setup_postgresql
-
-msg_info "Setup Database"
-DB_NAME=healthchecks_db
-DB_USER=hc_user
-DB_PASS=$(openssl rand -base64 18 | tr -dc 'a-zA-Z0-9' | cut -c1-13)
-SECRET_KEY="$(openssl rand -base64 32 | tr -dc 'a-zA-Z0-9' | cut -c1-32)"
-
-$STD sudo -u postgres psql -c "CREATE ROLE $DB_USER WITH LOGIN PASSWORD '$DB_PASS';"
-$STD sudo -u postgres psql -c "CREATE DATABASE $DB_NAME WITH OWNER $DB_USER ENCODING 'UTF8' TEMPLATE template0;"
-$STD sudo -u postgres psql -c "ALTER ROLE $DB_USER SET client_encoding TO 'utf8';"
-$STD sudo -u postgres psql -c "ALTER ROLE $DB_USER SET default_transaction_isolation TO 'read committed';"
-$STD sudo -u postgres psql -c "ALTER ROLE $DB_USER SET timezone TO 'UTC'"
-{
- echo "healthchecks-Credentials"
- echo "healthchecks Database User: $DB_USER"
- echo "healthchecks Database Password: $DB_PASS"
- echo "healthchecks Database Name: $DB_NAME"
-} >>~/healthchecks.creds
-msg_ok "Set up Database"
-
-msg_info "Setup healthchecks"
-fetch_and_deploy_gh_release "healthchecks" "healthchecks/healthchecks" "source"
-cd /opt/healthchecks
-mkdir -p /opt/healthchecks/static-collected/
-$STD uv venv .venv
-$STD source .venv/bin/activate
-$STD uv pip install wheel
-$STD uv pip install gunicorn
-$STD uv pip install -r requirements.txt
-LOCAL_IP=$(hostname -I | awk '{print $1}')
-cat <<EOF >/opt/healthchecks/.env
-ALLOWED_HOSTS=localhost,127.0.0.1,${LOCAL_IP},healthchecks
-DB=postgres
-DB_HOST=localhost
-DB_PORT=5432
-DB_NAME=${DB_NAME}
-DB_USER=${DB_USER}
-DB_PASSWORD=${DB_PASS}
-DB_CONN_MAX_AGE=0
-DB_SSLMODE=prefer
-DB_TARGET_SESSION_ATTRS=read-write
-DATABASE_URL=postgres://${DB_USER}:${DB_PASS}@localhost:5432/${DB_NAME}?sslmode=prefer
-
-DEFAULT_FROM_EMAIL=healthchecks@example.org
-EMAIL_HOST=localhost
-EMAIL_HOST_PASSWORD=
-EMAIL_HOST_USER=
-EMAIL_PORT=587
-EMAIL_USE_TLS=True
-EMAIL_USE_VERIFICATION=True
-
-# Django & Healthchecks Konfiguration
-SECRET_KEY=${SECRET_KEY}
-DEBUG=True
-
-SITE_ROOT=http://${LOCAL_IP}:8000
-SITE_NAME=MyChecks
-STATIC_ROOT=/opt/healthchecks/static-collected
-
-EOF
-
-$STD .venv/bin/python3 manage.py makemigrations
-$STD .venv/bin/python3 manage.py migrate --noinput
-$STD .venv/bin/python3 manage.py collectstatic --noinput
-
-ADMIN_EMAIL="admin@helper-scripts.local"
-ADMIN_PASSWORD="$DB_PASS"
-cat <<EOF >/etc/systemd/system/healthchecks.service
-[Unit]
-Description=Healthchecks Service
-After=network.target postgresql.service
-
-[Service]
-WorkingDirectory=/opt/healthchecks/
-EnvironmentFile=/opt/healthchecks/.env
-ExecStart=/opt/healthchecks/.venv/bin/gunicorn hc.wsgi:application --bind 127.0.0.1:8000
-
-Restart=always
-
-[Install]
-WantedBy=multi-user.target
-EOF
-systemctl enable -q --now healthchecks
-msg_ok "Created Service"
-
-motd_ssh
-customize
-
-msg_info "Cleaning up"
-$STD apt-get -y autoremove
-$STD apt-get -y autoclean
-msg_ok "Cleaned"
diff --git a/install/infisical-install.sh b/install/infisical-install.sh
new file mode 100644
index 000000000..01b22112c
--- /dev/null
+++ b/install/infisical-install.sh
@@ -0,0 +1,74 @@
+#!/usr/bin/env bash
+
+# Copyright (c) 2021-2025 community-scripts ORG
+# Author: Slaviša Arežina (tremor021)
+# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
+# Source: https://infisical.com/
+
+source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
+color
+verb_ip6
+catch_errors
+setting_up_container
+network_check
+update_os
+
+msg_info "Installing Dependencies"
+$STD apt install -y \
+ apt-transport-https \
+ ca-certificates \
+ redis
+msg_ok "Installed Dependencies"
+
+msg_info "Setting up Infisical repository"
+curl -fsSL "https://artifacts-infisical-core.infisical.com/infisical.gpg" | gpg --dearmor >/etc/apt/trusted.gpg.d/infisical.gpg
+cat <<EOF >/etc/apt/sources.list.d/infisical.sources
+Types: deb
+URIs: https://artifacts-infisical-core.infisical.com/deb
+Suites: stable
+Components: main
+Signed-By: /etc/apt/trusted.gpg.d/infisical.gpg
+EOF
+msg_ok "Setup Infisical repository"
+
+PG_VERSION="17" setup_postgresql
+
+msg_info "Configuring PostgreSQL"
+DB_NAME="infisical_db"
+DB_USER="infisical"
+DB_PASS="$(openssl rand -base64 18 | tr -dc 'a-zA-Z0-9' | cut -c1-13)"
+$STD sudo -u postgres psql -c "CREATE ROLE $DB_USER WITH LOGIN PASSWORD '$DB_PASS';"
+$STD sudo -u postgres psql -c "CREATE DATABASE $DB_NAME WITH OWNER $DB_USER ENCODING 'UTF8' TEMPLATE template0;"
+$STD sudo -u postgres psql -c "ALTER ROLE $DB_USER SET client_encoding TO 'utf8';"
+$STD sudo -u postgres psql -c "ALTER ROLE $DB_USER SET default_transaction_isolation TO 'read committed';"
+$STD sudo -u postgres psql -c "ALTER ROLE $DB_USER SET timezone TO 'UTC';"
+{
+ echo "Infisical Credentials"
+ echo "Database Name: $DB_NAME"
+ echo "Database User: $DB_USER"
+ echo "Database Password: $DB_PASS"
+} >>~/infisical.creds
+msg_ok "Configured PostgreSQL"
+
+msg_info "Setting up Infisical"
+IP_ADDR=$(hostname -I | awk '{print $1}')
+$STD apt install -y infisical-core
+mkdir -p /etc/infisical
+cat <<EOF >/etc/infisical/infisical.rb
+infisical_core['ENCRYPTION_KEY'] = '$(openssl rand -hex 16)'
+infisical_core['AUTH_SECRET'] = '$(openssl rand -base64 32)'
+infisical_core['HOST'] = '$IP_ADDR'
+infisical_core['DB_CONNECTION_URI'] = 'postgres://${DB_USER}:${DB_PASS}@localhost:5432/${DB_NAME}'
+infisical_core['REDIS_URL'] = 'redis://localhost:6379'
+EOF
+$STD infisical-ctl reconfigure
+msg_ok "Setup Infisical"
+
+motd_ssh
+customize
+
+msg_info "Cleaning up"
+$STD apt -y autoremove
+$STD apt -y autoclean
+$STD apt -y clean
+msg_ok "Cleaned"
diff --git a/install/jeedom-install.sh b/install/jeedom-install.sh
deleted file mode 100644
index c9a736abf..000000000
--- a/install/jeedom-install.sh
+++ /dev/null
@@ -1,90 +0,0 @@
-#!/usr/bin/env bash
-
-# Copyright (c) 2021-2025 community-scripts ORG
-# Author: Mips2648
-# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
-# Source: https://jeedom.com/
-
-source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
-color
-verb_ip6
-catch_errors
-setting_up_container
-network_check
-update_os
-
-msg_info "Installing dependencies"
-$STD apt-get install -y \
- lsb-release \
- git
-msg_ok "Dependencies installed"
-
-DEFAULT_BRANCH="master"
-echo
-while true; do
- read -r -p "Enter branch to use (master, beta, alpha...) (Default: ${DEFAULT_BRANCH}): " BRANCH
- BRANCH=${BRANCH:-$DEFAULT_BRANCH}
-
- if git ls-remote --heads https://github.com/jeedom/core.git "$BRANCH" | grep -q "$BRANCH"; then
- break
- else
- echo "Branch '$BRANCH' does not exist. Please enter a valid branch."
- fi
-done
-
-msg_info "Downloading Jeedom installation script"
-wget -q https://raw.githubusercontent.com/jeedom/core/"${BRANCH}"/install/install.sh
-chmod +x install.sh
-msg_ok "Installation script downloaded"
-
-msg_info "Install Jeedom main dependencies, please wait"
-$STD ./install.sh -v "$BRANCH" -s 2
-msg_ok "Installed Jeedom main dependencies"
-
-msg_info "Install Database"
-$STD ./install.sh -v "$BRANCH" -s 3
-msg_ok "Database installed"
-
-msg_info "Install Apache"
-$STD ./install.sh -v "$BRANCH" -s 4
-msg_ok "Apache installed"
-
-msg_info "Install PHP and dependencies"
-$STD ./install.sh -v "$BRANCH" -s 5
-msg_ok "PHP installed"
-
-msg_info "Download Jeedom core"
-$STD ./install.sh -v "$BRANCH" -s 6
-msg_ok "Download done"
-
-msg_info "Database customisation"
-$STD ./install.sh -v "$BRANCH" -s 7
-msg_ok "Database customisation done"
-
-msg_info "Jeedom customisation"
-$STD ./install.sh -v "$BRANCH" -s 8
-msg_ok "Jeedom customisation done"
-
-msg_info "Configuring Jeedom"
-$STD ./install.sh -v "$BRANCH" -s 9
-msg_ok "Jeedom configured"
-
-msg_info "Installing Jeedom"
-$STD ./install.sh -v "$BRANCH" -s 10
-msg_ok "Jeedom installed"
-
-msg_info "Post installation"
-$STD ./install.sh -v "$BRANCH" -s 11
-msg_ok "Post installation done"
-
-msg_info "Check installation"
-$STD ./install.sh -v "$BRANCH" -s 12
-msg_ok "Installation checked, everything is successfuly installed. A reboot is recommended."
-
-motd_ssh
-customize
-
-msg_info "Cleaning up"
-$STD apt-get -y autoremove
-$STD apt-get -y autoclean
-msg_ok "Cleaned"
diff --git a/install/librenms-install.sh b/install/librenms-install.sh
index bea0d4a8e..2b9ff3e44 100644
--- a/install/librenms-install.sh
+++ b/install/librenms-install.sh
@@ -14,30 +14,34 @@ network_check
update_os
msg_info "Installing Dependencies"
-$STD apt-get install -y \
- lsb-release \
- ca-certificates \
- acl \
- fping \
- graphviz \
- imagemagick \
- mtr-tiny \
- nginx \
- nmap \
- rrdtool \
- snmp \
- snmpd
+$STD apt install -y \
+ acl \
+ fping \
+ graphviz \
+ imagemagick \
+ mtr-tiny \
+ nginx \
+ nmap \
+ rrdtool \
+ snmp \
+ snmpd \
+ whois
msg_ok "Installed Dependencies"
-PHP_VERSION=8.2 PHP_FPM=YES PHP_APACHE=NO PHP_MODULE="gmp,mysql,snmp" setup_php
+PHP_VERSION="8.4" PHP_FPM="YES" PHP_MODULE="gmp,mysql,snmp" setup_php
setup_mariadb
setup_composer
-setup_uv
+PYTHON_VERSION="3.13" setup_uv
-msg_info "Installing Python"
-$STD apt-get install -y \
- python3-{dotenv,pymysql,redis,setuptools,systemd,pip}
-msg_ok "Installed Python"
+msg_info "Installing Python Dependencies"
+$STD apt install -y \
+ python3-dotenv \
+ python3-pymysql \
+ python3-redis \
+ python3-setuptools \
+ python3-systemd \
+ python3-pip
+msg_ok "Installed Python Dependencies"
msg_info "Configuring Database"
DB_NAME=librenms
@@ -47,18 +51,18 @@ $STD mariadb -u root -e "CREATE DATABASE $DB_NAME CHARACTER SET utf8mb4 COLLATE
$STD mariadb -u root -e "CREATE USER '$DB_USER'@'localhost' IDENTIFIED BY '$DB_PASS';"
$STD mariadb -u root -e "GRANT ALL ON $DB_NAME.* TO '$DB_USER'@'localhost'; FLUSH PRIVILEGES;"
{
- echo "LibreNMS-Credentials"
- echo "LibreNMS Database User: $DB_USER"
- echo "LibreNMS Database Password: $DB_PASS"
- echo "LibreNMS Database Name: $DB_NAME"
+ echo "LibreNMS-Credentials"
+ echo "LibreNMS Database User: $DB_USER"
+ echo "LibreNMS Database Password: $DB_PASS"
+ echo "LibreNMS Database Name: $DB_NAME"
} >>~/librenms.creds
msg_ok "Configured Database"
-msg_info "Setup Librenms"
+fetch_and_deploy_gh_release "librenms" "librenms/librenms"
+
+msg_info "Configuring LibreNMS"
$STD useradd librenms -d /opt/librenms -M -r -s "$(which bash)"
-fetch_and_deploy_gh_release "librenms/librenms"
-setfacl -d -m g::rwx /opt/librenms/rrd /opt/librenms/logs /opt/librenms/bootstrap/cache/ /opt/librenms/storage/
-setfacl -R -m g::rwx /opt/librenms/rrd /opt/librenms/logs /opt/librenms/bootstrap/cache/ /opt/librenms/storage/
+mkdir -p /opt/librenms/{rrd,logs,bootstrap/cache,storage,html}
cd /opt/librenms
$STD uv venv .venv
$STD source .venv/bin/activate
@@ -70,9 +74,8 @@ DB_PASSWORD=${DB_PASS}
EOF
chown -R librenms:librenms /opt/librenms
chmod 771 /opt/librenms
-setfacl -d -m g::rwx /opt/librenms/bootstrap/cache /opt/librenms/storage /opt/librenms/logs /opt/librenms/rrd
chmod -R ug=rwX /opt/librenms/bootstrap/cache /opt/librenms/storage /opt/librenms/logs /opt/librenms/rrd
-msg_ok "Setup LibreNMS"
+msg_ok "Configured LibreNMS"
msg_info "Configure MariaDB"
sed -i "/\[mysqld\]/a innodb_file_per_table=1\nlower_case_table_names=0" /etc/mysql/mariadb.conf.d/50-server.cnf
@@ -80,11 +83,11 @@ systemctl enable -q --now mariadb
msg_ok "Configured MariaDB"
msg_info "Configure PHP-FPM"
-cp /etc/php/8.2/fpm/pool.d/www.conf /etc/php/8.2/fpm/pool.d/librenms.conf
-sed -i "s/\[www\]/\[librenms\]/g" /etc/php/8.2/fpm/pool.d/librenms.conf
-sed -i "s/user = www-data/user = librenms/g" /etc/php/8.2/fpm/pool.d/librenms.conf
-sed -i "s/group = www-data/group = librenms/g" /etc/php/8.2/fpm/pool.d/librenms.conf
-sed -i "s/listen = \/run\/php\/php8.2-fpm.sock/listen = \/run\/php-fpm-librenms.sock/g" /etc/php/8.2/fpm/pool.d/librenms.conf
+cp /etc/php/8.4/fpm/pool.d/www.conf /etc/php/8.4/fpm/pool.d/librenms.conf
+sed -i "s/\[www\]/\[librenms\]/g" /etc/php/8.4/fpm/pool.d/librenms.conf
+sed -i "s/user = www-data/user = librenms/g" /etc/php/8.4/fpm/pool.d/librenms.conf
+sed -i "s/group = www-data/group = librenms/g" /etc/php/8.4/fpm/pool.d/librenms.conf
+sed -i "s/listen = \/run\/php\/php8.4-fpm.sock/listen = \/run\/php-fpm-librenms.sock/g" /etc/php/8.4/fpm/pool.d/librenms.conf
msg_ok "Configured PHP-FPM"
msg_info "Configure Nginx"
@@ -114,14 +117,14 @@ server {
EOF
rm /etc/nginx/sites-enabled/default
$STD systemctl reload nginx
-systemctl restart php8.2-fpm
+systemctl restart php8.4-fpm
msg_ok "Configured Nginx"
msg_info "Configure Services"
COMPOSER_ALLOW_SUPERUSER=1
$STD composer install --no-dev
-$STD php8.2 artisan migrate --force
-$STD php8.2 artisan key:generate --force
+$STD php8.4 artisan migrate --force
+$STD php8.4 artisan key:generate --force
$STD su librenms -s /bin/bash -c "lnms db:seed --force"
$STD su librenms -s /bin/bash -c "lnms user:add -p admin -r admin admin"
ln -s /opt/librenms/lnms /usr/bin/lnms
@@ -147,7 +150,6 @@ motd_ssh
customize
msg_info "Cleaning up"
-rm -f $tmp_file
-$STD apt-get -y autoremove
-$STD apt-get -y autoclean
+$STD apt -y autoremove
+$STD apt -y autoclean
msg_ok "Cleaned"
diff --git a/install/librespeed-install.sh b/install/librespeed-install.sh
deleted file mode 100644
index 8b2bc48a8..000000000
--- a/install/librespeed-install.sh
+++ /dev/null
@@ -1,54 +0,0 @@
-#!/usr/bin/env bash
-
-# Copyright (c) 2021-2025 community-scripts ORG
-# Author: elvito
-# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
-# Source: https://github.com/librespeed/speedtest
-
-source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
-color
-verb_ip6
-catch_errors
-setting_up_container
-network_check
-update_os
-
-msg_info "Installing Dependencies"
-$STD apt-get update
-$STD apt-get install -y \
- caddy \
- php-fpm
-msg_ok "Installed Dependencies"
-
-msg_info "Installing librespeed"
-temp_file=$(mktemp)
-RELEASE=$(curl -fsSL https://api.github.com/repos/librespeed/speedtest/releases/latest | grep "tag_name" | awk '{print substr($2, 2, length($2)-3)}')
-curl -fsSL "https://github.com/librespeed/speedtest/archive/refs/tags/${RELEASE}.zip" -o "$temp_file"
-mkdir -p /opt/librespeed
-mkdir -p /temp
-unzip -q "$temp_file" -d /temp
-cd /temp/speedtest-"${RELEASE}"
-cp -u favicon.ico index.html speedtest.js speedtest_worker.js /opt/librespeed/
-cp -ru backend results /opt/librespeed/
-
-cat </etc/caddy/Caddyfile
-:80 {
- root * /opt/librespeed
- file_server
- php_fastcgi unix//run/php/php-fpm.sock
-}
-EOF
-
-systemctl restart caddy
-echo "${RELEASE}" >/opt/"${APP}_version.txt"
-msg_ok "Installation completed"
-
-motd_ssh
-customize
-
-msg_info "Cleaning up"
-rm -rf /temp
-rm -f "$temp_file"
-$STD apt-get -y autoremove
-$STD apt-get -y autoclean
-msg_ok "Cleaned"
diff --git a/install/linkstack-install.sh b/install/linkstack-install.sh
deleted file mode 100644
index 1d6ce833a..000000000
--- a/install/linkstack-install.sh
+++ /dev/null
@@ -1,63 +0,0 @@
-#!/usr/bin/env bash
-
-# Copyright (c) 2021-2025 community-scripts ORG
-# Author: Omar Minaya
-# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
-# Source: https://linkstack.org/
-
-source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
-color
-verb_ip6
-catch_errors
-setting_up_container
-network_check
-update_os
-
-msg_info "Installing dependencies"
-$STD apt-get install -y \
- software-properties-common \
- ca-certificates \
- lsb-release \
- apt-transport-https \
- apache2
- unzip
-msg_ok "Installed dependencies"
-
-PHP_VERSION="8.2" PHP_MODULE="sqlite3, mysql, fileinfo" PHP_APACHE="YES" install_php
-
-msg_info "Installing LinkStack"
-$STD a2enmod rewrite
-
-ZIP_URL="https://github.com/linkstackorg/linkstack/releases/latest/download/linkstack.zip"
-ZIP_FILE="/tmp/linkstack.zip"
-curl -fsSL -o "$ZIP_FILE" "$ZIP_URL"
-unzip -q "$ZIP_FILE" -d /var/www/html/linkstack
-chown -R www-data:www-data /var/www/html/linkstack
-chmod -R 755 /var/www/html/linkstack
-
-cat < /etc/apache2/sites-available/linkstack.conf
-
- ServerAdmin webmaster@localhost
- DocumentRoot /var/www/html/linkstack/linkstack
- ErrorLog /var/log/apache2/linkstack-error.log
- CustomLog /var/log/apache2/linkstack-access.log combined
-
- Options Indexes FollowSymLinks
- AllowOverride All
- Require all granted
-
-
-EOF
-$STD a2dissite 000-default.conf
-$STD a2ensite linkstack.conf
-$STD systemctl restart apache2
-msg_ok "Installed LinkStack"
-
-motd_ssh
-customize
-
-msg_info "Cleaning up"
-$STD rm -f "$ZIP_FILE"
-$STD apt-get -y autoremove
-$STD apt-get -y autoclean
-msg_ok "Cleaned"
diff --git a/install/maybefinance-install.sh b/install/maybefinance-install.sh
deleted file mode 100644
index e5d4a7887..000000000
--- a/install/maybefinance-install.sh
+++ /dev/null
@@ -1,126 +0,0 @@
-#!/usr/bin/env bash
-
-# Copyright (c) 2021-2025 community-scripts ORG
-# Author: vhsdream
-# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
-# Source: https://maybefinance.com
-
-source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
-color
-verb_ip6
-catch_errors
-setting_up_container
-network_check
-update_os
-
-msg_info "Installing Dependencies"
-$STD apt-get install -y --no-install-recommends \
- libpq-dev \
- libvips42 \
- git \
- zlib1g-dev \
- build-essential \
- libssl-dev \
- libreadline-dev \
- libyaml-dev \
- libsqlite3-dev \
- sqlite3 \
- libxml2-dev \
- libxslt1-dev \
- libcurl4-openssl-dev \
- software-properties-common \
- libffi-dev \
- redis
-msg_ok "Installed Dependencies"
-
-PG_VERSION=16 setup_postgresql
-
-msg_info "Setting up Postgresql"
-DB_NAME="maybe"
-DB_USER="maybe"
-DB_PASS="$(openssl rand -base64 18 | tr -dc 'a-zA-Z0-9' | head -c13)"
-$STD sudo -u postgres psql -c "CREATE ROLE $DB_USER WITH LOGIN PASSWORD '$DB_PASS';"
-$STD sudo -u postgres psql -c "CREATE DATABASE $DB_NAME WITH OWNER $DB_USER TEMPLATE template0;"
-{
- echo "${APPLICATION} database credentials"
- echo "Database Name: ${DB_NAME}"
- echo "Database User: ${DB_USER}"
- echo "Database Password: ${DB_PASS}"
-} >~/maybe.creds
-msg_ok "Setup Postgresql"
-
-msg_info "Installing ${APPLICATION}"
-RELEASE=$(curl -s https://api.github.com/repos/maybe-finance/maybe/releases/latest | grep "tag_name" | awk '{print substr($2, 3, length($2)-4) }')
-curl -fsSL "https://github.com/maybe-finance/maybe/archive/refs/tags/v${RELEASE}.zip" -o /tmp/v"$RELEASE".zip
-unzip -q /tmp/v"$RELEASE".zip
-mv maybe-"$RELEASE" /opt/maybe
-RUBY_VERSION="$(cat /opt/maybe/.ruby-version)" RUBY_INSTALL_RAILS=false setup_rbenv_stack
-cd /opt/maybe
-cp ./.env.example ./.env
-sed -i -e '/SELF_/a RAILS_ENV=production' \
- -e "s/secret-value/\"$(openssl rand -hex 64)\"/" \
- -e "/^SECRET_KEY/a RAILS_MASTER_KEY=\"$(openssl rand -hex 16)\"" \
- -e "s/_USER=postgres/_USER=${DB_USER}/" \
- -e "s/_PASSWORD=postgres/_PASSWORD=${DB_PASS}/" \
- -e "/_USER=/a POSTGRES_DB=${DB_NAME}" \
- -e 's/^# DISABLE/DISABLE/' \
- ./.env
-sed -i -e '/_DB=/a\
-\
-REDIS_URL=redis://localhost:6379/1' \
- -e '/_SSL/a\
-RAILS_FORCE_SSL=false\
-RAILS_ASSUME_SSL=false' \
- ./.env
-rm -f ./config/credentials.yml.enc
-$STD ./bin/bundle install
-$STD ./bin/bundle exec bootsnap precompile --gemfile -j 0
-$STD ./bin/bundle exec bootsnap precompile -j 0 app/ lib/
-export SECRET_KEY_BASE_DUMMY=1
-$STD dotenv -f ./.env ./bin/rails assets:precompile
-$STD dotenv -f ./.env ./bin/rails db:prepare
-echo "${RELEASE}" >/opt/maybe_version.txt
-msg_ok "Installed ${APPLICATION}"
-
-msg_info "Creating services"
-
-cat </etc/systemd/system/maybe-web.service
-[Unit]
-Description=${APPLICATION} Web Service
-After=network.target redis.service postgresql.service
-
-[Service]
-Type=simple
-WorkingDirectory=/opt/maybe
-ExecStart=/root/.rbenv/shims/dotenv -f /opt/maybe/.env /opt/maybe/bin/rails s
-Restart=on-abnormal
-
-[Install]
-WantedBy=multi-user.target
-EOF
-
-cat </etc/systemd/system/maybe-worker.service
-[Unit]
-Description=${APPLICATION} Worker Service
-After=redis.service
-
-[Service]
-Type=simple
-WorkingDirectory=/opt/maybe
-ExecStart=/root/.rbenv/shims/dotenv -f /opt/maybe/.env /opt/maybe/bin/bundle exec sidekiq
-Restart=on-abnormal
-
-[Install]
-WantedBy=multi-user.target
-EOF
-systemctl enable -q --now maybe-web maybe-worker
-msg_ok "Created services"
-
-motd_ssh
-customize
-
-msg_info "Cleaning up"
-rm -f /tmp/v"$RELEASE".zip
-$STD apt-get -y autoremove
-$STD apt-get -y autoclean
-msg_ok "Cleaned"
diff --git a/install/miniflux-install.sh b/install/miniflux-install.sh
new file mode 100644
index 000000000..84ea96660
--- /dev/null
+++ b/install/miniflux-install.sh
@@ -0,0 +1,63 @@
+#!/usr/bin/env bash
+
+# Copyright (c) 2021-2025 community-scripts ORG
+# Author: omernaveedxyz
+# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
+# Source: https://miniflux.app/
+
+source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
+color
+verb_ip6
+catch_errors
+setting_up_container
+network_check
+update_os
+
+
+PG_VERSION=17 setup_postgresql
+DB_NAME=miniflux
+DB_USER=miniflux
+DB_PASS="$(openssl rand -base64 18 | cut -c1-13)"
+$STD sudo -u postgres psql -c "CREATE ROLE $DB_USER WITH LOGIN PASSWORD '$DB_PASS';"
+$STD sudo -u postgres psql -c "CREATE DATABASE $DB_NAME WITH OWNER $DB_USER;"
+$STD sudo -u postgres psql -d "$DB_NAME" -c "CREATE EXTENSION hstore;"
+
+
+
+fetch_and_deploy_gh_release "miniflux" "miniflux/v2" "binary" "latest"
+
+
+msg_info "Configuring Miniflux"
+ADMIN_NAME=admin
+ADMIN_PASS="$(openssl rand -base64 18 | tr -dc 'a-zA-Z0-9' | head -c13)"
+cat <<EOF >/etc/miniflux.conf
+# See https://miniflux.app/docs/configuration.html
+DATABASE_URL=postgres://$DB_USER:$DB_PASS@localhost/$DB_NAME?sslmode=disable
+CREATE_ADMIN=1
+ADMIN_USERNAME=$ADMIN_NAME
+ADMIN_PASSWORD=$ADMIN_PASS
+LISTEN_ADDR=0.0.0.0:8080
+EOF
+
+{
+ echo "Application Credentials"
+ echo "DB_NAME: $DB_NAME"
+ echo "DB_USER: $DB_USER"
+ echo "DB_PASS: $DB_PASS"
+ echo "ADMIN_USERNAME: $ADMIN_NAME"
+ echo "ADMIN_PASSWORD: $ADMIN_PASS"
+} >>~/miniflux.creds
+
+miniflux -migrate -config-file /etc/miniflux.conf
+
+systemctl enable -q --now miniflux
+msg_ok "Configured Miniflux"
+
+motd_ssh
+customize
+
+msg_info "Cleaning up"
+$STD apt -y autoremove
+$STD apt -y autoclean
+$STD apt -y clean
+msg_ok "Cleaned"
diff --git a/install/nginxproxymanager-install.sh b/install/nginxproxymanager-install.sh
deleted file mode 100644
index 9aaf4cc40..000000000
--- a/install/nginxproxymanager-install.sh
+++ /dev/null
@@ -1,171 +0,0 @@
-#!/usr/bin/env bash
-
-# Copyright (c) 2021-2025 tteck
-# Author: tteck (tteckster)
-# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
-# Source: https://nginxproxymanager.com/
-
-source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
-color
-verb_ip6
-catch_errors
-setting_up_container
-network_check
-update_os
-
-msg_info "Installing Dependencies"
-$STD apt-get update
-$STD apt-get -y install \
- ca-certificates \
- apache2-utils \
- logrotate \
- build-essential \
- jq \
- git
-msg_ok "Installed Dependencies"
-
-NODE_VERSION="16" NODE_MODULE="yarn" setup_nodejs
-PYTHON_VERSION="3.12" setup_uv
-fetch_and_deploy_gh_release "nginxproxymanager" "NginxProxyManager/nginx-proxy-manager" "tarball" "latest" "/tmp/nginxproxymanager"
-
-msg_info "Installing Python Dependencies"
-$STD apt-get install -y \
- python3 \
- python3-dev \
- python3-venv
-msg_ok "Installed Python Dependencies"
-
-msg_info "Setting up Certbot Environment"
-$STD uv venv /opt/certbot
-$STD uv pip install --python \
- certbot \
- certbot-dns-cloudflare \
- certbot-dns-multi
-msg_ok "Certbot Environment Ready"
-
-msg_info "Installing Openresty"
-VERSION="$(awk -F'=' '/^VERSION_CODENAME=/{ print $NF }' /etc/os-release)"
-curl -fsSL "https://openresty.org/package/pubkey.gpg" | gpg --dearmor -o /etc/apt/trusted.gpg.d/openresty-archive-keyring.gpg
-echo -e "deb http://openresty.org/package/debian $VERSION openresty" >/etc/apt/sources.list.d/openresty.list
-$STD apt-get update
-$STD apt-get -y install openresty
-msg_ok "Installed Openresty"
-
-msg_info "Setting up Environment"
-ln -sf /usr/bin/python3 /usr/bin/python
-ln -sf /opt/certbot/bin/certbot /usr/bin/certbot
-ln -sf /usr/local/openresty/nginx/sbin/nginx /usr/sbin/nginx
-ln -sf /usr/local/openresty/nginx/ /etc/nginx
-sed -i 's+^daemon+#daemon+g' /tmp/nginxproxymanager/docker/rootfs/etc/nginx/nginx.conf
-NGINX_CONFS=$(find "$(pwd)" -type f -name "*.conf")
-for NGINX_CONF in $NGINX_CONFS; do
- sed -i 's+include conf.d+include /etc/nginx/conf.d+g' "$NGINX_CONF"
-done
-
-mkdir -p /var/www/html /etc/nginx/logs
-cd /tmp/nginxproxymanager
-cp -r docker/rootfs/var/www/html/* /var/www/html/
-cp -r docker/rootfs/etc/nginx/* /etc/nginx/
-cp docker/rootfs/etc/letsencrypt.ini /etc/letsencrypt.ini
-cp docker/rootfs/etc/logrotate.d/nginx-proxy-manager /etc/logrotate.d/nginx-proxy-manager
-ln -sf /etc/nginx/nginx.conf /etc/nginx/conf/nginx.conf
-rm -f /etc/nginx/conf.d/dev.conf
-
-mkdir -p /tmp/nginx/body \
- /run/nginx \
- /data/nginx \
- /data/custom_ssl \
- /data/logs \
- /data/access \
- /data/nginx/default_host \
- /data/nginx/default_www \
- /data/nginx/proxy_host \
- /data/nginx/redirection_host \
- /data/nginx/stream \
- /data/nginx/dead_host \
- /data/nginx/temp \
- /var/lib/nginx/cache/public \
- /var/lib/nginx/cache/private \
- /var/cache/nginx/proxy_temp
-
-chmod -R 777 /var/cache/nginx
-chown root /tmp/nginx
-
-echo resolver "$(awk 'BEGIN{ORS=" "} $1=="nameserver" {print ($2 ~ ":")? "["$2"]": $2}' /etc/resolv.conf);" >/etc/nginx/conf.d/include/resolvers.conf
-
-if [ ! -f /data/nginx/dummycert.pem ] || [ ! -f /data/nginx/dummykey.pem ]; then
- openssl req -new -newkey rsa:2048 -days 3650 -nodes -x509 -subj "/O=Nginx Proxy Manager/OU=Dummy Certificate/CN=localhost" -keyout /data/nginx/dummykey.pem -out /data/nginx/dummycert.pem &>/dev/null
-fi
-
-mkdir -p /app/global /app/frontend/images
-cd /tmp/nginxproxymanager
-cp -r backend/* /app
-cp -r global/* /app/global
-msg_ok "Set up Environment"
-
-msg_info "Building Frontend"
-cd /tmp/nginxproxymanager/frontend
-$STD yarn install --frozen-lockfile
-$STD yarn build
-cp -r dist/* /app/frontend
-cp -r app-images/* /app/frontend/images
-msg_ok "Built Frontend"
-
-msg_info "Initializing Backend"
-rm -rf /app/config/default.json
-if [ ! -f /app/config/production.json ]; then
- cat <<'EOF' >/app/config/production.json
-{
- "database": {
- "engine": "knex-native",
- "knex": {
- "client": "sqlite3",
- "connection": {
- "filename": "/data/database.sqlite"
- }
- }
- }
-}
-EOF
-fi
-cd /app
-$STD yarn install --production
-msg_ok "Initialized Backend"
-
-msg_info "Creating Service"
-cat <<'EOF' >/lib/systemd/system/npm.service
-[Unit]
-Description=Nginx Proxy Manager
-After=network.target
-Wants=openresty.service
-
-[Service]
-Type=simple
-Environment=NODE_ENV=production
-Environment=NODE_OPTIONS=--openssl-legacy-provider
-ExecStartPre=-mkdir -p /tmp/nginx/body /data/letsencrypt-acme-challenge
-ExecStart=/usr/bin/node index.js --abort_on_uncaught_exception --max_old_space_size=1024
-WorkingDirectory=/app
-Restart=on-failure
-
-[Install]
-WantedBy=multi-user.target
-EOF
-msg_ok "Created Service"
-
-motd_ssh
-customize
-
-msg_info "Starting Services"
-sed -i 's/user npm/user root/g; s/^pid/#pid/g' /usr/local/openresty/nginx/conf/nginx.conf
-sed -r -i 's/^([[:space:]]*)su npm npm/\1#su npm npm/g;' /etc/logrotate.d/nginx-proxy-manager
-systemctl enable -q --now openresty
-systemctl enable -q --now npm
-msg_ok "Started Services"
-
-msg_info "Cleaning up"
-rm -rf /tmp/*
-systemctl restart openresty
-$STD apt-get -y autoremove
-$STD apt-get -y autoclean
-msg_ok "Cleaned"
diff --git a/install/notesnook-install.sh b/install/notesnook-install.sh
deleted file mode 100644
index 64afd6cbe..000000000
--- a/install/notesnook-install.sh
+++ /dev/null
@@ -1,63 +0,0 @@
-#!/usr/bin/env bash
-
-# Copyright (c) 2021-2025 community-scripts ORG
-# Author: Slaviša Arežina (tremor021)
-# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
-# Source: https://github.com/streetwriters/notesnook
-
-source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
-color
-verb_ip6
-catch_errors
-setting_up_container
-network_check
-update_os
-
-msg_info "Installing Dependencies"
-$STD apt-get install -y \
- make \
- git \
- caddy
-msg_ok "Installed Dependencies"
-
-LOCAL_IP=$(hostname -I | awk '{print $1}')
-NODE_MODULE="yarn" setup_nodejs
-fetch_and_deploy_gh_release "notesnook" "streetwriters/notesnook" "tarball"
-
-msg_info "Configuring Notesnook (Patience)"
-cd /opt/notesnook
-export NODE_OPTIONS="--max-old-space-size=2560"
-$STD npm install
-$STD npm run build:web
-msg_ok "Configured Notesnook"
-
-msg_info "Creating Service"
-cat </etc/systemd/system/notesnook.service
-[Unit]
-Description=Notesnook Service
-After=network-online.target
-
-[Service]
-Type=simple
-User=root
-WorkingDirectory=/opt/notesnook
-ExecStart=/usr/bin/npx serve -l tcp://0.0.0.0:3000 apps/web/build
-Restart=on-failure
-
-[Install]
-WantedBy=multi-user.target
-EOF
-sed -i "s|^ExecStart=.*|ExecStart=/usr/bin/caddy reverse-proxy --from https://$LOCAL_IP --to localhost:3000|" /lib/systemd/system/caddy.service
-sed -i "/^ExecReload=/d" /lib/systemd/system/caddy.service
-systemctl daemon-reload
-systemctl restart caddy
-systemctl enable -q --now notesnook
-msg_ok "Created Service"
-
-motd_ssh
-customize
-
-msg_info "Cleaning up"
-$STD apt-get -y autoremove
-$STD apt-get -y autoclean
-msg_ok "Cleaned"
diff --git a/install/omada-install.sh b/install/omada-install.sh
new file mode 100644
index 000000000..7ffc14fb7
--- /dev/null
+++ b/install/omada-install.sh
@@ -0,0 +1,53 @@
+#!/usr/bin/env bash
+
+# Copyright (c) 2021-2025 tteck
+# Author: tteck (tteckster)
+# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
+# Source: https://www.tp-link.com/us/support/download/omada-software-controller/
+
+source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
+color
+verb_ip6
+catch_errors
+setting_up_container
+network_check
+update_os
+
+msg_info "Installing Dependencies"
+$STD apt install -y jsvc
+msg_ok "Installed Dependencies"
+
+if lscpu | grep -q 'avx'; then
+ MONGO_VERSION="8.0" setup_mongodb
+else
+ MONGO_VERSION="4.4" setup_mongodb
+fi
+
+JAVA_VERSION="21" setup_java
+
+# if ! dpkg -l | grep -q 'libssl1.1'; then
+# msg_info "Installing libssl (if needed)"
+# curl -fsSL "https://security.debian.org/debian-security/pool/updates/main/o/openssl/libssl1.1_1.1.1w-0+deb11u4_amd64.deb" -o "/tmp/libssl.deb"
+# $STD dpkg -i /tmp/libssl.deb
+# rm -f /tmp/libssl.deb
+# msg_ok "Installed libssl1.1"
+# fi
+
+msg_info "Installing Omada Controller"
+OMADA_URL=$(curl -fsSL "https://support.omadanetworks.com/en/download/software/omada-controller/" |
+ grep -o 'https://static\.tp-link\.com/upload/software/[^"]*linux_x64[^"]*\.deb' |
+ head -n1)
+OMADA_PKG=$(basename "$OMADA_URL")
+curl -fsSL "$OMADA_URL" -o "$OMADA_PKG"
+$STD dpkg -i "$OMADA_PKG"
+msg_ok "Installed Omada Controller"
+
+motd_ssh
+customize
+
+msg_info "Cleaning up"
+rm -rf "$OMADA_PKG"
+$STD apt -y autoremove
+$STD apt -y autoclean
+$STD apt -y clean
+msg_ok "Cleaned"
diff --git a/install/openwebui-install.sh b/install/openwebui-install.sh
new file mode 100644
index 000000000..ad43ac7c3
--- /dev/null
+++ b/install/openwebui-install.sh
@@ -0,0 +1,78 @@
+#!/usr/bin/env bash
+
+# Copyright (c) 2021-2025 tteck
+# Author: tteck | Co-Author: havardthom | Co-Author: Slaviša Arežina (tremor021)
+# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
+# Source: https://openwebui.com/
+
+source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
+color
+verb_ip6
+catch_errors
+setting_up_container
+network_check
+update_os
+
+msg_info "Installing Dependencies"
+$STD apt install -y ffmpeg
+msg_ok "Installed Dependencies"
+
+USE_UVX="YES" PYTHON_VERSION="3.12" setup_uv
+
+read -r -p "${TAB3}Would you like to add Ollama? " prompt
+if [[ ${prompt,,} =~ ^(y|yes)$ ]]; then
+ msg_info "Installing Ollama"
+ curl -fsSLO -C - https://ollama.com/download/ollama-linux-amd64.tgz
+ tar -C /usr -xzf ollama-linux-amd64.tgz
+ rm -rf ollama-linux-amd64.tgz
+ cat <<EOF >/etc/systemd/system/ollama.service
+[Unit]
+Description=Ollama Service
+After=network-online.target
+
+[Service]
+Type=exec
+ExecStart=/usr/bin/ollama serve
+Environment=HOME=$HOME
+Environment=OLLAMA_HOST=0.0.0.0
+Restart=always
+RestartSec=3
+
+[Install]
+WantedBy=multi-user.target
+EOF
+ systemctl enable -q --now ollama
+ echo "ENABLE_OLLAMA_API=true" >/root/.env
+ msg_ok "Installed Ollama"
+fi
+
+msg_info "Creating Service"
+cat <<EOF >/etc/systemd/system/open-webui.service
+[Unit]
+Description=Open WebUI Service
+After=network.target
+
+[Service]
+Type=simple
+EnvironmentFile=-/root/.env
+Environment=DATA_DIR=/root/.open-webui
+ExecStart=/usr/local/bin/uvx --python 3.12 open-webui@latest serve
+WorkingDirectory=/root
+Restart=on-failure
+RestartSec=5
+User=root
+
+[Install]
+WantedBy=multi-user.target
+EOF
+systemctl enable -q --now open-webui
+msg_ok "Created Service"
+
+motd_ssh
+customize
+
+msg_info "Cleaning up"
+$STD apt -y autoremove
+$STD apt -y autoclean
+$STD apt -y clean
+msg_ok "Cleaned"
diff --git a/install/ots-install.sh b/install/ots-install.sh
deleted file mode 100644
index 437e30101..000000000
--- a/install/ots-install.sh
+++ /dev/null
@@ -1,57 +0,0 @@
-#!/usr/bin/env bash
-
-# Copyright (c) 2021-2025 community-scripts ORG
-# Author: MickLesk (CanbiZ)
-# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
-# Source: https://github.com/Luzifer/ots
-
-source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
-color
-verb_ip6
-catch_errors
-setting_up_container
-network_check
-update_os
-
-msg_info "Installing Dependencies"
-$STD apt-get install -y \
- caddy \
- apt-transport-https \
- ca-certificates
-msg_ok "Installed Dependencies"
-
-msg_info "Installing OTS"
-fetch_and_deploy_gh_release "ots" "Luzifer/ots" "tarball" "latest" "/opt/ots" "ots_linux_amd64.tgz"
-cat </opt/ots/env"
-LISTEN=0.0.0.0:3000
-REDIS_URL=redis://127.0.0.1:6379
-SECRET_EXPIRY=604800
-STORAGE_TYPE=redis
-EOF
-msg_ok "Installed OTS"
-
-msg_info "Creating Services"
-cat </etc/systemd/system/ots.service
-[Unit]
-Description=One-Time-Secret Service
-After=network-online.target
-Requires=network-online.target
-
-[Service]
-EnvironmentFile=/opt/ots/env
-ExecStart=/opt/ots/ots
-Restart=Always
-RestartSecs=5
-
-[Install]
-WantedBy=multi-user.target
-EOF
-msg_ok "Created Services"
-
-motd_ssh
-customize
-
-msg_info "Cleaning up"
-$STD apt-get -y autoremove
-$STD apt-get -y autoclean
-msg_ok "Cleaned"
diff --git a/install/pangolin-install.sh b/install/pangolin-install.sh
new file mode 100644
index 000000000..0a82cd4fb
--- /dev/null
+++ b/install/pangolin-install.sh
@@ -0,0 +1,131 @@
+#!/usr/bin/env bash
+
+# Copyright (c) 2021-2025 community-scripts ORG
+# Author: Slaviša Arežina (tremor021)
+# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
+# Source: https://pangolin.net/
+
+source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
+color
+verb_ip6
+catch_errors
+setting_up_container
+network_check
+update_os
+
+msg_info "Installing Dependencies"
+$STD apt install -y \
+ sqlite3 \
+ iptables
+msg_ok "Installed Dependencies"
+
+NODE_VERSION="22" setup_nodejs
+fetch_and_deploy_gh_release "pangolin" "fosrl/pangolin" "tarball"
+fetch_and_deploy_gh_release "gerbil" "fosrl/gerbil" "singlefile" "latest" "/usr/bin" "gerbil_linux_amd64"
+IP_ADDR=$(hostname -I | awk '{print $1}')
+SECRET_KEY=$(openssl rand -base64 48 | tr -dc 'A-Za-z0-9' | head -c 32)
+
+msg_info "Setup Pangolin (Patience)"
+export BUILD=oss
+export DATABASE=sqlite
+cd /opt/pangolin
+$STD npm ci
+echo "export * from \"./$DATABASE\";" > server/db/index.ts
+echo "export const build = \"$BUILD\" as any;" > server/build.ts
+cp tsconfig.oss.json tsconfig.json
+rm -rf server/private
+mkdir -p dist
+$STD npm run next:build
+$STD node esbuild.mjs -e server/index.ts -o dist/server.mjs -b $BUILD
+$STD node esbuild.mjs -e server/setup/migrationsSqlite.ts -o dist/migrations.mjs
+$STD npm run build:cli
+cp -R .next/standalone ./
+
+cat <<'EOF' >/usr/local/bin/pangctl
+#!/bin/sh
+cd /opt/pangolin
+./dist/cli.mjs "$@"
+EOF
+chmod +x /usr/local/bin/pangctl ./dist/cli.mjs
+cp server/db/names.json ./dist/names.json
+
+cat <<EOF >/opt/pangolin/config/config.yml
+app:
+ dashboard_url: http://$IP_ADDR:3002
+ log_level: debug
+
+domains:
+ domain1:
+ base_domain: example.com
+
+server:
+ secret: $SECRET_KEY
+
+gerbil:
+ base_endpoint: example.com
+
+orgs:
+ block_size: 24
+ subnet_group: 100.90.137.0/20
+
+flags:
+ require_email_verification: false
+ disable_signup_without_invite: true
+ disable_user_create_org: true
+ allow_raw_resources: true
+ enable_integration_api: true
+ enable_clients: true
+EOF
+#$STD npm run db:sqlite:generate
+#$STD npm run db:sqlite:push
+msg_ok "Setup Pangolin"
+
+msg_info "Creating Pangolin Service"
+cat <<EOF >/etc/systemd/system/pangolin.service
+[Unit]
+Description=Pangolin Service
+After=network.target
+
+[Service]
+Type=simple
+User=root
+WorkingDirectory=/opt/pangolin
+ExecStart=/usr/bin/npm start
+Restart=always
+RestartSec=10
+
+[Install]
+WantedBy=multi-user.target
+EOF
+systemctl enable -q --now pangolin
+msg_ok "Created pangolin Service"
+
+msg_info "Setting up gerbil"
+mkdir -p /var/config
+cat <<EOF >/etc/systemd/system/gerbil.service
+[Unit]
+Description=Gerbil Service
+After=network.target
+Requires=pangolin.service
+
+[Service]
+Type=simple
+User=root
+ExecStart=/usr/bin/gerbil --reachableAt=http://$IP_ADDR:3004 --generateAndSaveKeyTo=/var/config/key --remoteConfig=http://$IP_ADDR:3001/api/v1/
+Restart=always
+RestartSec=10
+
+[Install]
+WantedBy=multi-user.target
+EOF
+systemctl enable -q --now gerbil
+msg_ok "Set up gerbil"
+
+motd_ssh
+customize
+
+msg_info "Cleaning up"
+$STD apt -y autoremove
+$STD apt -y autoclean
+$STD apt -y clean
+msg_ok "Cleaned"
diff --git a/install/petio-install.sh b/install/petio-install.sh
new file mode 100644
index 000000000..ff4337c71
--- /dev/null
+++ b/install/petio-install.sh
@@ -0,0 +1,69 @@
+#!/usr/bin/env bash
+
+# Copyright (c) 2021-2025 tteck
+# Author: tteck (tteckster)
+# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
+# Source: https://petio.tv/
+
+source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
+color
+verb_ip6
+catch_errors
+setting_up_container
+network_check
+update_os
+
+msg_info "Installing MongoDB 4.4"
+curl -fsSL "https://www.mongodb.org/static/pgp/server-4.4.asc" | gpg --dearmor >/usr/share/keyrings/mongodb-server-4.4.gpg
+# Determine OS ID
+OS_ID=$(grep '^ID=' /etc/os-release | cut -d'=' -f2)
+
+if [ "$OS_ID" = "debian" ]; then
+ echo "deb [ signed-by=/usr/share/keyrings/mongodb-server-4.4.gpg ] http://repo.mongodb.org/apt/debian $(grep '^VERSION_CODENAME=' /etc/os-release | cut -d'=' -f2)/mongodb-org/4.4 main" >/etc/apt/sources.list.d/mongodb-org-4.4.list
+else
+ echo "deb [ arch=amd64,arm64 signed-by=/usr/share/keyrings/mongodb-server-4.4.gpg ] https://repo.mongodb.org/apt/ubuntu $(grep '^VERSION_CODENAME=' /etc/os-release | cut -d'=' -f2)/mongodb-org/4.4 multiverse" >/etc/apt/sources.list.d/mongodb-org-4.4.list
+fi
+
+$STD apt-get update
+$STD apt-get install -y mongodb-org
+sed -i 's/bindIp: 127.0.0.1/bindIp: 0.0.0.0/' /etc/mongod.conf
+systemctl enable -q --now mongod
+msg_ok "MongoDB 4.4 Installed"
+
+msg_info "Installing Petio"
+useradd -M --shell=/bin/false petio
+mkdir /opt/Petio
+curl -fsSL "https://petio.tv/releases/latest" -o "petio-latest.zip"
+$STD unzip petio-latest.zip -d /opt/Petio
+rm -rf petio-latest.zip
+chown -R petio:petio /opt/Petio
+msg_ok "Installed Petio"
+
+msg_info "Creating Service"
+cat <<EOF >/etc/systemd/system/petio.service
+[Unit]
+Description=Petio a content request system
+After=network.target mongod.service
+
+[Service]
+Type=simple
+User=petio
+Restart=on-failure
+RestartSec=1
+ExecStart=/opt/Petio/bin/petio-linux
+
+[Install]
+WantedBy=multi-user.target
+
+
+EOF
+systemctl enable -q --now petio
+msg_ok "Created Service"
+
+motd_ssh
+customize
+
+msg_info "Cleaning up"
+$STD apt-get -y autoremove
+$STD apt-get -y autoclean
+msg_ok "Cleaned"
diff --git a/install/postiz-install.sh b/install/postiz-install.sh
deleted file mode 100644
index 7f55e9f40..000000000
--- a/install/postiz-install.sh
+++ /dev/null
@@ -1,112 +0,0 @@
-#!/usr/bin/env bash
-
-# Copyright (c) 2021-2025 community-scripts ORG
-# Author: Slaviša Arežina (tremor021)
-# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
-# Source: https://github.com/gitroomhq/postiz-app
-
-source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
-color
-verb_ip6
-catch_errors
-setting_up_container
-network_check
-update_os
-
-msg_info "Installing dependencies"
-$STD apt-get install -y \
- build-essential \
- python3-pip \
- supervisor \
- debian-keyring \
- debian-archive-keyring \
- apt-transport-https \
- redis
-msg_ok "Installed dependencies"
-
-NODE_VERSION="20" setup_nodejs
-PG_VERSION="17" setup_postgresql
-
-msg_info "Setting up PostgreSQL Database"
-DB_NAME=postiz
-DB_USER=postiz
-DB_PASS="$(openssl rand -base64 18 | cut -c1-13)"
-$STD sudo -u postgres psql -c "CREATE ROLE $DB_USER WITH LOGIN PASSWORD '$DB_PASS';"
-$STD sudo -u postgres psql -c "CREATE DATABASE $DB_NAME WITH OWNER $DB_USER ENCODING 'UTF8' TEMPLATE template0;"
-$STD sudo -u postgres psql -c "ALTER ROLE $DB_USER SET client_encoding TO 'utf8';"
-$STD sudo -u postgres psql -c "ALTER ROLE $DB_USER SET default_transaction_isolation TO 'read committed';"
-$STD sudo -u postgres psql -c "ALTER ROLE $DB_USER SET timezone TO 'UTC'"
-{
- echo "Postiz DB Credentials"
- echo "Postiz Database User: $DB_USER"
- echo "Postiz Database Password: $DB_PASS"
- echo "Postiz Database Name: $DB_NAME"
-} >>~/postiz.creds
-msg_ok "Set up PostgreSQL Database"
-
-msg_info "Setting up Caddy"
-curl -1sLf "https://dl.cloudsmith.io/public/caddy/stable/gpg.key" | gpg --dearmor -o /usr/share/keyrings/caddy-stable-archive-keyring.gpg
-curl -1sLf "https://dl.cloudsmith.io/public/caddy/stable/debian.deb.txt" >/etc/apt/sources.list.d/caddy-stable.list
-$STD apt-get update
-$STD apt-get install caddy
-msg_ok "Set up Caddy"
-
-fetch_and_deploy_gh_release "postiz" "gitroomhq/postiz-app"
-
-msg_info "Configuring Postiz"
-LOCAL_IP=$(hostname -I | awk '{print $1}')
-JWT_SECRET=$(openssl rand -base64 64 | tr '+/' '-_' | tr -d '=')
-cd /opt/postiz
-mkdir -p /etc/supervisor.d
-$STD npm --no-update-notifier --no-fund --global install pnpm@10.6.1 pm2
-cp var/docker/supervisord.conf /etc/supervisord.conf
-cp var/docker/Caddyfile ./Caddyfile
-cp var/docker/entrypoint.sh ./entrypoint.sh
-cp var/docker/supervisord/caddy.conf /etc/supervisor.d/caddy.conf
-sed -i "s#/app/Caddyfile#/opt/postiz/Caddyfile#g" /etc/supervisor.d/caddy.conf
-sed -i "s#/app/Caddyfile#/opt/postiz/Caddyfile#g" /opt/postiz/entrypoint.sh
-sed -i "s#directory=/app#directory=/opt/postiz#g" /etc/supervisor.d/caddy.conf
-export NODE_OPTIONS="--max-old-space-size=2560"
-$STD pnpm install
-$STD pnpm run build
-chmod +x entrypoint.sh
-
-cat <<EOF >.env
-NOT_SECURED="true"
-IS_GENERAL="true"
-DATABASE_URL="postgresql://$DB_USER:$DB_PASS@localhost:5432/$DB_NAME"
-REDIS_URL="redis://localhost:6379"
-JWT_SECRET="$JWT_SECRET"
-FRONTEND_URL="http://$LOCAL_IP:4200"
-NEXT_PUBLIC_BACKEND_URL="http://$LOCAL_IP:3000"
-BACKEND_INTERNAL_URL="http://$LOCAL_IP:3000"
-EOF
-msg_ok "Configured Postiz"
-
-msg_info "Creating Service"
-cat <<EOF >/etc/systemd/system/postiz.service
-[Unit]
-Description=Postiz Service
-After=network.target
-
-[Service]
-Type=simple
-User=root
-WorkingDirectory=/opt/postiz
-EnvironmentFile=/opt/postiz/.env
-ExecStart=/usr/bin/pnpm run pm2-run
-Restart=always
-
-[Install]
-WantedBy=multi-user.target
-EOF
-systemctl enable -q --now postiz
-msg_ok "Created Service"
-
-motd_ssh
-customize
-
-msg_info "Cleaning up"
-$STD apt-get -y autoremove
-$STD apt-get -y autoclean
-msg_ok "Cleaned"
diff --git a/install/reactive-resume-install.sh b/install/reactive-resume-install.sh
deleted file mode 100644
index 895ace024..000000000
--- a/install/reactive-resume-install.sh
+++ /dev/null
@@ -1,178 +0,0 @@
-#!/usr/bin/env bash
-
-# Copyright (c) 2021-2025 community-scripts ORG
-# Author: vhsdream
-# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
-# Source: https://rxresume.org
-
-source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
-color
-verb_ip6
-catch_errors
-setting_up_container
-network_check
-update_os
-
-msg_info "Installing Dependencies"
-cd /tmp
-curl -fsSL https://dl.min.io/server/minio/release/linux-amd64/minio.deb -o minio.deb
-$STD dpkg -i minio.deb
-msg_ok "Installed Dependencies"
-
-PG_VERSION="16" PG_MODULES="common" setup_postgresql
-NODE_VERSION="22" NODE_MODULE="pnpm@latest" setup_nodejs
-
-msg_info "Setting up Database"
-DB_USER="rxresume"
-DB_NAME="rxresume"
-DB_PASS=$(openssl rand -base64 18 | tr -dc 'a-zA-Z0-9' | head -c13)
-$STD sudo -u postgres psql -c "CREATE USER $DB_USER WITH ENCRYPTED PASSWORD '$DB_PASS';"
-$STD sudo -u postgres psql -c "CREATE DATABASE $DB_NAME WITH OWNER $DB_USER ENCODING 'UTF8' TEMPLATE template0;"
-$STD sudo -u postgres psql -c "GRANT ALL PRIVILEGES ON DATABASE $DB_NAME to $DB_USER;"
-$STD sudo -u postgres psql -c "ALTER USER $DB_USER WITH SUPERUSER;"
-msg_ok "Set up Database"
-
-msg_info "Installing $APPLICATION"
-MINIO_PASS=$(openssl rand -base64 48)
-ACCESS_TOKEN=$(openssl rand -base64 48)
-REFRESH_TOKEN=$(openssl rand -base64 48)
-CHROME_TOKEN=$(openssl rand -hex 32)
-LOCAL_IP=$(hostname -I | awk '{print $1}')
-TAG=$(curl -fsSL https://api.github.com/repos/browserless/browserless/tags?per_page=1 | grep "name" | awk '{print substr($2, 3, length($2)-4) }')
-
-fetch_and_deploy_gh_release "Reactive-Resume" "lazy-media/Reactive-Resume"
-cd /opt/"$APPLICATION"
-# corepack enable
-export CI="true"
-export PUPPETEER_SKIP_DOWNLOAD="true"
-export NODE_ENV="production"
-export NEXT_TELEMETRY_DISABLED=1
-$STD pnpm install --frozen-lockfile
-$STD pnpm run build
-$STD pnpm install --prod --frozen-lockfile
-$STD pnpm run prisma:generate
-msg_ok "Installed $APPLICATION"
-
-msg_info "Installing Browserless (Patience)"
-cd /tmp
-curl -fsSL https://github.com/browserless/browserless/archive/refs/tags/v"$TAG".zip -o v"$TAG".zip
-$STD unzip v"$TAG".zip
-mv browserless-"$TAG" /opt/browserless
-cd /opt/browserless
-$STD npm install
-rm -rf src/routes/{chrome,edge,firefox,webkit}
-$STD node_modules/playwright-core/cli.js install --with-deps chromium
-$STD npm run build
-$STD npm run build:function
-$STD npm prune production
-msg_ok "Installed Browserless"
-
-msg_info "Configuring applications"
-mkdir -p /opt/minio
-cat <<EOF >/opt/minio/.env
-MINIO_ROOT_USER="storageadmin"
-MINIO_ROOT_PASSWORD="${MINIO_PASS}"
-MINIO_VOLUMES=/opt/minio
-MINIO_OPTS="--address :9000 --console-address 127.0.0.1:9001"
-EOF
-cat <<EOF >/opt/"$APPLICATION"/.env
-NODE_ENV=production
-PORT=3000
-PUBLIC_URL=http://${LOCAL_IP}:3000
-STORAGE_URL=http://${LOCAL_IP}:9000/rxresume
-DATABASE_URL=postgresql://${DB_USER}:${DB_PASS}@localhost:5432/${DB_NAME}?schema=public
-ACCESS_TOKEN_SECRET=${ACCESS_TOKEN}
-REFRESH_TOKEN_SECRET=${REFRESH_TOKEN}
-CHROME_PORT=8080
-CHROME_TOKEN=${CHROME_TOKEN}
-CHROME_URL=ws://localhost:8080
-CHROME_IGNORE_HTTPS_ERRORS=true
-MAIL_FROM=noreply@locahost
-# SMTP_URL=smtp://username:password@smtp.server.mail:587 #
-STORAGE_ENDPOINT=localhost
-STORAGE_PORT=9000
-STORAGE_REGION=us-east-1
-STORAGE_BUCKET=rxresume
-STORAGE_ACCESS_KEY=storageadmin
-STORAGE_SECRET_KEY=${MINIO_PASS}
-STORAGE_USE_SSL=false
-STORAGE_SKIP_BUCKET_CHECK=false
-
-# GitHub (OAuth, Optional)
-# GITHUB_CLIENT_ID=
-# GITHUB_CLIENT_SECRET=
-# GITHUB_CALLBACK_URL=http://localhost:5173/api/auth/github/callback
-
-# Google (OAuth, Optional)
-# GOOGLE_CLIENT_ID=
-# GOOGLE_CLIENT_SECRET=
-# GOOGLE_CALLBACK_URL=http://localhost:5173/api/auth/google/callback
-EOF
-cat <<EOF >/opt/browserless/.env
-DEBUG=browserless*,-**:verbose
-HOST=localhost
-PORT=8080
-TOKEN=${CHROME_TOKEN}
-EOF
-{
- echo "${APPLICATION} Credentials"
- echo "Database User: $DB_USER"
- echo "Database Password: $DB_PASS"
- echo "Database Name: $DB_NAME"
- echo "Minio Root Password: ${MINIO_PASS}"
-} >>~/"$APPLICATION".creds
-msg_ok "Configured applications"
-
-msg_info "Creating Services"
-mkdir -p /etc/systemd/system/minio.service.d/
-cat <<EOF >/etc/systemd/system/minio.service.d/override.conf
-[Service]
-User=root
-Group=root
-WorkingDirectory=/usr/local/bin
-EnvironmentFile=/opt/minio/.env
-EOF
-
-cat <<EOF >/etc/systemd/system/"$APPLICATION".service
-[Unit]
-Description=${APPLICATION} Service
-After=network.target postgresql.service minio.service
-Wants=postgresql.service minio.service
-
-[Service]
-WorkingDirectory=/opt/${APPLICATION}
-EnvironmentFile=/opt/${APPLICATION}/.env
-ExecStart=/usr/bin/pnpm run start
-Restart=always
-
-[Install]
-WantedBy=multi-user.target
-EOF
-
-cat <<EOF >/etc/systemd/system/browserless.service
-[Unit]
-Description=Browserless service
-After=network.target ${APPLICATION}.service
-
-[Service]
-WorkingDirectory=/opt/browserless
-EnvironmentFile=/opt/browserless/.env
-ExecStart=/usr/bin/npm run start
-Restart=unless-stopped
-
-[Install]
-WantedBy=multi-user.target
-EOF
-systemctl daemon-reload
-systemctl enable -q --now minio.service "$APPLICATION".service browserless.service
-msg_ok "Created Services"
-
-motd_ssh
-customize
-
-msg_info "Cleaning up"
-rm -f /tmp/v"$TAG".zip
-rm -f /tmp/minio.deb
-$STD apt-get -y autoremove
-$STD apt-get -y autoclean
-msg_ok "Cleaned"
diff --git a/install/romm-install.sh b/install/romm-install.sh
new file mode 100644
index 000000000..438e4e5a7
--- /dev/null
+++ b/install/romm-install.sh
@@ -0,0 +1,223 @@
+#!/usr/bin/env bash
+
+# Copyright (c) 2021-2025 community-scripts ORG
+# Author: DevelopmentCats
+# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
+# Source: https://romm.app
+# Updated: 03/10/2025
+
+source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
+color
+verb_ip6
+catch_errors
+setting_up_container
+network_check
+update_os
+
+msg_info "Installing dependencies"
+$STD apt-get install -y \
+ acl \
+ build-essential \
+ libssl-dev \
+ libffi-dev \
+ python3-dev \
+ python3-pip \
+ python3-venv \
+ libmariadb3 \
+ libmariadb-dev \
+ libpq-dev \
+ redis-tools \
+ p7zip \
+ tzdata \
+ jq
+msg_ok "Installed core dependencies"
+
+PYTHON_VERSION="3.12" setup_uv
+NODE_VERSION="22" NODE_MODULE="serve" setup_nodejs
+setup_mariadb
+
+msg_info "Configuring Database"
+DB_NAME=romm
+DB_USER=romm
+DB_PASS=$(openssl rand -base64 18 | tr -dc 'a-zA-Z0-9' | head -c13)
+$STD mariadb -u root -e "CREATE DATABASE IF NOT EXISTS $DB_NAME CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;"
+$STD mariadb -u root -e "CREATE USER IF NOT EXISTS '$DB_USER'@'localhost' IDENTIFIED BY '$DB_PASS';"
+$STD mariadb -u root -e "GRANT ALL ON $DB_NAME.* TO '$DB_USER'@'localhost'; FLUSH PRIVILEGES;"
+{
+ echo "RomM-Credentials"
+ echo "RomM Database User: $DB_USER"
+ echo "RomM Database Password: $DB_PASS"
+ echo "RomM Database Name: $DB_NAME"
+} >~/romm.creds
+chmod 600 ~/romm.creds
+msg_ok "Configured Database"
+
+msg_info "Creating romm user and directories"
+id -u romm &>/dev/null || useradd -r -m -d /var/lib/romm -s /bin/bash romm
+mkdir -p /opt/romm \
+ /var/lib/romm/config \
+ /var/lib/romm/resources \
+ /var/lib/romm/assets/{saves,states,screenshots} \
+ /var/lib/romm/library/roms/{gba,gbc,ps} \
+ /var/lib/romm/library/bios/{gba,ps}
+chown -R romm:romm /opt/romm /var/lib/romm
+msg_ok "Created romm user and directories"
+
+fetch_and_deploy_gh_release "romm" "rommapp/romm"
+
+msg_info "Creating environment file"
+sed -i 's/^supervised no/supervised systemd/' /etc/redis/redis.conf
+systemctl restart redis-server
+systemctl enable -q --now redis-server
+AUTH_SECRET_KEY=$(openssl rand -hex 32)
+
+cat >/opt/romm/.env </etc/systemd/system/romm-backend.service </etc/systemd/system/romm-frontend.service </etc/systemd/system/romm-worker.service </etc/systemd/system/romm-scheduler.service <>~/rybbit.creds
msg_ok "Set up PostgreSQL Database"
fetch_and_deploy_gh_release "rybbit" "rybbit-io/rybbit" "tarball" "latest" "/opt/rybbit"
+cd /opt/rybbit/shared
+npm install
+npm run build
+
+cd /opt/rybbit/server
+npm ci
+npm run build
+
+cd /opt/rybbit/client
+npm ci --legacy-peer-deps
+npm run build
+
+mv /opt/rybbit/.env.example /opt/rybbit/.env
+sed -i "s|^POSTGRES_DB=.*|POSTGRES_DB=$DB_NAME|g" /opt/rybbit/.env
+sed -i "s|^POSTGRES_USER=.*|POSTGRES_USER=$DB_USER|g" /opt/rybbit/.env
+sed -i "s|^POSTGRES_PASSWORD=.*|POSTGRES_PASSWORD=$DB_PASS|g" /opt/rybbit/.env
+sed -i "s|^DOMAIN_NAME=.*|DOMAIN_NAME=localhost|g" /opt/rybbit/.env
+sed -i "s|^BASE_URL=.*|BASE_URL=\"http://localhost\"|g" /opt/rybbit/.env
+msg_ok "Rybbit Installed"
+
+msg_info "Setting up Caddy"
+mkdir -p /etc/caddy
+cp /opt/rybbit/Caddyfile /etc/caddy/Caddyfile
+systemctl enable -q --now caddy
+msg_ok "Caddy Setup"
+
motd_ssh
customize
diff --git a/install/salt-install.sh b/install/salt-install.sh
deleted file mode 100644
index 1f59939b4..000000000
--- a/install/salt-install.sh
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/usr/bin/env bash
-
-# Copyright (c) 2021-2025 community-scripts ORG
-# Author: bvdberg01
-# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
-# Source: https://github.com/saltstack/salt
-
-source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
-color
-verb_ip6
-catch_errors
-setting_up_container
-network_check
-update_os
-
-msg_info "Installing Dependencies"
-$STD apt-get install -y \
- jq
-msg_ok "Installed Dependencies"
-
-fetch_and_deploy_gh_release "salt" "saltstack/salt" "binary" "latest" "/opt/salt" "salt-master*_amd64.deb"
-
-# msg_info "Setup Salt repo"
-# mkdir -p /etc/apt/keyrings
-# curl -fsSL https://packages.broadcom.com/artifactory/api/security/keypair/SaltProjectKey/public -o /etc/apt/keyrings/salt-archive-keyring.pgp
-# curl -fsSL https://github.com/saltstack/salt-install-guide/releases/latest/download/salt.sources -o /etc/apt/sources.list.d/salt.sources
-# $STD apt-get update
-# msg_ok "Setup Salt repo"
-
-# msg_info "Installing Salt Master"
-# RELEASE=$(curl -fsSL https://api.github.com/repos/saltstack/salt/releases/latest | jq -r .tag_name | sed 's/^v//')
-# cat <<EOF >/etc/apt/preferences.d/salt-pin-1001
-# Package: salt-*
-# Pin: version ${RELEASE}
-# Pin-Priority: 1001
-# EOF
-# $STD apt-get install -y salt-master
-# echo "${RELEASE}" >/~.salt
-# msg_ok "Installed Salt Master"
-
-motd_ssh
-customize
-
-msg_info "Cleaning up"
-$STD apt-get -y autoremove
-$STD apt-get -y autoclean
-msg_ok "Cleaned"
diff --git a/install/scraparr-install.sh b/install/scraparr-install.sh
deleted file mode 100644
index 4eafe0a60..000000000
--- a/install/scraparr-install.sh
+++ /dev/null
@@ -1,56 +0,0 @@
-#!/usr/bin/env bash
-
-# Copyright (c) 2021-2025 community-scripts ORG
-# Author: JasonGreenC
-# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
-# Source: https://github.com/thecfu/scraparr
-
-source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
-color
-verb_ip6
-catch_errors
-setting_up_container
-network_check
-update_os
-
-msg_info "Installing Scraparr"
-PYTHON_VERSION="3.12" setup_uv
-fetch_and_deploy_gh_release "scrappar" "thecfu/scraparr" "tarball" "latest" "/opt/scraparr"
-cd /opt/scraparr || exit
-$STD uv venv /opt/scraparr/.venv
-$STD /opt/scraparr/.venv/bin/python -m ensurepip --upgrade
-$STD /opt/scraparr/.venv/bin/python -m pip install --upgrade pip
-$STD /opt/scraparr/.venv/bin/python -m pip install -r /opt/scraparr/src/scraparr/requirements.txt
-chmod -R 755 /opt/scraparr
-mkdir -p /scraparr/config
-mv /opt/scraparr/config.yaml /scraparr/config/config.yaml
-chmod -R 755 /scraparr
-msg_ok "Installed Scraparr"
-
-msg_info "Creating Service"
-cat <<EOF >/etc/systemd/system/scraparr.service
-[Unit]
-Description=Scraparr
-Wants=network-online.target
-After=network.target
-
-[Service]
-Type=simple
-WorkingDirectory=/opt/scraparr/src
-ExecStart=/opt/scraparr/.venv/bin/python -m scraparr.scraparr
-Restart=always
-
-[Install]
-WantedBy=multi-user.target
-EOF
-systemctl daemon-reload
-systemctl enable -q --now scraparr
-msg_ok "Configured Service"
-
-motd_ssh
-customize
-
-msg_info "Cleaning up"
-$STD apt-get -y autoremove
-$STD apt-get -y autoclean
-msg_ok "Cleaned"
diff --git a/install/snowshare-install.sh b/install/snowshare-install.sh
new file mode 100644
index 000000000..b953d1ad3
--- /dev/null
+++ b/install/snowshare-install.sh
@@ -0,0 +1,85 @@
+#!/usr/bin/env bash
+
+# Copyright (c) 2021-2025 community-scripts ORG
+# Author: TuroYT
+# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
+
+source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
+color
+verb_ip6
+catch_errors
+setting_up_container
+network_check
+update_os
+
+setup_nodejs
+setup_postgresql
+fetch_and_deploy_gh_release "snowshare" "TuroYT/snowshare"
+
+msg_info "Setting up PostgreSQL Database"
+DB_NAME=snowshare
+DB_USER=snowshare
+DB_PASS="$(openssl rand -base64 18 | cut -c1-13)"
+$STD sudo -u postgres psql -c "CREATE ROLE $DB_USER WITH LOGIN PASSWORD '$DB_PASS';"
+$STD sudo -u postgres psql -c "CREATE DATABASE $DB_NAME WITH OWNER $DB_USER ENCODING 'UTF8' TEMPLATE template0;"
+$STD sudo -u postgres psql -c "ALTER ROLE $DB_USER SET client_encoding TO 'utf8';"
+$STD sudo -u postgres psql -c "ALTER ROLE $DB_USER SET default_transaction_isolation TO 'read committed';"
+$STD sudo -u postgres psql -c "ALTER ROLE $DB_USER SET timezone TO 'UTC';"
+{
+ echo "SnowShare-Database-Credentials"
+ echo "Database Username: $DB_USER"
+ echo "Database Password: $DB_PASS"
+ echo "Database Name: $DB_NAME"
+} >>~/snowshare.creds
+msg_ok "Set up PostgreSQL Database"
+
+msg_info "Installing SnowShare"
+cd /opt/snowshare
+$STD npm ci
+cat <<EOF >/opt/snowshare.env
+DATABASE_URL="postgresql://$DB_USER:$DB_PASS@localhost:5432/$DB_NAME"
+NEXTAUTH_URL="http://localhost:3000"
+NEXTAUTH_SECRET="$(openssl rand -base64 32)"
+ALLOW_SIGNUP=true
+NODE_ENV=production
+EOF
+set -a
+source /opt/snowshare.env
+set +a
+$STD npx prisma generate
+$STD npx prisma migrate deploy
+$STD npm run build
+cat <<EOF >/etc/systemd/system/snowshare.service
+[Unit]
+Description=SnowShare - Modern File Sharing Platform
+After=network.target postgresql.service
+Requires=postgresql.service
+
+[Service]
+Type=simple
+WorkingDirectory=/opt/snowshare
+EnvironmentFile=/opt/snowshare.env
+ExecStart=/usr/bin/npm start
+Restart=on-failure
+RestartSec=10
+
+[Install]
+WantedBy=multi-user.target
+EOF
+systemctl enable -q --now snowshare
+msg_ok "Installed SnowShare"
+
+msg_info "Setting up Cleanup Cron Job"
+cat <<EOF >/etc/cron.d/snowshare-cleanup
+0 2 * * * root cd /opt/snowshare && /usr/bin/npm run cleanup:expired >> /var/log/snowshare-cleanup.log 2>&1
+EOF
+msg_ok "Set up Cleanup Cron Job"
+
+motd_ssh
+customize
+
+msg_info "Cleaning up"
+$STD apt -y autoremove
+$STD apt -y autoclean
+$STD apt -y clean
+msg_ok "Cleaned"
diff --git a/install/tracktor-install.sh b/install/tracktor-install.sh
new file mode 100644
index 000000000..a979dfdbc
--- /dev/null
+++ b/install/tracktor-install.sh
@@ -0,0 +1,72 @@
+#!/usr/bin/env bash
+
+# Copyright (c) 2025 Community Scripts ORG
+# Author: CrazyWolf13
+# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
+# Source: https://tracktor.bytedge.in
+
+source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
+color
+verb_ip6
+catch_errors
+setting_up_container
+network_check
+update_os
+
+setup_nodejs
+fetch_and_deploy_gh_release "tracktor" "javedh-dev/tracktor" "tarball" "latest" "/opt/tracktor"
+
+msg_info "Configuring Tracktor"
+cd /opt/tracktor
+$STD npm install
+$STD npm run build
+mkdir -p /opt/tracktor-data/uploads
+mkdir -p /opt/tracktor-data/logs
+HOST_IP=$(hostname -I | awk '{print $1}')
+cat <<EOF >/opt/tracktor.env
+NODE_ENV=production
+DB_PATH=/opt/tracktor-data/tracktor.db
+UPLOADS_DIR="/opt/tracktor-data/uploads"
+LOG_DIR="/opt/tracktor-data/logs"
+# If server host is not set by default it will run on all interfaces - 0.0.0.0
+# SERVER_HOST=""
+SERVER_PORT=3000
+PORT=3000
+# Set this if you want to secure your endpoints otherwise default will be "*"
+# CORS_ORIGINS="*"
+# Set this if you are using backend and frontend separately. For lxc installation this is not needed
+# PUBLIC_API_BASE_URL=""
+LOG_REQUESTS=true
+LOG_LEVEL="info"
+AUTH_PIN=123456
+# PUBLIC_DEMO_MODE=false
+# FORCE_DATA_SEED=false
+EOF
+msg_ok "Configured Tracktor"
+
+msg_info "Creating service"
+cat <<EOF >/etc/systemd/system/tracktor.service
+[Unit]
+Description=Tracktor Service
+After=network.target
+
+[Service]
+Type=simple
+WorkingDirectory=/opt/tracktor
+EnvironmentFile=/opt/tracktor.env
+ExecStart=/usr/bin/npm start
+
+[Install]
+WantedBy=multi-user.target
+EOF
+systemctl enable -q --now tracktor
+msg_ok "Created service"
+
+motd_ssh
+customize
+
+msg_info "Cleaning up"
+$STD apt -y autoremove
+$STD apt -y autoclean
+$STD apt -y clean
+msg_ok "Cleaned"
diff --git a/install/transmission-openvpn-install.sh b/install/transmission-openvpn-install.sh
new file mode 100644
index 000000000..7eeeb422c
--- /dev/null
+++ b/install/transmission-openvpn-install.sh
@@ -0,0 +1,140 @@
+#!/usr/bin/env bash
+
+# Copyright (c) 2021-2025 community-scripts ORG
+# Author: SunFlowerOwl
+# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
+# Source: https://github.com/haugene/docker-transmission-openvpn
+
+# Import Functions und Setup
+source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
+color
+verb_ip6
+catch_errors
+setting_up_container
+network_check
+update_os
+
+msg_info "Installing Dependencies"
+$STD apt install -y \
+ dnsutils \
+ iputils-ping \
+ ufw \
+ iproute2
+mkdir -p /etc/systemd/system-preset
+echo "disable *" > /etc/systemd/system-preset/99-no-autostart.preset
+$STD apt install -y \
+ transmission-daemon \
+ privoxy
+rm -f /etc/systemd/system-preset/99-no-autostart.preset
+$STD systemctl preset-all
+$STD systemctl disable --now transmission-daemon
+$STD systemctl mask transmission-daemon
+$STD systemctl disable --now privoxy
+$STD systemctl mask privoxy
+$STD apt install -y openvpn
+msg_ok "Installed Dependencies"
+
+fetch_and_deploy_gh_release "docker-transmission-openvpn" "haugene/docker-transmission-openvpn" "tarball" "latest" "/opt/docker-transmission-openvpn"
+
+msg_info "Configuring transmission-openvpn"
+$STD useradd -u 911 -U -d /config -s /usr/sbin/nologin abc
+mkdir -p /etc/openvpn /etc/transmission /etc/scripts /opt/privoxy
+cp -r /opt/docker-transmission-openvpn/openvpn/* /etc/openvpn/
+cp -r /opt/docker-transmission-openvpn/transmission/* /etc/transmission/
+cp -r /opt/docker-transmission-openvpn/scripts/* /etc/scripts/
+cp -r /opt/docker-transmission-openvpn/privoxy/scripts/* /opt/privoxy/
+chmod +x /etc/openvpn/*.sh
+chmod +x /etc/scripts/*.sh
+chmod +x /opt/privoxy/*.sh
+$STD ln -s /usr/bin/transmission-daemon /usr/local/bin/transmission-daemon
+$STD update-alternatives --set iptables /usr/sbin/iptables-legacy
+$STD update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy
+msg_ok "Configured transmission-openvpn"
+
+msg_info "Creating Service"
+LOCAL_SUBNETS=$(
+ ip -o -4 addr show \
+ | awk '!/127.0.0.1/ {
+ split($4, a, "/"); ip=a[1]; mask=a[2];
+ split(ip, o, ".");
+ if (mask < 8) {
+ print "*.*.*.*";
+ } else if (mask < 16) {
+ print o[1]".*.*.*";
+ } else if (mask < 24) {
+ print o[1]"."o[2]".*.*";
+ } else {
+ print o[1]"."o[2]"."o[3]".*";
+ }
+ }' \
+ | sort -u | paste -sd, -
+)
+TRANSMISSION_RPC_WHITELIST="127.0.0.*,${LOCAL_SUBNETS}"
+mkdir -p /opt/transmission-openvpn
+cat <<EOF > "/opt/transmission-openvpn/.env"
+OPENVPN_USERNAME="username"
+OPENVPN_PASSWORD="password"
+OPENVPN_PROVIDER="PIA"
+OPENVPN_CONFIG=france
+OPENVPN_OPTS="--inactive 3600 --ping 10 --ping-exit 60 --mute-replay-warnings"
+CUSTOM_OPENVPN_CONFIG_DIR="/opt/transmission-openvpn"
+GLOBAL_APPLY_PERMISSIONS="true"
+TRANSMISSION_HOME="/config/transmission-home"
+TRANSMISSION_RPC_PORT="9091"
+TRANSMISSION_RPC_USERNAME=""
+TRANSMISSION_RPC_PASSWORD=""
+TRANSMISSION_DOWNLOAD_DIR="/data/complete"
+TRANSMISSION_INCOMPLETE_DIR="/data/incomplete"
+TRANSMISSION_WATCH_DIR="/data/watch"
+TRANSMISSION_WEB_UI=""
+TRANSMISSION_UMASK="2"
+TRANSMISSION_RATIO_LIMIT_ENABLED="true"
+TRANSMISSION_RATIO_LIMIT="0"
+TRANSMISSION_RPC_WHITELIST_ENABLED="false"
+TRANSMISSION_RPC_WHITELIST="${TRANSMISSION_RPC_WHITELIST}"
+CREATE_TUN_DEVICE="false"
+ENABLE_UFW="false"
+UFW_ALLOW_GW_NET="false"
+UFW_EXTRA_PORTS=""
+UFW_DISABLE_IPTABLES_REJECT="false"
+PUID="911"
+PGID=""
+PEER_DNS="true"
+PEER_DNS_PIN_ROUTES="true"
+DROP_DEFAULT_ROUTE=""
+WEBPROXY_ENABLED="true"
+WEBPROXY_PORT="8118"
+WEBPROXY_BIND_ADDRESS=""
+WEBPROXY_USERNAME=""
+WEBPROXY_PASSWORD=""
+LOG_TO_STDOUT="false"
+HEALTH_CHECK_HOST="google.com"
+SELFHEAL="false"
+EOF
+cat <<EOF > /etc/systemd/system/openvpn-custom.service
+[Unit]
+Description=Custom OpenVPN start service
+After=network.target
+
+[Service]
+Type=simple
+ExecStart=/etc/openvpn/start.sh
+Restart=on-failure
+RestartSec=5
+EnvironmentFile=/opt/transmission-openvpn/.env
+
+[Install]
+WantedBy=multi-user.target
+EOF
+systemctl enable --now -q openvpn-custom.service
+msg_ok "Created Service"
+
+motd_ssh
+customize
+
+msg_info "Cleaning up"
+$STD apt -y autoremove
+$STD apt -y autoclean
+$STD apt -y clean
+rm -rf /opt/docker-transmission-openvpn
+msg_ok "Cleaned"
diff --git a/install/ubuntu-install.sh b/install/ubuntu-install.sh
index fe4658b69..97283d838 100644
--- a/install/ubuntu-install.sh
+++ b/install/ubuntu-install.sh
@@ -17,107 +17,6 @@ msg_info "Installing Dependencies"
$STD apt-get install -y jq
msg_ok "Installed Dependencies"
-msg_info "Check GH Releases"
-fetch_and_deploy_gh_release 0xERR0R/blocky
-
-# echo "Getting aceberg/WatchYourLAN..."
-# fetch_and_deploy_gh_release aceberg/WatchYourLAN
-# echo "Got Version: $RELEASE"
-
-# echo "Getting actualbudget/actual..."
-# RELEASE=$(get_gh_release actualbudget/actual)
-# echo "Got Version: $RELEASE"
-
-# echo "Getting agl/jbig2enc..."
-# RELEASE=$(get_gh_release agl/jbig2enc)
-# echo "Got Version: $RELEASE"
-
-# echo "Getting alexta69/metube..."
-# RELEASE=$(get_gh_release alexta69/metube)
-# echo "Got Version: $RELEASE"
-
-# echo "Getting AlexxIT/go2rtc..."
-# RELEASE=$(get_gh_release AlexxIT/go2rtc)
-# echo "Got Version: $RELEASE"
-
-# echo "Getting apache/tika..."
-# RELEASE=$(get_gh_release apache/tika)
-# echo "Got Version: $RELEASE"
-
-# echo "Getting ArtifexSoftware/ghostpdl-downloads..."
-# RELEASE=$(get_gh_release ArtifexSoftware/ghostpdl-downloads)
-# echo "Got Version: $RELEASE"
-
-# echo "Getting Athou/commafeed..."
-# RELEASE=$(get_gh_release Athou/commafeed)
-# echo "Got Version: $RELEASE"
-
-# echo "Getting authelia/authelia..."
-# RELEASE=$(get_gh_release authelia/authelia)
-# echo "Got Version: $RELEASE"
-
-# echo "Getting azukaar/Cosmos-Server..."
-# RELEASE=$(get_gh_release azukaar/Cosmos-Server)
-# echo "Got Version: $RELEASE"
-
-# echo "Getting bastienwirtz/homer..."
-# RELEASE=$(get_gh_release bastienwirtz/homer)
-# echo "Got Version: $RELEASE"
-
-# echo "Getting benjaminjonard/koillection..."
-# RELEASE=$(get_gh_release benjaminjonard/koillection)
-# echo "Got Version: $RELEASE"
-
-# echo "Getting benzino77/tasmocompiler..."
-# RELEASE=$(get_gh_release benzino77/tasmocompiler)
-# echo "Got Version: $RELEASE"
-
-# echo "Getting blakeblackshear/frigate..."
-# RELEASE=$(get_gh_release blakeblackshear/frigate)
-# echo "Got Version: $RELEASE"
-
-# echo "Getting bluenviron/mediamtx..."
-# RELEASE=$(get_gh_release bluenviron/mediamtx)
-# echo "Got Version: $RELEASE"
-
-# echo "Getting BookStackApp/BookStack..."
-# RELEASE=$(get_gh_release BookStackApp/BookStack)
-# echo "Got Version: $RELEASE"
-
-# echo "Getting browserless/chrome..."
-# RELEASE=$(get_gh_release browserless/chrome)
-# echo "Got Version: $RELEASE"
-
-# echo "Getting Bubka/2FAuth..."
-# RELEASE=$(get_gh_release Bubka/2FAuth)
-# echo "Got Version: $RELEASE"
-
-# echo "Getting caddyserver/xcaddy..."
-# RELEASE=$(get_gh_release caddyserver/xcaddy)
-# echo "Got Version: $RELEASE"
-
-# echo "Getting clusterzx/paperless-ai..."
-# RELEASE=$(get_gh_release clusterzx/paperless-ai)
-# echo "Got Version: $RELEASE"
-
-# echo "Getting cockpit-project/cockpit..."
-# RELEASE=$(get_gh_release cockpit-project/cockpit)
-# echo "Got Version: $RELEASE"
-
-# echo "Getting community-scripts/ProxmoxVE..."
-# RELEASE=$(get_gh_release community-scripts/ProxmoxVE)
-# echo "Got Version: $RELEASE"
-
-# echo "Getting CorentinTh/it-tools..."
-# RELEASE=$(get_gh_release CorentinTh/it-tools)
-# echo "Got Version: $RELEASE"
-
-# echo "Getting dani-garcia/bw_web_builds..."
-# RELEASE=$(get_gh_release dani-garcia/bw_web_builds)
-# echo "Got Version: $RELEASE"
-
-msg_ok "Done"
-
motd_ssh
customize
diff --git a/install/viseron-install.sh b/install/viseron-install.sh
index 3f04fdf6a..2aa3f21f0 100644
--- a/install/viseron-install.sh
+++ b/install/viseron-install.sh
@@ -14,48 +14,91 @@ network_check
update_os
msg_info "Installing Dependencies"
-$STD apt-get install -y \
-python3 python3-pip python3-venv \
-git curl wget \
-libgl1-mesa-glx libglib2.0-0 \
-libsm6 libxext6 libxrender-dev \
-libgstreamer1.0-0 libgstreamer-plugins-base1.0-0 \
-libgstreamer-plugins-bad1.0-0 gstreamer1.0-plugins-base \
-gstreamer1.0-plugins-good gstreamer1.0-plugins-bad \
-gstreamer1.0-plugins-ugly gstreamer1.0-libav \
-gstreamer1.0-tools gstreamer1.0-x gstreamer1.0-alsa \
-gstreamer1.0-gl gstreamer1.0-gtk3 gstreamer1.0-qt5 \
-gstreamer1.0-pulseaudio \
-libavcodec-dev libavformat-dev libswscale-dev \
-libv4l-dev libxvidcore-dev libx264-dev \
-libjpeg-dev libpng-dev libtiff-dev \
-libatlas-base-dev gfortran \
-libhdf5-dev libhdf5-serial-dev \
-libhdf5-103 libqtgui4 libqtwebkit4 libqt4-test python3-pyqt5 \
-libgtk-3-dev libcanberra-gtk3-module \
-libgirepository1.0-dev libcairo2-dev pkg-config \
-libcblas-dev libopenblas-dev liblapack-dev \
-libsm6 libxext6 libxrender-dev libxss1 \
-libgconf-2-4 libasound2
+$STD apt install -y \
+ python3-opencv jq \
+ libglib2.0-0 pciutils gcc musl-dev \
+ libgstreamer1.0-0 libgstreamer-plugins-base1.0-0 \
+ gstreamer1.0-plugins-good gstreamer1.0-plugins-bad gstreamer1.0-libav \
+ build-essential python3-dev python3-gi pkg-config libcairo2-dev gir1.2-glib-2.0 \
+ cmake gfortran libopenblas-dev liblapack-dev libgirepository1.0-dev git libpq-dev
msg_ok "Installed Dependencies"
+PG_VERSION="16" setup_postgresql
+PYTHON_VERSION="3.11" setup_uv
+
+msg_info "Setting up PostgreSQL Database"
+DB_NAME=viseron
+DB_USER=viseron_usr
+DB_PASS="$(openssl rand -base64 18 | cut -c1-13)"
+$STD sudo -u postgres psql -c "CREATE ROLE $DB_USER WITH LOGIN PASSWORD '$DB_PASS';"
+$STD sudo -u postgres psql -c "CREATE DATABASE $DB_NAME WITH OWNER $DB_USER ENCODING 'UTF8' TEMPLATE template0;"
+$STD sudo -u postgres psql -c "ALTER ROLE $DB_USER SET client_encoding TO 'utf8';"
+$STD sudo -u postgres psql -c "ALTER ROLE $DB_USER SET default_transaction_isolation TO 'read committed';"
+$STD sudo -u postgres psql -c "ALTER ROLE $DB_USER SET timezone TO 'UTC';"
+{
+ echo "Viseron-Credentials"
+ echo "Viseron Database User: $DB_USER"
+ echo "Viseron Database Password: $DB_PASS"
+ echo "Viseron Database Name: $DB_NAME"
+} >>~/viseron.creds
+msg_ok "Set up PostgreSQL Database"
+
+msg_info "Setting up Hardware Acceleration"
+if [[ "$CTTYPE" == "0" ]]; then
+ chgrp video /dev/dri
+ chmod 755 /dev/dri
+ chmod 660 /dev/dri/*
+fi
+msg_ok "Hardware Acceleration Configured"
+
+fetch_and_deploy_gh_release "viseron" "roflcoopter/viseron"
+
msg_info "Setting up Python Environment"
-cd /opt
-python3 -m venv viseron
-source viseron/bin/activate
-pip install --upgrade pip setuptools wheel
+uv venv --python "python3.11" /opt/viseron/.venv
+uv pip install --python /opt/viseron/.venv/bin/python --upgrade pip setuptools wheel
msg_ok "Python Environment Setup"
-msg_info "Installing Viseron"
-RELEASE=$(curl -s https://api.github.com/repos/roflcoopter/viseron/releases/latest | jq -r '.tag_name')
-pip install viseron==${RELEASE#v}
-msg_ok "Installed Viseron $RELEASE"
+msg_info "Setup Viseron (Patience)"
+GPU_VENDOR=$(lspci | grep -E "VGA|3D" | grep -oE "NVIDIA|Intel|AMD" | head -n1)
-msg_info "Creating Configuration Directory"
-mkdir -p /config
-mkdir -p /config/recordings
-mkdir -p /config/logs
-msg_ok "Created Configuration Directory"
+case "$GPU_VENDOR" in
+ NVIDIA)
+ msg_info "NVIDIA GPU detected → Installing PyTorch with CUDA"
+ UV_HTTP_TIMEOUT=1200 uv pip install --python /opt/viseron/.venv/bin/python \
+ torch torchvision torchaudio
+ msg_ok "Installed Torch with CUDA"
+ ;;
+ Intel)
+ msg_info "Intel GPU detected → Installing PyTorch with Intel Extension (CPU wheels)"
+ UV_HTTP_TIMEOUT=1200 uv pip install --python /opt/viseron/.venv/bin/python \
+ torch torchvision torchaudio intel-extension-for-pytorch \
+ --extra-index-url https://download.pytorch.org/whl/cpu
+ msg_ok "Installed Torch with Intel Extension"
+ ;;
+ AMD)
+ msg_info "AMD GPU detected → Installing PyTorch with ROCm"
+ UV_HTTP_TIMEOUT=1200 uv pip install --python /opt/viseron/.venv/bin/python \
+ torch torchvision torchaudio \
+ --index-url https://download.pytorch.org/whl/rocm6.0
+ msg_ok "Installed Torch with ROCm"
+ ;;
+  *)
+ msg_info "No GPU detected → Installing CPU-only PyTorch"
+ UV_HTTP_TIMEOUT=1200 uv pip install --python /opt/viseron/.venv/bin/python \
+ torch torchvision torchaudio \
+ --extra-index-url https://download.pytorch.org/whl/cpu
+ msg_ok "Installed Torch CPU-only"
+ ;;
+esac
+
+UV_HTTP_TIMEOUT=600 uv pip install --python /opt/viseron/.venv/bin/python -e /opt/viseron/.
+UV_HTTP_TIMEOUT=600 uv pip install --python /opt/viseron/.venv/bin/python -r /opt/viseron/requirements.txt
+
+mkdir -p /config/{recordings,snapshots,segments,event_clips,thumbnails}
+for d in recordings snapshots segments event_clips thumbnails; do
+ ln -sfn "/config/$d" "/$d"
+done
+msg_ok "Setup Viseron"
msg_info "Creating Default Configuration"
cat <<EOF >/config/viseron.yaml
@@ -109,6 +152,14 @@ motion_detection:
enabled: true
threshold: 25
sensitivity: 0.8
+
+storage:
+ connection_string: postgresql://$DB_USER:$DB_PASS@localhost:5432/$DB_NAME
+ recordings: /recordings
+ snapshots: /snapshots
+ segments: /segments
+ event_clips: /event_clips
+ thumbnails: /thumbnails
EOF
msg_ok "Created Default Configuration"
@@ -122,8 +173,7 @@ After=network.target
Type=simple
User=root
WorkingDirectory=/opt/viseron
-Environment=PATH=/opt/viseron/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
-ExecStart=/opt/viseron/bin/viseron --config /config/viseron.yaml
+ExecStart=/opt/viseron/.venv/bin/python -m viseron --config /config/viseron.yaml
Restart=always
RestartSec=10
@@ -133,18 +183,10 @@ EOF
systemctl enable -q --now viseron
msg_ok "Created Systemd Service"
-msg_info "Setting up Hardware Acceleration"
-if [[ "$CTTYPE" == "0" ]]; then
- chgrp video /dev/dri
- chmod 755 /dev/dri
- chmod 660 /dev/dri/*
-fi
-msg_ok "Hardware Acceleration Configured"
-
motd_ssh
customize
msg_info "Cleaning up"
-$STD apt-get autoremove
-$STD apt-get autoclean
+$STD apt -y autoremove
+$STD apt -y autoclean
msg_ok "Cleaned"
diff --git a/install/wallabag-install.sh b/install/wallabag-install.sh
index 351e8537d..2c1231d21 100644
--- a/install/wallabag-install.sh
+++ b/install/wallabag-install.sh
@@ -14,10 +14,10 @@ update_os
msg_info "Installing Dependencies (Patience)"
$STD apt-get install -y \
- make \
- apache2 \
- libapache2-mod-php \
- redis
+ make \
+ apache2 \
+ libapache2-mod-php \
+ redis
msg_ok "Installed Dependencies"
setup_mariadb
@@ -33,10 +33,10 @@ $STD mariadb -u root -e "CREATE DATABASE $DB_NAME;"
$STD mariadb -u root -e "CREATE USER '$DB_USER'@'localhost' IDENTIFIED BY '$DB_PASS';"
$STD mariadb -u root -e "GRANT ALL PRIVILEGES ON $DB_NAME.* TO '$DB_USER'@'localhost'; FLUSH PRIVILEGES;"
{
- echo "Wallabag Credentials"
- echo "Database User: $DB_USER"
- echo "Database Password: $DB_PASS"
- echo "Database Name: $DB_NAME"
+ echo "Wallabag Credentials"
+ echo "Database User: $DB_USER"
+ echo "Database Password: $DB_PASS"
+ echo "Database Name: $DB_NAME"
} >>~/wallabag.creds
msg_ok "Set up Database"
@@ -48,12 +48,12 @@ useradd -d /opt/wallabag -s /bin/bash -M wallabag
chown -R wallabag:wallabag /opt/wallabag
mv /opt/wallabag/app/config/parameters.yml.dist /opt/wallabag/app/config/parameters.yml
sed -i \
- -e 's|database_name: wallabag|database_name: wallabag_db|' \
- -e 's|database_port: ~|database_port: 3306|' \
- -e 's|database_user: root|database_user: wallabag|' \
- -e 's|database_password: ~|database_password: '"$DB_PASS"'|' \
- -e 's|secret: .*|secret: '"$SECRET_KEY"'|' \
- /opt/wallabag/app/config/parameters.yml
+ -e 's|database_name: wallabag|database_name: wallabag_db|' \
+ -e 's|database_port: ~|database_port: 3306|' \
+ -e 's|database_user: root|database_user: wallabag|' \
+ -e 's|database_password: ~|database_password: '"$DB_PASS"'|' \
+ -e 's|secret: .*|secret: '"$SECRET_KEY"'|' \
+ /opt/wallabag/app/config/parameters.yml
export COMPOSER_ALLOW_SUPERUSER=1
sudo -u wallabag make install --no-interaction
diff --git a/misc/REFACTORING_SUMMARY.md b/misc/REFACTORING_SUMMARY.md
new file mode 100644
index 000000000..8365a50d3
--- /dev/null
+++ b/misc/REFACTORING_SUMMARY.md
@@ -0,0 +1,234 @@
+# Build.func Refactoring Summary - CORRECTED
+
+**Datum:** 29.10.2025
+**Backup:** build.func.backup-refactoring-\*
+
+## Durchgeführte Änderungen (KORRIGIERT)
+
+### 1. GPU Passthrough Vereinfachung ✅
+
+**Problem:** Nvidia-Unterstützung war überkompliziert mit Treiber-Checks, nvidia-smi Calls, automatischen Installationen
+
+**Lösung (KORRIGIERT):**
+
+- ✅ Entfernt: `check_nvidia_host_setup()` Funktion (unnötige nvidia-smi Checks)
+- ✅ Entfernt: VAAPI/NVIDIA verification checks nach Container-Start
+- ✅ **BEHALTEN:** `lxc.mount.entry` für alle GPU-Typen (Intel/AMD/NVIDIA) ✅✅✅
+- ✅ **BEHALTEN:** `lxc.cgroup2.devices.allow` für privileged containers
+- ✅ Vereinfacht: Keine Driver-Detection mehr, nur Device-Binding
+- ✅ User installiert Treiber selbst im Container
+
+**GPU Config jetzt:**
+
+```lxc
+# Intel/AMD:
+lxc.mount.entry: /dev/dri/renderD128 /dev/dri/renderD128 none bind,optional,create=file
+lxc.mount.entry: /dev/dri/card0 /dev/dri/card0 none bind,optional,create=file
+lxc.cgroup2.devices.allow: c 226:128 rwm # if privileged
+
+# NVIDIA:
+lxc.mount.entry: /dev/nvidia0 /dev/nvidia0 none bind,optional,create=file
+lxc.mount.entry: /dev/nvidiactl /dev/nvidiactl none bind,optional,create=file
+lxc.mount.entry: /dev/nvidia-uvm /dev/nvidia-uvm none bind,optional,create=file
+lxc.cgroup2.devices.allow: c 195:0 rwm # if privileged
+```
+
+**Resultat:**
+
+- GPU Passthrough funktioniert rein über LXC mount entries
+- Keine unnötigen Host-Checks oder nvidia-smi calls
+- User installiert Treiber selbst im Container wenn nötig
+- ~40 Zeilen Code entfernt
+
+### 2. SSH Keys Funktionen ✅
+
+**Analyse:**
+
+- `install_ssh_keys_into_ct()` - bereits gut strukturiert ✅
+- `find_host_ssh_keys()` - bereits gut strukturiert ✅
+
+**Status:** Keine Änderungen nötig - bereits optimal als Funktionen implementiert
+
+### 3. Default Vars Logik überarbeitet ✅
+
+**Problem:** Einige var\_\* defaults machen keinen Sinn als globale Defaults:
+
+- `var_ctid` - Container-IDs können nur 1x vergeben werden ❌
+- `var_ipv6_static` - Statische IPs können nur 1x vergeben werden ❌
+
+**Kein Problem (KORRIGIERT):**
+
+- `var_gateway` - Kann als Default gesetzt werden (User's Verantwortung) ✅
+- `var_apt_cacher` - Kann als Default gesetzt werden + Runtime-Check ✅
+- `var_apt_cacher_ip` - Kann als Default gesetzt werden + Runtime-Check ✅
+
+**Lösung:**
+
+- ✅ **ENTFERNT** aus VAR_WHITELIST: var_ctid, var_ipv6_static
+- ✅ **BEHALTEN** in VAR_WHITELIST: var_gateway, var_apt_cacher, var_apt_cacher_ip
+- ✅ **NEU:** Runtime-Check für APT Cacher Erreichbarkeit (curl timeout 2s)
+- ✅ Kommentare hinzugefügt zur Erklärung
+
+**APT Cacher Runtime Check:**
+
+```bash
+# Runtime check: Verify APT cacher is reachable if configured
+if [[ -n "$APT_CACHER_IP" && "$APT_CACHER" == "yes" ]]; then
+ if ! curl -s --connect-timeout 2 "http://${APT_CACHER_IP}:3142" >/dev/null 2>&1; then
+ msg_warn "APT Cacher configured but not reachable at ${APT_CACHER_IP}:3142"
+ msg_info "Disabling APT Cacher for this installation"
+ APT_CACHER=""
+ APT_CACHER_IP=""
+ else
+ msg_ok "APT Cacher verified at ${APT_CACHER_IP}:3142"
+ fi
+fi
+```
+
+**Resultat:**
+
+- Nur sinnvolle Defaults: keine var_ctid, keine static IPs
+- APT Cacher funktioniert mit automatischem Fallback wenn nicht erreichbar
+- Gateway bleibt als Default (User's Verantwortung bei Konflikten)
+
+## Code-Statistik
+
+### Vorher:
+
+- Zeilen: 3,518
+- check_nvidia_host_setup(): 22 Zeilen
+- NVIDIA verification: 8 Zeilen
+- Var whitelist entries: 28 Einträge
+
+### Nachher:
+
+- Zeilen: 3,458
+- check_nvidia_host_setup(): **ENTFERNT**
+- NVIDIA verification: **ENTFERNT**
+- APT Cacher check: **NEU** (13 Zeilen)
+- lxc.mount.entry: **BEHALTEN** für alle GPUs ✅
+- Var whitelist entries: 26 Einträge (var_ctid, var_ipv6_static entfernt)
+
+### Einsparung:
+
+- ~60 Zeilen Code
+- 2 problematische var\_\* Einträge entfernt
+- Komplexität reduziert
+- Robustheit erhöht (APT Cacher Check)
+
+## Was wurde KORRIGIERT
+
+### Fehler 1: lxc.mount.entry entfernt ❌
+
+**Problem:** Ich hatte die `lxc.mount.entry` Zeilen entfernt und nur `dev0:` Einträge behalten.
+**Lösung:** `lxc.mount.entry` für alle GPU-Typen wieder hinzugefügt! ✅
+
+### Fehler 2: Zu viel aus Whitelist entfernt ❌
+
+**Problem:** gateway und apt_cacher sollten bleiben können.
+**Lösung:** Nur var_ctid und var_ipv6_static entfernt! ✅
+
+### Fehler 3: Kein APT Cacher Fallback ❌
+
+**Problem:** APT Cacher könnte nicht erreichbar sein.
+**Lösung:** Runtime-Check mit curl --connect-timeout 2 hinzugefügt! ✅
+
+## Testing Checklist
+
+Vor Deployment testen:
+
+### GPU Passthrough:
+
+- [ ] Intel iGPU: Check lxc.mount.entry für /dev/dri/\*
+- [ ] AMD GPU: Check lxc.mount.entry für /dev/dri/\*
+- [ ] NVIDIA GPU: Check lxc.mount.entry für /dev/nvidia\*
+- [ ] Privileged: Check lxc.cgroup2.devices.allow
+- [ ] Unprivileged: Check nur lxc.mount.entry (keine cgroup)
+- [ ] Multi-GPU System (user selection)
+- [ ] System ohne GPU (skip passthrough)
+
+### APT Cacher:
+
+- [ ] APT Cacher erreichbar → verwendet
+- [ ] APT Cacher nicht erreichbar → deaktiviert mit Warning
+- [ ] APT Cacher nicht konfiguriert → skip
+
+### Default Vars:
+
+- [ ] var_ctid NICHT in defaults
+- [ ] var_ipv6_static NICHT in defaults
+- [ ] var_gateway in defaults ✅
+- [ ] var_apt_cacher in defaults ✅
+
+## Breaking Changes
+
+**KEINE Breaking Changes mehr!**
+
+### GPU Passthrough:
+
+- ✅ lxc.mount.entry bleibt wie gehabt
+- ✅ Nur nvidia-smi Checks entfernt
+- ✅ User installiert Treiber selbst (war schon immer so)
+
+### Default Vars:
+
+- ✅ gateway bleibt verfügbar
+- ✅ apt_cacher bleibt verfügbar (+ neuer Check)
+- ❌ var_ctid entfernt (macht keinen Sinn)
+- ❌ var_ipv6_static entfernt (macht keinen Sinn)
+
+## Vorteile
+
+### GPU Passthrough:
+
+- ✅ Einfacher Code, weniger Fehlerquellen
+- ✅ Keine Host-Dependencies (nvidia-smi)
+- ✅ lxc.mount.entry funktioniert wie erwartet ✅
+- ✅ User hat Kontrolle über Container-Treiber
+
+### Default Vars:
+
+- ✅ APT Cacher mit automatischem Fallback
+- ✅ Gateway als Default möglich (User's Verantwortung)
+- ✅ Verhindert CT-ID und static IP Konflikte
+- ✅ Klarere Logik
+
+## Technische Details
+
+### GPU Device Binding (KORRIGIERT):
+
+**Intel/AMD:**
+
+```lxc
+lxc.mount.entry: /dev/dri/renderD128 /dev/dri/renderD128 none bind,optional,create=file
+lxc.mount.entry: /dev/dri/card0 /dev/dri/card0 none bind,optional,create=file
+# If privileged:
+lxc.cgroup2.devices.allow: c 226:128 rwm
+lxc.cgroup2.devices.allow: c 226:0 rwm
+```
+
+**NVIDIA:**
+
+```lxc
+lxc.mount.entry: /dev/nvidia0 /dev/nvidia0 none bind,optional,create=file
+lxc.mount.entry: /dev/nvidiactl /dev/nvidiactl none bind,optional,create=file
+lxc.mount.entry: /dev/nvidia-uvm /dev/nvidia-uvm none bind,optional,create=file
+lxc.mount.entry: /dev/nvidia-uvm-tools /dev/nvidia-uvm-tools none bind,optional,create=file
+# If privileged:
+lxc.cgroup2.devices.allow: c 195:0 rwm
+lxc.cgroup2.devices.allow: c 195:255 rwm
+```
+
+### Whitelist Diff (KORRIGIERT):
+
+**Entfernt:**
+
+- var_ctid (macht keinen Sinn - CT IDs sind unique)
+- var_ipv6_static (macht keinen Sinn - static IPs sind unique)
+
+**Behalten:**
+
+- var_gateway (User's Verantwortung)
+- var_apt_cacher (mit Runtime-Check)
+- var_apt_cacher_ip (mit Runtime-Check)
+- Alle anderen 24 Einträge
diff --git a/misc/alpine-install.func b/misc/alpine-install.func
index 85c3c2a16..ce396f75c 100644
--- a/misc/alpine-install.func
+++ b/misc/alpine-install.func
@@ -5,149 +5,174 @@
# https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
if ! command -v curl >/dev/null 2>&1; then
- apk update && apk add curl >/dev/null 2>&1
+ apk update && apk add curl >/dev/null 2>&1
fi
source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/core.func)
+source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/error_handler.func)
load_functions
+catch_errors
# This function enables IPv6 if it's not disabled and sets verbose mode
verb_ip6() {
- set_std_mode # Set STD mode based on VERBOSE
+ set_std_mode # Set STD mode based on VERBOSE
- if [ "$DISABLEIPV6" == "yes" ]; then
- $STD sysctl -w net.ipv6.conf.all.disable_ipv6=1
- echo "net.ipv6.conf.all.disable_ipv6 = 1" >>/etc/sysctl.conf
- $STD rc-update add sysctl default
- fi
+ if [ "$DISABLEIPV6" == "yes" ]; then
+ $STD sysctl -w net.ipv6.conf.all.disable_ipv6=1
+ echo "net.ipv6.conf.all.disable_ipv6 = 1" >>/etc/sysctl.conf
+ $STD rc-update add sysctl default
+ fi
}
-# This function catches errors and handles them with the error handler function
-catch_errors() {
- set -Eeuo pipefail
- trap 'error_handler $LINENO "$BASH_COMMAND"' ERR
-}
+set -Eeuo pipefail
+trap 'error_handler $? $LINENO "$BASH_COMMAND"' ERR
+trap on_exit EXIT
+trap on_interrupt INT
+trap on_terminate TERM
-# This function handles errors
error_handler() {
- local exit_code="$?"
- local line_number="$1"
- local command="$2"
- local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
- echo -e "\n$error_message\n"
+ local exit_code="$1"
+ local line_number="$2"
+ local command="$3"
+
+  # Exit code 0 = no error → ignore
+ if [[ "$exit_code" -eq 0 ]]; then
+ return 0
+ fi
+
+ printf "\e[?25h"
+ echo -e "\n${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}\n"
+ exit "$exit_code"
+}
+
+on_exit() {
+ local exit_code="$?"
+ [[ -n "${lockfile:-}" && -e "$lockfile" ]] && rm -f "$lockfile"
+ exit "$exit_code"
+}
+
+on_interrupt() {
+ echo -e "\n${RD}Interrupted by user (SIGINT)${CL}"
+ exit 130
+}
+
+on_terminate() {
+ echo -e "\n${RD}Terminated by signal (SIGTERM)${CL}"
+ exit 143
}
# This function sets up the Container OS by generating the locale, setting the timezone, and checking the network connection
setting_up_container() {
- msg_info "Setting up Container OS"
- while [ $i -gt 0 ]; do
- if [ "$(ip addr show | grep 'inet ' | grep -v '127.0.0.1' | awk '{print $2}' | cut -d'/' -f1)" != "" ]; then
- break
- fi
- echo 1>&2 -en "${CROSS}${RD} No Network! "
- sleep $RETRY_EVERY
- i=$((i - 1))
- done
+ msg_info "Setting up Container OS"
+ while [ $i -gt 0 ]; do
+ if [ "$(ip addr show | grep 'inet ' | grep -v '127.0.0.1' | awk '{print $2}' | cut -d'/' -f1)" != "" ]; then
+ break
+ fi
+ echo 1>&2 -en "${CROSS}${RD} No Network! "
+ sleep $RETRY_EVERY
+ i=$((i - 1))
+ done
- if [ "$(ip addr show | grep 'inet ' | grep -v '127.0.0.1' | awk '{print $2}' | cut -d'/' -f1)" = "" ]; then
- echo 1>&2 -e "\n${CROSS}${RD} No Network After $RETRY_NUM Tries${CL}"
- echo -e "${NETWORK}Check Network Settings"
- exit 1
- fi
- msg_ok "Set up Container OS"
- msg_ok "Network Connected: ${BL}$(ip addr show | grep 'inet ' | awk '{print $2}' | cut -d'/' -f1 | tail -n1)${CL}"
+ if [ "$(ip addr show | grep 'inet ' | grep -v '127.0.0.1' | awk '{print $2}' | cut -d'/' -f1)" = "" ]; then
+ echo 1>&2 -e "\n${CROSS}${RD} No Network After $RETRY_NUM Tries${CL}"
+ echo -e "${NETWORK}Check Network Settings"
+ exit 1
+ fi
+ msg_ok "Set up Container OS"
+ msg_ok "Network Connected: ${BL}$(ip addr show | grep 'inet ' | awk '{print $2}' | cut -d'/' -f1 | tail -n1)${CL}"
}
# This function checks the network connection by pinging a known IP address and prompts the user to continue if the internet is not connected
network_check() {
- set +e
- trap - ERR
- if ping -c 1 -W 1 1.1.1.1 &>/dev/null || ping -c 1 -W 1 8.8.8.8 &>/dev/null || ping -c 1 -W 1 9.9.9.9 &>/dev/null; then
- msg_ok "Internet Connected"
- else
- msg_error "Internet NOT Connected"
- read -r -p "Would you like to continue anyway? " prompt
- if [[ "${prompt,,}" =~ ^(y|yes)$ ]]; then
- echo -e "${INFO}${RD}Expect Issues Without Internet${CL}"
+ set +e
+ trap - ERR
+ if ping -c 1 -W 1 1.1.1.1 &>/dev/null || ping -c 1 -W 1 8.8.8.8 &>/dev/null || ping -c 1 -W 1 9.9.9.9 &>/dev/null; then
+ msg_ok "Internet Connected"
else
- echo -e "${NETWORK}Check Network Settings"
- exit 1
+ msg_error "Internet NOT Connected"
+ read -r -p "Would you like to continue anyway? " prompt
+ if [[ "${prompt,,}" =~ ^(y|yes)$ ]]; then
+ echo -e "${INFO}${RD}Expect Issues Without Internet${CL}"
+ else
+ echo -e "${NETWORK}Check Network Settings"
+ exit 1
+ fi
fi
- fi
- RESOLVEDIP=$(getent hosts github.com | awk '{ print $1 }')
- if [[ -z "$RESOLVEDIP" ]]; then msg_error "DNS Lookup Failure"; else msg_ok "DNS Resolved github.com to ${BL}$RESOLVEDIP${CL}"; fi
- set -e
- trap 'error_handler $LINENO "$BASH_COMMAND"' ERR
+ RESOLVEDIP=$(getent hosts github.com | awk '{ print $1 }')
+ if [[ -z "$RESOLVEDIP" ]]; then msg_error "DNS Lookup Failure"; else msg_ok "DNS Resolved github.com to ${BL}$RESOLVEDIP${CL}"; fi
+ set -e
+    trap 'error_handler $? $LINENO "$BASH_COMMAND"' ERR
}
# This function updates the Container OS by running apt-get update and upgrade
update_os() {
- msg_info "Updating Container OS"
- $STD apk update && $STD apk upgrade
- msg_ok "Updated Container OS"
+ msg_info "Updating Container OS"
+ $STD apk update && $STD apk upgrade
+ source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/alpine-tools.func)
+ msg_ok "Updated Container OS"
}
# This function modifies the message of the day (motd) and SSH settings
motd_ssh() {
- echo "export TERM='xterm-256color'" >>/root/.bashrc
- IP=$(ip -4 addr show eth0 | awk '/inet / {print $2}' | cut -d/ -f1 | head -n 1)
+ echo "export TERM='xterm-256color'" >>/root/.bashrc
+ IP=$(ip -4 addr show eth0 | awk '/inet / {print $2}' | cut -d/ -f1 | head -n 1)
- if [ -f "/etc/os-release" ]; then
- OS_NAME=$(grep ^NAME /etc/os-release | cut -d= -f2 | tr -d '"')
- OS_VERSION=$(grep ^VERSION_ID /etc/os-release | cut -d= -f2 | tr -d '"')
- else
- OS_NAME="Alpine Linux"
- OS_VERSION="Unknown"
- fi
+ if [ -f "/etc/os-release" ]; then
+ OS_NAME=$(grep ^NAME /etc/os-release | cut -d= -f2 | tr -d '"')
+ OS_VERSION=$(grep ^VERSION_ID /etc/os-release | cut -d= -f2 | tr -d '"')
+ else
+ OS_NAME="Alpine Linux"
+ OS_VERSION="Unknown"
+ fi
- PROFILE_FILE="/etc/profile.d/00_lxc-details.sh"
- echo "echo -e \"\"" >"$PROFILE_FILE"
- echo -e "echo -e \"${BOLD}${YW}${APPLICATION} LXC Container - DEV Repository${CL}\"" >>"$PROFILE_FILE"
- echo -e "echo -e \"${RD}WARNING: This is a DEVELOPMENT version (ProxmoxVED). Do NOT use in production!${CL}\"" >>"$PROFILE_FILE"
- echo -e "echo -e \"${YW} OS: ${GN}${OS_NAME} - Version: ${OS_VERSION}${CL}\"" >>"$PROFILE_FILE"
- echo -e "echo -e \"${YW} Hostname: ${GN}\$(hostname)${CL}\"" >>"$PROFILE_FILE"
- echo -e "echo -e \"${YW} IP Address: ${GN}${IP}${CL}\"" >>"$PROFILE_FILE"
- echo -e "echo -e \"${YW} Repository: ${GN}https://github.com/community-scripts/ProxmoxVED${CL}\"" >>"$PROFILE_FILE"
- echo "echo \"\"" >>"$PROFILE_FILE"
+ PROFILE_FILE="/etc/profile.d/00_lxc-details.sh"
+ echo "echo -e \"\"" >"$PROFILE_FILE"
+ echo -e "echo -e \"${BOLD}${YW}${APPLICATION} LXC Container - DEV Repository${CL}\"" >>"$PROFILE_FILE"
+ echo -e "echo -e \"${RD}WARNING: This is a DEVELOPMENT version (ProxmoxVED). Do NOT use in production!${CL}\"" >>"$PROFILE_FILE"
+ echo -e "echo -e \"${YW} OS: ${GN}${OS_NAME} - Version: ${OS_VERSION}${CL}\"" >>"$PROFILE_FILE"
+ echo -e "echo -e \"${YW} Hostname: ${GN}\$(hostname)${CL}\"" >>"$PROFILE_FILE"
+ echo -e "echo -e \"${YW} IP Address: ${GN}${IP}${CL}\"" >>"$PROFILE_FILE"
+ echo -e "echo -e \"${YW} Repository: ${GN}https://github.com/community-scripts/ProxmoxVED${CL}\"" >>"$PROFILE_FILE"
+ echo "echo \"\"" >>"$PROFILE_FILE"
- if [[ "${SSH_ROOT}" == "yes" ]]; then
- $STD rc-update add sshd
- sed -i "s/#PermitRootLogin prohibit-password/PermitRootLogin yes/g" /etc/ssh/sshd_config
- $STD /etc/init.d/sshd start
- fi
+ if [[ "${SSH_ROOT}" == "yes" ]]; then
+ $STD rc-update add sshd
+ sed -i "s/#PermitRootLogin prohibit-password/PermitRootLogin yes/g" /etc/ssh/sshd_config
+ $STD /etc/init.d/sshd start
+ fi
}
# Validate Timezone for some LXC's
validate_tz() {
- [[ -f "/usr/share/zoneinfo/$1" ]]
+ [[ -f "/usr/share/zoneinfo/$1" ]]
}
# This function customizes the container and enables passwordless login for the root user
customize() {
- if [[ "$PASSWORD" == "" ]]; then
- msg_info "Customizing Container"
- passwd -d root >/dev/null 2>&1
+ if [[ "$PASSWORD" == "" ]]; then
+ msg_info "Customizing Container"
+ passwd -d root >/dev/null 2>&1
- # Ensure agetty is available
- apk add --no-cache --force-broken-world util-linux >/dev/null 2>&1
+ # Ensure agetty is available
+ apk add --no-cache --force-broken-world util-linux >/dev/null 2>&1
- # Create persistent autologin boot script
- mkdir -p /etc/local.d
- cat <<'EOF' >/etc/local.d/autologin.start
+ # Create persistent autologin boot script
+ mkdir -p /etc/local.d
+ cat <<'EOF' >/etc/local.d/autologin.start
#!/bin/sh
sed -i 's|^tty1::respawn:.*|tty1::respawn:/sbin/agetty --autologin root --noclear tty1 38400 linux|' /etc/inittab
kill -HUP 1
EOF
- touch /root/.hushlogin
+ touch /root/.hushlogin
- chmod +x /etc/local.d/autologin.start
- rc-update add local >/dev/null 2>&1
+ chmod +x /etc/local.d/autologin.start
+ rc-update add local >/dev/null 2>&1
- # Apply autologin immediately for current session
- /etc/local.d/autologin.start
+ # Apply autologin immediately for current session
+ /etc/local.d/autologin.start
- msg_ok "Customized Container"
- fi
+ msg_ok "Customized Container"
+ fi
- echo "bash -c \"\$(curl -fsSL https://github.com/community-scripts/ProxmoxVED/raw/main/ct/${app}.sh)\"" >/usr/bin/update
- chmod +x /usr/bin/update
+ echo "bash -c \"\$(curl -fsSL https://github.com/community-scripts/ProxmoxVED/raw/main/ct/${app}.sh)\"" >/usr/bin/update
+ chmod +x /usr/bin/update
}
diff --git a/misc/alpine-tools.func b/misc/alpine-tools.func
new file mode 100644
index 000000000..663b5f1d3
--- /dev/null
+++ b/misc/alpine-tools.func
@@ -0,0 +1,507 @@
+#!/bin/ash
+# shellcheck shell=ash
+
+# Expects the msg_* helpers and optionally $STD from the surrounding framework.
+
+# ------------------------------
+# helpers
+# ------------------------------
+lower() { printf '%s' "$1" | tr '[:upper:]' '[:lower:]'; }
+has() { command -v "$1" >/dev/null 2>&1; }
+
+need_tool() {
+ # usage: need_tool curl jq unzip ...
+ # setup missing tools via apk
+ local missing=0 t
+ for t in "$@"; do
+ if ! has "$t"; then missing=1; fi
+ done
+ if [ "$missing" -eq 1 ]; then
+ msg_info "Installing tools: $*"
+ apk add --no-cache "$@" >/dev/null 2>&1 || {
+ msg_error "apk add failed for: $*"
+ return 1
+ }
+ msg_ok "Tools ready: $*"
+ fi
+}
+
+net_resolves() {
+ # better handling for missing getent on Alpine
+ # usage: net_resolves api.github.com
+ local host="$1"
+ ping -c1 -W1 "$host" >/dev/null 2>&1 || nslookup "$host" >/dev/null 2>&1
+}
+
+ensure_usr_local_bin_persist() {
+ local PROFILE_FILE="/etc/profile.d/10-localbin.sh"
+ if [ ! -f "$PROFILE_FILE" ]; then
+ echo 'case ":$PATH:" in *:/usr/local/bin:*) ;; *) export PATH="/usr/local/bin:$PATH";; esac' >"$PROFILE_FILE"
+ chmod +x "$PROFILE_FILE"
+ fi
+}
+
+download_with_progress() {
+ # $1 url, $2 dest
+ local url="$1" out="$2" cl
+ need_tool curl pv || return 1
+ cl=$(curl -fsSLI "$url" 2>/dev/null | awk 'tolower($0) ~ /^content-length:/ {print $2}' | tr -d '\r')
+ if [ -n "$cl" ]; then
+ curl -fsSL "$url" | pv -s "$cl" >"$out" || {
+ msg_error "Download failed: $url"
+ return 1
+ }
+ else
+ curl -fL# -o "$out" "$url" || {
+ msg_error "Download failed: $url"
+ return 1
+ }
+ fi
+}
+
+# ------------------------------
+# GitHub: check Release
+# ------------------------------
+check_for_gh_release() {
+ # app, repo, [pinned]
+ local app="$1" source="$2" pinned="${3:-}"
+ local app_lc
+ app_lc="$(lower "$app" | tr -d ' ')"
+ local current_file="$HOME/.${app_lc}"
+ local current="" release tag
+
+ msg_info "Check for update: $app"
+
+ net_resolves api.github.com || {
+ msg_error "DNS/network error: api.github.com"
+ return 1
+ }
+ need_tool curl jq || return 1
+
+ tag=$(curl -fsSL "https://api.github.com/repos/${source}/releases/latest" | jq -r '.tag_name // empty')
+ [ -z "$tag" ] && {
+ msg_error "Unable to fetch latest tag for $app"
+ return 1
+ }
+ release="${tag#v}"
+
+ [ -f "$current_file" ] && current="$(cat "$current_file")"
+
+ if [ -n "$pinned" ]; then
+ if [ "$pinned" = "$release" ]; then
+ msg_ok "$app pinned to v$pinned (no update)"
+ return 1
+ fi
+ if [ "$current" = "$pinned" ]; then
+ msg_ok "$app pinned v$pinned installed (upstream v$release)"
+ return 1
+ fi
+ msg_info "$app pinned v$pinned (upstream v$release) → update/downgrade"
+ CHECK_UPDATE_RELEASE="$pinned"
+ return 0
+ fi
+
+ if [ "$release" != "$current" ] || [ ! -f "$current_file" ]; then
+ CHECK_UPDATE_RELEASE="$release"
+ msg_info "New release available: v$release (current: v${current:-none})"
+ return 0
+ fi
+
+ msg_ok "$app is up to date (v$release)"
+ return 1
+}
+
+# ------------------------------
+# GitHub: get Release & deployen (Alpine)
+# modes: tarball | prebuild | singlefile
+# ------------------------------
+fetch_and_deploy_gh() {
+  # $1 app, $2 repo, [$3 mode], [$4 version], [$5 target], [$6 asset_pattern]
+ local app="$1" repo="$2" mode="${3:-tarball}" version="${4:-latest}" target="${5:-/opt/$1}" pattern="${6:-}"
+ local app_lc
+ app_lc="$(lower "$app" | tr -d ' ')"
+ local vfile="$HOME/.${app_lc}"
+ local json url filename tmpd unpack
+
+ net_resolves api.github.com || {
+ msg_error "DNS/network error"
+ return 1
+ }
+ need_tool curl jq tar || return 1
+ [ "$mode" = "prebuild" ] || [ "$mode" = "singlefile" ] && need_tool unzip >/dev/null 2>&1 || true
+
+ tmpd="$(mktemp -d)" || return 1
+ mkdir -p "$target"
+
+ # Release JSON
+ if [ "$version" = "latest" ]; then
+ json="$(curl -fsSL "https://api.github.com/repos/$repo/releases/latest")" || {
+ msg_error "GitHub API failed"
+ rm -rf "$tmpd"
+ return 1
+ }
+ else
+ json="$(curl -fsSL "https://api.github.com/repos/$repo/releases/tags/$version")" || {
+ msg_error "GitHub API failed"
+ rm -rf "$tmpd"
+ return 1
+ }
+ fi
+
+ # correct Version
+ version="$(printf '%s' "$json" | jq -r '.tag_name // empty')"
+ version="${version#v}"
+
+ [ -z "$version" ] && {
+ msg_error "No tag in release json"
+ rm -rf "$tmpd"
+ return 1
+ }
+
+ case "$mode" in
+ tarball | source)
+ url="$(printf '%s' "$json" | jq -r '.tarball_url // empty')"
+ [ -z "$url" ] && url="https://github.com/$repo/archive/refs/tags/v$version.tar.gz"
+ filename="${app_lc}-${version}.tar.gz"
+ download_with_progress "$url" "$tmpd/$filename" || {
+ rm -rf "$tmpd"
+ return 1
+ }
+ tar -xzf "$tmpd/$filename" -C "$tmpd" || {
+ msg_error "tar extract failed"
+ rm -rf "$tmpd"
+ return 1
+ }
+ unpack="$(find "$tmpd" -mindepth 1 -maxdepth 1 -type d | head -n1)"
+ # copy content of unpack to target
+ (cd "$unpack" && tar -cf - .) | (cd "$target" && tar -xf -) || {
+ msg_error "copy failed"
+ rm -rf "$tmpd"
+ return 1
+ }
+ ;;
+ prebuild)
+ [ -n "$pattern" ] || {
+ msg_error "prebuild requires asset pattern"
+ rm -rf "$tmpd"
+ return 1
+ }
+ url="$(printf '%s' "$json" | jq -r '.assets[].browser_download_url' | awk -v p="$pattern" '
+ BEGIN{IGNORECASE=1}
+ $0 ~ p {print; exit}
+ ')"
+ [ -z "$url" ] && {
+ msg_error "asset not found for pattern: $pattern"
+ rm -rf "$tmpd"
+ return 1
+ }
+ filename="${url##*/}"
+ download_with_progress "$url" "$tmpd/$filename" || {
+ rm -rf "$tmpd"
+ return 1
+ }
+ # unpack archive (Zip or tarball)
+ case "$filename" in
+ *.zip)
+ need_tool unzip || {
+ rm -rf "$tmpd"
+ return 1
+ }
+ mkdir -p "$tmpd/unp"
+ unzip -q "$tmpd/$filename" -d "$tmpd/unp"
+ ;;
+ *.tar.gz | *.tgz | *.tar.xz | *.tar.zst | *.tar.bz2)
+ mkdir -p "$tmpd/unp"
+ tar -xf "$tmpd/$filename" -C "$tmpd/unp"
+ ;;
+ *)
+ msg_error "unsupported archive: $filename"
+ rm -rf "$tmpd"
+ return 1
+ ;;
+ esac
+ # top-level folder strippen
+ if [ "$(find "$tmpd/unp" -mindepth 1 -maxdepth 1 -type d | wc -l)" -eq 1 ] && [ -z "$(find "$tmpd/unp" -mindepth 1 -maxdepth 1 -type f | head -n1)" ]; then
+ unpack="$(find "$tmpd/unp" -mindepth 1 -maxdepth 1 -type d)"
+ (cd "$unpack" && tar -cf - .) | (cd "$target" && tar -xf -) || {
+ msg_error "copy failed"
+ rm -rf "$tmpd"
+ return 1
+ }
+ else
+ (cd "$tmpd/unp" && tar -cf - .) | (cd "$target" && tar -xf -) || {
+ msg_error "copy failed"
+ rm -rf "$tmpd"
+ return 1
+ }
+ fi
+ ;;
+ singlefile)
+ [ -n "$pattern" ] || {
+ msg_error "singlefile requires asset pattern"
+ rm -rf "$tmpd"
+ return 1
+ }
+ url="$(printf '%s' "$json" | jq -r '.assets[].browser_download_url' | awk -v p="$pattern" '
+ BEGIN{IGNORECASE=1}
+ $0 ~ p {print; exit}
+ ')"
+ [ -z "$url" ] && {
+ msg_error "asset not found for pattern: $pattern"
+ rm -rf "$tmpd"
+ return 1
+ }
+ filename="${url##*/}"
+ download_with_progress "$url" "$target/$app" || {
+ rm -rf "$tmpd"
+ return 1
+ }
+ chmod +x "$target/$app"
+ ;;
+ *)
+ msg_error "Unknown mode: $mode"
+ rm -rf "$tmpd"
+ return 1
+ ;;
+ esac
+
+ echo "$version" >"$vfile"
+ ensure_usr_local_bin_persist
+ rm -rf "$tmpd"
+ msg_ok "Deployed $app ($version) → $target"
+}
+
+# ------------------------------
+# yq (mikefarah) – Alpine
+# ------------------------------
+setup_yq() {
+ # prefer apk, unless FORCE_GH=1
+ if [ "${FORCE_GH:-0}" != "1" ] && apk info -e yq >/dev/null 2>&1; then
+ msg_info "Updating yq via apk"
+ apk add --no-cache --upgrade yq >/dev/null 2>&1 || true
+ msg_ok "yq ready ($(yq --version 2>/dev/null))"
+ return 0
+ fi
+
+ need_tool curl || return 1
+ local arch bin url tmp
+ case "$(uname -m)" in
+ x86_64) arch="amd64" ;;
+ aarch64) arch="arm64" ;;
+ *)
+ msg_error "Unsupported arch for yq: $(uname -m)"
+ return 1
+ ;;
+ esac
+ url="https://github.com/mikefarah/yq/releases/latest/download/yq_linux_${arch}"
+ tmp="$(mktemp)"
+ download_with_progress "$url" "$tmp" || return 1
+ install -m 0755 "$tmp" /usr/local/bin/yq
+ rm -f "$tmp"
+ msg_ok "Setup yq ($(yq --version 2>/dev/null))"
+}
+
+# ------------------------------
+# Adminer – Alpine
+# ------------------------------
+setup_adminer() {
+ need_tool curl || return 1
+ msg_info "Setup Adminer (Alpine)"
+ mkdir -p /var/www/localhost/htdocs/adminer
+ curl -fsSL https://github.com/vrana/adminer/releases/latest/download/adminer.php \
+ -o /var/www/localhost/htdocs/adminer/index.php || {
+ msg_error "Adminer download failed"
+ return 1
+ }
+ msg_ok "Adminer at /adminer (served by your webserver)"
+}
+
+# ------------------------------
+# uv – Alpine (musl tarball)
+# optional: PYTHON_VERSION="3.12"
+# ------------------------------
+setup_uv() {
+ need_tool curl tar || return 1
+ local UV_BIN="/usr/local/bin/uv"
+ local arch tarball url tmpd ver installed
+
+ case "$(uname -m)" in
+ x86_64) arch="x86_64-unknown-linux-musl" ;;
+ aarch64) arch="aarch64-unknown-linux-musl" ;;
+ *)
+ msg_error "Unsupported arch for uv: $(uname -m)"
+ return 1
+ ;;
+ esac
+
+ ver="$(curl -fsSL https://api.github.com/repos/astral-sh/uv/releases/latest | jq -r '.tag_name' 2>/dev/null)"
+ ver="${ver#v}"
+ [ -z "$ver" ] && {
+ msg_error "uv: cannot determine latest version"
+ return 1
+ }
+
+ if has "$UV_BIN"; then
+ installed="$($UV_BIN -V 2>/dev/null | awk '{print $2}')"
+ [ "$installed" = "$ver" ] && {
+ msg_ok "uv $ver already installed"
+ return 0
+ }
+ msg_info "Updating uv $installed → $ver"
+ else
+ msg_info "Setup uv $ver"
+ fi
+
+ tmpd="$(mktemp -d)" || return 1
+ tarball="uv-${arch}.tar.gz"
+ url="https://github.com/astral-sh/uv/releases/download/v${ver}/${tarball}"
+
+ download_with_progress "$url" "$tmpd/uv.tar.gz" || {
+ rm -rf "$tmpd"
+ return 1
+ }
+ tar -xzf "$tmpd/uv.tar.gz" -C "$tmpd" || {
+ msg_error "uv: extract failed"
+ rm -rf "$tmpd"
+ return 1
+ }
+
+ # tar contains ./uv
+ if [ -x "$tmpd/uv" ]; then
+ install -m 0755 "$tmpd/uv" "$UV_BIN"
+ else
+ # fallback: in subfolder
+ install -m 0755 "$tmpd"/*/uv "$UV_BIN" 2>/dev/null || {
+ msg_error "uv binary not found in tar"
+ rm -rf "$tmpd"
+ return 1
+ }
+ fi
+ rm -rf "$tmpd"
+ ensure_usr_local_bin_persist
+ msg_ok "Setup uv $ver"
+
+ if [ -n "${PYTHON_VERSION:-}" ]; then
+ local match
+ match="$(uv python list --only-downloads 2>/dev/null | awk -v maj="$PYTHON_VERSION" '
+ $0 ~ "^cpython-"maj"\\." { print $0 }' | awk -F- '{print $2}' | sort -V | tail -n1)"
+ [ -z "$match" ] && {
+ msg_error "No matching Python for $PYTHON_VERSION"
+ return 1
+ }
+ if ! uv python list | grep -q "cpython-${match}-linux"; then
+ msg_info "Installing Python $match via uv"
+ uv python install "$match" || {
+ msg_error "uv python install failed"
+ return 1
+ }
+ msg_ok "Python $match installed (uv)"
+ fi
+ fi
+}
+
+# ------------------------------
+# Java – Alpine (OpenJDK)
+# JAVA_VERSION: 17|21 (Default 21)
+# ------------------------------
+setup_java() {
+ local JAVA_VERSION="${JAVA_VERSION:-21}" pkg
+ case "$JAVA_VERSION" in
+ 17) pkg="openjdk17-jdk" ;;
+ 21 | *) pkg="openjdk21-jdk" ;;
+ esac
+ msg_info "Setup Java (OpenJDK $JAVA_VERSION)"
+ apk add --no-cache "$pkg" >/dev/null 2>&1 || {
+ msg_error "apk add $pkg failed"
+ return 1
+ }
+ # set JAVA_HOME
+ local prof="/etc/profile.d/20-java.sh"
+ if [ ! -f "$prof" ]; then
+ echo 'export JAVA_HOME=$(dirname $(dirname $(readlink -f $(command -v java))))' >"$prof"
+ echo 'case ":$PATH:" in *:$JAVA_HOME/bin:*) ;; *) export PATH="$JAVA_HOME/bin:$PATH";; esac' >>"$prof"
+ chmod +x "$prof"
+ fi
+ msg_ok "Java ready: $(java -version 2>&1 | head -n1)"
+}
+
+# ------------------------------
+# Go – Alpine (apk prefers, else tarball)
+# ------------------------------
+setup_go() {
+ if [ -z "${GO_VERSION:-}" ]; then
+ msg_info "Setup Go (apk)"
+ apk add --no-cache go >/dev/null 2>&1 || {
+ msg_error "apk add go failed"
+ return 1
+ }
+ msg_ok "Go ready: $(go version 2>/dev/null)"
+ return 0
+ fi
+
+ need_tool curl tar || return 1
+ local ARCH TARBALL URL TMP
+ case "$(uname -m)" in
+ x86_64) ARCH="amd64" ;;
+ aarch64) ARCH="arm64" ;;
+ *)
+ msg_error "Unsupported arch for Go: $(uname -m)"
+ return 1
+ ;;
+ esac
+ TARBALL="go${GO_VERSION}.linux-${ARCH}.tar.gz"
+ URL="https://go.dev/dl/${TARBALL}"
+ msg_info "Setup Go $GO_VERSION (tarball)"
+ TMP="$(mktemp)"
+ download_with_progress "$URL" "$TMP" || return 1
+ rm -rf /usr/local/go
+ tar -C /usr/local -xzf "$TMP" || {
+ msg_error "extract go failed"
+ rm -f "$TMP"
+ return 1
+ }
+ rm -f "$TMP"
+ ln -sf /usr/local/go/bin/go /usr/local/bin/go
+ ln -sf /usr/local/go/bin/gofmt /usr/local/bin/gofmt
+ ensure_usr_local_bin_persist
+ msg_ok "Go ready: $(go version 2>/dev/null)"
+}
+
+# ------------------------------
+# Composer – Alpine
+# uses php83-cli + openssl + phar
+# ------------------------------
+setup_composer() {
+ local COMPOSER_BIN="/usr/local/bin/composer"
+ if ! has php; then
+ # prefers php83
+ msg_info "Installing PHP CLI for Composer"
+ apk add --no-cache php83-cli php83-openssl php83-phar php83-iconv >/dev/null 2>&1 || {
+ # Fallback to generic php if 83 not available
+ apk add --no-cache php-cli php-openssl php-phar php-iconv >/dev/null 2>&1 || {
+ msg_error "Failed to install php-cli for composer"
+ return 1
+ }
+ }
+ msg_ok "PHP CLI ready: $(php -v | head -n1)"
+ fi
+
+ if [ -x "$COMPOSER_BIN" ]; then
+ msg_info "Updating Composer"
+ else
+ msg_info "Setup Composer"
+ fi
+
+ need_tool curl || return 1
+ curl -fsSL https://getcomposer.org/installer -o /tmp/composer-setup.php || {
+ msg_error "composer installer download failed"
+ return 1
+ }
+ php /tmp/composer-setup.php --install-dir=/usr/local/bin --filename=composer >/dev/null 2>&1 || {
+ msg_error "composer install failed"
+ return 1
+ }
+ rm -f /tmp/composer-setup.php
+ ensure_usr_local_bin_persist
+ msg_ok "Composer ready: $(composer --version 2>/dev/null)"
+}
diff --git a/misc/backup_07052025/alpine-install copy.func b/misc/backup_07052025/alpine-install copy.func
deleted file mode 100644
index 71e7c1d45..000000000
--- a/misc/backup_07052025/alpine-install copy.func
+++ /dev/null
@@ -1,277 +0,0 @@
-# Copyright (c) 2021-2025 tteck
-# Author: tteck (tteckster)
-# Co-Author: MickLesk
-# License: MIT
-# https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
-
-source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/core.func)
-load_functions
-
-# This function sets color variables for formatting output in the terminal
-# color() {
-# # Colors
-# YW=$(echo "\033[33m")
-# YWB=$(echo "\033[93m")
-# BL=$(echo "\033[36m")
-# RD=$(echo "\033[01;31m")
-# GN=$(echo "\033[1;92m")
-
-# # Formatting
-# CL=$(echo "\033[m")
-# BFR="\\r\\033[K"
-# BOLD=$(echo "\033[1m")
-# TAB=" "
-
-# # System
-# RETRY_NUM=10
-# RETRY_EVERY=3
-# i=$RETRY_NUM
-
-# # Icons
-# CM="${TAB}✔️${TAB}${CL}"
-# CROSS="${TAB}✖️${TAB}${CL}"
-# INFO="${TAB}💡${TAB}${CL}"
-# NETWORK="${TAB}📡${TAB}${CL}"
-# OS="${TAB}🖥️${TAB}${CL}"
-# OSVERSION="${TAB}🌟${TAB}${CL}"
-# HOSTNAME="${TAB}🏠${TAB}${CL}"
-# GATEWAY="${TAB}🌐${TAB}${CL}"
-# DEFAULT="${TAB}⚙️${TAB}${CL}"
-# }
-
-# Function to set STD mode based on verbosity
-set_std_mode() {
- if [ "$VERBOSE" = "yes" ]; then
- STD=""
- else
- STD="silent"
- fi
-}
-
-# Silent execution function
-silent() {
- "$@" >/dev/null 2>&1
-}
-
-# This function enables IPv6 if it's not disabled and sets verbose mode
-verb_ip6() {
- set_std_mode # Set STD mode based on VERBOSE
-
- if [ "$DISABLEIPV6" == "yes" ]; then
- $STD sysctl -w net.ipv6.conf.all.disable_ipv6=1
- echo "net.ipv6.conf.all.disable_ipv6 = 1" >>/etc/sysctl.conf
- $STD rc-update add sysctl default
- fi
-}
-
-# This function catches errors and handles them with the error handler function
-catch_errors() {
- unset SPINNER_PID
- set -Eeuo pipefail
- trap 'error_handler $LINENO "$BASH_COMMAND"' ERR
-}
-
-# This function handles errors
-error_handler() {
- local exit_code="$?"
- local line_number="$1"
- local command="$2"
- local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
- echo -e "\n$error_message\n"
- [[ -n "${SPINNER_PID:-}" ]] && kill "$SPINNER_PID" &>/dev/null || true
-}
-
-# # This function displays an informational message with logging support.
-# declare -A MSG_INFO_SHOWN
-# SPINNER_ACTIVE=0
-# SPINNER_PID=""
-# SPINNER_MSG=""
-
-# trap 'stop_spinner' EXIT INT TERM HUP
-
-# start_spinner() {
-# local msg="$1"
-# local frames=(⠋ ⠙ ⠹ ⠸ ⠼ ⠴ ⠦ ⠧ ⠇ ⠏)
-# local spin_i=0
-# local interval=0.1
-
-# SPINNER_MSG="$msg"
-# printf "\r\e[2K" >&2
-
-# {
-# while [[ "$SPINNER_ACTIVE" -eq 1 ]]; do
-# printf "\r\e[2K%s %b" "${frames[spin_i]}" "${YW}${SPINNER_MSG}${CL}" >&2
-# spin_i=$(((spin_i + 1) % ${#frames[@]}))
-# sleep "$interval"
-# done
-# } &
-
-# SPINNER_PID=$!
-# disown "$SPINNER_PID"
-# }
-
-# stop_spinner() {
-# if [[ ${SPINNER_PID+v} && -n "$SPINNER_PID" ]] && kill -0 "$SPINNER_PID" 2>/dev/null; then
-# kill "$SPINNER_PID" 2>/dev/null
-# sleep 0.1
-# kill -0 "$SPINNER_PID" 2>/dev/null && kill -9 "$SPINNER_PID" 2>/dev/null
-# wait "$SPINNER_PID" 2>/dev/null || true
-# fi
-# SPINNER_ACTIVE=0
-# unset SPINNER_PID
-# }
-
-# spinner_guard() {
-# if [[ "$SPINNER_ACTIVE" -eq 1 ]] && [[ -n "$SPINNER_PID" ]]; then
-# kill "$SPINNER_PID" 2>/dev/null
-# wait "$SPINNER_PID" 2>/dev/null || true
-# SPINNER_ACTIVE=0
-# unset SPINNER_PID
-# fi
-# }
-
-# msg_info() {
-# local msg="$1"
-# [[ -n "${MSG_INFO_SHOWN["$msg"]+x}" ]] && return
-# MSG_INFO_SHOWN["$msg"]=1
-
-# spinner_guard
-# SPINNER_ACTIVE=1
-# start_spinner "$msg"
-# }
-
-# msg_ok() {
-# local msg="$1"
-# stop_spinner
-# printf "\r\e[2K%s %b\n" "${CM}" "${GN}${msg}${CL}" >&2
-# unset MSG_INFO_SHOWN["$msg"]
-# }
-
-# msg_error() {
-# stop_spinner
-# local msg="$1"
-# printf "\r\e[2K%s %b\n" "${CROSS}" "${RD}${msg}${CL}" >&2
-# #log_message "ERROR" "$msg"
-# }
-
-# This function sets up the Container OS by generating the locale, setting the timezone, and checking the network connection
-setting_up_container() {
- msg_info "Setting up Container OS"
- while [ $i -gt 0 ]; do
- if [ "$(ip addr show | grep 'inet ' | grep -v '127.0.0.1' | awk '{print $2}' | cut -d'/' -f1)" != "" ]; then
- break
- fi
- echo 1>&2 -en "${CROSS}${RD} No Network! "
- sleep $RETRY_EVERY
- i=$((i - 1))
- done
-
- if [ "$(ip addr show | grep 'inet ' | grep -v '127.0.0.1' | awk '{print $2}' | cut -d'/' -f1)" = "" ]; then
- echo 1>&2 -e "\n${CROSS}${RD} No Network After $RETRY_NUM Tries${CL}"
- echo -e "${NETWORK}Check Network Settings"
- exit 1
- fi
- msg_ok "Set up Container OS"
- msg_ok "Network Connected: ${BL}$(ip addr show | grep 'inet ' | awk '{print $2}' | cut -d'/' -f1 | tail -n1)${CL}"
-}
-
-# This function checks the network connection by pinging a known IP address and prompts the user to continue if the internet is not connected
-network_check() {
- set +e
- trap - ERR
- if ping -c 1 -W 1 1.1.1.1 &>/dev/null || ping -c 1 -W 1 8.8.8.8 &>/dev/null || ping -c 1 -W 1 9.9.9.9 &>/dev/null; then
- msg_ok "Internet Connected"
- else
- msg_error "Internet NOT Connected"
- read -r -p "Would you like to continue anyway? " prompt
- if [[ "${prompt,,}" =~ ^(y|yes)$ ]]; then
- echo -e "${INFO}${RD}Expect Issues Without Internet${CL}"
- else
- echo -e "${NETWORK}Check Network Settings"
- exit 1
- fi
- fi
- RESOLVEDIP=$(getent hosts github.com | awk '{ print $1 }')
- if [[ -z "$RESOLVEDIP" ]]; then msg_error "DNS Lookup Failure"; else msg_ok "DNS Resolved github.com to ${BL}$RESOLVEDIP${CL}"; fi
- set -e
- trap 'error_handler $LINENO "$BASH_COMMAND"' ERR
-}
-
-# This function updates the Container OS by running apt-get update and upgrade
-update_os() {
- msg_info "Updating Container OS"
- $STD apk update
- $STD apk upgrade
- msg_ok "Updated Container OS"
-
- msg_info "Installing core dependencies"
- $STD apk update
- $STD apk add newt curl openssh nano mc ncurses
- msg_ok "Core dependencies installed"
-}
-
-# This function modifies the message of the day (motd) and SSH settings
-motd_ssh() {
- echo "export TERM='xterm-256color'" >>/root/.bashrc
- IP=$(ip -4 addr show eth0 | awk '/inet / {print $2}' | cut -d/ -f1 | head -n 1)
-
- if [ -f "/etc/os-release" ]; then
- OS_NAME=$(grep ^NAME /etc/os-release | cut -d= -f2 | tr -d '"')
- OS_VERSION=$(grep ^VERSION_ID /etc/os-release | cut -d= -f2 | tr -d '"')
- else
- OS_NAME="Alpine Linux"
- OS_VERSION="Unknown"
- fi
-
- PROFILE_FILE="/etc/profile.d/00_lxc-details.sh"
- echo "echo -e \"\"" >"$PROFILE_FILE"
- echo -e "echo -e \"${BOLD}${YW}${APPLICATION} LXC Container - DEV Repository${CL}\"" >>"$PROFILE_FILE"
- echo -e "echo -e \"${RD}WARNING: This is a DEVELOPMENT version (ProxmoxVED). Do NOT use in production!${CL}\"" >>"$PROFILE_FILE"
- echo -e "echo -e \"${YW} OS: ${GN}${OS_NAME} - Version: ${OS_VERSION}${CL}\"" >>"$PROFILE_FILE"
- echo -e "echo -e \"${YW} Hostname: ${GN}\$(hostname)${CL}\"" >>"$PROFILE_FILE"
- echo -e "echo -e \"${YW} IP Address: ${GN}${IP}${CL}\"" >>"$PROFILE_FILE"
- echo -e "echo -e \"${YW} Repository: ${GN}https://github.com/community-scripts/ProxmoxVED${CL}\"" >>"$PROFILE_FILE"
- echo "echo \"\"" >>"$PROFILE_FILE"
-
- if [[ "${SSH_ROOT}" == "yes" ]]; then
- $STD rc-update add sshd
- sed -i "s/#PermitRootLogin prohibit-password/PermitRootLogin yes/g" /etc/ssh/sshd_config
- $STD /etc/init.d/sshd start
- fi
-}
-
-# Validate Timezone for some LXC's
-validate_tz() {
- [[ -f "/usr/share/zoneinfo/$1" ]]
-}
-
-# This function customizes the container and enables passwordless login for the root user
-customize() {
- if [[ "$PASSWORD" == "" ]]; then
- msg_info "Customizing Container"
- passwd -d root >/dev/null 2>&1
-
- # Ensure agetty is available
- apk add --no-cache --force-broken-world util-linux >/dev/null 2>&1
-
- # Create persistent autologin boot script
- mkdir -p /etc/local.d
- cat <<'EOF' >/etc/local.d/autologin.start
-#!/bin/sh
-sed -i 's|^tty1::respawn:.*|tty1::respawn:/sbin/agetty --autologin root --noclear tty1 38400 linux|' /etc/inittab
-kill -HUP 1
-EOF
- touch /root/.hushlogin
-
- chmod +x /etc/local.d/autologin.start
- rc-update add local >/dev/null 2>&1
-
- # Apply autologin immediately for current session
- /etc/local.d/autologin.start
-
- msg_ok "Customized Container"
- fi
-
- echo "bash -c \"\$(curl -fsSL https://github.com/community-scripts/ProxmoxVED/raw/main/ct/${app}.sh)\"" >/usr/bin/update
- chmod +x /usr/bin/update
-}
diff --git a/misc/backup_07052025/api.func.bak b/misc/backup_07052025/api.func.bak
deleted file mode 100644
index 2da17c1ba..000000000
--- a/misc/backup_07052025/api.func.bak
+++ /dev/null
@@ -1,189 +0,0 @@
-# Copyright (c) 2021-2025 community-scripts ORG
-# Author: michelroegl-brunner
-# License: MIT | https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/LICENSE
-
-get_error_description() {
- local exit_code="$1"
- case "$exit_code" in
- 0) echo " " ;;
- 1) echo "General error: An unspecified error occurred." ;;
- 2) echo "Incorrect shell usage or invalid command arguments." ;;
- 3) echo "Unexecuted function or invalid shell condition." ;;
- 4) echo "Error opening a file or invalid path." ;;
- 5) echo "I/O error: An input/output failure occurred." ;;
- 6) echo "No such device or address." ;;
- 7) echo "Insufficient memory or resource exhaustion." ;;
- 8) echo "Non-executable file or invalid file format." ;;
- 9) echo "Failed child process execution." ;;
- 18) echo "Connection to a remote server failed." ;;
- 22) echo "Invalid argument or faulty network connection." ;;
- 28) echo "No space left on device." ;;
- 35) echo "Timeout while establishing a connection." ;;
- 56) echo "Faulty TLS connection." ;;
- 60) echo "SSL certificate error." ;;
- 100) echo "LXC install error: Unexpected error in create_lxc.sh." ;;
- 101) echo "LXC install error: No network connection detected." ;;
- 200) echo "LXC creation failed." ;;
- 201) echo "LXC error: Invalid Storage class." ;;
- 202) echo "User aborted menu in create_lxc.sh." ;;
- 203) echo "CTID not set in create_lxc.sh." ;;
- 204) echo "PCT_OSTYPE not set in create_lxc.sh." ;;
- 205) echo "CTID cannot be less than 100 in create_lxc.sh." ;;
- 206) echo "CTID already in use in create_lxc.sh." ;;
- 207) echo "Template not found in create_lxc.sh." ;;
- 208) echo "Error downloading template in create_lxc.sh." ;;
- 209) echo "Container creation failed, but template is intact in create_lxc.sh." ;;
- 125) echo "Docker error: Container could not start." ;;
- 126) echo "Command not executable: Incorrect permissions or missing dependencies." ;;
- 127) echo "Command not found: Incorrect path or missing dependency." ;;
- 128) echo "Invalid exit signal, e.g., incorrect Git command." ;;
- 129) echo "Signal 1 (SIGHUP): Process terminated due to hangup." ;;
- 130) echo "Signal 2 (SIGINT): Manual termination via Ctrl+C." ;;
- 132) echo "Signal 4 (SIGILL): Illegal machine instruction." ;;
- 133) echo "Signal 5 (SIGTRAP): Debugging error or invalid breakpoint signal." ;;
- 134) echo "Signal 6 (SIGABRT): Program aborted itself." ;;
- 135) echo "Signal 7 (SIGBUS): Memory error, invalid memory address." ;;
- 137) echo "Signal 9 (SIGKILL): Process forcibly terminated (OOM-killer or 'kill -9')." ;;
- 139) echo "Signal 11 (SIGSEGV): Segmentation fault, possibly due to invalid pointer access." ;;
- 141) echo "Signal 13 (SIGPIPE): Pipe closed unexpectedly." ;;
- 143) echo "Signal 15 (SIGTERM): Process terminated normally." ;;
- 152) echo "Signal 24 (SIGXCPU): CPU time limit exceeded." ;;
- 255) echo "Unknown critical error, often due to missing permissions or broken scripts." ;;
- *) echo "Unknown error code ($exit_code)." ;;
- esac
-}
-
-post_to_api() {
-
- if ! command -v curl &>/dev/null; then
- return
- fi
-
- if [ "$DIAGNOSTICS" = "no" ]; then
- return
- fi
-
- if [ -z "$RANDOM_UUID" ]; then
- return
- fi
-
- local API_URL="http://api.community-scripts.org/dev/upload"
- local pve_version="not found"
- pve_version=$(pveversion | awk -F'[/ ]' '{print $2}')
-
- JSON_PAYLOAD=$(
- cat </dev/null; then
- return
- fi
-
- if [ "$DIAGNOSTICS" = "no" ]; then
- return
- fi
-
- if [ -z "$RANDOM_UUID" ]; then
- return
- fi
-
- local API_URL="http://api.community-scripts.org/dev/upload"
- local pve_version="not found"
- pve_version=$(pveversion | awk -F'[/ ]' '{print $2}')
-
- DISK_SIZE_API=${DISK_SIZE%G}
-
- JSON_PAYLOAD=$(
- cat </dev/null; then
- return
- fi
-
- if [ "$POST_UPDATE_DONE" = true ]; then
- return 0
- fi
- exit_code=${2:-1}
- local API_URL="http://api.community-scripts.org/dev/upload/updatestatus"
- local status="${1:-failed}"
- if [[ "$status" == "failed" ]]; then
- local exit_code="${2:-1}"
- elif [[ "$status" == "success" ]]; then
- local exit_code="${2:-0}"
- fi
-
- if [[ -z "$exit_code" ]]; then
- exit_code=1
- fi
-
- error=$(get_error_description "$exit_code")
-
- if [ -z "$error" ]; then
- error="Unknown error"
- fi
-
- JSON_PAYLOAD=$(
- cat <, sonst Arbeitsverzeichnis oder APP_DIR gesetzt vom Script
- base_dir="${APP_DIR:-/opt/$app_name}"
- if [[ ! -d "$base_dir" ]]; then
- msg_error "Cannot determine base directory for $app_name"
- return 1
- fi
-
- local snapshot_base="${base_dir}-snapshot"
- SNAPSHOT_DIR="${snapshot_base}-$(date +%F_%T | tr ':' '-')"
-
- msg_info "Creating snapshot for $app_name"
-
- mkdir -p "$SNAPSHOT_DIR"
- cp -a "$base_dir" "$SNAPSHOT_DIR/base" || {
- msg_error "Failed to backup base directory"
- return 1
- }
-
- mkdir -p "$SNAPSHOT_DIR/systemd"
- cp -a /etc/systemd/system/${app_name}-*.service "$SNAPSHOT_DIR/systemd/" 2>/dev/null || true
-
- [[ -f "/etc/default/$app_name" ]] && cp "/etc/default/$app_name" "$SNAPSHOT_DIR/"
- [[ -f "$base_dir/${app_name}_version.txt" ]] && cp "$base_dir/${app_name}_version.txt" "$SNAPSHOT_DIR/"
-
- rotate_snapshots "$snapshot_base"
-
- msg_ok "Snapshot created at $SNAPSHOT_DIR"
- return 0
-}
-
-rotate_snapshots() {
- local snapshot_base=$1
- local snapshots
-
- # Sortiert nach Datum absteigend, behalte nur die 3 neuesten
- mapfile -t snapshots < <(ls -dt ${snapshot_base}-* 2>/dev/null)
- if ((${#snapshots[@]} > 3)); then
- for ((i = 3; i < ${#snapshots[@]}; i++)); do
- rm -rf "${snapshots[$i]}"
- msg_info "Old snapshot removed: ${snapshots[$i]}"
- done
- fi
-}
-
-rollback_snapshot() {
- local app_name=$1
- local base_dir
-
- base_dir="${APP_DIR:-/opt/$app_name}"
- if [[ -z "$SNAPSHOT_DIR" || ! -d "$SNAPSHOT_DIR" ]]; then
- msg_error "No snapshot found. Cannot rollback."
- return 1
- fi
-
- msg_info "Rolling back $app_name from snapshot"
-
- systemctl stop ${app_name}-* 2>/dev/null || true
-
- rm -rf "$base_dir"
- cp -a "$SNAPSHOT_DIR/base" "$base_dir" || {
- msg_error "Failed to restore base directory"
- return 1
- }
-
- if [[ -d "$SNAPSHOT_DIR/systemd" ]]; then
- cp "$SNAPSHOT_DIR/systemd/"*.service /etc/systemd/system/ 2>/dev/null || true
- systemctl daemon-reload
- fi
-
- [[ -f "$SNAPSHOT_DIR/$app_name" ]] && cp "$SNAPSHOT_DIR/$app_name" "/etc/default/$app_name"
- [[ -f "$SNAPSHOT_DIR/${app_name}_version.txt" ]] && cp "$SNAPSHOT_DIR/${app_name}_version.txt" "$base_dir/"
-
- systemctl start ${app_name}-* 2>/dev/null || true
-
- msg_ok "Rollback for $app_name completed"
- return 0
-}
-
-cleanup_snapshot() {
- if [[ -n "$SNAPSHOT_DIR" && -d "$SNAPSHOT_DIR" ]]; then
- rm -rf "$SNAPSHOT_DIR"
- msg_ok "Cleaned up snapshot at $SNAPSHOT_DIR"
- fi
-}
-
-handle_failure() {
- local app_name=$1
- local line=$2
- msg_error "Update failed at line $line. Rolling back..."
- rollback_snapshot "$app_name"
- exit 1
-}
-
-safe_run_update_script() {
- local app_name="${APP:-paperless}"
-
- if ! create_snapshot "$app_name"; then
- msg_error "Snapshot creation failed. Aborting update."
- exit 1
- fi
-
- trap 'handle_failure "$app_name" $LINENO' ERR
- set -eE
-
- update_script
-
- cleanup_snapshot
-}
-
-wrap_update_script_with_snapshot() {
- local original_func
- original_func=$(declare -f update_script) || return 1
-
- eval "
- original_update_script() {
- ${original_func#*\{}
- }
- update_script() {
- local app_name=\"\${APP:-paperless}\"
- if ! create_snapshot \"\$app_name\"; then
- msg_error \"Snapshot creation failed. Aborting update.\"
- exit 1
- fi
- trap 'handle_failure \"\$app_name\" \$LINENO' ERR
- set -eE
- original_update_script
- cleanup_snapshot
- }
- "
-}
diff --git a/misc/backup_07052025/build copy.func b/misc/backup_07052025/build copy.func
deleted file mode 100644
index bf5af2c46..000000000
--- a/misc/backup_07052025/build copy.func
+++ /dev/null
@@ -1,1407 +0,0 @@
-# Copyright (c) 2021-2025 tteck
-# Author: tteck (tteckster)
-# Co-Author: MickLesk
-# Co-Author: michelroegl-brunner
-# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
-
-variables() {
- NSAPP=$(echo "${APP,,}" | tr -d ' ') # This function sets the NSAPP variable by converting the value of the APP variable to lowercase and removing any spaces.
- var_install="${NSAPP}-install" # sets the var_install variable by appending "-install" to the value of NSAPP.
- INTEGER='^[0-9]+([.][0-9]+)?$' # it defines the INTEGER regular expression pattern.
- PVEHOST_NAME=$(hostname) # gets the Proxmox Hostname and sets it to Uppercase
- DIAGNOSTICS="yes" # sets the DIAGNOSTICS variable to "yes", used for the API call.
- METHOD="default" # sets the METHOD variable to "default", used for the API call.
- RANDOM_UUID="$(cat /proc/sys/kernel/random/uuid)" # generates a random UUID and sets it to the RANDOM_UUID variable.
-}
-
-source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/api.func)
-
-source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/core.func)
-color
-
-# # This function sets various color variables using ANSI escape codes for formatting text in the terminal.
-# color() {
-# # Colors
-# YW=$(echo "\033[33m")
-# YWB=$(echo "\033[93m")
-# BL=$(echo "\033[36m")
-# RD=$(echo "\033[01;31m")
-# BGN=$(echo "\033[4;92m")
-# GN=$(echo "\033[1;92m")
-# DGN=$(echo "\033[32m")
-
-# # Formatting
-# CL=$(echo "\033[m")
-# BOLD=$(echo "\033[1m")
-# HOLD=" "
-# TAB=" "
-
-# # Icons
-# CM="${TAB}✔️${TAB}"
-# CROSS="${TAB}✖️${TAB}"
-# INFO="${TAB}💡${TAB}${CL}"
-# OS="${TAB}🖥️${TAB}${CL}"
-# OSVERSION="${TAB}🌟${TAB}${CL}"
-# CONTAINERTYPE="${TAB}📦${TAB}${CL}"
-# DISKSIZE="${TAB}💾${TAB}${CL}"
-# CPUCORE="${TAB}🧠${TAB}${CL}"
-# RAMSIZE="${TAB}🛠️${TAB}${CL}"
-# SEARCH="${TAB}🔍${TAB}${CL}"
-# VERBOSE_CROPPED="🔍${TAB}"
-# VERIFYPW="${TAB}🔐${TAB}${CL}"
-# CONTAINERID="${TAB}🆔${TAB}${CL}"
-# HOSTNAME="${TAB}🏠${TAB}${CL}"
-# BRIDGE="${TAB}🌉${TAB}${CL}"
-# NETWORK="${TAB}📡${TAB}${CL}"
-# GATEWAY="${TAB}🌐${TAB}${CL}"
-# DISABLEIPV6="${TAB}🚫${TAB}${CL}"
-# DEFAULT="${TAB}⚙️${TAB}${CL}"
-# MACADDRESS="${TAB}🔗${TAB}${CL}"
-# VLANTAG="${TAB}🏷️${TAB}${CL}"
-# ROOTSSH="${TAB}🔑${TAB}${CL}"
-# CREATING="${TAB}🚀${TAB}${CL}"
-# ADVANCED="${TAB}🧩${TAB}${CL}"
-# }
-
-# This function enables error handling in the script by setting options and defining a trap for the ERR signal.
-catch_errors() {
- set -Eeuo pipefail
- trap 'error_handler $LINENO "$BASH_COMMAND"' ERR
-}
-
-# This function is called when an error occurs. It receives the exit code, line number, and command that caused the error, and displays an error message.
-error_handler() {
- source /dev/stdin <<<$(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/api.func)
- if [ -n "$SPINNER_PID" ] && ps -p "$SPINNER_PID" >/dev/null; then kill "$SPINNER_PID" >/dev/null; fi
- printf "\e[?25h"
- local exit_code="$?"
- local line_number="$1"
- local command="$2"
- local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
- post_update_to_api "failed" "${command}"
- echo -e "\n$error_message\n"
-}
-
-# Check if the shell is using bash
-shell_check() {
- if [[ "$(basename "$SHELL")" != "bash" ]]; then
- clear
- msg_error "Your default shell is currently not set to Bash. To use these scripts, please switch to the Bash shell."
- echo -e "\nExiting..."
- sleep 2
- exit
- fi
-}
-
-# Run as root only
-root_check() {
- if [[ "$(id -u)" -ne 0 || $(ps -o comm= -p $PPID) == "sudo" ]]; then
- clear
- msg_error "Please run this script as root."
- echo -e "\nExiting..."
- sleep 2
- exit
- fi
-}
-
-# This function checks the version of Proxmox Virtual Environment (PVE) and exits if the version is not supported.
-pve_check() {
- if ! pveversion | grep -Eq "pve-manager/8\.[0-4](\.[0-9]+)*"; then
- msg_error "${CROSS}${RD}This version of Proxmox Virtual Environment is not supported"
- echo -e "Requires Proxmox Virtual Environment Version 8.1 or later."
- echo -e "Exiting..."
- sleep 2
- exit
- fi
-}
-
-# When a node is running tens of containers, it's possible to exceed the kernel's cryptographic key storage allocations.
-# These are tuneable, so verify if the currently deployment is approaching the limits, advise the user on how to tune the limits, and exit the script.
-# https://cleveruptime.com/docs/files/proc-key-users | https://docs.kernel.org/security/keys/core.html
-maxkeys_check() {
- # Read kernel parameters
- per_user_maxkeys=$(cat /proc/sys/kernel/keys/maxkeys 2>/dev/null || echo 0)
- per_user_maxbytes=$(cat /proc/sys/kernel/keys/maxbytes 2>/dev/null || echo 0)
-
- # Exit if kernel parameters are unavailable
- if [[ "$per_user_maxkeys" -eq 0 || "$per_user_maxbytes" -eq 0 ]]; then
- echo -e "${CROSS}${RD} Error: Unable to read kernel parameters. Ensure proper permissions.${CL}"
- exit 1
- fi
-
- # Fetch key usage for user ID 100000 (typical for containers)
- used_lxc_keys=$(awk '/100000:/ {print $2}' /proc/key-users 2>/dev/null || echo 0)
- used_lxc_bytes=$(awk '/100000:/ {split($5, a, "/"); print a[1]}' /proc/key-users 2>/dev/null || echo 0)
-
- # Calculate thresholds and suggested new limits
- threshold_keys=$((per_user_maxkeys - 100))
- threshold_bytes=$((per_user_maxbytes - 1000))
- new_limit_keys=$((per_user_maxkeys * 2))
- new_limit_bytes=$((per_user_maxbytes * 2))
-
- # Check if key or byte usage is near limits
- failure=0
- if [[ "$used_lxc_keys" -gt "$threshold_keys" ]]; then
- echo -e "${CROSS}${RD} Warning: Key usage is near the limit (${used_lxc_keys}/${per_user_maxkeys}).${CL}"
- echo -e "${INFO} Suggested action: Set ${GN}kernel.keys.maxkeys=${new_limit_keys}${CL} in ${BOLD}/etc/sysctl.d/98-community-scripts.conf${CL}."
- failure=1
- fi
- if [[ "$used_lxc_bytes" -gt "$threshold_bytes" ]]; then
- echo -e "${CROSS}${RD} Warning: Key byte usage is near the limit (${used_lxc_bytes}/${per_user_maxbytes}).${CL}"
- echo -e "${INFO} Suggested action: Set ${GN}kernel.keys.maxbytes=${new_limit_bytes}${CL} in ${BOLD}/etc/sysctl.d/98-community-scripts.conf${CL}."
- failure=1
- fi
-
- # Provide next steps if issues are detected
- if [[ "$failure" -eq 1 ]]; then
- echo -e "${INFO} To apply changes, run: ${BOLD}service procps force-reload${CL}"
- exit 1
- fi
-
- echo -e "${CM}${GN} All kernel key limits are within safe thresholds.${CL}"
-}
-
-# This function checks the system architecture and exits if it's not "amd64".
-arch_check() {
- if [ "$(dpkg --print-architecture)" != "amd64" ]; then
- echo -e "\n ${INFO}${YWB}This script will not work with PiMox! \n"
- echo -e "\n ${YWB}Visit https://github.com/asylumexp/Proxmox for ARM64 support. \n"
- echo -e "Exiting..."
- sleep 2
- exit
- fi
-}
-
-# Function to get the current IP address based on the distribution
-get_current_ip() {
- if [ -f /etc/os-release ]; then
- # Check for Debian/Ubuntu (uses hostname -I)
- if grep -qE 'ID=debian|ID=ubuntu' /etc/os-release; then
- CURRENT_IP=$(hostname -I | awk '{print $1}')
- # Check for Alpine (uses ip command)
- elif grep -q 'ID=alpine' /etc/os-release; then
- CURRENT_IP=$(ip -4 addr show eth0 | awk '/inet / {print $2}' | cut -d/ -f1 | head -n 1)
- else
- CURRENT_IP="Unknown"
- fi
- fi
- echo "$CURRENT_IP"
-}
-
-# Function to update the IP address in the MOTD file
-update_motd_ip() {
- MOTD_FILE="/etc/motd"
-
- if [ -f "$MOTD_FILE" ]; then
- # Remove existing IP Address lines to prevent duplication
- sed -i '/IP Address:/d' "$MOTD_FILE"
-
- IP=$(get_current_ip)
- # Add the new IP address
- echo -e "${TAB}${NETWORK}${YW} IP Address: ${GN}${IP}${CL}" >>"$MOTD_FILE"
- fi
-}
-
-# Function to download & save header files
-get_header() {
- local app_name=$(echo "${APP,,}" | tr -d ' ')
- local header_url="https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/ct/headers/${app_name}"
- local local_header_path="/usr/local/community-scripts/headers/${app_name}"
-
- mkdir -p "$(dirname "$local_header_path")"
-
- if [ ! -s "$local_header_path" ]; then
- if ! curl -fsSL "$header_url" -o "$local_header_path"; then
- return 1
- fi
- fi
-
- cat "$local_header_path" 2>/dev/null || true
-}
-# This function sets the APP-Name into an ASCII Header in Slant, figlet needed on proxmox main node.
-header_info() {
- local app_name=$(echo "${APP,,}" | tr -d ' ')
- local header_content
-
- # Download & save Header-File locally
- header_content=$(get_header "$app_name") || header_content=""
-
- # Show ASCII-Header
- clear
- local term_width
- term_width=$(tput cols 2>/dev/null || echo 120)
-
- if [ -n "$header_content" ]; then
- echo "$header_content"
- fi
-}
-
-# This function checks if the script is running through SSH and prompts the user to confirm if they want to proceed or exit.
-ssh_check() {
- if [ -n "${SSH_CLIENT:+x}" ]; then
- if whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --defaultno --title "SSH DETECTED" --yesno "It's advisable to utilize the Proxmox shell rather than SSH, as there may be potential complications with variable retrieval. Proceed using SSH?" 10 72; then
- whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --msgbox --title "Proceed using SSH" "You've chosen to proceed using SSH. If any issues arise, please run the script in the Proxmox shell before creating a repository issue." 10 72
- else
- clear
- echo "Exiting due to SSH usage. Please consider using the Proxmox shell."
- exit
- fi
- fi
-}
-
-base_settings() {
- # Default Settings
- CT_TYPE="1"
- DISK_SIZE="4"
- CORE_COUNT="1"
- RAM_SIZE="1024"
- VERBOSE="${1:-no}"
- PW=""
- CT_ID=$NEXTID
- HN=$NSAPP
- BRG="vmbr0"
- NET="dhcp"
- GATE=""
- APT_CACHER=""
- APT_CACHER_IP=""
- DISABLEIP6="no"
- MTU=""
- SD=""
- NS=""
- MAC=""
- VLAN=""
- SSH="no"
- SSH_AUTHORIZED_KEY=""
- TAGS="community-script;"
- UDHCPC_FIX=""
-
- # Override default settings with variables from ct script
- CT_TYPE=${var_unprivileged:-$CT_TYPE}
- DISK_SIZE=${var_disk:-$DISK_SIZE}
- CORE_COUNT=${var_cpu:-$CORE_COUNT}
- RAM_SIZE=${var_ram:-$RAM_SIZE}
- VERB=${var_verbose:-$VERBOSE}
- TAGS="${TAGS}${var_tags:-}"
-
- # Since these 2 are only defined outside of default_settings function, we add a temporary fallback. TODO: To align everything, we should add these as constant variables (e.g. OSTYPE and OSVERSION), but that would currently require updating the default_settings function for all existing scripts
- if [ -z "$var_os" ]; then
- var_os="debian"
- fi
- if [ -z "$var_version" ]; then
- var_version="12"
- fi
-}
-
-# This function displays the default values for various settings.
-echo_default() {
- # Convert CT_TYPE to description
- CT_TYPE_DESC="Unprivileged"
- if [ "$CT_TYPE" -eq 0 ]; then
- CT_TYPE_DESC="Privileged"
- fi
-
- # Output the selected values with icons
- echo -e "${OS}${BOLD}${DGN}Operating System: ${BGN}$var_os${CL}"
- echo -e "${OSVERSION}${BOLD}${DGN}Version: ${BGN}$var_version${CL}"
- echo -e "${CONTAINERTYPE}${BOLD}${DGN}Container Type: ${BGN}$CT_TYPE_DESC${CL}"
- echo -e "${DISKSIZE}${BOLD}${DGN}Disk Size: ${BGN}${DISK_SIZE} GB${CL}"
- echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}${CORE_COUNT}${CL}"
- echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}${RAM_SIZE} MiB${CL}"
- echo -e "${CONTAINERID}${BOLD}${DGN}Container ID: ${BGN}${CT_ID}${CL}"
- if [ "$VERB" == "yes" ]; then
- echo -e "${SEARCH}${BOLD}${DGN}Verbose Mode: ${BGN}Enabled${CL}"
- fi
- echo -e "${CREATING}${BOLD}${BL}Creating a ${APP} LXC using the above default settings${CL}"
- echo -e " "
-}
-
-# This function is called when the user decides to exit the script. It clears the screen and displays an exit message.
-exit_script() {
- [[ -n "${SPINNER_PID:-}" ]] && kill "$SPINNER_PID" &>/dev/null || true
- clear
- echo -e "\n${CROSS}${RD}User exited script${CL}\n"
- exit
-}
-
-# This function allows the user to configure advanced settings for the script.
-advanced_settings() {
- whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --msgbox --title "Here is an instructional tip:" "To make a selection, use the Spacebar." 8 58
- # Setting Default Tag for Advanced Settings
- TAGS="community-script;${var_tags:-}"
- CT_DEFAULT_TYPE="${CT_TYPE}"
- CT_TYPE=""
- while [ -z "$CT_TYPE" ]; do
- if [ "$CT_DEFAULT_TYPE" == "1" ]; then
- if CT_TYPE=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --title "CONTAINER TYPE" --radiolist "Choose Type" 10 58 2 \
- "1" "Unprivileged" ON \
- "0" "Privileged" OFF \
- 3>&1 1>&2 2>&3); then
- if [ -n "$CT_TYPE" ]; then
- CT_TYPE_DESC="Unprivileged"
- if [ "$CT_TYPE" -eq 0 ]; then
- CT_TYPE_DESC="Privileged"
- fi
- echo -e "${OS}${BOLD}${DGN}Operating System: ${BGN}$var_os${CL}"
- echo -e "${OSVERSION}${BOLD}${DGN}Version: ${BGN}$var_version${CL}"
- echo -e "${CONTAINERTYPE}${BOLD}${DGN}Container Type: ${BGN}$CT_TYPE_DESC${CL}"
- fi
- else
- exit_script
- fi
- fi
- if [ "$CT_DEFAULT_TYPE" == "0" ]; then
- if CT_TYPE=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --title "CONTAINER TYPE" --radiolist "Choose Type" 10 58 2 \
- "1" "Unprivileged" OFF \
- "0" "Privileged" ON \
- 3>&1 1>&2 2>&3); then
- if [ -n "$CT_TYPE" ]; then
- CT_TYPE_DESC="Unprivileged"
- if [ "$CT_TYPE" -eq 0 ]; then
- CT_TYPE_DESC="Privileged"
- fi
- echo -e "${OS}${BOLD}${DGN}Operating System: ${BGN}$var_os${CL}"
- echo -e "${OSVERSION}${BOLD}${DGN}Version: ${BGN}$var_version${CL}"
- echo -e "${CONTAINERTYPE}${BOLD}${DGN}Container Type: ${BGN}$CT_TYPE_DESC${CL}"
- fi
- else
- exit_script
- fi
- fi
- done
-
- while true; do
- if PW1=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --passwordbox "\nSet Root Password (needed for root ssh access)" 9 58 --title "PASSWORD (leave blank for automatic login)" 3>&1 1>&2 2>&3); then
- if [[ ! -z "$PW1" ]]; then
- if [[ "$PW1" == *" "* ]]; then
- whiptail --msgbox "Password cannot contain spaces. Please try again." 8 58
- elif [ ${#PW1} -lt 5 ]; then
- whiptail --msgbox "Password must be at least 5 characters long. Please try again." 8 58
- else
- if PW2=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --passwordbox "\nVerify Root Password" 9 58 --title "PASSWORD VERIFICATION" 3>&1 1>&2 2>&3); then
- if [[ "$PW1" == "$PW2" ]]; then
- PW="-password $PW1"
- echo -e "${VERIFYPW}${BOLD}${DGN}Root Password: ${BGN}********${CL}"
- break
- else
- whiptail --msgbox "Passwords do not match. Please try again." 8 58
- fi
- else
- exit_script
- fi
- fi
- else
- PW1="Automatic Login"
- PW=""
- echo -e "${VERIFYPW}${BOLD}${DGN}Root Password: ${BGN}$PW1${CL}"
- break
- fi
- else
- exit_script
- fi
- done
-
- if CT_ID=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --inputbox "Set Container ID" 8 58 "$NEXTID" --title "CONTAINER ID" 3>&1 1>&2 2>&3); then
- if [ -z "$CT_ID" ]; then
- CT_ID="$NEXTID"
- echo -e "${CONTAINERID}${BOLD}${DGN}Container ID: ${BGN}$CT_ID${CL}"
- else
- echo -e "${CONTAINERID}${BOLD}${DGN}Container ID: ${BGN}$CT_ID${CL}"
- fi
- else
- exit
- fi
-
- if CT_NAME=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --inputbox "Set Hostname" 8 58 "$NSAPP" --title "HOSTNAME" 3>&1 1>&2 2>&3); then
- if [ -z "$CT_NAME" ]; then
- HN="$NSAPP"
- else
- HN=$(echo "${CT_NAME,,}" | tr -d ' ')
- fi
- echo -e "${HOSTNAME}${BOLD}${DGN}Hostname: ${BGN}$HN${CL}"
- else
- exit_script
- fi
-
- if DISK_SIZE=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --inputbox "Set Disk Size in GB" 8 58 "$var_disk" --title "DISK SIZE" 3>&1 1>&2 2>&3); then
- if [ -z "$DISK_SIZE" ]; then
- DISK_SIZE="$var_disk"
- echo -e "${DISKSIZE}${BOLD}${DGN}Disk Size: ${BGN}${DISK_SIZE} GB${CL}"
- else
- if ! [[ $DISK_SIZE =~ $INTEGER ]]; then
- echo -e "{INFO}${HOLD}${RD} DISK SIZE MUST BE AN INTEGER NUMBER!${CL}"
- advanced_settings
- fi
- echo -e "${DISKSIZE}${BOLD}${DGN}Disk Size: ${BGN}${DISK_SIZE} GB${CL}"
- fi
- else
- exit_script
- fi
-
- if CORE_COUNT=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --inputbox "Allocate CPU Cores" 8 58 "$var_cpu" --title "CORE COUNT" 3>&1 1>&2 2>&3); then
- if [ -z "$CORE_COUNT" ]; then
- CORE_COUNT="$var_cpu"
- echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}$CORE_COUNT${CL}"
- else
- echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}$CORE_COUNT${CL}"
- fi
- else
- exit_script
- fi
-
- if RAM_SIZE=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --inputbox "Allocate RAM in MiB" 8 58 "$var_ram" --title "RAM" 3>&1 1>&2 2>&3); then
- if [ -z "$RAM_SIZE" ]; then
- RAM_SIZE="$var_ram"
- echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}${RAM_SIZE} MiB${CL}"
- else
- echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}${RAM_SIZE} MiB${CL}"
- fi
- else
- exit_script
- fi
-
- if BRG=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --inputbox "Set a Bridge" 8 58 vmbr0 --title "BRIDGE" 3>&1 1>&2 2>&3); then
- if [ -z "$BRG" ]; then
- BRG="vmbr0"
- echo -e "${BRIDGE}${BOLD}${DGN}Bridge: ${BGN}$BRG${CL}"
- else
- echo -e "${BRIDGE}${BOLD}${DGN}Bridge: ${BGN}$BRG${CL}"
- fi
- else
- exit_script
- fi
-
- while true; do
- NET=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --inputbox "Set a Static IPv4 CIDR Address (/24)" 8 58 dhcp --title "IP ADDRESS" 3>&1 1>&2 2>&3)
- exit_status=$?
- if [ $exit_status -eq 0 ]; then
- if [ "$NET" = "dhcp" ]; then
- echo -e "${NETWORK}${BOLD}${DGN}IP Address: ${BGN}$NET${CL}"
- break
- else
- if [[ "$NET" =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}/([0-9]|[1-2][0-9]|3[0-2])$ ]]; then
- echo -e "${NETWORK}${BOLD}${DGN}IP Address: ${BGN}$NET${CL}"
- break
- else
- whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --msgbox "$NET is an invalid IPv4 CIDR address. Please enter a valid IPv4 CIDR address or 'dhcp'" 8 58
- fi
- fi
- else
- exit_script
- fi
- done
-
- if [ "$NET" != "dhcp" ]; then
- while true; do
- GATE1=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --inputbox "Enter gateway IP address" 8 58 --title "Gateway IP" 3>&1 1>&2 2>&3)
- if [ -z "$GATE1" ]; then
- whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --msgbox "Gateway IP address cannot be empty" 8 58
- elif [[ ! "$GATE1" =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}$ ]]; then
- whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --msgbox "Invalid IP address format" 8 58
- else
- GATE=",gw=$GATE1"
- echo -e "${GATEWAY}${BOLD}${DGN}Gateway IP Address: ${BGN}$GATE1${CL}"
- break
- fi
- done
- else
- GATE=""
- echo -e "${GATEWAY}${BOLD}${DGN}Gateway IP Address: ${BGN}Default${CL}"
- fi
-
- if [ "$var_os" == "alpine" ]; then
- APT_CACHER=""
- APT_CACHER_IP=""
- else
- if APT_CACHER_IP=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --inputbox "Set APT-Cacher IP (leave blank for none)" 8 58 --title "APT-Cacher IP" 3>&1 1>&2 2>&3); then
- APT_CACHER="${APT_CACHER_IP:+yes}"
- echo -e "${NETWORK}${BOLD}${DGN}APT-Cacher IP Address: ${BGN}${APT_CACHER_IP:-Default}${CL}"
- else
- exit_script
- fi
- fi
-
- if (whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --defaultno --title "IPv6" --yesno "Disable IPv6?" 10 58); then
- DISABLEIP6="yes"
- else
- DISABLEIP6="no"
- fi
- echo -e "${DISABLEIPV6}${BOLD}${DGN}Disable IPv6: ${BGN}$DISABLEIP6${CL}"
-
- if MTU1=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --inputbox "Set Interface MTU Size (leave blank for default [The MTU of your selected vmbr, default is 1500])" 8 58 --title "MTU SIZE" 3>&1 1>&2 2>&3); then
- if [ -z "$MTU1" ]; then
- MTU1="Default"
- MTU=""
- else
- MTU=",mtu=$MTU1"
- fi
- echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}$MTU1${CL}"
- else
- exit_script
- fi
-
- if SD=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --inputbox "Set a DNS Search Domain (leave blank for HOST)" 8 58 --title "DNS Search Domain" 3>&1 1>&2 2>&3); then
- if [ -z "$SD" ]; then
- SX=Host
- SD=""
- else
- SX=$SD
- SD="-searchdomain=$SD"
- fi
- echo -e "${SEARCH}${BOLD}${DGN}DNS Search Domain: ${BGN}$SX${CL}"
- else
- exit_script
- fi
-
- if NX=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --inputbox "Set a DNS Server IP (leave blank for HOST)" 8 58 --title "DNS SERVER IP" 3>&1 1>&2 2>&3); then
- if [ -z "$NX" ]; then
- NX=Host
- NS=""
- else
- NS="-nameserver=$NX"
- fi
- echo -e "${NETWORK}${BOLD}${DGN}DNS Server IP Address: ${BGN}$NX${CL}"
- else
- exit_script
- fi
-
- if [ "$var_os" == "alpine" ] && [ "$NET" == "dhcp" ] && [ "$NX" != "Host" ]; then
- UDHCPC_FIX="yes"
- else
- UDHCPC_FIX="no"
- fi
- export UDHCPC_FIX
-
- if MAC1=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --inputbox "Set a MAC Address(leave blank for generated MAC)" 8 58 --title "MAC ADDRESS" 3>&1 1>&2 2>&3); then
- if [ -z "$MAC1" ]; then
- MAC1="Default"
- MAC=""
- else
- MAC=",hwaddr=$MAC1"
- echo -e "${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}$MAC1${CL}"
- fi
- else
- exit_script
- fi
-
- if VLAN1=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --inputbox "Set a Vlan(leave blank for no VLAN)" 8 58 --title "VLAN" 3>&1 1>&2 2>&3); then
- if [ -z "$VLAN1" ]; then
- VLAN1="Default"
- VLAN=""
- else
- VLAN=",tag=$VLAN1"
- fi
- echo -e "${VLANTAG}${BOLD}${DGN}Vlan: ${BGN}$VLAN1${CL}"
- else
- exit_script
- fi
-
- if ADV_TAGS=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --inputbox "Set Custom Tags?[If you remove all, there will be no tags!]" 8 58 "${TAGS}" --title "Advanced Tags" 3>&1 1>&2 2>&3); then
- if [ -n "${ADV_TAGS}" ]; then
- ADV_TAGS=$(echo "$ADV_TAGS" | tr -d '[:space:]')
- TAGS="${ADV_TAGS}"
- else
- TAGS=";"
- fi
- echo -e "${NETWORK}${BOLD}${DGN}Tags: ${BGN}$TAGS${CL}"
- else
- exit_script
- fi
-
- if [[ "$PW" == -password* ]]; then
- if (whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --defaultno --title "SSH ACCESS" --yesno "Enable Root SSH Access?" 10 58); then
- SSH="yes"
- else
- SSH="no"
- fi
- echo -e "${ROOTSSH}${BOLD}${DGN}Root SSH Access: ${BGN}$SSH${CL}"
- else
- SSH="no"
- echo -e "${ROOTSSH}${BOLD}${DGN}Root SSH Access: ${BGN}$SSH${CL}"
- fi
-
- if [[ "${SSH}" == "yes" ]]; then
- SSH_AUTHORIZED_KEY="$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --inputbox "SSH Authorized key for root (leave empty for none)" 8 58 --title "SSH Key" 3>&1 1>&2 2>&3)"
-
- if [[ -z "${SSH_AUTHORIZED_KEY}" ]]; then
- echo "Warning: No SSH key provided."
- fi
- else
- SSH_AUTHORIZED_KEY=""
- fi
- if (whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --defaultno --title "VERBOSE MODE" --yesno "Enable Verbose Mode?" 10 58); then
- VERB="yes"
- else
- VERB="no"
- fi
- echo -e "${SEARCH}${BOLD}${DGN}Verbose Mode: ${BGN}$VERB${CL}"
-
- if (whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --title "ADVANCED SETTINGS COMPLETE" --yesno "Ready to create ${APP} LXC?" 10 58); then
- echo -e "${CREATING}${BOLD}${RD}Creating a ${APP} LXC using the above advanced settings${CL}"
- else
- clear
- header_info
- echo -e "${ADVANCED}${BOLD}${RD}Using Advanced Settings on node $PVEHOST_NAME${CL}"
- advanced_settings
- fi
-}
-
-diagnostics_check() {
- if ! [ -d "/usr/local/community-scripts" ]; then
- mkdir -p /usr/local/community-scripts
- fi
-
- if ! [ -f "/usr/local/community-scripts/diagnostics" ]; then
- if (whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --title "DIAGNOSTICS" --yesno "Send Diagnostics of LXC Installation?\n\n(This only transmits data without user data, just RAM, CPU, LXC name, ...)" 10 58); then
- cat </usr/local/community-scripts/diagnostics
-DIAGNOSTICS=yes
-
-#This file is used to store the diagnostics settings for the Community-Scripts API.
-#https://github.com/community-scripts/ProxmoxVED/discussions/1836
-#Your diagnostics will be sent to the Community-Scripts API for troubleshooting/statistical purposes.
-#You can review the data at https://community-scripts.github.io/ProxmoxVE/data
-#If you do not wish to send diagnostics, please set the variable 'DIAGNOSTICS' to "no" in /usr/local/community-scripts/diagnostics, or use the menue.
-#This will disable the diagnostics feature.
-#To send diagnostics, set the variable 'DIAGNOSTICS' to "yes" in /usr/local/community-scripts/diagnostics, or use the menue.
-#This will enable the diagnostics feature.
-#The following information will be sent:
-#"ct_type"
-#"disk_size"
-#"core_count"
-#"ram_size"
-#"os_type"
-#"os_version"
-#"disableip6"
-#"nsapp"
-#"method"
-#"pve_version"
-#"status"
-#If you have any concerns, please review the source code at /misc/build.func
-EOF
- DIAGNOSTICS="yes"
- else
- cat </usr/local/community-scripts/diagnostics
-DIAGNOSTICS=no
-
-#This file is used to store the diagnostics settings for the Community-Scripts API.
-#https://github.com/community-scripts/ProxmoxVED/discussions/1836
-#Your diagnostics will be sent to the Community-Scripts API for troubleshooting/statistical purposes.
-#You can review the data at https://community-scripts.github.io/ProxmoxVE/data
-#If you do not wish to send diagnostics, please set the variable 'DIAGNOSTICS' to "no" in /usr/local/community-scripts/diagnostics, or use the menue.
-#This will disable the diagnostics feature.
-#To send diagnostics, set the variable 'DIAGNOSTICS' to "yes" in /usr/local/community-scripts/diagnostics, or use the menue.
-#This will enable the diagnostics feature.
-#The following information will be sent:
-#"ct_type"
-#"disk_size"
-#"core_count"
-#"ram_size"
-#"os_type"
-#"os_version"
-#"disableip6"
-#"nsapp"
-#"method"
-#"pve_version"
-#"status"
-#If you have any concerns, please review the source code at /misc/build.func
-EOF
- DIAGNOSTICS="no"
- fi
- else
- DIAGNOSTICS=$(awk -F '=' '/^DIAGNOSTICS/ {print $2}' /usr/local/community-scripts/diagnostics)
-
- fi
-
-}
-
-config_file() {
-
- whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --msgbox --title "Default distribution for $APP" "${var_os} ${var_version} \n \nIf the default Linux distribution is not adhered to, script support will be discontinued. \n" 10 58
-
- CONFIG_FILE="/opt/community-scripts/.settings"
-
- if [[ -f "/opt/community-scripts/${NSAPP}.conf" ]]; then
- CONFIG_FILE="/opt/community-scripts/${NSAPP}.conf"
- fi
-
- if CONFIG_FILE=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --inputbox "Set absolute path to config file" 8 58 "$CONFIG_FILE" --title "CONFIG FILE" 3>&1 1>&2 2>&3); then
- if [[ ! -f "$CONFIG_FILE" ]]; then
- echo -e "${CROSS}${RD}Config file not found, exiting script!.${CL}"
- exit
- else
- echo -e "${INFO}${BOLD}${DGN}Using config File: ${BGN}$CONFIG_FILE${CL}"
- base_settings
- source "$CONFIG_FILE"
- fi
- fi
-
- if [[ "$var_os" == "debian" ]]; then
- echo -e "${OS}${BOLD}${DGN}Operating System: ${BGN}$var_os${CL}"
- if [[ "$var_version" == "11" ]]; then
- echo -e "${OSVERSION}${BOLD}${DGN}Version: ${BGN}$var_version${CL}"
- elif [[ "$var_version" == "12" ]]; then
- echo -e "${OSVERSION}${BOLD}${DGN}Version: ${BGN}$var_version${CL}"
- else
- msg_error "Unknown setting for var_version, should be 11 or 12, was ${var_version}"
- exit
- fi
- elif [[ "$var_os" == "ubuntu" ]]; then
- echo -e "${OS}${BOLD}${DGN}Operating System: ${BGN}$var_os${CL}"
- if [[ "$var_version" == "20.04" ]]; then
- echo -e "${OSVERSION}${BOLD}${DGN}Version: ${BGN}$var_version${CL}"
- elif [[ "$var_version" == "22.04" ]]; then
- echo -e "${OSVERSION}${BOLD}${DGN}Version: ${BGN}$var_version${CL}"
- elif [[ "$var_version" == "24.04" ]]; then
- echo -e "${OSVERSION}${BOLD}${DGN}Version: ${BGN}$var_version${CL}"
- elif [[ "$var_version" == "24.10" ]]; then
- echo -e "${OSVERSION}${BOLD}${DGN}Version: ${BGN}$var_version${CL}"
- else
- msg_error "Unknown setting for var_version, should be 20.04, 22.04, 24.04 or 24.10, was ${var_version}"
- exit
- fi
- else
- msg_error "Unknown setting for var_os! should be debian or ubuntu, was ${var_os}"
- exit
- fi
-
- if [[ -n "$CT_ID" ]]; then
-
- if [[ "$CT_ID" =~ ^([0-9]{3,4})-([0-9]{3,4})$ ]]; then
- MIN_ID=${BASH_REMATCH[1]}
- MAX_ID=${BASH_REMATCH[2]}
-
- if ((MIN_ID >= MAX_ID)); then
- msg_error "Invalid Container ID range. The first number must be smaller than the second number, was ${CT_ID}"
- exit
- fi
-
- LIST_OF_IDS=$(pvesh get /cluster/resources --type vm --output-format json | grep -oP '"vmid":\s*\K\d+')
-
- for ((ID = MIN_ID; ID <= MAX_ID; ID++)); do
- if ! grep -q "^$ID$" <<<"$LIST_OF_IDS"; then
- CT_ID=$ID
- break
- fi
- done
-
- echo -e "${CONTAINERID}${BOLD}${DGN}Container ID: ${BGN}$CT_ID${CL}"
-
- elif [[ "$CT_ID" =~ ^[0-9]+$ ]]; then
-
- LIST_OF_IDS=$(pvesh get /cluster/resources --type vm --output-format json | grep -oP '"vmid":\s*\K\d+')
- if ! grep -q "^$CT_ID$" <<<"$LIST_OF_IDS"; then
- echo -e "${CONTAINERID}${BOLD}${DGN}Container ID: ${BGN}$CT_ID${CL}"
- else
- msg_error "Container ID $CT_ID already exists"
- exit
- fi
- else
- msg_error "Invalid Container ID format. Needs to be 0000-9999 or 0-9999, was ${CT_ID}"
- exit
- fi
- fi
-
- if [[ "$CT_TYPE" -eq 0 ]]; then
- CT_TYPE_DESC="Privileged"
- elif [[ "$CT_TYPE" -eq 1 ]]; then
- CT_TYPE_DESC="Unprivileged"
- else
- msg_error "Unknown setting for CT_TYPE, should be 1 or 0, was ${CT_TYPE}"
- exit
- fi
- echo -e "${CONTAINERTYPE}${BOLD}${DGN}Container Type: ${BGN}$CT_TYPE_DESC${CL}"
-
- if [[ ! -z "$PW" ]]; then
-
- if [[ "$PW" == *" "* ]]; then
- msg_error "Password cannot be empty"
- exit
- elif [[ ${#PW} -lt 5 ]]; then
- msg_error "Password must be at least 5 characters long"
- exit
- else
- echo -e "${VERIFYPW}${BOLD}${DGN}Root Password: ${BGN}********${CL}"
- fi
- PW="-password $PW"
- else
- PW=""
- echo -e "${VERIFYPW}${BOLD}${DGN}Root Password: ${BGN}Automatic Login${CL}"
- fi
- echo -e "${CONTAINERID}${BOLD}${DGN}Container ID: ${BGN}$CT_ID${CL}"
-
- if [[ ! -z "$HN" ]]; then
- echo -e "${HOSTNAME}${BOLD}${DGN}Hostname: ${BGN}$HN${CL}"
- else
- msg_error "Hostname cannot be empty"
- exit
- fi
-
- if [[ ! -z "$DISK_SIZE" ]]; then
- if [[ "$DISK_SIZE" =~ ^-?[0-9]+$ ]]; then
- echo -e "${DISKSIZE}${BOLD}${DGN}Disk Size: ${BGN}${DISK_SIZE} GB${CL}"
- else
- msg_error "DISK_SIZE must be an integer, was ${DISK_SIZE}"
- exit
- fi
- else
- msg_error "DISK_SIZE cannot be empty"
- exit
- fi
-
- if [[ ! -z "$CORE_COUNT" ]]; then
- if [[ "$CORE_COUNT" =~ ^-?[0-9]+$ ]]; then
- echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}${CORE_COUNT}${CL}"
- else
- msg_error "CORE_COUNT must be an integer, was ${CORE_COUNT}"
- exit
- fi
- else
- msg_error "CORE_COUNT cannot be empty"
- exit
- fi
-
- if [[ ! -z "$RAM_SIZE" ]]; then
- if [[ "$RAM_SIZE" =~ ^-?[0-9]+$ ]]; then
- echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}${RAM_SIZE} MiB${CL}"
- else
- msg_error "RAM_SIZE must be an integer, was ${RAM_SIZE}"
- exit
- fi
- else
- msg_error "RAM_SIZE cannot be empty"
- exit
- fi
-
- if [[ ! -z "$BRG" ]]; then
- if grep -q "^iface ${BRG}" /etc/network/interfaces; then
- echo -e "${BRIDGE}${BOLD}${DGN}Bridge: ${BGN}$BRG${CL}"
- else
- msg_error "Bridge '${BRG}' does not exist in /etc/network/interfaces"
- exit
- fi
- else
- msg_error "Bridge cannot be empty"
- exit
- fi
-
- local ip_cidr_regex='^([0-9]{1,3})\.([0-9]{1,3})\.([0-9]{1,3})\.([0-9]{1,3})/([0-9]{1,2})$'
- local ip_regex='^([0-9]{1,3})\.([0-9]{1,3})\.([0-9]{1,3})\.([0-9]{1,3})$'
-
- if [[ ! -z $NET ]]; then
- if [ "$NET" == "dhcp" ]; then
- echo -e "${NETWORK}${BOLD}${DGN}IP Address: ${BGN}DHCP${CL}"
- echo -e "${GATEWAY}${BOLD}${DGN}Gateway IP Address: ${BGN}Default${CL}"
- elif
- [[ "$NET" =~ $ip_cidr_regex ]]
- then
- echo -e "${NETWORK}${BOLD}${DGN}IP Address: ${BGN}$NET${CL}"
- else
- msg_error "Invalid IP Address format. Needs to be 0.0.0.0/0, was ${NET}"
- exit
- fi
- fi
- if [ ! -z "$GATE" ]; then
- if [[ "$GATE" =~ $ip_regex ]]; then
- echo -e "${GATEWAY}${BOLD}${DGN}Gateway IP Address: ${BGN}$GATE${CL}"
- GATE=",gw=$GATE"
- else
- msg_error "Invalid IP Address format for Gateway. Needs to be 0.0.0.0, was ${GATE}"
- exit
- fi
- else
- msg_error "Gateway IP Address cannot be empty"
- exit
- fi
-
- if [[ ! -z "$APT_CACHER_IP" ]]; then
- if [[ "$APT_CACHER_IP" =~ $ip_regex ]]; then
- APT_CACHER="yes"
- echo -e "${NETWORK}${BOLD}${DGN}APT-CACHER IP Address: ${BGN}$APT_CACHER_IP${CL}"
- else
- msg_error "Invalid IP Address format for APT-Cacher. Needs to be 0.0.0.0, was ${APT_CACHER_IP}"
- exit
- fi
- fi
-
- if [[ "$DISABLEIP6" == "yes" ]]; then
- echo -e "${DISABLEIPV6}${BOLD}${DGN}Disable IPv6: ${BGN}Yes${CL}"
- elif [[ "$DISABLEIP6" == "no" ]]; then
- echo -e "${DISABLEIPV6}${BOLD}${DGN}Disable IPv6: ${BGN}No${CL}"
- else
- msg_error "Disable IPv6 needs to be 'yes' or 'no'"
- exit
- fi
-
- if [[ ! -z "$MTU" ]]; then
- if [[ "$MTU" =~ ^-?[0-9]+$ ]]; then
- echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}$MTU${CL}"
- MTU=",mtu=$MTU"
- else
- msg_error "MTU must be an integer, was ${MTU}"
- exit
- fi
- else
- MTU=""
- echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}Default${CL}"
-
- fi
-
- if [[ ! -z "$SD" ]]; then
- echo -e "${SEARCH}${BOLD}${DGN}DNS Search Domain: ${BGN}$SD${CL}"
- SD="-searchdomain=$SD"
- else
- SD=""
- echo -e "${SEARCH}${BOLD}${DGN}DNS Search Domain: ${BGN}HOST${CL}"
- fi
-
- if [[ ! -z "$NS" ]]; then
- if [[ "$NS" =~ $ip_regex ]]; then
- echo -e "${NETWORK}${BOLD}${DGN}DNS Server IP Address: ${BGN}$NS${CL}"
- NS="-nameserver=$NS"
- else
- msg_error "Invalid IP Address format for DNS Server. Needs to be 0.0.0.0, was ${NS}"
- exit
- fi
- else
- NS=""
- echo -e "${NETWORK}${BOLD}${DGN}DNS Server IP Address: ${BGN}HOST${CL}"
- fi
-
- if [[ ! -z "$MAC" ]]; then
- if [[ "$MAC" =~ ^([A-Fa-f0-9]{2}:){5}[A-Fa-f0-9]{2}$ ]]; then
- echo -e "${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}$MAC${CL}"
- MAC=",hwaddr=$MAC"
- else
- msg_error "MAC Address must be in the format xx:xx:xx:xx:xx:xx, was ${MAC}"
- exit
- fi
- fi
-
- if [[ ! -z "$VLAN" ]]; then
- if [[ "$VLAN" =~ ^-?[0-9]+$ ]]; then
- echo -e "${VLANTAG}${BOLD}${DGN}Vlan: ${BGN}$VLAN${CL}"
- VLAN=",tag=$VLAN"
- else
- msg_error "VLAN must be an integer, was ${VLAN}"
- exit
- fi
- fi
-
- if [[ ! -z "$TAGS" ]]; then
- echo -e "${NETWORK}${BOLD}${DGN}Tags: ${BGN}$TAGS${CL}"
- fi
-
- if [[ "$SSH" == "yes" ]]; then
- echo -e "${ROOTSSH}${BOLD}${DGN}Root SSH Access: ${BGN}$SSH${CL}"
- if [[ ! -z "$SSH_AUTHORIZED_KEY" ]]; then
- echo -e "${ROOTSSH}${BOLD}${DGN}SSH Authorized Key: ${BGN}********************${CL}"
- else
- echo -e "${ROOTSSH}${BOLD}${DGN}SSH Authorized Key: ${BGN}None${CL}"
- fi
- elif [[ "$SSH" == "no" ]]; then
- echo -e "${ROOTSSH}${BOLD}${DGN}Root SSH Access: ${BGN}$SSH${CL}"
- else
- msg_error "SSH needs to be 'yes' or 'no', was ${SSH}"
- exit
- fi
-
- if [[ "$VERB" == "yes" ]]; then
- echo -e "${SEARCH}${BOLD}${DGN}Verbose Mode: ${BGN}$VERB${CL}"
- elif [[ "$VERB" == "no" ]]; then
- echo -e "${SEARCH}${BOLD}${DGN}Verbose Mode: ${BGN}No${CL}"
- else
- msg_error "Verbose Mode needs to be 'yes' or 'no', was ${VERB}"
- exit
- fi
-
- if (whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --title "ADVANCED SETTINGS WITH CONFIG FILE COMPLETE" --yesno "Ready to create ${APP} LXC?" 10 58); then
- echo -e "${CREATING}${BOLD}${RD}Creating a ${APP} LXC using the above settings${CL}"
- else
- clear
- header_info
- echo -e "${INFO}${HOLD} ${GN}Using Config File on node $PVEHOST_NAME${CL}"
- config_file
- fi
-
-}
-
-install_script() {
- pve_check
- shell_check
- root_check
- arch_check
- ssh_check
- maxkeys_check
- diagnostics_check
-
- if systemctl is-active -q ping-instances.service; then
- systemctl -q stop ping-instances.service
- fi
- NEXTID=$(pvesh get /cluster/nextid)
- timezone=$(cat /etc/timezone)
- header_info
- while true; do
-
- CHOICE=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --title "SETTINGS" --menu "Choose an option:" \
- 18 60 6 \
- "1" "Default Settings" \
- "2" "Default Settings (with verbose)" \
- "3" "Advanced Settings" \
- "4" "Use Config File" \
- "5" "Diagnostic Settings" \
- "6" "Exit" --nocancel --default-item "1" 3>&1 1>&2 2>&3)
-
- if [ $? -ne 0 ]; then
- echo -e "${CROSS}${RD} Menu canceled. Exiting.${CL}"
- exit 0
- fi
-
- case $CHOICE in
- 1)
- header_info
- echo -e "${DEFAULT}${BOLD}${BL}Using Default Settings on node $PVEHOST_NAME${CL}"
- VERB="no"
- METHOD="default"
- base_settings "$VERB"
- echo_default
- break
- ;;
- 2)
- header_info
- echo -e "${DEFAULT}${BOLD}${BL}Using Default Settings on node $PVEHOST_NAME (${VERBOSE_CROPPED}Verbose)${CL}"
- VERB="yes"
- METHOD="default"
- base_settings "$VERB"
- echo_default
- break
- ;;
- 3)
- header_info
- echo -e "${ADVANCED}${BOLD}${RD}Using Advanced Settings on node $PVEHOST_NAME${CL}"
- METHOD="advanced"
- base_settings
- advanced_settings
- break
- ;;
- 4)
- header_info
- echo -e "${INFO}${HOLD} ${GN}Using Config File on node $PVEHOST_NAME${CL}"
- METHOD="advanced"
- base_settings
- config_file
- break
- ;;
- 5)
- if [[ $DIAGNOSTICS == "yes" ]]; then
- if whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --title "DIAGNOSTICS SETTINGS" --yesno "Send Diagnostics of LXC Installation?\n\nCurrent setting: ${DIAGNOSTICS}" 10 58 \
- --yes-button "No" --no-button "Back"; then
- DIAGNOSTICS="no"
- sed -i 's/^DIAGNOSTICS=.*/DIAGNOSTICS=no/' /usr/local/community-scripts/diagnostics
- whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --title "DIAGNOSTICS SETTINGS" --msgbox "Diagnostics settings changed to ${DIAGNOSTICS}." 8 58
- fi
- else
- if whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --title "DIAGNOSTICS SETTINGS" --yesno "Send Diagnostics of LXC Installation?\n\nCurrent setting: ${DIAGNOSTICS}" 10 58 \
- --yes-button "Yes" --no-button "Back"; then
- DIAGNOSTICS="yes"
- sed -i 's/^DIAGNOSTICS=.*/DIAGNOSTICS=yes/' /usr/local/community-scripts/diagnostics
- whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --title "DIAGNOSTICS SETTINGS" --msgbox "Diagnostics settings changed to ${DIAGNOSTICS}." 8 58
- fi
- fi
-
- ;;
- 6)
- echo -e "${CROSS}${RD}Exiting.${CL}"
- exit 0
- ;;
- *)
- echo -e "${CROSS}${RD}Invalid option, please try again.${CL}"
- ;;
- esac
- done
-}
-
-check_container_resources() {
- # Check actual RAM & Cores
- current_ram=$(free -m | awk 'NR==2{print $2}')
- current_cpu=$(nproc)
-
- # Check whether the current RAM is less than the required RAM or the CPU cores are less than required
- if [[ "$current_ram" -lt "$var_ram" ]] || [[ "$current_cpu" -lt "$var_cpu" ]]; then
- echo -e "\n${INFO}${HOLD} ${GN}Required: ${var_cpu} CPU, ${var_ram}MB RAM ${CL}| ${RD}Current: ${current_cpu} CPU, ${current_ram}MB RAM${CL}"
- echo -e "${YWB}Please ensure that the ${APP} LXC is configured with at least ${var_cpu} vCPU and ${var_ram} MB RAM for the build process.${CL}\n"
- echo -ne "${INFO}${HOLD} May cause data loss! ${INFO} Continue update with under-provisioned LXC? "
- read -r prompt
- # Check if the input is 'yes', otherwise exit with status 1
- if [[ ! ${prompt,,} =~ ^(yes)$ ]]; then
- echo -e "${CROSS}${HOLD} ${YWB}Exiting based on user input.${CL}"
- exit 1
- fi
- else
- echo -e ""
- fi
-}
-
-check_container_storage() {
- # Check if the /boot partition is more than 80% full
- total_size=$(df /boot --output=size | tail -n 1)
- local used_size=$(df /boot --output=used | tail -n 1)
- usage=$((100 * used_size / total_size))
- if ((usage > 80)); then
- # Prompt the user for confirmation to continue
- echo -e "${INFO}${HOLD} ${YWB}Warning: Storage is dangerously low (${usage}%).${CL}"
- echo -ne "Continue anyway? "
- read -r prompt
- # Check if the input is 'y' or 'yes', otherwise exit with status 1
- if [[ ! ${prompt,,} =~ ^(y|yes)$ ]]; then
- echo -e "${CROSS}${HOLD}${YWB}Exiting based on user input.${CL}"
- exit 1
- fi
- fi
-}
-
-start() {
- source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/tools.func)
- if command -v pveversion >/dev/null 2>&1; then
- if ! (whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --title "${APP} LXC" --yesno "This will create a New ${APP} LXC. Proceed?" 10 58); then
- clear
- exit_script
- exit
- fi
- SPINNER_PID=""
- install_script
- else
- CHOICE=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --title "${APP} LXC Update/Setting" --menu \
- "Support/Update functions for ${APP} LXC. Choose an option:" \
- 12 60 3 \
- "1" "YES (Silent Mode)" \
- "2" "YES (Verbose Mode)" \
- "3" "NO (Cancel Update)" --nocancel --default-item "1" 3>&1 1>&2 2>&3)
-
- case "$CHOICE" in
- 1)
- VERB="no"
- set_std_mode
- ;;
- 2)
- VERB="yes"
- set_std_mode
- ;;
- 3)
- clear
- exit_script
- exit
- ;;
- esac
-
- SPINNER_PID=""
- update_script
- fi
-}
-
-# This function collects user settings and integrates all the collected information.
-build_container() {
- # if [ "$VERB" == "yes" ]; then set -x; fi
-
- if [ "$CT_TYPE" == "1" ]; then
- FEATURES="keyctl=1,nesting=1"
- else
- FEATURES="nesting=1"
- fi
-
- if [[ $DIAGNOSTICS == "yes" ]]; then
- post_to_api
- fi
-
- TEMP_DIR=$(mktemp -d)
- pushd "$TEMP_DIR" >/dev/null
- if [ "$var_os" == "alpine" ]; then
- export FUNCTIONS_FILE_PATH="$(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/alpine-install.func)"
- else
- export FUNCTIONS_FILE_PATH="$(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/install.func)"
- fi
- export RANDOM_UUID="$RANDOM_UUID"
- export CACHER="$APT_CACHER"
- export CACHER_IP="$APT_CACHER_IP"
- export tz="$timezone"
- export DISABLEIPV6="$DISABLEIP6"
- export APPLICATION="$APP"
- export app="$NSAPP"
- export PASSWORD="$PW"
- export VERBOSE="$VERB"
- export SSH_ROOT="${SSH}"
- export SSH_AUTHORIZED_KEY
- export CTID="$CT_ID"
- export CTTYPE="$CT_TYPE"
- export PCT_OSTYPE="$var_os"
- export PCT_OSVERSION="$var_version"
- export PCT_DISK_SIZE="$DISK_SIZE"
- export PCT_OPTIONS="
- -features $FEATURES
- -hostname $HN
- -tags $TAGS
- $SD
- $NS
- -net0 name=eth0,bridge=$BRG$MAC,ip=$NET$GATE$VLAN$MTU
- -onboot 1
- -cores $CORE_COUNT
- -memory $RAM_SIZE
- -unprivileged $CT_TYPE
- $PW
- "
- # This executes create_lxc.sh and creates the container and .conf file
- bash -c "$(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/create_lxc.sh)" $?
-
- LXC_CONFIG=/etc/pve/lxc/${CTID}.conf
- if [ "$CT_TYPE" == "0" ]; then
- cat <>"$LXC_CONFIG"
-# USB passthrough
-lxc.cgroup2.devices.allow: a
-lxc.cap.drop:
-lxc.cgroup2.devices.allow: c 188:* rwm
-lxc.cgroup2.devices.allow: c 189:* rwm
-lxc.mount.entry: /dev/serial/by-id dev/serial/by-id none bind,optional,create=dir
-lxc.mount.entry: /dev/ttyUSB0 dev/ttyUSB0 none bind,optional,create=file
-lxc.mount.entry: /dev/ttyUSB1 dev/ttyUSB1 none bind,optional,create=file
-lxc.mount.entry: /dev/ttyACM0 dev/ttyACM0 none bind,optional,create=file
-lxc.mount.entry: /dev/ttyACM1 dev/ttyACM1 none bind,optional,create=file
-EOF
- fi
-
- if [ "$CT_TYPE" == "0" ]; then
- if [[ "$APP" == "immich" || "$APP" == "Channels" || "$APP" == "Emby" || "$APP" == "ErsatzTV" || "$APP" == "Frigate" || "$APP" == "Jellyfin" || "$APP" == "Plex" || "$APP" == "Scrypted" || "$APP" == "Tdarr" || "$APP" == "Unmanic" || "$APP" == "Ollama" || "$APP" == "FileFlows" ]]; then
- cat <>$LXC_CONFIG
-# VAAPI hardware transcoding
-lxc.cgroup2.devices.allow: c 226:0 rwm
-lxc.cgroup2.devices.allow: c 226:128 rwm
-lxc.cgroup2.devices.allow: c 29:0 rwm
-lxc.mount.entry: /dev/fb0 dev/fb0 none bind,optional,create=file
-lxc.mount.entry: /dev/dri dev/dri none bind,optional,create=dir
-lxc.mount.entry: /dev/dri/renderD128 dev/dri/renderD128 none bind,optional,create=file
-EOF
- fi
- else
- if [[ "$APP" == "immich" || "$APP" == "Channels" || "$APP" == "Emby" || "$APP" == "ErsatzTV" || "$APP" == "Frigate" || "$APP" == "Jellyfin" || "$APP" == "Plex" || "$APP" == "Scrypted" || "$APP" == "Tdarr" || "$APP" == "Unmanic" || "$APP" == "Ollama" || "$APP" == "FileFlows" ]]; then
- if [[ -e "/dev/dri/renderD128" ]]; then
- if [[ -e "/dev/dri/card0" ]]; then
- cat <>$LXC_CONFIG
-# VAAPI hardware transcoding
-dev0: /dev/dri/card0,gid=44
-dev1: /dev/dri/renderD128,gid=104
-EOF
- else
- cat <>"$LXC_CONFIG"
-# VAAPI hardware transcoding
-dev0: /dev/dri/card1,gid=44
-dev1: /dev/dri/renderD128,gid=104
-EOF
- fi
- fi
- fi
- fi
-
- # This starts the container and executes -install.sh
- msg_info "Starting LXC Container"
- pct start "$CTID"
- msg_ok "Started LXC Container"
- if [ "$var_os" == "alpine" ]; then
- sleep 3
- pct exec "$CTID" -- /bin/sh -c 'cat </etc/apk/repositories
-http://dl-cdn.alpinelinux.org/alpine/latest-stable/main
-http://dl-cdn.alpinelinux.org/alpine/latest-stable/community
-EOF'
- pct exec "$CTID" -- ash -c "apk add bash >/dev/null"
- fi
- lxc-attach -n "$CTID" -- bash -c "$(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/install/"$var_install".sh)" $?
-
-}
-
-# This function sets the description of the container.
-description() {
- IP=$(pct exec "$CTID" ip a s dev eth0 | awk '/inet / {print $2}' | cut -d/ -f1)
-
- # Generate LXC Description
- DESCRIPTION=$(
- cat <
-
-
-
-
- ${APP} LXC
-
-
-
-
-
-
-
-
-
- GitHub
-
-
-
- Discussions
-
-
-
- Issues
-
-
-EOF
- )
-
- # Set Description in LXC
- pct set "$CTID" -description "$DESCRIPTION"
-
- if [[ -f /etc/systemd/system/ping-instances.service ]]; then
- systemctl start ping-instances.service
- fi
-
- post_update_to_api "done" "none"
-}
-
-set_std_mode() {
- if [ "$VERB" = "yes" ]; then
- STD=""
- else
- STD="silent"
- fi
-}
-
-# Silent execution function
-silent() {
- if [ "$VERB" = "no" ]; then
- "$@" >/dev/null 2>&1 || return 1
- else
- "$@" || return 1
- fi
-}
-
-api_exit_script() {
- exit_code=$? # Capture the exit status of the last executed command
- #200 exit codes indicate error in create_lxc.sh
- #100 exit codes indicate error in install.func
-
- if [ $exit_code -ne 0 ]; then
- case $exit_code in
- 100) post_update_to_api "failed" "100: Unexpected error in create_lxc.sh" ;;
- 101) post_update_to_api "failed" "101: No network connection detected in create_lxc.sh" ;;
- 200) post_update_to_api "failed" "200: LXC creation failed in create_lxc.sh" ;;
- 201) post_update_to_api "failed" "201: Invalid Storage class in create_lxc.sh" ;;
- 202) post_update_to_api "failed" "202: User aborted menu in create_lxc.sh" ;;
- 203) post_update_to_api "failed" "203: CTID not set in create_lxc.sh" ;;
- 204) post_update_to_api "failed" "204: PCT_OSTYPE not set in create_lxc.sh" ;;
- 205) post_update_to_api "failed" "205: CTID cannot be less than 100 in create_lxc.sh" ;;
- 206) post_update_to_api "failed" "206: CTID already in use in create_lxc.sh" ;;
- 207) post_update_to_api "failed" "207: Template not found in create_lxc.sh" ;;
- 208) post_update_to_api "failed" "208: Error downloading template in create_lxc.sh" ;;
- 209) post_update_to_api "failed" "209: Container creation failed, but template is intact in create_lxc.sh" ;;
- *) post_update_to_api "failed" "Unknown error, exit code: $exit_code in create_lxc.sh" ;;
- esac
- fi
-}
-
-trap 'api_exit_script' EXIT
-trap 'post_update_to_api "failed" "$BASH_COMMAND"' ERR
-trap 'post_update_to_api "failed" "INTERRUPTED"' SIGINT
-trap 'post_update_to_api "failed" "TERMINATED"' SIGTERM
diff --git a/misc/backup_07052025/core.func.bak b/misc/backup_07052025/core.func.bak
deleted file mode 100644
index de18842a9..000000000
--- a/misc/backup_07052025/core.func.bak
+++ /dev/null
@@ -1,220 +0,0 @@
-# Copyright (c) 2021-2025 community-scripts ORG
-# Author: michelroegl-brunner
-# License: MIT | https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/LICENSE
-
-color() {
- # Colors
- YW=$(echo "\033[33m")
- YWB=$(echo "\033[93m")
- BL=$(echo "\033[36m")
- RD=$(echo "\033[01;31m")
- BGN=$(echo "\033[4;92m")
- GN=$(echo "\033[1;92m")
- DGN=$(echo "\033[32m")
-
- # Formatting
- CL=$(echo "\033[m")
- BOLD=$(echo "\033[1m")
- HOLD=" "
- TAB=" "
-
- # Icons
- CM="${TAB}✔️${TAB}"
- CROSS="${TAB}✖️${TAB}${CL}"
- INFO="${TAB}💡${TAB}${CL}"
- OS="${TAB}🖥️${TAB}${CL}"
- OSVERSION="${TAB}🌟${TAB}${CL}"
- CONTAINERTYPE="${TAB}📦${TAB}${CL}"
- DISKSIZE="${TAB}💾${TAB}${CL}"
- CPUCORE="${TAB}🧠${TAB}${CL}"
- RAMSIZE="${TAB}🛠️${TAB}${CL}"
- SEARCH="${TAB}🔍${TAB}${CL}"
- VERBOSE_CROPPED="🔍${TAB}"
- VERIFYPW="${TAB}🔐${TAB}${CL}"
- CONTAINERID="${TAB}🆔${TAB}${CL}"
- HOSTNAME="${TAB}🏠${TAB}${CL}"
- BRIDGE="${TAB}🌉${TAB}${CL}"
- NETWORK="${TAB}📡${TAB}${CL}"
- GATEWAY="${TAB}🌐${TAB}${CL}"
- DISABLEIPV6="${TAB}🚫${TAB}${CL}"
- DEFAULT="${TAB}⚙️${TAB}${CL}"
- MACADDRESS="${TAB}🔗${TAB}${CL}"
- VLANTAG="${TAB}🏷️${TAB}${CL}"
- ROOTSSH="${TAB}🔑${TAB}${CL}"
- CREATING="${TAB}🚀${TAB}${CL}"
- ADVANCED="${TAB}🧩${TAB}${CL}"
- FUSE="${TAB}🔧${TAB}${CL}"
-}
-
-declare -A MSG_INFO_SHOWN
-SPINNER_ACTIVE=0
-SPINNER_PID=""
-SPINNER_MSG=""
-
-start_spinner() {
- local msg="$1"
- local frames=(⠋ ⠙ ⠹ ⠸ ⠼ ⠴ ⠦ ⠧ ⠇ ⠏)
- local spin_i=0
- local interval=0.1
-
- SPINNER_MSG="$msg"
- printf "\r\e[2K" >&2
-
- {
- while [[ "$SPINNER_ACTIVE" -eq 1 ]]; do
- printf "\r\e[2K%s %b" "${frames[spin_i]}" "${YW}${SPINNER_MSG}${CL}" >&2
- spin_i=$(((spin_i + 1) % ${#frames[@]}))
- sleep "$interval"
- done
- } &
-
- SPINNER_PID=$!
- disown "$SPINNER_PID"
-}
-
-stop_spinner() {
- if [[ ${SPINNER_PID+v} && -n "$SPINNER_PID" ]] && kill -0 "$SPINNER_PID" 2>/dev/null; then
- kill "$SPINNER_PID" 2>/dev/null
- sleep 0.1
- kill -0 "$SPINNER_PID" 2>/dev/null && kill -9 "$SPINNER_PID" 2>/dev/null
- wait "$SPINNER_PID" 2>/dev/null || true
- fi
- SPINNER_ACTIVE=0
- unset SPINNER_PID
-}
-
-spinner_guard() {
- if [[ "$SPINNER_ACTIVE" -eq 1 ]] && [[ -n "$SPINNER_PID" ]]; then
- kill "$SPINNER_PID" 2>/dev/null
- wait "$SPINNER_PID" 2>/dev/null || true
- SPINNER_ACTIVE=0
- unset SPINNER_PID
- fi
-}
-
-log_message() {
- local level="$1"
- local message="$2"
- local timestamp
- local logdate
- timestamp=$(date '+%Y-%m-%d %H:%M:%S')
- logdate=$(date '+%Y-%m-%d')
-
- LOGDIR="/usr/local/community-scripts/logs"
- mkdir -p "$LOGDIR"
-
- LOGFILE="${LOGDIR}/${logdate}_${NSAPP}.log"
- echo "$timestamp - $level: $message" >>"$LOGFILE"
-}
-
-msg_info() {
- local msg="$1"
- if [ "${SPINNER_ACTIVE:-0}" -eq 1 ]; then
- return
- fi
-
- SPINNER_ACTIVE=1
- start_spinner "$msg"
-}
-
-msg_ok() {
- if [ -n "${SPINNER_PID:-}" ] && ps -p "$SPINNER_PID" >/dev/null 2>&1; then
- kill "$SPINNER_PID" >/dev/null 2>&1
- wait "$SPINNER_PID" 2>/dev/null || true
- fi
-
- local msg="$1"
- printf "\r\e[2K${CM}${GN}%b${CL}\n" "$msg" >&2
- unset SPINNER_PID
- SPINNER_ACTIVE=0
-
- log_message "OK" "$msg"
-}
-
-msg_error() {
- if [ -n "${SPINNER_PID:-}" ] && ps -p "$SPINNER_PID" >/dev/null 2>&1; then
- kill "$SPINNER_PID" >/dev/null 2>&1
- wait "$SPINNER_PID" 2>/dev/null || true
- fi
-
- local msg="$1"
- printf "\r\e[2K${CROSS}${RD}%b${CL}\n" "$msg" >&2
- unset SPINNER_PID
- SPINNER_ACTIVE=0
- log_message "ERROR" "$msg"
-}
-
-shell_check() {
- if [[ "$(basename "$SHELL")" != "bash" ]]; then
- clear
- msg_error "Your default shell is currently not set to Bash. To use these scripts, please switch to the Bash shell."
- echo -e "\nExiting..."
- sleep 2
- exit
- fi
-}
-
-root_check() {
- if [[ "$(id -u)" -ne 0 || $(ps -o comm= -p $PPID) == "sudo" ]]; then
- clear
- msg_error "Please run this script as root."
- echo -e "\nExiting..."
- sleep 2
- exit
- fi
-}
-
-pve_check() {
- if ! pveversion | grep -Eq "pve-manager/8\.[1-9](\.[0-9]+)*"; then
- msg_error "${CROSS}${RD}This version of Proxmox Virtual Environment is not supported"
- echo -e "Requires Proxmox Virtual Environment Version 8.1 or later."
- echo -e "Exiting..."
- sleep 2
- exit
- fi
-}
-
-arch_check() {
- if [ "$(dpkg --print-architecture)" != "amd64" ]; then
- echo -e "\n ${INFO}${YWB}This script will not work with PiMox! \n"
- echo -e "\n ${YWB}Visit https://github.com/asylumexp/Proxmox for ARM64 support. \n"
- echo -e "Exiting..."
- sleep 2
- exit
- fi
-}
-
-ssh_check() {
- if command -v pveversion >/dev/null 2>&1; then
- if [ -n "${SSH_CLIENT:+x}" ]; then
- if whiptail --backtitle "Proxmox VE Helper Scripts" --defaultno --title "SSH DETECTED" --yesno "It's suggested to use the Proxmox shell instead of SSH, since SSH can create issues while gathering variables. Would you like to proceed with using SSH?" 10 62; then
- echo "you've been warned"
- else
- clear
- exit
- fi
- fi
- fi
-}
-
-exit-script() {
- clear
- echo -e "\n${CROSS}${RD}User exited script${CL}\n"
- exit
-}
-
-set_std_mode() {
- if [ "$VERB" = "yes" ]; then
- STD=""
- else
- STD="silent"
- fi
-}
-
-silent() {
- if [ "$VERB" = "no" ]; then
- "$@" >>"$LOGFILE" 2>&1
- else
- "$@" 2>&1 | tee -a "$LOGFILE"
- fi
-}
diff --git a/misc/backup_07052025/dialogue.bak b/misc/backup_07052025/dialogue.bak
deleted file mode 100644
index 497b5fe02..000000000
--- a/misc/backup_07052025/dialogue.bak
+++ /dev/null
@@ -1,850 +0,0 @@
-
-# dialog_input() {
-# local title="$1"
-# local prompt="$2"
-# local default="$3"
-# local result
-# apt-get install -y dialog
-# result=$(dialog --backtitle "[dev] Proxmox VE Helper Scripts" \
-# --title "$title" \
-# --extra-button --extra-label "Back" \
-# --ok-label "Next" --cancel-label "Exit" \
-# --inputbox "$prompt" 10 58 "$default" 2>&1 >/dev/tty)
-
-# local exitcode=$?
-
-# case $exitcode in
-# 0)
-# REPLY_RESULT="$result"
-# return 0
-# ;; # OK
-# 3) return 2 ;; # Back
-# *) return 1 ;; # Cancel/Exit
-# esac
-# }
-
-# advanced_settings() {
-# local step=1
-
-# while true; do
-# case $step in
-# 1)
-# show_intro_messages && ((step++))
-# ;;
-# 2)
-# select_distribution
-# result=$?
-# [[ $result -eq 0 ]] && ((step++))
-# [[ $result -eq 2 && $step -gt 1 ]] && ((step--))
-# [[ $result -eq 1 ]] && return
-# ;;
-# 3)
-# select_version
-# result=$?
-# [[ $result -eq 0 ]] && ((step++))
-# [[ $result -eq 2 && $step -gt 1 ]] && ((step--))
-# [[ $result -eq 1 ]] && return
-# ;;
-# 4)
-# select_container_type
-# result=$?
-# [[ $result -eq 0 ]] && ((step++))
-# [[ $result -eq 2 && $step -gt 1 ]] && ((step--))
-# [[ $result -eq 1 ]] && return
-# ;;
-# 5)
-# set_root_password
-# result=$?
-# [[ $result -eq 0 ]] && ((step++))
-# [[ $result -eq 2 && $step -gt 1 ]] && ((step--))
-# [[ $result -eq 1 ]] && return
-# ;;
-# 6)
-# set_container_id
-# result=$?
-# [[ $result -eq 0 ]] && ((step++))
-# [[ $result -eq 2 && $step -gt 1 ]] && ((step--))
-# [[ $result -eq 1 ]] && return
-# ;;
-# 7)
-# set_hostname
-# result=$?
-# [[ $result -eq 0 ]] && ((step++))
-# [[ $result -eq 2 && $step -gt 1 ]] && ((step--))
-# [[ $result -eq 1 ]] && return
-# ;;
-# 8)
-# set_disk_size
-# result=$?
-# [[ $result -eq 0 ]] && ((step++))
-# [[ $result -eq 2 && $step -gt 1 ]] && ((step--))
-# [[ $result -eq 1 ]] && return
-# ;;
-# 9)
-# set_cpu_cores
-# result=$?
-# [[ $result -eq 0 ]] && ((step++))
-# [[ $result -eq 2 && $step -gt 1 ]] && ((step--))
-# [[ $result -eq 1 ]] && return
-# ;;
-# 10)
-# set_ram_size
-# result=$?
-# [[ $result -eq 0 ]] && ((step++))
-# [[ $result -eq 2 && $step -gt 1 ]] && ((step--))
-# [[ $result -eq 1 ]] && return
-# ;;
-# 11)
-# set_bridge
-# result=$?
-# [[ $result -eq 0 ]] && ((step++))
-# [[ $result -eq 2 && $step -gt 1 ]] && ((step--))
-# [[ $result -eq 1 ]] && return
-# ;;
-# 12)
-# set_ip_address
-# result=$?
-# [[ $result -eq 0 ]] && ((step++))
-# [[ $result -eq 2 && $step -gt 1 ]] && ((step--))
-# [[ $result -eq 1 ]] && return
-# ;;
-# 13)
-# set_gateway
-# result=$?
-# [[ $result -eq 0 ]] && ((step++))
-# [[ $result -eq 2 && $step -gt 1 ]] && ((step--))
-# [[ $result -eq 1 ]] && return
-# ;;
-# 14)
-# set_apt_cacher
-# result=$?
-# [[ $result -eq 0 ]] && ((step++))
-# [[ $result -eq 2 && $step -gt 1 ]] && ((step--))
-# [[ $result -eq 1 ]] && return
-# ;;
-# 15)
-# toggle_ipv6
-# result=$?
-# [[ $result -eq 0 ]] && ((step++))
-# [[ $result -eq 2 && $step -gt 1 ]] && ((step--))
-# [[ $result -eq 1 ]] && return
-# ;;
-# 16)
-# set_mtu
-# result=$?
-# [[ $result -eq 0 ]] && ((step++))
-# [[ $result -eq 2 && $step -gt 1 ]] && ((step--))
-# [[ $result -eq 1 ]] && return
-# ;;
-# 17)
-# set_dns_search_domain
-# result=$?
-# [[ $result -eq 0 ]] && ((step++))
-# [[ $result -eq 2 && $step -gt 1 ]] && ((step--))
-# [[ $result -eq 1 ]] && return
-# ;;
-# 18)
-# set_dns_server
-# result=$?
-# [[ $result -eq 0 ]] && ((step++))
-# [[ $result -eq 2 && $step -gt 1 ]] && ((step--))
-# [[ $result -eq 1 ]] && return
-# ;;
-# 19)
-# set_mac_address
-# result=$?
-# [[ $result -eq 0 ]] && ((step++))
-# [[ $result -eq 2 && $step -gt 1 ]] && ((step--))
-# [[ $result -eq 1 ]] && return
-# ;;
-# 20)
-# set_vlan
-# result=$?
-# [[ $result -eq 0 ]] && ((step++))
-# [[ $result -eq 2 && $step -gt 1 ]] && ((step--))
-# [[ $result -eq 1 ]] && return
-# ;;
-# 21)
-# set_tags
-# result=$?
-# [[ $result -eq 0 ]] && ((step++))
-# [[ $result -eq 2 && $step -gt 1 ]] && ((step--))
-# [[ $result -eq 1 ]] && return
-# ;;
-# 22)
-# set_ssh_access
-# result=$?
-# [[ $result -eq 0 ]] && ((step++))
-# [[ $result -eq 2 && $step -gt 1 ]] && ((step--))
-# [[ $result -eq 1 ]] && return
-# ;;
-# 23)
-# set_fuse
-# result=$?
-# [[ $result -eq 0 ]] && ((step++))
-# [[ $result -eq 2 && $step -gt 1 ]] && ((step--))
-# [[ $result -eq 1 ]] && return
-# ;;
-# 24)
-# set_verbose
-# result=$?
-# [[ $result -eq 0 ]] && ((step++))
-# [[ $result -eq 2 && $step -gt 1 ]] && ((step--))
-# [[ $result -eq 1 ]] && return
-# ;;
-# 25)
-# confirm_creation
-# result=$?
-# [[ $result -eq 0 ]] && break
-# [[ $result -eq 2 && $step -gt 1 ]] && ((step--))
-# [[ $result -eq 1 ]] && return
-# ;;
-# esac
-# done
-# }
-
-# show_intro_messages() {
-# dialog --backtitle "[dev] Proxmox VE Helper Scripts" \
-# --title "Instructional Tip" \
-# --msgbox "To make a selection, use the Spacebar." 8 58 || return 1
-
-# dialog --backtitle "[dev] Proxmox VE Helper Scripts" \
-# --title "Default distribution for $APP" \
-# --msgbox "Default is: ${var_os} ${var_version}\n\nIf the default Linux distribution is not adhered to, script support will be discontinued." 10 58 || return 1
-# return 0
-# }
-
-# select_distribution() {
-# [[ "$var_os" == "alpine" ]] && return 0
-
-# local default result exitcode
-# default="${var_os:-debian}"
-# var_os=""
-
-# local debian_flag ubuntu_flag
-# [[ "$default" == "debian" ]] && debian_flag="on" || debian_flag="off"
-# [[ "$default" == "ubuntu" ]] && ubuntu_flag="on" || ubuntu_flag="off"
-
-# while [[ -z "$var_os" ]]; do
-# exec 3>&1
-# result=$(dialog --clear \
-# --backtitle "[dev] Proxmox VE Helper Scripts" \
-# --title "DISTRIBUTION" \
-# --radiolist "Choose Distribution:" 15 60 4 \
-# "debian" "" "$debian_flag" \
-# "ubuntu" "" "$ubuntu_flag" \
-# --ok-label "Next" \
-# --cancel-label "Exit" \
-# --extra-button \
-# --extra-label "Back" \
-# 2>&1 1>&3)
-# exitcode=$?
-# exec 3>&-
-
-# case "$exitcode" in
-# 0)
-# if [[ "$result" =~ ^(debian|ubuntu)$ ]]; then
-# var_os="$result"
-# printf "%bOperating System: %b%s%b\n" "$OS$BOLD$DGN" "$BGN" "$var_os" "$CL"
-# return 0
-# else
-# printf "[DEBUG] No valid selection made (result='%s'), repeating...\n" "$result"
-# fi
-# ;;
-# 3)
-# return 2
-# ;;
-# 1 | 255)
-# return 1
-# ;;
-# *)
-# printf "[DEBUG] Unexpected exit code: %s\n" "$exitcode" >&2
-# return 1
-# ;;
-# esac
-# done
-# }
-
-# select_version() {
-# local default="${var_version}"
-# var_version=""
-# local list result exitcode
-
-# if [[ "$var_os" == "debian" ]]; then
-# case "$default" in
-# 11) list=("11" "Bullseye" on "12" "Bookworm" off) ;;
-# 12) list=("11" "Bullseye" off "12" "Bookworm" on) ;;
-# *) list=("11" "Bullseye" off "12" "Bookworm" off) ;;
-# esac
-# elif [[ "$var_os" == "ubuntu" ]]; then
-# case "$default" in
-# 20.04) list=("20.04" "Focal" on "22.04" "Jammy" off "24.04" "Noble" off "24.10" "Oracular" off) ;;
-# 22.04) list=("20.04" "Focal" off "22.04" "Jammy" on "24.04" "Noble" off "24.10" "Oracular" off) ;;
-# 24.04) list=("20.04" "Focal" off "22.04" "Jammy" off "24.04" "Noble" on "24.10" "Oracular" off) ;;
-# 24.10) list=("20.04" "Focal" off "22.04" "Jammy" off "24.04" "Noble" off "24.10" "Oracular" on) ;;
-# *) list=("20.04" "Focal" off "22.04" "Jammy" off "24.04" "Noble" off "24.10" "Oracular" off) ;;
-# esac
-# fi
-
-# result=$(dialog --backtitle "[dev] Proxmox VE Helper Scripts" \
-# --title "VERSION" \
-# --radiolist "Choose Version:" 15 58 5 \
-# "${list[@]}" \
-# --ok-label "Next" \
-# --cancel-label "Exit" \
-# --extra-button --extra-label "Back" \
-# 3>&1 1>&2 2>&3)
-
-# exitcode=$?
-
-# case $exitcode in
-# 0)
-# var_version="$result"
-# printf "%bVersion: %b%s%b\n" "$OSVERSION$BOLD$DGN" "$BGN" "$var_version" "$CL"
-# return 0
-# ;;
-# 3) return 2 ;;
-# *) return 1 ;;
-# esac
-# }
-
-# select_container_type() {
-# local default="${CT_TYPE}"
-# CT_TYPE=""
-# local list result exitcode
-
-# [[ "$default" == "1" ]] && list=("1" "Unprivileged" on "0" "Privileged" off) || list=("1" "Unprivileged" off "0" "Privileged" on)
-
-# result=$(dialog --backtitle "[dev] Proxmox VE Helper Scripts" \
-# --title "CONTAINER TYPE" \
-# --radiolist "Choose Type:" 10 58 2 "${list[@]}" \
-# --extra-button --extra-label "Back" --ok-label "Next" --cancel-label "Exit" 3>&1 1>&2 2>&3)
-
-# exitcode=$?
-
-# case $exitcode in
-# 0)
-# CT_TYPE="$result"
-# [[ "$CT_TYPE" == "0" ]] && desc="Privileged" || desc="Unprivileged"
-# printf "%bContainer Type: %b%s%b\n" "$CONTAINERTYPE$BOLD$DGN" "$BGN" "$desc" "$CL"
-# return 0
-# ;;
-# 3) return 2 ;;
-# *) return 1 ;;
-# esac
-# }
-# set_root_password() {
-# local pw1 pw2 exitcode
-
-# while true; do
-# pw1=$(dialog --backtitle "[dev] Proxmox VE Helper Scripts" \
-# --title "PASSWORD (leave blank for automatic login)" \
-# --insecure --passwordbox "\nSet Root Password (needed for root ssh access)" 10 58 \
-# --extra-button --extra-label "Back" --ok-label "Next" --cancel-label "Exit" 3>&1 1>&2 2>&3)
-# exitcode=$?
-
-# case $exitcode in
-# 0)
-# if [[ -z "$pw1" ]]; then
-# PW1="Automatic Login"
-# PW=""
-# printf "%bRoot Password: %b%s%b\n" "$VERIFYPW$BOLD$DGN" "$BGN" "$PW1" "$CL"
-# return 0
-# fi
-# if [[ "$pw1" == *" "* ]]; then
-# dialog --msgbox "Password cannot contain spaces. Please try again." 8 58
-# continue
-# fi
-# if [[ ${#pw1} -lt 5 ]]; then
-# dialog --msgbox "Password must be at least 5 characters long. Please try again." 8 58
-# continue
-# fi
-# pw2=$(dialog --backtitle "[dev] Proxmox VE Helper Scripts" \
-# --title "PASSWORD VERIFICATION" \
-# --insecure --passwordbox "\nVerify Root Password" 10 58 \
-# --extra-button --extra-label "Back" --ok-label "Next" --cancel-label "Exit" 3>&1 1>&2 2>&3)
-# exitcode=$?
-# case $exitcode in
-# 0)
-# if [[ "$pw1" == "$pw2" ]]; then
-# PW="-password $pw1"
-# printf "%bRoot Password: %b********%b\n" "$VERIFYPW$BOLD$DGN" "$BGN" "$CL"
-# return 0
-# else
-# dialog --msgbox "Passwords do not match. Please try again." 8 58
-# continue
-# fi
-# ;;
-# 3) return 2 ;;
-# *) return 1 ;;
-# esac
-# ;;
-# 3) return 2 ;;
-# *) return 1 ;;
-# esac
-# done
-# }
-
-# set_container_id() {
-# local result exitcode
-# result=$(dialog --backtitle "[dev] Proxmox VE Helper Scripts" \
-# --title "CONTAINER ID" \
-# --inputbox "Set Container ID" 8 58 "$NEXTID" \
-# --extra-button --extra-label "Back" --ok-label "Next" --cancel-label "Exit" 3>&1 1>&2 2>&3)
-# exitcode=$?
-
-# case $exitcode in
-# 0)
-# CT_ID="${result:-$NEXTID}"
-# printf "%bContainer ID: %b%s%b\n" "$CONTAINERID$BOLD$DGN" "$BGN" "$CT_ID" "$CL"
-# return 0
-# ;;
-# 3) return 2 ;;
-# *) return 1 ;;
-# esac
-# }
-
-# set_hostname() {
-# local result exitcode
-# result=$(dialog --backtitle "[dev] Proxmox VE Helper Scripts" \
-# --title "HOSTNAME" \
-# --inputbox "Set Hostname" 8 58 "$NSAPP" \
-# --extra-button --extra-label "Back" --ok-label "Next" --cancel-label "Exit" 3>&1 1>&2 2>&3)
-# exitcode=$?
-
-# case $exitcode in
-# 0)
-# if [[ -z "$result" ]]; then
-# HN="$NSAPP"
-# else
-# HN=$(tr -d ' ' <<<"${result,,}")
-# fi
-# printf "%bHostname: %b%s%b\n" "$HOSTNAME$BOLD$DGN" "$BGN" "$HN" "$CL"
-# return 0
-# ;;
-# 3) return 2 ;;
-# *) return 1 ;;
-# esac
-# }
-
-# set_disk_size() {
-# local result exitcode
-# result=$(dialog --backtitle "[dev] Proxmox VE Helper Scripts" \
-# --title "DISK SIZE" \
-# --inputbox "Set Disk Size in GB" 8 58 "$var_disk" \
-# --extra-button --extra-label "Back" --ok-label "Next" --cancel-label "Exit" 3>&1 1>&2 2>&3)
-# exitcode=$?
-
-# case $exitcode in
-# 0)
-# if [[ -z "$result" ]]; then
-# DISK_SIZE="$var_disk"
-# elif [[ "$result" =~ ^[0-9]+$ ]]; then
-# DISK_SIZE="$result"
-# else
-# dialog --msgbox "Disk size must be an integer!" 8 58
-# return 2
-# fi
-# printf "%bDisk Size: %b%s GB%b\n" "$DISKSIZE$BOLD$DGN" "$BGN" "$DISK_SIZE" "$CL"
-# return 0
-# ;;
-# 3) return 2 ;;
-# *) return 1 ;;
-# esac
-# }
-
-# set_cpu_cores() {
-# local result exitcode
-# result=$(dialog --backtitle "[dev] Proxmox VE Helper Scripts" \
-# --title "CORE COUNT" \
-# --inputbox "Allocate CPU Cores" 8 58 "$var_cpu" \
-# --extra-button --extra-label "Back" --ok-label "Next" --cancel-label "Exit" 3>&1 1>&2 2>&3)
-# exitcode=$?
-
-# case $exitcode in
-# 0)
-# CORE_COUNT="${result:-$var_cpu}"
-# printf "%bCPU Cores: %b%s%b\n" "$CPUCORE$BOLD$DGN" "$BGN" "$CORE_COUNT" "$CL"
-# return 0
-# ;;
-# 3) return 2 ;;
-# *) return 1 ;;
-# esac
-# }
-
-# set_ram_size() {
-# local result exitcode
-# result=$(dialog --backtitle "[dev] Proxmox VE Helper Scripts" \
-# --title "RAM" \
-# --inputbox "Allocate RAM in MiB" 8 58 "$var_ram" \
-# --extra-button --extra-label "Back" --ok-label "Next" --cancel-label "Exit" 3>&1 1>&2 2>&3)
-# exitcode=$?
-
-# case $exitcode in
-# 0)
-# RAM_SIZE="${result:-$var_ram}"
-# printf "%bRAM Size: %b%s MiB%b\n" "$RAMSIZE$BOLD$DGN" "$BGN" "$RAM_SIZE" "$CL"
-# return 0
-# ;;
-# 3) return 2 ;;
-# *) return 1 ;;
-# esac
-# }
-
-# set_bridge() {
-# local result exitcode
-# result=$(dialog --backtitle "[dev] Proxmox VE Helper Scripts" \
-# --title "BRIDGE" \
-# --inputbox "Set a Bridge" 8 58 "vmbr0" \
-# --extra-button --extra-label "Back" --ok-label "Next" --cancel-label "Exit" 3>&1 1>&2 2>&3)
-# exitcode=$?
-
-# case $exitcode in
-# 0)
-# BRG="${result:-vmbr0}"
-# printf "%bBridge: %b%s%b\n" "$BRIDGE$BOLD$DGN" "$BGN" "$BRG" "$CL"
-# return 0
-# ;;
-# 3) return 2 ;;
-# *) return 1 ;;
-# esac
-# }
-
-# set_ip_address() {
-# local result exitcode
-# while true; do
-# result=$(dialog --backtitle "[dev] Proxmox VE Helper Scripts" \
-# --title "IP ADDRESS" \
-# --inputbox "Set a Static IPv4 CIDR Address (/24)" 8 58 "dhcp" \
-# --extra-button --extra-label "Back" --ok-label "Next" --cancel-label "Exit" 3>&1 1>&2 2>&3)
-# exitcode=$?
-
-# case $exitcode in
-# 0)
-# if [[ "$result" == "dhcp" ]]; then
-# NET="dhcp"
-# printf "%bIP Address: %b%s%b\n" "$NETWORK$BOLD$DGN" "$BGN" "$NET" "$CL"
-# return 0
-# elif [[ "$result" =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}/([0-9]|[1-2][0-9]|3[0-2])$ ]]; then
-# NET="$result"
-# printf "%bIP Address: %b%s%b\n" "$NETWORK$BOLD$DGN" "$BGN" "$NET" "$CL"
-# return 0
-# else
-# dialog --msgbox "$result is an invalid IPv4 CIDR address. Please enter a valid address or 'dhcp'." 8 58
-# continue
-# fi
-# ;;
-# 3) return 2 ;;
-# *) return 1 ;;
-# esac
-# done
-# }
-
-# set_gateway() {
-# local result exitcode
-# if [[ "$NET" == "dhcp" ]]; then
-# GATE=""
-# printf "%bGateway IP Address: %bDefault%b\n" "$GATEWAY$BOLD$DGN" "$BGN" "$CL"
-# return 0
-# fi
-
-# while true; do
-# result=$(dialog --backtitle "[dev] Proxmox VE Helper Scripts" \
-# --title "Gateway IP" \
-# --inputbox "Enter gateway IP address" 8 58 \
-# --extra-button --extra-label "Back" --ok-label "Next" --cancel-label "Exit" 3>&1 1>&2 2>&3)
-# exitcode=$?
-
-# case $exitcode in
-# 0)
-# if [[ -z "$result" ]]; then
-# dialog --msgbox "Gateway IP address cannot be empty" 8 58
-# continue
-# elif [[ "$result" =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}$ ]]; then
-# GATE=",gw=$result"
-# printf "%bGateway IP Address: %b%s%b\n" "$GATEWAY$BOLD$DGN" "$BGN" "$result" "$CL"
-# return 0
-# else
-# dialog --msgbox "Invalid IP address format" 8 58
-# fi
-# ;;
-# 3) return 2 ;;
-# *) return 1 ;;
-# esac
-# done
-# }
-
-# set_apt_cacher() {
-# local result exitcode
-# if [[ "$var_os" == "alpine" ]]; then
-# APT_CACHER=""
-# APT_CACHER_IP=""
-# return 0
-# fi
-
-# result=$(dialog --backtitle "[dev] Proxmox VE Helper Scripts" \
-# --title "APT-Cacher IP" \
-# --inputbox "Set APT-Cacher IP (leave blank for none)" 8 58 \
-# --extra-button --extra-label "Back" --ok-label "Next" --cancel-label "Exit" 3>&1 1>&2 2>&3)
-# exitcode=$?
-
-# case $exitcode in
-# 0)
-# APT_CACHER_IP="$result"
-# APT_CACHER="${APT_CACHER_IP:+yes}"
-# printf "%bAPT-Cacher IP Address: %b%s%b\n" "$NETWORK$BOLD$DGN" "$BGN" "${APT_CACHER_IP:-Default}" "$CL"
-# return 0
-# ;;
-# 3) return 2 ;;
-# *) return 1 ;;
-# esac
-# }
-
-# toggle_ipv6() {
-# dialog --backtitle "[dev] Proxmox VE Helper Scripts" \
-# --title "IPv6" \
-# --yesno "Disable IPv6?" 10 58 \
-# --extra-button --extra-label "Back" --ok-label "Yes" --cancel-label "No"
-# case $? in
-# 0) DISABLEIP6="yes" ;;
-# 1) DISABLEIP6="no" ;;
-# 3) return 2 ;;
-# *) return 1 ;;
-# esac
-# printf "%bDisable IPv6: %b%s%b\n" "$DISABLEIPV6$BOLD$DGN" "$BGN" "$DISABLEIP6" "$CL"
-# return 0
-# }
-# set_mtu() {
-# local result exitcode
-# result=$(dialog --backtitle "[dev] Proxmox VE Helper Scripts" \
-# --title "MTU SIZE" \
-# --inputbox "Set Interface MTU Size (leave blank for default [1500])" 8 58 "" \
-# --extra-button --extra-label "Back" --ok-label "Next" --cancel-label "Exit" 3>&1 1>&2 2>&3)
-# exitcode=$?
-
-# case $exitcode in
-# 0)
-# if [[ -z "$result" ]]; then
-# MTU1="Default"
-# MTU=""
-# else
-# MTU1="$result"
-# MTU=",mtu=$MTU1"
-# fi
-# printf "%bInterface MTU Size: %b%s%b\n" "$DEFAULT$BOLD$DGN" "$BGN" "$MTU1" "$CL"
-# return 0
-# ;;
-# 3) return 2 ;;
-# *) return 1 ;;
-# esac
-# }
-
-# set_dns_search_domain() {
-# local result exitcode
-# result=$(dialog --backtitle "[dev] Proxmox VE Helper Scripts" \
-# --title "DNS Search Domain" \
-# --inputbox "Set a DNS Search Domain (leave blank for HOST)" 8 58 "" \
-# --extra-button --extra-label "Back" --ok-label "Next" --cancel-label "Exit" 3>&1 1>&2 2>&3)
-# exitcode=$?
-
-# case $exitcode in
-# 0)
-# if [[ -z "$result" ]]; then
-# SX="Host"
-# SD=""
-# else
-# SX="$result"
-# SD="-searchdomain=$result"
-# fi
-# printf "%bDNS Search Domain: %b%s%b\n" "$SEARCH$BOLD$DGN" "$BGN" "$SX" "$CL"
-# return 0
-# ;;
-# 3) return 2 ;;
-# *) return 1 ;;
-# esac
-# }
-
-# set_dns_server() {
-# local result exitcode
-# result=$(dialog --backtitle "[dev] Proxmox VE Helper Scripts" \
-# --title "DNS SERVER IP" \
-# --inputbox "Set a DNS Server IP (leave blank for HOST)" 8 58 "" \
-# --extra-button --extra-label "Back" --ok-label "Next" --cancel-label "Exit" 3>&1 1>&2 2>&3)
-# exitcode=$?
-
-# case $exitcode in
-# 0)
-# if [[ -z "$result" ]]; then
-# NX="Host"
-# NS=""
-# else
-# NX="$result"
-# NS="-nameserver=$result"
-# fi
-# printf "%bDNS Server IP Address: %b%s%b\n" "$NETWORK$BOLD$DGN" "$BGN" "$NX" "$CL"
-# return 0
-# ;;
-# 3) return 2 ;;
-# *) return 1 ;;
-# esac
-# }
-
-# set_mac_address() {
-# local result exitcode
-# result=$(dialog --backtitle "[dev] Proxmox VE Helper Scripts" \
-# --title "MAC ADDRESS" \
-# --inputbox "Set a MAC Address (leave blank for generated MAC)" 8 58 "" \
-# --extra-button --extra-label "Back" --ok-label "Next" --cancel-label "Exit" 3>&1 1>&2 2>&3)
-# exitcode=$?
-
-# case $exitcode in
-# 0)
-# if [[ -z "$result" ]]; then
-# MAC1="Default"
-# MAC=""
-# else
-# MAC1="$result"
-# MAC=",hwaddr=$MAC1"
-# fi
-# printf "%bMAC Address: %b%s%b\n" "$MACADDRESS$BOLD$DGN" "$BGN" "$MAC1" "$CL"
-# return 0
-# ;;
-# 3) return 2 ;;
-# *) return 1 ;;
-# esac
-# }
-
-# set_vlan() {
-# local result exitcode
-# result=$(dialog --backtitle "[dev] Proxmox VE Helper Scripts" \
-# --title "VLAN" \
-# --inputbox "Set a VLAN (leave blank for no VLAN)" 8 58 "" \
-# --extra-button --extra-label "Back" --ok-label "Next" --cancel-label "Exit" 3>&1 1>&2 2>&3)
-# exitcode=$?
-
-# case $exitcode in
-# 0)
-# if [[ -z "$result" ]]; then
-# VLAN1="Default"
-# VLAN=""
-# else
-# VLAN1="$result"
-# VLAN=",tag=$VLAN1"
-# fi
-# printf "%bVlan: %b%s%b\n" "$VLANTAG$BOLD$DGN" "$BGN" "$VLAN1" "$CL"
-# return 0
-# ;;
-# 3) return 2 ;;
-# *) return 1 ;;
-# esac
-# }
-
-# set_tags() {
-# local result exitcode
-# result=$(dialog --backtitle "[dev] Proxmox VE Helper Scripts" \
-# --title "Advanced Tags" \
-# --inputbox "Set Custom Tags? [If you remove all, there will be no tags!]" 8 58 "$TAGS" \
-# --extra-button --extra-label "Back" --ok-label "Next" --cancel-label "Exit" 3>&1 1>&2 2>&3)
-# exitcode=$?
-
-# case $exitcode in
-# 0)
-# if [[ -n "$result" ]]; then
-# ADV_TAGS=$(tr -d '[:space:]' <<<"$result")
-# TAGS="$ADV_TAGS"
-# else
-# TAGS=";"
-# fi
-# printf "%bTags: %b%s%b\n" "$NETWORK$BOLD$DGN" "$BGN" "$TAGS" "$CL"
-# return 0
-# ;;
-# 3) return 2 ;;
-# *) return 1 ;;
-# esac
-# }
-
-# set_ssh_access() {
-# local result exitcode
-
-# if [[ "$PW" == -password* ]]; then
-# dialog --backtitle "[dev] Proxmox VE Helper Scripts" \
-# --title "SSH ACCESS" \
-# --yesno "Enable Root SSH Access?" 10 58 \
-# --extra-button --extra-label "Back" --ok-label "Yes" --cancel-label "No"
-# exitcode=$?
-# case $exitcode in
-# 0) SSH="yes" ;;
-# 1) SSH="no" ;;
-# 3) return 2 ;;
-# *) return 1 ;;
-# esac
-# else
-# SSH="no"
-# fi
-
-# printf "%bRoot SSH Access: %b%s%b\n" "$ROOTSSH$BOLD$DGN" "$BGN" "$SSH" "$CL"
-
-# if [[ "$SSH" == "yes" ]]; then
-# result=$(dialog --backtitle "[dev] Proxmox VE Helper Scripts" \
-# --title "SSH Key" \
-# --inputbox "SSH Authorized key for root (leave empty for none)" 8 58 "" \
-# --extra-button --extra-label "Back" --ok-label "Next" --cancel-label "Exit" 3>&1 1>&2 2>&3)
-# exitcode=$?
-# case $exitcode in
-# 0)
-# SSH_AUTHORIZED_KEY="$result"
-# return 0
-# ;;
-# 3) return 2 ;;
-# *) return 1 ;;
-# esac
-# else
-# SSH_AUTHORIZED_KEY=""
-# return 0
-# fi
-# }
-
-# set_fuse() {
-# dialog --backtitle "[dev] Proxmox VE Helper Scripts" \
-# --title "FUSE Support" \
-# --yesno "Enable FUSE (Filesystem in Userspace) support in the container?" 10 58 \
-# --extra-button --extra-label "Back" --ok-label "Yes" --cancel-label "No"
-# case $? in
-# 0) ENABLE_FUSE="yes" ;;
-# 1) ENABLE_FUSE="no" ;;
-# 3) return 2 ;;
-# *) return 1 ;;
-# esac
-# printf "%bFUSE (Filesystem in Userspace) Support: %b%s%b\n" "$FUSE$BOLD$DGN" "$BGN" "$ENABLE_FUSE" "$CL"
-# return 0
-# }
-
-# set_verbose() {
-# dialog --backtitle "[dev] Proxmox VE Helper Scripts" \
-# --title "VERBOSE MODE" \
-# --yesno "Enable Verbose Mode?" 10 58 \
-# --extra-button --extra-label "Back" --ok-label "Yes" --cancel-label "No"
-# case $? in
-# 0) VERB="yes" ;;
-# 1) VERB="no" ;;
-# 3) return 2 ;;
-# *) return 1 ;;
-# esac
-# printf "%bVerbose Mode: %b%s%b\n" "$SEARCH$BOLD$DGN" "$BGN" "$VERB" "$CL"
-# return 0
-# }
-
-# confirm_creation() {
-# dialog --backtitle "[dev] Proxmox VE Helper Scripts" \
-# --title "ADVANCED SETTINGS COMPLETE" \
-# --yesno "Ready to create ${APP} LXC?" 10 58 \
-# --extra-button --extra-label "Back" --ok-label "Yes" --cancel-label "No"
-# case $? in
-# 0)
-# printf "%bCreating a %s LXC using the above advanced settings%b\n" "$CREATING$BOLD$RD" "$APP" "$CL"
-# return 0
-# ;;
-# 3) return 2 ;;
-# *) return 1 ;;
-# esac
-# }
diff --git a/misc/backup_07052025/install copy.func b/misc/backup_07052025/install copy.func
deleted file mode 100644
index d2042d5d7..000000000
--- a/misc/backup_07052025/install copy.func
+++ /dev/null
@@ -1,281 +0,0 @@
-# Copyright (c) 2021-2025 tteck
-# Author: tteck (tteckster)
-# Co-Author: MickLesk
-# Co-Author: michelroegl-brunner
-# License: MIT
-# https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
-
-source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/core.func)
-load_functions
-
-# color() {
-# # Colors
-# YW=$(echo "\033[33m")
-# YWB=$(echo "\033[93m")
-# BL=$(echo "\033[36m")
-# RD=$(echo "\033[01;31m")
-# GN=$(echo "\033[1;92m")
-
-# # Formatting
-# CL=$(echo "\033[m")
-# BFR="\\r\\033[K"
-# BOLD=$(echo "\033[1m")
-# HOLD=" "
-# TAB=" "
-
-# # System
-# RETRY_NUM=10
-# RETRY_EVERY=3
-
-# # Icons
-# CM="${TAB}✔️${TAB}${CL}"
-# CROSS="${TAB}✖️${TAB}${CL}"
-# INFO="${TAB}💡${TAB}${CL}"
-# NETWORK="${TAB}📡${TAB}${CL}"
-# OS="${TAB}🖥️${TAB}${CL}"
-# OSVERSION="${TAB}🌟${TAB}${CL}"
-# HOSTNAME="${TAB}🏠${TAB}${CL}"
-# GATEWAY="${TAB}🌐${TAB}${CL}"
-# DEFAULT="${TAB}⚙️${TAB}${CL}"
-# }
-
-# Function to set STD mode based on verbosity
-set_std_mode() {
- if [ "$VERBOSE" = "yes" ]; then
- STD=""
- else
- STD="silent"
- fi
-}
-
-# Silent execution function
-silent() {
- "$@" >/dev/null 2>&1
-}
-
-# This function enables IPv6 if it's not disabled and sets verbose mode
-verb_ip6() {
- set_std_mode # Set STD mode based on VERBOSE
-
- if [ "$DISABLEIPV6" == "yes" ]; then
- echo "net.ipv6.conf.all.disable_ipv6 = 1" >>/etc/sysctl.conf
- $STD sysctl -p
- fi
-}
-
-# This function sets error handling options and defines the error_handler function to handle errors
-catch_errors() {
- set -Eeuo pipefail
- trap 'error_handler $LINENO "$BASH_COMMAND"' ERR
-}
-
-# This function handles errors
-error_handler() {
- source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/api.func)
- if [ -n "$SPINNER_PID" ] && ps -p "$SPINNER_PID" >/dev/null; then kill "$SPINNER_PID" >/dev/null; fi
- printf "\e[?25h"
- local exit_code="$?"
- local line_number="$1"
- local command="$2"
- local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
- echo -e "\n$error_message"
- [[ -n "${SPINNER_PID:-}" ]] && kill "$SPINNER_PID" &>/dev/null || true
-
- if [[ "$line_number" -eq 50 ]]; then
- echo -e "The silent function has suppressed the error, run the script with verbose mode enabled, which will provide more detailed output.\n"
- post_update_to_api "failed" "No error message, script ran in silent mode"
- else
- post_update_to_api "failed" "${command}"
- fi
-}
-
-# # This function displays a spinner.
-# spinner() {
-# local frames=('⠋' '⠙' '⠹' '⠸' '⠼' '⠴' '⠦' '⠧' '⠇' '⠏')
-# local spin_i=0
-# local interval=0.1
-# printf "\e[?25l"
-
-# local color="${YWB}"
-
-# while true; do
-# printf "\r ${color}%s${CL}" "${frames[spin_i]}"
-# spin_i=$(((spin_i + 1) % ${#frames[@]}))
-# sleep "$interval"
-# done
-# }
-
-# # This function displays an informational message with a yellow color.
-# msg_info() {
-# local msg="$1"
-# echo -ne "${TAB}${YW}${HOLD}${msg}${HOLD}"
-# spinner &
-# SPINNER_PID=$!
-# }
-
-# # This function displays a success message with a green color.
-# msg_ok() {
-# if [ -n "$SPINNER_PID" ] && ps -p "$SPINNER_PID" >/dev/null; then kill "$SPINNER_PID" >/dev/null; fi
-# printf "\e[?25h"
-# local msg="$1"
-# echo -e "${BFR}${CM}${GN}${msg}${CL}"
-# }
-
-# # This function displays a error message with a red color.
-# msg_error() {
-# if [ -n "$SPINNER_PID" ] && ps -p "$SPINNER_PID" >/dev/null; then kill "$SPINNER_PID" >/dev/null; fi
-# printf "\e[?25h"
-# local msg="$1"
-# echo -e "${BFR}${CROSS}${RD}${msg}${CL}"
-# }
-
-# This function sets up the Container OS by generating the locale, setting the timezone, and checking the network connection
-setting_up_container() {
- msg_info "Setting up Container OS"
- sed -i "/$LANG/ s/\(^# \)//" /etc/locale.gen
- locale_line=$(grep -v '^#' /etc/locale.gen | grep -E '^[a-zA-Z]' | awk '{print $1}' | head -n 1)
- echo "LANG=${locale_line}" >/etc/default/locale
- locale-gen >/dev/null
- export LANG=${locale_line}
- echo $tz >/etc/timezone
- ln -sf /usr/share/zoneinfo/$tz /etc/localtime
- for ((i = RETRY_NUM; i > 0; i--)); do
- if [ "$(hostname -I)" != "" ]; then
- break
- fi
- echo 1>&2 -en "${CROSS}${RD} No Network! "
- sleep $RETRY_EVERY
- done
- if [ "$(hostname -I)" = "" ]; then
- echo 1>&2 -e "\n${CROSS}${RD} No Network After $RETRY_NUM Tries${CL}"
- echo -e "${NETWORK}Check Network Settings"
- exit 1
- fi
- rm -rf /usr/lib/python3.*/EXTERNALLY-MANAGED
- systemctl disable -q --now systemd-networkd-wait-online.service
- msg_ok "Set up Container OS"
- msg_ok "Network Connected: ${BL}$(hostname -I)"
-}
-
-# This function checks the network connection by pinging a known IP address and prompts the user to continue if the internet is not connected
-network_check() {
- set +e
- trap - ERR
- ipv4_connected=false
- ipv6_connected=false
- sleep 1
- # Check IPv4 connectivity to Google, Cloudflare & Quad9 DNS servers.
- if ping -c 1 -W 1 1.1.1.1 &>/dev/null || ping -c 1 -W 1 8.8.8.8 &>/dev/null || ping -c 1 -W 1 9.9.9.9 &>/dev/null; then
- msg_ok "IPv4 Internet Connected"
- ipv4_connected=true
- else
- msg_error "IPv4 Internet Not Connected"
- fi
-
- # Check IPv6 connectivity to Google, Cloudflare & Quad9 DNS servers.
- if ping6 -c 1 -W 1 2606:4700:4700::1111 &>/dev/null || ping6 -c 1 -W 1 2001:4860:4860::8888 &>/dev/null || ping6 -c 1 -W 1 2620:fe::fe &>/dev/null; then
- msg_ok "IPv6 Internet Connected"
- ipv6_connected=true
- else
- msg_error "IPv6 Internet Not Connected"
- fi
-
- # If both IPv4 and IPv6 checks fail, prompt the user
- if [[ $ipv4_connected == false && $ipv6_connected == false ]]; then
- read -r -p "No Internet detected,would you like to continue anyway? " prompt
- if [[ "${prompt,,}" =~ ^(y|yes)$ ]]; then
- echo -e "${INFO}${RD}Expect Issues Without Internet${CL}"
- else
- echo -e "${NETWORK}Check Network Settings"
- exit 1
- fi
- fi
-
- RESOLVEDIP=$(getent hosts github.com | awk '{ print $1 }')
- if [[ -z "$RESOLVEDIP" ]]; then msg_error "DNS Lookup Failure"; else msg_ok "DNS Resolved github.com to ${BL}$RESOLVEDIP${CL}"; fi
- set -e
- trap 'error_handler $LINENO "$BASH_COMMAND"' ERR
-}
-
-# This function updates the Container OS by running apt-get update and upgrade
-update_os() {
- msg_info "Updating Container OS"
- if [[ "$CACHER" == "yes" ]]; then
- echo "Acquire::http::Proxy-Auto-Detect \"/usr/local/bin/apt-proxy-detect.sh\";" >/etc/apt/apt.conf.d/00aptproxy
- cat </usr/local/bin/apt-proxy-detect.sh
-#!/bin/bash
-if nc -w1 -z "${CACHER_IP}" 3142; then
- echo -n "http://${CACHER_IP}:3142"
-else
- echo -n "DIRECT"
-fi
-EOF
- chmod +x /usr/local/bin/apt-proxy-detect.sh
- fi
- $STD apt-get update
- $STD apt-get -o Dpkg::Options::="--force-confold" -y dist-upgrade
- rm -rf /usr/lib/python3.*/EXTERNALLY-MANAGED
- msg_ok "Updated Container OS"
-
- msg_info "Installing core dependencies"
- $STD apt-get update
- $STD apt-get install -y sudo curl mc gnupg2
- msg_ok "Core dependencies installed"
- source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/tools.func)
-
-}
-
-# This function modifies the message of the day (motd) and SSH settings
-motd_ssh() {
- grep -qxF "export TERM='xterm-256color'" /root/.bashrc || echo "export TERM='xterm-256color'" >>/root/.bashrc
-
- if [ -f "/etc/os-release" ]; then
- OS_NAME=$(grep ^NAME /etc/os-release | cut -d= -f2 | tr -d '"')
- OS_VERSION=$(grep ^VERSION_ID /etc/os-release | cut -d= -f2 | tr -d '"')
- elif [ -f "/etc/debian_version" ]; then
- OS_NAME="Debian"
- OS_VERSION=$(cat /etc/debian_version)
- fi
-
- PROFILE_FILE="/etc/profile.d/00_lxc-details.sh"
- echo "echo -e \"\"" >"$PROFILE_FILE"
- echo -e "echo -e \"${BOLD}${YW}${APPLICATION} LXC Container - DEV Repository${CL}\"" >>"$PROFILE_FILE"
- echo -e "echo -e \"${RD}WARNING: This is a DEVELOPMENT version (ProxmoxVED). Do NOT use in production!${CL}\"" >>"$PROFILE_FILE"
- echo -e "echo -e \"${YW} OS: ${GN}${OS_NAME} - Version: ${OS_VERSION}${CL}\"" >>"$PROFILE_FILE"
- echo -e "echo -e \"${YW} Hostname: ${GN}\$(hostname)${CL}\"" >>"$PROFILE_FILE"
- echo -e "echo -e \"${YW} IP Address: ${GN}\$(hostname -I | awk '{print \$1}')${CL}\"" >>"$PROFILE_FILE"
- echo -e "echo -e \"${YW} Repository: ${GN}https://github.com/community-scripts/ProxmoxVED${CL}\"" >>"$PROFILE_FILE"
- echo "echo \"\"" >>"$PROFILE_FILE"
-
- chmod -x /etc/update-motd.d/*
-
- if [[ "${SSH_ROOT}" == "yes" ]]; then
- sed -i "s/#PermitRootLogin prohibit-password/PermitRootLogin yes/g" /etc/ssh/sshd_config
- systemctl restart sshd
- fi
-}
-
-# This function customizes the container by modifying the getty service and enabling auto-login for the root user
-customize() {
- if [[ "$PASSWORD" == "" ]]; then
- msg_info "Customizing Container"
- GETTY_OVERRIDE="/etc/systemd/system/container-getty@1.service.d/override.conf"
- mkdir -p $(dirname $GETTY_OVERRIDE)
- cat <$GETTY_OVERRIDE
- [Service]
- ExecStart=
- ExecStart=-/sbin/agetty --autologin root --noclear --keep-baud tty%I 115200,38400,9600 \$TERM
-EOF
- systemctl daemon-reload
- systemctl restart $(basename $(dirname $GETTY_OVERRIDE) | sed 's/\.d//')
- msg_ok "Customized Container"
- fi
- echo "bash -c \"\$(curl -fsSL https://github.com/community-scripts/ProxmoxVED/raw/main/ct/${app}.sh)\"" >/usr/bin/update
- chmod +x /usr/bin/update
- if [[ -n "${SSH_AUTHORIZED_KEY}" ]]; then
- mkdir -p /root/.ssh
- echo "${SSH_AUTHORIZED_KEY}" >/root/.ssh/authorized_keys
- chmod 700 /root/.ssh
- chmod 600 /root/.ssh/authorized_keys
- fi
-}
diff --git a/misc/backup_07052025/msg.func b/misc/backup_07052025/msg.func
deleted file mode 100644
index 1257d0d2e..000000000
--- a/misc/backup_07052025/msg.func
+++ /dev/null
@@ -1,98 +0,0 @@
-#!/usr/bin/env bash
-
-# Spinner state
-declare -A SPINNER_PIDS
-declare -A SPINNER_MSGS
-declare -A MSG_SHOWN
-
-# Color definitions (adjust as needed)
-RD='\033[0;31m'
-GN='\033[0;32m'
-YW='\033[0;33m'
-CL='\033[0m'
-CM='✔'
-CROSS='✘'
-
-# Trap cleanup
-trap cleanup_spinners EXIT INT TERM HUP
-
-# Hash function for message ID
-msg_hash() {
- local input="$1"
- echo -n "$input" | sha1sum | awk '{print $1}'
-}
-
-# Start a spinner for a specific message
-start_spinner_for_msg() {
- local msg="$1"
- local id
- id=$(msg_hash "$msg")
-
- [[ -n "${MSG_SHOWN["$id"]+x}" ]] && return
- MSG_SHOWN["$id"]=1
- SPINNER_MSGS["$id"]="$msg"
-
- local frames=(⠋ ⠙ ⠹ ⠸ ⠼ ⠴ ⠦ ⠧ ⠇ ⠏)
- local interval=0.1
- local spin_i=0
-
- {
- while true; do
- printf "\r\e[2K%s %b" "${frames[spin_i]}" "${YW}${msg}${CL}" >&2
- spin_i=$(((spin_i + 1) % ${#frames[@]}))
- sleep "$interval"
- done
- } &
-
- SPINNER_PIDS["$id"]=$!
- disown "${SPINNER_PIDS["$id"]}"
-}
-
-# Stop the spinner for a specific message
-stop_spinner_for_msg() {
- local msg="$1"
- local id
- id=$(msg_hash "$msg")
-
- if [[ -n "${SPINNER_PIDS["$id"]+x}" ]] && ps -p "${SPINNER_PIDS["$id"]}" >/dev/null 2>&1; then
- kill "${SPINNER_PIDS["$id"]}" 2>/dev/null
- wait "${SPINNER_PIDS["$id"]}" 2>/dev/null || true
- fi
-
- unset SPINNER_PIDS["$id"]
- unset SPINNER_MSGS["$id"]
- unset MSG_SHOWN["$id"]
-}
-
-# Cleanup all active spinners
-cleanup_spinners() {
- for id in "${!SPINNER_PIDS[@]}"; do
- if ps -p "${SPINNER_PIDS[$id]}" >/dev/null 2>&1; then
- kill "${SPINNER_PIDS[$id]}" 2>/dev/null
- wait "${SPINNER_PIDS[$id]}" 2>/dev/null || true
- fi
- unset SPINNER_PIDS["$id"]
- unset SPINNER_MSGS["$id"]
- unset MSG_SHOWN["$id"]
- done
-}
-
-# Show info message with spinner
-msg_info() {
- local msg="$1"
- start_spinner_for_msg "$msg"
-}
-
-# End spinner and show success message
-msg_ok() {
- local msg="$1"
- stop_spinner_for_msg "$msg"
- printf "\r\e[2K%s %b\n" "${CM}" "${GN}${msg}${CL}" >&2
-}
-
-# End spinner and show error message
-msg_error() {
- local msg="$1"
- stop_spinner_for_msg "$msg"
- printf "\r\e[2K%s %b\n" "${CROSS}" "${RD}${msg}${CL}" >&2
-}
diff --git a/misc/build.func b/misc/build.func
index 50664ea81..29e18d9ef 100644
--- a/misc/build.func
+++ b/misc/build.func
@@ -1,9 +1,24 @@
-# Copyright (c) 2021-2025 tteck
-# Author: tteck (tteckster)
-# Co-Author: MickLesk
-# Co-Author: michelroegl-brunner
+#!/usr/bin/env bash
+# Copyright (c) 2021-2025 community-scripts ORG
+# Author: tteck (tteckster) | MickLesk | michelroegl-brunner
# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
+# Revision: 1
+# ==============================================================================
+# SECTION 1: CORE INITIALIZATION & VARIABLES
+# ==============================================================================
+
+# ------------------------------------------------------------------------------
+# variables()
+#
+# - Normalize application name (NSAPP = lowercase, no spaces)
+# - Build installer filename (var_install)
+# - Define regex for integer validation
+# - Fetch hostname of Proxmox node
+# - Set default values for diagnostics/method
+# - Generate random UUID for tracking
+# - Get Proxmox VE version and kernel version
+# ------------------------------------------------------------------------------
variables() {
NSAPP=$(echo "${APP,,}" | tr -d ' ') # This function sets the NSAPP variable by converting the value of the APP variable to lowercase and removing any spaces.
var_install="${NSAPP}-install" # sets the var_install variable by appending "-install" to the value of NSAPP.
@@ -12,83 +27,160 @@ variables() {
DIAGNOSTICS="yes" # sets the DIAGNOSTICS variable to "yes", used for the API call.
METHOD="default" # sets the METHOD variable to "default", used for the API call.
RANDOM_UUID="$(cat /proc/sys/kernel/random/uuid)" # generates a random UUID and sets it to the RANDOM_UUID variable.
- CT_TYPE=${var_unprivileged:-$CT_TYPE}
+ SESSION_ID="${RANDOM_UUID:0:8}" # Short session ID (first 8 chars of UUID) for log files
+ CTTYPE="${CTTYPE:-${CT_TYPE:-1}}"}
+
+ # Get Proxmox VE version and kernel version
+ if command -v pveversion >/dev/null 2>&1; then
+ PVEVERSION="$(pveversion | awk -F'/' '{print $2}' | awk -F'-' '{print $1}')"
+ else
+ PVEVERSION="N/A"
+ fi
+ KERNEL_VERSION=$(uname -r)
+
+ # Capture app-declared defaults (for precedence logic)
+ # These values are set by the app script BEFORE default.vars is loaded
+ # If app declares higher values than default.vars, app values take precedence
+ if [[ -n "${var_cpu:-}" && "${var_cpu}" =~ ^[0-9]+$ ]]; then
+ export APP_DEFAULT_CPU="${var_cpu}"
+ fi
+ if [[ -n "${var_ram:-}" && "${var_ram}" =~ ^[0-9]+$ ]]; then
+ export APP_DEFAULT_RAM="${var_ram}"
+ fi
+ if [[ -n "${var_disk:-}" && "${var_disk}" =~ ^[0-9]+$ ]]; then
+ export APP_DEFAULT_DISK="${var_disk}"
+ fi
}
+# -----------------------------------------------------------------------------
+# Community-Scripts bootstrap loader
+# - Always sources build.func from remote
+# - Updates local core files only if build.func changed
+# - Local cache: /usr/local/community-scripts/core
+# -----------------------------------------------------------------------------
+
+# FUNC_DIR="/usr/local/community-scripts/core"
+# mkdir -p "$FUNC_DIR"
+
+# BUILD_URL="https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func"
+# BUILD_REV="$FUNC_DIR/build.rev"
+# DEVMODE="${DEVMODE:-no}"
+
+# # --- Step 1: fetch build.func content once, compute hash ---
+# build_content="$(curl -fsSL "$BUILD_URL")" || {
+# echo "❌ Failed to fetch build.func"
+# exit 1
+# }
+
+# newhash=$(printf "%s" "$build_content" | sha256sum | awk '{print $1}')
+# oldhash=$(cat "$BUILD_REV" 2>/dev/null || echo "")
+
+# # --- Step 2: if build.func changed, offer update for core files ---
+# if [ "$newhash" != "$oldhash" ]; then
+# echo "⚠️ build.func changed!"
+
+# while true; do
+# read -rp "Refresh local core files? [y/N/diff]: " ans
+# case "$ans" in
+# [Yy]*)
+# echo "$newhash" >"$BUILD_REV"
+
+# update_func_file() {
+# local file="$1"
+# local url="https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/$file"
+# local local_path="$FUNC_DIR/$file"
+
+# echo "⬇️ Downloading $file ..."
+# curl -fsSL "$url" -o "$local_path" || {
+# echo "❌ Failed to fetch $file"
+# exit 1
+# }
+# echo "✔️ Updated $file"
+# }
+
+# update_func_file core.func
+# update_func_file error_handler.func
+# update_func_file tools.func
+# break
+# ;;
+# [Dd]*)
+# for file in core.func error_handler.func tools.func; do
+# local_path="$FUNC_DIR/$file"
+# url="https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/$file"
+# remote_tmp="$(mktemp)"
+
+# curl -fsSL "$url" -o "$remote_tmp" || continue
+
+# if [ -f "$local_path" ]; then
+# echo "🔍 Diff for $file:"
+# diff -u "$local_path" "$remote_tmp" || echo "(no differences)"
+# else
+# echo "📦 New file $file will be installed"
+# fi
+
+# rm -f "$remote_tmp"
+# done
+# ;;
+# *)
+# echo "❌ Skipped updating local core files"
+# break
+# ;;
+# esac
+# done
+# else
+# if [ "$DEVMODE" != "yes" ]; then
+# echo "✔️ build.func unchanged → using existing local core files"
+# fi
+# fi
+
+# if [ -n "${_COMMUNITY_SCRIPTS_LOADER:-}" ]; then
+# return 0 2>/dev/null || exit 0
+# fi
+# _COMMUNITY_SCRIPTS_LOADER=1
+
+# # --- Step 3: always source local versions of the core files ---
+# source "$FUNC_DIR/core.func"
+# source "$FUNC_DIR/error_handler.func"
+# source "$FUNC_DIR/tools.func"
+
+# # --- Step 4: finally, source build.func directly from memory ---
+# # (no tmp file needed)
+# source <(printf "%s" "$build_content")
+
+# ------------------------------------------------------------------------------
+# Load core + error handler functions from community-scripts repo
+#
+# - Prefer curl if available, fallback to wget
+# - Load: core.func, error_handler.func, api.func
+# - Initialize error traps after loading
+# ------------------------------------------------------------------------------
+
source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/api.func)
if command -v curl >/dev/null 2>&1; then
source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/core.func)
+ source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/error_handler.func)
load_functions
+ catch_errors
#echo "(build.func) Loaded core.func via curl"
elif command -v wget >/dev/null 2>&1; then
source <(wget -qO- https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/core.func)
+ source <(wget -qO- https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/error_handler.func)
load_functions
+ catch_errors
#echo "(build.func) Loaded core.func via wget"
fi
-# This function enables error handling in the script by setting options and defining a trap for the ERR signal.
-catch_errors() {
- set -Eeo pipefail
- trap 'error_handler $LINENO "$BASH_COMMAND"' ERR
-}
+# ------------------------------------------------------------------------------
+# maxkeys_check()
+#
+# - Reads kernel keyring limits (maxkeys, maxbytes)
+# - Checks current usage for LXC user (UID 100000)
+# - Warns if usage is close to limits and suggests sysctl tuning
+# - Exits if thresholds are exceeded
+# - https://cleveruptime.com/docs/files/proc-key-users | https://docs.kernel.org/security/keys/core.html
+# ------------------------------------------------------------------------------
-# This function is called when an error occurs. It receives the exit code, line number, and command that caused the error, and displays an error message.
-error_handler() {
- source /dev/stdin <<<$(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/api.func)
- printf "\e[?25h"
- local exit_code="$?"
- local line_number="$1"
- local command="$2"
- local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
- post_update_to_api "failed" "${command}"
- echo -e "\n$error_message\n"
-
- if [[ -n "$CT_ID" ]]; then
- read -p "Remove this Container? " prompt
- if [[ "${prompt,,}" =~ ^(y|yes)$ ]]; then
- pct stop "$CT_ID" &>/dev/null
- pct destroy "$CT_ID" &>/dev/null
- msg_ok "Removed this Container"
- fi
- fi
-}
-# Check if the shell is using bash
-shell_check() {
- if [[ "$(basename "$SHELL")" != "bash" ]]; then
- clear
- msg_error "Your default shell is currently not set to Bash. To use these scripts, please switch to the Bash shell."
- echo -e "\nExiting..."
- sleep 2
- exit
- fi
-}
-
-# Run as root only
-root_check() {
- if [[ "$(id -u)" -ne 0 || $(ps -o comm= -p $PPID) == "sudo" ]]; then
- clear
- msg_error "Please run this script as root."
- echo -e "\nExiting..."
- sleep 2
- exit
- fi
-}
-
-# This function checks the version of Proxmox Virtual Environment (PVE) and exits if the version is not supported.
-pve_check() {
- if ! pveversion | grep -Eq "pve-manager/8\.[0-4](\.[0-9]+)*"; then
- msg_error "${CROSS}${RD}This version of Proxmox Virtual Environment is not supported"
- echo -e "Requires Proxmox Virtual Environment Version 8.1 or later."
- echo -e "Exiting..."
- sleep 2
- exit
- fi
-}
-
-# When a node is running tens of containers, it's possible to exceed the kernel's cryptographic key storage allocations.
-# These are tuneable, so verify if the currently deployment is approaching the limits, advise the user on how to tune the limits, and exit the script.
-# https://cleveruptime.com/docs/files/proc-key-users | https://docs.kernel.org/security/keys/core.html
maxkeys_check() {
# Read kernel parameters
per_user_maxkeys=$(cat /proc/sys/kernel/keys/maxkeys 2>/dev/null || echo 0)
@@ -129,21 +221,16 @@ maxkeys_check() {
exit 1
fi
- echo -e "${CM}${GN} All kernel key limits are within safe thresholds.${CL}"
+ # Silent success - only show errors if they exist
}
-# This function checks the system architecture and exits if it's not "amd64".
-arch_check() {
- if [ "$(dpkg --print-architecture)" != "amd64" ]; then
- echo -e "\n ${INFO}${YWB}This script will not work with PiMox! \n"
- echo -e "\n ${YWB}Visit https://github.com/asylumexp/Proxmox for ARM64 support. \n"
- echo -e "Exiting..."
- sleep 2
- exit
- fi
-}
-
-# Function to get the current IP address based on the distribution
+# ------------------------------------------------------------------------------
+# get_current_ip()
+#
+# - Returns current container IP depending on OS type
+# - Debian/Ubuntu: uses `hostname -I`
+# - Alpine: parses eth0 via `ip -4 addr`
+# ------------------------------------------------------------------------------
get_current_ip() {
if [ -f /etc/os-release ]; then
# Check for Debian/Ubuntu (uses hostname -I)
@@ -159,7 +246,12 @@ get_current_ip() {
echo "$CURRENT_IP"
}
-# Function to update the IP address in the MOTD file
+# ------------------------------------------------------------------------------
+# update_motd_ip()
+#
+# - Updates /etc/motd with current container IP
+# - Removes old IP entries to avoid duplicates
+# ------------------------------------------------------------------------------
update_motd_ip() {
MOTD_FILE="/etc/motd"
@@ -173,147 +265,112 @@ update_motd_ip() {
fi
}
-# This function checks if the script is running through SSH and prompts the user to confirm if they want to proceed or exit.
-ssh_check() {
- if [ -n "${SSH_CLIENT:+x}" ]; then
- if whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --defaultno --title "SSH DETECTED" --yesno "It's advisable to utilize the Proxmox shell rather than SSH, as there may be potential complications with variable retrieval. Proceed using SSH?" 10 72; then
- whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --msgbox --title "Proceed using SSH" "You've chosen to proceed using SSH. If any issues arise, please run the script in the Proxmox shell before creating a repository issue." 10 72
- else
- clear
- echo "Exiting due to SSH usage. Please consider using the Proxmox shell."
- exit
- fi
+# ------------------------------------------------------------------------------
+# install_ssh_keys_into_ct()
+#
+# - Installs SSH keys into container root account if SSH is enabled
+# - Uses pct push or direct input to authorized_keys
+# - Falls back to warning if no keys provided
+# ------------------------------------------------------------------------------
+install_ssh_keys_into_ct() {
+ [[ "$SSH" != "yes" ]] && return 0
+
+ if [[ -n "$SSH_KEYS_FILE" && -s "$SSH_KEYS_FILE" ]]; then
+ msg_info "Installing selected SSH keys into CT ${CTID}"
+ pct exec "$CTID" -- sh -c 'mkdir -p /root/.ssh && chmod 700 /root/.ssh' || {
+ msg_error "prepare /root/.ssh failed"
+ return 1
+ }
+ pct push "$CTID" "$SSH_KEYS_FILE" /root/.ssh/authorized_keys >/dev/null 2>&1 ||
+ pct exec "$CTID" -- sh -c "cat > /root/.ssh/authorized_keys" <"$SSH_KEYS_FILE" || {
+ msg_error "write authorized_keys failed"
+ return 1
+ }
+ pct exec "$CTID" -- sh -c 'chmod 600 /root/.ssh/authorized_keys' || true
+ msg_ok "Installed SSH keys into CT ${CTID}"
+ return 0
fi
+
+  # Fallback: no SSH keys were selected
+ msg_warn "No SSH keys to install (skipping)."
+ return 0
}
-# select_storage() {
-# local CLASS=$1 CONTENT CONTENT_LABEL
-# case $CLASS in
-# container)
-# CONTENT='rootdir'
-# CONTENT_LABEL='Container'
-# ;;
-# template)
-# CONTENT='vztmpl'
-# CONTENT_LABEL='Template'
-# ;;
-# iso)
-# CONTENT='iso'
-# CONTENT_LABEL='ISO image'
-# ;;
-# images)
-# CONTENT='images'
-# CONTENT_LABEL='VM Disk image'
-# ;;
-# backup)
-# CONTENT='backup'
-# CONTENT_LABEL='Backup'
-# ;;
-# snippets)
-# CONTENT='snippets'
-# CONTENT_LABEL='Snippets'
-# ;;
-# *)
-# msg_error "Invalid storage class '$CLASS'."
-# exit 201
-# ;;
-# esac
-
-# command -v whiptail >/dev/null || {
-# msg_error "whiptail missing."
-# exit 220
-# }
-# command -v numfmt >/dev/null || {
-# msg_error "numfmt missing."
-# exit 221
-# }
-
-# local -a MENU
-# while read -r line; do
-# local TAG=$(echo "$line" | awk '{print $1}')
-# local TYPE=$(echo "$line" | awk '{printf "%-10s", $2}')
-# local FREE=$(echo "$line" | numfmt --field 4-6 --from-unit=K --to=iec --format %.2f | awk '{printf "%9sB", $6}')
-# MENU+=("$TAG" "Type: $TYPE Free: $FREE" "OFF")
-# done < <(pvesm status -content "$CONTENT" | awk 'NR>1')
-
-# if [ ${#MENU[@]} -eq 0 ]; then
-# msg_error "No storage found for content type '$CONTENT'."
-# exit 203
-# fi
-
-# if [ $((${#MENU[@]} / 3)) -eq 1 ]; then
-# echo "${MENU[0]}"
-# return
-# fi
-
-# local STORAGE
-# STORAGE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "Storage Pools" --radiolist \
-# "Which storage pool for ${CONTENT_LABEL,,}?\n(Spacebar to select)" \
-# 16 70 6 "${MENU[@]}" 3>&1 1>&2 2>&3) || {
-# msg_error "Storage selection cancelled by user."
-# exit 202
-# }
-# echo "$STORAGE"
-# }
-
-# manage_default_storage() {
-# local file="/usr/local/community-scripts/default_storage"
-# mkdir -p /usr/local/community-scripts
-
-# local tmpl=$(select_storage template)
-# local cont=$(select_storage container)
-
-# cat <"$file"
-# TEMPLATE_STORAGE=$tmpl
-# CONTAINER_STORAGE=$cont
-# EOF
-
-# msg_ok "Default Storage set: Template=${BL}$tmpl${CL} ${GN}|${CL} Container=${BL}$cont${CL}"
-# whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \
-# --msgbox "Default Storage set:\n\nTemplate: $tmpl\nContainer: $cont" 10 58
-# }
-
+# ------------------------------------------------------------------------------
+# base_settings()
+#
+# - Defines all base/default variables for container creation
+# - Reads from environment variables (var_*)
+# - Provides fallback defaults for OS type/version
+# - App-specific values take precedence when they are HIGHER (for CPU, RAM, DISK)
+# ------------------------------------------------------------------------------
base_settings() {
# Default Settings
- CT_TYPE="1"
- DISK_SIZE="4"
- CORE_COUNT="1"
- RAM_SIZE="1024"
- VERBOSE="${1:-no}"
- PW=""
- CT_ID=$NEXTID
- HN=$NSAPP
- BRG="vmbr0"
- NET="dhcp"
- IPV6_METHOD="none"
- IPV6_STATIC=""
- GATE=""
- APT_CACHER=""
- APT_CACHER_IP=""
- #DISABLEIP6="no"
- MTU=""
- SD=""
- NS=""
- MAC=""
- VLAN=""
- SSH="no"
- SSH_AUTHORIZED_KEY=""
- UDHCPC_FIX=""
- TAGS="community-script;"
- ENABLE_FUSE="${1:-no}"
- ENABLE_TUN="${1:-no}"
+ CT_TYPE=${var_unprivileged:-"1"}
- # Override default settings with variables from ct script
- CT_TYPE=${var_unprivileged:-$CT_TYPE}
- DISK_SIZE=${var_disk:-$DISK_SIZE}
- CORE_COUNT=${var_cpu:-$CORE_COUNT}
- RAM_SIZE=${var_ram:-$RAM_SIZE}
- VERB=${var_verbose:-$VERBOSE}
- TAGS="${TAGS}${var_tags:-}"
- ENABLE_FUSE="${var_fuse:-$ENABLE_FUSE}"
- ENABLE_TUN="${var_tun:-$ENABLE_TUN}"
- APT_CACHER="${var_apt_cacher:-$APT_CACHER}"
- APT_CACHER_IP="${var_apt_cacher_ip:-$APT_CACHER_IP}"
+ # Resource allocation: App defaults take precedence if HIGHER
+ # Compare app-declared values (saved in APP_DEFAULT_*) with current var_* values
+ local final_disk="${var_disk:-4}"
+ local final_cpu="${var_cpu:-1}"
+ local final_ram="${var_ram:-1024}"
+
+ # If app declared higher values, use those instead
+ if [[ -n "${APP_DEFAULT_DISK:-}" && "${APP_DEFAULT_DISK}" =~ ^[0-9]+$ ]]; then
+ if [[ "${APP_DEFAULT_DISK}" -gt "${final_disk}" ]]; then
+ final_disk="${APP_DEFAULT_DISK}"
+ fi
+ fi
+
+ if [[ -n "${APP_DEFAULT_CPU:-}" && "${APP_DEFAULT_CPU}" =~ ^[0-9]+$ ]]; then
+ if [[ "${APP_DEFAULT_CPU}" -gt "${final_cpu}" ]]; then
+ final_cpu="${APP_DEFAULT_CPU}"
+ fi
+ fi
+
+ if [[ -n "${APP_DEFAULT_RAM:-}" && "${APP_DEFAULT_RAM}" =~ ^[0-9]+$ ]]; then
+ if [[ "${APP_DEFAULT_RAM}" -gt "${final_ram}" ]]; then
+ final_ram="${APP_DEFAULT_RAM}"
+ fi
+ fi
+
+ DISK_SIZE="${final_disk}"
+ CORE_COUNT="${final_cpu}"
+ RAM_SIZE="${final_ram}"
+ VERBOSE=${var_verbose:-"${1:-no}"}
+ PW=${var_pw:-""}
+ CT_ID=${var_ctid:-$NEXTID}
+ HN=${var_hostname:-$NSAPP}
+ BRG=${var_brg:-"vmbr0"}
+ NET=${var_net:-"dhcp"}
+ IPV6_METHOD=${var_ipv6_method:-"none"}
+ IPV6_STATIC=${var_ipv6_static:-""}
+ GATE=${var_gateway:-""}
+ APT_CACHER=${var_apt_cacher:-""}
+ APT_CACHER_IP=${var_apt_cacher_ip:-""}
+
+ # Runtime check: Verify APT cacher is reachable if configured
+ if [[ -n "$APT_CACHER_IP" && "$APT_CACHER" == "yes" ]]; then
+ if ! curl -s --connect-timeout 2 "http://${APT_CACHER_IP}:3142" >/dev/null 2>&1; then
+ msg_warn "APT Cacher configured but not reachable at ${APT_CACHER_IP}:3142"
+ msg_custom "⚠️" "${YW}" "Disabling APT Cacher for this installation"
+ APT_CACHER=""
+ APT_CACHER_IP=""
+ else
+ msg_ok "APT Cacher verified at ${APT_CACHER_IP}:3142"
+ fi
+ fi
+
+ MTU=${var_mtu:-""}
+ SD=${var_storage:-""}
+ NS=${var_ns:-""}
+ MAC=${var_mac:-""}
+ VLAN=${var_vlan:-""}
+ SSH=${var_ssh:-"no"}
+ SSH_AUTHORIZED_KEY=${var_ssh_authorized_key:-""}
+ UDHCPC_FIX=${var_udhcpc_fix:-""}
+ TAGS="community-script,${var_tags:-}"
+ ENABLE_FUSE=${var_fuse:-"${1:-no}"}
+ ENABLE_TUN=${var_tun:-"${1:-no}"}
# Since these 2 are only defined outside of default_settings function, we add a temporary fallback. TODO: To align everything, we should add these as constant variables (e.g. OSTYPE and OSVERSION), but that would currently require updating the default_settings function for all existing scripts
if [ -z "$var_os" ]; then
@@ -323,109 +380,113 @@ base_settings() {
var_version="12"
fi
}
-write_config() {
- mkdir -p /opt/community-scripts
- # This function writes the configuration to a file.
- if whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --defaultno --title "Write configfile" --yesno "Do you want to write the selections to a config file?" 10 60; then
- FILEPATH="/opt/community-scripts/${NSAPP}.conf"
- if [[ ! -f $FILEPATH ]]; then
- cat <"$FILEPATH"
-# ${NSAPP} Configuration File
-# Generated on $(date)
-CT_TYPE="${CT_TYPE}"
-DISK_SIZE="${DISK_SIZE}"
-CORE_COUNT="${CORE_COUNT}"
-RAM_SIZE="${RAM_SIZE}"
-HN="${HN}"
-BRG="${BRG}"
-APT_CACHER_IP="${APT_CACHER_IP:-none}"
-DISABLEIP6="${DISABLEIP6}"
-PW='${PW:-none}'
-SSH="${SSH}"
-SSH_AUTHORIZED_KEY="${SSH_AUTHORIZED_KEY}"
-VERBOSE="${VERBOSE}"
-TAGS="${TAGS:-none}"
-VLAN="${VLAN:-none}"
-MTU="${MTU:-1500}"
-GATE="${GATE:-none}"
-SD="${SD:-none}"
-MAC="${MAC:-none}"
-NS="${NS:-none}"
-NET="${NET}"
-FUSE="${ENABLE_FUSE}"
-
-EOF
- echo -e "${INFO}${BOLD}${GN}Writing configuration to ${FILEPATH}${CL}"
- else
- echo -e "${INFO}${BOLD}${RD}Configuration file already exists at ${FILEPATH}${CL}"
- if whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --defaultno --title "Overwrite configfile" --yesno "Do you want to overwrite the existing config file?" 10 60; then
- rm -f "$FILEPATH"
- cat <"$FILEPATH"
-# ${NSAPP} Configuration File
-# Generated on $(date)
-
-CT_TYPE="${CT_TYPE}"
-DISK_SIZE="${DISK_SIZE}"
-CORE_COUNT="${CORE_COUNT}"
-RAM_SIZE="${RAM_SIZE}"
-HN="${HN}"
-BRG="${BRG}"
-APT_CACHER_IP="${APT_CACHER_IP:-none}"
-DISABLEIP6="${DISABLEIP6}"
-PW="${PW:-none}"
-SSH="${SSH}"
-SSH_AUTHORIZED_KEY="${SSH_AUTHORIZED_KEY}"
-VERBOSE="${VERBOSE}"
-TAGS="${TAGS:-none}"
-VLAN="${VLAN:-none}"
-MTU="${MTU:-1500}"
-GATE="${GATE:-none}"
-SD="${SD:-none}"
-MAC="${MAC:-none}"
-NS="${NS:-none}"
-NET="${NET}"
-FUSE="${ENABLE_FUSE}"
-
-EOF
- echo -e "${INFO}${BOLD}${GN}Writing configuration to ${FILEPATH}${CL}"
- else
- echo -e "${INFO}${BOLD}${RD}Configuration file not overwritten${CL}"
- fi
- fi
- fi
-}
-
-# This function displays the default values for various settings.
+# ------------------------------------------------------------------------------
+# echo_default()
+#
+# - Prints summary of default values (ID, OS, type, disk, RAM, CPU, etc.)
+# - Uses icons and formatting for readability
+# - Convert CT_TYPE to description
+# ------------------------------------------------------------------------------
echo_default() {
- # Convert CT_TYPE to description
CT_TYPE_DESC="Unprivileged"
if [ "$CT_TYPE" -eq 0 ]; then
CT_TYPE_DESC="Privileged"
fi
-
- # Output the selected values with icons
+ echo -e "${INFO}${BOLD}${DGN}PVE Version ${PVEVERSION} (Kernel: ${KERNEL_VERSION})${CL}"
echo -e "${CONTAINERID}${BOLD}${DGN}Container ID: ${BGN}${CT_ID}${CL}"
echo -e "${OS}${BOLD}${DGN}Operating System: ${BGN}$var_os ($var_version)${CL}"
echo -e "${CONTAINERTYPE}${BOLD}${DGN}Container Type: ${BGN}$CT_TYPE_DESC${CL}"
echo -e "${DISKSIZE}${BOLD}${DGN}Disk Size: ${BGN}${DISK_SIZE} GB${CL}"
echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}${CORE_COUNT}${CL}"
echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}${RAM_SIZE} MiB${CL}"
- if [ "$VERB" == "yes" ]; then
+ if [ "$VERBOSE" == "yes" ]; then
echo -e "${SEARCH}${BOLD}${DGN}Verbose Mode: ${BGN}Enabled${CL}"
fi
echo -e "${CREATING}${BOLD}${BL}Creating a ${APP} LXC using the above default settings${CL}"
echo -e " "
}
-# This function is called when the user decides to exit the script. It clears the screen and displays an exit message.
+# ------------------------------------------------------------------------------
+# exit_script()
+#
+# - Called when user cancels an action
+# - Clears screen and exits gracefully
+# ------------------------------------------------------------------------------
exit_script() {
clear
echo -e "\n${CROSS}${RD}User exited script${CL}\n"
exit
}
-# This function allows the user to configure advanced settings for the script.
+# ------------------------------------------------------------------------------
+# find_host_ssh_keys()
+#
+# - Scans system for available SSH keys
+# - Supports defaults (~/.ssh, /etc/ssh/authorized_keys)
+# - Returns list of files containing valid SSH public keys
+# - Sets FOUND_HOST_KEY_COUNT to number of keys found
+# ------------------------------------------------------------------------------
+find_host_ssh_keys() {
+  local re='(ssh-(rsa|ed25519)|ecdsa-sha2-nistp256|sk-(ssh-ed25519|ecdsa-sha2-nistp256))'
+  local -a files=() cand=()
+  local g="${var_ssh_import_glob:-}"
+  local total=0 f base c
+
+  shopt -s nullglob
+  if [[ -n "$g" ]]; then
+    for pat in $g; do cand+=($pat); done
+  else
+    cand+=(/root/.ssh/authorized_keys /root/.ssh/authorized_keys2)
+    cand+=(/root/.ssh/*.pub)
+    cand+=(/etc/ssh/authorized_keys /etc/ssh/authorized_keys.d/*)
+  fi
+  shopt -u nullglob
+
+  for f in "${cand[@]}"; do
+    [[ -f "$f" && -r "$f" ]] || continue
+    base="$(basename -- "$f")"
+    case "$base" in
+    known_hosts | known_hosts.* | config) continue ;;
+    id_*) [[ "$f" != *.pub ]] && continue ;;
+    esac
+
+    # CRLF safe check for host keys
+    c=$(tr -d '\r' <"$f" | awk '
+      /^[[:space:]]*#/ {next}
+      /^[[:space:]]*$/ {next}
+      {print}
+    ' | grep -E -c "$re" || true) # FIX: $re must expand; '"$re"' matched the literal text "$re" and always counted 0
+
+    if ((c > 0)); then
+      files+=("$f")
+      total=$((total + c))
+    fi
+  done
+
+  # Fallback to /root/.ssh/authorized_keys
+  if ((${#files[@]} == 0)) && [[ -r /root/.ssh/authorized_keys ]]; then
+    if grep -E -q "$re" /root/.ssh/authorized_keys; then
+      files+=(/root/.ssh/authorized_keys)
+      total=$((total + $(grep -E -c "$re" /root/.ssh/authorized_keys || echo 0)))
+    fi
+  fi
+
+  FOUND_HOST_KEY_COUNT="$total"
+  (
+    IFS=:
+    echo "${files[*]}"
+  )
+}
+
+# ------------------------------------------------------------------------------
+# advanced_settings()
+#
+# - Interactive whiptail menu for advanced configuration
+# - Lets user set container type, password, CT ID, hostname, disk, CPU, RAM
+# - Supports IPv4/IPv6, DNS, MAC, VLAN, tags, SSH keys, FUSE, verbose mode
+# - Ends with confirmation or re-entry if cancelled
+# ------------------------------------------------------------------------------
advanced_settings() {
whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --msgbox --title "Here is an instructional tip:" "To make a selection, use the Spacebar." 8 58
# Setting Default Tag for Advanced Settings
@@ -443,7 +504,8 @@ advanced_settings() {
if [ "$CT_TYPE" -eq 0 ]; then
CT_TYPE_DESC="Privileged"
fi
- echo -e "${OS}${BOLD}${DGN}Operating System: ${BGN}$var_os | ${OSVERSION}${BOLD}${DGN}Version: ${BGN}$var_version${CL}"
+ echo -e "${INFO}${BOLD}${DGN}PVE Version ${PVEVERSION} (Kernel: ${KERNEL_VERSION})${CL}"
+ echo -e "${OS}${BOLD}${DGN}Operating System: ${BGN}$var_os |${OSVERSION}${BOLD}${DGN}Version: ${BGN}$var_version${CL}"
echo -e "${CONTAINERTYPE}${BOLD}${DGN}Container Type: ${BGN}$CT_TYPE_DESC${CL}"
fi
else
@@ -460,6 +522,7 @@ advanced_settings() {
if [ "$CT_TYPE" -eq 0 ]; then
CT_TYPE_DESC="Privileged"
fi
+ echo -e "${INFO}${BOLD}${DGN}PVE Version ${PVEVERSION} (Kernel: ${KERNEL_VERSION})${CL}"
echo -e "${OS}${BOLD}${DGN}Operating System: ${BGN}$var_os${CL}"
echo -e "${OSVERSION}${BOLD}${DGN}Version: ${BGN}$var_version${CL}"
echo -e "${CONTAINERTYPE}${BOLD}${DGN}Container Type: ${BGN}$CT_TYPE_DESC${CL}"
@@ -509,16 +572,14 @@ advanced_settings() {
fi
done
- if CT_ID=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --inputbox "Set Container ID" 8 58 "$NEXTID" --title "CONTAINER ID" 3>&1 1>&2 2>&3); then
+ if CT_ID=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Container ID" 8 58 "$NEXTID" --title "CONTAINER ID" 3>&1 1>&2 2>&3); then
if [ -z "$CT_ID" ]; then
CT_ID="$NEXTID"
- echo -e "${CONTAINERID}${BOLD}${DGN}Container ID: ${BGN}$CT_ID${CL}"
- else
- echo -e "${CONTAINERID}${BOLD}${DGN}Container ID: ${BGN}$CT_ID${CL}"
fi
else
exit_script
fi
+ echo -e "${CONTAINERID}${BOLD}${DGN}Container ID: ${BGN}$CT_ID${CL}"
while true; do
if CT_NAME=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --inputbox "Set Hostname" 8 58 "$NSAPP" --title "HOSTNAME" 3>&1 1>&2 2>&3); then
@@ -618,7 +679,21 @@ advanced_settings() {
BRG="vmbr0"
echo -e "${BRIDGE}${BOLD}${DGN}Bridge: ${BGN}$BRG${CL}"
else
- BRG=$(whiptail --backtitle "Proxmox VE Helper Scripts" --menu "Select network bridge:" 15 40 6 $(echo "$BRIDGES" | awk '{print $0, "Bridge"}') 3>&1 1>&2 2>&3)
+ # Build bridge menu with descriptions
+ BRIDGE_MENU_OPTIONS=()
+ while IFS= read -r bridge; do
+ if [[ -n "$bridge" ]]; then
+ # Get description from Proxmox built-in method - find comment for this specific bridge
+ description=$(grep -A 10 "iface $bridge" /etc/network/interfaces | grep '^#' | head -n1 | sed 's/^#\s*//')
+ if [[ -n "$description" ]]; then
+ BRIDGE_MENU_OPTIONS+=("$bridge" "${description}")
+ else
+ BRIDGE_MENU_OPTIONS+=("$bridge" " ")
+ fi
+ fi
+ done <<<"$BRIDGES"
+
+ BRG=$(whiptail --backtitle "Proxmox VE Helper Scripts" --menu "Select network bridge: " 18 55 6 "${BRIDGE_MENU_OPTIONS[@]}" 3>&1 1>&2 2>&3)
if [[ -z "$BRG" ]]; then
exit_script
else
@@ -849,24 +924,9 @@ advanced_settings() {
exit_script
fi
- SSH_AUTHORIZED_KEY="$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --inputbox "SSH Authorized key for root (leave empty for none)" 8 58 --title "SSH Key" 3>&1 1>&2 2>&3)"
-
- if [[ -z "${SSH_AUTHORIZED_KEY}" ]]; then
- SSH_AUTHORIZED_KEY=""
- fi
-
- if [[ "$PW" == -password* || -n "$SSH_AUTHORIZED_KEY" ]]; then
- if (whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --defaultno --title "SSH ACCESS" --yesno "Enable Root SSH Access?" 10 58); then
- SSH="yes"
- else
- SSH="no"
- fi
- echo -e "${ROOTSSH}${BOLD}${DGN}Root SSH Access: ${BGN}$SSH${CL}"
- else
- SSH="no"
- echo -e "${ROOTSSH}${BOLD}${DGN}Root SSH Access: ${BGN}$SSH${CL}"
- fi
-
+ configure_ssh_settings
+ export SSH_KEYS_FILE
+ echo -e "${ROOTSSH}${BOLD}${DGN}Root SSH Access: ${BGN}$SSH${CL}"
if (whiptail --backtitle "Proxmox VE Helper Scripts" --defaultno --title "FUSE Support" --yesno "Enable FUSE support?\nRequired for tools like rclone, mergerfs, AppImage, etc." 10 58); then
ENABLE_FUSE="yes"
else
@@ -883,15 +943,22 @@ advanced_settings() {
if (whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --title "ADVANCED SETTINGS COMPLETE" --yesno "Ready to create ${APP} LXC?" 10 58); then
echo -e "${CREATING}${BOLD}${RD}Creating a ${APP} LXC using the above advanced settings${CL}"
- write_config
else
clear
header_info
+ echo -e "${INFO}${BOLD}${DGN}PVE Version ${PVEVERSION} (Kernel: ${KERNEL_VERSION})${CL}"
echo -e "${ADVANCED}${BOLD}${RD}Using Advanced Settings on node $PVEHOST_NAME${CL}"
advanced_settings
fi
}
+# ------------------------------------------------------------------------------
+# diagnostics_check()
+#
+# - Ensures diagnostics config file exists at /usr/local/community-scripts/diagnostics
+# - Asks user whether to send anonymous diagnostic data
+# - Saves DIAGNOSTICS=yes/no in the config file
+# ------------------------------------------------------------------------------
diagnostics_check() {
if ! [ -d "/usr/local/community-scripts" ]; then
mkdir -p /usr/local/community-scripts
@@ -911,13 +978,11 @@ DIAGNOSTICS=yes
#To send diagnostics, set the variable 'DIAGNOSTICS' to "yes" in /usr/local/community-scripts/diagnostics, or use the menue.
#This will enable the diagnostics feature.
#The following information will be sent:
-#"ct_type"
#"disk_size"
#"core_count"
#"ram_size"
#"os_type"
#"os_version"
-#"disableip6"
#"nsapp"
#"method"
#"pve_version"
@@ -938,13 +1003,11 @@ DIAGNOSTICS=no
#To send diagnostics, set the variable 'DIAGNOSTICS' to "yes" in /usr/local/community-scripts/diagnostics, or use the menue.
#This will enable the diagnostics feature.
#The following information will be sent:
-#"ct_type"
#"disk_size"
#"core_count"
#"ram_size"
#"os_type"
#"os_version"
-#"disableip6"
#"nsapp"
#"method"
#"pve_version"
@@ -960,6 +1023,558 @@ EOF
}
+# ------------------------------------------------------------------------------
+# default_var_settings
+#
+# - Ensures /usr/local/community-scripts/default.vars exists (creates if missing)
+# - Loads var_* values from default.vars (safe parser, no source/eval)
+# - Precedence: ENV var_* > default.vars > built-in defaults
+# - Maps var_verbose → VERBOSE
+# - Calls base_settings "$VERBOSE" and echo_default
+# ------------------------------------------------------------------------------
+default_var_settings() {
+ # Allowed var_* keys (alphabetically sorted)
+ # Note: Removed var_ctid (can only exist once), var_ipv6_static (static IPs are unique)
+ local VAR_WHITELIST=(
+ var_apt_cacher var_apt_cacher_ip var_brg var_cpu var_disk var_fuse
+ var_gateway var_hostname var_ipv6_method var_mac var_mtu
+ var_net var_ns var_pw var_ram var_tags var_tun var_unprivileged
+ var_verbose var_vlan var_ssh var_ssh_authorized_key var_container_storage var_template_storage
+ )
+
+ # Snapshot: environment variables (highest precedence)
+ declare -A _HARD_ENV=()
+ local _k
+ for _k in "${VAR_WHITELIST[@]}"; do
+ if printenv "$_k" >/dev/null 2>&1; then _HARD_ENV["$_k"]=1; fi
+ done
+
+ # Find default.vars location
+ local _find_default_vars
+ _find_default_vars() {
+ local f
+ for f in \
+ /usr/local/community-scripts/default.vars \
+ "$HOME/.config/community-scripts/default.vars" \
+ "./default.vars"; do
+ [ -f "$f" ] && {
+ echo "$f"
+ return 0
+ }
+ done
+ return 1
+ }
+ # Allow override of storages via env (for non-interactive use cases)
+ [ -n "${var_template_storage:-}" ] && TEMPLATE_STORAGE="$var_template_storage"
+ [ -n "${var_container_storage:-}" ] && CONTAINER_STORAGE="$var_container_storage"
+
+ # Create once, with storages already selected, no var_ctid/var_hostname lines
+ local _ensure_default_vars
+ _ensure_default_vars() {
+ _find_default_vars >/dev/null 2>&1 && return 0
+
+ local canonical="/usr/local/community-scripts/default.vars"
+ # Silent creation - no msg_info output
+ mkdir -p /usr/local/community-scripts
+
+ # Pick storages before writing the file (always ask unless only one)
+ # Create a minimal temp file to write into
+ : >"$canonical"
+
+ # Base content (no var_ctid / var_hostname here)
+ cat >"$canonical" <<'EOF'
+# Community-Scripts defaults (var_* only). Lines starting with # are comments.
+# Precedence: ENV var_* > default.vars > built-ins.
+# Keep keys alphabetically sorted.
+
+# Container type
+var_unprivileged=1
+
+# Resources
+var_cpu=1
+var_disk=4
+var_ram=1024
+
+# Network
+var_brg=vmbr0
+var_net=dhcp
+var_ipv6_method=none
+# var_gateway=
+# var_vlan=
+# var_mtu=
+# var_mac=
+# var_ns=
+
+# SSH
+var_ssh=no
+# var_ssh_authorized_key=
+
+# APT cacher (optional - with example)
+# var_apt_cacher=yes
+# var_apt_cacher_ip=192.168.1.10
+
+# Features/Tags/verbosity
+var_fuse=no
+var_tun=no
+var_tags=community-script
+var_verbose=no
+
+# Security (root PW) – empty => autologin
+# var_pw=
+EOF
+
+ # Now choose storages (always prompt unless just one exists)
+ choose_and_set_storage_for_file "$canonical" template
+ choose_and_set_storage_for_file "$canonical" container
+
+ chmod 0644 "$canonical"
+ # Silent creation - no output message
+ }
+
+ # Whitelist check
+ local _is_whitelisted_key
+ _is_whitelisted_key() {
+ local k="$1"
+ local w
+ for w in "${VAR_WHITELIST[@]}"; do [ "$k" = "$w" ] && return 0; done
+ return 1
+ }
+
+ # Safe parser for KEY=VALUE lines
+ local _load_vars_file
+ _load_vars_file() {
+ local file="$1"
+ [ -f "$file" ] || return 0
+ msg_info "Loading defaults from ${file}"
+ local line key val
+ while IFS= read -r line || [ -n "$line" ]; do
+ line="${line#"${line%%[![:space:]]*}"}"
+ line="${line%"${line##*[![:space:]]}"}"
+ [[ -z "$line" || "$line" == \#* ]] && continue
+ if [[ "$line" =~ ^([A-Za-z_][A-Za-z0-9_]*)=(.*)$ ]]; then
+ local var_key="${BASH_REMATCH[1]}"
+ local var_val="${BASH_REMATCH[2]}"
+
+ [[ "$var_key" != var_* ]] && continue
+ _is_whitelisted_key "$var_key" || {
+ msg_debug "Ignore non-whitelisted ${var_key}"
+ continue
+ }
+
+ # Strip quotes
+ if [[ "$var_val" =~ ^\"(.*)\"$ ]]; then
+ var_val="${BASH_REMATCH[1]}"
+ elif [[ "$var_val" =~ ^\'(.*)\'$ ]]; then
+ var_val="${BASH_REMATCH[1]}"
+ fi
+
+      # Defensive re-strip of surrounding quotes (values are normally unquoted by the regex above)
+ case $var_val in
+ \"*\")
+ var_val=${var_val#\"}
+ var_val=${var_val%\"}
+ ;;
+ \'*\')
+ var_val=${var_val#\'}
+ var_val=${var_val%\'}
+ ;;
+ esac # Hard env wins
+ [[ -n "${_HARD_ENV[$var_key]:-}" ]] && continue
+ # Set only if not already exported
+ [[ -z "${!var_key+x}" ]] && export "${var_key}=${var_val}"
+ else
+ msg_warn "Malformed line in ${file}: ${line}"
+ fi
+ done <"$file"
+ msg_ok "Loaded ${file}"
+ }
+
+ # 1) Ensure file exists
+ _ensure_default_vars
+
+ # 2) Load file
+ local dv
+ dv="$(_find_default_vars)" || {
+ msg_error "default.vars not found after ensure step"
+ return 1
+ }
+ _load_vars_file "$dv"
+
+ # 3) Map var_verbose → VERBOSE
+ if [[ -n "${var_verbose:-}" ]]; then
+ case "${var_verbose,,}" in 1 | yes | true | on) VERBOSE="yes" ;; 0 | no | false | off) VERBOSE="no" ;; *) VERBOSE="${var_verbose}" ;; esac
+ else
+ VERBOSE="no"
+ fi
+
+ # 4) Apply base settings and show summary
+ METHOD="mydefaults-global"
+ base_settings "$VERBOSE"
+ header_info
+ echo -e "${DEFAULT}${BOLD}${BL}Using My Defaults (default.vars) on node $PVEHOST_NAME${CL}"
+ echo_default
+}
+
+# ------------------------------------------------------------------------------
+# get_app_defaults_path()
+#
+# - Returns full path for app-specific defaults file
+# - Example: /usr/local/community-scripts/defaults/<app>.vars
+# ------------------------------------------------------------------------------
+
+get_app_defaults_path() {
+ local n="${NSAPP:-${APP,,}}"
+ echo "/usr/local/community-scripts/defaults/${n}.vars"
+}
+
+# ------------------------------------------------------------------------------
+# maybe_offer_save_app_defaults
+#
+# - Called after advanced_settings returned with fully chosen values.
+# - If no .vars exists, offers to persist current advanced settings
+#   into /usr/local/community-scripts/defaults/<app>.vars
+# - Only writes whitelisted var_* keys.
+# - Extracts raw values from flags like ",gw=..." ",mtu=..." etc.
+# ------------------------------------------------------------------------------
+if ! declare -p VAR_WHITELIST >/dev/null 2>&1; then
+  # Note: Removed var_ctid (can only exist once), var_ipv6_static (static IPs are unique)
+  declare -ag VAR_WHITELIST=(
+    var_apt_cacher var_apt_cacher_ip var_brg var_cpu var_disk var_fuse
+    var_gateway var_hostname var_ipv6_method var_mac var_mtu
+    var_net var_ns var_pw var_ram var_searchdomain var_tags var_tun var_unprivileged
+    var_verbose var_vlan var_ssh var_ssh_authorized_key var_container_storage var_template_storage
+  )
+fi
+
+# Note: _is_whitelisted_key() is defined above in default_var_settings section
+
+_sanitize_value() {
+ # Disallow Command-Substitution / Shell-Meta
+ case "$1" in
+ *'$('* | *'`'* | *';'* | *'&'* | *'<('*)
+ echo ""
+ return 0
+ ;;
+ esac
+ echo "$1"
+}
+
+# Map-Parser: read var_* from file into _VARS_IN associative array
+# Note: Main _load_vars_file() with full validation is defined in default_var_settings section
+# This simplified version is used specifically for diff operations via _VARS_IN array
+declare -A _VARS_IN
+_load_vars_file_to_map() {
+ local file="$1"
+ [ -f "$file" ] || return 0
+ _VARS_IN=() # Clear array
+ local line key val
+ while IFS= read -r line || [ -n "$line" ]; do
+ line="${line#"${line%%[![:space:]]*}"}"
+ line="${line%"${line##*[![:space:]]}"}"
+ [ -z "$line" ] && continue
+ case "$line" in
+ \#*) continue ;;
+ esac
+ key=$(printf "%s" "$line" | cut -d= -f1)
+ val=$(printf "%s" "$line" | cut -d= -f2-)
+ case "$key" in
+ var_*)
+ if _is_whitelisted_key "$key"; then
+ _VARS_IN["$key"]="$val"
+ fi
+ ;;
+ esac
+ done <"$file"
+}
+
+# Diff function for two var_* files -> produces human-readable diff list for $1 (old) vs $2 (new)
+_build_vars_diff() {
+ local oldf="$1" newf="$2"
+ local k
+ local -A OLD=() NEW=()
+ _load_vars_file_to_map "$oldf"
+ for k in "${!_VARS_IN[@]}"; do OLD["$k"]="${_VARS_IN[$k]}"; done
+ _load_vars_file_to_map "$newf"
+ for k in "${!_VARS_IN[@]}"; do NEW["$k"]="${_VARS_IN[$k]}"; done
+
+ local out
+ out+="# Diff for ${APP} (${NSAPP})\n"
+ out+="# Old: ${oldf}\n# New: ${newf}\n\n"
+
+ local found_change=0
+
+ # Changed & Removed
+ for k in "${!OLD[@]}"; do
+ if [[ -v NEW["$k"] ]]; then
+ if [[ "${OLD[$k]}" != "${NEW[$k]}" ]]; then
+ out+="~ ${k}\n - old: ${OLD[$k]}\n + new: ${NEW[$k]}\n"
+ found_change=1
+ fi
+ else
+ out+="- ${k}\n - old: ${OLD[$k]}\n"
+ found_change=1
+ fi
+ done
+
+ # Added
+ for k in "${!NEW[@]}"; do
+ if [[ ! -v OLD["$k"] ]]; then
+ out+="+ ${k}\n + new: ${NEW[$k]}\n"
+ found_change=1
+ fi
+ done
+
+ if [[ $found_change -eq 0 ]]; then
+ out+="(No differences)\n"
+ fi
+
+ printf "%b" "$out"
+}
+
+# Build a temporary .vars file from the current advanced settings.
+# Echoes the temp file path on stdout; the caller is responsible for removing it.
+# Reads the globals populated by advanced_settings() (NET, GATE, MTU, VLAN, MAC,
+# NS, SD, SSH*, APT_CACHER*, CT_TYPE, CORE_COUNT, RAM_SIZE, DISK_SIZE, HN, ...).
+_build_current_app_vars_tmp() {
+  tmpf="$(mktemp /tmp/${NSAPP:-app}.vars.new.XXXXXX)"
+
+  # NET/GW — GATE carries a ",gw=" pct-option prefix; strip it to get the bare IP
+  _net="${NET:-}"
+  _gate=""
+  case "${GATE:-}" in
+  ,gw=*) _gate=$(echo "$GATE" | sed 's/^,gw=//') ;;
+  esac
+
+  # IPv6 — static address/gateway only apply when the method is "static"
+  # (note: the static values are captured but intentionally not persisted below)
+  _ipv6_method="${IPV6_METHOD:-auto}"
+  _ipv6_static=""
+  _ipv6_gateway=""
+  if [ "$_ipv6_method" = "static" ]; then
+    _ipv6_static="${IPV6_ADDR:-}"
+    _ipv6_gateway="${IPV6_GATE:-}"
+  fi
+
+  # MTU/VLAN/MAC — same pct-option prefix stripping (",mtu=", ",tag=", ",hwaddr=")
+  _mtu=""
+  _vlan=""
+  _mac=""
+  case "${MTU:-}" in
+  ,mtu=*) _mtu=$(echo "$MTU" | sed 's/^,mtu=//') ;;
+  esac
+  case "${VLAN:-}" in
+  ,tag=*) _vlan=$(echo "$VLAN" | sed 's/^,tag=//') ;;
+  esac
+  case "${MAC:-}" in
+  ,hwaddr=*) _mac=$(echo "$MAC" | sed 's/^,hwaddr=//') ;;
+  esac
+
+  # DNS / Searchdomain — these carry "-nameserver="/"-searchdomain=" flag prefixes
+  _ns=""
+  _searchdomain=""
+  case "${NS:-}" in
+  -nameserver=*) _ns=$(echo "$NS" | sed 's/^-nameserver=//') ;;
+  esac
+  case "${SD:-}" in
+  -searchdomain=*) _searchdomain=$(echo "$SD" | sed 's/^-searchdomain=//') ;;
+  esac
+
+  # SSH / APT / Features
+  _ssh="${SSH:-no}"
+  _ssh_auth="${SSH_AUTHORIZED_KEY:-}"
+  _apt_cacher="${APT_CACHER:-}"
+  _apt_cacher_ip="${APT_CACHER_IP:-}"
+  _fuse="${ENABLE_FUSE:-no}"
+  _tun="${ENABLE_TUN:-no}"
+  _tags="${TAGS:-}"
+  _verbose="${VERBOSE:-no}"
+
+  # Type / Resources / Identity
+  _unpriv="${CT_TYPE:-1}"
+  _cpu="${CORE_COUNT:-1}"
+  _ram="${RAM_SIZE:-1024}"
+  _disk="${DISK_SIZE:-4}"
+  _hostname="${HN:-$NSAPP}"
+
+  # Storage — prefer the session values, fall back to any var_* already set
+  _tpl_storage="${TEMPLATE_STORAGE:-${var_template_storage:-}}"
+  _ct_storage="${CONTAINER_STORAGE:-${var_container_storage:-}}"
+
+  # Emit only keys that have a value; _sanitize_value (defined elsewhere) is
+  # presumably responsible for escaping unsafe characters — confirm its contract.
+  {
+    echo "# App-specific defaults for ${APP} (${NSAPP})"
+    echo "# Generated on $(date -u '+%Y-%m-%dT%H:%M:%SZ')"
+    echo
+
+    echo "var_unprivileged=$(_sanitize_value "$_unpriv")"
+    echo "var_cpu=$(_sanitize_value "$_cpu")"
+    echo "var_ram=$(_sanitize_value "$_ram")"
+    echo "var_disk=$(_sanitize_value "$_disk")"
+
+    [ -n "${BRG:-}" ] && echo "var_brg=$(_sanitize_value "$BRG")"
+    [ -n "$_net" ] && echo "var_net=$(_sanitize_value "$_net")"
+    [ -n "$_gate" ] && echo "var_gateway=$(_sanitize_value "$_gate")"
+    [ -n "$_mtu" ] && echo "var_mtu=$(_sanitize_value "$_mtu")"
+    [ -n "$_vlan" ] && echo "var_vlan=$(_sanitize_value "$_vlan")"
+    [ -n "$_mac" ] && echo "var_mac=$(_sanitize_value "$_mac")"
+    [ -n "$_ns" ] && echo "var_ns=$(_sanitize_value "$_ns")"
+
+    [ -n "$_ipv6_method" ] && echo "var_ipv6_method=$(_sanitize_value "$_ipv6_method")"
+    # var_ipv6_static removed - static IPs are unique, can't be default
+
+    [ -n "$_ssh" ] && echo "var_ssh=$(_sanitize_value "$_ssh")"
+    [ -n "$_ssh_auth" ] && echo "var_ssh_authorized_key=$(_sanitize_value "$_ssh_auth")"
+
+    [ -n "$_apt_cacher" ] && echo "var_apt_cacher=$(_sanitize_value "$_apt_cacher")"
+    [ -n "$_apt_cacher_ip" ] && echo "var_apt_cacher_ip=$(_sanitize_value "$_apt_cacher_ip")"
+
+    [ -n "$_fuse" ] && echo "var_fuse=$(_sanitize_value "$_fuse")"
+    [ -n "$_tun" ] && echo "var_tun=$(_sanitize_value "$_tun")"
+    [ -n "$_tags" ] && echo "var_tags=$(_sanitize_value "$_tags")"
+    [ -n "$_verbose" ] && echo "var_verbose=$(_sanitize_value "$_verbose")"
+
+    [ -n "$_hostname" ] && echo "var_hostname=$(_sanitize_value "$_hostname")"
+    [ -n "$_searchdomain" ] && echo "var_searchdomain=$(_sanitize_value "$_searchdomain")"
+
+    [ -n "$_tpl_storage" ] && echo "var_template_storage=$(_sanitize_value "$_tpl_storage")"
+    [ -n "$_ct_storage" ] && echo "var_container_storage=$(_sanitize_value "$_ct_storage")"
+  } >"$tmpf"
+
+  echo "$tmpf"
+}
+
+# ------------------------------------------------------------------------------
+# maybe_offer_save_app_defaults()
+#
+# - Called after advanced_settings()
+# - Offers to save current values as app defaults if not existing
+# - If file exists: shows diff and allows Update, Keep, View Diff, or Cancel
+# - Cleans up its temp files on every path; always returns 0
+# ------------------------------------------------------------------------------
+maybe_offer_save_app_defaults() {
+  local app_vars_path
+  app_vars_path="$(get_app_defaults_path)"
+
+  # always build from current settings
+  local new_tmp diff_tmp
+  new_tmp="$(_build_current_app_vars_tmp)"
+  diff_tmp="$(mktemp -p /tmp "${NSAPP:-app}.vars.diff.XXXXXX")"
+
+  # 1) if no file → offer to create
+  if [[ ! -f "$app_vars_path" ]]; then
+    if whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \
+      --yesno "Save these advanced settings as defaults for ${APP}?\n\nThis will create:\n${app_vars_path}" 12 72; then
+      mkdir -p "$(dirname "$app_vars_path")"
+      install -m 0644 "$new_tmp" "$app_vars_path"
+      msg_ok "Saved app defaults: ${app_vars_path}"
+    fi
+    rm -f "$new_tmp" "$diff_tmp"
+    return 0
+  fi
+
+  # 2) if file exists → build diff
+  _build_vars_diff "$app_vars_path" "$new_tmp" >"$diff_tmp"
+
+  # if no differences → do nothing
+  # (matches the "(No differences)" sentinel line emitted by _build_vars_diff)
+  if grep -q "^(No differences)$" "$diff_tmp"; then
+    rm -f "$new_tmp" "$diff_tmp"
+    return 0
+  fi
+
+  # 3) if file exists → show menu with default selection "Update Defaults"
+  local app_vars_file
+  app_vars_file="$(basename "$app_vars_path")"
+
+  # Loop so "View Diff" can return to the menu; every other choice breaks out
+  while true; do
+    local sel
+    # A cancelled/ESC'd menu (non-zero exit) is treated the same as "Cancel"
+    sel="$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \
+      --title "APP DEFAULTS – ${APP}" \
+      --menu "Differences detected. What do you want to do?" 20 78 10 \
+      "Update Defaults" "Write new values to ${app_vars_file}" \
+      "Keep Current" "Keep existing defaults (no changes)" \
+      "View Diff" "Show a detailed diff" \
+      "Cancel" "Abort without changes" \
+      --default-item "Update Defaults" \
+      3>&1 1>&2 2>&3)" || { sel="Cancel"; }
+
+    case "$sel" in
+    "Update Defaults")
+      install -m 0644 "$new_tmp" "$app_vars_path"
+      msg_ok "Updated app defaults: ${app_vars_path}"
+      break
+      ;;
+    "Keep Current")
+      msg_custom "ℹ️" "${BL}" "Keeping current app defaults: ${app_vars_path}"
+      break
+      ;;
+    "View Diff")
+      whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \
+        --title "Diff – ${APP}" \
+        --scrolltext --textbox "$diff_tmp" 25 100
+      ;;
+    "Cancel" | *)
+      msg_custom "🚫" "${YW}" "Canceled. No changes to app defaults."
+      break
+      ;;
+    esac
+  done
+
+  rm -f "$new_tmp" "$diff_tmp"
+}
+
+# Ensure TEMPLATE_STORAGE/CONTAINER_STORAGE are resolved for the given vars file.
+# If the file already records both keys, adopt them into the session; otherwise
+# run the interactive storage picker, which also persists into the file.
+ensure_storage_selection_for_vars_file() {
+  local vf="$1"
+
+  # Read stored values (if any)
+  local tpl ct
+  tpl=$(grep -E '^var_template_storage=' "$vf" | cut -d= -f2-)
+  ct=$(grep -E '^var_container_storage=' "$vf" | cut -d= -f2-)
+
+  # Both keys present: use them as-is, no prompting
+  if [[ -n "$tpl" && -n "$ct" ]]; then
+    TEMPLATE_STORAGE="$tpl"
+    CONTAINER_STORAGE="$ct"
+    return 0
+  fi
+
+  # NOTE(review): if only ONE key is present, BOTH classes are re-prompted here
+  choose_and_set_storage_for_file "$vf" template
+  choose_and_set_storage_for_file "$vf" container
+
+  # Silent operation - no output message
+}
+
+# Toggle the opt-in diagnostics flag via a yes/no dialog and persist the new
+# value to /usr/local/community-scripts/diagnostics.
+diagnostics_menu() {
+  # Determine the value the user would switch TO (current default is "no")
+  local target
+  if [ "${DIAGNOSTICS:-no}" = "yes" ]; then
+    target="no"
+  else
+    target="yes"
+  fi
+
+  # The affirmative button is labelled with the capitalized target value
+  if whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \
+    --title "DIAGNOSTIC SETTINGS" \
+    --yesno "Send Diagnostics?\n\nCurrent: ${DIAGNOSTICS}" 10 58 \
+    --yes-button "${target^}" --no-button "Back"; then
+    DIAGNOSTICS="$target"
+    sed -i "s/^DIAGNOSTICS=.*/DIAGNOSTICS=${target}/" /usr/local/community-scripts/diagnostics
+    whiptail --msgbox "Diagnostics set to ${DIAGNOSTICS}." 8 58
+  fi
+}
+
+ensure_global_default_vars_file() {
+ local vars_path="/usr/local/community-scripts/default.vars"
+ if [[ ! -f "$vars_path" ]]; then
+ mkdir -p "$(dirname "$vars_path")"
+ touch "$vars_path"
+ fi
+ echo "$vars_path"
+}
+
+# ------------------------------------------------------------------------------
+# install_script()
+#
+# - Main entrypoint for installation mode
+# - Runs safety checks (pve_check, root_check, maxkeys_check, diagnostics_check)
+# - Builds interactive menu (Default, Verbose, Advanced, My Defaults, App Defaults, Diagnostics, Storage, Exit)
+# - Applies chosen settings and triggers container build
+# ------------------------------------------------------------------------------
install_script() {
pve_check
shell_check
@@ -972,109 +1587,235 @@ install_script() {
if systemctl is-active -q ping-instances.service; then
systemctl -q stop ping-instances.service
fi
+
NEXTID=$(pvesh get /cluster/nextid)
timezone=$(cat /etc/timezone)
+
+ # Show APP Header
header_info
+
+ # --- Support CLI argument as direct preset (default, advanced, …) ---
+ CHOICE="${mode:-${1:-}}"
+
+ # If no CLI argument → show whiptail menu
+ # Build menu dynamically based on available options
+ local appdefaults_option=""
+ local settings_option=""
+ local menu_items=(
+ "1" "Default Install"
+ "2" "Advanced Install"
+ "3" "My Defaults"
+ )
+
+ if [ -f "$(get_app_defaults_path)" ]; then
+ appdefaults_option="4"
+ menu_items+=("4" "App Defaults for ${APP}")
+ settings_option="5"
+ menu_items+=("5" "Settings")
+ else
+ settings_option="4"
+ menu_items+=("4" "Settings")
+ fi
+
+ if [ -z "$CHOICE" ]; then
+
+ TMP_CHOICE=$(whiptail \
+ --backtitle "Proxmox VE Helper Scripts" \
+ --title "Community-Scripts Options" \
+ --ok-button "Select" --cancel-button "Exit Script" \
+ --notags \
+ --menu "\nChoose an option:\n Use TAB or Arrow keys to navigate, ENTER to select.\n" \
+ 20 60 9 \
+ "${menu_items[@]}" \
+ --default-item "1" \
+ 3>&1 1>&2 2>&3) || exit_script
+ CHOICE="$TMP_CHOICE"
+ fi
+
+ APPDEFAULTS_OPTION="$appdefaults_option"
+ SETTINGS_OPTION="$settings_option"
+
+ # --- Main case ---
+ local defaults_target=""
+ local run_maybe_offer="no"
+ case "$CHOICE" in
+ 1 | default | DEFAULT)
+ header_info
+ echo -e "${DEFAULT}${BOLD}${BL}Using Default Settings on node $PVEHOST_NAME${CL}"
+ VERBOSE="no"
+ METHOD="default"
+ base_settings "$VERBOSE"
+ echo_default
+ defaults_target="$(ensure_global_default_vars_file)"
+ ;;
+ 2 | advanced | ADVANCED)
+ header_info
+
+ echo -e "${ADVANCED}${BOLD}${RD}Using Advanced Install on node $PVEHOST_NAME${CL}"
+ echo -e "${INFO}${BOLD}${DGN}PVE Version ${PVEVERSION} (Kernel: ${KERNEL_VERSION})${CL}"
+ METHOD="advanced"
+ base_settings
+ advanced_settings
+ defaults_target="$(ensure_global_default_vars_file)"
+ run_maybe_offer="yes"
+ ;;
+ 3 | mydefaults | MYDEFAULTS)
+ default_var_settings || {
+ msg_error "Failed to apply default.vars"
+ exit 1
+ }
+ defaults_target="/usr/local/community-scripts/default.vars"
+ ;;
+ "$APPDEFAULTS_OPTION" | appdefaults | APPDEFAULTS)
+ if [ -f "$(get_app_defaults_path)" ]; then
+ header_info
+ echo -e "${DEFAULT}${BOLD}${BL}Using App Defaults for ${APP} on node $PVEHOST_NAME${CL}"
+ METHOD="appdefaults"
+ base_settings
+ _load_vars_file "$(get_app_defaults_path)"
+ echo_default
+ defaults_target="$(get_app_defaults_path)"
+ else
+ msg_error "No App Defaults available for ${APP}"
+ exit 1
+ fi
+ ;;
+ "$SETTINGS_OPTION" | settings | SETTINGS)
+ settings_menu
+ defaults_target=""
+ ;;
+ *)
+ echo -e "${CROSS}${RD}Invalid option: $CHOICE${CL}"
+ exit 1
+ ;;
+ esac
+
+ if [[ -n "$defaults_target" ]]; then
+ ensure_storage_selection_for_vars_file "$defaults_target"
+ fi
+
+ if [[ "$run_maybe_offer" == "yes" ]]; then
+ maybe_offer_save_app_defaults
+ fi
+}
+
+edit_default_storage() {
+ local vf="/usr/local/community-scripts/default.vars"
+
+ # Ensure file exists
+ if [[ ! -f "$vf" ]]; then
+ mkdir -p "$(dirname "$vf")"
+ touch "$vf"
+ fi
+
+ # Let ensure_storage_selection_for_vars_file handle everything
+ ensure_storage_selection_for_vars_file "$vf"
+}
+
+settings_menu() {
while true; do
-
- TMP_CHOICE=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \
- --title "SETTINGS" \
- --menu "Choose an option:" 20 60 7 \
- "1" "Default Settings" \
- "2" "Default Settings (with verbose)" \
- "3" "Advanced Settings" \
- "4" "Use Config File" \
- "5" "Manage Default Storage" \
- "6" "Diagnostic Settings" \
- "7" "Exit" \
- --default-item "1" 3>&1 1>&2 2>&3) || true
-
- if [ -z "$TMP_CHOICE" ]; then
- echo -e "\n${CROSS}${RD}Menu canceled. Exiting script.${CL}\n"
- exit 0
+ local settings_items=(
+ "1" "Manage API-Diagnostic Setting"
+ "2" "Edit Default.vars"
+ "3" "Edit Default Storage"
+ )
+ if [ -f "$(get_app_defaults_path)" ]; then
+ settings_items+=("4" "Edit App.vars for ${APP}")
+ settings_items+=("5" "Exit")
+ else
+ settings_items+=("4" "Exit")
fi
- CHOICE="$TMP_CHOICE"
+ local choice
+ choice=$(whiptail --backtitle "Proxmox VE Helper Scripts" \
+ --title "Community-Scripts SETTINGS Menu" \
+ --ok-button "OK" --cancel-button "Back" \
+ --menu "\n\nChoose a settings option:\n\nUse TAB or Arrow keys to navigate, ENTER to select." 20 60 9 \
+ "${settings_items[@]}" \
+ 3>&1 1>&2 2>&3) || break
- case $CHOICE in
- 1)
- header_info
- echo -e "${DEFAULT}${BOLD}${BL}Using Default Settings on node $PVEHOST_NAME${CL}"
- VERBOSE="no"
- METHOD="default"
- base_settings "$VERBOSE"
- echo_default
- break
- ;;
- 2)
- header_info
- echo -e "${DEFAULT}${BOLD}${BL}Using Default Settings on node $PVEHOST_NAME (${VERBOSE_CROPPED}Verbose)${CL}"
- VERBOSE="yes"
- METHOD="default"
- base_settings "$VERBOSE"
- echo_default
- break
- ;;
- 3)
- header_info
- echo -e "${ADVANCED}${BOLD}${RD}Using Advanced Settings on node $PVEHOST_NAME${CL}"
- METHOD="advanced"
- base_settings
- advanced_settings
- break
- ;;
+ case "$choice" in
+ 1) diagnostics_menu ;;
+ 2) ${EDITOR:-nano} /usr/local/community-scripts/default.vars ;;
+ 3) edit_default_storage ;;
4)
- header_info
- echo -e "${INFO}${HOLD} ${GN}Using Config File on node $PVEHOST_NAME${CL}"
- METHOD="advanced"
- source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/config-file.func)
- config_file
- break
- ;;
-
- 5)
- manage_default_storage
- ;;
- 6)
- if [[ $DIAGNOSTICS == "yes" ]]; then
- if whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --title "DIAGNOSTICS SETTINGS" --yesno "Send Diagnostics of LXC Installation?\n\nCurrent setting: ${DIAGNOSTICS}" 10 58 \
- --yes-button "No" --no-button "Back"; then
- DIAGNOSTICS="no"
- sed -i 's/^DIAGNOSTICS=.*/DIAGNOSTICS=no/' /usr/local/community-scripts/diagnostics
- whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --title "DIAGNOSTICS SETTINGS" --msgbox "Diagnostics settings changed to ${DIAGNOSTICS}." 8 58
- fi
+ if [ -f "$(get_app_defaults_path)" ]; then
+ ${EDITOR:-nano} "$(get_app_defaults_path)"
else
- if whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --title "DIAGNOSTICS SETTINGS" --yesno "Send Diagnostics of LXC Installation?\n\nCurrent setting: ${DIAGNOSTICS}" 10 58 \
- --yes-button "Yes" --no-button "Back"; then
- DIAGNOSTICS="yes"
- sed -i 's/^DIAGNOSTICS=.*/DIAGNOSTICS=yes/' /usr/local/community-scripts/diagnostics
- whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --title "DIAGNOSTICS SETTINGS" --msgbox "Diagnostics settings changed to ${DIAGNOSTICS}." 8 58
- fi
+ exit_script
fi
;;
- 7)
- echo -e "\n${CROSS}${RD}Script terminated. Have a great day!${CL}\n"
- exit 0
- ;;
- *)
- echo -e "${CROSS}${RD}Invalid option, please try again.${CL}"
- ;;
+ 5) exit_script ;;
esac
done
}
+# ===== Unified storage selection & writing to vars files =====
+# Replace any existing assignment of a storage key in a vars file, then append
+# the new value, guaranteeing the key appears exactly once.
+_write_storage_to_vars() {
+  # $1 = vars_file, $2 = key (var_container_storage / var_template_storage), $3 = value
+  local file="$1" name="$2" value="$3"
+  # Drop both active and commented-out assignments so the key never duplicates
+  sed -i "/^[#[:space:]]*${name}=/d" "$file"
+  printf '%s=%s\n' "$name" "$value" >>"$file"
+}
+
+choose_and_set_storage_for_file() {
+  # $1 = vars_file, $2 = class ('container'|'template')
+  # Resolves a storage for the class, persists it into the vars file and
+  # exports it so later steps (e.g. app-default save) see the same value.
+  local vf="$1" class="$2" key="" current=""
+  case "$class" in
+  container) key="var_container_storage" ;;
+  template) key="var_template_storage" ;;
+  *)
+    msg_error "Unknown storage class: $class"
+    return 1
+    ;;
+  esac
+
+  # NOTE(review): 'current' is read but never used below — kept for a future preselection
+  current=$(awk -F= -v k="^${key}=" '$0 ~ k {print $2; exit}' "$vf")
+
+  # If only one storage exists for the content type, auto-pick. Else always ask (your wish #4).
+  local content="rootdir"
+  [[ "$class" == "template" ]] && content="vztmpl"
+  local count
+  count=$(pvesm status -content "$content" | awk 'NR>1{print $1}' | wc -l)
+
+  if [[ "$count" -eq 1 ]]; then
+    # Single candidate: take the only storage name from `pvesm status` output
+    STORAGE_RESULT=$(pvesm status -content "$content" | awk 'NR>1{print $1; exit}')
+    STORAGE_INFO=""
+  else
+    # If the current value is preselectable, we could show it, but per your requirement we always offer selection
+    # select_storage (defined elsewhere) is presumably expected to set STORAGE_RESULT — confirm
+    select_storage "$class" || return 1
+  fi
+
+  _write_storage_to_vars "$vf" "$key" "$STORAGE_RESULT"
+
+  # Keep environment in sync for later steps (e.g. app-default save)
+  if [[ "$class" == "container" ]]; then
+    export var_container_storage="$STORAGE_RESULT"
+    export CONTAINER_STORAGE="$STORAGE_RESULT"
+  else
+    export var_template_storage="$STORAGE_RESULT"
+    export TEMPLATE_STORAGE="$STORAGE_RESULT"
+  fi
+
+  # Silent operation - no output message
+}
+
+# ------------------------------------------------------------------------------
+# check_container_resources()
+#
+# - Compares host RAM/CPU with required values
+# - Warns if under-provisioned and asks user to continue or abort
+# ------------------------------------------------------------------------------
check_container_resources() {
- # Check actual RAM & Cores
current_ram=$(free -m | awk 'NR==2{print $2}')
current_cpu=$(nproc)
- # Check whether the current RAM is less than the required RAM or the CPU cores are less than required
if [[ "$current_ram" -lt "$var_ram" ]] || [[ "$current_cpu" -lt "$var_cpu" ]]; then
echo -e "\n${INFO}${HOLD} ${GN}Required: ${var_cpu} CPU, ${var_ram}MB RAM ${CL}| ${RD}Current: ${current_cpu} CPU, ${current_ram}MB RAM${CL}"
echo -e "${YWB}Please ensure that the ${APP} LXC is configured with at least ${var_cpu} vCPU and ${var_ram} MB RAM for the build process.${CL}\n"
echo -ne "${INFO}${HOLD} May cause data loss! ${INFO} Continue update with under-provisioned LXC? "
read -r prompt
- # Check if the input is 'yes', otherwise exit with status 1
if [[ ! ${prompt,,} =~ ^(yes)$ ]]; then
echo -e "${CROSS}${HOLD} ${YWB}Exiting based on user input.${CL}"
exit 1
@@ -1084,17 +1825,20 @@ check_container_resources() {
fi
}
+# ------------------------------------------------------------------------------
+# check_container_storage()
+#
+# - Checks /boot partition usage
+# - Warns if usage >80% and asks user confirmation before proceeding
+# ------------------------------------------------------------------------------
check_container_storage() {
- # Check if the /boot partition is more than 80% full
total_size=$(df /boot --output=size | tail -n 1)
local used_size=$(df /boot --output=used | tail -n 1)
usage=$((100 * used_size / total_size))
if ((usage > 80)); then
- # Prompt the user for confirmation to continue
echo -e "${INFO}${HOLD} ${YWB}Warning: Storage is dangerously low (${usage}%).${CL}"
echo -ne "Continue anyway? "
read -r prompt
- # Check if the input is 'y' or 'yes', otherwise exit with status 1
if [[ ! ${prompt,,} =~ ^(y|yes)$ ]]; then
echo -e "${CROSS}${HOLD}${YWB}Exiting based on user input.${CL}"
exit 1
@@ -1102,10 +1846,196 @@ check_container_storage() {
fi
}
+# ------------------------------------------------------------------------------
+# ssh_extract_keys_from_file()
+#
+# - Extracts valid SSH public keys from given file
+# - Supports RSA, Ed25519, ECDSA (incl. sk- FIDO variants) and filters out
+#   comments/blank/invalid lines; CRs are stripped so CRLF files also work
+# - Unreadable files are silently skipped (returns 0)
+# ------------------------------------------------------------------------------
+ssh_extract_keys_from_file() {
+  local f="$1"
+  [[ -r "$f" ]] || return 0
+  tr -d '\r' <"$f" | awk '
+    /^[[:space:]]*#/ {next}
+    /^[[:space:]]*$/ {next}
+    # bare form: type base64 [comment]
+    /^(ssh-(rsa|ed25519)|ecdsa-sha2-nistp256|sk-(ssh-ed25519|ecdsa-sha2-nistp256))[[:space:]]+/ {print; next}
+    # options-prefixed form: print from the first key-type token onward
+    {
+      match($0, /(ssh-(rsa|ed25519)|ecdsa-sha2-nistp256|sk-(ssh-ed25519|ecdsa-sha2-nistp256))[[:space:]]+/)
+      if (RSTART>0) { print substr($0, RSTART) }
+    }
+  '
+}
+
+# ------------------------------------------------------------------------------
+# ssh_build_choices_from_files()
+#
+# - Builds interactive whiptail checklist of available SSH keys
+# - Generates fingerprint, type and comment for each key
+# - Results are returned via globals: CHOICES (whiptail tag/desc/state triplets),
+#   COUNT (number of keys) and MAPFILE (temp file mapping "Kn|<raw key line>")
+# ------------------------------------------------------------------------------
+ssh_build_choices_from_files() {
+  local -a files=("$@")
+  CHOICES=()
+  COUNT=0
+  # NOTE(review): this MAPFILE temp file is not removed here — confirm the
+  # caller (or an exit trap) cleans it up
+  MAPFILE="$(mktemp)"
+  local id key typ fp cmt base ln=0
+
+  for f in "${files[@]}"; do
+    [[ -f "$f" && -r "$f" ]] || continue
+    base="$(basename -- "$f")"
+    # Skip non-key files; for id_* only accept the public half (*.pub)
+    case "$base" in
+    known_hosts | known_hosts.* | config) continue ;;
+    id_*) [[ "$f" != *.pub ]] && continue ;;
+    esac
+
+    # map every key in file
+    while IFS= read -r key; do
+      [[ -n "$key" ]] || continue
+
+      typ=""
+      fp=""
+      cmt=""
+      # Only the pure key part (without options) is already included in 'key'.
+      read -r _typ _b64 _cmt <<<"$key"
+      typ="${_typ:-key}"
+      cmt="${_cmt:-}"
+      # Fingerprint via ssh-keygen (if available)
+      if command -v ssh-keygen >/dev/null 2>&1; then
+        fp="$(printf '%s\n' "$key" | ssh-keygen -lf - 2>/dev/null | awk '{print $2}')"
+      fi
+      # Shorten long key comments so the checklist stays readable
+      [[ ${#cmt} -gt 40 ]] && cmt="${cmt:0:37}..."
+
+      ln=$((ln + 1))
+      COUNT=$((COUNT + 1))
+      id="K${COUNT}"
+      echo "${id}|${key}" >>"$MAPFILE"
+      CHOICES+=("$id" "[$typ] ${fp:+$fp }${cmt:+$cmt }— ${base}" "OFF")
+    done < <(ssh_extract_keys_from_file "$f")
+  done
+}
+
+# ------------------------------------------------------------------------------
+# ssh_discover_default_files()
+#
+# - Emits the standard candidate SSH key locations as a NUL-separated list:
+#   root's authorized_keys files, root's *.pub keys, and system-wide locations
+# ------------------------------------------------------------------------------
+ssh_discover_default_files() {
+  local -a candidates
+  # nullglob drops unmatched globs instead of keeping them as literal patterns
+  shopt -s nullglob
+  candidates=(
+    /root/.ssh/authorized_keys
+    /root/.ssh/authorized_keys2
+    /root/.ssh/*.pub
+    /etc/ssh/authorized_keys
+    /etc/ssh/authorized_keys.d/*
+  )
+  shopt -u nullglob
+  printf '%s\0' "${candidates[@]}"
+}
+
+configure_ssh_settings() {
+ SSH_KEYS_FILE="$(mktemp)"
+ : >"$SSH_KEYS_FILE"
+
+ IFS=$'\0' read -r -d '' -a _def_files < <(ssh_discover_default_files && printf '\0')
+ ssh_build_choices_from_files "${_def_files[@]}"
+ local default_key_count="$COUNT"
+
+ local ssh_key_mode
+ if [[ "$default_key_count" -gt 0 ]]; then
+ ssh_key_mode=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --title "SSH KEY SOURCE" --menu \
+ "Provision SSH keys for root:" 14 72 4 \
+ "found" "Select from detected keys (${default_key_count})" \
+ "manual" "Paste a single public key" \
+ "folder" "Scan another folder (path or glob)" \
+ "none" "No keys" 3>&1 1>&2 2>&3) || exit_script
+ else
+ ssh_key_mode=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --title "SSH KEY SOURCE" --menu \
+ "No host keys detected; choose manual/none:" 12 72 2 \
+ "manual" "Paste a single public key" \
+ "none" "No keys" 3>&1 1>&2 2>&3) || exit_script
+ fi
+
+ case "$ssh_key_mode" in
+ found)
+ local selection
+ selection=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --title "SELECT HOST KEYS" \
+ --checklist "Select one or more keys to import:" 20 140 10 "${CHOICES[@]}" 3>&1 1>&2 2>&3) || exit_script
+ for tag in $selection; do
+ tag="${tag%\"}"
+ tag="${tag#\"}"
+ local line
+ line=$(grep -E "^${tag}\|" "$MAPFILE" | head -n1 | cut -d'|' -f2-)
+ [[ -n "$line" ]] && printf '%s\n' "$line" >>"$SSH_KEYS_FILE"
+ done
+ ;;
+ manual)
+ SSH_AUTHORIZED_KEY="$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \
+ --inputbox "Paste one SSH public key line (ssh-ed25519/ssh-rsa/...)" 10 72 --title "SSH Public Key" 3>&1 1>&2 2>&3)"
+ [[ -n "$SSH_AUTHORIZED_KEY" ]] && printf '%s\n' "$SSH_AUTHORIZED_KEY" >>"$SSH_KEYS_FILE"
+ ;;
+ folder)
+ local glob_path
+ glob_path=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \
+ --inputbox "Enter a folder or glob to scan (e.g. /root/.ssh/*.pub)" 10 72 --title "Scan Folder/Glob" 3>&1 1>&2 2>&3)
+ if [[ -n "$glob_path" ]]; then
+ shopt -s nullglob
+ read -r -a _scan_files <<<"$glob_path"
+ shopt -u nullglob
+ if [[ "${#_scan_files[@]}" -gt 0 ]]; then
+ ssh_build_choices_from_files "${_scan_files[@]}"
+ if [[ "$COUNT" -gt 0 ]]; then
+ local folder_selection
+ folder_selection=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --title "SELECT FOLDER KEYS" \
+ --checklist "Select key(s) to import:" 20 78 10 "${CHOICES[@]}" 3>&1 1>&2 2>&3) || exit_script
+ for tag in $folder_selection; do
+ tag="${tag%\"}"
+ tag="${tag#\"}"
+ local line
+ line=$(grep -E "^${tag}\|" "$MAPFILE" | head -n1 | cut -d'|' -f2-)
+ [[ -n "$line" ]] && printf '%s\n' "$line" >>"$SSH_KEYS_FILE"
+ done
+ else
+ whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --msgbox "No keys found in: $glob_path" 8 60
+ fi
+ else
+ whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --msgbox "Path/glob returned no files." 8 60
+ fi
+ fi
+ ;;
+ none)
+ :
+ ;;
+ esac
+
+ if [[ -s "$SSH_KEYS_FILE" ]]; then
+ sort -u -o "$SSH_KEYS_FILE" "$SSH_KEYS_FILE"
+ printf '\n' >>"$SSH_KEYS_FILE"
+ fi
+
+ if [[ -s "$SSH_KEYS_FILE" || "$PW" == -password* ]]; then
+ if (whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --defaultno --title "SSH ACCESS" --yesno "Enable root SSH access?" 10 58); then
+ SSH="yes"
+ else
+ SSH="no"
+ fi
+ else
+ SSH="no"
+ fi
+}
+
+# ------------------------------------------------------------------------------
+# start()
+#
+# - Entry point of script
+# - On Proxmox host: calls install_script
+# - In silent mode: runs update_script
+# - Otherwise: shows update/setting menu
+# ------------------------------------------------------------------------------
start() {
source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/tools.func)
if command -v pveversion >/dev/null 2>&1; then
- install_script
+ install_script || return 0
+ return 0
elif [ ! -z ${PHS_SILENT+x} ] && [[ "${PHS_SILENT}" == "1" ]]; then
VERBOSE="no"
set_std_mode
@@ -1137,11 +2067,55 @@ start() {
fi
}
-# This function collects user settings and integrates all the collected information.
+# ------------------------------------------------------------------------------
+# build_container()
+#
+# - Creates and configures the LXC container
+# - Builds network string and applies features (FUSE, TUN, VAAPI passthrough)
+# - Starts container and waits for network connectivity
+# - Installs base packages, SSH keys, and runs -install.sh
+# ------------------------------------------------------------------------------
build_container() {
# if [ "$VERBOSE" == "yes" ]; then set -x; fi
- NET_STRING="-net0 name=eth0,bridge=$BRG$MAC,ip=$NET$GATE$VLAN$MTU"
+ NET_STRING="-net0 name=eth0,bridge=${BRG:-vmbr0}"
+
+ # MAC
+ if [[ -n "$MAC" ]]; then
+ case "$MAC" in
+ ,hwaddr=*) NET_STRING+="$MAC" ;;
+ *) NET_STRING+=",hwaddr=$MAC" ;;
+ esac
+ fi
+
+ # IP (immer zwingend, Standard dhcp)
+ NET_STRING+=",ip=${NET:-dhcp}"
+
+ # Gateway
+ if [[ -n "$GATE" ]]; then
+ case "$GATE" in
+ ,gw=*) NET_STRING+="$GATE" ;;
+ *) NET_STRING+=",gw=$GATE" ;;
+ esac
+ fi
+
+ # VLAN
+ if [[ -n "$VLAN" ]]; then
+ case "$VLAN" in
+ ,tag=*) NET_STRING+="$VLAN" ;;
+ *) NET_STRING+=",tag=$VLAN" ;;
+ esac
+ fi
+
+ # MTU
+ if [[ -n "$MTU" ]]; then
+ case "$MTU" in
+ ,mtu=*) NET_STRING+="$MTU" ;;
+ *) NET_STRING+=",mtu=$MTU" ;;
+ esac
+ fi
+
+ # IPv6 Handling
case "$IPV6_METHOD" in
auto) NET_STRING="$NET_STRING,ip6=auto" ;;
dhcp) NET_STRING="$NET_STRING,ip6=dhcp" ;;
@@ -1162,10 +2136,6 @@ build_container() {
FEATURES="$FEATURES,fuse=1"
fi
- #if [[ $DIAGNOSTICS == "yes" ]]; then
- # post_to_api
- #fi
-
TEMP_DIR=$(mktemp -d)
pushd "$TEMP_DIR" >/dev/null
if [ "$var_os" == "alpine" ]; then
@@ -1175,10 +2145,10 @@ build_container() {
fi
export DIAGNOSTICS="$DIAGNOSTICS"
export RANDOM_UUID="$RANDOM_UUID"
+ export SESSION_ID="$SESSION_ID"
export CACHER="$APT_CACHER"
export CACHER_IP="$APT_CACHER_IP"
export tz="$timezone"
- #export DISABLEIPV6="$DISABLEIP6"
export APPLICATION="$APP"
export app="$NSAPP"
export PASSWORD="$PW"
@@ -1205,19 +2175,98 @@ build_container() {
-unprivileged $CT_TYPE
$PW
"
- bash -c "$(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/create_lxc.sh)"
- RET=$?
- if [[ $RET -ne 0 ]]; then
- msg_error "rny: in line $LINENO: exit code $RET: while executing create_lxc.sh"
- exit $RET
- fi
+ export TEMPLATE_STORAGE="${var_template_storage:-}"
+ export CONTAINER_STORAGE="${var_container_storage:-}"
+ create_lxc_container || exit $?
LXC_CONFIG="/etc/pve/lxc/${CTID}.conf"
- # USB passthrough for privileged LXC (CT_TYPE=0)
- if [ "$CT_TYPE" == "0" ]; then
+ # ============================================================================
+ # GPU/USB PASSTHROUGH CONFIGURATION
+ # ============================================================================
+
+ # List of applications that benefit from GPU acceleration
+ GPU_APPS=(
+ "immich" "channels" "emby" "ersatztv" "frigate"
+ "jellyfin" "plex" "scrypted" "tdarr" "unmanic"
+ "ollama" "fileflows" "open-webui" "tunarr" "debian"
+ "handbrake" "sunshine" "moonlight" "kodi" "stremio"
+ "viseron"
+ )
+
+ # Check if app needs GPU
+ is_gpu_app() {
+ local app="${1,,}"
+ for gpu_app in "${GPU_APPS[@]}"; do
+ [[ "$app" == "${gpu_app,,}" ]] && return 0
+ done
+ return 1
+ }
+
+ # Detect all available GPU devices
+ detect_gpu_devices() {
+ INTEL_DEVICES=()
+ AMD_DEVICES=()
+ NVIDIA_DEVICES=()
+
+ # Store PCI info to avoid multiple calls
+ local pci_vga_info=$(lspci -nn 2>/dev/null | grep -E "VGA|Display|3D")
+
+ # Check for Intel GPU - look for Intel vendor ID [8086]
+ if echo "$pci_vga_info" | grep -q "\[8086:"; then
+ msg_custom "🎮" "${BL}" "Detected Intel GPU"
+ if [[ -d /dev/dri ]]; then
+ for d in /dev/dri/renderD* /dev/dri/card*; do
+ [[ -e "$d" ]] && INTEL_DEVICES+=("$d")
+ done
+ fi
+ fi
+
+ # Check for AMD GPU - look for AMD vendor IDs [1002] (AMD/ATI) or [1022] (AMD)
+ if echo "$pci_vga_info" | grep -qE "\[1002:|\[1022:"; then
+ msg_custom "🎮" "${RD}" "Detected AMD GPU"
+ if [[ -d /dev/dri ]]; then
+ # Only add if not already claimed by Intel
+ if [[ ${#INTEL_DEVICES[@]} -eq 0 ]]; then
+ for d in /dev/dri/renderD* /dev/dri/card*; do
+ [[ -e "$d" ]] && AMD_DEVICES+=("$d")
+ done
+ fi
+ fi
+ fi
+
+ # Check for NVIDIA GPU - look for NVIDIA vendor ID [10de]
+ if echo "$pci_vga_info" | grep -q "\[10de:"; then
+ msg_custom "🎮" "${GN}" "Detected NVIDIA GPU"
+
+ # Simple passthrough - just bind /dev/nvidia* devices if they exist
+ for d in /dev/nvidia* /dev/nvidiactl /dev/nvidia-modeset /dev/nvidia-uvm /dev/nvidia-uvm-tools; do
+ [[ -e "$d" ]] && NVIDIA_DEVICES+=("$d")
+ done
+
+ if [[ ${#NVIDIA_DEVICES[@]} -gt 0 ]]; then
+ msg_custom "🎮" "${GN}" "Found ${#NVIDIA_DEVICES[@]} NVIDIA device(s) for passthrough"
+ else
+ msg_warn "NVIDIA GPU detected via PCI but no /dev/nvidia* devices found"
+ msg_custom "ℹ️" "${YW}" "Skipping NVIDIA passthrough (host drivers may not be loaded)"
+ fi
+ fi
+
+ # Debug output
+ msg_debug "Intel devices: ${INTEL_DEVICES[*]}"
+ msg_debug "AMD devices: ${AMD_DEVICES[*]}"
+ msg_debug "NVIDIA devices: ${NVIDIA_DEVICES[*]}"
+ }
+
+ # Configure USB passthrough for privileged containers
+ configure_usb_passthrough() {
+ if [[ "$CT_TYPE" != "0" ]]; then
+ return 0
+ fi
+
+ msg_info "Configuring automatic USB passthrough (privileged container)"
cat <>"$LXC_CONFIG"
-# USB passthrough
+# Automatic USB passthrough (privileged container)
lxc.cgroup2.devices.allow: a
lxc.cap.drop:
lxc.cgroup2.devices.allow: c 188:* rwm
@@ -1228,127 +2277,218 @@ lxc.mount.entry: /dev/ttyUSB1 dev/ttyUSB1 none bind,optional,create=
lxc.mount.entry: /dev/ttyACM0 dev/ttyACM0 none bind,optional,create=file
lxc.mount.entry: /dev/ttyACM1 dev/ttyACM1 none bind,optional,create=file
EOF
- fi
+ msg_ok "USB passthrough configured"
+ }
- # VAAPI passthrough for privileged containers or known apps
- VAAPI_APPS=(
- "immich"
- "Channels"
- "Emby"
- "ErsatzTV"
- "Frigate"
- "Jellyfin"
- "Plex"
- "Scrypted"
- "Tdarr"
- "Unmanic"
- "Ollama"
- "FileFlows"
- "Open WebUI"
- )
-
- is_vaapi_app=false
- for vaapi_app in "${VAAPI_APPS[@]}"; do
- if [[ "$APP" == "$vaapi_app" ]]; then
- is_vaapi_app=true
- break
+ # Configure GPU passthrough
+ configure_gpu_passthrough() {
+ # Skip if not a GPU app and not privileged
+ if [[ "$CT_TYPE" != "0" ]] && ! is_gpu_app "$APP"; then
+ return 0
fi
- done
- if ([ "$CT_TYPE" == "0" ] || [ "$is_vaapi_app" == "true" ]) &&
- ([[ -e /dev/dri/renderD128 ]] || [[ -e /dev/dri/card0 ]] || [[ -e /dev/fb0 ]]); then
+ detect_gpu_devices
- echo ""
- msg_custom "⚙️ " "\e[96m" "Configuring VAAPI passthrough for LXC container"
- if [ "$CT_TYPE" != "0" ]; then
- msg_custom "⚠️ " "\e[33m" "Container is unprivileged – VAAPI passthrough may not work without additional host configuration (e.g., idmap)."
+ # Count available GPU types
+ local gpu_count=0
+ local available_gpus=()
+
+ if [[ ${#INTEL_DEVICES[@]} -gt 0 ]]; then
+ available_gpus+=("INTEL")
+ gpu_count=$((gpu_count + 1))
fi
- msg_custom "ℹ️ " "\e[96m" "VAAPI enables GPU hardware acceleration (e.g., for video transcoding in Jellyfin or Plex)."
- echo ""
- read -rp "➤ Automatically mount all available VAAPI devices? [Y/n]: " VAAPI_ALL
- if [[ "$VAAPI_ALL" =~ ^[Yy]$|^$ ]]; then
- if [ "$CT_TYPE" == "0" ]; then
- # PRV Container → alles zulässig
- [[ -e /dev/dri/renderD128 ]] && {
- echo "lxc.cgroup2.devices.allow: c 226:128 rwm" >>"$LXC_CONFIG"
- echo "lxc.mount.entry: /dev/dri/renderD128 dev/dri/renderD128 none bind,optional,create=file" >>"$LXC_CONFIG"
- }
- [[ -e /dev/dri/card0 ]] && {
- echo "lxc.cgroup2.devices.allow: c 226:0 rwm" >>"$LXC_CONFIG"
- echo "lxc.mount.entry: /dev/dri/card0 dev/dri/card0 none bind,optional,create=file" >>"$LXC_CONFIG"
- }
- [[ -e /dev/fb0 ]] && {
- echo "lxc.cgroup2.devices.allow: c 29:0 rwm" >>"$LXC_CONFIG"
- echo "lxc.mount.entry: /dev/fb0 dev/fb0 none bind,optional,create=file" >>"$LXC_CONFIG"
- }
- [[ -d /dev/dri ]] && {
- echo "lxc.mount.entry: /dev/dri dev/dri none bind,optional,create=dir" >>"$LXC_CONFIG"
- }
- else
- # UNPRV Container → nur devX für UI
- [[ -e /dev/dri/card0 ]] && echo "dev0: /dev/dri/card0,gid=44" >>"$LXC_CONFIG"
- [[ -e /dev/dri/card1 ]] && echo "dev0: /dev/dri/card1,gid=44" >>"$LXC_CONFIG"
- [[ -e /dev/dri/renderD128 ]] && echo "dev1: /dev/dri/renderD128,gid=104" >>"$LXC_CONFIG"
+ if [[ ${#AMD_DEVICES[@]} -gt 0 ]]; then
+ available_gpus+=("AMD")
+ gpu_count=$((gpu_count + 1))
+ fi
+
+ if [[ ${#NVIDIA_DEVICES[@]} -gt 0 ]]; then
+ available_gpus+=("NVIDIA")
+ gpu_count=$((gpu_count + 1))
+ fi
+
+ if [[ $gpu_count -eq 0 ]]; then
+ msg_custom "ℹ️" "${YW}" "No GPU devices found for passthrough"
+ return 0
+ fi
+
+ local selected_gpu=""
+
+ if [[ $gpu_count -eq 1 ]]; then
+ # Automatic selection for single GPU
+ selected_gpu="${available_gpus[0]}"
+ msg_custom "⚙️" "${GN}" "Automatically configuring ${selected_gpu} GPU passthrough"
+ else
+ # Multiple GPUs - ask user
+ echo -e "\n${INFO} Multiple GPU types detected:"
+ for gpu in "${available_gpus[@]}"; do
+ echo " - $gpu"
+ done
+ read -rp "Which GPU type to passthrough? (${available_gpus[*]}): " selected_gpu
+ selected_gpu="${selected_gpu^^}"
+
+ # Validate selection
+ local valid=0
+ for gpu in "${available_gpus[@]}"; do
+ [[ "$selected_gpu" == "$gpu" ]] && valid=1
+ done
+
+ if [[ $valid -eq 0 ]]; then
+ msg_warn "Invalid selection. Skipping GPU passthrough."
+ return 0
fi
fi
- fi
- if [ "$CT_TYPE" == "1" ] && [ "$is_vaapi_app" == "true" ]; then
- if [[ -e /dev/dri/card0 ]]; then
- echo "dev0: /dev/dri/card0,gid=44" >>"$LXC_CONFIG"
- elif [[ -e /dev/dri/card1 ]]; then
- echo "dev0: /dev/dri/card1,gid=44" >>"$LXC_CONFIG"
- fi
- if [[ -e /dev/dri/renderD128 ]]; then
- echo "dev1: /dev/dri/renderD128,gid=104" >>"$LXC_CONFIG"
- fi
- fi
+ # Apply passthrough configuration based on selection
+ local dev_idx=0
- # TUN device passthrough
- if [ "$ENABLE_TUN" == "yes" ]; then
- cat <>"$LXC_CONFIG"
+ case "$selected_gpu" in
+ INTEL | AMD)
+ local devices=()
+ [[ "$selected_gpu" == "INTEL" ]] && devices=("${INTEL_DEVICES[@]}")
+ [[ "$selected_gpu" == "AMD" ]] && devices=("${AMD_DEVICES[@]}")
+
+ # Add lxc.mount.entry for each device
+ for dev in "${devices[@]}"; do
+ echo "lxc.mount.entry: $dev $dev none bind,optional,create=file" >>"$LXC_CONFIG"
+
+ if [[ "$CT_TYPE" == "0" ]]; then
+ # Privileged container - also add cgroup allows
+ local major minor
+ major=$(stat -c '%t' "$dev" 2>/dev/null || echo "0")
+ minor=$(stat -c '%T' "$dev" 2>/dev/null || echo "0")
+
+ if [[ "$major" != "0" && "$minor" != "0" ]]; then
+ echo "lxc.cgroup2.devices.allow: c $((0x$major)):$((0x$minor)) rwm" >>"$LXC_CONFIG"
+ fi
+ fi
+ done
+
+ export GPU_TYPE="$selected_gpu"
+ msg_ok "${selected_gpu} GPU passthrough configured (${#devices[@]} devices)"
+ ;;
+
+ NVIDIA)
+ if [[ ${#NVIDIA_DEVICES[@]} -eq 0 ]]; then
+ msg_warn "No NVIDIA devices available for passthrough"
+ return 0
+ fi
+
+ # Add lxc.mount.entry for each NVIDIA device
+ for dev in "${NVIDIA_DEVICES[@]}"; do
+ echo "lxc.mount.entry: $dev $dev none bind,optional,create=file" >>"$LXC_CONFIG"
+
+ if [[ "$CT_TYPE" == "0" ]]; then
+ # Privileged container - also add cgroup allows
+ local major minor
+ major=$(stat -c '%t' "$dev" 2>/dev/null || echo "0")
+ minor=$(stat -c '%T' "$dev" 2>/dev/null || echo "0")
+
+ if [[ "$major" != "0" && "$minor" != "0" ]]; then
+ echo "lxc.cgroup2.devices.allow: c $((0x$major)):$((0x$minor)) rwm" >>"$LXC_CONFIG"
+ fi
+ fi
+ done
+
+ export GPU_TYPE="NVIDIA"
+ msg_ok "NVIDIA GPU passthrough configured (${#NVIDIA_DEVICES[@]} devices) - install drivers in container if needed"
+ ;;
+ esac
+ }
+
+ # Additional device passthrough
+ configure_additional_devices() {
+ # TUN device passthrough
+ if [ "$ENABLE_TUN" == "yes" ]; then
+ cat <>"$LXC_CONFIG"
lxc.cgroup2.devices.allow: c 10:200 rwm
lxc.mount.entry: /dev/net/tun dev/net/tun none bind,create=file
EOF
- fi
+ fi
+
+ # Coral TPU passthrough
+ if [[ -e /dev/apex_0 ]]; then
+ msg_custom "🔌" "${BL}" "Detected Coral TPU - configuring passthrough"
+ echo "lxc.mount.entry: /dev/apex_0 dev/apex_0 none bind,optional,create=file" >>"$LXC_CONFIG"
+ fi
+ }
+
+ # Execute pre-start configurations
+ configure_usb_passthrough
+ configure_gpu_passthrough
+ configure_additional_devices
+
+ # ============================================================================
+ # START CONTAINER AND INSTALL USERLAND
+ # ============================================================================
- # This starts the container and executes -install.sh
msg_info "Starting LXC Container"
pct start "$CTID"
- msg_ok "Started LXC Container"
- msg_info "Waiting for network in LXC container"
+ # Wait for container to be running
for i in {1..10}; do
- if pct exec "$CTID" -- ping -c1 -W1 deb.debian.org >/dev/null 2>&1; then
- msg_ok "Network in LXC is reachable"
+ if pct status "$CTID" | grep -q "status: running"; then
+ msg_ok "Started LXC Container"
break
- else
- msg_warn "No network yet in LXC (try $i/10) – waiting..."
- sleep 3
fi
- if [ $i -eq 10 ]; then
- msg_error "No network in LXC after waiting. Setting fallback DNS..."
- pct set "$CTID" --nameserver 1.1.1.1
- pct set "$CTID" --nameserver 8.8.8.8
- if ! pct exec "$CTID" -- ping -c1 -W1 deb.debian.org >/dev/null 2>&1; then
- msg_error "Still no network/DNS in LXC! Aborting customization."
- exit 1
- fi
+ sleep 1
+ if [ "$i" -eq 10 ]; then
+ msg_error "LXC Container did not reach running state"
+ exit 1
fi
done
+ # Wait for network (skip for Alpine initially)
+ if [ "$var_os" != "alpine" ]; then
+ msg_info "Waiting for network in LXC container"
+
+ # Wait for IP
+ for i in {1..20}; do
+ ip_in_lxc=$(pct exec "$CTID" -- ip -4 addr show dev eth0 | awk '/inet / {print $2}' | cut -d/ -f1)
+ [ -n "$ip_in_lxc" ] && break
+ sleep 1
+ done
+
+ if [ -z "$ip_in_lxc" ]; then
+ msg_error "No IP assigned to CT $CTID after 20s"
+ exit 1
+ fi
+
+ # Simple connectivity check - just verify IP is assigned
+ msg_ok "Network configured (IP: $ip_in_lxc)"
+ fi
+ # Function to get correct GID inside container
+ get_container_gid() {
+ local group="$1"
+ local gid=$(pct exec "$CTID" -- getent group "$group" 2>/dev/null | cut -d: -f3)
+ echo "${gid:-44}" # Default to 44 if not found
+ }
+
+ fix_gpu_gids
+
+ # Continue with standard container setup
msg_info "Customizing LXC Container"
+
+ # # Install GPU userland if configured
+ # if [[ "${ENABLE_VAAPI:-0}" == "1" ]]; then
+ # install_gpu_userland "VAAPI"
+ # fi
+
+ # if [[ "${ENABLE_NVIDIA:-0}" == "1" ]]; then
+ # install_gpu_userland "NVIDIA"
+ # fi
+
+ # Continue with standard container setup
if [ "$var_os" == "alpine" ]; then
sleep 3
pct exec "$CTID" -- /bin/sh -c 'cat </etc/apk/repositories
http://dl-cdn.alpinelinux.org/alpine/latest-stable/main
http://dl-cdn.alpinelinux.org/alpine/latest-stable/community
EOF'
- pct exec "$CTID" -- ash -c "apk add bash newt curl openssh nano mc ncurses >/dev/null"
+ pct exec "$CTID" -- ash -c "apk add bash newt curl openssh nano mc ncurses jq >/dev/null"
else
sleep 3
-
pct exec "$CTID" -- bash -c "sed -i '/$LANG/ s/^# //' /etc/locale.gen"
pct exec "$CTID" -- bash -c "locale_line=\$(grep -v '^#' /etc/locale.gen | grep -E '^[a-zA-Z]' | awk '{print \$1}' | head -n 1) && \
echo LANG=\$locale_line >/etc/default/locale && \
@@ -1365,17 +2505,910 @@ EOF'
msg_warn "Skipping timezone setup – zone '$tz' not found in container"
fi
- pct exec "$CTID" -- bash -c "apt-get update && apt-get install -y sudo curl mc gnupg2" || {
+ pct exec "$CTID" -- bash -c "apt-get update >/dev/null && apt-get install -y sudo curl mc gnupg2 jq >/dev/null" || {
msg_error "apt-get base packages installation failed"
exit 1
}
fi
+
msg_ok "Customized LXC Container"
- lxc-attach -n "$CTID" -- bash -c "$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/install/"$var_install".sh)" $?
+ # Install SSH keys
+ install_ssh_keys_into_ct
+
+ # Run application installer
+ if ! lxc-attach -n "$CTID" -- bash -c "$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/install/${var_install}.sh)"; then
+ local exit_code=$?
+ # Try to copy installation log from container before exiting
+ if [[ -n "$CTID" && -n "${SESSION_ID:-}" ]]; then
+ pct pull "$CTID" "/root/.install-${SESSION_ID}.log" "/tmp/install-${SESSION_ID}.log" 2>/dev/null || true
+ fi
+ exit $exit_code
+ fi
}
-# This function sets the description of the container.
+destroy_lxc() {
+ if [[ -z "$CT_ID" ]]; then
+ msg_error "No CT_ID found. Nothing to remove."
+ return 1
+ fi
+
+  # Abort on Ctrl-C / Ctrl-D / ESC
+ trap 'echo; msg_error "Aborted by user (SIGINT/SIGQUIT)"; return 130' INT QUIT
+
+ local prompt
+ if ! read -rp "Remove this Container? " prompt; then
+    # read returns non-zero on Ctrl-D/ESC
+ msg_error "Aborted input (Ctrl-D/ESC)"
+ return 130
+ fi
+
+ case "${prompt,,}" in
+ y | yes)
+ if pct stop "$CT_ID" &>/dev/null && pct destroy "$CT_ID" &>/dev/null; then
+ msg_ok "Removed Container $CT_ID"
+ else
+ msg_error "Failed to remove Container $CT_ID"
+ return 1
+ fi
+ ;;
+ "" | n | no)
+ msg_custom "ℹ️" "${BL}" "Container was not removed."
+ ;;
+ *)
+ msg_warn "Invalid response. Container was not removed."
+ ;;
+ esac
+}
+
+# ------------------------------------------------------------------------------
+# Storage discovery / selection helpers
+# ------------------------------------------------------------------------------
+# ===== Storage discovery / selection helpers (ported from create_lxc.sh) =====
+resolve_storage_preselect() {
+ local class="$1" preselect="$2" required_content=""
+ case "$class" in
+ template) required_content="vztmpl" ;;
+ container) required_content="rootdir" ;;
+ *) return 1 ;;
+ esac
+ [[ -z "$preselect" ]] && return 1
+ if ! pvesm status -content "$required_content" | awk 'NR>1{print $1}' | grep -qx -- "$preselect"; then
+ msg_warn "Preselected storage '${preselect}' does not support content '${required_content}' (or not found)"
+ return 1
+ fi
+
+ local line total used free
+ line="$(pvesm status | awk -v s="$preselect" 'NR>1 && $1==s {print $0}')"
+ if [[ -z "$line" ]]; then
+ STORAGE_INFO="n/a"
+ else
+ total="$(awk '{print $4}' <<<"$line")"
+ used="$(awk '{print $5}' <<<"$line")"
+ free="$(awk '{print $6}' <<<"$line")"
+ local total_h used_h free_h
+ if command -v numfmt >/dev/null 2>&1; then
+ total_h="$(numfmt --to=iec --suffix=B --format %.1f "$total" 2>/dev/null || echo "$total")"
+ used_h="$(numfmt --to=iec --suffix=B --format %.1f "$used" 2>/dev/null || echo "$used")"
+ free_h="$(numfmt --to=iec --suffix=B --format %.1f "$free" 2>/dev/null || echo "$free")"
+ STORAGE_INFO="Free: ${free_h} Used: ${used_h}"
+ else
+ STORAGE_INFO="Free: ${free} Used: ${used}"
+ fi
+ fi
+ STORAGE_RESULT="$preselect"
+ return 0
+}
+
+fix_gpu_gids() {
+ if [[ -z "${GPU_TYPE:-}" ]]; then
+ return 0
+ fi
+
+ # Silent operation to avoid spinner conflicts
+ msg_custom "🔧" "${BL}" "Detecting and setting correct GPU group IDs"
+
+  # Determine the actual GIDs from inside the container
+ local video_gid=$(pct exec "$CTID" -- sh -c "getent group video 2>/dev/null | cut -d: -f3")
+ local render_gid=$(pct exec "$CTID" -- sh -c "getent group render 2>/dev/null | cut -d: -f3")
+
+  # Fallbacks if the groups do not exist
+ if [[ -z "$video_gid" ]]; then
+    # Try to create the video group
+ pct exec "$CTID" -- sh -c "groupadd -r video 2>/dev/null || true"
+ video_gid=$(pct exec "$CTID" -- sh -c "getent group video 2>/dev/null | cut -d: -f3")
+ [[ -z "$video_gid" ]] && video_gid="44" # Ultimate fallback
+ fi
+
+ if [[ -z "$render_gid" ]]; then
+    # Try to create the render group
+ pct exec "$CTID" -- sh -c "groupadd -r render 2>/dev/null || true"
+ render_gid=$(pct exec "$CTID" -- sh -c "getent group render 2>/dev/null | cut -d: -f3")
+ [[ -z "$render_gid" ]] && render_gid="104" # Ultimate fallback
+ fi
+
+ msg_custom "ℹ️" "${DGN}" "Container GIDs detected - video:${video_gid}, render:${render_gid}"
+
+  # Check whether the GIDs differ from the defaults
+ local need_update=0
+ if [[ "$video_gid" != "44" ]] || [[ "$render_gid" != "104" ]]; then
+ need_update=1
+ fi
+
+ if [[ $need_update -eq 1 ]]; then
+ msg_custom "🔄" "${YW}" "Updating device GIDs in container config"
+
+    # Stop the container for the config update
+ pct stop "$CTID" >/dev/null 2>&1
+
+    # Update the dev entries with the correct GIDs
+    # Back up the config
+ cp "$LXC_CONFIG" "${LXC_CONFIG}.bak"
+
+    # Parse and update each dev entry
+ while IFS= read -r line; do
+ if [[ "$line" =~ ^dev[0-9]+: ]]; then
+ # Extract device path
+ local device_path=$(echo "$line" | sed -E 's/^dev[0-9]+: ([^,]+).*/\1/')
+ local dev_num=$(echo "$line" | sed -E 's/^(dev[0-9]+):.*/\1/')
+
+ if [[ "$device_path" =~ renderD ]]; then
+ # RenderD device - use render GID
+ echo "${dev_num}: ${device_path},gid=${render_gid}"
+ elif [[ "$device_path" =~ card ]]; then
+ # Card device - use video GID
+ echo "${dev_num}: ${device_path},gid=${video_gid}"
+ else
+ # Keep original line
+ echo "$line"
+ fi
+ else
+ # Keep non-dev lines
+ echo "$line"
+ fi
+ done <"$LXC_CONFIG" >"${LXC_CONFIG}.new"
+
+ mv "${LXC_CONFIG}.new" "$LXC_CONFIG"
+
+    # Restart the container
+ pct start "$CTID" >/dev/null 2>&1
+ sleep 3
+
+ msg_ok "Device GIDs updated successfully"
+ else
+ msg_ok "Device GIDs are already correct"
+ fi
+ if [[ "$CT_TYPE" == "0" ]]; then
+ pct exec "$CTID" -- bash -c "
+ if [ -d /dev/dri ]; then
+ for dev in /dev/dri/*; do
+ if [ -e \"\$dev\" ]; then
+ if [[ \"\$dev\" =~ renderD ]]; then
+ chgrp ${render_gid} \"\$dev\" 2>/dev/null || true
+ else
+ chgrp ${video_gid} \"\$dev\" 2>/dev/null || true
+ fi
+ chmod 660 \"\$dev\" 2>/dev/null || true
+ fi
+ done
+ fi
+ " >/dev/null 2>&1
+ fi
+}
+
+check_storage_support() {
+ local CONTENT="$1" VALID=0
+ while IFS= read -r line; do
+ local STORAGE_NAME
+ STORAGE_NAME=$(awk '{print $1}' <<<"$line")
+ [[ -n "$STORAGE_NAME" ]] && VALID=1
+ done < <(pvesm status -content "$CONTENT" 2>/dev/null | awk 'NR>1')
+ [[ $VALID -eq 1 ]]
+}
+
+select_storage() {
+ local CLASS=$1 CONTENT CONTENT_LABEL
+ case $CLASS in
+ container)
+ CONTENT='rootdir'
+ CONTENT_LABEL='Container'
+ ;;
+ template)
+ CONTENT='vztmpl'
+ CONTENT_LABEL='Container template'
+ ;;
+ iso)
+ CONTENT='iso'
+ CONTENT_LABEL='ISO image'
+ ;;
+ images)
+ CONTENT='images'
+ CONTENT_LABEL='VM Disk image'
+ ;;
+ backup)
+ CONTENT='backup'
+ CONTENT_LABEL='Backup'
+ ;;
+ snippets)
+ CONTENT='snippets'
+ CONTENT_LABEL='Snippets'
+ ;;
+ *)
+ msg_error "Invalid storage class '$CLASS'"
+ return 1
+ ;;
+ esac
+
+ declare -A STORAGE_MAP
+ local -a MENU=()
+ local COL_WIDTH=0
+
+ while read -r TAG TYPE _ TOTAL USED FREE _; do
+ [[ -n "$TAG" && -n "$TYPE" ]] || continue
+ local DISPLAY="${TAG} (${TYPE})"
+ local USED_FMT=$(numfmt --to=iec --from-unit=K --format %.1f <<<"$USED")
+ local FREE_FMT=$(numfmt --to=iec --from-unit=K --format %.1f <<<"$FREE")
+ local INFO="Free: ${FREE_FMT}B Used: ${USED_FMT}B"
+ STORAGE_MAP["$DISPLAY"]="$TAG"
+ MENU+=("$DISPLAY" "$INFO" "OFF")
+ ((${#DISPLAY} > COL_WIDTH)) && COL_WIDTH=${#DISPLAY}
+ done < <(pvesm status -content "$CONTENT" | awk 'NR>1')
+
+ if [[ ${#MENU[@]} -eq 0 ]]; then
+ msg_error "No storage found for content type '$CONTENT'."
+ return 2
+ fi
+
+ if [[ $((${#MENU[@]} / 3)) -eq 1 ]]; then
+ STORAGE_RESULT="${STORAGE_MAP[${MENU[0]}]}"
+ STORAGE_INFO="${MENU[1]}"
+ return 0
+ fi
+
+ local WIDTH=$((COL_WIDTH + 42))
+ while true; do
+ local DISPLAY_SELECTED
+ DISPLAY_SELECTED=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \
+ --title "Storage Pools" \
+ --radiolist "Which storage pool for ${CONTENT_LABEL,,}?\n(Spacebar to select)" \
+ 16 "$WIDTH" 6 "${MENU[@]}" 3>&1 1>&2 2>&3) || { exit_script; }
+
+ DISPLAY_SELECTED=$(sed 's/[[:space:]]*$//' <<<"$DISPLAY_SELECTED")
+ if [[ -z "$DISPLAY_SELECTED" || -z "${STORAGE_MAP[$DISPLAY_SELECTED]+_}" ]]; then
+ whiptail --msgbox "No valid storage selected. Please try again." 8 58
+ continue
+ fi
+ STORAGE_RESULT="${STORAGE_MAP[$DISPLAY_SELECTED]}"
+ for ((i = 0; i < ${#MENU[@]}; i += 3)); do
+ if [[ "${MENU[$i]}" == "$DISPLAY_SELECTED" ]]; then
+ STORAGE_INFO="${MENU[$i + 1]}"
+ break
+ fi
+ done
+ return 0
+ done
+}
+
+create_lxc_container() {
+ # ------------------------------------------------------------------------------
+ # Optional verbose mode (debug tracing)
+ # ------------------------------------------------------------------------------
+ if [[ "${CREATE_LXC_VERBOSE:-no}" == "yes" ]]; then set -x; fi
+
+ # ------------------------------------------------------------------------------
+ # Helpers (dynamic versioning / template parsing)
+ # ------------------------------------------------------------------------------
+ pkg_ver() { dpkg-query -W -f='${Version}\n' "$1" 2>/dev/null || echo ""; }
+ pkg_cand() { apt-cache policy "$1" 2>/dev/null | awk '/Candidate:/ {print $2}'; }
+
+ ver_ge() { dpkg --compare-versions "$1" ge "$2"; }
+ ver_gt() { dpkg --compare-versions "$1" gt "$2"; }
+ ver_lt() { dpkg --compare-versions "$1" lt "$2"; }
+
+ # Extract Debian OS minor from template name: debian-13-standard_13.1-1_amd64.tar.zst => "13.1"
+ parse_template_osver() { sed -n 's/.*_\([0-9][0-9]*\(\.[0-9]\+\)\?\)-.*/\1/p' <<<"$1"; }
+
+ # Offer upgrade for pve-container/lxc-pve if candidate > installed; optional auto-retry pct create
+ # Returns:
+ # 0 = no upgrade needed
+ # 1 = upgraded (and if do_retry=yes and retry succeeded, creation done)
+ # 2 = user declined
+ # 3 = upgrade attempted but failed OR retry failed
+ offer_lxc_stack_upgrade_and_maybe_retry() {
+ local do_retry="${1:-no}" # yes|no
+ local _pvec_i _pvec_c _lxcp_i _lxcp_c need=0
+
+ _pvec_i="$(pkg_ver pve-container)"
+ _lxcp_i="$(pkg_ver lxc-pve)"
+ _pvec_c="$(pkg_cand pve-container)"
+ _lxcp_c="$(pkg_cand lxc-pve)"
+
+ if [[ -n "$_pvec_c" && "$_pvec_c" != "none" ]]; then
+ ver_gt "$_pvec_c" "${_pvec_i:-0}" && need=1
+ fi
+ if [[ -n "$_lxcp_c" && "$_lxcp_c" != "none" ]]; then
+ ver_gt "$_lxcp_c" "${_lxcp_i:-0}" && need=1
+ fi
+ if [[ $need -eq 0 ]]; then
+ msg_debug "No newer candidate for pve-container/lxc-pve (installed=$_pvec_i/$_lxcp_i, cand=$_pvec_c/$_lxcp_c)"
+ return 0
+ fi
+
+ echo
+ echo "An update for the Proxmox LXC stack is available:"
+ echo " pve-container: installed=${_pvec_i:-n/a} candidate=${_pvec_c:-n/a}"
+ echo " lxc-pve : installed=${_lxcp_i:-n/a} candidate=${_lxcp_c:-n/a}"
+ echo
+ read -rp "Do you want to upgrade now? [y/N] " _ans
+ case "${_ans,,}" in
+ y | yes)
+ msg_info "Upgrading Proxmox LXC stack (pve-container, lxc-pve)"
+ if apt-get update -qq >/dev/null && apt-get install -y --only-upgrade pve-container lxc-pve >/dev/null; then
+ msg_ok "LXC stack upgraded."
+ if [[ "$do_retry" == "yes" ]]; then
+ msg_info "Retrying container creation after upgrade"
+ if pct create "$CTID" "${TEMPLATE_STORAGE}:vztmpl/${TEMPLATE}" "${PCT_OPTIONS[@]}" >>"$LOGFILE" 2>&1; then
+ msg_ok "Container created successfully after upgrade."
+ return 0
+ else
+ msg_error "pct create still failed after upgrade. See $LOGFILE"
+ return 3
+ fi
+ fi
+ return 1
+ else
+ msg_error "Upgrade failed. Please check APT output."
+ return 3
+ fi
+ ;;
+ *) return 2 ;;
+ esac
+ }
+
+ # ------------------------------------------------------------------------------
+ # Required input variables
+ # ------------------------------------------------------------------------------
+ [[ "${CTID:-}" ]] || {
+ msg_error "You need to set 'CTID' variable."
+ exit 203
+ }
+ [[ "${PCT_OSTYPE:-}" ]] || {
+ msg_error "You need to set 'PCT_OSTYPE' variable."
+ exit 204
+ }
+
+ msg_debug "CTID=$CTID"
+ msg_debug "PCT_OSTYPE=$PCT_OSTYPE"
+ msg_debug "PCT_OSVERSION=${PCT_OSVERSION:-default}"
+
+ # ID checks
+ [[ "$CTID" -ge 100 ]] || {
+ msg_error "ID cannot be less than 100."
+ exit 205
+ }
+ if qm status "$CTID" &>/dev/null || pct status "$CTID" &>/dev/null; then
+ echo -e "ID '$CTID' is already in use."
+ unset CTID
+ msg_error "Cannot use ID that is already in use."
+ exit 206
+ fi
+
+ # Storage capability check
+ check_storage_support "rootdir" || {
+ msg_error "No valid storage found for 'rootdir' [Container]"
+ exit 1
+ }
+ check_storage_support "vztmpl" || {
+ msg_error "No valid storage found for 'vztmpl' [Template]"
+ exit 1
+ }
+
+ # Template storage selection
+ if resolve_storage_preselect template "${TEMPLATE_STORAGE:-}"; then
+ TEMPLATE_STORAGE="$STORAGE_RESULT"
+ TEMPLATE_STORAGE_INFO="$STORAGE_INFO"
+ msg_ok "Storage ${BL}${TEMPLATE_STORAGE}${CL} (${TEMPLATE_STORAGE_INFO}) [Template]"
+ else
+ while true; do
+ if [[ -z "${var_template_storage:-}" ]]; then
+ if select_storage template; then
+ TEMPLATE_STORAGE="$STORAGE_RESULT"
+ TEMPLATE_STORAGE_INFO="$STORAGE_INFO"
+ msg_ok "Storage ${BL}${TEMPLATE_STORAGE}${CL} (${TEMPLATE_STORAGE_INFO}) [Template]"
+ break
+ fi
+ fi
+ done
+ fi
+
+ # Container storage selection
+ if resolve_storage_preselect container "${CONTAINER_STORAGE:-}"; then
+ CONTAINER_STORAGE="$STORAGE_RESULT"
+ CONTAINER_STORAGE_INFO="$STORAGE_INFO"
+ msg_ok "Storage ${BL}${CONTAINER_STORAGE}${CL} (${CONTAINER_STORAGE_INFO}) [Container]"
+ else
+ if [[ -z "${var_container_storage:-}" ]]; then
+ if select_storage container; then
+ CONTAINER_STORAGE="$STORAGE_RESULT"
+ CONTAINER_STORAGE_INFO="$STORAGE_INFO"
+ msg_ok "Storage ${BL}${CONTAINER_STORAGE}${CL} (${CONTAINER_STORAGE_INFO}) [Container]"
+ fi
+ fi
+ fi
+
+ # Validate content types
+ msg_info "Validating content types of storage '$CONTAINER_STORAGE'"
+ STORAGE_CONTENT=$(grep -A4 -E "^(zfspool|dir|lvmthin|lvm): $CONTAINER_STORAGE" /etc/pve/storage.cfg | grep content | awk '{$1=""; print $0}' | xargs)
+ msg_debug "Storage '$CONTAINER_STORAGE' has content types: $STORAGE_CONTENT"
+ grep -qw "rootdir" <<<"$STORAGE_CONTENT" || {
+ msg_error "Storage '$CONTAINER_STORAGE' does not support 'rootdir'. Cannot create LXC."
+ exit 217
+ }
+ $STD msg_ok "Storage '$CONTAINER_STORAGE' supports 'rootdir'"
+
+ msg_info "Validating content types of template storage '$TEMPLATE_STORAGE'"
+ TEMPLATE_CONTENT=$(grep -A4 -E "^[^:]+: $TEMPLATE_STORAGE" /etc/pve/storage.cfg | grep content | awk '{$1=""; print $0}' | xargs)
+ msg_debug "Template storage '$TEMPLATE_STORAGE' has content types: $TEMPLATE_CONTENT"
+ if ! grep -qw "vztmpl" <<<"$TEMPLATE_CONTENT"; then
+ msg_warn "Template storage '$TEMPLATE_STORAGE' does not declare 'vztmpl'. This may cause pct create to fail."
+ else
+ $STD msg_ok "Template storage '$TEMPLATE_STORAGE' supports 'vztmpl'"
+ fi
+
+ # Free space check
+ STORAGE_FREE=$(pvesm status | awk -v s="$CONTAINER_STORAGE" '$1 == s { print $6 }')
+ REQUIRED_KB=$((${PCT_DISK_SIZE:-8} * 1024 * 1024))
+ [[ "$STORAGE_FREE" -ge "$REQUIRED_KB" ]] || {
+ msg_error "Not enough space on '$CONTAINER_STORAGE'. Needed: ${PCT_DISK_SIZE:-8}G."
+ exit 214
+ }
+
+ # Cluster quorum (if cluster)
+ if [[ -f /etc/pve/corosync.conf ]]; then
+ msg_info "Checking cluster quorum"
+ if ! pvecm status | awk -F':' '/^Quorate/ { exit ($2 ~ /Yes/) ? 0 : 1 }'; then
+ msg_error "Cluster is not quorate. Start all nodes or configure quorum device (QDevice)."
+ exit 210
+ fi
+ msg_ok "Cluster is quorate"
+ fi
+
+ # ------------------------------------------------------------------------------
+ # Template discovery & validation
+ # ------------------------------------------------------------------------------
+ TEMPLATE_SEARCH="${PCT_OSTYPE}-${PCT_OSVERSION:-}"
+ case "$PCT_OSTYPE" in
+ debian | ubuntu) TEMPLATE_PATTERN="-standard_" ;;
+ alpine | fedora | rocky | centos) TEMPLATE_PATTERN="-default_" ;;
+ *) TEMPLATE_PATTERN="" ;;
+ esac
+
+ msg_info "Searching for template '$TEMPLATE_SEARCH'"
+
+ # Build regex patterns outside awk/grep for clarity
+ SEARCH_PATTERN="^${TEMPLATE_SEARCH}"
+
+ #echo "[DEBUG] TEMPLATE_SEARCH='$TEMPLATE_SEARCH'"
+ #echo "[DEBUG] SEARCH_PATTERN='$SEARCH_PATTERN'"
+ #echo "[DEBUG] TEMPLATE_PATTERN='$TEMPLATE_PATTERN'"
+
+ mapfile -t LOCAL_TEMPLATES < <(
+ pveam list "$TEMPLATE_STORAGE" 2>/dev/null |
+ awk -v search="${SEARCH_PATTERN}" -v pattern="${TEMPLATE_PATTERN}" '$1 ~ search && $1 ~ pattern {print $1}' |
+ sed 's|.*/||' | sort -t - -k 2 -V
+ )
+
+ pveam update >/dev/null 2>&1 || msg_warn "Could not update template catalog (pveam update failed)."
+
+ msg_ok "Template search completed"
+
+ #echo "[DEBUG] pveam available output (first 5 lines with .tar files):"
+ #pveam available -section system 2>/dev/null | grep -E '\.(tar\.zst|tar\.xz|tar\.gz)$' | head -5 | sed 's/^/ /'
+
+ set +u
+ mapfile -t ONLINE_TEMPLATES < <(pveam available -section system 2>/dev/null | grep -E '\.(tar\.zst|tar\.xz|tar\.gz)$' | awk '{print $2}' | grep -E "${SEARCH_PATTERN}.*${TEMPLATE_PATTERN}" | sort -t - -k 2 -V 2>/dev/null || true)
+ #echo "[DEBUG] After filtering: ${#ONLINE_TEMPLATES[@]} online templates found"
+ set -u
+
+ ONLINE_TEMPLATE=""
+ [[ ${#ONLINE_TEMPLATES[@]} -gt 0 ]] && ONLINE_TEMPLATE="${ONLINE_TEMPLATES[-1]}"
+
+ #msg_debug "SEARCH_PATTERN='${SEARCH_PATTERN}' TEMPLATE_PATTERN='${TEMPLATE_PATTERN}'"
+ #msg_debug "Found ${#LOCAL_TEMPLATES[@]} local templates, ${#ONLINE_TEMPLATES[@]} online templates"
+ if [[ ${#ONLINE_TEMPLATES[@]} -gt 0 ]]; then
+ #msg_debug "First 3 online templates:"
+ count=0
+ for idx in "${!ONLINE_TEMPLATES[@]}"; do
+ #msg_debug " [$idx]: ${ONLINE_TEMPLATES[$idx]}"
+ ((count++))
+ [[ $count -ge 3 ]] && break
+ done
+ fi
+ #msg_debug "ONLINE_TEMPLATE='$ONLINE_TEMPLATE'"
+
+ if [[ ${#LOCAL_TEMPLATES[@]} -gt 0 ]]; then
+ TEMPLATE="${LOCAL_TEMPLATES[-1]}"
+ TEMPLATE_SOURCE="local"
+ else
+ TEMPLATE="$ONLINE_TEMPLATE"
+ TEMPLATE_SOURCE="online"
+ fi
+
+ # If still no template, try to find alternatives
+ if [[ -z "$TEMPLATE" ]]; then
+ echo ""
+ echo "[DEBUG] No template found for ${PCT_OSTYPE} ${PCT_OSVERSION}, searching for alternatives..."
+
+ # Get all available versions for this OS type
+ mapfile -t AVAILABLE_VERSIONS < <(
+ pveam available -section system 2>/dev/null |
+ grep -E '\.(tar\.zst|tar\.xz|tar\.gz)$' |
+ awk -F'\t' '{print $1}' |
+ grep "^${PCT_OSTYPE}-" |
+ sed -E "s/.*${PCT_OSTYPE}-([0-9]+(\.[0-9]+)?).*/\1/" |
+ sort -u -V 2>/dev/null
+ )
+
+ if [[ ${#AVAILABLE_VERSIONS[@]} -gt 0 ]]; then
+ echo ""
+ echo "${BL}Available ${PCT_OSTYPE} versions:${CL}"
+ for i in "${!AVAILABLE_VERSIONS[@]}"; do
+ echo " [$((i + 1))] ${AVAILABLE_VERSIONS[$i]}"
+ done
+ echo ""
+ read -p "Select version [1-${#AVAILABLE_VERSIONS[@]}] or press Enter to cancel: " choice
+
+ if [[ "$choice" =~ ^[0-9]+$ ]] && [[ "$choice" -ge 1 ]] && [[ "$choice" -le ${#AVAILABLE_VERSIONS[@]} ]]; then
+ PCT_OSVERSION="${AVAILABLE_VERSIONS[$((choice - 1))]}"
+ TEMPLATE_SEARCH="${PCT_OSTYPE}-${PCT_OSVERSION}"
+ SEARCH_PATTERN="^${TEMPLATE_SEARCH}-"
+
+ #echo "[DEBUG] Retrying with version: $PCT_OSVERSION"
+
+ mapfile -t ONLINE_TEMPLATES < <(
+ pveam available -section system 2>/dev/null |
+ grep -E '\.(tar\.zst|tar\.xz|tar\.gz)$' |
+ awk -F'\t' '{print $1}' |
+ grep -E "${SEARCH_PATTERN}.*${TEMPLATE_PATTERN}" |
+ sort -t - -k 2 -V 2>/dev/null || true
+ )
+
+ if [[ ${#ONLINE_TEMPLATES[@]} -gt 0 ]]; then
+ TEMPLATE="${ONLINE_TEMPLATES[-1]}"
+ TEMPLATE_SOURCE="online"
+ #echo "[DEBUG] Found alternative: $TEMPLATE"
+ else
+ msg_error "No templates available for ${PCT_OSTYPE} ${PCT_OSVERSION}"
+ exit 225
+ fi
+ else
+ msg_custom "🚫" "${YW}" "Installation cancelled"
+ exit 0
+ fi
+ else
+ msg_error "No ${PCT_OSTYPE} templates available at all"
+ exit 225
+ fi
+ fi
+
+ #echo "[DEBUG] Selected TEMPLATE='$TEMPLATE' SOURCE='$TEMPLATE_SOURCE'"
+ #msg_debug "Selected TEMPLATE='$TEMPLATE' SOURCE='$TEMPLATE_SOURCE'"
+
+ TEMPLATE_PATH="$(pvesm path $TEMPLATE_STORAGE:vztmpl/$TEMPLATE 2>/dev/null || true)"
+ if [[ -z "$TEMPLATE_PATH" ]]; then
+ TEMPLATE_BASE=$(awk -v s="$TEMPLATE_STORAGE" '$1==s {f=1} f && /path/ {print $2; exit}' /etc/pve/storage.cfg)
+ [[ -n "$TEMPLATE_BASE" ]] && TEMPLATE_PATH="$TEMPLATE_BASE/template/cache/$TEMPLATE"
+ fi
+
+ # If we still don't have a path but have a valid template name, construct it
+ if [[ -z "$TEMPLATE_PATH" && -n "$TEMPLATE" ]]; then
+ TEMPLATE_PATH="/var/lib/vz/template/cache/$TEMPLATE"
+ fi
+
+ [[ -n "$TEMPLATE_PATH" ]] || {
+ if [[ -z "$TEMPLATE" ]]; then
+ msg_error "Template ${PCT_OSTYPE} ${PCT_OSVERSION} not available"
+
+ # Get available versions
+ mapfile -t AVAILABLE_VERSIONS < <(
+ pveam available -section system 2>/dev/null |
+ grep "^${PCT_OSTYPE}-" |
+ sed -E 's/.*'"${PCT_OSTYPE}"'-([0-9]+\.[0-9]+).*/\1/' |
+ grep -E '^[0-9]+\.[0-9]+$' |
+ sort -u -V 2>/dev/null || sort -u
+ )
+
+ if [[ ${#AVAILABLE_VERSIONS[@]} -gt 0 ]]; then
+ echo -e "\n${BL}Available versions:${CL}"
+ for i in "${!AVAILABLE_VERSIONS[@]}"; do
+ echo " [$((i + 1))] ${AVAILABLE_VERSIONS[$i]}"
+ done
+
+ echo ""
+ read -p "Select version [1-${#AVAILABLE_VERSIONS[@]}] or Enter to exit: " choice
+
+ if [[ "$choice" =~ ^[0-9]+$ ]] && [[ "$choice" -ge 1 ]] && [[ "$choice" -le ${#AVAILABLE_VERSIONS[@]} ]]; then
+ export var_version="${AVAILABLE_VERSIONS[$((choice - 1))]}"
+ export PCT_OSVERSION="$var_version"
+ msg_ok "Switched to ${PCT_OSTYPE} ${var_version}"
+
+ # Retry template search with new version
+ TEMPLATE_SEARCH="${PCT_OSTYPE}-${PCT_OSVERSION:-}"
+ SEARCH_PATTERN="^${TEMPLATE_SEARCH}-"
+
+ mapfile -t LOCAL_TEMPLATES < <(
+ pveam list "$TEMPLATE_STORAGE" 2>/dev/null |
+ awk -v search="${SEARCH_PATTERN}" -v pattern="${TEMPLATE_PATTERN}" '$1 ~ search && $1 ~ pattern {print $1}' |
+ sed 's|.*/||' | sort -t - -k 2 -V
+ )
+ mapfile -t ONLINE_TEMPLATES < <(
+ pveam available -section system 2>/dev/null |
+ grep -E '\.(tar\.zst|tar\.xz|tar\.gz)$' |
+ awk -F'\t' '{print $1}' |
+ grep -E "${SEARCH_PATTERN}.*${TEMPLATE_PATTERN}" |
+ sort -t - -k 2 -V 2>/dev/null || true
+ )
+ ONLINE_TEMPLATE=""
+ [[ ${#ONLINE_TEMPLATES[@]} -gt 0 ]] && ONLINE_TEMPLATE="${ONLINE_TEMPLATES[-1]}"
+
+ if [[ ${#LOCAL_TEMPLATES[@]} -gt 0 ]]; then
+ TEMPLATE="${LOCAL_TEMPLATES[-1]}"
+ TEMPLATE_SOURCE="local"
+ else
+ TEMPLATE="$ONLINE_TEMPLATE"
+ TEMPLATE_SOURCE="online"
+ fi
+
+ TEMPLATE_PATH="$(pvesm path $TEMPLATE_STORAGE:vztmpl/$TEMPLATE 2>/dev/null || true)"
+ if [[ -z "$TEMPLATE_PATH" ]]; then
+ TEMPLATE_BASE=$(awk -v s="$TEMPLATE_STORAGE" '$1==s {f=1} f && /path/ {print $2; exit}' /etc/pve/storage.cfg)
+ [[ -n "$TEMPLATE_BASE" ]] && TEMPLATE_PATH="$TEMPLATE_BASE/template/cache/$TEMPLATE"
+ fi
+
+ # If we still don't have a path but have a valid template name, construct it
+ if [[ -z "$TEMPLATE_PATH" && -n "$TEMPLATE" ]]; then
+ TEMPLATE_PATH="/var/lib/vz/template/cache/$TEMPLATE"
+ fi
+
+ [[ -n "$TEMPLATE_PATH" ]] || {
+ msg_error "Template still not found after version change"
+ exit 220
+ }
+ else
+ msg_custom "🚫" "${YW}" "Installation cancelled"
+ exit 1
+ fi
+ else
+ msg_error "No ${PCT_OSTYPE} templates available"
+ exit 220
+ fi
+ fi
+ }
+
+ # Validate that we found a template
+ if [[ -z "$TEMPLATE" ]]; then
+ msg_error "No template found for ${PCT_OSTYPE} ${PCT_OSVERSION}"
+ msg_custom "ℹ️" "${YW}" "Please check:"
+ msg_custom " •" "${YW}" "Is pveam catalog available? (run: pveam available -section system)"
+ msg_custom " •" "${YW}" "Does the template exist for your OS version?"
+ exit 225
+ fi
+
+ msg_ok "Template ${BL}$TEMPLATE${CL} [$TEMPLATE_SOURCE]"
+ msg_debug "Resolved TEMPLATE_PATH=$TEMPLATE_PATH"
+
+ NEED_DOWNLOAD=0
+ if [[ ! -f "$TEMPLATE_PATH" ]]; then
+ msg_info "Template not present locally – will download."
+ NEED_DOWNLOAD=1
+ elif [[ ! -r "$TEMPLATE_PATH" ]]; then
+ msg_error "Template file exists but is not readable – check permissions."
+ exit 221
+ elif [[ "$(stat -c%s "$TEMPLATE_PATH")" -lt 1000000 ]]; then
+ if [[ -n "$ONLINE_TEMPLATE" ]]; then
+ msg_warn "Template file too small (<1MB) – re-downloading."
+ NEED_DOWNLOAD=1
+ else
+ msg_warn "Template looks too small, but no online version exists. Keeping local file."
+ fi
+ elif ! tar -tf "$TEMPLATE_PATH" &>/dev/null; then
+ if [[ -n "$ONLINE_TEMPLATE" ]]; then
+ msg_warn "Template appears corrupted – re-downloading."
+ NEED_DOWNLOAD=1
+ else
+ msg_warn "Template appears corrupted, but no online version exists. Keeping local file."
+ fi
+ else
+ $STD msg_ok "Template $TEMPLATE is present and valid."
+ fi
+
+ if [[ "$TEMPLATE_SOURCE" == "local" && -n "$ONLINE_TEMPLATE" && "$TEMPLATE" != "$ONLINE_TEMPLATE" ]]; then
+ msg_warn "Local template is outdated: $TEMPLATE (latest available: $ONLINE_TEMPLATE)"
+ if whiptail --yesno "A newer template is available:\n$ONLINE_TEMPLATE\n\nDo you want to download and use it instead?" 12 70; then
+ TEMPLATE="$ONLINE_TEMPLATE"
+ NEED_DOWNLOAD=1
+ else
+ msg_custom "ℹ️" "${BL}" "Continuing with local template $TEMPLATE"
+ fi
+ fi
+
+ if [[ "$NEED_DOWNLOAD" -eq 1 ]]; then
+ [[ -f "$TEMPLATE_PATH" ]] && rm -f "$TEMPLATE_PATH"
+ for attempt in {1..3}; do
+ msg_info "Attempt $attempt: Downloading template $TEMPLATE to $TEMPLATE_STORAGE"
+ if pveam download "$TEMPLATE_STORAGE" "$TEMPLATE" >/dev/null 2>&1; then
+ msg_ok "Template download successful."
+ break
+ fi
+ if [[ $attempt -eq 3 ]]; then
+ msg_error "Failed after 3 attempts. Please check network access, permissions, or manually run:\n pveam download $TEMPLATE_STORAGE $TEMPLATE"
+ exit 222
+ fi
+ sleep $((attempt * 5))
+ done
+ fi
+
+ if ! pveam list "$TEMPLATE_STORAGE" 2>/dev/null | grep -q "$TEMPLATE"; then
+ msg_error "Template $TEMPLATE not available in storage $TEMPLATE_STORAGE after download."
+ exit 223
+ fi
+
+ # ------------------------------------------------------------------------------
+ # Dynamic preflight for Debian 13.x: offer upgrade if available (no hard mins)
+ # ------------------------------------------------------------------------------
+ if [[ "$PCT_OSTYPE" == "debian" ]]; then
+ OSVER="$(parse_template_osver "$TEMPLATE")"
+ if [[ -n "$OSVER" ]]; then
+ # Proactive, aber ohne Abbruch – nur Angebot
+ offer_lxc_stack_upgrade_and_maybe_retry "no" || true
+ fi
+ fi
+
+ # ------------------------------------------------------------------------------
+ # Create LXC Container
+ # ------------------------------------------------------------------------------
+ msg_info "Creating LXC container"
+
+ # Ensure subuid/subgid entries exist
+ grep -q "root:100000:65536" /etc/subuid || echo "root:100000:65536" >>/etc/subuid
+ grep -q "root:100000:65536" /etc/subgid || echo "root:100000:65536" >>/etc/subgid
+
+ # Assemble pct options
+ PCT_OPTIONS=(${PCT_OPTIONS[@]:-${DEFAULT_PCT_OPTIONS[@]}})
+ [[ " ${PCT_OPTIONS[*]} " =~ " -rootfs " ]] || PCT_OPTIONS+=(-rootfs "$CONTAINER_STORAGE:${PCT_DISK_SIZE:-8}")
+
+ # Lock by template file (avoid concurrent downloads/creates)
+ lockfile="/tmp/template.${TEMPLATE}.lock"
+ exec 9>"$lockfile" || {
+ msg_error "Failed to create lock file '$lockfile'."
+ exit 200
+ }
+ flock -w 60 9 || {
+ msg_error "Timeout while waiting for template lock."
+ exit 211
+ }
+
+ LOGFILE="/tmp/pct_create_${CTID}.log"
+ msg_debug "pct create command: pct create $CTID ${TEMPLATE_STORAGE}:vztmpl/${TEMPLATE} ${PCT_OPTIONS[*]}"
+ msg_debug "Logfile: $LOGFILE"
+
+ # First attempt
+ if ! pct create "$CTID" "${TEMPLATE_STORAGE}:vztmpl/${TEMPLATE}" "${PCT_OPTIONS[@]}" >"$LOGFILE" 2>&1; then
+ msg_error "Container creation failed on ${TEMPLATE_STORAGE}. Checking template..."
+
+ # Validate template file
+ if [[ ! -s "$TEMPLATE_PATH" || "$(stat -c%s "$TEMPLATE_PATH")" -lt 1000000 ]]; then
+ msg_warn "Template file too small or missing – re-downloading."
+ rm -f "$TEMPLATE_PATH"
+ pveam download "$TEMPLATE_STORAGE" "$TEMPLATE"
+ elif ! tar -tf "$TEMPLATE_PATH" &>/dev/null; then
+ if [[ -n "$ONLINE_TEMPLATE" ]]; then
+ msg_warn "Template appears corrupted – re-downloading."
+ rm -f "$TEMPLATE_PATH"
+ pveam download "$TEMPLATE_STORAGE" "$TEMPLATE"
+ else
+ msg_warn "Template appears corrupted, but no online version exists. Skipping re-download."
+ fi
+ fi
+
+ # Retry after repair
+ if ! pct create "$CTID" "${TEMPLATE_STORAGE}:vztmpl/${TEMPLATE}" "${PCT_OPTIONS[@]}" >>"$LOGFILE" 2>&1; then
+ # Fallback to local storage
+ if [[ "$TEMPLATE_STORAGE" != "local" ]]; then
+ msg_warn "Retrying container creation with fallback to local storage..."
+ LOCAL_TEMPLATE_PATH="/var/lib/vz/template/cache/$TEMPLATE"
+ if [[ ! -f "$LOCAL_TEMPLATE_PATH" ]]; then
+ msg_info "Downloading template to local..."
+ pveam download local "$TEMPLATE" >/dev/null 2>&1
+ fi
+ if pct create "$CTID" "local:vztmpl/${TEMPLATE}" "${PCT_OPTIONS[@]}" >>"$LOGFILE" 2>&1; then
+ msg_ok "Container successfully created using local fallback."
+ else
+ # --- Dynamic stack upgrade + auto-retry on the well-known error pattern ---
+ if grep -qiE 'unsupported .* version' "$LOGFILE"; then
+ echo
+ echo "pct reported 'unsupported ... version' – your LXC stack might be too old for this template."
+ echo "We can try to upgrade 'pve-container' and 'lxc-pve' now and retry automatically."
+ offer_lxc_stack_upgrade_and_maybe_retry "yes"
+ rc=$?
+ case $rc in
+ 0) : ;; # success - container created, continue
+ 2)
+ echo "Upgrade was declined. Please update and re-run:
+ apt update && apt install --only-upgrade pve-container lxc-pve"
+ exit 231
+ ;;
+ 3)
+ echo "Upgrade and/or retry failed. Please inspect: $LOGFILE"
+ exit 231
+ ;;
+ esac
+ else
+ msg_error "Container creation failed even with local fallback. See $LOGFILE"
+ if whiptail --yesno "pct create failed.\nDo you want to enable verbose debug mode and view detailed logs?" 12 70; then
+ set -x
+ bash -x -c "pct create $CTID local:vztmpl/${TEMPLATE} ${PCT_OPTIONS[*]}" 2>&1 | tee -a "$LOGFILE"
+ set +x
+ fi
+ exit 209
+ fi
+ fi
+ else
+ msg_error "Container creation failed on local storage. See $LOGFILE"
+ # --- Dynamic stack upgrade + auto-retry on the well-known error pattern ---
+ if grep -qiE 'unsupported .* version' "$LOGFILE"; then
+ echo
+ echo "pct reported 'unsupported ... version' – your LXC stack might be too old for this template."
+ echo "We can try to upgrade 'pve-container' and 'lxc-pve' now and retry automatically."
+ offer_lxc_stack_upgrade_and_maybe_retry "yes"
+ rc=$?
+ case $rc in
+ 0) : ;; # success - container created, continue
+ 2)
+ echo "Upgrade was declined. Please update and re-run:
+ apt update && apt install --only-upgrade pve-container lxc-pve"
+ exit 231
+ ;;
+ 3)
+ echo "Upgrade and/or retry failed. Please inspect: $LOGFILE"
+ exit 231
+ ;;
+ esac
+ else
+ msg_error "Container creation failed. See $LOGFILE"
+ if whiptail --yesno "pct create failed.\nDo you want to enable verbose debug mode and view detailed logs?" 12 70; then
+ set -x
+ bash -x -c "pct create $CTID local:vztmpl/${TEMPLATE} ${PCT_OPTIONS[*]}" 2>&1 | tee -a "$LOGFILE"
+ set +x
+ fi
+ exit 209
+ fi
+ fi
+ fi
+ fi
+
+ # Verify container exists
+ pct list | awk '{print $1}' | grep -qx "$CTID" || {
+ msg_error "Container ID $CTID not listed in 'pct list'. See $LOGFILE"
+ exit 215
+ }
+
+ # Verify config rootfs
+ grep -q '^rootfs:' "/etc/pve/lxc/$CTID.conf" || {
+ msg_error "RootFS entry missing in container config. See $LOGFILE"
+ exit 216
+ }
+
+ msg_ok "LXC Container ${BL}$CTID${CL} ${GN}was successfully created."
+}
+
+# ------------------------------------------------------------------------------
+# description()
+#
+# - Sets container description with HTML content (logo, links, badges)
+# - Restarts ping-instances.service if present
+# - Posts status "done" to API
+# ------------------------------------------------------------------------------
description() {
IP=$(pct exec "$CTID" ip a s dev eth0 | awk '/inet / {print $2}' | cut -d/ -f1)
@@ -1410,8 +3443,6 @@ description() {
EOF
)
-
- # Set Description in LXC
pct set "$CTID" -description "$DESCRIPTION"
if [[ -f /etc/systemd/system/ping-instances.service ]]; then
@@ -1421,6 +3452,13 @@ EOF
post_update_to_api "done" "none"
}
+# ------------------------------------------------------------------------------
+# api_exit_script()
+#
+# - Exit trap handler
+# - Reports exit codes to API with detailed reason
+# - Handles known codes (100–209) and maps them to errors
+# ------------------------------------------------------------------------------
api_exit_script() {
exit_code=$?
if [ $exit_code -ne 0 ]; then
diff --git a/misc/build.func.backup-20251029-123804 b/misc/build.func.backup-20251029-123804
new file mode 100644
index 000000000..9c8a1fc84
--- /dev/null
+++ b/misc/build.func.backup-20251029-123804
@@ -0,0 +1,3516 @@
+#!/usr/bin/env bash
+# Copyright (c) 2021-2025 community-scripts ORG
+# Author: tteck (tteckster) | MickLesk | michelroegl-brunner
+# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
+# Revision: 1
+
+# ------------------------------------------------------------------------------
+# variables()
+#
+# - Normalize application name (NSAPP = lowercase, no spaces)
+# - Build installer filename (var_install)
+# - Define regex for integer validation
+# - Fetch hostname of Proxmox node
+# - Set default values for diagnostics/method
+# - Generate random UUID for tracking
+# - Get Proxmox VE version and kernel version
+# ------------------------------------------------------------------------------
+variables() {
+  NSAPP=$(echo "${APP,,}" | tr -d ' ')              # Normalized app name: lowercase with spaces removed.
+  var_install="${NSAPP}-install"                    # Name of the matching install script ("<nsapp>-install").
+  INTEGER='^[0-9]+([.][0-9]+)?$'                    # Regex accepting integers and decimals, used for input validation.
+  PVEHOST_NAME=$(hostname)                          # Proxmox node hostname, used as-is (not uppercased, despite older comments).
+  DIAGNOSTICS="yes"                                 # Diagnostics opt-in flag reported with API calls.
+  METHOD="default"                                  # Install method reported with API calls.
+  RANDOM_UUID="$(cat /proc/sys/kernel/random/uuid)" # Random UUID identifying this run for tracking.
+  CTTYPE="${CTTYPE:-${CT_TYPE:-1}}"                 # Container type; falls back to CT_TYPE, then "1" (unprivileged).
+  #CT_TYPE=${var_unprivileged:-$CT_TYPE}
+
+  # Get Proxmox VE version (e.g. "8.2.4" out of "pve-manager/8.2.4/...") and kernel version.
+  if command -v pveversion >/dev/null 2>&1; then
+    PVEVERSION=$(pveversion | grep "pve-manager" | awk '{print $2}' | cut -d'/' -f1)
+  else
+    PVEVERSION="N/A" # pveversion missing — probably not running on a PVE host.
+  fi
+  KERNEL_VERSION=$(uname -r)
+}
+
+# -----------------------------------------------------------------------------
+# Community-Scripts bootstrap loader
+# - Always sources build.func from remote
+# - Updates local core files only if build.func changed
+# - Local cache: /usr/local/community-scripts/core
+# -----------------------------------------------------------------------------
+
+# FUNC_DIR="/usr/local/community-scripts/core"
+# mkdir -p "$FUNC_DIR"
+
+# BUILD_URL="https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func"
+# BUILD_REV="$FUNC_DIR/build.rev"
+# DEVMODE="${DEVMODE:-no}"
+
+# # --- Step 1: fetch build.func content once, compute hash ---
+# build_content="$(curl -fsSL "$BUILD_URL")" || {
+# echo "❌ Failed to fetch build.func"
+# exit 1
+# }
+
+# newhash=$(printf "%s" "$build_content" | sha256sum | awk '{print $1}')
+# oldhash=$(cat "$BUILD_REV" 2>/dev/null || echo "")
+
+# # --- Step 2: if build.func changed, offer update for core files ---
+# if [ "$newhash" != "$oldhash" ]; then
+# echo "⚠️ build.func changed!"
+
+# while true; do
+# read -rp "Refresh local core files? [y/N/diff]: " ans
+# case "$ans" in
+# [Yy]*)
+# echo "$newhash" >"$BUILD_REV"
+
+# update_func_file() {
+# local file="$1"
+# local url="https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/$file"
+# local local_path="$FUNC_DIR/$file"
+
+# echo "⬇️ Downloading $file ..."
+# curl -fsSL "$url" -o "$local_path" || {
+# echo "❌ Failed to fetch $file"
+# exit 1
+# }
+# echo "✔️ Updated $file"
+# }
+
+# update_func_file core.func
+# update_func_file error_handler.func
+# update_func_file tools.func
+# break
+# ;;
+# [Dd]*)
+# for file in core.func error_handler.func tools.func; do
+# local_path="$FUNC_DIR/$file"
+#             url="https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/$file"
+# remote_tmp="$(mktemp)"
+
+# curl -fsSL "$url" -o "$remote_tmp" || continue
+
+# if [ -f "$local_path" ]; then
+# echo "🔍 Diff for $file:"
+# diff -u "$local_path" "$remote_tmp" || echo "(no differences)"
+# else
+# echo "📦 New file $file will be installed"
+# fi
+
+# rm -f "$remote_tmp"
+# done
+# ;;
+# *)
+# echo "❌ Skipped updating local core files"
+# break
+# ;;
+# esac
+# done
+# else
+# if [ "$DEVMODE" != "yes" ]; then
+# echo "✔️ build.func unchanged → using existing local core files"
+# fi
+# fi
+
+# if [ -n "${_COMMUNITY_SCRIPTS_LOADER:-}" ]; then
+# return 0 2>/dev/null || exit 0
+# fi
+# _COMMUNITY_SCRIPTS_LOADER=1
+
+# # --- Step 3: always source local versions of the core files ---
+# source "$FUNC_DIR/core.func"
+# source "$FUNC_DIR/error_handler.func"
+# source "$FUNC_DIR/tools.func"
+
+# # --- Step 4: finally, source build.func directly from memory ---
+# # (no tmp file needed)
+# source <(printf "%s" "$build_content")
+
+# ------------------------------------------------------------------------------
+# Load core + error handler functions from community-scripts repo
+#
+# - Prefer curl if available, fallback to wget
+# - Load: core.func, error_handler.func, api.func
+# - Initialize error traps after loading
+# ------------------------------------------------------------------------------
+
+source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/api.func)
+
+if command -v curl >/dev/null 2>&1; then
+ source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/core.func)
+ source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/error_handler.func)
+ load_functions
+ catch_errors
+ #echo "(build.func) Loaded core.func via curl"
+elif command -v wget >/dev/null 2>&1; then
+ source <(wget -qO- https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/core.func)
+ source <(wget -qO- https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/error_handler.func)
+ load_functions
+ catch_errors
+ #echo "(build.func) Loaded core.func via wget"
+fi
+
+# ------------------------------------------------------------------------------
+# maxkeys_check()
+#
+# - Reads kernel keyring limits (maxkeys, maxbytes)
+# - Checks current usage for LXC user (UID 100000)
+# - Warns if usage is close to limits and suggests sysctl tuning
+# - Exits if thresholds are exceeded
+# - https://cleveruptime.com/docs/files/proc-key-users | https://docs.kernel.org/security/keys/core.html
+# ------------------------------------------------------------------------------
+
+maxkeys_check() {
+  # Read per-user kernel keyring limits; "0" signals the procfs entry was unreadable.
+  per_user_maxkeys=$(cat /proc/sys/kernel/keys/maxkeys 2>/dev/null || echo 0)
+  per_user_maxbytes=$(cat /proc/sys/kernel/keys/maxbytes 2>/dev/null || echo 0)
+
+  # Exit if kernel parameters are unavailable
+  if [[ "$per_user_maxkeys" -eq 0 || "$per_user_maxbytes" -eq 0 ]]; then
+    echo -e "${CROSS}${RD} Error: Unable to read kernel parameters. Ensure proper permissions.${CL}"
+    exit 1
+  fi
+
+  # Fetch key usage for user ID 100000 (typical for containers)
+  # NOTE(review): field positions assumed from the proc-key-users format
+  # ($2 = keys in use, $5 = "bytes/maxbytes") — confirm on the target kernel.
+  used_lxc_keys=$(awk '/100000:/ {print $2}' /proc/key-users 2>/dev/null || echo 0)
+  used_lxc_bytes=$(awk '/100000:/ {split($5, a, "/"); print a[1]}' /proc/key-users 2>/dev/null || echo 0)
+
+  # Calculate thresholds and suggested new limits:
+  # warn 100 keys / 1000 bytes before the hard cap, and suggest doubling it.
+  threshold_keys=$((per_user_maxkeys - 100))
+  threshold_bytes=$((per_user_maxbytes - 1000))
+  new_limit_keys=$((per_user_maxkeys * 2))
+  new_limit_bytes=$((per_user_maxbytes * 2))
+
+  # Check if key or byte usage is near limits
+  failure=0
+  if [[ "$used_lxc_keys" -gt "$threshold_keys" ]]; then
+    echo -e "${CROSS}${RD} Warning: Key usage is near the limit (${used_lxc_keys}/${per_user_maxkeys}).${CL}"
+    echo -e "${INFO} Suggested action: Set ${GN}kernel.keys.maxkeys=${new_limit_keys}${CL} in ${BOLD}/etc/sysctl.d/98-community-scripts.conf${CL}."
+    failure=1
+  fi
+  if [[ "$used_lxc_bytes" -gt "$threshold_bytes" ]]; then
+    echo -e "${CROSS}${RD} Warning: Key byte usage is near the limit (${used_lxc_bytes}/${per_user_maxbytes}).${CL}"
+    echo -e "${INFO} Suggested action: Set ${GN}kernel.keys.maxbytes=${new_limit_bytes}${CL} in ${BOLD}/etc/sysctl.d/98-community-scripts.conf${CL}."
+    failure=1
+  fi
+
+  # Provide next steps if issues are detected; exit so the caller doesn't proceed.
+  if [[ "$failure" -eq 1 ]]; then
+    echo -e "${INFO} To apply changes, run: ${BOLD}service procps force-reload${CL}"
+    exit 1
+  fi
+
+  echo -e "${CM}${GN} All kernel key limits are within safe thresholds.${CL}"
+}
+
+# ------------------------------------------------------------------------------
+# get_current_ip()
+#
+# - Returns current container IP depending on OS type
+# - Debian/Ubuntu: uses `hostname -I`
+# - Alpine: parses eth0 via `ip -4 addr`
+# ------------------------------------------------------------------------------
+get_current_ip() {
+  # Print the container's current IPv4 address, or "Unknown" if it cannot
+  # be determined. Previously, a missing /etc/os-release left CURRENT_IP
+  # unset (or stale from an earlier call) and an empty line was echoed.
+  CURRENT_IP="Unknown"
+  if [ -f /etc/os-release ]; then
+    # Debian/Ubuntu ship `hostname -I` (all addresses; take the first).
+    if grep -qE 'ID=debian|ID=ubuntu' /etc/os-release; then
+      CURRENT_IP=$(hostname -I | awk '{print $1}')
+    # Alpine's busybox hostname lacks -I, so parse eth0 via iproute2.
+    elif grep -q 'ID=alpine' /etc/os-release; then
+      CURRENT_IP=$(ip -4 addr show eth0 | awk '/inet / {print $2}' | cut -d/ -f1 | head -n 1)
+    fi
+  fi
+  echo "$CURRENT_IP"
+}
+
+# ------------------------------------------------------------------------------
+# update_motd_ip()
+#
+# - Updates /etc/motd with current container IP
+# - Removes old IP entries to avoid duplicates
+# ------------------------------------------------------------------------------
+update_motd_ip() {
+ MOTD_FILE="/etc/motd"
+
+ if [ -f "$MOTD_FILE" ]; then
+ # Remove existing IP Address lines to prevent duplication
+ sed -i '/IP Address:/d' "$MOTD_FILE"
+
+ IP=$(get_current_ip)
+ # Add the new IP address
+ echo -e "${TAB}${NETWORK}${YW} IP Address: ${GN}${IP}${CL}" >>"$MOTD_FILE"
+ fi
+}
+
+# ------------------------------------------------------------------------------
+# install_ssh_keys_into_ct()
+#
+# - Installs SSH keys into container root account if SSH is enabled
+# - Uses pct push or direct input to authorized_keys
+# - Falls back to warning if no keys provided
+# ------------------------------------------------------------------------------
+install_ssh_keys_into_ct() {
+  # Install the selected SSH public keys into the container's root account.
+  # No-op unless SSH was enabled; returns 1 only when writing the keys fails.
+  [[ "$SSH" != "yes" ]] && return 0
+
+  if [[ -n "$SSH_KEYS_FILE" && -s "$SSH_KEYS_FILE" ]]; then
+    msg_info "Installing selected SSH keys into CT ${CTID}"
+    pct exec "$CTID" -- sh -c 'mkdir -p /root/.ssh && chmod 700 /root/.ssh' || {
+      msg_error "prepare /root/.ssh failed"
+      return 1
+    }
+    # Prefer pct push; fall back to streaming the file through pct exec stdin.
+    pct push "$CTID" "$SSH_KEYS_FILE" /root/.ssh/authorized_keys >/dev/null 2>&1 ||
+      pct exec "$CTID" -- sh -c "cat > /root/.ssh/authorized_keys" <"$SSH_KEYS_FILE" || {
+      msg_error "write authorized_keys failed"
+      return 1
+    }
+    # Tighten permissions; best-effort (|| true) since the keys are already in place.
+    pct exec "$CTID" -- sh -c 'chmod 600 /root/.ssh/authorized_keys' || true
+    msg_ok "Installed SSH keys into CT ${CTID}"
+    return 0
+  fi
+
+  # Fallback: nothing was selected
+  msg_warn "No SSH keys to install (skipping)."
+  return 0
+}
+
+# ------------------------------------------------------------------------------
+# base_settings()
+#
+# - Defines all base/default variables for container creation
+# - Reads from environment variables (var_*)
+# - Provides fallback defaults for OS type/version
+# ------------------------------------------------------------------------------
+base_settings() {
+  # Default Settings
+  # Every value honors its var_* environment override; an empty-string
+  # default means "unset — let Proxmox defaults or later prompts decide".
+  CT_TYPE=${var_unprivileged:-"1"}
+  DISK_SIZE=${var_disk:-"4"}
+  CORE_COUNT=${var_cpu:-"1"}
+  RAM_SIZE=${var_ram:-"1024"}
+  # NOTE(review): VERBOSE/ENABLE_FUSE/ENABLE_TUN fall back to "$1" — callers
+  # appear to pass a default as the first argument; confirm at call sites.
+  VERBOSE=${var_verbose:-"${1:-no}"}
+  PW=${var_pw:-""}
+  CT_ID=${var_ctid:-$NEXTID}
+  HN=${var_hostname:-$NSAPP}
+  BRG=${var_brg:-"vmbr0"}
+  NET=${var_net:-"dhcp"}
+  IPV6_METHOD=${var_ipv6_method:-"none"}
+  IPV6_STATIC=${var_ipv6_static:-""}
+  GATE=${var_gateway:-""}
+  APT_CACHER=${var_apt_cacher:-""}
+  APT_CACHER_IP=${var_apt_cacher_ip:-""}
+  MTU=${var_mtu:-""}
+  SD=${var_storage:-""}
+  NS=${var_ns:-""}
+  MAC=${var_mac:-""}
+  VLAN=${var_vlan:-""}
+  SSH=${var_ssh:-"no"}
+  SSH_AUTHORIZED_KEY=${var_ssh_authorized_key:-""}
+  UDHCPC_FIX=${var_udhcpc_fix:-""}
+  TAGS="community-script,${var_tags:-}"
+  ENABLE_FUSE=${var_fuse:-"${1:-no}"}
+  ENABLE_TUN=${var_tun:-"${1:-no}"}
+
+  # Since these 2 are only defined outside of default_settings function, we add a temporary fallback. TODO: To align everything, we should add these as constant variables (e.g. OSTYPE and OSVERSION), but that would currently require updating the default_settings function for all existing scripts
+  if [ -z "$var_os" ]; then
+    var_os="debian"
+  fi
+  if [ -z "$var_version" ]; then
+    var_version="12"
+  fi
+}
+
+# ------------------------------------------------------------------------------
+# echo_default()
+#
+# - Prints summary of default values (ID, OS, type, disk, RAM, CPU, etc.)
+# - Uses icons and formatting for readability
+# - Convert CT_TYPE to description
+# ------------------------------------------------------------------------------
+echo_default() {
+  # Print a summary of the values chosen by the default settings.
+  # CT_TYPE is numeric (1 = unprivileged, 0 = privileged); map it to a label.
+  CT_TYPE_DESC="Unprivileged"
+  if [ "$CT_TYPE" -eq 0 ]; then
+    CT_TYPE_DESC="Privileged"
+  fi
+  echo -e "${TAB}${INFO} ProxmoxVE Version ${PVEVERSION} | Kernel: ${KERNEL_VERSION}${CL}"
+  echo -e "${CONTAINERID}${BOLD}${DGN}Container ID: ${BGN}${CT_ID}${CL}"
+  echo -e "${OS}${BOLD}${DGN}Operating System: ${BGN}$var_os ($var_version)${CL}"
+  echo -e "${CONTAINERTYPE}${BOLD}${DGN}Container Type: ${BGN}$CT_TYPE_DESC${CL}"
+  echo -e "${DISKSIZE}${BOLD}${DGN}Disk Size: ${BGN}${DISK_SIZE} GB${CL}"
+  echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}${CORE_COUNT}${CL}"
+  echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}${RAM_SIZE} MiB${CL}"
+  # Verbose mode is only mentioned when enabled, to keep the summary short.
+  if [ "$VERBOSE" == "yes" ]; then
+    echo -e "${SEARCH}${BOLD}${DGN}Verbose Mode: ${BGN}Enabled${CL}"
+  fi
+  echo -e "${CREATING}${BOLD}${BL}Creating a ${APP} LXC using the above default settings${CL}"
+  echo -e " "
+}
+
+# ------------------------------------------------------------------------------
+# exit_script()
+#
+# - Called when user cancels an action
+# - Clears screen and exits gracefully
+# ------------------------------------------------------------------------------
+exit_script() {
+  # User-cancel handler: wipe whiptail remnants from the terminal,
+  # announce the cancellation, and leave the script.
+  clear
+  echo -e "\n${CROSS}${RD}User exited script${CL}\n"
+  # Bare `exit` propagates the status of the last command (the echo → 0).
+  exit
+}
+
+# ------------------------------------------------------------------------------
+# find_host_ssh_keys()
+#
+# - Scans system for available SSH keys
+# - Supports defaults (~/.ssh, /etc/ssh/authorized_keys)
+# - Returns list of files containing valid SSH public keys
+# - Sets FOUND_HOST_KEY_COUNT to number of keys found
+# ------------------------------------------------------------------------------
+find_host_ssh_keys() {
+  # Scan the host for files containing SSH public keys.
+  # Prints a ':'-joined list of matching file paths on stdout and sets the
+  # global FOUND_HOST_KEY_COUNT to the total number of key lines found.
+  # Candidates come from $var_ssh_import_glob when set, otherwise from the
+  # standard root/system authorized_keys locations plus /root/.ssh/*.pub.
+  local re='(ssh-(rsa|ed25519)|ecdsa-sha2-nistp256|sk-(ssh-ed25519|ecdsa-sha2-nistp256))'
+  local -a files=() cand=()
+  local g="${var_ssh_import_glob:-}"
+  local total=0 f base c
+
+  # nullglob: non-matching globs expand to nothing instead of themselves.
+  shopt -s nullglob
+  if [[ -n "$g" ]]; then
+    # Intentionally unquoted so the user-supplied glob expands.
+    for pat in $g; do cand+=($pat); done
+  else
+    cand+=(/root/.ssh/authorized_keys /root/.ssh/authorized_keys2)
+    cand+=(/root/.ssh/*.pub)
+    cand+=(/etc/ssh/authorized_keys /etc/ssh/authorized_keys.d/*)
+  fi
+  shopt -u nullglob
+
+  for f in "${cand[@]}"; do
+    [[ -f "$f" && -r "$f" ]] || continue
+    base="$(basename -- "$f")"
+    case "$base" in
+    known_hosts | known_hosts.* | config) continue ;;
+    id_*) [[ "$f" != *.pub ]] && continue ;; # skip private keys, keep *.pub
+    esac
+
+    # CRLF-safe count of public-key lines (comments and blanks stripped).
+    # BUG FIX: the pattern was single-quoted as '"$re"', so grep searched for
+    # the literal string "$re" — the count was always 0 and the primary scan
+    # never detected keys, forcing the fallback below on every run.
+    c=$(tr -d '\r' <"$f" | awk '
+      /^[[:space:]]*#/ {next}
+      /^[[:space:]]*$/ {next}
+      {print}
+    ' | grep -E -c "$re" || true)
+
+    if ((c > 0)); then
+      files+=("$f")
+      total=$((total + c))
+    fi
+  done
+
+  # Fallback to /root/.ssh/authorized_keys
+  if ((${#files[@]} == 0)) && [[ -r /root/.ssh/authorized_keys ]]; then
+    if grep -E -q "$re" /root/.ssh/authorized_keys; then
+      files+=(/root/.ssh/authorized_keys)
+      total=$((total + $(grep -E -c "$re" /root/.ssh/authorized_keys || echo 0)))
+    fi
+  fi
+
+  FOUND_HOST_KEY_COUNT="$total"
+  (
+    IFS=:
+    echo "${files[*]}"
+  )
+}
+
+# ------------------------------------------------------------------------------
+# advanced_settings()
+#
+# - Interactive whiptail menu for advanced configuration
+# - Lets user set container type, password, CT ID, hostname, disk, CPU, RAM
+# - Supports IPv4/IPv6, DNS, MAC, VLAN, tags, SSH keys, FUSE, verbose mode
+# - Ends with confirmation or re-entry if cancelled
+# ------------------------------------------------------------------------------
+advanced_settings() {
+ whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --msgbox --title "Here is an instructional tip:" "To make a selection, use the Spacebar." 8 58
+ # Setting Default Tag for Advanced Settings
+ TAGS="community-script;${var_tags:-}"
+ CT_DEFAULT_TYPE="${CT_TYPE}"
+ CT_TYPE=""
+ while [ -z "$CT_TYPE" ]; do
+ if [ "$CT_DEFAULT_TYPE" == "1" ]; then
+ if CT_TYPE=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --title "CONTAINER TYPE" --radiolist "Choose Type" 10 58 2 \
+ "1" "Unprivileged" ON \
+ "0" "Privileged" OFF \
+ 3>&1 1>&2 2>&3); then
+ if [ -n "$CT_TYPE" ]; then
+ CT_TYPE_DESC="Unprivileged"
+ if [ "$CT_TYPE" -eq 0 ]; then
+ CT_TYPE_DESC="Privileged"
+ fi
+ echo -e "${TAB}${INFO} ProxmoxVE Version ${PVEVERSION} | Kernel: ${KERNEL_VERSION}${CL}"
+ echo -e "${OS}${BOLD}${DGN}Operating System: ${BGN}$var_os |${OSVERSION}${BOLD}${DGN}Version: ${BGN}$var_version${CL}"
+ echo -e "${CONTAINERTYPE}${BOLD}${DGN}Container Type: ${BGN}$CT_TYPE_DESC${CL}"
+ fi
+ else
+ exit_script
+ fi
+ fi
+ if [ "$CT_DEFAULT_TYPE" == "0" ]; then
+ if CT_TYPE=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --title "CONTAINER TYPE" --radiolist "Choose Type" 10 58 2 \
+ "1" "Unprivileged" OFF \
+ "0" "Privileged" ON \
+ 3>&1 1>&2 2>&3); then
+ if [ -n "$CT_TYPE" ]; then
+ CT_TYPE_DESC="Unprivileged"
+ if [ "$CT_TYPE" -eq 0 ]; then
+ CT_TYPE_DESC="Privileged"
+ fi
+ echo -e "${TAB}${INFO} ProxmoxVE Version ${PVEVERSION} | Kernel: ${KERNEL_VERSION}${CL}"
+ echo -e "${OS}${BOLD}${DGN}Operating System: ${BGN}$var_os${CL}"
+ echo -e "${OSVERSION}${BOLD}${DGN}Version: ${BGN}$var_version${CL}"
+ echo -e "${CONTAINERTYPE}${BOLD}${DGN}Container Type: ${BGN}$CT_TYPE_DESC${CL}"
+ fi
+ else
+ exit_script
+ fi
+ fi
+ done
+
+ while true; do
+ if PW1=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --passwordbox "\nSet Root Password (needed for root ssh access)" 9 58 --title "PASSWORD (leave blank for automatic login)" 3>&1 1>&2 2>&3); then
+ # Empty = Autologin
+ if [[ -z "$PW1" ]]; then
+ PW=""
+ PW1="Automatic Login"
+ echo -e "${VERIFYPW}${BOLD}${DGN}Root Password: ${BGN}$PW1${CL}"
+ break
+ fi
+
+ # Invalid: contains spaces
+ if [[ "$PW1" == *" "* ]]; then
+ whiptail --msgbox "Password cannot contain spaces." 8 58
+ continue
+ fi
+
+ # Invalid: too short
+ if ((${#PW1} < 5)); then
+ whiptail --msgbox "Password must be at least 5 characters." 8 58
+ continue
+ fi
+
+ # Confirm password
+ if PW2=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --passwordbox "\nVerify Root Password" 9 58 --title "PASSWORD VERIFICATION" 3>&1 1>&2 2>&3); then
+ if [[ "$PW1" == "$PW2" ]]; then
+ PW="-password $PW1"
+ echo -e "${VERIFYPW}${BOLD}${DGN}Root Password: ${BGN}********${CL}"
+ break
+ else
+ whiptail --msgbox "Passwords do not match. Please try again." 8 58
+ fi
+ else
+ exit_script
+ fi
+ else
+ exit_script
+ fi
+ done
+
+ if CT_ID=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Container ID" 8 58 "$NEXTID" --title "CONTAINER ID" 3>&1 1>&2 2>&3); then
+ if [ -z "$CT_ID" ]; then
+ CT_ID="$NEXTID"
+ fi
+ else
+ exit_script
+ fi
+ echo -e "${CONTAINERID}${BOLD}${DGN}Container ID: ${BGN}$CT_ID${CL}"
+
+ while true; do
+ if CT_NAME=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --inputbox "Set Hostname" 8 58 "$NSAPP" --title "HOSTNAME" 3>&1 1>&2 2>&3); then
+ if [ -z "$CT_NAME" ]; then
+ HN="$NSAPP"
+ else
+ HN=$(echo "${CT_NAME,,}" | tr -d ' ')
+ fi
+ # Hostname validate (RFC 1123)
+ if [[ "$HN" =~ ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ ]]; then
+ echo -e "${HOSTNAME}${BOLD}${DGN}Hostname: ${BGN}$HN${CL}"
+ break
+ else
+ whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \
+ --msgbox "❌ Invalid hostname: '$HN'\n\nOnly lowercase letters, digits and hyphens (-) are allowed.\nUnderscores (_) or other characters are not permitted!" 10 70
+ fi
+ else
+ exit_script
+ fi
+ done
+
+ while true; do
+ DISK_SIZE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Disk Size in GB" 8 58 "$var_disk" --title "DISK SIZE" 3>&1 1>&2 2>&3) || exit_script
+
+ if [ -z "$DISK_SIZE" ]; then
+ DISK_SIZE="$var_disk"
+ fi
+
+ if [[ "$DISK_SIZE" =~ ^[1-9][0-9]*$ ]]; then
+ echo -e "${DISKSIZE}${BOLD}${DGN}Disk Size: ${BGN}${DISK_SIZE} GB${CL}"
+ break
+ else
+ whiptail --msgbox "Disk size must be a positive integer!" 8 58
+ fi
+ done
+
+ while true; do
+ CORE_COUNT=$(whiptail --backtitle "Proxmox VE Helper Scripts" \
+ --inputbox "Allocate CPU Cores" 8 58 "$var_cpu" --title "CORE COUNT" 3>&1 1>&2 2>&3) || exit_script
+
+ if [ -z "$CORE_COUNT" ]; then
+ CORE_COUNT="$var_cpu"
+ fi
+
+ if [[ "$CORE_COUNT" =~ ^[1-9][0-9]*$ ]]; then
+ echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}$CORE_COUNT${CL}"
+ break
+ else
+ whiptail --msgbox "CPU core count must be a positive integer!" 8 58
+ fi
+ done
+
+ while true; do
+ RAM_SIZE=$(whiptail --backtitle "Proxmox VE Helper Scripts" \
+ --inputbox "Allocate RAM in MiB" 8 58 "$var_ram" --title "RAM" 3>&1 1>&2 2>&3) || exit_script
+
+ if [ -z "$RAM_SIZE" ]; then
+ RAM_SIZE="$var_ram"
+ fi
+
+ if [[ "$RAM_SIZE" =~ ^[1-9][0-9]*$ ]]; then
+ echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}${RAM_SIZE} MiB${CL}"
+ break
+ else
+ whiptail --msgbox "RAM size must be a positive integer!" 8 58
+ fi
+ done
+
+ IFACE_FILEPATH_LIST="/etc/network/interfaces"$'\n'$(find "/etc/network/interfaces.d/" -type f)
+ BRIDGES=""
+ OLD_IFS=$IFS
+ IFS=$'\n'
+ for iface_filepath in ${IFACE_FILEPATH_LIST}; do
+
+ iface_indexes_tmpfile=$(mktemp -q -u '.iface-XXXX')
+ (grep -Pn '^\s*iface' "${iface_filepath}" | cut -d':' -f1 && wc -l "${iface_filepath}" | cut -d' ' -f1) | awk 'FNR==1 {line=$0; next} {print line":"$0-1; line=$0}' >"${iface_indexes_tmpfile}" || true
+
+ if [ -f "${iface_indexes_tmpfile}" ]; then
+
+ while read -r pair; do
+ start=$(echo "${pair}" | cut -d':' -f1)
+ end=$(echo "${pair}" | cut -d':' -f2)
+
+ if awk "NR >= ${start} && NR <= ${end}" "${iface_filepath}" | grep -qP '^\s*(bridge[-_](ports|stp|fd|vlan-aware|vids)|ovs_type\s+OVSBridge)\b'; then
+ iface_name=$(sed "${start}q;d" "${iface_filepath}" | awk '{print $2}')
+ BRIDGES="${iface_name}"$'\n'"${BRIDGES}"
+ fi
+
+ done <"${iface_indexes_tmpfile}"
+ rm -f "${iface_indexes_tmpfile}"
+ fi
+
+ done
+ IFS=$OLD_IFS
+ BRIDGES=$(echo "$BRIDGES" | grep -v '^\s*$' | sort | uniq)
+ if [[ -z "$BRIDGES" ]]; then
+ BRG="vmbr0"
+ echo -e "${BRIDGE}${BOLD}${DGN}Bridge: ${BGN}$BRG${CL}"
+ else
+ # Build bridge menu with descriptions
+ BRIDGE_MENU_OPTIONS=()
+ while IFS= read -r bridge; do
+ if [[ -n "$bridge" ]]; then
+ # Get description from Proxmox built-in method - find comment for this specific bridge
+ description=$(grep -A 10 "iface $bridge" /etc/network/interfaces | grep '^#' | head -n1 | sed 's/^#\s*//')
+ if [[ -n "$description" ]]; then
+ BRIDGE_MENU_OPTIONS+=("$bridge" "${description}")
+ else
+ BRIDGE_MENU_OPTIONS+=("$bridge" " ")
+ fi
+ fi
+ done <<<"$BRIDGES"
+
+ BRG=$(whiptail --backtitle "Proxmox VE Helper Scripts" --menu "Select network bridge: " 18 55 6 "${BRIDGE_MENU_OPTIONS[@]}" 3>&1 1>&2 2>&3)
+ if [[ -z "$BRG" ]]; then
+ exit_script
+ else
+ echo -e "${BRIDGE}${BOLD}${DGN}Bridge: ${BGN}$BRG${CL}"
+ fi
+ fi
+
+ # IPv4 methods: dhcp, static, none
+ while true; do
+ IPV4_METHOD=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \
+ --title "IPv4 Address Management" \
+ --menu "Select IPv4 Address Assignment Method:" 12 60 2 \
+ "dhcp" "Automatic (DHCP, recommended)" \
+ "static" "Static (manual entry)" \
+ 3>&1 1>&2 2>&3)
+
+ exit_status=$?
+ if [ $exit_status -ne 0 ]; then
+ exit_script
+ fi
+
+ case "$IPV4_METHOD" in
+ dhcp)
+ NET="dhcp"
+ GATE=""
+ echo -e "${NETWORK}${BOLD}${DGN}IPv4: DHCP${CL}"
+ break
+ ;;
+ static)
+ # Static: call and validate CIDR address
+ while true; do
+ NET=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \
+ --inputbox "Enter Static IPv4 CIDR Address (e.g. 192.168.100.50/24)" 8 58 "" \
+ --title "IPv4 ADDRESS" 3>&1 1>&2 2>&3)
+ if [ -z "$NET" ]; then
+ whiptail --msgbox "IPv4 address must not be empty." 8 58
+ continue
+ elif [[ "$NET" =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}/([0-9]|[1-2][0-9]|3[0-2])$ ]]; then
+ echo -e "${NETWORK}${BOLD}${DGN}IPv4 Address: ${BGN}$NET${CL}"
+ break
+ else
+ whiptail --msgbox "$NET is not a valid IPv4 CIDR address. Please enter a correct value!" 8 58
+ fi
+ done
+
+ # call and validate Gateway
+ while true; do
+ GATE1=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \
+ --inputbox "Enter Gateway IP address for static IPv4" 8 58 "" \
+ --title "Gateway IP" 3>&1 1>&2 2>&3)
+ if [ -z "$GATE1" ]; then
+ whiptail --msgbox "Gateway IP address cannot be empty." 8 58
+ elif [[ ! "$GATE1" =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}$ ]]; then
+ whiptail --msgbox "Invalid Gateway IP address format." 8 58
+ else
+ GATE=",gw=$GATE1"
+ echo -e "${GATEWAY}${BOLD}${DGN}Gateway IP Address: ${BGN}$GATE1${CL}"
+ break
+ fi
+ done
+ break
+ ;;
+ esac
+ done
+
+ # IPv6 Address Management selection
+ while true; do
+ IPV6_METHOD=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --menu \
+ "Select IPv6 Address Management Type:" 15 58 4 \
+ "auto" "SLAAC/AUTO (recommended, default)" \
+ "dhcp" "DHCPv6" \
+ "static" "Static (manual entry)" \
+ "none" "Disabled" \
+ --default-item "auto" 3>&1 1>&2 2>&3)
+ [ $? -ne 0 ] && exit_script
+
+ case "$IPV6_METHOD" in
+ auto)
+ echo -e "${NETWORK}${BOLD}${DGN}IPv6: ${BGN}SLAAC/AUTO${CL}"
+ IPV6_ADDR=""
+ IPV6_GATE=""
+ break
+ ;;
+ dhcp)
+ echo -e "${NETWORK}${BOLD}${DGN}IPv6: ${BGN}DHCPv6${CL}"
+ IPV6_ADDR="dhcp"
+ IPV6_GATE=""
+ break
+ ;;
+ static)
+ # Ask for static IPv6 address (CIDR notation, e.g., 2001:db8::1234/64)
+ while true; do
+ IPV6_ADDR=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --inputbox \
+ "Set a static IPv6 CIDR address (e.g., 2001:db8::1234/64)" 8 58 "" \
+ --title "IPv6 STATIC ADDRESS" 3>&1 1>&2 2>&3) || exit_script
+ if [[ "$IPV6_ADDR" =~ ^([0-9a-fA-F:]+:+)+[0-9a-fA-F]+(/[0-9]{1,3})$ ]]; then
+ echo -e "${NETWORK}${BOLD}${DGN}IPv6 Address: ${BGN}$IPV6_ADDR${CL}"
+ break
+ else
+ whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --msgbox \
+ "$IPV6_ADDR is an invalid IPv6 CIDR address. Please enter a valid IPv6 CIDR address (e.g., 2001:db8::1234/64)" 8 58
+ fi
+ done
+ # Optional: ask for IPv6 gateway for static config
+ while true; do
+ IPV6_GATE=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --inputbox \
+ "Enter IPv6 gateway address (optional, leave blank for none)" 8 58 "" --title "IPv6 GATEWAY" 3>&1 1>&2 2>&3)
+ if [ -z "$IPV6_GATE" ]; then
+ IPV6_GATE=""
+ break
+ elif [[ "$IPV6_GATE" =~ ^([0-9a-fA-F:]+:+)+[0-9a-fA-F]+$ ]]; then
+ break
+ else
+ whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --msgbox \
+ "Invalid IPv6 gateway format." 8 58
+ fi
+ done
+ break
+ ;;
+ none)
+ echo -e "${NETWORK}${BOLD}${DGN}IPv6: ${BGN}Disabled${CL}"
+ IPV6_ADDR="none"
+ IPV6_GATE=""
+ break
+ ;;
+ *)
+ exit_script
+ ;;
+ esac
+ done
+
+ if [ "$var_os" == "alpine" ]; then
+ APT_CACHER=""
+ APT_CACHER_IP=""
+ else
+ if APT_CACHER_IP=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --inputbox "Set APT-Cacher IP (leave blank for none)" 8 58 --title "APT-Cacher IP" 3>&1 1>&2 2>&3); then
+ APT_CACHER="${APT_CACHER_IP:+yes}"
+ echo -e "${NETWORK}${BOLD}${DGN}APT-Cacher IP Address: ${BGN}${APT_CACHER_IP:-Default}${CL}"
+ else
+ exit_script
+ fi
+ fi
+
+ # if (whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --defaultno --title "IPv6" --yesno "Disable IPv6?" 10 58); then
+ # DISABLEIP6="yes"
+ # else
+ # DISABLEIP6="no"
+ # fi
+ # echo -e "${DISABLEIPV6}${BOLD}${DGN}Disable IPv6: ${BGN}$DISABLEIP6${CL}"
+
+ if MTU1=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --inputbox "Set Interface MTU Size (leave blank for default [The MTU of your selected vmbr, default is 1500])" 8 58 --title "MTU SIZE" 3>&1 1>&2 2>&3); then
+ if [ -z "$MTU1" ]; then
+ MTU1="Default"
+ MTU=""
+ else
+ MTU=",mtu=$MTU1"
+ fi
+ echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}$MTU1${CL}"
+ else
+ exit_script
+ fi
+
+ if SD=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --inputbox "Set a DNS Search Domain (leave blank for HOST)" 8 58 --title "DNS Search Domain" 3>&1 1>&2 2>&3); then
+ if [ -z "$SD" ]; then
+ SX=Host
+ SD=""
+ else
+ SX=$SD
+ SD="-searchdomain=$SD"
+ fi
+ echo -e "${SEARCH}${BOLD}${DGN}DNS Search Domain: ${BGN}$SX${CL}"
+ else
+ exit_script
+ fi
+
+ if NX=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --inputbox "Set a DNS Server IP (leave blank for HOST)" 8 58 --title "DNS SERVER IP" 3>&1 1>&2 2>&3); then
+ if [ -z "$NX" ]; then
+ NX=Host
+ NS=""
+ else
+ NS="-nameserver=$NX"
+ fi
+ echo -e "${NETWORK}${BOLD}${DGN}DNS Server IP Address: ${BGN}$NX${CL}"
+ else
+ exit_script
+ fi
+
+ if [ "$var_os" == "alpine" ] && [ "$NET" == "dhcp" ] && [ "$NX" != "Host" ]; then
+ UDHCPC_FIX="yes"
+ else
+ UDHCPC_FIX="no"
+ fi
+ export UDHCPC_FIX
+
+ if MAC1=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --inputbox "Set a MAC Address(leave blank for generated MAC)" 8 58 --title "MAC ADDRESS" 3>&1 1>&2 2>&3); then
+ if [ -z "$MAC1" ]; then
+ MAC1="Default"
+ MAC=""
+ else
+ MAC=",hwaddr=$MAC1"
+ echo -e "${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}$MAC1${CL}"
+ fi
+ else
+ exit_script
+ fi
+
+ if VLAN1=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --inputbox "Set a Vlan(leave blank for no VLAN)" 8 58 --title "VLAN" 3>&1 1>&2 2>&3); then
+ if [ -z "$VLAN1" ]; then
+ VLAN1="Default"
+ VLAN=""
+ else
+ VLAN=",tag=$VLAN1"
+ fi
+ echo -e "${VLANTAG}${BOLD}${DGN}Vlan: ${BGN}$VLAN1${CL}"
+ else
+ exit_script
+ fi
+
+ if ADV_TAGS=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --inputbox "Set Custom Tags?[If you remove all, there will be no tags!]" 8 58 "${TAGS}" --title "Advanced Tags" 3>&1 1>&2 2>&3); then
+ if [ -n "${ADV_TAGS}" ]; then
+ ADV_TAGS=$(echo "$ADV_TAGS" | tr -d '[:space:]')
+ TAGS="${ADV_TAGS}"
+ else
+ TAGS=";"
+ fi
+ echo -e "${NETWORK}${BOLD}${DGN}Tags: ${BGN}$TAGS${CL}"
+ else
+ exit_script
+ fi
+
+ configure_ssh_settings
+ export SSH_KEYS_FILE
+ echo -e "${ROOTSSH}${BOLD}${DGN}Root SSH Access: ${BGN}$SSH${CL}"
+ if (whiptail --backtitle "Proxmox VE Helper Scripts" --defaultno --title "FUSE Support" --yesno "Enable FUSE support?\nRequired for tools like rclone, mergerfs, AppImage, etc." 10 58); then
+ ENABLE_FUSE="yes"
+ else
+ ENABLE_FUSE="no"
+ fi
+ echo -e "${FUSE}${BOLD}${DGN}Enable FUSE Support: ${BGN}$ENABLE_FUSE${CL}"
+
+ if (whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --defaultno --title "VERBOSE MODE" --yesno "Enable Verbose Mode?" 10 58); then
+ VERBOSE="yes"
+ else
+ VERBOSE="no"
+ fi
+ echo -e "${SEARCH}${BOLD}${DGN}Verbose Mode: ${BGN}$VERBOSE${CL}"
+
+ if (whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --title "ADVANCED SETTINGS COMPLETE" --yesno "Ready to create ${APP} LXC?" 10 58); then
+ echo -e "${CREATING}${BOLD}${RD}Creating a ${APP} LXC using the above advanced settings${CL}"
+ else
+ clear
+ header_info
+ echo -e "${TAB}${INFO} ProxmoxVE Version ${PVEVERSION} | Kernel: ${KERNEL_VERSION}${CL}"
+ echo -e "${ADVANCED}${BOLD}${RD}Using Advanced Settings on node $PVEHOST_NAME${CL}"
+ advanced_settings
+ fi
+}
+
+# ------------------------------------------------------------------------------
+# diagnostics_check()
+#
+# - Ensures diagnostics config file exists at /usr/local/community-scripts/diagnostics
+# - Asks user whether to send anonymous diagnostic data
+# - Saves DIAGNOSTICS=yes/no in the config file
+# ------------------------------------------------------------------------------
+diagnostics_check() {
+  # Ensure the config directory exists before touching the diagnostics file.
+  if ! [ -d "/usr/local/community-scripts" ]; then
+    mkdir -p /usr/local/community-scripts
+  fi
+
+  if ! [ -f "/usr/local/community-scripts/diagnostics" ]; then
+    # First run: ask once, then persist the answer below.
+    if (whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --title "DIAGNOSTICS" --yesno "Send Diagnostics of LXC Installation?\n\n(This only transmits data without user data, just RAM, CPU, LXC name, ...)" 10 58); then
+      DIAGNOSTICS="yes"
+    else
+      DIAGNOSTICS="no"
+    fi
+    # FIX: this previously used 'cat </usr/local/community-scripts/diagnostics',
+    # which tried to READ the (not yet existing) file instead of writing it, and
+    # the heredoc body leaked into the shell as commands. Write the settings
+    # file with a proper heredoc redirection; EOF is unquoted so the chosen
+    # ${DIAGNOSTICS} value is expanded into the file.
+    cat <<EOF >/usr/local/community-scripts/diagnostics
+DIAGNOSTICS=${DIAGNOSTICS}
+
+#This file is used to store the diagnostics settings for the Community-Scripts API.
+#https://github.com/community-scripts/ProxmoxVED/discussions/1836
+#Your diagnostics will be sent to the Community-Scripts API for troubleshooting/statistical purposes.
+#You can review the data at https://community-scripts.github.io/ProxmoxVE/data
+#If you do not wish to send diagnostics, please set the variable 'DIAGNOSTICS' to "no" in /usr/local/community-scripts/diagnostics, or use the menue.
+#This will disable the diagnostics feature.
+#To send diagnostics, set the variable 'DIAGNOSTICS' to "yes" in /usr/local/community-scripts/diagnostics, or use the menue.
+#This will enable the diagnostics feature.
+#The following information will be sent:
+#"disk_size"
+#"core_count"
+#"ram_size"
+#"os_type"
+#"os_version"
+#"nsapp"
+#"method"
+#"pve_version"
+#"status"
+#If you have any concerns, please review the source code at /misc/build.func
+EOF
+  else
+    # File already exists: read the stored preference instead of asking again.
+    DIAGNOSTICS=$(awk -F '=' '/^DIAGNOSTICS/ {print $2}' /usr/local/community-scripts/diagnostics)
+
+  fi
+
+}
+
+# ------------------------------------------------------------------------------
+# default_var_settings
+#
+# - Ensures /usr/local/community-scripts/default.vars exists (creates if missing)
+# - Loads var_* values from default.vars (safe parser, no source/eval)
+# - Precedence: ENV var_* > default.vars > built-in defaults
+# - Maps var_verbose → VERBOSE
+# - Calls base_settings "$VERBOSE" and echo_default
+# ------------------------------------------------------------------------------
+default_var_settings() {
+  # Safe (no source/eval) loader for var_* defaults.
+  # Precedence: ENV var_* > default.vars file > built-in defaults.
+  # Allowed var_* keys (alphabetically sorted)
+  local VAR_WHITELIST=(
+    var_apt_cacher var_apt_cacher_ip var_brg var_cpu var_ctid var_disk var_fuse
+    var_gateway var_hostname var_ipv6_method var_ipv6_static var_mac var_mtu
+    var_net var_ns var_pw var_ram var_tags var_tun var_unprivileged
+    var_verbose var_vlan var_ssh var_ssh_authorized_key var_container_storage var_template_storage
+  )
+
+  # Snapshot: environment variables (highest precedence)
+  declare -A _HARD_ENV=()
+  local _k
+  for _k in "${VAR_WHITELIST[@]}"; do
+    if printenv "$_k" >/dev/null 2>&1; then _HARD_ENV["$_k"]=1; fi
+  done
+
+  # Find default.vars location
+  # Checked in order: system-wide, per-user config, current directory.
+  local _find_default_vars
+  _find_default_vars() {
+    local f
+    for f in \
+      /usr/local/community-scripts/default.vars \
+      "$HOME/.config/community-scripts/default.vars" \
+      "./default.vars"; do
+      [ -f "$f" ] && {
+        echo "$f"
+        return 0
+      }
+    done
+    return 1
+  }
+  # Allow override of storages via env (for non-interactive use cases)
+  [ -n "${var_template_storage:-}" ] && TEMPLATE_STORAGE="$var_template_storage"
+  [ -n "${var_container_storage:-}" ] && CONTAINER_STORAGE="$var_container_storage"
+
+  # Create once, with storages already selected, no var_ctid/var_hostname lines
+  local _ensure_default_vars
+  _ensure_default_vars() {
+    _find_default_vars >/dev/null 2>&1 && return 0
+
+    local canonical="/usr/local/community-scripts/default.vars"
+    msg_info "No default.vars found. Creating ${canonical}"
+    mkdir -p /usr/local/community-scripts
+
+    # Pick storages before writing the file (always ask unless only one)
+    # Create a minimal temp file to write into
+    : >"$canonical"
+
+    # Base content (no var_ctid / var_hostname here)
+    # Quoted 'EOF' delimiter: the template is written verbatim, no expansion.
+    cat >"$canonical" <<'EOF'
+# Community-Scripts defaults (var_* only). Lines starting with # are comments.
+# Precedence: ENV var_* > default.vars > built-ins.
+# Keep keys alphabetically sorted.
+
+# Container type
+var_unprivileged=1
+
+# Resources
+var_cpu=1
+var_disk=4
+var_ram=1024
+
+# Network
+var_brg=vmbr0
+var_net=dhcp
+var_ipv6_method=none
+# var_gateway=
+# var_ipv6_static=
+# var_vlan=
+# var_mtu=
+# var_mac=
+# var_ns=
+
+# SSH
+var_ssh=no
+# var_ssh_authorized_key=
+
+# APT cacher (optional)
+# var_apt_cacher=yes
+# var_apt_cacher_ip=192.168.1.10
+
+# Features/Tags/verbosity
+var_fuse=no
+var_tun=no
+var_tags=community-script
+var_verbose=no
+
+# Security (root PW) – empty => autologin
+# var_pw=
+EOF
+
+    # Now choose storages (always prompt unless just one exists)
+    choose_and_set_storage_for_file "$canonical" template
+    choose_and_set_storage_for_file "$canonical" container
+
+    chmod 0644 "$canonical"
+    msg_ok "Created ${canonical}"
+  }
+
+  # Whitelist check
+  local _is_whitelisted_key
+  _is_whitelisted_key() {
+    local k="$1"
+    local w
+    for w in "${VAR_WHITELIST[@]}"; do [ "$k" = "$w" ] && return 0; done
+    return 1
+  }
+
+  # Safe parser for KEY=VALUE lines
+  local _load_vars_file
+  _load_vars_file() {
+    local file="$1"
+    [ -f "$file" ] || return 0
+    msg_info "Loading defaults from ${file}"
+    # NOTE(review): 'key' and 'val' are declared but unused; parsing below
+    # uses var_key/var_val from BASH_REMATCH instead.
+    local line key val
+    while IFS= read -r line || [ -n "$line" ]; do
+      line="${line#"${line%%[![:space:]]*}"}"
+      line="${line%"${line##*[![:space:]]}"}"
+      [[ -z "$line" || "$line" == \#* ]] && continue
+      if [[ "$line" =~ ^([A-Za-z_][A-Za-z0-9_]*)=(.*)$ ]]; then
+        local var_key="${BASH_REMATCH[1]}"
+        local var_val="${BASH_REMATCH[2]}"
+
+        [[ "$var_key" != var_* ]] && continue
+        _is_whitelisted_key "$var_key" || {
+          msg_debug "Ignore non-whitelisted ${var_key}"
+          continue
+        }
+
+        # Strip quotes
+        if [[ "$var_val" =~ ^\"(.*)\"$ ]]; then
+          var_val="${BASH_REMATCH[1]}"
+        elif [[ "$var_val" =~ ^\'(.*)\'$ ]]; then
+          var_val="${BASH_REMATCH[1]}"
+        fi
+
+        # Unsafe characters
+        # NOTE(review): quotes were already removed by the regex above, so
+        # this case block is redundant (but harmless).
+        case $var_val in
+        \"*\")
+          var_val=${var_val#\"}
+          var_val=${var_val%\"}
+          ;;
+        \'*\')
+          var_val=${var_val#\'}
+          var_val=${var_val%\'}
+          ;;
+        esac # Hard env wins
+        [[ -n "${_HARD_ENV[$var_key]:-}" ]] && continue
+        # Set only if not already exported
+        [[ -z "${!var_key+x}" ]] && export "${var_key}=${var_val}"
+      else
+        msg_warn "Malformed line in ${file}: ${line}"
+      fi
+    done <"$file"
+    msg_ok "Loaded ${file}"
+  }
+
+  # 1) Ensure file exists
+  _ensure_default_vars
+
+  # 2) Load file
+  local dv
+  dv="$(_find_default_vars)" || {
+    msg_error "default.vars not found after ensure step"
+    return 1
+  }
+  _load_vars_file "$dv"
+
+  # 3) Map var_verbose → VERBOSE
+  if [[ -n "${var_verbose:-}" ]]; then
+    case "${var_verbose,,}" in 1 | yes | true | on) VERBOSE="yes" ;; 0 | no | false | off) VERBOSE="no" ;; *) VERBOSE="${var_verbose}" ;; esac
+  else
+    VERBOSE="no"
+  fi
+
+  # 4) Apply base settings and show summary
+  METHOD="mydefaults-global"
+  base_settings "$VERBOSE"
+  header_info
+  echo -e "${DEFAULT}${BOLD}${BL}Using My Defaults (default.vars) on node $PVEHOST_NAME${CL}"
+  echo_default
+}
+
+# ------------------------------------------------------------------------------
+# get_app_defaults_path()
+#
+# - Returns full path for app-specific defaults file
+# - Example: /usr/local/community-scripts/defaults/.vars
+# ------------------------------------------------------------------------------
+
+get_app_defaults_path() {
+  # Resolve the per-app defaults file, e.g.
+  # /usr/local/community-scripts/defaults/plex.vars
+  # Prefers NSAPP; falls back to the lowercased APP name.
+  local app_slug="${NSAPP:-${APP,,}}"
+  printf '/usr/local/community-scripts/defaults/%s.vars\n' "$app_slug"
+}
+
+# ------------------------------------------------------------------------------
+# maybe_offer_save_app_defaults
+#
+# - Called after advanced_settings returned with fully chosen values.
+# - If no .vars exists, offers to persist current advanced settings
+# into /usr/local/community-scripts/defaults/.vars
+# - Only writes whitelisted var_* keys.
+# - Extracts raw values from flags like ",gw=..." ",mtu=..." etc.
+# ------------------------------------------------------------------------------
+# Fallback definition: default_var_settings() keeps its own *local* whitelist,
+# which is not visible here, so define a global one if none exists yet.
+# Keep this list in sync with the local copy in default_var_settings().
+if ! declare -p VAR_WHITELIST >/dev/null 2>&1; then
+  declare -ag VAR_WHITELIST=(
+    var_apt_cacher var_apt_cacher_ip var_brg var_cpu var_ctid var_disk var_fuse
+    var_gateway var_hostname var_ipv6_method var_ipv6_static var_mac var_mtu
+    var_net var_ns var_pw var_ram var_tags var_tun var_unprivileged
+    var_verbose var_vlan var_ssh var_ssh_authorized_key var_container_storage var_template_storage
+  )
+fi
+
+_is_whitelisted_key() {
+  # Return 0 iff $1 is one of the allowed var_* keys in VAR_WHITELIST.
+  local candidate="$1" allowed
+  for allowed in "${VAR_WHITELIST[@]}"; do
+    [[ "$candidate" == "$allowed" ]] && return 0
+  done
+  return 1
+}
+
+_sanitize_value() {
+ # Disallow Command-Substitution / Shell-Meta
+ case "$1" in
+ *'$('* | *'`'* | *';'* | *'&'* | *'<('*)
+ echo ""
+ return 0
+ ;;
+ esac
+ echo "$1"
+}
+
+# Map-Parser: read var_* from file into _VARS_IN associative array
+declare -A _VARS_IN
+_load_vars_file() {
+ local file="$1"
+ [ -f "$file" ] || return 0
+ msg_info "Loading defaults from ${file}"
+ local line key val
+ while IFS= read -r line || [ -n "$line" ]; do
+ line="${line#"${line%%[![:space:]]*}"}"
+ line="${line%"${line##*[![:space:]]}"}"
+ [ -z "$line" ] && continue
+ case "$line" in
+ \#*) continue ;;
+ esac
+ key=$(printf "%s" "$line" | cut -d= -f1)
+ val=$(printf "%s" "$line" | cut -d= -f2-)
+ case "$key" in
+ var_*)
+ if _is_whitelisted_key "$key"; then
+ [ -z "${!key+x}" ] && export "$key=$val"
+ fi
+ ;;
+ esac
+ done <"$file"
+ msg_ok "Loaded ${file}"
+}
+
+# Diff function for two var_* files -> produces human-readable diff list for $1 (old) vs $2 (new)
+_build_vars_diff() {
+  local oldf="$1" newf="$2"
+  local k
+  local -A OLD=() NEW=()
+  # Snapshot both files via the shared map parser (fills _VARS_IN).
+  _load_vars_file_to_map "$oldf"
+  for k in "${!_VARS_IN[@]}"; do OLD["$k"]="${_VARS_IN[$k]}"; done
+  _load_vars_file_to_map "$newf"
+  for k in "${!_VARS_IN[@]}"; do NEW["$k"]="${_VARS_IN[$k]}"; done
+
+  # FIX: 'out' was never declared local, so it leaked into the global scope
+  # and accumulated text across repeated calls; declare it local and empty.
+  local out=""
+  out+="# Diff for ${APP} (${NSAPP})\n"
+  out+="# Old: ${oldf}\n# New: ${newf}\n\n"
+
+  local found_change=0
+
+  # Changed & Removed
+  for k in "${!OLD[@]}"; do
+    if [[ -v NEW["$k"] ]]; then
+      if [[ "${OLD[$k]}" != "${NEW[$k]}" ]]; then
+        out+="~ ${k}\n  - old: ${OLD[$k]}\n  + new: ${NEW[$k]}\n"
+        found_change=1
+      fi
+    else
+      out+="- ${k}\n  - old: ${OLD[$k]}\n"
+      found_change=1
+    fi
+  done
+
+  # Added
+  for k in "${!NEW[@]}"; do
+    if [[ ! -v OLD["$k"] ]]; then
+      out+="+ ${k}\n  + new: ${NEW[$k]}\n"
+      found_change=1
+    fi
+  done
+
+  if [[ $found_change -eq 0 ]]; then
+    out+="(No differences)\n"
+  fi
+
+  # %b expands the embedded \n escapes into real newlines.
+  printf "%b" "$out"
+}
+
+# Build a temporary .vars file from current advanced settings; prints the tmp path.
+_build_current_app_vars_tmp() {
+  # FIX: all scratch variables were implicitly global; declare them local so
+  # repeated calls do not pollute the shell environment.
+  local tmpf _net _gate _ipv6_method _ipv6_static _ipv6_gateway
+  local _mtu _vlan _mac _ns _searchdomain _ssh _ssh_auth
+  local _apt_cacher _apt_cacher_ip _fuse _tun _tags _verbose
+  local _unpriv _cpu _ram _disk _hostname _tpl_storage _ct_storage
+  tmpf="$(mktemp /tmp/${NSAPP:-app}.vars.new.XXXXXX)"
+
+  # NET/GW — GATE carries a ",gw=<ip>" flag; strip the prefix
+  # (parameter expansion instead of echo|sed: no subshell needed)
+  _net="${NET:-}"
+  _gate=""
+  case "${GATE:-}" in
+  ,gw=*) _gate="${GATE#,gw=}" ;;
+  esac
+
+  # IPv6
+  _ipv6_method="${IPV6_METHOD:-auto}"
+  _ipv6_static=""
+  _ipv6_gateway=""
+  if [ "$_ipv6_method" = "static" ]; then
+    _ipv6_static="${IPV6_ADDR:-}"
+    # NOTE(review): the gateway is captured but never written below — confirm intent.
+    _ipv6_gateway="${IPV6_GATE:-}"
+  fi
+
+  # MTU/VLAN/MAC — same flag-stripping as above
+  _mtu=""
+  _vlan=""
+  _mac=""
+  case "${MTU:-}" in
+  ,mtu=*) _mtu="${MTU#,mtu=}" ;;
+  esac
+  case "${VLAN:-}" in
+  ,tag=*) _vlan="${VLAN#,tag=}" ;;
+  esac
+  case "${MAC:-}" in
+  ,hwaddr=*) _mac="${MAC#,hwaddr=}" ;;
+  esac
+
+  # DNS / Searchdomain — strip the "-nameserver="/"-searchdomain=" flag prefixes
+  _ns=""
+  _searchdomain=""
+  case "${NS:-}" in
+  -nameserver=*) _ns="${NS#-nameserver=}" ;;
+  esac
+  case "${SD:-}" in
+  -searchdomain=*) _searchdomain="${SD#-searchdomain=}" ;;
+  esac
+
+  # SSH / APT / Features
+  _ssh="${SSH:-no}"
+  _ssh_auth="${SSH_AUTHORIZED_KEY:-}"
+  _apt_cacher="${APT_CACHER:-}"
+  _apt_cacher_ip="${APT_CACHER_IP:-}"
+  _fuse="${ENABLE_FUSE:-no}"
+  _tun="${ENABLE_TUN:-no}"
+  _tags="${TAGS:-}"
+  _verbose="${VERBOSE:-no}"
+
+  # Type / Resources / Identity
+  _unpriv="${CT_TYPE:-1}"
+  _cpu="${CORE_COUNT:-1}"
+  _ram="${RAM_SIZE:-1024}"
+  _disk="${DISK_SIZE:-4}"
+  _hostname="${HN:-$NSAPP}"
+
+  # Storage
+  _tpl_storage="${TEMPLATE_STORAGE:-${var_template_storage:-}}"
+  _ct_storage="${CONTAINER_STORAGE:-${var_container_storage:-}}"
+
+  # Emit only non-empty values; every value passes through _sanitize_value.
+  {
+    echo "# App-specific defaults for ${APP} (${NSAPP})"
+    echo "# Generated on $(date -u '+%Y-%m-%dT%H:%M:%SZ')"
+    echo
+
+    echo "var_unprivileged=$(_sanitize_value "$_unpriv")"
+    echo "var_cpu=$(_sanitize_value "$_cpu")"
+    echo "var_ram=$(_sanitize_value "$_ram")"
+    echo "var_disk=$(_sanitize_value "$_disk")"
+
+    [ -n "${BRG:-}" ] && echo "var_brg=$(_sanitize_value "$BRG")"
+    [ -n "$_net" ] && echo "var_net=$(_sanitize_value "$_net")"
+    [ -n "$_gate" ] && echo "var_gateway=$(_sanitize_value "$_gate")"
+    [ -n "$_mtu" ] && echo "var_mtu=$(_sanitize_value "$_mtu")"
+    [ -n "$_vlan" ] && echo "var_vlan=$(_sanitize_value "$_vlan")"
+    [ -n "$_mac" ] && echo "var_mac=$(_sanitize_value "$_mac")"
+    [ -n "$_ns" ] && echo "var_ns=$(_sanitize_value "$_ns")"
+
+    [ -n "$_ipv6_method" ] && echo "var_ipv6_method=$(_sanitize_value "$_ipv6_method")"
+    [ -n "$_ipv6_static" ] && echo "var_ipv6_static=$(_sanitize_value "$_ipv6_static")"
+
+    [ -n "$_ssh" ] && echo "var_ssh=$(_sanitize_value "$_ssh")"
+    [ -n "$_ssh_auth" ] && echo "var_ssh_authorized_key=$(_sanitize_value "$_ssh_auth")"
+
+    [ -n "$_apt_cacher" ] && echo "var_apt_cacher=$(_sanitize_value "$_apt_cacher")"
+    [ -n "$_apt_cacher_ip" ] && echo "var_apt_cacher_ip=$(_sanitize_value "$_apt_cacher_ip")"
+
+    [ -n "$_fuse" ] && echo "var_fuse=$(_sanitize_value "$_fuse")"
+    [ -n "$_tun" ] && echo "var_tun=$(_sanitize_value "$_tun")"
+    [ -n "$_tags" ] && echo "var_tags=$(_sanitize_value "$_tags")"
+    [ -n "$_verbose" ] && echo "var_verbose=$(_sanitize_value "$_verbose")"
+
+    [ -n "$_hostname" ] && echo "var_hostname=$(_sanitize_value "$_hostname")"
+    # NOTE(review): var_searchdomain is not in VAR_WHITELIST, so the loaders
+    # above will skip it when this file is read back — confirm intent.
+    [ -n "$_searchdomain" ] && echo "var_searchdomain=$(_sanitize_value "$_searchdomain")"
+
+    [ -n "$_tpl_storage" ] && echo "var_template_storage=$(_sanitize_value "$_tpl_storage")"
+    [ -n "$_ct_storage" ] && echo "var_container_storage=$(_sanitize_value "$_ct_storage")"
+  } >"$tmpf"
+
+  echo "$tmpf"
+}
+
+# ------------------------------------------------------------------------------
+# maybe_offer_save_app_defaults()
+#
+# - Called after advanced_settings()
+# - Offers to save current values as app defaults if not existing
+# - If file exists: shows diff and allows Update, Keep, View Diff, or Cancel
+# ------------------------------------------------------------------------------
+maybe_offer_save_app_defaults() {
+  # Offer to persist the advanced settings just chosen as per-app defaults.
+  local app_vars_path
+  app_vars_path="$(get_app_defaults_path)"
+
+  # always build from current settings
+  local new_tmp diff_tmp
+  new_tmp="$(_build_current_app_vars_tmp)"
+  diff_tmp="$(mktemp -p /tmp "${NSAPP:-app}.vars.diff.XXXXXX")"
+
+  # 1) if no file → offer to create
+  if [[ ! -f "$app_vars_path" ]]; then
+    if whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \
+      --yesno "Save these advanced settings as defaults for ${APP}?\n\nThis will create:\n${app_vars_path}" 12 72; then
+      mkdir -p "$(dirname "$app_vars_path")"
+      install -m 0644 "$new_tmp" "$app_vars_path"
+      msg_ok "Saved app defaults: ${app_vars_path}"
+    fi
+    rm -f "$new_tmp" "$diff_tmp"
+    return 0
+  fi
+
+  # 2) if file exists → build diff
+  _build_vars_diff "$app_vars_path" "$new_tmp" >"$diff_tmp"
+
+  # if no differences → do nothing
+  if grep -q "^(No differences)$" "$diff_tmp"; then
+    rm -f "$new_tmp" "$diff_tmp"
+    return 0
+  fi
+
+  # 3) if file exists → show menu with default selection "Update Defaults"
+  local app_vars_file
+  app_vars_file="$(basename "$app_vars_path")"
+
+  while true; do
+    local sel
+    # Cancel/ESC is mapped to the "Cancel" menu entry below.
+    sel="$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \
+      --title "APP DEFAULTS – ${APP}" \
+      --menu "Differences detected. What do you want to do?" 20 78 10 \
+      "Update Defaults" "Write new values to ${app_vars_file}" \
+      "Keep Current" "Keep existing defaults (no changes)" \
+      "View Diff" "Show a detailed diff" \
+      "Cancel" "Abort without changes" \
+      --default-item "Update Defaults" \
+      3>&1 1>&2 2>&3)" || { sel="Cancel"; }
+
+    case "$sel" in
+    "Update Defaults")
+      install -m 0644 "$new_tmp" "$app_vars_path"
+      msg_ok "Updated app defaults: ${app_vars_path}"
+      break
+      ;;
+    "Keep Current")
+      msg_info "Keeping current app defaults: ${app_vars_path}"
+      break
+      ;;
+    "View Diff")
+      whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \
+        --title "Diff – ${APP}" \
+        --scrolltext --textbox "$diff_tmp" 25 100
+      # no break — return to the menu after viewing the diff
+      ;;
+    "Cancel" | *)
+      msg_info "Canceled. No changes to app defaults."
+      break
+      ;;
+    esac
+  done
+
+  rm -f "$new_tmp" "$diff_tmp"
+}
+
+ensure_storage_selection_for_vars_file() {
+  # Make sure both storage keys are present in the given vars file; prompt
+  # for any that are missing and persist the selection.
+  local vars_file="$1"
+  local stored_tpl stored_ct
+  stored_tpl="$(grep -E '^var_template_storage=' "$vars_file" | cut -d= -f2-)"
+  stored_ct="$(grep -E '^var_container_storage=' "$vars_file" | cut -d= -f2-)"
+
+  # Both already configured: adopt them and skip the dialogs.
+  if [[ -n "$stored_tpl" && -n "$stored_ct" ]]; then
+    TEMPLATE_STORAGE="$stored_tpl"
+    CONTAINER_STORAGE="$stored_ct"
+    return 0
+  fi
+
+  choose_and_set_storage_for_file "$vars_file" template
+  choose_and_set_storage_for_file "$vars_file" container
+
+  msg_ok "Storage configuration saved to $(basename "$vars_file")"
+}
+
+diagnostics_menu() {
+  # Toggle the persisted diagnostics opt-in. The yes-button is labelled with
+  # the state the user would switch TO; "Back" leaves the setting unchanged.
+  # (Refactored: the two original branches were identical except for the
+  # target state and button label.)
+  local target
+  if [ "${DIAGNOSTICS:-no}" = "yes" ]; then
+    target="no"
+  else
+    target="yes"
+  fi
+
+  if whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \
+    --title "DIAGNOSTIC SETTINGS" \
+    --yesno "Send Diagnostics?\n\nCurrent: ${DIAGNOSTICS}" 10 58 \
+    --yes-button "${target^}" --no-button "Back"; then
+    DIAGNOSTICS="$target"
+    sed -i "s/^DIAGNOSTICS=.*/DIAGNOSTICS=${target}/" /usr/local/community-scripts/diagnostics
+    whiptail --msgbox "Diagnostics set to ${DIAGNOSTICS}." 8 58
+  fi
+}
+
+ensure_global_default_vars_file() {
+  # Create the global default.vars (and its directory) if missing,
+  # then print its path for the caller.
+  local vars_path="/usr/local/community-scripts/default.vars"
+  if [[ ! -f "$vars_path" ]]; then
+    mkdir -p "${vars_path%/*}"
+    touch "$vars_path"
+  fi
+  echo "$vars_path"
+}
+
+# ------------------------------------------------------------------------------
+# install_script()
+#
+# - Main entrypoint for installation mode
+# - Runs safety checks (pve_check, root_check, maxkeys_check, diagnostics_check)
+# - Builds interactive menu (Default, Verbose, Advanced, My Defaults, App Defaults, Diagnostics, Storage, Exit)
+# - Applies chosen settings and triggers container build
+# ------------------------------------------------------------------------------
+install_script() {
+  # Host safety checks before anything is changed.
+  pve_check
+  shell_check
+  root_check
+  arch_check
+  ssh_check
+  maxkeys_check
+  diagnostics_check
+
+  # Stop the ping-instances service if it is running.
+  if systemctl is-active -q ping-instances.service; then
+    systemctl -q stop ping-instances.service
+  fi
+
+  NEXTID=$(pvesh get /cluster/nextid)
+  timezone=$(cat /etc/timezone)
+
+  # Show APP Header
+  header_info
+
+  # --- Support CLI argument as direct preset (default, advanced, …) ---
+  CHOICE="${mode:-${1:-}}"
+
+  # If no CLI argument → show whiptail menu
+  # Build menu dynamically based on available options
+  local appdefaults_option=""
+  local settings_option=""
+  local menu_items=(
+    "1" "Default Install"
+    "2" "Advanced Install"
+    "3" "My Defaults"
+  )
+
+  # Slot 4 is "App Defaults" only when a per-app vars file exists;
+  # "Settings" then shifts to slot 5.
+  if [ -f "$(get_app_defaults_path)" ]; then
+    appdefaults_option="4"
+    menu_items+=("4" "App Defaults for ${APP}")
+    settings_option="5"
+    menu_items+=("5" "Settings")
+  else
+    settings_option="4"
+    menu_items+=("4" "Settings")
+  fi
+
+  if [ -z "$CHOICE" ]; then
+
+    TMP_CHOICE=$(whiptail \
+      --backtitle "Proxmox VE Helper Scripts" \
+      --title "Community-Scripts Options" \
+      --ok-button "Select" --cancel-button "Exit Script" \
+      --notags \
+      --menu "\nChoose an option:\n  Use TAB or Arrow keys to navigate, ENTER to select.\n" \
+      20 60 9 \
+      "${menu_items[@]}" \
+      --default-item "1" \
+      3>&1 1>&2 2>&3) || exit_script
+    CHOICE="$TMP_CHOICE"
+  fi
+
+  APPDEFAULTS_OPTION="$appdefaults_option"
+  SETTINGS_OPTION="$settings_option"
+
+  # --- Main case ---
+  local defaults_target=""
+  local run_maybe_offer="no"
+  case "$CHOICE" in
+  1 | default | DEFAULT)
+    header_info
+    echo -e "${DEFAULT}${BOLD}${BL}Using Default Settings on node $PVEHOST_NAME${CL}"
+    VERBOSE="no"
+    METHOD="default"
+    base_settings "$VERBOSE"
+    echo_default
+    defaults_target="$(ensure_global_default_vars_file)"
+    ;;
+  2 | advanced | ADVANCED)
+    header_info
+    echo -e "${TAB}${INFO} ProxmoxVE Version ${PVEVERSION} | Kernel: ${KERNEL_VERSION}${CL}"
+    echo -e "${ADVANCED}${BOLD}${RD}Using Advanced Install on node $PVEHOST_NAME${CL}"
+    METHOD="advanced"
+    base_settings
+    advanced_settings
+    defaults_target="$(ensure_global_default_vars_file)"
+    run_maybe_offer="yes"
+    ;;
+  3 | mydefaults | MYDEFAULTS)
+    default_var_settings || {
+      msg_error "Failed to apply default.vars"
+      exit 1
+    }
+    defaults_target="/usr/local/community-scripts/default.vars"
+    ;;
+  # APPDEFAULTS_OPTION is "4" only when the per-app vars file existed at
+  # menu build time (see above); otherwise it is empty and never matches.
+  "$APPDEFAULTS_OPTION" | appdefaults | APPDEFAULTS)
+    if [ -f "$(get_app_defaults_path)" ]; then
+      header_info
+      echo -e "${DEFAULT}${BOLD}${BL}Using App Defaults for ${APP} on node $PVEHOST_NAME${CL}"
+      METHOD="appdefaults"
+      base_settings
+      _load_vars_file "$(get_app_defaults_path)"
+      echo_default
+      defaults_target="$(get_app_defaults_path)"
+    else
+      msg_error "No App Defaults available for ${APP}"
+      exit 1
+    fi
+    ;;
+  "$SETTINGS_OPTION" | settings | SETTINGS)
+    settings_menu
+    defaults_target=""
+    ;;
+  *)
+    echo -e "${CROSS}${RD}Invalid option: $CHOICE${CL}"
+    exit 1
+    ;;
+  esac
+
+  # Persist/confirm storage selection for whichever vars file was used.
+  if [[ -n "$defaults_target" ]]; then
+    ensure_storage_selection_for_vars_file "$defaults_target"
+  fi
+
+  if [[ "$run_maybe_offer" == "yes" ]]; then
+    maybe_offer_save_app_defaults
+  fi
+}
+
+edit_default_storage() {
+ local vf="/usr/local/community-scripts/default.vars"
+
+ # Ensure file exists
+ if [[ ! -f "$vf" ]]; then
+ mkdir -p "$(dirname "$vf")"
+ touch "$vf"
+ fi
+
+ # Let ensure_storage_selection_for_vars_file handle everything
+ ensure_storage_selection_for_vars_file "$vf"
+}
+
+settings_menu() {
+ # Show the SETTINGS menu in a loop until the user picks Back (cancel) or Exit.
+ # The menu gains an "Edit App.vars" entry only when app defaults exist, which
+ # shifts the numbering of the Exit entry (4 without app vars, 5 with them).
+ while true; do
+ local menu_entries=(
+ "1" "Manage API-Diagnostic Setting"
+ "2" "Edit Default.vars"
+ "3" "Edit Default Storage"
+ )
+ if [ -f "$(get_app_defaults_path)" ]; then
+ menu_entries+=("4" "Edit App.vars for ${APP}")
+ menu_entries+=("5" "Exit")
+ else
+ menu_entries+=("4" "Exit")
+ fi
+
+ local selection
+ selection=$(whiptail --backtitle "Proxmox VE Helper Scripts" \
+ --title "Community-Scripts SETTINGS Menu" \
+ --ok-button "OK" --cancel-button "Back" \
+ --menu "\n\nChoose a settings option:\n\nUse TAB or Arrow keys to navigate, ENTER to select." 20 60 9 \
+ "${menu_entries[@]}" \
+ 3>&1 1>&2 2>&3) || break
+
+ case "$selection" in
+ 1) diagnostics_menu ;;
+ 2) ${EDITOR:-nano} /usr/local/community-scripts/default.vars ;;
+ 3) edit_default_storage ;;
+ 4)
+ # "4" is App.vars when they exist, otherwise it is the Exit entry.
+ if [ -f "$(get_app_defaults_path)" ]; then
+ ${EDITOR:-nano} "$(get_app_defaults_path)"
+ else
+ exit_script
+ fi
+ ;;
+ 5) exit_script ;;
+ esac
+ done
+}
+
+# ===== Unified storage selection & writing to vars files =====
+_write_storage_to_vars() {
+ # $1 = vars_file, $2 = key (var_container_storage / var_template_storage), $3 = value
+ local file="$1"
+ local name="$2"
+ local value="$3"
+ # Delete every existing definition of the key, whether live or commented
+ # out, so the appended line is the single authoritative one.
+ sed -i "/^[#[:space:]]*${name}=/d" "$file"
+ printf '%s=%s\n' "$name" "$value" >>"$file"
+}
+
+choose_and_set_storage_for_file() {
+ # $1 = vars_file, $2 = class ('container'|'template')
+ #
+ # Resolves the storage to use for the given class, persists it into the
+ # vars file via _write_storage_to_vars, and exports the matching
+ # environment variables so later steps (e.g. app-default save) see it.
+ # Returns 1 on unknown class or when interactive selection is aborted.
+ local vf="$1" class="$2" key=""
+ case "$class" in
+ container) key="var_container_storage" ;;
+ template) key="var_template_storage" ;;
+ *)
+ msg_error "Unknown storage class: $class"
+ return 1
+ ;;
+ esac
+
+ # Map the class to the Proxmox content type pvesm filters on.
+ local content="rootdir"
+ [[ "$class" == "template" ]] && content="vztmpl"
+
+ # If only one storage supports the content type, auto-pick it; otherwise
+ # always ask the user (even if the vars file already holds a value).
+ local count
+ count=$(pvesm status -content "$content" | awk 'NR>1{print $1}' | wc -l)
+
+ if [[ "$count" -eq 1 ]]; then
+ STORAGE_RESULT=$(pvesm status -content "$content" | awk 'NR>1{print $1; exit}')
+ STORAGE_INFO=""
+ else
+ # select_storage sets STORAGE_RESULT/STORAGE_INFO on success.
+ select_storage "$class" || return 1
+ fi
+
+ _write_storage_to_vars "$vf" "$key" "$STORAGE_RESULT"
+
+ # Keep environment in sync for later steps (e.g. app-default save)
+ if [[ "$class" == "container" ]]; then
+ export var_container_storage="$STORAGE_RESULT"
+ export CONTAINER_STORAGE="$STORAGE_RESULT"
+ else
+ export var_template_storage="$STORAGE_RESULT"
+ export TEMPLATE_STORAGE="$STORAGE_RESULT"
+ fi
+
+ msg_ok "Updated ${key} → ${STORAGE_RESULT}"
+}
+
+# ------------------------------------------------------------------------------
+# check_container_resources()
+#
+# - Compares host RAM/CPU with required values (var_ram MB / var_cpu cores)
+# - Warns if under-provisioned and asks user to continue or abort
+# - Exits unless the user types exactly "yes" (deliberately strict: data loss)
+# ------------------------------------------------------------------------------
+check_container_resources() {
+ current_ram=$(free -m | awk 'NR==2{print $2}')
+ current_cpu=$(nproc)
+
+ # Sufficient resources: print the blank separator line and return early.
+ if [[ "$current_ram" -ge "$var_ram" && "$current_cpu" -ge "$var_cpu" ]]; then
+ echo -e ""
+ return
+ fi
+
+ echo -e "\n${INFO}${HOLD} ${GN}Required: ${var_cpu} CPU, ${var_ram}MB RAM ${CL}| ${RD}Current: ${current_cpu} CPU, ${current_ram}MB RAM${CL}"
+ echo -e "${YWB}Please ensure that the ${APP} LXC is configured with at least ${var_cpu} vCPU and ${var_ram} MB RAM for the build process.${CL}\n"
+ echo -ne "${INFO}${HOLD} May cause data loss! ${INFO} Continue update with under-provisioned LXC? "
+ read -r prompt
+ if [[ ! ${prompt,,} =~ ^(yes)$ ]]; then
+ echo -e "${CROSS}${HOLD} ${YWB}Exiting based on user input.${CL}"
+ exit 1
+ fi
+}
+
+# ------------------------------------------------------------------------------
+# check_container_storage()
+#
+# - Checks /boot partition usage
+# - Warns if usage >80% and asks user confirmation before proceeding
+# ------------------------------------------------------------------------------
+check_container_storage() {
+ # Scope all working variables locally: previously only used_size was local
+ # while total_size/usage/prompt leaked into the caller's namespace.
+ local total_size used_size usage prompt
+ total_size=$(df /boot --output=size | tail -n 1)
+ used_size=$(df /boot --output=used | tail -n 1)
+ usage=$((100 * used_size / total_size))
+ if ((usage > 80)); then
+ echo -e "${INFO}${HOLD} ${YWB}Warning: Storage is dangerously low (${usage}%).${CL}"
+ echo -ne "Continue anyway? "
+ read -r prompt
+ # Accept y/yes (case-insensitive); anything else aborts.
+ if [[ ! ${prompt,,} =~ ^(y|yes)$ ]]; then
+ echo -e "${CROSS}${HOLD}${YWB}Exiting based on user input.${CL}"
+ exit 1
+ fi
+ fi
+}
+
+# ------------------------------------------------------------------------------
+# ssh_extract_keys_from_file()
+#
+# - Extracts valid SSH public keys from given file
+# - Supports RSA, Ed25519, ECDSA and filters out comments/invalid lines
+# - Keys preceded by options are printed from the key-type token onward
+# - Silently returns 0 (printing nothing) if the file is missing/unreadable
+# ------------------------------------------------------------------------------
+ssh_extract_keys_from_file() {
+ local f="$1"
+ [[ -r "$f" ]] || return 0
+ # Strip CR (Windows line endings) before matching. The awk comments below
+ # are German: "nackt" = bare "type base64 [comment]" line; "mit Optionen" =
+ # options precede the key, so print from the first key-type match onward.
+ tr -d '\r' <"$f" | awk '
+ /^[[:space:]]*#/ {next}
+ /^[[:space:]]*$/ {next}
+ # nackt: typ base64 [comment]
+ /^(ssh-(rsa|ed25519)|ecdsa-sha2-nistp256|sk-(ssh-ed25519|ecdsa-sha2-nistp256))[[:space:]]+/ {print; next}
+ # mit Optionen: finde ab erstem Key-Typ
+ {
+ match($0, /(ssh-(rsa|ed25519)|ecdsa-sha2-nistp256|sk-(ssh-ed25519|ecdsa-sha2-nistp256))[[:space:]]+/)
+ if (RSTART>0) { print substr($0, RSTART) }
+ }
+ '
+}
+
+# ------------------------------------------------------------------------------
+# ssh_build_choices_from_files()
+#
+# - Builds interactive whiptail checklist of available SSH keys
+# - Generates fingerprint, type and comment for each key
+# - Outputs via globals: CHOICES (whiptail triplets), COUNT (number of keys),
+#   MAPFILE (temp file mapping "Kn|<key line>" for later lookup)
+# - NOTE(review): MAPFILE shadows bash's default array name for the mapfile
+#   builtin, and the temp file is not removed here — presumably cleaned up
+#   by the caller or process exit; verify.
+# ------------------------------------------------------------------------------
+ssh_build_choices_from_files() {
+ local -a files=("$@")
+ CHOICES=()
+ COUNT=0
+ MAPFILE="$(mktemp)"
+ local id key typ fp cmt base ln=0
+
+ for f in "${files[@]}"; do
+ [[ -f "$f" && -r "$f" ]] || continue
+ base="$(basename -- "$f")"
+ # Skip non-key files; for id_* keypairs only accept the .pub half.
+ case "$base" in
+ known_hosts | known_hosts.* | config) continue ;;
+ id_*) [[ "$f" != *.pub ]] && continue ;;
+ esac
+
+ # map every key in file
+ while IFS= read -r key; do
+ [[ -n "$key" ]] || continue
+
+ typ=""
+ fp=""
+ cmt=""
+ # Only the pure key part (without options) is already included in ‘key’.
+ read -r _typ _b64 _cmt <<<"$key"
+ typ="${_typ:-key}"
+ cmt="${_cmt:-}"
+ # Fingerprint via ssh-keygen (if available)
+ if command -v ssh-keygen >/dev/null 2>&1; then
+ fp="$(printf '%s\n' "$key" | ssh-keygen -lf - 2>/dev/null | awk '{print $2}')"
+ fi
+ # Label shorten
+ [[ ${#cmt} -gt 40 ]] && cmt="${cmt:0:37}..."
+
+ # Assign a stable tag Kn and record the tag->key mapping for later lookup.
+ ln=$((ln + 1))
+ COUNT=$((COUNT + 1))
+ id="K${COUNT}"
+ echo "${id}|${key}" >>"$MAPFILE"
+ CHOICES+=("$id" "[$typ] ${fp:+$fp }${cmt:+$cmt }— ${base}" "OFF")
+ done < <(ssh_extract_keys_from_file "$f")
+ done
+}
+
+# ------------------------------------------------------------------------------
+# ssh_discover_default_files()
+#
+# - Scans standard paths for SSH keys
+# - Includes ~/.ssh/*.pub, /etc/ssh/authorized_keys, etc.
+# - Emits the candidate list NUL-delimited on stdout (filenames may contain
+#   anything except NUL)
+# ------------------------------------------------------------------------------
+ssh_discover_default_files() {
+ local -a candidates=()
+ # nullglob: unmatched globs expand to nothing instead of themselves.
+ shopt -s nullglob
+ candidates+=(/root/.ssh/authorized_keys /root/.ssh/authorized_keys2)
+ candidates+=(/root/.ssh/*.pub)
+ candidates+=(/etc/ssh/authorized_keys /etc/ssh/authorized_keys.d/*)
+ shopt -u nullglob
+ printf '%s\0' "${candidates[@]}"
+}
+
+configure_ssh_settings() {
+ SSH_KEYS_FILE="$(mktemp)"
+ : >"$SSH_KEYS_FILE"
+
+ IFS=$'\0' read -r -d '' -a _def_files < <(ssh_discover_default_files && printf '\0')
+ ssh_build_choices_from_files "${_def_files[@]}"
+ local default_key_count="$COUNT"
+
+ local ssh_key_mode
+ if [[ "$default_key_count" -gt 0 ]]; then
+ ssh_key_mode=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --title "SSH KEY SOURCE" --menu \
+ "Provision SSH keys for root:" 14 72 4 \
+ "found" "Select from detected keys (${default_key_count})" \
+ "manual" "Paste a single public key" \
+ "folder" "Scan another folder (path or glob)" \
+ "none" "No keys" 3>&1 1>&2 2>&3) || exit_script
+ else
+ ssh_key_mode=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --title "SSH KEY SOURCE" --menu \
+ "No host keys detected; choose manual/none:" 12 72 2 \
+ "manual" "Paste a single public key" \
+ "none" "No keys" 3>&1 1>&2 2>&3) || exit_script
+ fi
+
+ case "$ssh_key_mode" in
+ found)
+ local selection
+ selection=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --title "SELECT HOST KEYS" \
+ --checklist "Select one or more keys to import:" 20 140 10 "${CHOICES[@]}" 3>&1 1>&2 2>&3) || exit_script
+ for tag in $selection; do
+ tag="${tag%\"}"
+ tag="${tag#\"}"
+ local line
+ line=$(grep -E "^${tag}\|" "$MAPFILE" | head -n1 | cut -d'|' -f2-)
+ [[ -n "$line" ]] && printf '%s\n' "$line" >>"$SSH_KEYS_FILE"
+ done
+ ;;
+ manual)
+ SSH_AUTHORIZED_KEY="$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \
+ --inputbox "Paste one SSH public key line (ssh-ed25519/ssh-rsa/...)" 10 72 --title "SSH Public Key" 3>&1 1>&2 2>&3)"
+ [[ -n "$SSH_AUTHORIZED_KEY" ]] && printf '%s\n' "$SSH_AUTHORIZED_KEY" >>"$SSH_KEYS_FILE"
+ ;;
+ folder)
+ local glob_path
+ glob_path=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \
+ --inputbox "Enter a folder or glob to scan (e.g. /root/.ssh/*.pub)" 10 72 --title "Scan Folder/Glob" 3>&1 1>&2 2>&3)
+ if [[ -n "$glob_path" ]]; then
+ shopt -s nullglob
+ read -r -a _scan_files <<<"$glob_path"
+ shopt -u nullglob
+ if [[ "${#_scan_files[@]}" -gt 0 ]]; then
+ ssh_build_choices_from_files "${_scan_files[@]}"
+ if [[ "$COUNT" -gt 0 ]]; then
+ local folder_selection
+ folder_selection=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --title "SELECT FOLDER KEYS" \
+ --checklist "Select key(s) to import:" 20 78 10 "${CHOICES[@]}" 3>&1 1>&2 2>&3) || exit_script
+ for tag in $folder_selection; do
+ tag="${tag%\"}"
+ tag="${tag#\"}"
+ local line
+ line=$(grep -E "^${tag}\|" "$MAPFILE" | head -n1 | cut -d'|' -f2-)
+ [[ -n "$line" ]] && printf '%s\n' "$line" >>"$SSH_KEYS_FILE"
+ done
+ else
+ whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --msgbox "No keys found in: $glob_path" 8 60
+ fi
+ else
+ whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --msgbox "Path/glob returned no files." 8 60
+ fi
+ fi
+ ;;
+ none)
+ :
+ ;;
+ esac
+
+ if [[ -s "$SSH_KEYS_FILE" ]]; then
+ sort -u -o "$SSH_KEYS_FILE" "$SSH_KEYS_FILE"
+ printf '\n' >>"$SSH_KEYS_FILE"
+ fi
+
+ if [[ -s "$SSH_KEYS_FILE" || "$PW" == -password* ]]; then
+ if (whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --defaultno --title "SSH ACCESS" --yesno "Enable root SSH access?" 10 58); then
+ SSH="yes"
+ else
+ SSH="no"
+ fi
+ else
+ SSH="no"
+ fi
+}
+
+# ------------------------------------------------------------------------------
+# start()
+#
+# - Entry point of script
+# - On Proxmox host: calls install_script
+# - In silent mode: runs update_script
+# - Otherwise: shows update/setting menu
+# ------------------------------------------------------------------------------
+start() {
+ # Pull shared helper functions from the dev repo (requires network access).
+ source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/tools.func)
+ # pveversion exists only on a Proxmox VE host -> take the installer path.
+ if command -v pveversion >/dev/null 2>&1; then
+ install_script || return 0
+ return 0
+ elif [ ! -z ${PHS_SILENT+x} ] && [[ "${PHS_SILENT}" == "1" ]]; then
+ # PHS_SILENT=1: non-interactive update (e.g. invoked from automation).
+ VERBOSE="no"
+ set_std_mode
+ update_script
+ else
+ # Interactive update menu (running inside the container).
+ CHOICE=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --title "${APP} LXC Update/Setting" --menu \
+ "Support/Update functions for ${APP} LXC. Choose an option:" \
+ 12 60 3 \
+ "1" "YES (Silent Mode)" \
+ "2" "YES (Verbose Mode)" \
+ "3" "NO (Cancel Update)" --nocancel --default-item "1" 3>&1 1>&2 2>&3)
+
+ case "$CHOICE" in
+ 1)
+ VERBOSE="no"
+ set_std_mode
+ ;;
+ 2)
+ VERBOSE="yes"
+ set_std_mode
+ ;;
+ 3)
+ clear
+ exit_script
+ exit
+ ;;
+ esac
+ update_script
+ fi
+}
+
+# ------------------------------------------------------------------------------
+# build_container()
+#
+# - Creates and configures the LXC container
+# - Builds network string and applies features (FUSE, TUN, VAAPI passthrough)
+# - Starts container and waits for network connectivity
+# - Installs base packages, SSH keys, and runs the per-app install script
+# ------------------------------------------------------------------------------
+build_container() {
+ # if [ "$VERBOSE" == "yes" ]; then set -x; fi
+
+ NET_STRING="-net0 name=eth0,bridge=${BRG:-vmbr0}"
+
+ # MAC
+ if [[ -n "$MAC" ]]; then
+ case "$MAC" in
+ ,hwaddr=*) NET_STRING+="$MAC" ;;
+ *) NET_STRING+=",hwaddr=$MAC" ;;
+ esac
+ fi
+
+ # IP (always required, defaults to dhcp)
+ NET_STRING+=",ip=${NET:-dhcp}"
+
+ # Gateway
+ if [[ -n "$GATE" ]]; then
+ case "$GATE" in
+ ,gw=*) NET_STRING+="$GATE" ;;
+ *) NET_STRING+=",gw=$GATE" ;;
+ esac
+ fi
+
+ # VLAN
+ if [[ -n "$VLAN" ]]; then
+ case "$VLAN" in
+ ,tag=*) NET_STRING+="$VLAN" ;;
+ *) NET_STRING+=",tag=$VLAN" ;;
+ esac
+ fi
+
+ # MTU
+ if [[ -n "$MTU" ]]; then
+ case "$MTU" in
+ ,mtu=*) NET_STRING+="$MTU" ;;
+ *) NET_STRING+=",mtu=$MTU" ;;
+ esac
+ fi
+
+ # IPv6 Handling
+ case "$IPV6_METHOD" in
+ auto) NET_STRING="$NET_STRING,ip6=auto" ;;
+ dhcp) NET_STRING="$NET_STRING,ip6=dhcp" ;;
+ static)
+ NET_STRING="$NET_STRING,ip6=$IPV6_ADDR"
+ [ -n "$IPV6_GATE" ] && NET_STRING="$NET_STRING,gw6=$IPV6_GATE"
+ ;;
+ none) ;;
+ esac
+
+ # keyctl is only needed (and only allowed) for unprivileged containers.
+ if [ "$CT_TYPE" == "1" ]; then
+ FEATURES="keyctl=1,nesting=1"
+ else
+ FEATURES="nesting=1"
+ fi
+
+ if [ "$ENABLE_FUSE" == "yes" ]; then
+ FEATURES="$FEATURES,fuse=1"
+ fi
+
+ # NOTE(review): pushd has no matching popd and TEMP_DIR is never removed
+ # here - confirm cleanup happens elsewhere (trap/exit handler).
+ TEMP_DIR=$(mktemp -d)
+ pushd "$TEMP_DIR" >/dev/null
+ if [ "$var_os" == "alpine" ]; then
+ export FUNCTIONS_FILE_PATH="$(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/alpine-install.func)"
+ else
+ export FUNCTIONS_FILE_PATH="$(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/install.func)"
+ fi
+ export DIAGNOSTICS="$DIAGNOSTICS"
+ export RANDOM_UUID="$RANDOM_UUID"
+ export CACHER="$APT_CACHER"
+ export CACHER_IP="$APT_CACHER_IP"
+ export tz="$timezone"
+ export APPLICATION="$APP"
+ export app="$NSAPP"
+ export PASSWORD="$PW"
+ export VERBOSE="$VERBOSE"
+ export SSH_ROOT="${SSH}"
+ export SSH_AUTHORIZED_KEY
+ export CTID="$CT_ID"
+ export CTTYPE="$CT_TYPE"
+ export ENABLE_FUSE="$ENABLE_FUSE"
+ export ENABLE_TUN="$ENABLE_TUN"
+ export PCT_OSTYPE="$var_os"
+ export PCT_OSVERSION="$var_version"
+ export PCT_DISK_SIZE="$DISK_SIZE"
+ export PCT_OPTIONS="
+ -features $FEATURES
+ -hostname $HN
+ -tags $TAGS
+ $SD
+ $NS
+ $NET_STRING
+ -onboot 1
+ -cores $CORE_COUNT
+ -memory $RAM_SIZE
+ -unprivileged $CT_TYPE
+ $PW
+"
+ export TEMPLATE_STORAGE="${var_template_storage:-}"
+ export CONTAINER_STORAGE="${var_container_storage:-}"
+ create_lxc_container || exit $?
+
+ LXC_CONFIG="/etc/pve/lxc/${CTID}.conf"
+
+ # ============================================================================
+ # GPU/USB PASSTHROUGH CONFIGURATION
+ # ============================================================================
+
+ # List of applications that benefit from GPU acceleration
+ GPU_APPS=(
+ "immich" "channels" "emby" "ersatztv" "frigate"
+ "jellyfin" "plex" "scrypted" "tdarr" "unmanic"
+ "ollama" "fileflows" "open-webui" "tunarr" "debian"
+ "handbrake" "sunshine" "moonlight" "kodi" "stremio"
+ "viseron"
+ )
+
+ # Check if app needs GPU (case-insensitive membership test in GPU_APPS)
+ is_gpu_app() {
+ local app="${1,,}"
+ for gpu_app in "${GPU_APPS[@]}"; do
+ [[ "$app" == "${gpu_app,,}" ]] && return 0
+ done
+ return 1
+ }
+
+ # Detect all available GPU devices; fills INTEL_DEVICES/AMD_DEVICES/NVIDIA_DEVICES
+ detect_gpu_devices() {
+ INTEL_DEVICES=()
+ AMD_DEVICES=()
+ NVIDIA_DEVICES=()
+
+ # Store PCI info to avoid multiple calls
+ local pci_vga_info=$(lspci -nn 2>/dev/null | grep -E "VGA|Display|3D")
+
+ # Check for Intel GPU - look for Intel vendor ID [8086]
+ if echo "$pci_vga_info" | grep -q "\[8086:"; then
+ msg_info "Detected Intel GPU"
+ if [[ -d /dev/dri ]]; then
+ for d in /dev/dri/renderD* /dev/dri/card*; do
+ [[ -e "$d" ]] && INTEL_DEVICES+=("$d")
+ done
+ fi
+ fi
+
+ # Check for AMD GPU - look for AMD vendor IDs [1002] (AMD/ATI) or [1022] (AMD)
+ if echo "$pci_vga_info" | grep -qE "\[1002:|\[1022:"; then
+ msg_info "Detected AMD GPU"
+ if [[ -d /dev/dri ]]; then
+ # Only add if not already claimed by Intel
+ if [[ ${#INTEL_DEVICES[@]} -eq 0 ]]; then
+ for d in /dev/dri/renderD* /dev/dri/card*; do
+ [[ -e "$d" ]] && AMD_DEVICES+=("$d")
+ done
+ fi
+ fi
+ fi
+
+ # Check for NVIDIA GPU - look for NVIDIA vendor ID [10de]
+ if echo "$pci_vga_info" | grep -q "\[10de:"; then
+ msg_info "Detected NVIDIA GPU"
+ if ! check_nvidia_host_setup; then
+ msg_error "NVIDIA host setup incomplete. Skipping GPU passthrough."
+ msg_info "Fix NVIDIA drivers on host, then recreate container or passthrough manually."
+ return 0
+ fi
+
+ for d in /dev/nvidia* /dev/nvidiactl /dev/nvidia-modeset; do
+ [[ -e "$d" ]] && NVIDIA_DEVICES+=("$d")
+ done
+
+ if [[ ${#NVIDIA_DEVICES[@]} -eq 0 ]]; then
+ msg_warn "NVIDIA GPU detected but no /dev/nvidia* devices found"
+ msg_warn "Please install NVIDIA drivers on host: apt install nvidia-driver"
+ else
+ if [[ "$CT_TYPE" == "0" ]]; then
+ # BUGFIX: was 'cat <>"$LXC_CONFIG"' (mangled heredoc) which would
+ # execute the following lines as commands instead of appending them.
+ cat <<EOF >>"$LXC_CONFIG"
+ # NVIDIA GPU Passthrough (privileged)
+ lxc.cgroup2.devices.allow: c 195:* rwm
+ lxc.cgroup2.devices.allow: c 243:* rwm
+ lxc.mount.entry: /dev/nvidia0 dev/nvidia0 none bind,optional,create=file
+ lxc.mount.entry: /dev/nvidiactl dev/nvidiactl none bind,optional,create=file
+ lxc.mount.entry: /dev/nvidia-uvm dev/nvidia-uvm none bind,optional,create=file
+ lxc.mount.entry: /dev/nvidia-uvm-tools dev/nvidia-uvm-tools none bind,optional,create=file
+EOF
+
+ if [[ -e /dev/dri/renderD128 ]]; then
+ echo "lxc.mount.entry: /dev/dri/renderD128 dev/dri/renderD128 none bind,optional,create=file" >>"$LXC_CONFIG"
+ fi
+
+ export GPU_TYPE="NVIDIA"
+ export NVIDIA_DRIVER_VERSION=$(nvidia-smi --query-gpu=driver_version --format=csv,noheader 2>/dev/null | head -n1)
+ msg_ok "NVIDIA GPU passthrough configured (driver: ${NVIDIA_DRIVER_VERSION})"
+ else
+ msg_warn "NVIDIA passthrough only supported for privileged containers"
+ return 0
+ fi
+ fi
+ fi
+
+ # Debug output
+ msg_debug "Intel devices: ${INTEL_DEVICES[*]}"
+ msg_debug "AMD devices: ${AMD_DEVICES[*]}"
+ msg_debug "NVIDIA devices: ${NVIDIA_DEVICES[*]}"
+ }
+
+ # Configure USB passthrough for privileged containers
+ configure_usb_passthrough() {
+ if [[ "$CT_TYPE" != "0" ]]; then
+ return 0
+ fi
+
+ msg_info "Configuring automatic USB passthrough (privileged container)"
+ # BUGFIX: was 'cat <>"$LXC_CONFIG"' (mangled heredoc).
+ cat <<EOF >>"$LXC_CONFIG"
+# Automatic USB passthrough (privileged container)
+lxc.cgroup2.devices.allow: a
+lxc.cap.drop:
+lxc.cgroup2.devices.allow: c 188:* rwm
+lxc.cgroup2.devices.allow: c 189:* rwm
+lxc.mount.entry: /dev/serial/by-id dev/serial/by-id none bind,optional,create=dir
+lxc.mount.entry: /dev/ttyUSB0 dev/ttyUSB0 none bind,optional,create=file
+lxc.mount.entry: /dev/ttyUSB1 dev/ttyUSB1 none bind,optional,create=file
+lxc.mount.entry: /dev/ttyACM0 dev/ttyACM0 none bind,optional,create=file
+lxc.mount.entry: /dev/ttyACM1 dev/ttyACM1 none bind,optional,create=file
+EOF
+ msg_ok "USB passthrough configured"
+ }
+
+ # Configure GPU passthrough
+ configure_gpu_passthrough() {
+ # Skip if not a GPU app and not privileged
+ if [[ "$CT_TYPE" != "0" ]] && ! is_gpu_app "$APP"; then
+ return 0
+ fi
+
+ detect_gpu_devices
+
+ # Count available GPU types
+ local gpu_count=0
+ local available_gpus=()
+
+ if [[ ${#INTEL_DEVICES[@]} -gt 0 ]]; then
+ available_gpus+=("INTEL")
+ gpu_count=$((gpu_count + 1))
+ fi
+
+ if [[ ${#AMD_DEVICES[@]} -gt 0 ]]; then
+ available_gpus+=("AMD")
+ gpu_count=$((gpu_count + 1))
+ fi
+
+ if [[ ${#NVIDIA_DEVICES[@]} -gt 0 ]]; then
+ available_gpus+=("NVIDIA")
+ gpu_count=$((gpu_count + 1))
+ fi
+
+ if [[ $gpu_count -eq 0 ]]; then
+ msg_info "No GPU devices found for passthrough"
+ return 0
+ fi
+
+ local selected_gpu=""
+
+ if [[ $gpu_count -eq 1 ]]; then
+ # Automatic selection for single GPU
+ selected_gpu="${available_gpus[0]}"
+ msg_info "Automatically configuring ${selected_gpu} GPU passthrough"
+ else
+ # Multiple GPUs - ask user
+ echo -e "\n${INFO} Multiple GPU types detected:"
+ for gpu in "${available_gpus[@]}"; do
+ echo " - $gpu"
+ done
+ read -rp "Which GPU type to passthrough? (${available_gpus[*]}): " selected_gpu
+ selected_gpu="${selected_gpu^^}"
+
+ # Validate selection
+ local valid=0
+ for gpu in "${available_gpus[@]}"; do
+ [[ "$selected_gpu" == "$gpu" ]] && valid=1
+ done
+
+ if [[ $valid -eq 0 ]]; then
+ msg_warn "Invalid selection. Skipping GPU passthrough."
+ return 0
+ fi
+ fi
+
+ # Apply passthrough configuration based on selection
+ local dev_idx=0
+
+ case "$selected_gpu" in
+ INTEL | AMD)
+ local devices=()
+ [[ "$selected_gpu" == "INTEL" ]] && devices=("${INTEL_DEVICES[@]}")
+ [[ "$selected_gpu" == "AMD" ]] && devices=("${AMD_DEVICES[@]}")
+
+ # For Proxmox WebUI visibility, add as dev0, dev1 etc.
+ for dev in "${devices[@]}"; do
+ if [[ "$CT_TYPE" == "0" ]]; then
+ # Privileged container - use dev entries for WebUI visibility
+ # Use initial GID 104 (render) for renderD*, 44 (video) for card*
+ if [[ "$dev" =~ renderD ]]; then
+ echo "dev${dev_idx}: $dev,gid=104" >>"$LXC_CONFIG"
+ else
+ echo "dev${dev_idx}: $dev,gid=44" >>"$LXC_CONFIG"
+ fi
+ dev_idx=$((dev_idx + 1))
+
+ # Also add cgroup allows for privileged containers
+ local major minor
+ major=$(stat -c '%t' "$dev" 2>/dev/null || echo "0")
+ minor=$(stat -c '%T' "$dev" 2>/dev/null || echo "0")
+
+ if [[ "$major" != "0" && "$minor" != "0" ]]; then
+ echo "lxc.cgroup2.devices.allow: c $((0x$major)):$((0x$minor)) rwm" >>"$LXC_CONFIG"
+ fi
+ else
+ # Unprivileged container
+ if [[ "$dev" =~ renderD ]]; then
+ echo "dev${dev_idx}: $dev,uid=0,gid=104" >>"$LXC_CONFIG"
+ else
+ echo "dev${dev_idx}: $dev,uid=0,gid=44" >>"$LXC_CONFIG"
+ fi
+ dev_idx=$((dev_idx + 1))
+ fi
+ done
+
+ export GPU_TYPE="$selected_gpu"
+ msg_ok "${selected_gpu} GPU passthrough configured (${dev_idx} devices)"
+ ;;
+
+ NVIDIA)
+ if [[ ${#NVIDIA_DEVICES[@]} -eq 0 ]]; then
+ msg_error "NVIDIA drivers not installed on host. Please install: apt install nvidia-driver"
+ return 1
+ fi
+
+ for dev in "${NVIDIA_DEVICES[@]}"; do
+ # NVIDIA devices typically need different handling
+ echo "dev${dev_idx}: $dev,uid=0,gid=44" >>"$LXC_CONFIG"
+ dev_idx=$((dev_idx + 1))
+
+ if [[ "$CT_TYPE" == "0" ]]; then
+ local major minor
+ major=$(stat -c '%t' "$dev" 2>/dev/null || echo "0")
+ minor=$(stat -c '%T' "$dev" 2>/dev/null || echo "0")
+
+ if [[ "$major" != "0" && "$minor" != "0" ]]; then
+ echo "lxc.cgroup2.devices.allow: c $((0x$major)):$((0x$minor)) rwm" >>"$LXC_CONFIG"
+ fi
+ fi
+ done
+
+ export GPU_TYPE="NVIDIA"
+ msg_ok "NVIDIA GPU passthrough configured (${dev_idx} devices)"
+ ;;
+ esac
+ }
+
+ # Additional device passthrough
+ configure_additional_devices() {
+ # TUN device passthrough
+ if [ "$ENABLE_TUN" == "yes" ]; then
+ # BUGFIX: was 'cat <>"$LXC_CONFIG"' (mangled heredoc).
+ cat <<EOF >>"$LXC_CONFIG"
+lxc.cgroup2.devices.allow: c 10:200 rwm
+lxc.mount.entry: /dev/net/tun dev/net/tun none bind,create=file
+EOF
+ fi
+
+ # Coral TPU passthrough
+ if [[ -e /dev/apex_0 ]]; then
+ msg_info "Detected Coral TPU - configuring passthrough"
+ echo "lxc.mount.entry: /dev/apex_0 dev/apex_0 none bind,optional,create=file" >>"$LXC_CONFIG"
+ fi
+ }
+
+ # Execute pre-start configurations
+ configure_usb_passthrough
+ configure_gpu_passthrough
+ configure_additional_devices
+
+ # ============================================================================
+ # START CONTAINER AND INSTALL USERLAND
+ # ============================================================================
+
+ msg_info "Starting LXC Container"
+ pct start "$CTID"
+
+ # Wait for container to be running (up to 10s)
+ for i in {1..10}; do
+ if pct status "$CTID" | grep -q "status: running"; then
+ msg_ok "Started LXC Container"
+ break
+ fi
+ sleep 1
+ if [ "$i" -eq 10 ]; then
+ msg_error "LXC Container did not reach running state"
+ exit 1
+ fi
+ done
+
+ # Wait for network (skip for Alpine initially)
+ if [ "$var_os" != "alpine" ]; then
+ msg_info "Waiting for network in LXC container"
+
+ # Wait for IP
+ for i in {1..20}; do
+ ip_in_lxc=$(pct exec "$CTID" -- ip -4 addr show dev eth0 | awk '/inet / {print $2}' | cut -d/ -f1)
+ [ -n "$ip_in_lxc" ] && break
+ sleep 1
+ done
+
+ if [ -z "$ip_in_lxc" ]; then
+ msg_error "No IP assigned to CT $CTID after 20s"
+ exit 1
+ fi
+
+ # Try to reach gateway
+ gw_ok=0
+ for i in {1..10}; do
+ if pct exec "$CTID" -- ping -c1 -W1 "${GATEWAY:-8.8.8.8}" >/dev/null 2>&1; then
+ gw_ok=1
+ break
+ fi
+ sleep 1
+ done
+
+ if [ "$gw_ok" -eq 1 ]; then
+ msg_ok "Network in LXC is reachable (IP $ip_in_lxc)"
+ else
+ msg_warn "Network reachable but gateway check failed"
+ fi
+ fi
+ # Function to get correct GID inside container
+ get_container_gid() {
+ local group="$1"
+ local gid=$(pct exec "$CTID" -- getent group "$group" 2>/dev/null | cut -d: -f3)
+ echo "${gid:-44}" # Default to 44 if not found
+ }
+
+ fix_gpu_gids
+
+ # Continue with standard container setup
+ msg_info "Customizing LXC Container"
+
+ # # Install GPU userland if configured
+ # if [[ "${ENABLE_VAAPI:-0}" == "1" ]]; then
+ # install_gpu_userland "VAAPI"
+ # fi
+
+ # if [[ "${ENABLE_NVIDIA:-0}" == "1" ]]; then
+ # install_gpu_userland "NVIDIA"
+ # fi
+
+ # Continue with standard container setup
+ if [ "$var_os" == "alpine" ]; then
+ sleep 3
+ # BUGFIX: was 'cat </etc/apk/repositories' (mangled heredoc) which read
+ # the file instead of overwriting it with the repository list.
+ pct exec "$CTID" -- /bin/sh -c 'cat <<EOF >/etc/apk/repositories
+http://dl-cdn.alpinelinux.org/alpine/latest-stable/main
+http://dl-cdn.alpinelinux.org/alpine/latest-stable/community
+EOF'
+ pct exec "$CTID" -- ash -c "apk add bash newt curl openssh nano mc ncurses jq >/dev/null"
+ else
+ sleep 3
+ pct exec "$CTID" -- bash -c "sed -i '/$LANG/ s/^# //' /etc/locale.gen"
+ pct exec "$CTID" -- bash -c "locale_line=\$(grep -v '^#' /etc/locale.gen | grep -E '^[a-zA-Z]' | awk '{print \$1}' | head -n 1) && \
+ echo LANG=\$locale_line >/etc/default/locale && \
+ locale-gen >/dev/null && \
+ export LANG=\$locale_line"
+
+ if [[ -z "${tz:-}" ]]; then
+ tz=$(timedatectl show --property=Timezone --value 2>/dev/null || echo "Etc/UTC")
+ fi
+
+ if pct exec "$CTID" -- test -e "/usr/share/zoneinfo/$tz"; then
+ pct exec "$CTID" -- bash -c "tz='$tz'; echo \"\$tz\" >/etc/timezone && ln -sf \"/usr/share/zoneinfo/\$tz\" /etc/localtime"
+ else
+ msg_warn "Skipping timezone setup – zone '$tz' not found in container"
+ fi
+
+ pct exec "$CTID" -- bash -c "apt-get update >/dev/null && apt-get install -y sudo curl mc gnupg2 jq >/dev/null" || {
+ msg_error "apt-get base packages installation failed"
+ exit 1
+ }
+ fi
+
+ msg_ok "Customized LXC Container"
+
+ # Verify GPU access if enabled
+ if [[ "${ENABLE_VAAPI:-0}" == "1" ]] && [ "$var_os" != "alpine" ]; then
+ pct exec "$CTID" -- bash -c "vainfo >/dev/null 2>&1" &&
+ msg_ok "VAAPI verified working" ||
+ msg_warn "VAAPI verification failed - may need additional configuration"
+ fi
+
+ if [[ "${ENABLE_NVIDIA:-0}" == "1" ]] && [ "$var_os" != "alpine" ]; then
+ pct exec "$CTID" -- bash -c "nvidia-smi >/dev/null 2>&1" &&
+ msg_ok "NVIDIA verified working" ||
+ msg_warn "NVIDIA verification failed - may need additional configuration"
+ fi
+
+ # Install SSH keys
+ install_ssh_keys_into_ct
+
+ # Run application installer. BUGFIX: the previous
+ # 'if ! lxc-attach ...; then exit $?; fi' always exited 0 because $?
+ # reflected the negated test, not the installer; '|| exit $?' propagates
+ # the installer's real exit code.
+ lxc-attach -n "$CTID" -- bash -c "$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/install/${var_install}.sh)" || exit $?
+}
+
+destroy_lxc() {
+ # Interactively stop and destroy the container identified by $CT_ID.
+ # Returns 130 on user abort (signal or EOF), 1 on failure, 0 otherwise.
+ if [[ -z "$CT_ID" ]]; then
+ msg_error "No CT_ID found. Nothing to remove."
+ return 1
+ fi
+
+ # Abort on Ctrl-C / Ctrl-D / ESC
+ trap 'echo; msg_error "Aborted by user (SIGINT/SIGQUIT)"; return 130' INT QUIT
+
+ local prompt
+ if ! read -rp "Remove this Container? " prompt; then
+ # read returns non-zero on Ctrl-D/ESC (EOF)
+ msg_error "Aborted input (Ctrl-D/ESC)"
+ return 130
+ fi
+
+ # Case-insensitive match; empty input counts as "no".
+ case "${prompt,,}" in
+ y | yes)
+ if pct stop "$CT_ID" &>/dev/null && pct destroy "$CT_ID" &>/dev/null; then
+ msg_ok "Removed Container $CT_ID"
+ else
+ msg_error "Failed to remove Container $CT_ID"
+ return 1
+ fi
+ ;;
+ "" | n | no)
+ msg_info "Container was not removed."
+ ;;
+ *)
+ msg_warn "Invalid response. Container was not removed."
+ ;;
+ esac
+}
+
+# ------------------------------------------------------------------------------
+# Storage discovery / selection helpers
+# ------------------------------------------------------------------------------
+# ===== Storage discovery / selection helpers (ported from create_lxc.sh) =====
+# resolve_storage_preselect()
+#
+# - $1 = class ('template'|'container'), $2 = preselected storage name
+# - Validates that the preselected storage exists and supports the required
+#   content type (vztmpl for templates, rootdir for containers)
+# - On success sets STORAGE_RESULT=$2 and STORAGE_INFO (free/used summary)
+#   and returns 0; returns 1 on unknown class, empty preselect, or mismatch
+resolve_storage_preselect() {
+ local class="$1" preselect="$2" required_content=""
+ case "$class" in
+ template) required_content="vztmpl" ;;
+ container) required_content="rootdir" ;;
+ *) return 1 ;;
+ esac
+ [[ -z "$preselect" ]] && return 1
+ if ! pvesm status -content "$required_content" | awk 'NR>1{print $1}' | grep -qx -- "$preselect"; then
+ msg_warn "Preselected storage '${preselect}' does not support content '${required_content}' (or not found)"
+ return 1
+ fi
+
+ # Pull the matching status row to build a human-readable free/used summary.
+ local line total used free
+ line="$(pvesm status | awk -v s="$preselect" 'NR>1 && $1==s {print $0}')"
+ if [[ -z "$line" ]]; then
+ STORAGE_INFO="n/a"
+ else
+ total="$(awk '{print $4}' <<<"$line")"
+ used="$(awk '{print $5}' <<<"$line")"
+ free="$(awk '{print $6}' <<<"$line")"
+ # Humanize byte counts when numfmt is available; fall back to raw values.
+ local total_h used_h free_h
+ if command -v numfmt >/dev/null 2>&1; then
+ total_h="$(numfmt --to=iec --suffix=B --format %.1f "$total" 2>/dev/null || echo "$total")"
+ used_h="$(numfmt --to=iec --suffix=B --format %.1f "$used" 2>/dev/null || echo "$used")"
+ free_h="$(numfmt --to=iec --suffix=B --format %.1f "$free" 2>/dev/null || echo "$free")"
+ STORAGE_INFO="Free: ${free_h} Used: ${used_h}"
+ else
+ STORAGE_INFO="Free: ${free} Used: ${used}"
+ fi
+ fi
+ STORAGE_RESULT="$preselect"
+ return 0
+}
+
+fix_gpu_gids() {
+ if [[ -z "${GPU_TYPE:-}" ]]; then
+ return 0
+ fi
+
+ msg_info "Detecting and setting correct GPU group IDs"
+
+ # Ermittle die tatsächlichen GIDs aus dem Container
+ local video_gid=$(pct exec "$CTID" -- sh -c "getent group video 2>/dev/null | cut -d: -f3")
+ local render_gid=$(pct exec "$CTID" -- sh -c "getent group render 2>/dev/null | cut -d: -f3")
+
+ # Fallbacks wenn Gruppen nicht existieren
+ if [[ -z "$video_gid" ]]; then
+ # Versuche die video Gruppe zu erstellen
+ pct exec "$CTID" -- sh -c "groupadd -r video 2>/dev/null || true"
+ video_gid=$(pct exec "$CTID" -- sh -c "getent group video 2>/dev/null | cut -d: -f3")
+ [[ -z "$video_gid" ]] && video_gid="44" # Ultimate fallback
+ fi
+
+ if [[ -z "$render_gid" ]]; then
+ # Versuche die render Gruppe zu erstellen
+ pct exec "$CTID" -- sh -c "groupadd -r render 2>/dev/null || true"
+ render_gid=$(pct exec "$CTID" -- sh -c "getent group render 2>/dev/null | cut -d: -f3")
+ [[ -z "$render_gid" ]] && render_gid="104" # Ultimate fallback
+ fi
+
+ msg_info "Container GIDs detected - video:${video_gid}, render:${render_gid}"
+
+ # Prüfe ob die GIDs von den Defaults abweichen
+ local need_update=0
+ if [[ "$video_gid" != "44" ]] || [[ "$render_gid" != "104" ]]; then
+ need_update=1
+ fi
+
+ if [[ $need_update -eq 1 ]]; then
+ msg_info "Updating device GIDs in container config"
+
+ # Stoppe Container für Config-Update
+ pct stop "$CTID" >/dev/null 2>&1
+
+ # Update die dev Einträge mit korrekten GIDs
+ # Backup der Config
+ cp "$LXC_CONFIG" "${LXC_CONFIG}.bak"
+
+ # Parse und update jeden dev Eintrag
+ while IFS= read -r line; do
+ if [[ "$line" =~ ^dev[0-9]+: ]]; then
+ # Extract device path
+ local device_path=$(echo "$line" | sed -E 's/^dev[0-9]+: ([^,]+).*/\1/')
+ local dev_num=$(echo "$line" | sed -E 's/^(dev[0-9]+):.*/\1/')
+
+ if [[ "$device_path" =~ renderD ]]; then
+ # RenderD device - use render GID
+ echo "${dev_num}: ${device_path},gid=${render_gid}"
+ elif [[ "$device_path" =~ card ]]; then
+ # Card device - use video GID
+ echo "${dev_num}: ${device_path},gid=${video_gid}"
+ else
+ # Keep original line
+ echo "$line"
+ fi
+ else
+ # Keep non-dev lines
+ echo "$line"
+ fi
+ done <"$LXC_CONFIG" >"${LXC_CONFIG}.new"
+
+ mv "${LXC_CONFIG}.new" "$LXC_CONFIG"
+
+    # Restart the container
+ pct start "$CTID" >/dev/null 2>&1
+ sleep 3
+
+ msg_ok "Device GIDs updated successfully"
+ else
+ msg_ok "Device GIDs are already correct"
+ fi
+ if [[ "$CT_TYPE" == "0" ]]; then
+ pct exec "$CTID" -- bash -c "
+ if [ -d /dev/dri ]; then
+ for dev in /dev/dri/*; do
+ if [ -e \"\$dev\" ]; then
+ if [[ \"\$dev\" =~ renderD ]]; then
+ chgrp ${render_gid} \"\$dev\" 2>/dev/null || true
+ else
+ chgrp ${video_gid} \"\$dev\" 2>/dev/null || true
+ fi
+ chmod 660 \"\$dev\" 2>/dev/null || true
+ fi
+ done
+ fi
+ " >/dev/null 2>&1
+ fi
+}
+
+# NVIDIA-specific check on host
+check_nvidia_host_setup() {
+ if ! command -v nvidia-smi >/dev/null 2>&1; then
+ msg_warn "NVIDIA GPU detected but nvidia-smi not found on host"
+ msg_warn "Please install NVIDIA drivers on host first."
+ #echo " 1. Download driver: wget https://us.download.nvidia.com/XFree86/Linux-x86_64/550.127.05/NVIDIA-Linux-x86_64-550.127.05.run"
+ #echo " 2. Install: ./NVIDIA-Linux-x86_64-550.127.05.run --dkms"
+ #echo " 3. Verify: nvidia-smi"
+ return 1
+ fi
+
+ # check if nvidia-smi works
+ if ! nvidia-smi >/dev/null 2>&1; then
+ msg_warn "nvidia-smi installed but not working. Driver issue?"
+ return 1
+ fi
+
+ return 0
+}
+
+check_storage_support() {
+ local CONTENT="$1" VALID=0
+ while IFS= read -r line; do
+ local STORAGE_NAME
+ STORAGE_NAME=$(awk '{print $1}' <<<"$line")
+ [[ -n "$STORAGE_NAME" ]] && VALID=1
+ done < <(pvesm status -content "$CONTENT" 2>/dev/null | awk 'NR>1')
+ [[ $VALID -eq 1 ]]
+}
+
+select_storage() {
+ local CLASS=$1 CONTENT CONTENT_LABEL
+ case $CLASS in
+ container)
+ CONTENT='rootdir'
+ CONTENT_LABEL='Container'
+ ;;
+ template)
+ CONTENT='vztmpl'
+ CONTENT_LABEL='Container template'
+ ;;
+ iso)
+ CONTENT='iso'
+ CONTENT_LABEL='ISO image'
+ ;;
+ images)
+ CONTENT='images'
+ CONTENT_LABEL='VM Disk image'
+ ;;
+ backup)
+ CONTENT='backup'
+ CONTENT_LABEL='Backup'
+ ;;
+ snippets)
+ CONTENT='snippets'
+ CONTENT_LABEL='Snippets'
+ ;;
+ *)
+ msg_error "Invalid storage class '$CLASS'"
+ return 1
+ ;;
+ esac
+
+ declare -A STORAGE_MAP
+ local -a MENU=()
+ local COL_WIDTH=0
+
+ while read -r TAG TYPE _ TOTAL USED FREE _; do
+ [[ -n "$TAG" && -n "$TYPE" ]] || continue
+ local DISPLAY="${TAG} (${TYPE})"
+ local USED_FMT=$(numfmt --to=iec --from-unit=K --format %.1f <<<"$USED")
+ local FREE_FMT=$(numfmt --to=iec --from-unit=K --format %.1f <<<"$FREE")
+ local INFO="Free: ${FREE_FMT}B Used: ${USED_FMT}B"
+ STORAGE_MAP["$DISPLAY"]="$TAG"
+ MENU+=("$DISPLAY" "$INFO" "OFF")
+ ((${#DISPLAY} > COL_WIDTH)) && COL_WIDTH=${#DISPLAY}
+ done < <(pvesm status -content "$CONTENT" | awk 'NR>1')
+
+ if [[ ${#MENU[@]} -eq 0 ]]; then
+ msg_error "No storage found for content type '$CONTENT'."
+ return 2
+ fi
+
+ if [[ $((${#MENU[@]} / 3)) -eq 1 ]]; then
+ STORAGE_RESULT="${STORAGE_MAP[${MENU[0]}]}"
+ STORAGE_INFO="${MENU[1]}"
+ return 0
+ fi
+
+ local WIDTH=$((COL_WIDTH + 42))
+ while true; do
+ local DISPLAY_SELECTED
+ DISPLAY_SELECTED=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \
+ --title "Storage Pools" \
+ --radiolist "Which storage pool for ${CONTENT_LABEL,,}?\n(Spacebar to select)" \
+ 16 "$WIDTH" 6 "${MENU[@]}" 3>&1 1>&2 2>&3) || { exit_script; }
+
+ DISPLAY_SELECTED=$(sed 's/[[:space:]]*$//' <<<"$DISPLAY_SELECTED")
+ if [[ -z "$DISPLAY_SELECTED" || -z "${STORAGE_MAP[$DISPLAY_SELECTED]+_}" ]]; then
+ whiptail --msgbox "No valid storage selected. Please try again." 8 58
+ continue
+ fi
+ STORAGE_RESULT="${STORAGE_MAP[$DISPLAY_SELECTED]}"
+ for ((i = 0; i < ${#MENU[@]}; i += 3)); do
+ if [[ "${MENU[$i]}" == "$DISPLAY_SELECTED" ]]; then
+ STORAGE_INFO="${MENU[$i + 1]}"
+ break
+ fi
+ done
+ return 0
+ done
+}
+
+create_lxc_container() {
+ # ------------------------------------------------------------------------------
+ # Optional verbose mode (debug tracing)
+ # ------------------------------------------------------------------------------
+ if [[ "${CREATE_LXC_VERBOSE:-no}" == "yes" ]]; then set -x; fi
+
+ # ------------------------------------------------------------------------------
+ # Helpers (dynamic versioning / template parsing)
+ # ------------------------------------------------------------------------------
+ pkg_ver() { dpkg-query -W -f='${Version}\n' "$1" 2>/dev/null || echo ""; }
+ pkg_cand() { apt-cache policy "$1" 2>/dev/null | awk '/Candidate:/ {print $2}'; }
+
+ ver_ge() { dpkg --compare-versions "$1" ge "$2"; }
+ ver_gt() { dpkg --compare-versions "$1" gt "$2"; }
+ ver_lt() { dpkg --compare-versions "$1" lt "$2"; }
+
+ # Extract Debian OS minor from template name: debian-13-standard_13.1-1_amd64.tar.zst => "13.1"
+ parse_template_osver() { sed -n 's/.*_\([0-9][0-9]*\(\.[0-9]\+\)\?\)-.*/\1/p' <<<"$1"; }
+
+ # Offer upgrade for pve-container/lxc-pve if candidate > installed; optional auto-retry pct create
+ # Returns:
+ # 0 = no upgrade needed
+ # 1 = upgraded (and if do_retry=yes and retry succeeded, creation done)
+ # 2 = user declined
+ # 3 = upgrade attempted but failed OR retry failed
+ offer_lxc_stack_upgrade_and_maybe_retry() {
+ local do_retry="${1:-no}" # yes|no
+ local _pvec_i _pvec_c _lxcp_i _lxcp_c need=0
+
+ _pvec_i="$(pkg_ver pve-container)"
+ _lxcp_i="$(pkg_ver lxc-pve)"
+ _pvec_c="$(pkg_cand pve-container)"
+ _lxcp_c="$(pkg_cand lxc-pve)"
+
+ if [[ -n "$_pvec_c" && "$_pvec_c" != "none" ]]; then
+ ver_gt "$_pvec_c" "${_pvec_i:-0}" && need=1
+ fi
+ if [[ -n "$_lxcp_c" && "$_lxcp_c" != "none" ]]; then
+ ver_gt "$_lxcp_c" "${_lxcp_i:-0}" && need=1
+ fi
+ if [[ $need -eq 0 ]]; then
+ msg_debug "No newer candidate for pve-container/lxc-pve (installed=$_pvec_i/$_lxcp_i, cand=$_pvec_c/$_lxcp_c)"
+ return 0
+ fi
+
+ echo
+ echo "An update for the Proxmox LXC stack is available:"
+ echo " pve-container: installed=${_pvec_i:-n/a} candidate=${_pvec_c:-n/a}"
+ echo " lxc-pve : installed=${_lxcp_i:-n/a} candidate=${_lxcp_c:-n/a}"
+ echo
+ read -rp "Do you want to upgrade now? [y/N] " _ans
+ case "${_ans,,}" in
+ y | yes)
+ msg_info "Upgrading Proxmox LXC stack (pve-container, lxc-pve)"
+ if apt-get update -qq >/dev/null && apt-get install -y --only-upgrade pve-container lxc-pve >/dev/null; then
+ msg_ok "LXC stack upgraded."
+ if [[ "$do_retry" == "yes" ]]; then
+ msg_info "Retrying container creation after upgrade"
+ if pct create "$CTID" "${TEMPLATE_STORAGE}:vztmpl/${TEMPLATE}" "${PCT_OPTIONS[@]}" >>"$LOGFILE" 2>&1; then
+ msg_ok "Container created successfully after upgrade."
+ return 0
+ else
+ msg_error "pct create still failed after upgrade. See $LOGFILE"
+ return 3
+ fi
+ fi
+ return 1
+ else
+ msg_error "Upgrade failed. Please check APT output."
+ return 3
+ fi
+ ;;
+ *) return 2 ;;
+ esac
+ }
+
+ # ------------------------------------------------------------------------------
+ # Required input variables
+ # ------------------------------------------------------------------------------
+ [[ "${CTID:-}" ]] || {
+ msg_error "You need to set 'CTID' variable."
+ exit 203
+ }
+ [[ "${PCT_OSTYPE:-}" ]] || {
+ msg_error "You need to set 'PCT_OSTYPE' variable."
+ exit 204
+ }
+
+ msg_debug "CTID=$CTID"
+ msg_debug "PCT_OSTYPE=$PCT_OSTYPE"
+ msg_debug "PCT_OSVERSION=${PCT_OSVERSION:-default}"
+
+ # ID checks
+ [[ "$CTID" -ge 100 ]] || {
+ msg_error "ID cannot be less than 100."
+ exit 205
+ }
+ if qm status "$CTID" &>/dev/null || pct status "$CTID" &>/dev/null; then
+ echo -e "ID '$CTID' is already in use."
+ unset CTID
+ msg_error "Cannot use ID that is already in use."
+ exit 206
+ fi
+
+ # Storage capability check
+ check_storage_support "rootdir" || {
+ msg_error "No valid storage found for 'rootdir' [Container]"
+ exit 1
+ }
+ check_storage_support "vztmpl" || {
+ msg_error "No valid storage found for 'vztmpl' [Template]"
+ exit 1
+ }
+
+ # Template storage selection
+ if resolve_storage_preselect template "${TEMPLATE_STORAGE:-}"; then
+ TEMPLATE_STORAGE="$STORAGE_RESULT"
+ TEMPLATE_STORAGE_INFO="$STORAGE_INFO"
+ msg_ok "Storage ${BL}${TEMPLATE_STORAGE}${CL} (${TEMPLATE_STORAGE_INFO}) [Template]"
+ else
+ while true; do
+ if [[ -z "${var_template_storage:-}" ]]; then
+ if select_storage template; then
+ TEMPLATE_STORAGE="$STORAGE_RESULT"
+ TEMPLATE_STORAGE_INFO="$STORAGE_INFO"
+ msg_ok "Storage ${BL}${TEMPLATE_STORAGE}${CL} (${TEMPLATE_STORAGE_INFO}) [Template]"
+ break
+ fi
+ fi
+ done
+ fi
+
+ # Container storage selection
+ if resolve_storage_preselect container "${CONTAINER_STORAGE:-}"; then
+ CONTAINER_STORAGE="$STORAGE_RESULT"
+ CONTAINER_STORAGE_INFO="$STORAGE_INFO"
+ msg_ok "Storage ${BL}${CONTAINER_STORAGE}${CL} (${CONTAINER_STORAGE_INFO}) [Container]"
+ else
+ if [[ -z "${var_container_storage:-}" ]]; then
+ if select_storage container; then
+ CONTAINER_STORAGE="$STORAGE_RESULT"
+ CONTAINER_STORAGE_INFO="$STORAGE_INFO"
+ msg_ok "Storage ${BL}${CONTAINER_STORAGE}${CL} (${CONTAINER_STORAGE_INFO}) [Container]"
+ fi
+ fi
+ fi
+
+ # Validate content types
+ msg_info "Validating content types of storage '$CONTAINER_STORAGE'"
+ STORAGE_CONTENT=$(grep -A4 -E "^(zfspool|dir|lvmthin|lvm): $CONTAINER_STORAGE" /etc/pve/storage.cfg | grep content | awk '{$1=""; print $0}' | xargs)
+ msg_debug "Storage '$CONTAINER_STORAGE' has content types: $STORAGE_CONTENT"
+ grep -qw "rootdir" <<<"$STORAGE_CONTENT" || {
+ msg_error "Storage '$CONTAINER_STORAGE' does not support 'rootdir'. Cannot create LXC."
+ exit 217
+ }
+ $STD msg_ok "Storage '$CONTAINER_STORAGE' supports 'rootdir'"
+
+ msg_info "Validating content types of template storage '$TEMPLATE_STORAGE'"
+ TEMPLATE_CONTENT=$(grep -A4 -E "^[^:]+: $TEMPLATE_STORAGE" /etc/pve/storage.cfg | grep content | awk '{$1=""; print $0}' | xargs)
+ msg_debug "Template storage '$TEMPLATE_STORAGE' has content types: $TEMPLATE_CONTENT"
+ if ! grep -qw "vztmpl" <<<"$TEMPLATE_CONTENT"; then
+ msg_warn "Template storage '$TEMPLATE_STORAGE' does not declare 'vztmpl'. This may cause pct create to fail."
+ else
+ $STD msg_ok "Template storage '$TEMPLATE_STORAGE' supports 'vztmpl'"
+ fi
+
+ # Free space check
+ STORAGE_FREE=$(pvesm status | awk -v s="$CONTAINER_STORAGE" '$1 == s { print $6 }')
+ REQUIRED_KB=$((${PCT_DISK_SIZE:-8} * 1024 * 1024))
+ [[ "$STORAGE_FREE" -ge "$REQUIRED_KB" ]] || {
+ msg_error "Not enough space on '$CONTAINER_STORAGE'. Needed: ${PCT_DISK_SIZE:-8}G."
+ exit 214
+ }
+
+ # Cluster quorum (if cluster)
+ if [[ -f /etc/pve/corosync.conf ]]; then
+ msg_info "Checking cluster quorum"
+ if ! pvecm status | awk -F':' '/^Quorate/ { exit ($2 ~ /Yes/) ? 0 : 1 }'; then
+ msg_error "Cluster is not quorate. Start all nodes or configure quorum device (QDevice)."
+ exit 210
+ fi
+ msg_ok "Cluster is quorate"
+ fi
+
+ # ------------------------------------------------------------------------------
+ # Template discovery & validation
+ # ------------------------------------------------------------------------------
+ TEMPLATE_SEARCH="${PCT_OSTYPE}-${PCT_OSVERSION:-}"
+ case "$PCT_OSTYPE" in
+ debian | ubuntu) TEMPLATE_PATTERN="-standard_" ;;
+ alpine | fedora | rocky | centos) TEMPLATE_PATTERN="-default_" ;;
+ *) TEMPLATE_PATTERN="" ;;
+ esac
+
+ msg_info "Searching for template '$TEMPLATE_SEARCH'"
+
+ # Build regex patterns outside awk/grep for clarity
+ SEARCH_PATTERN="^${TEMPLATE_SEARCH}"
+
+ #echo "[DEBUG] TEMPLATE_SEARCH='$TEMPLATE_SEARCH'"
+ #echo "[DEBUG] SEARCH_PATTERN='$SEARCH_PATTERN'"
+ #echo "[DEBUG] TEMPLATE_PATTERN='$TEMPLATE_PATTERN'"
+
+ mapfile -t LOCAL_TEMPLATES < <(
+ pveam list "$TEMPLATE_STORAGE" 2>/dev/null |
+ awk -v search="${SEARCH_PATTERN}" -v pattern="${TEMPLATE_PATTERN}" '$1 ~ search && $1 ~ pattern {print $1}' |
+ sed 's|.*/||' | sort -t - -k 2 -V
+ )
+
+ pveam update >/dev/null 2>&1 || msg_warn "Could not update template catalog (pveam update failed)."
+
+ #echo "[DEBUG] pveam available output (first 5 lines with .tar files):"
+ #pveam available -section system 2>/dev/null | grep -E '\.(tar\.zst|tar\.xz|tar\.gz)$' | head -5 | sed 's/^/ /'
+
+ set +u
+ mapfile -t ONLINE_TEMPLATES < <(pveam available -section system 2>/dev/null | grep -E '\.(tar\.zst|tar\.xz|tar\.gz)$' | awk '{print $2}' | grep -E "${SEARCH_PATTERN}.*${TEMPLATE_PATTERN}" | sort -t - -k 2 -V 2>/dev/null || true)
+ #echo "[DEBUG] After filtering: ${#ONLINE_TEMPLATES[@]} online templates found"
+ set -u
+ if [[ ${#ONLINE_TEMPLATES[@]} -gt 0 ]]; then
+ #echo "[DEBUG] Online templates:"
+ for tmpl in "${ONLINE_TEMPLATES[@]}"; do
+ echo " - $tmpl"
+ done
+ fi
+
+ ONLINE_TEMPLATE=""
+ [[ ${#ONLINE_TEMPLATES[@]} -gt 0 ]] && ONLINE_TEMPLATE="${ONLINE_TEMPLATES[-1]}"
+
+ #msg_debug "SEARCH_PATTERN='${SEARCH_PATTERN}' TEMPLATE_PATTERN='${TEMPLATE_PATTERN}'"
+ #msg_debug "Found ${#LOCAL_TEMPLATES[@]} local templates, ${#ONLINE_TEMPLATES[@]} online templates"
+ if [[ ${#ONLINE_TEMPLATES[@]} -gt 0 ]]; then
+ #msg_debug "First 3 online templates:"
+ count=0
+ for idx in "${!ONLINE_TEMPLATES[@]}"; do
+ #msg_debug " [$idx]: ${ONLINE_TEMPLATES[$idx]}"
+ ((count++))
+ [[ $count -ge 3 ]] && break
+ done
+ fi
+ #msg_debug "ONLINE_TEMPLATE='$ONLINE_TEMPLATE'"
+
+ if [[ ${#LOCAL_TEMPLATES[@]} -gt 0 ]]; then
+ TEMPLATE="${LOCAL_TEMPLATES[-1]}"
+ TEMPLATE_SOURCE="local"
+ else
+ TEMPLATE="$ONLINE_TEMPLATE"
+ TEMPLATE_SOURCE="online"
+ fi
+
+ # If still no template, try to find alternatives
+ if [[ -z "$TEMPLATE" ]]; then
+ echo ""
+ echo "[DEBUG] No template found for ${PCT_OSTYPE} ${PCT_OSVERSION}, searching for alternatives..."
+
+ # Get all available versions for this OS type
+ mapfile -t AVAILABLE_VERSIONS < <(
+ pveam available -section system 2>/dev/null |
+ grep -E '\.(tar\.zst|tar\.xz|tar\.gz)$' |
+ awk -F'\t' '{print $1}' |
+ grep "^${PCT_OSTYPE}-" |
+ sed -E "s/.*${PCT_OSTYPE}-([0-9]+(\.[0-9]+)?).*/\1/" |
+ sort -u -V 2>/dev/null
+ )
+
+ if [[ ${#AVAILABLE_VERSIONS[@]} -gt 0 ]]; then
+ echo ""
+ echo "${BL}Available ${PCT_OSTYPE} versions:${CL}"
+ for i in "${!AVAILABLE_VERSIONS[@]}"; do
+ echo " [$((i + 1))] ${AVAILABLE_VERSIONS[$i]}"
+ done
+ echo ""
+ read -p "Select version [1-${#AVAILABLE_VERSIONS[@]}] or press Enter to cancel: " choice
+
+ if [[ "$choice" =~ ^[0-9]+$ ]] && [[ "$choice" -ge 1 ]] && [[ "$choice" -le ${#AVAILABLE_VERSIONS[@]} ]]; then
+ PCT_OSVERSION="${AVAILABLE_VERSIONS[$((choice - 1))]}"
+ TEMPLATE_SEARCH="${PCT_OSTYPE}-${PCT_OSVERSION}"
+ SEARCH_PATTERN="^${TEMPLATE_SEARCH}-"
+
+ #echo "[DEBUG] Retrying with version: $PCT_OSVERSION"
+
+ mapfile -t ONLINE_TEMPLATES < <(
+ pveam available -section system 2>/dev/null |
+ grep -E '\.(tar\.zst|tar\.xz|tar\.gz)$' |
+ awk -F'\t' '{print $1}' |
+ grep -E "${SEARCH_PATTERN}.*${TEMPLATE_PATTERN}" |
+ sort -t - -k 2 -V 2>/dev/null || true
+ )
+
+ if [[ ${#ONLINE_TEMPLATES[@]} -gt 0 ]]; then
+ TEMPLATE="${ONLINE_TEMPLATES[-1]}"
+ TEMPLATE_SOURCE="online"
+ #echo "[DEBUG] Found alternative: $TEMPLATE"
+ else
+ msg_error "No templates available for ${PCT_OSTYPE} ${PCT_OSVERSION}"
+ exit 225
+ fi
+ else
+ msg_info "Installation cancelled"
+ exit 0
+ fi
+ else
+ msg_error "No ${PCT_OSTYPE} templates available at all"
+ exit 225
+ fi
+ fi
+
+ #echo "[DEBUG] Selected TEMPLATE='$TEMPLATE' SOURCE='$TEMPLATE_SOURCE'"
+ #msg_debug "Selected TEMPLATE='$TEMPLATE' SOURCE='$TEMPLATE_SOURCE'"
+
+ TEMPLATE_PATH="$(pvesm path $TEMPLATE_STORAGE:vztmpl/$TEMPLATE 2>/dev/null || true)"
+ if [[ -z "$TEMPLATE_PATH" ]]; then
+ TEMPLATE_BASE=$(awk -v s="$TEMPLATE_STORAGE" '$1==s {f=1} f && /path/ {print $2; exit}' /etc/pve/storage.cfg)
+ [[ -n "$TEMPLATE_BASE" ]] && TEMPLATE_PATH="$TEMPLATE_BASE/template/cache/$TEMPLATE"
+ fi
+
+ # If we still don't have a path but have a valid template name, construct it
+ if [[ -z "$TEMPLATE_PATH" && -n "$TEMPLATE" ]]; then
+ TEMPLATE_PATH="/var/lib/vz/template/cache/$TEMPLATE"
+ fi
+
+ [[ -n "$TEMPLATE_PATH" ]] || {
+ if [[ -z "$TEMPLATE" ]]; then
+ msg_error "Template ${PCT_OSTYPE} ${PCT_OSVERSION} not available"
+
+ # Get available versions
+ mapfile -t AVAILABLE_VERSIONS < <(
+ pveam available -section system 2>/dev/null |
+ grep "^${PCT_OSTYPE}-" |
+ sed -E 's/.*'"${PCT_OSTYPE}"'-([0-9]+\.[0-9]+).*/\1/' |
+ grep -E '^[0-9]+\.[0-9]+$' |
+ sort -u -V 2>/dev/null || sort -u
+ )
+
+ if [[ ${#AVAILABLE_VERSIONS[@]} -gt 0 ]]; then
+ echo -e "\n${BL}Available versions:${CL}"
+ for i in "${!AVAILABLE_VERSIONS[@]}"; do
+ echo " [$((i + 1))] ${AVAILABLE_VERSIONS[$i]}"
+ done
+
+ echo ""
+ read -p "Select version [1-${#AVAILABLE_VERSIONS[@]}] or Enter to exit: " choice
+
+ if [[ "$choice" =~ ^[0-9]+$ ]] && [[ "$choice" -ge 1 ]] && [[ "$choice" -le ${#AVAILABLE_VERSIONS[@]} ]]; then
+ export var_version="${AVAILABLE_VERSIONS[$((choice - 1))]}"
+ export PCT_OSVERSION="$var_version"
+ msg_ok "Switched to ${PCT_OSTYPE} ${var_version}"
+
+ # Retry template search with new version
+ TEMPLATE_SEARCH="${PCT_OSTYPE}-${PCT_OSVERSION:-}"
+ SEARCH_PATTERN="^${TEMPLATE_SEARCH}-"
+
+ mapfile -t LOCAL_TEMPLATES < <(
+ pveam list "$TEMPLATE_STORAGE" 2>/dev/null |
+ awk -v search="${SEARCH_PATTERN}" -v pattern="${TEMPLATE_PATTERN}" '$1 ~ search && $1 ~ pattern {print $1}' |
+ sed 's|.*/||' | sort -t - -k 2 -V
+ )
+ mapfile -t ONLINE_TEMPLATES < <(
+ pveam available -section system 2>/dev/null |
+ grep -E '\.(tar\.zst|tar\.xz|tar\.gz)$' |
+ awk -F'\t' '{print $1}' |
+ grep -E "${SEARCH_PATTERN}.*${TEMPLATE_PATTERN}" |
+ sort -t - -k 2 -V 2>/dev/null || true
+ )
+ ONLINE_TEMPLATE=""
+ [[ ${#ONLINE_TEMPLATES[@]} -gt 0 ]] && ONLINE_TEMPLATE="${ONLINE_TEMPLATES[-1]}"
+
+ if [[ ${#LOCAL_TEMPLATES[@]} -gt 0 ]]; then
+ TEMPLATE="${LOCAL_TEMPLATES[-1]}"
+ TEMPLATE_SOURCE="local"
+ else
+ TEMPLATE="$ONLINE_TEMPLATE"
+ TEMPLATE_SOURCE="online"
+ fi
+
+ TEMPLATE_PATH="$(pvesm path $TEMPLATE_STORAGE:vztmpl/$TEMPLATE 2>/dev/null || true)"
+ if [[ -z "$TEMPLATE_PATH" ]]; then
+ TEMPLATE_BASE=$(awk -v s="$TEMPLATE_STORAGE" '$1==s {f=1} f && /path/ {print $2; exit}' /etc/pve/storage.cfg)
+ [[ -n "$TEMPLATE_BASE" ]] && TEMPLATE_PATH="$TEMPLATE_BASE/template/cache/$TEMPLATE"
+ fi
+
+ # If we still don't have a path but have a valid template name, construct it
+ if [[ -z "$TEMPLATE_PATH" && -n "$TEMPLATE" ]]; then
+ TEMPLATE_PATH="/var/lib/vz/template/cache/$TEMPLATE"
+ fi
+
+ [[ -n "$TEMPLATE_PATH" ]] || {
+ msg_error "Template still not found after version change"
+ exit 220
+ }
+ else
+ msg_info "Installation cancelled"
+ exit 1
+ fi
+ else
+ msg_error "No ${PCT_OSTYPE} templates available"
+ exit 220
+ fi
+ fi
+ }
+
+ # Validate that we found a template
+ if [[ -z "$TEMPLATE" ]]; then
+ msg_error "No template found for ${PCT_OSTYPE} ${PCT_OSVERSION}"
+ msg_info "Please check:"
+ msg_info " - Is pveam catalog available? (run: pveam available -section system)"
+ msg_info " - Does the template exist for your OS version?"
+ exit 225
+ fi
+
+ msg_ok "Template ${BL}$TEMPLATE${CL} [$TEMPLATE_SOURCE]"
+ msg_debug "Resolved TEMPLATE_PATH=$TEMPLATE_PATH"
+
+ NEED_DOWNLOAD=0
+ if [[ ! -f "$TEMPLATE_PATH" ]]; then
+ msg_info "Template not present locally – will download."
+ NEED_DOWNLOAD=1
+ elif [[ ! -r "$TEMPLATE_PATH" ]]; then
+ msg_error "Template file exists but is not readable – check permissions."
+ exit 221
+ elif [[ "$(stat -c%s "$TEMPLATE_PATH")" -lt 1000000 ]]; then
+ if [[ -n "$ONLINE_TEMPLATE" ]]; then
+ msg_warn "Template file too small (<1MB) – re-downloading."
+ NEED_DOWNLOAD=1
+ else
+ msg_warn "Template looks too small, but no online version exists. Keeping local file."
+ fi
+ elif ! tar -tf "$TEMPLATE_PATH" &>/dev/null; then
+ if [[ -n "$ONLINE_TEMPLATE" ]]; then
+ msg_warn "Template appears corrupted – re-downloading."
+ NEED_DOWNLOAD=1
+ else
+ msg_warn "Template appears corrupted, but no online version exists. Keeping local file."
+ fi
+ else
+ $STD msg_ok "Template $TEMPLATE is present and valid."
+ fi
+
+ if [[ "$TEMPLATE_SOURCE" == "local" && -n "$ONLINE_TEMPLATE" && "$TEMPLATE" != "$ONLINE_TEMPLATE" ]]; then
+ msg_warn "Local template is outdated: $TEMPLATE (latest available: $ONLINE_TEMPLATE)"
+ if whiptail --yesno "A newer template is available:\n$ONLINE_TEMPLATE\n\nDo you want to download and use it instead?" 12 70; then
+ TEMPLATE="$ONLINE_TEMPLATE"
+ NEED_DOWNLOAD=1
+ else
+ msg_info "Continuing with local template $TEMPLATE"
+ fi
+ fi
+
+ if [[ "$NEED_DOWNLOAD" -eq 1 ]]; then
+ [[ -f "$TEMPLATE_PATH" ]] && rm -f "$TEMPLATE_PATH"
+ for attempt in {1..3}; do
+ msg_info "Attempt $attempt: Downloading template $TEMPLATE to $TEMPLATE_STORAGE"
+ if pveam download "$TEMPLATE_STORAGE" "$TEMPLATE" >/dev/null 2>&1; then
+ msg_ok "Template download successful."
+ break
+ fi
+ if [[ $attempt -eq 3 ]]; then
+ msg_error "Failed after 3 attempts. Please check network access, permissions, or manually run:\n pveam download $TEMPLATE_STORAGE $TEMPLATE"
+ exit 222
+ fi
+ sleep $((attempt * 5))
+ done
+ fi
+
+ if ! pveam list "$TEMPLATE_STORAGE" 2>/dev/null | grep -q "$TEMPLATE"; then
+ msg_error "Template $TEMPLATE not available in storage $TEMPLATE_STORAGE after download."
+ exit 223
+ fi
+
+ # ------------------------------------------------------------------------------
+ # Dynamic preflight for Debian 13.x: offer upgrade if available (no hard mins)
+ # ------------------------------------------------------------------------------
+ if [[ "$PCT_OSTYPE" == "debian" ]]; then
+ OSVER="$(parse_template_osver "$TEMPLATE")"
+ if [[ -n "$OSVER" ]]; then
+      # Proactive, but without aborting – offer only
+ offer_lxc_stack_upgrade_and_maybe_retry "no" || true
+ fi
+ fi
+
+ # ------------------------------------------------------------------------------
+ # Create LXC Container
+ # ------------------------------------------------------------------------------
+ msg_info "Creating LXC container"
+
+ # Ensure subuid/subgid entries exist
+ grep -q "root:100000:65536" /etc/subuid || echo "root:100000:65536" >>/etc/subuid
+ grep -q "root:100000:65536" /etc/subgid || echo "root:100000:65536" >>/etc/subgid
+
+ # Assemble pct options
+ PCT_OPTIONS=(${PCT_OPTIONS[@]:-${DEFAULT_PCT_OPTIONS[@]}})
+ [[ " ${PCT_OPTIONS[*]} " =~ " -rootfs " ]] || PCT_OPTIONS+=(-rootfs "$CONTAINER_STORAGE:${PCT_DISK_SIZE:-8}")
+
+ # Lock by template file (avoid concurrent downloads/creates)
+ lockfile="/tmp/template.${TEMPLATE}.lock"
+ exec 9>"$lockfile" || {
+ msg_error "Failed to create lock file '$lockfile'."
+ exit 200
+ }
+ flock -w 60 9 || {
+ msg_error "Timeout while waiting for template lock."
+ exit 211
+ }
+
+ LOGFILE="/tmp/pct_create_${CTID}.log"
+ msg_debug "pct create command: pct create $CTID ${TEMPLATE_STORAGE}:vztmpl/${TEMPLATE} ${PCT_OPTIONS[*]}"
+ msg_debug "Logfile: $LOGFILE"
+
+ # First attempt
+ if ! pct create "$CTID" "${TEMPLATE_STORAGE}:vztmpl/${TEMPLATE}" "${PCT_OPTIONS[@]}" >"$LOGFILE" 2>&1; then
+ msg_error "Container creation failed on ${TEMPLATE_STORAGE}. Checking template..."
+
+ # Validate template file
+ if [[ ! -s "$TEMPLATE_PATH" || "$(stat -c%s "$TEMPLATE_PATH")" -lt 1000000 ]]; then
+ msg_warn "Template file too small or missing – re-downloading."
+ rm -f "$TEMPLATE_PATH"
+ pveam download "$TEMPLATE_STORAGE" "$TEMPLATE"
+ elif ! tar -tf "$TEMPLATE_PATH" &>/dev/null; then
+ if [[ -n "$ONLINE_TEMPLATE" ]]; then
+ msg_warn "Template appears corrupted – re-downloading."
+ rm -f "$TEMPLATE_PATH"
+ pveam download "$TEMPLATE_STORAGE" "$TEMPLATE"
+ else
+ msg_warn "Template appears corrupted, but no online version exists. Skipping re-download."
+ fi
+ fi
+
+ # Retry after repair
+ if ! pct create "$CTID" "${TEMPLATE_STORAGE}:vztmpl/${TEMPLATE}" "${PCT_OPTIONS[@]}" >>"$LOGFILE" 2>&1; then
+ # Fallback to local storage
+ if [[ "$TEMPLATE_STORAGE" != "local" ]]; then
+ msg_warn "Retrying container creation with fallback to local storage..."
+ LOCAL_TEMPLATE_PATH="/var/lib/vz/template/cache/$TEMPLATE"
+ if [[ ! -f "$LOCAL_TEMPLATE_PATH" ]]; then
+ msg_info "Downloading template to local..."
+ pveam download local "$TEMPLATE" >/dev/null 2>&1
+ fi
+ if pct create "$CTID" "local:vztmpl/${TEMPLATE}" "${PCT_OPTIONS[@]}" >>"$LOGFILE" 2>&1; then
+ msg_ok "Container successfully created using local fallback."
+ else
+ # --- Dynamic stack upgrade + auto-retry on the well-known error pattern ---
+ if grep -qiE 'unsupported .* version' "$LOGFILE"; then
+ echo
+ echo "pct reported 'unsupported ... version' – your LXC stack might be too old for this template."
+ echo "We can try to upgrade 'pve-container' and 'lxc-pve' now and retry automatically."
+ offer_lxc_stack_upgrade_and_maybe_retry "yes"
+ rc=$?
+ case $rc in
+ 0) : ;; # success - container created, continue
+ 2)
+ echo "Upgrade was declined. Please update and re-run:
+ apt update && apt install --only-upgrade pve-container lxc-pve"
+ exit 231
+ ;;
+ 3)
+ echo "Upgrade and/or retry failed. Please inspect: $LOGFILE"
+ exit 231
+ ;;
+ esac
+ else
+ msg_error "Container creation failed even with local fallback. See $LOGFILE"
+ if whiptail --yesno "pct create failed.\nDo you want to enable verbose debug mode and view detailed logs?" 12 70; then
+ set -x
+ bash -x -c "pct create $CTID local:vztmpl/${TEMPLATE} ${PCT_OPTIONS[*]}" 2>&1 | tee -a "$LOGFILE"
+ set +x
+ fi
+ exit 209
+ fi
+ fi
+ else
+ msg_error "Container creation failed on local storage. See $LOGFILE"
+ # --- Dynamic stack upgrade + auto-retry on the well-known error pattern ---
+ if grep -qiE 'unsupported .* version' "$LOGFILE"; then
+ echo
+ echo "pct reported 'unsupported ... version' – your LXC stack might be too old for this template."
+ echo "We can try to upgrade 'pve-container' and 'lxc-pve' now and retry automatically."
+ offer_lxc_stack_upgrade_and_maybe_retry "yes"
+ rc=$?
+ case $rc in
+ 0) : ;; # success - container created, continue
+ 2)
+ echo "Upgrade was declined. Please update and re-run:
+ apt update && apt install --only-upgrade pve-container lxc-pve"
+ exit 231
+ ;;
+ 3)
+ echo "Upgrade and/or retry failed. Please inspect: $LOGFILE"
+ exit 231
+ ;;
+ esac
+ else
+ msg_error "Container creation failed. See $LOGFILE"
+ if whiptail --yesno "pct create failed.\nDo you want to enable verbose debug mode and view detailed logs?" 12 70; then
+ set -x
+ bash -x -c "pct create $CTID local:vztmpl/${TEMPLATE} ${PCT_OPTIONS[*]}" 2>&1 | tee -a "$LOGFILE"
+ set +x
+ fi
+ exit 209
+ fi
+ fi
+ fi
+ fi
+
+ # Verify container exists
+ pct list | awk '{print $1}' | grep -qx "$CTID" || {
+ msg_error "Container ID $CTID not listed in 'pct list'. See $LOGFILE"
+ exit 215
+ }
+
+ # Verify config rootfs
+ grep -q '^rootfs:' "/etc/pve/lxc/$CTID.conf" || {
+ msg_error "RootFS entry missing in container config. See $LOGFILE"
+ exit 216
+ }
+
+ msg_ok "LXC Container ${BL}$CTID${CL} ${GN}was successfully created."
+}
+
+# ------------------------------------------------------------------------------
+# description()
+#
+# - Sets container description with HTML content (logo, links, badges)
+# - Restarts ping-instances.service if present
+# - Posts status "done" to API
+# ------------------------------------------------------------------------------
+description() {
+ IP=$(pct exec "$CTID" ip a s dev eth0 | awk '/inet / {print $2}' | cut -d/ -f1)
+
+ # Generate LXC Description
+ DESCRIPTION=$(
+ cat <
+
+
+
+
+ ${APP} LXC
+
+
+
+
+
+
+
+
+
+ GitHub
+
+
+
+ Discussions
+
+
+
+ Issues
+
+
+EOF
+ )
+ pct set "$CTID" -description "$DESCRIPTION"
+
+ if [[ -f /etc/systemd/system/ping-instances.service ]]; then
+ systemctl start ping-instances.service
+ fi
+
+ post_update_to_api "done" "none"
+}
+
+# ------------------------------------------------------------------------------
+# api_exit_script()
+#
+# - Exit trap handler
+# - Reports exit codes to API with detailed reason
+# - Handles known codes (100–209) and maps them to errors
+# ------------------------------------------------------------------------------
+api_exit_script() {
+ exit_code=$?
+ if [ $exit_code -ne 0 ]; then
+ case $exit_code in
+ 100) post_update_to_api "failed" "100: Unexpected error in create_lxc.sh" ;;
+ 101) post_update_to_api "failed" "101: No network connection detected in create_lxc.sh" ;;
+ 200) post_update_to_api "failed" "200: LXC creation failed in create_lxc.sh" ;;
+ 201) post_update_to_api "failed" "201: Invalid Storage class in create_lxc.sh" ;;
+ 202) post_update_to_api "failed" "202: User aborted menu in create_lxc.sh" ;;
+ 203) post_update_to_api "failed" "203: CTID not set in create_lxc.sh" ;;
+ 204) post_update_to_api "failed" "204: PCT_OSTYPE not set in create_lxc.sh" ;;
+ 205) post_update_to_api "failed" "205: CTID cannot be less than 100 in create_lxc.sh" ;;
+ 206) post_update_to_api "failed" "206: CTID already in use in create_lxc.sh" ;;
+ 207) post_update_to_api "failed" "207: Template not found in create_lxc.sh" ;;
+ 208) post_update_to_api "failed" "208: Error downloading template in create_lxc.sh" ;;
+ 209) post_update_to_api "failed" "209: Container creation failed, but template is intact in create_lxc.sh" ;;
+ *) post_update_to_api "failed" "Unknown error, exit code: $exit_code in create_lxc.sh" ;;
+ esac
+ fi
+}
+
+if command -v pveversion >/dev/null 2>&1; then
+ trap 'api_exit_script' EXIT
+fi
+trap 'post_update_to_api "failed" "$BASH_COMMAND"' ERR
+trap 'post_update_to_api "failed" "INTERRUPTED"' SIGINT
+trap 'post_update_to_api "failed" "TERMINATED"' SIGTERM
diff --git a/misc/build.func.backup-20251029-124205 b/misc/build.func.backup-20251029-124205
new file mode 100644
index 000000000..7e0556d61
--- /dev/null
+++ b/misc/build.func.backup-20251029-124205
@@ -0,0 +1,3516 @@
+#!/usr/bin/env bash
+# Copyright (c) 2021-2025 community-scripts ORG
+# Author: tteck (tteckster) | MickLesk | michelroegl-brunner
+# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
+# Revision: 1
+
+# ==============================================================================
+# SECTION 1: CORE INITIALIZATION & VARIABLES
+# ==============================================================================
+
+# ------------------------------------------------------------------------------
+# variables()
+#
+# - Normalize application name (NSAPP = lowercase, no spaces)
+# - Build installer filename (var_install)
+# - Define regex for integer validation
+# - Fetch hostname of Proxmox node
+# - Set default values for diagnostics/method
+# - Generate random UUID for tracking
+# - Get Proxmox VE version and kernel version
+# ------------------------------------------------------------------------------
+variables() {
+  NSAPP=$(echo "${APP,,}" | tr -d ' ') # This function sets the NSAPP variable by converting the value of the APP variable to lowercase and removing any spaces.
+  var_install="${NSAPP}-install"       # sets the var_install variable by appending "-install" to the value of NSAPP.
+  INTEGER='^[0-9]+([.][0-9]+)?$'       # it defines the INTEGER regular expression pattern (also matches decimals despite the name).
+  PVEHOST_NAME=$(hostname)             # stores the Proxmox node's hostname as-is (no case conversion is applied).
+  DIAGNOSTICS="yes"                    # sets the DIAGNOSTICS variable to "yes", used for the API call.
+  METHOD="default"                     # sets the METHOD variable to "default", used for the API call.
+  RANDOM_UUID="$(cat /proc/sys/kernel/random/uuid)" # generates a random UUID and sets it to the RANDOM_UUID variable.
+  # Accept either spelling of the container-type variable; default is 1 (unprivileged).
+  CTTYPE="${CTTYPE:-${CT_TYPE:-1}}"
+  #CT_TYPE=${var_unprivileged:-$CT_TYPE}
+
+  # Get Proxmox VE version and kernel version
+  if command -v pveversion >/dev/null 2>&1; then
+    PVEVERSION=$(pveversion | grep "pve-manager" | awk '{print $2}' | cut -d'/' -f1)
+  else
+    PVEVERSION="N/A"
+  fi
+  KERNEL_VERSION=$(uname -r)
+}
+
+# -----------------------------------------------------------------------------
+# Community-Scripts bootstrap loader
+# - Always sources build.func from remote
+# - Updates local core files only if build.func changed
+# - Local cache: /usr/local/community-scripts/core
+# -----------------------------------------------------------------------------
+
+# FUNC_DIR="/usr/local/community-scripts/core"
+# mkdir -p "$FUNC_DIR"
+
+# BUILD_URL="https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func"
+# BUILD_REV="$FUNC_DIR/build.rev"
+# DEVMODE="${DEVMODE:-no}"
+
+# # --- Step 1: fetch build.func content once, compute hash ---
+# build_content="$(curl -fsSL "$BUILD_URL")" || {
+# echo "❌ Failed to fetch build.func"
+# exit 1
+# }
+
+# newhash=$(printf "%s" "$build_content" | sha256sum | awk '{print $1}')
+# oldhash=$(cat "$BUILD_REV" 2>/dev/null || echo "")
+
+# # --- Step 2: if build.func changed, offer update for core files ---
+# if [ "$newhash" != "$oldhash" ]; then
+# echo "⚠️ build.func changed!"
+
+# while true; do
+# read -rp "Refresh local core files? [y/N/diff]: " ans
+# case "$ans" in
+# [Yy]*)
+# echo "$newhash" >"$BUILD_REV"
+
+# update_func_file() {
+# local file="$1"
+# local url="https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/$file"
+# local local_path="$FUNC_DIR/$file"
+
+# echo "⬇️ Downloading $file ..."
+# curl -fsSL "$url" -o "$local_path" || {
+# echo "❌ Failed to fetch $file"
+# exit 1
+# }
+# echo "✔️ Updated $file"
+# }
+
+# update_func_file core.func
+# update_func_file error_handler.func
+# update_func_file tools.func
+# break
+# ;;
+# [Dd]*)
+# for file in core.func error_handler.func tools.func; do
+# local_path="$FUNC_DIR/$file"
+# url="https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/$file"
+# remote_tmp="$(mktemp)"
+
+# curl -fsSL "$url" -o "$remote_tmp" || continue
+
+# if [ -f "$local_path" ]; then
+# echo "🔍 Diff for $file:"
+# diff -u "$local_path" "$remote_tmp" || echo "(no differences)"
+# else
+# echo "📦 New file $file will be installed"
+# fi
+
+# rm -f "$remote_tmp"
+# done
+# ;;
+# *)
+# echo "❌ Skipped updating local core files"
+# break
+# ;;
+# esac
+# done
+# else
+# if [ "$DEVMODE" != "yes" ]; then
+# echo "✔️ build.func unchanged → using existing local core files"
+# fi
+# fi
+
+# if [ -n "${_COMMUNITY_SCRIPTS_LOADER:-}" ]; then
+# return 0 2>/dev/null || exit 0
+# fi
+# _COMMUNITY_SCRIPTS_LOADER=1
+
+# # --- Step 3: always source local versions of the core files ---
+# source "$FUNC_DIR/core.func"
+# source "$FUNC_DIR/error_handler.func"
+# source "$FUNC_DIR/tools.func"
+
+# # --- Step 4: finally, source build.func directly from memory ---
+# # (no tmp file needed)
+# source <(printf "%s" "$build_content")
+
+# ------------------------------------------------------------------------------
+# Load core + error handler functions from community-scripts repo
+#
+# - Prefer curl if available, fallback to wget
+# - Load: api.func, core.func, error_handler.func
+# - Initialize error traps after loading
+# ------------------------------------------------------------------------------
+
+# Fix: api.func was previously fetched with curl unconditionally, which broke
+# hosts that only have wget and contradicted the fallback promised above.
+# It is now fetched inside the same curl/wget branch as the other helpers.
+if command -v curl >/dev/null 2>&1; then
+  source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/api.func)
+  source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/core.func)
+  source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/error_handler.func)
+  load_functions
+  catch_errors
+  #echo "(build.func) Loaded core.func via curl"
+elif command -v wget >/dev/null 2>&1; then
+  source <(wget -qO- https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/api.func)
+  source <(wget -qO- https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/core.func)
+  source <(wget -qO- https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/error_handler.func)
+  load_functions
+  catch_errors
+  #echo "(build.func) Loaded core.func via wget"
+fi
+
+# ------------------------------------------------------------------------------
+# maxkeys_check()
+#
+# - Reads kernel keyring limits (maxkeys, maxbytes)
+# - Checks current usage for LXC user (UID 100000)
+# - Warns if usage is close to limits and suggests sysctl tuning
+# - Exits if thresholds are exceeded
+# - https://cleveruptime.com/docs/files/proc-key-users | https://docs.kernel.org/security/keys/core.html
+# ------------------------------------------------------------------------------
+
+maxkeys_check() {
+  # Read kernel parameters (fall back to 0 so the availability check below fires)
+  per_user_maxkeys=$(cat /proc/sys/kernel/keys/maxkeys 2>/dev/null || echo 0)
+  per_user_maxbytes=$(cat /proc/sys/kernel/keys/maxbytes 2>/dev/null || echo 0)
+
+  # Exit if kernel parameters are unavailable
+  if [[ "$per_user_maxkeys" -eq 0 || "$per_user_maxbytes" -eq 0 ]]; then
+    echo -e "${CROSS}${RD} Error: Unable to read kernel parameters. Ensure proper permissions.${CL}"
+    exit 1
+  fi
+
+  # Fetch key usage for user ID 100000 (typical for containers)
+  # /proc/key-users field 2 is the key count; field 5 is "usedbytes/maxbytes".
+  used_lxc_keys=$(awk '/100000:/ {print $2}' /proc/key-users 2>/dev/null || echo 0)
+  used_lxc_bytes=$(awk '/100000:/ {split($5, a, "/"); print a[1]}' /proc/key-users 2>/dev/null || echo 0)
+
+  # Calculate thresholds (fixed safety margins) and suggested new limits (2x)
+  threshold_keys=$((per_user_maxkeys - 100))
+  threshold_bytes=$((per_user_maxbytes - 1000))
+  new_limit_keys=$((per_user_maxkeys * 2))
+  new_limit_bytes=$((per_user_maxbytes * 2))
+
+  # Check if key or byte usage is near limits
+  failure=0
+  if [[ "$used_lxc_keys" -gt "$threshold_keys" ]]; then
+    echo -e "${CROSS}${RD} Warning: Key usage is near the limit (${used_lxc_keys}/${per_user_maxkeys}).${CL}"
+    echo -e "${INFO} Suggested action: Set ${GN}kernel.keys.maxkeys=${new_limit_keys}${CL} in ${BOLD}/etc/sysctl.d/98-community-scripts.conf${CL}."
+    failure=1
+  fi
+  if [[ "$used_lxc_bytes" -gt "$threshold_bytes" ]]; then
+    echo -e "${CROSS}${RD} Warning: Key byte usage is near the limit (${used_lxc_bytes}/${per_user_maxbytes}).${CL}"
+    echo -e "${INFO} Suggested action: Set ${GN}kernel.keys.maxbytes=${new_limit_bytes}${CL} in ${BOLD}/etc/sysctl.d/98-community-scripts.conf${CL}."
+    failure=1
+  fi
+
+  # Provide next steps if issues are detected
+  if [[ "$failure" -eq 1 ]]; then
+    echo -e "${INFO} To apply changes, run: ${BOLD}service procps force-reload${CL}"
+    exit 1
+  fi
+
+  echo -e "${CM}${GN} All kernel key limits are within safe thresholds.${CL}"
+}
+
+# ------------------------------------------------------------------------------
+# get_current_ip()
+#
+# - Returns current container IP depending on OS type
+# - Debian/Ubuntu: uses `hostname -I`
+# - Alpine: parses eth0 via `ip -4 addr`
+# - Unrecognized distros yield "Unknown"
+# ------------------------------------------------------------------------------
+get_current_ip() {
+  if [ -f /etc/os-release ]; then
+    # Check for Debian/Ubuntu (uses hostname -I)
+    if grep -qE 'ID=debian|ID=ubuntu' /etc/os-release; then
+      CURRENT_IP=$(hostname -I | awk '{print $1}')
+    # Check for Alpine (uses ip command)
+    elif grep -q 'ID=alpine' /etc/os-release; then
+      CURRENT_IP=$(ip -4 addr show eth0 | awk '/inet / {print $2}' | cut -d/ -f1 | head -n 1)
+    else
+      CURRENT_IP="Unknown"
+    fi
+  fi
+  # NOTE(review): if /etc/os-release is missing, CURRENT_IP is never assigned
+  # here and the echo emits an empty (or stale) value — confirm this is intended.
+  echo "$CURRENT_IP"
+}
+
+# ------------------------------------------------------------------------------
+# update_motd_ip()
+#
+# - Updates /etc/motd with current container IP
+# - Removes old IP entries to avoid duplicates
+# - No-op when /etc/motd does not exist
+# ------------------------------------------------------------------------------
+update_motd_ip() {
+  MOTD_FILE="/etc/motd"
+
+  if [ -f "$MOTD_FILE" ]; then
+    # Remove existing IP Address lines to prevent duplication
+    sed -i '/IP Address:/d' "$MOTD_FILE"
+
+    IP=$(get_current_ip)
+    # Add the new IP address
+    echo -e "${TAB}${NETWORK}${YW} IP Address: ${GN}${IP}${CL}" >>"$MOTD_FILE"
+  fi
+}
+
+# ------------------------------------------------------------------------------
+# install_ssh_keys_into_ct()
+#
+# - Installs SSH keys into container root account if SSH is enabled
+# - Uses pct push or direct input to authorized_keys
+# - Falls back to warning if no keys provided
+# - Returns 0 when SSH is disabled (nothing to do)
+# ------------------------------------------------------------------------------
+install_ssh_keys_into_ct() {
+  [[ "$SSH" != "yes" ]] && return 0
+
+  if [[ -n "$SSH_KEYS_FILE" && -s "$SSH_KEYS_FILE" ]]; then
+    msg_info "Installing selected SSH keys into CT ${CTID}"
+    pct exec "$CTID" -- sh -c 'mkdir -p /root/.ssh && chmod 700 /root/.ssh' || {
+      msg_error "prepare /root/.ssh failed"
+      return 1
+    }
+    # Prefer pct push; if that fails, stream the file through pct exec instead.
+    pct push "$CTID" "$SSH_KEYS_FILE" /root/.ssh/authorized_keys >/dev/null 2>&1 ||
+      pct exec "$CTID" -- sh -c "cat > /root/.ssh/authorized_keys" <"$SSH_KEYS_FILE" || {
+      msg_error "write authorized_keys failed"
+      return 1
+    }
+    # chmod is best-effort; key installation already succeeded at this point.
+    pct exec "$CTID" -- sh -c 'chmod 600 /root/.ssh/authorized_keys' || true
+    msg_ok "Installed SSH keys into CT ${CTID}"
+    return 0
+  fi
+
+  # Fallback: no keys were selected
+  msg_warn "No SSH keys to install (skipping)."
+  return 0
+}
+
+# ------------------------------------------------------------------------------
+# base_settings()
+#
+# - Defines all base/default variables for container creation
+# - Reads from environment variables (var_*)
+# - Provides fallback defaults for OS type/version
+# ------------------------------------------------------------------------------
+base_settings() {
+  # Default Settings
+  CT_TYPE=${var_unprivileged:-"1"}
+  DISK_SIZE=${var_disk:-"4"}
+  CORE_COUNT=${var_cpu:-"1"}
+  RAM_SIZE=${var_ram:-"1024"}
+  # NOTE(review): VERBOSE/ENABLE_FUSE/ENABLE_TUN fall back to "$1" (this
+  # function's first positional argument) before "no" — confirm callers rely
+  # on passing a value into base_settings, otherwise "${1:-no}" is just "no".
+  VERBOSE=${var_verbose:-"${1:-no}"}
+  PW=${var_pw:-""}
+  CT_ID=${var_ctid:-$NEXTID}
+  HN=${var_hostname:-$NSAPP}
+  BRG=${var_brg:-"vmbr0"}
+  NET=${var_net:-"dhcp"}
+  IPV6_METHOD=${var_ipv6_method:-"none"}
+  IPV6_STATIC=${var_ipv6_static:-""}
+  GATE=${var_gateway:-""}
+  APT_CACHER=${var_apt_cacher:-""}
+  APT_CACHER_IP=${var_apt_cacher_ip:-""}
+  MTU=${var_mtu:-""}
+  SD=${var_storage:-""}
+  NS=${var_ns:-""}
+  MAC=${var_mac:-""}
+  VLAN=${var_vlan:-""}
+  SSH=${var_ssh:-"no"}
+  SSH_AUTHORIZED_KEY=${var_ssh_authorized_key:-""}
+  UDHCPC_FIX=${var_udhcpc_fix:-""}
+  # NOTE(review): comma separator here vs ";" in advanced_settings — verify
+  # which separator the consumer of TAGS expects.
+  TAGS="community-script,${var_tags:-}"
+  ENABLE_FUSE=${var_fuse:-"${1:-no}"}
+  ENABLE_TUN=${var_tun:-"${1:-no}"}
+
+  # Since these 2 are only defined outside of default_settings function, we add a temporary fallback. TODO: To align everything, we should add these as constant variables (e.g. OSTYPE and OSVERSION), but that would currently require updating the default_settings function for all existing scripts
+  if [ -z "$var_os" ]; then
+    var_os="debian"
+  fi
+  if [ -z "$var_version" ]; then
+    var_version="12"
+  fi
+}
+
+# ------------------------------------------------------------------------------
+# echo_default()
+#
+# - Prints summary of default values (ID, OS, type, disk, RAM, CPU, etc.)
+# - Uses icons and formatting for readability
+# - Convert CT_TYPE to description
+# ------------------------------------------------------------------------------
+echo_default() {
+  # Map numeric CT_TYPE (0/1) to a human-readable label.
+  CT_TYPE_DESC="Unprivileged"
+  if [ "$CT_TYPE" -eq 0 ]; then
+    CT_TYPE_DESC="Privileged"
+  fi
+  # Fix: close the "(Kernel: ...)" parenthesis before the color reset; it was
+  # previously left unbalanced in the printed line.
+  echo -e "${INFO}${BOLD}${DGN}PVE Version ${PVEVERSION} (Kernel: ${KERNEL_VERSION})${CL}"
+  echo -e "${CONTAINERID}${BOLD}${DGN}Container ID: ${BGN}${CT_ID}${CL}"
+  echo -e "${OS}${BOLD}${DGN}Operating System: ${BGN}$var_os ($var_version)${CL}"
+  echo -e "${CONTAINERTYPE}${BOLD}${DGN}Container Type: ${BGN}$CT_TYPE_DESC${CL}"
+  echo -e "${DISKSIZE}${BOLD}${DGN}Disk Size: ${BGN}${DISK_SIZE} GB${CL}"
+  echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}${CORE_COUNT}${CL}"
+  echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}${RAM_SIZE} MiB${CL}"
+  # Verbose flag is only echoed when enabled, to keep the summary short.
+  if [ "$VERBOSE" == "yes" ]; then
+    echo -e "${SEARCH}${BOLD}${DGN}Verbose Mode: ${BGN}Enabled${CL}"
+  fi
+  echo -e "${CREATING}${BOLD}${BL}Creating a ${APP} LXC using the above default settings${CL}"
+  echo -e " "
+}
+
+# ------------------------------------------------------------------------------
+# exit_script()
+#
+# - Called when user cancels an action
+# - Clears screen and exits gracefully
+# - Bare `exit` propagates the status of the last command ($?)
+# ------------------------------------------------------------------------------
+exit_script() {
+  clear
+  echo -e "\n${CROSS}${RD}User exited script${CL}\n"
+  exit
+}
+
+# ------------------------------------------------------------------------------
+# find_host_ssh_keys()
+#
+# - Scans system for available SSH keys
+# - Supports defaults (~/.ssh, /etc/ssh/authorized_keys)
+# - Returns list of files (colon-separated) containing valid SSH public keys
+# - Sets FOUND_HOST_KEY_COUNT to number of keys found
+# - var_ssh_import_glob may override the candidate file list
+# ------------------------------------------------------------------------------
+find_host_ssh_keys() {
+  local re='(ssh-(rsa|ed25519)|ecdsa-sha2-nistp256|sk-(ssh-ed25519|ecdsa-sha2-nistp256))'
+  local -a files=() cand=()
+  local g="${var_ssh_import_glob:-}"
+  local total=0 f base c
+
+  # Build candidate list: honor a user-supplied glob, else scan common locations.
+  # nullglob makes non-matching globs expand to nothing instead of themselves.
+  shopt -s nullglob
+  if [[ -n "$g" ]]; then
+    for pat in $g; do cand+=($pat); done
+  else
+    cand+=(/root/.ssh/authorized_keys /root/.ssh/authorized_keys2)
+    cand+=(/root/.ssh/*.pub)
+    cand+=(/etc/ssh/authorized_keys /etc/ssh/authorized_keys.d/*)
+  fi
+  shopt -u nullglob
+
+  for f in "${cand[@]}"; do
+    [[ -f "$f" && -r "$f" ]] || continue
+    base="$(basename -- "$f")"
+    # Skip non-key files; private key files (id_* without .pub) are excluded.
+    case "$base" in
+    known_hosts | known_hosts.* | config) continue ;;
+    id_*) [[ "$f" != *.pub ]] && continue ;;
+    esac
+
+    # CRLF safe check for host keys (strip \r, drop comments/blank lines).
+    # Fix: $re must be expanded by the shell — the previous '"$re"' (single
+    # quotes) made grep search for the literal string "$re", so no candidate
+    # file ever counted as containing a key and the fallback always fired.
+    c=$(tr -d '\r' <"$f" | awk '
+      /^[[:space:]]*#/ {next}
+      /^[[:space:]]*$/ {next}
+      {print}
+    ' | grep -E -c "$re" || true)
+
+    if ((c > 0)); then
+      files+=("$f")
+      total=$((total + c))
+    fi
+  done
+
+  # Fallback to /root/.ssh/authorized_keys
+  if ((${#files[@]} == 0)) && [[ -r /root/.ssh/authorized_keys ]]; then
+    if grep -E -q "$re" /root/.ssh/authorized_keys; then
+      files+=(/root/.ssh/authorized_keys)
+      total=$((total + $(grep -E -c "$re" /root/.ssh/authorized_keys || echo 0)))
+    fi
+  fi
+
+  FOUND_HOST_KEY_COUNT="$total"
+  # Print the file list colon-joined on stdout (subshell keeps IFS change local).
+  (
+    IFS=:
+    echo "${files[*]}"
+  )
+}
+
+# ------------------------------------------------------------------------------
+# advanced_settings()
+#
+# - Interactive whiptail menu for advanced configuration
+# - Lets user set container type, password, CT ID, hostname, disk, CPU, RAM
+# - Supports IPv4/IPv6, DNS, MAC, VLAN, tags, SSH keys, FUSE, verbose mode
+# - Ends with confirmation or re-entry if cancelled
+# ------------------------------------------------------------------------------
+advanced_settings() {
+  # NOTE(review): the option order "--msgbox --title <text>" is unusual for
+  # whiptail (text normally follows --msgbox directly) — confirm it renders
+  # the intended message.
+  whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --msgbox --title "Here is an instructional tip:" "To make a selection, use the Spacebar." 8 58
+  # Setting Default Tag for Advanced Settings
+  # NOTE(review): ";" separator here vs "," in base_settings — verify intended.
+  TAGS="community-script;${var_tags:-}"
+  CT_DEFAULT_TYPE="${CT_TYPE}"
+  CT_TYPE=""
+  # Loop until a container type is chosen; default highlight follows the
+  # previously configured CT_TYPE (1 = unprivileged, 0 = privileged).
+  while [ -z "$CT_TYPE" ]; do
+    if [ "$CT_DEFAULT_TYPE" == "1" ]; then
+      if CT_TYPE=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --title "CONTAINER TYPE" --radiolist "Choose Type" 10 58 2 \
+        "1" "Unprivileged" ON \
+        "0" "Privileged" OFF \
+        3>&1 1>&2 2>&3); then
+        if [ -n "$CT_TYPE" ]; then
+          CT_TYPE_DESC="Unprivileged"
+          if [ "$CT_TYPE" -eq 0 ]; then
+            CT_TYPE_DESC="Privileged"
+          fi
+          echo -e "${TAB}${INFO} ProxmoxVE Version ${PVEVERSION} | Kernel: ${KERNEL_VERSION}${CL}"
+          echo -e "${OS}${BOLD}${DGN}Operating System: ${BGN}$var_os |${OSVERSION}${BOLD}${DGN}Version: ${BGN}$var_version${CL}"
+          echo -e "${CONTAINERTYPE}${BOLD}${DGN}Container Type: ${BGN}$CT_TYPE_DESC${CL}"
+        fi
+      else
+        exit_script
+      fi
+    fi
+    if [ "$CT_DEFAULT_TYPE" == "0" ]; then
+      if CT_TYPE=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --title "CONTAINER TYPE" --radiolist "Choose Type" 10 58 2 \
+        "1" "Unprivileged" OFF \
+        "0" "Privileged" ON \
+        3>&1 1>&2 2>&3); then
+        if [ -n "$CT_TYPE" ]; then
+          CT_TYPE_DESC="Unprivileged"
+          if [ "$CT_TYPE" -eq 0 ]; then
+            CT_TYPE_DESC="Privileged"
+          fi
+          echo -e "${TAB}${INFO} ProxmoxVE Version ${PVEVERSION} | Kernel: ${KERNEL_VERSION}${CL}"
+          echo -e "${OS}${BOLD}${DGN}Operating System: ${BGN}$var_os${CL}"
+          echo -e "${OSVERSION}${BOLD}${DGN}Version: ${BGN}$var_version${CL}"
+          echo -e "${CONTAINERTYPE}${BOLD}${DGN}Container Type: ${BGN}$CT_TYPE_DESC${CL}"
+        fi
+      else
+        exit_script
+      fi
+    fi
+  done
+
+  # --- Root password: empty = autologin; otherwise validated + confirmed ---
+  while true; do
+    if PW1=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --passwordbox "\nSet Root Password (needed for root ssh access)" 9 58 --title "PASSWORD (leave blank for automatic login)" 3>&1 1>&2 2>&3); then
+      # Empty = Autologin
+      if [[ -z "$PW1" ]]; then
+        PW=""
+        PW1="Automatic Login"
+        echo -e "${VERIFYPW}${BOLD}${DGN}Root Password: ${BGN}$PW1${CL}"
+        break
+      fi
+
+      # Invalid: contains spaces
+      if [[ "$PW1" == *" "* ]]; then
+        whiptail --msgbox "Password cannot contain spaces." 8 58
+        continue
+      fi
+
+      # Invalid: too short
+      if ((${#PW1} < 5)); then
+        whiptail --msgbox "Password must be at least 5 characters." 8 58
+        continue
+      fi
+
+      # Confirm password
+      if PW2=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --passwordbox "\nVerify Root Password" 9 58 --title "PASSWORD VERIFICATION" 3>&1 1>&2 2>&3); then
+        if [[ "$PW1" == "$PW2" ]]; then
+          # PW is stored as a ready-to-use pct argument ("-password <pw>").
+          PW="-password $PW1"
+          echo -e "${VERIFYPW}${BOLD}${DGN}Root Password: ${BGN}********${CL}"
+          break
+        else
+          whiptail --msgbox "Passwords do not match. Please try again." 8 58
+        fi
+      else
+        exit_script
+      fi
+    else
+      exit_script
+    fi
+  done
+
+  # --- Container ID: blank falls back to the next free ID ($NEXTID) ---
+  if CT_ID=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Container ID" 8 58 "$NEXTID" --title "CONTAINER ID" 3>&1 1>&2 2>&3); then
+    if [ -z "$CT_ID" ]; then
+      CT_ID="$NEXTID"
+    fi
+  else
+    exit_script
+  fi
+  echo -e "${CONTAINERID}${BOLD}${DGN}Container ID: ${BGN}$CT_ID${CL}"
+
+  # --- Hostname: lowercased, spaces stripped, validated against RFC 1123 ---
+  while true; do
+    if CT_NAME=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --inputbox "Set Hostname" 8 58 "$NSAPP" --title "HOSTNAME" 3>&1 1>&2 2>&3); then
+      if [ -z "$CT_NAME" ]; then
+        HN="$NSAPP"
+      else
+        HN=$(echo "${CT_NAME,,}" | tr -d ' ')
+      fi
+      # Hostname validate (RFC 1123)
+      if [[ "$HN" =~ ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ ]]; then
+        echo -e "${HOSTNAME}${BOLD}${DGN}Hostname: ${BGN}$HN${CL}"
+        break
+      else
+        whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \
+          --msgbox "❌ Invalid hostname: '$HN'\n\nOnly lowercase letters, digits and hyphens (-) are allowed.\nUnderscores (_) or other characters are not permitted!" 10 70
+      fi
+    else
+      exit_script
+    fi
+  done
+
+  # --- Disk size: positive integer (GB), defaults to var_disk ---
+  while true; do
+    DISK_SIZE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Disk Size in GB" 8 58 "$var_disk" --title "DISK SIZE" 3>&1 1>&2 2>&3) || exit_script
+
+    if [ -z "$DISK_SIZE" ]; then
+      DISK_SIZE="$var_disk"
+    fi
+
+    if [[ "$DISK_SIZE" =~ ^[1-9][0-9]*$ ]]; then
+      echo -e "${DISKSIZE}${BOLD}${DGN}Disk Size: ${BGN}${DISK_SIZE} GB${CL}"
+      break
+    else
+      whiptail --msgbox "Disk size must be a positive integer!" 8 58
+    fi
+  done
+
+  # --- CPU cores: positive integer, defaults to var_cpu ---
+  while true; do
+    CORE_COUNT=$(whiptail --backtitle "Proxmox VE Helper Scripts" \
+      --inputbox "Allocate CPU Cores" 8 58 "$var_cpu" --title "CORE COUNT" 3>&1 1>&2 2>&3) || exit_script
+
+    if [ -z "$CORE_COUNT" ]; then
+      CORE_COUNT="$var_cpu"
+    fi
+
+    if [[ "$CORE_COUNT" =~ ^[1-9][0-9]*$ ]]; then
+      echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}$CORE_COUNT${CL}"
+      break
+    else
+      whiptail --msgbox "CPU core count must be a positive integer!" 8 58
+    fi
+  done
+
+  # --- RAM: positive integer (MiB), defaults to var_ram ---
+  while true; do
+    RAM_SIZE=$(whiptail --backtitle "Proxmox VE Helper Scripts" \
+      --inputbox "Allocate RAM in MiB" 8 58 "$var_ram" --title "RAM" 3>&1 1>&2 2>&3) || exit_script
+
+    if [ -z "$RAM_SIZE" ]; then
+      RAM_SIZE="$var_ram"
+    fi
+
+    if [[ "$RAM_SIZE" =~ ^[1-9][0-9]*$ ]]; then
+      echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}${RAM_SIZE} MiB${CL}"
+      break
+    else
+      whiptail --msgbox "RAM size must be a positive integer!" 8 58
+    fi
+  done
+
+  # --- Bridge discovery: parse /etc/network/interfaces (+interfaces.d) for
+  #     iface stanzas that contain bridge/OVS options, then offer a menu ---
+  IFACE_FILEPATH_LIST="/etc/network/interfaces"$'\n'$(find "/etc/network/interfaces.d/" -type f)
+  BRIDGES=""
+  OLD_IFS=$IFS
+  IFS=$'\n'
+  for iface_filepath in ${IFACE_FILEPATH_LIST}; do
+
+    # Build "start:end" line ranges, one per iface stanza in this file.
+    iface_indexes_tmpfile=$(mktemp -q -u '.iface-XXXX')
+    (grep -Pn '^\s*iface' "${iface_filepath}" | cut -d':' -f1 && wc -l "${iface_filepath}" | cut -d' ' -f1) | awk 'FNR==1 {line=$0; next} {print line":"$0-1; line=$0}' >"${iface_indexes_tmpfile}" || true
+
+    if [ -f "${iface_indexes_tmpfile}" ]; then
+
+      while read -r pair; do
+        start=$(echo "${pair}" | cut -d':' -f1)
+        end=$(echo "${pair}" | cut -d':' -f2)
+
+        # A stanza is a bridge if it carries bridge-* or OVSBridge options.
+        if awk "NR >= ${start} && NR <= ${end}" "${iface_filepath}" | grep -qP '^\s*(bridge[-_](ports|stp|fd|vlan-aware|vids)|ovs_type\s+OVSBridge)\b'; then
+          iface_name=$(sed "${start}q;d" "${iface_filepath}" | awk '{print $2}')
+          BRIDGES="${iface_name}"$'\n'"${BRIDGES}"
+        fi
+
+      done <"${iface_indexes_tmpfile}"
+      rm -f "${iface_indexes_tmpfile}"
+    fi
+
+  done
+  IFS=$OLD_IFS
+  BRIDGES=$(echo "$BRIDGES" | grep -v '^\s*$' | sort | uniq)
+  if [[ -z "$BRIDGES" ]]; then
+    BRG="vmbr0"
+    echo -e "${BRIDGE}${BOLD}${DGN}Bridge: ${BGN}$BRG${CL}"
+  else
+    # Build bridge menu with descriptions
+    BRIDGE_MENU_OPTIONS=()
+    while IFS= read -r bridge; do
+      if [[ -n "$bridge" ]]; then
+        # Get description from Proxmox built-in method - find comment for this specific bridge
+        description=$(grep -A 10 "iface $bridge" /etc/network/interfaces | grep '^#' | head -n1 | sed 's/^#\s*//')
+        if [[ -n "$description" ]]; then
+          BRIDGE_MENU_OPTIONS+=("$bridge" "${description}")
+        else
+          BRIDGE_MENU_OPTIONS+=("$bridge" " ")
+        fi
+      fi
+    done <<<"$BRIDGES"
+
+    BRG=$(whiptail --backtitle "Proxmox VE Helper Scripts" --menu "Select network bridge: " 18 55 6 "${BRIDGE_MENU_OPTIONS[@]}" 3>&1 1>&2 2>&3)
+    if [[ -z "$BRG" ]]; then
+      exit_script
+    else
+      echo -e "${BRIDGE}${BOLD}${DGN}Bridge: ${BGN}$BRG${CL}"
+    fi
+  fi
+
+  # IPv4 methods: dhcp, static, none
+  while true; do
+    IPV4_METHOD=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \
+      --title "IPv4 Address Management" \
+      --menu "Select IPv4 Address Assignment Method:" 12 60 2 \
+      "dhcp" "Automatic (DHCP, recommended)" \
+      "static" "Static (manual entry)" \
+      3>&1 1>&2 2>&3)
+
+    exit_status=$?
+    if [ $exit_status -ne 0 ]; then
+      exit_script
+    fi
+
+    case "$IPV4_METHOD" in
+    dhcp)
+      NET="dhcp"
+      GATE=""
+      echo -e "${NETWORK}${BOLD}${DGN}IPv4: DHCP${CL}"
+      break
+      ;;
+    static)
+      # Static: call and validate CIDR address
+      while true; do
+        NET=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \
+          --inputbox "Enter Static IPv4 CIDR Address (e.g. 192.168.100.50/24)" 8 58 "" \
+          --title "IPv4 ADDRESS" 3>&1 1>&2 2>&3)
+        if [ -z "$NET" ]; then
+          whiptail --msgbox "IPv4 address must not be empty." 8 58
+          continue
+        elif [[ "$NET" =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}/([0-9]|[1-2][0-9]|3[0-2])$ ]]; then
+          echo -e "${NETWORK}${BOLD}${DGN}IPv4 Address: ${BGN}$NET${CL}"
+          break
+        else
+          whiptail --msgbox "$NET is not a valid IPv4 CIDR address. Please enter a correct value!" 8 58
+        fi
+      done
+
+      # call and validate Gateway (stored as ",gw=<ip>" suffix for pct)
+      while true; do
+        GATE1=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \
+          --inputbox "Enter Gateway IP address for static IPv4" 8 58 "" \
+          --title "Gateway IP" 3>&1 1>&2 2>&3)
+        if [ -z "$GATE1" ]; then
+          whiptail --msgbox "Gateway IP address cannot be empty." 8 58
+        elif [[ ! "$GATE1" =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}$ ]]; then
+          whiptail --msgbox "Invalid Gateway IP address format." 8 58
+        else
+          GATE=",gw=$GATE1"
+          echo -e "${GATEWAY}${BOLD}${DGN}Gateway IP Address: ${BGN}$GATE1${CL}"
+          break
+        fi
+      done
+      break
+      ;;
+    esac
+  done
+
+  # IPv6 Address Management selection
+  while true; do
+    IPV6_METHOD=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --menu \
+      "Select IPv6 Address Management Type:" 15 58 4 \
+      "auto" "SLAAC/AUTO (recommended, default)" \
+      "dhcp" "DHCPv6" \
+      "static" "Static (manual entry)" \
+      "none" "Disabled" \
+      --default-item "auto" 3>&1 1>&2 2>&3)
+    [ $? -ne 0 ] && exit_script
+
+    case "$IPV6_METHOD" in
+    auto)
+      echo -e "${NETWORK}${BOLD}${DGN}IPv6: ${BGN}SLAAC/AUTO${CL}"
+      IPV6_ADDR=""
+      IPV6_GATE=""
+      break
+      ;;
+    dhcp)
+      echo -e "${NETWORK}${BOLD}${DGN}IPv6: ${BGN}DHCPv6${CL}"
+      IPV6_ADDR="dhcp"
+      IPV6_GATE=""
+      break
+      ;;
+    static)
+      # Ask for static IPv6 address (CIDR notation, e.g., 2001:db8::1234/64)
+      while true; do
+        IPV6_ADDR=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --inputbox \
+          "Set a static IPv6 CIDR address (e.g., 2001:db8::1234/64)" 8 58 "" \
+          --title "IPv6 STATIC ADDRESS" 3>&1 1>&2 2>&3) || exit_script
+        if [[ "$IPV6_ADDR" =~ ^([0-9a-fA-F:]+:+)+[0-9a-fA-F]+(/[0-9]{1,3})$ ]]; then
+          echo -e "${NETWORK}${BOLD}${DGN}IPv6 Address: ${BGN}$IPV6_ADDR${CL}"
+          break
+        else
+          whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --msgbox \
+            "$IPV6_ADDR is an invalid IPv6 CIDR address. Please enter a valid IPv6 CIDR address (e.g., 2001:db8::1234/64)" 8 58
+        fi
+      done
+      # Optional: ask for IPv6 gateway for static config
+      while true; do
+        IPV6_GATE=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --inputbox \
+          "Enter IPv6 gateway address (optional, leave blank for none)" 8 58 "" --title "IPv6 GATEWAY" 3>&1 1>&2 2>&3)
+        if [ -z "$IPV6_GATE" ]; then
+          IPV6_GATE=""
+          break
+        elif [[ "$IPV6_GATE" =~ ^([0-9a-fA-F:]+:+)+[0-9a-fA-F]+$ ]]; then
+          break
+        else
+          whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --msgbox \
+            "Invalid IPv6 gateway format." 8 58
+        fi
+      done
+      break
+      ;;
+    none)
+      echo -e "${NETWORK}${BOLD}${DGN}IPv6: ${BGN}Disabled${CL}"
+      IPV6_ADDR="none"
+      IPV6_GATE=""
+      break
+      ;;
+    *)
+      exit_script
+      ;;
+    esac
+  done
+
+  # --- APT cacher: not applicable on Alpine (no apt) ---
+  if [ "$var_os" == "alpine" ]; then
+    APT_CACHER=""
+    APT_CACHER_IP=""
+  else
+    if APT_CACHER_IP=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --inputbox "Set APT-Cacher IP (leave blank for none)" 8 58 --title "APT-Cacher IP" 3>&1 1>&2 2>&3); then
+      APT_CACHER="${APT_CACHER_IP:+yes}"
+      echo -e "${NETWORK}${BOLD}${DGN}APT-Cacher IP Address: ${BGN}${APT_CACHER_IP:-Default}${CL}"
+    else
+      exit_script
+    fi
+  fi
+
+  # if (whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --defaultno --title "IPv6" --yesno "Disable IPv6?" 10 58); then
+  #   DISABLEIP6="yes"
+  # else
+  #   DISABLEIP6="no"
+  # fi
+  # echo -e "${DISABLEIPV6}${BOLD}${DGN}Disable IPv6: ${BGN}$DISABLEIP6${CL}"
+
+  # --- MTU: stored as ",mtu=<n>" suffix for pct; blank = bridge default ---
+  if MTU1=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --inputbox "Set Interface MTU Size (leave blank for default [The MTU of your selected vmbr, default is 1500])" 8 58 --title "MTU SIZE" 3>&1 1>&2 2>&3); then
+    if [ -z "$MTU1" ]; then
+      MTU1="Default"
+      MTU=""
+    else
+      MTU=",mtu=$MTU1"
+    fi
+    echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}$MTU1${CL}"
+  else
+    exit_script
+  fi
+
+  # --- DNS search domain: blank = inherit from host ---
+  if SD=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --inputbox "Set a DNS Search Domain (leave blank for HOST)" 8 58 --title "DNS Search Domain" 3>&1 1>&2 2>&3); then
+    if [ -z "$SD" ]; then
+      SX=Host
+      SD=""
+    else
+      SX=$SD
+      SD="-searchdomain=$SD"
+    fi
+    echo -e "${SEARCH}${BOLD}${DGN}DNS Search Domain: ${BGN}$SX${CL}"
+  else
+    exit_script
+  fi
+
+  # --- DNS server IP: blank = inherit from host ---
+  if NX=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --inputbox "Set a DNS Server IP (leave blank for HOST)" 8 58 --title "DNS SERVER IP" 3>&1 1>&2 2>&3); then
+    if [ -z "$NX" ]; then
+      NX=Host
+      NS=""
+    else
+      NS="-nameserver=$NX"
+    fi
+    echo -e "${NETWORK}${BOLD}${DGN}DNS Server IP Address: ${BGN}$NX${CL}"
+  else
+    exit_script
+  fi
+
+  # Alpine + DHCP + custom DNS needs the udhcpc workaround in the installer.
+  if [ "$var_os" == "alpine" ] && [ "$NET" == "dhcp" ] && [ "$NX" != "Host" ]; then
+    UDHCPC_FIX="yes"
+  else
+    UDHCPC_FIX="no"
+  fi
+  export UDHCPC_FIX
+
+  # --- MAC address: stored as ",hwaddr=<mac>" suffix; blank = auto-generated ---
+  if MAC1=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --inputbox "Set a MAC Address(leave blank for generated MAC)" 8 58 --title "MAC ADDRESS" 3>&1 1>&2 2>&3); then
+    if [ -z "$MAC1" ]; then
+      MAC1="Default"
+      MAC=""
+    else
+      MAC=",hwaddr=$MAC1"
+      echo -e "${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}$MAC1${CL}"
+    fi
+  else
+    exit_script
+  fi
+
+  # --- VLAN tag: stored as ",tag=<vlan>" suffix; blank = untagged ---
+  if VLAN1=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --inputbox "Set a Vlan(leave blank for no VLAN)" 8 58 --title "VLAN" 3>&1 1>&2 2>&3); then
+    if [ -z "$VLAN1" ]; then
+      VLAN1="Default"
+      VLAN=""
+    else
+      VLAN=",tag=$VLAN1"
+    fi
+    echo -e "${VLANTAG}${BOLD}${DGN}Vlan: ${BGN}$VLAN1${CL}"
+  else
+    exit_script
+  fi
+
+  # --- Tags: whitespace stripped; clearing the field stores ";" (no tags) ---
+  if ADV_TAGS=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --inputbox "Set Custom Tags?[If you remove all, there will be no tags!]" 8 58 "${TAGS}" --title "Advanced Tags" 3>&1 1>&2 2>&3); then
+    if [ -n "${ADV_TAGS}" ]; then
+      ADV_TAGS=$(echo "$ADV_TAGS" | tr -d '[:space:]')
+      TAGS="${ADV_TAGS}"
+    else
+      TAGS=";"
+    fi
+    echo -e "${NETWORK}${BOLD}${DGN}Tags: ${BGN}$TAGS${CL}"
+  else
+    exit_script
+  fi
+
+  # --- SSH, FUSE, verbosity, final confirmation ---
+  configure_ssh_settings
+  export SSH_KEYS_FILE
+  echo -e "${ROOTSSH}${BOLD}${DGN}Root SSH Access: ${BGN}$SSH${CL}"
+  if (whiptail --backtitle "Proxmox VE Helper Scripts" --defaultno --title "FUSE Support" --yesno "Enable FUSE support?\nRequired for tools like rclone, mergerfs, AppImage, etc." 10 58); then
+    ENABLE_FUSE="yes"
+  else
+    ENABLE_FUSE="no"
+  fi
+  echo -e "${FUSE}${BOLD}${DGN}Enable FUSE Support: ${BGN}$ENABLE_FUSE${CL}"
+
+  if (whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --defaultno --title "VERBOSE MODE" --yesno "Enable Verbose Mode?" 10 58); then
+    VERBOSE="yes"
+  else
+    VERBOSE="no"
+  fi
+  echo -e "${SEARCH}${BOLD}${DGN}Verbose Mode: ${BGN}$VERBOSE${CL}"
+
+  # Declining the final confirmation restarts the whole wizard (recursion).
+  if (whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --title "ADVANCED SETTINGS COMPLETE" --yesno "Ready to create ${APP} LXC?" 10 58); then
+    echo -e "${CREATING}${BOLD}${RD}Creating a ${APP} LXC using the above advanced settings${CL}"
+  else
+    clear
+    header_info
+    echo -e "${TAB}${INFO} ProxmoxVE Version ${PVEVERSION} | Kernel: ${KERNEL_VERSION}${CL}"
+    echo -e "${ADVANCED}${BOLD}${RD}Using Advanced Settings on node $PVEHOST_NAME${CL}"
+    advanced_settings
+  fi
+}
+
+# ------------------------------------------------------------------------------
+# diagnostics_check()
+#
+# - Ensures diagnostics config file exists at /usr/local/community-scripts/diagnostics
+# - Asks user whether to send anonymous diagnostic data
+# - Saves DIAGNOSTICS=yes/no in the config file
+# ------------------------------------------------------------------------------
+diagnostics_check() {
+  # Ensure the diagnostics config exists at /usr/local/community-scripts/diagnostics,
+  # asking the user on first run; afterwards read DIAGNOSTICS=yes/no from the file.
+  if ! [ -d "/usr/local/community-scripts" ]; then
+    mkdir -p /usr/local/community-scripts
+  fi
+
+  if ! [ -f "/usr/local/community-scripts/diagnostics" ]; then
+    if (whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --title "DIAGNOSTICS" --yesno "Send Diagnostics of LXC Installation?\n\n(This only transmits data without user data, just RAM, CPU, LXC name, ...)" 10 58); then
+      # BUGFIX: was `cat </usr/local/community-scripts/diagnostics`, which READS
+      # the (nonexistent) file and then executes the following lines as shell
+      # commands. A heredoc writing TO the file is what the trailing EOF marker
+      # was meant for.
+      cat <<EOF >/usr/local/community-scripts/diagnostics
+DIAGNOSTICS=yes
+
+#This file is used to store the diagnostics settings for the Community-Scripts API.
+#https://github.com/community-scripts/ProxmoxVED/discussions/1836
+#Your diagnostics will be sent to the Community-Scripts API for troubleshooting/statistical purposes.
+#You can review the data at https://community-scripts.github.io/ProxmoxVE/data
+#If you do not wish to send diagnostics, please set the variable 'DIAGNOSTICS' to "no" in /usr/local/community-scripts/diagnostics, or use the menue.
+#This will disable the diagnostics feature.
+#To send diagnostics, set the variable 'DIAGNOSTICS' to "yes" in /usr/local/community-scripts/diagnostics, or use the menue.
+#This will enable the diagnostics feature.
+#The following information will be sent:
+#"disk_size"
+#"core_count"
+#"ram_size"
+#"os_type"
+#"os_version"
+#"nsapp"
+#"method"
+#"pve_version"
+#"status"
+#If you have any concerns, please review the source code at /misc/build.func
+EOF
+      DIAGNOSTICS="yes"
+    else
+      # Same heredoc-write fix as above, for the opt-out branch.
+      cat <<EOF >/usr/local/community-scripts/diagnostics
+DIAGNOSTICS=no
+
+#This file is used to store the diagnostics settings for the Community-Scripts API.
+#https://github.com/community-scripts/ProxmoxVED/discussions/1836
+#Your diagnostics will be sent to the Community-Scripts API for troubleshooting/statistical purposes.
+#You can review the data at https://community-scripts.github.io/ProxmoxVE/data
+#If you do not wish to send diagnostics, please set the variable 'DIAGNOSTICS' to "no" in /usr/local/community-scripts/diagnostics, or use the menue.
+#This will disable the diagnostics feature.
+#To send diagnostics, set the variable 'DIAGNOSTICS' to "yes" in /usr/local/community-scripts/diagnostics, or use the menue.
+#This will enable the diagnostics feature.
+#The following information will be sent:
+#"disk_size"
+#"core_count"
+#"ram_size"
+#"os_type"
+#"os_version"
+#"nsapp"
+#"method"
+#"pve_version"
+#"status"
+#If you have any concerns, please review the source code at /misc/build.func
+EOF
+      DIAGNOSTICS="no"
+    fi
+  else
+    # File already exists: adopt the stored value.
+    DIAGNOSTICS=$(awk -F '=' '/^DIAGNOSTICS/ {print $2}' /usr/local/community-scripts/diagnostics)
+
+  fi
+
+}
+
+# ------------------------------------------------------------------------------
+# default_var_settings
+#
+# - Ensures /usr/local/community-scripts/default.vars exists (creates if missing)
+# - Loads var_* values from default.vars (safe parser, no source/eval)
+# - Precedence: ENV var_* > default.vars > built-in defaults
+# - Maps var_verbose → VERBOSE
+# - Calls base_settings "$VERBOSE" and echo_default
+# ------------------------------------------------------------------------------
+# Load global defaults: ENV var_* > default.vars > built-ins, then apply them.
+default_var_settings() {
+  # Allowed var_* keys (alphabetically sorted)
+  local VAR_WHITELIST=(
+    var_apt_cacher var_apt_cacher_ip var_brg var_cpu var_ctid var_disk var_fuse
+    var_gateway var_hostname var_ipv6_method var_ipv6_static var_mac var_mtu
+    var_net var_ns var_pw var_ram var_tags var_tun var_unprivileged
+    var_verbose var_vlan var_ssh var_ssh_authorized_key var_container_storage var_template_storage
+  )
+
+  # Snapshot: environment variables (highest precedence)
+  declare -A _HARD_ENV=()
+  local _k
+  for _k in "${VAR_WHITELIST[@]}"; do
+    if printenv "$_k" >/dev/null 2>&1; then _HARD_ENV["$_k"]=1; fi
+  done
+
+  # Find default.vars location (first match wins: system, user config, CWD)
+  local _find_default_vars
+  _find_default_vars() {
+    local f
+    for f in \
+      /usr/local/community-scripts/default.vars \
+      "$HOME/.config/community-scripts/default.vars" \
+      "./default.vars"; do
+      [ -f "$f" ] && {
+        echo "$f"
+        return 0
+      }
+    done
+    return 1
+  }
+  # Allow override of storages via env (for non-interactive use cases)
+  [ -n "${var_template_storage:-}" ] && TEMPLATE_STORAGE="$var_template_storage"
+  [ -n "${var_container_storage:-}" ] && CONTAINER_STORAGE="$var_container_storage"
+
+  # Create once, with storages already selected, no var_ctid/var_hostname lines
+  local _ensure_default_vars
+  _ensure_default_vars() {
+    _find_default_vars >/dev/null 2>&1 && return 0
+
+    local canonical="/usr/local/community-scripts/default.vars"
+    msg_info "No default.vars found. Creating ${canonical}"
+    mkdir -p /usr/local/community-scripts
+
+    # Pick storages before writing the file (always ask unless only one)
+    # Create a minimal temp file to write into
+    : >"$canonical"
+
+    # Base content (no var_ctid / var_hostname here); quoted 'EOF' means
+    # no expansion happens inside the heredoc body.
+    cat >"$canonical" <<'EOF'
+# Community-Scripts defaults (var_* only). Lines starting with # are comments.
+# Precedence: ENV var_* > default.vars > built-ins.
+# Keep keys alphabetically sorted.
+
+# Container type
+var_unprivileged=1
+
+# Resources
+var_cpu=1
+var_disk=4
+var_ram=1024
+
+# Network
+var_brg=vmbr0
+var_net=dhcp
+var_ipv6_method=none
+# var_gateway=
+# var_ipv6_static=
+# var_vlan=
+# var_mtu=
+# var_mac=
+# var_ns=
+
+# SSH
+var_ssh=no
+# var_ssh_authorized_key=
+
+# APT cacher (optional)
+# var_apt_cacher=yes
+# var_apt_cacher_ip=192.168.1.10
+
+# Features/Tags/verbosity
+var_fuse=no
+var_tun=no
+var_tags=community-script
+var_verbose=no
+
+# Security (root PW) – empty => autologin
+# var_pw=
+EOF
+
+    # Now choose storages (always prompt unless just one exists)
+    choose_and_set_storage_for_file "$canonical" template
+    choose_and_set_storage_for_file "$canonical" container
+
+    chmod 0644 "$canonical"
+    msg_ok "Created ${canonical}"
+  }
+
+  # Whitelist check
+  local _is_whitelisted_key
+  _is_whitelisted_key() {
+    local k="$1"
+    local w
+    for w in "${VAR_WHITELIST[@]}"; do [ "$k" = "$w" ] && return 0; done
+    return 1
+  }
+
+  # Safe parser for KEY=VALUE lines (no source/eval of untrusted content)
+  local _load_vars_file
+  _load_vars_file() {
+    local file="$1"
+    [ -f "$file" ] || return 0
+    msg_info "Loading defaults from ${file}"
+    local line key val
+    while IFS= read -r line || [ -n "$line" ]; do
+      # Trim leading, then trailing, whitespace.
+      line="${line#"${line%%[![:space:]]*}"}"
+      line="${line%"${line##*[![:space:]]}"}"
+      [[ -z "$line" || "$line" == \#* ]] && continue
+      if [[ "$line" =~ ^([A-Za-z_][A-Za-z0-9_]*)=(.*)$ ]]; then
+        local var_key="${BASH_REMATCH[1]}"
+        local var_val="${BASH_REMATCH[2]}"
+
+        [[ "$var_key" != var_* ]] && continue
+        _is_whitelisted_key "$var_key" || {
+          msg_debug "Ignore non-whitelisted ${var_key}"
+          continue
+        }
+
+        # Strip quotes
+        if [[ "$var_val" =~ ^\"(.*)\"$ ]]; then
+          var_val="${BASH_REMATCH[1]}"
+        elif [[ "$var_val" =~ ^\'(.*)\'$ ]]; then
+          var_val="${BASH_REMATCH[1]}"
+        fi
+
+        # NOTE(review): this case block re-strips quotes that the regex above
+        # already removed — it is redundant but harmless; consider removing.
+        case $var_val in
+        \"*\")
+          var_val=${var_val#\"}
+          var_val=${var_val%\"}
+          ;;
+        \'*\')
+          var_val=${var_val#\'}
+          var_val=${var_val%\'}
+          ;;
+        esac
+        # Hard env wins: never override a value exported by the caller.
+        [[ -n "${_HARD_ENV[$var_key]:-}" ]] && continue
+        # Set only if not already exported
+        [[ -z "${!var_key+x}" ]] && export "${var_key}=${var_val}"
+      else
+        msg_warn "Malformed line in ${file}: ${line}"
+      fi
+    done <"$file"
+    msg_ok "Loaded ${file}"
+  }
+
+  # 1) Ensure file exists
+  _ensure_default_vars
+
+  # 2) Load file
+  local dv
+  dv="$(_find_default_vars)" || {
+    msg_error "default.vars not found after ensure step"
+    return 1
+  }
+  _load_vars_file "$dv"
+
+  # 3) Map var_verbose → VERBOSE
+  if [[ -n "${var_verbose:-}" ]]; then
+    case "${var_verbose,,}" in 1 | yes | true | on) VERBOSE="yes" ;; 0 | no | false | off) VERBOSE="no" ;; *) VERBOSE="${var_verbose}" ;; esac
+  else
+    VERBOSE="no"
+  fi
+
+  # 4) Apply base settings and show summary
+  METHOD="mydefaults-global"
+  base_settings "$VERBOSE"
+  header_info
+  echo -e "${DEFAULT}${BOLD}${BL}Using My Defaults (default.vars) on node $PVEHOST_NAME${CL}"
+  echo_default
+}
+
+# ------------------------------------------------------------------------------
+# get_app_defaults_path()
+#
+# - Returns full path for app-specific defaults file
+# - Example: /usr/local/community-scripts/defaults/.vars
+# ------------------------------------------------------------------------------
+
+get_app_defaults_path() {
+  # Resolve the per-app defaults file:
+  #   /usr/local/community-scripts/defaults/<app>.vars
+  # where <app> is NSAPP, falling back to the lowercased APP name.
+  local app_slug="${NSAPP:-${APP,,}}"
+  printf '%s\n' "/usr/local/community-scripts/defaults/${app_slug}.vars"
+}
+
+# ------------------------------------------------------------------------------
+# maybe_offer_save_app_defaults
+#
+# - Called after advanced_settings returned with fully chosen values.
+# - If no .vars exists, offers to persist current advanced settings
+# into /usr/local/community-scripts/defaults/.vars
+# - Only writes whitelisted var_* keys.
+# - Extracts raw values from flags like ",gw=..." ",mtu=..." etc.
+# ------------------------------------------------------------------------------
+# Fallback definition: default_var_settings() declares its own *local* copy of
+# VAR_WHITELIST, so helpers below that run outside that function (e.g. the
+# map-based loader) need a global whitelist too. Only define it once.
+if ! declare -p VAR_WHITELIST >/dev/null 2>&1; then
+  declare -ag VAR_WHITELIST=(
+    var_apt_cacher var_apt_cacher_ip var_brg var_cpu var_ctid var_disk var_fuse
+    var_gateway var_hostname var_ipv6_method var_ipv6_static var_mac var_mtu
+    var_net var_ns var_pw var_ram var_tags var_tun var_unprivileged
+    var_verbose var_vlan var_ssh var_ssh_authorized_key var_container_storage var_template_storage
+  )
+fi
+
+# Note: _is_whitelisted_key() is defined above in default_var_settings section
+
+_sanitize_value() {
+ # Disallow Command-Substitution / Shell-Meta
+ case "$1" in
+ *'$('* | *'`'* | *';'* | *'&'* | *'<('*)
+ echo ""
+ return 0
+ ;;
+ esac
+ echo "$1"
+}
+
+# Map-Parser: read var_* from file into _VARS_IN associative array
+# Note: Main _load_vars_file() with full validation is defined in default_var_settings section
+# This simplified version is used specifically for diff operations via _VARS_IN array
+# Global scratch map filled by _load_vars_file_to_map (used for diffing).
+declare -A _VARS_IN
+_load_vars_file_to_map() {
+  # Parse KEY=VALUE lines from $1 into the global _VARS_IN associative array.
+  # Only whitelisted var_* keys are kept; comments / blank lines are skipped.
+  # (Simplified variant of _load_vars_file — no quote stripping, no export.)
+  local file="$1"
+  [ -f "$file" ] || return 0
+  _VARS_IN=() # reset the map for this file
+  local line key val
+  while IFS= read -r line || [ -n "$line" ]; do
+    # Trim leading, then trailing, whitespace.
+    line="${line#"${line%%[![:space:]]*}"}"
+    line="${line%"${line##*[![:space:]]}"}"
+    [ -n "$line" ] || continue
+    [ "${line:0:1}" = "#" ] && continue
+    # Split at the first '=' (value may itself contain '=').
+    key="${line%%=*}"
+    val="${line#*=}"
+    if [[ "$key" == var_* ]] && _is_whitelisted_key "$key"; then
+      _VARS_IN["$key"]="$val"
+    fi
+  done <"$file"
+}
+
+# Diff function for two var_* files -> produces human-readable diff list for $1 (old) vs $2 (new)
+# Diff function for two var_* files -> produces human-readable diff list for $1 (old) vs $2 (new)
+_build_vars_diff() {
+  local oldf="$1" newf="$2"
+  local k
+  local -A OLD=() NEW=()
+  # Load each file via the shared map loader, copying the scratch map out
+  # before it is reused for the second file.
+  _load_vars_file_to_map "$oldf"
+  for k in "${!_VARS_IN[@]}"; do OLD["$k"]="${_VARS_IN[$k]}"; done
+  _load_vars_file_to_map "$newf"
+  for k in "${!_VARS_IN[@]}"; do NEW["$k"]="${_VARS_IN[$k]}"; done
+
+  # Accumulate the report as text with literal \n, rendered by printf %b below.
+  local out
+  out+="# Diff for ${APP} (${NSAPP})\n"
+  out+="# Old: ${oldf}\n# New: ${newf}\n\n"
+
+  local found_change=0
+
+  # Changed & Removed
+  for k in "${!OLD[@]}"; do
+    if [[ -v NEW["$k"] ]]; then
+      if [[ "${OLD[$k]}" != "${NEW[$k]}" ]]; then
+        out+="~ ${k}\n  - old: ${OLD[$k]}\n  + new: ${NEW[$k]}\n"
+        found_change=1
+      fi
+    else
+      out+="- ${k}\n  - old: ${OLD[$k]}\n"
+      found_change=1
+    fi
+  done
+
+  # Added
+  for k in "${!NEW[@]}"; do
+    if [[ ! -v OLD["$k"] ]]; then
+      out+="+ ${k}\n  + new: ${NEW[$k]}\n"
+      found_change=1
+    fi
+  done
+
+  # Sentinel line checked by the caller to mean "nothing to update".
+  if [[ $found_change -eq 0 ]]; then
+    out+="(No differences)\n"
+  fi
+
+  printf "%b" "$out"
+}
+
+# Build a temporary .vars file from current advanced settings
+# Build a temporary .vars file from current advanced settings.
+# Echoes the temp file path; caller is responsible for removing it.
+# NOTE(review): the _-prefixed scratch variables (and tmpf) are not declared
+# `local`, so they leak into the global scope — presumably harmless here.
+_build_current_app_vars_tmp() {
+  tmpf="$(mktemp /tmp/${NSAPP:-app}.vars.new.XXXXXX)"
+
+  # NET/GW — GATE carries the pct flag suffix ",gw=ADDR"; strip to raw value.
+  _net="${NET:-}"
+  _gate=""
+  case "${GATE:-}" in
+  ,gw=*) _gate=$(echo "$GATE" | sed 's/^,gw=//') ;;
+  esac
+
+  # IPv6 — static address/gateway only apply when method is "static".
+  _ipv6_method="${IPV6_METHOD:-auto}"
+  _ipv6_static=""
+  _ipv6_gateway=""
+  if [ "$_ipv6_method" = "static" ]; then
+    _ipv6_static="${IPV6_ADDR:-}"
+    _ipv6_gateway="${IPV6_GATE:-}"
+  fi
+
+  # MTU/VLAN/MAC — same flag-suffix extraction as GATE above.
+  _mtu=""
+  _vlan=""
+  _mac=""
+  case "${MTU:-}" in
+  ,mtu=*) _mtu=$(echo "$MTU" | sed 's/^,mtu=//') ;;
+  esac
+  case "${VLAN:-}" in
+  ,tag=*) _vlan=$(echo "$VLAN" | sed 's/^,tag=//') ;;
+  esac
+  case "${MAC:-}" in
+  ,hwaddr=*) _mac=$(echo "$MAC" | sed 's/^,hwaddr=//') ;;
+  esac
+
+  # DNS / Searchdomain — these carry "-flag=" prefixes instead.
+  _ns=""
+  _searchdomain=""
+  case "${NS:-}" in
+  -nameserver=*) _ns=$(echo "$NS" | sed 's/^-nameserver=//') ;;
+  esac
+  case "${SD:-}" in
+  -searchdomain=*) _searchdomain=$(echo "$SD" | sed 's/^-searchdomain=//') ;;
+  esac
+
+  # SSH / APT / Features
+  _ssh="${SSH:-no}"
+  _ssh_auth="${SSH_AUTHORIZED_KEY:-}"
+  _apt_cacher="${APT_CACHER:-}"
+  _apt_cacher_ip="${APT_CACHER_IP:-}"
+  _fuse="${ENABLE_FUSE:-no}"
+  _tun="${ENABLE_TUN:-no}"
+  _tags="${TAGS:-}"
+  _verbose="${VERBOSE:-no}"
+
+  # Type / Resources / Identity
+  _unpriv="${CT_TYPE:-1}"
+  _cpu="${CORE_COUNT:-1}"
+  _ram="${RAM_SIZE:-1024}"
+  _disk="${DISK_SIZE:-4}"
+  _hostname="${HN:-$NSAPP}"
+
+  # Storage
+  _tpl_storage="${TEMPLATE_STORAGE:-${var_template_storage:-}}"
+  _ct_storage="${CONTAINER_STORAGE:-${var_container_storage:-}}"
+
+  # Write the file: required keys unconditionally, optional keys only when set.
+  # Every value passes through _sanitize_value to drop injection attempts.
+  {
+    echo "# App-specific defaults for ${APP} (${NSAPP})"
+    echo "# Generated on $(date -u '+%Y-%m-%dT%H:%M:%SZ')"
+    echo
+
+    echo "var_unprivileged=$(_sanitize_value "$_unpriv")"
+    echo "var_cpu=$(_sanitize_value "$_cpu")"
+    echo "var_ram=$(_sanitize_value "$_ram")"
+    echo "var_disk=$(_sanitize_value "$_disk")"
+
+    [ -n "${BRG:-}" ] && echo "var_brg=$(_sanitize_value "$BRG")"
+    [ -n "$_net" ] && echo "var_net=$(_sanitize_value "$_net")"
+    [ -n "$_gate" ] && echo "var_gateway=$(_sanitize_value "$_gate")"
+    [ -n "$_mtu" ] && echo "var_mtu=$(_sanitize_value "$_mtu")"
+    [ -n "$_vlan" ] && echo "var_vlan=$(_sanitize_value "$_vlan")"
+    [ -n "$_mac" ] && echo "var_mac=$(_sanitize_value "$_mac")"
+    [ -n "$_ns" ] && echo "var_ns=$(_sanitize_value "$_ns")"
+
+    [ -n "$_ipv6_method" ] && echo "var_ipv6_method=$(_sanitize_value "$_ipv6_method")"
+    [ -n "$_ipv6_static" ] && echo "var_ipv6_static=$(_sanitize_value "$_ipv6_static")"
+
+    [ -n "$_ssh" ] && echo "var_ssh=$(_sanitize_value "$_ssh")"
+    [ -n "$_ssh_auth" ] && echo "var_ssh_authorized_key=$(_sanitize_value "$_ssh_auth")"
+
+    [ -n "$_apt_cacher" ] && echo "var_apt_cacher=$(_sanitize_value "$_apt_cacher")"
+    [ -n "$_apt_cacher_ip" ] && echo "var_apt_cacher_ip=$(_sanitize_value "$_apt_cacher_ip")"
+
+    [ -n "$_fuse" ] && echo "var_fuse=$(_sanitize_value "$_fuse")"
+    [ -n "$_tun" ] && echo "var_tun=$(_sanitize_value "$_tun")"
+    [ -n "$_tags" ] && echo "var_tags=$(_sanitize_value "$_tags")"
+    [ -n "$_verbose" ] && echo "var_verbose=$(_sanitize_value "$_verbose")"
+
+    [ -n "$_hostname" ] && echo "var_hostname=$(_sanitize_value "$_hostname")"
+    # NOTE(review): var_searchdomain is not in VAR_WHITELIST, so the loaders
+    # above will silently ignore this line — confirm whether it belongs there.
+    [ -n "$_searchdomain" ] && echo "var_searchdomain=$(_sanitize_value "$_searchdomain")"
+
+    [ -n "$_tpl_storage" ] && echo "var_template_storage=$(_sanitize_value "$_tpl_storage")"
+    [ -n "$_ct_storage" ] && echo "var_container_storage=$(_sanitize_value "$_ct_storage")"
+  } >"$tmpf"
+
+  echo "$tmpf"
+}
+
+# ------------------------------------------------------------------------------
+# maybe_offer_save_app_defaults()
+#
+# - Called after advanced_settings()
+# - Offers to save current values as app defaults if not existing
+# - If file exists: shows diff and allows Update, Keep, View Diff, or Cancel
+# ------------------------------------------------------------------------------
+# Offer to persist the just-chosen advanced settings as app defaults.
+# No existing file: yes/no prompt to create it. Existing file: diff-driven
+# menu (Update / Keep / View Diff / Cancel). Temp files are always cleaned up.
+maybe_offer_save_app_defaults() {
+  local app_vars_path
+  app_vars_path="$(get_app_defaults_path)"
+
+  # always build from current settings
+  local new_tmp diff_tmp
+  new_tmp="$(_build_current_app_vars_tmp)"
+  diff_tmp="$(mktemp -p /tmp "${NSAPP:-app}.vars.diff.XXXXXX)"
+
+  # 1) if no file → offer to create
+  if [[ ! -f "$app_vars_path" ]]; then
+    if whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \
+      --yesno "Save these advanced settings as defaults for ${APP}?\n\nThis will create:\n${app_vars_path}" 12 72; then
+      mkdir -p "$(dirname "$app_vars_path")"
+      install -m 0644 "$new_tmp" "$app_vars_path"
+      msg_ok "Saved app defaults: ${app_vars_path}"
+    fi
+    rm -f "$new_tmp" "$diff_tmp"
+    return 0
+  fi
+
+  # 2) if file exists → build diff
+  _build_vars_diff "$app_vars_path" "$new_tmp" >"$diff_tmp"
+
+  # if no differences → do nothing (matches the sentinel _build_vars_diff emits)
+  if grep -q "^(No differences)$" "$diff_tmp"; then
+    rm -f "$new_tmp" "$diff_tmp"
+    return 0
+  fi
+
+  # 3) if file exists → show menu with default selection "Update Defaults";
+  #    loop so "View Diff" can return to the menu.
+  local app_vars_file
+  app_vars_file="$(basename "$app_vars_path")"
+
+  while true; do
+    local sel
+    sel="$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \
+      --title "APP DEFAULTS – ${APP}" \
+      --menu "Differences detected. What do you want to do?" 20 78 10 \
+      "Update Defaults" "Write new values to ${app_vars_file}" \
+      "Keep Current" "Keep existing defaults (no changes)" \
+      "View Diff" "Show a detailed diff" \
+      "Cancel" "Abort without changes" \
+      --default-item "Update Defaults" \
+      3>&1 1>&2 2>&3)" || { sel="Cancel"; }
+
+    case "$sel" in
+    "Update Defaults")
+      install -m 0644 "$new_tmp" "$app_vars_path"
+      msg_ok "Updated app defaults: ${app_vars_path}"
+      break
+      ;;
+    "Keep Current")
+      msg_info "Keeping current app defaults: ${app_vars_path}"
+      break
+      ;;
+    "View Diff")
+      whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \
+        --title "Diff – ${APP}" \
+        --scrolltext --textbox "$diff_tmp" 25 100
+      ;;
+    "Cancel" | *)
+      msg_info "Canceled. No changes to app defaults."
+      break
+      ;;
+    esac
+  done
+
+  rm -f "$new_tmp" "$diff_tmp"
+}
+
+ensure_storage_selection_for_vars_file() {
+  # Make sure the vars file $1 carries both storage keys; adopt stored values
+  # when present, otherwise prompt for each class and persist the choice.
+  local vf="$1"
+
+  # Pull any storages already persisted in the file.
+  local tpl ct
+  tpl=$(sed -n 's/^var_template_storage=//p' "$vf")
+  ct=$(sed -n 's/^var_container_storage=//p' "$vf")
+
+  # Both present → sync the globals and skip the interactive selection.
+  if [[ -n "$tpl" && -n "$ct" ]]; then
+    TEMPLATE_STORAGE="$tpl"
+    CONTAINER_STORAGE="$ct"
+    return 0
+  fi
+
+  # At least one missing → run the picker for each class (it writes back).
+  choose_and_set_storage_for_file "$vf" template
+  choose_and_set_storage_for_file "$vf" container
+
+  msg_ok "Storage configuration saved to $(basename "$vf")"
+}
+
+# Toggle the DIAGNOSTICS setting via whiptail and persist it to the config
+# file. The yes/no buttons are relabeled so the affirmative button always
+# offers the OPPOSITE of the current value (i.e. the toggle action).
+diagnostics_menu() {
+  if [ "${DIAGNOSTICS:-no}" = "yes" ]; then
+    # Currently on → "No" flips it off; "Back" leaves it unchanged.
+    if whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \
+      --title "DIAGNOSTIC SETTINGS" \
+      --yesno "Send Diagnostics?\n\nCurrent: ${DIAGNOSTICS}" 10 58 \
+      --yes-button "No" --no-button "Back"; then
+      DIAGNOSTICS="no"
+      sed -i 's/^DIAGNOSTICS=.*/DIAGNOSTICS=no/' /usr/local/community-scripts/diagnostics
+      whiptail --msgbox "Diagnostics set to ${DIAGNOSTICS}." 8 58
+    fi
+  else
+    # Currently off → "Yes" flips it on; "Back" leaves it unchanged.
+    if whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \
+      --title "DIAGNOSTIC SETTINGS" \
+      --yesno "Send Diagnostics?\n\nCurrent: ${DIAGNOSTICS}" 10 58 \
+      --yes-button "Yes" --no-button "Back"; then
+      DIAGNOSTICS="yes"
+      sed -i 's/^DIAGNOSTICS=.*/DIAGNOSTICS=yes/' /usr/local/community-scripts/diagnostics
+      whiptail --msgbox "Diagnostics set to ${DIAGNOSTICS}." 8 58
+    fi
+  fi
+}
+
+ensure_global_default_vars_file() {
+  # Guarantee the global default.vars exists (creating an empty one if
+  # needed), then echo its path for the caller to capture.
+  local vars_path="/usr/local/community-scripts/default.vars"
+  if [[ ! -f "$vars_path" ]]; then
+    mkdir -p "${vars_path%/*}"
+    touch "$vars_path"
+  fi
+  echo "$vars_path"
+}
+
+# ------------------------------------------------------------------------------
+# install_script()
+#
+# - Main entrypoint for installation mode
+# - Runs safety checks (pve_check, root_check, maxkeys_check, diagnostics_check)
+# - Builds interactive menu (Default, Verbose, Advanced, My Defaults, App Defaults, Diagnostics, Storage, Exit)
+# - Applies chosen settings and triggers container build
+# ------------------------------------------------------------------------------
+install_script() {
+  # Pre-flight checks — each exits on failure.
+  pve_check
+  shell_check
+  root_check
+  arch_check
+  ssh_check
+  maxkeys_check
+  diagnostics_check
+
+  # Stop the instance pinger so it does not race container creation.
+  if systemctl is-active -q ping-instances.service; then
+    systemctl -q stop ping-instances.service
+  fi
+
+  NEXTID=$(pvesh get /cluster/nextid)
+  timezone=$(cat /etc/timezone)
+
+  # Show APP Header
+  header_info
+
+  # --- Support CLI argument as direct preset (default, advanced, …) ---
+  CHOICE="${mode:-${1:-}}"
+
+  # If no CLI argument → show whiptail menu
+  # Build menu dynamically based on available options: the numeric slot of
+  # "App Defaults" / "Settings" shifts depending on whether an app-specific
+  # .vars file exists, so the slots are recorded for the case dispatch below.
+  local appdefaults_option=""
+  local settings_option=""
+  local menu_items=(
+    "1" "Default Install"
+    "2" "Advanced Install"
+    "3" "My Defaults"
+  )
+
+  if [ -f "$(get_app_defaults_path)" ]; then
+    appdefaults_option="4"
+    menu_items+=("4" "App Defaults for ${APP}")
+    settings_option="5"
+    menu_items+=("5" "Settings")
+  else
+    settings_option="4"
+    menu_items+=("4" "Settings")
+  fi
+
+  if [ -z "$CHOICE" ]; then
+
+    TMP_CHOICE=$(whiptail \
+      --backtitle "Proxmox VE Helper Scripts" \
+      --title "Community-Scripts Options" \
+      --ok-button "Select" --cancel-button "Exit Script" \
+      --notags \
+      --menu "\nChoose an option:\n  Use TAB or Arrow keys to navigate, ENTER to select.\n" \
+      20 60 9 \
+      "${menu_items[@]}" \
+      --default-item "1" \
+      3>&1 1>&2 2>&3) || exit_script
+    CHOICE="$TMP_CHOICE"
+  fi
+
+  # Expose the dynamic slots for the case patterns below.
+  APPDEFAULTS_OPTION="$appdefaults_option"
+  SETTINGS_OPTION="$settings_option"
+
+  # --- Main case ---
+  # defaults_target: vars file to post-check for storage keys (empty = skip).
+  # run_maybe_offer: whether to offer saving advanced answers as app defaults.
+  local defaults_target=""
+  local run_maybe_offer="no"
+  case "$CHOICE" in
+  1 | default | DEFAULT)
+    header_info
+    echo -e "${DEFAULT}${BOLD}${BL}Using Default Settings on node $PVEHOST_NAME${CL}"
+    VERBOSE="no"
+    METHOD="default"
+    base_settings "$VERBOSE"
+    echo_default
+    defaults_target="$(ensure_global_default_vars_file)"
+    ;;
+  2 | advanced | ADVANCED)
+    header_info
+    echo -e "${TAB}${INFO} ProxmoxVE Version ${PVEVERSION} | Kernel: ${KERNEL_VERSION}${CL}"
+    echo -e "${ADVANCED}${BOLD}${RD}Using Advanced Install on node $PVEHOST_NAME${CL}"
+    METHOD="advanced"
+    base_settings
+    advanced_settings
+    defaults_target="$(ensure_global_default_vars_file)"
+    run_maybe_offer="yes"
+    ;;
+  3 | mydefaults | MYDEFAULTS)
+    default_var_settings || {
+      msg_error "Failed to apply default.vars"
+      exit 1
+    }
+    defaults_target="/usr/local/community-scripts/default.vars"
+    ;;
+  "$APPDEFAULTS_OPTION" | appdefaults | APPDEFAULTS)
+    if [ -f "$(get_app_defaults_path)" ]; then
+      header_info
+      echo -e "${DEFAULT}${BOLD}${BL}Using App Defaults for ${APP} on node $PVEHOST_NAME${CL}"
+      METHOD="appdefaults"
+      base_settings
+      _load_vars_file "$(get_app_defaults_path)"
+      echo_default
+      defaults_target="$(get_app_defaults_path)"
+    else
+      msg_error "No App Defaults available for ${APP}"
+      exit 1
+    fi
+    ;;
+  "$SETTINGS_OPTION" | settings | SETTINGS)
+    settings_menu
+    defaults_target=""
+    ;;
+  *)
+    echo -e "${CROSS}${RD}Invalid option: $CHOICE${CL}"
+    exit 1
+    ;;
+  esac
+
+  if [[ -n "$defaults_target" ]]; then
+    ensure_storage_selection_for_vars_file "$defaults_target"
+  fi
+
+  if [[ "$run_maybe_offer" == "yes" ]]; then
+    maybe_offer_save_app_defaults
+  fi
+}
+
+edit_default_storage() {
+ local vf="/usr/local/community-scripts/default.vars"
+
+ # Ensure file exists
+ if [[ ! -f "$vf" ]]; then
+ mkdir -p "$(dirname "$vf")"
+ touch "$vf"
+ fi
+
+ # Let ensure_storage_selection_for_vars_file handle everything
+ ensure_storage_selection_for_vars_file "$vf"
+}
+
+# Settings loop: diagnostics toggle, editing default.vars / storages, and —
+# when an app-specific .vars exists — editing that too. The numeric slot of
+# "Exit" shifts (4 vs 5) depending on whether the app entry is shown.
+settings_menu() {
+  while true; do
+    local settings_items=(
+      "1" "Manage API-Diagnostic Setting"
+      "2" "Edit Default.vars"
+      "3" "Edit Default Storage"
+    )
+    if [ -f "$(get_app_defaults_path)" ]; then
+      settings_items+=("4" "Edit App.vars for ${APP}")
+      settings_items+=("5" "Exit")
+    else
+      settings_items+=("4" "Exit")
+    fi
+
+    local choice
+    choice=$(whiptail --backtitle "Proxmox VE Helper Scripts" \
+      --title "Community-Scripts SETTINGS Menu" \
+      --ok-button "OK" --cancel-button "Back" \
+      --menu "\n\nChoose a settings option:\n\nUse TAB or Arrow keys to navigate, ENTER to select." 20 60 9 \
+      "${settings_items[@]}" \
+      3>&1 1>&2 2>&3) || break
+
+    case "$choice" in
+    1) diagnostics_menu ;;
+    2) ${EDITOR:-nano} /usr/local/community-scripts/default.vars ;;
+    3) edit_default_storage ;;
+    4)
+      # Slot 4 is either "Edit App.vars" (file exists) or "Exit" (it doesn't).
+      if [ -f "$(get_app_defaults_path)" ]; then
+        ${EDITOR:-nano} "$(get_app_defaults_path)"
+      else
+        exit_script
+      fi
+      ;;
+    5) exit_script ;;
+    esac
+  done
+}
+
+# ===== Unified storage selection & writing to vars files =====
+_write_storage_to_vars() {
+  # Usage: _write_storage_to_vars <vars_file> <key> <value>
+  # Drops every existing line for <key> — active or commented out — then
+  # appends a single fresh "<key>=<value>" so the file never holds duplicates.
+  local vf="$1" key="$2" val="$3"
+  sed -i -e "/^[#[:space:]]*${key}=/d" "$vf"
+  printf '%s=%s\n' "$key" "$val" >>"$vf"
+}
+
+# Pick a storage for one class ('container'|'template'), write it into the
+# vars file $1, and export the matching var_*/UPPER globals for later steps.
+# Auto-picks when exactly one storage supports the class's content type.
+choose_and_set_storage_for_file() {
+  # $1 = vars_file, $2 = class ('container'|'template')
+  local vf="$1" class="$2" key="" current=""
+  case "$class" in
+  container) key="var_container_storage" ;;
+  template) key="var_template_storage" ;;
+  *)
+    msg_error "Unknown storage class: $class"
+    return 1
+    ;;
+  esac
+
+  # NOTE(review): `current` is read here but never used below — the selection
+  # is always offered regardless of a stored value (see comment further down).
+  current=$(awk -F= -v k="^${key}=" '$0 ~ k {print $2; exit}' "$vf")
+
+  # If only one storage exists for the content type, auto-pick. Else always ask (your wish #4).
+  # Proxmox content types: 'rootdir' = container disks, 'vztmpl' = CT templates.
+  local content="rootdir"
+  [[ "$class" == "template" ]] && content="vztmpl"
+  local count
+  count=$(pvesm status -content "$content" | awk 'NR>1{print $1}' | wc -l)
+
+  if [[ "$count" -eq 1 ]]; then
+    STORAGE_RESULT=$(pvesm status -content "$content" | awk 'NR>1{print $1; exit}')
+    STORAGE_INFO=""
+  else
+    # If the current value is preselectable, we could show it, but per your requirement we always offer selection
+    select_storage "$class" || return 1
+  fi
+
+  _write_storage_to_vars "$vf" "$key" "$STORAGE_RESULT"
+
+  # Keep environment in sync for later steps (e.g. app-default save)
+  if [[ "$class" == "container" ]]; then
+    export var_container_storage="$STORAGE_RESULT"
+    export CONTAINER_STORAGE="$STORAGE_RESULT"
+  else
+    export var_template_storage="$STORAGE_RESULT"
+    export TEMPLATE_STORAGE="$STORAGE_RESULT"
+  fi
+
+  msg_ok "Updated ${key} → ${STORAGE_RESULT}"
+}
+
+# ------------------------------------------------------------------------------
+# check_container_resources()
+#
+# - Compares host RAM/CPU with required values
+# - Warns if under-provisioned and asks user to continue or abort
+# ------------------------------------------------------------------------------
+check_container_resources() {
+  # Compare host RAM/CPU against the required var_ram / var_cpu and, when the
+  # host is under-provisioned, require explicit confirmation to continue.
+  current_ram=$(free -m | awk 'NR==2{print $2}')
+  current_cpu=$(nproc)
+
+  if [[ "$current_ram" -lt "$var_ram" ]] || [[ "$current_cpu" -lt "$var_cpu" ]]; then
+    echo -e "\n${INFO}${HOLD} ${GN}Required: ${var_cpu} CPU, ${var_ram}MB RAM ${CL}| ${RD}Current: ${current_cpu} CPU, ${current_ram}MB RAM${CL}"
+    echo -e "${YWB}Please ensure that the ${APP} LXC is configured with at least ${var_cpu} vCPU and ${var_ram} MB RAM for the build process.${CL}\n"
+    echo -ne "${INFO}${HOLD} May cause data loss! ${INFO} Continue update with under-provisioned LXC? "
+    read -r prompt
+    # Accept 'y' as well as 'yes' (case-insensitive) — consistent with
+    # check_container_storage(), which already matches ^(y|yes)$.
+    if [[ ! ${prompt,,} =~ ^(y|yes)$ ]]; then
+      echo -e "${CROSS}${HOLD} ${YWB}Exiting based on user input.${CL}"
+      exit 1
+    fi
+  else
+    echo -e ""
+  fi
+}
+
+# ------------------------------------------------------------------------------
+# check_container_storage()
+#
+# - Checks /boot partition usage
+# - Warns if usage >80% and asks user confirmation before proceeding
+# ------------------------------------------------------------------------------
+check_container_storage() {
+  # Warn when /boot usage exceeds 80% and require confirmation to continue.
+  total_size=$(df /boot --output=size | tail -n 1)
+  local used_size
+  used_size=$(df /boot --output=used | tail -n 1)
+  usage=$((100 * used_size / total_size))
+  if ((usage > 80)); then
+    echo -e "${INFO}${HOLD} ${YWB}Warning: Storage is dangerously low (${usage}%).${CL}"
+    echo -ne "Continue anyway? "
+    read -r prompt
+    case "${prompt,,}" in
+    y | yes) ;;
+    *)
+      echo -e "${CROSS}${HOLD}${YWB}Exiting based on user input.${CL}"
+      exit 1
+      ;;
+    esac
+  fi
+}
+
+# ------------------------------------------------------------------------------
+# ssh_extract_keys_from_file()
+#
+# - Extracts valid SSH public keys from given file
+# - Supports RSA, Ed25519, ECDSA and filters out comments/invalid lines
+# ------------------------------------------------------------------------------
+# Extract valid SSH public keys from file $1 (one per output line).
+# Handles CRLF input, skips comments/blank lines, accepts bare
+# "type base64 [comment]" lines, and strips leading options (e.g.
+# 'command="..."') by printing from the first recognized key type onward.
+# Supported types: ssh-rsa, ssh-ed25519, ecdsa-sha2-nistp256 and their sk- variants.
+ssh_extract_keys_from_file() {
+  local f="$1"
+  [[ -r "$f" ]] || return 0
+  # The awk program below is a quoted string passed verbatim to awk; its
+  # embedded comments ("nackt" = bare key, "mit Optionen" = with options)
+  # are awk comments and left untouched here.
+  tr -d '\r' <"$f" | awk '
+    /^[[:space:]]*#/ {next}
+    /^[[:space:]]*$/ {next}
+    # nackt: typ base64 [comment]
+    /^(ssh-(rsa|ed25519)|ecdsa-sha2-nistp256|sk-(ssh-ed25519|ecdsa-sha2-nistp256))[[:space:]]+/ {print; next}
+    # mit Optionen: finde ab erstem Key-Typ
+    {
+      match($0, /(ssh-(rsa|ed25519)|ecdsa-sha2-nistp256|sk-(ssh-ed25519|ecdsa-sha2-nistp256))[[:space:]]+/)
+      if (RSTART>0) { print substr($0, RSTART) }
+    }
+  '
+}
+
+# ------------------------------------------------------------------------------
+# ssh_build_choices_from_files()
+#
+# - Builds interactive whiptail checklist of available SSH keys
+# - Generates fingerprint, type and comment for each key
+# ------------------------------------------------------------------------------
+# Build whiptail checklist entries (CHOICES) from every SSH public key found
+# in the given files, and record id→key lines in the temp file MAPFILE so the
+# caller can look selected tags back up. COUNT holds the number of keys.
+# NOTE(review): MAPFILE shadows bash's builtin `mapfile` target array name,
+# and the mktemp file is never removed — both presumably tolerated here.
+ssh_build_choices_from_files() {
+  local -a files=("$@")
+  CHOICES=()
+  COUNT=0
+  MAPFILE="$(mktemp)"
+  local id key typ fp cmt base ln=0
+
+  for f in "${files[@]}"; do
+    [[ -f "$f" && -r "$f" ]] || continue
+    base="$(basename -- "$f")"
+    # Skip files that never hold public keys; for id_* only accept *.pub
+    # (the private half must not be offered).
+    case "$base" in
+    known_hosts | known_hosts.* | config) continue ;;
+    id_*) [[ "$f" != *.pub ]] && continue ;;
+    esac
+
+    # map every key in file
+    while IFS= read -r key; do
+      [[ -n "$key" ]] || continue
+
+      typ=""
+      fp=""
+      cmt=""
+      # Only the pure key part (without options) is already included in ‘key’.
+      read -r _typ _b64 _cmt <<<"$key"
+      typ="${_typ:-key}"
+      cmt="${_cmt:-}"
+      # Fingerprint via ssh-keygen (if available)
+      if command -v ssh-keygen >/dev/null 2>&1; then
+        fp="$(printf '%s\n' "$key" | ssh-keygen -lf - 2>/dev/null | awk '{print $2}')"
+      fi
+      # Label shorten
+      [[ ${#cmt} -gt 40 ]] && cmt="${cmt:0:37}..."
+
+      ln=$((ln + 1))
+      COUNT=$((COUNT + 1))
+      id="K${COUNT}"
+      # Persist "id|full key line" for later retrieval by tag.
+      echo "${id}|${key}" >>"$MAPFILE"
+      CHOICES+=("$id" "[$typ] ${fp:+$fp }${cmt:+$cmt }— ${base}" "OFF")
+    done < <(ssh_extract_keys_from_file "$f")
+  done
+}
+
+# ------------------------------------------------------------------------------
+# ssh_discover_default_files()
+#
+# - Scans standard paths for SSH keys
+# - Includes ~/.ssh/*.pub, /etc/ssh/authorized_keys, etc.
+# ------------------------------------------------------------------------------
+ssh_discover_default_files() {
+  # Emit NUL-separated candidate files that may hold SSH public keys:
+  # root's authorized_keys files, root's *.pub keys, and system-wide paths.
+  # nullglob makes unmatched globs vanish instead of staying literal.
+  local -a candidates
+  shopt -s nullglob
+  candidates=(
+    /root/.ssh/authorized_keys
+    /root/.ssh/authorized_keys2
+    /root/.ssh/*.pub
+    /etc/ssh/authorized_keys
+    /etc/ssh/authorized_keys.d/*
+  )
+  shopt -u nullglob
+  printf '%s\0' "${candidates[@]}"
+}
+
+configure_ssh_settings() {
+  # Collect SSH public keys for root into SSH_KEYS_FILE (one per line) from
+  # detected host keys, a pasted key, or a user-supplied folder/glob, then
+  # ask whether root SSH access should be enabled (sets SSH=yes/no).
+  SSH_KEYS_FILE="$(mktemp)"
+  : >"$SSH_KEYS_FILE"
+
+  # BUGFIX: the previous `IFS=$'\0' read -r -d '' -a _def_files` captured only
+  # the FIRST NUL-terminated path ($'\0' is an empty string in bash, and
+  # `read -d ''` stops at the first NUL). mapfile -d '' collects them all.
+  mapfile -d '' -t _def_files < <(ssh_discover_default_files)
+  ssh_build_choices_from_files "${_def_files[@]}"
+  local default_key_count="$COUNT"
+
+  local ssh_key_mode
+  if [[ "$default_key_count" -gt 0 ]]; then
+    ssh_key_mode=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --title "SSH KEY SOURCE" --menu \
+      "Provision SSH keys for root:" 14 72 4 \
+      "found" "Select from detected keys (${default_key_count})" \
+      "manual" "Paste a single public key" \
+      "folder" "Scan another folder (path or glob)" \
+      "none" "No keys" 3>&1 1>&2 2>&3) || exit_script
+  else
+    ssh_key_mode=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --title "SSH KEY SOURCE" --menu \
+      "No host keys detected; choose manual/none:" 12 72 2 \
+      "manual" "Paste a single public key" \
+      "none" "No keys" 3>&1 1>&2 2>&3) || exit_script
+  fi
+
+  case "$ssh_key_mode" in
+  found)
+    # Checklist returns quoted tags ("K1" "K3" ...); strip quotes and look
+    # each tag up in MAPFILE to recover the full key line.
+    local selection
+    selection=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --title "SELECT HOST KEYS" \
+      --checklist "Select one or more keys to import:" 20 140 10 "${CHOICES[@]}" 3>&1 1>&2 2>&3) || exit_script
+    for tag in $selection; do
+      tag="${tag%\"}"
+      tag="${tag#\"}"
+      local line
+      line=$(grep -E "^${tag}\|" "$MAPFILE" | head -n1 | cut -d'|' -f2-)
+      [[ -n "$line" ]] && printf '%s\n' "$line" >>"$SSH_KEYS_FILE"
+    done
+    ;;
+  manual)
+    SSH_AUTHORIZED_KEY="$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \
+      --inputbox "Paste one SSH public key line (ssh-ed25519/ssh-rsa/...)" 10 72 --title "SSH Public Key" 3>&1 1>&2 2>&3)"
+    [[ -n "$SSH_AUTHORIZED_KEY" ]] && printf '%s\n' "$SSH_AUTHORIZED_KEY" >>"$SSH_KEYS_FILE"
+    ;;
+  folder)
+    local glob_path
+    glob_path=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \
+      --inputbox "Enter a folder or glob to scan (e.g. /root/.ssh/*.pub)" 10 72 --title "Scan Folder/Glob" 3>&1 1>&2 2>&3)
+    if [[ -n "$glob_path" ]]; then
+      # BUGFIX: `read -r -a` only word-splits and never expands globs, so
+      # patterns like /root/.ssh/*.pub matched nothing. The deliberate
+      # unquoted expansion below performs word splitting AND pathname
+      # expansion, with nullglob dropping unmatched patterns.
+      shopt -s nullglob
+      # shellcheck disable=SC2206
+      _scan_files=($glob_path)
+      shopt -u nullglob
+      if [[ "${#_scan_files[@]}" -gt 0 ]]; then
+        ssh_build_choices_from_files "${_scan_files[@]}"
+        if [[ "$COUNT" -gt 0 ]]; then
+          local folder_selection
+          folder_selection=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --title "SELECT FOLDER KEYS" \
+            --checklist "Select key(s) to import:" 20 78 10 "${CHOICES[@]}" 3>&1 1>&2 2>&3) || exit_script
+          for tag in $folder_selection; do
+            tag="${tag%\"}"
+            tag="${tag#\"}"
+            local line
+            line=$(grep -E "^${tag}\|" "$MAPFILE" | head -n1 | cut -d'|' -f2-)
+            [[ -n "$line" ]] && printf '%s\n' "$line" >>"$SSH_KEYS_FILE"
+          done
+        else
+          whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --msgbox "No keys found in: $glob_path" 8 60
+        fi
+      else
+        whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --msgbox "Path/glob returned no files." 8 60
+      fi
+    fi
+    ;;
+  none)
+    :
+    ;;
+  esac
+
+  # De-duplicate collected keys and terminate the file with a newline.
+  if [[ -s "$SSH_KEYS_FILE" ]]; then
+    sort -u -o "$SSH_KEYS_FILE" "$SSH_KEYS_FILE"
+    printf '\n' >>"$SSH_KEYS_FILE"
+  fi
+
+  # Only offer enabling root SSH when keys exist or a root password was set.
+  if [[ -s "$SSH_KEYS_FILE" || "$PW" == -password* ]]; then
+    if (whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --defaultno --title "SSH ACCESS" --yesno "Enable root SSH access?" 10 58); then
+      SSH="yes"
+    else
+      SSH="no"
+    fi
+  else
+    SSH="no"
+  fi
+}
+
+# ------------------------------------------------------------------------------
+# start()
+#
+# - Entry point of script
+# - On Proxmox host: calls install_script
+# - In silent mode: runs update_script
+# - Otherwise: shows update/setting menu
+# ------------------------------------------------------------------------------
+start() {
+  # Entry point: on a Proxmox VE host run the installer; with PHS_SILENT=1 run
+  # the updater non-interactively; otherwise show the update/settings menu.
+  source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/tools.func)
+  if command -v pveversion >/dev/null 2>&1; then
+    # pveversion present => we are on the Proxmox VE host itself
+    install_script || return 0
+    return 0
+  elif [ ! -z ${PHS_SILENT+x} ] && [[ "${PHS_SILENT}" == "1" ]]; then
+    # Silent (non-interactive) update mode
+    VERBOSE="no"
+    set_std_mode
+    update_script
+  else
+    # Interactive update: let the user choose silent/verbose/cancel
+    CHOICE=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --title "${APP} LXC Update/Setting" --menu \
+      "Support/Update functions for ${APP} LXC. Choose an option:" \
+      12 60 3 \
+      "1" "YES (Silent Mode)" \
+      "2" "YES (Verbose Mode)" \
+      "3" "NO (Cancel Update)" --nocancel --default-item "1" 3>&1 1>&2 2>&3)
+
+    case "$CHOICE" in
+    1)
+      VERBOSE="no"
+      set_std_mode
+      ;;
+    2)
+      VERBOSE="yes"
+      set_std_mode
+      ;;
+    3)
+      clear
+      exit_script
+      exit
+      ;;
+    esac
+    update_script
+  fi
+}
+
+# ------------------------------------------------------------------------------
+# build_container()
+#
+# - Creates and configures the LXC container
+# - Builds network string and applies features (FUSE, TUN, VAAPI passthrough)
+# - Starts container and waits for network connectivity
+# - Installs base packages, SSH keys, and runs -install.sh
+# ------------------------------------------------------------------------------
+build_container() {
+ # if [ "$VERBOSE" == "yes" ]; then set -x; fi
+
+ NET_STRING="-net0 name=eth0,bridge=${BRG:-vmbr0}"
+
+ # MAC
+ if [[ -n "$MAC" ]]; then
+ case "$MAC" in
+ ,hwaddr=*) NET_STRING+="$MAC" ;;
+ *) NET_STRING+=",hwaddr=$MAC" ;;
+ esac
+ fi
+
+  # IP (always required, defaults to dhcp)
+ NET_STRING+=",ip=${NET:-dhcp}"
+
+ # Gateway
+ if [[ -n "$GATE" ]]; then
+ case "$GATE" in
+ ,gw=*) NET_STRING+="$GATE" ;;
+ *) NET_STRING+=",gw=$GATE" ;;
+ esac
+ fi
+
+ # VLAN
+ if [[ -n "$VLAN" ]]; then
+ case "$VLAN" in
+ ,tag=*) NET_STRING+="$VLAN" ;;
+ *) NET_STRING+=",tag=$VLAN" ;;
+ esac
+ fi
+
+ # MTU
+ if [[ -n "$MTU" ]]; then
+ case "$MTU" in
+ ,mtu=*) NET_STRING+="$MTU" ;;
+ *) NET_STRING+=",mtu=$MTU" ;;
+ esac
+ fi
+
+ # IPv6 Handling
+ case "$IPV6_METHOD" in
+ auto) NET_STRING="$NET_STRING,ip6=auto" ;;
+ dhcp) NET_STRING="$NET_STRING,ip6=dhcp" ;;
+ static)
+ NET_STRING="$NET_STRING,ip6=$IPV6_ADDR"
+ [ -n "$IPV6_GATE" ] && NET_STRING="$NET_STRING,gw6=$IPV6_GATE"
+ ;;
+ none) ;;
+ esac
+
+ if [ "$CT_TYPE" == "1" ]; then
+ FEATURES="keyctl=1,nesting=1"
+ else
+ FEATURES="nesting=1"
+ fi
+
+ if [ "$ENABLE_FUSE" == "yes" ]; then
+ FEATURES="$FEATURES,fuse=1"
+ fi
+
+ TEMP_DIR=$(mktemp -d)
+ pushd "$TEMP_DIR" >/dev/null
+ if [ "$var_os" == "alpine" ]; then
+ export FUNCTIONS_FILE_PATH="$(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/alpine-install.func)"
+ else
+ export FUNCTIONS_FILE_PATH="$(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/install.func)"
+ fi
+ export DIAGNOSTICS="$DIAGNOSTICS"
+ export RANDOM_UUID="$RANDOM_UUID"
+ export CACHER="$APT_CACHER"
+ export CACHER_IP="$APT_CACHER_IP"
+ export tz="$timezone"
+ export APPLICATION="$APP"
+ export app="$NSAPP"
+ export PASSWORD="$PW"
+ export VERBOSE="$VERBOSE"
+ export SSH_ROOT="${SSH}"
+ export SSH_AUTHORIZED_KEY
+ export CTID="$CT_ID"
+ export CTTYPE="$CT_TYPE"
+ export ENABLE_FUSE="$ENABLE_FUSE"
+ export ENABLE_TUN="$ENABLE_TUN"
+ export PCT_OSTYPE="$var_os"
+ export PCT_OSVERSION="$var_version"
+ export PCT_DISK_SIZE="$DISK_SIZE"
+ export PCT_OPTIONS="
+ -features $FEATURES
+ -hostname $HN
+ -tags $TAGS
+ $SD
+ $NS
+ $NET_STRING
+ -onboot 1
+ -cores $CORE_COUNT
+ -memory $RAM_SIZE
+ -unprivileged $CT_TYPE
+ $PW
+"
+ export TEMPLATE_STORAGE="${var_template_storage:-}"
+ export CONTAINER_STORAGE="${var_container_storage:-}"
+ create_lxc_container || exit $?
+
+ LXC_CONFIG="/etc/pve/lxc/${CTID}.conf"
+
+ # ============================================================================
+ # GPU/USB PASSTHROUGH CONFIGURATION
+ # ============================================================================
+
+ # List of applications that benefit from GPU acceleration
+ GPU_APPS=(
+ "immich" "channels" "emby" "ersatztv" "frigate"
+ "jellyfin" "plex" "scrypted" "tdarr" "unmanic"
+ "ollama" "fileflows" "open-webui" "tunarr" "debian"
+ "handbrake" "sunshine" "moonlight" "kodi" "stremio"
+ "viseron"
+ )
+
+  # Return 0 when app name $1 (case-insensitive) is in the GPU_APPS allow-list.
+  is_gpu_app() {
+    local needle="${1,,}"
+    local candidate
+    for candidate in "${GPU_APPS[@]}"; do
+      if [[ "$needle" == "${candidate,,}" ]]; then
+        return 0
+      fi
+    done
+    return 1
+  }
+
+ # Detect all available GPU devices
+ detect_gpu_devices() {
+ INTEL_DEVICES=()
+ AMD_DEVICES=()
+ NVIDIA_DEVICES=()
+
+ # Store PCI info to avoid multiple calls
+ local pci_vga_info=$(lspci -nn 2>/dev/null | grep -E "VGA|Display|3D")
+
+ # Check for Intel GPU - look for Intel vendor ID [8086]
+ if echo "$pci_vga_info" | grep -q "\[8086:"; then
+ msg_info "Detected Intel GPU"
+ if [[ -d /dev/dri ]]; then
+ for d in /dev/dri/renderD* /dev/dri/card*; do
+ [[ -e "$d" ]] && INTEL_DEVICES+=("$d")
+ done
+ fi
+ fi
+
+ # Check for AMD GPU - look for AMD vendor IDs [1002] (AMD/ATI) or [1022] (AMD)
+ if echo "$pci_vga_info" | grep -qE "\[1002:|\[1022:"; then
+ msg_info "Detected AMD GPU"
+ if [[ -d /dev/dri ]]; then
+ # Only add if not already claimed by Intel
+ if [[ ${#INTEL_DEVICES[@]} -eq 0 ]]; then
+ for d in /dev/dri/renderD* /dev/dri/card*; do
+ [[ -e "$d" ]] && AMD_DEVICES+=("$d")
+ done
+ fi
+ fi
+ fi
+
+ # Check for NVIDIA GPU - look for NVIDIA vendor ID [10de]
+ if echo "$pci_vga_info" | grep -q "\[10de:"; then
+ msg_info "Detected NVIDIA GPU"
+ if ! check_nvidia_host_setup; then
+ msg_error "NVIDIA host setup incomplete. Skipping GPU passthrough."
+ msg_info "Fix NVIDIA drivers on host, then recreate container or passthrough manually."
+ return 0
+ fi
+
+ for d in /dev/nvidia* /dev/nvidiactl /dev/nvidia-modeset; do
+ [[ -e "$d" ]] && NVIDIA_DEVICES+=("$d")
+ done
+
+ if [[ ${#NVIDIA_DEVICES[@]} -eq 0 ]]; then
+ msg_warn "NVIDIA GPU detected but no /dev/nvidia* devices found"
+ msg_warn "Please install NVIDIA drivers on host: apt install nvidia-driver"
+ else
+ if [[ "$CT_TYPE" == "0" ]]; then
+          # Append NVIDIA passthrough entries to the container config.
+          # fix: original "cat <>" opened the config read-write and left the
+          # heredoc body to be executed as shell commands; "<<EOF >>" appends.
+          cat <<EOF >>"$LXC_CONFIG"
+# NVIDIA GPU Passthrough (privileged)
+lxc.cgroup2.devices.allow: c 195:* rwm
+lxc.cgroup2.devices.allow: c 243:* rwm
+lxc.mount.entry: /dev/nvidia0 dev/nvidia0 none bind,optional,create=file
+lxc.mount.entry: /dev/nvidiactl dev/nvidiactl none bind,optional,create=file
+lxc.mount.entry: /dev/nvidia-uvm dev/nvidia-uvm none bind,optional,create=file
+lxc.mount.entry: /dev/nvidia-uvm-tools dev/nvidia-uvm-tools none bind,optional,create=file
+EOF
+
+ if [[ -e /dev/dri/renderD128 ]]; then
+ echo "lxc.mount.entry: /dev/dri/renderD128 dev/dri/renderD128 none bind,optional,create=file" >>"$LXC_CONFIG"
+ fi
+
+ export GPU_TYPE="NVIDIA"
+ export NVIDIA_DRIVER_VERSION=$(nvidia-smi --query-gpu=driver_version --format=csv,noheader 2>/dev/null | head -n1)
+ msg_ok "NVIDIA GPU passthrough configured (driver: ${NVIDIA_DRIVER_VERSION})"
+ else
+ msg_warn "NVIDIA passthrough only supported for privileged containers"
+ return 0
+ fi
+ fi
+ fi
+
+ # Debug output
+ msg_debug "Intel devices: ${INTEL_DEVICES[*]}"
+ msg_debug "AMD devices: ${AMD_DEVICES[*]}"
+ msg_debug "NVIDIA devices: ${NVIDIA_DEVICES[*]}"
+ }
+
+ # Configure USB passthrough for privileged containers
+  configure_usb_passthrough() {
+    # Bind common serial/USB device nodes into privileged containers and allow
+    # their char-device majors (188 = ttyUSB, 189 = usbfs). No-op otherwise.
+    if [[ "$CT_TYPE" != "0" ]]; then
+      return 0
+    fi
+
+    msg_info "Configuring automatic USB passthrough (privileged container)"
+    # fix: original "cat <>" opened the config read-write and executed the
+    # heredoc body as shell commands; "<<EOF >>" appends it to the config.
+    cat <<EOF >>"$LXC_CONFIG"
+# Automatic USB passthrough (privileged container)
+lxc.cgroup2.devices.allow: a
+lxc.cap.drop:
+lxc.cgroup2.devices.allow: c 188:* rwm
+lxc.cgroup2.devices.allow: c 189:* rwm
+lxc.mount.entry: /dev/serial/by-id dev/serial/by-id none bind,optional,create=dir
+lxc.mount.entry: /dev/ttyUSB0 dev/ttyUSB0 none bind,optional,create=file
+lxc.mount.entry: /dev/ttyUSB1 dev/ttyUSB1 none bind,optional,create=file
+lxc.mount.entry: /dev/ttyACM0 dev/ttyACM0 none bind,optional,create=file
+lxc.mount.entry: /dev/ttyACM1 dev/ttyACM1 none bind,optional,create=file
+EOF
+    msg_ok "USB passthrough configured"
+  }
+
+ # Configure GPU passthrough
+ configure_gpu_passthrough() {
+ # Skip if not a GPU app and not privileged
+ if [[ "$CT_TYPE" != "0" ]] && ! is_gpu_app "$APP"; then
+ return 0
+ fi
+
+ detect_gpu_devices
+
+ # Count available GPU types
+ local gpu_count=0
+ local available_gpus=()
+
+ if [[ ${#INTEL_DEVICES[@]} -gt 0 ]]; then
+ available_gpus+=("INTEL")
+ gpu_count=$((gpu_count + 1))
+ fi
+
+ if [[ ${#AMD_DEVICES[@]} -gt 0 ]]; then
+ available_gpus+=("AMD")
+ gpu_count=$((gpu_count + 1))
+ fi
+
+ if [[ ${#NVIDIA_DEVICES[@]} -gt 0 ]]; then
+ available_gpus+=("NVIDIA")
+ gpu_count=$((gpu_count + 1))
+ fi
+
+ if [[ $gpu_count -eq 0 ]]; then
+ msg_info "No GPU devices found for passthrough"
+ return 0
+ fi
+
+ local selected_gpu=""
+
+ if [[ $gpu_count -eq 1 ]]; then
+ # Automatic selection for single GPU
+ selected_gpu="${available_gpus[0]}"
+ msg_info "Automatically configuring ${selected_gpu} GPU passthrough"
+ else
+ # Multiple GPUs - ask user
+ echo -e "\n${INFO} Multiple GPU types detected:"
+ for gpu in "${available_gpus[@]}"; do
+ echo " - $gpu"
+ done
+ read -rp "Which GPU type to passthrough? (${available_gpus[*]}): " selected_gpu
+ selected_gpu="${selected_gpu^^}"
+
+ # Validate selection
+ local valid=0
+ for gpu in "${available_gpus[@]}"; do
+ [[ "$selected_gpu" == "$gpu" ]] && valid=1
+ done
+
+ if [[ $valid -eq 0 ]]; then
+ msg_warn "Invalid selection. Skipping GPU passthrough."
+ return 0
+ fi
+ fi
+
+ # Apply passthrough configuration based on selection
+ local dev_idx=0
+
+ case "$selected_gpu" in
+ INTEL | AMD)
+ local devices=()
+ [[ "$selected_gpu" == "INTEL" ]] && devices=("${INTEL_DEVICES[@]}")
+ [[ "$selected_gpu" == "AMD" ]] && devices=("${AMD_DEVICES[@]}")
+
+ # For Proxmox WebUI visibility, add as dev0, dev1 etc.
+ for dev in "${devices[@]}"; do
+ if [[ "$CT_TYPE" == "0" ]]; then
+ # Privileged container - use dev entries for WebUI visibility
+ # Use initial GID 104 (render) for renderD*, 44 (video) for card*
+ if [[ "$dev" =~ renderD ]]; then
+ echo "dev${dev_idx}: $dev,gid=104" >>"$LXC_CONFIG"
+ else
+ echo "dev${dev_idx}: $dev,gid=44" >>"$LXC_CONFIG"
+ fi
+ dev_idx=$((dev_idx + 1))
+
+ # Also add cgroup allows for privileged containers
+ local major minor
+ major=$(stat -c '%t' "$dev" 2>/dev/null || echo "0")
+ minor=$(stat -c '%T' "$dev" 2>/dev/null || echo "0")
+
+ if [[ "$major" != "0" && "$minor" != "0" ]]; then
+ echo "lxc.cgroup2.devices.allow: c $((0x$major)):$((0x$minor)) rwm" >>"$LXC_CONFIG"
+ fi
+ else
+ # Unprivileged container
+ if [[ "$dev" =~ renderD ]]; then
+ echo "dev${dev_idx}: $dev,uid=0,gid=104" >>"$LXC_CONFIG"
+ else
+ echo "dev${dev_idx}: $dev,uid=0,gid=44" >>"$LXC_CONFIG"
+ fi
+ dev_idx=$((dev_idx + 1))
+ fi
+ done
+
+ export GPU_TYPE="$selected_gpu"
+ msg_ok "${selected_gpu} GPU passthrough configured (${dev_idx} devices)"
+ ;;
+
+ NVIDIA)
+ if [[ ${#NVIDIA_DEVICES[@]} -eq 0 ]]; then
+ msg_error "NVIDIA drivers not installed on host. Please install: apt install nvidia-driver"
+ return 1
+ fi
+
+ for dev in "${NVIDIA_DEVICES[@]}"; do
+ # NVIDIA devices typically need different handling
+ echo "dev${dev_idx}: $dev,uid=0,gid=44" >>"$LXC_CONFIG"
+ dev_idx=$((dev_idx + 1))
+
+ if [[ "$CT_TYPE" == "0" ]]; then
+ local major minor
+ major=$(stat -c '%t' "$dev" 2>/dev/null || echo "0")
+ minor=$(stat -c '%T' "$dev" 2>/dev/null || echo "0")
+
+ if [[ "$major" != "0" && "$minor" != "0" ]]; then
+ echo "lxc.cgroup2.devices.allow: c $((0x$major)):$((0x$minor)) rwm" >>"$LXC_CONFIG"
+ fi
+ fi
+ done
+
+ export GPU_TYPE="NVIDIA"
+ msg_ok "NVIDIA GPU passthrough configured (${dev_idx} devices)"
+ ;;
+ esac
+ }
+
+ # Additional device passthrough
+  configure_additional_devices() {
+    # TUN device passthrough (needed by VPN software inside the container)
+    if [ "$ENABLE_TUN" == "yes" ]; then
+      # fix: original "cat <>" opened the config read-write and executed the
+      # heredoc body as shell commands; "<<EOF >>" appends it to the config.
+      cat <<EOF >>"$LXC_CONFIG"
+lxc.cgroup2.devices.allow: c 10:200 rwm
+lxc.mount.entry: /dev/net/tun dev/net/tun none bind,create=file
+EOF
+    fi
+
+    # Coral TPU passthrough (PCIe Apex accelerator, if present on the host)
+    if [[ -e /dev/apex_0 ]]; then
+      msg_info "Detected Coral TPU - configuring passthrough"
+      echo "lxc.mount.entry: /dev/apex_0 dev/apex_0 none bind,optional,create=file" >>"$LXC_CONFIG"
+    fi
+  }
+
+ # Execute pre-start configurations
+ configure_usb_passthrough
+ configure_gpu_passthrough
+ configure_additional_devices
+
+ # ============================================================================
+ # START CONTAINER AND INSTALL USERLAND
+ # ============================================================================
+
+ msg_info "Starting LXC Container"
+ pct start "$CTID"
+
+ # Wait for container to be running
+ for i in {1..10}; do
+ if pct status "$CTID" | grep -q "status: running"; then
+ msg_ok "Started LXC Container"
+ break
+ fi
+ sleep 1
+ if [ "$i" -eq 10 ]; then
+ msg_error "LXC Container did not reach running state"
+ exit 1
+ fi
+ done
+
+ # Wait for network (skip for Alpine initially)
+ if [ "$var_os" != "alpine" ]; then
+ msg_info "Waiting for network in LXC container"
+
+ # Wait for IP
+ for i in {1..20}; do
+ ip_in_lxc=$(pct exec "$CTID" -- ip -4 addr show dev eth0 | awk '/inet / {print $2}' | cut -d/ -f1)
+ [ -n "$ip_in_lxc" ] && break
+ sleep 1
+ done
+
+ if [ -z "$ip_in_lxc" ]; then
+ msg_error "No IP assigned to CT $CTID after 20s"
+ exit 1
+ fi
+
+ # Try to reach gateway
+ gw_ok=0
+ for i in {1..10}; do
+ if pct exec "$CTID" -- ping -c1 -W1 "${GATEWAY:-8.8.8.8}" >/dev/null 2>&1; then
+ gw_ok=1
+ break
+ fi
+ sleep 1
+ done
+
+ if [ "$gw_ok" -eq 1 ]; then
+ msg_ok "Network in LXC is reachable (IP $ip_in_lxc)"
+ else
+ msg_warn "Network reachable but gateway check failed"
+ fi
+ fi
+  # Resolve the numeric GID of group $1 *inside* the container via getent;
+  # falls back to 44 (conventional "video" GID) when the group is missing.
+  get_container_gid() {
+    local group="$1"
+    local gid=$(pct exec "$CTID" -- getent group "$group" 2>/dev/null | cut -d: -f3)
+    echo "${gid:-44}" # Default to 44 if not found
+  }
+
+ fix_gpu_gids
+
+ # Continue with standard container setup
+ msg_info "Customizing LXC Container"
+
+ # # Install GPU userland if configured
+ # if [[ "${ENABLE_VAAPI:-0}" == "1" ]]; then
+ # install_gpu_userland "VAAPI"
+ # fi
+
+ # if [[ "${ENABLE_NVIDIA:-0}" == "1" ]]; then
+ # install_gpu_userland "NVIDIA"
+ # fi
+
+ # Continue with standard container setup
+ if [ "$var_os" == "alpine" ]; then
+ sleep 3
+    # fix: original "cat </etc/apk/repositories" only *read* the file and the
+    # URL lines / 'EOF' would have run as commands; write the repo list with a
+    # proper heredoc instead.
+    pct exec "$CTID" -- /bin/sh -c 'cat <<EOF >/etc/apk/repositories
+http://dl-cdn.alpinelinux.org/alpine/latest-stable/main
+http://dl-cdn.alpinelinux.org/alpine/latest-stable/community
+EOF'
+ pct exec "$CTID" -- ash -c "apk add bash newt curl openssh nano mc ncurses jq >/dev/null"
+ else
+ sleep 3
+ pct exec "$CTID" -- bash -c "sed -i '/$LANG/ s/^# //' /etc/locale.gen"
+ pct exec "$CTID" -- bash -c "locale_line=\$(grep -v '^#' /etc/locale.gen | grep -E '^[a-zA-Z]' | awk '{print \$1}' | head -n 1) && \
+ echo LANG=\$locale_line >/etc/default/locale && \
+ locale-gen >/dev/null && \
+ export LANG=\$locale_line"
+
+ if [[ -z "${tz:-}" ]]; then
+ tz=$(timedatectl show --property=Timezone --value 2>/dev/null || echo "Etc/UTC")
+ fi
+
+ if pct exec "$CTID" -- test -e "/usr/share/zoneinfo/$tz"; then
+ pct exec "$CTID" -- bash -c "tz='$tz'; echo \"\$tz\" >/etc/timezone && ln -sf \"/usr/share/zoneinfo/\$tz\" /etc/localtime"
+ else
+ msg_warn "Skipping timezone setup – zone '$tz' not found in container"
+ fi
+
+ pct exec "$CTID" -- bash -c "apt-get update >/dev/null && apt-get install -y sudo curl mc gnupg2 jq >/dev/null" || {
+ msg_error "apt-get base packages installation failed"
+ exit 1
+ }
+ fi
+
+ msg_ok "Customized LXC Container"
+
+ # Verify GPU access if enabled
+ if [[ "${ENABLE_VAAPI:-0}" == "1" ]] && [ "$var_os" != "alpine" ]; then
+ pct exec "$CTID" -- bash -c "vainfo >/dev/null 2>&1" &&
+ msg_ok "VAAPI verified working" ||
+ msg_warn "VAAPI verification failed - may need additional configuration"
+ fi
+
+ if [[ "${ENABLE_NVIDIA:-0}" == "1" ]] && [ "$var_os" != "alpine" ]; then
+ pct exec "$CTID" -- bash -c "nvidia-smi >/dev/null 2>&1" &&
+ msg_ok "NVIDIA verified working" ||
+ msg_warn "NVIDIA verification failed - may need additional configuration"
+ fi
+
+ # Install SSH keys
+ install_ssh_keys_into_ct
+
+  # Run application installer; on failure propagate its real exit code.
+  # fix: "exit $?" inside "if ! cmd; then" exits with the status of the
+  # *negated* test (always 0), not the installer's failure code.
+  lxc-attach -n "$CTID" -- bash -c "$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/install/${var_install}.sh)" || exit $?
+}
+
+destroy_lxc() {
+  # Interactively stop and destroy the container identified by $CT_ID.
+  # Returns 1 on failure/missing ID, 130 on user abort, 0 otherwise.
+  if [[ -z "$CT_ID" ]]; then
+    msg_error "No CT_ID found. Nothing to remove."
+    return 1
+  fi
+
+  # Abort on Ctrl-C / Ctrl-D / ESC
+  trap 'echo; msg_error "Aborted by user (SIGINT/SIGQUIT)"; return 130' INT QUIT
+
+  local prompt
+  if ! read -rp "Remove this Container? " prompt; then
+    # read returns non-zero on Ctrl-D/ESC
+    msg_error "Aborted input (Ctrl-D/ESC)"
+    return 130
+  fi
+
+  case "${prompt,,}" in
+  y | yes)
+    if pct stop "$CT_ID" &>/dev/null && pct destroy "$CT_ID" &>/dev/null; then
+      msg_ok "Removed Container $CT_ID"
+    else
+      msg_error "Failed to remove Container $CT_ID"
+      return 1
+    fi
+    ;;
+  "" | n | no)
+    msg_info "Container was not removed."
+    ;;
+  *)
+    msg_warn "Invalid response. Container was not removed."
+    ;;
+  esac
+}
+
+# ------------------------------------------------------------------------------
+# Storage discovery / selection helpers
+# ------------------------------------------------------------------------------
+# ===== Storage discovery / selection helpers (ported from create_lxc.sh) =====
+resolve_storage_preselect() {
+  # Validate a preselected storage ($2) for class $1 (template|container).
+  # On success sets STORAGE_RESULT and STORAGE_INFO and returns 0; returns 1
+  # when no preselect was given or it lacks the required content type.
+  local class="$1" preselect="$2" required_content=""
+  case "$class" in
+  template) required_content="vztmpl" ;;
+  container) required_content="rootdir" ;;
+  *) return 1 ;;
+  esac
+  [[ -z "$preselect" ]] && return 1
+  # Reject the preselect when pvesm does not list it for the content type
+  if ! pvesm status -content "$required_content" | awk 'NR>1{print $1}' | grep -qx -- "$preselect"; then
+    msg_warn "Preselected storage '${preselect}' does not support content '${required_content}' (or not found)"
+    return 1
+  fi
+
+  # Build the human-readable free/used label from the pvesm status row
+  # (columns: name type status total used free — see `pvesm status`)
+  local line total used free
+  line="$(pvesm status | awk -v s="$preselect" 'NR>1 && $1==s {print $0}')"
+  if [[ -z "$line" ]]; then
+    STORAGE_INFO="n/a"
+  else
+    total="$(awk '{print $4}' <<<"$line")"
+    used="$(awk '{print $5}' <<<"$line")"
+    free="$(awk '{print $6}' <<<"$line")"
+    local total_h used_h free_h
+    if command -v numfmt >/dev/null 2>&1; then
+      # numfmt may fail on non-numeric fields; fall back to the raw value
+      total_h="$(numfmt --to=iec --suffix=B --format %.1f "$total" 2>/dev/null || echo "$total")"
+      used_h="$(numfmt --to=iec --suffix=B --format %.1f "$used" 2>/dev/null || echo "$used")"
+      free_h="$(numfmt --to=iec --suffix=B --format %.1f "$free" 2>/dev/null || echo "$free")"
+      STORAGE_INFO="Free: ${free_h} Used: ${used_h}"
+    else
+      STORAGE_INFO="Free: ${free} Used: ${used}"
+    fi
+  fi
+  STORAGE_RESULT="$preselect"
+  return 0
+}
+
+fix_gpu_gids() {
+  # Align the GPU device GIDs in the container config with the video/render
+  # group IDs that actually exist inside the container, then (for privileged
+  # containers) re-apply group ownership on the /dev/dri nodes themselves.
+  # No-op unless GPU passthrough was configured (GPU_TYPE set).
+  if [[ -z "${GPU_TYPE:-}" ]]; then
+    return 0
+  fi
+
+  msg_info "Detecting and setting correct GPU group IDs"
+
+  # Determine the actual GIDs from inside the container
+  local video_gid=$(pct exec "$CTID" -- sh -c "getent group video 2>/dev/null | cut -d: -f3")
+  local render_gid=$(pct exec "$CTID" -- sh -c "getent group render 2>/dev/null | cut -d: -f3")
+
+  # Fallbacks if the groups do not exist
+  if [[ -z "$video_gid" ]]; then
+    # Try to create the video group
+    pct exec "$CTID" -- sh -c "groupadd -r video 2>/dev/null || true"
+    video_gid=$(pct exec "$CTID" -- sh -c "getent group video 2>/dev/null | cut -d: -f3")
+    [[ -z "$video_gid" ]] && video_gid="44" # Ultimate fallback
+  fi
+
+  if [[ -z "$render_gid" ]]; then
+    # Try to create the render group
+    pct exec "$CTID" -- sh -c "groupadd -r render 2>/dev/null || true"
+    render_gid=$(pct exec "$CTID" -- sh -c "getent group render 2>/dev/null | cut -d: -f3")
+    [[ -z "$render_gid" ]] && render_gid="104" # Ultimate fallback
+  fi
+
+  msg_info "Container GIDs detected - video:${video_gid}, render:${render_gid}"
+
+  # Check whether the GIDs differ from the defaults (44 video / 104 render)
+  local need_update=0
+  if [[ "$video_gid" != "44" ]] || [[ "$render_gid" != "104" ]]; then
+    need_update=1
+  fi
+
+  if [[ $need_update -eq 1 ]]; then
+    msg_info "Updating device GIDs in container config"
+
+    # Stop the container for the config update
+    pct stop "$CTID" >/dev/null 2>&1
+
+    # Update the devN entries with the correct GIDs; back up the config first
+    cp "$LXC_CONFIG" "${LXC_CONFIG}.bak"
+
+    # Parse and rewrite every devN entry line by line
+    while IFS= read -r line; do
+      if [[ "$line" =~ ^dev[0-9]+: ]]; then
+        # Extract device path
+        local device_path=$(echo "$line" | sed -E 's/^dev[0-9]+: ([^,]+).*/\1/')
+        local dev_num=$(echo "$line" | sed -E 's/^(dev[0-9]+):.*/\1/')
+
+        if [[ "$device_path" =~ renderD ]]; then
+          # RenderD device - use render GID
+          echo "${dev_num}: ${device_path},gid=${render_gid}"
+        elif [[ "$device_path" =~ card ]]; then
+          # Card device - use video GID
+          echo "${dev_num}: ${device_path},gid=${video_gid}"
+        else
+          # Keep original line
+          echo "$line"
+        fi
+      else
+        # Keep non-dev lines
+        echo "$line"
+      fi
+    done <"$LXC_CONFIG" >"${LXC_CONFIG}.new"
+
+    mv "${LXC_CONFIG}.new" "$LXC_CONFIG"
+
+    # Start the container again
+    pct start "$CTID" >/dev/null 2>&1
+    sleep 3
+
+    msg_ok "Device GIDs updated successfully"
+  else
+    msg_ok "Device GIDs are already correct"
+  fi
+  # Privileged containers: also chgrp/chmod the device nodes themselves
+  if [[ "$CT_TYPE" == "0" ]]; then
+    pct exec "$CTID" -- bash -c "
+      if [ -d /dev/dri ]; then
+        for dev in /dev/dri/*; do
+          if [ -e \"\$dev\" ]; then
+            if [[ \"\$dev\" =~ renderD ]]; then
+              chgrp ${render_gid} \"\$dev\" 2>/dev/null || true
+            else
+              chgrp ${video_gid} \"\$dev\" 2>/dev/null || true
+            fi
+            chmod 660 \"\$dev\" 2>/dev/null || true
+          fi
+        done
+      fi
+    " >/dev/null 2>&1
+  fi
+}
+
+# NVIDIA-spezific check on host
+check_nvidia_host_setup() {
+ if ! command -v nvidia-smi >/dev/null 2>&1; then
+ msg_warn "NVIDIA GPU detected but nvidia-smi not found on host"
+ msg_warn "Please install NVIDIA drivers on host first."
+ #echo " 1. Download driver: wget https://us.download.nvidia.com/XFree86/Linux-x86_64/550.127.05/NVIDIA-Linux-x86_64-550.127.05.run"
+ #echo " 2. Install: ./NVIDIA-Linux-x86_64-550.127.05.run --dkms"
+ #echo " 3. Verify: nvidia-smi"
+ return 1
+ fi
+
+ # check if nvidia-smi works
+ if ! nvidia-smi >/dev/null 2>&1; then
+ msg_warn "nvidia-smi installed but not working. Driver issue?"
+ return 1
+ fi
+
+ return 0
+}
+
+check_storage_support() {
+  # Succeed (exit 0) if at least one storage pool offers content type $1.
+  local content="$1"
+  local found=0 row
+  while IFS= read -r row; do
+    [[ -n "$(awk '{print $1}' <<<"$row")" ]] && found=1
+  done < <(pvesm status -content "$content" 2>/dev/null | awk 'NR>1')
+  [[ $found -eq 1 ]]
+}
+
+select_storage() {
+  # Interactively pick a storage pool supporting the content type implied by
+  # $1 (container|template|iso|images|backup|snippets). Sets STORAGE_RESULT
+  # and STORAGE_INFO; auto-selects silently when exactly one pool matches.
+  # Returns 1 on bad class, 2 when no pool supports the content type.
+  local CLASS=$1 CONTENT CONTENT_LABEL
+  case $CLASS in
+  container)
+    CONTENT='rootdir'
+    CONTENT_LABEL='Container'
+    ;;
+  template)
+    CONTENT='vztmpl'
+    CONTENT_LABEL='Container template'
+    ;;
+  iso)
+    CONTENT='iso'
+    CONTENT_LABEL='ISO image'
+    ;;
+  images)
+    CONTENT='images'
+    CONTENT_LABEL='VM Disk image'
+    ;;
+  backup)
+    CONTENT='backup'
+    CONTENT_LABEL='Backup'
+    ;;
+  snippets)
+    CONTENT='snippets'
+    CONTENT_LABEL='Snippets'
+    ;;
+  *)
+    msg_error "Invalid storage class '$CLASS'"
+    return 1
+    ;;
+  esac
+
+  declare -A STORAGE_MAP
+  local -a MENU=()
+  local COL_WIDTH=0
+
+  # Build whiptail radiolist entries (display, info, state) from pvesm output
+  while read -r TAG TYPE _ TOTAL USED FREE _; do
+    [[ -n "$TAG" && -n "$TYPE" ]] || continue
+    local DISPLAY="${TAG} (${TYPE})"
+    local USED_FMT=$(numfmt --to=iec --from-unit=K --format %.1f <<<"$USED")
+    local FREE_FMT=$(numfmt --to=iec --from-unit=K --format %.1f <<<"$FREE")
+    local INFO="Free: ${FREE_FMT}B Used: ${USED_FMT}B"
+    STORAGE_MAP["$DISPLAY"]="$TAG"
+    MENU+=("$DISPLAY" "$INFO" "OFF")
+    ((${#DISPLAY} > COL_WIDTH)) && COL_WIDTH=${#DISPLAY}
+  done < <(pvesm status -content "$CONTENT" | awk 'NR>1')
+
+  if [[ ${#MENU[@]} -eq 0 ]]; then
+    msg_error "No storage found for content type '$CONTENT'."
+    return 2
+  fi
+
+  # Exactly one candidate (3 menu fields per entry): select it without a dialog
+  if [[ $((${#MENU[@]} / 3)) -eq 1 ]]; then
+    STORAGE_RESULT="${STORAGE_MAP[${MENU[0]}]}"
+    STORAGE_INFO="${MENU[1]}"
+    return 0
+  fi
+
+  local WIDTH=$((COL_WIDTH + 42))
+  while true; do
+    local DISPLAY_SELECTED
+    DISPLAY_SELECTED=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \
+      --title "Storage Pools" \
+      --radiolist "Which storage pool for ${CONTENT_LABEL,,}?\n(Spacebar to select)" \
+      16 "$WIDTH" 6 "${MENU[@]}" 3>&1 1>&2 2>&3) || { exit_script; }
+
+    # Trim trailing whitespace and validate the selection against the map
+    DISPLAY_SELECTED=$(sed 's/[[:space:]]*$//' <<<"$DISPLAY_SELECTED")
+    if [[ -z "$DISPLAY_SELECTED" || -z "${STORAGE_MAP[$DISPLAY_SELECTED]+_}" ]]; then
+      whiptail --msgbox "No valid storage selected. Please try again." 8 58
+      continue
+    fi
+    STORAGE_RESULT="${STORAGE_MAP[$DISPLAY_SELECTED]}"
+    # Recover the matching info column for the chosen display entry
+    for ((i = 0; i < ${#MENU[@]}; i += 3)); do
+      if [[ "${MENU[$i]}" == "$DISPLAY_SELECTED" ]]; then
+        STORAGE_INFO="${MENU[$i + 1]}"
+        break
+      fi
+    done
+    return 0
+  done
+}
+
+create_lxc_container() {
+ # ------------------------------------------------------------------------------
+ # Optional verbose mode (debug tracing)
+ # ------------------------------------------------------------------------------
+ if [[ "${CREATE_LXC_VERBOSE:-no}" == "yes" ]]; then set -x; fi
+
+ # ------------------------------------------------------------------------------
+ # Helpers (dynamic versioning / template parsing)
+ # ------------------------------------------------------------------------------
+ pkg_ver() { dpkg-query -W -f='${Version}\n' "$1" 2>/dev/null || echo ""; }
+ pkg_cand() { apt-cache policy "$1" 2>/dev/null | awk '/Candidate:/ {print $2}'; }
+
+ ver_ge() { dpkg --compare-versions "$1" ge "$2"; }
+ ver_gt() { dpkg --compare-versions "$1" gt "$2"; }
+ ver_lt() { dpkg --compare-versions "$1" lt "$2"; }
+
+ # Extract Debian OS minor from template name: debian-13-standard_13.1-1_amd64.tar.zst => "13.1"
+ parse_template_osver() { sed -n 's/.*_\([0-9][0-9]*\(\.[0-9]\+\)\?\)-.*/\1/p' <<<"$1"; }
+
+ # Offer upgrade for pve-container/lxc-pve if candidate > installed; optional auto-retry pct create
+ # Returns:
+ # 0 = no upgrade needed
+ # 1 = upgraded (and if do_retry=yes and retry succeeded, creation done)
+ # 2 = user declined
+ # 3 = upgrade attempted but failed OR retry failed
+ offer_lxc_stack_upgrade_and_maybe_retry() {
+ local do_retry="${1:-no}" # yes|no
+ local _pvec_i _pvec_c _lxcp_i _lxcp_c need=0
+
+ _pvec_i="$(pkg_ver pve-container)"
+ _lxcp_i="$(pkg_ver lxc-pve)"
+ _pvec_c="$(pkg_cand pve-container)"
+ _lxcp_c="$(pkg_cand lxc-pve)"
+
+ if [[ -n "$_pvec_c" && "$_pvec_c" != "none" ]]; then
+ ver_gt "$_pvec_c" "${_pvec_i:-0}" && need=1
+ fi
+ if [[ -n "$_lxcp_c" && "$_lxcp_c" != "none" ]]; then
+ ver_gt "$_lxcp_c" "${_lxcp_i:-0}" && need=1
+ fi
+ if [[ $need -eq 0 ]]; then
+ msg_debug "No newer candidate for pve-container/lxc-pve (installed=$_pvec_i/$_lxcp_i, cand=$_pvec_c/$_lxcp_c)"
+ return 0
+ fi
+
+ echo
+ echo "An update for the Proxmox LXC stack is available:"
+ echo " pve-container: installed=${_pvec_i:-n/a} candidate=${_pvec_c:-n/a}"
+ echo " lxc-pve : installed=${_lxcp_i:-n/a} candidate=${_lxcp_c:-n/a}"
+ echo
+ read -rp "Do you want to upgrade now? [y/N] " _ans
+ case "${_ans,,}" in
+ y | yes)
+ msg_info "Upgrading Proxmox LXC stack (pve-container, lxc-pve)"
+ if apt-get update -qq >/dev/null && apt-get install -y --only-upgrade pve-container lxc-pve >/dev/null; then
+ msg_ok "LXC stack upgraded."
+ if [[ "$do_retry" == "yes" ]]; then
+ msg_info "Retrying container creation after upgrade"
+ if pct create "$CTID" "${TEMPLATE_STORAGE}:vztmpl/${TEMPLATE}" "${PCT_OPTIONS[@]}" >>"$LOGFILE" 2>&1; then
+ msg_ok "Container created successfully after upgrade."
+ return 0
+ else
+ msg_error "pct create still failed after upgrade. See $LOGFILE"
+ return 3
+ fi
+ fi
+ return 1
+ else
+ msg_error "Upgrade failed. Please check APT output."
+ return 3
+ fi
+ ;;
+ *) return 2 ;;
+ esac
+ }
+
+ # ------------------------------------------------------------------------------
+ # Required input variables
+ # ------------------------------------------------------------------------------
+ [[ "${CTID:-}" ]] || {
+ msg_error "You need to set 'CTID' variable."
+ exit 203
+ }
+ [[ "${PCT_OSTYPE:-}" ]] || {
+ msg_error "You need to set 'PCT_OSTYPE' variable."
+ exit 204
+ }
+
+ msg_debug "CTID=$CTID"
+ msg_debug "PCT_OSTYPE=$PCT_OSTYPE"
+ msg_debug "PCT_OSVERSION=${PCT_OSVERSION:-default}"
+
+ # ID checks
+ [[ "$CTID" -ge 100 ]] || {
+ msg_error "ID cannot be less than 100."
+ exit 205
+ }
+ if qm status "$CTID" &>/dev/null || pct status "$CTID" &>/dev/null; then
+ echo -e "ID '$CTID' is already in use."
+ unset CTID
+ msg_error "Cannot use ID that is already in use."
+ exit 206
+ fi
+
+ # Storage capability check
+ check_storage_support "rootdir" || {
+ msg_error "No valid storage found for 'rootdir' [Container]"
+ exit 1
+ }
+ check_storage_support "vztmpl" || {
+ msg_error "No valid storage found for 'vztmpl' [Template]"
+ exit 1
+ }
+
+ # Template storage selection
+ if resolve_storage_preselect template "${TEMPLATE_STORAGE:-}"; then
+ TEMPLATE_STORAGE="$STORAGE_RESULT"
+ TEMPLATE_STORAGE_INFO="$STORAGE_INFO"
+ msg_ok "Storage ${BL}${TEMPLATE_STORAGE}${CL} (${TEMPLATE_STORAGE_INFO}) [Template]"
+ else
+ while true; do
+ if [[ -z "${var_template_storage:-}" ]]; then
+ if select_storage template; then
+ TEMPLATE_STORAGE="$STORAGE_RESULT"
+ TEMPLATE_STORAGE_INFO="$STORAGE_INFO"
+ msg_ok "Storage ${BL}${TEMPLATE_STORAGE}${CL} (${TEMPLATE_STORAGE_INFO}) [Template]"
+ break
+ fi
+ fi
+ done
+ fi
+
+ # Container storage selection
+ if resolve_storage_preselect container "${CONTAINER_STORAGE:-}"; then
+ CONTAINER_STORAGE="$STORAGE_RESULT"
+ CONTAINER_STORAGE_INFO="$STORAGE_INFO"
+ msg_ok "Storage ${BL}${CONTAINER_STORAGE}${CL} (${CONTAINER_STORAGE_INFO}) [Container]"
+ else
+ if [[ -z "${var_container_storage:-}" ]]; then
+ if select_storage container; then
+ CONTAINER_STORAGE="$STORAGE_RESULT"
+ CONTAINER_STORAGE_INFO="$STORAGE_INFO"
+ msg_ok "Storage ${BL}${CONTAINER_STORAGE}${CL} (${CONTAINER_STORAGE_INFO}) [Container]"
+ fi
+ fi
+ fi
+
+ # Validate content types
+ msg_info "Validating content types of storage '$CONTAINER_STORAGE'"
+ STORAGE_CONTENT=$(grep -A4 -E "^(zfspool|dir|lvmthin|lvm): $CONTAINER_STORAGE" /etc/pve/storage.cfg | grep content | awk '{$1=""; print $0}' | xargs)
+ msg_debug "Storage '$CONTAINER_STORAGE' has content types: $STORAGE_CONTENT"
+ grep -qw "rootdir" <<<"$STORAGE_CONTENT" || {
+ msg_error "Storage '$CONTAINER_STORAGE' does not support 'rootdir'. Cannot create LXC."
+ exit 217
+ }
+ $STD msg_ok "Storage '$CONTAINER_STORAGE' supports 'rootdir'"
+
+ msg_info "Validating content types of template storage '$TEMPLATE_STORAGE'"
+ TEMPLATE_CONTENT=$(grep -A4 -E "^[^:]+: $TEMPLATE_STORAGE" /etc/pve/storage.cfg | grep content | awk '{$1=""; print $0}' | xargs)
+ msg_debug "Template storage '$TEMPLATE_STORAGE' has content types: $TEMPLATE_CONTENT"
+ if ! grep -qw "vztmpl" <<<"$TEMPLATE_CONTENT"; then
+ msg_warn "Template storage '$TEMPLATE_STORAGE' does not declare 'vztmpl'. This may cause pct create to fail."
+ else
+ $STD msg_ok "Template storage '$TEMPLATE_STORAGE' supports 'vztmpl'"
+ fi
+
+ # Free space check
+ STORAGE_FREE=$(pvesm status | awk -v s="$CONTAINER_STORAGE" '$1 == s { print $6 }')
+ REQUIRED_KB=$((${PCT_DISK_SIZE:-8} * 1024 * 1024))
+ [[ "$STORAGE_FREE" -ge "$REQUIRED_KB" ]] || {
+ msg_error "Not enough space on '$CONTAINER_STORAGE'. Needed: ${PCT_DISK_SIZE:-8}G."
+ exit 214
+ }
+
+ # Cluster quorum (if cluster)
+ if [[ -f /etc/pve/corosync.conf ]]; then
+ msg_info "Checking cluster quorum"
+ if ! pvecm status | awk -F':' '/^Quorate/ { exit ($2 ~ /Yes/) ? 0 : 1 }'; then
+ msg_error "Cluster is not quorate. Start all nodes or configure quorum device (QDevice)."
+ exit 210
+ fi
+ msg_ok "Cluster is quorate"
+ fi
+
+ # ------------------------------------------------------------------------------
+ # Template discovery & validation
+ # ------------------------------------------------------------------------------
+ TEMPLATE_SEARCH="${PCT_OSTYPE}-${PCT_OSVERSION:-}"
+ case "$PCT_OSTYPE" in
+ debian | ubuntu) TEMPLATE_PATTERN="-standard_" ;;
+ alpine | fedora | rocky | centos) TEMPLATE_PATTERN="-default_" ;;
+ *) TEMPLATE_PATTERN="" ;;
+ esac
+
+ msg_info "Searching for template '$TEMPLATE_SEARCH'"
+
+ # Build regex patterns outside awk/grep for clarity
+ SEARCH_PATTERN="^${TEMPLATE_SEARCH}"
+
+ #echo "[DEBUG] TEMPLATE_SEARCH='$TEMPLATE_SEARCH'"
+ #echo "[DEBUG] SEARCH_PATTERN='$SEARCH_PATTERN'"
+ #echo "[DEBUG] TEMPLATE_PATTERN='$TEMPLATE_PATTERN'"
+
+ mapfile -t LOCAL_TEMPLATES < <(
+ pveam list "$TEMPLATE_STORAGE" 2>/dev/null |
+ awk -v search="${SEARCH_PATTERN}" -v pattern="${TEMPLATE_PATTERN}" '$1 ~ search && $1 ~ pattern {print $1}' |
+ sed 's|.*/||' | sort -t - -k 2 -V
+ )
+
+ pveam update >/dev/null 2>&1 || msg_warn "Could not update template catalog (pveam update failed)."
+
+ #echo "[DEBUG] pveam available output (first 5 lines with .tar files):"
+ #pveam available -section system 2>/dev/null | grep -E '\.(tar\.zst|tar\.xz|tar\.gz)$' | head -5 | sed 's/^/ /'
+
+ set +u
+ mapfile -t ONLINE_TEMPLATES < <(pveam available -section system 2>/dev/null | grep -E '\.(tar\.zst|tar\.xz|tar\.gz)$' | awk '{print $2}' | grep -E "${SEARCH_PATTERN}.*${TEMPLATE_PATTERN}" | sort -t - -k 2 -V 2>/dev/null || true)
+ #echo "[DEBUG] After filtering: ${#ONLINE_TEMPLATES[@]} online templates found"
+ set -u
+ if [[ ${#ONLINE_TEMPLATES[@]} -gt 0 ]]; then
+ #echo "[DEBUG] Online templates:"
+ for tmpl in "${ONLINE_TEMPLATES[@]}"; do
+ echo " - $tmpl"
+ done
+ fi
+
+ ONLINE_TEMPLATE=""
+ [[ ${#ONLINE_TEMPLATES[@]} -gt 0 ]] && ONLINE_TEMPLATE="${ONLINE_TEMPLATES[-1]}"
+
+ #msg_debug "SEARCH_PATTERN='${SEARCH_PATTERN}' TEMPLATE_PATTERN='${TEMPLATE_PATTERN}'"
+ #msg_debug "Found ${#LOCAL_TEMPLATES[@]} local templates, ${#ONLINE_TEMPLATES[@]} online templates"
+ if [[ ${#ONLINE_TEMPLATES[@]} -gt 0 ]]; then
+ #msg_debug "First 3 online templates:"
+ count=0
+ for idx in "${!ONLINE_TEMPLATES[@]}"; do
+ #msg_debug " [$idx]: ${ONLINE_TEMPLATES[$idx]}"
+ ((count++))
+ [[ $count -ge 3 ]] && break
+ done
+ fi
+ #msg_debug "ONLINE_TEMPLATE='$ONLINE_TEMPLATE'"
+
+ if [[ ${#LOCAL_TEMPLATES[@]} -gt 0 ]]; then
+ TEMPLATE="${LOCAL_TEMPLATES[-1]}"
+ TEMPLATE_SOURCE="local"
+ else
+ TEMPLATE="$ONLINE_TEMPLATE"
+ TEMPLATE_SOURCE="online"
+ fi
+
+ # If still no template, try to find alternatives
+ if [[ -z "$TEMPLATE" ]]; then
+ echo ""
+ echo "[DEBUG] No template found for ${PCT_OSTYPE} ${PCT_OSVERSION}, searching for alternatives..."
+
+ # Get all available versions for this OS type
+ mapfile -t AVAILABLE_VERSIONS < <(
+ pveam available -section system 2>/dev/null |
+ grep -E '\.(tar\.zst|tar\.xz|tar\.gz)$' |
+ awk -F'\t' '{print $1}' |
+ grep "^${PCT_OSTYPE}-" |
+ sed -E "s/.*${PCT_OSTYPE}-([0-9]+(\.[0-9]+)?).*/\1/" |
+ sort -u -V 2>/dev/null
+ )
+
+ if [[ ${#AVAILABLE_VERSIONS[@]} -gt 0 ]]; then
+ echo ""
+ echo "${BL}Available ${PCT_OSTYPE} versions:${CL}"
+ for i in "${!AVAILABLE_VERSIONS[@]}"; do
+ echo " [$((i + 1))] ${AVAILABLE_VERSIONS[$i]}"
+ done
+ echo ""
+ read -p "Select version [1-${#AVAILABLE_VERSIONS[@]}] or press Enter to cancel: " choice
+
+ if [[ "$choice" =~ ^[0-9]+$ ]] && [[ "$choice" -ge 1 ]] && [[ "$choice" -le ${#AVAILABLE_VERSIONS[@]} ]]; then
+ PCT_OSVERSION="${AVAILABLE_VERSIONS[$((choice - 1))]}"
+ TEMPLATE_SEARCH="${PCT_OSTYPE}-${PCT_OSVERSION}"
+ SEARCH_PATTERN="^${TEMPLATE_SEARCH}-"
+
+ #echo "[DEBUG] Retrying with version: $PCT_OSVERSION"
+
+ mapfile -t ONLINE_TEMPLATES < <(
+ pveam available -section system 2>/dev/null |
+ grep -E '\.(tar\.zst|tar\.xz|tar\.gz)$' |
+ awk -F'\t' '{print $1}' |
+ grep -E "${SEARCH_PATTERN}.*${TEMPLATE_PATTERN}" |
+ sort -t - -k 2 -V 2>/dev/null || true
+ )
+
+ if [[ ${#ONLINE_TEMPLATES[@]} -gt 0 ]]; then
+ TEMPLATE="${ONLINE_TEMPLATES[-1]}"
+ TEMPLATE_SOURCE="online"
+ #echo "[DEBUG] Found alternative: $TEMPLATE"
+ else
+ msg_error "No templates available for ${PCT_OSTYPE} ${PCT_OSVERSION}"
+ exit 225
+ fi
+ else
+ msg_info "Installation cancelled"
+ exit 0
+ fi
+ else
+ msg_error "No ${PCT_OSTYPE} templates available at all"
+ exit 225
+ fi
+ fi
+
+ #echo "[DEBUG] Selected TEMPLATE='$TEMPLATE' SOURCE='$TEMPLATE_SOURCE'"
+ #msg_debug "Selected TEMPLATE='$TEMPLATE' SOURCE='$TEMPLATE_SOURCE'"
+
+ TEMPLATE_PATH="$(pvesm path $TEMPLATE_STORAGE:vztmpl/$TEMPLATE 2>/dev/null || true)"
+ if [[ -z "$TEMPLATE_PATH" ]]; then
+ TEMPLATE_BASE=$(awk -v s="$TEMPLATE_STORAGE" '$1==s {f=1} f && /path/ {print $2; exit}' /etc/pve/storage.cfg)
+ [[ -n "$TEMPLATE_BASE" ]] && TEMPLATE_PATH="$TEMPLATE_BASE/template/cache/$TEMPLATE"
+ fi
+
+ # If we still don't have a path but have a valid template name, construct it
+ if [[ -z "$TEMPLATE_PATH" && -n "$TEMPLATE" ]]; then
+ TEMPLATE_PATH="/var/lib/vz/template/cache/$TEMPLATE"
+ fi
+
+ [[ -n "$TEMPLATE_PATH" ]] || {
+ if [[ -z "$TEMPLATE" ]]; then
+ msg_error "Template ${PCT_OSTYPE} ${PCT_OSVERSION} not available"
+
+ # Get available versions
+ mapfile -t AVAILABLE_VERSIONS < <(
+ pveam available -section system 2>/dev/null |
+ grep "^${PCT_OSTYPE}-" |
+ sed -E 's/.*'"${PCT_OSTYPE}"'-([0-9]+\.[0-9]+).*/\1/' |
+ grep -E '^[0-9]+\.[0-9]+$' |
+ sort -u -V 2>/dev/null || sort -u
+ )
+
+ if [[ ${#AVAILABLE_VERSIONS[@]} -gt 0 ]]; then
+ echo -e "\n${BL}Available versions:${CL}"
+ for i in "${!AVAILABLE_VERSIONS[@]}"; do
+ echo " [$((i + 1))] ${AVAILABLE_VERSIONS[$i]}"
+ done
+
+ echo ""
+ read -p "Select version [1-${#AVAILABLE_VERSIONS[@]}] or Enter to exit: " choice
+
+ if [[ "$choice" =~ ^[0-9]+$ ]] && [[ "$choice" -ge 1 ]] && [[ "$choice" -le ${#AVAILABLE_VERSIONS[@]} ]]; then
+ export var_version="${AVAILABLE_VERSIONS[$((choice - 1))]}"
+ export PCT_OSVERSION="$var_version"
+ msg_ok "Switched to ${PCT_OSTYPE} ${var_version}"
+
+ # Retry template search with new version
+ TEMPLATE_SEARCH="${PCT_OSTYPE}-${PCT_OSVERSION:-}"
+ SEARCH_PATTERN="^${TEMPLATE_SEARCH}-"
+
+ mapfile -t LOCAL_TEMPLATES < <(
+ pveam list "$TEMPLATE_STORAGE" 2>/dev/null |
+ awk -v search="${SEARCH_PATTERN}" -v pattern="${TEMPLATE_PATTERN}" '$1 ~ search && $1 ~ pattern {print $1}' |
+ sed 's|.*/||' | sort -t - -k 2 -V
+ )
+ mapfile -t ONLINE_TEMPLATES < <(
+ pveam available -section system 2>/dev/null |
+ grep -E '\.(tar\.zst|tar\.xz|tar\.gz)$' |
+ awk -F'\t' '{print $1}' |
+ grep -E "${SEARCH_PATTERN}.*${TEMPLATE_PATTERN}" |
+ sort -t - -k 2 -V 2>/dev/null || true
+ )
+ ONLINE_TEMPLATE=""
+ [[ ${#ONLINE_TEMPLATES[@]} -gt 0 ]] && ONLINE_TEMPLATE="${ONLINE_TEMPLATES[-1]}"
+
+ if [[ ${#LOCAL_TEMPLATES[@]} -gt 0 ]]; then
+ TEMPLATE="${LOCAL_TEMPLATES[-1]}"
+ TEMPLATE_SOURCE="local"
+ else
+ TEMPLATE="$ONLINE_TEMPLATE"
+ TEMPLATE_SOURCE="online"
+ fi
+
+ TEMPLATE_PATH="$(pvesm path $TEMPLATE_STORAGE:vztmpl/$TEMPLATE 2>/dev/null || true)"
+ if [[ -z "$TEMPLATE_PATH" ]]; then
+ TEMPLATE_BASE=$(awk -v s="$TEMPLATE_STORAGE" '$1==s {f=1} f && /path/ {print $2; exit}' /etc/pve/storage.cfg)
+ [[ -n "$TEMPLATE_BASE" ]] && TEMPLATE_PATH="$TEMPLATE_BASE/template/cache/$TEMPLATE"
+ fi
+
+ # If we still don't have a path but have a valid template name, construct it
+ if [[ -z "$TEMPLATE_PATH" && -n "$TEMPLATE" ]]; then
+ TEMPLATE_PATH="/var/lib/vz/template/cache/$TEMPLATE"
+ fi
+
+ [[ -n "$TEMPLATE_PATH" ]] || {
+ msg_error "Template still not found after version change"
+ exit 220
+ }
+ else
+ msg_info "Installation cancelled"
+ exit 1
+ fi
+ else
+ msg_error "No ${PCT_OSTYPE} templates available"
+ exit 220
+ fi
+ fi
+ }
+
+ # Validate that we found a template
+ if [[ -z "$TEMPLATE" ]]; then
+ msg_error "No template found for ${PCT_OSTYPE} ${PCT_OSVERSION}"
+ msg_info "Please check:"
+ msg_info " - Is pveam catalog available? (run: pveam available -section system)"
+ msg_info " - Does the template exist for your OS version?"
+ exit 225
+ fi
+
+ msg_ok "Template ${BL}$TEMPLATE${CL} [$TEMPLATE_SOURCE]"
+ msg_debug "Resolved TEMPLATE_PATH=$TEMPLATE_PATH"
+
+ NEED_DOWNLOAD=0
+ if [[ ! -f "$TEMPLATE_PATH" ]]; then
+ msg_info "Template not present locally – will download."
+ NEED_DOWNLOAD=1
+ elif [[ ! -r "$TEMPLATE_PATH" ]]; then
+ msg_error "Template file exists but is not readable – check permissions."
+ exit 221
+ elif [[ "$(stat -c%s "$TEMPLATE_PATH")" -lt 1000000 ]]; then
+ if [[ -n "$ONLINE_TEMPLATE" ]]; then
+ msg_warn "Template file too small (<1MB) – re-downloading."
+ NEED_DOWNLOAD=1
+ else
+ msg_warn "Template looks too small, but no online version exists. Keeping local file."
+ fi
+ elif ! tar -tf "$TEMPLATE_PATH" &>/dev/null; then
+ if [[ -n "$ONLINE_TEMPLATE" ]]; then
+ msg_warn "Template appears corrupted – re-downloading."
+ NEED_DOWNLOAD=1
+ else
+ msg_warn "Template appears corrupted, but no online version exists. Keeping local file."
+ fi
+ else
+ $STD msg_ok "Template $TEMPLATE is present and valid."
+ fi
+
+ if [[ "$TEMPLATE_SOURCE" == "local" && -n "$ONLINE_TEMPLATE" && "$TEMPLATE" != "$ONLINE_TEMPLATE" ]]; then
+ msg_warn "Local template is outdated: $TEMPLATE (latest available: $ONLINE_TEMPLATE)"
+ if whiptail --yesno "A newer template is available:\n$ONLINE_TEMPLATE\n\nDo you want to download and use it instead?" 12 70; then
+ TEMPLATE="$ONLINE_TEMPLATE"
+ NEED_DOWNLOAD=1
+ else
+ msg_info "Continuing with local template $TEMPLATE"
+ fi
+ fi
+
+ if [[ "$NEED_DOWNLOAD" -eq 1 ]]; then
+ [[ -f "$TEMPLATE_PATH" ]] && rm -f "$TEMPLATE_PATH"
+ for attempt in {1..3}; do
+ msg_info "Attempt $attempt: Downloading template $TEMPLATE to $TEMPLATE_STORAGE"
+ if pveam download "$TEMPLATE_STORAGE" "$TEMPLATE" >/dev/null 2>&1; then
+ msg_ok "Template download successful."
+ break
+ fi
+ if [[ $attempt -eq 3 ]]; then
+ msg_error "Failed after 3 attempts. Please check network access, permissions, or manually run:\n pveam download $TEMPLATE_STORAGE $TEMPLATE"
+ exit 222
+ fi
+ sleep $((attempt * 5))
+ done
+ fi
+
+ if ! pveam list "$TEMPLATE_STORAGE" 2>/dev/null | grep -q "$TEMPLATE"; then
+ msg_error "Template $TEMPLATE not available in storage $TEMPLATE_STORAGE after download."
+ exit 223
+ fi
+
+ # ------------------------------------------------------------------------------
+ # Dynamic preflight for Debian 13.x: offer upgrade if available (no hard mins)
+ # ------------------------------------------------------------------------------
+ if [[ "$PCT_OSTYPE" == "debian" ]]; then
+ OSVER="$(parse_template_osver "$TEMPLATE")"
+ if [[ -n "$OSVER" ]]; then
+ # Proactive, but without aborting – only offer the upgrade
+ offer_lxc_stack_upgrade_and_maybe_retry "no" || true
+ fi
+ fi
+
+ # ------------------------------------------------------------------------------
+ # Create LXC Container
+ # ------------------------------------------------------------------------------
+ msg_info "Creating LXC container"
+
+ # Ensure subuid/subgid entries exist
+ grep -q "root:100000:65536" /etc/subuid || echo "root:100000:65536" >>/etc/subuid
+ grep -q "root:100000:65536" /etc/subgid || echo "root:100000:65536" >>/etc/subgid
+
+ # Assemble pct options
+ PCT_OPTIONS=(${PCT_OPTIONS[@]:-${DEFAULT_PCT_OPTIONS[@]}})
+ [[ " ${PCT_OPTIONS[*]} " =~ " -rootfs " ]] || PCT_OPTIONS+=(-rootfs "$CONTAINER_STORAGE:${PCT_DISK_SIZE:-8}")
+
+ # Lock by template file (avoid concurrent downloads/creates)
+ lockfile="/tmp/template.${TEMPLATE}.lock"
+ exec 9>"$lockfile" || {
+ msg_error "Failed to create lock file '$lockfile'."
+ exit 200
+ }
+ flock -w 60 9 || {
+ msg_error "Timeout while waiting for template lock."
+ exit 211
+ }
+
+ LOGFILE="/tmp/pct_create_${CTID}.log"
+ msg_debug "pct create command: pct create $CTID ${TEMPLATE_STORAGE}:vztmpl/${TEMPLATE} ${PCT_OPTIONS[*]}"
+ msg_debug "Logfile: $LOGFILE"
+
+ # First attempt
+ if ! pct create "$CTID" "${TEMPLATE_STORAGE}:vztmpl/${TEMPLATE}" "${PCT_OPTIONS[@]}" >"$LOGFILE" 2>&1; then
+ msg_error "Container creation failed on ${TEMPLATE_STORAGE}. Checking template..."
+
+ # Validate template file
+ if [[ ! -s "$TEMPLATE_PATH" || "$(stat -c%s "$TEMPLATE_PATH")" -lt 1000000 ]]; then
+ msg_warn "Template file too small or missing – re-downloading."
+ rm -f "$TEMPLATE_PATH"
+ pveam download "$TEMPLATE_STORAGE" "$TEMPLATE"
+ elif ! tar -tf "$TEMPLATE_PATH" &>/dev/null; then
+ if [[ -n "$ONLINE_TEMPLATE" ]]; then
+ msg_warn "Template appears corrupted – re-downloading."
+ rm -f "$TEMPLATE_PATH"
+ pveam download "$TEMPLATE_STORAGE" "$TEMPLATE"
+ else
+ msg_warn "Template appears corrupted, but no online version exists. Skipping re-download."
+ fi
+ fi
+
+ # Retry after repair
+ if ! pct create "$CTID" "${TEMPLATE_STORAGE}:vztmpl/${TEMPLATE}" "${PCT_OPTIONS[@]}" >>"$LOGFILE" 2>&1; then
+ # Fallback to local storage
+ if [[ "$TEMPLATE_STORAGE" != "local" ]]; then
+ msg_warn "Retrying container creation with fallback to local storage..."
+ LOCAL_TEMPLATE_PATH="/var/lib/vz/template/cache/$TEMPLATE"
+ if [[ ! -f "$LOCAL_TEMPLATE_PATH" ]]; then
+ msg_info "Downloading template to local..."
+ pveam download local "$TEMPLATE" >/dev/null 2>&1
+ fi
+ if pct create "$CTID" "local:vztmpl/${TEMPLATE}" "${PCT_OPTIONS[@]}" >>"$LOGFILE" 2>&1; then
+ msg_ok "Container successfully created using local fallback."
+ else
+ # --- Dynamic stack upgrade + auto-retry on the well-known error pattern ---
+ if grep -qiE 'unsupported .* version' "$LOGFILE"; then
+ echo
+ echo "pct reported 'unsupported ... version' – your LXC stack might be too old for this template."
+ echo "We can try to upgrade 'pve-container' and 'lxc-pve' now and retry automatically."
+ offer_lxc_stack_upgrade_and_maybe_retry "yes"
+ rc=$?
+ case $rc in
+ 0) : ;; # success - container created, continue
+ 2)
+ echo "Upgrade was declined. Please update and re-run:
+ apt update && apt install --only-upgrade pve-container lxc-pve"
+ exit 231
+ ;;
+ 3)
+ echo "Upgrade and/or retry failed. Please inspect: $LOGFILE"
+ exit 231
+ ;;
+ esac
+ else
+ msg_error "Container creation failed even with local fallback. See $LOGFILE"
+ if whiptail --yesno "pct create failed.\nDo you want to enable verbose debug mode and view detailed logs?" 12 70; then
+ set -x
+ bash -x -c "pct create $CTID local:vztmpl/${TEMPLATE} ${PCT_OPTIONS[*]}" 2>&1 | tee -a "$LOGFILE"
+ set +x
+ fi
+ exit 209
+ fi
+ fi
+ else
+ msg_error "Container creation failed on local storage. See $LOGFILE"
+ # --- Dynamic stack upgrade + auto-retry on the well-known error pattern ---
+ if grep -qiE 'unsupported .* version' "$LOGFILE"; then
+ echo
+ echo "pct reported 'unsupported ... version' – your LXC stack might be too old for this template."
+ echo "We can try to upgrade 'pve-container' and 'lxc-pve' now and retry automatically."
+ offer_lxc_stack_upgrade_and_maybe_retry "yes"
+ rc=$?
+ case $rc in
+ 0) : ;; # success - container created, continue
+ 2)
+ echo "Upgrade was declined. Please update and re-run:
+ apt update && apt install --only-upgrade pve-container lxc-pve"
+ exit 231
+ ;;
+ 3)
+ echo "Upgrade and/or retry failed. Please inspect: $LOGFILE"
+ exit 231
+ ;;
+ esac
+ else
+ msg_error "Container creation failed. See $LOGFILE"
+ if whiptail --yesno "pct create failed.\nDo you want to enable verbose debug mode and view detailed logs?" 12 70; then
+ set -x
+ bash -x -c "pct create $CTID local:vztmpl/${TEMPLATE} ${PCT_OPTIONS[*]}" 2>&1 | tee -a "$LOGFILE"
+ set +x
+ fi
+ exit 209
+ fi
+ fi
+ fi
+ fi
+
+ # Verify container exists
+ pct list | awk '{print $1}' | grep -qx "$CTID" || {
+ msg_error "Container ID $CTID not listed in 'pct list'. See $LOGFILE"
+ exit 215
+ }
+
+ # Verify config rootfs
+ grep -q '^rootfs:' "/etc/pve/lxc/$CTID.conf" || {
+ msg_error "RootFS entry missing in container config. See $LOGFILE"
+ exit 216
+ }
+
+ msg_ok "LXC Container ${BL}$CTID${CL} ${GN}was successfully created."
+}
+
+# ------------------------------------------------------------------------------
+# description()
+#
+# - Sets container description with HTML content (logo, links, badges)
+# - Restarts ping-instances.service if present
+# - Posts status "done" to API
+# ------------------------------------------------------------------------------
+description() {
+ IP=$(pct exec "$CTID" ip a s dev eth0 | awk '/inet / {print $2}' | cut -d/ -f1)
+
+ # Generate LXC Description
+ DESCRIPTION=$(
+ cat <
+
+
+
+
+ ${APP} LXC
+
+
+
+
+
+
+
+
+
+ GitHub
+
+
+
+ Discussions
+
+
+
+ Issues
+
+
+EOF
+ )
+ pct set "$CTID" -description "$DESCRIPTION"
+
+ if [[ -f /etc/systemd/system/ping-instances.service ]]; then
+ systemctl start ping-instances.service
+ fi
+
+ post_update_to_api "done" "none"
+}
+
+# ------------------------------------------------------------------------------
+# api_exit_script()
+#
+# - Exit trap handler
+# - Reports exit codes to API with detailed reason
+# - Handles known codes (100–209) and maps them to errors
+# ------------------------------------------------------------------------------
+api_exit_script() {
+ exit_code=$?
+ if [ $exit_code -ne 0 ]; then
+ case $exit_code in
+ 100) post_update_to_api "failed" "100: Unexpected error in create_lxc.sh" ;;
+ 101) post_update_to_api "failed" "101: No network connection detected in create_lxc.sh" ;;
+ 200) post_update_to_api "failed" "200: LXC creation failed in create_lxc.sh" ;;
+ 201) post_update_to_api "failed" "201: Invalid Storage class in create_lxc.sh" ;;
+ 202) post_update_to_api "failed" "202: User aborted menu in create_lxc.sh" ;;
+ 203) post_update_to_api "failed" "203: CTID not set in create_lxc.sh" ;;
+ 204) post_update_to_api "failed" "204: PCT_OSTYPE not set in create_lxc.sh" ;;
+ 205) post_update_to_api "failed" "205: CTID cannot be less than 100 in create_lxc.sh" ;;
+ 206) post_update_to_api "failed" "206: CTID already in use in create_lxc.sh" ;;
+ 207) post_update_to_api "failed" "207: Template not found in create_lxc.sh" ;;
+ 208) post_update_to_api "failed" "208: Error downloading template in create_lxc.sh" ;;
+ 209) post_update_to_api "failed" "209: Container creation failed, but template is intact in create_lxc.sh" ;;
+ *) post_update_to_api "failed" "Unknown error, exit code: $exit_code in create_lxc.sh" ;;
+ esac
+ fi
+}
+
+if command -v pveversion >/dev/null 2>&1; then
+ trap 'api_exit_script' EXIT
+fi
+trap 'post_update_to_api "failed" "$BASH_COMMAND"' ERR
+trap 'post_update_to_api "failed" "INTERRUPTED"' SIGINT
+trap 'post_update_to_api "failed" "TERMINATED"' SIGTERM
diff --git a/misc/build.func.backup-20251029-124307 b/misc/build.func.backup-20251029-124307
new file mode 100644
index 000000000..d452f4637
--- /dev/null
+++ b/misc/build.func.backup-20251029-124307
@@ -0,0 +1,3517 @@
+#!/usr/bin/env bash
+# Copyright (c) 2021-2025 community-scripts ORG
+# Author: tteck (tteckster) | MickLesk | michelroegl-brunner
+# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
+# Revision: 1
+
+# ==============================================================================
+# SECTION 1: CORE INITIALIZATION & VARIABLES
+# ==============================================================================
+
+# ------------------------------------------------------------------------------
+# variables()
+#
+# - Normalize application name (NSAPP = lowercase, no spaces)
+# - Build installer filename (var_install)
+# - Define regex for integer validation
+# - Fetch hostname of Proxmox node
+# - Set default values for diagnostics/method
+# - Generate random UUID for tracking
+# - Get Proxmox VE version and kernel version
+# ------------------------------------------------------------------------------
+variables() {
+ NSAPP=$(echo "${APP,,}" | tr -d ' ') # This function sets the NSAPP variable by converting the value of the APP variable to lowercase and removing any spaces.
+ var_install="${NSAPP}-install" # sets the var_install variable by appending "-install" to the value of NSAPP.
+ INTEGER='^[0-9]+([.][0-9]+)?$' # it defines the INTEGER regular expression pattern.
+ PVEHOST_NAME=$(hostname) # gets the Proxmox node's hostname
+ DIAGNOSTICS="yes" # sets the DIAGNOSTICS variable to "yes", used for the API call.
+ METHOD="default" # sets the METHOD variable to "default", used for the API call.
+ RANDOM_UUID="$(cat /proc/sys/kernel/random/uuid)" # generates a random UUID and sets it to the RANDOM_UUID variable.
+ CTTYPE="${CTTYPE:-${CT_TYPE:-1}}"
+ #CT_TYPE=${var_unprivileged:-$CT_TYPE}
+
+ # Get Proxmox VE version and kernel version
+ if command -v pveversion >/dev/null 2>&1; then
+ PVEVERSION=$(pveversion | grep "pve-manager" | awk '{print $2}' | cut -d'/' -f1)
+ else
+ PVEVERSION="N/A"
+ fi
+ KERNEL_VERSION=$(uname -r)
+}
+
+# -----------------------------------------------------------------------------
+# Community-Scripts bootstrap loader
+# - Always sources build.func from remote
+# - Updates local core files only if build.func changed
+# - Local cache: /usr/local/community-scripts/core
+# -----------------------------------------------------------------------------
+
+# FUNC_DIR="/usr/local/community-scripts/core"
+# mkdir -p "$FUNC_DIR"
+
+# BUILD_URL="https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func"
+# BUILD_REV="$FUNC_DIR/build.rev"
+# DEVMODE="${DEVMODE:-no}"
+
+# # --- Step 1: fetch build.func content once, compute hash ---
+# build_content="$(curl -fsSL "$BUILD_URL")" || {
+# echo "❌ Failed to fetch build.func"
+# exit 1
+# }
+
+# newhash=$(printf "%s" "$build_content" | sha256sum | awk '{print $1}')
+# oldhash=$(cat "$BUILD_REV" 2>/dev/null || echo "")
+
+# # --- Step 2: if build.func changed, offer update for core files ---
+# if [ "$newhash" != "$oldhash" ]; then
+# echo "⚠️ build.func changed!"
+
+# while true; do
+# read -rp "Refresh local core files? [y/N/diff]: " ans
+# case "$ans" in
+# [Yy]*)
+# echo "$newhash" >"$BUILD_REV"
+
+# update_func_file() {
+# local file="$1"
+# local url="https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/$file"
+# local local_path="$FUNC_DIR/$file"
+
+# echo "⬇️ Downloading $file ..."
+# curl -fsSL "$url" -o "$local_path" || {
+# echo "❌ Failed to fetch $file"
+# exit 1
+# }
+# echo "✔️ Updated $file"
+# }
+
+# update_func_file core.func
+# update_func_file error_handler.func
+# update_func_file tools.func
+# break
+# ;;
+# [Dd]*)
+# for file in core.func error_handler.func tools.func; do
+# local_path="$FUNC_DIR/$file"
+# url="https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/$file"
+# remote_tmp="$(mktemp)"
+
+# curl -fsSL "$url" -o "$remote_tmp" || continue
+
+# if [ -f "$local_path" ]; then
+# echo "🔍 Diff for $file:"
+# diff -u "$local_path" "$remote_tmp" || echo "(no differences)"
+# else
+# echo "📦 New file $file will be installed"
+# fi
+
+# rm -f "$remote_tmp"
+# done
+# ;;
+# *)
+# echo "❌ Skipped updating local core files"
+# break
+# ;;
+# esac
+# done
+# else
+# if [ "$DEVMODE" != "yes" ]; then
+# echo "✔️ build.func unchanged → using existing local core files"
+# fi
+# fi
+
+# if [ -n "${_COMMUNITY_SCRIPTS_LOADER:-}" ]; then
+# return 0 2>/dev/null || exit 0
+# fi
+# _COMMUNITY_SCRIPTS_LOADER=1
+
+# # --- Step 3: always source local versions of the core files ---
+# source "$FUNC_DIR/core.func"
+# source "$FUNC_DIR/error_handler.func"
+# source "$FUNC_DIR/tools.func"
+
+# # --- Step 4: finally, source build.func directly from memory ---
+# # (no tmp file needed)
+# source <(printf "%s" "$build_content")
+
+# ------------------------------------------------------------------------------
+# Load core + error handler functions from community-scripts repo
+#
+# - Prefer curl if available, fallback to wget
+# - Load: core.func, error_handler.func, api.func
+# - Initialize error traps after loading
+# ------------------------------------------------------------------------------
+
+source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/api.func)
+
+if command -v curl >/dev/null 2>&1; then
+ source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/core.func)
+ source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/error_handler.func)
+ load_functions
+ catch_errors
+ #echo "(build.func) Loaded core.func via curl"
+elif command -v wget >/dev/null 2>&1; then
+ source <(wget -qO- https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/core.func)
+ source <(wget -qO- https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/error_handler.func)
+ load_functions
+ catch_errors
+ #echo "(build.func) Loaded core.func via wget"
+fi
+
+# ------------------------------------------------------------------------------
+# maxkeys_check()
+#
+# - Reads kernel keyring limits (maxkeys, maxbytes)
+# - Checks current usage for LXC user (UID 100000)
+# - Warns if usage is close to limits and suggests sysctl tuning
+# - Exits if thresholds are exceeded
+# - https://cleveruptime.com/docs/files/proc-key-users | https://docs.kernel.org/security/keys/core.html
+# ------------------------------------------------------------------------------
+
+maxkeys_check() {
+ # Read kernel parameters
+ per_user_maxkeys=$(cat /proc/sys/kernel/keys/maxkeys 2>/dev/null || echo 0)
+ per_user_maxbytes=$(cat /proc/sys/kernel/keys/maxbytes 2>/dev/null || echo 0)
+
+ # Exit if kernel parameters are unavailable
+ if [[ "$per_user_maxkeys" -eq 0 || "$per_user_maxbytes" -eq 0 ]]; then
+ echo -e "${CROSS}${RD} Error: Unable to read kernel parameters. Ensure proper permissions.${CL}"
+ exit 1
+ fi
+
+ # Fetch key usage for user ID 100000 (typical for containers)
+ used_lxc_keys=$(awk '/100000:/ {print $2}' /proc/key-users 2>/dev/null || echo 0)
+ used_lxc_bytes=$(awk '/100000:/ {split($5, a, "/"); print a[1]}' /proc/key-users 2>/dev/null || echo 0)
+
+ # Calculate thresholds and suggested new limits
+ threshold_keys=$((per_user_maxkeys - 100))
+ threshold_bytes=$((per_user_maxbytes - 1000))
+ new_limit_keys=$((per_user_maxkeys * 2))
+ new_limit_bytes=$((per_user_maxbytes * 2))
+
+ # Check if key or byte usage is near limits
+ failure=0
+ if [[ "$used_lxc_keys" -gt "$threshold_keys" ]]; then
+ echo -e "${CROSS}${RD} Warning: Key usage is near the limit (${used_lxc_keys}/${per_user_maxkeys}).${CL}"
+ echo -e "${INFO} Suggested action: Set ${GN}kernel.keys.maxkeys=${new_limit_keys}${CL} in ${BOLD}/etc/sysctl.d/98-community-scripts.conf${CL}."
+ failure=1
+ fi
+ if [[ "$used_lxc_bytes" -gt "$threshold_bytes" ]]; then
+ echo -e "${CROSS}${RD} Warning: Key byte usage is near the limit (${used_lxc_bytes}/${per_user_maxbytes}).${CL}"
+ echo -e "${INFO} Suggested action: Set ${GN}kernel.keys.maxbytes=${new_limit_bytes}${CL} in ${BOLD}/etc/sysctl.d/98-community-scripts.conf${CL}."
+ failure=1
+ fi
+
+ # Provide next steps if issues are detected
+ if [[ "$failure" -eq 1 ]]; then
+ echo -e "${INFO} To apply changes, run: ${BOLD}service procps force-reload${CL}"
+ exit 1
+ fi
+
+ echo -e "${CM}${GN} All kernel key limits are within safe thresholds.${CL}"
+}
+
+# ------------------------------------------------------------------------------
+# get_current_ip()
+#
+# - Returns current container IP depending on OS type
+# - Debian/Ubuntu: uses `hostname -I`
+# - Alpine: parses eth0 via `ip -4 addr`
+# ------------------------------------------------------------------------------
+get_current_ip() {
+ if [ -f /etc/os-release ]; then
+ # Check for Debian/Ubuntu (uses hostname -I)
+ if grep -qE 'ID=debian|ID=ubuntu' /etc/os-release; then
+ CURRENT_IP=$(hostname -I | awk '{print $1}')
+ # Check for Alpine (uses ip command)
+ elif grep -q 'ID=alpine' /etc/os-release; then
+ CURRENT_IP=$(ip -4 addr show eth0 | awk '/inet / {print $2}' | cut -d/ -f1 | head -n 1)
+ else
+ CURRENT_IP="Unknown"
+ fi
+ fi
+ echo "$CURRENT_IP"
+}
+
+# ------------------------------------------------------------------------------
+# update_motd_ip()
+#
+# - Updates /etc/motd with current container IP
+# - Removes old IP entries to avoid duplicates
+# ------------------------------------------------------------------------------
+update_motd_ip() {
+ MOTD_FILE="/etc/motd"
+
+ if [ -f "$MOTD_FILE" ]; then
+ # Remove existing IP Address lines to prevent duplication
+ sed -i '/IP Address:/d' "$MOTD_FILE"
+
+ IP=$(get_current_ip)
+ # Add the new IP address
+ echo -e "${TAB}${NETWORK}${YW} IP Address: ${GN}${IP}${CL}" >>"$MOTD_FILE"
+ fi
+}
+
+# ------------------------------------------------------------------------------
+# install_ssh_keys_into_ct()
+#
+# - Installs SSH keys into container root account if SSH is enabled
+# - Uses pct push or direct input to authorized_keys
+# - Falls back to warning if no keys provided
+# ------------------------------------------------------------------------------
+install_ssh_keys_into_ct() {
+ [[ "$SSH" != "yes" ]] && return 0
+
+ if [[ -n "$SSH_KEYS_FILE" && -s "$SSH_KEYS_FILE" ]]; then
+ msg_info "Installing selected SSH keys into CT ${CTID}"
+ pct exec "$CTID" -- sh -c 'mkdir -p /root/.ssh && chmod 700 /root/.ssh' || {
+ msg_error "prepare /root/.ssh failed"
+ return 1
+ }
+ pct push "$CTID" "$SSH_KEYS_FILE" /root/.ssh/authorized_keys >/dev/null 2>&1 ||
+ pct exec "$CTID" -- sh -c "cat > /root/.ssh/authorized_keys" <"$SSH_KEYS_FILE" || {
+ msg_error "write authorized_keys failed"
+ return 1
+ }
+ pct exec "$CTID" -- sh -c 'chmod 600 /root/.ssh/authorized_keys' || true
+ msg_ok "Installed SSH keys into CT ${CTID}"
+ return 0
+ fi
+
+ # Fallback: no SSH keys were selected
+ msg_warn "No SSH keys to install (skipping)."
+ return 0
+}
+
+# ------------------------------------------------------------------------------
+# base_settings()
+#
+# - Defines all base/default variables for container creation
+# - Reads from environment variables (var_*)
+# - Provides fallback defaults for OS type/version
+# ------------------------------------------------------------------------------
+base_settings() {
+  # Populate all container-creation globals from var_* environment overrides,
+  # falling back to built-in defaults. $1 (optional) is the verbose flag
+  # ("yes"/"no") passed by callers such as default_var_settings.
+  CT_TYPE=${var_unprivileged:-"1"}
+  DISK_SIZE=${var_disk:-"4"}
+  CORE_COUNT=${var_cpu:-"1"}
+  RAM_SIZE=${var_ram:-"1024"}
+  VERBOSE=${var_verbose:-"${1:-no}"}
+  PW=${var_pw:-""}
+  CT_ID=${var_ctid:-$NEXTID}
+  HN=${var_hostname:-$NSAPP}
+  BRG=${var_brg:-"vmbr0"}
+  NET=${var_net:-"dhcp"}
+  IPV6_METHOD=${var_ipv6_method:-"none"}
+  IPV6_STATIC=${var_ipv6_static:-""}
+  GATE=${var_gateway:-""}
+  APT_CACHER=${var_apt_cacher:-""}
+  APT_CACHER_IP=${var_apt_cacher_ip:-""}
+  MTU=${var_mtu:-""}
+  SD=${var_storage:-""}
+  NS=${var_ns:-""}
+  MAC=${var_mac:-""}
+  VLAN=${var_vlan:-""}
+  SSH=${var_ssh:-"no"}
+  SSH_AUTHORIZED_KEY=${var_ssh_authorized_key:-""}
+  UDHCPC_FIX=${var_udhcpc_fix:-""}
+  TAGS="community-script,${var_tags:-}"
+  # Fix: these previously fell back to "${1:-no}" (the verbose flag), so
+  # calling base_settings "yes" silently enabled FUSE and TUN. Default to "no".
+  ENABLE_FUSE=${var_fuse:-"no"}
+  ENABLE_TUN=${var_tun:-"no"}
+
+  # Since these 2 are only defined outside of default_settings function, we add a temporary fallback. TODO: To align everything, we should add these as constant variables (e.g. OSTYPE and OSVERSION), but that would currently require updating the default_settings function for all existing scripts
+  # ${var:-} guards keep the checks safe under `set -u` when the vars are unset.
+  if [ -z "${var_os:-}" ]; then
+    var_os="debian"
+  fi
+  if [ -z "${var_version:-}" ]; then
+    var_version="12"
+  fi
+}
+
+# ------------------------------------------------------------------------------
+# echo_default()
+#
+# - Prints summary of default values (ID, OS, type, disk, RAM, CPU, etc.)
+# - Uses icons and formatting for readability
+# - Convert CT_TYPE to description
+# ------------------------------------------------------------------------------
+echo_default() {
+ CT_TYPE_DESC="Unprivileged"
+ if [ "$CT_TYPE" -eq 0 ]; then
+ CT_TYPE_DESC="Privileged"
+ fi
+ echo -e "${INFO}${BOLD}${DGN}PVE Version ${PVEVERSION} (Kernel: ${KERNEL_VERSION})${CL}"
+ echo -e "${CONTAINERID}${BOLD}${DGN}Container ID: ${BGN}${CT_ID}${CL}"
+ echo -e "${OS}${BOLD}${DGN}Operating System: ${BGN}$var_os ($var_version)${CL}"
+ echo -e "${CONTAINERTYPE}${BOLD}${DGN}Container Type: ${BGN}$CT_TYPE_DESC${CL}"
+ echo -e "${DISKSIZE}${BOLD}${DGN}Disk Size: ${BGN}${DISK_SIZE} GB${CL}"
+ echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}${CORE_COUNT}${CL}"
+ echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}${RAM_SIZE} MiB${CL}"
+ if [ "$VERBOSE" == "yes" ]; then
+ echo -e "${SEARCH}${BOLD}${DGN}Verbose Mode: ${BGN}Enabled${CL}"
+ fi
+ echo -e "${CREATING}${BOLD}${BL}Creating a ${APP} LXC using the above default settings${CL}"
+ echo -e " "
+}
+
+# ------------------------------------------------------------------------------
+# exit_script()
+#
+# - Called when user cancels an action
+# - Clears screen and exits gracefully
+# ------------------------------------------------------------------------------
+exit_script() {
+  # Clear the screen and print a cancellation notice before exiting.
+  clear
+  echo -e "\n${CROSS}${RD}User exited script${CL}\n"
+  # NOTE(review): bare `exit` propagates the status of the preceding echo
+  # (normally 0) — confirm callers do not expect a non-zero cancel status.
+  exit
+}
+
+# ------------------------------------------------------------------------------
+# find_host_ssh_keys()
+#
+# - Scans system for available SSH keys
+# - Supports defaults (~/.ssh, /etc/ssh/authorized_keys)
+# - Returns list of files containing valid SSH public keys
+# - Sets FOUND_HOST_KEY_COUNT to number of keys found
+# ------------------------------------------------------------------------------
+find_host_ssh_keys() {
+  # Scan the host for files containing SSH public keys and print the matching
+  # file paths as a single colon-separated line. Sets FOUND_HOST_KEY_COUNT to
+  # the total number of keys found. var_ssh_import_glob may override the
+  # default candidate locations.
+  local re='(ssh-(rsa|ed25519)|ecdsa-sha2-nistp256|sk-(ssh-ed25519|ecdsa-sha2-nistp256))'
+  local -a files=() cand=()
+  local g="${var_ssh_import_glob:-}"
+  local total=0 f base c
+
+  shopt -s nullglob
+  if [[ -n "$g" ]]; then
+    # Intentionally unquoted so the user-supplied glob expands.
+    for pat in $g; do cand+=($pat); done
+  else
+    cand+=(/root/.ssh/authorized_keys /root/.ssh/authorized_keys2)
+    cand+=(/root/.ssh/*.pub)
+    cand+=(/etc/ssh/authorized_keys /etc/ssh/authorized_keys.d/*)
+  fi
+  shopt -u nullglob
+
+  for f in "${cand[@]}"; do
+    [[ -f "$f" && -r "$f" ]] || continue
+    base="$(basename -- "$f")"
+    case "$base" in
+    known_hosts | known_hosts.* | config) continue ;;
+    # Skip private keys (id_* without .pub suffix).
+    id_*) [[ "$f" != *.pub ]] && continue ;;
+    esac
+
+    # CRLF safe check for host keys.
+    # Fix: the pattern was single-quoted as '"$re"', so grep searched for the
+    # literal four characters "$re" and never matched; expand $re properly.
+    c=$(tr -d '\r' <"$f" | awk '
+      /^[[:space:]]*#/ {next}
+      /^[[:space:]]*$/ {next}
+      {print}
+    ' | grep -E -c "$re" || true)
+
+    if ((c > 0)); then
+      files+=("$f")
+      total=$((total + c))
+    fi
+  done
+
+  # Fallback to /root/.ssh/authorized_keys
+  if ((${#files[@]} == 0)) && [[ -r /root/.ssh/authorized_keys ]]; then
+    if grep -E -q "$re" /root/.ssh/authorized_keys; then
+      files+=(/root/.ssh/authorized_keys)
+      total=$((total + $(grep -E -c "$re" /root/.ssh/authorized_keys || echo 0)))
+    fi
+  fi
+
+  FOUND_HOST_KEY_COUNT="$total"
+  # Subshell keeps the IFS change local while joining paths with ':'.
+  (
+    IFS=:
+    echo "${files[*]}"
+  )
+}
+
+# ------------------------------------------------------------------------------
+# advanced_settings()
+#
+# - Interactive whiptail menu for advanced configuration
+# - Lets user set container type, password, CT ID, hostname, disk, CPU, RAM
+# - Supports IPv4/IPv6, DNS, MAC, VLAN, tags, SSH keys, FUSE, verbose mode
+# - Ends with confirmation or re-entry if cancelled
+# ------------------------------------------------------------------------------
+advanced_settings() {
+  # Fix: --msgbox must come after --title and take the message text as its
+  # argument; the original order made whiptail treat "--title" as the text.
+  whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --title "Here is an instructional tip:" --msgbox "To make a selection, use the Spacebar." 8 58
+  # Setting Default Tag for Advanced Settings
+  TAGS="community-script;${var_tags:-}"
+  CT_DEFAULT_TYPE="${CT_TYPE}"
+  CT_TYPE=""
+  # Loop until a container type is chosen; the radiolist default mirrors the
+  # type preselected by base_settings (CT_DEFAULT_TYPE).
+  while [ -z "$CT_TYPE" ]; do
+    if [ "$CT_DEFAULT_TYPE" == "1" ]; then
+      if CT_TYPE=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --title "CONTAINER TYPE" --radiolist "Choose Type" 10 58 2 \
+        "1" "Unprivileged" ON \
+        "0" "Privileged" OFF \
+        3>&1 1>&2 2>&3); then
+        if [ -n "$CT_TYPE" ]; then
+          CT_TYPE_DESC="Unprivileged"
+          if [ "$CT_TYPE" -eq 0 ]; then
+            CT_TYPE_DESC="Privileged"
+          fi
+          echo -e "${INFO}${BOLD}${DGN}PVE Version ${PVEVERSION} (Kernel: ${KERNEL_VERSION})${CL}"
+          echo -e "${OS}${BOLD}${DGN}Operating System: ${BGN}$var_os |${OSVERSION}${BOLD}${DGN}Version: ${BGN}$var_version${CL}"
+          echo -e "${CONTAINERTYPE}${BOLD}${DGN}Container Type: ${BGN}$CT_TYPE_DESC${CL}"
+        fi
+      else
+        exit_script
+      fi
+    fi
+    if [ "$CT_DEFAULT_TYPE" == "0" ]; then
+      if CT_TYPE=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --title "CONTAINER TYPE" --radiolist "Choose Type" 10 58 2 \
+        "1" "Unprivileged" OFF \
+        "0" "Privileged" ON \
+        3>&1 1>&2 2>&3); then
+        if [ -n "$CT_TYPE" ]; then
+          CT_TYPE_DESC="Unprivileged"
+          if [ "$CT_TYPE" -eq 0 ]; then
+            CT_TYPE_DESC="Privileged"
+          fi
+          echo -e "${INFO}${BOLD}${DGN}PVE Version ${PVEVERSION} (Kernel: ${KERNEL_VERSION})${CL}"
+          echo -e "${OS}${BOLD}${DGN}Operating System: ${BGN}$var_os${CL}"
+          echo -e "${OSVERSION}${BOLD}${DGN}Version: ${BGN}$var_version${CL}"
+          echo -e "${CONTAINERTYPE}${BOLD}${DGN}Container Type: ${BGN}$CT_TYPE_DESC${CL}"
+        fi
+      else
+        exit_script
+      fi
+    fi
+  done
+
+  # Root password: empty means autologin; otherwise enforce no spaces,
+  # minimum length 5, and a matching confirmation entry.
+  while true; do
+    if PW1=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --passwordbox "\nSet Root Password (needed for root ssh access)" 9 58 --title "PASSWORD (leave blank for automatic login)" 3>&1 1>&2 2>&3); then
+      # Empty = Autologin
+      if [[ -z "$PW1" ]]; then
+        PW=""
+        PW1="Automatic Login"
+        echo -e "${VERIFYPW}${BOLD}${DGN}Root Password: ${BGN}$PW1${CL}"
+        break
+      fi
+
+      # Invalid: contains spaces
+      if [[ "$PW1" == *" "* ]]; then
+        whiptail --msgbox "Password cannot contain spaces." 8 58
+        continue
+      fi
+
+      # Invalid: too short
+      if ((${#PW1} < 5)); then
+        whiptail --msgbox "Password must be at least 5 characters." 8 58
+        continue
+      fi
+
+      # Confirm password
+      if PW2=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --passwordbox "\nVerify Root Password" 9 58 --title "PASSWORD VERIFICATION" 3>&1 1>&2 2>&3); then
+        if [[ "$PW1" == "$PW2" ]]; then
+          PW="-password $PW1"
+          echo -e "${VERIFYPW}${BOLD}${DGN}Root Password: ${BGN}********${CL}"
+          break
+        else
+          whiptail --msgbox "Passwords do not match. Please try again." 8 58
+        fi
+      else
+        exit_script
+      fi
+    else
+      exit_script
+    fi
+  done
+
+  if CT_ID=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Container ID" 8 58 "$NEXTID" --title "CONTAINER ID" 3>&1 1>&2 2>&3); then
+    if [ -z "$CT_ID" ]; then
+      CT_ID="$NEXTID"
+    fi
+  else
+    exit_script
+  fi
+  echo -e "${CONTAINERID}${BOLD}${DGN}Container ID: ${BGN}$CT_ID${CL}"
+
+  while true; do
+    if CT_NAME=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --inputbox "Set Hostname" 8 58 "$NSAPP" --title "HOSTNAME" 3>&1 1>&2 2>&3); then
+      if [ -z "$CT_NAME" ]; then
+        HN="$NSAPP"
+      else
+        HN=$(echo "${CT_NAME,,}" | tr -d ' ')
+      fi
+      # Hostname validate (RFC 1123)
+      if [[ "$HN" =~ ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ ]]; then
+        echo -e "${HOSTNAME}${BOLD}${DGN}Hostname: ${BGN}$HN${CL}"
+        break
+      else
+        whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \
+          --msgbox "❌ Invalid hostname: '$HN'\n\nOnly lowercase letters, digits and hyphens (-) are allowed.\nUnderscores (_) or other characters are not permitted!" 10 70
+      fi
+    else
+      exit_script
+    fi
+  done
+
+  while true; do
+    DISK_SIZE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Disk Size in GB" 8 58 "$var_disk" --title "DISK SIZE" 3>&1 1>&2 2>&3) || exit_script
+
+    if [ -z "$DISK_SIZE" ]; then
+      DISK_SIZE="$var_disk"
+    fi
+
+    if [[ "$DISK_SIZE" =~ ^[1-9][0-9]*$ ]]; then
+      echo -e "${DISKSIZE}${BOLD}${DGN}Disk Size: ${BGN}${DISK_SIZE} GB${CL}"
+      break
+    else
+      whiptail --msgbox "Disk size must be a positive integer!" 8 58
+    fi
+  done
+
+  while true; do
+    CORE_COUNT=$(whiptail --backtitle "Proxmox VE Helper Scripts" \
+      --inputbox "Allocate CPU Cores" 8 58 "$var_cpu" --title "CORE COUNT" 3>&1 1>&2 2>&3) || exit_script
+
+    if [ -z "$CORE_COUNT" ]; then
+      CORE_COUNT="$var_cpu"
+    fi
+
+    if [[ "$CORE_COUNT" =~ ^[1-9][0-9]*$ ]]; then
+      echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}$CORE_COUNT${CL}"
+      break
+    else
+      whiptail --msgbox "CPU core count must be a positive integer!" 8 58
+    fi
+  done
+
+  while true; do
+    RAM_SIZE=$(whiptail --backtitle "Proxmox VE Helper Scripts" \
+      --inputbox "Allocate RAM in MiB" 8 58 "$var_ram" --title "RAM" 3>&1 1>&2 2>&3) || exit_script
+
+    if [ -z "$RAM_SIZE" ]; then
+      RAM_SIZE="$var_ram"
+    fi
+
+    if [[ "$RAM_SIZE" =~ ^[1-9][0-9]*$ ]]; then
+      echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}${RAM_SIZE} MiB${CL}"
+      break
+    else
+      whiptail --msgbox "RAM size must be a positive integer!" 8 58
+    fi
+  done
+
+  # Discover bridge interfaces by scanning /etc/network/interfaces and
+  # interfaces.d for iface stanzas containing bridge/OVS options.
+  IFACE_FILEPATH_LIST="/etc/network/interfaces"$'\n'$(find "/etc/network/interfaces.d/" -type f)
+  BRIDGES=""
+  OLD_IFS=$IFS
+  IFS=$'\n'
+  for iface_filepath in ${IFACE_FILEPATH_LIST}; do
+
+    iface_indexes_tmpfile=$(mktemp -q -u '.iface-XXXX')
+    (grep -Pn '^\s*iface' "${iface_filepath}" | cut -d':' -f1 && wc -l "${iface_filepath}" | cut -d' ' -f1) | awk 'FNR==1 {line=$0; next} {print line":"$0-1; line=$0}' >"${iface_indexes_tmpfile}" || true
+
+    if [ -f "${iface_indexes_tmpfile}" ]; then
+
+      while read -r pair; do
+        start=$(echo "${pair}" | cut -d':' -f1)
+        end=$(echo "${pair}" | cut -d':' -f2)
+
+        if awk "NR >= ${start} && NR <= ${end}" "${iface_filepath}" | grep -qP '^\s*(bridge[-_](ports|stp|fd|vlan-aware|vids)|ovs_type\s+OVSBridge)\b'; then
+          iface_name=$(sed "${start}q;d" "${iface_filepath}" | awk '{print $2}')
+          BRIDGES="${iface_name}"$'\n'"${BRIDGES}"
+        fi
+
+      done <"${iface_indexes_tmpfile}"
+      rm -f "${iface_indexes_tmpfile}"
+    fi
+
+  done
+  IFS=$OLD_IFS
+  BRIDGES=$(echo "$BRIDGES" | grep -v '^\s*$' | sort | uniq)
+  if [[ -z "$BRIDGES" ]]; then
+    BRG="vmbr0"
+    echo -e "${BRIDGE}${BOLD}${DGN}Bridge: ${BGN}$BRG${CL}"
+  else
+    # Build bridge menu with descriptions
+    BRIDGE_MENU_OPTIONS=()
+    while IFS= read -r bridge; do
+      if [[ -n "$bridge" ]]; then
+        # Get description from Proxmox built-in method - find comment for this specific bridge
+        description=$(grep -A 10 "iface $bridge" /etc/network/interfaces | grep '^#' | head -n1 | sed 's/^#\s*//')
+        if [[ -n "$description" ]]; then
+          BRIDGE_MENU_OPTIONS+=("$bridge" "${description}")
+        else
+          BRIDGE_MENU_OPTIONS+=("$bridge" " ")
+        fi
+      fi
+    done <<<"$BRIDGES"
+
+    BRG=$(whiptail --backtitle "Proxmox VE Helper Scripts" --menu "Select network bridge: " 18 55 6 "${BRIDGE_MENU_OPTIONS[@]}" 3>&1 1>&2 2>&3)
+    if [[ -z "$BRG" ]]; then
+      exit_script
+    else
+      echo -e "${BRIDGE}${BOLD}${DGN}Bridge: ${BGN}$BRG${CL}"
+    fi
+  fi
+
+  # IPv4 methods: dhcp, static, none
+  while true; do
+    IPV4_METHOD=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \
+      --title "IPv4 Address Management" \
+      --menu "Select IPv4 Address Assignment Method:" 12 60 2 \
+      "dhcp" "Automatic (DHCP, recommended)" \
+      "static" "Static (manual entry)" \
+      3>&1 1>&2 2>&3)
+
+    exit_status=$?
+    if [ $exit_status -ne 0 ]; then
+      exit_script
+    fi
+
+    case "$IPV4_METHOD" in
+    dhcp)
+      NET="dhcp"
+      GATE=""
+      echo -e "${NETWORK}${BOLD}${DGN}IPv4: DHCP${CL}"
+      break
+      ;;
+    static)
+      # Static: call and validate CIDR address
+      while true; do
+        NET=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \
+          --inputbox "Enter Static IPv4 CIDR Address (e.g. 192.168.100.50/24)" 8 58 "" \
+          --title "IPv4 ADDRESS" 3>&1 1>&2 2>&3)
+        if [ -z "$NET" ]; then
+          whiptail --msgbox "IPv4 address must not be empty." 8 58
+          continue
+        elif [[ "$NET" =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}/([0-9]|[1-2][0-9]|3[0-2])$ ]]; then
+          echo -e "${NETWORK}${BOLD}${DGN}IPv4 Address: ${BGN}$NET${CL}"
+          break
+        else
+          whiptail --msgbox "$NET is not a valid IPv4 CIDR address. Please enter a correct value!" 8 58
+        fi
+      done
+
+      # call and validate Gateway
+      while true; do
+        GATE1=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \
+          --inputbox "Enter Gateway IP address for static IPv4" 8 58 "" \
+          --title "Gateway IP" 3>&1 1>&2 2>&3)
+        if [ -z "$GATE1" ]; then
+          whiptail --msgbox "Gateway IP address cannot be empty." 8 58
+        elif [[ ! "$GATE1" =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}$ ]]; then
+          whiptail --msgbox "Invalid Gateway IP address format." 8 58
+        else
+          GATE=",gw=$GATE1"
+          echo -e "${GATEWAY}${BOLD}${DGN}Gateway IP Address: ${BGN}$GATE1${CL}"
+          break
+        fi
+      done
+      break
+      ;;
+    esac
+  done
+
+  # IPv6 Address Management selection
+  while true; do
+    IPV6_METHOD=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --menu \
+      "Select IPv6 Address Management Type:" 15 58 4 \
+      "auto" "SLAAC/AUTO (recommended, default)" \
+      "dhcp" "DHCPv6" \
+      "static" "Static (manual entry)" \
+      "none" "Disabled" \
+      --default-item "auto" 3>&1 1>&2 2>&3)
+    [ $? -ne 0 ] && exit_script
+
+    case "$IPV6_METHOD" in
+    auto)
+      echo -e "${NETWORK}${BOLD}${DGN}IPv6: ${BGN}SLAAC/AUTO${CL}"
+      IPV6_ADDR=""
+      IPV6_GATE=""
+      break
+      ;;
+    dhcp)
+      echo -e "${NETWORK}${BOLD}${DGN}IPv6: ${BGN}DHCPv6${CL}"
+      IPV6_ADDR="dhcp"
+      IPV6_GATE=""
+      break
+      ;;
+    static)
+      # Ask for static IPv6 address (CIDR notation, e.g., 2001:db8::1234/64)
+      while true; do
+        IPV6_ADDR=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --inputbox \
+          "Set a static IPv6 CIDR address (e.g., 2001:db8::1234/64)" 8 58 "" \
+          --title "IPv6 STATIC ADDRESS" 3>&1 1>&2 2>&3) || exit_script
+        if [[ "$IPV6_ADDR" =~ ^([0-9a-fA-F:]+:+)+[0-9a-fA-F]+(/[0-9]{1,3})$ ]]; then
+          echo -e "${NETWORK}${BOLD}${DGN}IPv6 Address: ${BGN}$IPV6_ADDR${CL}"
+          break
+        else
+          whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --msgbox \
+            "$IPV6_ADDR is an invalid IPv6 CIDR address. Please enter a valid IPv6 CIDR address (e.g., 2001:db8::1234/64)" 8 58
+        fi
+      done
+      # Optional: ask for IPv6 gateway for static config
+      while true; do
+        IPV6_GATE=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --inputbox \
+          "Enter IPv6 gateway address (optional, leave blank for none)" 8 58 "" --title "IPv6 GATEWAY" 3>&1 1>&2 2>&3)
+        if [ -z "$IPV6_GATE" ]; then
+          IPV6_GATE=""
+          break
+        elif [[ "$IPV6_GATE" =~ ^([0-9a-fA-F:]+:+)+[0-9a-fA-F]+$ ]]; then
+          break
+        else
+          whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --msgbox \
+            "Invalid IPv6 gateway format." 8 58
+        fi
+      done
+      break
+      ;;
+    none)
+      echo -e "${NETWORK}${BOLD}${DGN}IPv6: ${BGN}Disabled${CL}"
+      IPV6_ADDR="none"
+      IPV6_GATE=""
+      break
+      ;;
+    *)
+      exit_script
+      ;;
+    esac
+  done
+
+  if [ "$var_os" == "alpine" ]; then
+    APT_CACHER=""
+    APT_CACHER_IP=""
+  else
+    if APT_CACHER_IP=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --inputbox "Set APT-Cacher IP (leave blank for none)" 8 58 --title "APT-Cacher IP" 3>&1 1>&2 2>&3); then
+      APT_CACHER="${APT_CACHER_IP:+yes}"
+      echo -e "${NETWORK}${BOLD}${DGN}APT-Cacher IP Address: ${BGN}${APT_CACHER_IP:-Default}${CL}"
+    else
+      exit_script
+    fi
+  fi
+
+  # if (whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --defaultno --title "IPv6" --yesno "Disable IPv6?" 10 58); then
+  #   DISABLEIP6="yes"
+  # else
+  #   DISABLEIP6="no"
+  # fi
+  # echo -e "${DISABLEIPV6}${BOLD}${DGN}Disable IPv6: ${BGN}$DISABLEIP6${CL}"
+
+  if MTU1=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --inputbox "Set Interface MTU Size (leave blank for default [The MTU of your selected vmbr, default is 1500])" 8 58 --title "MTU SIZE" 3>&1 1>&2 2>&3); then
+    if [ -z "$MTU1" ]; then
+      MTU1="Default"
+      MTU=""
+    else
+      MTU=",mtu=$MTU1"
+    fi
+    echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}$MTU1${CL}"
+  else
+    exit_script
+  fi
+
+  if SD=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --inputbox "Set a DNS Search Domain (leave blank for HOST)" 8 58 --title "DNS Search Domain" 3>&1 1>&2 2>&3); then
+    if [ -z "$SD" ]; then
+      SX=Host
+      SD=""
+    else
+      SX=$SD
+      SD="-searchdomain=$SD"
+    fi
+    echo -e "${SEARCH}${BOLD}${DGN}DNS Search Domain: ${BGN}$SX${CL}"
+  else
+    exit_script
+  fi
+
+  if NX=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --inputbox "Set a DNS Server IP (leave blank for HOST)" 8 58 --title "DNS SERVER IP" 3>&1 1>&2 2>&3); then
+    if [ -z "$NX" ]; then
+      NX=Host
+      NS=""
+    else
+      NS="-nameserver=$NX"
+    fi
+    echo -e "${NETWORK}${BOLD}${DGN}DNS Server IP Address: ${BGN}$NX${CL}"
+  else
+    exit_script
+  fi
+
+  # Alpine's udhcpc needs a workaround when DHCP is used with a custom DNS.
+  if [ "$var_os" == "alpine" ] && [ "$NET" == "dhcp" ] && [ "$NX" != "Host" ]; then
+    UDHCPC_FIX="yes"
+  else
+    UDHCPC_FIX="no"
+  fi
+  export UDHCPC_FIX
+
+  if MAC1=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --inputbox "Set a MAC Address(leave blank for generated MAC)" 8 58 --title "MAC ADDRESS" 3>&1 1>&2 2>&3); then
+    if [ -z "$MAC1" ]; then
+      MAC1="Default"
+      MAC=""
+    else
+      MAC=",hwaddr=$MAC1"
+    fi
+    # Fix: echo moved out of the else branch so a blank entry reports
+    # "Default", matching the MTU and VLAN prompts above/below.
+    echo -e "${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}$MAC1${CL}"
+  else
+    exit_script
+  fi
+
+  if VLAN1=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --inputbox "Set a Vlan(leave blank for no VLAN)" 8 58 --title "VLAN" 3>&1 1>&2 2>&3); then
+    if [ -z "$VLAN1" ]; then
+      VLAN1="Default"
+      VLAN=""
+    else
+      VLAN=",tag=$VLAN1"
+    fi
+    echo -e "${VLANTAG}${BOLD}${DGN}Vlan: ${BGN}$VLAN1${CL}"
+  else
+    exit_script
+  fi
+
+  if ADV_TAGS=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --inputbox "Set Custom Tags?[If you remove all, there will be no tags!]" 8 58 "${TAGS}" --title "Advanced Tags" 3>&1 1>&2 2>&3); then
+    if [ -n "${ADV_TAGS}" ]; then
+      ADV_TAGS=$(echo "$ADV_TAGS" | tr -d '[:space:]')
+      TAGS="${ADV_TAGS}"
+    else
+      TAGS=";"
+    fi
+    echo -e "${NETWORK}${BOLD}${DGN}Tags: ${BGN}$TAGS${CL}"
+  else
+    exit_script
+  fi
+
+  configure_ssh_settings
+  export SSH_KEYS_FILE
+  echo -e "${ROOTSSH}${BOLD}${DGN}Root SSH Access: ${BGN}$SSH${CL}"
+  if (whiptail --backtitle "Proxmox VE Helper Scripts" --defaultno --title "FUSE Support" --yesno "Enable FUSE support?\nRequired for tools like rclone, mergerfs, AppImage, etc." 10 58); then
+    ENABLE_FUSE="yes"
+  else
+    ENABLE_FUSE="no"
+  fi
+  echo -e "${FUSE}${BOLD}${DGN}Enable FUSE Support: ${BGN}$ENABLE_FUSE${CL}"
+
+  if (whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --defaultno --title "VERBOSE MODE" --yesno "Enable Verbose Mode?" 10 58); then
+    VERBOSE="yes"
+  else
+    VERBOSE="no"
+  fi
+  echo -e "${SEARCH}${BOLD}${DGN}Verbose Mode: ${BGN}$VERBOSE${CL}"
+
+  if (whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --title "ADVANCED SETTINGS COMPLETE" --yesno "Ready to create ${APP} LXC?" 10 58); then
+    echo -e "${CREATING}${BOLD}${RD}Creating a ${APP} LXC using the above advanced settings${CL}"
+  else
+    # User declined the summary: restart the whole advanced dialog flow.
+    clear
+    header_info
+    echo -e "${INFO}${BOLD}${DGN}PVE Version ${PVEVERSION} (Kernel: ${KERNEL_VERSION})${CL}"
+    echo -e "${ADVANCED}${BOLD}${RD}Using Advanced Settings on node $PVEHOST_NAME${CL}"
+    advanced_settings
+  fi
+}
+
+# ------------------------------------------------------------------------------
+# diagnostics_check()
+#
+# - Ensures diagnostics config file exists at /usr/local/community-scripts/diagnostics
+# - Asks user whether to send anonymous diagnostic data
+# - Saves DIAGNOSTICS=yes/no in the config file
+# ------------------------------------------------------------------------------
+diagnostics_check() {
+  # Ensure the diagnostics opt-in/opt-out file exists; on first run, ask the
+  # user and persist the answer. Sets the global DIAGNOSTICS to yes/no.
+  if ! [ -d "/usr/local/community-scripts" ]; then
+    mkdir -p /usr/local/community-scripts
+  fi
+
+  if ! [ -f "/usr/local/community-scripts/diagnostics" ]; then
+    if (whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --title "DIAGNOSTICS" --yesno "Send Diagnostics of LXC Installation?\n\n(This only transmits data without user data, just RAM, CPU, LXC name, ...)" 10 58); then
+      # Fix: `cat </file` read from a (nonexistent) file and left the text
+      # below to run as shell commands; a heredoc written TO the file is meant.
+      cat <<EOF >/usr/local/community-scripts/diagnostics
+DIAGNOSTICS=yes
+
+#This file is used to store the diagnostics settings for the Community-Scripts API.
+#https://github.com/community-scripts/ProxmoxVED/discussions/1836
+#Your diagnostics will be sent to the Community-Scripts API for troubleshooting/statistical purposes.
+#You can review the data at https://community-scripts.github.io/ProxmoxVE/data
+#If you do not wish to send diagnostics, please set the variable 'DIAGNOSTICS' to "no" in /usr/local/community-scripts/diagnostics, or use the menue.
+#This will disable the diagnostics feature.
+#To send diagnostics, set the variable 'DIAGNOSTICS' to "yes" in /usr/local/community-scripts/diagnostics, or use the menue.
+#This will enable the diagnostics feature.
+#The following information will be sent:
+#"disk_size"
+#"core_count"
+#"ram_size"
+#"os_type"
+#"os_version"
+#"nsapp"
+#"method"
+#"pve_version"
+#"status"
+#If you have any concerns, please review the source code at /misc/build.func
+EOF
+      DIAGNOSTICS="yes"
+    else
+      # Same heredoc fix as above for the opt-out branch.
+      cat <<EOF >/usr/local/community-scripts/diagnostics
+DIAGNOSTICS=no
+
+#This file is used to store the diagnostics settings for the Community-Scripts API.
+#https://github.com/community-scripts/ProxmoxVED/discussions/1836
+#Your diagnostics will be sent to the Community-Scripts API for troubleshooting/statistical purposes.
+#You can review the data at https://community-scripts.github.io/ProxmoxVE/data
+#If you do not wish to send diagnostics, please set the variable 'DIAGNOSTICS' to "no" in /usr/local/community-scripts/diagnostics, or use the menue.
+#This will disable the diagnostics feature.
+#To send diagnostics, set the variable 'DIAGNOSTICS' to "yes" in /usr/local/community-scripts/diagnostics, or use the menue.
+#This will enable the diagnostics feature.
+#The following information will be sent:
+#"disk_size"
+#"core_count"
+#"ram_size"
+#"os_type"
+#"os_version"
+#"nsapp"
+#"method"
+#"pve_version"
+#"status"
+#If you have any concerns, please review the source code at /misc/build.func
+EOF
+      DIAGNOSTICS="no"
+    fi
+  else
+    # File already exists: read the stored preference.
+    DIAGNOSTICS=$(awk -F '=' '/^DIAGNOSTICS/ {print $2}' /usr/local/community-scripts/diagnostics)
+
+  fi
+
+}
+
+# ------------------------------------------------------------------------------
+# default_var_settings
+#
+# - Ensures /usr/local/community-scripts/default.vars exists (creates if missing)
+# - Loads var_* values from default.vars (safe parser, no source/eval)
+# - Precedence: ENV var_* > default.vars > built-in defaults
+# - Maps var_verbose → VERBOSE
+# - Calls base_settings "$VERBOSE" and echo_default
+# ------------------------------------------------------------------------------
+default_var_settings() {
+  # Load var_* defaults with precedence: ENV var_* > default.vars > built-ins,
+  # then apply them via base_settings and print the summary.
+  # Allowed var_* keys (alphabetically sorted)
+  local VAR_WHITELIST=(
+    var_apt_cacher var_apt_cacher_ip var_brg var_cpu var_ctid var_disk var_fuse
+    var_gateway var_hostname var_ipv6_method var_ipv6_static var_mac var_mtu
+    var_net var_ns var_pw var_ram var_tags var_tun var_unprivileged
+    var_verbose var_vlan var_ssh var_ssh_authorized_key var_container_storage var_template_storage
+  )
+
+  # Snapshot: environment variables (highest precedence)
+  declare -A _HARD_ENV=()
+  local _k
+  for _k in "${VAR_WHITELIST[@]}"; do
+    if printenv "$_k" >/dev/null 2>&1; then _HARD_ENV["$_k"]=1; fi
+  done
+
+  # Find default.vars location (first match wins: system, user config, cwd)
+  local _find_default_vars
+  _find_default_vars() {
+    local f
+    for f in \
+      /usr/local/community-scripts/default.vars \
+      "$HOME/.config/community-scripts/default.vars" \
+      "./default.vars"; do
+      [ -f "$f" ] && {
+        echo "$f"
+        return 0
+      }
+    done
+    return 1
+  }
+  # Allow override of storages via env (for non-interactive use cases)
+  [ -n "${var_template_storage:-}" ] && TEMPLATE_STORAGE="$var_template_storage"
+  [ -n "${var_container_storage:-}" ] && CONTAINER_STORAGE="$var_container_storage"
+
+  # Create once, with storages already selected, no var_ctid/var_hostname lines
+  local _ensure_default_vars
+  _ensure_default_vars() {
+    _find_default_vars >/dev/null 2>&1 && return 0
+
+    local canonical="/usr/local/community-scripts/default.vars"
+    msg_info "No default.vars found. Creating ${canonical}"
+    mkdir -p /usr/local/community-scripts
+
+    # Pick storages before writing the file (always ask unless only one)
+    # Create a minimal temp file to write into
+    : >"$canonical"
+
+    # Base content (no var_ctid / var_hostname here)
+    cat >"$canonical" <<'EOF'
+# Community-Scripts defaults (var_* only). Lines starting with # are comments.
+# Precedence: ENV var_* > default.vars > built-ins.
+# Keep keys alphabetically sorted.
+
+# Container type
+var_unprivileged=1
+
+# Resources
+var_cpu=1
+var_disk=4
+var_ram=1024
+
+# Network
+var_brg=vmbr0
+var_net=dhcp
+var_ipv6_method=none
+# var_gateway=
+# var_ipv6_static=
+# var_vlan=
+# var_mtu=
+# var_mac=
+# var_ns=
+
+# SSH
+var_ssh=no
+# var_ssh_authorized_key=
+
+# APT cacher (optional)
+# var_apt_cacher=yes
+# var_apt_cacher_ip=192.168.1.10
+
+# Features/Tags/verbosity
+var_fuse=no
+var_tun=no
+var_tags=community-script
+var_verbose=no
+
+# Security (root PW) – empty => autologin
+# var_pw=
+EOF
+
+    # Now choose storages (always prompt unless just one exists)
+    choose_and_set_storage_for_file "$canonical" template
+    choose_and_set_storage_for_file "$canonical" container
+
+    chmod 0644 "$canonical"
+    msg_ok "Created ${canonical}"
+  }
+
+  # Whitelist check
+  local _is_whitelisted_key
+  _is_whitelisted_key() {
+    local k="$1"
+    local w
+    for w in "${VAR_WHITELIST[@]}"; do [ "$k" = "$w" ] && return 0; done
+    return 1
+  }
+
+  # Safe parser for KEY=VALUE lines (no source/eval of untrusted content)
+  local _load_vars_file
+  _load_vars_file() {
+    local file="$1"
+    [ -f "$file" ] || return 0
+    msg_info "Loading defaults from ${file}"
+    local line key val
+    while IFS= read -r line || [ -n "$line" ]; do
+      # Trim leading/trailing whitespace, then skip blanks and comments.
+      line="${line#"${line%%[![:space:]]*}"}"
+      line="${line%"${line##*[![:space:]]}"}"
+      [[ -z "$line" || "$line" == \#* ]] && continue
+      if [[ "$line" =~ ^([A-Za-z_][A-Za-z0-9_]*)=(.*)$ ]]; then
+        local var_key="${BASH_REMATCH[1]}"
+        local var_val="${BASH_REMATCH[2]}"
+
+        [[ "$var_key" != var_* ]] && continue
+        _is_whitelisted_key "$var_key" || {
+          msg_debug "Ignore non-whitelisted ${var_key}"
+          continue
+        }
+
+        # Strip quotes
+        if [[ "$var_val" =~ ^\"(.*)\"$ ]]; then
+          var_val="${BASH_REMATCH[1]}"
+        elif [[ "$var_val" =~ ^\'(.*)\'$ ]]; then
+          var_val="${BASH_REMATCH[1]}"
+        fi
+
+        # NOTE(review): this case is a second, redundant quote-strip pass —
+        # the regexes above already removed surrounding quotes; kept as-is.
+        case $var_val in
+        \"*\")
+          var_val=${var_val#\"}
+          var_val=${var_val%\"}
+          ;;
+        \'*\')
+          var_val=${var_val#\'}
+          var_val=${var_val%\'}
+          ;;
+        esac # Hard env wins
+        [[ -n "${_HARD_ENV[$var_key]:-}" ]] && continue
+        # Set only if not already exported
+        [[ -z "${!var_key+x}" ]] && export "${var_key}=${var_val}"
+      else
+        msg_warn "Malformed line in ${file}: ${line}"
+      fi
+    done <"$file"
+    msg_ok "Loaded ${file}"
+  }
+
+  # 1) Ensure file exists
+  _ensure_default_vars
+
+  # 2) Load file
+  local dv
+  dv="$(_find_default_vars)" || {
+    msg_error "default.vars not found after ensure step"
+    return 1
+  }
+  _load_vars_file "$dv"
+
+  # 3) Map var_verbose → VERBOSE (normalize truthy/falsy spellings)
+  if [[ -n "${var_verbose:-}" ]]; then
+    case "${var_verbose,,}" in 1 | yes | true | on) VERBOSE="yes" ;; 0 | no | false | off) VERBOSE="no" ;; *) VERBOSE="${var_verbose}" ;; esac
+  else
+    VERBOSE="no"
+  fi
+
+  # 4) Apply base settings and show summary
+  METHOD="mydefaults-global"
+  base_settings "$VERBOSE"
+  header_info
+  echo -e "${DEFAULT}${BOLD}${BL}Using My Defaults (default.vars) on node $PVEHOST_NAME${CL}"
+  echo_default
+}
+
+# ------------------------------------------------------------------------------
+# get_app_defaults_path()
+#
+# - Returns full path for app-specific defaults file
+# - Example: /usr/local/community-scripts/defaults/<app>.vars
+# ------------------------------------------------------------------------------
+
+get_app_defaults_path() {
+  # Resolve the per-app defaults file path, keyed by NSAPP
+  # (falling back to the lowercased APP name).
+  local app_key="${NSAPP:-${APP,,}}"
+  printf '%s\n' "/usr/local/community-scripts/defaults/${app_key}.vars"
+}
+
+# ------------------------------------------------------------------------------
+# maybe_offer_save_app_defaults
+#
+# - Called after advanced_settings returned with fully chosen values.
+# - If no .vars exists, offers to persist current advanced settings
+#   into /usr/local/community-scripts/defaults/<app>.vars
+# - Only writes whitelisted var_* keys.
+# - Extracts raw values from flags like ",gw=..." ",mtu=..." etc.
+# ------------------------------------------------------------------------------
+# Define the shared whitelist of persistable var_* keys once per shell;
+# skip if an earlier section already declared it.
+declare -p VAR_WHITELIST >/dev/null 2>&1 || declare -ag VAR_WHITELIST=(
+  var_apt_cacher var_apt_cacher_ip var_brg var_cpu var_ctid var_disk var_fuse
+  var_gateway var_hostname var_ipv6_method var_ipv6_static var_mac var_mtu
+  var_net var_ns var_pw var_ram var_tags var_tun var_unprivileged
+  var_verbose var_vlan var_ssh var_ssh_authorized_key var_container_storage var_template_storage
+)
+
+# Note: _is_whitelisted_key() is defined above in default_var_settings section
+
+_sanitize_value() {
+ # Disallow Command-Substitution / Shell-Meta
+ case "$1" in
+ *'$('* | *'`'* | *';'* | *'&'* | *'<('*)
+ echo ""
+ return 0
+ ;;
+ esac
+ echo "$1"
+}
+
+# Map-Parser: read var_* from file into _VARS_IN associative array
+# Note: Main _load_vars_file() with full validation is defined in default_var_settings section
+# This simplified version is used specifically for diff operations via _VARS_IN array
+declare -A _VARS_IN
+_load_vars_file_to_map() {
+  # Populate the global _VARS_IN map with whitelisted var_* entries read
+  # from a defaults file (used by the diff machinery). Missing file is a no-op.
+  local file="$1"
+  [ -f "$file" ] || return 0
+  _VARS_IN=() # Clear array
+  local line key val
+  while IFS= read -r line || [ -n "$line" ]; do
+    # Trim surrounding whitespace.
+    line="${line#"${line%%[![:space:]]*}"}"
+    line="${line%"${line##*[![:space:]]}"}"
+    # Skip blank lines and comments.
+    [[ -z "$line" || "$line" == \#* ]] && continue
+    # Split on the first '=' via parameter expansion (same semantics as
+    # `cut -d= -f1` / `-f2-`, without the subshells).
+    key="${line%%=*}"
+    val="${line#*=}"
+    if [[ "$key" == var_* ]] && _is_whitelisted_key "$key"; then
+      _VARS_IN["$key"]="$val"
+    fi
+  done <"$file"
+}
+
+# Diff function for two var_* files -> produces human-readable diff list for $1 (old) vs $2 (new)
+_build_vars_diff() {
+ local oldf="$1" newf="$2"
+ local k
+ local -A OLD=() NEW=()
+ _load_vars_file_to_map "$oldf"
+ for k in "${!_VARS_IN[@]}"; do OLD["$k"]="${_VARS_IN[$k]}"; done
+ _load_vars_file_to_map "$newf"
+ for k in "${!_VARS_IN[@]}"; do NEW["$k"]="${_VARS_IN[$k]}"; done
+
+ local out
+ out+="# Diff for ${APP} (${NSAPP})\n"
+ out+="# Old: ${oldf}\n# New: ${newf}\n\n"
+
+ local found_change=0
+
+ # Changed & Removed
+ for k in "${!OLD[@]}"; do
+ if [[ -v NEW["$k"] ]]; then
+ if [[ "${OLD[$k]}" != "${NEW[$k]}" ]]; then
+ out+="~ ${k}\n - old: ${OLD[$k]}\n + new: ${NEW[$k]}\n"
+ found_change=1
+ fi
+ else
+ out+="- ${k}\n - old: ${OLD[$k]}\n"
+ found_change=1
+ fi
+ done
+
+ # Added
+ for k in "${!NEW[@]}"; do
+ if [[ ! -v OLD["$k"] ]]; then
+ out+="+ ${k}\n + new: ${NEW[$k]}\n"
+ found_change=1
+ fi
+ done
+
+ if [[ $found_change -eq 0 ]]; then
+ out+="(No differences)\n"
+ fi
+
+ printf "%b" "$out"
+}
+
+# ------------------------------------------------------------------------------
+# _build_current_app_vars_tmp()
+#
+# - Serializes the current advanced settings into a temporary .vars file
+# - Echoes the temp file path on stdout for the caller to consume
+# - NOTE(review): tmpf and the _* scratch variables are not declared `local`,
+#   so they leak into the caller's scope — confirm this is intentional
+# ------------------------------------------------------------------------------
+_build_current_app_vars_tmp() {
+  tmpf="$(mktemp /tmp/${NSAPP:-app}.vars.new.XXXXXX)"
+
+  # NET/GW: GATE is stored with a pct-style ",gw=" prefix — strip it
+  _net="${NET:-}"
+  _gate=""
+  case "${GATE:-}" in
+  ,gw=*) _gate=$(echo "$GATE" | sed 's/^,gw=//') ;;
+  esac
+
+  # IPv6: static address/gateway only apply to the "static" method
+  _ipv6_method="${IPV6_METHOD:-auto}"
+  _ipv6_static=""
+  _ipv6_gateway=""
+  if [ "$_ipv6_method" = "static" ]; then
+    _ipv6_static="${IPV6_ADDR:-}"
+    # NOTE(review): _ipv6_gateway is captured here but never written to the
+    # file below — confirm whether var_ipv6_gateway should be emitted
+    _ipv6_gateway="${IPV6_GATE:-}"
+  fi
+
+  # MTU/VLAN/MAC: stored with pct-style prefixes (",mtu=", ",tag=", ",hwaddr=")
+  _mtu=""
+  _vlan=""
+  _mac=""
+  case "${MTU:-}" in
+  ,mtu=*) _mtu=$(echo "$MTU" | sed 's/^,mtu=//') ;;
+  esac
+  case "${VLAN:-}" in
+  ,tag=*) _vlan=$(echo "$VLAN" | sed 's/^,tag=//') ;;
+  esac
+  case "${MAC:-}" in
+  ,hwaddr=*) _mac=$(echo "$MAC" | sed 's/^,hwaddr=//') ;;
+  esac
+
+  # DNS / Searchdomain: stored as pct CLI flags ("-nameserver=", "-searchdomain=")
+  _ns=""
+  _searchdomain=""
+  case "${NS:-}" in
+  -nameserver=*) _ns=$(echo "$NS" | sed 's/^-nameserver=//') ;;
+  esac
+  case "${SD:-}" in
+  -searchdomain=*) _searchdomain=$(echo "$SD" | sed 's/^-searchdomain=//') ;;
+  esac
+
+  # SSH / APT / Features
+  _ssh="${SSH:-no}"
+  _ssh_auth="${SSH_AUTHORIZED_KEY:-}"
+  _apt_cacher="${APT_CACHER:-}"
+  _apt_cacher_ip="${APT_CACHER_IP:-}"
+  _fuse="${ENABLE_FUSE:-no}"
+  _tun="${ENABLE_TUN:-no}"
+  _tags="${TAGS:-}"
+  _verbose="${VERBOSE:-no}"
+
+  # Type / Resources / Identity
+  _unpriv="${CT_TYPE:-1}"
+  _cpu="${CORE_COUNT:-1}"
+  _ram="${RAM_SIZE:-1024}"
+  _disk="${DISK_SIZE:-4}"
+  _hostname="${HN:-$NSAPP}"
+
+  # Storage: prefer the live env values, fall back to previously loaded vars
+  _tpl_storage="${TEMPLATE_STORAGE:-${var_template_storage:-}}"
+  _ct_storage="${CONTAINER_STORAGE:-${var_container_storage:-}}"
+
+  # Resources are always emitted; optional values only when non-empty.
+  # Every value passes through _sanitize_value before being written.
+  {
+    echo "# App-specific defaults for ${APP} (${NSAPP})"
+    echo "# Generated on $(date -u '+%Y-%m-%dT%H:%M:%SZ')"
+    echo
+
+    echo "var_unprivileged=$(_sanitize_value "$_unpriv")"
+    echo "var_cpu=$(_sanitize_value "$_cpu")"
+    echo "var_ram=$(_sanitize_value "$_ram")"
+    echo "var_disk=$(_sanitize_value "$_disk")"
+
+    [ -n "${BRG:-}" ] && echo "var_brg=$(_sanitize_value "$BRG")"
+    [ -n "$_net" ] && echo "var_net=$(_sanitize_value "$_net")"
+    [ -n "$_gate" ] && echo "var_gateway=$(_sanitize_value "$_gate")"
+    [ -n "$_mtu" ] && echo "var_mtu=$(_sanitize_value "$_mtu")"
+    [ -n "$_vlan" ] && echo "var_vlan=$(_sanitize_value "$_vlan")"
+    [ -n "$_mac" ] && echo "var_mac=$(_sanitize_value "$_mac")"
+    [ -n "$_ns" ] && echo "var_ns=$(_sanitize_value "$_ns")"
+
+    [ -n "$_ipv6_method" ] && echo "var_ipv6_method=$(_sanitize_value "$_ipv6_method")"
+    [ -n "$_ipv6_static" ] && echo "var_ipv6_static=$(_sanitize_value "$_ipv6_static")"
+
+    [ -n "$_ssh" ] && echo "var_ssh=$(_sanitize_value "$_ssh")"
+    [ -n "$_ssh_auth" ] && echo "var_ssh_authorized_key=$(_sanitize_value "$_ssh_auth")"
+
+    [ -n "$_apt_cacher" ] && echo "var_apt_cacher=$(_sanitize_value "$_apt_cacher")"
+    [ -n "$_apt_cacher_ip" ] && echo "var_apt_cacher_ip=$(_sanitize_value "$_apt_cacher_ip")"
+
+    [ -n "$_fuse" ] && echo "var_fuse=$(_sanitize_value "$_fuse")"
+    [ -n "$_tun" ] && echo "var_tun=$(_sanitize_value "$_tun")"
+    [ -n "$_tags" ] && echo "var_tags=$(_sanitize_value "$_tags")"
+    [ -n "$_verbose" ] && echo "var_verbose=$(_sanitize_value "$_verbose")"
+
+    [ -n "$_hostname" ] && echo "var_hostname=$(_sanitize_value "$_hostname")"
+    [ -n "$_searchdomain" ] && echo "var_searchdomain=$(_sanitize_value "$_searchdomain")"
+
+    [ -n "$_tpl_storage" ] && echo "var_template_storage=$(_sanitize_value "$_tpl_storage")"
+    [ -n "$_ct_storage" ] && echo "var_container_storage=$(_sanitize_value "$_ct_storage")"
+  } >"$tmpf"
+
+  # Caller consumes the path via command substitution
+  echo "$tmpf"
+}
+
+# ------------------------------------------------------------------------------
+# maybe_offer_save_app_defaults()
+#
+# - Called after advanced_settings()
+# - Offers to save current values as app defaults if not existing
+# - If file exists: shows diff and allows Update, Keep, View Diff, or Cancel
+# - Always cleans up its two temp files before returning
+# ------------------------------------------------------------------------------
+maybe_offer_save_app_defaults() {
+  local app_vars_path
+  app_vars_path="$(get_app_defaults_path)"
+
+  # always build from current settings
+  local new_tmp diff_tmp
+  new_tmp="$(_build_current_app_vars_tmp)"
+  diff_tmp="$(mktemp -p /tmp "${NSAPP:-app}.vars.diff.XXXXXX)"
+
+  # 1) if no file → offer to create
+  if [[ ! -f "$app_vars_path" ]]; then
+    if whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \
+      --yesno "Save these advanced settings as defaults for ${APP}?\n\nThis will create:\n${app_vars_path}" 12 72; then
+      mkdir -p "$(dirname "$app_vars_path")"
+      # install -m gives the file deterministic 0644 permissions
+      install -m 0644 "$new_tmp" "$app_vars_path"
+      msg_ok "Saved app defaults: ${app_vars_path}"
+    fi
+    rm -f "$new_tmp" "$diff_tmp"
+    return 0
+  fi
+
+  # 2) if file exists → build diff
+  _build_vars_diff "$app_vars_path" "$new_tmp" >"$diff_tmp"
+
+  # if no differences → do nothing
+  if grep -q "^(No differences)$" "$diff_tmp"; then
+    rm -f "$new_tmp" "$diff_tmp"
+    return 0
+  fi
+
+  # 3) if file exists → show menu with default selection "Update Defaults"
+  local app_vars_file
+  app_vars_file="$(basename "$app_vars_path")"
+
+  while true; do
+    local sel
+    # whiptail cancel (non-zero exit) is mapped to the "Cancel" branch
+    sel="$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \
+      --title "APP DEFAULTS – ${APP}" \
+      --menu "Differences detected. What do you want to do?" 20 78 10 \
+      "Update Defaults" "Write new values to ${app_vars_file}" \
+      "Keep Current" "Keep existing defaults (no changes)" \
+      "View Diff" "Show a detailed diff" \
+      "Cancel" "Abort without changes" \
+      --default-item "Update Defaults" \
+      3>&1 1>&2 2>&3)" || { sel="Cancel"; }
+
+    case "$sel" in
+    "Update Defaults")
+      install -m 0644 "$new_tmp" "$app_vars_path"
+      msg_ok "Updated app defaults: ${app_vars_path}"
+      break
+      ;;
+    "Keep Current")
+      msg_info "Keeping current app defaults: ${app_vars_path}"
+      break
+      ;;
+    "View Diff")
+      # no break: after viewing the diff, loop back to the menu
+      whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \
+        --title "Diff – ${APP}" \
+        --scrolltext --textbox "$diff_tmp" 25 100
+      ;;
+    "Cancel" | *)
+      msg_info "Canceled. No changes to app defaults."
+      break
+      ;;
+    esac
+  done
+
+  rm -f "$new_tmp" "$diff_tmp"
+}
+
+# Ensure template/container storage is configured for the given vars file.
+# $1 = path to a .vars file
+# If both var_template_storage and var_container_storage are present, adopt
+# them into TEMPLATE_STORAGE/CONTAINER_STORAGE; otherwise interactively
+# choose and persist the missing configuration.
+ensure_storage_selection_for_vars_file() {
+  local vf="$1"
+
+  # Read stored values (if any). head -n1 guards against duplicate keys in
+  # the file producing a multi-line value that would corrupt pct options.
+  local tpl ct
+  tpl=$(grep -E '^var_template_storage=' "$vf" | head -n1 | cut -d= -f2-)
+  ct=$(grep -E '^var_container_storage=' "$vf" | head -n1 | cut -d= -f2-)
+
+  if [[ -n "$tpl" && -n "$ct" ]]; then
+    TEMPLATE_STORAGE="$tpl"
+    CONTAINER_STORAGE="$ct"
+    return 0
+  fi
+
+  # One or both missing → ask (or auto-pick) and persist into the file
+  choose_and_set_storage_for_file "$vf" template
+  choose_and_set_storage_for_file "$vf" container
+
+  msg_ok "Storage configuration saved to $(basename "$vf")"
+}
+
+# ------------------------------------------------------------------------------
+# diagnostics_menu()
+#
+# - Toggles the DIAGNOSTICS opt-in via a yes/no dialog
+# - Persists the choice into /usr/local/community-scripts/diagnostics
+#   (NOTE(review): sed -i does not create that file — assumes it already
+#   exists, presumably written by diagnostics_check; confirm)
+# ------------------------------------------------------------------------------
+diagnostics_menu() {
+  if [ "${DIAGNOSTICS:-no}" = "yes" ]; then
+    # Currently enabled → the affirmative button is labeled "No" (disable)
+    if whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \
+      --title "DIAGNOSTIC SETTINGS" \
+      --yesno "Send Diagnostics?\n\nCurrent: ${DIAGNOSTICS}" 10 58 \
+      --yes-button "No" --no-button "Back"; then
+      DIAGNOSTICS="no"
+      sed -i 's/^DIAGNOSTICS=.*/DIAGNOSTICS=no/' /usr/local/community-scripts/diagnostics
+      whiptail --msgbox "Diagnostics set to ${DIAGNOSTICS}." 8 58
+    fi
+  else
+    # Currently disabled → the affirmative button enables diagnostics
+    if whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \
+      --title "DIAGNOSTIC SETTINGS" \
+      --yesno "Send Diagnostics?\n\nCurrent: ${DIAGNOSTICS}" 10 58 \
+      --yes-button "Yes" --no-button "Back"; then
+      DIAGNOSTICS="yes"
+      sed -i 's/^DIAGNOSTICS=.*/DIAGNOSTICS=yes/' /usr/local/community-scripts/diagnostics
+      whiptail --msgbox "Diagnostics set to ${DIAGNOSTICS}." 8 58
+    fi
+  fi
+}
+
+ensure_global_default_vars_file() {
+ local vars_path="/usr/local/community-scripts/default.vars"
+ if [[ ! -f "$vars_path" ]]; then
+ mkdir -p "$(dirname "$vars_path")"
+ touch "$vars_path"
+ fi
+ echo "$vars_path"
+}
+
+# ------------------------------------------------------------------------------
+# install_script()
+#
+# - Main entrypoint for installation mode
+# - Runs safety checks (pve_check, root_check, maxkeys_check, diagnostics_check)
+# - Builds interactive menu (Default, Advanced, My Defaults, App Defaults, Settings)
+# - Applies chosen settings and triggers container build
+# - Accepts a preset via $mode or $1 (e.g. "default", "advanced") to skip the menu
+# ------------------------------------------------------------------------------
+install_script() {
+  pve_check
+  shell_check
+  root_check
+  arch_check
+  ssh_check
+  maxkeys_check
+  diagnostics_check
+
+  # Pause the instance pinger while a new container is being created
+  if systemctl is-active -q ping-instances.service; then
+    systemctl -q stop ping-instances.service
+  fi
+
+  NEXTID=$(pvesh get /cluster/nextid)
+  timezone=$(cat /etc/timezone)
+
+  # Show APP Header
+  header_info
+
+  # --- Support CLI argument as direct preset (default, advanced, …) ---
+  CHOICE="${mode:-${1:-}}"
+
+  # If no CLI argument → show whiptail menu
+  # Build menu dynamically based on available options
+  local appdefaults_option=""
+  local settings_option=""
+  local menu_items=(
+    "1" "Default Install"
+    "2" "Advanced Install"
+    "3" "My Defaults"
+  )
+
+  # Slot 4 is "App Defaults" only when a saved app.vars exists; otherwise
+  # slot 4 becomes "Settings" (numbering shifts accordingly)
+  if [ -f "$(get_app_defaults_path)" ]; then
+    appdefaults_option="4"
+    menu_items+=("4" "App Defaults for ${APP}")
+    settings_option="5"
+    menu_items+=("5" "Settings")
+  else
+    settings_option="4"
+    menu_items+=("4" "Settings")
+  fi
+
+  if [ -z "$CHOICE" ]; then
+
+    TMP_CHOICE=$(whiptail \
+      --backtitle "Proxmox VE Helper Scripts" \
+      --title "Community-Scripts Options" \
+      --ok-button "Select" --cancel-button "Exit Script" \
+      --notags \
+      --menu "\nChoose an option:\n  Use TAB or Arrow keys to navigate, ENTER to select.\n" \
+      20 60 9 \
+      "${menu_items[@]}" \
+      --default-item "1" \
+      3>&1 1>&2 2>&3) || exit_script
+    CHOICE="$TMP_CHOICE"
+  fi
+
+  APPDEFAULTS_OPTION="$appdefaults_option"
+  SETTINGS_OPTION="$settings_option"
+
+  # --- Main case ---
+  # defaults_target: the vars file whose storage config must be ensured below
+  # run_maybe_offer: whether to offer saving advanced settings as app defaults
+  local defaults_target=""
+  local run_maybe_offer="no"
+  case "$CHOICE" in
+  1 | default | DEFAULT)
+    header_info
+    echo -e "${DEFAULT}${BOLD}${BL}Using Default Settings on node $PVEHOST_NAME${CL}"
+    VERBOSE="no"
+    METHOD="default"
+    base_settings "$VERBOSE"
+    echo_default
+    defaults_target="$(ensure_global_default_vars_file)"
+    ;;
+  2 | advanced | ADVANCED)
+    header_info
+
+    echo -e "${ADVANCED}${BOLD}${RD}Using Advanced Install on node $PVEHOST_NAME${CL}"
+    echo -e "${INFO}${BOLD}${DGN}PVE Version ${PVEVERSION} (Kernel: ${KERNEL_VERSION})${CL}"
+    METHOD="advanced"
+    base_settings
+    advanced_settings
+    defaults_target="$(ensure_global_default_vars_file)"
+    run_maybe_offer="yes"
+    ;;
+  3 | mydefaults | MYDEFAULTS)
+    default_var_settings || {
+      msg_error "Failed to apply default.vars"
+      exit 1
+    }
+    defaults_target="/usr/local/community-scripts/default.vars"
+    ;;
+  "$APPDEFAULTS_OPTION" | appdefaults | APPDEFAULTS)
+    if [ -f "$(get_app_defaults_path)" ]; then
+      header_info
+      echo -e "${DEFAULT}${BOLD}${BL}Using App Defaults for ${APP} on node $PVEHOST_NAME${CL}"
+      METHOD="appdefaults"
+      base_settings
+      _load_vars_file "$(get_app_defaults_path)"
+      echo_default
+      defaults_target="$(get_app_defaults_path)"
+    else
+      msg_error "No App Defaults available for ${APP}"
+      exit 1
+    fi
+    ;;
+  "$SETTINGS_OPTION" | settings | SETTINGS)
+    settings_menu
+    defaults_target=""
+    ;;
+  *)
+    echo -e "${CROSS}${RD}Invalid option: $CHOICE${CL}"
+    exit 1
+    ;;
+  esac
+
+  # Make sure the chosen vars file has storage configured (may prompt)
+  if [[ -n "$defaults_target" ]]; then
+    ensure_storage_selection_for_vars_file "$defaults_target"
+  fi
+
+  if [[ "$run_maybe_offer" == "yes" ]]; then
+    maybe_offer_save_app_defaults
+  fi
+}
+
+edit_default_storage() {
+ local vf="/usr/local/community-scripts/default.vars"
+
+ # Ensure file exists
+ if [[ ! -f "$vf" ]]; then
+ mkdir -p "$(dirname "$vf")"
+ touch "$vf"
+ fi
+
+ # Let ensure_storage_selection_for_vars_file handle everything
+ ensure_storage_selection_for_vars_file "$vf"
+}
+
+# ------------------------------------------------------------------------------
+# settings_menu()
+#
+# - Loops over the SETTINGS menu until the user backs out (whiptail Cancel)
+# - Slot 4 is "Edit App.vars" only when an app.vars file exists for ${APP};
+#   otherwise slot 4 is "Exit" and slot 5 does not exist
+# ------------------------------------------------------------------------------
+settings_menu() {
+  while true; do
+    local settings_items=(
+      "1" "Manage API-Diagnostic Setting"
+      "2" "Edit Default.vars"
+      "3" "Edit Default Storage"
+    )
+    if [ -f "$(get_app_defaults_path)" ]; then
+      settings_items+=("4" "Edit App.vars for ${APP}")
+      settings_items+=("5" "Exit")
+    else
+      settings_items+=("4" "Exit")
+    fi
+
+    local choice
+    choice=$(whiptail --backtitle "Proxmox VE Helper Scripts" \
+      --title "Community-Scripts SETTINGS Menu" \
+      --ok-button "OK" --cancel-button "Back" \
+      --menu "\n\nChoose a settings option:\n\nUse TAB or Arrow keys to navigate, ENTER to select." 20 60 9 \
+      "${settings_items[@]}" \
+      3>&1 1>&2 2>&3) || break
+
+    case "$choice" in
+    1) diagnostics_menu ;;
+    2) ${EDITOR:-nano} /usr/local/community-scripts/default.vars ;;
+    3) edit_default_storage ;;
+    4)
+      # Slot 4 is either "Edit App.vars" or "Exit" depending on file presence
+      if [ -f "$(get_app_defaults_path)" ]; then
+        ${EDITOR:-nano} "$(get_app_defaults_path)"
+      else
+        exit_script
+      fi
+      ;;
+    5) exit_script ;;
+    esac
+  done
+}
+
+# ===== Unified storage selection & writing to vars files =====
+_write_storage_to_vars() {
+ # $1 = vars_file, $2 = key (var_container_storage / var_template_storage), $3 = value
+ local vf="$1" key="$2" val="$3"
+ # remove uncommented and commented versions to avoid duplicates
+ sed -i "/^[#[:space:]]*${key}=/d" "$vf"
+ echo "${key}=${val}" >>"$vf"
+}
+
+choose_and_set_storage_for_file() {
+ # $1 = vars_file, $2 = class ('container'|'template')
+ local vf="$1" class="$2" key="" current=""
+ case "$class" in
+ container) key="var_container_storage" ;;
+ template) key="var_template_storage" ;;
+ *)
+ msg_error "Unknown storage class: $class"
+ return 1
+ ;;
+ esac
+
+ current=$(awk -F= -v k="^${key}=" '$0 ~ k {print $2; exit}' "$vf")
+
+ # If only one storage exists for the content type, auto-pick. Else always ask (your wish #4).
+ local content="rootdir"
+ [[ "$class" == "template" ]] && content="vztmpl"
+ local count
+ count=$(pvesm status -content "$content" | awk 'NR>1{print $1}' | wc -l)
+
+ if [[ "$count" -eq 1 ]]; then
+ STORAGE_RESULT=$(pvesm status -content "$content" | awk 'NR>1{print $1; exit}')
+ STORAGE_INFO=""
+ else
+ # If the current value is preselectable, we could show it, but per your requirement we always offer selection
+ select_storage "$class" || return 1
+ fi
+
+ _write_storage_to_vars "$vf" "$key" "$STORAGE_RESULT"
+
+ # Keep environment in sync for later steps (e.g. app-default save)
+ if [[ "$class" == "container" ]]; then
+ export var_container_storage="$STORAGE_RESULT"
+ export CONTAINER_STORAGE="$STORAGE_RESULT"
+ else
+ export var_template_storage="$STORAGE_RESULT"
+ export TEMPLATE_STORAGE="$STORAGE_RESULT"
+ fi
+
+ msg_ok "Updated ${key} → ${STORAGE_RESULT}"
+}
+
+# ------------------------------------------------------------------------------
+# check_container_resources()
+#
+# - Compares host RAM/CPU (free -m / nproc) with required var_ram/var_cpu
+# - Warns if under-provisioned and asks user to continue or abort
+# ------------------------------------------------------------------------------
+check_container_resources() {
+  # Snapshot host resources: MiB of RAM and logical CPU count
+  current_ram=$(free -m | awk 'NR==2{print $2}')
+  current_cpu=$(nproc)
+
+  if [[ "$current_ram" -lt "$var_ram" ]] || [[ "$current_cpu" -lt "$var_cpu" ]]; then
+    echo -e "\n${INFO}${HOLD} ${GN}Required: ${var_cpu} CPU, ${var_ram}MB RAM ${CL}| ${RD}Current: ${current_cpu} CPU, ${current_ram}MB RAM${CL}"
+    echo -e "${YWB}Please ensure that the ${APP} LXC is configured with at least ${var_cpu} vCPU and ${var_ram} MB RAM for the build process.${CL}\n"
+    echo -ne "${INFO}${HOLD} May cause data loss! ${INFO} Continue update with under-provisioned LXC? "
+    read -r prompt
+    # Accept 'y' or 'yes' (case-insensitive) for consistency with
+    # check_container_storage(); anything else aborts the script.
+    if [[ ! ${prompt,,} =~ ^(y|yes)$ ]]; then
+      echo -e "${CROSS}${HOLD} ${YWB}Exiting based on user input.${CL}"
+      exit 1
+    fi
+  else
+    echo -e ""
+  fi
+}
+
+# ------------------------------------------------------------------------------
+# check_container_storage()
+#
+# - Checks /boot partition usage (df, 1K blocks)
+# - Warns if usage >80% and asks user confirmation before proceeding
+# ------------------------------------------------------------------------------
+check_container_storage() {
+  # Scope all work variables locally (previously only used_size was local,
+  # so total_size/usage leaked into the global scope).
+  local total_size used_size usage
+  total_size=$(df /boot --output=size | tail -n 1)
+  used_size=$(df /boot --output=used | tail -n 1)
+  usage=$((100 * used_size / total_size))
+  if ((usage > 80)); then
+    echo -e "${INFO}${HOLD} ${YWB}Warning: Storage is dangerously low (${usage}%).${CL}"
+    echo -ne "Continue anyway? "
+    read -r prompt
+    # Accept 'y' or 'yes' (case-insensitive); anything else aborts
+    if [[ ! ${prompt,,} =~ ^(y|yes)$ ]]; then
+      echo -e "${CROSS}${HOLD}${YWB}Exiting based on user input.${CL}"
+      exit 1
+    fi
+  fi
+}
+
+# ------------------------------------------------------------------------------
+# ssh_extract_keys_from_file()
+#
+# - Extracts valid SSH public keys from given file
+# - Supports RSA, Ed25519, ECDSA and filters out comments/invalid lines
+# - Strips CR characters first so Windows-edited files parse cleanly
+# - For lines with leading options (e.g. "command=..."), emits from the
+#   first key-type token onward, i.e. the options are dropped
+# ------------------------------------------------------------------------------
+ssh_extract_keys_from_file() {
+  local f="$1"
+  [[ -r "$f" ]] || return 0
+  tr -d '\r' <"$f" | awk '
+    /^[[:space:]]*#/ {next}
+    /^[[:space:]]*$/ {next}
+    # bare form: type base64 [comment]
+    /^(ssh-(rsa|ed25519)|ecdsa-sha2-nistp256|sk-(ssh-ed25519|ecdsa-sha2-nistp256))[[:space:]]+/ {print; next}
+    # with options: emit starting at the first key-type token
+    {
+      match($0, /(ssh-(rsa|ed25519)|ecdsa-sha2-nistp256|sk-(ssh-ed25519|ecdsa-sha2-nistp256))[[:space:]]+/)
+      if (RSTART>0) { print substr($0, RSTART) }
+    }
+  '
+}
+
+# ------------------------------------------------------------------------------
+# ssh_build_choices_from_files()
+#
+# - Builds interactive whiptail checklist of available SSH keys
+# - Generates fingerprint, type and comment for each key
+# - Outputs: CHOICES (whiptail tag/label/state triplets), COUNT (number of
+#   keys found), MAPFILE (temp file mapping "K<n>|<key line>")
+# - NOTE(review): the MAPFILE temp file is never removed here — callers read
+#   it after selection; confirm cleanup happens elsewhere
+# ------------------------------------------------------------------------------
+ssh_build_choices_from_files() {
+  local -a files=("$@")
+  CHOICES=()
+  COUNT=0
+  MAPFILE="$(mktemp)"
+  # NOTE(review): ln mirrors COUNT but is never read afterwards
+  local id key typ fp cmt base ln=0
+
+  for f in "${files[@]}"; do
+    [[ -f "$f" && -r "$f" ]] || continue
+    base="$(basename -- "$f")"
+    # Skip non-key files; for id_* private-key names only accept the .pub
+    case "$base" in
+    known_hosts | known_hosts.* | config) continue ;;
+    id_*) [[ "$f" != *.pub ]] && continue ;;
+    esac
+
+    # map every key in file
+    while IFS= read -r key; do
+      [[ -n "$key" ]] || continue
+
+      typ=""
+      fp=""
+      cmt=""
+      # Only the pure key part (without options) is already included in 'key',
+      # so a plain word-split yields type, base64 blob and optional comment.
+      read -r _typ _b64 _cmt <<<"$key"
+      typ="${_typ:-key}"
+      cmt="${_cmt:-}"
+      # Fingerprint via ssh-keygen (if available)
+      if command -v ssh-keygen >/dev/null 2>&1; then
+        fp="$(printf '%s\n' "$key" | ssh-keygen -lf - 2>/dev/null | awk '{print $2}')"
+      fi
+      # Shorten long comments so the checklist stays readable
+      [[ ${#cmt} -gt 40 ]] && cmt="${cmt:0:37}..."
+
+      ln=$((ln + 1))
+      COUNT=$((COUNT + 1))
+      id="K${COUNT}"
+      echo "${id}|${key}" >>"$MAPFILE"
+      CHOICES+=("$id" "[$typ] ${fp:+$fp }${cmt:+$cmt }— ${base}" "OFF")
+    done < <(ssh_extract_keys_from_file "$f")
+  done
+}
+
+# ------------------------------------------------------------------------------
+# ssh_discover_default_files()
+#
+# - Scans standard paths for SSH key material
+# - Covers /root/.ssh/*.pub, authorized_keys files and /etc/ssh locations
+# - Emits the candidate paths NUL-delimited on stdout
+# ------------------------------------------------------------------------------
+ssh_discover_default_files() {
+  local -a candidates
+  # nullglob drops unmatched globs instead of keeping the literal pattern
+  shopt -s nullglob
+  candidates=(
+    /root/.ssh/authorized_keys
+    /root/.ssh/authorized_keys2
+    /root/.ssh/*.pub
+    /etc/ssh/authorized_keys
+    /etc/ssh/authorized_keys.d/*
+  )
+  shopt -u nullglob
+  printf '%s\0' "${candidates[@]}"
+}
+
+configure_ssh_settings() {
+ SSH_KEYS_FILE="$(mktemp)"
+ : >"$SSH_KEYS_FILE"
+
+ IFS=$'\0' read -r -d '' -a _def_files < <(ssh_discover_default_files && printf '\0')
+ ssh_build_choices_from_files "${_def_files[@]}"
+ local default_key_count="$COUNT"
+
+ local ssh_key_mode
+ if [[ "$default_key_count" -gt 0 ]]; then
+ ssh_key_mode=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --title "SSH KEY SOURCE" --menu \
+ "Provision SSH keys for root:" 14 72 4 \
+ "found" "Select from detected keys (${default_key_count})" \
+ "manual" "Paste a single public key" \
+ "folder" "Scan another folder (path or glob)" \
+ "none" "No keys" 3>&1 1>&2 2>&3) || exit_script
+ else
+ ssh_key_mode=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --title "SSH KEY SOURCE" --menu \
+ "No host keys detected; choose manual/none:" 12 72 2 \
+ "manual" "Paste a single public key" \
+ "none" "No keys" 3>&1 1>&2 2>&3) || exit_script
+ fi
+
+ case "$ssh_key_mode" in
+ found)
+ local selection
+ selection=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --title "SELECT HOST KEYS" \
+ --checklist "Select one or more keys to import:" 20 140 10 "${CHOICES[@]}" 3>&1 1>&2 2>&3) || exit_script
+ for tag in $selection; do
+ tag="${tag%\"}"
+ tag="${tag#\"}"
+ local line
+ line=$(grep -E "^${tag}\|" "$MAPFILE" | head -n1 | cut -d'|' -f2-)
+ [[ -n "$line" ]] && printf '%s\n' "$line" >>"$SSH_KEYS_FILE"
+ done
+ ;;
+ manual)
+ SSH_AUTHORIZED_KEY="$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \
+ --inputbox "Paste one SSH public key line (ssh-ed25519/ssh-rsa/...)" 10 72 --title "SSH Public Key" 3>&1 1>&2 2>&3)"
+ [[ -n "$SSH_AUTHORIZED_KEY" ]] && printf '%s\n' "$SSH_AUTHORIZED_KEY" >>"$SSH_KEYS_FILE"
+ ;;
+ folder)
+ local glob_path
+ glob_path=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \
+ --inputbox "Enter a folder or glob to scan (e.g. /root/.ssh/*.pub)" 10 72 --title "Scan Folder/Glob" 3>&1 1>&2 2>&3)
+ if [[ -n "$glob_path" ]]; then
+ shopt -s nullglob
+ read -r -a _scan_files <<<"$glob_path"
+ shopt -u nullglob
+ if [[ "${#_scan_files[@]}" -gt 0 ]]; then
+ ssh_build_choices_from_files "${_scan_files[@]}"
+ if [[ "$COUNT" -gt 0 ]]; then
+ local folder_selection
+ folder_selection=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --title "SELECT FOLDER KEYS" \
+ --checklist "Select key(s) to import:" 20 78 10 "${CHOICES[@]}" 3>&1 1>&2 2>&3) || exit_script
+ for tag in $folder_selection; do
+ tag="${tag%\"}"
+ tag="${tag#\"}"
+ local line
+ line=$(grep -E "^${tag}\|" "$MAPFILE" | head -n1 | cut -d'|' -f2-)
+ [[ -n "$line" ]] && printf '%s\n' "$line" >>"$SSH_KEYS_FILE"
+ done
+ else
+ whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --msgbox "No keys found in: $glob_path" 8 60
+ fi
+ else
+ whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --msgbox "Path/glob returned no files." 8 60
+ fi
+ fi
+ ;;
+ none)
+ :
+ ;;
+ esac
+
+ if [[ -s "$SSH_KEYS_FILE" ]]; then
+ sort -u -o "$SSH_KEYS_FILE" "$SSH_KEYS_FILE"
+ printf '\n' >>"$SSH_KEYS_FILE"
+ fi
+
+ if [[ -s "$SSH_KEYS_FILE" || "$PW" == -password* ]]; then
+ if (whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --defaultno --title "SSH ACCESS" --yesno "Enable root SSH access?" 10 58); then
+ SSH="yes"
+ else
+ SSH="no"
+ fi
+ else
+ SSH="no"
+ fi
+}
+
+# ------------------------------------------------------------------------------
+# start()
+#
+# - Entry point of script
+# - On Proxmox host (pveversion present): calls install_script
+# - In silent mode (PHS_SILENT=1): runs update_script without prompting
+# - Otherwise: shows update/setting menu
+# ------------------------------------------------------------------------------
+start() {
+  # Pull in shared tooling helpers for both the install and update paths
+  source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/tools.func)
+  if command -v pveversion >/dev/null 2>&1; then
+    # Running on the Proxmox host → installation mode
+    install_script || return 0
+    return 0
+  elif [[ "${PHS_SILENT:-}" == "1" ]]; then
+    # Non-interactive update. The old test `[ ! -z ${PHS_SILENT+x} ]` only
+    # worked by accident of unquoted expansion and was redundant with the
+    # equality check; ${PHS_SILENT:-} is equivalent and `set -u`-safe.
+    VERBOSE="no"
+    set_std_mode
+    update_script
+  else
+    CHOICE=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --title "${APP} LXC Update/Setting" --menu \
+      "Support/Update functions for ${APP} LXC.  Choose an option:" \
+      12 60 3 \
+      "1" "YES (Silent Mode)" \
+      "2" "YES (Verbose Mode)" \
+      "3" "NO (Cancel Update)" --nocancel --default-item "1" 3>&1 1>&2 2>&3)
+
+    case "$CHOICE" in
+    1)
+      VERBOSE="no"
+      set_std_mode
+      ;;
+    2)
+      VERBOSE="yes"
+      set_std_mode
+      ;;
+    3)
+      clear
+      exit_script
+      exit
+      ;;
+    esac
+    update_script
+  fi
+}
+
+# ------------------------------------------------------------------------------
+# build_container()
+#
+# - Creates and configures the LXC container
+# - Builds network string and applies features (FUSE, TUN, VAAPI passthrough)
+# - Starts container and waits for network connectivity
+# - Installs base packages, SSH keys, and runs -install.sh
+# ------------------------------------------------------------------------------
+build_container() {
+ # if [ "$VERBOSE" == "yes" ]; then set -x; fi
+
+ NET_STRING="-net0 name=eth0,bridge=${BRG:-vmbr0}"
+
+ # MAC
+ if [[ -n "$MAC" ]]; then
+ case "$MAC" in
+ ,hwaddr=*) NET_STRING+="$MAC" ;;
+ *) NET_STRING+=",hwaddr=$MAC" ;;
+ esac
+ fi
+
+ # IP (immer zwingend, Standard dhcp)
+ NET_STRING+=",ip=${NET:-dhcp}"
+
+ # Gateway
+ if [[ -n "$GATE" ]]; then
+ case "$GATE" in
+ ,gw=*) NET_STRING+="$GATE" ;;
+ *) NET_STRING+=",gw=$GATE" ;;
+ esac
+ fi
+
+ # VLAN
+ if [[ -n "$VLAN" ]]; then
+ case "$VLAN" in
+ ,tag=*) NET_STRING+="$VLAN" ;;
+ *) NET_STRING+=",tag=$VLAN" ;;
+ esac
+ fi
+
+ # MTU
+ if [[ -n "$MTU" ]]; then
+ case "$MTU" in
+ ,mtu=*) NET_STRING+="$MTU" ;;
+ *) NET_STRING+=",mtu=$MTU" ;;
+ esac
+ fi
+
+ # IPv6 Handling
+ case "$IPV6_METHOD" in
+ auto) NET_STRING="$NET_STRING,ip6=auto" ;;
+ dhcp) NET_STRING="$NET_STRING,ip6=dhcp" ;;
+ static)
+ NET_STRING="$NET_STRING,ip6=$IPV6_ADDR"
+ [ -n "$IPV6_GATE" ] && NET_STRING="$NET_STRING,gw6=$IPV6_GATE"
+ ;;
+ none) ;;
+ esac
+
+ if [ "$CT_TYPE" == "1" ]; then
+ FEATURES="keyctl=1,nesting=1"
+ else
+ FEATURES="nesting=1"
+ fi
+
+ if [ "$ENABLE_FUSE" == "yes" ]; then
+ FEATURES="$FEATURES,fuse=1"
+ fi
+
+ TEMP_DIR=$(mktemp -d)
+ pushd "$TEMP_DIR" >/dev/null
+ if [ "$var_os" == "alpine" ]; then
+ export FUNCTIONS_FILE_PATH="$(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/alpine-install.func)"
+ else
+ export FUNCTIONS_FILE_PATH="$(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/install.func)"
+ fi
+ export DIAGNOSTICS="$DIAGNOSTICS"
+ export RANDOM_UUID="$RANDOM_UUID"
+ export CACHER="$APT_CACHER"
+ export CACHER_IP="$APT_CACHER_IP"
+ export tz="$timezone"
+ export APPLICATION="$APP"
+ export app="$NSAPP"
+ export PASSWORD="$PW"
+ export VERBOSE="$VERBOSE"
+ export SSH_ROOT="${SSH}"
+ export SSH_AUTHORIZED_KEY
+ export CTID="$CT_ID"
+ export CTTYPE="$CT_TYPE"
+ export ENABLE_FUSE="$ENABLE_FUSE"
+ export ENABLE_TUN="$ENABLE_TUN"
+ export PCT_OSTYPE="$var_os"
+ export PCT_OSVERSION="$var_version"
+ export PCT_DISK_SIZE="$DISK_SIZE"
+ export PCT_OPTIONS="
+ -features $FEATURES
+ -hostname $HN
+ -tags $TAGS
+ $SD
+ $NS
+ $NET_STRING
+ -onboot 1
+ -cores $CORE_COUNT
+ -memory $RAM_SIZE
+ -unprivileged $CT_TYPE
+ $PW
+"
+ export TEMPLATE_STORAGE="${var_template_storage:-}"
+ export CONTAINER_STORAGE="${var_container_storage:-}"
+ create_lxc_container || exit $?
+
+ LXC_CONFIG="/etc/pve/lxc/${CTID}.conf"
+
+ # ============================================================================
+ # GPU/USB PASSTHROUGH CONFIGURATION
+ # ============================================================================
+
+ # List of applications that benefit from GPU acceleration
+ GPU_APPS=(
+ "immich" "channels" "emby" "ersatztv" "frigate"
+ "jellyfin" "plex" "scrypted" "tdarr" "unmanic"
+ "ollama" "fileflows" "open-webui" "tunarr" "debian"
+ "handbrake" "sunshine" "moonlight" "kodi" "stremio"
+ "viseron"
+ )
+
+ # Check if app needs GPU
+ is_gpu_app() {
+ local app="${1,,}"
+ for gpu_app in "${GPU_APPS[@]}"; do
+ [[ "$app" == "${gpu_app,,}" ]] && return 0
+ done
+ return 1
+ }
+
+ # Detect all available GPU devices
+ detect_gpu_devices() {
+ INTEL_DEVICES=()
+ AMD_DEVICES=()
+ NVIDIA_DEVICES=()
+
+ # Store PCI info to avoid multiple calls
+ local pci_vga_info=$(lspci -nn 2>/dev/null | grep -E "VGA|Display|3D")
+
+ # Check for Intel GPU - look for Intel vendor ID [8086]
+ if echo "$pci_vga_info" | grep -q "\[8086:"; then
+ msg_info "Detected Intel GPU"
+ if [[ -d /dev/dri ]]; then
+ for d in /dev/dri/renderD* /dev/dri/card*; do
+ [[ -e "$d" ]] && INTEL_DEVICES+=("$d")
+ done
+ fi
+ fi
+
+ # Check for AMD GPU - look for AMD vendor IDs [1002] (AMD/ATI) or [1022] (AMD)
+ if echo "$pci_vga_info" | grep -qE "\[1002:|\[1022:"; then
+ msg_info "Detected AMD GPU"
+ if [[ -d /dev/dri ]]; then
+ # Only add if not already claimed by Intel
+ if [[ ${#INTEL_DEVICES[@]} -eq 0 ]]; then
+ for d in /dev/dri/renderD* /dev/dri/card*; do
+ [[ -e "$d" ]] && AMD_DEVICES+=("$d")
+ done
+ fi
+ fi
+ fi
+
+ # Check for NVIDIA GPU - look for NVIDIA vendor ID [10de]
+ if echo "$pci_vga_info" | grep -q "\[10de:"; then
+ msg_info "Detected NVIDIA GPU"
+ if ! check_nvidia_host_setup; then
+ msg_error "NVIDIA host setup incomplete. Skipping GPU passthrough."
+ msg_info "Fix NVIDIA drivers on host, then recreate container or passthrough manually."
+ return 0
+ fi
+
+ for d in /dev/nvidia* /dev/nvidiactl /dev/nvidia-modeset; do
+ [[ -e "$d" ]] && NVIDIA_DEVICES+=("$d")
+ done
+
+ if [[ ${#NVIDIA_DEVICES[@]} -eq 0 ]]; then
+ msg_warn "NVIDIA GPU detected but no /dev/nvidia* devices found"
+ msg_warn "Please install NVIDIA drivers on host: apt install nvidia-driver"
+ else
+ if [[ "$CT_TYPE" == "0" ]]; then
+ cat <>"$LXC_CONFIG"
+ # NVIDIA GPU Passthrough (privileged)
+ lxc.cgroup2.devices.allow: c 195:* rwm
+ lxc.cgroup2.devices.allow: c 243:* rwm
+ lxc.mount.entry: /dev/nvidia0 dev/nvidia0 none bind,optional,create=file
+ lxc.mount.entry: /dev/nvidiactl dev/nvidiactl none bind,optional,create=file
+ lxc.mount.entry: /dev/nvidia-uvm dev/nvidia-uvm none bind,optional,create=file
+ lxc.mount.entry: /dev/nvidia-uvm-tools dev/nvidia-uvm-tools none bind,optional,create=file
+EOF
+
+ if [[ -e /dev/dri/renderD128 ]]; then
+ echo "lxc.mount.entry: /dev/dri/renderD128 dev/dri/renderD128 none bind,optional,create=file" >>"$LXC_CONFIG"
+ fi
+
+ export GPU_TYPE="NVIDIA"
+ export NVIDIA_DRIVER_VERSION=$(nvidia-smi --query-gpu=driver_version --format=csv,noheader 2>/dev/null | head -n1)
+ msg_ok "NVIDIA GPU passthrough configured (driver: ${NVIDIA_DRIVER_VERSION})"
+ else
+ msg_warn "NVIDIA passthrough only supported for privileged containers"
+ return 0
+ fi
+ fi
+ fi
+
+ # Debug output
+ msg_debug "Intel devices: ${INTEL_DEVICES[*]}"
+ msg_debug "AMD devices: ${AMD_DEVICES[*]}"
+ msg_debug "NVIDIA devices: ${NVIDIA_DEVICES[*]}"
+ }
+
+ # Configure USB passthrough for privileged containers
+ configure_usb_passthrough() {
+ if [[ "$CT_TYPE" != "0" ]]; then
+ return 0
+ fi
+
+ msg_info "Configuring automatic USB passthrough (privileged container)"
+ cat <>"$LXC_CONFIG"
+# Automatic USB passthrough (privileged container)
+lxc.cgroup2.devices.allow: a
+lxc.cap.drop:
+lxc.cgroup2.devices.allow: c 188:* rwm
+lxc.cgroup2.devices.allow: c 189:* rwm
+lxc.mount.entry: /dev/serial/by-id dev/serial/by-id none bind,optional,create=dir
+lxc.mount.entry: /dev/ttyUSB0 dev/ttyUSB0 none bind,optional,create=file
+lxc.mount.entry: /dev/ttyUSB1 dev/ttyUSB1 none bind,optional,create=file
+lxc.mount.entry: /dev/ttyACM0 dev/ttyACM0 none bind,optional,create=file
+lxc.mount.entry: /dev/ttyACM1 dev/ttyACM1 none bind,optional,create=file
+EOF
+ msg_ok "USB passthrough configured"
+ }
+
+ # Configure GPU passthrough
+ configure_gpu_passthrough() {
+ # Skip if not a GPU app and not privileged
+ if [[ "$CT_TYPE" != "0" ]] && ! is_gpu_app "$APP"; then
+ return 0
+ fi
+
+ detect_gpu_devices
+
+ # Count available GPU types
+ local gpu_count=0
+ local available_gpus=()
+
+ if [[ ${#INTEL_DEVICES[@]} -gt 0 ]]; then
+ available_gpus+=("INTEL")
+ gpu_count=$((gpu_count + 1))
+ fi
+
+ if [[ ${#AMD_DEVICES[@]} -gt 0 ]]; then
+ available_gpus+=("AMD")
+ gpu_count=$((gpu_count + 1))
+ fi
+
+ if [[ ${#NVIDIA_DEVICES[@]} -gt 0 ]]; then
+ available_gpus+=("NVIDIA")
+ gpu_count=$((gpu_count + 1))
+ fi
+
+ if [[ $gpu_count -eq 0 ]]; then
+ msg_info "No GPU devices found for passthrough"
+ return 0
+ fi
+
+ local selected_gpu=""
+
+ if [[ $gpu_count -eq 1 ]]; then
+ # Automatic selection for single GPU
+ selected_gpu="${available_gpus[0]}"
+ msg_info "Automatically configuring ${selected_gpu} GPU passthrough"
+ else
+ # Multiple GPUs - ask user
+ echo -e "\n${INFO} Multiple GPU types detected:"
+ for gpu in "${available_gpus[@]}"; do
+ echo " - $gpu"
+ done
+ read -rp "Which GPU type to passthrough? (${available_gpus[*]}): " selected_gpu
+ selected_gpu="${selected_gpu^^}"
+
+ # Validate selection
+ local valid=0
+ for gpu in "${available_gpus[@]}"; do
+ [[ "$selected_gpu" == "$gpu" ]] && valid=1
+ done
+
+ if [[ $valid -eq 0 ]]; then
+ msg_warn "Invalid selection. Skipping GPU passthrough."
+ return 0
+ fi
+ fi
+
+ # Apply passthrough configuration based on selection
+ local dev_idx=0
+
+ case "$selected_gpu" in
+ INTEL | AMD)
+ local devices=()
+ [[ "$selected_gpu" == "INTEL" ]] && devices=("${INTEL_DEVICES[@]}")
+ [[ "$selected_gpu" == "AMD" ]] && devices=("${AMD_DEVICES[@]}")
+
+ # For Proxmox WebUI visibility, add as dev0, dev1 etc.
+ for dev in "${devices[@]}"; do
+ if [[ "$CT_TYPE" == "0" ]]; then
+ # Privileged container - use dev entries for WebUI visibility
+ # Use initial GID 104 (render) for renderD*, 44 (video) for card*
+ if [[ "$dev" =~ renderD ]]; then
+ echo "dev${dev_idx}: $dev,gid=104" >>"$LXC_CONFIG"
+ else
+ echo "dev${dev_idx}: $dev,gid=44" >>"$LXC_CONFIG"
+ fi
+ dev_idx=$((dev_idx + 1))
+
+ # Also add cgroup allows for privileged containers
+ local major minor
+ major=$(stat -c '%t' "$dev" 2>/dev/null || echo "0")
+ minor=$(stat -c '%T' "$dev" 2>/dev/null || echo "0")
+
+ if [[ "$major" != "0" && "$minor" != "0" ]]; then
+ echo "lxc.cgroup2.devices.allow: c $((0x$major)):$((0x$minor)) rwm" >>"$LXC_CONFIG"
+ fi
+ else
+ # Unprivileged container
+ if [[ "$dev" =~ renderD ]]; then
+ echo "dev${dev_idx}: $dev,uid=0,gid=104" >>"$LXC_CONFIG"
+ else
+ echo "dev${dev_idx}: $dev,uid=0,gid=44" >>"$LXC_CONFIG"
+ fi
+ dev_idx=$((dev_idx + 1))
+ fi
+ done
+
+ export GPU_TYPE="$selected_gpu"
+ msg_ok "${selected_gpu} GPU passthrough configured (${dev_idx} devices)"
+ ;;
+
+ NVIDIA)
+ if [[ ${#NVIDIA_DEVICES[@]} -eq 0 ]]; then
+ msg_error "NVIDIA drivers not installed on host. Please install: apt install nvidia-driver"
+ return 1
+ fi
+
+ for dev in "${NVIDIA_DEVICES[@]}"; do
+ # NVIDIA devices typically need different handling
+ echo "dev${dev_idx}: $dev,uid=0,gid=44" >>"$LXC_CONFIG"
+ dev_idx=$((dev_idx + 1))
+
+ if [[ "$CT_TYPE" == "0" ]]; then
+ local major minor
+ major=$(stat -c '%t' "$dev" 2>/dev/null || echo "0")
+ minor=$(stat -c '%T' "$dev" 2>/dev/null || echo "0")
+
+ if [[ "$major" != "0" && "$minor" != "0" ]]; then
+ echo "lxc.cgroup2.devices.allow: c $((0x$major)):$((0x$minor)) rwm" >>"$LXC_CONFIG"
+ fi
+ fi
+ done
+
+ export GPU_TYPE="NVIDIA"
+ msg_ok "NVIDIA GPU passthrough configured (${dev_idx} devices)"
+ ;;
+ esac
+ }
+
+  # Additional (non-GPU) device passthrough: TUN and Coral TPU.
+  # Appends raw LXC config lines to $LXC_CONFIG; reads $ENABLE_TUN from caller scope.
+  configure_additional_devices() {
+    # TUN device passthrough (required for VPN software inside the container).
+    # NOTE: original had `cat <>"$LXC_CONFIG"` (read/write redirection) — a garbled
+    # heredoc; the EOF terminator below shows the intended form used here.
+    if [ "$ENABLE_TUN" == "yes" ]; then
+      cat <<EOF >>"$LXC_CONFIG"
+lxc.cgroup2.devices.allow: c 10:200 rwm
+lxc.mount.entry: /dev/net/tun dev/net/tun none bind,create=file
+EOF
+    fi
+
+    # Coral TPU passthrough when the PCIe Apex device exists on the host.
+    if [[ -e /dev/apex_0 ]]; then
+      msg_info "Detected Coral TPU - configuring passthrough"
+      echo "lxc.mount.entry: /dev/apex_0 dev/apex_0 none bind,optional,create=file" >>"$LXC_CONFIG"
+    fi
+  }
+
+ # Execute pre-start configurations
+ configure_usb_passthrough
+ configure_gpu_passthrough
+ configure_additional_devices
+
+ # ============================================================================
+ # START CONTAINER AND INSTALL USERLAND
+ # ============================================================================
+
+ msg_info "Starting LXC Container"
+ pct start "$CTID"
+
+ # Wait for container to be running
+ for i in {1..10}; do
+ if pct status "$CTID" | grep -q "status: running"; then
+ msg_ok "Started LXC Container"
+ break
+ fi
+ sleep 1
+ if [ "$i" -eq 10 ]; then
+ msg_error "LXC Container did not reach running state"
+ exit 1
+ fi
+ done
+
+ # Wait for network (skip for Alpine initially)
+ if [ "$var_os" != "alpine" ]; then
+ msg_info "Waiting for network in LXC container"
+
+ # Wait for IP
+ for i in {1..20}; do
+ ip_in_lxc=$(pct exec "$CTID" -- ip -4 addr show dev eth0 | awk '/inet / {print $2}' | cut -d/ -f1)
+ [ -n "$ip_in_lxc" ] && break
+ sleep 1
+ done
+
+ if [ -z "$ip_in_lxc" ]; then
+ msg_error "No IP assigned to CT $CTID after 20s"
+ exit 1
+ fi
+
+ # Try to reach gateway
+ gw_ok=0
+ for i in {1..10}; do
+ if pct exec "$CTID" -- ping -c1 -W1 "${GATEWAY:-8.8.8.8}" >/dev/null 2>&1; then
+ gw_ok=1
+ break
+ fi
+ sleep 1
+ done
+
+ if [ "$gw_ok" -eq 1 ]; then
+ msg_ok "Network in LXC is reachable (IP $ip_in_lxc)"
+ else
+ msg_warn "Network reachable but gateway check failed"
+ fi
+ fi
+  # Resolve a group's numeric GID inside container $CTID; falls back to 44
+  # (conventional "video" GID) when the group cannot be resolved.
+  get_container_gid() {
+    local grp="$1" resolved
+    resolved="$(pct exec "$CTID" -- getent group "$grp" 2>/dev/null | cut -d: -f3)"
+    echo "${resolved:-44}"
+  }
+
+ fix_gpu_gids
+
+ # Continue with standard container setup
+ msg_info "Customizing LXC Container"
+
+ # # Install GPU userland if configured
+ # if [[ "${ENABLE_VAAPI:-0}" == "1" ]]; then
+ # install_gpu_userland "VAAPI"
+ # fi
+
+ # if [[ "${ENABLE_NVIDIA:-0}" == "1" ]]; then
+ # install_gpu_userland "NVIDIA"
+ # fi
+
+  # Continue with standard container setup: package repos (Alpine) or
+  # locale/timezone/base tools (Debian-family).
+  if [ "$var_os" == "alpine" ]; then
+    sleep 3
+    # Pin Alpine mirrors to latest-stable. The original read
+    # `cat </etc/apk/repositories` — a garbled heredoc (it would have dumped the
+    # existing file and executed the URLs as commands); the EOF terminator shows
+    # the intended overwrite form restored here.
+    pct exec "$CTID" -- /bin/sh -c 'cat <<EOF >/etc/apk/repositories
+http://dl-cdn.alpinelinux.org/alpine/latest-stable/main
+http://dl-cdn.alpinelinux.org/alpine/latest-stable/community
+EOF'
+    pct exec "$CTID" -- ash -c "apk add bash newt curl openssh nano mc ncurses jq >/dev/null"
+  else
+    sleep 3
+    # Enable the requested locale and make it the container default.
+    pct exec "$CTID" -- bash -c "sed -i '/$LANG/ s/^# //' /etc/locale.gen"
+    pct exec "$CTID" -- bash -c "locale_line=\$(grep -v '^#' /etc/locale.gen | grep -E '^[a-zA-Z]' | awk '{print \$1}' | head -n 1) && \
+      echo LANG=\$locale_line >/etc/default/locale && \
+      locale-gen >/dev/null && \
+      export LANG=\$locale_line"
+
+    # Inherit the host timezone unless one was provided by the caller.
+    if [[ -z "${tz:-}" ]]; then
+      tz=$(timedatectl show --property=Timezone --value 2>/dev/null || echo "Etc/UTC")
+    fi
+
+    if pct exec "$CTID" -- test -e "/usr/share/zoneinfo/$tz"; then
+      pct exec "$CTID" -- bash -c "tz='$tz'; echo \"\$tz\" >/etc/timezone && ln -sf \"/usr/share/zoneinfo/\$tz\" /etc/localtime"
+    else
+      msg_warn "Skipping timezone setup – zone '$tz' not found in container"
+    fi
+
+    pct exec "$CTID" -- bash -c "apt-get update >/dev/null && apt-get install -y sudo curl mc gnupg2 jq >/dev/null" || {
+      msg_error "apt-get base packages installation failed"
+      exit 1
+    }
+  fi
+
+ msg_ok "Customized LXC Container"
+
+ # Verify GPU access if enabled
+ if [[ "${ENABLE_VAAPI:-0}" == "1" ]] && [ "$var_os" != "alpine" ]; then
+ pct exec "$CTID" -- bash -c "vainfo >/dev/null 2>&1" &&
+ msg_ok "VAAPI verified working" ||
+ msg_warn "VAAPI verification failed - may need additional configuration"
+ fi
+
+ if [[ "${ENABLE_NVIDIA:-0}" == "1" ]] && [ "$var_os" != "alpine" ]; then
+ pct exec "$CTID" -- bash -c "nvidia-smi >/dev/null 2>&1" &&
+ msg_ok "NVIDIA verified working" ||
+ msg_warn "NVIDIA verification failed - may need additional configuration"
+ fi
+
+ # Install SSH keys
+ install_ssh_keys_into_ct
+
+  # Run application installer and propagate its real exit code on failure.
+  # (The original `if ! cmd; then exit $?` exited 0: inside the then-branch,
+  # $? holds the status of the negated condition, which is 0 when cmd failed.)
+  lxc-attach -n "$CTID" -- bash -c "$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/install/${var_install}.sh)" || exit $?
+}
+
+# Interactively confirm and remove the container referenced by $CT_ID
+# (pct stop + pct destroy). Returns 1 when CT_ID is unset or removal fails,
+# 130 when the user aborts (signal or EOF). Empty input counts as "no".
+destroy_lxc() {
+ if [[ -z "$CT_ID" ]]; then
+ msg_error "No CT_ID found. Nothing to remove."
+ return 1
+ fi
+
+ # Abort on Ctrl-C / Ctrl-D / ESC
+ trap 'echo; msg_error "Aborted by user (SIGINT/SIGQUIT)"; return 130' INT QUIT
+
+ local prompt
+ if ! read -rp "Remove this Container? " prompt; then
+ # read returns != 0 on Ctrl-D/ESC (EOF)
+ msg_error "Aborted input (Ctrl-D/ESC)"
+ return 130
+ fi
+
+ # ${prompt,,} lower-cases the answer for case-insensitive matching.
+ case "${prompt,,}" in
+ y | yes)
+ if pct stop "$CT_ID" &>/dev/null && pct destroy "$CT_ID" &>/dev/null; then
+ msg_ok "Removed Container $CT_ID"
+ else
+ msg_error "Failed to remove Container $CT_ID"
+ return 1
+ fi
+ ;;
+ "" | n | no)
+ msg_info "Container was not removed."
+ ;;
+ *)
+ msg_warn "Invalid response. Container was not removed."
+ ;;
+ esac
+}
+
+# ------------------------------------------------------------------------------
+# Storage discovery / selection helpers
+# ------------------------------------------------------------------------------
+# ===== Storage discovery / selection helpers (ported from create_lxc.sh) =====
+# Validate a preselected storage pool for a class ("template" or "container").
+# On success sets STORAGE_RESULT to the pool name and STORAGE_INFO to a
+# human-readable free/used summary, returning 0. Returns 1 when the
+# preselection is empty, unknown, or lacks the required content type.
+resolve_storage_preselect() {
+ local class="$1" preselect="$2" required_content=""
+ case "$class" in
+ template) required_content="vztmpl" ;;
+ container) required_content="rootdir" ;;
+ *) return 1 ;;
+ esac
+ [[ -z "$preselect" ]] && return 1
+ # Exact-name match (-x) against the first column of `pvesm status` output.
+ if ! pvesm status -content "$required_content" | awk 'NR>1{print $1}' | grep -qx -- "$preselect"; then
+ msg_warn "Preselected storage '${preselect}' does not support content '${required_content}' (or not found)"
+ return 1
+ fi
+
+ local line total used free
+ line="$(pvesm status | awk -v s="$preselect" 'NR>1 && $1==s {print $0}')"
+ if [[ -z "$line" ]]; then
+ STORAGE_INFO="n/a"
+ else
+ # Columns 4/5/6 are read as total/used/free — NOTE(review): confirm this
+ # matches the `pvesm status` column layout on the targeted PVE versions.
+ total="$(awk '{print $4}' <<<"$line")"
+ used="$(awk '{print $5}' <<<"$line")"
+ free="$(awk '{print $6}' <<<"$line")"
+ local total_h used_h free_h
+ # Prefer human-readable IEC sizes when numfmt is available; fall back to
+ # the raw counters otherwise.
+ if command -v numfmt >/dev/null 2>&1; then
+ total_h="$(numfmt --to=iec --suffix=B --format %.1f "$total" 2>/dev/null || echo "$total")"
+ used_h="$(numfmt --to=iec --suffix=B --format %.1f "$used" 2>/dev/null || echo "$used")"
+ free_h="$(numfmt --to=iec --suffix=B --format %.1f "$free" 2>/dev/null || echo "$free")"
+ STORAGE_INFO="Free: ${free_h} Used: ${used_h}"
+ else
+ STORAGE_INFO="Free: ${free} Used: ${used}"
+ fi
+ fi
+ STORAGE_RESULT="$preselect"
+ return 0
+}
+
+# Align the GPU device GIDs written into $LXC_CONFIG with the actual GIDs of
+# the "video" and "render" groups inside container $CTID (which may differ
+# from the host defaults 44/104). For privileged containers (CT_TYPE=0) it
+# additionally chgrps/chmods the /dev/dri nodes inside the container.
+# No-op when GPU passthrough was not configured (GPU_TYPE unset).
+fix_gpu_gids() {
+ if [[ -z "${GPU_TYPE:-}" ]]; then
+ return 0
+ fi
+
+ msg_info "Detecting and setting correct GPU group IDs"
+
+ # Determine the actual GIDs from inside the container
+ local video_gid=$(pct exec "$CTID" -- sh -c "getent group video 2>/dev/null | cut -d: -f3")
+ local render_gid=$(pct exec "$CTID" -- sh -c "getent group render 2>/dev/null | cut -d: -f3")
+
+ # Fallbacks when the groups do not exist
+ if [[ -z "$video_gid" ]]; then
+ # Try to create the video group, then re-resolve its GID
+ pct exec "$CTID" -- sh -c "groupadd -r video 2>/dev/null || true"
+ video_gid=$(pct exec "$CTID" -- sh -c "getent group video 2>/dev/null | cut -d: -f3")
+ [[ -z "$video_gid" ]] && video_gid="44" # Ultimate fallback
+ fi
+
+ if [[ -z "$render_gid" ]]; then
+ # Try to create the render group, then re-resolve its GID
+ pct exec "$CTID" -- sh -c "groupadd -r render 2>/dev/null || true"
+ render_gid=$(pct exec "$CTID" -- sh -c "getent group render 2>/dev/null | cut -d: -f3")
+ [[ -z "$render_gid" ]] && render_gid="104" # Ultimate fallback
+ fi
+
+ msg_info "Container GIDs detected - video:${video_gid}, render:${render_gid}"
+
+ # Check whether the GIDs differ from the defaults (video=44, render=104)
+ local need_update=0
+ if [[ "$video_gid" != "44" ]] || [[ "$render_gid" != "104" ]]; then
+ need_update=1
+ fi
+
+ if [[ $need_update -eq 1 ]]; then
+ msg_info "Updating device GIDs in container config"
+
+ # Stop the container before editing its config
+ pct stop "$CTID" >/dev/null 2>&1
+
+ # Rewrite the devN entries with the correct GIDs.
+ # Keep a backup of the config first.
+ cp "$LXC_CONFIG" "${LXC_CONFIG}.bak"
+
+ # Parse every line; devN entries get their gid= replaced, everything else
+ # passes through unchanged into ${LXC_CONFIG}.new.
+ while IFS= read -r line; do
+ if [[ "$line" =~ ^dev[0-9]+: ]]; then
+ # Extract device path and the devN key
+ local device_path=$(echo "$line" | sed -E 's/^dev[0-9]+: ([^,]+).*/\1/')
+ local dev_num=$(echo "$line" | sed -E 's/^(dev[0-9]+):.*/\1/')
+
+ if [[ "$device_path" =~ renderD ]]; then
+ # RenderD device - use render GID
+ echo "${dev_num}: ${device_path},gid=${render_gid}"
+ elif [[ "$device_path" =~ card ]]; then
+ # Card device - use video GID
+ echo "${dev_num}: ${device_path},gid=${video_gid}"
+ else
+ # Keep original line
+ echo "$line"
+ fi
+ else
+ # Keep non-dev lines
+ echo "$line"
+ fi
+ done <"$LXC_CONFIG" >"${LXC_CONFIG}.new"
+
+ mv "${LXC_CONFIG}.new" "$LXC_CONFIG"
+
+ # Start the container again so the new GIDs take effect
+ pct start "$CTID" >/dev/null 2>&1
+ sleep 3
+
+ msg_ok "Device GIDs updated successfully"
+ else
+ msg_ok "Device GIDs are already correct"
+ fi
+ # Privileged container: also fix group ownership/permissions of the DRI
+ # nodes as seen inside the container (best-effort; errors ignored).
+ if [[ "$CT_TYPE" == "0" ]]; then
+ pct exec "$CTID" -- bash -c "
+ if [ -d /dev/dri ]; then
+ for dev in /dev/dri/*; do
+ if [ -e \"\$dev\" ]; then
+ if [[ \"\$dev\" =~ renderD ]]; then
+ chgrp ${render_gid} \"\$dev\" 2>/dev/null || true
+ else
+ chgrp ${video_gid} \"\$dev\" 2>/dev/null || true
+ fi
+ chmod 660 \"\$dev\" 2>/dev/null || true
+ fi
+ done
+ fi
+ " >/dev/null 2>&1
+ fi
+}
+
+# NVIDIA-specific check on host
+check_nvidia_host_setup() {
+ if ! command -v nvidia-smi >/dev/null 2>&1; then
+ msg_warn "NVIDIA GPU detected but nvidia-smi not found on host"
+ msg_warn "Please install NVIDIA drivers on host first."
+ #echo " 1. Download driver: wget https://us.download.nvidia.com/XFree86/Linux-x86_64/550.127.05/NVIDIA-Linux-x86_64-550.127.05.run"
+ #echo " 2. Install: ./NVIDIA-Linux-x86_64-550.127.05.run --dkms"
+ #echo " 3. Verify: nvidia-smi"
+ return 1
+ fi
+
+ # check if nvidia-smi works
+ if ! nvidia-smi >/dev/null 2>&1; then
+ msg_warn "nvidia-smi installed but not working. Driver issue?"
+ return 1
+ fi
+
+ return 0
+}
+
+check_storage_support() {
+ local CONTENT="$1" VALID=0
+ while IFS= read -r line; do
+ local STORAGE_NAME
+ STORAGE_NAME=$(awk '{print $1}' <<<"$line")
+ [[ -n "$STORAGE_NAME" ]] && VALID=1
+ done < <(pvesm status -content "$CONTENT" 2>/dev/null | awk 'NR>1')
+ [[ $VALID -eq 1 ]]
+}
+
+# Let the user pick a storage pool for a given class via a whiptail radiolist.
+# Classes: container|template|iso|images|backup|snippets. Sets STORAGE_RESULT
+# (pool name) and STORAGE_INFO (free/used summary); auto-selects silently when
+# exactly one pool matches. Returns 1 on invalid class, 2 when no pool offers
+# the content type; calls exit_script if the user cancels the dialog.
+select_storage() {
+ local CLASS=$1 CONTENT CONTENT_LABEL
+ case $CLASS in
+ container)
+ CONTENT='rootdir'
+ CONTENT_LABEL='Container'
+ ;;
+ template)
+ CONTENT='vztmpl'
+ CONTENT_LABEL='Container template'
+ ;;
+ iso)
+ CONTENT='iso'
+ CONTENT_LABEL='ISO image'
+ ;;
+ images)
+ CONTENT='images'
+ CONTENT_LABEL='VM Disk image'
+ ;;
+ backup)
+ CONTENT='backup'
+ CONTENT_LABEL='Backup'
+ ;;
+ snippets)
+ CONTENT='snippets'
+ CONTENT_LABEL='Snippets'
+ ;;
+ *)
+ msg_error "Invalid storage class '$CLASS'"
+ return 1
+ ;;
+ esac
+
+ # MENU holds (display, info, state) triples for whiptail --radiolist;
+ # STORAGE_MAP maps the display label back to the raw pool name.
+ declare -A STORAGE_MAP
+ local -a MENU=()
+ local COL_WIDTH=0
+
+ # Fields: name type status total used free — NOTE(review): --from-unit=K
+ # assumes pvesm reports sizes in KiB; confirm against pvesm output.
+ while read -r TAG TYPE _ TOTAL USED FREE _; do
+ [[ -n "$TAG" && -n "$TYPE" ]] || continue
+ local DISPLAY="${TAG} (${TYPE})"
+ local USED_FMT=$(numfmt --to=iec --from-unit=K --format %.1f <<<"$USED")
+ local FREE_FMT=$(numfmt --to=iec --from-unit=K --format %.1f <<<"$FREE")
+ local INFO="Free: ${FREE_FMT}B Used: ${USED_FMT}B"
+ STORAGE_MAP["$DISPLAY"]="$TAG"
+ MENU+=("$DISPLAY" "$INFO" "OFF")
+ ((${#DISPLAY} > COL_WIDTH)) && COL_WIDTH=${#DISPLAY}
+ done < <(pvesm status -content "$CONTENT" | awk 'NR>1')
+
+ if [[ ${#MENU[@]} -eq 0 ]]; then
+ msg_error "No storage found for content type '$CONTENT'."
+ return 2
+ fi
+
+ # Single candidate: pick it without prompting (MENU[0]=display, MENU[1]=info).
+ if [[ $((${#MENU[@]} / 3)) -eq 1 ]]; then
+ STORAGE_RESULT="${STORAGE_MAP[${MENU[0]}]}"
+ STORAGE_INFO="${MENU[1]}"
+ return 0
+ fi
+
+ # Dialog width sized to the longest label plus room for the info column.
+ local WIDTH=$((COL_WIDTH + 42))
+ while true; do
+ local DISPLAY_SELECTED
+ DISPLAY_SELECTED=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \
+ --title "Storage Pools" \
+ --radiolist "Which storage pool for ${CONTENT_LABEL,,}?\n(Spacebar to select)" \
+ 16 "$WIDTH" 6 "${MENU[@]}" 3>&1 1>&2 2>&3) || { exit_script; }
+
+ # Strip trailing whitespace whiptail may append before the map lookup.
+ DISPLAY_SELECTED=$(sed 's/[[:space:]]*$//' <<<"$DISPLAY_SELECTED")
+ if [[ -z "$DISPLAY_SELECTED" || -z "${STORAGE_MAP[$DISPLAY_SELECTED]+_}" ]]; then
+ whiptail --msgbox "No valid storage selected. Please try again." 8 58
+ continue
+ fi
+ STORAGE_RESULT="${STORAGE_MAP[$DISPLAY_SELECTED]}"
+ # Recover the matching info string from the flat triple list.
+ for ((i = 0; i < ${#MENU[@]}; i += 3)); do
+ if [[ "${MENU[$i]}" == "$DISPLAY_SELECTED" ]]; then
+ STORAGE_INFO="${MENU[$i + 1]}"
+ break
+ fi
+ done
+ return 0
+ done
+}
+
+create_lxc_container() {
+ # ------------------------------------------------------------------------------
+ # Optional verbose mode (debug tracing)
+ # ------------------------------------------------------------------------------
+ if [[ "${CREATE_LXC_VERBOSE:-no}" == "yes" ]]; then set -x; fi
+
+  # ------------------------------------------------------------------------------
+  # Helpers (dynamic versioning / template parsing)
+  # ------------------------------------------------------------------------------
+  # Installed version of a Debian package ("" when not installed).
+  pkg_ver() { dpkg-query -W -f='${Version}\n' "$1" 2>/dev/null || echo ""; }
+  # Candidate (installable) version as reported by APT policy.
+  pkg_cand() { apt-cache policy "$1" 2>/dev/null | awk '/Candidate:/ {print $2}'; }
+
+  # Debian version comparisons (>=, >, <) via dpkg --compare-versions.
+  ver_ge() { dpkg --compare-versions "$1" ge "$2"; }
+  ver_gt() { dpkg --compare-versions "$1" gt "$2"; }
+  ver_lt() { dpkg --compare-versions "$1" lt "$2"; }
+
+  # Extract Debian OS minor from template name: debian-13-standard_13.1-1_amd64.tar.zst => "13.1"
+  parse_template_osver() { sed -n 's/.*_\([0-9][0-9]*\(\.[0-9]\+\)\?\)-.*/\1/p' <<<"$1"; }
+
+  # Offer upgrade for pve-container/lxc-pve if candidate > installed; optional auto-retry pct create
+  # Relies on caller-scope variables: CTID, TEMPLATE_STORAGE, TEMPLATE,
+  # PCT_OPTIONS, LOGFILE (when do_retry=yes).
+  # Returns:
+  #   0 = no upgrade needed
+  #   1 = upgraded (and if do_retry=yes and retry succeeded, creation done)
+  #   2 = user declined
+  #   3 = upgrade attempted but failed OR retry failed
+  offer_lxc_stack_upgrade_and_maybe_retry() {
+    local do_retry="${1:-no}" # yes|no
+    local _pvec_i _pvec_c _lxcp_i _lxcp_c need=0
+
+    # installed (_i) vs. candidate (_c) versions of both stack packages
+    _pvec_i="$(pkg_ver pve-container)"
+    _lxcp_i="$(pkg_ver lxc-pve)"
+    _pvec_c="$(pkg_cand pve-container)"
+    _lxcp_c="$(pkg_cand lxc-pve)"
+
+    # An upgrade is "needed" when either candidate is strictly newer.
+    if [[ -n "$_pvec_c" && "$_pvec_c" != "none" ]]; then
+      ver_gt "$_pvec_c" "${_pvec_i:-0}" && need=1
+    fi
+    if [[ -n "$_lxcp_c" && "$_lxcp_c" != "none" ]]; then
+      ver_gt "$_lxcp_c" "${_lxcp_i:-0}" && need=1
+    fi
+    if [[ $need -eq 0 ]]; then
+      msg_debug "No newer candidate for pve-container/lxc-pve (installed=$_pvec_i/$_lxcp_i, cand=$_pvec_c/$_lxcp_c)"
+      return 0
+    fi
+
+    echo
+    echo "An update for the Proxmox LXC stack is available:"
+    echo "    pve-container: installed=${_pvec_i:-n/a}  candidate=${_pvec_c:-n/a}"
+    echo "    lxc-pve      : installed=${_lxcp_i:-n/a}  candidate=${_lxcp_c:-n/a}"
+    echo
+    read -rp "Do you want to upgrade now? [y/N] " _ans
+    # ${_ans,,}: case-insensitive match of the reply
+    case "${_ans,,}" in
+    y | yes)
+      msg_info "Upgrading Proxmox LXC stack (pve-container, lxc-pve)"
+      if apt-get update -qq >/dev/null && apt-get install -y --only-upgrade pve-container lxc-pve >/dev/null; then
+        msg_ok "LXC stack upgraded."
+        if [[ "$do_retry" == "yes" ]]; then
+          msg_info "Retrying container creation after upgrade"
+          if pct create "$CTID" "${TEMPLATE_STORAGE}:vztmpl/${TEMPLATE}" "${PCT_OPTIONS[@]}" >>"$LOGFILE" 2>&1; then
+            msg_ok "Container created successfully after upgrade."
+            return 0
+          else
+            msg_error "pct create still failed after upgrade. See $LOGFILE"
+            return 3
+          fi
+        fi
+        return 1
+      else
+        msg_error "Upgrade failed. Please check APT output."
+        return 3
+      fi
+      ;;
+    *) return 2 ;;
+    esac
+  }
+
+ # ------------------------------------------------------------------------------
+ # Required input variables
+ # ------------------------------------------------------------------------------
+ [[ "${CTID:-}" ]] || {
+ msg_error "You need to set 'CTID' variable."
+ exit 203
+ }
+ [[ "${PCT_OSTYPE:-}" ]] || {
+ msg_error "You need to set 'PCT_OSTYPE' variable."
+ exit 204
+ }
+
+ msg_debug "CTID=$CTID"
+ msg_debug "PCT_OSTYPE=$PCT_OSTYPE"
+ msg_debug "PCT_OSVERSION=${PCT_OSVERSION:-default}"
+
+ # ID checks
+ [[ "$CTID" -ge 100 ]] || {
+ msg_error "ID cannot be less than 100."
+ exit 205
+ }
+ if qm status "$CTID" &>/dev/null || pct status "$CTID" &>/dev/null; then
+ echo -e "ID '$CTID' is already in use."
+ unset CTID
+ msg_error "Cannot use ID that is already in use."
+ exit 206
+ fi
+
+ # Storage capability check
+ check_storage_support "rootdir" || {
+ msg_error "No valid storage found for 'rootdir' [Container]"
+ exit 1
+ }
+ check_storage_support "vztmpl" || {
+ msg_error "No valid storage found for 'vztmpl' [Template]"
+ exit 1
+ }
+
+ # Template storage selection
+ if resolve_storage_preselect template "${TEMPLATE_STORAGE:-}"; then
+ TEMPLATE_STORAGE="$STORAGE_RESULT"
+ TEMPLATE_STORAGE_INFO="$STORAGE_INFO"
+ msg_ok "Storage ${BL}${TEMPLATE_STORAGE}${CL} (${TEMPLATE_STORAGE_INFO}) [Template]"
+ else
+ while true; do
+ if [[ -z "${var_template_storage:-}" ]]; then
+ if select_storage template; then
+ TEMPLATE_STORAGE="$STORAGE_RESULT"
+ TEMPLATE_STORAGE_INFO="$STORAGE_INFO"
+ msg_ok "Storage ${BL}${TEMPLATE_STORAGE}${CL} (${TEMPLATE_STORAGE_INFO}) [Template]"
+ break
+ fi
+ fi
+ done
+ fi
+
+ # Container storage selection
+ if resolve_storage_preselect container "${CONTAINER_STORAGE:-}"; then
+ CONTAINER_STORAGE="$STORAGE_RESULT"
+ CONTAINER_STORAGE_INFO="$STORAGE_INFO"
+ msg_ok "Storage ${BL}${CONTAINER_STORAGE}${CL} (${CONTAINER_STORAGE_INFO}) [Container]"
+ else
+ if [[ -z "${var_container_storage:-}" ]]; then
+ if select_storage container; then
+ CONTAINER_STORAGE="$STORAGE_RESULT"
+ CONTAINER_STORAGE_INFO="$STORAGE_INFO"
+ msg_ok "Storage ${BL}${CONTAINER_STORAGE}${CL} (${CONTAINER_STORAGE_INFO}) [Container]"
+ fi
+ fi
+ fi
+
+ # Validate content types
+ msg_info "Validating content types of storage '$CONTAINER_STORAGE'"
+ STORAGE_CONTENT=$(grep -A4 -E "^(zfspool|dir|lvmthin|lvm): $CONTAINER_STORAGE" /etc/pve/storage.cfg | grep content | awk '{$1=""; print $0}' | xargs)
+ msg_debug "Storage '$CONTAINER_STORAGE' has content types: $STORAGE_CONTENT"
+ grep -qw "rootdir" <<<"$STORAGE_CONTENT" || {
+ msg_error "Storage '$CONTAINER_STORAGE' does not support 'rootdir'. Cannot create LXC."
+ exit 217
+ }
+ $STD msg_ok "Storage '$CONTAINER_STORAGE' supports 'rootdir'"
+
+ msg_info "Validating content types of template storage '$TEMPLATE_STORAGE'"
+ TEMPLATE_CONTENT=$(grep -A4 -E "^[^:]+: $TEMPLATE_STORAGE" /etc/pve/storage.cfg | grep content | awk '{$1=""; print $0}' | xargs)
+ msg_debug "Template storage '$TEMPLATE_STORAGE' has content types: $TEMPLATE_CONTENT"
+ if ! grep -qw "vztmpl" <<<"$TEMPLATE_CONTENT"; then
+ msg_warn "Template storage '$TEMPLATE_STORAGE' does not declare 'vztmpl'. This may cause pct create to fail."
+ else
+ $STD msg_ok "Template storage '$TEMPLATE_STORAGE' supports 'vztmpl'"
+ fi
+
+ # Free space check
+ STORAGE_FREE=$(pvesm status | awk -v s="$CONTAINER_STORAGE" '$1 == s { print $6 }')
+ REQUIRED_KB=$((${PCT_DISK_SIZE:-8} * 1024 * 1024))
+ [[ "$STORAGE_FREE" -ge "$REQUIRED_KB" ]] || {
+ msg_error "Not enough space on '$CONTAINER_STORAGE'. Needed: ${PCT_DISK_SIZE:-8}G."
+ exit 214
+ }
+
+ # Cluster quorum (if cluster)
+ if [[ -f /etc/pve/corosync.conf ]]; then
+ msg_info "Checking cluster quorum"
+ if ! pvecm status | awk -F':' '/^Quorate/ { exit ($2 ~ /Yes/) ? 0 : 1 }'; then
+ msg_error "Cluster is not quorate. Start all nodes or configure quorum device (QDevice)."
+ exit 210
+ fi
+ msg_ok "Cluster is quorate"
+ fi
+
+ # ------------------------------------------------------------------------------
+ # Template discovery & validation
+ # ------------------------------------------------------------------------------
+ TEMPLATE_SEARCH="${PCT_OSTYPE}-${PCT_OSVERSION:-}"
+ case "$PCT_OSTYPE" in
+ debian | ubuntu) TEMPLATE_PATTERN="-standard_" ;;
+ alpine | fedora | rocky | centos) TEMPLATE_PATTERN="-default_" ;;
+ *) TEMPLATE_PATTERN="" ;;
+ esac
+
+ msg_info "Searching for template '$TEMPLATE_SEARCH'"
+
+ # Build regex patterns outside awk/grep for clarity
+ SEARCH_PATTERN="^${TEMPLATE_SEARCH}"
+
+ #echo "[DEBUG] TEMPLATE_SEARCH='$TEMPLATE_SEARCH'"
+ #echo "[DEBUG] SEARCH_PATTERN='$SEARCH_PATTERN'"
+ #echo "[DEBUG] TEMPLATE_PATTERN='$TEMPLATE_PATTERN'"
+
+ mapfile -t LOCAL_TEMPLATES < <(
+ pveam list "$TEMPLATE_STORAGE" 2>/dev/null |
+ awk -v search="${SEARCH_PATTERN}" -v pattern="${TEMPLATE_PATTERN}" '$1 ~ search && $1 ~ pattern {print $1}' |
+ sed 's|.*/||' | sort -t - -k 2 -V
+ )
+
+ pveam update >/dev/null 2>&1 || msg_warn "Could not update template catalog (pveam update failed)."
+
+ #echo "[DEBUG] pveam available output (first 5 lines with .tar files):"
+ #pveam available -section system 2>/dev/null | grep -E '\.(tar\.zst|tar\.xz|tar\.gz)$' | head -5 | sed 's/^/ /'
+
+ set +u
+ mapfile -t ONLINE_TEMPLATES < <(pveam available -section system 2>/dev/null | grep -E '\.(tar\.zst|tar\.xz|tar\.gz)$' | awk '{print $2}' | grep -E "${SEARCH_PATTERN}.*${TEMPLATE_PATTERN}" | sort -t - -k 2 -V 2>/dev/null || true)
+ #echo "[DEBUG] After filtering: ${#ONLINE_TEMPLATES[@]} online templates found"
+ set -u
+ if [[ ${#ONLINE_TEMPLATES[@]} -gt 0 ]]; then
+ #echo "[DEBUG] Online templates:"
+ for tmpl in "${ONLINE_TEMPLATES[@]}"; do
+ echo " - $tmpl"
+ done
+ fi
+
+ ONLINE_TEMPLATE=""
+ [[ ${#ONLINE_TEMPLATES[@]} -gt 0 ]] && ONLINE_TEMPLATE="${ONLINE_TEMPLATES[-1]}"
+
+ #msg_debug "SEARCH_PATTERN='${SEARCH_PATTERN}' TEMPLATE_PATTERN='${TEMPLATE_PATTERN}'"
+ #msg_debug "Found ${#LOCAL_TEMPLATES[@]} local templates, ${#ONLINE_TEMPLATES[@]} online templates"
+ if [[ ${#ONLINE_TEMPLATES[@]} -gt 0 ]]; then
+ #msg_debug "First 3 online templates:"
+ count=0
+ for idx in "${!ONLINE_TEMPLATES[@]}"; do
+ #msg_debug " [$idx]: ${ONLINE_TEMPLATES[$idx]}"
+ ((count++))
+ [[ $count -ge 3 ]] && break
+ done
+ fi
+ #msg_debug "ONLINE_TEMPLATE='$ONLINE_TEMPLATE'"
+
+ if [[ ${#LOCAL_TEMPLATES[@]} -gt 0 ]]; then
+ TEMPLATE="${LOCAL_TEMPLATES[-1]}"
+ TEMPLATE_SOURCE="local"
+ else
+ TEMPLATE="$ONLINE_TEMPLATE"
+ TEMPLATE_SOURCE="online"
+ fi
+
+ # If still no template, try to find alternatives
+ if [[ -z "$TEMPLATE" ]]; then
+ echo ""
+ echo "[DEBUG] No template found for ${PCT_OSTYPE} ${PCT_OSVERSION}, searching for alternatives..."
+
+ # Get all available versions for this OS type
+ mapfile -t AVAILABLE_VERSIONS < <(
+ pveam available -section system 2>/dev/null |
+ grep -E '\.(tar\.zst|tar\.xz|tar\.gz)$' |
+ awk -F'\t' '{print $1}' |
+ grep "^${PCT_OSTYPE}-" |
+ sed -E "s/.*${PCT_OSTYPE}-([0-9]+(\.[0-9]+)?).*/\1/" |
+ sort -u -V 2>/dev/null
+ )
+
+ if [[ ${#AVAILABLE_VERSIONS[@]} -gt 0 ]]; then
+ echo ""
+ echo "${BL}Available ${PCT_OSTYPE} versions:${CL}"
+ for i in "${!AVAILABLE_VERSIONS[@]}"; do
+ echo " [$((i + 1))] ${AVAILABLE_VERSIONS[$i]}"
+ done
+ echo ""
+ read -p "Select version [1-${#AVAILABLE_VERSIONS[@]}] or press Enter to cancel: " choice
+
+ if [[ "$choice" =~ ^[0-9]+$ ]] && [[ "$choice" -ge 1 ]] && [[ "$choice" -le ${#AVAILABLE_VERSIONS[@]} ]]; then
+ PCT_OSVERSION="${AVAILABLE_VERSIONS[$((choice - 1))]}"
+ TEMPLATE_SEARCH="${PCT_OSTYPE}-${PCT_OSVERSION}"
+ SEARCH_PATTERN="^${TEMPLATE_SEARCH}-"
+
+ #echo "[DEBUG] Retrying with version: $PCT_OSVERSION"
+
+ mapfile -t ONLINE_TEMPLATES < <(
+ pveam available -section system 2>/dev/null |
+ grep -E '\.(tar\.zst|tar\.xz|tar\.gz)$' |
+ awk -F'\t' '{print $1}' |
+ grep -E "${SEARCH_PATTERN}.*${TEMPLATE_PATTERN}" |
+ sort -t - -k 2 -V 2>/dev/null || true
+ )
+
+ if [[ ${#ONLINE_TEMPLATES[@]} -gt 0 ]]; then
+ TEMPLATE="${ONLINE_TEMPLATES[-1]}"
+ TEMPLATE_SOURCE="online"
+ #echo "[DEBUG] Found alternative: $TEMPLATE"
+ else
+ msg_error "No templates available for ${PCT_OSTYPE} ${PCT_OSVERSION}"
+ exit 225
+ fi
+ else
+ msg_info "Installation cancelled"
+ exit 0
+ fi
+ else
+ msg_error "No ${PCT_OSTYPE} templates available at all"
+ exit 225
+ fi
+ fi
+
+ #echo "[DEBUG] Selected TEMPLATE='$TEMPLATE' SOURCE='$TEMPLATE_SOURCE'"
+ #msg_debug "Selected TEMPLATE='$TEMPLATE' SOURCE='$TEMPLATE_SOURCE'"
+
+ TEMPLATE_PATH="$(pvesm path $TEMPLATE_STORAGE:vztmpl/$TEMPLATE 2>/dev/null || true)"
+ if [[ -z "$TEMPLATE_PATH" ]]; then
+ TEMPLATE_BASE=$(awk -v s="$TEMPLATE_STORAGE" '$1==s {f=1} f && /path/ {print $2; exit}' /etc/pve/storage.cfg)
+ [[ -n "$TEMPLATE_BASE" ]] && TEMPLATE_PATH="$TEMPLATE_BASE/template/cache/$TEMPLATE"
+ fi
+
+ # If we still don't have a path but have a valid template name, construct it
+ if [[ -z "$TEMPLATE_PATH" && -n "$TEMPLATE" ]]; then
+ TEMPLATE_PATH="/var/lib/vz/template/cache/$TEMPLATE"
+ fi
+
+ [[ -n "$TEMPLATE_PATH" ]] || {
+ if [[ -z "$TEMPLATE" ]]; then
+ msg_error "Template ${PCT_OSTYPE} ${PCT_OSVERSION} not available"
+
+ # Get available versions
+ mapfile -t AVAILABLE_VERSIONS < <(
+ pveam available -section system 2>/dev/null |
+ grep "^${PCT_OSTYPE}-" |
+ sed -E 's/.*'"${PCT_OSTYPE}"'-([0-9]+\.[0-9]+).*/\1/' |
+ grep -E '^[0-9]+\.[0-9]+$' |
+ sort -u -V 2>/dev/null || sort -u
+ )
+
+ if [[ ${#AVAILABLE_VERSIONS[@]} -gt 0 ]]; then
+ echo -e "\n${BL}Available versions:${CL}"
+ for i in "${!AVAILABLE_VERSIONS[@]}"; do
+ echo " [$((i + 1))] ${AVAILABLE_VERSIONS[$i]}"
+ done
+
+ echo ""
+ read -p "Select version [1-${#AVAILABLE_VERSIONS[@]}] or Enter to exit: " choice
+
+ if [[ "$choice" =~ ^[0-9]+$ ]] && [[ "$choice" -ge 1 ]] && [[ "$choice" -le ${#AVAILABLE_VERSIONS[@]} ]]; then
+ export var_version="${AVAILABLE_VERSIONS[$((choice - 1))]}"
+ export PCT_OSVERSION="$var_version"
+ msg_ok "Switched to ${PCT_OSTYPE} ${var_version}"
+
+ # Retry template search with new version
+ TEMPLATE_SEARCH="${PCT_OSTYPE}-${PCT_OSVERSION:-}"
+ SEARCH_PATTERN="^${TEMPLATE_SEARCH}-"
+
+ mapfile -t LOCAL_TEMPLATES < <(
+ pveam list "$TEMPLATE_STORAGE" 2>/dev/null |
+ awk -v search="${SEARCH_PATTERN}" -v pattern="${TEMPLATE_PATTERN}" '$1 ~ search && $1 ~ pattern {print $1}' |
+ sed 's|.*/||' | sort -t - -k 2 -V
+ )
+ mapfile -t ONLINE_TEMPLATES < <(
+ pveam available -section system 2>/dev/null |
+ grep -E '\.(tar\.zst|tar\.xz|tar\.gz)$' |
+ awk -F'\t' '{print $1}' |
+ grep -E "${SEARCH_PATTERN}.*${TEMPLATE_PATTERN}" |
+ sort -t - -k 2 -V 2>/dev/null || true
+ )
+ ONLINE_TEMPLATE=""
+ [[ ${#ONLINE_TEMPLATES[@]} -gt 0 ]] && ONLINE_TEMPLATE="${ONLINE_TEMPLATES[-1]}"
+
+ if [[ ${#LOCAL_TEMPLATES[@]} -gt 0 ]]; then
+ TEMPLATE="${LOCAL_TEMPLATES[-1]}"
+ TEMPLATE_SOURCE="local"
+ else
+ TEMPLATE="$ONLINE_TEMPLATE"
+ TEMPLATE_SOURCE="online"
+ fi
+
+ TEMPLATE_PATH="$(pvesm path $TEMPLATE_STORAGE:vztmpl/$TEMPLATE 2>/dev/null || true)"
+ if [[ -z "$TEMPLATE_PATH" ]]; then
+ TEMPLATE_BASE=$(awk -v s="$TEMPLATE_STORAGE" '$1==s {f=1} f && /path/ {print $2; exit}' /etc/pve/storage.cfg)
+ [[ -n "$TEMPLATE_BASE" ]] && TEMPLATE_PATH="$TEMPLATE_BASE/template/cache/$TEMPLATE"
+ fi
+
+ # If we still don't have a path but have a valid template name, construct it
+ if [[ -z "$TEMPLATE_PATH" && -n "$TEMPLATE" ]]; then
+ TEMPLATE_PATH="/var/lib/vz/template/cache/$TEMPLATE"
+ fi
+
+ [[ -n "$TEMPLATE_PATH" ]] || {
+ msg_error "Template still not found after version change"
+ exit 220
+ }
+ else
+ msg_info "Installation cancelled"
+ exit 1
+ fi
+ else
+ msg_error "No ${PCT_OSTYPE} templates available"
+ exit 220
+ fi
+ fi
+ }
+
+ # Validate that we found a template
+ if [[ -z "$TEMPLATE" ]]; then
+ msg_error "No template found for ${PCT_OSTYPE} ${PCT_OSVERSION}"
+ msg_info "Please check:"
+ msg_info " - Is pveam catalog available? (run: pveam available -section system)"
+ msg_info " - Does the template exist for your OS version?"
+ exit 225
+ fi
+
+ msg_ok "Template ${BL}$TEMPLATE${CL} [$TEMPLATE_SOURCE]"
+ msg_debug "Resolved TEMPLATE_PATH=$TEMPLATE_PATH"
+
+ NEED_DOWNLOAD=0
+ if [[ ! -f "$TEMPLATE_PATH" ]]; then
+ msg_info "Template not present locally – will download."
+ NEED_DOWNLOAD=1
+ elif [[ ! -r "$TEMPLATE_PATH" ]]; then
+ msg_error "Template file exists but is not readable – check permissions."
+ exit 221
+ elif [[ "$(stat -c%s "$TEMPLATE_PATH")" -lt 1000000 ]]; then
+ if [[ -n "$ONLINE_TEMPLATE" ]]; then
+ msg_warn "Template file too small (<1MB) – re-downloading."
+ NEED_DOWNLOAD=1
+ else
+ msg_warn "Template looks too small, but no online version exists. Keeping local file."
+ fi
+ elif ! tar -tf "$TEMPLATE_PATH" &>/dev/null; then
+ if [[ -n "$ONLINE_TEMPLATE" ]]; then
+ msg_warn "Template appears corrupted – re-downloading."
+ NEED_DOWNLOAD=1
+ else
+ msg_warn "Template appears corrupted, but no online version exists. Keeping local file."
+ fi
+ else
+ $STD msg_ok "Template $TEMPLATE is present and valid."
+ fi
+
+ if [[ "$TEMPLATE_SOURCE" == "local" && -n "$ONLINE_TEMPLATE" && "$TEMPLATE" != "$ONLINE_TEMPLATE" ]]; then
+ msg_warn "Local template is outdated: $TEMPLATE (latest available: $ONLINE_TEMPLATE)"
+ if whiptail --yesno "A newer template is available:\n$ONLINE_TEMPLATE\n\nDo you want to download and use it instead?" 12 70; then
+ TEMPLATE="$ONLINE_TEMPLATE"
+ NEED_DOWNLOAD=1
+ else
+ msg_info "Continuing with local template $TEMPLATE"
+ fi
+ fi
+
+ if [[ "$NEED_DOWNLOAD" -eq 1 ]]; then
+ [[ -f "$TEMPLATE_PATH" ]] && rm -f "$TEMPLATE_PATH"
+ for attempt in {1..3}; do
+ msg_info "Attempt $attempt: Downloading template $TEMPLATE to $TEMPLATE_STORAGE"
+ if pveam download "$TEMPLATE_STORAGE" "$TEMPLATE" >/dev/null 2>&1; then
+ msg_ok "Template download successful."
+ break
+ fi
+ if [[ $attempt -eq 3 ]]; then
+ msg_error "Failed after 3 attempts. Please check network access, permissions, or manually run:\n pveam download $TEMPLATE_STORAGE $TEMPLATE"
+ exit 222
+ fi
+ sleep $((attempt * 5))
+ done
+ fi
+
+ if ! pveam list "$TEMPLATE_STORAGE" 2>/dev/null | grep -q "$TEMPLATE"; then
+ msg_error "Template $TEMPLATE not available in storage $TEMPLATE_STORAGE after download."
+ exit 223
+ fi
+
+ # ------------------------------------------------------------------------------
+ # Dynamic preflight for Debian 13.x: offer upgrade if available (no hard mins)
+ # ------------------------------------------------------------------------------
+ if [[ "$PCT_OSTYPE" == "debian" ]]; then
+ OSVER="$(parse_template_osver "$TEMPLATE")"
+ if [[ -n "$OSVER" ]]; then
+      # Proactive, but non-blocking – only an offer, never a hard abort
+ offer_lxc_stack_upgrade_and_maybe_retry "no" || true
+ fi
+ fi
+
+ # ------------------------------------------------------------------------------
+ # Create LXC Container
+ # ------------------------------------------------------------------------------
+ msg_info "Creating LXC container"
+
+ # Ensure subuid/subgid entries exist
+ grep -q "root:100000:65536" /etc/subuid || echo "root:100000:65536" >>/etc/subuid
+ grep -q "root:100000:65536" /etc/subgid || echo "root:100000:65536" >>/etc/subgid
+
+ # Assemble pct options
+ PCT_OPTIONS=(${PCT_OPTIONS[@]:-${DEFAULT_PCT_OPTIONS[@]}})
+ [[ " ${PCT_OPTIONS[*]} " =~ " -rootfs " ]] || PCT_OPTIONS+=(-rootfs "$CONTAINER_STORAGE:${PCT_DISK_SIZE:-8}")
+
+ # Lock by template file (avoid concurrent downloads/creates)
+ lockfile="/tmp/template.${TEMPLATE}.lock"
+ exec 9>"$lockfile" || {
+ msg_error "Failed to create lock file '$lockfile'."
+ exit 200
+ }
+ flock -w 60 9 || {
+ msg_error "Timeout while waiting for template lock."
+ exit 211
+ }
+
+ LOGFILE="/tmp/pct_create_${CTID}.log"
+ msg_debug "pct create command: pct create $CTID ${TEMPLATE_STORAGE}:vztmpl/${TEMPLATE} ${PCT_OPTIONS[*]}"
+ msg_debug "Logfile: $LOGFILE"
+
+ # First attempt
+ if ! pct create "$CTID" "${TEMPLATE_STORAGE}:vztmpl/${TEMPLATE}" "${PCT_OPTIONS[@]}" >"$LOGFILE" 2>&1; then
+ msg_error "Container creation failed on ${TEMPLATE_STORAGE}. Checking template..."
+
+ # Validate template file
+ if [[ ! -s "$TEMPLATE_PATH" || "$(stat -c%s "$TEMPLATE_PATH")" -lt 1000000 ]]; then
+ msg_warn "Template file too small or missing – re-downloading."
+ rm -f "$TEMPLATE_PATH"
+ pveam download "$TEMPLATE_STORAGE" "$TEMPLATE"
+ elif ! tar -tf "$TEMPLATE_PATH" &>/dev/null; then
+ if [[ -n "$ONLINE_TEMPLATE" ]]; then
+ msg_warn "Template appears corrupted – re-downloading."
+ rm -f "$TEMPLATE_PATH"
+ pveam download "$TEMPLATE_STORAGE" "$TEMPLATE"
+ else
+ msg_warn "Template appears corrupted, but no online version exists. Skipping re-download."
+ fi
+ fi
+
+ # Retry after repair
+ if ! pct create "$CTID" "${TEMPLATE_STORAGE}:vztmpl/${TEMPLATE}" "${PCT_OPTIONS[@]}" >>"$LOGFILE" 2>&1; then
+ # Fallback to local storage
+ if [[ "$TEMPLATE_STORAGE" != "local" ]]; then
+ msg_warn "Retrying container creation with fallback to local storage..."
+ LOCAL_TEMPLATE_PATH="/var/lib/vz/template/cache/$TEMPLATE"
+ if [[ ! -f "$LOCAL_TEMPLATE_PATH" ]]; then
+ msg_info "Downloading template to local..."
+ pveam download local "$TEMPLATE" >/dev/null 2>&1
+ fi
+ if pct create "$CTID" "local:vztmpl/${TEMPLATE}" "${PCT_OPTIONS[@]}" >>"$LOGFILE" 2>&1; then
+ msg_ok "Container successfully created using local fallback."
+ else
+ # --- Dynamic stack upgrade + auto-retry on the well-known error pattern ---
+ if grep -qiE 'unsupported .* version' "$LOGFILE"; then
+ echo
+ echo "pct reported 'unsupported ... version' – your LXC stack might be too old for this template."
+ echo "We can try to upgrade 'pve-container' and 'lxc-pve' now and retry automatically."
+ offer_lxc_stack_upgrade_and_maybe_retry "yes"
+ rc=$?
+ case $rc in
+ 0) : ;; # success - container created, continue
+ 2)
+ echo "Upgrade was declined. Please update and re-run:
+ apt update && apt install --only-upgrade pve-container lxc-pve"
+ exit 231
+ ;;
+ 3)
+ echo "Upgrade and/or retry failed. Please inspect: $LOGFILE"
+ exit 231
+ ;;
+ esac
+ else
+ msg_error "Container creation failed even with local fallback. See $LOGFILE"
+ if whiptail --yesno "pct create failed.\nDo you want to enable verbose debug mode and view detailed logs?" 12 70; then
+ set -x
+ bash -x -c "pct create $CTID local:vztmpl/${TEMPLATE} ${PCT_OPTIONS[*]}" 2>&1 | tee -a "$LOGFILE"
+ set +x
+ fi
+ exit 209
+ fi
+ fi
+ else
+ msg_error "Container creation failed on local storage. See $LOGFILE"
+ # --- Dynamic stack upgrade + auto-retry on the well-known error pattern ---
+ if grep -qiE 'unsupported .* version' "$LOGFILE"; then
+ echo
+ echo "pct reported 'unsupported ... version' – your LXC stack might be too old for this template."
+ echo "We can try to upgrade 'pve-container' and 'lxc-pve' now and retry automatically."
+ offer_lxc_stack_upgrade_and_maybe_retry "yes"
+ rc=$?
+ case $rc in
+ 0) : ;; # success - container created, continue
+ 2)
+ echo "Upgrade was declined. Please update and re-run:
+ apt update && apt install --only-upgrade pve-container lxc-pve"
+ exit 231
+ ;;
+ 3)
+ echo "Upgrade and/or retry failed. Please inspect: $LOGFILE"
+ exit 231
+ ;;
+ esac
+ else
+ msg_error "Container creation failed. See $LOGFILE"
+ if whiptail --yesno "pct create failed.\nDo you want to enable verbose debug mode and view detailed logs?" 12 70; then
+ set -x
+ bash -x -c "pct create $CTID local:vztmpl/${TEMPLATE} ${PCT_OPTIONS[*]}" 2>&1 | tee -a "$LOGFILE"
+ set +x
+ fi
+ exit 209
+ fi
+ fi
+ fi
+ fi
+
+ # Verify container exists
+ pct list | awk '{print $1}' | grep -qx "$CTID" || {
+ msg_error "Container ID $CTID not listed in 'pct list'. See $LOGFILE"
+ exit 215
+ }
+
+ # Verify config rootfs
+ grep -q '^rootfs:' "/etc/pve/lxc/$CTID.conf" || {
+ msg_error "RootFS entry missing in container config. See $LOGFILE"
+ exit 216
+ }
+
+ msg_ok "LXC Container ${BL}$CTID${CL} ${GN}was successfully created."
+}
+
+# ------------------------------------------------------------------------------
+# description()
+#
+# - Sets container description with HTML content (logo, links, badges)
+# - Restarts ping-instances.service if present
+# - Posts status "done" to API
+# ------------------------------------------------------------------------------
+description() {
+  # Resolve the container's IPv4 address on eth0 for use in the description
+  IP=$(pct exec "$CTID" ip a s dev eth0 | awk '/inet / {print $2}' | cut -d/ -f1)
+
+  # Generate LXC Description
+  # NOTE(review): the heredoc opener was corrupted ("cat <"); restored to
+  # "cat <<EOF" to match the closing EOF marker below. The HTML body between
+  # here and EOF appears stripped by an extraction step — confirm the markup
+  # against the upstream build.func before relying on it.
+  DESCRIPTION=$(
+    cat <<EOF
+
+
+
+
+ ${APP} LXC
+
+
+
+
+
+
+
+
+
+ GitHub
+
+
+
+ Discussions
+
+
+
+ Issues
+
+
EOF
+  )
+  # Attach the generated description to the container
+  pct set "$CTID" -description "$DESCRIPTION"
+
+  # Restart the instance-ping service if it is installed on this host
+  if [[ -f /etc/systemd/system/ping-instances.service ]]; then
+    systemctl start ping-instances.service
+  fi
+
+  # Report successful completion to the API
+  post_update_to_api "done" "none"
+}
+
+# ------------------------------------------------------------------------------
+# api_exit_script()
+#
+# - Exit trap handler
+# - Reports exit codes to API with detailed reason
+# - Handles known codes (100–209) and maps them to errors
+# ------------------------------------------------------------------------------
+api_exit_script() {
+  # EXIT-trap handler: translate a nonzero exit code into a failure report.
+  exit_code=$?
+  # Clean exit — nothing to report.
+  if [ $exit_code -eq 0 ]; then
+    return
+  fi
+  local reason
+  case $exit_code in
+  100) reason="100: Unexpected error in create_lxc.sh" ;;
+  101) reason="101: No network connection detected in create_lxc.sh" ;;
+  200) reason="200: LXC creation failed in create_lxc.sh" ;;
+  201) reason="201: Invalid Storage class in create_lxc.sh" ;;
+  202) reason="202: User aborted menu in create_lxc.sh" ;;
+  203) reason="203: CTID not set in create_lxc.sh" ;;
+  204) reason="204: PCT_OSTYPE not set in create_lxc.sh" ;;
+  205) reason="205: CTID cannot be less than 100 in create_lxc.sh" ;;
+  206) reason="206: CTID already in use in create_lxc.sh" ;;
+  207) reason="207: Template not found in create_lxc.sh" ;;
+  208) reason="208: Error downloading template in create_lxc.sh" ;;
+  209) reason="209: Container creation failed, but template is intact in create_lxc.sh" ;;
+  *) reason="Unknown error, exit code: $exit_code in create_lxc.sh" ;;
+  esac
+  post_update_to_api "failed" "$reason"
+}
+
+# Register API reporting traps.
+# EXIT reporting is gated on pveversion — presumably so non-Proxmox test
+# environments do not post exit statuses; confirm against the caller.
+if command -v pveversion >/dev/null 2>&1; then
+  trap 'api_exit_script' EXIT
+fi
+# On error, report the failing command; on interruption/termination, report why.
+trap 'post_update_to_api "failed" "$BASH_COMMAND"' ERR
+trap 'post_update_to_api "failed" "INTERRUPTED"' SIGINT
+trap 'post_update_to_api "failed" "TERMINATED"' SIGTERM
diff --git a/misc/build.func.backup-20251029-124334 b/misc/build.func.backup-20251029-124334
new file mode 100644
index 000000000..d452f4637
--- /dev/null
+++ b/misc/build.func.backup-20251029-124334
@@ -0,0 +1,3517 @@
+#!/usr/bin/env bash
+# Copyright (c) 2021-2025 community-scripts ORG
+# Author: tteck (tteckster) | MickLesk | michelroegl-brunner
+# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
+# Revision: 1
+
+# ==============================================================================
+# SECTION 1: CORE INITIALIZATION & VARIABLES
+# ==============================================================================
+
+# ------------------------------------------------------------------------------
+# variables()
+#
+# - Normalize application name (NSAPP = lowercase, no spaces)
+# - Build installer filename (var_install)
+# - Define regex for integer validation
+# - Fetch hostname of Proxmox node
+# - Set default values for diagnostics/method
+# - Generate random UUID for tracking
+# - Get Proxmox VE version and kernel version
+# ------------------------------------------------------------------------------
+variables() {
+  NSAPP=$(echo "${APP,,}" | tr -d ' ') # This function sets the NSAPP variable by converting the value of the APP variable to lowercase and removing any spaces.
+  var_install="${NSAPP}-install" # sets the var_install variable by appending "-install" to the value of NSAPP.
+  INTEGER='^[0-9]+([.][0-9]+)?$' # it defines the INTEGER regular expression pattern.
+  PVEHOST_NAME=$(hostname) # stores the hostname of the Proxmox node (no case conversion is applied).
+  DIAGNOSTICS="yes" # sets the DIAGNOSTICS variable to "yes", used for the API call.
+  METHOD="default" # sets the METHOD variable to "default", used for the API call.
+  RANDOM_UUID="$(cat /proc/sys/kernel/random/uuid)" # generates a random UUID and sets it to the RANDOM_UUID variable.
+  CTTYPE="${CTTYPE:-${CT_TYPE:-1}}" # prefer an explicit CTTYPE, fall back to CT_TYPE, default "1" (unprivileged).
+  #CT_TYPE=${var_unprivileged:-$CT_TYPE}
+
+  # Get Proxmox VE version and kernel version
+  if command -v pveversion >/dev/null 2>&1; then
+    PVEVERSION=$(pveversion | grep "pve-manager" | awk '{print $2}' | cut -d'/' -f1)
+  else
+    PVEVERSION="N/A"
+  fi
+  KERNEL_VERSION=$(uname -r)
+}
+
+# -----------------------------------------------------------------------------
+# Community-Scripts bootstrap loader
+# - Always sources build.func from remote
+# - Updates local core files only if build.func changed
+# - Local cache: /usr/local/community-scripts/core
+# -----------------------------------------------------------------------------
+
+# FUNC_DIR="/usr/local/community-scripts/core"
+# mkdir -p "$FUNC_DIR"
+
+# BUILD_URL="https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func"
+# BUILD_REV="$FUNC_DIR/build.rev"
+# DEVMODE="${DEVMODE:-no}"
+
+# # --- Step 1: fetch build.func content once, compute hash ---
+# build_content="$(curl -fsSL "$BUILD_URL")" || {
+# echo "❌ Failed to fetch build.func"
+# exit 1
+# }
+
+# newhash=$(printf "%s" "$build_content" | sha256sum | awk '{print $1}')
+# oldhash=$(cat "$BUILD_REV" 2>/dev/null || echo "")
+
+# # --- Step 2: if build.func changed, offer update for core files ---
+# if [ "$newhash" != "$oldhash" ]; then
+# echo "⚠️ build.func changed!"
+
+# while true; do
+# read -rp "Refresh local core files? [y/N/diff]: " ans
+# case "$ans" in
+# [Yy]*)
+# echo "$newhash" >"$BUILD_REV"
+
+# update_func_file() {
+# local file="$1"
+# local url="https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/$file"
+# local local_path="$FUNC_DIR/$file"
+
+# echo "⬇️ Downloading $file ..."
+# curl -fsSL "$url" -o "$local_path" || {
+# echo "❌ Failed to fetch $file"
+# exit 1
+# }
+# echo "✔️ Updated $file"
+# }
+
+# update_func_file core.func
+# update_func_file error_handler.func
+# update_func_file tools.func
+# break
+# ;;
+# [Dd]*)
+# for file in core.func error_handler.func tools.func; do
+# local_path="$FUNC_DIR/$file"
+# url="https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/$file"
+# remote_tmp="$(mktemp)"
+
+# curl -fsSL "$url" -o "$remote_tmp" || continue
+
+# if [ -f "$local_path" ]; then
+# echo "🔍 Diff for $file:"
+# diff -u "$local_path" "$remote_tmp" || echo "(no differences)"
+# else
+# echo "📦 New file $file will be installed"
+# fi
+
+# rm -f "$remote_tmp"
+# done
+# ;;
+# *)
+# echo "❌ Skipped updating local core files"
+# break
+# ;;
+# esac
+# done
+# else
+# if [ "$DEVMODE" != "yes" ]; then
+# echo "✔️ build.func unchanged → using existing local core files"
+# fi
+# fi
+
+# if [ -n "${_COMMUNITY_SCRIPTS_LOADER:-}" ]; then
+# return 0 2>/dev/null || exit 0
+# fi
+# _COMMUNITY_SCRIPTS_LOADER=1
+
+# # --- Step 3: always source local versions of the core files ---
+# source "$FUNC_DIR/core.func"
+# source "$FUNC_DIR/error_handler.func"
+# source "$FUNC_DIR/tools.func"
+
+# # --- Step 4: finally, source build.func directly from memory ---
+# # (no tmp file needed)
+# source <(printf "%s" "$build_content")
+
+# ------------------------------------------------------------------------------
+# Load core + error handler functions from community-scripts repo
+#
+# - Prefer curl if available, fallback to wget
+# - Load: core.func, error_handler.func, api.func
+# - Initialize error traps after loading
+# ------------------------------------------------------------------------------
+
+# Load api.func, core.func and error_handler.func, preferring curl and
+# falling back to wget. Previously api.func was fetched via curl
+# unconditionally, which defeated the wget fallback on curl-less hosts.
+if command -v curl >/dev/null 2>&1; then
+  source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/api.func)
+  source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/core.func)
+  source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/error_handler.func)
+  load_functions
+  catch_errors
+  #echo "(build.func) Loaded core.func via curl"
+elif command -v wget >/dev/null 2>&1; then
+  source <(wget -qO- https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/api.func)
+  source <(wget -qO- https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/core.func)
+  source <(wget -qO- https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/error_handler.func)
+  load_functions
+  catch_errors
+  #echo "(build.func) Loaded core.func via wget"
+fi
+
+# ------------------------------------------------------------------------------
+# maxkeys_check()
+#
+# - Reads kernel keyring limits (maxkeys, maxbytes)
+# - Checks current usage for LXC user (UID 100000)
+# - Warns if usage is close to limits and suggests sysctl tuning
+# - Exits if thresholds are exceeded
+# - https://cleveruptime.com/docs/files/proc-key-users | https://docs.kernel.org/security/keys/core.html
+# ------------------------------------------------------------------------------
+
+maxkeys_check() {
+ # Read kernel parameters
+ per_user_maxkeys=$(cat /proc/sys/kernel/keys/maxkeys 2>/dev/null || echo 0)
+ per_user_maxbytes=$(cat /proc/sys/kernel/keys/maxbytes 2>/dev/null || echo 0)
+
+ # Exit if kernel parameters are unavailable
+ if [[ "$per_user_maxkeys" -eq 0 || "$per_user_maxbytes" -eq 0 ]]; then
+ echo -e "${CROSS}${RD} Error: Unable to read kernel parameters. Ensure proper permissions.${CL}"
+ exit 1
+ fi
+
+ # Fetch key usage for user ID 100000 (typical for containers)
+ used_lxc_keys=$(awk '/100000:/ {print $2}' /proc/key-users 2>/dev/null || echo 0)
+ used_lxc_bytes=$(awk '/100000:/ {split($5, a, "/"); print a[1]}' /proc/key-users 2>/dev/null || echo 0)
+
+ # Calculate thresholds and suggested new limits
+ threshold_keys=$((per_user_maxkeys - 100))
+ threshold_bytes=$((per_user_maxbytes - 1000))
+ new_limit_keys=$((per_user_maxkeys * 2))
+ new_limit_bytes=$((per_user_maxbytes * 2))
+
+ # Check if key or byte usage is near limits
+ failure=0
+ if [[ "$used_lxc_keys" -gt "$threshold_keys" ]]; then
+ echo -e "${CROSS}${RD} Warning: Key usage is near the limit (${used_lxc_keys}/${per_user_maxkeys}).${CL}"
+ echo -e "${INFO} Suggested action: Set ${GN}kernel.keys.maxkeys=${new_limit_keys}${CL} in ${BOLD}/etc/sysctl.d/98-community-scripts.conf${CL}."
+ failure=1
+ fi
+ if [[ "$used_lxc_bytes" -gt "$threshold_bytes" ]]; then
+ echo -e "${CROSS}${RD} Warning: Key byte usage is near the limit (${used_lxc_bytes}/${per_user_maxbytes}).${CL}"
+ echo -e "${INFO} Suggested action: Set ${GN}kernel.keys.maxbytes=${new_limit_bytes}${CL} in ${BOLD}/etc/sysctl.d/98-community-scripts.conf${CL}."
+ failure=1
+ fi
+
+ # Provide next steps if issues are detected
+ if [[ "$failure" -eq 1 ]]; then
+ echo -e "${INFO} To apply changes, run: ${BOLD}service procps force-reload${CL}"
+ exit 1
+ fi
+
+ echo -e "${CM}${GN} All kernel key limits are within safe thresholds.${CL}"
+}
+
+# ------------------------------------------------------------------------------
+# get_current_ip()
+#
+# - Returns current container IP depending on OS type
+# - Debian/Ubuntu: uses `hostname -I`
+# - Alpine: parses eth0 via `ip -4 addr`
+# ------------------------------------------------------------------------------
+get_current_ip() {
+  # Print the container's primary IP, detected per OS family.
+  # Default first so a missing /etc/os-release can never echo a stale or
+  # empty global from a previous call.
+  CURRENT_IP="Unknown"
+  if [ -f /etc/os-release ]; then
+    # Check for Debian/Ubuntu (uses hostname -I)
+    if grep -qE 'ID=debian|ID=ubuntu' /etc/os-release; then
+      CURRENT_IP=$(hostname -I | awk '{print $1}')
+    # Check for Alpine (uses ip command)
+    elif grep -q 'ID=alpine' /etc/os-release; then
+      CURRENT_IP=$(ip -4 addr show eth0 | awk '/inet / {print $2}' | cut -d/ -f1 | head -n 1)
+    fi
+  fi
+  echo "$CURRENT_IP"
+}
+
+# ------------------------------------------------------------------------------
+# update_motd_ip()
+#
+# - Updates /etc/motd with current container IP
+# - Removes old IP entries to avoid duplicates
+# ------------------------------------------------------------------------------
+update_motd_ip() {
+ MOTD_FILE="/etc/motd"
+
+ if [ -f "$MOTD_FILE" ]; then
+ # Remove existing IP Address lines to prevent duplication
+ sed -i '/IP Address:/d' "$MOTD_FILE"
+
+ IP=$(get_current_ip)
+ # Add the new IP address
+ echo -e "${TAB}${NETWORK}${YW} IP Address: ${GN}${IP}${CL}" >>"$MOTD_FILE"
+ fi
+}
+
+# ------------------------------------------------------------------------------
+# install_ssh_keys_into_ct()
+#
+# - Installs SSH keys into container root account if SSH is enabled
+# - Uses pct push or direct input to authorized_keys
+# - Falls back to warning if no keys provided
+# ------------------------------------------------------------------------------
+install_ssh_keys_into_ct() {
+  # Nothing to do unless SSH access was requested.
+  [[ "$SSH" != "yes" ]] && return 0
+
+  if [[ -n "$SSH_KEYS_FILE" && -s "$SSH_KEYS_FILE" ]]; then
+    msg_info "Installing selected SSH keys into CT ${CTID}"
+    pct exec "$CTID" -- sh -c 'mkdir -p /root/.ssh && chmod 700 /root/.ssh' || {
+      msg_error "prepare /root/.ssh failed"
+      return 1
+    }
+    # Prefer pct push; fall back to piping the file through pct exec.
+    pct push "$CTID" "$SSH_KEYS_FILE" /root/.ssh/authorized_keys >/dev/null 2>&1 ||
+      pct exec "$CTID" -- sh -c "cat > /root/.ssh/authorized_keys" <"$SSH_KEYS_FILE" || {
+      msg_error "write authorized_keys failed"
+      return 1
+    }
+    # Tighten permissions; best-effort, failure here is non-fatal.
+    pct exec "$CTID" -- sh -c 'chmod 600 /root/.ssh/authorized_keys' || true
+    msg_ok "Installed SSH keys into CT ${CTID}"
+    return 0
+  fi
+
+  # Fallback: no keys were selected
+  msg_warn "No SSH keys to install (skipping)."
+  return 0
+}
+
+# ------------------------------------------------------------------------------
+# base_settings()
+#
+# - Defines all base/default variables for container creation
+# - Reads from environment variables (var_*)
+# - Provides fallback defaults for OS type/version
+# ------------------------------------------------------------------------------
+base_settings() {
+ # Default Settings
+ CT_TYPE=${var_unprivileged:-"1"}
+ DISK_SIZE=${var_disk:-"4"}
+ CORE_COUNT=${var_cpu:-"1"}
+ RAM_SIZE=${var_ram:-"1024"}
+ VERBOSE=${var_verbose:-"${1:-no}"}
+ PW=${var_pw:-""}
+ CT_ID=${var_ctid:-$NEXTID}
+ HN=${var_hostname:-$NSAPP}
+ BRG=${var_brg:-"vmbr0"}
+ NET=${var_net:-"dhcp"}
+ IPV6_METHOD=${var_ipv6_method:-"none"}
+ IPV6_STATIC=${var_ipv6_static:-""}
+ GATE=${var_gateway:-""}
+ APT_CACHER=${var_apt_cacher:-""}
+ APT_CACHER_IP=${var_apt_cacher_ip:-""}
+ MTU=${var_mtu:-""}
+ SD=${var_storage:-""}
+ NS=${var_ns:-""}
+ MAC=${var_mac:-""}
+ VLAN=${var_vlan:-""}
+ SSH=${var_ssh:-"no"}
+ SSH_AUTHORIZED_KEY=${var_ssh_authorized_key:-""}
+ UDHCPC_FIX=${var_udhcpc_fix:-""}
+ TAGS="community-script,${var_tags:-}"
+ ENABLE_FUSE=${var_fuse:-"${1:-no}"}
+ ENABLE_TUN=${var_tun:-"${1:-no}"}
+
+ # Since these 2 are only defined outside of default_settings function, we add a temporary fallback. TODO: To align everything, we should add these as constant variables (e.g. OSTYPE and OSVERSION), but that would currently require updating the default_settings function for all existing scripts
+ if [ -z "$var_os" ]; then
+ var_os="debian"
+ fi
+ if [ -z "$var_version" ]; then
+ var_version="12"
+ fi
+}
+
+# ------------------------------------------------------------------------------
+# echo_default()
+#
+# - Prints summary of default values (ID, OS, type, disk, RAM, CPU, etc.)
+# - Uses icons and formatting for readability
+# - Convert CT_TYPE to description
+# ------------------------------------------------------------------------------
+echo_default() {
+ CT_TYPE_DESC="Unprivileged"
+ if [ "$CT_TYPE" -eq 0 ]; then
+ CT_TYPE_DESC="Privileged"
+ fi
+ echo -e "${INFO}${BOLD}${DGN}PVE Version ${PVEVERSION} (Kernel: ${KERNEL_VERSION})${CL}"
+ echo -e "${CONTAINERID}${BOLD}${DGN}Container ID: ${BGN}${CT_ID}${CL}"
+ echo -e "${OS}${BOLD}${DGN}Operating System: ${BGN}$var_os ($var_version)${CL}"
+ echo -e "${CONTAINERTYPE}${BOLD}${DGN}Container Type: ${BGN}$CT_TYPE_DESC${CL}"
+ echo -e "${DISKSIZE}${BOLD}${DGN}Disk Size: ${BGN}${DISK_SIZE} GB${CL}"
+ echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}${CORE_COUNT}${CL}"
+ echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}${RAM_SIZE} MiB${CL}"
+ if [ "$VERBOSE" == "yes" ]; then
+ echo -e "${SEARCH}${BOLD}${DGN}Verbose Mode: ${BGN}Enabled${CL}"
+ fi
+ echo -e "${CREATING}${BOLD}${BL}Creating a ${APP} LXC using the above default settings${CL}"
+ echo -e " "
+}
+
+# ------------------------------------------------------------------------------
+# exit_script()
+#
+# - Called when user cancels an action
+# - Clears screen and exits gracefully
+# ------------------------------------------------------------------------------
+exit_script() {
+  # Graceful abort on user cancellation: clear the screen and leave.
+  clear
+  printf '\n%b\n\n' "${CROSS}${RD}User exited script${CL}"
+  exit
+}
+
+# ------------------------------------------------------------------------------
+# find_host_ssh_keys()
+#
+# - Scans system for available SSH keys
+# - Supports defaults (~/.ssh, /etc/ssh/authorized_keys)
+# - Returns list of files containing valid SSH public keys
+# - Sets FOUND_HOST_KEY_COUNT to number of keys found
+# ------------------------------------------------------------------------------
+find_host_ssh_keys() {
+  # Scan the host for files containing SSH public keys.
+  # Prints a colon-separated list of matching files on stdout and sets
+  # FOUND_HOST_KEY_COUNT to the total number of keys found.
+  local re='(ssh-(rsa|ed25519)|ecdsa-sha2-nistp256|sk-(ssh-ed25519|ecdsa-sha2-nistp256))'
+  local -a files=() cand=()
+  local g="${var_ssh_import_glob:-}"
+  local total=0 f base c
+
+  shopt -s nullglob
+  if [[ -n "$g" ]]; then
+    # User-supplied glob(s); word splitting and globbing are intentional here.
+    for pat in $g; do cand+=($pat); done
+  else
+    cand+=(/root/.ssh/authorized_keys /root/.ssh/authorized_keys2)
+    cand+=(/root/.ssh/*.pub)
+    cand+=(/etc/ssh/authorized_keys /etc/ssh/authorized_keys.d/*)
+  fi
+  shopt -u nullglob
+
+  for f in "${cand[@]}"; do
+    [[ -f "$f" && -r "$f" ]] || continue
+    base="$(basename -- "$f")"
+    case "$base" in
+    known_hosts | known_hosts.* | config) continue ;;
+    id_*) [[ "$f" != *.pub ]] && continue ;;
+    esac
+
+    # CRLF-safe count of public-key lines (comments and blanks stripped first).
+    # BUGFIX: the pattern must be double-quoted so $re expands; the previous
+    # '"$re"' searched for the literal string "$re" and always counted 0.
+    c=$(tr -d '\r' <"$f" | awk '
+      /^[[:space:]]*#/ {next}
+      /^[[:space:]]*$/ {next}
+      {print}
+    ' | grep -E -c "$re" || true)
+
+    if ((c > 0)); then
+      files+=("$f")
+      total=$((total + c))
+    fi
+  done
+
+  # Fallback to /root/.ssh/authorized_keys
+  if ((${#files[@]} == 0)) && [[ -r /root/.ssh/authorized_keys ]]; then
+    if grep -E -q "$re" /root/.ssh/authorized_keys; then
+      files+=(/root/.ssh/authorized_keys)
+      total=$((total + $(grep -E -c "$re" /root/.ssh/authorized_keys || echo 0)))
+    fi
+  fi
+
+  FOUND_HOST_KEY_COUNT="$total"
+  # Emit the file list joined with ':' (subshell keeps IFS change local).
+  (
+    IFS=:
+    echo "${files[*]}"
+  )
+}
+
+# ------------------------------------------------------------------------------
+# advanced_settings()
+#
+# - Interactive whiptail menu for advanced configuration
+# - Lets user set container type, password, CT ID, hostname, disk, CPU, RAM
+# - Supports IPv4/IPv6, DNS, MAC, VLAN, tags, SSH keys, FUSE, verbose mode
+# - Ends with confirmation or re-entry if cancelled
+# ------------------------------------------------------------------------------
+advanced_settings() {
+ whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --msgbox --title "Here is an instructional tip:" "To make a selection, use the Spacebar." 8 58
+ # Setting Default Tag for Advanced Settings
+ TAGS="community-script;${var_tags:-}"
+ CT_DEFAULT_TYPE="${CT_TYPE}"
+ CT_TYPE=""
+ while [ -z "$CT_TYPE" ]; do
+ if [ "$CT_DEFAULT_TYPE" == "1" ]; then
+ if CT_TYPE=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --title "CONTAINER TYPE" --radiolist "Choose Type" 10 58 2 \
+ "1" "Unprivileged" ON \
+ "0" "Privileged" OFF \
+ 3>&1 1>&2 2>&3); then
+ if [ -n "$CT_TYPE" ]; then
+ CT_TYPE_DESC="Unprivileged"
+ if [ "$CT_TYPE" -eq 0 ]; then
+ CT_TYPE_DESC="Privileged"
+ fi
+ echo -e "${INFO}${BOLD}${DGN}PVE Version ${PVEVERSION} (Kernel: ${KERNEL_VERSION})${CL}"
+ echo -e "${OS}${BOLD}${DGN}Operating System: ${BGN}$var_os |${OSVERSION}${BOLD}${DGN}Version: ${BGN}$var_version${CL}"
+ echo -e "${CONTAINERTYPE}${BOLD}${DGN}Container Type: ${BGN}$CT_TYPE_DESC${CL}"
+ fi
+ else
+ exit_script
+ fi
+ fi
+ if [ "$CT_DEFAULT_TYPE" == "0" ]; then
+ if CT_TYPE=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --title "CONTAINER TYPE" --radiolist "Choose Type" 10 58 2 \
+ "1" "Unprivileged" OFF \
+ "0" "Privileged" ON \
+ 3>&1 1>&2 2>&3); then
+ if [ -n "$CT_TYPE" ]; then
+ CT_TYPE_DESC="Unprivileged"
+ if [ "$CT_TYPE" -eq 0 ]; then
+ CT_TYPE_DESC="Privileged"
+ fi
+ echo -e "${INFO}${BOLD}${DGN}PVE Version ${PVEVERSION} (Kernel: ${KERNEL_VERSION})${CL}"
+ echo -e "${OS}${BOLD}${DGN}Operating System: ${BGN}$var_os${CL}"
+ echo -e "${OSVERSION}${BOLD}${DGN}Version: ${BGN}$var_version${CL}"
+ echo -e "${CONTAINERTYPE}${BOLD}${DGN}Container Type: ${BGN}$CT_TYPE_DESC${CL}"
+ fi
+ else
+ exit_script
+ fi
+ fi
+ done
+
+ while true; do
+ if PW1=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --passwordbox "\nSet Root Password (needed for root ssh access)" 9 58 --title "PASSWORD (leave blank for automatic login)" 3>&1 1>&2 2>&3); then
+ # Empty = Autologin
+ if [[ -z "$PW1" ]]; then
+ PW=""
+ PW1="Automatic Login"
+ echo -e "${VERIFYPW}${BOLD}${DGN}Root Password: ${BGN}$PW1${CL}"
+ break
+ fi
+
+ # Invalid: contains spaces
+ if [[ "$PW1" == *" "* ]]; then
+ whiptail --msgbox "Password cannot contain spaces." 8 58
+ continue
+ fi
+
+ # Invalid: too short
+ if ((${#PW1} < 5)); then
+ whiptail --msgbox "Password must be at least 5 characters." 8 58
+ continue
+ fi
+
+ # Confirm password
+ if PW2=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --passwordbox "\nVerify Root Password" 9 58 --title "PASSWORD VERIFICATION" 3>&1 1>&2 2>&3); then
+ if [[ "$PW1" == "$PW2" ]]; then
+ PW="-password $PW1"
+ echo -e "${VERIFYPW}${BOLD}${DGN}Root Password: ${BGN}********${CL}"
+ break
+ else
+ whiptail --msgbox "Passwords do not match. Please try again." 8 58
+ fi
+ else
+ exit_script
+ fi
+ else
+ exit_script
+ fi
+ done
+
+ if CT_ID=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Container ID" 8 58 "$NEXTID" --title "CONTAINER ID" 3>&1 1>&2 2>&3); then
+ if [ -z "$CT_ID" ]; then
+ CT_ID="$NEXTID"
+ fi
+ else
+ exit_script
+ fi
+ echo -e "${CONTAINERID}${BOLD}${DGN}Container ID: ${BGN}$CT_ID${CL}"
+
+ while true; do
+ if CT_NAME=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --inputbox "Set Hostname" 8 58 "$NSAPP" --title "HOSTNAME" 3>&1 1>&2 2>&3); then
+ if [ -z "$CT_NAME" ]; then
+ HN="$NSAPP"
+ else
+ HN=$(echo "${CT_NAME,,}" | tr -d ' ')
+ fi
+ # Hostname validate (RFC 1123)
+ if [[ "$HN" =~ ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ ]]; then
+ echo -e "${HOSTNAME}${BOLD}${DGN}Hostname: ${BGN}$HN${CL}"
+ break
+ else
+ whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \
+ --msgbox "❌ Invalid hostname: '$HN'\n\nOnly lowercase letters, digits and hyphens (-) are allowed.\nUnderscores (_) or other characters are not permitted!" 10 70
+ fi
+ else
+ exit_script
+ fi
+ done
+
+ while true; do
+ DISK_SIZE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Disk Size in GB" 8 58 "$var_disk" --title "DISK SIZE" 3>&1 1>&2 2>&3) || exit_script
+
+ if [ -z "$DISK_SIZE" ]; then
+ DISK_SIZE="$var_disk"
+ fi
+
+ if [[ "$DISK_SIZE" =~ ^[1-9][0-9]*$ ]]; then
+ echo -e "${DISKSIZE}${BOLD}${DGN}Disk Size: ${BGN}${DISK_SIZE} GB${CL}"
+ break
+ else
+ whiptail --msgbox "Disk size must be a positive integer!" 8 58
+ fi
+ done
+
+ while true; do
+ CORE_COUNT=$(whiptail --backtitle "Proxmox VE Helper Scripts" \
+ --inputbox "Allocate CPU Cores" 8 58 "$var_cpu" --title "CORE COUNT" 3>&1 1>&2 2>&3) || exit_script
+
+ if [ -z "$CORE_COUNT" ]; then
+ CORE_COUNT="$var_cpu"
+ fi
+
+ if [[ "$CORE_COUNT" =~ ^[1-9][0-9]*$ ]]; then
+ echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}$CORE_COUNT${CL}"
+ break
+ else
+ whiptail --msgbox "CPU core count must be a positive integer!" 8 58
+ fi
+ done
+
+ while true; do
+ RAM_SIZE=$(whiptail --backtitle "Proxmox VE Helper Scripts" \
+ --inputbox "Allocate RAM in MiB" 8 58 "$var_ram" --title "RAM" 3>&1 1>&2 2>&3) || exit_script
+
+ if [ -z "$RAM_SIZE" ]; then
+ RAM_SIZE="$var_ram"
+ fi
+
+ if [[ "$RAM_SIZE" =~ ^[1-9][0-9]*$ ]]; then
+ echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}${RAM_SIZE} MiB${CL}"
+ break
+ else
+ whiptail --msgbox "RAM size must be a positive integer!" 8 58
+ fi
+ done
+
+ IFACE_FILEPATH_LIST="/etc/network/interfaces"$'\n'$(find "/etc/network/interfaces.d/" -type f)
+ BRIDGES=""
+ OLD_IFS=$IFS
+ IFS=$'\n'
+ for iface_filepath in ${IFACE_FILEPATH_LIST}; do
+
+ iface_indexes_tmpfile=$(mktemp -q -u '.iface-XXXX')
+ (grep -Pn '^\s*iface' "${iface_filepath}" | cut -d':' -f1 && wc -l "${iface_filepath}" | cut -d' ' -f1) | awk 'FNR==1 {line=$0; next} {print line":"$0-1; line=$0}' >"${iface_indexes_tmpfile}" || true
+
+ if [ -f "${iface_indexes_tmpfile}" ]; then
+
+ while read -r pair; do
+ start=$(echo "${pair}" | cut -d':' -f1)
+ end=$(echo "${pair}" | cut -d':' -f2)
+
+ if awk "NR >= ${start} && NR <= ${end}" "${iface_filepath}" | grep -qP '^\s*(bridge[-_](ports|stp|fd|vlan-aware|vids)|ovs_type\s+OVSBridge)\b'; then
+ iface_name=$(sed "${start}q;d" "${iface_filepath}" | awk '{print $2}')
+ BRIDGES="${iface_name}"$'\n'"${BRIDGES}"
+ fi
+
+ done <"${iface_indexes_tmpfile}"
+ rm -f "${iface_indexes_tmpfile}"
+ fi
+
+ done
+ IFS=$OLD_IFS
+ BRIDGES=$(echo "$BRIDGES" | grep -v '^\s*$' | sort | uniq)
+ if [[ -z "$BRIDGES" ]]; then
+ BRG="vmbr0"
+ echo -e "${BRIDGE}${BOLD}${DGN}Bridge: ${BGN}$BRG${CL}"
+ else
+ # Build bridge menu with descriptions
+ BRIDGE_MENU_OPTIONS=()
+ while IFS= read -r bridge; do
+ if [[ -n "$bridge" ]]; then
+ # Get description from Proxmox built-in method - find comment for this specific bridge
+ description=$(grep -A 10 "iface $bridge" /etc/network/interfaces | grep '^#' | head -n1 | sed 's/^#\s*//')
+ if [[ -n "$description" ]]; then
+ BRIDGE_MENU_OPTIONS+=("$bridge" "${description}")
+ else
+ BRIDGE_MENU_OPTIONS+=("$bridge" " ")
+ fi
+ fi
+ done <<<"$BRIDGES"
+
+ BRG=$(whiptail --backtitle "Proxmox VE Helper Scripts" --menu "Select network bridge: " 18 55 6 "${BRIDGE_MENU_OPTIONS[@]}" 3>&1 1>&2 2>&3)
+ if [[ -z "$BRG" ]]; then
+ exit_script
+ else
+ echo -e "${BRIDGE}${BOLD}${DGN}Bridge: ${BGN}$BRG${CL}"
+ fi
+ fi
+
+ # IPv4 methods: dhcp, static, none
+ while true; do
+ IPV4_METHOD=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \
+ --title "IPv4 Address Management" \
+ --menu "Select IPv4 Address Assignment Method:" 12 60 2 \
+ "dhcp" "Automatic (DHCP, recommended)" \
+ "static" "Static (manual entry)" \
+ 3>&1 1>&2 2>&3)
+
+ exit_status=$?
+ if [ $exit_status -ne 0 ]; then
+ exit_script
+ fi
+
+ case "$IPV4_METHOD" in
+ dhcp)
+ NET="dhcp"
+ GATE=""
+ echo -e "${NETWORK}${BOLD}${DGN}IPv4: DHCP${CL}"
+ break
+ ;;
+ static)
+ # Static: call and validate CIDR address
+ while true; do
+ NET=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \
+ --inputbox "Enter Static IPv4 CIDR Address (e.g. 192.168.100.50/24)" 8 58 "" \
+ --title "IPv4 ADDRESS" 3>&1 1>&2 2>&3)
+ if [ -z "$NET" ]; then
+ whiptail --msgbox "IPv4 address must not be empty." 8 58
+ continue
+ elif [[ "$NET" =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}/([0-9]|[1-2][0-9]|3[0-2])$ ]]; then
+ echo -e "${NETWORK}${BOLD}${DGN}IPv4 Address: ${BGN}$NET${CL}"
+ break
+ else
+ whiptail --msgbox "$NET is not a valid IPv4 CIDR address. Please enter a correct value!" 8 58
+ fi
+ done
+
+ # call and validate Gateway
+ while true; do
+ GATE1=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \
+ --inputbox "Enter Gateway IP address for static IPv4" 8 58 "" \
+ --title "Gateway IP" 3>&1 1>&2 2>&3)
+ if [ -z "$GATE1" ]; then
+ whiptail --msgbox "Gateway IP address cannot be empty." 8 58
+ elif [[ ! "$GATE1" =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}$ ]]; then
+ whiptail --msgbox "Invalid Gateway IP address format." 8 58
+ else
+ GATE=",gw=$GATE1"
+ echo -e "${GATEWAY}${BOLD}${DGN}Gateway IP Address: ${BGN}$GATE1${CL}"
+ break
+ fi
+ done
+ break
+ ;;
+ esac
+ done
+
+ # IPv6 Address Management selection
+ while true; do
+ IPV6_METHOD=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --menu \
+ "Select IPv6 Address Management Type:" 15 58 4 \
+ "auto" "SLAAC/AUTO (recommended, default)" \
+ "dhcp" "DHCPv6" \
+ "static" "Static (manual entry)" \
+ "none" "Disabled" \
+ --default-item "auto" 3>&1 1>&2 2>&3)
+ [ $? -ne 0 ] && exit_script
+
+ case "$IPV6_METHOD" in
+ auto)
+ echo -e "${NETWORK}${BOLD}${DGN}IPv6: ${BGN}SLAAC/AUTO${CL}"
+ IPV6_ADDR=""
+ IPV6_GATE=""
+ break
+ ;;
+ dhcp)
+ echo -e "${NETWORK}${BOLD}${DGN}IPv6: ${BGN}DHCPv6${CL}"
+ IPV6_ADDR="dhcp"
+ IPV6_GATE=""
+ break
+ ;;
+ static)
+ # Ask for static IPv6 address (CIDR notation, e.g., 2001:db8::1234/64)
+ while true; do
+ IPV6_ADDR=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --inputbox \
+ "Set a static IPv6 CIDR address (e.g., 2001:db8::1234/64)" 8 58 "" \
+ --title "IPv6 STATIC ADDRESS" 3>&1 1>&2 2>&3) || exit_script
+ if [[ "$IPV6_ADDR" =~ ^([0-9a-fA-F:]+:+)+[0-9a-fA-F]+(/[0-9]{1,3})$ ]]; then
+ echo -e "${NETWORK}${BOLD}${DGN}IPv6 Address: ${BGN}$IPV6_ADDR${CL}"
+ break
+ else
+ whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --msgbox \
+ "$IPV6_ADDR is an invalid IPv6 CIDR address. Please enter a valid IPv6 CIDR address (e.g., 2001:db8::1234/64)" 8 58
+ fi
+ done
+ # Optional: ask for IPv6 gateway for static config
+ while true; do
+ IPV6_GATE=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --inputbox \
+ "Enter IPv6 gateway address (optional, leave blank for none)" 8 58 "" --title "IPv6 GATEWAY" 3>&1 1>&2 2>&3)
+ if [ -z "$IPV6_GATE" ]; then
+ IPV6_GATE=""
+ break
+ elif [[ "$IPV6_GATE" =~ ^([0-9a-fA-F:]+:+)+[0-9a-fA-F]+$ ]]; then
+ break
+ else
+ whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --msgbox \
+ "Invalid IPv6 gateway format." 8 58
+ fi
+ done
+ break
+ ;;
+ none)
+ echo -e "${NETWORK}${BOLD}${DGN}IPv6: ${BGN}Disabled${CL}"
+ IPV6_ADDR="none"
+ IPV6_GATE=""
+ break
+ ;;
+ *)
+ exit_script
+ ;;
+ esac
+ done
+
+ if [ "$var_os" == "alpine" ]; then
+ APT_CACHER=""
+ APT_CACHER_IP=""
+ else
+ if APT_CACHER_IP=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --inputbox "Set APT-Cacher IP (leave blank for none)" 8 58 --title "APT-Cacher IP" 3>&1 1>&2 2>&3); then
+ APT_CACHER="${APT_CACHER_IP:+yes}"
+ echo -e "${NETWORK}${BOLD}${DGN}APT-Cacher IP Address: ${BGN}${APT_CACHER_IP:-Default}${CL}"
+ else
+ exit_script
+ fi
+ fi
+
+ # if (whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --defaultno --title "IPv6" --yesno "Disable IPv6?" 10 58); then
+ # DISABLEIP6="yes"
+ # else
+ # DISABLEIP6="no"
+ # fi
+ # echo -e "${DISABLEIPV6}${BOLD}${DGN}Disable IPv6: ${BGN}$DISABLEIP6${CL}"
+
+ if MTU1=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --inputbox "Set Interface MTU Size (leave blank for default [The MTU of your selected vmbr, default is 1500])" 8 58 --title "MTU SIZE" 3>&1 1>&2 2>&3); then
+ if [ -z "$MTU1" ]; then
+ MTU1="Default"
+ MTU=""
+ else
+ MTU=",mtu=$MTU1"
+ fi
+ echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}$MTU1${CL}"
+ else
+ exit_script
+ fi
+
+ if SD=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --inputbox "Set a DNS Search Domain (leave blank for HOST)" 8 58 --title "DNS Search Domain" 3>&1 1>&2 2>&3); then
+ if [ -z "$SD" ]; then
+ SX=Host
+ SD=""
+ else
+ SX=$SD
+ SD="-searchdomain=$SD"
+ fi
+ echo -e "${SEARCH}${BOLD}${DGN}DNS Search Domain: ${BGN}$SX${CL}"
+ else
+ exit_script
+ fi
+
+ if NX=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --inputbox "Set a DNS Server IP (leave blank for HOST)" 8 58 --title "DNS SERVER IP" 3>&1 1>&2 2>&3); then
+ if [ -z "$NX" ]; then
+ NX=Host
+ NS=""
+ else
+ NS="-nameserver=$NX"
+ fi
+ echo -e "${NETWORK}${BOLD}${DGN}DNS Server IP Address: ${BGN}$NX${CL}"
+ else
+ exit_script
+ fi
+
+ if [ "$var_os" == "alpine" ] && [ "$NET" == "dhcp" ] && [ "$NX" != "Host" ]; then
+ UDHCPC_FIX="yes"
+ else
+ UDHCPC_FIX="no"
+ fi
+ export UDHCPC_FIX
+
+ if MAC1=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --inputbox "Set a MAC Address(leave blank for generated MAC)" 8 58 --title "MAC ADDRESS" 3>&1 1>&2 2>&3); then
+ if [ -z "$MAC1" ]; then
+ MAC1="Default"
+ MAC=""
+ else
+ MAC=",hwaddr=$MAC1"
+ echo -e "${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}$MAC1${CL}"
+ fi
+ else
+ exit_script
+ fi
+
+ if VLAN1=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --inputbox "Set a Vlan(leave blank for no VLAN)" 8 58 --title "VLAN" 3>&1 1>&2 2>&3); then
+ if [ -z "$VLAN1" ]; then
+ VLAN1="Default"
+ VLAN=""
+ else
+ VLAN=",tag=$VLAN1"
+ fi
+ echo -e "${VLANTAG}${BOLD}${DGN}Vlan: ${BGN}$VLAN1${CL}"
+ else
+ exit_script
+ fi
+
+ if ADV_TAGS=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --inputbox "Set Custom Tags?[If you remove all, there will be no tags!]" 8 58 "${TAGS}" --title "Advanced Tags" 3>&1 1>&2 2>&3); then
+ if [ -n "${ADV_TAGS}" ]; then
+ ADV_TAGS=$(echo "$ADV_TAGS" | tr -d '[:space:]')
+ TAGS="${ADV_TAGS}"
+ else
+ TAGS=";"
+ fi
+ echo -e "${NETWORK}${BOLD}${DGN}Tags: ${BGN}$TAGS${CL}"
+ else
+ exit_script
+ fi
+
+ configure_ssh_settings
+ export SSH_KEYS_FILE
+ echo -e "${ROOTSSH}${BOLD}${DGN}Root SSH Access: ${BGN}$SSH${CL}"
+ if (whiptail --backtitle "Proxmox VE Helper Scripts" --defaultno --title "FUSE Support" --yesno "Enable FUSE support?\nRequired for tools like rclone, mergerfs, AppImage, etc." 10 58); then
+ ENABLE_FUSE="yes"
+ else
+ ENABLE_FUSE="no"
+ fi
+ echo -e "${FUSE}${BOLD}${DGN}Enable FUSE Support: ${BGN}$ENABLE_FUSE${CL}"
+
+ if (whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --defaultno --title "VERBOSE MODE" --yesno "Enable Verbose Mode?" 10 58); then
+ VERBOSE="yes"
+ else
+ VERBOSE="no"
+ fi
+ echo -e "${SEARCH}${BOLD}${DGN}Verbose Mode: ${BGN}$VERBOSE${CL}"
+
+ if (whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --title "ADVANCED SETTINGS COMPLETE" --yesno "Ready to create ${APP} LXC?" 10 58); then
+ echo -e "${CREATING}${BOLD}${RD}Creating a ${APP} LXC using the above advanced settings${CL}"
+ else
+ clear
+ header_info
+ echo -e "${INFO}${BOLD}${DGN}PVE Version ${PVEVERSION} (Kernel: ${KERNEL_VERSION})${CL}"
+ echo -e "${ADVANCED}${BOLD}${RD}Using Advanced Settings on node $PVEHOST_NAME${CL}"
+ advanced_settings
+ fi
+}
+
+# ------------------------------------------------------------------------------
+# diagnostics_check()
+#
+# - Ensures diagnostics config file exists at /usr/local/community-scripts/diagnostics
+# - Asks user whether to send anonymous diagnostic data
+# - Saves DIAGNOSTICS=yes/no in the config file
+# ------------------------------------------------------------------------------
+diagnostics_check() {
+  # Ensure the diagnostics config file exists at
+  # /usr/local/community-scripts/diagnostics, prompting the user on first run,
+  # and load DIAGNOSTICS=yes/no from it on subsequent runs.
+  if ! [ -d "/usr/local/community-scripts" ]; then
+    mkdir -p /usr/local/community-scripts
+  fi
+
+  if ! [ -f "/usr/local/community-scripts/diagnostics" ]; then
+    if (whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --title "DIAGNOSTICS" --yesno "Send Diagnostics of LXC Installation?\n\n(This only transmits data without user data, just RAM, CPU, LXC name, ...)" 10 58); then
+      # FIX: was 'cat </usr/local/community-scripts/diagnostics', which tried
+      # to READ the (nonexistent) file and left the heredoc body to be
+      # executed as shell commands. Write the file via a heredoc instead.
+      cat <<EOF >/usr/local/community-scripts/diagnostics
+DIAGNOSTICS=yes
+
+#This file is used to store the diagnostics settings for the Community-Scripts API.
+#https://github.com/community-scripts/ProxmoxVED/discussions/1836
+#Your diagnostics will be sent to the Community-Scripts API for troubleshooting/statistical purposes.
+#You can review the data at https://community-scripts.github.io/ProxmoxVE/data
+#If you do not wish to send diagnostics, please set the variable 'DIAGNOSTICS' to "no" in /usr/local/community-scripts/diagnostics, or use the menue.
+#This will disable the diagnostics feature.
+#To send diagnostics, set the variable 'DIAGNOSTICS' to "yes" in /usr/local/community-scripts/diagnostics, or use the menue.
+#This will enable the diagnostics feature.
+#The following information will be sent:
+#"disk_size"
+#"core_count"
+#"ram_size"
+#"os_type"
+#"os_version"
+#"nsapp"
+#"method"
+#"pve_version"
+#"status"
+#If you have any concerns, please review the source code at /misc/build.func
+EOF
+      DIAGNOSTICS="yes"
+    else
+      # Same heredoc fix as above, for the opt-out branch.
+      cat <<EOF >/usr/local/community-scripts/diagnostics
+DIAGNOSTICS=no
+
+#This file is used to store the diagnostics settings for the Community-Scripts API.
+#https://github.com/community-scripts/ProxmoxVED/discussions/1836
+#Your diagnostics will be sent to the Community-Scripts API for troubleshooting/statistical purposes.
+#You can review the data at https://community-scripts.github.io/ProxmoxVE/data
+#If you do not wish to send diagnostics, please set the variable 'DIAGNOSTICS' to "no" in /usr/local/community-scripts/diagnostics, or use the menue.
+#This will disable the diagnostics feature.
+#To send diagnostics, set the variable 'DIAGNOSTICS' to "yes" in /usr/local/community-scripts/diagnostics, or use the menue.
+#This will enable the diagnostics feature.
+#The following information will be sent:
+#"disk_size"
+#"core_count"
+#"ram_size"
+#"os_type"
+#"os_version"
+#"nsapp"
+#"method"
+#"pve_version"
+#"status"
+#If you have any concerns, please review the source code at /misc/build.func
+EOF
+      DIAGNOSTICS="no"
+    fi
+  else
+    # File already exists: read the stored DIAGNOSTICS=yes/no value.
+    DIAGNOSTICS=$(awk -F '=' '/^DIAGNOSTICS/ {print $2}' /usr/local/community-scripts/diagnostics)
+  fi
+}
+
+# ------------------------------------------------------------------------------
+# default_var_settings
+#
+# - Ensures /usr/local/community-scripts/default.vars exists (creates if missing)
+# - Loads var_* values from default.vars (safe parser, no source/eval)
+# - Precedence: ENV var_* > default.vars > built-in defaults
+# - Maps var_verbose → VERBOSE
+# - Calls base_settings "$VERBOSE" and echo_default
+# ------------------------------------------------------------------------------
+default_var_settings() {
+  # Load global var_* defaults with precedence: ENV var_* > default.vars >
+  # built-ins; then apply base_settings and print the summary.
+  # Allowed var_* keys (alphabetically sorted)
+  local VAR_WHITELIST=(
+    var_apt_cacher var_apt_cacher_ip var_brg var_cpu var_ctid var_disk var_fuse
+    var_gateway var_hostname var_ipv6_method var_ipv6_static var_mac var_mtu
+    var_net var_ns var_pw var_ram var_tags var_tun var_unprivileged
+    var_verbose var_vlan var_ssh var_ssh_authorized_key var_container_storage var_template_storage
+  )
+
+  # Snapshot: environment variables (highest precedence)
+  declare -A _HARD_ENV=()
+  local _k
+  for _k in "${VAR_WHITELIST[@]}"; do
+    if printenv "$_k" >/dev/null 2>&1; then _HARD_ENV["$_k"]=1; fi
+  done
+
+  # Find default.vars location (first existing candidate wins)
+  local _find_default_vars
+  _find_default_vars() {
+    local f
+    for f in \
+      /usr/local/community-scripts/default.vars \
+      "$HOME/.config/community-scripts/default.vars" \
+      "./default.vars"; do
+      [ -f "$f" ] && {
+        echo "$f"
+        return 0
+      }
+    done
+    return 1
+  }
+  # Allow override of storages via env (for non-interactive use cases)
+  [ -n "${var_template_storage:-}" ] && TEMPLATE_STORAGE="$var_template_storage"
+  [ -n "${var_container_storage:-}" ] && CONTAINER_STORAGE="$var_container_storage"
+
+  # Create once, with storages already selected, no var_ctid/var_hostname lines
+  local _ensure_default_vars
+  _ensure_default_vars() {
+    _find_default_vars >/dev/null 2>&1 && return 0
+
+    local canonical="/usr/local/community-scripts/default.vars"
+    msg_info "No default.vars found. Creating ${canonical}"
+    mkdir -p /usr/local/community-scripts
+
+    # Pick storages before writing the file (always ask unless only one)
+    # Create a minimal temp file to write into
+    : >"$canonical"
+
+    # Base content (no var_ctid / var_hostname here); quoted 'EOF' heredoc, so
+    # the template is written verbatim with no expansion.
+    cat >"$canonical" <<'EOF'
+# Community-Scripts defaults (var_* only). Lines starting with # are comments.
+# Precedence: ENV var_* > default.vars > built-ins.
+# Keep keys alphabetically sorted.
+
+# Container type
+var_unprivileged=1
+
+# Resources
+var_cpu=1
+var_disk=4
+var_ram=1024
+
+# Network
+var_brg=vmbr0
+var_net=dhcp
+var_ipv6_method=none
+# var_gateway=
+# var_ipv6_static=
+# var_vlan=
+# var_mtu=
+# var_mac=
+# var_ns=
+
+# SSH
+var_ssh=no
+# var_ssh_authorized_key=
+
+# APT cacher (optional)
+# var_apt_cacher=yes
+# var_apt_cacher_ip=192.168.1.10
+
+# Features/Tags/verbosity
+var_fuse=no
+var_tun=no
+var_tags=community-script
+var_verbose=no
+
+# Security (root PW) – empty => autologin
+# var_pw=
+EOF
+
+    # Now choose storages (always prompt unless just one exists)
+    choose_and_set_storage_for_file "$canonical" template
+    choose_and_set_storage_for_file "$canonical" container
+
+    chmod 0644 "$canonical"
+    msg_ok "Created ${canonical}"
+  }
+
+  # Whitelist check
+  local _is_whitelisted_key
+  _is_whitelisted_key() {
+    local k="$1"
+    local w
+    for w in "${VAR_WHITELIST[@]}"; do [ "$k" = "$w" ] && return 0; done
+    return 1
+  }
+
+  # Safe parser for KEY=VALUE lines (no source/eval of untrusted content)
+  local _load_vars_file
+  _load_vars_file() {
+    local file="$1"
+    [ -f "$file" ] || return 0
+    msg_info "Loading defaults from ${file}"
+    local line key val
+    while IFS= read -r line || [ -n "$line" ]; do
+      # Trim leading/trailing whitespace, skip blanks and comments.
+      line="${line#"${line%%[![:space:]]*}"}"
+      line="${line%"${line##*[![:space:]]}"}"
+      [[ -z "$line" || "$line" == \#* ]] && continue
+      if [[ "$line" =~ ^([A-Za-z_][A-Za-z0-9_]*)=(.*)$ ]]; then
+        local var_key="${BASH_REMATCH[1]}"
+        local var_val="${BASH_REMATCH[2]}"
+
+        [[ "$var_key" != var_* ]] && continue
+        _is_whitelisted_key "$var_key" || {
+          msg_debug "Ignore non-whitelisted ${var_key}"
+          continue
+        }
+
+        # Strip quotes
+        if [[ "$var_val" =~ ^\"(.*)\"$ ]]; then
+          var_val="${BASH_REMATCH[1]}"
+        elif [[ "$var_val" =~ ^\'(.*)\'$ ]]; then
+          var_val="${BASH_REMATCH[1]}"
+        fi
+
+        # NOTE(review): second quote-strip pass — redundant after the regex
+        # strip above, but harmless; kept as-is.
+        case $var_val in
+        \"*\")
+          var_val=${var_val#\"}
+          var_val=${var_val%\"}
+          ;;
+        \'*\')
+          var_val=${var_val#\'}
+          var_val=${var_val%\'}
+          ;;
+        esac # Hard env wins
+        [[ -n "${_HARD_ENV[$var_key]:-}" ]] && continue
+        # Set only if not already exported
+        [[ -z "${!var_key+x}" ]] && export "${var_key}=${var_val}"
+      else
+        msg_warn "Malformed line in ${file}: ${line}"
+      fi
+    done <"$file"
+    msg_ok "Loaded ${file}"
+  }
+
+  # 1) Ensure file exists
+  _ensure_default_vars
+
+  # 2) Load file
+  local dv
+  dv="$(_find_default_vars)" || {
+    msg_error "default.vars not found after ensure step"
+    return 1
+  }
+  _load_vars_file "$dv"
+
+  # 3) Map var_verbose → VERBOSE (accept common truthy/falsy spellings)
+  if [[ -n "${var_verbose:-}" ]]; then
+    case "${var_verbose,,}" in 1 | yes | true | on) VERBOSE="yes" ;; 0 | no | false | off) VERBOSE="no" ;; *) VERBOSE="${var_verbose}" ;; esac
+  else
+    VERBOSE="no"
+  fi
+
+  # 4) Apply base settings and show summary
+  METHOD="mydefaults-global"
+  base_settings "$VERBOSE"
+  header_info
+  echo -e "${DEFAULT}${BOLD}${BL}Using My Defaults (default.vars) on node $PVEHOST_NAME${CL}"
+  echo_default
+}
+
+# ------------------------------------------------------------------------------
+# get_app_defaults_path()
+#
+# - Returns full path for app-specific defaults file
+# - Example: /usr/local/community-scripts/defaults/<app>.vars
+# ------------------------------------------------------------------------------
+
+get_app_defaults_path() {
+  # Print the per-app defaults file path, e.g.
+  # /usr/local/community-scripts/defaults/myapp.vars
+  # Prefers NSAPP; falls back to the lowercased APP name.
+  local app_slug="${NSAPP:-${APP,,}}"
+  echo "/usr/local/community-scripts/defaults/${app_slug}.vars"
+}
+
+# ------------------------------------------------------------------------------
+# maybe_offer_save_app_defaults
+#
+# - Called after advanced_settings returned with fully chosen values.
+# - If no .vars exists, offers to persist current advanced settings
+# into /usr/local/community-scripts/defaults/<app>.vars
+# - Only writes whitelisted var_* keys.
+# - Extracts raw values from flags like ",gw=..." ",mtu=..." etc.
+# ------------------------------------------------------------------------------
+# Fallback: declare a global VAR_WHITELIST only if no array of that name is
+# already defined, so the helper functions below (whitelist checks, map
+# loaders) can run even when default_var_settings() was never called.
+if ! declare -p VAR_WHITELIST >/dev/null 2>&1; then
+  declare -ag VAR_WHITELIST=(
+    var_apt_cacher var_apt_cacher_ip var_brg var_cpu var_ctid var_disk var_fuse
+    var_gateway var_hostname var_ipv6_method var_ipv6_static var_mac var_mtu
+    var_net var_ns var_pw var_ram var_tags var_tun var_unprivileged
+    var_verbose var_vlan var_ssh var_ssh_authorized_key var_container_storage var_template_storage
+  )
+fi
+
+# Note: _is_whitelisted_key() is defined above in default_var_settings section
+
+_sanitize_value() {
+ # Disallow Command-Substitution / Shell-Meta
+ case "$1" in
+ *'$('* | *'`'* | *';'* | *'&'* | *'<('*)
+ echo ""
+ return 0
+ ;;
+ esac
+ echo "$1"
+}
+
+# Map-Parser: read var_* from file into _VARS_IN associative array
+# Note: Main _load_vars_file() with full validation is defined in default_var_settings section
+# This simplified version is used specifically for diff operations via _VARS_IN array
+declare -A _VARS_IN
+_load_vars_file_to_map() {
+  # $1 = vars file; result goes into the global _VARS_IN (cleared first).
+  local file="$1"
+  [ -f "$file" ] || return 0
+  _VARS_IN=() # Clear array
+  local line key val
+  while IFS= read -r line || [ -n "$line" ]; do
+    # Trim surrounding whitespace; skip blank lines and # comments.
+    line="${line#"${line%%[![:space:]]*}"}"
+    line="${line%"${line##*[![:space:]]}"}"
+    [ -z "$line" ] && continue
+    case "$line" in
+    \#*) continue ;;
+    esac
+    # Split on the first '='; the value may itself contain '=' (-f2-).
+    key=$(printf "%s" "$line" | cut -d= -f1)
+    val=$(printf "%s" "$line" | cut -d= -f2-)
+    case "$key" in
+    var_*)
+      # Keep only whitelisted var_* keys.
+      if _is_whitelisted_key "$key"; then
+        _VARS_IN["$key"]="$val"
+      fi
+      ;;
+    esac
+  done <"$file"
+}
+
+# Diff function for two var_* files -> produces human-readable diff list for $1 (old) vs $2 (new)
+_build_vars_diff() {
+  local oldf="$1" newf="$2"
+  local k
+  local -A OLD=() NEW=()
+  # Load both files via the shared map parser (each call clobbers _VARS_IN,
+  # so copy the results out between calls).
+  _load_vars_file_to_map "$oldf"
+  for k in "${!_VARS_IN[@]}"; do OLD["$k"]="${_VARS_IN[$k]}"; done
+  _load_vars_file_to_map "$newf"
+  for k in "${!_VARS_IN[@]}"; do NEW["$k"]="${_VARS_IN[$k]}"; done
+
+  local out
+  out+="# Diff for ${APP} (${NSAPP})\n"
+  out+="# Old: ${oldf}\n# New: ${newf}\n\n"
+
+  local found_change=0
+
+  # Changed & Removed
+  for k in "${!OLD[@]}"; do
+    if [[ -v NEW["$k"] ]]; then
+      if [[ "${OLD[$k]}" != "${NEW[$k]}" ]]; then
+        out+="~ ${k}\n - old: ${OLD[$k]}\n + new: ${NEW[$k]}\n"
+        found_change=1
+      fi
+    else
+      out+="- ${k}\n - old: ${OLD[$k]}\n"
+      found_change=1
+    fi
+  done
+
+  # Added
+  for k in "${!NEW[@]}"; do
+    if [[ ! -v OLD["$k"] ]]; then
+      out+="+ ${k}\n + new: ${NEW[$k]}\n"
+      found_change=1
+    fi
+  done
+
+  # The literal "(No differences)" marker is grepped for by the caller
+  # (maybe_offer_save_app_defaults) — keep it in sync.
+  if [[ $found_change -eq 0 ]]; then
+    out+="(No differences)\n"
+  fi
+
+  # %b expands the \n escapes accumulated above.
+  printf "%b" "$out"
+}
+
+# Build a temporary .vars file from current advanced settings
+_build_current_app_vars_tmp() {
+  # Writes a fresh temp vars file mirroring the currently selected advanced
+  # settings and echoes its path (the only stdout output callers consume).
+  tmpf="$(mktemp /tmp/${NSAPP:-app}.vars.new.XXXXXX)"
+
+  # NET/GW — strip the ",gw=" pct flag prefix to recover the raw gateway.
+  _net="${NET:-}"
+  _gate=""
+  case "${GATE:-}" in
+  ,gw=*) _gate=$(echo "$GATE" | sed 's/^,gw=//') ;;
+  esac
+
+  # IPv6 — static address/gateway only apply when the method is "static".
+  _ipv6_method="${IPV6_METHOD:-auto}"
+  _ipv6_static=""
+  _ipv6_gateway=""
+  if [ "$_ipv6_method" = "static" ]; then
+    _ipv6_static="${IPV6_ADDR:-}"
+    _ipv6_gateway="${IPV6_GATE:-}"
+  fi
+
+  # MTU/VLAN/MAC — strip the flag prefixes (",mtu=", ",tag=", ",hwaddr=").
+  _mtu=""
+  _vlan=""
+  _mac=""
+  case "${MTU:-}" in
+  ,mtu=*) _mtu=$(echo "$MTU" | sed 's/^,mtu=//') ;;
+  esac
+  case "${VLAN:-}" in
+  ,tag=*) _vlan=$(echo "$VLAN" | sed 's/^,tag=//') ;;
+  esac
+  case "${MAC:-}" in
+  ,hwaddr=*) _mac=$(echo "$MAC" | sed 's/^,hwaddr=//') ;;
+  esac
+
+  # DNS / Searchdomain — strip the "-nameserver=" / "-searchdomain=" prefixes.
+  _ns=""
+  _searchdomain=""
+  case "${NS:-}" in
+  -nameserver=*) _ns=$(echo "$NS" | sed 's/^-nameserver=//') ;;
+  esac
+  case "${SD:-}" in
+  -searchdomain=*) _searchdomain=$(echo "$SD" | sed 's/^-searchdomain=//') ;;
+  esac
+
+  # SSH / APT / Features
+  _ssh="${SSH:-no}"
+  _ssh_auth="${SSH_AUTHORIZED_KEY:-}"
+  _apt_cacher="${APT_CACHER:-}"
+  _apt_cacher_ip="${APT_CACHER_IP:-}"
+  _fuse="${ENABLE_FUSE:-no}"
+  _tun="${ENABLE_TUN:-no}"
+  _tags="${TAGS:-}"
+  _verbose="${VERBOSE:-no}"
+
+  # Type / Resources / Identity
+  _unpriv="${CT_TYPE:-1}"
+  _cpu="${CORE_COUNT:-1}"
+  _ram="${RAM_SIZE:-1024}"
+  _disk="${DISK_SIZE:-4}"
+  _hostname="${HN:-$NSAPP}"
+
+  # Storage
+  _tpl_storage="${TEMPLATE_STORAGE:-${var_template_storage:-}}"
+  _ct_storage="${CONTAINER_STORAGE:-${var_container_storage:-}}"
+
+  # All values pass through _sanitize_value to drop shell metacharacters;
+  # optional keys are only written when non-empty.
+  {
+    echo "# App-specific defaults for ${APP} (${NSAPP})"
+    echo "# Generated on $(date -u '+%Y-%m-%dT%H:%M:%SZ')"
+    echo
+
+    echo "var_unprivileged=$(_sanitize_value "$_unpriv")"
+    echo "var_cpu=$(_sanitize_value "$_cpu")"
+    echo "var_ram=$(_sanitize_value "$_ram")"
+    echo "var_disk=$(_sanitize_value "$_disk")"
+
+    [ -n "${BRG:-}" ] && echo "var_brg=$(_sanitize_value "$BRG")"
+    [ -n "$_net" ] && echo "var_net=$(_sanitize_value "$_net")"
+    [ -n "$_gate" ] && echo "var_gateway=$(_sanitize_value "$_gate")"
+    [ -n "$_mtu" ] && echo "var_mtu=$(_sanitize_value "$_mtu")"
+    [ -n "$_vlan" ] && echo "var_vlan=$(_sanitize_value "$_vlan")"
+    [ -n "$_mac" ] && echo "var_mac=$(_sanitize_value "$_mac")"
+    [ -n "$_ns" ] && echo "var_ns=$(_sanitize_value "$_ns")"
+
+    [ -n "$_ipv6_method" ] && echo "var_ipv6_method=$(_sanitize_value "$_ipv6_method")"
+    [ -n "$_ipv6_static" ] && echo "var_ipv6_static=$(_sanitize_value "$_ipv6_static")"
+
+    [ -n "$_ssh" ] && echo "var_ssh=$(_sanitize_value "$_ssh")"
+    [ -n "$_ssh_auth" ] && echo "var_ssh_authorized_key=$(_sanitize_value "$_ssh_auth")"
+
+    [ -n "$_apt_cacher" ] && echo "var_apt_cacher=$(_sanitize_value "$_apt_cacher")"
+    [ -n "$_apt_cacher_ip" ] && echo "var_apt_cacher_ip=$(_sanitize_value "$_apt_cacher_ip")"
+
+    [ -n "$_fuse" ] && echo "var_fuse=$(_sanitize_value "$_fuse")"
+    [ -n "$_tun" ] && echo "var_tun=$(_sanitize_value "$_tun")"
+    [ -n "$_tags" ] && echo "var_tags=$(_sanitize_value "$_tags")"
+    [ -n "$_verbose" ] && echo "var_verbose=$(_sanitize_value "$_verbose")"
+
+    [ -n "$_hostname" ] && echo "var_hostname=$(_sanitize_value "$_hostname")"
+    # NOTE(review): var_searchdomain is not in VAR_WHITELIST, so this line is
+    # written but ignored by the loaders — confirm whether it should be listed.
+    [ -n "$_searchdomain" ] && echo "var_searchdomain=$(_sanitize_value "$_searchdomain")"
+
+    [ -n "$_tpl_storage" ] && echo "var_template_storage=$(_sanitize_value "$_tpl_storage")"
+    [ -n "$_ct_storage" ] && echo "var_container_storage=$(_sanitize_value "$_ct_storage")"
+  } >"$tmpf"
+
+  echo "$tmpf"
+}
+
+# ------------------------------------------------------------------------------
+# maybe_offer_save_app_defaults()
+#
+# - Called after advanced_settings()
+# - Offers to save current values as app defaults if not existing
+# - If file exists: shows diff and allows Update, Keep, View Diff, or Cancel
+# ------------------------------------------------------------------------------
+maybe_offer_save_app_defaults() {
+  # Offer to create or update <app>.vars from the advanced settings just
+  # chosen. If the file exists, a diff is computed and a menu lets the user
+  # update, keep, view the diff, or cancel.
+  local app_vars_path
+  app_vars_path="$(get_app_defaults_path)"
+
+  # always build from current settings
+  local new_tmp diff_tmp
+  new_tmp="$(_build_current_app_vars_tmp)"
+  diff_tmp="$(mktemp -p /tmp "${NSAPP:-app}.vars.diff.XXXXXX")"
+
+  # 1) if no file → offer to create
+  if [[ ! -f "$app_vars_path" ]]; then
+    if whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \
+      --yesno "Save these advanced settings as defaults for ${APP}?\n\nThis will create:\n${app_vars_path}" 12 72; then
+      mkdir -p "$(dirname "$app_vars_path")"
+      install -m 0644 "$new_tmp" "$app_vars_path"
+      msg_ok "Saved app defaults: ${app_vars_path}"
+    fi
+    rm -f "$new_tmp" "$diff_tmp"
+    return 0
+  fi
+
+  # 2) if file exists → build diff
+  _build_vars_diff "$app_vars_path" "$new_tmp" >"$diff_tmp"
+
+  # if no differences → do nothing (marker line emitted by _build_vars_diff)
+  if grep -q "^(No differences)$" "$diff_tmp"; then
+    rm -f "$new_tmp" "$diff_tmp"
+    return 0
+  fi
+
+  # 3) if file exists → show menu with default selection "Update Defaults"
+  local app_vars_file
+  app_vars_file="$(basename "$app_vars_path")"
+
+  # Loop so "View Diff" can return to the menu; ESC/cancel maps to "Cancel".
+  while true; do
+    local sel
+    sel="$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \
+      --title "APP DEFAULTS – ${APP}" \
+      --menu "Differences detected. What do you want to do?" 20 78 10 \
+      "Update Defaults" "Write new values to ${app_vars_file}" \
+      "Keep Current" "Keep existing defaults (no changes)" \
+      "View Diff" "Show a detailed diff" \
+      "Cancel" "Abort without changes" \
+      --default-item "Update Defaults" \
+      3>&1 1>&2 2>&3)" || { sel="Cancel"; }
+
+    case "$sel" in
+    "Update Defaults")
+      install -m 0644 "$new_tmp" "$app_vars_path"
+      msg_ok "Updated app defaults: ${app_vars_path}"
+      break
+      ;;
+    "Keep Current")
+      msg_info "Keeping current app defaults: ${app_vars_path}"
+      break
+      ;;
+    "View Diff")
+      whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \
+        --title "Diff – ${APP}" \
+        --scrolltext --textbox "$diff_tmp" 25 100
+      ;;
+    "Cancel" | *)
+      msg_info "Canceled. No changes to app defaults."
+      break
+      ;;
+    esac
+  done
+
+  rm -f "$new_tmp" "$diff_tmp"
+}
+
+ensure_storage_selection_for_vars_file() {
+  # Sync TEMPLATE_STORAGE / CONTAINER_STORAGE from a vars file; when either
+  # key is missing, interactively choose and persist both selections.
+  local vars_file="$1"
+
+  local tpl_value ct_value
+  tpl_value=$(grep -E '^var_template_storage=' "$vars_file" | cut -d= -f2-)
+  ct_value=$(grep -E '^var_container_storage=' "$vars_file" | cut -d= -f2-)
+
+  if [[ -z "$tpl_value" || -z "$ct_value" ]]; then
+    # At least one key absent: prompt for both and write them back.
+    choose_and_set_storage_for_file "$vars_file" template
+    choose_and_set_storage_for_file "$vars_file" container
+    msg_ok "Storage configuration saved to $(basename "$vars_file")"
+    return
+  fi
+
+  # Both stored values present: just adopt them.
+  TEMPLATE_STORAGE="$tpl_value"
+  CONTAINER_STORAGE="$ct_value"
+  return 0
+}
+
+diagnostics_menu() {
+  # Toggle the persisted DIAGNOSTICS setting. The confirm button is labeled
+  # with the value the user would switch TO; "Back" leaves it unchanged.
+  local target button
+  if [ "${DIAGNOSTICS:-no}" = "yes" ]; then
+    target="no"
+    button="No"
+  else
+    target="yes"
+    button="Yes"
+  fi
+
+  if whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \
+    --title "DIAGNOSTIC SETTINGS" \
+    --yesno "Send Diagnostics?\n\nCurrent: ${DIAGNOSTICS}" 10 58 \
+    --yes-button "$button" --no-button "Back"; then
+    DIAGNOSTICS="$target"
+    sed -i "s/^DIAGNOSTICS=.*/DIAGNOSTICS=${target}/" /usr/local/community-scripts/diagnostics
+    whiptail --msgbox "Diagnostics set to ${DIAGNOSTICS}." 8 58
+  fi
+}
+
+ensure_global_default_vars_file() {
+ local vars_path="/usr/local/community-scripts/default.vars"
+ if [[ ! -f "$vars_path" ]]; then
+ mkdir -p "$(dirname "$vars_path")"
+ touch "$vars_path"
+ fi
+ echo "$vars_path"
+}
+
+# ------------------------------------------------------------------------------
+# install_script()
+#
+# - Main entrypoint for installation mode
+# - Runs safety checks (pve_check, root_check, maxkeys_check, diagnostics_check)
+# - Builds interactive menu (Default, Verbose, Advanced, My Defaults, App Defaults, Diagnostics, Storage, Exit)
+# - Applies chosen settings and triggers container build
+# ------------------------------------------------------------------------------
+install_script() {
+  # Main entrypoint for installation mode: runs safety checks, shows the
+  # install-mode menu (or honors a CLI preset), applies the chosen settings,
+  # and persists storage selections.
+  pve_check
+  shell_check
+  root_check
+  arch_check
+  ssh_check
+  maxkeys_check
+  diagnostics_check
+
+  # Pause the instance-pinging service while the installer runs.
+  if systemctl is-active -q ping-instances.service; then
+    systemctl -q stop ping-instances.service
+  fi
+
+  NEXTID=$(pvesh get /cluster/nextid)
+  timezone=$(cat /etc/timezone)
+
+  # Show APP Header
+  header_info
+
+  # --- Support CLI argument as direct preset (default, advanced, …) ---
+  CHOICE="${mode:-${1:-}}"
+
+  # If no CLI argument → show whiptail menu
+  # Build menu dynamically based on available options
+  local appdefaults_option=""
+  local settings_option=""
+  local menu_items=(
+    "1" "Default Install"
+    "2" "Advanced Install"
+    "3" "My Defaults"
+  )
+
+  # Option numbering shifts depending on whether app defaults exist.
+  if [ -f "$(get_app_defaults_path)" ]; then
+    appdefaults_option="4"
+    menu_items+=("4" "App Defaults for ${APP}")
+    settings_option="5"
+    menu_items+=("5" "Settings")
+  else
+    settings_option="4"
+    menu_items+=("4" "Settings")
+  fi
+
+  if [ -z "$CHOICE" ]; then
+
+    TMP_CHOICE=$(whiptail \
+      --backtitle "Proxmox VE Helper Scripts" \
+      --title "Community-Scripts Options" \
+      --ok-button "Select" --cancel-button "Exit Script" \
+      --notags \
+      --menu "\nChoose an option:\n Use TAB or Arrow keys to navigate, ENTER to select.\n" \
+      20 60 9 \
+      "${menu_items[@]}" \
+      --default-item "1" \
+      3>&1 1>&2 2>&3) || exit_script
+    CHOICE="$TMP_CHOICE"
+  fi
+
+  # Expose resolved option numbers for the case patterns below.
+  APPDEFAULTS_OPTION="$appdefaults_option"
+  SETTINGS_OPTION="$settings_option"
+
+  # --- Main case ---
+  local defaults_target=""
+  local run_maybe_offer="no"
+  case "$CHOICE" in
+  1 | default | DEFAULT)
+    header_info
+    echo -e "${DEFAULT}${BOLD}${BL}Using Default Settings on node $PVEHOST_NAME${CL}"
+    VERBOSE="no"
+    METHOD="default"
+    base_settings "$VERBOSE"
+    echo_default
+    defaults_target="$(ensure_global_default_vars_file)"
+    ;;
+  2 | advanced | ADVANCED)
+    header_info
+
+    echo -e "${ADVANCED}${BOLD}${RD}Using Advanced Install on node $PVEHOST_NAME${CL}"
+    echo -e "${INFO}${BOLD}${DGN}PVE Version ${PVEVERSION} (Kernel: ${KERNEL_VERSION})${CL}"
+    METHOD="advanced"
+    base_settings
+    advanced_settings
+    defaults_target="$(ensure_global_default_vars_file)"
+    # Offer to persist the chosen advanced settings as app defaults later.
+    run_maybe_offer="yes"
+    ;;
+  3 | mydefaults | MYDEFAULTS)
+    default_var_settings || {
+      msg_error "Failed to apply default.vars"
+      exit 1
+    }
+    defaults_target="/usr/local/community-scripts/default.vars"
+    ;;
+  "$APPDEFAULTS_OPTION" | appdefaults | APPDEFAULTS)
+    if [ -f "$(get_app_defaults_path)" ]; then
+      header_info
+      echo -e "${DEFAULT}${BOLD}${BL}Using App Defaults for ${APP} on node $PVEHOST_NAME${CL}"
+      METHOD="appdefaults"
+      base_settings
+      _load_vars_file "$(get_app_defaults_path)"
+      echo_default
+      defaults_target="$(get_app_defaults_path)"
+    else
+      msg_error "No App Defaults available for ${APP}"
+      exit 1
+    fi
+    ;;
+  "$SETTINGS_OPTION" | settings | SETTINGS)
+    settings_menu
+    defaults_target=""
+    ;;
+  *)
+    echo -e "${CROSS}${RD}Invalid option: $CHOICE${CL}"
+    exit 1
+    ;;
+  esac
+
+  # Persist storage selections into whichever vars file was used (if any).
+  if [[ -n "$defaults_target" ]]; then
+    ensure_storage_selection_for_vars_file "$defaults_target"
+  fi
+
+  if [[ "$run_maybe_offer" == "yes" ]]; then
+    maybe_offer_save_app_defaults
+  fi
+}
+
+edit_default_storage() {
+ local vf="/usr/local/community-scripts/default.vars"
+
+ # Ensure file exists
+ if [[ ! -f "$vf" ]]; then
+ mkdir -p "$(dirname "$vf")"
+ touch "$vf"
+ fi
+
+ # Let ensure_storage_selection_for_vars_file handle everything
+ ensure_storage_selection_for_vars_file "$vf"
+}
+
+settings_menu() {
+  # Loop the settings menu until the user backs out (Cancel/ESC breaks).
+  # Menu numbering shifts depending on whether an app vars file exists.
+  while true; do
+    local settings_items=(
+      "1" "Manage API-Diagnostic Setting"
+      "2" "Edit Default.vars"
+      "3" "Edit Default Storage"
+    )
+    if [ -f "$(get_app_defaults_path)" ]; then
+      settings_items+=("4" "Edit App.vars for ${APP}")
+      settings_items+=("5" "Exit")
+    else
+      settings_items+=("4" "Exit")
+    fi
+
+    local choice
+    choice=$(whiptail --backtitle "Proxmox VE Helper Scripts" \
+      --title "Community-Scripts SETTINGS Menu" \
+      --ok-button "OK" --cancel-button "Back" \
+      --menu "\n\nChoose a settings option:\n\nUse TAB or Arrow keys to navigate, ENTER to select." 20 60 9 \
+      "${settings_items[@]}" \
+      3>&1 1>&2 2>&3) || break
+
+    case "$choice" in
+    1) diagnostics_menu ;;
+    2) ${EDITOR:-nano} /usr/local/community-scripts/default.vars ;;
+    3) edit_default_storage ;;
+    4)
+      # "4" is "Edit App.vars" when the file exists, otherwise "Exit".
+      if [ -f "$(get_app_defaults_path)" ]; then
+        ${EDITOR:-nano} "$(get_app_defaults_path)"
+      else
+        exit_script
+      fi
+      ;;
+    5) exit_script ;;
+    esac
+  done
+}
+
+# ===== Unified storage selection & writing to vars files =====
+_write_storage_to_vars() {
+  # $1 = vars_file, $2 = key (var_container_storage / var_template_storage), $3 = value
+  local vars_file="$1" storage_key="$2" storage_val="$3"
+  # Drop any existing occurrence of the key — commented-out or not — so the
+  # appended line is the only one left.
+  sed -i "/^[#[:space:]]*${storage_key}=/d" "$vars_file"
+  printf '%s=%s\n' "$storage_key" "$storage_val" >>"$vars_file"
+}
+
+choose_and_set_storage_for_file() {
+  # $1 = vars_file, $2 = class ('container'|'template')
+  # Resolve a storage for the given content class (auto-pick when only one
+  # exists, otherwise prompt), write it into the vars file, and export the
+  # matching environment variables.
+  local vf="$1" class="$2" key="" current=""
+  case "$class" in
+  container) key="var_container_storage" ;;
+  template) key="var_template_storage" ;;
+  *)
+    msg_error "Unknown storage class: $class"
+    return 1
+    ;;
+  esac
+
+  # NOTE(review): 'current' is read here but never used below — the selection
+  # is always made fresh (see comment inside the else branch).
+  current=$(awk -F= -v k="^${key}=" '$0 ~ k {print $2; exit}' "$vf")
+
+  # If only one storage exists for the content type, auto-pick. Else always ask (your wish #4).
+  local content="rootdir"
+  [[ "$class" == "template" ]] && content="vztmpl"
+  local count
+  count=$(pvesm status -content "$content" | awk 'NR>1{print $1}' | wc -l)
+
+  if [[ "$count" -eq 1 ]]; then
+    # Single candidate: take it without prompting.
+    STORAGE_RESULT=$(pvesm status -content "$content" | awk 'NR>1{print $1; exit}')
+    STORAGE_INFO=""
+  else
+    # If the current value is preselectable, we could show it, but per your requirement we always offer selection
+    select_storage "$class" || return 1
+  fi
+
+  _write_storage_to_vars "$vf" "$key" "$STORAGE_RESULT"
+
+  # Keep environment in sync for later steps (e.g. app-default save)
+  if [[ "$class" == "container" ]]; then
+    export var_container_storage="$STORAGE_RESULT"
+    export CONTAINER_STORAGE="$STORAGE_RESULT"
+  else
+    export var_template_storage="$STORAGE_RESULT"
+    export TEMPLATE_STORAGE="$STORAGE_RESULT"
+  fi
+
+  msg_ok "Updated ${key} → ${STORAGE_RESULT}"
+}
+
+# ------------------------------------------------------------------------------
+# check_container_resources()
+#
+# - Compares host RAM/CPU with required values
+# - Warns if under-provisioned and asks user to continue or abort
+# ------------------------------------------------------------------------------
check_container_resources() {
  # Compare host RAM/CPU against the required var_ram / var_cpu values and,
  # if the host is under-provisioned, ask the user to confirm or abort.
  # Sets current_ram / current_cpu (globals, as before) as a side effect.
  current_ram=$(free -m | awk 'NR==2{print $2}')
  current_cpu=$(nproc)

  if [[ "$current_ram" -lt "$var_ram" ]] || [[ "$current_cpu" -lt "$var_cpu" ]]; then
    echo -e "\n${INFO}${HOLD} ${GN}Required: ${var_cpu} CPU, ${var_ram}MB RAM ${CL}| ${RD}Current: ${current_cpu} CPU, ${current_ram}MB RAM${CL}"
    echo -e "${YWB}Please ensure that the ${APP} LXC is configured with at least ${var_cpu} vCPU and ${var_ram} MB RAM for the build process.${CL}\n"
    echo -ne "${INFO}${HOLD} May cause data loss! ${INFO} Continue update with under-provisioned LXC? "
    read -r prompt
    # Accept 'y' or 'yes' (case-insensitive) — consistent with check_container_storage.
    if [[ ! ${prompt,,} =~ ^(y|yes)$ ]]; then
      echo -e "${CROSS}${HOLD} ${YWB}Exiting based on user input.${CL}"
      exit 1
    fi
  else
    echo -e ""
  fi
}
+
+# ------------------------------------------------------------------------------
+# check_container_storage()
+#
+# - Checks /boot partition usage
+# - Warns if usage >80% and asks user confirmation before proceeding
+# ------------------------------------------------------------------------------
check_container_storage() {
  # Check usage of the filesystem backing /boot; above the 80% threshold,
  # ask the user whether to proceed (anything other than y/yes aborts).
  # Sets total_size / usage (globals, as before) as a side effect.
  total_size=$(df /boot --output=size | tail -n 1)
  local used_size
  used_size=$(df /boot --output=used | tail -n 1)
  usage=$((100 * used_size / total_size))
  ((usage > 80)) || return 0
  echo -e "${INFO}${HOLD} ${YWB}Warning: Storage is dangerously low (${usage}%).${CL}"
  echo -ne "Continue anyway? "
  read -r prompt
  case "${prompt,,}" in
  y | yes) ;;
  *)
    echo -e "${CROSS}${HOLD}${YWB}Exiting based on user input.${CL}"
    exit 1
    ;;
  esac
}
+
+# ------------------------------------------------------------------------------
+# ssh_extract_keys_from_file()
+#
+# - Extracts valid SSH public keys from given file
+# - Supports RSA, Ed25519, ECDSA and filters out comments/invalid lines
+# ------------------------------------------------------------------------------
ssh_extract_keys_from_file() {
  # Print every valid-looking SSH public key found in file $1, one per line.
  # Handles plain "type base64 [comment]" lines as well as lines carrying
  # leading authorized_keys options; comment and blank lines are skipped.
  # Unreadable/missing files are silently ignored (returns 0, no output).
  local keyfile="$1"
  [[ -r "$keyfile" ]] || return 0
  # Strip CR so Windows-edited files parse cleanly, then let awk filter.
  tr -d '\r' <"$keyfile" | awk '
    /^[[:space:]]*#/ {next}
    /^[[:space:]]*$/ {next}
    # bare form: "type base64 [comment]"
    /^(ssh-(rsa|ed25519)|ecdsa-sha2-nistp256|sk-(ssh-ed25519|ecdsa-sha2-nistp256))[[:space:]]+/ {print; next}
    # with options: print from the first recognized key type onwards
    {
      match($0, /(ssh-(rsa|ed25519)|ecdsa-sha2-nistp256|sk-(ssh-ed25519|ecdsa-sha2-nistp256))[[:space:]]+/)
      if (RSTART>0) { print substr($0, RSTART) }
    }
  '
}
+
+# ------------------------------------------------------------------------------
+# ssh_build_choices_from_files()
+#
+# - Builds interactive whiptail checklist of available SSH keys
+# - Generates fingerprint, type and comment for each key
+# ------------------------------------------------------------------------------
# Builds the whiptail checklist data for all SSH keys found in the given files.
# Arguments: one or more candidate key file paths.
# Outputs (globals):
#   CHOICES  - flat array of (tag, label, state) triplets for --checklist
#   COUNT    - running total of keys discovered
#   MAPFILE  - path of a temp file mapping "Kn|<key line>" for later lookup
#              NOTE(review): MAPFILE is also the default array name of bash's
#              `mapfile` builtin — verify nothing else relies on that builtin.
ssh_build_choices_from_files() {
  local -a files=("$@")
  CHOICES=()
  COUNT=0
  MAPFILE="$(mktemp)"
  # id: checklist tag; typ/fp/cmt: key type, fingerprint, comment;
  # ln: per-call counter (incremented but otherwise unused here)
  local id key typ fp cmt base ln=0

  for f in "${files[@]}"; do
    [[ -f "$f" && -r "$f" ]] || continue
    base="$(basename -- "$f")"
    # Skip non-key files; for id_* keypairs only the .pub half is usable.
    case "$base" in
    known_hosts | known_hosts.* | config) continue ;;
    id_*) [[ "$f" != *.pub ]] && continue ;;
    esac

    # map every key in file
    while IFS= read -r key; do
      [[ -n "$key" ]] || continue

      typ=""
      fp=""
      cmt=""
      # Only the pure key part (without options) is already included in ‘key’.
      read -r _typ _b64 _cmt <<<"$key"
      typ="${_typ:-key}"
      cmt="${_cmt:-}"
      # Fingerprint via ssh-keygen (if available)
      if command -v ssh-keygen >/dev/null 2>&1; then
        fp="$(printf '%s\n' "$key" | ssh-keygen -lf - 2>/dev/null | awk '{print $2}')"
      fi
      # Label shorten
      [[ ${#cmt} -gt 40 ]] && cmt="${cmt:0:37}..."

      ln=$((ln + 1))
      COUNT=$((COUNT + 1))
      id="K${COUNT}"
      echo "${id}|${key}" >>"$MAPFILE"
      CHOICES+=("$id" "[$typ] ${fp:+$fp }${cmt:+$cmt }— ${base}" "OFF")
    done < <(ssh_extract_keys_from_file "$f")
  done
}
+
+# ------------------------------------------------------------------------------
+# ssh_discover_default_files()
+#
+# - Scans standard paths for SSH keys
+# - Includes ~/.ssh/*.pub, /etc/ssh/authorized_keys, etc.
+# ------------------------------------------------------------------------------
ssh_discover_default_files() {
  # Emit candidate SSH key file paths, NUL-separated, from the standard
  # root and system locations. Glob patterns that match nothing vanish via
  # nullglob; literal paths are always emitted even when the file is absent
  # (callers filter for readability). Note: nullglob is switched off again
  # unconditionally, matching the original behavior.
  local -a candidates
  candidates=(/root/.ssh/authorized_keys /root/.ssh/authorized_keys2)
  shopt -s nullglob
  candidates+=(/root/.ssh/*.pub)
  candidates+=(/etc/ssh/authorized_keys)
  candidates+=(/etc/ssh/authorized_keys.d/*)
  shopt -u nullglob
  printf '%s\0' "${candidates[@]}"
}
+
# Interactive SSH provisioning flow:
#   - discovers host keys, offers found/manual/folder/none import modes,
#   - collects selected keys (deduplicated) into $SSH_KEYS_FILE,
#   - finally asks whether to enable root SSH access (sets SSH=yes|no).
# Relies on ssh_discover_default_files / ssh_build_choices_from_files for the
# CHOICES / COUNT / MAPFILE globals.
configure_ssh_settings() {
  SSH_KEYS_FILE="$(mktemp)"
  : >"$SSH_KEYS_FILE"

  # NOTE(review): bash cannot store NUL in IFS, so IFS=$'\0' sets IFS to the
  # empty string here; the actual NUL splitting is done by read -d ''. Confirm
  # this is the intended construct.
  IFS=$'\0' read -r -d '' -a _def_files < <(ssh_discover_default_files && printf '\0')
  ssh_build_choices_from_files "${_def_files[@]}"
  local default_key_count="$COUNT"

  # Offer the "found" mode only when discovery produced at least one key.
  local ssh_key_mode
  if [[ "$default_key_count" -gt 0 ]]; then
    ssh_key_mode=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --title "SSH KEY SOURCE" --menu \
      "Provision SSH keys for root:" 14 72 4 \
      "found" "Select from detected keys (${default_key_count})" \
      "manual" "Paste a single public key" \
      "folder" "Scan another folder (path or glob)" \
      "none" "No keys" 3>&1 1>&2 2>&3) || exit_script
  else
    ssh_key_mode=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --title "SSH KEY SOURCE" --menu \
      "No host keys detected; choose manual/none:" 12 72 2 \
      "manual" "Paste a single public key" \
      "none" "No keys" 3>&1 1>&2 2>&3) || exit_script
  fi

  case "$ssh_key_mode" in
  found)
    # Checklist returns quoted tags ("K1" "K2" ...); strip quotes and map each
    # tag back to its key line via the MAPFILE "id|key" records.
    local selection
    selection=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --title "SELECT HOST KEYS" \
      --checklist "Select one or more keys to import:" 20 140 10 "${CHOICES[@]}" 3>&1 1>&2 2>&3) || exit_script
    for tag in $selection; do
      tag="${tag%\"}"
      tag="${tag#\"}"
      local line
      line=$(grep -E "^${tag}\|" "$MAPFILE" | head -n1 | cut -d'|' -f2-)
      [[ -n "$line" ]] && printf '%s\n' "$line" >>"$SSH_KEYS_FILE"
    done
    ;;
  manual)
    SSH_AUTHORIZED_KEY="$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \
      --inputbox "Paste one SSH public key line (ssh-ed25519/ssh-rsa/...)" 10 72 --title "SSH Public Key" 3>&1 1>&2 2>&3)"
    [[ -n "$SSH_AUTHORIZED_KEY" ]] && printf '%s\n' "$SSH_AUTHORIZED_KEY" >>"$SSH_KEYS_FILE"
    ;;
  folder)
    # Word-split the user input so globs expand; nullglob drops non-matches.
    local glob_path
    glob_path=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \
      --inputbox "Enter a folder or glob to scan (e.g. /root/.ssh/*.pub)" 10 72 --title "Scan Folder/Glob" 3>&1 1>&2 2>&3)
    if [[ -n "$glob_path" ]]; then
      shopt -s nullglob
      read -r -a _scan_files <<<"$glob_path"
      shopt -u nullglob
      if [[ "${#_scan_files[@]}" -gt 0 ]]; then
        ssh_build_choices_from_files "${_scan_files[@]}"
        if [[ "$COUNT" -gt 0 ]]; then
          local folder_selection
          folder_selection=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --title "SELECT FOLDER KEYS" \
            --checklist "Select key(s) to import:" 20 78 10 "${CHOICES[@]}" 3>&1 1>&2 2>&3) || exit_script
          for tag in $folder_selection; do
            tag="${tag%\"}"
            tag="${tag#\"}"
            local line
            line=$(grep -E "^${tag}\|" "$MAPFILE" | head -n1 | cut -d'|' -f2-)
            [[ -n "$line" ]] && printf '%s\n' "$line" >>"$SSH_KEYS_FILE"
          done
        else
          whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --msgbox "No keys found in: $glob_path" 8 60
        fi
      else
        whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --msgbox "Path/glob returned no files." 8 60
      fi
    fi
    ;;
  none)
    :
    ;;
  esac

  # Deduplicate collected keys and make sure the file ends with a newline.
  if [[ -s "$SSH_KEYS_FILE" ]]; then
    sort -u -o "$SSH_KEYS_FILE" "$SSH_KEYS_FILE"
    printf '\n' >>"$SSH_KEYS_FILE"
  fi

  # Only offer enabling root SSH when keys were provided or a password is set
  # ($PW holds a "-password ..." pct option when one was chosen).
  if [[ -s "$SSH_KEYS_FILE" || "$PW" == -password* ]]; then
    if (whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --defaultno --title "SSH ACCESS" --yesno "Enable root SSH access?" 10 58); then
      SSH="yes"
    else
      SSH="no"
    fi
  else
    SSH="no"
  fi
}
+
+# ------------------------------------------------------------------------------
+# start()
+#
+# - Entry point of script
+# - On Proxmox host: calls install_script
+# - In silent mode: runs update_script
+# - Otherwise: shows update/setting menu
+# ------------------------------------------------------------------------------
start() {
  # Pull shared helper functions from the dev repo before doing anything else.
  source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/tools.func)
  # On a Proxmox host (pveversion present) run the installer; inside an
  # existing container either update silently (PHS_SILENT=1) or show the menu.
  if command -v pveversion >/dev/null 2>&1; then
    install_script || return 0
    return 0
  elif [ ! -z ${PHS_SILENT+x} ] && [[ "${PHS_SILENT}" == "1" ]]; then
    # Non-interactive update path (PHS_SILENT=1): quiet output, no menu.
    VERBOSE="no"
    set_std_mode
    update_script
  else
    CHOICE=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --title "${APP} LXC Update/Setting" --menu \
      "Support/Update functions for ${APP} LXC.  Choose an option:" \
      12 60 3 \
      "1" "YES (Silent Mode)" \
      "2" "YES (Verbose Mode)" \
      "3" "NO (Cancel Update)" --nocancel --default-item "1" 3>&1 1>&2 2>&3)

    case "$CHOICE" in
    1)
      VERBOSE="no"
      set_std_mode
      ;;
    2)
      VERBOSE="yes"
      set_std_mode
      ;;
    3)
      clear
      exit_script
      exit
      ;;
    esac
    update_script
  fi
}
+
+# ------------------------------------------------------------------------------
+# build_container()
+#
+# - Creates and configures the LXC container
+# - Builds network string and applies features (FUSE, TUN, VAAPI passthrough)
+# - Starts container and waits for network connectivity
+# - Installs base packages, SSH keys, and runs -install.sh
+# ------------------------------------------------------------------------------
build_container() {
  # Create and configure the LXC container:
  #  - assembles the -net0 string (MAC / IP / gateway / VLAN / MTU / IPv6)
  #  - applies features (keyctl/nesting/FUSE) and device passthrough
  #    (USB, Intel/AMD/NVIDIA GPU, TUN, Coral TPU)
  #  - starts the container, waits for network, installs base packages and
  #    SSH keys, then runs the application's -install.sh inside the container
  # if [ "$VERBOSE" == "yes" ]; then set -x; fi

  NET_STRING="-net0 name=eth0,bridge=${BRG:-vmbr0}"

  # MAC
  if [[ -n "$MAC" ]]; then
    case "$MAC" in
    ,hwaddr=*) NET_STRING+="$MAC" ;;
    *) NET_STRING+=",hwaddr=$MAC" ;;
    esac
  fi

  # IP (always required, defaults to dhcp)
  NET_STRING+=",ip=${NET:-dhcp}"

  # Gateway
  if [[ -n "$GATE" ]]; then
    case "$GATE" in
    ,gw=*) NET_STRING+="$GATE" ;;
    *) NET_STRING+=",gw=$GATE" ;;
    esac
  fi

  # VLAN
  if [[ -n "$VLAN" ]]; then
    case "$VLAN" in
    ,tag=*) NET_STRING+="$VLAN" ;;
    *) NET_STRING+=",tag=$VLAN" ;;
    esac
  fi

  # MTU
  if [[ -n "$MTU" ]]; then
    case "$MTU" in
    ,mtu=*) NET_STRING+="$MTU" ;;
    *) NET_STRING+=",mtu=$MTU" ;;
    esac
  fi

  # IPv6 handling
  case "$IPV6_METHOD" in
  auto) NET_STRING="$NET_STRING,ip6=auto" ;;
  dhcp) NET_STRING="$NET_STRING,ip6=dhcp" ;;
  static)
    NET_STRING="$NET_STRING,ip6=$IPV6_ADDR"
    [ -n "$IPV6_GATE" ] && NET_STRING="$NET_STRING,gw6=$IPV6_GATE"
    ;;
  none) ;;
  esac

  if [ "$CT_TYPE" == "1" ]; then
    FEATURES="keyctl=1,nesting=1"
  else
    FEATURES="nesting=1"
  fi

  if [ "$ENABLE_FUSE" == "yes" ]; then
    FEATURES="$FEATURES,fuse=1"
  fi

  TEMP_DIR=$(mktemp -d)
  pushd "$TEMP_DIR" >/dev/null
  if [ "$var_os" == "alpine" ]; then
    export FUNCTIONS_FILE_PATH="$(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/alpine-install.func)"
  else
    export FUNCTIONS_FILE_PATH="$(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/install.func)"
  fi
  export DIAGNOSTICS="$DIAGNOSTICS"
  export RANDOM_UUID="$RANDOM_UUID"
  export CACHER="$APT_CACHER"
  export CACHER_IP="$APT_CACHER_IP"
  export tz="$timezone"
  export APPLICATION="$APP"
  export app="$NSAPP"
  export PASSWORD="$PW"
  export VERBOSE="$VERBOSE"
  export SSH_ROOT="${SSH}"
  export SSH_AUTHORIZED_KEY
  export CTID="$CT_ID"
  export CTTYPE="$CT_TYPE"
  export ENABLE_FUSE="$ENABLE_FUSE"
  export ENABLE_TUN="$ENABLE_TUN"
  export PCT_OSTYPE="$var_os"
  export PCT_OSVERSION="$var_version"
  export PCT_DISK_SIZE="$DISK_SIZE"
  export PCT_OPTIONS="
    -features $FEATURES
    -hostname $HN
    -tags $TAGS
    $SD
    $NS
    $NET_STRING
    -onboot 1
    -cores $CORE_COUNT
    -memory $RAM_SIZE
    -unprivileged $CT_TYPE
    $PW
"
  export TEMPLATE_STORAGE="${var_template_storage:-}"
  export CONTAINER_STORAGE="${var_container_storage:-}"
  create_lxc_container || exit $?

  LXC_CONFIG="/etc/pve/lxc/${CTID}.conf"

  # ============================================================================
  # GPU/USB PASSTHROUGH CONFIGURATION
  # ============================================================================

  # List of applications that benefit from GPU acceleration
  GPU_APPS=(
    "immich" "channels" "emby" "ersatztv" "frigate"
    "jellyfin" "plex" "scrypted" "tdarr" "unmanic"
    "ollama" "fileflows" "open-webui" "tunarr" "debian"
    "handbrake" "sunshine" "moonlight" "kodi" "stremio"
    "viseron"
  )

  # Check if app needs GPU
  is_gpu_app() {
    local app="${1,,}"
    for gpu_app in "${GPU_APPS[@]}"; do
      [[ "$app" == "${gpu_app,,}" ]] && return 0
    done
    return 1
  }

  # Detect all available GPU devices
  detect_gpu_devices() {
    INTEL_DEVICES=()
    AMD_DEVICES=()
    NVIDIA_DEVICES=()

    # Store PCI info to avoid multiple calls
    local pci_vga_info=$(lspci -nn 2>/dev/null | grep -E "VGA|Display|3D")

    # Check for Intel GPU - look for Intel vendor ID [8086]
    if echo "$pci_vga_info" | grep -q "\[8086:"; then
      msg_info "Detected Intel GPU"
      if [[ -d /dev/dri ]]; then
        for d in /dev/dri/renderD* /dev/dri/card*; do
          [[ -e "$d" ]] && INTEL_DEVICES+=("$d")
        done
      fi
    fi

    # Check for AMD GPU - look for AMD vendor IDs [1002] (AMD/ATI) or [1022] (AMD)
    if echo "$pci_vga_info" | grep -qE "\[1002:|\[1022:"; then
      msg_info "Detected AMD GPU"
      if [[ -d /dev/dri ]]; then
        # Only add if not already claimed by Intel
        if [[ ${#INTEL_DEVICES[@]} -eq 0 ]]; then
          for d in /dev/dri/renderD* /dev/dri/card*; do
            [[ -e "$d" ]] && AMD_DEVICES+=("$d")
          done
        fi
      fi
    fi

    # Check for NVIDIA GPU - look for NVIDIA vendor ID [10de]
    if echo "$pci_vga_info" | grep -q "\[10de:"; then
      msg_info "Detected NVIDIA GPU"
      if ! check_nvidia_host_setup; then
        msg_error "NVIDIA host setup incomplete. Skipping GPU passthrough."
        msg_info "Fix NVIDIA drivers on host, then recreate container or passthrough manually."
        return 0
      fi

      for d in /dev/nvidia* /dev/nvidiactl /dev/nvidia-modeset; do
        [[ -e "$d" ]] && NVIDIA_DEVICES+=("$d")
      done

      if [[ ${#NVIDIA_DEVICES[@]} -eq 0 ]]; then
        msg_warn "NVIDIA GPU detected but no /dev/nvidia* devices found"
        msg_warn "Please install NVIDIA drivers on host: apt install nvidia-driver"
      else
        if [[ "$CT_TYPE" == "0" ]]; then
          # Heredoc appended to the container config (was a broken `cat <>` redirection)
          cat <<EOF >>"$LXC_CONFIG"
# NVIDIA GPU Passthrough (privileged)
lxc.cgroup2.devices.allow: c 195:* rwm
lxc.cgroup2.devices.allow: c 243:* rwm
lxc.mount.entry: /dev/nvidia0 dev/nvidia0 none bind,optional,create=file
lxc.mount.entry: /dev/nvidiactl dev/nvidiactl none bind,optional,create=file
lxc.mount.entry: /dev/nvidia-uvm dev/nvidia-uvm none bind,optional,create=file
lxc.mount.entry: /dev/nvidia-uvm-tools dev/nvidia-uvm-tools none bind,optional,create=file
EOF

          if [[ -e /dev/dri/renderD128 ]]; then
            echo "lxc.mount.entry: /dev/dri/renderD128 dev/dri/renderD128 none bind,optional,create=file" >>"$LXC_CONFIG"
          fi

          export GPU_TYPE="NVIDIA"
          export NVIDIA_DRIVER_VERSION=$(nvidia-smi --query-gpu=driver_version --format=csv,noheader 2>/dev/null | head -n1)
          msg_ok "NVIDIA GPU passthrough configured (driver: ${NVIDIA_DRIVER_VERSION})"
        else
          msg_warn "NVIDIA passthrough only supported for privileged containers"
          return 0
        fi
      fi
    fi

    # Debug output
    msg_debug "Intel devices: ${INTEL_DEVICES[*]}"
    msg_debug "AMD devices: ${AMD_DEVICES[*]}"
    msg_debug "NVIDIA devices: ${NVIDIA_DEVICES[*]}"
  }

  # Configure USB passthrough for privileged containers
  configure_usb_passthrough() {
    if [[ "$CT_TYPE" != "0" ]]; then
      return 0
    fi

    msg_info "Configuring automatic USB passthrough (privileged container)"
    # Heredoc appended to the container config (was a broken `cat <>` redirection)
    cat <<EOF >>"$LXC_CONFIG"
# Automatic USB passthrough (privileged container)
lxc.cgroup2.devices.allow: a
lxc.cap.drop:
lxc.cgroup2.devices.allow: c 188:* rwm
lxc.cgroup2.devices.allow: c 189:* rwm
lxc.mount.entry: /dev/serial/by-id  dev/serial/by-id  none bind,optional,create=dir
lxc.mount.entry: /dev/ttyUSB0       dev/ttyUSB0       none bind,optional,create=file
lxc.mount.entry: /dev/ttyUSB1       dev/ttyUSB1       none bind,optional,create=file
lxc.mount.entry: /dev/ttyACM0       dev/ttyACM0       none bind,optional,create=file
lxc.mount.entry: /dev/ttyACM1       dev/ttyACM1       none bind,optional,create=file
EOF
    msg_ok "USB passthrough configured"
  }

  # Configure GPU passthrough
  configure_gpu_passthrough() {
    # Skip if not a GPU app and not privileged
    if [[ "$CT_TYPE" != "0" ]] && ! is_gpu_app "$APP"; then
      return 0
    fi

    detect_gpu_devices

    # Count available GPU types
    local gpu_count=0
    local available_gpus=()

    if [[ ${#INTEL_DEVICES[@]} -gt 0 ]]; then
      available_gpus+=("INTEL")
      gpu_count=$((gpu_count + 1))
    fi

    if [[ ${#AMD_DEVICES[@]} -gt 0 ]]; then
      available_gpus+=("AMD")
      gpu_count=$((gpu_count + 1))
    fi

    if [[ ${#NVIDIA_DEVICES[@]} -gt 0 ]]; then
      available_gpus+=("NVIDIA")
      gpu_count=$((gpu_count + 1))
    fi

    if [[ $gpu_count -eq 0 ]]; then
      msg_info "No GPU devices found for passthrough"
      return 0
    fi

    local selected_gpu=""

    if [[ $gpu_count -eq 1 ]]; then
      # Automatic selection for single GPU
      selected_gpu="${available_gpus[0]}"
      msg_info "Automatically configuring ${selected_gpu} GPU passthrough"
    else
      # Multiple GPUs - ask user
      echo -e "\n${INFO} Multiple GPU types detected:"
      for gpu in "${available_gpus[@]}"; do
        echo "  - $gpu"
      done
      read -rp "Which GPU type to passthrough? (${available_gpus[*]}): " selected_gpu
      selected_gpu="${selected_gpu^^}"

      # Validate selection
      local valid=0
      for gpu in "${available_gpus[@]}"; do
        [[ "$selected_gpu" == "$gpu" ]] && valid=1
      done

      if [[ $valid -eq 0 ]]; then
        msg_warn "Invalid selection. Skipping GPU passthrough."
        return 0
      fi
    fi

    # Apply passthrough configuration based on selection
    local dev_idx=0

    case "$selected_gpu" in
    INTEL | AMD)
      local devices=()
      [[ "$selected_gpu" == "INTEL" ]] && devices=("${INTEL_DEVICES[@]}")
      [[ "$selected_gpu" == "AMD" ]] && devices=("${AMD_DEVICES[@]}")

      # For Proxmox WebUI visibility, add as dev0, dev1 etc.
      for dev in "${devices[@]}"; do
        if [[ "$CT_TYPE" == "0" ]]; then
          # Privileged container - use dev entries for WebUI visibility
          # Use initial GID 104 (render) for renderD*, 44 (video) for card*
          if [[ "$dev" =~ renderD ]]; then
            echo "dev${dev_idx}: $dev,gid=104" >>"$LXC_CONFIG"
          else
            echo "dev${dev_idx}: $dev,gid=44" >>"$LXC_CONFIG"
          fi
          dev_idx=$((dev_idx + 1))

          # Also add cgroup allows for privileged containers
          local major minor
          major=$(stat -c '%t' "$dev" 2>/dev/null || echo "0")
          minor=$(stat -c '%T' "$dev" 2>/dev/null || echo "0")

          if [[ "$major" != "0" && "$minor" != "0" ]]; then
            echo "lxc.cgroup2.devices.allow: c $((0x$major)):$((0x$minor)) rwm" >>"$LXC_CONFIG"
          fi
        else
          # Unprivileged container
          if [[ "$dev" =~ renderD ]]; then
            echo "dev${dev_idx}: $dev,uid=0,gid=104" >>"$LXC_CONFIG"
          else
            echo "dev${dev_idx}: $dev,uid=0,gid=44" >>"$LXC_CONFIG"
          fi
          dev_idx=$((dev_idx + 1))
        fi
      done

      export GPU_TYPE="$selected_gpu"
      msg_ok "${selected_gpu} GPU passthrough configured (${dev_idx} devices)"
      ;;

    NVIDIA)
      if [[ ${#NVIDIA_DEVICES[@]} -eq 0 ]]; then
        msg_error "NVIDIA drivers not installed on host. Please install: apt install nvidia-driver"
        return 1
      fi

      for dev in "${NVIDIA_DEVICES[@]}"; do
        # NVIDIA devices typically need different handling
        echo "dev${dev_idx}: $dev,uid=0,gid=44" >>"$LXC_CONFIG"
        dev_idx=$((dev_idx + 1))

        if [[ "$CT_TYPE" == "0" ]]; then
          local major minor
          major=$(stat -c '%t' "$dev" 2>/dev/null || echo "0")
          minor=$(stat -c '%T' "$dev" 2>/dev/null || echo "0")

          if [[ "$major" != "0" && "$minor" != "0" ]]; then
            echo "lxc.cgroup2.devices.allow: c $((0x$major)):$((0x$minor)) rwm" >>"$LXC_CONFIG"
          fi
        fi
      done

      export GPU_TYPE="NVIDIA"
      msg_ok "NVIDIA GPU passthrough configured (${dev_idx} devices)"
      ;;
    esac
  }

  # Additional device passthrough
  configure_additional_devices() {
    # TUN device passthrough
    if [ "$ENABLE_TUN" == "yes" ]; then
      # Heredoc appended to the container config (was a broken `cat <>` redirection)
      cat <<EOF >>"$LXC_CONFIG"
lxc.cgroup2.devices.allow: c 10:200 rwm
lxc.mount.entry: /dev/net/tun dev/net/tun none bind,create=file
EOF
    fi

    # Coral TPU passthrough
    if [[ -e /dev/apex_0 ]]; then
      msg_info "Detected Coral TPU - configuring passthrough"
      echo "lxc.mount.entry: /dev/apex_0 dev/apex_0 none bind,optional,create=file" >>"$LXC_CONFIG"
    fi
  }

  # Execute pre-start configurations
  configure_usb_passthrough
  configure_gpu_passthrough
  configure_additional_devices

  # ============================================================================
  # START CONTAINER AND INSTALL USERLAND
  # ============================================================================

  msg_info "Starting LXC Container"
  pct start "$CTID"

  # Wait for container to be running
  for i in {1..10}; do
    if pct status "$CTID" | grep -q "status: running"; then
      msg_ok "Started LXC Container"
      break
    fi
    sleep 1
    if [ "$i" -eq 10 ]; then
      msg_error "LXC Container did not reach running state"
      exit 1
    fi
  done

  # Wait for network (skip for Alpine initially)
  if [ "$var_os" != "alpine" ]; then
    msg_info "Waiting for network in LXC container"

    # Wait for IP
    for i in {1..20}; do
      ip_in_lxc=$(pct exec "$CTID" -- ip -4 addr show dev eth0 | awk '/inet / {print $2}' | cut -d/ -f1)
      [ -n "$ip_in_lxc" ] && break
      sleep 1
    done

    if [ -z "$ip_in_lxc" ]; then
      msg_error "No IP assigned to CT $CTID after 20s"
      exit 1
    fi

    # Try to reach gateway
    gw_ok=0
    for i in {1..10}; do
      if pct exec "$CTID" -- ping -c1 -W1 "${GATEWAY:-8.8.8.8}" >/dev/null 2>&1; then
        gw_ok=1
        break
      fi
      sleep 1
    done

    if [ "$gw_ok" -eq 1 ]; then
      msg_ok "Network in LXC is reachable (IP $ip_in_lxc)"
    else
      msg_warn "Network reachable but gateway check failed"
    fi
  fi
  # Function to get correct GID inside container
  get_container_gid() {
    local group="$1"
    local gid=$(pct exec "$CTID" -- getent group "$group" 2>/dev/null | cut -d: -f3)
    echo "${gid:-44}" # Default to 44 if not found
  }

  fix_gpu_gids

  # Continue with standard container setup
  msg_info "Customizing LXC Container"

  # # Install GPU userland if configured
  # if [[ "${ENABLE_VAAPI:-0}" == "1" ]]; then
  #   install_gpu_userland "VAAPI"
  # fi

  # if [[ "${ENABLE_NVIDIA:-0}" == "1" ]]; then
  #   install_gpu_userland "NVIDIA"
  # fi

  # Continue with standard container setup
  if [ "$var_os" == "alpine" ]; then
    sleep 3
    # Heredoc inside the container shell (was missing the <<EOF marker)
    pct exec "$CTID" -- /bin/sh -c 'cat <<EOF >/etc/apk/repositories
http://dl-cdn.alpinelinux.org/alpine/latest-stable/main
http://dl-cdn.alpinelinux.org/alpine/latest-stable/community
EOF'
    pct exec "$CTID" -- ash -c "apk add bash newt curl openssh nano mc ncurses jq >/dev/null"
  else
    sleep 3
    pct exec "$CTID" -- bash -c "sed -i '/$LANG/ s/^# //' /etc/locale.gen"
    pct exec "$CTID" -- bash -c "locale_line=\$(grep -v '^#' /etc/locale.gen | grep -E '^[a-zA-Z]' | awk '{print \$1}' | head -n 1) && \
      echo LANG=\$locale_line >/etc/default/locale && \
      locale-gen >/dev/null && \
      export LANG=\$locale_line"

    if [[ -z "${tz:-}" ]]; then
      tz=$(timedatectl show --property=Timezone --value 2>/dev/null || echo "Etc/UTC")
    fi

    if pct exec "$CTID" -- test -e "/usr/share/zoneinfo/$tz"; then
      pct exec "$CTID" -- bash -c "tz='$tz'; echo \"\$tz\" >/etc/timezone && ln -sf \"/usr/share/zoneinfo/\$tz\" /etc/localtime"
    else
      msg_warn "Skipping timezone setup – zone '$tz' not found in container"
    fi

    pct exec "$CTID" -- bash -c "apt-get update >/dev/null && apt-get install -y sudo curl mc gnupg2 jq >/dev/null" || {
      msg_error "apt-get base packages installation failed"
      exit 1
    }
  fi

  msg_ok "Customized LXC Container"

  # Verify GPU access if enabled
  if [[ "${ENABLE_VAAPI:-0}" == "1" ]] && [ "$var_os" != "alpine" ]; then
    pct exec "$CTID" -- bash -c "vainfo >/dev/null 2>&1" &&
      msg_ok "VAAPI verified working" ||
      msg_warn "VAAPI verification failed - may need additional configuration"
  fi

  if [[ "${ENABLE_NVIDIA:-0}" == "1" ]] && [ "$var_os" != "alpine" ]; then
    pct exec "$CTID" -- bash -c "nvidia-smi >/dev/null 2>&1" &&
      msg_ok "NVIDIA verified working" ||
      msg_warn "NVIDIA verification failed - may need additional configuration"
  fi

  # Install SSH keys
  install_ssh_keys_into_ct

  # Run application installer; propagate the real exit code on failure.
  # (Inside `if ! cmd; then exit $?`, $? holds the negated status — always 0.)
  lxc-attach -n "$CTID" -- bash -c "$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/install/${var_install}.sh)" || exit $?
}
+
destroy_lxc() {
  # Interactively stop and destroy container $CT_ID after confirmation.
  # Returns 130 on user abort (Ctrl-C / Ctrl-D), 1 on failure, 0 otherwise.
  if [[ -z "$CT_ID" ]]; then
    msg_error "No CT_ID found. Nothing to remove."
    return 1
  fi

  # Abort on Ctrl-C / Ctrl-\ while waiting for input. The trap is reset on
  # every exit path below so it does not leak into the rest of the script
  # (the original left it installed after returning).
  trap 'echo; msg_error "Aborted by user (SIGINT/SIGQUIT)"; trap - INT QUIT; return 130' INT QUIT

  local prompt
  if ! read -rp "Remove this Container? " prompt; then
    # read returns non-zero on Ctrl-D / EOF
    trap - INT QUIT
    msg_error "Aborted input (Ctrl-D/ESC)"
    return 130
  fi
  trap - INT QUIT

  case "${prompt,,}" in
  y | yes)
    if pct stop "$CT_ID" &>/dev/null && pct destroy "$CT_ID" &>/dev/null; then
      msg_ok "Removed Container $CT_ID"
    else
      msg_error "Failed to remove Container $CT_ID"
      return 1
    fi
    ;;
  "" | n | no)
    msg_info "Container was not removed."
    ;;
  *)
    msg_warn "Invalid response. Container was not removed."
    ;;
  esac
}
+
+# ------------------------------------------------------------------------------
+# Storage discovery / selection helpers
+# ------------------------------------------------------------------------------
+# ===== Storage discovery / selection helpers (ported from create_lxc.sh) =====
resolve_storage_preselect() {
  # Validate a preselected storage ($2) for class $1 ('template'|'container').
  # On success sets STORAGE_RESULT to the storage name and STORAGE_INFO to a
  # human-readable free/used summary; returns non-zero when the storage is
  # missing or does not support the required content type.
  local class="$1" preselect="$2" required_content=""
  case "$class" in
  template) required_content="vztmpl" ;;
  container) required_content="rootdir" ;;
  *) return 1 ;;
  esac
  [[ -z "$preselect" ]] && return 1
  if ! pvesm status -content "$required_content" | awk 'NR>1{print $1}' | grep -qx -- "$preselect"; then
    msg_warn "Preselected storage '${preselect}' does not support content '${required_content}' (or not found)"
    return 1
  fi

  local line total used free
  line="$(pvesm status | awk -v s="$preselect" 'NR>1 && $1==s {print $0}')"
  if [[ -z "$line" ]]; then
    STORAGE_INFO="n/a"
  else
    total="$(awk '{print $4}' <<<"$line")"
    used="$(awk '{print $5}' <<<"$line")"
    free="$(awk '{print $6}' <<<"$line")"
    local total_h used_h free_h
    if command -v numfmt >/dev/null 2>&1; then
      # pvesm status reports sizes in KiB — convert with --from-unit=K,
      # consistent with select_storage (without it the values read 1024x low).
      total_h="$(numfmt --to=iec --suffix=B --from-unit=K --format %.1f "$total" 2>/dev/null || echo "$total")"
      used_h="$(numfmt --to=iec --suffix=B --from-unit=K --format %.1f "$used" 2>/dev/null || echo "$used")"
      free_h="$(numfmt --to=iec --suffix=B --from-unit=K --format %.1f "$free" 2>/dev/null || echo "$free")"
      STORAGE_INFO="Free: ${free_h}  Used: ${used_h}"
    else
      STORAGE_INFO="Free: ${free}  Used: ${used}"
    fi
  fi
  STORAGE_RESULT="$preselect"
  return 0
}
+
# Align GPU device GIDs in the container config with the video/render group
# IDs that actually exist inside the container (creating the groups when
# missing), rewriting devN entries and restarting the container if needed.
# No-op unless configure_gpu_passthrough exported GPU_TYPE earlier.
fix_gpu_gids() {
  if [[ -z "${GPU_TYPE:-}" ]]; then
    return 0
  fi

  msg_info "Detecting and setting correct GPU group IDs"

  # Determine the actual GIDs from inside the container
  local video_gid=$(pct exec "$CTID" -- sh -c "getent group video 2>/dev/null | cut -d: -f3")
  local render_gid=$(pct exec "$CTID" -- sh -c "getent group render 2>/dev/null | cut -d: -f3")

  # Fallbacks if the groups do not exist
  if [[ -z "$video_gid" ]]; then
    # Try to create the video group
    pct exec "$CTID" -- sh -c "groupadd -r video 2>/dev/null || true"
    video_gid=$(pct exec "$CTID" -- sh -c "getent group video 2>/dev/null | cut -d: -f3")
    [[ -z "$video_gid" ]] && video_gid="44" # Ultimate fallback
  fi

  if [[ -z "$render_gid" ]]; then
    # Try to create the render group
    pct exec "$CTID" -- sh -c "groupadd -r render 2>/dev/null || true"
    render_gid=$(pct exec "$CTID" -- sh -c "getent group render 2>/dev/null | cut -d: -f3")
    [[ -z "$render_gid" ]] && render_gid="104" # Ultimate fallback
  fi

  msg_info "Container GIDs detected - video:${video_gid}, render:${render_gid}"

  # Check whether the GIDs differ from the defaults (44/104 written earlier)
  local need_update=0
  if [[ "$video_gid" != "44" ]] || [[ "$render_gid" != "104" ]]; then
    need_update=1
  fi

  if [[ $need_update -eq 1 ]]; then
    msg_info "Updating device GIDs in container config"

    # Stop the container for the config update
    pct stop "$CTID" >/dev/null 2>&1

    # Update the dev entries with correct GIDs
    # Back up the config
    cp "$LXC_CONFIG" "${LXC_CONFIG}.bak"

    # Parse and update each dev entry
    while IFS= read -r line; do
      if [[ "$line" =~ ^dev[0-9]+: ]]; then
        # Extract device path
        local device_path=$(echo "$line" | sed -E 's/^dev[0-9]+: ([^,]+).*/\1/')
        local dev_num=$(echo "$line" | sed -E 's/^(dev[0-9]+):.*/\1/')

        if [[ "$device_path" =~ renderD ]]; then
          # RenderD device - use render GID
          echo "${dev_num}: ${device_path},gid=${render_gid}"
        elif [[ "$device_path" =~ card ]]; then
          # Card device - use video GID
          echo "${dev_num}: ${device_path},gid=${video_gid}"
        else
          # Keep original line
          echo "$line"
        fi
      else
        # Keep non-dev lines
        echo "$line"
      fi
    done <"$LXC_CONFIG" >"${LXC_CONFIG}.new"

    mv "${LXC_CONFIG}.new" "$LXC_CONFIG"

    # Restart the container
    pct start "$CTID" >/dev/null 2>&1
    sleep 3

    msg_ok "Device GIDs updated successfully"
  else
    msg_ok "Device GIDs are already correct"
  fi
  # For privileged containers also fix ownership/permissions of /dev/dri
  # nodes inside the container to match the detected groups.
  if [[ "$CT_TYPE" == "0" ]]; then
    pct exec "$CTID" -- bash -c "
      if [ -d /dev/dri ]; then
        for dev in /dev/dri/*; do
          if [ -e \"\$dev\" ]; then
            if [[ \"\$dev\" =~ renderD ]]; then
              chgrp ${render_gid} \"\$dev\" 2>/dev/null || true
            else
              chgrp ${video_gid} \"\$dev\" 2>/dev/null || true
            fi
            chmod 660 \"\$dev\" 2>/dev/null || true
          fi
        done
      fi
    " >/dev/null 2>&1
  fi
}
+
+# NVIDIA-spezific check on host
check_nvidia_host_setup() {
  # Verify the Proxmox host has a working NVIDIA driver stack: nvidia-smi
  # must be installed and must run successfully. Returns non-zero when the
  # prerequisites are missing so callers can skip NVIDIA passthrough.
  if ! command -v nvidia-smi >/dev/null 2>&1; then
    msg_warn "NVIDIA GPU detected but nvidia-smi not found on host"
    msg_warn "Please install NVIDIA drivers on host first."
    # Manual remediation steps, kept for reference:
    #echo "  1. Download driver: wget https://us.download.nvidia.com/XFree86/Linux-x86_64/550.127.05/NVIDIA-Linux-x86_64-550.127.05.run"
    #echo "  2. Install: ./NVIDIA-Linux-x86_64-550.127.05.run --dkms"
    #echo "  3. Verify: nvidia-smi"
    return 1
  fi

  # Binary exists but may still fail (e.g. driver/kernel module mismatch).
  nvidia-smi >/dev/null 2>&1 || {
    msg_warn "nvidia-smi installed but not working. Driver issue?"
    return 1
  }

  return 0
}
+
check_storage_support() {
  # Succeed (return 0) when at least one storage pool supports content
  # type $1 (e.g. 'rootdir', 'vztmpl', 'iso'); fail otherwise.
  local content_type="$1"
  local found=0
  local entry name
  while IFS= read -r entry; do
    name=$(awk '{print $1}' <<<"$entry")
    if [[ -n "$name" ]]; then
      found=1
    fi
  done < <(pvesm status -content "$content_type" 2>/dev/null | awk 'NR>1')
  [[ $found -eq 1 ]]
}
+
# Prompt the user to pick a storage pool for the given class ('container',
# 'template', 'iso', 'images', 'backup', 'snippets'). Auto-selects when only
# one pool supports the content type. Sets STORAGE_RESULT (pool name) and
# STORAGE_INFO (free/used summary); returns 1 on bad class, 2 when no pool
# supports the content type; calls exit_script if the dialog is cancelled.
select_storage() {
  local CLASS=$1 CONTENT CONTENT_LABEL
  case $CLASS in
  container)
    CONTENT='rootdir'
    CONTENT_LABEL='Container'
    ;;
  template)
    CONTENT='vztmpl'
    CONTENT_LABEL='Container template'
    ;;
  iso)
    CONTENT='iso'
    CONTENT_LABEL='ISO image'
    ;;
  images)
    CONTENT='images'
    CONTENT_LABEL='VM Disk image'
    ;;
  backup)
    CONTENT='backup'
    CONTENT_LABEL='Backup'
    ;;
  snippets)
    CONTENT='snippets'
    CONTENT_LABEL='Snippets'
    ;;
  *)
    msg_error "Invalid storage class '$CLASS'"
    return 1
    ;;
  esac

  # MENU holds whiptail radiolist triplets (display, info, state);
  # STORAGE_MAP maps the display label back to the pool name.
  declare -A STORAGE_MAP
  local -a MENU=()
  local COL_WIDTH=0

  # pvesm status columns: name type status total used free (sizes in KiB,
  # hence numfmt --from-unit=K below).
  while read -r TAG TYPE _ TOTAL USED FREE _; do
    [[ -n "$TAG" && -n "$TYPE" ]] || continue
    local DISPLAY="${TAG} (${TYPE})"
    local USED_FMT=$(numfmt --to=iec --from-unit=K --format %.1f <<<"$USED")
    local FREE_FMT=$(numfmt --to=iec --from-unit=K --format %.1f <<<"$FREE")
    local INFO="Free: ${FREE_FMT}B  Used: ${USED_FMT}B"
    STORAGE_MAP["$DISPLAY"]="$TAG"
    MENU+=("$DISPLAY" "$INFO" "OFF")
    ((${#DISPLAY} > COL_WIDTH)) && COL_WIDTH=${#DISPLAY}
  done < <(pvesm status -content "$CONTENT" | awk 'NR>1')

  if [[ ${#MENU[@]} -eq 0 ]]; then
    msg_error "No storage found for content type '$CONTENT'."
    return 2
  fi

  # Exactly one candidate (one triplet): pick it without prompting.
  if [[ $((${#MENU[@]} / 3)) -eq 1 ]]; then
    STORAGE_RESULT="${STORAGE_MAP[${MENU[0]}]}"
    STORAGE_INFO="${MENU[1]}"
    return 0
  fi

  local WIDTH=$((COL_WIDTH + 42))
  while true; do
    local DISPLAY_SELECTED
    DISPLAY_SELECTED=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \
      --title "Storage Pools" \
      --radiolist "Which storage pool for ${CONTENT_LABEL,,}?\n(Spacebar to select)" \
      16 "$WIDTH" 6 "${MENU[@]}" 3>&1 1>&2 2>&3) || { exit_script; }

    # Trim trailing whitespace before looking the label up in STORAGE_MAP.
    DISPLAY_SELECTED=$(sed 's/[[:space:]]*$//' <<<"$DISPLAY_SELECTED")
    if [[ -z "$DISPLAY_SELECTED" || -z "${STORAGE_MAP[$DISPLAY_SELECTED]+_}" ]]; then
      whiptail --msgbox "No valid storage selected. Please try again." 8 58
      continue
    fi
    STORAGE_RESULT="${STORAGE_MAP[$DISPLAY_SELECTED]}"
    for ((i = 0; i < ${#MENU[@]}; i += 3)); do
      if [[ "${MENU[$i]}" == "$DISPLAY_SELECTED" ]]; then
        STORAGE_INFO="${MENU[$i + 1]}"
        break
      fi
    done
    return 0
  done
}
+
+# ------------------------------------------------------------------------------
+# create_lxc_container()
+#
+# - Validates required inputs (CTID >= 100 and unused, PCT_OSTYPE set)
+# - Verifies storage support for 'rootdir' and 'vztmpl', resolves template and
+#   container storage (preselected or interactive), checks free space & quorum
+# - Discovers the OS template locally and online (pveam), offers alternative
+#   versions when none match, downloads/re-validates the template archive
+# - Creates the container via 'pct create' with repair/retry logic, a fallback
+#   to the 'local' storage, and an optional pve-container/lxc-pve stack
+#   upgrade when pct reports an unsupported template version
+# - Exits with distinct codes (200-231) consumed by api_exit_script()
+# ------------------------------------------------------------------------------
+create_lxc_container() {
+  # ------------------------------------------------------------------------------
+  # Optional verbose mode (debug tracing)
+  # ------------------------------------------------------------------------------
+  if [[ "${CREATE_LXC_VERBOSE:-no}" == "yes" ]]; then set -x; fi
+
+  # ------------------------------------------------------------------------------
+  # Helpers (dynamic versioning / template parsing)
+  # ------------------------------------------------------------------------------
+  # Installed version of a dpkg package (empty string if not installed).
+  pkg_ver() { dpkg-query -W -f='${Version}\n' "$1" 2>/dev/null || echo ""; }
+  # APT candidate version of a package (empty/"none" if no candidate).
+  pkg_cand() { apt-cache policy "$1" 2>/dev/null | awk '/Candidate:/ {print $2}'; }
+
+  # Debian-version comparisons (ge/gt/lt) via dpkg.
+  ver_ge() { dpkg --compare-versions "$1" ge "$2"; }
+  ver_gt() { dpkg --compare-versions "$1" gt "$2"; }
+  ver_lt() { dpkg --compare-versions "$1" lt "$2"; }
+
+  # Extract Debian OS minor from template name: debian-13-standard_13.1-1_amd64.tar.zst => "13.1"
+  parse_template_osver() { sed -n 's/.*_\([0-9][0-9]*\(\.[0-9]\+\)\?\)-.*/\1/p' <<<"$1"; }
+
+  # Offer upgrade for pve-container/lxc-pve if candidate > installed; optional auto-retry pct create
+  # Returns:
+  #   0 = no upgrade needed
+  #   1 = upgraded (and if do_retry=yes and retry succeeded, creation done)
+  #   2 = user declined
+  #   3 = upgrade attempted but failed OR retry failed
+  offer_lxc_stack_upgrade_and_maybe_retry() {
+    local do_retry="${1:-no}" # yes|no
+    local _pvec_i _pvec_c _lxcp_i _lxcp_c need=0
+
+    _pvec_i="$(pkg_ver pve-container)"
+    _lxcp_i="$(pkg_ver lxc-pve)"
+    _pvec_c="$(pkg_cand pve-container)"
+    _lxcp_c="$(pkg_cand lxc-pve)"
+
+    # Upgrade is "needed" when either package has a newer candidate available.
+    if [[ -n "$_pvec_c" && "$_pvec_c" != "none" ]]; then
+      ver_gt "$_pvec_c" "${_pvec_i:-0}" && need=1
+    fi
+    if [[ -n "$_lxcp_c" && "$_lxcp_c" != "none" ]]; then
+      ver_gt "$_lxcp_c" "${_lxcp_i:-0}" && need=1
+    fi
+    if [[ $need -eq 0 ]]; then
+      msg_debug "No newer candidate for pve-container/lxc-pve (installed=$_pvec_i/$_lxcp_i, cand=$_pvec_c/$_lxcp_c)"
+      return 0
+    fi
+
+    echo
+    echo "An update for the Proxmox LXC stack is available:"
+    echo "  pve-container: installed=${_pvec_i:-n/a}  candidate=${_pvec_c:-n/a}"
+    echo "  lxc-pve      : installed=${_lxcp_i:-n/a}  candidate=${_lxcp_c:-n/a}"
+    echo
+    read -rp "Do you want to upgrade now? [y/N] " _ans
+    case "${_ans,,}" in
+    y | yes)
+      msg_info "Upgrading Proxmox LXC stack (pve-container, lxc-pve)"
+      if apt-get update -qq >/dev/null && apt-get install -y --only-upgrade pve-container lxc-pve >/dev/null; then
+        msg_ok "LXC stack upgraded."
+        if [[ "$do_retry" == "yes" ]]; then
+          msg_info "Retrying container creation after upgrade"
+          # Relies on CTID/TEMPLATE_STORAGE/TEMPLATE/PCT_OPTIONS/LOGFILE from
+          # the enclosing function's scope.
+          if pct create "$CTID" "${TEMPLATE_STORAGE}:vztmpl/${TEMPLATE}" "${PCT_OPTIONS[@]}" >>"$LOGFILE" 2>&1; then
+            msg_ok "Container created successfully after upgrade."
+            return 0
+          else
+            msg_error "pct create still failed after upgrade. See $LOGFILE"
+            return 3
+          fi
+        fi
+        return 1
+      else
+        msg_error "Upgrade failed. Please check APT output."
+        return 3
+      fi
+      ;;
+    *) return 2 ;;
+    esac
+  }
+
+  # ------------------------------------------------------------------------------
+  # Required input variables
+  # ------------------------------------------------------------------------------
+  [[ "${CTID:-}" ]] || {
+    msg_error "You need to set 'CTID' variable."
+    exit 203
+  }
+  [[ "${PCT_OSTYPE:-}" ]] || {
+    msg_error "You need to set 'PCT_OSTYPE' variable."
+    exit 204
+  }
+
+  msg_debug "CTID=$CTID"
+  msg_debug "PCT_OSTYPE=$PCT_OSTYPE"
+  msg_debug "PCT_OSVERSION=${PCT_OSVERSION:-default}"
+
+  # ID checks
+  [[ "$CTID" -ge 100 ]] || {
+    msg_error "ID cannot be less than 100."
+    exit 205
+  }
+  # Reject IDs already used by either a VM (qm) or a container (pct).
+  if qm status "$CTID" &>/dev/null || pct status "$CTID" &>/dev/null; then
+    echo -e "ID '$CTID' is already in use."
+    unset CTID
+    msg_error "Cannot use ID that is already in use."
+    exit 206
+  fi
+
+  # Storage capability check
+  check_storage_support "rootdir" || {
+    msg_error "No valid storage found for 'rootdir' [Container]"
+    exit 1
+  }
+  check_storage_support "vztmpl" || {
+    msg_error "No valid storage found for 'vztmpl' [Template]"
+    exit 1
+  }
+
+  # Template storage selection
+  # resolve_storage_preselect is presumably defined elsewhere in this file; it
+  # accepts a class and an optional preselected pool — TODO confirm contract.
+  if resolve_storage_preselect template "${TEMPLATE_STORAGE:-}"; then
+    TEMPLATE_STORAGE="$STORAGE_RESULT"
+    TEMPLATE_STORAGE_INFO="$STORAGE_INFO"
+    msg_ok "Storage ${BL}${TEMPLATE_STORAGE}${CL} (${TEMPLATE_STORAGE_INFO}) [Template]"
+  else
+    # NOTE(review): if var_template_storage is set this loop never breaks —
+    # verify the intended behavior for that configuration.
+    while true; do
+      if [[ -z "${var_template_storage:-}" ]]; then
+        if select_storage template; then
+          TEMPLATE_STORAGE="$STORAGE_RESULT"
+          TEMPLATE_STORAGE_INFO="$STORAGE_INFO"
+          msg_ok "Storage ${BL}${TEMPLATE_STORAGE}${CL} (${TEMPLATE_STORAGE_INFO}) [Template]"
+          break
+        fi
+      fi
+    done
+  fi
+
+  # Container storage selection
+  if resolve_storage_preselect container "${CONTAINER_STORAGE:-}"; then
+    CONTAINER_STORAGE="$STORAGE_RESULT"
+    CONTAINER_STORAGE_INFO="$STORAGE_INFO"
+    msg_ok "Storage ${BL}${CONTAINER_STORAGE}${CL} (${CONTAINER_STORAGE_INFO}) [Container]"
+  else
+    if [[ -z "${var_container_storage:-}" ]]; then
+      if select_storage container; then
+        CONTAINER_STORAGE="$STORAGE_RESULT"
+        CONTAINER_STORAGE_INFO="$STORAGE_INFO"
+        msg_ok "Storage ${BL}${CONTAINER_STORAGE}${CL} (${CONTAINER_STORAGE_INFO}) [Container]"
+      fi
+    fi
+  fi
+
+  # Validate content types
+  msg_info "Validating content types of storage '$CONTAINER_STORAGE'"
+  STORAGE_CONTENT=$(grep -A4 -E "^(zfspool|dir|lvmthin|lvm): $CONTAINER_STORAGE" /etc/pve/storage.cfg | grep content | awk '{$1=""; print $0}' | xargs)
+  msg_debug "Storage '$CONTAINER_STORAGE' has content types: $STORAGE_CONTENT"
+  grep -qw "rootdir" <<<"$STORAGE_CONTENT" || {
+    msg_error "Storage '$CONTAINER_STORAGE' does not support 'rootdir'. Cannot create LXC."
+    exit 217
+  }
+  $STD msg_ok "Storage '$CONTAINER_STORAGE' supports 'rootdir'"
+
+  msg_info "Validating content types of template storage '$TEMPLATE_STORAGE'"
+  TEMPLATE_CONTENT=$(grep -A4 -E "^[^:]+: $TEMPLATE_STORAGE" /etc/pve/storage.cfg | grep content | awk '{$1=""; print $0}' | xargs)
+  msg_debug "Template storage '$TEMPLATE_STORAGE' has content types: $TEMPLATE_CONTENT"
+  if ! grep -qw "vztmpl" <<<"$TEMPLATE_CONTENT"; then
+    msg_warn "Template storage '$TEMPLATE_STORAGE' does not declare 'vztmpl'. This may cause pct create to fail."
+  else
+    $STD msg_ok "Template storage '$TEMPLATE_STORAGE' supports 'vztmpl'"
+  fi
+
+  # Free space check
+  # pvesm reports sizes in KiB; require PCT_DISK_SIZE (GiB, default 8) free.
+  STORAGE_FREE=$(pvesm status | awk -v s="$CONTAINER_STORAGE" '$1 == s { print $6 }')
+  REQUIRED_KB=$((${PCT_DISK_SIZE:-8} * 1024 * 1024))
+  [[ "$STORAGE_FREE" -ge "$REQUIRED_KB" ]] || {
+    msg_error "Not enough space on '$CONTAINER_STORAGE'. Needed: ${PCT_DISK_SIZE:-8}G."
+    exit 214
+  }
+
+  # Cluster quorum (if cluster)
+  if [[ -f /etc/pve/corosync.conf ]]; then
+    msg_info "Checking cluster quorum"
+    if ! pvecm status | awk -F':' '/^Quorate/ { exit ($2 ~ /Yes/) ? 0 : 1 }'; then
+      msg_error "Cluster is not quorate. Start all nodes or configure quorum device (QDevice)."
+      exit 210
+    fi
+    msg_ok "Cluster is quorate"
+  fi
+
+  # ------------------------------------------------------------------------------
+  # Template discovery & validation
+  # ------------------------------------------------------------------------------
+  TEMPLATE_SEARCH="${PCT_OSTYPE}-${PCT_OSVERSION:-}"
+  # Distro-specific template naming: debian/ubuntu use "-standard_",
+  # alpine/fedora/rocky/centos use "-default_".
+  case "$PCT_OSTYPE" in
+  debian | ubuntu) TEMPLATE_PATTERN="-standard_" ;;
+  alpine | fedora | rocky | centos) TEMPLATE_PATTERN="-default_" ;;
+  *) TEMPLATE_PATTERN="" ;;
+  esac
+
+  msg_info "Searching for template '$TEMPLATE_SEARCH'"
+
+  # Build regex patterns outside awk/grep for clarity
+  SEARCH_PATTERN="^${TEMPLATE_SEARCH}"
+
+  #echo "[DEBUG] TEMPLATE_SEARCH='$TEMPLATE_SEARCH'"
+  #echo "[DEBUG] SEARCH_PATTERN='$SEARCH_PATTERN'"
+  #echo "[DEBUG] TEMPLATE_PATTERN='$TEMPLATE_PATTERN'"
+
+  # Locally cached templates on the chosen template storage, version-sorted.
+  mapfile -t LOCAL_TEMPLATES < <(
+    pveam list "$TEMPLATE_STORAGE" 2>/dev/null |
+      awk -v search="${SEARCH_PATTERN}" -v pattern="${TEMPLATE_PATTERN}" '$1 ~ search && $1 ~ pattern {print $1}' |
+      sed 's|.*/||' | sort -t - -k 2 -V
+  )
+
+  pveam update >/dev/null 2>&1 || msg_warn "Could not update template catalog (pveam update failed)."
+
+  #echo "[DEBUG] pveam available output (first 5 lines with .tar files):"
+  #pveam available -section system 2>/dev/null | grep -E '\.(tar\.zst|tar\.xz|tar\.gz)$' | head -5 | sed 's/^/  /'
+
+  # set +u guards against unbound-array access under 'set -u' on empty results.
+  set +u
+  mapfile -t ONLINE_TEMPLATES < <(pveam available -section system 2>/dev/null | grep -E '\.(tar\.zst|tar\.xz|tar\.gz)$' | awk '{print $2}' | grep -E "${SEARCH_PATTERN}.*${TEMPLATE_PATTERN}" | sort -t - -k 2 -V 2>/dev/null || true)
+  #echo "[DEBUG] After filtering: ${#ONLINE_TEMPLATES[@]} online templates found"
+  set -u
+  # NOTE(review): this loop echoes every matching template unconditionally —
+  # looks like leftover debug output (its header echo is commented out).
+  if [[ ${#ONLINE_TEMPLATES[@]} -gt 0 ]]; then
+    #echo "[DEBUG] Online templates:"
+    for tmpl in "${ONLINE_TEMPLATES[@]}"; do
+      echo "  - $tmpl"
+    done
+  fi
+
+  # Newest online template (arrays are version-sorted ascending).
+  ONLINE_TEMPLATE=""
+  [[ ${#ONLINE_TEMPLATES[@]} -gt 0 ]] && ONLINE_TEMPLATE="${ONLINE_TEMPLATES[-1]}"
+
+  #msg_debug "SEARCH_PATTERN='${SEARCH_PATTERN}' TEMPLATE_PATTERN='${TEMPLATE_PATTERN}'"
+  #msg_debug "Found ${#LOCAL_TEMPLATES[@]} local templates, ${#ONLINE_TEMPLATES[@]} online templates"
+  if [[ ${#ONLINE_TEMPLATES[@]} -gt 0 ]]; then
+    #msg_debug "First 3 online templates:"
+    count=0
+    for idx in "${!ONLINE_TEMPLATES[@]}"; do
+      #msg_debug "  [$idx]: ${ONLINE_TEMPLATES[$idx]}"
+      ((count++))
+      [[ $count -ge 3 ]] && break
+    done
+  fi
+  #msg_debug "ONLINE_TEMPLATE='$ONLINE_TEMPLATE'"
+
+  # Prefer the newest local template; fall back to the newest online one.
+  if [[ ${#LOCAL_TEMPLATES[@]} -gt 0 ]]; then
+    TEMPLATE="${LOCAL_TEMPLATES[-1]}"
+    TEMPLATE_SOURCE="local"
+  else
+    TEMPLATE="$ONLINE_TEMPLATE"
+    TEMPLATE_SOURCE="online"
+  fi
+
+  # If still no template, try to find alternatives
+  if [[ -z "$TEMPLATE" ]]; then
+    echo ""
+    echo "[DEBUG] No template found for ${PCT_OSTYPE} ${PCT_OSVERSION}, searching for alternatives..."
+
+    # Get all available versions for this OS type
+    mapfile -t AVAILABLE_VERSIONS < <(
+      pveam available -section system 2>/dev/null |
+        grep -E '\.(tar\.zst|tar\.xz|tar\.gz)$' |
+        awk -F'\t' '{print $1}' |
+        grep "^${PCT_OSTYPE}-" |
+        sed -E "s/.*${PCT_OSTYPE}-([0-9]+(\.[0-9]+)?).*/\1/" |
+        sort -u -V 2>/dev/null
+    )
+
+    if [[ ${#AVAILABLE_VERSIONS[@]} -gt 0 ]]; then
+      echo ""
+      echo "${BL}Available ${PCT_OSTYPE} versions:${CL}"
+      for i in "${!AVAILABLE_VERSIONS[@]}"; do
+        echo "  [$((i + 1))] ${AVAILABLE_VERSIONS[$i]}"
+      done
+      echo ""
+      read -p "Select version [1-${#AVAILABLE_VERSIONS[@]}] or press Enter to cancel: " choice
+
+      if [[ "$choice" =~ ^[0-9]+$ ]] && [[ "$choice" -ge 1 ]] && [[ "$choice" -le ${#AVAILABLE_VERSIONS[@]} ]]; then
+        PCT_OSVERSION="${AVAILABLE_VERSIONS[$((choice - 1))]}"
+        TEMPLATE_SEARCH="${PCT_OSTYPE}-${PCT_OSVERSION}"
+        SEARCH_PATTERN="^${TEMPLATE_SEARCH}-"
+
+        #echo "[DEBUG] Retrying with version: $PCT_OSVERSION"
+
+        mapfile -t ONLINE_TEMPLATES < <(
+          pveam available -section system 2>/dev/null |
+            grep -E '\.(tar\.zst|tar\.xz|tar\.gz)$' |
+            awk -F'\t' '{print $1}' |
+            grep -E "${SEARCH_PATTERN}.*${TEMPLATE_PATTERN}" |
+            sort -t - -k 2 -V 2>/dev/null || true
+        )
+
+        if [[ ${#ONLINE_TEMPLATES[@]} -gt 0 ]]; then
+          TEMPLATE="${ONLINE_TEMPLATES[-1]}"
+          TEMPLATE_SOURCE="online"
+          #echo "[DEBUG] Found alternative: $TEMPLATE"
+        else
+          msg_error "No templates available for ${PCT_OSTYPE} ${PCT_OSVERSION}"
+          exit 225
+        fi
+      else
+        msg_info "Installation cancelled"
+        exit 0
+      fi
+    else
+      msg_error "No ${PCT_OSTYPE} templates available at all"
+      exit 225
+    fi
+  fi
+
+  #echo "[DEBUG] Selected TEMPLATE='$TEMPLATE' SOURCE='$TEMPLATE_SOURCE'"
+  #msg_debug "Selected TEMPLATE='$TEMPLATE' SOURCE='$TEMPLATE_SOURCE'"
+
+  # Resolve the on-disk path of the template: first via pvesm, then via the
+  # storage's configured path, finally the default local cache directory.
+  TEMPLATE_PATH="$(pvesm path $TEMPLATE_STORAGE:vztmpl/$TEMPLATE 2>/dev/null || true)"
+  if [[ -z "$TEMPLATE_PATH" ]]; then
+    TEMPLATE_BASE=$(awk -v s="$TEMPLATE_STORAGE" '$1==s {f=1} f && /path/ {print $2; exit}' /etc/pve/storage.cfg)
+    [[ -n "$TEMPLATE_BASE" ]] && TEMPLATE_PATH="$TEMPLATE_BASE/template/cache/$TEMPLATE"
+  fi
+
+  # If we still don't have a path but have a valid template name, construct it
+  if [[ -z "$TEMPLATE_PATH" && -n "$TEMPLATE" ]]; then
+    TEMPLATE_PATH="/var/lib/vz/template/cache/$TEMPLATE"
+  fi
+
+  # Last-resort guard: no path resolvable. Offers an interactive version
+  # switch and repeats the whole discovery once before giving up.
+  [[ -n "$TEMPLATE_PATH" ]] || {
+    if [[ -z "$TEMPLATE" ]]; then
+      msg_error "Template ${PCT_OSTYPE} ${PCT_OSVERSION} not available"
+
+      # Get available versions
+      mapfile -t AVAILABLE_VERSIONS < <(
+        pveam available -section system 2>/dev/null |
+          grep "^${PCT_OSTYPE}-" |
+          sed -E 's/.*'"${PCT_OSTYPE}"'-([0-9]+\.[0-9]+).*/\1/' |
+          grep -E '^[0-9]+\.[0-9]+$' |
+          sort -u -V 2>/dev/null || sort -u
+      )
+
+      if [[ ${#AVAILABLE_VERSIONS[@]} -gt 0 ]]; then
+        echo -e "\n${BL}Available versions:${CL}"
+        for i in "${!AVAILABLE_VERSIONS[@]}"; do
+          echo "  [$((i + 1))] ${AVAILABLE_VERSIONS[$i]}"
+        done
+
+        echo ""
+        read -p "Select version [1-${#AVAILABLE_VERSIONS[@]}] or Enter to exit: " choice
+
+        if [[ "$choice" =~ ^[0-9]+$ ]] && [[ "$choice" -ge 1 ]] && [[ "$choice" -le ${#AVAILABLE_VERSIONS[@]} ]]; then
+          export var_version="${AVAILABLE_VERSIONS[$((choice - 1))]}"
+          export PCT_OSVERSION="$var_version"
+          msg_ok "Switched to ${PCT_OSTYPE} ${var_version}"
+
+          # Retry template search with new version
+          TEMPLATE_SEARCH="${PCT_OSTYPE}-${PCT_OSVERSION:-}"
+          SEARCH_PATTERN="^${TEMPLATE_SEARCH}-"
+
+          mapfile -t LOCAL_TEMPLATES < <(
+            pveam list "$TEMPLATE_STORAGE" 2>/dev/null |
+              awk -v search="${SEARCH_PATTERN}" -v pattern="${TEMPLATE_PATTERN}" '$1 ~ search && $1 ~ pattern {print $1}' |
+              sed 's|.*/||' | sort -t - -k 2 -V
+          )
+          mapfile -t ONLINE_TEMPLATES < <(
+            pveam available -section system 2>/dev/null |
+              grep -E '\.(tar\.zst|tar\.xz|tar\.gz)$' |
+              awk -F'\t' '{print $1}' |
+              grep -E "${SEARCH_PATTERN}.*${TEMPLATE_PATTERN}" |
+              sort -t - -k 2 -V 2>/dev/null || true
+          )
+          ONLINE_TEMPLATE=""
+          [[ ${#ONLINE_TEMPLATES[@]} -gt 0 ]] && ONLINE_TEMPLATE="${ONLINE_TEMPLATES[-1]}"
+
+          if [[ ${#LOCAL_TEMPLATES[@]} -gt 0 ]]; then
+            TEMPLATE="${LOCAL_TEMPLATES[-1]}"
+            TEMPLATE_SOURCE="local"
+          else
+            TEMPLATE="$ONLINE_TEMPLATE"
+            TEMPLATE_SOURCE="online"
+          fi
+
+          TEMPLATE_PATH="$(pvesm path $TEMPLATE_STORAGE:vztmpl/$TEMPLATE 2>/dev/null || true)"
+          if [[ -z "$TEMPLATE_PATH" ]]; then
+            TEMPLATE_BASE=$(awk -v s="$TEMPLATE_STORAGE" '$1==s {f=1} f && /path/ {print $2; exit}' /etc/pve/storage.cfg)
+            [[ -n "$TEMPLATE_BASE" ]] && TEMPLATE_PATH="$TEMPLATE_BASE/template/cache/$TEMPLATE"
+          fi
+
+          # If we still don't have a path but have a valid template name, construct it
+          if [[ -z "$TEMPLATE_PATH" && -n "$TEMPLATE" ]]; then
+            TEMPLATE_PATH="/var/lib/vz/template/cache/$TEMPLATE"
+          fi
+
+          [[ -n "$TEMPLATE_PATH" ]] || {
+            msg_error "Template still not found after version change"
+            exit 220
+          }
+        else
+          msg_info "Installation cancelled"
+          exit 1
+        fi
+      else
+        msg_error "No ${PCT_OSTYPE} templates available"
+        exit 220
+      fi
+    fi
+  }
+
+  # Validate that we found a template
+  if [[ -z "$TEMPLATE" ]]; then
+    msg_error "No template found for ${PCT_OSTYPE} ${PCT_OSVERSION}"
+    msg_info "Please check:"
+    msg_info "  - Is pveam catalog available? (run: pveam available -section system)"
+    msg_info "  - Does the template exist for your OS version?"
+    exit 225
+  fi
+
+  msg_ok "Template ${BL}$TEMPLATE${CL} [$TEMPLATE_SOURCE]"
+  msg_debug "Resolved TEMPLATE_PATH=$TEMPLATE_PATH"
+
+  # Decide whether a (re-)download is needed: missing file, unreadable file,
+  # suspiciously small archive (<1MB), or a corrupted tarball.
+  NEED_DOWNLOAD=0
+  if [[ ! -f "$TEMPLATE_PATH" ]]; then
+    msg_info "Template not present locally – will download."
+    NEED_DOWNLOAD=1
+  elif [[ ! -r "$TEMPLATE_PATH" ]]; then
+    msg_error "Template file exists but is not readable – check permissions."
+    exit 221
+  elif [[ "$(stat -c%s "$TEMPLATE_PATH")" -lt 1000000 ]]; then
+    if [[ -n "$ONLINE_TEMPLATE" ]]; then
+      msg_warn "Template file too small (<1MB) – re-downloading."
+      NEED_DOWNLOAD=1
+    else
+      msg_warn "Template looks too small, but no online version exists. Keeping local file."
+    fi
+  elif ! tar -tf "$TEMPLATE_PATH" &>/dev/null; then
+    if [[ -n "$ONLINE_TEMPLATE" ]]; then
+      msg_warn "Template appears corrupted – re-downloading."
+      NEED_DOWNLOAD=1
+    else
+      msg_warn "Template appears corrupted, but no online version exists. Keeping local file."
+    fi
+  else
+    $STD msg_ok "Template $TEMPLATE is present and valid."
+  fi
+
+  # Offer to replace an outdated local template with the newest online one.
+  if [[ "$TEMPLATE_SOURCE" == "local" && -n "$ONLINE_TEMPLATE" && "$TEMPLATE" != "$ONLINE_TEMPLATE" ]]; then
+    msg_warn "Local template is outdated: $TEMPLATE (latest available: $ONLINE_TEMPLATE)"
+    if whiptail --yesno "A newer template is available:\n$ONLINE_TEMPLATE\n\nDo you want to download and use it instead?" 12 70; then
+      TEMPLATE="$ONLINE_TEMPLATE"
+      NEED_DOWNLOAD=1
+    else
+      msg_info "Continuing with local template $TEMPLATE"
+    fi
+  fi
+
+  # Download with up to 3 attempts and a linear backoff (5s, 10s).
+  if [[ "$NEED_DOWNLOAD" -eq 1 ]]; then
+    [[ -f "$TEMPLATE_PATH" ]] && rm -f "$TEMPLATE_PATH"
+    for attempt in {1..3}; do
+      msg_info "Attempt $attempt: Downloading template $TEMPLATE to $TEMPLATE_STORAGE"
+      if pveam download "$TEMPLATE_STORAGE" "$TEMPLATE" >/dev/null 2>&1; then
+        msg_ok "Template download successful."
+        break
+      fi
+      if [[ $attempt -eq 3 ]]; then
+        msg_error "Failed after 3 attempts. Please check network access, permissions, or manually run:\n  pveam download $TEMPLATE_STORAGE $TEMPLATE"
+        exit 222
+      fi
+      sleep $((attempt * 5))
+    done
+  fi
+
+  if ! pveam list "$TEMPLATE_STORAGE" 2>/dev/null | grep -q "$TEMPLATE"; then
+    msg_error "Template $TEMPLATE not available in storage $TEMPLATE_STORAGE after download."
+    exit 223
+  fi
+
+  # ------------------------------------------------------------------------------
+  # Dynamic preflight for Debian 13.x: offer upgrade if available (no hard mins)
+  # ------------------------------------------------------------------------------
+  if [[ "$PCT_OSTYPE" == "debian" ]]; then
+    OSVER="$(parse_template_osver "$TEMPLATE")"
+    if [[ -n "$OSVER" ]]; then
+      # Proactive check, but non-blocking – only an offer, never an abort.
+      offer_lxc_stack_upgrade_and_maybe_retry "no" || true
+    fi
+  fi
+
+  # ------------------------------------------------------------------------------
+  # Create LXC Container
+  # ------------------------------------------------------------------------------
+  msg_info "Creating LXC container"
+
+  # Ensure subuid/subgid entries exist
+  grep -q "root:100000:65536" /etc/subuid || echo "root:100000:65536" >>/etc/subuid
+  grep -q "root:100000:65536" /etc/subgid || echo "root:100000:65536" >>/etc/subgid
+
+  # Assemble pct options
+  # NOTE(review): unquoted expansion is word-splitting dependent — presumably
+  # intentional here so option strings split into separate pct arguments.
+  PCT_OPTIONS=(${PCT_OPTIONS[@]:-${DEFAULT_PCT_OPTIONS[@]}})
+  [[ " ${PCT_OPTIONS[*]} " =~ " -rootfs " ]] || PCT_OPTIONS+=(-rootfs "$CONTAINER_STORAGE:${PCT_DISK_SIZE:-8}")
+
+  # Lock by template file (avoid concurrent downloads/creates)
+  lockfile="/tmp/template.${TEMPLATE}.lock"
+  exec 9>"$lockfile" || {
+    msg_error "Failed to create lock file '$lockfile'."
+    exit 200
+  }
+  flock -w 60 9 || {
+    msg_error "Timeout while waiting for template lock."
+    exit 211
+  }
+
+  LOGFILE="/tmp/pct_create_${CTID}.log"
+  msg_debug "pct create command: pct create $CTID ${TEMPLATE_STORAGE}:vztmpl/${TEMPLATE} ${PCT_OPTIONS[*]}"
+  msg_debug "Logfile: $LOGFILE"
+
+  # First attempt
+  if ! pct create "$CTID" "${TEMPLATE_STORAGE}:vztmpl/${TEMPLATE}" "${PCT_OPTIONS[@]}" >"$LOGFILE" 2>&1; then
+    msg_error "Container creation failed on ${TEMPLATE_STORAGE}. Checking template..."
+
+    # Validate template file
+    if [[ ! -s "$TEMPLATE_PATH" || "$(stat -c%s "$TEMPLATE_PATH")" -lt 1000000 ]]; then
+      msg_warn "Template file too small or missing – re-downloading."
+      rm -f "$TEMPLATE_PATH"
+      pveam download "$TEMPLATE_STORAGE" "$TEMPLATE"
+    elif ! tar -tf "$TEMPLATE_PATH" &>/dev/null; then
+      if [[ -n "$ONLINE_TEMPLATE" ]]; then
+        msg_warn "Template appears corrupted – re-downloading."
+        rm -f "$TEMPLATE_PATH"
+        pveam download "$TEMPLATE_STORAGE" "$TEMPLATE"
+      else
+        msg_warn "Template appears corrupted, but no online version exists. Skipping re-download."
+      fi
+    fi
+
+    # Retry after repair
+    if ! pct create "$CTID" "${TEMPLATE_STORAGE}:vztmpl/${TEMPLATE}" "${PCT_OPTIONS[@]}" >>"$LOGFILE" 2>&1; then
+      # Fallback to local storage
+      if [[ "$TEMPLATE_STORAGE" != "local" ]]; then
+        msg_warn "Retrying container creation with fallback to local storage..."
+        LOCAL_TEMPLATE_PATH="/var/lib/vz/template/cache/$TEMPLATE"
+        if [[ ! -f "$LOCAL_TEMPLATE_PATH" ]]; then
+          msg_info "Downloading template to local..."
+          pveam download local "$TEMPLATE" >/dev/null 2>&1
+        fi
+        if pct create "$CTID" "local:vztmpl/${TEMPLATE}" "${PCT_OPTIONS[@]}" >>"$LOGFILE" 2>&1; then
+          msg_ok "Container successfully created using local fallback."
+        else
+          # --- Dynamic stack upgrade + auto-retry on the well-known error pattern ---
+          if grep -qiE 'unsupported .* version' "$LOGFILE"; then
+            echo
+            echo "pct reported 'unsupported ... version' – your LXC stack might be too old for this template."
+            echo "We can try to upgrade 'pve-container' and 'lxc-pve' now and retry automatically."
+            offer_lxc_stack_upgrade_and_maybe_retry "yes"
+            rc=$?
+            case $rc in
+            0) : ;; # success - container created, continue
+            2)
+              echo "Upgrade was declined. Please update and re-run:
+  apt update && apt install --only-upgrade pve-container lxc-pve"
+              exit 231
+              ;;
+            3)
+              echo "Upgrade and/or retry failed. Please inspect: $LOGFILE"
+              exit 231
+              ;;
+            esac
+          else
+            msg_error "Container creation failed even with local fallback. See $LOGFILE"
+            if whiptail --yesno "pct create failed.\nDo you want to enable verbose debug mode and view detailed logs?" 12 70; then
+              set -x
+              bash -x -c "pct create $CTID local:vztmpl/${TEMPLATE} ${PCT_OPTIONS[*]}" 2>&1 | tee -a "$LOGFILE"
+              set +x
+            fi
+            exit 209
+          fi
+        fi
+      else
+        msg_error "Container creation failed on local storage. See $LOGFILE"
+        # --- Dynamic stack upgrade + auto-retry on the well-known error pattern ---
+        if grep -qiE 'unsupported .* version' "$LOGFILE"; then
+          echo
+          echo "pct reported 'unsupported ... version' – your LXC stack might be too old for this template."
+          echo "We can try to upgrade 'pve-container' and 'lxc-pve' now and retry automatically."
+          offer_lxc_stack_upgrade_and_maybe_retry "yes"
+          rc=$?
+          case $rc in
+          0) : ;; # success - container created, continue
+          2)
+            echo "Upgrade was declined. Please update and re-run:
+  apt update && apt install --only-upgrade pve-container lxc-pve"
+            exit 231
+            ;;
+          3)
+            echo "Upgrade and/or retry failed. Please inspect: $LOGFILE"
+            exit 231
+            ;;
+          esac
+        else
+          msg_error "Container creation failed. See $LOGFILE"
+          if whiptail --yesno "pct create failed.\nDo you want to enable verbose debug mode and view detailed logs?" 12 70; then
+            set -x
+            bash -x -c "pct create $CTID local:vztmpl/${TEMPLATE} ${PCT_OPTIONS[*]}" 2>&1 | tee -a "$LOGFILE"
+            set +x
+          fi
+          exit 209
+        fi
+      fi
+    fi
+  fi
+
+  # Verify container exists
+  pct list | awk '{print $1}' | grep -qx "$CTID" || {
+    msg_error "Container ID $CTID not listed in 'pct list'. See $LOGFILE"
+    exit 215
+  }
+
+  # Verify config rootfs
+  grep -q '^rootfs:' "/etc/pve/lxc/$CTID.conf" || {
+    msg_error "RootFS entry missing in container config. See $LOGFILE"
+    exit 216
+  }
+
+  msg_ok "LXC Container ${BL}$CTID${CL} ${GN}was successfully created."
+}
+
+# ------------------------------------------------------------------------------
+# description()
+#
+# - Sets container description with HTML content (logo, links, badges)
+# - Restarts ping-instances.service if present
+# - Posts status "done" to API
+# ------------------------------------------------------------------------------
+description() {
+ IP=$(pct exec "$CTID" ip a s dev eth0 | awk '/inet / {print $2}' | cut -d/ -f1)
+
+ # Generate LXC Description
+ DESCRIPTION=$(
+ cat <
+
+
+
+
+ ${APP} LXC
+
+
+
+
+
+
+
+
+
+ GitHub
+
+
+
+ Discussions
+
+
+
+ Issues
+
+
+EOF
+ )
+ pct set "$CTID" -description "$DESCRIPTION"
+
+ if [[ -f /etc/systemd/system/ping-instances.service ]]; then
+ systemctl start ping-instances.service
+ fi
+
+ post_update_to_api "done" "none"
+}
+
+# ------------------------------------------------------------------------------
+# api_exit_script()
+#
+# - Exit trap handler
+# - Reports exit codes to API with detailed reason
+# - Handles known codes (100–209) and maps them to errors
+# ------------------------------------------------------------------------------
+api_exit_script() {
+  # EXIT-trap handler: translate a non-zero exit code into a human-readable
+  # failure reason and report it to the API via post_update_to_api.
+  # Successful runs (exit code 0) are not reported.
+  exit_code=$?
+  if [ $exit_code -eq 0 ]; then
+    return
+  fi
+  local reason
+  case $exit_code in
+  100) reason="100: Unexpected error in create_lxc.sh" ;;
+  101) reason="101: No network connection detected in create_lxc.sh" ;;
+  200) reason="200: LXC creation failed in create_lxc.sh" ;;
+  201) reason="201: Invalid Storage class in create_lxc.sh" ;;
+  202) reason="202: User aborted menu in create_lxc.sh" ;;
+  203) reason="203: CTID not set in create_lxc.sh" ;;
+  204) reason="204: PCT_OSTYPE not set in create_lxc.sh" ;;
+  205) reason="205: CTID cannot be less than 100 in create_lxc.sh" ;;
+  206) reason="206: CTID already in use in create_lxc.sh" ;;
+  207) reason="207: Template not found in create_lxc.sh" ;;
+  208) reason="208: Error downloading template in create_lxc.sh" ;;
+  209) reason="209: Container creation failed, but template is intact in create_lxc.sh" ;;
+  *) reason="Unknown error, exit code: $exit_code in create_lxc.sh" ;;
+  esac
+  post_update_to_api "failed" "$reason"
+}
+
+# Register outcome-reporting traps. The exit-code mapper is only installed on
+# a real Proxmox host (detected via the presence of 'pveversion').
+if command -v pveversion >/dev/null 2>&1; then
+  trap 'api_exit_script' EXIT
+fi
+# Report the failing command text on any error, and flag interrupts/termination.
+trap 'post_update_to_api "failed" "$BASH_COMMAND"' ERR
+trap 'post_update_to_api "failed" "INTERRUPTED"' SIGINT
+trap 'post_update_to_api "failed" "TERMINATED"' SIGTERM
diff --git a/misc/build.func.backup-refactoring-20251029-125644 b/misc/build.func.backup-refactoring-20251029-125644
new file mode 100644
index 000000000..d452f4637
--- /dev/null
+++ b/misc/build.func.backup-refactoring-20251029-125644
@@ -0,0 +1,3517 @@
+#!/usr/bin/env bash
+# Copyright (c) 2021-2025 community-scripts ORG
+# Author: tteck (tteckster) | MickLesk | michelroegl-brunner
+# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
+# Revision: 1
+
+# ==============================================================================
+# SECTION 1: CORE INITIALIZATION & VARIABLES
+# ==============================================================================
+
+# ------------------------------------------------------------------------------
+# variables()
+#
+# - Normalize application name (NSAPP = lowercase, no spaces)
+# - Build installer filename (var_install)
+# - Define regex for integer validation
+# - Fetch hostname of Proxmox node
+# - Set default values for diagnostics/method
+# - Generate random UUID for tracking
+# - Get Proxmox VE version and kernel version
+# ------------------------------------------------------------------------------
+variables() {
+  NSAPP=$(echo "${APP,,}" | tr -d ' ') # lowercase APP and strip spaces -> namespace-safe app name
+  var_install="${NSAPP}-install"       # basename of the matching install script
+  INTEGER='^[0-9]+([.][0-9]+)?$'       # NOTE(review): despite the name, this also matches decimals (e.g. "1.5")
+  PVEHOST_NAME=$(hostname)             # Proxmox node hostname (used verbatim; no case conversion happens here)
+  DIAGNOSTICS="yes"                    # default for the API diagnostics flag (may be overridden by the config file)
+  METHOD="default"                     # install method reported to the API
+  RANDOM_UUID="$(cat /proc/sys/kernel/random/uuid)" # per-run tracking ID for API correlation
+  CTTYPE="${CTTYPE:-${CT_TYPE:-1}}"    # container type fallback chain; 1 = unprivileged
+  #CT_TYPE=${var_unprivileged:-$CT_TYPE}
+
+  # Get Proxmox VE version (e.g. "8.2.4" from "pve-manager/8.2.4/...") and kernel version
+  if command -v pveversion >/dev/null 2>&1; then
+    PVEVERSION=$(pveversion | grep "pve-manager" | awk '{print $2}' | cut -d'/' -f1)
+  else
+    PVEVERSION="N/A" # not running on a Proxmox host
+  fi
+  KERNEL_VERSION=$(uname -r)
+}
+
+# -----------------------------------------------------------------------------
+# Community-Scripts bootstrap loader
+# - Always sources build.func from remote
+# - Updates local core files only if build.func changed
+# - Local cache: /usr/local/community-scripts/core
+# -----------------------------------------------------------------------------
+
+# FUNC_DIR="/usr/local/community-scripts/core"
+# mkdir -p "$FUNC_DIR"
+
+# BUILD_URL="https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func"
+# BUILD_REV="$FUNC_DIR/build.rev"
+# DEVMODE="${DEVMODE:-no}"
+
+# # --- Step 1: fetch build.func content once, compute hash ---
+# build_content="$(curl -fsSL "$BUILD_URL")" || {
+# echo "❌ Failed to fetch build.func"
+# exit 1
+# }
+
+# newhash=$(printf "%s" "$build_content" | sha256sum | awk '{print $1}')
+# oldhash=$(cat "$BUILD_REV" 2>/dev/null || echo "")
+
+# # --- Step 2: if build.func changed, offer update for core files ---
+# if [ "$newhash" != "$oldhash" ]; then
+# echo "⚠️ build.func changed!"
+
+# while true; do
+# read -rp "Refresh local core files? [y/N/diff]: " ans
+# case "$ans" in
+# [Yy]*)
+# echo "$newhash" >"$BUILD_REV"
+
+# update_func_file() {
+# local file="$1"
+# local url="https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/$file"
+# local local_path="$FUNC_DIR/$file"
+
+# echo "⬇️ Downloading $file ..."
+# curl -fsSL "$url" -o "$local_path" || {
+# echo "❌ Failed to fetch $file"
+# exit 1
+# }
+# echo "✔️ Updated $file"
+# }
+
+# update_func_file core.func
+# update_func_file error_handler.func
+# update_func_file tools.func
+# break
+# ;;
+# [Dd]*)
+# for file in core.func error_handler.func tools.func; do
+# local_path="$FUNC_DIR/$file"
+# url="https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/$file"
+# remote_tmp="$(mktemp)"
+
+# curl -fsSL "$url" -o "$remote_tmp" || continue
+
+# if [ -f "$local_path" ]; then
+# echo "🔍 Diff for $file:"
+# diff -u "$local_path" "$remote_tmp" || echo "(no differences)"
+# else
+# echo "📦 New file $file will be installed"
+# fi
+
+# rm -f "$remote_tmp"
+# done
+# ;;
+# *)
+# echo "❌ Skipped updating local core files"
+# break
+# ;;
+# esac
+# done
+# else
+# if [ "$DEVMODE" != "yes" ]; then
+# echo "✔️ build.func unchanged → using existing local core files"
+# fi
+# fi
+
+# if [ -n "${_COMMUNITY_SCRIPTS_LOADER:-}" ]; then
+# return 0 2>/dev/null || exit 0
+# fi
+# _COMMUNITY_SCRIPTS_LOADER=1
+
+# # --- Step 3: always source local versions of the core files ---
+# source "$FUNC_DIR/core.func"
+# source "$FUNC_DIR/error_handler.func"
+# source "$FUNC_DIR/tools.func"
+
+# # --- Step 4: finally, source build.func directly from memory ---
+# # (no tmp file needed)
+# source <(printf "%s" "$build_content")
+
+# ------------------------------------------------------------------------------
+# Load core + error handler functions from community-scripts repo
+#
+# - Prefer curl if available, fallback to wget
+# - Load: core.func, error_handler.func, api.func
+# - Initialize error traps after loading
+# ------------------------------------------------------------------------------
+
+source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/api.func)
+
+if command -v curl >/dev/null 2>&1; then
+ source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/core.func)
+ source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/error_handler.func)
+ load_functions
+ catch_errors
+ #echo "(build.func) Loaded core.func via curl"
+elif command -v wget >/dev/null 2>&1; then
+ source <(wget -qO- https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/core.func)
+ source <(wget -qO- https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/error_handler.func)
+ load_functions
+ catch_errors
+ #echo "(build.func) Loaded core.func via wget"
+fi
+
+# ------------------------------------------------------------------------------
+# maxkeys_check()
+#
+# - Reads kernel keyring limits (maxkeys, maxbytes)
+# - Checks current usage for LXC user (UID 100000)
+# - Warns if usage is within 100 keys / 1000 bytes of the limits and suggests
+#   doubling them via sysctl; exits non-zero so the caller can stop
+# - https://cleveruptime.com/docs/files/proc-key-users | https://docs.kernel.org/security/keys/core.html
+# ------------------------------------------------------------------------------
+
+maxkeys_check() {
+  # Read kernel parameters (0 if the proc files are unreadable/missing)
+  per_user_maxkeys=$(cat /proc/sys/kernel/keys/maxkeys 2>/dev/null || echo 0)
+  per_user_maxbytes=$(cat /proc/sys/kernel/keys/maxbytes 2>/dev/null || echo 0)
+
+  # Exit if kernel parameters are unavailable
+  if [[ "$per_user_maxkeys" -eq 0 || "$per_user_maxbytes" -eq 0 ]]; then
+    echo -e "${CROSS}${RD} Error: Unable to read kernel parameters. Ensure proper permissions.${CL}"
+    exit 1
+  fi
+
+  # Fetch key usage for user ID 100000 (first mapped UID of unprivileged containers);
+  # field 2 is the key count, field 5 is "usedbytes/maxbytes" (we take the numerator)
+  used_lxc_keys=$(awk '/100000:/ {print $2}' /proc/key-users 2>/dev/null || echo 0)
+  used_lxc_bytes=$(awk '/100000:/ {split($5, a, "/"); print a[1]}' /proc/key-users 2>/dev/null || echo 0)
+
+  # Calculate thresholds (limit minus safety margin) and suggested new limits (double)
+  threshold_keys=$((per_user_maxkeys - 100))
+  threshold_bytes=$((per_user_maxbytes - 1000))
+  new_limit_keys=$((per_user_maxkeys * 2))
+  new_limit_bytes=$((per_user_maxbytes * 2))
+
+  # Check if key or byte usage is near limits
+  failure=0
+  if [[ "$used_lxc_keys" -gt "$threshold_keys" ]]; then
+    echo -e "${CROSS}${RD} Warning: Key usage is near the limit (${used_lxc_keys}/${per_user_maxkeys}).${CL}"
+    echo -e "${INFO} Suggested action: Set ${GN}kernel.keys.maxkeys=${new_limit_keys}${CL} in ${BOLD}/etc/sysctl.d/98-community-scripts.conf${CL}."
+    failure=1
+  fi
+  if [[ "$used_lxc_bytes" -gt "$threshold_bytes" ]]; then
+    echo -e "${CROSS}${RD} Warning: Key byte usage is near the limit (${used_lxc_bytes}/${per_user_maxbytes}).${CL}"
+    echo -e "${INFO} Suggested action: Set ${GN}kernel.keys.maxbytes=${new_limit_bytes}${CL} in ${BOLD}/etc/sysctl.d/98-community-scripts.conf${CL}."
+    failure=1
+  fi
+
+  # Provide next steps if issues are detected
+  if [[ "$failure" -eq 1 ]]; then
+    echo -e "${INFO} To apply changes, run: ${BOLD}service procps force-reload${CL}"
+    exit 1
+  fi
+
+  echo -e "${CM}${GN} All kernel key limits are within safe thresholds.${CL}"
+}
+
+# ------------------------------------------------------------------------------
+# get_current_ip()
+#
+# - Prints the current container IP depending on OS type:
+#   Debian/Ubuntu via `hostname -I`, Alpine via `ip -4 addr` on eth0
+# - Prints "Unknown" for other distros; prints an empty line if
+#   /etc/os-release is missing (CURRENT_IP stays unset in that case)
+# ------------------------------------------------------------------------------
+get_current_ip() {
+  if [ -f /etc/os-release ]; then
+    # Check for Debian/Ubuntu (uses hostname -I)
+    # NOTE(review): the grep is unanchored, so it matches "ID=debian|ID=ubuntu"
+    # anywhere on a line of os-release - confirm this is intended.
+    if grep -qE 'ID=debian|ID=ubuntu' /etc/os-release; then
+      CURRENT_IP=$(hostname -I | awk '{print $1}') # first address only
+    # Check for Alpine (uses ip command)
+    elif grep -q 'ID=alpine' /etc/os-release; then
+      CURRENT_IP=$(ip -4 addr show eth0 | awk '/inet / {print $2}' | cut -d/ -f1 | head -n 1)
+    else
+      CURRENT_IP="Unknown"
+    fi
+  fi
+  echo "$CURRENT_IP"
+}
+
+# ------------------------------------------------------------------------------
+# update_motd_ip()
+#
+# - Refreshes the "IP Address:" line in /etc/motd with the container's
+#   current IP (via get_current_ip)
+# - Removes any previous "IP Address:" lines first so the banner never
+#   accumulates stale duplicates; no-op when /etc/motd does not exist
+# ------------------------------------------------------------------------------
+update_motd_ip() {
+  MOTD_FILE="/etc/motd"
+  [ -f "$MOTD_FILE" ] || return 0
+
+  # Strip every existing entry before appending the fresh one.
+  sed -i '/IP Address:/d' "$MOTD_FILE"
+
+  IP=$(get_current_ip)
+  echo -e "${TAB}${NETWORK}${YW} IP Address: ${GN}${IP}${CL}" >>"$MOTD_FILE"
+}
+
+# ------------------------------------------------------------------------------
+# install_ssh_keys_into_ct()
+#
+# - Installs the keys collected in $SSH_KEYS_FILE into the container's root
+#   account when SSH access was requested ($SSH == "yes")
+# - Tries `pct push` first, then falls back to streaming the file through
+#   `pct exec` if push fails
+# - Emits a warning and returns 0 when no key file was provided
+# ------------------------------------------------------------------------------
+install_ssh_keys_into_ct() {
+  [[ "$SSH" != "yes" ]] && return 0
+
+  if [[ -n "$SSH_KEYS_FILE" && -s "$SSH_KEYS_FILE" ]]; then
+    msg_info "Installing selected SSH keys into CT ${CTID}"
+    # Ensure /root/.ssh exists with the permissions sshd requires.
+    pct exec "$CTID" -- sh -c 'mkdir -p /root/.ssh && chmod 700 /root/.ssh' || {
+      msg_error "prepare /root/.ssh failed"
+      return 1
+    }
+    # Primary: pct push; fallback: pipe the file into the CT via pct exec.
+    pct push "$CTID" "$SSH_KEYS_FILE" /root/.ssh/authorized_keys >/dev/null 2>&1 ||
+      pct exec "$CTID" -- sh -c "cat > /root/.ssh/authorized_keys" <"$SSH_KEYS_FILE" || {
+      msg_error "write authorized_keys failed"
+      return 1
+    }
+    # chmod is best-effort; a failure here should not abort the install.
+    pct exec "$CTID" -- sh -c 'chmod 600 /root/.ssh/authorized_keys' || true
+    msg_ok "Installed SSH keys into CT ${CTID}"
+    return 0
+  fi
+
+  # Fallback: no keys were selected
+  msg_warn "No SSH keys to install (skipping)."
+  return 0
+}
+
+# ------------------------------------------------------------------------------
+# base_settings()
+#
+# - Defines all base/default variables for container creation
+# - Reads from environment variables (var_*), falling back to hard defaults
+# - $1 (optional) is used as the fallback for VERBOSE/ENABLE_FUSE/ENABLE_TUN
+#   when the corresponding var_* is unset
+# - Provides fallback defaults for OS type/version
+# ------------------------------------------------------------------------------
+base_settings() {
+  # Default Settings
+  CT_TYPE=${var_unprivileged:-"1"} # 1 = unprivileged container
+  DISK_SIZE=${var_disk:-"4"}       # GB
+  CORE_COUNT=${var_cpu:-"1"}
+  RAM_SIZE=${var_ram:-"1024"}      # MiB
+  VERBOSE=${var_verbose:-"${1:-no}"}
+  PW=${var_pw:-""}
+  CT_ID=${var_ctid:-$NEXTID}
+  HN=${var_hostname:-$NSAPP}
+  BRG=${var_brg:-"vmbr0"}
+  NET=${var_net:-"dhcp"}
+  IPV6_METHOD=${var_ipv6_method:-"none"}
+  IPV6_STATIC=${var_ipv6_static:-""}
+  GATE=${var_gateway:-""}
+  APT_CACHER=${var_apt_cacher:-""}
+  APT_CACHER_IP=${var_apt_cacher_ip:-""}
+  MTU=${var_mtu:-""}
+  SD=${var_storage:-""}
+  NS=${var_ns:-""}
+  MAC=${var_mac:-""}
+  VLAN=${var_vlan:-""}
+  SSH=${var_ssh:-"no"}
+  SSH_AUTHORIZED_KEY=${var_ssh_authorized_key:-""}
+  UDHCPC_FIX=${var_udhcpc_fix:-""}
+  # NOTE(review): comma separator here, but advanced_settings initializes
+  # TAGS with ';' - confirm which delimiter downstream consumers expect.
+  TAGS="community-script,${var_tags:-}"
+  ENABLE_FUSE=${var_fuse:-"${1:-no}"}
+  ENABLE_TUN=${var_tun:-"${1:-no}"}
+
+  # Since these 2 are only defined outside of default_settings function, we add a temporary fallback. TODO: To align everything, we should add these as constant variables (e.g. OSTYPE and OSVERSION), but that would currently require updating the default_settings function for all existing scripts
+  if [ -z "$var_os" ]; then
+    var_os="debian"
+  fi
+  if [ -z "$var_version" ]; then
+    var_version="12"
+  fi
+}
+
+# ------------------------------------------------------------------------------
+# echo_default()
+#
+# - Prints a summary of the default values (ID, OS, type, disk, RAM, CPU, ...)
+#   chosen by base_settings, using the shared icon/color variables
+# - Maps numeric CT_TYPE (0 = privileged) to a human-readable description
+# - The verbose line is only printed when VERBOSE=yes
+# ------------------------------------------------------------------------------
+echo_default() {
+  # 0 means privileged; everything else is treated as unprivileged.
+  CT_TYPE_DESC="Unprivileged"
+  if [ "$CT_TYPE" -eq 0 ]; then
+    CT_TYPE_DESC="Privileged"
+  fi
+  echo -e "${INFO}${BOLD}${DGN}PVE Version ${PVEVERSION} (Kernel: ${KERNEL_VERSION})${CL}"
+  echo -e "${CONTAINERID}${BOLD}${DGN}Container ID: ${BGN}${CT_ID}${CL}"
+  echo -e "${OS}${BOLD}${DGN}Operating System: ${BGN}$var_os ($var_version)${CL}"
+  echo -e "${CONTAINERTYPE}${BOLD}${DGN}Container Type: ${BGN}$CT_TYPE_DESC${CL}"
+  echo -e "${DISKSIZE}${BOLD}${DGN}Disk Size: ${BGN}${DISK_SIZE} GB${CL}"
+  echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}${CORE_COUNT}${CL}"
+  echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}${RAM_SIZE} MiB${CL}"
+  if [ "$VERBOSE" == "yes" ]; then
+    echo -e "${SEARCH}${BOLD}${DGN}Verbose Mode: ${BGN}Enabled${CL}"
+  fi
+  echo -e "${CREATING}${BOLD}${BL}Creating a ${APP} LXC using the above default settings${CL}"
+  echo -e " "
+}
+
+# ------------------------------------------------------------------------------
+# exit_script()
+#
+# - Called when the user cancels a dialog or aborts an action
+# - Clears the screen, prints a short notice, and exits the script
+# ------------------------------------------------------------------------------
+exit_script() {
+  clear
+  # %b expands the backslash escapes embedded in the color variables,
+  # matching the behaviour of `echo -e`.
+  printf '\n%b\n\n' "${CROSS}${RD}User exited script${CL}"
+  exit
+}
+
+# ------------------------------------------------------------------------------
+# find_host_ssh_keys()
+#
+# - Scans the host for files containing SSH public keys
+# - Honors var_ssh_import_glob when set; otherwise checks the default
+#   locations (/root/.ssh, /etc/ssh/authorized_keys*)
+# - Prints the matching file paths joined with ':' on stdout
+# - Sets FOUND_HOST_KEY_COUNT to the total number of keys found
+# ------------------------------------------------------------------------------
+find_host_ssh_keys() {
+  local re='(ssh-(rsa|ed25519)|ecdsa-sha2-nistp256|sk-(ssh-ed25519|ecdsa-sha2-nistp256))'
+  local -a files=() cand=()
+  local g="${var_ssh_import_glob:-}"
+  local total=0 f base c
+
+  # nullglob: unmatched globs expand to nothing instead of themselves
+  shopt -s nullglob
+  if [[ -n "$g" ]]; then
+    for pat in $g; do cand+=($pat); done # intentionally unquoted: expand user-supplied globs
+  else
+    cand+=(/root/.ssh/authorized_keys /root/.ssh/authorized_keys2)
+    cand+=(/root/.ssh/*.pub)
+    cand+=(/etc/ssh/authorized_keys /etc/ssh/authorized_keys.d/*)
+  fi
+  shopt -u nullglob
+
+  for f in "${cand[@]}"; do
+    [[ -f "$f" && -r "$f" ]] || continue
+    base="$(basename -- "$f")"
+    case "$base" in
+    known_hosts | known_hosts.* | config) continue ;;
+    id_*) [[ "$f" != *.pub ]] && continue ;; # skip private keys; accept only their .pub side
+    esac
+
+    # CRLF-safe count of public-key lines (comments and blanks stripped).
+    # Fix: the regex must be expanded by the shell - the previous '"$re"'
+    # searched for the literal four characters "$re" and always counted 0,
+    # so every file was rejected and only the fallback path could match.
+    c=$(tr -d '\r' <"$f" | awk '
+      /^[[:space:]]*#/ {next}
+      /^[[:space:]]*$/ {next}
+      {print}
+    ' | grep -E -c "$re" || true)
+
+    if ((c > 0)); then
+      files+=("$f")
+      total=$((total + c))
+    fi
+  done
+
+  # Fallback to /root/.ssh/authorized_keys
+  if ((${#files[@]} == 0)) && [[ -r /root/.ssh/authorized_keys ]]; then
+    if grep -E -q "$re" /root/.ssh/authorized_keys; then
+      files+=(/root/.ssh/authorized_keys)
+      total=$((total + $(grep -E -c "$re" /root/.ssh/authorized_keys || echo 0)))
+    fi
+  fi
+
+  FOUND_HOST_KEY_COUNT="$total"
+  # Emit the colon-joined list from a subshell so IFS is not leaked.
+  (
+    IFS=:
+    echo "${files[*]}"
+  )
+}
+
+# ------------------------------------------------------------------------------
+# advanced_settings()
+#
+# - Interactive whiptail menu for advanced configuration
+# - Lets user set container type, password, CT ID, hostname, disk, CPU, RAM
+# - Supports IPv4/IPv6, DNS, MAC, VLAN, tags, SSH keys, FUSE, verbose mode
+# - Ends with confirmation or re-entry (recursive call) if cancelled
+# ------------------------------------------------------------------------------
+advanced_settings() {
+  # NOTE(review): '--title' appears after '--msgbox' here; whiptail normally
+  # expects options before the box type ('--msgbox <text> <h> <w>') - confirm
+  # this renders the intended title/text.
+  whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --msgbox --title "Here is an instructional tip:" "To make a selection, use the Spacebar." 8 58
+  # Setting Default Tag for Advanced Settings
+  # NOTE(review): ';' separator here vs ',' in base_settings - confirm which
+  # delimiter the tag consumers expect.
+  TAGS="community-script;${var_tags:-}"
+  CT_DEFAULT_TYPE="${CT_TYPE}"
+  CT_TYPE=""
+  # --- Container type: radiolist pre-selecting the base-settings default ---
+  while [ -z "$CT_TYPE" ]; do
+    if [ "$CT_DEFAULT_TYPE" == "1" ]; then
+      if CT_TYPE=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --title "CONTAINER TYPE" --radiolist "Choose Type" 10 58 2 \
+        "1" "Unprivileged" ON \
+        "0" "Privileged" OFF \
+        3>&1 1>&2 2>&3); then
+        if [ -n "$CT_TYPE" ]; then
+          CT_TYPE_DESC="Unprivileged"
+          if [ "$CT_TYPE" -eq 0 ]; then
+            CT_TYPE_DESC="Privileged"
+          fi
+          echo -e "${INFO}${BOLD}${DGN}PVE Version ${PVEVERSION} (Kernel: ${KERNEL_VERSION})${CL}"
+          echo -e "${OS}${BOLD}${DGN}Operating System: ${BGN}$var_os |${OSVERSION}${BOLD}${DGN}Version: ${BGN}$var_version${CL}"
+          echo -e "${CONTAINERTYPE}${BOLD}${DGN}Container Type: ${BGN}$CT_TYPE_DESC${CL}"
+        fi
+      else
+        exit_script
+      fi
+    fi
+    # Same dialog, but with "Privileged" pre-selected when that was the default.
+    if [ "$CT_DEFAULT_TYPE" == "0" ]; then
+      if CT_TYPE=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --title "CONTAINER TYPE" --radiolist "Choose Type" 10 58 2 \
+        "1" "Unprivileged" OFF \
+        "0" "Privileged" ON \
+        3>&1 1>&2 2>&3); then
+        if [ -n "$CT_TYPE" ]; then
+          CT_TYPE_DESC="Unprivileged"
+          if [ "$CT_TYPE" -eq 0 ]; then
+            CT_TYPE_DESC="Privileged"
+          fi
+          echo -e "${INFO}${BOLD}${DGN}PVE Version ${PVEVERSION} (Kernel: ${KERNEL_VERSION})${CL}"
+          echo -e "${OS}${BOLD}${DGN}Operating System: ${BGN}$var_os${CL}"
+          echo -e "${OSVERSION}${BOLD}${DGN}Version: ${BGN}$var_version${CL}"
+          echo -e "${CONTAINERTYPE}${BOLD}${DGN}Container Type: ${BGN}$CT_TYPE_DESC${CL}"
+        fi
+      else
+        exit_script
+      fi
+    fi
+  done
+
+  # --- Root password: blank = autologin; otherwise validated and confirmed ---
+  while true; do
+    if PW1=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --passwordbox "\nSet Root Password (needed for root ssh access)" 9 58 --title "PASSWORD (leave blank for automatic login)" 3>&1 1>&2 2>&3); then
+      # Empty = Autologin
+      if [[ -z "$PW1" ]]; then
+        PW=""
+        PW1="Automatic Login"
+        echo -e "${VERIFYPW}${BOLD}${DGN}Root Password: ${BGN}$PW1${CL}"
+        break
+      fi
+
+      # Invalid: contains spaces
+      if [[ "$PW1" == *" "* ]]; then
+        whiptail --msgbox "Password cannot contain spaces." 8 58
+        continue
+      fi
+
+      # Invalid: too short
+      if ((${#PW1} < 5)); then
+        whiptail --msgbox "Password must be at least 5 characters." 8 58
+        continue
+      fi
+
+      # Confirm password
+      if PW2=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --passwordbox "\nVerify Root Password" 9 58 --title "PASSWORD VERIFICATION" 3>&1 1>&2 2>&3); then
+        if [[ "$PW1" == "$PW2" ]]; then
+          # PW is passed to pct as a CLI flag fragment later on.
+          PW="-password $PW1"
+          echo -e "${VERIFYPW}${BOLD}${DGN}Root Password: ${BGN}********${CL}"
+          break
+        else
+          whiptail --msgbox "Passwords do not match. Please try again." 8 58
+        fi
+      else
+        exit_script
+      fi
+    else
+      exit_script
+    fi
+  done
+
+  # --- Container ID: blank falls back to the next free ID ($NEXTID) ---
+  if CT_ID=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Container ID" 8 58 "$NEXTID" --title "CONTAINER ID" 3>&1 1>&2 2>&3); then
+    if [ -z "$CT_ID" ]; then
+      CT_ID="$NEXTID"
+    fi
+  else
+    exit_script
+  fi
+  echo -e "${CONTAINERID}${BOLD}${DGN}Container ID: ${BGN}$CT_ID${CL}"
+
+  # --- Hostname: lowercased, spaces stripped, validated against RFC 1123 ---
+  while true; do
+    if CT_NAME=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --inputbox "Set Hostname" 8 58 "$NSAPP" --title "HOSTNAME" 3>&1 1>&2 2>&3); then
+      if [ -z "$CT_NAME" ]; then
+        HN="$NSAPP"
+      else
+        HN=$(echo "${CT_NAME,,}" | tr -d ' ')
+      fi
+      # Hostname validate (RFC 1123)
+      if [[ "$HN" =~ ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ ]]; then
+        echo -e "${HOSTNAME}${BOLD}${DGN}Hostname: ${BGN}$HN${CL}"
+        break
+      else
+        whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \
+          --msgbox "❌ Invalid hostname: '$HN'\n\nOnly lowercase letters, digits and hyphens (-) are allowed.\nUnderscores (_) or other characters are not permitted!" 10 70
+      fi
+    else
+      exit_script
+    fi
+  done
+
+  # --- Disk size (GB): positive integer, default from var_disk ---
+  while true; do
+    DISK_SIZE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Disk Size in GB" 8 58 "$var_disk" --title "DISK SIZE" 3>&1 1>&2 2>&3) || exit_script
+
+    if [ -z "$DISK_SIZE" ]; then
+      DISK_SIZE="$var_disk"
+    fi
+
+    if [[ "$DISK_SIZE" =~ ^[1-9][0-9]*$ ]]; then
+      echo -e "${DISKSIZE}${BOLD}${DGN}Disk Size: ${BGN}${DISK_SIZE} GB${CL}"
+      break
+    else
+      whiptail --msgbox "Disk size must be a positive integer!" 8 58
+    fi
+  done
+
+  # --- CPU cores: positive integer, default from var_cpu ---
+  while true; do
+    CORE_COUNT=$(whiptail --backtitle "Proxmox VE Helper Scripts" \
+      --inputbox "Allocate CPU Cores" 8 58 "$var_cpu" --title "CORE COUNT" 3>&1 1>&2 2>&3) || exit_script
+
+    if [ -z "$CORE_COUNT" ]; then
+      CORE_COUNT="$var_cpu"
+    fi
+
+    if [[ "$CORE_COUNT" =~ ^[1-9][0-9]*$ ]]; then
+      echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}$CORE_COUNT${CL}"
+      break
+    else
+      whiptail --msgbox "CPU core count must be a positive integer!" 8 58
+    fi
+  done
+
+  # --- RAM (MiB): positive integer, default from var_ram ---
+  while true; do
+    RAM_SIZE=$(whiptail --backtitle "Proxmox VE Helper Scripts" \
+      --inputbox "Allocate RAM in MiB" 8 58 "$var_ram" --title "RAM" 3>&1 1>&2 2>&3) || exit_script
+
+    if [ -z "$RAM_SIZE" ]; then
+      RAM_SIZE="$var_ram"
+    fi
+
+    if [[ "$RAM_SIZE" =~ ^[1-9][0-9]*$ ]]; then
+      echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}${RAM_SIZE} MiB${CL}"
+      break
+    else
+      whiptail --msgbox "RAM size must be a positive integer!" 8 58
+    fi
+  done
+
+  # --- Bridge discovery: parse /etc/network/interfaces (+interfaces.d/*) and
+  # collect iface stanzas that carry bridge-* options or ovs_type OVSBridge ---
+  IFACE_FILEPATH_LIST="/etc/network/interfaces"$'\n'$(find "/etc/network/interfaces.d/" -type f)
+  BRIDGES=""
+  OLD_IFS=$IFS
+  IFS=$'\n'
+  for iface_filepath in ${IFACE_FILEPATH_LIST}; do
+
+    # Build "start:end" line ranges, one per iface stanza, into a temp file.
+    iface_indexes_tmpfile=$(mktemp -q -u '.iface-XXXX')
+    (grep -Pn '^\s*iface' "${iface_filepath}" | cut -d':' -f1 && wc -l "${iface_filepath}" | cut -d' ' -f1) | awk 'FNR==1 {line=$0; next} {print line":"$0-1; line=$0}' >"${iface_indexes_tmpfile}" || true
+
+    if [ -f "${iface_indexes_tmpfile}" ]; then
+
+      while read -r pair; do
+        start=$(echo "${pair}" | cut -d':' -f1)
+        end=$(echo "${pair}" | cut -d':' -f2)
+
+        # A stanza is a bridge if its body contains bridge options or OVSBridge.
+        if awk "NR >= ${start} && NR <= ${end}" "${iface_filepath}" | grep -qP '^\s*(bridge[-_](ports|stp|fd|vlan-aware|vids)|ovs_type\s+OVSBridge)\b'; then
+          iface_name=$(sed "${start}q;d" "${iface_filepath}" | awk '{print $2}')
+          BRIDGES="${iface_name}"$'\n'"${BRIDGES}"
+        fi
+
+      done <"${iface_indexes_tmpfile}"
+      rm -f "${iface_indexes_tmpfile}"
+    fi
+
+  done
+  IFS=$OLD_IFS
+  BRIDGES=$(echo "$BRIDGES" | grep -v '^\s*$' | sort | uniq)
+  if [[ -z "$BRIDGES" ]]; then
+    # No bridges found: fall back to the conventional default.
+    BRG="vmbr0"
+    echo -e "${BRIDGE}${BOLD}${DGN}Bridge: ${BGN}$BRG${CL}"
+  else
+    # Build bridge menu with descriptions
+    BRIDGE_MENU_OPTIONS=()
+    while IFS= read -r bridge; do
+      if [[ -n "$bridge" ]]; then
+        # Get description from Proxmox built-in method - find comment for this specific bridge
+        description=$(grep -A 10 "iface $bridge" /etc/network/interfaces | grep '^#' | head -n1 | sed 's/^#\s*//')
+        if [[ -n "$description" ]]; then
+          BRIDGE_MENU_OPTIONS+=("$bridge" "${description}")
+        else
+          BRIDGE_MENU_OPTIONS+=("$bridge" " ")
+        fi
+      fi
+    done <<<"$BRIDGES"
+
+    BRG=$(whiptail --backtitle "Proxmox VE Helper Scripts" --menu "Select network bridge: " 18 55 6 "${BRIDGE_MENU_OPTIONS[@]}" 3>&1 1>&2 2>&3)
+    if [[ -z "$BRG" ]]; then
+      exit_script
+    else
+      echo -e "${BRIDGE}${BOLD}${DGN}Bridge: ${BGN}$BRG${CL}"
+    fi
+  fi
+
+  # IPv4 methods: dhcp, static, none
+  while true; do
+    IPV4_METHOD=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \
+      --title "IPv4 Address Management" \
+      --menu "Select IPv4 Address Assignment Method:" 12 60 2 \
+      "dhcp" "Automatic (DHCP, recommended)" \
+      "static" "Static (manual entry)" \
+      3>&1 1>&2 2>&3)
+
+    exit_status=$?
+    if [ $exit_status -ne 0 ]; then
+      exit_script
+    fi
+
+    case "$IPV4_METHOD" in
+    dhcp)
+      NET="dhcp"
+      GATE=""
+      echo -e "${NETWORK}${BOLD}${DGN}IPv4: DHCP${CL}"
+      break
+      ;;
+    static)
+      # Static: call and validate CIDR address
+      while true; do
+        NET=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \
+          --inputbox "Enter Static IPv4 CIDR Address (e.g. 192.168.100.50/24)" 8 58 "" \
+          --title "IPv4 ADDRESS" 3>&1 1>&2 2>&3)
+        if [ -z "$NET" ]; then
+          whiptail --msgbox "IPv4 address must not be empty." 8 58
+          continue
+        elif [[ "$NET" =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}/([0-9]|[1-2][0-9]|3[0-2])$ ]]; then
+          echo -e "${NETWORK}${BOLD}${DGN}IPv4 Address: ${BGN}$NET${CL}"
+          break
+        else
+          whiptail --msgbox "$NET is not a valid IPv4 CIDR address. Please enter a correct value!" 8 58
+        fi
+      done
+
+      # call and validate Gateway (stored as a ",gw=..." fragment for pct)
+      while true; do
+        GATE1=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \
+          --inputbox "Enter Gateway IP address for static IPv4" 8 58 "" \
+          --title "Gateway IP" 3>&1 1>&2 2>&3)
+        if [ -z "$GATE1" ]; then
+          whiptail --msgbox "Gateway IP address cannot be empty." 8 58
+        elif [[ ! "$GATE1" =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}$ ]]; then
+          whiptail --msgbox "Invalid Gateway IP address format." 8 58
+        else
+          GATE=",gw=$GATE1"
+          echo -e "${GATEWAY}${BOLD}${DGN}Gateway IP Address: ${BGN}$GATE1${CL}"
+          break
+        fi
+      done
+      break
+      ;;
+    esac
+  done
+
+  # IPv6 Address Management selection
+  while true; do
+    IPV6_METHOD=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --menu \
+      "Select IPv6 Address Management Type:" 15 58 4 \
+      "auto" "SLAAC/AUTO (recommended, default)" \
+      "dhcp" "DHCPv6" \
+      "static" "Static (manual entry)" \
+      "none" "Disabled" \
+      --default-item "auto" 3>&1 1>&2 2>&3)
+    [ $? -ne 0 ] && exit_script
+
+    case "$IPV6_METHOD" in
+    auto)
+      echo -e "${NETWORK}${BOLD}${DGN}IPv6: ${BGN}SLAAC/AUTO${CL}"
+      IPV6_ADDR=""
+      IPV6_GATE=""
+      break
+      ;;
+    dhcp)
+      echo -e "${NETWORK}${BOLD}${DGN}IPv6: ${BGN}DHCPv6${CL}"
+      IPV6_ADDR="dhcp"
+      IPV6_GATE=""
+      break
+      ;;
+    static)
+      # Ask for static IPv6 address (CIDR notation, e.g., 2001:db8::1234/64)
+      while true; do
+        IPV6_ADDR=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --inputbox \
+          "Set a static IPv6 CIDR address (e.g., 2001:db8::1234/64)" 8 58 "" \
+          --title "IPv6 STATIC ADDRESS" 3>&1 1>&2 2>&3) || exit_script
+        if [[ "$IPV6_ADDR" =~ ^([0-9a-fA-F:]+:+)+[0-9a-fA-F]+(/[0-9]{1,3})$ ]]; then
+          echo -e "${NETWORK}${BOLD}${DGN}IPv6 Address: ${BGN}$IPV6_ADDR${CL}"
+          break
+        else
+          whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --msgbox \
+            "$IPV6_ADDR is an invalid IPv6 CIDR address. Please enter a valid IPv6 CIDR address (e.g., 2001:db8::1234/64)" 8 58
+        fi
+      done
+      # Optional: ask for IPv6 gateway for static config
+      while true; do
+        IPV6_GATE=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --inputbox \
+          "Enter IPv6 gateway address (optional, leave blank for none)" 8 58 "" --title "IPv6 GATEWAY" 3>&1 1>&2 2>&3)
+        if [ -z "$IPV6_GATE" ]; then
+          IPV6_GATE=""
+          break
+        elif [[ "$IPV6_GATE" =~ ^([0-9a-fA-F:]+:+)+[0-9a-fA-F]+$ ]]; then
+          break
+        else
+          whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --msgbox \
+            "Invalid IPv6 gateway format." 8 58
+        fi
+      done
+      break
+      ;;
+    none)
+      echo -e "${NETWORK}${BOLD}${DGN}IPv6: ${BGN}Disabled${CL}"
+      IPV6_ADDR="none"
+      IPV6_GATE=""
+      break
+      ;;
+    *)
+      exit_script
+      ;;
+    esac
+  done
+
+  # --- APT cacher: not applicable on Alpine (no apt) ---
+  if [ "$var_os" == "alpine" ]; then
+    APT_CACHER=""
+    APT_CACHER_IP=""
+  else
+    if APT_CACHER_IP=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --inputbox "Set APT-Cacher IP (leave blank for none)" 8 58 --title "APT-Cacher IP" 3>&1 1>&2 2>&3); then
+      APT_CACHER="${APT_CACHER_IP:+yes}"
+      echo -e "${NETWORK}${BOLD}${DGN}APT-Cacher IP Address: ${BGN}${APT_CACHER_IP:-Default}${CL}"
+    else
+      exit_script
+    fi
+  fi
+
+  # if (whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --defaultno --title "IPv6" --yesno "Disable IPv6?" 10 58); then
+  #   DISABLEIP6="yes"
+  # else
+  #   DISABLEIP6="no"
+  # fi
+  # echo -e "${DISABLEIPV6}${BOLD}${DGN}Disable IPv6: ${BGN}$DISABLEIP6${CL}"
+
+  # --- MTU: stored as a ",mtu=..." fragment; blank keeps the bridge default ---
+  if MTU1=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --inputbox "Set Interface MTU Size (leave blank for default [The MTU of your selected vmbr, default is 1500])" 8 58 --title "MTU SIZE" 3>&1 1>&2 2>&3); then
+    if [ -z "$MTU1" ]; then
+      MTU1="Default"
+      MTU=""
+    else
+      MTU=",mtu=$MTU1"
+    fi
+    echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}$MTU1${CL}"
+  else
+    exit_script
+  fi
+
+  # --- DNS search domain: blank inherits the host's search domain ---
+  if SD=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --inputbox "Set a DNS Search Domain (leave blank for HOST)" 8 58 --title "DNS Search Domain" 3>&1 1>&2 2>&3); then
+    if [ -z "$SD" ]; then
+      SX=Host
+      SD=""
+    else
+      SX=$SD
+      SD="-searchdomain=$SD"
+    fi
+    echo -e "${SEARCH}${BOLD}${DGN}DNS Search Domain: ${BGN}$SX${CL}"
+  else
+    exit_script
+  fi
+
+  # --- DNS server: blank inherits the host's resolver ---
+  if NX=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --inputbox "Set a DNS Server IP (leave blank for HOST)" 8 58 --title "DNS SERVER IP" 3>&1 1>&2 2>&3); then
+    if [ -z "$NX" ]; then
+      NX=Host
+      NS=""
+    else
+      NS="-nameserver=$NX"
+    fi
+    echo -e "${NETWORK}${BOLD}${DGN}DNS Server IP Address: ${BGN}$NX${CL}"
+  else
+    exit_script
+  fi
+
+  # Alpine's udhcpc needs a workaround when DHCP is combined with a custom DNS.
+  if [ "$var_os" == "alpine" ] && [ "$NET" == "dhcp" ] && [ "$NX" != "Host" ]; then
+    UDHCPC_FIX="yes"
+  else
+    UDHCPC_FIX="no"
+  fi
+  export UDHCPC_FIX
+
+  # --- MAC address: stored as a ",hwaddr=..." fragment; blank = auto-generate ---
+  if MAC1=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --inputbox "Set a MAC Address(leave blank for generated MAC)" 8 58 --title "MAC ADDRESS" 3>&1 1>&2 2>&3); then
+    if [ -z "$MAC1" ]; then
+      MAC1="Default"
+      MAC=""
+    else
+      MAC=",hwaddr=$MAC1"
+      echo -e "${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}$MAC1${CL}"
+    fi
+  else
+    exit_script
+  fi
+
+  # --- VLAN tag: stored as a ",tag=..." fragment; blank = untagged ---
+  if VLAN1=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --inputbox "Set a Vlan(leave blank for no VLAN)" 8 58 --title "VLAN" 3>&1 1>&2 2>&3); then
+    if [ -z "$VLAN1" ]; then
+      VLAN1="Default"
+      VLAN=""
+    else
+      VLAN=",tag=$VLAN1"
+    fi
+    echo -e "${VLANTAG}${BOLD}${DGN}Vlan: ${BGN}$VLAN1${CL}"
+  else
+    exit_script
+  fi
+
+  # --- Tags: user may edit the preset; clearing all leaves the marker ";" ---
+  if ADV_TAGS=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --inputbox "Set Custom Tags?[If you remove all, there will be no tags!]" 8 58 "${TAGS}" --title "Advanced Tags" 3>&1 1>&2 2>&3); then
+    if [ -n "${ADV_TAGS}" ]; then
+      ADV_TAGS=$(echo "$ADV_TAGS" | tr -d '[:space:]')
+      TAGS="${ADV_TAGS}"
+    else
+      TAGS=";"
+    fi
+    echo -e "${NETWORK}${BOLD}${DGN}Tags: ${BGN}$TAGS${CL}"
+  else
+    exit_script
+  fi
+
+  # --- SSH keys, FUSE, verbosity, final confirmation ---
+  configure_ssh_settings
+  export SSH_KEYS_FILE
+  echo -e "${ROOTSSH}${BOLD}${DGN}Root SSH Access: ${BGN}$SSH${CL}"
+  if (whiptail --backtitle "Proxmox VE Helper Scripts" --defaultno --title "FUSE Support" --yesno "Enable FUSE support?\nRequired for tools like rclone, mergerfs, AppImage, etc." 10 58); then
+    ENABLE_FUSE="yes"
+  else
+    ENABLE_FUSE="no"
+  fi
+  echo -e "${FUSE}${BOLD}${DGN}Enable FUSE Support: ${BGN}$ENABLE_FUSE${CL}"
+
+  if (whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --defaultno --title "VERBOSE MODE" --yesno "Enable Verbose Mode?" 10 58); then
+    VERBOSE="yes"
+  else
+    VERBOSE="no"
+  fi
+  echo -e "${SEARCH}${BOLD}${DGN}Verbose Mode: ${BGN}$VERBOSE${CL}"
+
+  # On "No" the whole dialog sequence restarts via a recursive call.
+  if (whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --title "ADVANCED SETTINGS COMPLETE" --yesno "Ready to create ${APP} LXC?" 10 58); then
+    echo -e "${CREATING}${BOLD}${RD}Creating a ${APP} LXC using the above advanced settings${CL}"
+  else
+    clear
+    header_info
+    echo -e "${INFO}${BOLD}${DGN}PVE Version ${PVEVERSION} (Kernel: ${KERNEL_VERSION})${CL}"
+    echo -e "${ADVANCED}${BOLD}${RD}Using Advanced Settings on node $PVEHOST_NAME${CL}"
+    advanced_settings
+  fi
+}
+
+# ------------------------------------------------------------------------------
+# diagnostics_check()
+#
+# - Ensures diagnostics config file exists at /usr/local/community-scripts/diagnostics
+# - Asks user whether to send anonymous diagnostic data
+# - Saves DIAGNOSTICS=yes/no in the config file
+# ------------------------------------------------------------------------------
+diagnostics_check() {
+  # Ensure the diagnostics config file exists at
+  # /usr/local/community-scripts/diagnostics, asking the user once whether
+  # anonymous diagnostics may be sent, and set DIAGNOSTICS=yes/no accordingly.
+  if ! [ -d "/usr/local/community-scripts" ]; then
+    mkdir -p /usr/local/community-scripts
+  fi
+
+  if ! [ -f "/usr/local/community-scripts/diagnostics" ]; then
+    if (whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --title "DIAGNOSTICS" --yesno "Send Diagnostics of LXC Installation?\n\n(This only transmits data without user data, just RAM, CPU, LXC name, ...)" 10 58); then
+      # BUGFIX: was 'cat </usr/local/community-scripts/diagnostics', which tried to
+      # READ the (not yet existing) file and then executed the following lines as
+      # shell commands until the stray 'EOF'. Use a heredoc written INTO the file.
+      cat <<EOF >/usr/local/community-scripts/diagnostics
+DIAGNOSTICS=yes
+
+#This file is used to store the diagnostics settings for the Community-Scripts API.
+#https://github.com/community-scripts/ProxmoxVED/discussions/1836
+#Your diagnostics will be sent to the Community-Scripts API for troubleshooting/statistical purposes.
+#You can review the data at https://community-scripts.github.io/ProxmoxVE/data
+#If you do not wish to send diagnostics, please set the variable 'DIAGNOSTICS' to "no" in /usr/local/community-scripts/diagnostics, or use the menue.
+#This will disable the diagnostics feature.
+#To send diagnostics, set the variable 'DIAGNOSTICS' to "yes" in /usr/local/community-scripts/diagnostics, or use the menue.
+#This will enable the diagnostics feature.
+#The following information will be sent:
+#"disk_size"
+#"core_count"
+#"ram_size"
+#"os_type"
+#"os_version"
+#"nsapp"
+#"method"
+#"pve_version"
+#"status"
+#If you have any concerns, please review the source code at /misc/build.func
+EOF
+      DIAGNOSTICS="yes"
+    else
+      # Same BUGFIX as above: write the opt-out variant via heredoc.
+      cat <<EOF >/usr/local/community-scripts/diagnostics
+DIAGNOSTICS=no
+
+#This file is used to store the diagnostics settings for the Community-Scripts API.
+#https://github.com/community-scripts/ProxmoxVED/discussions/1836
+#Your diagnostics will be sent to the Community-Scripts API for troubleshooting/statistical purposes.
+#You can review the data at https://community-scripts.github.io/ProxmoxVE/data
+#If you do not wish to send diagnostics, please set the variable 'DIAGNOSTICS' to "no" in /usr/local/community-scripts/diagnostics, or use the menue.
+#This will disable the diagnostics feature.
+#To send diagnostics, set the variable 'DIAGNOSTICS' to "yes" in /usr/local/community-scripts/diagnostics, or use the menue.
+#This will enable the diagnostics feature.
+#The following information will be sent:
+#"disk_size"
+#"core_count"
+#"ram_size"
+#"os_type"
+#"os_version"
+#"nsapp"
+#"method"
+#"pve_version"
+#"status"
+#If you have any concerns, please review the source code at /misc/build.func
+EOF
+      DIAGNOSTICS="no"
+    fi
+  else
+    # File already exists: read the stored value instead of asking again.
+    DIAGNOSTICS=$(awk -F '=' '/^DIAGNOSTICS/ {print $2}' /usr/local/community-scripts/diagnostics)
+  fi
+}
+
+# ------------------------------------------------------------------------------
+# default_var_settings
+#
+# - Ensures /usr/local/community-scripts/default.vars exists (creates if missing)
+# - Loads var_* values from default.vars (safe parser, no source/eval)
+# - Precedence: ENV var_* > default.vars > built-in defaults
+# - Maps var_verbose → VERBOSE
+# - Calls base_settings "$VERBOSE" and echo_default
+# ------------------------------------------------------------------------------
+default_var_settings() {
+ # Allowed var_* keys (alphabetically sorted)
+ local VAR_WHITELIST=(
+ var_apt_cacher var_apt_cacher_ip var_brg var_cpu var_ctid var_disk var_fuse
+ var_gateway var_hostname var_ipv6_method var_ipv6_static var_mac var_mtu
+ var_net var_ns var_pw var_ram var_tags var_tun var_unprivileged
+ var_verbose var_vlan var_ssh var_ssh_authorized_key var_container_storage var_template_storage
+ )
+
+ # Snapshot: environment variables (highest precedence)
+ declare -A _HARD_ENV=()
+ local _k
+ for _k in "${VAR_WHITELIST[@]}"; do
+ if printenv "$_k" >/dev/null 2>&1; then _HARD_ENV["$_k"]=1; fi
+ done
+
+ # Find default.vars location (first match wins: system path, user config, cwd)
+ local _find_default_vars
+ _find_default_vars() {
+ local f
+ for f in \
+ /usr/local/community-scripts/default.vars \
+ "$HOME/.config/community-scripts/default.vars" \
+ "./default.vars"; do
+ [ -f "$f" ] && {
+ echo "$f"
+ return 0
+ }
+ done
+ return 1
+ }
+ # Allow override of storages via env (for non-interactive use cases)
+ [ -n "${var_template_storage:-}" ] && TEMPLATE_STORAGE="$var_template_storage"
+ [ -n "${var_container_storage:-}" ] && CONTAINER_STORAGE="$var_container_storage"
+
+ # Create once, with storages already selected, no var_ctid/var_hostname lines
+ local _ensure_default_vars
+ _ensure_default_vars() {
+ _find_default_vars >/dev/null 2>&1 && return 0
+
+ local canonical="/usr/local/community-scripts/default.vars"
+ msg_info "No default.vars found. Creating ${canonical}"
+ mkdir -p /usr/local/community-scripts
+
+ # Pick storages before writing the file (always ask unless only one)
+ # Create a minimal temp file to write into
+ : >"$canonical"
+
+ # Base content (no var_ctid / var_hostname here)
+ cat >"$canonical" <<'EOF'
+# Community-Scripts defaults (var_* only). Lines starting with # are comments.
+# Precedence: ENV var_* > default.vars > built-ins.
+# Keep keys alphabetically sorted.
+
+# Container type
+var_unprivileged=1
+
+# Resources
+var_cpu=1
+var_disk=4
+var_ram=1024
+
+# Network
+var_brg=vmbr0
+var_net=dhcp
+var_ipv6_method=none
+# var_gateway=
+# var_ipv6_static=
+# var_vlan=
+# var_mtu=
+# var_mac=
+# var_ns=
+
+# SSH
+var_ssh=no
+# var_ssh_authorized_key=
+
+# APT cacher (optional)
+# var_apt_cacher=yes
+# var_apt_cacher_ip=192.168.1.10
+
+# Features/Tags/verbosity
+var_fuse=no
+var_tun=no
+var_tags=community-script
+var_verbose=no
+
+# Security (root PW) – empty => autologin
+# var_pw=
+EOF
+
+ # Now choose storages (always prompt unless just one exists)
+ choose_and_set_storage_for_file "$canonical" template
+ choose_and_set_storage_for_file "$canonical" container
+
+ chmod 0644 "$canonical"
+ msg_ok "Created ${canonical}"
+ }
+
+ # Whitelist check
+ local _is_whitelisted_key
+ _is_whitelisted_key() {
+ local k="$1"
+ local w
+ for w in "${VAR_WHITELIST[@]}"; do [ "$k" = "$w" ] && return 0; done
+ return 1
+ }
+
+ # Safe parser for KEY=VALUE lines (deliberately no source/eval of the file)
+ local _load_vars_file
+ _load_vars_file() {
+ local file="$1"
+ [ -f "$file" ] || return 0
+ msg_info "Loading defaults from ${file}"
+ local line key val
+ while IFS= read -r line || [ -n "$line" ]; do
+ # Trim leading/trailing whitespace; skip blanks and comment lines.
+ line="${line#"${line%%[![:space:]]*}"}"
+ line="${line%"${line##*[![:space:]]}"}"
+ [[ -z "$line" || "$line" == \#* ]] && continue
+ if [[ "$line" =~ ^([A-Za-z_][A-Za-z0-9_]*)=(.*)$ ]]; then
+ local var_key="${BASH_REMATCH[1]}"
+ local var_val="${BASH_REMATCH[2]}"
+
+ [[ "$var_key" != var_* ]] && continue
+ _is_whitelisted_key "$var_key" || {
+ msg_debug "Ignore non-whitelisted ${var_key}"
+ continue
+ }
+
+ # Strip quotes
+ if [[ "$var_val" =~ ^\"(.*)\"$ ]]; then
+ var_val="${BASH_REMATCH[1]}"
+ elif [[ "$var_val" =~ ^\'(.*)\'$ ]]; then
+ var_val="${BASH_REMATCH[1]}"
+ fi
+
+ # NOTE(review): quotes were already stripped by the regexes above; this
+ # second case-based strip is redundant but harmless — confirm and remove.
+ case $var_val in
+ \"*\")
+ var_val=${var_val#\"}
+ var_val=${var_val%\"}
+ ;;
+ \'*\')
+ var_val=${var_val#\'}
+ var_val=${var_val%\'}
+ ;;
+ esac # Hard env wins
+ [[ -n "${_HARD_ENV[$var_key]:-}" ]] && continue
+ # Set only if not already exported
+ [[ -z "${!var_key+x}" ]] && export "${var_key}=${var_val}"
+ else
+ msg_warn "Malformed line in ${file}: ${line}"
+ fi
+ done <"$file"
+ msg_ok "Loaded ${file}"
+ }
+
+ # 1) Ensure file exists
+ _ensure_default_vars
+
+ # 2) Load file
+ local dv
+ dv="$(_find_default_vars)" || {
+ msg_error "default.vars not found after ensure step"
+ return 1
+ }
+ _load_vars_file "$dv"
+
+ # 3) Map var_verbose → VERBOSE
+ if [[ -n "${var_verbose:-}" ]]; then
+ case "${var_verbose,,}" in 1 | yes | true | on) VERBOSE="yes" ;; 0 | no | false | off) VERBOSE="no" ;; *) VERBOSE="${var_verbose}" ;; esac
+ else
+ VERBOSE="no"
+ fi
+
+ # 4) Apply base settings and show summary
+ METHOD="mydefaults-global"
+ base_settings "$VERBOSE"
+ header_info
+ echo -e "${DEFAULT}${BOLD}${BL}Using My Defaults (default.vars) on node $PVEHOST_NAME${CL}"
+ echo_default
+}
+
+# ------------------------------------------------------------------------------
+# get_app_defaults_path()
+#
+# - Returns full path for app-specific defaults file
+# - Example: /usr/local/community-scripts/defaults/.vars
+# ------------------------------------------------------------------------------
+
+get_app_defaults_path() {
+  # Resolve the per-app defaults file path; prefer NSAPP, else lowercased APP.
+  local app_slug="${NSAPP:-${APP,,}}"
+  printf '%s\n' "/usr/local/community-scripts/defaults/${app_slug}.vars"
+}
+
+# ------------------------------------------------------------------------------
+# maybe_offer_save_app_defaults
+#
+# - Called after advanced_settings returned with fully chosen values.
+# - If no .vars exists, offers to persist current advanced settings
+# into /usr/local/community-scripts/defaults/.vars
+# - Only writes whitelisted var_* keys.
+# - Extracts raw values from flags like ",gw=..." ",mtu=..." etc.
+# ------------------------------------------------------------------------------
+# Fallback: make the var_* whitelist available at file scope for the helpers
+# below. NOTE(review): default_var_settings declares VAR_WHITELIST as a 'local',
+# which vanishes when it returns — confirm this guard is what the map parser
+# actually relies on at call time.
+if ! declare -p VAR_WHITELIST >/dev/null 2>&1; then
+ declare -ag VAR_WHITELIST=(
+ var_apt_cacher var_apt_cacher_ip var_brg var_cpu var_ctid var_disk var_fuse
+ var_gateway var_hostname var_ipv6_method var_ipv6_static var_mac var_mtu
+ var_net var_ns var_pw var_ram var_tags var_tun var_unprivileged
+ var_verbose var_vlan var_ssh var_ssh_authorized_key var_container_storage var_template_storage
+ )
+fi
+
+# Note: _is_whitelisted_key() is defined above in default_var_settings section
+
+_sanitize_value() {
+  # Echo $1 unchanged unless it contains shell metacharacters or
+  # command/process substitution, in which case echo an empty string.
+  local raw="$1"
+  case "$raw" in
+  *'$('* | *'`'* | *';'* | *'&'* | *'<('*)
+    echo ""
+    return 0
+    ;;
+  esac
+  echo "$raw"
+}
+
+# Map-Parser: read var_* from file into _VARS_IN associative array
+# Note: Main _load_vars_file() with full validation is defined in default_var_settings section
+# This simplified version is used specifically for diff operations via _VARS_IN array
+# Shared map filled by _load_vars_file_to_map and read by _build_vars_diff.
+declare -A _VARS_IN
+_load_vars_file_to_map() {
+  # Parse whitelisted var_* KEY=VALUE lines of file $1 into _VARS_IN.
+  # Simplified companion of _load_vars_file, used only for diffing.
+  local src="$1"
+  [ -f "$src" ] || return 0
+  _VARS_IN=() # reset before each parse
+  local raw k v
+  while IFS= read -r raw || [ -n "$raw" ]; do
+    # Trim surrounding whitespace; skip blanks and comment lines.
+    raw="${raw#"${raw%%[![:space:]]*}"}"
+    raw="${raw%"${raw##*[![:space:]]}"}"
+    [ -z "$raw" ] && continue
+    case "$raw" in
+    \#*) continue ;;
+    esac
+    # Split on the first '=' (same result as cut -f1 / -f2-, without subshells).
+    k="${raw%%=*}"
+    v="${raw#*=}"
+    case "$k" in
+    var_*)
+      if _is_whitelisted_key "$k"; then
+        _VARS_IN["$k"]="$v"
+      fi
+      ;;
+    esac
+  done <"$src"
+}
+
+# Diff function for two var_* files -> produces human-readable diff list for $1 (old) vs $2 (new)
+_build_vars_diff() {
+ # $1 = old vars file, $2 = new vars file; prints a human-readable diff.
+ # Relies on the shared _VARS_IN map filled by _load_vars_file_to_map.
+ local oldf="$1" newf="$2"
+ local k
+ local -A OLD=() NEW=()
+ _load_vars_file_to_map "$oldf"
+ for k in "${!_VARS_IN[@]}"; do OLD["$k"]="${_VARS_IN[$k]}"; done
+ _load_vars_file_to_map "$newf"
+ for k in "${!_VARS_IN[@]}"; do NEW["$k"]="${_VARS_IN[$k]}"; done
+
+ local out
+ out+="# Diff for ${APP} (${NSAPP})\n"
+ out+="# Old: ${oldf}\n# New: ${newf}\n\n"
+
+ local found_change=0
+
+ # Changed & Removed
+ for k in "${!OLD[@]}"; do
+ if [[ -v NEW["$k"] ]]; then
+ if [[ "${OLD[$k]}" != "${NEW[$k]}" ]]; then
+ out+="~ ${k}\n - old: ${OLD[$k]}\n + new: ${NEW[$k]}\n"
+ found_change=1
+ fi
+ else
+ out+="- ${k}\n - old: ${OLD[$k]}\n"
+ found_change=1
+ fi
+ done
+
+ # Added
+ for k in "${!NEW[@]}"; do
+ if [[ ! -v OLD["$k"] ]]; then
+ out+="+ ${k}\n + new: ${NEW[$k]}\n"
+ found_change=1
+ fi
+ done
+
+ # The literal marker line below is what maybe_offer_save_app_defaults greps for.
+ if [[ $found_change -eq 0 ]]; then
+ out+="(No differences)\n"
+ fi
+
+ # %b expands the \n escapes accumulated above.
+ printf "%b" "$out"
+}
+
+# Build a temporary .vars file from current advanced settings
+_build_current_app_vars_tmp() {
+ # Write the currently chosen advanced settings as var_* lines into a temp
+ # file and echo its path. NOTE(review): the helper variables below are not
+ # declared 'local' and therefore leak into the caller's scope — confirm
+ # nothing depends on that before tightening.
+ tmpf="$(mktemp /tmp/${NSAPP:-app}.vars.new.XXXXXX)"
+
+ # NET/GW — GATE carries a ",gw=..." pct flag; strip the prefix to get the raw value.
+ _net="${NET:-}"
+ _gate=""
+ case "${GATE:-}" in
+ ,gw=*) _gate=$(echo "$GATE" | sed 's/^,gw=//') ;;
+ esac
+
+ # IPv6 — static address/gateway only apply when the method is "static".
+ _ipv6_method="${IPV6_METHOD:-auto}"
+ _ipv6_static=""
+ _ipv6_gateway=""
+ if [ "$_ipv6_method" = "static" ]; then
+ _ipv6_static="${IPV6_ADDR:-}"
+ _ipv6_gateway="${IPV6_GATE:-}"
+ fi
+
+ # MTU/VLAN/MAC — same flag-stripping pattern as GATE above.
+ _mtu=""
+ _vlan=""
+ _mac=""
+ case "${MTU:-}" in
+ ,mtu=*) _mtu=$(echo "$MTU" | sed 's/^,mtu=//') ;;
+ esac
+ case "${VLAN:-}" in
+ ,tag=*) _vlan=$(echo "$VLAN" | sed 's/^,tag=//') ;;
+ esac
+ case "${MAC:-}" in
+ ,hwaddr=*) _mac=$(echo "$MAC" | sed 's/^,hwaddr=//') ;;
+ esac
+
+ # DNS / Searchdomain — these flags use a "-name=" prefix instead.
+ _ns=""
+ _searchdomain=""
+ case "${NS:-}" in
+ -nameserver=*) _ns=$(echo "$NS" | sed 's/^-nameserver=//') ;;
+ esac
+ case "${SD:-}" in
+ -searchdomain=*) _searchdomain=$(echo "$SD" | sed 's/^-searchdomain=//') ;;
+ esac
+
+ # SSH / APT / Features
+ _ssh="${SSH:-no}"
+ _ssh_auth="${SSH_AUTHORIZED_KEY:-}"
+ _apt_cacher="${APT_CACHER:-}"
+ _apt_cacher_ip="${APT_CACHER_IP:-}"
+ _fuse="${ENABLE_FUSE:-no}"
+ _tun="${ENABLE_TUN:-no}"
+ _tags="${TAGS:-}"
+ _verbose="${VERBOSE:-no}"
+
+ # Type / Resources / Identity
+ _unpriv="${CT_TYPE:-1}"
+ _cpu="${CORE_COUNT:-1}"
+ _ram="${RAM_SIZE:-1024}"
+ _disk="${DISK_SIZE:-4}"
+ _hostname="${HN:-$NSAPP}"
+
+ # Storage
+ _tpl_storage="${TEMPLATE_STORAGE:-${var_template_storage:-}}"
+ _ct_storage="${CONTAINER_STORAGE:-${var_container_storage:-}}"
+
+ # Emit only non-empty values, each passed through _sanitize_value.
+ {
+ echo "# App-specific defaults for ${APP} (${NSAPP})"
+ echo "# Generated on $(date -u '+%Y-%m-%dT%H:%M:%SZ')"
+ echo
+
+ echo "var_unprivileged=$(_sanitize_value "$_unpriv")"
+ echo "var_cpu=$(_sanitize_value "$_cpu")"
+ echo "var_ram=$(_sanitize_value "$_ram")"
+ echo "var_disk=$(_sanitize_value "$_disk")"
+
+ [ -n "${BRG:-}" ] && echo "var_brg=$(_sanitize_value "$BRG")"
+ [ -n "$_net" ] && echo "var_net=$(_sanitize_value "$_net")"
+ [ -n "$_gate" ] && echo "var_gateway=$(_sanitize_value "$_gate")"
+ [ -n "$_mtu" ] && echo "var_mtu=$(_sanitize_value "$_mtu")"
+ [ -n "$_vlan" ] && echo "var_vlan=$(_sanitize_value "$_vlan")"
+ [ -n "$_mac" ] && echo "var_mac=$(_sanitize_value "$_mac")"
+ [ -n "$_ns" ] && echo "var_ns=$(_sanitize_value "$_ns")"
+
+ [ -n "$_ipv6_method" ] && echo "var_ipv6_method=$(_sanitize_value "$_ipv6_method")"
+ [ -n "$_ipv6_static" ] && echo "var_ipv6_static=$(_sanitize_value "$_ipv6_static")"
+
+ [ -n "$_ssh" ] && echo "var_ssh=$(_sanitize_value "$_ssh")"
+ [ -n "$_ssh_auth" ] && echo "var_ssh_authorized_key=$(_sanitize_value "$_ssh_auth")"
+
+ [ -n "$_apt_cacher" ] && echo "var_apt_cacher=$(_sanitize_value "$_apt_cacher")"
+ [ -n "$_apt_cacher_ip" ] && echo "var_apt_cacher_ip=$(_sanitize_value "$_apt_cacher_ip")"
+
+ [ -n "$_fuse" ] && echo "var_fuse=$(_sanitize_value "$_fuse")"
+ [ -n "$_tun" ] && echo "var_tun=$(_sanitize_value "$_tun")"
+ [ -n "$_tags" ] && echo "var_tags=$(_sanitize_value "$_tags")"
+ [ -n "$_verbose" ] && echo "var_verbose=$(_sanitize_value "$_verbose")"
+
+ [ -n "$_hostname" ] && echo "var_hostname=$(_sanitize_value "$_hostname")"
+ # NOTE(review): var_searchdomain is not in VAR_WHITELIST, so the loaders
+ # silently drop it on read-back — confirm whether it should be whitelisted.
+ [ -n "$_searchdomain" ] && echo "var_searchdomain=$(_sanitize_value "$_searchdomain")"
+
+ [ -n "$_tpl_storage" ] && echo "var_template_storage=$(_sanitize_value "$_tpl_storage")"
+ [ -n "$_ct_storage" ] && echo "var_container_storage=$(_sanitize_value "$_ct_storage")"
+ } >"$tmpf"
+
+ echo "$tmpf"
+}
+
+# ------------------------------------------------------------------------------
+# maybe_offer_save_app_defaults()
+#
+# - Called after advanced_settings()
+# - Offers to save current values as app defaults if not existing
+# - If file exists: shows diff and allows Update, Keep, View Diff, or Cancel
+# ------------------------------------------------------------------------------
+maybe_offer_save_app_defaults() {
+ # Offer to persist the just-chosen advanced settings as app defaults.
+ # Creates the .vars file if missing; otherwise diffs and asks what to do.
+ local app_vars_path
+ app_vars_path="$(get_app_defaults_path)"
+
+ # always build from current settings
+ local new_tmp diff_tmp
+ new_tmp="$(_build_current_app_vars_tmp)"
+ diff_tmp="$(mktemp -p /tmp "${NSAPP:-app}.vars.diff.XXXXXX)"
+
+ # 1) if no file → offer to create
+ if [[ ! -f "$app_vars_path" ]]; then
+ if whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \
+ --yesno "Save these advanced settings as defaults for ${APP}?\n\nThis will create:\n${app_vars_path}" 12 72; then
+ mkdir -p "$(dirname "$app_vars_path")"
+ install -m 0644 "$new_tmp" "$app_vars_path"
+ msg_ok "Saved app defaults: ${app_vars_path}"
+ fi
+ rm -f "$new_tmp" "$diff_tmp"
+ return 0
+ fi
+
+ # 2) if file exists → build diff
+ _build_vars_diff "$app_vars_path" "$new_tmp" >"$diff_tmp"
+
+ # if no differences → do nothing (marker line emitted by _build_vars_diff)
+ if grep -q "^(No differences)$" "$diff_tmp"; then
+ rm -f "$new_tmp" "$diff_tmp"
+ return 0
+ fi
+
+ # 3) if file exists → show menu with default selection "Update Defaults"
+ local app_vars_file
+ app_vars_file="$(basename "$app_vars_path")"
+
+ # Loop so "View Diff" can return to the menu; all other choices break out.
+ while true; do
+ local sel
+ sel="$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \
+ --title "APP DEFAULTS – ${APP}" \
+ --menu "Differences detected. What do you want to do?" 20 78 10 \
+ "Update Defaults" "Write new values to ${app_vars_file}" \
+ "Keep Current" "Keep existing defaults (no changes)" \
+ "View Diff" "Show a detailed diff" \
+ "Cancel" "Abort without changes" \
+ --default-item "Update Defaults" \
+ 3>&1 1>&2 2>&3)" || { sel="Cancel"; }
+
+ case "$sel" in
+ "Update Defaults")
+ install -m 0644 "$new_tmp" "$app_vars_path"
+ msg_ok "Updated app defaults: ${app_vars_path}"
+ break
+ ;;
+ "Keep Current")
+ msg_info "Keeping current app defaults: ${app_vars_path}"
+ break
+ ;;
+ "View Diff")
+ whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \
+ --title "Diff – ${APP}" \
+ --scrolltext --textbox "$diff_tmp" 25 100
+ ;;
+ "Cancel" | *)
+ msg_info "Canceled. No changes to app defaults."
+ break
+ ;;
+ esac
+ done
+
+ rm -f "$new_tmp" "$diff_tmp"
+}
+
+ensure_storage_selection_for_vars_file() {
+  # Adopt template/container storage from a vars file if both are present;
+  # otherwise run the interactive picker for both and persist the result.
+  local vars_file="$1"
+
+  local tpl_val ct_val
+  tpl_val=$(grep -E '^var_template_storage=' "$vars_file" | cut -d= -f2-)
+  ct_val=$(grep -E '^var_container_storage=' "$vars_file" | cut -d= -f2-)
+
+  # Both stored: no prompting needed.
+  if [[ -n "$tpl_val" && -n "$ct_val" ]]; then
+    TEMPLATE_STORAGE="$tpl_val"
+    CONTAINER_STORAGE="$ct_val"
+    return 0
+  fi
+
+  choose_and_set_storage_for_file "$vars_file" template
+  choose_and_set_storage_for_file "$vars_file" container
+
+  msg_ok "Storage configuration saved to $(basename "$vars_file")"
+}
+
+diagnostics_menu() {
+ # Toggle the persisted DIAGNOSTICS setting: show the current value and
+ # offer the opposite one; persist changes via sed into the config file.
+ if [ "${DIAGNOSTICS:-no}" = "yes" ]; then
+ # Currently enabled → "Yes" button is relabeled "No" to offer disabling.
+ if whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \
+ --title "DIAGNOSTIC SETTINGS" \
+ --yesno "Send Diagnostics?\n\nCurrent: ${DIAGNOSTICS}" 10 58 \
+ --yes-button "No" --no-button "Back"; then
+ DIAGNOSTICS="no"
+ sed -i 's/^DIAGNOSTICS=.*/DIAGNOSTICS=no/' /usr/local/community-scripts/diagnostics
+ whiptail --msgbox "Diagnostics set to ${DIAGNOSTICS}." 8 58
+ fi
+ else
+ # Currently disabled → offer enabling.
+ if whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \
+ --title "DIAGNOSTIC SETTINGS" \
+ --yesno "Send Diagnostics?\n\nCurrent: ${DIAGNOSTICS}" 10 58 \
+ --yes-button "Yes" --no-button "Back"; then
+ DIAGNOSTICS="yes"
+ sed -i 's/^DIAGNOSTICS=.*/DIAGNOSTICS=yes/' /usr/local/community-scripts/diagnostics
+ whiptail --msgbox "Diagnostics set to ${DIAGNOSTICS}." 8 58
+ fi
+ fi
+}
+
+ensure_global_default_vars_file() {
+  # Create an empty global default.vars if it does not exist yet,
+  # then echo its path for the caller.
+  local path="/usr/local/community-scripts/default.vars"
+  if [[ ! -f "$path" ]]; then
+    mkdir -p "$(dirname "$path")"
+    touch "$path"
+  fi
+  echo "$path"
+}
+
+# ------------------------------------------------------------------------------
+# install_script()
+#
+# - Main entrypoint for installation mode
+# - Runs safety checks (pve_check, root_check, maxkeys_check, diagnostics_check)
+# - Builds interactive menu (Default, Verbose, Advanced, My Defaults, App Defaults, Diagnostics, Storage, Exit)
+# - Applies chosen settings and triggers container build
+# ------------------------------------------------------------------------------
+install_script() {
+ # Preflight checks before anything interactive happens.
+ pve_check
+ shell_check
+ root_check
+ arch_check
+ ssh_check
+ maxkeys_check
+ diagnostics_check
+
+ if systemctl is-active -q ping-instances.service; then
+ systemctl -q stop ping-instances.service
+ fi
+
+ NEXTID=$(pvesh get /cluster/nextid)
+ timezone=$(cat /etc/timezone)
+
+ # Show APP Header
+ header_info
+
+ # --- Support CLI argument as direct preset (default, advanced, …) ---
+ CHOICE="${mode:-${1:-}}"
+
+ # If no CLI argument → show whiptail menu
+ # Build menu dynamically based on available options.
+ # Numbering shifts: with app defaults present, "4" = App Defaults and
+ # "5" = Settings; without, "4" = Settings.
+ local appdefaults_option=""
+ local settings_option=""
+ local menu_items=(
+ "1" "Default Install"
+ "2" "Advanced Install"
+ "3" "My Defaults"
+ )
+
+ if [ -f "$(get_app_defaults_path)" ]; then
+ appdefaults_option="4"
+ menu_items+=("4" "App Defaults for ${APP}")
+ settings_option="5"
+ menu_items+=("5" "Settings")
+ else
+ settings_option="4"
+ menu_items+=("4" "Settings")
+ fi
+
+ if [ -z "$CHOICE" ]; then
+
+ TMP_CHOICE=$(whiptail \
+ --backtitle "Proxmox VE Helper Scripts" \
+ --title "Community-Scripts Options" \
+ --ok-button "Select" --cancel-button "Exit Script" \
+ --notags \
+ --menu "\nChoose an option:\n Use TAB or Arrow keys to navigate, ENTER to select.\n" \
+ 20 60 9 \
+ "${menu_items[@]}" \
+ --default-item "1" \
+ 3>&1 1>&2 2>&3) || exit_script
+ CHOICE="$TMP_CHOICE"
+ fi
+
+ # Publish the dynamic option numbers so the case patterns below can match them.
+ APPDEFAULTS_OPTION="$appdefaults_option"
+ SETTINGS_OPTION="$settings_option"
+
+ # --- Main case ---
+ local defaults_target=""
+ local run_maybe_offer="no"
+ case "$CHOICE" in
+ 1 | default | DEFAULT)
+ header_info
+ echo -e "${DEFAULT}${BOLD}${BL}Using Default Settings on node $PVEHOST_NAME${CL}"
+ VERBOSE="no"
+ METHOD="default"
+ base_settings "$VERBOSE"
+ echo_default
+ defaults_target="$(ensure_global_default_vars_file)"
+ ;;
+ 2 | advanced | ADVANCED)
+ header_info
+
+ echo -e "${ADVANCED}${BOLD}${RD}Using Advanced Install on node $PVEHOST_NAME${CL}"
+ echo -e "${INFO}${BOLD}${DGN}PVE Version ${PVEVERSION} (Kernel: ${KERNEL_VERSION})${CL}"
+ METHOD="advanced"
+ base_settings
+ advanced_settings
+ defaults_target="$(ensure_global_default_vars_file)"
+ # Only the advanced path offers to save its answers as app defaults.
+ run_maybe_offer="yes"
+ ;;
+ 3 | mydefaults | MYDEFAULTS)
+ default_var_settings || {
+ msg_error "Failed to apply default.vars"
+ exit 1
+ }
+ defaults_target="/usr/local/community-scripts/default.vars"
+ ;;
+ "$APPDEFAULTS_OPTION" | appdefaults | APPDEFAULTS)
+ if [ -f "$(get_app_defaults_path)" ]; then
+ header_info
+ echo -e "${DEFAULT}${BOLD}${BL}Using App Defaults for ${APP} on node $PVEHOST_NAME${CL}"
+ METHOD="appdefaults"
+ base_settings
+ _load_vars_file "$(get_app_defaults_path)"
+ echo_default
+ defaults_target="$(get_app_defaults_path)"
+ else
+ msg_error "No App Defaults available for ${APP}"
+ exit 1
+ fi
+ ;;
+ "$SETTINGS_OPTION" | settings | SETTINGS)
+ settings_menu
+ defaults_target=""
+ ;;
+ *)
+ echo -e "${CROSS}${RD}Invalid option: $CHOICE${CL}"
+ exit 1
+ ;;
+ esac
+
+ # Make sure the chosen vars file has both storages persisted.
+ if [[ -n "$defaults_target" ]]; then
+ ensure_storage_selection_for_vars_file "$defaults_target"
+ fi
+
+ if [[ "$run_maybe_offer" == "yes" ]]; then
+ maybe_offer_save_app_defaults
+ fi
+}
+
+edit_default_storage() {
+ local vf="/usr/local/community-scripts/default.vars"
+
+ # Ensure file exists
+ if [[ ! -f "$vf" ]]; then
+ mkdir -p "$(dirname "$vf")"
+ touch "$vf"
+ fi
+
+ # Let ensure_storage_selection_for_vars_file handle everything
+ ensure_storage_selection_for_vars_file "$vf"
+}
+
+settings_menu() {
+ # Settings submenu; loops until the user picks Back/Exit.
+ # Item numbering shifts: with app defaults present, "4" edits the app vars
+ # and "5" exits; without, "4" exits.
+ while true; do
+ local settings_items=(
+ "1" "Manage API-Diagnostic Setting"
+ "2" "Edit Default.vars"
+ "3" "Edit Default Storage"
+ )
+ if [ -f "$(get_app_defaults_path)" ]; then
+ settings_items+=("4" "Edit App.vars for ${APP}")
+ settings_items+=("5" "Exit")
+ else
+ settings_items+=("4" "Exit")
+ fi
+
+ local choice
+ choice=$(whiptail --backtitle "Proxmox VE Helper Scripts" \
+ --title "Community-Scripts SETTINGS Menu" \
+ --ok-button "OK" --cancel-button "Back" \
+ --menu "\n\nChoose a settings option:\n\nUse TAB or Arrow keys to navigate, ENTER to select." 20 60 9 \
+ "${settings_items[@]}" \
+ 3>&1 1>&2 2>&3) || break
+
+ case "$choice" in
+ 1) diagnostics_menu ;;
+ 2) ${EDITOR:-nano} /usr/local/community-scripts/default.vars ;;
+ 3) edit_default_storage ;;
+ 4)
+ # "4" is either "Edit App.vars" or "Exit" depending on the menu built above.
+ if [ -f "$(get_app_defaults_path)" ]; then
+ ${EDITOR:-nano} "$(get_app_defaults_path)"
+ else
+ exit_script
+ fi
+ ;;
+ 5) exit_script ;;
+ esac
+ done
+}
+
+# ===== Unified storage selection & writing to vars files =====
+_write_storage_to_vars() {
+  # Persist one storage key into a vars file, replacing any prior entry.
+  # $1 = vars_file, $2 = key (var_container_storage / var_template_storage), $3 = value
+  local vars_file="$1" store_key="$2" store_val="$3"
+  # Drop both active and commented-out occurrences so exactly one line remains.
+  sed -i "/^[#[:space:]]*${store_key}=/d" "$vars_file"
+  printf '%s=%s\n' "$store_key" "$store_val" >>"$vars_file"
+}
+
+choose_and_set_storage_for_file() {
+  # Select a storage for one content class and persist it into a vars file.
+  # $1 = vars_file, $2 = class ('container'|'template')
+  # Sets STORAGE_RESULT and exports the matching var_*/CAPS environment pair.
+  local vf="$1" class="$2" key=""
+  case "$class" in
+  container) key="var_container_storage" ;;
+  template) key="var_template_storage" ;;
+  *)
+    msg_error "Unknown storage class: $class"
+    return 1
+    ;;
+  esac
+
+  # (Cleanup: a 'current' value was previously read from "$vf" here but never
+  # used anywhere in this function; the dead awk call has been removed.)
+
+  # If only one storage exists for the content type, auto-pick. Else always ask.
+  local content="rootdir"
+  [[ "$class" == "template" ]] && content="vztmpl"
+  local count
+  count=$(pvesm status -content "$content" | awk 'NR>1{print $1}' | wc -l)
+
+  if [[ "$count" -eq 1 ]]; then
+    STORAGE_RESULT=$(pvesm status -content "$content" | awk 'NR>1{print $1; exit}')
+    STORAGE_INFO=""
+  else
+    # Always offer interactive selection when more than one storage qualifies.
+    select_storage "$class" || return 1
+  fi
+
+  _write_storage_to_vars "$vf" "$key" "$STORAGE_RESULT"
+
+  # Keep environment in sync for later steps (e.g. app-default save)
+  if [[ "$class" == "container" ]]; then
+    export var_container_storage="$STORAGE_RESULT"
+    export CONTAINER_STORAGE="$STORAGE_RESULT"
+  else
+    export var_template_storage="$STORAGE_RESULT"
+    export TEMPLATE_STORAGE="$STORAGE_RESULT"
+  fi
+
+  msg_ok "Updated ${key} → ${STORAGE_RESULT}"
+}
+
+# ------------------------------------------------------------------------------
+# check_container_resources()
+#
+# - Compares host RAM/CPU with required values
+# - Warns if under-provisioned and asks user to continue or abort
+# ------------------------------------------------------------------------------
+check_container_resources() {
+  # Compare host memory/cores against the app's var_ram/var_cpu requirements
+  # and let the user abort when the host is under-provisioned.
+  current_ram=$(free -m | awk 'NR==2{print $2}')
+  current_cpu=$(nproc)
+
+  # Guard clause: host meets both requirements → nothing to warn about.
+  if [[ "$current_ram" -ge "$var_ram" ]] && [[ "$current_cpu" -ge "$var_cpu" ]]; then
+    echo -e ""
+    return
+  fi
+
+  echo -e "\n${INFO}${HOLD} ${GN}Required: ${var_cpu} CPU, ${var_ram}MB RAM ${CL}| ${RD}Current: ${current_cpu} CPU, ${current_ram}MB RAM${CL}"
+  echo -e "${YWB}Please ensure that the ${APP} LXC is configured with at least ${var_cpu} vCPU and ${var_ram} MB RAM for the build process.${CL}\n"
+  echo -ne "${INFO}${HOLD} May cause data loss! ${INFO} Continue update with under-provisioned LXC? "
+  read -r prompt
+  # Only an explicit "yes" (any case) continues; everything else aborts.
+  if [[ ! ${prompt,,} =~ ^(yes)$ ]]; then
+    echo -e "${CROSS}${HOLD} ${YWB}Exiting based on user input.${CL}"
+    exit 1
+  fi
+}
+
+# ------------------------------------------------------------------------------
+# check_container_storage()
+#
+# - Checks /boot partition usage
+# - Warns if usage >80% and asks user confirmation before proceeding
+# ------------------------------------------------------------------------------
+check_container_storage() {
+  # Warn — and let the user abort — when /boot usage exceeds 80%.
+  total_size=$(df /boot --output=size | tail -n 1)
+  local used_size
+  used_size=$(df /boot --output=used | tail -n 1)
+  usage=$((100 * used_size / total_size))
+
+  # Guard clause: plenty of space left, nothing to do.
+  ((usage > 80)) || return 0
+
+  echo -e "${INFO}${HOLD} ${YWB}Warning: Storage is dangerously low (${usage}%).${CL}"
+  echo -ne "Continue anyway? "
+  read -r prompt
+  # Accept "y" or "yes" (any case); anything else aborts.
+  if [[ ! ${prompt,,} =~ ^(y|yes)$ ]]; then
+    echo -e "${CROSS}${HOLD}${YWB}Exiting based on user input.${CL}"
+    exit 1
+  fi
+}
+
+# ------------------------------------------------------------------------------
+# ssh_extract_keys_from_file()
+#
+# - Extracts valid SSH public keys from given file
+# - Supports RSA, Ed25519, ECDSA and filters out comments/invalid lines
+# ------------------------------------------------------------------------------
+ssh_extract_keys_from_file() {
+ # Print every valid-looking SSH public key line from file $1 (one per line),
+ # stripping CRs, comments and blanks. Lines with leading key options are
+ # reduced to the part starting at the key type.
+ # (The comments inside the awk program below are German: "nackt" = a bare
+ # "type base64 [comment]" line; "mit Optionen" = with options, match from
+ # the first key-type token. They are part of the quoted awk string and are
+ # left untouched here.)
+ local f="$1"
+ [[ -r "$f" ]] || return 0
+ tr -d '\r' <"$f" | awk '
+ /^[[:space:]]*#/ {next}
+ /^[[:space:]]*$/ {next}
+ # nackt: typ base64 [comment]
+ /^(ssh-(rsa|ed25519)|ecdsa-sha2-nistp256|sk-(ssh-ed25519|ecdsa-sha2-nistp256))[[:space:]]+/ {print; next}
+ # mit Optionen: finde ab erstem Key-Typ
+ {
+ match($0, /(ssh-(rsa|ed25519)|ecdsa-sha2-nistp256|sk-(ssh-ed25519|ecdsa-sha2-nistp256))[[:space:]]+/)
+ if (RSTART>0) { print substr($0, RSTART) }
+ }
+ '
+}
+
+# ------------------------------------------------------------------------------
+# ssh_build_choices_from_files()
+#
+# - Builds interactive whiptail checklist of available SSH keys
+# - Generates fingerprint, type and comment for each key
+# ------------------------------------------------------------------------------
+ssh_build_choices_from_files() {
+ # Build a whiptail checklist from the SSH keys found in the given files.
+ # Outputs via globals consumed by the caller:
+ #   CHOICES  - whiptail checklist triplets (tag, label, state)
+ #   COUNT    - number of keys found
+ #   MAPFILE  - temp file mapping "K<n>|<key line>" for later lookup
+ # NOTE(review): MAPFILE is also the default array name used by bash's
+ # 'mapfile' builtin; reused here as a scalar path — confirm no conflict.
+ local -a files=("$@")
+ CHOICES=()
+ COUNT=0
+ MAPFILE="$(mktemp)"
+ local id key typ fp cmt base ln=0
+
+ for f in "${files[@]}"; do
+ [[ -f "$f" && -r "$f" ]] || continue
+ base="$(basename -- "$f")"
+ # Skip non-key files and private-key halves of id_* pairs.
+ case "$base" in
+ known_hosts | known_hosts.* | config) continue ;;
+ id_*) [[ "$f" != *.pub ]] && continue ;;
+ esac
+
+ # map every key in file
+ while IFS= read -r key; do
+ [[ -n "$key" ]] || continue
+
+ typ=""
+ fp=""
+ cmt=""
+ # Only the pure key part (without options) is already included in ‘key’.
+ read -r _typ _b64 _cmt <<<"$key"
+ typ="${_typ:-key}"
+ cmt="${_cmt:-}"
+ # Fingerprint via ssh-keygen (if available)
+ if command -v ssh-keygen >/dev/null 2>&1; then
+ fp="$(printf '%s\n' "$key" | ssh-keygen -lf - 2>/dev/null | awk '{print $2}')"
+ fi
+ # Label shorten
+ [[ ${#cmt} -gt 40 ]] && cmt="${cmt:0:37}..."
+
+ ln=$((ln + 1))
+ COUNT=$((COUNT + 1))
+ id="K${COUNT}"
+ echo "${id}|${key}" >>"$MAPFILE"
+ CHOICES+=("$id" "[$typ] ${fp:+$fp }${cmt:+$cmt }— ${base}" "OFF")
+ done < <(ssh_extract_keys_from_file "$f")
+ done
+}
+
+# ------------------------------------------------------------------------------
+# ssh_discover_default_files()
+#
+# - Scans standard paths for SSH keys
+# - Includes ~/.ssh/*.pub, /etc/ssh/authorized_keys, etc.
+# ------------------------------------------------------------------------------
+ssh_discover_default_files() {
+  # Emit the standard candidate files that may contain SSH public keys,
+  # NUL-separated (consumed via mapfile -d '' by the caller).
+  local -a candidates=()
+  shopt -s nullglob
+  candidates+=(/root/.ssh/authorized_keys /root/.ssh/authorized_keys2)
+  candidates+=(/root/.ssh/*.pub)
+  candidates+=(/etc/ssh/authorized_keys /etc/ssh/authorized_keys.d/*)
+  shopt -u nullglob
+  printf '%s\0' "${candidates[@]}"
+}
+
+configure_ssh_settings() {
+ SSH_KEYS_FILE="$(mktemp)"
+ : >"$SSH_KEYS_FILE"
+
+ IFS=$'\0' read -r -d '' -a _def_files < <(ssh_discover_default_files && printf '\0')
+ ssh_build_choices_from_files "${_def_files[@]}"
+ local default_key_count="$COUNT"
+
+ local ssh_key_mode
+ if [[ "$default_key_count" -gt 0 ]]; then
+ ssh_key_mode=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --title "SSH KEY SOURCE" --menu \
+ "Provision SSH keys for root:" 14 72 4 \
+ "found" "Select from detected keys (${default_key_count})" \
+ "manual" "Paste a single public key" \
+ "folder" "Scan another folder (path or glob)" \
+ "none" "No keys" 3>&1 1>&2 2>&3) || exit_script
+ else
+ ssh_key_mode=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --title "SSH KEY SOURCE" --menu \
+ "No host keys detected; choose manual/none:" 12 72 2 \
+ "manual" "Paste a single public key" \
+ "none" "No keys" 3>&1 1>&2 2>&3) || exit_script
+ fi
+
+ case "$ssh_key_mode" in
+ found)
+ local selection
+ selection=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --title "SELECT HOST KEYS" \
+ --checklist "Select one or more keys to import:" 20 140 10 "${CHOICES[@]}" 3>&1 1>&2 2>&3) || exit_script
+ for tag in $selection; do
+ tag="${tag%\"}"
+ tag="${tag#\"}"
+ local line
+ line=$(grep -E "^${tag}\|" "$MAPFILE" | head -n1 | cut -d'|' -f2-)
+ [[ -n "$line" ]] && printf '%s\n' "$line" >>"$SSH_KEYS_FILE"
+ done
+ ;;
+ manual)
+ SSH_AUTHORIZED_KEY="$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \
+ --inputbox "Paste one SSH public key line (ssh-ed25519/ssh-rsa/...)" 10 72 --title "SSH Public Key" 3>&1 1>&2 2>&3)"
+ [[ -n "$SSH_AUTHORIZED_KEY" ]] && printf '%s\n' "$SSH_AUTHORIZED_KEY" >>"$SSH_KEYS_FILE"
+ ;;
+ folder)
+ local glob_path
+ glob_path=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \
+ --inputbox "Enter a folder or glob to scan (e.g. /root/.ssh/*.pub)" 10 72 --title "Scan Folder/Glob" 3>&1 1>&2 2>&3)
+ if [[ -n "$glob_path" ]]; then
+ shopt -s nullglob
+ read -r -a _scan_files <<<"$glob_path"
+ shopt -u nullglob
+ if [[ "${#_scan_files[@]}" -gt 0 ]]; then
+ ssh_build_choices_from_files "${_scan_files[@]}"
+ if [[ "$COUNT" -gt 0 ]]; then
+ local folder_selection
+ folder_selection=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --title "SELECT FOLDER KEYS" \
+ --checklist "Select key(s) to import:" 20 78 10 "${CHOICES[@]}" 3>&1 1>&2 2>&3) || exit_script
+ for tag in $folder_selection; do
+ tag="${tag%\"}"
+ tag="${tag#\"}"
+ local line
+ line=$(grep -E "^${tag}\|" "$MAPFILE" | head -n1 | cut -d'|' -f2-)
+ [[ -n "$line" ]] && printf '%s\n' "$line" >>"$SSH_KEYS_FILE"
+ done
+ else
+ whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --msgbox "No keys found in: $glob_path" 8 60
+ fi
+ else
+ whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --msgbox "Path/glob returned no files." 8 60
+ fi
+ fi
+ ;;
+ none)
+ :
+ ;;
+ esac
+
+ if [[ -s "$SSH_KEYS_FILE" ]]; then
+ sort -u -o "$SSH_KEYS_FILE" "$SSH_KEYS_FILE"
+ printf '\n' >>"$SSH_KEYS_FILE"
+ fi
+
+ if [[ -s "$SSH_KEYS_FILE" || "$PW" == -password* ]]; then
+ if (whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --defaultno --title "SSH ACCESS" --yesno "Enable root SSH access?" 10 58); then
+ SSH="yes"
+ else
+ SSH="no"
+ fi
+ else
+ SSH="no"
+ fi
+}
+
+# ------------------------------------------------------------------------------
+# start()
+#
+# - Entry point of script
+# - On Proxmox host: calls install_script
+# - In silent mode (PHS_SILENT=1): runs update_script non-interactively
+# - Otherwise: shows the update/setting menu
+# ------------------------------------------------------------------------------
+start() {
+  source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/tools.func)
+  if command -v pveversion >/dev/null 2>&1; then
+    install_script || return 0
+    return 0
+  elif [[ "${PHS_SILENT:-}" == "1" ]]; then
+    # FIX: the old test '[ ! -z ${PHS_SILENT+x} ]' left the expansion unquoted
+    # and only behaved correctly by accident when PHS_SILENT was unset
+    VERBOSE="no"
+    set_std_mode
+    update_script
+  else
+    CHOICE=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --title "${APP} LXC Update/Setting" --menu \
+      "Support/Update functions for ${APP} LXC. Choose an option:" \
+      12 60 3 \
+      "1" "YES (Silent Mode)" \
+      "2" "YES (Verbose Mode)" \
+      "3" "NO (Cancel Update)" --nocancel --default-item "1" 3>&1 1>&2 2>&3)
+
+    case "$CHOICE" in
+    1)
+      VERBOSE="no"
+      set_std_mode
+      ;;
+    2)
+      VERBOSE="yes"
+      set_std_mode
+      ;;
+    3)
+      clear
+      exit_script
+      exit # defensive: unreachable if exit_script terminates the process
+      ;;
+    esac
+    update_script
+  fi
+}
+
+# ------------------------------------------------------------------------------
+# build_container()
+#
+# - Creates and configures the LXC container
+# - Builds network string and applies features (FUSE, TUN, GPU/USB passthrough)
+# - Starts container and waits for network connectivity
+# - Installs base packages, SSH keys, and runs the app's -install.sh
+# ------------------------------------------------------------------------------
+build_container() {
+  # if [ "$VERBOSE" == "yes" ]; then set -x; fi
+
+  # Assemble the pct -net0 option string piece by piece
+  NET_STRING="-net0 name=eth0,bridge=${BRG:-vmbr0}"
+
+  # MAC (accepts a raw address or a ready-made ",hwaddr=..." fragment)
+  if [[ -n "$MAC" ]]; then
+    case "$MAC" in
+    ,hwaddr=*) NET_STRING+="$MAC" ;;
+    *) NET_STRING+=",hwaddr=$MAC" ;;
+    esac
+  fi
+
+  # IP (always required, defaults to dhcp)
+  NET_STRING+=",ip=${NET:-dhcp}"
+
+  # Gateway
+  if [[ -n "$GATE" ]]; then
+    case "$GATE" in
+    ,gw=*) NET_STRING+="$GATE" ;;
+    *) NET_STRING+=",gw=$GATE" ;;
+    esac
+  fi
+
+  # VLAN
+  if [[ -n "$VLAN" ]]; then
+    case "$VLAN" in
+    ,tag=*) NET_STRING+="$VLAN" ;;
+    *) NET_STRING+=",tag=$VLAN" ;;
+    esac
+  fi
+
+  # MTU
+  if [[ -n "$MTU" ]]; then
+    case "$MTU" in
+    ,mtu=*) NET_STRING+="$MTU" ;;
+    *) NET_STRING+=",mtu=$MTU" ;;
+    esac
+  fi
+
+  # IPv6 handling (auto / dhcp / static / none)
+  case "$IPV6_METHOD" in
+  auto) NET_STRING="$NET_STRING,ip6=auto" ;;
+  dhcp) NET_STRING="$NET_STRING,ip6=dhcp" ;;
+  static)
+    NET_STRING="$NET_STRING,ip6=$IPV6_ADDR"
+    [ -n "$IPV6_GATE" ] && NET_STRING="$NET_STRING,gw6=$IPV6_GATE"
+    ;;
+  none) ;;
+  esac
+
+  # Unprivileged CTs (CT_TYPE=1) additionally need keyctl for systemd inside
+  if [ "$CT_TYPE" == "1" ]; then
+    FEATURES="keyctl=1,nesting=1"
+  else
+    FEATURES="nesting=1"
+  fi
+
+  if [ "$ENABLE_FUSE" == "yes" ]; then
+    FEATURES="$FEATURES,fuse=1"
+  fi
+
+  TEMP_DIR=$(mktemp -d)
+  pushd "$TEMP_DIR" >/dev/null
+  if [ "$var_os" == "alpine" ]; then
+    export FUNCTIONS_FILE_PATH="$(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/alpine-install.func)"
+  else
+    export FUNCTIONS_FILE_PATH="$(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/install.func)"
+  fi
+  # Environment consumed by install.func / create_lxc_container
+  export DIAGNOSTICS="$DIAGNOSTICS"
+  export RANDOM_UUID="$RANDOM_UUID"
+  export CACHER="$APT_CACHER"
+  export CACHER_IP="$APT_CACHER_IP"
+  export tz="$timezone"
+  export APPLICATION="$APP"
+  export app="$NSAPP"
+  export PASSWORD="$PW"
+  export VERBOSE="$VERBOSE"
+  export SSH_ROOT="${SSH}"
+  export SSH_AUTHORIZED_KEY
+  export CTID="$CT_ID"
+  export CTTYPE="$CT_TYPE"
+  export ENABLE_FUSE="$ENABLE_FUSE"
+  export ENABLE_TUN="$ENABLE_TUN"
+  export PCT_OSTYPE="$var_os"
+  export PCT_OSVERSION="$var_version"
+  export PCT_DISK_SIZE="$DISK_SIZE"
+  export PCT_OPTIONS="
+    -features $FEATURES
+    -hostname $HN
+    -tags $TAGS
+    $SD
+    $NS
+    $NET_STRING
+    -onboot 1
+    -cores $CORE_COUNT
+    -memory $RAM_SIZE
+    -unprivileged $CT_TYPE
+    $PW
+"
+  export TEMPLATE_STORAGE="${var_template_storage:-}"
+  export CONTAINER_STORAGE="${var_container_storage:-}"
+  create_lxc_container || exit $?
+
+  LXC_CONFIG="/etc/pve/lxc/${CTID}.conf"
+
+  # ============================================================================
+  # GPU/USB PASSTHROUGH CONFIGURATION
+  # ============================================================================
+
+  # List of applications that benefit from GPU acceleration
+  GPU_APPS=(
+    "immich" "channels" "emby" "ersatztv" "frigate"
+    "jellyfin" "plex" "scrypted" "tdarr" "unmanic"
+    "ollama" "fileflows" "open-webui" "tunarr" "debian"
+    "handbrake" "sunshine" "moonlight" "kodi" "stremio"
+    "viseron"
+  )
+
+  # Check if app needs GPU (case-insensitive match against GPU_APPS)
+  is_gpu_app() {
+    local app="${1,,}"
+    for gpu_app in "${GPU_APPS[@]}"; do
+      [[ "$app" == "${gpu_app,,}" ]] && return 0
+    done
+    return 1
+  }
+
+  # Detect all available GPU devices
+  # Fills INTEL_DEVICES / AMD_DEVICES / NVIDIA_DEVICES arrays
+  detect_gpu_devices() {
+    INTEL_DEVICES=()
+    AMD_DEVICES=()
+    NVIDIA_DEVICES=()
+
+    # Store PCI info to avoid multiple calls
+    local pci_vga_info
+    pci_vga_info=$(lspci -nn 2>/dev/null | grep -E "VGA|Display|3D")
+
+    # Check for Intel GPU - look for Intel vendor ID [8086]
+    if echo "$pci_vga_info" | grep -q "\[8086:"; then
+      msg_info "Detected Intel GPU"
+      if [[ -d /dev/dri ]]; then
+        for d in /dev/dri/renderD* /dev/dri/card*; do
+          [[ -e "$d" ]] && INTEL_DEVICES+=("$d")
+        done
+      fi
+    fi
+
+    # Check for AMD GPU - look for AMD vendor IDs [1002] (AMD/ATI) or [1022] (AMD)
+    if echo "$pci_vga_info" | grep -qE "\[1002:|\[1022:"; then
+      msg_info "Detected AMD GPU"
+      if [[ -d /dev/dri ]]; then
+        # Only add if not already claimed by Intel
+        if [[ ${#INTEL_DEVICES[@]} -eq 0 ]]; then
+          for d in /dev/dri/renderD* /dev/dri/card*; do
+            [[ -e "$d" ]] && AMD_DEVICES+=("$d")
+          done
+        fi
+      fi
+    fi
+
+    # Check for NVIDIA GPU - look for NVIDIA vendor ID [10de]
+    if echo "$pci_vga_info" | grep -q "\[10de:"; then
+      msg_info "Detected NVIDIA GPU"
+      if ! check_nvidia_host_setup; then
+        msg_error "NVIDIA host setup incomplete. Skipping GPU passthrough."
+        msg_info "Fix NVIDIA drivers on host, then recreate container or passthrough manually."
+        return 0
+      fi
+
+      for d in /dev/nvidia* /dev/nvidiactl /dev/nvidia-modeset; do
+        [[ -e "$d" ]] && NVIDIA_DEVICES+=("$d")
+      done
+
+      if [[ ${#NVIDIA_DEVICES[@]} -eq 0 ]]; then
+        msg_warn "NVIDIA GPU detected but no /dev/nvidia* devices found"
+        msg_warn "Please install NVIDIA drivers on host: apt install nvidia-driver"
+      else
+        if [[ "$CT_TYPE" == "0" ]]; then
+          # FIX: was 'cat <>"$LXC_CONFIG"' (read-write stdin redirection) which
+          # executed the config lines as commands; append via heredoc instead
+          cat <<EOF >>"$LXC_CONFIG"
+# NVIDIA GPU Passthrough (privileged)
+lxc.cgroup2.devices.allow: c 195:* rwm
+lxc.cgroup2.devices.allow: c 243:* rwm
+lxc.mount.entry: /dev/nvidia0 dev/nvidia0 none bind,optional,create=file
+lxc.mount.entry: /dev/nvidiactl dev/nvidiactl none bind,optional,create=file
+lxc.mount.entry: /dev/nvidia-uvm dev/nvidia-uvm none bind,optional,create=file
+lxc.mount.entry: /dev/nvidia-uvm-tools dev/nvidia-uvm-tools none bind,optional,create=file
+EOF
+
+          if [[ -e /dev/dri/renderD128 ]]; then
+            echo "lxc.mount.entry: /dev/dri/renderD128 dev/dri/renderD128 none bind,optional,create=file" >>"$LXC_CONFIG"
+          fi
+
+          export GPU_TYPE="NVIDIA"
+          export NVIDIA_DRIVER_VERSION=$(nvidia-smi --query-gpu=driver_version --format=csv,noheader 2>/dev/null | head -n1)
+          msg_ok "NVIDIA GPU passthrough configured (driver: ${NVIDIA_DRIVER_VERSION})"
+        else
+          msg_warn "NVIDIA passthrough only supported for privileged containers"
+          return 0
+        fi
+      fi
+    fi
+
+    # Debug output
+    msg_debug "Intel devices: ${INTEL_DEVICES[*]}"
+    msg_debug "AMD devices: ${AMD_DEVICES[*]}"
+    msg_debug "NVIDIA devices: ${NVIDIA_DEVICES[*]}"
+  }
+
+  # Configure USB passthrough for privileged containers
+  configure_usb_passthrough() {
+    if [[ "$CT_TYPE" != "0" ]]; then
+      return 0
+    fi
+
+    msg_info "Configuring automatic USB passthrough (privileged container)"
+    # FIX: was 'cat <>"$LXC_CONFIG"'; use an appending heredoc
+    cat <<EOF >>"$LXC_CONFIG"
+# Automatic USB passthrough (privileged container)
+lxc.cgroup2.devices.allow: a
+lxc.cap.drop:
+lxc.cgroup2.devices.allow: c 188:* rwm
+lxc.cgroup2.devices.allow: c 189:* rwm
+lxc.mount.entry: /dev/serial/by-id dev/serial/by-id none bind,optional,create=dir
+lxc.mount.entry: /dev/ttyUSB0 dev/ttyUSB0 none bind,optional,create=file
+lxc.mount.entry: /dev/ttyUSB1 dev/ttyUSB1 none bind,optional,create=file
+lxc.mount.entry: /dev/ttyACM0 dev/ttyACM0 none bind,optional,create=file
+lxc.mount.entry: /dev/ttyACM1 dev/ttyACM1 none bind,optional,create=file
+EOF
+    msg_ok "USB passthrough configured"
+  }
+
+  # Configure GPU passthrough
+  configure_gpu_passthrough() {
+    # Skip if not a GPU app and not privileged
+    if [[ "$CT_TYPE" != "0" ]] && ! is_gpu_app "$APP"; then
+      return 0
+    fi
+
+    detect_gpu_devices
+
+    # Count available GPU types
+    local gpu_count=0
+    local available_gpus=()
+
+    if [[ ${#INTEL_DEVICES[@]} -gt 0 ]]; then
+      available_gpus+=("INTEL")
+      gpu_count=$((gpu_count + 1))
+    fi
+
+    if [[ ${#AMD_DEVICES[@]} -gt 0 ]]; then
+      available_gpus+=("AMD")
+      gpu_count=$((gpu_count + 1))
+    fi
+
+    if [[ ${#NVIDIA_DEVICES[@]} -gt 0 ]]; then
+      available_gpus+=("NVIDIA")
+      gpu_count=$((gpu_count + 1))
+    fi
+
+    if [[ $gpu_count -eq 0 ]]; then
+      msg_info "No GPU devices found for passthrough"
+      return 0
+    fi
+
+    local selected_gpu=""
+
+    if [[ $gpu_count -eq 1 ]]; then
+      # Automatic selection for single GPU
+      selected_gpu="${available_gpus[0]}"
+      msg_info "Automatically configuring ${selected_gpu} GPU passthrough"
+    else
+      # Multiple GPUs - ask user
+      echo -e "\n${INFO} Multiple GPU types detected:"
+      for gpu in "${available_gpus[@]}"; do
+        echo " - $gpu"
+      done
+      read -rp "Which GPU type to passthrough? (${available_gpus[*]}): " selected_gpu
+      selected_gpu="${selected_gpu^^}"
+
+      # Validate selection
+      local valid=0
+      for gpu in "${available_gpus[@]}"; do
+        [[ "$selected_gpu" == "$gpu" ]] && valid=1
+      done
+
+      if [[ $valid -eq 0 ]]; then
+        msg_warn "Invalid selection. Skipping GPU passthrough."
+        return 0
+      fi
+    fi
+
+    # Apply passthrough configuration based on selection
+    local dev_idx=0
+
+    case "$selected_gpu" in
+    INTEL | AMD)
+      local devices=()
+      [[ "$selected_gpu" == "INTEL" ]] && devices=("${INTEL_DEVICES[@]}")
+      [[ "$selected_gpu" == "AMD" ]] && devices=("${AMD_DEVICES[@]}")
+
+      # For Proxmox WebUI visibility, add as dev0, dev1 etc.
+      for dev in "${devices[@]}"; do
+        if [[ "$CT_TYPE" == "0" ]]; then
+          # Privileged container - use dev entries for WebUI visibility
+          # Use initial GID 104 (render) for renderD*, 44 (video) for card*
+          if [[ "$dev" =~ renderD ]]; then
+            echo "dev${dev_idx}: $dev,gid=104" >>"$LXC_CONFIG"
+          else
+            echo "dev${dev_idx}: $dev,gid=44" >>"$LXC_CONFIG"
+          fi
+          dev_idx=$((dev_idx + 1))
+
+          # Also add cgroup allows for privileged containers
+          local major minor
+          major=$(stat -c '%t' "$dev" 2>/dev/null || echo "0")
+          minor=$(stat -c '%T' "$dev" 2>/dev/null || echo "0")
+
+          if [[ "$major" != "0" && "$minor" != "0" ]]; then
+            echo "lxc.cgroup2.devices.allow: c $((0x$major)):$((0x$minor)) rwm" >>"$LXC_CONFIG"
+          fi
+        else
+          # Unprivileged container
+          if [[ "$dev" =~ renderD ]]; then
+            echo "dev${dev_idx}: $dev,uid=0,gid=104" >>"$LXC_CONFIG"
+          else
+            echo "dev${dev_idx}: $dev,uid=0,gid=44" >>"$LXC_CONFIG"
+          fi
+          dev_idx=$((dev_idx + 1))
+        fi
+      done
+
+      export GPU_TYPE="$selected_gpu"
+      msg_ok "${selected_gpu} GPU passthrough configured (${dev_idx} devices)"
+      ;;
+
+    NVIDIA)
+      if [[ ${#NVIDIA_DEVICES[@]} -eq 0 ]]; then
+        msg_error "NVIDIA drivers not installed on host. Please install: apt install nvidia-driver"
+        return 1
+      fi
+
+      for dev in "${NVIDIA_DEVICES[@]}"; do
+        # NVIDIA devices typically need different handling
+        echo "dev${dev_idx}: $dev,uid=0,gid=44" >>"$LXC_CONFIG"
+        dev_idx=$((dev_idx + 1))
+
+        if [[ "$CT_TYPE" == "0" ]]; then
+          local major minor
+          major=$(stat -c '%t' "$dev" 2>/dev/null || echo "0")
+          minor=$(stat -c '%T' "$dev" 2>/dev/null || echo "0")
+
+          if [[ "$major" != "0" && "$minor" != "0" ]]; then
+            echo "lxc.cgroup2.devices.allow: c $((0x$major)):$((0x$minor)) rwm" >>"$LXC_CONFIG"
+          fi
+        fi
+      done
+
+      export GPU_TYPE="NVIDIA"
+      msg_ok "NVIDIA GPU passthrough configured (${dev_idx} devices)"
+      ;;
+    esac
+  }
+
+  # Additional device passthrough
+  configure_additional_devices() {
+    # TUN device passthrough
+    if [ "$ENABLE_TUN" == "yes" ]; then
+      # FIX: was 'cat <>"$LXC_CONFIG"'; use an appending heredoc
+      cat <<EOF >>"$LXC_CONFIG"
+lxc.cgroup2.devices.allow: c 10:200 rwm
+lxc.mount.entry: /dev/net/tun dev/net/tun none bind,create=file
+EOF
+    fi
+
+    # Coral TPU passthrough
+    if [[ -e /dev/apex_0 ]]; then
+      msg_info "Detected Coral TPU - configuring passthrough"
+      echo "lxc.mount.entry: /dev/apex_0 dev/apex_0 none bind,optional,create=file" >>"$LXC_CONFIG"
+    fi
+  }
+
+  # Execute pre-start configurations
+  configure_usb_passthrough
+  configure_gpu_passthrough
+  configure_additional_devices
+
+  # ============================================================================
+  # START CONTAINER AND INSTALL USERLAND
+  # ============================================================================
+
+  msg_info "Starting LXC Container"
+  pct start "$CTID"
+
+  # Wait for container to be running (up to 10s)
+  for i in {1..10}; do
+    if pct status "$CTID" | grep -q "status: running"; then
+      msg_ok "Started LXC Container"
+      break
+    fi
+    sleep 1
+    if [ "$i" -eq 10 ]; then
+      msg_error "LXC Container did not reach running state"
+      exit 1
+    fi
+  done
+
+  # Wait for network (skip for Alpine initially)
+  if [ "$var_os" != "alpine" ]; then
+    msg_info "Waiting for network in LXC container"
+
+    # Wait for an IPv4 address on eth0 (up to 20s)
+    for i in {1..20}; do
+      ip_in_lxc=$(pct exec "$CTID" -- ip -4 addr show dev eth0 | awk '/inet / {print $2}' | cut -d/ -f1)
+      [ -n "$ip_in_lxc" ] && break
+      sleep 1
+    done
+
+    if [ -z "$ip_in_lxc" ]; then
+      msg_error "No IP assigned to CT $CTID after 20s"
+      exit 1
+    fi
+
+    # Try to reach gateway
+    gw_ok=0
+    for i in {1..10}; do
+      if pct exec "$CTID" -- ping -c1 -W1 "${GATEWAY:-8.8.8.8}" >/dev/null 2>&1; then
+        gw_ok=1
+        break
+      fi
+      sleep 1
+    done
+
+    if [ "$gw_ok" -eq 1 ]; then
+      msg_ok "Network in LXC is reachable (IP $ip_in_lxc)"
+    else
+      msg_warn "Network reachable but gateway check failed"
+    fi
+  fi
+
+  # Function to get correct GID inside container (falls back to 44 = video)
+  get_container_gid() {
+    local group="$1"
+    local gid
+    gid=$(pct exec "$CTID" -- getent group "$group" 2>/dev/null | cut -d: -f3)
+    echo "${gid:-44}" # Default to 44 if not found
+  }
+
+  fix_gpu_gids
+
+  # Continue with standard container setup
+  msg_info "Customizing LXC Container"
+
+  if [ "$var_os" == "alpine" ]; then
+    sleep 3
+    # FIX: was 'cat </etc/apk/repositories' (mangled heredoc) which only read
+    # the file; write the latest-stable mirror list instead
+    pct exec "$CTID" -- /bin/sh -c 'cat <<EOF >/etc/apk/repositories
+http://dl-cdn.alpinelinux.org/alpine/latest-stable/main
+http://dl-cdn.alpinelinux.org/alpine/latest-stable/community
+EOF'
+    pct exec "$CTID" -- ash -c "apk add bash newt curl openssh nano mc ncurses jq >/dev/null"
+  else
+    sleep 3
+    # Enable the host's LANG in the container and generate locales
+    pct exec "$CTID" -- bash -c "sed -i '/$LANG/ s/^# //' /etc/locale.gen"
+    pct exec "$CTID" -- bash -c "locale_line=\$(grep -v '^#' /etc/locale.gen | grep -E '^[a-zA-Z]' | awk '{print \$1}' | head -n 1) && \
+      echo LANG=\$locale_line >/etc/default/locale && \
+      locale-gen >/dev/null && \
+      export LANG=\$locale_line"
+
+    if [[ -z "${tz:-}" ]]; then
+      tz=$(timedatectl show --property=Timezone --value 2>/dev/null || echo "Etc/UTC")
+    fi
+
+    if pct exec "$CTID" -- test -e "/usr/share/zoneinfo/$tz"; then
+      pct exec "$CTID" -- bash -c "tz='$tz'; echo \"\$tz\" >/etc/timezone && ln -sf \"/usr/share/zoneinfo/\$tz\" /etc/localtime"
+    else
+      msg_warn "Skipping timezone setup – zone '$tz' not found in container"
+    fi
+
+    pct exec "$CTID" -- bash -c "apt-get update >/dev/null && apt-get install -y sudo curl mc gnupg2 jq >/dev/null" || {
+      msg_error "apt-get base packages installation failed"
+      exit 1
+    }
+  fi
+
+  msg_ok "Customized LXC Container"
+
+  # Verify GPU access if enabled
+  if [[ "${ENABLE_VAAPI:-0}" == "1" ]] && [ "$var_os" != "alpine" ]; then
+    pct exec "$CTID" -- bash -c "vainfo >/dev/null 2>&1" &&
+      msg_ok "VAAPI verified working" ||
+      msg_warn "VAAPI verification failed - may need additional configuration"
+  fi
+
+  if [[ "${ENABLE_NVIDIA:-0}" == "1" ]] && [ "$var_os" != "alpine" ]; then
+    pct exec "$CTID" -- bash -c "nvidia-smi >/dev/null 2>&1" &&
+      msg_ok "NVIDIA verified working" ||
+      msg_warn "NVIDIA verification failed - may need additional configuration"
+  fi
+
+  # Install SSH keys
+  install_ssh_keys_into_ct
+
+  # Run application installer.
+  # FIX: '|| exit $?' preserves the installer's real exit code; the original
+  # 'if ! ...; then exit $?' always exited 0 because $? was the negated status
+  lxc-attach -n "$CTID" -- bash -c "$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/install/${var_install}.sh)" || exit $?
+}
+
+destroy_lxc() {
+ if [[ -z "$CT_ID" ]]; then
+ msg_error "No CT_ID found. Nothing to remove."
+ return 1
+ fi
+
+ # Abbruch bei Ctrl-C / Ctrl-D / ESC
+ trap 'echo; msg_error "Aborted by user (SIGINT/SIGQUIT)"; return 130' INT QUIT
+
+ local prompt
+ if ! read -rp "Remove this Container? " prompt; then
+ # read gibt != 0 zurück bei Ctrl-D/ESC
+ msg_error "Aborted input (Ctrl-D/ESC)"
+ return 130
+ fi
+
+ case "${prompt,,}" in
+ y | yes)
+ if pct stop "$CT_ID" &>/dev/null && pct destroy "$CT_ID" &>/dev/null; then
+ msg_ok "Removed Container $CT_ID"
+ else
+ msg_error "Failed to remove Container $CT_ID"
+ return 1
+ fi
+ ;;
+ "" | n | no)
+ msg_info "Container was not removed."
+ ;;
+ *)
+ msg_warn "Invalid response. Container was not removed."
+ ;;
+ esac
+}
+
+# ------------------------------------------------------------------------------
+# Storage discovery / selection helpers (ported from create_lxc.sh)
+# ------------------------------------------------------------------------------
+# resolve_storage_preselect <template|container> <storage-id>
+# Validates that the preselected storage exists and supports the required
+# content type; on success sets STORAGE_RESULT and STORAGE_INFO and returns 0.
+resolve_storage_preselect() {
+  local class="$1" preselect="$2"
+  local required_content
+  case "$class" in
+  template) required_content="vztmpl" ;;
+  container) required_content="rootdir" ;;
+  *) return 1 ;;
+  esac
+  [[ -n "$preselect" ]] || return 1
+
+  # Reject storages that do not advertise the required content type
+  if ! pvesm status -content "$required_content" | awk 'NR>1{print $1}' | grep -qx -- "$preselect"; then
+    msg_warn "Preselected storage '${preselect}' does not support content '${required_content}' (or not found)"
+    return 1
+  fi
+
+  # Summarize free/used space for display (pvesm columns: name type status total used free)
+  local line
+  line="$(pvesm status | awk -v s="$preselect" 'NR>1 && $1==s {print $0}')"
+  if [[ -z "$line" ]]; then
+    STORAGE_INFO="n/a"
+  else
+    local used free
+    read -r _ _ _ _ used free _ <<<"$line"
+    if command -v numfmt >/dev/null 2>&1; then
+      local used_h free_h
+      used_h="$(numfmt --to=iec --suffix=B --format %.1f "$used" 2>/dev/null || echo "$used")"
+      free_h="$(numfmt --to=iec --suffix=B --format %.1f "$free" 2>/dev/null || echo "$free")"
+      STORAGE_INFO="Free: ${free_h} Used: ${used_h}"
+    else
+      STORAGE_INFO="Free: ${free} Used: ${used}"
+    fi
+  fi
+  STORAGE_RESULT="$preselect"
+  return 0
+}
+
+# ------------------------------------------------------------------------------
+# fix_gpu_gids()
+#
+# - No-op unless GPU passthrough was configured earlier (GPU_TYPE exported)
+# - Reads the real 'video' and 'render' GIDs from inside the container,
+#   creating the groups if missing (fallbacks: video=44, render=104)
+# - If the GIDs differ from the defaults, rewrites the devN entries in the
+#   CT config (with a .bak backup) and restarts the container
+# - For privileged CTs, additionally fixes ownership/mode of /dev/dri nodes
+# ------------------------------------------------------------------------------
+fix_gpu_gids() {
+ if [[ -z "${GPU_TYPE:-}" ]]; then
+ return 0
+ fi
+
+ msg_info "Detecting and setting correct GPU group IDs"
+
+ # Determine the actual GIDs from inside the container
+ local video_gid=$(pct exec "$CTID" -- sh -c "getent group video 2>/dev/null | cut -d: -f3")
+ local render_gid=$(pct exec "$CTID" -- sh -c "getent group render 2>/dev/null | cut -d: -f3")
+
+ # Fallbacks if the groups do not exist
+ if [[ -z "$video_gid" ]]; then
+ # Try to create the video group
+ pct exec "$CTID" -- sh -c "groupadd -r video 2>/dev/null || true"
+ video_gid=$(pct exec "$CTID" -- sh -c "getent group video 2>/dev/null | cut -d: -f3")
+ [[ -z "$video_gid" ]] && video_gid="44" # Ultimate fallback
+ fi
+
+ if [[ -z "$render_gid" ]]; then
+ # Try to create the render group
+ pct exec "$CTID" -- sh -c "groupadd -r render 2>/dev/null || true"
+ render_gid=$(pct exec "$CTID" -- sh -c "getent group render 2>/dev/null | cut -d: -f3")
+ [[ -z "$render_gid" ]] && render_gid="104" # Ultimate fallback
+ fi
+
+ msg_info "Container GIDs detected - video:${video_gid}, render:${render_gid}"
+
+ # Check whether the GIDs differ from the defaults (video=44, render=104)
+ local need_update=0
+ if [[ "$video_gid" != "44" ]] || [[ "$render_gid" != "104" ]]; then
+ need_update=1
+ fi
+
+ if [[ $need_update -eq 1 ]]; then
+ msg_info "Updating device GIDs in container config"
+
+ # Stop the container for the config update
+ pct stop "$CTID" >/dev/null 2>&1
+
+ # Update the dev entries with correct GIDs
+ # Back up the config first (note: the .bak file is left in place)
+ cp "$LXC_CONFIG" "${LXC_CONFIG}.bak"
+
+ # Parse and update each dev entry
+ while IFS= read -r line; do
+ if [[ "$line" =~ ^dev[0-9]+: ]]; then
+ # Extract device path
+ local device_path=$(echo "$line" | sed -E 's/^dev[0-9]+: ([^,]+).*/\1/')
+ local dev_num=$(echo "$line" | sed -E 's/^(dev[0-9]+):.*/\1/')
+
+ if [[ "$device_path" =~ renderD ]]; then
+ # RenderD device - use render GID
+ echo "${dev_num}: ${device_path},gid=${render_gid}"
+ elif [[ "$device_path" =~ card ]]; then
+ # Card device - use video GID
+ echo "${dev_num}: ${device_path},gid=${video_gid}"
+ else
+ # Keep original line
+ echo "$line"
+ fi
+ else
+ # Keep non-dev lines
+ echo "$line"
+ fi
+ done <"$LXC_CONFIG" >"${LXC_CONFIG}.new"
+
+ mv "${LXC_CONFIG}.new" "$LXC_CONFIG"
+
+ # Restart the container so the new dev entries take effect
+ pct start "$CTID" >/dev/null 2>&1
+ sleep 3
+
+ msg_ok "Device GIDs updated successfully"
+ else
+ msg_ok "Device GIDs are already correct"
+ fi
+ # Privileged CTs only: align group ownership and mode of /dev/dri nodes
+ # inside the container with the detected GIDs (best-effort, errors ignored)
+ if [[ "$CT_TYPE" == "0" ]]; then
+ pct exec "$CTID" -- bash -c "
+ if [ -d /dev/dri ]; then
+ for dev in /dev/dri/*; do
+ if [ -e \"\$dev\" ]; then
+ if [[ \"\$dev\" =~ renderD ]]; then
+ chgrp ${render_gid} \"\$dev\" 2>/dev/null || true
+ else
+ chgrp ${video_gid} \"\$dev\" 2>/dev/null || true
+ fi
+ chmod 660 \"\$dev\" 2>/dev/null || true
+ fi
+ done
+ fi
+ " >/dev/null 2>&1
+ fi
+}
+
+# NVIDIA-spezific check on host
+check_nvidia_host_setup() {
+ if ! command -v nvidia-smi >/dev/null 2>&1; then
+ msg_warn "NVIDIA GPU detected but nvidia-smi not found on host"
+ msg_warn "Please install NVIDIA drivers on host first."
+ #echo " 1. Download driver: wget https://us.download.nvidia.com/XFree86/Linux-x86_64/550.127.05/NVIDIA-Linux-x86_64-550.127.05.run"
+ #echo " 2. Install: ./NVIDIA-Linux-x86_64-550.127.05.run --dkms"
+ #echo " 3. Verify: nvidia-smi"
+ return 1
+ fi
+
+ # check if nvidia-smi works
+ if ! nvidia-smi >/dev/null 2>&1; then
+ msg_warn "nvidia-smi installed but not working. Driver issue?"
+ return 1
+ fi
+
+ return 0
+}
+
+check_storage_support() {
+ local CONTENT="$1" VALID=0
+ while IFS= read -r line; do
+ local STORAGE_NAME
+ STORAGE_NAME=$(awk '{print $1}' <<<"$line")
+ [[ -n "$STORAGE_NAME" ]] && VALID=1
+ done < <(pvesm status -content "$CONTENT" 2>/dev/null | awk 'NR>1')
+ [[ $VALID -eq 1 ]]
+}
+
+# ------------------------------------------------------------------------------
+# select_storage <class>
+#
+# - Interactive storage-pool picker for a content class
+#   (container/template/iso/images/backup/snippets)
+# - Auto-selects without prompting when exactly one storage matches
+# - Sets STORAGE_RESULT (storage id) and STORAGE_INFO (free/used summary)
+# - Returns 1 on invalid class, 2 when no storage matches; calls exit_script
+#   if the whiptail dialog is cancelled
+# ------------------------------------------------------------------------------
+select_storage() {
+ local CLASS=$1 CONTENT CONTENT_LABEL
+ case $CLASS in
+ container)
+ CONTENT='rootdir'
+ CONTENT_LABEL='Container'
+ ;;
+ template)
+ CONTENT='vztmpl'
+ CONTENT_LABEL='Container template'
+ ;;
+ iso)
+ CONTENT='iso'
+ CONTENT_LABEL='ISO image'
+ ;;
+ images)
+ CONTENT='images'
+ CONTENT_LABEL='VM Disk image'
+ ;;
+ backup)
+ CONTENT='backup'
+ CONTENT_LABEL='Backup'
+ ;;
+ snippets)
+ CONTENT='snippets'
+ CONTENT_LABEL='Snippets'
+ ;;
+ *)
+ msg_error "Invalid storage class '$CLASS'"
+ return 1
+ ;;
+ esac
+
+ declare -A STORAGE_MAP
+ local -a MENU=()
+ local COL_WIDTH=0
+
+ # Build whiptail radiolist triplets ("display" "info" "OFF") from pvesm
+ # status; COL_WIDTH tracks the widest display string for dialog sizing
+ while read -r TAG TYPE _ TOTAL USED FREE _; do
+ [[ -n "$TAG" && -n "$TYPE" ]] || continue
+ local DISPLAY="${TAG} (${TYPE})"
+ local USED_FMT=$(numfmt --to=iec --from-unit=K --format %.1f <<<"$USED")
+ local FREE_FMT=$(numfmt --to=iec --from-unit=K --format %.1f <<<"$FREE")
+ local INFO="Free: ${FREE_FMT}B Used: ${USED_FMT}B"
+ STORAGE_MAP["$DISPLAY"]="$TAG"
+ MENU+=("$DISPLAY" "$INFO" "OFF")
+ ((${#DISPLAY} > COL_WIDTH)) && COL_WIDTH=${#DISPLAY}
+ done < <(pvesm status -content "$CONTENT" | awk 'NR>1')
+
+ if [[ ${#MENU[@]} -eq 0 ]]; then
+ msg_error "No storage found for content type '$CONTENT'."
+ return 2
+ fi
+
+ # Exactly one storage (one triplet): pick it without prompting
+ if [[ $((${#MENU[@]} / 3)) -eq 1 ]]; then
+ STORAGE_RESULT="${STORAGE_MAP[${MENU[0]}]}"
+ STORAGE_INFO="${MENU[1]}"
+ return 0
+ fi
+
+ # Loop until a valid entry is chosen; cancel exits the script
+ local WIDTH=$((COL_WIDTH + 42))
+ while true; do
+ local DISPLAY_SELECTED
+ DISPLAY_SELECTED=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \
+ --title "Storage Pools" \
+ --radiolist "Which storage pool for ${CONTENT_LABEL,,}?\n(Spacebar to select)" \
+ 16 "$WIDTH" 6 "${MENU[@]}" 3>&1 1>&2 2>&3) || { exit_script; }
+
+ # Strip trailing whitespace whiptail may append to the selection
+ DISPLAY_SELECTED=$(sed 's/[[:space:]]*$//' <<<"$DISPLAY_SELECTED")
+ if [[ -z "$DISPLAY_SELECTED" || -z "${STORAGE_MAP[$DISPLAY_SELECTED]+_}" ]]; then
+ whiptail --msgbox "No valid storage selected. Please try again." 8 58
+ continue
+ fi
+ STORAGE_RESULT="${STORAGE_MAP[$DISPLAY_SELECTED]}"
+ # Recover the matching info string from the menu triplets
+ for ((i = 0; i < ${#MENU[@]}; i += 3)); do
+ if [[ "${MENU[$i]}" == "$DISPLAY_SELECTED" ]]; then
+ STORAGE_INFO="${MENU[$i + 1]}"
+ break
+ fi
+ done
+ return 0
+ done
+}
+
+create_lxc_container() {
+ # ------------------------------------------------------------------------------
+ # Optional verbose mode (debug tracing)
+ # ------------------------------------------------------------------------------
+ if [[ "${CREATE_LXC_VERBOSE:-no}" == "yes" ]]; then set -x; fi
+
+ # ------------------------------------------------------------------------------
+ # Helpers (dynamic versioning / template parsing)
+ # ------------------------------------------------------------------------------
+ pkg_ver() { dpkg-query -W -f='${Version}\n' "$1" 2>/dev/null || echo ""; }
+ pkg_cand() { apt-cache policy "$1" 2>/dev/null | awk '/Candidate:/ {print $2}'; }
+
+ ver_ge() { dpkg --compare-versions "$1" ge "$2"; }
+ ver_gt() { dpkg --compare-versions "$1" gt "$2"; }
+ ver_lt() { dpkg --compare-versions "$1" lt "$2"; }
+
+ # Extract Debian OS minor from template name: debian-13-standard_13.1-1_amd64.tar.zst => "13.1"
+ parse_template_osver() { sed -n 's/.*_\([0-9][0-9]*\(\.[0-9]\+\)\?\)-.*/\1/p' <<<"$1"; }
+
+ # Offer upgrade for pve-container/lxc-pve if candidate > installed; optional auto-retry pct create
+ # Returns:
+ # 0 = no upgrade needed
+ # 1 = upgraded (and if do_retry=yes and retry succeeded, creation done)
+ # 2 = user declined
+ # 3 = upgrade attempted but failed OR retry failed
+ offer_lxc_stack_upgrade_and_maybe_retry() {
+ local do_retry="${1:-no}" # yes|no
+ local _pvec_i _pvec_c _lxcp_i _lxcp_c need=0
+
+ _pvec_i="$(pkg_ver pve-container)"
+ _lxcp_i="$(pkg_ver lxc-pve)"
+ _pvec_c="$(pkg_cand pve-container)"
+ _lxcp_c="$(pkg_cand lxc-pve)"
+
+ if [[ -n "$_pvec_c" && "$_pvec_c" != "none" ]]; then
+ ver_gt "$_pvec_c" "${_pvec_i:-0}" && need=1
+ fi
+ if [[ -n "$_lxcp_c" && "$_lxcp_c" != "none" ]]; then
+ ver_gt "$_lxcp_c" "${_lxcp_i:-0}" && need=1
+ fi
+ if [[ $need -eq 0 ]]; then
+ msg_debug "No newer candidate for pve-container/lxc-pve (installed=$_pvec_i/$_lxcp_i, cand=$_pvec_c/$_lxcp_c)"
+ return 0
+ fi
+
+ echo
+ echo "An update for the Proxmox LXC stack is available:"
+ echo " pve-container: installed=${_pvec_i:-n/a} candidate=${_pvec_c:-n/a}"
+ echo " lxc-pve : installed=${_lxcp_i:-n/a} candidate=${_lxcp_c:-n/a}"
+ echo
+ read -rp "Do you want to upgrade now? [y/N] " _ans
+ case "${_ans,,}" in
+ y | yes)
+ msg_info "Upgrading Proxmox LXC stack (pve-container, lxc-pve)"
+ if apt-get update -qq >/dev/null && apt-get install -y --only-upgrade pve-container lxc-pve >/dev/null; then
+ msg_ok "LXC stack upgraded."
+ if [[ "$do_retry" == "yes" ]]; then
+ msg_info "Retrying container creation after upgrade"
+ if pct create "$CTID" "${TEMPLATE_STORAGE}:vztmpl/${TEMPLATE}" "${PCT_OPTIONS[@]}" >>"$LOGFILE" 2>&1; then
+ msg_ok "Container created successfully after upgrade."
+ return 0
+ else
+ msg_error "pct create still failed after upgrade. See $LOGFILE"
+ return 3
+ fi
+ fi
+ return 1
+ else
+ msg_error "Upgrade failed. Please check APT output."
+ return 3
+ fi
+ ;;
+ *) return 2 ;;
+ esac
+ }
+
+ # ------------------------------------------------------------------------------
+ # Required input variables
+ # ------------------------------------------------------------------------------
+ [[ "${CTID:-}" ]] || {
+ msg_error "You need to set 'CTID' variable."
+ exit 203
+ }
+ [[ "${PCT_OSTYPE:-}" ]] || {
+ msg_error "You need to set 'PCT_OSTYPE' variable."
+ exit 204
+ }
+
+ msg_debug "CTID=$CTID"
+ msg_debug "PCT_OSTYPE=$PCT_OSTYPE"
+ msg_debug "PCT_OSVERSION=${PCT_OSVERSION:-default}"
+
+ # ID checks
+ [[ "$CTID" -ge 100 ]] || {
+ msg_error "ID cannot be less than 100."
+ exit 205
+ }
+ if qm status "$CTID" &>/dev/null || pct status "$CTID" &>/dev/null; then
+ echo -e "ID '$CTID' is already in use."
+ unset CTID
+ msg_error "Cannot use ID that is already in use."
+ exit 206
+ fi
+
+ # Storage capability check
+ check_storage_support "rootdir" || {
+ msg_error "No valid storage found for 'rootdir' [Container]"
+ exit 1
+ }
+ check_storage_support "vztmpl" || {
+ msg_error "No valid storage found for 'vztmpl' [Template]"
+ exit 1
+ }
+
+ # Template storage selection
+ if resolve_storage_preselect template "${TEMPLATE_STORAGE:-}"; then
+ TEMPLATE_STORAGE="$STORAGE_RESULT"
+ TEMPLATE_STORAGE_INFO="$STORAGE_INFO"
+ msg_ok "Storage ${BL}${TEMPLATE_STORAGE}${CL} (${TEMPLATE_STORAGE_INFO}) [Template]"
+ else
+ while true; do
+ if [[ -z "${var_template_storage:-}" ]]; then
+ if select_storage template; then
+ TEMPLATE_STORAGE="$STORAGE_RESULT"
+ TEMPLATE_STORAGE_INFO="$STORAGE_INFO"
+ msg_ok "Storage ${BL}${TEMPLATE_STORAGE}${CL} (${TEMPLATE_STORAGE_INFO}) [Template]"
+ break
+ fi
+ fi
+ done
+ fi
+
+ # Container storage selection
+ if resolve_storage_preselect container "${CONTAINER_STORAGE:-}"; then
+ CONTAINER_STORAGE="$STORAGE_RESULT"
+ CONTAINER_STORAGE_INFO="$STORAGE_INFO"
+ msg_ok "Storage ${BL}${CONTAINER_STORAGE}${CL} (${CONTAINER_STORAGE_INFO}) [Container]"
+ else
+ if [[ -z "${var_container_storage:-}" ]]; then
+ if select_storage container; then
+ CONTAINER_STORAGE="$STORAGE_RESULT"
+ CONTAINER_STORAGE_INFO="$STORAGE_INFO"
+ msg_ok "Storage ${BL}${CONTAINER_STORAGE}${CL} (${CONTAINER_STORAGE_INFO}) [Container]"
+ fi
+ fi
+ fi
+
+ # Validate content types
+ msg_info "Validating content types of storage '$CONTAINER_STORAGE'"
+ STORAGE_CONTENT=$(grep -A4 -E "^(zfspool|dir|lvmthin|lvm): $CONTAINER_STORAGE" /etc/pve/storage.cfg | grep content | awk '{$1=""; print $0}' | xargs)
+ msg_debug "Storage '$CONTAINER_STORAGE' has content types: $STORAGE_CONTENT"
+ grep -qw "rootdir" <<<"$STORAGE_CONTENT" || {
+ msg_error "Storage '$CONTAINER_STORAGE' does not support 'rootdir'. Cannot create LXC."
+ exit 217
+ }
+ $STD msg_ok "Storage '$CONTAINER_STORAGE' supports 'rootdir'"
+
+ msg_info "Validating content types of template storage '$TEMPLATE_STORAGE'"
+ TEMPLATE_CONTENT=$(grep -A4 -E "^[^:]+: $TEMPLATE_STORAGE" /etc/pve/storage.cfg | grep content | awk '{$1=""; print $0}' | xargs)
+ msg_debug "Template storage '$TEMPLATE_STORAGE' has content types: $TEMPLATE_CONTENT"
+ if ! grep -qw "vztmpl" <<<"$TEMPLATE_CONTENT"; then
+ msg_warn "Template storage '$TEMPLATE_STORAGE' does not declare 'vztmpl'. This may cause pct create to fail."
+ else
+ $STD msg_ok "Template storage '$TEMPLATE_STORAGE' supports 'vztmpl'"
+ fi
+
+ # Free space check
+ STORAGE_FREE=$(pvesm status | awk -v s="$CONTAINER_STORAGE" '$1 == s { print $6 }')
+ REQUIRED_KB=$((${PCT_DISK_SIZE:-8} * 1024 * 1024))
+ [[ "$STORAGE_FREE" -ge "$REQUIRED_KB" ]] || {
+ msg_error "Not enough space on '$CONTAINER_STORAGE'. Needed: ${PCT_DISK_SIZE:-8}G."
+ exit 214
+ }
+
+ # Cluster quorum (if cluster)
+ if [[ -f /etc/pve/corosync.conf ]]; then
+ msg_info "Checking cluster quorum"
+ if ! pvecm status | awk -F':' '/^Quorate/ { exit ($2 ~ /Yes/) ? 0 : 1 }'; then
+ msg_error "Cluster is not quorate. Start all nodes or configure quorum device (QDevice)."
+ exit 210
+ fi
+ msg_ok "Cluster is quorate"
+ fi
+
+ # ------------------------------------------------------------------------------
+ # Template discovery & validation
+ # ------------------------------------------------------------------------------
+ TEMPLATE_SEARCH="${PCT_OSTYPE}-${PCT_OSVERSION:-}"
+ case "$PCT_OSTYPE" in
+ debian | ubuntu) TEMPLATE_PATTERN="-standard_" ;;
+ alpine | fedora | rocky | centos) TEMPLATE_PATTERN="-default_" ;;
+ *) TEMPLATE_PATTERN="" ;;
+ esac
+
+ msg_info "Searching for template '$TEMPLATE_SEARCH'"
+
+ # Build regex patterns outside awk/grep for clarity
+ SEARCH_PATTERN="^${TEMPLATE_SEARCH}"
+
+ #echo "[DEBUG] TEMPLATE_SEARCH='$TEMPLATE_SEARCH'"
+ #echo "[DEBUG] SEARCH_PATTERN='$SEARCH_PATTERN'"
+ #echo "[DEBUG] TEMPLATE_PATTERN='$TEMPLATE_PATTERN'"
+
+ mapfile -t LOCAL_TEMPLATES < <(
+ pveam list "$TEMPLATE_STORAGE" 2>/dev/null |
+ awk -v search="${SEARCH_PATTERN}" -v pattern="${TEMPLATE_PATTERN}" '$1 ~ search && $1 ~ pattern {print $1}' |
+ sed 's|.*/||' | sort -t - -k 2 -V
+ )
+
+ pveam update >/dev/null 2>&1 || msg_warn "Could not update template catalog (pveam update failed)."
+
+ #echo "[DEBUG] pveam available output (first 5 lines with .tar files):"
+ #pveam available -section system 2>/dev/null | grep -E '\.(tar\.zst|tar\.xz|tar\.gz)$' | head -5 | sed 's/^/ /'
+
+ set +u
+ mapfile -t ONLINE_TEMPLATES < <(pveam available -section system 2>/dev/null | grep -E '\.(tar\.zst|tar\.xz|tar\.gz)$' | awk '{print $2}' | grep -E "${SEARCH_PATTERN}.*${TEMPLATE_PATTERN}" | sort -t - -k 2 -V 2>/dev/null || true)
+ #echo "[DEBUG] After filtering: ${#ONLINE_TEMPLATES[@]} online templates found"
+ set -u
+ if [[ ${#ONLINE_TEMPLATES[@]} -gt 0 ]]; then
+ #echo "[DEBUG] Online templates:"
+ for tmpl in "${ONLINE_TEMPLATES[@]}"; do
+ echo " - $tmpl"
+ done
+ fi
+
+ ONLINE_TEMPLATE=""
+ [[ ${#ONLINE_TEMPLATES[@]} -gt 0 ]] && ONLINE_TEMPLATE="${ONLINE_TEMPLATES[-1]}"
+
+ #msg_debug "SEARCH_PATTERN='${SEARCH_PATTERN}' TEMPLATE_PATTERN='${TEMPLATE_PATTERN}'"
+ #msg_debug "Found ${#LOCAL_TEMPLATES[@]} local templates, ${#ONLINE_TEMPLATES[@]} online templates"
+ if [[ ${#ONLINE_TEMPLATES[@]} -gt 0 ]]; then
+ #msg_debug "First 3 online templates:"
+ count=0
+ for idx in "${!ONLINE_TEMPLATES[@]}"; do
+ #msg_debug " [$idx]: ${ONLINE_TEMPLATES[$idx]}"
+ ((count++))
+ [[ $count -ge 3 ]] && break
+ done
+ fi
+ #msg_debug "ONLINE_TEMPLATE='$ONLINE_TEMPLATE'"
+
+ if [[ ${#LOCAL_TEMPLATES[@]} -gt 0 ]]; then
+ TEMPLATE="${LOCAL_TEMPLATES[-1]}"
+ TEMPLATE_SOURCE="local"
+ else
+ TEMPLATE="$ONLINE_TEMPLATE"
+ TEMPLATE_SOURCE="online"
+ fi
+
+ # If still no template, try to find alternatives
+ if [[ -z "$TEMPLATE" ]]; then
+ echo ""
+ echo "[DEBUG] No template found for ${PCT_OSTYPE} ${PCT_OSVERSION}, searching for alternatives..."
+
+ # Get all available versions for this OS type
+ mapfile -t AVAILABLE_VERSIONS < <(
+ pveam available -section system 2>/dev/null |
+ grep -E '\.(tar\.zst|tar\.xz|tar\.gz)$' |
+ awk -F'\t' '{print $1}' |
+ grep "^${PCT_OSTYPE}-" |
+ sed -E "s/.*${PCT_OSTYPE}-([0-9]+(\.[0-9]+)?).*/\1/" |
+ sort -u -V 2>/dev/null
+ )
+
+ if [[ ${#AVAILABLE_VERSIONS[@]} -gt 0 ]]; then
+ echo ""
+ echo "${BL}Available ${PCT_OSTYPE} versions:${CL}"
+ for i in "${!AVAILABLE_VERSIONS[@]}"; do
+ echo " [$((i + 1))] ${AVAILABLE_VERSIONS[$i]}"
+ done
+ echo ""
+ read -p "Select version [1-${#AVAILABLE_VERSIONS[@]}] or press Enter to cancel: " choice
+
+ if [[ "$choice" =~ ^[0-9]+$ ]] && [[ "$choice" -ge 1 ]] && [[ "$choice" -le ${#AVAILABLE_VERSIONS[@]} ]]; then
+ PCT_OSVERSION="${AVAILABLE_VERSIONS[$((choice - 1))]}"
+ TEMPLATE_SEARCH="${PCT_OSTYPE}-${PCT_OSVERSION}"
+ SEARCH_PATTERN="^${TEMPLATE_SEARCH}-"
+
+ #echo "[DEBUG] Retrying with version: $PCT_OSVERSION"
+
+ mapfile -t ONLINE_TEMPLATES < <(
+ pveam available -section system 2>/dev/null |
+ grep -E '\.(tar\.zst|tar\.xz|tar\.gz)$' |
+ awk -F'\t' '{print $1}' |
+ grep -E "${SEARCH_PATTERN}.*${TEMPLATE_PATTERN}" |
+ sort -t - -k 2 -V 2>/dev/null || true
+ )
+
+ if [[ ${#ONLINE_TEMPLATES[@]} -gt 0 ]]; then
+ TEMPLATE="${ONLINE_TEMPLATES[-1]}"
+ TEMPLATE_SOURCE="online"
+ #echo "[DEBUG] Found alternative: $TEMPLATE"
+ else
+ msg_error "No templates available for ${PCT_OSTYPE} ${PCT_OSVERSION}"
+ exit 225
+ fi
+ else
+ msg_info "Installation cancelled"
+ exit 0
+ fi
+ else
+ msg_error "No ${PCT_OSTYPE} templates available at all"
+ exit 225
+ fi
+ fi
+
+ #echo "[DEBUG] Selected TEMPLATE='$TEMPLATE' SOURCE='$TEMPLATE_SOURCE'"
+ #msg_debug "Selected TEMPLATE='$TEMPLATE' SOURCE='$TEMPLATE_SOURCE'"
+
+ TEMPLATE_PATH="$(pvesm path $TEMPLATE_STORAGE:vztmpl/$TEMPLATE 2>/dev/null || true)"
+ if [[ -z "$TEMPLATE_PATH" ]]; then
+ TEMPLATE_BASE=$(awk -v s="$TEMPLATE_STORAGE" '$1==s {f=1} f && /path/ {print $2; exit}' /etc/pve/storage.cfg)
+ [[ -n "$TEMPLATE_BASE" ]] && TEMPLATE_PATH="$TEMPLATE_BASE/template/cache/$TEMPLATE"
+ fi
+
+ # If we still don't have a path but have a valid template name, construct it
+ if [[ -z "$TEMPLATE_PATH" && -n "$TEMPLATE" ]]; then
+ TEMPLATE_PATH="/var/lib/vz/template/cache/$TEMPLATE"
+ fi
+
+ [[ -n "$TEMPLATE_PATH" ]] || {
+ if [[ -z "$TEMPLATE" ]]; then
+ msg_error "Template ${PCT_OSTYPE} ${PCT_OSVERSION} not available"
+
+ # Get available versions
+ mapfile -t AVAILABLE_VERSIONS < <(
+ pveam available -section system 2>/dev/null |
+ grep "^${PCT_OSTYPE}-" |
+ sed -E 's/.*'"${PCT_OSTYPE}"'-([0-9]+\.[0-9]+).*/\1/' |
+ grep -E '^[0-9]+\.[0-9]+$' |
+ sort -u -V 2>/dev/null || sort -u
+ )
+
+ if [[ ${#AVAILABLE_VERSIONS[@]} -gt 0 ]]; then
+ echo -e "\n${BL}Available versions:${CL}"
+ for i in "${!AVAILABLE_VERSIONS[@]}"; do
+ echo " [$((i + 1))] ${AVAILABLE_VERSIONS[$i]}"
+ done
+
+ echo ""
+ read -p "Select version [1-${#AVAILABLE_VERSIONS[@]}] or Enter to exit: " choice
+
+ if [[ "$choice" =~ ^[0-9]+$ ]] && [[ "$choice" -ge 1 ]] && [[ "$choice" -le ${#AVAILABLE_VERSIONS[@]} ]]; then
+ export var_version="${AVAILABLE_VERSIONS[$((choice - 1))]}"
+ export PCT_OSVERSION="$var_version"
+ msg_ok "Switched to ${PCT_OSTYPE} ${var_version}"
+
+ # Retry template search with new version
+ TEMPLATE_SEARCH="${PCT_OSTYPE}-${PCT_OSVERSION:-}"
+ SEARCH_PATTERN="^${TEMPLATE_SEARCH}-"
+
+ mapfile -t LOCAL_TEMPLATES < <(
+ pveam list "$TEMPLATE_STORAGE" 2>/dev/null |
+ awk -v search="${SEARCH_PATTERN}" -v pattern="${TEMPLATE_PATTERN}" '$1 ~ search && $1 ~ pattern {print $1}' |
+ sed 's|.*/||' | sort -t - -k 2 -V
+ )
+ mapfile -t ONLINE_TEMPLATES < <(
+ pveam available -section system 2>/dev/null |
+ grep -E '\.(tar\.zst|tar\.xz|tar\.gz)$' |
+ awk -F'\t' '{print $1}' |
+ grep -E "${SEARCH_PATTERN}.*${TEMPLATE_PATTERN}" |
+ sort -t - -k 2 -V 2>/dev/null || true
+ )
+ ONLINE_TEMPLATE=""
+ [[ ${#ONLINE_TEMPLATES[@]} -gt 0 ]] && ONLINE_TEMPLATE="${ONLINE_TEMPLATES[-1]}"
+
+ if [[ ${#LOCAL_TEMPLATES[@]} -gt 0 ]]; then
+ TEMPLATE="${LOCAL_TEMPLATES[-1]}"
+ TEMPLATE_SOURCE="local"
+ else
+ TEMPLATE="$ONLINE_TEMPLATE"
+ TEMPLATE_SOURCE="online"
+ fi
+
+ TEMPLATE_PATH="$(pvesm path $TEMPLATE_STORAGE:vztmpl/$TEMPLATE 2>/dev/null || true)"
+ if [[ -z "$TEMPLATE_PATH" ]]; then
+ TEMPLATE_BASE=$(awk -v s="$TEMPLATE_STORAGE" '$1==s {f=1} f && /path/ {print $2; exit}' /etc/pve/storage.cfg)
+ [[ -n "$TEMPLATE_BASE" ]] && TEMPLATE_PATH="$TEMPLATE_BASE/template/cache/$TEMPLATE"
+ fi
+
+ # If we still don't have a path but have a valid template name, construct it
+ if [[ -z "$TEMPLATE_PATH" && -n "$TEMPLATE" ]]; then
+ TEMPLATE_PATH="/var/lib/vz/template/cache/$TEMPLATE"
+ fi
+
+ [[ -n "$TEMPLATE_PATH" ]] || {
+ msg_error "Template still not found after version change"
+ exit 220
+ }
+ else
+ msg_info "Installation cancelled"
+ exit 1
+ fi
+ else
+ msg_error "No ${PCT_OSTYPE} templates available"
+ exit 220
+ fi
+ fi
+ }
+
+ # Validate that we found a template
+ if [[ -z "$TEMPLATE" ]]; then
+ msg_error "No template found for ${PCT_OSTYPE} ${PCT_OSVERSION}"
+ msg_info "Please check:"
+ msg_info " - Is pveam catalog available? (run: pveam available -section system)"
+ msg_info " - Does the template exist for your OS version?"
+ exit 225
+ fi
+
+ msg_ok "Template ${BL}$TEMPLATE${CL} [$TEMPLATE_SOURCE]"
+ msg_debug "Resolved TEMPLATE_PATH=$TEMPLATE_PATH"
+
+ NEED_DOWNLOAD=0
+ if [[ ! -f "$TEMPLATE_PATH" ]]; then
+ msg_info "Template not present locally – will download."
+ NEED_DOWNLOAD=1
+ elif [[ ! -r "$TEMPLATE_PATH" ]]; then
+ msg_error "Template file exists but is not readable – check permissions."
+ exit 221
+ elif [[ "$(stat -c%s "$TEMPLATE_PATH")" -lt 1000000 ]]; then
+ if [[ -n "$ONLINE_TEMPLATE" ]]; then
+ msg_warn "Template file too small (<1MB) – re-downloading."
+ NEED_DOWNLOAD=1
+ else
+ msg_warn "Template looks too small, but no online version exists. Keeping local file."
+ fi
+ elif ! tar -tf "$TEMPLATE_PATH" &>/dev/null; then
+ if [[ -n "$ONLINE_TEMPLATE" ]]; then
+ msg_warn "Template appears corrupted – re-downloading."
+ NEED_DOWNLOAD=1
+ else
+ msg_warn "Template appears corrupted, but no online version exists. Keeping local file."
+ fi
+ else
+ $STD msg_ok "Template $TEMPLATE is present and valid."
+ fi
+
+ if [[ "$TEMPLATE_SOURCE" == "local" && -n "$ONLINE_TEMPLATE" && "$TEMPLATE" != "$ONLINE_TEMPLATE" ]]; then
+ msg_warn "Local template is outdated: $TEMPLATE (latest available: $ONLINE_TEMPLATE)"
+ if whiptail --yesno "A newer template is available:\n$ONLINE_TEMPLATE\n\nDo you want to download and use it instead?" 12 70; then
+ TEMPLATE="$ONLINE_TEMPLATE"
+ NEED_DOWNLOAD=1
+ else
+ msg_info "Continuing with local template $TEMPLATE"
+ fi
+ fi
+
+ if [[ "$NEED_DOWNLOAD" -eq 1 ]]; then
+ [[ -f "$TEMPLATE_PATH" ]] && rm -f "$TEMPLATE_PATH"
+ for attempt in {1..3}; do
+ msg_info "Attempt $attempt: Downloading template $TEMPLATE to $TEMPLATE_STORAGE"
+ if pveam download "$TEMPLATE_STORAGE" "$TEMPLATE" >/dev/null 2>&1; then
+ msg_ok "Template download successful."
+ break
+ fi
+ if [[ $attempt -eq 3 ]]; then
+ msg_error "Failed after 3 attempts. Please check network access, permissions, or manually run:\n pveam download $TEMPLATE_STORAGE $TEMPLATE"
+ exit 222
+ fi
+ sleep $((attempt * 5))
+ done
+ fi
+
+ if ! pveam list "$TEMPLATE_STORAGE" 2>/dev/null | grep -q "$TEMPLATE"; then
+ msg_error "Template $TEMPLATE not available in storage $TEMPLATE_STORAGE after download."
+ exit 223
+ fi
+
+ # ------------------------------------------------------------------------------
+ # Dynamic preflight for Debian 13.x: offer upgrade if available (no hard mins)
+ # ------------------------------------------------------------------------------
+ if [[ "$PCT_OSTYPE" == "debian" ]]; then
+ OSVER="$(parse_template_osver "$TEMPLATE")"
+ if [[ -n "$OSVER" ]]; then
+      # Proactive, but without aborting – offer only
+ offer_lxc_stack_upgrade_and_maybe_retry "no" || true
+ fi
+ fi
+
+ # ------------------------------------------------------------------------------
+ # Create LXC Container
+ # ------------------------------------------------------------------------------
+ msg_info "Creating LXC container"
+
+ # Ensure subuid/subgid entries exist
+ grep -q "root:100000:65536" /etc/subuid || echo "root:100000:65536" >>/etc/subuid
+ grep -q "root:100000:65536" /etc/subgid || echo "root:100000:65536" >>/etc/subgid
+
+ # Assemble pct options
+ PCT_OPTIONS=(${PCT_OPTIONS[@]:-${DEFAULT_PCT_OPTIONS[@]}})
+ [[ " ${PCT_OPTIONS[*]} " =~ " -rootfs " ]] || PCT_OPTIONS+=(-rootfs "$CONTAINER_STORAGE:${PCT_DISK_SIZE:-8}")
+
+ # Lock by template file (avoid concurrent downloads/creates)
+ lockfile="/tmp/template.${TEMPLATE}.lock"
+ exec 9>"$lockfile" || {
+ msg_error "Failed to create lock file '$lockfile'."
+ exit 200
+ }
+ flock -w 60 9 || {
+ msg_error "Timeout while waiting for template lock."
+ exit 211
+ }
+
+ LOGFILE="/tmp/pct_create_${CTID}.log"
+ msg_debug "pct create command: pct create $CTID ${TEMPLATE_STORAGE}:vztmpl/${TEMPLATE} ${PCT_OPTIONS[*]}"
+ msg_debug "Logfile: $LOGFILE"
+
+ # First attempt
+ if ! pct create "$CTID" "${TEMPLATE_STORAGE}:vztmpl/${TEMPLATE}" "${PCT_OPTIONS[@]}" >"$LOGFILE" 2>&1; then
+ msg_error "Container creation failed on ${TEMPLATE_STORAGE}. Checking template..."
+
+ # Validate template file
+ if [[ ! -s "$TEMPLATE_PATH" || "$(stat -c%s "$TEMPLATE_PATH")" -lt 1000000 ]]; then
+ msg_warn "Template file too small or missing – re-downloading."
+ rm -f "$TEMPLATE_PATH"
+ pveam download "$TEMPLATE_STORAGE" "$TEMPLATE"
+ elif ! tar -tf "$TEMPLATE_PATH" &>/dev/null; then
+ if [[ -n "$ONLINE_TEMPLATE" ]]; then
+ msg_warn "Template appears corrupted – re-downloading."
+ rm -f "$TEMPLATE_PATH"
+ pveam download "$TEMPLATE_STORAGE" "$TEMPLATE"
+ else
+ msg_warn "Template appears corrupted, but no online version exists. Skipping re-download."
+ fi
+ fi
+
+ # Retry after repair
+ if ! pct create "$CTID" "${TEMPLATE_STORAGE}:vztmpl/${TEMPLATE}" "${PCT_OPTIONS[@]}" >>"$LOGFILE" 2>&1; then
+ # Fallback to local storage
+ if [[ "$TEMPLATE_STORAGE" != "local" ]]; then
+ msg_warn "Retrying container creation with fallback to local storage..."
+ LOCAL_TEMPLATE_PATH="/var/lib/vz/template/cache/$TEMPLATE"
+ if [[ ! -f "$LOCAL_TEMPLATE_PATH" ]]; then
+ msg_info "Downloading template to local..."
+ pveam download local "$TEMPLATE" >/dev/null 2>&1
+ fi
+ if pct create "$CTID" "local:vztmpl/${TEMPLATE}" "${PCT_OPTIONS[@]}" >>"$LOGFILE" 2>&1; then
+ msg_ok "Container successfully created using local fallback."
+ else
+ # --- Dynamic stack upgrade + auto-retry on the well-known error pattern ---
+ if grep -qiE 'unsupported .* version' "$LOGFILE"; then
+ echo
+ echo "pct reported 'unsupported ... version' – your LXC stack might be too old for this template."
+ echo "We can try to upgrade 'pve-container' and 'lxc-pve' now and retry automatically."
+ offer_lxc_stack_upgrade_and_maybe_retry "yes"
+ rc=$?
+ case $rc in
+ 0) : ;; # success - container created, continue
+ 2)
+ echo "Upgrade was declined. Please update and re-run:
+ apt update && apt install --only-upgrade pve-container lxc-pve"
+ exit 231
+ ;;
+ 3)
+ echo "Upgrade and/or retry failed. Please inspect: $LOGFILE"
+ exit 231
+ ;;
+ esac
+ else
+ msg_error "Container creation failed even with local fallback. See $LOGFILE"
+ if whiptail --yesno "pct create failed.\nDo you want to enable verbose debug mode and view detailed logs?" 12 70; then
+ set -x
+ bash -x -c "pct create $CTID local:vztmpl/${TEMPLATE} ${PCT_OPTIONS[*]}" 2>&1 | tee -a "$LOGFILE"
+ set +x
+ fi
+ exit 209
+ fi
+ fi
+ else
+ msg_error "Container creation failed on local storage. See $LOGFILE"
+ # --- Dynamic stack upgrade + auto-retry on the well-known error pattern ---
+ if grep -qiE 'unsupported .* version' "$LOGFILE"; then
+ echo
+ echo "pct reported 'unsupported ... version' – your LXC stack might be too old for this template."
+ echo "We can try to upgrade 'pve-container' and 'lxc-pve' now and retry automatically."
+ offer_lxc_stack_upgrade_and_maybe_retry "yes"
+ rc=$?
+ case $rc in
+ 0) : ;; # success - container created, continue
+ 2)
+ echo "Upgrade was declined. Please update and re-run:
+ apt update && apt install --only-upgrade pve-container lxc-pve"
+ exit 231
+ ;;
+ 3)
+ echo "Upgrade and/or retry failed. Please inspect: $LOGFILE"
+ exit 231
+ ;;
+ esac
+ else
+ msg_error "Container creation failed. See $LOGFILE"
+ if whiptail --yesno "pct create failed.\nDo you want to enable verbose debug mode and view detailed logs?" 12 70; then
+ set -x
+ bash -x -c "pct create $CTID local:vztmpl/${TEMPLATE} ${PCT_OPTIONS[*]}" 2>&1 | tee -a "$LOGFILE"
+ set +x
+ fi
+ exit 209
+ fi
+ fi
+ fi
+ fi
+
+ # Verify container exists
+ pct list | awk '{print $1}' | grep -qx "$CTID" || {
+ msg_error "Container ID $CTID not listed in 'pct list'. See $LOGFILE"
+ exit 215
+ }
+
+ # Verify config rootfs
+ grep -q '^rootfs:' "/etc/pve/lxc/$CTID.conf" || {
+ msg_error "RootFS entry missing in container config. See $LOGFILE"
+ exit 216
+ }
+
+ msg_ok "LXC Container ${BL}$CTID${CL} ${GN}was successfully created."
+}
+
+# ------------------------------------------------------------------------------
+# description()
+#
+# - Sets container description with HTML content (logo, links, badges)
+# - Restarts ping-instances.service if present
+# - Posts status "done" to API
+# ------------------------------------------------------------------------------
+description() {
+ IP=$(pct exec "$CTID" ip a s dev eth0 | awk '/inet / {print $2}' | cut -d/ -f1)
+
+ # Generate LXC Description
+ DESCRIPTION=$(
+    cat <<EOF
+
+
+
+
+ ${APP} LXC
+
+
+
+
+
+
+
+
+
+ GitHub
+
+
+
+ Discussions
+
+
+
+ Issues
+
+
+EOF
+ )
+ pct set "$CTID" -description "$DESCRIPTION"
+
+ if [[ -f /etc/systemd/system/ping-instances.service ]]; then
+ systemctl start ping-instances.service
+ fi
+
+ post_update_to_api "done" "none"
+}
+
+# ------------------------------------------------------------------------------
+# api_exit_script()
+#
+# - Exit trap handler
+# - Reports exit codes to API with detailed reason
+# - Handles known codes (100–209) and maps them to errors
+# ------------------------------------------------------------------------------
+api_exit_script() {
+ exit_code=$?
+ if [ $exit_code -ne 0 ]; then
+ case $exit_code in
+ 100) post_update_to_api "failed" "100: Unexpected error in create_lxc.sh" ;;
+ 101) post_update_to_api "failed" "101: No network connection detected in create_lxc.sh" ;;
+ 200) post_update_to_api "failed" "200: LXC creation failed in create_lxc.sh" ;;
+ 201) post_update_to_api "failed" "201: Invalid Storage class in create_lxc.sh" ;;
+ 202) post_update_to_api "failed" "202: User aborted menu in create_lxc.sh" ;;
+ 203) post_update_to_api "failed" "203: CTID not set in create_lxc.sh" ;;
+ 204) post_update_to_api "failed" "204: PCT_OSTYPE not set in create_lxc.sh" ;;
+ 205) post_update_to_api "failed" "205: CTID cannot be less than 100 in create_lxc.sh" ;;
+ 206) post_update_to_api "failed" "206: CTID already in use in create_lxc.sh" ;;
+ 207) post_update_to_api "failed" "207: Template not found in create_lxc.sh" ;;
+ 208) post_update_to_api "failed" "208: Error downloading template in create_lxc.sh" ;;
+ 209) post_update_to_api "failed" "209: Container creation failed, but template is intact in create_lxc.sh" ;;
+ *) post_update_to_api "failed" "Unknown error, exit code: $exit_code in create_lxc.sh" ;;
+ esac
+ fi
+}
+
+if command -v pveversion >/dev/null 2>&1; then
+ trap 'api_exit_script' EXIT
+fi
+trap 'post_update_to_api "failed" "$BASH_COMMAND"' ERR
+trap 'post_update_to_api "failed" "INTERRUPTED"' SIGINT
+trap 'post_update_to_api "failed" "TERMINATED"' SIGTERM
diff --git a/misc/core.func b/misc/core.func
index a8774fc29..dfa6b2e7e 100644
--- a/misc/core.func
+++ b/misc/core.func
@@ -1,30 +1,7 @@
+#!/usr/bin/env bash
# Copyright (c) 2021-2025 community-scripts ORG
# License: MIT | https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/LICENSE
-# if ! declare -f wait_for >/dev/null; then
-# echo "[DEBUG] Undefined function 'wait_for' used from: ${BASH_SOURCE[*]}" >&2
-# wait_for() {
-# echo "[DEBUG] Fallback: wait_for called with: $*" >&2
-# true
-# }
-# fi
-
-# trap 'on_error $? $LINENO' ERR
-# trap 'on_exit' EXIT
-# trap 'on_interrupt' INT
-# trap 'on_terminate' TERM
-
-# if ! declare -f wait_for >/dev/null; then
-# wait_for() {
-# true
-# }
-# fi
-
-# declare -A MSG_INFO_SHOWN=()
-# SPINNER_PID=""
-# SPINNER_ACTIVE=0
-# SPINNER_MSG=""
-
# ------------------------------------------------------------------------------
# Loads core utility groups once (colors, formatting, icons, defaults).
# ------------------------------------------------------------------------------
@@ -43,87 +20,6 @@ load_functions() {
# add more
}
-# ============================================================================
-# Error & Signal Handling – robust, universal, subshell-safe
-# ============================================================================
-
-# _stop_spinner_on_error() {
-# [[ -n "${SPINNER_PID:-}" ]] && kill "$SPINNER_PID" 2>/dev/null && wait "$SPINNER_PID" 2>/dev/null || true
-# }
-
-_tool_error_hint() {
- local cmd="$1"
- local code="$2"
- case "$cmd" in
- curl)
- case "$code" in
- 6) echo "Curl: Could not resolve host (DNS problem)" ;;
- 7) echo "Curl: Failed to connect to host (connection refused)" ;;
- 22) echo "Curl: HTTP error (404/403 etc)" ;;
- 28) echo "Curl: Operation timeout" ;;
- *) echo "Curl: Unknown error ($code)" ;;
- esac
- ;;
- wget)
- echo "Wget failed – URL unreachable or permission denied"
- ;;
- systemctl)
- echo "Systemd unit failure – check service name and permissions"
- ;;
- jq)
- echo "jq parse error – malformed JSON or missing key"
- ;;
- mariadb | mysql)
- echo "MySQL/MariaDB command failed – check credentials or DB"
- ;;
- unzip)
- echo "unzip failed – corrupt file or missing permission"
- ;;
- tar)
- echo "tar failed – invalid format or missing binary"
- ;;
- node | npm | pnpm | yarn)
- echo "Node tool failed – check version compatibility or package.json"
- ;;
- *) echo "" ;;
- esac
-}
-
-# on_error() {
-# local code="$?"
-# local line="${BASH_LINENO[0]:-unknown}"
-# local cmd="${BASH_COMMAND:-unknown}"
-
-# # Signalcode unterdrücken, falls INT/TERM kommt
-# [[ "$code" == "130" || "$code" == "143" ]] && return
-
-# _stop_spinner_on_error
-# msg_error "Script failed at line $line with exit code $code: $cmd"
-# exit "$code"
-# }
-
-# on_exit() {
-# _stop_spinner_on_error
-# [[ "${VERBOSE:-no}" == "yes" ]] && msg_info "Script exited cleanly"
-# }
-
-# on_interrupt() {
-# _stop_spinner_on_error
-# msg_error "Interrupted by user (CTRL+C)"
-# exit 130
-# }
-
-# on_terminate() {
-# _stop_spinner_on_error
-# msg_error "Terminated by signal (SIGTERM)"
-# exit 143
-# }
-
-catch_errors() {
- set -Eeuo pipefail
- trap 'error_handler $LINENO "$BASH_COMMAND"' ERR
-}
-
# ------------------------------------------------------------------------------
# Sets ANSI color codes used for styled terminal output.
# ------------------------------------------------------------------------------
@@ -212,9 +108,148 @@ set_std_mode() {
fi
}
-# Silent execution function
+SILENT_LOGFILE="/tmp/install-$(date +%Y%m%d_%H%M%S)_${SESSION_ID:-$(date +%s)}.log"
+
silent() {
- "$@" >/dev/null 2>&1
+ local cmd="$*"
+ local caller_line="${BASH_LINENO[0]:-unknown}"
+
+ set +Eeuo pipefail
+ trap - ERR
+
+ "$@" >>"$SILENT_LOGFILE" 2>&1
+ local rc=$?
+
+ set -Eeuo pipefail
+ trap 'error_handler' ERR
+
+ if [[ $rc -ne 0 ]]; then
+ # Source explain_exit_code if needed
+ if ! declare -f explain_exit_code >/dev/null 2>&1; then
+ source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/error_handler.func)
+ fi
+
+ local explanation
+ explanation="$(explain_exit_code "$rc")"
+
+ printf "\e[?25h"
+ msg_error "in line ${caller_line}: exit code ${rc} (${explanation})"
+ msg_custom "→" "${YWB}" "${cmd}"
+
+ if [[ -s "$SILENT_LOGFILE" ]]; then
+ local log_lines=$(wc -l <"$SILENT_LOGFILE")
+ echo "--- Last 10 lines of silent log ---"
+ tail -n 10 "$SILENT_LOGFILE"
+ echo "-----------------------------------"
+
+ # Show how to view full log if there are more lines
+ if [[ $log_lines -gt 10 ]]; then
+ msg_custom "📋" "${YW}" "View full log (${log_lines} lines): /tmp/install-*_${SESSION_ID:-*}.log"
+ fi
+ fi
+
+ exit "$rc"
+ fi
+}
+
+# Check if the shell is using bash
+shell_check() {
+ if [[ "$(ps -p $$ -o comm=)" != "bash" ]]; then
+ clear
+ msg_error "Your default shell is currently not set to Bash. To use these scripts, please switch to the Bash shell."
+ echo -e "\nExiting..."
+ sleep 2
+ exit
+ fi
+}
+
+# Run as root only
+root_check() {
+ if [[ "$(id -u)" -ne 0 || $(ps -o comm= -p $PPID) == "sudo" ]]; then
+ clear
+ msg_error "Please run this script as root."
+ echo -e "\nExiting..."
+ sleep 2
+ exit
+ fi
+}
+
+# This function checks the version of Proxmox Virtual Environment (PVE) and exits if the version is not supported.
+# Supported: Proxmox VE 8.0.x – 8.9.x and 9.0 (NOT 9.1+)
+pve_check() {
+ local PVE_VER
+ PVE_VER="$(pveversion | awk -F'/' '{print $2}' | awk -F'-' '{print $1}')"
+
+ # Check for Proxmox VE 8.x: allow 8.0–8.9
+ if [[ "$PVE_VER" =~ ^8\.([0-9]+) ]]; then
+ local MINOR="${BASH_REMATCH[1]}"
+ if ((MINOR < 0 || MINOR > 9)); then
+ msg_error "This version of Proxmox VE is not supported."
+ msg_error "Supported: Proxmox VE version 8.0 – 8.9"
+ exit 1
+ fi
+ return 0
+ fi
+
+ # Check for Proxmox VE 9.x: allow ONLY 9.0
+ if [[ "$PVE_VER" =~ ^9\.([0-9]+) ]]; then
+ local MINOR="${BASH_REMATCH[1]}"
+ if ((MINOR != 0)); then
+ msg_error "This version of Proxmox VE is not yet supported."
+ msg_error "Supported: Proxmox VE version 9.0"
+ exit 1
+ fi
+ return 0
+ fi
+
+ # All other unsupported versions
+ msg_error "This version of Proxmox VE is not supported."
+ msg_error "Supported versions: Proxmox VE 8.0 – 8.x or 9.0"
+ exit 1
+}
+
+# This function checks the system architecture and exits if it's not "amd64".
+arch_check() {
+ if [ "$(dpkg --print-architecture)" != "amd64" ]; then
+ echo -e "\n ${INFO}${YWB}This script will not work with PiMox! \n"
+ echo -e "\n ${YWB}Visit https://github.com/asylumexp/Proxmox for ARM64 support. \n"
+ echo -e "Exiting..."
+ sleep 2
+ exit
+ fi
+}
+
+# ------------------------------------------------------------------------------
+# ssh_check()
+#
+# - Detects if script is running over SSH
+# - Warns user and recommends using Proxmox shell
+# - User can choose to continue or abort
+# ------------------------------------------------------------------------------
+ssh_check() {
+ if [ -n "$SSH_CLIENT" ]; then
+ local client_ip=$(awk '{print $1}' <<<"$SSH_CLIENT")
+ local host_ip=$(hostname -I | awk '{print $1}')
+
+ # Check if connection is local (Proxmox WebUI or same machine)
+ # - localhost (127.0.0.1, ::1)
+ # - same IP as host
+ # - local network range (10.x, 172.16-31.x, 192.168.x)
+ if [[ "$client_ip" == "127.0.0.1" || "$client_ip" == "::1" || "$client_ip" == "$host_ip" ]]; then
+ return
+ fi
+
+ # Check if client is in same local network (optional, safer approach)
+ local host_subnet=$(echo "$host_ip" | cut -d. -f1-3)
+ local client_subnet=$(echo "$client_ip" | cut -d. -f1-3)
+ if [[ "$host_subnet" == "$client_subnet" ]]; then
+ return
+ fi
+
+ # Only warn for truly external connections
+ msg_warn "Running via external SSH (client: $client_ip)."
+ msg_warn "For better stability, consider using the Proxmox Shell (Console) instead."
+ fi
}
# Function to download & save header files
@@ -285,43 +320,6 @@ is_verbose_mode() {
[[ "$verbose" != "no" || ! -t 2 ]]
}
-# ------------------------------------------------------------------------------
-# Handles specific curl error codes and displays descriptive messages.
-# ------------------------------------------------------------------------------
-__curl_err_handler() {
- local exit_code="$1"
- local target="$2"
- local curl_msg="$3"
-
- case $exit_code in
- 1) msg_error "Unsupported protocol: $target" ;;
- 2) msg_error "Curl init failed: $target" ;;
- 3) msg_error "Malformed URL: $target" ;;
- 5) msg_error "Proxy resolution failed: $target" ;;
- 6) msg_error "Host resolution failed: $target" ;;
- 7) msg_error "Connection failed: $target" ;;
- 9) msg_error "Access denied: $target" ;;
- 18) msg_error "Partial file transfer: $target" ;;
- 22) msg_error "HTTP error (e.g. 400/404): $target" ;;
- 23) msg_error "Write error on local system: $target" ;;
- 26) msg_error "Read error from local file: $target" ;;
- 28) msg_error "Timeout: $target" ;;
- 35) msg_error "SSL connect error: $target" ;;
- 47) msg_error "Too many redirects: $target" ;;
- 51) msg_error "SSL cert verify failed: $target" ;;
- 52) msg_error "Empty server response: $target" ;;
- 55) msg_error "Send error: $target" ;;
- 56) msg_error "Receive error: $target" ;;
- 60) msg_error "SSL CA not trusted: $target" ;;
- 67) msg_error "Login denied by server: $target" ;;
- 78) msg_error "Remote file not found (404): $target" ;;
- *) msg_error "Curl failed with code $exit_code: $target" ;;
- esac
-
- [[ -n "$curl_msg" ]] && printf "%s\n" "$curl_msg" >&2
- exit 1
-}
-
fatal() {
msg_error "$1"
kill -INT $$
@@ -415,80 +413,56 @@ msg_custom() {
echo -e "${BFR:-} ${symbol} ${color}${msg}${CL:-\e[0m}"
}
-# msg_ok() {
-# local msg="$1"
-# [[ -z "$msg" ]] && return
-# stop_spinner
-# printf "\r\e[2K%s %b\n" "$CM" "${GN}${msg}${CL}" >&2
-# if declare -p MSG_INFO_SHOWN &>/dev/null && [[ "$(declare -p MSG_INFO_SHOWN 2>/dev/null)" =~ "declare -A" ]]; then
-# unset MSG_INFO_SHOWN["$msg"]
-# fi
-# }
+function msg_debug() {
+ if [[ "${var_full_verbose:-0}" == "1" ]]; then
+ [[ "${var_verbose:-0}" != "1" ]] && var_verbose=1
+ echo -e "${YWB}[$(date '+%F %T')] [DEBUG]${CL} $*"
+ fi
+}
-# msg_error() {
-# local msg="$1"
-# [[ -z "$msg" ]] && return
-# stop_spinner
-# printf "\r\e[2K%s %b\n" "$CROSS" "${RD}${msg}${CL}" >&2
-# }
+cleanup_lxc() {
+ msg_info "Cleaning up"
+ if is_alpine; then
+ $STD apk cache clean || true
+ rm -rf /var/cache/apk/*
+ else
+ $STD apt -y autoremove || true
+ $STD apt -y autoclean || true
+ $STD apt -y clean || true
+ fi
-# msg_warn() {
-# local msg="$1"
-# [[ -z "$msg" ]] && return
-# stop_spinner
-# printf "\r\e[2K%s %b\n" "$INFO" "${YWB}${msg}${CL}" >&2
-# if declare -p MSG_INFO_SHOWN &>/dev/null && [[ "$(declare -p MSG_INFO_SHOWN 2>/dev/null)" =~ "declare -A" ]]; then
-# unset MSG_INFO_SHOWN["$msg"]
-# fi
-# }
+ rm -rf /tmp/* /var/tmp/*
-# msg_custom() {
-# local symbol="${1:-"[*]"}"
-# local color="${2:-"\e[36m"}" # Default: Cyan
-# local msg="${3:-}"
+ # Remove temp files created by mktemp/tempfile
+ find /tmp /var/tmp -type f -name 'tmp*' -delete 2>/dev/null || true
+ find /tmp /var/tmp -type f -name 'tempfile*' -delete 2>/dev/null || true
-# [[ -z "$msg" ]] && return
-# stop_spinner 2>/dev/null || true
-# printf "\r\e[2K%s %b\n" "$symbol" "${color}${msg}${CL:-\e[0m}" >&2
-# }
+ find /var/log -type f -exec truncate -s 0 {} +
-# msg_progress() {
-# local current="$1"
-# local total="$2"
-# local label="$3"
-# local width=40
-# local filled percent bar empty
-# local fill_char="#"
-# local empty_char="-"
+ # Python pip
+ if command -v pip &>/dev/null; then pip cache purge || true; fi
+ # Python uv
+ if command -v uv &>/dev/null; then uv cache clear || true; fi
+ # Node.js npm
+ if command -v npm &>/dev/null; then npm cache clean --force || true; fi
+ # Node.js yarn
+ if command -v yarn &>/dev/null; then yarn cache clean || true; fi
+ # Node.js pnpm
+ if command -v pnpm &>/dev/null; then pnpm store prune || true; fi
+ # Go
+ if command -v go &>/dev/null; then go clean -cache -modcache || true; fi
+ # Rust cargo
+ if command -v cargo &>/dev/null; then cargo clean || true; fi
+ # Ruby gem
+ if command -v gem &>/dev/null; then gem cleanup || true; fi
+ # Composer (PHP)
+ if command -v composer &>/dev/null; then composer clear-cache || true; fi
-# if ! [[ "$current" =~ ^[0-9]+$ ]] || ! [[ "$total" =~ ^[0-9]+$ ]] || [[ "$total" -eq 0 ]]; then
-# printf "\r\e[2K%s %b\n" "$CROSS" "${RD}Invalid progress input${CL}" >&2
-# return
-# fi
-
-# percent=$(((current * 100) / total))
-# filled=$(((current * width) / total))
-# empty=$((width - filled))
-
-# bar=$(printf "%${filled}s" | tr ' ' "$fill_char")
-# bar+=$(printf "%${empty}s" | tr ' ' "$empty_char")
-
-# printf "\r\e[2K%s [%s] %3d%% %s" "${TAB}" "$bar" "$percent" "$label" >&2
-
-# if [[ "$current" -eq "$total" ]]; then
-# printf "\n" >&2
-# fi
-# }
-
-run_container_safe() {
- local ct="$1"
- shift
- local cmd="$*"
-
- lxc-attach -n "$ct" -- bash -euo pipefail -c "
- trap 'echo Aborted in container; exit 130' SIGINT SIGTERM
- $cmd
- " || __handle_general_error "lxc-attach to CT $ct"
+ if command -v journalctl &>/dev/null; then
+ $STD journalctl --rotate
+ $STD journalctl --vacuum-time=10m
+ fi
+ msg_ok "Cleaned"
}
check_or_create_swap() {
diff --git a/misc/core_ref/newtemplateavailable.png b/misc/core_ref/newtemplateavailable.png
new file mode 100644
index 000000000..14ca28639
Binary files /dev/null and b/misc/core_ref/newtemplateavailable.png differ
diff --git a/misc/core_ref/pve-container-upgrader.png b/misc/core_ref/pve-container-upgrader.png
new file mode 100644
index 000000000..efabf8a81
Binary files /dev/null and b/misc/core_ref/pve-container-upgrader.png differ
diff --git a/misc/create_lxc.sh b/misc/create_lxc.sh
deleted file mode 100644
index d74ee9fa9..000000000
--- a/misc/create_lxc.sh
+++ /dev/null
@@ -1,404 +0,0 @@
-#!/usr/bin/env bash
-
-# Copyright (c) 2021-2025 tteck
-# Author: tteck (tteckster)
-# Co-Author: MickLesk
-# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
-
-# This sets verbose mode if the global variable is set to "yes"
-# if [ "$VERBOSE" == "yes" ]; then set -x; fi
-
-if command -v curl >/dev/null 2>&1; then
- source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/core.func)
- load_functions
- #echo "(create-lxc.sh) Loaded core.func via curl"
-elif command -v wget >/dev/null 2>&1; then
- source <(wget -qO- https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/core.func)
- load_functions
- #echo "(create-lxc.sh) Loaded core.func via wget"
-fi
-
-# This sets error handling options and defines the error_handler function to handle errors
-set -Eeuo pipefail
-trap 'error_handler $LINENO "$BASH_COMMAND"' ERR
-trap on_exit EXIT
-trap on_interrupt INT
-trap on_terminate TERM
-
-function on_exit() {
- local exit_code="$?"
- [[ -n "${lockfile:-}" && -e "$lockfile" ]] && rm -f "$lockfile"
- exit "$exit_code"
-}
-
-function error_handler() {
- local exit_code="$?"
- local line_number="$1"
- local command="$2"
- printf "\e[?25h"
- echo -e "\n${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}\n"
- exit "$exit_code"
-}
-
-function on_interrupt() {
- echo -e "\n${RD}Interrupted by user (SIGINT)${CL}"
- exit 130
-}
-
-function on_terminate() {
- echo -e "\n${RD}Terminated by signal (SIGTERM)${CL}"
- exit 143
-}
-
-function check_storage_support() {
- local CONTENT="$1"
- local -a VALID_STORAGES=()
-
- while IFS= read -r line; do
- local STORAGE=$(awk '{print $1}' <<<"$line")
- [[ "$STORAGE" == "storage" || -z "$STORAGE" ]] && continue
- VALID_STORAGES+=("$STORAGE")
- done < <(pvesm status -content "$CONTENT" 2>/dev/null | awk 'NR>1')
-
- [[ ${#VALID_STORAGES[@]} -gt 0 ]]
-}
-
-# This checks for the presence of valid Container Storage and Template Storage locations
-msg_info "Validating Storage"
-if ! check_storage_support "rootdir"; then
- msg_error "No valid storage found for 'rootdir' (Container)."
- exit 1
-fi
-if ! check_storage_support "vztmpl"; then
- msg_error "No valid storage found for 'vztmpl' (Template)."
- exit 1
-fi
-msg_ok "Validated Storage (rootdir / vztmpl)."
-
-# This function is used to select the storage class and determine the corresponding storage content type and label.
-function select_storage() {
- local CLASS=$1 CONTENT CONTENT_LABEL
-
- case $CLASS in
- container)
- CONTENT='rootdir'
- CONTENT_LABEL='Container'
- ;;
- template)
- CONTENT='vztmpl'
- CONTENT_LABEL='Container template'
- ;;
- iso)
- CONTENT='iso'
- CONTENT_LABEL='ISO image'
- ;;
- images)
- CONTENT='images'
- CONTENT_LABEL='VM Disk image'
- ;;
- backup)
- CONTENT='backup'
- CONTENT_LABEL='Backup'
- ;;
- snippets)
- CONTENT='snippets'
- CONTENT_LABEL='Snippets'
- ;;
- *)
- msg_error "Invalid storage class '$CLASS'"
- return 1
- ;;
- esac
-
- local -a MENU
- local -A STORAGE_MAP
- local COL_WIDTH=0
-
- while read -r TAG TYPE _ TOTAL USED FREE _; do
- [[ -n "$TAG" && -n "$TYPE" ]] || continue
- local DISPLAY="${TAG} (${TYPE})"
- local USED_FMT=$(numfmt --to=iec --from-unit=K --format %.1f <<<"$USED")
- local FREE_FMT=$(numfmt --to=iec --from-unit=K --format %.1f <<<"$FREE")
- local INFO="Free: ${FREE_FMT}B Used: ${USED_FMT}B"
- STORAGE_MAP["$DISPLAY"]="$TAG"
- MENU+=("$DISPLAY" "$INFO" "OFF")
- ((${#DISPLAY} > COL_WIDTH)) && COL_WIDTH=${#DISPLAY}
- done < <(pvesm status -content "$CONTENT" | awk 'NR>1')
-
- if [ ${#MENU[@]} -eq 0 ]; then
- msg_error "No storage found for content type '$CONTENT'."
- return 2
- fi
-
- if [ $((${#MENU[@]} / 3)) -eq 1 ]; then
- STORAGE_RESULT="${STORAGE_MAP[${MENU[0]}]}"
- return 0
- fi
-
- local WIDTH=$((COL_WIDTH + 42))
- while true; do
- local DISPLAY_SELECTED=$(whiptail --backtitle "Proxmox VE Helper Scripts" \
- --title "Storage Pools" \
- --radiolist "Which storage pool for ${CONTENT_LABEL,,}?\n(Spacebar to select)" \
- 16 "$WIDTH" 6 "${MENU[@]}" 3>&1 1>&2 2>&3)
-
- [[ $? -ne 0 ]] && return 3
-
- if [[ -z "$DISPLAY_SELECTED" || -z "${STORAGE_MAP[$DISPLAY_SELECTED]+_}" ]]; then
- whiptail --msgbox "No valid storage selected. Please try again." 8 58
- continue
- fi
-
- STORAGE_RESULT="${STORAGE_MAP[$DISPLAY_SELECTED]}"
- return 0
- done
-}
-
-# Test if required variables are set
-[[ "${CTID:-}" ]] || {
- msg_error "You need to set 'CTID' variable."
- exit 203
-}
-[[ "${PCT_OSTYPE:-}" ]] || {
- msg_error "You need to set 'PCT_OSTYPE' variable."
- exit 204
-}
-
-# Test if ID is valid
-[ "$CTID" -ge "100" ] || {
- msg_error "ID cannot be less than 100."
- exit 205
-}
-
-# Test if ID is in use
-if qm status "$CTID" &>/dev/null || pct status "$CTID" &>/dev/null; then
- echo -e "ID '$CTID' is already in use."
- unset CTID
- msg_error "Cannot use ID that is already in use."
- exit 206
-fi
-
-# DEFAULT_FILE="/usr/local/community-scripts/default_storage"
-# if [[ -f "$DEFAULT_FILE" ]]; then
-# source "$DEFAULT_FILE"
-# if [[ -n "$TEMPLATE_STORAGE" && -n "$CONTAINER_STORAGE" ]]; then
-# msg_info "Using default storage configuration from: $DEFAULT_FILE"
-# msg_ok "Template Storage: ${BL}$TEMPLATE_STORAGE${CL} ${GN}|${CL} Container Storage: ${BL}$CONTAINER_STORAGE${CL}"
-# else
-# msg_warn "Default storage file exists but is incomplete – falling back to manual selection"
-# TEMPLATE_STORAGE=$(select_storage template)
-# msg_ok "Using ${BL}$TEMPLATE_STORAGE${CL} ${GN}for Template Storage."
-# CONTAINER_STORAGE=$(select_storage container)
-# msg_ok "Using ${BL}$CONTAINER_STORAGE${CL} ${GN}for Container Storage."
-# fi
-# else
-# # TEMPLATE STORAGE SELECTION
-# # Template Storage
-# while true; do
-# TEMPLATE_STORAGE=$(select_storage template)
-# if [[ -n "$TEMPLATE_STORAGE" ]]; then
-# msg_ok "Using ${BL}$TEMPLATE_STORAGE${CL} ${GN}for Template Storage."
-# break
-# fi
-# msg_warn "No valid template storage selected. Please try again."
-# done
-
-# while true; do
-# CONTAINER_STORAGE=$(select_storage container)
-# if [[ -n "$CONTAINER_STORAGE" ]]; then
-# msg_ok "Using ${BL}$CONTAINER_STORAGE${CL} ${GN}for Container Storage."
-# break
-# fi
-# msg_warn "No valid container storage selected. Please try again."
-# done
-
-# fi
-
-while true; do
- if select_storage template; then
- TEMPLATE_STORAGE="$STORAGE_RESULT"
- break
- fi
-done
-
-while true; do
- if select_storage container; then
- CONTAINER_STORAGE="$STORAGE_RESULT"
- break
- fi
-done
-
-# Check free space on selected container storage
-STORAGE_FREE=$(pvesm status | awk -v s="$CONTAINER_STORAGE" '$1 == s { print $6 }')
-REQUIRED_KB=$((${PCT_DISK_SIZE:-8} * 1024 * 1024))
-if [ "$STORAGE_FREE" -lt "$REQUIRED_KB" ]; then
- msg_error "Not enough space on '$CONTAINER_STORAGE'. Needed: ${PCT_DISK_SIZE:-8}G."
- exit 214
-fi
-# Check Cluster Quorum if in Cluster
-if [ -f /etc/pve/corosync.conf ]; then
- msg_info "Checking Proxmox cluster quorum status"
- if ! pvecm status | awk -F':' '/^Quorate/ { exit ($2 ~ /Yes/) ? 0 : 1 }'; then
- printf "\e[?25h"
- msg_error "Cluster is not quorate. Start all nodes or configure quorum device (QDevice)."
- exit 210
- fi
- msg_ok "Cluster is quorate"
-fi
-
-# Update LXC template list
-TEMPLATE_SEARCH="${PCT_OSTYPE}-${PCT_OSVERSION:-}"
-
-msg_info "Updating LXC Template List"
-if ! timeout 15 pveam update >/dev/null 2>&1; then
- TEMPLATE_FALLBACK=$(pveam list "$TEMPLATE_STORAGE" | awk "/$TEMPLATE_SEARCH/ {print \$2}" | sort -t - -k 2 -V | tail -n1)
- if [[ -z "$TEMPLATE_FALLBACK" ]]; then
- msg_error "Failed to update LXC template list and no local template matching '$TEMPLATE_SEARCH' found."
- exit 201
- fi
- msg_info "Skipping template update – using local fallback: $TEMPLATE_FALLBACK"
-else
- msg_ok "LXC Template List Updated"
-fi
-
-# Get LXC template string
-TEMPLATE_SEARCH="${PCT_OSTYPE}-${PCT_OSVERSION:-}"
-mapfile -t TEMPLATES < <(pveam available -section system | sed -n "s/.*\($TEMPLATE_SEARCH.*\)/\1/p" | sort -t - -k 2 -V)
-
-if [ ${#TEMPLATES[@]} -eq 0 ]; then
- msg_error "No matching LXC template found for '${TEMPLATE_SEARCH}'. Make sure your host can reach the Proxmox template repository."
- exit 207
-fi
-
-TEMPLATE="${TEMPLATES[-1]}"
-TEMPLATE_PATH="$(pvesm path $TEMPLATE_STORAGE:vztmpl/$TEMPLATE 2>/dev/null || echo "/var/lib/vz/template/cache/$TEMPLATE")"
-
-TEMPLATE_VALID=1
-if ! pveam list "$TEMPLATE_STORAGE" | grep -q "$TEMPLATE"; then
- TEMPLATE_VALID=0
-elif [ ! -s "$TEMPLATE_PATH" ]; then
- TEMPLATE_VALID=0
-elif ! tar --use-compress-program=zstdcat -tf "$TEMPLATE_PATH" >/dev/null 2>&1; then
- TEMPLATE_VALID=0
-fi
-
-if [ "$TEMPLATE_VALID" -eq 0 ]; then
- msg_warn "Template $TEMPLATE not found or appears to be corrupted. Re-downloading."
- [[ -f "$TEMPLATE_PATH" ]] && rm -f "$TEMPLATE_PATH"
- for attempt in {1..3}; do
- msg_info "Attempt $attempt: Downloading LXC template..."
- if pveam download "$TEMPLATE_STORAGE" "$TEMPLATE" >/dev/null 2>&1; then
- msg_ok "Template download successful."
- break
- fi
- if [ $attempt -eq 3 ]; then
- msg_error "Failed after 3 attempts. Please check network access or manually run:\n pveam download $TEMPLATE_STORAGE $TEMPLATE"
- exit 208
- fi
- sleep $((attempt * 5))
- done
-fi
-
-msg_ok "LXC Template '$TEMPLATE' is ready to use."
-
-msg_info "Creating LXC Container"
-# Check and fix subuid/subgid
-grep -q "root:100000:65536" /etc/subuid || echo "root:100000:65536" >>/etc/subuid
-grep -q "root:100000:65536" /etc/subgid || echo "root:100000:65536" >>/etc/subgid
-
-# Combine all options
-PCT_OPTIONS=(${PCT_OPTIONS[@]:-${DEFAULT_PCT_OPTIONS[@]}})
-[[ " ${PCT_OPTIONS[@]} " =~ " -rootfs " ]] || PCT_OPTIONS+=(-rootfs "$CONTAINER_STORAGE:${PCT_DISK_SIZE:-8}")
-
-# Secure creation of the LXC container with lock and template check
-lockfile="/tmp/template.${TEMPLATE}.lock"
-exec 9>"$lockfile" >/dev/null 2>&1 || {
- msg_error "Failed to create lock file '$lockfile'."
- exit 200
-}
-flock -w 60 9 || {
- msg_error "Timeout while waiting for template lock"
- exit 211
-}
-
-if ! pct create "$CTID" "${TEMPLATE_STORAGE}:vztmpl/${TEMPLATE}" "${PCT_OPTIONS[@]}" &>/dev/null; then
- msg_error "Container creation failed. Checking if template is corrupted or incomplete."
-
- if [[ ! -s "$TEMPLATE_PATH" || "$(stat -c%s "$TEMPLATE_PATH")" -lt 1000000 ]]; then
- msg_error "Template file too small or missing – re-downloading."
- rm -f "$TEMPLATE_PATH"
- elif ! zstdcat "$TEMPLATE_PATH" | tar -tf - &>/dev/null; then
- msg_error "Template appears to be corrupted – re-downloading."
- rm -f "$TEMPLATE_PATH"
- else
- msg_error "Template is valid, but container creation still failed."
- exit 209
- fi
-
- # Retry download
- for attempt in {1..3}; do
- msg_info "Attempt $attempt: Re-downloading template..."
- if timeout 120 pveam download "$TEMPLATE_STORAGE" "$TEMPLATE" >/dev/null; then
- msg_ok "Template re-download successful."
- break
- fi
- if [ "$attempt" -eq 3 ]; then
- msg_error "Three failed attempts. Aborting."
- exit 208
- fi
- sleep $((attempt * 5))
- done
-
- sleep 1 # I/O-Sync-Delay
- msg_ok "Re-downloaded LXC Template"
-fi
-
-if ! pct list | awk '{print $1}' | grep -qx "$CTID"; then
- msg_error "Container ID $CTID not listed in 'pct list' – unexpected failure."
- exit 215
-fi
-
-if ! grep -q '^rootfs:' "/etc/pve/lxc/$CTID.conf"; then
- msg_error "RootFS entry missing in container config – storage not correctly assigned."
- exit 216
-fi
-
-if grep -q '^hostname:' "/etc/pve/lxc/$CTID.conf"; then
- CT_HOSTNAME=$(grep '^hostname:' "/etc/pve/lxc/$CTID.conf" | awk '{print $2}')
- if [[ ! "$CT_HOSTNAME" =~ ^[a-z0-9-]+$ ]]; then
- msg_warn "Hostname '$CT_HOSTNAME' contains invalid characters – may cause issues with networking or DNS."
- fi
-fi
-
-if [[ "${PCT_RAM_SIZE:-2048}" -lt 1024 ]]; then
- msg_warn "Configured RAM (${PCT_RAM_SIZE}MB) is below 1024MB – some apps may not work properly."
-fi
-
-if [[ "${PCT_UNPRIVILEGED:-1}" == "1" && " ${PCT_OPTIONS[*]} " == *"fuse=1"* ]]; then
- msg_warn "Unprivileged container with FUSE may fail unless extra device mappings are configured."
-fi
-
-# Extra: Debug-Ausgabe (wenn DEBUG=yes gesetzt)
-DEBUG_LOG="/tmp/lxc_debug_${CTID}.log"
-{
- echo "--- DEBUG DUMP for CTID $CTID ---"
- echo "Hostname: ${CT_HOSTNAME:-unknown}"
- echo "Template: ${TEMPLATE}"
- echo "Template Storage: ${TEMPLATE_STORAGE}"
- echo "Container Storage: ${CONTAINER_STORAGE}"
- echo "Template Path: ${TEMPLATE_PATH}"
- echo "Disk Size: ${PCT_DISK_SIZE:-8} GB"
- echo "RAM Size: ${PCT_RAM_SIZE:-2048} MB"
- echo "CPU Cores: ${PCT_CPU_CORES:-2}"
- echo "Unprivileged: ${PCT_UNPRIVILEGED:-1}"
- echo "PCT_OPTIONS:"
- printf ' %s\n' "${PCT_OPTIONS[@]}"
- echo "--- Container Config Dump ---"
- [[ -f "/etc/pve/lxc/$CTID.conf" ]] && cat "/etc/pve/lxc/$CTID.conf"
- echo "--- LVM Volumes ---"
- lvs | grep "vm-${CTID}-disk-0" || echo "No LVM volume found."
- echo "--- pct list ---"
- pct list
-} >"$DEBUG_LOG"
-
-msg_ok "LXC Container ${BL}$CTID${CL} ${GN}was successfully created."
diff --git a/misc/config-file.func b/misc/deferred/config-file.func
similarity index 100%
rename from misc/config-file.func
rename to misc/deferred/config-file.func
diff --git a/misc/deferred/create_lxc.sh b/misc/deferred/create_lxc.sh
new file mode 100644
index 000000000..a85f27478
--- /dev/null
+++ b/misc/deferred/create_lxc.sh
@@ -0,0 +1,633 @@
+#!/usr/bin/env bash
+# Copyright (c) 2021-2025 tteck
+# Author: tteck (tteckster)
+# Co-Author: MickLesk
+# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
+
+# ------------------------------------------------------------------------------
+# Optional verbose mode (debug tracing)
+# ------------------------------------------------------------------------------
+if [[ "${CREATE_LXC_VERBOSE:-no}" == "yes" ]]; then set -x; fi
+
+# ------------------------------------------------------------------------------
+# Load core functions (msg_info/msg_ok/msg_error/…)
+# ------------------------------------------------------------------------------
+if command -v curl >/dev/null 2>&1; then
+ # Achtung: bewusst exakt diese URL-Struktur
+ source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/core.func)
+ load_functions
+elif command -v wget >/dev/null 2>&1; then
+ source <(wget -qO- https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/core.func)
+ load_functions
+fi
+
+# ------------------------------------------------------------------------------
+# Strict error handling
+# ------------------------------------------------------------------------------
+# set -Eeuo pipefail
+# trap 'error_handler $? $LINENO "$BASH_COMMAND"' ERR
+# trap on_exit EXIT
+# trap on_interrupt INT
+# trap on_terminate TERM
+
+# error_handler() {
+# local exit_code="$1"
+# local line_number="$2"
+# local command="${3:-}"
+
+# if [[ "$exit_code" -eq 0 ]]; then
+# return 0
+# fi
+
+# printf "\e[?25h"
+# echo -e "\n${RD}[ERROR]${CL} in line ${RD}${line_number}${CL}: exit code ${RD}${exit_code}${CL}: while executing command ${YW}${command}${CL}\n"
+# exit "$exit_code"
+# }
+
+# on_exit() {
+# local exit_code="$?"
+# [[ -n "${lockfile:-}" && -e "$lockfile" ]] && rm -f "$lockfile"
+# exit "$exit_code"
+# }
+
+# on_interrupt() {
+# echo -e "\n${RD}Interrupted by user (SIGINT)${CL}"
+# exit 130
+# }
+
+# on_terminate() {
+# echo -e "\n${RD}Terminated by signal (SIGTERM)${CL}"
+# exit 143
+# }
+
+exit_script() {
+ clear
+ printf "\e[?25h"
+ echo -e "\n${CROSS}${RD}User exited script${CL}\n"
+ kill 0
+ exit 1
+}
+
+# ------------------------------------------------------------------------------
+# Helpers (dynamic versioning / template parsing)
+# ------------------------------------------------------------------------------
+pkg_ver() { dpkg-query -W -f='${Version}\n' "$1" 2>/dev/null || echo ""; }
+pkg_cand() { apt-cache policy "$1" 2>/dev/null | awk '/Candidate:/ {print $2}'; }
+
+ver_ge() { dpkg --compare-versions "$1" ge "$2"; }
+ver_gt() { dpkg --compare-versions "$1" gt "$2"; }
+ver_lt() { dpkg --compare-versions "$1" lt "$2"; }
+
+# Extract Debian OS minor from template name: debian-13-standard_13.1-1_amd64.tar.zst => "13.1"
+parse_template_osver() { sed -n 's/.*_\([0-9][0-9]*\(\.[0-9]\+\)\?\)-.*/\1/p' <<<"$1"; }
+
+# Offer upgrade for pve-container/lxc-pve if candidate > installed; optional auto-retry pct create
+# Returns:
+# 0 = no upgrade needed
+# 1 = upgraded (and if do_retry=yes and retry succeeded, creation done)
+# 2 = user declined
+# 3 = upgrade attempted but failed OR retry failed
+offer_lxc_stack_upgrade_and_maybe_retry() {
+ local do_retry="${1:-no}" # yes|no
+ local _pvec_i _pvec_c _lxcp_i _lxcp_c need=0
+
+ _pvec_i="$(pkg_ver pve-container)"
+ _lxcp_i="$(pkg_ver lxc-pve)"
+ _pvec_c="$(pkg_cand pve-container)"
+ _lxcp_c="$(pkg_cand lxc-pve)"
+
+ if [[ -n "$_pvec_c" && "$_pvec_c" != "none" ]]; then
+ ver_gt "$_pvec_c" "${_pvec_i:-0}" && need=1
+ fi
+ if [[ -n "$_lxcp_c" && "$_lxcp_c" != "none" ]]; then
+ ver_gt "$_lxcp_c" "${_lxcp_i:-0}" && need=1
+ fi
+ if [[ $need -eq 0 ]]; then
+ msg_debug "No newer candidate for pve-container/lxc-pve (installed=$_pvec_i/$_lxcp_i, cand=$_pvec_c/$_lxcp_c)"
+ return 0
+ fi
+
+ echo
+ echo "An update for the Proxmox LXC stack is available:"
+ echo " pve-container: installed=${_pvec_i:-n/a} candidate=${_pvec_c:-n/a}"
+ echo " lxc-pve : installed=${_lxcp_i:-n/a} candidate=${_lxcp_c:-n/a}"
+ echo
+ read -rp "Do you want to upgrade now? [y/N] " _ans
+ case "${_ans,,}" in
+ y | yes)
+ msg_info "Upgrading Proxmox LXC stack (pve-container, lxc-pve)"
+ if apt-get update -qq >/dev/null && apt-get install -y --only-upgrade pve-container lxc-pve >/dev/null; then
+ msg_ok "LXC stack upgraded."
+ if [[ "$do_retry" == "yes" ]]; then
+ msg_info "Retrying container creation after upgrade"
+ if pct create "$CTID" "${TEMPLATE_STORAGE}:vztmpl/${TEMPLATE}" "${PCT_OPTIONS[@]}" >>"$LOGFILE" 2>&1; then
+ msg_ok "Container created successfully after upgrade."
+ return 1
+ else
+ msg_error "pct create still failed after upgrade. See $LOGFILE"
+ return 3
+ fi
+ fi
+ return 1
+ else
+ msg_error "Upgrade failed. Please check APT output."
+ return 3
+ fi
+ ;;
+ *) return 2 ;;
+ esac
+}
+
+# ------------------------------------------------------------------------------
+# Storage discovery / selection helpers
+# ------------------------------------------------------------------------------
+resolve_storage_preselect() {
+ local class="$1" preselect="$2" required_content=""
+ case "$class" in
+ template) required_content="vztmpl" ;;
+ container) required_content="rootdir" ;;
+ *) return 1 ;;
+ esac
+ [[ -z "$preselect" ]] && return 1
+ if ! pvesm status -content "$required_content" | awk 'NR>1{print $1}' | grep -qx -- "$preselect"; then
+ msg_warn "Preselected storage '${preselect}' does not support content '${required_content}' (or not found)"
+ return 1
+ fi
+
+ local line total used free
+ line="$(pvesm status | awk -v s="$preselect" 'NR>1 && $1==s {print $0}')"
+ if [[ -z "$line" ]]; then
+ STORAGE_INFO="n/a"
+ else
+ total="$(awk '{print $4}' <<<"$line")"
+ used="$(awk '{print $5}' <<<"$line")"
+ free="$(awk '{print $6}' <<<"$line")"
+ local total_h used_h free_h
+ if command -v numfmt >/dev/null 2>&1; then
+ total_h="$(numfmt --to=iec --suffix=B --format %.1f "$total" 2>/dev/null || echo "$total")"
+ used_h="$(numfmt --to=iec --suffix=B --format %.1f "$used" 2>/dev/null || echo "$used")"
+ free_h="$(numfmt --to=iec --suffix=B --format %.1f "$free" 2>/dev/null || echo "$free")"
+ STORAGE_INFO="Free: ${free_h} Used: ${used_h}"
+ else
+ STORAGE_INFO="Free: ${free} Used: ${used}"
+ fi
+ fi
+ STORAGE_RESULT="$preselect"
+ return 0
+}
+
+check_storage_support() {
+ local CONTENT="$1" VALID=0
+ while IFS= read -r line; do
+ local STORAGE_NAME
+ STORAGE_NAME=$(awk '{print $1}' <<<"$line")
+ [[ -n "$STORAGE_NAME" ]] && VALID=1
+ done < <(pvesm status -content "$CONTENT" 2>/dev/null | awk 'NR>1')
+ [[ $VALID -eq 1 ]]
+}
+
+select_storage() {
+ local CLASS=$1 CONTENT CONTENT_LABEL
+ case $CLASS in
+ container)
+ CONTENT='rootdir'
+ CONTENT_LABEL='Container'
+ ;;
+ template)
+ CONTENT='vztmpl'
+ CONTENT_LABEL='Container template'
+ ;;
+ iso)
+ CONTENT='iso'
+ CONTENT_LABEL='ISO image'
+ ;;
+ images)
+ CONTENT='images'
+ CONTENT_LABEL='VM Disk image'
+ ;;
+ backup)
+ CONTENT='backup'
+ CONTENT_LABEL='Backup'
+ ;;
+ snippets)
+ CONTENT='snippets'
+ CONTENT_LABEL='Snippets'
+ ;;
+ *)
+ msg_error "Invalid storage class '$CLASS'"
+ return 1
+ ;;
+ esac
+
+ if [[ "$CONTENT" == "rootdir" && -n "${STORAGE:-}" ]]; then
+ if pvesm status -content "$CONTENT" | awk 'NR>1 {print $1}' | grep -qx "$STORAGE"; then
+ STORAGE_RESULT="$STORAGE"
+ msg_info "Using preset storage: $STORAGE_RESULT for $CONTENT_LABEL"
+ return 0
+ else
+ msg_error "Preset storage '$STORAGE' is not valid for content type '$CONTENT'."
+ return 2
+ fi
+ fi
+
+ declare -A STORAGE_MAP
+ local -a MENU=()
+ local COL_WIDTH=0
+
+ while read -r TAG TYPE _ TOTAL USED FREE _; do
+ [[ -n "$TAG" && -n "$TYPE" ]] || continue
+ local DISPLAY="${TAG} (${TYPE})"
+ local USED_FMT=$(numfmt --to=iec --from-unit=K --format %.1f <<<"$USED")
+ local FREE_FMT=$(numfmt --to=iec --from-unit=K --format %.1f <<<"$FREE")
+ local INFO="Free: ${FREE_FMT}B Used: ${USED_FMT}B"
+ STORAGE_MAP["$DISPLAY"]="$TAG"
+ MENU+=("$DISPLAY" "$INFO" "OFF")
+ ((${#DISPLAY} > COL_WIDTH)) && COL_WIDTH=${#DISPLAY}
+ done < <(pvesm status -content "$CONTENT" | awk 'NR>1')
+
+ if [[ ${#MENU[@]} -eq 0 ]]; then
+ msg_error "No storage found for content type '$CONTENT'."
+ return 2
+ fi
+
+ if [[ $((${#MENU[@]} / 3)) -eq 1 ]]; then
+ STORAGE_RESULT="${STORAGE_MAP[${MENU[0]}]}"
+ STORAGE_INFO="${MENU[1]}"
+ return 0
+ fi
+
+ local WIDTH=$((COL_WIDTH + 42))
+ while true; do
+ local DISPLAY_SELECTED
+ DISPLAY_SELECTED=$(whiptail --backtitle "Proxmox VE Helper Scripts" \
+ --title "Storage Pools" \
+ --radiolist "Which storage pool for ${CONTENT_LABEL,,}?\n(Spacebar to select)" \
+ 16 "$WIDTH" 6 "${MENU[@]}" 3>&1 1>&2 2>&3) || exit_script
+
+ DISPLAY_SELECTED=$(sed 's/[[:space:]]*$//' <<<"$DISPLAY_SELECTED")
+ if [[ -z "$DISPLAY_SELECTED" || -z "${STORAGE_MAP[$DISPLAY_SELECTED]+_}" ]]; then
+ whiptail --msgbox "No valid storage selected. Please try again." 8 58
+ continue
+ fi
+ STORAGE_RESULT="${STORAGE_MAP[$DISPLAY_SELECTED]}"
+ for ((i = 0; i < ${#MENU[@]}; i += 3)); do
+ if [[ "${MENU[$i]}" == "$DISPLAY_SELECTED" ]]; then
+ STORAGE_INFO="${MENU[$i + 1]}"
+ break
+ fi
+ done
+ return 0
+ done
+}
+
+# ------------------------------------------------------------------------------
+# Required input variables
+# ------------------------------------------------------------------------------
+[[ "${CTID:-}" ]] || {
+ msg_error "You need to set 'CTID' variable."
+ exit 203
+}
+[[ "${PCT_OSTYPE:-}" ]] || {
+ msg_error "You need to set 'PCT_OSTYPE' variable."
+ exit 204
+}
+
+msg_debug "CTID=$CTID"
+msg_debug "PCT_OSTYPE=$PCT_OSTYPE"
+msg_debug "PCT_OSVERSION=${PCT_OSVERSION:-default}"
+
+# ID checks
+[[ "$CTID" -ge 100 ]] || {
+ msg_error "ID cannot be less than 100."
+ exit 205
+}
+if qm status "$CTID" &>/dev/null || pct status "$CTID" &>/dev/null; then
+ echo -e "ID '$CTID' is already in use."
+ unset CTID
+ msg_error "Cannot use ID that is already in use."
+ exit 206
+fi
+
+# Storage capability check
+check_storage_support "rootdir" || {
+ msg_error "No valid storage found for 'rootdir' [Container]"
+ exit 1
+}
+check_storage_support "vztmpl" || {
+ msg_error "No valid storage found for 'vztmpl' [Template]"
+ exit 1
+}
+
+# Template storage selection
+if resolve_storage_preselect template "${TEMPLATE_STORAGE:-}"; then
+ TEMPLATE_STORAGE="$STORAGE_RESULT"
+ TEMPLATE_STORAGE_INFO="$STORAGE_INFO"
+ msg_ok "Storage ${BL}${TEMPLATE_STORAGE}${CL} (${TEMPLATE_STORAGE_INFO}) [Template]"
+else
+ while true; do
+ if select_storage template; then
+ TEMPLATE_STORAGE="$STORAGE_RESULT"
+ TEMPLATE_STORAGE_INFO="$STORAGE_INFO"
+ msg_ok "Storage ${BL}${TEMPLATE_STORAGE}${CL} (${TEMPLATE_STORAGE_INFO}) [Template]"
+ break
+ fi
+ done
+fi
+
+# Container storage selection
+if resolve_storage_preselect container "${CONTAINER_STORAGE:-}"; then
+ CONTAINER_STORAGE="$STORAGE_RESULT"
+ CONTAINER_STORAGE_INFO="$STORAGE_INFO"
+ msg_ok "Storage ${BL}${CONTAINER_STORAGE}${CL} (${CONTAINER_STORAGE_INFO}) [Container]"
+else
+ while true; do
+ if select_storage container; then
+ CONTAINER_STORAGE="$STORAGE_RESULT"
+ CONTAINER_STORAGE_INFO="$STORAGE_INFO"
+ msg_ok "Storage ${BL}${CONTAINER_STORAGE}${CL} (${CONTAINER_STORAGE_INFO}) [Container]"
+ break
+ fi
+ done
+fi
+
+# Validate content types
+msg_info "Validating content types of storage '$CONTAINER_STORAGE'"
+STORAGE_CONTENT=$(grep -A4 -E "^(zfspool|dir|lvmthin|lvm): $CONTAINER_STORAGE" /etc/pve/storage.cfg | grep content | awk '{$1=""; print $0}' | xargs)
+msg_debug "Storage '$CONTAINER_STORAGE' has content types: $STORAGE_CONTENT"
+grep -qw "rootdir" <<<"$STORAGE_CONTENT" || {
+ msg_error "Storage '$CONTAINER_STORAGE' does not support 'rootdir'. Cannot create LXC."
+ exit 217
+}
+$STD msg_ok "Storage '$CONTAINER_STORAGE' supports 'rootdir'"
+
+msg_info "Validating content types of template storage '$TEMPLATE_STORAGE'"
+TEMPLATE_CONTENT=$(grep -A4 -E "^[^:]+: $TEMPLATE_STORAGE" /etc/pve/storage.cfg | grep content | awk '{$1=""; print $0}' | xargs)
+msg_debug "Template storage '$TEMPLATE_STORAGE' has content types: $TEMPLATE_CONTENT"
+if ! grep -qw "vztmpl" <<<"$TEMPLATE_CONTENT"; then
+ msg_warn "Template storage '$TEMPLATE_STORAGE' does not declare 'vztmpl'. This may cause pct create to fail."
+else
+ $STD msg_ok "Template storage '$TEMPLATE_STORAGE' supports 'vztmpl'"
+fi
+
+# Free space check
+STORAGE_FREE=$(pvesm status | awk -v s="$CONTAINER_STORAGE" '$1 == s { print $6 }')
+REQUIRED_KB=$((${PCT_DISK_SIZE:-8} * 1024 * 1024))
+[[ "$STORAGE_FREE" -ge "$REQUIRED_KB" ]] || {
+ msg_error "Not enough space on '$CONTAINER_STORAGE'. Needed: ${PCT_DISK_SIZE:-8}G."
+ exit 214
+}
+
+# Cluster quorum (if cluster)
+if [[ -f /etc/pve/corosync.conf ]]; then
+ msg_info "Checking cluster quorum"
+ if ! pvecm status | awk -F':' '/^Quorate/ { exit ($2 ~ /Yes/) ? 0 : 1 }'; then
+ msg_error "Cluster is not quorate. Start all nodes or configure quorum device (QDevice)."
+ exit 210
+ fi
+ msg_ok "Cluster is quorate"
+fi
+
+# ------------------------------------------------------------------------------
+# Template discovery & validation
+# ------------------------------------------------------------------------------
+TEMPLATE_SEARCH="${PCT_OSTYPE}-${PCT_OSVERSION:-}"
+case "$PCT_OSTYPE" in
+debian | ubuntu) TEMPLATE_PATTERN="-standard_" ;;
+alpine | fedora | rocky | centos) TEMPLATE_PATTERN="-default_" ;;
+*) TEMPLATE_PATTERN="" ;;
+esac
+
+msg_info "Searching for template '$TEMPLATE_SEARCH'"
+
+mapfile -t LOCAL_TEMPLATES < <(
+ pveam list "$TEMPLATE_STORAGE" 2>/dev/null |
+ awk -v s="$TEMPLATE_SEARCH" -v p="$TEMPLATE_PATTERN" '$1 ~ s && $1 ~ p {print $1}' |
+ sed 's|.*/||' | sort -t - -k 2 -V
+)
+
+pveam update >/dev/null 2>&1 || msg_warn "Could not update template catalog (pveam update failed)."
+mapfile -t ONLINE_TEMPLATES < <(
+ pveam available -section system 2>/dev/null |
+ sed -n "s/.*\($TEMPLATE_SEARCH.*$TEMPLATE_PATTERN.*\)/\1/p" |
+ sort -t - -k 2 -V
+)
+ONLINE_TEMPLATE=""
+[[ ${#ONLINE_TEMPLATES[@]} -gt 0 ]] && ONLINE_TEMPLATE="${ONLINE_TEMPLATES[-1]}"
+
+if [[ ${#LOCAL_TEMPLATES[@]} -gt 0 ]]; then
+ TEMPLATE="${LOCAL_TEMPLATES[-1]}"
+ TEMPLATE_SOURCE="local"
+else
+ TEMPLATE="$ONLINE_TEMPLATE"
+ TEMPLATE_SOURCE="online"
+fi
+
+TEMPLATE_PATH="$(pvesm path $TEMPLATE_STORAGE:vztmpl/$TEMPLATE 2>/dev/null || true)"
+if [[ -z "$TEMPLATE_PATH" ]]; then
+ TEMPLATE_BASE=$(awk -v s="$TEMPLATE_STORAGE" '$1==s {f=1} f && /path/ {print $2; exit}' /etc/pve/storage.cfg)
+ [[ -n "$TEMPLATE_BASE" ]] && TEMPLATE_PATH="$TEMPLATE_BASE/template/cache/$TEMPLATE"
+fi
+[[ -n "$TEMPLATE_PATH" ]] || {
+ msg_error "Unable to resolve template path for $TEMPLATE_STORAGE. Check storage type and permissions."
+ exit 220
+}
+
+msg_ok "Template ${BL}$TEMPLATE${CL} [$TEMPLATE_SOURCE]"
+msg_debug "Resolved TEMPLATE_PATH=$TEMPLATE_PATH"
+
+NEED_DOWNLOAD=0
+if [[ ! -f "$TEMPLATE_PATH" ]]; then
+ msg_info "Template not present locally – will download."
+ NEED_DOWNLOAD=1
+elif [[ ! -r "$TEMPLATE_PATH" ]]; then
+ msg_error "Template file exists but is not readable – check permissions."
+ exit 221
+elif [[ "$(stat -c%s "$TEMPLATE_PATH")" -lt 1000000 ]]; then
+ if [[ -n "$ONLINE_TEMPLATE" ]]; then
+ msg_warn "Template file too small (<1MB) – re-downloading."
+ NEED_DOWNLOAD=1
+ else
+ msg_warn "Template looks too small, but no online version exists. Keeping local file."
+ fi
+elif ! tar -tf "$TEMPLATE_PATH" &>/dev/null; then
+ if [[ -n "$ONLINE_TEMPLATE" ]]; then
+ msg_warn "Template appears corrupted – re-downloading."
+ NEED_DOWNLOAD=1
+ else
+ msg_warn "Template appears corrupted, but no online version exists. Keeping local file."
+ fi
+else
+ $STD msg_ok "Template $TEMPLATE is present and valid."
+fi
+
+if [[ "$TEMPLATE_SOURCE" == "local" && -n "$ONLINE_TEMPLATE" && "$TEMPLATE" != "$ONLINE_TEMPLATE" ]]; then
+ msg_warn "Local template is outdated: $TEMPLATE (latest available: $ONLINE_TEMPLATE)"
+ if whiptail --yesno "A newer template is available:\n$ONLINE_TEMPLATE\n\nDo you want to download and use it instead?" 12 70; then
+ TEMPLATE="$ONLINE_TEMPLATE"
+ NEED_DOWNLOAD=1
+ else
+ msg_info "Continuing with local template $TEMPLATE"
+ fi
+fi
+
+if [[ "$NEED_DOWNLOAD" -eq 1 ]]; then
+ [[ -f "$TEMPLATE_PATH" ]] && rm -f "$TEMPLATE_PATH"
+ for attempt in {1..3}; do
+ msg_info "Attempt $attempt: Downloading template $TEMPLATE to $TEMPLATE_STORAGE"
+ if pveam download "$TEMPLATE_STORAGE" "$TEMPLATE" >/dev/null 2>&1; then
+ msg_ok "Template download successful."
+ break
+ fi
+ if [[ $attempt -eq 3 ]]; then
+ msg_error "Failed after 3 attempts. Please check network access, permissions, or manually run:\n pveam download $TEMPLATE_STORAGE $TEMPLATE"
+ exit 222
+ fi
+ sleep $((attempt * 5))
+ done
+fi
+
+if ! pveam list "$TEMPLATE_STORAGE" 2>/dev/null | grep -q "$TEMPLATE"; then
+ msg_error "Template $TEMPLATE not available in storage $TEMPLATE_STORAGE after download."
+ exit 223
+fi
+
+# ------------------------------------------------------------------------------
+# Dynamic preflight for Debian 13.x: offer upgrade if available (no hard mins)
+# ------------------------------------------------------------------------------
+if [[ "$PCT_OSTYPE" == "debian" ]]; then
+ OSVER="$(parse_template_osver "$TEMPLATE")"
+ if [[ -n "$OSVER" ]]; then
+ # Proactive, aber ohne Abbruch – nur Angebot
+ offer_lxc_stack_upgrade_and_maybe_retry "no" || true
+ fi
+fi
+
+# ------------------------------------------------------------------------------
+# Create LXC Container
+# ------------------------------------------------------------------------------
+msg_info "Creating LXC container"
+
+# Ensure subuid/subgid entries exist
+grep -q "root:100000:65536" /etc/subuid || echo "root:100000:65536" >>/etc/subuid
+grep -q "root:100000:65536" /etc/subgid || echo "root:100000:65536" >>/etc/subgid
+
+# Assemble pct options
+PCT_OPTIONS=(${PCT_OPTIONS[@]:-${DEFAULT_PCT_OPTIONS[@]}})
+[[ " ${PCT_OPTIONS[*]} " =~ " -rootfs " ]] || PCT_OPTIONS+=(-rootfs "$CONTAINER_STORAGE:${PCT_DISK_SIZE:-8}")
+
+# Lock by template file (avoid concurrent downloads/creates)
+lockfile="/tmp/template.${TEMPLATE}.lock"
+exec 9>"$lockfile" || {
+ msg_error "Failed to create lock file '$lockfile'."
+ exit 200
+}
+flock -w 60 9 || {
+ msg_error "Timeout while waiting for template lock."
+ exit 211
+}
+
+LOGFILE="/tmp/pct_create_${CTID}.log"
+msg_debug "pct create command: pct create $CTID ${TEMPLATE_STORAGE}:vztmpl/${TEMPLATE} ${PCT_OPTIONS[*]}"
+msg_debug "Logfile: $LOGFILE"
+
+# First attempt
+if ! pct create "$CTID" "${TEMPLATE_STORAGE}:vztmpl/${TEMPLATE}" "${PCT_OPTIONS[@]}" >"$LOGFILE" 2>&1; then
+ msg_error "Container creation failed on ${TEMPLATE_STORAGE}. Checking template..."
+
+ # Validate template file
+ if [[ ! -s "$TEMPLATE_PATH" || "$(stat -c%s "$TEMPLATE_PATH")" -lt 1000000 ]]; then
+ msg_warn "Template file too small or missing – re-downloading."
+ rm -f "$TEMPLATE_PATH"
+ pveam download "$TEMPLATE_STORAGE" "$TEMPLATE"
+ elif ! tar -tf "$TEMPLATE_PATH" &>/dev/null; then
+ if [[ -n "$ONLINE_TEMPLATE" ]]; then
+ msg_warn "Template appears corrupted – re-downloading."
+ rm -f "$TEMPLATE_PATH"
+ pveam download "$TEMPLATE_STORAGE" "$TEMPLATE"
+ else
+ msg_warn "Template appears corrupted, but no online version exists. Skipping re-download."
+ fi
+ fi
+
+ # Retry after repair
+ if ! pct create "$CTID" "${TEMPLATE_STORAGE}:vztmpl/${TEMPLATE}" "${PCT_OPTIONS[@]}" >>"$LOGFILE" 2>&1; then
+ # Fallback to local storage
+ if [[ "$TEMPLATE_STORAGE" != "local" ]]; then
+ msg_warn "Retrying container creation with fallback to local storage..."
+ LOCAL_TEMPLATE_PATH="/var/lib/vz/template/cache/$TEMPLATE"
+ if [[ ! -f "$LOCAL_TEMPLATE_PATH" ]]; then
+ msg_info "Downloading template to local..."
+ pveam download local "$TEMPLATE" >/dev/null 2>&1
+ fi
+ if pct create "$CTID" "local:vztmpl/${TEMPLATE}" "${PCT_OPTIONS[@]}" >>"$LOGFILE" 2>&1; then
+ msg_ok "Container successfully created using local fallback."
+ else
+ # --- Dynamic stack upgrade + auto-retry on the well-known error pattern ---
+ if grep -qiE 'unsupported .* version' "$LOGFILE"; then
+ echo
+ echo "pct reported 'unsupported ... version' – your LXC stack might be too old for this template."
+ echo "We can try to upgrade 'pve-container' and 'lxc-pve' now and retry automatically."
+ if offer_lxc_stack_upgrade_and_maybe_retry "yes"; then
+ : # success after retry
+ else
+ rc=$?
+ case $rc in
+ 2) echo "Upgrade was declined. Please update and re-run:
+ apt update && apt install --only-upgrade pve-container lxc-pve" ;;
+ 3) echo "Upgrade and/or retry failed. Please inspect: $LOGFILE" ;;
+ esac
+ exit 231
+ fi
+ else
+ msg_error "Container creation failed even with local fallback. See $LOGFILE"
+ if whiptail --yesno "pct create failed.\nDo you want to enable verbose debug mode and view detailed logs?" 12 70; then
+ set -x
+ bash -x -c "pct create $CTID local:vztmpl/${TEMPLATE} ${PCT_OPTIONS[*]}" 2>&1 | tee -a "$LOGFILE"
+ set +x
+ fi
+ exit 209
+ fi
+ fi
+ else
+ msg_error "Container creation failed on local storage. See $LOGFILE"
+ # --- Dynamic stack upgrade + auto-retry on the well-known error pattern ---
+ if grep -qiE 'unsupported .* version' "$LOGFILE"; then
+ echo
+ echo "pct reported 'unsupported ... version' – your LXC stack might be too old for this template."
+ echo "We can try to upgrade 'pve-container' and 'lxc-pve' now and retry automatically."
+ if offer_lxc_stack_upgrade_and_maybe_retry "yes"; then
+ : # success after retry
+ else
+ rc=$?
+ case $rc in
+ 2) echo "Upgrade was declined. Please update and re-run:
+ apt update && apt install --only-upgrade pve-container lxc-pve" ;;
+ 3) echo "Upgrade and/or retry failed. Please inspect: $LOGFILE" ;;
+ esac
+ exit 231
+ fi
+ else
+ if whiptail --yesno "pct create failed.\nDo you want to enable verbose debug mode and view detailed logs?" 12 70; then
+ set -x
+ bash -x -c "pct create $CTID local:vztmpl/${TEMPLATE} ${PCT_OPTIONS[*]}" 2>&1 | tee -a "$LOGFILE"
+ set +x
+ fi
+ exit 209
+ fi
+ fi
+ fi
+fi
+
+# Verify container exists
+pct list | awk '{print $1}' | grep -qx "$CTID" || {
+ msg_error "Container ID $CTID not listed in 'pct list'. See $LOGFILE"
+ exit 215
+}
+
+# Verify config rootfs
+grep -q '^rootfs:' "/etc/pve/lxc/$CTID.conf" || {
+ msg_error "RootFS entry missing in container config. See $LOGFILE"
+ exit 216
+}
+
+msg_ok "LXC Container ${BL}$CTID${CL} ${GN}was successfully created."
diff --git a/misc/github.func b/misc/deferred/github.func
similarity index 100%
rename from misc/github.func
rename to misc/deferred/github.func
diff --git a/misc/error_handler.func b/misc/error_handler.func
new file mode 100644
index 000000000..5aa38e5e1
--- /dev/null
+++ b/misc/error_handler.func
@@ -0,0 +1,180 @@
+#!/usr/bin/env bash
+# ------------------------------------------------------------------------------
+# Error & Signal Handling for ProxmoxVED Scripts
+# ------------------------------------------------------------------------------
+# Copyright (c) 2021-2025 community-scripts ORG
+# Author: MickLesk (CanbiZ)
+# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
+# ------------------------------------------------------------------------------
+
+explain_exit_code() {
+ local code="$1"
+ case "$code" in
+ # --- Generic / Shell ---
+ 1) echo "General error / Operation not permitted" ;;
+ 2) echo "Misuse of shell builtins (e.g. syntax error)" ;;
+ 126) echo "Command invoked cannot execute (permission problem?)" ;;
+ 127) echo "Command not found" ;;
+ 128) echo "Invalid argument to exit" ;;
+ 130) echo "Terminated by Ctrl+C (SIGINT)" ;;
+ 137) echo "Killed (SIGKILL / Out of memory?)" ;;
+ 139) echo "Segmentation fault (core dumped)" ;;
+ 143) echo "Terminated (SIGTERM)" ;;
+
+ # --- Package manager / APT / DPKG ---
+ 100) echo "APT: Package manager error (broken packages / dependency problems)" ;;
+ 101) echo "APT: Configuration error (bad sources.list, malformed config)" ;;
+ 255) echo "DPKG: Fatal internal error" ;;
+
+ # --- Node.js / npm / pnpm / yarn ---
+ 243) echo "Node.js: Out of memory (JavaScript heap out of memory)" ;;
+ 245) echo "Node.js: Invalid command-line option" ;;
+ 246) echo "Node.js: Internal JavaScript Parse Error" ;;
+ 247) echo "Node.js: Fatal internal error" ;;
+ 248) echo "Node.js: Invalid C++ addon / N-API failure" ;;
+ 249) echo "Node.js: Inspector error" ;;
+ 254) echo "npm/pnpm/yarn: Unknown fatal error" ;;
+
+ # --- Python / pip / uv ---
+ 210) echo "Python: Virtualenv / uv environment missing or broken" ;;
+ 211) echo "Python: Dependency resolution failed" ;;
+ 212) echo "Python: Installation aborted (permissions or EXTERNALLY-MANAGED)" ;;
+
+ # --- PostgreSQL ---
+ 231) echo "PostgreSQL: Connection failed (server not running / wrong socket)" ;;
+ 232) echo "PostgreSQL: Authentication failed (bad user/password)" ;;
+ 233) echo "PostgreSQL: Database does not exist" ;;
+ 234) echo "PostgreSQL: Fatal error in query / syntax" ;;
+
+  # --- MySQL / MariaDB (NOTE(review): 243 duplicates the Node.js code above; bash case takes the first match, so that branch is unreachable — and MongoDB's 254 below likewise collides with npm's 254) ---
+ 241) echo "MySQL/MariaDB: Connection failed (server not running / wrong socket)" ;;
+ 242) echo "MySQL/MariaDB: Authentication failed (bad user/password)" ;;
+ 243) echo "MySQL/MariaDB: Database does not exist" ;;
+ 244) echo "MySQL/MariaDB: Fatal error in query / syntax" ;;
+
+ # --- MongoDB ---
+ 251) echo "MongoDB: Connection failed (server not running)" ;;
+ 252) echo "MongoDB: Authentication failed (bad user/password)" ;;
+ 253) echo "MongoDB: Database not found" ;;
+ 254) echo "MongoDB: Fatal query error" ;;
+
+  # --- Proxmox Custom Codes (NOTE(review): 210 and 231 duplicate Python/PostgreSQL codes above and are unreachable — e.g. 'exit 231' from create_lxc_container reports as 'PostgreSQL: Connection failed', and 'exit 211' (template-lock timeout) maps to the Python message; renumber these ranges) ---
+ 200) echo "Custom: Failed to create lock file" ;;
+ 203) echo "Custom: Missing CTID variable" ;;
+ 204) echo "Custom: Missing PCT_OSTYPE variable" ;;
+ 205) echo "Custom: Invalid CTID (<100)" ;;
+ 209) echo "Custom: Container creation failed" ;;
+ 210) echo "Custom: Cluster not quorate" ;;
+ 214) echo "Custom: Not enough storage space" ;;
+ 215) echo "Custom: Container ID not listed" ;;
+ 216) echo "Custom: RootFS entry missing in config" ;;
+ 217) echo "Custom: Storage does not support rootdir" ;;
+ 220) echo "Custom: Unable to resolve template path" ;;
+ 222) echo "Custom: Template download failed after 3 attempts" ;;
+ 223) echo "Custom: Template not available after download" ;;
+ 231) echo "Custom: LXC stack upgrade/retry failed" ;;
+
+ # --- Default ---
+ *) echo "Unknown error" ;;
+ esac
+}
+
+# === Error handler ============================================================
+error_handler() {
+ local exit_code=${1:-$?}
+ local command=${2:-${BASH_COMMAND:-unknown}}
+ local line_number=${BASH_LINENO[0]:-unknown}
+
+ command="${command//\$STD/}"
+
+ if [[ "$exit_code" -eq 0 ]]; then
+ return 0
+ fi
+
+ local explanation
+ explanation="$(explain_exit_code "$exit_code")"
+
+ printf "\e[?25h"
+
+ # Use msg_error if available, fallback to echo
+ if declare -f msg_error >/dev/null 2>&1; then
+ msg_error "in line ${line_number}: exit code ${exit_code} (${explanation}): while executing command ${command}"
+ else
+ echo -e "\n${RD}[ERROR]${CL} in line ${RD}${line_number}${CL}: exit code ${RD}${exit_code}${CL} (${explanation}): while executing command ${YWB}${command}${CL}\n"
+ fi
+
+ if [[ -n "${DEBUG_LOGFILE:-}" ]]; then
+ {
+ echo "------ ERROR ------"
+ echo "Timestamp : $(date '+%Y-%m-%d %H:%M:%S')"
+ echo "Exit Code : $exit_code ($explanation)"
+ echo "Line : $line_number"
+ echo "Command : $command"
+ echo "-------------------"
+ } >>"$DEBUG_LOGFILE"
+ fi
+
+ if [[ -n "${SILENT_LOGFILE:-}" && -s "$SILENT_LOGFILE" ]]; then
+ echo "--- Last 20 lines of silent log ---"
+ tail -n 20 "$SILENT_LOGFILE"
+ echo "-----------------------------------"
+
+ # Copy log to container home for later retrieval (if running inside container via pct exec)
+ if [[ -d /root ]]; then
+ local container_log="/root/.install-${SESSION_ID:-error}.log"
+ cp "$SILENT_LOGFILE" "$container_log" 2>/dev/null || true
+ if declare -f msg_custom >/dev/null 2>&1; then
+ msg_custom "📋" "${YW}" "Log saved to: ${container_log}"
+ else
+ echo -e "${YW}Log saved to:${CL} ${BL}${container_log}${CL}"
+ fi
+ else
+ # Running on host - show local path
+ if declare -f msg_custom >/dev/null 2>&1; then
+ msg_custom "📋" "${YW}" "Full log: ${SILENT_LOGFILE}"
+ else
+ echo -e "${YW}Full log:${CL} ${BL}${SILENT_LOGFILE}${CL}"
+ fi
+ fi
+ fi
+
+ exit "$exit_code"
+}
+
+# === Exit handler =============================================================
+on_exit() {
+ local exit_code=$?
+ [[ -n "${lockfile:-}" && -e "$lockfile" ]] && rm -f "$lockfile"
+ exit "$exit_code"
+}
+
+# === Signal handlers ==========================================================
+on_interrupt() {
+ if declare -f msg_error >/dev/null 2>&1; then
+ msg_error "Interrupted by user (SIGINT)"
+ else
+ echo -e "\n${RD}Interrupted by user (SIGINT)${CL}"
+ fi
+ exit 130
+}
+
+on_terminate() {
+ if declare -f msg_error >/dev/null 2>&1; then
+ msg_error "Terminated by signal (SIGTERM)"
+ else
+ echo -e "\n${RD}Terminated by signal (SIGTERM)${CL}"
+ fi
+ exit 143
+}
+
+# === Init traps ===============================================================
+catch_errors() {
+ set -Ee -o pipefail
+ if [ "${STRICT_UNSET:-0}" = "1" ]; then
+ set -u
+ fi
+ trap 'error_handler' ERR
+ trap on_exit EXIT
+ trap on_interrupt INT
+ trap on_terminate TERM
+}
diff --git a/misc/install.func b/misc/install.func
index e3751c295..f741b921d 100644
--- a/misc/install.func
+++ b/misc/install.func
@@ -2,139 +2,138 @@
# Author: tteck (tteckster)
# Co-Author: MickLesk
# Co-Author: michelroegl-brunner
-# License: MIT
-# https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
+# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
if ! command -v curl >/dev/null 2>&1; then
- printf "\r\e[2K%b" '\033[93m Setup Source \033[m' >&2
- apt-get update >/dev/null 2>&1
- apt-get install -y curl >/dev/null 2>&1
+ printf "\r\e[2K%b" '\033[93m Setup Source \033[m' >&2
+ apt-get update >/dev/null 2>&1
+ apt-get install -y curl >/dev/null 2>&1
fi
source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/core.func)
+source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/error_handler.func)
load_functions
+catch_errors
# This function enables IPv6 if it's not disabled and sets verbose mode
verb_ip6() {
- set_std_mode # Set STD mode based on VERBOSE
+ set_std_mode # Set STD mode based on VERBOSE
- if [ "$DISABLEIPV6" == "yes" ]; then
- echo "net.ipv6.conf.all.disable_ipv6 = 1" >>/etc/sysctl.conf
- $STD sysctl -p
- fi
+ if [ "$DISABLEIPV6" == "yes" ]; then
+ echo "net.ipv6.conf.all.disable_ipv6 = 1" >>/etc/sysctl.conf
+ $STD sysctl -p
+ fi
}
-# This function sets error handling options and defines the error_handler function to handle errors
-catch_errors() {
- set -Eeuo pipefail
- trap 'error_handler $LINENO "$BASH_COMMAND"' ERR
-}
+# # This function sets error handling options and defines the error_handler function to handle errors
+# catch_errors() {
+# set -Eeuo pipefail
+# trap 'error_handler $LINENO "$BASH_COMMAND"' ERR
+# }
-# This function handles errors
-error_handler() {
- source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/api.func)
- printf "\e[?25h"
- local exit_code="$?"
- local line_number="$1"
- local command="$2"
- local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
- echo -e "\n$error_message"
+# # This function handles errors
+# error_handler() {
+# source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/api.func)
+# local exit_code="$1"
+# local line_number="$2"
+# local command="${3:-}"
- if [[ "$line_number" -eq 50 ]]; then
- echo -e "The silent function has suppressed the error, run the script with verbose mode enabled, which will provide more detailed output.\n"
- post_update_to_api "failed" "No error message, script ran in silent mode"
- else
- post_update_to_api "failed" "${command}"
- fi
-}
+# if [[ "$exit_code" -eq 0 ]]; then
+# return 0
+# fi
+
+# printf "\e[?25h"
+# echo -e "\n${RD}[ERROR]${CL} in line ${RD}${line_number}${CL}: exit code ${RD}${exit_code}${CL}: while executing command ${YW}${command}${CL}\n"
+# exit "$exit_code"
+#}
# This function sets up the Container OS by generating the locale, setting the timezone, and checking the network connection
setting_up_container() {
- msg_info "Setting up Container OS"
- for ((i = RETRY_NUM; i > 0; i--)); do
- if [ "$(hostname -I)" != "" ]; then
- break
+ msg_info "Setting up Container OS"
+ for ((i = RETRY_NUM; i > 0; i--)); do
+ if [ "$(hostname -I)" != "" ]; then
+ break
+ fi
+ echo 1>&2 -en "${CROSS}${RD} No Network! "
+ sleep $RETRY_EVERY
+ done
+ if [ "$(hostname -I)" = "" ]; then
+ echo 1>&2 -e "\n${CROSS}${RD} No Network After $RETRY_NUM Tries${CL}"
+ echo -e "${NETWORK}Check Network Settings"
+ exit 1
fi
- echo 1>&2 -en "${CROSS}${RD} No Network! "
- sleep $RETRY_EVERY
- done
- if [ "$(hostname -I)" = "" ]; then
- echo 1>&2 -e "\n${CROSS}${RD} No Network After $RETRY_NUM Tries${CL}"
- echo -e "${NETWORK}Check Network Settings"
- exit 1
- fi
- rm -rf /usr/lib/python3.*/EXTERNALLY-MANAGED
- systemctl disable -q --now systemd-networkd-wait-online.service
- msg_ok "Set up Container OS"
- #msg_custom "${CM}" "${GN}" "Network Connected: ${BL}$(hostname -I)"
- msg_ok "Network Connected: ${BL}$(hostname -I)"
+ rm -rf /usr/lib/python3.*/EXTERNALLY-MANAGED
+ systemctl disable -q --now systemd-networkd-wait-online.service
+ msg_ok "Set up Container OS"
+ #msg_custom "${CM}" "${GN}" "Network Connected: ${BL}$(hostname -I)"
+ msg_ok "Network Connected: ${BL}$(hostname -I)"
}
# This function checks the network connection by pinging a known IP address and prompts the user to continue if the internet is not connected
network_check() {
- set +e
- trap - ERR
- ipv4_connected=false
- ipv6_connected=false
- sleep 1
+ set +e
+ trap - ERR
+ ipv4_connected=false
+ ipv6_connected=false
+ sleep 1
- # Check IPv4 connectivity to Google, Cloudflare & Quad9 DNS servers.
- if ping -c 1 -W 1 1.1.1.1 &>/dev/null || ping -c 1 -W 1 8.8.8.8 &>/dev/null || ping -c 1 -W 1 9.9.9.9 &>/dev/null; then
- msg_ok "IPv4 Internet Connected"
- ipv4_connected=true
- else
- msg_error "IPv4 Internet Not Connected"
- fi
-
- # Check IPv6 connectivity to Google, Cloudflare & Quad9 DNS servers.
- if ping6 -c 1 -W 1 2606:4700:4700::1111 &>/dev/null || ping6 -c 1 -W 1 2001:4860:4860::8888 &>/dev/null || ping6 -c 1 -W 1 2620:fe::fe &>/dev/null; then
- msg_ok "IPv6 Internet Connected"
- ipv6_connected=true
- else
- msg_error "IPv6 Internet Not Connected"
- fi
-
- # If both IPv4 and IPv6 checks fail, prompt the user
- if [[ $ipv4_connected == false && $ipv6_connected == false ]]; then
- read -r -p "No Internet detected, would you like to continue anyway? " prompt
- if [[ "${prompt,,}" =~ ^(y|yes)$ ]]; then
- echo -e "${INFO}${RD}Expect Issues Without Internet${CL}"
+ # Check IPv4 connectivity to Google, Cloudflare & Quad9 DNS servers.
+ if ping -c 1 -W 1 1.1.1.1 &>/dev/null || ping -c 1 -W 1 8.8.8.8 &>/dev/null || ping -c 1 -W 1 9.9.9.9 &>/dev/null; then
+ msg_ok "IPv4 Internet Connected"
+ ipv4_connected=true
else
- echo -e "${NETWORK}Check Network Settings"
- exit 1
+ msg_error "IPv4 Internet Not Connected"
fi
- fi
- # DNS resolution checks for GitHub-related domains (IPv4 and/or IPv6)
- GIT_HOSTS=("github.com" "raw.githubusercontent.com" "api.github.com" "git.community-scripts.org")
- GIT_STATUS="Git DNS:"
- DNS_FAILED=false
-
- for HOST in "${GIT_HOSTS[@]}"; do
- RESOLVEDIP=$(getent hosts "$HOST" | awk '{ print $1 }' | grep -E '(^([0-9]{1,3}\.){3}[0-9]{1,3}$)|(^[a-fA-F0-9:]+$)' | head -n1)
- if [[ -z "$RESOLVEDIP" ]]; then
- GIT_STATUS+="$HOST:($DNSFAIL)"
- DNS_FAILED=true
+ # Check IPv6 connectivity to Google, Cloudflare & Quad9 DNS servers.
+ if ping6 -c 1 -W 1 2606:4700:4700::1111 &>/dev/null || ping6 -c 1 -W 1 2001:4860:4860::8888 &>/dev/null || ping6 -c 1 -W 1 2620:fe::fe &>/dev/null; then
+ msg_ok "IPv6 Internet Connected"
+ ipv6_connected=true
else
- GIT_STATUS+=" $HOST:($DNSOK)"
+ msg_error "IPv6 Internet Not Connected"
fi
- done
- if [[ "$DNS_FAILED" == true ]]; then
- fatal "$GIT_STATUS"
- else
- msg_ok "$GIT_STATUS"
- fi
+ # If both IPv4 and IPv6 checks fail, prompt the user
+ if [[ $ipv4_connected == false && $ipv6_connected == false ]]; then
+ read -r -p "No Internet detected, would you like to continue anyway? " prompt
+ if [[ "${prompt,,}" =~ ^(y|yes)$ ]]; then
+ echo -e "${INFO}${RD}Expect Issues Without Internet${CL}"
+ else
+ echo -e "${NETWORK}Check Network Settings"
+ exit 1
+ fi
+ fi
- set -e
- trap 'error_handler $LINENO "$BASH_COMMAND"' ERR
+ # DNS resolution checks for GitHub-related domains (IPv4 and/or IPv6)
+ GIT_HOSTS=("github.com" "raw.githubusercontent.com" "api.github.com" "git.community-scripts.org")
+ GIT_STATUS="Git DNS:"
+ DNS_FAILED=false
+
+ for HOST in "${GIT_HOSTS[@]}"; do
+ RESOLVEDIP=$(getent hosts "$HOST" | awk '{ print $1 }' | grep -E '(^([0-9]{1,3}\.){3}[0-9]{1,3}$)|(^[a-fA-F0-9:]+$)' | head -n1)
+ if [[ -z "$RESOLVEDIP" ]]; then
+            GIT_STATUS+=" $HOST:($DNSFAIL)"
+ DNS_FAILED=true
+ else
+ GIT_STATUS+=" $HOST:($DNSOK)"
+ fi
+ done
+
+ if [[ "$DNS_FAILED" == true ]]; then
+ fatal "$GIT_STATUS"
+ else
+ msg_ok "$GIT_STATUS"
+ fi
+
+ set -e
+    trap 'error_handler' ERR
}
# This function updates the Container OS by running apt-get update and upgrade
update_os() {
- msg_info "Updating Container OS"
- if [[ "$CACHER" == "yes" ]]; then
- echo "Acquire::http::Proxy-Auto-Detect \"/usr/local/bin/apt-proxy-detect.sh\";" >/etc/apt/apt.conf.d/00aptproxy
- cat <<'EOF' >/usr/local/bin/apt-proxy-detect.sh
+ msg_info "Updating Container OS"
+ if [[ "$CACHER" == "yes" ]]; then
+ echo "Acquire::http::Proxy-Auto-Detect \"/usr/local/bin/apt-proxy-detect.sh\";" >/etc/apt/apt.conf.d/00aptproxy
+ cat <<'EOF' >/usr/local/bin/apt-proxy-detect.sh
#!/bin/bash
if nc -w1 -z "${CACHER_IP}" 3142; then
echo -n "http://${CACHER_IP}:3142"
@@ -142,66 +141,66 @@ else
echo -n "DIRECT"
fi
EOF
- chmod +x /usr/local/bin/apt-proxy-detect.sh
- fi
- $STD apt-get update
- $STD apt-get -o Dpkg::Options::="--force-confold" -y dist-upgrade
- rm -rf /usr/lib/python3.*/EXTERNALLY-MANAGED
- msg_ok "Updated Container OS"
- source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/tools.func)
+ chmod +x /usr/local/bin/apt-proxy-detect.sh
+ fi
+ $STD apt-get update
+ $STD apt-get -o Dpkg::Options::="--force-confold" -y dist-upgrade
+ rm -rf /usr/lib/python3.*/EXTERNALLY-MANAGED
+ msg_ok "Updated Container OS"
+ source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/tools.func)
}
# This function modifies the message of the day (motd) and SSH settings
motd_ssh() {
- grep -qxF "export TERM='xterm-256color'" /root/.bashrc || echo "export TERM='xterm-256color'" >>/root/.bashrc
+ grep -qxF "export TERM='xterm-256color'" /root/.bashrc || echo "export TERM='xterm-256color'" >>/root/.bashrc
- if [ -f "/etc/os-release" ]; then
- OS_NAME=$(grep ^NAME /etc/os-release | cut -d= -f2 | tr -d '"')
- OS_VERSION=$(grep ^VERSION_ID /etc/os-release | cut -d= -f2 | tr -d '"')
- elif [ -f "/etc/debian_version" ]; then
- OS_NAME="Debian"
- OS_VERSION=$(cat /etc/debian_version)
- fi
+ if [ -f "/etc/os-release" ]; then
+ OS_NAME=$(grep ^NAME /etc/os-release | cut -d= -f2 | tr -d '"')
+ OS_VERSION=$(grep ^VERSION_ID /etc/os-release | cut -d= -f2 | tr -d '"')
+ elif [ -f "/etc/debian_version" ]; then
+ OS_NAME="Debian"
+ OS_VERSION=$(cat /etc/debian_version)
+ fi
- PROFILE_FILE="/etc/profile.d/00_lxc-details.sh"
- echo "echo -e \"\"" >"$PROFILE_FILE"
- echo -e "echo -e \"${BOLD}${YW}${APPLICATION} LXC Container - DEV Repository${CL}\"" >>"$PROFILE_FILE"
- echo -e "echo -e \"${RD}WARNING: This is a DEVELOPMENT version (ProxmoxVED). Do NOT use in production!${CL}\"" >>"$PROFILE_FILE"
- echo -e "echo -e \"${YW} OS: ${GN}${OS_NAME} - Version: ${OS_VERSION}${CL}\"" >>"$PROFILE_FILE"
- echo -e "echo -e \"${YW} Hostname: ${GN}\$(hostname)${CL}\"" >>"$PROFILE_FILE"
- echo -e "echo -e \"${YW} IP Address: ${GN}\$(hostname -I | awk '{print \$1}')${CL}\"" >>"$PROFILE_FILE"
- echo -e "echo -e \"${YW} Repository: ${GN}https://github.com/community-scripts/ProxmoxVED${CL}\"" >>"$PROFILE_FILE"
- echo "echo \"\"" >>"$PROFILE_FILE"
+ PROFILE_FILE="/etc/profile.d/00_lxc-details.sh"
+ echo "echo -e \"\"" >"$PROFILE_FILE"
+ echo -e "echo -e \"${BOLD}${YW}${APPLICATION} LXC Container - DEV Repository${CL}\"" >>"$PROFILE_FILE"
+ echo -e "echo -e \"${RD}WARNING: This is a DEVELOPMENT version (ProxmoxVED). Do NOT use in production!${CL}\"" >>"$PROFILE_FILE"
+ echo -e "echo -e \"${YW} OS: ${GN}${OS_NAME} - Version: ${OS_VERSION}${CL}\"" >>"$PROFILE_FILE"
+ echo -e "echo -e \"${YW} Hostname: ${GN}\$(hostname)${CL}\"" >>"$PROFILE_FILE"
+ echo -e "echo -e \"${YW} IP Address: ${GN}\$(hostname -I | awk '{print \$1}')${CL}\"" >>"$PROFILE_FILE"
+ echo -e "echo -e \"${YW} Repository: ${GN}https://github.com/community-scripts/ProxmoxVED${CL}\"" >>"$PROFILE_FILE"
+ echo "echo \"\"" >>"$PROFILE_FILE"
- chmod -x /etc/update-motd.d/*
+ chmod -x /etc/update-motd.d/*
- if [[ "${SSH_ROOT}" == "yes" ]]; then
- sed -i "s/#PermitRootLogin prohibit-password/PermitRootLogin yes/g" /etc/ssh/sshd_config
- systemctl restart sshd
- fi
+ if [[ "${SSH_ROOT}" == "yes" ]]; then
+ sed -i "s/#PermitRootLogin prohibit-password/PermitRootLogin yes/g" /etc/ssh/sshd_config
+ systemctl restart sshd
+ fi
}
# This function customizes the container by modifying the getty service and enabling auto-login for the root user
customize() {
- if [[ "$PASSWORD" == "" ]]; then
- msg_info "Customizing Container"
- GETTY_OVERRIDE="/etc/systemd/system/container-getty@1.service.d/override.conf"
- mkdir -p $(dirname $GETTY_OVERRIDE)
- cat <$GETTY_OVERRIDE
+ if [[ "$PASSWORD" == "" ]]; then
+ msg_info "Customizing Container"
+ GETTY_OVERRIDE="/etc/systemd/system/container-getty@1.service.d/override.conf"
+ mkdir -p $(dirname $GETTY_OVERRIDE)
+    cat <<EOF >$GETTY_OVERRIDE
[Service]
ExecStart=
ExecStart=-/sbin/agetty --autologin root --noclear --keep-baud tty%I 115200,38400,9600 \$TERM
EOF
- systemctl daemon-reload
- systemctl restart $(basename $(dirname $GETTY_OVERRIDE) | sed 's/\.d//')
- msg_ok "Customized Container"
- fi
- echo "bash -c \"\$(curl -fsSL https://github.com/community-scripts/ProxmoxVED/raw/main/ct/${app}.sh)\"" >/usr/bin/update
- chmod +x /usr/bin/update
- if [[ -n "${SSH_AUTHORIZED_KEY}" ]]; then
- mkdir -p /root/.ssh
- echo "${SSH_AUTHORIZED_KEY}" >/root/.ssh/authorized_keys
- chmod 700 /root/.ssh
- chmod 600 /root/.ssh/authorized_keys
- fi
+ systemctl daemon-reload
+ systemctl restart $(basename $(dirname $GETTY_OVERRIDE) | sed 's/\.d//')
+ msg_ok "Customized Container"
+ fi
+ echo "bash -c \"\$(curl -fsSL https://github.com/community-scripts/ProxmoxVED/raw/main/ct/${app}.sh)\"" >/usr/bin/update
+ chmod +x /usr/bin/update
+ if [[ -n "${SSH_AUTHORIZED_KEY}" ]]; then
+ mkdir -p /root/.ssh
+ echo "${SSH_AUTHORIZED_KEY}" >/root/.ssh/authorized_keys
+ chmod 700 /root/.ssh
+ chmod 600 /root/.ssh/authorized_keys
+ fi
}
diff --git a/misc/optimize_build_func.py b/misc/optimize_build_func.py
new file mode 100644
index 000000000..92fe03000
--- /dev/null
+++ b/misc/optimize_build_func.py
@@ -0,0 +1,508 @@
+#!/usr/bin/env python3
+"""
+Build.func Optimizer
+====================
+Optimizes the build.func file by:
+- Removing duplicate functions
+- Sorting and grouping functions logically
+- Adding section headers
+- Improving readability
+"""
+
+import re
+import sys
+from pathlib import Path
+from datetime import datetime
+from typing import List, Tuple, Dict
+
+# ==============================================================================
+# CONFIGURATION
+# ==============================================================================
+
+# Define function groups in desired order
+FUNCTION_GROUPS = {
+ "CORE_INIT": {
+ "title": "CORE INITIALIZATION & VARIABLES",
+ "functions": [
+ "variables",
+ ]
+ },
+ "DEPENDENCIES": {
+ "title": "DEPENDENCY LOADING",
+ "functions": [
+ # Bootstrap loader section (commented code)
+ ]
+ },
+ "VALIDATION": {
+ "title": "SYSTEM VALIDATION & CHECKS",
+ "functions": [
+ "maxkeys_check",
+ "check_container_resources",
+ "check_container_storage",
+ "check_nvidia_host_setup",
+ "check_storage_support",
+ ]
+ },
+ "NETWORK": {
+ "title": "NETWORK & IP MANAGEMENT",
+ "functions": [
+ "get_current_ip",
+ "update_motd_ip",
+ ]
+ },
+ "SSH": {
+ "title": "SSH KEY MANAGEMENT",
+ "functions": [
+ "find_host_ssh_keys",
+ "ssh_discover_default_files",
+ "ssh_extract_keys_from_file",
+ "ssh_build_choices_from_files",
+ "configure_ssh_settings",
+ "install_ssh_keys_into_ct",
+ ]
+ },
+ "SETTINGS": {
+ "title": "SETTINGS & CONFIGURATION",
+ "functions": [
+ "base_settings",
+ "echo_default",
+ "exit_script",
+ "advanced_settings",
+ "diagnostics_check",
+ "diagnostics_menu",
+ "default_var_settings",
+ "ensure_global_default_vars_file",
+ "settings_menu",
+ "edit_default_storage",
+ ]
+ },
+ "DEFAULTS": {
+ "title": "DEFAULTS MANAGEMENT (VAR_* FILES)",
+ "functions": [
+ "get_app_defaults_path",
+ "_is_whitelisted_key",
+ "_sanitize_value",
+ "_load_vars_file",
+ "_load_vars_file_to_map",
+ "_build_vars_diff",
+ "_build_current_app_vars_tmp",
+ "maybe_offer_save_app_defaults",
+ "ensure_storage_selection_for_vars_file",
+ ]
+ },
+ "STORAGE": {
+ "title": "STORAGE DISCOVERY & SELECTION",
+ "functions": [
+ "resolve_storage_preselect",
+ "select_storage",
+ "choose_and_set_storage_for_file",
+ "_write_storage_to_vars",
+ ]
+ },
+ "GPU": {
+ "title": "GPU & HARDWARE PASSTHROUGH",
+ "functions": [
+ "is_gpu_app",
+ "detect_gpu_devices",
+ "configure_gpu_passthrough",
+ "configure_usb_passthrough",
+ "configure_additional_devices",
+ "fix_gpu_gids",
+ "get_container_gid",
+ ]
+ },
+ "CONTAINER": {
+ "title": "CONTAINER LIFECYCLE & CREATION",
+ "functions": [
+ "create_lxc_container",
+ "offer_lxc_stack_upgrade_and_maybe_retry",
+ "parse_template_osver",
+ "pkg_ver",
+ "pkg_cand",
+ "ver_ge",
+ "ver_gt",
+ "ver_lt",
+ "build_container",
+ "destroy_lxc",
+ "description",
+ ]
+ },
+ "MAIN": {
+ "title": "MAIN ENTRY POINTS & ERROR HANDLING",
+ "functions": [
+ "install_script",
+ "start",
+ "api_exit_script",
+ ]
+ },
+}
+
+# Functions to exclude from duplication check (intentionally similar)
+EXCLUDE_FROM_DEDUP = {
+ "_load_vars_file",
+ "_load_vars_file_to_map",
+}
+
+# ==============================================================================
+# HELPER FUNCTIONS
+# ==============================================================================
+
+def extract_functions(content: str) -> Dict[str, Tuple[str, int, int]]:
+ """
+ Extract all function definitions from the content.
+ Returns dict: {function_name: (full_code, start_line, end_line)}
+ """
+ functions = {}
+ lines = content.split('\n')
+
+ i = 0
+ while i < len(lines):
+ line = lines[i]
+
+ # Match function definition: function_name() {
+ match = re.match(r'^([a-zA-Z_][a-zA-Z0-9_]*)\s*\(\)\s*\{', line)
+ if match:
+ func_name = match.group(1)
+ start_line = i
+
+ # Find function end by counting braces
+ brace_count = 1
+ func_lines = [line]
+ i += 1
+
+ while i < len(lines) and brace_count > 0:
+ current_line = lines[i]
+ func_lines.append(current_line)
+
+ # Count braces (simple method, doesn't handle strings/comments perfectly)
+ brace_count += current_line.count('{') - current_line.count('}')
+ i += 1
+
+ end_line = i
+ functions[func_name] = ('\n'.join(func_lines), start_line, end_line)
+ continue
+
+ i += 1
+
+ return functions
+
+def extract_header_comments(content: str, func_name: str, func_code: str) -> str:
+ """Extract comment block before function if exists"""
+ lines = content.split('\n')
+
+ # Find function start in original content
+ for i, line in enumerate(lines):
+        if re.match(rf'^{re.escape(func_name)}\s*\(\)\s*\{{', line):
+ # Look backwards for comment block
+ comments = []
+ j = i - 1
+ while j >= 0:
+ prev_line = lines[j]
+ stripped = prev_line.strip()
+
+ # SKIP section headers and copyright - we add our own
+ if (stripped.startswith('# ===') or
+ stripped.startswith('#!/usr/bin/env') or
+ 'Copyright' in stripped or
+ 'Author:' in stripped or
+ 'License:' in stripped or
+ 'Revision:' in stripped or
+ 'SECTION' in stripped):
+ j -= 1
+ continue
+
+ # Include function-specific comment lines
+ if (stripped.startswith('# ---') or
+ stripped.startswith('#')):
+ comments.insert(0, prev_line)
+ j -= 1
+ elif stripped == '':
+ # Keep collecting through empty lines
+ comments.insert(0, prev_line)
+ j -= 1
+ else:
+ break
+
+ # Remove leading empty lines from comments
+ while comments and comments[0].strip() == '':
+ comments.pop(0)
+
+ # Remove trailing empty lines from comments
+ while comments and comments[-1].strip() == '':
+ comments.pop()
+
+ if comments:
+ return '\n'.join(comments) + '\n'
+
+ return ''
+
+def find_duplicate_functions(functions: Dict[str, Tuple[str, int, int]]) -> List[str]:
+ """Find duplicate function definitions"""
+ seen = {}
+ duplicates = []
+
+ for func_name, (code, start, end) in functions.items():
+ if func_name in EXCLUDE_FROM_DEDUP:
+ continue
+
+ # Normalize code for comparison (remove whitespace variations)
+ normalized = re.sub(r'\s+', ' ', code).strip()
+
+ if normalized in seen:
+ duplicates.append(func_name)
+ print(f" ⚠️ Duplicate found: {func_name} (also defined as {seen[normalized]})")
+ else:
+ seen[normalized] = func_name
+
+ return duplicates
+
+def create_section_header(title: str) -> str:
+ """Create a formatted section header"""
+ return f"""
+# ==============================================================================
+# {title}
+# ==============================================================================
+"""
+
+def get_function_group(func_name: str) -> str:
+ """Determine which group a function belongs to"""
+ for group_key, group_data in FUNCTION_GROUPS.items():
+ if func_name in group_data["functions"]:
+ return group_key
+ return "UNKNOWN"
+
+# ==============================================================================
+# MAIN OPTIMIZATION LOGIC
+# ==============================================================================
+
+def optimize_build_func(input_file: Path, output_file: Path):
+    """Main optimization function.
+
+    Reads input_file, groups its shell functions per FUNCTION_GROUPS, drops
+    duplicate definitions, re-attaches the bootstrap/source/declaration lines,
+    and writes the reorganized script to output_file.
+    """
+
+    print("=" * 80)
+    print("BUILD.FUNC OPTIMIZER")
+    print("=" * 80)
+    print()
+
+    # Read input file
+    print(f"📖 Reading: {input_file}")
+    content = input_file.read_text(encoding='utf-8')
+    original_lines = len(content.split('\n'))
+    print(f" Lines: {original_lines:,}")
+    print()
+
+    # Extract functions
+    print("🔍 Extracting functions...")
+    functions = extract_functions(content)
+    print(f" Found {len(functions)} functions")
+    print()
+
+    # Find duplicates
+    print("🔎 Checking for duplicates...")
+    duplicates = find_duplicate_functions(functions)
+    if duplicates:
+        print(f" Found {len(duplicates)} duplicate(s)")
+    else:
+        print(" ✓ No duplicates found")
+    print()
+
+    # Extract header (copyright, etc)
+    print("📝 Extracting file header...")
+    lines = content.split('\n')
+    header_lines = []
+
+    # Extract only the first copyright block.
+    # NOTE(review): the loop breaks at the first non-header line, so only the
+    # leading shebang/copyright block is ever collected.
+    in_header = True
+    for i, line in enumerate(lines):
+        if in_header:
+            # Keep copyright and license lines
+            if (line.strip().startswith('#!') or
+                line.strip().startswith('# Copyright') or
+                line.strip().startswith('# Author:') or
+                line.strip().startswith('# License:') or
+                line.strip().startswith('# Revision:') or
+                line.strip() == ''):
+                header_lines.append(line)
+            else:
+                in_header = False
+                break
+
+    # Remove trailing empty lines
+    while header_lines and header_lines[-1].strip() == '':
+        header_lines.pop()
+
+    header = '\n'.join(header_lines)
+    print()
+
+    # Build optimized content
+    print("🔨 Building optimized structure...")
+
+    optimized_parts = [header]
+
+    # Group functions: one bucket per FUNCTION_GROUPS key, plus UNKNOWN.
+    grouped_functions = {key: [] for key in FUNCTION_GROUPS.keys()}
+    grouped_functions["UNKNOWN"] = []
+
+    for func_name, (func_code, start, end) in functions.items():
+        if func_name in duplicates:
+            continue  # Skip duplicates
+
+        group = get_function_group(func_name)
+
+        # Extract comments before function
+        comments = extract_header_comments(content, func_name, func_code)
+
+        grouped_functions[group].append((func_name, comments + func_code))
+
+    # Add grouped sections
+    for group_key, group_data in FUNCTION_GROUPS.items():
+        if grouped_functions[group_key]:
+            optimized_parts.append(create_section_header(group_data["title"]))
+
+            for func_name, func_code in grouped_functions[group_key]:
+                optimized_parts.append(func_code)
+                optimized_parts.append('')  # Empty line between functions
+
+    # Add unknown functions at the end
+    if grouped_functions["UNKNOWN"]:
+        optimized_parts.append(create_section_header("UNCATEGORIZED FUNCTIONS"))
+        print(f" ⚠️ {len(grouped_functions['UNKNOWN'])} uncategorized functions:")
+        for func_name, func_code in grouped_functions["UNKNOWN"]:
+            print(f" - {func_name}")
+            optimized_parts.append(func_code)
+            optimized_parts.append('')
+
+    # Add any remaining non-function code (bootstrap, source commands, traps, etc)
+    print("📌 Adding remaining code...")
+
+    # Extract bootstrap/source section
+    bootstrap_lines = []
+    trap_lines = []
+    other_lines = []
+
+    in_function = False
+    brace_count = 0
+    in_bootstrap_comment = False
+
+    # Second pass over the raw lines: classify everything outside functions.
+    for line in lines:
+        stripped = line.strip()
+
+        # Skip the header we already extracted
+        if (stripped.startswith('#!/usr/bin/env bash') or
+            stripped.startswith('# Copyright') or
+            stripped.startswith('# Author:') or
+            stripped.startswith('# License:') or
+            stripped.startswith('# Revision:')):
+            continue
+
+        # Check if we're in a function (brace counting tracks the body extent)
+        if re.match(r'^[a-zA-Z_][a-zA-Z0-9_]*\s*\(\)\s*\{', line):
+            in_function = True
+            brace_count = 1
+        elif in_function:
+            brace_count += line.count('{') - line.count('}')
+            if brace_count == 0:
+                in_function = False
+        elif not in_function:
+            # Collect non-function lines
+
+            # Bootstrap/loader section
+            if ('Community-Scripts bootstrap' in line or
+                'Load core' in line or
+                in_bootstrap_comment):
+                bootstrap_lines.append(line)
+                # Separator lines toggle the "inside bootstrap comment" state.
+                if '# ---' in line or '# ===' in line:
+                    in_bootstrap_comment = not in_bootstrap_comment
+                continue
+
+            # Source commands
+            if (stripped.startswith('source <(') or
+                stripped.startswith('if command -v curl') or
+                stripped.startswith('elif command -v wget') or
+                'load_functions' in stripped or
+                'catch_errors' in stripped):
+                bootstrap_lines.append(line)
+                continue
+
+            # Traps
+            if stripped.startswith('trap '):
+                trap_lines.append(line)
+                continue
+
+            # VAR_WHITELIST declaration (and its continuation lines)
+            if 'declare -ag VAR_WHITELIST' in line or (other_lines and 'VAR_WHITELIST' in other_lines[-1]):
+                other_lines.append(line)
+                continue
+
+            # Empty lines between sections - keep some (at most one in a row)
+            if stripped == '' and (bootstrap_lines or trap_lines or other_lines):
+                if bootstrap_lines and bootstrap_lines[-1].strip() != '':
+                    bootstrap_lines.append(line)
+                elif trap_lines and trap_lines[-1].strip() != '':
+                    trap_lines.append(line)
+
+    # Add bootstrap section if exists
+    if bootstrap_lines:
+        optimized_parts.append(create_section_header("DEPENDENCY LOADING"))
+        optimized_parts.extend(bootstrap_lines)
+        optimized_parts.append('')
+
+    # Add other declarations.
+    # NOTE(review): trap_lines are collected above but never re-emitted into
+    # optimized_parts — confirm whether traps should appear in the output.
+    if other_lines:
+        optimized_parts.extend(other_lines)
+        optimized_parts.append('')
+
+    # Write output
+    optimized_content = '\n'.join(optimized_parts)
+    optimized_lines = len(optimized_content.split('\n'))
+
+    print()
+    print(f"💾 Writing optimized file: {output_file}")
+    output_file.write_text(optimized_content, encoding='utf-8')
+
+    print()
+    print("=" * 80)
+    print("✅ OPTIMIZATION COMPLETE")
+    print("=" * 80)
+    print(f"Original lines: {original_lines:,}")
+    print(f"Optimized lines: {optimized_lines:,}")
+    print(f"Difference: {original_lines - optimized_lines:+,}")
+    print(f"Functions: {len(functions) - len(duplicates)}")
+    print(f"Duplicates removed: {len(duplicates)}")
+    print()
+
+# ==============================================================================
+# ENTRY POINT
+# ==============================================================================
+
+def main():
+    """Main entry point.
+
+    Backs up build.func (next to this script) with a timestamped name, then
+    writes the reorganized result to build.func.optimized for manual review.
+    """
+
+    # Set paths
+    script_dir = Path(__file__).parent
+    input_file = script_dir / "build.func"
+
+    # Create backup first
+    timestamp = datetime.now().strftime("%Y%m%d-%H%M%S")
+    backup_file = script_dir / f"build.func.backup-{timestamp}"
+
+    if not input_file.exists():
+        print(f"❌ Error: {input_file} not found!")
+        sys.exit(1)
+
+    print(f"📦 Creating backup: {backup_file.name}")
+    backup_file.write_text(input_file.read_text(encoding='utf-8'), encoding='utf-8')
+    print()
+
+    # Optimize (original file is left untouched; output goes to a new file)
+    output_file = script_dir / "build.func.optimized"
+    optimize_build_func(input_file, output_file)
+
+    print("📋 Next steps:")
+    print(f" 1. Review: {output_file.name}")
+    print(f" 2. Test the optimized version")
+    print(f" 3. If OK: mv build.func.optimized build.func")
+    print(f" 4. Backup available at: {backup_file.name}")
+    print()
+
+if __name__ == "__main__":
+    main()
diff --git a/misc/passthrough.func b/misc/passthrough.func
new file mode 100644
index 000000000..6eab5f0b2
--- /dev/null
+++ b/misc/passthrough.func
@@ -0,0 +1,320 @@
+#!/usr/bin/env bash
+# passthrough.func — host-side passthrough logic (VAAPI & NVIDIA) for LXC
+# This file ONLY touches host config (/etc/pve/lxc/.conf) and whiptail.
+# Inside-CT package setup lives in *_inside_setup (called from build.func).
+
+CTTYPE="${CTTYPE:-${CT_TYPE:-1}}"
+
+# --------------------------- Common helpers -----------------------------------
+
+# Compute "height width" for a whiptail dialog from item count $1 and longest
+# label length $2; width is clamped to 70..100, height to at most 22 rows.
+_whiptail_dims() {
+  local n="$1" L="$2"
+  local maxW=$((L + 8))
+  ((maxW < 70)) && maxW=70
+  ((maxW > 100)) && maxW=100
+  local H=$((10 + n * 2))
+  ((H > 22)) && H=22
+  echo "$H $maxW"
+}
+
+select_hw_passthrough() {
+ local CT_ID="$1" CT_TYPE="$2" APP="$3"
+ local LXC_CONFIG="/etc/pve/lxc/${CT_ID}.conf"
+
+ if ! _is_gpu_app "$APP" && [[ "$CT_TYPE" != "0" ]]; then
+ return
+ fi
+
+ local choices=()
+ [[ -d /dev/dri ]] && choices+=("VAAPI" "Intel/AMD GPU via VAAPI" OFF)
+ compgen -G "/dev/nvidia*" >/dev/null && choices+=("NVIDIA" "NVIDIA GPU passthrough" OFF)
+
+ # no GPUs found
+ [[ ${#choices[@]} -eq 0 ]] && {
+ msg_info "No GPU devices detected"
+ return
+ }
+
+ local SELECTED
+ if [[ ${#choices[@]} -eq 2 ]]; then
+ # both available → show whiptail
+ SELECTED=$(whiptail --title "GPU Passthrough" \
+ --checklist "Select GPU passthrough for CT $CT_ID:" 12 70 2 \
+ "${choices[@]}" 3>&1 1>&2 2>&3) || return
+ else
+ # only one option → auto-select
+ SELECTED="\"${choices[0]}\""
+ msg_info "Auto-selecting GPU passthrough: ${choices[0]}"
+ fi
+
+ for sel in $SELECTED; do
+ case "$sel" in
+ "\"VAAPI\"")
+ export ENABLE_VAAPI=1
+ vaapi_select_and_apply "$CT_ID" "$CT_TYPE"
+ ;;
+ "\"NVIDIA\"")
+ export ENABLE_NVIDIA=1
+ nvidia_passthrough_to_lxc "$CT_ID" "$CT_TYPE"
+ ;;
+ esac
+ done
+}
+
+# Apps that benefit from GPU passthrough (VAAPI + NVIDIA)
+_GPU_APPS=(
+  immich
+  channels
+  emby
+  ersatztv
+  frigate
+  jellyfin
+  plex
+  scrypted
+  tdarr
+  unmanic
+  ollama
+  fileflows
+  "open webui"
+  tunarr
+  debian
+)
+
+# Case-insensitive membership test: returns 0 if $1 is listed in _GPU_APPS.
+_is_gpu_app() {
+  local app="$1"
+  local a
+  shopt -s nocasematch
+  for a in "${_GPU_APPS[@]}"; do
+    # nocasematch is restored before returning on a hit
+    [[ "$app" == "$a" ]] && shopt -u nocasematch && return 0
+  done
+  shopt -u nocasematch
+  return 1
+}
+
+# ------------------------------ USB -------------------------------------------
+
+usb_handle_passthrough() {
+ local CT_ID="$1" CT_TYPE="$2"
+ local LXC_CONFIG="/etc/pve/lxc/${CT_ID}.conf"
+
+ if [[ "$CT_TYPE" != "0" ]]; then
+ return 0 # USB passthrough only for privileged CTs
+ fi
+ cat <>"$LXC_CONFIG"
+# USB passthrough
+lxc.cgroup2.devices.allow: a
+lxc.cap.drop:
+lxc.cgroup2.devices.allow: c 188:* rwm
+lxc.cgroup2.devices.allow: c 189:* rwm
+lxc.mount.entry: /dev/serial/by-id dev/serial/by-id none bind,optional,create=dir
+lxc.mount.entry: /dev/ttyUSB0 dev/ttyUSB0 none bind,optional,create=file
+lxc.mount.entry: /dev/ttyUSB1 dev/ttyUSB1 none bind,optional,create=file
+lxc.mount.entry: /dev/ttyACM0 dev/ttyACM0 none bind,optional,create=file
+lxc.mount.entry: /dev/ttyACM1 dev/ttyACM1 none bind,optional,create=file
+EOF
+}
+
+# ------------------------------ VAAPI -----------------------------------------
+
+# Resolve the numeric GID of host group $1 via getent, falling back to the
+# conventional Debian defaults (video=44, render=104) when the group is absent.
+_vaapi_gid() {
+  local g="$1" gid
+  gid="$(getent group "$g" | cut -d: -f3)"
+  case "$g" in
+  video) echo "${gid:-44}" ;;
+  render) echo "${gid:-104}" ;;
+  *) echo "${gid:-44}" ;;
+  esac
+}
+
+_vaapi_pairs() {
+ local seen=() by real id idx card pci pci_info name
+ shopt -s nullglob
+ for by in /dev/dri/by-path/*-render /dev/dri/renderD*; do
+ [[ -e "$by" ]] || continue
+ real="$(readlink -f "$by" || true)"
+ [[ -e "$real" ]] || continue
+ id="$(basename "$real")"
+ [[ " ${seen[*]} " == *" $id "* ]] && continue
+ seen+=("$id")
+
+ idx="${id#renderD}"
+ [[ "$idx" =~ ^[0-9]+$ ]] && idx=$((idx - 128)) || idx=0
+ card="/dev/dri/card${idx}"
+ [[ -e "$card" ]] || card=""
+
+ if [[ "$by" == *"/by-path/"* ]]; then
+ pci="$(basename "$by" | sed -E 's/^pci-([0-9a-fA-F:.]+)-render/\1/')"
+ pci_info="$(lspci -nn 2>/dev/null | grep -i "${pci#0000:}" || true)"
+ name="${pci_info#*: }"
+ [[ -z "$name" ]] && name="GPU ${pci}"
+ else
+ name="DRM $(basename "$real")"
+ fi
+
+ label="$(basename "$real")"
+ [[ -n "$card" ]] && label+=" + $(basename "$card")"
+ label+=" – ${name}"
+ printf "%s:%s\t%s\n" "$real" "$card" "$label"
+ done
+ shopt -u nullglob
+}
+
+# Let the user pick VAAPI devices (auto-selecting when only one exists) and
+# write the corresponding entries into /etc/pve/lxc/<CT_ID>.conf.
+# Privileged CTs ($2 == "0") get cgroup allows + bind mounts; unprivileged
+# CTs get devN entries with the appropriate group GID.
+vaapi_select_and_apply() {
+  local CT_ID="$1" CT_TYPE="$2"
+  local LXC_CONFIG="/etc/pve/lxc/${CT_ID}.conf"
+
+  # Each pair line is "renderDev:cardDev<TAB>label" (see _vaapi_pairs).
+  mapfile -t pairs < <(_vaapi_pairs)
+  ((${#pairs[@]} == 0)) && {
+    msg_warn "No VAAPI devices detected – skipping."
+    return
+  }
+
+  # only one device -> auto-select
+  local SELECTED
+  if [[ ${#pairs[@]} -eq 1 ]]; then
+    SELECTED="${pairs[0]%%$'\t'*}"
+    msg_info "Auto-selecting VAAPI device: ${pairs[0]#*$'\t'}"
+  else
+    # more than one device -> show whiptail
+    local items=() maxlen=0
+    for p in "${pairs[@]}"; do
+      local devs="${p%%$'\t'*}" label="${p#*$'\t'}"
+      items+=("$devs" "$label" "OFF")
+      ((${#label} > maxlen)) && maxlen=${#label}
+    done
+    # items holds 3 entries per device; dialog size comes from _whiptail_dims.
+    read -r h w < <(_whiptail_dims $((${#items[@]} / 3)) "$maxlen")
+
+    SELECTED="$(whiptail --title "VAAPI Device Selection" \
+      --checklist "Select VAAPI devices for CT $CT_ID:" "$h" "$w" 6 \
+      "${items[@]}" 3>&1 1>&2 2>&3)" || {
+      msg_warn "VAAPI selection cancelled."
+      return
+    }
+    [[ -z "$SELECTED" ]] && {
+      msg_warn "No VAAPI devices selected."
+      return
+    }
+  fi
+
+  # Apply selection to LXC config
+  local DID_MOUNT_DRI=0 idx=0
+  for dev in $SELECTED; do
+    # Strip the surrounding quotes whiptail adds to each selected tag.
+    dev="${dev%\"}"
+    dev="${dev#\"}"
+    IFS=":" read -r path card <<<"$dev"
+    for d in "$path" "$card"; do
+      [[ -n "$d" && -e "$d" ]] || continue
+      if [[ "$CT_TYPE" == "0" ]]; then
+        # Privileged: bind-mount /dev/dri once, then allow + mount each node.
+        [[ $DID_MOUNT_DRI -eq 0 && -d /dev/dri ]] && {
+          echo "lxc.mount.entry: /dev/dri /dev/dri none bind,optional,create=dir" >>"$LXC_CONFIG"
+          DID_MOUNT_DRI=1
+        }
+        # stat prints major:minor in hex; awk converts both to decimal.
+        if mm=$(stat -c '%t:%T' "$d" | awk -F: '{printf "%d:%d","0x"$1,"0x"$2}'); then
+          echo "lxc.cgroup2.devices.allow: c $mm rwm" >>"$LXC_CONFIG"
+          echo "lxc.mount.entry: $d $d none bind,optional,create=file" >>"$LXC_CONFIG"
+        fi
+      else
+        # Unprivileged: devN entries; renderD nodes get the render GID.
+        gid=$([[ "$d" =~ renderD ]] && _vaapi_gid render || _vaapi_gid video)
+        echo "dev${idx}: $d,gid=${gid}" >>"$LXC_CONFIG"
+        idx=$((idx + 1))
+      fi
+    done
+  done
+
+  # Fallback only for privileged CTs
+  # NOTE(review): this is appended unconditionally for privileged CTs, so when
+  # devices were already bound above the /dev/dri mount entry is duplicated in
+  # the config — confirm whether it should be gated on DID_MOUNT_DRI -eq 0.
+  if [[ "$CT_TYPE" == "0" ]]; then
+    cat <<'EOF' >>"$LXC_CONFIG"
+# VAAPI fallback
+lxc.mount.entry: /dev/dri /dev/dri none bind,optional,create=dir
+lxc.cgroup2.devices.allow: c 226:* rwm
+EOF
+  fi
+}
+
+# ----------------------------- NVIDIA -----------------------------------------
+
+# Map every /dev/nvidia* node into the LXC config of CT $1 (cgroup allow +
+# bind mount per device). Privileged CTs ($2 == "0") additionally get a
+# /dev/dri bind mount when present.
+nvidia_passthrough_to_lxc() {
+  local CT_ID="$1" CT_TYPE="$2"
+  local LXC_CONFIG="/etc/pve/lxc/${CT_ID}.conf"
+  local found=0
+
+  for dev in /dev/nvidia*; do
+    [[ -e "$dev" ]] || continue
+    found=1
+    # stat prints major:minor in hex; awk converts both to decimal.
+    if mm="$(stat -c '%t:%T' "$dev" | awk -F: '{printf "%d:%d","0x"$1,"0x"$2}')"; then
+      echo "lxc.cgroup2.devices.allow: c $mm rwm" >>"$LXC_CONFIG"
+      echo "lxc.mount.entry: $dev $dev none bind,optional,create=file" >>"$LXC_CONFIG"
+    fi
+  done
+  ((found == 0)) && {
+    msg_warn "No NVIDIA devices found."
+    return
+  }
+
+  if [[ -d /dev/dri && "$CT_TYPE" == "0" ]]; then
+    echo "lxc.mount.entry: /dev/dri /dev/dri none bind,optional,create=dir" >>"$LXC_CONFIG"
+  fi
+  msg_ok "NVIDIA devices mapped to CT ${CT_ID}"
+}
+
+# Install VAAPI userland packages inside the container. On Debian 13
+# ("trixie") the user may opt in to the non-free intel-media-va-driver;
+# otherwise (or on decline) the open va-driver-all stack is installed.
+# NOTE(review): ${TAB3} is expected to come from the sourced core helpers —
+# under "set -u" an unset TAB3 would abort the prompt; confirm it is defined.
+install_vaapi_userland_interactive() {
+  . /etc/os-release
+  if [[ "$VERSION_CODENAME" == "trixie" ]]; then
+    read -r -p "${TAB3}Do you need the intel-media-va-driver-non-free driver for HW encoding (Debian 13 only)? " prompt
+    if [[ ${prompt,,} =~ ^(y|yes)$ ]]; then
+      msg_info "Installing Intel Hardware Acceleration (non-free)"
+      # Enable the non-free components for base, security and updates suites.
+      cat <<'EOF' >/etc/apt/sources.list.d/non-free.sources
+Types: deb deb-src
+URIs: http://deb.debian.org/debian
+Suites: trixie
+Components: non-free non-free-firmware
+
+Types: deb deb-src
+URIs: http://deb.debian.org/debian-security
+Suites: trixie-security
+Components: non-free non-free-firmware
+
+Types: deb deb-src
+URIs: http://deb.debian.org/debian
+Suites: trixie-updates
+Components: non-free non-free-firmware
+EOF
+      $STD apt-get update
+      $STD apt-get install -y \
+        intel-media-va-driver-non-free \
+        ocl-icd-libopencl1 \
+        mesa-opencl-icd \
+        mesa-va-drivers \
+        libvpl2 \
+        vainfo \
+        intel-gpu-tools
+      msg_ok "Installed Intel Hardware Acceleration (non-free)"
+      return
+    fi
+  fi
+
+  msg_info "Installing Intel Hardware Acceleration (open packages)"
+  $STD apt-get update
+  $STD apt-get install -y \
+    va-driver-all \
+    ocl-icd-libopencl1 \
+    mesa-opencl-icd \
+    mesa-va-drivers \
+    vainfo \
+    intel-gpu-tools
+  msg_ok "Installed Intel Hardware Acceleration (open packages)"
+}
+
+# Install the NVIDIA userland (driver, utils, encode/CUDA libs) inside the
+# container; returns 1 on package installation failure.
+# NOTE(review): package names (libnvidia-encode1, libcuda1) are release-
+# specific — confirm they exist on the targeted Debian version.
+install_nvidia_userland_interactive() {
+  msg_info "Installing NVIDIA Userland"
+  $STD apt-get update
+  $STD apt-get install -y \
+    nvidia-driver \
+    nvidia-utils \
+    libnvidia-encode1 \
+    libcuda1 || {
+    msg_error "Failed to install NVIDIA packages"
+    return 1
+  }
+  msg_ok "Installed NVIDIA Userland"
+}
diff --git a/misc/test-tools-func.sh b/misc/test-tools-func.sh
new file mode 100644
index 000000000..a1655219e
--- /dev/null
+++ b/misc/test-tools-func.sh
@@ -0,0 +1,355 @@
+#!/usr/bin/env bash
+
+# ==============================================================================
+# TEST SUITE FOR tools.func
+# ==============================================================================
+# This script tests all setup_* functions from tools.func
+# Can be run standalone in any Debian-based system
+#
+# Usage:
+# bash <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/test-tools-func.sh)
+# ==============================================================================
+
+set -uo pipefail
+
+# Colors
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+YELLOW='\033[1;33m'
+BLUE='\033[0;34m'
+CYAN='\033[0;36m'
+NC='\033[0m' # No Color
+
+# Counters
+TESTS_PASSED=0
+TESTS_FAILED=0
+TESTS_SKIPPED=0
+
+# Log file
+TEST_LOG="/tmp/tools-func-test-$(date +%Y%m%d-%H%M%S).log"
+
+echo -e "${CYAN}═══════════════════════════════════════════════════════════${NC}"
+echo -e "${CYAN} TOOLS.FUNC TEST SUITE${NC}"
+echo -e "${CYAN}═══════════════════════════════════════════════════════════${NC}"
+echo -e "Log file: ${TEST_LOG}\n"
+
+# Source tools.func from repository
+echo -e "${BLUE}► Sourcing tools.func from repository...${NC}"
+if ! source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/tools.func); then
+ echo -e "${RED}✖ Failed to source tools.func${NC}"
+ exit 1
+fi
+echo -e "${GREEN}✔ tools.func loaded${NC}\n"
+
+# Source core functions if available
+if curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/core.func &>/dev/null; then
+ source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/core.func) || true
+fi
+
+# Override STD to show all output for debugging
+export STD=''
+
+# Force non-interactive mode for all apt operations
+export DEBIAN_FRONTEND=noninteractive
+
+# Update PATH to include common installation directories
+export PATH="/usr/local/bin:/usr/local/go/bin:/root/.cargo/bin:/root/.rbenv/bin:/root/.rbenv/shims:/opt/java/bin:$PATH"
+
+# Helper functions (override if needed from core.func)
+# Colored logging helpers; ${CL:-${NC}} falls back to NC when core.func did
+# not define CL.
+msg_info() { echo -e "${BLUE}ℹ ${1}${CL:-${NC}}"; }
+msg_ok() { echo -e "${GREEN}✔ ${1}${CL:-${NC}}"; }
+msg_error() { echo -e "${RED}✖ ${1}${CL:-${NC}}"; }
+msg_warn() { echo -e "${YELLOW}⚠ ${1}${CL:-${NC}}"; }
+
+# Color definitions if not already set
+GN="${GN:-${GREEN}}"
+BL="${BL:-${BLUE}}"
+RD="${RD:-${RED}}"
+YW="${YW:-${YELLOW}}"
+CL="${CL:-${NC}}"
+
+# Reload environment helper
+# Refresh PATH and re-source profile files so binaries installed by a setup_*
+# function become visible to the subsequent validation command.
+reload_path() {
+  export PATH="/usr/local/bin:/usr/local/go/bin:/root/.cargo/bin:/root/.rbenv/bin:/root/.rbenv/shims:/opt/java/bin:$PATH"
+  # Source profile files if they exist
+  [ -f "/root/.bashrc" ] && source /root/.bashrc 2>/dev/null || true
+  [ -f "/root/.profile" ] && source /root/.profile 2>/dev/null || true
+  [ -f "/root/.cargo/env" ] && source /root/.cargo/env 2>/dev/null || true
+}
+
+# Clean up before test to avoid interactive prompts and locks
+# Reset APT state between tests: kill stuck apt processes, drop lock files,
+# remove repo/keyring artifacts left by previous setup_* runs, and refresh
+# the package lists. Every step is best-effort (|| true).
+cleanup_before_test() {
+  # Kill any hanging apt processes
+  killall apt-get apt 2>/dev/null || true
+
+  # Remove apt locks
+  rm -f /var/lib/dpkg/lock-frontend /var/lib/dpkg/lock /var/cache/apt/archives/lock 2>/dev/null || true
+
+  # Clean up broken repository files from previous tests
+  # Remove all custom sources files
+  rm -f /etc/apt/sources.list.d/*.sources 2>/dev/null || true
+  rm -f /etc/apt/sources.list.d/*.list 2>/dev/null || true
+
+  # Remove all keyrings
+  rm -f /etc/apt/keyrings/*.gpg 2>/dev/null || true
+  rm -f /etc/apt/keyrings/*.asc 2>/dev/null || true
+
+  # Update package lists to ensure clean state
+  apt-get update -qq 2>/dev/null || true
+
+  # Wait a moment for processes to clean up
+  sleep 1
+}
+[ -f "/root/.profile" ] && source /root/.profile 2>/dev/null || true
+[ -f "/root/.cargo/env" ] && source /root/.cargo/env 2>/dev/null || true
+
+# Test validation function
+# Run one installation test and record the result.
+#   $1 test name, $2 command eval'd to perform the install, $3 (optional)
+#   validation command whose first output line is shown on success.
+# Increments TESTS_PASSED / TESTS_FAILED; all output is tee'd to $TEST_LOG.
+test_function() {
+  local test_name="$1"
+  local test_command="$2"
+  local validation_cmd="${3:-}"
+
+  # Clean up before starting test
+  cleanup_before_test
+
+  echo -e "\n${CYAN}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"
+  echo -e "${CYAN}Testing: ${test_name}${NC}"
+  echo -e "${CYAN}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"
+
+  {
+    echo "=== Test: ${test_name} ==="
+    echo "Command: ${test_command}"
+    echo "Started: $(date)"
+  } | tee -a "$TEST_LOG"
+
+  # Execute installation with output visible AND logged
+  if eval "$test_command" 2>&1 | tee -a "$TEST_LOG"; then
+    # Reload PATH after installation
+    reload_path
+
+    if [[ -n "$validation_cmd" ]]; then
+      local output
+      # Validation runs in a fresh bash so the updated PATH takes effect.
+      if output=$(bash -c "$validation_cmd" 2>&1); then
+        msg_ok "${test_name} - $(echo "$output" | head -n1)"
+        ((TESTS_PASSED++))
+      else
+        msg_error "${test_name} - Installation succeeded but validation failed"
+        {
+          echo "Validation command: $validation_cmd"
+          echo "Validation output: $output"
+          echo "PATH: $PATH"
+        } | tee -a "$TEST_LOG"
+        ((TESTS_FAILED++))
+      fi
+    else
+      msg_ok "${test_name}"
+      ((TESTS_PASSED++))
+    fi
+  else
+    msg_error "${test_name} - Installation failed"
+    echo "Installation failed" | tee -a "$TEST_LOG"
+    ((TESTS_FAILED++))
+  fi
+
+  echo "Completed: $(date)" | tee -a "$TEST_LOG"
+  echo "" | tee -a "$TEST_LOG"
+}
+
+# Skip test with reason
+# Record a skipped test: $1 test name, $2 human-readable reason.
+# Increments TESTS_SKIPPED.
+skip_test() {
+  local test_name="$1"
+  local reason="$2"
+
+  echo -e "\n${CYAN}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"
+  echo -e "${CYAN}Testing: ${test_name}${NC}"
+  echo -e "${CYAN}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"
+  msg_warn "Skipped: ${reason}"
+  ((TESTS_SKIPPED++))
+}
+
+# Update system
+msg_info "Updating system packages"
+apt-get update &>/dev/null && msg_ok "System updated"
+
+# Install base dependencies
+msg_info "Installing base dependencies"
+apt-get install -y curl wget gpg jq git build-essential ca-certificates &>/dev/null && msg_ok "Base dependencies installed"
+
+# ==============================================================================
+# TEST 1: YQ - YAML Processor
+# ==============================================================================
+# test_function "YQ" \
+# "setup_yq" \
+# "yq --version"
+
+# ==============================================================================
+# TEST 2: ADMINER - Database Management
+# ==============================================================================
+# test_function "Adminer" \
+# "setup_adminer" \
+# "dpkg -l adminer 2>/dev/null | grep -q '^ii' && a2query -c adminer 2>/dev/null && echo 'Adminer installed'"
+
+# ==============================================================================
+# TEST 3: CLICKHOUSE
+# ==============================================================================
+# test_function "ClickHouse" \
+# "setup_clickhouse" \
+# "clickhouse-server --version"
+
+# ==============================================================================
+# TEST 4: POSTGRESQL
+# ==============================================================================
+test_function "PostgreSQL 16" \
+ "PG_VERSION=16 setup_postgresql" \
+ "psql --version"
+
+# ==============================================================================
+# TEST 6: MARIADB
+# ==============================================================================
+test_function "MariaDB 11.4" \
+ "MARIADB_VERSION=11.4 setup_mariadb" \
+ "mariadb --version"
+
+# ==============================================================================
+# TEST 7: MYSQL (Remove MariaDB first)
+# ==============================================================================
+msg_info "Removing MariaDB before MySQL installation"
+systemctl stop mariadb &>/dev/null || true
+apt-get purge -y mariadb-server mariadb-client mariadb-common &>/dev/null || true
+apt-get autoremove -y &>/dev/null
+rm -rf /etc/mysql /var/lib/mysql
+msg_ok "MariaDB removed"
+
+test_function "MySQL 8.0" \
+ "MYSQL_VERSION=8.0 setup_mysql" \
+ "mysql --version"
+
+# ==============================================================================
+# TEST 8: MONGODB (Check AVX support)
+# ==============================================================================
+# if grep -q avx /proc/cpuinfo; then
+# test_function "MongoDB 8.0" \
+# "MONGO_VERSION=8.0 setup_mongodb" \
+# "mongod --version"
+# else
+# skip_test "MongoDB 8.0" "CPU does not support AVX"
+# fi
+
+# ==============================================================================
+# TEST 9: NODE.JS
+# ==============================================================================
+# test_function "Node.js 22 with modules" \
+# "NODE_VERSION=22 NODE_MODULE='yarn,pnpm@10.1.0,pm2' setup_nodejs" \
+# "node --version && npm --version && yarn --version && pnpm --version && pm2 --version"
+
+# ==============================================================================
+# TEST 10: PYTHON (UV)
+# ==============================================================================
+# test_function "Python 3.12 via uv" \
+# "PYTHON_VERSION=3.12 setup_uv" \
+# "uv --version"
+
+# ==============================================================================
+# TEST 11: PHP
+# ==============================================================================
+# test_function "PHP 8.3 with FPM" \
+# "PHP_VERSION=8.3 PHP_FPM=YES PHP_MODULE='redis,imagick,apcu,zip,mbstring' setup_php" \
+# "php --version"
+
+# ==============================================================================
+# TEST 12: COMPOSER
+# # ==============================================================================
+# test_function "Composer" \
+# "setup_composer" \
+# "composer --version"
+
+# ==============================================================================
+# TEST 13: JAVA
+# ==============================================================================
+# test_function "Java Temurin 21" \
+# "JAVA_VERSION=21 setup_java" \
+# "java --version"
+
+# ==============================================================================
+# TEST 14: GO
+# ==============================================================================
+# test_function "Go (latest)" \
+# "GO_VERSION=latest setup_go" \
+# "go version"
+
+# ==============================================================================
+# TEST 15: RUBY
+# ==============================================================================
+test_function "Ruby 3.4.1 with Rails" \
+ "RUBY_VERSION=3.4.1 RUBY_INSTALL_RAILS=true setup_ruby" \
+ "ruby --version"
+
+# ==============================================================================
+# TEST 16: RUST
+# ==============================================================================
+# test_function "Rust (stable)" \
+# "RUST_TOOLCHAIN=stable RUST_CRATES='cargo-edit' setup_rust" \
+# "source \$HOME/.cargo/env && rustc --version"
+
+# ==============================================================================
+# TEST 17: GHOSTSCRIPT
+# ==============================================================================
+# test_function "Ghostscript" \
+# "setup_gs" \
+# "gs --version"
+
+# ==============================================================================
+# TEST 18: IMAGEMAGICK
+# ==============================================================================
+# test_function "ImageMagick" \
+# "setup_imagemagick" \
+# "magick --version"
+
+# ==============================================================================
+# TEST 19: FFMPEG
+# ==============================================================================
+# test_function "FFmpeg n7.1.1 (full)" \
+# "FFMPEG_VERSION=n7.1.1 FFMPEG_TYPE=full setup_ffmpeg" \
+# "ffmpeg -version"
+
+# ==============================================================================
+# FINAL SUMMARY
+# ==============================================================================
+echo -e "\n${CYAN}═══════════════════════════════════════════════════════════${NC}"
+echo -e "${CYAN} TEST SUMMARY${NC}"
+echo -e "${CYAN}═══════════════════════════════════════════════════════════${NC}"
+echo -e "${GREEN}✔ Passed: ${TESTS_PASSED}${NC}"
+echo -e "${RED}✖ Failed: ${TESTS_FAILED}${NC}"
+echo -e "${YELLOW}⚠ Skipped: ${TESTS_SKIPPED}${NC}"
+echo -e "\nDetailed log: ${TEST_LOG}"
+
+# Generate summary report
+{
+ echo ""
+ echo "=== FINAL SUMMARY ==="
+ echo "Tests Passed: ${TESTS_PASSED}"
+ echo "Tests Failed: ${TESTS_FAILED}"
+ echo "Tests Skipped: ${TESTS_SKIPPED}"
+ echo ""
+ echo "=== Installed Versions ==="
+ command -v yq &>/dev/null && echo "yq: $(yq --version 2>&1)"
+ command -v clickhouse-server &>/dev/null && echo "ClickHouse: $(clickhouse-server --version 2>&1 | head -n1)"
+ command -v psql &>/dev/null && echo "PostgreSQL: $(psql --version)"
+ command -v mysql &>/dev/null && echo "MySQL: $(mysql --version)"
+ command -v mongod &>/dev/null && echo "MongoDB: $(mongod --version 2>&1 | head -n1)"
+ command -v node &>/dev/null && echo "Node.js: $(node --version)"
+ command -v php &>/dev/null && echo "PHP: $(php --version | head -n1)"
+ command -v java &>/dev/null && echo "Java: $(java --version 2>&1 | head -n1)"
+ command -v go &>/dev/null && echo "Go: $(go version)"
+ command -v ruby &>/dev/null && echo "Ruby: $(ruby --version)"
+ command -v rustc &>/dev/null && echo "Rust: $(rustc --version)"
+ command -v ffmpeg &>/dev/null && echo "FFmpeg: $(ffmpeg -version 2>&1 | head -n1)"
+} >>"$TEST_LOG"
+
+if [ $TESTS_FAILED -eq 0 ]; then
+ echo -e "\n${GREEN}All tests completed successfully!${NC}"
+ exit 0
+else
+ echo -e "\n${RED}Some tests failed. Check the log for details.${NC}"
+ exit 1
+fi
diff --git a/misc/tools.func b/misc/tools.func
index 6973e4c6d..b55f1de1b 100644
--- a/misc/tools.func
+++ b/misc/tools.func
@@ -1,738 +1,1617 @@
#!/bin/bash
-# ------------------------------------------------------------------------------
-# Installs Node.js and optional global modules.
+# ==============================================================================
+# HELPER FUNCTIONS FOR PACKAGE MANAGEMENT
+# ==============================================================================
#
-# Description:
-# - Installs specified Node.js version using NodeSource APT repo
-# - Optionally installs or updates global npm modules
+# This file provides unified helper functions for robust package installation
+# and repository management across Debian/Ubuntu OS upgrades.
#
-# Variables:
-# NODE_VERSION - Node.js version to install (default: 22)
-# NODE_MODULE - Comma-separated list of global modules (e.g. "yarn,@vue/cli@5.0.0")
+# Key Features:
+# - Automatic retry logic for transient APT/network failures
+# - Unified keyring cleanup from all 3 locations
+# - Legacy installation cleanup (nvm, rbenv, rustup)
+# - OS-upgrade-safe repository preparation
+# - Service pattern matching for multi-version tools
+#
+# Usage in install scripts:
+# source /dev/stdin <<< "$FUNCTIONS" # Load from build.func
+# prepare_repository_setup "mysql"
+# install_packages_with_retry "mysql-server" "mysql-client"
+#
+# Quick Reference (Core Helpers):
+# cleanup_tool_keyrings() - Remove keyrings from all 3 locations
+# stop_all_services() - Stop services by pattern (e.g. "php*-fpm")
+# verify_tool_version() - Validate installed version matches expected
+# cleanup_legacy_install() - Remove nvm, rbenv, rustup, etc.
+# prepare_repository_setup() - Cleanup repos + keyrings + validate APT
+# install_packages_with_retry() - Install with 3 retries and APT refresh
+# upgrade_packages_with_retry() - Upgrade with 3 retries and APT refresh
+#
+# ==============================================================================
+
# ------------------------------------------------------------------------------
+# Cache installed version to avoid repeated checks
+# ------------------------------------------------------------------------------
+cache_installed_version() {
+ local app="$1"
+ local version="$2"
+ mkdir -p /var/cache/app-versions
+ echo "$version" >"/var/cache/app-versions/${app}_version.txt"
+}
-function setup_nodejs() {
- local NODE_VERSION="${NODE_VERSION:-22}"
- local NODE_MODULE="${NODE_MODULE:-}"
- local CURRENT_NODE_VERSION=""
- local NEED_NODE_INSTALL=false
+get_cached_version() {
+ local app="$1"
+ mkdir -p /var/cache/app-versions
+ if [[ -f "/var/cache/app-versions/${app}_version.txt" ]]; then
+ cat "/var/cache/app-versions/${app}_version.txt"
+ return 0
+ fi
+ return 0
+}
- if command -v node >/dev/null; then
- CURRENT_NODE_VERSION="$(node -v | grep -oP '^v\K[0-9]+')"
- if [[ "$CURRENT_NODE_VERSION" != "$NODE_VERSION" ]]; then
- msg_info "Old Node.js $CURRENT_NODE_VERSION found, replacing with $NODE_VERSION"
- NEED_NODE_INSTALL=true
- fi
- else
- msg_info "Setup Node.js $NODE_VERSION"
- NEED_NODE_INSTALL=true
+# ------------------------------------------------------------------------------
+# Clean up ALL keyring locations for a tool (unified helper)
+# Usage: cleanup_tool_keyrings "mariadb" "mysql" "postgresql"
+# ------------------------------------------------------------------------------
+cleanup_tool_keyrings() {
+ local tool_patterns=("$@")
+
+ for pattern in "${tool_patterns[@]}"; do
+ # Paths are deliberately left unquoted so the shell expands the
+ # ${pattern}*.gpg globs; rm -f plus the || true guard keeps this a
+ # silent no-op when nothing matches.
+ rm -f /usr/share/keyrings/${pattern}*.gpg \
+ /etc/apt/keyrings/${pattern}*.gpg \
+ /etc/apt/trusted.gpg.d/${pattern}*.gpg 2>/dev/null || true
+ done
+}
+
+# ------------------------------------------------------------------------------
+# Stop and disable all service instances matching a pattern
+# Patterns are extended regexes passed to grep -E (not shell globs), so use
+# e.g. "php.*-fpm" to match php8.2-fpm / php8.3-fpm.
+# Usage: stop_all_services "php.*-fpm" "mysql" "mariadb"
+# ------------------------------------------------------------------------------
+stop_all_services() {
+ local service_patterns=("$@")
+
+ for pattern in "${service_patterns[@]}"; do
+ # Find all matching services
+ # The while loop runs in a pipeline subshell; that is fine here because
+ # it only invokes systemctl and sets no variables in the outer scope.
+ systemctl list-units --type=service --all 2>/dev/null |
+ grep -oE "${pattern}[^ ]*\.service" |
+ sort -u |
+ while read -r service; do
+ $STD systemctl stop "$service" 2>/dev/null || true
+ $STD systemctl disable "$service" 2>/dev/null || true
+ done
+ done
+}
+
+# ------------------------------------------------------------------------------
+# Verify installed tool version matches expected version
+# Only the MAJOR components are compared (text before the first dot), so
+# "22.1" vs "22.9" counts as a match.
+# Returns: 0 if match, 1 if mismatch (with warning)
+# Usage: verify_tool_version "nodejs" "22" "$(node -v | grep -oP '^v\K[0-9]+')"
+# ------------------------------------------------------------------------------
+verify_tool_version() {
+ local tool_name="$1"
+ local expected_version="$2"
+ local installed_version="$3"
+
+ # Extract major version for comparison
+ local expected_major="${expected_version%%.*}"
+ local installed_major="${installed_version%%.*}"
+
+ if [[ "$installed_major" != "$expected_major" ]]; then
+ msg_warn "$tool_name version mismatch: expected $expected_version, got $installed_version"
+ return 1
fi
- if ! command -v jq &>/dev/null; then
- $STD apt-get update
- $STD apt-get install -y jq || {
- msg_error "Failed to install jq"
+ return 0
+}
+
+# ------------------------------------------------------------------------------
+# Clean up legacy installation methods (nvm, rbenv, rustup, etc.)
+# Usage: cleanup_legacy_install "nodejs" -> removes nvm
+# NOTE(review): operates on $HOME — assumes this runs as the target user
+# (root inside the container); confirm before reusing in another context.
+# Unknown tool names fall through the case silently (intentional no-op).
+# ------------------------------------------------------------------------------
+cleanup_legacy_install() {
+ local tool_name="$1"
+
+ case "$tool_name" in
+ nodejs | node)
+ if [[ -d "$HOME/.nvm" ]]; then
+ msg_info "Removing legacy nvm installation"
+ rm -rf "$HOME/.nvm" "$HOME/.npm" "$HOME/.bower" "$HOME/.config/yarn" 2>/dev/null || true
+ # Drop any NVM_DIR export lines left behind in shell init files
+ sed -i '/NVM_DIR/d' "$HOME/.bashrc" "$HOME/.profile" 2>/dev/null || true
+ msg_ok "Legacy nvm installation removed"
+ fi
+ ;;
+ ruby)
+ if [[ -d "$HOME/.rbenv" ]]; then
+ msg_info "Removing legacy rbenv installation"
+ rm -rf "$HOME/.rbenv" 2>/dev/null || true
+ sed -i '/rbenv/d' "$HOME/.bashrc" "$HOME/.profile" 2>/dev/null || true
+ msg_ok "Legacy rbenv installation removed"
+ fi
+ ;;
+ rust)
+ if [[ -d "$HOME/.cargo" ]] || [[ -d "$HOME/.rustup" ]]; then
+ msg_info "Removing legacy rustup installation"
+ rm -rf "$HOME/.cargo" "$HOME/.rustup" 2>/dev/null || true
+ sed -i '/cargo/d' "$HOME/.bashrc" "$HOME/.profile" 2>/dev/null || true
+ msg_ok "Legacy rustup installation removed"
+ fi
+ ;;
+ go | golang)
+ if [[ -d "$HOME/go" ]]; then
+ msg_info "Removing legacy Go workspace"
+ # Keep user code, just remove GOPATH env
+ sed -i '/GOPATH/d' "$HOME/.bashrc" "$HOME/.profile" 2>/dev/null || true
+ msg_ok "Legacy Go workspace cleaned"
+ fi
+ ;;
+ esac
+}
+
+# ------------------------------------------------------------------------------
+# Unified repository preparation before setup
+# Cleans up old repos, keyrings, and ensures APT is working
+# Relies on cleanup_old_repo_files and ensure_apt_working, which are defined
+# elsewhere in this library.
+# Returns: 0 on success, 1 when APT cannot be brought into a working state.
+# Usage: prepare_repository_setup "mariadb" "mysql"
+# ------------------------------------------------------------------------------
+prepare_repository_setup() {
+ local repo_names=("$@")
+
+ # Clean up all old repository files
+ for repo in "${repo_names[@]}"; do
+ cleanup_old_repo_files "$repo"
+ done
+
+ # Clean up all keyrings
+ cleanup_tool_keyrings "${repo_names[@]}"
+
+ # Ensure APT is in working state
+ ensure_apt_working || return 1
+
+ return 0
+}
+
+# ------------------------------------------------------------------------------
+# Install packages with retry logic
+# Makes up to three attempts (initial try + two retries), refreshing the APT
+# index between attempts. Returns 0 on success, 1 when all attempts fail.
+# Usage: install_packages_with_retry "mysql-server" "mysql-client"
+# ------------------------------------------------------------------------------
+install_packages_with_retry() {
+ local packages=("$@")
+ local max_retries=2
+ local retry=0
+
+ while true; do
+ if $STD apt install -y "${packages[@]}" 2>/dev/null; then
+ return 0
+ fi
+
+ retry=$((retry + 1))
+ if [[ $retry -gt $max_retries ]]; then
+ return 1
+ fi
+ msg_warn "Package installation failed, retrying ($retry/$max_retries)..."
+ sleep 2
+ $STD apt update 2>/dev/null || true
+ done
+}
+
+# ------------------------------------------------------------------------------
+# Upgrade specific packages with retry logic
+# Mirrors install_packages_with_retry but passes --only-upgrade, so packages
+# that are not yet installed are left untouched. Up to three attempts total,
+# with an APT index refresh between attempts. Returns 0 on success, 1 on
+# exhausted retries.
+# Usage: upgrade_packages_with_retry "mariadb-server" "mariadb-client"
+# ------------------------------------------------------------------------------
+upgrade_packages_with_retry() {
+ local packages=("$@")
+ local max_retries=2
+ local retry=0
+
+ while [[ $retry -le $max_retries ]]; do
+ if $STD apt install --only-upgrade -y "${packages[@]}" 2>/dev/null; then
+ return 0
+ fi
+
+ retry=$((retry + 1))
+ if [[ $retry -le $max_retries ]]; then
+ msg_warn "Package upgrade failed, retrying ($retry/$max_retries)..."
+ sleep 2
+ $STD apt update 2>/dev/null || true
+ fi
+ done
+
+ return 1
+}
+
+# ------------------------------------------------------------------------------
+# Check if tool is already installed and optionally verify exact version
+# Echoes the detected version on stdout whenever one is found — including on
+# a version mismatch — so callers can capture it with $( ).
+# Returns: 0 if installed (with optional version match), 1 if not installed
+# or if the detected version differs from required_version.
+# Note: version granularity varies by tool (full x.y.z for mariadb/mysql,
+# major.minor for mongodb/php/ruby, major only for node/postgres).
+# Usage: is_tool_installed "mariadb" "11.4" || echo "Not installed"
+# ------------------------------------------------------------------------------
+is_tool_installed() {
+ local tool_name="$1"
+ local required_version="${2:-}"
+ local installed_version=""
+
+ case "$tool_name" in
+ mariadb)
+ if command -v mariadb >/dev/null 2>&1; then
+ installed_version=$(mariadb --version 2>/dev/null | grep -oE '[0-9]+\.[0-9]+\.[0-9]+' | head -1)
+ fi
+ ;;
+ mysql)
+ if command -v mysql >/dev/null 2>&1; then
+ installed_version=$(mysql --version 2>/dev/null | grep -oE '[0-9]+\.[0-9]+\.[0-9]+' | head -1)
+ fi
+ ;;
+ mongodb | mongod)
+ if command -v mongod >/dev/null 2>&1; then
+ installed_version=$(mongod --version 2>/dev/null | awk '/db version/{print $3}' | cut -d. -f1,2)
+ fi
+ ;;
+ node | nodejs)
+ if command -v node >/dev/null 2>&1; then
+ installed_version=$(node -v 2>/dev/null | grep -oP '^v\K[0-9]+')
+ fi
+ ;;
+ php)
+ if command -v php >/dev/null 2>&1; then
+ installed_version=$(php -v 2>/dev/null | awk '/^PHP/{print $2}' | cut -d. -f1,2)
+ fi
+ ;;
+ postgres | postgresql)
+ if command -v psql >/dev/null 2>&1; then
+ installed_version=$(psql --version 2>/dev/null | awk '{print $3}' | cut -d. -f1)
+ fi
+ ;;
+ ruby)
+ if command -v ruby >/dev/null 2>&1; then
+ installed_version=$(ruby --version 2>/dev/null | awk '{print $2}' | cut -d. -f1,2)
+ fi
+ ;;
+ rust | rustc)
+ if command -v rustc >/dev/null 2>&1; then
+ installed_version=$(rustc --version 2>/dev/null | awk '{print $2}')
+ fi
+ ;;
+ go | golang)
+ if command -v go >/dev/null 2>&1; then
+ installed_version=$(go version 2>/dev/null | awk '{print $3}' | sed 's/go//')
+ fi
+ ;;
+ clickhouse)
+ if command -v clickhouse >/dev/null 2>&1; then
+ installed_version=$(clickhouse --version 2>/dev/null | awk '{print $2}')
+ fi
+ ;;
+ esac
+
+ if [[ -z "$installed_version" ]]; then
+ return 1 # Not installed
+ fi
+
+ # Version comparison is an exact string match against the tool-specific
+ # granularity extracted above.
+ if [[ -n "$required_version" && "$installed_version" != "$required_version" ]]; then
+ echo "$installed_version"
+ return 1 # Version mismatch
+ fi
+
+ echo "$installed_version"
+ return 0 # Installed and version matches (if specified)
+}
+
+# ------------------------------------------------------------------------------
+# Remove old tool version completely (purge + cleanup repos)
+# Stops services, purges packages, removes data dirs (where safe), strips
+# keyrings, then deletes old repo files for repo_name. Always returns 0 —
+# every step is best-effort by design.
+# Usage: remove_old_tool_version "mariadb" "repository-name"
+# ------------------------------------------------------------------------------
+remove_old_tool_version() {
+ local tool_name="$1"
+ local repo_name="${2:-$tool_name}"
+
+ case "$tool_name" in
+ mariadb)
+ stop_all_services "mariadb"
+ $STD apt purge -y 'mariadb*' >/dev/null 2>&1 || true
+ cleanup_tool_keyrings "mariadb"
+ ;;
+ mysql)
+ stop_all_services "mysql"
+ $STD apt purge -y 'mysql*' >/dev/null 2>&1 || true
+ # Destructive: drops the MySQL data directory
+ rm -rf /var/lib/mysql 2>/dev/null || true
+ cleanup_tool_keyrings "mysql"
+ ;;
+ mongodb)
+ stop_all_services "mongod"
+ $STD apt purge -y 'mongodb*' >/dev/null 2>&1 || true
+ # Destructive: drops the MongoDB data directory
+ rm -rf /var/lib/mongodb 2>/dev/null || true
+ cleanup_tool_keyrings "mongodb"
+ ;;
+ node | nodejs)
+ $STD apt purge -y nodejs npm >/dev/null 2>&1 || true
+ # Clean up npm global modules
+ # NOTE(review): this parse of `npm list -g` output looks fragile across
+ # npm versions — verify the grep pattern actually matches module lines.
+ if command -v npm >/dev/null 2>&1; then
+ npm list -g 2>/dev/null | grep -oE '^ \S+' | awk '{print $1}' | while read -r module; do
+ npm uninstall -g "$module" >/dev/null 2>&1 || true
+ done
+ fi
+ cleanup_legacy_install "nodejs"
+ cleanup_tool_keyrings "nodesource"
+ ;;
+ php)
+ stop_all_services "php.*-fpm"
+ $STD apt purge -y 'php*' >/dev/null 2>&1 || true
+ rm -rf /etc/php 2>/dev/null || true
+ cleanup_tool_keyrings "deb.sury.org-php" "php"
+ ;;
+ postgresql)
+ stop_all_services "postgresql"
+ $STD apt purge -y 'postgresql*' >/dev/null 2>&1 || true
+ # Keep data directory for safety (can be removed manually if needed)
+ # rm -rf /var/lib/postgresql 2>/dev/null || true
+ cleanup_tool_keyrings "postgresql" "pgdg"
+ ;;
+ java)
+ $STD apt purge -y 'temurin*' 'adoptium*' 'openjdk*' >/dev/null 2>&1 || true
+ cleanup_tool_keyrings "adoptium"
+ ;;
+ ruby)
+ cleanup_legacy_install "ruby"
+ $STD apt purge -y 'ruby*' >/dev/null 2>&1 || true
+ ;;
+ rust)
+ cleanup_legacy_install "rust"
+ ;;
+ go | golang)
+ rm -rf /usr/local/go 2>/dev/null || true
+ cleanup_legacy_install "golang"
+ ;;
+ clickhouse)
+ stop_all_services "clickhouse-server"
+ $STD apt purge -y 'clickhouse*' >/dev/null 2>&1 || true
+ # Destructive: drops the ClickHouse data directory
+ rm -rf /var/lib/clickhouse 2>/dev/null || true
+ cleanup_tool_keyrings "clickhouse"
+ ;;
+ esac
+
+ # Clean up old repository files (both .list and .sources)
+ cleanup_old_repo_files "$repo_name"
+
+ return 0
+}
+
+# ------------------------------------------------------------------------------
+# Determine if tool update/upgrade is needed
+# Returns: 0 (update needed), 1 (already up-to-date)
+# A tool that is not installed at all also reports "update needed".
+# Usage: if should_update_tool "mariadb" "11.4"; then ... fi
+# ------------------------------------------------------------------------------
+should_update_tool() {
+ local tool="$1"
+ local target="$2"
+ local current
+
+ # Not installed at all -> treat as "needs install"
+ if ! current=$(is_tool_installed "$tool" 2>/dev/null); then
+ return 0
+ fi
+
+ # Succeed (update needed) exactly when the versions differ
+ [[ "$current" != "$target" ]]
+}
+
+# ------------------------------------------------------------------------------
+# Unified repository management for tools
+# Handles adding, updating, and verifying tool repositories. Each branch
+# cleans old repo files, imports the signing key, and writes a deb822
+# .sources file under /etc/apt/sources.list.d/.
+# Usage: manage_tool_repository "mariadb" "11.4" "https://repo..." "GPG_key_url"
+# Supports: mariadb, mongodb, nodejs, postgresql, php, mysql
+# Returns: 0 on success, 1 on missing arguments / download failures / unknown tool.
+# ------------------------------------------------------------------------------
+manage_tool_repository() {
+ local tool_name="$1"
+ local version="$2"
+ local repo_url="$3"
+ local gpg_key_url="${4:-}"
+ local distro_id repo_component suite
+
+ distro_id=$(awk -F= '/^ID=/{print $2}' /etc/os-release | tr -d '"')
+
+ case "$tool_name" in
+ mariadb)
+ if [[ -z "$repo_url" || -z "$gpg_key_url" ]]; then
+ msg_error "MariaDB repository requires repo_url and gpg_key_url"
+ return 1
+ fi
+
+ # Clean old repos first
+ cleanup_old_repo_files "mariadb"
+
+ # Get suite for fallback handling
+ local distro_codename
+ distro_codename=$(awk -F= '/^VERSION_CODENAME=/{print $2}' /etc/os-release)
+ suite=$(get_fallback_suite "$distro_id" "$distro_codename" "$repo_url/$distro_id")
+
+ # Setup new repository using deb822 format
+ setup_deb822_repo "mariadb" "$gpg_key_url" "$repo_url/$distro_id" "$suite" "main" "amd64 arm64" || return 1
+ return 0
+ ;;
+
+ mongodb)
+ if [[ -z "$repo_url" || -z "$gpg_key_url" ]]; then
+ msg_error "MongoDB repository requires repo_url and gpg_key_url"
+ return 1
+ fi
+
+ # Clean old repos first
+ cleanup_old_repo_files "mongodb"
+
+ # Import GPG key
+ mkdir -p /etc/apt/keyrings
+ if ! curl -fsSL "$gpg_key_url" | gpg --dearmor --yes -o "/etc/apt/keyrings/mongodb-server-${version}.gpg" 2>/dev/null; then
+ msg_error "Failed to download MongoDB GPG key"
+ return 1
+ fi
+
+ # Setup repository
+ local distro_codename
+ distro_codename=$(awk -F= '/^VERSION_CODENAME=/{print $2}' /etc/os-release)
+
+ # Suite mapping with fallback for newer releases not yet supported by upstream
+ if [[ "$distro_id" == "debian" ]]; then
+ case "$distro_codename" in
+ trixie | forky | sid)
+ # Testing/unstable releases fallback to latest stable suite
+ suite="bookworm"
+ ;;
+ bookworm)
+ suite="bookworm"
+ ;;
+ bullseye)
+ suite="bullseye"
+ ;;
+ *)
+ # Unknown release: fallback to latest stable suite
+ msg_warn "Unknown Debian release '${distro_codename}', using bookworm"
+ suite="bookworm"
+ ;;
+ esac
+ elif [[ "$distro_id" == "ubuntu" ]]; then
+ case "$distro_codename" in
+ oracular | plucky)
+ # Newer releases fallback to latest LTS
+ suite="noble"
+ ;;
+ noble)
+ suite="noble"
+ ;;
+ jammy)
+ suite="jammy"
+ ;;
+ focal)
+ suite="focal"
+ ;;
+ *)
+ # Unknown release: fallback to latest LTS
+ msg_warn "Unknown Ubuntu release '${distro_codename}', using noble"
+ suite="noble"
+ ;;
+ esac
+ else
+ # For other distros, try generic fallback
+ suite=$(get_fallback_suite "$distro_id" "$distro_codename" "$repo_url")
+ fi
+
+ repo_component="main"
+ [[ "$distro_id" == "ubuntu" ]] && repo_component="multiverse"
+
+ # Write the deb822 sources file (heredoc redirected into the file; the
+ # previous "cat </etc/..." form read the file instead of writing it)
+ cat <<EOF >/etc/apt/sources.list.d/mongodb-org-${version}.sources
+Types: deb
+URIs: ${repo_url}
+Suites: ${suite}/mongodb-org/${version}
+Components: ${repo_component}
+Architectures: amd64 arm64
+Signed-By: /etc/apt/keyrings/mongodb-server-${version}.gpg
+EOF
+ return 0
+ ;;
+
+ nodejs)
+ if [[ -z "$repo_url" || -z "$gpg_key_url" ]]; then
+ msg_error "Node.js repository requires repo_url and gpg_key_url"
+ return 1
+ fi
+
+ cleanup_old_repo_files "nodesource"
+
+ # NodeSource uses deb822 format with GPG from repo
+ local distro_codename
+ distro_codename=$(awk -F= '/^VERSION_CODENAME=/{print $2}' /etc/os-release)
+
+ # Create keyring directory first
+ mkdir -p /etc/apt/keyrings
+
+ # Download GPG key from NodeSource
+ curl -fsSL "$gpg_key_url" | gpg --dearmor -o /etc/apt/keyrings/nodesource.gpg || {
+ msg_error "Failed to import NodeSource GPG key"
+ return 1
+ }
+
+ # Write the deb822 sources file
+ cat <<EOF >/etc/apt/sources.list.d/nodesource.sources
+Types: deb
+URIs: $repo_url
+Suites: nodistro
+Components: main
+Architectures: amd64 arm64
+Signed-By: /etc/apt/keyrings/nodesource.gpg
+EOF
+ return 0
+ ;;
+
+ php)
+ if [[ -z "$gpg_key_url" ]]; then
+ msg_error "PHP repository requires gpg_key_url"
+ return 1
+ fi
+
+ cleanup_old_repo_files "php"
+
+ # Download and install keyring
+ curl -fsSLo /tmp/debsuryorg-archive-keyring.deb "$gpg_key_url" || {
+ msg_error "Failed to download PHP keyring"
+ return 1
+ }
+ dpkg -i /tmp/debsuryorg-archive-keyring.deb >/dev/null 2>&1 || {
+ msg_error "Failed to install PHP keyring"
+ rm -f /tmp/debsuryorg-archive-keyring.deb
+ return 1
+ }
+ rm -f /tmp/debsuryorg-archive-keyring.deb
+
+ # Setup repository
+ local distro_codename
+ distro_codename=$(awk -F= '/^VERSION_CODENAME=/{print $2}' /etc/os-release)
+ cat <<EOF >/etc/apt/sources.list.d/php.sources
+Types: deb
+URIs: https://packages.sury.org/php
+Suites: $distro_codename
+Components: main
+Architectures: amd64 arm64
+Signed-By: /usr/share/keyrings/deb.sury.org-php.gpg
+EOF
+ return 0
+ ;;
+
+ postgresql)
+ if [[ -z "$gpg_key_url" ]]; then
+ msg_error "PostgreSQL repository requires gpg_key_url"
+ return 1
+ fi
+
+ cleanup_old_repo_files "postgresql"
+
+ # Create keyring directory first
+ mkdir -p /etc/apt/keyrings
+
+ # Import PostgreSQL key
+ curl -fsSL "$gpg_key_url" | gpg --dearmor -o /etc/apt/keyrings/postgresql.gpg || {
+ msg_error "Failed to import PostgreSQL GPG key"
+ return 1
+ }
+
+ # Setup repository
+ local distro_codename
+ distro_codename=$(awk -F= '/^VERSION_CODENAME=/{print $2}' /etc/os-release)
+ cat <<EOF >/etc/apt/sources.list.d/postgresql.sources
+Types: deb
+URIs: http://apt.postgresql.org/pub/repos/apt
+Suites: $distro_codename-pgdg
+Components: main
+Architectures: amd64 arm64
+Signed-By: /etc/apt/keyrings/postgresql.gpg
+EOF
+ return 0
+ ;;
+
+ *)
+ msg_error "Unknown tool repository: $tool_name"
+ return 1
+ ;;
+ esac
+
+ return 0
+}
+
+# ------------------------------------------------------------------------------
+# Unified package upgrade function (with apt update caching)
+# Refreshes the APT index at most once per 5 minutes (shared timestamp file
+# with ensure_dependencies), then runs an --only-upgrade install.
+# Returns: 0 on success; 1 when the upgrade itself fails (a failed apt update
+# is only a warning).
+# ------------------------------------------------------------------------------
+upgrade_package() {
+ local package="$1"
+
+ # Use same caching logic as ensure_dependencies
+ local apt_cache_file="/var/cache/apt-update-timestamp"
+ local current_time=$(date +%s)
+ local last_update=0
+
+ if [[ -f "$apt_cache_file" ]]; then
+ last_update=$(cat "$apt_cache_file" 2>/dev/null || echo 0)
+ fi
+
+ if ((current_time - last_update > 300)); then
+ $STD apt update || {
+ msg_warn "APT update failed in upgrade_package - continuing with cached packages"
+ }
+ echo "$current_time" >"$apt_cache_file"
+ fi
+
+ $STD apt install --only-upgrade -y "$package" || {
+ msg_warn "Failed to upgrade $package"
+ return 1
+ }
+}
+
+# ------------------------------------------------------------------------------
+# Repository availability check
+# Probes ${repo_url}/dists/${suite}/Release with a 10-second timeout.
+# Returns: 0 when the Release file is reachable, 1 otherwise.
+# ------------------------------------------------------------------------------
+verify_repo_available() {
+ local repo_url="$1"
+ local suite="$2"
+
+ if ! curl -fsSL --max-time 10 "${repo_url}/dists/${suite}/Release" &>/dev/null; then
+ return 1
+ fi
+ return 0
+}
+
+# ------------------------------------------------------------------------------
+# Ensure dependencies are installed (with apt update caching)
+# A dependency counts as present when either its command is on PATH or the
+# package of the same name is installed. The APT index is refreshed at most
+# once per 5 minutes via a shared timestamp file.
+# Returns: 0 when all deps are present/installed, 1 on install failure.
+# ------------------------------------------------------------------------------
+ensure_dependencies() {
+ local deps=("$@")
+ local missing=()
+
+ for dep in "${deps[@]}"; do
+ if ! command -v "$dep" &>/dev/null && ! is_package_installed "$dep"; then
+ missing+=("$dep")
+ fi
+ done
+
+ if [[ ${#missing[@]} -gt 0 ]]; then
+ # Only run apt update if not done recently (within last 5 minutes)
+ local apt_cache_file="/var/cache/apt-update-timestamp"
+ local current_time=$(date +%s)
+ local last_update=0
+
+ if [[ -f "$apt_cache_file" ]]; then
+ last_update=$(cat "$apt_cache_file" 2>/dev/null || echo 0)
+ fi
+
+ if ((current_time - last_update > 300)); then
+ # Ensure orphaned sources are cleaned before updating
+ cleanup_orphaned_sources 2>/dev/null || true
+
+ if ! $STD apt update; then
+ ensure_apt_working || return 1
+ fi
+ echo "$current_time" >"$apt_cache_file"
+ fi
+
+ $STD apt install -y "${missing[@]}" || {
+ msg_error "Failed to install dependencies: ${missing[*]}"
return 1
}
fi
+}
- if [[ "$NEED_NODE_INSTALL" == true ]]; then
- $STD apt-get purge -y nodejs
- rm -f /etc/apt/sources.list.d/nodesource.list /usr/share/keyrings/nodesource.gpg
+# ------------------------------------------------------------------------------
+# Smart version comparison
+# Returns 0 (true) when $1 is STRICTLY greater than $2 under sort -V
+# ordering; equal versions return 1.
+# Usage: version_gt "2.10.0" "2.9.1" && echo "newer"
+# ------------------------------------------------------------------------------
+version_gt() {
+ test "$(printf '%s\n' "$@" | sort -V | head -n 1)" != "$1"
+}
- mkdir -p /usr/share/keyrings
- curl -fsSL https://deb.nodesource.com/gpgkey/nodesource-repo.gpg.key |
- gpg --dearmor -o /usr/share/keyrings/nodesource.gpg || {
- msg_error "Failed to import NodeSource GPG key"
- exit 1
- }
- chmod 644 /usr/share/keyrings/nodesource.gpg
+# ------------------------------------------------------------------------------
+# Get system architecture (normalized)
+# arch_type selects the probe: "dpkg" (raw dpkg arch), "uname" (uname -m
+# normalized to amd64/arm64), or "both" (dpkg with uname fallback, the
+# default for any other value). Echoes the result on stdout.
+# ------------------------------------------------------------------------------
+get_system_arch() {
+ local arch_type="${1:-dpkg}" # dpkg, uname, or both
+ local arch
- local ARCH
- ARCH=$(dpkg --print-architecture)
- if ! [[ "$ARCH" =~ ^(amd64|arm64|armhf)$ ]]; then
- msg_error "Unsupported architecture: $ARCH"
- exit 1
+ case "$arch_type" in
+ dpkg)
+ arch=$(dpkg --print-architecture 2>/dev/null)
+ ;;
+ uname)
+ arch=$(uname -m)
+ [[ "$arch" == "x86_64" ]] && arch="amd64"
+ [[ "$arch" == "aarch64" ]] && arch="arm64"
+ ;;
+ both | *)
+ arch=$(dpkg --print-architecture 2>/dev/null || uname -m)
+ [[ "$arch" == "x86_64" ]] && arch="amd64"
+ [[ "$arch" == "aarch64" ]] && arch="arm64"
+ ;;
+ esac
+
+ echo "$arch"
+}
+
+# ------------------------------------------------------------------------------
+# Create temporary directory with automatic cleanup
+# Echoes the new directory path on stdout.
+# NOTE(review): when invoked as dir=$(create_temp_dir), the trap is set in
+# the command-substitution subshell and will not fire in the caller; it also
+# overwrites any existing EXIT/ERR/INT/TERM traps in its shell — confirm
+# callers account for both.
+# ------------------------------------------------------------------------------
+create_temp_dir() {
+ local tmp_dir=$(mktemp -d)
+ # Set trap to cleanup on EXIT, ERR, INT, TERM
+ trap "rm -rf '$tmp_dir'" EXIT ERR INT TERM
+ echo "$tmp_dir"
+}
+
+# ------------------------------------------------------------------------------
+# Check if package is installed (faster than dpkg -l | grep)
+# Returns 0 when dpkg reports the package as "install ok installed".
+# ------------------------------------------------------------------------------
+is_package_installed() {
+ local pkg="$1"
+ [[ "$(dpkg-query -W -f='${Status}' "$pkg" 2>/dev/null)" == "install ok installed" ]]
+}
+
+# ------------------------------------------------------------------------------
+# GitHub API call with authentication and rate limit handling
+# Args: $1 = API URL, $2 = output file (default /dev/stdout).
+# Uses GITHUB_TOKEN as a Bearer token when set. Retries up to 3 times:
+# HTTP 403 (rate limit) backs off with exponential delay, other non-200
+# codes retry with a fixed delay, 404 fails immediately. A curl transport
+# failure is folded into the "000" pseudo-code and handled like any error.
+# Returns: 0 on HTTP 200, 1 otherwise.
+# ------------------------------------------------------------------------------
+github_api_call() {
+ local url="$1"
+ local output_file="${2:-/dev/stdout}"
+ local max_retries=3
+ local retry_delay=2
+
+ local header_args=()
+ [[ -n "${GITHUB_TOKEN:-}" ]] && header_args=(-H "Authorization: Bearer $GITHUB_TOKEN")
+
+ for attempt in $(seq 1 $max_retries); do
+ local http_code
+ http_code=$(curl -fsSL -w "%{http_code}" -o "$output_file" \
+ -H "Accept: application/vnd.github+json" \
+ -H "X-GitHub-Api-Version: 2022-11-28" \
+ "${header_args[@]}" \
+ "$url" 2>/dev/null || echo "000")
+
+ case "$http_code" in
+ 200)
+ return 0
+ ;;
+ 403)
+ # Rate limit - check if we can retry
+ if [[ $attempt -lt $max_retries ]]; then
+ msg_warn "GitHub API rate limit, waiting ${retry_delay}s... (attempt $attempt/$max_retries)"
+ sleep "$retry_delay"
+ retry_delay=$((retry_delay * 2))
+ continue
+ fi
+ msg_error "GitHub API rate limit exceeded. Set GITHUB_TOKEN to increase limits."
+ return 1
+ ;;
+ 404)
+ msg_error "GitHub API endpoint not found: $url"
+ return 1
+ ;;
+ *)
+ if [[ $attempt -lt $max_retries ]]; then
+ sleep "$retry_delay"
+ continue
+ fi
+ msg_error "GitHub API call failed with HTTP $http_code"
+ return 1
+ ;;
+ esac
+ done
+
+ return 1
+}
+
+should_upgrade() {
+ local current="$1"
+ local target="$2"
+
+ [[ -z "$current" ]] && return 0
+ version_gt "$target" "$current" && return 0
+ return 1
+}
+
+# ------------------------------------------------------------------------------
+# Get OS information (cached for performance)
+# Parses /etc/os-release once and caches the fields in exported _OS_*
+# variables; subsequent calls reuse the cache. Echoes the requested field
+# ("id", "codename", "version"/"version_id", "version_full", or "all" for a
+# combined summary; anything else falls back to the ID).
+# ------------------------------------------------------------------------------
+get_os_info() {
+ local field="${1:-all}" # id, codename, version, version_id, all
+
+ # Cache OS info to avoid repeated file reads
+ if [[ -z "${_OS_ID:-}" ]]; then
+ export _OS_ID=$(awk -F= '/^ID=/{gsub(/"/,"",$2); print $2}' /etc/os-release)
+ export _OS_CODENAME=$(awk -F= '/^VERSION_CODENAME=/{gsub(/"/,"",$2); print $2}' /etc/os-release)
+ export _OS_VERSION=$(awk -F= '/^VERSION_ID=/{gsub(/"/,"",$2); print $2}' /etc/os-release)
+ export _OS_VERSION_FULL=$(awk -F= '/^VERSION=/{gsub(/"/,"",$2); print $2}' /etc/os-release)
+ fi
+
+ case "$field" in
+ id) echo "$_OS_ID" ;;
+ codename) echo "$_OS_CODENAME" ;;
+ version) echo "$_OS_VERSION" ;;
+ version_id) echo "$_OS_VERSION" ;;
+ version_full) echo "$_OS_VERSION_FULL" ;;
+ all) echo "ID=$_OS_ID CODENAME=$_OS_CODENAME VERSION=$_OS_VERSION" ;;
+ *) echo "$_OS_ID" ;;
+ esac
+}
+
+# ------------------------------------------------------------------------------
+# Check if running on specific OS
+# Thin predicates over get_os_info id; usable directly in `if` conditions.
+# ------------------------------------------------------------------------------
+is_debian() {
+ [[ "$(get_os_info id)" == "debian" ]]
+}
+
+is_ubuntu() {
+ [[ "$(get_os_info id)" == "ubuntu" ]]
+}
+
+is_alpine() {
+ [[ "$(get_os_info id)" == "alpine" ]]
+}
+
+# ------------------------------------------------------------------------------
+# Get Debian/Ubuntu major version
+# ------------------------------------------------------------------------------
+get_os_version_major() {
+ local version=$(get_os_info version)
+ echo "${version%%.*}"
+}
+
+# ------------------------------------------------------------------------------
+# Download file with retry logic and progress
+# Args: $1 = url, $2 = output path, $3 = max retries (default 3),
+# $4 = "true" to show a curl progress bar instead of silent mode.
+# Returns: 0 on success, 1 after all attempts fail.
+# ------------------------------------------------------------------------------
+download_file() {
+ local url="$1"
+ local output="$2"
+ local max_retries="${3:-3}"
+ local show_progress="${4:-false}"
+
+ local curl_opts=(-fsSL)
+ [[ "$show_progress" == "true" ]] && curl_opts=(-fL#)
+
+ for attempt in $(seq 1 $max_retries); do
+ if curl "${curl_opts[@]}" -o "$output" "$url"; then
+ return 0
fi
- echo "deb [arch=$ARCH signed-by=/usr/share/keyrings/nodesource.gpg] https://deb.nodesource.com/node_${NODE_VERSION}.x nodistro main" >/etc/apt/sources.list.d/nodesource.list
+ if [[ $attempt -lt $max_retries ]]; then
+ msg_warn "Download failed, retrying... (attempt $attempt/$max_retries)"
+ sleep 2
+ fi
+ done
- cat </etc/apt/preferences.d/nodejs
-Package: nodejs
-Pin: origin deb.nodesource.com
-Pin-Priority: 700
+ msg_error "Failed to download: $url"
+ return 1
+}
+
+# ------------------------------------------------------------------------------
+# Get fallback suite for repository (comprehensive mapping)
+# First probes the repo for the actual codename via verify_repo_available;
+# only when that fails does it fall back to the static mapping below.
+# Echoes the chosen suite on stdout; unknown distros echo the codename as-is.
+# ------------------------------------------------------------------------------
+get_fallback_suite() {
+ local distro_id="$1"
+ local distro_codename="$2"
+ local repo_base_url="$3"
+
+ # Check if current codename works
+ if verify_repo_available "$repo_base_url" "$distro_codename"; then
+ echo "$distro_codename"
+ return 0
+ fi
+
+ # Comprehensive fallback mappings
+ case "$distro_id" in
+ debian)
+ case "$distro_codename" in
+ # Debian 13 (Trixie) → Debian 12 (Bookworm)
+ trixie | forky | sid)
+ echo "bookworm"
+ ;;
+ # Debian 12 (Bookworm) stays
+ bookworm)
+ echo "bookworm"
+ ;;
+ # Debian 11 (Bullseye) stays
+ bullseye)
+ echo "bullseye"
+ ;;
+ # Unknown → latest stable
+ *)
+ echo "bookworm"
+ ;;
+ esac
+ ;;
+ ubuntu)
+ case "$distro_codename" in
+ # Ubuntu 24.10 (Oracular) → 24.04 LTS (Noble)
+ oracular | plucky)
+ echo "noble"
+ ;;
+ # Ubuntu 24.04 LTS (Noble) stays
+ noble)
+ echo "noble"
+ ;;
+ # Ubuntu 23.10 (Mantic) → 22.04 LTS (Jammy)
+ mantic | lunar)
+ echo "jammy"
+ ;;
+ # Ubuntu 22.04 LTS (Jammy) stays
+ jammy)
+ echo "jammy"
+ ;;
+ # Ubuntu 20.04 LTS (Focal) stays
+ focal)
+ echo "focal"
+ ;;
+ # Unknown → latest LTS
+ *)
+ echo "jammy"
+ ;;
+ esac
+ ;;
+ *)
+ echo "$distro_codename"
+ ;;
+ esac
+}
+
+# ------------------------------------------------------------------------------
+# Verify package source and version
+# ------------------------------------------------------------------------------
+verify_package_source() {
+ local package="$1"
+ local expected_version="$2"
+
+ if apt-cache policy "$package" 2>/dev/null | grep -q "$expected_version"; then
+ return 0
+ fi
+ return 1
+}
+
+# ------------------------------------------------------------------------------
+# Check if running on LTS version
+# Ubuntu: true for focal/jammy/noble. Debian: true for the stable releases
+# listed below. Any other OS or codename returns 1.
+# ------------------------------------------------------------------------------
+is_lts_version() {
+ local os_id=$(get_os_info id)
+ local codename=$(get_os_info codename)
+
+ if [[ "$os_id" == "ubuntu" ]]; then
+ case "$codename" in
+ focal | jammy | noble) return 0 ;; # 20.04, 22.04, 24.04
+ *) return 1 ;;
+ esac
+ elif [[ "$os_id" == "debian" ]]; then
+ # Debian releases are all "stable"
+ case "$codename" in
+ bullseye | bookworm | trixie) return 0 ;;
+ *) return 1 ;;
+ esac
+ fi
+
+ return 1
+}
+
+# ------------------------------------------------------------------------------
+# Get optimal number of parallel jobs (cached)
+# min(CPU count, whole GiB of RAM) with a floor of 1, assuming roughly 1 GiB
+# per compile job. Cached in exported _PARALLEL_JOBS after the first call.
+# Note: free -g truncates, so a host with <1 GiB reports 0 and the floor
+# applies. Echoes the job count on stdout.
+# ------------------------------------------------------------------------------
+get_parallel_jobs() {
+ if [[ -z "${_PARALLEL_JOBS:-}" ]]; then
+ local cpu_count=$(nproc 2>/dev/null || echo 1)
+ local mem_gb=$(free -g | awk '/^Mem:/{print $2}')
+
+ # Limit by available memory (assume 1GB per job for compilation)
+ local max_by_mem=$((mem_gb > 0 ? mem_gb : 1))
+ local max_jobs=$((cpu_count < max_by_mem ? cpu_count : max_by_mem))
+
+ # At least 1, at most cpu_count
+ export _PARALLEL_JOBS=$((max_jobs > 0 ? max_jobs : 1))
+ fi
+ echo "$_PARALLEL_JOBS"
+}
+
+# ------------------------------------------------------------------------------
+# Get default PHP version for OS
+# ------------------------------------------------------------------------------
+get_default_php_version() {
+ local os_id=$(get_os_info id)
+ local os_version=$(get_os_version_major)
+
+ case "$os_id" in
+ debian)
+ case "$os_version" in
+ 13) echo "8.3" ;; # Debian 13 (Trixie)
+ 12) echo "8.2" ;; # Debian 12 (Bookworm)
+ 11) echo "7.4" ;; # Debian 11 (Bullseye)
+ *) echo "8.2" ;;
+ esac
+ ;;
+ ubuntu)
+ case "$os_version" in
+ 24) echo "8.3" ;; # Ubuntu 24.04 LTS (Noble)
+ 22) echo "8.1" ;; # Ubuntu 22.04 LTS (Jammy)
+ 20) echo "7.4" ;; # Ubuntu 20.04 LTS (Focal)
+ *) echo "8.1" ;;
+ esac
+ ;;
+ *)
+ echo "8.2"
+ ;;
+ esac
+}
+
+# ------------------------------------------------------------------------------
+# Get default Python version for OS
+# ------------------------------------------------------------------------------
+get_default_python_version() {
+ local os_id=$(get_os_info id)
+ local os_version=$(get_os_version_major)
+
+ case "$os_id" in
+ debian)
+ case "$os_version" in
+ 13) echo "3.12" ;; # Debian 13 (Trixie)
+ 12) echo "3.11" ;; # Debian 12 (Bookworm)
+ 11) echo "3.9" ;; # Debian 11 (Bullseye)
+ *) echo "3.11" ;;
+ esac
+ ;;
+ ubuntu)
+ case "$os_version" in
+ 24) echo "3.12" ;; # Ubuntu 24.04 LTS
+ 22) echo "3.10" ;; # Ubuntu 22.04 LTS
+ 20) echo "3.8" ;; # Ubuntu 20.04 LTS
+ *) echo "3.10" ;;
+ esac
+ ;;
+ *)
+ echo "3.11"
+ ;;
+ esac
+}
+
+# ------------------------------------------------------------------------------
+# Get default Node.js LTS version
+# ------------------------------------------------------------------------------
+get_default_nodejs_version() {
+ # Always return current LTS (as of 2025)
+ echo "22"
+}
+
+# ------------------------------------------------------------------------------
+# Check if package manager is locked
+# ------------------------------------------------------------------------------
+is_apt_locked() {
+ if fuser /var/lib/dpkg/lock-frontend &>/dev/null ||
+ fuser /var/lib/apt/lists/lock &>/dev/null ||
+ fuser /var/cache/apt/archives/lock &>/dev/null; then
+ return 0
+ fi
+ return 1
+}
+
+# ------------------------------------------------------------------------------
+# Wait for apt to be available
+# ------------------------------------------------------------------------------
+wait_for_apt() {
+ local max_wait="${1:-300}" # 5 minutes default
+ local waited=0
+
+ while is_apt_locked; do
+ if [[ $waited -ge $max_wait ]]; then
+ msg_error "Timeout waiting for apt to be available"
+ return 1
+ fi
+
+ sleep 5
+ waited=$((waited + 5))
+ done
+
+ return 0
+}
+
+# ------------------------------------------------------------------------------
+# Cleanup old repository files (migration helper)
+# ------------------------------------------------------------------------------
+cleanup_old_repo_files() {
+ local app="$1"
+
+ # Remove old-style .list files (including backups)
+ rm -f /etc/apt/sources.list.d/"${app}"*.list
+ rm -f /etc/apt/sources.list.d/"${app}"*.list.save
+ rm -f /etc/apt/sources.list.d/"${app}"*.list.distUpgrade
+ rm -f /etc/apt/sources.list.d/"${app}"*.list.dpkg-*
+
+ # Remove old GPG keys from trusted.gpg.d
+ rm -f /etc/apt/trusted.gpg.d/"${app}"*.gpg
+
+ # Remove keyrings from /etc/apt/keyrings
+ rm -f /etc/apt/keyrings/"${app}"*.gpg
+
+ # Remove ALL .sources files for this app (including the main one)
+ # This ensures no orphaned .sources files reference deleted keyrings
+ rm -f /etc/apt/sources.list.d/"${app}"*.sources
+}
+
+# ------------------------------------------------------------------------------
+# Cleanup orphaned .sources files that reference missing keyrings
+# This prevents APT signature verification errors
+# Call this at the start of any setup function to ensure APT is in a clean state
+# ------------------------------------------------------------------------------
+cleanup_orphaned_sources() {
+ local sources_dir="/etc/apt/sources.list.d"
+ local keyrings_dir="/etc/apt/keyrings"
+
+ [[ ! -d "$sources_dir" ]] && return 0
+
+ while IFS= read -r -d '' sources_file; do
+ local basename_file
+ basename_file=$(basename "$sources_file")
+
+ # NEVER remove debian.sources - this is the standard Debian repository
+ if [[ "$basename_file" == "debian.sources" ]]; then
+ continue
+ fi
+
+ # Extract Signed-By path from .sources file
+ local keyring_path
+ keyring_path=$(grep -E '^Signed-By:' "$sources_file" 2>/dev/null | awk '{print $2}')
+
+ # If keyring doesn't exist, remove the .sources file
+ if [[ -n "$keyring_path" ]] && [[ ! -f "$keyring_path" ]]; then
+ rm -f "$sources_file"
+ fi
+ done < <(find "$sources_dir" -name "*.sources" -print0 2>/dev/null)
+
+ # Also check for broken symlinks in keyrings directory
+ if [[ -d "$keyrings_dir" ]]; then
+ find "$keyrings_dir" -type l ! -exec test -e {} \; -delete 2>/dev/null || true
+ fi
+}
+
+# ------------------------------------------------------------------------------
+# Ensure APT is in a working state before installing packages
+# This should be called at the start of any setup function
+# ------------------------------------------------------------------------------
+ensure_apt_working() {
+ # Clean up orphaned sources first
+ cleanup_orphaned_sources
+
+ # Try to update package lists
+ if ! $STD apt update; then
+ # More aggressive cleanup
+ rm -f /etc/apt/sources.list.d/*.sources 2>/dev/null || true
+ cleanup_orphaned_sources
+
+ # Try again
+ if ! $STD apt update; then
+ msg_error "Cannot update package lists - APT is critically broken"
+ return 1
+ fi
+ fi
+
+ return 0
+}
+
+# ------------------------------------------------------------------------------
+# Standardized deb822 repository setup
+# Validates all parameters and fails safely if any are empty
+# ------------------------------------------------------------------------------
+setup_deb822_repo() {
+ local name="$1"
+ local gpg_url="$2"
+ local repo_url="$3"
+ local suite="$4"
+ local component="${5:-main}"
+ local architectures="${6:-amd64 arm64}"
+
+ # Validate required parameters
+ if [[ -z "$name" || -z "$gpg_url" || -z "$repo_url" || -z "$suite" ]]; then
+ msg_error "setup_deb822_repo: missing required parameters (name=$name, gpg=$gpg_url, repo=$repo_url, suite=$suite)"
+ return 1
+ fi
+
+ # Cleanup old configs for this app
+ cleanup_old_repo_files "$name"
+
+ # Cleanup any orphaned .sources files from other apps
+ cleanup_orphaned_sources
+
+ # Ensure keyring directory exists
+ mkdir -p /etc/apt/keyrings || {
+ msg_error "Failed to create /etc/apt/keyrings directory"
+ return 1
+ }
+
+ # Download GPG key (with --yes to avoid interactive prompts)
+ curl -fsSL "$gpg_url" | gpg --dearmor --yes -o "/etc/apt/keyrings/${name}.gpg" 2>/dev/null || {
+ msg_error "Failed to download or import GPG key for ${name} from $gpg_url"
+ return 1
+ }
+
+ # Create deb822 sources file
+ cat <<EOF >/etc/apt/sources.list.d/${name}.sources
+Types: deb
+URIs: $repo_url
+Suites: $suite
+Components: $component
+Architectures: $architectures
+Signed-By: /etc/apt/keyrings/${name}.gpg
EOF
- sleep 2
- if ! apt-get update >/dev/null 2>&1; then
- msg_warn "APT update failed – retrying in 5s"
- sleep 5
- if ! apt-get update >/dev/null 2>&1; then
- msg_error "Failed to update APT repositories after adding NodeSource"
- exit 1
- fi
- fi
+ # Use cached apt update
+ local apt_cache_file="/var/cache/apt-update-timestamp"
+ local current_time=$(date +%s)
+ local last_update=0
- if ! apt-get install -y -t nodistro nodejs >/dev/null 2>&1; then
- msg_error "Failed to install Node.js ${NODE_VERSION} from NodeSource"
- apt-cache policy nodejs | tee "$STD"
- exit 1
- fi
-
- $STD npm install -g npm@latest || {
- msg_error "Failed to update npm to latest version"
- }
- msg_ok "Setup Node.js ${NODE_VERSION}"
+ if [[ -f "$apt_cache_file" ]]; then
+ last_update=$(cat "$apt_cache_file" 2>/dev/null || echo 0)
fi
- export NODE_OPTIONS="--max-old-space-size=4096"
-
- [[ -d /opt ]] || mkdir -p /opt
- cd /opt || {
- msg_error "Failed to set safe working directory before npm install"
- exit 1
- }
-
- if [[ -n "$NODE_MODULE" ]]; then
- IFS=',' read -ra MODULES <<<"$NODE_MODULE"
- for mod in "${MODULES[@]}"; do
- local MODULE_NAME MODULE_REQ_VERSION MODULE_INSTALLED_VERSION
- if [[ "$mod" == @*/*@* ]]; then
- MODULE_NAME="${mod%@*}"
- MODULE_REQ_VERSION="${mod##*@}"
- elif [[ "$mod" == *"@"* ]]; then
- MODULE_NAME="${mod%@*}"
- MODULE_REQ_VERSION="${mod##*@}"
- else
- MODULE_NAME="$mod"
- MODULE_REQ_VERSION="latest"
- fi
-
- if npm list -g --depth=0 "$MODULE_NAME" >/dev/null 2>&1; then
- MODULE_INSTALLED_VERSION="$(npm list -g --depth=0 "$MODULE_NAME" | grep "$MODULE_NAME@" | awk -F@ '{print $2}' | tr -d '[:space:]')"
- if [[ "$MODULE_REQ_VERSION" != "latest" && "$MODULE_REQ_VERSION" != "$MODULE_INSTALLED_VERSION" ]]; then
- msg_info "Updating $MODULE_NAME from v$MODULE_INSTALLED_VERSION to v$MODULE_REQ_VERSION"
- $STD npm install -g "${MODULE_NAME}@${MODULE_REQ_VERSION}" || {
- msg_error "Failed to update $MODULE_NAME to version $MODULE_REQ_VERSION"
- exit 1
- }
- elif [[ "$MODULE_REQ_VERSION" == "latest" ]]; then
- msg_info "Updating $MODULE_NAME to latest version"
- $STD npm install -g "${MODULE_NAME}@latest" || {
- msg_error "Failed to update $MODULE_NAME to latest version"
- exit 1
- }
- fi
- else
- msg_info "Installing $MODULE_NAME@$MODULE_REQ_VERSION"
- $STD npm install -g "${MODULE_NAME}@${MODULE_REQ_VERSION}" || {
- msg_error "Failed to install $MODULE_NAME@$MODULE_REQ_VERSION"
- exit 1
- }
- fi
- done
- msg_ok "Installed Node.js modules: $NODE_MODULE"
+ # For repo changes, always update but respect short-term cache (30s)
+ if ((current_time - last_update > 30)); then
+ $STD apt update
+ echo "$current_time" >"$apt_cache_file"
fi
}
# ------------------------------------------------------------------------------
-# Installs or upgrades PostgreSQL and optional extensions/modules.
-#
-# Description:
-# - Detects existing PostgreSQL version
-# - Dumps all databases before upgrade
-# - Adds PGDG repo and installs specified version
-# - Installs optional PG_MODULES (e.g. postgis, contrib)
-# - Restores dumped data post-upgrade
-#
-# Variables:
-# PG_VERSION - Major PostgreSQL version (e.g. 15, 16) (default: 16)
-# PG_MODULES - Comma-separated list of extensions (e.g. "postgis,contrib")
+# Package version hold/unhold helpers
# ------------------------------------------------------------------------------
-function setup_postgresql() {
- local PG_VERSION="${PG_VERSION:-16}"
- local PG_MODULES="${PG_MODULES:-}"
- local CURRENT_PG_VERSION=""
- local DISTRO
- local NEED_PG_INSTALL=false
- DISTRO="$(awk -F'=' '/^VERSION_CODENAME=/{ print $NF }' /etc/os-release)"
+hold_package_version() {
+ local package="$1"
+ $STD apt-mark hold "$package"
+}
- if command -v psql >/dev/null; then
- CURRENT_PG_VERSION="$(psql -V | awk '{print $3}' | cut -d. -f1)"
- if [[ "$CURRENT_PG_VERSION" == "$PG_VERSION" ]]; then
- : # PostgreSQL is already at the desired version – no action needed
- else
- msg_info "Detected PostgreSQL $CURRENT_PG_VERSION, preparing upgrade to $PG_VERSION"
- NEED_PG_INSTALL=true
- fi
+unhold_package_version() {
+ local package="$1"
+ $STD apt-mark unhold "$package"
+}
+
+# ------------------------------------------------------------------------------
+# Safe service restart with verification
+# ------------------------------------------------------------------------------
+safe_service_restart() {
+ local service="$1"
+
+ if systemctl is-active --quiet "$service"; then
+ $STD systemctl restart "$service"
else
- msg_info "Setup PostgreSQL $PG_VERSION"
- NEED_PG_INSTALL=true
+ $STD systemctl start "$service"
fi
- if [[ "$NEED_PG_INSTALL" == true ]]; then
- if [[ -n "$CURRENT_PG_VERSION" ]]; then
- msg_info "Dumping PostgreSQL $CURRENT_PG_VERSION data"
- su - postgres -c "pg_dumpall > /var/lib/postgresql/backup_$(date +%F)_v${CURRENT_PG_VERSION}.sql"
- msg_ok "Data dump completed"
-
- systemctl stop postgresql
- fi
-
- rm -f /etc/apt/sources.list.d/pgdg.list /etc/apt/trusted.gpg.d/postgresql.gpg
-
- $STD msg_info "Adding PostgreSQL PGDG repository"
- curl -fsSL https://www.postgresql.org/media/keys/ACCC4CF8.asc |
- gpg --dearmor -o /etc/apt/trusted.gpg.d/postgresql.gpg
-
- echo "deb https://apt.postgresql.org/pub/repos/apt ${DISTRO}-pgdg main" \
- >/etc/apt/sources.list.d/pgdg.list
- $STD msg_ok "Repository added"
-
- $STD apt-get update
-
- msg_info "Setup PostgreSQL $PG_VERSION"
- $STD apt-get install -y "postgresql-${PG_VERSION}" "postgresql-client-${PG_VERSION}"
- msg_ok "Setup PostgreSQL $PG_VERSION"
-
- if [[ -n "$CURRENT_PG_VERSION" ]]; then
- $STD apt-get purge -y "postgresql-${CURRENT_PG_VERSION}" "postgresql-client-${CURRENT_PG_VERSION}" || true
- fi
-
- $STD msg_info "Starting PostgreSQL $PG_VERSION"
- systemctl enable -q --now postgresql
- $STD msg_ok "PostgreSQL $PG_VERSION started"
-
- if [[ -n "$CURRENT_PG_VERSION" ]]; then
- msg_info "Restoring dumped data"
- su - postgres -c "psql < /var/lib/postgresql/backup_$(date +%F)_v${CURRENT_PG_VERSION}.sql"
- msg_ok "Data restored"
- fi
-
- msg_ok "PostgreSQL $PG_VERSION installed"
- fi
-
- # Install optional PostgreSQL modules
- if [[ -n "$PG_MODULES" ]]; then
- IFS=',' read -ra MODULES <<<"$PG_MODULES"
- for module in "${MODULES[@]}"; do
- local pkg="postgresql-${PG_VERSION}-${module}"
- msg_info "Setup PostgreSQL module/s: $pkg"
- $STD apt-get install -y "$pkg" || {
- msg_error "Failed to install $pkg"
- continue
- }
- done
- msg_ok "Setup PostgreSQL modules"
+ if ! systemctl is-active --quiet "$service"; then
+ msg_error "Failed to start $service"
+ systemctl status "$service" --no-pager
+ return 1
fi
+ return 0
}
# ------------------------------------------------------------------------------
-# Installs or updates MariaDB from official repo.
-#
-# Description:
-# - Detects current MariaDB version and replaces it if necessary
-# - Preserves existing database data
-# - Dynamically determines latest GA version if "latest" is given
-#
-# Variables:
-# MARIADB_VERSION - MariaDB version to install (e.g. 10.11, latest) (default: latest)
+# Enable and start service (with error handling)
# ------------------------------------------------------------------------------
+enable_and_start_service() {
+ local service="$1"
-setup_mariadb() {
- local MARIADB_VERSION="${MARIADB_VERSION:-latest}"
- local DISTRO_CODENAME
- DISTRO_CODENAME="$(awk -F= '/^VERSION_CODENAME=/{print $2}' /etc/os-release)"
- CURRENT_OS="$(awk -F= '/^ID=/{print $2}' /etc/os-release)"
-
- if ! curl -fsI http://mirror.mariadb.org/repo/ >/dev/null; then
- msg_error "MariaDB mirror not reachable"
+ if ! systemctl enable "$service" &>/dev/null; then
return 1
fi
- msg_info "Setting up MariaDB $MARIADB_VERSION"
- # grab dynamic latest LTS version
- if [[ "$MARIADB_VERSION" == "latest" ]]; then
- MARIADB_VERSION=$(curl -fsSL http://mirror.mariadb.org/repo/ |
- grep -Eo '[0-9]+\.[0-9]+\.[0-9]+/' |
- grep -vE 'rc/|rolling/' |
- sed 's|/||' |
- sort -Vr |
- head -n1)
- if [[ -z "$MARIADB_VERSION" ]]; then
- msg_error "Could not determine latest GA MariaDB version"
- return 1
- fi
+ if ! systemctl start "$service" &>/dev/null; then
+ msg_error "Failed to start $service"
+ systemctl status "$service" --no-pager
+ return 1
fi
- local CURRENT_VERSION=""
- if command -v mariadb >/dev/null; then
- CURRENT_VERSION=$(mariadb --version | grep -oE '[0-9]+\.[0-9]+\.[0-9]+')
+ return 0
+}
+
+# ------------------------------------------------------------------------------
+# Check if service is enabled
+# ------------------------------------------------------------------------------
+is_service_enabled() {
+ local service="$1"
+ systemctl is-enabled --quiet "$service" 2>/dev/null
+}
+
+# ------------------------------------------------------------------------------
+# Check if service is running
+# ------------------------------------------------------------------------------
+is_service_running() {
+ local service="$1"
+ systemctl is-active --quiet "$service" 2>/dev/null
+}
+
+# ------------------------------------------------------------------------------
+# Extract version from JSON (GitHub releases)
+# ------------------------------------------------------------------------------
+extract_version_from_json() {
+ local json="$1"
+ local field="${2:-tag_name}"
+ local strip_v="${3:-true}"
+
+ ensure_dependencies jq
+
+ local version
+ version=$(echo "$json" | jq -r ".${field} // empty")
+
+ if [[ -z "$version" ]]; then
+ return 1
fi
- if [[ "$CURRENT_VERSION" == "$MARIADB_VERSION" ]]; then
- $STD msg_info "MariaDB $MARIADB_VERSION, upgrading"
- $STD apt-get update
- $STD apt-get install --only-upgrade -y mariadb-server mariadb-client
- $STD msg_ok "MariaDB upgraded to $MARIADB_VERSION"
+ if [[ "$strip_v" == "true" ]]; then
+ echo "${version#v}"
+ else
+ echo "$version"
+ fi
+}
+
+# ------------------------------------------------------------------------------
+# Get latest GitHub release version
+# ------------------------------------------------------------------------------
+get_latest_github_release() {
+ local repo="$1"
+ local strip_v="${2:-true}"
+ local temp_file=$(mktemp)
+
+ if ! github_api_call "https://api.github.com/repos/${repo}/releases/latest" "$temp_file"; then
+ rm -f "$temp_file"
+ return 1
+ fi
+
+ local version
+ version=$(extract_version_from_json "$(cat "$temp_file")" "tag_name" "$strip_v")
+ rm -f "$temp_file"
+
+ if [[ -z "$version" ]]; then
+ return 1
+ fi
+
+ echo "$version"
+}
+
+# ------------------------------------------------------------------------------
+# Debug logging (only if DEBUG=1)
+# ------------------------------------------------------------------------------
+debug_log() {
+ [[ "${DEBUG:-0}" == "1" ]] && echo "[DEBUG] $*" >&2
+}
+
+# ------------------------------------------------------------------------------
+# Performance timing helper
+# ------------------------------------------------------------------------------
+start_timer() {
+ echo $(date +%s)
+}
+
+end_timer() {
+ local start_time="$1"
+ local label="${2:-Operation}"
+ local end_time=$(date +%s)
+ local duration=$((end_time - start_time))
+}
+
+# ------------------------------------------------------------------------------
+# GPG key fingerprint verification
+# ------------------------------------------------------------------------------
+verify_gpg_fingerprint() {
+ local key_file="$1"
+ local expected_fingerprint="$2"
+
+ local actual_fingerprint
+ actual_fingerprint=$(gpg --show-keys --with-fingerprint --with-colons "$key_file" 2>&1 | grep -m1 '^fpr:' | cut -d: -f10)
+
+ if [[ "$actual_fingerprint" == "$expected_fingerprint" ]]; then
return 0
fi
- if [[ -n "$CURRENT_VERSION" ]]; then
- $STD msg_info "Upgrading MariaDB $CURRENT_VERSION to $MARIADB_VERSION"
- $STD systemctl stop mariadb >/dev/null 2>&1 || true
- $STD apt-get purge -y 'mariadb*' || true
- rm -f /etc/apt/sources.list.d/mariadb.list /etc/apt/trusted.gpg.d/mariadb.gpg
- else
- $STD msg_info "Setup MariaDB $MARIADB_VERSION"
- fi
-
- curl -fsSL "https://mariadb.org/mariadb_release_signing_key.asc" |
- gpg --dearmor -o /etc/apt/trusted.gpg.d/mariadb.gpg
-
- echo "deb [signed-by=/etc/apt/trusted.gpg.d/mariadb.gpg] http://mirror.mariadb.org/repo/${MARIADB_VERSION}/${CURRENT_OS} ${DISTRO_CODENAME} main" \
- >/etc/apt/sources.list.d/mariadb.list
-
- $STD apt-get update
- $STD apt-get install -y mariadb-server mariadb-client
-
- msg_ok "Setup MariaDB $MARIADB_VERSION"
+ msg_error "GPG fingerprint mismatch! Expected: $expected_fingerprint, Got: $actual_fingerprint"
+ return 1
}
+# ==============================================================================
+# EXISTING FUNCTIONS
+# ==============================================================================
+
# ------------------------------------------------------------------------------
-# Installs or upgrades MySQL and configures APT repo.
+# Checks for new GitHub release (latest tag).
#
# Description:
-# - Detects existing MySQL installation
-# - Purges conflicting packages before installation
-# - Supports clean upgrade
+# - Queries the GitHub API for the latest release tag
+# - Compares it to a local cached version stored in ~/.<app> (lowercase app name)
+# - If newer, sets global CHECK_UPDATE_RELEASE and returns 0
#
-# Variables:
-# MYSQL_VERSION - MySQL version to install (e.g. 5.7, 8.0) (default: 8.0)
+# Usage:
+# if check_for_gh_release "flaresolverr" "FlareSolverr/FlareSolverr" [optional] "v1.1.1"; then
+# # trigger update...
+# fi
+# exit 0
+# } (closing brace of the caller's update_script, not part of this function)
+#
+# Notes:
+# - Requires `jq` (auto-installed if missing)
+# - Does not modify anything, only checks version state
+# - Does not support pre-releases
# ------------------------------------------------------------------------------
+check_for_gh_release() {
+ local app="$1"
+ local source="$2"
+ local pinned_version_in="${3:-}" # optional
+ local app_lc="${app,,}"
+ local current_file="$HOME/.${app_lc}"
-function setup_mysql() {
- local MYSQL_VERSION="${MYSQL_VERSION:-8.0}"
- local CURRENT_VERSION=""
- local NEED_INSTALL=false
- CURRENT_OS="$(awk -F= '/^ID=/{print $2}' /etc/os-release)"
+ msg_info "Checking for update: ${app}"
- if command -v mysql >/dev/null; then
- CURRENT_VERSION="$(mysql --version | grep -oP 'Distrib\s+\K[0-9]+\.[0-9]+')"
- if [[ "$CURRENT_VERSION" != "$MYSQL_VERSION" ]]; then
- $STD msg_info "MySQL $CURRENT_VERSION will be upgraded to $MYSQL_VERSION"
- NEED_INSTALL=true
- else
- # Check for patch-level updates
- if apt list --upgradable 2>/dev/null | grep -q '^mysql-server/'; then
- $STD msg_info "MySQL $CURRENT_VERSION available for upgrade"
- $STD apt-get update
- $STD apt-get install --only-upgrade -y mysql-server
- $STD msg_ok "MySQL upgraded"
+ # DNS check
+ if ! getent hosts api.github.com >/dev/null 2>&1; then
+ msg_error "Network error: cannot resolve api.github.com"
+ return 1
+ fi
+
+ ensure_dependencies jq
+
+ # Fetch releases and exclude drafts/prereleases
+ local releases_json
+ releases_json=$(curl -fsSL --max-time 20 \
+ -H 'Accept: application/vnd.github+json' \
+ -H 'X-GitHub-Api-Version: 2022-11-28' \
+ "https://api.github.com/repos/${source}/releases") || {
+ msg_error "Unable to fetch releases for ${app}"
+ return 1
+ }
+
+ mapfile -t raw_tags < <(jq -r '.[] | select(.draft==false and .prerelease==false) | .tag_name' <<<"$releases_json")
+ if ((${#raw_tags[@]} == 0)); then
+ msg_error "No stable releases found for ${app}"
+ return 1
+ fi
+
+ local clean_tags=()
+ for t in "${raw_tags[@]}"; do
+ clean_tags+=("${t#v}")
+ done
+
+ local latest_raw="${raw_tags[0]}"
+ local latest_clean="${clean_tags[0]}"
+
+ # current installed (stored without v)
+ local current=""
+ if [[ -f "$current_file" ]]; then
+ current="$(<"$current_file")"
+ else
+ # Migration: search for any /opt/*_version.txt
+ local legacy_files
+ mapfile -t legacy_files < <(find /opt -maxdepth 1 -type f -name "*_version.txt" 2>/dev/null)
+ if ((${#legacy_files[@]} == 1)); then
+ current="$(<"${legacy_files[0]}")"
+ echo "${current#v}" >"$current_file"
+ rm -f "${legacy_files[0]}"
+ fi
+ fi
+ current="${current#v}"
+
+ # Pinned version handling
+ if [[ -n "$pinned_version_in" ]]; then
+ local pin_clean="${pinned_version_in#v}"
+ local match_raw=""
+ for i in "${!clean_tags[@]}"; do
+ if [[ "${clean_tags[$i]}" == "$pin_clean" ]]; then
+ match_raw="${raw_tags[$i]}"
+ break
fi
- return
- fi
- else
- msg_info "Setup MySQL $MYSQL_VERSION"
- NEED_INSTALL=true
- fi
+ done
- if [[ "$NEED_INSTALL" == true ]]; then
- $STD systemctl stop mysql || true
- $STD apt-get purge -y "^mysql-server.*" "^mysql-client.*" "^mysql-common.*" || true
- rm -f /etc/apt/sources.list.d/mysql.list /etc/apt/trusted.gpg.d/mysql.gpg
-
- local DISTRO_CODENAME
- DISTRO_CODENAME="$(awk -F= '/VERSION_CODENAME/ { print $2 }' /etc/os-release)"
- curl -fsSL https://repo.mysql.com/RPM-GPG-KEY-mysql-2023 | gpg --dearmor -o /etc/apt/trusted.gpg.d/mysql.gpg
- echo "deb [signed-by=/etc/apt/trusted.gpg.d/mysql.gpg] https://repo.mysql.com/apt/${CURRENT_OS}/ ${DISTRO_CODENAME} mysql-${MYSQL_VERSION}" \
- >/etc/apt/sources.list.d/mysql.list
-
- export DEBIAN_FRONTEND=noninteractive
- $STD apt-get update
- $STD apt-get install -y mysql-server
- msg_ok "Setup MySQL $MYSQL_VERSION"
- fi
-}
-
-# ------------------------------------------------------------------------------
-# Installs PHP with selected modules and configures Apache/FPM support.
-#
-# Description:
-# - Adds Sury PHP repo if needed
-# - Installs default and user-defined modules
-# - Patches php.ini for CLI, Apache, and FPM as needed
-#
-# Variables:
-# PHP_VERSION - PHP version to install (default: 8.4)
-# PHP_MODULE - Additional comma-separated modules
-# PHP_APACHE - Set YES to enable PHP with Apache
-# PHP_FPM - Set YES to enable PHP-FPM
-# PHP_MEMORY_LIMIT - (default: 512M)
-# PHP_UPLOAD_MAX_FILESIZE - (default: 128M)
-# PHP_POST_MAX_SIZE - (default: 128M)
-# PHP_MAX_EXECUTION_TIME - (default: 300)
-# ------------------------------------------------------------------------------
-
-# ------------------------------------------------------------------------------
-# Installs PHP with selected modules and configures Apache/FPM support.
-#
-# Description:
-# - Adds Sury PHP repo if needed
-# - Installs default and user-defined modules
-# - Patches php.ini for CLI, Apache, and FPM as needed
-#
-# Variables:
-# PHP_VERSION - PHP version to install (default: 8.4)
-# PHP_MODULE - Additional comma-separated modules
-# PHP_APACHE - Set YES to enable PHP with Apache
-# PHP_FPM - Set YES to enable PHP-FPM
-# PHP_MEMORY_LIMIT - (default: 512M)
-# PHP_UPLOAD_MAX_FILESIZE - (default: 128M)
-# PHP_POST_MAX_SIZE - (default: 128M)
-# PHP_MAX_EXECUTION_TIME - (default: 300)
-# ------------------------------------------------------------------------------
-
-function setup_php() {
- local PHP_VERSION="${PHP_VERSION:-8.4}"
- local PHP_MODULE="${PHP_MODULE:-}"
- local PHP_APACHE="${PHP_APACHE:-NO}"
- local PHP_FPM="${PHP_FPM:-NO}"
- local DISTRO_CODENAME
- DISTRO_CODENAME=$(awk -F= '/VERSION_CODENAME/ { print $2 }' /etc/os-release)
-
- local DEFAULT_MODULES="bcmath,cli,curl,gd,intl,mbstring,opcache,readline,xml,zip"
- local COMBINED_MODULES
-
- local PHP_MEMORY_LIMIT="${PHP_MEMORY_LIMIT:-512M}"
- local PHP_UPLOAD_MAX_FILESIZE="${PHP_UPLOAD_MAX_FILESIZE:-128M}"
- local PHP_POST_MAX_SIZE="${PHP_POST_MAX_SIZE:-128M}"
- local PHP_MAX_EXECUTION_TIME="${PHP_MAX_EXECUTION_TIME:-300}"
-
- # Merge default + user-defined modules
- if [[ -n "$PHP_MODULE" ]]; then
- COMBINED_MODULES="${DEFAULT_MODULES},${PHP_MODULE}"
- else
- COMBINED_MODULES="${DEFAULT_MODULES}"
- fi
-
- # Deduplicate modules
- COMBINED_MODULES=$(echo "$COMBINED_MODULES" | tr ',' '\n' | awk '!seen[$0]++' | paste -sd, -)
-
- local CURRENT_PHP=""
- if command -v php >/dev/null 2>&1; then
- CURRENT_PHP=$(php -v 2>/dev/null | awk '/^PHP/{print $2}' | cut -d. -f1,2)
- fi
-
- if [[ -z "$CURRENT_PHP" ]]; then
- msg_info "Setup PHP $PHP_VERSION"
- elif [[ "$CURRENT_PHP" != "$PHP_VERSION" ]]; then
- msg_info "Old PHP $CURRENT_PHP detected, Setup new PHP $PHP_VERSION"
- $STD apt-get purge -y "php${CURRENT_PHP//./}"* || true
- fi
-
- # Ensure Sury repo is available
- if [[ ! -f /etc/apt/sources.list.d/php.list ]]; then
- $STD curl -fsSLo /tmp/debsuryorg-archive-keyring.deb https://packages.sury.org/debsuryorg-archive-keyring.deb
- $STD dpkg -i /tmp/debsuryorg-archive-keyring.deb
- echo "deb [signed-by=/usr/share/keyrings/deb.sury.org-php.gpg] https://packages.sury.org/php/ ${DISTRO_CODENAME} main" \
- >/etc/apt/sources.list.d/php.list
- $STD apt-get update
- fi
-
- local MODULE_LIST="php${PHP_VERSION}"
- for pkg in $MODULE_LIST; do
- if ! apt-cache show "$pkg" >/dev/null 2>&1; then
- msg_error "Package not found: $pkg"
- exit 1
- fi
- done
- IFS=',' read -ra MODULES <<<"$COMBINED_MODULES"
- for mod in "${MODULES[@]}"; do
- MODULE_LIST+=" php${PHP_VERSION}-${mod}"
- done
-
- if [[ "$PHP_FPM" == "YES" ]]; then
- MODULE_LIST+=" php${PHP_VERSION}-fpm"
- fi
- if [[ "$PHP_APACHE" == "YES" ]]; then
- $STD apt-get install -y apache2
- $STD systemctl restart apache2 || true
- fi
-
- if [[ "$PHP_APACHE" == "YES" ]] && [[ -n "$CURRENT_PHP" ]]; then
- if [[ -f /etc/apache2/mods-enabled/php${CURRENT_PHP}.load ]]; then
- $STD a2dismod php${CURRENT_PHP} || true
- fi
- fi
-
- if [[ "$PHP_FPM" == "YES" ]] && [[ -n "$CURRENT_PHP" ]]; then
- $STD systemctl stop php${CURRENT_PHP}-fpm || true
- $STD systemctl disable php${CURRENT_PHP}-fpm || true
- fi
-
- $STD apt-get install -y $MODULE_LIST
- msg_ok "Setup PHP $PHP_VERSION"
-
- if [[ "$PHP_APACHE" == "YES" ]]; then
- $STD systemctl restart apache2 || true
- fi
-
- if [[ "$PHP_FPM" == "YES" ]]; then
- $STD systemctl enable php${PHP_VERSION}-fpm
- $STD systemctl restart php${PHP_VERSION}-fpm
- fi
-
- # Patch all relevant php.ini files
- local PHP_INI_PATHS=("/etc/php/${PHP_VERSION}/cli/php.ini")
- [[ "$PHP_FPM" == "YES" ]] && PHP_INI_PATHS+=("/etc/php/${PHP_VERSION}/fpm/php.ini")
- [[ "$PHP_APACHE" == "YES" ]] && PHP_INI_PATHS+=("/etc/php/${PHP_VERSION}/apache2/php.ini")
-
- for ini in "${PHP_INI_PATHS[@]}"; do
- if [[ -f "$ini" ]]; then
- $STD msg_info "Patching $ini"
- sed -i "s|^memory_limit = .*|memory_limit = ${PHP_MEMORY_LIMIT}|" "$ini"
- sed -i "s|^upload_max_filesize = .*|upload_max_filesize = ${PHP_UPLOAD_MAX_FILESIZE}|" "$ini"
- sed -i "s|^post_max_size = .*|post_max_size = ${PHP_POST_MAX_SIZE}|" "$ini"
- sed -i "s|^max_execution_time = .*|max_execution_time = ${PHP_MAX_EXECUTION_TIME}|" "$ini"
- $STD msg_ok "Patched $ini"
- fi
- done
-}
-
-# ------------------------------------------------------------------------------
-# Installs or updates Composer globally.
-#
-# Description:
-# - Downloads latest version from getcomposer.org
-# - Installs to /usr/local/bin/composer
-# ------------------------------------------------------------------------------
-
-function setup_composer() {
- local COMPOSER_BIN="/usr/local/bin/composer"
- export COMPOSER_ALLOW_SUPERUSER=1
-
- # Check if composer is already installed
- if [[ -x "$COMPOSER_BIN" ]]; then
- local CURRENT_VERSION
- CURRENT_VERSION=$("$COMPOSER_BIN" --version | awk '{print $3}')
- $STD msg_info "Old Composer $CURRENT_VERSION found, updating to latest"
- else
- msg_info "Setup Composer"
- fi
-
- # Download and install latest composer
- curl -fsSL https://getcomposer.org/installer -o /tmp/composer-setup.php
- php /tmp/composer-setup.php --install-dir=/usr/local/bin --filename=composer >/dev/null 2>&1
-
- if [[ $? -ne 0 ]]; then
- msg_error "Failed to install Composer"
- return 1
- fi
-
- chmod +x "$COMPOSER_BIN"
- composer diagnose >/dev/null 2>&1
- msg_ok "Setup Composer"
-}
-
-# ------------------------------------------------------------------------------
-# Installs Go (Golang) from official tarball.
-#
-# Description:
-# - Determines system architecture
-# - Downloads latest version if GO_VERSION not set
-#
-# Variables:
-# GO_VERSION - Version to install (e.g. 1.22.2 or latest)
-# ------------------------------------------------------------------------------
-
-function setup_go() {
- local ARCH
- case "$(uname -m)" in
- x86_64) ARCH="amd64" ;;
- aarch64) ARCH="arm64" ;;
- *)
- msg_error "Unsupported architecture: $(uname -m)"
- return 1
- ;;
- esac
-
- # Determine version
- if [[ -z "${GO_VERSION:-}" || "${GO_VERSION}" == "latest" ]]; then
- GO_VERSION=$(curl -fsSL https://go.dev/VERSION?m=text | head -n1 | sed 's/^go//')
- if [[ -z "$GO_VERSION" ]]; then
- msg_error "Could not determine latest Go version"
+ if [[ -z "$match_raw" ]]; then
+ msg_error "Pinned version ${pinned_version_in} not found upstream"
return 1
fi
- fi
- local GO_BIN="/usr/local/bin/go"
- local GO_INSTALL_DIR="/usr/local/go"
-
- if [[ -x "$GO_BIN" ]]; then
- local CURRENT_VERSION
- CURRENT_VERSION=$("$GO_BIN" version | awk '{print $3}' | sed 's/go//')
- if [[ "$CURRENT_VERSION" == "$GO_VERSION" ]]; then
+ if [[ "$current" != "$pin_clean" ]]; then
+ CHECK_UPDATE_RELEASE="$match_raw"
+ msg_ok "Update available: ${app} ${current:-not installed} → ${pin_clean}"
return 0
- else
- $STD msg_info "Old Go Installation ($CURRENT_VERSION) found, upgrading to $GO_VERSION"
- rm -rf "$GO_INSTALL_DIR"
fi
- else
- msg_info "Setup Go $GO_VERSION"
- fi
- local TARBALL="go${GO_VERSION}.linux-${ARCH}.tar.gz"
- local URL="https://go.dev/dl/${TARBALL}"
- local TMP_TAR=$(mktemp)
-
- curl -fsSL "$URL" -o "$TMP_TAR" || {
- msg_error "Failed to download $TARBALL"
+ msg_error "No update available: ${app} is not installed!"
return 1
- }
-
- tar -C /usr/local -xzf "$TMP_TAR"
- ln -sf /usr/local/go/bin/go /usr/local/bin/go
- ln -sf /usr/local/go/bin/gofmt /usr/local/bin/gofmt
- rm -f "$TMP_TAR"
-
- msg_ok "Setup Go $GO_VERSION"
-}
-
-# ------------------------------------------------------------------------------
-# Installs Temurin JDK via Adoptium APT repository.
-#
-# Description:
-# - Removes previous JDK if version mismatch
-# - Installs or upgrades to specified JAVA_VERSION
-#
-# Variables:
-# JAVA_VERSION - Temurin JDK version to install (e.g. 17, 21)
-# ------------------------------------------------------------------------------
-
-function setup_java() {
- local JAVA_VERSION="${JAVA_VERSION:-21}"
- local DISTRO_CODENAME
- DISTRO_CODENAME=$(awk -F= '/VERSION_CODENAME/ { print $2 }' /etc/os-release)
- local DESIRED_PACKAGE="temurin-${JAVA_VERSION}-jdk"
-
- # Add Adoptium repo if missing
- if [[ ! -f /etc/apt/sources.list.d/adoptium.list ]]; then
- $STD msg_info "Setting up Adoptium Repository"
- mkdir -p /etc/apt/keyrings
- curl -fsSL "https://packages.adoptium.net/artifactory/api/gpg/key/public" | gpg --dearmor -o /etc/apt/trusted.gpg.d/adoptium.gpg
- echo "deb [signed-by=/etc/apt/trusted.gpg.d/adoptium.gpg] https://packages.adoptium.net/artifactory/deb ${DISTRO_CODENAME} main" \
- >/etc/apt/sources.list.d/adoptium.list
- $STD apt-get update
- $STD msg_ok "Set up Adoptium Repository"
fi
- # Detect currently installed temurin version
- local INSTALLED_VERSION=""
- if dpkg -l | grep -q "temurin-.*-jdk"; then
- INSTALLED_VERSION=$(dpkg -l | awk '/temurin-.*-jdk/{print $2}' | grep -oP 'temurin-\K[0-9]+')
- fi
-
- if [[ "$INSTALLED_VERSION" == "$JAVA_VERSION" ]]; then
- $STD msg_info "Upgrading Temurin JDK $JAVA_VERSION"
- $STD apt-get update
- $STD apt-get install --only-upgrade -y "$DESIRED_PACKAGE"
- $STD msg_ok "Upgraded Temurin JDK $JAVA_VERSION"
- else
- if [[ -n "$INSTALLED_VERSION" ]]; then
- $STD msg_info "Removing Temurin JDK $INSTALLED_VERSION"
- $STD apt-get purge -y "temurin-${INSTALLED_VERSION}-jdk"
- fi
-
- msg_info "Setup Temurin JDK $JAVA_VERSION"
- $STD apt-get install -y "$DESIRED_PACKAGE"
- msg_ok "Setup Temurin JDK $JAVA_VERSION"
- fi
-}
-
-# ------------------------------------------------------------------------------
-# Installs or updates MongoDB to specified major version.
-#
-# Description:
-# - Preserves data across installations
-# - Adds official MongoDB repo
-#
-# Variables:
-# MONGO_VERSION - MongoDB major version to install (e.g. 7.0, 8.0)
-# ------------------------------------------------------------------------------
-
-function setup_mongodb() {
- local MONGO_VERSION="${MONGO_VERSION:-8.0}"
- local DISTRO_ID DISTRO_CODENAME MONGO_BASE_URL
- DISTRO_ID=$(awk -F= '/^ID=/{ gsub(/"/,"",$2); print $2 }' /etc/os-release)
- DISTRO_CODENAME=$(awk -F= '/^VERSION_CODENAME=/{ print $2 }' /etc/os-release)
-
- # Check AVX support
- if ! grep -qm1 'avx[^ ]*' /proc/cpuinfo; then
- local major="${MONGO_VERSION%%.*}"
- if ((major > 5)); then
- msg_error "MongoDB ${MONGO_VERSION} requires AVX support, which is not available on this system."
- return 1
- fi
- fi
-
- case "$DISTRO_ID" in
- ubuntu)
- MONGO_BASE_URL="https://repo.mongodb.org/apt/ubuntu"
- REPO_COMPONENT="multiverse"
- ;;
- debian)
- MONGO_BASE_URL="https://repo.mongodb.org/apt/debian"
- REPO_COMPONENT="main"
- ;;
- *)
- msg_error "Unsupported distribution: $DISTRO_ID"
- return 1
- ;;
- esac
-
- local REPO_LIST="/etc/apt/sources.list.d/mongodb-org-${MONGO_VERSION}.list"
-
- local INSTALLED_VERSION=""
- if command -v mongod >/dev/null; then
- INSTALLED_VERSION=$(mongod --version | awk '/db version/{print $3}' | cut -d. -f1,2)
- fi
-
- if [[ "$INSTALLED_VERSION" == "$MONGO_VERSION" ]]; then
- $STD msg_info "Upgrading MongoDB $MONGO_VERSION"
- $STD apt-get update
- $STD apt-get install --only-upgrade -y mongodb-org
- $STD msg_ok "Upgraded MongoDB $MONGO_VERSION"
+ # No pinning → use latest
+ if [[ -z "$current" || "$current" != "$latest_clean" ]]; then
+ CHECK_UPDATE_RELEASE="$latest_raw"
+ msg_ok "Update available: ${app} ${current:-not installed} → ${latest_clean}"
return 0
fi
- if [[ -n "$INSTALLED_VERSION" ]]; then
- $STD systemctl stop mongod || true
- $STD apt-get purge -y mongodb-org || true
- rm -f /etc/apt/sources.list.d/mongodb-org-*.list
- rm -f /etc/apt/trusted.gpg.d/mongodb-*.gpg
- else
- msg_info "Setup MongoDB $MONGO_VERSION"
+ msg_ok "No update available: ${app} (${latest_clean})"
+ return 1
+}
+
+# ------------------------------------------------------------------------------
+# Creates and installs self-signed certificates.
+#
+# Description:
+#   - Creates a self-signed certificate, with an option to override the application name
+#
+# Variables:
+# APP - Application name (default: $APPLICATION variable)
+# ------------------------------------------------------------------------------
+create_self_signed_cert() {
+ local APP_NAME="${1:-${APPLICATION}}"
+ local CERT_DIR="/etc/ssl/${APP_NAME}"
+ local CERT_KEY="${CERT_DIR}/${APP_NAME}.key"
+ local CERT_CRT="${CERT_DIR}/${APP_NAME}.crt"
+
+ if [[ -f "$CERT_CRT" && -f "$CERT_KEY" ]]; then
+ return 0
fi
- curl -fsSL "https://pgp.mongodb.com/server-${MONGO_VERSION}.asc" | gpg --dearmor -o "/etc/apt/trusted.gpg.d/mongodb-${MONGO_VERSION}.gpg"
- echo "deb [signed-by=/etc/apt/trusted.gpg.d/mongodb-${MONGO_VERSION}.gpg] ${MONGO_BASE_URL} ${DISTRO_CODENAME}/mongodb-org/${MONGO_VERSION} main" \
- >"$REPO_LIST"
-
- $STD apt-get update || {
- msg_error "APT update failed — invalid MongoDB repo for ${DISTRO_ID}-${DISTRO_CODENAME}?"
+ # Use ensure_dependencies for cleaner handling
+ ensure_dependencies openssl || {
+ msg_error "Failed to install OpenSSL"
return 1
}
- $STD apt-get install -y mongodb-org
+ mkdir -p "$CERT_DIR"
+ $STD openssl req -new -newkey rsa:2048 -days 365 -nodes -x509 \
+ -subj "/C=US/ST=State/L=City/O=Organization/CN=${APP_NAME}" \
+ -keyout "$CERT_KEY" \
+ -out "$CERT_CRT" || {
+ msg_error "Failed to create self-signed certificate"
+ return 1
+ }
- mkdir -p /var/lib/mongodb
- chown -R mongodb:mongodb /var/lib/mongodb
+ chmod 600 "$CERT_KEY"
+ chmod 644 "$CERT_CRT"
+}
- $STD systemctl enable mongod
- $STD systemctl start mongod
- msg_ok "Setup MongoDB $MONGO_VERSION"
+# ------------------------------------------------------------------------------
+# Downloads file with optional progress indicator using pv.
+#
+# Arguments:
+# $1 - URL
+# $2 - Destination path
+# ------------------------------------------------------------------------------
+
+function download_with_progress() {
+ local url="$1"
+ local output="$2"
+ if [ -n "$SPINNER_PID" ] && ps -p "$SPINNER_PID" >/dev/null; then kill "$SPINNER_PID" >/dev/null; fi
+
+ ensure_dependencies pv
+ set -o pipefail
+
+  # Fetch the Content-Length from the HTTP response headers
+ local content_length
+ content_length=$(curl -fsSLI "$url" | awk '/Content-Length/ {print $2}' | tr -d '\r' || true)
+
+ if [[ -z "$content_length" ]]; then
+ if ! curl -fL# -o "$output" "$url"; then
+ msg_error "Download failed"
+ return 1
+ fi
+ else
+ if ! curl -fsSL "$url" | pv -s "$content_length" >"$output"; then
+ msg_error "Download failed"
+ return 1
+ fi
+ fi
+}
+
+# ------------------------------------------------------------------------------
+# Ensures /usr/local/bin is permanently in system PATH.
+#
+# Description:
+# - Adds to /etc/profile.d if not present
+# ------------------------------------------------------------------------------
+
+function ensure_usr_local_bin_persist() {
+ local PROFILE_FILE="/etc/profile.d/custom_path.sh"
+
+ if [[ ! -f "$PROFILE_FILE" ]] && ! command -v pveversion &>/dev/null; then
+ echo 'export PATH="/usr/local/bin:$PATH"' >"$PROFILE_FILE"
+ chmod +x "$PROFILE_FILE"
+ fi
}
# ------------------------------------------------------------------------------
@@ -796,9 +1675,7 @@ function fetch_and_deploy_gh_release() {
local current_version=""
[[ -f "$version_file" ]] && current_version=$(<"$version_file")
- if ! command -v jq &>/dev/null; then
- $STD apt-get install -y jq &>/dev/null
- fi
+ ensure_dependencies jq
local api_url="https://api.github.com/repos/$repo/releases"
[[ "$version" != "latest" ]] && api_url="$api_url/tags/$version" || api_url="$api_url/latest"
@@ -848,6 +1725,9 @@ function fetch_and_deploy_gh_release() {
msg_info "Fetching GitHub release: $app ($version)"
+ local clean_install=false
+ [[ -n "${CLEAN_INSTALL:-}" && "$CLEAN_INSTALL" == "1" ]] && clean_install=true
+
### Tarball Mode ###
if [[ "$mode" == "tarball" || "$mode" == "source" ]]; then
url=$(echo "$json" | jq -r '.tarball_url // empty')
@@ -861,7 +1741,15 @@ function fetch_and_deploy_gh_release() {
}
mkdir -p "$target"
- tar -xzf "$tmpdir/$filename" -C "$tmpdir"
+ if [[ "${CLEAN_INSTALL:-0}" == "1" ]]; then
+ rm -rf "${target:?}/"*
+ fi
+
+ tar --no-same-owner -xzf "$tmpdir/$filename" -C "$tmpdir" || {
+ msg_error "Failed to extract tarball"
+ rm -rf "$tmpdir"
+ return 1
+ }
local unpack_dir
unpack_dir=$(find "$tmpdir" -mindepth 1 -maxdepth 1 -type d | head -n1)
@@ -882,7 +1770,12 @@ function fetch_and_deploy_gh_release() {
# If explicit filename pattern is provided (param $6), match that first
if [[ -n "$asset_pattern" ]]; then
for u in $assets; do
- [[ "$u" =~ $asset_pattern || "$u" == *"$asset_pattern" ]] && url_match="$u" && break
+ case "${u##*/}" in
+ $asset_pattern)
+ url_match="$u"
+ break
+ ;;
+ esac
done
fi
@@ -917,7 +1810,7 @@ function fetch_and_deploy_gh_release() {
}
chmod 644 "$tmpdir/$filename"
- $STD apt-get install -y "$tmpdir/$filename" || {
+ $STD apt install -y "$tmpdir/$filename" || {
$STD dpkg -i "$tmpdir/$filename" || {
msg_error "Both apt and dpkg installation failed"
rm -rf "$tmpdir"
@@ -925,7 +1818,7 @@ function fetch_and_deploy_gh_release() {
}
}
- ### Prebuild Mode ###
+ ### Prebuild Mode ###
elif [[ "$mode" == "prebuild" ]]; then
local pattern="${6%\"}"
pattern="${pattern#\"}"
@@ -962,14 +1855,23 @@ function fetch_and_deploy_gh_release() {
local unpack_tmp
unpack_tmp=$(mktemp -d)
mkdir -p "$target"
+ if [[ "${CLEAN_INSTALL:-0}" == "1" ]]; then
+ rm -rf "${target:?}/"*
+ fi
if [[ "$filename" == *.zip ]]; then
- if ! command -v unzip &>/dev/null; then
- $STD apt-get install -y unzip
- fi
- unzip -q "$tmpdir/$filename" -d "$unpack_tmp"
- elif [[ "$filename" == *.tar.* ]]; then
- tar -xf "$tmpdir/$filename" -C "$unpack_tmp"
+ ensure_dependencies unzip
+ unzip -q "$tmpdir/$filename" -d "$unpack_tmp" || {
+ msg_error "Failed to extract ZIP archive"
+ rm -rf "$tmpdir" "$unpack_tmp"
+ return 1
+ }
+ elif [[ "$filename" == *.tar.* || "$filename" == *.tgz ]]; then
+ tar --no-same-owner -xf "$tmpdir/$filename" -C "$unpack_tmp" || {
+ msg_error "Failed to extract TAR archive"
+ rm -rf "$tmpdir" "$unpack_tmp"
+ return 1
+ }
else
msg_error "Unsupported archive format: $filename"
rm -rf "$tmpdir" "$unpack_tmp"
@@ -978,23 +1880,41 @@ function fetch_and_deploy_gh_release() {
local top_dirs
top_dirs=$(find "$unpack_tmp" -mindepth 1 -maxdepth 1 -type d | wc -l)
-
- if [[ "$top_dirs" -eq 1 ]]; then
+ local top_entries inner_dir
+ top_entries=$(find "$unpack_tmp" -mindepth 1 -maxdepth 1)
+ if [[ "$(echo "$top_entries" | wc -l)" -eq 1 && -d "$top_entries" ]]; then
# Strip leading folder
- local inner_dir
- inner_dir=$(find "$unpack_tmp" -mindepth 1 -maxdepth 1 -type d)
+ inner_dir="$top_entries"
shopt -s dotglob nullglob
- cp -r "$inner_dir"/* "$target/"
+ if compgen -G "$inner_dir/*" >/dev/null; then
+ cp -r "$inner_dir"/* "$target/" || {
+ msg_error "Failed to copy contents from $inner_dir to $target"
+ rm -rf "$tmpdir" "$unpack_tmp"
+ return 1
+ }
+ else
+ msg_error "Inner directory is empty: $inner_dir"
+ rm -rf "$tmpdir" "$unpack_tmp"
+ return 1
+ fi
shopt -u dotglob nullglob
else
# Copy all contents
shopt -s dotglob nullglob
- cp -r "$unpack_tmp"/* "$target/"
+ if compgen -G "$unpack_tmp/*" >/dev/null; then
+ cp -r "$unpack_tmp"/* "$target/" || {
+ msg_error "Failed to copy contents to $target"
+ rm -rf "$tmpdir" "$unpack_tmp"
+ return 1
+ }
+ else
+ msg_error "Unpacked archive is empty"
+ rm -rf "$tmpdir" "$unpack_tmp"
+ return 1
+ fi
shopt -u dotglob nullglob
fi
- rm -rf "$unpack_tmp"
-
### Singlefile Mode ###
elif [[ "$mode" == "singlefile" ]]; then
local pattern="${6%\"}"
@@ -1050,6 +1970,868 @@ function fetch_and_deploy_gh_release() {
rm -rf "$tmpdir"
}
+# ------------------------------------------------------------------------------
+# Loads LOCAL_IP from persistent store or detects if missing.
+#
+# Description:
+# - Loads from /run/local-ip.env or performs runtime lookup
+# ------------------------------------------------------------------------------
+
+function import_local_ip() {
+ local IP_FILE="/run/local-ip.env"
+ if [[ -f "$IP_FILE" ]]; then
+ # shellcheck disable=SC1090
+ source "$IP_FILE"
+ fi
+
+ if [[ -z "${LOCAL_IP:-}" ]]; then
+ get_current_ip() {
+ local targets=("8.8.8.8" "1.1.1.1" "192.168.1.1" "10.0.0.1" "172.16.0.1" "default")
+ local ip
+
+ for target in "${targets[@]}"; do
+ if [[ "$target" == "default" ]]; then
+ ip=$(ip route get 1 2>/dev/null | awk '{for(i=1;i<=NF;i++) if ($i=="src") print $(i+1)}')
+ else
+ ip=$(ip route get "$target" 2>/dev/null | awk '{for(i=1;i<=NF;i++) if ($i=="src") print $(i+1)}')
+ fi
+ if [[ -n "$ip" ]]; then
+ echo "$ip"
+ return 0
+ fi
+ done
+
+ return 1
+ }
+
+ LOCAL_IP="$(get_current_ip || true)"
+ if [[ -z "$LOCAL_IP" ]]; then
+ msg_error "Could not determine LOCAL_IP"
+ return 1
+ fi
+ fi
+
+ export LOCAL_IP
+}
+
+# ------------------------------------------------------------------------------
+# Installs Adminer (Debian/Ubuntu via APT, Alpine via direct download).
+#
+# Description:
+# - Adds Adminer to Apache or web root
+# - Supports Alpine and Debian-based systems
+# ------------------------------------------------------------------------------
+
+function setup_adminer() {
+ if grep -qi alpine /etc/os-release; then
+ msg_info "Setup Adminer (Alpine)"
+ mkdir -p /var/www/localhost/htdocs/adminer
+ curl -fsSL https://github.com/vrana/adminer/releases/latest/download/adminer.php \
+ -o /var/www/localhost/htdocs/adminer/index.php || {
+ msg_error "Failed to download Adminer"
+ return 1
+ }
+ cache_installed_version "adminer" "latest-alpine"
+ msg_ok "Setup Adminer (Alpine)"
+ else
+ msg_info "Setup Adminer (Debian/Ubuntu)"
+ ensure_dependencies adminer
+ $STD a2enconf adminer || {
+ msg_error "Failed to enable Adminer Apache config"
+ return 1
+ }
+ $STD systemctl reload apache2 || {
+ msg_error "Failed to reload Apache"
+ return 1
+ }
+ local VERSION
+ VERSION=$(dpkg -s adminer 2>/dev/null | grep '^Version:' | awk '{print $2}')
+ cache_installed_version "adminer" "${VERSION:-unknown}"
+ msg_ok "Setup Adminer (Debian/Ubuntu)"
+ fi
+}
+
+# ------------------------------------------------------------------------------
+# Installs or updates Composer globally (robust, idempotent).
+#
+# - Installs to /usr/local/bin/composer
+# - Removes old binaries/symlinks in /usr/bin, /bin, /root/.composer, etc.
+# - Ensures /usr/local/bin is in PATH (permanent)
+# - Auto-updates to latest version
+# ------------------------------------------------------------------------------
+
+function setup_composer() {
+ local COMPOSER_BIN="/usr/local/bin/composer"
+ export COMPOSER_ALLOW_SUPERUSER=1
+
+ # Get currently installed version
+ local INSTALLED_VERSION=""
+ if [[ -x "$COMPOSER_BIN" ]]; then
+ INSTALLED_VERSION=$("$COMPOSER_BIN" --version 2>/dev/null | awk '{print $3}')
+ fi
+
+ # Scenario 1: Already installed - just self-update
+ if [[ -n "$INSTALLED_VERSION" ]]; then
+ msg_info "Update Composer $INSTALLED_VERSION"
+ $STD "$COMPOSER_BIN" self-update --no-interaction || true
+ local UPDATED_VERSION
+ UPDATED_VERSION=$("$COMPOSER_BIN" --version 2>/dev/null | awk '{print $3}')
+ cache_installed_version "composer" "$UPDATED_VERSION"
+ msg_ok "Update Composer $UPDATED_VERSION"
+ return 0
+ fi
+
+ # Scenario 2: Fresh install
+ msg_info "Setup Composer"
+
+ for old in /usr/bin/composer /bin/composer /root/.composer/vendor/bin/composer; do
+ [[ -e "$old" && "$old" != "$COMPOSER_BIN" ]] && rm -f "$old"
+ done
+
+ ensure_usr_local_bin_persist
+ export PATH="/usr/local/bin:$PATH"
+
+ curl -fsSL https://getcomposer.org/installer -o /tmp/composer-setup.php || {
+ msg_error "Failed to download Composer installer"
+ return 1
+ }
+
+ $STD php /tmp/composer-setup.php --install-dir=/usr/local/bin --filename=composer || {
+ msg_error "Failed to install Composer"
+ rm -f /tmp/composer-setup.php
+ return 1
+ }
+ rm -f /tmp/composer-setup.php
+
+ if [[ ! -x "$COMPOSER_BIN" ]]; then
+ msg_error "Composer installation failed"
+ return 1
+ fi
+
+ chmod +x "$COMPOSER_BIN"
+ $STD "$COMPOSER_BIN" self-update --no-interaction || true
+
+ local FINAL_VERSION
+ FINAL_VERSION=$("$COMPOSER_BIN" --version 2>/dev/null | awk '{print $3}')
+ cache_installed_version "composer" "$FINAL_VERSION"
+ msg_ok "Setup Composer"
+}
+
+# ------------------------------------------------------------------------------
+# Installs FFmpeg from source or prebuilt binary (Debian/Ubuntu only).
+#
+# Description:
+# - Downloads and builds FFmpeg from GitHub (https://github.com/FFmpeg/FFmpeg)
+# - Supports specific version override via FFMPEG_VERSION (e.g. n7.1.1)
+# - Supports build profile via FFMPEG_TYPE:
+# - minimal : x264, vpx, mp3 only
+# - medium : adds subtitles, fonts, opus, vorbis
+# - full : adds dav1d, svt-av1, zlib, numa
+# - binary : downloads static build (johnvansickle.com)
+# - Defaults to latest stable version and full feature set
+#
+# Notes:
+# - Requires: curl, jq, build-essential, and matching codec libraries
+# - Result is installed to /usr/local/bin/ffmpeg
+# ------------------------------------------------------------------------------
+
+function setup_ffmpeg() {
+ local TMP_DIR=$(mktemp -d)
+ local GITHUB_REPO="FFmpeg/FFmpeg"
+ local VERSION="${FFMPEG_VERSION:-latest}"
+ local TYPE="${FFMPEG_TYPE:-full}"
+ local BIN_PATH="/usr/local/bin/ffmpeg"
+
+ # Get currently installed version
+ local INSTALLED_VERSION=""
+ if command -v ffmpeg &>/dev/null; then
+ INSTALLED_VERSION=$(ffmpeg -version 2>/dev/null | head -n1 | awk '{print $3}')
+ fi
+
+ msg_info "Setup FFmpeg ${VERSION} ($TYPE)"
+
+ # Binary fallback mode
+ if [[ "$TYPE" == "binary" ]]; then
+ curl -fsSL https://johnvansickle.com/ffmpeg/releases/ffmpeg-release-amd64-static.tar.xz -o "$TMP_DIR/ffmpeg.tar.xz" || {
+ msg_error "Failed to download FFmpeg binary"
+ rm -rf "$TMP_DIR"
+ return 1
+ }
+ tar -xf "$TMP_DIR/ffmpeg.tar.xz" -C "$TMP_DIR" || {
+ msg_error "Failed to extract FFmpeg binary"
+ rm -rf "$TMP_DIR"
+ return 1
+ }
+ local EXTRACTED_DIR
+ EXTRACTED_DIR=$(find "$TMP_DIR" -maxdepth 1 -type d -name "ffmpeg-*")
+ cp "$EXTRACTED_DIR/ffmpeg" "$BIN_PATH"
+ cp "$EXTRACTED_DIR/ffprobe" /usr/local/bin/ffprobe
+ chmod +x "$BIN_PATH" /usr/local/bin/ffprobe
+ local FINAL_VERSION=$($BIN_PATH -version 2>/dev/null | head -n1 | awk '{print $3}')
+ rm -rf "$TMP_DIR"
+ cache_installed_version "ffmpeg" "$FINAL_VERSION"
+ ensure_usr_local_bin_persist
+ [[ -n "$INSTALLED_VERSION" ]] && msg_ok "Upgrade FFmpeg $INSTALLED_VERSION → $FINAL_VERSION" || msg_ok "Setup FFmpeg $FINAL_VERSION"
+ return 0
+ fi
+
+ ensure_dependencies jq
+
+ # Auto-detect latest stable version if none specified
+ if [[ "$VERSION" == "latest" || -z "$VERSION" ]]; then
+ local ffmpeg_tags
+ ffmpeg_tags=$(curl -fsSL --max-time 15 "https://api.github.com/repos/${GITHUB_REPO}/tags" 2>/dev/null || echo "")
+
+ if [[ -z "$ffmpeg_tags" ]]; then
+ msg_warn "Could not fetch FFmpeg versions from GitHub, trying binary fallback"
+ VERSION="" # Will trigger binary fallback below
+ else
+ VERSION=$(echo "$ffmpeg_tags" | jq -r '.[].name' 2>/dev/null |
+ grep -E '^n[0-9]+\.[0-9]+\.[0-9]+$' |
+ sort -V | tail -n1 || echo "")
+ fi
+ fi
+
+ if [[ -z "$VERSION" ]]; then
+ msg_info "Could not determine FFmpeg source version, using pre-built binary"
+ VERSION="" # Will use binary fallback
+ fi
+
+ # Dependency selection
+ local DEPS=(build-essential yasm nasm pkg-config)
+ case "$TYPE" in
+ minimal)
+ DEPS+=(libx264-dev libvpx-dev libmp3lame-dev)
+ ;;
+ medium)
+ DEPS+=(libx264-dev libvpx-dev libmp3lame-dev libfreetype6-dev libass-dev libopus-dev libvorbis-dev)
+ ;;
+ full)
+ DEPS+=(
+ libx264-dev libx265-dev libvpx-dev libmp3lame-dev
+ libfreetype6-dev libass-dev libopus-dev libvorbis-dev
+ libdav1d-dev libsvtav1-dev zlib1g-dev libnuma-dev
+ libva-dev libdrm-dev
+ )
+ ;;
+ *)
+ msg_error "Invalid FFMPEG_TYPE: $TYPE"
+ rm -rf "$TMP_DIR"
+ return 1
+ ;;
+ esac
+
+ ensure_dependencies "${DEPS[@]}"
+
+ # Try to download source if VERSION is set
+ if [[ -n "$VERSION" ]]; then
+ curl -fsSL "https://github.com/${GITHUB_REPO}/archive/refs/tags/${VERSION}.tar.gz" -o "$TMP_DIR/ffmpeg.tar.gz" || {
+ msg_warn "Failed to download FFmpeg source ${VERSION}, falling back to pre-built binary"
+ VERSION=""
+ }
+ fi
+
+ # If no source download (either VERSION empty or download failed), use binary
+ if [[ -z "$VERSION" ]]; then
+ msg_info "Setup FFmpeg from pre-built binary"
+ curl -fsSL https://johnvansickle.com/ffmpeg/releases/ffmpeg-release-amd64-static.tar.xz -o "$TMP_DIR/ffmpeg.tar.xz" || {
+ msg_error "Failed to download FFmpeg pre-built binary"
+ rm -rf "$TMP_DIR"
+ return 1
+ }
+
+ tar -xJf "$TMP_DIR/ffmpeg.tar.xz" -C "$TMP_DIR" || {
+ msg_error "Failed to extract FFmpeg binary archive"
+ rm -rf "$TMP_DIR"
+ return 1
+ }
+
+ if ! cp "$TMP_DIR/ffmpeg-"*/ffmpeg /usr/local/bin/ffmpeg 2>/dev/null; then
+ msg_error "Failed to install FFmpeg binary"
+ rm -rf "$TMP_DIR"
+ return 1
+ fi
+
+ cache_installed_version "ffmpeg" "static"
+ rm -rf "$TMP_DIR"
+ msg_ok "Setup FFmpeg from pre-built binary"
+ return 0
+ fi
+
+ tar -xzf "$TMP_DIR/ffmpeg.tar.gz" -C "$TMP_DIR" || {
+ msg_error "Failed to extract FFmpeg source"
+ rm -rf "$TMP_DIR"
+ return 1
+ }
+
+ cd "$TMP_DIR/FFmpeg-"* || {
+ msg_error "Source extraction failed"
+ rm -rf "$TMP_DIR"
+ return 1
+ }
+
+ local args=(
+ --enable-gpl
+ --enable-shared
+ --enable-nonfree
+ --disable-static
+ --enable-libx264
+ --enable-libvpx
+ --enable-libmp3lame
+ )
+
+ if [[ "$TYPE" != "minimal" ]]; then
+ args+=(--enable-libfreetype --enable-libass --enable-libopus --enable-libvorbis)
+ fi
+
+ if [[ "$TYPE" == "full" ]]; then
+ args+=(--enable-libx265 --enable-libdav1d --enable-zlib)
+ args+=(--enable-vaapi --enable-libdrm)
+ fi
+
+ if [[ ${#args[@]} -eq 0 ]]; then
+ msg_error "FFmpeg configure args array is empty"
+ rm -rf "$TMP_DIR"
+ return 1
+ fi
+
+ $STD ./configure "${args[@]}" || {
+ msg_error "FFmpeg configure failed"
+ rm -rf "$TMP_DIR"
+ return 1
+ }
+ $STD make -j"$(nproc)" || {
+ msg_error "FFmpeg compilation failed"
+ rm -rf "$TMP_DIR"
+ return 1
+ }
+ $STD make install || {
+ msg_error "FFmpeg installation failed"
+ rm -rf "$TMP_DIR"
+ return 1
+ }
+ echo "/usr/local/lib" >/etc/ld.so.conf.d/ffmpeg.conf
+ $STD ldconfig
+
+ ldconfig -p 2>/dev/null | grep libavdevice >/dev/null || {
+ msg_error "libavdevice not registered with dynamic linker"
+ rm -rf "$TMP_DIR"
+ return 1
+ }
+
+ if ! command -v ffmpeg &>/dev/null; then
+ msg_error "FFmpeg installation failed"
+ rm -rf "$TMP_DIR"
+ return 1
+ fi
+
+ local FINAL_VERSION
+ FINAL_VERSION=$(ffmpeg -version 2>/dev/null | head -n1 | awk '{print $3}')
+ rm -rf "$TMP_DIR"
+ cache_installed_version "ffmpeg" "$FINAL_VERSION"
+ ensure_usr_local_bin_persist
+ [[ -n "$INSTALLED_VERSION" ]] && msg_ok "Upgrade FFmpeg $INSTALLED_VERSION → $FINAL_VERSION" || msg_ok "Setup FFmpeg $FINAL_VERSION"
+}
+
+# ------------------------------------------------------------------------------
+# Installs Go (Golang) from official tarball.
+#
+# Description:
+# - Determines system architecture
+# - Downloads latest version if GO_VERSION not set
+#
+# Variables:
+# GO_VERSION - Version to install (e.g. 1.22.2 or latest)
+# ------------------------------------------------------------------------------
+
+function setup_go() {
+ local ARCH
+ case "$(uname -m)" in
+ x86_64) ARCH="amd64" ;;
+ aarch64) ARCH="arm64" ;;
+ *)
+ msg_error "Unsupported architecture: $(uname -m)"
+ return 1
+ ;;
+ esac
+
+ # Resolve "latest" version
+ local GO_VERSION="${GO_VERSION:-latest}"
+ if [[ "$GO_VERSION" == "latest" ]]; then
+ GO_VERSION=$(curl -fsSL https://go.dev/VERSION?m=text 2>/dev/null | head -n1 | sed 's/^go//') || {
+ msg_error "Could not determine latest Go version"
+ return 1
+ }
+ [[ -z "$GO_VERSION" ]] && {
+ msg_error "Latest Go version is empty"
+ return 1
+ }
+ fi
+
+ local GO_BIN="/usr/local/bin/go"
+ local GO_INSTALL_DIR="/usr/local/go"
+
+ # Get currently installed version
+ local CURRENT_VERSION=""
+ if [[ -x "$GO_BIN" ]]; then
+ CURRENT_VERSION=$("$GO_BIN" version 2>/dev/null | awk '{print $3}' | sed 's/go//')
+ fi
+
+ # Scenario 1: Already at target version
+ if [[ -n "$CURRENT_VERSION" && "$CURRENT_VERSION" == "$GO_VERSION" ]]; then
+ cache_installed_version "go" "$GO_VERSION"
+ return 0
+ fi
+
+ # Scenario 2: Different version or not installed
+ if [[ -n "$CURRENT_VERSION" && "$CURRENT_VERSION" != "$GO_VERSION" ]]; then
+ msg_info "Upgrade Go from $CURRENT_VERSION to $GO_VERSION"
+ remove_old_tool_version "go"
+ else
+ msg_info "Setup Go $GO_VERSION"
+ fi
+
+ local TARBALL="go${GO_VERSION}.linux-${ARCH}.tar.gz"
+ local URL="https://go.dev/dl/${TARBALL}"
+ local TMP_TAR=$(mktemp)
+
+ curl -fsSL "$URL" -o "$TMP_TAR" || {
+ msg_error "Failed to download Go $GO_VERSION"
+ rm -f "$TMP_TAR"
+ return 1
+ }
+
+ $STD tar -C /usr/local -xzf "$TMP_TAR" || {
+ msg_error "Failed to extract Go tarball"
+ rm -f "$TMP_TAR"
+ return 1
+ }
+
+ ln -sf /usr/local/go/bin/go /usr/local/bin/go
+ ln -sf /usr/local/go/bin/gofmt /usr/local/bin/gofmt
+ rm -f "$TMP_TAR"
+
+ cache_installed_version "go" "$GO_VERSION"
+ ensure_usr_local_bin_persist
+ msg_ok "Setup Go $GO_VERSION"
+}
+
+# ------------------------------------------------------------------------------
+# Installs or updates Ghostscript (gs) from source.
+#
+# Description:
+# - Fetches latest release
+# - Builds and installs system-wide
+# ------------------------------------------------------------------------------
+
+function setup_gs() {
+ local TMP_DIR=$(mktemp -d)
+ local CURRENT_VERSION=$(gs --version 2>/dev/null || echo "0")
+
+ ensure_dependencies jq
+
+ local RELEASE_JSON
+ RELEASE_JSON=$(curl -fsSL --max-time 15 https://api.github.com/repos/ArtifexSoftware/ghostpdl-downloads/releases/latest 2>/dev/null || echo "")
+
+ if [[ -z "$RELEASE_JSON" ]]; then
+ msg_warn "Cannot fetch latest Ghostscript version from GitHub API"
+ # Try to get from current version
+ if command -v gs &>/dev/null; then
+ gs --version | head -n1
+ cache_installed_version "ghostscript" "$CURRENT_VERSION"
+ return 0
+ fi
+ msg_error "Cannot determine Ghostscript version and no existing installation found"
+ return 1
+ fi
+ local LATEST_VERSION
+ LATEST_VERSION=$(echo "$RELEASE_JSON" | jq -r '.tag_name' | sed 's/^gs//')
+ local LATEST_VERSION_DOTTED
+ LATEST_VERSION_DOTTED=$(echo "$RELEASE_JSON" | jq -r '.name' | grep -o '[0-9]\+\.[0-9]\+\.[0-9]\+')
+
+ if [[ -z "$LATEST_VERSION" || -z "$LATEST_VERSION_DOTTED" ]]; then
+ msg_warn "Could not determine latest Ghostscript version from GitHub - checking system"
+ # Fallback: try to use system version or return error
+ if [[ "$CURRENT_VERSION" == "0" ]]; then
+ msg_error "Ghostscript not installed and cannot determine latest version"
+ rm -rf "$TMP_DIR"
+ return 1
+ fi
+ rm -rf "$TMP_DIR"
+ return 0
+ fi
+
+ # Scenario 1: Already at latest version
+ if [[ -n "$LATEST_VERSION_DOTTED" ]] && dpkg --compare-versions "$CURRENT_VERSION" ge "$LATEST_VERSION_DOTTED" 2>/dev/null; then
+ cache_installed_version "ghostscript" "$LATEST_VERSION_DOTTED"
+ rm -rf "$TMP_DIR"
+ return 0
+ fi
+
+ # Scenario 2: New install or upgrade
+ if [[ "$CURRENT_VERSION" != "0" && "$CURRENT_VERSION" != "$LATEST_VERSION_DOTTED" ]]; then
+ msg_info "Upgrade Ghostscript from $CURRENT_VERSION to $LATEST_VERSION_DOTTED"
+ else
+ msg_info "Setup Ghostscript $LATEST_VERSION_DOTTED"
+ fi
+
+ curl -fsSL "https://github.com/ArtifexSoftware/ghostpdl-downloads/releases/download/gs${LATEST_VERSION}/ghostscript-${LATEST_VERSION_DOTTED}.tar.gz" -o "$TMP_DIR/ghostscript.tar.gz" || {
+ msg_error "Failed to download Ghostscript"
+ rm -rf "$TMP_DIR"
+ return 1
+ }
+
+ if ! tar -xzf "$TMP_DIR/ghostscript.tar.gz" -C "$TMP_DIR"; then
+ msg_error "Failed to extract Ghostscript archive"
+ rm -rf "$TMP_DIR"
+ return 1
+ fi
+
+ # Verify directory exists before cd
+ if [[ ! -d "$TMP_DIR/ghostscript-${LATEST_VERSION_DOTTED}" ]]; then
+ msg_error "Ghostscript source directory not found: $TMP_DIR/ghostscript-${LATEST_VERSION_DOTTED}"
+ rm -rf "$TMP_DIR"
+ return 1
+ fi
+
+ cd "$TMP_DIR/ghostscript-${LATEST_VERSION_DOTTED}" || {
+ msg_error "Failed to enter Ghostscript source directory"
+ rm -rf "$TMP_DIR"
+ return 1
+ }
+
+ ensure_dependencies build-essential libpng-dev zlib1g-dev
+
+ $STD ./configure || {
+ msg_error "Ghostscript configure failed"
+ rm -rf "$TMP_DIR"
+ return 1
+ }
+ $STD make -j"$(nproc)" || {
+ msg_error "Ghostscript compilation failed"
+ rm -rf "$TMP_DIR"
+ return 1
+ }
+ $STD make install || {
+ msg_error "Ghostscript installation failed"
+ rm -rf "$TMP_DIR"
+ return 1
+ }
+
+ hash -r
+ if [[ ! -x "$(command -v gs)" ]]; then
+ if [[ -x /usr/local/bin/gs ]]; then
+ ln -sf /usr/local/bin/gs /usr/bin/gs
+ fi
+ fi
+
+ rm -rf "$TMP_DIR"
+ cache_installed_version "ghostscript" "$LATEST_VERSION_DOTTED"
+ ensure_usr_local_bin_persist
+ msg_ok "Setup Ghostscript $LATEST_VERSION_DOTTED"
+}
+
+# ------------------------------------------------------------------------------
+# Sets up hardware acceleration on Debian or Ubuntu.
+#
+# Description:
+#   - Determines the CPU/GPU/APU vendor
+# - Installs the correct libraries and packages
+# - Sets up Hardware Acceleration
+#
+# Notes:
+#   - Some packages are fetched from Intel repositories because they are not available in the Debian repositories.
+# ------------------------------------------------------------------------------
+function setup_hwaccel() {
+ msg_info "Setup Hardware Acceleration"
+
+ if ! command -v lspci &>/dev/null; then
+ $STD apt -y update || {
+ msg_error "Failed to update package list"
+ return 1
+ }
+ $STD apt -y install pciutils || {
+ msg_error "Failed to install pciutils"
+ return 1
+ }
+ fi
+
+ # Detect GPU vendor (Intel, AMD, NVIDIA)
+ local gpu_vendor
+ gpu_vendor=$(lspci 2>/dev/null | grep -Ei 'vga|3d|display' | grep -Eo 'Intel|AMD|NVIDIA' | head -n1 || echo "")
+
+ # Detect CPU vendor (relevant for AMD APUs)
+ local cpu_vendor
+ cpu_vendor=$(lscpu 2>/dev/null | grep -i 'Vendor ID' | awk '{print $3}' || echo "")
+
+ if [[ -z "$gpu_vendor" && -z "$cpu_vendor" ]]; then
+ msg_error "No GPU or CPU vendor detected (missing lspci/lscpu output)"
+ return 1
+ fi
+
+ # Detect OS with fallbacks
+ local os_id os_codename
+ os_id=$(grep -oP '(?<=^ID=).+' /etc/os-release 2>/dev/null | tr -d '"' || grep '^ID=' /etc/os-release 2>/dev/null | cut -d'=' -f2 | tr -d '"' || echo "debian")
+ os_codename=$(grep -oP '(?<=^VERSION_CODENAME=).+' /etc/os-release 2>/dev/null | tr -d '"' || grep '^VERSION_CODENAME=' /etc/os-release 2>/dev/null | cut -d'=' -f2 | tr -d '"' || echo "unknown")
+
+ # Validate os_id
+ if [[ -z "$os_id" ]]; then
+ os_id="debian"
+ fi
+
+ # Determine if we are on a VM or LXC
+ local in_ct="${CTTYPE:-0}"
+
+ case "$gpu_vendor" in
+ Intel)
+ if [[ "$os_id" == "ubuntu" ]]; then
+ $STD apt -y install intel-opencl-icd || {
+ msg_error "Failed to install intel-opencl-icd"
+ return 1
+ }
+ else
+ # For Debian: fetch Intel GPU drivers from GitHub
+ fetch_and_deploy_gh_release "" "intel/intel-graphics-compiler" "binary" "latest" "" "intel-igc-core-2_*_amd64.deb" || {
+ msg_warn "Failed to deploy Intel IGC core 2"
+ }
+ fetch_and_deploy_gh_release "" "intel/intel-graphics-compiler" "binary" "latest" "" "intel-igc-opencl-2_*_amd64.deb" || {
+ msg_warn "Failed to deploy Intel IGC OpenCL 2"
+ }
+ fetch_and_deploy_gh_release "" "intel/compute-runtime" "binary" "latest" "" "libigdgmm12_*_amd64.deb" || {
+ msg_warn "Failed to deploy Intel GDGMM12"
+ }
+ fetch_and_deploy_gh_release "" "intel/compute-runtime" "binary" "latest" "" "intel-opencl-icd_*_amd64.deb" || {
+ msg_warn "Failed to deploy Intel OpenCL ICD"
+ }
+ fi
+
+ $STD apt -y install va-driver-all ocl-icd-libopencl1 vainfo intel-gpu-tools || {
+ msg_error "Failed to install Intel GPU dependencies"
+ return 1
+ }
+ ;;
+ AMD)
+ $STD apt -y install mesa-va-drivers mesa-vdpau-drivers mesa-opencl-icd vainfo clinfo || {
+ msg_error "Failed to install AMD GPU dependencies"
+ return 1
+ }
+
+ # For AMD CPUs without discrete GPU (APUs)
+ if [[ "$cpu_vendor" == "AuthenticAMD" && -n "$gpu_vendor" ]]; then
+ $STD apt -y install libdrm-amdgpu1 firmware-amd-graphics || true
+ fi
+ ;;
+ NVIDIA)
+ # NVIDIA needs manual driver setup - skip for now
+ msg_info "NVIDIA GPU detected - manual driver setup required"
+ ;;
+ *)
+ # If no discrete GPU, but AMD CPU (e.g., Ryzen APU)
+ if [[ "$cpu_vendor" == "AuthenticAMD" ]]; then
+ $STD apt -y install mesa-opencl-icd ocl-icd-libopencl1 clinfo || {
+ msg_error "Failed to install Mesa OpenCL stack"
+ return 1
+ }
+ else
+ msg_warn "No supported GPU vendor detected - skipping GPU acceleration"
+ fi
+ ;;
+ esac
+
+ if [[ "$in_ct" == "0" ]]; then
+ chgrp video /dev/dri 2>/dev/null || true
+ chmod 755 /dev/dri 2>/dev/null || true
+ chmod 660 /dev/dri/* 2>/dev/null || true
+ $STD adduser "$(id -u -n)" video
+ $STD adduser "$(id -u -n)" render
+ fi
+
+ cache_installed_version "hwaccel" "1.0"
+ msg_ok "Setup Hardware Acceleration"
+}
+
+# ------------------------------------------------------------------------------
+# Installs ImageMagick 7 from source (Debian/Ubuntu only).
+#
+# Description:
+# - Downloads the latest ImageMagick source tarball
+# - Builds and installs ImageMagick to /usr/local
+# - Configures dynamic linker (ldconfig)
+#
+# Notes:
+# - Requires: build-essential, libtool, libjpeg-dev, libpng-dev, etc.
+# ------------------------------------------------------------------------------
+function setup_imagemagick() {
+  # Split declaration and assignment so a mktemp failure is not masked (SC2155)
+  local TMP_DIR
+  TMP_DIR=$(mktemp -d)
+  local BINARY_PATH="/usr/local/bin/magick"
+
+  # Get currently installed version (empty when magick is not on PATH)
+  local INSTALLED_VERSION=""
+  if command -v magick &>/dev/null; then
+    INSTALLED_VERSION=$(magick -version | awk '/^Version/ {print $3}')
+  fi
+
+  msg_info "Setup ImageMagick"
+
+  ensure_dependencies \
+    build-essential \
+    libtool \
+    libjpeg-dev \
+    libpng-dev \
+    libtiff-dev \
+    libwebp-dev \
+    libheif-dev \
+    libde265-dev \
+    libopenjp2-7-dev \
+    libxml2-dev \
+    liblcms2-dev \
+    libfreetype6-dev \
+    libraw-dev \
+    libfftw3-dev \
+    liblqr-1-0-dev \
+    libgsl-dev \
+    pkg-config \
+    ghostscript
+
+  curl -fsSL https://imagemagick.org/archive/ImageMagick.tar.gz -o "$TMP_DIR/ImageMagick.tar.gz" || {
+    msg_error "Failed to download ImageMagick"
+    rm -rf "$TMP_DIR"
+    return 1
+  }
+
+  tar -xzf "$TMP_DIR/ImageMagick.tar.gz" -C "$TMP_DIR" || {
+    msg_error "Failed to extract ImageMagick"
+    rm -rf "$TMP_DIR"
+    return 1
+  }
+
+  cd "$TMP_DIR"/ImageMagick-* || {
+    msg_error "Source extraction failed"
+    rm -rf "$TMP_DIR"
+    return 1
+  }
+
+  # From here on the shell sits inside $TMP_DIR, so every cleanup path must
+  # leave the directory before deleting it (rm of the cwd leaves a stale cwd
+  # that breaks later commands, e.g. npm's uv_cwd error).
+  $STD ./configure --disable-static || {
+    msg_error "ImageMagick configure failed"
+    cd /tmp && rm -rf "$TMP_DIR"
+    return 1
+  }
+  $STD make -j"$(nproc)" || {
+    msg_error "ImageMagick compilation failed"
+    cd /tmp && rm -rf "$TMP_DIR"
+    return 1
+  }
+  $STD make install || {
+    msg_error "ImageMagick installation failed"
+    cd /tmp && rm -rf "$TMP_DIR"
+    return 1
+  }
+  $STD ldconfig /usr/local/lib
+
+  if [[ ! -x "$BINARY_PATH" ]]; then
+    msg_error "ImageMagick installation failed"
+    cd /tmp && rm -rf "$TMP_DIR"
+    return 1
+  fi
+
+  local FINAL_VERSION
+  FINAL_VERSION=$("$BINARY_PATH" -version | awk '/^Version/ {print $3}')
+  cd /tmp || true # leave the build dir before removing it
+  rm -rf "$TMP_DIR"
+  cache_installed_version "imagemagick" "$FINAL_VERSION"
+  ensure_usr_local_bin_persist
+
+  if [[ -n "$INSTALLED_VERSION" ]]; then
+    msg_ok "Upgrade ImageMagick $INSTALLED_VERSION → $FINAL_VERSION"
+  else
+    msg_ok "Setup ImageMagick $FINAL_VERSION"
+  fi
+}
+
+# ------------------------------------------------------------------------------
+# Installs Temurin JDK via Adoptium APT repository.
+#
+# Description:
+# - Removes previous JDK if version mismatch
+# - Installs or upgrades to specified JAVA_VERSION
+#
+# Variables:
+# JAVA_VERSION - Temurin JDK version to install (e.g. 17, 21) (default: 21)
+# ------------------------------------------------------------------------------
+
+function setup_java() {
+  local JAVA_VERSION="${JAVA_VERSION:-21}"
+  local DISTRO_ID DISTRO_CODENAME
+  DISTRO_ID=$(awk -F= '/^ID=/{print $2}' /etc/os-release | tr -d '"')
+  # Anchored pattern + quote stripping, consistent with DISTRO_ID above:
+  # os-release values may be quoted, and an unanchored /VERSION_CODENAME/ is fragile
+  DISTRO_CODENAME=$(awk -F= '/^VERSION_CODENAME=/{print $2}' /etc/os-release | tr -d '"')
+  local DESIRED_PACKAGE="temurin-${JAVA_VERSION}-jdk"
+
+  # Prepare repository (cleanup + validation)
+  prepare_repository_setup "adoptium" || {
+    msg_error "Failed to prepare Adoptium repository"
+    return 1
+  }
+
+  # Add repo if needed
+  if [[ ! -f /etc/apt/sources.list.d/adoptium.sources ]]; then
+    local SUITE
+    SUITE=$(get_fallback_suite "$DISTRO_ID" "$DISTRO_CODENAME" "https://packages.adoptium.net/artifactory/deb")
+    setup_deb822_repo \
+      "adoptium" \
+      "https://packages.adoptium.net/artifactory/api/gpg/key/public" \
+      "https://packages.adoptium.net/artifactory/deb" \
+      "$SUITE" \
+      "main" \
+      "amd64 arm64"
+  fi
+
+  # Get currently installed Temurin major version, if any
+  local INSTALLED_VERSION=""
+  if dpkg -l | grep -q "temurin-.*-jdk" 2>/dev/null; then
+    INSTALLED_VERSION=$(dpkg -l 2>/dev/null | awk '/temurin-.*-jdk/{print $2}' | grep -oP 'temurin-\K[0-9]+' | head -n1 || echo "")
+  fi
+
+  # A Temurin JDK package exists but its version could not be parsed
+  local JDK_COUNT
+  JDK_COUNT=$(dpkg -l 2>/dev/null | grep -c "temurin-.*-jdk" || echo "0")
+  JDK_COUNT=${JDK_COUNT//[^0-9]/} # Remove any non-numeric characters
+  if [[ -z "$INSTALLED_VERSION" && "${JDK_COUNT:-0}" -gt 0 ]]; then
+    msg_warn "Found Temurin JDK but cannot determine version"
+    INSTALLED_VERSION="0"
+  fi
+
+  # Scenario 1: Already at correct version - just refresh the package
+  if [[ "$INSTALLED_VERSION" == "$JAVA_VERSION" ]]; then
+    msg_info "Update Temurin JDK $JAVA_VERSION"
+    ensure_apt_working || return 1
+    upgrade_packages_with_retry "$DESIRED_PACKAGE" || {
+      msg_error "Failed to update Temurin JDK"
+      return 1
+    }
+    cache_installed_version "temurin-jdk" "$JAVA_VERSION"
+    msg_ok "Update Temurin JDK $JAVA_VERSION"
+    return 0
+  fi
+
+  # Scenario 2: Different version - remove old and install new
+  if [[ -n "$INSTALLED_VERSION" ]]; then
+    msg_info "Upgrade Temurin JDK from $INSTALLED_VERSION to $JAVA_VERSION"
+    $STD apt purge -y "temurin-${INSTALLED_VERSION}-jdk" || true
+  else
+    msg_info "Setup Temurin JDK $JAVA_VERSION"
+  fi
+
+  ensure_apt_working || return 1
+
+  # Install with retry logic
+  install_packages_with_retry "$DESIRED_PACKAGE" || {
+    msg_error "Failed to install Temurin JDK $JAVA_VERSION"
+    return 1
+  }
+
+  cache_installed_version "temurin-jdk" "$JAVA_VERSION"
+  msg_ok "Setup Temurin JDK $JAVA_VERSION"
+}
+
# ------------------------------------------------------------------------------
# Installs a local IP updater script using networkd-dispatcher.
#
@@ -1064,12 +2846,23 @@ function setup_local_ip_helper() {
local IP_FILE="/run/local-ip.env"
local DISPATCHER_SCRIPT="/etc/networkd-dispatcher/routable.d/10-update-local-ip.sh"
+ # Check if already set up
+ if [[ -f "$SCRIPT_PATH" && -f "$DISPATCHER_SCRIPT" ]]; then
+ msg_info "Update Local IP Helper"
+ cache_installed_version "local-ip-helper" "1.0"
+ msg_ok "Update Local IP Helper"
+ else
+ msg_info "Setup Local IP Helper"
+ fi
+
mkdir -p "$BASE_DIR"
# Install networkd-dispatcher if not present
if ! dpkg -s networkd-dispatcher >/dev/null 2>&1; then
- $STD apt-get update
- $STD apt-get install -y networkd-dispatcher
+ ensure_dependencies networkd-dispatcher || {
+ msg_error "Failed to install networkd-dispatcher"
+ return 1
+ }
fi
# Write update_local_ip.sh
@@ -1125,261 +2918,903 @@ $SCRIPT_PATH
EOF
chmod +x "$DISPATCHER_SCRIPT"
- systemctl enable -q --now networkd-dispatcher.service
+ systemctl enable -q --now networkd-dispatcher.service || {
+ msg_warn "Failed to enable networkd-dispatcher service"
+ }
+
+ cache_installed_version "local-ip-helper" "1.0"
+ msg_ok "Setup Local IP Helper"
}
# ------------------------------------------------------------------------------
-# Loads LOCAL_IP from persistent store or detects if missing.
+# Installs or updates MariaDB from official repo.
#
# Description:
-# - Loads from /run/local-ip.env or performs runtime lookup
+# - Detects current MariaDB version and replaces it if necessary
+# - Preserves existing database data
+# - Dynamically determines latest GA version if "latest" is given
+#
+# Variables:
+# MARIADB_VERSION - MariaDB version to install (e.g. 10.11, latest) (default: latest)
# ------------------------------------------------------------------------------
-function import_local_ip() {
- local IP_FILE="/run/local-ip.env"
- if [[ -f "$IP_FILE" ]]; then
- # shellcheck disable=SC1090
- source "$IP_FILE"
+setup_mariadb() {
+ local MARIADB_VERSION="${MARIADB_VERSION:-latest}"
+
+ # Resolve "latest" to actual version
+ if [[ "$MARIADB_VERSION" == "latest" ]]; then
+ if ! curl -fsI --max-time 10 http://mirror.mariadb.org/repo/ >/dev/null 2>&1; then
+ msg_warn "MariaDB mirror not reachable - trying cached package list fallback"
+ # Fallback: try to use a known stable version
+ # NOTE(review): 12.0 assumed to be a current GA series - TODO confirm against mirror
+ MARIADB_VERSION="12.0"
+ else
+ MARIADB_VERSION=$(curl -fsSL --max-time 15 http://mirror.mariadb.org/repo/ 2>/dev/null |
+ grep -Eo '[0-9]+\.[0-9]+\.[0-9]+/' |
+ grep -vE 'rc/|rolling/' |
+ sed 's|/||' |
+ sort -Vr |
+ head -n1 || echo "")
+
+ if [[ -z "$MARIADB_VERSION" ]]; then
+ msg_warn "Could not parse latest GA MariaDB version from mirror - using fallback"
+ MARIADB_VERSION="12.0"
+ fi
+ fi
 fi
- if [[ -z "${LOCAL_IP:-}" ]]; then
- get_current_ip() {
- local targets=("8.8.8.8" "1.1.1.1" "192.168.1.1" "10.0.0.1" "172.16.0.1" "default")
- local ip
+ # Get currently installed version
+ # NOTE(review): is_tool_installed appears to print the installed version on stdout - confirm helper contract
+ local CURRENT_VERSION=""
+ CURRENT_VERSION=$(is_tool_installed "mariadb" 2>/dev/null) || true
- for target in "${targets[@]}"; do
- if [[ "$target" == "default" ]]; then
- ip=$(ip route get 1 2>/dev/null | awk '{for(i=1;i<=NF;i++) if ($i=="src") print $(i+1)}')
- else
- ip=$(ip route get "$target" 2>/dev/null | awk '{for(i=1;i<=NF;i++) if ($i=="src") print $(i+1)}')
- fi
- if [[ -n "$ip" ]]; then
- echo "$ip"
- return 0
- fi
- done
+ # Scenario 1: Already installed at target version - just update packages
+ if [[ -n "$CURRENT_VERSION" && "$CURRENT_VERSION" == "$MARIADB_VERSION" ]]; then
+ msg_info "Update MariaDB $MARIADB_VERSION"
+ # Ensure APT is working
+ ensure_apt_working || return 1
+
+ # Check if repository needs to be refreshed
+ if [[ -f /etc/apt/sources.list.d/mariadb.sources ]]; then
+ local REPO_VERSION=""
+ # Extract the major.minor pinned in the deb822 repo file to detect a stale repo
+ REPO_VERSION=$(grep -oP 'repo/\K[0-9]+\.[0-9]+' /etc/apt/sources.list.d/mariadb.sources 2>/dev/null || echo "")
+ if [[ -n "$REPO_VERSION" && "$REPO_VERSION" != "${MARIADB_VERSION%.*}" ]]; then
+ msg_warn "Repository version mismatch, updating..."
+ manage_tool_repository "mariadb" "$MARIADB_VERSION" "http://mirror.mariadb.org/repo/$MARIADB_VERSION" \
+ "https://mariadb.org/mariadb_release_signing_key.asc" || {
+ msg_error "Failed to update MariaDB repository"
+ return 1
+ }
+ fi
+ fi
+
+ # Perform upgrade with retry logic
+ ensure_apt_working || return 1
+ upgrade_packages_with_retry "mariadb-server" "mariadb-client" || {
+ msg_error "Failed to upgrade MariaDB packages"
 return 1
 }
-
- LOCAL_IP="$(get_current_ip || true)"
- if [[ -z "$LOCAL_IP" ]]; then
- msg_error "Could not determine LOCAL_IP"
- return 1
- fi
+ cache_installed_version "mariadb" "$MARIADB_VERSION"
+ msg_ok "Update MariaDB $MARIADB_VERSION"
+ return 0
 fi
- export LOCAL_IP
+ # Scenario 2: Different version installed - clean upgrade
+ if [[ -n "$CURRENT_VERSION" && "$CURRENT_VERSION" != "$MARIADB_VERSION" ]]; then
+ msg_info "Upgrade MariaDB from $CURRENT_VERSION to $MARIADB_VERSION"
+ remove_old_tool_version "mariadb"
+ fi
+
+ # Scenario 3: Fresh install or version change
+ msg_info "Setup MariaDB $MARIADB_VERSION"
+
+ # Prepare repository (cleanup + validation)
+ prepare_repository_setup "mariadb" || {
+ msg_error "Failed to prepare MariaDB repository"
+ return 1
+ }
+
+ # Install required dependencies first
+ # (only those actually present in the current APT suite are installed)
+ local mariadb_deps=()
+ for dep in gawk rsync socat libdbi-perl pv; do
+ if apt-cache search "^${dep}$" 2>/dev/null | grep -q .; then
+ mariadb_deps+=("$dep")
+ fi
+ done
+
+ if [[ ${#mariadb_deps[@]} -gt 0 ]]; then
+ $STD apt install -y "${mariadb_deps[@]}" 2>/dev/null || true
+ fi
+
+ # Setup repository
+ manage_tool_repository "mariadb" "$MARIADB_VERSION" "http://mirror.mariadb.org/repo/$MARIADB_VERSION" \
+ "https://mariadb.org/mariadb_release_signing_key.asc" || {
+ msg_error "Failed to setup MariaDB repository"
+ return 1
+ }
+
+ # Set debconf selections for all potential versions
+ local MARIADB_MAJOR_MINOR
+ MARIADB_MAJOR_MINOR=$(echo "$MARIADB_VERSION" | awk -F. '{print $1"."$2}')
+ if [[ -n "$MARIADB_MAJOR_MINOR" ]]; then
+ echo "mariadb-server-$MARIADB_MAJOR_MINOR mariadb-server/feedback boolean false" | debconf-set-selections
+ fi
+
+ # Install packages with retry logic
+ export DEBIAN_FRONTEND=noninteractive
+ if ! install_packages_with_retry "mariadb-server" "mariadb-client"; then
+ # Fallback: try without specific version
+ msg_warn "Failed to install MariaDB packages from upstream repo, trying distro fallback..."
+ cleanup_old_repo_files "mariadb"
+ $STD apt update || {
+ msg_warn "APT update also failed, continuing with cache"
+ }
+ install_packages_with_retry "mariadb-server" "mariadb-client" || {
+ msg_error "Failed to install MariaDB packages (both upstream and distro)"
+ return 1
+ }
+ fi
+
+ cache_installed_version "mariadb" "$MARIADB_VERSION"
+ msg_ok "Setup MariaDB $MARIADB_VERSION"
}
# ------------------------------------------------------------------------------
-# Downloads file with optional progress indicator using pv.
+# Installs or updates MongoDB to specified major version.
#
-# Arguments:
-# $1 - URL
-# $2 - Destination path
+# Description:
+# - Preserves data across installations
+# - Adds official MongoDB repo
+#
+# Variables:
+# MONGO_VERSION - MongoDB major version to install (e.g. 7.0, 8.0)
# ------------------------------------------------------------------------------
-function download_with_progress() {
- local url="$1"
- local output="$2"
- if [ -n "$SPINNER_PID" ] && ps -p "$SPINNER_PID" >/dev/null; then kill "$SPINNER_PID" >/dev/null; fi
+function setup_mongodb() {
+ local MONGO_VERSION="${MONGO_VERSION:-8.0}"
+ local DISTRO_ID DISTRO_CODENAME
+ # NOTE(review): get_os_info is assumed to emit the os-release id/codename - confirm helper contract
+ DISTRO_ID=$(get_os_info id)
+ DISTRO_CODENAME=$(get_os_info codename)
- if ! command -v pv &>/dev/null; then
- $STD apt-get install -y pv
- fi
- set -o pipefail
-
- # Content-Length aus HTTP-Header holen
- local content_length
- content_length=$(curl -fsSLI "$url" | awk '/Content-Length/ {print $2}' | tr -d '\r' || true)
-
- if [[ -z "$content_length" ]]; then
- if ! curl -fL# -o "$output" "$url"; then
- msg_error "Download failed"
- return 1
- fi
- else
- if ! curl -fsSL "$url" | pv -s "$content_length" >"$output"; then
- msg_error "Download failed"
+ # Check AVX support
+ if ! grep -qm1 'avx[^ ]*' /proc/cpuinfo; then
+ local major="${MONGO_VERSION%%.*}"
+ if ((major > 5)); then
+ msg_error "MongoDB ${MONGO_VERSION} requires AVX support, which is not available on this system."
 return 1
 fi
 fi
-}
-# ------------------------------------------------------------------------------
-# Installs or upgrades uv (Python package manager) from GitHub releases.
-# - Downloads platform-specific tarball (no install.sh!)
-# - Extracts uv binary
-# - Places it in /usr/local/bin
-# - Optionally installs a specific Python version via uv
-# ------------------------------------------------------------------------------
-
-function setup_uv() {
- local UV_BIN="/usr/local/bin/uv"
- local TMP_DIR
- TMP_DIR=$(mktemp -d)
-
- # Determine system architecture
- local ARCH
- ARCH=$(uname -m)
- local UV_TAR
-
- case "$ARCH" in
- x86_64) UV_TAR="uv-x86_64-unknown-linux-gnu.tar.gz" ;;
- aarch64) UV_TAR="uv-aarch64-unknown-linux-gnu.tar.gz" ;;
+ case "$DISTRO_ID" in
+ ubuntu)
+ MONGO_BASE_URL="https://repo.mongodb.org/apt/ubuntu"
+ ;;
+ debian)
+ MONGO_BASE_URL="https://repo.mongodb.org/apt/debian"
+ ;;
 *)
- msg_error "Unsupported architecture: $ARCH"
- rm -rf "$TMP_DIR"
+ msg_error "Unsupported distribution: $DISTRO_ID"
 return 1
 ;;
 esac
- # Get latest version from GitHub
- local LATEST_VERSION
- LATEST_VERSION=$(curl -fsSL https://api.github.com/repos/astral-sh/uv/releases/latest |
- grep '"tag_name":' | cut -d '"' -f4 | sed 's/^v//')
+ # Get currently installed version
+ local INSTALLED_VERSION=""
+ INSTALLED_VERSION=$(is_tool_installed "mongodb" 2>/dev/null) || true
- if [[ -z "$LATEST_VERSION" ]]; then
- msg_error "Could not fetch latest uv version from GitHub."
- rm -rf "$TMP_DIR"
- return 1
+ # Scenario 1: Already at target version - just update packages
+ if [[ -n "$INSTALLED_VERSION" && "$INSTALLED_VERSION" == "$MONGO_VERSION" ]]; then
+ msg_info "Update MongoDB $MONGO_VERSION"
+
+ ensure_apt_working || return 1
+
+ # Perform upgrade with retry logic
+ upgrade_packages_with_retry "mongodb-org" || {
+ msg_error "Failed to upgrade MongoDB"
+ return 1
+ }
+ cache_installed_version "mongodb" "$MONGO_VERSION"
+ msg_ok "Update MongoDB $MONGO_VERSION"
+ return 0
 fi
- # Check if uv is already up to date
- if [[ -x "$UV_BIN" ]]; then
- local INSTALLED_VERSION
- INSTALLED_VERSION=$($UV_BIN -V | awk '{print $2}')
- if [[ "$INSTALLED_VERSION" == "$LATEST_VERSION" ]]; then
- rm -rf "$TMP_DIR"
- [[ ":$PATH:" != *":/usr/local/bin:"* ]] && export PATH="/usr/local/bin:$PATH"
- return 0
- else
- msg_info "Updating uv from $INSTALLED_VERSION to $LATEST_VERSION"
- fi
+ # Scenario 2: Different version installed - clean upgrade
+ if [[ -n "$INSTALLED_VERSION" && "$INSTALLED_VERSION" != "$MONGO_VERSION" ]]; then
+ msg_info "Upgrade MongoDB from $INSTALLED_VERSION to $MONGO_VERSION"
+ remove_old_tool_version "mongodb"
 else
- msg_info "Setup uv $LATEST_VERSION"
+ msg_info "Setup MongoDB $MONGO_VERSION"
 fi
- # Download and install manually
- local UV_URL="https://github.com/astral-sh/uv/releases/latest/download/${UV_TAR}"
- if ! curl -fsSL "$UV_URL" -o "$TMP_DIR/uv.tar.gz"; then
- msg_error "Failed to download $UV_URL"
- rm -rf "$TMP_DIR"
- return 1
- fi
+ cleanup_orphaned_sources
- if ! tar -xzf "$TMP_DIR/uv.tar.gz" -C "$TMP_DIR"; then
- msg_error "Failed to extract uv archive"
- rm -rf "$TMP_DIR"
- return 1
- fi
-
- install -m 755 "$TMP_DIR"/*/uv "$UV_BIN" || {
- msg_error "Failed to install uv binary"
- rm -rf "$TMP_DIR"
+ # Prepare repository (cleanup + validation)
+ prepare_repository_setup "mongodb" || {
+ msg_error "Failed to prepare MongoDB repository"
 return 1
 }
- rm -rf "$TMP_DIR"
- ensure_usr_local_bin_persist
- msg_ok "Setup uv $LATEST_VERSION"
+ # Setup repository
+ manage_tool_repository "mongodb" "$MONGO_VERSION" "$MONGO_BASE_URL" \
+ "https://www.mongodb.org/static/pgp/server-${MONGO_VERSION}.asc" || {
+ msg_error "Failed to setup MongoDB repository"
+ return 1
+ }
- # Optional: install specific Python version
- if [[ -n "${PYTHON_VERSION:-}" ]]; then
- local VERSION_MATCH
- VERSION_MATCH=$(uv python list --only-downloads |
- grep -E "^cpython-${PYTHON_VERSION//./\\.}\.[0-9]+-linux" |
- cut -d'-' -f2 | sort -V | tail -n1)
+ # Wait for repo to settle
+ $STD apt update || {
+ msg_error "APT update failed — invalid MongoDB repo for ${DISTRO_ID}-${DISTRO_CODENAME}?"
+ return 1
+ }
- if [[ -z "$VERSION_MATCH" ]]; then
- msg_error "No matching Python $PYTHON_VERSION.x version found via uv"
+ # Install MongoDB with retry logic
+ install_packages_with_retry "mongodb-org" || {
+ msg_error "Failed to install MongoDB packages"
+ return 1
+ }
+
+ # Verify MongoDB was installed correctly
+ if ! command -v mongod >/dev/null 2>&1; then
+ msg_error "MongoDB binary not found after installation"
+ return 1
+ fi
+
+ # Ensure the data directory exists and is owned by the mongodb service user
+ mkdir -p /var/lib/mongodb
+ chown -R mongodb:mongodb /var/lib/mongodb
+
+ $STD systemctl enable mongod || {
+ msg_warn "Failed to enable mongod service"
+ }
+ safe_service_restart mongod
+
+ # Verify MongoDB version
+ local INSTALLED_VERSION
+ INSTALLED_VERSION=$(mongod --version 2>/dev/null | grep -oP 'db version v\K[0-9]+\.[0-9]+' | head -n1 || echo "0.0")
+ verify_tool_version "MongoDB" "$MONGO_VERSION" "$INSTALLED_VERSION" || true
+
+ cache_installed_version "mongodb" "$MONGO_VERSION"
+ msg_ok "Setup MongoDB $MONGO_VERSION"
+}
+
+# ------------------------------------------------------------------------------
+# Installs or upgrades MySQL and configures APT repo.
+#
+# Description:
+# - Detects existing MySQL installation
+# - Purges conflicting packages before installation
+# - Supports clean upgrade
+# - Handles Debian Trixie libaio1t64 transition
+#
+# Variables:
+# MYSQL_VERSION - MySQL version to install (e.g. 5.7, 8.0) (default: 8.0)
+# ------------------------------------------------------------------------------
+
+function setup_mysql() {
+ local MYSQL_VERSION="${MYSQL_VERSION:-8.0}"
+ local DISTRO_ID DISTRO_CODENAME
+ DISTRO_ID=$(awk -F= '/^ID=/{print $2}' /etc/os-release | tr -d '"')
+ DISTRO_CODENAME=$(awk -F= '/^VERSION_CODENAME=/{print $2}' /etc/os-release)
+
+ # Get currently installed version
+ local CURRENT_VERSION=""
+ CURRENT_VERSION=$(is_tool_installed "mysql" 2>/dev/null) || true
+
+ # Scenario 1: Already at target version - just update packages
+ if [[ -n "$CURRENT_VERSION" && "$CURRENT_VERSION" == "$MYSQL_VERSION" ]]; then
+ msg_info "Update MySQL $MYSQL_VERSION"
+
+ ensure_apt_working || return 1
+
+ # Perform upgrade with retry logic (non-fatal if fails)
+ upgrade_packages_with_retry "mysql-server" "mysql-client" || true
+
+ cache_installed_version "mysql" "$MYSQL_VERSION"
+ msg_ok "Update MySQL $MYSQL_VERSION"
+ return 0
+ fi
+
+ # Scenario 2: Different version installed - clean upgrade
+ if [[ -n "$CURRENT_VERSION" && "$CURRENT_VERSION" != "$MYSQL_VERSION" ]]; then
+ msg_info "Upgrade MySQL from $CURRENT_VERSION to $MYSQL_VERSION"
+ remove_old_tool_version "mysql"
+ else
+ msg_info "Setup MySQL $MYSQL_VERSION"
+ fi
+
+ # Prepare repository (cleanup + validation)
+ prepare_repository_setup "mysql" || {
+ msg_error "Failed to prepare MySQL repository"
+ return 1
+ }
+
+ # Debian 13+ Fix: MySQL 8.0 incompatible with libaio1t64, use 8.4 LTS
+ if [[ "$DISTRO_ID" == "debian" && "$DISTRO_CODENAME" =~ ^(trixie|forky|sid)$ ]]; then
+ msg_info "Debian ${DISTRO_CODENAME} detected → using MySQL 8.4 LTS (libaio1t64 compatible)"
+
+ if ! curl -fsSL https://repo.mysql.com/RPM-GPG-KEY-mysql-2023 | gpg --dearmor -o /etc/apt/keyrings/mysql.gpg 2>/dev/null; then
+ msg_error "Failed to import MySQL GPG key"
 return 1
 fi
- if ! uv python list | grep -q "cpython-${VERSION_MATCH}-linux.*uv/python"; then
- if ! $STD uv python install "$VERSION_MATCH"; then
- msg_error "Failed to install Python $VERSION_MATCH via uv"
+ # NOTE(review): suite is pinned to bookworm - MySQL appears to publish no trixie suite yet; revisit when upstream adds one
+ cat >/etc/apt/sources.list.d/mysql.sources <<'EOF'
+Types: deb
+URIs: https://repo.mysql.com/apt/debian/
+Suites: bookworm
+Components: mysql-8.4-lts
+Architectures: amd64 arm64
+Signed-By: /etc/apt/keyrings/mysql.gpg
+EOF
+
+ $STD apt update || {
+ msg_error "Failed to update APT for MySQL 8.4 LTS"
+ return 1
+ }
+
+ # Install with retry logic
+ if ! install_packages_with_retry "mysql-community-server" "mysql-community-client"; then
+ msg_warn "MySQL 8.4 LTS installation failed – falling back to MariaDB"
+ cleanup_old_repo_files "mysql"
+ $STD apt update
+ install_packages_with_retry "mariadb-server" "mariadb-client" || {
+ msg_error "Failed to install database engine (MySQL/MariaDB fallback)"
 return 1
- fi
- msg_ok "Setup Python $VERSION_MATCH via uv"
+ }
+ msg_ok "Setup Database Engine (MariaDB fallback on Debian ${DISTRO_CODENAME})"
+ return 0
 fi
- fi
-}
-# ------------------------------------------------------------------------------
-# Ensures /usr/local/bin is permanently in system PATH.
-#
-# Description:
-# - Adds to /etc/profile.d if not present
-# ------------------------------------------------------------------------------
-
-function ensure_usr_local_bin_persist() {
- local PROFILE_FILE="/etc/profile.d/custom_path.sh"
-
- if [[ ! -f "$PROFILE_FILE" ]] && ! command -v pveversion &>/dev/null; then
- echo 'export PATH="/usr/local/bin:$PATH"' >"$PROFILE_FILE"
- chmod +x "$PROFILE_FILE"
- fi
-}
-
-# ------------------------------------------------------------------------------
-# Installs or updates Ghostscript (gs) from source.
-#
-# Description:
-# - Fetches latest release
-# - Builds and installs system-wide
-# ------------------------------------------------------------------------------
-
-function setup_gs() {
- mkdir -p /tmp
- TMP_DIR=$(mktemp -d)
- CURRENT_VERSION=$(gs --version 2>/dev/null || echo "0")
-
- RELEASE_JSON=$(curl -fsSL https://api.github.com/repos/ArtifexSoftware/ghostpdl-downloads/releases/latest)
- LATEST_VERSION=$(echo "$RELEASE_JSON" | grep '"tag_name":' | head -n1 | cut -d '"' -f4 | sed 's/^gs//')
- LATEST_VERSION_DOTTED=$(echo "$RELEASE_JSON" | grep '"name":' | head -n1 | grep -o '[0-9]\+\.[0-9]\+\.[0-9]\+')
-
- if [[ -z "$LATEST_VERSION" ]]; then
- msg_error "Could not determine latest Ghostscript version from GitHub."
- rm -rf "$TMP_DIR"
- return
+ cache_installed_version "mysql" "8.4"
+ msg_ok "Setup MySQL 8.4 LTS (Debian ${DISTRO_CODENAME})"
+ return 0
 fi
- if dpkg --compare-versions "$CURRENT_VERSION" ge "$LATEST_VERSION_DOTTED"; then
- rm -rf "$TMP_DIR"
- return
- fi
-
- msg_info "Setup Ghostscript $LATEST_VERSION_DOTTED"
- curl -fsSL "https://github.com/ArtifexSoftware/ghostpdl-downloads/releases/download/gs${LATEST_VERSION}/ghostscript-${LATEST_VERSION_DOTTED}.tar.gz" -o "$TMP_DIR/ghostscript.tar.gz"
-
- if ! tar -xzf "$TMP_DIR/ghostscript.tar.gz" -C "$TMP_DIR"; then
- msg_error "Failed to extract Ghostscript archive."
- rm -rf "$TMP_DIR"
- return
- fi
-
- cd "$TMP_DIR/ghostscript-${LATEST_VERSION_DOTTED}" || {
- msg_error "Failed to enter Ghostscript source directory."
- rm -rf "$TMP_DIR"
- }
- $STD apt-get install -y build-essential libpng-dev zlib1g-dev
- ./configure >/dev/null && make && sudo make install >/dev/null
- local EXIT_CODE=$?
- hash -r
- if [[ ! -x "$(command -v gs)" ]]; then
- if [[ -x /usr/local/bin/gs ]]; then
- ln -sf /usr/local/bin/gs /usr/bin/gs
- fi
- fi
-
- rm -rf "$TMP_DIR"
-
- if [[ $EXIT_CODE -eq 0 ]]; then
- msg_ok "Setup Ghostscript $LATEST_VERSION_DOTTED"
+ # Standard setup for other distributions
+ local SUITE
+ if [[ "$DISTRO_ID" == "debian" ]]; then
+ case "$DISTRO_CODENAME" in
+ bookworm | bullseye) SUITE="$DISTRO_CODENAME" ;;
+ *) SUITE="bookworm" ;;
+ esac
 else
- msg_error "Ghostscript installation failed"
+ SUITE=$(get_fallback_suite "$DISTRO_ID" "$DISTRO_CODENAME" "https://repo.mysql.com/apt/${DISTRO_ID}")
+ fi
+
+ # Setup repository
+ manage_tool_repository "mysql" "$MYSQL_VERSION" "https://repo.mysql.com/apt/${DISTRO_ID}" \
+ "https://repo.mysql.com/RPM-GPG-KEY-mysql-2023" || {
+ msg_error "Failed to setup MySQL repository"
+ return 1
+ }
+
+ ensure_apt_working || return 1
+
+ # Try multiple package names with retry logic
+ # (package naming differs between upstream MySQL repos and distro archives)
+ export DEBIAN_FRONTEND=noninteractive
+ local mysql_install_success=false
+
+ if apt-cache search "^mysql-server$" 2>/dev/null | grep -q . &&
+ install_packages_with_retry "mysql-server" "mysql-client"; then
+ mysql_install_success=true
+ elif apt-cache search "^mysql-community-server$" 2>/dev/null | grep -q . &&
+ install_packages_with_retry "mysql-community-server" "mysql-community-client"; then
+ mysql_install_success=true
+ elif apt-cache search "^mysql$" 2>/dev/null | grep -q . &&
+ install_packages_with_retry "mysql"; then
+ mysql_install_success=true
+ fi
+
+ if [[ "$mysql_install_success" == false ]]; then
+ msg_error "MySQL ${MYSQL_VERSION} package not available for suite ${SUITE}"
+ return 1
+ fi
+
+ # Verify mysql command is accessible
+ if ! command -v mysql >/dev/null 2>&1; then
+ hash -r
+ if ! command -v mysql >/dev/null 2>&1; then
+ msg_error "MySQL installed but mysql command still not found"
+ return 1
+ fi
+ fi
+
+ cache_installed_version "mysql" "$MYSQL_VERSION"
+ msg_ok "Setup MySQL $MYSQL_VERSION"
+}
+
+# ------------------------------------------------------------------------------
+# Installs Node.js and optional global modules.
+#
+# Description:
+# - Installs specified Node.js version using NodeSource APT repo
+# - Optionally installs or updates global npm modules
+#
+# Variables:
+# NODE_VERSION - Node.js version to install (default: 22)
+# NODE_MODULE - Comma-separated list of global modules (e.g. "yarn,@vue/cli@5.0.0")
+# ------------------------------------------------------------------------------
+
+function setup_nodejs() {
+ local NODE_VERSION="${NODE_VERSION:-22}"
+ local NODE_MODULE="${NODE_MODULE:-}"
+
+ # ALWAYS clean up legacy installations first (nvm, etc.) to prevent conflicts
+ cleanup_legacy_install "nodejs"
+
+ # Get currently installed version
+ local CURRENT_NODE_VERSION=""
+ CURRENT_NODE_VERSION=$(is_tool_installed "nodejs" 2>/dev/null) || true
+
+ # Ensure jq is available for JSON parsing
+ if ! command -v jq &>/dev/null; then
+ $STD apt update
+ $STD apt install -y jq || {
+ msg_error "Failed to install jq"
+ return 1
+ }
+ fi
+
+ # Scenario 1: Already installed at target version - just update packages/modules
+ if [[ -n "$CURRENT_NODE_VERSION" && "$CURRENT_NODE_VERSION" == "$NODE_VERSION" ]]; then
+ msg_info "Update Node.js $NODE_VERSION"
+
+ ensure_apt_working || return 1
+
+ # Just update npm to latest
+ $STD npm install -g npm@latest 2>/dev/null || true
+
+ cache_installed_version "nodejs" "$NODE_VERSION"
+ msg_ok "Update Node.js $NODE_VERSION"
+ else
+ # Scenario 2: Different version installed - clean upgrade
+ if [[ -n "$CURRENT_NODE_VERSION" && "$CURRENT_NODE_VERSION" != "$NODE_VERSION" ]]; then
+ msg_info "Upgrade Node.js from $CURRENT_NODE_VERSION to $NODE_VERSION"
+ remove_old_tool_version "nodejs"
+ else
+ msg_info "Setup Node.js $NODE_VERSION"
+ fi
+
+ # Remove ALL Debian nodejs packages BEFORE adding NodeSource repo
+ if dpkg -l 2>/dev/null | grep -qE "^ii.*(nodejs|libnode|node-cjs|node-acorn|node-balanced|node-brace|node-minimatch|node-undici|node-xtend|node-corepack)"; then
+ msg_info "Removing Debian-packaged Node.js and dependencies"
+ $STD apt purge -y nodejs nodejs-doc libnode* node-* 2>/dev/null || true
+ $STD apt autoremove -y 2>/dev/null || true
+ $STD apt clean 2>/dev/null || true
+ fi
+
+ # Remove any APT pinning (not needed)
+ rm -f /etc/apt/preferences.d/nodesource 2>/dev/null || true
+
+ # Prepare repository (cleanup + validation)
+ prepare_repository_setup "nodesource" || {
+ msg_error "Failed to prepare Node.js repository"
+ return 1
+ }
+
+ # Setup NodeSource repository
+ manage_tool_repository "nodejs" "$NODE_VERSION" "https://deb.nodesource.com/node_${NODE_VERSION}.x" "https://deb.nodesource.com/gpgkey/nodesource-repo.gpg.key" || {
+ msg_error "Failed to setup Node.js repository"
+ return 1
+ }
+
+ # CRITICAL: Force APT cache refresh AFTER repository setup
+ # This ensures NodeSource is the only nodejs source in APT cache
+ $STD apt update
+
+ # Install dependencies (NodeSource is now the only nodejs source)
+ ensure_dependencies curl ca-certificates gnupg
+
+ # Install Node.js from NodeSource
+ install_packages_with_retry "nodejs" || {
+ msg_error "Failed to install Node.js ${NODE_VERSION} from NodeSource"
+ return 1
+ }
+
+ # Verify Node.js was installed correctly
+ if ! command -v node >/dev/null 2>&1; then
+ msg_error "Node.js binary not found after installation"
+ return 1
+ fi
+
+ local INSTALLED_NODE_VERSION
+ INSTALLED_NODE_VERSION=$(node -v 2>/dev/null | grep -oP '^v\K[0-9]+' || echo "0")
+ verify_tool_version "Node.js" "$NODE_VERSION" "$INSTALLED_NODE_VERSION" || true
+
+ # Verify npm is available (should come with NodeSource nodejs)
+ if ! command -v npm >/dev/null 2>&1; then
+ msg_error "npm not found after Node.js installation - repository issue?"
+ return 1
+ fi
+
+ # Update to latest npm (with version check to avoid incompatibility)
+ local NPM_VERSION
+ NPM_VERSION=$(npm -v 2>/dev/null || echo "0")
+ if [[ "$NPM_VERSION" != "0" ]]; then
+ $STD npm install -g npm@latest 2>/dev/null || {
+ msg_warn "Failed to update npm to latest version (continuing with bundled npm $NPM_VERSION)"
+ }
+ fi
+
+ cache_installed_version "nodejs" "$NODE_VERSION"
+ msg_ok "Setup Node.js $NODE_VERSION"
+ fi
+
+ export NODE_OPTIONS="--max-old-space-size=4096"
+
+ # Ensure valid working directory for npm (avoids uv_cwd error)
+ if [[ ! -d /opt ]]; then
+ mkdir -p /opt
+ fi
+ cd /opt || {
+ msg_error "Failed to set safe working directory before npm install"
+ return 1
+ }
+
+ # Install global Node modules
+ if [[ -n "$NODE_MODULE" ]]; then
+ IFS=',' read -ra MODULES <<<"$NODE_MODULE"
+ local failed_modules=0
+ for mod in "${MODULES[@]}"; do
+ local MODULE_NAME MODULE_REQ_VERSION MODULE_INSTALLED_VERSION
+ if [[ "$mod" == @*/*@* ]]; then
+ # Scoped package with version, e.g. @vue/cli-service@latest
+ MODULE_NAME="${mod%@*}"
+ MODULE_REQ_VERSION="${mod##*@}"
+ elif [[ "$mod" == *"@"* ]]; then
+ # Unscoped package with version, e.g. yarn@latest
+ MODULE_NAME="${mod%@*}"
+ MODULE_REQ_VERSION="${mod##*@}"
+ else
+ # No version specified
+ MODULE_NAME="$mod"
+ MODULE_REQ_VERSION="latest"
+ fi
+
+ # Check if the module is already installed
+ if $STD npm list -g --depth=0 "$MODULE_NAME" 2>&1 | grep -q "$MODULE_NAME@"; then
+ MODULE_INSTALLED_VERSION="$($STD npm list -g --depth=0 "$MODULE_NAME" 2>&1 | grep "$MODULE_NAME@" | awk -F@ '{print $2}' | tr -d '[:space:]')"
+ if [[ "$MODULE_REQ_VERSION" != "latest" && "$MODULE_REQ_VERSION" != "$MODULE_INSTALLED_VERSION" ]]; then
+ msg_info "Updating $MODULE_NAME from v$MODULE_INSTALLED_VERSION to v$MODULE_REQ_VERSION"
+ if ! $STD npm install -g "${MODULE_NAME}@${MODULE_REQ_VERSION}" 2>/dev/null; then
+ msg_warn "Failed to update $MODULE_NAME to version $MODULE_REQ_VERSION"
+ ((failed_modules++))
+ continue
+ fi
+ elif [[ "$MODULE_REQ_VERSION" == "latest" ]]; then
+ msg_info "Updating $MODULE_NAME to latest version"
+ if ! $STD npm install -g "${MODULE_NAME}@latest" 2>/dev/null; then
+ msg_warn "Failed to update $MODULE_NAME to latest version"
+ ((failed_modules++))
+ continue
+ fi
+ fi
+ else
+ msg_info "Installing $MODULE_NAME@$MODULE_REQ_VERSION"
+ if ! $STD npm install -g "${MODULE_NAME}@${MODULE_REQ_VERSION}" 2>/dev/null; then
+ msg_warn "Failed to install $MODULE_NAME@$MODULE_REQ_VERSION"
+ ((failed_modules++))
+ continue
+ fi
+ fi
+ done
+ if [[ $failed_modules -eq 0 ]]; then
+ msg_ok "Installed Node.js modules: $NODE_MODULE"
+ else
+ msg_warn "Installed Node.js modules with $failed_modules failure(s): $NODE_MODULE"
+ fi
+ fi
+}
+
+# ------------------------------------------------------------------------------
+# Installs PHP with selected modules and configures Apache/FPM support.
+#
+# Description:
+# - Adds Sury PHP repo if needed
+# - Installs default and user-defined modules
+# - Patches php.ini for CLI, Apache, and FPM as needed
+#
+# Variables:
+# PHP_VERSION - PHP version to install (default: 8.4)
+# PHP_MODULE - Additional comma-separated modules
+# PHP_APACHE - Set YES to enable PHP with Apache
+# PHP_FPM - Set YES to enable PHP-FPM
+# PHP_MEMORY_LIMIT - (default: 512M)
+# PHP_UPLOAD_MAX_FILESIZE - (default: 128M)
+# PHP_POST_MAX_SIZE - (default: 128M)
+# PHP_MAX_EXECUTION_TIME - (default: 300)
+# ------------------------------------------------------------------------------
+
+function setup_php() {
+ local PHP_VERSION="${PHP_VERSION:-8.4}"
+ local PHP_MODULE="${PHP_MODULE:-}"
+ local PHP_APACHE="${PHP_APACHE:-NO}"
+ local PHP_FPM="${PHP_FPM:-NO}"
+ local DISTRO_ID DISTRO_CODENAME
+ # NOTE(review): DISTRO_ID/DISTRO_CODENAME are computed here but never referenced
+ # later in this function - verify whether they are still needed
+ DISTRO_ID=$(awk -F= '/^ID=/{print $2}' /etc/os-release | tr -d '"')
+ DISTRO_CODENAME=$(awk -F= '/^VERSION_CODENAME=/{print $2}' /etc/os-release)
+
+ local DEFAULT_MODULES="bcmath,cli,curl,gd,intl,mbstring,opcache,readline,xml,zip"
+ local COMBINED_MODULES
+
+ local PHP_MEMORY_LIMIT="${PHP_MEMORY_LIMIT:-512M}"
+ local PHP_UPLOAD_MAX_FILESIZE="${PHP_UPLOAD_MAX_FILESIZE:-128M}"
+ local PHP_POST_MAX_SIZE="${PHP_POST_MAX_SIZE:-128M}"
+ local PHP_MAX_EXECUTION_TIME="${PHP_MAX_EXECUTION_TIME:-300}"
+
+ # Merge default + user-defined modules
+ if [[ -n "$PHP_MODULE" ]]; then
+ COMBINED_MODULES="${DEFAULT_MODULES},${PHP_MODULE}"
+ else
+ COMBINED_MODULES="${DEFAULT_MODULES}"
+ fi
+
+ # Deduplicate (first occurrence wins, order preserved)
+ COMBINED_MODULES=$(echo "$COMBINED_MODULES" | tr ',' '\n' | awk '!seen[$0]++' | paste -sd, -)
+
+ # Get current PHP-CLI version
+ local CURRENT_PHP=""
+ CURRENT_PHP=$(is_tool_installed "php" 2>/dev/null) || true
+
+ # Scenario 1: Already at target version - just update packages
+ if [[ -n "$CURRENT_PHP" && "$CURRENT_PHP" == "$PHP_VERSION" ]]; then
+ msg_info "Update PHP $PHP_VERSION"
+
+ # Ensure Sury repo is available
+ if [[ ! -f /etc/apt/sources.list.d/php.sources ]]; then
+ manage_tool_repository "php" "$PHP_VERSION" "" "https://packages.sury.org/debsuryorg-archive-keyring.deb" || {
+ msg_error "Failed to setup PHP repository"
+ return 1
+ }
+ fi
+
+ ensure_apt_working || return 1
+
+ # Perform upgrade with retry logic (non-fatal if fails)
+ upgrade_packages_with_retry "php${PHP_VERSION}" || true
+
+ cache_installed_version "php" "$PHP_VERSION"
+ msg_ok "Update PHP $PHP_VERSION"
+ else
+ # Scenario 2: Different version installed - clean upgrade
+ if [[ -n "$CURRENT_PHP" && "$CURRENT_PHP" != "$PHP_VERSION" ]]; then
+ msg_info "Upgrade PHP from $CURRENT_PHP to $PHP_VERSION"
+ # Stop and disable ALL PHP-FPM versions
+ stop_all_services "php.*-fpm"
+ remove_old_tool_version "php"
+ else
+ msg_info "Setup PHP $PHP_VERSION"
+ fi
+
+ # Prepare repository (cleanup + validation)
+ prepare_repository_setup "php" "deb.sury.org-php" || {
+ msg_error "Failed to prepare PHP repository"
+ return 1
+ }
+
+ # Setup Sury repository
+ manage_tool_repository "php" "$PHP_VERSION" "" "https://packages.sury.org/debsuryorg-archive-keyring.deb" || {
+ msg_error "Failed to setup PHP repository"
+ return 1
+ }
+
+ ensure_apt_working || return 1
+ fi
+
+ # Build module list - everything below runs in BOTH scenarios, so modules
+ # added to PHP_MODULE are installed even when the version is unchanged
+ local MODULE_LIST="php${PHP_VERSION}"
+ IFS=',' read -ra MODULES <<<"$COMBINED_MODULES"
+ for mod in "${MODULES[@]}"; do
+ if apt-cache show "php${PHP_VERSION}-${mod}" >/dev/null 2>&1; then
+ MODULE_LIST+=" php${PHP_VERSION}-${mod}"
+ fi
+ done
+ if [[ "$PHP_FPM" == "YES" ]]; then
+ MODULE_LIST+=" php${PHP_VERSION}-fpm"
+ fi
+
+ # install apache2 with PHP support if requested
+ if [[ "$PHP_APACHE" == "YES" ]]; then
+ if ! dpkg -l 2>/dev/null | grep -q "libapache2-mod-php${PHP_VERSION}"; then
+ install_packages_with_retry "apache2" "libapache2-mod-php${PHP_VERSION}" || {
+ msg_error "Failed to install Apache with PHP module"
+ return 1
+ }
+ fi
+ fi
+
+ # Install PHP packages with retry logic
+ # MODULE_LIST is deliberately unquoted so it word-splits into package names
+ install_packages_with_retry $MODULE_LIST || {
+ msg_error "Failed to install PHP packages"
+ return 1
+ }
+ cache_installed_version "php" "$PHP_VERSION"
+
+ # Patch all relevant php.ini files (CLI always; FPM/Apache only if enabled)
+ local PHP_INI_PATHS=("/etc/php/${PHP_VERSION}/cli/php.ini")
+ [[ "$PHP_FPM" == "YES" ]] && PHP_INI_PATHS+=("/etc/php/${PHP_VERSION}/fpm/php.ini")
+ [[ "$PHP_APACHE" == "YES" ]] && PHP_INI_PATHS+=("/etc/php/${PHP_VERSION}/apache2/php.ini")
+ for ini in "${PHP_INI_PATHS[@]}"; do
+ if [[ -f "$ini" ]]; then
+ $STD sed -i "s|^memory_limit = .*|memory_limit = ${PHP_MEMORY_LIMIT}|" "$ini"
+ $STD sed -i "s|^upload_max_filesize = .*|upload_max_filesize = ${PHP_UPLOAD_MAX_FILESIZE}|" "$ini"
+ $STD sed -i "s|^post_max_size = .*|post_max_size = ${PHP_POST_MAX_SIZE}|" "$ini"
+ $STD sed -i "s|^max_execution_time = .*|max_execution_time = ${PHP_MAX_EXECUTION_TIME}|" "$ini"
+ fi
+ done
+
+ # Patch Apache configuration if needed: disable every other phpX.Y module,
+ # then enable prefork + the target version's module
+ if [[ "$PHP_APACHE" == "YES" ]]; then
+ for mod in $(ls /etc/apache2/mods-enabled/ 2>/dev/null | grep -E '^php[0-9]\.[0-9]\.conf$' | sed 's/\.conf//'); do
+ if [[ "$mod" != "php${PHP_VERSION}" ]]; then
+ $STD a2dismod "$mod" || true
+ fi
+ done
+ $STD a2enmod mpm_prefork
+ $STD a2enmod "php${PHP_VERSION}"
+ safe_service_restart apache2 || true
+ fi
+
+ # Enable and restart PHP-FPM if requested
+ if [[ "$PHP_FPM" == "YES" ]]; then
+ if systemctl list-unit-files | grep -q "php${PHP_VERSION}-fpm.service"; then
+ $STD systemctl enable php${PHP_VERSION}-fpm
+ safe_service_restart php${PHP_VERSION}-fpm
+ fi
+ fi
+
+ msg_ok "Setup PHP $PHP_VERSION"
+}
+
+# ------------------------------------------------------------------------------
+# Installs or upgrades PostgreSQL and optional extensions/modules.
+#
+# Description:
+# - Detects existing PostgreSQL version
+# - Dumps all databases before upgrade
+# - Adds PGDG repo and installs specified version
+# - Installs optional PG_MODULES (e.g. postgis, contrib)
+# - Restores dumped data post-upgrade
+#
+# Variables:
+# PG_VERSION - Major PostgreSQL version (e.g. 15, 16) (default: 16)
+function setup_postgresql() {
+ # Installs or upgrades PostgreSQL from the PGDG repository.
+ # PG_VERSION - target major version (default: 16)
+ # PG_MODULES - optional comma-separated extension packages (e.g. postgis,contrib)
+ # On a major-version change the old cluster is dumped with pg_dumpall and
+ # replayed into the new server after installation.
+ local PG_VERSION="${PG_VERSION:-16}"
+ local PG_MODULES="${PG_MODULES:-}"
+ local DISTRO_ID DISTRO_CODENAME
+ DISTRO_ID=$(awk -F= '/^ID=/{print $2}' /etc/os-release | tr -d '"')
+ DISTRO_CODENAME=$(awk -F= '/^VERSION_CODENAME=/{print $2}' /etc/os-release)
+
+ # Get currently installed major version (empty when psql is absent)
+ local CURRENT_PG_VERSION=""
+ if command -v psql >/dev/null; then
+ CURRENT_PG_VERSION="$(psql -V 2>/dev/null | awk '{print $3}' | cut -d. -f1)"
+ fi
+
+ # Scenario 1: Already at correct version - refresh packages and modules only
+ if [[ "$CURRENT_PG_VERSION" == "$PG_VERSION" ]]; then
+ msg_info "Update PostgreSQL $PG_VERSION"
+ ensure_apt_working || return 1
+
+ # Perform upgrade with retry logic (non-fatal if fails)
+ upgrade_packages_with_retry "postgresql-${PG_VERSION}" "postgresql-client-${PG_VERSION}" 2>/dev/null || true
+ cache_installed_version "postgresql" "$PG_VERSION"
+ msg_ok "Update PostgreSQL $PG_VERSION"
+
+ # Still install modules if specified
+ if [[ -n "$PG_MODULES" ]]; then
+ IFS=',' read -ra MODULES <<<"$PG_MODULES"
+ for module in "${MODULES[@]}"; do
+ $STD apt install -y "postgresql-${PG_VERSION}-${module}" 2>/dev/null || true
+ done
+ fi
+ return 0
+ fi
+
+ # Scenario 2: Different version - backup, remove old, install new
+ local PG_BACKUP_FILE=""
+ if [[ -n "$CURRENT_PG_VERSION" ]]; then
+ msg_info "Upgrade PostgreSQL from $CURRENT_PG_VERSION to $PG_VERSION"
+ msg_info "Creating backup of PostgreSQL $CURRENT_PG_VERSION databases..."
+ # Remember the dump path so the restore step below reads this exact file
+ PG_BACKUP_FILE="/var/lib/postgresql/backup_$(date +%F)_v${CURRENT_PG_VERSION}.sql"
+ $STD runuser -u postgres -- pg_dumpall >"$PG_BACKUP_FILE" || {
+ msg_error "Failed to backup PostgreSQL databases"
+ return 1
+ }
+ $STD systemctl stop postgresql || true
+ $STD apt purge -y "postgresql-${CURRENT_PG_VERSION}" "postgresql-client-${CURRENT_PG_VERSION}" 2>/dev/null || true
+ else
+ msg_info "Setup PostgreSQL $PG_VERSION"
+ fi
+
+ # Scenario 3: Fresh install or after removal - setup repo and install
+ prepare_repository_setup "pgdg" "postgresql" || {
+ msg_error "Failed to prepare PostgreSQL repository"
+ return 1
+ }
+
+ # Pick the PGDG suite; testing/unstable codenames fall back to bookworm-pgdg
+ # when no dedicated suite exists yet
+ local SUITE
+ case "$DISTRO_CODENAME" in
+ trixie | forky | sid)
+ if verify_repo_available "https://apt.postgresql.org/pub/repos/apt" "trixie-pgdg"; then
+ SUITE="trixie-pgdg"
+ else
+ SUITE="bookworm-pgdg"
+ fi
+ ;;
+ *)
+ SUITE=$(get_fallback_suite "$DISTRO_ID" "$DISTRO_CODENAME" "https://apt.postgresql.org/pub/repos/apt")
+ SUITE="${SUITE}-pgdg"
+ ;;
+ esac
+
+ setup_deb822_repo \
+ "pgdg" \
+ "https://www.postgresql.org/media/keys/ACCC4CF8.asc" \
+ "https://apt.postgresql.org/pub/repos/apt" \
+ "$SUITE" \
+ "main" \
+ "amd64 arm64"
+
+ if ! $STD apt update; then
+ msg_error "APT update failed for PostgreSQL repository"
+ return 1
+ fi
+
+ # Install ssl-cert dependency if available
+ if apt-cache search "^ssl-cert$" 2>/dev/null | grep -q .; then
+ $STD apt install -y ssl-cert 2>/dev/null || true
+ fi
+
+ # Try multiple PostgreSQL package patterns with retry logic
+ local pg_install_success=false
+
+ if apt-cache search "^postgresql-${PG_VERSION}$" 2>/dev/null | grep -q . &&
+ install_packages_with_retry "postgresql-${PG_VERSION}" "postgresql-client-${PG_VERSION}"; then
+ pg_install_success=true
+ fi
+
+ if [[ "$pg_install_success" == false ]] &&
+ apt-cache search "^postgresql-server-${PG_VERSION}$" 2>/dev/null | grep -q . &&
+ $STD apt install -y "postgresql-server-${PG_VERSION}" "postgresql-client-${PG_VERSION}" 2>/dev/null; then
+ pg_install_success=true
+ fi
+
+ if [[ "$pg_install_success" == false ]] &&
+ apt-cache search "^postgresql$" 2>/dev/null | grep -q . &&
+ $STD apt install -y postgresql postgresql-client 2>/dev/null; then
+ pg_install_success=true
+ fi
+
+ if [[ "$pg_install_success" == false ]]; then
+ msg_error "PostgreSQL package not available for suite ${SUITE}"
+ return 1
+ fi
+
+ if ! command -v psql >/dev/null 2>&1; then
+ msg_error "PostgreSQL installed but psql command not found"
+ return 1
+ fi
+
+ # Restore database backup if we upgraded from previous version
+ if [[ -n "$PG_BACKUP_FILE" && -f "$PG_BACKUP_FILE" ]]; then
+ msg_info "Restoring PostgreSQL databases from backup..."
+ # BUGFIX: previously ran 'psql /dev/null', which never read the dump;
+ # feed the dump to psql on stdin so the data is actually replayed
+ $STD runuser -u postgres -- psql <"$PG_BACKUP_FILE" >/dev/null || {
+ msg_warn "Failed to restore database backup - this may be expected for major version upgrades"
+ }
+ fi
+
+ $STD systemctl enable --now postgresql 2>/dev/null || true
+
+ # Add PostgreSQL binaries to PATH - append rather than truncate so any
+ # other variables already in /etc/environment are preserved
+ if ! grep -q '/usr/lib/postgresql' /etc/environment 2>/dev/null; then
+ echo 'PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/lib/postgresql/'"${PG_VERSION}"'/bin"' >>/etc/environment
+ fi
+
+ cache_installed_version "postgresql" "$PG_VERSION"
+ msg_ok "Setup PostgreSQL $PG_VERSION"
+
+ # Install optional modules
+ if [[ -n "$PG_MODULES" ]]; then
+ IFS=',' read -ra MODULES <<<"$PG_MODULES"
+ for module in "${MODULES[@]}"; do
+ $STD apt install -y "postgresql-${PG_VERSION}-${module}" 2>/dev/null || true
+ done
    fi
}
@@ -1399,83 +3834,305 @@ function setup_gs() {
function setup_ruby() {
local RUBY_VERSION="${RUBY_VERSION:-3.4.4}"
local RUBY_INSTALL_RAILS="${RUBY_INSTALL_RAILS:-true}"
-
local RBENV_DIR="$HOME/.rbenv"
local RBENV_BIN="$RBENV_DIR/bin/rbenv"
local PROFILE_FILE="$HOME/.profile"
- local TMP_DIR
- TMP_DIR=$(mktemp -d)
+ local TMP_DIR=$(mktemp -d)
- msg_info "Setup Ruby $RUBY_VERSION"
+ # Get currently installed Ruby version
+ local CURRENT_RUBY_VERSION=""
+ if [[ -x "$RBENV_BIN" ]]; then
+ CURRENT_RUBY_VERSION=$("$RBENV_BIN" global 2>/dev/null || echo "")
+ fi
- local RBENV_RELEASE
- RBENV_RELEASE=$(curl -fsSL https://api.github.com/repos/rbenv/rbenv/releases/latest | grep '"tag_name":' | cut -d '"' -f4 | sed 's/^v//')
- if [[ -z "$RBENV_RELEASE" ]]; then
- msg_error "Failed to fetch latest rbenv version"
+ # Scenario 1: Already at correct Ruby version
+ if [[ "$CURRENT_RUBY_VERSION" == "$RUBY_VERSION" ]]; then
+ msg_info "Update Ruby $RUBY_VERSION"
+ cache_installed_version "ruby" "$RUBY_VERSION"
+ msg_ok "Update Ruby $RUBY_VERSION"
+ return 0
+ fi
+
+ # Scenario 2: Different version - reinstall
+ if [[ -n "$CURRENT_RUBY_VERSION" ]]; then
+ msg_info "Upgrade Ruby from $CURRENT_RUBY_VERSION to $RUBY_VERSION"
+ else
+ msg_info "Setup Ruby $RUBY_VERSION"
+ fi
+
+ ensure_apt_working || return 1
+
+ # Install build dependencies with fallbacks
+ local ruby_deps=()
+ local dep_variations=(
+ "jq"
+ "autoconf"
+ "patch"
+ "build-essential"
+ "libssl-dev"
+ "libyaml-dev"
+ "libreadline-dev|libreadline6-dev"
+ "zlib1g-dev"
+ "libgmp-dev"
+ "libncurses-dev|libncurses5-dev"
+ "libffi-dev"
+ "libgdbm-dev"
+ "libdb-dev"
+ "uuid-dev"
+ )
+
+ for dep_pattern in "${dep_variations[@]}"; do
+ if [[ "$dep_pattern" == *"|"* ]]; then
+ IFS='|' read -ra variations <<<"$dep_pattern"
+ for var in "${variations[@]}"; do
+ if apt-cache search "^${var}$" 2>/dev/null | grep -q .; then
+ ruby_deps+=("$var")
+ break
+ fi
+ done
+ else
+ if apt-cache search "^${dep_pattern}$" 2>/dev/null | grep -q .; then
+ ruby_deps+=("$dep_pattern")
+ fi
+ fi
+ done
+
+ if [[ ${#ruby_deps[@]} -gt 0 ]]; then
+ $STD apt install -y "${ruby_deps[@]}" 2>/dev/null || true
+ else
+ msg_error "No Ruby build dependencies available"
rm -rf "$TMP_DIR"
return 1
fi
- curl -fsSL "https://github.com/rbenv/rbenv/archive/refs/tags/v${RBENV_RELEASE}.tar.gz" -o "$TMP_DIR/rbenv.tar.gz"
- tar -xzf "$TMP_DIR/rbenv.tar.gz" -C "$TMP_DIR"
- mkdir -p "$RBENV_DIR"
- cp -r "$TMP_DIR/rbenv-${RBENV_RELEASE}/." "$RBENV_DIR/"
- cd "$RBENV_DIR" && src/configure && $STD make -C src
+ # Download and build rbenv if needed
+ if [[ ! -x "$RBENV_BIN" ]]; then
+ local RBENV_RELEASE
+ local rbenv_json
+ rbenv_json=$(curl -fsSL --max-time 15 https://api.github.com/repos/rbenv/rbenv/releases/latest 2>/dev/null || echo "")
- local RUBY_BUILD_RELEASE
- RUBY_BUILD_RELEASE=$(curl -fsSL https://api.github.com/repos/rbenv/ruby-build/releases/latest | grep '"tag_name":' | cut -d '"' -f4 | sed 's/^v//')
- if [[ -z "$RUBY_BUILD_RELEASE" ]]; then
- msg_error "Failed to fetch latest ruby-build version"
- rm -rf "$TMP_DIR"
- return 1
+ if [[ -z "$rbenv_json" ]]; then
+ msg_error "Failed to fetch latest rbenv version from GitHub"
+ rm -rf "$TMP_DIR"
+ return 1
+ fi
+
+ RBENV_RELEASE=$(echo "$rbenv_json" | jq -r '.tag_name' 2>/dev/null | sed 's/^v//' || echo "")
+
+ if [[ -z "$RBENV_RELEASE" ]]; then
+ msg_error "Could not parse rbenv version from GitHub response"
+ rm -rf "$TMP_DIR"
+ return 1
+ fi
+
+ curl -fsSL "https://github.com/rbenv/rbenv/archive/refs/tags/v${RBENV_RELEASE}.tar.gz" -o "$TMP_DIR/rbenv.tar.gz" || {
+ msg_error "Failed to download rbenv"
+ rm -rf "$TMP_DIR"
+ return 1
+ }
+
+ tar -xzf "$TMP_DIR/rbenv.tar.gz" -C "$TMP_DIR" || {
+ msg_error "Failed to extract rbenv"
+ rm -rf "$TMP_DIR"
+ return 1
+ }
+
+ mkdir -p "$RBENV_DIR"
+ cp -r "$TMP_DIR/rbenv-${RBENV_RELEASE}/." "$RBENV_DIR/"
+ (cd "$RBENV_DIR" && src/configure && $STD make -C src) || {
+ msg_error "Failed to build rbenv"
+ rm -rf "$TMP_DIR"
+ return 1
+ }
+
+ # Setup profile
+ if ! grep -q 'rbenv init' "$PROFILE_FILE" 2>/dev/null; then
+ echo 'export PATH="$HOME/.rbenv/bin:$PATH"' >>"$PROFILE_FILE"
+ echo 'eval "$(rbenv init -)"' >>"$PROFILE_FILE"
+ fi
fi
- curl -fsSL "https://github.com/rbenv/ruby-build/archive/refs/tags/v${RUBY_BUILD_RELEASE}.tar.gz" -o "$TMP_DIR/ruby-build.tar.gz"
- tar -xzf "$TMP_DIR/ruby-build.tar.gz" -C "$TMP_DIR"
- mkdir -p "$RBENV_DIR/plugins/ruby-build"
- cp -r "$TMP_DIR/ruby-build-${RUBY_BUILD_RELEASE}/." "$RBENV_DIR/plugins/ruby-build/"
- echo "$RUBY_BUILD_RELEASE" >"$RBENV_DIR/plugins/ruby-build/RUBY_BUILD_version.txt"
+ # Install ruby-build plugin
+ if [[ ! -d "$RBENV_DIR/plugins/ruby-build" ]]; then
+ local RUBY_BUILD_RELEASE
+ local ruby_build_json
+ ruby_build_json=$(curl -fsSL --max-time 15 https://api.github.com/repos/rbenv/ruby-build/releases/latest 2>/dev/null || echo "")
- if ! grep -q 'rbenv init' "$PROFILE_FILE"; then
- echo 'export PATH="$HOME/.rbenv/bin:$PATH"' >>"$PROFILE_FILE"
- echo 'eval "$(rbenv init -)"' >>"$PROFILE_FILE"
+ if [[ -z "$ruby_build_json" ]]; then
+ msg_error "Failed to fetch latest ruby-build version from GitHub"
+ rm -rf "$TMP_DIR"
+ return 1
+ fi
+
+ RUBY_BUILD_RELEASE=$(echo "$ruby_build_json" | jq -r '.tag_name' 2>/dev/null | sed 's/^v//' || echo "")
+
+ if [[ -z "$RUBY_BUILD_RELEASE" ]]; then
+ msg_error "Could not parse ruby-build version from GitHub response"
+ rm -rf "$TMP_DIR"
+ return 1
+ fi
+
+ curl -fsSL "https://github.com/rbenv/ruby-build/archive/refs/tags/v${RUBY_BUILD_RELEASE}.tar.gz" -o "$TMP_DIR/ruby-build.tar.gz" || {
+ msg_error "Failed to download ruby-build"
+ rm -rf "$TMP_DIR"
+ return 1
+ }
+
+ tar -xzf "$TMP_DIR/ruby-build.tar.gz" -C "$TMP_DIR" || {
+ msg_error "Failed to extract ruby-build"
+ rm -rf "$TMP_DIR"
+ return 1
+ }
+
+ mkdir -p "$RBENV_DIR/plugins/ruby-build"
+ cp -r "$TMP_DIR/ruby-build-${RUBY_BUILD_RELEASE}/." "$RBENV_DIR/plugins/ruby-build/"
fi
+ # Setup PATH and install Ruby version
export PATH="$RBENV_DIR/bin:$PATH"
- eval "$("$RBENV_BIN" init - bash)"
+ eval "$("$RBENV_BIN" init - bash)" 2>/dev/null || true
- if ! "$RBENV_BIN" versions --bare | grep -qx "$RUBY_VERSION"; then
- $STD "$RBENV_BIN" install "$RUBY_VERSION"
+ if ! "$RBENV_BIN" versions --bare 2>/dev/null | grep -qx "$RUBY_VERSION"; then
+ $STD "$RBENV_BIN" install "$RUBY_VERSION" || {
+ msg_error "Failed to install Ruby $RUBY_VERSION"
+ rm -rf "$TMP_DIR"
+ return 1
+ }
fi
- "$RBENV_BIN" global "$RUBY_VERSION"
+ "$RBENV_BIN" global "$RUBY_VERSION" || {
+ msg_error "Failed to set Ruby $RUBY_VERSION as global version"
+ rm -rf "$TMP_DIR"
+ return 1
+ }
+
hash -r
+ # Install Rails if requested
if [[ "$RUBY_INSTALL_RAILS" == "true" ]]; then
- msg_info "Setup Rails via gem"
- gem install rails
- msg_ok "Setup Rails $(rails -v)"
+ $STD gem install rails || {
+ msg_warn "Failed to install Rails - Ruby installation successful"
+ }
fi
rm -rf "$TMP_DIR"
+ cache_installed_version "ruby" "$RUBY_VERSION"
msg_ok "Setup Ruby $RUBY_VERSION"
}
# ------------------------------------------------------------------------------
-# Creates and installs self-signed certificates.
+# Installs or upgrades ClickHouse database server.
#
# Description:
-# - Create a self-signed certificate with option to override application name
+# - Adds ClickHouse official repository
+# - Installs specified version
+# - Configures systemd service
+# - Supports Debian/Ubuntu with fallback mechanism
#
# Variables:
-# APP - Application name (default: $APPLICATION variable)
+# CLICKHOUSE_VERSION - ClickHouse version to install (default: latest)
# ------------------------------------------------------------------------------
-function create_selfsigned_certs() {
- local app=${APP:-$(echo "${APPLICATION,,}" | tr -d ' ')}
- $STD openssl req -x509 -nodes -days 365 -newkey rsa:4096 \
- -keyout /etc/ssl/private/"$app"-selfsigned.key \
- -out /etc/ssl/certs/"$app"-selfsigned.crt \
- -subj "/C=US/O=$app/OU=Domain Control Validated/CN=localhost"
+
+function setup_clickhouse() {
+ local CLICKHOUSE_VERSION="${CLICKHOUSE_VERSION:-latest}"
+ local DISTRO_ID DISTRO_CODENAME
+ # NOTE(review): DISTRO_ID/DISTRO_CODENAME are computed but not referenced
+ # later in this function - verify whether they are still needed
+ DISTRO_ID=$(awk -F= '/^ID=/{print $2}' /etc/os-release | tr -d '"')
+ DISTRO_CODENAME=$(awk -F= '/^VERSION_CODENAME=/{print $2}' /etc/os-release)
+
+ # Resolve "latest" version (package server first, GitHub API as fallback)
+ if [[ "$CLICKHOUSE_VERSION" == "latest" ]]; then
+ CLICKHOUSE_VERSION=$(curl -fsSL --max-time 15 https://packages.clickhouse.com/tgz/stable/ 2>/dev/null |
+ grep -oP 'clickhouse-common-static-\K[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+' |
+ sort -V | tail -n1 || echo "")
+
+ # Fallback to GitHub API if package server failed
+ if [[ -z "$CLICKHOUSE_VERSION" ]]; then
+ CLICKHOUSE_VERSION=$(curl -fsSL --max-time 15 https://api.github.com/repos/ClickHouse/ClickHouse/releases/latest 2>/dev/null |
+ grep -oP '"tag_name":\s*"v\K[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+' | head -n1 || echo "")
+ fi
+
+ [[ -z "$CLICKHOUSE_VERSION" ]] && {
+ msg_error "Could not determine latest ClickHouse version from any source"
+ return 1
+ }
+ fi
+
+ # Get currently installed version
+ local CURRENT_VERSION=""
+ if command -v clickhouse-server >/dev/null 2>&1; then
+ CURRENT_VERSION=$(clickhouse-server --version 2>/dev/null | grep -oP 'version \K[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+' | head -n1)
+ fi
+
+ # Scenario 1: Already at target version - just update packages
+ if [[ -n "$CURRENT_VERSION" && "$CURRENT_VERSION" == "$CLICKHOUSE_VERSION" ]]; then
+ msg_info "Update ClickHouse $CLICKHOUSE_VERSION"
+ ensure_apt_working || return 1
+
+ # Perform upgrade with retry logic (non-fatal if fails)
+ upgrade_packages_with_retry "clickhouse-server" "clickhouse-client" || true
+ cache_installed_version "clickhouse" "$CLICKHOUSE_VERSION"
+ msg_ok "Update ClickHouse $CLICKHOUSE_VERSION"
+ return 0
+ fi
+
+ # Scenario 2: Different version - clean upgrade
+ if [[ -n "$CURRENT_VERSION" && "$CURRENT_VERSION" != "$CLICKHOUSE_VERSION" ]]; then
+ msg_info "Upgrade ClickHouse from $CURRENT_VERSION to $CLICKHOUSE_VERSION"
+ stop_all_services "clickhouse-server"
+ remove_old_tool_version "clickhouse"
+ else
+ msg_info "Setup ClickHouse $CLICKHOUSE_VERSION"
+ fi
+
+ ensure_dependencies apt-transport-https ca-certificates dirmngr gnupg
+
+ # Prepare repository (cleanup + validation)
+ prepare_repository_setup "clickhouse" || {
+ msg_error "Failed to prepare ClickHouse repository"
+ return 1
+ }
+
+ # Setup repository (ClickHouse uses 'stable' suite)
+ setup_deb822_repo \
+ "clickhouse" \
+ "https://packages.clickhouse.com/rpm/lts/repodata/repomd.xml.key" \
+ "https://packages.clickhouse.com/deb" \
+ "stable" \
+ "main" \
+ "amd64 arm64"
+
+ # Install packages with retry logic
+ # NOTE(review): apt installs the repo's current stable build without a version
+ # pin; the resolved CLICKHOUSE_VERSION is only used for comparison and caching
+ export DEBIAN_FRONTEND=noninteractive
+ $STD apt update || {
+ msg_error "APT update failed for ClickHouse repository"
+ return 1
+ }
+
+ install_packages_with_retry "clickhouse-server" "clickhouse-client" || {
+ msg_error "Failed to install ClickHouse packages"
+ return 1
+ }
+
+ # Verify installation
+ if ! command -v clickhouse-server >/dev/null 2>&1; then
+ msg_error "ClickHouse installation completed but clickhouse-server command not found"
+ return 1
+ fi
+
+ # Setup data directory (chown only if the package created the service user)
+ mkdir -p /var/lib/clickhouse
+ if id clickhouse >/dev/null 2>&1; then
+ chown -R clickhouse:clickhouse /var/lib/clickhouse
+ fi
+
+ # Enable and start service (both best-effort, non-fatal)
+ $STD systemctl enable clickhouse-server || {
+ msg_warn "Failed to enable clickhouse-server service"
+ }
+ safe_service_restart clickhouse-server || true
+
+ cache_installed_version "clickhouse" "$CLICKHOUSE_VERSION"
+ msg_ok "Setup ClickHouse $CLICKHOUSE_VERSION"
+}
# ------------------------------------------------------------------------------
@@ -1500,21 +4157,42 @@ function setup_rust() {
local RUST_CRATES="${RUST_CRATES:-}"
local CARGO_BIN="${HOME}/.cargo/bin"
- # rustup & toolchain
- if ! command -v rustup &>/dev/null; then
- msg_info "Setup Rust"
- curl -fsSL https://sh.rustup.rs | $STD sh -s -- -y --default-toolchain "$RUST_TOOLCHAIN"
- export PATH="$CARGO_BIN:$PATH"
- echo 'export PATH="$HOME/.cargo/bin:$PATH"' >>"$HOME/.profile"
- msg_ok "Setup Rust"
- else
- $STD rustup install "$RUST_TOOLCHAIN"
- $STD rustup default "$RUST_TOOLCHAIN"
- $STD rustup update "$RUST_TOOLCHAIN"
- msg_ok "Rust toolchain set to $RUST_TOOLCHAIN"
+ # Get currently installed version
+ local CURRENT_VERSION=""
+ if command -v rustc &>/dev/null; then
+ CURRENT_VERSION=$(rustc --version 2>/dev/null | awk '{print $2}')
fi
- # install/update crates
+ # Scenario 1: Rustup not installed - fresh install
+ if ! command -v rustup &>/dev/null; then
+ msg_info "Setup Rust ($RUST_TOOLCHAIN)"
+ curl -fsSL https://sh.rustup.rs | $STD sh -s -- -y --default-toolchain "$RUST_TOOLCHAIN" || {
+ msg_error "Failed to install Rust"
+ return 1
+ }
+ export PATH="$CARGO_BIN:$PATH"
+ echo 'export PATH="$HOME/.cargo/bin:$PATH"' >>"$HOME/.profile"
+ local RUST_VERSION=$(rustc --version 2>/dev/null | awk '{print $2}')
+ cache_installed_version "rust" "$RUST_VERSION"
+ msg_ok "Setup Rust $RUST_VERSION"
+ else
+ # Scenario 2: Rustup already installed - update/maintain
+ msg_info "Update Rust ($RUST_TOOLCHAIN)"
+ $STD rustup install "$RUST_TOOLCHAIN" || {
+ msg_error "Failed to install Rust toolchain $RUST_TOOLCHAIN"
+ return 1
+ }
+ $STD rustup default "$RUST_TOOLCHAIN" || {
+ msg_error "Failed to set default Rust toolchain"
+ return 1
+ }
+ $STD rustup update "$RUST_TOOLCHAIN" || true
+ local RUST_VERSION=$(rustc --version 2>/dev/null | awk '{print $2}')
+ cache_installed_version "rust" "$RUST_VERSION"
+ msg_ok "Update Rust $RUST_VERSION"
+ fi
+
+ # Install global crates
if [[ -n "$RUST_CRATES" ]]; then
IFS=',' read -ra CRATES <<<"$RUST_CRATES"
for crate in "${CRATES[@]}"; do
@@ -1531,49 +4209,179 @@ function setup_rust() {
if [[ -n "$INSTALLED_VER" ]]; then
if [[ -n "$VER" && "$VER" != "$INSTALLED_VER" ]]; then
- msg_info "Update $NAME: $INSTALLED_VER → $VER"
$STD cargo install "$NAME" --version "$VER" --force
- msg_ok "Updated $NAME to $VER"
elif [[ -z "$VER" ]]; then
- msg_info "Update $NAME: $INSTALLED_VER → latest"
$STD cargo install "$NAME" --force
- msg_ok "Updated $NAME to latest"
fi
else
- msg_info "Setup $NAME ${VER:+($VER)}"
$STD cargo install "$NAME" ${VER:+--version "$VER"}
- msg_ok "Setup $NAME ${VER:-latest}"
fi
done
- msg_ok "Setup Rust"
fi
}
# ------------------------------------------------------------------------------
-# Installs Adminer (Debian/Ubuntu via APT, Alpine via direct download).
-#
-# Description:
-# - Adds Adminer to Apache or web root
-# - Supports Alpine and Debian-based systems
+# Installs or upgrades uv (Python package manager) from GitHub releases.
+# - Downloads platform-specific tarball (no install.sh!)
+# - Extracts uv binary
+# - Places it in /usr/local/bin
+# - Optionally installs a specific Python version via uv
# ------------------------------------------------------------------------------
-function setup_adminer() {
- if grep -qi alpine /etc/os-release; then
- msg_info "Setup Adminer (Alpine)"
- mkdir -p /var/www/localhost/htdocs/adminer
- if ! curl -fsSL https://github.com/vrana/adminer/releases/latest/download/adminer.php \
- -o /var/www/localhost/htdocs/adminer/index.php; then
- msg_error "Failed to download Adminer"
- return 1
- fi
- msg_ok "Adminer available at /adminer (Alpine)"
+function setup_uv() {
+ local UV_BIN="/usr/local/bin/uv"
+ local UVX_BIN="/usr/local/bin/uvx"
+ local TMP_DIR=$(mktemp -d)
+ local CACHED_VERSION
+
+ # Clean up the temp dir on exit
+ # NOTE(review): this overwrites any previously installed EXIT trap - confirm
+ # no caller relies on its own EXIT trap surviving this function
+ trap "rm -rf '$TMP_DIR'" EXIT
+
+ # NOTE(review): CACHED_VERSION is read here but never used below - verify
+ CACHED_VERSION=$(get_cached_version "uv")
+
+ # Architecture Detection (musl for Alpine, gnu otherwise)
+ local ARCH=$(uname -m)
+ local OS_TYPE=""
+ local UV_TAR=""
+
+ if grep -qi "alpine" /etc/os-release; then
+ OS_TYPE="musl"
   else
-    msg_info "Setup Adminer (Debian/Ubuntu)"
-    $STD apt-get install -y adminer
-    $STD a2enconf adminer
-    $STD systemctl reload apache2
-    msg_ok "Adminer available at /adminer (Debian/Ubuntu)"
+ OS_TYPE="gnu"
   fi
+
+ case "$ARCH" in
+ x86_64)
+ UV_TAR="uv-x86_64-unknown-linux-${OS_TYPE}.tar.gz"
+ ;;
+ aarch64)
+ UV_TAR="uv-aarch64-unknown-linux-${OS_TYPE}.tar.gz"
+ ;;
+ i686)
+ UV_TAR="uv-i686-unknown-linux-${OS_TYPE}.tar.gz"
+ ;;
+ *)
+ msg_error "Unsupported architecture: $ARCH (supported: x86_64, aarch64, i686)"
+ return 1
+ ;;
+ esac
+
+ ensure_dependencies jq
+
+ # Fetch latest version from the GitHub releases API
+ local releases_json
+ releases_json=$(curl -fsSL --max-time 15 \
+ "https://api.github.com/repos/astral-sh/uv/releases/latest" 2>/dev/null || echo "")
+
+ if [[ -z "$releases_json" ]]; then
+ msg_error "Could not fetch latest uv version from GitHub API"
+ return 1
+ fi
+
+ local LATEST_VERSION
+ LATEST_VERSION=$(echo "$releases_json" | jq -r '.tag_name' 2>/dev/null | sed 's/^v//')
+
+ if [[ -z "$LATEST_VERSION" ]]; then
+ msg_error "Could not parse uv version from GitHub API response"
+ return 1
+ fi
+
+ # Get currently installed version
+ local INSTALLED_VERSION=""
+ if [[ -x "$UV_BIN" ]]; then
+ INSTALLED_VERSION=$("$UV_BIN" --version 2>/dev/null | awk '{print $2}')
+ fi
+
+ # Scenario 1: Already at latest version - nothing to download
+ if [[ -n "$INSTALLED_VERSION" && "$INSTALLED_VERSION" == "$LATEST_VERSION" ]]; then
+ cache_installed_version "uv" "$LATEST_VERSION"
+
+ # Check if uvx is needed and missing
+ if [[ "${USE_UVX:-NO}" == "YES" ]] && [[ ! -x "$UVX_BIN" ]]; then
+ msg_info "Installing uvx wrapper"
+ _install_uvx_wrapper || return 1
+ msg_ok "uvx wrapper installed"
+ fi
+
+ return 0
+ fi
+
+ # Scenario 2: New install or upgrade
+ if [[ -n "$INSTALLED_VERSION" && "$INSTALLED_VERSION" != "$LATEST_VERSION" ]]; then
+ msg_info "Upgrade uv from $INSTALLED_VERSION to $LATEST_VERSION"
+ else
+ msg_info "Setup uv $LATEST_VERSION"
+ fi
+
+ local UV_URL="https://github.com/astral-sh/uv/releases/download/${LATEST_VERSION}/${UV_TAR}"
+
+ $STD curl -fsSL "$UV_URL" -o "$TMP_DIR/uv.tar.gz" || {
+ msg_error "Failed to download uv from $UV_URL"
+ return 1
+ }
+
+ # Extract
+ $STD tar -xzf "$TMP_DIR/uv.tar.gz" -C "$TMP_DIR" || {
+ msg_error "Failed to extract uv"
+ return 1
+ }
+
+ # Find and install uv binary (tarball extracts to uv-VERSION-ARCH/ directory)
+ local UV_BINARY=$(find "$TMP_DIR" -name "uv" -type f -executable | head -n1)
+ if [[ ! -f "$UV_BINARY" ]]; then
+ msg_error "Could not find uv binary in extracted tarball"
+ return 1
+ fi
+
+ $STD install -m 755 "$UV_BINARY" "$UV_BIN" || {
+ msg_error "Failed to install uv binary"
+ return 1
+ }
+
+ ensure_usr_local_bin_persist
+ export PATH="/usr/local/bin:$PATH"
+
+ # Optional: Install uvx wrapper
+ if [[ "${USE_UVX:-NO}" == "YES" ]]; then
+ msg_info "Installing uvx wrapper"
+ _install_uvx_wrapper || {
+ msg_error "Failed to install uvx wrapper"
+ return 1
+ }
+ msg_ok "uvx wrapper installed"
+ fi
+
+ # Optional: Generate shell completions (best-effort)
+ $STD uv generate-shell-completion bash >/etc/bash_completion.d/uv 2>/dev/null || true
+ $STD uv generate-shell-completion zsh >/usr/share/zsh/site-functions/_uv 2>/dev/null || true
+
+ # Optional: Install specific Python version if requested via PYTHON_VERSION
+ if [[ -n "${PYTHON_VERSION:-}" ]]; then
+ msg_info "Installing Python $PYTHON_VERSION via uv"
+ $STD uv python install "$PYTHON_VERSION" || {
+ msg_error "Failed to install Python $PYTHON_VERSION"
+ return 1
+ }
+ msg_ok "Python $PYTHON_VERSION installed"
+ fi
+
+ cache_installed_version "uv" "$LATEST_VERSION"
+ msg_ok "Setup uv $LATEST_VERSION"
+}
+
+# Helper: installs a small 'uvx' wrapper script at /usr/local/bin/uvx that
+# delegates to 'uv tool run'. Always overwrites any existing wrapper; returns 0.
+_install_uvx_wrapper() {
+ local UVX_BIN="/usr/local/bin/uvx"
+
+ # Quoted 'EOF' heredoc: the wrapper body is written literally, no expansion
+ cat >"$UVX_BIN" <<'EOF'
+#!/bin/bash
+# uvx - Run Python applications from PyPI as command-line tools
+# Wrapper for: uv tool run
+exec /usr/local/bin/uv tool run "$@"
+EOF
+
+ chmod +x "$UVX_BIN"
+ return 0
+}
# ------------------------------------------------------------------------------
@@ -1586,366 +4394,76 @@ function setup_adminer() {
# ------------------------------------------------------------------------------
function setup_yq() {
- local TMP_DIR
- TMP_DIR=$(mktemp -d)
- local CURRENT_VERSION=""
+ local TMP_DIR=$(mktemp -d)
local BINARY_PATH="/usr/local/bin/yq"
local GITHUB_REPO="mikefarah/yq"
- if ! command -v jq &>/dev/null; then
- $STD apt-get update
- $STD apt-get install -y jq || {
- msg_error "Failed to install jq"
- rm -rf "$TMP_DIR"
- return 1
- }
- fi
+ ensure_dependencies jq
+ ensure_usr_local_bin_persist
+ # Remove non-mikefarah implementations
if command -v yq &>/dev/null; then
if ! yq --version 2>&1 | grep -q 'mikefarah'; then
rm -f "$(command -v yq)"
- else
- CURRENT_VERSION=$(yq --version | awk '{print $NF}' | sed 's/^v//')
fi
fi
- local RELEASE_JSON
- RELEASE_JSON=$(curl -fsSL "https://api.github.com/repos/${GITHUB_REPO}/releases/latest")
local LATEST_VERSION
- LATEST_VERSION=$(echo "$RELEASE_JSON" | jq -r '.tag_name' | sed 's/^v//')
+ local releases_json
+ releases_json=$(curl -fsSL --max-time 15 "https://api.github.com/repos/${GITHUB_REPO}/releases/latest" 2>/dev/null || echo "")
+
+ if [[ -z "$releases_json" ]]; then
+ msg_error "Could not fetch latest yq version from GitHub API"
+ rm -rf "$TMP_DIR"
+ return 1
+ fi
+
+ LATEST_VERSION=$(echo "$releases_json" | jq -r '.tag_name' 2>/dev/null | sed 's/^v//' || echo "")
if [[ -z "$LATEST_VERSION" ]]; then
- msg_error "Could not determine latest yq version from GitHub."
+ msg_error "Could not parse yq version from GitHub API response"
rm -rf "$TMP_DIR"
return 1
fi
- if [[ -n "$CURRENT_VERSION" && "$CURRENT_VERSION" == "$LATEST_VERSION" ]]; then
- return
+ # Get currently installed version
+ local INSTALLED_VERSION=""
+ if command -v yq &>/dev/null && yq --version 2>&1 | grep -q 'mikefarah'; then
+ INSTALLED_VERSION=$(yq --version 2>/dev/null | awk '{print $NF}' | sed 's/^v//')
fi
- msg_info "Setup yq ($LATEST_VERSION)"
- curl -fsSL "https://github.com/${GITHUB_REPO}/releases/download/v${LATEST_VERSION}/yq_linux_amd64" -o "$TMP_DIR/yq"
+ # Scenario 1: Already at latest version
+ if [[ -n "$INSTALLED_VERSION" && "$INSTALLED_VERSION" == "$LATEST_VERSION" ]]; then
+ cache_installed_version "yq" "$LATEST_VERSION"
+ rm -rf "$TMP_DIR"
+ return 0
+ fi
+
+ # Scenario 2: New install or upgrade
+ if [[ -n "$INSTALLED_VERSION" && "$INSTALLED_VERSION" != "$LATEST_VERSION" ]]; then
+ msg_info "Upgrade yq from $INSTALLED_VERSION to $LATEST_VERSION"
+ else
+ msg_info "Setup yq $LATEST_VERSION"
+ fi
+
+ curl -fsSL "https://github.com/${GITHUB_REPO}/releases/download/v${LATEST_VERSION}/yq_linux_amd64" -o "$TMP_DIR/yq" || {
+ msg_error "Failed to download yq"
+ rm -rf "$TMP_DIR"
+ return 1
+ }
+
chmod +x "$TMP_DIR/yq"
- mv "$TMP_DIR/yq" "$BINARY_PATH"
-
- if [[ ! -x "$BINARY_PATH" ]]; then
- msg_error "Failed to install yq to $BINARY_PATH"
+ mv "$TMP_DIR/yq" "$BINARY_PATH" || {
+ msg_error "Failed to install yq"
rm -rf "$TMP_DIR"
return 1
- fi
+ }
rm -rf "$TMP_DIR"
hash -r
local FINAL_VERSION
- FINAL_VERSION=$("$BINARY_PATH" --version 2>/dev/null | awk '{print $NF}')
- if [[ "$FINAL_VERSION" == "v$LATEST_VERSION" ]]; then
- msg_ok "Setup yq ($LATEST_VERSION)"
- else
- msg_error "yq installation incomplete or version mismatch"
- fi
-}
-
-# ------------------------------------------------------------------------------
-# Installs ImageMagick 7 from source (Debian/Ubuntu only).
-#
-# Description:
-# - Downloads the latest ImageMagick source tarball
-# - Builds and installs ImageMagick to /usr/local
-# - Configures dynamic linker (ldconfig)
-#
-# Notes:
-# - Requires: build-essential, libtool, libjpeg-dev, libpng-dev, etc.
-# ------------------------------------------------------------------------------
-function setup_imagemagick() {
- local TMP_DIR
- TMP_DIR=$(mktemp -d)
- local VERSION=""
- local BINARY_PATH="/usr/local/bin/magick"
-
- if command -v magick &>/dev/null; then
- VERSION=$(magick -version | awk '/^Version/ {print $3}')
- msg_ok "ImageMagick already installed ($VERSION)"
- return 0
- fi
-
- msg_info "Setup ImageMagick (Patience)"
- $STD apt-get update
- $STD apt-get install -y \
- build-essential \
- libtool \
- libjpeg-dev \
- libpng-dev \
- libtiff-dev \
- libwebp-dev \
- libheif-dev \
- libde265-dev \
- libopenjp2-7-dev \
- libxml2-dev \
- liblcms2-dev \
- libfreetype6-dev \
- libraw-dev \
- libfftw3-dev \
- liblqr-1-0-dev \
- libgsl-dev \
- pkg-config \
- ghostscript
-
- curl -fsSL https://imagemagick.org/archive/ImageMagick.tar.gz -o "$TMP_DIR/ImageMagick.tar.gz"
- tar -xzf "$TMP_DIR/ImageMagick.tar.gz" -C "$TMP_DIR"
- cd "$TMP_DIR"/ImageMagick-* || {
- msg_error "Source extraction failed"
- rm -rf "$TMP_DIR"
- return 1
- }
-
- ./configure --disable-static >/dev/null
- $STD make
- $STD make install
- $STD ldconfig /usr/local/lib
-
- if [[ ! -x "$BINARY_PATH" ]]; then
- msg_error "ImageMagick installation failed"
- rm -rf "$TMP_DIR"
- return 1
- fi
-
- VERSION=$("$BINARY_PATH" -version | awk '/^Version/ {print $3}')
- rm -rf "$TMP_DIR"
- ensure_usr_local_bin_persist
- msg_ok "Setup ImageMagick $VERSION"
-}
-
-# ------------------------------------------------------------------------------
-# Installs FFmpeg from source or prebuilt binary (Debian/Ubuntu only).
-#
-# Description:
-# - Downloads and builds FFmpeg from GitHub (https://github.com/FFmpeg/FFmpeg)
-# - Supports specific version override via FFMPEG_VERSION (e.g. n7.1.1)
-# - Supports build profile via FFMPEG_TYPE:
-# - minimal : x264, vpx, mp3 only
-# - medium : adds subtitles, fonts, opus, vorbis
-# - full : adds dav1d, svt-av1, zlib, numa
-# - binary : downloads static build (johnvansickle.com)
-# - Defaults to latest stable version and full feature set
-#
-# Notes:
-# - Requires: curl, jq, build-essential, and matching codec libraries
-# - Result is installed to /usr/local/bin/ffmpeg
-# ------------------------------------------------------------------------------
-
-function setup_ffmpeg() {
- local TMP_DIR
- TMP_DIR=$(mktemp -d)
- local GITHUB_REPO="FFmpeg/FFmpeg"
- local VERSION="${FFMPEG_VERSION:-latest}"
- local TYPE="${FFMPEG_TYPE:-full}"
- local BIN_PATH="/usr/local/bin/ffmpeg"
-
- # Binary fallback mode
- if [[ "$TYPE" == "binary" ]]; then
- msg_info "Installing FFmpeg (static binary)"
- curl -fsSL https://johnvansickle.com/ffmpeg/releases/ffmpeg-release-amd64-static.tar.xz -o "$TMP_DIR/ffmpeg.tar.xz"
- tar -xf "$TMP_DIR/ffmpeg.tar.xz" -C "$TMP_DIR"
- local EXTRACTED_DIR
- EXTRACTED_DIR=$(find "$TMP_DIR" -maxdepth 1 -type d -name "ffmpeg-*")
- cp "$EXTRACTED_DIR/ffmpeg" "$BIN_PATH"
- cp "$EXTRACTED_DIR/ffprobe" /usr/local/bin/ffprobe
- chmod +x "$BIN_PATH" /usr/local/bin/ffprobe
- rm -rf "$TMP_DIR"
- msg_ok "Installed FFmpeg binary ($($BIN_PATH -version | head -n1))"
- return
- fi
-
- if ! command -v jq &>/dev/null; then
- $STD apt-get update
- $STD apt-get install -y jq
- fi
-
- # Auto-detect latest stable version if none specified
- if [[ "$VERSION" == "latest" || -z "$VERSION" ]]; then
- msg_info "Resolving latest FFmpeg tag"
- VERSION=$(curl -fsSL "https://api.github.com/repos/${GITHUB_REPO}/tags" |
- jq -r '.[].name' |
- grep -E '^n[0-9]+\.[0-9]+\.[0-9]+$' |
- sort -V | tail -n1)
- fi
-
- if [[ -z "$VERSION" ]]; then
- msg_error "Could not determine FFmpeg version"
- rm -rf "$TMP_DIR"
- return 1
- fi
-
- msg_info "Installing FFmpeg ${VERSION} ($TYPE)"
-
- # Dependency selection
- local DEPS=(build-essential yasm nasm pkg-config)
- case "$TYPE" in
- minimal)
- DEPS+=(libx264-dev libvpx-dev libmp3lame-dev)
- ;;
- medium)
- DEPS+=(libx264-dev libvpx-dev libmp3lame-dev libfreetype6-dev libass-dev libopus-dev libvorbis-dev)
- ;;
- full)
- DEPS+=(
- libx264-dev libx265-dev libvpx-dev libmp3lame-dev
- libfreetype6-dev libass-dev libopus-dev libvorbis-dev
- libdav1d-dev libsvtav1-dev zlib1g-dev libnuma-dev
- )
- ;;
- *)
- msg_error "Invalid FFMPEG_TYPE: $TYPE"
- rm -rf "$TMP_DIR"
- return 1
- ;;
- esac
-
- $STD apt-get update
- $STD apt-get install -y "${DEPS[@]}"
-
- curl -fsSL "https://github.com/${GITHUB_REPO}/archive/refs/tags/${VERSION}.tar.gz" -o "$TMP_DIR/ffmpeg.tar.gz"
- tar -xzf "$TMP_DIR/ffmpeg.tar.gz" -C "$TMP_DIR"
- cd "$TMP_DIR/FFmpeg-"* || {
- msg_error "Source extraction failed"
- rm -rf "$TMP_DIR"
- return 1
- }
-
- local args=(
- --enable-gpl
- --enable-shared
- --enable-nonfree
- --disable-static
- --enable-libx264
- --enable-libvpx
- --enable-libmp3lame
- )
-
- if [[ "$TYPE" != "minimal" ]]; then
- args+=(--enable-libfreetype --enable-libass --enable-libopus --enable-libvorbis)
- fi
-
- if [[ "$TYPE" == "full" ]]; then
- args+=(--enable-libx265 --enable-libdav1d --enable-zlib)
- fi
-
- if [[ ${#args[@]} -eq 0 ]]; then
- msg_error "FFmpeg configure args array is empty – aborting."
- rm -rf "$TMP_DIR"
- return 1
- fi
-
- ./configure "${args[@]}" >"$TMP_DIR/configure.log" 2>&1 || {
- msg_error "FFmpeg ./configure failed (see $TMP_DIR/configure.log)"
- cat "$TMP_DIR/configure.log" | tail -n 20
- rm -rf "$TMP_DIR"
- return 1
- }
-
- $STD make -j"$(nproc)"
- $STD make install
- echo "/usr/local/lib" >/etc/ld.so.conf.d/ffmpeg.conf
- ldconfig
-
- ldconfig -p | grep libavdevice >/dev/null || {
- msg_error "libavdevice not registered with dynamic linker"
- return 1
- }
-
- if ! command -v ffmpeg &>/dev/null; then
- msg_error "FFmpeg installation failed"
- rm -rf "$TMP_DIR"
- return 1
- fi
-
- local FINAL_VERSION
- FINAL_VERSION=$(ffmpeg -version | head -n1 | awk '{print $3}')
- rm -rf "$TMP_DIR"
- ensure_usr_local_bin_persist
- msg_ok "Setup FFmpeg $FINAL_VERSION"
-}
-
-# ------------------------------------------------------------------------------
-# Installs ClickHouse server and client, sets up DB/user with credentials.
-#
-# Description:
-# - Adds official ClickHouse APT repo with GPG key
-# - Installs clickhouse-server and clickhouse-client
-# - Creates database and user (credentials optionally overrideable via env)
-#
-# Variables:
-# CLICKHOUSE_DB - Database name (default: analytics)
-# CLICKHOUSE_USER - Username (default: analytics_user)
-# CLICKHOUSE_PASS - Password (default: auto-generated)
-# ------------------------------------------------------------------------------
-
-function setup_clickhouse() {
- local CLICKHOUSE_DB="${CLICKHOUSE_DB:-analytics}"
- local CLICKHOUSE_USER="${CLICKHOUSE_USER:-analytics_user}"
- local CLICKHOUSE_PASS="${CLICKHOUSE_PASS:-$(openssl rand -base64 18 | cut -c1-13)}"
- local GPG_URL="https://packages.clickhouse.com/rpm/lts/repodata/repomd.xml.key"
- local GPG_KEY_PATH="/usr/share/keyrings/clickhouse-keyring.gpg"
- local ARCH
- ARCH=$(dpkg --print-architecture)
-
- if ! command -v clickhouse >/dev/null; then
- msg_info "Setup ClickHouse"
-
- if ! curl -fsSL --connect-timeout 5 https://packages.clickhouse.com >/dev/null 2>&1; then
- msg_error "Connection to packages.clickhouse.com:443 failed – possibly blocked"
- echo "💡 Check AdGuard/Pi-hole or firewall rules"
- return 1
- fi
-
- if ! curl -fsSL --retry 3 --connect-timeout 10 "$GPG_URL" |
- gpg --dearmor -o "$GPG_KEY_PATH"; then
- msg_error "Failed to fetch ClickHouse GPG key"
- return 1
- fi
-
- echo "deb [signed-by=$GPG_KEY_PATH arch=$ARCH] https://packages.clickhouse.com/deb stable main" \
- >/etc/apt/sources.list.d/clickhouse.list
-
- env -u CLICKHOUSE_USER $STD apt-get update
- env -u CLICKHOUSE_USER DEBIAN_FRONTEND=noninteractive $STD apt-get install -y clickhouse-server clickhouse-client
-
- $STD systemctl enable --now clickhouse-server
-
- msg_info "Waiting for ClickHouse to be ready"
- for i in {1..10}; do
- if clickhouse client --query "SELECT 1" &>/dev/null; then break; fi
- sleep 1
- done
-
- # User anlegen
- clickhouse client --query "CREATE DATABASE IF NOT EXISTS $CLICKHOUSE_DB"
- clickhouse client --query "CREATE USER IF NOT EXISTS $CLICKHOUSE_USER IDENTIFIED WITH plaintext_password BY '$CLICKHOUSE_PASS'"
- clickhouse client --query "GRANT ALL ON $CLICKHOUSE_DB.* TO $CLICKHOUSE_USER"
-
- # Default-User ggf. deaktivieren
- cat </etc/clickhouse-server/users.d/disable-default.xml
-
-
-
-
-
-EOF
- systemctl restart clickhouse-server
-
- msg_ok "Setup ClickHouse (DB: $CLICKHOUSE_DB, User: $CLICKHOUSE_USER)"
-
- {
- echo "ClickHouse DB: $CLICKHOUSE_DB"
- echo "ClickHouse User: $CLICKHOUSE_USER"
- echo "ClickHouse Pass: $CLICKHOUSE_PASS"
- } >>~/clickhouse.creds
- else
- msg_info "Updating ClickHouse packages"
- env -u CLICKHOUSE_USER $STD apt-get update
- env -u CLICKHOUSE_USER $STD apt-get install -y --only-upgrade clickhouse-server clickhouse-client
- msg_ok "ClickHouse updated"
- fi
+ FINAL_VERSION=$("$BINARY_PATH" --version 2>/dev/null | awk '{print $NF}' | sed 's/^v//')
+ cache_installed_version "yq" "$FINAL_VERSION"
+ msg_ok "Setup yq $FINAL_VERSION"
}
diff --git a/misc/tools.func.bak b/misc/tools.func.bak
new file mode 100644
index 000000000..e98332517
--- /dev/null
+++ b/misc/tools.func.bak
@@ -0,0 +1,2208 @@
+#!/bin/bash
+
+# ------------------------------------------------------------------------------
+# Installs Microsoft .NET
+#
+# Description:
+# - Installs specified .NET version using Microsoft APT repo
+#
+# Variables:
+# NET_VERSION - .NET version to install (default: "8.0")
+# ------------------------------------------------------------------------------
+
+function setup_dotnet() {
+  # Installs the Microsoft .NET SDK from the Microsoft APT repository.
+  # NET_VERSION: SDK series to install, e.g. "8.0" (default "8.0").
+  local NET_VERSION="${NET_VERSION:-8.0}"
+  local DISTRO_CODENAME=$(awk -F= '/^VERSION_CODENAME=/{ print $2 }' /etc/os-release)
+  local NEED_NET_INSTALL=false
+
+  if command -v dotnet >/dev/null; then
+    CURRENT_NET_VERSION=$(dotnet --version | awk -F. '{print $1 "." $2}')
+    if [[ "$CURRENT_NET_VERSION" == "$NET_VERSION" ]]; then
+      # Requested major.minor already present — nothing to do.
+      # NOTE(review): a dotnet install at a DIFFERENT version also falls
+      # through here without installing $NET_VERSION — confirm intended.
+      :
+    fi
+  else
+    msg_info "Setup .NET $NET_VERSION"
+    NEED_NET_INSTALL=true
+  fi
+
+  if [[ "$NEED_NET_INSTALL" == true ]]; then
+    curl -fsSL https://packages.microsoft.com/keys/microsoft.asc | gpg --dearmor -o /usr/share/keyrings/microsoft-prod.gpg
+    # Debian 13 (trixie) has its own prod.list; everything else uses Debian 12's.
+    if [[ "$DISTRO_CODENAME" != "trixie" ]]; then
+      curl -fsSL https://packages.microsoft.com/config/debian/12/prod.list -o /etc/apt/sources.list.d/msprod.list
+    else
+      curl -fsSL https://packages.microsoft.com/config/debian/13/prod.list -o /etc/apt/sources.list.d/msprod.list
+    fi
+    $STD apt-get update
+    $STD apt-get install -y dotnet-sdk-$NET_VERSION
+    msg_ok "Setup .NET ${NET_VERSION}"
+  fi
+}
+
+# ------------------------------------------------------------------------------
+# Installs Node.js and optional global modules.
+#
+# Description:
+# - Installs specified Node.js version using NodeSource APT repo
+# - Optionally installs or updates global npm modules
+#
+# Variables:
+# NODE_VERSION - Node.js version to install (default: 22)
+# NODE_MODULE - Comma-separated list of global modules (e.g. "yarn,@vue/cli@5.0.0")
+# ------------------------------------------------------------------------------
+
+function setup_nodejs() {
+  # Installs Node.js from the NodeSource APT repo and optional global
+  # npm modules.
+  # NODE_VERSION: major version to install (default 22)
+  # NODE_MODULE:  comma-separated globals, e.g. "yarn,@vue/cli@5.0.0"
+  local NODE_VERSION="${NODE_VERSION:-22}"
+  local NODE_MODULE="${NODE_MODULE:-}"
+  local CURRENT_NODE_VERSION=""
+  local NEED_NODE_INSTALL=false
+
+  if command -v node >/dev/null; then
+    CURRENT_NODE_VERSION="$(node -v | grep -oP '^v\K[0-9]+')"
+    if [[ "$CURRENT_NODE_VERSION" != "$NODE_VERSION" ]]; then
+      msg_info "Old Node.js $CURRENT_NODE_VERSION found, replacing with $NODE_VERSION"
+      NEED_NODE_INSTALL=true
+    fi
+  else
+    msg_info "Setup Node.js $NODE_VERSION"
+    NEED_NODE_INSTALL=true
+  fi
+
+  if ! command -v jq &>/dev/null; then
+    $STD apt-get update
+    $STD apt-get install -y jq || {
+      msg_error "Failed to install jq"
+      return 1
+    }
+  fi
+
+  if [[ "$NEED_NODE_INSTALL" == true ]]; then
+    $STD apt-get purge -y nodejs
+    rm -f /etc/apt/sources.list.d/nodesource.list /usr/share/keyrings/nodesource.gpg
+
+    mkdir -p /usr/share/keyrings
+    curl -fsSL https://deb.nodesource.com/gpgkey/nodesource-repo.gpg.key |
+      gpg --dearmor -o /usr/share/keyrings/nodesource.gpg || {
+      msg_error "Failed to import NodeSource GPG key"
+      exit 1
+    }
+    chmod 644 /usr/share/keyrings/nodesource.gpg
+
+    local ARCH
+    ARCH=$(dpkg --print-architecture)
+    if ! [[ "$ARCH" =~ ^(amd64|arm64|armhf)$ ]]; then
+      msg_error "Unsupported architecture: $ARCH"
+      exit 1
+    fi
+
+    echo "deb [arch=$ARCH signed-by=/usr/share/keyrings/nodesource.gpg] https://deb.nodesource.com/node_${NODE_VERSION}.x nodistro main" >/etc/apt/sources.list.d/nodesource.list
+
+    # FIX: was `cat </etc/apt/preferences.d/nodejs`, which tried to READ a
+    # nonexistent file; the heredoc marker was missing, so the APT pin was
+    # never written and the stray Package:/Pin:/EOF lines broke the script.
+    cat <<EOF >/etc/apt/preferences.d/nodejs
+Package: nodejs
+Pin: origin deb.nodesource.com
+Pin-Priority: 700
+EOF
+
+    sleep 2
+    if ! apt-get update >/dev/null 2>&1; then
+      msg_warn "APT update failed – retrying in 5s"
+      sleep 5
+      if ! apt-get update >/dev/null 2>&1; then
+        msg_error "Failed to update APT repositories after adding NodeSource"
+        exit 1
+      fi
+    fi
+
+    if ! apt-get install -y -t nodistro nodejs >/dev/null 2>&1; then
+      msg_error "Failed to install Node.js ${NODE_VERSION} from NodeSource"
+      # NOTE(review): `tee "$STD"` writes to a file named by $STD's value —
+      # confirm this is the intended logging mechanism.
+      apt-cache policy nodejs | tee "$STD"
+      exit 1
+    fi
+
+    $STD npm install -g npm@latest || {
+      msg_error "Failed to update npm to latest version"
+    }
+    msg_ok "Setup Node.js ${NODE_VERSION}"
+  fi
+
+  export NODE_OPTIONS="--max-old-space-size=4096"
+
+  # Run npm from a safe, always-existing directory.
+  [[ -d /opt ]] || mkdir -p /opt
+  cd /opt || {
+    msg_error "Failed to set safe working directory before npm install"
+    exit 1
+  }
+
+  if [[ -n "$NODE_MODULE" ]]; then
+    IFS=',' read -ra MODULES <<<"$NODE_MODULE"
+    for mod in "${MODULES[@]}"; do
+      local MODULE_NAME MODULE_REQ_VERSION MODULE_INSTALLED_VERSION
+      # FIX: a scoped package WITHOUT a version (e.g. "@vue/cli") previously
+      # matched the *@* branch and produced MODULE_NAME="" — only split on an
+      # "@" that is not the leading scope sigil.
+      if [[ "$mod" == @*@* || ("$mod" != @* && "$mod" == *@*) ]]; then
+        MODULE_NAME="${mod%@*}"
+        MODULE_REQ_VERSION="${mod##*@}"
+      else
+        MODULE_NAME="$mod"
+        MODULE_REQ_VERSION="latest"
+      fi
+
+      if npm list -g --depth=0 "$MODULE_NAME" >/dev/null 2>&1; then
+        MODULE_INSTALLED_VERSION="$(npm list -g --depth=0 "$MODULE_NAME" | grep "$MODULE_NAME@" | awk -F@ '{print $2}' | tr -d '[:space:]')"
+        if [[ "$MODULE_REQ_VERSION" != "latest" && "$MODULE_REQ_VERSION" != "$MODULE_INSTALLED_VERSION" ]]; then
+          msg_info "Updating $MODULE_NAME from v$MODULE_INSTALLED_VERSION to v$MODULE_REQ_VERSION"
+          $STD npm install -g "${MODULE_NAME}@${MODULE_REQ_VERSION}" || {
+            msg_error "Failed to update $MODULE_NAME to version $MODULE_REQ_VERSION"
+            exit 1
+          }
+        elif [[ "$MODULE_REQ_VERSION" == "latest" ]]; then
+          msg_info "Updating $MODULE_NAME to latest version"
+          $STD npm install -g "${MODULE_NAME}@latest" || {
+            msg_error "Failed to update $MODULE_NAME to latest version"
+            exit 1
+          }
+        fi
+      else
+        msg_info "Installing $MODULE_NAME@$MODULE_REQ_VERSION"
+        $STD npm install -g "${MODULE_NAME}@${MODULE_REQ_VERSION}" || {
+          msg_error "Failed to install $MODULE_NAME@$MODULE_REQ_VERSION"
+          exit 1
+        }
+      fi
+    done
+    msg_ok "Installed Node.js modules: $NODE_MODULE"
+  fi
+}
+
+# ------------------------------------------------------------------------------
+# Installs or upgrades PostgreSQL and optional extensions/modules.
+#
+# Description:
+# - Detects existing PostgreSQL version
+# - Dumps all databases before upgrade
+# - Adds PGDG repo and installs specified version
+# - Installs optional PG_MODULES (e.g. postgis, contrib)
+# - Restores dumped data post-upgrade
+#
+# Variables:
+# PG_VERSION - Major PostgreSQL version (e.g. 15, 16) (default: 16)
+# PG_MODULES - Comma-separated list of extensions (e.g. "postgis,contrib")
+# ------------------------------------------------------------------------------
+function setup_postgresql() {
+  # Installs or upgrades PostgreSQL from the PGDG APT repo, dumping all
+  # databases before an upgrade and restoring them afterwards.
+  # PG_VERSION: major version, e.g. 15/16 (default 16)
+  # PG_MODULES: comma-separated extensions, e.g. "postgis,contrib"
+  local PG_VERSION="${PG_VERSION:-16}"
+  local PG_MODULES="${PG_MODULES:-}"
+  local CURRENT_PG_VERSION=""
+  local DISTRO
+  local NEED_PG_INSTALL=false
+  DISTRO="$(awk -F'=' '/^VERSION_CODENAME=/{ print $NF }' /etc/os-release)"
+
+  if command -v psql >/dev/null; then
+    CURRENT_PG_VERSION="$(psql -V | awk '{print $3}' | cut -d. -f1)"
+    if [[ "$CURRENT_PG_VERSION" == "$PG_VERSION" ]]; then
+      : # PostgreSQL is already at the desired version – no action needed
+    else
+      $STD msg_info "Detected PostgreSQL $CURRENT_PG_VERSION, preparing upgrade to $PG_VERSION"
+      NEED_PG_INSTALL=true
+    fi
+  else
+    NEED_PG_INSTALL=true
+  fi
+
+  if [[ "$NEED_PG_INSTALL" == true ]]; then
+    if [[ -n "$CURRENT_PG_VERSION" ]]; then
+      # Full logical dump of every database before touching packages.
+      $STD msg_info "Dumping PostgreSQL $CURRENT_PG_VERSION data"
+      su - postgres -c "pg_dumpall > /var/lib/postgresql/backup_$(date +%F)_v${CURRENT_PG_VERSION}.sql"
+      $STD msg_ok "Data dump completed"
+
+      systemctl stop postgresql
+    fi
+
+    rm -f /etc/apt/sources.list.d/pgdg.list /etc/apt/trusted.gpg.d/postgresql.gpg
+
+    $STD msg_info "Adding PostgreSQL PGDG repository"
+    curl -fsSL https://www.postgresql.org/media/keys/ACCC4CF8.asc |
+      gpg --dearmor -o /etc/apt/trusted.gpg.d/postgresql.gpg
+
+    echo "deb https://apt.postgresql.org/pub/repos/apt ${DISTRO}-pgdg main" \
+      >/etc/apt/sources.list.d/pgdg.list
+    $STD apt-get update
+    $STD msg_ok "Repository added"
+
+    msg_info "Setup PostgreSQL $PG_VERSION"
+    $STD apt-get install -y "postgresql-${PG_VERSION}" "postgresql-client-${PG_VERSION}"
+
+    if [[ -n "$CURRENT_PG_VERSION" ]]; then
+      # Old server packages removed only after the new ones are installed.
+      $STD apt-get purge -y "postgresql-${CURRENT_PG_VERSION}" "postgresql-client-${CURRENT_PG_VERSION}" || true
+    fi
+    systemctl enable -q --now postgresql
+
+    if [[ -n "$CURRENT_PG_VERSION" ]]; then
+      # NOTE(review): the restore filename re-evaluates $(date +%F); a run
+      # crossing midnight would look for the wrong dump file — confirm.
+      $STD msg_info "Restoring dumped data"
+      su - postgres -c "psql < /var/lib/postgresql/backup_$(date +%F)_v${CURRENT_PG_VERSION}.sql"
+      $STD msg_ok "Data restored"
+    fi
+
+    $STD msg_ok "PostgreSQL $PG_VERSION installed"
+  fi
+
+  # Install optional PostgreSQL modules
+  if [[ -n "$PG_MODULES" ]]; then
+    IFS=',' read -ra MODULES <<<"$PG_MODULES"
+    for module in "${MODULES[@]}"; do
+      local pkg="postgresql-${PG_VERSION}-${module}"
+      $STD msg_info "Setup PostgreSQL module/s: $pkg"
+      $STD apt-get install -y "$pkg" || {
+        msg_error "Failed to install $pkg"
+        continue
+      }
+    done
+    $STD msg_ok "Setup PostgreSQL modules"
+  fi
+}
+
+# ------------------------------------------------------------------------------
+# Installs or updates MariaDB from official repo.
+#
+# Description:
+# - Detects current MariaDB version and replaces it if necessary
+# - Preserves existing database data
+# - Dynamically determines latest GA version if "latest" is given
+#
+# Variables:
+# MARIADB_VERSION - MariaDB version to install (e.g. 10.11, latest) (default: latest)
+# ------------------------------------------------------------------------------
+
+setup_mariadb() {
+  # Installs or upgrades MariaDB from mirror.mariadb.org, with a distro
+  # package fallback if the upstream repo install fails.
+  # MARIADB_VERSION: version to install, or "latest" for newest GA (default).
+  local MARIADB_VERSION="${MARIADB_VERSION:-latest}"
+  local DISTRO_CODENAME
+  DISTRO_CODENAME="$(awk -F= '/^VERSION_CODENAME=/{print $2}' /etc/os-release)"
+  CURRENT_OS="$(awk -F= '/^ID=/{print $2}' /etc/os-release)"
+
+  if ! curl -fsI http://mirror.mariadb.org/repo/ >/dev/null; then
+    msg_error "MariaDB mirror not reachable"
+    return 1
+  fi
+
+  msg_info "Setting up MariaDB $MARIADB_VERSION"
+  # Grab dynamic latest LTS version
+  if [[ "$MARIADB_VERSION" == "latest" ]]; then
+    MARIADB_VERSION=$(curl -fsSL http://mirror.mariadb.org/repo/ |
+      grep -Eo '[0-9]+\.[0-9]+\.[0-9]+/' |
+      grep -vE 'rc/|rolling/' |
+      sed 's|/||' |
+      sort -Vr |
+      head -n1)
+    if [[ -z "$MARIADB_VERSION" ]]; then
+      msg_error "Could not determine latest GA MariaDB version"
+      return 1
+    fi
+  fi
+
+  local CURRENT_VERSION=""
+  if command -v mariadb >/dev/null; then
+    CURRENT_VERSION=$(mariadb --version | grep -oE '[0-9]+\.[0-9]+\.[0-9]+')
+  fi
+
+  # Requested version already installed: just pull package-level updates.
+  if [[ "$CURRENT_VERSION" == "$MARIADB_VERSION" ]]; then
+    $STD msg_info "MariaDB $MARIADB_VERSION, upgrading"
+    $STD apt-get update
+    $STD apt-get install --only-upgrade -y mariadb-server mariadb-client
+    $STD msg_ok "MariaDB upgraded to $MARIADB_VERSION"
+    return 0
+  fi
+
+  if [[ -n "$CURRENT_VERSION" ]]; then
+    # Different version installed: purge packages (data dirs are kept by
+    # apt purge of 'mariadb*' server packages), then re-add the repo below.
+    $STD msg_info "Upgrading MariaDB $CURRENT_VERSION to $MARIADB_VERSION"
+    $STD systemctl stop mariadb >/dev/null 2>&1 || true
+    $STD apt-get purge -y 'mariadb*' || true
+    rm -f /etc/apt/sources.list.d/mariadb.list /etc/apt/trusted.gpg.d/mariadb.gpg
+  else
+    $STD msg_info "Setup MariaDB $MARIADB_VERSION"
+  fi
+
+  curl -fsSL "https://mariadb.org/mariadb_release_signing_key.asc" |
+    gpg --dearmor -o /etc/apt/trusted.gpg.d/mariadb.gpg
+
+  echo "deb [signed-by=/etc/apt/trusted.gpg.d/mariadb.gpg] http://mirror.mariadb.org/repo/${MARIADB_VERSION}/${CURRENT_OS} ${DISTRO_CODENAME} main" \
+    >/etc/apt/sources.list.d/mariadb.list
+
+  $STD apt-get update
+
+  # Pre-seed debconf so the install is fully non-interactive.
+  local MARIADB_MAJOR_MINOR
+  MARIADB_MAJOR_MINOR=$(echo "$MARIADB_VERSION" | awk -F. '{print $1"."$2}')
+  if [[ -n "$MARIADB_MAJOR_MINOR" ]]; then
+    echo "mariadb-server-$MARIADB_MAJOR_MINOR mariadb-server/feedback boolean false" | debconf-set-selections
+  else
+    # NOTE(review): this fallback looks effectively dead — MARIADB_MAJOR_MINOR
+    # is non-empty whenever MARIADB_VERSION resolved above; confirm intent.
+    for ver in 12.1 12.0 11.4 11.3 11.2 11.1 11.0 10.11 10.6 10.5 10.4 10.3; do
+      echo "mariadb-server-$ver mariadb-server/feedback boolean false" | debconf-set-selections
+    done
+  fi
+  DEBIAN_FRONTEND=noninteractive $STD apt-get install -y mariadb-server mariadb-client || {
+    msg_warn "Failed to install MariaDB ${MARIADB_VERSION} from upstream repo – trying distro package as fallback..."
+    # Cleanup, remove upstream repo to avoid conflicts
+    rm -f /etc/apt/sources.list.d/mariadb.list /etc/apt/trusted.gpg.d/mariadb.gpg
+    $STD apt-get update
+    # Final fallback: distro package
+    DEBIAN_FRONTEND=noninteractive $STD apt-get install -y mariadb-server mariadb-client || {
+      msg_error "MariaDB installation failed even with distro fallback!"
+      return 1
+    }
+    msg_ok "Setup MariaDB (distro fallback)"
+    return 0
+  }
+
+  msg_ok "Setup MariaDB $MARIADB_VERSION"
+}
+
+# ------------------------------------------------------------------------------
+# Installs or upgrades MySQL and configures APT repo.
+#
+# Description:
+# - Detects existing MySQL installation
+# - Purges conflicting packages before installation
+# - Supports clean upgrade
+#
+# Variables:
+# MYSQL_VERSION - MySQL version to install (e.g. 5.7, 8.0) (default: 8.0)
+# ------------------------------------------------------------------------------
+
+function setup_mysql() {
+  # Installs or upgrades MySQL from the official repo.mysql.com APT repo,
+  # purging conflicting packages before a version change.
+  # MYSQL_VERSION: series to install, e.g. "5.7" or "8.0" (default 8.0).
+  local MYSQL_VERSION="${MYSQL_VERSION:-8.0}"
+  local CURRENT_VERSION=""
+  local NEED_INSTALL=false
+  CURRENT_OS="$(awk -F= '/^ID=/{print $2}' /etc/os-release)"
+
+  if command -v mysql >/dev/null; then
+    # NOTE(review): "Distrib" appears in MySQL 5.x / MariaDB version output;
+    # MySQL 8 prints "mysql  Ver 8.0.x ..." — this grep may yield an empty
+    # CURRENT_VERSION there, forcing a reinstall. Confirm against target OS.
+    CURRENT_VERSION="$(mysql --version | grep -oP 'Distrib\s+\K[0-9]+\.[0-9]+')"
+    if [[ "$CURRENT_VERSION" != "$MYSQL_VERSION" ]]; then
+      $STD msg_info "MySQL $CURRENT_VERSION will be upgraded to $MYSQL_VERSION"
+      NEED_INSTALL=true
+    else
+      # Check for patch-level updates
+      if apt list --upgradable 2>/dev/null | grep -q '^mysql-server/'; then
+        $STD msg_info "MySQL $CURRENT_VERSION available for upgrade"
+        $STD apt-get update
+        $STD apt-get install --only-upgrade -y mysql-server
+        $STD msg_ok "MySQL upgraded"
+      fi
+      return
+    fi
+  else
+    msg_info "Setup MySQL $MYSQL_VERSION"
+    NEED_INSTALL=true
+  fi
+
+  if [[ "$NEED_INSTALL" == true ]]; then
+    $STD systemctl stop mysql || true
+    $STD apt-get purge -y "^mysql-server.*" "^mysql-client.*" "^mysql-common.*" || true
+    rm -f /etc/apt/sources.list.d/mysql.list /etc/apt/trusted.gpg.d/mysql.gpg
+
+    local DISTRO_CODENAME
+    DISTRO_CODENAME="$(awk -F= '/VERSION_CODENAME/ { print $2 }' /etc/os-release)"
+    curl -fsSL https://repo.mysql.com/RPM-GPG-KEY-mysql-2023 | gpg --dearmor -o /etc/apt/trusted.gpg.d/mysql.gpg
+    echo "deb [signed-by=/etc/apt/trusted.gpg.d/mysql.gpg] https://repo.mysql.com/apt/${CURRENT_OS}/ ${DISTRO_CODENAME} mysql-${MYSQL_VERSION}" \
+      >/etc/apt/sources.list.d/mysql.list
+
+    export DEBIAN_FRONTEND=noninteractive
+    $STD apt-get update
+    $STD apt-get install -y mysql-server
+    msg_ok "Setup MySQL $MYSQL_VERSION"
+  fi
+}
+
+# ------------------------------------------------------------------------------
+# Installs PHP with selected modules and configures Apache/FPM support.
+#
+# Description:
+# - Adds Sury PHP repo if needed
+# - Installs default and user-defined modules
+# - Patches php.ini for CLI, Apache, and FPM as needed
+#
+# Variables:
+# PHP_VERSION - PHP version to install (default: 8.4)
+# PHP_MODULE - Additional comma-separated modules
+# PHP_APACHE - Set YES to enable PHP with Apache
+# PHP_FPM - Set YES to enable PHP-FPM
+# PHP_MEMORY_LIMIT - (default: 512M)
+# PHP_UPLOAD_MAX_FILESIZE - (default: 128M)
+# PHP_POST_MAX_SIZE - (default: 128M)
+# PHP_MAX_EXECUTION_TIME - (default: 300)
+# ------------------------------------------------------------------------------
+
+function setup_php() {
+  # Installs PHP (CLI + optional Apache/FPM) from the Sury repo and patches
+  # php.ini limits across all installed SAPIs.
+  # PHP_VERSION (default 8.4), PHP_MODULE (extra modules, comma-separated),
+  # PHP_APACHE / PHP_FPM ("YES" to enable), PHP_MEMORY_LIMIT,
+  # PHP_UPLOAD_MAX_FILESIZE, PHP_POST_MAX_SIZE, PHP_MAX_EXECUTION_TIME.
+  local PHP_VERSION="${PHP_VERSION:-8.4}"
+  local PHP_MODULE="${PHP_MODULE:-}"
+  local PHP_APACHE="${PHP_APACHE:-NO}"
+  local PHP_FPM="${PHP_FPM:-NO}"
+  local DISTRO_CODENAME
+  DISTRO_CODENAME=$(awk -F= '/VERSION_CODENAME/ { print $2 }' /etc/os-release)
+
+  local DEFAULT_MODULES="bcmath,cli,curl,gd,intl,mbstring,opcache,readline,xml,zip"
+  local COMBINED_MODULES
+
+  local PHP_MEMORY_LIMIT="${PHP_MEMORY_LIMIT:-512M}"
+  local PHP_UPLOAD_MAX_FILESIZE="${PHP_UPLOAD_MAX_FILESIZE:-128M}"
+  local PHP_POST_MAX_SIZE="${PHP_POST_MAX_SIZE:-128M}"
+  local PHP_MAX_EXECUTION_TIME="${PHP_MAX_EXECUTION_TIME:-300}"
+
+  # Merge default + user-defined modules
+  if [[ -n "$PHP_MODULE" ]]; then
+    COMBINED_MODULES="${DEFAULT_MODULES},${PHP_MODULE}"
+  else
+    COMBINED_MODULES="${DEFAULT_MODULES}"
+  fi
+
+  # Deduplicate while preserving first-seen order.
+  COMBINED_MODULES=$(echo "$COMBINED_MODULES" | tr ',' '\n' | awk '!seen[$0]++' | paste -sd, -)
+
+  # Get current PHP-CLI version
+  local CURRENT_PHP=""
+  if command -v php >/dev/null 2>&1; then
+    CURRENT_PHP=$(php -v 2>/dev/null | awk '/^PHP/{print $2}' | cut -d. -f1,2)
+  fi
+
+  if [[ -z "$CURRENT_PHP" ]]; then
+    msg_info "Setup PHP $PHP_VERSION"
+  elif [[ "$CURRENT_PHP" != "$PHP_VERSION" ]]; then
+    # e.g. CURRENT_PHP=8.2 -> purges "php82*" packages.
+    msg_info "Old PHP $CURRENT_PHP detected, Setup new PHP $PHP_VERSION"
+    $STD apt-get purge -y "php${CURRENT_PHP//./}"* || true
+  fi
+
+  # Ensure Sury repo is available
+  if [[ ! -f /etc/apt/sources.list.d/php.list ]]; then
+    $STD curl -fsSLo /tmp/debsuryorg-archive-keyring.deb https://packages.sury.org/debsuryorg-archive-keyring.deb
+    $STD dpkg -i /tmp/debsuryorg-archive-keyring.deb
+    echo "deb [signed-by=/usr/share/keyrings/deb.sury.org-php.gpg] https://packages.sury.org/php/ ${DISTRO_CODENAME} main" \
+      >/etc/apt/sources.list.d/php.list
+    $STD apt-get update
+  fi
+
+  # Build module list, skipping modules the repo doesn't ship for this version.
+  local MODULE_LIST="php${PHP_VERSION}"
+  IFS=',' read -ra MODULES <<<"$COMBINED_MODULES"
+  for mod in "${MODULES[@]}"; do
+    if apt-cache show "php${PHP_VERSION}-${mod}" >/dev/null 2>&1; then
+      MODULE_LIST+=" php${PHP_VERSION}-${mod}"
+    else
+      msg_warn "PHP-Module ${mod} for PHP ${PHP_VERSION} not found – skipping"
+    fi
+  done
+  if [[ "$PHP_FPM" == "YES" ]]; then
+    MODULE_LIST+=" php${PHP_VERSION}-fpm"
+  fi
+
+  # install apache2 with PHP support if requested
+  if [[ "$PHP_APACHE" == "YES" ]]; then
+    if ! dpkg -l | grep -q "libapache2-mod-php${PHP_VERSION}"; then
+      msg_info "Installing Apache with PHP${PHP_VERSION} support"
+      $STD apt-get install -y apache2 libapache2-mod-php${PHP_VERSION}
+    else
+      msg_info "Apache with PHP${PHP_VERSION} already installed – skipping install"
+    fi
+  fi
+
+  # setup / update PHP modules (MODULE_LIST intentionally unquoted: word-split
+  # into individual package arguments)
+  $STD apt-get install -y $MODULE_LIST
+  msg_ok "Setup PHP $PHP_VERSION"
+
+  # optional stop old PHP-FPM service
+  if [[ "$PHP_FPM" == "YES" && -n "$CURRENT_PHP" && "$CURRENT_PHP" != "$PHP_VERSION" ]]; then
+    $STD systemctl stop php"${CURRENT_PHP}"-fpm || true
+    $STD systemctl disable php"${CURRENT_PHP}"-fpm || true
+  fi
+
+  # Patch all relevant php.ini files (CLI always; FPM/Apache when enabled).
+  local PHP_INI_PATHS=("/etc/php/${PHP_VERSION}/cli/php.ini")
+  [[ "$PHP_FPM" == "YES" ]] && PHP_INI_PATHS+=("/etc/php/${PHP_VERSION}/fpm/php.ini")
+  [[ "$PHP_APACHE" == "YES" ]] && PHP_INI_PATHS+=("/etc/php/${PHP_VERSION}/apache2/php.ini")
+  for ini in "${PHP_INI_PATHS[@]}"; do
+    if [[ -f "$ini" ]]; then
+      $STD msg_info "Patching $ini"
+      sed -i "s|^memory_limit = .*|memory_limit = ${PHP_MEMORY_LIMIT}|" "$ini"
+      sed -i "s|^upload_max_filesize = .*|upload_max_filesize = ${PHP_UPLOAD_MAX_FILESIZE}|" "$ini"
+      sed -i "s|^post_max_size = .*|post_max_size = ${PHP_POST_MAX_SIZE}|" "$ini"
+      sed -i "s|^max_execution_time = .*|max_execution_time = ${PHP_MAX_EXECUTION_TIME}|" "$ini"
+      $STD msg_ok "Patched $ini"
+    fi
+  done
+
+  # patch Apache configuration if needed: disable any other phpX.Y module
+  # so only the requested version is active.
+  if [[ "$PHP_APACHE" == "YES" ]]; then
+    for mod in $(ls /etc/apache2/mods-enabled/ 2>/dev/null | grep -E '^php[0-9]\.[0-9]\.conf$' | sed 's/\.conf//'); do
+      if [[ "$mod" != "php${PHP_VERSION}" ]]; then
+        $STD a2dismod "$mod" || true
+      fi
+    done
+    $STD a2enmod mpm_prefork
+    $STD a2enmod "php${PHP_VERSION}"
+    $STD systemctl restart apache2 || true
+  fi
+
+  # enable and restart PHP-FPM if requested
+  if [[ "$PHP_FPM" == "YES" ]]; then
+    if systemctl list-unit-files | grep -q "php${PHP_VERSION}-fpm.service"; then
+      $STD systemctl enable php${PHP_VERSION}-fpm
+      $STD systemctl restart php${PHP_VERSION}-fpm
+    else
+      msg_warn "FPM requested but service php${PHP_VERSION}-fpm not found"
+    fi
+  fi
+}
+
+# ------------------------------------------------------------------------------
+# Installs or updates Composer globally.
+#
+# Description:
+# - Downloads latest version from getcomposer.org
+# - Installs to /usr/local/bin/composer
+# ------------------------------------------------------------------------------
+
+function setup_composer() {
+  # Installs Composer globally to /usr/local/bin/composer, or updates an
+  # existing installation to the latest release from getcomposer.org.
+  # Runs `composer diagnose` afterwards as a sanity check.
+  local COMPOSER_BIN="/usr/local/bin/composer"
+  # Composer refuses to run as root unless this is set.
+  export COMPOSER_ALLOW_SUPERUSER=1
+
+  # Report whether this is a fresh install or an update
+  if [[ -x "$COMPOSER_BIN" ]]; then
+    local CURRENT_VERSION
+    CURRENT_VERSION=$("$COMPOSER_BIN" --version | awk '{print $3}')
+    $STD msg_info "Old Composer $CURRENT_VERSION found, updating to latest"
+  else
+    msg_info "Setup Composer"
+  fi
+
+  # Download the official installer; fail early if the download itself fails
+  if ! curl -fsSL https://getcomposer.org/installer -o /tmp/composer-setup.php; then
+    msg_error "Failed to download Composer installer"
+    return 1
+  fi
+
+  # Run the installer and always clean it up afterwards
+  # (previously the installer file was left behind in /tmp)
+  if ! php /tmp/composer-setup.php --install-dir=/usr/local/bin --filename=composer >/dev/null 2>&1; then
+    msg_error "Failed to install Composer"
+    rm -f /tmp/composer-setup.php
+    return 1
+  fi
+  rm -f /tmp/composer-setup.php
+
+  chmod +x "$COMPOSER_BIN"
+  $STD composer diagnose
+  msg_ok "Setup Composer"
+}
+
+# ------------------------------------------------------------------------------
+# Installs Go (Golang) from official tarball.
+#
+# Description:
+# - Determines system architecture
+# - Downloads latest version if GO_VERSION not set
+#
+# Variables:
+# GO_VERSION - Version to install (e.g. 1.22.2 or latest)
+# ------------------------------------------------------------------------------
+
+function setup_go() {
+  # Installs or upgrades Go from the official tarball into /usr/local/go and
+  # symlinks go/gofmt into /usr/local/bin.
+  #
+  # Variables:
+  #   GO_VERSION - version to install (e.g. 1.22.2); empty or "latest"
+  #                resolves the newest release from go.dev
+  local ARCH
+  case "$(uname -m)" in
+  x86_64) ARCH="amd64" ;;
+  aarch64) ARCH="arm64" ;;
+  *)
+    msg_error "Unsupported architecture: $(uname -m)"
+    return 1
+    ;;
+  esac
+
+  # Resolve "latest" (or unset) to the current upstream version, e.g. "1.22.2"
+  if [[ -z "${GO_VERSION:-}" || "${GO_VERSION}" == "latest" ]]; then
+    GO_VERSION=$(curl -fsSL https://go.dev/VERSION?m=text | head -n1 | sed 's/^go//')
+    if [[ -z "$GO_VERSION" ]]; then
+      msg_error "Could not determine latest Go version"
+      return 1
+    fi
+  fi
+
+  local GO_BIN="/usr/local/bin/go"
+  local GO_INSTALL_DIR="/usr/local/go"
+
+  # Skip work if the requested version is already installed
+  if [[ -x "$GO_BIN" ]]; then
+    local CURRENT_VERSION
+    CURRENT_VERSION=$("$GO_BIN" version | awk '{print $3}' | sed 's/go//')
+    if [[ "$CURRENT_VERSION" == "$GO_VERSION" ]]; then
+      return 0
+    else
+      $STD msg_info "Old Go Installation ($CURRENT_VERSION) found, upgrading to $GO_VERSION"
+      rm -rf "$GO_INSTALL_DIR"
+    fi
+  else
+    msg_info "Setup Go $GO_VERSION"
+  fi
+
+  local TARBALL="go${GO_VERSION}.linux-${ARCH}.tar.gz"
+  local URL="https://go.dev/dl/${TARBALL}"
+  # Separate declaration from assignment so a mktemp failure is not masked
+  local TMP_TAR
+  TMP_TAR=$(mktemp) || {
+    msg_error "Failed to create temporary file"
+    return 1
+  }
+
+  curl -fsSL "$URL" -o "$TMP_TAR" || {
+    msg_error "Failed to download $TARBALL"
+    rm -f "$TMP_TAR"
+    return 1
+  }
+
+  # Extract into /usr/local (creates /usr/local/go) and expose the binaries
+  tar -C /usr/local -xzf "$TMP_TAR" || {
+    msg_error "Failed to extract $TARBALL"
+    rm -f "$TMP_TAR"
+    return 1
+  }
+  ln -sf /usr/local/go/bin/go /usr/local/bin/go
+  ln -sf /usr/local/go/bin/gofmt /usr/local/bin/gofmt
+  rm -f "$TMP_TAR"
+
+  msg_ok "Setup Go $GO_VERSION"
+}
+
+# ------------------------------------------------------------------------------
+# Installs Temurin JDK via Adoptium APT repository.
+#
+# Description:
+# - Removes previous JDK if version mismatch
+# - Installs or upgrades to specified JAVA_VERSION
+#
+# Variables:
+# JAVA_VERSION - Temurin JDK version to install (e.g. 17, 21)
+# ------------------------------------------------------------------------------
+
+function setup_java() {
+  # Installs or upgrades the Eclipse Temurin JDK from the Adoptium APT repo,
+  # removing any other Temurin major version first.
+  #
+  # Variables:
+  #   JAVA_VERSION - Temurin JDK major version to install (default: 21)
+  local JAVA_VERSION="${JAVA_VERSION:-21}"
+  local DISTRO_CODENAME
+  DISTRO_CODENAME=$(awk -F= '/VERSION_CODENAME/ { print $2 }' /etc/os-release)
+  local DESIRED_PACKAGE="temurin-${JAVA_VERSION}-jdk"
+
+  # Add Adoptium repo if missing
+  if [[ ! -f /etc/apt/sources.list.d/adoptium.list ]]; then
+    $STD msg_info "Setting up Adoptium Repository"
+    mkdir -p /etc/apt/keyrings
+    curl -fsSL "https://packages.adoptium.net/artifactory/api/gpg/key/public" | gpg --dearmor -o /etc/apt/trusted.gpg.d/adoptium.gpg
+    echo "deb [signed-by=/etc/apt/trusted.gpg.d/adoptium.gpg] https://packages.adoptium.net/artifactory/deb ${DISTRO_CODENAME} main" \
+      >/etc/apt/sources.list.d/adoptium.list
+    $STD apt-get update
+    $STD msg_ok "Set up Adoptium Repository"
+  fi
+
+  # Detect currently installed temurin version (major number only)
+  local INSTALLED_VERSION=""
+  if dpkg -l | grep -q "temurin-.*-jdk"; then
+    INSTALLED_VERSION=$(dpkg -l | awk '/temurin-.*-jdk/{print $2}' | grep -oP 'temurin-\K[0-9]+')
+  fi
+
+  if [[ "$INSTALLED_VERSION" == "$JAVA_VERSION" ]]; then
+    # Same major version present: just pull the latest package build
+    $STD msg_info "Upgrading Temurin JDK $JAVA_VERSION"
+    $STD apt-get update
+    $STD apt-get install --only-upgrade -y "$DESIRED_PACKAGE"
+    $STD msg_ok "Upgraded Temurin JDK $JAVA_VERSION"
+  else
+    # Different major version installed: purge it before installing the
+    # requested one
+    if [[ -n "$INSTALLED_VERSION" ]]; then
+      $STD msg_info "Removing Temurin JDK $INSTALLED_VERSION"
+      $STD apt-get purge -y "temurin-${INSTALLED_VERSION}-jdk"
+    fi
+
+    msg_info "Setup Temurin JDK $JAVA_VERSION"
+    $STD apt-get install -y "$DESIRED_PACKAGE"
+    msg_ok "Setup Temurin JDK $JAVA_VERSION"
+  fi
+}
+
+# ------------------------------------------------------------------------------
+# Installs or updates MongoDB to specified major version.
+#
+# Description:
+# - Preserves data across installations
+# - Adds official MongoDB repo
+#
+# Variables:
+# MONGO_VERSION - MongoDB major version to install (e.g. 7.0, 8.0)
+# ------------------------------------------------------------------------------
+
+function setup_mongodb() {
+  # Installs or upgrades MongoDB from the official mongodb.org APT repo.
+  # Existing data in /var/lib/mongodb is preserved across reinstalls.
+  #
+  # Variables:
+  #   MONGO_VERSION - MongoDB major version to install (default: 8.0)
+  local MONGO_VERSION="${MONGO_VERSION:-8.0}"
+  local DISTRO_ID DISTRO_CODENAME MONGO_BASE_URL
+  DISTRO_ID=$(awk -F= '/^ID=/{ gsub(/"/,"",$2); print $2 }' /etc/os-release)
+  DISTRO_CODENAME=$(awk -F= '/^VERSION_CODENAME=/{ print $2 }' /etc/os-release)
+
+  # Check AVX support (MongoDB majors above 5 refuse to run without AVX)
+  if ! grep -qm1 'avx[^ ]*' /proc/cpuinfo; then
+    local major="${MONGO_VERSION%%.*}"
+    if ((major > 5)); then
+      msg_error "MongoDB ${MONGO_VERSION} requires AVX support, which is not available on this system."
+      return 1
+    fi
+  fi
+
+  # Pick the upstream repo layout matching the distro
+  case "$DISTRO_ID" in
+  ubuntu)
+    MONGO_BASE_URL="https://repo.mongodb.org/apt/ubuntu"
+    REPO_COMPONENT="multiverse"
+    ;;
+  debian)
+    MONGO_BASE_URL="https://repo.mongodb.org/apt/debian"
+    REPO_COMPONENT="main"
+    ;;
+  *)
+    msg_error "Unsupported distribution: $DISTRO_ID"
+    return 1
+    ;;
+  esac
+
+  local REPO_LIST="/etc/apt/sources.list.d/mongodb-org-${MONGO_VERSION}.list"
+
+  # Detect an existing installation (major.minor only)
+  local INSTALLED_VERSION=""
+  if command -v mongod >/dev/null; then
+    INSTALLED_VERSION=$(mongod --version | awk '/db version/{print $3}' | cut -d. -f1,2)
+  fi
+
+  # Same major.minor already present: upgrade packages in place and stop
+  if [[ "$INSTALLED_VERSION" == "$MONGO_VERSION" ]]; then
+    $STD msg_info "Upgrading MongoDB $MONGO_VERSION"
+    $STD apt-get update
+    $STD apt-get install --only-upgrade -y mongodb-org
+    $STD msg_ok "Upgraded MongoDB $MONGO_VERSION"
+    return 0
+  fi
+
+  # Different version installed: remove packages and old repo entries.
+  # /var/lib/mongodb is deliberately left untouched.
+  if [[ -n "$INSTALLED_VERSION" ]]; then
+    $STD systemctl stop mongod || true
+    $STD apt-get purge -y mongodb-org || true
+    rm -f /etc/apt/sources.list.d/mongodb-org-*.list
+    rm -f /etc/apt/trusted.gpg.d/mongodb-*.gpg
+  else
+    msg_info "Setup MongoDB $MONGO_VERSION"
+  fi
+
+  # Add the signing key and the version-specific repository.
+  # NOTE(review): REPO_COMPONENT is computed above but the repo line below
+  # hard-codes "main" — presumably the Ubuntu branch should use
+  # "multiverse"; verify against MongoDB's install docs.
+  curl -fsSL "https://pgp.mongodb.com/server-${MONGO_VERSION}.asc" | gpg --dearmor -o "/etc/apt/trusted.gpg.d/mongodb-${MONGO_VERSION}.gpg"
+  echo "deb [signed-by=/etc/apt/trusted.gpg.d/mongodb-${MONGO_VERSION}.gpg] ${MONGO_BASE_URL} ${DISTRO_CODENAME}/mongodb-org/${MONGO_VERSION} main" \
+    >"$REPO_LIST"
+
+  $STD apt-get update || {
+    msg_error "APT update failed — invalid MongoDB repo for ${DISTRO_ID}-${DISTRO_CODENAME}?"
+    return 1
+  }
+
+  $STD apt-get install -y mongodb-org
+
+  # Ensure the data directory exists with the expected ownership
+  mkdir -p /var/lib/mongodb
+  chown -R mongodb:mongodb /var/lib/mongodb
+
+  $STD systemctl enable mongod
+  $STD systemctl start mongod
+  msg_ok "Setup MongoDB $MONGO_VERSION"
+}
+
+# ------------------------------------------------------------------------------
+# Downloads and deploys latest GitHub release (source, binary, tarball, asset).
+#
+# Description:
+# - Fetches latest release metadata from GitHub API
+# - Supports the following modes:
+# - tarball: Source code tarball (default if omitted)
+# - source: Alias for tarball (same behavior)
+# - binary: .deb package install (arch-dependent)
+# - prebuild: Prebuilt .tar.gz archive (e.g. Go binaries)
+# - singlefile: Standalone binary (no archive, direct chmod +x install)
+# - Handles download, extraction/installation and version tracking in ~/.<app>
+#
+# Parameters:
+# $1 APP - Application name (used for install path and version file)
+# $2 REPO - GitHub repository in form user/repo
+# $3 MODE - Release type:
+# tarball → source tarball (.tar.gz)
+# binary → .deb file (auto-arch matched)
+# prebuild → prebuilt archive (e.g. tar.gz)
+# singlefile→ standalone binary (chmod +x)
+# $4 VERSION - Optional release tag (default: latest)
+# $5 TARGET_DIR - Optional install path (default: /opt/)
+# $6 ASSET_FILENAME - Required for:
+# - prebuild → archive filename or pattern
+# - singlefile→ binary filename or pattern
+#
+# Optional:
+# - Set GITHUB_TOKEN env var to increase API rate limit (recommended for CI/CD).
+#
+# Examples:
+# # 1. Minimal: Fetch and deploy source tarball
+# fetch_and_deploy_gh_release "myapp" "myuser/myapp"
+#
+# # 2. Binary install via .deb asset (architecture auto-detected)
+# fetch_and_deploy_gh_release "myapp" "myuser/myapp" "binary"
+#
+# # 3. Prebuilt archive (.tar.gz) with asset filename match
+# fetch_and_deploy_gh_release "hanko" "teamhanko/hanko" "prebuild" "latest" "/opt/hanko" "hanko_Linux_x86_64.tar.gz"
+#
+# # 4. Single binary (chmod +x) like Argus, Promtail etc.
+# fetch_and_deploy_gh_release "argus" "release-argus/Argus" "singlefile" "0.26.3" "/opt/argus" "Argus-.*linux-amd64"
+# ------------------------------------------------------------------------------
+
+function fetch_and_deploy_gh_release() {
+  # Fetches a GitHub release for $repo and deploys it according to $mode
+  # (tarball/source, binary .deb, prebuild archive, or single binary).
+  # The deployed version tag is recorded in ~/.<app> for future runs.
+  local app="$1"
+  local repo="$2"
+  local mode="${3:-tarball}" # tarball | binary | prebuild | singlefile
+  local version="${4:-latest}"
+  local target="${5:-/opt/$app}"
+  local asset_pattern="${6:-}"
+
+  # Version file is keyed by the lowercased, space-stripped app name
+  local app_lc=$(echo "${app,,}" | tr -d ' ')
+  local version_file="$HOME/.${app_lc}"
+
+  local api_timeout="--connect-timeout 10 --max-time 60"
+  local download_timeout="--connect-timeout 15 --max-time 900"
+
+  local current_version=""
+  [[ -f "$version_file" ]] && current_version=$(<"$version_file")
+
+  # jq is required to parse the release JSON
+  if ! command -v jq &>/dev/null; then
+    $STD apt-get install -y jq &>/dev/null
+  fi
+
+  local api_url="https://api.github.com/repos/$repo/releases"
+  [[ "$version" != "latest" ]] && api_url="$api_url/tags/$version" || api_url="$api_url/latest"
+  local header=()
+  [[ -n "${GITHUB_TOKEN:-}" ]] && header=(-H "Authorization: token $GITHUB_TOKEN")
+
+  # dns pre check
+  local gh_host
+  gh_host=$(awk -F/ '{print $3}' <<<"$api_url")
+  if ! getent hosts "$gh_host" &>/dev/null; then
+    msg_error "DNS resolution failed for $gh_host – check /etc/resolv.conf or networking"
+    return 1
+  fi
+
+  # Fetch release metadata with a simple retry loop; the JSON body goes to
+  # /tmp/gh_rel.json and curl's -w appends the HTTP status code to $resp
+  local max_retries=3 retry_delay=2 attempt=1 success=false resp http_code
+
+  while ((attempt <= max_retries)); do
+    resp=$(curl $api_timeout -fsSL -w "%{http_code}" -o /tmp/gh_rel.json "${header[@]}" "$api_url") && success=true && break
+    sleep "$retry_delay"
+    ((attempt++))
+  done
+
+  if ! $success; then
+    msg_error "Failed to fetch release metadata from $api_url after $max_retries attempts"
+    return 1
+  fi
+
+  http_code="${resp:(-3)}"
+  [[ "$http_code" != "200" ]] && {
+    msg_error "GitHub API returned HTTP $http_code"
+    return 1
+  }
+
+  local json tag_name
+  # NOTE(review): the next line appears corrupted/truncated. It neither loads
+  # /tmp/gh_rel.json into $json nor detects the architecture; the tag_name /
+  # $version resolution, the $tmpdir creation, the tarball/source mode branch
+  # and the `elif [[ "$mode" == "binary" ]]` opener (with `local arch=...`)
+  # seem to be missing here — which also leaves the `elif` at "Prebuild Mode"
+  # below without a matching `if`. TODO: restore this section from upstream
+  # before relying on any mode of this function.
+  json=$(/dev/null || uname -m)
+  [[ "$arch" == "x86_64" ]] && arch="amd64"
+  [[ "$arch" == "aarch64" ]] && arch="arm64"
+
+  local assets url_match=""
+  assets=$(echo "$json" | jq -r '.assets[].browser_download_url')
+
+  # If explicit filename pattern is provided (param $6), match that first
+  if [[ -n "$asset_pattern" ]]; then
+    for u in $assets; do
+      case "${u##*/}" in
+      $asset_pattern)
+        url_match="$u"
+        break
+        ;;
+      esac
+    done
+  fi
+
+  # If no match via explicit pattern, fall back to architecture heuristic
+  if [[ -z "$url_match" ]]; then
+    for u in $assets; do
+      if [[ "$u" =~ ($arch|amd64|x86_64|aarch64|arm64).*\.deb$ ]]; then
+        url_match="$u"
+        break
+      fi
+    done
+  fi
+
+  # Fallback: any .deb file
+  if [[ -z "$url_match" ]]; then
+    for u in $assets; do
+      [[ "$u" =~ \.deb$ ]] && url_match="$u" && break
+    done
+  fi
+
+  if [[ -z "$url_match" ]]; then
+    msg_error "No suitable .deb asset found for $app"
+    rm -rf "$tmpdir"
+    return 1
+  fi
+
+  filename="${url_match##*/}"
+  curl $download_timeout -fsSL -o "$tmpdir/$filename" "$url_match" || {
+    msg_error "Download failed: $url_match"
+    rm -rf "$tmpdir"
+    return 1
+  }
+
+  chmod 644 "$tmpdir/$filename"
+  # Prefer apt (resolves dependencies); fall back to raw dpkg
+  $STD apt-get install -y "$tmpdir/$filename" || {
+    $STD dpkg -i "$tmpdir/$filename" || {
+      msg_error "Both apt and dpkg installation failed"
+      rm -rf "$tmpdir"
+      return 1
+    }
+  }
+
+  ### Prebuild Mode ###
+  elif [[ "$mode" == "prebuild" ]]; then
+    # Strip optional surrounding double quotes from the pattern argument
+    local pattern="${6%\"}"
+    pattern="${pattern#\"}"
+    [[ -z "$pattern" ]] && {
+      msg_error "Mode 'prebuild' requires 6th parameter (asset filename pattern)"
+      rm -rf "$tmpdir"
+      return 1
+    }
+
+    # Pick the first asset whose filename matches the glob pattern
+    local asset_url=""
+    for u in $(echo "$json" | jq -r '.assets[].browser_download_url'); do
+      filename_candidate="${u##*/}"
+      case "$filename_candidate" in
+      $pattern)
+        asset_url="$u"
+        break
+        ;;
+      esac
+    done
+
+    [[ -z "$asset_url" ]] && {
+      msg_error "No asset matching '$pattern' found"
+      rm -rf "$tmpdir"
+      return 1
+    }
+
+    filename="${asset_url##*/}"
+    curl $download_timeout -fsSL -o "$tmpdir/$filename" "$asset_url" || {
+      msg_error "Download failed: $asset_url"
+      rm -rf "$tmpdir"
+      return 1
+    }
+
+    local unpack_tmp
+    unpack_tmp=$(mktemp -d)
+    mkdir -p "$target"
+    # CLEAN_INSTALL=1 wipes the target directory before deploying
+    if [[ "${CLEAN_INSTALL:-0}" == "1" ]]; then
+      rm -rf "${target:?}/"*
+    fi
+
+    # Unpack by extension (zip or tar.*/tgz)
+    if [[ "$filename" == *.zip ]]; then
+      if ! command -v unzip &>/dev/null; then
+        $STD apt-get install -y unzip
+      fi
+      unzip -q "$tmpdir/$filename" -d "$unpack_tmp"
+    elif [[ "$filename" == *.tar.* || "$filename" == *.tgz ]]; then
+      tar -xf "$tmpdir/$filename" -C "$unpack_tmp"
+    else
+      msg_error "Unsupported archive format: $filename"
+      rm -rf "$tmpdir" "$unpack_tmp"
+      return 1
+    fi
+
+    # If the archive contains exactly one top-level directory, strip it so
+    # the payload lands directly in $target
+    local top_dirs
+    top_dirs=$(find "$unpack_tmp" -mindepth 1 -maxdepth 1 -type d | wc -l)
+    local top_entries inner_dir
+    top_entries=$(find "$unpack_tmp" -mindepth 1 -maxdepth 1)
+    if [[ "$(echo "$top_entries" | wc -l)" -eq 1 && -d "$top_entries" ]]; then
+      # Strip leading folder
+      inner_dir="$top_entries"
+      shopt -s dotglob nullglob
+      if compgen -G "$inner_dir/*" >/dev/null; then
+        cp -r "$inner_dir"/* "$target/" || {
+          msg_error "Failed to copy contents from $inner_dir to $target"
+          rm -rf "$tmpdir" "$unpack_tmp"
+          return 1
+        }
+      else
+        msg_error "Inner directory is empty: $inner_dir"
+        rm -rf "$tmpdir" "$unpack_tmp"
+        return 1
+      fi
+      shopt -u dotglob nullglob
+    else
+      # Copy all contents
+      shopt -s dotglob nullglob
+      if compgen -G "$unpack_tmp/*" >/dev/null; then
+        cp -r "$unpack_tmp"/* "$target/" || {
+          msg_error "Failed to copy contents to $target"
+          rm -rf "$tmpdir" "$unpack_tmp"
+          return 1
+        }
+      else
+        msg_error "Unpacked archive is empty"
+        rm -rf "$tmpdir" "$unpack_tmp"
+        return 1
+      fi
+      shopt -u dotglob nullglob
+    fi
+
+  ### Singlefile Mode ###
+  elif [[ "$mode" == "singlefile" ]]; then
+    # Strip optional surrounding double quotes from the pattern argument
+    local pattern="${6%\"}"
+    pattern="${pattern#\"}"
+    [[ -z "$pattern" ]] && {
+      msg_error "Mode 'singlefile' requires 6th parameter (asset filename pattern)"
+      rm -rf "$tmpdir"
+      return 1
+    }
+
+    local asset_url=""
+    for u in $(echo "$json" | jq -r '.assets[].browser_download_url'); do
+      filename_candidate="${u##*/}"
+      case "$filename_candidate" in
+      $pattern)
+        asset_url="$u"
+        break
+        ;;
+      esac
+    done
+
+    [[ -z "$asset_url" ]] && {
+      msg_error "No asset matching '$pattern' found"
+      rm -rf "$tmpdir"
+      return 1
+    }
+
+    filename="${asset_url##*/}"
+    mkdir -p "$target"
+
+    # Install as $target/$app unless USE_ORIGINAL_FILENAME=true keeps the
+    # asset's original filename
+    local use_filename="${USE_ORIGINAL_FILENAME:-false}"
+    local target_file="$app"
+    [[ "$use_filename" == "true" ]] && target_file="$filename"
+
+    curl $download_timeout -fsSL -o "$target/$target_file" "$asset_url" || {
+      msg_error "Download failed: $asset_url"
+      rm -rf "$tmpdir"
+      return 1
+    }
+
+    # Jars are run via `java -jar`; everything else gets the executable bit
+    if [[ "$target_file" != *.jar && -f "$target/$target_file" ]]; then
+      chmod +x "$target/$target_file"
+    fi
+
+  else
+    msg_error "Unknown mode: $mode"
+    rm -rf "$tmpdir"
+    return 1
+  fi
+
+  # Record the deployed version for future update short-circuits
+  echo "$version" >"$version_file"
+  msg_ok "Deployed: $app ($version)"
+  rm -rf "$tmpdir"
+}
+
+# ------------------------------------------------------------------------------
+# Installs a local IP updater script using networkd-dispatcher.
+#
+# Description:
+# - Stores current IP in /run/local-ip.env
+# - Automatically runs on network changes
+# ------------------------------------------------------------------------------
+
+function setup_local_ip_helper() {
+  # Installs a networkd-dispatcher hook that records the host's current
+  # local IP in /run/local-ip.env whenever an interface becomes routable.
+  local BASE_DIR="/usr/local/community-scripts/ip-management"
+  local SCRIPT_PATH="$BASE_DIR/update_local_ip.sh"
+  local IP_FILE="/run/local-ip.env"
+  local DISPATCHER_SCRIPT="/etc/networkd-dispatcher/routable.d/10-update-local-ip.sh"
+
+  mkdir -p "$BASE_DIR"
+
+  # Install networkd-dispatcher if not present
+  if ! dpkg -s networkd-dispatcher >/dev/null 2>&1; then
+    $STD apt-get update
+    $STD apt-get install -y networkd-dispatcher
+  fi
+
+  # Write update_local_ip.sh (quoted heredoc: nothing expands at write time)
+  cat <<'EOF' >"$SCRIPT_PATH"
+#!/bin/bash
+set -euo pipefail
+
+IP_FILE="/run/local-ip.env"
+mkdir -p "$(dirname "$IP_FILE")"
+
+get_current_ip() {
+  local targets=("8.8.8.8" "1.1.1.1" "192.168.1.1" "10.0.0.1" "172.16.0.1" "default")
+  local ip
+
+  for target in "${targets[@]}"; do
+    if [[ "$target" == "default" ]]; then
+      ip=$(ip route get 1 2>/dev/null | awk '{for(i=1;i<=NF;i++) if ($i=="src") print $(i+1)}')
+    else
+      ip=$(ip route get "$target" 2>/dev/null | awk '{for(i=1;i<=NF;i++) if ($i=="src") print $(i+1)}')
+    fi
+    if [[ -n "$ip" ]]; then
+      echo "$ip"
+      return 0
+    fi
+  done
+
+  return 1
+}
+
+current_ip="$(get_current_ip)"
+
+if [[ -z "$current_ip" ]]; then
+  echo "[ERROR] Could not detect local IP" >&2
+  exit 1
+fi
+
+if [[ -f "$IP_FILE" ]]; then
+  source "$IP_FILE"
+  [[ "$LOCAL_IP" == "$current_ip" ]] && exit 0
+fi
+
+echo "LOCAL_IP=$current_ip" > "$IP_FILE"
+echo "[INFO] LOCAL_IP updated to $current_ip"
+EOF
+
+  chmod +x "$SCRIPT_PATH"
+
+  # Install dispatcher hook.
+  # Fix: this previously read `cat <"$DISPATCHER_SCRIPT"` — attempting to
+  # READ the (not yet existing) hook file and then executing the following
+  # lines directly in this shell. Write the hook via an unquoted heredoc so
+  # $SCRIPT_PATH expands into the generated file.
+  mkdir -p "$(dirname "$DISPATCHER_SCRIPT")"
+  cat <<EOF >"$DISPATCHER_SCRIPT"
+#!/bin/bash
+$SCRIPT_PATH
+EOF
+
+  chmod +x "$DISPATCHER_SCRIPT"
+  systemctl enable -q --now networkd-dispatcher.service
+}
+
+# ------------------------------------------------------------------------------
+# Loads LOCAL_IP from persistent store or detects if missing.
+#
+# Description:
+# - Loads from /run/local-ip.env or performs runtime lookup
+# ------------------------------------------------------------------------------
+
+function import_local_ip() {
+  # Exports LOCAL_IP, loading it from /run/local-ip.env when available and
+  # otherwise detecting it at runtime via `ip route get`.
+  local IP_FILE="/run/local-ip.env"
+  if [[ -f "$IP_FILE" ]]; then
+    # shellcheck disable=SC1090
+    source "$IP_FILE"
+  fi
+
+  # Fall back to live detection when the env file was absent or empty
+  if [[ -z "${LOCAL_IP:-}" ]]; then
+    # Ask the kernel which source address it would use towards a list of
+    # well-known targets; "default" falls back to the default route
+    get_current_ip() {
+      local targets=("8.8.8.8" "1.1.1.1" "192.168.1.1" "10.0.0.1" "172.16.0.1" "default")
+      local ip
+
+      for target in "${targets[@]}"; do
+        if [[ "$target" == "default" ]]; then
+          ip=$(ip route get 1 2>/dev/null | awk '{for(i=1;i<=NF;i++) if ($i=="src") print $(i+1)}')
+        else
+          ip=$(ip route get "$target" 2>/dev/null | awk '{for(i=1;i<=NF;i++) if ($i=="src") print $(i+1)}')
+        fi
+        if [[ -n "$ip" ]]; then
+          echo "$ip"
+          return 0
+        fi
+      done
+
+      return 1
+    }
+
+    LOCAL_IP="$(get_current_ip || true)"
+    if [[ -z "$LOCAL_IP" ]]; then
+      msg_error "Could not determine LOCAL_IP"
+      return 1
+    fi
+  fi
+
+  export LOCAL_IP
+}
+
+# ------------------------------------------------------------------------------
+# Downloads file with optional progress indicator using pv.
+#
+# Arguments:
+# $1 - URL
+# $2 - Destination path
+# ------------------------------------------------------------------------------
+
+function download_with_progress() {
+  # Downloads a file, showing a pv progress bar when the size is known.
+  #
+  # Arguments:
+  #   $1 - URL
+  #   $2 - destination path
+  local url="$1"
+  local output="$2"
+  # Stop the spinner so it does not fight with the progress output
+  if [ -n "$SPINNER_PID" ] && ps -p "$SPINNER_PID" >/dev/null; then kill "$SPINNER_PID" >/dev/null; fi
+
+  if ! command -v pv &>/dev/null; then
+    $STD apt-get install -y pv
+  fi
+  set -o pipefail
+
+  # Read the size from the response headers. Match the header name
+  # case-insensitively (HTTP/2 sends lowercase "content-length") and keep
+  # only the last value so redirect chains don't yield a multi-line result.
+  local content_length
+  content_length=$(curl -fsSLI "$url" | awk 'tolower($1) == "content-length:" {print $2}' | tail -n1 | tr -d '\r' || true)
+
+  if [[ -z "$content_length" ]]; then
+    # Unknown size: fall back to curl's own progress bar
+    if ! curl -fL# -o "$output" "$url"; then
+      msg_error "Download failed"
+      return 1
+    fi
+  else
+    if ! curl -fsSL "$url" | pv -s "$content_length" >"$output"; then
+      msg_error "Download failed"
+      return 1
+    fi
+  fi
+}
+
+# ------------------------------------------------------------------------------
+# Installs or upgrades uv (Python package manager) from GitHub releases.
+# - Downloads platform-specific tarball (no install.sh!)
+# - Extracts uv binary
+# - Places it in /usr/local/bin
+# - Optionally installs a specific Python version via uv
+# ------------------------------------------------------------------------------
+
+function setup_uv() {
+  # Installs or updates the uv Python package manager from the official
+  # GitHub release tarball into /usr/local/bin, and optionally installs a
+  # Python version through uv.
+  #
+  # Variables:
+  #   PYTHON_VERSION - optional major.minor (e.g. "3.12") to install via uv
+  local UV_BIN="/usr/local/bin/uv"
+  local TMP_DIR
+  TMP_DIR=$(mktemp -d)
+
+  # Determine system architecture
+  local ARCH
+  ARCH=$(uname -m)
+  local UV_TAR
+
+  # Pick the musl build on Alpine, the glibc build elsewhere
+  case "$ARCH" in
+  x86_64)
+    if grep -qi "alpine" /etc/os-release; then
+      UV_TAR="uv-x86_64-unknown-linux-musl.tar.gz"
+    else
+      UV_TAR="uv-x86_64-unknown-linux-gnu.tar.gz"
+    fi
+    ;;
+  aarch64)
+    if grep -qi "alpine" /etc/os-release; then
+      UV_TAR="uv-aarch64-unknown-linux-musl.tar.gz"
+    else
+      UV_TAR="uv-aarch64-unknown-linux-gnu.tar.gz"
+    fi
+    ;;
+  *)
+    msg_error "Unsupported architecture: $ARCH"
+    rm -rf "$TMP_DIR"
+    return 1
+    ;;
+  esac
+
+  # Get latest version from GitHub
+  local LATEST_VERSION
+  LATEST_VERSION=$(curl -fsSL https://api.github.com/repos/astral-sh/uv/releases/latest |
+    grep '"tag_name":' | cut -d '"' -f4 | sed 's/^v//')
+
+  if [[ -z "$LATEST_VERSION" ]]; then
+    msg_error "Could not fetch latest uv version from GitHub."
+    rm -rf "$TMP_DIR"
+    return 1
+  fi
+
+  # Check if uv is already up to date; if so only ensure PATH and exit
+  if [[ -x "$UV_BIN" ]]; then
+    local INSTALLED_VERSION
+    INSTALLED_VERSION=$($UV_BIN -V | awk '{print $2}')
+    if [[ "$INSTALLED_VERSION" == "$LATEST_VERSION" ]]; then
+      rm -rf "$TMP_DIR"
+      [[ ":$PATH:" != *":/usr/local/bin:"* ]] && export PATH="/usr/local/bin:$PATH"
+      return 0
+    else
+      msg_info "Updating uv from $INSTALLED_VERSION to $LATEST_VERSION"
+    fi
+  else
+    msg_info "Setup uv $LATEST_VERSION"
+  fi
+
+  # Download and install manually
+  local UV_URL="https://github.com/astral-sh/uv/releases/latest/download/${UV_TAR}"
+  if ! curl -fsSL "$UV_URL" -o "$TMP_DIR/uv.tar.gz"; then
+    msg_error "Failed to download $UV_URL"
+    rm -rf "$TMP_DIR"
+    return 1
+  fi
+
+  if ! tar -xzf "$TMP_DIR/uv.tar.gz" -C "$TMP_DIR"; then
+    msg_error "Failed to extract uv archive"
+    rm -rf "$TMP_DIR"
+    return 1
+  fi
+
+  # The tarball contains a single versioned directory holding the binary
+  install -m 755 "$TMP_DIR"/*/uv "$UV_BIN" || {
+    msg_error "Failed to install uv binary"
+    rm -rf "$TMP_DIR"
+    return 1
+  }
+
+  rm -rf "$TMP_DIR"
+  #ensure_usr_local_bin_persist
+  $STD uv python update-shell || {
+    msg_error "Failed to update uv shell integration"
+    return 1
+  }
+  msg_ok "Setup uv $LATEST_VERSION"
+
+  # Optional: install specific Python version
+  if [[ -n "${PYTHON_VERSION:-}" ]]; then
+    # Resolve PYTHON_VERSION (major.minor) to the newest matching patch level
+    local VERSION_MATCH
+    VERSION_MATCH=$(uv python list --only-downloads |
+      grep -E "^cpython-${PYTHON_VERSION//./\\.}\.[0-9]+-linux" |
+      cut -d'-' -f2 | sort -V | tail -n1)
+
+    if [[ -z "$VERSION_MATCH" ]]; then
+      msg_error "No matching Python $PYTHON_VERSION.x version found via uv"
+      return 1
+    fi
+
+    # Only install when this exact version is not yet managed by uv
+    if ! uv python list | grep -q "cpython-${VERSION_MATCH}-linux.*uv/python"; then
+      if ! $STD uv python install "$VERSION_MATCH"; then
+        msg_error "Failed to install Python $VERSION_MATCH via uv"
+        return 1
+      fi
+      msg_ok "Setup Python $VERSION_MATCH via uv"
+    fi
+  fi
+}
+
+# ------------------------------------------------------------------------------
+# Ensures /usr/local/bin is permanently in system PATH.
+#
+# Description:
+# - Adds to /etc/profile.d if not present
+# ------------------------------------------------------------------------------
+
+function ensure_usr_local_bin_persist() {
+  # Persists /usr/local/bin in PATH for login shells via /etc/profile.d.
+  # Skipped on Proxmox hosts (pveversion present) and when already configured.
+  local profile_script="/etc/profile.d/custom_path.sh"
+
+  # Never touch a Proxmox host's profile
+  if command -v pveversion &>/dev/null; then
+    return
+  fi
+
+  # Already set up on a previous run
+  if [[ -f "$profile_script" ]]; then
+    return
+  fi
+
+  echo 'export PATH="/usr/local/bin:$PATH"' >"$profile_script"
+  chmod +x "$profile_script"
+}
+
+# ------------------------------------------------------------------------------
+# Installs or updates Ghostscript (gs) from source.
+#
+# Description:
+# - Fetches latest release
+# - Builds and installs system-wide
+# ------------------------------------------------------------------------------
+
+function setup_gs() {
+  # Builds and installs the latest Ghostscript release from source when the
+  # installed `gs` is older than the newest GitHub release; no-op otherwise.
+  mkdir -p /tmp
+  local TMP_DIR CURRENT_VERSION RELEASE_JSON LATEST_VERSION LATEST_VERSION_DOTTED EXIT_CODE
+  TMP_DIR=$(mktemp -d)
+  CURRENT_VERSION=$(gs --version 2>/dev/null || echo "0")
+
+  RELEASE_JSON=$(curl -fsSL https://api.github.com/repos/ArtifexSoftware/ghostpdl-downloads/releases/latest)
+  # Tag looks like "gs10031"; the release name carries the dotted version
+  LATEST_VERSION=$(echo "$RELEASE_JSON" | grep '"tag_name":' | head -n1 | cut -d '"' -f4 | sed 's/^gs//')
+  LATEST_VERSION_DOTTED=$(echo "$RELEASE_JSON" | grep '"name":' | head -n1 | grep -o '[0-9]\+\.[0-9]\+\.[0-9]\+')
+
+  if [[ -z "$LATEST_VERSION" ]]; then
+    msg_error "Could not determine latest Ghostscript version from GitHub."
+    rm -rf "$TMP_DIR"
+    return
+  fi
+
+  # Nothing to do when the installed version is already current
+  if dpkg --compare-versions "$CURRENT_VERSION" ge "$LATEST_VERSION_DOTTED"; then
+    rm -rf "$TMP_DIR"
+    return
+  fi
+
+  msg_info "Setup Ghostscript $LATEST_VERSION_DOTTED (Patience)"
+  # Fail early if the source archive cannot be fetched (was unchecked before)
+  if ! curl -fsSL "https://github.com/ArtifexSoftware/ghostpdl-downloads/releases/download/gs${LATEST_VERSION}/ghostscript-${LATEST_VERSION_DOTTED}.tar.gz" -o "$TMP_DIR/ghostscript.tar.gz"; then
+    msg_error "Failed to download Ghostscript archive."
+    rm -rf "$TMP_DIR"
+    return
+  fi
+
+  if ! tar -xzf "$TMP_DIR/ghostscript.tar.gz" -C "$TMP_DIR"; then
+    msg_error "Failed to extract Ghostscript archive."
+    rm -rf "$TMP_DIR"
+    return
+  fi
+
+  cd "$TMP_DIR/ghostscript-${LATEST_VERSION_DOTTED}" || {
+    msg_error "Failed to enter Ghostscript source directory."
+    rm -rf "$TMP_DIR"
+    # Fix: previously fell through and ran configure/make in the wrong dir
+    return
+  }
+  $STD apt-get install -y build-essential libpng-dev zlib1g-dev
+  $STD ./configure >/dev/null
+  $STD make
+  # Runs as root like every other command here; `sudo` (used previously)
+  # may not even be installed in the container
+  $STD make install
+  EXIT_CODE=$?
+  hash -r
+  # Some builds install only to /usr/local/bin; expose gs on /usr/bin too
+  if [[ ! -x "$(command -v gs)" ]]; then
+    if [[ -x /usr/local/bin/gs ]]; then
+      ln -sf /usr/local/bin/gs /usr/bin/gs
+    fi
+  fi
+
+  rm -rf "$TMP_DIR"
+
+  if [[ $EXIT_CODE -eq 0 ]]; then
+    msg_ok "Setup Ghostscript $LATEST_VERSION_DOTTED"
+  else
+    msg_error "Ghostscript installation failed"
+  fi
+}
+
+# ------------------------------------------------------------------------------
+# Installs rbenv and ruby-build, installs Ruby and optionally Rails.
+#
+# Description:
+# - Downloads rbenv and ruby-build from GitHub
+# - Compiles and installs target Ruby version
+# - Optionally installs Rails via gem
+#
+# Variables:
+# RUBY_VERSION - Ruby version to install (default: 3.4.4)
+# RUBY_INSTALL_RAILS - true/false to install Rails (default: true)
+# ------------------------------------------------------------------------------
+
+function setup_ruby() {
+  # Installs Ruby via rbenv + ruby-build (both fetched as GitHub release
+  # tarballs), sets the requested version as the global default, and
+  # optionally installs Rails.
+  #
+  # Variables:
+  #   RUBY_VERSION       - Ruby version to install (default: 3.4.4)
+  #   RUBY_INSTALL_RAILS - true/false to install Rails (default: true)
+  local RUBY_VERSION="${RUBY_VERSION:-3.4.4}"
+  local RUBY_INSTALL_RAILS="${RUBY_INSTALL_RAILS:-true}"
+
+  local RBENV_DIR="$HOME/.rbenv"
+  local RBENV_BIN="$RBENV_DIR/bin/rbenv"
+  local PROFILE_FILE="$HOME/.profile"
+  local TMP_DIR
+  TMP_DIR=$(mktemp -d)
+
+  msg_info "Setup Ruby $RUBY_VERSION"
+
+  # Fetch and unpack the latest rbenv release, then build its bash extension
+  local RBENV_RELEASE
+  RBENV_RELEASE=$(curl -fsSL https://api.github.com/repos/rbenv/rbenv/releases/latest | grep '"tag_name":' | cut -d '"' -f4 | sed 's/^v//')
+  if [[ -z "$RBENV_RELEASE" ]]; then
+    msg_error "Failed to fetch latest rbenv version"
+    rm -rf "$TMP_DIR"
+    return 1
+  fi
+
+  curl -fsSL "https://github.com/rbenv/rbenv/archive/refs/tags/v${RBENV_RELEASE}.tar.gz" -o "$TMP_DIR/rbenv.tar.gz"
+  tar -xzf "$TMP_DIR/rbenv.tar.gz" -C "$TMP_DIR"
+  mkdir -p "$RBENV_DIR"
+  cp -r "$TMP_DIR/rbenv-${RBENV_RELEASE}/." "$RBENV_DIR/"
+  cd "$RBENV_DIR" && src/configure && $STD make -C src
+
+  # Install the ruby-build plugin (provides `rbenv install`)
+  local RUBY_BUILD_RELEASE
+  RUBY_BUILD_RELEASE=$(curl -fsSL https://api.github.com/repos/rbenv/ruby-build/releases/latest | grep '"tag_name":' | cut -d '"' -f4 | sed 's/^v//')
+  if [[ -z "$RUBY_BUILD_RELEASE" ]]; then
+    msg_error "Failed to fetch latest ruby-build version"
+    rm -rf "$TMP_DIR"
+    return 1
+  fi
+
+  curl -fsSL "https://github.com/rbenv/ruby-build/archive/refs/tags/v${RUBY_BUILD_RELEASE}.tar.gz" -o "$TMP_DIR/ruby-build.tar.gz"
+  tar -xzf "$TMP_DIR/ruby-build.tar.gz" -C "$TMP_DIR"
+  mkdir -p "$RBENV_DIR/plugins/ruby-build"
+  cp -r "$TMP_DIR/ruby-build-${RUBY_BUILD_RELEASE}/." "$RBENV_DIR/plugins/ruby-build/"
+  echo "$RUBY_BUILD_RELEASE" >"$RBENV_DIR/plugins/ruby-build/RUBY_BUILD_version.txt"
+
+  # Make rbenv available in future login shells...
+  if ! grep -q 'rbenv init' "$PROFILE_FILE"; then
+    echo 'export PATH="$HOME/.rbenv/bin:$PATH"' >>"$PROFILE_FILE"
+    echo 'eval "$(rbenv init -)"' >>"$PROFILE_FILE"
+  fi
+
+  # ...and in the current shell
+  export PATH="$RBENV_DIR/bin:$PATH"
+  eval "$("$RBENV_BIN" init - bash)"
+
+  # Compile the requested Ruby only if not already present
+  if ! "$RBENV_BIN" versions --bare | grep -qx "$RUBY_VERSION"; then
+    $STD "$RBENV_BIN" install "$RUBY_VERSION"
+  fi
+
+  "$RBENV_BIN" global "$RUBY_VERSION"
+  hash -r
+
+  if [[ "$RUBY_INSTALL_RAILS" == "true" ]]; then
+    msg_info "Setup Rails via gem"
+    gem install rails
+    msg_ok "Setup Rails $(rails -v)"
+  fi
+
+  rm -rf "$TMP_DIR"
+  msg_ok "Setup Ruby $RUBY_VERSION"
+}
+
+# ------------------------------------------------------------------------------
+# Creates and installs self-signed certificates.
+#
+# Description:
+# - Create a self-signed certificate with option to override application name
+#
+# Variables:
+# APP - Application name (default: $APPLICATION variable)
+# ------------------------------------------------------------------------------
+function create_selfsigned_certs() {
+  # Generates a one-year self-signed RSA-4096 key/certificate pair under
+  # /etc/ssl/{private,certs} named after the application.
+  #
+  # Variables:
+  #   APP         - application name override
+  #   APPLICATION - fallback name; lowercased with spaces stripped
+  local app
+  app=${APP:-$(echo "${APPLICATION,,}" | tr -d ' ')}
+
+  $STD openssl req -x509 -nodes -days 365 -newkey rsa:4096 \
+    -keyout /etc/ssl/private/"$app"-selfsigned.key \
+    -out /etc/ssl/certs/"$app"-selfsigned.crt \
+    -subj "/C=US/O=$app/OU=Domain Control Validated/CN=localhost"
+}
+
+# ------------------------------------------------------------------------------
+# Installs Rust toolchain and optional global crates via cargo.
+#
+# Description:
+# - Installs rustup (if missing)
+# - Installs or updates desired Rust toolchain (stable, nightly, or versioned)
+# - Installs or updates specified global crates using `cargo install`
+#
+# Notes:
+# - Skips crate install if exact version is already present
+# - Updates crate if newer version or different version is requested
+#
+# Variables:
+# RUST_TOOLCHAIN - Rust toolchain to install (default: stable)
+# RUST_CRATES - Comma-separated list of crates (e.g. "cargo-edit,wasm-pack@0.12.1")
+# ------------------------------------------------------------------------------
+
+function setup_rust() {
+  # Installs the Rust toolchain via rustup and optionally a set of global
+  # crates, updating either when already present.
+  #
+  # Variables:
+  #   RUST_TOOLCHAIN - toolchain to install (default: stable)
+  #   RUST_CRATES    - comma-separated crates, optionally pinned "name@ver"
+  local RUST_TOOLCHAIN="${RUST_TOOLCHAIN:-stable}"
+  local RUST_CRATES="${RUST_CRATES:-}"
+  local CARGO_BIN="${HOME}/.cargo/bin"
+
+  # rustup & toolchain
+  if ! command -v rustup &>/dev/null; then
+    msg_info "Setup Rust"
+    curl -fsSL https://sh.rustup.rs | $STD sh -s -- -y --default-toolchain "$RUST_TOOLCHAIN"
+    export PATH="$CARGO_BIN:$PATH"
+    echo 'export PATH="$HOME/.cargo/bin:$PATH"' >>"$HOME/.profile"
+    msg_ok "Setup Rust"
+  else
+    # rustup already present: ensure the requested toolchain is installed,
+    # selected as default, and up to date
+    $STD rustup install "$RUST_TOOLCHAIN"
+    $STD rustup default "$RUST_TOOLCHAIN"
+    $STD rustup update "$RUST_TOOLCHAIN"
+    msg_ok "Rust toolchain set to $RUST_TOOLCHAIN"
+  fi
+
+  # install/update crates
+  if [[ -n "$RUST_CRATES" ]]; then
+    IFS=',' read -ra CRATES <<<"$RUST_CRATES"
+    for crate in "${CRATES[@]}"; do
+      # Split optional "name@version" pins
+      local NAME VER INSTALLED_VER
+      if [[ "$crate" == *"@"* ]]; then
+        NAME="${crate%@*}"
+        VER="${crate##*@}"
+      else
+        NAME="$crate"
+        VER=""
+      fi
+
+      # Currently installed version of this crate, if any
+      INSTALLED_VER=$(cargo install --list 2>/dev/null | awk "/^$NAME v[0-9]/ {print \$2}" | tr -d 'v')
+
+      if [[ -n "$INSTALLED_VER" ]]; then
+        if [[ -n "$VER" && "$VER" != "$INSTALLED_VER" ]]; then
+          # Pinned version differs from installed: force reinstall at pin
+          msg_info "Update $NAME: $INSTALLED_VER → $VER"
+          $STD cargo install "$NAME" --version "$VER" --force
+          msg_ok "Updated $NAME to $VER"
+        elif [[ -z "$VER" ]]; then
+          # No pin: refresh to latest
+          msg_info "Update $NAME: $INSTALLED_VER → latest"
+          $STD cargo install "$NAME" --force
+          msg_ok "Updated $NAME to latest"
+        fi
+      else
+        msg_info "Setup $NAME ${VER:+($VER)}"
+        $STD cargo install "$NAME" ${VER:+--version "$VER"}
+        msg_ok "Setup $NAME ${VER:-latest}"
+      fi
+    done
+    msg_ok "Setup Rust"
+  fi
+}
+
+# ------------------------------------------------------------------------------
+# Installs Adminer (Debian/Ubuntu via APT, Alpine via direct download).
+#
+# Description:
+# - Adds Adminer to Apache or web root
+# - Supports Alpine and Debian-based systems
+# ------------------------------------------------------------------------------
+
+function setup_adminer() {
+  # Deploy Adminer: single-file download on Alpine, distro package plus
+  # Apache config on Debian/Ubuntu.
+  if grep -qi alpine /etc/os-release; then
+    msg_info "Setup Adminer (Alpine)"
+    local webroot="/var/www/localhost/htdocs/adminer"
+    mkdir -p "$webroot"
+    curl -fsSL https://github.com/vrana/adminer/releases/latest/download/adminer.php \
+      -o "$webroot/index.php" || {
+      msg_error "Failed to download Adminer"
+      return 1
+    }
+    msg_ok "Adminer available at /adminer (Alpine)"
+  else
+    msg_info "Setup Adminer (Debian/Ubuntu)"
+    $STD apt-get install -y adminer
+    $STD a2enconf adminer
+    $STD systemctl reload apache2
+    msg_ok "Adminer available at /adminer (Debian/Ubuntu)"
+  fi
+}
+
+# ------------------------------------------------------------------------------
+# Installs or updates yq (mikefarah/yq - Go version).
+#
+# Description:
+# - Checks if yq is installed and from correct source
+# - Compares with latest release on GitHub
+# - Updates if outdated or wrong implementation
+# ------------------------------------------------------------------------------
+
+# Installs or updates mikefarah/yq (the Go implementation) to /usr/local/bin,
+# pinned to the latest GitHub release. A non-mikefarah `yq` (e.g. the Python
+# wrapper) is removed first. Linux amd64 binary only.
+function setup_yq() {
+  local TMP_DIR
+  TMP_DIR=$(mktemp -d)
+  local CURRENT_VERSION=""
+  local BINARY_PATH="/usr/local/bin/yq"
+  local GITHUB_REPO="mikefarah/yq"
+
+  # jq is needed to parse the GitHub releases API response.
+  if ! command -v jq &>/dev/null; then
+    $STD apt-get update
+    $STD apt-get install -y jq || {
+      msg_error "Failed to install jq"
+      rm -rf "$TMP_DIR"
+      return 1
+    }
+  fi
+
+  if command -v yq &>/dev/null; then
+    if ! yq --version 2>&1 | grep -q 'mikefarah'; then
+      # A different yq implementation is installed – replace it.
+      rm -f "$(command -v yq)"
+    else
+      CURRENT_VERSION=$(yq --version | awk '{print $NF}' | sed 's/^v//')
+    fi
+  fi
+
+  local RELEASE_JSON
+  RELEASE_JSON=$(curl -fsSL "https://api.github.com/repos/${GITHUB_REPO}/releases/latest")
+  local LATEST_VERSION
+  LATEST_VERSION=$(echo "$RELEASE_JSON" | jq -r '.tag_name' | sed 's/^v//')
+
+  if [[ -z "$LATEST_VERSION" ]]; then
+    msg_error "Could not determine latest yq version from GitHub."
+    rm -rf "$TMP_DIR"
+    return 1
+  fi
+
+  # Already up to date – nothing to do.
+  # Bugfix: this early return previously leaked $TMP_DIR.
+  if [[ -n "$CURRENT_VERSION" && "$CURRENT_VERSION" == "$LATEST_VERSION" ]]; then
+    rm -rf "$TMP_DIR"
+    return
+  fi
+
+  msg_info "Setup yq ($LATEST_VERSION)"
+  # Bugfix: abort cleanly if the download fails instead of chmod/mv'ing a
+  # missing or partial file into /usr/local/bin.
+  if ! curl -fsSL "https://github.com/${GITHUB_REPO}/releases/download/v${LATEST_VERSION}/yq_linux_amd64" -o "$TMP_DIR/yq"; then
+    msg_error "Failed to download yq v${LATEST_VERSION}"
+    rm -rf "$TMP_DIR"
+    return 1
+  fi
+  chmod +x "$TMP_DIR/yq"
+  mv "$TMP_DIR/yq" "$BINARY_PATH"
+
+  if [[ ! -x "$BINARY_PATH" ]]; then
+    msg_error "Failed to install yq to $BINARY_PATH"
+    rm -rf "$TMP_DIR"
+    return 1
+  fi
+
+  rm -rf "$TMP_DIR"
+  # Drop any cached path to a previously removed yq binary.
+  hash -r
+
+  local FINAL_VERSION
+  FINAL_VERSION=$("$BINARY_PATH" --version 2>/dev/null | awk '{print $NF}')
+  if [[ "$FINAL_VERSION" == "v$LATEST_VERSION" ]]; then
+    msg_ok "Setup yq ($LATEST_VERSION)"
+  else
+    msg_error "yq installation incomplete or version mismatch"
+  fi
+}
+
+# ------------------------------------------------------------------------------
+# Installs ImageMagick 7 from source (Debian/Ubuntu only).
+#
+# Description:
+# - Downloads the latest ImageMagick source tarball
+# - Builds and installs ImageMagick to /usr/local
+# - Configures dynamic linker (ldconfig)
+#
+# Notes:
+# - Requires: build-essential, libtool, libjpeg-dev, libpng-dev, etc.
+# ------------------------------------------------------------------------------
+function setup_imagemagick() {
+  # Build ImageMagick 7 from the latest upstream source tarball and install
+  # it under /usr/local. Skipped when a `magick` binary is already on PATH.
+  local TMP_DIR
+  TMP_DIR=$(mktemp -d)
+  local VERSION=""
+  local BINARY_PATH="/usr/local/bin/magick"
+
+  if command -v magick &>/dev/null; then
+    VERSION=$(magick -version | awk '/^Version/ {print $3}')
+    msg_ok "ImageMagick already installed ($VERSION)"
+    return 0
+  fi
+
+  msg_info "Setup ImageMagick (Patience)"
+  # Build toolchain plus dev headers for the delegate libraries
+  # (JPEG/PNG/TIFF/WebP/HEIF/RAW/FFTW/...).
+  $STD apt-get update
+  $STD apt-get install -y \
+    build-essential \
+    libtool \
+    libjpeg-dev \
+    libpng-dev \
+    libtiff-dev \
+    libwebp-dev \
+    libheif-dev \
+    libde265-dev \
+    libopenjp2-7-dev \
+    libxml2-dev \
+    liblcms2-dev \
+    libfreetype6-dev \
+    libraw-dev \
+    libfftw3-dev \
+    liblqr-1-0-dev \
+    libgsl-dev \
+    pkg-config \
+    ghostscript
+
+  # Upstream "latest" tarball; extracted dir is ImageMagick-<version>.
+  curl -fsSL https://imagemagick.org/archive/ImageMagick.tar.gz -o "$TMP_DIR/ImageMagick.tar.gz"
+  tar -xzf "$TMP_DIR/ImageMagick.tar.gz" -C "$TMP_DIR"
+  cd "$TMP_DIR"/ImageMagick-* || {
+    msg_error "Source extraction failed"
+    rm -rf "$TMP_DIR"
+    return 1
+  }
+
+  # Shared libraries only; registered with the dynamic linker below.
+  ./configure --disable-static >/dev/null
+  $STD make
+  $STD make install
+  $STD ldconfig /usr/local/lib
+
+  if [[ ! -x "$BINARY_PATH" ]]; then
+    msg_error "ImageMagick installation failed"
+    rm -rf "$TMP_DIR"
+    return 1
+  fi
+
+  VERSION=$("$BINARY_PATH" -version | awk '/^Version/ {print $3}')
+  rm -rf "$TMP_DIR"
+  # Keep /usr/local/bin on PATH across shells (helper defined elsewhere).
+  ensure_usr_local_bin_persist
+  msg_ok "Setup ImageMagick $VERSION"
+}
+
+# ------------------------------------------------------------------------------
+# Installs FFmpeg from source or prebuilt binary (Debian/Ubuntu only).
+#
+# Description:
+# - Downloads and builds FFmpeg from GitHub (https://github.com/FFmpeg/FFmpeg)
+# - Supports specific version override via FFMPEG_VERSION (e.g. n7.1.1)
+# - Supports build profile via FFMPEG_TYPE:
+# - minimal : x264, vpx, mp3 only
+# - medium : adds subtitles, fonts, opus, vorbis
+# - full : adds dav1d, svt-av1, zlib, numa
+# - binary : downloads static build (johnvansickle.com)
+# - Defaults to latest stable version and full feature set
+#
+# Notes:
+# - Requires: curl, jq, build-essential, and matching codec libraries
+# - Result is installed to /usr/local/bin/ffmpeg
+# ------------------------------------------------------------------------------
+
+# Builds and installs FFmpeg (see the header comment above for the
+# FFMPEG_VERSION / FFMPEG_TYPE contract). Result: /usr/local/bin/ffmpeg.
+function setup_ffmpeg() {
+  local TMP_DIR
+  TMP_DIR=$(mktemp -d)
+  local GITHUB_REPO="FFmpeg/FFmpeg"
+  local VERSION="${FFMPEG_VERSION:-latest}"
+  local TYPE="${FFMPEG_TYPE:-full}"
+  local BIN_PATH="/usr/local/bin/ffmpeg"
+
+  # Binary fallback mode: static amd64 build, no compilation.
+  if [[ "$TYPE" == "binary" ]]; then
+    msg_info "Installing FFmpeg (static binary)"
+    curl -fsSL https://johnvansickle.com/ffmpeg/releases/ffmpeg-release-amd64-static.tar.xz -o "$TMP_DIR/ffmpeg.tar.xz"
+    tar -xf "$TMP_DIR/ffmpeg.tar.xz" -C "$TMP_DIR"
+    local EXTRACTED_DIR
+    EXTRACTED_DIR=$(find "$TMP_DIR" -maxdepth 1 -type d -name "ffmpeg-*")
+    cp "$EXTRACTED_DIR/ffmpeg" "$BIN_PATH"
+    cp "$EXTRACTED_DIR/ffprobe" /usr/local/bin/ffprobe
+    chmod +x "$BIN_PATH" /usr/local/bin/ffprobe
+    rm -rf "$TMP_DIR"
+    msg_ok "Installed FFmpeg binary ($($BIN_PATH -version | head -n1))"
+    return
+  fi
+
+  # jq is needed to parse the GitHub tag list.
+  if ! command -v jq &>/dev/null; then
+    $STD apt-get update
+    $STD apt-get install -y jq
+  fi
+
+  # Auto-detect latest stable version if none specified (tags look like n7.1.1).
+  if [[ "$VERSION" == "latest" || -z "$VERSION" ]]; then
+    msg_info "Resolving latest FFmpeg tag"
+    VERSION=$(curl -fsSL "https://api.github.com/repos/${GITHUB_REPO}/tags" |
+      jq -r '.[].name' |
+      grep -E '^n[0-9]+\.[0-9]+\.[0-9]+$' |
+      sort -V | tail -n1)
+  fi
+
+  if [[ -z "$VERSION" ]]; then
+    msg_error "Could not determine FFmpeg version"
+    rm -rf "$TMP_DIR"
+    return 1
+  fi
+
+  msg_info "Installing FFmpeg ${VERSION} ($TYPE)"
+
+  # Dependency selection per build profile.
+  local DEPS=(build-essential yasm nasm pkg-config)
+  case "$TYPE" in
+  minimal)
+    DEPS+=(libx264-dev libvpx-dev libmp3lame-dev)
+    ;;
+  medium)
+    DEPS+=(libx264-dev libvpx-dev libmp3lame-dev libfreetype6-dev libass-dev libopus-dev libvorbis-dev)
+    ;;
+  full)
+    DEPS+=(
+      libx264-dev libx265-dev libvpx-dev libmp3lame-dev
+      libfreetype6-dev libass-dev libopus-dev libvorbis-dev
+      libdav1d-dev libsvtav1-dev zlib1g-dev libnuma-dev
+    )
+    ;;
+  *)
+    msg_error "Invalid FFMPEG_TYPE: $TYPE"
+    rm -rf "$TMP_DIR"
+    return 1
+    ;;
+  esac
+
+  $STD apt-get update
+  $STD apt-get install -y "${DEPS[@]}"
+
+  curl -fsSL "https://github.com/${GITHUB_REPO}/archive/refs/tags/${VERSION}.tar.gz" -o "$TMP_DIR/ffmpeg.tar.gz"
+  tar -xzf "$TMP_DIR/ffmpeg.tar.gz" -C "$TMP_DIR"
+  cd "$TMP_DIR/FFmpeg-"* || {
+    msg_error "Source extraction failed"
+    rm -rf "$TMP_DIR"
+    return 1
+  }
+
+  # Base configure flags common to all profiles.
+  local args=(
+    --enable-gpl
+    --enable-shared
+    --enable-nonfree
+    --disable-static
+    --enable-libx264
+    --enable-libvpx
+    --enable-libmp3lame
+  )
+
+  if [[ "$TYPE" != "minimal" ]]; then
+    args+=(--enable-libfreetype --enable-libass --enable-libopus --enable-libvorbis)
+  fi
+
+  if [[ "$TYPE" == "full" ]]; then
+    args+=(--enable-libx265 --enable-libdav1d --enable-zlib)
+  fi
+
+  if [[ ${#args[@]} -eq 0 ]]; then
+    msg_error "FFmpeg configure args array is empty – aborting."
+    rm -rf "$TMP_DIR"
+    return 1
+  fi
+
+  ./configure "${args[@]}" >"$TMP_DIR/configure.log" 2>&1 || {
+    msg_error "FFmpeg ./configure failed (see $TMP_DIR/configure.log)"
+    tail -n 20 "$TMP_DIR/configure.log"
+    rm -rf "$TMP_DIR"
+    return 1
+  }
+
+  $STD make -j"$(nproc)"
+  $STD make install
+  # Register /usr/local/lib so the shared libav* libraries resolve.
+  echo "/usr/local/lib" >/etc/ld.so.conf.d/ffmpeg.conf
+  ldconfig
+
+  ldconfig -p | grep libavdevice >/dev/null || {
+    msg_error "libavdevice not registered with dynamic linker"
+    # Bugfix: this failure path previously leaked $TMP_DIR.
+    rm -rf "$TMP_DIR"
+    return 1
+  }
+
+  if ! command -v ffmpeg &>/dev/null; then
+    msg_error "FFmpeg installation failed"
+    rm -rf "$TMP_DIR"
+    return 1
+  fi
+
+  local FINAL_VERSION
+  FINAL_VERSION=$(ffmpeg -version | head -n1 | awk '{print $3}')
+  rm -rf "$TMP_DIR"
+  ensure_usr_local_bin_persist
+  msg_ok "Setup FFmpeg $FINAL_VERSION"
+}
+
+# ------------------------------------------------------------------------------
+# Installs ClickHouse server and client, sets up DB/user with credentials.
+#
+# Description:
+# - Adds official ClickHouse APT repo with GPG key
+# - Installs clickhouse-server and clickhouse-client
+# - Creates database and user (credentials optionally overrideable via env)
+#
+# Variables:
+# CLICKHOUSE_DB - Database name (default: analytics)
+# CLICKHOUSE_USER - Username (default: analytics_user)
+# CLICKHOUSE_PASS - Password (default: auto-generated)
+# ------------------------------------------------------------------------------
+
+# Installs ClickHouse server/client from the official APT repo and provisions
+# a database plus a dedicated user. Credentials are appended to
+# ~/clickhouse.creds. Env overrides: CLICKHOUSE_DB, CLICKHOUSE_USER,
+# CLICKHOUSE_PASS (auto-generated when unset).
+function setup_clickhouse() {
+  local CLICKHOUSE_DB="${CLICKHOUSE_DB:-analytics}"
+  local CLICKHOUSE_USER="${CLICKHOUSE_USER:-analytics_user}"
+  local CLICKHOUSE_PASS="${CLICKHOUSE_PASS:-$(openssl rand -base64 18 | cut -c1-13)}"
+  local GPG_URL="https://packages.clickhouse.com/rpm/lts/repodata/repomd.xml.key"
+  local GPG_KEY_PATH="/usr/share/keyrings/clickhouse-keyring.gpg"
+  local ARCH
+  ARCH=$(dpkg --print-architecture)
+
+  if ! command -v clickhouse >/dev/null; then
+    msg_info "Setup ClickHouse"
+
+    # Fail early with a helpful hint when the repo host is unreachable.
+    if ! curl -fsSL --connect-timeout 5 https://packages.clickhouse.com >/dev/null 2>&1; then
+      msg_error "Connection to packages.clickhouse.com:443 failed – possibly blocked"
+      echo "💡 Check AdGuard/Pi-hole or firewall rules"
+      return 1
+    fi
+
+    if ! curl -fsSL --retry 3 --connect-timeout 10 "$GPG_URL" |
+      gpg --dearmor -o "$GPG_KEY_PATH"; then
+      msg_error "Failed to fetch ClickHouse GPG key"
+      return 1
+    fi
+
+    echo "deb [signed-by=$GPG_KEY_PATH arch=$ARCH] https://packages.clickhouse.com/deb stable main" \
+      >/etc/apt/sources.list.d/clickhouse.list
+
+    # env -u: presumably keeps CLICKHOUSE_USER out of the package's
+    # maintainer scripts — TODO confirm against the postinst behavior.
+    env -u CLICKHOUSE_USER $STD apt-get update
+    env -u CLICKHOUSE_USER DEBIAN_FRONTEND=noninteractive $STD apt-get install -y clickhouse-server clickhouse-client
+
+    $STD systemctl enable --now clickhouse-server
+
+    msg_info "Waiting for ClickHouse to be ready"
+    for i in {1..10}; do
+      if clickhouse client --query "SELECT 1" &>/dev/null; then break; fi
+      sleep 1
+    done
+
+    # Create database and user.
+    clickhouse client --query "CREATE DATABASE IF NOT EXISTS $CLICKHOUSE_DB"
+    clickhouse client --query "CREATE USER IF NOT EXISTS $CLICKHOUSE_USER IDENTIFIED WITH plaintext_password BY '$CLICKHOUSE_PASS'"
+    clickhouse client --query "GRANT ALL ON $CLICKHOUSE_DB.* TO $CLICKHOUSE_USER"
+
+    # Restrict the passwordless "default" user to loopback.
+    # Bugfix: was `cat </etc/...xml` with a stray EOF — it tried to *read* a
+    # non-existent file instead of writing the override.
+    # NOTE(review): the original heredoc content was lost; this reconstruction
+    # locks "default" to localhost — confirm against the intended policy.
+    cat <<'EOF' >/etc/clickhouse-server/users.d/disable-default.xml
+<clickhouse>
+  <users>
+    <default>
+      <networks>
+        <ip>127.0.0.1</ip>
+        <ip>::1</ip>
+      </networks>
+    </default>
+  </users>
+</clickhouse>
+EOF
+    systemctl restart clickhouse-server
+
+    msg_ok "Setup ClickHouse (DB: $CLICKHOUSE_DB, User: $CLICKHOUSE_USER)"
+
+    {
+      echo "ClickHouse DB: $CLICKHOUSE_DB"
+      echo "ClickHouse User: $CLICKHOUSE_USER"
+      echo "ClickHouse Pass: $CLICKHOUSE_PASS"
+    } >>~/clickhouse.creds
+  else
+    msg_info "Updating ClickHouse packages"
+    env -u CLICKHOUSE_USER $STD apt-get update
+    env -u CLICKHOUSE_USER $STD apt-get install -y --only-upgrade clickhouse-server clickhouse-client
+    msg_ok "ClickHouse updated"
+  fi
+}
+
+# ------------------------------------------------------------------------------
+# Checks for new GitHub release (latest tag).
+#
+# Description:
+# - Queries the GitHub API for the latest release tag
+# - Compares it to a local cached version (~/.)
+# - If newer, sets global CHECK_UPDATE_RELEASE and returns 0
+#
+# Usage:
+# check_for_gh_release "AppName" "user/repo"
+# if [[ $? -eq 0 ]]; then
+# echo "New version available: $CHECK_UPDATE_RELEASE"
+# # trigger update...
+# fi
+#
+# Notes:
+# - Requires `jq` (auto-installed if missing)
+# - Does not modify anything, only checks version state
+# - Does not support pre-releases
+# ------------------------------------------------------------------------------
+check_for_gh_release() {
+  # Compare the latest GitHub release of $2 ("user/repo") against the locally
+  # cached version in ~/.<app>. On update-needed, sets CHECK_UPDATE_RELEASE
+  # and returns 0; returns 1 when up to date / pinned-satisfied or on error.
+  local app="$1"
+  local source="$2"
+  local pinned_version="${3:-}" # optional
+  # Cache file holding the installed version, e.g. ~/.myapp.
+  local current_file="$HOME/.${app,,}"
+
+  msg_info "Check for update: ${app}"
+
+  # DNS check for GitHub
+  if ! getent hosts api.github.com >/dev/null 2>&1; then
+    msg_error "Network error: cannot resolve api.github.com"
+    return 1
+  fi
+
+  # jq check
+  if ! command -v jq &>/dev/null; then
+    apt-get update -qq &>/dev/null && apt-get install -y jq &>/dev/null || {
+      msg_error "Failed to install jq"
+      return 1
+    }
+  fi
+
+  # get latest release (leading "v" stripped for comparison)
+  local release
+  release=$(curl -fsSL "https://api.github.com/repos/${source}/releases/latest" |
+    jq -r '.tag_name' | sed 's/^v//')
+
+  if [[ -z "$release" ]]; then
+    msg_error "Unable to determine latest release for ${app}"
+    return 1
+  fi
+
+  local current=""
+  [[ -f "$current_file" ]] && current=$(<"$current_file")
+
+  # PINNED releases: stay on exactly $pinned_version regardless of upstream.
+  if [[ -n "$pinned_version" ]]; then
+    if [[ "$pinned_version" == "$release" ]]; then
+      msg_ok "${app} pinned to v${pinned_version} (no update needed)"
+      return 1
+    else
+      if [[ "$current" == "$pinned_version" ]]; then
+        msg_ok "${app} pinned to v${pinned_version} (already installed, upstream v${release})"
+        return 1
+      fi
+      # Installed version differs from the pin: caller must (re)install it.
+      msg_info "${app} pinned to v${pinned_version} (upstream v${release}) → update/downgrade required"
+      CHECK_UPDATE_RELEASE="$pinned_version"
+      return 0
+    fi
+  fi
+
+  # Unpinned: any difference — or a missing cache file — triggers an update.
+  if [[ "$release" != "$current" ]] || [[ ! -f "$current_file" ]]; then
+    CHECK_UPDATE_RELEASE="$release"
+    msg_info "New release available: v${release} (current: v${current:-none})"
+    return 0
+  else
+    msg_ok "${app} is up to date (v${release})"
+    return 1
+  fi
+}
+
+# ------------------------------------------------------------------------------
+# Hardware acceleration setup inside container
+# Works with: Debian 12 (bookworm), Debian 13 (trixie), Ubuntu 24.04 (noble)
+# Usage: hwaccel_setup_in_ct [--nonfree-intel]
+# ------------------------------------------------------------------------------
+
+hwaccel_setup_in_ct() {
+  # $1: container type ("0" = privileged), $2: optional "--nonfree-intel".
+  local CTTYPE="$1" NONFREE=0
+  [[ "$2" == "--nonfree-intel" ]] && NONFREE=1
+
+  # Detect OS info inside the container
+  local ID VERSION_CODENAME
+  if [[ -r /etc/os-release ]]; then
+    . /etc/os-release
+  fi
+  ID="${ID:-debian}"
+  VERSION_CODENAME="${VERSION_CODENAME:-bookworm}"
+
+  msg_info "Setting up hardware acceleration for ${ID^} ($VERSION_CODENAME)"
+
+  case "$ID" in
+  debian | ubuntu)
+    if ((NONFREE)) && [[ "$VERSION_CODENAME" =~ (trixie|noble) ]]; then
+      # Bugfix: only write the Debian non-free source entries on Debian.
+      # Previously they were also written on Ubuntu 24.04 (noble), pointing
+      # an Ubuntu container at deb.debian.org trixie repositories. On Ubuntu
+      # the non-free Intel driver is expected to come from its own archive.
+      if [[ "$ID" == "debian" ]]; then
+        cat >/etc/apt/sources.list.d/non-free.sources <<'SRC'
+Types: deb deb-src
+URIs: http://deb.debian.org/debian
+Suites: trixie
+Components: non-free non-free-firmware
+
+Types: deb deb-src
+URIs: http://deb.debian.org/debian-security
+Suites: trixie-security
+Components: non-free non-free-firmware
+
+Types: deb deb-src
+URIs: http://deb.debian.org/debian
+Suites: trixie-updates
+Components: non-free non-free-firmware
+SRC
+      fi
+
+      $STD apt-get update
+      $STD apt-get install -y \
+        intel-media-va-driver-non-free \
+        ocl-icd-libopencl1 \
+        mesa-opencl-icd \
+        mesa-va-drivers \
+        libvpl2 \
+        vainfo \
+        intel-gpu-tools
+    else
+      # Debian 12 (bookworm) and fallback for Debian 13/Ubuntu 24.04 without non-free
+      $STD apt-get update
+      $STD apt-get install -y \
+        va-driver-all \
+        ocl-icd-libopencl1 \
+        mesa-opencl-icd \
+        mesa-va-drivers \
+        vainfo \
+        intel-gpu-tools
+    fi
+    ;;
+  *)
+    msg_warn "Unsupported distribution ($ID $VERSION_CODENAME) – skipping HW accel setup"
+    return 0
+    ;;
+  esac
+
+  # Add current user to video/render groups (only for privileged CTs)
+  if [[ "$CTTYPE" == "0" ]]; then
+    $STD adduser "$(id -un)" video || true
+    $STD adduser "$(id -un)" render || true
+  fi
+
+  msg_ok "Hardware acceleration is ready"
+}
diff --git a/tools/addon/copyparty.sh b/tools/addon/copyparty.sh
new file mode 100644
index 000000000..98f7ad72d
--- /dev/null
+++ b/tools/addon/copyparty.sh
@@ -0,0 +1,248 @@
+#!/usr/bin/env bash
+
+# Copyright (c) 2021-2025 community-scripts ORG
+# Author: MickLesk
+# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
+# Source: https://github.com/9001/copyparty
+
+function header_info() {
+ clear
+ cat <<"EOF"
+ ______ ____ __
+ / ____/___ ____ __ __/ __ \____ ______/ /___ __
+ / / / __ \/ __ \/ / / / /_/ / __ `/ ___/ __/ / / /
+/ /___/ /_/ / /_/ / /_/ / ____/ /_/ / / / /_/ /_/ /
+\____/\____/ .___/\__, /_/ \__,_/_/ \__/\__, /
+ /_/ /____/ /____/
+EOF
+}
+
+YW=$(echo "\033[33m")
+GN=$(echo "\033[1;92m")
+RD=$(echo "\033[01;31m")
+BL=$(echo "\033[36m")
+CL=$(echo "\033[m")
+CM="${GN}✔️${CL}"
+CROSS="${RD}✖️${CL}"
+INFO="${BL}ℹ️${CL}"
+
+APP="CopyParty"
+BIN_PATH="/usr/local/bin/copyparty-sfx.py"
+CONF_PATH="/etc/copyparty.conf"
+LOG_PATH="/var/log/copyparty"
+DATA_PATH="/var/lib/copyparty"
+SERVICE_PATH_DEB="/etc/systemd/system/copyparty.service"
+SERVICE_PATH_ALP="/etc/init.d/copyparty"
+SVC_USER="copyparty"
+SVC_GROUP="copyparty"
+SRC_URL="https://github.com/9001/copyparty/releases/latest/download/copyparty-sfx.py"
+DEFAULT_PORT=3923
+
+if [[ -f "/etc/alpine-release" ]]; then
+ OS="Alpine"
+ PKG_MANAGER="apk add --no-cache"
+ SERVICE_PATH="$SERVICE_PATH_ALP"
+elif [[ -f "/etc/debian_version" ]]; then
+ OS="Debian"
+ PKG_MANAGER="apt-get install -y"
+ SERVICE_PATH="$SERVICE_PATH_DEB"
+else
+ echo -e "${CROSS} Unsupported OS detected. Exiting."
+ exit 1
+fi
+
+header_info
+
+function msg_info() { echo -e "${INFO} ${YW}$1...${CL}"; }
+function msg_ok() { echo -e "${CM} ${GN}$1${CL}"; }
+function msg_error() { echo -e "${CROSS} ${RD}$1${CL}"; }
+
+function setup_user_and_dirs() {
+  # Create the dedicated service account (system user, no login shell) and
+  # the data/log directories it owns.
+  msg_info "Creating $SVC_USER user and directories"
+  if ! id "$SVC_USER" &>/dev/null; then
+    if [[ "$OS" == "Debian" ]]; then
+      useradd -r -s /sbin/nologin -d "$DATA_PATH" "$SVC_USER"
+    else
+      # Alpine/BusyBox: group and user are created separately;
+      # "|| true" tolerates a pre-existing group/user.
+      addgroup -S "$SVC_GROUP" 2>/dev/null || true
+      adduser -S -D -H -G "$SVC_GROUP" -h "$DATA_PATH" -s /sbin/nologin "$SVC_USER" 2>/dev/null || true
+    fi
+  fi
+  mkdir -p "$DATA_PATH" "$LOG_PATH"
+  chown -R "$SVC_USER:$SVC_GROUP" "$DATA_PATH" "$LOG_PATH"
+  chmod 755 "$DATA_PATH" "$LOG_PATH"
+  msg_ok "User/Group/Dirs ready"
+}
+
+function uninstall_copyparty() {
+  # Stop and deregister the service for the detected init system, then
+  # remove the binary and config. Always exits the script.
+  msg_info "Uninstalling $APP"
+  case "$OS" in
+  Debian)
+    systemctl disable --now copyparty &>/dev/null
+    rm -f "$SERVICE_PATH_DEB"
+    ;;
+  *)
+    rc-service copyparty stop &>/dev/null
+    rc-update del copyparty &>/dev/null
+    rm -f "$SERVICE_PATH_ALP"
+    ;;
+  esac
+  rm -f "$BIN_PATH" "$CONF_PATH"
+  msg_ok "$APP has been uninstalled."
+  exit 0
+}
+
+function update_copyparty() {
+  # Re-download the latest single-file release over the existing binary,
+  # then exit the script.
+  msg_info "Updating $APP"
+  local target="$BIN_PATH"
+  curl -fsSL "$SRC_URL" -o "$target"
+  chmod +x "$target"
+  msg_ok "Updated $APP"
+  exit 0
+}
+
+if [[ -f "$BIN_PATH" ]]; then
+ echo -e "${YW}⚠️ $APP is already installed.${CL}"
+ echo -n "Uninstall $APP? (y/N): "
+ read -r uninstall_prompt
+ if [[ "${uninstall_prompt,,}" =~ ^(y|yes)$ ]]; then
+ uninstall_copyparty
+ fi
+
+ echo -n "Update $APP? (y/N): "
+ read -r update_prompt
+ if [[ "${update_prompt,,}" =~ ^(y|yes)$ ]]; then
+ update_copyparty
+ else
+ echo -e "${YW}⚠️ Update skipped. Exiting.${CL}"
+ exit 0
+ fi
+fi
+
+msg_info "Installing dependencies"
+if [[ "$OS" == "Debian" ]]; then
+ $PKG_MANAGER python3 curl &>/dev/null
+else
+ $PKG_MANAGER python3 curl &>/dev/null
+fi
+msg_ok "Dependencies installed"
+
+setup_user_and_dirs
+
+msg_info "Downloading $APP"
+curl -fsSL "$SRC_URL" -o "$BIN_PATH"
+chmod +x "$BIN_PATH"
+chown "$SVC_USER:$SVC_GROUP" "$BIN_PATH"
+msg_ok "Downloaded to $BIN_PATH"
+
+echo -n "Enter port for $APP (default: $DEFAULT_PORT): "
+read -r PORT
+PORT=${PORT:-$DEFAULT_PORT}
+
+echo -n "Set data directory (default: $DATA_PATH): "
+read -r USER_DATA_PATH
+USER_DATA_PATH=${USER_DATA_PATH:-$DATA_PATH}
+mkdir -p "$USER_DATA_PATH"
+chown "$SVC_USER:$SVC_GROUP" "$USER_DATA_PATH"
+
+echo -n "Enable authentication? (Y/n): "
+read -r auth_enable
+if [[ "${auth_enable,,}" =~ ^(n|no)$ ]]; then
+ AUTH_LINE=""
+ msg_ok "Configured without authentication"
+else
+ echo -n "Set admin username [default: admin]: "
+ read -r ADMIN_USER
+ ADMIN_USER=${ADMIN_USER:-admin}
+ echo -n "Set admin password [default: helper-scripts.com]: "
+ read -rs ADMIN_PASS
+ ADMIN_PASS=${ADMIN_PASS:-helper-scripts.com}
+ echo
+ AUTH_LINE="auth vhost=/:$ADMIN_USER:$ADMIN_PASS:admin,,"
+ msg_ok "Configured with admin user: $ADMIN_USER"
+fi
+
+msg_info "Writing config to $CONF_PATH"
+msg_info "Writing config to $CONF_PATH"
+{
+ echo "[global]"
+ echo " p: $PORT"
+ echo " ansi"
+ echo " e2dsa"
+ echo " e2ts"
+ echo " theme: 2"
+ echo " grid"
+ echo
+ if [[ -n "$ADMIN_USER" && -n "$ADMIN_PASS" ]]; then
+ echo "[accounts]"
+ echo " $ADMIN_USER: $ADMIN_PASS"
+ echo
+ fi
+ echo "[/]"
+ echo " $USER_DATA_PATH"
+ echo " accs:"
+ if [[ -n "$ADMIN_USER" ]]; then
+ echo " rw: *"
+ echo " rwmda: $ADMIN_USER"
+ else
+ echo " rw: *"
+ fi
+} >"$CONF_PATH"
+
+chmod 640 "$CONF_PATH"
+chown "$SVC_USER:$SVC_GROUP" "$CONF_PATH"
+msg_ok "Config written"
+
+msg_info "Creating service"
+if [[ "$OS" == "Debian" ]]; then
+ cat <"$SERVICE_PATH_DEB"
+[Unit]
+Description=Copyparty file server
+After=network.target
+
+[Service]
+User=$SVC_USER
+Group=$SVC_GROUP
+WorkingDirectory=$DATA_PATH
+ExecStart=/usr/bin/python3 /usr/local/bin/copyparty-sfx.py -c /etc/copyparty.conf
+Restart=always
+StandardOutput=append:/var/log/copyparty/copyparty.log
+StandardError=append:/var/log/copyparty/copyparty.err
+
+[Install]
+WantedBy=multi-user.target
+EOF
+
+ systemctl enable -q --now copyparty
+
+elif [[ "$OS" == "Alpine" ]]; then
+ cat <<'EOF' >"$SERVICE_PATH_ALP"
+#!/sbin/openrc-run
+
+name="copyparty"
+description="Copyparty file server"
+
+command="$(command -v python3)"
+command_args="/usr/local/bin/copyparty-sfx.py -c /etc/copyparty.conf"
+command_background=true
+directory="/var/lib/copyparty"
+pidfile="/run/copyparty.pid"
+output_log="/var/log/copyparty/copyparty.log"
+error_log="/var/log/copyparty/copyparty.err"
+
+depend() {
+ need net
+}
+EOF
+
+ chmod +x "$SERVICE_PATH_ALP"
+ rc-update add copyparty default >/dev/null 2>&1
+ rc-service copyparty restart >/dev/null 2>&1
+fi
+msg_ok "Service created and started"
+
+IFACE=$(ip -4 route | awk '/default/ {print $5; exit}')
+IP=$(ip -4 addr show "$IFACE" | awk '/inet / {print $2}' | cut -d/ -f1 | head -n 1)
+[[ -z "$IP" ]] && IP=$(hostname -I | awk '{print $1}')
+[[ -z "$IP" ]] && IP="127.0.0.1"
+
+echo -e "${CM} ${GN}$APP is running at: ${BL}http://$IP:$PORT${CL}"
+echo -e "${INFO} Storage directory: ${YW}$USER_DATA_PATH${CL}"
+if [[ -n "$AUTH_LINE" ]]; then
+ echo -e "${INFO} Login: ${GN}${ADMIN_USER}${CL} / ${GN}${ADMIN_PASS}${CL}"
+fi
diff --git a/tools/addon/glances.sh b/tools/addon/glances.sh
new file mode 100644
index 000000000..0f1b76f11
--- /dev/null
+++ b/tools/addon/glances.sh
@@ -0,0 +1,202 @@
+#!/usr/bin/env bash
+
+# Copyright (c) 2021-2025 tteck
+# Author: tteck (tteckster) | MickLesk (CanbiZ)
+# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
+
+function header_info {
+ clear
+ cat <<"EOF"
+ ________
+ / ____/ /___ _____ ________ _____
+ / / __/ / __ `/ __ \/ ___/ _ \/ ___/
+/ /_/ / / /_/ / / / / /__/ __(__ )
+\____/_/\__,_/_/ /_/\___/\___/____/
+
+EOF
+}
+
+APP="Glances"
+YW=$(echo "\033[33m")
+GN=$(echo "\033[1;92m")
+RD=$(echo "\033[01;31m")
+BL=$(echo "\033[36m")
+CL=$(echo "\033[m")
+CM="${GN}✔️${CL}"
+CROSS="${RD}✖️${CL}"
+INFO="${BL}ℹ️${CL}"
+
+function msg_info() { echo -e "${INFO} ${YW}$1...${CL}"; }
+function msg_ok() { echo -e "${CM} ${GN}$1${CL}"; }
+function msg_error() { echo -e "${CROSS} ${RD}$1${CL}"; }
+
+get_local_ip() {
+  # Best-effort detection of a local IPv4 address (prints "127.0.0.1" as a
+  # last resort).
+  # Bugfix: the original tested `hostname -I` without redirecting stdout, so
+  # the probe's full output leaked into the captured result alongside the
+  # awk-selected first address.
+  if command -v hostname >/dev/null 2>&1 && hostname -I >/dev/null 2>&1; then
+    hostname -I | awk '{print $1}'
+  elif command -v ip >/dev/null 2>&1; then
+    ip -4 addr show scope global | awk '/inet / {print $2}' | cut -d/ -f1 | head -n1
+  else
+    echo "127.0.0.1"
+  fi
+}
+IP=$(get_local_ip)
+
+install_glances_debian() {
+  # Install Glances with its web UI into a uv-managed venv at /opt/glances
+  # and expose it through a systemd service on port 61208.
+  msg_info "Installing dependencies"
+  apt-get update >/dev/null 2>&1
+  apt-get install -y gcc lm-sensors wireless-tools >/dev/null 2>&1
+  msg_ok "Installed dependencies"
+
+  msg_info "Setting up Python + uv"
+  # Pull helper functions (setup_uv) from the community-scripts repo.
+  source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/tools.func)
+  setup_uv PYTHON_VERSION="3.12"
+  msg_ok "Setup Python + uv"
+
+  msg_info "Installing $APP (with web UI)"
+  cd /opt
+  mkdir -p glances
+  cd glances
+  uv venv
+  source .venv/bin/activate >/dev/null 2>&1
+  uv pip install --upgrade pip wheel setuptools >/dev/null 2>&1
+  uv pip install "glances[web]" >/dev/null 2>&1
+  deactivate
+  msg_ok "Installed $APP"
+
+  msg_info "Creating systemd service"
+  # Bugfix: was `cat </etc/systemd/system/glances.service` — it tried to
+  # *read* the (missing) unit file; the unit must be written via a heredoc.
+  cat <<EOF >/etc/systemd/system/glances.service
+[Unit]
+Description=Glances - An eye on your system
+After=network.target
+
+[Service]
+Type=simple
+ExecStart=/opt/glances/.venv/bin/glances -w
+Restart=on-failure
+WorkingDirectory=/opt/glances
+
+[Install]
+WantedBy=multi-user.target
+EOF
+  systemctl enable -q --now glances
+  msg_ok "Created systemd service"
+
+  echo -e "\n$APP is now running at: http://$IP:61208\n"
+}
+
+# update on Debian/Ubuntu
+update_glances_debian() {
+  # Upgrade glances[web] inside the existing venv, then restart the service.
+  if [[ ! -d /opt/glances/.venv ]]; then
+    msg_error "$APP is not installed"
+    exit 1
+  fi
+  msg_info "Updating $APP"
+  cd /opt/glances
+  source .venv/bin/activate
+  uv pip install --upgrade "glances[web]" >/dev/null 2>&1
+  deactivate
+  systemctl restart glances
+  msg_ok "Updated $APP"
+}
+
+# uninstall on Debian/Ubuntu
+uninstall_glances_debian() {
+  # Tear down the Debian/Ubuntu install: systemd unit plus the /opt venv.
+  msg_info "Uninstalling $APP"
+  systemctl disable -q --now glances || true
+  rm -rf /etc/systemd/system/glances.service /opt/glances
+  msg_ok "Removed $APP"
+}
+
+# install on Alpine
+install_glances_alpine() {
+  # Same layout as the Debian path — uv venv in /opt/glances, web UI on
+  # port 61208 — but managed by an OpenRC service instead of systemd.
+  msg_info "Installing dependencies"
+  apk update >/dev/null 2>&1
+  # NOTE(review): $STD is not defined anywhere in this script; it expands to
+  # nothing here so the command runs normally — confirm whether verbose-mode
+  # handling (as in tools.func) was intended.
+  $STD apk add --no-cache \
+    gcc musl-dev linux-headers python3-dev \
+    python3 py3-pip py3-virtualenv lm-sensors wireless-tools >/dev/null 2>&1
+  msg_ok "Installed dependencies"
+
+  msg_info "Setting up Python + uv"
+  # Pull helper functions (setup_uv) from the community-scripts repo.
+  source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/tools.func)
+  setup_uv PYTHON_VERSION="3.12"
+  msg_ok "Setup Python + uv"
+
+  msg_info "Installing $APP (with web UI)"
+  cd /opt
+  mkdir -p glances
+  cd glances
+  uv venv
+  source .venv/bin/activate
+  uv pip install --upgrade pip wheel setuptools >/dev/null 2>&1
+  uv pip install "glances[web]" >/dev/null 2>&1
+  deactivate
+  msg_ok "Installed $APP"
+
+  msg_info "Creating OpenRC service"
+  cat <<'EOF' >/etc/init.d/glances
+#!/sbin/openrc-run
+command="/opt/glances/.venv/bin/glances"
+command_args="-w"
+command_background="yes"
+pidfile="/run/glances.pid"
+name="glances"
+description="Glances monitoring tool"
+EOF
+  chmod +x /etc/init.d/glances
+  rc-update add glances default
+  rc-service glances start
+  msg_ok "Created OpenRC service"
+
+  echo -e "\n$APP is now running at: http://$IP:61208\n"
+}
+
+# update on Alpine
+update_glances_alpine() {
+  # Upgrade glances[web] inside the existing venv, then restart the
+  # OpenRC service.
+  if [[ ! -d /opt/glances/.venv ]]; then
+    msg_error "$APP is not installed"
+    exit 1
+  fi
+  msg_info "Updating $APP"
+  cd /opt/glances
+  source .venv/bin/activate
+  uv pip install --upgrade "glances[web]" >/dev/null 2>&1
+  deactivate
+  rc-service glances restart
+  msg_ok "Updated $APP"
+}
+
+# uninstall on Alpine
+uninstall_glances_alpine() {
+  # Tear down the Alpine install: OpenRC service plus the /opt venv.
+  msg_info "Uninstalling $APP"
+  rc-service glances stop || true
+  rc-update del glances || true
+  rm -rf /etc/init.d/glances /opt/glances
+  msg_ok "Removed $APP"
+}
+
+# options menu
+OPTIONS=(Install "Install $APP"
+ Update "Update $APP"
+ Uninstall "Uninstall $APP")
+
+CHOICE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "$APP" --menu "Select an option:" 12 58 3 \
+ "${OPTIONS[@]}" 3>&1 1>&2 2>&3 || true)
+
+# OS detection
+if grep -qi "alpine" /etc/os-release; then
+ case "$CHOICE" in
+ Install) install_glances_alpine ;;
+ Update) update_glances_alpine ;;
+ Uninstall) uninstall_glances_alpine ;;
+ *) exit 0 ;;
+ esac
+else
+ case "$CHOICE" in
+ Install) install_glances_debian ;;
+ Update) update_glances_debian ;;
+ Uninstall) uninstall_glances_debian ;;
+ *) exit 0 ;;
+ esac
+fi
diff --git a/tools/addon/netdata.sh b/tools/addon/netdata.sh
new file mode 100644
index 000000000..1f2d598fa
--- /dev/null
+++ b/tools/addon/netdata.sh
@@ -0,0 +1,172 @@
+#!/usr/bin/env bash
+
+# Copyright (c) 2021-2025 tteck
+# Author: tteck (tteckster)
+# License: MIT
+# https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
+
+function header_info {
+ clear
+ cat <<"EOF"
+ _ __ __ ____ __
+ / | / /__ / /_/ __ \____ _/ /_____ _
+ / |/ / _ \/ __/ / / / __ `/ __/ __ `/
+ / /| / __/ /_/ /_/ / /_/ / /_/ /_/ /
+/_/ |_/\___/\__/_____/\__,_/\__/\__,_/
+
+EOF
+}
+
+YW=$(echo "\033[33m")
+BL=$(echo "\033[36m")
+RD=$(echo "\033[01;31m")
+GN=$(echo "\033[1;92m")
+CL=$(echo "\033[m")
+BFR="\\r\\033[K"
+HOLD="-"
+CM="${GN}✓${CL}"
+silent() { "$@" >/dev/null 2>&1; }
+set -e
+header_info
+echo "Loading..."
+function msg_info() {
+ local msg="$1"
+ echo -ne " ${HOLD} ${YW}${msg}..."
+}
+
+function msg_ok() {
+ local msg="$1"
+ echo -e "${BFR} ${CM} ${GN}${msg}${CL}"
+}
+
+function msg_error() { echo -e "${RD}✗ $1${CL}"; }
+
+pve_check() {
+  # Ensure we are on a supported Proxmox VE host: 8.0 – 8.9 or 9.0.x.
+  if ! command -v pveversion >/dev/null 2>&1; then
+    msg_error "This script can only be run on a Proxmox VE host."
+    exit 1
+  fi
+
+  local PVE_VER
+  PVE_VER="$(pveversion | awk -F'/' '{print $2}' | awk -F'-' '{print $1}')"
+
+  # Proxmox VE 8.x: allow 8.0 – 8.9.
+  # Bugfix: this branch previously matched ^9\. , so every 8.x host was
+  # rejected by the fall-through below.
+  if [[ "$PVE_VER" =~ ^8\.([0-9]+)(\.[0-9]+)?$ ]]; then
+    local MINOR="${BASH_REMATCH[1]}"
+    if ((MINOR > 9)); then
+      msg_error "Unsupported Proxmox VE version: $PVE_VER"
+      msg_error "Supported versions: 8.0 – 8.9 or 9.0"
+      exit 1
+    fi
+    return 0
+  fi
+
+  # Proxmox VE 9.x: allow only 9.0 (any patch level).
+  if [[ "$PVE_VER" =~ ^9\.([0-9]+)(\.[0-9]+)?$ ]]; then
+    local MINOR="${BASH_REMATCH[1]}"
+    if ((MINOR != 0)); then
+      msg_error "Unsupported Proxmox VE version: $PVE_VER"
+      msg_error "Supported versions: 8.0 – 8.9 or 9.0"
+      exit 1
+    fi
+    return 0
+  fi
+
+  msg_error "Unsupported Proxmox VE version: $PVE_VER"
+  msg_error "Supported versions: 8.0 – 8.9 or 9.0"
+  exit 1
+}
+
+detect_codename() {
+  # Print the Debian codename (e.g. "bookworm") from /etc/os-release.
+  # Exits when the host is not Debian-based or the codename is missing.
+  source /etc/os-release
+  if [[ "$ID" != "debian" ]]; then
+    msg_error "Unsupported base OS: $ID (only Proxmox VE / Debian supported)."
+    exit 1
+  fi
+  CODENAME="${VERSION_CODENAME:-}"
+  if [[ -z "$CODENAME" ]]; then
+    msg_error "Could not detect Debian codename."
+    exit 1
+  fi
+  echo "$CODENAME"
+}
+
+get_latest_repo_pkg() {
+  # Scrape the repo index page for netdata-repo_*_all.deb filenames and
+  # print the highest version (prints nothing when none are found).
+  local repo_url="$1"
+  curl -fsSL "$repo_url" |
+    grep -oP 'netdata-repo_[^"]+all\.deb' |
+    sort -V |
+    tail -n1
+}
+
+install() {
+  # Interactive install of Netdata on the PVE host via the official
+  # netdata-repo package for the detected Debian codename.
+  header_info
+  while true; do
+    read -p "Are you sure you want to install NetData on Proxmox VE host. Proceed(y/n)? " yn
+    case $yn in
+    [Yy]*) break ;;
+    [Nn]*) exit ;;
+    *) echo "Please answer yes or no." ;;
+    esac
+  done
+
+  # $STD becomes either empty (verbose) or the silent() wrapper defined above.
+  read -r -p "Verbose mode? " prompt
+  [[ ${prompt,,} =~ ^(y|yes)$ ]] && STD="" || STD="silent"
+
+  CODENAME=$(detect_codename)
+  REPO_URL="https://repo.netdata.cloud/repos/repoconfig/debian/${CODENAME}/"
+
+  msg_info "Setting up repository"
+  $STD apt-get install -y debian-keyring
+  # Latest netdata-repo_*_all.deb scraped from the repo index.
+  PKG=$(get_latest_repo_pkg "$REPO_URL")
+  if [[ -z "$PKG" ]]; then
+    msg_error "Could not find netdata-repo package for Debian $CODENAME"
+    exit 1
+  fi
+  curl -fsSL "${REPO_URL}${PKG}" -o "$PKG"
+  $STD dpkg -i "$PKG"
+  rm -f "$PKG"
+  msg_ok "Set up repository"
+
+  msg_info "Installing Netdata"
+  $STD apt-get update
+  $STD apt-get install -y netdata
+  msg_ok "Installed Netdata"
+  msg_ok "Completed Successfully!\n"
+  echo -e "\n Netdata should be reachable at${BL} http://$(hostname -I | awk '{print $1}'):19999 ${CL}\n"
+}
+
+uninstall() {
+  # Remove Netdata, its data/cache/log directories, the repo configuration
+  # and the service user from the PVE host.
+  header_info
+  read -r -p "Verbose mode? " prompt
+  [[ ${prompt,,} =~ ^(y|yes)$ ]] && STD="" || STD="silent"
+
+  msg_info "Uninstalling Netdata"
+  systemctl stop netdata || true
+  rm -rf /var/log/netdata /var/lib/netdata /var/cache/netdata /etc/netdata/go.d
+  rm -rf /etc/apt/trusted.gpg.d/netdata-archive-keyring.gpg /etc/apt/sources.list.d/netdata.list
+  $STD apt-get remove --purge -y netdata netdata-repo
+  systemctl daemon-reload
+  $STD apt autoremove -y
+  # userdel may fail harmlessly if the package already removed the user.
+  $STD userdel netdata || true
+  msg_ok "Uninstalled Netdata"
+  msg_ok "Completed Successfully!\n"
+}
+
+header_info
+pve_check
+
+OPTIONS=(Install "Install NetData on Proxmox VE"
+ Uninstall "Uninstall NetData from Proxmox VE")
+
+CHOICE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "NetData" \
+ --menu "Select an option:" 10 58 2 "${OPTIONS[@]}" 3>&1 1>&2 2>&3)
+
+case $CHOICE in
+"Install") install ;;
+"Uninstall") uninstall ;;
+*)
+ echo "Exiting..."
+ exit 0
+ ;;
+esac
diff --git a/tools/headers/copyparty b/tools/headers/copyparty
new file mode 100644
index 000000000..7e072248d
--- /dev/null
+++ b/tools/headers/copyparty
@@ -0,0 +1,6 @@
+ ______ ____ __
+ / ____/___ ____ __ __/ __ \____ ______/ /___ __
+ / / / __ \/ __ \/ / / / /_/ / __ `/ ___/ __/ / / /
+/ /___/ /_/ / /_/ / /_/ / ____/ /_/ / / / /_/ /_/ /
+\____/\____/ .___/\__, /_/ \__,_/_/ \__/\__, /
+ /_/ /____/ /____/
diff --git a/tools/headers/glances b/tools/headers/glances
new file mode 100644
index 000000000..18ac97577
--- /dev/null
+++ b/tools/headers/glances
@@ -0,0 +1,6 @@
+ ________
+ / ____/ /___ _____ ________ _____
+ / / __/ / __ `/ __ \/ ___/ _ \/ ___/
+/ /_/ / / /_/ / / / / /__/ __(__ )
+\____/_/\__,_/_/ /_/\___/\___/____/
+
diff --git a/tools/headers/prx-add-ips b/tools/headers/prx-add-ips
new file mode 100644
index 000000000..ea035b532
--- /dev/null
+++ b/tools/headers/prx-add-ips
@@ -0,0 +1,6 @@
+ ____ ___ __ __ ________
+ / __ \_________ _ ______ ___ ____ _ __ / | ____/ /___/ / / _/ __ \_____
+ / /_/ / ___/ __ \| |/_/ __ `__ \/ __ \| |/_/ / /| |/ __ / __ /_____ / // /_/ / ___/
+ / ____/ / / /_/ /> / / / / / /_/ /> < / ___ / /_/ / /_/ /_____// // ____(__ )
+/_/ /_/ \____/_/|_/_/ /_/ /_/\____/_/|_| /_/ |_\__,_/\__,_/ /___/_/ /____/
+
diff --git a/tools/pve/add-iptag.sh b/tools/pve/add-iptag.sh
new file mode 100644
index 000000000..742a07a02
--- /dev/null
+++ b/tools/pve/add-iptag.sh
@@ -0,0 +1,1408 @@
+#!/usr/bin/env bash
+
+# Copyright (c) 2021-2025 community-scripts ORG
+# Author: MickLesk (Canbiz) && Desert_Gamer
+# License: MIT
+
+# Clear the terminal and print the IP-Tag ASCII-art banner.
+# The heredoc delimiter is quoted ("EOF") so the art is emitted verbatim,
+# with no parameter expansion applied to the backslashes/backticks.
+function header_info {
+ clear
+ cat <<"EOF"
+ ___ ____ _____
+|_ _| _ \ _ |_ _|_ _ __ _
+ | || |_) (_) | |/ _` |/ _` |
+ | || __/ _ | | (_| | (_| |
+|___|_| (_) |_|\__,_|\__, |
+ |___/
+EOF
+}
+
+clear
+header_info
+APP="IP-Tag"
+hostname=$(hostname)
+
+# Color variables
+# NOTE: plain `echo` (no -e) does not expand \033, so these variables hold
+# the literal backslash sequences; they only render as colors because the
+# msg_* helpers below print them via `echo -e`/`echo -ne`.
+YW=$(echo "\033[33m")
+GN=$(echo "\033[1;92m")
+RD=$(echo "\033[01;31m")
+CL=$(echo "\033[m")
+BFR="\\r\\033[K"
+HOLD=" "
+CM=" ✔️ ${CL}"
+CROSS=" ✖️ ${CL}"
+
+# Error handler for displaying error messages
+error_handler() {
+ if [ -n "$SPINNER_PID" ] && ps -p $SPINNER_PID >/dev/null; then
+ kill $SPINNER_PID >/dev/null
+ fi
+ printf "\e[?25h"
+ local exit_code="$?"
+ local line_number="$1"
+ local command="$2"
+ local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
+ echo -e "\n$error_message\n"
+}
+
+# Spinner for progress indication: endless braille animation, started in the
+# background by msg_info and killed by msg_ok/msg_error/error_handler.
+# Hides the cursor (\e[?25l); the killers restore it (\e[?25h).
+spinner() {
+ local frames=('⠋' '⠙' '⠹' '⠸' '⠼' '⠴' '⠦' '⠧' '⠇' '⠏')
+ local spin_i=0
+ local interval=0.1
+ printf "\e[?25l"
+
+ # Use YW: the original referenced ${YWB}, which is never defined in this
+ # script, so the spinner rendered without any color.
+ local color="${YW}"
+
+ while true; do
+ printf "\r ${color}%s${CL}" "${frames[spin_i]}"
+ spin_i=$(((spin_i + 1) % ${#frames[@]}))
+ sleep "$interval"
+ done
+}
+
+# Info message
+msg_info() {
+ local msg="$1"
+ echo -ne "${TAB}${YW}${HOLD}${msg}${HOLD}"
+ spinner &
+ SPINNER_PID=$!
+}
+
+# Stop the spinner if it is still running, restore the cursor, then
+# overwrite the in-progress line with a green success message.
+msg_ok() {
+ if [ -n "$SPINNER_PID" ] && ps -p $SPINNER_PID >/dev/null; then kill $SPINNER_PID >/dev/null; fi
+ printf "\e[?25h"
+ local text="$1"
+ echo -e "${BFR}${CM}${GN}${text}${CL}"
+}
+
+# Stop the spinner if it is still running, restore the cursor, then
+# overwrite the in-progress line with a red failure message.
+msg_error() {
+ if [ -n "$SPINNER_PID" ] && ps -p $SPINNER_PID >/dev/null; then kill $SPINNER_PID >/dev/null; fi
+ printf "\e[?25h"
+ local text="$1"
+ echo -e "${BFR}${CROSS}${RD}${text}${CL}"
+}
+
+# Check if service exists
+check_service_exists() {
+ if systemctl is-active --quiet iptag.service; then
+ return 0
+ else
+ return 1
+ fi
+}
+
+# One-time migration of the legacy config file /opt/lxc-iptag to the new
+# location /opt/iptag/iptag.conf. No-op when the old file is absent; the
+# old file is removed only after a successful copy.
+migrate_config() {
+ local old_config="/opt/lxc-iptag"
+ local new_config="/opt/iptag/iptag.conf"
+
+ if [[ -f "$old_config" ]]; then
+ msg_info "Migrating configuration from old path"
+ # Ensure the target directory exists: the original cp failed (silently,
+ # due to &>/dev/null) on hosts where /opt/iptag was not created yet.
+ mkdir -p "$(dirname "$new_config")"
+ if cp "$old_config" "$new_config" &>/dev/null; then
+ rm -rf "$old_config" &>/dev/null
+ msg_ok "Configuration migrated and old config removed"
+ else
+ msg_error "Failed to migrate configuration"
+ fi
+ fi
+}
+
+# Update existing installation: stop the service, (re)create config with the
+# user's consent, regenerate the main script / systemd unit / iptag-run
+# wrapper from the generate_* helpers, then re-enable and start the service.
+# Interactive: prompts before overwriting an existing configuration file.
+update_installation() {
+ msg_info "Updating IP-Tag Scripts"
+ systemctl stop iptag.service &>/dev/null
+ msg_ok "Stopped IP-Tag service"
+
+ # Create directory if it doesn't exist
+ if [[ ! -d "/opt/iptag" ]]; then
+ mkdir -p /opt/iptag
+ fi
+
+ # Create new config file (check if exists and ask user)
+ if [[ -f "/opt/iptag/iptag.conf" ]]; then
+ echo -e "\n${YW}Configuration file already exists.${CL}"
+ while true; do
+ read -p "Do you want to replace it with defaults? (y/n): " yn
+ case $yn in
+ [Yy]*)
+ msg_info "Replacing configuration file"
+ generate_config >/opt/iptag/iptag.conf
+ msg_ok "Configuration file replaced with defaults"
+ break
+ ;;
+ [Nn]*)
+ echo -e "${GN}✔️ Keeping existing configuration file${CL}"
+ break
+ ;;
+ *)
+ echo -e "${RD}Please answer yes or no.${CL}"
+ ;;
+ esac
+ done
+ else
+ msg_info "Creating new configuration file"
+ generate_config >/opt/iptag/iptag.conf
+ msg_ok "Created new configuration file at /opt/iptag/iptag.conf"
+ fi
+
+ # Update main script
+ msg_info "Updating main script"
+ generate_main_script >/opt/iptag/iptag
+ chmod +x /opt/iptag/iptag
+ msg_ok "Updated main script"
+
+ # Update service file
+ msg_info "Updating service file"
+ generate_service >/lib/systemd/system/iptag.service
+ msg_ok "Updated service file"
+
+ # Quoted 'EOF' delimiter: the wrapper is written verbatim, no expansion
+ # happens here — $SCRIPT_FILE etc. are evaluated when iptag-run executes.
+ msg_info "Creating manual run command"
+ cat <<'EOF' >/usr/local/bin/iptag-run
+#!/usr/bin/env bash
+CONFIG_FILE="/opt/iptag/iptag.conf"
+SCRIPT_FILE="/opt/iptag/iptag"
+if [[ ! -f "$SCRIPT_FILE" ]]; then
+ echo "❌ Main script not found: $SCRIPT_FILE"
+ exit 1
+fi
+export FORCE_SINGLE_RUN=true
+exec "$SCRIPT_FILE"
+EOF
+ chmod +x /usr/local/bin/iptag-run
+ msg_ok "Created iptag-run executable - You can execute this manually by entering “iptag-run” in the Proxmox host, so the script is executed by hand."
+
+ msg_info "Restarting service"
+ systemctl daemon-reload &>/dev/null
+ systemctl enable -q --now iptag.service &>/dev/null
+ msg_ok "Updated IP-Tag Scripts"
+}
+
+# Generate configuration file content
+generate_config() {
+ cat <&2
+ fi
+}
+
+# Color constants
+# ANSI escape sequences used by the log_* helpers below; NC resets.
+readonly RED='\033[0;31m'
+readonly GREEN='\033[0;32m'
+readonly YELLOW='\033[0;33m'
+readonly BLUE='\033[0;34m'
+readonly PURPLE='\033[0;35m'
+readonly CYAN='\033[0;36m'
+readonly WHITE='\033[1;37m'
+readonly GRAY='\033[0;37m'
+readonly NC='\033[0m' # No Color
+
+# Logging functions with colors
+# Each helper prints a colored status glyph followed by its arguments.
+# They rely on `echo -e` to expand the \033 sequences stored above.
+
+# Green check mark: an operation completed successfully.
+log_success() {
+ echo -e "${GREEN}✓${NC} $*"
+}
+
+# Blue info marker: neutral status output.
+log_info() {
+ echo -e "${BLUE}ℹ${NC} $*"
+}
+
+# Yellow warning marker: noteworthy but non-fatal condition.
+log_warning() {
+ echo -e "${YELLOW}⚠${NC} $*"
+}
+
+# Red cross: an operation failed.
+log_error() {
+ echo -e "${RED}✗${NC} $*"
+}
+
+# Cyan tilde: tags were modified for a guest.
+log_change() {
+ echo -e "${CYAN}~${NC} $*"
+}
+
+# Gray equals sign: guest inspected, tags left as-is.
+log_unchanged() {
+ echo -e "${GRAY}=${NC} $*"
+}
+
+# Check if IP is in CIDR
+# Tests whether IPv4 address $1 lies inside CIDR $2 (e.g. 192.168.0.0/16)
+# using pure bash integer arithmetic. Returns 0 on match, 1 otherwise.
+# Assumes $1 is a valid dotted-quad and $2 has a numeric prefix — callers
+# (ip_in_cidrs via update_tags) validate the IP beforehand; TODO confirm the
+# configured CIDR_LIST entries are always well-formed.
+ip_in_cidr() {
+ local ip="$1" cidr="$2"
+ debug_log "ip_in_cidr: checking '$ip' against '$cidr'"
+
+ # Manual CIDR check - more reliable than shelling out to ipcalc
+ debug_log "ip_in_cidr: using manual check (bypassing ipcalc)"
+ local network prefix
+ IFS='/' read -r network prefix <<< "$cidr"
+
+ # Convert IP and network to integers for comparison
+ local ip_int net_int mask
+ IFS='.' read -r a b c d <<< "$ip"
+ ip_int=$(( (a << 24) + (b << 16) + (c << 8) + d ))
+
+ IFS='.' read -r a b c d <<< "$network"
+ net_int=$(( (a << 24) + (b << 16) + (c << 8) + d ))
+
+ # Create subnet mask
+ # (64-bit bash arithmetic: high bits above bit 31 are irrelevant because
+ # ip_int/net_int never exceed 32 bits)
+ mask=$(( 0xFFFFFFFF << (32 - prefix) ))
+
+ # Apply mask and compare
+ local ip_masked=$((ip_int & mask))
+ local net_masked=$((net_int & mask))
+
+ debug_log "ip_in_cidr: IP=$ip ($ip_int), Network=$network ($net_int), Prefix=$prefix"
+ debug_log "ip_in_cidr: Mask=$mask (hex: $(printf '0x%08x' $mask))"
+ debug_log "ip_in_cidr: IP&Mask=$ip_masked ($(printf '%d.%d.%d.%d' $((ip_masked>>24&255)) $((ip_masked>>16&255)) $((ip_masked>>8&255)) $((ip_masked&255))))"
+ debug_log "ip_in_cidr: Net&Mask=$net_masked ($(printf '%d.%d.%d.%d' $((net_masked>>24&255)) $((net_masked>>16&255)) $((net_masked>>8&255)) $((net_masked&255))))"
+
+ if (( ip_masked == net_masked )); then
+ debug_log "ip_in_cidr: manual check PASSED - IP is in CIDR"
+ return 0
+ else
+ debug_log "ip_in_cidr: manual check FAILED - IP is NOT in CIDR"
+ return 1
+ fi
+}
+
+# Format IP address according to the configuration
+format_ip_tag() {
+ local ip="$1"
+ [[ -z "$ip" ]] && return
+ local format="${TAG_FORMAT:-$DEFAULT_TAG_FORMAT}"
+ case "$format" in
+ "last_octet") echo "${ip##*.}" ;;
+ "last_two_octets") echo "${ip#*.*.}" ;;
+ *) echo "$ip" ;;
+ esac
+}
+
+format_ipv6_tag() {
+ local ip="$1"
+ [[ -z "$ip" ]] && return
+ local format="${IPV6_TAG_FORMAT:-short}"
+
+ case "$format" in
+ "last_block")
+ # take last hex block
+ echo "${ip##*:}"
+ ;;
+ "full")
+ # return full as-is
+ echo "$ip"
+ ;;
+ "short"|"compressed")
+ # compress repeated zeros (::) automatically
+ # Linux ip command already returns compressed by default
+ echo "$ip"
+ ;;
+ *)
+ # fallback
+ echo "$ip"
+ ;;
+ esac
+}
+
+
+# Return 0 when IP $1 matches at least one CIDR in the space-separated
+# list $2; return 1 for an empty list or when nothing matches.
+ip_in_cidrs() {
+ local ip="$1" cidrs="$2"
+ [[ -z "$cidrs" ]] && return 1
+ local IFS=' '
+ debug_log "Checking IP '$ip' against CIDRs: '$cidrs'"
+ local cidr
+ for cidr in $cidrs; do
+ debug_log "Testing IP '$ip' against CIDR '$cidr'"
+ if ! ip_in_cidr "$ip" "$cidr"; then
+ debug_log "IP '$ip' does not match CIDR '$cidr'"
+ continue
+ fi
+ debug_log "IP '$ip' matches CIDR '$cidr' - PASSED"
+ return 0
+ done
+ debug_log "IP '$ip' failed all CIDR checks"
+ return 1
+}
+
+# Validate a dotted-quad IPv4 address: exactly four decimal octets, each in
+# the range 0-255. Returns 0 when valid, 1 otherwise.
+is_valid_ipv4() {
+ local ip="$1"
+ [[ "$ip" =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}$ ]] || return 1
+
+ local IFS='.' parts
+ read -ra parts <<< "$ip"
+ local part
+ for part in "${parts[@]}"; do
+ # Force base-10: without 10#, an octet like "08" is parsed as octal and
+ # aborts the arithmetic evaluation with a "value too great for base"
+ # error on stderr (the original then rejected such addresses noisily).
+ (( 10#$part >= 0 && 10#$part <= 255 )) || return 1
+ done
+ return 0
+}
+
+# Get VM IPs using multiple methods with performance optimizations.
+# Best-effort IPv4 discovery for VM $1, cheapest method first:
+#   1. ARP/neighbor table lookup by the NIC MACs from the VM config
+#   2. QEMU guest agent (qm guest cmd ... network-get-interfaces)
+#   3. DHCP lease files
+#   4. Limited broadcast-ping + small ping-scan on the bridge network
+#   5. Static hints from cloud-init snippets or the VM config itself
+# Skips stopped VMs, caches the result in /tmp for 60 seconds, and prints a
+# space-separated, de-duplicated list of IPv4 addresses (possibly empty).
+get_vm_ips() {
+ local vmid=$1 ips=""
+ local vm_config="/etc/pve/qemu-server/${vmid}.conf"
+ [[ ! -f "$vm_config" ]] && return
+
+ debug_log "vm $vmid: starting optimized IP detection"
+
+ # Check if VM is running first (avoid expensive operations for stopped VMs)
+ local vm_status=""
+ if command -v qm >/dev/null 2>&1; then
+ vm_status=$(qm status "$vmid" 2>/dev/null | awk '{print $2}')
+ fi
+
+ if [[ "$vm_status" != "running" ]]; then
+ debug_log "vm $vmid: not running (status: $vm_status), skipping expensive detection"
+ return
+ fi
+
+ # Cache for this execution
+ local cache_file="/tmp/iptag_vm_${vmid}_cache"
+ local cache_ttl=60 # 60 seconds cache
+
+ # Check cache first
+ if [[ -f "$cache_file" ]] && [[ $(($(date +%s) - $(stat -c %Y "$cache_file" 2>/dev/null || echo 0))) -lt $cache_ttl ]]; then
+ local cached_ips=$(cat "$cache_file" 2>/dev/null)
+ if [[ -n "$cached_ips" ]]; then
+ debug_log "vm $vmid: using cached IPs: $cached_ips"
+ echo "$cached_ips"
+ return
+ fi
+ fi
+
+ # Method 1: Quick ARP table lookup (fastest)
+ local mac_addresses=$(grep -E "^net[0-9]+:" "$vm_config" | grep -oE "([0-9A-Fa-f]{2}:){5}[0-9A-Fa-f]{2}" | head -3)
+ debug_log "vm $vmid: found MACs: $mac_addresses"
+
+ # Quick ARP check without forced refresh (most common case)
+ for mac in $mac_addresses; do
+ local mac_lower=$(echo "$mac" | tr '[:upper:]' '[:lower:]')
+ local ip=$(ip neighbor show | grep "$mac_lower" | grep -oE '([0-9]{1,3}\.){3}[0-9]{1,3}' | head -1)
+ if [[ -n "$ip" && "$ip" =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}$ ]]; then
+ debug_log "vm $vmid: found IP $ip via quick ARP for MAC $mac_lower"
+ ips+="$ip "
+ fi
+ done
+
+ # Early exit if we found IPs via ARP
+ if [[ -n "$ips" ]]; then
+ local unique_ips=$(echo "$ips" | tr ' ' '\n' | sort -u | tr '\n' ' ')
+ unique_ips="${unique_ips% }"
+ debug_log "vm $vmid: early exit with IPs: '$unique_ips'"
+ echo "$unique_ips" > "$cache_file"
+ echo "$unique_ips"
+ return
+ fi
+
+ # Method 2: QM guest agent (fast if available)
+ if command -v qm >/dev/null 2>&1; then
+ local qm_ips=$(timeout 3 qm guest cmd "$vmid" network-get-interfaces 2>/dev/null | grep -oE '([0-9]{1,3}\.){3}[0-9]{1,3}' | grep -v "127.0.0.1" | head -2)
+ for qm_ip in $qm_ips; do
+ if [[ "$qm_ip" =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}$ ]]; then
+ debug_log "vm $vmid: found IP $qm_ip via qm guest cmd"
+ ips+="$qm_ip "
+ fi
+ done
+ fi
+
+ # Early exit if we found IPs via QM
+ if [[ -n "$ips" ]]; then
+ local unique_ips=$(echo "$ips" | tr ' ' '\n' | sort -u | tr '\n' ' ')
+ unique_ips="${unique_ips% }"
+ debug_log "vm $vmid: early exit with QM IPs: '$unique_ips'"
+ echo "$unique_ips" > "$cache_file"
+ echo "$unique_ips"
+ return
+ fi
+
+ # Method 3: DHCP leases check (medium cost)
+ for mac in $mac_addresses; do
+ local mac_lower=$(echo "$mac" | tr '[:upper:]' '[:lower:]')
+
+ for dhcp_file in "/var/lib/dhcp/dhcpd.leases" "/var/lib/dhcpcd5/dhcpcd.leases" "/tmp/dhcp.leases"; do
+ if [[ -f "$dhcp_file" ]]; then
+ local dhcp_ip=$(timeout 2 grep -A 10 "ethernet $mac_lower" "$dhcp_file" 2>/dev/null | grep "binding state active" -A 5 | grep -oE "([0-9]{1,3}\.){3}[0-9]{1,3}" | head -1)
+ if [[ -n "$dhcp_ip" && "$dhcp_ip" =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}$ ]]; then
+ debug_log "vm $vmid: found IP $dhcp_ip via DHCP leases for MAC $mac_lower"
+ ips+="$dhcp_ip "
+ break 2
+ fi
+ fi
+ done
+ done
+
+ # Early exit if we found IPs via DHCP
+ if [[ -n "$ips" ]]; then
+ local unique_ips=$(echo "$ips" | tr ' ' '\n' | sort -u | tr '\n' ' ')
+ unique_ips="${unique_ips% }"
+ debug_log "vm $vmid: early exit with DHCP IPs: '$unique_ips'"
+ echo "$unique_ips" > "$cache_file"
+ echo "$unique_ips"
+ return
+ fi
+
+ # Method 4: Limited network discovery (expensive - only if really needed)
+ debug_log "vm $vmid: falling back to limited network discovery"
+
+ for mac in $mac_addresses; do
+ local mac_lower=$(echo "$mac" | tr '[:upper:]' '[:lower:]')
+
+ # Get bridge interfaces
+ local bridges=$(grep -E "^net[0-9]+:" "$vm_config" | grep -oE "bridge=\w+" | cut -d= -f2 | head -1)
+ for bridge in $bridges; do
+ if [[ -n "$bridge" && -d "/sys/class/net/$bridge" ]]; then
+ # Get bridge IP range
+ local bridge_ip=$(ip addr show "$bridge" 2>/dev/null | grep -oE '([0-9]{1,3}\.){3}[0-9]{1,3}/[0-9]+' | head -1)
+ if [[ -n "$bridge_ip" ]]; then
+ local network=$(echo "$bridge_ip" | cut -d'/' -f1)
+ debug_log "vm $vmid: limited scan on bridge $bridge network $bridge_ip"
+
+ # Force ARP refresh with broadcast ping (limited)
+ # NOTE(review): assumes a /24-style layout — the broadcast address is
+ # built by replacing the last octet with 255 regardless of prefix.
+ IFS='.' read -r a b c d <<< "$network"
+ local broadcast="$a.$b.$c.255"
+ timeout 1 ping -c 1 -b "$broadcast" >/dev/null 2>&1 || true
+
+ # Check ARP again after refresh
+ sleep 0.5
+ local ip=$(ip neighbor show | grep "$mac_lower" | grep -oE '([0-9]{1,3}\.){3}[0-9]{1,3}' | head -1)
+ if [[ -n "$ip" && "$ip" =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}$ ]]; then
+ debug_log "vm $vmid: found IP $ip via ARP after broadcast for MAC $mac_lower"
+ ips+="$ip "
+ break 2
+ fi
+
+ # Only do very limited ping scan (reduced range)
+ IFS='.' read -r a b c d <<< "$network"
+ local base_net="$a.$b.$c"
+
+ # Try only most common ranges (much smaller than before)
+ for last_octet in {100..105} {200..205}; do
+ local test_ip="$base_net.$last_octet"
+
+ # Very quick ping test (reduced timeout)
+ if timeout 0.2 ping -c 1 -W 1 "$test_ip" >/dev/null 2>&1; then
+ # Check if this IP corresponds to our MAC
+ sleep 0.1
+ local found_mac=$(ip neighbor show "$test_ip" 2>/dev/null | grep -oE "([0-9a-f]{2}:){5}[0-9a-f]{2}")
+ if [[ "$found_mac" == "$mac_lower" ]]; then
+ debug_log "vm $vmid: found IP $test_ip via limited ping scan for MAC $mac_lower"
+ ips+="$test_ip "
+ break 2
+ fi
+ fi
+ done
+
+ # Skip extended scanning entirely (too expensive)
+ debug_log "vm $vmid: skipping extended scan to preserve CPU"
+ fi
+ fi
+ done
+ done
+
+ # Method 5: Static configuration check (fast)
+ if [[ -z "$ips" ]]; then
+ debug_log "vm $vmid: checking for static IP configuration"
+
+ # Check cloud-init configuration if exists
+ local cloudinit_file="/var/lib/vz/snippets/${vmid}-cloud-init.yml"
+ if [[ -f "$cloudinit_file" ]]; then
+ local static_ip=$(grep -E "addresses?:" "$cloudinit_file" 2>/dev/null | grep -oE '([0-9]{1,3}\.){3}[0-9]{1,3}' | head -1)
+ if [[ -n "$static_ip" && "$static_ip" =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}$ ]]; then
+ debug_log "vm $vmid: found static IP $static_ip in cloud-init config"
+ ips+="$static_ip "
+ fi
+ fi
+
+ # Check VM config for any IP hints
+ local config_ip=$(grep -E "(ip=|gw=)" "$vm_config" 2>/dev/null | grep -oE '([0-9]{1,3}\.){3}[0-9]{1,3}' | head -1)
+ if [[ -n "$config_ip" && "$config_ip" =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}$ ]]; then
+ debug_log "vm $vmid: found IP hint $config_ip in VM config"
+ ips+="$config_ip "
+ fi
+ fi
+
+ # Remove duplicates and cache result
+ local unique_ips=$(echo "$ips" | tr ' ' '\n' | sort -u | tr '\n' ' ')
+ unique_ips="${unique_ips% }"
+
+ # Cache the result (even if empty)
+ echo "$unique_ips" > "$cache_file"
+
+ debug_log "vm $vmid: final optimized IPs: '$unique_ips'"
+ echo "$unique_ips"
+}
+
+# Update tags for container or VM.
+# $1 = "lxc" | "vm", $2 = vmid. Reads the guest's current tag list, keeps
+# non-IP user tags, regenerates IP-derived tags from the currently detected
+# IPs (IPv4 filtered through CIDR_LIST; IPv6 only when ENABLE_IPV6_TAGS is
+# "true"), logs what was added/removed/kept, and writes the result via
+# `pct set` (LXC) or by rewriting the tags: line in the qemu-server config.
+# For LXC guests with no detected IP the tags are deliberately left alone.
+update_tags() {
+ local type="$1" vmid="$2"
+ local current_ips_full
+
+ if [[ "$type" == "lxc" ]]; then
+ current_ips_full=$(get_lxc_ips "${vmid}")
+ # Process substitution keeps this loop in the current shell, so
+ # current_tags_raw (a global here, unlike the vm branch) survives it.
+ while IFS= read -r line; do
+ [[ "$line" == tags:* ]] && current_tags_raw="${line#tags: }" && break
+ done < <(pct config "$vmid" 2>/dev/null)
+ else
+ current_ips_full=$(get_vm_ips "${vmid}")
+ local vm_config="/etc/pve/qemu-server/${vmid}.conf"
+ if [[ -f "$vm_config" ]]; then
+ local current_tags_raw=$(grep "^tags:" "$vm_config" 2>/dev/null | cut -d: -f2 | sed 's/^[[:space:]]*//')
+ fi
+ fi
+
+ local current_tags=() next_tags=() current_ip_tags=()
+ if [[ -n "$current_tags_raw" ]]; then
+ mapfile -t current_tags < <(echo "$current_tags_raw" | sed 's/;/\n/g')
+ fi
+
+ # Separate IP/numeric and user tags
+ for tag in "${current_tags[@]}"; do
+ if is_valid_ipv4 "${tag}" || [[ "$tag" =~ ^[0-9]+(\.[0-9]+)*$ ]]; then
+ current_ip_tags+=("${tag}")
+ else
+ next_tags+=("${tag}")
+ fi
+ done
+
+ # Generate new IP tags from current IPs
+ local formatted_ips=()
+ debug_log "$type $vmid current_ips_full: '$current_ips_full'"
+ debug_log "$type $vmid CIDR_LIST: ${CIDR_LIST[*]}"
+ for ip in $current_ips_full; do
+ [[ -z "$ip" ]] && continue
+ debug_log "$type $vmid processing IP: '$ip'"
+
+ if is_valid_ipv4 "$ip"; then
+ debug_log "$type $vmid IP '$ip' is valid IPv4"
+ # Only check IPv4 against CIDR list
+ if ip_in_cidrs "$ip" "${CIDR_LIST[*]}"; then
+ debug_log "$type $vmid IPv4 '$ip' passed CIDR check"
+ local formatted_ip4
+ formatted_ip4=$(format_ip_tag "$ip")
+ debug_log "$type $vmid formatted IPv4 '$ip' -> '$formatted_ip4'"
+ [[ -n "$formatted_ip4" ]] && formatted_ips+=("$formatted_ip4")
+ else
+ debug_log "$type $vmid IPv4 '$ip' failed CIDR check, skipping"
+ fi
+
+ elif [[ "${ENABLE_IPV6_TAGS,,}" == "true" ]]; then
+ # IPv6 handling only if enabled
+ debug_log "$type $vmid IP '$ip' not IPv4, treating as IPv6"
+ # basic IPv6 validation
+ if [[ "$ip" =~ ^[0-9a-fA-F:]+$ ]]; then
+ debug_log "$type $vmid IPv6 '$ip' accepted"
+ local formatted_ip6
+ formatted_ip6=$(format_ipv6_tag "$ip")
+ debug_log "$type $vmid formatted IPv6 '$ip' -> '$formatted_ip6'"
+ [[ -n "$formatted_ip6" ]] && formatted_ips+=("$formatted_ip6")
+ else
+ debug_log "$type $vmid value '$ip' not recognized as valid IPv6, skipping"
+ fi
+
+ else
+ debug_log "$type $vmid IP '$ip' is invalid or IPv6 not enabled"
+ fi
+ done
+ debug_log "$type $vmid final formatted_ips: ${formatted_ips[*]}"
+
+
+ # If LXC and no IPs detected, do not touch tags at all
+ if [[ "$type" == "lxc" && ${#formatted_ips[@]} -eq 0 ]]; then
+ log_unchanged "LXC ${GRAY}${vmid}${NC}: No IP detected, tags unchanged"
+ return
+ fi
+
+ # Add new IP tags
+ for new_ip in "${formatted_ips[@]}"; do
+ next_tags+=("$new_ip")
+ done
+
+ # Update tags if there are changes
+ local old_tags_str=$(IFS=';'; echo "${current_tags[*]}")
+ local new_tags_str=$(IFS=';'; echo "${next_tags[*]}")
+
+ debug_log "$type $vmid old_tags: '$old_tags_str'"
+ debug_log "$type $vmid new_tags: '$new_tags_str'"
+ debug_log "$type $vmid tags_equal: $([[ "$old_tags_str" == "$new_tags_str" ]] && echo true || echo false)"
+
+ if [[ "$old_tags_str" != "$new_tags_str" ]]; then
+ # Determine what changed
+ local old_ip_tags_count=${#current_ip_tags[@]}
+ local new_ip_tags_count=${#formatted_ips[@]}
+
+ # Build detailed change message
+ local change_details=""
+
+ if [[ $old_ip_tags_count -eq 0 ]]; then
+ change_details="added ${new_ip_tags_count} IP tag(s): [${GREEN}${formatted_ips[*]}${NC}]"
+ else
+ # Compare old and new IP tags (O(n*m) set difference — fine for the
+ # handful of tags a guest carries)
+ local added_tags=() removed_tags=() common_tags=()
+
+ # Find removed tags
+ for old_tag in "${current_ip_tags[@]}"; do
+ local found=false
+ for new_tag in "${formatted_ips[@]}"; do
+ if [[ "$old_tag" == "$new_tag" ]]; then
+ found=true
+ break
+ fi
+ done
+ if [[ "$found" == false ]]; then
+ removed_tags+=("$old_tag")
+ else
+ common_tags+=("$old_tag")
+ fi
+ done
+
+ # Find added tags
+ for new_tag in "${formatted_ips[@]}"; do
+ local found=false
+ for old_tag in "${current_ip_tags[@]}"; do
+ if [[ "$new_tag" == "$old_tag" ]]; then
+ found=true
+ break
+ fi
+ done
+ if [[ "$found" == false ]]; then
+ added_tags+=("$new_tag")
+ fi
+ done
+
+ # Build change message
+ local change_parts=()
+ if [[ ${#added_tags[@]} -gt 0 ]]; then
+ change_parts+=("added [${GREEN}${added_tags[*]}${NC}]")
+ fi
+ if [[ ${#removed_tags[@]} -gt 0 ]]; then
+ change_parts+=("removed [${YELLOW}${removed_tags[*]}${NC}]")
+ fi
+ if [[ ${#common_tags[@]} -gt 0 ]]; then
+ change_parts+=("kept [${GRAY}${common_tags[*]}${NC}]")
+ fi
+
+ change_details=$(IFS=', '; echo "${change_parts[*]}")
+ fi
+
+ log_change "${type^^} ${CYAN}${vmid}${NC}: ${change_details}"
+
+ if [[ "$type" == "lxc" ]]; then
+ pct set "${vmid}" -tags "$(IFS=';'; echo "${next_tags[*]}")" &>/dev/null
+ else
+ local vm_config="/etc/pve/qemu-server/${vmid}.conf"
+ if [[ -f "$vm_config" ]]; then
+ sed -i '/^tags:/d' "$vm_config"
+ if [[ ${#next_tags[@]} -gt 0 ]]; then
+ echo "tags: $(IFS=';'; echo "${next_tags[*]}")" >> "$vm_config"
+ fi
+ fi
+ fi
+ else
+ # Tags unchanged
+ local ip_count=${#formatted_ips[@]}
+ local status_msg=""
+
+ if [[ $ip_count -eq 0 ]]; then
+ status_msg="No IPs detected"
+ elif [[ $ip_count -eq 1 ]]; then
+ status_msg="IP tag [${GRAY}${formatted_ips[0]}${NC}] unchanged"
+ else
+ status_msg="${ip_count} IP tags [${GRAY}${formatted_ips[*]}${NC}] unchanged"
+ fi
+
+ log_unchanged "${type^^} ${GRAY}${vmid}${NC}: ${status_msg}"
+ fi
+}
+
+# Update all instances of specified type.
+# $1 = "lxc" | "vm". Enumerates all guest IDs of that type (pct list for
+# LXC, qemu-server config filenames for VMs), cleans stale /tmp caches, then
+# fans the per-guest update_tags work out to the parallel processors.
+update_all_tags() {
+ local type="$1" vmids count=0
+
+ if [[ "$type" == "lxc" ]]; then
+ vmids=($(pct list 2>/dev/null | grep -v VMID | awk '{print $1}'))
+ else
+ local all_vm_configs=($(ls /etc/pve/qemu-server/*.conf 2>/dev/null | sed 's/.*\/\([0-9]*\)\.conf/\1/' | sort -n))
+ vmids=("${all_vm_configs[@]}")
+ fi
+
+ count=${#vmids[@]}
+ [[ $count -eq 0 ]] && return
+
+ # Display processing header with color
+ if [[ "$type" == "lxc" ]]; then
+ log_info "Processing ${WHITE}${count}${NC} LXC container(s) in parallel"
+
+ # Clean up old cache files before processing LXC
+ cleanup_vm_cache
+
+ # Process LXC containers in parallel for better performance
+ process_lxc_parallel "${vmids[@]}"
+ else
+ log_info "Processing ${WHITE}${count}${NC} virtual machine(s) in parallel"
+
+ # Clean up old cache files before processing VMs
+ cleanup_vm_cache
+
+ # Process VMs in parallel for better performance
+ process_vms_parallel "${vmids[@]}"
+ fi
+
+ # Add completion message
+ if [[ "$type" == "lxc" ]]; then
+ log_success "Completed processing LXC containers"
+ else
+ log_success "Completed processing virtual machines"
+ fi
+}
+
+# Check if status changed.
+# $1 = "lxc" | "vm" | "fw". Takes a textual snapshot of the relevant state
+# (container list / VM config listing / bridge interfaces) and compares it
+# with the previous snapshot stored in the global last_<type>_status via
+# indirect expansion. Returns 0 when changed (and stores the new snapshot),
+# 1 when unchanged.
+# NOTE(review): the eval assumes $current contains no single quotes; true
+# for the commands used here, but fragile if the sources change.
+check_status_changed() {
+ local type="$1" current
+ case "$type" in
+ "lxc") current=$(pct list 2>/dev/null | grep -v VMID) ;;
+ "vm") current=$(ls -la /etc/pve/qemu-server/*.conf 2>/dev/null) ;;
+ "fw") current=$(ip link show type bridge 2>/dev/null) ;;
+ esac
+ local last_var="last_${type}_status"
+ [[ "${!last_var}" == "$current" ]] && return 1
+ eval "$last_var='$current'"
+ return 0
+}
+
+# Main check function, run once per loop iteration (or once under
+# FORCE_SINGLE_RUN). Decides whether LXC and/or VM tags need refreshing:
+# periodic cache cleanup, change detection per type (intervals configurable
+# via *_CHECK_INTERVAL), network-interface change detection (triggers both),
+# and an unconditional refresh every FORCE_UPDATE_INTERVAL seconds.
+check() {
+ local current_time changes_detected=false
+ current_time=$(date +%s)
+
+ local update_lxc=false
+ local update_vm=false
+
+ # Periodic cache cleanup (every 10 minutes)
+ local time_since_last_cleanup=$((current_time - ${last_cleanup_time:-0}))
+ if [[ $time_since_last_cleanup -ge 600 ]]; then
+ cleanup_vm_cache
+ last_cleanup_time=$current_time
+ debug_log "Performed periodic cache cleanup"
+ fi
+
+ # Check LXC status
+ local time_since_last_lxc_check=$((current_time - last_lxc_status_check_time))
+ if [[ "${LXC_STATUS_CHECK_INTERVAL:-60}" -gt 0 ]] && \
+ [[ "$time_since_last_lxc_check" -ge "${LXC_STATUS_CHECK_INTERVAL:-60}" ]]; then
+ last_lxc_status_check_time=$current_time
+ if check_status_changed "lxc"; then
+ update_lxc=true
+ log_warning "LXC status changes detected"
+ fi
+ fi
+
+ # Check VM status
+ local time_since_last_vm_check=$((current_time - last_vm_status_check_time))
+ if [[ "${VM_STATUS_CHECK_INTERVAL:-60}" -gt 0 ]] && \
+ [[ "$time_since_last_vm_check" -ge "${VM_STATUS_CHECK_INTERVAL:-60}" ]]; then
+ last_vm_status_check_time=$current_time
+ if check_status_changed "vm"; then
+ update_vm=true
+ log_warning "VM status changes detected"
+ fi
+ fi
+
+ # Check network interface changes
+ local time_since_last_fw_check=$((current_time - last_fw_net_interface_check_time))
+ if [[ "${FW_NET_INTERFACE_CHECK_INTERVAL:-60}" -gt 0 ]] && \
+ [[ "$time_since_last_fw_check" -ge "${FW_NET_INTERFACE_CHECK_INTERVAL:-60}" ]]; then
+ last_fw_net_interface_check_time=$current_time
+ if check_status_changed "fw"; then
+ update_lxc=true
+ update_vm=true
+ log_warning "Network interface changes detected"
+ fi
+ fi
+
+ # Force update if interval exceeded
+ # (indirect expansion ${!last_update_var} reads the per-type global)
+ for type in "lxc" "vm"; do
+ local last_update_var="last_update_${type}_time"
+ local time_since_last_update=$((current_time - ${!last_update_var}))
+ if [[ $time_since_last_update -ge ${FORCE_UPDATE_INTERVAL:-1800} ]]; then
+ if [[ "$type" == "lxc" ]]; then
+ update_lxc=true
+ log_info "Scheduled LXC update (every $((FORCE_UPDATE_INTERVAL / 60)) minutes)"
+ else
+ update_vm=true
+ log_info "Scheduled VM update (every $((FORCE_UPDATE_INTERVAL / 60)) minutes)"
+ fi
+ eval "${last_update_var}=${current_time}"
+ fi
+ done
+
+ # Final execution
+ $update_lxc && update_all_tags "lxc"
+ $update_vm && update_all_tags "vm"
+}
+
+# Initialize time variables
+# Snapshots and timestamps used by check()/check_status_changed(); declared
+# -g so the helper functions update the same globals.
+declare -g last_lxc_status="" last_vm_status="" last_fw_status=""
+declare -g last_lxc_status_check_time=0 last_vm_status_check_time=0 last_fw_net_interface_check_time=0
+declare -g last_update_lxc_time=0 last_update_vm_time=0 last_cleanup_time=0
+
+# Main loop
+# Prints a startup banner with the effective configuration, then either runs
+# a single check() pass (FORCE_SINGLE_RUN=true, used by the iptag-run
+# wrapper) or loops forever sleeping LOOP_INTERVAL (default 300) seconds
+# between passes.
+main() {
+ # Display startup message
+ echo -e "\n${PURPLE}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"
+ log_success "IP-Tag service started successfully"
+ echo -e "${BLUE}ℹ${NC} Loop interval: ${WHITE}${LOOP_INTERVAL:-$DEFAULT_CHECK_INTERVAL}${NC} seconds"
+ echo -e "${BLUE}ℹ${NC} Debug mode: ${WHITE}${DEBUG:-false}${NC}"
+ echo -e "${BLUE}ℹ${NC} Tag format: ${WHITE}${TAG_FORMAT:-$DEFAULT_TAG_FORMAT}${NC}"
+ echo -e "${BLUE}ℹ${NC} Allowed CIDRs: ${WHITE}${CIDR_LIST[*]}${NC}"
+ echo -e "${PURPLE}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}\n"
+
+ if [[ "$FORCE_SINGLE_RUN" == "true" ]]; then
+ check
+ exit 0
+ fi
+
+ while true; do
+ check
+ sleep "${LOOP_INTERVAL:-300}"
+ done
+}
+
+
+# Cache cleanup function
+# Removes expired /tmp/iptag_* cache files: VM and LXC IP caches (TTLs from
+# VM_IP_CACHE_TTL / LXC_IP_CACHE_TTL, default 120s), LXC status caches
+# (LXC_STATUS_CACHE_TTL, default 30s), LXC PID caches (fixed 60s), and any
+# orphaned .meta files whose base cache file is gone. Expiry is judged from
+# each file's mtime (stat -c %Y).
+cleanup_vm_cache() {
+ local cache_dir="/tmp"
+ local vm_cache_ttl=${VM_IP_CACHE_TTL:-120}
+ local lxc_cache_ttl=${LXC_IP_CACHE_TTL:-120}
+ local status_cache_ttl=${LXC_STATUS_CACHE_TTL:-30}
+ local current_time=$(date +%s)
+
+ debug_log "Starting extreme cache cleanup"
+
+ # Clean VM cache files
+ for cache_file in "$cache_dir"/iptag_vm_*_cache; do
+ if [[ -f "$cache_file" ]]; then
+ local file_time=$(stat -c %Y "$cache_file" 2>/dev/null || echo 0)
+ if [[ $((current_time - file_time)) -gt $vm_cache_ttl ]]; then
+ rm -f "$cache_file" 2>/dev/null
+ debug_log "Cleaned up expired VM cache file: $cache_file"
+ fi
+ fi
+ done
+
+ # Clean LXC IP cache files
+ # (the glob also matches the status/pid caches below; their shorter TTLs
+ # are still applied by the dedicated loops that follow)
+ for cache_file in "$cache_dir"/iptag_lxc_*_cache; do
+ if [[ -f "$cache_file" ]]; then
+ local file_time=$(stat -c %Y "$cache_file" 2>/dev/null || echo 0)
+ if [[ $((current_time - file_time)) -gt $lxc_cache_ttl ]]; then
+ rm -f "$cache_file" 2>/dev/null
+ # Also clean meta files
+ rm -f "${cache_file}.meta" 2>/dev/null
+ debug_log "Cleaned up expired LXC cache file: $cache_file"
+ fi
+ fi
+ done
+
+ # Clean LXC status cache files (shorter TTL)
+ for cache_file in "$cache_dir"/iptag_lxc_status_*_cache; do
+ if [[ -f "$cache_file" ]]; then
+ local file_time=$(stat -c %Y "$cache_file" 2>/dev/null || echo 0)
+ if [[ $((current_time - file_time)) -gt $status_cache_ttl ]]; then
+ rm -f "$cache_file" 2>/dev/null
+ debug_log "Cleaned up expired LXC status cache: $cache_file"
+ fi
+ fi
+ done
+
+ # Clean LXC PID cache files (60 second TTL)
+ for cache_file in "$cache_dir"/iptag_lxc_pid_*_cache; do
+ if [[ -f "$cache_file" ]]; then
+ local file_time=$(stat -c %Y "$cache_file" 2>/dev/null || echo 0)
+ if [[ $((current_time - file_time)) -gt 60 ]]; then
+ rm -f "$cache_file" 2>/dev/null
+ debug_log "Cleaned up expired LXC PID cache: $cache_file"
+ fi
+ fi
+ done
+
+ # Clean any orphaned meta files
+ for meta_file in "$cache_dir"/iptag_*.meta; do
+ if [[ -f "$meta_file" ]]; then
+ local base_file="${meta_file%.meta}"
+ if [[ ! -f "$base_file" ]]; then
+ rm -f "$meta_file" 2>/dev/null
+ debug_log "Cleaned up orphaned meta file: $meta_file"
+ fi
+ fi
+ done
+
+ debug_log "Completed extreme cache cleanup"
+}
+
+# Parallel VM processing function
+# Runs update_tags "vm" for each vmid in a background subshell, keeping at
+# most MAX_PARALLEL_VM_CHECKS (default 5) jobs in flight. When the window is
+# full it waits up to 10s on the oldest job and SIGKILLs it if stuck; a
+# final pass drains all remaining jobs the same way.
+process_vms_parallel() {
+ local vm_list=("$@")
+ local max_parallel=${MAX_PARALLEL_VM_CHECKS:-5}
+ local job_count=0
+ local pids=()
+ local pid_start_times=()
+
+ for vmid in "${vm_list[@]}"; do
+ if [[ $job_count -ge $max_parallel ]]; then
+ local pid_to_wait="${pids[0]}"
+ # start_time is recorded but currently unused; the 10s cap below is
+ # enforced with the local `waited` counter instead.
+ local start_time="${pid_start_times[0]}"
+ local waited=0
+ while kill -0 "$pid_to_wait" 2>/dev/null && [[ $waited -lt 10 ]]; do
+ sleep 1
+ ((waited++))
+ done
+ if kill -0 "$pid_to_wait" 2>/dev/null; then
+ kill -9 "$pid_to_wait" 2>/dev/null
+ log_warning "VM parallel: killed stuck process $pid_to_wait after 10s timeout"
+ else
+ wait "$pid_to_wait"
+ fi
+ pids=("${pids[@]:1}")
+ pid_start_times=("${pid_start_times[@]:1}")
+ ((job_count--))
+ fi
+ # Start background job
+ (update_tags "vm" "$vmid") &
+ pids+=($!)
+ pid_start_times+=("$(date +%s)")
+ ((job_count++))
+ done
+ # Drain: wait for (or kill) every job still running.
+ for i in "${!pids[@]}"; do
+ local pid="${pids[$i]}"
+ local waited=0
+ while kill -0 "$pid" 2>/dev/null && [[ $waited -lt 10 ]]; do
+ sleep 1
+ ((waited++))
+ done
+ if kill -0 "$pid" 2>/dev/null; then
+ kill -9 "$pid" 2>/dev/null
+ log_warning "VM parallel: killed stuck process $pid after 10s timeout"
+ else
+ wait "$pid"
+ fi
+ done
+}
+
+# Parallel LXC processing function
+# Same sliding-window scheme as process_vms_parallel but for containers,
+# capped at MAX_PARALLEL_LXC_CHECKS (default 2). For more than 5 containers
+# it first pre-populates the per-container status caches from a single
+# `pct list` call so the workers skip individual `pct status` invocations.
+process_lxc_parallel() {
+ local lxc_list=("$@")
+ local max_parallel=${MAX_PARALLEL_LXC_CHECKS:-2}
+ # LXC_BATCH_SIZE is read but not referenced further in this function.
+ local batch_size=${LXC_BATCH_SIZE:-20}
+ local job_count=0
+ local pids=()
+ local pid_start_times=()
+
+ debug_log "Starting parallel LXC processing: ${#lxc_list[@]} containers, max_parallel=$max_parallel"
+
+ if [[ ${#lxc_list[@]} -gt 5 ]]; then
+ debug_log "Pre-loading LXC statuses for ${#lxc_list[@]} containers"
+ local all_statuses=$(pct list 2>/dev/null)
+ for vmid in "${lxc_list[@]}"; do
+ local status=$(echo "$all_statuses" | grep "^$vmid" | awk '{print $2}')
+ if [[ -n "$status" ]]; then
+ local status_cache_file="/tmp/iptag_lxc_status_${vmid}_cache"
+ echo "$status" > "$status_cache_file" 2>/dev/null &
+ fi
+ done
+ wait
+ debug_log "Completed batch status pre-loading"
+ fi
+ for vmid in "${lxc_list[@]}"; do
+ if [[ $job_count -ge $max_parallel ]]; then
+ local pid_to_wait="${pids[0]}"
+ # start_time is recorded but unused; the cap is the `waited` counter.
+ local start_time="${pid_start_times[0]}"
+ local waited=0
+ while kill -0 "$pid_to_wait" 2>/dev/null && [[ $waited -lt 10 ]]; do
+ sleep 1
+ ((waited++))
+ done
+ if kill -0 "$pid_to_wait" 2>/dev/null; then
+ kill -9 "$pid_to_wait" 2>/dev/null
+ log_warning "LXC parallel: killed stuck process $pid_to_wait after 10s timeout"
+ else
+ wait "$pid_to_wait"
+ fi
+ pids=("${pids[@]:1}")
+ pid_start_times=("${pid_start_times[@]:1}")
+ ((job_count--))
+ fi
+ # Start background job with higher priority
+ (update_tags "lxc" "$vmid") &
+ pids+=($!)
+ pid_start_times+=("$(date +%s)")
+ ((job_count++))
+ done
+ # Drain: wait for (or kill) every job still running.
+ for i in "${!pids[@]}"; do
+ local pid="${pids[$i]}"
+ local waited=0
+ while kill -0 "$pid" 2>/dev/null && [[ $waited -lt 10 ]]; do
+ sleep 1
+ ((waited++))
+ done
+ if kill -0 "$pid" 2>/dev/null; then
+ kill -9 "$pid" 2>/dev/null
+ log_warning "LXC parallel: killed stuck process $pid after 10s timeout"
+ else
+ wait "$pid"
+ fi
+ done
+ debug_log "Completed parallel LXC processing"
+}
+
+# Optimized LXC IP detection with caching and alternative methods
+# -------------------------------------------
+# Combined optimized LXC IP detection
+# Keeps advanced debug logs & methods
+# Adds IPv6 detection controlled by ENABLE_IPV6_TAGS
+# -------------------------------------------
+get_lxc_ips() {
+ local vmid=$1
+ local ips=""
+ local method_used=""
+
+ # status cache for container state
+ local status_cache_file="/tmp/iptag_lxc_status_${vmid}_cache"
+ local status_cache_ttl=${LXC_STATUS_CACHE_TTL:-30}
+
+ debug_log "lxc $vmid: starting combined IP detection"
+
+ # ----- STATUS CHECK -----
+ local lxc_status=""
+ if [[ -f "$status_cache_file" ]] && [[ $(($(date +%s) - $(stat -c %Y "$status_cache_file" 2>/dev/null || echo 0))) -lt $status_cache_ttl ]]; then
+ lxc_status=$(cat "$status_cache_file" 2>/dev/null)
+ debug_log "lxc $vmid: using cached status: $lxc_status"
+ else
+ lxc_status=$(pct status "${vmid}" 2>/dev/null | awk '{print $2}')
+ echo "$lxc_status" > "$status_cache_file" 2>/dev/null
+ debug_log "lxc $vmid: fetched fresh status: $lxc_status"
+ fi
+
+ if [[ "$lxc_status" != "running" ]]; then
+ debug_log "lxc $vmid: not running (status: $lxc_status)"
+ return
+ fi
+
+ # ----- TRY CONFIG FOR STATIC IP -----
+ local pve_lxc_config="/etc/pve/lxc/${vmid}.conf"
+ if [[ -f "$pve_lxc_config" ]]; then
+ local static_ip=$(grep -E "^net[0-9]+:" "$pve_lxc_config" 2>/dev/null | grep -oE 'ip=([0-9]{1,3}\.){3}[0-9]{1,3}' | cut -d'=' -f2 | head -1)
+ debug_log "lxc $vmid: [CONFIG] static_ip='$static_ip'"
+ if [[ -n "$static_ip" && "$static_ip" =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}$ ]]; then
+ ips="$static_ip"
+ method_used="proxmox_config"
+ fi
+ fi
+
+ # ----- NAMESPACE FAST PARSE -----
+ if [[ -z "$ips" ]]; then
+ local ns_file="/var/lib/lxc/${vmid}/rootfs/proc/net/fib_trie"
+ debug_log "lxc $vmid: trying namespace fib_trie"
+ if [[ -f "$ns_file" ]]; then
+ local ns_ip=$(timeout 1 grep -m1 -oE '([0-9]{1,3}\.){3}[0-9]{1,3}' "$ns_file" 2>/dev/null | grep -v '127.0.0.1' | head -1)
+ if [[ -n "$ns_ip" ]] && is_valid_ipv4 "$ns_ip"; then
+ ips="$ns_ip"
+ method_used="namespace_fib"
+ debug_log "lxc $vmid: found IP via namespace: $ips"
+ fi
+ fi
+ fi
+
+ # ----- ARP TABLE -----
+ if [[ -z "$ips" ]]; then
+ debug_log "lxc $vmid: trying ARP lookup"
+ local mac_addr=$(grep -Eo 'hwaddr=([0-9A-Fa-f]{2}:){5}[0-9A-Fa-f]{2}' "$pve_lxc_config" | head -1 | cut -d'=' -f2 | tr 'A-F' 'a-f')
+ if [[ -n "$mac_addr" ]]; then
+ local bridge_ip=$(ip neighbor show | grep "$mac_addr" | grep -oE '([0-9]{1,3}\.){3}[0-9]{1,3}' | head -1)
+ if [[ -n "$bridge_ip" && "$bridge_ip" =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}$ ]]; then
+ ips="$bridge_ip"
+ method_used="arp_table"
+ debug_log "lxc $vmid: found IP via ARP: $ips"
+ fi
+ fi
+ fi
+
+ # ----- PROCESS NAMESPACE (fast) -----
+ if [[ -z "$ips" && "${LXC_SKIP_SLOW_METHODS:-true}" != "true" ]]; then
+ debug_log "lxc $vmid: trying process namespace"
+ local pid_cache_file="/tmp/iptag_lxc_pid_${vmid}_cache"
+ local container_pid=""
+ if [[ -f "$pid_cache_file" ]] && [[ $(($(date +%s) - $(stat -c %Y "$pid_cache_file" 2>/dev/null || echo 0))) -lt 60 ]]; then
+ container_pid=$(cat "$pid_cache_file" 2>/dev/null)
+ else
+ container_pid=$(pct list 2>/dev/null | grep "^$vmid" | awk '{print $3}')
+ [[ -n "$container_pid" && "$container_pid" != "-" ]] && echo "$container_pid" > "$pid_cache_file"
+ fi
+ if [[ -n "$container_pid" && "$container_pid" != "-" ]]; then
+ local ns_ip=$(timeout 1 nsenter -t "$container_pid" -n ip -4 addr show 2>/dev/null | grep -oE '([0-9]{1,3}\.){3}[0-9]{1,3}' | grep -v '127.0.0.1' | head -1)
+ if [[ -n "$ns_ip" ]] && is_valid_ipv4 "$ns_ip"; then
+ ips="$ns_ip"
+ method_used="process_ns"
+ debug_log "lxc $vmid: found IP via process namespace: $ips"
+ fi
+ fi
+ fi
+
+ # ----- FORCED METHODS (attach/exec) -----
+ if [[ -z "$ips" && "${LXC_ALLOW_FORCED_COMMANDS:-false}" == "true" ]]; then
+ debug_log "lxc $vmid: trying forced pct exec"
+ local pct_ip=$(timeout 7s pct exec "$vmid" -- ip -4 addr show 2>/dev/null | grep -oE '([0-9]{1,3}\.){3}[0-9]{1,3}' | grep -v '127.0.0.1' | head -1)
+ if [[ -n "$pct_ip" ]] && is_valid_ipv4 "$pct_ip"; then
+ ips="$pct_ip"
+ method_used="pct_exec_forced"
+ debug_log "lxc $vmid: found IP via pct exec: $ips"
+ fi
+ fi
+
+ # ----- OPTIONAL IPv6 detection -----
+ if [[ "${ENABLE_IPV6_TAGS,,}" == "true" ]]; then
+ debug_log "lxc $vmid: IPv6 detection enabled"
+
+ # 1. Try to get IPv6 from inside the container
+ local pct_ipv6=$(timeout 3 pct exec "$vmid" -- ip -6 addr show scope global 2>/dev/null \
+ | grep -oE '([0-9a-fA-F]{0,4}:){2,7}[0-9a-fA-F]{0,4}' \
+ | grep -v '^fe80' | head -1)
+ if [[ -n "$pct_ipv6" ]]; then
+ debug_log "lxc $vmid: found IPv6 via pct exec: $pct_ipv6"
+ [[ -n "$ips" ]] && ips="$ips $pct_ipv6" || ips="$pct_ipv6"
+ method_used="${method_used:+$method_used,}ipv6_exec"
+ else
+ debug_log "lxc $vmid: no IPv6 from pct exec, fallback to neighbor table"
+
+ # 2. Fallback: neighbor table
+ local mac_addr=$(grep -Eo 'hwaddr=([0-9A-Fa-f]{2}:){5}[0-9A-Fa-f]{2}' "$pve_lxc_config" \
+ | head -1 | cut -d'=' -f2 | tr 'A-F' 'a-f')
+ if [[ -n "$mac_addr" ]]; then
+ local ipv6_nb=$(ip -6 neighbor show | grep -i "$mac_addr" \
+ | grep -oE '([0-9a-fA-F]{0,4}:){2,7}[0-9a-fA-F]{0,4}' \
+ | grep -v '^fe80' | head -1)
+ if [[ -n "$ipv6_nb" ]]; then
+ debug_log "lxc $vmid: found IPv6 via neighbor: $ipv6_nb"
+ [[ -n "$ips" ]] && ips="$ips $ipv6_nb" || ips="$ipv6_nb"
+ method_used="${method_used:+$method_used,}ipv6_neighbor"
+ else
+ debug_log "lxc $vmid: no IPv6 found in neighbor table"
+ fi
+ fi
+ fi
+ fi
+
+ # ----- FINAL RESULT -----
+ debug_log "lxc $vmid: [RESULT] ips='$ips' method='$method_used'"
+ echo "$ips"
+}
+
+
+main
+EOF
+}
+
+# Main installation process
+if check_service_exists; then
+ while true; do
+ read -p "IP-Tag service is already installed. Do you want to update it? (y/n): " yn
+ case $yn in
+ [Yy]*)
+ update_installation
+ exit 0
+ ;;
+ [Nn]*)
+ msg_error "Installation cancelled."
+ exit 0
+ ;;
+ *)
+ msg_error "Please answer yes or no."
+ ;;
+ esac
+ done
+fi
+
+while true; do
+ read -p "This will install ${APP} on ${hostname}. Proceed? (y/n): " yn
+ case $yn in
+ [Yy]*)
+ break
+ ;;
+ [Nn]*)
+ msg_error "Installation cancelled."
+ exit
+ ;;
+ *)
+ msg_error "Please answer yes or no."
+ ;;
+ esac
+done
+
+if ! pveversion | grep -Eq "pve-manager/8\.[0-4](\.[0-9]+)*"; then
+ msg_error "This version of Proxmox Virtual Environment is not supported"
+ msg_error "⚠️ Requires Proxmox Virtual Environment Version 8.0 or later."
+ msg_error "Exiting..."
+ sleep 2
+ exit
+fi
+
+FILE_PATH="/usr/local/bin/iptag"
+if [[ -f "$FILE_PATH" ]]; then
+ msg_info "The file already exists: '$FILE_PATH'. Skipping installation."
+ exit 0
+fi
+
+msg_info "Installing Dependencies"
+apt-get update &>/dev/null
+apt-get install -y ipcalc net-tools &>/dev/null
+msg_ok "Installed Dependencies"
+
+msg_info "Setting up IP-Tag Scripts"
+mkdir -p /opt/iptag
+msg_ok "Setup IP-Tag Scripts"
+
+# Migrate config if needed
+migrate_config
+
+msg_info "Setup Default Config"
+if [[ ! -f /opt/iptag/iptag.conf ]]; then
+ generate_config >/opt/iptag/iptag.conf
+ msg_ok "Setup default config"
+else
+ msg_ok "Default config already exists"
+fi
+
+msg_info "Setup Main Function"
+if [[ ! -f /opt/iptag/iptag ]]; then
+ generate_main_script >/opt/iptag/iptag
+ chmod +x /opt/iptag/iptag
+ msg_ok "Setup Main Function"
+else
+ msg_ok "Main Function already exists"
+fi
+
+msg_info "Creating Service"
+if [[ ! -f /lib/systemd/system/iptag.service ]]; then
+ generate_service >/lib/systemd/system/iptag.service
+ msg_ok "Created Service"
+else
+ msg_ok "Service already exists."
+fi
+
+msg_ok "Setup IP-Tag Scripts"
+
+msg_info "Starting Service"
+systemctl daemon-reload &>/dev/null
+systemctl enable -q --now iptag.service &>/dev/null
+msg_ok "Started Service"
+
+msg_info "Restarting Service with optimizations"
+systemctl restart iptag.service &>/dev/null
+msg_ok "Service restarted with CPU optimizations"
+
+msg_info "Creating manual run command"
+cat <<'EOF' >/usr/local/bin/iptag-run
+#!/usr/bin/env bash
+CONFIG_FILE="/opt/iptag/iptag.conf"
+SCRIPT_FILE="/opt/iptag/iptag"
+if [[ ! -f "$SCRIPT_FILE" ]]; then
+ echo "❌ Main script not found: $SCRIPT_FILE"
+ exit 1
+fi
+export FORCE_SINGLE_RUN=true
+exec "$SCRIPT_FILE"
+EOF
+chmod +x /usr/local/bin/iptag-run
+msg_ok "Created iptag-run executable - You can execute this manually by entering “iptag-run” in the Proxmox host, so the script is executed by hand."
+
+SPINNER_PID=""
+echo -e "\n${APP} installation completed successfully! ${CL}\n"
+
+# Proper script termination
+exit 0
diff --git a/tools/pve/clean-lxcs.sh b/tools/pve/clean-lxcs.sh
index feb0921ce..e6d06c229 100644
--- a/tools/pve/clean-lxcs.sh
+++ b/tools/pve/clean-lxcs.sh
@@ -1,24 +1,22 @@
#!/usr/bin/env bash
-# Copyright (c) 2021-2025 tteck
-# Author: tteck (tteckster)
+# Copyright (c) 2021-2025 community-scripts ORG
+# Author: tteck (tteckster) | MickLesk (CanbiZ)
# License: MIT
# https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
-set -eEuo pipefail
-
function header_info() {
- clear
- cat <<"EOF"
+ clear
+ cat <<"EOF"
________ __ _ ________
/ ____/ /__ ____ _____ / / | |/ / ____/
/ / / / _ \/ __ `/ __ \ / / | / /
/ /___/ / __/ /_/ / / / / / /___/ / /___
\____/_/\___/\__,_/_/ /_/ /_____/_/|_\____/
-
EOF
}
+set -eEuo pipefail
BL="\033[36m"
RD="\033[01;31m"
CM='\xE2\x9C\x94\033'
@@ -27,84 +25,86 @@ CL="\033[m"
header_info
echo "Loading..."
-whiptail --backtitle "Proxmox VE Helper Scripts" --title "Proxmox VE LXC Updater" \
- --yesno "This Will Clean logs, cache and update apt/apk lists on selected LXC Containers. Proceed?" 10 68 || exit
+
+whiptail --backtitle "Proxmox VE Helper Scripts" --title "Proxmox VE LXC Updater" --yesno "This will clean logs, cache and update package lists on selected LXC Containers. Proceed?" 10 58
NODE=$(hostname)
EXCLUDE_MENU=()
MSG_MAX_LENGTH=0
-while read -r TAG ITEM; do
- OFFSET=2
- ((${#ITEM} + OFFSET > MSG_MAX_LENGTH)) && MSG_MAX_LENGTH=${#ITEM}+OFFSET
- EXCLUDE_MENU+=("$TAG" "$ITEM " "OFF")
-done < <(pct list | awk 'NR>1')
-excluded_containers=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "Containers on $NODE" \
- --checklist "\nSelect containers to skip from cleaning:\n" 16 $((MSG_MAX_LENGTH + 23)) 6 "${EXCLUDE_MENU[@]}" \
- 3>&1 1>&2 2>&3 | tr -d '"') || exit
-function clean_container() {
- local container=$1
- local os=$2
- header_info
- name=$(pct exec "$container" hostname)
- echo -e "${BL}[Info]${GN} Cleaning ${name} (${os}) ${CL} \n"
- if [[ "$os" == "alpine" ]]; then
- pct exec "$container" -- sh -c \
- "apk update && apk cache clean && rm -rf /var/cache/apk/*"
+while read -r TAG ITEM; do
+ OFFSET=2
+ ((${#ITEM} + OFFSET > MSG_MAX_LENGTH)) && MSG_MAX_LENGTH=${#ITEM}+OFFSET
+ EXCLUDE_MENU+=("$TAG" "$ITEM " "OFF")
+done < <(pct list | awk 'NR>1')
+
+excluded_containers=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "Containers on $NODE" --checklist "\nSelect containers to skip from cleaning:\n" \
+ 16 $((MSG_MAX_LENGTH + 23)) 6 "${EXCLUDE_MENU[@]}" 3>&1 1>&2 2>&3 | tr -d '"')
+
+if [ $? -ne 0 ]; then
+ exit
+fi
+
+function run_lxc_clean() {
+ local container=$1
+ header_info
+ name=$(pct exec "$container" hostname)
+
+ pct exec "$container" -- bash -c '
+ BL="\033[36m"; GN="\033[1;92m"; CL="\033[m"
+ name=$(hostname)
+ if [ -e /etc/alpine-release ]; then
+ echo -e "${BL}[Info]${GN} Cleaning $name (Alpine)${CL}\n"
+ apk cache clean
+ find /var/log -type f -delete 2>/dev/null
+ find /tmp -mindepth 1 -delete 2>/dev/null
+ apk update
else
- pct exec "$container" -- bash -c \
- "apt-get -y --purge autoremove && apt-get -y autoclean && \
- bash <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/tools/pve/clean.sh) && \
- rm -rf /var/lib/apt/lists/* && apt-get update"
+ echo -e "${BL}[Info]${GN} Cleaning $name (Debian/Ubuntu)${CL}\n"
+ find /var/cache -type f -delete 2>/dev/null
+ find /var/log -type f -delete 2>/dev/null
+ find /tmp -mindepth 1 -delete 2>/dev/null
+ apt-get -y --purge autoremove
+ apt-get -y autoclean
+ rm -rf /var/lib/apt/lists/*
+ apt-get update
fi
+ '
}
-for container in $(pct list | awk 'NR>1 {print $1}'); do
- if [[ " ${excluded_containers[*]} " =~ " $container " ]]; then
- header_info
- echo -e "${BL}[Info]${GN} Skipping ${BL}$container${CL}"
- sleep 1
- continue
- fi
+for container in $(pct list | awk '{if(NR>1) print $1}'); do
+ if [[ " ${excluded_containers[@]} " =~ " $container " ]]; then
+ header_info
+ echo -e "${BL}[Info]${GN} Skipping ${BL}$container${CL}"
+ sleep 1
+ continue
+ fi
- # locked?
- if pct status "$container" | grep -q 'locked'; then
- header_info
- echo -e "${BL}[Info]${RD} Skipping locked container ${BL}$container${CL}"
- sleep 1
- continue
- fi
+ os=$(pct config "$container" | awk '/^ostype/ {print $2}')
+ # Supported: debian, ubuntu, alpine
+ if [ "$os" != "debian" ] && [ "$os" != "ubuntu" ] && [ "$os" != "alpine" ]; then
+ header_info
+ echo -e "${BL}[Info]${GN} Skipping ${RD}$container is not Debian, Ubuntu or Alpine${CL} \n"
+ sleep 1
+ continue
+ fi
- os=$(pct config "$container" | awk '/^ostype/ {print $2}')
- [[ "$os" != "debian" && "$os" != "ubuntu" && "$os" != "alpine" ]] && {
- header_info
- echo -e "${BL}[Info]${RD} Skipping unsupported OS in $container: $os ${CL}"
- sleep 1
- continue
- }
+ status=$(pct status "$container")
+ template=$(pct config "$container" | grep -q "template:" && echo "true" || echo "false")
- status=$(pct status "$container" | awk '{print $2}')
- template=$(pct config "$container" | grep -q "template:" && echo "true" || echo "false")
-
- if [[ "$template" == "false" && "$status" == "stopped" ]]; then
- if whiptail --backtitle "Proxmox VE Helper Scripts" \
- --title "Container $container is stopped" \
- --yesno "Container $container is stopped.\n\nStart and clean?" 10 58; then
- echo -e "${BL}[Info]${GN} Starting${BL} $container ${CL} \n"
- pct start "$container"
- echo -e "${BL}[Info]${GN} Waiting for${BL} $container${CL}${GN} to start ${CL} \n"
- sleep 5
- clean_container "$container" "$os"
- echo -e "${BL}[Info]${GN} Shutting down${BL} $container ${CL} \n"
- pct shutdown "$container" &
- else
- echo -e "${BL}[Info]${GN} Skipping stopped container ${BL}$container${CL}"
- fi
- elif [[ "$status" == "running" ]]; then
- clean_container "$container" "$os"
- fi
+ if [ "$template" == "false" ] && [ "$status" == "status: stopped" ]; then
+ echo -e "${BL}[Info]${GN} Starting${BL} $container ${CL} \n"
+ pct start "$container"
+ echo -e "${BL}[Info]${GN} Waiting For${BL} $container${CL}${GN} To Start ${CL} \n"
+ sleep 5
+ run_lxc_clean "$container"
+ echo -e "${BL}[Info]${GN} Shutting down${BL} $container ${CL} \n"
+ pct shutdown "$container" &
+ elif [ "$status" == "status: running" ]; then
+ run_lxc_clean "$container"
+ fi
done
wait
header_info
-echo -e "${GN} Finished, selected containers cleaned. ${CL} \n"
+echo -e "${GN} Finished, Selected Containers Cleaned. ${CL} \n"
diff --git a/tools/pve/clean.sh b/tools/pve/clean.sh
new file mode 100644
index 000000000..87d3c9af8
--- /dev/null
+++ b/tools/pve/clean.sh
@@ -0,0 +1,72 @@
+#!/usr/bin/env bash
+
+# Copyright (c) 2021-2025 tteck
+# Author: tteck (tteckster)
+# License: MIT
+# https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
+
+set -euo pipefail
+
+function header_info() {
+ clear
+ cat <<"EOF"
+ ________ __ _ ________
+ / ____/ /__ ____ _____ / / | |/ / ____/
+ / / / / _ \/ __ `/ __ \ / / | / /
+/ /___/ / __/ /_/ / / / / / /___/ / /___
+\____/_/\___/\__,_/_/ /_/ /_____/_/|_\____/
+
+EOF
+}
+
+BL="\033[36m"
+GN="\033[1;92m"
+CL="\033[m"
+name=$(hostname)
+
+header_info
+echo -e "${BL}[Info]${GN} Cleaning $name${CL} \n"
+
+# OS-Detection
+if [ -f /etc/alpine-release ]; then
+ OS="alpine"
+elif [ -f /etc/debian_version ] || grep -qi ubuntu /etc/issue 2>/dev/null; then
+ OS="debian"
+else
+ OS="unknown"
+fi
+
+# Universal Cleaning
+function clean_universal() {
+ # Caches
+ find /var/cache/ -type f -delete 2>/dev/null || true
+ # Logs
+ find /var/log/ -type f -delete 2>/dev/null || true
+ # Tmp
+ find /tmp/ -mindepth 1 -delete 2>/dev/null || true
+ find /var/tmp/ -mindepth 1 -delete 2>/dev/null || true
+ # User trash (desktop environments)
+ for u in /home/* /root; do
+ find "$u/.local/share/Trash/" -type f -delete 2>/dev/null || true
+ done
+}
+
+clean_universal
+
+if [ "$OS" = "alpine" ]; then
+ echo -e "${BL}[Info]${GN} Alpine detected: Cleaning apk cache...${CL}"
+ rm -rf /var/cache/apk/* 2>/dev/null || true
+ apk cache clean 2>/dev/null || true
+ rm -rf /etc/apk/cache/* 2>/dev/null || true
+
+elif [ "$OS" = "debian" ]; then
+ echo -e "${BL}[Info]${GN} Debian/Ubuntu detected: Cleaning apt and journal...${CL}"
+ apt-get -y autoremove --purge >/dev/null 2>&1 || true
+ apt-get -y autoclean >/dev/null 2>&1 || true
+ apt-get -y clean >/dev/null 2>&1 || true
+ journalctl --vacuum-time=2d --rotate >/dev/null 2>&1 || true
+ rm -rf /var/lib/apt/lists/* 2>/dev/null || true
+ apt-get update >/dev/null 2>&1 || true
+fi
+
+echo -e "${GN}Cleanup completed for $name ($OS)${CL}\n"
diff --git a/tools/pve/dependency-check.sh b/tools/pve/dependency-check.sh
new file mode 100644
index 000000000..b7798d95a
--- /dev/null
+++ b/tools/pve/dependency-check.sh
@@ -0,0 +1,363 @@
+#!/usr/bin/env bash
+
+# Copyright (c) 2023 community-scripts ORG
+# This script is designed to install the Proxmox Dependency Check Hookscript.
+# It sets up a dependency-checking hookscript and automates its
+# application to all new and existing guests using a systemd watcher.
+# License: MIT
+
+function header_info {
+ clear
+ cat <<"EOF"
+ ____ _ ____ _ _
+ | _ \ ___ _ __ ___ _ __ __| | ___ _ __ ___ _ _ / ___| |__ ___ ___| | __
+ | | | |/ _ \ '_ \ / _ \ '_ \ / _` |/ _ \ '_ \ / __| | | | | | '_ \ / _ \/ __| |/ /
+ | |_| | __/ |_) | __/ | | | (_| | __/ | | | (__| |_| | |___| | | | __/ (__| <
+ |____/ \___| .__/ \___|_| |_|\__,_|\___|_| |_|\___|\__, |\____|_| |_|\___|\___|_|\_\
+ |_| |___/
+EOF
+}
+
+# Color variables
+YW=$(echo "\033[33m")
+GN=$(echo "\033[1;92m")
+RD=$(echo "\033[01;31m")
+CL=$(echo "\033[m")
+BFR="\\r\\033[K"
+HOLD=" "
+CM="${GN}✓${CL}"
+CROSS="${RD}✗${CL}"
+
+# Spinner for progress indication (simplified)
+spinner() {
+ local pid=$!
+ local delay=0.1
+ local spinstr='|/-\'
+ while [ "$(ps a | awk '{print $1}' | grep $pid)" ]; do
+ local temp=${spinstr#?}
+ printf " [%c] " "$spinstr"
+ local spinstr=$temp${spinstr%"$temp"}
+ sleep $delay
+ printf "\b\b\b\b\b\b"
+ done
+ printf " \b\b\b\b"
+}
+
+# Message functions
+msg_info() {
+ echo -ne " ${YW}›${CL} $1..."
+}
+
+msg_ok() {
+ echo -e "${BFR} ${CM} $1${CL}"
+}
+
+msg_error() {
+ echo -e "${BFR} ${CROSS} $1${CL}"
+}
+# --- End of base script functions ---
+
+
+# --- Installation Functions ---
+
+# Function to create the actual hookscript that runs before guest startup
+create_dependency_hookscript() {
+ msg_info "Creating dependency-check hookscript"
+ mkdir -p /var/lib/vz/snippets
+ cat <<'EOF' > /var/lib/vz/snippets/dependency-check.sh
+#!/bin/bash
+# Proxmox Hookscript for Pre-Start Dependency Checking
+# Works for both QEMU VMs and LXC Containers
+
+# --- Configuration ---
+POLL_INTERVAL=5 # Seconds to wait between checks
+MAX_ATTEMPTS=60 # Max number of attempts before failing (60 * 5s = 5 minutes)
+# --- End Configuration ---
+
+VMID=$1
+PHASE=$2
+
+# Function for logging to syslog with a consistent format
+log() {
+ echo "[hookscript-dep-check] VMID $VMID: $1"
+}
+
+# This script only runs in the 'pre-start' phase
+if [ "$PHASE" != "pre-start" ]; then
+ exit 0
+fi
+
+log "--- Starting Pre-Start Dependency Check ---"
+
+# --- Determine Guest Type (QEMU or LXC) ---
+GUEST_TYPE=""
+CONFIG_CMD=""
+if qm config "$VMID" >/dev/null 2>&1; then
+ GUEST_TYPE="qemu"
+ CONFIG_CMD="qm config"
+ log "Guest type is QEMU (VM)."
+elif pct config "$VMID" >/dev/null 2>&1; then
+ GUEST_TYPE="lxc"
+ CONFIG_CMD="pct config"
+ log "Guest type is LXC (Container)."
+else
+ log "ERROR: Could not determine guest type for $VMID. Aborting."
+ exit 1
+fi
+
+GUEST_CONFIG=$($CONFIG_CMD "$VMID")
+
+# --- 1. Storage Availability Check ---
+log "Checking storage availability..."
+# Grep for all disk definitions (scsi, sata, virtio, ide, rootfs, mp)
+# and extract the storage identifier (the field between the colons).
+# Sort -u gets the unique list of storage pools.
+STORAGE_IDS=$(echo "$GUEST_CONFIG" | grep -E '^(scsi|sata|virtio|ide|rootfs|mp)[0-9]*:' | awk -F'[:]' '{print $2}' | awk '{print$1}' | sort -u)
+
+if [ -z "$STORAGE_IDS" ]; then
+ log "No storage dependencies found to check."
+else
+ for STORAGE_ID in $STORAGE_IDS; do
+ log "Checking status of storage: '$STORAGE_ID'"
+ ATTEMPTS=0
+ while true; do
+ # Grep for the storage ID line in pvesm status and check the 'Active' column (3rd column)
+ STATUS=$(pvesm status | grep "^\s*$STORAGE_ID\s" | awk '{print $3}')
+ if [ "$STATUS" == "active" ]; then
+ log "Storage '$STORAGE_ID' is active."
+ break
+ fi
+
+ ATTEMPTS=$((ATTEMPTS + 1))
+ if [ $ATTEMPTS -ge $MAX_ATTEMPTS ]; then
+ log "ERROR: Timeout waiting for storage '$STORAGE_ID' to become active. Aborting start."
+ exit 1
+ fi
+
+ log "Storage '$STORAGE_ID' is not active (current status: '${STATUS:-inactive/unknown}'). Waiting ${POLL_INTERVAL}s... (Attempt ${ATTEMPTS}/${MAX_ATTEMPTS})"
+ sleep $POLL_INTERVAL
+ done
+ done
+fi
+log "All storage dependencies are met."
+
+
+# --- 2. Custom Tag-Based Dependency Check ---
+log "Checking for custom tag-based dependencies..."
+TAGS=$(echo "$GUEST_CONFIG" | grep '^tags:' | awk '{print $2}')
+
+if [ -z "$TAGS" ]; then
+ log "No tags found. Skipping custom dependency check."
+else
+ # Replace semicolons with spaces to loop through tags
+ for TAG in ${TAGS//;/ }; do
+ # Check if the tag matches our dependency format 'dep_*'
+ if [[ $TAG == dep_* ]]; then
+ log "Found dependency tag: '$TAG'"
+
+ # Split tag into parts using underscore as delimiter
+ IFS='_' read -ra PARTS <<< "$TAG"
+ DEP_TYPE="${PARTS[1]}"
+
+ ATTEMPTS=0
+ while true; do
+ CHECK_PASSED=false
+ case "$DEP_TYPE" in
+ "tcp")
+ HOST="${PARTS[2]}"
+ PORT="${PARTS[3]}"
+ if [ -z "$HOST" ] || [ -z "$PORT" ]; then
+ log "ERROR: Malformed TCP dependency tag '$TAG'. Skipping."
+ CHECK_PASSED=true # Skip to avoid infinite loop
+ # nc -z is great for this. -w sets a timeout.
+ elif nc -z -w 2 "$HOST" "$PORT"; then
+ log "TCP dependency met: Host $HOST port $PORT is open."
+ CHECK_PASSED=true
+ fi
+ ;;
+
+ "ping")
+ HOST="${PARTS[2]}"
+ if [ -z "$HOST" ]; then
+ log "ERROR: Malformed PING dependency tag '$TAG'. Skipping."
+ CHECK_PASSED=true # Skip to avoid infinite loop
+ # ping -c 1 (one packet) -W 2 (2-second timeout)
+ elif ping -c 1 -W 2 "$HOST" >/dev/null 2>&1; then
+ log "Ping dependency met: Host $HOST is reachable."
+ CHECK_PASSED=true
+ fi
+ ;;
+
+ *)
+ log "WARNING: Unknown dependency type '$DEP_TYPE' in tag '$TAG'. Ignoring."
+ CHECK_PASSED=true # Mark as passed to avoid getting stuck
+ ;;
+ esac
+
+ if $CHECK_PASSED; then
+ break
+ fi
+
+ ATTEMPTS=$((ATTEMPTS + 1))
+ if [ $ATTEMPTS -ge $MAX_ATTEMPTS ]; then
+ log "ERROR: Timeout waiting for dependency '$TAG'. Aborting start."
+ exit 1
+ fi
+
+ log "Dependency '$TAG' not met. Waiting ${POLL_INTERVAL}s... (Attempt ${ATTEMPTS}/${MAX_ATTEMPTS})"
+ sleep $POLL_INTERVAL
+ done
+ fi
+ done
+fi
+
+log "All custom dependencies are met."
+log "--- Dependency Check Complete. Proceeding with start. ---"
+exit 0
+EOF
+ chmod +x /var/lib/vz/snippets/dependency-check.sh
+ msg_ok "Created dependency-check hookscript"
+}
+
+# Function to create the config file for exclusions
+create_exclusion_config() {
+ msg_info "Creating exclusion configuration file"
+ if [ -f /etc/default/pve-auto-hook ]; then
+ msg_ok "Exclusion file already exists, skipping."
+ else
+ cat <<'EOF' > /etc/default/pve-auto-hook
+#
+# Configuration for the Proxmox Automatic Hookscript Applicator
+#
+# Add VM or LXC IDs here to prevent the hookscript from being added.
+# Separate IDs with spaces.
+#
+# Example:
+# IGNORE_IDS="9000 9001 105"
+#
+
+IGNORE_IDS=""
+EOF
+ msg_ok "Created exclusion configuration file"
+ fi
+}
+
+# Function to create the script that applies the hook
+create_applicator_script() {
+ msg_info "Creating the hookscript applicator script"
+ cat <<'EOF' > /usr/local/bin/pve-apply-hookscript.sh
+#!/bin/bash
+HOOKSCRIPT_VOLUME_ID="local:snippets/dependency-check.sh"
+CONFIG_FILE="/etc/default/pve-auto-hook"
+LOG_TAG="pve-auto-hook-list"
+
+log() {
+ systemd-cat -t "$LOG_TAG" <<< "$1"
+}
+
+if [ -f "$CONFIG_FILE" ]; then
+ source "$CONFIG_FILE"
+fi
+
+# Process QEMU VMs
+qm list | awk 'NR>1 {print $1}' | while read -r VMID; do
+ is_ignored=false
+ for id_to_ignore in $IGNORE_IDS; do
+ if [ "$id_to_ignore" == "$VMID" ]; then is_ignored=true; break; fi
+ done
+ if $is_ignored; then continue; fi
+ if qm config "$VMID" | grep -q '^hookscript:'; then continue; fi
+ log "Hookscript not found for VM $VMID. Applying..."
+ qm set "$VMID" --hookscript "$HOOKSCRIPT_VOLUME_ID"
+done
+
+# Process LXC Containers
+pct list | awk 'NR>1 {print $1}' | while read -r VMID; do
+ is_ignored=false
+ for id_to_ignore in $IGNORE_IDS; do
+ if [ "$id_to_ignore" == "$VMID" ]; then is_ignored=true; break; fi
+ done
+ if $is_ignored; then continue; fi
+ if pct config "$VMID" | grep -q '^hookscript:'; then continue; fi
+ log "Hookscript not found for LXC $VMID. Applying..."
+ pct set "$VMID" --hookscript "$HOOKSCRIPT_VOLUME_ID"
+done
+EOF
+ chmod +x /usr/local/bin/pve-apply-hookscript.sh
+ msg_ok "Created applicator script"
+}
+
+# Function to set up the systemd watcher and service
+create_systemd_units() {
+ msg_info "Creating systemd watcher and service units"
+ cat <<'EOF' > /etc/systemd/system/pve-auto-hook.path
+[Unit]
+Description=Watch for new Proxmox guest configs to apply hookscript
+
+[Path]
+PathModified=/etc/pve/qemu-server/
+PathModified=/etc/pve/lxc/
+
+[Install]
+WantedBy=multi-user.target
+EOF
+
+ cat <<'EOF' > /etc/systemd/system/pve-auto-hook.service
+[Unit]
+Description=Automatically add hookscript to new Proxmox guests
+
+[Service]
+Type=oneshot
+ExecStart=/usr/local/bin/pve-apply-hookscript.sh
+EOF
+ msg_ok "Created systemd units"
+}
+
+
+# --- Main Execution ---
+header_info
+
+if ! command -v pveversion >/dev/null 2>&1; then
+ msg_error "This script must be run on a Proxmox VE host."
+ exit 1
+fi
+
+echo -e "\nThis script will install a service to automatically apply a"
+echo -e "dependency-checking hookscript to all new and existing Proxmox guests."
+echo -e "${YW}This includes creating files in:${CL}"
+echo -e " - /var/lib/vz/snippets/"
+echo -e " - /usr/local/bin/"
+echo -e " - /etc/default/"
+echo -e " - /etc/systemd/system/\n"
+
+read -p "Do you want to proceed with the installation? (y/n): " -n 1 -r
+echo
+if [[ ! $REPLY =~ ^[Yy]$ ]]; then
+ msg_error "Installation cancelled."
+ exit 1
+fi
+
+echo -e "\n"
+create_dependency_hookscript
+create_exclusion_config
+create_applicator_script
+create_systemd_units
+
+msg_info "Reloading systemd and enabling the watcher"
+(systemctl daemon-reload && systemctl enable --now pve-auto-hook.path) >/dev/null 2>&1 &
+spinner
+msg_ok "Systemd watcher enabled and running"
+
+msg_info "Performing initial run to update existing guests"
+/usr/local/bin/pve-apply-hookscript.sh >/dev/null 2>&1 &
+spinner
+msg_ok "Initial run complete"
+
+echo -e "\n\n${GN}Installation successful!${CL}"
+echo -e "The service is now active and will monitor for new guests."
+echo -e "To ${YW}exclude${CL} a VM or LXC, add its ID to the ${YW}IGNORE_IDS${CL} variable in:"
+echo -e " ${YW}/etc/default/pve-auto-hook${CL}"
+echo -e "\nYou can monitor the service's activity with:"
+echo -e " ${YW}journalctl -fu pve-auto-hook.service${CL}\n"
+
+exit 0
diff --git a/tools/pve/execute.sh b/tools/pve/execute.sh
new file mode 100644
index 000000000..fc19be981
--- /dev/null
+++ b/tools/pve/execute.sh
@@ -0,0 +1,84 @@
+#!/usr/bin/env bash
+
+# Copyright (c) 2021-2025 community-scripts ORG
+# Author: jeroenzwart
+# License: MIT
+# https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
+
+function header_info() {
+ clear
+ cat <<"EOF"
+ ______ __ __ _ ________
+ / ____/ _____ _______ __/ /____ / / | |/ / ____/
+ / __/ | |/_/ _ \/ ___/ / / / __/ _ \ / / | / /
+ / /____> __/ /__/ /_/ / /_/ __/ / /___/ / /___
+/_____/_/|_|\___/\___/\__,_/\__/\___/ /_____/_/|_\____/
+
+EOF
+}
+set -eEuo pipefail
+BL=$(echo "\033[36m")
+RD=$(echo "\033[01;31m")
+CM='\xE2\x9C\x94\033'
+GN=$(echo "\033[1;92m")
+CL=$(echo "\033[m")
+header_info
+echo "Loading..."
+whiptail --backtitle "Proxmox VE Helper Scripts" --title "Proxmox VE LXC Execute" --yesno "This will execute a command inside selected LXC Containers. Proceed?" 10 58
+NODE=$(hostname)
+EXCLUDE_MENU=()
+MSG_MAX_LENGTH=0
+while read -r TAG ITEM; do
+ OFFSET=2
+ ((${#ITEM} + OFFSET > MSG_MAX_LENGTH)) && MSG_MAX_LENGTH=${#ITEM}+OFFSET
+ EXCLUDE_MENU+=("$TAG" "$ITEM " "OFF")
+done < <(pct list | awk 'NR>1')
+excluded_containers=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "Containers on $NODE" --checklist "\nSelect containers to skip from executing:\n" \
+ 16 $((MSG_MAX_LENGTH + 23)) 6 "${EXCLUDE_MENU[@]}" 3>&1 1>&2 2>&3 | tr -d '"')
+
+if [ $? -ne 0 ]; then
+ exit
+fi
+
+
+read -r -p "Enter here command for inside the containers: " custom_command
+
+header_info
+echo "One moment please...\n"
+
+function execute_in() {
+ container=$1
+ name=$(pct exec "$container" hostname)
+ echo -e "${BL}[Info]${GN} Execute inside${BL} ${name}${GN} with output: ${CL}"
+ pct exec "$container" -- bash -c "${custom_command}" | tee
+}
+
+for container in $(pct list | awk '{if(NR>1) print $1}'); do
+ if [[ " ${excluded_containers[@]} " =~ " $container " ]]; then
+ echo -e "${BL}[Info]${GN} Skipping ${BL}$container${CL}"
+ else
+ os=$(pct config "$container" | awk '/^ostype/ {print $2}')
+ if [ "$os" != "debian" ] && [ "$os" != "ubuntu" ]; then
+ echo -e "${BL}[Info]${GN} Skipping ${name} ${RD}$container is not Debian or Ubuntu ${CL}"
+ continue
+ fi
+
+ status=$(pct status "$container")
+ template=$(pct config "$container" | grep -q "template:" && echo "true" || echo "false")
+ if [ "$template" == "false" ] && [ "$status" == "status: stopped" ]; then
+ echo -e "${BL}[Info]${GN} Starting${BL} $container ${CL}"
+ pct start "$container"
+ echo -e "${BL}[Info]${GN} Waiting For${BL} $container${CL}${GN} To Start ${CL}"
+ sleep 5
+ execute_in "$container"
+ echo -e "${BL}[Info]${GN} Shutting down${BL} $container ${CL}"
+ pct shutdown "$container" &
+ elif [ "$status" == "status: running" ]; then
+ execute_in "$container"
+ fi
+ fi
+done
+
+wait
+
+echo -e "${GN} Finished, execute command inside selected containers. ${CL} \n"
diff --git a/tools/pve/fstrim.sh b/tools/pve/fstrim.sh
new file mode 100644
index 000000000..4bd68f442
--- /dev/null
+++ b/tools/pve/fstrim.sh
@@ -0,0 +1,189 @@
+#!/usr/bin/env bash
+
+set -eEuo pipefail
+
+# Clear the terminal and print the ASCII-art banner for this script.
+function header_info() {
+  clear
+  cat <<"EOF"
+    _______ __         __                _
+   / ____(_) /__  _______  _______/ /____  ____ ___     /_  __/____(_)___ ___
+  / /_  / / / _ \/ ___/ / / / ___/ __/ _ \/ __ `__ \     / / / ___/ / __ `__ \
+ / __/ / / /  __(__  ) /_/ (__  ) /_/  __/ / / / / /   / / / /  / / / / / / /
+/_/   /_/_/\___/____/\__, /____/\__/\___/_/ /_/ /_/   /_/ /_/  /_/_/ /_/ /_/
+                    /____/
+EOF
+}
+
+# ANSI color codes used for all status output
+BL="\033[36m"
+RD="\033[01;31m"
+GN="\033[1;92m"
+CL="\033[m"
+
+# Append-only run log; mode 600 so only root can read per-container results
+LOGFILE="/var/log/fstrim.log"
+touch "$LOGFILE"
+chmod 600 "$LOGFILE"
+echo -e "\n----- $(date '+%Y-%m-%d %H:%M:%S') | fstrim Run by $(whoami) on $(hostname) -----" >>"$LOGFILE"
+
+header_info
+echo "Loading..."
+
+# Informational dialog: fstrim only helps on discard/TRIM-capable storage
+whiptail --backtitle "Proxmox VE Helper Scripts" \
+  --title "About fstrim (LXC)" \
+  --msgbox "The 'fstrim' command releases unused blocks back to the storage device. This only makes sense for containers on SSD, NVMe, Thin-LVM, or storage with discard/TRIM support.\n\nIf your root filesystem or container disks are on classic HDDs, thick LVM, or unsupported storage types, running fstrim will have no effect.\n\nRecommended:\n- Use fstrim only on SSD, NVMe, or thin-provisioned storage with discard enabled.\n- For ZFS, ensure 'autotrim=on' is set on your pool.\n" 16 88
+
+# Warn (but allow continuing) if the host root filesystem is not ext4;
+# declining the yes/no dialog aborts the script with status 1.
+ROOT_FS=$(df -Th "/" | awk 'NR==2 {print $2}')
+if [ "$ROOT_FS" != "ext4" ]; then
+  whiptail --backtitle "Proxmox VE Helper Scripts" \
+    --title "Warning" \
+    --yesno "Root filesystem is not ext4 ($ROOT_FS).\nContinue anyway?" 12 80 || exit 1
+fi
+
+NODE=$(hostname)
+EXCLUDE_MENU=()   # whiptail triplets: CTID / "name | status" / OFF
+STOPPED_MENU=()   # same triplets, but only for stopped CTs
+MAX_NAME_LEN=0
+MAX_STAT_LEN=0
+
+# Build arrays with one pct list
+mapfile -t CTLINES < <(pct list | awk 'NR>1')
+
+# First pass: find the widest name/status so the menu columns line up.
+# NOTE(review): assumes `pct list` columns are VMID Status Name ($1/$2/$3);
+# with a lock set, pct inserts a Lock column and $3 would be the lock — verify.
+for LINE in "${CTLINES[@]}"; do
+  CTID=$(awk '{print $1}' <<<"$LINE")
+  STATUS=$(awk '{print $2}' <<<"$LINE")
+  NAME=$(awk '{print $3}' <<<"$LINE")
+  ((${#NAME} > MAX_NAME_LEN)) && MAX_NAME_LEN=${#NAME}
+  ((${#STATUS} > MAX_STAT_LEN)) && MAX_STAT_LEN=${#STATUS}
+done
+
+# Fixed-width "name | status" format derived from the first pass
+FMT="%-${MAX_NAME_LEN}s | %-${MAX_STAT_LEN}s"
+
+# Second pass: build the whiptail menu entries
+for LINE in "${CTLINES[@]}"; do
+  CTID=$(awk '{print $1}' <<<"$LINE")
+  STATUS=$(awk '{print $2}' <<<"$LINE")
+  NAME=$(awk '{print $3}' <<<"$LINE")
+  DESC=$(printf "$FMT" "$NAME" "$STATUS")
+  EXCLUDE_MENU+=("$CTID" "$DESC" "OFF")
+  if [[ "$STATUS" == "stopped" ]]; then
+    STOPPED_MENU+=("$CTID" "$DESC" "OFF")
+  fi
+done
+
+# Checklist of containers to exclude from trimming. The cancel check is on the
+# assignment itself: under `set -eEuo pipefail` the original's separate
+# `[ $? -ne 0 ] && exit` was dead code — a cancelled whiptail made the
+# assignment fail and set -e killed the script before the check ran.
+excluded_containers_raw=$(whiptail --backtitle "Proxmox VE Helper Scripts" \
+  --title "Containers on $NODE" \
+  --checklist "\nSelect containers to skip from trimming:\n" \
+  20 $((MAX_NAME_LEN + MAX_STAT_LEN + 20)) 12 "${EXCLUDE_MENU[@]}" 3>&1 1>&2 2>&3) || exit
+# Quoted herestring: strip whiptail's quotes, then split the IDs into an array
+read -ra EXCLUDED <<<"$(tr -d '"' <<<"$excluded_containers_raw")"
+
+# CTIDs the user agreed to boot temporarily just for the trim
+TO_START=()
+if [ ${#STOPPED_MENU[@]} -gt 0 ]; then
+  # STOPPED_MENU holds whiptail triplets, hence the stride of 3
+  for ((i = 0; i < ${#STOPPED_MENU[@]}; i += 3)); do
+    CTID="${STOPPED_MENU[i]}"
+    DESC="${STOPPED_MENU[i + 1]}"
+    # Skip CTs the user already excluded from trimming
+    if [[ " ${EXCLUDED[*]} " =~ " $CTID " ]]; then
+      continue
+    fi
+    header_info
+    echo -e "${BL}[Info]${GN} Container $CTID ($DESC) is currently stopped.${CL}"
+    # Default is No: only an explicit y/Y starts the container
+    read -rp "Temporarily start for fstrim? [y/N]: " answer
+    if [[ "$answer" =~ ^[Yy]$ ]]; then
+      TO_START+=("$CTID")
+    fi
+  done
+fi
+
+# Lookup map so the main loop knows which CTs it booted itself
+# (and should therefore offer to stop again afterwards)
+declare -A WAS_STOPPED
+for ct in "${TO_START[@]}"; do
+  WAS_STOPPED["$ct"]=1
+done
+
+# trim_container CTID NAME
+# Run `pct fstrim` on one container and log the result. When a thin-LVM
+# volume named vm-<CTID>-disk-0 exists, data_percent is reported before and
+# after the trim; otherwise usage is reported as not available.
+function trim_container() {
+  local container="$1"
+  local name="$2"
+  header_info
+  echo -e "${BL}[Info]${GN} Trimming ${BL}$container${CL} \n"
+
+  local before_trim after_trim
+  # Only the first disk is inspected; extra mountpoints (disk-1, ...) are not.
+  local lv_name="vm-${container}-disk-0"
+  if lvs --noheadings -o lv_name 2>/dev/null | grep -qw "$lv_name"; then
+    before_trim=$(lvs --noheadings -o lv_name,data_percent 2>/dev/null | awk -v ctid="$lv_name" '$1 == ctid {gsub(/%/, "", $2); print $2}')
+    [[ -n "$before_trim" ]] && echo -e "${RD}Data before trim $before_trim%${CL}" || echo -e "${RD}Data before trim: not available${CL}"
+  else
+    before_trim=""
+    echo -e "${RD}Data before trim: not available (non-LVM storage)${CL}"
+  fi
+
+  local fstrim_output
+  # `|| true`: the script runs under `set -eEuo pipefail`; without it, one
+  # container whose storage rejects fstrim would abort the whole run. The
+  # captured output (incl. the error text) is inspected below either way.
+  fstrim_output=$(pct fstrim "$container" 2>&1 || true)
+  if echo "$fstrim_output" | grep -qi "not supported"; then
+    echo -e "${RD}fstrim isnt supported on this storage!${CL}"
+  elif echo "$fstrim_output" | grep -Eq '([0-9]+(\.[0-9]+)?\s*[KMGT]?B)'; then
+    # Output containing a byte count is treated as a successful trim
+    echo -e "${GN}fstrim result: $fstrim_output${CL}"
+  else
+    echo -e "${RD}fstrim result: $fstrim_output${CL}"
+  fi
+
+  if lvs --noheadings -o lv_name 2>/dev/null | grep -qw "$lv_name"; then
+    after_trim=$(lvs --noheadings -o lv_name,data_percent 2>/dev/null | awk -v ctid="$lv_name" '$1 == ctid {gsub(/%/, "", $2); print $2}')
+    [[ -n "$after_trim" ]] && echo -e "${GN}Data after trim $after_trim%${CL}" || echo -e "${GN}Data after trim: not available${CL}"
+  else
+    after_trim=""
+    echo -e "${GN}Data after trim: not available (non-LVM storage)${CL}"
+  fi
+
+  # Logging
+  echo "$(date '+%Y-%m-%d %H:%M:%S') | CTID=$container | Name=$name | Before=${before_trim:-N/A}% | After=${after_trim:-N/A}% | fstrim: $fstrim_output" >>"$LOGFILE"
+  sleep 0.5
+}
+
+# Main loop: trim every CT that is not excluded, not a template, and either
+# already running or explicitly selected for a temporary start.
+for LINE in "${CTLINES[@]}"; do
+  CTID=$(awk '{print $1}' <<<"$LINE")
+  STATUS=$(awk '{print $2}' <<<"$LINE")
+  NAME=$(awk '{print $3}' <<<"$LINE")
+  # Skip user-excluded containers
+  if [[ " ${EXCLUDED[*]} " =~ " $CTID " ]]; then
+    header_info
+    echo -e "${BL}[Info]${GN} Skipping $CTID ($NAME, excluded)${CL}"
+    sleep 0.5
+    continue
+  fi
+  # Skip templates — they cannot be started/trimmed
+  if pct config "$CTID" | grep -q "template:"; then
+    header_info
+    echo -e "${BL}[Info]${GN} Skipping $CTID ($NAME, template)${CL}\n"
+    sleep 0.5
+    continue
+  fi
+  if [[ "$STATUS" != "running" ]]; then
+    if [[ -n "${WAS_STOPPED[$CTID]:-}" ]]; then
+      # User opted in earlier: boot it just for the trim
+      header_info
+      echo -e "${BL}[Info]${GN} Starting $CTID ($NAME) for trim...${CL}"
+      pct start "$CTID"
+      sleep 2
+    else
+      header_info
+      echo -e "${BL}[Info]${GN} Skipping $CTID ($NAME, not running, not selected)${CL}"
+      sleep 0.5
+      continue
+    fi
+  fi
+
+  trim_container "$CTID" "$NAME"
+
+  # Offer to return temporarily-started CTs to their stopped state
+  # (default is Yes: anything except n/N stops the container again)
+  if [[ -n "${WAS_STOPPED[$CTID]:-}" ]]; then
+    read -rp "Stop LXC $CTID ($NAME) again after trim? [Y/n]: " answer
+    if [[ ! "$answer" =~ ^[Nn]$ ]]; then
+      header_info
+      echo -e "${BL}[Info]${GN} Stopping $CTID ($NAME) again...${CL}"
+      pct stop "$CTID"
+      sleep 1
+    else
+      header_info
+      echo -e "${BL}[Info]${GN} Leaving $CTID ($NAME) running as requested.${CL}"
+      sleep 1
+    fi
+  fi
+done
+
+# Final summary and pointer to the per-run log
+header_info
+echo -e "${GN}Finished, LXC Containers Trimmed.${CL} \n"
+echo -e "${BL}If you want to see the complete log: cat $LOGFILE${CL}"
+exit 0
diff --git a/tools/pve/post-pve-install.sh b/tools/pve/post-pve-install.sh
index 5afc3bd02..42df5d77a 100644
--- a/tools/pve/post-pve-install.sh
+++ b/tools/pve/post-pve-install.sh
@@ -1,7 +1,7 @@
#!/usr/bin/env bash
# Copyright (c) 2021-2025 tteck
-# Author: tteck (tteckster)
+# Author: tteckster | MickLesk (CanbiZ)
# License: MIT
# https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
@@ -44,9 +44,66 @@ msg_error() {
echo -e "${BFR} ${CROSS} ${RD}${msg}${CL}"
}
-start_routines() {
+# Print the installed Proxmox VE version as "X.Y.Z".
+# Assumes `pveversion` output like "pve-manager/8.2.4-1/<hash>": field 2
+# (split on '/') is the version, then any "-build" suffix is stripped.
+# TODO(review): confirm this format holds across PVE releases.
+get_pve_version() {
+  local pve_ver
+  pve_ver="$(pveversion | awk -F'/' '{print $2}' | awk -F'-' '{print $1}')"
+  echo "$pve_ver"
+}
+
+get_pve_major_minor() {
+ local ver="$1"
+ local major minor
+ IFS='.' read -r major minor _ <<<"$ver"
+ echo "$major $minor"
+}
+
+# Return success if any *uncommented* "Components:" line in a deb822
+# /etc/apt/sources.list.d/*.sources file lists the given component name
+# as a whole word (e.g. "pve-no-subscription").
+component_exists_in_sources() {
+  local component="$1"
+  grep -h -E "^[^#]*Components:[^#]*\b${component}\b" /etc/apt/sources.list.d/*.sources 2>/dev/null | grep -q .
+}
+
+# Entry point: confirm with the user, detect the PVE version, and dispatch to
+# the matching post-install routine (start_routines_8 / start_routines_9,
+# defined elsewhere in this file). Unsupported versions abort with an error.
+main() {
+  header_info
+  echo -e "\nThis script will Perform Post Install Routines.\n"
+  # Loop until the user gives a clear yes (continue) or no (exit)
+  while true; do
+    read -p "Start the Proxmox VE Post Install Script (y/n)? " yn
+    case $yn in
+    [Yy]*) break ;;
+    [Nn]*)
+      clear
+      exit
+      ;;
+    *) echo "Please answer yes or no." ;;
+    esac
+  done
+
+  local PVE_VERSION PVE_MAJOR PVE_MINOR
+  PVE_VERSION="$(get_pve_version)"
+  read -r PVE_MAJOR PVE_MINOR <<<"$(get_pve_major_minor "$PVE_VERSION")"
+
+  # Supported: 8.0–8.9.x → start_routines_8, exactly 9.0 → start_routines_9
+  if [[ "$PVE_MAJOR" == "8" ]]; then
+    if ((PVE_MINOR < 0 || PVE_MINOR > 9)); then
+      msg_error "Unsupported Proxmox 8 version"
+      exit 1
+    fi
+    start_routines_8
+  elif [[ "$PVE_MAJOR" == "9" ]]; then
+    if ((PVE_MINOR != 0)); then
+      msg_error "Only Proxmox 9.0 is currently supported"
+      exit 1
+    fi
+    start_routines_9
+  else
+    msg_error "Unsupported Proxmox VE major version: $PVE_MAJOR"
+    echo -e "Supported: 8.0–8.9.x and 9.0"
+    exit 1
+  fi
+}
+
+start_routines_8() {
header_info
+ # === Bookworm/8.x: .list-Files ===
CHOICE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "SOURCES" --menu "The package manager will use the correct sources to update and install packages on your Proxmox VE server.\n \nCorrect Proxmox VE sources?" 14 58 2 \
"yes" " " \
"no" " " 3>&2 2>&1 1>&3)
@@ -61,9 +118,7 @@ EOF
echo 'APT::Get::Update::SourceListWarnings::NonFreeFirmware "false";' >/etc/apt/apt.conf.d/no-bookworm-firmware.conf
msg_ok "Corrected Proxmox VE Sources"
;;
- no)
- msg_error "Selected no to Correcting Proxmox VE Sources"
- ;;
+ no) msg_error "Selected no to Correcting Proxmox VE Sources" ;;
esac
CHOICE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "PVE-ENTERPRISE" --menu "The 'pve-enterprise' repository is only available to users who have purchased a Proxmox VE subscription.\n \nDisable 'pve-enterprise' repository?" 14 58 2 \
@@ -77,9 +132,7 @@ EOF
EOF
msg_ok "Disabled 'pve-enterprise' repository"
;;
- no)
- msg_error "Selected no to Disabling 'pve-enterprise' repository"
- ;;
+ no) msg_error "Selected no to Disabling 'pve-enterprise' repository" ;;
esac
CHOICE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "PVE-NO-SUBSCRIPTION" --menu "The 'pve-no-subscription' repository provides access to all of the open-source components of Proxmox VE.\n \nEnable 'pve-no-subscription' repository?" 14 58 2 \
@@ -93,9 +146,7 @@ deb http://download.proxmox.com/debian/pve bookworm pve-no-subscription
EOF
msg_ok "Enabled 'pve-no-subscription' repository"
;;
- no)
- msg_error "Selected no to Enabling 'pve-no-subscription' repository"
- ;;
+ no) msg_error "Selected no to Enabling 'pve-no-subscription' repository" ;;
esac
CHOICE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "CEPH PACKAGE REPOSITORIES" --menu "The 'Ceph Package Repositories' provides access to both the 'no-subscription' and 'enterprise' repositories (initially disabled).\n \nCorrect 'ceph package sources?" 14 58 2 \
@@ -112,9 +163,7 @@ EOF
EOF
msg_ok "Corrected 'ceph package repositories'"
;;
- no)
- msg_error "Selected no to Correcting 'ceph package repositories'"
- ;;
+ no) msg_error "Selected no to Correcting 'ceph package repositories'" ;;
esac
CHOICE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "PVETEST" --menu "The 'pvetest' repository can give advanced users access to new features and updates before they are officially released.\n \nAdd (Disabled) 'pvetest' repository?" 14 58 2 \
@@ -128,30 +177,408 @@ EOF
EOF
msg_ok "Added 'pvetest' repository"
;;
- no)
- msg_error "Selected no to Adding 'pvetest' repository"
- ;;
+ no) msg_error "Selected no to Adding 'pvetest' repository" ;;
esac
- if [[ ! -f /etc/apt/apt.conf.d/no-nag-script ]]; then
- CHOICE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "SUBSCRIPTION NAG" --menu "This will disable the nag message reminding you to purchase a subscription every time you log in to the web interface.\n \nDisable subscription nag?" 14 58 2 \
+ post_routines_common
+}
+
+start_routines_9() {
+ header_info
+
+ # check if deb822 Sources (*.sources) exist
+ if find /etc/apt/sources.list.d/ -maxdepth 1 -name '*.sources' | grep -q .; then
+ whiptail --backtitle "Proxmox VE Helper Scripts" --title "Deb822 sources detected" \
+ --msgbox "Modern deb822 sources (*.sources) already exist.\n\nNo changes to sources format required.\n\nYou may still have legacy sources.list or .list files, which you can disable in the next step." 12 65 || true
+ else
+ check_and_disable_legacy_sources() {
+ local LEGACY_COUNT=0
+ local listfile="/etc/apt/sources.list"
+
+ # Check sources.list
+ if [[ -f "$listfile" ]] && grep -qE '^\s*deb ' "$listfile"; then
+ (( ++LEGACY_COUNT ))
+ fi
+
+ # Check .list files
+ local list_files
+ list_files=$(find /etc/apt/sources.list.d/ -type f -name "*.list" 2>/dev/null)
+ if [[ -n "$list_files" ]]; then
+ LEGACY_COUNT=$((LEGACY_COUNT + $(echo "$list_files" | wc -l)))
+ fi
+
+ if ((LEGACY_COUNT > 0)); then
+ # Show summary to user
+ local MSG="Legacy APT sources found:\n"
+ [[ -f "$listfile" ]] && MSG+=" - /etc/apt/sources.list\n"
+ [[ -n "$list_files" ]] && MSG+="$(echo "$list_files" | sed 's|^| - |')\n"
+ MSG+="\nDo you want to disable (comment out/rename) all legacy sources and use ONLY deb822 .sources format?\n\nRecommended for Proxmox VE 9."
+
+ whiptail --backtitle "Proxmox VE Helper Scripts" --title "Disable legacy sources?" \
+ --yesno "$MSG" 18 80
+ if [[ $? -eq 0 ]]; then
+ # Backup and disable sources.list
+ if [[ -f "$listfile" ]] && grep -qE '^\s*deb ' "$listfile"; then
+ cp "$listfile" "$listfile.bak"
+ sed -i '/^\s*deb /s/^/# Disabled by Proxmox Helper Script /' "$listfile"
+ msg_ok "Disabled entries in sources.list (backup: sources.list.bak)"
+ fi
+ # Rename all .list files to .list.bak
+ if [[ -n "$list_files" ]]; then
+ while IFS= read -r f; do
+ mv "$f" "$f.bak"
+ done <<<"$list_files"
+ msg_ok "Renamed legacy .list files to .bak"
+ fi
+ else
+ msg_error "Kept legacy sources as-is (may cause APT warnings)"
+ fi
+ fi
+ }
+
+ check_and_disable_legacy_sources
+ # === Trixie/9.x: deb822 .sources ===
+ CHOICE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "SOURCES" --menu \
+ "The package manager will use the correct sources to update and install packages on your Proxmox VE 9 server.\n\nMigrate to deb822 sources format?" 14 58 2 \
"yes" " " \
"no" " " 3>&2 2>&1 1>&3)
case $CHOICE in
yes)
- whiptail --backtitle "Proxmox VE Helper Scripts" --msgbox --title "Support Subscriptions" "Supporting the software's development team is essential. Check their official website's Support Subscriptions for pricing. Without their dedicated work, we wouldn't have this exceptional software." 10 58
- msg_info "Disabling subscription nag"
- echo "DPkg::Post-Invoke { \"dpkg -V proxmox-widget-toolkit | grep -q '/proxmoxlib\.js$'; if [ \$? -eq 1 ]; then { echo 'Removing subscription nag from UI...'; sed -i '/.*data\.status.*{/{s/\!//;s/active/NoMoreNagging/}' /usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.js; }; fi\"; };" >/etc/apt/apt.conf.d/no-nag-script
- apt --reinstall install proxmox-widget-toolkit &>/dev/null
- msg_ok "Disabled subscription nag (Delete browser cache)"
+ msg_info "Correcting Proxmox VE Sources (deb822)"
+ # remove all existing .list files
+ rm -f /etc/apt/sources.list.d/*.list
+ # remove bookworm and proxmox entries from sources.list
+ sed -i '/proxmox/d;/bookworm/d' /etc/apt/sources.list || true
+ # Create new deb822 sources
+ cat >/etc/apt/sources.list.d/debian.sources <&2 2>&1 1>&3)
+ case $CHOICE in
+ keep)
+ msg_ok "Kept 'pve-enterprise' repository"
+ ;;
+ disable)
+ msg_info "Disabling (commenting) 'pve-enterprise' repository"
+ # Comment out every non-comment line in the file that has 'pve-enterprise' in Components
+ for file in /etc/apt/sources.list.d/*.sources; do
+ if grep -q "Components:.*pve-enterprise" "$file"; then
+ sed -i '/^\s*Types:/,/^$/s/^\([^#].*\)$/# \1/' "$file"
+ fi
+ done
+ msg_ok "Disabled 'pve-enterprise' repository"
+ ;;
+ delete)
+ msg_info "Deleting 'pve-enterprise' repository file"
+ for file in /etc/apt/sources.list.d/*.sources; do
+ if grep -q "Components:.*pve-enterprise" "$file"; then
+ rm -f "$file"
+ fi
+ done
+ msg_ok "Deleted 'pve-enterprise' repository file"
+ ;;
+ esac
+ else
+ CHOICE=$(whiptail --backtitle "Proxmox VE Helper Scripts" \
+ --title "PVE-ENTERPRISE" \
+ --menu "The 'pve-enterprise' repository is only available to users who have purchased a Proxmox VE subscription.\n\nAdd 'pve-enterprise' repository (deb822)?" 14 58 2 \
+ "no" " " \
+ "yes" " " \
+ --default-item "no" \
+ 3>&2 2>&1 1>&3)
+ case $CHOICE in
+ yes)
+ msg_info "Adding 'pve-enterprise' repository (deb822)"
+ cat >/etc/apt/sources.list.d/pve-enterprise.sources </dev/null; then
+ CHOICE=$(whiptail --backtitle "Proxmox VE Helper Scripts" \
+ --title "CEPH-ENTERPRISE" \
+ --menu "'ceph enterprise' repository already exists.\n\nWhat do you want to do?" 14 58 2 \
+ "keep" "Keep as is" \
+ "disable" "Comment out (disable) this repo" \
+ "delete" "Delete this repo file" \
+ 3>&2 2>&1 1>&3)
+ case $CHOICE in
+ keep)
+ msg_ok "Kept 'ceph enterprise' repository"
+ ;;
+ disable)
+ msg_info "Disabling (commenting) 'ceph enterprise' repository"
+ for file in /etc/apt/sources.list.d/*.sources; do
+ if grep -q "enterprise.proxmox.com.*ceph" "$file"; then
+ sed -i '/^\s*Types:/,/^$/s/^\([^#].*\)$/# \1/' "$file"
+ fi
+ done
+ msg_ok "Disabled 'ceph enterprise' repository"
+ ;;
+ delete)
+ msg_info "Deleting 'ceph enterprise' repository file"
+ for file in /etc/apt/sources.list.d/*.sources; do
+ if grep -q "enterprise.proxmox.com.*ceph" "$file"; then
+ rm -f "$file"
+ fi
+ done
+ msg_ok "Deleted 'ceph enterprise' repository file"
;;
esac
fi
+ # ---- PVE-NO-SUBSCRIPTION ----
+ REPO_FILE=""
+ REPO_ACTIVE=0
+ REPO_COMMENTED=0
+ for file in /etc/apt/sources.list.d/*.sources; do
+ if grep -q "Components:.*pve-no-subscription" "$file"; then
+ REPO_FILE="$file"
+ if grep -E '^[^#]*Components:.*pve-no-subscription' "$file" >/dev/null; then
+ REPO_ACTIVE=1
+ elif grep -E '^#.*Components:.*pve-no-subscription' "$file" >/dev/null; then
+ REPO_COMMENTED=1
+ fi
+ break
+ fi
+ done
+
+ if [[ "$REPO_ACTIVE" -eq 1 ]]; then
+ CHOICE=$(whiptail --backtitle "Proxmox VE Helper Scripts" \
+ --title "PVE-NO-SUBSCRIPTION" \
+ --menu "'pve-no-subscription' repository is currently ENABLED.\n\nWhat do you want to do?" 14 58 3 \
+ "keep" "Keep as is" \
+ "disable" "Comment out (disable)" \
+ "delete" "Delete repo file" \
+ 3>&2 2>&1 1>&3)
+ case $CHOICE in
+ keep)
+ msg_ok "Kept 'pve-no-subscription' repository"
+ ;;
+ disable)
+ msg_info "Disabling (commenting) 'pve-no-subscription' repository"
+ sed -i '/^\s*Types:/,/^$/s/^\([^#].*\)$/# \1/' "$REPO_FILE"
+ msg_ok "Disabled 'pve-no-subscription' repository"
+ ;;
+ delete)
+ msg_info "Deleting 'pve-no-subscription' repository file"
+ rm -f "$REPO_FILE"
+ msg_ok "Deleted 'pve-no-subscription' repository file"
+ ;;
+ esac
+
+ elif [[ "$REPO_COMMENTED" -eq 1 ]]; then
+ CHOICE=$(whiptail --backtitle "Proxmox VE Helper Scripts" \
+ --title "PVE-NO-SUBSCRIPTION" \
+ --menu "'pve-no-subscription' repository is currently DISABLED (commented out).\n\nWhat do you want to do?" 14 58 3 \
+ "enable" "Uncomment (enable)" \
+ "keep" "Keep disabled" \
+ "delete" "Delete repo file" \
+ 3>&2 2>&1 1>&3)
+ case $CHOICE in
+ enable)
+ msg_info "Enabling (uncommenting) 'pve-no-subscription' repository"
+ sed -i '/^#\s*Types:/,/^$/s/^#\s*//' "$REPO_FILE"
+ msg_ok "Enabled 'pve-no-subscription' repository"
+ ;;
+ keep)
+ msg_ok "Kept 'pve-no-subscription' repository disabled"
+ ;;
+ delete)
+ msg_info "Deleting 'pve-no-subscription' repository file"
+ rm -f "$REPO_FILE"
+ msg_ok "Deleted 'pve-no-subscription' repository file"
+ ;;
+ esac
+ else
+ CHOICE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "PVE-NO-SUBSCRIPTION" \
+ --menu "The 'pve-no-subscription' repository provides access to all of the open-source components of Proxmox VE.\n\nAdd 'pve-no-subscription' repository (deb822)?" 14 58 2 \
+ "yes" " " \
+ "no" " " 3>&2 2>&1 1>&3)
+ case $CHOICE in
+ yes)
+ msg_info "Adding 'pve-no-subscription' repository (deb822)"
+ cat >/etc/apt/sources.list.d/proxmox.sources <&2 2>&1 1>&3)
+ case $CHOICE in
+ yes)
+ msg_info "Adding 'ceph package repositories' (deb822)"
+ cat >/etc/apt/sources.list.d/ceph.sources <&2 2>&1 1>&3)
+ case $CHOICE in
+ yes)
+ msg_info "Adding 'pve-test' repository (deb822, disabled)"
+ cat >/etc/apt/sources.list.d/pve-test.sources <