diff --git a/.github/workflows/backup/check_and_update_json_date.yml b/.github/workflows/backup/check_and_update_json_date.yml
deleted file mode 100644
index cde3cbba..00000000
--- a/.github/workflows/backup/check_and_update_json_date.yml
+++ /dev/null
@@ -1,60 +0,0 @@
-name: Update date_created in JSON files
-
-on:
- # Dieser Trigger wird für das Öffnen von PRs sowie für das Aktualisieren von offenen PRs verwendet
- pull_request:
- types: [opened, synchronize]
- schedule:
- # Dieser Trigger wird 4x am Tag ausgelöst, um sicherzustellen, dass das Datum aktualisiert wird
- - cron: "0 0,6,12,18 * * *" # Führt alle 6 Stunden aus
- workflow_dispatch: # Manuelle Ausführung des Workflows möglich
-
-jobs:
- update-date:
- runs-on: ubuntu-latest
-
- steps:
- - name: Checkout repository
- uses: actions/checkout@v4
-
- - name: Install yq
- run: |
- sudo apt-get update
- sudo apt-get install -y yq
-
- - name: Set the current date
- id: set_date
- run: echo "TODAY=$(date -u +%Y-%m-%d)" >> $GITHUB_ENV
-
- - name: Check for changes in PR
- run: |
- # Hole den PR-Branch
- PR_BRANCH="refs/pull/${{ github.event.pull_request.number }}/merge"
- git fetch origin $PR_BRANCH
-
- # Liste alle JSON-Dateien im PR auf, die geändert wurden
- CHANGED_JSON_FILES=$(git diff --name-only origin/main...$PR_BRANCH | grep '.json')
-
- if [ -z "$CHANGED_JSON_FILES" ]; then
- echo "No JSON files changed in this PR."
- exit 0
- fi
-
- # Gehe alle geänderten JSON-Dateien durch und aktualisiere das Datum
- for file in $CHANGED_JSON_FILES; do
- echo "Updating date_created in $file"
- # Setze das aktuelle Datum
- yq eval ".date_created = \"${{ env.TODAY }}\"" -i "$file"
- git add "$file"
- done
-
- - name: Commit and push changes
- run: |
- # Prüfe, ob es Änderungen gibt und committe sie
- git config user.name "json-updater-bot"
- git config user.email "github-actions[bot]@users.noreply.github.com"
-
- git commit -m "Update date_created to ${{ env.TODAY }}" || echo "No changes to commit"
-
- # Push zurück in den PR-Branch
- git push origin $PR_BRANCH
diff --git a/.github/workflows/backup/shellcheck.yml b/.github/workflows/backup/shellcheck.yml
deleted file mode 100644
index 4385fc8e..00000000
--- a/.github/workflows/backup/shellcheck.yml
+++ /dev/null
@@ -1,60 +0,0 @@
-name: Shellcheck
-
-on:
- push:
- branches:
- - main
- pull_request:
- workflow_dispatch:
- schedule:
- - cron: "5 1 * * *"
-
-jobs:
- shellcheck:
- name: Shellcheck
- runs-on: ubuntu-latest
-
- steps:
- - uses: actions/checkout@v4
-
- - name: Get changed files
- id: changed-files
- uses: tj-actions/changed-files@v45
- with:
- files: |
- **.sh
-
- - name: Download ShellCheck
- shell: bash
- env:
- INPUT_VERSION: "v0.10.0"
- run: |
- set -euo pipefail
- if [[ "${{ runner.os }}" == "macOS" ]]; then
- osvariant="darwin"
- else
- osvariant="linux"
- fi
-
- baseurl="https://github.com/koalaman/shellcheck/releases/download"
- curl -Lso "${{ github.workspace }}/sc.tar.xz" \
- "${baseurl}/${INPUT_VERSION}/shellcheck-${INPUT_VERSION}.${osvariant}.x86_64.tar.xz"
-
- tar -xf "${{ github.workspace }}/sc.tar.xz" -C "${{ github.workspace }}"
- mv "${{ github.workspace }}/shellcheck-${INPUT_VERSION}/shellcheck" \
- "${{ github.workspace }}/shellcheck"
-
- - name: Verify ShellCheck binary
- run: |
- ls -l "${{ github.workspace }}/shellcheck"
-
- - name: Display ShellCheck version
- run: |
- "${{ github.workspace }}/shellcheck" --version
-
- - name: Run ShellCheck
- if: steps.changed-files.outputs.any_changed == 'true'
- env:
- ALL_CHANGED_FILES: ${{ steps.changed-files.outputs.all_changed_files }}
- run: |
- echo "${ALL_CHANGED_FILES}" | xargs "${{ github.workspace }}/shellcheck"
diff --git a/.github/workflows/backup/update_json_date.yml.bak b/.github/workflows/backup/update_json_date.yml.bak
deleted file mode 100644
index 71012528..00000000
--- a/.github/workflows/backup/update_json_date.yml.bak
+++ /dev/null
@@ -1,90 +0,0 @@
-name: Auto Update JSON-Date
-
-on:
- push:
- branches:
- - main
- workflow_dispatch:
-
-jobs:
- update-json-dates:
- runs-on: ubuntu-latest
-
- permissions:
- contents: write
- pull-requests: write
-
- steps:
- - name: Generate a token
- id: generate-token
- uses: actions/create-github-app-token@v2
- with:
- app-id: ${{ vars.APP_ID }}
- private-key: ${{ secrets.APP_PRIVATE_KEY }}
- owner: community-scripts
- repositories: ProxmoxVED
-
- - name: Checkout repository
- uses: actions/checkout@v4
- with:
- fetch-depth: 0 # Full history for proper detection
-
- - name: Set up Git
- run: |
- git config --global user.name "GitHub Actions"
- git config --global user.email "github-actions[bot]@users.noreply.github.com"
-
- - name: Find JSON files with incorrect date_created
- id: find_wrong_json
- run: |
- TODAY=$(date -u +"%Y-%m-%d")
- > incorrect_json_files.txt
-
- for FILE in json/*.json; do
- if [[ -f "$FILE" ]]; then
- DATE_IN_JSON=$(jq -r '.date_created' "$FILE" 2>/dev/null || echo "")
-
- if [[ "$DATE_IN_JSON" != "$TODAY" ]]; then
- echo "$FILE" >> incorrect_json_files.txt
- fi
- fi
- done
-
- if [[ -s incorrect_json_files.txt ]]; then
- echo "CHANGED=true" >> $GITHUB_ENV
- else
- echo "CHANGED=false" >> $GITHUB_ENV
- fi
-
- - name: Run update script
- if: env.CHANGED == 'true'
- run: |
- chmod +x .github/workflows/scripts/update-json.sh
- while read -r FILE; do
- .github/workflows/scripts/update-json.sh "$FILE"
- done < incorrect_json_files.txt
-
- - name: Commit and create PR if changes exist
- if: env.CHANGED == 'true'
- run: |
- git add json/*.json
- git commit -m "Auto-update date_created in incorrect JSON files"
- git checkout -b pr-fix-json-dates
- git push origin pr-fix-json-dates --force
- gh pr create --title "[core] Fix incorrect JSON date_created fields" \
- --body "This PR is auto-generated to fix incorrect `date_created` fields in JSON files." \
- --head pr-fix-json-dates \
- --base main \
- --label "automated pr"
- env:
- GH_TOKEN: ${{ steps.generate-token.outputs.token }}
-
- - name: Approve pull request
- if: env.CHANGED == 'true'
- env:
- GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- run: |
- PR_NUMBER=$(gh pr list --head "pr-fix-json-dates" --json number --jq '.[].number')
- if [ -n "$PR_NUMBER" ]; then
- gh pr review $PR_NUMBER --approve
- fi
diff --git a/.github/workflows/backup/validate-formatting.yaml.bak b/.github/workflows/backup/validate-formatting.yaml.bak
deleted file mode 100644
index 8eadd0ac..00000000
--- a/.github/workflows/backup/validate-formatting.yaml.bak
+++ /dev/null
@@ -1,133 +0,0 @@
-name: Validate script formatting
-
-on:
- push:
- branches:
- - main
- pull_request_target:
- paths:
- - "**/*.sh"
- - "**/*.func"
-
-jobs:
- shfmt:
- name: Check changed files
- runs-on: ubuntu-latest
- permissions:
-
- pull-requests: write
-
- steps:
- - name: Get pull request information
- if: github.event_name == 'pull_request_target'
- uses: actions/github-script@v7
- id: pr
- with:
- script: |
- const { data: pullRequest } = await github.rest.pulls.get({
- ...context.repo,
- pull_number: context.payload.pull_request.number,
- });
- return pullRequest;
-
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- fetch-depth: 0 # Ensure the full history is fetched for accurate diffing
- ref: ${{ github.event_name == 'pull_request_target' && fromJSON(steps.pr.outputs.result).merge_commit_sha || '' }}
-
- - name: Get changed files
- id: changed-files
- run: |
- if ${{ github.event_name == 'pull_request_target' }}; then
- echo "files=$(git diff --name-only ${{ github.event.pull_request.base.sha }} ${{ steps.pr.outputs.result && fromJSON(steps.pr.outputs.result).merge_commit_sha }} | grep -E '\.(sh|func)$' | xargs)" >> $GITHUB_OUTPUT
- else
- echo "files=$(git diff --name-only ${{ github.event.before }} ${{ github.event.after }} | grep -E '\.(sh|func)$' | xargs)" >> $GITHUB_OUTPUT
- fi
-
- - name: Set up Go
- if: steps.changed-files.outputs.files != ''
- uses: actions/setup-go@v5
-
- - name: Install shfmt
- if: steps.changed-files.outputs.files != ''
- run: |
- go install mvdan.cc/sh/v3/cmd/shfmt@latest
- echo "$GOPATH/bin" >> $GITHUB_PATH
-
- - name: Run shfmt
- if: steps.changed-files.outputs.files != ''
- id: shfmt
- run: |
- set +e
-
-
- shfmt_output=$(shfmt -d ${{ steps.changed-files.outputs.files }})
- if [[ $? -eq 0 ]]; then
- exit 0
- else
- echo "diff=\"$(echo -n "$shfmt_output" | base64 -w 0)\"" >> $GITHUB_OUTPUT
- printf "%s" "$shfmt_output"
- exit 1
- fi
-
- - name: Post comment with results
- if: always() && steps.changed-files.outputs.files != '' && github.event_name == 'pull_request_target'
- uses: actions/github-script@v7
- with:
- script: |
- const result = "${{ job.status }}" === "success" ? "success" : "failure";
- const diff = Buffer.from(
- ${{ steps.shfmt.outputs.diff }},
- "base64",
- ).toString();
- const issueNumber = context.payload.pull_request
- ? context.payload.pull_request.number
- : null;
- const commentIdentifier = "validate-formatting";
- let newCommentBody = `\n### Script formatting\n\n`;
-
- if (result === "failure") {
- newCommentBody +=
- `:x: We found issues in the formatting of the following changed files:\n\n\`\`\`diff\n${diff}\n\`\`\`\n`;
- } else {
- newCommentBody += `:rocket: All changed shell scripts are formatted correctly!\n`;
- }
-
- newCommentBody += `\n\n`;
-
- if (issueNumber) {
- const { data: comments } = await github.rest.issues.listComments({
- ...context.repo,
- issue_number: issueNumber,
- });
-
- const existingComment = comments.find(
- (comment) => comment.user.login === "github-actions[bot]",
-
- );
-
- if (existingComment) {
- if (existingComment.body.includes(commentIdentifier)) {
- const re = new RegExp(
- String.raw`[\s\S]*?`,
- "",
- );
- newCommentBody = existingComment.body.replace(re, newCommentBody);
- } else {
- newCommentBody = existingComment.body + "\n\n---\n\n" + newCommentBody;
- }
-
- await github.rest.issues.updateComment({
- ...context.repo,
- comment_id: existingComment.id,
- body: newCommentBody,
- });
- } else {
- await github.rest.issues.createComment({
- ...context.repo,
- issue_number: issueNumber,
- body: newCommentBody,
- });
- }
- }
diff --git a/.github/workflows/backup/validate-scripts.yml.bak b/.github/workflows/backup/validate-scripts.yml.bak
deleted file mode 100644
index acb86132..00000000
--- a/.github/workflows/backup/validate-scripts.yml.bak
+++ /dev/null
@@ -1,234 +0,0 @@
-name: Validate scripts
-on:
- push:
- branches:
- - main
- pull_request_target:
- paths:
- - "ct/*.sh"
- - "install/*.sh"
-
-jobs:
- check-scripts:
- name: Check changed files
- runs-on: ubuntu-latest
- permissions:
- pull-requests: write
-
- steps:
- - name: Debug event payload
- run: |
- echo "Event name: ${{ github.event_name }}"
- echo "Payload: $(cat $GITHUB_EVENT_PATH)"
-
- - name: Get pull request information
- if: github.event_name == 'pull_request_target'
- uses: actions/github-script@v7
- id: pr
- with:
- script: |
- const { data: pullRequest } = await github.rest.pulls.get({
- ...context.repo,
- pull_number: context.payload.pull_request.number,
- });
- return pullRequest;
-
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- fetch-depth: 0
- ref: ${{ github.event_name == 'pull_request_target' && fromJSON(steps.pr.outputs.result).merge_commit_sha || '' }}
-
- - name: Get changed files
- id: changed-files
- run: |
- if [ "${{ github.event_name }}" == "pull_request_target" ]; then
- echo "files=$(git diff --name-only ${{ github.event.pull_request.base.sha }} ${{ steps.pr.outputs.result && fromJSON(steps.pr.outputs.result).merge_commit_sha }} | grep -E '\.(sh|func)$' | xargs)" >> $GITHUB_OUTPUT
- else
- echo "files=$(git diff --name-only ${{ github.event.before }} ${{ github.event.after }} | grep -E '\.(sh|func)$' | xargs)" >> $GITHUB_OUTPUT
- fi
-
- - name: Check build.func line
- if: always() && steps.changed-files.outputs.files != ''
- id: build-func
- run: |
- NON_COMPLIANT_FILES=""
- for FILE in ${{ steps.changed-files.outputs.files }}; do
- if [[ "$FILE" == ct/* ]] && [[ $(sed -n '2p' "$FILE") != "source <(curl -s https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)" ]]; then
- NON_COMPLIANT_FILES="$NON_COMPLIANT_FILES $FILE"
- fi
- done
-
- if [ -n "$NON_COMPLIANT_FILES" ]; then
- echo "files=$NON_COMPLIANT_FILES" >> $GITHUB_OUTPUT
- echo "Build.func line missing or incorrect in files:"
- for FILE in $NON_COMPLIANT_FILES; do
- echo "$FILE"
- done
- exit 1
- fi
-
- - name: Check executable permissions
- if: always() && steps.changed-files.outputs.files != ''
- id: check-executable
- run: |
- NON_COMPLIANT_FILES=""
- for FILE in ${{ steps.changed-files.outputs.files }}; do
- if [[ ! -x "$FILE" ]]; then
- NON_COMPLIANT_FILES="$NON_COMPLIANT_FILES $FILE"
- fi
- done
-
- if [ -n "$NON_COMPLIANT_FILES" ]; then
- echo "files=$NON_COMPLIANT_FILES" >> $GITHUB_OUTPUT
- echo "Files not executable:"
- for FILE in $NON_COMPLIANT_FILES; do
- echo "$FILE"
- done
- exit 1
- fi
-
- - name: Check copyright
- if: always() && steps.changed-files.outputs.files != ''
- id: check-copyright
- run: |
- NON_COMPLIANT_FILES=""
- for FILE in ${{ steps.changed-files.outputs.files }}; do
- if ! sed -n '3p' "$FILE" | grep -qE "^# Copyright \(c\) [0-9]{4}(-[0-9]{4})? (tteck \| community-scripts ORG|community-scripts ORG|tteck)$"; then
- NON_COMPLIANT_FILES="$NON_COMPLIANT_FILES $FILE"
- fi
- done
-
- if [ -n "$NON_COMPLIANT_FILES" ]; then
- echo "files=$NON_COMPLIANT_FILES" >> $GITHUB_OUTPUT
- echo "Copyright header missing or not on line 3 in files:"
- for FILE in $NON_COMPLIANT_FILES; do
- echo "$FILE"
- done
- exit 1
- fi
-
- - name: Check author
- if: always() && steps.changed-files.outputs.files != ''
- id: check-author
- run: |
- NON_COMPLIANT_FILES=""
- for FILE in ${{ steps.changed-files.outputs.files }}; do
- if ! sed -n '4p' "$FILE" | grep -qE "^# Author: .+"; then
- NON_COMPLIANT_FILES="$NON_COMPLIANT_FILES $FILE"
- fi
- done
-
- if [ -n "$NON_COMPLIANT_FILES" ]; then
- echo "files=$NON_COMPLIANT_FILES" >> $GITHUB_OUTPUT
- echo "Author header missing or invalid on line 4 in files:"
- for FILE in $NON_COMPLIANT_FILES; do
- echo "$FILE"
- done
- exit 1
- fi
-
- - name: Check license
- if: always() && steps.changed-files.outputs.files != ''
- id: check-license
- run: |
- NON_COMPLIANT_FILES=""
- for FILE in ${{ steps.changed-files.outputs.files }}; do
- if [[ "$(sed -n '5p' "$FILE")" != "# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE" ]]; then
- NON_COMPLIANT_FILES="$NON_COMPLIANT_FILES $FILE"
- fi
- done
-
- if [ -n "$NON_COMPLIANT_FILES" ]; then
- echo "files=$NON_COMPLIANT_FILES" >> $GITHUB_OUTPUT
- echo "License header missing or not on line 5 in files:"
- for FILE in $NON_COMPLIANT_FILES; do
- echo "$FILE"
- done
- exit 1
- fi
-
- - name: Check source
- if: always() && steps.changed-files.outputs.files != ''
- id: check-source
- run: |
- NON_COMPLIANT_FILES=""
- for FILE in ${{ steps.changed-files.outputs.files }}; do
- if ! sed -n '6p' "$FILE" | grep -qE "^# Source: .+"; then
- NON_COMPLIANT_FILES="$NON_COMPLIANT_FILES $FILE"
- fi
- done
-
- if [ -n "$NON_COMPLIANT_FILES" ]; then
- echo "files=$NON_COMPLIANT_FILES" >> $GITHUB_OUTPUT
- echo "Source header missing or not on line 6 in files:"
- for FILE in $NON_COMPLIANT_FILES; do
- echo "$FILE"
- done
- exit 1
- fi
-
- - name: Post results and comment
- if: always() && steps.changed-files.outputs.files != '' && github.event_name == 'pull_request_target'
- uses: actions/github-script@v7
- with:
- script: |
- const result = '${{ job.status }}' === 'success' ? 'success' : 'failure';
- const nonCompliantFiles = {
- 'Invalid build.func source': "${{ steps.build-func.outputs.files || '' }}",
- 'Not executable': "${{ steps.check-executable.outputs.files || '' }}",
- 'Copyright header line missing or invalid': "${{ steps.check-copyright.outputs.files || '' }}",
- 'Author header line missing or invalid': "${{ steps.check-author.outputs.files || '' }}",
- 'License header line missing or invalid': "${{ steps.check-license.outputs.files || '' }}",
- 'Source header line missing or invalid': "${{ steps.check-source.outputs.files || '' }}"
- };
-
- const issueNumber = context.payload.pull_request ? context.payload.pull_request.number : null;
- const commentIdentifier = 'validate-scripts';
- let newCommentBody = `\n### Script validation\n\n`;
-
- if (result === 'failure') {
- newCommentBody += ':x: We found issues in the following changed files:\n\n';
- for (const [check, files] of Object.entries(nonCompliantFiles)) {
- if (files) {
- newCommentBody += `**${check}:**\n`;
- files.trim().split(' ').forEach(file => {
- newCommentBody += `- ${file}: ${check}\n`;
- });
- newCommentBody += `\n`;
- }
- }
- } else {
- newCommentBody += `:rocket: All changed shell scripts passed validation!\n`;
- }
-
- newCommentBody += `\n\n`;
-
- if (issueNumber) {
- const { data: comments } = await github.rest.issues.listComments({
- ...context.repo,
- issue_number: issueNumber
- });
-
- const existingComment = comments.find(comment =>
- comment.body.includes(``) &&
- comment.user.login === 'github-actions[bot]'
- );
-
- if (existingComment) {
- const re = new RegExp(String.raw`[\\s\\S]*?`, "m");
- newCommentBody = existingComment.body.replace(re, newCommentBody);
-
- await github.rest.issues.updateComment({
- ...context.repo,
- comment_id: existingComment.id,
- body: newCommentBody
- });
- } else {
- await github.rest.issues.createComment({
- ...context.repo,
- issue_number: issueNumber,
- body: newCommentBody
- });
- }
- }
diff --git a/.github/workflows/changelog-pr.yaml b/.github/workflows/changelog-pr.yaml
deleted file mode 100644
index 80959d54..00000000
--- a/.github/workflows/changelog-pr.yaml
+++ /dev/null
@@ -1,286 +0,0 @@
-name: Create Changelog Pull Request
-
-on:
- push:
- branches: ["main"]
- workflow_dispatch:
-
-jobs:
- update-changelog-pull-request:
- if: github.repository == 'community-scripts/ProxmoxVED'
- runs-on: ubuntu-latest
- env:
- CONFIG_PATH: .github/changelog-pr-config.json
- BRANCH_NAME: github-action-update-changelog
- AUTOMATED_PR_LABEL: "automated pr"
- permissions:
- contents: write
- pull-requests: write
- steps:
- - name: Generate a token for PR creation
- id: generate-token
- uses: actions/create-github-app-token@v2
- with:
- app-id: ${{ vars.APP_ID }}
- private-key: ${{ secrets.APP_PRIVATE_KEY }}
- owner: community-scripts
- repositories: ProxmoxVED
-
- - name: Generate a token for PR approval and merge
- id: generate-token-merge
- uses: actions/create-github-app-token@v2
- with:
- app-id: ${{ secrets.APP_ID_APPROVE_AND_MERGE }}
- private-key: ${{ secrets.APP_KEY_APPROVE_AND_MERGE }}
- owner: community-scripts
- repositories: ProxmoxVED
-
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- fetch-depth: 0
-
- - name: Get latest dates in changelog
- run: |
- DATES=$(grep -E '^## [0-9]{4}-[0-9]{2}-[0-9]{2}' CHANGELOG.md | head -n 2 | awk '{print $2}')
-
- LATEST_DATE=$(echo "$DATES" | sed -n '1p')
- SECOND_LATEST_DATE=$(echo "$DATES" | sed -n '2p')
- TODAY=$(date -u +%Y-%m-%d)
-
- echo "TODAY=$TODAY" >> $GITHUB_ENV
- if [[ "$LATEST_DATE" == "$TODAY" ]]; then
- echo "LATEST_DATE=$SECOND_LATEST_DATE" >> $GITHUB_ENV
- else
- echo "LATEST_DATE=$LATEST_DATE" >> $GITHUB_ENV
- fi
-
- - name: Get categorized pull requests
- id: get-categorized-prs
- uses: actions/github-script@v7
- with:
- script: |
- async function main() {
- const fs = require('fs').promises;
- const path = require('path');
-
- const configPath = path.resolve(process.env.CONFIG_PATH);
- const fileContent = await fs.readFile(configPath, 'utf-8');
- const changelogConfig = JSON.parse(fileContent);
-
- const categorizedPRs = changelogConfig.map(obj => ({
- ...obj,
- notes: [],
- subCategories: obj.subCategories ?? (
- obj.labels.includes("update script") ? [
- { title: "🐞 Bug Fixes", labels: ["bugfix"], notes: [] },
- { title: "✨ New Features", labels: ["feature"], notes: [] },
- { title: "💥 Breaking Changes", labels: ["breaking change"], notes: [] },
- { title: "🔧 Refactor", labels: ["refactor"], notes: [] },
- ] :
- obj.labels.includes("maintenance") ? [
- { title: "🐞 Bug Fixes", labels: ["bugfix"], notes: [] },
- { title: "✨ New Features", labels: ["feature"], notes: [] },
- { title: "💥 Breaking Changes", labels: ["breaking change"], notes: [] },
- { title: "📡 API", labels: ["api"], notes: [] },
- { title: "Github", labels: ["github"], notes: [] },
- { title: "📝 Documentation", labels: ["documentation"], notes: [] },
- { title: "🔧 Refactor", labels: ["refactor"], notes: [] }
- ] :
- obj.labels.includes("website") ? [
- { title: "🐞 Bug Fixes", labels: ["bugfix"], notes: [] },
- { title: "✨ New Features", labels: ["feature"], notes: [] },
- { title: "💥 Breaking Changes", labels: ["breaking change"], notes: [] },
- { title: "Script Information", labels: ["json"], notes: [] }
- ] : []
- )
- }));
-
- const latestDateInChangelog = new Date(process.env.LATEST_DATE);
- latestDateInChangelog.setUTCHours(23, 59, 59, 999);
-
- const { data: pulls } = await github.rest.pulls.list({
- owner: context.repo.owner,
- repo: "ProxmoxVE",
- base: "main",
- state: "closed",
- sort: "updated",
- direction: "desc",
- per_page: 100,
- });
-
- const filteredPRs = pulls.filter(pr =>
- pr.merged_at &&
- new Date(pr.merged_at) > latestDateInChangelog &&
- !pr.labels.some(label =>
- ["invalid", "wontdo", process.env.AUTOMATED_PR_LABEL].includes(label.name.toLowerCase())
- )
- );
-
- for (const pr of filteredPRs) {
- const prLabels = pr.labels.map(label => label.name.toLowerCase());
- if (pr.user.login.includes("push-app-to-main[bot]")) {
-
- const scriptName = pr.title;
- try {
- const { data: relatedIssues } = await github.rest.issues.listForRepo({
- owner: context.repo.owner,
- repo: "ProxmoxVED",
- state: "all",
- labels: ["Started Migration To ProxmoxVE"]
- });
-
- const matchingIssue = relatedIssues.find(issue =>
- issue.title.toLowerCase().includes(scriptName.toLowerCase())
- );
-
- if (matchingIssue) {
- const issueAuthor = matchingIssue.user.login;
- const issueAuthorUrl = `https://github.com/${issueAuthor}`;
- prNote = `- ${pr.title} [@${issueAuthor}](${issueAuthorUrl}) ([#${pr.number}](${pr.html_url}))`;
- }
- else {
- prNote = `- ${pr.title} ([#${pr.number}](${pr.html_url}))`;
- }
- } catch (error) {
- console.error(`Error fetching related issues: ${error}`);
- prNote = `- ${pr.title} ([#${pr.number}](${pr.html_url}))`;
- }
- }else{
- prNote = `- ${pr.title} [@${pr.user.login}](https://github.com/${pr.user.login}) ([#${pr.number}](${pr.html_url}))`;
- }
-
-
- if (prLabels.includes("new script")) {
- const newScriptCategory = categorizedPRs.find(category =>
- category.title === "New Scripts" || category.labels.includes("new script"));
- if (newScriptCategory) {
- newScriptCategory.notes.push(prNote);
- }
- } else {
-
- let categorized = false;
- const priorityCategories = categorizedPRs.slice();
- for (const category of priorityCategories) {
- if (categorized) break;
- if (category.labels.some(label => prLabels.includes(label))) {
- if (category.subCategories && category.subCategories.length > 0) {
- const subCategory = category.subCategories.find(sub =>
- sub.labels.some(label => prLabels.includes(label))
- );
-
- if (subCategory) {
- subCategory.notes.push(prNote);
- } else {
- category.notes.push(prNote);
- }
- } else {
- category.notes.push(prNote);
- }
- categorized = true;
- }
- }
- }
-
- }
-
- return categorizedPRs;
- }
-
- return await main();
-
- - name: Update CHANGELOG.md
- uses: actions/github-script@v7
- with:
- script: |
- const fs = require('fs').promises;
- const path = require('path');
-
- const today = process.env.TODAY;
- const latestDateInChangelog = process.env.LATEST_DATE;
- const changelogPath = path.resolve('CHANGELOG.md');
- const categorizedPRs = ${{ steps.get-categorized-prs.outputs.result }};
-
- let newReleaseNotes = `## ${today}\n\n`;
- for (const { title, notes, subCategories } of categorizedPRs) {
- const hasSubcategories = subCategories && subCategories.length > 0;
- const hasMainNotes = notes.length > 0;
- const hasSubNotes = hasSubcategories && subCategories.some(sub => sub.notes && sub.notes.length > 0);
-
- if (hasMainNotes || hasSubNotes) {
- newReleaseNotes += `### ${title}\n\n`;
- }
-
- if (hasMainNotes) {
- newReleaseNotes += ` ${notes.join("\n")}\n\n`;
- }
- if (hasSubcategories) {
- for (const { title: subTitle, notes: subNotes } of subCategories) {
- if (subNotes && subNotes.length > 0) {
- newReleaseNotes += ` - #### ${subTitle}\n\n`;
- newReleaseNotes += ` ${subNotes.join("\n ")}\n\n`;
- }
- }
- }
- }
- const changelogContent = await fs.readFile(changelogPath, 'utf-8');
- const changelogIncludesTodaysReleaseNotes = changelogContent.includes(`\n## ${today}`);
-
- const regex = changelogIncludesTodaysReleaseNotes
- ? new RegExp(`## ${today}.*(?=## ${latestDateInChangelog})`, "gs")
- : new RegExp(`(?=## ${latestDateInChangelog})`, "gs");
-
- const newChangelogContent = changelogContent.replace(regex, newReleaseNotes);
- await fs.writeFile(changelogPath, newChangelogContent);
-
- - name: Check for changes
- id: verify-diff
- run: |
- git diff --quiet . || echo "changed=true" >> $GITHUB_ENV
-
- - name: Commit and push changes
- if: env.changed == 'true'
- run: |
- git config --global user.name "github-actions[bot]"
- git config --global user.email "github-actions[bot]@users.noreply.github.com"
- git add CHANGELOG.md
- git commit -m "Update CHANGELOG.md"
- git checkout -b $BRANCH_NAME || git checkout $BRANCH_NAME
- git push origin $BRANCH_NAME --force
-
- - name: Create pull request if not exists
- if: env.changed == 'true'
- env:
- GH_TOKEN: ${{ steps.generate-token.outputs.token }}
- run: |
- PR_EXISTS=$(gh pr list --head "${BRANCH_NAME}" --json number --jq '.[].number')
- if [ -z "$PR_EXISTS" ]; then
- gh pr create --title "[Github Action] Update CHANGELOG.md" \
- --body "This PR is auto-generated by a Github Action to update the CHANGELOG.md file." \
- --head $BRANCH_NAME \
- --base main \
- --label "$AUTOMATED_PR_LABEL"
- fi
-
- - name: Approve pull request
- if: env.changed == 'true'
- env:
- GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- run: |
- PR_NUMBER=$(gh pr list --head "${BRANCH_NAME}" --json number --jq '.[].number')
- if [ -n "$PR_NUMBER" ]; then
- gh pr review $PR_NUMBER --approve
- fi
-
- - name: Approve pull request and merge
- if: env.changed == 'true'
- env:
- GH_TOKEN: ${{ steps.generate-token-merge.outputs.token }}
- run: |
- git config --global user.name "github-actions-automege[bot]"
- git config --global user.email "github-actions-automege[bot]@users.noreply.github.com"
- PR_NUMBER=$(gh pr list --head "${BRANCH_NAME}" --json number --jq '.[].number')
- if [ -n "$PR_NUMBER" ]; then
- gh pr review $PR_NUMBER --approve
- gh pr merge $PR_NUMBER --squash --admin
- fi
diff --git a/.github/workflows/close-discussion.yaml b/.github/workflows/close-discussion.yaml
deleted file mode 100644
index 9b0352f4..00000000
--- a/.github/workflows/close-discussion.yaml
+++ /dev/null
@@ -1,164 +0,0 @@
-name: Close Discussion on PR Merge
-
-on:
- push:
- branches:
- - main
-
-permissions:
- contents: read
- discussions: write
-
-jobs:
- close-discussion:
- if: github.repository == 'community-scripts/ProxmoxVED'
- runs-on: ubuntu-latest
-
- steps:
- - name: Checkout Repository
- uses: actions/checkout@v4
-
- - name: Set Up Node.js
- uses: actions/setup-node@v4
- with:
- node-version: "20"
-
- - name: Install Dependencies
- run: npm install zx @octokit/graphql
-
- - name: Close Discussion
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- GITHUB_SHA: ${{ github.sha }}
- GITHUB_REPOSITORY: ${{ github.repository }}
- run: |
- npx zx << 'EOF'
- import { graphql } from "@octokit/graphql";
-
- (async function () {
- try {
- const token = process.env.GITHUB_TOKEN;
- const commitSha = process.env.GITHUB_SHA;
- const [owner, repo] = process.env.GITHUB_REPOSITORY.split("/");
-
- if (!token || !commitSha || !owner || !repo) {
- console.log("Missing required environment variables.");
- process.exit(1);
- }
-
- const graphqlWithAuth = graphql.defaults({
- headers: { authorization: `Bearer ${token}` },
- });
-
- // Find PR from commit SHA
- const searchQuery = `
- query($owner: String!, $repo: String!, $sha: GitObjectID!) {
- repository(owner: $owner, name: $repo) {
- object(oid: $sha) {
- ... on Commit {
- associatedPullRequests(first: 1) {
- nodes {
- number
- body
- }
- }
- }
- }
- }
- }
- `;
-
- const prResult = await graphqlWithAuth(searchQuery, {
- owner,
- repo,
- sha: commitSha,
- });
-
- const pr = prResult.repository.object.associatedPullRequests.nodes[0];
- if (!pr) {
- console.log("No PR found for this commit.");
- return;
- }
-
- const prNumber = pr.number;
- const prBody = pr.body;
-
- const match = prBody.match(/#(\d+)/);
- if (!match) {
- console.log("No discussion ID found in PR body.");
- return;
- }
-
- const discussionNumber = match[1];
- console.log(`Extracted Discussion Number: ${discussionNumber}`);
-
- // Fetch GraphQL discussion ID
- const discussionQuery = `
- query($owner: String!, $repo: String!, $number: Int!) {
- repository(owner: $owner, name: $repo) {
- discussion(number: $number) {
- id
- }
- }
- }
- `;
-
- //
- try {
- const discussionResponse = await graphqlWithAuth(discussionQuery, {
- owner,
- repo,
- number: parseInt(discussionNumber, 10),
- });
-
- const discussionQLId = discussionResponse.repository.discussion.id;
- if (!discussionQLId) {
- console.log("Failed to fetch discussion GraphQL ID.");
- return;
- }
- } catch (error) {
- console.error("Discussion not found or error occurred while fetching discussion:", error);
- return;
- }
-
- // Post comment
- const commentMutation = `
- mutation($discussionId: ID!, $body: String!) {
- addDiscussionComment(input: { discussionId: $discussionId, body: $body }) {
- comment { id body }
- }
- }
- `;
-
- const commentResponse = await graphqlWithAuth(commentMutation, {
- discussionId: discussionQLId,
- body: `Merged with PR #${prNumber}`,
- });
-
- const commentId = commentResponse.addDiscussionComment.comment.id;
- if (!commentId) {
- console.log("Failed to post the comment.");
- return;
- }
-
- console.log(`Comment Posted Successfully! Comment ID: ${commentId}`);
-
- // Mark comment as answer
- const markAnswerMutation = `
- mutation($id: ID!) {
- markDiscussionCommentAsAnswer(input: { id: $id }) {
- discussion { id title }
- }
- }
- `;
-
- await graphqlWithAuth(markAnswerMutation, { id: commentId });
-
- console.log("Comment marked as answer successfully!");
-
- } catch (error) {
- console.error("Error:", error);
- process.exit(1);
- }
- })();
- EOF
diff --git a/.github/workflows/close-ttek-issue.yaml b/.github/workflows/close-ttek-issue.yaml
deleted file mode 100644
index 037d6075..00000000
--- a/.github/workflows/close-ttek-issue.yaml
+++ /dev/null
@@ -1,53 +0,0 @@
-name: Auto-Close tteck Issues
-
-on:
- issues:
- types: [opened]
-
-jobs:
- close_tteck_issues:
- if: github.repository == 'community-scripts/ProxmoxVED'
- runs-on: ubuntu-latest
- steps:
- - name: Auto-close if tteck script detected
- uses: actions/github-script@v7
- with:
- script: |
- const issue = context.payload.issue;
- const content = `${issue.title}\n${issue.body}`;
- const issueNumber = issue.number;
-
- // Check for tteck script mention
- if (content.includes("tteck") || content.includes("tteck/Proxmox")) {
- const message = `Hello, it looks like you are referencing the **old tteck repo**.
-
- This repository is no longer used for active scripts.
- **Please update your bookmarks** and use: [https://helper-scripts.com](https://helper-scripts.com)
-
- Also make sure your Bash command starts with:
- \`\`\`bash
- bash <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/ct/...)
- \`\`\`
-
- This issue is being closed automatically.`;
-
- await github.rest.issues.createComment({
- ...context.repo,
- issue_number: issueNumber,
- body: message
- });
-
- // Optionally apply a label like "not planned"
- await github.rest.issues.addLabels({
- ...context.repo,
- issue_number: issueNumber,
- labels: ["not planned"]
- });
-
- // Close the issue
- await github.rest.issues.update({
- ...context.repo,
- issue_number: issueNumber,
- state: "closed"
- });
- }
diff --git a/.github/workflows/live/changelog-pr.yml b/.github/workflows/live/changelog-pr.yml
deleted file mode 100644
index 87da5514..00000000
--- a/.github/workflows/live/changelog-pr.yml
+++ /dev/null
@@ -1,228 +0,0 @@
-name: Create Changelog Pull Request
-
-on:
- push:
- branches: ["main"]
- workflow_dispatch:
-
-jobs:
- update-changelog-pull-request:
- runs-on: ubuntu-latest
- env:
- CONFIG_PATH: .github/changelog-pr-config.json
- BRANCH_NAME: github-action-update-changelog
- AUTOMATED_PR_LABEL: "automated pr"
- permissions:
- contents: write
- pull-requests: write
- steps:
- - name: Generate a token
- id: generate-token
- uses: actions/create-github-app-token@v2
- with:
- app-id: ${{ vars.APP_ID }}
- private-key: ${{ secrets.APP_PRIVATE_KEY }}
- owner: community-scripts
- repositories: ProxmoxVED
-
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- fetch-depth: 0
-
- - name: Get latest dates in changelog
- run: |
- DATES=$(grep -E '^## [0-9]{4}-[0-9]{2}-[0-9]{2}' CHANGELOG.md | head -n 2 | awk '{print $2}')
-
- LATEST_DATE=$(echo "$DATES" | sed -n '1p')
- SECOND_LATEST_DATE=$(echo "$DATES" | sed -n '2p')
- TODAY=$(date -u +%Y-%m-%d)
-
- echo "TODAY=$TODAY" >> $GITHUB_ENV
- if [[ "$LATEST_DATE" == "$TODAY" ]]; then
- echo "LATEST_DATE=$SECOND_LATEST_DATE" >> $GITHUB_ENV
- else
- echo "LATEST_DATE=$LATEST_DATE" >> $GITHUB_ENV
- fi
-
- - name: Get categorized pull requests
- id: get-categorized-prs
- uses: actions/github-script@v7
- with:
- script: |
- const fs = require('fs').promises;
- const path = require('path');
-
- const configPath = path.resolve(process.env.CONFIG_PATH);
- const fileContent = await fs.readFile(configPath, 'utf-8');
- const changelogConfig = JSON.parse(fileContent);
-
- const categorizedPRs = changelogConfig.map(obj => ({
- ...obj,
- notes: [],
- subCategories: obj.subCategories ?? (
- obj.labels.includes("update script") ? [
- { title: "🐞 Bug Fixes", labels: ["bugfix"], notes: [] },
- { title: "✨ New Features", labels: ["feature"], notes: [] },
- { title: "💥 Breaking Changes", labels: ["breaking change"], notes: [] }
- ] :
- obj.labels.includes("maintenance") ? [
- { title: "🐞 Bug Fixes", labels: ["bugfix"], notes: [] },
- { title: "✨ New Features", labels: ["feature"], notes: [] },
- { title: "💥 Breaking Changes", labels: ["breaking change"], notes: [] },
- { title: "📡 API", labels: ["api"], notes: [] },
- { title: "Github", labels: ["github"], notes: [] }
- ] :
- obj.labels.includes("website") ? [
- { title: "🐞 Bug Fixes", labels: ["bugfix"], notes: [] },
- { title: "✨ New Features", labels: ["feature"], notes: [] },
- { title: "💥 Breaking Changes", labels: ["breaking change"], notes: [] },
- { title: "Script Information", labels: ["json"], notes: [] }
- ] : []
- )
- }));
-
- const latestDateInChangelog = new Date(process.env.LATEST_DATE);
- latestDateInChangelog.setUTCHours(23, 59, 59, 999);
-
- const { data: pulls } = await github.rest.pulls.list({
- owner: context.repo.owner,
- repo: context.repo.repo,
- base: "main",
- state: "closed",
- sort: "updated",
- direction: "desc",
- per_page: 100,
- });
-
- pulls.filter(pr =>
- pr.merged_at &&
- new Date(pr.merged_at) > latestDateInChangelog &&
- !pr.labels.some(label =>
- ["invalid", "wontdo", process.env.AUTOMATED_PR_LABEL].includes(label.name.toLowerCase())
- )
- ).forEach(pr => {
-
- const prLabels = pr.labels.map(label => label.name.toLowerCase());
- const prNote = `- ${pr.title} [@${pr.user.login}](https://github.com/${pr.user.login}) ([#${pr.number}](${pr.html_url}))`;
-
- const updateScriptsCategory = categorizedPRs.find(category =>
- category.labels.some(label => prLabels.includes(label))
- );
-
- if (updateScriptsCategory) {
-
- const subCategory = updateScriptsCategory.subCategories.find(sub =>
- sub.labels.some(label => prLabels.includes(label))
- );
-
- if (subCategory) {
- subCategory.notes.push(prNote);
- } else {
- updateScriptsCategory.notes.push(prNote);
- }
- }
- });
-
- console.log(JSON.stringify(categorizedPRs, null, 2));
-
- return categorizedPRs;
-
-
- - name: Update CHANGELOG.md
- uses: actions/github-script@v7
- with:
- script: |
- const fs = require('fs').promises;
- const path = require('path');
-
- const today = process.env.TODAY;
- const latestDateInChangelog = process.env.LATEST_DATE;
- const changelogPath = path.resolve('CHANGELOG.md');
- const categorizedPRs = ${{ steps.get-categorized-prs.outputs.result }};
-
- console.log(JSON.stringify(categorizedPRs, null, 2));
-
-
- let newReleaseNotes = `## ${today}\n\n`;
- for (const { title, notes, subCategories } of categorizedPRs) {
- const hasSubcategories = subCategories && subCategories.length > 0;
- const hasMainNotes = notes.length > 0;
- const hasSubNotes = hasSubcategories && subCategories.some(sub => sub.notes && sub.notes.length > 0);
-
-
- if (hasMainNotes || hasSubNotes) {
- newReleaseNotes += `### ${title}\n\n`;
- }
-
- if (hasMainNotes) {
- newReleaseNotes += ` ${notes.join("\n")}\n\n`;
- }
- if (hasSubcategories) {
- for (const { title: subTitle, notes: subNotes } of subCategories) {
- if (subNotes && subNotes.length > 0) {
- newReleaseNotes += ` - #### ${subTitle}\n\n`;
- newReleaseNotes += ` ${subNotes.join("\n ")}\n\n`;
- }
- }
- }
- }
-
- const changelogContent = await fs.readFile(changelogPath, 'utf-8');
- const changelogIncludesTodaysReleaseNotes = changelogContent.includes(`\n## ${today}`);
-
- const regex = changelogIncludesTodaysReleaseNotes
- ? new RegExp(`## ${today}.*(?=## ${latestDateInChangelog})`, "gs")
- : new RegExp(`(?=## ${latestDateInChangelog})`, "gs");
-
- const newChangelogContent = changelogContent.replace(regex, newReleaseNotes);
- await fs.writeFile(changelogPath, newChangelogContent);
-
- - name: Check for changes
- id: verify-diff
- run: |
- git diff --quiet . || echo "changed=true" >> $GITHUB_ENV
-
- - name: Commit and push changes
- if: env.changed == 'true'
- run: |
- git config --global user.name "github-actions[bot]"
- git config --global user.email "github-actions[bot]@users.noreply.github.com"
- git add CHANGELOG.md
- git commit -m "Update CHANGELOG.md"
- git checkout -b $BRANCH_NAME || git checkout $BRANCH_NAME
- git push origin $BRANCH_NAME --force
-
- - name: Create pull request if not exists
- if: env.changed == 'true'
- env:
- GH_TOKEN: ${{ steps.generate-token.outputs.token }}
- run: |
- PR_EXISTS=$(gh pr list --head "${BRANCH_NAME}" --json number --jq '.[].number')
- if [ -z "$PR_EXISTS" ]; then
- gh pr create --title "[Github Action] Update CHANGELOG.md" \
- --body "This PR is auto-generated by a Github Action to update the CHANGELOG.md file." \
- --head $BRANCH_NAME \
- --base main \
- --label "$AUTOMATED_PR_LABEL"
- fi
-
- - name: Approve pull request
- if: env.changed == 'true'
- env:
- GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- run: |
- PR_NUMBER=$(gh pr list --head "${BRANCH_NAME}" --json number --jq '.[].number')
- if [ -n "$PR_NUMBER" ]; then
- gh pr review $PR_NUMBER --approve
- fi
-
- - name: Re-approve pull request after update
- if: env.changed == 'true'
- env:
- GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- run: |
- PR_NUMBER=$(gh pr list --head "${BRANCH_NAME}" --json number --jq '.[].number')
- if [ -n "$PR_NUMBER" ]; then
- gh pr review $PR_NUMBER --approve
- fi
diff --git a/.github/workflows/live/close-discussion.yml b/.github/workflows/live/close-discussion.yml
deleted file mode 100644
index 4b39fbf9..00000000
--- a/.github/workflows/live/close-discussion.yml
+++ /dev/null
@@ -1,122 +0,0 @@
-name: Close Discussion on PR Merge
-
-on:
- pull_request:
- types: [closed]
-
-jobs:
- close-discussion:
- runs-on: ubuntu-latest
-
- steps:
- - name: Checkout Repository
- uses: actions/checkout@v4
-
- - name: Set Up Node.js
- uses: actions/setup-node@v4
- with:
- node-version: "20"
- - name: Install Dependencies
- run: npm install zx @octokit/graphql
-
- - name: Close Discussion
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- PR_BODY: ${{ github.event.pull_request.body }}
- PR_NUMBER: ${{ github.event.pull_request.number }}
- REPO_OWNER: ${{ github.repository_owner }}
- REPO_NAME: ${{ github.event.repository.name }}
- run: |
- npx zx << 'EOF'
- import { graphql } from "@octokit/graphql";
- (async function() {
- try {
- const token = process.env.GITHUB_TOKEN;
- const prBody = process.env.PR_BODY;
- const prNumber = process.env.PR_NUMBER;
- const owner = process.env.REPO_OWNER;
- const repo = process.env.REPO_NAME;
-
- if (!token || !prBody || !prNumber || !owner || !repo) {
- console.log("Missing required environment variables.");
- process.exit(1);
- }
-
- const match = prBody.match(/#(\d+)/);
- if (!match) {
- console.log("No discussion ID found in PR body.");
- return;
- }
- const discussionNumber = match[1];
-
- console.log(`Extracted Discussion Number: ${discussionNumber}`);
- console.log(`PR Number: ${prNumber}`);
- console.log(`Repository: ${owner}/${repo}`);
-
- const graphqlWithAuth = graphql.defaults({
- headers: { authorization: `Bearer ${token}` },
- });
-
- const discussionQuery = `
- query($owner: String!, $repo: String!, $number: Int!) {
- repository(owner: $owner, name: $repo) {
- discussion(number: $number) {
- id
- }
- }
- }
- `;
-
- const discussionResponse = await graphqlWithAuth(discussionQuery, {
- owner,
- repo,
- number: parseInt(discussionNumber, 10),
- });
-
- const discussionQLId = discussionResponse.repository.discussion.id;
- if (!discussionQLId) {
- console.log("Failed to fetch discussion GraphQL ID.");
- return;
- }
-
- console.log(`GraphQL Discussion ID: ${discussionQLId}`);
-
- const commentMutation = `
- mutation($discussionId: ID!, $body: String!) {
- addDiscussionComment(input: { discussionId: $discussionId, body: $body }) {
- comment { id body }
- }
- }
- `;
-
- const commentResponse = await graphqlWithAuth(commentMutation, {
- discussionId: discussionQLId,
- body: `Merged with PR #${prNumber}`,
- });
-
- const commentId = commentResponse.addDiscussionComment.comment.id;
- if (!commentId) {
- console.log("Failed to post the comment.");
- return;
- }
-
- console.log(`Comment Posted Successfully! Comment ID: ${commentId}`);
-
- const markAnswerMutation = `
- mutation($id: ID!) {
- markDiscussionCommentAsAnswer(input: { id: $id }) {
- discussion { id title }
- }
- }
- `;
-
- await graphqlWithAuth(markAnswerMutation, { id: commentId });
-
- console.log("Comment marked as answer successfully!");
-
- } catch (error) {
- console.error("Error:", error);
- return;
- }
- })();
- EOF
\ No newline at end of file
diff --git a/.github/workflows/live/create-docker-for-runner.yml b/.github/workflows/live/create-docker-for-runner.yml
deleted file mode 100644
index c9fef0a5..00000000
--- a/.github/workflows/live/create-docker-for-runner.yml
+++ /dev/null
@@ -1,37 +0,0 @@
-name: Build and Publish Docker Image
-
-on:
- push:
- branches:
- - main
- paths:
- - '.github/runner/docker/**'
- schedule:
- - cron: '0 0 * * *'
-
-jobs:
- build:
- runs-on: ubuntu-latest #To ensure it always builds we use the github runner with all the right tooling
-
- steps:
- - name: Checkout code
- uses: actions/checkout@v3
-
- - name: Log in to GHCR
- uses: docker/login-action@v2
- with:
- registry: ghcr.io
- username: ${{ github.actor }}
- password: ${{ secrets.GITHUB_TOKEN }}
-
- - name: Build Docker image
- run: |
- repo_name=${{ github.repository }} # Get repository name
- repo_name_lower=$(echo $repo_name | tr '[:upper:]' '[:lower:]') # Convert to lowercase
- docker build -t ghcr.io/$repo_name_lower/gh-runner-self:latest -f .github/runner/docker/gh-runner-self.dockerfile .
-
- - name: Push Docker image to GHCR
- run: |
- repo_name=${{ github.repository }} # Get repository name
- repo_name_lower=$(echo $repo_name | tr '[:upper:]' '[:lower:]') # Convert to lowercase
- docker push ghcr.io/$repo_name_lower/gh-runner-self:latest
diff --git a/.github/workflows/live/delete-json-branch.yml b/.github/workflows/live/delete-json-branch.yml
deleted file mode 100644
index e4cdcf24..00000000
--- a/.github/workflows/live/delete-json-branch.yml
+++ /dev/null
@@ -1,28 +0,0 @@
-
-name: Delete JSON date PR Branch
-
-on:
- pull_request:
- types: [closed]
- branches:
- - main
-
-jobs:
- delete_branch:
- runs-on: ubuntu-latest
- steps:
- - name: Checkout the code
- uses: actions/checkout@v3
-
- - name: Delete PR Update Branch
- if: github.event.pull_request.merged == true && startsWith(github.event.pull_request.head.ref, 'pr-update-json-')
- run: |
- PR_BRANCH="${{ github.event.pull_request.head.ref }}"
- echo "Deleting branch $PR_BRANCH..."
-
- # Avoid deleting the default branch (e.g., main)
- if [[ "$PR_BRANCH" != "main" ]]; then
- git push origin --delete "$PR_BRANCH"
- else
- echo "Skipping deletion of the main branch"
- fi
\ No newline at end of file
diff --git a/.github/workflows/live/github-release.yml b/.github/workflows/live/github-release.yml
deleted file mode 100644
index 482d88a0..00000000
--- a/.github/workflows/live/github-release.yml
+++ /dev/null
@@ -1,57 +0,0 @@
-name: Create Daily Release
-
-on:
- schedule:
- - cron: '1 0 * * *' # Runs daily at 00:01 UTC
- workflow_dispatch:
-
-jobs:
- create-daily-release:
- runs-on: ubuntu-latest
- permissions:
- contents: write
- steps:
- - name: Checkout repository
- uses: actions/checkout@v4
-
- - name: Extract first 5000 characters from CHANGELOG.md
- run: head -c 5000 CHANGELOG.md > changelog_cropped.md
-
- - name: Debugging - Show extracted changelog
- run: |
- echo "=== CHANGELOG EXCERPT ==="
- cat changelog_cropped.md
- echo "========================="
-
- - name: Parse CHANGELOG.md and create release
- env:
- GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- run: |
- YESTERDAY=$(date -u --date="yesterday" +%Y-%m-%d)
- echo "Checking for changes on: $YESTERDAY"
-
- # Ensure yesterday's date exists in the changelog
- if ! grep -q "## $YESTERDAY" changelog_cropped.md; then
- echo "No entry found for $YESTERDAY, skipping release."
- exit 0
- fi
-
- # Extract section for yesterday's date
- awk -v date="## $YESTERDAY" '
- $0 ~ date {found=1; next}
- found && /^## [0-9]{4}-[0-9]{2}-[0-9]{2}/ {exit}
- found
- ' changelog_cropped.md > changelog_tmp.md
-
- echo "=== Extracted Changelog ==="
- cat changelog_tmp.md
- echo "==========================="
-
- # Skip if no content was found
- if [ ! -s changelog_tmp.md ]; then
- echo "No changes found for $YESTERDAY, skipping release."
- exit 0
- fi
-
- # Create GitHub release
- gh release create "$YESTERDAY" -t "$YESTERDAY" -F changelog_tmp.md
diff --git a/.github/workflows/live/script-test.yml b/.github/workflows/live/script-test.yml
deleted file mode 100644
index 272a1272..00000000
--- a/.github/workflows/live/script-test.yml
+++ /dev/null
@@ -1,177 +0,0 @@
-name: Run Scripts on PVE Node for testing
-permissions:
- pull-requests: write
-on:
- pull_request_target:
- branches:
- - main
- paths:
- - 'install/**.sh'
- - 'ct/**.sh'
-
-jobs:
- run-install-script:
- runs-on: pvenode
- steps:
- - name: Checkout PR branch
- uses: actions/checkout@v4
- with:
- ref: ${{ github.event.pull_request.head.ref }}
- repository: ${{ github.event.pull_request.head.repo.full_name }}
- fetch-depth: 0
-
- - name: Add Git safe directory
- run: |
- git config --global --add safe.directory /__w/ProxmoxVED/ProxmoxVE
-
- - name: Set up GH_TOKEN
- env:
- GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- run: |
- echo "GH_TOKEN=${GH_TOKEN}" >> $GITHUB_ENV
-
- - name: Get Changed Files
- run: |
- CHANGED_FILES=$(gh pr diff ${{ github.event.pull_request.number }} --repo ${{ github.repository }} --name-only)
- CHANGED_FILES=$(echo "$CHANGED_FILES" | tr '\n' ' ')
- echo "Changed files: $CHANGED_FILES"
- echo "SCRIPT=$CHANGED_FILES" >> $GITHUB_ENV
- env:
- GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-
-
- - name: Get scripts
- id: check-install-script
- run: |
- ALL_FILES=()
- ADDED_FILES=()
- for FILE in ${{ env.SCRIPT }}; do
- if [[ $FILE =~ ^install/.*-install\.sh$ ]] || [[ $FILE =~ ^ct/.*\.sh$ ]]; then
- STRIPPED_NAME=$(basename "$FILE" | sed 's/-install//' | sed 's/\.sh$//')
- if [[ ! " ${ADDED_FILES[@]} " =~ " $STRIPPED_NAME " ]]; then
- ALL_FILES+=("$FILE")
- ADDED_FILES+=("$STRIPPED_NAME") # Mark this base file as added (without the path)
- fi
- fi
- done
- ALL_FILES=$(echo "${ALL_FILES[@]}" | xargs)
- echo "$ALL_FILES"
- echo "ALL_FILES=$ALL_FILES" >> $GITHUB_ENV
-
- - name: Run scripts
- id: run-install
- continue-on-error: true
- run: |
- set +e
- #run for each files in /ct
- for FILE in ${{ env.ALL_FILES }}; do
- STRIPPED_NAME=$(basename "$FILE" | sed 's/-install//' | sed 's/\.sh$//')
- echo "Running Test for: $STRIPPED_NAME"
- if grep -E -q 'read\s+-r\s+-p\s+".*"\s+\w+' "$FILE"; then
- echo "The script contains an interactive prompt. Skipping execution."
- continue
- fi
- if [[ $FILE =~ ^install/.*-install\.sh$ ]]; then
- CT_SCRIPT="ct/$STRIPPED_NAME.sh"
- if [[ ! -f $CT_SCRIPT ]]; then
- echo "No CT script found for $STRIPPED_NAME"
- ERROR_MSG="No CT script found for $FILE"
- echo "$ERROR_MSG" > result_$STRIPPED_NAME.log
- continue
- fi
- if grep -E -q 'read\s+-r\s+-p\s+".*"\s+\w+' "install/$STRIPPED_NAME-install.sh"; then
- echo "The script contains an interactive prompt. Skipping execution."
- continue
- fi
- echo "Found CT script for $STRIPPED_NAME"
- chmod +x "$CT_SCRIPT"
- RUNNING_FILE=$CT_SCRIPT
- elif [[ $FILE =~ ^ct/.*\.sh$ ]]; then
- INSTALL_SCRIPT="install/$STRIPPED_NAME-install.sh"
- if [[ ! -f $INSTALL_SCRIPT ]]; then
- echo "No install script found for $STRIPPED_NAME"
- ERROR_MSG="No install script found for $FILE"
- echo "$ERROR_MSG" > result_$STRIPPED_NAME.log
- continue
- fi
- echo "Found install script for $STRIPPED_NAME"
- chmod +x "$INSTALL_SCRIPT"
- RUNNING_FILE=$FILE
- if grep -E -q 'read\s+-r\s+-p\s+".*"\s+\w+' "ct/$STRIPPED_NAME.sh"; then
- echo "The script contains an interactive prompt. Skipping execution."
- continue
- fi
- fi
- git remote add community-scripts https://github.com/community-scripts/ProxmoxVE.git
- git fetch community-scripts
- rm -f .github/workflows/scripts/app-test/pr-build.func || true
- rm -f .github/workflows/scripts/app-test/pr-install.func || true
- rm -f .github/workflows/scripts/app-test/pr-alpine-install.func || true
- rm -f .github/workflows/scripts/app-test/pr-create-lxc.sh || true
- git checkout community-scripts/main -- .github/workflows/scripts/app-test/pr-build.func
- git checkout community-scripts/main -- .github/workflows/scripts/app-test/pr-install.func
- git checkout community-scripts/main -- .github/workflows/scripts/app-test/pr-alpine-install.func
- git checkout community-scripts/main -- .github/workflows/scripts/app-test/pr-create-lxc.sh
- chmod +x $RUNNING_FILE
- chmod +x .github/workflows/scripts/app-test/pr-create-lxc.sh
- chmod +x .github/workflows/scripts/app-test/pr-install.func
- chmod +x .github/workflows/scripts/app-test/pr-alpine-install.func
- chmod +x .github/workflows/scripts/app-test/pr-build.func
- sed -i 's|source <(curl -s https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)|source .github/workflows/scripts/app-test/pr-build.func|g' "$RUNNING_FILE"
- echo "Executing $RUNNING_FILE"
- ERROR_MSG=$(./$RUNNING_FILE 2>&1 > /dev/null)
- echo "Finished running $FILE"
- if [ -n "$ERROR_MSG" ]; then
- echo "ERROR in $STRIPPED_NAME: $ERROR_MSG"
- echo "$ERROR_MSG" > result_$STRIPPED_NAME.log
- fi
- done
- set -e # Restore exit-on-error
-
- - name: Cleanup PVE Node
- run: |
- containers=$(pct list | tail -n +2 | awk '{print $0 " " $4}' | awk '{print $1}')
-
- for container_id in $containers; do
- status=$(pct status $container_id | awk '{print $2}')
- if [[ $status == "running" ]]; then
- pct stop $container_id
- pct destroy $container_id
- fi
- done
-
- - name: Post error comments
- run: |
- ERROR="false"
- SEARCH_LINE=".github/workflows/scripts/app-test/pr-build.func: line 255:"
-
- # Get all existing comments on the PR
- EXISTING_COMMENTS=$(gh pr view ${{ github.event.pull_request.number }} --repo ${{ github.repository }} --json comments --jq '.comments[].body')
-
- for FILE in ${{ env.ALL_FILES }}; do
- STRIPPED_NAME=$(basename "$FILE" | sed 's/-install//' | sed 's/\.sh$//')
- if [[ ! -f result_$STRIPPED_NAME.log ]]; then
- continue
- fi
- ERROR_MSG=$(cat result_$STRIPPED_NAME.log)
-
- if [ -n "$ERROR_MSG" ]; then
- CLEANED_ERROR_MSG=$(echo "$ERROR_MSG" | sed "s|$SEARCH_LINE.*||")
- COMMENT_BODY=":warning: The script _**$FILE**_ failed with the following message:
${CLEANED_ERROR_MSG}
"
-
- # Check if the comment already exists
- if echo "$EXISTING_COMMENTS" | grep -qF "$COMMENT_BODY"; then
- echo "Skipping duplicate comment for $FILE"
- else
- echo "Posting error message for $FILE"
- gh pr comment ${{ github.event.pull_request.number }} \
- --repo ${{ github.repository }} \
- --body "$COMMENT_BODY"
- ERROR="true"
- fi
- fi
- done
-
- echo "ERROR=$ERROR" >> $GITHUB_ENV
-
-
diff --git a/.github/workflows/live/script_format.yml b/.github/workflows/live/script_format.yml
deleted file mode 100644
index c8ea7a4d..00000000
--- a/.github/workflows/live/script_format.yml
+++ /dev/null
@@ -1,243 +0,0 @@
-name: Script Format Check
-permissions:
- pull-requests: write
-on:
- pull_request_target:
- branches:
- - main
- paths:
- - 'install/*.sh'
- - 'ct/*.sh'
-
-jobs:
- run-install-script:
- runs-on: pvenode
- steps:
- - name: Checkout PR branch (supports forks)
- uses: actions/checkout@v4
- with:
- ref: ${{ github.event.pull_request.head.ref }}
- repository: ${{ github.event.pull_request.head.repo.full_name }}
- fetch-depth: 0
-
- - name: Add Git safe directory
- run: |
- git config --global --add safe.directory /__w/ProxmoxVED/ProxmoxVE
-
- - name: Set up GH_TOKEN
- env:
- GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- run: |
- echo "GH_TOKEN=${GH_TOKEN}" >> $GITHUB_ENV
-
- - name: Get Changed Files
- run: |
- CHANGED_FILES=$(gh pr diff ${{ github.event.pull_request.number }} --repo ${{ github.repository }} --name-only)
- CHANGED_FILES=$(echo "$CHANGED_FILES" | tr '\n' ' ')
- echo "Changed files: $CHANGED_FILES"
- echo "SCRIPT=$CHANGED_FILES" >> $GITHUB_ENV
- env:
- GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-
- - name: Check scripts
- id: run-install
- continue-on-error: true
- run: |
- for FILE in ${{ env.SCRIPT }}; do
- STRIPPED_NAME=$(basename "$FILE" | sed 's/-install//' | sed 's/\.sh$//')
- echo "Running Test for: $STRIPPED_NAME"
- FILE_STRIPPED="${FILE##*/}"
- LOG_FILE="result_$FILE_STRIPPED.log"
-
- if [[ $FILE =~ ^ct/.*\.sh$ ]]; then
-
- FIRST_LINE=$(sed -n '1p' "$FILE")
- [[ "$FIRST_LINE" != "#!/usr/bin/env bash" ]] && echo "Line 1 was $FIRST_LINE | Should be: #!/usr/bin/env bash" >> "$LOG_FILE"
- SECOND_LINE=$(sed -n '2p' "$FILE")
- [[ "$SECOND_LINE" != "source <(curl -s https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)" ]] &&
- echo "Line 2 was $SECOND_LINE | Should be: source <(curl -s https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)" >> "$LOG_FILE"
- THIRD_LINE=$(sed -n '3p' "$FILE")
- if ! [[ "$THIRD_LINE" =~ ^#\ Copyright\ \(c\)\ [0-9]{4}-[0-9]{4}\ community-scripts\ ORG$ || "$THIRD_LINE" =~ ^Copyright\ \(c\)\ [0-9]{4}-[0-9]{4}\ tteck$ ]]; then
- echo "Line 3 was $THIRD_LINE | Should be: # Copyright (c) 2021-2025 community-scripts ORG" >> "$LOG_FILE"
- fi
-
- EXPECTED_AUTHOR="# Author:"
- EXPECTED_LICENSE="# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE"
- EXPECTED_SOURCE="# Source:"
- EXPECTED_EMPTY=""
-
- for i in {4..7}; do
- LINE=$(sed -n "${i}p" "$FILE")
-
- case $i in
- 4)
- [[ $LINE == $EXPECTED_AUTHOR* ]] || printf "Line %d was: '%s' | Should start with: '%s'\n" "$i" "$LINE" "$EXPECTED_AUTHOR" >> $LOG_FILE
- ;;
- 5)
- [[ "$LINE" == "$EXPECTED_LICENSE" ]] || printf "Line %d was: '%s' | Should be: '%s'\n" "$i" "$LINE" "$EXPECTED_LICENSE" >> $LOG_FILE
- ;;
- 6)
- [[ $LINE == $EXPECTED_SOURCE* ]] || printf "Line %d was: '%s' | Should start with: '%s'\n" "$i" "$LINE" "$EXPECTED_SOURCE" >> $LOG_FILE
- ;;
- 7)
- [[ -z $LINE ]] || printf "Line %d was: '%s' | Should be empty\n" "$i" "$LINE" >> $LOG_FILE
- ;;
- esac
- done
-
-
- EXPECTED_PREFIXES=(
- "APP="
- "var_tags="
- "var_cpu=" # Must be a number
- "var_ram=" # Must be a number
- "var_disk=" # Must be a number
- "var_os=" # Must be debian, alpine, or ubuntu
- "var_version="
- "var_unprivileged=" # Must be 0 or 1
- )
-
-
- for i in {8..15}; do
- LINE=$(sed -n "${i}p" "$FILE")
- INDEX=$((i - 8))
-
- case $INDEX in
- 2|3|4) # var_cpu, var_ram, var_disk (must be numbers)
- if [[ "$LINE" =~ ^${EXPECTED_PREFIXES[$INDEX]}([0-9]+)$ ]]; then
- continue # Valid
- else
- echo "Line $i was '$LINE' | Should be: '${EXPECTED_PREFIXES[$INDEX]}'" >> "$LOG_FILE"
- fi
- ;;
- 5) # var_os (must be debian, alpine, or ubuntu)
- if [[ "$LINE" =~ ^var_os=(debian|alpine|ubuntu)$ ]]; then
- continue # Valid
- else
- echo "Line $i was '$LINE' | Should be: 'var_os=[debian|alpine|ubuntu]'" >> "$LOG_FILE"
- fi
- ;;
- 7) # var_unprivileged (must be 0 or 1)
- if [[ "$LINE" =~ ^var_unprivileged=[01]$ ]]; then
- continue # Valid
- else
- echo "Line $i was '$LINE' | Should be: 'var_unprivileged=[0|1]'" >> "$LOG_FILE"
- fi
- ;;
- *) # Other lines (must start with expected prefix)
- if [[ "$LINE" == ${EXPECTED_PREFIXES[$INDEX]}* ]]; then
- continue # Valid
- else
- echo "Line $i was '$LINE' | Should start with '${EXPECTED_PREFIXES[$INDEX]}'" >> "$LOG_FILE"
- fi
- ;;
- esac
- done
-
- for i in {16..20}; do
- LINE=$(sed -n "${i}p" "$FILE")
- EXPECTED=(
- "header_info \"$APP\""
- "variables"
- "color"
- "catch_errors"
- "function update_script() {"
- )
- [[ "$LINE" != "${EXPECTED[$((i-16))]}" ]] && echo "Line $i was $LINE | Should be: ${EXPECTED[$((i-16))]}" >> "$LOG_FILE"
- done
- cat "$LOG_FILE"
- elif [[ $FILE =~ ^install/.*-install\.sh$ ]]; then
-
- FIRST_LINE=$(sed -n '1p' "$FILE")
- [[ "$FIRST_LINE" != "#!/usr/bin/env bash" ]] && echo "Line 1 was $FIRST_LINE | Should be: #!/usr/bin/env bash" >> "$LOG_FILE"
-
- SECOND_LINE=$(sed -n '2p' "$FILE")
- [[ -n "$SECOND_LINE" ]] && echo "Line 2 should be empty" >> "$LOG_FILE"
-
- THIRD_LINE=$(sed -n '3p' "$FILE")
- if ! [[ "$THIRD_LINE" =~ ^#\ Copyright\ \(c\)\ [0-9]{4}-[0-9]{4}\ community-scripts\ ORG$ || "$THIRD_LINE" =~ ^Copyright\ \(c\)\ [0-9]{4}-[0-9]{4}\ tteck$ ]]; then
- echo "Line 3 was $THIRD_LINE | Should be: # Copyright (c) 2021-2025 community-scripts ORG" >> "$LOG_FILE"
- fi
-
- EXPECTED_AUTHOR="# Author:"
- EXPECTED_LICENSE="# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE"
- EXPECTED_SOURCE="# Source:"
- EXPECTED_EMPTY=""
-
- for i in {4..7}; do
- LINE=$(sed -n "${i}p" "$FILE")
-
- case $i in
- 4)
- [[ $LINE == $EXPECTED_AUTHOR* ]] || printf "Line %d was: '%s' | Should start with: '%s'\n" "$i" "$LINE" "$EXPECTED_AUTHOR" >> $LOG_FILE
- ;;
- 5)
- [[ "$LINE" == "$EXPECTED_LICENSE" ]] || printf "Line %d was: '%s' | Should be: '%s'\n" "$i" "$LINE" "$EXPECTED_LICENSE" >> $LOG_FILE
- ;;
- 6)
- [[ $LINE == $EXPECTED_SOURCE* ]] || printf "Line %d was: '%s' | Should start with: '%s'\n" "$i" "$LINE" "$EXPECTED_SOURCE" >> $LOG_FILE
- ;;
- 7)
- [[ -z $LINE ]] || printf "Line %d was: '%s' | Should be empty\n" "$i" "$LINE" >> $LOG_FILE
- ;;
- esac
- done
-
- [[ "$(sed -n '8p' "$FILE")" != 'source /dev/stdin <<< "$FUNCTIONS_FILE_PATH"' ]] && echo 'Line 8 should be: source /dev/stdin <<< "$FUNCTIONS_FILE_PATH"' >> "$LOG_FILE"
-
- for i in {9..14}; do
- LINE=$(sed -n "${i}p" "$FILE")
- EXPECTED=(
- "color"
- "verb_ip6"
- "catch_errors"
- "setting_up_container"
- "network_check"
- "update_os"
- )
- [[ "$LINE" != "${EXPECTED[$((i-9))]}" ]] && echo "Line $i was $LINE | Should be: ${EXPECTED[$((i-9))]}" >> "$LOG_FILE"
- done
-
- [[ -n "$(sed -n '15p' "$FILE")" ]] && echo "Line 15 should be empty" >> "$LOG_FILE"
- [[ "$(sed -n '16p' "$FILE")" != 'msg_info "Installing Dependencies"' ]] && echo 'Line 16 should be: msg_info "Installing Dependencies"' >> "$LOG_FILE"
-
- LAST_3_LINES=$(tail -n 3 "$FILE")
- [[ "$LAST_3_LINES" != *"$STD apt-get -y autoremove"* ]] && echo 'Third to last line should be: $STD apt-get -y autoremove' >> "$LOG_FILE"
- [[ "$LAST_3_LINES" != *"$STD apt-get -y autoclean"* ]] && echo 'Second to last line should be: $STD apt-get -y clean' >> "$LOG_FILE"
- [[ "$LAST_3_LINES" != *'msg_ok "Cleaned"'* ]] && echo 'Last line should be: msg_ok "Cleaned"' >> "$LOG_FILE"
- cat "$LOG_FILE"
- fi
-
- done
-
-
- - name: Post error comments
- run: |
- ERROR="false"
- for FILE in ${{ env.SCRIPT }}; do
- FILE_STRIPPED="${FILE##*/}"
- LOG_FILE="result_$FILE_STRIPPED.log"
- echo $LOG_FILE
- if [[ ! -f $LOG_FILE ]]; then
- continue
- fi
- ERROR_MSG=$(cat $LOG_FILE)
-
- if [ -n "$ERROR_MSG" ]; then
- echo "Posting error message for $FILE"
- echo ${ERROR_MSG}
- gh pr comment ${{ github.event.pull_request.number }} \
- --repo ${{ github.repository }} \
- --body ":warning: The script _**$FILE**_ has the following formatting errors:
${ERROR_MSG}
"
-
-
- ERROR="true"
- fi
- done
- echo "ERROR=$ERROR" >> $GITHUB_ENV
- env:
- GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-
- - name: Fail if error
- if: ${{ env.ERROR == 'true' }}
- run: exit 1
diff --git a/.github/workflows/live/update-json-date.yml b/.github/workflows/live/update-json-date.yml
deleted file mode 100644
index 1bf965a4..00000000
--- a/.github/workflows/live/update-json-date.yml
+++ /dev/null
@@ -1,133 +0,0 @@
-name: Update JSON Date
-
-on:
- push:
- branches:
- - main
- paths:
- - 'json/**.json'
- workflow_dispatch:
-
-jobs:
- update-app-files:
- runs-on: ubuntu-latest
-
- permissions:
- contents: write
- pull-requests: write
-
- steps:
- - name: Generate a token
- id: generate-token
- uses: actions/create-github-app-token@v2
- with:
- app-id: ${{ vars.APP_ID }}
- private-key: ${{ secrets.APP_PRIVATE_KEY }}
- owner: community-scripts
- repositories: ProxmoxVED
-
- - name: Generate dynamic branch name
- id: timestamp
- run: echo "BRANCH_NAME=pr-update-json-$(date +'%Y%m%d%H%M%S')" >> $GITHUB_ENV
-
- - name: Set up GH_TOKEN
- env:
- GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- run: |
- echo "GH_TOKEN=${GH_TOKEN}" >> $GITHUB_ENV
-
- - name: Checkout Repository
- uses: actions/checkout@v4
- with:
- fetch-depth: 2 # Ensure we have the last two commits
-
- - name: Get Previous Commit
- id: prev_commit
- run: |
- PREV_COMMIT=$(git rev-parse HEAD^)
- echo "Previous commit: $PREV_COMMIT"
- echo "prev_commit=$PREV_COMMIT" >> $GITHUB_ENV
-
- - name: Get Newly Added JSON Files
- id: new_json_files
- run: |
- git diff --name-only --diff-filter=A ${{ env.prev_commit }} HEAD | grep '^json/.*\.json$' > new_files.txt || true
- echo "New files detected:"
- cat new_files.txt || echo "No new files."
-
- - name: Disable file mode changes
- run: git config core.fileMode false
-
- - name: Set up Git
- run: |
- git config --global user.name "GitHub Actions"
- git config --global user.email "github-actions[bot]@users.noreply.github.com"
-
- - name: Change JSON Date
- id: change-json-date
- run: |
- current_date=$(date +"%Y-%m-%d")
- while IFS= read -r file; do
- # Skip empty lines
- [[ -z "$file" ]] && continue
-
- if [[ -f "$file" ]]; then
- echo "Processing $file..."
- current_json_date=$(jq -r '.date_created // empty' "$file")
- if [[ -z "$current_json_date" || "$current_json_date" != "$current_date" ]]; then
- echo "Updating $file with date $current_date"
- jq --arg date "$current_date" '.date_created = $date' "$file" > temp.json && mv temp.json "$file"
- else
- echo "Date in $file is already up to date."
- fi
- else
- echo "Warning: File $file not found!"
- fi
- done < new_files.txt
- rm new_files.txt
-
- - name: Check if there are any changes
- run: |
- echo "Checking for changes..."
- git add -A # Untracked Dateien aufnehmen
- git status
- if git diff --cached --quiet; then
- echo "No changes detected."
- echo "changed=false" >> "$GITHUB_ENV"
- else
- echo "Changes detected:"
- git diff --stat --cached
- echo "changed=true" >> "$GITHUB_ENV"
- fi
-
- # Step 7: Commit and create PR if changes exist
- - name: Commit and create PR if changes exist
- if: env.changed == 'true'
- run: |
-
-
- git commit -m "Update date in json"
- git checkout -b ${{ env.BRANCH_NAME }}
- git push origin ${{ env.BRANCH_NAME }}
-
- gh pr create --title "[core] update date in json" \
- --body "This PR is auto-generated by a GitHub Action to update the date in json." \
- --head ${{ env.BRANCH_NAME }} \
- --base main \
- --label "automated pr"
- env:
- GH_TOKEN: ${{ steps.generate-token.outputs.token }}
-
- - name: Approve pull request
- if: env.changed == 'true'
- env:
- GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- run: |
- PR_NUMBER=$(gh pr list --head "${{ env.BRANCH_NAME }}" --json number --jq '.[].number')
- if [ -n "$PR_NUMBER" ]; then
- gh pr review $PR_NUMBER --approve
- fi
-
- - name: No changes detected
- if: env.changed == 'false'
- run: echo "No changes to commit. Workflow completed successfully."
diff --git a/.github/workflows/live/validate-filenames.yml b/.github/workflows/live/validate-filenames.yml
deleted file mode 100644
index 16f2f710..00000000
--- a/.github/workflows/live/validate-filenames.yml
+++ /dev/null
@@ -1,161 +0,0 @@
-name: Validate filenames
-
-on:
- pull_request_target:
- paths:
- - "ct/*.sh"
- - "install/*.sh"
- - "json/*.json"
-
-jobs:
- check-files:
- name: Check changed files
- runs-on: ubuntu-latest
- permissions:
- pull-requests: write
-
- steps:
- - name: Get pull request information
- if: github.event_name == 'pull_request_target'
- uses: actions/github-script@v7
- id: pr
- with:
- script: |
- const { data: pullRequest } = await github.rest.pulls.get({
- ...context.repo,
- pull_number: context.payload.pull_request.number,
- });
- return pullRequest;
-
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- fetch-depth: 0 # Ensure the full history is fetched for accurate diffing
- ref: ${{ github.event_name == 'pull_request_target' && fromJSON(steps.pr.outputs.result).merge_commit_sha || '' }}
-
- - name: Get changed files
- id: changed-files
- run: |
- if ${{ github.event_name == 'pull_request_target' }}; then
- echo "files=$(git diff --name-only ${{ github.event.pull_request.base.sha }} ${{ steps.pr.outputs.result && fromJSON(steps.pr.outputs.result).merge_commit_sha }} | xargs)" >> $GITHUB_OUTPUT
- else
- echo "files=$(git diff --name-only ${{ github.event.before }} ${{ github.event.after }} | xargs)" >> $GITHUB_OUTPUT
- fi
-
- - name: "Validate filenames in ct and install directory"
- if: always() && steps.changed-files.outputs.files != ''
- id: check-scripts
- run: |
- CHANGED_FILES=$(printf "%s\n" ${{ steps.changed-files.outputs.files }} | { grep -E '^(ct|install)/.*\.sh$' || true; })
-
- NON_COMPLIANT_FILES=""
- for FILE in $CHANGED_FILES; do
- # Datei "misc/create_lxc.sh" explizit überspringen
- if [[ "$FILE" == "misc/create_lxc.sh" ]]; then
- continue
- fi
- BASENAME=$(echo "$(basename "${FILE%.*}")")
- if [[ ! "$BASENAME" =~ ^[a-z0-9-]+$ ]]; then
- NON_COMPLIANT_FILES="$NON_COMPLIANT_FILES $FILE"
- fi
- done
-
- if [ -n "$NON_COMPLIANT_FILES" ]; then
- echo "files=$NON_COMPLIANT_FILES" >> $GITHUB_OUTPUT
- echo "Non-compliant filenames found, change to lowercase:"
- for FILE in $NON_COMPLIANT_FILES; do
- echo "$FILE"
- done
- exit 1
- fi
-
- - name: "Validate filenames in json directory."
- if: always() && steps.changed-files.outputs.files != ''
- id: check-json
- run: |
- CHANGED_FILES=$(printf "%s\n" ${{ steps.changed-files.outputs.files }} | { grep -E '^json/.*\.json$' || true; })
-
- NON_COMPLIANT_FILES=""
- for FILE in $CHANGED_FILES; do
- BASENAME=$(echo "$(basename "${FILE%.*}")")
- if [[ ! "$BASENAME" =~ ^[a-z0-9-]+$ ]]; then
- NON_COMPLIANT_FILES="$NON_COMPLIANT_FILES $FILE"
- fi
- done
-
- if [ -n "$NON_COMPLIANT_FILES" ]; then
- echo "files=$NON_COMPLIANT_FILES" >> $GITHUB_OUTPUT
- echo "Non-compliant filenames found, change to lowercase:"
- for FILE in $NON_COMPLIANT_FILES; do
- echo "$FILE"
- done
- exit 1
- fi
-
- - name: Post results and comment
- if: always() && steps.check-scripts.outputs.files != '' && steps.check-json.outputs.files != '' && github.event_name == 'pull_request_target'
- uses: actions/github-script@v7
- with:
- script: |
- const result = "${{ job.status }}" === "success" ? "success" : "failure";
- const nonCompliantFiles = {
- script: "${{ steps.check-scripts.outputs.files }}",
- JSON: "${{ steps.check-json.outputs.files }}",
- };
-
- const issueNumber = context.payload.pull_request
- ? context.payload.pull_request.number
- : null;
- const commentIdentifier = "validate-filenames";
- let newCommentBody = `\n### Filename validation\n\n`;
-
- if (result === "failure") {
- newCommentBody += ":x: We found issues in the following changed files:\n\n";
- for (const [check, files] of Object.entries(nonCompliantFiles)) {
- if (files) {
- newCommentBody += `**${check.charAt(0).toUpperCase() + check.slice(1)} filename invalid:**\n${files
- .trim()
- .split(" ")
- .map((file) => `- ${file}`)
- .join("\n")}\n\n`;
- }
- }
- newCommentBody +=
- "Please change the filenames to lowercase and use only alphanumeric characters and dashes.\n";
- } else {
- newCommentBody += `:rocket: All files passed filename validation!\n`;
- }
-
- newCommentBody += `\n\n`;
-
- if (issueNumber) {
- const { data: comments } = await github.rest.issues.listComments({
- ...context.repo,
- issue_number: issueNumber,
- });
-
- const existingComment = comments.find(
- (comment) => comment.user.login === "github-actions[bot]",
- );
-
- if (existingComment) {
- if (existingComment.body.includes(commentIdentifier)) {
- const re = new RegExp(String.raw`[\s\S]*?`, "");
- newCommentBody = existingComment.body.replace(re, newCommentBody);
- } else {
- newCommentBody = existingComment.body + '\n\n---\n\n' + newCommentBody;
- }
-
- await github.rest.issues.updateComment({
- ...context.repo,
- comment_id: existingComment.id,
- body: newCommentBody,
- });
- } else {
- await github.rest.issues.createComment({
- ...context.repo,
- issue_number: issueNumber,
- body: newCommentBody,
- });
- }
- }
diff --git a/.github/workflows/push-to-gitea.yml b/.github/workflows/push-to-gitea.yml
index 73d9a72b..c1fb72d5 100644
--- a/.github/workflows/push-to-gitea.yml
+++ b/.github/workflows/push-to-gitea.yml
@@ -27,13 +27,13 @@ jobs:
- name: Pull Gitea changes
run: |
git fetch gitea
- git rebase gitea/main
+ git merge --strategy=ours gitea/main
env:
GITEA_USER: ${{ secrets.GITEA_USERNAME }}
GITEA_TOKEN: ${{ secrets.GITEA_TOKEN }}
- name: Push to Gitea
- run: git push gitea --all
+ run: git push gitea main --force
env:
GITEA_USER: ${{ secrets.GITEA_USERNAME }}
GITEA_TOKEN: ${{ secrets.GITEA_TOKEN }}
diff --git a/ct/autocaliweb.sh b/ct/autocaliweb.sh
new file mode 100644
index 00000000..53cf7fc6
--- /dev/null
+++ b/ct/autocaliweb.sh
@@ -0,0 +1,83 @@
+#!/usr/bin/env bash
+source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)
+# Copyright (c) 2021-2025 community-scripts ORG
+# Author: vhsdream
+# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
+# Source: https://github.com/gelbphoenix/autocaliweb
+
+APP="Autocaliweb"
+var_tags="${var_tags:-ebooks}"
+var_cpu="${var_cpu:-2}"
+var_ram="${var_ram:-2048}"
+var_disk="${var_disk:-6}"
+var_os="${var_os:-debian}"
+var_version="${var_version:-12}"
+var_unprivileged="${var_unprivileged:-1}"
+
+header_info "$APP"
+variables
+color
+catch_errors
+
+function update_script() {
+ header_info
+ check_container_storage
+ check_container_resources
+ if [[ ! -d /opt/autocaliweb ]]; then
+ msg_error "No ${APP} Installation Found!"
+ exit
+ fi
+
+ setup_uv
+
+ RELEASE=$(curl -fsSL https://api.github.com/repos/gelbphoenix/autocaliweb/releases/latest | jq '.tag_name' | sed 's/^"v//;s/"$//')
+ if check_for_gh_release "autocaliweb" "gelbphoenix/autocaliweb"; then
+ msg_info "Stopping Services"
+ systemctl stop autocaliweb metadata-change-detector acw-ingest-service acw-auto-zipper
+ msg_ok "Stopped Services"
+
+ INSTALL_DIR="/opt/autocaliweb"
+ export VIRTUAL_ENV="${INSTALL_DIR}/venv"
+ $STD tar -cf ~/autocaliweb_bkp.tar "$INSTALL_DIR"/{metadata_change_logs,dirs.json,.env,scripts/ingest_watcher.sh,scripts/auto_zipper_wrapper.sh,scripts/metadata_change_detector_wrapper.sh}
+ fetch_and_deploy_gh_release "autocaliweb" "gelbphoenix/autocaliweb" "tarball" "latest" "/opt/autocaliweb"
+ msg_info "Updating ${APP}"
+ cd "$INSTALL_DIR"
+ if [[ ! -d "$VIRTUAL_ENV" ]]; then
+ $STD uv venv "$VIRTUAL_ENV"
+ fi
+ $STD uv sync --all-extras --active
+ cd "$INSTALL_DIR"/koreader/plugins
+ PLUGIN_DIGEST="$(find acwsync.koplugin -type f -name "*.lua" -o -name "*.json" | sort | xargs sha256sum | sha256sum | cut -d' ' -f1)"
+ echo "Plugin files digest: $PLUGIN_DIGEST" >acwsync.koplugin/${PLUGIN_DIGEST}.digest
+ echo "Build date: $(date)" >>acwsync.koplugin/${PLUGIN_DIGEST}.digest
+ echo "Files included:" >>acwsync.koplugin/${PLUGIN_DIGEST}.digest
+ $STD zip -r koplugin.zip acwsync.koplugin/
+ cp -r koplugin.zip "$INSTALL_DIR"/cps/static
+ mkdir -p "$INSTALL_DIR"/metadata_temp
+ $STD tar -xf ~/autocaliweb_bkp.tar --directory /
+ KEPUB_VERSION="$(/usr/bin/kepubify --version)"
+ CALIBRE_RELEASE="$(curl -s https://api.github.com/repos/kovidgoyal/calibre/releases/latest | grep -o '"tag_name": "[^"]*' | cut -d'"' -f4)"
+ echo "${KEPUB_VERSION#v}" >"$INSTALL_DIR"/KEPUBIFY_RELEASE
+ echo "${CALIBRE_RELEASE#v}" >/"$INSTALL_DIR"/CALIBRE_RELEASE
+ sed 's/^/v/' ~/.autocaliweb >"$INSTALL_DIR"/ACW_RELEASE
+ chown -R acw:acw "$INSTALL_DIR"
+ rm ~/autocaliweb_bkp.tar
+ msg_ok "Updated $APP"
+
+ msg_info "Starting Services"
+ systemctl start autocaliweb metadata-change-detector acw-ingest-service acw-auto-zipper
+ msg_ok "Started Services"
+
+ msg_ok "Updated Successfully"
+ fi
+ exit
+}
+
+start
+build_container
+description
+
+msg_ok "Completed Successfully!\n"
+echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
+echo -e "${INFO}${YW} Access it using the following URL:${CL}"
+echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:8083${CL}"
diff --git a/ct/debian.sh b/ct/debian.sh
index 4bcf91d6..d3bbda9b 100644
--- a/ct/debian.sh
+++ b/ct/debian.sh
@@ -7,14 +7,14 @@ source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxV
APP="Debian"
var_tags="${var_tags:-os}"
-var_cpu="${var_cpu:-1}"
-var_ram="${var_ram:-512}"
-var_disk="${var_disk:-2}"
+var_cpu="${var_cpu:-4}"
+var_ram="${var_ram:-4096}"
+var_disk="${var_disk:-15}"
var_os="${var_os:-debian}"
var_version="${var_version:-12}"
var_unprivileged="${var_unprivileged:-1}"
-var_fuse="${var_fuse:-no}"
-var_tun="${var_tun:-no}"
+#var_fuse="${var_fuse:-no}"
+#var_tun="${var_tun:-no}"
header_info "$APP"
variables
@@ -22,18 +22,18 @@ color
catch_errors
function update_script() {
- header_info
- check_container_storage
- check_container_resources
- if [[ ! -d /var ]]; then
- msg_error "No ${APP} Installation Found!"
- exit
- fi
- msg_info "Updating $APP LXC"
- $STD apt-get update
- $STD apt-get -y upgrade
- msg_ok "Updated $APP LXC"
+ header_info
+ check_container_storage
+ check_container_resources
+ if [[ ! -d /var ]]; then
+ msg_error "No ${APP} Installation Found!"
exit
+ fi
+ msg_info "Updating $APP LXC"
+ $STD apt-get update
+ $STD apt-get -y upgrade
+ msg_ok "Updated $APP LXC"
+ exit
}
start
diff --git a/ct/librespeed.sh b/ct/deferred/librespeed.sh
similarity index 100%
rename from ct/librespeed.sh
rename to ct/deferred/librespeed.sh
diff --git a/ct/vikunja.sh b/ct/deferred/vikunja.sh
similarity index 100%
rename from ct/vikunja.sh
rename to ct/deferred/vikunja.sh
diff --git a/ct/dispatcharr.sh b/ct/dispatcharr.sh
new file mode 100644
index 00000000..49285859
--- /dev/null
+++ b/ct/dispatcharr.sh
@@ -0,0 +1,120 @@
+#!/usr/bin/env bash
+source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)
+# Copyright (c) 2021-2025 community-scripts ORG
+# Author: ekke85
+# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
+# Source: https://github.com/Dispatcharr/Dispatcharr
+
+APP="Dispatcharr"
+APP_NAME=${APP,,}
+var_tags="${var_tags:-media;arr}"
+var_cpu="${var_cpu:-1}"
+var_ram="${var_ram:-1024}"
+var_disk="${var_disk:-8}"
+var_os="${var_os:-debian}"
+var_version="${var_version:-12}"
+var_unprivileged="${var_unprivileged:-1}"
+
+header_info "$APP"
+variables
+color
+catch_errors
+
+function update_script() {
+ header_info
+ check_container_storage
+ check_container_resources
+
+ if [[ ! -d "/opt/dispatcharr" ]]; then
+ msg_error "No ${APP} Installation Found!"
+ exit
+ fi
+
+ RELEASE=$(curl -fsSL https://api.github.com/repos/Dispatcharr/Dispatcharr/releases/latest | jq -r '.tag_name' | sed 's/^v//')
+ if [[ "${RELEASE}" != "$(cat /opt/${APP}_version.txt)" ]] || [[ ! -f /opt/${APP}_version.txt ]]; then
+ msg_ok "Starting update"
+ APP_DIR="/opt/dispatcharr"
+ APP_USER="dispatcharr"
+ APP_GROUP="dispatcharr"
+
+
+
+ msg_info "Stopping $APP"
+ systemctl stop dispatcharr-celery
+ systemctl stop dispatcharr-celerybeat
+ systemctl stop dispatcharr-daphne
+ systemctl stop dispatcharr
+ msg_ok "Stopped $APP"
+
+ msg_info "Creating Backup"
+ BACKUP_FILE="/opt/dispatcharr_$(date +%F).tar.gz"
+ msg_info "Source and Database backup"
+ set -o allexport
+ source /etc/$APP_NAME/$APP_NAME.env
+ set +o allexport
+ PGPASSWORD=$POSTGRES_PASSWORD pg_dump -U $POSTGRES_USER -h $POSTGRES_HOST $POSTGRES_DB > /opt/$POSTGRES_DB-`date +%F`.sql
+ $STD tar -czf "$BACKUP_FILE" /opt/dispatcharr /opt/Dispatcharr_version.txt /opt/$POSTGRES_DB-`date +%F`.sql &>/dev/null
+ msg_ok "Backup Created"
+
+ msg_info "Updating $APP to v${RELEASE}"
+ rm -rf /opt/dispatcharr
+ fetch_and_deploy_gh_release "dispatcharr" "Dispatcharr/Dispatcharr"
+ chown -R "$APP_USER:$APP_GROUP" "$APP_DIR"
+ sed -i 's/program\[\x27channel_id\x27\]/program["channel_id"]/g' "${APP_DIR}/apps/output/views.py"
+
+ msg_ok "Dispatcharr Updated to $RELEASE"
+
+ msg_info "Creating Python Virtual Environment"
+ cd $APP_DIR
+ python3 -m venv env
+ source env/bin/activate
+ $STD pip install --upgrade pip
+ $STD pip install -r requirements.txt
+ $STD pip install gunicorn
+ ln -sf /usr/bin/ffmpeg $APP_DIR/env/bin/ffmpeg
+ msg_ok "Python Environment Setup"
+
+ msg_info "Building Frontend"
+ cd $APP_DIR/frontend
+ $STD npm install --legacy-peer-deps
+ $STD npm run build
+ msg_ok "Built Frontend"
+
+ msg_info "Running Django Migrations"
+ cd $APP_DIR
+ source env/bin/activate
+ set -o allexport
+ source /etc/$APP_NAME/$APP_NAME.env
+ set +o allexport
+ $STD python manage.py migrate --noinput
+ $STD python manage.py collectstatic --noinput
+ msg_ok "Migrations Complete"
+
+ msg_info "Starting $APP"
+ systemctl start dispatcharr-celery
+ systemctl start dispatcharr-celerybeat
+ systemctl start dispatcharr-daphne
+ systemctl start dispatcharr
+ msg_ok "Started $APP"
+ echo "${RELEASE}" > "/opt/${APP}_version.txt"
+
+ msg_info "Cleaning Up"
+ rm -rf /opt/$POSTGRES_DB-`date +%F`.sql
+ msg_ok "Cleanup Completed"
+
+ msg_ok "Update Successful, Backup saved to $BACKUP_FILE"
+
+ else
+ msg_ok "No update required. ${APP} is already at v${RELEASE}"
+ fi
+ exit
+}
+
+start
+build_container
+description
+
+msg_ok "Completed Successfully!\n"
+echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
+echo -e "${INFO}${YW} Access it using the following URL:${CL}"
+echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:9191${CL}"
diff --git a/ct/ente.sh b/ct/ente.sh
new file mode 100644
index 00000000..3074d79b
--- /dev/null
+++ b/ct/ente.sh
@@ -0,0 +1,42 @@
+#!/usr/bin/env bash
+source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/build.func)
+# Copyright (c) 2021-2025 community-scripts ORG
+# Author: MickLesk (CanbiZ)
+# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
+# Source: https://www.debian.org/
+
+APP="Ente"
+var_tags="${var_tags:-photos}"
+var_cpu="${var_cpu:-4}"
+var_ram="${var_ram:-4096}"
+var_disk="${var_disk:-10}"
+var_os="${var_os:-debian}"
+var_version="${var_version:-12}"
+var_unprivileged="${var_unprivileged:-1}"
+
+header_info "$APP"
+variables
+color
+catch_errors
+
+function update_script() {
+ header_info
+ check_container_storage
+ check_container_resources
+ if [[ ! -d /var ]]; then
+ msg_error "No ${APP} Installation Found!"
+ exit
+ fi
+ msg_info "Updating $APP LXC"
+ $STD apt-get update
+ $STD apt-get -y upgrade
+ msg_ok "Updated $APP LXC"
+ exit
+}
+
+start
+build_container
+description
+
+msg_ok "Completed Successfully!"
+msg_custom "🚀" "${GN}" "${APP} setup has been successfully initialized!"
diff --git a/ct/freepbx.sh b/ct/freepbx.sh
new file mode 100644
index 00000000..d7526bcd
--- /dev/null
+++ b/ct/freepbx.sh
@@ -0,0 +1,67 @@
+#!/usr/bin/env bash
+source <(curl -fsSL https://raw.githubusercontent.com/vsc55/community-scripts-ProxmoxVED/refs/heads/freepbx/misc/build.func)
+# Copyright (c) 2021-2025 community-scripts ORG
+# Author: Arian Nasr (arian-nasr)
+# Updated by: Javier Pastor (vsc55)
+# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
+# Source: https://www.freepbx.org/
+
+APP="FreePBX"
+var_tags="pbx;voip;telephony"
+var_cpu="${var_cpu:-2}"
+var_ram="${var_ram:-2048}"
+var_disk="${var_disk:-10}"
+var_os="${var_os:-debian}"
+var_version="${var_version:-12}"
+var_unprivileged="${var_unprivileged:-1}"
+
+header_info "$APP"
+variables
+color
+catch_errors
+
+function update_script() {
+ header_info
+ check_container_storage
+ check_container_resources
+
+ if [[ ! -f /lib/systemd/system/freepbx.service ]]; then
+ msg_error "No ${APP} Installation Found!"
+ exit
+ fi
+
+ msg_info "Updating $APP LXC"
+ $STD apt-get update
+ $STD apt-get -y upgrade
+ msg_ok "Updated $APP LXC"
+
+ msg_info "Updating $APP Modules"
+ $STD fwconsole ma updateall
+ $STD fwconsole reload
+ msg_ok "Updated $APP Modules"
+
+ exit
+}
+
+start
+
+if whiptail --title "Commercial Modules" --yesno "Remove Commercial modules?" --defaultno 10 50; then
+ export ONLY_OPENSOURCE="yes"
+
+ if whiptail --title "Firewall Module" --yesno "Do you want to KEEP the Firewall module (and sysadmin)?" 10 50; then
+ export REMOVE_FIREWALL="no"
+ else
+ export REMOVE_FIREWALL="yes"
+ fi
+else
+ export ONLY_OPENSOURCE="no"
+ export REMOVE_FIREWALL="no"
+fi
+
+build_container
+description
+
+msg_ok "Completed Successfully!\n"
+echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
+echo -e "${INFO}${YW} Access it using the following URL:${CL}"
+echo -e "${TAB}${GATEWAY}${BGN}http://${IP}${CL}"
diff --git a/ct/headers/autocaliweb b/ct/headers/autocaliweb
new file mode 100644
index 00000000..a5aa7d63
--- /dev/null
+++ b/ct/headers/autocaliweb
@@ -0,0 +1,6 @@
+ ___ __ ___ __
+ / | __ __/ /_____ _________ _/ (_) _____ / /_
+ / /| |/ / / / __/ __ \/ ___/ __ `/ / / | /| / / _ \/ __ \
+ / ___ / /_/ / /_/ /_/ / /__/ /_/ / / /| |/ |/ / __/ /_/ /
+/_/ |_\__,_/\__/\____/\___/\__,_/_/_/ |__/|__/\___/_.___/
+
diff --git a/ct/headers/dispatcharr b/ct/headers/dispatcharr
new file mode 100644
index 00000000..a8ad5396
--- /dev/null
+++ b/ct/headers/dispatcharr
@@ -0,0 +1,6 @@
+ ____ _ __ __
+ / __ \(_)________ ____ _/ /______/ /_ ____ ___________
+ / / / / / ___/ __ \/ __ `/ __/ ___/ __ \/ __ `/ ___/ ___/
+ / /_/ / (__ ) /_/ / /_/ / /_/ /__/ / / / /_/ / / / /
+/_____/_/____/ .___/\__,_/\__/\___/_/ /_/\__,_/_/ /_/
+ /_/
diff --git a/ct/headers/ente b/ct/headers/ente
new file mode 100644
index 00000000..f700a1f5
--- /dev/null
+++ b/ct/headers/ente
@@ -0,0 +1,6 @@
+ ______ __
+ / ____/___ / /____
+ / __/ / __ \/ __/ _ \
+ / /___/ / / / /_/ __/
+/_____/_/ /_/\__/\___/
+
diff --git a/ct/headers/freepbx b/ct/headers/freepbx
new file mode 100644
index 00000000..25541c2e
--- /dev/null
+++ b/ct/headers/freepbx
@@ -0,0 +1,6 @@
+ ______ ____ ____ _ __
+ / ____/_______ ___ / __ \/ __ ) |/ /
+ / /_ / ___/ _ \/ _ \/ /_/ / __ | /
+ / __/ / / / __/ __/ ____/ /_/ / |
+/_/ /_/ \___/\___/_/ /_____/_/|_|
+
diff --git a/ct/headers/healthchecks b/ct/headers/healthchecks
deleted file mode 100644
index 8f61c877..00000000
--- a/ct/headers/healthchecks
+++ /dev/null
@@ -1,6 +0,0 @@
- __ ____ __ __ __
- / /_ ___ ____ _/ / /_/ /_ _____/ /_ ___ _____/ /_______
- / __ \/ _ \/ __ `/ / __/ __ \/ ___/ __ \/ _ \/ ___/ //_/ ___/
- / / / / __/ /_/ / / /_/ / / / /__/ / / / __/ /__/ ,< (__ )
-/_/ /_/\___/\__,_/_/\__/_/ /_/\___/_/ /_/\___/\___/_/|_/____/
-
diff --git a/ct/headers/librespeed b/ct/headers/librespeed
deleted file mode 100644
index b75b5cec..00000000
--- a/ct/headers/librespeed
+++ /dev/null
@@ -1,6 +0,0 @@
- ___ __ __
- / (_) /_ ________ _________ ___ ___ ____/ /
- / / / __ \/ ___/ _ \/ ___/ __ \/ _ \/ _ \/ __ /
- / / / /_/ / / / __(__ ) /_/ / __/ __/ /_/ /
-/_/_/_.___/_/ \___/____/ .___/\___/\___/\__,_/
- /_/
diff --git a/ct/headers/mediamanager b/ct/headers/mediamanager
deleted file mode 100644
index b05f4db7..00000000
--- a/ct/headers/mediamanager
+++ /dev/null
@@ -1,6 +0,0 @@
- __ ___ ___ __ ___
- / |/ /__ ____/ (_)___ _/ |/ /___ _____ ____ _____ ____ _____
- / /|_/ / _ \/ __ / / __ `/ /|_/ / __ `/ __ \/ __ `/ __ `/ _ \/ ___/
- / / / / __/ /_/ / / /_/ / / / / /_/ / / / / /_/ / /_/ / __/ /
-/_/ /_/\___/\__,_/_/\__,_/_/ /_/\__,_/_/ /_/\__,_/\__, /\___/_/
- /____/
diff --git a/ct/headers/nginxproxymanager b/ct/headers/nginxproxymanager
deleted file mode 100644
index d68d0c9d..00000000
--- a/ct/headers/nginxproxymanager
+++ /dev/null
@@ -1,6 +0,0 @@
- _ __ _ ____ __ ___
- / | / /___ _(_)___ _ __ / __ \_________ _ ____ __ / |/ /___ _____ ____ _____ ____ _____
- / |/ / __ `/ / __ \| |/_/ / /_/ / ___/ __ \| |/_/ / / / / /|_/ / __ `/ __ \/ __ `/ __ `/ _ \/ ___/
- / /| / /_/ / / / / /> < / ____/ / / /_/ /> /_/ / / / / / /_/ / / / / /_/ / /_/ / __/ /
-/_/ |_/\__, /_/_/ /_/_/|_| /_/ /_/ \____/_/|_|\__, / /_/ /_/\__,_/_/ /_/\__,_/\__, /\___/_/
- /____/ /____/ /____/
diff --git a/ct/headers/proxmox-backup-server b/ct/headers/proxmox-backup-server
deleted file mode 100644
index 8536d112..00000000
--- a/ct/headers/proxmox-backup-server
+++ /dev/null
@@ -1,6 +0,0 @@
- ____ ____ __ _____
- / __ \_________ _ ______ ___ ____ _ __ / __ )____ ______/ /____ ______ / ___/___ ______ _____ _____
- / /_/ / ___/ __ \| |/_/ __ `__ \/ __ \| |/_/_____/ __ / __ `/ ___/ //_/ / / / __ \______\__ \/ _ \/ ___/ | / / _ \/ ___/
- / ____/ / / /_/ /> / / / / / /_/ /> "/opt/${APP}_db_backup_$(date +%F).sql"
- tar -czf "/opt/${APP}_backup_$(date +%F).tar.gz" "/opt/${APP}"
- msg_ok "Backup Created"
- fetch_and_deploy_gh_release "$APP" "Leantime/leantime" "prebuild" "latest" "/opt/${APP}" Leantime-v[0-9].[0-9].[0-9].tar.gz
+ if check_for_gh_release "leantime" "Leantime/leantime"; then
+ msg_info "Creating Backup"
+ mariadb-dump leantime >"/opt/${APP}_db_backup_$(date +%F).sql"
+ tar -czf "/opt/${APP}_backup_$(date +%F).tar.gz" "/opt/${APP}"
+ mv /opt/leantime /opt/leantime_bak
+ msg_ok "Backup Created"
+
+ fetch_and_deploy_gh_release "leantime" "Leantime/leantime" "prebuild" "latest" "/opt/leantime" Leantime*.tar.gz
+
+ msg_info "Restoring Config & Permissions"
+ mv /opt/leantime_bak/config/.env /opt/leantime/config/.env
+ chown -R www-data:www-data "/opt/leantime"
+ chmod -R 750 "/opt/leantime"
+ msg_ok "Restored Config & Permissions"
+
+ msg_info "Removing Backup"
+ rm -rf /opt/leantime_bak
+ msg_ok "Removed Backup"
+ msg_ok "Updated Successfully"
+ fi
exit
}
diff --git a/ct/mediamanager.sh b/ct/mediamanager.sh
deleted file mode 100644
index cc67d8af..00000000
--- a/ct/mediamanager.sh
+++ /dev/null
@@ -1,78 +0,0 @@
-#!/usr/bin/env bash
-source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)
-# Copyright (c) 2021-2025 community-scripts ORG
-# Author: vhsdream
-# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
-# Source: https://github.com/maxdorninger/MediaManager
-
-APP="MediaManager"
-var_tags="${var_tags:-arr}"
-var_cpu="${var_cpu:-2}"
-var_ram="${var_ram:-2048}"
-var_disk="${var_disk:-4}"
-var_os="${var_os:-debian}"
-var_version="${var_version:-12}"
-var_unprivileged="${var_unprivileged:-1}"
-
-header_info "$APP"
-variables
-color
-catch_errors
-
-function update_script() {
- header_info
- check_container_storage
- check_container_resources
- if [[ ! -d /opt/mediamanager ]]; then
- msg_error "No ${APP} Installation Found!"
- exit
- fi
-
- RELEASE=$(curl -fsSL https://api.github.com/repos/maxdorninger/MediaManager/releases/latest | jq '.tag_name' | sed 's/^v//')
- if [[ "${RELEASE}" != "$(cat ~/.mediamanager 2>/dev/null)" ]] || [[ ! -f ~/.mediamanager ]]; then
- msg_info "Stopping Service"
- systemctl stop mediamanager
- msg_ok "Stopped Service"
-
- msg_info "Updating ${APP}"
- fetch_and_deploy_gh_release "MediaManager" "maxdorninger/MediaManager" "tarball" "latest" "/opt/mediamanager"
- MM_DIR="/opt/mm"
- export CONFIG_DIR="${MM_DIR}/config"
- export FRONTEND_FILES_DIR="${MM_DIR}/web/build"
- export BASE_PATH=""
- export PUBLIC_VERSION=""
- export PUBLIC_API_URL="${BASE_PATH}/api/v1"
- export BASE_PATH="${BASE_PATH}/web"
- cd /opt/mediamanager/web
- $STD npm ci
- $STD npm run build
- rm -rf "$FRONTEND_FILES_DIR"/build
- cp -r build "$FRONTEND_FILES_DIR"
-
- export BASE_PATH=""
- export VIRTUAL_ENV="/opt/${MM_DIR}/venv"
- cd /opt/mediamanager
- rm -rf "$MM_DIR"/{media_manager,alembic*}
- cp -r {media_manager,alembic*} "$MM_DIR"
- $STD /usr/local/bin/uv sync --locked --active
- msg_ok "Updated $APP"
-
- msg_info "Starting Service"
- systemctl start mediamanager
- msg_ok "Started Service"
-
- msg_ok "Updated Successfully"
- else
- msg_ok "Already up to date"
- fi
- exit
-}
-
-start
-build_container
-description
-
-msg_ok "Completed Successfully!\n"
-echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
-echo -e "${INFO}${YW} Access it using the following URL:${CL}"
-echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:8000${CL}"
diff --git a/ct/nginxproxymanager.sh b/ct/nginxproxymanager.sh
deleted file mode 100644
index eb7259de..00000000
--- a/ct/nginxproxymanager.sh
+++ /dev/null
@@ -1,158 +0,0 @@
-#!/usr/bin/env bash
-source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)
-# Copyright (c) 2021-2025 tteck
-# Author: tteck (tteckster)
-# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
-# Source: https://nginxproxymanager.com/
-
-APP="Nginx Proxy Manager"
-var_tags="${var_tags:-proxy}"
-var_cpu="${var_cpu:-2}"
-var_ram="${var_ram:-1024}"
-var_disk="${var_disk:-4}"
-var_os="${var_os:-debian}"
-var_version="${var_version:-12}"
-var_unprivileged="${var_unprivileged:-1}"
-
-header_info "$APP"
-variables
-color
-catch_errors
-
-function update_script() {
- header_info
- check_container_storage
- check_container_resources
- if [[ ! -f /lib/systemd/system/npm.service ]]; then
- msg_error "No ${APP} Installation Found!"
- exit
- fi
- if ! command -v pnpm &>/dev/null; then
- msg_info "Installing pnpm"
- #export NODE_OPTIONS=--openssl-legacy-provider
- $STD npm install -g pnpm@8.15
- msg_ok "Installed pnpm"
- fi
- RELEASE=$(curl -fsSL https://api.github.com/repos/NginxProxyManager/nginx-proxy-manager/releases/latest |
- grep "tag_name" |
- awk '{print substr($2, 3, length($2)-4) }')
- msg_info "Stopping Services"
- systemctl stop openresty
- systemctl stop npm
- msg_ok "Stopped Services"
-
- msg_info "Cleaning Old Files"
- rm -rf /app \
- /var/www/html \
- /etc/nginx \
- /var/log/nginx \
- /var/lib/nginx \
- "$STD" /var/cache/nginx
- msg_ok "Cleaned Old Files"
-
- msg_info "Downloading NPM v${RELEASE}"
- curl -fsSL "https://codeload.github.com/NginxProxyManager/nginx-proxy-manager/tar.gz/v${RELEASE}" | tar -xz
- cd nginx-proxy-manager-"${RELEASE}"
- msg_ok "Downloaded NPM v${RELEASE}"
-
- msg_info "Setting up Enviroment"
- ln -sf /usr/bin/python3 /usr/bin/python
- ln -sf /usr/bin/certbot /opt/certbot/bin/certbot
- ln -sf /usr/local/openresty/nginx/sbin/nginx /usr/sbin/nginx
- ln -sf /usr/local/openresty/nginx/ /etc/nginx
- sed -i "s|\"version\": \"0.0.0\"|\"version\": \"$RELEASE\"|" backend/package.json
- sed -i "s|\"version\": \"0.0.0\"|\"version\": \"$RELEASE\"|" frontend/package.json
- sed -i 's+^daemon+#daemon+g' docker/rootfs/etc/nginx/nginx.conf
- NGINX_CONFS=$(find "$(pwd)" -type f -name "*.conf")
- for NGINX_CONF in $NGINX_CONFS; do
- sed -i 's+include conf.d+include /etc/nginx/conf.d+g' "$NGINX_CONF"
- done
- mkdir -p /var/www/html /etc/nginx/logs
- cp -r docker/rootfs/var/www/html/* /var/www/html/
- cp -r docker/rootfs/etc/nginx/* /etc/nginx/
- cp docker/rootfs/etc/letsencrypt.ini /etc/letsencrypt.ini
- cp docker/rootfs/etc/logrotate.d/nginx-proxy-manager /etc/logrotate.d/nginx-proxy-manager
- ln -sf /etc/nginx/nginx.conf /etc/nginx/conf/nginx.conf
- rm -f /etc/nginx/conf.d/dev.conf
- mkdir -p /tmp/nginx/body \
- /run/nginx \
- /data/nginx \
- /data/custom_ssl \
- /data/logs \
- /data/access \
- /data/nginx/default_host \
- /data/nginx/default_www \
- /data/nginx/proxy_host \
- /data/nginx/redirection_host \
- /data/nginx/stream \
- /data/nginx/dead_host \
- /data/nginx/temp \
- /var/lib/nginx/cache/public \
- /var/lib/nginx/cache/private \
- /var/cache/nginx/proxy_temp
- chmod -R 777 /var/cache/nginx
- chown root /tmp/nginx
- echo resolver "$(awk 'BEGIN{ORS=" "} $1=="nameserver" {print ($2 ~ ":")? "["$2"]": $2}' /etc/resolv.conf);" >/etc/nginx/conf.d/include/resolvers.conf
- if [ ! -f /data/nginx/dummycert.pem ] || [ ! -f /data/nginx/dummykey.pem ]; then
- $STD openssl req -new -newkey rsa:2048 -days 3650 -nodes -x509 -subj "/O=Nginx Proxy Manager/OU=Dummy Certificate/CN=localhost" -keyout /data/nginx/dummykey.pem -out /data/nginx/dummycert.pem
- fi
- mkdir -p /app/global /app/frontend/images
- cp -r backend/* /app
- cp -r global/* /app/global
- $STD python3 -m pip install --no-cache-dir --break-system-packages certbot-dns-cloudflare
- msg_ok "Setup Enviroment"
-
- msg_info "Building Frontend"
- cd ./frontend
- $STD pnpm install
- $STD pnpm upgrade
- $STD pnpm run build
- cp -r dist/* /app/frontend
- cp -r app-images/* /app/frontend/images
- msg_ok "Built Frontend"
-
- msg_info "Initializing Backend"
- $STD rm -rf /app/config/default.json
- if [ ! -f /app/config/production.json ]; then
- cat <<'EOF' >/app/config/production.json
-{
- "database": {
- "engine": "knex-native",
- "knex": {
- "client": "sqlite3",
- "connection": {
- "filename": "/data/database.sqlite"
- }
- }
- }
-}
-EOF
- fi
- cd /app
- $STD pnpm install
- msg_ok "Initialized Backend"
-
- msg_info "Starting Services"
- sed -i 's/user npm/user root/g; s/^pid/#pid/g' /usr/local/openresty/nginx/conf/nginx.conf
- sed -i 's/su npm npm/su root root/g' /etc/logrotate.d/nginx-proxy-manager
- sed -i 's/include-system-site-packages = false/include-system-site-packages = true/g' /opt/certbot/pyvenv.cfg
- systemctl enable -q --now openresty
- systemctl enable -q --now npm
- msg_ok "Started Services"
-
- msg_info "Cleaning up"
- rm -rf ~/nginx-proxy-manager-*
- msg_ok "Cleaned"
-
- msg_ok "Updated Successfully"
- exit
-}
-
-start
-build_container
-description
-
-msg_ok "Completed Successfully!\n"
-echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
-echo -e "${INFO}${YW} Access it using the following URL:${CL}"
-echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:81${CL}"
diff --git a/ct/proxmox-backup-server.sh b/ct/proxmox-backup-server.sh
deleted file mode 100644
index 5463b1eb..00000000
--- a/ct/proxmox-backup-server.sh
+++ /dev/null
@@ -1,44 +0,0 @@
-#!/usr/bin/env bash
-source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)
-# Copyright (c) 2021-2025 tteck
-# Author: tteck (tteckster)
-# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
-# Source: https://www.proxmox.com/en/proxmox-backup-server
-
-APP="Proxmox-Backup-Server"
-var_tags="${var_tags:-backup}"
-var_cpu="${var_cpu:-2}"
-var_ram="${var_ram:-2048}"
-var_disk="${var_disk:-10}"
-var_os="${var_os:-debian}"
-var_version="${var_version:-12}"
-var_unprivileged="${var_unprivileged:-1}"
-
-header_info "$APP"
-variables
-color
-catch_errors
-
-function update_script() {
- header_info
- check_container_storage
- check_container_resources
- if [[ ! -e /usr/sbin/proxmox-backup-manager ]]; then
- msg_error "No ${APP} Installation Found!"
- exit
- fi
- msg_info "Updating $APP LXC"
- $STD apt-get update
- $STD apt-get -y upgrade
- msg_ok "Updated $APP LXC"
- exit
-}
-
-start
-build_container
-description
-
-msg_ok "Completed Successfully!\n"
-echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
-echo -e "${INFO}${YW} Access it using the following URL:${CL}"
-echo -e "${TAB}${GATEWAY}${BGN}https://${IP}:8007${CL}"
diff --git a/ct/healthchecks.sh b/ct/resiliosync.sh
similarity index 50%
rename from ct/healthchecks.sh
rename to ct/resiliosync.sh
index 8b55f7fd..eff0f8d5 100644
--- a/ct/healthchecks.sh
+++ b/ct/resiliosync.sh
@@ -1,15 +1,15 @@
#!/usr/bin/env bash
-source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/build.func)
+source <(curl -s https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)
# Copyright (c) 2021-2025 community-scripts ORG
-# Author: MickLesk (CanbiZ)
-# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
-# Source:
+# Author: David Bennett (dbinit)
+# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
+# Source: https://www.resilio.com/sync
-APP="healthchecks"
-var_tags="${var_tags:-monitoring}"
-var_cpu="${var_cpu:-4}"
-var_ram="${var_ram:-4096}"
-var_disk="${var_disk:-20}"
+APP="Resilio Sync"
+var_tags="${var_tags:-sync}"
+var_cpu="${var_cpu:-2}"
+var_ram="${var_ram:-2048}"
+var_disk="${var_disk:-8}"
var_os="${var_os:-debian}"
var_version="${var_version:-12}"
var_unprivileged="${var_unprivileged:-1}"
@@ -23,11 +23,14 @@ function update_script() {
header_info
check_container_storage
check_container_resources
- if [[ ! -f /etc/systemd/system/healthchecks.service ]]; then
+ if [[ ! -d /var/lib/resilio-sync ]]; then
msg_error "No ${APP} Installation Found!"
exit
fi
- msg_error "No Update."
+ msg_info "Updating ${APP} LXC"
+ $STD apt-get update
+ $STD apt-get -y upgrade
+ msg_ok "Updated Successfully"
exit
}
@@ -38,4 +41,4 @@ description
msg_ok "Completed Successfully!\n"
echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
echo -e "${INFO}${YW} Access it using the following URL:${CL}"
-echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:8000${CL}"
+echo -e "${TAB}${GATEWAY}${BGN}https://${IP}:8888${CL}"
diff --git a/ct/romm.sh b/ct/romm.sh
new file mode 100644
index 00000000..8dc95b58
--- /dev/null
+++ b/ct/romm.sh
@@ -0,0 +1,72 @@
+#!/usr/bin/env bash
+source <(curl -s https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)
+# Copyright (c) 2021-2025 community-scripts ORG
+# Author: MickLesk (CanbiZ)
+# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
+# Source: https://romm.app
+
+APP="RomM"
+var_tags="${var_tags:-emulation}"
+var_cpu="${var_cpu:-2}"
+var_ram="${var_ram:-4096}"
+var_disk="${var_disk:-20}"
+var_os="${var_os:-ubuntu}"
+var_version="${var_version:-24.04}"
+var_unprivileged="${var_unprivileged:-1}"
+var_fuse="${var_fuse:-1}"
+
+header_info "$APP"
+variables
+color
+catch_errors
+
+function update_script() {
+ header_info
+ check_container_storage
+ check_container_resources
+
+ if [[ ! -d /opt/romm ]]; then
+ msg_error "No ${APP} Installation Found!"
+ exit
+ fi
+
+ msg_info "Stopping $APP"
+ systemctl stop romm
+ systemctl stop nginx
+ msg_ok "Stopped $APP"
+
+ msg_info "Updating $APP"
+ cd /opt/romm/app
+ git pull
+
+ # Update backend
+ cd /opt/romm/app
+ source /opt/romm/venv/bin/activate
+ pip install --upgrade pip
+ pip install poetry
+ poetry install
+
+ # Update frontend
+ cd /opt/romm/app/frontend
+ npm install
+ npm run build
+
+ echo "Updated on $(date)" >/opt/romm/version.txt
+ msg_ok "Updated $APP"
+
+ msg_info "Starting $APP"
+ systemctl start romm
+ systemctl start nginx
+ msg_ok "Started $APP"
+ msg_ok "Update Successful"
+ exit
+}
+
+start
+build_container
+description
+
+msg_ok "Completed Successfully!\n"
+echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
+echo -e "${INFO}${YW} Access it using the following URL:${CL}"
+echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:8080${CL}"
diff --git a/ct/stylus.sh b/ct/stylus.sh
new file mode 100644
index 00000000..9e8bd99b
--- /dev/null
+++ b/ct/stylus.sh
@@ -0,0 +1,59 @@
+#!/usr/bin/env bash
+source <(curl -s https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)
+# Copyright (c) 2021-2025 community-scripts ORG
+# Author: luismco
+# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
+# Source: https://github.com/mmastrac/stylus
+
+APP="Stylus"
+var_tags="${var_tags:-network}"
+var_cpu="${var_cpu:-1}"
+var_ram="${var_ram:-1024}"
+var_disk="${var_disk:-2}"
+var_os="${var_os:-debian}"
+var_version="${var_version:-12}"
+var_unprivileged="${var_unprivileged:-1}"
+var_fuse="${var_fuse:-1}"
+
+header_info "$APP"
+variables
+color
+catch_errors
+
+function update_script() {
+ header_info
+ check_container_storage
+ check_container_resources
+
+ if [[ ! -d /opt/stylus ]]; then
+ msg_error "No ${APP} Installation Found!"
+ exit
+ fi
+ if check_for_gh_release "stylus" "mmastrac/stylus"; then
+ msg_info "Stopping $APP"
+ systemctl stop stylus
+ msg_ok "Stopped $APP"
+
+ msg_info "Updating $APP"
+ fetch_and_deploy_gh_release "stylus" "mmastrac/stylus" "singlefile" "latest" "/usr/bin/" "*_linux_amd64"
+
+ msg_ok "Updated $APP"
+
+ msg_info "Starting $APP"
+ systemctl start stylus
+ msg_ok "Started $APP"
+ msg_ok "Update Successful"
+ else
+ msg_ok "No update required. Latest version already installed."
+ fi
+ exit
+}
+
+start
+build_container
+description
+
+msg_ok "Completed Successfully!\n"
+echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
+echo -e "${INFO}${YW} Access it using the following URL:${CL}"
+echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:8000${CL}"
diff --git a/ct/swizzin.sh b/ct/swizzin.sh
deleted file mode 100644
index de10cb1b..00000000
--- a/ct/swizzin.sh
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/usr/bin/env bash
-source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)
-# Copyright (c) 2021-2025 community-scripts ORG
-# Author: EEJoshua
-# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
-# Source: https://swizzin.ltd/
-
-APP="Swizzin"
-var_tags="${var_tags:-seedbox}"
-var_cpu="${var_cpu:-2}"
-var_ram="${var_ram:-4096}"
-var_disk="${var_disk:-20}"
-var_os="${var_os:-debian}"
-var_version="${var_version:-12}"
-var_unprivileged="${var_unprivileged:-1}"
-
-header_info "$APP"
-variables
-color
-catch_errors
-
-function update_script() {
- header_info
- check_container_storage
- check_container_resources
- if ! command -v sudo box >/dev/null 2>&1; then
- msg_error "No ${APP} installation found!\n"
- exit
- fi
- msg_info "Running 'sudo box update' inside the container\n"
- $STD sudo box update
- msg_ok "Update finished\n"
- exit
-}
-
-start
-build_container
-description
-
-msg_ok "Completed Successfully!\n"
-echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
-echo -e "${INFO}${YW}If installed panel, access through the following URL:${CL}"
-echo -e "${TAB}${GATEWAY}${BGN}http://${IP}${CL}"
diff --git a/ct/tracktor.sh b/ct/tracktor.sh
deleted file mode 100644
index 864210e3..00000000
--- a/ct/tracktor.sh
+++ /dev/null
@@ -1,71 +0,0 @@
-#!/usr/bin/env bash
-source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)
-# Copyright (c) 2021-2025 community-scripts ORG
-# Author: CrazyWolf13
-# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
-# Source: https://tracktor.bytedge.in/
-
-APP="tracktor"
-var_tags="${var_tags:-car;monitoring}"
-var_cpu="${var_cpu:-2}"
-var_ram="${var_ram:-4096}"
-var_disk="${var_disk:-6}"
-var_os="${var_os:-debian}"
-var_version="${var_version:-12}"
-var_unprivileged="${var_unprivileged:-1}"
-
-header_info "$APP"
-variables
-color
-catch_errors
-
-function update_script() {
- header_info
- check_container_storage
- check_container_resources
- if [[ ! -d /opt/tracktor ]]; then
- msg_error "No ${APP} Installation Found!"
- exit
- fi
-
- RELEASE=$(curl -fsSL https://api.github.com/repos/javedh-dev/tracktor/releases/latest | jq -r '.tag_name' | sed 's/^v//')
- if [[ "${RELEASE}" != "$(cat ~/.tracktor 2>/dev/null)" ]] || [[ ! -f ~/.tracktor ]]; then
- msg_info "Stopping Service"
- systemctl stop tracktor
- msg_ok "Stopped Service"
-
- msg_info "Creating Backup"
- cp /opt/tracktor/app/server/.env /opt/tracktor.env
- msg_ok "Created Backup"
-
- msg_info "Updating ${APP}"
- setup_nodejs
- fetch_and_deploy_gh_release "tracktor" "javedh-dev/tracktor" "tarball" "latest" "/opt/tracktor"
- cd /opt/tracktor
- rm package-lock.json
- $STD npm install
- $STD npm run build
- msg_ok "Updated $APP"
-
- msg_info "Restoring Backup"
- cp /opt/tracktor.env /opt/tracktor/app/server/.env
- msg_ok "Restored Backup"
-
- msg_info "Starting Service"
- systemctl start tracktor
- msg_ok "Started Service"
- msg_ok "Updated Successfully"
- else
- msg_ok "Already up to date"
- fi
- exit
-}
-
-start
-build_container
-description
-
-msg_ok "Completed Successfully!\n"
-echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
-echo -e "${INFO}${YW} Access it using the following URL:${CL}"
-echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:3000${CL}"
diff --git a/ct/traefik.sh b/ct/traefik.sh
deleted file mode 100644
index fe4d80f9..00000000
--- a/ct/traefik.sh
+++ /dev/null
@@ -1,58 +0,0 @@
-#!/usr/bin/env bash
-source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func)
-# Copyright (c) 2021-2025 tteck
-# Author: tteck (tteckster)
-# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
-# Source: https://traefik.io/
-
-APP="Traefik"
-var_tags="${var_tags:-proxy}"
-var_cpu="${var_cpu:-1}"
-var_ram="${var_ram:-512}"
-var_disk="${var_disk:-2}"
-var_os="${var_os:-debian}"
-var_version="${var_version:-12}"
-var_unprivileged="${var_unprivileged:-1}"
-
-header_info "$APP"
-variables
-color
-catch_errors
-
-function update_script() {
- header_info
- check_container_storage
- check_container_resources
- if [[ ! -f /etc/systemd/system/traefik.service ]]; then
- msg_error "No ${APP} Installation Found!"
- exit
- fi
- RELEASE=$(curl -fsSL https://api.github.com/repos/traefik/traefik/releases | grep -oP '"tag_name":\s*"v\K[\d.]+?(?=")' | sort -V | tail -n 1)
- msg_info "Updating $APP LXC"
- if [[ "${RELEASE}" != "$(cat /opt/${APP}_version.txt)" ]] || [[ ! -f /opt/${APP}_version.txt ]]; then
- curl -fsSL "https://github.com/traefik/traefik/releases/download/v${RELEASE}/traefik_v${RELEASE}_linux_amd64.tar.gz" -o $(basename "https://github.com/traefik/traefik/releases/download/v${RELEASE}/traefik_v${RELEASE}_linux_amd64.tar.gz")
- tar -C /tmp -xzf traefik*.tar.gz
- mv /tmp/traefik /usr/bin/
- rm -rf traefik*.tar.gz
- systemctl restart traefik.service
- echo "${RELEASE}" >/opt/${APP}_version.txt
- msg_ok "Updated $APP LXC"
- else
- msg_ok "No update required. ${APP} is already at ${RELEASE}"
- fi
- exit
-}
-
-start
-build_container
-description
-
-msg_ok "Completed Successfully!\n"
-echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
-echo -e "${INFO}${YW} Access it using the following URL:${CL}"
-echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:8080${CL}"
-echo -e "Commands available are as below:"
-echo -e "addsite - creating a config"
-echo -e "ensite - enables a config"
-echo -e "dissite - disables a config"
-echo -e "editsite - edits a config"
diff --git a/ct/tunarr.sh b/ct/tunarr.sh
new file mode 100644
index 00000000..a3941b1d
--- /dev/null
+++ b/ct/tunarr.sh
@@ -0,0 +1,76 @@
+#!/usr/bin/env bash
+source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)
+# Copyright (c) 2021-2025 tteck
+# Author: chrisbenincasa
+# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
+# Source: https://tunarr.com/
+
+APP="Tunarr"
+var_tags="${var_tags:-iptv}"
+var_cpu="${var_cpu:-2}"
+var_ram="${var_ram:-1024}"
+var_disk="${var_disk:-5}"
+var_os="${var_os:-debian}"
+var_version="${var_version:-12}"
+var_unprivileged="${var_unprivileged:-1}"
+
+header_info "$APP"
+variables
+color
+catch_errors
+function update_script() {
+ header_info
+ check_container_storage
+ check_container_resources
+ if [[ ! -d /opt/tunarr ]]; then
+ msg_error "No ${APP} Installation Found!"
+ exit
+ fi
+ if check_for_gh_release "tunarr" "chrisbenincasa/tunarr"; then
+ msg_info "Stopping ${APP}"
+ systemctl stop tunarr
+ msg_ok "Stopped ${APP}"
+
+ msg_info "Creating Backup"
+ tar -czf "/opt/${APP}_backup_$(date +%F).tar.gz" /usr/.local/share/tunarr
+ msg_ok "Backup Created"
+
+ fetch_and_deploy_gh_release "tunarr" "chrisbenincasa/tunarr" "singlefile" "latest" "/opt/tunarr" "*linux-x64"
+
+ msg_info "Starting ${APP}"
+ systemctl start tunarr
+ msg_ok "Started ${APP}"
+
+ msg_ok "Updated Successfully"
+ fi
+
+ if check_for_gh_release "ersatztv-ffmpeg" "ErsatzTV/ErsatzTV-ffmpeg"; then
+ msg_info "Stopping ${APP}"
+ systemctl stop tunarr
+ msg_ok "Stopped ${APP}"
+
+ fetch_and_deploy_gh_release "ersatztv-ffmpeg" "ErsatzTV/ErsatzTV-ffmpeg" "prebuild" "latest" "/opt/ErsatzTV-ffmpeg" "*-linux64-gpl-7.1.tar.xz"
+
+ msg_info "Set ErsatzTV-ffmpeg links"
+ chmod +x /opt/ErsatzTV-ffmpeg/bin/*
+ ln -sf /opt/ErsatzTV-ffmpeg/bin/ffmpeg /usr/local/bin/ffmpeg
+ ln -sf /opt/ErsatzTV-ffmpeg/bin/ffplay /usr/local/bin/ffplay
+ ln -sf /opt/ErsatzTV-ffmpeg/bin/ffprobe /usr/local/bin/ffprobe
+ msg_ok "ffmpeg links set"
+
+ msg_info "Starting ${APP}"
+ systemctl start tunarr
+ msg_ok "Started ${APP}"
+ msg_ok "Updated Successfully"
+ fi
+ exit
+}
+
+start
+build_container
+description
+
+msg_ok "Completed Successfully!\n"
+echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
+echo -e "${INFO}${YW} Access it using the following URL:${CL}"
+echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:8000${CL}"
diff --git a/ct/twingate-connector.sh b/ct/twingate-connector.sh
deleted file mode 100644
index f87be0a0..00000000
--- a/ct/twingate-connector.sh
+++ /dev/null
@@ -1,46 +0,0 @@
-#!/usr/bin/env bash
-source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/misc/build.func)
-# Copyright (c) 2021-2025 community-scripts ORG
-# Author: twingate-andrewb
-# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
-# Source: https://www.twingate.com/docs/
-
-APP="twingate-connector"
-var_tags="${var_tags:-network;connector;twingate}"
-var_cpu="${var_cpu:-1}"
-var_ram="${var_ram:-2048}"
-var_disk="${var_disk:-2}"
-var_os="${var_os:-ubuntu}"
-var_version="${var_version:-22.04}"
-var_unprivileged="${var_unprivileged:-1}"
-
-header_info "$APP"
-variables
-color
-catch_errors
-
-function update_script() {
- header_info
- check_container_storage
- check_container_resources
-
- if [[ ! -f /lib/systemd/system/twingate-connector.service ]]; then
- msg_error "No ${APP} Installation Found!"
- exit
- fi
-
- msg_info "Updating ${APP}"
-
- apt update
- apt install -yq twingate-connector
- systemctl restart twingate-connector
-
- msg_ok "Updated Successfully"
- exit
-}
-
-start
-build_container
-description
-
-msg_ok "All Finished! If you need to update your access or refresh tokens, they can be found in /etc/twingate/connector.conf"
diff --git a/ct/uhf.sh b/ct/uhf.sh
index 308c6296..09a30202 100644
--- a/ct/uhf.sh
+++ b/ct/uhf.sh
@@ -27,28 +27,29 @@ function update_script() {
msg_error "No ${APP} Installation Found!"
exit
fi
- msg_info "Stopping ${APP}"
- systemctl stop uhf-server
- msg_ok "Stopped ${APP}"
+ if check_for_gh_release "uhf-server" "swapplications/uhf-server-dist"; then
+ msg_info "Stopping Service"
+ systemctl stop uhf-server
+ msg_ok "Stopped Service"
- msg_info "Updating APT packages"
- $STD apt-get update
- $STD apt-get -y upgrade
- msg_ok "Package list updated"
+ msg_info "Updating APT packages"
+ $STD apt-get update
+ $STD apt-get -y upgrade
+ msg_ok "Package list updated"
- fetch_and_deploy_gh_release "comskip" "swapplications/comskip" "prebuild" "latest" "/opt/comskip" "comskip-x64-*.zip"
- fetch_and_deploy_gh_release "uhf-server" "swapplications/uhf-server-dist" "prebuild" "latest" "/opt/uhf-server" "UHF.Server-linux-x64-*.zip"
+ fetch_and_deploy_gh_release "comskip" "swapplications/comskip" "prebuild" "latest" "/opt/comskip" "comskip-x64-*.zip"
+ fetch_and_deploy_gh_release "uhf-server" "swapplications/uhf-server-dist" "prebuild" "latest" "/opt/uhf-server" "UHF.Server-linux-x64-*.zip"
- msg_info "Starting ${APP}"
- systemctl start uhf-server
- msg_ok "Started ${APP}"
+ msg_info "Starting Service"
+ systemctl start uhf-server
+ msg_ok "Started Service"
- msg_info "Cleaning up"
- $STD apt-get -y autoremove
- $STD apt-get -y autoclean
- msg_ok "Cleaned"
-
- msg_ok "Updated Successfully"
+ msg_info "Cleaning up"
+ $STD apt-get -y autoremove
+ $STD apt-get -y autoclean
+ msg_ok "Cleaned"
+ msg_ok "Updated Successfully"
+ fi
exit
}
diff --git a/frontend/package-lock.json b/frontend/package-lock.json
index fe7ecb63..20bb2092 100644
--- a/frontend/package-lock.json
+++ b/frontend/package-lock.json
@@ -33,7 +33,7 @@
"fuse.js": "^7.1.0",
"lucide-react": "^0.453.0",
"mini-svg-data-uri": "^1.4.4",
- "next": "15.2.4",
+ "next": "15.5.2",
"next-themes": "^0.3.0",
"nuqs": "^2.4.1",
"pocketbase": "^0.21.5",
@@ -441,9 +441,9 @@
}
},
"node_modules/@emnapi/runtime": {
- "version": "1.3.1",
- "resolved": "https://registry.npmjs.org/@emnapi/runtime/-/runtime-1.3.1.tgz",
- "integrity": "sha512-kEBmG8KyqtxJZv+ygbEim+KCGtIq1fC22Ms3S4ziXmYKm8uyoLX0MHONVKwp+9opg390VaKRNt4a7A9NwmpNhw==",
+ "version": "1.5.0",
+ "resolved": "https://registry.npmjs.org/@emnapi/runtime/-/runtime-1.5.0.tgz",
+ "integrity": "sha512-97/BJ3iXHww3djw6hYIfErCZFee7qCtrneuLa20UXFCOTCfBM2cvQHjWJ2EG0s0MtdNwInarqCTz35i4wWXHsQ==",
"license": "MIT",
"optional": true,
"dependencies": {
@@ -1267,6 +1267,22 @@
"url": "https://opencollective.com/libvips"
}
},
+ "node_modules/@img/sharp-libvips-linux-ppc64": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-ppc64/-/sharp-libvips-linux-ppc64-1.2.0.tgz",
+ "integrity": "sha512-Xod/7KaDDHkYu2phxxfeEPXfVXFKx70EAFZ0qyUdOjCcxbjqyJOEUpDe6RIyaunGxT34Anf9ue/wuWOqBW2WcQ==",
+ "cpu": [
+ "ppc64"
+ ],
+ "license": "LGPL-3.0-or-later",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ }
+ },
"node_modules/@img/sharp-libvips-linux-s390x": {
"version": "1.0.4",
"resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-s390x/-/sharp-libvips-linux-s390x-1.0.4.tgz",
@@ -1375,6 +1391,28 @@
"@img/sharp-libvips-linux-arm64": "1.0.4"
}
},
+ "node_modules/@img/sharp-linux-ppc64": {
+ "version": "0.34.3",
+ "resolved": "https://registry.npmjs.org/@img/sharp-linux-ppc64/-/sharp-linux-ppc64-0.34.3.tgz",
+ "integrity": "sha512-GLtbLQMCNC5nxuImPR2+RgrviwKwVql28FWZIW1zWruy6zLgA5/x2ZXk3mxj58X/tszVF69KK0Is83V8YgWhLA==",
+ "cpu": [
+ "ppc64"
+ ],
+ "license": "Apache-2.0",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": "^18.17.0 || ^20.3.0 || >=21.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ },
+ "optionalDependencies": {
+ "@img/sharp-libvips-linux-ppc64": "1.2.0"
+ }
+ },
"node_modules/@img/sharp-linux-s390x": {
"version": "0.33.5",
"resolved": "https://registry.npmjs.org/@img/sharp-linux-s390x/-/sharp-linux-s390x-0.33.5.tgz",
@@ -1482,6 +1520,25 @@
"url": "https://opencollective.com/libvips"
}
},
+ "node_modules/@img/sharp-win32-arm64": {
+ "version": "0.34.3",
+ "resolved": "https://registry.npmjs.org/@img/sharp-win32-arm64/-/sharp-win32-arm64-0.34.3.tgz",
+ "integrity": "sha512-MjnHPnbqMXNC2UgeLJtX4XqoVHHlZNd+nPt1kRPmj63wURegwBhZlApELdtxM2OIZDRv/DFtLcNhVbd1z8GYXQ==",
+ "cpu": [
+ "arm64"
+ ],
+ "license": "Apache-2.0 AND LGPL-3.0-or-later",
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": "^18.17.0 || ^20.3.0 || >=21.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ }
+ },
"node_modules/@img/sharp-win32-ia32": {
"version": "0.33.5",
"resolved": "https://registry.npmjs.org/@img/sharp-win32-ia32/-/sharp-win32-ia32-0.33.5.tgz",
@@ -1598,9 +1655,9 @@
"license": "MIT"
},
"node_modules/@next/env": {
- "version": "15.2.4",
- "resolved": "https://registry.npmjs.org/@next/env/-/env-15.2.4.tgz",
- "integrity": "sha512-+SFtMgoiYP3WoSswuNmxJOCwi06TdWE733D+WPjpXIe4LXGULwEaofiiAy6kbS0+XjM5xF5n3lKuBwN2SnqD9g==",
+ "version": "15.5.2",
+ "resolved": "https://registry.npmjs.org/@next/env/-/env-15.5.2.tgz",
+ "integrity": "sha512-Qe06ew4zt12LeO6N7j8/nULSOe3fMXE4dM6xgpBQNvdzyK1sv5y4oAP3bq4LamrvGCZtmRYnW8URFCeX5nFgGg==",
"license": "MIT"
},
"node_modules/@next/eslint-plugin-next": {
@@ -1644,9 +1701,9 @@
}
},
"node_modules/@next/swc-darwin-arm64": {
- "version": "15.2.4",
- "resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-15.2.4.tgz",
- "integrity": "sha512-1AnMfs655ipJEDC/FHkSr0r3lXBgpqKo4K1kiwfUf3iE68rDFXZ1TtHdMvf7D0hMItgDZ7Vuq3JgNMbt/+3bYw==",
+ "version": "15.5.2",
+ "resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-15.5.2.tgz",
+ "integrity": "sha512-8bGt577BXGSd4iqFygmzIfTYizHb0LGWqH+qgIF/2EDxS5JsSdERJKA8WgwDyNBZgTIIA4D8qUtoQHmxIIquoQ==",
"cpu": [
"arm64"
],
@@ -1660,9 +1717,9 @@
}
},
"node_modules/@next/swc-darwin-x64": {
- "version": "15.2.4",
- "resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-15.2.4.tgz",
- "integrity": "sha512-3qK2zb5EwCwxnO2HeO+TRqCubeI/NgCe+kL5dTJlPldV/uwCnUgC7VbEzgmxbfrkbjehL4H9BPztWOEtsoMwew==",
+ "version": "15.5.2",
+ "resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-15.5.2.tgz",
+ "integrity": "sha512-2DjnmR6JHK4X+dgTXt5/sOCu/7yPtqpYt8s8hLkHFK3MGkka2snTv3yRMdHvuRtJVkPwCGsvBSwmoQCHatauFQ==",
"cpu": [
"x64"
],
@@ -1676,9 +1733,9 @@
}
},
"node_modules/@next/swc-linux-arm64-gnu": {
- "version": "15.2.4",
- "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-15.2.4.tgz",
- "integrity": "sha512-HFN6GKUcrTWvem8AZN7tT95zPb0GUGv9v0d0iyuTb303vbXkkbHDp/DxufB04jNVD+IN9yHy7y/6Mqq0h0YVaQ==",
+ "version": "15.5.2",
+ "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-15.5.2.tgz",
+ "integrity": "sha512-3j7SWDBS2Wov/L9q0mFJtEvQ5miIqfO4l7d2m9Mo06ddsgUK8gWfHGgbjdFlCp2Ek7MmMQZSxpGFqcC8zGh2AA==",
"cpu": [
"arm64"
],
@@ -1692,9 +1749,9 @@
}
},
"node_modules/@next/swc-linux-arm64-musl": {
- "version": "15.2.4",
- "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-15.2.4.tgz",
- "integrity": "sha512-Oioa0SORWLwi35/kVB8aCk5Uq+5/ZIumMK1kJV+jSdazFm2NzPDztsefzdmzzpx5oGCJ6FkUC7vkaUseNTStNA==",
+ "version": "15.5.2",
+ "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-15.5.2.tgz",
+ "integrity": "sha512-s6N8k8dF9YGc5T01UPQ08yxsK6fUow5gG1/axWc1HVVBYQBgOjca4oUZF7s4p+kwhkB1bDSGR8QznWrFZ/Rt5g==",
"cpu": [
"arm64"
],
@@ -1708,9 +1765,9 @@
}
},
"node_modules/@next/swc-linux-x64-gnu": {
- "version": "15.2.4",
- "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-15.2.4.tgz",
- "integrity": "sha512-yb5WTRaHdkgOqFOZiu6rHV1fAEK0flVpaIN2HB6kxHVSy/dIajWbThS7qON3W9/SNOH2JWkVCyulgGYekMePuw==",
+ "version": "15.5.2",
+ "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-15.5.2.tgz",
+ "integrity": "sha512-o1RV/KOODQh6dM6ZRJGZbc+MOAHww33Vbs5JC9Mp1gDk8cpEO+cYC/l7rweiEalkSm5/1WGa4zY7xrNwObN4+Q==",
"cpu": [
"x64"
],
@@ -1724,9 +1781,9 @@
}
},
"node_modules/@next/swc-linux-x64-musl": {
- "version": "15.2.4",
- "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-15.2.4.tgz",
- "integrity": "sha512-Dcdv/ix6srhkM25fgXiyOieFUkz+fOYkHlydWCtB0xMST6X9XYI3yPDKBZt1xuhOytONsIFJFB08xXYsxUwJLw==",
+ "version": "15.5.2",
+ "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-15.5.2.tgz",
+ "integrity": "sha512-/VUnh7w8RElYZ0IV83nUcP/J4KJ6LLYliiBIri3p3aW2giF+PAVgZb6mk8jbQSB3WlTai8gEmCAr7kptFa1H6g==",
"cpu": [
"x64"
],
@@ -1740,9 +1797,9 @@
}
},
"node_modules/@next/swc-win32-arm64-msvc": {
- "version": "15.2.4",
- "resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-15.2.4.tgz",
- "integrity": "sha512-dW0i7eukvDxtIhCYkMrZNQfNicPDExt2jPb9AZPpL7cfyUo7QSNl1DjsHjmmKp6qNAqUESyT8YFl/Aw91cNJJg==",
+ "version": "15.5.2",
+ "resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-15.5.2.tgz",
+ "integrity": "sha512-sMPyTvRcNKXseNQ/7qRfVRLa0VhR0esmQ29DD6pqvG71+JdVnESJaHPA8t7bc67KD5spP3+DOCNLhqlEI2ZgQg==",
"cpu": [
"arm64"
],
@@ -1756,9 +1813,9 @@
}
},
"node_modules/@next/swc-win32-x64-msvc": {
- "version": "15.2.4",
- "resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-15.2.4.tgz",
- "integrity": "sha512-SbnWkJmkS7Xl3kre8SdMF6F/XDh1DTFEhp0jRTj/uB8iPKoU2bb2NDfcu+iifv1+mxQEd1g2vvSxcZbXSKyWiQ==",
+ "version": "15.5.2",
+ "resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-15.5.2.tgz",
+ "integrity": "sha512-W5VvyZHnxG/2ukhZF/9Ikdra5fdNftxI6ybeVKYvBPDtyx7x4jPPSNduUkfH5fo3zG0JQ0bPxgy41af2JX5D4Q==",
"cpu": [
"x64"
],
@@ -3043,12 +3100,6 @@
"dev": true,
"license": "MIT"
},
- "node_modules/@swc/counter": {
- "version": "0.1.3",
- "resolved": "https://registry.npmjs.org/@swc/counter/-/counter-0.1.3.tgz",
- "integrity": "sha512-e2BR4lsJkkRlKZ/qCHPw9ZaSxc0MVUd7gtbtaB7aMvHeJVYe8sOB8DBZkP2DtISHGSku9sCK6T6cnY0CtXrOCQ==",
- "license": "Apache-2.0"
- },
"node_modules/@swc/helpers": {
"version": "0.5.15",
"resolved": "https://registry.npmjs.org/@swc/helpers/-/helpers-0.5.15.tgz",
@@ -4041,17 +4092,6 @@
"node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7"
}
},
- "node_modules/busboy": {
- "version": "1.6.0",
- "resolved": "https://registry.npmjs.org/busboy/-/busboy-1.6.0.tgz",
- "integrity": "sha512-8SFQbg/0hQ9xy3UNTB0YEnsNBbWfhf7RtnzpL7TkBiTBRfrQ9Fxcnz7VJsleJpyp6rVLvXiuORqjlHi5q+PYuA==",
- "dependencies": {
- "streamsearch": "^1.1.0"
- },
- "engines": {
- "node": ">=10.16.0"
- }
- },
"node_modules/cac": {
"version": "6.7.14",
"resolved": "https://registry.npmjs.org/cac/-/cac-6.7.14.tgz",
@@ -4660,9 +4700,9 @@
}
},
"node_modules/detect-libc": {
- "version": "2.0.3",
- "resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-2.0.3.tgz",
- "integrity": "sha512-bwy0MGW55bG41VqxxypOsdSdGqLwXPI/focwgTYCFMbdUiBAxLg9CFzG08sz2aqzknwiX7Hkl0bQENjg8iLByw==",
+ "version": "2.0.4",
+ "resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-2.0.4.tgz",
+ "integrity": "sha512-3UDv+G9CsCKO1WKMGw9fwq/SWJYbI0c5Y7LU1AXYoDdbhE2AHQ6N6Nb34sG8Fj7T5APy8qXDCKuuIHd1BR0tVA==",
"license": "Apache-2.0",
"engines": {
"node": ">=8"
@@ -7252,15 +7292,13 @@
"license": "MIT"
},
"node_modules/next": {
- "version": "15.2.4",
- "resolved": "https://registry.npmjs.org/next/-/next-15.2.4.tgz",
- "integrity": "sha512-VwL+LAaPSxEkd3lU2xWbgEOtrM8oedmyhBqaVNmgKB+GvZlCy9rgaEc+y2on0wv+l0oSFqLtYD6dcC1eAedUaQ==",
+ "version": "15.5.2",
+ "resolved": "https://registry.npmjs.org/next/-/next-15.5.2.tgz",
+ "integrity": "sha512-H8Otr7abj1glFhbGnvUt3gz++0AF1+QoCXEBmd/6aKbfdFwrn0LpA836Ed5+00va/7HQSDD+mOoVhn3tNy3e/Q==",
"license": "MIT",
"dependencies": {
- "@next/env": "15.2.4",
- "@swc/counter": "0.1.3",
+ "@next/env": "15.5.2",
"@swc/helpers": "0.5.15",
- "busboy": "1.6.0",
"caniuse-lite": "^1.0.30001579",
"postcss": "8.4.31",
"styled-jsx": "5.1.6"
@@ -7272,19 +7310,19 @@
"node": "^18.18.0 || ^19.8.0 || >= 20.0.0"
},
"optionalDependencies": {
- "@next/swc-darwin-arm64": "15.2.4",
- "@next/swc-darwin-x64": "15.2.4",
- "@next/swc-linux-arm64-gnu": "15.2.4",
- "@next/swc-linux-arm64-musl": "15.2.4",
- "@next/swc-linux-x64-gnu": "15.2.4",
- "@next/swc-linux-x64-musl": "15.2.4",
- "@next/swc-win32-arm64-msvc": "15.2.4",
- "@next/swc-win32-x64-msvc": "15.2.4",
- "sharp": "^0.33.5"
+ "@next/swc-darwin-arm64": "15.5.2",
+ "@next/swc-darwin-x64": "15.5.2",
+ "@next/swc-linux-arm64-gnu": "15.5.2",
+ "@next/swc-linux-arm64-musl": "15.5.2",
+ "@next/swc-linux-x64-gnu": "15.5.2",
+ "@next/swc-linux-x64-musl": "15.5.2",
+ "@next/swc-win32-arm64-msvc": "15.5.2",
+ "@next/swc-win32-x64-msvc": "15.5.2",
+ "sharp": "^0.34.3"
},
"peerDependencies": {
"@opentelemetry/api": "^1.1.0",
- "@playwright/test": "^1.41.2",
+ "@playwright/test": "^1.51.1",
"babel-plugin-react-compiler": "*",
"react": "^18.2.0 || 19.0.0-rc-de68d2f4-20241204 || ^19.0.0",
"react-dom": "^18.2.0 || 19.0.0-rc-de68d2f4-20241204 || ^19.0.0",
@@ -7315,6 +7353,367 @@
"react-dom": "^16.8 || ^17 || ^18"
}
},
+ "node_modules/next/node_modules/@img/sharp-darwin-arm64": {
+ "version": "0.34.3",
+ "resolved": "https://registry.npmjs.org/@img/sharp-darwin-arm64/-/sharp-darwin-arm64-0.34.3.tgz",
+ "integrity": "sha512-ryFMfvxxpQRsgZJqBd4wsttYQbCxsJksrv9Lw/v798JcQ8+w84mBWuXwl+TT0WJ/WrYOLaYpwQXi3sA9nTIaIg==",
+ "cpu": [
+ "arm64"
+ ],
+ "license": "Apache-2.0",
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "engines": {
+ "node": "^18.17.0 || ^20.3.0 || >=21.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ },
+ "optionalDependencies": {
+ "@img/sharp-libvips-darwin-arm64": "1.2.0"
+ }
+ },
+ "node_modules/next/node_modules/@img/sharp-darwin-x64": {
+ "version": "0.34.3",
+ "resolved": "https://registry.npmjs.org/@img/sharp-darwin-x64/-/sharp-darwin-x64-0.34.3.tgz",
+ "integrity": "sha512-yHpJYynROAj12TA6qil58hmPmAwxKKC7reUqtGLzsOHfP7/rniNGTL8tjWX6L3CTV4+5P4ypcS7Pp+7OB+8ihA==",
+ "cpu": [
+ "x64"
+ ],
+ "license": "Apache-2.0",
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "engines": {
+ "node": "^18.17.0 || ^20.3.0 || >=21.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ },
+ "optionalDependencies": {
+ "@img/sharp-libvips-darwin-x64": "1.2.0"
+ }
+ },
+ "node_modules/next/node_modules/@img/sharp-libvips-darwin-arm64": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/@img/sharp-libvips-darwin-arm64/-/sharp-libvips-darwin-arm64-1.2.0.tgz",
+ "integrity": "sha512-sBZmpwmxqwlqG9ueWFXtockhsxefaV6O84BMOrhtg/YqbTaRdqDE7hxraVE3y6gVM4eExmfzW4a8el9ArLeEiQ==",
+ "cpu": [
+ "arm64"
+ ],
+ "license": "LGPL-3.0-or-later",
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ }
+ },
+ "node_modules/next/node_modules/@img/sharp-libvips-darwin-x64": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/@img/sharp-libvips-darwin-x64/-/sharp-libvips-darwin-x64-1.2.0.tgz",
+ "integrity": "sha512-M64XVuL94OgiNHa5/m2YvEQI5q2cl9d/wk0qFTDVXcYzi43lxuiFTftMR1tOnFQovVXNZJ5TURSDK2pNe9Yzqg==",
+ "cpu": [
+ "x64"
+ ],
+ "license": "LGPL-3.0-or-later",
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ }
+ },
+ "node_modules/next/node_modules/@img/sharp-libvips-linux-arm": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-arm/-/sharp-libvips-linux-arm-1.2.0.tgz",
+ "integrity": "sha512-mWd2uWvDtL/nvIzThLq3fr2nnGfyr/XMXlq8ZJ9WMR6PXijHlC3ksp0IpuhK6bougvQrchUAfzRLnbsen0Cqvw==",
+ "cpu": [
+ "arm"
+ ],
+ "license": "LGPL-3.0-or-later",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ }
+ },
+ "node_modules/next/node_modules/@img/sharp-libvips-linux-arm64": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-arm64/-/sharp-libvips-linux-arm64-1.2.0.tgz",
+ "integrity": "sha512-RXwd0CgG+uPRX5YYrkzKyalt2OJYRiJQ8ED/fi1tq9WQW2jsQIn0tqrlR5l5dr/rjqq6AHAxURhj2DVjyQWSOA==",
+ "cpu": [
+ "arm64"
+ ],
+ "license": "LGPL-3.0-or-later",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ }
+ },
+ "node_modules/next/node_modules/@img/sharp-libvips-linux-s390x": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-s390x/-/sharp-libvips-linux-s390x-1.2.0.tgz",
+ "integrity": "sha512-eMKfzDxLGT8mnmPJTNMcjfO33fLiTDsrMlUVcp6b96ETbnJmd4uvZxVJSKPQfS+odwfVaGifhsB07J1LynFehw==",
+ "cpu": [
+ "s390x"
+ ],
+ "license": "LGPL-3.0-or-later",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ }
+ },
+ "node_modules/next/node_modules/@img/sharp-libvips-linux-x64": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-x64/-/sharp-libvips-linux-x64-1.2.0.tgz",
+ "integrity": "sha512-ZW3FPWIc7K1sH9E3nxIGB3y3dZkpJlMnkk7z5tu1nSkBoCgw2nSRTFHI5pB/3CQaJM0pdzMF3paf9ckKMSE9Tg==",
+ "cpu": [
+ "x64"
+ ],
+ "license": "LGPL-3.0-or-later",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ }
+ },
+ "node_modules/next/node_modules/@img/sharp-libvips-linuxmusl-arm64": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linuxmusl-arm64/-/sharp-libvips-linuxmusl-arm64-1.2.0.tgz",
+ "integrity": "sha512-UG+LqQJbf5VJ8NWJ5Z3tdIe/HXjuIdo4JeVNADXBFuG7z9zjoegpzzGIyV5zQKi4zaJjnAd2+g2nna8TZvuW9Q==",
+ "cpu": [
+ "arm64"
+ ],
+ "license": "LGPL-3.0-or-later",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ }
+ },
+ "node_modules/next/node_modules/@img/sharp-libvips-linuxmusl-x64": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linuxmusl-x64/-/sharp-libvips-linuxmusl-x64-1.2.0.tgz",
+ "integrity": "sha512-SRYOLR7CXPgNze8akZwjoGBoN1ThNZoqpOgfnOxmWsklTGVfJiGJoC/Lod7aNMGA1jSsKWM1+HRX43OP6p9+6Q==",
+ "cpu": [
+ "x64"
+ ],
+ "license": "LGPL-3.0-or-later",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ }
+ },
+ "node_modules/next/node_modules/@img/sharp-linux-arm": {
+ "version": "0.34.3",
+ "resolved": "https://registry.npmjs.org/@img/sharp-linux-arm/-/sharp-linux-arm-0.34.3.tgz",
+ "integrity": "sha512-oBK9l+h6KBN0i3dC8rYntLiVfW8D8wH+NPNT3O/WBHeW0OQWCjfWksLUaPidsrDKpJgXp3G3/hkmhptAW0I3+A==",
+ "cpu": [
+ "arm"
+ ],
+ "license": "Apache-2.0",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": "^18.17.0 || ^20.3.0 || >=21.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ },
+ "optionalDependencies": {
+ "@img/sharp-libvips-linux-arm": "1.2.0"
+ }
+ },
+ "node_modules/next/node_modules/@img/sharp-linux-arm64": {
+ "version": "0.34.3",
+ "resolved": "https://registry.npmjs.org/@img/sharp-linux-arm64/-/sharp-linux-arm64-0.34.3.tgz",
+ "integrity": "sha512-QdrKe3EvQrqwkDrtuTIjI0bu6YEJHTgEeqdzI3uWJOH6G1O8Nl1iEeVYRGdj1h5I21CqxSvQp1Yv7xeU3ZewbA==",
+ "cpu": [
+ "arm64"
+ ],
+ "license": "Apache-2.0",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": "^18.17.0 || ^20.3.0 || >=21.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ },
+ "optionalDependencies": {
+ "@img/sharp-libvips-linux-arm64": "1.2.0"
+ }
+ },
+ "node_modules/next/node_modules/@img/sharp-linux-s390x": {
+ "version": "0.34.3",
+ "resolved": "https://registry.npmjs.org/@img/sharp-linux-s390x/-/sharp-linux-s390x-0.34.3.tgz",
+ "integrity": "sha512-3gahT+A6c4cdc2edhsLHmIOXMb17ltffJlxR0aC2VPZfwKoTGZec6u5GrFgdR7ciJSsHT27BD3TIuGcuRT0KmQ==",
+ "cpu": [
+ "s390x"
+ ],
+ "license": "Apache-2.0",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": "^18.17.0 || ^20.3.0 || >=21.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ },
+ "optionalDependencies": {
+ "@img/sharp-libvips-linux-s390x": "1.2.0"
+ }
+ },
+ "node_modules/next/node_modules/@img/sharp-linux-x64": {
+ "version": "0.34.3",
+ "resolved": "https://registry.npmjs.org/@img/sharp-linux-x64/-/sharp-linux-x64-0.34.3.tgz",
+ "integrity": "sha512-8kYso8d806ypnSq3/Ly0QEw90V5ZoHh10yH0HnrzOCr6DKAPI6QVHvwleqMkVQ0m+fc7EH8ah0BB0QPuWY6zJQ==",
+ "cpu": [
+ "x64"
+ ],
+ "license": "Apache-2.0",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": "^18.17.0 || ^20.3.0 || >=21.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ },
+ "optionalDependencies": {
+ "@img/sharp-libvips-linux-x64": "1.2.0"
+ }
+ },
+ "node_modules/next/node_modules/@img/sharp-linuxmusl-arm64": {
+ "version": "0.34.3",
+ "resolved": "https://registry.npmjs.org/@img/sharp-linuxmusl-arm64/-/sharp-linuxmusl-arm64-0.34.3.tgz",
+ "integrity": "sha512-vAjbHDlr4izEiXM1OTggpCcPg9tn4YriK5vAjowJsHwdBIdx0fYRsURkxLG2RLm9gyBq66gwtWI8Gx0/ov+JKQ==",
+ "cpu": [
+ "arm64"
+ ],
+ "license": "Apache-2.0",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": "^18.17.0 || ^20.3.0 || >=21.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ },
+ "optionalDependencies": {
+ "@img/sharp-libvips-linuxmusl-arm64": "1.2.0"
+ }
+ },
+ "node_modules/next/node_modules/@img/sharp-linuxmusl-x64": {
+ "version": "0.34.3",
+ "resolved": "https://registry.npmjs.org/@img/sharp-linuxmusl-x64/-/sharp-linuxmusl-x64-0.34.3.tgz",
+ "integrity": "sha512-gCWUn9547K5bwvOn9l5XGAEjVTTRji4aPTqLzGXHvIr6bIDZKNTA34seMPgM0WmSf+RYBH411VavCejp3PkOeQ==",
+ "cpu": [
+ "x64"
+ ],
+ "license": "Apache-2.0",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": "^18.17.0 || ^20.3.0 || >=21.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ },
+ "optionalDependencies": {
+ "@img/sharp-libvips-linuxmusl-x64": "1.2.0"
+ }
+ },
+ "node_modules/next/node_modules/@img/sharp-wasm32": {
+ "version": "0.34.3",
+ "resolved": "https://registry.npmjs.org/@img/sharp-wasm32/-/sharp-wasm32-0.34.3.tgz",
+ "integrity": "sha512-+CyRcpagHMGteySaWos8IbnXcHgfDn7pO2fiC2slJxvNq9gDipYBN42/RagzctVRKgxATmfqOSulgZv5e1RdMg==",
+ "cpu": [
+ "wasm32"
+ ],
+ "license": "Apache-2.0 AND LGPL-3.0-or-later AND MIT",
+ "optional": true,
+ "dependencies": {
+ "@emnapi/runtime": "^1.4.4"
+ },
+ "engines": {
+ "node": "^18.17.0 || ^20.3.0 || >=21.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ }
+ },
+ "node_modules/next/node_modules/@img/sharp-win32-ia32": {
+ "version": "0.34.3",
+ "resolved": "https://registry.npmjs.org/@img/sharp-win32-ia32/-/sharp-win32-ia32-0.34.3.tgz",
+ "integrity": "sha512-xuCdhH44WxuXgOM714hn4amodJMZl3OEvf0GVTm0BEyMeA2to+8HEdRPShH0SLYptJY1uBw+SCFP9WVQi1Q/cw==",
+ "cpu": [
+ "ia32"
+ ],
+ "license": "Apache-2.0 AND LGPL-3.0-or-later",
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": "^18.17.0 || ^20.3.0 || >=21.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ }
+ },
+ "node_modules/next/node_modules/@img/sharp-win32-x64": {
+ "version": "0.34.3",
+ "resolved": "https://registry.npmjs.org/@img/sharp-win32-x64/-/sharp-win32-x64-0.34.3.tgz",
+ "integrity": "sha512-OWwz05d++TxzLEv4VnsTz5CmZ6mI6S05sfQGEMrNrQcOEERbX46332IvE7pO/EUiw7jUrrS40z/M7kPyjfl04g==",
+ "cpu": [
+ "x64"
+ ],
+ "license": "Apache-2.0 AND LGPL-3.0-or-later",
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": "^18.17.0 || ^20.3.0 || >=21.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ }
+ },
"node_modules/next/node_modules/postcss": {
"version": "8.4.31",
"resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.31.tgz",
@@ -7343,6 +7742,49 @@
"node": "^10 || ^12 || >=14"
}
},
+ "node_modules/next/node_modules/sharp": {
+ "version": "0.34.3",
+ "resolved": "https://registry.npmjs.org/sharp/-/sharp-0.34.3.tgz",
+ "integrity": "sha512-eX2IQ6nFohW4DbvHIOLRB3MHFpYqaqvXd3Tp5e/T/dSH83fxaNJQRvDMhASmkNTsNTVF2/OOopzRCt7xokgPfg==",
+ "hasInstallScript": true,
+ "license": "Apache-2.0",
+ "optional": true,
+ "dependencies": {
+ "color": "^4.2.3",
+ "detect-libc": "^2.0.4",
+ "semver": "^7.7.2"
+ },
+ "engines": {
+ "node": "^18.17.0 || ^20.3.0 || >=21.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ },
+ "optionalDependencies": {
+ "@img/sharp-darwin-arm64": "0.34.3",
+ "@img/sharp-darwin-x64": "0.34.3",
+ "@img/sharp-libvips-darwin-arm64": "1.2.0",
+ "@img/sharp-libvips-darwin-x64": "1.2.0",
+ "@img/sharp-libvips-linux-arm": "1.2.0",
+ "@img/sharp-libvips-linux-arm64": "1.2.0",
+ "@img/sharp-libvips-linux-ppc64": "1.2.0",
+ "@img/sharp-libvips-linux-s390x": "1.2.0",
+ "@img/sharp-libvips-linux-x64": "1.2.0",
+ "@img/sharp-libvips-linuxmusl-arm64": "1.2.0",
+ "@img/sharp-libvips-linuxmusl-x64": "1.2.0",
+ "@img/sharp-linux-arm": "0.34.3",
+ "@img/sharp-linux-arm64": "0.34.3",
+ "@img/sharp-linux-ppc64": "0.34.3",
+ "@img/sharp-linux-s390x": "0.34.3",
+ "@img/sharp-linux-x64": "0.34.3",
+ "@img/sharp-linuxmusl-arm64": "0.34.3",
+ "@img/sharp-linuxmusl-x64": "0.34.3",
+ "@img/sharp-wasm32": "0.34.3",
+ "@img/sharp-win32-arm64": "0.34.3",
+ "@img/sharp-win32-ia32": "0.34.3",
+ "@img/sharp-win32-x64": "0.34.3"
+ }
+ },
"node_modules/node-releases": {
"version": "2.0.18",
"resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.18.tgz",
@@ -8631,9 +9073,9 @@
"license": "MIT"
},
"node_modules/semver": {
- "version": "7.6.3",
- "resolved": "https://registry.npmjs.org/semver/-/semver-7.6.3.tgz",
- "integrity": "sha512-oVekP1cKtI+CTDvHWYFUcMtsK/00wmAEfyqKfNdARm8u1wNVhSgaX7A8d4UuIlUI5e84iEwOhs7ZPYRmzU9U6A==",
+ "version": "7.7.2",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.2.tgz",
+ "integrity": "sha512-RF0Fw+rO5AMf9MAyaRXI4AV0Ulj5lMHqVxxdSgiVbixSCXoEmmX/jk0CuJw4+3SqroYO9VoUh+HcuJivvtJemA==",
"license": "ISC",
"bin": {
"semver": "bin/semver.js"
@@ -8848,14 +9290,6 @@
"dev": true,
"license": "MIT"
},
- "node_modules/streamsearch": {
- "version": "1.1.0",
- "resolved": "https://registry.npmjs.org/streamsearch/-/streamsearch-1.1.0.tgz",
- "integrity": "sha512-Mcc5wHehp9aXz1ax6bZUyY5afg9u2rv5cqQI3mRrYkGC8rW2hM02jWuwjtL++LS5qinSyhj2QfLyNsuc+VsExg==",
- "engines": {
- "node": ">=10.0.0"
- }
- },
"node_modules/string-width": {
"version": "5.1.2",
"resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz",
diff --git a/frontend/package.json b/frontend/package.json
index 50379a32..27421f71 100644
--- a/frontend/package.json
+++ b/frontend/package.json
@@ -44,7 +44,7 @@
"fuse.js": "^7.1.0",
"lucide-react": "^0.453.0",
"mini-svg-data-uri": "^1.4.4",
- "next": "15.2.4",
+ "next": "15.5.2",
"next-themes": "^0.3.0",
"nuqs": "^2.4.1",
"pocketbase": "^0.21.5",
diff --git a/frontend/public/json/add-iptag.json b/frontend/public/json/add-iptag.json
deleted file mode 100644
index da84c850..00000000
--- a/frontend/public/json/add-iptag.json
+++ /dev/null
@@ -1,49 +0,0 @@
-{
- "name": "Proxmox VE LXC IP-Tag",
- "slug": "add-lxc-iptag",
- "categories": [
- 1
- ],
- "date_created": "2025-04-02",
- "type": "addon",
- "updateable": false,
- "privileged": false,
- "interface_port": null,
- "documentation": null,
- "website": null,
- "logo": "https://raw.githubusercontent.com/selfhst/icons/refs/heads/main/svg/proxmox.svg",
- "config_path": "",
- "description": "This script automatically adds IP address as tags to LXC containers using a Systemd service. The service also updates the tags if a LXC IP address is changed.",
- "install_methods": [
- {
- "type": "default",
- "script": "tools/addon/add-iptag.sh",
- "resources": {
- "cpu": null,
- "ram": null,
- "hdd": null,
- "os": null,
- "version": null
- }
- }
- ],
- "default_credentials": {
- "username": null,
- "password": null
- },
- "notes": [
- {
- "text": "Execute within the Proxmox shell",
- "type": "info"
- },
- {
- "text": "Configuration: `nano /opt/iptag/iptag.conf`. iptag.service must be restarted after change.",
- "type": "info"
- },
- {
- "text": "The Proxmox Node must contain ipcalc and net-tools. `apt-get install -y ipcalc net-tools`",
- "type": "warning"
- }
- ]
-}
-
diff --git a/frontend/public/json/autocaliweb.json b/frontend/public/json/autocaliweb.json
new file mode 100644
index 00000000..792645a2
--- /dev/null
+++ b/frontend/public/json/autocaliweb.json
@@ -0,0 +1,35 @@
+{
+ "name": "Autocaliweb",
+ "slug": "autocaliweb",
+ "categories": [
+ 13
+ ],
+ "date_created": "2025-08-30",
+ "type": "ct",
+ "updateable": true,
+ "privileged": false,
+ "interface_port": 8083,
+ "documentation": "https://github.com/gelbphoenix/autocaliweb/wiki",
+ "config_path": "/etc/autocaliweb",
+ "website": "https://github.com/gelbphoenix/autocaliweb",
+ "logo": "https://github.com/gelbphoenix/autocaliweb/raw/refs/heads/master/cps/static/icon-dark.svg",
+ "description": "A modern web management system for eBooks, eComics and PDFs",
+ "install_methods": [
+ {
+ "type": "default",
+ "script": "ct/autocaliweb.sh",
+ "resources": {
+ "cpu": 2,
+ "ram": 2048,
+ "hdd": 6,
+ "os": "Debian",
+ "version": "12"
+ }
+ }
+ ],
+ "default_credentials": {
+ "username": "admin",
+ "password": "admin123"
+ },
+ "notes": []
+}
diff --git a/frontend/public/json/dependency-check.json b/frontend/public/json/dependency-check.json
new file mode 100644
index 00000000..13b31fe1
--- /dev/null
+++ b/frontend/public/json/dependency-check.json
@@ -0,0 +1,48 @@
+{
+ "name": "Proxmox VE VM Startup Dependency Check",
+ "slug": "dependency-check",
+ "categories": [
+ 1
+ ],
+ "date_created": "2025-08-12",
+ "type": "pve",
+ "updateable": false,
+ "privileged": false,
+ "interface_port": null,
+ "documentation": null,
+ "website": null,
+ "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons/webp/proxmox.webp",
+ "config_path": "/etc/default/pve-auto-hook",
+ "description": "This script checks for the presence of required dependencies before starting a VM or LXC container in Proxmox. It ensures that all referenced storages are available and, additionally, supports the usage of tags to check for specific dependencies. If any required dependency is missing, the VM or container will not start until the issue is resolved. This script is designed to be used as a Proxmox hookscript, which can be applied to both QEMU VMs and LXC containers.",
+ "install_methods": [
+ {
+ "type": "default",
+ "script": "tools/pve/dependency-check.sh",
+ "resources": {
+ "cpu": null,
+ "ram": null,
+ "hdd": null,
+ "os": null,
+ "version": null
+ }
+ }
+ ],
+ "default_credentials": {
+ "username": null,
+ "password": null
+ },
+ "notes": [
+ {
+ "text": "Execute within the Proxmox shell",
+ "type": "info"
+ },
+ {
+      "text": "To wait until a certain host is available, tag the VM or container with `dep_ping_<hostname>` where `<hostname>` is the name or IP of the host to ping. The script will wait until the host is reachable before proceeding with the startup.",
+ "type": "info"
+ },
+ {
+      "text": "To wait until a certain TCP port is open, tag the VM or container with `dep_tcp_<hostname>_<port>` where `<hostname>` is the name or IP of the host and `<port>` is the TCP port number. The script will wait until the port is open before proceeding with the startup.",
+ "type": "info"
+ }
+ ]
+}
diff --git a/frontend/public/json/dispatcharr.json b/frontend/public/json/dispatcharr.json
new file mode 100644
index 00000000..46abd555
--- /dev/null
+++ b/frontend/public/json/dispatcharr.json
@@ -0,0 +1,35 @@
+{
+ "name": "Dispatcharr",
+ "slug": "dispatcharr",
+ "categories": [
+ 14
+ ],
+ "date_created": "2025-07-01",
+ "type": "ct",
+ "updateable": true,
+ "privileged": false,
+ "interface_port": 9191,
+ "documentation": "https://dispatcharr.github.io/Dispatcharr-Docs/",
+ "website": "https://dispatcharr.github.io/Dispatcharr-Docs/",
+ "logo": "https://raw.githubusercontent.com/Dispatcharr/Dispatcharr/refs/heads/main/frontend/src/images/logo.png",
+ "config_path": "",
+ "description": "Dispatcharr is an open-source powerhouse for managing IPTV streams and EPG data with elegance and control. Born from necessity and built with passion, it started as a personal project by OkinawaBoss and evolved with contributions from legends like dekzter, SergeantPanda and Bucatini.",
+ "install_methods": [
+ {
+ "type": "default",
+ "script": "ct/dispatcharr.sh",
+ "resources": {
+ "cpu": 1,
+ "ram": 1024,
+ "hdd": 8,
+ "os": "debian",
+ "version": "12"
+ }
+ }
+ ],
+ "default_credentials": {
+ "username": null,
+ "password": null
+ },
+ "notes": []
+}
diff --git a/frontend/public/json/freepbx.json b/frontend/public/json/freepbx.json
new file mode 100644
index 00000000..11040c04
--- /dev/null
+++ b/frontend/public/json/freepbx.json
@@ -0,0 +1,40 @@
+{
+ "name": "FreePBX",
+ "slug": "freepbx",
+ "categories": [
+ 0
+ ],
+ "date_created": "2025-05-22",
+ "type": "ct",
+ "updateable": true,
+ "privileged": false,
+ "interface_port": 80,
+ "documentation": "https://sangomakb.atlassian.net/wiki/spaces/FP/overview?homepageId=8454359",
+ "website": "https://www.freepbx.org/",
+ "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons/webp/freepbx.webp",
+ "config_path": "",
+ "description": "FreePBX is a web-based open-source graphical user interface that manages Asterisk, a voice over IP and telephony server.",
+ "install_methods": [
+ {
+ "type": "default",
+ "script": "ct/freepbx.sh",
+ "resources": {
+ "cpu": 2,
+ "ram": 2048,
+ "hdd": 10,
+ "os": "debian",
+ "version": "12"
+ }
+ }
+ ],
+ "default_credentials": {
+ "username": null,
+ "password": null
+ },
+ "notes": [
+ {
+ "text": "This script uses the official FreePBX install script. Check it here: https://github.com/FreePBX/sng_freepbx_debian_install",
+ "type": "info"
+ }
+ ]
+}
diff --git a/frontend/public/json/healthchecks.json b/frontend/public/json/healthchecks.json
deleted file mode 100644
index ac5f3fe2..00000000
--- a/frontend/public/json/healthchecks.json
+++ /dev/null
@@ -1,35 +0,0 @@
-{
- "name": "Healthchecks",
- "slug": "healthchecks",
- "categories": [
- 9
- ],
- "date_created": "2025-07-02",
- "type": "ct",
- "updateable": true,
- "privileged": false,
- "config_path": "/opt/healthchecks/.env",
- "interface_port": 3000,
- "documentation": "https://healthchecks.io/",
- "website": "https://healthchecks.io/",
- "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons/svg/healthchecks.svg",
- "description": "Healthchecks is an open-source self-hosted application.",
- "install_methods": [
- {
- "type": "default",
- "script": "ct/healthchecks.sh",
- "resources": {
- "cpu": 1,
- "ram": 1024,
- "hdd": 2,
- "os": "Debian",
- "version": "12"
- }
- }
- ],
- "default_credentials": {
- "username": null,
- "password": null
- },
- "notes": []
-}
\ No newline at end of file
diff --git a/frontend/public/json/librespeed.json b/frontend/public/json/librespeed.json
deleted file mode 100644
index 713b2fe8..00000000
--- a/frontend/public/json/librespeed.json
+++ /dev/null
@@ -1,35 +0,0 @@
-{
- "name": "Librespeed",
- "slug": "librespeed",
- "categories": [
- 4
- ],
- "date_created": "2025-04-26",
- "type": "ct",
- "updateable": true,
- "privileged": false,
- "interface_port": 80,
- "documentation": "https://github.com/librespeed/speedtest/blob/master/doc.md",
- "config_path": "",
- "website": "https://librespeed.org",
- "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons/webp/librespeed.webp",
- "description": "No Flash, No Java, No Websocket, No Bullshit. This is a very lightweight speed test implemented in Javascript, using XMLHttpRequest and Web Workers.",
- "install_methods": [
- {
- "type": "default",
- "script": "ct/librespeed.sh",
- "resources": {
- "cpu": 1,
- "ram": 512,
- "hdd": 4,
- "os": "Debian",
- "version": "12"
- }
- }
- ],
- "default_credentials": {
- "username": "root",
- "password": null
- },
- "notes": []
-}
diff --git a/frontend/public/json/mediamanager.json b/frontend/public/json/mediamanager.json
deleted file mode 100644
index 5df6c4b7..00000000
--- a/frontend/public/json/mediamanager.json
+++ /dev/null
@@ -1,41 +0,0 @@
-{
- "name": "MediaManager",
- "slug": "mediamanager",
- "categories": [
- 14,
- 13
- ],
- "date_created": "2025-07-22",
- "type": "ct",
- "updateable": true,
- "privileged": false,
- "interface_port": 8000,
- "documentation": "https://maxdorninger.github.io/MediaManager/introduction.html",
- "config_path": "/opt/mm_data/config.toml",
- "website": "https://github.com/maxdorninger/MediaManager",
- "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons/webp/mediamanager.webp",
- "description": "A modern selfhosted media management system for your media library",
- "install_methods": [
- {
- "type": "default",
- "script": "ct/mediamanager.sh",
- "resources": {
- "cpu": 2,
- "ram": 2048,
- "hdd": 4,
- "os": "Debian",
- "version": "12"
- }
- }
- ],
- "default_credentials": {
- "username": "",
- "password": "admin"
- },
- "notes": [
- {
- "text": "During the installation, provide the email address of the first admin user",
- "type": "info"
- }
- ]
-}
diff --git a/frontend/public/json/resiliosync.json b/frontend/public/json/resiliosync.json
new file mode 100644
index 00000000..0b61fe3d
--- /dev/null
+++ b/frontend/public/json/resiliosync.json
@@ -0,0 +1,40 @@
+{
+ "name": "Resilio Sync",
+ "slug": "resiliosync",
+ "categories": [
+ 11
+ ],
+ "date_created": "2025-08-14",
+ "type": "ct",
+ "updateable": true,
+ "privileged": false,
+ "config_path": "/etc/resilio-sync/config.json",
+ "interface_port": 8888,
+ "documentation": "https://help.resilio.com/",
+ "website": "https://www.resilio.com/sync",
+ "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons/webp/resilio-sync.webp",
+ "description": "Fast, reliable, and simple file sync and share solution, powered by P2P technology. Sync files across all your devices without storing them in the cloud.",
+ "install_methods": [
+ {
+ "type": "default",
+ "script": "ct/resilio-sync.sh",
+ "resources": {
+ "cpu": 2,
+ "ram": 2048,
+ "hdd": 8,
+ "os": "debian",
+ "version": "12"
+ }
+ }
+ ],
+ "default_credentials": {
+ "username": null,
+ "password": null
+ },
+ "notes": [
+ {
+ "text": "After free registration, you will receive a license keyfile to your email address. Upload it into any LXC directory and select on first run.",
+ "type": "info"
+ }
+ ]
+}
diff --git a/frontend/public/json/romm.json b/frontend/public/json/romm.json
new file mode 100644
index 00000000..364a7ea2
--- /dev/null
+++ b/frontend/public/json/romm.json
@@ -0,0 +1,35 @@
+{
+ "name": "RomM",
+ "slug": "romm",
+ "categories": [
+ 21
+ ],
+ "date_created": "2025-03-10",
+ "type": "ct",
+ "updateable": true,
+ "privileged": false,
+ "interface_port": 8080,
+ "documentation": "https://docs.romm.app/latest/",
+ "website": "https://romm.app/",
+ "config_path": "/opt",
+ "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons/webp/romm.webp",
+ "description": "RomM (ROM Manager) allows you to scan, enrich, browse and play your game collection with a clean and responsive interface. Support for multiple platforms, various naming schemes, and custom tags.",
+ "install_methods": [
+ {
+ "type": "default",
+ "script": "ct/romm.sh",
+ "resources": {
+ "cpu": 2,
+ "ram": 4096,
+ "hdd": 20,
+ "os": "ubuntu",
+ "version": "24.04"
+ }
+ }
+ ],
+ "default_credentials": {
+ "username": "romm",
+ "password": "changeme"
+ },
+ "notes": []
+}
diff --git a/frontend/public/json/stylus.json b/frontend/public/json/stylus.json
new file mode 100644
index 00000000..9ef41244
--- /dev/null
+++ b/frontend/public/json/stylus.json
@@ -0,0 +1,40 @@
+{
+ "name": "Stylus",
+ "slug": "stylus",
+ "categories": [
+ 4
+ ],
+ "date_created": "2025-09-06",
+ "type": "ct",
+ "updateable": true,
+ "privileged": false,
+ "interface_port": 8000,
+ "documentation": "https://mmastrac.github.io/stylus/",
+ "website": "https://github.com/mmastrac/stylus",
+  "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons/webp/stylus.webp",
+ "config_path": "/opt/stylus/config.yaml",
+ "description": "Stylus (style + status) is a lightweight status page for infrastructure and networks. Configure a set of bash scripts that test the various parts of your infrastructure, set up visualizations with minimal configuration, and Stylus will generate you a dashboard for your system.",
+ "install_methods": [
+ {
+ "type": "default",
+ "script": "ct/stylus.sh",
+ "resources": {
+ "cpu": 1,
+ "ram": 1024,
+ "hdd": 2,
+ "os": "debian",
+ "version": "12"
+ }
+ }
+ ],
+ "default_credentials": {
+ "username": null,
+ "password": null
+ },
+ "notes": [
+ {
+ "text": "Configuration Path: `/opt/stylus/config.yaml`",
+ "type": "info"
+ }
+ ]
+}
diff --git a/frontend/public/json/swizzin.json b/frontend/public/json/swizzin.json
deleted file mode 100644
index 2f08f31f..00000000
--- a/frontend/public/json/swizzin.json
+++ /dev/null
@@ -1,48 +0,0 @@
-{
- "name": "Swizzin",
- "slug": "swizzin",
- "categories": [
- 15
- ],
- "date_created": "2025-08-01",
- "type": "ct",
- "updateable": true,
- "privileged": false,
- "interface_port": 80,
- "documentation": "https://swizzin.ltd/getting-started",
- "config_path": "/etc/swizzin/",
- "website": "https://swizzin.ltd/",
- "logo": "https://swizzin.ltd/img/logo-sm.png",
- "description": "Swizzin is a light-weight, modular, and user-friendly seedbox solution for Debian-based servers. It allows for the easy installation and management of a wide variety of applications commonly used for torrenting and media management, such as rTorrent, Sonarr, Radarr, and Plex, all accessible through a command-line utility or a web-based dashboard.",
- "install_methods": [
- {
- "type": "default",
- "script": "ct/swizzin.sh",
- "resources": {
- "cpu": 2,
- "ram": 4096,
- "hdd": 20,
- "os": "Debian",
- "version": "12"
- }
- }
- ],
- "default_credentials": {
- "username": null,
- "password": null
- },
- "notes": [
- {
- "text": "It is very recommended to install at least the 'panel' for web access, and 'nginx' for easy access to other apps.",
- "type": "warning"
- },
- {
- "text": "Installation might take a long time if choosing to install many apps. Be patient.",
- "type": "warning"
- },
- {
- "text": "Swizzin is a management suite, not a single application. Use the 'box' command inside the container to install/manage individual apps like rTorrent, Sonarr, etc. A full list can be found in documentation.",
- "type": "info"
- }
- ]
-}
\ No newline at end of file
diff --git a/frontend/public/json/tracktor.json b/frontend/public/json/tracktor.json
deleted file mode 100644
index 2d7d0a07..00000000
--- a/frontend/public/json/tracktor.json
+++ /dev/null
@@ -1,35 +0,0 @@
-{
- "name": "Tracktor",
- "slug": "tracktor",
- "categories": [
- 9
- ],
- "date_created": "2025-08-06",
- "type": "ct",
- "updateable": true,
- "privileged": false,
- "interface_port": 3000,
- "documentation": "https://tracktor.bytedge.in/introduction.html",
- "config_path": "/opt/tracktor/app/server/.env",
- "website": "https://tracktor.bytedge.in/",
- "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons/svg/tracktor.svg",
- "description": "Tracktor is an open-source web application for comprehensive vehicle management.\nEasily track ⛽ fuel consumption, 🛠️ maintenance, 🛡️ insurance, and 📄 regulatory documents for all your vehicles in one place. ",
- "install_methods": [
- {
- "type": "default",
- "script": "ct/tracktor.sh",
- "resources": {
- "cpu": 1,
- "ram": 1024,
- "hdd": 6,
- "os": "Debian",
- "version": "12"
- }
- }
- ],
- "default_credentials": {
- "username": null,
- "password": null
- },
- "notes": []
-}
diff --git a/frontend/public/json/tunarr.json b/frontend/public/json/tunarr.json
new file mode 100644
index 00000000..d77f28d1
--- /dev/null
+++ b/frontend/public/json/tunarr.json
@@ -0,0 +1,35 @@
+{
+ "name": "Tunarr",
+ "slug": "tunarr",
+ "categories": [
+ 13
+ ],
+ "date_created": "2025-09-06",
+ "type": "ct",
+ "updateable": true,
+ "privileged": false,
+ "config_path": "/opt/tunarr/.env",
+ "interface_port": 8000,
+ "documentation": "https://tunarr.com/",
+ "website": "https://tunarr.com/",
+ "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons/png/tunarr.png",
+ "description": "Create a classic TV experience using your own media - IPTV backed by Plex/Jellyfin/Emby.",
+ "install_methods": [
+ {
+ "type": "default",
+ "script": "ct/tunarr.sh",
+ "resources": {
+ "cpu": 2,
+ "ram": 1024,
+ "hdd": 5,
+ "os": "Debian",
+ "version": "12"
+ }
+ }
+ ],
+ "default_credentials": {
+ "username": null,
+ "password": null
+ },
+ "notes": []
+}
diff --git a/frontend/public/json/twingate-connector.json b/frontend/public/json/twingate-connector.json
deleted file mode 100644
index 55eeaacb..00000000
--- a/frontend/public/json/twingate-connector.json
+++ /dev/null
@@ -1,40 +0,0 @@
-{
- "name": "twingate-connector",
- "slug": "twingate-connector",
- "categories": [
- 4
- ],
- "date_created": "2025-07-17",
- "type": "ct",
- "updateable": true,
- "privileged": false,
- "interface_port": null,
- "documentation": "https://www.twingate.com/docs/",
- "config_path": "/etc/twingate/connector.conf",
- "website": "https://www.twingate.com",
- "logo": "https://avatars.githubusercontent.com/u/97470429?s=200&v=4",
- "description": "Twingate Connectors are lightweight software components that establish secure, least-privileged access between private network resources and authorized users without exposing the network to the internet. They act as outbound-only bridges between your protected resources and the Twingate infrastructure, ensuring zero-trust access without the need for a VPN.",
- "install_methods": [
- {
- "type": "default",
- "script": "ct/twingate-connector.sh",
- "resources": {
- "cpu": 1,
- "ram": 2048,
- "hdd": 2,
- "os": "Ubuntu",
- "version": "22.04"
- }
- }
- ],
- "default_credentials": {
- "username": null,
- "password": null
- },
- "notes": [
- {
- "text": "If you need to update your access or refresh tokens, they can be found in /etc/twingate/connector.conf",
- "type": "info"
- }
- ]
-}
diff --git a/frontend/public/json/vikunja.json b/frontend/public/json/vikunja.json
deleted file mode 100644
index ea171140..00000000
--- a/frontend/public/json/vikunja.json
+++ /dev/null
@@ -1,35 +0,0 @@
-{
- "name": "Vikunja",
- "slug": "vikunja",
- "categories": [
- 12
- ],
- "date_created": "2024-11-05",
- "type": "ct",
- "updateable": true,
- "privileged": false,
- "interface_port": 3456,
- "documentation": null,
- "website": "https://vikunja.io/",
- "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons/webp/vikunja.webp",
- "config_path": "/etc/vikunja/config.yml",
- "description": "Vikunja is a powerful self-hosted todo app. It allows you to create and manage to-do lists. You can plan tasks, set priorities and collaborate with others. The best part is that your data is safe with you and you can customize the app to your liking. It's like a personal assistant that helps you stay organized.",
- "install_methods": [
- {
- "type": "default",
- "script": "ct/vikunja.sh",
- "resources": {
- "cpu": 1,
- "ram": 1024,
- "hdd": 4,
- "os": "debian",
- "version": "12"
- }
- }
- ],
- "default_credentials": {
- "username": null,
- "password": null
- },
- "notes": []
-}
diff --git a/install/alpine-garage-install.sh b/install/alpine-garage-install.sh
new file mode 100644
index 00000000..126401d9
--- /dev/null
+++ b/install/alpine-garage-install.sh
@@ -0,0 +1,98 @@
+#!/usr/bin/env bash
+
+# Copyright (c) 2021-2025 community-scripts ORG
+# Author: MickLesk (CanbiZ)
+# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
+# Source: https://garagehq.deuxfleurs.fr/
+
+source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
+color
+verb_ip6
+catch_errors
+setting_up_container
+network_check
+update_os
+
+msg_info "Preparing directories"
+mkdir -p /var/lib/garage/meta /var/lib/garage/data /var/lib/garage/snapshots
+msg_ok "Prepared directories"
+
+msg_info "Setup Garage packages"
+$STD apk add --no-cache garage garage-openrc openssl
+msg_ok "Setup Garage packages"
+
+# msg_info "Generating RPC secret"
+# if [[ ! -s /etc/garage.rpc_secret ]]; then
+# openssl rand -hex 32 | tr -d '\n' >/etc/garage.rpc_secret
+# chmod 600 /etc/garage.rpc_secret
+# fi
+# msg_ok "Generated RPC secret"
+
+# msg_info "Generating tokens"
+# if [[ ! -s /etc/garage.tokens.env ]]; then
+# ADMIN_TOKEN="$(openssl rand -base64 32)"
+# METRICS_TOKEN="$(openssl rand -base64 32)"
+# cat >/etc/garage.tokens.env </etc/garage.toml <"$INSTALL_DIR"/CALIBRE_RELEASE
+echo "${KEPUB_VERSION#v}" >"$INSTALL_DIR"/KEPUBIFY_RELEASE
+sed 's/^/v/' ~/.autocaliweb >"$INSTALL_DIR"/ACW_RELEASE
+
+cd "$INSTALL_DIR"
+$STD uv venv "$VIRTUAL_ENV"
+$STD uv sync --all-extras --active
+cat <<EOF >./dirs.json
+{
+ "ingest_folder": "$INGEST_DIR",
+ "calibre_library_dir": "$CALIBRE_LIB_DIR",
+ "tmp_conversion_dir": "$CONFIG_DIR/.acw_conversion_tmp"
+}
+EOF
+useradd -s /usr/sbin/nologin -d "$CONFIG_DIR" -M "$SERVICE_USER"
+ln -sf "$CONFIG_DIR"/.config/calibre/plugins "$CONFIG_DIR"/calibre_plugins
+cat <"$INSTALL_DIR"/.env
+ACW_INSTALL_DIR=$INSTALL_DIR
+ACW_CONFIG_DIR=$CONFIG_DIR
+ACW_USER=$SERVICE_USER
+ACW_GROUP=$SERVICE_GROUP
+LIBRARY_DIR=$CALIBRE_LIB_DIR
+EOF
+msg_ok "Configured Autocaliweb"
+
+msg_info "Creating ACWSync Plugin for KOReader"
+cd "$INSTALL_DIR"/koreader/plugins
+PLUGIN_DIGEST="$(find acwsync.koplugin -type f -name "*.lua" -o -name "*.json" | sort | xargs sha256sum | sha256sum | cut -d' ' -f1)"
+echo "Plugin files digest: $PLUGIN_DIGEST" >acwsync.koplugin/${PLUGIN_DIGEST}.digest
+echo "Build date: $(date)" >>acwsync.koplugin/${PLUGIN_DIGEST}.digest
+echo "Files included:" >>acwsync.koplugin/${PLUGIN_DIGEST}.digest
+$STD zip -r koplugin.zip acwsync.koplugin/
+cp -r koplugin.zip "$INSTALL_DIR"/cps/static
+msg_ok "Created ACWSync Plugin"
+
+msg_info "Initializing databases"
+KEPUBIFY_PATH=$(command -v kepubify 2>/dev/null || echo "/usr/bin/kepubify")
+EBOOK_CONVERT_PATH=$(command -v ebook-convert 2>/dev/null || echo "/usr/bin/ebook-convert")
+CALIBRE_BIN_DIR=$(dirname "$EBOOK_CONVERT_PATH")
+curl -fsSL https://github.com/gelbphoenix/autocaliweb/raw/refs/heads/main/library/metadata.db -o "$CALIBRE_LIB_DIR"/metadata.db
+curl -fsSL https://github.com/gelbphoenix/autocaliweb/raw/refs/heads/main/library/app.db -o "$CONFIG_DIR"/app.db
+sqlite3 "$CONFIG_DIR/app.db" <"$SCRIPTS_DIR"/ingest_watcher.sh
+#!/bin/bash
+
+INSTALL_PATH="$INSTALL_DIR"
+WATCH_FOLDER=\$(grep -o '"ingest_folder": "[^"]*' \${INSTALL_PATH}/dirs.json | grep -o '[^"]*\$')
+echo "[acw-ingest-service] Watching folder: \$WATCH_FOLDER"
+
+# Monitor the folder for new files
+/usr/bin/inotifywait -m -r --format="%e %w%f" -e close_write -e moved_to "\$WATCH_FOLDER" |
+while read -r events filepath ; do
+ echo "[acw-ingest-service] New files detected - \$filepath - Starting Ingest Processor..."
+ # Use the Python interpreter from the virtual environment
+ \${INSTALL_PATH}/venv/bin/python \${INSTALL_PATH}/scripts/ingest_processor.py "\$filepath"
+done
+EOF
+
+# auto-zipper
+cat <<EOF >"$SCRIPTS_DIR"/auto_zipper_wrapper.sh
+#!/bin/bash
+
+# Source virtual environment
+source ${INSTALL_DIR}/venv/bin/activate
+
+WAKEUP="23:59"
+
+while true; do
+ # Replace expr with modern Bash arithmetic (safer and less prone to parsing issues)
+ # fix: expr: non-integer argument and sleep: missing operand
+ SECS=\$(( \$(date -d "\$WAKEUP" +%s) - \$(date -d "now" +%s) ))
+ if [[ \$SECS -lt 0 ]]; then
+ SECS=\$(( \$(date -d "tomorrow \$WAKEUP" +%s) - \$(date -d "now" +%s) ))
+ fi
+ echo "[acw-auto-zipper] Next run in \$SECS seconds."
+ sleep \$SECS &
+ wait \$!
+
+ # Use virtual environment python
+ python ${SCRIPTS_DIR}/auto_zip.py
+
+ if [[ \$? == 1 ]]; then
+ echo "[acw-auto-zipper] Error occurred during script initialisation."
+ elif [[ \$? == 2 ]]; then
+ echo "[acw-auto-zipper] Error occurred while zipping today's files."
+ elif [[ \$? == 3 ]]; then
+ echo "[acw-auto-zipper] Error occurred while trying to remove zipped files."
+ fi
+
+ sleep 60
+done
+EOF
+
+# metadata change detector
+cat <<EOF >"$SCRIPTS_DIR"/metadata_change_detector_wrapper.sh
+#!/bin/bash
+# metadata_change_detector_wrapper.sh - Wrapper for periodic metadata enforcement
+
+# Source virtual environment
+source ${INSTALL_DIR}/venv/bin/activate
+
+# Configuration
+CHECK_INTERVAL=300 # Check every 5 minutes (300 seconds)
+METADATA_LOGS_DIR="${INSTALL_DIR}/metadata_change_logs"
+
+echo "[metadata-change-detector] Starting metadata change detector service..."
+echo "[metadata-change-detector] Checking for changes every \$CHECK_INTERVAL seconds"
+
+while true; do
+ # Check if there are any log files to process
+ if [ -d "\$METADATA_LOGS_DIR" ] && [ "\$(ls -A \$METADATA_LOGS_DIR 2>/dev/null)" ]; then
+ echo "[metadata-change-detector] Found metadata change logs, processing..."
+
+ # Process each log file
+ for log_file in "\$METADATA_LOGS_DIR"/*.json; do
+ if [ -f "\$log_file" ]; then
+ log_name=\$(basename "\$log_file")
+ echo "[metadata-change-detector] Processing log: \$log_name"
+
+ # Call cover_enforcer.py with the log file
+ ${INSTALL_DIR}/venv/bin/python ${SCRIPTS_DIR}/cover_enforcer.py --log "\$log_name"
+
+ if [ \$? -eq 0 ]; then
+ echo "[metadata-change-detector] Successfully processed \$log_name"
+ else
+ echo "[metadata-change-detector] Error processing \$log_name"
+ fi
+ fi
+ done
+ else
+ echo "[metadata-change-detector] No metadata changes detected"
+ fi
+
+ echo "[metadata-change-detector] Sleeping for \$CHECK_INTERVAL seconds..."
+ sleep \$CHECK_INTERVAL
+done
+EOF
+chmod +x "$SCRIPTS_DIR"/{ingest_watcher.sh,auto_zipper_wrapper.sh,metadata_change_detector_wrapper.sh}
+chown -R "$SERVICE_USER":"$SERVICE_GROUP" {"$INSTALL_DIR","$CONFIG_DIR","$INGEST_DIR","$CALIBRE_LIB_DIR"}
+
+# systemd service files
+SYS_PATH="/etc/systemd/system"
+cat <<EOF >"$SYS_PATH"/autocaliweb.service
+[Unit]
+Description=Autocaliweb
+After=network.target
+Wants=network-online.target
+After=network-online.target
+
+[Service]
+Type=simple
+User=$SERVICE_USER
+Group=$SERVICE_GROUP
+WorkingDirectory=$INSTALL_DIR
+Environment=PATH=$INSTALL_DIR/venv/bin:/usr/bin:/bin
+Environment=PYTHONPATH=$SCRIPTS_DIR:$INSTALL_DIR
+Environment=PYTHONDONTWRITEBYTECODE=1
+Environment=PYTHONUNBUFFERED=1
+Environment=CALIBRE_DBPATH=$CONFIG_DIR
+EnvironmentFile=$INSTALL_DIR/.env
+ExecStart=$INSTALL_DIR/venv/bin/python $INSTALL_DIR/cps.py -p $CONFIG_DIR/app.db
+
+Restart=always
+RestartSec=10
+StandardOutput=journal
+StandardError=journal
+
+[Install]
+WantedBy=multi-user.target
+EOF
+
+cat <<EOF >"$SYS_PATH"/acw-ingest-service.service
+[Unit]
+Description=Autocaliweb Ingest Processor Service
+After=autocaliweb.service
+Requires=autocaliweb.service
+
+[Service]
+User=${SERVICE_USER}
+Group=${SERVICE_GROUP}
+WorkingDirectory=${INSTALL_DIR}
+Environment=CALIBRE_DBPATH=${CONFIG_DIR}
+Environment=HOME=${CONFIG_DIR}
+ExecStart=/bin/bash ${SCRIPTS_DIR}/ingest_watcher.sh
+Restart=always
+StandardOutput=journal
+StandardError=journal
+
+[Install]
+WantedBy=multi-user.target
+EOF
+
+cat <<EOF >"$SYS_PATH"/acw-auto-zipper.service
+[Unit]
+Description=Autocaliweb Auto Zipper Service
+After=network.target
+
+[Service]
+User=${SERVICE_USER}
+Group=${SERVICE_GROUP}
+WorkingDirectory=${INSTALL_DIR}
+Environment=CALIBRE_DBPATH=${CONFIG_DIR}
+ExecStart=${SCRIPTS_DIR}/auto_zipper_wrapper.sh
+Restart=always
+StandardOutput=journal
+StandardError=journal
+
+[Install]
+WantedBy=multi-user.target
+EOF
+
+cat <<EOF >"$SYS_PATH"/metadata-change-detector.service
+[Unit]
+Description=Autocaliweb Metadata Change Detector
+After=network.target
+
+[Service]
+User=${SERVICE_USER}
+Group=${SERVICE_GROUP}
+WorkingDirectory=${INSTALL_DIR}
+ExecStart=/bin/bash ${SCRIPTS_DIR}/metadata_change_detector_wrapper.sh
+Restart=always
+StandardOutput=journal
+StandardError=journal
+Environment=CALIBRE_DBPATH=${CONFIG_DIR}
+Environment=HOME=${CONFIG_DIR}
+[Install]
+WantedBy=multi-user.target
+EOF
+
+systemctl -q enable --now autocaliweb acw-ingest-service acw-auto-zipper metadata-change-detector
+msg_ok "Created scripts and service files"
+
+motd_ssh
+customize
+
+msg_info "Cleaning up"
+$STD apt-get -y autoremove
+$STD apt-get -y autoclean
+msg_ok "Cleaned"
diff --git a/install/debian-install.sh b/install/debian-install.sh
index fcca74fc..e6259184 100644
--- a/install/debian-install.sh
+++ b/install/debian-install.sh
@@ -17,7 +17,7 @@ msg_info "Installing Dependencies"
$STD apt-get install -y gpg
msg_ok "Installed Dependencies"
-setup_mariadb
+#setup_mariadb
#FFMPEG_VERSION="n7.1.1" FFMPEG_TYPE="full" setup_ffmpeg
diff --git a/install/vikunja-install.sh b/install/deferred/vikunja-install.sh
similarity index 100%
rename from install/vikunja-install.sh
rename to install/deferred/vikunja-install.sh
diff --git a/install/dispatcharr-install.sh b/install/dispatcharr-install.sh
new file mode 100644
index 00000000..98d9435a
--- /dev/null
+++ b/install/dispatcharr-install.sh
@@ -0,0 +1,220 @@
+#!/usr/bin/env bash
+
+# Copyright (c) 2021-2025 community-scripts ORG
+# Author: ekke85
+# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
+# Source: https://github.com/Dispatcharr/Dispatcharr
+
+source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
+color
+verb_ip6
+catch_errors
+setting_up_container
+network_check
+update_os
+
+# msg_info "Creating ${APP_USER} user"
+# groupadd -f $APP_GROUP
+# useradd -M -s /usr/sbin/nologin -g $APP_GROUP $APP_USER || true
+# msg_ok "Created ${APP_USER} user"
+
+msg_info "Installing Dependencies"
+$STD apt-get install -y \
+ build-essential \
+ gcc \
+ libpcre3-dev \
+ libpq-dev \
+ nginx \
+ redis-server \
+ ffmpeg \
+ procps \
+ streamlink
+msg_ok "Installed Dependencies"
+
+PYTHON_VERSION="3.13" setup_uv
+NODE_VERSION="22" setup_nodejs
+PG_VERSION="16" setup_postgresql
+fetch_and_deploy_gh_release "dispatcharr" "Dispatcharr/Dispatcharr"
+
+msg_info "Set up PostgreSQL Database"
+DB_NAME=dispatcharr_db
+DB_USER=dispatcharr_usr
+DB_PASS="$(openssl rand -base64 18 | tr -dc 'a-zA-Z0-9' | cut -c1-13)"
+DB_URL="postgresql://${DB_USER}:${DB_PASS}@localhost:5432/${DB_NAME}"
+$STD sudo -u postgres psql -c "CREATE ROLE $DB_USER WITH LOGIN PASSWORD '$DB_PASS';"
+$STD sudo -u postgres psql -c "CREATE DATABASE $DB_NAME WITH OWNER $DB_USER ENCODING 'UTF8' TEMPLATE template0;"
+$STD sudo -u postgres psql -c "ALTER ROLE $DB_USER SET client_encoding TO 'utf8';"
+$STD sudo -u postgres psql -c "ALTER ROLE $DB_USER SET default_transaction_isolation TO 'read committed';"
+$STD sudo -u postgres psql -c "ALTER ROLE $DB_USER SET timezone TO 'UTC';"
+{
+ echo "Dispatcharr-Credentials"
+ echo "Dispatcharr Database Name: $DB_NAME"
+ echo "Dispatcharr Database User: $DB_USER"
+ echo "Dispatcharr Database Password: $DB_PASS"
+} >>~/dispatcharr.creds
+msg_ok "Set up PostgreSQL Database"
+
+msg_info "Setup Python (uv) requirements (system)"
+UV_PY="${PYTHON_VERSION:-3.13}"
+$STD uv python install "$UV_PY"
+cd /opt/dispatcharr
+PYPI_URL="https://pypi.org/simple"
+mapfile -t EXTRA_INDEX_URLS < <(grep -E '^(--(extra-)?index-url|-i)\s' requirements.txt 2>/dev/null | awk '{print $2}' | sed 's#/*$##')
+
+UV_INDEX_ARGS=(--index-url "$PYPI_URL" --index-strategy unsafe-best-match)
+for u in "${EXTRA_INDEX_URLS[@]}"; do
+ [[ -n "$u" && "$u" != "$PYPI_URL" ]] && UV_INDEX_ARGS+=(--extra-index-url "$u")
+done
+if [[ -f requirements.txt ]]; then
+ $STD uv pip install --system "${UV_INDEX_ARGS[@]}" -r requirements.txt
+fi
+$STD uv pip install --system "${UV_INDEX_ARGS[@]}" gunicorn gevent celery daphne
+ln -sf /usr/bin/ffmpeg /opt/dispatcharr/env/bin/ffmpeg
+msg_ok "Python Requirements Installed"
+
+msg_info "Building Frontend"
+cd /opt/dispatcharr/frontend
+$STD npm install --legacy-peer-deps
+$STD npm run build
+msg_ok "Built Frontend"
+
+msg_info "Running Django Migrations"
+cd /opt/dispatcharr
+set -o allexport
+source /etc/dispatcharr/dispatcharr.env
+set +o allexport
+
+$STD ./.venv/bin/python manage.py migrate --noinput
+$STD ./.venv/bin/python manage.py collectstatic --noinput
+msg_ok "Migrations Complete"
+
+msg_info "Configuring Nginx"
+cat <<EOF >/etc/nginx/sites-available/dispatcharr.conf
+server {
+ listen 9191;
+
+ location / {
+ include proxy_params;
+ proxy_pass http://127.0.0.1:5656;
+ }
+
+ location /static/ {
+ alias /opt/dispatcharr/static/;
+ }
+
+ location /assets/ {
+ alias /opt/dispatcharr/frontend/dist/assets/;
+ }
+
+ location /media/ {
+ alias /opt/dispatcharr/media/;
+ }
+
+ location /ws/ {
+ proxy_pass http://127.0.0.1:8001;
+ proxy_http_version 1.1;
+ proxy_set_header Upgrade \$http_upgrade;
+ proxy_set_header Connection "Upgrade";
+ proxy_set_header Host \$host;
+ }
+}
+EOF
+
+ln -sf /etc/nginx/sites-available/dispatcharr.conf /etc/nginx/sites-enabled/dispatcharr.conf
+rm -f /etc/nginx/sites-enabled/default
+nginx -t
+systemctl restart nginx
+systemctl enable nginx
+msg_ok "Configured Nginx"
+
+msg_info "Creating systemd services"
+
+cat <<EOF >/etc/systemd/system/dispatcharr.service
+[Unit]
+Description=Gunicorn for Dispatcharr
+After=network.target postgresql.service redis-server.service
+
+[Service]
+WorkingDirectory=/opt/dispatcharr
+RuntimeDirectory=dispatcharr
+RuntimeDirectoryMode=0775
+Environment="PATH=/opt/dispatcharr/env/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin"
+EnvironmentFile=/etc/dispatcharr/dispatcharr.env
+ExecStart=/opt/dispatcharr/env/bin/gunicorn \\
+ --workers=4 \\
+ --worker-class=gevent \\
+ --timeout=300 \\
+ --bind 0.0.0.0:5656 \
+ dispatcharr.wsgi:application
+Restart=always
+KillMode=mixed
+
+[Install]
+WantedBy=multi-user.target
+EOF
+
+cat <<EOF >/etc/systemd/system/dispatcharr-celery.service
+[Unit]
+Description=Celery Worker for Dispatcharr
+After=network.target redis-server.service
+Requires=dispatcharr.service
+
+[Service]
+WorkingDirectory=/opt/dispatcharr
+Environment="PATH=/opt/dispatcharr/env/bin"
+EnvironmentFile=/etc/dispatcharr/dispatcharr.env
+Environment="CELERY_BROKER_URL=redis://localhost:6379/0"
+ExecStart=/opt/dispatcharr/env/bin/celery -A dispatcharr worker -l info -c 4
+Restart=always
+KillMode=mixed
+
+[Install]
+WantedBy=multi-user.target
+EOF
+
+cat <<EOF >/etc/systemd/system/dispatcharr-celerybeat.service
+[Unit]
+Description=Celery Beat Scheduler for Dispatcharr
+After=network.target redis-server.service
+Requires=dispatcharr.service
+
+[Service]
+WorkingDirectory=/opt/dispatcharr
+Environment="PATH=/opt/dispatcharr/env/bin"
+EnvironmentFile=/etc/dispatcharr/dispatcharr.env
+Environment="CELERY_BROKER_URL=redis://localhost:6379/0"
+ExecStart=/opt/dispatcharr/env/bin/celery -A dispatcharr beat -l info
+Restart=always
+KillMode=mixed
+
+[Install]
+WantedBy=multi-user.target
+EOF
+
+cat <<EOF >/etc/systemd/system/dispatcharr-daphne.service
+[Unit]
+Description=Daphne for Dispatcharr (ASGI)
+After=network.target
+Requires=dispatcharr.service
+
+[Service]
+WorkingDirectory=/opt/dispatcharr
+Environment="PATH=/opt/dispatcharr/env/bin"
+EnvironmentFile=/etc/dispatcharr/dispatcharr.env
+ExecStart=/opt/dispatcharr/env/bin/daphne -b 0.0.0.0 -p 8001 dispatcharr.asgi:application
+Restart=always
+KillMode=mixed
+
+[Install]
+WantedBy=multi-user.target
+EOF
+systemctl enable -q --now dispatcharr dispatcharr-celery dispatcharr-celerybeat dispatcharr-daphne
+msg_ok "Started Dispatcharr Services"
+
+motd_ssh
+customize
+
+msg_info "Cleaning up"
+$STD apt-get -y autoremove
+$STD apt-get -y autoclean
+msg_ok "Cleaned"
diff --git a/install/ente-install.sh b/install/ente-install.sh
new file mode 100644
index 00000000..b6527758
--- /dev/null
+++ b/install/ente-install.sh
@@ -0,0 +1,174 @@
+#!/usr/bin/env bash
+
+# Copyright (c) 2021-2025 community-scripts ORG
+# Author: MickLesk
+# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
+# Source: https://github.com/ente-io/ente
+
+source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
+color
+verb_ip6
+catch_errors
+setting_up_container
+network_check
+update_os
+
+msg_info "Installing Dependencies"
+$STD apt-get install -y \
+ libsodium23 \
+ libsodium-dev \
+ pkg-config \
+ caddy \
+ gcc
+msg_ok "Installed Dependencies"
+
+PG_VERSION="17" setup_postgresql
+setup_go
+NODE_VERSION="22" NODE_MODULE="yarn" setup_nodejs
+fetch_and_deploy_gh_release "ente" "ente-io/ente" "tarball" "latest" "/opt/ente"
+
+msg_info "Setting up PostgreSQL"
+DB_NAME="ente_db"
+DB_USER="ente"
+DB_PASS="$(openssl rand -base64 18 | tr -dc 'a-zA-Z0-9' | cut -c1-13)"
+$STD sudo -u postgres psql -c "CREATE ROLE $DB_USER WITH LOGIN PASSWORD '$DB_PASS';"
+$STD sudo -u postgres psql -c "CREATE DATABASE $DB_NAME WITH OWNER $DB_USER ENCODING 'UTF8' TEMPLATE template0;"
+$STD sudo -u postgres psql -c "ALTER ROLE $DB_USER SET client_encoding TO 'utf8';"
+$STD sudo -u postgres psql -c "ALTER ROLE $DB_USER SET default_transaction_isolation TO 'read committed';"
+$STD sudo -u postgres psql -c "ALTER ROLE $DB_USER SET timezone TO 'UTC';"
+{
+ echo "Ente Credentials"
+ echo "Database Name: $DB_NAME"
+ echo "Database User: $DB_USER"
+ echo "Database Password: $DB_PASS"
+} >>~/ente.creds
+msg_ok "Set up PostgreSQL"
+
+msg_info "Building Museum (server)"
+cd /opt/ente/server
+$STD corepack enable
+$STD go mod tidy
+export CGO_ENABLED=1
+CGO_CFLAGS="$(pkg-config --cflags libsodium || true)"
+CGO_LDFLAGS="$(pkg-config --libs libsodium || true)"
+if [ -z "$CGO_CFLAGS" ]; then
+ CGO_CFLAGS="-I/usr/include"
+fi
+if [ -z "$CGO_LDFLAGS" ]; then
+ CGO_LDFLAGS="-lsodium"
+fi
+export CGO_CFLAGS
+export CGO_LDFLAGS
+$STD go build cmd/museum/main.go
+msg_ok "Built Museum"
+
+msg_info "Generating Secrets"
+SECRET_ENC=$($STD go run tools/gen-random-keys/main.go | grep "encryption" | awk '{print $2}')
+SECRET_HASH=$($STD go run tools/gen-random-keys/main.go | grep "hash" | awk '{print $2}')
+SECRET_JWT=$($STD go run tools/gen-random-keys/main.go | grep "jwt" | awk '{print $2}')
+msg_ok "Generated Secrets"
+
+msg_info "Creating museum.yaml"
+cat <<EOF >/opt/ente/server/museum.yaml
+db:
+ host: 127.0.0.1
+ port: 5432
+ name: $DB_NAME
+ user: $DB_USER
+ password: $DB_PASS
+
+s3:
+ are_local_buckets: true
+ use_path_style_urls: true
+ local-dev:
+ key: dummy
+ secret: dummy
+ endpoint: localhost:3200
+ region: eu-central-2
+ bucket: ente-dev
+
+apps:
+ public-albums: http://localhost:3002
+ cast: http://localhost:3004
+ accounts: http://localhost:3001
+
+key:
+ encryption: $SECRET_ENC
+ hash: $SECRET_HASH
+
+jwt:
+ secret: $SECRET_JWT
+EOF
+msg_ok "Created museum.yaml"
+
+msg_info "Building Web Applications"
+cd /opt/ente/web
+$STD yarn install
+export NEXT_PUBLIC_ENTE_ENDPOINT=http://localhost:8080
+export NEXT_PUBLIC_ENTE_ALBUMS_ENDPOINT=http://localhost:3002
+$STD yarn build
+$STD yarn build:accounts
+$STD yarn build:auth
+$STD yarn build:cast
+mkdir -p /var/www/ente/apps
+cp -r apps/photos/out /var/www/ente/apps/photos
+cp -r apps/accounts/out /var/www/ente/apps/accounts
+cp -r apps/auth/out /var/www/ente/apps/auth
+cp -r apps/cast/out /var/www/ente/apps/cast
+msg_ok "Built Web Applications"
+
+msg_info "Creating Museum Service"
+cat <<EOF >/etc/systemd/system/ente-museum.service
+[Unit]
+Description=Ente Museum Server
+After=network.target postgresql.service
+
+[Service]
+WorkingDirectory=/opt/ente/server
+ExecStart=/opt/ente/server/main -config /opt/ente/server/museum.yaml
+Restart=always
+
+[Install]
+WantedBy=multi-user.target
+EOF
+systemctl enable -q --now ente-museum
+msg_ok "Created Museum Service"
+
+msg_info "Configuring Caddy"
+cat <<EOF >/etc/caddy/Caddyfile
+:3000 {
+ root * /var/www/ente/apps/photos
+ file_server
+ try_files {path} {path}.html /index.html
+}
+:3001 {
+ root * /var/www/ente/apps/accounts
+ file_server
+ try_files {path} {path}.html /index.html
+}
+:3002 {
+ root * /var/www/ente/apps/photos
+ file_server
+ try_files {path} {path}.html /index.html
+}
+:3003 {
+ root * /var/www/ente/apps/auth
+ file_server
+ try_files {path} {path}.html /index.html
+}
+:3004 {
+ root * /var/www/ente/apps/cast
+ file_server
+ try_files {path} {path}.html /index.html
+}
+EOF
+systemctl reload caddy
+msg_ok "Configured Caddy"
+
+motd_ssh
+customize
+
+msg_info "Cleaning up"
+$STD apt-get -y autoremove
+$STD apt-get -y autoclean
+msg_ok "Cleaned"
diff --git a/install/freepbx-install.sh b/install/freepbx-install.sh
new file mode 100644
index 00000000..c5da1138
--- /dev/null
+++ b/install/freepbx-install.sh
@@ -0,0 +1,111 @@
+#!/usr/bin/env bash
+
+# Copyright (c) 2021-2025 community-scripts ORG
+# Author: Arian Nasr (arian-nasr)
+# Updated by: Javier Pastor (vsc55)
+# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
+# Source: https://www.freepbx.org/
+
+INSTALL_URL="https://github.com/FreePBX/sng_freepbx_debian_install/raw/master/sng_freepbx_debian_install.sh"
+INSTALL_PATH="/opt/sng_freepbx_debian_install.sh"
+
+source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
+color
+verb_ip6
+catch_errors
+setting_up_container
+network_check
+update_os
+
+ONLY_OPENSOURCE="${ONLY_OPENSOURCE:-no}"
+REMOVE_FIREWALL="${REMOVE_FIREWALL:-no}"
+msg_ok "Remove Commercial modules is set to: $ONLY_OPENSOURCE"
+msg_ok "Remove Firewall module is set to: $REMOVE_FIREWALL"
+
+msg_info "Downloading FreePBX installation script..."
+if curl -fsSL "$INSTALL_URL" -o "$INSTALL_PATH"; then
+ msg_ok "Download completed successfully"
+else
+ curl_exit_code=$?
+ msg_error "Error downloading FreePBX installation script (curl exit code: $curl_exit_code)"
+ msg_error "Aborting!"
+ exit 1
+fi
+
+if [[ "$VERBOSE" == "yes" ]]; then
+ msg_info "Installing FreePBX (Verbose)\n"
+else
+ msg_info "Installing FreePBX, be patient, this takes time..."
+fi
+$STD bash "$INSTALL_PATH"
+
+if [[ $ONLY_OPENSOURCE == "yes" ]]; then
+ msg_info "Removing Commercial modules..."
+
+ end_count=0
+ max=5
+ count=0
+ while fwconsole ma list | awk '/Commercial/ {found=1} END {exit !found}'; do
+ count=$((count + 1))
+ while read -r module; do
+ msg_info "Removing module: $module"
+
+ if [[ "$REMOVE_FIREWALL" == "no" ]] && [[ "$module" == "sysadmin" ]]; then
+ msg_warn "Skipping sysadmin module removal, it is required for Firewall!"
+ continue
+ fi
+
+ code=0
+ $STD fwconsole ma -f remove $module || code=$?
+ if [[ $code -ne 0 ]]; then
+ msg_error "Module $module could not be removed - error code $code"
+ else
+ msg_ok "Module $module removed successfully"
+ fi
+ done < <(fwconsole ma list | awk '/Commercial/ {print $2}')
+
+ [[ $count -ge $max ]] && break
+
+ com_list=$(fwconsole ma list)
+ end_count=$(awk '/Commercial/ {count++} END {print count + 0}' <<< "$com_list")
+ awk '/Commercial/ {found=1} END {exit !found}' <<< "$com_list" || break
+ if [[ "$REMOVE_FIREWALL" == "no" ]] && \
+ [[ $end_count -eq 1 ]] && \
+ [[ $(awk '/Commercial/ {print $2}' <<< "$com_list") == "sysadmin" ]]; then
+ break
+ fi
+
+ msg_warn "Not all commercial modules could be removed, retrying (attempt $count of $max)..."
+ done
+
+ if [[ $REMOVE_FIREWALL == "yes" ]] && [[ $end_count -gt 0 ]]; then
+ msg_info "Removing Firewall module..."
+ if $STD fwconsole ma -f remove firewall; then
+ msg_ok "Firewall module removed successfully"
+ else
+ msg_error "Firewall module could not be removed, please check manually!"
+ fi
+ fi
+
+ if [[ $end_count -eq 0 ]]; then
+ msg_ok "All commercial modules removed successfully"
+ elif [[ $end_count -eq 1 ]] && [[ $REMOVE_FIREWALL == "no" ]] && [[ $(fwconsole ma list | awk '/Commercial/ {print $2}') == "sysadmin" ]]; then
+ msg_ok "Only sysadmin module left, which is required for Firewall, skipping removal"
+ else
+ msg_warn "Some commercial modules could not be removed, please check the web interface for removal manually!"
+ fi
+
+ msg_info "Reloading FreePBX..."
+ $STD fwconsole reload
+ msg_ok "FreePBX reloaded completely"
+fi
+msg_ok "Installed FreePBX finished"
+
+motd_ssh
+customize
+
+msg_info "Cleaning up"
+rm -f "$INSTALL_PATH"
+$STD apt-get -y autoremove
+$STD apt-get -y autoclean
+msg_ok "Cleaned"
diff --git a/install/frigate-install.sh b/install/frigate-install.sh
index 52884f07..cbd8357c 100644
--- a/install/frigate-install.sh
+++ b/install/frigate-install.sh
@@ -14,120 +14,55 @@ network_check
update_os
msg_info "Installing Dependencies (Patience)"
-$STD apt-get install -y \
- git automake build-essential xz-utils libtool ccache pkg-config \
- libgtk-3-dev libavcodec-dev libavformat-dev libswscale-dev libv4l-dev libxvidcore-dev libx264-dev \
- libjpeg-dev libpng-dev libtiff-dev gfortran openexr libatlas-base-dev libssl-dev libtbb-dev \
- libopenexr-dev libgstreamer-plugins-base1.0-dev libgstreamer1.0-dev gcc gfortran \
- libopenblas-dev liblapack-dev libusb-1.0-0-dev jq moreutils tclsh libhdf5-dev libopenexr-dev nginx
+$STD apt-get install -y {git,ca-certificates,automake,build-essential,xz-utils,libtool,ccache,pkg-config,libgtk-3-dev,libavcodec-dev,libavformat-dev,libswscale-dev,libv4l-dev,libxvidcore-dev,libx264-dev,libjpeg-dev,libpng-dev,libtiff-dev,gfortran,openexr,libatlas-base-dev,libssl-dev,libtbb-dev,libdc1394-dev,libopenexr-dev,libgstreamer-plugins-base1.0-dev,libgstreamer1.0-dev,gcc,gfortran,libopenblas-dev,liblapack-dev,libusb-1.0-0-dev,jq,moreutils}
msg_ok "Installed Dependencies"
msg_info "Setup Python3"
-$STD apt-get install -y \
- python3 python3-dev python3-setuptools python3-distutils python3-pip python3-venv
-$STD pip install --upgrade pip --break-system-packages
+$STD apt-get install -y {python3,python3-dev,python3-setuptools,python3-distutils,python3-pip,python3-venv}
+$STD pip install --upgrade pip
msg_ok "Setup Python3"
-msg_info "Setup NGINX"
-apt-get update
-apt-get -y build-dep nginx
-apt-get -y install wget build-essential ccache patch ca-certificates
-update-ca-certificates -f
-export PATH="/usr/lib/ccache:$PATH"
-
-cd /tmp
-wget -nv https://nginx.org/download/nginx-1.29.0.tar.gz
-tar -xf nginx-1.29.0.tar.gz
-cd nginx-1.29.0
-
-mkdir /tmp/nginx-vod
-wget -nv https://github.com/kaltura/nginx-vod-module/archive/refs/tags/1.31.tar.gz
-tar -xf 1.31.tar.gz -C /tmp/nginx-vod --strip-components=1
-sed -i 's/MAX_CLIPS (128)/MAX_CLIPS (1080)/g' /tmp/nginx-vod/vod/media_set.h
-patch -d /tmp/nginx-vod -p1 <<'EOF'
---- a/vod/avc_hevc_parser.c
-+++ b/vod/avc_hevc_parser.c
-@@ -3,6 +3,9 @@
- bool_t
- avc_hevc_parser_rbsp_trailing_bits(bit_reader_state_t* reader)
- {
-+ // https://github.com/blakeblackshear/frigate/issues/4572
-+ return TRUE;
-+
- uint32_t one_bit;
-
- if (reader->stream.eof_reached)
-EOF
-# secure-token module
-mkdir /tmp/nginx-secure-token
-wget -nv https://github.com/kaltura/nginx-secure-token-module/archive/refs/tags/1.5.tar.gz
-tar -xf 1.5.tar.gz -C /tmp/nginx-secure-token --strip-components=1
-
-# ngx-devel-kit
-mkdir /tmp/ngx-devel-kit
-wget -nv https://github.com/vision5/ngx_devel_kit/archive/refs/tags/v0.3.3.tar.gz
-tar -xf v0.3.3.tar.gz -C /tmp/ngx-devel-kit --strip-components=1
-
-# set-misc module
-mkdir /tmp/nginx-set-misc
-wget -nv https://github.com/openresty/set-misc-nginx-module/archive/refs/tags/v0.33.tar.gz
-tar -xf v0.33.tar.gz -C /tmp/nginx-set-misc --strip-components=1
-
-# configure & build
-cd /tmp/nginx-1.29.0
-./configure --prefix=/usr/local/nginx \
- --with-file-aio \
- --with-http_sub_module \
- --with-http_ssl_module \
- --with-http_auth_request_module \
- --with-http_realip_module \
- --with-threads \
- --add-module=/tmp/ngx-devel-kit \
- --add-module=/tmp/nginx-set-misc \
- --add-module=/tmp/nginx-vod \
- --add-module=/tmp/nginx-secure-token \
- --with-cc-opt="-O3 -Wno-error=implicit-fallthrough"
-
-make CC="ccache gcc" -j"$(nproc)"
-make install
-ln -sf /usr/local/nginx/sbin/nginx /usr/local/bin/nginx
-
-# cleanup
-rm -rf /tmp/nginx-1.29.0* /tmp/nginx-vod /tmp/nginx-secure-token /tmp/ngx-devel-kit /tmp/nginx-set-misc
-
-msg_ok "NGINX with Custom Modules Built"
-
NODE_VERSION="22" NODE_MODULE="yarn" setup_nodejs
fetch_and_deploy_gh_release "go2rtc" "AlexxIT/go2rtc" "singlefile" "latest" "/usr/local/go2rtc/bin" "go2rtc_linux_amd64"
-fetch_and_deploy_gh_release "frigate" "blakeblackshear/frigate" "tarball" "v0.16.0-beta4" "/opt/frigate"
+fetch_and_deploy_gh_release "frigate" "blakeblackshear/frigate" "tarball" "latest" "/opt/frigate"
fetch_and_deploy_gh_release "libusb" "libusb/libusb" "tarball" "v1.0.29" "/opt/frigate/libusb"
-msg_info "Setting Up Hardware Acceleration"
-$STD apt-get -y install {va-driver-all,ocl-icd-libopencl1,intel-opencl-icd,vainfo,intel-gpu-tools}
-if [[ "$CTTYPE" == "0" ]]; then
- chgrp video /dev/dri
- chmod 755 /dev/dri
- chmod 660 /dev/dri/*
-fi
-msg_ok "Set Up Hardware Acceleration"
+# msg_info "Setting Up Hardware Acceleration"
+# $STD apt-get -y install {va-driver-all,ocl-icd-libopencl1,intel-opencl-icd,vainfo,intel-gpu-tools}
+# if [[ "$CTTYPE" == "0" ]]; then
+# chgrp video /dev/dri
+# chmod 755 /dev/dri
+# chmod 660 /dev/dri/*
+# fi
+# msg_ok "Set Up Hardware Acceleration"
-msg_info "Setting up Python venv"
+msg_info "Setting up Python"
cd /opt/frigate
-python3 -m venv venv
-source venv/bin/activate
-$STD pip install --upgrade pip wheel --break-system-packages
-$STD pip install -r docker/main/requirements.txt --break-system-packages
-$STD pip install -r docker/main/requirements-wheels.txt --break-system-packages
-$STD pip install -r docker/main/requirements-ov.txt --break-system-packages
+mkdir -p /opt/frigate/models
+$STD pip3 wheel --wheel-dir=/wheels -r /opt/frigate/docker/main/requirements-wheels.txt
+cp -a /opt/frigate/docker/main/rootfs/. /
+export TARGETARCH="amd64"
+echo 'libc6 libraries/restart-without-asking boolean true' | debconf-set-selections
+$STD apt update
+$STD ln -svf /usr/lib/btbn-ffmpeg/bin/ffmpeg /usr/local/bin/ffmpeg
+$STD ln -svf /usr/lib/btbn-ffmpeg/bin/ffprobe /usr/local/bin/ffprobe
+$STD pip3 install -U /wheels/*.whl
+ldconfig
+$STD pip3 install -r /opt/frigate/docker/main/requirements-dev.txt
+$STD /opt/frigate/.devcontainer/initialize.sh
+$STD make version
msg_ok "Python venv ready"
msg_info "Building Web UI"
cd /opt/frigate/web
-$STD npm install
+$STD npm ci
$STD npm run build
+cp -r /opt/frigate/web/dist/* /opt/frigate/web/
+cp -r /opt/frigate/config/. /config
msg_ok "Web UI built"
msg_info "Writing default config"
+sed -i '/^s6-svc -O \.$/s/^/#/' /opt/frigate/docker/main/rootfs/etc/s6-overlay/s6-rc.d/frigate/run
mkdir -p /opt/frigate/config
cat <<EOF >/opt/frigate/config/config.yml
mqtt:
@@ -147,11 +82,54 @@ cameras:
fps: 5
EOF
mkdir -p /config
-ln -sf /opt/frigate/config/config.yml /config/config.yml
+ln -sf /config/config.yml /opt/frigate/config/config.yml
+if [[ "$CTTYPE" == "0" ]]; then
+ sed -i -e 's/^kvm:x:104:$/render:x:104:root,frigate/' -e 's/^render:x:105:root$/kvm:x:105:/' /etc/group
+else
+ sed -i -e 's/^kvm:x:104:$/render:x:104:frigate/' -e 's/^render:x:105:$/kvm:x:105:/' /etc/group
+fi
+echo "tmpfs /tmp/cache tmpfs defaults 0 0" >>/etc/fstab
mkdir -p /media/frigate
wget -qO /media/frigate/person-bicycle-car-detection.mp4 https://github.com/intel-iot-devkit/sample-videos/raw/master/person-bicycle-car-detection.mp4
+cat <<'EOF' >/opt/frigate/frigate/version.py
+VERSION = "0.16.0"
+EOF
msg_ok "Config ready"
+if grep -q -o -m1 -E 'avx[^ ]*' /proc/cpuinfo; then
+ msg_ok "AVX Support Detected"
+ msg_info "Installing Openvino Object Detection Model (Resilience)"
+ $STD pip install -r /opt/frigate/docker/main/requirements-ov.txt
+ cd /opt/frigate/models
+ export ENABLE_ANALYTICS=NO
+ $STD /usr/local/bin/omz_downloader --name ssdlite_mobilenet_v2 --num_attempts 2
+ $STD /usr/local/bin/omz_converter --name ssdlite_mobilenet_v2 --precision FP16 --mo /usr/local/bin/mo
+ cd /
+ cp -r /opt/frigate/models/public/ssdlite_mobilenet_v2 openvino-model
+ curl -fsSL "https://github.com/openvinotoolkit/open_model_zoo/raw/master/data/dataset_classes/coco_91cl_bkgr.txt" -o "openvino-model/coco_91cl_bkgr.txt"
+ sed -i 's/truck/car/g' openvino-model/coco_91cl_bkgr.txt
+  cat <<EOF >>/config/config.yml
+detectors:
+ ov:
+ type: openvino
+ device: CPU
+ model:
+ path: /openvino-model/FP16/ssdlite_mobilenet_v2.xml
+model:
+ width: 300
+ height: 300
+ input_tensor: nhwc
+ input_pixel_format: bgr
+ labelmap_path: /openvino-model/coco_91cl_bkgr.txt
+EOF
+ msg_ok "Installed Openvino Object Detection Model"
+else
+  cat <<EOF >>/config/config.yml
+model:
+ path: /cpu_model.tflite
+EOF
+fi
+
msg_info "Building and Installing libUSB without udev"
wget -qO /tmp/libusb.zip https://github.com/libusb/libusb/archive/v1.0.29.zip
unzip -q /tmp/libusb.zip -d /tmp/
@@ -164,24 +142,35 @@ ldconfig
rm -rf /tmp/libusb.zip /tmp/libusb-1.0.29
msg_ok "Installed libUSB without udev"
-# Coral Object Detection Models
-msg_info "Installing Coral Object Detection Models"
+msg_info "Installing Coral Object Detection Model (Patience)"
cd /opt/frigate
export CCACHE_DIR=/root/.ccache
export CCACHE_MAXSIZE=2G
-
-# edgetpu / cpu Modelle
-wget -qO edgetpu_model.tflite https://github.com/google-coral/test_data/raw/release-frogfish/ssdlite_mobiledet_coco_qat_postprocess_edgetpu.tflite
-wget -qO cpu_model.tflite https://github.com/google-coral/test_data/raw/release-frogfish/ssdlite_mobiledet_coco_qat_postprocess.tflite
+curl -fsSL "https://github.com/libusb/libusb/archive/v1.0.26.zip" -o "v1.0.26.zip"
+$STD unzip v1.0.26.zip
+rm v1.0.26.zip
+cd libusb-1.0.26
+$STD ./bootstrap.sh
+$STD ./configure --disable-udev --enable-shared
+$STD make -j $(nproc --all)
+cd /opt/frigate/libusb-1.0.26/libusb
+mkdir -p /usr/local/lib
+$STD /bin/bash ../libtool --mode=install /usr/bin/install -c libusb-1.0.la '/usr/local/lib'
+mkdir -p /usr/local/include/libusb-1.0
+$STD /usr/bin/install -c -m 644 libusb.h '/usr/local/include/libusb-1.0'
+ldconfig
+cd /
+curl -fsSL "https://github.com/google-coral/test_data/raw/release-frogfish/ssdlite_mobiledet_coco_qat_postprocess_edgetpu.tflite" -o "edgetpu_model.tflite"
+curl -fsSL "https://github.com/google-coral/test_data/raw/release-frogfish/ssdlite_mobiledet_coco_qat_postprocess.tflite" -o "cpu_model.tflite"
cp /opt/frigate/labelmap.txt /labelmap.txt
-
-# Audio-Modelle
-wget -qO yamnet-tflite-classification-tflite-v1.tar.gz https://www.kaggle.com/api/v1/models/google/yamnet/tfLite/classification-tflite/1/download
+curl -fsSL "https://www.kaggle.com/api/v1/models/google/yamnet/tfLite/classification-tflite/1/download" -o "yamnet-tflite-classification-tflite-v1.tar.gz"
tar xzf yamnet-tflite-classification-tflite-v1.tar.gz
rm -rf yamnet-tflite-classification-tflite-v1.tar.gz
mv 1.tflite cpu_audio_model.tflite
cp /opt/frigate/audio-labelmap.txt /audio-labelmap.txt
-msg_ok "Installed Coral Object Detection Models"
+mkdir -p /media/frigate
+curl -fsSL "https://github.com/intel-iot-devkit/sample-videos/raw/master/person-bicycle-car-detection.mp4" -o "/media/frigate/person-bicycle-car-detection.mp4"
+msg_ok "Installed Coral Object Detection Model"
# ------------------------------------------------------------
# Tempio installieren
@@ -191,281 +180,107 @@ export TARGETARCH="amd64"
export DEBIAN_FRONTEND=noninteractive
echo "libedgetpu1-max libedgetpu/accepted-eula select true" | debconf-set-selections
echo "libedgetpu1-max libedgetpu/install-confirm-max select true" | debconf-set-selections
-/opt/frigate/docker/main/install_tempio.sh
+$STD /opt/frigate/docker/main/install_tempio.sh
chmod +x /usr/local/tempio/bin/tempio
ln -sf /usr/local/tempio/bin/tempio /usr/local/bin/tempio
msg_ok "Installed Tempio"
-# ------------------------------------------------------------
-# systemd Units
-msg_info "Creating systemd service for go2rtc"
-cat <<EOF >/etc/systemd/system/go2rtc.service
+# msg_info "Copying model files"
+# cp /opt/frigate/cpu_model.tflite /
+# cp /opt/frigate/edgetpu_model.tflite /
+# cp /opt/frigate/audio-labelmap.txt /
+# cp /opt/frigate/labelmap.txt /
+# msg_ok "Copied model files"
+
+msg_info "Building Nginx with Custom Modules"
+sed -i 's/if \[\[ "$VERSION_ID" == "12" \]\]; then/if [[ -f \/etc\/apt\/sources.list.d\/debian.sources ]]; then/' /opt/frigate/docker/main/build_nginx.sh
+$STD /opt/frigate/docker/main/build_nginx.sh
+sed -e '/s6-notifyoncheck/ s/^#*/#/' -i /opt/frigate/docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx/run
+ln -sf /usr/local/nginx/sbin/nginx /usr/local/bin/nginx
+msg_ok "Built Nginx"
+
+msg_info "Creating Services"
+cat <<EOF >/etc/systemd/system/create_directories.service
[Unit]
-Description=go2rtc
-After=network.target
+Description=Create necessary directories for logs
[Service]
-ExecStart=/usr/local/bin/go2rtc
-Restart=always
-RestartSec=2
-User=root
-StandardOutput=journal
-StandardError=journal
+Type=oneshot
+ExecStart=/bin/bash -c '/bin/mkdir -p /dev/shm/logs/{frigate,go2rtc,nginx} && /bin/touch /dev/shm/logs/{frigate/current,go2rtc/current,nginx/current} && /bin/chmod -R 777 /dev/shm/logs'
[Install]
WantedBy=multi-user.target
EOF
-systemctl daemon-reload
-systemctl enable -q --now go2rtc
-msg_ok "go2rtc service enabled"
+systemctl enable -q --now create_directories
+sleep 3
+cat <<EOF >/etc/systemd/system/go2rtc.service
+[Unit]
+Description=go2rtc service
+After=network.target
+After=create_directories.service
+StartLimitIntervalSec=0
-msg_info "Creating systemd service for Frigate"
+[Service]
+Type=simple
+Restart=always
+RestartSec=1
+User=root
+Environment=DEFAULT_FFMPEG_VERSION=7.0
+Environment=INCLUDED_FFMPEG_VERSIONS=5.0
+ExecStartPre=+rm /dev/shm/logs/go2rtc/current
+ExecStart=/bin/bash -c "bash /opt/frigate/docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/run 2> >(/usr/bin/ts '%%Y-%%m-%%d %%H:%%M:%%.S ' >&2) | /usr/bin/ts '%%Y-%%m-%%d %%H:%%M:%%.S '"
+StandardOutput=file:/dev/shm/logs/go2rtc/current
+StandardError=file:/dev/shm/logs/go2rtc/current
+
+[Install]
+WantedBy=multi-user.target
+EOF
+systemctl enable -q --now go2rtc
+sleep 3
cat <<EOF >/etc/systemd/system/frigate.service
[Unit]
Description=Frigate service
-After=go2rtc.service network.target
+After=go2rtc.service
+After=create_directories.service
+StartLimitIntervalSec=0
[Service]
-WorkingDirectory=/opt/frigate
-Environment="PATH=/opt/frigate/venv/bin"
-ExecStart=/opt/frigate/venv/bin/python3 -u -m frigate
+Type=simple
Restart=always
-RestartSec=5
+RestartSec=1
User=root
-StandardOutput=journal
-StandardError=journal
+# Environment=PLUS_API_KEY=
+Environment=DEFAULT_FFMPEG_VERSION=7.0
+Environment=INCLUDED_FFMPEG_VERSIONS=5.0
+ExecStartPre=+rm /dev/shm/logs/frigate/current
+ExecStart=/bin/bash -c "bash /opt/frigate/docker/main/rootfs/etc/s6-overlay/s6-rc.d/frigate/run 2> >(/usr/bin/ts '%%Y-%%m-%%d %%H:%%M:%%.S ' >&2) | /usr/bin/ts '%%Y-%%m-%%d %%H:%%M:%%.S '"
+StandardOutput=file:/dev/shm/logs/frigate/current
+StandardError=file:/dev/shm/logs/frigate/current
[Install]
WantedBy=multi-user.target
EOF
-systemctl daemon-reload
systemctl enable -q --now frigate
-msg_ok "Frigate service enabled"
+sleep 3
+cat <<EOF >/etc/systemd/system/nginx.service
+[Unit]
+Description=Nginx service
+After=frigate.service
+After=create_directories.service
+StartLimitIntervalSec=0
-# msg_info "Setup Frigate"
-# ln -sf /usr/local/go2rtc/bin/go2rtc /usr/local/bin/go2rtc
-# cd /opt/frigate
-# $STD pip install -r /opt/frigate/docker/main/requirements.txt --break-system-packages
-# $STD pip install -r /opt/frigate/docker/main/requirements-ov.txt --break-system-packages
-# $STD pip3 wheel --wheel-dir=/wheels -r /opt/frigate/docker/main/requirements-wheels.txt
-# pip3 install -U /wheels/*.whl
-# cp -a /opt/frigate/docker/main/rootfs/. /
-# export TARGETARCH="amd64"
-# export DEBIAN_FRONTEND=noninteractive
-# echo "libedgetpu1-max libedgetpu/accepted-eula select true" | debconf-set-selections
-# echo "libedgetpu1-max libedgetpu/install-confirm-max select true" | debconf-set-selections
+[Service]
+Type=simple
+Restart=always
+RestartSec=1
+User=root
+ExecStartPre=+rm /dev/shm/logs/nginx/current
+ExecStart=/bin/bash -c "bash /opt/frigate/docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx/run 2> >(/usr/bin/ts '%%Y-%%m-%%d %%H:%%M:%%.S ' >&2) | /usr/bin/ts '%%Y-%%m-%%d %%H:%%M:%%.S '"
+StandardOutput=file:/dev/shm/logs/nginx/current
+StandardError=file:/dev/shm/logs/nginx/current
-# msg_info "Ensure /etc/apt/sources.list.d/debian.sources exists with deb-src"
-# mkdir -p /etc/apt/sources.list.d
-# cat >/etc/apt/sources.list.d/debian.sources <<'EOF'
-# Types: deb deb-src
-# URIs: http://deb.debian.org/debian
-# Suites: bookworm
-# Components: main
-# Signed-By: /usr/share/keyrings/debian-archive-keyring.gpg
-# EOF
-# msg_ok "Stub /etc/apt/sources.list.d/debian.sources created"
-
-# msg_info "Updating APT cache"
-# $STD apt-get update
-# msg_ok "APT cache updated"
-
-# msg_info "Building Nginx with Custom Modules"
-# $STD bash /opt/frigate/docker/main/build_nginx.sh
-# sed -e '/s6-notifyoncheck/ s/^#*/#/' -i /opt/frigate/docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx/run
-# ln -sf /usr/local/nginx/sbin/nginx /usr/local/bin/nginx
-# msg_ok "Built Nginx"
-
-# msg_info "Cleanup stub debian.sources"
-# rm -f /etc/apt/sources.list.d/debian.sources
-# $STD apt-get update
-# msg_ok "Removed stub and updated APT cache"
-
-# $STD /opt/frigate/docker/main/install_deps.sh
-# $STD apt update
-# $STD ln -svf /usr/lib/btbn-ffmpeg/bin/ffmpeg /usr/local/bin/ffmpeg
-# $STD ln -svf /usr/lib/btbn-ffmpeg/bin/ffprobe /usr/local/bin/ffprobe
-# $STD pip3 install -U /wheels/*.whl
-# ldconfig
-# $STD pip3 install -r /opt/frigate/docker/main/requirements-dev.txt
-# $STD /opt/frigate/.devcontainer/initialize.sh
-# $STD make version
-# cd /opt/frigate/web
-# $STD npm install
-# $STD npm run build
-# cp -r /opt/frigate/web/dist/* /opt/frigate/web/
-# cp -r /opt/frigate/config/. /config
-# sed -i '/^s6-svc -O \.$/s/^/#/' /opt/frigate/docker/main/rootfs/etc/s6-overlay/s6-rc.d/frigate/run
-# cat <<EOF >/config/config.yml
-# mqtt:
-# enabled: false
-# cameras:
-# test:
-# ffmpeg:
-# #hwaccel_args: preset-vaapi
-# inputs:
-# - path: /media/frigate/person-bicycle-car-detection.mp4
-# input_args: -re -stream_loop -1 -fflags +genpts
-# roles:
-# - detect
-# - rtmp
-# detect:
-# height: 1080
-# width: 1920
-# fps: 5
-# EOF
-# ln -sf /config/config.yml /opt/frigate/config/config.yml
-# if [[ "$CTTYPE" == "0" ]]; then
-# sed -i -e 's/^kvm:x:104:$/render:x:104:root,frigate/' -e 's/^render:x:105:root$/kvm:x:105:/' /etc/group
-# else
-# sed -i -e 's/^kvm:x:104:$/render:x:104:frigate/' -e 's/^render:x:105:$/kvm:x:105:/' /etc/group
-# fi
-# echo "tmpfs /tmp/cache tmpfs defaults 0 0" >>/etc/fstab
-# msg_ok "Installed Frigate"
-
-# # read -p "Semantic Search requires a dedicated GPU and at least 16GB RAM. Would you like to install it? (y/n): " semantic_choice
-# # if [[ "$semantic_choice" == "y" ]]; then
-# # msg_info "Configuring Semantic Search & AI Models"
-# # mkdir -p /opt/frigate/models/semantic_search
-# # curl -fsSL -o /opt/frigate/models/semantic_search/clip_model.pt https://huggingface.co/openai/clip-vit-base-patch32/resolve/main/pytorch_model.bin
-# # msg_ok "Semantic Search Models Installed"
-# # else
-# # msg_ok "Skipped Semantic Search Setup"
-# # fi
-
-# msg_info "Building and Installing libUSB without udev"
-# wget -qO /tmp/libusb.zip https://github.com/libusb/libusb/archive/v1.0.29.zip
-# unzip -q /tmp/libusb.zip -d /tmp/
-# cd /tmp/libusb-1.0.29
-# ./bootstrap.sh
-# ./configure --disable-udev --enable-shared
-# make -j$(nproc --all)
-# make install
-# ldconfig
-# rm -rf /tmp/libusb.zip /tmp/libusb-1.0.29
-# msg_ok "Installed libUSB without udev"
-
-# msg_info "Installing Coral Object Detection Model (Patience)"
-# cd /opt/frigate
-# export CCACHE_DIR=/root/.ccache
-# export CCACHE_MAXSIZE=2G
-# cd libusb
-# $STD ./bootstrap.sh
-# $STD ./configure --disable-udev --enable-shared
-# $STD make -j $(nproc --all)
-# cd /opt/frigate/libusb/libusb
-# mkdir -p /usr/local/lib
-# $STD /bin/bash ../libtool --mode=install /usr/bin/install -c libusb-1.0.la '/usr/local/lib'
-# mkdir -p /usr/local/include/libusb-1.0
-# $STD /usr/bin/install -c -m 644 libusb.h '/usr/local/include/libusb-1.0'
-# ldconfig
-# cd /
-# wget -qO edgetpu_model.tflite https://github.com/google-coral/test_data/raw/release-frogfish/ssdlite_mobiledet_coco_qat_postprocess_edgetpu.tflite
-# wget -qO cpu_model.tflite https://github.com/google-coral/test_data/raw/release-frogfish/ssdlite_mobiledet_coco_qat_postprocess.tflite
-# cp /opt/frigate/labelmap.txt /labelmap.txt
-# wget -qO yamnet-tflite-classification-tflite-v1.tar.gz https://www.kaggle.com/api/v1/models/google/yamnet/tfLite/classification-tflite/1/download
-# tar xzf yamnet-tflite-classification-tflite-v1.tar.gz
-# rm -rf yamnet-tflite-classification-tflite-v1.tar.gz
-# mv 1.tflite cpu_audio_model.tflite
-# cp /opt/frigate/audio-labelmap.txt /audio-labelmap.txt
-# mkdir -p /media/frigate
-# wget -qO /media/frigate/person-bicycle-car-detection.mp4 https://github.com/intel-iot-devkit/sample-videos/raw/master/person-bicycle-car-detection.mp4
-# msg_ok "Installed Coral Object Detection Model"
-
-# msg_info "Installing Tempio"
-# sed -i 's|/rootfs/usr/local|/usr/local|g' /opt/frigate/docker/main/install_tempio.sh
-# TARGETARCH="amd64"
-# $STD /opt/frigate/docker/main/install_tempio.sh
-# chmod +x /usr/local/tempio/bin/tempio
-# ln -sf /usr/local/tempio/bin/tempio /usr/local/bin/tempio
-# msg_ok "Installed Tempio"
-
-# msg_info "Creating Services"
-# cat <<EOF >/etc/systemd/system/create_directories.service
-# [Unit]
-# Description=Create necessary directories for logs
-
-# [Service]
-# Type=oneshot
-# ExecStart=/bin/bash -c '/bin/mkdir -p /dev/shm/logs/{frigate,go2rtc,nginx} && /bin/touch /dev/shm/logs/{frigate/current,go2rtc/current,nginx/current} && /bin/chmod -R 777 /dev/shm/logs'
-
-# [Install]
-# WantedBy=multi-user.target
-# EOF
-# systemctl enable -q --now create_directories
-# sleep 3
-# cat <<EOF >/etc/systemd/system/go2rtc.service
-# [Unit]
-# Description=go2rtc service
-# After=network.target
-# After=create_directories.service
-# StartLimitIntervalSec=0
-
-# [Service]
-# Type=simple
-# Restart=always
-# RestartSec=1
-# User=root
-# ExecStartPre=+rm /dev/shm/logs/go2rtc/current
-# ExecStart=/bin/bash -c "bash /opt/frigate/docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/run 2> >(/usr/bin/ts '%%Y-%%m-%%d %%H:%%M:%%.S ' >&2) | /usr/bin/ts '%%Y-%%m-%%d %%H:%%M:%%.S '"
-# StandardOutput=file:/dev/shm/logs/go2rtc/current
-# StandardError=file:/dev/shm/logs/go2rtc/current
-
-# [Install]
-# WantedBy=multi-user.target
-# EOF
-# systemctl enable -q --now go2rtc
-# sleep 3
-# cat <<EOF >/etc/systemd/system/frigate.service
-# [Unit]
-# Description=Frigate service
-# After=go2rtc.service
-# After=create_directories.service
-# StartLimitIntervalSec=0
-
-# [Service]
-# Type=simple
-# Restart=always
-# RestartSec=1
-# User=root
-# # Environment=PLUS_API_KEY=
-# ExecStartPre=+rm /dev/shm/logs/frigate/current
-# ExecStart=/bin/bash -c "bash /opt/frigate/docker/main/rootfs/etc/s6-overlay/s6-rc.d/frigate/run 2> >(/usr/bin/ts '%%Y-%%m-%%d %%H:%%M:%%.S ' >&2) | /usr/bin/ts '%%Y-%%m-%%d %%H:%%M:%%.S '"
-# StandardOutput=file:/dev/shm/logs/frigate/current
-# StandardError=file:/dev/shm/logs/frigate/current
-
-# [Install]
-# WantedBy=multi-user.target
-# EOF
-# systemctl enable -q --now frigate
-# sleep 3
-# cat <<EOF >/etc/systemd/system/nginx.service
-# [Unit]
-# Description=Nginx service
-# After=frigate.service
-# After=create_directories.service
-# StartLimitIntervalSec=0
-
-# [Service]
-# Type=simple
-# Restart=always
-# RestartSec=1
-# User=root
-# ExecStartPre=+rm /dev/shm/logs/nginx/current
-# ExecStart=/bin/bash -c "bash /opt/frigate/docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx/run 2> >(/usr/bin/ts '%%Y-%%m-%%d %%H:%%M:%%.S ' >&2) | /usr/bin/ts '%%Y-%%m-%%d %%H:%%M:%%.S '"
-# StandardOutput=file:/dev/shm/logs/nginx/current
-# StandardError=file:/dev/shm/logs/nginx/current
-
-# [Install]
-# WantedBy=multi-user.target
-# EOF
-# systemctl enable -q --now nginx
-# msg_ok "Configured Services"
-
-motd_ssh
-customize
-
-msg_info "Cleaning up"
-$STD apt-get -y autoremove
-$STD apt-get -y autoclean
-msg_ok "Cleaned"
+[Install]
+WantedBy=multi-user.target
+EOF
+systemctl enable -q --now nginx
+msg_ok "Configured Services"
diff --git a/install/healthchecks-install.sh b/install/healthchecks-install.sh
deleted file mode 100644
index 5ce285fe..00000000
--- a/install/healthchecks-install.sh
+++ /dev/null
@@ -1,132 +0,0 @@
-#!/usr/bin/env bash
-
-# Copyright (c) 2021-2025 community-scripts ORG
-# Author: MickLesk (Canbiz)
-# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
-# Source: https://github.com/getmaxun/maxun
-
-source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
-color
-verb_ip6
-catch_errors
-setting_up_container
-network_check
-update_os
-
-msg_info "Installing Dependencies"
-$STD apt-get install -y \
- gcc \
- libpq-dev \
- libcurl4-openssl-dev \
- libssl-dev
-msg_ok "Installed Dependencies"
-
-msg_info "Setup Python3"
-$STD apt-get install -y \
- python3 python3-dev python3-pip
-$STD pip install --upgrade pip
-msg_ok "Setup Python3"
-
-setup_uv
-PG_VERSION=16 setup_postgresql
-
-msg_info "Setup Database"
-DB_NAME=healthchecks_db
-DB_USER=hc_user
-DB_PASS=$(openssl rand -base64 18 | tr -dc 'a-zA-Z0-9' | cut -c1-13)
-SECRET_KEY="$(openssl rand -base64 32 | tr -dc 'a-zA-Z0-9' | cut -c1-32)"
-
-$STD sudo -u postgres psql -c "CREATE ROLE $DB_USER WITH LOGIN PASSWORD '$DB_PASS';"
-$STD sudo -u postgres psql -c "CREATE DATABASE $DB_NAME WITH OWNER $DB_USER ENCODING 'UTF8' TEMPLATE template0;"
-$STD sudo -u postgres psql -c "ALTER ROLE $DB_USER SET client_encoding TO 'utf8';"
-$STD sudo -u postgres psql -c "ALTER ROLE $DB_USER SET default_transaction_isolation TO 'read committed';"
-$STD sudo -u postgres psql -c "ALTER ROLE $DB_USER SET timezone TO 'UTC'"
-{
- echo "healthchecks-Credentials"
- echo "healthchecks Database User: $DB_USER"
- echo "healthchecks Database Password: $DB_PASS"
- echo "healthchecks Database Name: $DB_NAME"
-} >>~/healthchecks.creds
-msg_ok "Set up Database"
-
-msg_info "Setup healthchecks"
-fetch_and_deploy_gh_release "healthchecks" "healthchecks/healthchecks" "source"
-cd /opt/healthchecks
-mkdir -p /opt/healthchecks/static-collected/
-$STD uv venv .venv
-$STD source .venv/bin/activate
-$STD uv pip install wheel
-$STD uv pip install gunicorn
-$STD uv pip install -r requirements.txt
-LOCAL_IP=$(hostname -I | awk '{print $1}')
-cat <<EOF >/opt/healthchecks/.env
-ALLOWED_HOSTS=localhost,127.0.0.1,${LOCAL_IP},healthchecks
-DB=postgres
-DB_HOST=localhost
-DB_PORT=5432
-DB_NAME=${DB_NAME}
-DB_USER=${DB_USER}
-DB_PASSWORD=${DB_PASS}
-DB_CONN_MAX_AGE=0
-DB_SSLMODE=prefer
-DB_TARGET_SESSION_ATTRS=read-write
-DATABASE_URL=postgres://${DB_USER}:${DB_PASS}@localhost:5432/${DB_NAME}?sslmode=prefer
-
-DEFAULT_FROM_EMAIL=healthchecks@example.org
-EMAIL_HOST=localhost
-EMAIL_HOST_PASSWORD=
-EMAIL_HOST_USER=
-EMAIL_PORT=587
-EMAIL_USE_TLS=True
-EMAIL_USE_VERIFICATION=True
-
-# Django & Healthchecks Konfiguration
-SECRET_KEY=${SECRET_KEY}
-DEBUG=True
-
-SITE_ROOT=http://${LOCAL_IP}:8000
-SITE_NAME=MyChecks
-STATIC_ROOT=/opt/healthchecks/static-collected
-
-EOF
-
-$STD .venv/bin/python3 manage.py makemigrations
-$STD .venv/bin/python3 manage.py migrate --noinput
-$STD .venv/bin/python3 manage.py collectstatic --noinput
-
-ADMIN_EMAIL="admin@helper-scripts.local"
-ADMIN_PASSWORD="$DB_PASS"
-cat <<EOF >/etc/systemd/system/healthchecks.service
-[Unit]
-Description=Healthchecks Service
-After=network.target postgresql.service
-
-[Service]
-WorkingDirectory=/opt/healthchecks/
-EnvironmentFile=/opt/healthchecks/.env
-ExecStart=/opt/healthchecks/.venv/bin/gunicorn hc.wsgi:application --bind 127.0.0.1:8000
-
-Restart=always
-
-[Install]
-WantedBy=multi-user.target
-EOF
-systemctl enable -q --now healthchecks
-msg_ok "Created Service"
-
-motd_ssh
-customize
-
-msg_info "Cleaning up"
-$STD apt-get -y autoremove
-$STD apt-get -y autoclean
-msg_ok "Cleaned"
diff --git a/install/jeedom-install.sh b/install/jeedom-install.sh
deleted file mode 100644
index d868e84b..00000000
--- a/install/jeedom-install.sh
+++ /dev/null
@@ -1,94 +0,0 @@
-#!/usr/bin/env bash
-
-# Copyright (c) 2021-2025 community-scripts ORG
-# Author: Mips2648
-# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
-# Source: https://jeedom.com/
-
-source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
-color
-verb_ip6
-catch_errors
-setting_up_container
-network_check
-update_os
-
-msg_info "Installing dependencies"
-$STD apt-get install -y \
- lsb-release \
- git
-msg_ok "Dependencies installed"
-
-DEFAULT_BRANCH="master"
-REPO_URL="https://github.com/jeedom/core.git"
-
-echo
-while true; do
- read -rp "${TAB3}Enter branch to use (master, beta, alpha...) (Default: ${DEFAULT_BRANCH}): " BRANCH
- BRANCH="${BRANCH:-$DEFAULT_BRANCH}"
-
- if git ls-remote --heads "$REPO_URL" "refs/heads/$BRANCH" | grep -q .; then
- break
- else
- msg_error "Branch '$BRANCH' does not exist on remote. Please try again."
- fi
-done
-
-msg_info "Downloading Jeedom installation script"
-cd /tmp
-wget -q https://raw.githubusercontent.com/jeedom/core/"${BRANCH}"/install/install.sh
-chmod +x install.sh
-msg_ok "Installation script downloaded"
-
-msg_info "Install Jeedom main dependencies, please wait"
-$STD ./install.sh -v "$BRANCH" -s 2
-msg_ok "Installed Jeedom main dependencies"
-
-msg_info "Install Database"
-$STD ./install.sh -v "$BRANCH" -s 3
-msg_ok "Database installed"
-
-msg_info "Install Apache"
-$STD ./install.sh -v "$BRANCH" -s 4
-msg_ok "Apache installed"
-
-msg_info "Install PHP and dependencies"
-$STD ./install.sh -v "$BRANCH" -s 5
-msg_ok "PHP installed"
-
-msg_info "Download Jeedom core"
-$STD ./install.sh -v "$BRANCH" -s 6
-msg_ok "Download done"
-
-msg_info "Database customisation"
-$STD ./install.sh -v "$BRANCH" -s 7
-msg_ok "Database customisation done"
-
-msg_info "Jeedom customisation"
-$STD ./install.sh -v "$BRANCH" -s 8
-msg_ok "Jeedom customisation done"
-
-msg_info "Configuring Jeedom"
-$STD ./install.sh -v "$BRANCH" -s 9
-msg_ok "Jeedom configured"
-
-msg_info "Installing Jeedom"
-$STD ./install.sh -v "$BRANCH" -s 10
-msg_ok "Jeedom installed"
-
-msg_info "Post installation"
-$STD ./install.sh -v "$BRANCH" -s 11
-msg_ok "Post installation done"
-
-msg_info "Check installation"
-$STD ./install.sh -v "$BRANCH" -s 12
-msg_ok "Installation checked, everything is successfuly installed. A reboot is recommended."
-
-motd_ssh
-customize
-
-msg_info "Cleaning up"
-rm -rf /tmp/install.sh
-$STD apt-get -y autoremove
-$STD apt-get -y autoclean
-msg_ok "Cleaned"
diff --git a/install/leantime-install.sh b/install/leantime-install.sh
index ff358010..db67e925 100644
--- a/install/leantime-install.sh
+++ b/install/leantime-install.sh
@@ -13,7 +13,7 @@ setting_up_container
network_check
update_os
-PHP_VERSION=8.4 PHP_MODULE="mysql" PHP_APACHE="YES" PHP_FPM="YES" setup_php
+PHP_VERSION="8.4" PHP_MODULE="mysql" PHP_APACHE="YES" PHP_FPM="YES" setup_php
setup_mariadb
msg_info "Setting up Database"
@@ -24,20 +24,18 @@ $STD mysql -u root -e "CREATE DATABASE $DB_NAME;"
$STD mysql -u root -e "CREATE USER '$DB_USER'@'localhost' IDENTIFIED WITH mysql_native_password AS PASSWORD('$DB_PASS');"
$STD mysql -u root -e "GRANT ALL ON $DB_NAME.* TO '$DB_USER'@'localhost'; FLUSH PRIVILEGES;"
{
- echo "${APPLICATION} Credentials"
+ echo "Leantime Credentials"
echo "Database User: $DB_USER"
echo "Database Password: $DB_PASS"
echo "Database Name: $DB_NAME"
-} >>~/"$APPLICATION".creds
+} >>~/leantime.creds
msg_ok "Set up Database"
fetch_and_deploy_gh_release "leantime" "Leantime/leantime" "prebuild" "latest" "/opt/leantime" Leantime*.tar.gz
-msg_info "Setup ${APPLICATION}"
-APACHE_LOG_DIR=/var/log/apache2
+msg_info "Setup Leantime"
chown -R www-data:www-data "/opt/leantime"
chmod -R 750 "/opt/leantime"
-
cat <<EOF >/etc/apache2/sites-enabled/000-default.conf
ServerAdmin webmaster@localhost
@@ -55,26 +53,21 @@ cat </etc/apache2/sites-enabled/000-default.conf
Require all granted
- ErrorLog ${APACHE_LOG_DIR}/error.log
- CustomLog ${APACHE_LOG_DIR}/access.log combined
+ ErrorLog /var/log/apache2/error.log
+ CustomLog /var/log/apache2/access.log combined
EOF
-
mv "/opt/leantime/config/sample.env" "/opt/leantime/config/.env"
sed -i -e "s|^LEAN_DB_DATABASE.*|LEAN_DB_DATABASE = '$DB_NAME'|" \
-e "s|^LEAN_DB_USER.*|LEAN_DB_USER = '$DB_USER'|" \
-e "s|^LEAN_DB_PASSWORD.*|LEAN_DB_PASSWORD = '$DB_PASS'|" \
-e "s|^LEAN_SESSION_PASSWORD.*|LEAN_SESSION_PASSWORD = '$(openssl rand -base64 18 | tr -dc 'a-zA-Z0-9' | head -c13)'|" \
"/opt/leantime/config/.env"
-
-a2enmod -q proxy_fcgi setenvif rewrite
-a2enconf -q "php${PHP_VERSION}-fpm"
-
-sed -i -e "s/^;extension.\(curl\|fileinfo\|gd\|intl\|ldap\|mbstring\|exif\|mysqli\|odbc\|openssl\|pdo_mysql\)/extension=\1/g" "/etc/php/${PHP_VERSION}/apache2/php.ini"
-
+$STD a2enmod -q proxy_fcgi setenvif rewrite
+$STD a2enconf -q "php8.4-fpm"
+sed -i -e "s/^;extension.\(curl\|fileinfo\|gd\|intl\|ldap\|mbstring\|exif\|mysqli\|odbc\|openssl\|pdo_mysql\)/extension=\1/g" "/etc/php/8.4/apache2/php.ini"
systemctl restart apache2
-
-msg_ok "Setup ${APPLICATION}"
+msg_ok "Setup leantime"
motd_ssh
customize
diff --git a/install/librenms-install.sh b/install/librenms-install.sh
index bea0d4a8..46f3c5bf 100644
--- a/install/librenms-install.sh
+++ b/install/librenms-install.sh
@@ -15,28 +15,28 @@ update_os
msg_info "Installing Dependencies"
$STD apt-get install -y \
- lsb-release \
- ca-certificates \
- acl \
- fping \
- graphviz \
- imagemagick \
- mtr-tiny \
- nginx \
- nmap \
- rrdtool \
- snmp \
- snmpd
+ lsb-release \
+ ca-certificates \
+ acl \
+ fping \
+ graphviz \
+ imagemagick \
+ mtr-tiny \
+ nginx \
+ nmap \
+ rrdtool \
+ snmp \
+ snmpd
msg_ok "Installed Dependencies"
-PHP_VERSION=8.2 PHP_FPM=YES PHP_APACHE=NO PHP_MODULE="gmp,mysql,snmp" setup_php
+PHP_VERSION="8.3" PHP_FPM="YES" PHP_MODULE="gmp,mysql,snmp" setup_php
setup_mariadb
setup_composer
-setup_uv
+PYTHON_VERSION="3.13" setup_uv
msg_info "Installing Python"
$STD apt-get install -y \
- python3-{dotenv,pymysql,redis,setuptools,systemd,pip}
+ python3-{dotenv,pymysql,redis,setuptools,systemd,pip}
msg_ok "Installed Python"
msg_info "Configuring Database"
@@ -47,16 +47,17 @@ $STD mariadb -u root -e "CREATE DATABASE $DB_NAME CHARACTER SET utf8mb4 COLLATE
$STD mariadb -u root -e "CREATE USER '$DB_USER'@'localhost' IDENTIFIED BY '$DB_PASS';"
$STD mariadb -u root -e "GRANT ALL ON $DB_NAME.* TO '$DB_USER'@'localhost'; FLUSH PRIVILEGES;"
{
- echo "LibreNMS-Credentials"
- echo "LibreNMS Database User: $DB_USER"
- echo "LibreNMS Database Password: $DB_PASS"
- echo "LibreNMS Database Name: $DB_NAME"
+ echo "LibreNMS-Credentials"
+ echo "LibreNMS Database User: $DB_USER"
+ echo "LibreNMS Database Password: $DB_PASS"
+ echo "LibreNMS Database Name: $DB_NAME"
} >>~/librenms.creds
msg_ok "Configured Database"
-msg_info "Setup Librenms"
+fetch_and_deploy_gh_release "LibreNMS" "librenms/librenms"
+
+msg_info "Configuring LibreNMS"
$STD useradd librenms -d /opt/librenms -M -r -s "$(which bash)"
-fetch_and_deploy_gh_release "librenms/librenms"
setfacl -d -m g::rwx /opt/librenms/rrd /opt/librenms/logs /opt/librenms/bootstrap/cache/ /opt/librenms/storage/
setfacl -R -m g::rwx /opt/librenms/rrd /opt/librenms/logs /opt/librenms/bootstrap/cache/ /opt/librenms/storage/
cd /opt/librenms
@@ -72,7 +73,7 @@ chown -R librenms:librenms /opt/librenms
chmod 771 /opt/librenms
setfacl -d -m g::rwx /opt/librenms/bootstrap/cache /opt/librenms/storage /opt/librenms/logs /opt/librenms/rrd
chmod -R ug=rwX /opt/librenms/bootstrap/cache /opt/librenms/storage /opt/librenms/logs /opt/librenms/rrd
-msg_ok "Setup LibreNMS"
+msg_ok "Configured LibreNMS"
msg_info "Configure MariaDB"
sed -i "/\[mysqld\]/a innodb_file_per_table=1\nlower_case_table_names=0" /etc/mysql/mariadb.conf.d/50-server.cnf
@@ -147,7 +148,6 @@ motd_ssh
customize
msg_info "Cleaning up"
-rm -f $tmp_file
$STD apt-get -y autoremove
$STD apt-get -y autoclean
msg_ok "Cleaned"
diff --git a/install/librespeed-install.sh b/install/librespeed-install.sh
deleted file mode 100644
index 8b2bc48a..00000000
--- a/install/librespeed-install.sh
+++ /dev/null
@@ -1,54 +0,0 @@
-#!/usr/bin/env bash
-
-# Copyright (c) 2021-2025 community-scripts ORG
-# Author: elvito
-# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
-# Source: https://github.com/librespeed/speedtest
-
-source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
-color
-verb_ip6
-catch_errors
-setting_up_container
-network_check
-update_os
-
-msg_info "Installing Dependencies"
-$STD apt-get update
-$STD apt-get install -y \
- caddy \
- php-fpm
-msg_ok "Installed Dependencies"
-
-msg_info "Installing librespeed"
-temp_file=$(mktemp)
-RELEASE=$(curl -fsSL https://api.github.com/repos/librespeed/speedtest/releases/latest | grep "tag_name" | awk '{print substr($2, 2, length($2)-3)}')
-curl -fsSL "https://github.com/librespeed/speedtest/archive/refs/tags/${RELEASE}.zip" -o "$temp_file"
-mkdir -p /opt/librespeed
-mkdir -p /temp
-unzip -q "$temp_file" -d /temp
-cd /temp/speedtest-"${RELEASE}"
-cp -u favicon.ico index.html speedtest.js speedtest_worker.js /opt/librespeed/
-cp -ru backend results /opt/librespeed/
-
-cat <<EOF >/etc/caddy/Caddyfile
-:80 {
- root * /opt/librespeed
- file_server
- php_fastcgi unix//run/php/php-fpm.sock
-}
-EOF
-
-systemctl restart caddy
-echo "${RELEASE}" >/opt/"${APP}_version.txt"
-msg_ok "Installation completed"
-
-motd_ssh
-customize
-
-msg_info "Cleaning up"
-rm -rf /temp
-rm -f "$temp_file"
-$STD apt-get -y autoremove
-$STD apt-get -y autoclean
-msg_ok "Cleaned"
diff --git a/install/mediamanager-install.sh b/install/mediamanager-install.sh
deleted file mode 100644
index ad2cb345..00000000
--- a/install/mediamanager-install.sh
+++ /dev/null
@@ -1,122 +0,0 @@
-#!/usr/bin/env bash
-
-# Copyright (c) 2025 Community Scripts ORG
-# Author: vhsdream
-# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
-# Source: https://github.com/maxdorninger/MediaManager
-
-source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
-color
-verb_ip6
-catch_errors
-setting_up_container
-network_check
-update_os
-
-msg_info "Installing dependencies"
-$STD apt-get install -y yq
-msg_ok "Installed dependencies"
-
-NODE_VERSION="24" setup_nodejs
-setup_uv
-PG_VERSION="17" setup_postgresql
-
-msg_info "Setting up PostgreSQL"
-DB_NAME="mm_db"
-DB_USER="mm_user"
-DB_PASS="$(openssl rand -base64 18 | tr -dc 'a-zA-Z0-9' | cut -c1-13)"
-$STD sudo -u postgres psql -c "CREATE ROLE $DB_USER WITH LOGIN PASSWORD '$DB_PASS';"
-$STD sudo -u postgres psql -c "CREATE DATABASE $DB_NAME WITH OWNER $DB_USER TEMPLATE template0;"
-$STD sudo -u postgres psql -c "ALTER ROLE $DB_USER SET client_encoding TO 'utf8';"
-{
- echo "MediaManager Credentials"
- echo "MediaManager Database User: $DB_USER"
- echo "MediaManager Database Password: $DB_PASS"
- echo "MediaManager Database Name: $DB_NAME"
-} >>~/mediamanager.creds
-msg_ok "Set up PostgreSQL"
-
-fetch_and_deploy_gh_release "MediaManager" "maxdorninger/MediaManager" "tarball" "latest" "/opt/mediamanager"
-msg_info "Configuring MediaManager"
-MM_DIR="/opt/mm"
-MEDIA_DIR="${MM_DIR}/media"
-export CONFIG_DIR="${MM_DIR}/config"
-export FRONTEND_FILES_DIR="${MM_DIR}/web/build"
-export BASE_PATH=""
-export PUBLIC_VERSION=""
-export PUBLIC_API_URL="${BASE_PATH}/api/v1"
-export BASE_PATH="${BASE_PATH}/web"
-cd /opt/mediamanager/web
-$STD npm ci
-$STD npm run build
-mkdir -p {"$MM_DIR"/web,"$MEDIA_DIR","$CONFIG_DIR"}
-cp -r build "$FRONTEND_FILES_DIR"
-
-export BASE_PATH=""
-export VIRTUAL_ENV="${MM_DIR}/venv"
-cd /opt/mediamanager
-cp -r {media_manager,alembic*} "$MM_DIR"
-$STD /usr/local/bin/uv venv "$VIRTUAL_ENV"
-$STD /usr/local/bin/uv sync --locked --active
-msg_ok "Configured MediaManager"
-
-read -r -p "Enter the email address of your first admin user: " admin_email
-if [[ "$admin_email" ]]; then
- EMAIL="$admin_email"
-fi
-
-msg_info "Creating config and start script"
-LOCAL_IP="$(hostname -I | awk '{print $1}')"
-SECRET="$(openssl rand -hex 32)"
-sed -e "s/localhost:8/$LOCAL_IP:8/g" \
- -e "s|/data/|$MEDIA_DIR|g" \
- -e 's/"db"/"localhost"/' \
- -e "s/user = \"MediaManager\"/user = \"$DB_USER\"/" \
- -e "s/password = \"MediaManager\"/password = \"$DB_PASS\"/" \
- -e "s/dbname = \"MediaManager\"/dbname = \"$DB_NAME\"/" \
- -e "/^token_secret/s/=.*/= \"$SECRET\"/" \
- -e "s/admin@example.com/$EMAIL/" \
- -e '/^admin_emails/s/, .*/]/' \
- /opt/mediamanager/config.example.toml >"$CONFIG_DIR"/config.toml
-
-mkdir -p "$MEDIA_DIR"/{images,tv,movies,torrents}
-
-cat <<EOF >/opt/"$MM_DIR"/start.sh
-#!/usr/bin/env bash
-
-export CONFIG_DIR="$CONFIG_DIR"
-export FRONTEND_FILES_DIR="$FRONTEND_FILES_DIR"
-export BASE_PATH=""
-
-cd /opt/"$MM_DIR"
-source ./venv/bin/activate
-/usr/local/bin/uv run alembic upgrade head
-/usr/local/bin/uv run fastapi run ./media_manager/main.py --port 8000
-EOF
-chmod +x /opt/"$MM_DIR"/start.sh
-msg_ok "Created config and start script"
-
-msg_info "Creating service"
-cat <<EOF >/etc/systemd/system/mediamanager.service
-[Unit]
-Description=MediaManager Backend Service
-After=network.target
-
-[Service]
-Type=simple
-WorkingDirectory=/opt/"$MM_DIR"
-ExecStart=/usr/bin/bash start.sh
-
-[Install]
-WantedBy=multi-user.target
-EOF
-systemctl enable -q --now mediamanager
-msg_ok "Created service"
-
-motd_ssh
-customize
-
-msg_info "Cleaning up"
-$STD apt-get -y autoremove
-$STD apt-get -y autoclean
-msg_ok "Cleaned"
diff --git a/install/nginxproxymanager-install.sh b/install/nginxproxymanager-install.sh
deleted file mode 100644
index 9aaf4cc4..00000000
--- a/install/nginxproxymanager-install.sh
+++ /dev/null
@@ -1,171 +0,0 @@
-#!/usr/bin/env bash
-
-# Copyright (c) 2021-2025 tteck
-# Author: tteck (tteckster)
-# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
-# Source: https://nginxproxymanager.com/
-
-source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
-color
-verb_ip6
-catch_errors
-setting_up_container
-network_check
-update_os
-
-msg_info "Installing Dependencies"
-$STD apt-get update
-$STD apt-get -y install \
- ca-certificates \
- apache2-utils \
- logrotate \
- build-essential \
- jq \
- git
-msg_ok "Installed Dependencies"
-
-NODE_VERSION="16" NODE_MODULE="yarn" setup_nodejs
-PYTHON_VERSION="3.12" setup_uv
-fetch_and_deploy_gh_release "nginxproxymanager" "NginxProxyManager/nginx-proxy-manager" "tarball" "latest" "/tmp/nginxproxymanager"
-
-msg_info "Installing Python Dependencies"
-$STD apt-get install -y \
- python3 \
- python3-dev \
- python3-venv
-msg_ok "Installed Python Dependencies"
-
-msg_info "Setting up Certbot Environment"
-$STD uv venv /opt/certbot
-$STD uv pip install --python /opt/certbot/bin/python \
- certbot \
- certbot-dns-cloudflare \
- certbot-dns-multi
-msg_ok "Certbot Environment Ready"
-
-msg_info "Installing Openresty"
-VERSION="$(awk -F'=' '/^VERSION_CODENAME=/{ print $NF }' /etc/os-release)"
-curl -fsSL "https://openresty.org/package/pubkey.gpg" | gpg --dearmor -o /etc/apt/trusted.gpg.d/openresty-archive-keyring.gpg
-echo -e "deb http://openresty.org/package/debian $VERSION openresty" >/etc/apt/sources.list.d/openresty.list
-$STD apt-get update
-$STD apt-get -y install openresty
-msg_ok "Installed Openresty"
-
-msg_info "Setting up Environment"
-ln -sf /usr/bin/python3 /usr/bin/python
-ln -sf /opt/certbot/bin/certbot /usr/bin/certbot
-ln -sf /usr/local/openresty/nginx/sbin/nginx /usr/sbin/nginx
-ln -sf /usr/local/openresty/nginx/ /etc/nginx
-sed -i 's+^daemon+#daemon+g' /tmp/nginxproxymanager/docker/rootfs/etc/nginx/nginx.conf
-NGINX_CONFS=$(find "$(pwd)" -type f -name "*.conf")
-for NGINX_CONF in $NGINX_CONFS; do
- sed -i 's+include conf.d+include /etc/nginx/conf.d+g' "$NGINX_CONF"
-done
-
-mkdir -p /var/www/html /etc/nginx/logs
-cd /tmp/nginxproxymanager
-cp -r docker/rootfs/var/www/html/* /var/www/html/
-cp -r docker/rootfs/etc/nginx/* /etc/nginx/
-cp docker/rootfs/etc/letsencrypt.ini /etc/letsencrypt.ini
-cp docker/rootfs/etc/logrotate.d/nginx-proxy-manager /etc/logrotate.d/nginx-proxy-manager
-ln -sf /etc/nginx/nginx.conf /etc/nginx/conf/nginx.conf
-rm -f /etc/nginx/conf.d/dev.conf
-
-mkdir -p /tmp/nginx/body \
- /run/nginx \
- /data/nginx \
- /data/custom_ssl \
- /data/logs \
- /data/access \
- /data/nginx/default_host \
- /data/nginx/default_www \
- /data/nginx/proxy_host \
- /data/nginx/redirection_host \
- /data/nginx/stream \
- /data/nginx/dead_host \
- /data/nginx/temp \
- /var/lib/nginx/cache/public \
- /var/lib/nginx/cache/private \
- /var/cache/nginx/proxy_temp
-
-chmod -R 777 /var/cache/nginx
-chown root /tmp/nginx
-
-echo resolver "$(awk 'BEGIN{ORS=" "} $1=="nameserver" {print ($2 ~ ":")? "["$2"]": $2}' /etc/resolv.conf);" >/etc/nginx/conf.d/include/resolvers.conf
-
-if [ ! -f /data/nginx/dummycert.pem ] || [ ! -f /data/nginx/dummykey.pem ]; then
- openssl req -new -newkey rsa:2048 -days 3650 -nodes -x509 -subj "/O=Nginx Proxy Manager/OU=Dummy Certificate/CN=localhost" -keyout /data/nginx/dummykey.pem -out /data/nginx/dummycert.pem &>/dev/null
-fi
-
-mkdir -p /app/global /app/frontend/images
-cd /tmp/nginxproxymanager
-cp -r backend/* /app
-cp -r global/* /app/global
-msg_ok "Set up Environment"
-
-msg_info "Building Frontend"
-cd /tmp/nginxproxymanager/frontend
-$STD yarn install --frozen-lockfile
-$STD yarn build
-cp -r dist/* /app/frontend
-cp -r app-images/* /app/frontend/images
-msg_ok "Built Frontend"
-
-msg_info "Initializing Backend"
-rm -rf /app/config/default.json
-if [ ! -f /app/config/production.json ]; then
- cat <<'EOF' >/app/config/production.json
-{
- "database": {
- "engine": "knex-native",
- "knex": {
- "client": "sqlite3",
- "connection": {
- "filename": "/data/database.sqlite"
- }
- }
- }
-}
-EOF
-fi
-cd /app
-$STD yarn install --production
-msg_ok "Initialized Backend"
-
-msg_info "Creating Service"
-cat <<'EOF' >/lib/systemd/system/npm.service
-[Unit]
-Description=Nginx Proxy Manager
-After=network.target
-Wants=openresty.service
-
-[Service]
-Type=simple
-Environment=NODE_ENV=production
-Environment=NODE_OPTIONS=--openssl-legacy-provider
-ExecStartPre=-mkdir -p /tmp/nginx/body /data/letsencrypt-acme-challenge
-ExecStart=/usr/bin/node index.js --abort_on_uncaught_exception --max_old_space_size=1024
-WorkingDirectory=/app
-Restart=on-failure
-
-[Install]
-WantedBy=multi-user.target
-EOF
-msg_ok "Created Service"
-
-motd_ssh
-customize
-
-msg_info "Starting Services"
-sed -i 's/user npm/user root/g; s/^pid/#pid/g' /usr/local/openresty/nginx/conf/nginx.conf
-sed -r -i 's/^([[:space:]]*)su npm npm/\1#su npm npm/g;' /etc/logrotate.d/nginx-proxy-manager
-systemctl enable -q --now openresty
-systemctl enable -q --now npm
-msg_ok "Started Services"
-
-msg_info "Cleaning up"
-rm -rf /tmp/*
-systemctl restart openresty
-$STD apt-get -y autoremove
-$STD apt-get -y autoclean
-msg_ok "Cleaned"
diff --git a/install/proxmox-backup-server-install.sh b/install/proxmox-backup-server-install.sh
deleted file mode 100644
index cac095b9..00000000
--- a/install/proxmox-backup-server-install.sh
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/usr/bin/env bash
-
-# Copyright (c) 2021-2025 tteck
-# Author: tteck (tteckster)
-# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
-# Source: https://www.proxmox.com/en/proxmox-backup-server
-
-source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
-color
-verb_ip6
-catch_errors
-setting_up_container
-network_check
-update_os
-
-read -rp "${TAB3}Do you want to use the Enterprise repository (requires valid subscription key)? [y/N]: " USE_ENTERPRISE_REPO
-
-msg_info "Installing Proxmox Backup Server"
-curl -fsSL https://enterprise.proxmox.com/debian/proxmox-release-bookworm.gpg |
- gpg --dearmor -o /etc/apt/trusted.gpg.d/proxmox-release-bookworm.gpg
-if [[ "$USE_ENTERPRISE_REPO" =~ ^([yY].*)$ ]]; then
- echo "deb https://enterprise.proxmox.com/debian/pbs bookworm pbs-enterprise" >/etc/apt/sources.list.d/pbs-enterprise.list
- msg_ok "Enterprise repository enabled. Make sure your subscription key is installed."
-else
- echo "deb http://download.proxmox.com/debian/pbs bookworm pbs-no-subscription" >>/etc/apt/sources.list
- msg_ok "No-subscription repository enabled."
-fi
-
-$STD apt-get update
-$STD apt-get install -y proxmox-backup-server
-msg_ok "Installed Proxmox Backup Server"
-
-motd_ssh
-customize
-
-msg_info "Cleaning up"
-$STD apt-get -y autoremove
-$STD apt-get -y autoclean
-msg_ok "Cleaned"
diff --git a/install/resiliosync-install.sh b/install/resiliosync-install.sh
new file mode 100644
index 00000000..d03b58d0
--- /dev/null
+++ b/install/resiliosync-install.sh
@@ -0,0 +1,35 @@
+#!/usr/bin/env bash
+
+# Copyright (c) 2021-2025 community-scripts ORG
+# Author: David Bennett (dbinit)
+# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
+# Source: https://www.resilio.com/sync
+
+source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
+color
+verb_ip6
+catch_errors
+setting_up_container
+network_check
+update_os
+
+msg_info "Setting up Resilio Sync Repository"
+curl -fsSL "https://linux-packages.resilio.com/resilio-sync/key.asc" >/etc/apt/trusted.gpg.d/resilio-sync.asc
+echo "deb [signed-by=/etc/apt/trusted.gpg.d/resilio-sync.asc] http://linux-packages.resilio.com/resilio-sync/deb resilio-sync non-free" >/etc/apt/sources.list.d/resilio-sync.list
+$STD apt-get update
+msg_ok "Resilio Sync Repository Setup"
+
+msg_info "Installing Resilio Sync"
+$STD apt-get install -y resilio-sync
+sed -i "s/127.0.0.1:8888/0.0.0.0:8888/g" /etc/resilio-sync/config.json
+systemctl enable -q resilio-sync
+systemctl restart resilio-sync
+msg_ok "Installed Resilio Sync"
+
+motd_ssh
+customize
+
+msg_info "Cleaning up"
+$STD apt-get -y autoremove
+$STD apt-get -y autoclean
+msg_ok "Cleaned"
diff --git a/install/romm-install.sh b/install/romm-install.sh
new file mode 100644
index 00000000..36c791c4
--- /dev/null
+++ b/install/romm-install.sh
@@ -0,0 +1,223 @@
+#!/usr/bin/env bash
+
+# Copyright (c) 2021-2025 community-scripts ORG
+# Author: DevelopmentCats
+# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
+# Source: https://romm.app
+# Updated: 03/10/2025
+
+source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
+color
+verb_ip6
+catch_errors
+setting_up_container
+network_check
+update_os
+
+msg_info "Installing dependencies"
+$STD apt-get install -y \
+ acl \
+ build-essential \
+ libssl-dev \
+ libffi-dev \
+ python3-dev \
+ python3-pip \
+ python3-venv \
+ libmariadb3 \
+ libmariadb-dev \
+ libpq-dev \
+ redis-tools \
+ p7zip \
+ tzdata \
+ jq
+msg_ok "Installed core dependencies"
+
+PYTHON_VERSION="3.12" setup_uv
+NODE_VERSION="22" NODE_MODULE="serve" setup_nodejs
+setup_mariadb
+
+msg_info "Configuring Database"
+DB_NAME=romm
+DB_USER=romm
+DB_PASS=$(openssl rand -base64 18 | tr -dc 'a-zA-Z0-9' | head -c13)
+$STD mariadb -u root -e "CREATE DATABASE IF NOT EXISTS $DB_NAME CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;"
+$STD mariadb -u root -e "CREATE USER IF NOT EXISTS '$DB_USER'@'localhost' IDENTIFIED BY '$DB_PASS';"
+$STD mariadb -u root -e "GRANT ALL ON $DB_NAME.* TO '$DB_USER'@'localhost'; FLUSH PRIVILEGES;"
+{
+ echo "RomM-Credentials"
+ echo "RomM Database User: $DB_USER"
+ echo "RomM Database Password: $DB_PASS"
+ echo "RomM Database Name: $DB_NAME"
+} >~/romm.creds
+chmod 600 ~/romm.creds
+msg_ok "Configured Database"
+
+msg_info "Creating romm user and directories"
+id -u romm &>/dev/null || useradd -r -m -d /var/lib/romm -s /bin/bash romm
+mkdir -p /opt/romm \
+ /var/lib/romm/config \
+ /var/lib/romm/resources \
+ /var/lib/romm/assets/{saves,states,screenshots} \
+ /var/lib/romm/library/roms/{gba,gbc,ps} \
+ /var/lib/romm/library/bios/{gba,ps}
+chown -R romm:romm /opt/romm /var/lib/romm
+msg_ok "Created romm user and directories"
+
+msg_info "Configuring Database"
+DB_NAME=romm
+DB_USER=romm
+DB_PASS=$(openssl rand -base64 18 | tr -dc 'a-zA-Z0-9' | head -c13)
+$STD mariadb -u root -e "CREATE DATABASE $DB_NAME CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;"
+$STD mariadb -u root -e "CREATE USER '$DB_USER'@'localhost' IDENTIFIED BY '$DB_PASS';"
+$STD mariadb -u root -e "GRANT ALL ON $DB_NAME.* TO '$DB_USER'@'localhost'; FLUSH PRIVILEGES;"
+{
+ echo "RomM-Credentials"
+ echo "RomM Database User: $DB_USER"
+ echo "RomM Database Password: $DB_PASS"
+ echo "RomM Database Name: $DB_NAME"
+} >~/romm.creds
+msg_ok "Configured Database"
+
+fetch_and_deploy_gh_release "romm" "rommapp/romm"
+
+msg_info "Creating environment file"
+sed -i 's/^supervised no/supervised systemd/' /etc/redis/redis.conf
+systemctl restart redis-server
+systemctl enable -q --now redis-server
+AUTH_SECRET_KEY=$(openssl rand -hex 32)
+
+cat <<EOF >/etc/systemd/system/stylus.service
+[Unit]
+Description=Stylus Service
+After=network.target
+
+[Service]
+Type=simple
+ExecStart=stylus run /opt/stylus/
+Restart=on-failure
+RestartSec=5
+
+[Install]
+WantedBy=multi-user.target
+EOF
+
+systemctl enable -q --now stylus
+msg_ok "Created service"
+
+motd_ssh
+customize
+
+msg_info "Cleaning up"
+$STD apt-get -y autoremove
+$STD apt-get -y autoclean
+$STD apt-get -y clean
+msg_ok "Cleaned up"
diff --git a/install/swizzin-install.sh b/install/swizzin-install.sh
deleted file mode 100644
index aff5c85d..00000000
--- a/install/swizzin-install.sh
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/usr/bin/env bash
-
-# Copyright (c) 2021-2025 community-scripts ORG
-# Author: EEJoshua
-# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
-# Source: https://swizzin.ltd/
-
-source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
-color
-verb_ip6
-catch_errors
-setting_up_container
-network_check
-update_os
-
-msg_warn "WARNING: This script will run an external installer from a third-party source (https://swizzin.ltd/)."
-msg_warn "The following code is NOT maintained or audited by our repository."
-msg_warn "If you have any doubts or concerns, please review the installer code before proceeding:"
-msg_custom "${TAB3}${GATEWAY}${BGN}${CL}" "\e[1;34m" "→ https://s5n.sh"
-echo
-read -r -p "${TAB3}Do you want to continue? [y/N]: " CONFIRM
-if [[ ! "$CONFIRM" =~ ^([yY][eE][sS]|[yY])$ ]]; then
- msg_error "Aborted by user. No changes have been made."
- exit 10
-fi
-bash <(curl -sL s5n.sh)
-
-motd_ssh
-customize
-
-msg_info "Cleaning up"
-$STD apt-get -y autoremove
-$STD apt-get -y autoclean
-msg_ok "Cleaned"
diff --git a/install/tracktor-install.sh b/install/tracktor-install.sh
deleted file mode 100644
index d2c5fde5..00000000
--- a/install/tracktor-install.sh
+++ /dev/null
@@ -1,59 +0,0 @@
-#!/usr/bin/env bash
-
-# Copyright (c) 2025 Community Scripts ORG
-# Author: CrazyWolf13
-# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
-# Source: https://tracktor.bytedge.in
-
-source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
-color
-verb_ip6
-catch_errors
-setting_up_container
-network_check
-update_os
-
-setup_nodejs
-fetch_and_deploy_gh_release "tracktor" "javedh-dev/tracktor" "tarball" "latest" "/opt/tracktor"
-
-msg_info "Configuring Tracktor"
-cd /opt/tracktor
-rm package-lock.json
-$STD npm install
-$STD npm run build
-mkdir /opt/tracktor-data
-HOST_IP=$(hostname -I | awk '{print $1}')
-cat <<EOF >/opt/tracktor/app/server/.env
-NODE_ENV=production
-PUBLIC_DEMO_MODE=false
-DB_PATH=/opt/tracktor-data/vehicles.db
-PUBLIC_API_BASE_URL=http://$HOST_IP:3000
-PORT=3000
-EOF
-msg_ok "Configured Tracktor"
-
-msg_info "Creating service"
-cat <<EOF >/etc/systemd/system/tracktor.service
-[Unit]
-Description=Tracktor Service
-After=network.target
-
-[Service]
-Type=simple
-WorkingDirectory=/opt/tracktor
-EnvironmentFile=/opt/tracktor/app/server/.env
-ExecStart=/usr/bin/npm start
-
-[Install]
-WantedBy=multi-user.target
-EOF
-systemctl enable -q --now tracktor
-msg_ok "Created service"
-
-motd_ssh
-customize
-
-msg_info "Cleaning up"
-$STD apt-get -y autoremove
-$STD apt-get -y autoclean
-msg_ok "Cleaned"
diff --git a/install/traefik-install.sh b/install/traefik-install.sh
deleted file mode 100644
index 7507a8a8..00000000
--- a/install/traefik-install.sh
+++ /dev/null
@@ -1,263 +0,0 @@
-#!/usr/bin/env bash
-
-# Copyright (c) 2021-2025 tteck
-# Author: tteck (tteckster)
-# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
-# Source: https://traefik.io/
-
-source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
-color
-verb_ip6
-catch_errors
-setting_up_container
-network_check
-update_os
-
-msg_info "Installing Dependencies"
-$STD apt-get install -y apt-transport-https
-msg_ok "Installed Dependencies"
-
-RELEASE=$(curl -fsSL https://api.github.com/repos/traefik/traefik/releases | grep -oP '"tag_name":\s*"v\K[\d.]+?(?=")' | sort -V | tail -n 1)
-msg_info "Installing Traefik v${RELEASE}"
-mkdir -p /etc/traefik/{conf.d,ssl,sites-available}
-curl -fsSL "https://github.com/traefik/traefik/releases/download/v${RELEASE}/traefik_v${RELEASE}_linux_amd64.tar.gz" -o "traefik_v${RELEASE}_linux_amd64.tar.gz"
-tar -C /tmp -xzf traefik*.tar.gz
-mv /tmp/traefik /usr/bin/
-rm -rf traefik*.tar.gz
-echo "${RELEASE}" >/opt/${APPLICATION}_version.txt
-msg_ok "Installed Traefik v${RELEASE}"
-
-msg_info "Creating Traefik configuration"
-cat <<EOF >/etc/traefik/traefik.yaml
-providers:
- file:
- directory: /etc/traefik/conf.d/
- watch: true
-
-entryPoints:
- web:
- address: ':80'
- http:
- redirections:
- entryPoint:
- to: websecure
- scheme: https
- websecure:
- address: ':443'
- http:
- tls:
- certResolver: letsencrypt
- # Uncomment below if using cloudflare
- /*
- forwardedHeaders:
- trustedIPs:
- - 173.245.48.0/20
- - 103.21.244.0/22
- - 103.22.200.0/22
- - 103.31.101.64/22
- - 141.101.64.0/18
- - 108.162.192.0/18
- - 190.93.240.0/20
- - 188.114.96.0/20
- - 197.234.240.0/22
- - 198.41.128.0/17
- - 162.158.0.0/15
- - 104.16.0.0/13
- - 104.16.0.0/13
- - 172.64.0.0/13
- - 131.0.72.0/22
- */
- asDefault: true
- traefik:
- address: ':8080'
-
-certificatesResolvers:
- letsencrypt:
- acme:
- email: "foo@bar.com"
- storage: /etc/traefik/ssl/acme.json
- tlsChallenge: {}
-
-# Uncomment below if you are using self signed or no certificate
-#serversTransport:
-# insecureSkipVerify: true
-
-api:
- dashboard: true
- insecure: true
-
-log:
- filePath: /var/log/traefik/traefik.log
- format: json
- level: INFO
-
-accessLog:
- filePath: /var/log/traefik/traefik-access.log
- format: json
- filters:
- statusCodes:
- - "200"
- - "400-599"
- retryAttempts: true
- minDuration: "10ms"
- bufferingSize: 0
- fields:
- headers:
- defaultMode: drop
- names:
- User-Agent: keep
-EOF
-msg_ok "Created Traefik configuration"
-
-msg_info "Creating Service"
-cat <<EOF >/etc/systemd/system/traefik.service
-[Unit]
-Description=Traefik is an open-source Edge Router that makes publishing your services a fun and easy experience
-
-[Service]
-Type=notify
-ExecStart=/usr/bin/traefik --configFile=/etc/traefik/traefik.yaml
-Restart=on-failure
-ExecReload=/bin/kill -USR1 \$MAINPID
-
-[Install]
-WantedBy=multi-user.target
-EOF
-
-systemctl enable -q --now traefik
-msg_ok "Created Service"
-
-msg_info "Creating site templates"
-cat <<'EOF' >/etc/traefik/template.yaml.tpl
-http:
- routers:
- ${hostname}:
- rule: Host(`${FQDN}`)
- service: ${hostname}
- tls:
- certResolver: letsencrypt
- services:
- ${hostname}:
- loadbalancer:
- servers:
- - url: "${URL}"
-EOF
-msg_ok "Template Created"
-msg_info "Creating Helper Scripts"
-cat <<'EOF' >/usr/bin/addsite
-#!/bin/bash
-
-function setup_site() {
- hostname="$(whiptail --inputbox "Enter the hostname of the Site" 8 78 --title "Hostname" 3>&1 1>&2 2>&3)"
- exitstatus=$?
- [[ "$exitstatus" = 1 ]] && return;
- FQDN="$(whiptail --inputbox "Enter the FQDN of the Site" 8 78 --title "FQDN" 3>&1 1>&2 2>&3)"
- exitstatus=$?
- [[ "$exitstatus" = 1 ]] && return;
- URL="$(whiptail --inputbox "Enter the URL of the Site (For example http://192.168.x.x:8080)" 8 78 --title "URL" 3>&1 1>&2 2>&3)"
- exitstatus=$?
- [[ "$exitstatus" = 1 ]] && return;
- filename="/etc/traefik/sites-available/${hostname}.yaml"
- export hostname FQDN URL
- envsubst '${hostname} ${FQDN} ${URL}' < /etc/traefik/template.yaml.tpl > "$filename"
-}
-
-setup_site
-EOF
-cat <<'EOF' >/usr/bin/ensite
-#!/bin/bash
-
-function ensite() {
- DIR="/etc/traefik/sites-available"
- files=( "$DIR"/* )
-
- opts=()
- for f in "${files[@]}"; do
- name="${f##*/}"
- opts+=( "$name" "" )
- done
-
- choice=$(whiptail \
- --title "Select an entry" \
- --menu "Choose a site" \
- 20 60 12 \
- "${opts[@]}" \
- 3>&1 1>&2 2>&3)
-
- if [ $? -eq 0 ]; then
- ln -s $DIR/$choice /etc/traefik/conf.d
- else
- return
- fi
-}
-
-ensite
-EOF
-cat <<'EOF' >/usr/bin/dissite
-#!/bin/bash
-
-function dissite() {
- DIR="/etc/traefik/conf.d"
- files=( "$DIR"/* )
-
- opts=()
- for f in "${files[@]}"; do
- name="${f##*/}"
- opts+=( "$name" "" )
- done
-
- choice=$(whiptail \
- --title "Select an entry" \
- --menu "Choose a site" \
- 20 60 12 \
- "${opts[@]}" \
- 3>&1 1>&2 2>&3)
-
- if [ $? -eq 0 ]; then
- rm $DIR/$choice
- else
- return
- fi
-}
-
-dissite
-EOF
-
-cat <<'EOF' >/usr/bin/editsite
-#!/bin/bash
-
-function edit_site() {
- DIR="/etc/traefik/sites-available"
- files=( "$DIR"/* )
-
- opts=()
- for f in "${files[@]}"; do
- name="${f##*/}"
- opts+=( "$name" "" )
- done
-
- choice=$(whiptail \
- --title "Select an entry" \
- --menu "Choose a site" \
- 20 60 12 \
- "${opts[@]}" \
- 3>&1 1>&2 2>&3)
-
- if [ $? -eq 0 ]; then
- nano $DIR/$choice
- else
- return
- fi
-}
-
-edit_site
-EOF
-msg_ok "Helper Scripts Created"
-
-motd_ssh
-customize
-
-msg_info "Cleaning up"
-$STD apt-get -y autoremove
-$STD apt-get -y autoclean
-msg_ok "Cleaned"
diff --git a/install/tunarr-install.sh b/install/tunarr-install.sh
new file mode 100644
index 00000000..854db713
--- /dev/null
+++ b/install/tunarr-install.sh
@@ -0,0 +1,85 @@
+#!/usr/bin/env bash
+
+# Copyright (c) 2021-2025 tteck
+# Author: chrisbenincasa
+# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
+# Source: https://tunarr.com/
+
+source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
+color
+verb_ip6
+catch_errors
+setting_up_container
+network_check
+update_os
+
+msg_info "Setting Up Hardware Acceleration"
+$STD apt-get -y install {va-driver-all,ocl-icd-libopencl1,intel-opencl-icd,vainfo,intel-gpu-tools}
+if [[ "$CTTYPE" == "0" ]]; then
+ chgrp video /dev/dri
+ chmod 755 /dev/dri
+ chmod 660 /dev/dri/*
+ $STD adduser $(id -u -n) video
+ $STD adduser $(id -u -n) render
+fi
+msg_ok "Set Up Hardware Acceleration"
+
+read -r -p "${TAB3}Do you need the intel-media-va-driver-non-free driver for HW encoding (Debian 12 only)? " prompt
+if [[ ${prompt,,} =~ ^(y|yes)$ ]]; then
+ msg_info "Installing Intel Hardware Acceleration (non-free)"
+ cat <<EOF >/etc/apt/sources.list.d/non-free.list
+
+deb http://deb.debian.org/debian bookworm non-free non-free-firmware
+deb-src http://deb.debian.org/debian bookworm non-free non-free-firmware
+
+deb http://deb.debian.org/debian-security bookworm-security non-free non-free-firmware
+deb-src http://deb.debian.org/debian-security bookworm-security non-free non-free-firmware
+
+deb http://deb.debian.org/debian bookworm-updates non-free non-free-firmware
+deb-src http://deb.debian.org/debian bookworm-updates non-free non-free-firmware
+EOF
+ $STD apt-get update
+ $STD apt-get -y install {intel-media-va-driver-non-free,ocl-icd-libopencl1,intel-opencl-icd,vainfo,intel-gpu-tools}
+else
+ msg_info "Installing Intel Hardware Acceleration"
+ $STD apt-get -y install {va-driver-all,ocl-icd-libopencl1,intel-opencl-icd,vainfo,intel-gpu-tools}
+fi
+msg_ok "Installed and Set Up Intel Hardware Acceleration"
+
+fetch_and_deploy_gh_release "tunarr" "chrisbenincasa/tunarr" "singlefile" "latest" "/opt/tunarr" "*linux-x64"
+fetch_and_deploy_gh_release "ersatztv-ffmpeg" "ErsatzTV/ErsatzTV-ffmpeg" "prebuild" "latest" "/opt/ErsatzTV-ffmpeg" "*-linux64-gpl-7.1.tar.xz"
+
+msg_info "Set ErsatzTV-ffmpeg links"
+chmod +x /opt/ErsatzTV-ffmpeg/bin/*
+ln -sf /opt/ErsatzTV-ffmpeg/bin/ffmpeg /usr/bin/ffmpeg
+ln -sf /opt/ErsatzTV-ffmpeg/bin/ffplay /usr/bin/ffplay
+ln -sf /opt/ErsatzTV-ffmpeg/bin/ffprobe /usr/bin/ffprobe
+msg_ok "ffmpeg links set"
+
+msg_info "Creating Service"
+cat <<EOF >/etc/systemd/system/tunarr.service
+[Unit]
+Description=Tunarr Service
+After=multi-user.target
+
+[Service]
+Type=simple
+User=root
+WorkingDirectory=/opt/tunarr
+ExecStart=/opt/tunarr/tunarr
+Restart=always
+RestartSec=30
+
+[Install]
+WantedBy=multi-user.target
+EOF
+systemctl enable -q --now tunarr
+msg_ok "Created Service"
+
+motd_ssh
+customize
+
+msg_info "Cleaning up"
+$STD apt-get -y autoremove
+$STD apt-get -y autoclean
+msg_ok "Cleaned"
diff --git a/install/twingate-connector-install.sh b/install/twingate-connector-install.sh
deleted file mode 100644
index c6027854..00000000
--- a/install/twingate-connector-install.sh
+++ /dev/null
@@ -1,63 +0,0 @@
-#!/usr/bin/env bash
-
-# Copyright (c) 2021-2025 community-scripts ORG
-# Author: MickLesk (CanbiZ), twingate-andrewb
-# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
-# Source: https://www.twingate.com/docs/
-
-source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
-color
-verb_ip6
-catch_errors
-setting_up_container
-network_check
-update_os
-
-install -d -m 0700 /etc/twingate
-
-access_token=""
-refresh_token=""
-network=""
-
-while [[ -z "$access_token" ]]; do
- read -rp "${TAB3}Please enter your access token: " access_token
-done
-while [[ -z "$refresh_token" ]]; do
- read -rp "${TAB3}Please enter your refresh token: " refresh_token
-done
-while [[ -z "$network" ]]; do
- read -rp "${TAB3}Please enter your network name: " network
-done
-
-msg_info "Setup Twingate Repository"
-curl -fsSL "https://packages.twingate.com/apt/gpg.key" | gpg --dearmor -o /usr/share/keyrings/twingate-connector-keyring.gpg
-echo "deb [signed-by=/usr/share/keyrings/twingate-connector-keyring.gpg] https://packages.twingate.com/apt/ /" > /etc/apt/sources.list.d/twingate.list
-$STD apt-get update
-msg_ok "Setup Twingate Repository"
-
-msg_info "Setup Twingate Connector"
-$STD apt-get install -y twingate-connector
-msg_ok "Setup Twingate Connector"
-
-msg_info "Writing config"
-{
- echo "TWINGATE_NETWORK=${network}"
- echo "TWINGATE_ACCESS_TOKEN=${access_token}"
- echo "TWINGATE_REFRESH_TOKEN=${refresh_token}"
- echo "TWINGATE_LABEL_HOSTNAME=$(hostname)"
- echo "TWINGATE_LABEL_DEPLOYED_BY=proxmox"
-} > /etc/twingate/connector.conf
-chmod 600 /etc/twingate/connector.conf
-msg_ok "Config written"
-
-msg_info "Starting Service"
-systemctl enable -q --now twingate-connector
-msg_ok "Service started"
-
-motd_ssh
-customize
-
-msg_info "Cleaning up"
-$STD apt-get -y autoremove
-$STD apt-get -y autoclean
-msg_ok "Done cleaning up"
diff --git a/install/ubuntu-install.sh b/install/ubuntu-install.sh
index ed5dc817..97283d83 100644
--- a/install/ubuntu-install.sh
+++ b/install/ubuntu-install.sh
@@ -17,102 +17,6 @@ msg_info "Installing Dependencies"
$STD apt-get install -y jq
msg_ok "Installed Dependencies"
-# echo "Getting aceberg/WatchYourLAN..."
-# fetch_and_deploy_gh_release aceberg/WatchYourLAN
-# echo "Got Version: $RELEASE"
-
-# echo "Getting actualbudget/actual..."
-# RELEASE=$(get_gh_release actualbudget/actual)
-# echo "Got Version: $RELEASE"
-
-# echo "Getting agl/jbig2enc..."
-# RELEASE=$(get_gh_release agl/jbig2enc)
-# echo "Got Version: $RELEASE"
-
-# echo "Getting alexta69/metube..."
-# RELEASE=$(get_gh_release alexta69/metube)
-# echo "Got Version: $RELEASE"
-
-# echo "Getting AlexxIT/go2rtc..."
-# RELEASE=$(get_gh_release AlexxIT/go2rtc)
-# echo "Got Version: $RELEASE"
-
-# echo "Getting apache/tika..."
-# RELEASE=$(get_gh_release apache/tika)
-# echo "Got Version: $RELEASE"
-
-# echo "Getting ArtifexSoftware/ghostpdl-downloads..."
-# RELEASE=$(get_gh_release ArtifexSoftware/ghostpdl-downloads)
-# echo "Got Version: $RELEASE"
-
-# echo "Getting Athou/commafeed..."
-# RELEASE=$(get_gh_release Athou/commafeed)
-# echo "Got Version: $RELEASE"
-
-# echo "Getting authelia/authelia..."
-# RELEASE=$(get_gh_release authelia/authelia)
-# echo "Got Version: $RELEASE"
-
-# echo "Getting azukaar/Cosmos-Server..."
-# RELEASE=$(get_gh_release azukaar/Cosmos-Server)
-# echo "Got Version: $RELEASE"
-
-# echo "Getting bastienwirtz/homer..."
-# RELEASE=$(get_gh_release bastienwirtz/homer)
-# echo "Got Version: $RELEASE"
-
-# echo "Getting benjaminjonard/koillection..."
-# RELEASE=$(get_gh_release benjaminjonard/koillection)
-# echo "Got Version: $RELEASE"
-
-# echo "Getting benzino77/tasmocompiler..."
-# RELEASE=$(get_gh_release benzino77/tasmocompiler)
-# echo "Got Version: $RELEASE"
-
-# echo "Getting blakeblackshear/frigate..."
-# RELEASE=$(get_gh_release blakeblackshear/frigate)
-# echo "Got Version: $RELEASE"
-
-# echo "Getting bluenviron/mediamtx..."
-# RELEASE=$(get_gh_release bluenviron/mediamtx)
-# echo "Got Version: $RELEASE"
-
-# echo "Getting BookStackApp/BookStack..."
-# RELEASE=$(get_gh_release BookStackApp/BookStack)
-# echo "Got Version: $RELEASE"
-
-# echo "Getting browserless/chrome..."
-# RELEASE=$(get_gh_release browserless/chrome)
-# echo "Got Version: $RELEASE"
-
-# echo "Getting Bubka/2FAuth..."
-# RELEASE=$(get_gh_release Bubka/2FAuth)
-# echo "Got Version: $RELEASE"
-
-# echo "Getting caddyserver/xcaddy..."
-# RELEASE=$(get_gh_release caddyserver/xcaddy)
-# echo "Got Version: $RELEASE"
-
-# echo "Getting clusterzx/paperless-ai..."
-# RELEASE=$(get_gh_release clusterzx/paperless-ai)
-# echo "Got Version: $RELEASE"
-
-# echo "Getting cockpit-project/cockpit..."
-# RELEASE=$(get_gh_release cockpit-project/cockpit)
-# echo "Got Version: $RELEASE"
-
-# echo "Getting community-scripts/ProxmoxVE..."
-# RELEASE=$(get_gh_release community-scripts/ProxmoxVE)
-# echo "Got Version: $RELEASE"
-
-# echo "Getting CorentinTh/it-tools..."
-# RELEASE=$(get_gh_release CorentinTh/it-tools)
-# echo "Got Version: $RELEASE"
-
-# echo "Getting dani-garcia/bw_web_builds..."
-# RELEASE=$(get_gh_release dani-garcia/bw_web_builds)
-# echo "Got Version: $RELEASE"
-
motd_ssh
customize
diff --git a/install/uhf-install.sh b/install/uhf-install.sh
index 8b9ddf84..e414878c 100644
--- a/install/uhf-install.sh
+++ b/install/uhf-install.sh
@@ -14,7 +14,7 @@ network_check
update_os
msg_info "Installing Dependencies"
-setup_ffmpeg
+$STD apt install -y ffmpeg
msg_ok "Installed Dependencies"
msg_info "Setting Up UHF Server Environment"
@@ -34,9 +34,8 @@ fetch_and_deploy_gh_release "comskip" "swapplications/comskip" "prebuild" "lates
fetch_and_deploy_gh_release "uhf-server" "swapplications/uhf-server-dist" "prebuild" "latest" "/opt/uhf-server" "UHF.Server-linux-x64-*.zip"
msg_info "Creating Service"
-service_path=""
cat <<EOF >/etc/systemd/system/uhf-server.service
-echo "[Unit]
+[Unit]
Description=UHF Server service
After=syslog.target network-online.target
[Service]
@@ -47,7 +46,7 @@ ExecStart=/opt/uhf-server/uhf-server
[Install]
WantedBy=multi-user.target
EOF
-systemctl enable --now -q uhf-server.service
+systemctl enable -q --now uhf-server
msg_ok "Created Service"
motd_ssh
diff --git a/install/viseron-install.sh b/install/viseron-install.sh
index 7253f90c..f15a0f42 100644
--- a/install/viseron-install.sh
+++ b/install/viseron-install.sh
@@ -13,20 +13,36 @@ setting_up_container
network_check
update_os
-PYTHON_VERSION="3.12" setup_uv
-
msg_info "Installing Dependencies"
$STD apt-get install -y \
- python3 python3-pip python3-venv \
python3-opencv jq \
libgl1-mesa-glx libglib2.0-0 \
libgstreamer1.0-0 libgstreamer-plugins-base1.0-0 \
gstreamer1.0-plugins-good gstreamer1.0-plugins-bad gstreamer1.0-libav \
build-essential python3-dev python3-gi pkg-config libcairo2-dev gir1.2-glib-2.0 \
- cmake gfortran libopenblas-dev liblapack-dev libgirepository1.0-dev
-
+ cmake gfortran libopenblas-dev liblapack-dev libgirepository1.0-dev git
msg_ok "Installed Dependencies"
+PYTHON_VERSION="3.12" setup_uv
+PG_VERSION="16" setup_postgresql
+
+msg_info "Setting up PostgreSQL Database"
+DB_NAME=viseron
+DB_USER=viseron_usr
+DB_PASS="$(openssl rand -base64 18 | cut -c1-13)"
+$STD sudo -u postgres psql -c "CREATE ROLE $DB_USER WITH LOGIN PASSWORD '$DB_PASS';"
+$STD sudo -u postgres psql -c "CREATE DATABASE $DB_NAME WITH OWNER $DB_USER ENCODING 'UTF8' TEMPLATE template0;"
+$STD sudo -u postgres psql -c "ALTER ROLE $DB_USER SET client_encoding TO 'utf8';"
+$STD sudo -u postgres psql -c "ALTER ROLE $DB_USER SET default_transaction_isolation TO 'read committed';"
+$STD sudo -u postgres psql -c "ALTER ROLE $DB_USER SET timezone TO 'UTC';"
+{
+ echo "Viseron-Credentials"
+ echo "Viseron Database User: $DB_USER"
+ echo "Viseron Database Password: $DB_PASS"
+ echo "Viseron Database Name: $DB_NAME"
+} >>~/viseron.creds
+msg_ok "Set up PostgreSQL Database"
+
# msg_info "Setting up Hardware Acceleration"
# if [[ "$CTTYPE" == "0" ]]; then
# chgrp video /dev/dri
@@ -35,21 +51,32 @@ msg_ok "Installed Dependencies"
# fi
# msg_ok "Hardware Acceleration Configured"
+# setup_uv already invoked above with PYTHON_VERSION="3.12" — duplicate call removed
fetch_and_deploy_gh_release "viseron" "roflcoopter/viseron" "tarball" "latest" "/opt/viseron"
-msg_info "Setting up Viseron (Patience)"
-cd /opt/viseron
-uv venv .venv
-$STD uv pip install --upgrade pip setuptools wheel
-$STD uv pip install -r requirements.txt --python /opt/viseron/.venv/bin/python
-ln -s /opt/viseron/.venv/bin/viseron /usr/local/bin/viseron
-msg_ok "Setup Viseron"
+msg_info "Setting up Python Environment"
+uv venv --python "python3.12" /opt/viseron/.venv
+uv pip install --python /opt/viseron/.venv/bin/python --upgrade pip setuptools wheel
+msg_ok "Python Environment Setup"
-msg_info "Creating Configuration Directory"
-mkdir -p /config
-mkdir -p /config/recordings
-mkdir -p /config/logs
-msg_ok "Created Configuration Directory"
+msg_info "Setup Viseron (Patience)"
+if ls /dev/nvidia* >/dev/null 2>&1; then
+ msg_info "GPU detected → Installing PyTorch with CUDA"
+ UV_HTTP_TIMEOUT=600 uv pip install --python /opt/viseron/.venv/bin/python \
+ torch==2.8.0 torchvision==0.23.0 torchaudio==2.8.0
+ msg_ok "Installed Torch with CUDA"
+else
+ msg_info "No GPU detected → Installing CPU-only PyTorch"
+ UV_HTTP_TIMEOUT=600 uv pip install --python /opt/viseron/.venv/bin/python \
+ torch==2.8.0+cpu torchvision==0.23.0+cpu torchaudio==2.8.0+cpu \
+ --extra-index-url https://download.pytorch.org/whl/cpu
+ msg_ok "Installed Torch CPU-only"
+fi
+UV_HTTP_TIMEOUT=600 uv pip install --python /opt/viseron/.venv/bin/python -e /opt/viseron/.
+UV_HTTP_TIMEOUT=600 uv pip install --python /opt/viseron/.venv/bin/python -r /opt/viseron/requirements.txt
+mkdir -p /config/{recordings,snapshots,segments,event_clips,thumbnails}
+for d in recordings snapshots segments event_clips thumbnails; do ln -s "/config/$d" "/$d" 2>/dev/null || true; done
+msg_ok "Setup Viseron"
msg_info "Creating Default Configuration"
cat <<EOF >/config/viseron.yaml
@@ -103,6 +130,14 @@ motion_detection:
enabled: true
threshold: 25
sensitivity: 0.8
+
+storage:
+ connection_string: postgresql://$DB_USER:$DB_PASS@localhost:5432/$DB_NAME
+ recordings: /recordings
+ snapshots: /snapshots
+ segments: /segments
+ event_clips: /event_clips
+ thumbnails: /thumbnails
EOF
msg_ok "Created Default Configuration"
@@ -117,7 +152,7 @@ Type=simple
User=root
WorkingDirectory=/opt/viseron
Environment=PATH=/opt/viseron/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
-ExecStart=/opt/viseron/.venv/bin/viseron --config /config/viseron.yaml
+ExecStart=/opt/viseron/.venv/bin/python -m viseron --config /config/viseron.yaml
Restart=always
RestartSec=10
diff --git a/misc/alpine-install.func b/misc/alpine-install.func
index 85c3c2a1..906d5b14 100644
--- a/misc/alpine-install.func
+++ b/misc/alpine-install.func
@@ -83,6 +83,7 @@ network_check() {
update_os() {
msg_info "Updating Container OS"
$STD apk update && $STD apk upgrade
+ source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/alpine-tools.func)
msg_ok "Updated Container OS"
}
diff --git a/misc/alpine-tools.func b/misc/alpine-tools.func
new file mode 100644
index 00000000..b03f4f08
--- /dev/null
+++ b/misc/alpine-tools.func
@@ -0,0 +1,535 @@
+#!/bin/ash
+# shellcheck shell=ash
+
+# Erwartet vorhandene msg_* und optional $STD aus deinem Framework.
+
+# ------------------------------
+# kleine Helfer
+# ------------------------------
+lower() { printf '%s' "$1" | tr '[:upper:]' '[:lower:]'; }
+has() { command -v "$1" >/dev/null 2>&1; }
+
+need_tool() {
+ # usage: need_tool curl jq unzip ...
+ # installiert fehlende Tools via apk --no-cache
+ local missing=0 t
+ for t in "$@"; do
+ if ! has "$t"; then missing=1; fi
+ done
+ if [ "$missing" -eq 1 ]; then
+ msg_info "Installing tools: $*"
+ # busybox 'apk' ist vorhanden auf Alpine
+ apk add --no-cache "$@" >/dev/null 2>&1 || {
+ msg_error "apk add failed for: $*"
+ return 1
+ }
+ msg_ok "Tools ready: $*"
+ fi
+}
+
+net_resolves() {
+ # robust gegen fehlendes getent auf busybox
+ # usage: net_resolves api.github.com
+ local host="$1"
+ ping -c1 -W1 "$host" >/dev/null 2>&1 || nslookup "$host" >/dev/null 2>&1
+}
+
+ensure_usr_local_bin_persist() {
+ local PROFILE_FILE="/etc/profile.d/10-localbin.sh"
+ if [ ! -f "$PROFILE_FILE" ]; then
+ echo 'case ":$PATH:" in *:/usr/local/bin:*) ;; *) export PATH="/usr/local/bin:$PATH";; esac' >"$PROFILE_FILE"
+ chmod +x "$PROFILE_FILE"
+ fi
+}
+
+download_with_progress() {
+ # $1 url, $2 dest
+ local url="$1" out="$2" cl
+ need_tool curl pv || return 1
+ cl=$(curl -fsSLI "$url" 2>/dev/null | awk 'tolower($0) ~ /^content-length:/ {print $2}' | tr -d '\r')
+ if [ -n "$cl" ]; then
+ curl -fsSL "$url" | pv -s "$cl" >"$out" || {
+ msg_error "Download failed: $url"
+ return 1
+ }
+ else
+ curl -fL# -o "$out" "$url" || {
+ msg_error "Download failed: $url"
+ return 1
+ }
+ fi
+}
+
+# ------------------------------
+# GitHub: Release prüfen
+# ------------------------------
+check_for_gh_release() {
+ # app, repo, [pinned]
+ local app="$1" source="$2" pinned="${3:-}"
+ local app_lc
+ app_lc="$(lower "$app" | tr -d ' ')"
+ local current_file="$HOME/.${app_lc}"
+ local current="" release tag
+
+ msg_info "Check for update: $app"
+
+ net_resolves api.github.com || {
+ msg_error "DNS/network error: api.github.com"
+ return 1
+ }
+ need_tool curl jq || return 1
+
+ tag=$(curl -fsSL "https://api.github.com/repos/${source}/releases/latest" | jq -r '.tag_name // empty')
+ [ -z "$tag" ] && {
+ msg_error "Unable to fetch latest tag for $app"
+ return 1
+ }
+ release="${tag#v}"
+
+ [ -f "$current_file" ] && current="$(cat "$current_file")"
+
+ if [ -n "$pinned" ]; then
+ if [ "$pinned" = "$release" ]; then
+ msg_ok "$app pinned to v$pinned (no update)"
+ return 1
+ fi
+ if [ "$current" = "$pinned" ]; then
+ msg_ok "$app pinned v$pinned installed (upstream v$release)"
+ return 1
+ fi
+ msg_info "$app pinned v$pinned (upstream v$release) → update/downgrade"
+ CHECK_UPDATE_RELEASE="$pinned"
+ return 0
+ fi
+
+ if [ "$release" != "$current" ] || [ ! -f "$current_file" ]; then
+ CHECK_UPDATE_RELEASE="$release"
+ msg_info "New release available: v$release (current: v${current:-none})"
+ return 0
+ fi
+
+ msg_ok "$app is up to date (v$release)"
+ return 1
+}
+
+# ------------------------------
+# GitHub: Release holen & deployen (Alpine)
+# modes: tarball | prebuild | singlefile
+# ------------------------------
+fetch_and_deploy_gh() {
+ # $1 app, $2 repo, [$3 mode], [$4 version], [$5 target], [$6 asset_pattern]
+ local app="$1" repo="$2" mode="${3:-tarball}" version="${4:-latest}" target="${5:-/opt/$1}" pattern="${6:-}"
+ local app_lc
+ app_lc="$(lower "$app" | tr -d ' ')"
+ local vfile="$HOME/.${app_lc}"
+ local json url filename tmpd unpack
+
+ net_resolves api.github.com || {
+ msg_error "DNS/network error"
+ return 1
+ }
+ need_tool curl jq tar || return 1
+ [ "$mode" = "prebuild" ] || [ "$mode" = "singlefile" ] && need_tool unzip >/dev/null 2>&1 || true
+
+ tmpd="$(mktemp -d)" || return 1
+ mkdir -p "$target"
+
+ # Release JSON
+ if [ "$version" = "latest" ]; then
+ json="$(curl -fsSL "https://api.github.com/repos/$repo/releases/latest")" || {
+ msg_error "GitHub API failed"
+ rm -rf "$tmpd"
+ return 1
+ }
+ else
+ json="$(curl -fsSL "https://api.github.com/repos/$repo/releases/tags/$version")" || {
+ msg_error "GitHub API failed"
+ rm -rf "$tmpd"
+ return 1
+ }
+ fi
+
+ # Effektive Version
+ version="$(printf '%s' "$json" | jq -r '.tag_name // empty')"
+ version="${version#v}"
+
+ [ -z "$version" ] && {
+ msg_error "No tag in release json"
+ rm -rf "$tmpd"
+ return 1
+ }
+
+ case "$mode" in
+ tarball | source)
+ url="$(printf '%s' "$json" | jq -r '.tarball_url // empty')"
+ [ -z "$url" ] && url="https://github.com/$repo/archive/refs/tags/v$version.tar.gz"
+ filename="${app_lc}-${version}.tar.gz"
+ download_with_progress "$url" "$tmpd/$filename" || {
+ rm -rf "$tmpd"
+ return 1
+ }
+ tar -xzf "$tmpd/$filename" -C "$tmpd" || {
+ msg_error "tar extract failed"
+ rm -rf "$tmpd"
+ return 1
+ }
+ unpack="$(find "$tmpd" -mindepth 1 -maxdepth 1 -type d | head -n1)"
+ # Inhalte nach target kopieren (inkl. dotfiles)
+ (cd "$unpack" && tar -cf - .) | (cd "$target" && tar -xf -) || {
+ msg_error "copy failed"
+ rm -rf "$tmpd"
+ return 1
+ }
+ ;;
+ prebuild)
+ [ -n "$pattern" ] || {
+ msg_error "prebuild requires asset pattern"
+ rm -rf "$tmpd"
+ return 1
+ }
+ url="$(printf '%s' "$json" | jq -r '.assets[].browser_download_url' | awk -v p="$pattern" '
+ BEGIN{IGNORECASE=1}
+ $0 ~ p {print; exit}
+ ')"
+ [ -z "$url" ] && {
+ msg_error "asset not found for pattern: $pattern"
+ rm -rf "$tmpd"
+ return 1
+ }
+ filename="${url##*/}"
+ download_with_progress "$url" "$tmpd/$filename" || {
+ rm -rf "$tmpd"
+ return 1
+ }
+ # entpacken je nach Format
+ case "$filename" in
+ *.zip)
+ need_tool unzip || {
+ rm -rf "$tmpd"
+ return 1
+ }
+ mkdir -p "$tmpd/unp"
+ unzip -q "$tmpd/$filename" -d "$tmpd/unp"
+ ;;
+ *.tar.gz | *.tgz | *.tar.xz | *.tar.zst | *.tar.bz2)
+ mkdir -p "$tmpd/unp"
+ tar -xf "$tmpd/$filename" -C "$tmpd/unp"
+ ;;
+ *)
+ msg_error "unsupported archive: $filename"
+ rm -rf "$tmpd"
+ return 1
+ ;;
+ esac
+ # top-level folder ggf. strippen
+ if [ "$(find "$tmpd/unp" -mindepth 1 -maxdepth 1 -type d | wc -l)" -eq 1 ] && [ -z "$(find "$tmpd/unp" -mindepth 1 -maxdepth 1 -type f | head -n1)" ]; then
+ unpack="$(find "$tmpd/unp" -mindepth 1 -maxdepth 1 -type d)"
+ (cd "$unpack" && tar -cf - .) | (cd "$target" && tar -xf -) || {
+ msg_error "copy failed"
+ rm -rf "$tmpd"
+ return 1
+ }
+ else
+ (cd "$tmpd/unp" && tar -cf - .) | (cd "$target" && tar -xf -) || {
+ msg_error "copy failed"
+ rm -rf "$tmpd"
+ return 1
+ }
+ fi
+ ;;
+ singlefile)
+ [ -n "$pattern" ] || {
+ msg_error "singlefile requires asset pattern"
+ rm -rf "$tmpd"
+ return 1
+ }
+ url="$(printf '%s' "$json" | jq -r '.assets[].browser_download_url' | awk -v p="$pattern" '
+ BEGIN{IGNORECASE=1}
+ $0 ~ p {print; exit}
+ ')"
+ [ -z "$url" ] && {
+ msg_error "asset not found for pattern: $pattern"
+ rm -rf "$tmpd"
+ return 1
+ }
+ filename="${url##*/}"
+ download_with_progress "$url" "$target/$app" || {
+ rm -rf "$tmpd"
+ return 1
+ }
+ chmod +x "$target/$app"
+ ;;
+ *)
+ msg_error "Unknown mode: $mode"
+ rm -rf "$tmpd"
+ return 1
+ ;;
+ esac
+
+ echo "$version" >"$vfile"
+ ensure_usr_local_bin_persist
+ rm -rf "$tmpd"
+ msg_ok "Deployed $app ($version) → $target"
+}
+
+# ------------------------------
+# yq (mikefarah) – Alpine
+# ------------------------------
+setup_yq() {
+ # bevorzugt apk, optional FORCE_GH=1 → GitHub Binary
+ if [ "${FORCE_GH:-0}" != "1" ] && apk info -e yq >/dev/null 2>&1; then
+ msg_info "Updating yq via apk"
+ apk add --no-cache --upgrade yq >/dev/null 2>&1 || true
+ msg_ok "yq ready ($(yq --version 2>/dev/null))"
+ return 0
+ fi
+
+ need_tool curl || return 1
+ local arch bin url tmp
+ case "$(uname -m)" in
+ x86_64) arch="amd64" ;;
+ aarch64) arch="arm64" ;;
+ *)
+ msg_error "Unsupported arch for yq: $(uname -m)"
+ return 1
+ ;;
+ esac
+ url="https://github.com/mikefarah/yq/releases/latest/download/yq_linux_${arch}"
+ tmp="$(mktemp)"
+ download_with_progress "$url" "$tmp" || return 1
+ install -m 0755 "$tmp" /usr/local/bin/yq
+ rm -f "$tmp"
+ msg_ok "Setup yq ($(yq --version 2>/dev/null))"
+}
+
+# ------------------------------
+# Adminer – Alpine
+# ------------------------------
+setup_adminer() {
+ need_tool curl || return 1
+ msg_info "Setup Adminer (Alpine)"
+ mkdir -p /var/www/localhost/htdocs/adminer
+ curl -fsSL https://github.com/vrana/adminer/releases/latest/download/adminer.php \
+ -o /var/www/localhost/htdocs/adminer/index.php || {
+ msg_error "Adminer download failed"
+ return 1
+ }
+ msg_ok "Adminer at /adminer (served by your webserver)"
+}
+
+# ------------------------------
+# uv – Alpine (musl tarball)
+# Optional: PYTHON_VERSION="3.12"
+# ------------------------------
+setup_uv() {
+ need_tool curl tar || return 1
+ local UV_BIN="/usr/local/bin/uv"
+ local arch tarball url tmpd ver installed
+
+ case "$(uname -m)" in
+ x86_64) arch="x86_64-unknown-linux-musl" ;;
+ aarch64) arch="aarch64-unknown-linux-musl" ;;
+ *)
+ msg_error "Unsupported arch for uv: $(uname -m)"
+ return 1
+ ;;
+ esac
+
+ ver="$(curl -fsSL https://api.github.com/repos/astral-sh/uv/releases/latest | jq -r '.tag_name' 2>/dev/null)"
+ ver="${ver#v}"
+ [ -z "$ver" ] && {
+ msg_error "uv: cannot determine latest version"
+ return 1
+ }
+
+ if has "$UV_BIN"; then
+ installed="$($UV_BIN -V 2>/dev/null | awk '{print $2}')"
+ [ "$installed" = "$ver" ] && {
+ msg_ok "uv $ver already installed"
+ return 0
+ }
+ msg_info "Updating uv $installed → $ver"
+ else
+ msg_info "Setup uv $ver"
+ fi
+
+ tmpd="$(mktemp -d)" || return 1
+ tarball="uv-${arch}.tar.gz"
+ url="https://github.com/astral-sh/uv/releases/download/v${ver}/${tarball}"
+
+ download_with_progress "$url" "$tmpd/uv.tar.gz" || {
+ rm -rf "$tmpd"
+ return 1
+ }
+ tar -xzf "$tmpd/uv.tar.gz" -C "$tmpd" || {
+ msg_error "uv: extract failed"
+ rm -rf "$tmpd"
+ return 1
+ }
+
+ # tar enthält ./uv
+ if [ -x "$tmpd/uv" ]; then
+ install -m 0755 "$tmpd/uv" "$UV_BIN"
+ else
+ # fallback: in Unterordner
+ install -m 0755 "$tmpd"/*/uv "$UV_BIN" 2>/dev/null || {
+ msg_error "uv binary not found in tar"
+ rm -rf "$tmpd"
+ return 1
+ }
+ fi
+ rm -rf "$tmpd"
+ ensure_usr_local_bin_persist
+ msg_ok "Setup uv $ver"
+
+ if [ -n "${PYTHON_VERSION:-}" ]; then
+ # uv liefert cpython builds für musl; den neuesten Patchstand finden:
+ local match
+ match="$(uv python list --only-downloads 2>/dev/null | awk -v maj="$PYTHON_VERSION" '
+ $0 ~ "^cpython-"maj"\\." { print $0 }' | awk -F- '{print $2}' | sort -V | tail -n1)"
+ [ -z "$match" ] && {
+ msg_error "No matching Python for $PYTHON_VERSION"
+ return 1
+ }
+ if ! uv python list | grep -q "cpython-${match}-linux"; then
+ msg_info "Installing Python $match via uv"
+ uv python install "$match" || {
+ msg_error "uv python install failed"
+ return 1
+ }
+ msg_ok "Python $match installed (uv)"
+ fi
+ fi
+}
+
+# ------------------------------
+# Java – Alpine (OpenJDK)
+# JAVA_VERSION: 17|21 (Default 21)
+# ------------------------------
+setup_java() {
+ local JAVA_VERSION="${JAVA_VERSION:-21}" pkg
+ case "$JAVA_VERSION" in
+ 17) pkg="openjdk17-jdk" ;;
+ 21 | *) pkg="openjdk21-jdk" ;;
+ esac
+ msg_info "Setup Java (OpenJDK $JAVA_VERSION)"
+ apk add --no-cache "$pkg" >/dev/null 2>&1 || {
+ msg_error "apk add $pkg failed"
+ return 1
+ }
+ # JAVA_HOME setzen
+ local prof="/etc/profile.d/20-java.sh"
+ if [ ! -f "$prof" ]; then
+ echo 'export JAVA_HOME=$(dirname $(dirname $(readlink -f $(command -v java))))' >"$prof"
+ echo 'case ":$PATH:" in *:$JAVA_HOME/bin:*) ;; *) export PATH="$JAVA_HOME/bin:$PATH";; esac' >>"$prof"
+ chmod +x "$prof"
+ fi
+ msg_ok "Java ready: $(java -version 2>&1 | head -n1)"
+}
+
+# ------------------------------
+# Go – Alpine (apk bevorzugt; optional GO_VERSION tarball)
+# ------------------------------
+setup_go() {
+ if [ -z "${GO_VERSION:-}" ]; then
+ msg_info "Setup Go (apk)"
+ apk add --no-cache go >/dev/null 2>&1 || {
+ msg_error "apk add go failed"
+ return 1
+ }
+ msg_ok "Go ready: $(go version 2>/dev/null)"
+ return 0
+ fi
+
+ # explizite Version via offizielles tar.gz
+ need_tool curl tar || return 1
+ local ARCH TARBALL URL TMP
+ case "$(uname -m)" in
+ x86_64) ARCH="amd64" ;;
+ aarch64) ARCH="arm64" ;;
+ *)
+ msg_error "Unsupported arch for Go: $(uname -m)"
+ return 1
+ ;;
+ esac
+ TARBALL="go${GO_VERSION}.linux-${ARCH}.tar.gz"
+ URL="https://go.dev/dl/${TARBALL}"
+ msg_info "Setup Go $GO_VERSION (tarball)"
+ TMP="$(mktemp)"
+ download_with_progress "$URL" "$TMP" || return 1
+ rm -rf /usr/local/go
+ tar -C /usr/local -xzf "$TMP" || {
+ msg_error "extract go failed"
+ rm -f "$TMP"
+ return 1
+ }
+ rm -f "$TMP"
+ ln -sf /usr/local/go/bin/go /usr/local/bin/go
+ ln -sf /usr/local/go/bin/gofmt /usr/local/bin/gofmt
+ ensure_usr_local_bin_persist
+ msg_ok "Go ready: $(go version 2>/dev/null)"
+}
+
+# ------------------------------
+# Composer – Alpine
+# nutzt php83-cli + openssl + phar
+# ------------------------------
+setup_composer() {
+ local COMPOSER_BIN="/usr/local/bin/composer"
+ if ! has php; then
+ # bevorzugt php83 auf Alpine 3.20/3.21+
+ msg_info "Installing PHP CLI for Composer"
+ apk add --no-cache php83-cli php83-openssl php83-phar php83-iconv >/dev/null 2>&1 || {
+ # Fallback auf generisches php-cli
+ apk add --no-cache php-cli php-openssl php-phar php-iconv >/dev/null 2>&1 || {
+ msg_error "Failed to install php-cli for composer"
+ return 1
+ }
+ }
+ fi
+
+ if [ -x "$COMPOSER_BIN" ]; then
+ msg_info "Updating Composer"
+ else
+ msg_info "Setup Composer"
+ fi
+
+ need_tool curl || return 1
+ curl -fsSL https://getcomposer.org/installer -o /tmp/composer-setup.php || {
+ msg_error "composer installer download failed"
+ return 1
+ }
+ php /tmp/composer-setup.php --install-dir=/usr/local/bin --filename=composer >/dev/null 2>&1 || {
+ msg_error "composer install failed"
+ return 1
+ }
+ rm -f /tmp/composer-setup.php
+ ensure_usr_local_bin_persist
+ msg_ok "Composer ready: $(composer --version 2>/dev/null)"
+}
+
+# ------------------------------
+# Adminer/uv/go/java/yq/composer stehen oben
+# ------------------------------
+
+# ------------------------------
+# (Optional) LOCAL_IP import – POSIX-safe
+# ------------------------------
+import_local_ip() {
+ # lädt LOCAL_IP aus /run/local-ip.env oder ermittelt es best effort
+ local IP_FILE="/run/local-ip.env"
+ if [ -f "$IP_FILE" ]; then
+ # shellcheck disable=SC1090
+ . "$IP_FILE"
+ fi
+ if [ -z "${LOCAL_IP:-}" ]; then
+ LOCAL_IP="$(ip route get 1 2>/dev/null | awk '{for(i=1;i<=NF;i++) if($i=="src"){print $(i+1); exit}}')"
+ [ -z "$LOCAL_IP" ] && LOCAL_IP="$(hostname -i 2>/dev/null | awk '{print $1}')"
+ [ -z "$LOCAL_IP" ] && {
+ msg_error "Could not determine LOCAL_IP"
+ return 1
+ }
+ echo "LOCAL_IP=$LOCAL_IP" >"$IP_FILE"
+ fi
+ export LOCAL_IP
+}
diff --git a/misc/build.func b/misc/build.func
index c40a57bd..a9889913 100644
--- a/misc/build.func
+++ b/misc/build.func
@@ -13,7 +13,7 @@ variables() {
DIAGNOSTICS="yes" # sets the DIAGNOSTICS variable to "yes", used for the API call.
METHOD="default" # sets the METHOD variable to "default", used for the API call.
RANDOM_UUID="$(cat /proc/sys/kernel/random/uuid)" # generates a random UUID and sets it to the RANDOM_UUID variable.
- CT_TYPE=${var_unprivileged:-$CT_TYPE}
+ #CT_TYPE=${var_unprivileged:-$CT_TYPE}
}
source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/api.func)
@@ -214,92 +214,29 @@ ssh_check() {
fi
}
-# select_storage() {
-# local CLASS=$1 CONTENT CONTENT_LABEL
-# case $CLASS in
-# container)
-# CONTENT='rootdir'
-# CONTENT_LABEL='Container'
-# ;;
-# template)
-# CONTENT='vztmpl'
-# CONTENT_LABEL='Template'
-# ;;
-# iso)
-# CONTENT='iso'
-# CONTENT_LABEL='ISO image'
-# ;;
-# images)
-# CONTENT='images'
-# CONTENT_LABEL='VM Disk image'
-# ;;
-# backup)
-# CONTENT='backup'
-# CONTENT_LABEL='Backup'
-# ;;
-# snippets)
-# CONTENT='snippets'
-# CONTENT_LABEL='Snippets'
-# ;;
-# *)
-# msg_error "Invalid storage class '$CLASS'."
-# exit 201
-# ;;
-# esac
+install_ssh_keys_into_ct() {
+ [[ "$SSH" != "yes" ]] && return 0
-# command -v whiptail >/dev/null || {
-# msg_error "whiptail missing."
-# exit 220
-# }
-# command -v numfmt >/dev/null || {
-# msg_error "numfmt missing."
-# exit 221
-# }
+ if [[ -n "$SSH_KEYS_FILE" && -s "$SSH_KEYS_FILE" ]]; then
+ msg_info "Installing selected SSH keys into CT ${CTID}"
+ pct exec "$CTID" -- sh -c 'mkdir -p /root/.ssh && chmod 700 /root/.ssh' || {
+ msg_error "prepare /root/.ssh failed"
+ return 1
+ }
+ pct push "$CTID" "$SSH_KEYS_FILE" /root/.ssh/authorized_keys >/dev/null 2>&1 ||
+ pct exec "$CTID" -- sh -c "cat > /root/.ssh/authorized_keys" <"$SSH_KEYS_FILE" || {
+ msg_error "write authorized_keys failed"
+ return 1
+ }
+ pct exec "$CTID" -- sh -c 'chmod 600 /root/.ssh/authorized_keys' || true
+ msg_ok "Installed SSH keys into CT ${CTID}"
+ return 0
+ fi
-# local -a MENU
-# while read -r line; do
-# local TAG=$(echo "$line" | awk '{print $1}')
-# local TYPE=$(echo "$line" | awk '{printf "%-10s", $2}')
-# local FREE=$(echo "$line" | numfmt --field 4-6 --from-unit=K --to=iec --format %.2f | awk '{printf "%9sB", $6}')
-# MENU+=("$TAG" "Type: $TYPE Free: $FREE" "OFF")
-# done < <(pvesm status -content "$CONTENT" | awk 'NR>1')
-
-# if [ ${#MENU[@]} -eq 0 ]; then
-# msg_error "No storage found for content type '$CONTENT'."
-# exit 203
-# fi
-
-# if [ $((${#MENU[@]} / 3)) -eq 1 ]; then
-# echo "${MENU[0]}"
-# return
-# fi
-
-# local STORAGE
-# STORAGE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "Storage Pools" --radiolist \
-# "Which storage pool for ${CONTENT_LABEL,,}?\n(Spacebar to select)" \
-# 16 70 6 "${MENU[@]}" 3>&1 1>&2 2>&3) || {
-# msg_error "Storage selection cancelled by user."
-# exit 202
-# }
-# echo "$STORAGE"
-# }
-
-# manage_default_storage() {
-# local file="/usr/local/community-scripts/default_storage"
-# mkdir -p /usr/local/community-scripts
-
-# local tmpl=$(select_storage template)
-# local cont=$(select_storage container)
-
-# cat <<EOF >"$file"
-# TEMPLATE_STORAGE=$tmpl
-# CONTAINER_STORAGE=$cont
-# EOF
-
-# msg_ok "Default Storage set: Template=${BL}$tmpl${CL} ${GN}|${CL} Container=${BL}$cont${CL}"
-# whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \
-# --msgbox "Default Storage set:\n\nTemplate: $tmpl\nContainer: $cont" 10 58
-# }
+ # Fallback: nichts ausgewählt
+ msg_warn "No SSH keys to install (skipping)."
+ return 0
+}
base_settings() {
# Default Settings
@@ -368,6 +305,58 @@ exit_script() {
exit
}
+find_host_ssh_keys() {
+ local re='(ssh-(rsa|ed25519)|ecdsa-sha2-nistp256|sk-(ssh-ed25519|ecdsa-sha2-nistp256))'
+ local -a files=() cand=()
+ local g="${var_ssh_import_glob:-}"
+ local total=0 f base c
+
+ shopt -s nullglob
+ if [[ -n "$g" ]]; then
+ for pat in $g; do cand+=($pat); done
+ else
+ cand+=(/root/.ssh/authorized_keys /root/.ssh/authorized_keys2)
+ cand+=(/root/.ssh/*.pub)
+ cand+=(/etc/ssh/authorized_keys /etc/ssh/authorized_keys.d/*)
+ fi
+ shopt -u nullglob
+
+ for f in "${cand[@]}"; do
+ [[ -f "$f" && -r "$f" ]] || continue
+ base="$(basename -- "$f")"
+ case "$base" in
+ known_hosts | known_hosts.* | config) continue ;;
+ id_*) [[ "$f" != *.pub ]] && continue ;;
+ esac
+
+ # CRLF safe check for host keys
+ c=$(tr -d '\r' <"$f" | awk '
+ /^[[:space:]]*#/ {next}
+ /^[[:space:]]*$/ {next}
+ {print}
+ ' | grep -E -c "$re" || true)
+
+ if ((c > 0)); then
+ files+=("$f")
+ total=$((total + c))
+ fi
+ done
+
+ # Fallback to /root/.ssh/authorized_keys
+ if ((${#files[@]} == 0)) && [[ -r /root/.ssh/authorized_keys ]]; then
+ if grep -E -q "$re" /root/.ssh/authorized_keys; then
+ files+=(/root/.ssh/authorized_keys)
+ total=$((total + $(grep -E -c "$re" /root/.ssh/authorized_keys || echo 0)))
+ fi
+ fi
+
+ FOUND_HOST_KEY_COUNT="$total"
+ (
+ IFS=:
+ echo "${files[*]}"
+ )
+}
+
# This function allows the user to configure advanced settings for the script.
advanced_settings() {
whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --msgbox --title "Here is an instructional tip:" "To make a selection, use the Spacebar." 8 58
@@ -792,24 +781,92 @@ advanced_settings() {
exit_script
fi
- SSH_AUTHORIZED_KEY="$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --inputbox "SSH Authorized key for root (leave empty for none)" 8 58 --title "SSH Key" 3>&1 1>&2 2>&3)"
+ # --- SSH key provisioning (one dialog) ---
+ SSH_KEYS_FILE="$(mktemp)"
+ : >"$SSH_KEYS_FILE"
- if [[ -z "${SSH_AUTHORIZED_KEY}" ]]; then
- SSH_AUTHORIZED_KEY=""
+ IFS=$'\0' read -r -d '' -a _def_files < <(ssh_discover_default_files && printf '\0')
+ ssh_build_choices_from_files "${_def_files[@]}"
+ DEF_KEYS_COUNT="$COUNT"
+
+ if [[ "$DEF_KEYS_COUNT" -gt 0 ]]; then
+ SSH_KEY_MODE=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --title "SSH KEY SOURCE" --menu \
+ "Provision SSH keys for root:" 14 72 4 \
+ "found" "Select from detected keys (${DEF_KEYS_COUNT})" \
+ "manual" "Paste a single public key" \
+ "folder" "Scan another folder (path or glob)" \
+ "none" "No keys" 3>&1 1>&2 2>&3) || exit_script
+ else
+ SSH_KEY_MODE=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --title "SSH KEY SOURCE" --menu \
+ "No host keys detected; choose manual/none:" 12 72 2 \
+ "manual" "Paste a single public key" \
+ "none" "No keys" 3>&1 1>&2 2>&3) || exit_script
fi
- if [[ "$PW" == -password* || -n "$SSH_AUTHORIZED_KEY" ]]; then
- if (whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --defaultno --title "SSH ACCESS" --yesno "Enable Root SSH Access?" 10 58); then
+ case "$SSH_KEY_MODE" in
+ found)
+ SEL=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --title "SELECT HOST KEYS" \
+ --checklist "Select one or more keys to import:" 20 20 10 "${CHOICES[@]}" 3>&1 1>&2 2>&3) || exit_script
+ for tag in $SEL; do
+ tag="${tag%\"}"
+ tag="${tag#\"}"
+ line=$(grep -E "^${tag}\|" "$MAPFILE" | head -n1 | cut -d'|' -f2-)
+ [[ -n "$line" ]] && printf '%s\n' "$line" >>"$SSH_KEYS_FILE"
+ done
+ ;;
+ manual)
+ SSH_AUTHORIZED_KEY="$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \
+ --inputbox "Paste one SSH public key line (ssh-ed25519/ssh-rsa/...)" 10 72 --title "SSH Public Key" 3>&1 1>&2 2>&3)"
+ [[ -n "$SSH_AUTHORIZED_KEY" ]] && printf '%s\n' "$SSH_AUTHORIZED_KEY" >>"$SSH_KEYS_FILE"
+ ;;
+ folder)
+ GLOB_PATH="$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \
+ --inputbox "Enter a folder or glob to scan (e.g. /root/.ssh/*.pub)" 10 72 --title "Scan Folder/Glob" 3>&1 1>&2 2>&3)"
+ if [[ -n "$GLOB_PATH" ]]; then
+ shopt -s nullglob
+ read -r -a _scan_files <<<"$GLOB_PATH"
+ shopt -u nullglob
+ if [[ "${#_scan_files[@]}" -gt 0 ]]; then
+ ssh_build_choices_from_files "${_scan_files[@]}"
+ if [[ "$COUNT" -gt 0 ]]; then
+ SEL=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --title "SELECT FOLDER KEYS" \
+ --checklist "Select key(s) to import:" 20 78 10 "${CHOICES[@]}" 3>&1 1>&2 2>&3) || exit_script
+ for tag in $SEL; do
+ tag="${tag%\"}"
+ tag="${tag#\"}"
+ line=$(grep -E "^${tag}\|" "$MAPFILE" | head -n1 | cut -d'|' -f2-)
+ [[ -n "$line" ]] && printf '%s\n' "$line" >>"$SSH_KEYS_FILE"
+ done
+ else
+ whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --msgbox "No keys found in: $GLOB_PATH" 8 60
+ fi
+ else
+ whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --msgbox "Path/glob returned no files." 8 60
+ fi
+ fi
+ ;;
+ none) : ;;
+ esac
+
+ # Dedupe + clean EOF
+ if [[ -s "$SSH_KEYS_FILE" ]]; then
+ sort -u -o "$SSH_KEYS_FILE" "$SSH_KEYS_FILE"
+ printf '\n' >>"$SSH_KEYS_FILE"
+ fi
+
+ # SSH activate, if keys found or password set
+ if [[ -s "$SSH_KEYS_FILE" || "$PW" == -password* ]]; then
+ if (whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --defaultno --title "SSH ACCESS" --yesno "Enable root SSH access?" 10 58); then
SSH="yes"
else
SSH="no"
fi
- echo -e "${ROOTSSH}${BOLD}${DGN}Root SSH Access: ${BGN}$SSH${CL}"
else
SSH="no"
- echo -e "${ROOTSSH}${BOLD}${DGN}Root SSH Access: ${BGN}$SSH${CL}"
fi
+ echo -e "${ROOTSSH}${BOLD}${DGN}Root SSH Access: ${BGN}$SSH${CL}"
+ export SSH_KEYS_FILE
if (whiptail --backtitle "Proxmox VE Helper Scripts" --defaultno --title "FUSE Support" --yesno "Enable FUSE support?\nRequired for tools like rclone, mergerfs, AppImage, etc." 10 58); then
ENABLE_FUSE="yes"
else
@@ -902,6 +959,393 @@ EOF
}
+# ------------------------------------------------------------------------------
+# default_var_settings
+#
+# - Ensures /usr/local/community-scripts/default.vars exists (creates if missing)
+# - Loads var_* values from default.vars (safe parser, no source/eval)
+# - Precedence: ENV var_* > default.vars > built-in defaults
+# - Maps var_verbose → VERBOSE
+# - Calls base_settings "$VERBOSE" and echo_default
+# ------------------------------------------------------------------------------
+default_var_settings() {
+  # Allowed var_* keys (alphabetically sorted). Must cover every key that
+  # maybe_offer_save_app_defaults writes, or saved values are silently dropped.
+  local VAR_WHITELIST=(
+    var_apt_cacher var_apt_cacher_ip var_brg var_cpu var_ctid var_disk var_fuse
+    var_gateway var_hostname var_ipv6_method var_ipv6_static var_mac var_mtu
+    var_net var_ns var_pw var_ram var_searchdomain var_tags var_tun var_unprivileged
+    var_verbose var_vlan var_ssh var_ssh_authorized_key var_container_storage var_template_storage
+  )
+
+  # Snapshot: environment variables (highest precedence)
+  declare -A _HARD_ENV=()
+  local _k
+  for _k in "${VAR_WHITELIST[@]}"; do
+    if printenv "$_k" >/dev/null 2>&1; then _HARD_ENV["$_k"]=1; fi
+  done
+
+  # Find default.vars (first valid path wins)
+  local _find_default_vars
+  _find_default_vars() {
+    local f
+    for f in \
+      /usr/local/community-scripts/default.vars \
+      "$HOME/.config/community-scripts/default.vars" \
+      "./default.vars"; do
+      [ -f "$f" ] && {
+        echo "$f"
+        return 0
+      }
+    done
+    return 1
+  }
+
+  # Ensure default.vars exists, create with sane defaults if missing
+  local _ensure_default_vars
+  _ensure_default_vars() {
+    _find_default_vars >/dev/null 2>&1 && return 0
+    local canonical="/usr/local/community-scripts/default.vars"
+    msg_info "No default.vars found. Creating ${canonical}"
+    mkdir -p /usr/local/community-scripts
+    cat >"$canonical" <<'EOF'
+# Community-Scripts defaults (var_* only). Lines starting with # are comments.
+# Precedence: ENV var_* > default.vars > built-ins.
+# Keep keys alphabetically sorted.
+
+# Container type
+var_unprivileged=1
+
+# Storage
+# Example: "local", "docker", ...
+# var_template_storage=local
+# var_container_storage=docker
+
+# Resources
+var_cpu=1
+var_disk=4
+var_ram=1024
+
+# Network
+var_brg=vmbr0
+var_net=dhcp
+var_ipv6_method=none
+# var_gateway=
+# var_ipv6_static=
+# var_vlan=
+# var_mtu=
+# var_mac=
+# var_ns=
+
+# SSH
+var_ssh=no
+# var_ssh_authorized_key=
+
+# APT cacher (optional)
+# var_apt_cacher=yes
+# var_apt_cacher_ip=192.168.1.10
+
+# Features/Tags/verbosity
+var_fuse=no
+var_tun=no
+var_tags=community-script
+var_verbose=no
+
+# Security (root PW) – empty => autologin
+# var_pw=
+
+# Optional fixed CTID/hostname – empty => auto
+# var_ctid=
+# var_hostname=
+EOF
+    chmod 0644 "$canonical"
+    msg_ok "Created ${canonical}"
+  }
+
+  # Whitelist check
+  local _is_whitelisted_key
+  _is_whitelisted_key() {
+    local k="$1"
+    local w
+    for w in "${VAR_WHITELIST[@]}"; do [ "$k" = "$w" ] && return 0; done
+    return 1
+  }
+
+  # Safe parser for KEY=VALUE lines
+  local _load_vars_file
+  _load_vars_file() {
+    local file="$1"
+    [ -f "$file" ] || return 0
+    msg_info "Loading defaults from ${file}"
+    local line key val
+    while IFS= read -r line || [ -n "$line" ]; do
+      line="${line#"${line%%[![:space:]]*}"}"
+      line="${line%"${line##*[![:space:]]}"}"
+      [[ -z "$line" || "$line" == \#* ]] && continue
+      if [[ "$line" =~ ^([A-Za-z_][A-Za-z0-9_]*)=(.*)$ ]]; then
+        local var_key="${BASH_REMATCH[1]}"
+        local var_val="${BASH_REMATCH[2]}"
+
+        [[ "$var_key" != var_* ]] && continue
+        _is_whitelisted_key "$var_key" || {
+          msg_debug "Ignore non-whitelisted ${var_key}"
+          continue
+        }
+
+        # Strip quotes
+        if [[ "$var_val" =~ ^\"(.*)\"$ ]]; then
+          var_val="${BASH_REMATCH[1]}"
+        elif [[ "$var_val" =~ ^\'(.*)\'$ ]]; then
+          var_val="${BASH_REMATCH[1]}"
+        fi
+
+        # Unsafe check without regex (formatter-friendly)
+        local _unsafe=""
+        case "$var_val" in
+        *'$('*) _unsafe=1 ;;
+        *'`'*) _unsafe=1 ;;
+        *';'*) _unsafe=1 ;;
+        *'&'*) _unsafe=1 ;;
+        *'<('*) _unsafe=1 ;;
+        esac
+        if [[ -n "$_unsafe" ]]; then
+          msg_warn "Ignoring ${var_key} from ${file}: unsafe characters"
+          continue
+        fi
+
+        # Hard env wins
+        if [[ -n "${_HARD_ENV[$var_key]:-}" ]]; then
+          continue
+        fi
+
+        # Set only if not already exported
+        if [[ -z "${!var_key+x}" ]]; then
+          export "${var_key}=${var_val}"
+        fi
+
+      else
+        msg_warn "Malformed line in ${file}: ${line}"
+      fi
+
+    done <"$file"
+    msg_ok "Loaded ${file}"
+  }
+
+  # 1) Ensure file exists
+  _ensure_default_vars
+
+  # 2) Load file
+  local dv
+  dv="$(_find_default_vars)" || {
+    msg_error "default.vars not found after ensure step"
+    return 1
+  }
+  _load_vars_file "$dv"
+
+  # 3) Map var_verbose → VERBOSE
+  if [[ -n "${var_verbose:-}" ]]; then
+    case "${var_verbose,,}" in
+    1 | yes | true | on) VERBOSE="yes" ;;
+    0 | no | false | off) VERBOSE="no" ;;
+    *) VERBOSE="${var_verbose}" ;;
+    esac
+  else
+    VERBOSE="no"
+  fi
+
+  # 4) Apply base settings and show summary
+  METHOD="mydefaults-global"
+  base_settings "$VERBOSE"
+  header_info
+  echo -e "${DEFAULT}${BOLD}${BL}Using My Defaults (default.vars) on node $PVEHOST_NAME${CL}"
+  echo_default
+}
+
+get_app_defaults_path() {
+  local n="${NSAPP:-${APP,,}}" # app slug: NSAPP if set, otherwise lowercased APP
+  echo "/usr/local/community-scripts/defaults/${n}.vars" # per-app defaults file path
+}
+
+# ------------------------------------------------------------------------------
+# maybe_offer_save_app_defaults
+#
+# - Called after advanced_settings returned with fully chosen values.
+# - If no .vars exists, offers to persist current advanced settings
+#   into /usr/local/community-scripts/defaults/<app>.vars
+# - Only writes whitelisted var_* keys.
+# - Extracts raw values from flags like ",gw=..." ",mtu=..." etc.
+# ------------------------------------------------------------------------------
+maybe_offer_save_app_defaults() {
+  local app_vars_path
+  app_vars_path="$(get_app_defaults_path)"
+
+  # Only offer if file does not exist yet (never overwrite saved defaults)
+  if [ -f "$app_vars_path" ]; then
+    return 0
+  fi
+
+  # Ask user (English prompt as requested)
+  if ! whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \
+    --yesno "Save these advanced settings as defaults for ${APP}?\n\nThis will create:\n${app_vars_path}" 12 72; then
+    return 0
+  fi
+
+  # Ensure directory exists
+  mkdir -p "$(dirname "$app_vars_path")"
+
+  # Normalizers (extract raw values from flags used during building)
+  local _val # NOTE(review): appears unused in this function — candidate for removal
+
+  # NET/GATE: NET is either 'dhcp' or a CIDR; GATE holds ',gw=IP' or ''
+  local _net="${NET:-}"
+  local _gate=""
+  if [[ "${GATE:-}" =~ ^,gw= ]]; then
+    _gate="${GATE#,gw=}"
+  fi
+
+  # IPv6: method + optional static + optional gateway
+  local _ipv6_method="${IPV6_METHOD:-auto}"
+  local _ipv6_static=""
+  local _ipv6_gateway=""
+  case "$_ipv6_method" in
+  static)
+    _ipv6_static="${IPV6_ADDR:-}"
+    _ipv6_gateway="${IPV6_GATE:-}"
+    ;;
+  esac
+
+  # MTU: MTU looks like ',mtu=1500' or ''
+  local _mtu=""
+  if [[ "${MTU:-}" =~ ^,mtu= ]]; then
+    _mtu="${MTU#,mtu=}"
+  fi
+
+  # VLAN: ',tag=NN' or ''
+  local _vlan=""
+  if [[ "${VLAN:-}" =~ ^,tag= ]]; then
+    _vlan="${VLAN#,tag=}"
+  fi
+
+  # MAC: ',hwaddr=XX:XX:...' or ''
+  local _mac=""
+  if [[ "${MAC:-}" =~ ^,hwaddr= ]]; then
+    _mac="${MAC#,hwaddr=}"
+  fi
+
+  # DNS nameserver: NS is like '-nameserver=IP' or ''
+  local _ns=""
+  if [[ "${NS:-}" =~ ^-nameserver= ]]; then
+    _ns="${NS#-nameserver=}"
+  fi
+
+  # Search domain: SD is like '-searchdomain=foo' or ''
+  local _searchdomain=""
+  if [[ "${SD:-}" =~ ^-searchdomain= ]]; then
+    _searchdomain="${SD#-searchdomain=}"
+  fi
+
+  # Authorized key: raw string already
+  local _ssh_auth="${SSH_AUTHORIZED_KEY:-}"
+
+  # SSH enabled: "yes"/"no"
+  local _ssh="${SSH:-no}"
+
+  # APT cacher
+  local _apt_cacher="${APT_CACHER:-}"
+  local _apt_cacher_ip="${APT_CACHER_IP:-}"
+
+  # Features
+  local _fuse="${ENABLE_FUSE:-no}"
+  local _tun="${ENABLE_TUN:-no}"
+
+  # Tags: TAGS may include 'community-script;' etc. Keep as-is unless empty
+  local _tags="${TAGS:-}"
+
+  # Unprivileged container type: CT_TYPE is "1" (unpriv) or "0" (priv)
+  local _unpriv="${CT_TYPE:-1}"
+
+  # Resources and names
+  local _cpu="${CORE_COUNT:-1}"
+  local _ram="${RAM_SIZE:-1024}"
+  local _disk="${DISK_SIZE:-4}"
+  local _hostname="${HN:-$NSAPP}"
+
+  # Verbose
+  local _verbose="${VERBOSE:-no}"
+
+  # Optional storages if already known in this phase
+  local _tpl_storage="${TEMPLATE_STORAGE:-}"
+  local _ct_storage="${CONTAINER_STORAGE:-}"
+
+  # Sanitize function for values (basic safety for config file)
+  _sanitize_value() {
+    local s="$1"
+    # Disallow backticks, $(), <(), ;, &
+    case "$s" in
+    *'$('* | *'`'* | *';'* | *'&'* | *'<('*)
+      echo "" # unsafe value is dropped entirely (written as empty)
+      ;;
+    *)
+      echo "$s"
+      ;;
+    esac
+  }
+
+  # Build the file content (written atomically via the single redirection below)
+  {
+    echo "# App-specific defaults for ${APP} (${NSAPP})"
+    echo "# Generated on $(date -u '+%Y-%m-%dT%H:%M:%SZ')"
+    echo "# Only var_* keys are read by the loader."
+    echo
+
+    # Container type
+    echo "var_unprivileged=$(_sanitize_value "$_unpriv")"
+
+    # Resources
+    echo "var_cpu=$(_sanitize_value "$_cpu")"
+    echo "var_ram=$(_sanitize_value "$_ram")"
+    echo "var_disk=$(_sanitize_value "$_disk")"
+
+    # Network
+    [ -n "$BRG" ] && echo "var_brg=$(_sanitize_value "$BRG")"
+    [ -n "$_net" ] && echo "var_net=$(_sanitize_value "$_net")"
+    [ -n "$_gate" ] && echo "var_gateway=$(_sanitize_value "$_gate")"
+    [ -n "$_mtu" ] && echo "var_mtu=$(_sanitize_value "$_mtu")"
+    [ -n "$_vlan" ] && echo "var_vlan=$(_sanitize_value "$_vlan")"
+    [ -n "$_mac" ] && echo "var_mac=$(_sanitize_value "$_mac")"
+    [ -n "$_ns" ] && echo "var_ns=$(_sanitize_value "$_ns")"
+
+    # IPv6
+    [ -n "$_ipv6_method" ] && echo "var_ipv6_method=$(_sanitize_value "$_ipv6_method")"
+    [ -n "$_ipv6_static" ] && echo "var_ipv6_static=$(_sanitize_value "$_ipv6_static")"
+    # Note: we do not persist a dedicated var for IPv6 gateway; can be derived if needed
+
+    # SSH
+    [ -n "$_ssh" ] && echo "var_ssh=$(_sanitize_value "$_ssh")"
+    [ -n "$_ssh_auth" ] && echo "var_ssh_authorized_key=$(_sanitize_value "$_ssh_auth")"
+
+    # APT cacher
+    [ -n "$_apt_cacher" ] && echo "var_apt_cacher=$(_sanitize_value "$_apt_cacher")"
+    [ -n "$_apt_cacher_ip" ] && echo "var_apt_cacher_ip=$(_sanitize_value "$_apt_cacher_ip")"
+
+    # Features / tags / verbosity
+    [ -n "$_fuse" ] && echo "var_fuse=$(_sanitize_value "$_fuse")"
+    [ -n "$_tun" ] && echo "var_tun=$(_sanitize_value "$_tun")"
+    [ -n "$_tags" ] && echo "var_tags=$(_sanitize_value "$_tags")"
+    [ -n "$_verbose" ] && echo "var_verbose=$(_sanitize_value "$_verbose")"
+
+    # Identity (optional)
+    [ -n "$_hostname" ] && echo "var_hostname=$(_sanitize_value "$_hostname")"
+    [ -n "$_searchdomain" ] && echo "var_searchdomain=$(_sanitize_value "$_searchdomain")" # NOTE(review): var_searchdomain must be in the loader whitelist (default_var_settings) to be read back — confirm
+
+    # Storage (optional, if known at this stage)
+    [ -n "$_tpl_storage" ] && echo "var_template_storage=$(_sanitize_value "$_tpl_storage")"
+    [ -n "$_ct_storage" ] && echo "var_container_storage=$(_sanitize_value "$_ct_storage")"
+  } >"$app_vars_path"
+
+  chmod 0644 "$app_vars_path"
+  msg_ok "Saved app defaults: ${app_vars_path}"
+}
+
install_script() {
pve_check
shell_check
@@ -931,23 +1375,27 @@ install_script() {
ADVANCED | advanced | 3)
CHOICE="3"
;;
+ DEFAULT_VARS | default_vars | 4)
+ CHOICE="4"
+ ;;
*)
echo -e "\n${CROSS}${RD}Invalid PRESET value: ${PRESET}${CL}\n"
exit 1
;;
esac
else
+ #"4" "Use Config File" \
+ #"5" "Manage Default Storage" \
while true; do
TMP_CHOICE=$(whiptail --backtitle "[dev] Proxmox VE Helper Scripts" \
--title "SETTINGS" \
- --menu "Choose an option:" 20 60 7 \
+ --menu "Choose an option:" 20 60 6 \
"1" "Default Settings" \
"2" "Default Settings (with verbose)" \
"3" "Advanced Settings" \
- "4" "Use Config File" \
- "5" "Manage Default Storage" \
- "6" "Diagnostic Settings" \
- "7" "Exit" \
+ "4" "My Default Vars" \
+ "5" "Diagnostic Settings" \
+ "6" "Exit" \
--default-item "1" 3>&1 1>&2 2>&3) || true
if [ -z "$TMP_CHOICE" ]; then
@@ -983,18 +1431,23 @@ install_script() {
METHOD="advanced"
base_settings
advanced_settings
+ maybe_offer_save_app_defaults
;;
+ # 4)
+ # header_info
+ # echo -e "${INFO}${HOLD} ${GN}Using Config File on node $PVEHOST_NAME${CL}"
+ # METHOD="advanced"
+ # source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/config-file.func)
+ # config_file
+ # ;;
4)
- header_info
- echo -e "${INFO}${HOLD} ${GN}Using Config File on node $PVEHOST_NAME${CL}"
- METHOD="advanced"
- source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/config-file.func)
- config_file
+ # My Defaults (default.vars)
+ default_var_settings || {
+ msg_error "Failed to apply default.vars"
+ exit 1
+ }
;;
5)
- manage_default_storage
- ;;
- 6)
if [[ $DIAGNOSTICS == "yes" ]]; then
if whiptail --backtitle "[dev] Proxmox VE Helper Scripts" --title "DIAGNOSTICS SETTINGS" --yesno "Send Diagnostics of LXC Installation?\n\nCurrent setting: ${DIAGNOSTICS}" 10 58 \
--yes-button "No" --no-button "Back"; then
@@ -1011,7 +1464,7 @@ install_script() {
fi
fi
;;
- 7)
+ 6)
echo -e "\n${CROSS}${RD}Script terminated. Have a great day!${CL}\n"
exit 0
;;
@@ -1060,6 +1513,76 @@ check_container_storage() {
fi
}
+ssh_extract_keys_from_file() {
+  local f="$1" # path to a candidate public-key / authorized_keys file
+  [[ -r "$f" ]] || return 0 # unreadable file -> no keys, not an error
+  tr -d '\r' <"$f" | awk '
+    /^[[:space:]]*#/ {next}
+    /^[[:space:]]*$/ {next}
+    # bare form: "<type> <base64> [comment]" — print the line as-is
+    /^(ssh-(rsa|ed25519)|ecdsa-sha2-nistp256|sk-(ssh-ed25519|ecdsa-sha2-nistp256))[[:space:]]+/ {print; next}
+    # with leading options: print from the first recognized key type onward
+    {
+      match($0, /(ssh-(rsa|ed25519)|ecdsa-sha2-nistp256|sk-(ssh-ed25519|ecdsa-sha2-nistp256))[[:space:]]+/)
+      if (RSTART>0) { print substr($0, RSTART) }
+    }
+  '
+}
+
+ssh_build_choices_from_files() {
+  local -a files=("$@")
+  CHOICES=() # whiptail checklist triples: tag, label, state
+  COUNT=0 # number of keys found across all files
+  MAPFILE="$(mktemp)" # temp file mapping "K<N>|<raw key line>"
+  local id key typ fp cmt base ln=0
+
+  for f in "${files[@]}"; do
+    [[ -f "$f" && -r "$f" ]] || continue
+    base="$(basename -- "$f")"
+    case "$base" in
+    known_hosts | known_hosts.* | config) continue ;;
+    id_*) [[ "$f" != *.pub ]] && continue ;;
+    esac
+
+    # map every extracted key line to a checklist entry: K<N>|<key>
+    while IFS= read -r key; do
+      [[ -n "$key" ]] || continue
+
+      # derive type/fingerprint/comment for a readable label (best effort)
+      typ=""
+      fp=""
+      cmt=""
+      # 'key' already holds only the bare key part (options stripped upstream)
+      read -r _typ _b64 _cmt <<<"$key"
+      typ="${_typ:-key}"
+      cmt="${_cmt:-}"
+      # fingerprint via ssh-keygen (if available)
+      if command -v ssh-keygen >/dev/null 2>&1; then
+        fp="$(printf '%s\n' "$key" | ssh-keygen -lf - 2>/dev/null | awk '{print $2}')"
+      fi
+      # shorten an overly long comment for the label
+      [[ ${#cmt} -gt 40 ]] && cmt="${cmt:0:37}..."
+
+      ln=$((ln + 1))
+      COUNT=$((COUNT + 1))
+      id="K${COUNT}"
+      echo "${id}|${key}" >>"$MAPFILE"
+      CHOICES+=("$id" "[$typ] ${fp:+$fp }${cmt:+$cmt }— ${base}" "OFF")
+    done < <(ssh_extract_keys_from_file "$f")
+  done
+}
+
+# Discover default key sources (authorized_keys, *.pub, /etc/ssh/authorized_keys.d/*)
+ssh_discover_default_files() {
+  local -a cand=()
+  shopt -s nullglob # globs with no match expand to nothing instead of themselves
+  cand+=(/root/.ssh/authorized_keys /root/.ssh/authorized_keys2)
+  cand+=(/root/.ssh/*.pub)
+  cand+=(/etc/ssh/authorized_keys /etc/ssh/authorized_keys.d/*)
+  shopt -u nullglob
+  printf '%s\0' "${cand[@]}" # NUL-delimited so callers can split paths safely
+}
+
start() {
source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/tools.func)
if command -v pveversion >/dev/null 2>&1; then
@@ -1163,6 +1686,8 @@ build_container() {
-unprivileged $CT_TYPE
$PW
"
+ export TEMPLATE_STORAGE="${var_template_storage:-}"
+ export CONTAINER_STORAGE="${var_container_storage:-}"
bash -c "$(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/create_lxc.sh)" || exit
if [ $? -ne 0 ]; then
exit 200
@@ -1190,7 +1715,7 @@ EOF
VAAPI_APPS=(
"immich" "Channels" "Emby" "ErsatzTV" "Frigate" "Jellyfin"
"Plex" "Scrypted" "Tdarr" "Unmanic" "Ollama" "FileFlows"
- "Open WebUI" "Debian"
+ "Open WebUI" "Debian" "Tunarr"
)
is_vaapi_app=false
@@ -1219,7 +1744,7 @@ EOF
if [[ "${#combo_devices[@]}" -eq 1 && -e /dev/dri/card0 ]]; then
combo_devices+=("/dev/dri/card0")
fi
- echo "combo_devices=${combo_devices[*]}"
+ #echo "combo_devices=${combo_devices[*]}"
pci_addr=$(basename "$bypath" | cut -d- -f1 --complement | sed 's/-render//' || true)
pci_info=$(lspci -nn | grep "${pci_addr#0000:}" || true)
@@ -1363,6 +1888,7 @@ EOF
if [ "$var_os" != "alpine" ]; then
msg_info "Waiting for network in LXC container"
+ sleep 2
for i in {1..10}; do
# 1. Primary check: ICMP ping (fastest, but may be blocked by ISP/firewall)
if pct exec "$CTID" -- ping -c1 -W1 deb.debian.org >/dev/null 2>&1; then
@@ -1371,8 +1897,12 @@ EOF
fi
# Wait and retry if not reachable yet
if [ "$i" -lt 10 ]; then
- msg_warn "No network in LXC yet (try $i/10) – waiting..."
- sleep 3
+ if [ "$i" -le 3 ]; then
+ sleep 2
+ else
+ msg_warn "No network in LXC yet (try $i/10) – waiting..."
+ sleep 3
+ fi
else
# After 10 unsuccessful ping attempts, try HTTP connectivity via wget as fallback
msg_warn "Ping failed 10 times. Trying HTTP connectivity check (wget) as fallback..."
@@ -1437,7 +1967,7 @@ EOF'
}
fi
msg_ok "Customized LXC Container"
-
+ install_ssh_keys_into_ct
lxc-attach -n "$CTID" -- bash -c "$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/install/${var_install}.sh)"
}
diff --git a/misc/create_lxc.sh b/misc/create_lxc.sh
index 03de6aaa..ea53362e 100644
--- a/misc/create_lxc.sh
+++ b/misc/create_lxc.sh
@@ -6,7 +6,7 @@
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# This sets verbose mode if the global variable is set to "yes"
-# if [ "$VERBOSE" == "yes" ]; then set -x; fi
+if [ "$CREATE_LXC_VERBOSE" == "yes" ]; then set -x; fi
if command -v curl >/dev/null 2>&1; then
source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/core.func)
@@ -58,14 +58,64 @@ function exit_script() {
exit 1
}
+# Resolve and validate a preselected storage for a given class.
+# class: "template" -> requires content=vztmpl
+# "container" -> requires content=rootdir
+# On success sets STORAGE_RESULT and STORAGE_INFO; returns 1 on any mismatch.
+resolve_storage_preselect() {
+  local class="$1"
+  local preselect="$2"
+  local required_content=""
+  case "$class" in
+  template) required_content="vztmpl" ;;
+  container) required_content="rootdir" ;;
+  *) return 1 ;;
+  esac
+
+  # No preselect provided
+  [ -z "$preselect" ] && return 1
+
+  # Check storage exists and supports required content
+  if ! pvesm status -content "$required_content" | awk 'NR>1{print $1}' | grep -qx -- "$preselect"; then
+    msg_warn "Preselected storage '${preselect}' does not support content '${required_content}' (or not found)"
+    return 1
+  fi
+
+  # Build human-readable info string from pvesm status
+  # Expected columns: Name Type Status Total Used Free ...
+  local line total used free
+  line="$(pvesm status | awk -v s="$preselect" 'NR>1 && $1==s {print $0}')"
+  if [ -z "$line" ]; then
+    STORAGE_INFO="n/a"
+  else
+    total="$(echo "$line" | awk '{print $4}')"
+    used="$(echo "$line" | awk '{print $5}')"
+    free="$(echo "$line" | awk '{print $6}')"
+    # Format to IEC; pvesm reports KiB, so convert with --from-unit=K
+    # (matches select_storage's numfmt usage elsewhere in this script)
+    local total_h used_h free_h
+    if command -v numfmt >/dev/null 2>&1; then
+      total_h="$(numfmt --to=iec --from-unit=K --suffix=B --format %.1f "$total" 2>/dev/null || echo "$total")"
+      used_h="$(numfmt --to=iec --from-unit=K --suffix=B --format %.1f "$used" 2>/dev/null || echo "$used")"
+      free_h="$(numfmt --to=iec --from-unit=K --suffix=B --format %.1f "$free" 2>/dev/null || echo "$free")"
+      STORAGE_INFO="Free: ${free_h}  Used: ${used_h}"
+    else
+      STORAGE_INFO="Free: ${free}  Used: ${used}" # raw KiB values from pvesm
+    fi
+  fi
+
+  # Set outputs expected by your callers
+  STORAGE_RESULT="$preselect"
+  return 0
+}
+
function check_storage_support() {
local CONTENT="$1"
local -a VALID_STORAGES=()
while IFS= read -r line; do
- local STORAGE=$(awk '{print $1}' <<<"$line")
- [[ "$STORAGE" == "storage" || -z "$STORAGE" ]] && continue
- VALID_STORAGES+=("$STORAGE")
+ local STORAGE_NAME
+ STORAGE_NAME=$(awk '{print $1}' <<<"$line")
+ [[ -z "$STORAGE_NAME" ]] && continue
+ VALID_STORAGES+=("$STORAGE_NAME")
done < <(pvesm status -content "$CONTENT" 2>/dev/null | awk 'NR>1')
[[ ${#VALID_STORAGES[@]} -gt 0 ]]
@@ -124,11 +174,12 @@ function select_storage() {
while read -r TAG TYPE _ TOTAL USED FREE _; do
[[ -n "$TAG" && -n "$TYPE" ]] || continue
- local DISPLAY="${TAG} (${TYPE})"
+ local STORAGE_NAME="$TAG"
+ local DISPLAY="${STORAGE_NAME} (${TYPE})"
local USED_FMT=$(numfmt --to=iec --from-unit=K --format %.1f <<<"$USED")
local FREE_FMT=$(numfmt --to=iec --from-unit=K --format %.1f <<<"$FREE")
local INFO="Free: ${FREE_FMT}B Used: ${USED_FMT}B"
- STORAGE_MAP["$DISPLAY"]="$TAG"
+ STORAGE_MAP["$DISPLAY"]="$STORAGE_NAME"
MENU+=("$DISPLAY" "$INFO" "OFF")
((${#DISPLAY} > COL_WIDTH)) && COL_WIDTH=${#DISPLAY}
done < <(pvesm status -content "$CONTENT" | awk 'NR>1')
@@ -203,39 +254,71 @@ if qm status "$CTID" &>/dev/null || pct status "$CTID" &>/dev/null; then
fi
# This checks for the presence of valid Container Storage and Template Storage locations
-msg_info "Validating Storage"
if ! check_storage_support "rootdir"; then
- msg_error "No valid storage found for 'rootdir' (Container)."
- msg_debug "check_storage_support('rootdir') → success"
+ msg_error "No valid storage found for 'rootdir' [Container]"
exit 1
fi
if ! check_storage_support "vztmpl"; then
- msg_error "No valid storage found for 'vztmpl' (Template)."
- msg_debug "check_storage_support('vztmpl') → success"
+ msg_error "No valid storage found for 'vztmpl' [Template]"
exit 1
fi
-msg_ok "Valid Storage Found"
-while true; do
- if select_storage template; then
- TEMPLATE_STORAGE="$STORAGE_RESULT"
- TEMPLATE_STORAGE_INFO="$STORAGE_INFO"
- msg_debug "TEMPLATE_STORAGE=$TEMPLATE_STORAGE"
- msg_debug "TEMPLATE_STORAGE_INFO=$TEMPLATE_STORAGE_INFO"
- break
- fi
-done
+# Template storage selection
+if resolve_storage_preselect template "${TEMPLATE_STORAGE}"; then
+ TEMPLATE_STORAGE="$STORAGE_RESULT"
+ TEMPLATE_STORAGE_INFO="$STORAGE_INFO"
+ msg_ok "Storage ${BL}${TEMPLATE_STORAGE}${CL} (${TEMPLATE_STORAGE_INFO}) [Template]"
+else
+ while true; do
+ if select_storage template; then
+ TEMPLATE_STORAGE="$STORAGE_RESULT"
+ TEMPLATE_STORAGE_INFO="$STORAGE_INFO"
+ msg_ok "Storage ${BL}${TEMPLATE_STORAGE}${CL} (${TEMPLATE_STORAGE_INFO}) [Template]"
+ break
+ fi
+ done
+fi
-while true; do
- if select_storage container; then
- CONTAINER_STORAGE="$STORAGE_RESULT"
- CONTAINER_STORAGE_INFO="$STORAGE_INFO"
- msg_debug "CONTAINER_STORAGE=$CONTAINER_STORAGE"
- msg_debug "CONTAINER_STORAGE_INFO=$CONTAINER_STORAGE_INFO"
- break
- fi
-done
-msg_ok "Validated Storage | Container: ${BL}$CONTAINER_STORAGE${CL} ($CONTAINER_STORAGE_INFO)"
+# Container storage selection
+if resolve_storage_preselect container "${CONTAINER_STORAGE}"; then
+ CONTAINER_STORAGE="$STORAGE_RESULT"
+ CONTAINER_STORAGE_INFO="$STORAGE_INFO"
+ msg_ok "Storage ${BL}${CONTAINER_STORAGE}${CL} (${CONTAINER_STORAGE_INFO}) [Container]"
+else
+ while true; do
+ if select_storage container; then
+ CONTAINER_STORAGE="$STORAGE_RESULT"
+ CONTAINER_STORAGE_INFO="$STORAGE_INFO"
+ msg_ok "Storage ${BL}${CONTAINER_STORAGE}${CL} (${CONTAINER_STORAGE_INFO}) [Container]"
+ break
+ fi
+ done
+fi
+
+# Storage Content Validation
+msg_info "Validating content types of storage '$CONTAINER_STORAGE'"
+STORAGE_CONTENT=$(grep -A4 -E "^(zfspool|dir|lvmthin|lvm): $CONTAINER_STORAGE" /etc/pve/storage.cfg | grep content | awk '{$1=""; print $0}' | xargs)
+
+msg_debug "Storage '$CONTAINER_STORAGE' has content types: $STORAGE_CONTENT"
+
+# check if rootdir supported
+if ! grep -qw "rootdir" <<<"$STORAGE_CONTENT"; then
+ msg_error "Storage '$CONTAINER_STORAGE' does not support 'rootdir'. Cannot create LXC."
+ exit 217
+fi
+msg_ok "Storage '$CONTAINER_STORAGE' supports 'rootdir'"
+
+# check if template storage is compatible
+msg_info "Validating content types of template storage '$TEMPLATE_STORAGE'"
+TEMPLATE_CONTENT=$(grep -A4 -E "^[^:]+: $TEMPLATE_STORAGE" /etc/pve/storage.cfg | grep content | awk '{$1=""; print $0}' | xargs)
+
+msg_debug "Template storage '$TEMPLATE_STORAGE' has content types: $TEMPLATE_CONTENT"
+
+if ! grep -qw "vztmpl" <<<"$TEMPLATE_CONTENT"; then
+ msg_warn "Template storage '$TEMPLATE_STORAGE' does not declare 'vztmpl'. This may cause pct create to fail."
+else
+ msg_ok "Template storage '$TEMPLATE_STORAGE' supports 'vztmpl'"
+fi
# Check free space on selected container storage
STORAGE_FREE=$(pvesm status | awk -v s="$CONTAINER_STORAGE" '$1 == s { print $6 }')
@@ -244,11 +327,11 @@ if [ "$STORAGE_FREE" -lt "$REQUIRED_KB" ]; then
msg_error "Not enough space on '$CONTAINER_STORAGE'. Needed: ${PCT_DISK_SIZE:-8}G."
exit 214
fi
+
# Check Cluster Quorum if in Cluster
if [ -f /etc/pve/corosync.conf ]; then
- msg_info "Checking Proxmox cluster quorum status"
+ msg_info "Checking cluster quorum"
if ! pvecm status | awk -F':' '/^Quorate/ { exit ($2 ~ /Yes/) ? 0 : 1 }'; then
- printf "\e[?25h"
msg_error "Cluster is not quorate. Start all nodes or configure quorum device (QDevice)."
exit 210
fi
@@ -257,140 +340,213 @@ fi
# Update LXC template list
TEMPLATE_SEARCH="${PCT_OSTYPE}-${PCT_OSVERSION:-}"
+case "$PCT_OSTYPE" in
+debian | ubuntu) TEMPLATE_PATTERN="-standard_" ;;
+alpine | fedora | rocky | centos) TEMPLATE_PATTERN="-default_" ;;
+*) TEMPLATE_PATTERN="" ;;
+esac
-msg_info "Updating LXC Template List"
-if ! pveam update >/dev/null 2>&1; then
- TEMPLATE_FALLBACK=$(pveam list "$TEMPLATE_STORAGE" | awk "/$TEMPLATE_SEARCH/ {print \$2}" | sort -t - -k 2 -V | tail -n1)
- if [[ -z "$TEMPLATE_FALLBACK" ]]; then
- msg_error "Failed to update LXC template list and no local template matching '$TEMPLATE_SEARCH' found."
- exit 201
- fi
- msg_info "Skipping template update – using local fallback: $TEMPLATE_FALLBACK"
+msg_info "Searching for template '$TEMPLATE_SEARCH'"
+
+# 1. get / check local templates
+mapfile -t LOCAL_TEMPLATES < <(
+ pveam list "$TEMPLATE_STORAGE" 2>/dev/null |
+ awk -v s="$TEMPLATE_SEARCH" -v p="$TEMPLATE_PATTERN" '$1 ~ s && $1 ~ p {print $1}' |
+ sed 's/.*\///' | sort -t - -k 2 -V
+)
+
+# 2. get online templates
+pveam update >/dev/null 2>&1 || msg_warn "Could not update template catalog (pveam update failed)."
+mapfile -t ONLINE_TEMPLATES < <(
+ pveam available -section system 2>/dev/null |
+ sed -n "s/.*\($TEMPLATE_SEARCH.*$TEMPLATE_PATTERN.*\)/\1/p" |
+ sort -t - -k 2 -V
+)
+if [ ${#ONLINE_TEMPLATES[@]} -gt 0 ]; then
+ ONLINE_TEMPLATE="${ONLINE_TEMPLATES[-1]}"
else
- msg_ok "LXC Template List Updated"
+ ONLINE_TEMPLATE=""
fi
-# Get LXC template string
-TEMPLATE_SEARCH="${PCT_OSTYPE}-${PCT_OSVERSION:-}"
-mapfile -t TEMPLATES < <(pveam available -section system | sed -n "s/.*\($TEMPLATE_SEARCH.*\)/\1/p" | sort -t - -k 2 -V)
-
-if [ ${#TEMPLATES[@]} -eq 0 ]; then
- msg_error "No matching LXC template found for '${TEMPLATE_SEARCH}'. Make sure your host can reach the Proxmox template repository."
- exit 207
+# 3. Local vs Online
+if [ ${#LOCAL_TEMPLATES[@]} -gt 0 ]; then
+ TEMPLATE="${LOCAL_TEMPLATES[-1]}"
+ TEMPLATE_SOURCE="local"
+else
+ TEMPLATE="$ONLINE_TEMPLATE"
+ TEMPLATE_SOURCE="online"
fi
-TEMPLATE="${TEMPLATES[-1]}"
-TEMPLATE_PATH="$(pvesm path $TEMPLATE_STORAGE:vztmpl/$TEMPLATE 2>/dev/null || echo "/var/lib/vz/template/cache/$TEMPLATE")"
-msg_debug "TEMPLATE_SEARCH=$TEMPLATE_SEARCH"
-msg_debug "TEMPLATES=(${TEMPLATES[*]})"
-msg_debug "Selected TEMPLATE=$TEMPLATE"
-msg_debug "TEMPLATE_PATH=$TEMPLATE_PATH"
-
-TEMPLATE_VALID=1
-if ! pveam list "$TEMPLATE_STORAGE" | grep -q "$TEMPLATE"; then
- TEMPLATE_VALID=0
-elif [ ! -s "$TEMPLATE_PATH" ]; then
- TEMPLATE_VALID=0
-elif ! tar --use-compress-program=zstdcat -tf "$TEMPLATE_PATH" >/dev/null 2>&1; then
- TEMPLATE_VALID=0
+# 4. Getting Path (universal, also for nfs/cifs)
+TEMPLATE_PATH="$(pvesm path $TEMPLATE_STORAGE:vztmpl/$TEMPLATE 2>/dev/null || true)"
+if [[ -z "$TEMPLATE_PATH" ]]; then
+ TEMPLATE_BASE=$(awk -v s="$TEMPLATE_STORAGE" '$1==s {f=1} f && /path/ {print $2; exit}' /etc/pve/storage.cfg)
+ if [[ -n "$TEMPLATE_BASE" ]]; then
+ TEMPLATE_PATH="$TEMPLATE_BASE/template/cache/$TEMPLATE"
+ fi
fi
-if [ "$TEMPLATE_VALID" -eq 0 ]; then
- msg_warn "Template $TEMPLATE not found or appears to be corrupted. Re-downloading."
+if [[ -z "$TEMPLATE_PATH" ]]; then
+ msg_error "Unable to resolve template path for $TEMPLATE_STORAGE. Check storage type and permissions."
+ exit 220
+fi
+
+msg_ok "Template ${BL}$TEMPLATE${CL} [$TEMPLATE_SOURCE]"
+msg_debug "Resolved TEMPLATE_PATH=$TEMPLATE_PATH"
+
+# 5. Validation
+NEED_DOWNLOAD=0
+if [[ ! -f "$TEMPLATE_PATH" ]]; then
+ msg_info "Template not present locally – will download."
+ NEED_DOWNLOAD=1
+elif [[ ! -r "$TEMPLATE_PATH" ]]; then
+ msg_error "Template file exists but is not readable – check permissions."
+ exit 221
+elif [[ "$(stat -c%s "$TEMPLATE_PATH")" -lt 1000000 ]]; then
+ if [[ -n "$ONLINE_TEMPLATE" ]]; then
+ msg_warn "Template file too small (<1MB) – re-downloading."
+ NEED_DOWNLOAD=1
+ else
+ msg_warn "Template looks too small, but no online version exists. Keeping local file."
+ fi
+elif ! tar -tf "$TEMPLATE_PATH" &>/dev/null; then
+ if [[ -n "$ONLINE_TEMPLATE" ]]; then
+ msg_warn "Template appears corrupted – re-downloading."
+ NEED_DOWNLOAD=1
+ else
+ msg_warn "Template appears corrupted, but no online version exists. Keeping local file."
+ fi
+else
+ msg_ok "Template $TEMPLATE is present and valid."
+fi
+
+# 6. Update-Check (if local exist)
+if [[ "$TEMPLATE_SOURCE" == "local" && -n "$ONLINE_TEMPLATE" && "$TEMPLATE" != "$ONLINE_TEMPLATE" ]]; then
+ msg_warn "Local template is outdated: $TEMPLATE (latest available: $ONLINE_TEMPLATE)"
+ if whiptail --yesno "A newer template is available:\n$ONLINE_TEMPLATE\n\nDo you want to download and use it instead?" 12 70; then
+ TEMPLATE="$ONLINE_TEMPLATE"
+ NEED_DOWNLOAD=1
+ else
+ msg_info "Continuing with local template $TEMPLATE"
+ fi
+fi
+
+# 7. Download if needed
+if [[ "$NEED_DOWNLOAD" -eq 1 ]]; then
[[ -f "$TEMPLATE_PATH" ]] && rm -f "$TEMPLATE_PATH"
for attempt in {1..3}; do
- msg_info "Attempt $attempt: Downloading LXC template..."
+ msg_info "Attempt $attempt: Downloading template $TEMPLATE to $TEMPLATE_STORAGE"
if pveam download "$TEMPLATE_STORAGE" "$TEMPLATE" >/dev/null 2>&1; then
msg_ok "Template download successful."
break
fi
if [ $attempt -eq 3 ]; then
- msg_error "Failed after 3 attempts. Please check network access or manually run:\n pveam download $TEMPLATE_STORAGE $TEMPLATE"
- exit 208
+ msg_error "Failed after 3 attempts. Please check network access, permissions, or manually run:\n pveam download $TEMPLATE_STORAGE $TEMPLATE"
+ exit 222
fi
sleep $((attempt * 5))
done
fi
-msg_ok "LXC Template '$TEMPLATE' is ready to use."
+# 8. Final Check – Template usability
+if ! pveam list "$TEMPLATE_STORAGE" 2>/dev/null | grep -q "$TEMPLATE"; then
+ msg_error "Template $TEMPLATE not available in storage $TEMPLATE_STORAGE after download."
+ exit 223
+fi
+msg_ok "Template $TEMPLATE is ready for container creation."
-msg_info "Creating LXC Container"
-# Check and fix subuid/subgid
+# ------------------------------------------------------------------------------
+# Create LXC Container with validation, recovery and debug option
+# ------------------------------------------------------------------------------
+
+msg_info "Creating LXC container"
+
+# Ensure subuid/subgid entries exist
grep -q "root:100000:65536" /etc/subuid || echo "root:100000:65536" >>/etc/subuid
grep -q "root:100000:65536" /etc/subgid || echo "root:100000:65536" >>/etc/subgid
-# Combine all options
+# Assemble pct options
PCT_OPTIONS=(${PCT_OPTIONS[@]:-${DEFAULT_PCT_OPTIONS[@]}})
-[[ " ${PCT_OPTIONS[@]} " =~ " -rootfs " ]] || PCT_OPTIONS+=(-rootfs "$CONTAINER_STORAGE:${PCT_DISK_SIZE:-8}")
+[[ " ${PCT_OPTIONS[*]} " =~ " -rootfs " ]] || PCT_OPTIONS+=(-rootfs "$CONTAINER_STORAGE:${PCT_DISK_SIZE:-8}")
-# Secure creation of the LXC container with lock and template check
+# Secure with lockfile
lockfile="/tmp/template.${TEMPLATE}.lock"
-msg_debug "Creating lockfile: $lockfile"
exec 9>"$lockfile" || {
msg_error "Failed to create lock file '$lockfile'."
exit 200
}
flock -w 60 9 || {
- msg_error "Timeout while waiting for template lock"
+ msg_error "Timeout while waiting for template lock."
exit 211
}
+LOGFILE="/tmp/pct_create_${CTID}.log"
msg_debug "pct create command: pct create $CTID ${TEMPLATE_STORAGE}:vztmpl/${TEMPLATE} ${PCT_OPTIONS[*]}"
-if ! pct create "$CTID" "${TEMPLATE_STORAGE}:vztmpl/${TEMPLATE}" "${PCT_OPTIONS[@]}" &>/dev/null; then
- msg_error "Container creation failed. Checking if template is corrupted or incomplete."
+msg_debug "Logfile: $LOGFILE"
+# First attempt
+if ! pct create "$CTID" "${TEMPLATE_STORAGE}:vztmpl/${TEMPLATE}" "${PCT_OPTIONS[@]}" >"$LOGFILE" 2>&1; then
+ msg_error "Container creation failed on ${TEMPLATE_STORAGE}. Checking template..."
+
+ # Validate template file
if [[ ! -s "$TEMPLATE_PATH" || "$(stat -c%s "$TEMPLATE_PATH")" -lt 1000000 ]]; then
- msg_error "Template file too small or missing – re-downloading."
+ msg_warn "Template file too small or missing – re-downloading."
rm -f "$TEMPLATE_PATH"
- elif ! zstdcat "$TEMPLATE_PATH" | tar -tf - &>/dev/null; then
- msg_error "Template appears to be corrupted – re-downloading."
- rm -f "$TEMPLATE_PATH"
- else
- msg_error "Template is valid, but container creation still failed."
- exit 209
+ pveam download "$TEMPLATE_STORAGE" "$TEMPLATE"
+ elif ! tar -tf "$TEMPLATE_PATH" &>/dev/null; then
+ if [[ -n "$ONLINE_TEMPLATE" ]]; then
+ msg_warn "Template appears corrupted – re-downloading."
+ rm -f "$TEMPLATE_PATH"
+ pveam download "$TEMPLATE_STORAGE" "$TEMPLATE"
+ else
+ msg_warn "Template appears corrupted, but no online version exists. Skipping re-download."
+ fi
fi
- # Retry download
- for attempt in {1..3}; do
- msg_info "Attempt $attempt: Re-downloading template..."
- if timeout 120 pveam download "$TEMPLATE_STORAGE" "$TEMPLATE" >/dev/null; then
- msg_ok "Template re-download successful."
- break
+ # Retry after repair
+ if ! pct create "$CTID" "${TEMPLATE_STORAGE}:vztmpl/${TEMPLATE}" "${PCT_OPTIONS[@]}" >>"$LOGFILE" 2>&1; then
+ # Fallback to local storage
+ if [[ "$TEMPLATE_STORAGE" != "local" ]]; then
+ msg_warn "Retrying container creation with fallback to local storage..."
+ LOCAL_TEMPLATE_PATH="/var/lib/vz/template/cache/$TEMPLATE"
+ if [ ! -f "$LOCAL_TEMPLATE_PATH" ]; then
+ msg_info "Downloading template to local..."
+ pveam download local "$TEMPLATE" >/dev/null 2>&1
+ fi
+ if pct create "$CTID" "local:vztmpl/${TEMPLATE}" "${PCT_OPTIONS[@]}" >>"$LOGFILE" 2>&1; then
+ msg_ok "Container successfully created using local fallback."
+ else
+ msg_error "Container creation failed even with local fallback. See $LOGFILE"
+ # Ask user if they want debug output
+ if whiptail --yesno "pct create failed.\nDo you want to enable verbose debug mode and view detailed logs?" 12 70; then
+ set -x
+ bash -x -c "pct create $CTID local:vztmpl/${TEMPLATE} ${PCT_OPTIONS[*]}" 2>&1 | tee -a "$LOGFILE"
+ set +x
+ fi
+ exit 209
+ fi
+ else
+ msg_error "Container creation failed on local storage. See $LOGFILE"
+ if whiptail --yesno "pct create failed.\nDo you want to enable verbose debug mode and view detailed logs?" 12 70; then
+ set -x
+ bash -x -c "pct create $CTID local:vztmpl/${TEMPLATE} ${PCT_OPTIONS[*]}" 2>&1 | tee -a "$LOGFILE"
+ set +x
+ fi
+ exit 209
fi
- if [ "$attempt" -eq 3 ]; then
- msg_error "Three failed attempts. Aborting."
- exit 208
- fi
- sleep $((attempt * 5))
- done
-
- sleep 1 # I/O-Sync-Delay
- msg_ok "Re-downloaded LXC Template"
+ fi
fi
+# Verify container exists
if ! pct list | awk '{print $1}' | grep -qx "$CTID"; then
- msg_error "Container ID $CTID not listed in 'pct list' – unexpected failure."
+ msg_error "Container ID $CTID not listed in 'pct list'. See $LOGFILE"
exit 215
fi
+# Verify config rootfs
if ! grep -q '^rootfs:' "/etc/pve/lxc/$CTID.conf"; then
- msg_error "RootFS entry missing in container config – storage not correctly assigned."
+ msg_error "RootFS entry missing in container config. See $LOGFILE"
exit 216
fi
-if grep -q '^hostname:' "/etc/pve/lxc/$CTID.conf"; then
- CT_HOSTNAME=$(grep '^hostname:' "/etc/pve/lxc/$CTID.conf" | awk '{print $2}')
- if [[ ! "$CT_HOSTNAME" =~ ^[a-z0-9-]+$ ]]; then
- msg_warn "Hostname '$CT_HOSTNAME' contains invalid characters – may cause issues with networking or DNS."
- fi
-fi
-
-if [[ "${PCT_RAM_SIZE:-2048}" -lt 1024 ]]; then
- msg_warn "Configured RAM (${PCT_RAM_SIZE}MB) is below 1024MB – some apps may not work properly."
-fi
-
-if [[ "${PCT_UNPRIVILEGED:-1}" == "1" && " ${PCT_OPTIONS[*]} " == *"fuse=1"* ]]; then
- msg_warn "Unprivileged container with FUSE may fail unless extra device mappings are configured."
-fi
-
msg_ok "LXC Container ${BL}$CTID${CL} ${GN}was successfully created."
diff --git a/misc/tools.func b/misc/tools.func
index 88a4faa7..6b619c44 100644
--- a/misc/tools.func
+++ b/misc/tools.func
@@ -163,19 +163,18 @@ function setup_postgresql() {
if [[ "$CURRENT_PG_VERSION" == "$PG_VERSION" ]]; then
: # PostgreSQL is already at the desired version – no action needed
else
- msg_info "Detected PostgreSQL $CURRENT_PG_VERSION, preparing upgrade to $PG_VERSION"
+ $STD msg_info "Detected PostgreSQL $CURRENT_PG_VERSION, preparing upgrade to $PG_VERSION"
NEED_PG_INSTALL=true
fi
else
- msg_info "Setup PostgreSQL $PG_VERSION"
NEED_PG_INSTALL=true
fi
if [[ "$NEED_PG_INSTALL" == true ]]; then
if [[ -n "$CURRENT_PG_VERSION" ]]; then
- msg_info "Dumping PostgreSQL $CURRENT_PG_VERSION data"
+ $STD msg_info "Dumping PostgreSQL $CURRENT_PG_VERSION data"
su - postgres -c "pg_dumpall > /var/lib/postgresql/backup_$(date +%F)_v${CURRENT_PG_VERSION}.sql"
- msg_ok "Data dump completed"
+ $STD msg_ok "Data dump completed"
systemctl stop postgresql
fi
@@ -188,29 +187,24 @@ function setup_postgresql() {
echo "deb https://apt.postgresql.org/pub/repos/apt ${DISTRO}-pgdg main" \
>/etc/apt/sources.list.d/pgdg.list
- $STD msg_ok "Repository added"
-
$STD apt-get update
+ $STD msg_ok "Repository added"
msg_info "Setup PostgreSQL $PG_VERSION"
$STD apt-get install -y "postgresql-${PG_VERSION}" "postgresql-client-${PG_VERSION}"
- msg_ok "Setup PostgreSQL $PG_VERSION"
if [[ -n "$CURRENT_PG_VERSION" ]]; then
$STD apt-get purge -y "postgresql-${CURRENT_PG_VERSION}" "postgresql-client-${CURRENT_PG_VERSION}" || true
fi
-
- $STD msg_info "Starting PostgreSQL $PG_VERSION"
systemctl enable -q --now postgresql
- $STD msg_ok "PostgreSQL $PG_VERSION started"
if [[ -n "$CURRENT_PG_VERSION" ]]; then
- msg_info "Restoring dumped data"
+ $STD msg_info "Restoring dumped data"
su - postgres -c "psql < /var/lib/postgresql/backup_$(date +%F)_v${CURRENT_PG_VERSION}.sql"
- msg_ok "Data restored"
+ $STD msg_ok "Data restored"
fi
- msg_ok "PostgreSQL $PG_VERSION installed"
+ $STD msg_ok "PostgreSQL $PG_VERSION installed"
fi
# Install optional PostgreSQL modules
@@ -218,13 +212,13 @@ function setup_postgresql() {
IFS=',' read -ra MODULES <<<"$PG_MODULES"
for module in "${MODULES[@]}"; do
local pkg="postgresql-${PG_VERSION}-${module}"
- msg_info "Setup PostgreSQL module/s: $pkg"
+ $STD msg_info "Setup PostgreSQL module/s: $pkg"
$STD apt-get install -y "$pkg" || {
msg_error "Failed to install $pkg"
continue
}
done
- msg_ok "Setup PostgreSQL modules"
+ $STD msg_ok "Setup PostgreSQL modules"
fi
}
@@ -305,7 +299,19 @@ setup_mariadb() {
echo "mariadb-server-$ver mariadb-server/feedback boolean false" | debconf-set-selections
done
fi
- DEBIAN_FRONTEND=noninteractive $STD apt-get install -y mariadb-server mariadb-client
+ DEBIAN_FRONTEND=noninteractive $STD apt-get install -y mariadb-server mariadb-client || {
+ msg_warn "Failed to install MariaDB ${MARIADB_VERSION} from upstream repo – trying distro package as fallback..."
+ # Cleanup, remove upstream repo to avoid conflicts
+ rm -f /etc/apt/sources.list.d/mariadb.list /etc/apt/trusted.gpg.d/mariadb.gpg
+ $STD apt-get update
+ # Final fallback: distro package
+ DEBIAN_FRONTEND=noninteractive $STD apt-get install -y mariadb-server mariadb-client || {
+ msg_error "MariaDB installation failed even with distro fallback!"
+ return 1
+ }
+ msg_ok "Setup MariaDB (distro fallback)"
+ return 0
+ }
msg_ok "Setup MariaDB $MARIADB_VERSION"
}
@@ -385,24 +391,6 @@ function setup_mysql() {
# PHP_MAX_EXECUTION_TIME - (default: 300)
# ------------------------------------------------------------------------------
-# ------------------------------------------------------------------------------
-# Installs PHP with selected modules and configures Apache/FPM support.
-#
-# Description:
-# - Adds Sury PHP repo if needed
-# - Installs default and user-defined modules
-# - Patches php.ini for CLI, Apache, and FPM as needed
-#
-# Variables:
-# PHP_VERSION - PHP version to install (default: 8.4)
-# PHP_MODULE - Additional comma-separated modules
-# PHP_APACHE - Set YES to enable PHP with Apache
-# PHP_FPM - Set YES to enable PHP-FPM
-# PHP_MEMORY_LIMIT - (default: 512M)
-# PHP_UPLOAD_MAX_FILESIZE - (default: 128M)
-# PHP_POST_MAX_SIZE - (default: 128M)
-# PHP_MAX_EXECUTION_TIME - (default: 300)
-# ------------------------------------------------------------------------------
function setup_php() {
local PHP_VERSION="${PHP_VERSION:-8.4}"
local PHP_MODULE="${PHP_MODULE:-}"
@@ -901,68 +889,56 @@ function fetch_and_deploy_gh_release() {
local assets url_match=""
assets=$(echo "$json" | jq -r '.assets[].browser_download_url')
- echo "[DEBUG] Listing all available assets from release:"
- for u in $assets; do
- echo " -> $u"
- done
-
- # 1. Pattern match
+ # If explicit filename pattern is provided (param $6), match that first
if [[ -n "$asset_pattern" ]]; then
for u in $assets; do
- filename_candidate="${u##*/}"
- if [[ "$filename_candidate" == *"$asset_pattern"* ]]; then
+ case "${u##*/}" in
+ $asset_pattern)
url_match="$u"
break
- fi
+ ;;
+ esac
done
fi
- # 2. Arch match (only if no pattern match)
+ # If no match via explicit pattern, fall back to architecture heuristic
if [[ -z "$url_match" ]]; then
for u in $assets; do
- filename_candidate="${u##*/}"
- echo " [DEBUG] Checking asset: $filename_candidate"
- if [[ "$filename_candidate" == *"$arch"* && "$filename_candidate" == *.deb ]]; then
+ if [[ "$u" =~ ($arch|amd64|x86_64|aarch64|arm64).*\.deb$ ]]; then
url_match="$u"
break
fi
done
fi
- # 3. Fallback
+ # Fallback: any .deb file
if [[ -z "$url_match" ]]; then
for u in $assets; do
- filename_candidate="${u##*/}"
- if [[ "$filename_candidate" == *.deb ]]; then
- url_match="$u"
- break
- fi
+ [[ "$u" =~ \.deb$ ]] && url_match="$u" && break
done
fi
if [[ -z "$url_match" ]]; then
- echo "[DEBUG] ❌ No suitable .deb asset found!"
+ msg_error "No suitable .deb asset found for $app"
rm -rf "$tmpdir"
return 1
fi
- echo "[DEBUG] Final selected asset: $url_match"
filename="${url_match##*/}"
-
curl $download_timeout -fsSL -o "$tmpdir/$filename" "$url_match" || {
- echo "[DEBUG] ❌ Download failed: $url_match"
+ msg_error "Download failed: $url_match"
rm -rf "$tmpdir"
return 1
}
chmod 644 "$tmpdir/$filename"
- if ! $STD apt-get install -y "$tmpdir/$filename"; then
- if ! $STD dpkg -i "$tmpdir/$filename"; then
- echo "[DEBUG] ❌ Both apt and dpkg installation failed"
+ $STD apt-get install -y "$tmpdir/$filename" || {
+ $STD dpkg -i "$tmpdir/$filename" || {
+ msg_error "Both apt and dpkg installation failed"
rm -rf "$tmpdir"
return 1
- fi
- fi
+ }
+ }
### Prebuild Mode ###
elif [[ "$mode" == "prebuild" ]]; then
@@ -1283,8 +1259,20 @@ function setup_uv() {
local UV_TAR
case "$ARCH" in
- x86_64) UV_TAR="uv-x86_64-unknown-linux-gnu.tar.gz" ;;
- aarch64) UV_TAR="uv-aarch64-unknown-linux-gnu.tar.gz" ;;
+ x86_64)
+ if grep -qi "alpine" /etc/os-release; then
+ UV_TAR="uv-x86_64-unknown-linux-musl.tar.gz"
+ else
+ UV_TAR="uv-x86_64-unknown-linux-gnu.tar.gz"
+ fi
+ ;;
+ aarch64)
+ if grep -qi "alpine" /etc/os-release; then
+ UV_TAR="uv-aarch64-unknown-linux-musl.tar.gz"
+ else
+ UV_TAR="uv-aarch64-unknown-linux-gnu.tar.gz"
+ fi
+ ;;
*)
msg_error "Unsupported architecture: $ARCH"
rm -rf "$TMP_DIR"
@@ -1339,7 +1327,11 @@ function setup_uv() {
}
rm -rf "$TMP_DIR"
- ensure_usr_local_bin_persist
+ #ensure_usr_local_bin_persist
+ $STD uv python update-shell || {
+ msg_error "Failed to update uv shell integration"
+ return 1
+ }
msg_ok "Setup uv $LATEST_VERSION"
# Optional: install specific Python version
@@ -1408,7 +1400,7 @@ function setup_gs() {
return
fi
- msg_info "Setup Ghostscript $LATEST_VERSION_DOTTED"
+ msg_info "Setup Ghostscript $LATEST_VERSION_DOTTED (Patience)"
curl -fsSL "https://github.com/ArtifexSoftware/ghostpdl-downloads/releases/download/gs${LATEST_VERSION}/ghostscript-${LATEST_VERSION_DOTTED}.tar.gz" -o "$TMP_DIR/ghostscript.tar.gz"
if ! tar -xzf "$TMP_DIR/ghostscript.tar.gz" -C "$TMP_DIR"; then
@@ -1422,7 +1414,9 @@ function setup_gs() {
rm -rf "$TMP_DIR"
}
$STD apt-get install -y build-essential libpng-dev zlib1g-dev
- ./configure >/dev/null && make && sudo make install >/dev/null
+ $STD ./configure >/dev/null
+ $STD make
+ $STD sudo make install
local EXIT_CODE=$?
hash -r
if [[ ! -x "$(command -v gs)" ]]; then
@@ -2007,9 +2001,30 @@ EOF
fi
}
-check_for_update() {
+# ------------------------------------------------------------------------------
+# Checks for new GitHub release (latest tag).
+#
+# Description:
+# - Queries the GitHub API for the latest release tag
+# - Compares it to a local cached version (~/.)
+# - If newer, sets global CHECK_UPDATE_RELEASE and returns 0
+#
+# Usage:
+# check_for_gh_release "AppName" "user/repo"
+# if [[ $? -eq 0 ]]; then
+# echo "New version available: $CHECK_UPDATE_RELEASE"
+# # trigger update...
+# fi
+#
+# Notes:
+# - Requires `jq` (auto-installed if missing)
+# - Does not modify anything, only checks version state
+# - Does not support pre-releases
+# ------------------------------------------------------------------------------
+check_for_gh_release() {
local app="$1"
local source="$2"
+ local pinned_version="${3:-}" # optional
local current_file="$HOME/.${app,,}"
msg_info "Check for update: ${app}"
@@ -2029,34 +2044,40 @@ check_for_update() {
fi
# get latest release
- local release
+  local release
release=$(curl -fsSL "https://api.github.com/repos/${source}/releases/latest" |
jq -r '.tag_name' | sed 's/^v//')
- # DEBUG
- #echo "[DEBUG] Latest release fetched: '${release}'"
-
if [[ -z "$release" ]]; then
msg_error "Unable to determine latest release for ${app}"
return 1
fi
local current=""
- if [[ -f "$current_file" ]]; then
- current=$(<"$current_file")
+ [[ -f "$current_file" ]] && current=$(<"$current_file")
+
+ # PINNED Releases
+ if [[ -n "$pinned_version" ]]; then
+ if [[ "$pinned_version" == "$release" ]]; then
+ msg_ok "${app} pinned to v${pinned_version} (no update needed)"
+ return 1
+ else
+ if [[ "$current" == "$pinned_version" ]]; then
+ msg_ok "${app} pinned to v${pinned_version} (already installed, upstream v${release})"
+ return 1
+ fi
+ msg_info "${app} pinned to v${pinned_version} (upstream v${release}) → update/downgrade required"
+ CHECK_UPDATE_RELEASE="$pinned_version"
+ return 0
+ fi
fi
- # DEBUG
- #echo "[DEBUG] Current file: '${current_file}'"
- #echo "[DEBUG] Current version read: '${current}'"
-
if [[ "$release" != "$current" ]] || [[ ! -f "$current_file" ]]; then
- #echo "[DEBUG] Decision: Update required (release='${release}' current='${current}')"
CHECK_UPDATE_RELEASE="$release"
+ msg_info "New release available: v${release} (current: v${current:-none})"
return 0
else
- #echo "[DEBUG] Decision: No update (release='${release}' current='${current}')"
msg_ok "${app} is up to date (v${release})"
return 1
fi
-}
+}
\ No newline at end of file
diff --git a/tools/addon/copyparty.sh b/tools/addon/copyparty.sh
index 79bd509f..98f7ad72 100644
--- a/tools/addon/copyparty.sh
+++ b/tools/addon/copyparty.sh
@@ -3,6 +3,7 @@
# Copyright (c) 2021-2025 community-scripts ORG
# Author: MickLesk
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
+# Source: https://github.com/9001/copyparty
function header_info() {
clear
@@ -37,7 +38,6 @@ SVC_GROUP="copyparty"
SRC_URL="https://github.com/9001/copyparty/releases/latest/download/copyparty-sfx.py"
DEFAULT_PORT=3923
-# OS Detection
if [[ -f "/etc/alpine-release" ]]; then
OS="Alpine"
PKG_MANAGER="apk add --no-cache"
@@ -57,14 +57,14 @@ function msg_info() { echo -e "${INFO} ${YW}$1...${CL}"; }
function msg_ok() { echo -e "${CM} ${GN}$1${CL}"; }
function msg_error() { echo -e "${CROSS} ${RD}$1${CL}"; }
-# User/Group/Dirs
function setup_user_and_dirs() {
msg_info "Creating $SVC_USER user and directories"
if ! id "$SVC_USER" &>/dev/null; then
if [[ "$OS" == "Debian" ]]; then
useradd -r -s /sbin/nologin -d "$DATA_PATH" "$SVC_USER"
else
- adduser -D -H -h "$DATA_PATH" -s /sbin/nologin "$SVC_USER"
+ addgroup -S "$SVC_GROUP" 2>/dev/null || true
+ adduser -S -D -H -G "$SVC_GROUP" -h "$DATA_PATH" -s /sbin/nologin "$SVC_USER" 2>/dev/null || true
fi
fi
mkdir -p "$DATA_PATH" "$LOG_PATH"
@@ -96,7 +96,6 @@ function update_copyparty() {
exit 0
}
-# --- Existing Install/Update/Uninstall Check ---
if [[ -f "$BIN_PATH" ]]; then
echo -e "${YW}⚠️ $APP is already installed.${CL}"
echo -n "Uninstall $APP? (y/N): "
@@ -115,7 +114,6 @@ if [[ -f "$BIN_PATH" ]]; then
fi
fi
-# --- Deps ---
msg_info "Installing dependencies"
if [[ "$OS" == "Debian" ]]; then
$PKG_MANAGER python3 curl &>/dev/null
@@ -124,17 +122,14 @@ else
fi
msg_ok "Dependencies installed"
-# --- User/Dirs ---
setup_user_and_dirs
-# --- Download Binary ---
msg_info "Downloading $APP"
curl -fsSL "$SRC_URL" -o "$BIN_PATH"
chmod +x "$BIN_PATH"
chown "$SVC_USER:$SVC_GROUP" "$BIN_PATH"
msg_ok "Downloaded to $BIN_PATH"
-# --- Config: Interaktiv, Auth, Rootdir, Port ---
echo -n "Enter port for $APP (default: $DEFAULT_PORT): "
read -r PORT
PORT=${PORT:-$DEFAULT_PORT}
@@ -162,74 +157,85 @@ else
msg_ok "Configured with admin user: $ADMIN_USER"
fi
-# --- Generate /etc/copyparty.conf ---
msg_info "Writing config to $CONF_PATH"
-cat <<EOF >/etc/copyparty.conf
-[global]
- p: $PORT
- ansi
- e2dsa
- e2ts
- theme: 2
- grid
-
-[accounts]
- $ADMIN_USER: $ADMIN_PASS
-
-[/]
- $USER_DATA_PATH
- accs:
- rw: *
- rwmda: $ADMIN_USER
-EOF
+# Build config line-by-line: the [accounts] section is emitted only when admin credentials are set
+{
+ echo "[global]"
+ echo " p: $PORT"
+ echo " ansi"
+ echo " e2dsa"
+ echo " e2ts"
+ echo " theme: 2"
+ echo " grid"
+ echo
+ if [[ -n "$ADMIN_USER" && -n "$ADMIN_PASS" ]]; then
+ echo "[accounts]"
+ echo " $ADMIN_USER: $ADMIN_PASS"
+ echo
+ fi
+ echo "[/]"
+ echo " $USER_DATA_PATH"
+ echo " accs:"
+ if [[ -n "$ADMIN_USER" ]]; then
+ echo " rw: *"
+ echo " rwmda: $ADMIN_USER"
+ else
+ echo " rw: *"
+ fi
+} >"$CONF_PATH"
chmod 640 "$CONF_PATH"
chown "$SVC_USER:$SVC_GROUP" "$CONF_PATH"
msg_ok "Config written"
-# --- Systemd/OpenRC Service ---
msg_info "Creating service"
if [[ "$OS" == "Debian" ]]; then
   cat <<EOF >"$SERVICE_PATH_DEB"
[Unit]
-Description=CopyParty file server
+Description=Copyparty file server
+After=network.target
[Service]
-Type=simple
User=$SVC_USER
Group=$SVC_GROUP
-WorkingDirectory=$USER_DATA_PATH
-Environment=PYTHONUNBUFFERED=x
-LogsDirectory=copyparty
-ExecStart=/usr/bin/python3 $BIN_PATH -c $CONF_PATH
+WorkingDirectory=$DATA_PATH
+ExecStart=/usr/bin/python3 /usr/local/bin/copyparty-sfx.py -c /etc/copyparty.conf
Restart=always
+StandardOutput=append:/var/log/copyparty/copyparty.log
+StandardError=append:/var/log/copyparty/copyparty.err
[Install]
WantedBy=multi-user.target
EOF
- systemctl daemon-reload
- systemctl enable --now copyparty &>/dev/null
-else
-  cat <<EOF >"$SERVICE_PATH_ALP"
+
+ systemctl enable -q --now copyparty
+
+elif [[ "$OS" == "Alpine" ]]; then
+ cat <<'EOF' >"$SERVICE_PATH_ALP"
#!/sbin/openrc-run
-command="/usr/bin/python3"
-command_args="$BIN_PATH -c $CONF_PATH"
+name="copyparty"
+description="Copyparty file server"
+
+command="$(command -v python3)"
+command_args="/usr/local/bin/copyparty-sfx.py -c /etc/copyparty.conf"
command_background=true
-directory="$USER_DATA_PATH"
-pidfile="$USER_DATA_PATH/copyparty.pid"
+directory="/var/lib/copyparty"
+pidfile="/run/copyparty.pid"
+output_log="/var/log/copyparty/copyparty.log"
+error_log="/var/log/copyparty/copyparty.err"
depend() {
need net
}
EOF
+
chmod +x "$SERVICE_PATH_ALP"
- rc-update add copyparty default &>/dev/null
- rc-service copyparty start &>/dev/null
+ rc-update add copyparty default >/dev/null 2>&1
+ rc-service copyparty restart >/dev/null 2>&1
fi
msg_ok "Service created and started"
-# IP detection (as root, maybe interface up/loopback fallback)
IFACE=$(ip -4 route | awk '/default/ {print $5; exit}')
IP=$(ip -4 addr show "$IFACE" | awk '/inet / {print $2}' | cut -d/ -f1 | head -n 1)
[[ -z "$IP" ]] && IP=$(hostname -I | awk '{print $1}')
diff --git a/tools/addon/glances.sh b/tools/addon/glances.sh
new file mode 100644
index 00000000..0f1b76f1
--- /dev/null
+++ b/tools/addon/glances.sh
@@ -0,0 +1,202 @@
+#!/usr/bin/env bash
+
+# Copyright (c) 2021-2025 tteck
+# Author: tteck (tteckster) | MickLesk (CanbiZ)
+# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
+
+function header_info {
+ clear
+ cat <<"EOF"
+ ________
+ / ____/ /___ _____ ________ _____
+ / / __/ / __ `/ __ \/ ___/ _ \/ ___/
+/ /_/ / / /_/ / / / / /__/ __(__ )
+\____/_/\__,_/_/ /_/\___/\___/____/
+
+EOF
+}
+
+APP="Glances"
+YW=$(echo "\033[33m")
+GN=$(echo "\033[1;92m")
+RD=$(echo "\033[01;31m")
+BL=$(echo "\033[36m")
+CL=$(echo "\033[m")
+CM="${GN}✔️${CL}"
+CROSS="${RD}✖️${CL}"
+INFO="${BL}ℹ️${CL}"
+
+function msg_info() { echo -e "${INFO} ${YW}$1...${CL}"; }
+function msg_ok() { echo -e "${CM} ${GN}$1${CL}"; }
+function msg_error() { echo -e "${CROSS} ${RD}$1${CL}"; }
+
+get_local_ip() {
+  if command -v hostname >/dev/null 2>&1 && [[ -n "$(hostname -I 2>/dev/null)" ]]; then
+ hostname -I | awk '{print $1}'
+ elif command -v ip >/dev/null 2>&1; then
+ ip -4 addr show scope global | awk '/inet / {print $2}' | cut -d/ -f1 | head -n1
+ else
+ echo "127.0.0.1"
+ fi
+}
+IP=$(get_local_ip)
+
+install_glances_debian() {
+ msg_info "Installing dependencies"
+ apt-get update >/dev/null 2>&1
+ apt-get install -y gcc lm-sensors wireless-tools >/dev/null 2>&1
+ msg_ok "Installed dependencies"
+
+ msg_info "Setting up Python + uv"
+ source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/tools.func)
+ setup_uv PYTHON_VERSION="3.12"
+ msg_ok "Setup Python + uv"
+
+ msg_info "Installing $APP (with web UI)"
+ cd /opt
+ mkdir -p glances
+ cd glances
+ uv venv
+ source .venv/bin/activate >/dev/null 2>&1
+ uv pip install --upgrade pip wheel setuptools >/dev/null 2>&1
+ uv pip install "glances[web]" >/dev/null 2>&1
+ deactivate
+ msg_ok "Installed $APP"
+
+ msg_info "Creating systemd service"
+  cat <<EOF >/etc/systemd/system/glances.service
+[Unit]
+Description=Glances - An eye on your system
+After=network.target
+
+[Service]
+Type=simple
+ExecStart=/opt/glances/.venv/bin/glances -w
+Restart=on-failure
+WorkingDirectory=/opt/glances
+
+[Install]
+WantedBy=multi-user.target
+EOF
+ systemctl enable -q --now glances
+ msg_ok "Created systemd service"
+
+ echo -e "\n$APP is now running at: http://$IP:61208\n"
+}
+
+# update on Debian/Ubuntu
+update_glances_debian() {
+ if [[ ! -d /opt/glances/.venv ]]; then
+ msg_error "$APP is not installed"
+ exit 1
+ fi
+ msg_info "Updating $APP"
+ cd /opt/glances
+ source .venv/bin/activate
+ uv pip install --upgrade "glances[web]" >/dev/null 2>&1
+ deactivate
+ systemctl restart glances
+ msg_ok "Updated $APP"
+}
+
+# uninstall on Debian/Ubuntu
+uninstall_glances_debian() {
+ msg_info "Uninstalling $APP"
+ systemctl disable -q --now glances || true
+ rm -f /etc/systemd/system/glances.service
+ rm -rf /opt/glances
+ msg_ok "Removed $APP"
+}
+
+# install on Alpine
+install_glances_alpine() {
+ msg_info "Installing dependencies"
+ apk update >/dev/null 2>&1
+ $STD apk add --no-cache \
+ gcc musl-dev linux-headers python3-dev \
+ python3 py3-pip py3-virtualenv lm-sensors wireless-tools >/dev/null 2>&1
+ msg_ok "Installed dependencies"
+
+ msg_info "Setting up Python + uv"
+ source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/tools.func)
+ setup_uv PYTHON_VERSION="3.12"
+ msg_ok "Setup Python + uv"
+
+ msg_info "Installing $APP (with web UI)"
+ cd /opt
+ mkdir -p glances
+ cd glances
+ uv venv
+ source .venv/bin/activate
+ uv pip install --upgrade pip wheel setuptools >/dev/null 2>&1
+ uv pip install "glances[web]" >/dev/null 2>&1
+ deactivate
+ msg_ok "Installed $APP"
+
+ msg_info "Creating OpenRC service"
+ cat <<'EOF' >/etc/init.d/glances
+#!/sbin/openrc-run
+command="/opt/glances/.venv/bin/glances"
+command_args="-w"
+command_background="yes"
+pidfile="/run/glances.pid"
+name="glances"
+description="Glances monitoring tool"
+EOF
+ chmod +x /etc/init.d/glances
+ rc-update add glances default
+ rc-service glances start
+ msg_ok "Created OpenRC service"
+
+ echo -e "\n$APP is now running at: http://$IP:61208\n"
+}
+
+# update on Alpine
+update_glances_alpine() {
+ if [[ ! -d /opt/glances/.venv ]]; then
+ msg_error "$APP is not installed"
+ exit 1
+ fi
+ msg_info "Updating $APP"
+ cd /opt/glances
+ source .venv/bin/activate
+ uv pip install --upgrade "glances[web]" >/dev/null 2>&1
+ deactivate
+ rc-service glances restart
+ msg_ok "Updated $APP"
+}
+
+# uninstall on Alpine
+uninstall_glances_alpine() {
+ msg_info "Uninstalling $APP"
+ rc-service glances stop || true
+ rc-update del glances || true
+ rm -f /etc/init.d/glances
+ rm -rf /opt/glances
+ msg_ok "Removed $APP"
+}
+
+# options menu
+OPTIONS=(Install "Install $APP"
+ Update "Update $APP"
+ Uninstall "Uninstall $APP")
+
+CHOICE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "$APP" --menu "Select an option:" 12 58 3 \
+ "${OPTIONS[@]}" 3>&1 1>&2 2>&3 || true)
+
+# OS detection
+if grep -qi "alpine" /etc/os-release; then
+ case "$CHOICE" in
+ Install) install_glances_alpine ;;
+ Update) update_glances_alpine ;;
+ Uninstall) uninstall_glances_alpine ;;
+ *) exit 0 ;;
+ esac
+else
+ case "$CHOICE" in
+ Install) install_glances_debian ;;
+ Update) update_glances_debian ;;
+ Uninstall) uninstall_glances_debian ;;
+ *) exit 0 ;;
+ esac
+fi
diff --git a/tools/addon/netdata.sh b/tools/addon/netdata.sh
new file mode 100644
index 00000000..1f2d598f
--- /dev/null
+++ b/tools/addon/netdata.sh
@@ -0,0 +1,172 @@
+#!/usr/bin/env bash
+
+# Copyright (c) 2021-2025 tteck
+# Author: tteck (tteckster)
+# License: MIT
+# https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
+
+function header_info {
+ clear
+ cat <<"EOF"
+ _ __ __ ____ __
+ / | / /__ / /_/ __ \____ _/ /_____ _
+ / |/ / _ \/ __/ / / / __ `/ __/ __ `/
+ / /| / __/ /_/ /_/ / /_/ / /_/ /_/ /
+/_/ |_/\___/\__/_____/\__,_/\__/\__,_/
+
+EOF
+}
+
+YW=$(echo "\033[33m")
+BL=$(echo "\033[36m")
+RD=$(echo "\033[01;31m")
+GN=$(echo "\033[1;92m")
+CL=$(echo "\033[m")
+BFR="\\r\\033[K"
+HOLD="-"
+CM="${GN}✓${CL}"
+silent() { "$@" >/dev/null 2>&1; }
+set -e
+header_info
+echo "Loading..."
+function msg_info() {
+ local msg="$1"
+ echo -ne " ${HOLD} ${YW}${msg}..."
+}
+
+function msg_ok() {
+ local msg="$1"
+ echo -e "${BFR} ${CM} ${GN}${msg}${CL}"
+}
+
+function msg_error() { echo -e "${RD}✗ $1${CL}"; }
+
+pve_check() {
+ if ! command -v pveversion >/dev/null 2>&1; then
+ msg_error "This script can only be run on a Proxmox VE host."
+ exit 1
+ fi
+
+ local PVE_VER
+ PVE_VER="$(pveversion | awk -F'/' '{print $2}' | awk -F'-' '{print $1}')"
+
+  # Proxmox VE 9.x: allow only 9.0.x — NOTE(review): 8.x never matches either branch despite the error text claiming 8.0–8.9 support; confirm intent
+ if [[ "$PVE_VER" =~ ^9\.([0-9]+)(\.[0-9]+)?$ ]]; then
+ local MINOR="${BASH_REMATCH[1]}"
+ if ((MINOR != 0)); then
+ msg_error "Unsupported Proxmox VE version: $PVE_VER"
+ msg_error "Supported versions: 8.0 – 8.9 or 9.0.x"
+ exit 1
+ fi
+ return 0
+ fi
+
+ # Proxmox VE 9.x: allow only 9.0
+ if [[ "$PVE_VER" =~ ^9\.([0-9]+)$ ]]; then
+ local MINOR="${BASH_REMATCH[1]}"
+ if ((MINOR != 0)); then
+ msg_error "Unsupported Proxmox VE version: $PVE_VER"
+ msg_error "Supported versions: 8.0 – 8.9 or 9.0"
+ exit 1
+ fi
+ return 0
+ fi
+
+ msg_error "Unsupported Proxmox VE version: $PVE_VER"
+ msg_error "Supported versions: 8.0 – 8.9 or 9.0"
+ exit 1
+}
+
+detect_codename() {
+ source /etc/os-release
+ if [[ "$ID" != "debian" ]]; then
+ msg_error "Unsupported base OS: $ID (only Proxmox VE / Debian supported)."
+ exit 1
+ fi
+ CODENAME="${VERSION_CODENAME:-}"
+ if [[ -z "$CODENAME" ]]; then
+ msg_error "Could not detect Debian codename."
+ exit 1
+ fi
+ echo "$CODENAME"
+}
+
+get_latest_repo_pkg() {
+ local REPO_URL=$1
+ curl -fsSL "$REPO_URL" |
+ grep -oP 'netdata-repo_[^"]+all\.deb' |
+ sort -V |
+ tail -n1
+}
+
+install() {
+ header_info
+ while true; do
+ read -p "Are you sure you want to install NetData on Proxmox VE host. Proceed(y/n)? " yn
+ case $yn in
+ [Yy]*) break ;;
+ [Nn]*) exit ;;
+ *) echo "Please answer yes or no." ;;
+ esac
+ done
+
+ read -r -p "Verbose mode? " prompt
+ [[ ${prompt,,} =~ ^(y|yes)$ ]] && STD="" || STD="silent"
+
+ CODENAME=$(detect_codename)
+ REPO_URL="https://repo.netdata.cloud/repos/repoconfig/debian/${CODENAME}/"
+
+ msg_info "Setting up repository"
+ $STD apt-get install -y debian-keyring
+ PKG=$(get_latest_repo_pkg "$REPO_URL")
+ if [[ -z "$PKG" ]]; then
+ msg_error "Could not find netdata-repo package for Debian $CODENAME"
+ exit 1
+ fi
+ curl -fsSL "${REPO_URL}${PKG}" -o "$PKG"
+ $STD dpkg -i "$PKG"
+ rm -f "$PKG"
+ msg_ok "Set up repository"
+
+ msg_info "Installing Netdata"
+ $STD apt-get update
+ $STD apt-get install -y netdata
+ msg_ok "Installed Netdata"
+ msg_ok "Completed Successfully!\n"
+ echo -e "\n Netdata should be reachable at${BL} http://$(hostname -I | awk '{print $1}'):19999 ${CL}\n"
+}
+
+uninstall() {
+ header_info
+ read -r -p "Verbose mode? " prompt
+ [[ ${prompt,,} =~ ^(y|yes)$ ]] && STD="" || STD="silent"
+
+ msg_info "Uninstalling Netdata"
+ systemctl stop netdata || true
+ rm -rf /var/log/netdata /var/lib/netdata /var/cache/netdata /etc/netdata/go.d
+ rm -rf /etc/apt/trusted.gpg.d/netdata-archive-keyring.gpg /etc/apt/sources.list.d/netdata.list
+ $STD apt-get remove --purge -y netdata netdata-repo
+ systemctl daemon-reload
+  $STD apt-get autoremove -y
+ $STD userdel netdata || true
+ msg_ok "Uninstalled Netdata"
+ msg_ok "Completed Successfully!\n"
+}
+
+header_info
+pve_check
+
+OPTIONS=(Install "Install NetData on Proxmox VE"
+ Uninstall "Uninstall NetData from Proxmox VE")
+
+CHOICE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "NetData" \
+ --menu "Select an option:" 10 58 2 "${OPTIONS[@]}" 3>&1 1>&2 2>&3)
+
+case $CHOICE in
+"Install") install ;;
+"Uninstall") uninstall ;;
+*)
+ echo "Exiting..."
+ exit 0
+ ;;
+esac
diff --git a/tools/headers/glances b/tools/headers/glances
new file mode 100644
index 00000000..18ac9757
--- /dev/null
+++ b/tools/headers/glances
@@ -0,0 +1,6 @@
+ ________
+ / ____/ /___ _____ ________ _____
+ / / __/ / __ `/ __ \/ ___/ _ \/ ___/
+/ /_/ / / /_/ / / / / /__/ __(__ )
+\____/_/\__,_/_/ /_/\___/\___/____/
+
diff --git a/tools/headers/prx-add-ips b/tools/headers/prx-add-ips
new file mode 100644
index 00000000..ea035b53
--- /dev/null
+++ b/tools/headers/prx-add-ips
@@ -0,0 +1,6 @@
+ ____ ___ __ __ ________
+ / __ \_________ _ ______ ___ ____ _ __ / | ____/ /___/ / / _/ __ \_____
+ / /_/ / ___/ __ \| |/_/ __ `__ \/ __ \| |/_/ / /| |/ __ / __ /_____ / // /_/ / ___/
+ / ____/ / / /_/ /> / / / / / /_/ /> < / ___ / /_/ / /_/ /_____// // ____(__ )
+/_/ /_/ \____/_/|_/_/ /_/ /_/\____/_/|_| /_/ |_\__,_/\__,_/ /___/_/ /____/
+
diff --git a/tools/pve/dependency-check.sh b/tools/pve/dependency-check.sh
new file mode 100644
index 00000000..b7798d95
--- /dev/null
+++ b/tools/pve/dependency-check.sh
@@ -0,0 +1,363 @@
+#!/usr/bin/env bash
+
+# Copyright (c) 2023 community-scripts ORG
+# This script is designed to install the Proxmox Dependency Check Hookscript.
+# It sets up a dependency-checking hookscript and automates its
+# application to all new and existing guests using a systemd watcher.
+# License: MIT
+
+# Clear the terminal and print the script's ASCII-art banner.
+# The heredoc delimiter is quoted so nothing inside is expanded.
+function header_info {
+ clear
+ cat <<"EOF"
+ ____ _ ____ _ _
+ | _ \ ___ _ __ ___ _ __ __| | ___ _ __ ___ _ _ / ___| |__ ___ ___| | __
+ | | | |/ _ \ '_ \ / _ \ '_ \ / _` |/ _ \ '_ \ / __| | | | | | '_ \ / _ \/ __| |/ /
+ | |_| | __/ |_) | __/ | | | (_| | __/ | | | (__| |_| | |___| | | | __/ (__| <
+ |____/ \___| .__/ \___|_| |_|\__,_|\___|_| |_|\___|\__, |\____|_| |_|\___|\___|_|\_\
+ |_| |___/
+EOF
+}
+
+# Color variables
+YW=$(echo "\033[33m")
+GN=$(echo "\033[1;92m")
+RD=$(echo "\033[01;31m")
+CL=$(echo "\033[m")
+BFR="\\r\\033[K"
+HOLD=" "
+CM="${GN}✓${CL}"
+CROSS="${RD}✗${CL}"
+
+# Spinner for progress indication (simplified)
+spinner() {
+ local pid=$!
+ local delay=0.1
+ local spinstr='|/-\'
+ while [ "$(ps a | awk '{print $1}' | grep $pid)" ]; do
+ local temp=${spinstr#?}
+ printf " [%c] " "$spinstr"
+ local spinstr=$temp${spinstr%"$temp"}
+ sleep $delay
+ printf "\b\b\b\b\b\b"
+ done
+ printf " \b\b\b\b"
+}
+
+# Message functions
+# Print an in-progress message with no trailing newline so a later
+# msg_ok/msg_error can overwrite the same line.
+msg_info() {
+  echo -ne " ${YW}›${CL} $1..."
+}
+
+# Overwrite the current line (BFR carriage-returns and clears it) with a
+# green check-marked success message.
+msg_ok() {
+  echo -e "${BFR} ${CM} $1${CL}"
+}
+
+# Overwrite the current line with a red cross-marked error message.
+msg_error() {
+  echo -e "${BFR} ${CROSS} $1${CL}"
+}
+# --- End of base script functions ---
+
+
+# --- Installation Functions ---
+
+# Function to create the actual hookscript that runs before guest startup
+create_dependency_hookscript() {
+ msg_info "Creating dependency-check hookscript"
+ mkdir -p /var/lib/vz/snippets
+ cat <<'EOF' > /var/lib/vz/snippets/dependency-check.sh
+#!/bin/bash
+# Proxmox Hookscript for Pre-Start Dependency Checking
+# Works for both QEMU VMs and LXC Containers
+
+# --- Configuration ---
+POLL_INTERVAL=5 # Seconds to wait between checks
+MAX_ATTEMPTS=60 # Max number of attempts before failing (60 * 5s = 5 minutes)
+# --- End Configuration ---
+
+VMID=$1
+PHASE=$2
+
+# Function for logging to syslog with a consistent format
+log() {
+ echo "[hookscript-dep-check] VMID $VMID: $1"
+}
+
+# This script only runs in the 'pre-start' phase
+if [ "$PHASE" != "pre-start" ]; then
+ exit 0
+fi
+
+log "--- Starting Pre-Start Dependency Check ---"
+
+# --- Determine Guest Type (QEMU or LXC) ---
+GUEST_TYPE=""
+CONFIG_CMD=""
+if qm config "$VMID" >/dev/null 2>&1; then
+ GUEST_TYPE="qemu"
+ CONFIG_CMD="qm config"
+ log "Guest type is QEMU (VM)."
+elif pct config "$VMID" >/dev/null 2>&1; then
+ GUEST_TYPE="lxc"
+ CONFIG_CMD="pct config"
+ log "Guest type is LXC (Container)."
+else
+ log "ERROR: Could not determine guest type for $VMID. Aborting."
+ exit 1
+fi
+
+GUEST_CONFIG=$($CONFIG_CMD "$VMID")
+
+# --- 1. Storage Availability Check ---
+log "Checking storage availability..."
+# Grep for all disk definitions (scsi, sata, virtio, ide, rootfs, mp)
+# and extract the storage identifier (the field between the colons).
+# Sort -u gets the unique list of storage pools.
+STORAGE_IDS=$(echo "$GUEST_CONFIG" | grep -E '^(scsi|sata|virtio|ide|rootfs|mp)[0-9]*:' | awk -F'[:]' '{print $2}' | awk '{print$1}' | sort -u)
+
+if [ -z "$STORAGE_IDS" ]; then
+ log "No storage dependencies found to check."
+else
+ for STORAGE_ID in $STORAGE_IDS; do
+ log "Checking status of storage: '$STORAGE_ID'"
+ ATTEMPTS=0
+ while true; do
+ # Grep for the storage ID line in pvesm status and check the 'Active' column (3rd column)
+ STATUS=$(pvesm status | grep "^\s*$STORAGE_ID\s" | awk '{print $3}')
+ if [ "$STATUS" == "active" ]; then
+ log "Storage '$STORAGE_ID' is active."
+ break
+ fi
+
+ ATTEMPTS=$((ATTEMPTS + 1))
+ if [ $ATTEMPTS -ge $MAX_ATTEMPTS ]; then
+ log "ERROR: Timeout waiting for storage '$STORAGE_ID' to become active. Aborting start."
+ exit 1
+ fi
+
+ log "Storage '$STORAGE_ID' is not active (current status: '${STATUS:-inactive/unknown}'). Waiting ${POLL_INTERVAL}s... (Attempt ${ATTEMPTS}/${MAX_ATTEMPTS})"
+ sleep $POLL_INTERVAL
+ done
+ done
+fi
+log "All storage dependencies are met."
+
+
+# --- 2. Custom Tag-Based Dependency Check ---
+log "Checking for custom tag-based dependencies..."
+TAGS=$(echo "$GUEST_CONFIG" | grep '^tags:' | awk '{print $2}')
+
+if [ -z "$TAGS" ]; then
+ log "No tags found. Skipping custom dependency check."
+else
+ # Replace semicolons with spaces to loop through tags
+ for TAG in ${TAGS//;/ }; do
+ # Check if the tag matches our dependency format 'dep_*'
+ if [[ $TAG == dep_* ]]; then
+ log "Found dependency tag: '$TAG'"
+
+ # Split tag into parts using underscore as delimiter
+ IFS='_' read -ra PARTS <<< "$TAG"
+ DEP_TYPE="${PARTS[1]}"
+
+ ATTEMPTS=0
+ while true; do
+ CHECK_PASSED=false
+ case "$DEP_TYPE" in
+ "tcp")
+ HOST="${PARTS[2]}"
+ PORT="${PARTS[3]}"
+ if [ -z "$HOST" ] || [ -z "$PORT" ]; then
+ log "ERROR: Malformed TCP dependency tag '$TAG'. Skipping."
+ CHECK_PASSED=true # Skip to avoid infinite loop
+ # nc -z is great for this. -w sets a timeout.
+ elif nc -z -w 2 "$HOST" "$PORT"; then
+ log "TCP dependency met: Host $HOST port $PORT is open."
+ CHECK_PASSED=true
+ fi
+ ;;
+
+ "ping")
+ HOST="${PARTS[2]}"
+ if [ -z "$HOST" ]; then
+ log "ERROR: Malformed PING dependency tag '$TAG'. Skipping."
+ CHECK_PASSED=true # Skip to avoid infinite loop
+ # ping -c 1 (one packet) -W 2 (2-second timeout)
+ elif ping -c 1 -W 2 "$HOST" >/dev/null 2>&1; then
+ log "Ping dependency met: Host $HOST is reachable."
+ CHECK_PASSED=true
+ fi
+ ;;
+
+ *)
+ log "WARNING: Unknown dependency type '$DEP_TYPE' in tag '$TAG'. Ignoring."
+ CHECK_PASSED=true # Mark as passed to avoid getting stuck
+ ;;
+ esac
+
+ if $CHECK_PASSED; then
+ break
+ fi
+
+ ATTEMPTS=$((ATTEMPTS + 1))
+ if [ $ATTEMPTS -ge $MAX_ATTEMPTS ]; then
+ log "ERROR: Timeout waiting for dependency '$TAG'. Aborting start."
+ exit 1
+ fi
+
+ log "Dependency '$TAG' not met. Waiting ${POLL_INTERVAL}s... (Attempt ${ATTEMPTS}/${MAX_ATTEMPTS})"
+ sleep $POLL_INTERVAL
+ done
+ fi
+ done
+fi
+
+log "All custom dependencies are met."
+log "--- Dependency Check Complete. Proceeding with start. ---"
+exit 0
+EOF
+ chmod +x /var/lib/vz/snippets/dependency-check.sh
+ msg_ok "Created dependency-check hookscript"
+}
+
+# Function to create the config file for exclusions
+# Create /etc/default/pve-auto-hook, which lists guest IDs that must NOT
+# receive the hookscript. An existing file is never overwritten.
+create_exclusion_config() {
+  msg_info "Creating exclusion configuration file"
+  if [ -f /etc/default/pve-auto-hook ]; then
+    msg_ok "Exclusion file already exists, skipping."
+  else
+    # Quoted 'EOF' prevents any expansion inside the template.
+    cat <<'EOF' > /etc/default/pve-auto-hook
+#
+# Configuration for the Proxmox Automatic Hookscript Applicator
+#
+# Add VM or LXC IDs here to prevent the hookscript from being added.
+# Separate IDs with spaces.
+#
+# Example:
+# IGNORE_IDS="9000 9001 105"
+#
+
+IGNORE_IDS=""
+EOF
+    msg_ok "Created exclusion configuration file"
+  fi
+}
+
+# Function to create the script that applies the hook
+# Install /usr/local/bin/pve-apply-hookscript.sh: it scans all QEMU VMs
+# and LXC containers and attaches the dependency-check hookscript to any
+# guest that has none, honouring the IGNORE_IDS exclusion list.
+create_applicator_script() {
+  msg_info "Creating the hookscript applicator script"
+  cat <<'EOF' > /usr/local/bin/pve-apply-hookscript.sh
+#!/bin/bash
+HOOKSCRIPT_VOLUME_ID="local:snippets/dependency-check.sh"
+CONFIG_FILE="/etc/default/pve-auto-hook"
+LOG_TAG="pve-auto-hook-list"
+
+# Log a message to the journal under LOG_TAG.
+log() {
+  systemd-cat -t "$LOG_TAG" <<< "$1"
+}
+
+# Pull in IGNORE_IDS (space-separated guest IDs to skip), if configured.
+if [ -f "$CONFIG_FILE" ]; then
+  source "$CONFIG_FILE"
+fi
+
+# Process QEMU VMs
+qm list | awk 'NR>1 {print $1}' | while read -r VMID; do
+  is_ignored=false
+  for id_to_ignore in $IGNORE_IDS; do
+    if [ "$id_to_ignore" == "$VMID" ]; then is_ignored=true; break; fi
+  done
+  if $is_ignored; then continue; fi
+  # Guests that already have any hookscript are left untouched.
+  if qm config "$VMID" | grep -q '^hookscript:'; then continue; fi
+  log "Hookscript not found for VM $VMID. Applying..."
+  qm set "$VMID" --hookscript "$HOOKSCRIPT_VOLUME_ID"
+done
+
+# Process LXC Containers
+pct list | awk 'NR>1 {print $1}' | while read -r VMID; do
+  is_ignored=false
+  for id_to_ignore in $IGNORE_IDS; do
+    if [ "$id_to_ignore" == "$VMID" ]; then is_ignored=true; break; fi
+  done
+  if $is_ignored; then continue; fi
+  if pct config "$VMID" | grep -q '^hookscript:'; then continue; fi
+  log "Hookscript not found for LXC $VMID. Applying..."
+  pct set "$VMID" --hookscript "$HOOKSCRIPT_VOLUME_ID"
+done
+EOF
+  chmod +x /usr/local/bin/pve-apply-hookscript.sh
+  msg_ok "Created applicator script"
+}
+
+# Function to set up the systemd watcher and service
+# Install a systemd .path watcher that fires whenever a guest config in
+# /etc/pve changes, plus the oneshot .service it activates (systemd pairs
+# a .path unit with the like-named .service) to run the applicator.
+create_systemd_units() {
+  msg_info "Creating systemd watcher and service units"
+  cat <<'EOF' > /etc/systemd/system/pve-auto-hook.path
+[Unit]
+Description=Watch for new Proxmox guest configs to apply hookscript
+
+[Path]
+PathModified=/etc/pve/qemu-server/
+PathModified=/etc/pve/lxc/
+
+[Install]
+WantedBy=multi-user.target
+EOF
+
+  # Triggered by the .path unit above; runs the applicator once per event.
+  cat <<'EOF' > /etc/systemd/system/pve-auto-hook.service
+[Unit]
+Description=Automatically add hookscript to new Proxmox guests
+
+[Service]
+Type=oneshot
+ExecStart=/usr/local/bin/pve-apply-hookscript.sh
+EOF
+  msg_ok "Created systemd units"
+}
+
+
+# --- Main Execution ---
+header_info
+
+if ! command -v pveversion >/dev/null 2>&1; then
+ msg_error "This script must be run on a Proxmox VE host."
+ exit 1
+fi
+
+echo -e "\nThis script will install a service to automatically apply a"
+echo -e "dependency-checking hookscript to all new and existing Proxmox guests."
+echo -e "${YW}This includes creating files in:${CL}"
+echo -e " - /var/lib/vz/snippets/"
+echo -e " - /usr/local/bin/"
+echo -e " - /etc/default/"
+echo -e " - /etc/systemd/system/\n"
+
+read -p "Do you want to proceed with the installation? (y/n): " -n 1 -r
+echo
+if [[ ! $REPLY =~ ^[Yy]$ ]]; then
+ msg_error "Installation cancelled."
+ exit 1
+fi
+
+echo -e "\n"
+create_dependency_hookscript
+create_exclusion_config
+create_applicator_script
+create_systemd_units
+
+msg_info "Reloading systemd and enabling the watcher"
+(systemctl daemon-reload && systemctl enable --now pve-auto-hook.path) >/dev/null 2>&1 &
+spinner
+msg_ok "Systemd watcher enabled and running"
+
+msg_info "Performing initial run to update existing guests"
+/usr/local/bin/pve-apply-hookscript.sh >/dev/null 2>&1 &
+spinner
+msg_ok "Initial run complete"
+
+echo -e "\n\n${GN}Installation successful!${CL}"
+echo -e "The service is now active and will monitor for new guests."
+echo -e "To ${YW}exclude${CL} a VM or LXC, add its ID to the ${YW}IGNORE_IDS${CL} variable in:"
+echo -e " ${YW}/etc/default/pve-auto-hook${CL}"
+echo -e "\nYou can monitor the service's activity with:"
+echo -e " ${YW}journalctl -fu pve-auto-hook.service${CL}\n"
+
+exit 0
diff --git a/tools/pve/prx-add-ips.sh b/tools/pve/prx-add-ips.sh
new file mode 100644
index 00000000..244cffd4
--- /dev/null
+++ b/tools/pve/prx-add-ips.sh
@@ -0,0 +1,187 @@
+#!/usr/bin/env bash
+# -----------------------------------------------------------------
+# Proxmox Add-IPs (LXC + VMs → Tags)
+# -----------------------------------------------------------------
+# © 2021-2025 community-scripts ORG
+# Author: MickLesk (CanbiZ)
+# License: MIT
+# -----------------------------------------------------------------
+
+APP="Proxmox Add-IPs"
+FILE_PATH="/usr/local/bin/prx-add-ips"
+CONF_DIR="/opt/prx-add-ips"
+CONF_FILE="$CONF_DIR/prx-add-ips.conf"
+
+set -Eeuo pipefail
+
+# --- Colors (optional) ---
+YW="\033[33m"
+GN="\033[1;92m"
+RD="\033[01;31m"
+CL="\033[m"
+msg() { [[ "${USE_COLOR:-true}" == "true" ]] && echo -e "$@" || echo -e "$(echo "$@" | sed -E 's/\x1B\[[0-9;]*[JKmsu]//g')"; }
+msg_info() { msg "${YW}➜ $1${CL}"; }
+msg_ok() { msg "${GN}✔ $1${CL}"; }
+msg_error() { msg "${RD}✖ $1${CL}"; }
+
+# -----------------------------------------------------------------
+# Installation
+# -----------------------------------------------------------------
+if [[ -f "$FILE_PATH" ]]; then
+ msg_info "$APP already installed at $FILE_PATH"
+ exit 0
+fi
+
+msg_info "Installing dependencies"
+apt-get update -qq
+apt-get install -y jq ipcalc net-tools >/dev/null
+msg_ok "Dependencies installed"
+
+mkdir -p "$CONF_DIR"
+
+# -----------------------------------------------------------------
+# Config
+# -----------------------------------------------------------------
+# Write the default config on first run; never overwrite an existing one.
+if [[ ! -f "$CONF_FILE" ]]; then
+  # BUG FIX: this was `cat <"$CONF_FILE"`, which tried to READ the
+  # (nonexistent) config — under `set -Eeuo pipefail` the script dies
+  # here. The template below must be written via a heredoc instead;
+  # 'EOF' is quoted so nothing in the template is expanded.
+  cat <<'EOF' >"$CONF_FILE"
+# prx-add-ips.conf – configuration for Proxmox Add-IPs
+
+# Allowed CIDRs
+CIDR_LIST=(
+  192.168.0.0/16
+  10.0.0.0/8
+  172.16.0.0/12
+)
+
+# Main loop interval in seconds
+LOOP_INTERVAL=60
+
+# Use colored output? (true/false)
+USE_COLOR=true
+EOF
+  msg_ok "Default config written to $CONF_FILE"
+else
+  msg_info "Config $CONF_FILE already exists"
+fi
+
+# -----------------------------------------------------------------
+# Main Script
+# -----------------------------------------------------------------
+cat <<"EOF" >"$FILE_PATH"
+#!/usr/bin/env bash
+set -Eeuo pipefail
+
+CONFIG_FILE="/opt/prx-add-ips/prx-add-ips.conf"
+[[ -f "$CONFIG_FILE" ]] && source "$CONFIG_FILE"
+
+YW="\033[33m"; GN="\033[1;92m"; RD="\033[01;31m"; CL="\033[m"
+msg() { [[ "${USE_COLOR:-true}" == "true" ]] && echo -e "$@" || echo -e "$(echo "$@" | sed -E 's/\x1B\[[0-9;]*[JKmsu]//g')"; }
+msg_info() { msg "${YW}➜ $1${CL}"; }
+msg_ok() { msg "${GN}✔ $1${CL}"; }
+msg_error(){ msg "${RD}✖ $1${CL}"; }
+
+is_valid_ipv4() {
+ local ip=$1
+ [[ $ip =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}$ ]] || return 1
+ for part in ${ip//./ }; do
+ ((part >= 0 && part <= 255)) || return 1
+ done
+ return 0
+}
+
+ip_in_cidrs() {
+ local ip="$1"
+ for cidr in "${CIDR_LIST[@]}"; do
+ ipcalc -nb "$cidr" "$ip" &>/dev/null && return 0
+ done
+ return 1
+}
+
+set_tags() {
+ local vmid="$1" kind="$2"; shift 2
+ local ips=("$@")
+
+ # fetch the guest's current tags
+ local existing_tags=()
+ mapfile -t existing_tags < <($kind config "$vmid" | awk '/tags:/{$1=""; print}' | tr ';' '\n')
+
+ local existing_ips=()
+ local non_ip_tags=()
+ for t in "${existing_tags[@]}"; do
+ if is_valid_ipv4 "$t"; then
+ existing_ips+=("$t")
+ else
+ non_ip_tags+=("$t")
+ fi
+ done
+
+ local new_tags=("${non_ip_tags[@]}" "${ips[@]}")
+ new_tags=($(printf "%s\n" "${new_tags[@]}" | sort -u))
+
+ if [[ "$(printf "%s\n" "${existing_ips[@]}" | sort -u)" != "$(printf "%s\n" "${ips[@]}" | sort -u)" ]]; then
+ msg_info "$kind $vmid → updating tags to ${new_tags[*]}"
+ $kind set "$vmid" -tags "$(IFS=';'; echo "${new_tags[*]}")"
+ else
+ msg_info "$kind $vmid → no IP change"
+ fi
+}
+
+update_lxc_iptags() {
+ for vmid in $(pct list | awk 'NR>1 {print $1}'); do
+ local ips=()
+ for ip in $(lxc-info -n "$vmid" -iH 2>/dev/null); do
+ is_valid_ipv4 "$ip" && ip_in_cidrs "$ip" && ips+=("$ip")
+ done
+ [[ ${#ips[@]} -gt 0 ]] && set_tags "$vmid" pct "${ips[@]}"
+ done
+}
+
+update_vm_iptags() {
+ for vmid in $(qm list | awk 'NR>1 {print $1}'); do
+ if qm agent "$vmid" ping &>/dev/null; then
+ local ips=()
+ mapfile -t ips < <(qm agent "$vmid" network-get-interfaces \
+ | jq -r '.[]?."ip-addresses"[]?."ip-address" | select(test("^[0-9]+\\."))')
+ local filtered=()
+ for ip in "${ips[@]}"; do
+ is_valid_ipv4 "$ip" && ip_in_cidrs "$ip" && filtered+=("$ip")
+ done
+ [[ ${#filtered[@]} -gt 0 ]] && set_tags "$vmid" qm "${filtered[@]}"
+ fi
+ done
+}
+
+while true; do
+ update_lxc_iptags
+ update_vm_iptags
+ sleep "${LOOP_INTERVAL:-60}"
+done
+EOF
+
+chmod +x "$FILE_PATH"
+msg_ok "Main script installed to $FILE_PATH"
+
+# -----------------------------------------------------------------
+# Systemd Service
+# -----------------------------------------------------------------
+SERVICE="/etc/systemd/system/prx-add-ips.service"
+# Create the systemd unit once; an existing unit is left untouched.
+if [[ ! -f "$SERVICE" ]]; then
+  # BUG FIX: this was `cat <"$SERVICE"`, which tried to READ the
+  # nonexistent unit file. Write the unit via a heredoc instead; the
+  # delimiter is deliberately UNQUOTED so $FILE_PATH expands into
+  # ExecStart.
+  cat <<EOF >"$SERVICE"
+[Unit]
+Description=Proxmox Add-IPs (LXC + VM)
+After=network.target
+
+[Service]
+Type=simple
+ExecStart=$FILE_PATH
+Restart=always
+
+[Install]
+WantedBy=multi-user.target
+EOF
+  msg_ok "Service created"
+fi
+
+systemctl daemon-reload
+systemctl enable -q --now prx-add-ips.service
+msg_ok "$APP service started"
diff --git a/vm/debian-13-vm.sh b/vm/debian-13-vm.sh
index eeb0d106..05aa50bc 100644
--- a/vm/debian-13-vm.sh
+++ b/vm/debian-13-vm.sh
@@ -11,9 +11,9 @@ function header_info {
cat <<"EOF"
____ __ _ ________
/ __ \___ / /_ (_)___ _____ < /__ /
- / / / / _ \/ __ \/ / __ `/ __ \ / / /_ <
- / /_/ / __/ /_/ / / /_/ / / / / / /___/ /
-/_____/\___/_.___/_/\__,_/_/ /_/ /_//____/
+ / / / / _ \/ __ \/ / __ `/ __ \ / / /_ <
+ / /_/ / __/ /_/ / / /_/ / / / / / /___/ /
+/_____/\___/_.___/_/\__,_/_/ /_/ /_//____/
(Trixie)
EOF
}
@@ -138,36 +138,37 @@ function check_root() {
fi
}
+# This function checks the version of Proxmox Virtual Environment (PVE) and exits if the version is not supported.
+# Supported: Proxmox VE 8.0.x – 8.9.x and 9.0 (NOT 9.1+)
pve_check() {
local PVE_VER
PVE_VER="$(pveversion | awk -F'/' '{print $2}' | awk -F'-' '{print $1}')"
- # Check for Proxmox VE 8.x
+ # Check for Proxmox VE 8.x: allow 8.0–8.9
if [[ "$PVE_VER" =~ ^8\.([0-9]+) ]]; then
local MINOR="${BASH_REMATCH[1]}"
- if ((MINOR < 1 || MINOR > 4)); then
+ if ((MINOR < 0 || MINOR > 9)); then
msg_error "This version of Proxmox VE is not supported."
- echo -e "Required: Proxmox VE version 8.1 – 8.4"
+ msg_error "Supported: Proxmox VE version 8.0 – 8.9"
exit 1
fi
return 0
fi
- # Check for Proxmox VE 9.x (Beta) — require confirmation
+ # Check for Proxmox VE 9.x: allow ONLY 9.0
if [[ "$PVE_VER" =~ ^9\.([0-9]+) ]]; then
- if whiptail --title "Proxmox 9.x Detected (Beta)" \
- --yesno "You are using Proxmox VE $PVE_VER, which is currently in Beta state.\n\nThis version is experimentally supported.\n\nDo you want to proceed anyway?" 12 70; then
- msg_ok "Confirmed: Continuing with Proxmox VE $PVE_VER"
- return 0
- else
- msg_error "Aborted by user: Proxmox VE 9.x was not confirmed."
+ local MINOR="${BASH_REMATCH[1]}"
+ if ((MINOR != 0)); then
+ msg_error "This version of Proxmox VE is not yet supported."
+ msg_error "Supported: Proxmox VE version 9.0"
exit 1
fi
+ return 0
fi
# All other unsupported versions
msg_error "This version of Proxmox VE is not supported."
- echo -e "Supported versions: Proxmox VE 8.1 – 8.4 or 9.x (Beta, with confirmation)"
+ msg_error "Supported versions: Proxmox VE 8.0 – 8.x or 9.0"
exit 1
}
@@ -474,9 +475,9 @@ msg_ok "Using ${CL}${BL}$STORAGE${CL} ${GN}for Storage Location."
msg_ok "Virtual Machine ID is ${CL}${BL}$VMID${CL}."
msg_info "Retrieving the URL for the Debian 13 Qcow2 Disk Image"
if [ "$CLOUD_INIT" == "yes" ]; then
- URL=https://cloud.debian.org/images/cloud/trixie/daily/latest/debian-13-genericcloud-amd64-daily.qcow2
+ URL=https://cloud.debian.org/images/cloud/trixie/latest/debian-13-genericcloud-amd64.qcow2
else
- URL=https://cloud.debian.org/images/cloud/trixie/daily/latest/debian-13-nocloud-amd64-daily.qcow2
+ URL=https://cloud.debian.org/images/cloud/trixie/latest/debian-13-nocloud-amd64.qcow2
fi
sleep 2
msg_ok "${CL}${BL}${URL}${CL}"
@@ -540,7 +541,7 @@ DESCRIPTION=$(
-
+
GitHub
diff --git a/vm/docker-vm.sh b/vm/docker-vm.sh
index 67455134..137baf92 100644
--- a/vm/docker-vm.sh
+++ b/vm/docker-vm.sh
@@ -2,13 +2,15 @@
# Copyright (c) 2021-2025 community-scripts ORG
# Author: thost96 (thost96) | Co-Author: michelroegl-brunner
+# Refactor (q35 + PVE9 virt-customize network fix + robustness): MickLesk
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
+set -e
source /dev/stdin <<<$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/api.func)
function header_info() {
- clear
- cat <<"EOF"
+ clear
+ cat <<"EOF"
____ __ _ ____ ___
/ __ \____ _____/ /_____ _____ | | / / |/ /
/ / / / __ \/ ___/ //_/ _ \/ ___/ | | / / /|_/ /
@@ -19,6 +21,8 @@ EOF
}
header_info
echo -e "\n Loading..."
+
+# ---------- Globals ----------
GEN_MAC=02:$(openssl rand -hex 5 | awk '{print toupper($0)}' | sed 's/\(..\)/\1:/g; s/.$//')
RANDOM_UUID="$(cat /proc/sys/kernel/random/uuid)"
METHOD=""
@@ -29,13 +33,10 @@ DISK_SIZE="10G"
YW=$(echo "\033[33m")
BL=$(echo "\033[36m")
-HA=$(echo "\033[1;34m")
RD=$(echo "\033[01;31m")
BGN=$(echo "\033[4;92m")
GN=$(echo "\033[1;92m")
DGN=$(echo "\033[32m")
-CL=$(echo "\033[m")
-
CL=$(echo "\033[m")
BOLD=$(echo "\033[1m")
BFR="\\r\\033[K"
@@ -59,349 +60,273 @@ MACADDRESS="${TAB}🔗${TAB}${CL}"
VLANTAG="${TAB}🏷️${TAB}${CL}"
CREATING="${TAB}🚀${TAB}${CL}"
ADVANCED="${TAB}🧩${TAB}${CL}"
+CLOUD="${TAB}☁️${TAB}${CL}"
+
THIN="discard=on,ssd=1,"
-set -e
+
trap 'error_handler $LINENO "$BASH_COMMAND"' ERR
trap cleanup EXIT
trap 'post_update_to_api "failed" "INTERRUPTED"' SIGINT
trap 'post_update_to_api "failed" "TERMINATED"' SIGTERM
+
function error_handler() {
- local exit_code="$?"
- local line_number="$1"
- local command="$2"
- post_update_to_api "failed" "${command}"
- local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
- echo -e "\n$error_message\n"
- cleanup_vmid
+ local exit_code="$?"
+ local line_number="$1"
+ local command="$2"
+ local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
+ post_update_to_api "failed" "${command}"
+ echo -e "\n$error_message\n"
+ cleanup_vmid
}
function get_valid_nextid() {
- local try_id
- try_id=$(pvesh get /cluster/nextid)
- while true; do
- if [ -f "/etc/pve/qemu-server/${try_id}.conf" ] || [ -f "/etc/pve/lxc/${try_id}.conf" ]; then
- try_id=$((try_id + 1))
- continue
- fi
- if lvs --noheadings -o lv_name | grep -qE "(^|[-_])${try_id}($|[-_])"; then
- try_id=$((try_id + 1))
- continue
- fi
- break
- done
- echo "$try_id"
+ local try_id
+ try_id=$(pvesh get /cluster/nextid)
+ while true; do
+ if [ -f "/etc/pve/qemu-server/${try_id}.conf" ] || [ -f "/etc/pve/lxc/${try_id}.conf" ]; then
+ try_id=$((try_id + 1))
+ continue
+ fi
+ if lvs --noheadings -o lv_name | grep -qE "(^|[-_])${try_id}($|[-_])"; then
+ try_id=$((try_id + 1))
+ continue
+ fi
+ break
+ done
+ echo "$try_id"
}
function cleanup_vmid() {
- if qm status $VMID &>/dev/null; then
- qm stop $VMID &>/dev/null
- qm destroy $VMID &>/dev/null
- fi
+ if qm status $VMID &>/dev/null; then
+ qm stop $VMID &>/dev/null || true
+ qm destroy $VMID &>/dev/null || true
+ fi
}
function cleanup() {
- popd >/dev/null
- post_update_to_api "done" "none"
- rm -rf $TEMP_DIR
+ popd >/dev/null || true
+ post_update_to_api "done" "none"
+ rm -rf "$TEMP_DIR"
}
TEMP_DIR=$(mktemp -d)
-pushd $TEMP_DIR >/dev/null
-if whiptail --backtitle "Proxmox VE Helper Scripts" --title "Docker VM" --yesno "This will create a New Docker VM. Proceed?" 10 58; then
- :
-else
- header_info && echo -e "${CROSS}${RD}User exited script${CL}\n" && exit
+pushd "$TEMP_DIR" >/dev/null
+
+if ! whiptail --backtitle "Proxmox VE Helper Scripts" --title "Docker VM" --yesno "This will create a New Docker VM. Proceed?" 10 58; then
+ header_info && echo -e "${CROSS}${RD}User exited script${CL}\n" && exit
fi
-function msg_info() {
- local msg="$1"
- echo -ne "${TAB}${YW}${HOLD}${msg}${HOLD}"
-}
-
-function msg_ok() {
- local msg="$1"
- echo -e "${BFR}${CM}${GN}${msg}${CL}"
-}
-
-function msg_error() {
- local msg="$1"
- echo -e "${BFR}${CROSS}${RD}${msg}${CL}"
-}
+function msg_info() { echo -ne "${TAB}${YW}${HOLD}$1${HOLD}"; }
+function msg_ok() { echo -e "${BFR}${CM}${GN}$1${CL}"; }
+function msg_error() { echo -e "${BFR}${CROSS}${RD}$1${CL}"; }
function check_root() {
- if [[ "$(id -u)" -ne 0 || $(ps -o comm= -p $PPID) == "sudo" ]]; then
- clear
- msg_error "Please run this script as root."
- echo -e "\nExiting..."
- sleep 2
- exit
- fi
+ if [[ "$(id -u)" -ne 0 || $(ps -o comm= -p $PPID) == "sudo" ]]; then
+ clear
+ msg_error "Please run this script as root."
+ echo -e "\nExiting..."
+ sleep 2
+ exit
+ fi
}
-function pve_check() {
- if ! pveversion | grep -Eq "pve-manager/8\.[1-4](\.[0-9]+)*"; then
- msg_error "${CROSS}${RD}This version of Proxmox Virtual Environment is not supported"
- echo -e "Requires Proxmox Virtual Environment Version 8.1 or later."
- echo -e "Exiting..."
- sleep 2
- exit
- fi
+# Supported: Proxmox VE 8.0.x – 8.9.x and 9.0 (NOT 9.1+)
+pve_check() {
+ local PVE_VER
+ PVE_VER="$(pveversion | awk -F'/' '{print $2}' | awk -F'-' '{print $1}')"
+ if [[ "$PVE_VER" =~ ^8\.([0-9]+) ]]; then
+ local MINOR="${BASH_REMATCH[1]}"
+ ((MINOR >= 0 && MINOR <= 9)) && return 0
+ msg_error "This version of Proxmox VE is not supported."
+ exit 1
+ fi
+ if [[ "$PVE_VER" =~ ^9\.([0-9]+) ]]; then
+ local MINOR="${BASH_REMATCH[1]}"
+ ((MINOR == 0)) && return 0
+ msg_error "This version of Proxmox VE is not yet supported (9.1+)."
+ exit 1
+ fi
+ msg_error "This version of Proxmox VE is not supported (need 8.x or 9.0)."
+ exit 1
}
function arch_check() {
- if [ "$(dpkg --print-architecture)" != "amd64" ]; then
- echo -e "\n ${INFO}${YWB}This script will not work with PiMox! \n"
- echo -e "\n ${YWB}Visit https://github.com/asylumexp/Proxmox for ARM64 support. \n"
- echo -e "Exiting..."
- sleep 2
- exit
- fi
+ if [ "$(dpkg --print-architecture)" != "amd64" ]; then
+ echo -e "\n ${INFO}This script will not work with PiMox! \n"
+ echo -e "\n Visit https://github.com/asylumexp/Proxmox for ARM64 support. \n"
+ echo -e "Exiting..."
+ sleep 2
+ exit
+ fi
}
function ssh_check() {
- if command -v pveversion >/dev/null 2>&1; then
- if [ -n "${SSH_CLIENT:+x}" ]; then
- if whiptail --backtitle "Proxmox VE Helper Scripts" --defaultno --title "SSH DETECTED" --yesno "It's suggested to use the Proxmox shell instead of SSH, since SSH can create issues while gathering variables. Would you like to proceed with using SSH?" 10 62; then
- echo "you've been warned"
- else
- clear
- exit
- fi
+ if command -v pveversion >/dev/null 2>&1 && [ -n "${SSH_CLIENT:+x}" ]; then
+ if whiptail --backtitle "Proxmox VE Helper Scripts" --defaultno --title "SSH DETECTED" --yesno "It's suggested to use the Proxmox shell instead of SSH, since SSH can create issues while gathering variables. Proceed anyway?" 10 62; then :; else
+ clear
+ exit
+ fi
fi
- fi
}
function exit-script() {
- clear
- echo -e "\n${CROSS}${RD}User exited script${CL}\n"
- exit
+ clear
+ echo -e "\n${CROSS}${RD}User exited script${CL}\n"
+ exit
}
function default_settings() {
- VMID=$(get_valid_nextid)
- FORMAT=",efitype=4m"
- MACHINE=""
- DISK_CACHE=""
- DISK_SIZE="8G"
- HN="docker"
- CPU_TYPE=""
- CORE_COUNT="2"
- RAM_SIZE="4096"
- BRG="vmbr0"
- MAC="$GEN_MAC"
- VLAN=""
- MTU=""
- START_VM="yes"
- METHOD="default"
- echo -e "${CONTAINERID}${BOLD}${DGN}Virtual Machine ID: ${BGN}${VMID}${CL}"
- echo -e "${CONTAINERTYPE}${BOLD}${DGN}Machine Type: ${BGN}i440fx${CL}"
- echo -e "${DISKSIZE}${BOLD}${DGN}Disk Size: ${BGN}${DISK_SIZE}${CL}"
- echo -e "${DISKSIZE}${BOLD}${DGN}Disk Cache: ${BGN}None${CL}"
- echo -e "${HOSTNAME}${BOLD}${DGN}Hostname: ${BGN}${HN}${CL}"
- echo -e "${OS}${BOLD}${DGN}CPU Model: ${BGN}KVM64${CL}"
- echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}${CORE_COUNT}${CL}"
- echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}${RAM_SIZE}${CL}"
- echo -e "${BRIDGE}${BOLD}${DGN}Bridge: ${BGN}${BRG}${CL}"
- echo -e "${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}${MAC}${CL}"
- echo -e "${VLANTAG}${BOLD}${DGN}VLAN: ${BGN}Default${CL}"
- echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}Default${CL}"
- echo -e "${GATEWAY}${BOLD}${DGN}Start VM when completed: ${BGN}yes${CL}"
- echo -e "${CREATING}${BOLD}${DGN}Creating a Docker VM using the above default settings${CL}"
+ VMID=$(get_valid_nextid)
+ FORMAT=",efitype=4m"
+ DISK_CACHE=""
+ DISK_SIZE="10G"
+ HN="docker"
+ CPU_TYPE=""
+ CORE_COUNT="2"
+ RAM_SIZE="4096"
+ BRG="vmbr0"
+ MAC="$GEN_MAC"
+ VLAN=""
+ MTU=""
+ START_VM="yes"
+ METHOD="default"
+ echo -e "${CONTAINERID}${BOLD}${DGN}Virtual Machine ID: ${BGN}${VMID}${CL}"
+ echo -e "${CONTAINERTYPE}${BOLD}${DGN}Machine Type: ${BGN}q35${CL}"
+ echo -e "${DISKSIZE}${BOLD}${DGN}Disk Size: ${BGN}${DISK_SIZE}${CL}"
+ echo -e "${DISKSIZE}${BOLD}${DGN}Disk Cache: ${BGN}None${CL}"
+ echo -e "${HOSTNAME}${BOLD}${DGN}Hostname: ${BGN}${HN}${CL}"
+ echo -e "${OS}${BOLD}${DGN}CPU Model: ${BGN}KVM64${CL}"
+ echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}${CORE_COUNT}${CL}"
+ echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}${RAM_SIZE}${CL}"
+ echo -e "${BRIDGE}${BOLD}${DGN}Bridge: ${BGN}${BRG}${CL}"
+ echo -e "${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}${MAC}${CL}"
+ echo -e "${VLANTAG}${BOLD}${DGN}VLAN: ${BGN}Default${CL}"
+ echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}Default${CL}"
+ echo -e "${GATEWAY}${BOLD}${DGN}Start VM when completed: ${BGN}yes${CL}"
+ echo -e "${CREATING}${BOLD}${DGN}Creating a Docker VM using the above default settings${CL}"
}
function advanced_settings() {
- METHOD="advanced"
- [ -z "${VMID:-}" ] && VMID=$(get_valid_nextid)
- while true; do
- if VMID=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Virtual Machine ID" 8 58 $VMID --title "VIRTUAL MACHINE ID" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then
- if [ -z "$VMID" ]; then
- VMID=$(get_valid_nextid)
- fi
- if pct status "$VMID" &>/dev/null || qm status "$VMID" &>/dev/null; then
- echo -e "${CROSS}${RD} ID $VMID is already in use${CL}"
- sleep 2
- continue
- fi
- echo -e "${CONTAINERID}${BOLD}${DGN}Virtual Machine ID: ${BGN}$VMID${CL}"
- break
+ METHOD="advanced"
+ [ -z "${VMID:-}" ] && VMID=$(get_valid_nextid)
+ while true; do
+ if VMID=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Virtual Machine ID" 8 58 $VMID --title "VIRTUAL MACHINE ID" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then
+ [ -z "$VMID" ] && VMID=$(get_valid_nextid)
+ if pct status "$VMID" &>/dev/null || qm status "$VMID" &>/dev/null; then
+ echo -e "${CROSS}${RD} ID $VMID is already in use${CL}"
+ sleep 2
+ continue
+ fi
+ echo -e "${CONTAINERID}${BOLD}${DGN}Virtual Machine ID: ${BGN}$VMID${CL}"
+ break
+ else exit-script; fi
+ done
+
+ FORMAT=",efitype=4m"
+ echo -e "${CONTAINERTYPE}${BOLD}${DGN}Machine Type: ${BGN}q35${CL}"
+
+ if DISK_SIZE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Disk Size in GiB (e.g., 10, 20)" 8 58 "$DISK_SIZE" --title "DISK SIZE" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then
+ DISK_SIZE=$(echo "$DISK_SIZE" | tr -d ' ')
+ if [[ "$DISK_SIZE" =~ ^[0-9]+$ ]]; then DISK_SIZE="${DISK_SIZE}G"; fi
+ [[ "$DISK_SIZE" =~ ^[0-9]+G$ ]] || {
+ echo -e "${DISKSIZE}${BOLD}${RD}Invalid Disk Size.${CL}"
+ exit-script
+ }
+ echo -e "${DISKSIZE}${BOLD}${DGN}Disk Size: ${BGN}$DISK_SIZE${CL}"
+ else exit-script; fi
+
+ if DISK_CACHE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "DISK CACHE" --radiolist "Choose" --cancel-button Exit-Script 10 58 2 \
+ "0" "None (Default)" ON "1" "Write Through" OFF 3>&1 1>&2 2>&3); then
+ if [ "$DISK_CACHE" = "1" ]; then
+ DISK_CACHE="cache=writethrough,"
+ echo -e "${DISKSIZE}${BOLD}${DGN}Disk Cache: ${BGN}Write Through${CL}"
+ else
+ DISK_CACHE=""
+ echo -e "${DISKSIZE}${BOLD}${DGN}Disk Cache: ${BGN}None${CL}"
+ fi
+ else exit-script; fi
+
+ if VM_NAME=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Hostname" 8 58 docker --title "HOSTNAME" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then
+ if [ -z "$VM_NAME" ]; then HN="docker"; else HN=$(echo ${VM_NAME,,} | tr -d ' '); fi
+ echo -e "${HOSTNAME}${BOLD}${DGN}Hostname: ${BGN}$HN${CL}"
+ else exit-script; fi
+
+ if CPU_TYPE1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "CPU MODEL" --radiolist "Choose" --cancel-button Exit-Script 10 58 2 \
+ "0" "KVM64 (Default)" ON "1" "Host" OFF 3>&1 1>&2 2>&3); then
+ if [ "$CPU_TYPE1" = "1" ]; then
+ CPU_TYPE=" -cpu host"
+ echo -e "${OS}${BOLD}${DGN}CPU Model: ${BGN}Host${CL}"
+ else
+ CPU_TYPE=""
+ echo -e "${OS}${BOLD}${DGN}CPU Model: ${BGN}KVM64${CL}"
+ fi
+ else exit-script; fi
+
+ if CORE_COUNT=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate CPU Cores" 8 58 2 --title "CORE COUNT" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then
+ [ -z "$CORE_COUNT" ] && CORE_COUNT="2"
+ echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}$CORE_COUNT${CL}"
+ else exit-script; fi
+
+ if RAM_SIZE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate RAM in MiB" 8 58 2048 --title "RAM" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then
+ [ -z "$RAM_SIZE" ] && RAM_SIZE="2048"
+ echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}$RAM_SIZE${CL}"
+ else exit-script; fi
+
+ if BRG=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a Bridge" 8 58 vmbr0 --title "BRIDGE" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then
+ [ -z "$BRG" ] && BRG="vmbr0"
+ echo -e "${BRIDGE}${BOLD}${DGN}Bridge: ${BGN}$BRG${CL}"
+ else exit-script; fi
+
+ if MAC1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a MAC Address" 8 58 $GEN_MAC --title "MAC ADDRESS" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then
+ if [ -z "$MAC1" ]; then MAC="$GEN_MAC"; else MAC="$MAC1"; fi
+ echo -e "${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}$MAC${CL}"
+ else exit-script; fi
+
+ if VLAN1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a Vlan(leave blank for default)" 8 58 --title "VLAN" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then
+ if [ -z "$VLAN1" ]; then
+ VLAN1="Default"
+ VLAN=""
+ else VLAN=",tag=$VLAN1"; fi
+ echo -e "${VLANTAG}${BOLD}${DGN}VLAN: ${BGN}$VLAN1${CL}"
+ else exit-script; fi
+
+ if MTU1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Interface MTU Size (leave blank for default)" 8 58 --title "MTU SIZE" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then
+ if [ -z "$MTU1" ]; then
+ MTU1="Default"
+ MTU=""
+ else MTU=",mtu=$MTU1"; fi
+ echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}$MTU1${CL}"
+ else exit-script; fi
+
+ if whiptail --backtitle "Proxmox VE Helper Scripts" --title "START VIRTUAL MACHINE" --yesno "Start VM when completed?" 10 58; then
+ echo -e "${GATEWAY}${BOLD}${DGN}Start VM when completed: ${BGN}yes${CL}"
+ START_VM="yes"
else
- exit-script
+ echo -e "${GATEWAY}${BOLD}${DGN}Start VM when completed: ${BGN}no${CL}"
+ START_VM="no"
fi
- done
- if MACH=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "MACHINE TYPE" --radiolist --cancel-button Exit-Script "Choose Type" 10 58 2 \
- "i440fx" "Machine i440fx" ON \
- "q35" "Machine q35" OFF \
- 3>&1 1>&2 2>&3); then
- if [ $MACH = q35 ]; then
- echo -e "${CONTAINERTYPE}${BOLD}${DGN}Machine Type: ${BGN}$MACH${CL}"
- FORMAT=""
- MACHINE=" -machine q35"
+ if whiptail --backtitle "Proxmox VE Helper Scripts" --title "ADVANCED SETTINGS COMPLETE" --yesno "Ready to create a Docker VM?" --no-button Do-Over 10 58; then
+ echo -e "${CREATING}${BOLD}${DGN}Creating a Docker VM using the above advanced settings${CL}"
else
- echo -e "${CONTAINERTYPE}${BOLD}${DGN}Machine Type: ${BGN}$MACH${CL}"
- FORMAT=",efitype=4m"
- MACHINE=""
+ header_info
+ echo -e "${ADVANCED}${BOLD}${RD}Using Advanced Settings${CL}"
+ advanced_settings
fi
- else
- exit-script
- fi
-
- if DISK_SIZE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Disk Size in GiB (e.g., 10, 20)" 8 58 "$DISK_SIZE" --title "DISK SIZE" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then
- DISK_SIZE=$(echo "$DISK_SIZE" | tr -d ' ')
- if [[ "$DISK_SIZE" =~ ^[0-9]+$ ]]; then
- DISK_SIZE="${DISK_SIZE}G"
- echo -e "${DISKSIZE}${BOLD}${DGN}Disk Size: ${BGN}$DISK_SIZE${CL}"
- elif [[ "$DISK_SIZE" =~ ^[0-9]+G$ ]]; then
- echo -e "${DISKSIZE}${BOLD}${DGN}Disk Size: ${BGN}$DISK_SIZE${CL}"
- else
- echo -e "${DISKSIZE}${BOLD}${RD}Invalid Disk Size. Please use a number (e.g., 10 or 10G).${CL}"
- exit-script
- fi
- else
- exit-script
- fi
-
- if DISK_CACHE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "DISK CACHE" --radiolist "Choose" --cancel-button Exit-Script 10 58 2 \
- "0" "None (Default)" ON \
- "1" "Write Through" OFF \
- 3>&1 1>&2 2>&3); then
- if [ $DISK_CACHE = "1" ]; then
- echo -e "${DISKSIZE}${BOLD}${DGN}Disk Cache: ${BGN}Write Through${CL}"
- DISK_CACHE="cache=writethrough,"
- else
- echo -e "${DISKSIZE}${BOLD}${DGN}Disk Cache: ${BGN}None${CL}"
- DISK_CACHE=""
- fi
- else
- exit-script
- fi
-
- if VM_NAME=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Hostname" 8 58 docker --title "HOSTNAME" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then
- if [ -z $VM_NAME ]; then
- HN="docker"
- echo -e "${HOSTNAME}${BOLD}${DGN}Hostname: ${BGN}$HN${CL}"
- else
- HN=$(echo ${VM_NAME,,} | tr -d ' ')
- echo -e "${HOSTNAME}${BOLD}${DGN}Hostname: ${BGN}$HN${CL}"
- fi
- else
- exit-script
- fi
-
- if CPU_TYPE1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "CPU MODEL" --radiolist "Choose" --cancel-button Exit-Script 10 58 2 \
- "0" "KVM64 (Default)" ON \
- "1" "Host" OFF \
- 3>&1 1>&2 2>&3); then
- if [ $CPU_TYPE1 = "1" ]; then
- echo -e "${OS}${BOLD}${DGN}CPU Model: ${BGN}Host${CL}"
- CPU_TYPE=" -cpu host"
- else
- echo -e "${OS}${BOLD}${DGN}CPU Model: ${BGN}KVM64${CL}"
- CPU_TYPE=""
- fi
- else
- exit-script
- fi
-
- if CORE_COUNT=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate CPU Cores" 8 58 2 --title "CORE COUNT" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then
- if [ -z $CORE_COUNT ]; then
- CORE_COUNT="2"
- echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}$CORE_COUNT${CL}"
- else
- echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}$CORE_COUNT${CL}"
- fi
- else
- exit-script
- fi
-
- if RAM_SIZE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate RAM in MiB" 8 58 2048 --title "RAM" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then
- if [ -z $RAM_SIZE ]; then
- RAM_SIZE="2048"
- echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}$RAM_SIZE${CL}"
- else
- echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}$RAM_SIZE${CL}"
- fi
- else
- exit-script
- fi
-
- if BRG=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a Bridge" 8 58 vmbr0 --title "BRIDGE" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then
- if [ -z $BRG ]; then
- BRG="vmbr0"
- echo -e "${BRIDGE}${BOLD}${DGN}Bridge: ${BGN}$BRG${CL}"
- else
- echo -e "${BRIDGE}${BOLD}${DGN}Bridge: ${BGN}$BRG${CL}"
- fi
- else
- exit-script
- fi
-
- if MAC1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a MAC Address" 8 58 $GEN_MAC --title "MAC ADDRESS" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then
- if [ -z $MAC1 ]; then
- MAC="$GEN_MAC"
- echo -e "${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}$MAC${CL}"
- else
- MAC="$MAC1"
- echo -e "${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}$MAC1${CL}"
- fi
- else
- exit-script
- fi
-
- if VLAN1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a Vlan(leave blank for default)" 8 58 --title "VLAN" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then
- if [ -z $VLAN1 ]; then
- VLAN1="Default"
- VLAN=""
- echo -e "${VLANTAG}${BOLD}${DGN}VLAN: ${BGN}$VLAN1${CL}"
- else
- VLAN=",tag=$VLAN1"
- echo -e "${VLANTAG}${BOLD}${DGN}VLAN: ${BGN}$VLAN1${CL}"
- fi
- else
- exit-script
- fi
-
- if MTU1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Interface MTU Size (leave blank for default)" 8 58 --title "MTU SIZE" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then
- if [ -z $MTU1 ]; then
- MTU1="Default"
- MTU=""
- echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}$MTU1${CL}"
- else
- MTU=",mtu=$MTU1"
- echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}$MTU1${CL}"
- fi
- else
- exit-script
- fi
-
- if (whiptail --backtitle "Proxmox VE Helper Scripts" --title "START VIRTUAL MACHINE" --yesno "Start VM when completed?" 10 58); then
- echo -e "${GATEWAY}${BOLD}${DGN}Start VM when completed: ${BGN}yes${CL}"
- START_VM="yes"
- else
- echo -e "${GATEWAY}${BOLD}${DGN}Start VM when completed: ${BGN}no${CL}"
- START_VM="no"
- fi
-
- if (whiptail --backtitle "Proxmox VE Helper Scripts" --title "ADVANCED SETTINGS COMPLETE" --yesno "Ready to create a Docker VM?" --no-button Do-Over 10 58); then
- echo -e "${CREATING}${BOLD}${DGN}Creating a Docker VM using the above advanced settings${CL}"
- else
- header_info
- echo -e "${ADVANCED}${BOLD}${RD}Using Advanced Settings${CL}"
- advanced_settings
- fi
}
function start_script() {
- if (whiptail --backtitle "Proxmox VE Helper Scripts" --title "SETTINGS" --yesno "Use Default Settings?" --no-button Advanced 10 58); then
- header_info
- echo -e "${DEFAULT}${BOLD}${BL}Using Default Settings${CL}"
- default_settings
- else
- header_info
- echo -e "${ADVANCED}${BOLD}${RD}Using Advanced Settings${CL}"
- advanced_settings
- fi
+ if whiptail --backtitle "Proxmox VE Helper Scripts" --title "SETTINGS" --yesno "Use Default Settings?" --no-button Advanced 10 58; then
+ header_info
+ echo -e "${DEFAULT}${BOLD}${BL}Using Default Settings${CL}"
+ default_settings
+ else
+ header_info
+ echo -e "${ADVANCED}${BOLD}${RD}Using Advanced Settings${CL}"
+ advanced_settings
+ fi
}
+
check_root
arch_check
pve_check
@@ -409,103 +334,302 @@ ssh_check
start_script
post_to_api_vm
+function choose_os() {
+ if OS_CHOICE=$(whiptail --backtitle "Proxmox VE Helper Scripts" \
+ --title "Choose Base OS" \
+ --radiolist "Select the OS for the Docker VM:" 12 60 3 \
+ "debian12" "Debian 12 (Bookworm, stable & best for scripts)" ON \
+ "debian13" "Debian 13 (Trixie, newer, but repos lag)" OFF \
+ "ubuntu24" "Ubuntu 24.04 LTS (modern kernel, GPU/AI friendly)" OFF \
+ 3>&1 1>&2 2>&3); then
+ case "$OS_CHOICE" in
+ debian12)
+ var_os="debian"
+ var_version="12"
+ URL="https://cloud.debian.org/images/cloud/bookworm/latest/debian-12-nocloud-$(dpkg --print-architecture).qcow2"
+ ;;
+ debian13)
+ var_os="debian"
+ var_version="13"
+ URL="https://cloud.debian.org/images/cloud/trixie/latest/debian-13-nocloud-$(dpkg --print-architecture).qcow2"
+ ;;
+ ubuntu24)
+ var_os="ubuntu"
+ var_version="24.04"
+ URL="https://cloud-images.ubuntu.com/noble/current/noble-server-cloudimg-$(dpkg --print-architecture).img"
+ ;;
+ esac
+ echo -e "${OS}${BOLD}${DGN}Selected OS: ${BGN}${OS_CHOICE}${CL}"
+ else
+ exit-script
+ fi
+}
+
+PVE_VER=$(pveversion | awk -F'/' '{print $2}' | cut -d'-' -f1 | cut -d'.' -f1)
+if [ "$PVE_VER" -eq 8 ]; then
+ INSTALL_MODE="direct"
+elif [ "$PVE_VER" -eq 9 ]; then
+ INSTALL_MODE="firstboot"
+else
+ msg_error "Unsupported Proxmox VE version: $PVE_VER"
+ exit 1
+fi
+
+# ---------- Storage selection ----------
msg_info "Validating Storage"
while read -r line; do
- TAG=$(echo $line | awk '{print $1}')
- TYPE=$(echo $line | awk '{printf "%-10s", $2}')
- FREE=$(echo $line | numfmt --field 4-6 --from-unit=K --to=iec --format %.2f | awk '{printf( "%9sB", $6)}')
- ITEM=" Type: $TYPE Free: $FREE "
- OFFSET=2
- if [[ $((${#ITEM} + $OFFSET)) -gt ${MSG_MAX_LENGTH:-} ]]; then
- MSG_MAX_LENGTH=$((${#ITEM} + $OFFSET))
- fi
- STORAGE_MENU+=("$TAG" "$ITEM" "OFF")
+ TAG=$(echo $line | awk '{print $1}')
+ TYPE=$(echo $line | awk '{printf "%-10s", $2}')
+ FREE=$(echo $line | numfmt --field 4-6 --from-unit=K --to=iec --format %.2f | awk '{printf( "%9sB", $6)}')
+ ITEM=" Type: $TYPE Free: $FREE "
+ OFFSET=2
+ if [[ $((${#ITEM} + $OFFSET)) -gt ${MSG_MAX_LENGTH:-} ]]; then
+ MSG_MAX_LENGTH=$((${#ITEM} + $OFFSET))
+ fi
+ STORAGE_MENU+=("$TAG" "$ITEM" "OFF")
done < <(pvesm status -content images | awk 'NR>1')
VALID=$(pvesm status -content images | awk 'NR>1')
if [ -z "$VALID" ]; then
- msg_error "Unable to detect a valid storage location."
- exit
+ msg_error "Unable to detect a valid storage location."
+ exit
elif [ $((${#STORAGE_MENU[@]} / 3)) -eq 1 ]; then
- STORAGE=${STORAGE_MENU[0]}
+ STORAGE=${STORAGE_MENU[0]}
else
- while [ -z "${STORAGE:+x}" ]; do
- STORAGE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "Storage Pools" --radiolist \
- "Which storage pool would you like to use for ${HN}?\nTo make a selection, use the Spacebar.\n" \
- 16 $(($MSG_MAX_LENGTH + 23)) 6 \
- "${STORAGE_MENU[@]}" 3>&1 1>&2 2>&3)
- done
+ while [ -z "${STORAGE:+x}" ]; do
+ STORAGE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "Storage Pools" --radiolist \
+ "Which storage pool would you like to use for ${HN}?\nTo make a selection, use the Spacebar.\n" \
+ 16 $(($MSG_MAX_LENGTH + 23)) 6 \
+ "${STORAGE_MENU[@]}" 3>&1 1>&2 2>&3)
+ done
fi
msg_ok "Using ${CL}${BL}$STORAGE${CL} ${GN}for Storage Location."
msg_ok "Virtual Machine ID is ${CL}${BL}$VMID${CL}."
-msg_info "Retrieving the URL for the Debian 12 Qcow2 Disk Image"
-URL="https://cloud.debian.org/images/cloud/bookworm/latest/debian-12-nocloud-$(dpkg --print-architecture).qcow2"
-sleep 2
-msg_ok "${CL}${BL}${URL}${CL}"
-curl -f#SL -o "$(basename "$URL")" "$URL"
-echo -en "\e[1A\e[0K"
-FILE=$(basename $URL)
+
+# ---------- Download Cloud Image ----------
+choose_os
+msg_info "Retrieving Cloud Image for $var_os $var_version"
+curl --retry 30 --retry-delay 3 --retry-connrefused -fSL -o "$(basename "$URL")" "$URL"
+FILE=$(basename "$URL")
msg_ok "Downloaded ${CL}${BL}${FILE}${CL}"
-STORAGE_TYPE=$(pvesm status -storage "$STORAGE" | awk 'NR>1 {print $2}')
-case $STORAGE_TYPE in
-nfs | dir)
- DISK_EXT=".qcow2"
- DISK_REF="$VMID/"
- DISK_IMPORT="-format qcow2"
- THIN=""
- ;;
-btrfs)
- DISK_EXT=".raw"
- DISK_REF="$VMID/"
- DISK_IMPORT="-format raw"
- FORMAT=",efitype=4m"
- THIN=""
- ;;
-esac
-for i in {0,1}; do
- disk="DISK$i"
- eval DISK${i}=vm-${VMID}-disk-${i}${DISK_EXT:-}
- eval DISK${i}_REF=${STORAGE}:${DISK_REF:-}${!disk}
-done
-
-if ! command -v virt-customize &>/dev/null; then
- msg_info "Installing Pre-Requisite libguestfs-tools onto Host"
- apt-get -qq update >/dev/null
- apt-get -qq install libguestfs-tools lsb-release -y >/dev/null
- msg_ok "Installed libguestfs-tools successfully"
+# Ubuntu ships its cloud image as .img (qcow2 data under a .img name); normalize to a .qcow2 file
+if [[ "$FILE" == *.img ]]; then
+ msg_info "Converting RAW image to qcow2"
+ qemu-img convert -O qcow2 "$FILE" "${FILE%.img}.qcow2"
+ rm -f "$FILE"
+ FILE="${FILE%.img}.qcow2"
+ msg_ok "Converted to ${CL}${BL}${FILE}${CL}"
fi
-msg_info "Adding UniFi OS Server Installer to Debian 12 Qcow2 Disk Image"
-UOS_VERSION="4.2.23"
-UOS_URL="https://fw-download.ubnt.com/data/unifi-os-server/8b93-linux-x64-4.2.23-158fa00b-6b2c-4cd8-94ea-e92bc4a81369.23-x64"
-UOS_INSTALLER="unifi-os-server-${UOS_VERSION}.bin"
-virt-customize -q -a "${FILE}" \
- --install qemu-guest-agent,ca-certificates,curl,lsb-release,podman \
- --run-command "curl -fsSL '${UOS_URL}' -o /root/${UOS_INSTALLER} && chmod +x /root/${UOS_INSTALLER}" >/dev/null
+# ---------- Ensure libguestfs-tools ----------
+if ! command -v virt-customize &>/dev/null; then
+ msg_info "Installing libguestfs-tools on host"
+ apt-get -qq update >/dev/null
+ apt-get -qq install -y libguestfs-tools lsb-release >/dev/null
+ msg_ok "Installed libguestfs-tools"
+fi
-msg_ok "Added UniFi OS Server Installer to Debian 12 Qcow2 Disk Image successfully"
+# ---------- Decide distro codename & Docker repo base ----------
+if [[ "$URL" == *"/bookworm/"* || "$FILE" == *"debian-12-"* ]]; then
+ CODENAME="bookworm"
+ DOCKER_BASE="https://download.docker.com/linux/debian"
+elif [[ "$URL" == *"/trixie/"* || "$FILE" == *"debian-13-"* ]]; then
+ CODENAME="trixie"
+ DOCKER_BASE="https://download.docker.com/linux/debian"
+elif [[ "$URL" == *"/noble/"* || "$FILE" == *"noble-"* ]]; then
+ CODENAME="noble"
+ DOCKER_BASE="https://download.docker.com/linux/ubuntu"
+else
+ CODENAME="bookworm"
+ DOCKER_BASE="https://download.docker.com/linux/debian"
+fi
+# Map Debian trixie → bookworm (the Docker apt repo often lags behind new Debian releases)
+REPO_CODENAME="$CODENAME"
+if [[ "$DOCKER_BASE" == *"linux/debian"* && "$CODENAME" == "trixie" ]]; then
+ REPO_CODENAME="bookworm"
+fi
+# ---------- Detect PVE major version (again; independent var) ----------
+PVE_MAJ=$(pveversion | awk -F'/' '{print $2}' | cut -d'-' -f1 | cut -d'.' -f1)
+if [ "$PVE_MAJ" -eq 8 ]; then INSTALL_MODE="direct"; else INSTALL_MODE="firstboot"; fi
+
+# ---------- Optional: allow manual override ----------
+if whiptail --backtitle "Proxmox VE Helper Scripts" --title "Docker Installation Mode" \
+ --yesno "Detected PVE ${PVE_MAJ}. Use ${INSTALL_MODE^^} mode?\n\nYes = ${INSTALL_MODE^^}\nNo = Switch to the other mode" 11 70; then :; else
+ if [ "$INSTALL_MODE" = "direct" ]; then INSTALL_MODE="firstboot"; else INSTALL_MODE="direct"; fi
+fi
+
+# ---------- PVE8: Direct install into image via virt-customize ----------
+if [ "$INSTALL_MODE" = "direct" ]; then
+ msg_info "Injecting Docker directly into image (${CODENAME}, $(basename "$DOCKER_BASE"))"
+ virt-customize -q -a "${FILE}" \
+ --install qemu-guest-agent,apt-transport-https,ca-certificates,curl,gnupg,lsb-release \
+ --run-command "install -m 0755 -d /etc/apt/keyrings" \
+ --run-command "curl -fsSL ${DOCKER_BASE}/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg" \
+ --run-command "chmod a+r /etc/apt/keyrings/docker.gpg" \
+    --run-command "echo \"deb [arch=\$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] ${DOCKER_BASE} ${REPO_CODENAME} stable\" > /etc/apt/sources.list.d/docker.list" \
+ --run-command "apt-get update -qq" \
+ --run-command "apt-get install -y docker-ce docker-ce-cli containerd.io docker-compose-plugin" \
+ --run-command "systemctl enable docker" \
+ --run-command "systemctl enable qemu-guest-agent" >/dev/null
+
+  # Apply the PATH fix in a separate virt-customize pass
+ virt-customize -q -a "${FILE}" \
+ --run-command "sed -i 's#^ENV_SUPATH.*#ENV_SUPATH PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin#' /etc/login.defs || true" \
+ --run-command "sed -i 's#^ENV_PATH.*#ENV_PATH PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin#' /etc/login.defs || true" \
+ --run-command "printf 'PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\n' >/etc/environment" \
+ --run-command "grep -q 'export PATH=' /root/.bashrc || echo 'export PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin' >> /root/.bashrc" >/dev/null
+
+ msg_ok "Docker injected into image"
+fi
+
+# ---------- PVE9: First-boot installer inside guest ----------
+if [ "$INSTALL_MODE" = "firstboot" ]; then
+ msg_info "Preparing first-boot Docker installer (${CODENAME}, $(basename "$DOCKER_BASE"))"
+ mkdir -p firstboot
+ cat >firstboot/firstboot-docker.sh <<'EOSH'
+#!/usr/bin/env bash
+set -euxo pipefail
+
+LOG=/var/log/firstboot-docker.log
+exec >>"$LOG" 2>&1
+
+mark_done() { mkdir -p /var/lib/firstboot; date > /var/lib/firstboot/docker.done; }
+retry() { local t=$1; shift; local n=0; until "$@"; do n=$((n+1)); [ "$n" -ge "$t" ] && return 1; sleep 5; done; }
+
+wait_network() {
+ retry 60 getent hosts deb.debian.org || retry 60 getent hosts archive.ubuntu.com
+ retry 60 bash -lc 'curl -fsS https://download.docker.com/ >/dev/null'
+}
+
+fix_path() {
+ sed -i 's#^ENV_SUPATH.*#ENV_SUPATH PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin#' /etc/login.defs || true
+ sed -i 's#^ENV_PATH.*#ENV_PATH PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin#' /etc/login.defs || true
+ printf 'PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\n' >/etc/environment
+ grep -q 'export PATH=' /root/.bashrc || echo 'export PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin' >> /root/.bashrc
+ export PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
+}
+
+main() {
+ export DEBIAN_FRONTEND=noninteractive
+ mkdir -p /etc/apt/apt.conf.d
+ printf 'Acquire::Retries "10";\nAcquire::http::Timeout "60";\nAcquire::https::Timeout "60";\n' >/etc/apt/apt.conf.d/80-retries-timeouts
+
+ wait_network
+
+ . /etc/os-release
+ CODENAME="${VERSION_CODENAME:-bookworm}"
+ case "$ID" in
+ ubuntu) DOCKER_BASE="https://download.docker.com/linux/ubuntu" ;;
+ debian|*) DOCKER_BASE="https://download.docker.com/linux/debian" ;;
+ esac
+ REPO_CODENAME="$CODENAME"
+ if [ "$ID" = "debian" ] && [ "$CODENAME" = "trixie" ]; then REPO_CODENAME="bookworm"; fi
+
+ retry 20 apt-get update -qq
+ retry 10 apt-get install -y ca-certificates curl gnupg qemu-guest-agent apt-transport-https lsb-release software-properties-common
+
+ install -m 0755 -d /etc/apt/keyrings
+ curl -fsSL "${DOCKER_BASE}/gpg" | gpg --dearmor -o /etc/apt/keyrings/docker.gpg
+ chmod a+r /etc/apt/keyrings/docker.gpg
+ echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] ${DOCKER_BASE} ${REPO_CODENAME} stable" > /etc/apt/sources.list.d/docker.list
+
+ retry 20 apt-get update -qq
+ retry 10 apt-get install -y docker-ce docker-ce-cli containerd.io docker-compose-plugin
+
+ systemctl enable --now qemu-guest-agent || true
+ systemctl enable --now docker
+
+ fix_path
+
+ command -v docker >/dev/null
+ systemctl is-active --quiet docker
+
+ mark_done
+}
+main
+EOSH
+ chmod +x firstboot/firstboot-docker.sh
+
+ cat >firstboot/firstboot-docker.service <<'EOUNIT'
+[Unit]
+Description=First boot: install Docker & QGA
+After=network-online.target cloud-init.service
+Wants=network-online.target
+ConditionPathExists=!/var/lib/firstboot/docker.done
+StartLimitIntervalSec=0
+
+[Service]
+Type=oneshot
+ExecStart=/usr/local/sbin/firstboot-docker.sh
+Restart=on-failure
+RestartSec=10s
+TimeoutStartSec=0
+RemainAfterExit=no
+
+[Install]
+WantedBy=multi-user.target
+EOUNIT
+
+ echo "$HN" >firstboot/hostname
+
+ virt-customize -q -a "${FILE}" \
+ --copy-in firstboot/firstboot-docker.sh:/usr/local/sbin \
+ --copy-in firstboot/firstboot-docker.service:/etc/systemd/system \
+ --copy-in firstboot/hostname:/etc \
+ --run-command "chmod +x /usr/local/sbin/firstboot-docker.sh" \
+ --run-command "systemctl enable firstboot-docker.service" \
+ --run-command "echo -n > /etc/machine-id" \
+ --run-command "truncate -s 0 /etc/hostname && mv /etc/hostname /etc/hostname.orig && echo '${HN}' >/etc/hostname" >/dev/null
+
+ msg_ok "First-boot Docker installer injected"
+fi
+
+# ---------- Expand partition offline ----------
msg_info "Expanding root partition to use full disk space"
qemu-img create -f qcow2 expanded.qcow2 ${DISK_SIZE} >/dev/null 2>&1
virt-resize --expand /dev/sda1 ${FILE} expanded.qcow2 >/dev/null 2>&1
mv expanded.qcow2 ${FILE} >/dev/null 2>&1
msg_ok "Expanded image to full size"
-msg_info "Creating a Docker VM"
-qm create $VMID -agent 1${MACHINE} -tablet 0 -localtime 1 -bios ovmf${CPU_TYPE} -cores $CORE_COUNT -memory $RAM_SIZE \
- -name $HN -tags community-script -net0 virtio,bridge=$BRG,macaddr=$MAC$VLAN$MTU -onboot 1 -ostype l26 -scsihw virtio-scsi-pci
-pvesm alloc $STORAGE $VMID $DISK0 4M 1>&/dev/null
-qm importdisk $VMID ${FILE} $STORAGE ${DISK_IMPORT:-} 1>&/dev/null
-qm set $VMID \
- -efidisk0 ${DISK0_REF}${FORMAT} \
- -scsi0 ${DISK1_REF},${DISK_CACHE}${THIN}size=${DISK_SIZE} \
- -boot order=scsi0 \
- -serial0 socket >/dev/null
-qm resize $VMID scsi0 8G >/dev/null
-qm set $VMID --agent enabled=1 >/dev/null
+# ---------- Create VM shell (q35) ----------
+msg_info "Creating a Docker VM shell"
+qm create "$VMID" -machine q35 -bios ovmf -agent 1 -tablet 0 -localtime 1 ${CPU_TYPE} \
+ -cores "$CORE_COUNT" -memory "$RAM_SIZE" -name "$HN" -tags community-script \
+ -net0 "virtio,bridge=$BRG,macaddr=$MAC$VLAN$MTU" -onboot 1 -ostype l26 -scsihw virtio-scsi-pci >/dev/null
+msg_ok "Created VM shell"
+# ---------- Import disk ----------
+msg_info "Importing disk into storage ($STORAGE)"
+if qm disk import --help >/dev/null 2>&1; then IMPORT_CMD=(qm disk import); else IMPORT_CMD=(qm importdisk); fi
+IMPORT_OUT="$("${IMPORT_CMD[@]}" "$VMID" "${FILE}" "$STORAGE" --format qcow2 2>&1 || true)"
+DISK_REF="$(printf '%s\n' "$IMPORT_OUT" | sed -n "s/.*successfully imported disk '\([^']\+\)'.*/\1/p" | tr -d "\r\"'")"
+[[ -z "$DISK_REF" ]] && DISK_REF="$(pvesm list "$STORAGE" | awk -v id="$VMID" '$1 ~ ("vm-" id "-disk-") {print $1}' | sort | tail -n1)"
+[[ -z "$DISK_REF" ]] && {
+ msg_error "Unable to determine imported disk reference."
+ echo "$IMPORT_OUT"
+ exit 1
+}
+msg_ok "Imported disk (${CL}${BL}${DISK_REF}${CL})"
+
+# ---------- Attach EFI + root disk ----------
+msg_info "Attaching EFI and root disk"
+qm set "$VMID" \
+ --efidisk0 "${STORAGE}:0${FORMAT}" \
+ --scsi0 "${DISK_REF},${DISK_CACHE}${THIN}size=${DISK_SIZE}" \
+ --boot order=scsi0 \
+ --serial0 socket >/dev/null
+qm set "$VMID" --agent enabled=1 >/dev/null
+msg_ok "Attached EFI and root disk"
+
+# ---------- Ensure final size (PVE layer) ----------
+msg_info "Resizing disk to $DISK_SIZE (PVE layer)"
+qm resize "$VMID" scsi0 "${DISK_SIZE}" >/dev/null || true
+msg_ok "Resized disk"
+
+# ---------- Description ----------
DESCRIPTION=$(
- cat <
@@ -537,10 +661,12 @@ EOF
qm set "$VMID" -description "$DESCRIPTION" >/dev/null
msg_ok "Created a Docker VM ${CL}${BL}(${HN})"
+
if [ "$START_VM" == "yes" ]; then
- msg_info "Starting Docker VM"
- qm start $VMID
- msg_ok "Started Docker VM"
+ msg_info "Starting Docker VM"
+ qm start $VMID
+ msg_ok "Started Docker VM"
fi
+
post_update_to_api "done" "none"
msg_ok "Completed Successfully!\n"
diff --git a/vm/haos-vm.sh b/vm/haos-vm.sh
index 8e9e61cc..ea7ff30d 100644
--- a/vm/haos-vm.sh
+++ b/vm/haos-vm.sh
@@ -142,16 +142,6 @@ function check_root() {
fi
}
-function pve_check() {
- if ! pveversion | grep -Eq "pve-manager/8\.[1-4](\.[0-9]+)*"; then
- msg_error "${CROSS}${RD}This version of Proxmox Virtual Environment is not supported"
- echo -e "Requires Proxmox Virtual Environment Version 8.1 or later."
- echo -e "Exiting..."
- sleep 2
- exit
- fi
-}
-
function arch_check() {
if [ "$(dpkg --print-architecture)" != "amd64" ]; then
echo -e "\n ${INFO}${YWB}This script will not work with PiMox! \n"
@@ -184,8 +174,8 @@ function exit-script() {
function default_settings() {
BRANCH="$stable"
VMID=$(get_valid_nextid)
- FORMAT=",efitype=4m"
- MACHINE=""
+ FORMAT=",efitype=4m,pre-enrolled-keys=0"
+ MACHINE=" -machine q35"
DISK_CACHE="cache=writethrough,"
HN="haos$stable"
CPU_TYPE=" -cpu host"
@@ -198,11 +188,11 @@ function default_settings() {
START_VM="yes"
METHOD="default"
echo -e "${CONTAINERID}${BOLD}${DGN}Virtual Machine ID: ${BGN}${VMID}${CL}"
- echo -e "${CONTAINERTYPE}${BOLD}${DGN}Machine Type: ${BGN}i440fx${CL}"
+ echo -e "${CONTAINERTYPE}${BOLD}${DGN}Machine Type: ${BGN}q35${CL}"
echo -e "${DISKSIZE}${BOLD}${DGN}Disk Size: ${BGN}${DISK_SIZE}${CL}"
- echo -e "${DISKSIZE}${BOLD}${DGN}Disk Cache: ${BGN}None${CL}"
+ echo -e "${DISKSIZE}${BOLD}${DGN}Disk Cache: ${BGN}Write Through${CL}"
echo -e "${HOSTNAME}${BOLD}${DGN}Hostname: ${BGN}${HN}${CL}"
- echo -e "${OS}${BOLD}${DGN}CPU Model: ${BGN}KVM64${CL}"
+ echo -e "${OS}${BOLD}${DGN}CPU Model: ${BGN}Host${CL}"
echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}${CORE_COUNT}${CL}"
echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}${RAM_SIZE}${CL}"
echo -e "${BRIDGE}${BOLD}${DGN}Bridge: ${BGN}${BRG}${CL}"
@@ -246,16 +236,16 @@ function advanced_settings() {
done
if MACH=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "MACHINE TYPE" --radiolist --cancel-button Exit-Script "Choose Type" 10 58 2 \
- "i440fx" "Machine i440fx" ON \
- "q35" "Machine q35" OFF \
+ "i440fx" "Machine i440fx" OFF \
+ "q35" "Machine q35" ON \
3>&1 1>&2 2>&3); then
if [ $MACH = q35 ]; then
echo -e "${CONTAINERTYPE}${BOLD}${DGN}Machine Type: ${BGN}$MACH${CL}"
- FORMAT=""
+ FORMAT=",efitype=4m,pre-enrolled-keys=0"
MACHINE=" -machine q35"
else
echo -e "${CONTAINERTYPE}${BOLD}${DGN}Machine Type: ${BGN}$MACH${CL}"
- FORMAT=",efitype=4m"
+ FORMAT=",efitype=4m,pre-enrolled-keys=0"
MACHINE=""
fi
else
@@ -421,12 +411,14 @@ function start_script() {
check_root
arch_check
-pve_check
ssh_check
start_script
post_to_api_vm
+qm set $VMID -serial0 socket
+qm set $VMID -vga serial0
+
msg_info "Validating Storage"
while read -r line; do
TAG=$(echo $line | awk '{print $1}')
@@ -486,7 +478,7 @@ btrfs | local-zfs)
DISK_EXT=".raw"
DISK_REF="$VMID/"
DISK_IMPORT="-format raw"
- FORMAT=",efitype=4m"
+ FORMAT=",efitype=4m,pre-enrolled-keys=0"
THIN=""
;;
esac
@@ -507,6 +499,8 @@ qm set $VMID \
-scsi0 ${DISK1_REF},${DISK_CACHE}${THIN}size=32G \
-boot order=scsi0 >/dev/null
+qm set $VMID -serial0 socket >/dev/null
+
DESCRIPTION=$(
cat <