Merge branch 'community-scripts:main' into step-ca
This commit is contained in:
commit
f30b34151d
111
.github/workflows/stale_pr_close.yml
generated
vendored
Normal file
111
.github/workflows/stale_pr_close.yml
generated
vendored
Normal file
@ -0,0 +1,111 @@
|
||||
name: Stale PR Management

on:
  schedule:
    - cron: "0 0 * * *"
  workflow_dispatch:
  pull_request_target:
    types:
      - labeled

jobs:
  stale-prs:
    runs-on: ubuntu-latest
    permissions:
      pull-requests: write
      issues: write
      contents: read
    steps:
      - name: Handle stale PRs
        uses: actions/github-script@v7
        with:
          script: |
            const now = new Date();
            const owner = context.repo.owner;
            const repo = context.repo.repo;

            // --- When stale label is added, comment immediately ---
            if (context.eventName === "pull_request_target" && context.payload.action === "labeled") {
              const label = context.payload.label?.name;
              if (label === "stale") {
                const author = context.payload.pull_request.user.login;
                await github.rest.issues.createComment({
                  owner,
                  repo,
                  issue_number: context.payload.pull_request.number,
                  body: `@${author} This PR has been marked as stale. It will be closed if no new commits are added in 7 days.`
                });
              }
              return;
            }

            // --- Scheduled run: check all stale PRs ---
            // github.paginate walks every page; a plain list() call silently
            // capped the run at the first 100 open PRs.
            const prs = await github.paginate(github.rest.pulls.list, {
              owner,
              repo,
              state: "open",
              per_page: 100
            });

            for (const pr of prs) {
              const hasStale = pr.labels.some(l => l.name === "stale");
              if (!hasStale) continue;

              // Get timeline events to find when the stale label was added.
              // Pagination matters here too: on busy PRs the labeling event
              // can fall outside the first page.
              const events = await github.paginate(github.rest.issues.listEvents, {
                owner,
                repo,
                issue_number: pr.number,
                per_page: 100
              });

              // Find the most recent time the stale label was added
              const staleLabelEvents = events
                .filter(e => e.event === "labeled" && e.label?.name === "stale")
                .sort((a, b) => new Date(b.created_at) - new Date(a.created_at));

              if (staleLabelEvents.length === 0) continue;

              const staleLabelDate = new Date(staleLabelEvents[0].created_at);
              const daysSinceStale = (now - staleLabelDate) / (1000 * 60 * 60 * 24);

              // Check for new commits since the stale label was added.
              // listCommits defaults to 30 per page in chronological order, so
              // without pagination the "last" element was NOT the newest commit
              // on PRs with more than 30 commits.
              const commits = await github.paginate(github.rest.pulls.listCommits, {
                owner,
                repo,
                pull_number: pr.number,
                per_page: 100
              });
              if (commits.length === 0) continue;

              const newest = commits[commits.length - 1];
              // commit.author can be null (e.g. commits made via the web UI);
              // fall back to the committer timestamp.
              const lastCommitDate = new Date(newest.commit.author?.date ?? newest.commit.committer.date);
              const author = pr.user.login;

              // If there are new commits after the stale label, remove it
              if (lastCommitDate > staleLabelDate) {
                await github.rest.issues.removeLabel({
                  owner,
                  repo,
                  issue_number: pr.number,
                  name: "stale"
                });
                await github.rest.issues.createComment({
                  owner,
                  repo,
                  issue_number: pr.number,
                  body: `@${author} Recent activity detected. Removing stale label.`
                });
              }
              // If 7 days have passed since stale label, close the PR
              else if (daysSinceStale > 7) {
                await github.rest.pulls.update({
                  owner,
                  repo,
                  pull_number: pr.number,
                  state: "closed"
                });
                await github.rest.issues.createComment({
                  owner,
                  repo,
                  issue_number: pr.number,
                  body: `@${author} Closing stale PR due to inactivity (no commits for 7 days after stale label).`
                });
              }
            }
|
||||
@ -1,5 +0,0 @@
|
||||
MONGO_USER=
|
||||
MONGO_PASSWORD=
|
||||
MONGO_IP=
|
||||
MONGO_PORT=
|
||||
MONGO_DATABASE=
|
||||
23
api/go.mod
23
api/go.mod
@ -1,23 +0,0 @@
|
||||
module proxmox-api
|
||||
|
||||
go 1.24.0
|
||||
|
||||
require (
|
||||
github.com/gorilla/mux v1.8.1
|
||||
github.com/joho/godotenv v1.5.1
|
||||
github.com/rs/cors v1.11.1
|
||||
go.mongodb.org/mongo-driver v1.17.2
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/golang/snappy v0.0.4 // indirect
|
||||
github.com/klauspost/compress v1.16.7 // indirect
|
||||
github.com/montanaflynn/stats v0.7.1 // indirect
|
||||
github.com/xdg-go/pbkdf2 v1.0.0 // indirect
|
||||
github.com/xdg-go/scram v1.1.2 // indirect
|
||||
github.com/xdg-go/stringprep v1.0.4 // indirect
|
||||
github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 // indirect
|
||||
golang.org/x/crypto v0.45.0 // indirect
|
||||
golang.org/x/sync v0.18.0 // indirect
|
||||
golang.org/x/text v0.31.0 // indirect
|
||||
)
|
||||
56
api/go.sum
56
api/go.sum
@ -1,56 +0,0 @@
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
|
||||
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
||||
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
|
||||
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
|
||||
github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0=
|
||||
github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4=
|
||||
github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I=
|
||||
github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
|
||||
github.com/montanaflynn/stats v0.7.1 h1:etflOAAHORrCC44V+aR6Ftzort912ZU+YLiSTuV8eaE=
|
||||
github.com/montanaflynn/stats v0.7.1/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow=
|
||||
github.com/rs/cors v1.11.1 h1:eU3gRzXLRK57F5rKMGMZURNdIG4EoAmX8k94r9wXWHA=
|
||||
github.com/rs/cors v1.11.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU=
|
||||
github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c=
|
||||
github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
|
||||
github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY=
|
||||
github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4=
|
||||
github.com/xdg-go/stringprep v1.0.4 h1:XLI/Ng3O1Atzq0oBs3TWm+5ZVgkq2aqdlvP9JtoZ6c8=
|
||||
github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM=
|
||||
github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 h1:ilQV1hzziu+LLM3zUTJ0trRztfwgjqKnBWNtSRkbmwM=
|
||||
github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78/go.mod h1:aL8wCCfTfSfmXjznFBSZNN13rSJjlIOI1fUNAtF7rmI=
|
||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||
go.mongodb.org/mongo-driver v1.17.2 h1:gvZyk8352qSfzyZ2UMWcpDpMSGEr1eqE4T793SqyhzM=
|
||||
go.mongodb.org/mongo-driver v1.17.2/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q=
|
||||
golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
|
||||
golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
|
||||
golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
|
||||
golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
450
api/main.go
450
api/main.go
@ -1,450 +0,0 @@
|
||||
// Copyright (c) 2021-2025 community-scripts ORG
|
||||
// Author: Michel Roegl-Brunner (michelroegl-brunner)
|
||||
// License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/joho/godotenv"
|
||||
"github.com/rs/cors"
|
||||
"go.mongodb.org/mongo-driver/bson"
|
||||
"go.mongodb.org/mongo-driver/bson/primitive"
|
||||
"go.mongodb.org/mongo-driver/mongo"
|
||||
"go.mongodb.org/mongo-driver/mongo/options"
|
||||
)
|
||||
|
||||
var client *mongo.Client
|
||||
var collection *mongo.Collection
|
||||
|
||||
func loadEnv() {
|
||||
if err := godotenv.Load(); err != nil {
|
||||
log.Fatal("Error loading .env file")
|
||||
}
|
||||
}
|
||||
|
||||
// DataModel represents a single document in MongoDB. Field names mirror the
// uppercase keys sent by the installer scripts; JSON/BSON tags define the
// wire and storage names.
type DataModel struct {
	ID         primitive.ObjectID `json:"id" bson:"_id,omitempty"` // Mongo-assigned document id
	CT_TYPE    uint               `json:"ct_type" bson:"ct_type"`
	DISK_SIZE  float32            `json:"disk_size" bson:"disk_size"`
	CORE_COUNT uint               `json:"core_count" bson:"core_count"`
	RAM_SIZE   uint               `json:"ram_size" bson:"ram_size"`
	OS_TYPE    string             `json:"os_type" bson:"os_type"`
	OS_VERSION string             `json:"os_version" bson:"os_version"`
	DISABLEIP6 string             `json:"disableip6" bson:"disableip6"`
	NSAPP      string             `json:"nsapp" bson:"nsapp"`
	METHOD     string             `json:"method" bson:"method"`
	CreatedAt  time.Time          `json:"created_at" bson:"created_at"` // set server-side in UploadJSON
	PVEVERSION string             `json:"pve_version" bson:"pve_version"`
	STATUS     string             `json:"status" bson:"status"`
	RANDOM_ID  string             `json:"random_id" bson:"random_id"` // client-supplied key used by UpdateStatus to match records
	TYPE       string             `json:"type" bson:"type"`
	ERROR      string             `json:"error" bson:"error"` // empty string means "no error" (see GetErrors filter)
}

// StatusModel is the request payload accepted by UpdateStatus; the record to
// update is located via RANDOM_ID.
type StatusModel struct {
	RANDOM_ID string `json:"random_id" bson:"random_id"`
	ERROR     string `json:"error" bson:"error"`
	STATUS    string `json:"status" bson:"status"`
}

// CountResponse is the payload returned by GetSummary: a total plus per-status
// and per-nsapp counts.
type CountResponse struct {
	TotalEntries int64            `json:"total_entries"`
	StatusCount  map[string]int64 `json:"status_count"`
	NSAPPCount   map[string]int64 `json:"nsapp_count"`
}
|
||||
|
||||
// ConnectDatabase initializes the MongoDB connection
|
||||
func ConnectDatabase() {
|
||||
loadEnv()
|
||||
|
||||
mongoURI := fmt.Sprintf("mongodb://%s:%s@%s:%s",
|
||||
os.Getenv("MONGO_USER"),
|
||||
os.Getenv("MONGO_PASSWORD"),
|
||||
os.Getenv("MONGO_IP"),
|
||||
os.Getenv("MONGO_PORT"))
|
||||
|
||||
database := os.Getenv("MONGO_DATABASE")
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
var err error
|
||||
client, err = mongo.Connect(ctx, options.Client().ApplyURI(mongoURI))
|
||||
if err != nil {
|
||||
log.Fatal("Failed to connect to MongoDB!", err)
|
||||
}
|
||||
collection = client.Database(database).Collection("data_models")
|
||||
fmt.Println("Connected to MongoDB on 10.10.10.18")
|
||||
}
|
||||
|
||||
// UploadJSON handles API requests and stores data as a document in MongoDB
|
||||
func UploadJSON(w http.ResponseWriter, r *http.Request) {
|
||||
var input DataModel
|
||||
|
||||
if err := json.NewDecoder(r.Body).Decode(&input); err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
input.CreatedAt = time.Now()
|
||||
|
||||
_, err := collection.InsertOne(context.Background(), input)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
log.Println("Received data:", input)
|
||||
w.WriteHeader(http.StatusCreated)
|
||||
json.NewEncoder(w).Encode(map[string]string{"message": "Data saved successfully"})
|
||||
}
|
||||
|
||||
// UpdateStatus updates the status of a record based on RANDOM_ID
|
||||
func UpdateStatus(w http.ResponseWriter, r *http.Request) {
|
||||
var input StatusModel
|
||||
|
||||
if err := json.NewDecoder(r.Body).Decode(&input); err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
filter := bson.M{"random_id": input.RANDOM_ID}
|
||||
update := bson.M{"$set": bson.M{"status": input.STATUS, "error": input.ERROR}}
|
||||
|
||||
_, err := collection.UpdateOne(context.Background(), filter, update)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
log.Println("Updated data:", input)
|
||||
w.WriteHeader(http.StatusOK)
|
||||
json.NewEncoder(w).Encode(map[string]string{"message": "Record updated successfully"})
|
||||
}
|
||||
|
||||
// GetDataJSON fetches all data from MongoDB
|
||||
func GetDataJSON(w http.ResponseWriter, r *http.Request) {
|
||||
var records []DataModel
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
cursor, err := collection.Find(ctx, bson.M{})
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
defer cursor.Close(ctx)
|
||||
|
||||
for cursor.Next(ctx) {
|
||||
var record DataModel
|
||||
if err := cursor.Decode(&record); err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
records = append(records, record)
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
json.NewEncoder(w).Encode(records)
|
||||
}
|
||||
func GetPaginatedData(w http.ResponseWriter, r *http.Request) {
|
||||
page, _ := strconv.Atoi(r.URL.Query().Get("page"))
|
||||
limit, _ := strconv.Atoi(r.URL.Query().Get("limit"))
|
||||
if page < 1 {
|
||||
page = 1
|
||||
}
|
||||
if limit < 1 {
|
||||
limit = 10
|
||||
}
|
||||
skip := (page - 1) * limit
|
||||
var records []DataModel
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
options := options.Find().SetSkip(int64(skip)).SetLimit(int64(limit))
|
||||
cursor, err := collection.Find(ctx, bson.M{}, options)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
defer cursor.Close(ctx)
|
||||
|
||||
for cursor.Next(ctx) {
|
||||
var record DataModel
|
||||
if err := cursor.Decode(&record); err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
records = append(records, record)
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
json.NewEncoder(w).Encode(records)
|
||||
}
|
||||
|
||||
func GetSummary(w http.ResponseWriter, r *http.Request) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
totalCount, err := collection.CountDocuments(ctx, bson.M{})
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
statusCount := make(map[string]int64)
|
||||
nsappCount := make(map[string]int64)
|
||||
|
||||
pipeline := []bson.M{
|
||||
{"$group": bson.M{"_id": "$status", "count": bson.M{"$sum": 1}}},
|
||||
}
|
||||
cursor, err := collection.Aggregate(ctx, pipeline)
|
||||
if err == nil {
|
||||
for cursor.Next(ctx) {
|
||||
var result struct {
|
||||
ID string `bson:"_id"`
|
||||
Count int64 `bson:"count"`
|
||||
}
|
||||
if err := cursor.Decode(&result); err == nil {
|
||||
statusCount[result.ID] = result.Count
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pipeline = []bson.M{
|
||||
{"$group": bson.M{"_id": "$nsapp", "count": bson.M{"$sum": 1}}},
|
||||
}
|
||||
cursor, err = collection.Aggregate(ctx, pipeline)
|
||||
if err == nil {
|
||||
for cursor.Next(ctx) {
|
||||
var result struct {
|
||||
ID string `bson:"_id"`
|
||||
Count int64 `bson:"count"`
|
||||
}
|
||||
if err := cursor.Decode(&result); err == nil {
|
||||
nsappCount[result.ID] = result.Count
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
response := CountResponse{
|
||||
TotalEntries: totalCount,
|
||||
StatusCount: statusCount,
|
||||
NSAPPCount: nsappCount,
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
json.NewEncoder(w).Encode(response)
|
||||
}
|
||||
|
||||
func GetByNsapp(w http.ResponseWriter, r *http.Request) {
|
||||
nsapp := r.URL.Query().Get("nsapp")
|
||||
var records []DataModel
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
cursor, err := collection.Find(ctx, bson.M{"nsapp": nsapp})
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
defer cursor.Close(ctx)
|
||||
|
||||
for cursor.Next(ctx) {
|
||||
var record DataModel
|
||||
if err := cursor.Decode(&record); err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
records = append(records, record)
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
json.NewEncoder(w).Encode(records)
|
||||
}
|
||||
|
||||
func GetByDateRange(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
startDate := r.URL.Query().Get("start_date")
|
||||
endDate := r.URL.Query().Get("end_date")
|
||||
|
||||
if startDate == "" || endDate == "" {
|
||||
http.Error(w, "Both start_date and end_date are required", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
start, err := time.Parse("2006-01-02T15:04:05.999999+00:00", startDate+"T00:00:00+00:00")
|
||||
if err != nil {
|
||||
http.Error(w, "Invalid start_date format", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
end, err := time.Parse("2006-01-02T15:04:05.999999+00:00", endDate+"T23:59:59+00:00")
|
||||
if err != nil {
|
||||
http.Error(w, "Invalid end_date format", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
var records []DataModel
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
cursor, err := collection.Find(ctx, bson.M{
|
||||
"created_at": bson.M{
|
||||
"$gte": start,
|
||||
"$lte": end,
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
defer cursor.Close(ctx)
|
||||
|
||||
for cursor.Next(ctx) {
|
||||
var record DataModel
|
||||
if err := cursor.Decode(&record); err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
records = append(records, record)
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
json.NewEncoder(w).Encode(records)
|
||||
}
|
||||
func GetByStatus(w http.ResponseWriter, r *http.Request) {
|
||||
status := r.URL.Query().Get("status")
|
||||
var records []DataModel
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
cursor, err := collection.Find(ctx, bson.M{"status": status})
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
defer cursor.Close(ctx)
|
||||
|
||||
for cursor.Next(ctx) {
|
||||
var record DataModel
|
||||
if err := cursor.Decode(&record); err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
records = append(records, record)
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
json.NewEncoder(w).Encode(records)
|
||||
}
|
||||
|
||||
func GetByOS(w http.ResponseWriter, r *http.Request) {
|
||||
osType := r.URL.Query().Get("os_type")
|
||||
osVersion := r.URL.Query().Get("os_version")
|
||||
var records []DataModel
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
cursor, err := collection.Find(ctx, bson.M{"os_type": osType, "os_version": osVersion})
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
defer cursor.Close(ctx)
|
||||
|
||||
for cursor.Next(ctx) {
|
||||
var record DataModel
|
||||
if err := cursor.Decode(&record); err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
records = append(records, record)
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
json.NewEncoder(w).Encode(records)
|
||||
}
|
||||
|
||||
func GetErrors(w http.ResponseWriter, r *http.Request) {
|
||||
errorCount := make(map[string]int)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
cursor, err := collection.Find(ctx, bson.M{"error": bson.M{"$ne": ""}})
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
defer cursor.Close(ctx)
|
||||
|
||||
for cursor.Next(ctx) {
|
||||
var record DataModel
|
||||
if err := cursor.Decode(&record); err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
if record.ERROR != "" {
|
||||
errorCount[record.ERROR]++
|
||||
}
|
||||
}
|
||||
|
||||
type ErrorCountResponse struct {
|
||||
Error string `json:"error"`
|
||||
Count int `json:"count"`
|
||||
}
|
||||
|
||||
var errorCounts []ErrorCountResponse
|
||||
for err, count := range errorCount {
|
||||
errorCounts = append(errorCounts, ErrorCountResponse{
|
||||
Error: err,
|
||||
Count: count,
|
||||
})
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
json.NewEncoder(w).Encode(struct {
|
||||
ErrorCounts []ErrorCountResponse `json:"error_counts"`
|
||||
}{
|
||||
ErrorCounts: errorCounts,
|
||||
})
|
||||
}
|
||||
|
||||
// main wires up the HTTP API: connects to MongoDB, registers all routes,
// wraps them in CORS, and serves on :8080 until a fatal error.
func main() {
	// Establish the global Mongo client/collection before serving requests
	// (exits fatally on failure).
	ConnectDatabase()

	router := mux.NewRouter()
	// Write endpoints.
	router.HandleFunc("/upload", UploadJSON).Methods("POST")
	router.HandleFunc("/upload/updatestatus", UpdateStatus).Methods("POST")
	// Read endpoints.
	router.HandleFunc("/data/json", GetDataJSON).Methods("GET")
	router.HandleFunc("/data/paginated", GetPaginatedData).Methods("GET")
	router.HandleFunc("/data/summary", GetSummary).Methods("GET")
	router.HandleFunc("/data/nsapp", GetByNsapp).Methods("GET")
	router.HandleFunc("/data/date", GetByDateRange).Methods("GET")
	router.HandleFunc("/data/status", GetByStatus).Methods("GET")
	router.HandleFunc("/data/os", GetByOS).Methods("GET")
	router.HandleFunc("/data/errors", GetErrors).Methods("GET")

	// NOTE(review): AllowedOrigins "*" combined with AllowCredentials true is
	// rejected by browsers for credentialed requests — confirm whether
	// credentials are actually needed here, or restrict the origins.
	c := cors.New(cors.Options{
		AllowedOrigins:   []string{"*"},
		AllowedMethods:   []string{"GET", "POST"},
		AllowedHeaders:   []string{"Content-Type", "Authorization"},
		AllowCredentials: true,
	})

	handler := c.Handler(router)

	fmt.Println("Server running on port 8080")
	log.Fatal(http.ListenAndServe(":8080", handler))
}
|
||||
@ -1,50 +0,0 @@
|
||||
#!/usr/bin/env bash
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)
# Copyright (c) 2021-2026 community-scripts ORG
# Author: Slaviša Arežina (tremor021)
# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
# Source: https://www.powerdns.com/

# Container defaults; each var_* may be overridden from the caller's environment.
APP="Alpine-PowerDNS"
var_tags="${var_tags:-os;alpine;dns}"
var_cpu="${var_cpu:-1}"
var_ram="${var_ram:-256}"
var_disk="${var_disk:-4}"
var_os="${var_os:-alpine}"
var_version="${var_version:-3.23}"
var_unprivileged="${var_unprivileged:-1}"

# Framework setup helpers sourced from build.func above.
header_info "$APP"
variables
color
catch_errors

# update_script: in-place update path used when the container already exists.
function update_script() {
  header_info
  check_container_storage
  check_container_resources

  # The installed pdns apk package marks an existing installation.
  if ! apk info -e pdns >/dev/null 2>&1; then
    msg_error "No ${APP} Installation Found!"
    exit
  fi

  msg_info "Updating PowerDNS"
  # apk -U refreshes the package index before upgrading.
  $STD apk -U upgrade
  msg_ok "Updated PowerDNS"

  msg_info "Restarting Services"
  $STD rc-service pdns restart
  msg_ok "Restarted Services"
  msg_ok "Updated successfully!"
  exit
}

start
build_container
description

msg_ok "Completed successfully!\n"
echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
echo -e "${INFO}${YW} Access it using the following URL:${CL}"
# NOTE(review): port 53 is the DNS listener; PowerDNS's web/API port usually
# differs — confirm this URL is intended.
echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:53${CL}"
|
||||
@ -1,48 +0,0 @@
|
||||
#!/usr/bin/env bash
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)
# Copyright (c) 2021-2026 community-scripts ORG
# Author: thost96 (thost96)
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://www.authelia.com/

# Container defaults; each var_* may be overridden from the caller's environment.
APP="Authelia"
var_tags="${var_tags:-authenticator}"
var_cpu="${var_cpu:-1}"
var_ram="${var_ram:-512}"
var_disk="${var_disk:-2}"
var_os="${var_os:-debian}"
var_version="${var_version:-13}"
var_unprivileged="${var_unprivileged:-1}"

header_info "$APP"
# NOTE(review): the sibling scripts in this changeset do not call
# base_settings here — confirm it is required (provided by build.func).
base_settings

variables
color
catch_errors

# update_script: in-place update path used when the container already exists.
function update_script() {
  header_info
  check_container_storage
  check_container_resources
  # Presence of /etc/authelia/ marks an existing installation.
  if [[ ! -d /etc/authelia/ ]]; then
    msg_error "No ${APP} Installation Found!"
    exit
  fi

  # Only act when a newer GitHub release is available.
  if check_for_gh_release "authelia" "authelia/authelia"; then
    $STD apt update
    $STD apt -y upgrade
    fetch_and_deploy_gh_release "authelia" "authelia/authelia" "binary"
    msg_ok "Updated successfully!"
  fi
  exit
}
start
build_container
description

msg_ok "Completed successfully!\n"
echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
echo -e "${INFO}${YW} Access it using the following URL:${CL}"
echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:9091 or https://auth.YOURDOMAIN ${CL}"
|
||||
58
ct/drawio.sh
Normal file
58
ct/drawio.sh
Normal file
@ -0,0 +1,58 @@
|
||||
#!/usr/bin/env bash
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)
# Copyright (c) 2021-2026 community-scripts ORG
# Author: Slaviša Arežina (tremor021)
# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
# Source: https://www.drawio.com/

# Container defaults; each var_* may be overridden from the caller's environment.
APP="DrawIO"
var_tags="${var_tags:-diagrams}"
var_cpu="${var_cpu:-1}"
var_ram="${var_ram:-2048}"
var_disk="${var_disk:-4}"
var_os="${var_os:-debian}"
var_version="${var_version:-13}"
var_unprivileged="${var_unprivileged:-1}"

header_info "$APP"
variables
color
catch_errors

# update_script: in-place update path used when the container already exists.
function update_script() {
  header_info
  check_container_storage
  check_container_resources
  # The deployed webapp WAR marks an existing installation.
  if [[ ! -f /var/lib/tomcat11/webapps/draw.war ]]; then
    msg_error "No ${APP} Installation Found!"
    exit
  fi

  # Only act when a newer GitHub release is available.
  if check_for_gh_release "drawio" "jgraph/drawio"; then
    msg_info "Stopping service"
    systemctl stop tomcat11
    msg_ok "Service stopped"

    msg_info "Updating Debian LXC"
    $STD apt update
    $STD apt upgrade -y
    msg_ok "Updated Debian LXC"

    # Deploy the release WAR under its original name so Tomcat picks it up.
    USE_ORIGINAL_FILENAME=true fetch_and_deploy_gh_release "drawio" "jgraph/drawio" "singlefile" "latest" "/var/lib/tomcat11/webapps" "draw.war"

    msg_info "Starting service"
    systemctl start tomcat11
    msg_ok "Service started"
    msg_ok "Updated successfully!"
  fi
  exit
}

start
build_container
description

msg_ok "Completed Successfully!\n"
echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
echo -e "${INFO}${YW} Access it using the following URL:${CL}"
echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:8080/draw${CL}"
|
||||
20
ct/ebusd.sh
20
ct/ebusd.sh
@ -23,16 +23,22 @@ function update_script() {
|
||||
header_info
|
||||
check_container_storage
|
||||
check_container_resources
|
||||
if [[ ! -f /etc/apt/sources.list.d/ebusd.sources ]]; then
|
||||
if [[ ! -f /etc/default/ebusd ]]; then
|
||||
msg_error "No ${APP} Installation Found!"
|
||||
exit
|
||||
fi
|
||||
|
||||
msg_info "Updating ebusd"
|
||||
$STD apt update
|
||||
$STD apt upgrade -y ebusd
|
||||
msg_ok "Updated ebusd"
|
||||
msg_ok "Updated successfully!"
|
||||
if check_for_gh_release "ebusd" "john30/ebusd"; then
|
||||
msg_info "Stopping Services"
|
||||
systemctl stop ebusd.service
|
||||
msg_ok "Stopped Services"
|
||||
|
||||
fetch_and_deploy_gh_release "ebusd" "john30/ebusd" "binary" "latest" "/opt/ebusd" "ebusd-*_amd64-trixie_mqtt1.deb"
|
||||
|
||||
msg_info "Starting Services"
|
||||
systemctl start ebusd.service
|
||||
msg_ok "Started Services"
|
||||
msg_ok "Updated successfully!"
|
||||
fi
|
||||
exit
|
||||
}
|
||||
|
||||
|
||||
@ -1,68 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)
|
||||
# Copyright (c) 2021-2026 community-scripts ORG
|
||||
# Author: kkroboth
|
||||
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
|
||||
# Source: https://fileflows.com/
|
||||
|
||||
APP="FileFlows"
|
||||
var_tags="${var_tags:-media;automation}"
|
||||
var_cpu="${var_cpu:-2}"
|
||||
var_ram="${var_ram:-2048}"
|
||||
var_disk="${var_disk:-8}"
|
||||
var_os="${var_os:-debian}"
|
||||
var_version="${var_version:-13}"
|
||||
var_unprivileged="${var_unprivileged:-1}"
|
||||
var_gpu="${var_gpu:-yes}"
|
||||
|
||||
header_info "$APP"
|
||||
variables
|
||||
color
|
||||
catch_errors
|
||||
|
||||
function update_script() {
|
||||
header_info
|
||||
check_container_storage
|
||||
check_container_resources
|
||||
|
||||
if [[ ! -d /opt/fileflows ]]; then
|
||||
msg_error "No ${APP} Installation Found!"
|
||||
exit
|
||||
fi
|
||||
if ! [[ $(dpkg -s jq 2>/dev/null) ]]; then
|
||||
$STD apt-get update
|
||||
$STD apt-get install -y jq
|
||||
fi
|
||||
|
||||
update_available=$(curl -fsSL -X 'GET' "http://localhost:19200/api/status/update-available" -H 'accept: application/json' | jq .UpdateAvailable)
|
||||
if [[ "${update_available}" == "true" ]]; then
|
||||
msg_info "Stopping Service"
|
||||
systemctl stop fileflows
|
||||
msg_info "Stopped Service"
|
||||
|
||||
msg_info "Creating Backup"
|
||||
backup_filename="/opt/${APP}_backup_$(date +%F).tar.gz"
|
||||
tar -czf "$backup_filename" -C /opt/fileflows Data
|
||||
msg_ok "Backup Created"
|
||||
|
||||
fetch_and_deploy_archive "https://fileflows.com/downloads/zip" "/opt/fileflows"
|
||||
|
||||
msg_info "Starting Service"
|
||||
systemctl start fileflows
|
||||
msg_ok "Started Service"
|
||||
msg_ok "Updated successfully!"
|
||||
else
|
||||
msg_ok "No update required. ${APP} is already at latest version"
|
||||
fi
|
||||
|
||||
exit
|
||||
}
|
||||
|
||||
start
|
||||
build_container
|
||||
description
|
||||
|
||||
msg_ok "Completed successfully!\n"
|
||||
echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
|
||||
echo -e "${INFO}${YW} Access it using the following URL:${CL}"
|
||||
echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:19200${CL}"
|
||||
@ -1,66 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
source <(curl -s https://raw.githubusercontent.com/community-scripts/ProxmoxVED/refs/heads/freepbx/misc/build.func)
|
||||
# Copyright (c) 2021-2026 community-scripts ORG
|
||||
# Author: Arian Nasr (arian-nasr) | Co-Author: Javier Pastor (vsc55)
|
||||
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
|
||||
# Source: https://www.freepbx.org/
|
||||
|
||||
APP="FreePBX"
|
||||
var_tags="pbx;voip;telephony"
|
||||
var_cpu="${var_cpu:-2}"
|
||||
var_ram="${var_ram:-2048}"
|
||||
var_disk="${var_disk:-10}"
|
||||
var_os="${var_os:-debian}"
|
||||
var_version="${var_version:-12}"
|
||||
var_unprivileged="${var_unprivileged:-1}"
|
||||
|
||||
header_info "$APP"
|
||||
variables
|
||||
color
|
||||
catch_errors
|
||||
|
||||
function update_script() {
|
||||
header_info
|
||||
check_container_storage
|
||||
check_container_resources
|
||||
|
||||
if [[ ! -f /lib/systemd/system/freepbx.service ]]; then
|
||||
msg_error "No ${APP} Installation Found!"
|
||||
exit
|
||||
fi
|
||||
|
||||
msg_info "Updating FreePBX LXC"
|
||||
$STD apt-get update
|
||||
$STD apt-get -y upgrade
|
||||
msg_ok "Updated FreePBX LXC"
|
||||
|
||||
msg_info "Updating FreePBX Modules"
|
||||
$STD fwconsole ma updateall
|
||||
$STD fwconsole reload
|
||||
msg_ok "Updated FreePBX Modules"
|
||||
msg_ok "Updated successfully!"
|
||||
exit
|
||||
}
|
||||
|
||||
start
|
||||
|
||||
if whiptail --title "Commercial Modules" --yesno "Remove Commercial modules?" --defaultno 10 50; then
|
||||
export ONLY_OPENSOURCE="yes"
|
||||
|
||||
if whiptail --title "Firewall Module" --yesno "Do you want to KEEP the Firewall module (and sysadmin)?" 10 50; then
|
||||
export REMOVE_FIREWALL="no"
|
||||
else
|
||||
export REMOVE_FIREWALL="yes"
|
||||
fi
|
||||
else
|
||||
export ONLY_OPENSOURCE="no"
|
||||
export REMOVE_FIREWALL="no"
|
||||
fi
|
||||
|
||||
build_container
|
||||
description
|
||||
|
||||
msg_ok "Completed successfully!\n"
|
||||
echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
|
||||
echo -e "${INFO}${YW} Access it using the following URL:${CL}"
|
||||
echo -e "${TAB}${GATEWAY}${BGN}http://${IP}${CL}"
|
||||
55
ct/ghost.sh
55
ct/ghost.sh
@ -1,55 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)
|
||||
# Copyright (c) 2021-2026 community-scripts ORG
|
||||
# Author: fabrice1236
|
||||
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
|
||||
# Source: https://ghost.org/
|
||||
|
||||
APP="Ghost"
|
||||
var_tags="${var_tags:-cms;blog}"
|
||||
var_cpu="${var_cpu:-2}"
|
||||
var_ram="${var_ram:-1024}"
|
||||
var_disk="${var_disk:-5}"
|
||||
var_os="${var_os:-debian}"
|
||||
var_version="${var_version:-13}"
|
||||
var_unprivileged="${var_unprivileged:-1}"
|
||||
|
||||
header_info "$APP"
|
||||
variables
|
||||
color
|
||||
catch_errors
|
||||
|
||||
function update_script() {
|
||||
header_info
|
||||
check_container_storage
|
||||
check_container_resources
|
||||
|
||||
setup_mariadb
|
||||
NODE_VERSION="22" setup_nodejs
|
||||
|
||||
msg_info "Updating Ghost"
|
||||
if command -v ghost &>/dev/null; then
|
||||
current_version=$(ghost version | grep 'Ghost-CLI version' | awk '{print $3}')
|
||||
latest_version=$(npm show ghost-cli version)
|
||||
if [ "$current_version" != "$latest_version" ]; then
|
||||
msg_info "Updating ${APP} from version v${current_version} to v${latest_version}"
|
||||
$STD npm install -g ghost-cli@latest
|
||||
msg_ok "Updated successfully!"
|
||||
else
|
||||
msg_ok "${APP} is already at v${current_version}"
|
||||
fi
|
||||
else
|
||||
msg_error "No ${APP} Installation Found!"
|
||||
exit
|
||||
fi
|
||||
exit
|
||||
}
|
||||
|
||||
start
|
||||
build_container
|
||||
description
|
||||
|
||||
msg_ok "Completed successfully!\n"
|
||||
echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
|
||||
echo -e "${INFO}${YW} Access it using the following URL:${CL}"
|
||||
echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:2368${CL}"
|
||||
103
ct/jellyseerr.sh
103
ct/jellyseerr.sh
@ -1,103 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)
|
||||
# Copyright (c) 2021-2026 tteck
|
||||
# Author: tteck (tteckster)
|
||||
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
|
||||
# Source: https://docs.jellyseerr.dev/
|
||||
|
||||
APP="Jellyseerr"
|
||||
var_tags="${var_tags:-media}"
|
||||
var_cpu="${var_cpu:-4}"
|
||||
var_ram="${var_ram:-4096}"
|
||||
var_disk="${var_disk:-8}"
|
||||
var_os="${var_os:-debian}"
|
||||
var_version="${var_version:-12}"
|
||||
var_unprivileged="${var_unprivileged:-1}"
|
||||
|
||||
header_info "$APP"
|
||||
variables
|
||||
color
|
||||
catch_errors
|
||||
|
||||
function update_script() {
|
||||
header_info
|
||||
check_container_storage
|
||||
check_container_resources
|
||||
|
||||
if [[ ! -d /opt/jellyseerr ]]; then
|
||||
msg_error "No ${APP} Installation Found!"
|
||||
exit
|
||||
fi
|
||||
|
||||
if [ "$(node -v | cut -c2-3)" -ne 22 ]; then
|
||||
msg_info "Updating Node.js Repository"
|
||||
echo "deb [signed-by=/etc/apt/keyrings/nodesource.gpg] https://deb.nodesource.com/node_22.x nodistro main" >/etc/apt/sources.list.d/nodesource.list
|
||||
msg_ok "Updating Node.js Repository"
|
||||
|
||||
msg_info "Updating Packages"
|
||||
$STD apt-get update
|
||||
$STD apt-get -y upgrade
|
||||
msg_ok "Updating Packages"
|
||||
fi
|
||||
|
||||
cd /opt/jellyseerr
|
||||
output=$(git pull --no-rebase)
|
||||
|
||||
pnpm_current=$(pnpm --version 2>/dev/null)
|
||||
pnpm_desired=$(grep -Po '"pnpm":\s*"\K[^"]+' /opt/jellyseerr/package.json)
|
||||
|
||||
if [ -z "$pnpm_current" ]; then
|
||||
msg_error "pnpm not found. Installing version $pnpm_desired..."
|
||||
NODE_VERSION="22" NODE_MODULE="pnpm@$pnpm_desired" setup_nodejs
|
||||
elif ! node -e "const semver = require('semver'); process.exit(semver.satisfies('$pnpm_current', '$pnpm_desired') ? 0 : 1)"; then
|
||||
msg_error "Updating pnpm from version $pnpm_current to $pnpm_desired..."
|
||||
NODE_VERSION="22" NODE_MODULE="pnpm@$pnpm_desired" setup_nodejs
|
||||
else
|
||||
msg_ok "pnpm is already installed and satisfies version $pnpm_desired."
|
||||
fi
|
||||
|
||||
msg_info "Updating Jellyseerr"
|
||||
if echo "$output" | grep -q "Already up to date."; then
|
||||
msg_ok "$APP is already up to date."
|
||||
exit
|
||||
fi
|
||||
|
||||
systemctl stop jellyseerr
|
||||
rm -rf dist .next node_modules
|
||||
export CYPRESS_INSTALL_BINARY=0
|
||||
cd /opt/jellyseerr
|
||||
$STD pnpm install --frozen-lockfile
|
||||
export NODE_OPTIONS="--max-old-space-size=3072"
|
||||
$STD pnpm build
|
||||
|
||||
cat <<EOF >/etc/systemd/system/jellyseerr.service
|
||||
[Unit]
|
||||
Description=jellyseerr Service
|
||||
After=network.target
|
||||
|
||||
[Service]
|
||||
EnvironmentFile=/etc/jellyseerr/jellyseerr.conf
|
||||
Environment=NODE_ENV=production
|
||||
Type=exec
|
||||
WorkingDirectory=/opt/jellyseerr
|
||||
ExecStart=/usr/bin/node dist/index.js
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
EOF
|
||||
|
||||
systemctl daemon-reload
|
||||
systemctl start jellyseerr
|
||||
msg_ok "Updated Jellyseerr"
|
||||
msg_ok "Updated successfully!"
|
||||
exit
|
||||
}
|
||||
|
||||
start
|
||||
build_container
|
||||
description
|
||||
|
||||
msg_ok "Completed successfully!\n"
|
||||
echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
|
||||
echo -e "${INFO}${YW} Access it using the following URL:${CL}"
|
||||
echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:5055${CL}"
|
||||
68
ct/joplin.sh
68
ct/joplin.sh
@ -1,68 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)
|
||||
# Copyright (c) 2021-2026 community-scripts ORG
|
||||
# Author: Slaviša Arežina (tremor021)
|
||||
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
|
||||
# Source: https://joplinapp.org/
|
||||
|
||||
APP="Joplin"
|
||||
var_tags="${var_tags:-notes}"
|
||||
var_cpu="${var_cpu:-2}"
|
||||
var_ram="${var_ram:-6144}"
|
||||
var_disk="${var_disk:-20}"
|
||||
var_os="${var_os:-debian}"
|
||||
var_version="${var_version:-13}"
|
||||
var_unprivileged="${var_unprivileged:-1}"
|
||||
|
||||
header_info "$APP"
|
||||
variables
|
||||
color
|
||||
catch_errors
|
||||
|
||||
function update_script() {
|
||||
header_info
|
||||
check_container_storage
|
||||
check_container_resources
|
||||
if [[ ! -d /opt/joplin-server ]]; then
|
||||
msg_error "No ${APP} Installation Found!"
|
||||
exit
|
||||
fi
|
||||
|
||||
NODE_VERSION=24 NODE_MODULE="yarn,npm,pm2" setup_nodejs
|
||||
|
||||
if check_for_gh_release "joplin-server" "laurent22/joplin"; then
|
||||
msg_info "Stopping Services"
|
||||
systemctl stop joplin-server
|
||||
msg_ok "Stopped Services"
|
||||
|
||||
cp /opt/joplin-server/.env /opt
|
||||
CLEAN_INSTALL=1 fetch_and_deploy_gh_release "joplin-server" "laurent22/joplin" "tarball"
|
||||
mv /opt/.env /opt/joplin-server
|
||||
|
||||
msg_info "Updating Joplin-Server"
|
||||
cd /opt/joplin-server
|
||||
sed -i "/onenote-converter/d" packages/lib/package.json
|
||||
$STD yarn config set --home enableTelemetry 0
|
||||
export BUILD_SEQUENCIAL=1
|
||||
$STD yarn workspaces focus @joplin/server
|
||||
cd packages/server
|
||||
$STD yarn run build
|
||||
$STD yarn run tsc
|
||||
msg_ok "Updated Joplin-Server"
|
||||
|
||||
msg_info "Starting Services"
|
||||
systemctl start joplin-server
|
||||
msg_ok "Started Services"
|
||||
msg_ok "Updated successfully!"
|
||||
fi
|
||||
exit
|
||||
}
|
||||
|
||||
start
|
||||
build_container
|
||||
description
|
||||
|
||||
msg_ok "Completed successfully!\n"
|
||||
echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
|
||||
echo -e "${INFO}${YW} Access it using the following URL:${CL}"
|
||||
echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:22300${CL}"
|
||||
105
ct/jotty.sh
105
ct/jotty.sh
@ -1,105 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)
|
||||
# Copyright (c) 2021-2026 community-scripts ORG
|
||||
# Author: vhsdream
|
||||
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
|
||||
# Source: https://github.com/fccview/jotty
|
||||
|
||||
APP="jotty"
|
||||
var_tags="${var_tags:-tasks;notes}"
|
||||
var_cpu="${var_cpu:-2}"
|
||||
var_ram="${var_ram:-4096}"
|
||||
var_disk="${var_disk:-6}"
|
||||
var_os="${var_os:-debian}"
|
||||
var_version="${var_version:-13}"
|
||||
var_unprivileged="${var_unprivileged:-1}"
|
||||
|
||||
header_info "$APP"
|
||||
variables
|
||||
color
|
||||
catch_errors
|
||||
|
||||
function update_script() {
|
||||
header_info
|
||||
check_container_storage
|
||||
check_container_resources
|
||||
|
||||
if [[ ! -d /opt/jotty ]]; then
|
||||
msg_error "No ${APP} Installation Found!"
|
||||
exit
|
||||
fi
|
||||
|
||||
if check_for_gh_release "jotty" "fccview/jotty"; then
|
||||
msg_info "Stopping Service"
|
||||
systemctl stop jotty
|
||||
msg_ok "Stopped Service"
|
||||
|
||||
msg_info "Backing up configuration & data"
|
||||
cd /opt/jotty
|
||||
cp ./.env /opt/app.env
|
||||
$STD tar -cf /opt/data_config.tar ./data ./config
|
||||
msg_ok "Backed up configuration & data"
|
||||
|
||||
NODE_VERSION="22" NODE_MODULE="yarn" setup_nodejs
|
||||
CLEAN_INSTALL=1 fetch_and_deploy_gh_release "jotty" "fccview/jotty" "tarball" "latest" "/opt/jotty"
|
||||
|
||||
msg_info "Updating jotty"
|
||||
cd /opt/jotty
|
||||
unset NODE_OPTIONS
|
||||
export NODE_OPTIONS="--max-old-space-size=3072"
|
||||
$STD yarn --frozen-lockfile
|
||||
$STD yarn next telemetry disable
|
||||
$STD yarn build
|
||||
|
||||
[ -d "public" ] && cp -r public .next/standalone/
|
||||
[ -d "howto" ] && cp -r howto .next/standalone/
|
||||
mkdir -p .next/standalone/.next
|
||||
cp -r .next/static .next/standalone/.next/
|
||||
|
||||
mv .next/standalone /tmp/jotty_standalone
|
||||
rm -rf ./* .next .git .gitignore .yarn
|
||||
mv /tmp/jotty_standalone/* .
|
||||
mv /tmp/jotty_standalone/.[!.]* . 2>/dev/null || true
|
||||
rm -rf /tmp/jotty_standalone
|
||||
msg_ok "Updated jotty"
|
||||
|
||||
msg_info "Restoring configuration & data"
|
||||
mv /opt/app.env /opt/jotty/.env
|
||||
$STD tar -xf /opt/data_config.tar
|
||||
msg_ok "Restored configuration & data"
|
||||
|
||||
msg_info "Updating Service"
|
||||
cat <<EOF >/etc/systemd/system/jotty.service
|
||||
[Unit]
|
||||
Description=jotty server
|
||||
After=network.target
|
||||
|
||||
[Service]
|
||||
WorkingDirectory=/opt/jotty
|
||||
EnvironmentFile=/opt/jotty/.env
|
||||
ExecStart=/usr/bin/node server.js
|
||||
Restart=on-abnormal
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
EOF
|
||||
systemctl daemon-reload
|
||||
msg_ok "Updated Service"
|
||||
|
||||
msg_info "Starting Service"
|
||||
systemctl start jotty
|
||||
msg_ok "Started Service"
|
||||
rm /opt/data_config.tar
|
||||
msg_ok "Updated successfully!"
|
||||
fi
|
||||
exit
|
||||
}
|
||||
|
||||
start
|
||||
build_container
|
||||
description
|
||||
|
||||
msg_ok "Completed successfully!\n"
|
||||
echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
|
||||
echo -e "${INFO}${YW} Access it using the following URL:${CL}"
|
||||
echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:3000${CL}"
|
||||
79
ct/linkding.sh
Normal file
79
ct/linkding.sh
Normal file
@ -0,0 +1,79 @@
|
||||
#!/usr/bin/env bash
|
||||
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)
|
||||
# Copyright (c) 2021-2026 community-scripts ORG
|
||||
# Author: MickLesk (MickLesk)
|
||||
# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
|
||||
# Source: https://linkding.link/
|
||||
|
||||
APP="linkding"
|
||||
var_tags="${var_tags:-bookmarks;management}"
|
||||
var_cpu="${var_cpu:-2}"
|
||||
var_ram="${var_ram:-1024}"
|
||||
var_disk="${var_disk:-4}"
|
||||
var_os="${var_os:-debian}"
|
||||
var_version="${var_version:-13}"
|
||||
var_unprivileged="${var_unprivileged:-1}"
|
||||
|
||||
header_info "$APP"
|
||||
variables
|
||||
color
|
||||
catch_errors
|
||||
|
||||
function update_script() {
|
||||
header_info
|
||||
check_container_storage
|
||||
check_container_resources
|
||||
|
||||
if [[ ! -d /opt/linkding ]]; then
|
||||
msg_error "No ${APP} Installation Found!"
|
||||
exit
|
||||
fi
|
||||
|
||||
if check_for_gh_release "linkding" "sissbruecker/linkding"; then
|
||||
msg_info "Stopping Services"
|
||||
systemctl stop nginx linkding linkding-tasks
|
||||
msg_ok "Stopped Services"
|
||||
|
||||
msg_info "Backing up Data"
|
||||
cp -r /opt/linkding/data /opt/linkding_data_backup
|
||||
cp /opt/linkding/.env /opt/linkding_env_backup
|
||||
msg_ok "Backed up Data"
|
||||
|
||||
CLEAN_INSTALL=1 fetch_and_deploy_gh_release "linkding" "sissbruecker/linkding"
|
||||
|
||||
msg_info "Restoring Data"
|
||||
cp -r /opt/linkding_data_backup/. /opt/linkding/data
|
||||
cp /opt/linkding_env_backup /opt/linkding/.env
|
||||
rm -rf /opt/linkding_data_backup /opt/linkding_env_backup
|
||||
ln -sf /usr/lib/x86_64-linux-gnu/mod_icu.so /opt/linkding/libicu.so
|
||||
msg_ok "Restored Data"
|
||||
|
||||
msg_info "Updating ${APP}"
|
||||
cd /opt/linkding
|
||||
rm -f bookmarks/settings/dev.py
|
||||
touch bookmarks/settings/custom.py
|
||||
$STD npm ci
|
||||
$STD npm run build
|
||||
$STD uv sync --no-dev --frozen
|
||||
$STD uv pip install gunicorn
|
||||
set -a && source /opt/linkding/.env && set +a
|
||||
$STD /opt/linkding/.venv/bin/python manage.py migrate
|
||||
$STD /opt/linkding/.venv/bin/python manage.py collectstatic --no-input
|
||||
msg_ok "Updated ${APP}"
|
||||
|
||||
msg_info "Starting Services"
|
||||
systemctl start nginx linkding linkding-tasks
|
||||
msg_ok "Started Services"
|
||||
msg_ok "Updated Successfully"
|
||||
fi
|
||||
exit
|
||||
}
|
||||
|
||||
start
|
||||
build_container
|
||||
description
|
||||
|
||||
msg_ok "Completed Successfully!\n"
|
||||
echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
|
||||
echo -e "${INFO}${YW} Access it using the following URL:${CL}"
|
||||
echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:9090${CL}"
|
||||
60
ct/memos.sh
60
ct/memos.sh
@ -1,60 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)
|
||||
# Copyright (c) 2021-2026 tteck
|
||||
# Author: MickLesk (Canbiz)
|
||||
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
|
||||
# Source: https://www.usememos.com/
|
||||
|
||||
APP="Memos"
|
||||
var_tags="${var_tags:-notes}"
|
||||
var_cpu="${var_cpu:-2}"
|
||||
var_ram="${var_ram:-2048}"
|
||||
var_disk="${var_disk:-6}"
|
||||
var_os="${var_os:-debian}"
|
||||
var_version="${var_version:-13}"
|
||||
var_unprivileged="${var_unprivileged:-1}"
|
||||
|
||||
header_info "$APP"
|
||||
variables
|
||||
color
|
||||
catch_errors
|
||||
|
||||
function update_script() {
|
||||
header_info
|
||||
check_container_storage
|
||||
check_container_resources
|
||||
if [[ ! -d /opt/memos ]]; then
|
||||
msg_error "No ${APP} Installation Found!"
|
||||
exit
|
||||
fi
|
||||
if check_for_gh_release "memos" "usememos/memos"; then
|
||||
msg_info "Stopping Service"
|
||||
systemctl stop memos
|
||||
msg_ok "Stopped Service"
|
||||
|
||||
CLEAN_INSTALL=1 fetch_and_deploy_gh_release "memos" "usememos/memos" "tarball"
|
||||
|
||||
msg_info "Building Memos (patience)"
|
||||
cd /opt/memos/web
|
||||
$STD pnpm install --frozen-lockfile
|
||||
$STD pnpm release
|
||||
cd /opt/memos
|
||||
$STD go build -o memos ./cmd/memos
|
||||
msg_ok "Built Memos"
|
||||
|
||||
msg_info "Starting Service"
|
||||
systemctl start memos
|
||||
msg_ok "Started Service"
|
||||
msg_ok "Updated successfully!"
|
||||
fi
|
||||
exit
|
||||
}
|
||||
|
||||
start
|
||||
build_container
|
||||
description
|
||||
|
||||
msg_ok "Completed successfully!\n"
|
||||
echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
|
||||
echo -e "${INFO}${YW} Access it using the following URL:${CL}"
|
||||
echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:9030${CL}"
|
||||
@ -41,20 +41,20 @@ function update_script() {
|
||||
msg_info "Updating nextExplorer"
|
||||
APP_DIR="/opt/nextExplorer/app"
|
||||
mkdir -p "$APP_DIR"
|
||||
cd /opt/nextExplorer/backend
|
||||
cd /opt/nextExplorer
|
||||
export NODE_ENV=production
|
||||
$STD npm ci
|
||||
cd /opt/nextExplorer/frontend
|
||||
$STD npm ci --omit=dev --workspace backend
|
||||
mv node_modules "$APP_DIR"
|
||||
mv backend/{src,package.json} "$APP_DIR"
|
||||
unset NODE_ENV
|
||||
export NODE_ENV=development
|
||||
$STD npm ci
|
||||
$STD npm run build -- --sourcemap false
|
||||
$STD npm ci --workspace frontend
|
||||
$STD npm run -w frontend build -- --sourcemap false
|
||||
unset NODE_ENV
|
||||
cd /opt/nextExplorer/
|
||||
mv backend/{node_modules,src,package.json} "$APP_DIR"
|
||||
mv frontend/dist/ "$APP_DIR"/src/public
|
||||
chown -R explorer:explorer "$APP_DIR" /etc/nextExplorer
|
||||
sed -i "\|version|s|$(jq -cr '.version' ${APP_DIR}/package.json)|$(cat ~/.nextexplorer)|" "$APP_DIR"/package.json
|
||||
sed -i 's/app.js/server.js/' /etc/systemd/system/nextexplorer.service && systemctl daemon-reload
|
||||
msg_ok "Updated nextExplorer"
|
||||
|
||||
msg_info "Starting nextExplorer"
|
||||
|
||||
@ -1,50 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)
|
||||
# Copyright (c) 2021-2026 community-scripts ORG
|
||||
# Author: pfassina
|
||||
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
|
||||
# Source: https://github.com/openclaw/openclaw
|
||||
|
||||
APP="OpenClaw"
|
||||
var_tags="${var_tags:-ai-assistant;chatops}"
|
||||
var_cpu="${var_cpu:-2}"
|
||||
var_ram="${var_ram:-2048}"
|
||||
var_disk="${var_disk:-4}"
|
||||
var_os="${var_os:-debian}"
|
||||
var_version="${var_version:-13}"
|
||||
var_unprivileged="${var_unprivileged:-1}"
|
||||
|
||||
header_info "$APP"
|
||||
variables
|
||||
color
|
||||
catch_errors
|
||||
|
||||
function update_script() {
|
||||
header_info
|
||||
check_container_storage
|
||||
check_container_resources
|
||||
if [[ ! -f /etc/systemd/system/openclaw.service ]]; then
|
||||
msg_error "No ${APP} Installation Found!"
|
||||
exit
|
||||
fi
|
||||
msg_info "Stopping Service"
|
||||
systemctl stop openclaw
|
||||
msg_ok "Stopped Service"
|
||||
|
||||
NODE_VERSION="22" NODE_MODULE="openclaw" setup_nodejs
|
||||
msg_info "Starting Service"
|
||||
systemctl start openclaw
|
||||
msg_ok "Started Service"
|
||||
msg_ok "Updated successfully!"
|
||||
exit
|
||||
}
|
||||
|
||||
start
|
||||
build_container
|
||||
description
|
||||
|
||||
msg_ok "Completed successfully!\n"
|
||||
echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
|
||||
echo -e "${INFO}${YW} Run 'openclaw onboard' inside the container to complete setup${CL}"
|
||||
echo -e "${INFO}${YW} Access it using the following URL:${CL}"
|
||||
echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:18789${CL}"
|
||||
75
ct/skylite-ux.sh
Normal file
75
ct/skylite-ux.sh
Normal file
@ -0,0 +1,75 @@
|
||||
#!/usr/bin/env bash
|
||||
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)
|
||||
# Copyright (c) 2021-2026 community-scripts ORG
|
||||
# Author: bzumhagen
|
||||
# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
|
||||
# Source: https://github.com/Wetzel402/Skylite-UX
|
||||
|
||||
APP="Skylite-UX"
|
||||
var_tags="${var_tags:-family;productivity}"
|
||||
var_cpu="${var_cpu:-2}"
|
||||
var_ram="${var_ram:-4096}"
|
||||
var_disk="${var_disk:-8}"
|
||||
var_os="${var_os:-debian}"
|
||||
var_version="${var_version:-13}"
|
||||
var_unprivileged="${var_unprivileged:-1}"
|
||||
|
||||
header_info "$APP"
|
||||
variables
|
||||
color
|
||||
catch_errors
|
||||
|
||||
function update_script() {
|
||||
header_info
|
||||
check_container_storage
|
||||
check_container_resources
|
||||
|
||||
if [[ ! -d /opt/skylite-ux ]]; then
|
||||
msg_error "No ${APP} Installation Found!"
|
||||
exit
|
||||
fi
|
||||
|
||||
if check_for_gh_release "skylite-ux" "Wetzel402/Skylite-UX"; then
|
||||
msg_info "Stopping Service"
|
||||
systemctl stop skylite-ux
|
||||
msg_ok "Stopped Service"
|
||||
|
||||
msg_info "Backing up Data"
|
||||
cp /opt/skylite-ux/.env /tmp/skylite-ux.env.backup
|
||||
msg_ok "Backed up Data"
|
||||
|
||||
CLEAN_INSTALL=1 fetch_and_deploy_gh_release "skylite-ux" "Wetzel402/Skylite-UX" "tarball"
|
||||
|
||||
msg_info "Restoring Data"
|
||||
cp /tmp/skylite-ux.env.backup /opt/skylite-ux/.env
|
||||
rm -f /tmp/skylite-ux.env.backup
|
||||
msg_ok "Restored Data"
|
||||
|
||||
msg_info "Building Skylite-UX"
|
||||
cd /opt/skylite-ux
|
||||
$STD npm ci
|
||||
$STD npx prisma generate
|
||||
$STD npm run build
|
||||
msg_ok "Built Skylite-UX"
|
||||
|
||||
msg_info "Running Database Migrations"
|
||||
cd /opt/skylite-ux
|
||||
$STD npx prisma migrate deploy
|
||||
msg_ok "Ran Database Migrations"
|
||||
|
||||
msg_info "Starting Service"
|
||||
systemctl start skylite-ux
|
||||
msg_ok "Started Service"
|
||||
msg_ok "Updated successfully!"
|
||||
fi
|
||||
exit
|
||||
}
|
||||
|
||||
start
|
||||
build_container
|
||||
description
|
||||
|
||||
msg_ok "Completed successfully!\n"
|
||||
echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
|
||||
echo -e "${INFO}${YW} Access it using the following URL:${CL}"
|
||||
echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:3000${CL}"
|
||||
@ -1,115 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)
|
||||
# Copyright (c) 2021-2026 tteck
|
||||
# Author: tteck (tteckster)
|
||||
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
|
||||
# Source: https://github.com/dani-garcia/vaultwarden
|
||||
|
||||
APP="Vaultwarden"
|
||||
var_tags="${var_tags:-password-manager}"
|
||||
var_cpu="${var_cpu:-4}"
|
||||
var_ram="${var_ram:-6144}"
|
||||
var_disk="${var_disk:-20}"
|
||||
var_os="${var_os:-debian}"
|
||||
var_version="${var_version:-13}"
|
||||
var_unprivileged="${var_unprivileged:-1}"
|
||||
|
||||
header_info "$APP"
|
||||
variables
|
||||
color
|
||||
catch_errors
|
||||
|
||||
function update_script() {
|
||||
header_info
|
||||
check_container_storage
|
||||
check_container_resources
|
||||
if [[ ! -f /etc/systemd/system/vaultwarden.service ]]; then
|
||||
msg_error "No ${APP} Installation Found!"
|
||||
exit
|
||||
fi
|
||||
|
||||
VAULT=$(get_latest_github_release "dani-garcia/vaultwarden")
|
||||
WVRELEASE=$(get_latest_github_release "dani-garcia/bw_web_builds")
|
||||
|
||||
UPD=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "SUPPORT" --radiolist --cancel-button Exit-Script "Spacebar = Select" 11 58 3 \
|
||||
"1" "VaultWarden $VAULT" ON \
|
||||
"2" "Web-Vault $WVRELEASE" OFF \
|
||||
"3" "Set Admin Token" OFF \
|
||||
3>&1 1>&2 2>&3)
|
||||
|
||||
if [ "$UPD" == "1" ]; then
|
||||
if check_for_gh_release "vaultwarden" "dani-garcia/vaultwarden"; then
|
||||
msg_info "Stopping Service"
|
||||
systemctl stop vaultwarden
|
||||
msg_ok "Stopped Service"
|
||||
|
||||
fetch_and_deploy_gh_release "vaultwarden" "dani-garcia/vaultwarden" "tarball" "latest" "/tmp/vaultwarden-src"
|
||||
|
||||
msg_info "Updating VaultWarden to $VAULT (Patience)"
|
||||
cd /tmp/vaultwarden-src
|
||||
$STD cargo build --features "sqlite,mysql,postgresql" --release
|
||||
if [[ -f /usr/bin/vaultwarden ]]; then
|
||||
cp target/release/vaultwarden /usr/bin/
|
||||
else
|
||||
cp target/release/vaultwarden /opt/vaultwarden/bin/
|
||||
fi
|
||||
cd ~ && rm -rf /tmp/vaultwarden-src
|
||||
msg_ok "Updated VaultWarden to ${VAULT}"
|
||||
|
||||
msg_info "Starting Service"
|
||||
systemctl start vaultwarden
|
||||
msg_ok "Started Service"
|
||||
msg_ok "Updated successfully!"
|
||||
else
|
||||
msg_ok "VaultWarden is already up-to-date"
|
||||
fi
|
||||
exit
|
||||
fi
|
||||
|
||||
if [ "$UPD" == "2" ]; then
|
||||
if check_for_gh_release "vaultwarden_webvault" "dani-garcia/bw_web_builds"; then
|
||||
msg_info "Stopping Service"
|
||||
systemctl stop vaultwarden
|
||||
msg_ok "Stopped Service"
|
||||
|
||||
fetch_and_deploy_gh_release "vaultwarden_webvault" "dani-garcia/bw_web_builds" "prebuild" "latest" "/opt/vaultwarden" "bw_web_*.tar.gz"
|
||||
|
||||
msg_info "Updating Web-Vault to $WVRELEASE"
|
||||
rm -rf /opt/vaultwarden/web-vault
|
||||
chown -R root:root /opt/vaultwarden/web-vault/
|
||||
msg_ok "Updated Web-Vault to ${WVRELEASE}"
|
||||
|
||||
msg_info "Starting Service"
|
||||
systemctl start vaultwarden
|
||||
msg_ok "Started Service"
|
||||
msg_ok "Updated successfully!"
|
||||
else
|
||||
msg_ok "Web-Vault is already up-to-date"
|
||||
fi
|
||||
exit
|
||||
fi
|
||||
|
||||
if [ "$UPD" == "3" ]; then
|
||||
if NEWTOKEN=$(whiptail --backtitle "Proxmox VE Helper Scripts" --passwordbox "Set the ADMIN_TOKEN" 10 58 3>&1 1>&2 2>&3); then
|
||||
if [[ -z "$NEWTOKEN" ]]; then exit; fi
|
||||
ensure_dependencies argon2
|
||||
TOKEN=$(echo -n "${NEWTOKEN}" | argon2 "$(openssl rand -base64 32)" -t 2 -m 16 -p 4 -l 64 -e)
|
||||
sed -i "s|ADMIN_TOKEN=.*|ADMIN_TOKEN='${TOKEN}'|" /opt/vaultwarden/.env
|
||||
if [[ -f /opt/vaultwarden/data/config.json ]]; then
|
||||
sed -i "s|\"admin_token\":.*|\"admin_token\": \"${TOKEN}\"|" /opt/vaultwarden/data/config.json
|
||||
fi
|
||||
systemctl restart vaultwarden
|
||||
msg_ok "Admin token updated"
|
||||
fi
|
||||
exit
|
||||
fi
|
||||
}
|
||||
|
||||
start
|
||||
build_container
|
||||
description
|
||||
|
||||
msg_ok "Completed successfully!\n"
|
||||
echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
|
||||
echo -e "${INFO}${YW} Access it using the following URL:${CL}"
|
||||
echo -e "${TAB}${GATEWAY}${BGN}https://${IP}:8000${CL}"
|
||||
@ -1,70 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)
|
||||
# Copyright (c) 2021-2026 community-scripts ORG
|
||||
# Author: MickLesk (Canbiz) | Co-Author: CrazyWolf13
|
||||
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
|
||||
# Source: https://vikunja.io/
|
||||
|
||||
APP="Vikunja"
|
||||
var_tags="${var_tags:-todo-app}"
|
||||
var_cpu="${var_cpu:-1}"
|
||||
var_ram="${var_ram:-1024}"
|
||||
var_disk="${var_disk:-4}"
|
||||
var_os="${var_os:-debian}"
|
||||
var_version="${var_version:-13}"
|
||||
var_unprivileged="${var_unprivileged:-1}"
|
||||
|
||||
header_info "$APP"
|
||||
variables
|
||||
color
|
||||
catch_errors
|
||||
|
||||
function update_script() {
|
||||
header_info
|
||||
check_container_storage
|
||||
check_container_resources
|
||||
if [[ ! -d /opt/vikunja ]]; then
|
||||
msg_error "No ${APP} Installation Found!"
|
||||
exit
|
||||
fi
|
||||
|
||||
RELEASE="$( [[ -f "$HOME/.vikunja" ]] && cat "$HOME/.vikunja" 2>/dev/null || [[ -f /opt/Vikunja_version ]] && cat /opt/Vikunja_version 2>/dev/null || true)"
|
||||
if [[ -z "$RELEASE" ]] || [[ "$RELEASE" == "unstable" ]] || dpkg --compare-versions "${RELEASE:-0.0.0}" lt "1.0.0"; then
|
||||
msg_warn "You are upgrading from Vikunja '$RELEASE'."
|
||||
msg_warn "This requires MANUAL config changes in /etc/vikunja/config.yml."
|
||||
msg_warn "See: https://vikunja.io/changelog/whats-new-in-vikunja-1.0.0/#config-changes"
|
||||
|
||||
read -rp "Continue with update? (y to proceed): " -t 30 CONFIRM1 || exit 1
|
||||
[[ "$CONFIRM1" =~ ^[yY]$ ]] || exit 0
|
||||
|
||||
echo
|
||||
msg_warn "Vikunja may not start after the update until you manually adjust the config."
|
||||
msg_warn "Details: https://vikunja.io/changelog/whats-new-in-vikunja-1.0.0/#config-changes"
|
||||
|
||||
read -rp "Acknowledge and continue? (y): " -t 30 CONFIRM2 || exit 1
|
||||
[[ "$CONFIRM2" =~ ^[yY]$ ]] || exit 0
|
||||
fi
|
||||
|
||||
if check_for_gh_release "vikunja" "go-vikunja/vikunja"; then
|
||||
msg_info "Stopping Service"
|
||||
systemctl stop vikunja
|
||||
msg_ok "Stopped Service"
|
||||
|
||||
fetch_and_deploy_gh_release "vikunja" "go-vikunja/vikunja" "binary"
|
||||
|
||||
msg_info "Starting Service"
|
||||
systemctl start vikunja
|
||||
msg_ok "Started Service"
|
||||
msg_ok "Updated successfully!"
|
||||
fi
|
||||
exit 0
|
||||
}
|
||||
|
||||
start
|
||||
build_container
|
||||
description
|
||||
|
||||
msg_ok "Completed successfully!\n"
|
||||
echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
|
||||
echo -e "${INFO}${YW} Access it using the following URL:${CL}"
|
||||
echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:3456${CL}"
|
||||
75
ct/wger.sh
75
ct/wger.sh
@ -1,75 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVED/main/misc/build.func)
|
||||
# Copyright (c) 2021-2026 community-scripts ORG
|
||||
# Author: Slaviša Arežina (tremor021)
|
||||
# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
|
||||
# Source: https://github.com/wger-project/wger
|
||||
|
||||
APP="wger"
|
||||
var_tags="${var_tags:-management;fitness}"
|
||||
var_cpu="${var_cpu:-2}"
|
||||
var_ram="${var_ram:-2048}"
|
||||
var_disk="${var_disk:-8}"
|
||||
var_os="${var_os:-debian}"
|
||||
var_version="${var_version:-13}"
|
||||
var_unprivileged="${var_unprivileged:-1}"
|
||||
|
||||
header_info "$APP"
|
||||
variables
|
||||
color
|
||||
catch_errors
|
||||
|
||||
function update_script() {
|
||||
header_info
|
||||
check_container_storage
|
||||
check_container_resources
|
||||
|
||||
if [[ ! -d /opt/wger ]]; then
|
||||
msg_error "No ${APP} Installation Found!"
|
||||
exit
|
||||
fi
|
||||
|
||||
if check_for_gh_release "wger" "wger-project/wger"; then
|
||||
msg_info "Stopping Service"
|
||||
systemctl stop redis-server nginx celery celery-beat wger
|
||||
msg_ok "Stopped Service"
|
||||
|
||||
msg_info "Backing up Data"
|
||||
cp -r /opt/wger/media /opt/wger_media_backup
|
||||
cp /opt/wger/.env /opt/wger_env_backup
|
||||
msg_ok "Backed up Data"
|
||||
|
||||
CLEAN_INSTALL=1 fetch_and_deploy_gh_release "wger" "wger-project/wger" "tarball" "latest" "/opt/wger"
|
||||
|
||||
msg_info "Restoring Data"
|
||||
cp -r /opt/wger_media_backup/. /opt/wger/media
|
||||
cp /opt/wger_env_backup /opt/wger/.env
|
||||
rm -rf /opt/wger_media_backup /opt/wger_env_backup
|
||||
|
||||
msg_ok "Restored Data"
|
||||
|
||||
msg_info "Updating wger"
|
||||
cd /opt/wger
|
||||
set -a && source /opt/wger/.env && set +a
|
||||
export DJANGO_SETTINGS_MODULE=settings.main
|
||||
$STD uv pip install .
|
||||
$STD uv run python manage.py migrate
|
||||
$STD uv run python manage.py collectstatic --no-input
|
||||
msg_ok "Updated wger"
|
||||
|
||||
msg_info "Starting Services"
|
||||
systemctl start redis-server nginx celery celery-beat wger
|
||||
msg_ok "Started Services"
|
||||
msg_ok "Updated Successfully"
|
||||
fi
|
||||
exit
|
||||
}
|
||||
|
||||
start
|
||||
build_container
|
||||
description
|
||||
|
||||
msg_ok "Completed Successfully!\n"
|
||||
echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
|
||||
echo -e "${INFO}${YW} Access it using the following URL:${CL}"
|
||||
echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:3000${CL}"
|
||||
@ -30,7 +30,8 @@ function update_script() {
|
||||
|
||||
if check_for_gh_release "zitadel" "zitadel/zitadel"; then
|
||||
msg_info "Stopping Service"
|
||||
systemctl stop zitadel-api zitadel-login
|
||||
systemctl stop zitadel-api
|
||||
systemctl stop zitadel-login
|
||||
msg_ok "Stopped Service"
|
||||
|
||||
msg_info "Updating Zitadel"
|
||||
@ -45,7 +46,9 @@ function update_script() {
|
||||
msg_ok "Updated Zitadel"
|
||||
|
||||
msg_info "Starting Service"
|
||||
systemctl start zitadel
|
||||
systemctl start zitadel-api
|
||||
sleep 5
|
||||
systemctl start zitadel-login
|
||||
msg_ok "Started Service"
|
||||
msg_ok "Updated successfully!"
|
||||
fi
|
||||
|
||||
@ -1,38 +1,30 @@
|
||||
# API Integration Documentation (/api)
|
||||
|
||||
This directory contains comprehensive documentation for API integration and the `/api` directory.
|
||||
# API Integration Documentation
|
||||
|
||||
## Overview
|
||||
|
||||
The `/api` directory contains the Proxmox Community Scripts API backend for diagnostic reporting, telemetry, and analytics integration.
|
||||
The telemetry and diagnostics API uses **PocketBase** as backend, hosted at `http://db.community-scripts.org`. All telemetry data is stored in the `_dev_telemetry_data` collection.
|
||||
|
||||
The Go/MongoDB API server (`/api` directory) has been replaced entirely by PocketBase.
|
||||
|
||||
## Key Components
|
||||
|
||||
### Main API Service
|
||||
Located in `/api/main.go`:
|
||||
### PocketBase Backend
|
||||
- **URL**: `http://db.community-scripts.org`
|
||||
- **Collection**: `_dev_telemetry_data`
|
||||
- **Admin UI**: `http://db.community-scripts.org/_/#/collections`
|
||||
- RESTful API for receiving telemetry data
|
||||
- Installation statistics tracking
|
||||
- Error reporting and analytics
|
||||
- Performance monitoring
|
||||
|
||||
### Integration with Scripts
|
||||
The API is integrated into all installation scripts via `api.func`:
|
||||
- Sends installation start/completion events
|
||||
- Reports errors and exit codes
|
||||
- Reports errors and exit codes with numeric values
|
||||
- Collects anonymous usage statistics
|
||||
- Enables project analytics
|
||||
|
||||
## Documentation Structure
|
||||
|
||||
API documentation covers:
|
||||
- API endpoint specifications
|
||||
- Integration methods
|
||||
- Data formats and schemas
|
||||
- Error handling
|
||||
- Privacy and data handling
|
||||
|
||||
## Key Resources
|
||||
|
||||
- **[misc/api.func/](../misc/api.func/)** - API function library documentation
|
||||
- **[misc/api.func/README.md](../misc/api.func/README.md)** - Quick reference
|
||||
- **[misc/api.func/API_FUNCTIONS_REFERENCE.md](../misc/api.func/API_FUNCTIONS_REFERENCE.md)** - Complete function reference
|
||||
@ -42,48 +34,92 @@ API documentation covers:
|
||||
The `api.func` library provides:
|
||||
|
||||
### `post_to_api()`
|
||||
Send container installation data to API.
|
||||
Send LXC container installation data to PocketBase.
|
||||
|
||||
**Usage**:
|
||||
```bash
|
||||
post_to_api CTID STATUS APP_NAME
|
||||
```
|
||||
Creates a new record in `_dev_telemetry_data` with status `installing`.
|
||||
|
||||
### `post_update_to_api()`
|
||||
Report application update status.
|
||||
### `post_to_api_vm()`
|
||||
Send VM installation data to PocketBase.
|
||||
|
||||
**Usage**:
|
||||
```bash
|
||||
post_update_to_api CTID APP_NAME VERSION
|
||||
```
|
||||
Creates a new record with `type=vm` and `ct_type=2`.
|
||||
|
||||
### `get_error_description()`
|
||||
### `post_update_to_api(status, exit_code)`
|
||||
Update installation status via PocketBase PATCH.
|
||||
|
||||
Maps status values:
|
||||
- `"done"` → PocketBase status `"sucess"`
|
||||
- `"failed"` → PocketBase status `"failed"`
|
||||
|
||||
### `explain_exit_code(code)`
|
||||
Get human-readable error description from exit code.
|
||||
|
||||
**Usage**:
|
||||
```bash
|
||||
ERROR_DESC=$(get_error_description EXIT_CODE)
|
||||
ERROR_DESC=$(explain_exit_code 137)
|
||||
# → "Killed (SIGKILL / Out of memory?)"
|
||||
```
|
||||
|
||||
## PocketBase Collection Schema
|
||||
|
||||
| Field | Type | Required | Description |
|
||||
|-------|------|----------|-------------|
|
||||
| `id` | text (auto) | yes | PocketBase record ID |
|
||||
| `random_id` | text | yes | Session UUID (unique) |
|
||||
| `type` | select | yes | `lxc`, `vm`, `addon`, `pve` |
|
||||
| `ct_type` | number | yes | 1=LXC, 2=VM |
|
||||
| `nsapp` | text | yes | Application name |
|
||||
| `status` | select | yes | `installing`, `sucess`, `failed`, `unknown` |
|
||||
| `disk_size` | number | no | Disk size in GB |
|
||||
| `core_count` | number | no | CPU cores |
|
||||
| `ram_size` | number | no | RAM in MB |
|
||||
| `os_type` | text | no | OS type (debian, ubuntu, etc.) |
|
||||
| `os_version` | text | no | OS version |
|
||||
| `pve_version` | text | no | Proxmox VE version |
|
||||
| `method` | text | no | Installation method |
|
||||
| `error` | text | no | Error description |
|
||||
| `exit_code` | number | no | Numeric exit code |
|
||||
| `created` | autodate | auto | Record creation timestamp |
|
||||
| `updated` | autodate | auto | Last update timestamp |
|
||||
|
||||
## API Endpoints (PocketBase REST)
|
||||
|
||||
**Base URL**: `http://db.community-scripts.org`
|
||||
|
||||
| Method | Endpoint | Description |
|
||||
|--------|----------|-------------|
|
||||
| `POST` | `/api/collections/_dev_telemetry_data/records` | Create telemetry record |
|
||||
| `PATCH` | `/api/collections/_dev_telemetry_data/records/{id}` | Update record status |
|
||||
| `GET` | `/api/collections/_dev_telemetry_data/records` | List/search records |
|
||||
|
||||
### Query Parameters (GET)
|
||||
- `filter` – PocketBase filter syntax, e.g. `(nsapp='debian' && status='failed')`
|
||||
- `sort` – Sort fields, e.g. `-created,nsapp`
|
||||
- `page` / `perPage` – Pagination
|
||||
- `fields` – Limit returned fields
|
||||
|
||||
## API Integration Points
|
||||
|
||||
### In Container Creation (`ct/AppName.sh`)
|
||||
- Called by build.func to report container creation
|
||||
- Sends initial container setup data
|
||||
- Reports success or failure
|
||||
- Called by `build.func` to report container creation via `post_to_api`
|
||||
- Sends initial container setup data with status `installing`
|
||||
- Reports success or failure via `post_update_to_api`
|
||||
|
||||
### In Installation Scripts (`install/appname-install.sh`)
|
||||
- Called at start of installation
|
||||
- Called on installation completion
|
||||
- Called on error conditions
|
||||
### In VM Creation (`vm/AppName.sh`)
|
||||
- Calls `post_to_api_vm` after VM creation
|
||||
- Status updates via `post_update_to_api`
|
||||
|
||||
### Data Collected
|
||||
- Container/VM ID
|
||||
- Application name and version
|
||||
- Installation duration
|
||||
- Success/failure status
|
||||
- Error codes (if failure)
|
||||
- Anonymous usage metrics
|
||||
### Data Flow
|
||||
```
|
||||
Installation Scripts
|
||||
│
|
||||
├─ Call: api.func functions
|
||||
│
|
||||
├─ POST → PocketBase (create record, status=installing)
|
||||
│ └─ Returns record ID (stored in PB_RECORD_ID)
|
||||
│
|
||||
└─ PATCH → PocketBase (update record with final status)
|
||||
└─ status=sucess/failed + exit_code + error
|
||||
```
|
||||
|
||||
## Privacy
|
||||
|
||||
@ -92,55 +128,18 @@ All API data:
|
||||
- ✅ Aggregated for statistics
|
||||
- ✅ Used only for project improvement
|
||||
- ✅ No tracking of user identities
|
||||
- ✅ Can be disabled if desired
|
||||
|
||||
## API Architecture
|
||||
|
||||
```
|
||||
Installation Scripts
|
||||
│
|
||||
├─ Call: api.func functions
|
||||
│
|
||||
└─ POST to: https://api.community-scripts.org
|
||||
│
|
||||
├─ Receives data
|
||||
├─ Validates format
|
||||
├─ Stores metrics
|
||||
└─ Aggregates statistics
|
||||
│
|
||||
└─ Used for:
|
||||
├─ Download tracking
|
||||
├─ Error trending
|
||||
├─ Feature usage stats
|
||||
└─ Project health monitoring
|
||||
```
|
||||
|
||||
## Common API Tasks
|
||||
|
||||
- **Enable API reporting** → Built-in by default, no configuration needed
|
||||
- **Disable API** → Set `api_disable="yes"` before running
|
||||
- **View API data** → Visit https://community-scripts.org/stats
|
||||
- **Report API errors** → [GitHub Issues](https://github.com/community-scripts/ProxmoxVED/issues)
|
||||
- ✅ Can be disabled via diagnostics settings
|
||||
|
||||
## Debugging API Issues
|
||||
|
||||
If API calls fail:
|
||||
1. Check internet connectivity
|
||||
2. Verify API endpoint availability
|
||||
2. Verify PocketBase endpoint: `curl -s http://db.community-scripts.org/api/health`
|
||||
3. Review error codes in [EXIT_CODES.md](../EXIT_CODES.md)
|
||||
4. Check API function logs
|
||||
5. Report issues on GitHub
|
||||
|
||||
## API Endpoint
|
||||
|
||||
**Base URL**: `https://api.community-scripts.org`
|
||||
|
||||
**Endpoints**:
|
||||
- `POST /install` - Report container installation
|
||||
- `POST /update` - Report application update
|
||||
- `GET /stats` - Public statistics
|
||||
4. Check that `DIAGNOSTICS=yes` in `/usr/local/community-scripts/diagnostics`
|
||||
5. Report issues on [GitHub](https://git.community-scripts.org/community-scripts/ProxmoxVED/issues)
|
||||
|
||||
---
|
||||
|
||||
**Last Updated**: December 2025
|
||||
**Last Updated**: February 2026
|
||||
**Maintainers**: community-scripts team
|
||||
|
||||
@ -1,5 +1,9 @@
|
||||
# api.func Execution Flowchart
|
||||
|
||||
## Overview
|
||||
|
||||
This document illustrates the execution flow of `api.func` functions. The backend is **PocketBase** at `http://db.community-scripts.org`, collection `_dev_telemetry_data`.
|
||||
|
||||
## Main API Communication Flow
|
||||
|
||||
```
|
||||
@ -10,333 +14,321 @@
|
||||
│
|
||||
▼
|
||||
┌─────────────────────────────────────────────────────────────────────────────────┐
|
||||
│ Prerequisites Check │
|
||||
│ │
|
||||
│ ┌─────────────────────────────────────────────────────────────────────────────┐ │
|
||||
│ │ Prerequisites Validation │ │
|
||||
│ │ │ │
|
||||
│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │
|
||||
│ │ │ Check curl │ │ Check │ │ Check │ │ │
|
||||
│ │ │ Availability │ │ Diagnostics │ │ Random UUID │ │ │
|
||||
│ │ │ │ │ Setting │ │ │ │
|
||||
│ │ │ • command -v │ │ • DIAGNOSTICS │ │ • RANDOM_UUID │ │
|
||||
│ │ │ curl │ │ = "yes" │ │ not empty │ │
|
||||
│ │ │ • Return if │ │ • Return if │ │ • Return if │ │
|
||||
│ │ │ not found │ │ disabled │ │ not set │ │
|
||||
│ │ │ │ │ │ │ │ │
|
||||
│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │
|
||||
│ └─────────────────────────────────────────────────────────────────────────────┘ │
|
||||
│ Prerequisites Check │
|
||||
│ │
|
||||
│ ┌─────────────────────────────────────────────────────────────────────────┐ │
|
||||
│ │ Prerequisites Validation │ │
|
||||
│ │ │ │
|
||||
│ │ ┌─────────────────┐ ┌─────────────────┐ ┌──────────────────┐ │ │
|
||||
│ │ │ Check curl │ │ Check │ │ Check │ │ │
|
||||
│ │ │ Availability │ │ DIAGNOSTICS │ │ RANDOM_UUID │ │ │
|
||||
│ │ │ │ │ │ │ │ │ │
|
||||
│ │ │ • command -v │ │ • Must be "yes" │ │ • Must not be │ │ │
|
||||
│ │ │ curl │ │ • Return if │ │ empty │ │ │
|
||||
│ │ │ • Return if │ │ "no" or unset │ │ • Return if │ │ │
|
||||
│ │ │ not found │ │ │ │ not set │ │ │
|
||||
│ │ └─────────────────┘ └─────────────────┘ └──────────────────┘ │ │
|
||||
│ └─────────────────────────────────────────────────────────────────────────┘ │
|
||||
└─────────────────────────────────────────────────────────────────────────────────┘
|
||||
│
|
||||
▼
|
||||
┌─────────────────────────────────────────────────────────────────────────────────┐
|
||||
│ Data Collection │
|
||||
│ │
|
||||
│ ┌─────────────────────────────────────────────────────────────────────────────┐ │
|
||||
│ │ System Information Gathering │ │
|
||||
│ │ │ │
|
||||
│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │
|
||||
│ │ │ Get PVE │ │ Collect │ │ Prepare JSON │ │ │
|
||||
│ │ │ Version │ │ Environment │ │ Payload │ │
|
||||
│ │ │ │ │ Variables │ │ │ │
|
||||
│ │ │ • pveversion │ │ • CT_TYPE │ │ • Create JSON │ │
|
||||
│ │ │ command │ │ • DISK_SIZE │ │ structure │ │
|
||||
│ │ │ • Parse version │ │ • CORE_COUNT │ │ • Include all │ │
|
||||
│ │ │ • Extract │ │ • RAM_SIZE │ │ variables │ │
|
||||
│ │ │ major.minor │ │ • var_os │ │ • Format for API │ │
|
||||
│ │ │ │ │ • var_version │ │ │ │
|
||||
│ │ │ │ │ • NSAPP │ │ │ │
|
||||
│ │ │ │ │ • METHOD │ │ │ │
|
||||
│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │
|
||||
│ └─────────────────────────────────────────────────────────────────────────────┘ │
|
||||
│ Data Collection │
|
||||
│ │
|
||||
│ ┌─────────────────────────────────────────────────────────────────────────┐ │
|
||||
│ │ System Information Gathering │ │
|
||||
│ │ │ │
|
||||
│ │ ┌─────────────────┐ ┌─────────────────┐ ┌──────────────────┐ │ │
|
||||
│ │ │ Get PVE │ │ Collect Env │ │ Build JSON │ │ │
|
||||
│ │ │ Version │ │ Variables │ │ Payload │ │ │
|
||||
│ │ │ │ │ │ │ │ │ │
|
||||
│ │ │ • pveversion │ │ • CT_TYPE │ │ • Heredoc JSON │ │ │
|
||||
│ │ │ command │ │ • DISK_SIZE │ │ • Include all │ │ │
|
||||
│ │ │ • Parse version │ │ • CORE_COUNT │ │ fields │ │ │
|
||||
│ │ │ • Fallback: │ │ • RAM_SIZE │ │ • status = │ │ │
|
||||
│ │ │ "not found" │ │ • var_os │ │ "installing" │ │ │
|
||||
│ │ │ │ │ • NSAPP, METHOD │ │ │ │ │
|
||||
│ │ └─────────────────┘ └─────────────────┘ └──────────────────┘ │ │
|
||||
│ └─────────────────────────────────────────────────────────────────────────┘ │
|
||||
└─────────────────────────────────────────────────────────────────────────────────┘
|
||||
│
|
||||
▼
|
||||
┌─────────────────────────────────────────────────────────────────────────────────┐
|
||||
│ API Request Execution │
|
||||
│ │
|
||||
│ ┌─────────────────────────────────────────────────────────────────────────────┐ │
|
||||
│ │ HTTP Request Processing │ │
|
||||
│ │ │ │
|
||||
│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │
|
||||
│ │ │ Prepare │ │ Execute │ │ Handle │ │ │
|
||||
│ │ │ Request │ │ HTTP Request │ │ Response │ │
|
||||
│ │ │ │ │ │ │ │ │
|
||||
│ │ │ • Set API URL │ │ • curl -s -w │ │ • Capture HTTP │ │
|
||||
│ │ │ • Set headers │ │ "%{http_code}" │ │ status code │ │
|
||||
│ │ │ • Set payload │ │ • POST request │ │ • Store response │ │
|
||||
│ │ │ • Content-Type │ │ • JSON data │ │ • Handle errors │ │
|
||||
│ │ │ application/ │ │ • Follow │ │ gracefully │ │
|
||||
│ │ │ json │ │ redirects │ │ │ │
|
||||
│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │
|
||||
│ └─────────────────────────────────────────────────────────────────────────────┘ │
|
||||
│ PocketBase API Request │
|
||||
│ │
|
||||
│ ┌─────────────────────────────────────────────────────────────────────────┐ │
|
||||
│ │ HTTP Request Processing │ │
|
||||
│ │ │ │
|
||||
│ │ ┌─────────────────┐ ┌─────────────────┐ ┌──────────────────┐ │ │
|
||||
│ │ │ Prepare │ │ Execute │ │ Handle │ │ │
|
||||
│ │ │ Request │ │ HTTP POST │ │ Response │ │ │
|
||||
│ │ │ │ │ │ │ │ │ │
|
||||
│ │ │ • URL: │ │ • curl -s -w │ │ • Check HTTP │ │ │
|
||||
│ │ │ PB_API_URL │ │ "%{http_code}"│ │ 200/201 │ │ │
|
||||
│ │ │ • Method: POST │ │ • -X POST │ │ • Extract "id" │ │ │
|
||||
│ │ │ • Content-Type: │ │ • -L (follow │ │ from response │ │ │
|
||||
│ │ │ application/ │ │ redirects) │ │ • Store in │ │ │
|
||||
│ │ │ json │ │ • JSON body │ │ PB_RECORD_ID │ │ │
|
||||
│ │ └─────────────────┘ └─────────────────┘ └──────────────────┘ │ │
|
||||
│ └─────────────────────────────────────────────────────────────────────────┘ │
|
||||
└─────────────────────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## LXC API Reporting Flow
|
||||
## LXC API Reporting Flow — `post_to_api()`
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────────────────────────────┐
|
||||
│ POST_TO_API() Flow │
|
||||
│ Send LXC container installation data to API │
|
||||
│ post_to_api() Flow │
|
||||
│ POST → Create LXC telemetry record in PocketBase │
|
||||
└─────────────────────┬───────────────────────────────────────────────────────────┘
|
||||
│
|
||||
▼
|
||||
┌─────────────────────────────────────────────────────────────────────────────────┐
|
||||
│ LXC Data Preparation │
|
||||
│ │
|
||||
│ ┌─────────────────────────────────────────────────────────────────────────────┐ │
|
||||
│ │ LXC-Specific Data Collection │ │
|
||||
│ │ │ │
|
||||
│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │
|
||||
│ │ │ Set LXC │ │ Include LXC │ │ Set Status │ │ │
|
||||
│ │ │ Type │ │ Variables │ │ Information │ │
|
||||
│ │ │ │ │ │ │ │ │
|
||||
│ │ │ • ct_type: 1 │ │ • DISK_SIZE │ │ • status: │ │
|
||||
│ │ │ • type: "lxc" │ │ • CORE_COUNT │ │ "installing" │ │
|
||||
│ │ │ • Include all │ │ • RAM_SIZE │ │ • Include all │ │
|
||||
│ │ │ LXC data │ │ • var_os │ │ tracking data │ │
|
||||
│ │ │ │ │ • var_version │ │ │ │
|
||||
│ │ │ │ │ • DISABLEIP6 │ │ │ │
|
||||
│ │ │ │ │ • NSAPP │ │ │ │
|
||||
│ │ │ │ │ • METHOD │ │ │ │
|
||||
│ │ │ │ │ • pve_version │ │ │ │
|
||||
│ │ │ │ │ • random_id │ │ │ │
|
||||
│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │
|
||||
│ └─────────────────────────────────────────────────────────────────────────────┘ │
|
||||
└─────────────────────────────────────────────────────────────────────────────────┘
|
||||
│
|
||||
▼
|
||||
┌─────────────────────────────────────────────────────────────────────────────────┐
|
||||
│ JSON Payload Creation │
|
||||
│ │
|
||||
│ ┌─────────────────────────────────────────────────────────────────────────────┐ │
|
||||
│ │ JSON Structure Generation │ │
|
||||
│ │ │ │
|
||||
│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │
|
||||
│ │ │ Create JSON │ │ Validate │ │ Format for │ │ │
|
||||
│ │ │ Structure │ │ Data │ │ API Request │ │
|
||||
│ │ │ │ │ │ │ │ │
|
||||
│ │ │ • Use heredoc │ │ • Check all │ │ • Ensure proper │ │
|
||||
│ │ │ syntax │ │ variables │ │ JSON format │ │
|
||||
│ │ │ • Include all │ │ are set │ │ • Escape special │ │
|
||||
│ │ │ required │ │ • Validate │ │ characters │ │
|
||||
│ │ │ fields │ │ data types │ │ • Set content │ │
|
||||
│ │ │ • Format │ │ • Handle │ │ type │ │
|
||||
│ │ │ properly │ │ missing │ │ │ │
|
||||
│ │ │ │ │ values │ │ │ │
|
||||
│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │
|
||||
│ └─────────────────────────────────────────────────────────────────────────────┘ │
|
||||
└─────────────────────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## VM API Reporting Flow
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────────────────────────────┐
|
||||
│ POST_TO_API_VM() Flow │
|
||||
│ Send VM installation data to API │
|
||||
│ Prerequisites: curl? ──► DIAGNOSTICS="yes"? ──► RANDOM_UUID set? │
|
||||
│ (return silently on any failure) │
|
||||
└─────────────────────┬───────────────────────────────────────────────────────────┘
|
||||
│
|
||||
▼
|
||||
┌─────────────────────────────────────────────────────────────────────────────────┐
|
||||
│ VM Data Preparation │
|
||||
│ │
|
||||
│ ┌─────────────────────────────────────────────────────────────────────────────┐ │
|
||||
│ │ VM-Specific Data Collection │ │
|
||||
│ │ │ │
|
||||
│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │
|
||||
│ │ │ Check │ │ Set VM │ │ Process Disk │ │ │
|
||||
│ │ │ Diagnostics │ │ Type │ │ Size │ │
|
||||
│ │ │ File │ │ │ │ │ │
|
||||
│ │ │ │ │ • ct_type: 2 │ │ • Remove 'G' │ │
|
||||
│ │ │ • Check file │ │ • type: "vm" │ │ suffix │ │
|
||||
│ │ │ existence │ │ • Include all │ │ • Convert to │ │
|
||||
│ │ │ • Read │ │ VM data │ │ numeric value │ │
|
||||
│ │ │ DIAGNOSTICS │ │ │ │ • Store in │ │
|
||||
│ │ │ setting │ │ │ │ DISK_SIZE_API │ │
|
||||
│ │ │ • Parse value │ │ │ │ │ │
|
||||
│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │
|
||||
│ └─────────────────────────────────────────────────────────────────────────────┘ │
|
||||
└─────────────────────────────────────────────────────────────────────────────────┘
|
||||
│
|
||||
▼
|
||||
┌─────────────────────────────────────────────────────────────────────────────────┐
|
||||
│ VM JSON Payload Creation │
|
||||
│ │
|
||||
│ ┌─────────────────────────────────────────────────────────────────────────────┐ │
|
||||
│ │ VM-Specific JSON Structure │ │
|
||||
│ │ │ │
|
||||
│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │
|
||||
│ │ │ Include VM │ │ Set VM │ │ Format VM │ │ │
|
||||
│ │ │ Variables │ │ Status │ │ Data for API │ │
|
||||
│ │ │ │ │ │ │ │ │
|
||||
│ │ │ • DISK_SIZE_API │ │ • status: │ │ • Ensure proper │ │
|
||||
│ │ │ • CORE_COUNT │ │ "installing" │ │ JSON format │ │
|
||||
│ │ │ • RAM_SIZE │ │ • Include all │ │ • Handle VM- │ │
|
||||
│ │ │ • var_os │ │ tracking │ │ specific data │ │
|
||||
│ │ │ • var_version │ │ information │ │ • Set appropriate │ │
|
||||
│ │ │ • NSAPP │ │ │ │ content type │ │
|
||||
│ │ │ • METHOD │ │ │ │ │ │
|
||||
│ │ │ • pve_version │ │ │ │ │ │
|
||||
│ │ │ • random_id │ │ │ │ │ │
|
||||
│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │
|
||||
│ └─────────────────────────────────────────────────────────────────────────────┘ │
|
||||
└─────────────────────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## Status Update Flow
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────────────────────────────┐
|
||||
│ POST_UPDATE_TO_API() Flow │
|
||||
│ Send installation completion status to API │
|
||||
│ LXC Data Preparation │
|
||||
│ │
|
||||
│ ┌─────────────────┐ ┌─────────────────────┐ ┌───────────────────┐ │
|
||||
│ │ Set LXC type │ │ Collect variables │ │ Set initial │ │
|
||||
│ │ │ │ │ │ status │ │
|
||||
│ │ • ct_type: 1 │ │ • DISK_SIZE │ │ │ │
|
||||
│ │ • type: "lxc" │ │ • CORE_COUNT │ │ • status: │ │
|
||||
│ │ │ │ • RAM_SIZE │ │ "installing" │ │
|
||||
│ │ │ │ • var_os, var_version│ │ • random_id: │ │
|
||||
│ │ │ │ • NSAPP, METHOD │ │ RANDOM_UUID │ │
|
||||
│ │ │ │ • pve_version │ │ │ │
|
||||
│ └─────────────────┘ └─────────────────────┘ └───────────────────┘ │
|
||||
└─────────────────────┬───────────────────────────────────────────────────────────┘
|
||||
│
|
||||
▼
|
||||
┌─────────────────────────────────────────────────────────────────────────────────┐
|
||||
│ Update Prevention Check │
|
||||
│ │
|
||||
│ ┌─────────────────────────────────────────────────────────────────────────────┐ │
|
||||
│ │ Duplicate Update Prevention │ │
|
||||
│ │ │ │
|
||||
│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │
|
||||
│ │ │ Check │ │ Set Flag │ │ Return Early │ │ │
|
||||
│ │ │ POST_UPDATE_ │ │ if First │ │ if Already │ │
|
||||
│ │ │ DONE │ │ Update │ │ Updated │ │
|
||||
│ │ │ │ │ │ │ │ │
|
||||
│ │ │ • Check if │ │ • Set │ │ • Return 0 │ │
|
||||
│ │ │ already │ │ POST_UPDATE_ │ │ • Skip API call │ │
|
||||
│ │ │ updated │ │ DONE=true │ │ • Prevent │ │
|
||||
│ │ │ • Prevent │ │ • Continue │ │ duplicate │ │
|
||||
│ │ │ duplicate │ │ with update │ │ requests │ │
|
||||
│ │ │ requests │ │ │ │ │ │
|
||||
│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │
|
||||
│ └─────────────────────────────────────────────────────────────────────────────┘ │
|
||||
└─────────────────────────────────────────────────────────────────────────────────┘
|
||||
│
|
||||
▼
|
||||
┌─────────────────────────────────────────────────────────────────────────────────┐
|
||||
│ Status and Error Processing │
|
||||
│ │
|
||||
│ ┌─────────────────────────────────────────────────────────────────────────────┐ │
|
||||
│ │ Status Determination │ │
|
||||
│ │ │ │
|
||||
│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │
|
||||
│ │ │ Determine │ │ Get Error │ │ Prepare Status │ │ │
|
||||
│ │ │ Status │ │ Description │ │ Data │ │
|
||||
│ │ │ │ │ │ │ │ │
|
||||
│ │ │ • status: │ │ • Call │ │ • Include status │ │
|
||||
│ │ │ "success" or │ │ get_error_ │ │ • Include error │ │
|
||||
│ │ │ "failed" │ │ description() │ │ description │ │
|
||||
│ │ │ • Set exit │ │ • Get human- │ │ • Include random │ │
|
||||
│ │ │ code based │ │ readable │ │ ID for tracking │ │
|
||||
│ │ │ on status │ │ error message │ │ │ │
|
||||
│ │ │ • Default to │ │ • Handle │ │ │ │
|
||||
│ │ │ error if │ │ unknown │ │ │ │
|
||||
│ │ │ not set │ │ errors │ │ │ │
|
||||
│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │
|
||||
│ └─────────────────────────────────────────────────────────────────────────────┘ │
|
||||
└─────────────────────────────────────────────────────────────────────────────────┘
|
||||
│
|
||||
▼
|
||||
┌─────────────────────────────────────────────────────────────────────────────────┐
|
||||
│ Status Update API Request │
|
||||
│ │
|
||||
│ ┌─────────────────────────────────────────────────────────────────────────────┐ │
|
||||
│ │ Status Update Payload Creation │ │
|
||||
│ │ │ │
|
||||
│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │
|
||||
│ │ │ Create │ │ Send Status │ │ Mark Update │ │ │
|
||||
│ │ │ Status JSON │ │ Update │ │ Complete │ │
|
||||
│ │ │ │ │ │ │ │ │
|
||||
│ │ │ • Include │ │ • POST to │ │ • Set │ │
|
||||
│ │ │ status │ │ updatestatus │ │ POST_UPDATE_ │ │
|
||||
│ │ │ • Include │ │ endpoint │ │ DONE=true │ │
|
||||
│ │ │ error │ │ • Include JSON │ │ • Prevent further │ │
|
||||
│ │ │ description │ │ payload │ │ updates │ │
|
||||
│ │ │ • Include │ │ • Handle │ │ • Complete │ │
|
||||
│ │ │ random_id │ │ response │ │ process │ │
|
||||
│ │ │ │ │ gracefully │ │ │ │
|
||||
│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │
|
||||
│ └─────────────────────────────────────────────────────────────────────────────┘ │
|
||||
│ POST → PB_API_URL │
|
||||
│ http://db.community-scripts.org/api/collections/_dev_telemetry_data/records │
|
||||
│ │
|
||||
│ Response (HTTP 200/201): │
|
||||
│ { "id": "abc123def456789", ... } │
|
||||
│ │ │
|
||||
│ └──► PB_RECORD_ID = "abc123def456789" │
|
||||
└─────────────────────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## Error Description Flow
|
||||
## VM API Reporting Flow — `post_to_api_vm()`
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────────────────────────────┐
|
||||
│ GET_ERROR_DESCRIPTION() Flow │
|
||||
│ Convert numeric exit codes to human-readable explanations │
|
||||
│ post_to_api_vm() Flow │
|
||||
│ POST → Create VM telemetry record in PocketBase │
|
||||
└─────────────────────┬───────────────────────────────────────────────────────────┘
|
||||
│
|
||||
▼
|
||||
┌─────────────────────────────────────────────────────────────────────────────────┐
|
||||
│ Error Code Classification │
|
||||
│ │
|
||||
│ ┌─────────────────────────────────────────────────────────────────────────────┐ │
|
||||
│ │ Error Code Categories │ │
|
||||
│ │ │ │
|
||||
│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │
|
||||
│ │ │ General │ │ Network │ │ LXC-Specific │ │ │
|
||||
│ │ │ System │ │ Errors │ │ Errors │ │
|
||||
│ │ │ Errors │ │ │ │ │ │
|
||||
│ │ │ │ │ • 18: Connection│ │ • 100-101: LXC │ │
|
||||
│ │ │ • 0-9: Basic │ │ failed │ │ install errors │ │
|
||||
│ │ │ errors │ │ • 22: Invalid │ │ • 200-209: LXC │ │
|
||||
│ │ │ • 126-128: │ │ argument │ │ creation errors │ │
|
||||
│ │ │ Command │ │ • 28: No space │ │ │ │
|
||||
│ │ │ errors │ │ • 35: Timeout │ │ │ │
|
||||
│ │ │ • 129-143: │ │ • 56: TLS error │ │ │ │
|
||||
│ │ │ Signal │ │ • 60: SSL cert │ │ │ │
|
||||
│ │ │ errors │ │ error │ │ │ │
|
||||
│ │ │ • 152: Resource │ │ │ │ │ │
|
||||
│ │ │ limit │ │ │ │ │ │
|
||||
│ │ │ • 255: Unknown │ │ │ │ │ │
|
||||
│ │ │ critical │ │ │ │ │ │
|
||||
│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │
|
||||
│ └─────────────────────────────────────────────────────────────────────────────┘ │
|
||||
└─────────────────────────────────────────────────────────────────────────────────┘
|
||||
│ Read /usr/local/community-scripts/diagnostics │
|
||||
│ Extract DIAGNOSTICS=yes/no from file │
|
||||
└─────────────────────┬───────────────────────────────────────────────────────────┘
|
||||
│
|
||||
▼
|
||||
┌─────────────────────────────────────────────────────────────────────────────────┐
|
||||
│ Error Message Return │
|
||||
│ │
|
||||
│ ┌─────────────────────────────────────────────────────────────────────────────┐ │
|
||||
│ │ Error Message Formatting │ │
|
||||
│ │ │ │
|
||||
│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │
|
||||
│ │ │ Match Error │ │ Return │ │ Default Case │ │ │
|
||||
│ │ │ Code │ │ Description │ │ │ │
|
||||
│ │ │ │ │ │ │ │ │
|
||||
│ │ │ • Use case │ │ • Return │ │ • Return "Unknown │ │
|
||||
│ │ │ statement │ │ human- │ │ error code │ │
|
||||
│ │ │ • Match │ │ readable │ │ (exit_code)" │ │
|
||||
│ │ │ specific │ │ message │ │ • Handle │ │
|
||||
│ │ │ codes │ │ • Include │ │ unrecognized │ │
|
||||
│ │ │ • Handle │ │ context │ │ codes │ │
|
||||
│ │ │ ranges │ │ information │ │ • Provide fallback │ │
|
||||
│ │ │ │ │ │ │ message │ │
|
||||
│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │
|
||||
│ └─────────────────────────────────────────────────────────────────────────────┘ │
|
||||
│ Prerequisites: curl? ──► DIAGNOSTICS="yes"? ──► RANDOM_UUID set? │
|
||||
│ (return silently on any failure) │
|
||||
└─────────────────────┬───────────────────────────────────────────────────────────┘
|
||||
│
|
||||
▼
|
||||
┌─────────────────────────────────────────────────────────────────────────────────┐
|
||||
│ VM Data Preparation │
|
||||
│ │
|
||||
│ ┌─────────────────┐ ┌─────────────────────┐ ┌───────────────────┐ │
|
||||
│ │ Set VM type │ │ Process disk size │ │ Set initial │ │
|
||||
│ │ │ │ │ │ status │ │
|
||||
│ │ • ct_type: 2 │ │ • Strip 'G' suffix │ │ │ │
|
||||
│ │ • type: "vm" │ │ "20G" → 20 │ │ • status: │ │
|
||||
│ │ │ │ • Store in │ │ "installing" │ │
|
||||
│ │ │ │ DISK_SIZE_API │ │ • random_id: │ │
|
||||
│ │ │ │ │ │ RANDOM_UUID │ │
|
||||
│ └─────────────────┘ └─────────────────────┘ └───────────────────┘ │
|
||||
└─────────────────────┬───────────────────────────────────────────────────────────┘
|
||||
│
|
||||
▼
|
||||
┌─────────────────────────────────────────────────────────────────────────────────┐
|
||||
│ POST → PB_API_URL │
|
||||
│ http://db.community-scripts.org/api/collections/_dev_telemetry_data/records │
|
||||
│ │
|
||||
│ Response (HTTP 200/201): │
|
||||
│ { "id": "xyz789abc012345", ... } │
|
||||
│ │ │
|
||||
│ └──► PB_RECORD_ID = "xyz789abc012345" │
|
||||
└─────────────────────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## Status Update Flow — `post_update_to_api()`
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────────────────────────────┐
|
||||
│ post_update_to_api(status, exit_code) Flow │
|
||||
│ PATCH → Update existing PocketBase record with final status │
|
||||
└─────────────────────┬───────────────────────────────────────────────────────────┘
|
||||
│
|
||||
▼
|
||||
┌─────────────────────────────────────────────────────────────────────────────────┐
|
||||
│ Duplicate Prevention Check │
|
||||
│ │
|
||||
│ ┌─────────────────┐ ┌──────────────────────────────────────────────┐ │
|
||||
│ │ Check │ │ POST_UPDATE_DONE == "true"? │ │
|
||||
│ │ POST_UPDATE_ │───►│ │ │
|
||||
│ │ DONE flag │ │ YES → return 0 (skip PATCH) │ │
|
||||
│ │ │ │ NO → continue │ │
|
||||
│ └─────────────────┘ └──────────────────────────────────────────────┘ │
|
||||
└─────────────────────┬───────────────────────────────────────────────────────────┘
|
||||
│ (first call only)
|
||||
▼
|
||||
┌─────────────────────────────────────────────────────────────────────────────────┐
|
||||
│ Prerequisites: curl? ──► DIAGNOSTICS="yes"? ──► RANDOM_UUID set? │
|
||||
└─────────────────────┬───────────────────────────────────────────────────────────┘
|
||||
│
|
||||
▼
|
||||
┌─────────────────────────────────────────────────────────────────────────────────┐
|
||||
│ Status Mapping │
|
||||
│ │
|
||||
│ Input $1 │ PocketBase status │ exit_code │ error │
|
||||
│ ─────────────────┼─────────────────────┼──────────────┼────────────────────── │
|
||||
│ "done"/"success" │ "sucess" │ 0 │ "" │
|
||||
│ "failed" │ "failed" │ from $2 │ explain_exit_code() │
|
||||
│ anything else │ "unknown" │ from $2 │ explain_exit_code() │
|
||||
│ │
|
||||
│ Note: PocketBase schema spells it "sucess" intentionally │
|
||||
└─────────────────────┬───────────────────────────────────────────────────────────┘
|
||||
│
|
||||
▼
|
||||
┌─────────────────────────────────────────────────────────────────────────────────┐
|
||||
│ Record ID Resolution │
|
||||
│ │
|
||||
│ ┌──────────────────────────┐ ┌──────────────────────────────────────┐ │
|
||||
│ │ PB_RECORD_ID set? │ │ Fallback: GET lookup │ │
|
||||
│ │ │ │ │ │
|
||||
│ │ YES → use PB_RECORD_ID │ │ GET PB_API_URL │ │
|
||||
│ │ │ │ ?filter=(random_id='UUID') │ │
|
||||
│ │ NO → try GET lookup ───┼───►│ &fields=id │ │
|
||||
│ │ │ │ &perPage=1 │ │
|
||||
│ │ │ │ │ │
|
||||
│ │ │ │ Extract "id" from response │ │
|
||||
│ │ │ │ If not found → set flag, return │ │
|
||||
│ └──────────────────────────┘ └──────────────────────────────────────┘ │
|
||||
└─────────────────────┬───────────────────────────────────────────────────────────┘
|
||||
│
|
||||
▼
|
||||
┌─────────────────────────────────────────────────────────────────────────────────┐
|
||||
│ PATCH Request │
|
||||
│ │
|
||||
│ PATCH → PB_API_URL/{record_id} │
|
||||
│ http://db.community-scripts.org/api/collections/_dev_telemetry_data/ │
|
||||
│ records/{record_id} │
|
||||
│ │
|
||||
│ Payload: │
|
||||
│ { │
|
||||
│ "status": "sucess" | "failed" | "unknown", │
|
||||
│ "error": "..." | "", │
|
||||
│ "exit_code": 0 | <numeric> │
|
||||
│ } │
|
||||
│ │
|
||||
│ ──► POST_UPDATE_DONE = true (prevents future calls) │
|
||||
└─────────────────────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## Error Description Flow — `explain_exit_code()`
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────────────────────────────┐
|
||||
│ explain_exit_code(code) Flow │
|
||||
│ Convert numeric exit codes to human-readable descriptions │
|
||||
│ Canonical function — used by api.func AND error_handler.func │
|
||||
└─────────────────────┬───────────────────────────────────────────────────────────┘
|
||||
│
|
||||
▼
|
||||
┌─────────────────────────────────────────────────────────────────────────────────┐
|
||||
│ Exit Code Classification (non-overlapping ranges) │
|
||||
│ │
|
||||
│ ┌─────────────────┐ ┌──────────────────┐ ┌──────────────────────────┐ │
|
||||
│ │ Generic/Shell │ │ curl/wget │ │ APT/DPKG │ │
|
||||
│  │  1–2            │  │ 6, 7, 22, 28, 35 │  │ 100–102, 255             │   │
|
||||
│ └─────────────────┘ └──────────────────┘ └──────────────────────────┘ │
|
||||
│ │
|
||||
│ ┌─────────────────┐ ┌──────────────────┐ ┌──────────────────────────┐ │
|
||||
│ │ System/Signals │ │ Systemd/Service │ │ Python/pip/uv │ │
|
||||
│ │ 124–143 │ │ 150–154 │ │ 160–162 │ │
|
||||
│ └─────────────────┘ └──────────────────┘ └──────────────────────────┘ │
|
||||
│ │
|
||||
│ ┌─────────────────┐ ┌──────────────────┐ ┌──────────────────────────┐ │
|
||||
│ │ PostgreSQL │ │ MySQL/MariaDB │ │ MongoDB │ │
|
||||
│ │ 170–173 │ │ 180–183 │ │ 190–193 │ │
|
||||
│ └─────────────────┘ └──────────────────┘ └──────────────────────────┘ │
|
||||
│ │
|
||||
│ ┌─────────────────┐ ┌──────────────────┐ │
|
||||
│ │ Proxmox │ │ Node.js/npm │ │
|
||||
│ │ 200–231 │ │ 243–249 │ │
|
||||
│ └─────────────────┘ └──────────────────┘ │
|
||||
└─────────────────────┬───────────────────────────────────────────────────────────┘
|
||||
│
|
||||
▼
|
||||
┌─────────────────────────────────────────────────────────────────────────────────┐
|
||||
│ case "$code" in │
|
||||
│ <matched>) echo "<description>" ;; │
|
||||
│ *) echo "Unknown error" ;; │
|
||||
│ esac │
|
||||
└─────────────────────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## Complete Installation Lifecycle
|
||||
|
||||
```
|
||||
┌──────────────────────────────────────────────────────────────┐
|
||||
│ Installation Script (e.g. build.func / vm-core.func) │
|
||||
└────────┬─────────────────────────────────────────────────────┘
|
||||
│
|
||||
│ 1. source api.func
|
||||
│ 2. Set DIAGNOSTICS, RANDOM_UUID, NSAPP, etc.
|
||||
│
|
||||
▼
|
||||
┌──────────────────────────────────────────────────────────────┐
|
||||
│ post_to_api() / post_to_api_vm() │
|
||||
│ │
|
||||
│ POST → PB_API_URL │
|
||||
│ Body: { ..., "status": "installing", "random_id": "..." } │
|
||||
│ │
|
||||
│ Response → PB_RECORD_ID = "abc123def456789" │
|
||||
└────────┬─────────────────────────────────────────────────────┘
|
||||
│
|
||||
│ 3. Installation proceeds...
|
||||
│ (container/VM creation, package install, etc.)
|
||||
│
|
||||
▼
|
||||
┌──────────────────────────────────────────────────────────────┐
|
||||
│ post_update_to_api("done", 0) │
|
||||
│ or │
|
||||
│ post_update_to_api("failed", $exit_code) │
|
||||
│ │
|
||||
│ PATCH → PB_API_URL/{PB_RECORD_ID} │
|
||||
│ Body: { "status": "sucess", "error": "", "exit_code": 0 } │
|
||||
│ or { "status": "failed", "error": "...", "exit_code": N }│
|
||||
│ │
|
||||
│ POST_UPDATE_DONE = true │
|
||||
└──────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## Integration Points
|
||||
|
||||
### With Installation Scripts
|
||||
- **build.func**: Sends LXC installation data
|
||||
- **vm-core.func**: Sends VM installation data
|
||||
- **install.func**: Reports installation status
|
||||
- **alpine-install.func**: Reports Alpine installation data
|
||||
- **build.func**: Calls `post_to_api()` for LXC creation, then `post_update_to_api()` on completion
|
||||
- **vm-core.func**: Calls `post_to_api_vm()` for VM creation, then `post_update_to_api()` on completion
|
||||
- **install.func / alpine-install.func**: Reports installation status via `post_update_to_api()`
|
||||
|
||||
### With Error Handling
|
||||
- **error_handler.func**: Provides error explanations
|
||||
- **core.func**: Uses error descriptions in silent execution
|
||||
- **Diagnostic reporting**: Tracks error patterns
|
||||
- **error_handler.func**: Uses `explain_exit_code()` for human-readable error messages
|
||||
- **Diagnostic reporting**: PocketBase records track error patterns anonymously
|
||||
|
||||
### External Dependencies
|
||||
- **curl**: HTTP client for API communication
|
||||
- **Community Scripts API**: External API endpoint
|
||||
- **Network connectivity**: Required for API communication
|
||||
- **curl**: HTTP client for PocketBase API communication
|
||||
- **PocketBase**: Backend at `http://db.community-scripts.org`
|
||||
- **Network connectivity**: Required for API communication (failures are silently ignored)
|
||||
|
||||
@ -2,63 +2,88 @@
|
||||
|
||||
## Overview
|
||||
|
||||
This document provides a comprehensive alphabetical reference of all functions in `api.func`, including parameters, dependencies, usage examples, and error handling.
|
||||
This document provides a comprehensive reference of all functions in `api.func`, including parameters, dependencies, usage examples, and error handling. The backend is **PocketBase** hosted at `http://db.community-scripts.org`.
|
||||
|
||||
## Configuration Variables
|
||||
|
||||
| Variable | Value | Description |
|
||||
|----------|-------|-------------|
|
||||
| `PB_URL` | `http://db.community-scripts.org` | PocketBase server URL |
|
||||
| `PB_COLLECTION` | `_dev_telemetry_data` | PocketBase collection name |
|
||||
| `PB_API_URL` | `${PB_URL}/api/collections/${PB_COLLECTION}/records` | Full API endpoint |
|
||||
| `PB_RECORD_ID` | *(runtime)* | Stores the PocketBase record ID returned by POST for later PATCH calls |
|
||||
|
||||
## Function Categories
|
||||
|
||||
### Error Description Functions
|
||||
|
||||
#### `get_error_description()`
|
||||
#### `explain_exit_code()`
|
||||
|
||||
**Purpose**: Convert numeric exit codes to human-readable explanations
|
||||
**Parameters**:
|
||||
- `$1` - Exit code to explain
|
||||
- `$1` — Exit code to explain
|
||||
**Returns**: Human-readable error explanation string
|
||||
**Side Effects**: None
|
||||
**Dependencies**: None
|
||||
**Environment Variables Used**: None
|
||||
|
||||
**Supported Exit Codes**:
|
||||
- **General System**: 0-9, 18, 22, 28, 35, 56, 60, 125-128, 129-143, 152, 255
|
||||
- **LXC-Specific**: 100-101, 200-209
|
||||
- **Docker**: 125
|
||||
> **Note**: `explain_exit_code()` is the **canonical** function for exit-code mapping. It is used by both `api.func` (telemetry) and `error_handler.func` (error display).
|
||||
|
||||
**Supported Exit Code Ranges** (non-overlapping):
|
||||
|
||||
| Range | Category |
|
||||
|-------|----------|
|
||||
| 1–2 | Generic / Shell |
|
||||
| 6, 7, 22, 28, 35 | curl / wget |
|
||||
| 100–102 | APT / Package manager |
|
||||
| 124–143 | System / Signals |
|
||||
| 150–154 | Systemd / Service |
|
||||
| 160–162 | Python / pip / uv |
|
||||
| 170–173 | PostgreSQL |
|
||||
| 180–183 | MySQL / MariaDB |
|
||||
| 190–193 | MongoDB |
|
||||
| 200–231 | Proxmox custom codes |
|
||||
| 243–249 | Node.js / npm |
|
||||
| 255 | DPKG fatal |
|
||||
|
||||
**Usage Example**:
|
||||
```bash
|
||||
error_msg=$(get_error_description 127)
|
||||
error_msg=$(explain_exit_code 127)
|
||||
echo "Error 127: $error_msg"
|
||||
# Output: Error 127: Command not found: Incorrect path or missing dependency.
|
||||
# Output: Error 127: Command not found
|
||||
```
|
||||
|
||||
**Error Code Examples**:
|
||||
```bash
|
||||
get_error_description 0 # " " (space)
|
||||
get_error_description 1 # "General error: An unspecified error occurred."
|
||||
get_error_description 127 # "Command not found: Incorrect path or missing dependency."
|
||||
get_error_description 200 # "LXC creation failed."
|
||||
get_error_description 255 # "Unknown critical error, often due to missing permissions or broken scripts."
|
||||
explain_exit_code 1 # "General error / Operation not permitted"
|
||||
explain_exit_code 22 # "curl: HTTP error returned (404, 429, 500+)"
|
||||
explain_exit_code 127 # "Command not found"
|
||||
explain_exit_code 200 # "Proxmox: Failed to create lock file"
|
||||
explain_exit_code 255 # "DPKG: Fatal internal error"
|
||||
explain_exit_code 999 # "Unknown error"
|
||||
```
|
||||
|
||||
### API Communication Functions
|
||||
|
||||
#### `post_to_api()`
|
||||
**Purpose**: Send LXC container installation data to community-scripts.org API
|
||||
|
||||
**Purpose**: Create an LXC container telemetry record in PocketBase
|
||||
**Parameters**: None (uses environment variables)
|
||||
**Returns**: None
|
||||
**Side Effects**:
|
||||
- Sends HTTP POST request to API
|
||||
- Stores response in RESPONSE variable
|
||||
- Requires curl command and network connectivity
|
||||
- Sends HTTP **POST** to `PB_API_URL`
|
||||
- Stores the returned PocketBase record `id` in `PB_RECORD_ID` for later PATCH updates
|
||||
**Dependencies**: `curl` command
|
||||
**Environment Variables Used**: `DIAGNOSTICS`, `RANDOM_UUID`, `CT_TYPE`, `DISK_SIZE`, `CORE_COUNT`, `RAM_SIZE`, `var_os`, `var_version`, `DISABLEIP6`, `NSAPP`, `METHOD`
|
||||
**Environment Variables Used**: `DIAGNOSTICS`, `RANDOM_UUID`, `CT_TYPE`, `DISK_SIZE`, `CORE_COUNT`, `RAM_SIZE`, `var_os`, `var_version`, `NSAPP`, `METHOD`
|
||||
|
||||
**Prerequisites**:
|
||||
- `curl` command must be available
|
||||
- `DIAGNOSTICS` must be set to "yes"
|
||||
- `curl` must be available
|
||||
- `DIAGNOSTICS` must be `"yes"`
|
||||
- `RANDOM_UUID` must be set and not empty
|
||||
|
||||
**API Endpoint**: `http://api.community-scripts.org/dev/upload`
|
||||
**API Endpoint**: `POST http://db.community-scripts.org/api/collections/_dev_telemetry_data/records`
|
||||
|
||||
**JSON Payload Structure**:
|
||||
**JSON Payload**:
|
||||
```json
|
||||
{
|
||||
"ct_type": 1,
|
||||
@ -68,7 +93,6 @@ get_error_description 255 # "Unknown critical error, often due to missing perm
|
||||
"ram_size": 2048,
|
||||
"os_type": "debian",
|
||||
"os_version": "12",
|
||||
"disableip6": "true",
|
||||
"nsapp": "plex",
|
||||
"method": "install",
|
||||
"pve_version": "8.0",
|
||||
@ -77,6 +101,10 @@ get_error_description 255 # "Unknown critical error, often due to missing perm
|
||||
}
|
||||
```
|
||||
|
||||
**Response Handling**:
|
||||
- On HTTP 200/201, `PB_RECORD_ID` is extracted from the response JSON (`"id"` field)
|
||||
- On failure, the function returns silently without blocking the installation
|
||||
|
||||
**Usage Example**:
|
||||
```bash
|
||||
export DIAGNOSTICS="yes"
|
||||
@ -91,39 +119,39 @@ export NSAPP="plex"
|
||||
export METHOD="install"
|
||||
|
||||
post_to_api
|
||||
# PB_RECORD_ID is now set (e.g. "abc123def456789")
|
||||
```
|
||||
|
||||
#### `post_to_api_vm()`
|
||||
**Purpose**: Send VM installation data to community-scripts.org API
|
||||
|
||||
**Purpose**: Create a VM telemetry record in PocketBase
|
||||
**Parameters**: None (uses environment variables)
|
||||
**Returns**: None
|
||||
**Side Effects**:
|
||||
- Sends HTTP POST request to API
|
||||
- Stores response in RESPONSE variable
|
||||
- Requires curl command and network connectivity
|
||||
- Sends HTTP **POST** to `PB_API_URL`
|
||||
- Stores the returned PocketBase record `id` in `PB_RECORD_ID`
|
||||
**Dependencies**: `curl` command, diagnostics file
|
||||
**Environment Variables Used**: `DIAGNOSTICS`, `RANDOM_UUID`, `DISK_SIZE`, `CORE_COUNT`, `RAM_SIZE`, `var_os`, `var_version`, `NSAPP`, `METHOD`
|
||||
**Environment Variables Used**: `RANDOM_UUID`, `DISK_SIZE`, `CORE_COUNT`, `RAM_SIZE`, `var_os`, `var_version`, `NSAPP`, `METHOD`
|
||||
|
||||
**Prerequisites**:
|
||||
- `/usr/local/community-scripts/diagnostics` file must exist
|
||||
- `DIAGNOSTICS` must be set to "yes" in diagnostics file
|
||||
- `curl` command must be available
|
||||
- `DIAGNOSTICS` must be `"yes"` in that file (read at runtime)
|
||||
- `curl` must be available
|
||||
- `RANDOM_UUID` must be set and not empty
|
||||
|
||||
**API Endpoint**: `http://api.community-scripts.org/dev/upload`
|
||||
**API Endpoint**: `POST http://db.community-scripts.org/api/collections/_dev_telemetry_data/records`
|
||||
|
||||
**JSON Payload Structure**:
|
||||
**JSON Payload**:
|
||||
```json
|
||||
{
|
||||
"ct_type": 2,
|
||||
"type": "vm",
|
||||
"disk_size": 8,
|
||||
"core_count": 2,
|
||||
"ram_size": 2048,
|
||||
"os_type": "debian",
|
||||
"os_version": "12",
|
||||
"disableip6": "",
|
||||
"nsapp": "plex",
|
||||
"disk_size": 20,
|
||||
"core_count": 4,
|
||||
"ram_size": 4096,
|
||||
"os_type": "ubuntu",
|
||||
"os_version": "22.04",
|
||||
"nsapp": "nextcloud",
|
||||
"method": "install",
|
||||
"pve_version": "8.0",
|
||||
"status": "installing",
|
||||
@ -131,50 +159,81 @@ post_to_api
|
||||
}
|
||||
```
|
||||
|
||||
> **Note**: `DISK_SIZE` is stripped of its `G` suffix before sending (e.g. `"20G"` → `20`).
|
||||
|
||||
**Usage Example**:
|
||||
```bash
|
||||
# Create diagnostics file
|
||||
mkdir -p /usr/local/community-scripts
|
||||
echo "DIAGNOSTICS=yes" > /usr/local/community-scripts/diagnostics
|
||||
|
||||
export RANDOM_UUID="$(uuidgen)"
|
||||
export DISK_SIZE="8G"
|
||||
export CORE_COUNT=2
|
||||
export RAM_SIZE=2048
|
||||
export var_os="debian"
|
||||
export var_version="12"
|
||||
export NSAPP="plex"
|
||||
export DISK_SIZE="20G"
|
||||
export CORE_COUNT=4
|
||||
export RAM_SIZE=4096
|
||||
export var_os="ubuntu"
|
||||
export var_version="22.04"
|
||||
export NSAPP="nextcloud"
|
||||
export METHOD="install"
|
||||
|
||||
post_to_api_vm
|
||||
# PB_RECORD_ID is now set
|
||||
```
|
||||
|
||||
#### `post_update_to_api()`
|
||||
**Purpose**: Send installation completion status to community-scripts.org API
|
||||
|
||||
**Purpose**: Update an existing PocketBase record with installation completion status via PATCH
|
||||
**Parameters**:
|
||||
- `$1` - Status ("success" or "failed", default: "failed")
|
||||
- `$2` - Exit code (default: 1)
|
||||
- `$1` — Status (`"done"`, `"success"`, or `"failed"`; default: `"failed"`)
|
||||
- `$2` — Exit code (numeric, default: `1`)
|
||||
**Returns**: None
|
||||
**Side Effects**:
|
||||
- Sends HTTP POST request to API
|
||||
- Sets POST_UPDATE_DONE=true to prevent duplicates
|
||||
- Stores response in RESPONSE variable
|
||||
**Dependencies**: `curl` command, `get_error_description()`
|
||||
**Environment Variables Used**: `DIAGNOSTICS`, `RANDOM_UUID`
|
||||
- Sends HTTP **PATCH** to `PB_API_URL/{record_id}`
|
||||
- Sets `POST_UPDATE_DONE=true` to prevent duplicate calls
|
||||
**Dependencies**: `curl`, `explain_exit_code()`
|
||||
**Environment Variables Used**: `DIAGNOSTICS`, `RANDOM_UUID`, `PB_RECORD_ID`
|
||||
|
||||
**Prerequisites**:
|
||||
- `curl` command must be available
|
||||
- `DIAGNOSTICS` must be set to "yes"
|
||||
- `curl` must be available
|
||||
- `DIAGNOSTICS` must be `"yes"`
|
||||
- `RANDOM_UUID` must be set and not empty
|
||||
- POST_UPDATE_DONE must be false (prevents duplicates)
|
||||
- `POST_UPDATE_DONE` must not be `"true"` (prevents duplicate updates)
|
||||
|
||||
**API Endpoint**: `http://api.community-scripts.org/dev/upload/updatestatus`
|
||||
**Record Lookup**:
|
||||
1. If `PB_RECORD_ID` is already set (from a prior `post_to_api` / `post_to_api_vm` call), it is used directly.
|
||||
2. Otherwise, the function performs a **GET** lookup:
|
||||
```
|
||||
GET PB_API_URL?filter=(random_id='<RANDOM_UUID>')&fields=id&perPage=1
|
||||
```
|
||||
3. If no record is found, the function sets `POST_UPDATE_DONE=true` and returns.
|
||||
|
||||
**JSON Payload Structure**:
|
||||
**Status Mapping** (PocketBase select field values: `installing`, `sucess`, `failed`, `unknown`):
|
||||
|
||||
| Input Status | PocketBase `status` | `exit_code` | `error` |
|
||||
|---|---|---|---|
|
||||
| `"done"` / `"success"` / `"sucess"` | `"sucess"` | `0` | `""` |
|
||||
| `"failed"` | `"failed"` | *from $2* | *from `explain_exit_code()`* |
|
||||
| anything else | `"unknown"` | *from $2* | *from `explain_exit_code()`* |
|
||||
|
||||
> **Note**: The PocketBase schema intentionally spells success as `"sucess"`.
|
||||
|
||||
**API Endpoint**: `PATCH http://db.community-scripts.org/api/collections/_dev_telemetry_data/records/{record_id}`
|
||||
|
||||
**JSON Payload**:
|
||||
```json
|
||||
{
|
||||
"status": "success",
|
||||
"error": "Error description from get_error_description()",
|
||||
"random_id": "uuid-string"
|
||||
"status": "sucess",
|
||||
"error": "",
|
||||
"exit_code": 0
|
||||
}
|
||||
```
|
||||
|
||||
or for failures:
|
||||
```json
|
||||
{
|
||||
"status": "failed",
|
||||
"error": "Command not found",
|
||||
"exit_code": 127
|
||||
}
|
||||
```
|
||||
|
||||
@ -183,10 +242,10 @@ post_to_api_vm
|
||||
export DIAGNOSTICS="yes"
|
||||
export RANDOM_UUID="$(uuidgen)"
|
||||
|
||||
# Report successful installation
|
||||
post_update_to_api "success" 0
|
||||
# After a successful installation
|
||||
post_update_to_api "done" 0
|
||||
|
||||
# Report failed installation
|
||||
# After a failed installation
|
||||
post_update_to_api "failed" 127
|
||||
```
|
||||
|
||||
@ -196,198 +255,250 @@ post_update_to_api "failed" 127
|
||||
```
|
||||
post_to_api()
|
||||
├── Check curl availability
|
||||
├── Check DIAGNOSTICS setting
|
||||
├── Check RANDOM_UUID
|
||||
├── Check DIAGNOSTICS == "yes"
|
||||
├── Check RANDOM_UUID is set
|
||||
├── Get PVE version
|
||||
├── Create JSON payload
|
||||
└── Send HTTP POST request
|
||||
├── Create JSON payload (ct_type=1, type="lxc", status="installing")
|
||||
├── POST to PB_API_URL
|
||||
└── Extract PB_RECORD_ID from response
|
||||
|
||||
post_to_api_vm()
|
||||
├── Check diagnostics file
|
||||
├── Read DIAGNOSTICS from /usr/local/community-scripts/diagnostics
|
||||
├── Check curl availability
|
||||
├── Check DIAGNOSTICS setting
|
||||
├── Check RANDOM_UUID
|
||||
├── Process disk size
|
||||
├── Check DIAGNOSTICS == "yes"
|
||||
├── Check RANDOM_UUID is set
|
||||
├── Strip 'G' suffix from DISK_SIZE
|
||||
├── Get PVE version
|
||||
├── Create JSON payload
|
||||
└── Send HTTP POST request
|
||||
├── Create JSON payload (ct_type=2, type="vm", status="installing")
|
||||
├── POST to PB_API_URL
|
||||
└── Extract PB_RECORD_ID from response
|
||||
|
||||
post_update_to_api()
|
||||
├── Check POST_UPDATE_DONE flag
|
||||
post_update_to_api(status, exit_code)
|
||||
├── Check curl availability
|
||||
├── Check DIAGNOSTICS setting
|
||||
├── Check RANDOM_UUID
|
||||
├── Determine status and exit code
|
||||
├── Get error description
|
||||
├── Create JSON payload
|
||||
├── Send HTTP POST request
|
||||
├── Check POST_UPDATE_DONE flag
|
||||
├── Check DIAGNOSTICS == "yes"
|
||||
├── Check RANDOM_UUID is set
|
||||
├── Map status → pb_status ("done"→"sucess", "failed"→"failed", *→"unknown")
|
||||
├── For failed/unknown: call explain_exit_code(exit_code)
|
||||
├── Resolve record_id (PB_RECORD_ID or GET lookup by random_id)
|
||||
├── PATCH to PB_API_URL/{record_id}
|
||||
└── Set POST_UPDATE_DONE=true
|
||||
```
|
||||
|
||||
### Error Description Flow
|
||||
```
|
||||
get_error_description()
|
||||
├── Match exit code
|
||||
├── Return appropriate description
|
||||
└── Handle unknown codes
|
||||
explain_exit_code(code)
|
||||
├── Match code against case statement (non-overlapping ranges)
|
||||
├── Return description string
|
||||
└── Default: "Unknown error"
|
||||
```
|
||||
|
||||
## Error Code Reference
|
||||
|
||||
### General System Errors
|
||||
### Generic / Shell (1–2)
|
||||
| Code | Description |
|
||||
|------|-------------|
|
||||
| 0 | (space) |
|
||||
| 1 | General error: An unspecified error occurred. |
|
||||
| 2 | Incorrect shell usage or invalid command arguments. |
|
||||
| 3 | Unexecuted function or invalid shell condition. |
|
||||
| 4 | Error opening a file or invalid path. |
|
||||
| 5 | I/O error: An input/output failure occurred. |
|
||||
| 6 | No such device or address. |
|
||||
| 7 | Insufficient memory or resource exhaustion. |
|
||||
| 8 | Non-executable file or invalid file format. |
|
||||
| 9 | Failed child process execution. |
|
||||
| 18 | Connection to a remote server failed. |
|
||||
| 22 | Invalid argument or faulty network connection. |
|
||||
| 28 | No space left on device. |
|
||||
| 35 | Timeout while establishing a connection. |
|
||||
| 56 | Faulty TLS connection. |
|
||||
| 60 | SSL certificate error. |
|
||||
| 1 | General error / Operation not permitted |
|
||||
| 2 | Misuse of shell builtins (e.g. syntax error) |
|
||||
|
||||
### Command Execution Errors
|
||||
### curl / wget (6–35)
|
||||
| Code | Description |
|
||||
|------|-------------|
|
||||
| 125 | Docker error: Container could not start. |
|
||||
| 126 | Command not executable: Incorrect permissions or missing dependencies. |
|
||||
| 127 | Command not found: Incorrect path or missing dependency. |
|
||||
| 128 | Invalid exit signal, e.g., incorrect Git command. |
|
||||
| 6 | curl: DNS resolution failed (could not resolve host) |
|
||||
| 7 | curl: Failed to connect (network unreachable / host down) |
|
||||
| 22 | curl: HTTP error returned (404, 429, 500+) |
|
||||
| 28 | curl: Operation timeout (network slow or server not responding) |
|
||||
| 35 | curl: SSL/TLS handshake failed (certificate error) |
|
||||
|
||||
### Signal Errors
|
||||
### APT / Package Manager (100–102)
|
||||
| Code | Description |
|
||||
|------|-------------|
|
||||
| 129 | Signal 1 (SIGHUP): Process terminated due to hangup. |
|
||||
| 130 | Signal 2 (SIGINT): Manual termination via Ctrl+C. |
|
||||
| 132 | Signal 4 (SIGILL): Illegal machine instruction. |
|
||||
| 133 | Signal 5 (SIGTRAP): Debugging error or invalid breakpoint signal. |
|
||||
| 134 | Signal 6 (SIGABRT): Program aborted itself. |
|
||||
| 135 | Signal 7 (SIGBUS): Memory error, invalid memory address. |
|
||||
| 137 | Signal 9 (SIGKILL): Process forcibly terminated (OOM-killer or 'kill -9'). |
|
||||
| 139 | Signal 11 (SIGSEGV): Segmentation fault, possibly due to invalid pointer access. |
|
||||
| 141 | Signal 13 (SIGPIPE): Pipe closed unexpectedly. |
|
||||
| 143 | Signal 15 (SIGTERM): Process terminated normally. |
|
||||
| 152 | Signal 24 (SIGXCPU): CPU time limit exceeded. |
|
||||
| 100 | APT: Package manager error (broken packages / dependency problems) |
|
||||
| 101 | APT: Configuration error (bad sources.list, malformed config) |
|
||||
| 102 | APT: Lock held by another process (dpkg/apt still running) |
|
||||
|
||||
### LXC-Specific Errors
|
||||
### System / Signals (124–143)
|
||||
| Code | Description |
|
||||
|------|-------------|
|
||||
| 100 | LXC install error: Unexpected error in create_lxc.sh. |
|
||||
| 101 | LXC install error: No network connection detected. |
|
||||
| 200 | LXC creation failed. |
|
||||
| 201 | LXC error: Invalid Storage class. |
|
||||
| 202 | User aborted menu in create_lxc.sh. |
|
||||
| 203 | CTID not set in create_lxc.sh. |
|
||||
| 204 | PCT_OSTYPE not set in create_lxc.sh. |
|
||||
| 205 | CTID cannot be less than 100 in create_lxc.sh. |
|
||||
| 206 | CTID already in use in create_lxc.sh. |
|
||||
| 207 | Template not found in create_lxc.sh. |
|
||||
| 208 | Error downloading template in create_lxc.sh. |
|
||||
| 209 | Container creation failed, but template is intact in create_lxc.sh. |
|
||||
| 124 | Command timed out (timeout command) |
|
||||
| 126 | Command invoked cannot execute (permission problem?) |
|
||||
| 127 | Command not found |
|
||||
| 128 | Invalid argument to exit |
|
||||
| 130 | Terminated by Ctrl+C (SIGINT) |
|
||||
| 134 | Process aborted (SIGABRT — possibly Node.js heap overflow) |
|
||||
| 137 | Killed (SIGKILL / Out of memory?) |
|
||||
| 139 | Segmentation fault (core dumped) |
|
||||
| 141 | Broken pipe (SIGPIPE — output closed prematurely) |
|
||||
| 143 | Terminated (SIGTERM) |
|
||||
|
||||
### Other Errors
|
||||
### Systemd / Service (150–154)
|
||||
| Code | Description |
|
||||
|------|-------------|
|
||||
| 255 | Unknown critical error, often due to missing permissions or broken scripts. |
|
||||
| * | Unknown error code (exit_code). |
|
||||
| 150 | Systemd: Service failed to start |
|
||||
| 151 | Systemd: Service unit not found |
|
||||
| 152 | Permission denied (EACCES) |
|
||||
| 153 | Build/compile failed (make/gcc/cmake) |
|
||||
| 154 | Node.js: Native addon build failed (node-gyp) |
|
||||
|
||||
### Python / pip / uv (160–162)
|
||||
| Code | Description |
|
||||
|------|-------------|
|
||||
| 160 | Python: Virtualenv / uv environment missing or broken |
|
||||
| 161 | Python: Dependency resolution failed |
|
||||
| 162 | Python: Installation aborted (permissions or EXTERNALLY-MANAGED) |
|
||||
|
||||
### PostgreSQL (170–173)
|
||||
| Code | Description |
|
||||
|------|-------------|
|
||||
| 170 | PostgreSQL: Connection failed (server not running / wrong socket) |
|
||||
| 171 | PostgreSQL: Authentication failed (bad user/password) |
|
||||
| 172 | PostgreSQL: Database does not exist |
|
||||
| 173 | PostgreSQL: Fatal error in query / syntax |
|
||||
|
||||
### MySQL / MariaDB (180–183)
|
||||
| Code | Description |
|
||||
|------|-------------|
|
||||
| 180 | MySQL/MariaDB: Connection failed (server not running / wrong socket) |
|
||||
| 181 | MySQL/MariaDB: Authentication failed (bad user/password) |
|
||||
| 182 | MySQL/MariaDB: Database does not exist |
|
||||
| 183 | MySQL/MariaDB: Fatal error in query / syntax |
|
||||
|
||||
### MongoDB (190–193)
|
||||
| Code | Description |
|
||||
|------|-------------|
|
||||
| 190 | MongoDB: Connection failed (server not running) |
|
||||
| 191 | MongoDB: Authentication failed (bad user/password) |
|
||||
| 192 | MongoDB: Database not found |
|
||||
| 193 | MongoDB: Fatal query error |
|
||||
|
||||
### Proxmox Custom Codes (200–231)
|
||||
| Code | Description |
|
||||
|------|-------------|
|
||||
| 200 | Proxmox: Failed to create lock file |
|
||||
| 203 | Proxmox: Missing CTID variable |
|
||||
| 204 | Proxmox: Missing PCT_OSTYPE variable |
|
||||
| 205 | Proxmox: Invalid CTID (<100) |
|
||||
| 206 | Proxmox: CTID already in use |
|
||||
| 207 | Proxmox: Password contains unescaped special characters |
|
||||
| 208 | Proxmox: Invalid configuration (DNS/MAC/Network format) |
|
||||
| 209 | Proxmox: Container creation failed |
|
||||
| 210 | Proxmox: Cluster not quorate |
|
||||
| 211 | Proxmox: Timeout waiting for template lock |
|
||||
| 212 | Proxmox: Storage type 'iscsidirect' does not support containers (VMs only) |
|
||||
| 213 | Proxmox: Storage type does not support 'rootdir' content |
|
||||
| 214 | Proxmox: Not enough storage space |
|
||||
| 215 | Proxmox: Container created but not listed (ghost state) |
|
||||
| 216 | Proxmox: RootFS entry missing in config |
|
||||
| 217 | Proxmox: Storage not accessible |
|
||||
| 218 | Proxmox: Template file corrupted or incomplete |
|
||||
| 219 | Proxmox: CephFS does not support containers — use RBD |
|
||||
| 220 | Proxmox: Unable to resolve template path |
|
||||
| 221 | Proxmox: Template file not readable |
|
||||
| 222 | Proxmox: Template download failed |
|
||||
| 223 | Proxmox: Template not available after download |
|
||||
| 224 | Proxmox: PBS storage is for backups only |
|
||||
| 225 | Proxmox: No template available for OS/Version |
|
||||
| 231 | Proxmox: LXC stack upgrade failed |
|
||||
|
||||
### Node.js / npm (243–249)
|
||||
| Code | Description |
|
||||
|------|-------------|
|
||||
| 243 | Node.js: Out of memory (JavaScript heap out of memory) |
|
||||
| 245 | Node.js: Invalid command-line option |
|
||||
| 246 | Node.js: Internal JavaScript Parse Error |
|
||||
| 247 | Node.js: Fatal internal error |
|
||||
| 248 | Node.js: Invalid C++ addon / N-API failure |
|
||||
| 249 | npm/pnpm/yarn: Unknown fatal error |
|
||||
|
||||
### DPKG (255)
|
||||
| Code | Description |
|
||||
|------|-------------|
|
||||
| 255 | DPKG: Fatal internal error |
|
||||
|
||||
### Default
|
||||
| Code | Description |
|
||||
|------|-------------|
|
||||
| * | Unknown error |
|
||||
|
||||
## Environment Variable Dependencies
|
||||
|
||||
### Required Variables
|
||||
- **`DIAGNOSTICS`**: Enable/disable diagnostic reporting ("yes"/"no")
|
||||
- **`RANDOM_UUID`**: Unique identifier for tracking
|
||||
- **`DIAGNOSTICS`**: Enable/disable diagnostic reporting (`"yes"` / `"no"`)
|
||||
- **`RANDOM_UUID`**: Unique identifier for session tracking
|
||||
|
||||
### Optional Variables
|
||||
- **`CT_TYPE`**: Container type (1 for LXC, 2 for VM)
|
||||
- **`DISK_SIZE`**: Disk size in GB (for VMs, the value may carry a `G` suffix)
|
||||
### Container / VM Variables
|
||||
- **`CT_TYPE`**: Container type (`1` for LXC, `2` for VM)
|
||||
- **`DISK_SIZE`**: Disk size in GB (VMs may include `G` suffix)
|
||||
- **`CORE_COUNT`**: Number of CPU cores
|
||||
- **`RAM_SIZE`**: RAM size in MB
|
||||
- **`var_os`**: Operating system type
|
||||
- **`var_version`**: OS version
|
||||
- **`DISABLEIP6`**: IPv6 disable setting
|
||||
- **`NSAPP`**: Namespace application name
|
||||
- **`NSAPP`**: Application name
|
||||
- **`METHOD`**: Installation method
|
||||
|
||||
### Internal Variables
|
||||
- **`POST_UPDATE_DONE`**: Prevents duplicate status updates
|
||||
- **`API_URL`**: Community scripts API endpoint
|
||||
- **`JSON_PAYLOAD`**: API request payload
|
||||
- **`RESPONSE`**: API response
|
||||
- **`DISK_SIZE_API`**: Processed disk size for VM API
|
||||
- **`PB_URL`**: PocketBase server URL
|
||||
- **`PB_COLLECTION`**: PocketBase collection name
|
||||
- **`PB_API_URL`**: Full PocketBase API endpoint
|
||||
- **`PB_RECORD_ID`**: PocketBase record ID (set after POST, used for PATCH)
|
||||
- **`POST_UPDATE_DONE`**: Flag to prevent duplicate status updates
|
||||
- **`JSON_PAYLOAD`**: API request payload (local to each function)
|
||||
- **`RESPONSE`**: API response (local to each function)
|
||||
|
||||
## Error Handling Patterns
|
||||
|
||||
### API Communication Errors
|
||||
- All API functions handle curl failures gracefully
|
||||
- Network errors don't block installation process
|
||||
- Missing prerequisites cause early return
|
||||
- Duplicate updates are prevented
|
||||
- All API functions return silently on failure — network errors never block installation
|
||||
- Missing prerequisites (no curl, diagnostics disabled, no UUID) cause early return
|
||||
- `POST_UPDATE_DONE` flag prevents duplicate PATCH updates
|
||||
- PocketBase record lookup falls back to `GET ?filter=(random_id='...')` if `PB_RECORD_ID` is unset
|
||||
|
||||
### Error Description Handling
|
||||
- Unknown error codes return generic message
|
||||
- All error codes are handled with case statement
|
||||
- Fallback message includes the actual error code
|
||||
|
||||
### Prerequisites Validation
|
||||
- Check curl availability before API calls
|
||||
- Validate DIAGNOSTICS setting
|
||||
- Ensure RANDOM_UUID is set
|
||||
- Check for duplicate updates
|
||||
- Unknown error codes return `"Unknown error"`
|
||||
- All recognized codes are handled via a `case` statement with non-overlapping ranges
|
||||
- The fallback message is generic (no error code is embedded)
|
||||
|
||||
## Integration Examples
|
||||
|
||||
### With build.func
|
||||
### With build.func (LXC)
|
||||
```bash
|
||||
#!/usr/bin/env bash
|
||||
source core.func
|
||||
source api.func
|
||||
source build.func
|
||||
|
||||
# Set up API reporting
|
||||
export DIAGNOSTICS="yes"
|
||||
export RANDOM_UUID="$(uuidgen)"
|
||||
|
||||
# Report installation start
|
||||
# Report LXC installation start → POST creates record
|
||||
post_to_api
|
||||
|
||||
# Container creation...
|
||||
# ... build.func code ...
|
||||
# ... container creation via build.func ...
|
||||
|
||||
# Report completion
|
||||
# Report completion → PATCH updates record
|
||||
if [[ $? -eq 0 ]]; then
|
||||
post_update_to_api "success" 0
|
||||
post_update_to_api "done" 0
|
||||
else
|
||||
post_update_to_api "failed" $?
|
||||
fi
|
||||
```
|
||||
|
||||
### With vm-core.func
|
||||
### With vm-core.func (VM)
|
||||
```bash
|
||||
#!/usr/bin/env bash
|
||||
source core.func
|
||||
source api.func
|
||||
source vm-core.func
|
||||
|
||||
# Set up API reporting
|
||||
export DIAGNOSTICS="yes"
|
||||
export RANDOM_UUID="$(uuidgen)"
|
||||
|
||||
# Report VM installation start
|
||||
# Report VM installation start → POST creates record
|
||||
post_to_api_vm
|
||||
|
||||
# VM creation...
|
||||
# ... vm-core.func code ...
|
||||
# ... VM creation via vm-core.func ...
|
||||
|
||||
# Report completion
|
||||
post_update_to_api "success" 0
|
||||
# Report completion → PATCH updates record
|
||||
post_update_to_api "done" 0
|
||||
```
|
||||
|
||||
### With error_handler.func
|
||||
@ -397,37 +508,30 @@ source core.func
|
||||
source error_handler.func
|
||||
source api.func
|
||||
|
||||
# Use error descriptions
|
||||
error_code=127
|
||||
error_msg=$(get_error_description $error_code)
|
||||
error_msg=$(explain_exit_code $error_code)
|
||||
echo "Error $error_code: $error_msg"
|
||||
|
||||
# Report error to API
|
||||
# Report error to PocketBase
|
||||
post_update_to_api "failed" $error_code
|
||||
```
|
||||
|
||||
## Best Practices
|
||||
|
||||
### API Usage
|
||||
1. Always check prerequisites before API calls
|
||||
2. Use unique identifiers for tracking
|
||||
3. Handle API failures gracefully
|
||||
4. Don't block installation on API failures
|
||||
1. Always check prerequisites before API calls (handled internally by each function)
|
||||
2. Call `post_to_api` / `post_to_api_vm` **once** at installation start to get a `PB_RECORD_ID`
|
||||
3. Call `post_update_to_api` **once** at the end to finalize the record via PATCH
|
||||
4. Never block the installation on API failures
|
||||
|
||||
### Error Reporting
|
||||
1. Use appropriate error codes
|
||||
2. Provide meaningful error descriptions
|
||||
3. Report both success and failure cases
|
||||
4. Prevent duplicate status updates
|
||||
1. Use `explain_exit_code()` for human-readable error messages
|
||||
2. Pass the actual numeric exit code to `post_update_to_api`
|
||||
3. Report both success (`"done"`) and failure (`"failed"`) cases
|
||||
4. The `POST_UPDATE_DONE` flag automatically prevents duplicate updates
|
||||
|
||||
### Diagnostic Reporting
|
||||
1. Respect user privacy settings
|
||||
2. Only send data when diagnostics enabled
|
||||
3. Use anonymous tracking identifiers
|
||||
4. Include relevant system information
|
||||
|
||||
### Error Handling
|
||||
1. Handle unknown error codes gracefully
|
||||
2. Provide fallback error messages
|
||||
3. Include error code in unknown error messages
|
||||
4. Use consistent error message format
|
||||
1. Respect user privacy — only send data when `DIAGNOSTICS="yes"`
|
||||
2. Use anonymous random UUIDs for session tracking (no personal data)
|
||||
3. Include relevant system information (PVE version, OS, app name)
|
||||
4. The diagnostics file at `/usr/local/community-scripts/diagnostics` controls VM reporting
|
||||
|
||||
@ -2,26 +2,42 @@
|
||||
|
||||
## Overview
|
||||
|
||||
This document describes how `api.func` integrates with other components in the Proxmox Community Scripts project, including dependencies, data flow, and API surface.
|
||||
This document describes how `api.func` integrates with other components in the Proxmox Community Scripts project. The telemetry backend is **PocketBase** at `http://db.community-scripts.org`, using the `_dev_telemetry_data` collection.
|
||||
|
||||
## Architecture
|
||||
|
||||
```
|
||||
Installation Scripts ──► api.func ──► PocketBase (db.community-scripts.org)
|
||||
│
|
||||
├─ POST → create record (status: "installing")
|
||||
├─ PATCH → update record (status: "sucess"/"failed")
|
||||
└─ GET → lookup record by random_id (fallback)
|
||||
```
|
||||
|
||||
### Key Design Points
|
||||
- **POST** creates a new telemetry record and returns a PocketBase `id`
|
||||
- **PATCH** updates the existing record using that `id` (or a GET lookup by `random_id`)
|
||||
- All communication is fire-and-forget — failures never block the installation
|
||||
- `explain_exit_code()` is the canonical function for exit-code-to-description mapping
|
||||
|
||||
## Dependencies
|
||||
|
||||
### External Dependencies
|
||||
|
||||
#### Required Commands
|
||||
- **`curl`**: HTTP client for API communication
|
||||
- **`uuidgen`**: Generate unique identifiers (optional, can use other methods)
|
||||
- **`curl`**: HTTP client for PocketBase API communication
|
||||
|
||||
#### Optional Commands
|
||||
- **None**: No other external command dependencies
|
||||
- **`uuidgen`**: Generate unique identifiers (any UUID source works)
|
||||
- **`pveversion`**: Retrieve Proxmox VE version (gracefully skipped if missing)
|
||||
|
||||
### Internal Dependencies
|
||||
|
||||
#### Environment Variables from Other Scripts
|
||||
- **build.func**: Provides container creation variables
|
||||
- **build.func**: Provides container creation variables (`CT_TYPE`, `DISK_SIZE`, etc.)
|
||||
- **vm-core.func**: Provides VM creation variables
|
||||
- **core.func**: Provides system information variables
|
||||
- **Installation scripts**: Provide application-specific variables
|
||||
- **core.func**: Provides system information
|
||||
- **Installation scripts**: Provide application-specific variables (`NSAPP`, `METHOD`)
|
||||
|
||||
## Integration Points
|
||||
|
||||
@ -29,48 +45,41 @@ This document describes how `api.func` integrates with other components in the P
|
||||
|
||||
#### LXC Container Reporting
|
||||
```bash
|
||||
# build.func uses api.func for container reporting
|
||||
source core.func
|
||||
source api.func
|
||||
source build.func
|
||||
|
||||
# Set up API reporting
|
||||
export DIAGNOSTICS="yes"
|
||||
export RANDOM_UUID="$(uuidgen)"
|
||||
|
||||
# Container creation with API reporting
|
||||
create_container() {
|
||||
# Set container parameters
|
||||
export CT_TYPE=1
|
||||
export DISK_SIZE="$var_disk"
|
||||
export CORE_COUNT="$var_cpu"
|
||||
export RAM_SIZE="$var_ram"
|
||||
export var_os="$var_os"
|
||||
export var_version="$var_version"
|
||||
export NSAPP="$APP"
|
||||
export METHOD="install"
|
||||
# Set container parameters
|
||||
export CT_TYPE=1
|
||||
export DISK_SIZE="$var_disk"
|
||||
export CORE_COUNT="$var_cpu"
|
||||
export RAM_SIZE="$var_ram"
|
||||
export var_os="$var_os"
|
||||
export var_version="$var_version"
|
||||
export NSAPP="$APP"
|
||||
export METHOD="install"
|
||||
|
||||
# Report installation start
|
||||
post_to_api
|
||||
# POST → creates record in PocketBase, saves PB_RECORD_ID
|
||||
post_to_api
|
||||
|
||||
# Container creation using build.func
|
||||
# ... build.func container creation logic ...
|
||||
# ... container creation via build.func ...
|
||||
|
||||
# Report completion
|
||||
if [[ $? -eq 0 ]]; then
|
||||
post_update_to_api "success" 0
|
||||
else
|
||||
post_update_to_api "failed" $?
|
||||
fi
|
||||
}
|
||||
# PATCH → updates the record with final status
|
||||
if [[ $? -eq 0 ]]; then
|
||||
post_update_to_api "done" 0
|
||||
else
|
||||
post_update_to_api "failed" $?
|
||||
fi
|
||||
```
|
||||
|
||||
#### Error Reporting Integration
|
||||
```bash
|
||||
# build.func uses api.func for error reporting
|
||||
handle_container_error() {
|
||||
local exit_code=$1
|
||||
local error_msg=$(get_error_description $exit_code)
|
||||
local error_msg=$(explain_exit_code $exit_code)
|
||||
|
||||
echo "Container creation failed: $error_msg"
|
||||
post_update_to_api "failed" $exit_code
|
||||
@ -81,93 +90,54 @@ handle_container_error() {
|
||||
|
||||
#### VM Installation Reporting
|
||||
```bash
|
||||
# vm-core.func uses api.func for VM reporting
|
||||
source core.func
|
||||
source api.func
|
||||
source vm-core.func
|
||||
|
||||
# Set up VM API reporting
|
||||
# VM reads DIAGNOSTICS from file
|
||||
mkdir -p /usr/local/community-scripts
|
||||
echo "DIAGNOSTICS=yes" > /usr/local/community-scripts/diagnostics
|
||||
|
||||
export RANDOM_UUID="$(uuidgen)"
|
||||
|
||||
# VM creation with API reporting
|
||||
create_vm() {
|
||||
# Set VM parameters
|
||||
export DISK_SIZE="${var_disk}G"
|
||||
export CORE_COUNT="$var_cpu"
|
||||
export RAM_SIZE="$var_ram"
|
||||
export var_os="$var_os"
|
||||
export var_version="$var_version"
|
||||
export NSAPP="$APP"
|
||||
export METHOD="install"
|
||||
# Set VM parameters
|
||||
export DISK_SIZE="${var_disk}G"
|
||||
export CORE_COUNT="$var_cpu"
|
||||
export RAM_SIZE="$var_ram"
|
||||
export var_os="$var_os"
|
||||
export var_version="$var_version"
|
||||
export NSAPP="$APP"
|
||||
export METHOD="install"
|
||||
|
||||
# Report VM installation start
|
||||
post_to_api_vm
|
||||
# POST → creates record in PocketBase (ct_type=2, type="vm")
|
||||
post_to_api_vm
|
||||
|
||||
# VM creation using vm-core.func
|
||||
# ... vm-core.func VM creation logic ...
|
||||
# ... VM creation via vm-core.func ...
|
||||
|
||||
# Report completion
|
||||
post_update_to_api "success" 0
|
||||
}
|
||||
```
|
||||
|
||||
### With core.func
|
||||
|
||||
#### System Information Integration
|
||||
```bash
|
||||
# core.func provides system information for api.func
|
||||
source core.func
|
||||
source api.func
|
||||
|
||||
# Get system information for API reporting
|
||||
get_system_info_for_api() {
|
||||
# Get PVE version using core.func utilities
|
||||
local pve_version=$(pveversion | awk -F'[/ ]' '{print $2}')
|
||||
|
||||
# Set API parameters
|
||||
export var_os="$var_os"
|
||||
export var_version="$var_version"
|
||||
|
||||
# Use core.func error handling with api.func reporting
|
||||
if silent apt-get update; then
|
||||
post_update_to_api "success" 0
|
||||
else
|
||||
post_update_to_api "failed" $?
|
||||
fi
|
||||
}
|
||||
# PATCH → finalizes record
|
||||
post_update_to_api "done" 0
|
||||
```
|
||||
|
||||
### With error_handler.func
|
||||
|
||||
#### Error Description Integration
|
||||
```bash
|
||||
# error_handler.func uses api.func for error descriptions
|
||||
source core.func
|
||||
source error_handler.func
|
||||
source api.func
|
||||
|
||||
# Enhanced error handler with API reporting
|
||||
enhanced_error_handler() {
|
||||
local exit_code=${1:-$?}
|
||||
local command=${2:-${BASH_COMMAND:-unknown}}
|
||||
|
||||
# Get error description from api.func
|
||||
local error_msg=$(get_error_description $exit_code)
|
||||
# explain_exit_code() is the canonical error description function
|
||||
local error_msg=$(explain_exit_code $exit_code)
|
||||
|
||||
# Display error information
|
||||
echo "Error $exit_code: $error_msg"
|
||||
echo "Command: $command"
|
||||
|
||||
# Report error to API
|
||||
export DIAGNOSTICS="yes"
|
||||
export RANDOM_UUID="$(uuidgen)"
|
||||
# PATCH the telemetry record with failure details
|
||||
post_update_to_api "failed" $exit_code
|
||||
|
||||
# Use standard error handler
|
||||
error_handler $exit_code $command
|
||||
}
|
||||
```
|
||||
|
||||
@ -175,32 +145,28 @@ enhanced_error_handler() {
|
||||
|
||||
#### Installation Process Reporting
|
||||
```bash
|
||||
# install.func uses api.func for installation reporting
|
||||
source core.func
|
||||
source api.func
|
||||
source install.func
|
||||
|
||||
# Installation with API reporting
|
||||
install_package_with_reporting() {
|
||||
local package="$1"
|
||||
|
||||
# Set up API reporting
|
||||
export DIAGNOSTICS="yes"
|
||||
export RANDOM_UUID="$(uuidgen)"
|
||||
export NSAPP="$package"
|
||||
export METHOD="install"
|
||||
|
||||
# Report installation start
|
||||
# POST → create telemetry record
|
||||
post_to_api
|
||||
|
||||
# Package installation using install.func
|
||||
if install_package "$package"; then
|
||||
echo "$package installed successfully"
|
||||
post_update_to_api "success" 0
|
||||
post_update_to_api "done" 0
|
||||
return 0
|
||||
else
|
||||
local exit_code=$?
|
||||
local error_msg=$(get_error_description $exit_code)
|
||||
local error_msg=$(explain_exit_code $exit_code)
|
||||
echo "$package installation failed: $error_msg"
|
||||
post_update_to_api "failed" $exit_code
|
||||
return $exit_code
|
||||
@ -208,270 +174,105 @@ install_package_with_reporting() {
|
||||
}
|
||||
```
|
||||
|
||||
### With alpine-install.func
|
||||
|
||||
#### Alpine Installation Reporting
|
||||
```bash
|
||||
# alpine-install.func uses api.func for Alpine reporting
|
||||
source core.func
|
||||
source api.func
|
||||
source alpine-install.func
|
||||
|
||||
# Alpine installation with API reporting
|
||||
install_alpine_with_reporting() {
|
||||
local app="$1"
|
||||
|
||||
# Set up API reporting
|
||||
export DIAGNOSTICS="yes"
|
||||
export RANDOM_UUID="$(uuidgen)"
|
||||
export NSAPP="$app"
|
||||
export METHOD="install"
|
||||
export var_os="alpine"
|
||||
|
||||
# Report Alpine installation start
|
||||
post_to_api
|
||||
|
||||
# Alpine installation using alpine-install.func
|
||||
if install_alpine_app "$app"; then
|
||||
echo "Alpine $app installed successfully"
|
||||
post_update_to_api "success" 0
|
||||
return 0
|
||||
else
|
||||
local exit_code=$?
|
||||
local error_msg=$(get_error_description $exit_code)
|
||||
echo "Alpine $app installation failed: $error_msg"
|
||||
post_update_to_api "failed" $exit_code
|
||||
return $exit_code
|
||||
fi
|
||||
}
|
||||
```
|
||||
|
||||
### With alpine-tools.func
|
||||
|
||||
#### Alpine Tools Reporting
|
||||
```bash
|
||||
# alpine-tools.func uses api.func for Alpine tools reporting
|
||||
source core.func
|
||||
source api.func
|
||||
source alpine-tools.func
|
||||
|
||||
# Alpine tools with API reporting
|
||||
run_alpine_tool_with_reporting() {
|
||||
local tool="$1"
|
||||
|
||||
# Set up API reporting
|
||||
export DIAGNOSTICS="yes"
|
||||
export RANDOM_UUID="$(uuidgen)"
|
||||
export NSAPP="alpine-tools"
|
||||
export METHOD="tool"
|
||||
|
||||
# Report tool execution start
|
||||
post_to_api
|
||||
|
||||
# Run Alpine tool using alpine-tools.func
|
||||
if run_alpine_tool "$tool"; then
|
||||
echo "Alpine tool $tool executed successfully"
|
||||
post_update_to_api "success" 0
|
||||
return 0
|
||||
else
|
||||
local exit_code=$?
|
||||
local error_msg=$(get_error_description $exit_code)
|
||||
echo "Alpine tool $tool failed: $error_msg"
|
||||
post_update_to_api "failed" $exit_code
|
||||
return $exit_code
|
||||
fi
|
||||
}
|
||||
```
|
||||
|
||||
### With passthrough.func
|
||||
|
||||
#### Hardware Passthrough Reporting
|
||||
```bash
|
||||
# passthrough.func uses api.func for hardware reporting
|
||||
source core.func
|
||||
source api.func
|
||||
source passthrough.func
|
||||
|
||||
# Hardware passthrough with API reporting
|
||||
configure_passthrough_with_reporting() {
|
||||
local hardware_type="$1"
|
||||
|
||||
# Set up API reporting
|
||||
export DIAGNOSTICS="yes"
|
||||
export RANDOM_UUID="$(uuidgen)"
|
||||
export NSAPP="passthrough"
|
||||
export METHOD="hardware"
|
||||
|
||||
# Report passthrough configuration start
|
||||
post_to_api
|
||||
|
||||
# Configure passthrough using passthrough.func
|
||||
if configure_passthrough "$hardware_type"; then
|
||||
echo "Hardware passthrough configured successfully"
|
||||
post_update_to_api "success" 0
|
||||
return 0
|
||||
else
|
||||
local exit_code=$?
|
||||
local error_msg=$(get_error_description $exit_code)
|
||||
echo "Hardware passthrough failed: $error_msg"
|
||||
post_update_to_api "failed" $exit_code
|
||||
return $exit_code
|
||||
fi
|
||||
}
|
||||
```
|
||||
|
||||
### With tools.func
|
||||
|
||||
#### Maintenance Operations Reporting
|
||||
```bash
|
||||
# tools.func uses api.func for maintenance reporting
|
||||
source core.func
|
||||
source api.func
|
||||
source tools.func
|
||||
|
||||
# Maintenance operations with API reporting
|
||||
run_maintenance_with_reporting() {
|
||||
local operation="$1"
|
||||
|
||||
# Set up API reporting
|
||||
export DIAGNOSTICS="yes"
|
||||
export RANDOM_UUID="$(uuidgen)"
|
||||
export NSAPP="maintenance"
|
||||
export METHOD="tool"
|
||||
|
||||
# Report maintenance start
|
||||
post_to_api
|
||||
|
||||
# Run maintenance using tools.func
|
||||
if run_maintenance_operation "$operation"; then
|
||||
echo "Maintenance operation $operation completed successfully"
|
||||
post_update_to_api "success" 0
|
||||
return 0
|
||||
else
|
||||
local exit_code=$?
|
||||
local error_msg=$(get_error_description $exit_code)
|
||||
echo "Maintenance operation $operation failed: $error_msg"
|
||||
post_update_to_api "failed" $exit_code
|
||||
return $exit_code
|
||||
fi
|
||||
}
|
||||
```
|
||||
|
||||
## Data Flow
|
||||
|
||||
### Input Data
|
||||
|
||||
#### Environment Variables from Other Scripts
|
||||
- **`CT_TYPE`**: Container type (1 for LXC, 2 for VM)
|
||||
- **`DISK_SIZE`**: Disk size in GB
|
||||
- **`CORE_COUNT`**: Number of CPU cores
|
||||
- **`RAM_SIZE`**: RAM size in MB
|
||||
- **`var_os`**: Operating system type
|
||||
- **`var_version`**: OS version
|
||||
- **`DISABLEIP6`**: IPv6 disable setting
|
||||
- **`NSAPP`**: Namespace application name
|
||||
- **`METHOD`**: Installation method
|
||||
- **`DIAGNOSTICS`**: Enable/disable diagnostic reporting
|
||||
- **`RANDOM_UUID`**: Unique identifier for tracking
|
||||
#### Environment Variables
|
||||
| Variable | Source | Description |
|
||||
|----------|--------|-------------|
|
||||
| `CT_TYPE` | build.func | Container type (1=LXC, 2=VM) |
|
||||
| `DISK_SIZE` | build.func / vm-core.func | Disk size in GB (VMs may have `G` suffix) |
|
||||
| `CORE_COUNT` | build.func / vm-core.func | CPU core count |
|
||||
| `RAM_SIZE` | build.func / vm-core.func | RAM in MB |
|
||||
| `var_os` | core.func | Operating system type |
|
||||
| `var_version` | core.func | OS version |
|
||||
| `NSAPP` | Installation scripts | Application name |
|
||||
| `METHOD` | Installation scripts | Installation method |
|
||||
| `DIAGNOSTICS` | User config / diagnostics file | Enable/disable telemetry |
|
||||
| `RANDOM_UUID` | Caller | Session tracking UUID |
|
||||
|
||||
#### Function Parameters
|
||||
- **Exit codes**: Passed to `get_error_description()` and `post_update_to_api()`
|
||||
- **Status information**: Passed to `post_update_to_api()`
|
||||
- **API endpoints**: Hardcoded in functions
|
||||
- **Exit codes**: Passed to `explain_exit_code()` and `post_update_to_api()`
|
||||
- **Status strings**: Passed to `post_update_to_api()` (`"done"`, `"failed"`)
|
||||
|
||||
#### System Information
|
||||
- **PVE version**: Retrieved from `pveversion` command
|
||||
- **Disk size processing**: Processed for VM API (removes 'G' suffix)
|
||||
- **Error codes**: Retrieved from command exit codes
|
||||
- **PVE version**: Retrieved from `pveversion` command at runtime
|
||||
- **Disk size**: VM disk size is stripped of `G` suffix before sending
|
||||
|
||||
### Processing Data
|
||||
### Processing
|
||||
|
||||
#### API Request Preparation
|
||||
- **JSON payload creation**: Format data for API consumption
|
||||
- **Data validation**: Ensure required fields are present
|
||||
- **Error handling**: Handle missing or invalid data
|
||||
- **Content type setting**: Set appropriate HTTP headers
|
||||
#### Record Creation (POST)
|
||||
1. Validate prerequisites (curl, DIAGNOSTICS, RANDOM_UUID)
|
||||
2. Gather PVE version
|
||||
3. Build JSON payload with all telemetry fields
|
||||
4. `POST` to `PB_API_URL`
|
||||
5. Extract `PB_RECORD_ID` from PocketBase response (HTTP 200/201)
|
||||
|
||||
#### Error Processing
|
||||
- **Error code mapping**: Map numeric codes to descriptions
|
||||
- **Error message formatting**: Format error descriptions
|
||||
- **Unknown error handling**: Handle unrecognized error codes
|
||||
- **Fallback messages**: Provide default error messages
|
||||
|
||||
#### API Communication
|
||||
- **HTTP request preparation**: Prepare curl commands
|
||||
- **Response handling**: Capture HTTP response codes
|
||||
- **Error handling**: Handle network and API errors
|
||||
- **Duplicate prevention**: Prevent duplicate status updates
|
||||
#### Record Update (PATCH)
|
||||
1. Validate prerequisites + check `POST_UPDATE_DONE` flag
|
||||
2. Map status string → PocketBase select value (`"done"` → `"sucess"`)
|
||||
3. For failures: call `explain_exit_code()` to get error description
|
||||
4. Resolve record ID: use `PB_RECORD_ID` or fall back to GET lookup
|
||||
5. `PATCH` to `PB_API_URL/{record_id}` with status, error, exit_code
|
||||
6. Set `POST_UPDATE_DONE=true`
|
||||
|
||||
### Output Data
|
||||
|
||||
#### API Communication
|
||||
- **HTTP requests**: Sent to community-scripts.org API
|
||||
- **Response codes**: Captured from API responses
|
||||
- **Error information**: Reported to API
|
||||
- **Status updates**: Sent to API
|
||||
#### PocketBase Records
|
||||
- **POST response**: Returns record with `id` field → stored in `PB_RECORD_ID`
|
||||
- **PATCH response**: Updates record fields (status, error, exit_code)
|
||||
- **GET response**: Used for record ID lookup by `random_id` filter
|
||||
|
||||
#### Error Information
|
||||
- **Error descriptions**: Human-readable error messages
|
||||
- **Error codes**: Mapped to descriptions
|
||||
- **Context information**: Error context and details
|
||||
- **Fallback messages**: Default error messages
|
||||
|
||||
#### System State
|
||||
- **POST_UPDATE_DONE**: Prevents duplicate updates
|
||||
- **RESPONSE**: Stores API response
|
||||
- **JSON_PAYLOAD**: Stores formatted API data
|
||||
- **API_URL**: Stores API endpoint
|
||||
#### Internal State
|
||||
| Variable | Description |
|
||||
|----------|-------------|
|
||||
| `PB_RECORD_ID` | PocketBase record ID for PATCH calls |
|
||||
| `POST_UPDATE_DONE` | Flag preventing duplicate updates |
|
||||
|
||||
## API Surface
|
||||
|
||||
### Public Functions
|
||||
|
||||
#### Error Description
|
||||
- **`get_error_description()`**: Convert exit codes to explanations
|
||||
- **Parameters**: Exit code to explain
|
||||
- **Returns**: Human-readable explanation string
|
||||
- **Usage**: Called by other functions and scripts
|
||||
| Function | Purpose | HTTP Method |
|
||||
|----------|---------|-------------|
|
||||
| `explain_exit_code(code)` | Map exit code to description | — |
|
||||
| `post_to_api()` | Create LXC telemetry record | POST |
|
||||
| `post_to_api_vm()` | Create VM telemetry record | POST |
|
||||
| `post_update_to_api(status, exit_code)` | Update record with final status | PATCH |
|
||||
|
||||
#### API Communication
|
||||
- **`post_to_api()`**: Send LXC installation data
|
||||
- **`post_to_api_vm()`**: Send VM installation data
|
||||
- **`post_update_to_api()`**: Send status updates
|
||||
- **Parameters**: Status and exit code (for updates)
|
||||
- **Returns**: None
|
||||
- **Usage**: Called by installation scripts
|
||||
### PocketBase Collection Schema
|
||||
|
||||
### Internal Functions
|
||||
Collection: `_dev_telemetry_data`
|
||||
|
||||
#### None
|
||||
- All functions in api.func are public
|
||||
- No internal helper functions
|
||||
- Direct implementation of all functionality
|
||||
| Field | Type | Required | Description |
|
||||
|-------|------|----------|-------------|
|
||||
| `id` | text (auto) | yes | PocketBase record ID (15 chars) |
|
||||
| `random_id` | text | yes | Session UUID (min 8 chars, unique) |
|
||||
| `type` | select | yes | `"lxc"`, `"vm"`, `"addon"`, `"pve"` |
|
||||
| `ct_type` | number | yes | 1 (LXC) or 2 (VM) |
|
||||
| `nsapp` | text | yes | Application name |
|
||||
| `status` | select | yes | `"installing"`, `"sucess"`, `"failed"`, `"unknown"` |
|
||||
| `disk_size` | number | no | Disk size in GB |
|
||||
| `core_count` | number | no | CPU cores |
|
||||
| `ram_size` | number | no | RAM in MB |
|
||||
| `os_type` | text | no | OS type |
|
||||
| `os_version` | text | no | OS version |
|
||||
| `pve_version` | text | no | Proxmox VE version |
|
||||
| `method` | text | no | Installation method |
|
||||
| `error` | text | no | Error description |
|
||||
| `exit_code` | number | no | Numeric exit code |
|
||||
| `created` | autodate | auto | Record creation timestamp |
|
||||
| `updated` | autodate | auto | Last update timestamp |
|
||||
|
||||
### Global Variables
|
||||
> **Note**: The `status` field intentionally uses the spelling `"sucess"` (not `"success"`).
|
||||
|
||||
#### Configuration Variables
|
||||
- **`DIAGNOSTICS`**: Diagnostic reporting setting
|
||||
- **`RANDOM_UUID`**: Unique tracking identifier
|
||||
- **`POST_UPDATE_DONE`**: Duplicate update prevention
|
||||
|
||||
#### Data Variables
|
||||
- **`CT_TYPE`**: Container type
|
||||
- **`DISK_SIZE`**: Disk size
|
||||
- **`CORE_COUNT`**: CPU core count
|
||||
- **`RAM_SIZE`**: RAM size
|
||||
- **`var_os`**: Operating system
|
||||
- **`var_version`**: OS version
|
||||
- **`DISABLEIP6`**: IPv6 setting
|
||||
- **`NSAPP`**: Application namespace
|
||||
- **`METHOD`**: Installation method
|
||||
|
||||
#### Internal Variables
|
||||
- **`API_URL`**: API endpoint URL
|
||||
- **`JSON_PAYLOAD`**: API request payload
|
||||
- **`RESPONSE`**: API response
|
||||
- **`DISK_SIZE_API`**: Processed disk size for VM API
|
||||
### Configuration Variables
|
||||
| Variable | Value |
|
||||
|----------|-------|
|
||||
| `PB_URL` | `http://db.community-scripts.org` |
|
||||
| `PB_COLLECTION` | `_dev_telemetry_data` |
|
||||
| `PB_API_URL` | `${PB_URL}/api/collections/${PB_COLLECTION}/records` |
|
||||
|
||||
## Integration Patterns
|
||||
|
||||
@ -479,45 +280,39 @@ run_maintenance_with_reporting() {
|
||||
|
||||
```bash
|
||||
#!/usr/bin/env bash
|
||||
# Standard integration pattern
|
||||
|
||||
# 1. Source core.func first
|
||||
# 1. Source dependencies
|
||||
source core.func
|
||||
|
||||
# 2. Source api.func
|
||||
source api.func
|
||||
|
||||
# 3. Set up API reporting
|
||||
# 2. Enable telemetry
|
||||
export DIAGNOSTICS="yes"
|
||||
export RANDOM_UUID="$(uuidgen)"
|
||||
|
||||
# 4. Set application parameters
|
||||
# 3. Set application parameters
|
||||
export NSAPP="$APP"
|
||||
export METHOD="install"
|
||||
|
||||
# 5. Report installation start
|
||||
# 4. POST → create telemetry record in PocketBase
|
||||
post_to_api
|
||||
|
||||
# 6. Perform installation
|
||||
# 5. Perform installation
|
||||
# ... installation logic ...
|
||||
|
||||
# 7. Report completion
|
||||
post_update_to_api "success" 0
|
||||
# 6. PATCH → update record with final status
|
||||
post_update_to_api "done" 0
|
||||
```
|
||||
|
||||
### Minimal Integration Pattern
|
||||
|
||||
```bash
|
||||
#!/usr/bin/env bash
|
||||
# Minimal integration pattern
|
||||
|
||||
source api.func
|
||||
|
||||
# Basic error reporting
|
||||
export DIAGNOSTICS="yes"
|
||||
export RANDOM_UUID="$(uuidgen)"
|
||||
|
||||
# Report failure
|
||||
# Report failure (PATCH via record lookup)
|
||||
post_update_to_api "failed" 127
|
||||
```
|
||||
|
||||
@ -525,13 +320,10 @@ post_update_to_api "failed" 127
|
||||
|
||||
```bash
|
||||
#!/usr/bin/env bash
|
||||
# Advanced integration pattern
|
||||
|
||||
source core.func
|
||||
source api.func
|
||||
source error_handler.func
|
||||
|
||||
# Set up comprehensive API reporting
|
||||
export DIAGNOSTICS="yes"
|
||||
export RANDOM_UUID="$(uuidgen)"
|
||||
export CT_TYPE=1
|
||||
@ -542,12 +334,12 @@ export var_os="debian"
|
||||
export var_version="12"
|
||||
export METHOD="install"
|
||||
|
||||
# Enhanced error handling with API reporting
|
||||
# Enhanced error handler with PocketBase reporting
|
||||
enhanced_error_handler() {
|
||||
local exit_code=${1:-$?}
|
||||
local command=${2:-${BASH_COMMAND:-unknown}}
|
||||
|
||||
local error_msg=$(get_error_description $exit_code)
|
||||
local error_msg=$(explain_exit_code $exit_code)
|
||||
echo "Error $exit_code: $error_msg"
|
||||
|
||||
post_update_to_api "failed" $exit_code
|
||||
@ -556,88 +348,39 @@ enhanced_error_handler() {
|
||||
|
||||
trap 'enhanced_error_handler' ERR
|
||||
|
||||
# Advanced operations with API reporting
|
||||
# POST → create record
|
||||
post_to_api
|
||||
|
||||
# ... operations ...
|
||||
post_update_to_api "success" 0
|
||||
|
||||
# PATCH → finalize
|
||||
post_update_to_api "done" 0
|
||||
```
|
||||
|
||||
## Error Handling Integration
|
||||
|
||||
### Automatic Error Reporting
|
||||
- **Error Descriptions**: Provides human-readable error messages
|
||||
- **API Integration**: Reports errors to community-scripts.org API
|
||||
- **Error Tracking**: Tracks error patterns for project improvement
|
||||
- **Diagnostic Data**: Contributes to anonymous usage analytics
|
||||
|
||||
### Manual Error Reporting
|
||||
- **Custom Error Codes**: Use appropriate error codes for different scenarios
|
||||
- **Error Context**: Provide context information for errors
|
||||
- **Status Updates**: Report both success and failure cases
|
||||
- **Error Analysis**: Analyze error patterns and trends
|
||||
- **Error Descriptions**: `explain_exit_code()` provides human-readable messages for all recognized exit codes
|
||||
- **PocketBase Integration**: Errors are recorded via PATCH with `status`, `error`, and `exit_code` fields
|
||||
- **Error Tracking**: Anonymous telemetry helps track common failure patterns
|
||||
- **Diagnostic Data**: Contributes to project-wide analytics without PII
|
||||
|
||||
### API Communication Errors
|
||||
- **Network Failures**: Handle API communication failures gracefully
|
||||
- **Missing Prerequisites**: Check prerequisites before API calls
|
||||
- **Duplicate Prevention**: Prevent duplicate status updates
|
||||
- **Error Recovery**: Handle API errors without blocking installation
|
||||
- **Network Failures**: All API calls use `|| true` — failures are swallowed silently
|
||||
- **Missing Prerequisites**: Functions return early if curl, DIAGNOSTICS, or UUID are missing
|
||||
- **Duplicate Prevention**: `POST_UPDATE_DONE` flag ensures only one PATCH per session
|
||||
- **Record Lookup Fallback**: If `PB_RECORD_ID` is unset, a GET filter query resolves the record
|
||||
|
||||
## Performance Considerations
|
||||
|
||||
### API Communication Overhead
|
||||
- **Minimal Impact**: API calls add minimal overhead
|
||||
- **Asynchronous**: API calls don't block installation process
|
||||
- **Error Handling**: API failures don't affect installation
|
||||
- **Optional**: API reporting is optional and can be disabled
|
||||
- **Minimal Impact**: Only 2 HTTP calls per installation (1 POST + 1 PATCH)
|
||||
- **Non-blocking**: API failures never block the installation process
|
||||
- **Fire-and-forget**: curl stderr is suppressed (`2>/dev/null`)
|
||||
- **Optional**: Telemetry is entirely opt-in via `DIAGNOSTICS` setting
|
||||
|
||||
### Memory Usage
|
||||
- **Minimal Footprint**: API functions use minimal memory
|
||||
- **Variable Reuse**: Global variables reused across functions
|
||||
- **No Memory Leaks**: Proper cleanup prevents memory leaks
|
||||
- **Efficient Processing**: Efficient JSON payload creation
|
||||
|
||||
### Execution Speed
|
||||
- **Fast API Calls**: Quick API communication
|
||||
- **Efficient Error Processing**: Fast error code processing
|
||||
- **Minimal Delay**: Minimal delay in API operations
|
||||
- **Non-blocking**: API calls don't block installation
|
||||
|
||||
## Security Considerations
|
||||
|
||||
### Data Privacy
|
||||
- **Anonymous Reporting**: Only anonymous data is sent
|
||||
- **No Sensitive Data**: No sensitive information is transmitted
|
||||
- **User Control**: Users can disable diagnostic reporting
|
||||
- **Data Minimization**: Only necessary data is sent
|
||||
|
||||
### API Security
|
||||
- **HTTPS**: API communication uses secure protocols
|
||||
- **Data Validation**: API data is validated before sending
|
||||
- **Error Handling**: API errors are handled securely
|
||||
- **No Credentials**: No authentication credentials are sent
|
||||
|
||||
### Network Security
|
||||
- **Secure Communication**: Uses secure HTTP protocols
|
||||
- **Error Handling**: Network errors are handled gracefully
|
||||
- **No Data Leakage**: No sensitive data is leaked
|
||||
- **Secure Endpoints**: Uses trusted API endpoints
|
||||
|
||||
## Future Integration Considerations
|
||||
|
||||
### Extensibility
|
||||
- **New API Endpoints**: Easy to add new API endpoints
|
||||
- **Additional Data**: Easy to add new data fields
|
||||
- **Error Codes**: Easy to add new error code descriptions
|
||||
- **API Versions**: Easy to support new API versions
|
||||
|
||||
### Compatibility
|
||||
- **API Versioning**: Compatible with different API versions
|
||||
- **Data Format**: Compatible with different data formats
|
||||
- **Error Codes**: Compatible with different error code systems
|
||||
- **Network Protocols**: Compatible with different network protocols
|
||||
|
||||
### Performance
|
||||
- **Optimization**: API communication can be optimized
|
||||
- **Caching**: API responses can be cached
|
||||
- **Batch Operations**: Multiple operations can be batched
|
||||
- **Async Processing**: API calls can be made asynchronous
|
||||
### Security Considerations

- **Anonymous**: No personal data is transmitted — only system specs and app names
- **No Auth Required**: PocketBase collection rules allow anonymous create/update
- **User Control**: Users can disable telemetry by setting `DIAGNOSTICS=no`
- **HTTP**: API uses HTTP (not HTTPS) for compatibility with minimal containers

@ -1,794 +0,0 @@
|
||||
# api.func Usage Examples
|
||||
|
||||
## Overview
|
||||
|
||||
This document provides practical usage examples for `api.func` functions, covering common scenarios, integration patterns, and best practices.
|
||||
|
||||
## Basic API Setup
|
||||
|
||||
### Standard API Initialization
|
||||
|
||||
```bash
|
||||
#!/usr/bin/env bash
|
||||
# Standard API setup for LXC containers
|
||||
|
||||
source api.func
|
||||
|
||||
# Set up diagnostic reporting
|
||||
export DIAGNOSTICS="yes"
|
||||
export RANDOM_UUID="$(uuidgen)"
|
||||
|
||||
# Set container parameters
|
||||
export CT_TYPE=1
|
||||
export DISK_SIZE=8
|
||||
export CORE_COUNT=2
|
||||
export RAM_SIZE=2048
|
||||
export var_os="debian"
|
||||
export var_version="12"
|
||||
export NSAPP="plex"
|
||||
export METHOD="install"
|
||||
|
||||
# Report installation start
|
||||
post_to_api
|
||||
|
||||
# Your installation code here
|
||||
# ... installation logic ...
|
||||
|
||||
# Report completion
|
||||
if [[ $? -eq 0 ]]; then
|
||||
post_update_to_api "success" 0
|
||||
else
|
||||
post_update_to_api "failed" $?
|
||||
fi
|
||||
```
|
||||
|
||||
### VM API Setup
|
||||
|
||||
```bash
|
||||
#!/usr/bin/env bash
|
||||
# API setup for VMs
|
||||
|
||||
source api.func
|
||||
|
||||
# Create diagnostics file for VM
|
||||
mkdir -p /usr/local/community-scripts
|
||||
echo "DIAGNOSTICS=yes" > /usr/local/community-scripts/diagnostics
|
||||
|
||||
# Set up VM parameters
|
||||
export RANDOM_UUID="$(uuidgen)"
|
||||
export DISK_SIZE="20G"
|
||||
export CORE_COUNT=4
|
||||
export RAM_SIZE=4096
|
||||
export var_os="ubuntu"
|
||||
export var_version="22.04"
|
||||
export NSAPP="nextcloud"
|
||||
export METHOD="install"
|
||||
|
||||
# Report VM installation start
|
||||
post_to_api_vm
|
||||
|
||||
# Your VM installation code here
|
||||
# ... VM creation logic ...
|
||||
|
||||
# Report completion
|
||||
post_update_to_api "success" 0
|
||||
```
|
||||
|
||||
## Error Description Examples
|
||||
|
||||
### Basic Error Explanation
|
||||
|
||||
```bash
|
||||
#!/usr/bin/env bash
|
||||
source api.func
|
||||
|
||||
# Explain common error codes
|
||||
echo "Error 0: '$(get_error_description 0)'"
|
||||
echo "Error 1: $(get_error_description 1)"
|
||||
echo "Error 127: $(get_error_description 127)"
|
||||
echo "Error 200: $(get_error_description 200)"
|
||||
echo "Error 255: $(get_error_description 255)"
|
||||
```
|
||||
|
||||
### Error Code Testing
|
||||
|
||||
```bash
|
||||
#!/usr/bin/env bash
|
||||
source api.func
|
||||
|
||||
# Test all error codes
|
||||
test_error_codes() {
|
||||
local codes=(0 1 2 127 128 130 137 139 143 200 203 205 255)
|
||||
|
||||
for code in "${codes[@]}"; do
|
||||
echo "Code $code: $(get_error_description $code)"
|
||||
done
|
||||
}
|
||||
|
||||
test_error_codes
|
||||
```
|
||||
|
||||
### Error Handling with Descriptions
|
||||
|
||||
```bash
|
||||
#!/usr/bin/env bash
|
||||
source api.func
|
||||
|
||||
# Function with error handling
|
||||
run_command_with_error_handling() {
|
||||
local command="$1"
|
||||
local description="$2"
|
||||
|
||||
echo "Running: $description"
|
||||
|
||||
if $command; then
|
||||
echo "Success: $description"
|
||||
return 0
|
||||
else
|
||||
local exit_code=$?
|
||||
local error_msg=$(get_error_description $exit_code)
|
||||
echo "Error $exit_code: $error_msg"
|
||||
return $exit_code
|
||||
fi
|
||||
}
|
||||
|
||||
# Usage
|
||||
run_command_with_error_handling "apt-get update" "Package list update"
|
||||
run_command_with_error_handling "nonexistent_command" "Test command"
|
||||
```
|
||||
|
||||
## API Communication Examples
|
||||
|
||||
### LXC Installation Reporting
|
||||
|
||||
```bash
|
||||
#!/usr/bin/env bash
|
||||
source api.func
|
||||
|
||||
# Complete LXC installation with API reporting
|
||||
install_lxc_with_reporting() {
|
||||
local app="$1"
|
||||
local ctid="$2"
|
||||
|
||||
# Set up API reporting
|
||||
export DIAGNOSTICS="yes"
|
||||
export RANDOM_UUID="$(uuidgen)"
|
||||
export CT_TYPE=1
|
||||
export DISK_SIZE=10
|
||||
export CORE_COUNT=2
|
||||
export RAM_SIZE=2048
|
||||
export var_os="debian"
|
||||
export var_version="12"
|
||||
export NSAPP="$app"
|
||||
export METHOD="install"
|
||||
|
||||
# Report installation start
|
||||
post_to_api
|
||||
|
||||
# Installation process
|
||||
echo "Installing $app container (ID: $ctid)..."
|
||||
|
||||
# Simulate installation
|
||||
sleep 2
|
||||
|
||||
# Check if installation succeeded
|
||||
if [[ $? -eq 0 ]]; then
|
||||
echo "Installation completed successfully"
|
||||
post_update_to_api "success" 0
|
||||
return 0
|
||||
else
|
||||
echo "Installation failed"
|
||||
post_update_to_api "failed" $?
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
# Install multiple containers
|
||||
install_lxc_with_reporting "plex" "100"
|
||||
install_lxc_with_reporting "nextcloud" "101"
|
||||
install_lxc_with_reporting "nginx" "102"
|
||||
```
|
||||
|
||||
### VM Installation Reporting
|
||||
|
||||
```bash
|
||||
#!/usr/bin/env bash
|
||||
source api.func
|
||||
|
||||
# Complete VM installation with API reporting
|
||||
install_vm_with_reporting() {
|
||||
local app="$1"
|
||||
local vmid="$2"
|
||||
|
||||
# Create diagnostics file
|
||||
mkdir -p /usr/local/community-scripts
|
||||
echo "DIAGNOSTICS=yes" > /usr/local/community-scripts/diagnostics
|
||||
|
||||
# Set up API reporting
|
||||
export RANDOM_UUID="$(uuidgen)"
|
||||
export DISK_SIZE="20G"
|
||||
export CORE_COUNT=4
|
||||
export RAM_SIZE=4096
|
||||
export var_os="ubuntu"
|
||||
export var_version="22.04"
|
||||
export NSAPP="$app"
|
||||
export METHOD="install"
|
||||
|
||||
# Report VM installation start
|
||||
post_to_api_vm
|
||||
|
||||
# VM installation process
|
||||
echo "Installing $app VM (ID: $vmid)..."
|
||||
|
||||
# Simulate VM creation
|
||||
sleep 3
|
||||
|
||||
# Check if VM creation succeeded
|
||||
if [[ $? -eq 0 ]]; then
|
||||
echo "VM installation completed successfully"
|
||||
post_update_to_api "success" 0
|
||||
return 0
|
||||
else
|
||||
echo "VM installation failed"
|
||||
post_update_to_api "failed" $?
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
# Install multiple VMs
|
||||
install_vm_with_reporting "nextcloud" "200"
|
||||
install_vm_with_reporting "wordpress" "201"
|
||||
```
|
||||
|
||||
## Status Update Examples
|
||||
|
||||
### Success Reporting
|
||||
|
||||
```bash
|
||||
#!/usr/bin/env bash
|
||||
source api.func
|
||||
|
||||
# Report successful installation
|
||||
report_success() {
|
||||
local operation="$1"
|
||||
|
||||
export DIAGNOSTICS="yes"
|
||||
export RANDOM_UUID="$(uuidgen)"
|
||||
|
||||
echo "Reporting successful $operation"
|
||||
post_update_to_api "success" 0
|
||||
}
|
||||
|
||||
# Usage
|
||||
report_success "container installation"
|
||||
report_success "package installation"
|
||||
report_success "service configuration"
|
||||
```
|
||||
|
||||
### Failure Reporting
|
||||
|
||||
```bash
|
||||
#!/usr/bin/env bash
|
||||
source api.func
|
||||
|
||||
# Report failed installation
|
||||
report_failure() {
|
||||
local operation="$1"
|
||||
local exit_code="$2"
|
||||
|
||||
export DIAGNOSTICS="yes"
|
||||
export RANDOM_UUID="$(uuidgen)"
|
||||
|
||||
local error_msg=$(get_error_description $exit_code)
|
||||
echo "Reporting failed $operation: $error_msg"
|
||||
post_update_to_api "failed" $exit_code
|
||||
}
|
||||
|
||||
# Usage
|
||||
report_failure "container creation" 200
|
||||
report_failure "package installation" 127
|
||||
report_failure "service start" 1
|
||||
```
|
||||
|
||||
### Conditional Status Reporting
|
||||
|
||||
```bash
|
||||
#!/usr/bin/env bash
|
||||
source api.func
|
||||
|
||||
# Conditional status reporting
|
||||
report_installation_status() {
|
||||
local operation="$1"
|
||||
local exit_code="$2"
|
||||
|
||||
export DIAGNOSTICS="yes"
|
||||
export RANDOM_UUID="$(uuidgen)"
|
||||
|
||||
if [[ $exit_code -eq 0 ]]; then
|
||||
echo "Reporting successful $operation"
|
||||
post_update_to_api "success" 0
|
||||
else
|
||||
local error_msg=$(get_error_description $exit_code)
|
||||
echo "Reporting failed $operation: $error_msg"
|
||||
post_update_to_api "failed" $exit_code
|
||||
fi
|
||||
}
|
||||
|
||||
# Usage
|
||||
report_installation_status "container creation" 0
|
||||
report_installation_status "package installation" 127
|
||||
```
|
||||
|
||||
## Advanced Usage Examples
|
||||
|
||||
### Batch Installation with API Reporting
|
||||
|
||||
```bash
|
||||
#!/usr/bin/env bash
|
||||
source api.func
|
||||
|
||||
# Batch installation with comprehensive API reporting
|
||||
batch_install_with_reporting() {
|
||||
local apps=("plex" "nextcloud" "nginx" "mysql")
|
||||
local ctids=(100 101 102 103)
|
||||
|
||||
# Set up API reporting
|
||||
export DIAGNOSTICS="yes"
|
||||
export RANDOM_UUID="$(uuidgen)"
|
||||
export CT_TYPE=1
|
||||
export DISK_SIZE=8
|
||||
export CORE_COUNT=2
|
||||
export RAM_SIZE=2048
|
||||
export var_os="debian"
|
||||
export var_version="12"
|
||||
export METHOD="install"
|
||||
|
||||
local success_count=0
|
||||
local failure_count=0
|
||||
|
||||
for i in "${!apps[@]}"; do
|
||||
local app="${apps[$i]}"
|
||||
local ctid="${ctids[$i]}"
|
||||
|
||||
echo "Installing $app (ID: $ctid)..."
|
||||
|
||||
# Set app-specific parameters
|
||||
export NSAPP="$app"
|
||||
|
||||
# Report installation start
|
||||
post_to_api
|
||||
|
||||
# Simulate installation
|
||||
if install_app "$app" "$ctid"; then
|
||||
echo "$app installed successfully"
|
||||
post_update_to_api "success" 0
|
||||
((success_count++))
|
||||
else
|
||||
echo "$app installation failed"
|
||||
post_update_to_api "failed" $?
|
||||
((failure_count++))
|
||||
fi
|
||||
|
||||
echo "---"
|
||||
done
|
||||
|
||||
echo "Batch installation completed: $success_count successful, $failure_count failed"
|
||||
}
|
||||
|
||||
# Mock installation function
|
||||
install_app() {
|
||||
local app="$1"
|
||||
local ctid="$2"
|
||||
|
||||
# Simulate installation
|
||||
sleep 1
|
||||
|
||||
# Simulate occasional failures
|
||||
if [[ $((RANDOM % 10)) -eq 0 ]]; then
|
||||
return 1
|
||||
fi
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
batch_install_with_reporting
|
||||
```
|
||||
|
||||
### Error Analysis and Reporting
|
||||
|
||||
```bash
|
||||
#!/usr/bin/env bash
|
||||
source api.func
|
||||
|
||||
# Analyze and report errors
|
||||
analyze_and_report_errors() {
|
||||
local log_file="$1"
|
||||
|
||||
export DIAGNOSTICS="yes"
|
||||
export RANDOM_UUID="$(uuidgen)"
|
||||
|
||||
if [[ ! -f "$log_file" ]]; then
|
||||
echo "Log file not found: $log_file"
|
||||
return 1
|
||||
fi
|
||||
|
||||
# Extract error codes from log
|
||||
local error_codes=$(grep -o 'exit code [0-9]\+' "$log_file" | grep -o '[0-9]\+' | sort -u)
|
||||
|
||||
if [[ -z "$error_codes" ]]; then
|
||||
echo "No errors found in log"
|
||||
post_update_to_api "success" 0
|
||||
return 0
|
||||
fi
|
||||
|
||||
echo "Found error codes: $error_codes"
|
||||
|
||||
# Report each unique error
|
||||
for code in $error_codes; do
|
||||
local error_msg=$(get_error_description $code)
|
||||
echo "Error $code: $error_msg"
|
||||
post_update_to_api "failed" $code
|
||||
done
|
||||
}
|
||||
|
||||
# Usage
|
||||
analyze_and_report_errors "/var/log/installation.log"
|
||||
```
|
||||
|
||||
### API Health Check
|
||||
|
||||
```bash
|
||||
#!/usr/bin/env bash
|
||||
source api.func
|
||||
|
||||
# Check API connectivity and functionality
|
||||
check_api_health() {
|
||||
echo "Checking API health..."
|
||||
|
||||
# Test prerequisites
|
||||
if ! command -v curl >/dev/null 2>&1; then
|
||||
echo "ERROR: curl not available"
|
||||
return 1
|
||||
fi
|
||||
|
||||
# Test error description function
|
||||
local test_error=$(get_error_description 127)
|
||||
if [[ -z "$test_error" ]]; then
|
||||
echo "ERROR: Error description function not working"
|
||||
return 1
|
||||
fi
|
||||
|
||||
echo "Error description test: $test_error"
|
||||
|
||||
# Test API connectivity (without sending data)
|
||||
local api_url="http://api.community-scripts.org/dev/upload"
|
||||
if curl -s --head "$api_url" >/dev/null 2>&1; then
|
||||
echo "API endpoint is reachable"
|
||||
else
|
||||
echo "WARNING: API endpoint not reachable"
|
||||
fi
|
||||
|
||||
echo "API health check completed"
|
||||
}
|
||||
|
||||
check_api_health
|
||||
```
|
||||
|
||||
## Integration Examples
|
||||
|
||||
### With build.func
|
||||
|
||||
```bash
|
||||
#!/usr/bin/env bash
|
||||
# Integration with build.func
|
||||
|
||||
source core.func
|
||||
source api.func
|
||||
source build.func
|
||||
|
||||
# Set up API reporting
|
||||
export DIAGNOSTICS="yes"
|
||||
export RANDOM_UUID="$(uuidgen)"
|
||||
|
||||
# Container creation with API reporting
|
||||
create_container_with_reporting() {
|
||||
local app="$1"
|
||||
local ctid="$2"
|
||||
|
||||
# Set container parameters
|
||||
export APP="$app"
|
||||
export CTID="$ctid"
|
||||
export var_hostname="${app}-server"
|
||||
export var_os="debian"
|
||||
export var_version="12"
|
||||
export var_cpu="2"
|
||||
export var_ram="2048"
|
||||
export var_disk="10"
|
||||
export var_net="vmbr0"
|
||||
export var_gateway="192.168.1.1"
|
||||
export var_ip="192.168.1.$ctid"
|
||||
export var_template_storage="local"
|
||||
export var_container_storage="local"
|
||||
|
||||
# Report installation start
|
||||
post_to_api
|
||||
|
||||
# Create container using build.func
|
||||
if source build.func; then
|
||||
echo "Container $app created successfully"
|
||||
post_update_to_api "success" 0
|
||||
return 0
|
||||
else
|
||||
echo "Container $app creation failed"
|
||||
post_update_to_api "failed" $?
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
# Create containers
|
||||
create_container_with_reporting "plex" "100"
|
||||
create_container_with_reporting "nextcloud" "101"
|
||||
```
|
||||
|
||||
### With vm-core.func
|
||||
|
||||
```bash
|
||||
#!/usr/bin/env bash
|
||||
# Integration with vm-core.func
|
||||
|
||||
source core.func
|
||||
source api.func
|
||||
source vm-core.func
|
||||
|
||||
# Set up VM API reporting
|
||||
mkdir -p /usr/local/community-scripts
|
||||
echo "DIAGNOSTICS=yes" > /usr/local/community-scripts/diagnostics
|
||||
|
||||
export RANDOM_UUID="$(uuidgen)"
|
||||
|
||||
# VM creation with API reporting
|
||||
create_vm_with_reporting() {
|
||||
local app="$1"
|
||||
local vmid="$2"
|
||||
|
||||
# Set VM parameters
|
||||
export APP="$app"
|
||||
export VMID="$vmid"
|
||||
export var_hostname="${app}-vm"
|
||||
export var_os="ubuntu"
|
||||
export var_version="22.04"
|
||||
export var_cpu="4"
|
||||
export var_ram="4096"
|
||||
export var_disk="20"
|
||||
|
||||
# Report VM installation start
|
||||
post_to_api_vm
|
||||
|
||||
# Create VM using vm-core.func
|
||||
if source vm-core.func; then
|
||||
echo "VM $app created successfully"
|
||||
post_update_to_api "success" 0
|
||||
return 0
|
||||
else
|
||||
echo "VM $app creation failed"
|
||||
post_update_to_api "failed" $?
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
# Create VMs
|
||||
create_vm_with_reporting "nextcloud" "200"
|
||||
create_vm_with_reporting "wordpress" "201"
|
||||
```
|
||||
|
||||
### With error_handler.func
|
||||
|
||||
```bash
|
||||
#!/usr/bin/env bash
|
||||
# Integration with error_handler.func
|
||||
|
||||
source core.func
|
||||
source error_handler.func
|
||||
source api.func
|
||||
|
||||
# Enhanced error handling with API reporting
|
||||
enhanced_error_handler() {
|
||||
local exit_code=${1:-$?}
|
||||
local command=${2:-${BASH_COMMAND:-unknown}}
|
||||
|
||||
# Get error description from api.func
|
||||
local error_msg=$(get_error_description $exit_code)
|
||||
|
||||
# Display error information
|
||||
echo "Error $exit_code: $error_msg"
|
||||
echo "Command: $command"
|
||||
|
||||
# Report error to API
|
||||
export DIAGNOSTICS="yes"
|
||||
export RANDOM_UUID="$(uuidgen)"
|
||||
post_update_to_api "failed" $exit_code
|
||||
|
||||
# Use standard error handler
|
||||
error_handler $exit_code $command
|
||||
}
|
||||
|
||||
# Set up enhanced error handling
|
||||
trap 'enhanced_error_handler' ERR
|
||||
|
||||
# Test enhanced error handling
|
||||
nonexistent_command
|
||||
```
|
||||
|
||||
## Best Practices Examples
|
||||
|
||||
### Comprehensive API Integration
|
||||
|
||||
```bash
|
||||
#!/usr/bin/env bash
|
||||
# Comprehensive API integration example
|
||||
|
||||
source core.func
|
||||
source api.func
|
||||
|
||||
# Set up comprehensive API reporting
|
||||
setup_api_reporting() {
|
||||
# Enable diagnostics
|
||||
export DIAGNOSTICS="yes"
|
||||
export RANDOM_UUID="$(uuidgen)"
|
||||
|
||||
# Set common parameters
|
||||
export CT_TYPE=1
|
||||
export DISK_SIZE=8
|
||||
export CORE_COUNT=2
|
||||
export RAM_SIZE=2048
|
||||
export var_os="debian"
|
||||
export var_version="12"
|
||||
export METHOD="install"
|
||||
|
||||
echo "API reporting configured"
|
||||
}
|
||||
|
||||
# Installation with comprehensive reporting
|
||||
install_with_comprehensive_reporting() {
|
||||
local app="$1"
|
||||
local ctid="$2"
|
||||
|
||||
# Set up API reporting
|
||||
setup_api_reporting
|
||||
export NSAPP="$app"
|
||||
|
||||
# Report installation start
|
||||
post_to_api
|
||||
|
||||
# Installation process
|
||||
echo "Installing $app..."
|
||||
|
||||
# Simulate installation steps
|
||||
local steps=("Downloading" "Installing" "Configuring" "Starting")
|
||||
for step in "${steps[@]}"; do
|
||||
echo "$step $app..."
|
||||
sleep 1
|
||||
done
|
||||
|
||||
# Check installation result
|
||||
if [[ $? -eq 0 ]]; then
|
||||
echo "$app installation completed successfully"
|
||||
post_update_to_api "success" 0
|
||||
return 0
|
||||
else
|
||||
echo "$app installation failed"
|
||||
post_update_to_api "failed" $?
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
# Install multiple applications
|
||||
apps=("plex" "nextcloud" "nginx" "mysql")
|
||||
ctids=(100 101 102 103)
|
||||
|
||||
for i in "${!apps[@]}"; do
|
||||
install_with_comprehensive_reporting "${apps[$i]}" "${ctids[$i]}"
|
||||
echo "---"
|
||||
done
|
||||
```
|
||||
|
||||
### Error Recovery with API Reporting
|
||||
|
||||
```bash
|
||||
#!/usr/bin/env bash
|
||||
source api.func
|
||||
|
||||
# Error recovery with API reporting
|
||||
retry_with_api_reporting() {
|
||||
local operation="$1"
|
||||
local max_attempts=3
|
||||
local attempt=1
|
||||
|
||||
export DIAGNOSTICS="yes"
|
||||
export RANDOM_UUID="$(uuidgen)"
|
||||
|
||||
while [[ $attempt -le $max_attempts ]]; do
|
||||
echo "Attempt $attempt of $max_attempts: $operation"
|
||||
|
||||
if $operation; then
|
||||
echo "Operation succeeded on attempt $attempt"
|
||||
post_update_to_api "success" 0
|
||||
return 0
|
||||
else
|
||||
local exit_code=$?
|
||||
local error_msg=$(get_error_description $exit_code)
|
||||
echo "Attempt $attempt failed: $error_msg"
|
||||
|
||||
post_update_to_api "failed" $exit_code
|
||||
|
||||
((attempt++))
|
||||
|
||||
if [[ $attempt -le $max_attempts ]]; then
|
||||
echo "Retrying in 5 seconds..."
|
||||
sleep 5
|
||||
fi
|
||||
fi
|
||||
done
|
||||
|
||||
echo "Operation failed after $max_attempts attempts"
|
||||
return 1
|
||||
}
|
||||
|
||||
# Usage
|
||||
retry_with_api_reporting "apt-get update"
|
||||
retry_with_api_reporting "apt-get install -y package"
|
||||
```
|
||||
|
||||
### API Reporting with Logging
|
||||
|
||||
```bash
|
||||
#!/usr/bin/env bash
|
||||
source api.func
|
||||
|
||||
# API reporting with detailed logging
|
||||
install_with_logging_and_api() {
|
||||
local app="$1"
|
||||
local log_file="/var/log/${app}_installation.log"
|
||||
|
||||
# Set up API reporting
|
||||
export DIAGNOSTICS="yes"
|
||||
export RANDOM_UUID="$(uuidgen)"
|
||||
export NSAPP="$app"
|
||||
|
||||
# Start logging
|
||||
exec > >(tee -a "$log_file")
|
||||
exec 2>&1
|
||||
|
||||
echo "Starting $app installation at $(date)"
|
||||
|
||||
# Report installation start
|
||||
post_to_api
|
||||
|
||||
# Installation process
|
||||
echo "Installing $app..."
|
||||
|
||||
# Simulate installation
|
||||
if install_app "$app"; then
|
||||
echo "$app installation completed successfully at $(date)"
|
||||
post_update_to_api "success" 0
|
||||
return 0
|
||||
else
|
||||
local exit_code=$?
|
||||
local error_msg=$(get_error_description $exit_code)
|
||||
echo "$app installation failed at $(date): $error_msg"
|
||||
post_update_to_api "failed" $exit_code
|
||||
return $exit_code
|
||||
fi
|
||||
}
|
||||
|
||||
# Mock installation function
|
||||
install_app() {
|
||||
local app="$1"
|
||||
echo "Installing $app..."
|
||||
sleep 2
|
||||
return 0
|
||||
}
|
||||
|
||||
# Install with logging and API reporting
|
||||
install_with_logging_and_api "plex"
|
||||
```
|
||||
@ -2,22 +2,27 @@
|
||||
|
||||
## Overview
|
||||
|
||||
The `api.func` file provides Proxmox API integration and diagnostic reporting functionality for the Community Scripts project. It handles API communication, error reporting, and status updates to the community-scripts.org API.
|
||||
The `api.func` file provides PocketBase API integration and diagnostic reporting for the Community Scripts project. It handles telemetry communication, error reporting, and status updates to the PocketBase backend at `db.community-scripts.org`.
|
||||
|
||||
## Purpose and Use Cases
|
||||
|
||||
- **API Communication**: Send installation and status data to community-scripts.org API
|
||||
- **API Communication**: Send installation and status data to PocketBase
|
||||
- **Diagnostic Reporting**: Report installation progress and errors for analytics
|
||||
- **Error Description**: Provide detailed error code explanations
|
||||
- **Error Description**: Provide detailed error code explanations (canonical source of truth)
|
||||
- **Status Updates**: Track installation success/failure status
|
||||
- **Analytics**: Contribute anonymous usage data for project improvement
|
||||
|
||||
## Quick Reference
|
||||
|
||||
### Key Function Groups
|
||||
- **Error Handling**: `get_error_description()` - Convert exit codes to human-readable messages
|
||||
- **API Communication**: `post_to_api()`, `post_to_api_vm()` - Send installation data
|
||||
- **Status Updates**: `post_update_to_api()` - Report installation completion status
|
||||
- **Error Handling**: `explain_exit_code()` - Convert exit codes to human-readable messages
|
||||
- **API Communication**: `post_to_api()`, `post_to_api_vm()` - Send installation data to PocketBase
|
||||
- **Status Updates**: `post_update_to_api()` - Report installation completion status via PATCH
|
||||
|
||||
### PocketBase Configuration
|
||||
- **URL**: `http://db.community-scripts.org`
|
||||
- **Collection**: `_dev_telemetry_data`
|
||||
- **API Endpoint**: `/api/collections/_dev_telemetry_data/records`
|
||||
|
||||
### Dependencies
|
||||
- **External**: `curl` command for HTTP requests
|
||||
@ -26,7 +31,7 @@ The `api.func` file provides Proxmox API integration and diagnostic reporting fu
|
||||
### Integration Points
|
||||
- Used by: All installation scripts for diagnostic reporting
|
||||
- Uses: Environment variables from build.func and other scripts
|
||||
- Provides: API communication and error reporting services
|
||||
- Provides: API communication, error reporting, and exit code descriptions
|
||||
|
||||
## Documentation Files
|
||||
|
||||
@ -44,17 +49,18 @@ How api.func integrates with other components and provides API services.
|
||||
|
||||
## Key Features
|
||||
|
||||
### Error Code Descriptions
|
||||
- **Comprehensive Coverage**: 50+ error codes with detailed explanations
|
||||
- **LXC-Specific Errors**: Container creation and management errors
|
||||
- **System Errors**: General system and network errors
|
||||
### Exit Code Descriptions
|
||||
- **Canonical source**: Single authoritative `explain_exit_code()` for the entire project
|
||||
- **Non-overlapping ranges**: Clean separation between error categories
|
||||
- **Comprehensive Coverage**: 60+ error codes with detailed explanations
|
||||
- **System Errors**: General system, curl, and network errors
|
||||
- **Signal Errors**: Process termination and signal errors
|
||||
|
||||
### API Communication
|
||||
- **LXC Reporting**: Send LXC container installation data
|
||||
- **VM Reporting**: Send VM installation data
|
||||
- **Status Updates**: Report installation success/failure
|
||||
- **Diagnostic Data**: Anonymous usage analytics
|
||||
### PocketBase Integration
|
||||
- **Record Creation**: POST to create telemetry records with status `installing`
|
||||
- **Record Updates**: PATCH to update with final status, exit code, and error
|
||||
- **ID Tracking**: Stores `PB_RECORD_ID` for efficient updates
|
||||
- **Fallback Lookup**: Searches by `random_id` filter if record ID is lost
|
||||
|
||||
### Diagnostic Integration
|
||||
- **Optional Reporting**: Only sends data when diagnostics enabled
|
||||
@ -67,15 +73,13 @@ How api.func integrates with other components and provides API services.
|
||||
### Basic API Setup
|
||||
```bash
|
||||
#!/usr/bin/env bash
|
||||
# Basic API setup
|
||||
|
||||
source api.func
|
||||
|
||||
# Set up diagnostic reporting
|
||||
export DIAGNOSTICS="yes"
|
||||
export RANDOM_UUID="$(uuidgen)"
|
||||
export RANDOM_UUID="$(cat /proc/sys/kernel/random/uuid)"
|
||||
|
||||
# Report installation start
|
||||
# Report installation start (creates PocketBase record)
|
||||
post_to_api
|
||||
```
|
||||
|
||||
@ -85,9 +89,9 @@ post_to_api
|
||||
source api.func
|
||||
|
||||
# Get error description
|
||||
error_msg=$(get_error_description 127)
|
||||
echo "Error 127: $error_msg"
|
||||
# Output: Error 127: Command not found: Incorrect path or missing dependency.
|
||||
error_msg=$(explain_exit_code 137)
|
||||
echo "Error 137: $error_msg"
|
||||
# Output: Error 137: Killed (SIGKILL / Out of memory?)
|
||||
```
|
||||
|
||||
### Status Updates
|
||||
@ -96,9 +100,9 @@ echo "Error 127: $error_msg"
|
||||
source api.func
|
||||
|
||||
# Report successful installation
|
||||
post_update_to_api "success" 0
|
||||
post_update_to_api "done" 0
|
||||
|
||||
# Report failed installation
|
||||
# Report failed installation with exit code
|
||||
post_update_to_api "failed" 127
|
||||
```
|
||||
|
||||
@ -106,7 +110,7 @@ post_update_to_api "failed" 127
|
||||
|
||||
### Required Variables
|
||||
- `DIAGNOSTICS`: Enable/disable diagnostic reporting ("yes"/"no")
|
||||
- `RANDOM_UUID`: Unique identifier for tracking
|
||||
- `RANDOM_UUID`: Unique identifier for session tracking
|
||||
|
||||
### Optional Variables
|
||||
- `CT_TYPE`: Container type (1 for LXC, 2 for VM)
|
||||
@ -115,33 +119,31 @@ post_update_to_api "failed" 127
|
||||
- `RAM_SIZE`: RAM size in MB
|
||||
- `var_os`: Operating system type
|
||||
- `var_version`: OS version
|
||||
- `DISABLEIP6`: IPv6 disable setting
|
||||
- `NSAPP`: Namespace application name
|
||||
- `NSAPP`: Application name
|
||||
- `METHOD`: Installation method
|
||||
|
||||
### Internal Variables
|
||||
- `POST_UPDATE_DONE`: Prevents duplicate status updates
|
||||
- `API_URL`: Community scripts API endpoint
|
||||
- `JSON_PAYLOAD`: API request payload
|
||||
- `RESPONSE`: API response
|
||||
- `PB_URL`: PocketBase base URL
|
||||
- `PB_API_URL`: Full API endpoint URL
|
||||
- `PB_RECORD_ID`: Stored PocketBase record ID for updates
|
||||
|
||||
## Error Code Categories
|
||||
## Error Code Categories (Non-Overlapping Ranges)
|
||||
|
||||
### General System Errors
|
||||
- **0-9**: Basic system errors
|
||||
- **18, 22, 28, 35**: Network and I/O errors
|
||||
- **56, 60**: TLS/SSL errors
|
||||
- **125-128**: Command execution errors
|
||||
- **129-143**: Signal errors
|
||||
- **152**: Resource limit errors
|
||||
- **255**: Unknown critical errors
|
||||
|
||||
### LXC-Specific Errors
|
||||
- **100-101**: LXC installation errors
|
||||
- **200-209**: LXC creation and management errors
|
||||
|
||||
### Docker Errors
|
||||
- **125**: Docker container start errors
|
||||
| Range | Category |
|
||||
|-------|----------|
|
||||
| 1-2 | Generic shell errors |
|
||||
| 6-35 | curl/wget network errors |
|
||||
| 100-102 | APT/DPKG package errors |
|
||||
| 124-143 | Command execution & signal errors |
|
||||
| 150-154 | Systemd/service errors |
|
||||
| 160-162 | Python/pip/uv errors |
|
||||
| 170-173 | PostgreSQL errors |
|
||||
| 180-183 | MySQL/MariaDB errors |
|
||||
| 190-193 | MongoDB errors |
|
||||
| 200-231 | Proxmox custom codes |
|
||||
| 243-249 | Node.js/npm errors |
|
||||
| 255 | DPKG fatal error |
|
||||
|
||||
## Best Practices
|
||||
|
||||
@ -152,48 +154,56 @@ post_update_to_api "failed" 127
|
||||
4. Report both success and failure cases
|
||||
|
||||
### Error Handling
|
||||
1. Use appropriate error codes
|
||||
2. Provide meaningful error descriptions
|
||||
1. Use the correct non-overlapping exit code ranges
|
||||
2. Use `explain_exit_code()` from api.func (canonical source)
|
||||
3. Handle API communication failures gracefully
|
||||
4. Don't block installation on API failures
|
||||
|
||||
### API Usage
|
||||
1. Check for curl availability
|
||||
2. Handle network failures gracefully
|
||||
3. Use appropriate HTTP methods
|
||||
4. Include all required data
|
||||
1. Check for curl availability before API calls
|
||||
2. Handle network failures gracefully (all calls use `|| true`)
|
||||
3. Store and reuse PB_RECORD_ID for updates
|
||||
4. Use proper PocketBase REST methods (POST for create, PATCH for update)
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Common Issues
|
||||
1. **API Communication Fails**: Check network connectivity and curl availability
|
||||
2. **Diagnostics Not Working**: Verify DIAGNOSTICS setting and RANDOM_UUID
|
||||
3. **Missing Error Descriptions**: Check error code coverage
|
||||
4. **Duplicate Updates**: POST_UPDATE_DONE prevents duplicates
|
||||
2. **Diagnostics Not Working**: Verify `DIAGNOSTICS=yes` in `/usr/local/community-scripts/diagnostics`
|
||||
3. **Status Update Fails**: Check that `PB_RECORD_ID` was captured or `random_id` filter works
|
||||
4. **Duplicate Updates**: `POST_UPDATE_DONE` flag prevents duplicates
|
||||
|
||||
### Debug Mode
|
||||
Enable diagnostic reporting for debugging:
|
||||
```bash
|
||||
export DIAGNOSTICS="yes"
|
||||
export RANDOM_UUID="$(uuidgen)"
|
||||
export RANDOM_UUID="$(cat /proc/sys/kernel/random/uuid)"
|
||||
```
|
||||
|
||||
### API Testing
|
||||
Test API communication:
|
||||
Test PocketBase connectivity:
|
||||
```bash
|
||||
curl -s http://db.community-scripts.org/api/health
|
||||
```
|
||||
|
||||
Test record creation:
|
||||
```bash
|
||||
source api.func
|
||||
export DIAGNOSTICS="yes"
|
||||
export RANDOM_UUID="test-$(date +%s)"
|
||||
export NSAPP="test"
|
||||
export CT_TYPE=1
|
||||
post_to_api
|
||||
echo "Record ID: $PB_RECORD_ID"
|
||||
```
|
||||
|
||||
## Related Documentation
|
||||
|
||||
- [core.func](../core.func/) - Core utilities and error handling
|
||||
- [error_handler.func](../error_handler.func/) - Error handling utilities
|
||||
- [core.func](../core.func/) - Core utilities
|
||||
- [error_handler.func](../error_handler.func/) - Error handling (fallback `explain_exit_code`)
|
||||
- [build.func](../build.func/) - Container creation with API integration
|
||||
- [tools.func](../tools.func/) - Extended utilities with API integration
|
||||
- [tools.func](../tools.func/) - Extended utilities
|
||||
|
||||
---
|
||||
|
||||
*This documentation covers the api.func file which provides API communication and diagnostic reporting for all Proxmox Community Scripts.*
|
||||
*This documentation covers the api.func file which provides PocketBase communication and diagnostic reporting for all Proxmox Community Scripts.*
|
||||
|
||||
35
frontend/public/json/drawio.json
Normal file
35
frontend/public/json/drawio.json
Normal file
@ -0,0 +1,35 @@
|
||||
{
|
||||
"name": "Draw.IO",
|
||||
"slug": "drawio",
|
||||
"categories": [
|
||||
12
|
||||
],
|
||||
"date_created": "2026-01-29",
|
||||
"type": "ct",
|
||||
"updateable": true,
|
||||
"privileged": false,
|
||||
"interface_port": 8080,
|
||||
"documentation": "https://www.drawio.com/doc/",
|
||||
"website": "https://www.drawio.com/",
|
||||
"logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/draw-io.webp",
|
||||
"config_path": "",
|
||||
"description": "draw.io is a configurable diagramming and whiteboarding application, jointly owned and developed by draw.io Ltd (previously named JGraph) and draw.io AG.",
|
||||
"install_methods": [
|
||||
{
|
||||
"type": "default",
|
||||
"script": "ct/drawio.sh",
|
||||
"resources": {
|
||||
"cpu": 1,
|
||||
"ram": 2048,
|
||||
"hdd": 4,
|
||||
"os": "Debian",
|
||||
"version": "13"
|
||||
}
|
||||
}
|
||||
],
|
||||
"default_credentials": {
|
||||
"username": null,
|
||||
"password": null
|
||||
},
|
||||
"notes": []
|
||||
}
|
||||
@ -11,7 +11,7 @@
|
||||
"interface_port": null,
|
||||
"documentation": "https://github.com/john30/ebusd/wiki",
|
||||
"website": "https://github.com/john30/ebusd",
|
||||
"logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/proxmox-helper-scripts.webp",
|
||||
"logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/ebusd.webp",
|
||||
"config_path": "/etc/default/ebusd",
|
||||
"description": "ebusd is a daemon for handling communication with eBUS devices connected to a 2-wire `energy bus` used by numerous heating systems.",
|
||||
"install_methods": [
|
||||
|
||||
@ -1,44 +0,0 @@
|
||||
{
|
||||
"name": "FreePBX",
|
||||
"slug": "freepbx",
|
||||
"categories": [
|
||||
0
|
||||
],
|
||||
"date_created": "2025-01-15",
|
||||
"type": "ct",
|
||||
"updateable": true,
|
||||
"privileged": false,
|
||||
"interface_port": 80,
|
||||
"documentation": "https://wiki.freepbx.org/",
|
||||
"website": "https://www.freepbx.org/",
|
||||
"logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/freepbx.webp",
|
||||
"config_path": "/etc/freepbx.conf",
|
||||
"description": "FreePBX is a web-based open-source graphical user interface that manages Asterisk, a voice over IP and telephony server. FreePBX provides a complete PBX solution with call routing, voicemail, IVR, and more.",
|
||||
"install_methods": [
|
||||
{
|
||||
"type": "default",
|
||||
"script": "ct/freepbx.sh",
|
||||
"resources": {
|
||||
"cpu": 2,
|
||||
"ram": 2048,
|
||||
"hdd": 10,
|
||||
"os": "debian",
|
||||
"version": "12"
|
||||
}
|
||||
}
|
||||
],
|
||||
"default_credentials": {
|
||||
"username": null,
|
||||
"password": null
|
||||
},
|
||||
"notes": [
|
||||
{
|
||||
"text": "Access the web interface to complete initial setup",
|
||||
"type": "info"
|
||||
},
|
||||
{
|
||||
"text": "SIP Port: 5060, IAX2 Port: 4569",
|
||||
"type": "info"
|
||||
}
|
||||
]
|
||||
}
|
||||
40
frontend/public/json/linkding.json
Normal file
40
frontend/public/json/linkding.json
Normal file
@ -0,0 +1,40 @@
|
||||
{
|
||||
"name": "linkding",
|
||||
"slug": "linkding",
|
||||
"categories": [
|
||||
12
|
||||
],
|
||||
"date_created": "2026-02-09",
|
||||
"type": "ct",
|
||||
"updateable": true,
|
||||
"privileged": false,
|
||||
"interface_port": 9090,
|
||||
"documentation": "https://linkding.link/",
|
||||
"website": "https://linkding.link/",
|
||||
"logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/linkding.webp",
|
||||
"config_path": "/opt/linkding/.env",
|
||||
"description": "linkding is a self-hosted bookmark manager that is designed to be minimal, fast, and easy to set up. It features a clean UI, tag-based organization, bulk editing, Markdown notes, read it later functionality, sharing, REST API, and browser extensions for Firefox and Chrome.",
|
||||
"install_methods": [
|
||||
{
|
||||
"type": "default",
|
||||
"script": "ct/linkding.sh",
|
||||
"resources": {
|
||||
"cpu": 2,
|
||||
"ram": 1024,
|
||||
"hdd": 4,
|
||||
"os": "Debian",
|
||||
"version": "13"
|
||||
}
|
||||
}
|
||||
],
|
||||
"default_credentials": {
|
||||
"username": "admin",
|
||||
"password": null
|
||||
},
|
||||
"notes": [
|
||||
{
|
||||
"text": "Admin credentials are stored in /opt/linkding/.env",
|
||||
"type": "info"
|
||||
}
|
||||
]
|
||||
}
|
||||
@ -1,49 +0,0 @@
|
||||
{
|
||||
"name": "OpenClaw",
|
||||
"slug": "openclaw",
|
||||
"categories": [
|
||||
22,
|
||||
20
|
||||
],
|
||||
"date_created": "2026-02-03",
|
||||
"type": "ct",
|
||||
"updateable": true,
|
||||
"privileged": false,
|
||||
"interface_port": 18789,
|
||||
"documentation": "https://docs.openclaw.ai",
|
||||
"website": "https://openclaw.ai",
|
||||
"logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/openclaw.webp",
|
||||
"config_path": "/root/.openclaw/openclaw.json",
|
||||
"description": "OpenClaw is a personal AI assistant that runs locally and integrates with messaging platforms like WhatsApp, Telegram, Discord, and Slack.",
|
||||
"install_methods": [
|
||||
{
|
||||
"type": "default",
|
||||
"script": "ct/openclaw.sh",
|
||||
"resources": {
|
||||
"cpu": 2,
|
||||
"ram": 2048,
|
||||
"hdd": 4,
|
||||
"os": "debian",
|
||||
"version": "13"
|
||||
}
|
||||
}
|
||||
],
|
||||
"default_credentials": {
|
||||
"username": null,
|
||||
"password": null
|
||||
},
|
||||
"notes": [
|
||||
{
|
||||
"text": "After installation, run 'openclaw onboard' inside the container to configure AI providers.",
|
||||
"type": "info"
|
||||
},
|
||||
{
|
||||
"text": "After onboarding, start the gateway with 'systemctl start openclaw'.",
|
||||
"type": "info"
|
||||
},
|
||||
{
|
||||
"text": "The Control UI requires a secure context (HTTPS or localhost). Either use a reverse proxy with HTTPS, or connect via SSH port forwarding: ssh -L 18789:localhost:18789 root@<container-ip>, then open http://localhost:18789 in your browser.",
|
||||
"type": "warning"
|
||||
}
|
||||
]
|
||||
}
|
||||
40
frontend/public/json/powerdns.json
Normal file
40
frontend/public/json/powerdns.json
Normal file
@ -0,0 +1,40 @@
|
||||
{
|
||||
"name": "PowerDNS",
|
||||
"slug": "powerdns",
|
||||
"categories": [
|
||||
5
|
||||
],
|
||||
"date_created": "2026-02-11",
|
||||
"type": "ct",
|
||||
"updateable": true,
|
||||
"privileged": false,
|
||||
"interface_port": 80,
|
||||
"documentation": "https://doc.powerdns.com/index.html",
|
||||
"config_path": "/opt/poweradmin/config/settings.php",
|
||||
"website": "https://www.powerdns.com/",
|
||||
"logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/powerdns.webp",
|
||||
"description": "The PowerDNS Authoritative Server is a versatile nameserver which supports a large number of backends. These backends can either be plain zone files or be more dynamic in nature. PowerDNS has the concepts of ‘backends’. A backend is a datastore that the server will consult that contains DNS records (and some metadata). The backends range from database backends (MySQL, PostgreSQL) and BIND zone files to co-processes and JSON API’s.",
|
||||
"install_methods": [
|
||||
{
|
||||
"type": "default",
|
||||
"script": "ct/powerdns.sh",
|
||||
"resources": {
|
||||
"cpu": 1,
|
||||
"ram": 1024,
|
||||
"hdd": 4,
|
||||
"os": "Debian",
|
||||
"version": "13"
|
||||
}
|
||||
}
|
||||
],
|
||||
"default_credentials": {
|
||||
"username": null,
|
||||
"password": null
|
||||
},
|
||||
"notes": [
|
||||
{
|
||||
"text": "For administrator credentials type: `cat ~/poweradmin.creds` inside LXC.",
|
||||
"type": "info"
|
||||
}
|
||||
]
|
||||
}
|
||||
44
frontend/public/json/skylite-ux.json
Normal file
44
frontend/public/json/skylite-ux.json
Normal file
@ -0,0 +1,44 @@
|
||||
{
|
||||
"name": "Skylite-UX",
|
||||
"slug": "skylite-ux",
|
||||
"categories": [
|
||||
19
|
||||
],
|
||||
"date_created": "2026-02-06",
|
||||
"type": "ct",
|
||||
"updateable": true,
|
||||
"privileged": false,
|
||||
"interface_port": 3000,
|
||||
"documentation": "https://github.com/Wetzel402/Skylite-UX",
|
||||
"website": "https://github.com/Wetzel402/Skylite-UX",
|
||||
"logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/skylite-ux.webp",
|
||||
"config_path": "/opt/skylite-ux/.env",
|
||||
"description": "Skylite-UX is an open-source, self-hosted family management app with calendar, todos, shopping lists, and user management.",
|
||||
"install_methods": [
|
||||
{
|
||||
"type": "default",
|
||||
"script": "ct/skylite-ux.sh",
|
||||
"resources": {
|
||||
"cpu": 2,
|
||||
"ram": 4096,
|
||||
"hdd": 8,
|
||||
"os": "Debian",
|
||||
"version": "13"
|
||||
}
|
||||
}
|
||||
],
|
||||
"default_credentials": {
|
||||
"username": null,
|
||||
"password": null
|
||||
},
|
||||
"notes": [
|
||||
{
|
||||
"text": "Database credentials: `cat ~/skylite-ux.creds`",
|
||||
"type": "info"
|
||||
},
|
||||
{
|
||||
"text": "Build process requires ~4GB RAM. Runtime usage is much lower — RAM can be reduced after installation.",
|
||||
"type": "info"
|
||||
}
|
||||
]
|
||||
}
|
||||
@ -1,40 +0,0 @@
|
||||
{
|
||||
"name": "wger",
|
||||
"slug": "wger",
|
||||
"categories": [
|
||||
24
|
||||
],
|
||||
"date_created": "2025-02-24",
|
||||
"type": "ct",
|
||||
"updateable": true,
|
||||
"privileged": false,
|
||||
"interface_port": 3000,
|
||||
"documentation": "https://wger.readthedocs.io/en/latest/index.html",
|
||||
"website": "https://wger.de",
|
||||
"logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/wger.webp",
|
||||
"config_path": "/opt/wger/wger.env",
|
||||
"description": "wger (ˈvɛɡɐ) Workout Manager is a free, open source web application that helps you manage your personal workouts, weight and diet plans and can also be used as a simple gym management utility. It offers a REST API as well, for easy integration with other projects and tools.",
|
||||
"install_methods": [
|
||||
{
|
||||
"type": "default",
|
||||
"script": "ct/wger.sh",
|
||||
"resources": {
|
||||
"cpu": 2,
|
||||
"ram": 2048,
|
||||
"hdd": 8,
|
||||
"os": "debian",
|
||||
"version": "13"
|
||||
}
|
||||
}
|
||||
],
|
||||
"default_credentials": {
|
||||
"username": "admin",
|
||||
"password": "adminadmin"
|
||||
},
|
||||
"notes": [
|
||||
{
|
||||
"text": "This LXC also runs Celery and Redis to synchronize workouts and ingredients",
|
||||
"type": "info"
|
||||
}
|
||||
]
|
||||
}
|
||||
@ -1,34 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright (c) 2021-2026 community-scripts ORG
|
||||
# Author: Slaviša Arežina (tremor021)
|
||||
# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
|
||||
# Source: https://www.powerdns.com/
|
||||
|
||||
source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
|
||||
color
|
||||
verb_ip6
|
||||
catch_errors
|
||||
setting_up_container
|
||||
network_check
|
||||
update_os
|
||||
|
||||
msg_info "Installing PowerDNS"
|
||||
$STD apk add --no-cache pdns pdns-backend-sqlite3 pdns-doc
|
||||
msg_ok "Installed PowerDNS"
|
||||
|
||||
msg_info "Configuring PowerDNS"
|
||||
sed -i '/^# launch=$/c\launch=gsqlite3\ngsqlite3-database=/var/lib/powerdns/pdns.sqlite3' /etc/pdns/pdns.conf
|
||||
mkdir /var/lib/powerdns
|
||||
sqlite3 /var/lib/powerdns/pdns.sqlite3 < /usr/share/doc/pdns/schema.sqlite3.sql
|
||||
chown -R pdns:pdns /var/lib/powerdns
|
||||
msg_ok "Configured PowerDNS"
|
||||
|
||||
msg_info "Creating Service"
|
||||
$STD rc-update add pdns default
|
||||
$STD rc-service pdns start
|
||||
msg_ok "Created Service"
|
||||
|
||||
motd_ssh
|
||||
customize
|
||||
cleanup_lxc
|
||||
@ -1,107 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright (c) 2021-2026 community-scripts ORG
|
||||
# Author: thost96 (thost96)
|
||||
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
|
||||
# Source: https://www.authelia.com/
|
||||
|
||||
source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
|
||||
color
|
||||
verb_ip6
|
||||
catch_errors
|
||||
setting_up_container
|
||||
network_check
|
||||
update_os
|
||||
|
||||
fetch_and_deploy_gh_release "authelia" "authelia/authelia" "binary"
|
||||
|
||||
MAX_ATTEMPTS=3
|
||||
attempt=0
|
||||
while true; do
|
||||
attempt=$((attempt + 1))
|
||||
read -rp "${TAB3}Enter your domain or IP (ex. example.com or 192.168.1.100): " DOMAIN
|
||||
if [[ -z "$DOMAIN" ]]; then
|
||||
if ((attempt >= MAX_ATTEMPTS)); then
|
||||
DOMAIN="${LOCAL_IP:-localhost}"
|
||||
msg_warn "Using fallback: $DOMAIN"
|
||||
break
|
||||
fi
|
||||
msg_warn "Domain cannot be empty! (Attempt $attempt/$MAX_ATTEMPTS)"
|
||||
elif [[ "$DOMAIN" =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}$ ]]; then
|
||||
valid_ip=true
|
||||
IFS='.' read -ra octets <<< "$DOMAIN"
|
||||
for octet in "${octets[@]}"; do
|
||||
if ((octet > 255)); then
|
||||
valid_ip=false
|
||||
break
|
||||
fi
|
||||
done
|
||||
if $valid_ip; then
|
||||
break
|
||||
else
|
||||
msg_warn "Invalid IP address!"
|
||||
fi
|
||||
elif [[ "$DOMAIN" =~ ^[a-zA-Z0-9]([a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(\.[a-zA-Z0-9]([a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*\.[a-zA-Z]{2,}$ ]]; then
|
||||
break
|
||||
else
|
||||
msg_warn "Invalid domain format!"
|
||||
fi
|
||||
done
|
||||
msg_info "Setting Authelia up"
|
||||
touch /etc/authelia/emails.txt
|
||||
JWT_SECRET=$(openssl rand -hex 64)
|
||||
SESSION_SECRET=$(openssl rand -hex 64)
|
||||
STORAGE_KEY=$(openssl rand -hex 64)
|
||||
|
||||
if [[ "$DOMAIN" =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}$ ]]; then
|
||||
AUTHELIA_URL="https://${DOMAIN}:9091"
|
||||
else
|
||||
AUTHELIA_URL="https://auth.${DOMAIN}"
|
||||
fi
|
||||
echo "$AUTHELIA_URL" > /etc/authelia/.authelia_url
|
||||
|
||||
cat <<EOF >/etc/authelia/users.yml
|
||||
users:
|
||||
authelia:
|
||||
disabled: false
|
||||
displayname: "Authelia Admin"
|
||||
password: "\$argon2id\$v=19\$m=65536,t=3,p=4\$ZBopMzXrzhHXPEZxRDVT2w\$SxWm96DwhOsZyn34DLocwQEIb4kCDsk632PuiMdZnig"
|
||||
groups: []
|
||||
EOF
|
||||
cat <<EOF >/etc/authelia/configuration.yml
|
||||
authentication_backend:
|
||||
file:
|
||||
path: /etc/authelia/users.yml
|
||||
access_control:
|
||||
default_policy: one_factor
|
||||
session:
|
||||
secret: "${SESSION_SECRET}"
|
||||
name: 'authelia_session'
|
||||
same_site: 'lax'
|
||||
inactivity: '5m'
|
||||
expiration: '1h'
|
||||
remember_me: '1M'
|
||||
cookies:
|
||||
- domain: "${DOMAIN}"
|
||||
authelia_url: "${AUTHELIA_URL}"
|
||||
storage:
|
||||
encryption_key: "${STORAGE_KEY}"
|
||||
local:
|
||||
path: /etc/authelia/db.sqlite
|
||||
identity_validation:
|
||||
reset_password:
|
||||
jwt_secret: "${JWT_SECRET}"
|
||||
jwt_lifespan: '5 minutes'
|
||||
jwt_algorithm: 'HS256'
|
||||
notifier:
|
||||
filesystem:
|
||||
filename: /etc/authelia/emails.txt
|
||||
EOF
|
||||
touch /etc/authelia/emails.txt
|
||||
chown -R authelia:authelia /etc/authelia
|
||||
systemctl enable -q --now authelia
|
||||
msg_ok "Authelia Setup completed"
|
||||
|
||||
motd_ssh
|
||||
customize
|
||||
cleanup_lxc
|
||||
25
install/drawio-install.sh
Normal file
25
install/drawio-install.sh
Normal file
@ -0,0 +1,25 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright (c) 2021-2026 community-scripts ORG
|
||||
# Author: Slaviša Arežina (tremor021)
|
||||
# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
|
||||
# Source: https://www.drawio.com/
|
||||
|
||||
source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
|
||||
color
|
||||
verb_ip6
|
||||
catch_errors
|
||||
setting_up_container
|
||||
network_check
|
||||
update_os
|
||||
setup_hwaccel
|
||||
|
||||
msg_info "Installing Dependencies"
|
||||
$STD apt install -y tomcat11
|
||||
msg_ok "Installed Dependencies"
|
||||
|
||||
USE_ORIGINAL_FILENAME=true fetch_and_deploy_gh_release "drawio" "jgraph/drawio" "singlefile" "latest" "/var/lib/tomcat11/webapps" "draw.war"
|
||||
|
||||
motd_ssh
|
||||
customize
|
||||
cleanup_lxc
|
||||
@ -13,16 +13,9 @@ setting_up_container
|
||||
network_check
|
||||
update_os
|
||||
|
||||
setup_deb822_repo \
|
||||
"ebusd" \
|
||||
"https://raw.githubusercontent.com/john30/ebusd-debian/master/ebusd.gpg" \
|
||||
"https://repo.ebusd.eu/apt/default/bookworm/" \
|
||||
"bookworm" \
|
||||
"main"
|
||||
|
||||
msg_info "Installing ebusd"
|
||||
$STD apt install -y ebusd
|
||||
systemctl enable -q ebusd
|
||||
fetch_and_deploy_gh_release "ebusd" "john30/ebusd" "binary" "latest" "" "ebusd-*_amd64-trixie_mqtt1.deb"
|
||||
systemctl enable -q ebusd.service
|
||||
msg_ok "Installed ebusd"
|
||||
|
||||
motd_ssh
|
||||
|
||||
@ -1,45 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright (c) 2021-2026 community-scripts ORG
|
||||
# Author: kkroboth
|
||||
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
|
||||
# Source: https://fileflows.com/
|
||||
|
||||
# Import Functions und Setup
|
||||
source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
|
||||
color
|
||||
verb_ip6
|
||||
catch_errors
|
||||
setting_up_container
|
||||
network_check
|
||||
update_os
|
||||
|
||||
msg_info "Installing Dependencies"
|
||||
$STD apt install -y \
|
||||
ffmpeg \
|
||||
imagemagick
|
||||
msg_ok "Installed Dependencies"
|
||||
|
||||
setup_hwaccel
|
||||
setup_deb822_repo \
|
||||
"microsoft" \
|
||||
"https://packages.microsoft.com/keys/microsoft-2025.asc" \
|
||||
"https://packages.microsoft.com/debian/13/prod/" \
|
||||
"trixie"
|
||||
fetch_and_deploy_archive "https://fileflows.com/downloads/zip" "/opt/fileflows"
|
||||
|
||||
msg_info "Installing ASP.NET Core Runtime"
|
||||
$STD apt install -y aspnetcore-runtime-8.0
|
||||
msg_ok "Installed ASP.NET Core Runtime"
|
||||
|
||||
msg_info "Setting up FileFlows"
|
||||
$STD ln -svf /usr/bin/ffmpeg /usr/local/bin/ffmpeg
|
||||
$STD ln -svf /usr/bin/ffprobe /usr/local/bin/ffprobe
|
||||
cd /opt/fileflows/Server
|
||||
$STD dotnet FileFlows.Server.dll --systemd install --root true
|
||||
systemctl enable -q --now fileflows
|
||||
msg_ok "Setup FileFlows"
|
||||
|
||||
motd_ssh
|
||||
customize
|
||||
cleanup_lxc
|
||||
@ -1,111 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright (c) 2021-2026 community-scripts ORG
|
||||
# Author: Arian Nasr (arian-nasr)
|
||||
# Updated by: Javier Pastor (vsc55)
|
||||
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
|
||||
# Source: https://www.freepbx.org/
|
||||
|
||||
INSTALL_URL="https://github.com/FreePBX/sng_freepbx_debian_install/raw/master/sng_freepbx_debian_install.sh"
|
||||
INSTALL_PATH="/opt/sng_freepbx_debian_install.sh"
|
||||
|
||||
source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
|
||||
color
|
||||
verb_ip6
|
||||
catch_errors
|
||||
setting_up_container
|
||||
network_check
|
||||
update_os
|
||||
|
||||
ONLY_OPENSOURCE="${ONLY_OPENSOURCE:-no}"
|
||||
REMOVE_FIREWALL="${REMOVE_FIREWALL:-no}"
|
||||
msg_ok "Remove Commercial modules is set to: $ONLY_OPENSOURCE"
|
||||
msg_ok "Remove Firewall module is set to: $REMOVE_FIREWALL"
|
||||
|
||||
msg_info "Downloading FreePBX installation script..."
|
||||
if curl -fsSL "$INSTALL_URL" -o "$INSTALL_PATH"; then
|
||||
msg_ok "Download completed successfully"
|
||||
else
|
||||
curl_exit_code=$?
|
||||
msg_error "Error downloading FreePBX installation script (curl exit code: $curl_exit_code)"
|
||||
msg_error "Aborting!"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ "$VERBOSE" == "yes" ]]; then
|
||||
msg_info "Installing FreePBX (Verbose)\n"
|
||||
else
|
||||
msg_info "Installing FreePBX, be patient, this takes time..."
|
||||
fi
|
||||
$STD bash "$INSTALL_PATH"
|
||||
|
||||
if [[ $ONLY_OPENSOURCE == "yes" ]]; then
|
||||
msg_info "Removing Commercial modules..."
|
||||
|
||||
end_count=0
|
||||
max=5
|
||||
count=0
|
||||
while fwconsole ma list | awk '/Commercial/ {found=1} END {exit !found}'; do
|
||||
count=$((count + 1))
|
||||
while read -r module; do
|
||||
msg_info "Removing module: $module"
|
||||
|
||||
if [[ "$REMOVE_FIREWALL" == "no" ]] && [[ "$module" == "sysadmin" ]]; then
|
||||
msg_warn "Skipping sysadmin module removal, it is required for Firewall!"
|
||||
continue
|
||||
fi
|
||||
|
||||
code=0
|
||||
$STD fwconsole ma -f remove $module || code=$?
|
||||
if [[ $code -ne 0 ]]; then
|
||||
msg_error "Module $module could not be removed - error code $code"
|
||||
else
|
||||
msg_ok "Module $module removed successfully"
|
||||
fi
|
||||
done < <(fwconsole ma list | awk '/Commercial/ {print $2}')
|
||||
|
||||
[[ $count -ge $max ]] && break
|
||||
|
||||
com_list=$(fwconsole ma list)
|
||||
end_count=$(awk '/Commercial/ {count++} END {print count + 0}' <<<"$com_list")
|
||||
awk '/Commercial/ {found=1} END {exit !found}' <<<"$com_list" || break
|
||||
if [[ "$REMOVE_FIREWALL" == "no" ]] &&
|
||||
[[ $end_count -eq 1 ]] &&
|
||||
[[ $(awk '/Commercial/ {print $2}' <<<"$com_list") == "sysadmin" ]]; then
|
||||
break
|
||||
fi
|
||||
|
||||
msg_warn "Not all commercial modules could be removed, retrying (attempt $count of $max)..."
|
||||
done
|
||||
|
||||
if [[ $REMOVE_FIREWALL == "yes" ]] && [[ $end_count -gt 0 ]]; then
|
||||
msg_info "Removing Firewall module..."
|
||||
if $STD fwconsole ma -f remove firewall; then
|
||||
msg_ok "Firewall module removed successfully"
|
||||
else
|
||||
msg_error "Firewall module could not be removed, please check manually!"
|
||||
fi
|
||||
fi
|
||||
|
||||
if [[ $end_count -eq 0 ]]; then
|
||||
msg_ok "All commercial modules removed successfully"
|
||||
elif [[ $end_count -eq 1 ]] && [[ $REMOVE_FIREWALL == "no" ]] && [[ $(fwconsole ma list | awk '/Commercial/ {print $2}') == "sysadmin" ]]; then
|
||||
msg_ok "Only sysadmin module left, which is required for Firewall, skipping removal"
|
||||
else
|
||||
msg_warn "Some commercial modules could not be removed, please check the web interface for removal manually!"
|
||||
fi
|
||||
|
||||
msg_info "Reloading FreePBX..."
|
||||
$STD fwconsole reload
|
||||
msg_ok "FreePBX reloaded completely"
|
||||
fi
|
||||
msg_ok "Installed FreePBX finished"
|
||||
|
||||
motd_ssh
|
||||
customize
|
||||
|
||||
msg_info "Cleaning up"
|
||||
rm -f "$INSTALL_PATH"
|
||||
$STD apt-get -y autoremove
|
||||
$STD apt-get -y autoclean
|
||||
msg_ok "Cleaned"
|
||||
@ -178,7 +178,7 @@ NODE_VERSION="22" NODE_MODULE="yarn" setup_nodejs
|
||||
|
||||
msg_info "Downloading Inference Models"
|
||||
mkdir -p /models /openvino-model
|
||||
wget -q -O edgetpu_model.tflite https://github.com/google-coral/test_data/raw/release-frogfish/ssdlite_mobiledet_coco_qat_postprocess_edgetpu.tflite
|
||||
wget -q -O /edgetpu_model.tflite https://github.com/google-coral/test_data/raw/release-frogfish/ssdlite_mobiledet_coco_qat_postprocess_edgetpu.tflite
|
||||
wget -q -O /models/cpu_model.tflite https://github.com/google-coral/test_data/raw/release-frogfish/ssdlite_mobiledet_coco_qat_postprocess.tflite
|
||||
cp /opt/frigate/labelmap.txt /labelmap.txt
|
||||
msg_ok "Downloaded Inference Models"
|
||||
@ -210,12 +210,15 @@ msg_info "Building OpenVino Model"
|
||||
cd /models
|
||||
wget -q http://download.tensorflow.org/models/object_detection/ssdlite_mobilenet_v2_coco_2018_05_09.tar.gz
|
||||
$STD tar -zxf ssdlite_mobilenet_v2_coco_2018_05_09.tar.gz --no-same-owner
|
||||
$STD python3 /opt/frigate/docker/main/build_ov_model.py
|
||||
cp /models/ssdlite_mobilenet_v2.xml /openvino-model/
|
||||
cp /models/ssdlite_mobilenet_v2.bin /openvino-model/
|
||||
wget -q https://github.com/openvinotoolkit/open_model_zoo/raw/master/data/dataset_classes/coco_91cl_bkgr.txt -O /openvino-model/coco_91cl_bkgr.txt
|
||||
sed -i 's/truck/car/g' /openvino-model/coco_91cl_bkgr.txt
|
||||
msg_ok "Built OpenVino Model"
|
||||
if python3 /opt/frigate/docker/main/build_ov_model.py 2>&1; then
|
||||
cp /models/ssdlite_mobilenet_v2.xml /openvino-model/
|
||||
cp /models/ssdlite_mobilenet_v2.bin /openvino-model/
|
||||
wget -q https://github.com/openvinotoolkit/open_model_zoo/raw/master/data/dataset_classes/coco_91cl_bkgr.txt -O /openvino-model/coco_91cl_bkgr.txt
|
||||
sed -i 's/truck/car/g' /openvino-model/coco_91cl_bkgr.txt
|
||||
msg_ok "Built OpenVino Model"
|
||||
else
|
||||
msg_warn "OpenVino build failed (CPU may not support required instructions). Frigate will use CPU model."
|
||||
fi
|
||||
|
||||
msg_info "Building Frigate Application (Patience)"
|
||||
cd /opt/frigate
|
||||
|
||||
@ -1,45 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright (c) 2021-2026 community-scripts ORG
|
||||
# Author: fabrice1236
|
||||
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
|
||||
# Source: https://ghost.org/
|
||||
|
||||
source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
|
||||
color
|
||||
verb_ip6
|
||||
catch_errors
|
||||
setting_up_container
|
||||
network_check
|
||||
update_os
|
||||
|
||||
msg_info "Installing Dependencies"
|
||||
$STD apt install -y \
|
||||
nginx \
|
||||
ca-certificates \
|
||||
libjemalloc2 \
|
||||
git
|
||||
msg_ok "Installed Dependencies"
|
||||
|
||||
setup_mariadb
|
||||
MARIADB_DB_NAME="ghost" MARIADB_DB_USER="ghostuser" setup_mariadb_db
|
||||
NODE_VERSION="22" setup_nodejs
|
||||
|
||||
msg_info "Installing Ghost CLI"
|
||||
$STD npm install ghost-cli@latest -g
|
||||
msg_ok "Installed Ghost CLI"
|
||||
|
||||
msg_info "Creating Service"
|
||||
$STD adduser --disabled-password --gecos "Ghost user" ghost-user
|
||||
$STD usermod -aG sudo ghost-user
|
||||
echo "ghost-user ALL=(ALL) NOPASSWD:ALL" | tee /etc/sudoers.d/ghost-user
|
||||
mkdir -p /var/www/ghost
|
||||
chown -R ghost-user:ghost-user /var/www/ghost
|
||||
chmod 775 /var/www/ghost
|
||||
$STD sudo -u ghost-user -H sh -c "cd /var/www/ghost && ghost install --db=mysql --dbhost=localhost --dbuser=$MARIADB_DB_USER --dbpass=$MARIADB_DB_PASS --dbname=$MARIADB_DB_NAME --url=http://localhost:2368 --no-prompt --no-setup-nginx --no-setup-ssl --no-setup-mysql --enable --start --ip 0.0.0.0"
|
||||
rm /etc/sudoers.d/ghost-user
|
||||
msg_ok "Creating Service"
|
||||
|
||||
motd_ssh
|
||||
customize
|
||||
cleanup_lxc
|
||||
@ -1,53 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright (c) 2021-2026 community-scripts ORG
|
||||
# Author: Slaviša Arežina (tremor021)
|
||||
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
|
||||
# Source: https://www.grandstream.com/products/networking-solutions/wi-fi-management/product/gwn-manager
|
||||
|
||||
source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
|
||||
color
|
||||
verb_ip6
|
||||
catch_errors
|
||||
setting_up_container
|
||||
network_check
|
||||
update_os
|
||||
|
||||
msg_info "Installing Dependencies"
|
||||
$STD apt install -y \
|
||||
xfonts-utils \
|
||||
fontconfig
|
||||
msg_ok "Installed Dependencies"
|
||||
|
||||
msg_info "Setting up GWN Manager (Patience)"
|
||||
RELEASE=$(curl -s https://www.grandstream.com/support/tools#gwntools \
|
||||
| grep -oP 'https://firmware\.grandstream\.com/GWN_Manager-[^"]+-Ubuntu\.tar\.gz')
|
||||
download_file "$RELEASE" "/tmp/gwnmanager.tar.gz"
|
||||
cd /tmp
|
||||
tar -xzf gwnmanager.tar.gz --strip-components=1
|
||||
$STD ./install
|
||||
msg_ok "Setup GWN Manager"
|
||||
|
||||
msg_info "Creating Service"
|
||||
cat <<EOF >/etc/systemd/system/gwnmanager.service
|
||||
[Unit]
|
||||
Description=GWN Manager
|
||||
After=network.target
|
||||
Requires=network.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
WorkingDirectory=/gwn
|
||||
ExecStart=/gwn/gwn start
|
||||
Restart=on-failure
|
||||
RestartSec=10
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
EOF
|
||||
systemctl enable -q gwnmanager
|
||||
msg_ok "Created Service"
|
||||
|
||||
motd_ssh
|
||||
customize
|
||||
cleanup_lxc
|
||||
@ -1,57 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright (c) 2021-2026 tteck
|
||||
# Author: tteck (tteckster)
|
||||
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
|
||||
# Source: https://heimdall.site/
|
||||
|
||||
source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
|
||||
color
|
||||
verb_ip6
|
||||
catch_errors
|
||||
setting_up_container
|
||||
network_check
|
||||
update_os
|
||||
|
||||
msg_info "Installing Dependencies"
|
||||
$STD apt install -y apt-transport-https
|
||||
msg_ok "Installed Dependencies"
|
||||
|
||||
PHP_VERSION="8.4" PHP_MODULE="bz2,sqlite3" PHP_FPM="YES" setup_php
|
||||
setup_composer
|
||||
fetch_and_deploy_gh_release "Heimdall" "linuxserver/Heimdall" "tarball"
|
||||
|
||||
msg_info "Setting up Heimdall-Dashboard"
|
||||
cd /opt/Heimdall
|
||||
cp .env.example .env
|
||||
$STD php artisan key:generate
|
||||
msg_ok "Setup Heimdall-Dashboard"
|
||||
|
||||
msg_info "Creating Service"
|
||||
cat <<EOF >/etc/systemd/system/heimdall.service
|
||||
[Unit]
|
||||
Description=Heimdall
|
||||
After=network.target
|
||||
|
||||
[Service]
|
||||
Restart=always
|
||||
RestartSec=5
|
||||
Type=simple
|
||||
User=root
|
||||
WorkingDirectory=/opt/Heimdall
|
||||
ExecStart=/usr/bin/php artisan serve --port 7990 --host 0.0.0.0
|
||||
TimeoutStopSec=30
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target"
|
||||
EOF
|
||||
systemctl enable -q --now heimdall
|
||||
cd /opt/Heimdall
|
||||
export COMPOSER_ALLOW_SUPERUSER=1
|
||||
$STD composer dump-autoload
|
||||
systemctl restart heimdall.service
|
||||
msg_ok "Created Service"
|
||||
|
||||
motd_ssh
|
||||
customize
|
||||
cleanup_lxc
|
||||
@ -1,88 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright (c) 2021-2026 community-scripts ORG
|
||||
# Author: MickLesk (Canbiz)
|
||||
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
|
||||
# Source: https://github.com/homarr-labs/homarr
|
||||
|
||||
source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
|
||||
color
|
||||
verb_ip6
|
||||
catch_errors
|
||||
setting_up_container
|
||||
network_check
|
||||
update_os
|
||||
|
||||
msg_info "Installing Dependencies"
|
||||
$STD apt install -y \
|
||||
redis-server \
|
||||
nginx \
|
||||
gettext \
|
||||
openssl
|
||||
msg_ok "Installed Dependencies"
|
||||
|
||||
NODE_VERSION=$(curl -s https://raw.githubusercontent.com/Meierschlumpf/homarr/dev/package.json | jq -r '.engines.node | split(">=")[1] | split(".")[0]')
|
||||
setup_nodejs
|
||||
fetch_and_deploy_gh_release "homarr" "Meierschlumpf/homarr" "prebuild" "latest" "/opt/homarr" "source-debian-amd64.tar.gz"
|
||||
|
||||
msg_info "Installing Homarr"
|
||||
mkdir -p /opt/homarr_db
|
||||
touch /opt/homarr_db/db.sqlite
|
||||
SECRET_ENCRYPTION_KEY="$(openssl rand -hex 32)"
|
||||
cd /opt/homarr
|
||||
cat <<EOF >/opt/homarr.env
|
||||
DB_DRIVER='better-sqlite3'
|
||||
DB_DIALECT='sqlite'
|
||||
SECRET_ENCRYPTION_KEY='${SECRET_ENCRYPTION_KEY}'
|
||||
DB_URL='/opt/homarr_db/db.sqlite'
|
||||
TURBO_TELEMETRY_DISABLED=1
|
||||
AUTH_PROVIDERS='credentials'
|
||||
NODE_ENV='production'
|
||||
REDIS_IS_EXTERNAL='true'
|
||||
EOF
|
||||
msg_ok "Installed Homarr"
|
||||
|
||||
msg_info "Copying config files"
|
||||
mkdir -p /appdata/redis
|
||||
chown -R redis:redis /appdata/redis
|
||||
chmod 744 /appdata/redis
|
||||
cp /opt/homarr/redis.conf /etc/redis/redis.conf
|
||||
rm /etc/nginx/nginx.conf
|
||||
mkdir -p /etc/nginx/templates
|
||||
cp /opt/homarr/nginx.conf /etc/nginx/templates/nginx.conf
|
||||
echo $'#!/bin/bash\ncd /opt/homarr/apps/cli && node ./cli.cjs "$@"' >/usr/bin/homarr
|
||||
chmod +x /usr/bin/homarr
|
||||
msg_ok "Copied config files"
|
||||
|
||||
msg_info "Creating Services"
|
||||
mkdir -p /etc/systemd/system/redis-server.service.d/
|
||||
cat > /etc/systemd/system/redis-server.service.d/override.conf << 'EOF'
|
||||
[Service]
|
||||
ReadWritePaths=-/appdata/redis -/var/lib/redis -/var/log/redis -/var/run/redis -/etc/redis
|
||||
EOF
|
||||
cat <<EOF >/etc/systemd/system/homarr.service
|
||||
[Unit]
|
||||
Requires=redis-server.service
|
||||
After=redis-server.service
|
||||
Description=Homarr Service
|
||||
After=network.target
|
||||
|
||||
[Service]
|
||||
Type=exec
|
||||
WorkingDirectory=/opt/homarr
|
||||
EnvironmentFile=-/opt/homarr.env
|
||||
ExecStart=/opt/homarr/run.sh
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
EOF
|
||||
chmod +x /opt/homarr/run.sh
|
||||
systemctl daemon-reload
|
||||
systemctl enable -q --now redis-server && sleep 5
|
||||
systemctl enable -q --now homarr
|
||||
systemctl disable -q --now nginx
|
||||
msg_ok "Created Services"
|
||||
|
||||
motd_ssh
|
||||
customize
|
||||
cleanup_lxc
|
||||
@ -1,62 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright (c) 2021-2026 tteck
|
||||
# Author: tteck (tteckster)
|
||||
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
|
||||
# Source: https://docs.jellyseerr.dev/
|
||||
|
||||
source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
|
||||
color
|
||||
verb_ip6
|
||||
catch_errors
|
||||
setting_up_container
|
||||
network_check
|
||||
update_os
|
||||
|
||||
msg_info "Installing Dependencies"
|
||||
$STD apt-get install -y build-essential
|
||||
msg_ok "Installed Dependencies"
|
||||
|
||||
git clone -q https://github.com/Fallenbagel/jellyseerr.git /opt/jellyseerr
|
||||
cd /opt/jellyseerr
|
||||
$STD git checkout main
|
||||
|
||||
pnpm_desired=$(grep -Po '"pnpm":\s*"\K[^"]+' /opt/jellyseerr/package.json)
|
||||
NODE_VERSION="22" NODE_MODULE="pnpm@$pnpm_desired" setup_nodejs
|
||||
|
||||
msg_info "Installing Jellyseerr (Patience)"
|
||||
export CYPRESS_INSTALL_BINARY=0
|
||||
cd /opt/jellyseerr
|
||||
$STD pnpm install --frozen-lockfile
|
||||
export NODE_OPTIONS="--max-old-space-size=3072"
|
||||
$STD pnpm build
|
||||
mkdir -p /etc/jellyseerr/
|
||||
cat <<EOF >/etc/jellyseerr/jellyseerr.conf
|
||||
PORT=5055
|
||||
# HOST=0.0.0.0
|
||||
# JELLYFIN_TYPE=emby
|
||||
EOF
|
||||
msg_ok "Installed Jellyseerr"
|
||||
|
||||
msg_info "Creating Service"
|
||||
cat <<EOF >/etc/systemd/system/jellyseerr.service
|
||||
[Unit]
|
||||
Description=jellyseerr Service
|
||||
After=network.target
|
||||
|
||||
[Service]
|
||||
EnvironmentFile=/etc/jellyseerr/jellyseerr.conf
|
||||
Environment=NODE_ENV=production
|
||||
Type=exec
|
||||
WorkingDirectory=/opt/jellyseerr
|
||||
ExecStart=/usr/bin/node dist/index.js
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
EOF
|
||||
systemctl enable -q --now jellyseerr
|
||||
msg_ok "Created Service"
|
||||
|
||||
motd_ssh
|
||||
customize
|
||||
cleanup_lxc
|
||||
@ -1,78 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright (c) 2021-2026 community-scripts ORG
|
||||
# Author: Slaviša Arežina (tremor021)
|
||||
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
|
||||
# Source: https://joplinapp.org/
|
||||
|
||||
source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
|
||||
color
|
||||
verb_ip6
|
||||
catch_errors
|
||||
setting_up_container
|
||||
network_check
|
||||
update_os
|
||||
|
||||
msg_info "Installing Dependencies"
|
||||
$STD apt install -y \
|
||||
git \
|
||||
rsync
|
||||
msg_ok "Installed Dependencies"
|
||||
|
||||
PG_VERSION="17" setup_postgresql
|
||||
PG_DB_NAME="joplin" PG_DB_USER="joplin" setup_postgresql_db
|
||||
NODE_VERSION=24 NODE_MODULE="yarn,npm,pm2" setup_nodejs
|
||||
mkdir -p /opt/pm2
|
||||
export PM2_HOME=/opt/pm2
|
||||
$STD pm2 install pm2-logrotate
|
||||
$STD pm2 set pm2-logrotate:max_size 100MB
|
||||
$STD pm2 set pm2-logrotate:retain 5
|
||||
$STD pm2 set pm2-logrotate:compress tr
|
||||
|
||||
fetch_and_deploy_gh_release "joplin-server" "laurent22/joplin" "tarball"
|
||||
|
||||
msg_info "Setting up Joplin Server (Patience)"
|
||||
cd /opt/joplin-server
|
||||
sed -i "/onenote-converter/d" packages/lib/package.json
|
||||
$STD yarn config set --home enableTelemetry 0
|
||||
export BUILD_SEQUENCIAL=1
|
||||
$STD yarn workspaces focus @joplin/server
|
||||
$STD yarn workspaces foreach -R --topological-dev --from @joplin/server run build
|
||||
$STD yarn workspaces foreach -R --topological-dev --from @joplin/server run tsc
|
||||
cat <<EOF >/opt/joplin-server/.env
|
||||
PM2_HOME=/opt/pm2
|
||||
NODE_ENV=production
|
||||
APP_BASE_URL=http://$LOCAL_IP:22300
|
||||
APP_PORT=22300
|
||||
DB_CLIENT=pg
|
||||
POSTGRES_PASSWORD=$PG_DB_PASS
|
||||
POSTGRES_DATABASE=$PG_DB_NAME
|
||||
POSTGRES_USER=$PG_DB_USER
|
||||
POSTGRES_PORT=5432
|
||||
POSTGRES_HOST=localhost
|
||||
EOF
|
||||
msg_ok "Setup Joplin Server"
|
||||
|
||||
msg_info "Setting up Service"
|
||||
cat <<EOF >/etc/systemd/system/joplin-server.service
|
||||
[Unit]
|
||||
Description=Joplin Server Service
|
||||
After=network.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
WorkingDirectory=/opt/joplin-server/packages/server
|
||||
EnvironmentFile=/opt/joplin-server/.env
|
||||
ExecStart=/usr/bin/yarn start-prod
|
||||
Restart=on-failure
|
||||
RestartSec=5
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
EOF
|
||||
systemctl enable -q --now joplin-server
|
||||
msg_ok "Service Setup"
|
||||
|
||||
motd_ssh
|
||||
customize
|
||||
cleanup_lxc
|
||||
@ -1,65 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright (c) 2021-2026 community-scripts ORG
|
||||
# Author: vhsdream | MickLesk
|
||||
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
|
||||
# Source: https://github.com/fccview/jotty
|
||||
|
||||
source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
|
||||
color
|
||||
verb_ip6
|
||||
catch_errors
|
||||
setting_up_container
|
||||
network_check
|
||||
update_os
|
||||
|
||||
NODE_VERSION="22" NODE_MODULE="yarn" setup_nodejs
|
||||
fetch_and_deploy_gh_release "jotty" "fccview/jotty" "prebuild" "latest" "/opt/jotty" "jotty_*_prebuild.tar.gz"
|
||||
|
||||
msg_info "Setup jotty"
|
||||
mkdir -p data/{users,checklists,notes}
|
||||
|
||||
cat <<EOF >/opt/jotty/.env
|
||||
NODE_ENV=production
|
||||
# --- Uncomment to enable
|
||||
# APP_URL=https://your-jotty-domain.com
|
||||
# INTERNAL_API_URL=http://localhost:3000
|
||||
# HTTPS=true
|
||||
# SERVE_PUBLIC_IMAGES=yes
|
||||
# SERVE_PUBLIC_FILES=yes
|
||||
# SERVE_PUBLIC_VIDEOS=yes
|
||||
# STOP_CHECK_UPDATES=yes
|
||||
# --- For troubleshooting
|
||||
# DEBUGGER=true
|
||||
|
||||
# --- SSO with OIDC (optional)
|
||||
# SSO_MODE=oidc
|
||||
# OIDC_ISSUER=<your-oidc-issuer-url>
|
||||
# OIDC_CLIENT_ID=<oidc-client-id>
|
||||
# SSO_FALLBACK_LOCAL=yes
|
||||
# OIDC_CLIENT_SECRET=your_client_secret
|
||||
# OIDC_ADMIN_GROUPS=admins
|
||||
EOF
|
||||
msg_ok "Setup jotty"
|
||||
|
||||
msg_info "Creating Service"
|
||||
cat <<EOF >/etc/systemd/system/jotty.service
|
||||
[Unit]
|
||||
Description=jotty server
|
||||
After=network.target
|
||||
|
||||
[Service]
|
||||
WorkingDirectory=/opt/jotty
|
||||
EnvironmentFile=/opt/jotty/.env
|
||||
ExecStart=/usr/bin/node server.js
|
||||
Restart=on-abnormal
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
EOF
|
||||
systemctl enable -q --now jotty
|
||||
msg_ok "Created Service"
|
||||
|
||||
motd_ssh
|
||||
customize
|
||||
cleanup_lxc
|
||||
126
install/linkding-install.sh
Normal file
126
install/linkding-install.sh
Normal file
@ -0,0 +1,126 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright (c) 2021-2026 community-scripts ORG
|
||||
# Author: MickLesk (MickLesk)
|
||||
# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
|
||||
# Source: https://linkding.link/
|
||||
|
||||
source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
|
||||
color
|
||||
verb_ip6
|
||||
catch_errors
|
||||
setting_up_container
|
||||
network_check
|
||||
update_os
|
||||
|
||||
msg_info "Installing Dependencies"
|
||||
$STD apt install -y \
|
||||
build-essential \
|
||||
pkg-config \
|
||||
python3-dev \
|
||||
nginx \
|
||||
libpq-dev \
|
||||
libicu-dev \
|
||||
libsqlite3-dev \
|
||||
libffi-dev
|
||||
msg_ok "Installed Dependencies"
|
||||
|
||||
NODE_VERSION="22" setup_nodejs
|
||||
setup_uv
|
||||
fetch_and_deploy_gh_release "linkding" "sissbruecker/linkding"
|
||||
|
||||
msg_info "Building Frontend"
|
||||
cd /opt/linkding
|
||||
$STD npm ci
|
||||
$STD npm run build
|
||||
ln -sf /usr/lib/x86_64-linux-gnu/mod_icu.so /opt/linkding/libicu.so
|
||||
msg_ok "Built Frontend"
|
||||
|
||||
msg_info "Setting up linkding"
|
||||
rm -f bookmarks/settings/dev.py
|
||||
touch bookmarks/settings/custom.py
|
||||
$STD uv sync --no-dev --frozen
|
||||
$STD uv pip install gunicorn
|
||||
mkdir -p data/{favicons,previews,assets}
|
||||
ADMIN_PASS=$(openssl rand -base64 18 | tr -dc 'a-zA-Z0-9' | cut -c1-13)
|
||||
cat <<EOF >/opt/linkding/.env
|
||||
LD_SUPERUSER_NAME=admin
|
||||
LD_SUPERUSER_PASSWORD=${ADMIN_PASS}
|
||||
LD_CSRF_TRUSTED_ORIGINS=http://${LOCAL_IP}:9090
|
||||
EOF
|
||||
set -a && source /opt/linkding/.env && set +a
|
||||
$STD /opt/linkding/.venv/bin/python manage.py generate_secret_key
|
||||
$STD /opt/linkding/.venv/bin/python manage.py migrate
|
||||
$STD /opt/linkding/.venv/bin/python manage.py enable_wal
|
||||
$STD /opt/linkding/.venv/bin/python manage.py create_initial_superuser
|
||||
$STD /opt/linkding/.venv/bin/python manage.py collectstatic --no-input
|
||||
msg_ok "Set up linkding"
|
||||
|
||||
msg_info "Creating Services"
|
||||
cat <<EOF >/etc/systemd/system/linkding.service
|
||||
[Unit]
|
||||
Description=linkding Bookmark Manager
|
||||
After=network.target
|
||||
|
||||
[Service]
|
||||
User=root
|
||||
WorkingDirectory=/opt/linkding
|
||||
EnvironmentFile=/opt/linkding/.env
|
||||
ExecStart=/opt/linkding/.venv/bin/gunicorn \
|
||||
--bind 127.0.0.1:8000 \
|
||||
--workers 3 \
|
||||
--threads 2 \
|
||||
--timeout 120 \
|
||||
bookmarks.wsgi:application
|
||||
Restart=on-failure
|
||||
RestartSec=5
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
EOF
|
||||
cat <<EOF >/etc/systemd/system/linkding-tasks.service
|
||||
[Unit]
|
||||
Description=linkding Background Tasks
|
||||
After=network.target
|
||||
|
||||
[Service]
|
||||
User=root
|
||||
WorkingDirectory=/opt/linkding
|
||||
EnvironmentFile=/opt/linkding/.env
|
||||
ExecStart=/opt/linkding/.venv/bin/python manage.py run_huey
|
||||
Restart=on-failure
|
||||
RestartSec=5
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
EOF
|
||||
cat <<'EOF' >/etc/nginx/sites-available/linkding
|
||||
server {
|
||||
listen 9090;
|
||||
server_name _;
|
||||
|
||||
client_max_body_size 20M;
|
||||
|
||||
location /static/ {
|
||||
alias /opt/linkding/static/;
|
||||
expires 30d;
|
||||
}
|
||||
|
||||
location / {
|
||||
proxy_pass http://127.0.0.1:8000;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
proxy_redirect off;
|
||||
}
|
||||
}
|
||||
EOF
|
||||
$STD rm -f /etc/nginx/sites-enabled/default
|
||||
$STD ln -sf /etc/nginx/sites-available/linkding /etc/nginx/sites-enabled/linkding
|
||||
systemctl enable -q --now nginx linkding linkding-tasks
|
||||
systemctl restart nginx
|
||||
msg_ok "Created Services"
|
||||
|
||||
motd_ssh
|
||||
customize
|
||||
cleanup_lxc
|
||||
@ -1,76 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright (c) 2021-2026 community-scripts ORG
|
||||
# Author: MickLesk (Canbiz)
|
||||
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
|
||||
# Source: https://linkwarden.app/
|
||||
|
||||
source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
|
||||
color
|
||||
verb_ip6
|
||||
catch_errors
|
||||
setting_up_container
|
||||
network_check
|
||||
update_os
|
||||
|
||||
msg_info "Installing Dependencies"
|
||||
$STD apt install -y build-essential
|
||||
msg_ok "Installed Dependencies"
|
||||
|
||||
NODE_VERSION="22" setup_nodejs
|
||||
PG_VERSION="16" setup_postgresql
|
||||
PG_DB_NAME="linkwardendb" PG_DB_USER="linkwarden" setup_postgresql_db
|
||||
RUST_CRATES="monolith" setup_rust
|
||||
fetch_and_deploy_gh_release "linkwarden" "linkwarden/linkwarden"
|
||||
|
||||
|
||||
read -r -p "${TAB3}Would you like to add Adminer? <y/N> " prompt
|
||||
if [[ "${prompt,,}" =~ ^(y|yes)$ ]]; then
|
||||
setup_adminer
|
||||
fi
|
||||
|
||||
msg_info "Installing Linkwarden (Patience)"
|
||||
export COREPACK_ENABLE_DOWNLOAD_PROMPT=0
|
||||
export PRISMA_HIDE_UPDATE_MESSAGE=1
|
||||
export DEBIAN_FRONTEND=noninteractive
|
||||
corepack enable
|
||||
SECRET_KEY="$(head /dev/urandom | tr -dc A-Za-z0-9 | head -c 32)"
|
||||
cd /opt/linkwarden
|
||||
$STD yarn workspaces focus linkwarden @linkwarden/web @linkwarden/worker
|
||||
# $STD npx playwright install-deps
|
||||
# $STD yarn playwright install
|
||||
|
||||
cat <<EOF >/opt/linkwarden/.env
|
||||
NEXTAUTH_SECRET=${SECRET_KEY}
|
||||
NEXTAUTH_URL=http://${LOCAL_IP}:3000
|
||||
DATABASE_URL=postgresql://${PG_DB_USER}:${PG_DB_PASS}@localhost:5432/${PG_DB_NAME}
|
||||
EOF
|
||||
$STD yarn prisma:generate
|
||||
$STD yarn web:build
|
||||
$STD yarn prisma:deploy
|
||||
rm -rf ~/.cargo/registry ~/.cargo/git ~/.cargo/.package-cache
|
||||
rm -rf /root/.cache/yarn
|
||||
rm -rf /opt/linkwarden/.next/cache
|
||||
msg_ok "Installed Linkwarden"
|
||||
|
||||
msg_info "Creating Service"
|
||||
cat <<EOF >/etc/systemd/system/linkwarden.service
|
||||
[Unit]
|
||||
Description=Linkwarden Service
|
||||
After=network.target
|
||||
|
||||
[Service]
|
||||
Type=exec
|
||||
Environment=PATH=$PATH
|
||||
WorkingDirectory=/opt/linkwarden
|
||||
ExecStart=/usr/bin/yarn concurrently:start
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
EOF
|
||||
systemctl enable -q --now linkwarden
|
||||
msg_ok "Created Service"
|
||||
|
||||
motd_ssh
|
||||
customize
|
||||
cleanup_lxc
|
||||
@ -1,52 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright (c) 2021-2026 tteck
|
||||
# Author: tteck
|
||||
# Co-Author: MickLesk (Canbiz)
|
||||
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
|
||||
# Source: https://github.com/usememos/memos
|
||||
|
||||
source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
|
||||
color
|
||||
verb_ip6
|
||||
catch_errors
|
||||
setting_up_container
|
||||
network_check
|
||||
update_os
|
||||
|
||||
setup_go
|
||||
NODE_MODULE="pnpm" setup_nodejs
|
||||
fetch_and_deploy_gh_release "memos" "usememos/memos" "tarball"
|
||||
|
||||
msg_info "Building Memos (patience)"
|
||||
cd /opt/memos/web
|
||||
$STD pnpm install --frozen-lockfile
|
||||
$STD pnpm release
|
||||
cd /opt/memos
|
||||
$STD go build -o memos ./cmd/memos
|
||||
mkdir -p /opt/memos_data
|
||||
msg_ok "Built Memos"
|
||||
|
||||
msg_info "Creating Service"
|
||||
cat <<EOF >/etc/systemd/system/memos.service
|
||||
[Unit]
|
||||
Description=Memos Server
|
||||
After=network.target
|
||||
|
||||
[Service]
|
||||
ExecStart=/opt/memos/memos
|
||||
Environment="MEMOS_MODE=prod"
|
||||
Environment="MEMOS_PORT=9030"
|
||||
Environment="MEMOS_DATA=/opt/memos_data"
|
||||
WorkingDirectory=/opt/memos
|
||||
Restart=always
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
EOF
|
||||
systemctl enable -q --now memos
|
||||
msg_ok "Created Service"
|
||||
|
||||
motd_ssh
|
||||
customize
|
||||
cleanup_lxc
|
||||
@ -33,19 +33,18 @@ APP_DIR="/opt/nextExplorer/app"
|
||||
LOCAL_IP="$(hostname -I | awk '{print $1}')"
|
||||
mkdir -p "$APP_DIR"
|
||||
mkdir -p /etc/nextExplorer
|
||||
cd /opt/nextExplorer/backend
|
||||
export NODE_ENV=production
|
||||
$STD npm ci
|
||||
unset NODE_ENV
|
||||
|
||||
cd /opt/nextExplorer/frontend
|
||||
export NODE_ENV=development
|
||||
$STD npm ci
|
||||
$STD npm run build -- --sourcemap false
|
||||
unset NODE_ENV
|
||||
|
||||
cd /opt/nextExplorer
|
||||
mv backend/{node_modules,src,package.json} "$APP_DIR"
|
||||
export NODE_ENV=production
|
||||
$STD npm ci --omit=dev --workspace backend
|
||||
mv node_modules "$APP_DIR"
|
||||
mv backend/{src,package.json} "$APP_DIR"
|
||||
unset NODE_ENV
|
||||
|
||||
export NODE_ENV=development
|
||||
export NODE_OPTIONS="--max-old-space-size=2048"
|
||||
$STD npm ci --workspace frontend
|
||||
$STD npm run -w frontend build -- --sourcemap false
|
||||
unset NODE_ENV
|
||||
mv frontend/dist/ "$APP_DIR"/src/public
|
||||
msg_ok "Built nextExplorer"
|
||||
|
||||
@ -84,6 +83,7 @@ SESSION_SECRET="${SECRET}"
|
||||
# OIDC_CLIENT_ID=
|
||||
# OIDC_CLIENT_SECRET=
|
||||
# OIDC_CALLBACK_URL=
|
||||
# OIDC_LOGOUT_URL=
|
||||
# OIDC_SCOPES=
|
||||
# OIDC_AUTO_CREATE_USERS=true
|
||||
|
||||
@ -145,7 +145,7 @@ User=explorer
|
||||
Group=explorer
|
||||
WorkingDirectory=/opt/nextExplorer/app
|
||||
EnvironmentFile=/etc/nextExplorer/.env
|
||||
ExecStart=/usr/bin/node ./src/app.js
|
||||
ExecStart=/usr/bin/node ./src/server.js
|
||||
Restart=always
|
||||
RestartSec=5
|
||||
StandardOutput=journal
|
||||
|
||||
@ -1,56 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright (c) 2021-2026 community-scripts ORG
|
||||
# Author: pfassina
|
||||
# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
|
||||
# Source: https://github.com/openclaw/openclaw
|
||||
|
||||
source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
|
||||
color
|
||||
verb_ip6
|
||||
catch_errors
|
||||
setting_up_container
|
||||
network_check
|
||||
update_os
|
||||
|
||||
msg_info "Installing Dependencies"
|
||||
$STD apt install -y git
|
||||
msg_ok "Installed Dependencies"
|
||||
|
||||
NODE_VERSION="22" NODE_MODULE="openclaw" setup_nodejs
|
||||
|
||||
msg_info "Setup OpenClaw"
|
||||
mkdir -p /root/.openclaw
|
||||
cat <<CONF >/root/.openclaw/openclaw.json
|
||||
{
|
||||
"gateway": {
|
||||
"bind": "lan",
|
||||
"port": 18789
|
||||
}
|
||||
}
|
||||
CONF
|
||||
msg_ok "Setup OpenClaw"
|
||||
|
||||
msg_info "Creating Service"
|
||||
cat <<EOF >/etc/systemd/system/openclaw.service
|
||||
[Unit]
|
||||
Description=OpenClaw Gateway
|
||||
After=network.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
ExecStart=/usr/bin/openclaw gateway --allow-unconfigured --port 18789 --bind lan
|
||||
Restart=always
|
||||
RestartSec=10
|
||||
Environment=NODE_ENV=production
|
||||
Environment=PATH=/usr/bin:/usr/local/bin:/bin
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
EOF
|
||||
systemctl enable -q openclaw
|
||||
msg_ok "Created Service"
|
||||
|
||||
motd_ssh
|
||||
customize
|
||||
cleanup_lxc
|
||||
71
install/skylite-ux-install.sh
Normal file
71
install/skylite-ux-install.sh
Normal file
@ -0,0 +1,71 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright (c) 2021-2026 community-scripts ORG
|
||||
# Author: bzumhagen
|
||||
# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
|
||||
# Source: https://github.com/Wetzel402/Skylite-UX
|
||||
|
||||
source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
|
||||
color
|
||||
verb_ip6
|
||||
catch_errors
|
||||
setting_up_container
|
||||
network_check
|
||||
update_os
|
||||
|
||||
msg_info "Installing Dependencies"
|
||||
$STD apt install -y openssl
|
||||
msg_ok "Installed Dependencies"
|
||||
|
||||
PG_VERSION="16" setup_postgresql
|
||||
NODE_VERSION="24" setup_nodejs
|
||||
PG_DB_NAME="skylite" PG_DB_USER="skylite" PG_DB_SCHEMA_PERMS="true" setup_postgresql_db
|
||||
fetch_and_deploy_gh_release "skylite-ux" "Wetzel402/Skylite-UX" "tarball" "2026.2.2"
|
||||
|
||||
msg_info "Configuring skylite-ux"
|
||||
cat <<EOF >/opt/skylite-ux/.env
|
||||
DATABASE_URL=postgresql://${PG_DB_USER}:${PG_DB_PASS}@localhost:5432/${PG_DB_NAME}
|
||||
NODE_ENV=production
|
||||
HOST=0.0.0.0
|
||||
NUXT_PUBLIC_TZ=Etc/UTC
|
||||
NUXT_PUBLIC_LOG_LEVEL=warn
|
||||
EOF
|
||||
msg_ok "Configured skylite-ux"
|
||||
|
||||
msg_info "Building skylite-ux"
|
||||
cd /opt/skylite-ux
|
||||
$STD npm ci
|
||||
$STD npx prisma generate
|
||||
$STD npm run build
|
||||
msg_ok "Built skylite-ux"
|
||||
|
||||
msg_info "Running Database Migrations"
|
||||
cd /opt/skylite-ux
|
||||
$STD npx prisma migrate deploy
|
||||
msg_ok "Ran Database Migrations"
|
||||
|
||||
msg_info "Creating Service"
|
||||
cat <<EOF >/etc/systemd/system/skylite-ux.service
|
||||
[Unit]
|
||||
Description=Skylite-UX
|
||||
After=network.target postgresql.service
|
||||
Wants=postgresql.service
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
User=root
|
||||
WorkingDirectory=/opt/skylite-ux
|
||||
EnvironmentFile=/opt/skylite-ux/.env
|
||||
ExecStart=/usr/bin/node /opt/skylite-ux/.output/server/index.mjs
|
||||
Restart=on-failure
|
||||
RestartSec=5
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
EOF
|
||||
systemctl enable -q --now skylite-ux
|
||||
msg_ok "Created Service"
|
||||
|
||||
motd_ssh
|
||||
customize
|
||||
cleanup_lxc
|
||||
@ -17,6 +17,7 @@ fetch_and_deploy_gh_release "sonobarr" "Dodelidoo-Labs/sonobarr" "tarball"
|
||||
PYTHON_VERSION="3.12" setup_uv
|
||||
|
||||
msg_info "Setting up sonobarr"
|
||||
$STD uv venv -c /opt/sonobarr/venv
|
||||
source /opt/sonobarr/venv/bin/activate
|
||||
$STD uv pip install --no-cache-dir -r /opt/sonobarr/requirements.txt
|
||||
mkdir -p /etc/sonobarr
|
||||
|
||||
@ -1,27 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright (c) 2021-2026 community-scripts ORG
|
||||
# Author: MickLesk (Canbiz) | Co-Author: CrazyWolf13
|
||||
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
|
||||
# Source: https://vikunja.io/
|
||||
|
||||
source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
|
||||
color
|
||||
verb_ip6
|
||||
catch_errors
|
||||
setting_up_container
|
||||
network_check
|
||||
update_os
|
||||
|
||||
fetch_and_deploy_gh_release "vikunja" "go-vikunja/vikunja" "binary"
|
||||
|
||||
msg_info "Setting up Vikunja"
|
||||
sed -i 's|^# \(service:\)|\1|' /etc/vikunja/config.yml
|
||||
sed -i "s|^ # \(publicurl: \).*| \1\"http://$LOCAL_IP\"|" /etc/vikunja/config.yml
|
||||
sed -i "0,/^ # \(timezone: \).*/s|| \1${tz}|" /etc/vikunja/config.yml
|
||||
systemctl enable -q --now vikunja
|
||||
msg_ok "Set up Vikunja"
|
||||
|
||||
motd_ssh
|
||||
customize
|
||||
cleanup_lxc
|
||||
@ -1,183 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright (c) 2021-2026 community-scripts ORG
|
||||
# Author: Slaviša Arežina (tremor021)
|
||||
# License: MIT | https://github.com/community-scripts/ProxmoxVED/raw/main/LICENSE
|
||||
# Source: https://github.com/wger-project/wger
|
||||
|
||||
source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
|
||||
color
|
||||
verb_ip6
|
||||
catch_errors
|
||||
setting_up_container
|
||||
network_check
|
||||
update_os
|
||||
|
||||
msg_info "Installing Dependencies"
|
||||
$STD apt install -y \
|
||||
build-essential \
|
||||
nginx \
|
||||
redis-server \
|
||||
libpq-dev
|
||||
msg_ok "Installed Dependencies"
|
||||
|
||||
import_local_ip
|
||||
NODE_VERSION="22" NODE_MODULE="sass" setup_nodejs
|
||||
setup_uv
|
||||
PG_VERSION="16" setup_postgresql
|
||||
PG_DB_NAME="wger" PG_DB_USER="wger" setup_postgresql_db
|
||||
fetch_and_deploy_gh_release "wger" "wger-project/wger" "tarball" "latest" "/opt/wger"
|
||||
|
||||
msg_info "Setting up wger"
|
||||
mkdir -p /opt/wger/{static,media}
|
||||
chmod o+w /opt/wger/media
|
||||
cd /opt/wger
|
||||
$STD corepack enable
|
||||
$STD npm install
|
||||
$STD npm run build:css:sass
|
||||
$STD uv venv
|
||||
$STD uv pip install . --group docker
|
||||
SECRET_KEY=$(openssl rand -base64 40)
|
||||
cat <<EOF >/opt/wger/.env
|
||||
DJANGO_SETTINGS_MODULE=settings.main
|
||||
PYTHONPATH=/opt/wger
|
||||
|
||||
DJANGO_DB_ENGINE=django.db.backends.postgresql
|
||||
DJANGO_DB_DATABASE=${PG_DB_NAME}
|
||||
DJANGO_DB_USER=${PG_DB_USER}
|
||||
DJANGO_DB_PASSWORD=${PG_DB_PASS}
|
||||
DJANGO_DB_HOST=localhost
|
||||
DJANGO_DB_PORT=5432
|
||||
DATABASE_URL=postgresql://${PG_DB_USER}:${PG_DB_PASS}@localhost:5432/${PG_DB_NAME}
|
||||
|
||||
DJANGO_MEDIA_ROOT=/opt/wger/media
|
||||
DJANGO_STATIC_ROOT=/opt/wger/static
|
||||
DJANGO_STATIC_URL=/static/
|
||||
|
||||
ALLOWED_HOSTS=${LOCAL_IP},localhost,127.0.0.1
|
||||
CSRF_TRUSTED_ORIGINS=http://${LOCAL_IP}:3000
|
||||
|
||||
USE_X_FORWARDED_HOST=True
|
||||
SECURE_PROXY_SSL_HEADER=HTTP_X_FORWARDED_PROTO,http
|
||||
|
||||
DJANGO_CACHE_BACKEND=django_redis.cache.RedisCache
|
||||
DJANGO_CACHE_LOCATION=redis://127.0.0.1:6379/1
|
||||
DJANGO_CACHE_TIMEOUT=300
|
||||
DJANGO_CACHE_CLIENT_CLASS=django_redis.client.DefaultClient
|
||||
AXES_CACHE_ALIAS=default
|
||||
|
||||
USE_CELERY=True
|
||||
CELERY_BROKER=redis://127.0.0.1:6379/2
|
||||
CELERY_BACKEND=redis://127.0.0.1:6379/2
|
||||
|
||||
SITE_URL=http://${LOCAL_IP}:3000
|
||||
SECRET_KEY=${SECRET_KEY}
|
||||
EOF
|
||||
set -a && source /opt/wger/.env && set +a
|
||||
$STD uv run wger bootstrap
|
||||
$STD uv run python manage.py collectstatic --no-input
|
||||
cat <<EOF | uv run python manage.py shell
|
||||
from django.contrib.auth import get_user_model
|
||||
User = get_user_model()
|
||||
|
||||
user, created = User.objects.get_or_create(
|
||||
username="admin",
|
||||
defaults={"email": "admin@localhost"},
|
||||
)
|
||||
|
||||
if created:
|
||||
user.set_password("${PG_DB_PASS}")
|
||||
user.is_superuser = True
|
||||
user.is_staff = True
|
||||
user.save()
|
||||
EOF
|
||||
msg_ok "Set up wger"
|
||||
msg_info "Creating Config and Services"
|
||||
cat <<EOF >/etc/systemd/system/wger.service
|
||||
[Unit]
|
||||
Description=wger Gunicorn
|
||||
After=network.target
|
||||
|
||||
[Service]
|
||||
User=root
|
||||
WorkingDirectory=/opt/wger
|
||||
EnvironmentFile=/opt/wger/.env
|
||||
ExecStart=/opt/wger/.venv/bin/gunicorn \
|
||||
--bind 127.0.0.1:8000 \
|
||||
--workers 3 \
|
||||
--threads 2 \
|
||||
--timeout 120 \
|
||||
wger.wsgi:application
|
||||
Restart=always
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
EOF
|
||||
cat <<EOF >/etc/systemd/system/celery.service
|
||||
[Unit]
|
||||
Description=wger Celery Worker
|
||||
After=network.target redis-server.service
|
||||
Requires=redis-server.service
|
||||
|
||||
[Service]
|
||||
WorkingDirectory=/opt/wger
|
||||
EnvironmentFile=/opt/wger/.env
|
||||
ExecStart=/opt/wger/.venv/bin/celery -A wger worker -l info
|
||||
Restart=always
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
EOF
|
||||
|
||||
mkdir -p /var/lib/wger/celery
|
||||
chmod 700 /var/lib/wger/celery
|
||||
cat <<EOF >/etc/systemd/system/celery-beat.service
|
||||
[Unit]
|
||||
Description=wger Celery Beat
|
||||
After=network.target redis-server.service
|
||||
Requires=redis-server.service
|
||||
|
||||
[Service]
|
||||
WorkingDirectory=/opt/wger
|
||||
EnvironmentFile=/opt/wger/.env
|
||||
ExecStart=/opt/wger/.venv/bin/celery -A wger beat -l info \
|
||||
--schedule /var/lib/wger/celery/celerybeat-schedule
|
||||
Restart=always
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
EOF
|
||||
cat <<'EOF' >/etc/nginx/sites-available/wger
|
||||
server {
|
||||
listen 3000;
|
||||
server_name _;
|
||||
|
||||
client_max_body_size 20M;
|
||||
|
||||
location /static/ {
|
||||
alias /opt/wger/static/;
|
||||
expires 30d;
|
||||
}
|
||||
|
||||
location /media/ {
|
||||
alias /opt/wger/media/;
|
||||
}
|
||||
|
||||
location / {
|
||||
proxy_pass http://127.0.0.1:8000;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
proxy_redirect off;
|
||||
}
|
||||
}
|
||||
EOF
|
||||
$STD rm -f /etc/nginx/sites-enabled/default
|
||||
$STD ln -sf /etc/nginx/sites-available/wger /etc/nginx/sites-enabled/wger
|
||||
systemctl enable -q --now redis-server nginx wger celery celery-beat
|
||||
systemctl restart nginx
|
||||
msg_ok "Created Config and Services"
|
||||
|
||||
motd_ssh
|
||||
customize
|
||||
cleanup_lxc
|
||||
@ -30,10 +30,6 @@ LOGIN_PORT="3000"
|
||||
# Detect server IP address
|
||||
SERVER_IP=$(hostname -I | awk '{print $1}')
|
||||
|
||||
msg_info "Installing Dependencies (Patience)"
|
||||
$STD apt install -y ca-certificates
|
||||
msg_ok "Installed Dependecies"
|
||||
|
||||
# Create zitadel user
|
||||
msg_info "Creating zitadel system user"
|
||||
groupadd --system "${ZITADEL_GROUP}"
|
||||
@ -59,7 +55,9 @@ msg_ok "Configured PostgreSQL"
|
||||
msg_info "Installing Zitadel"
|
||||
cd "${ZITADEL_DIR}"
|
||||
mkdir -p ${CONFIG_DIR}
|
||||
echo "${MASTERKEY}" > ${CONFIG_DIR}/.masterkey
|
||||
echo -n "${MASTERKEY}" > ${CONFIG_DIR}/.masterkey
|
||||
chmod 600 "${CONFIG_DIR}/.masterkey"
|
||||
chown "${ZITADEL_USER}:${ZITADEL_GROUP}" "${CONFIG_DIR}/.masterkey"
|
||||
|
||||
# Update config.yaml for network access
|
||||
cat > "${CONFIG_DIR}/config.yaml" <<EOF
|
||||
@ -127,10 +125,10 @@ EOF
|
||||
chown "${ZITADEL_USER}:${ZITADEL_GROUP}" "${CONFIG_DIR}/config.yaml"
|
||||
|
||||
# Initialize database as zitadel user (no masterkey needed for init)
|
||||
$STD ./zitadel init --config ${CONFIG_DIR}/config.yaml
|
||||
$STD sudo -u ${ZITADEL_USER} ./zitadel init --config ${CONFIG_DIR}/config.yaml
|
||||
|
||||
# Run setup phase as zitadel user (with masterkey and steps)
|
||||
$STD ./zitadel setup --config ${CONFIG_DIR}/config.yaml --steps ${CONFIG_DIR}/config.yaml --masterkey "${MASTERKEY}"
|
||||
$STD sudo -u ${ZITADEL_USER} ./zitadel setup --config ${CONFIG_DIR}/config.yaml --steps ${CONFIG_DIR}/config.yaml --masterkey "${MASTERKEY}"
|
||||
|
||||
#Read client token
|
||||
CLIENT_PAT=$(cat ${ZITADEL_DIR}/login-client.pat)
|
||||
@ -145,9 +143,6 @@ ZITADEL_SERVICE_USER_TOKEN=${CLIENT_PAT}
|
||||
EOF
|
||||
chown "${ZITADEL_USER}:${ZITADEL_GROUP}" "${CONFIG_DIR}/login.env"
|
||||
|
||||
# Update package.json to bind to 0.0.0.0 instead of 127.0.0.1
|
||||
#sed -i 's/"prod": "cd \.\/\.next\/standalone && HOSTNAME=127\.0\.0\.1/"prod": "cd .\/\.next\/standalone \&\& HOSTNAME=0.0.0.0/g' "${LOGIN_DIR}/apps/login/package.json"
|
||||
|
||||
# Create api.env file
|
||||
cat > "${CONFIG_DIR}/api.env" <<EOF
|
||||
ZITADEL_MASTERKEY=${MASTERKEY}
|
||||
@ -183,7 +178,7 @@ Group=${ZITADEL_GROUP}
|
||||
WorkingDirectory=${ZITADEL_DIR}
|
||||
EnvironmentFile=${CONFIG_DIR}/api.env
|
||||
Environment="PATH=/usr/local/bin:/usr/local/go/bin:/usr/bin:/bin"
|
||||
ExecStart=${ZITADEL_DIR}/zitadel start --config ${CONFIG_DIR}/config.yaml --masterkey \${ZITADEL_MASTERKEY}
|
||||
ExecStart=${ZITADEL_DIR}/zitadel start --config ${CONFIG_DIR}/config.yaml --masterkeyFile ${CONFIG_DIR}/.masterkey
|
||||
Restart=always
|
||||
RestartSec=10
|
||||
|
||||
@ -214,17 +209,14 @@ RestartSec=10
|
||||
WantedBy=multi-user.target
|
||||
EOF
|
||||
|
||||
# Reload systemd
|
||||
systemctl daemon-reload
|
||||
|
||||
# Enable and start API service
|
||||
systemctl enable -q --now zitadel-api.service
|
||||
|
||||
# Wait for API to start
|
||||
sleep 10
|
||||
sleep 5
|
||||
|
||||
# Enable and start Login service
|
||||
systemctl enable -q --now zitadel-login.service
|
||||
systemctl enable -q --now zitadel-login
|
||||
msg_ok "Created Services"
|
||||
|
||||
msg_info "Saving Credentials"
|
||||
@ -313,9 +305,9 @@ msg_ok "Saved Credentials"
|
||||
|
||||
msg_info "Create zitadel-rerun.sh"
|
||||
cat <<EOF >~/zitadel-rerun.sh
|
||||
systemctl stop zitadel
|
||||
timeout --kill-after=5s 15s zitadel setup --masterkeyFile ${CONFIG_DIR}/.masterkey --config ${CONFIG_DIR}/config.yaml"
|
||||
systemctl restart zitadel
|
||||
systemctl stop zitadel-api zitadel-login
|
||||
timeout --kill-after=5s 15s /opt/zitadel/zitadel setup --masterkeyFile ${CONFIG_DIR}/.masterkey --config ${CONFIG_DIR}/config.yaml
|
||||
systemctl restart zitadel-api zitadel-login
|
||||
EOF
|
||||
msg_ok "Bash script for rerunning Zitadel after changing Zitadel config.yaml"
|
||||
|
||||
|
||||
710
misc/api.func
710
misc/api.func
@ -6,8 +6,8 @@
|
||||
# API.FUNC - TELEMETRY & DIAGNOSTICS API
|
||||
# ==============================================================================
|
||||
#
|
||||
# Provides functions for sending anonymous telemetry data to Community-Scripts
|
||||
# API for analytics and diagnostics purposes.
|
||||
# Provides functions for sending anonymous telemetry data via the community
|
||||
# telemetry ingest service at telemetry.community-scripts.org.
|
||||
#
|
||||
# Features:
|
||||
# - Container/VM creation statistics
|
||||
@ -17,16 +17,30 @@
|
||||
#
|
||||
# Usage:
|
||||
# source <(curl -fsSL .../api.func)
|
||||
# post_to_api # Report container creation
|
||||
# post_to_api # Report LXC container creation
|
||||
# post_to_api_vm # Report VM creation
|
||||
# post_update_to_api # Report installation status
|
||||
#
|
||||
# Privacy:
|
||||
# - Only anonymous statistics (no personal data)
|
||||
# - User can opt-out via diagnostics settings
|
||||
# - User can opt-out via DIAGNOSTICS=no
|
||||
# - Random UUID for session tracking only
|
||||
# - Data retention: 30 days
|
||||
#
|
||||
# ==============================================================================
|
||||
|
||||
# ==============================================================================
|
||||
# Telemetry Configuration
|
||||
# ==============================================================================
|
||||
TELEMETRY_URL="https://telemetry.community-scripts.org/telemetry"
|
||||
|
||||
# Timeout for telemetry requests (seconds)
|
||||
TELEMETRY_TIMEOUT=5
|
||||
|
||||
# Repository source identifier (auto-transformed by CI on promotion to ProxmoxVE)
|
||||
# DO NOT CHANGE - this is used by the telemetry service to route data to the correct collection
|
||||
REPO_SOURCE="community-scripts/ProxmoxVED"
|
||||
|
||||
# ==============================================================================
|
||||
# SECTION 1: ERROR CODE DESCRIPTIONS
|
||||
# ==============================================================================
|
||||
@ -35,6 +49,8 @@
|
||||
# explain_exit_code()
|
||||
#
|
||||
# - Maps numeric exit codes to human-readable error descriptions
|
||||
# - Canonical source of truth for ALL exit code mappings
|
||||
# - Used by both api.func (telemetry) and error_handler.func (error display)
|
||||
# - Supports:
|
||||
# * Generic/Shell errors (1, 2, 124, 126-130, 134, 137, 139, 141, 143)
|
||||
# * curl/wget errors (6, 7, 22, 28, 35)
|
||||
@ -47,7 +63,6 @@
|
||||
# * Proxmox custom codes (200-231)
|
||||
# * Node.js/npm errors (243, 245-249)
|
||||
# - Returns description string for given exit code
|
||||
# - Shared function with error_handler.func for consistency
|
||||
# ------------------------------------------------------------------------------
|
||||
explain_exit_code() {
|
||||
local code="$1"
|
||||
@ -157,10 +172,106 @@ explain_exit_code() {
|
||||
# SECTION 2: TELEMETRY FUNCTIONS
|
||||
# ==============================================================================
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# detect_gpu()
|
||||
#
|
||||
# - Detects GPU vendor, model, and passthrough type
|
||||
# - Sets GPU_VENDOR, GPU_MODEL, and GPU_PASSTHROUGH globals
|
||||
# - Used for GPU analytics
|
||||
# ------------------------------------------------------------------------------
|
||||
detect_gpu() {
|
||||
GPU_VENDOR="unknown"
|
||||
GPU_MODEL=""
|
||||
GPU_PASSTHROUGH="unknown"
|
||||
|
||||
local gpu_line
|
||||
gpu_line=$(lspci 2>/dev/null | grep -iE "VGA|3D|Display" | head -1)
|
||||
|
||||
if [[ -n "$gpu_line" ]]; then
|
||||
# Extract model: everything after the colon, clean up
|
||||
GPU_MODEL=$(echo "$gpu_line" | sed 's/.*: //' | sed 's/ (rev .*)$//' | cut -c1-64)
|
||||
|
||||
# Detect vendor and passthrough type
|
||||
if echo "$gpu_line" | grep -qi "Intel"; then
|
||||
GPU_VENDOR="intel"
|
||||
GPU_PASSTHROUGH="igpu"
|
||||
elif echo "$gpu_line" | grep -qi "AMD\|ATI"; then
|
||||
GPU_VENDOR="amd"
|
||||
if echo "$gpu_line" | grep -qi "Radeon RX\|Radeon Pro"; then
|
||||
GPU_PASSTHROUGH="dgpu"
|
||||
else
|
||||
GPU_PASSTHROUGH="igpu"
|
||||
fi
|
||||
elif echo "$gpu_line" | grep -qi "NVIDIA"; then
|
||||
GPU_VENDOR="nvidia"
|
||||
GPU_PASSTHROUGH="dgpu"
|
||||
fi
|
||||
fi
|
||||
|
||||
export GPU_VENDOR GPU_MODEL GPU_PASSTHROUGH
|
||||
}
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# detect_cpu()
|
||||
#
|
||||
# - Detects CPU vendor and model
|
||||
# - Sets CPU_VENDOR (intel/amd/arm/unknown) and CPU_MODEL globals
|
||||
# - Used for CPU analytics
|
||||
# ------------------------------------------------------------------------------
|
||||
detect_cpu() {
|
||||
CPU_VENDOR="unknown"
|
||||
CPU_MODEL=""
|
||||
|
||||
if [[ -f /proc/cpuinfo ]]; then
|
||||
local vendor_id
|
||||
vendor_id=$(grep -m1 "vendor_id" /proc/cpuinfo 2>/dev/null | cut -d: -f2 | tr -d ' ')
|
||||
|
||||
case "$vendor_id" in
|
||||
GenuineIntel) CPU_VENDOR="intel" ;;
|
||||
AuthenticAMD) CPU_VENDOR="amd" ;;
|
||||
*)
|
||||
# ARM doesn't have vendor_id, check for CPU implementer
|
||||
if grep -qi "CPU implementer" /proc/cpuinfo 2>/dev/null; then
|
||||
CPU_VENDOR="arm"
|
||||
fi
|
||||
;;
|
||||
esac
|
||||
|
||||
# Extract model name and clean it up
|
||||
CPU_MODEL=$(grep -m1 "model name" /proc/cpuinfo 2>/dev/null | cut -d: -f2 | sed 's/^ *//' | sed 's/(R)//g' | sed 's/(TM)//g' | sed 's/ */ /g' | cut -c1-64)
|
||||
fi
|
||||
|
||||
export CPU_VENDOR CPU_MODEL
|
||||
}
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# detect_ram()
|
||||
#
|
||||
# - Detects RAM speed using dmidecode
|
||||
# - Sets RAM_SPEED global (e.g., "4800" for DDR5-4800)
|
||||
# - Requires root access for dmidecode
|
||||
# - Returns empty if not available
|
||||
# ------------------------------------------------------------------------------
|
||||
detect_ram() {
|
||||
RAM_SPEED=""
|
||||
|
||||
if command -v dmidecode &>/dev/null; then
|
||||
# Get configured memory speed (actual running speed)
|
||||
RAM_SPEED=$(dmidecode -t memory 2>/dev/null | grep -m1 "Configured Memory Speed:" | grep -oE "[0-9]+" | head -1)
|
||||
|
||||
# Fallback to Speed: if Configured not available
|
||||
if [[ -z "$RAM_SPEED" ]]; then
|
||||
RAM_SPEED=$(dmidecode -t memory 2>/dev/null | grep -m1 "Speed:" | grep -oE "[0-9]+" | head -1)
|
||||
fi
|
||||
fi
|
||||
|
||||
export RAM_SPEED
|
||||
}
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# post_to_api()
|
||||
#
|
||||
# - Sends LXC container creation statistics to Community-Scripts API
|
||||
# - Sends LXC container creation statistics to telemetry ingest service
|
||||
# - Only executes if:
|
||||
# * curl is available
|
||||
# * DIAGNOSTICS=yes
|
||||
@ -168,182 +279,565 @@ explain_exit_code() {
|
||||
# - Payload includes:
|
||||
# * Container type, disk size, CPU cores, RAM
|
||||
# * OS type and version
|
||||
# * IPv6 disable status
|
||||
# * Application name (NSAPP)
|
||||
# * Installation method
|
||||
# * PVE version
|
||||
# * Status: "installing"
|
||||
# * Random UUID for session tracking
|
||||
# - Anonymous telemetry (no personal data)
|
||||
# - Never blocks or fails script execution
|
||||
# ------------------------------------------------------------------------------
|
||||
post_to_api() {
|
||||
# Silent fail - telemetry should never break scripts
|
||||
command -v curl &>/dev/null || {
|
||||
[[ "${DEV_MODE:-}" == "true" ]] && echo "[DEBUG] curl not found, skipping" >&2
|
||||
return 0
|
||||
}
|
||||
[[ "${DIAGNOSTICS:-no}" == "no" ]] && {
|
||||
[[ "${DEV_MODE:-}" == "true" ]] && echo "[DEBUG] DIAGNOSTICS=no, skipping" >&2
|
||||
return 0
|
||||
}
|
||||
[[ -z "${RANDOM_UUID:-}" ]] && {
|
||||
[[ "${DEV_MODE:-}" == "true" ]] && echo "[DEBUG] RANDOM_UUID empty, skipping" >&2
|
||||
return 0
|
||||
}
|
||||
|
||||
if ! command -v curl &>/dev/null; then
|
||||
return
|
||||
[[ "${DEV_MODE:-}" == "true" ]] && echo "[DEBUG] post_to_api() DIAGNOSTICS=$DIAGNOSTICS RANDOM_UUID=$RANDOM_UUID NSAPP=$NSAPP" >&2
|
||||
|
||||
# Set type for later status updates
|
||||
TELEMETRY_TYPE="lxc"
|
||||
|
||||
local pve_version=""
|
||||
if command -v pveversion &>/dev/null; then
|
||||
pve_version=$(pveversion 2>/dev/null | awk -F'[/ ]' '{print $2}') || true
|
||||
fi
|
||||
|
||||
if [ "$DIAGNOSTICS" = "no" ]; then
|
||||
return
|
||||
# Detect GPU if not already set
|
||||
if [[ -z "${GPU_VENDOR:-}" ]]; then
|
||||
detect_gpu
|
||||
fi
|
||||
local gpu_vendor="${GPU_VENDOR:-unknown}"
|
||||
local gpu_model="${GPU_MODEL:-}"
|
||||
local gpu_passthrough="${GPU_PASSTHROUGH:-unknown}"
|
||||
|
||||
if [ -z "$RANDOM_UUID" ]; then
|
||||
return
|
||||
# Detect CPU if not already set
|
||||
if [[ -z "${CPU_VENDOR:-}" ]]; then
|
||||
detect_cpu
|
||||
fi
|
||||
local cpu_vendor="${CPU_VENDOR:-unknown}"
|
||||
local cpu_model="${CPU_MODEL:-}"
|
||||
|
||||
local API_URL="http://api.community-scripts.org/dev/upload"
|
||||
local pve_version="not found"
|
||||
pve_version=$(pveversion | awk -F'[/ ]' '{print $2}')
|
||||
# Detect RAM if not already set
|
||||
if [[ -z "${RAM_SPEED:-}" ]]; then
|
||||
detect_ram
|
||||
fi
|
||||
local ram_speed="${RAM_SPEED:-}"
|
||||
|
||||
local JSON_PAYLOAD
|
||||
JSON_PAYLOAD=$(
|
||||
cat <<EOF
|
||||
{
|
||||
"ct_type": $CT_TYPE,
|
||||
"type":"lxc",
|
||||
"disk_size": $DISK_SIZE,
|
||||
"core_count": $CORE_COUNT,
|
||||
"ram_size": $RAM_SIZE,
|
||||
"os_type": "$var_os",
|
||||
"os_version": "$var_version",
|
||||
"nsapp": "$NSAPP",
|
||||
"method": "$METHOD",
|
||||
"pve_version": "$pve_version",
|
||||
"random_id": "${RANDOM_UUID}",
|
||||
"type": "lxc",
|
||||
"nsapp": "${NSAPP:-unknown}",
|
||||
"status": "installing",
|
||||
"random_id": "$RANDOM_UUID"
|
||||
"ct_type": ${CT_TYPE:-1},
|
||||
"disk_size": ${DISK_SIZE:-0},
|
||||
"core_count": ${CORE_COUNT:-0},
|
||||
"ram_size": ${RAM_SIZE:-0},
|
||||
"os_type": "${var_os:-}",
|
||||
"os_version": "${var_version:-}",
|
||||
"pve_version": "${pve_version}",
|
||||
"method": "${METHOD:-default}",
|
||||
"cpu_vendor": "${cpu_vendor}",
|
||||
"cpu_model": "${cpu_model}",
|
||||
"gpu_vendor": "${gpu_vendor}",
|
||||
"gpu_model": "${gpu_model}",
|
||||
"gpu_passthrough": "${gpu_passthrough}",
|
||||
"ram_speed": "${ram_speed}",
|
||||
"repo_source": "${REPO_SOURCE}"
|
||||
}
|
||||
EOF
|
||||
)
|
||||
if [[ "$DIAGNOSTICS" == "yes" ]]; then
|
||||
RESPONSE=$(curl -s -w "%{http_code}" -L -X POST "$API_URL" --post301 --post302 \
|
||||
-H "Content-Type: application/json" \
|
||||
-d "$JSON_PAYLOAD") || true
|
||||
fi
|
||||
|
||||
[[ "${DEV_MODE:-}" == "true" ]] && echo "[DEBUG] Sending to: $TELEMETRY_URL" >&2
|
||||
[[ "${DEV_MODE:-}" == "true" ]] && echo "[DEBUG] Payload: $JSON_PAYLOAD" >&2
|
||||
|
||||
# Fire-and-forget: never block, never fail
|
||||
local http_code
|
||||
if [[ "${DEV_MODE:-}" == "true" ]]; then
|
||||
http_code=$(curl -sS -w "%{http_code}" -m "${TELEMETRY_TIMEOUT}" -X POST "${TELEMETRY_URL}" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d "$JSON_PAYLOAD" -o /dev/stderr 2>&1) || true
|
||||
echo "[DEBUG] HTTP response code: $http_code" >&2
|
||||
else
|
||||
curl -fsS -m "${TELEMETRY_TIMEOUT}" -X POST "${TELEMETRY_URL}" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d "$JSON_PAYLOAD" &>/dev/null || true
|
||||
fi
|
||||
}
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# post_to_api_vm()
|
||||
#
|
||||
# - Sends VM creation statistics to Community-Scripts API
|
||||
# - Similar to post_to_api() but for virtual machines (not containers)
|
||||
# - Sends VM creation statistics to telemetry ingest service
|
||||
# - Reads DIAGNOSTICS from /usr/local/community-scripts/diagnostics file
|
||||
# - Payload differences:
|
||||
# - Payload differences from LXC:
|
||||
# * ct_type=2 (VM instead of LXC)
|
||||
# * type="vm"
|
||||
# * Disk size without 'G' suffix (parsed from DISK_SIZE variable)
|
||||
# * Disk size without 'G' suffix
|
||||
# - Only executes if DIAGNOSTICS=yes and RANDOM_UUID is set
|
||||
# - Never blocks or fails script execution
|
||||
# ------------------------------------------------------------------------------
|
||||
post_to_api_vm() {
|
||||
|
||||
if [[ ! -f /usr/local/community-scripts/diagnostics ]]; then
|
||||
return
|
||||
fi
|
||||
DIAGNOSTICS=$(grep -i "^DIAGNOSTICS=" /usr/local/community-scripts/diagnostics | awk -F'=' '{print $2}')
|
||||
if ! command -v curl &>/dev/null; then
|
||||
return
|
||||
# Read diagnostics setting from file
|
||||
if [[ -f /usr/local/community-scripts/diagnostics ]]; then
|
||||
DIAGNOSTICS=$(grep -i "^DIAGNOSTICS=" /usr/local/community-scripts/diagnostics 2>/dev/null | awk -F'=' '{print $2}') || true
|
||||
fi
|
||||
|
||||
if [ "$DIAGNOSTICS" = "no" ]; then
|
||||
return
|
||||
# Silent fail - telemetry should never break scripts
|
||||
command -v curl &>/dev/null || return 0
|
||||
[[ "${DIAGNOSTICS:-no}" == "no" ]] && return 0
|
||||
[[ -z "${RANDOM_UUID:-}" ]] && return 0
|
||||
|
||||
# Set type for later status updates
|
||||
TELEMETRY_TYPE="vm"
|
||||
|
||||
local pve_version=""
|
||||
if command -v pveversion &>/dev/null; then
|
||||
pve_version=$(pveversion 2>/dev/null | awk -F'[/ ]' '{print $2}') || true
|
||||
fi
|
||||
|
||||
if [ -z "$RANDOM_UUID" ]; then
|
||||
return
|
||||
fi
|
||||
|
||||
local API_URL="http://api.community-scripts.org/dev/upload"
|
||||
local pve_version="not found"
|
||||
pve_version=$(pveversion | awk -F'[/ ]' '{print $2}')
|
||||
|
||||
DISK_SIZE_API=${DISK_SIZE%G}
|
||||
# Remove 'G' suffix from disk size
|
||||
local DISK_SIZE_API="${DISK_SIZE%G}"
|
||||
|
||||
local JSON_PAYLOAD
|
||||
JSON_PAYLOAD=$(
|
||||
cat <<EOF
|
||||
{
|
||||
"ct_type": 2,
|
||||
"type":"vm",
|
||||
"disk_size": $DISK_SIZE_API,
|
||||
"core_count": $CORE_COUNT,
|
||||
"ram_size": $RAM_SIZE,
|
||||
"os_type": "$var_os",
|
||||
"os_version": "$var_version",
|
||||
"nsapp": "$NSAPP",
|
||||
"method": "$METHOD",
|
||||
"pve_version": "$pve_version",
|
||||
"random_id": "${RANDOM_UUID}",
|
||||
"type": "vm",
|
||||
"nsapp": "${NSAPP:-unknown}",
|
||||
"status": "installing",
|
||||
"random_id": "$RANDOM_UUID"
|
||||
"ct_type": 2,
|
||||
"disk_size": ${DISK_SIZE_API:-0},
|
||||
"core_count": ${CORE_COUNT:-0},
|
||||
"ram_size": ${RAM_SIZE:-0},
|
||||
"os_type": "${var_os:-}",
|
||||
"os_version": "${var_version:-}",
|
||||
"pve_version": "${pve_version}",
|
||||
"method": "${METHOD:-default}",
|
||||
"repo_source": "${REPO_SOURCE}"
|
||||
}
|
||||
EOF
|
||||
)
|
||||
if [[ "$DIAGNOSTICS" == "yes" ]]; then
|
||||
RESPONSE=$(curl -s -w "%{http_code}" -L -X POST "$API_URL" --post301 --post302 \
|
||||
-H "Content-Type: application/json" \
|
||||
-d "$JSON_PAYLOAD") || true
|
||||
fi
|
||||
|
||||
# Fire-and-forget: never block, never fail
|
||||
curl -fsS -m "${TELEMETRY_TIMEOUT}" -X POST "${TELEMETRY_URL}" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d "$JSON_PAYLOAD" &>/dev/null || true
|
||||
}
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# post_update_to_api()
|
||||
#
|
||||
# - Reports installation completion status to API
|
||||
# - Reports installation completion status to telemetry ingest service
|
||||
# - Prevents duplicate submissions via POST_UPDATE_DONE flag
|
||||
# - Arguments:
|
||||
# * $1: status ("success" or "failed")
|
||||
# * $2: exit_code (default: 1 for failed, 0 for success)
|
||||
# * $1: status ("done" or "failed")
|
||||
# * $2: exit_code (numeric, default: 1 for failed, 0 for done)
|
||||
# - Payload includes:
|
||||
# * Final status (success/failed)
|
||||
# * Error description via get_error_description()
|
||||
# * Random UUID for session correlation
|
||||
# * Final status (mapped: "done"→"success", "failed"→"failed")
|
||||
# * Error description via explain_exit_code()
|
||||
# * Numeric exit code
|
||||
# - Only executes once per session
|
||||
# - Silently returns if:
|
||||
# * curl not available
|
||||
# * Already reported (POST_UPDATE_DONE=true)
|
||||
# * DIAGNOSTICS=no
|
||||
# - Never blocks or fails script execution
|
||||
# ------------------------------------------------------------------------------
|
||||
post_update_to_api() {
|
||||
# Silent fail - telemetry should never break scripts
|
||||
command -v curl &>/dev/null || return 0
|
||||
|
||||
if ! command -v curl &>/dev/null; then
|
||||
return
|
||||
fi
|
||||
|
||||
# Initialize flag if not set (prevents 'unbound variable' error with set -u)
|
||||
# Prevent duplicate submissions
|
||||
POST_UPDATE_DONE=${POST_UPDATE_DONE:-false}
|
||||
[[ "$POST_UPDATE_DONE" == "true" ]] && return 0
|
||||
|
||||
[[ "${DIAGNOSTICS:-no}" == "no" ]] && return 0
|
||||
[[ -z "${RANDOM_UUID:-}" ]] && return 0
|
||||
|
||||
if [ "$POST_UPDATE_DONE" = true ]; then
|
||||
return 0
|
||||
fi
|
||||
exit_code=${2:-1}
|
||||
local API_URL="http://api.community-scripts.org/dev/upload/updatestatus"
|
||||
local status="${1:-failed}"
|
||||
if [[ "$status" == "failed" ]]; then
|
||||
local exit_code="${2:-1}"
|
||||
elif [[ "$status" == "success" ]]; then
|
||||
local exit_code="${2:-0}"
|
||||
local raw_exit_code="${2:-1}"
|
||||
local exit_code=0 error="" pb_status error_category=""
|
||||
|
||||
# Get GPU info (if detected)
|
||||
local gpu_vendor="${GPU_VENDOR:-unknown}"
|
||||
local gpu_model="${GPU_MODEL:-}"
|
||||
local gpu_passthrough="${GPU_PASSTHROUGH:-unknown}"
|
||||
|
||||
# Get CPU info (if detected)
|
||||
local cpu_vendor="${CPU_VENDOR:-unknown}"
|
||||
local cpu_model="${CPU_MODEL:-}"
|
||||
|
||||
# Get RAM info (if detected)
|
||||
local ram_speed="${RAM_SPEED:-}"
|
||||
|
||||
# Map status to telemetry values: installing, success, failed, unknown
|
||||
case "$status" in
|
||||
done | success)
|
||||
pb_status="success"
|
||||
exit_code=0
|
||||
error=""
|
||||
error_category=""
|
||||
;;
|
||||
failed)
|
||||
pb_status="failed"
|
||||
;;
|
||||
*)
|
||||
pb_status="unknown"
|
||||
;;
|
||||
esac
|
||||
|
||||
# For failed/unknown status, resolve exit code and error description
|
||||
if [[ "$pb_status" == "failed" ]] || [[ "$pb_status" == "unknown" ]]; then
|
||||
if [[ "$raw_exit_code" =~ ^[0-9]+$ ]]; then
|
||||
exit_code="$raw_exit_code"
|
||||
else
|
||||
exit_code=1
|
||||
fi
|
||||
error=$(explain_exit_code "$exit_code")
|
||||
error_category=$(categorize_error "$exit_code")
|
||||
[[ -z "$error" ]] && error="Unknown error"
|
||||
fi
|
||||
|
||||
if [[ -z "$exit_code" ]]; then
|
||||
exit_code=1
|
||||
# Calculate duration if timer was started
|
||||
local duration=0
|
||||
if [[ -n "${INSTALL_START_TIME:-}" ]]; then
|
||||
duration=$(($(date +%s) - INSTALL_START_TIME))
|
||||
fi
|
||||
|
||||
error=$(explain_exit_code "$exit_code")
|
||||
|
||||
if [ -z "$error" ]; then
|
||||
error="Unknown error"
|
||||
# Get PVE version
|
||||
local pve_version=""
|
||||
if command -v pveversion &>/dev/null; then
|
||||
pve_version=$(pveversion 2>/dev/null | awk -F'[/ ]' '{print $2}') || true
|
||||
fi
|
||||
|
||||
# Full payload including all fields - allows record creation if initial call failed
|
||||
# The Go service will find the record by random_id and PATCH, or create if not found
|
||||
local JSON_PAYLOAD
|
||||
JSON_PAYLOAD=$(
|
||||
cat <<EOF
|
||||
{
|
||||
"status": "$status",
|
||||
"error": "$error",
|
||||
"random_id": "$RANDOM_UUID"
|
||||
"random_id": "${RANDOM_UUID}",
|
||||
"type": "${TELEMETRY_TYPE:-lxc}",
|
||||
"nsapp": "${NSAPP:-unknown}",
|
||||
"status": "${pb_status}",
|
||||
"ct_type": ${CT_TYPE:-1},
|
||||
"disk_size": ${DISK_SIZE:-0},
|
||||
"core_count": ${CORE_COUNT:-0},
|
||||
"ram_size": ${RAM_SIZE:-0},
|
||||
"os_type": "${var_os:-}",
|
||||
"os_version": "${var_version:-}",
|
||||
"pve_version": "${pve_version}",
|
||||
"method": "${METHOD:-default}",
|
||||
"exit_code": ${exit_code},
|
||||
"error": "${error}",
|
||||
"error_category": "${error_category}",
|
||||
"install_duration": ${duration},
|
||||
"cpu_vendor": "${cpu_vendor}",
|
||||
"cpu_model": "${cpu_model}",
|
||||
"gpu_vendor": "${gpu_vendor}",
|
||||
"gpu_model": "${gpu_model}",
|
||||
"gpu_passthrough": "${gpu_passthrough}",
|
||||
"ram_speed": "${ram_speed}",
|
||||
"repo_source": "${REPO_SOURCE}"
|
||||
}
|
||||
EOF
|
||||
)
|
||||
if [[ "$DIAGNOSTICS" == "yes" ]]; then
|
||||
RESPONSE=$(curl -s -w "%{http_code}" -L -X POST "$API_URL" --post301 --post302 \
|
||||
-H "Content-Type: application/json" \
|
||||
-d "$JSON_PAYLOAD") || true
|
||||
fi
|
||||
|
||||
# Fire-and-forget: never block, never fail
|
||||
curl -sS -w "%{http_code}" -m "${TELEMETRY_TIMEOUT}" -X POST "${TELEMETRY_URL}" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d "$JSON_PAYLOAD" -o /dev/null 2>&1 || true
|
||||
|
||||
POST_UPDATE_DONE=true
|
||||
}
|
||||
|
||||
# ==============================================================================
|
||||
# SECTION 3: EXTENDED TELEMETRY FUNCTIONS
|
||||
# ==============================================================================
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# categorize_error()
|
||||
#
|
||||
# - Maps exit codes to error categories for better analytics
|
||||
# - Categories: network, storage, dependency, permission, timeout, config, resource, unknown
|
||||
# - Used to group errors in dashboard
|
||||
# ------------------------------------------------------------------------------
|
||||
categorize_error() {
|
||||
local code="$1"
|
||||
case "$code" in
|
||||
# Network errors
|
||||
6 | 7 | 22 | 28 | 35) echo "network" ;;
|
||||
|
||||
# Storage errors
|
||||
214 | 217 | 219) echo "storage" ;;
|
||||
|
||||
# Dependency/Package errors
|
||||
100 | 101 | 102 | 127 | 160 | 161 | 162) echo "dependency" ;;
|
||||
|
||||
# Permission errors
|
||||
126 | 152) echo "permission" ;;
|
||||
|
||||
# Timeout errors
|
||||
124 | 28 | 211) echo "timeout" ;;
|
||||
|
||||
# Configuration errors
|
||||
203 | 204 | 205 | 206 | 207 | 208) echo "config" ;;
|
||||
|
||||
# Resource errors (OOM, etc)
|
||||
137 | 134) echo "resource" ;;
|
||||
|
||||
# Default
|
||||
*) echo "unknown" ;;
|
||||
esac
|
||||
}
|
||||
|
||||
# ------------------------------------------------------------------------------
# start_install_timer()
#
# - Records the current epoch time in the exported INSTALL_START_TIME variable
#   so get_install_duration() can later compute elapsed installation time
# - Call once at the beginning of an installation
# ------------------------------------------------------------------------------
start_install_timer() {
  export INSTALL_START_TIME="$(date +%s)"
}
|
||||
|
||||
# ------------------------------------------------------------------------------
# get_install_duration()
#
# - Prints elapsed whole seconds since start_install_timer() was called
# - Prints 0 when the timer was never started
# ------------------------------------------------------------------------------
get_install_duration() {
  # No timer running -> zero elapsed time
  [[ -n "${INSTALL_START_TIME:-}" ]] || {
    echo "0"
    return
  }
  local current
  current=$(date +%s)
  echo "$((current - INSTALL_START_TIME))"
}
|
||||
|
||||
# ------------------------------------------------------------------------------
# post_tool_to_api()
#
# - Reports a PVE host tool execution (not a container install) to telemetry
# - Arguments:
#   * $1: tool_name (e.g., "microcode", "lxc-update", "post-pve-install")
#   * $2: status ("success" or "failed"; "done" is normalized to "success")
#   * $3: exit_code (optional; defaults to 0, coerced to numeric on failure)
# - No-op when curl is missing or DIAGNOSTICS is not enabled
# ------------------------------------------------------------------------------
post_tool_to_api() {
  if ! command -v curl &>/dev/null; then
    return 0
  fi
  if [[ "${DIAGNOSTICS:-no}" == "no" ]]; then
    return 0
  fi

  local tool_name="${1:-unknown}"
  local status="${2:-success}"
  local exit_code="${3:-0}"
  local err_text="" err_cat=""
  local run_uuid elapsed

  # Unique id for this tool execution; fall back through uuidgen to a
  # timestamp-based id when /proc is unavailable.
  run_uuid=$(cat /proc/sys/kernel/random/uuid 2>/dev/null || uuidgen 2>/dev/null || echo "tool-$(date +%s)")
  elapsed=$(get_install_duration)

  # Normalize legacy status value
  [[ "$status" == "done" ]] && status="success"

  if [[ "$status" == "failed" ]]; then
    # Guarantee a numeric exit code before embedding it in JSON
    [[ ! "$exit_code" =~ ^[0-9]+$ ]] && exit_code=1
    err_text=$(explain_exit_code "$exit_code")
    err_cat=$(categorize_error "$exit_code")
  fi

  # Best-effort Proxmox VE version detection (host only)
  local pve_version=""
  if command -v pveversion &>/dev/null; then
    pve_version=$(pveversion 2>/dev/null | awk -F'[/ ]' '{print $2}') || true
  fi

  local JSON_PAYLOAD
  JSON_PAYLOAD=$(
    cat <<EOF
{
  "random_id": "${run_uuid}",
  "type": "tool",
  "nsapp": "${tool_name}",
  "status": "${status}",
  "exit_code": ${exit_code},
  "error": "${err_text}",
  "error_category": "${err_cat}",
  "install_duration": ${elapsed:-0},
  "pve_version": "${pve_version}",
  "repo_source": "${REPO_SOURCE}"
}
EOF
  )

  # Fire-and-forget: telemetry must never break the caller
  curl -fsS -m "${TELEMETRY_TIMEOUT}" -X POST "${TELEMETRY_URL}" \
    -H "Content-Type: application/json" \
    -d "$JSON_PAYLOAD" &>/dev/null || true
}
|
||||
|
||||
# ------------------------------------------------------------------------------
# post_addon_to_api()
#
# - Reports an addon installation (runs inside a container) to telemetry
# - Arguments:
#   * $1: addon_name (e.g., "filebrowser", "netdata")
#   * $2: status ("success" or "failed"; "done" is normalized to "success")
#   * $3: exit_code (optional; coerced to numeric on failure)
# - No-op when curl is missing or DIAGNOSTICS is not enabled
# ------------------------------------------------------------------------------
post_addon_to_api() {
  if ! command -v curl &>/dev/null; then
    return 0
  fi
  if [[ "${DIAGNOSTICS:-no}" == "no" ]]; then
    return 0
  fi

  local addon_name="${1:-unknown}"
  local status="${2:-success}"
  local exit_code="${3:-0}"
  local err_text="" err_cat=""
  local run_uuid elapsed

  # Unique id for this addon installation; fall back through uuidgen to a
  # timestamp-based id when /proc is unavailable.
  run_uuid=$(cat /proc/sys/kernel/random/uuid 2>/dev/null || uuidgen 2>/dev/null || echo "addon-$(date +%s)")
  elapsed=$(get_install_duration)

  # Normalize legacy status value
  [[ "$status" == "done" ]] && status="success"

  if [[ "$status" == "failed" ]]; then
    # Guarantee a numeric exit code before embedding it in JSON
    [[ ! "$exit_code" =~ ^[0-9]+$ ]] && exit_code=1
    err_text=$(explain_exit_code "$exit_code")
    err_cat=$(categorize_error "$exit_code")
  fi

  # Detect guest OS identity from os-release (best effort)
  local os_type="" os_version=""
  if [[ -f /etc/os-release ]]; then
    os_type=$(grep "^ID=" /etc/os-release | cut -d= -f2 | tr -d '"')
    os_version=$(grep "^VERSION_ID=" /etc/os-release | cut -d= -f2 | tr -d '"')
  fi

  local JSON_PAYLOAD
  JSON_PAYLOAD=$(
    cat <<EOF
{
  "random_id": "${run_uuid}",
  "type": "addon",
  "nsapp": "${addon_name}",
  "status": "${status}",
  "exit_code": ${exit_code},
  "error": "${err_text}",
  "error_category": "${err_cat}",
  "install_duration": ${elapsed:-0},
  "os_type": "${os_type}",
  "os_version": "${os_version}",
  "repo_source": "${REPO_SOURCE}"
}
EOF
  )

  # Fire-and-forget: telemetry must never break the caller
  curl -fsS -m "${TELEMETRY_TIMEOUT}" -X POST "${TELEMETRY_URL}" \
    -H "Content-Type: application/json" \
    -d "$JSON_PAYLOAD" &>/dev/null || true
}
|
||||
|
||||
# ------------------------------------------------------------------------------
# post_update_to_api_extended()
#
# - Extended variant of post_update_to_api that additionally reports install
#   duration, GPU info, and an error category
# - Arguments:
#   * $1: status ("done"/"success", "failed"; anything else -> "unknown")
#   * $2: exit_code (numeric; non-numeric values are coerced to 1 on failure)
# - Requires DIAGNOSTICS=yes and a RANDOM_UUID; reports at most once per run
#   (guarded by POST_UPDATE_DONE)
# - GPU fields come from GPU_VENDOR/GPU_PASSTHROUGH globals if set (presumably
#   by a prior detect_gpu call - empty otherwise)
# ------------------------------------------------------------------------------
post_update_to_api_extended() {
  # Silent fail - telemetry should never break scripts
  command -v curl &>/dev/null || return 0

  # Send at most one report per run
  POST_UPDATE_DONE=${POST_UPDATE_DONE:-false}
  [[ "$POST_UPDATE_DONE" == "true" ]] && return 0

  [[ "${DIAGNOSTICS:-no}" == "no" ]] && return 0
  [[ -z "${RANDOM_UUID:-}" ]] && return 0

  local status="${1:-failed}"
  local raw_code="${2:-1}"
  local exit_code=0 err_msg="" report_status err_cat=""
  local run_secs gpu_vendor gpu_passthrough

  run_secs=$(get_install_duration)
  gpu_vendor="${GPU_VENDOR:-}"
  gpu_passthrough="${GPU_PASSTHROUGH:-}"

  # Map caller-supplied status onto telemetry status values
  case "$status" in
  done | success)
    report_status="success"
    exit_code=0
    err_msg=""
    err_cat=""
    ;;
  failed)
    report_status="failed"
    ;;
  *)
    report_status="unknown"
    ;;
  esac

  # For failed/unknown reports, resolve a numeric exit code and its description
  if [[ "$report_status" != "success" ]]; then
    if [[ "$raw_code" =~ ^[0-9]+$ ]]; then
      exit_code="$raw_code"
    else
      exit_code=1
    fi
    err_msg=$(explain_exit_code "$exit_code")
    err_cat=$(categorize_error "$exit_code")
    [[ -z "$err_msg" ]] && err_msg="Unknown error"
  fi

  local JSON_PAYLOAD
  JSON_PAYLOAD=$(
    cat <<EOF
{
  "random_id": "${RANDOM_UUID}",
  "type": "${TELEMETRY_TYPE:-lxc}",
  "nsapp": "${NSAPP:-unknown}",
  "status": "${report_status}",
  "exit_code": ${exit_code},
  "error": "${err_msg}",
  "error_category": "${err_cat}",
  "install_duration": ${run_secs:-0},
  "gpu_vendor": "${gpu_vendor}",
  "gpu_passthrough": "${gpu_passthrough}",
  "repo_source": "${REPO_SOURCE}"
}
EOF
  )

  # Fire-and-forget: telemetry must never break the caller
  curl -fsS -m "${TELEMETRY_TIMEOUT}" -X POST "${TELEMETRY_URL}" \
    -H "Content-Type: application/json" \
    -d "$JSON_PAYLOAD" &>/dev/null || true

  POST_UPDATE_DONE=true
}
|
||||
|
||||
190
misc/build.func
190
misc/build.func
@ -38,15 +38,16 @@
|
||||
# - Captures app-declared resource defaults (CPU, RAM, Disk)
|
||||
# ------------------------------------------------------------------------------
|
||||
variables() {
|
||||
NSAPP=$(echo "${APP,,}" | tr -d ' ') # This function sets the NSAPP variable by converting the value of the APP variable to lowercase and removing any spaces.
|
||||
var_install="${NSAPP}-install" # sets the var_install variable by appending "-install" to the value of NSAPP.
|
||||
INTEGER='^[0-9]+([.][0-9]+)?$' # it defines the INTEGER regular expression pattern.
|
||||
PVEHOST_NAME=$(hostname) # gets the Proxmox Hostname and sets it to Uppercase
|
||||
DIAGNOSTICS="yes" # sets the DIAGNOSTICS variable to "yes", used for the API call.
|
||||
METHOD="default" # sets the METHOD variable to "default", used for the API call.
|
||||
RANDOM_UUID="$(cat /proc/sys/kernel/random/uuid)" # generates a random UUID and sets it to the RANDOM_UUID variable.
|
||||
SESSION_ID="${RANDOM_UUID:0:8}" # Short session ID (first 8 chars of UUID) for log files
|
||||
BUILD_LOG="/tmp/create-lxc-${SESSION_ID}.log" # Host-side container creation log
|
||||
NSAPP=$(echo "${APP,,}" | tr -d ' ') # This function sets the NSAPP variable by converting the value of the APP variable to lowercase and removing any spaces.
|
||||
var_install="${NSAPP}-install" # sets the var_install variable by appending "-install" to the value of NSAPP.
|
||||
INTEGER='^[0-9]+([.][0-9]+)?$' # it defines the INTEGER regular expression pattern.
|
||||
PVEHOST_NAME=$(hostname) # gets the Proxmox Hostname and sets it to Uppercase
|
||||
DIAGNOSTICS="yes" # sets the DIAGNOSTICS variable to "yes", used for the API call.
|
||||
METHOD="default" # sets the METHOD variable to "default", used for the API call.
|
||||
RANDOM_UUID="$(cat /proc/sys/kernel/random/uuid)" # generates a random UUID and sets it to the RANDOM_UUID variable.
|
||||
SESSION_ID="${RANDOM_UUID:0:8}" # Short session ID (first 8 chars of UUID) for log files
|
||||
BUILD_LOG="/tmp/create-lxc-${SESSION_ID}.log" # Host-side container creation log
|
||||
combined_log="/tmp/install-${SESSION_ID}-combined.log" # Combined log (build + install) for failed installations
|
||||
CTTYPE="${CTTYPE:-${CT_TYPE:-1}}"
|
||||
|
||||
# Parse dev_mode early
|
||||
@ -56,6 +57,7 @@ variables() {
|
||||
if [[ "${DEV_MODE_LOGS:-false}" == "true" ]]; then
|
||||
mkdir -p /var/log/community-scripts
|
||||
BUILD_LOG="/var/log/community-scripts/create-lxc-${SESSION_ID}-$(date +%Y%m%d_%H%M%S).log"
|
||||
combined_log="/var/log/community-scripts/install-${SESSION_ID}-combined-$(date +%Y%m%d_%H%M%S).log"
|
||||
fi
|
||||
|
||||
# Get Proxmox VE version and kernel version
|
||||
@ -2728,6 +2730,26 @@ Advanced:
|
||||
[[ "$APT_CACHER" == "yes" ]] && echo -e "${INFO}${BOLD}${DGN}APT Cacher: ${BGN}$APT_CACHER_IP${CL}"
|
||||
echo -e "${SEARCH}${BOLD}${DGN}Verbose Mode: ${BGN}$VERBOSE${CL}"
|
||||
echo -e "${CREATING}${BOLD}${RD}Creating a ${APP} LXC using the above advanced settings${CL}"
|
||||
|
||||
# Log settings to file
|
||||
log_section "CONTAINER SETTINGS (ADVANCED) - ${APP}"
|
||||
log_msg "Application: ${APP}"
|
||||
log_msg "PVE Version: ${PVEVERSION} (Kernel: ${KERNEL_VERSION})"
|
||||
log_msg "Operating System: $var_os ($var_version)"
|
||||
log_msg "Container Type: $([ "$CT_TYPE" == "1" ] && echo "Unprivileged" || echo "Privileged")"
|
||||
log_msg "Container ID: $CT_ID"
|
||||
log_msg "Hostname: $HN"
|
||||
log_msg "Disk Size: ${DISK_SIZE} GB"
|
||||
log_msg "CPU Cores: $CORE_COUNT"
|
||||
log_msg "RAM Size: ${RAM_SIZE} MiB"
|
||||
log_msg "Bridge: $BRG"
|
||||
log_msg "IPv4: $NET"
|
||||
log_msg "IPv6: $IPV6_METHOD"
|
||||
log_msg "FUSE Support: ${ENABLE_FUSE:-no}"
|
||||
log_msg "Nesting: $([ "${ENABLE_NESTING:-1}" == "1" ] && echo "Enabled" || echo "Disabled")"
|
||||
log_msg "GPU Passthrough: ${ENABLE_GPU:-no}"
|
||||
log_msg "Verbose Mode: $VERBOSE"
|
||||
log_msg "Session ID: ${SESSION_ID}"
|
||||
}
|
||||
|
||||
# ==============================================================================
|
||||
@ -2807,6 +2829,65 @@ EOF
|
||||
fi
|
||||
}
|
||||
|
||||
dev_mode_menu() {
|
||||
local motd=OFF keep=OFF trace=OFF pause=OFF breakpoint=OFF logs=OFF dryrun=OFF verbose=OFF
|
||||
|
||||
IFS=',' read -r -a _modes <<<"$dev_mode"
|
||||
for m in "${_modes[@]}"; do
|
||||
case "$m" in
|
||||
motd) motd=ON ;;
|
||||
keep) keep=ON ;;
|
||||
trace) trace=ON ;;
|
||||
pause) pause=ON ;;
|
||||
breakpoint) breakpoint=ON ;;
|
||||
logs) logs=ON ;;
|
||||
dryrun) dryrun=ON ;;
|
||||
esac
|
||||
done
|
||||
|
||||
[[ "$var_verbose" == "yes" ]] && verbose=ON
|
||||
|
||||
local selection
|
||||
selection=$(whiptail --backtitle "Proxmox VE Helper Scripts" \
|
||||
--title "DEV MODE" \
|
||||
--checklist "Choose one or more Options" 16 51 10 \
|
||||
"motd" "Early SSH/MOTD Setup" "$motd" \
|
||||
"keep" "Preserve Container on Failure" "$keep" \
|
||||
"trace" "Bash Command Tracing" "$trace" \
|
||||
"pause" "Step-by-Step Execution" "$pause" \
|
||||
"breakpoint" "Interactive Shell on Error" "$breakpoint" \
|
||||
"logs" "Persistent Logging" "$logs" \
|
||||
"dryrun" "Simulation Mode" "$dryrun" \
|
||||
"verbose" "Verbose logging" "$verbose" \
|
||||
3>&1 1>&2 2>&3) || exit_script
|
||||
|
||||
dev_mode=""
|
||||
var_verbose="no"
|
||||
local modes_out=()
|
||||
|
||||
for tag in $selection; do
|
||||
tag="${tag%\"}"
|
||||
tag="${tag#\"}"
|
||||
if [[ "$tag" == "verbose" ]]; then
|
||||
var_verbose="yes"
|
||||
else
|
||||
modes_out+=("$tag")
|
||||
fi
|
||||
done
|
||||
|
||||
dev_mode=$(
|
||||
IFS=,
|
||||
echo "${modes_out[*]}"
|
||||
)
|
||||
unset DEV_MODE_MOTD DEV_MODE_KEEP DEV_MODE_TRACE DEV_MODE_PAUSE DEV_MODE_BREAKPOINT DEV_MODE_LOGS DEV_MODE_DRYRUN
|
||||
parse_dev_mode
|
||||
if [[ "${DEV_MODE_LOGS:-false}" == "true" ]]; then
|
||||
mkdir -p /var/log/community-scripts
|
||||
BUILD_LOG="/var/log/community-scripts/create-lxc-${SESSION_ID}-$(date +%Y%m%d_%H%M%S).log"
|
||||
combined_log="/var/log/community-scripts/install-${SESSION_ID}-combined-$(date +%Y%m%d_%H%M%S).log"
|
||||
fi
|
||||
}
|
||||
|
||||
diagnostics_menu() {
|
||||
if [ "${DIAGNOSTICS:-no}" = "yes" ]; then
|
||||
if whiptail --backtitle "Proxmox VE Helper Scripts" \
|
||||
@ -2856,6 +2937,20 @@ echo_default() {
|
||||
fi
|
||||
echo -e "${CREATING}${BOLD}${BL}Creating a ${APP} LXC using the above default settings${CL}"
|
||||
echo -e " "
|
||||
|
||||
# Log settings to file
|
||||
log_section "CONTAINER SETTINGS - ${APP}"
|
||||
log_msg "Application: ${APP}"
|
||||
log_msg "PVE Version: ${PVEVERSION} (Kernel: ${KERNEL_VERSION})"
|
||||
log_msg "Container ID: ${CT_ID}"
|
||||
log_msg "Operating System: $var_os ($var_version)"
|
||||
log_msg "Container Type: $CT_TYPE_DESC"
|
||||
log_msg "Disk Size: ${DISK_SIZE} GB"
|
||||
log_msg "CPU Cores: ${CORE_COUNT}"
|
||||
log_msg "RAM Size: ${RAM_SIZE} MiB"
|
||||
[[ -n "${var_gpu:-}" && "${var_gpu}" == "yes" ]] && log_msg "GPU Passthrough: Enabled"
|
||||
[[ "$VERBOSE" == "yes" ]] && log_msg "Verbose Mode: Enabled"
|
||||
log_msg "Session ID: ${SESSION_ID}"
|
||||
}
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
@ -3024,12 +3119,13 @@ settings_menu() {
|
||||
local settings_items=(
|
||||
"1" "Manage API-Diagnostic Setting"
|
||||
"2" "Edit Default.vars"
|
||||
"3" "Configure dev mode"
|
||||
)
|
||||
if [ -f "$(get_app_defaults_path)" ]; then
|
||||
settings_items+=("3" "Edit App.vars for ${APP}")
|
||||
settings_items+=("4" "Back to Main Menu")
|
||||
settings_items+=("4" "Edit App.vars for ${APP}")
|
||||
settings_items+=("5" "Back to Main Menu")
|
||||
else
|
||||
settings_items+=("3" "Back to Main Menu")
|
||||
settings_items+=("4" "Back to Main Menu")
|
||||
fi
|
||||
|
||||
local choice
|
||||
@ -3043,7 +3139,8 @@ settings_menu() {
|
||||
case "$choice" in
|
||||
1) diagnostics_menu ;;
|
||||
2) nano /usr/local/community-scripts/default.vars ;;
|
||||
3)
|
||||
3) dev_mode_menu ;;
|
||||
4)
|
||||
if [ -f "$(get_app_defaults_path)" ]; then
|
||||
nano "$(get_app_defaults_path)"
|
||||
else
|
||||
@ -3051,7 +3148,7 @@ settings_menu() {
|
||||
return
|
||||
fi
|
||||
;;
|
||||
4)
|
||||
5)
|
||||
# Back to main menu
|
||||
return
|
||||
;;
|
||||
@ -3456,6 +3553,7 @@ build_container() {
|
||||
export PCT_DISK_SIZE="$DISK_SIZE"
|
||||
export IPV6_METHOD="$IPV6_METHOD"
|
||||
export ENABLE_GPU="$ENABLE_GPU"
|
||||
export APPLICATION_VERSION="${var_appversion:-}"
|
||||
|
||||
# DEV_MODE exports (optional, for debugging)
|
||||
export BUILD_LOG="$BUILD_LOG"
|
||||
@ -3546,6 +3644,9 @@ $PCT_OPTIONS_STRING"
|
||||
exit 214
|
||||
fi
|
||||
msg_ok "Storage space validated"
|
||||
|
||||
# Report installation start to API (early - captures failed installs too)
|
||||
post_to_api
|
||||
fi
|
||||
|
||||
create_lxc_container || exit $?
|
||||
@ -3922,6 +4023,9 @@ EOF'
|
||||
install_ssh_keys_into_ct
|
||||
|
||||
# Run application installer
|
||||
# Start timer for duration tracking
|
||||
start_install_timer
|
||||
|
||||
# Disable error trap - container errors are handled internally via flag file
|
||||
set +Eeuo pipefail # Disable ALL error handling temporarily
|
||||
trap - ERR # Remove ERR trap completely
|
||||
@ -3951,25 +4055,54 @@ EOF'
|
||||
if [[ $install_exit_code -ne 0 ]]; then
|
||||
msg_error "Installation failed in container ${CTID} (exit code: ${install_exit_code})"
|
||||
|
||||
# Report failure to telemetry API
|
||||
post_update_to_api "failed" "$install_exit_code"
|
||||
|
||||
# Copy both logs from container before potential deletion
|
||||
local build_log_copied=false
|
||||
local install_log_copied=false
|
||||
|
||||
if [[ -n "$CTID" && -n "${SESSION_ID:-}" ]]; then
|
||||
# Copy BUILD_LOG (creation log) if it exists
|
||||
# Create combined log with header
|
||||
{
|
||||
echo "================================================================================"
|
||||
echo "COMBINED INSTALLATION LOG - ${APP:-LXC}"
|
||||
echo "Container ID: ${CTID}"
|
||||
echo "Session ID: ${SESSION_ID}"
|
||||
echo "Timestamp: $(date '+%Y-%m-%d %H:%M:%S')"
|
||||
echo "================================================================================"
|
||||
echo ""
|
||||
} >"$combined_log"
|
||||
|
||||
# Append BUILD_LOG (host-side creation log) if it exists
|
||||
if [[ -f "${BUILD_LOG}" ]]; then
|
||||
cp "${BUILD_LOG}" "/tmp/create-lxc-${CTID}-${SESSION_ID}.log" 2>/dev/null && build_log_copied=true
|
||||
{
|
||||
echo "================================================================================"
|
||||
echo "PHASE 1: CONTAINER CREATION (Host)"
|
||||
echo "================================================================================"
|
||||
cat "${BUILD_LOG}"
|
||||
echo ""
|
||||
} >>"$combined_log"
|
||||
build_log_copied=true
|
||||
fi
|
||||
|
||||
# Copy INSTALL_LOG from container
|
||||
if pct pull "$CTID" "/root/.install-${SESSION_ID}.log" "/tmp/install-lxc-${CTID}-${SESSION_ID}.log" 2>/dev/null; then
|
||||
# Copy and append INSTALL_LOG from container
|
||||
local temp_install_log="/tmp/.install-temp-${SESSION_ID}.log"
|
||||
if pct pull "$CTID" "/root/.install-${SESSION_ID}.log" "$temp_install_log" 2>/dev/null; then
|
||||
{
|
||||
echo "================================================================================"
|
||||
echo "PHASE 2: APPLICATION INSTALLATION (Container)"
|
||||
echo "================================================================================"
|
||||
cat "$temp_install_log"
|
||||
echo ""
|
||||
} >>"$combined_log"
|
||||
rm -f "$temp_install_log"
|
||||
install_log_copied=true
|
||||
fi
|
||||
|
||||
# Show available logs
|
||||
# Show combined log
|
||||
echo ""
|
||||
[[ "$build_log_copied" == true ]] && echo -e "${GN}✔${CL} Container creation log: ${BL}/tmp/create-lxc-${CTID}-${SESSION_ID}.log${CL}"
|
||||
[[ "$install_log_copied" == true ]] && echo -e "${GN}✔${CL} Installation log: ${BL}/tmp/install-lxc-${CTID}-${SESSION_ID}.log${CL}"
|
||||
echo -e "${GN}✔${CL} Installation log: ${BL}${combined_log}${CL}"
|
||||
fi
|
||||
|
||||
# Dev mode: Keep container or open breakpoint shell
|
||||
@ -5028,9 +5161,6 @@ create_lxc_container() {
|
||||
}
|
||||
|
||||
msg_ok "LXC Container ${BL}$CTID${CL} ${GN}was successfully created."
|
||||
|
||||
# Report container creation to API
|
||||
post_to_api
|
||||
}
|
||||
|
||||
# ==============================================================================
|
||||
@ -5100,9 +5230,9 @@ EOF
|
||||
# api_exit_script()
|
||||
#
|
||||
# - Exit trap handler for reporting to API telemetry
|
||||
# - Captures exit code and reports to API using centralized error descriptions
|
||||
# - Uses explain_exit_code() from error_handler.func for consistent error messages
|
||||
# - Posts failure status with exit code to API (error description added automatically)
|
||||
# - Captures exit code and reports to PocketBase using centralized error descriptions
|
||||
# - Uses explain_exit_code() from api.func for consistent error messages
|
||||
# - Posts failure status with exit code to API (error description resolved automatically)
|
||||
# - Only executes on non-zero exit codes
|
||||
# ------------------------------------------------------------------------------
|
||||
api_exit_script() {
|
||||
@ -5115,6 +5245,6 @@ api_exit_script() {
|
||||
if command -v pveversion >/dev/null 2>&1; then
|
||||
trap 'api_exit_script' EXIT
|
||||
fi
|
||||
trap 'post_update_to_api "failed" "$BASH_COMMAND"' ERR
|
||||
trap 'post_update_to_api "failed" "INTERRUPTED"' SIGINT
|
||||
trap 'post_update_to_api "failed" "TERMINATED"' SIGTERM
|
||||
trap 'post_update_to_api "failed" "$?"' ERR
|
||||
trap 'post_update_to_api "failed" "130"' SIGINT
|
||||
trap 'post_update_to_api "failed" "143"' SIGTERM
|
||||
|
||||
152
misc/core.func
152
misc/core.func
@ -413,6 +413,69 @@ get_active_logfile() {
|
||||
# Legacy compatibility: SILENT_LOGFILE points to active log
|
||||
SILENT_LOGFILE="$(get_active_logfile)"
|
||||
|
||||
# ------------------------------------------------------------------------------
# strip_ansi()
#
# - Removes ANSI escape sequences from text (used to clean colored output
#   before it is written to log files)
# - With arguments: cleans "$*" (echo -e expands embedded escapes first)
# - Without arguments: acts as a filter on stdin
# ------------------------------------------------------------------------------
strip_ansi() {
  # Two sed passes: SGR colour codes (ending in 'm') first, then any other
  # CSI sequence ending in a letter.
  local filter='s/\x1b\[[0-9;]*m//g; s/\x1b\[[0-9;]*[a-zA-Z]//g'
  if [[ $# -eq 0 ]]; then
    sed "$filter"
  else
    echo -e "$*" | sed "$filter"
  fi
}
|
||||
|
||||
# ------------------------------------------------------------------------------
# log_msg()
#
# - Appends a timestamped, ANSI-stripped line to the active log file
# - Silently returns when the message or the log file path is empty
# - Creates the log directory on demand
# - Arguments: message text (ANSI codes are removed before writing)
# ------------------------------------------------------------------------------
log_msg() {
  local text="$*"
  local target
  target="$(get_active_logfile)"

  [[ -n "$text" ]] || return
  [[ -n "$target" ]] || return

  # Make sure the log directory exists before appending
  mkdir -p "$(dirname "$target")" 2>/dev/null || true

  # Timestamped, ANSI-free entry
  local clean
  clean=$(strip_ansi "$text")
  printf '[%s] %s\n' "$(date '+%Y-%m-%d %H:%M:%S')" "$clean" >>"$target"
}
|
||||
|
||||
# ------------------------------------------------------------------------------
# log_section()
#
# - Appends a framed section header to the active log file, separating the
#   different phases of an installation
# - Silently returns when no log file path is configured
# - Arguments: section name
# ------------------------------------------------------------------------------
log_section() {
  local title="$1"
  local target
  target="$(get_active_logfile)"
  [[ -n "$target" ]] || return

  mkdir -p "$(dirname "$target")" 2>/dev/null || true

  local rule="================================================================================"
  {
    echo ""
    echo "$rule"
    echo "[$(date '+%Y-%m-%d %H:%M:%S')] $title"
    echo "$rule"
  } >>"$target"
}
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# silent()
|
||||
#
|
||||
@ -555,6 +618,9 @@ msg_info() {
|
||||
[[ -n "${MSG_INFO_SHOWN["$msg"]+x}" ]] && return
|
||||
MSG_INFO_SHOWN["$msg"]=1
|
||||
|
||||
# Log to file
|
||||
log_msg "[INFO] $msg"
|
||||
|
||||
stop_spinner
|
||||
SPINNER_MSG="$msg"
|
||||
|
||||
@ -598,6 +664,7 @@ msg_ok() {
|
||||
stop_spinner
|
||||
clear_line
|
||||
echo -e "$CM ${GN}${msg}${CL}"
|
||||
log_msg "[OK] $msg"
|
||||
local sanitized_msg
|
||||
sanitized_msg=$(printf '%s' "$msg" | sed 's/\x1b\[[0-9;]*m//g; s/[^a-zA-Z0-9_]/_/g')
|
||||
unset 'MSG_INFO_SHOWN['"$sanitized_msg"']' 2>/dev/null || true
|
||||
@ -615,6 +682,7 @@ msg_error() {
|
||||
stop_spinner
|
||||
local msg="$1"
|
||||
echo -e "${BFR:-}${CROSS:-✖️} ${RD}${msg}${CL}" >&2
|
||||
log_msg "[ERROR] $msg"
|
||||
}
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
@ -629,6 +697,7 @@ msg_warn() {
|
||||
stop_spinner
|
||||
local msg="$1"
|
||||
echo -e "${BFR:-}${INFO:-ℹ️} ${YWB}${msg}${CL}" >&2
|
||||
log_msg "[WARN] $msg"
|
||||
}
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
@ -646,6 +715,7 @@ msg_custom() {
|
||||
[[ -z "$msg" ]] && return
|
||||
stop_spinner
|
||||
echo -e "${BFR:-} ${symbol} ${color}${msg}${CL:-\e[0m}"
|
||||
log_msg "$msg"
|
||||
}
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
@ -833,29 +903,29 @@ is_verbose_mode() {
|
||||
is_unattended() {
|
||||
# Primary: Check MODE variable (case-insensitive)
|
||||
local mode="${MODE:-${mode:-}}"
|
||||
mode="${mode,,}" # lowercase
|
||||
mode="${mode,,}" # lowercase
|
||||
|
||||
case "$mode" in
|
||||
default|1)
|
||||
default | 1)
|
||||
return 0
|
||||
;;
|
||||
mydefaults | userdefaults | 3)
|
||||
return 0
|
||||
;;
|
||||
appdefaults | 4)
|
||||
return 0
|
||||
;;
|
||||
advanced | 2)
|
||||
# Advanced mode is interactive ONLY during wizard
|
||||
# Inside container (install scripts), it should be unattended
|
||||
# Check if we're inside a container (no pveversion command)
|
||||
if ! command -v pveversion &>/dev/null; then
|
||||
# We're inside the container - all values already collected
|
||||
return 0
|
||||
;;
|
||||
mydefaults|userdefaults|3)
|
||||
return 0
|
||||
;;
|
||||
appdefaults|4)
|
||||
return 0
|
||||
;;
|
||||
advanced|2)
|
||||
# Advanced mode is interactive ONLY during wizard
|
||||
# Inside container (install scripts), it should be unattended
|
||||
# Check if we're inside a container (no pveversion command)
|
||||
if ! command -v pveversion &>/dev/null; then
|
||||
# We're inside the container - all values already collected
|
||||
return 0
|
||||
fi
|
||||
# On host during wizard - interactive
|
||||
return 1
|
||||
;;
|
||||
fi
|
||||
# On host during wizard - interactive
|
||||
return 1
|
||||
;;
|
||||
esac
|
||||
|
||||
# Legacy fallbacks for compatibility
|
||||
@ -977,29 +1047,29 @@ prompt_confirm() {
|
||||
# User provided input
|
||||
response="${response,,}" # lowercase
|
||||
case "$response" in
|
||||
y|yes)
|
||||
y | yes)
|
||||
return 0
|
||||
;;
|
||||
n | no)
|
||||
return 1
|
||||
;;
|
||||
"")
|
||||
# Empty response, use default
|
||||
if [[ "$default" == "y" ]]; then
|
||||
return 0
|
||||
;;
|
||||
n|no)
|
||||
else
|
||||
return 1
|
||||
;;
|
||||
"")
|
||||
# Empty response, use default
|
||||
if [[ "$default" == "y" ]]; then
|
||||
return 0
|
||||
else
|
||||
return 1
|
||||
fi
|
||||
;;
|
||||
*)
|
||||
# Invalid input, use default
|
||||
echo -e "${YW}Invalid response, using default: ${default}${CL}"
|
||||
if [[ "$default" == "y" ]]; then
|
||||
return 0
|
||||
else
|
||||
return 1
|
||||
fi
|
||||
;;
|
||||
fi
|
||||
;;
|
||||
*)
|
||||
# Invalid input, use default
|
||||
echo -e "${YW}Invalid response, using default: ${default}${CL}"
|
||||
if [[ "$default" == "y" ]]; then
|
||||
return 0
|
||||
else
|
||||
return 1
|
||||
fi
|
||||
;;
|
||||
esac
|
||||
else
|
||||
# Timeout occurred
|
||||
|
||||
52
misc/data/Dockerfile
Normal file
52
misc/data/Dockerfile
Normal file
@ -0,0 +1,52 @@
|
||||
FROM golang:1.25-alpine AS build
|
||||
WORKDIR /src
|
||||
COPY go.mod go.sum* ./
|
||||
RUN go mod download 2>/dev/null || true
|
||||
COPY . .
|
||||
RUN go build -trimpath -ldflags "-s -w" -o /out/telemetry-service .
|
||||
RUN go build -trimpath -ldflags "-s -w" -o /out/migrate ./migration/migrate.go
|
||||
|
||||
FROM alpine:3.23
|
||||
RUN apk add --no-cache ca-certificates tzdata
|
||||
WORKDIR /app
|
||||
COPY --from=build /out/telemetry-service /app/telemetry-service
|
||||
COPY --from=build /out/migrate /app/migrate
|
||||
COPY entrypoint.sh /app/entrypoint.sh
|
||||
RUN chmod +x /app/entrypoint.sh /app/migrate
|
||||
|
||||
# Service config
|
||||
ENV LISTEN_ADDR=":8080"
|
||||
ENV MAX_BODY_BYTES="1024"
|
||||
ENV RATE_LIMIT_RPM="60"
|
||||
ENV RATE_BURST="20"
|
||||
ENV UPSTREAM_TIMEOUT_MS="4000"
|
||||
ENV ENABLE_REQUEST_LOGGING="false"
|
||||
|
||||
# Cache config (optional)
|
||||
ENV ENABLE_CACHE="true"
|
||||
ENV CACHE_TTL_SECONDS="60"
|
||||
ENV ENABLE_REDIS="false"
|
||||
# ENV REDIS_URL="redis://localhost:6379"
|
||||
|
||||
# Alert config (optional)
|
||||
ENV ALERT_ENABLED="false"
|
||||
# ENV SMTP_HOST=""
|
||||
# ENV SMTP_PORT="587"
|
||||
# ENV SMTP_USER=""
|
||||
# ENV SMTP_PASSWORD=""
|
||||
# ENV SMTP_FROM="telemetry@proxmoxved.local"
|
||||
# ENV SMTP_TO=""
|
||||
# ENV SMTP_USE_TLS="false"
|
||||
ENV ALERT_FAILURE_THRESHOLD="20.0"
|
||||
ENV ALERT_CHECK_INTERVAL_MIN="15"
|
||||
ENV ALERT_COOLDOWN_MIN="60"
|
||||
|
||||
# Migration config (optional)
|
||||
ENV RUN_MIGRATION="false"
|
||||
ENV MIGRATION_REQUIRED="false"
|
||||
ENV MIGRATION_SOURCE_URL="https://api.htl-braunau.at/dev/data"
|
||||
|
||||
EXPOSE 8080
|
||||
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s \
|
||||
CMD wget -q --spider http://localhost:8080/healthz || exit 1
|
||||
ENTRYPOINT ["/app/entrypoint.sh"]
|
||||
853
misc/data/alerts.go
Normal file
853
misc/data/alerts.go
Normal file
@ -0,0 +1,853 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"log"
|
||||
"net/smtp"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// AlertConfig holds SMTP alert configuration
|
||||
type AlertConfig struct {
|
||||
Enabled bool
|
||||
SMTPHost string
|
||||
SMTPPort int
|
||||
SMTPUser string
|
||||
SMTPPassword string
|
||||
SMTPFrom string
|
||||
SMTPTo []string
|
||||
UseTLS bool
|
||||
FailureThreshold float64 // Alert when failure rate exceeds this (e.g., 20.0 = 20%)
|
||||
CheckInterval time.Duration // How often to check
|
||||
Cooldown time.Duration // Minimum time between alerts
|
||||
|
||||
// Weekly Report settings
|
||||
WeeklyReportEnabled bool // Enable weekly summary reports
|
||||
WeeklyReportDay time.Weekday // Day to send report (0=Sunday, 1=Monday, etc.)
|
||||
WeeklyReportHour int // Hour to send report (0-23)
|
||||
}
|
||||
|
||||
// WeeklyReportData contains aggregated weekly statistics used to render the
// HTML (and legacy plain-text) weekly report emails.
type WeeklyReportData struct {
	CalendarWeek     int
	Year             int
	StartDate        time.Time // Monday 00:00:00 of the reported week.
	EndDate          time.Time // Sunday 23:59:59 of the reported week.
	TotalInstalls    int
	SuccessCount     int
	FailedCount      int
	SuccessRate      float64 // Percentage in [0, 100]; 0 when TotalInstalls == 0.
	TopApps          []AppStat
	TopFailedApps    []AppStat
	ComparedToPrev   WeekComparison
	OsDistribution   map[string]int // OS name -> install count.
	TypeDistribution map[string]int // Deployment type (e.g. LXC/VM) -> install count.
}

// AppStat represents statistics for a single app
type AppStat struct {
	Name        string
	Total       int
	Failed      int
	FailureRate float64 // Percentage in [0, 100].
}

// WeekComparison shows changes compared to previous week
type WeekComparison struct {
	InstallsChange  int     // Difference in total installs
	InstallsPercent float64 // Percentage change
	FailRateChange  float64 // Change in failure rate (percentage points)
}
|
||||
|
||||
// Alerter handles alerting functionality: threshold-based failure alerts and
// scheduled weekly reports. Mutable state (timestamps, history) is guarded
// by mu.
type Alerter struct {
	cfg              AlertConfig
	lastAlertAt      time.Time // Last threshold alert; enforces cfg.Cooldown.
	lastWeeklyReport time.Time // Used to avoid a second report within one ISO week.
	mu               sync.Mutex
	pb               *PBClient
	lastStats        alertStats // NOTE(review): never written in this file — confirm it is still used.
	alertHistory     []AlertEvent // Most recent events, capped at 100 entries.
}

// alertStats is a snapshot of success/failure counters at a point in time.
type alertStats struct {
	successCount int
	failedCount  int
	checkedAt    time.Time
}

// AlertEvent records an alert that was sent
type AlertEvent struct {
	Timestamp   time.Time `json:"timestamp"`
	Type        string    `json:"type"` // e.g. "high_failure_rate" or "weekly_report".
	Message     string    `json:"message"`
	FailureRate float64   `json:"failure_rate,omitempty"`
}
|
||||
|
||||
// NewAlerter creates a new alerter instance
|
||||
func NewAlerter(cfg AlertConfig, pb *PBClient) *Alerter {
|
||||
return &Alerter{
|
||||
cfg: cfg,
|
||||
pb: pb,
|
||||
alertHistory: make([]AlertEvent, 0),
|
||||
}
|
||||
}
|
||||
|
||||
// Start begins the alert monitoring loop. It is a no-op when alerting is
// disabled or SMTP is not configured well enough to deliver mail. The
// monitor (and, when enabled, the weekly-report scheduler) run as background
// goroutines for the lifetime of the process.
func (a *Alerter) Start() {
	if !a.cfg.Enabled {
		log.Println("INFO: alerting disabled")
		return
	}

	// Without a host and at least one recipient nothing could ever be sent.
	if a.cfg.SMTPHost == "" || len(a.cfg.SMTPTo) == 0 {
		log.Println("WARN: alerting enabled but SMTP not configured")
		return
	}

	go a.monitorLoop()
	log.Printf("INFO: alert monitoring started (threshold: %.1f%%, interval: %v)", a.cfg.FailureThreshold, a.cfg.CheckInterval)

	// Start weekly report scheduler if enabled
	if a.cfg.WeeklyReportEnabled {
		go a.weeklyReportLoop()
		log.Printf("INFO: weekly report scheduler started (day: %s, hour: %02d:00)", a.cfg.WeeklyReportDay, a.cfg.WeeklyReportHour)
	}
}
|
||||
|
||||
func (a *Alerter) monitorLoop() {
|
||||
ticker := time.NewTicker(a.cfg.CheckInterval)
|
||||
defer ticker.Stop()
|
||||
|
||||
for range ticker.C {
|
||||
a.checkAndAlert()
|
||||
}
|
||||
}
|
||||
|
||||
// checkAndAlert fetches the most recent telemetry window and triggers an
// alert when the failure rate reaches the configured threshold. Errors and
// low-volume windows are skipped silently (apart from a warning log).
func (a *Alerter) checkAndAlert() {
	ctx, cancel := newTimeoutContext(10 * time.Second)
	defer cancel()

	// Fetch last hour's data
	// NOTE(review): this comment says "last hour" while the alert email body
	// says "last 24h"; the unit of the `1` argument is defined by
	// FetchDashboardData — confirm which window is actually queried.
	data, err := a.pb.FetchDashboardData(ctx, 1)
	if err != nil {
		log.Printf("WARN: alert check failed: %v", err)
		return
	}

	// Calculate current failure rate
	total := data.SuccessCount + data.FailedCount
	if total < 10 {
		// Not enough data to determine rate
		return
	}

	failureRate := float64(data.FailedCount) / float64(total) * 100

	// Check if we should alert
	if failureRate >= a.cfg.FailureThreshold {
		a.maybeSendAlert(failureRate, data.FailedCount, total)
	}
}
|
||||
|
||||
// maybeSendAlert sends a high-failure-rate email unless one was already sent
// within the cooldown window, then records the event in the in-memory
// history (capped at 100 entries). rate is a percentage; failed/total are
// absolute counts. Must not be called with a.mu held — it locks internally.
// NOTE(review): the SMTP send happens while holding a.mu, so a slow server
// blocks other Alerter operations for the duration — confirm acceptable.
func (a *Alerter) maybeSendAlert(rate float64, failed, total int) {
	a.mu.Lock()
	defer a.mu.Unlock()

	// Check cooldown
	if time.Since(a.lastAlertAt) < a.cfg.Cooldown {
		return
	}

	// Send alert
	subject := fmt.Sprintf("[ProxmoxVED Alert] High Failure Rate: %.1f%%", rate)
	body := fmt.Sprintf(`ProxmoxVE Helper Scripts - Telemetry Alert

⚠️ High installation failure rate detected!

Current Statistics (last 24h):
- Failure Rate: %.1f%%
- Failed Installations: %d
- Total Installations: %d
- Threshold: %.1f%%

Time: %s

Please check the dashboard for more details.

---
This is an automated alert from the telemetry service.
`, rate, failed, total, a.cfg.FailureThreshold, time.Now().Format(time.RFC1123))

	if err := a.sendEmail(subject, body); err != nil {
		log.Printf("ERROR: failed to send alert email: %v", err)
		return
	}

	// Only record the alert (and arm the cooldown) after a successful send,
	// so a failed delivery is retried on the next check.
	a.lastAlertAt = time.Now()
	a.alertHistory = append(a.alertHistory, AlertEvent{
		Timestamp:   time.Now(),
		Type:        "high_failure_rate",
		Message:     fmt.Sprintf("Failure rate %.1f%% exceeded threshold %.1f%%", rate, a.cfg.FailureThreshold),
		FailureRate: rate,
	})

	// Keep only last 100 alerts
	if len(a.alertHistory) > 100 {
		a.alertHistory = a.alertHistory[len(a.alertHistory)-100:]
	}

	log.Printf("ALERT: sent high failure rate alert (%.1f%%)", rate)
}
|
||||
|
||||
// sendEmail delivers a plain-text message to all configured recipients.
func (a *Alerter) sendEmail(subject, body string) error {
	return a.sendEmailWithType(subject, body, "text/plain")
}

// sendHTMLEmail delivers an HTML message to all configured recipients.
func (a *Alerter) sendHTMLEmail(subject, body string) error {
	return a.sendEmailWithType(subject, body, "text/html")
}
|
||||
|
||||
func (a *Alerter) sendEmailWithType(subject, body, contentType string) error {
|
||||
// Build message
|
||||
var msg bytes.Buffer
|
||||
msg.WriteString(fmt.Sprintf("From: %s\r\n", a.cfg.SMTPFrom))
|
||||
msg.WriteString(fmt.Sprintf("To: %s\r\n", strings.Join(a.cfg.SMTPTo, ", ")))
|
||||
msg.WriteString(fmt.Sprintf("Subject: %s\r\n", subject))
|
||||
msg.WriteString("MIME-Version: 1.0\r\n")
|
||||
msg.WriteString(fmt.Sprintf("Content-Type: %s; charset=UTF-8\r\n", contentType))
|
||||
msg.WriteString("\r\n")
|
||||
msg.WriteString(body)
|
||||
|
||||
addr := fmt.Sprintf("%s:%d", a.cfg.SMTPHost, a.cfg.SMTPPort)
|
||||
|
||||
var auth smtp.Auth
|
||||
if a.cfg.SMTPUser != "" && a.cfg.SMTPPassword != "" {
|
||||
auth = smtp.PlainAuth("", a.cfg.SMTPUser, a.cfg.SMTPPassword, a.cfg.SMTPHost)
|
||||
}
|
||||
|
||||
if a.cfg.UseTLS {
|
||||
// TLS connection
|
||||
tlsConfig := &tls.Config{
|
||||
ServerName: a.cfg.SMTPHost,
|
||||
}
|
||||
|
||||
conn, err := tls.Dial("tcp", addr, tlsConfig)
|
||||
if err != nil {
|
||||
return fmt.Errorf("TLS dial failed: %w", err)
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
client, err := smtp.NewClient(conn, a.cfg.SMTPHost)
|
||||
if err != nil {
|
||||
return fmt.Errorf("SMTP client failed: %w", err)
|
||||
}
|
||||
defer client.Close()
|
||||
|
||||
if auth != nil {
|
||||
if err := client.Auth(auth); err != nil {
|
||||
return fmt.Errorf("SMTP auth failed: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := client.Mail(a.cfg.SMTPFrom); err != nil {
|
||||
return fmt.Errorf("SMTP MAIL failed: %w", err)
|
||||
}
|
||||
|
||||
for _, to := range a.cfg.SMTPTo {
|
||||
if err := client.Rcpt(to); err != nil {
|
||||
return fmt.Errorf("SMTP RCPT failed: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
w, err := client.Data()
|
||||
if err != nil {
|
||||
return fmt.Errorf("SMTP DATA failed: %w", err)
|
||||
}
|
||||
|
||||
_, err = w.Write(msg.Bytes())
|
||||
if err != nil {
|
||||
return fmt.Errorf("SMTP write failed: %w", err)
|
||||
}
|
||||
|
||||
return w.Close()
|
||||
}
|
||||
|
||||
// Non-TLS (STARTTLS)
|
||||
return smtp.SendMail(addr, auth, a.cfg.SMTPFrom, a.cfg.SMTPTo, msg.Bytes())
|
||||
}
|
||||
|
||||
// GetAlertHistory returns recent alert events
|
||||
func (a *Alerter) GetAlertHistory() []AlertEvent {
|
||||
a.mu.Lock()
|
||||
defer a.mu.Unlock()
|
||||
result := make([]AlertEvent, len(a.alertHistory))
|
||||
copy(result, a.alertHistory)
|
||||
return result
|
||||
}
|
||||
|
||||
// TestAlert sends a test alert email so operators can verify their SMTP
// settings end-to-end. It fails fast when alerting is disabled or no SMTP
// host is configured.
func (a *Alerter) TestAlert() error {
	if !a.cfg.Enabled || a.cfg.SMTPHost == "" {
		return fmt.Errorf("alerting not configured")
	}

	subject := "[ProxmoxVED] Test Alert"
	body := fmt.Sprintf(`This is a test alert from ProxmoxVE Helper Scripts telemetry service.

If you received this email, your alert configuration is working correctly.

Time: %s
SMTP Host: %s
Recipients: %s

---
This is an automated test message.
`, time.Now().Format(time.RFC1123), a.cfg.SMTPHost, strings.Join(a.cfg.SMTPTo, ", "))

	return a.sendEmail(subject, body)
}
|
||||
|
||||
// Helper for timeout context
|
||||
func newTimeoutContext(d time.Duration) (context.Context, context.CancelFunc) {
|
||||
return context.WithTimeout(context.Background(), d)
|
||||
}
|
||||
|
||||
// weeklyReportLoop checks periodically if it's time to send the weekly
// report. The check runs hourly; checkAndSendWeeklyReport matches the
// configured day and hour exactly, so each tick is cheap.
func (a *Alerter) weeklyReportLoop() {
	// Check every hour
	ticker := time.NewTicker(1 * time.Hour)
	defer ticker.Stop()

	for range ticker.C {
		a.checkAndSendWeeklyReport()
	}
}
|
||||
|
||||
// checkAndSendWeeklyReport sends the weekly report if it's the right time:
// the configured weekday and hour, and no report has been sent yet in the
// current ISO week (which keeps the hourly ticker from double-sending).
func (a *Alerter) checkAndSendWeeklyReport() {
	now := time.Now()

	// Check if it's the right day and hour
	if now.Weekday() != a.cfg.WeeklyReportDay || now.Hour() != a.cfg.WeeklyReportHour {
		return
	}

	a.mu.Lock()
	// Check if we already sent a report this week
	_, lastWeek := a.lastWeeklyReport.ISOWeek()
	_, currentWeek := now.ISOWeek()
	if a.lastWeeklyReport.Year() == now.Year() && lastWeek == currentWeek {
		a.mu.Unlock()
		return
	}
	// NOTE(review): the check above and the send below are not atomic; a
	// concurrent manual TestWeeklyReport could race this — presumably
	// acceptable for a once-a-week job, but confirm.
	a.mu.Unlock()

	// Send the weekly report
	if err := a.SendWeeklyReport(); err != nil {
		log.Printf("ERROR: failed to send weekly report: %v", err)
	}
}
|
||||
|
||||
// SendWeeklyReport generates and sends the weekly summary email. It fetches
// the aggregated data, renders the HTML body, sends it, and only then
// records lastWeeklyReport and the history entry — so a failed send is
// retried on the next scheduler tick.
func (a *Alerter) SendWeeklyReport() error {
	if !a.cfg.Enabled || a.cfg.SMTPHost == "" {
		return fmt.Errorf("alerting not configured")
	}

	ctx, cancel := newTimeoutContext(30 * time.Second)
	defer cancel()

	// Get data for the past week
	reportData, err := a.fetchWeeklyReportData(ctx)
	if err != nil {
		return fmt.Errorf("failed to fetch weekly data: %w", err)
	}

	// Generate email content
	subject := fmt.Sprintf("[ProxmoxVED] Weekly Report - Week %d, %d", reportData.CalendarWeek, reportData.Year)
	body := a.generateWeeklyReportHTML(reportData)

	if err := a.sendHTMLEmail(subject, body); err != nil {
		return fmt.Errorf("failed to send email: %w", err)
	}

	a.mu.Lock()
	a.lastWeeklyReport = time.Now()
	a.alertHistory = append(a.alertHistory, AlertEvent{
		Timestamp: time.Now(),
		Type:      "weekly_report",
		Message:   fmt.Sprintf("Weekly report KW %d/%d sent", reportData.CalendarWeek, reportData.Year),
	})
	a.mu.Unlock()

	log.Printf("INFO: weekly report KW %d/%d sent successfully", reportData.CalendarWeek, reportData.Year)
	return nil
}
|
||||
|
||||
// fetchWeeklyReportData collects data for the weekly report
|
||||
func (a *Alerter) fetchWeeklyReportData(ctx context.Context) (*WeeklyReportData, error) {
|
||||
// Calculate the previous week's date range (Mon-Sun)
|
||||
now := time.Now()
|
||||
|
||||
// Find last Monday
|
||||
daysToLastMonday := int(now.Weekday() - time.Monday)
|
||||
if daysToLastMonday < 0 {
|
||||
daysToLastMonday += 7
|
||||
}
|
||||
// Go back to the Monday of LAST week
|
||||
lastMonday := now.AddDate(0, 0, -daysToLastMonday-7)
|
||||
lastMonday = time.Date(lastMonday.Year(), lastMonday.Month(), lastMonday.Day(), 0, 0, 0, 0, lastMonday.Location())
|
||||
lastSunday := lastMonday.AddDate(0, 0, 6)
|
||||
lastSunday = time.Date(lastSunday.Year(), lastSunday.Month(), lastSunday.Day(), 23, 59, 59, 0, lastSunday.Location())
|
||||
|
||||
// Get calendar week
|
||||
year, week := lastMonday.ISOWeek()
|
||||
|
||||
// Fetch current week's data (7 days)
|
||||
currentData, err := a.pb.FetchDashboardData(ctx, 7)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to fetch current week data: %w", err)
|
||||
}
|
||||
|
||||
// Fetch previous week's data for comparison (14 days, we'll compare)
|
||||
prevData, err := a.pb.FetchDashboardData(ctx, 14)
|
||||
if err != nil {
|
||||
// Non-fatal, just log
|
||||
log.Printf("WARN: could not fetch previous week data: %v", err)
|
||||
prevData = nil
|
||||
}
|
||||
|
||||
// Build report data
|
||||
report := &WeeklyReportData{
|
||||
CalendarWeek: week,
|
||||
Year: year,
|
||||
StartDate: lastMonday,
|
||||
EndDate: lastSunday,
|
||||
TotalInstalls: currentData.TotalInstalls,
|
||||
SuccessCount: currentData.SuccessCount,
|
||||
FailedCount: currentData.FailedCount,
|
||||
OsDistribution: make(map[string]int),
|
||||
TypeDistribution: make(map[string]int),
|
||||
}
|
||||
|
||||
// Calculate success rate
|
||||
if report.TotalInstalls > 0 {
|
||||
report.SuccessRate = float64(report.SuccessCount) / float64(report.TotalInstalls) * 100
|
||||
}
|
||||
|
||||
// Top 5 installed apps
|
||||
for i, app := range currentData.TopApps {
|
||||
if i >= 5 {
|
||||
break
|
||||
}
|
||||
report.TopApps = append(report.TopApps, AppStat{
|
||||
Name: app.App,
|
||||
Total: app.Count,
|
||||
})
|
||||
}
|
||||
|
||||
// Top 5 failed apps
|
||||
for i, app := range currentData.FailedApps {
|
||||
if i >= 5 {
|
||||
break
|
||||
}
|
||||
report.TopFailedApps = append(report.TopFailedApps, AppStat{
|
||||
Name: app.App,
|
||||
Total: app.TotalCount,
|
||||
Failed: app.FailedCount,
|
||||
FailureRate: app.FailureRate,
|
||||
})
|
||||
}
|
||||
|
||||
// OS distribution
|
||||
for _, os := range currentData.OsDistribution {
|
||||
report.OsDistribution[os.Os] = os.Count
|
||||
}
|
||||
|
||||
// Type distribution (LXC vs VM)
|
||||
for _, t := range currentData.TypeStats {
|
||||
report.TypeDistribution[t.Type] = t.Count
|
||||
}
|
||||
|
||||
// Calculate comparison to previous week
|
||||
if prevData != nil {
|
||||
// Previous week stats (subtract current from 14-day total)
|
||||
prevInstalls := prevData.TotalInstalls - currentData.TotalInstalls
|
||||
prevFailed := prevData.FailedCount - currentData.FailedCount
|
||||
prevSuccess := prevData.SuccessCount - currentData.SuccessCount
|
||||
|
||||
if prevInstalls > 0 {
|
||||
prevFailRate := float64(prevFailed) / float64(prevInstalls) * 100
|
||||
currentFailRate := 100 - report.SuccessRate
|
||||
|
||||
report.ComparedToPrev.InstallsChange = report.TotalInstalls - prevInstalls
|
||||
if prevInstalls > 0 {
|
||||
report.ComparedToPrev.InstallsPercent = float64(report.TotalInstalls-prevInstalls) / float64(prevInstalls) * 100
|
||||
}
|
||||
report.ComparedToPrev.FailRateChange = currentFailRate - prevFailRate
|
||||
_ = prevSuccess // suppress unused warning
|
||||
}
|
||||
}
|
||||
|
||||
return report, nil
|
||||
}
|
||||
|
||||
// generateWeeklyReportHTML creates the HTML email body for the weekly report:
// a header, week info, a stats grid, optional week-over-week comparison,
// top-installed and top-failed tables, type/OS distributions, and a footer.
// The layout is table-based inline-styled HTML for email-client compatibility.
//
// NOTE(review): app names, OS names and type labels are interpolated into
// the HTML via %s without escaping; if those values can contain markup
// characters, consider html.EscapeString — confirm against the data source.
func (a *Alerter) generateWeeklyReportHTML(data *WeeklyReportData) string {
	var b strings.Builder

	// HTML Email Template
	b.WriteString(`<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
</head>
<body style="margin:0;padding:0;background-color:#f6f9fc;font-family:-apple-system,BlinkMacSystemFont,'Segoe UI',Roboto,Helvetica,Arial,sans-serif;">
<table width="100%" cellpadding="0" cellspacing="0" style="background-color:#f6f9fc;padding:40px 20px;">
<tr><td align="center">
<table width="600" cellpadding="0" cellspacing="0" style="background-color:#ffffff;border-radius:12px;box-shadow:0 4px 6px rgba(0,0,0,0.07);">

<!-- Header -->
<tr>
<td style="background:linear-gradient(135deg,#667eea 0%,#764ba2 100%);padding:32px 40px;border-radius:12px 12px 0 0;">
<h1 style="margin:0;color:#ffffff;font-size:24px;font-weight:600;">📊 Weekly Telemetry Report</h1>
<p style="margin:8px 0 0;color:rgba(255,255,255,0.85);font-size:14px;">ProxmoxVE Helper Scripts</p>
</td>
</tr>

<!-- Week Info -->
<tr>
<td style="padding:24px 40px 0;">
<table width="100%" style="background:#f8fafc;border-radius:8px;padding:16px;">
<tr>
<td style="padding:12px 16px;">
<span style="color:#64748b;font-size:12px;text-transform:uppercase;letter-spacing:0.5px;">Calendar Week</span><br>
<span style="color:#1e293b;font-size:20px;font-weight:600;">Week `)
	b.WriteString(fmt.Sprintf("%d, %d", data.CalendarWeek, data.Year))
	b.WriteString(`</span>
</td>
<td style="padding:12px 16px;text-align:right;">
<span style="color:#64748b;font-size:12px;text-transform:uppercase;letter-spacing:0.5px;">Period</span><br>
<span style="color:#1e293b;font-size:14px;">`)
	b.WriteString(fmt.Sprintf("%s – %s", data.StartDate.Format("Jan 02"), data.EndDate.Format("Jan 02, 2006")))
	b.WriteString(`</span>
</td>
</tr>
</table>
</td>
</tr>

<!-- Stats Grid -->
<tr>
<td style="padding:24px 40px;">
<table width="100%" cellpadding="0" cellspacing="0">
<tr>
<td width="25%" style="padding:8px;">
<div style="background:#f0fdf4;border-radius:8px;padding:16px;text-align:center;">
<div style="color:#16a34a;font-size:28px;font-weight:700;">`)
	b.WriteString(fmt.Sprintf("%d", data.TotalInstalls))
	b.WriteString(`</div>
<div style="color:#166534;font-size:11px;text-transform:uppercase;letter-spacing:0.5px;margin-top:4px;">Total</div>
</div>
</td>
<td width="25%" style="padding:8px;">
<div style="background:#f0fdf4;border-radius:8px;padding:16px;text-align:center;">
<div style="color:#16a34a;font-size:28px;font-weight:700;">`)
	b.WriteString(fmt.Sprintf("%d", data.SuccessCount))
	b.WriteString(`</div>
<div style="color:#166534;font-size:11px;text-transform:uppercase;letter-spacing:0.5px;margin-top:4px;">Successful</div>
</div>
</td>
<td width="25%" style="padding:8px;">
<div style="background:#fef2f2;border-radius:8px;padding:16px;text-align:center;">
<div style="color:#dc2626;font-size:28px;font-weight:700;">`)
	b.WriteString(fmt.Sprintf("%d", data.FailedCount))
	b.WriteString(`</div>
<div style="color:#991b1b;font-size:11px;text-transform:uppercase;letter-spacing:0.5px;margin-top:4px;">Failed</div>
</div>
</td>
<td width="25%" style="padding:8px;">
<div style="background:#eff6ff;border-radius:8px;padding:16px;text-align:center;">
<div style="color:#2563eb;font-size:28px;font-weight:700;">`)
	b.WriteString(fmt.Sprintf("%.1f%%", data.SuccessRate))
	b.WriteString(`</div>
<div style="color:#1e40af;font-size:11px;text-transform:uppercase;letter-spacing:0.5px;margin-top:4px;">Success Rate</div>
</div>
</td>
</tr>
</table>
</td>
</tr>
`)

	// Week comparison — only rendered when there is something to compare.
	if data.ComparedToPrev.InstallsChange != 0 || data.ComparedToPrev.FailRateChange != 0 {
		// Green/up for growth, red/down for decline.
		installIcon := "📈"
		installColor := "#16a34a"
		if data.ComparedToPrev.InstallsChange < 0 {
			installIcon = "📉"
			installColor = "#dc2626"
		}
		// Green check when the failure rate improved, red warning otherwise.
		failIcon := "✅"
		failColor := "#16a34a"
		if data.ComparedToPrev.FailRateChange > 0 {
			failIcon = "⚠️"
			failColor = "#dc2626"
		}

		b.WriteString(`<tr>
<td style="padding:0 40px 24px;">
<table width="100%" style="background:#fafafa;border-radius:8px;">
<tr>
<td style="padding:16px;border-right:1px solid #e5e7eb;">
<span style="font-size:12px;color:#64748b;">vs. Previous Week</span><br>
<span style="font-size:16px;color:`)
		b.WriteString(installColor)
		b.WriteString(`;">`)
		b.WriteString(installIcon)
		b.WriteString(fmt.Sprintf(" %+d installations (%.1f%%)", data.ComparedToPrev.InstallsChange, data.ComparedToPrev.InstallsPercent))
		b.WriteString(`</span>
</td>
<td style="padding:16px;">
<span style="font-size:12px;color:#64748b;">Failure Rate Change</span><br>
<span style="font-size:16px;color:`)
		b.WriteString(failColor)
		b.WriteString(`;">`)
		b.WriteString(failIcon)
		b.WriteString(fmt.Sprintf(" %+.1f percentage points", data.ComparedToPrev.FailRateChange))
		b.WriteString(`</span>
</td>
</tr>
</table>
</td>
</tr>
`)
	}

	// Top 5 Installed Scripts
	b.WriteString(`<tr>
<td style="padding:0 40px 24px;">
<h2 style="margin:0 0 16px;font-size:16px;color:#1e293b;border-bottom:2px solid #e2e8f0;padding-bottom:8px;">🏆 Top 5 Installed Scripts</h2>
<table width="100%" cellpadding="0" cellspacing="0" style="font-size:14px;">
`)
	if len(data.TopApps) > 0 {
		for i, app := range data.TopApps {
			// Zebra striping for readability.
			bgColor := "#ffffff"
			if i%2 == 0 {
				bgColor = "#f8fafc"
			}
			b.WriteString(fmt.Sprintf(`<tr style="background:%s;">
<td style="padding:12px 16px;border-radius:4px 0 0 4px;">
<span style="background:#e0e7ff;color:#4338ca;padding:2px 8px;border-radius:4px;font-size:12px;font-weight:600;">%d</span>
<span style="margin-left:12px;font-weight:500;color:#1e293b;">%s</span>
</td>
<td style="padding:12px 16px;text-align:right;border-radius:0 4px 4px 0;color:#64748b;">%d installs</td>
</tr>`, bgColor, i+1, app.Name, app.Total))
		}
	} else {
		b.WriteString(`<tr><td style="padding:12px 16px;color:#64748b;">No data available</td></tr>`)
	}
	b.WriteString(`</table>
</td>
</tr>
`)

	// Top 5 Failed Scripts
	b.WriteString(`<tr>
<td style="padding:0 40px 24px;">
<h2 style="margin:0 0 16px;font-size:16px;color:#1e293b;border-bottom:2px solid #e2e8f0;padding-bottom:8px;">⚠️ Top 5 Scripts with Highest Failure Rates</h2>
<table width="100%" cellpadding="0" cellspacing="0" style="font-size:14px;">
`)
	if len(data.TopFailedApps) > 0 {
		for i, app := range data.TopFailedApps {
			bgColor := "#ffffff"
			if i%2 == 0 {
				bgColor = "#fef2f2"
			}
			// Badge color scales with severity: red >= 20%, orange >= 10%,
			// yellow below that.
			rateColor := "#dc2626"
			if app.FailureRate < 20 {
				rateColor = "#ea580c"
			}
			if app.FailureRate < 10 {
				rateColor = "#ca8a04"
			}
			b.WriteString(fmt.Sprintf(`<tr style="background:%s;">
<td style="padding:12px 16px;border-radius:4px 0 0 4px;">
<span style="font-weight:500;color:#1e293b;">%s</span>
</td>
<td style="padding:12px 16px;text-align:center;color:#64748b;">%d / %d failed</td>
<td style="padding:12px 16px;text-align:right;border-radius:0 4px 4px 0;">
<span style="background:%s;color:#ffffff;padding:4px 10px;border-radius:12px;font-size:12px;font-weight:600;">%.1f%%</span>
</td>
</tr>`, bgColor, app.Name, app.Failed, app.Total, rateColor, app.FailureRate))
		}
	} else {
		b.WriteString(`<tr><td style="padding:12px 16px;color:#16a34a;">🎉 No failures this week!</td></tr>`)
	}
	b.WriteString(`</table>
</td>
</tr>
`)

	// Type Distribution
	if len(data.TypeDistribution) > 0 {
		b.WriteString(`<tr>
<td style="padding:0 40px 24px;">
<h2 style="margin:0 0 16px;font-size:16px;color:#1e293b;border-bottom:2px solid #e2e8f0;padding-bottom:8px;">📦 Distribution by Type</h2>
<table width="100%" cellpadding="0" cellspacing="0">
<tr>
`)
		for t, count := range data.TypeDistribution {
			percent := float64(count) / float64(data.TotalInstalls) * 100
			b.WriteString(fmt.Sprintf(`<td style="padding:8px;">
<div style="background:#f1f5f9;border-radius:8px;padding:16px;text-align:center;">
<div style="font-size:24px;font-weight:700;color:#475569;">%d</div>
<div style="font-size:12px;color:#64748b;margin-top:4px;">%s (%.1f%%)</div>
</div>
</td>`, count, strings.ToUpper(t), percent))
		}
		b.WriteString(`</tr>
</table>
</td>
</tr>
`)
	}

	// OS Distribution
	if len(data.OsDistribution) > 0 {
		b.WriteString(`<tr>
<td style="padding:0 40px 24px;">
<h2 style="margin:0 0 16px;font-size:16px;color:#1e293b;border-bottom:2px solid #e2e8f0;padding-bottom:8px;">🐧 Top Operating Systems</h2>
<table width="100%" cellpadding="0" cellspacing="0" style="font-size:14px;">
`)
		// Sort OS by count (simple selection sort; the list is tiny).
		type osEntry struct {
			name  string
			count int
		}
		var osList []osEntry
		for name, count := range data.OsDistribution {
			osList = append(osList, osEntry{name, count})
		}
		for i := 0; i < len(osList); i++ {
			for j := i + 1; j < len(osList); j++ {
				if osList[j].count > osList[i].count {
					osList[i], osList[j] = osList[j], osList[i]
				}
			}
		}
		// Render the five most common operating systems as bar rows.
		for i, os := range osList {
			if i >= 5 {
				break
			}
			percent := float64(os.count) / float64(data.TotalInstalls) * 100
			barWidth := int(percent * 2) // Scale for visual
			if barWidth > 100 {
				barWidth = 100
			}
			b.WriteString(fmt.Sprintf(`<tr>
<td style="padding:8px 16px;width:100px;">%s</td>
<td style="padding:8px 16px;">
<div style="background:#e2e8f0;border-radius:4px;height:20px;width:100%%;">
<div style="background:linear-gradient(90deg,#667eea,#764ba2);border-radius:4px;height:20px;width:%d%%;"></div>
</div>
</td>
<td style="padding:8px 16px;text-align:right;width:80px;color:#64748b;">%d (%.1f%%)</td>
</tr>`, os.name, barWidth, os.count, percent))
		}
		b.WriteString(`</table>
</td>
</tr>
`)
	}

	// Footer
	b.WriteString(`<tr>
<td style="padding:24px 40px;background:#f8fafc;border-radius:0 0 12px 12px;border-top:1px solid #e2e8f0;">
<p style="margin:0;font-size:12px;color:#64748b;text-align:center;">
Generated `)
	b.WriteString(time.Now().Format("Jan 02, 2006 at 15:04 MST"))
	b.WriteString(`<br>
<a href="https://github.com/community-scripts/ProxmoxVE" style="color:#667eea;text-decoration:none;">ProxmoxVE Helper Scripts</a> —
This is an automated report from the telemetry service.
</p>
</td>
</tr>

</table>
</td></tr>
</table>
</body>
</html>`)

	return b.String()
}
|
||||
|
||||
// generateWeeklyReportEmail creates the plain text email body (kept for compatibility)
// rendering the same aggregates as the HTML report.
// NOTE(review): nothing in this file's visible code calls it — confirm it is
// still needed before removing.
func (a *Alerter) generateWeeklyReportEmail(data *WeeklyReportData) string {
	var b strings.Builder

	b.WriteString("ProxmoxVE Helper Scripts - Weekly Telemetry Report\n")
	b.WriteString("==================================================\n\n")

	b.WriteString(fmt.Sprintf("Calendar Week: %d, %d\n", data.CalendarWeek, data.Year))
	b.WriteString(fmt.Sprintf("Period: %s - %s\n\n",
		data.StartDate.Format("Jan 02, 2006"),
		data.EndDate.Format("Jan 02, 2006")))

	b.WriteString("OVERVIEW\n")
	b.WriteString("--------\n")
	b.WriteString(fmt.Sprintf("Total Installations: %d\n", data.TotalInstalls))
	b.WriteString(fmt.Sprintf("Successful: %d\n", data.SuccessCount))
	b.WriteString(fmt.Sprintf("Failed: %d\n", data.FailedCount))
	b.WriteString(fmt.Sprintf("Success Rate: %.1f%%\n\n", data.SuccessRate))

	// Comparison section only appears when there is something to compare.
	if data.ComparedToPrev.InstallsChange != 0 || data.ComparedToPrev.FailRateChange != 0 {
		b.WriteString("vs. Previous Week:\n")
		b.WriteString(fmt.Sprintf(" Installations: %+d (%.1f%%)\n", data.ComparedToPrev.InstallsChange, data.ComparedToPrev.InstallsPercent))
		b.WriteString(fmt.Sprintf(" Failure Rate: %+.1f pp\n\n", data.ComparedToPrev.FailRateChange))
	}

	b.WriteString("TOP 5 INSTALLED SCRIPTS\n")
	b.WriteString("-----------------------\n")
	for i, app := range data.TopApps {
		if i >= 5 {
			break
		}
		b.WriteString(fmt.Sprintf("%d. %-25s %5d installs\n", i+1, app.Name, app.Total))
	}
	b.WriteString("\n")

	b.WriteString("TOP 5 FAILED SCRIPTS\n")
	b.WriteString("--------------------\n")
	if len(data.TopFailedApps) > 0 {
		for i, app := range data.TopFailedApps {
			if i >= 5 {
				break
			}
			b.WriteString(fmt.Sprintf("%d. %-20s %3d/%3d failed (%.1f%%)\n",
				i+1, app.Name, app.Failed, app.Total, app.FailureRate))
		}
	} else {
		b.WriteString("No failures this week!\n")
	}
	b.WriteString("\n")

	b.WriteString("---\n")
	b.WriteString(fmt.Sprintf("Generated: %s\n", time.Now().Format("Jan 02, 2006 15:04 MST")))
	b.WriteString("This is an automated report from the telemetry service.\n")

	return b.String()
}
|
||||
|
||||
// TestWeeklyReport sends a test weekly report email.
// Note: this is a real send, not a dry run — it updates lastWeeklyReport and
// appends to the alert history exactly like a scheduled report.
func (a *Alerter) TestWeeklyReport() error {
	return a.SendWeeklyReport()
}
|
||||
158
misc/data/cache.go
Normal file
158
misc/data/cache.go
Normal file
@ -0,0 +1,158 @@
|
||||
package main
|
||||
|
||||
import (
	"context"
	"encoding/json"
	"log"
	"strconv"
	"sync"
	"time"

	"github.com/redis/go-redis/v9"
)
|
||||
|
||||
// CacheConfig holds cache configuration
type CacheConfig struct {
	RedisURL    string        // Parsed by redis.ParseURL (e.g. redis://host:6379/0).
	EnableRedis bool          // When false, or Redis is unreachable, the in-memory map is used.
	DefaultTTL  time.Duration // Applied by Set when the caller passes ttl == 0.
}

// Cache provides caching functionality with Redis or in-memory fallback
type Cache struct {
	redis      *redis.Client
	useRedis   bool // True only after a successful startup ping.
	defaultTTL time.Duration

	// In-memory fallback
	mu      sync.RWMutex // Guards memData.
	memData map[string]cacheEntry
}

// cacheEntry is one JSON-encoded value plus its expiry, used only by the
// in-memory fallback path.
type cacheEntry struct {
	data      []byte
	expiresAt time.Time
}
|
||||
|
||||
// NewCache creates a new cache instance
|
||||
func NewCache(cfg CacheConfig) *Cache {
|
||||
c := &Cache{
|
||||
defaultTTL: cfg.DefaultTTL,
|
||||
memData: make(map[string]cacheEntry),
|
||||
}
|
||||
|
||||
if cfg.EnableRedis && cfg.RedisURL != "" {
|
||||
opts, err := redis.ParseURL(cfg.RedisURL)
|
||||
if err != nil {
|
||||
log.Printf("WARN: invalid redis URL, using in-memory cache: %v", err)
|
||||
return c
|
||||
}
|
||||
|
||||
client := redis.NewClient(opts)
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
|
||||
defer cancel()
|
||||
|
||||
if err := client.Ping(ctx).Err(); err != nil {
|
||||
log.Printf("WARN: redis connection failed, using in-memory cache: %v", err)
|
||||
return c
|
||||
}
|
||||
|
||||
c.redis = client
|
||||
c.useRedis = true
|
||||
log.Printf("INFO: connected to Redis for caching")
|
||||
}
|
||||
|
||||
// Start cleanup goroutine for in-memory cache
|
||||
if !c.useRedis {
|
||||
go c.cleanupLoop()
|
||||
}
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *Cache) cleanupLoop() {
|
||||
ticker := time.NewTicker(5 * time.Minute)
|
||||
defer ticker.Stop()
|
||||
|
||||
for range ticker.C {
|
||||
c.mu.Lock()
|
||||
now := time.Now()
|
||||
for k, v := range c.memData {
|
||||
if now.After(v.expiresAt) {
|
||||
delete(c.memData, k)
|
||||
}
|
||||
}
|
||||
c.mu.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
// Get retrieves a value from cache and unmarshals it into dest. It returns
// true only when the key exists, is not expired, and the stored JSON
// decodes into dest; any Redis error is treated as a miss.
func (c *Cache) Get(ctx context.Context, key string, dest interface{}) bool {
	if c.useRedis {
		data, err := c.redis.Get(ctx, key).Bytes()
		if err != nil {
			// Both "key missing" and transport errors count as a miss.
			return false
		}
		return json.Unmarshal(data, dest) == nil
	}

	// In-memory fallback
	c.mu.RLock()
	entry, ok := c.memData[key]
	c.mu.RUnlock()

	// Expired entries are reported as misses; cleanupLoop evicts them later.
	if !ok || time.Now().After(entry.expiresAt) {
		return false
	}

	return json.Unmarshal(entry.data, dest) == nil
}
|
||||
|
||||
// Set stores a value in cache
|
||||
func (c *Cache) Set(ctx context.Context, key string, value interface{}, ttl time.Duration) error {
|
||||
if ttl == 0 {
|
||||
ttl = c.defaultTTL
|
||||
}
|
||||
|
||||
data, err := json.Marshal(value)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if c.useRedis {
|
||||
return c.redis.Set(ctx, key, data, ttl).Err()
|
||||
}
|
||||
|
||||
// In-memory fallback
|
||||
c.mu.Lock()
|
||||
c.memData[key] = cacheEntry{
|
||||
data: data,
|
||||
expiresAt: time.Now().Add(ttl),
|
||||
}
|
||||
c.mu.Unlock()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Delete removes a key from cache
|
||||
func (c *Cache) Delete(ctx context.Context, key string) error {
|
||||
if c.useRedis {
|
||||
return c.redis.Del(ctx, key).Err()
|
||||
}
|
||||
|
||||
c.mu.Lock()
|
||||
delete(c.memData, key)
|
||||
c.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
// InvalidateDashboard clears dashboard cache
|
||||
func (c *Cache) InvalidateDashboard(ctx context.Context) {
|
||||
// Delete all dashboard cache keys
|
||||
for days := 1; days <= 365; days++ {
|
||||
_ = c.Delete(ctx, dashboardCacheKey(days))
|
||||
}
|
||||
}
|
||||
|
||||
// dashboardCacheKey builds the cache key for the dashboard aggregate over
// the given number of days, e.g. "dashboard:30".
//
// Bug fix: the original used string(rune(days)), which converts the int
// to the Unicode code point with that value (day 7 -> the BEL control
// character, day 30 -> RS), not its decimal representation — producing
// unreadable, collision-prone keys. The conversion below is equivalent to
// "dashboard:" + strconv.Itoa(days), hand-rolled only to avoid adding a
// new import to this file.
func dashboardCacheKey(days int) string {
	if days == 0 {
		return "dashboard:0"
	}
	n := days
	neg := n < 0
	if neg {
		n = -n
	}
	var buf [21]byte // enough for a 64-bit int plus sign
	i := len(buf)
	for n > 0 {
		i--
		buf[i] = byte('0' + n%10)
		n /= 10
	}
	if neg {
		i--
		buf[i] = '-'
	}
	return "dashboard:" + string(buf[i:])
}
|
||||
173
misc/data/cleanup.go
Normal file
173
misc/data/cleanup.go
Normal file
@ -0,0 +1,173 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"time"
|
||||
)
|
||||
|
||||
// CleanupConfig holds configuration for the cleanup job
type CleanupConfig struct {
	Enabled bool // master switch; Start() is a no-op when false
	CheckInterval time.Duration // How often to run cleanup
	StuckAfterHours int // Consider "installing" as stuck after X hours
}
|
||||
|
||||
// Cleaner handles cleanup of stuck installations: it periodically flips
// records stuck in "installing" status to "unknown" via the PocketBase
// client.
type Cleaner struct {
	cfg CleanupConfig // job schedule and stuck-detection threshold
	pb *PBClient // authenticated PocketBase API client
}
|
||||
|
||||
// NewCleaner creates a new cleaner instance
|
||||
func NewCleaner(cfg CleanupConfig, pb *PBClient) *Cleaner {
|
||||
return &Cleaner{
|
||||
cfg: cfg,
|
||||
pb: pb,
|
||||
}
|
||||
}
|
||||
|
||||
// Start begins the cleanup loop
|
||||
func (c *Cleaner) Start() {
|
||||
if !c.cfg.Enabled {
|
||||
log.Println("INFO: cleanup job disabled")
|
||||
return
|
||||
}
|
||||
|
||||
go c.cleanupLoop()
|
||||
log.Printf("INFO: cleanup job started (interval: %v, stuck after: %d hours)", c.cfg.CheckInterval, c.cfg.StuckAfterHours)
|
||||
}
|
||||
|
||||
func (c *Cleaner) cleanupLoop() {
|
||||
// Run immediately on start
|
||||
c.runCleanup()
|
||||
|
||||
ticker := time.NewTicker(c.cfg.CheckInterval)
|
||||
defer ticker.Stop()
|
||||
|
||||
for range ticker.C {
|
||||
c.runCleanup()
|
||||
}
|
||||
}
|
||||
|
||||
// runCleanup finds and updates stuck installations
|
||||
func (c *Cleaner) runCleanup() {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
|
||||
defer cancel()
|
||||
|
||||
// Find stuck records
|
||||
stuckRecords, err := c.findStuckInstallations(ctx)
|
||||
if err != nil {
|
||||
log.Printf("WARN: cleanup - failed to find stuck installations: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
if len(stuckRecords) == 0 {
|
||||
log.Printf("INFO: cleanup - no stuck installations found")
|
||||
return
|
||||
}
|
||||
|
||||
log.Printf("INFO: cleanup - found %d stuck installations", len(stuckRecords))
|
||||
|
||||
// Update each record
|
||||
updated := 0
|
||||
for _, record := range stuckRecords {
|
||||
if err := c.markAsUnknown(ctx, record.ID); err != nil {
|
||||
log.Printf("WARN: cleanup - failed to update record %s: %v", record.ID, err)
|
||||
continue
|
||||
}
|
||||
updated++
|
||||
}
|
||||
|
||||
log.Printf("INFO: cleanup - updated %d stuck installations to 'unknown'", updated)
|
||||
}
|
||||
|
||||
// StuckRecord represents a minimal record for cleanup
type StuckRecord struct {
	ID string `json:"id"` // PocketBase record id, used for the status update
	NSAPP string `json:"nsapp"` // app identifier (informational)
	Created string `json:"created"` // record creation timestamp (informational)
}
|
||||
|
||||
// findStuckInstallations finds records that are stuck in "installing" status
|
||||
func (c *Cleaner) findStuckInstallations(ctx context.Context) ([]StuckRecord, error) {
|
||||
if err := c.pb.ensureAuth(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Calculate cutoff time
|
||||
cutoff := time.Now().Add(-time.Duration(c.cfg.StuckAfterHours) * time.Hour)
|
||||
cutoffStr := cutoff.Format("2006-01-02 15:04:05")
|
||||
|
||||
// Build filter: status='installing' AND created < cutoff
|
||||
filter := url.QueryEscape(fmt.Sprintf("status='installing' && created<'%s'", cutoffStr))
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodGet,
|
||||
fmt.Sprintf("%s/api/collections/%s/records?filter=%s&perPage=100",
|
||||
c.pb.baseURL, c.pb.devColl, filter),
|
||||
nil,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req.Header.Set("Authorization", "Bearer "+c.pb.token)
|
||||
|
||||
resp, err := c.pb.http.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
var result struct {
|
||||
Items []StuckRecord `json:"items"`
|
||||
}
|
||||
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return result.Items, nil
|
||||
}
|
||||
|
||||
// markAsUnknown updates a record's status to "unknown"
|
||||
func (c *Cleaner) markAsUnknown(ctx context.Context, recordID string) error {
|
||||
update := TelemetryStatusUpdate{
|
||||
Status: "unknown",
|
||||
Error: "Installation timed out - no completion status received",
|
||||
}
|
||||
return c.pb.UpdateTelemetryStatus(ctx, recordID, update)
|
||||
}
|
||||
|
||||
// RunNow triggers an immediate cleanup run (for testing/manual trigger)
|
||||
func (c *Cleaner) RunNow() (int, error) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
|
||||
defer cancel()
|
||||
|
||||
stuckRecords, err := c.findStuckInstallations(ctx)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("failed to find stuck installations: %w", err)
|
||||
}
|
||||
|
||||
updated := 0
|
||||
for _, record := range stuckRecords {
|
||||
if err := c.markAsUnknown(ctx, record.ID); err != nil {
|
||||
log.Printf("WARN: cleanup - failed to update record %s: %v", record.ID, err)
|
||||
continue
|
||||
}
|
||||
updated++
|
||||
}
|
||||
|
||||
return updated, nil
|
||||
}
|
||||
|
||||
// GetStuckCount returns the current number of stuck installations
|
||||
func (c *Cleaner) GetStuckCount(ctx context.Context) (int, error) {
|
||||
records, err := c.findStuckInstallations(ctx)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return len(records), nil
|
||||
}
|
||||
2312
misc/data/dashboard.go
Normal file
2312
misc/data/dashboard.go
Normal file
File diff suppressed because it is too large
Load Diff
55
misc/data/entrypoint.sh
Normal file
55
misc/data/entrypoint.sh
Normal file
@ -0,0 +1,55 @@
|
||||
#!/bin/sh
# Container entrypoint: optionally runs the legacy-data migration against
# PocketBase, then execs the telemetry service so it becomes PID 1.
# Behavior knobs (env): RUN_MIGRATION, MIGRATION_REQUIRED,
# MIGRATION_SOURCE_URL, PB_URL/POCKETBASE_URL, PB_TARGET_COLLECTION.
set -e

echo "============================================="
echo " ProxmoxVED Telemetry Service"
echo "============================================="

# Map Coolify ENV names to migration script names
# Coolify uses PB_URL, PB_TARGET_COLLECTION
export POCKETBASE_URL="${POCKETBASE_URL:-$PB_URL}"
export POCKETBASE_COLLECTION="${POCKETBASE_COLLECTION:-$PB_TARGET_COLLECTION}"

# Run migration if enabled
if [ "$RUN_MIGRATION" = "true" ]; then
    echo ""
    echo "🔄 Migration mode enabled"
    echo " Source: $MIGRATION_SOURCE_URL"
    echo " Target: $POCKETBASE_URL"
    echo " Collection: $POCKETBASE_COLLECTION"
    echo ""

    # Wait for PocketBase to be ready
    echo "⏳ Waiting for PocketBase to be ready..."
    RETRIES=30
    # Poll the health endpoint every 2s, up to 30 attempts (~60s total).
    until wget -q --spider "$POCKETBASE_URL/api/health" 2>/dev/null; do
        RETRIES=$((RETRIES - 1))
        if [ $RETRIES -le 0 ]; then
            echo "❌ PocketBase not reachable after 30 attempts"
            # Only a hard failure when the migration is mandatory.
            if [ "$MIGRATION_REQUIRED" = "true" ]; then
                exit 1
            fi
            echo "⚠️ Continuing without migration..."
            break
        fi
        echo " Waiting... ($RETRIES attempts left)"
        sleep 2
    done

    # Re-check reachability: the loop above may have exited via 'break'.
    if wget -q --spider "$POCKETBASE_URL/api/health" 2>/dev/null; then
        echo "✅ PocketBase is ready"
        echo ""
        echo "🚀 Starting migration..."
        # Migration failure aborts only when MIGRATION_REQUIRED=true.
        /app/migrate || {
            if [ "$MIGRATION_REQUIRED" = "true" ]; then
                echo "❌ Migration failed!"
                exit 1
            fi
            echo "⚠️ Migration failed, but continuing..."
        }
        echo ""
    fi
fi

echo "🚀 Starting telemetry service..."
# exec replaces the shell so the service receives signals directly.
exec /app/telemetry-service
|
||||
10
misc/data/go.mod
Normal file
10
misc/data/go.mod
Normal file
@ -0,0 +1,10 @@
|
||||
module github.com/community-scripts/telemetry-service
|
||||
|
||||
go 1.25.5
|
||||
|
||||
require github.com/redis/go-redis/v9 v9.17.3
|
||||
|
||||
require (
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
|
||||
)
|
||||
10
misc/data/go.sum
Normal file
10
misc/data/go.sum
Normal file
@ -0,0 +1,10 @@
|
||||
github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs=
|
||||
github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c=
|
||||
github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA=
|
||||
github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0=
|
||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
|
||||
github.com/redis/go-redis/v9 v9.17.3 h1:fN29NdNrE17KttK5Ndf20buqfDZwGNgoUr9qjl1DQx4=
|
||||
github.com/redis/go-redis/v9 v9.17.3/go.mod h1:u410H11HMLoB+TP67dz8rL9s6QW2j76l0//kSOd3370=
|
||||
366
misc/data/migrate.go
Normal file
366
misc/data/migrate.go
Normal file
@ -0,0 +1,366 @@
|
||||
// +build ignore
|
||||
|
||||
// Migration script to import data from the old API to PocketBase
|
||||
// Run with: go run migrate.go
|
||||
package main
|
||||
|
||||
import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"os"
	"strings"
	"time"
)
||||
|
||||
// Migration defaults; the source and PocketBase URLs can be overridden
// via MIGRATION_SOURCE_URL and POCKETBASE_URL/PB_URL respectively.
const (
	defaultSourceAPI = "https://api.htl-braunau.at/dev/data"
	defaultPBURL = "http://localhost:8090"
	batchSize = 100
)
|
||||
|
||||
// Package-level state for the single-shot migration run (set in main and
// authenticate; this script is not concurrent).
var (
	sourceAPI string
	summaryAPI string
	authToken string // PocketBase auth token
)
|
||||
|
||||
// OldDataModel represents the data structure from the old API
// (one telemetry record as returned by the /paginated endpoint).
type OldDataModel struct {
	ID string `json:"id"`
	CtType int `json:"ct_type"`
	DiskSize int `json:"disk_size"`
	CoreCount int `json:"core_count"`
	RamSize int `json:"ram_size"`
	OsType string `json:"os_type"`
	OsVersion string `json:"os_version"`
	DisableIP6 string `json:"disableip6"`
	NsApp string `json:"nsapp"`
	Method string `json:"method"`
	CreatedAt string `json:"created_at"`
	PveVersion string `json:"pve_version"`
	Status string `json:"status"` // legacy values include "done"; normalized in importRecord
	RandomID string `json:"random_id"`
	Type string `json:"type"`
	Error string `json:"error"`
}
|
||||
|
||||
// PBRecord represents the PocketBase record format posted to the target
// collection. It mirrors OldDataModel minus ID/CreatedAt.
type PBRecord struct {
	CtType int `json:"ct_type"`
	DiskSize int `json:"disk_size"`
	CoreCount int `json:"core_count"`
	RamSize int `json:"ram_size"`
	OsType string `json:"os_type"`
	OsVersion string `json:"os_version"`
	DisableIP6 string `json:"disableip6"`
	NsApp string `json:"nsapp"`
	Method string `json:"method"`
	PveVersion string `json:"pve_version"`
	Status string `json:"status"`
	RandomID string `json:"random_id"`
	Type string `json:"type"`
	Error string `json:"error"`
	// created_at will be set automatically by PocketBase
}
|
||||
|
||||
// Summary mirrors the source API's /summary response; only the total
// entry count is needed to compute pagination.
type Summary struct {
	TotalEntries int `json:"total_entries"`
}
|
||||
|
||||
// main drives the one-shot migration: it resolves configuration from the
// environment, optionally authenticates against PocketBase, then pages
// through the legacy API and imports each record, printing a summary of
// migrated / skipped / failed counts at the end.
func main() {
	// Setup source URLs
	baseURL := os.Getenv("MIGRATION_SOURCE_URL")
	if baseURL == "" {
		baseURL = defaultSourceAPI
	}
	sourceAPI = baseURL + "/paginated"
	summaryAPI = baseURL + "/summary"

	// Support both POCKETBASE_URL and PB_URL (Coolify uses PB_URL)
	pbURL := os.Getenv("POCKETBASE_URL")
	if pbURL == "" {
		pbURL = os.Getenv("PB_URL")
	}
	if pbURL == "" {
		pbURL = defaultPBURL
	}

	// Support both POCKETBASE_COLLECTION and PB_TARGET_COLLECTION
	pbCollection := os.Getenv("POCKETBASE_COLLECTION")
	if pbCollection == "" {
		pbCollection = os.Getenv("PB_TARGET_COLLECTION")
	}
	if pbCollection == "" {
		pbCollection = "_dev_telemetry_data"
	}

	// Auth collection
	authCollection := os.Getenv("PB_AUTH_COLLECTION")
	if authCollection == "" {
		authCollection = "_dev_telemetry_service"
	}

	// Credentials (optional; without them the import is attempted unauthenticated)
	pbIdentity := os.Getenv("PB_IDENTITY")
	pbPassword := os.Getenv("PB_PASSWORD")

	fmt.Println("===========================================")
	fmt.Println(" Data Migration to PocketBase")
	fmt.Println("===========================================")
	fmt.Printf("Source API: %s\n", baseURL)
	fmt.Printf("PocketBase URL: %s\n", pbURL)
	fmt.Printf("Collection: %s\n", pbCollection)
	fmt.Printf("Auth Collection: %s\n", authCollection)
	fmt.Println("-------------------------------------------")

	// Authenticate with PocketBase
	if pbIdentity != "" && pbPassword != "" {
		fmt.Println("🔐 Authenticating with PocketBase...")
		err := authenticate(pbURL, authCollection, pbIdentity, pbPassword)
		if err != nil {
			fmt.Printf("❌ Authentication failed: %v\n", err)
			os.Exit(1)
		}
		fmt.Println("✅ Authentication successful")
	} else {
		fmt.Println("⚠️ No credentials provided, trying without auth...")
	}
	fmt.Println("-------------------------------------------")

	// Get total count
	summary, err := getSummary()
	if err != nil {
		fmt.Printf("❌ Failed to get summary: %v\n", err)
		os.Exit(1)
	}
	fmt.Printf("📊 Total entries to migrate: %d\n", summary.TotalEntries)
	fmt.Println("-------------------------------------------")

	// Calculate pages (ceiling division)
	totalPages := (summary.TotalEntries + batchSize - 1) / batchSize

	var totalMigrated, totalFailed, totalSkipped int

	for page := 1; page <= totalPages; page++ {
		fmt.Printf("📦 Fetching page %d/%d (items %d-%d)...\n",
			page, totalPages,
			(page-1)*batchSize+1,
			min(page*batchSize, summary.TotalEntries))

		data, err := fetchPage(page, batchSize)
		if err != nil {
			// A failed page counts all of its (estimated) records as failed.
			fmt.Printf(" ❌ Failed to fetch page %d: %v\n", page, err)
			totalFailed += batchSize
			continue
		}

		for i, record := range data {
			err := importRecord(pbURL, pbCollection, record)
			if err != nil {
				// Duplicates are expected on re-runs; count them as skips.
				if isUniqueViolation(err) {
					totalSkipped++
					continue
				}
				fmt.Printf(" ❌ Failed to import record %d: %v\n", (page-1)*batchSize+i+1, err)
				totalFailed++
				continue
			}
			totalMigrated++
		}

		fmt.Printf(" ✅ Page %d complete (migrated: %d, skipped: %d, failed: %d)\n",
			page, len(data), totalSkipped, totalFailed)

		// Small delay to avoid overwhelming the server
		time.Sleep(100 * time.Millisecond)
	}

	fmt.Println("===========================================")
	fmt.Println(" Migration Complete")
	fmt.Println("===========================================")
	fmt.Printf("✅ Successfully migrated: %d\n", totalMigrated)
	fmt.Printf("⏭️ Skipped (duplicates): %d\n", totalSkipped)
	fmt.Printf("❌ Failed: %d\n", totalFailed)
	fmt.Println("===========================================")
}
|
||||
|
||||
func getSummary() (*Summary, error) {
|
||||
resp, err := http.Get(summaryAPI)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
var summary Summary
|
||||
if err := json.NewDecoder(resp.Body).Decode(&summary); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &summary, nil
|
||||
}
|
||||
|
||||
func authenticate(pbURL, authCollection, identity, password string) error {
|
||||
body := map[string]string{
|
||||
"identity": identity,
|
||||
"password": password,
|
||||
}
|
||||
jsonData, _ := json.Marshal(body)
|
||||
|
||||
url := fmt.Sprintf("%s/api/collections/%s/auth-with-password", pbURL, authCollection)
|
||||
req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonData))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
|
||||
client := &http.Client{Timeout: 10 * time.Second}
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
|
||||
body, _ := io.ReadAll(resp.Body)
|
||||
return fmt.Errorf("HTTP %d: %s", resp.StatusCode, string(body))
|
||||
}
|
||||
|
||||
var result struct {
|
||||
Token string `json:"token"`
|
||||
}
|
||||
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
|
||||
return err
|
||||
}
|
||||
if result.Token == "" {
|
||||
return fmt.Errorf("no token in response")
|
||||
}
|
||||
|
||||
authToken = result.Token
|
||||
return nil
|
||||
}
|
||||
|
||||
func fetchPage(page, limit int) ([]OldDataModel, error) {
|
||||
url := fmt.Sprintf("%s?page=%d&limit=%d", sourceAPI, page, limit)
|
||||
|
||||
resp, err := http.Get(url)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
body, _ := io.ReadAll(resp.Body)
|
||||
return nil, fmt.Errorf("HTTP %d: %s", resp.StatusCode, string(body))
|
||||
}
|
||||
|
||||
var data []OldDataModel
|
||||
if err := json.NewDecoder(resp.Body).Decode(&data); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return data, nil
|
||||
}
|
||||
|
||||
// importRecord maps one legacy record onto the PocketBase schema
// (normalizing status, ct_type and type) and POSTs it to the target
// collection, attaching the bearer token when one was obtained.
func importRecord(pbURL, collection string, old OldDataModel) error {
	// Map status: "done" -> "success"
	status := old.Status
	switch status {
	case "done":
		status = "success"
	case "installing", "failed", "unknown", "success":
		// keep as-is
	default:
		// Anything unrecognized is normalized to "unknown".
		status = "unknown"
	}

	// Ensure ct_type is not 0 (required field)
	ctType := old.CtType
	if ctType == 0 {
		ctType = 1 // default to unprivileged
	}

	// Ensure type is set
	recordType := old.Type
	if recordType == "" {
		recordType = "lxc"
	}

	record := PBRecord{
		CtType: ctType,
		DiskSize: old.DiskSize,
		CoreCount: old.CoreCount,
		RamSize: old.RamSize,
		OsType: old.OsType,
		OsVersion: old.OsVersion,
		DisableIP6: old.DisableIP6,
		NsApp: old.NsApp,
		Method: old.Method,
		PveVersion: old.PveVersion,
		Status: status,
		RandomID: old.RandomID,
		Type: recordType,
		Error: old.Error,
	}

	jsonData, err := json.Marshal(record)
	if err != nil {
		return err
	}

	url := fmt.Sprintf("%s/api/collections/%s/records", pbURL, collection)
	req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonData))
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/json")
	if authToken != "" {
		req.Header.Set("Authorization", "Bearer "+authToken)
	}

	client := &http.Client{Timeout: 10 * time.Second}
	resp, err := client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	// PocketBase answers 200 or 201 on success; anything else (including
	// unique-constraint rejections) is surfaced with the body text so
	// the caller can classify it via isUniqueViolation.
	if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusCreated {
		body, _ := io.ReadAll(resp.Body)
		return fmt.Errorf("HTTP %d: %s", resp.StatusCode, string(body))
	}

	return nil
}
|
||||
|
||||
func isUniqueViolation(err error) bool {
|
||||
if err == nil {
|
||||
return false
|
||||
}
|
||||
errStr := err.Error()
|
||||
return contains(errStr, "UNIQUE constraint failed") ||
|
||||
contains(errStr, "duplicate") ||
|
||||
contains(errStr, "already exists") ||
|
||||
contains(errStr, "validation_not_unique")
|
||||
}
|
||||
|
||||
// contains reports whether substr occurs within s. Kept for backward
// compatibility with existing callers; it now delegates to the standard
// library instead of the previous hand-rolled expression, which was a
// convoluted reimplementation of strings.Contains.
func contains(s, substr string) bool {
	return strings.Contains(s, substr)
}
|
||||
|
||||
// containsHelper performs a naive O(len(s)*len(substr)) substring scan,
// returning true at the first offset where substr matches s.
func containsHelper(s, substr string) bool {
	limit := len(s) - len(substr)
	for start := 0; start <= limit; start++ {
		if s[start:start+len(substr)] == substr {
			return true
		}
	}
	return false
}
|
||||
|
||||
// min returns the smaller of two ints. (Shadows the Go 1.21+ builtin so
// this script still builds on older toolchains.)
func min(a, b int) int {
	if b < a {
		return b
	}
	return a
}
|
||||
67
misc/data/migrate.sh
Executable file
67
misc/data/migrate.sh
Executable file
@ -0,0 +1,67 @@
|
||||
#!/bin/bash
# Migration script to import data from the old API to PocketBase
# Usage: ./migrate.sh [POCKETBASE_URL] [COLLECTION_NAME]
#
# Examples:
# ./migrate.sh # Uses defaults
# ./migrate.sh http://localhost:8090 # Custom PB URL
# ./migrate.sh http://localhost:8090 my_telemetry # Custom URL and collection
#
# Flow: verify both endpoints are reachable, ask for confirmation, then
# run the Go migrator (migrate.go) with the target passed via env vars.

set -e

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

# Default values
POCKETBASE_URL="${1:-http://localhost:8090}"
POCKETBASE_COLLECTION="${2:-_dev_telemetry_data}"

echo "============================================="
echo " ProxmoxVED Data Migration Tool"
echo "============================================="
echo ""
echo "This script will migrate telemetry data from:"
echo " Source: https://api.htl-braunau.at/dev/data"
echo " Target: $POCKETBASE_URL"
echo " Collection: $POCKETBASE_COLLECTION"
echo ""

# Check if PocketBase is reachable
echo "🔍 Checking PocketBase connection..."
if ! curl -sf "$POCKETBASE_URL/api/health" > /dev/null 2>&1; then
    echo "❌ Cannot reach PocketBase at $POCKETBASE_URL"
    echo " Make sure PocketBase is running and the URL is correct."
    exit 1
fi
echo "✅ PocketBase is reachable"
echo ""

# Check source API
echo "🔍 Checking source API..."
SUMMARY=$(curl -sf "https://api.htl-braunau.at/dev/data/summary" 2>/dev/null || echo "")
if [ -z "$SUMMARY" ]; then
    echo "❌ Cannot reach source API"
    exit 1
fi

# Extract total_entries from the JSON summary without requiring jq.
TOTAL=$(echo "$SUMMARY" | grep -o '"total_entries":[0-9]*' | cut -d: -f2)
echo "✅ Source API is reachable ($TOTAL entries available)"
echo ""

# Confirm migration (interactive; anything but y/Y aborts)
read -p "⚠️ Do you want to start the migration? [y/N] " -n 1 -r
echo ""
if [[ ! $REPLY =~ ^[Yy]$ ]]; then
    echo "Migration cancelled."
    exit 0
fi

echo ""
echo "Starting migration..."
echo ""

# Run the Go migration script
cd "$SCRIPT_DIR"
POCKETBASE_URL="$POCKETBASE_URL" POCKETBASE_COLLECTION="$POCKETBASE_COLLECTION" go run migrate.go

echo ""
echo "Migration complete!"
||||
67
misc/data/migration/migrate.sh
Normal file
67
misc/data/migration/migrate.sh
Normal file
@ -0,0 +1,67 @@
|
||||
#!/bin/bash
# Migration script to import data from the old API to PocketBase
# Usage: ./migrate.sh [POCKETBASE_URL] [COLLECTION_NAME]
#
# Examples:
# ./migrate.sh # Uses defaults
# ./migrate.sh http://localhost:8090 # Custom PB URL
# ./migrate.sh http://localhost:8090 my_telemetry # Custom URL and collection
#
# Flow: verify both endpoints are reachable, ask for confirmation, then
# run the Go migrator (migrate.go) with the target passed via env vars.

set -e

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

# Default values
POCKETBASE_URL="${1:-http://localhost:8090}"
POCKETBASE_COLLECTION="${2:-_dev_telemetry_data}"

echo "============================================="
echo " ProxmoxVED Data Migration Tool"
echo "============================================="
echo ""
echo "This script will migrate telemetry data from:"
echo " Source: https://api.htl-braunau.at/dev/data"
echo " Target: $POCKETBASE_URL"
echo " Collection: $POCKETBASE_COLLECTION"
echo ""

# Check if PocketBase is reachable
echo "🔍 Checking PocketBase connection..."
if ! curl -sf "$POCKETBASE_URL/api/health" >/dev/null 2>&1; then
    echo "❌ Cannot reach PocketBase at $POCKETBASE_URL"
    echo " Make sure PocketBase is running and the URL is correct."
    exit 1
fi
echo "✅ PocketBase is reachable"
echo ""

# Check source API
echo "🔍 Checking source API..."
SUMMARY=$(curl -sf "https://api.htl-braunau.at/dev/data/summary" 2>/dev/null || echo "")
if [ -z "$SUMMARY" ]; then
    echo "❌ Cannot reach source API"
    exit 1
fi

# Extract total_entries from the JSON summary without requiring jq.
TOTAL=$(echo "$SUMMARY" | grep -o '"total_entries":[0-9]*' | cut -d: -f2)
echo "✅ Source API is reachable ($TOTAL entries available)"
echo ""

# Confirm migration (interactive; anything but y/Y aborts)
read -p "⚠️ Do you want to start the migration? [y/N] " -n 1 -r
echo ""
if [[ ! $REPLY =~ ^[Yy]$ ]]; then
    echo "Migration cancelled."
    exit 0
fi

echo ""
echo "Starting migration..."
echo ""

# Run the Go migration script
cd "$SCRIPT_DIR"
POCKETBASE_URL="$POCKETBASE_URL" POCKETBASE_COLLECTION="$POCKETBASE_COLLECTION" go run migrate.go

echo ""
echo "Migration complete!"
|
||||
492
misc/data/migration/migration.go
Normal file
492
misc/data/migration/migration.go
Normal file
@ -0,0 +1,492 @@
|
||||
// +build ignore
|
||||
|
||||
// Migration script to import data from the old API to PocketBase
|
||||
// Run with: go run migrate.go
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Migration defaults; the source and PocketBase URLs can be overridden
// via MIGRATION_SOURCE_URL and POCKETBASE_URL/PB_URL respectively.
const (
	defaultSourceAPI = "https://api.htl-braunau.at/dev/data"
	defaultPBURL = "http://localhost:8090"
	batchSize = 100
)
|
||||
|
||||
// Package-level state for the single-shot migration run (set in main and
// the authenticate helpers; this script is not concurrent).
var (
	sourceAPI string
	summaryAPI string
	authToken string // PocketBase auth token
)
|
||||
|
||||
// OldDataModel represents the data structure from the old API
// (one telemetry record as returned by the /paginated endpoint).
type OldDataModel struct {
	ID string `json:"id"`
	CtType int `json:"ct_type"`
	DiskSize int `json:"disk_size"`
	CoreCount int `json:"core_count"`
	RamSize int `json:"ram_size"`
	OsType string `json:"os_type"`
	OsVersion string `json:"os_version"`
	DisableIP6 string `json:"disableip6"`
	NsApp string `json:"nsapp"`
	Method string `json:"method"`
	CreatedAt string `json:"created_at"`
	PveVersion string `json:"pve_version"`
	Status string `json:"status"` // legacy values include "done"; normalized during import
	RandomID string `json:"random_id"`
	Type string `json:"type"`
	Error string `json:"error"`
}
|
||||
|
||||
// PBRecord represents the PocketBase record format posted to the target
// collection. It mirrors OldDataModel minus ID, plus a temporary
// old_created field for the timestamp-migration workaround.
type PBRecord struct {
	CtType int `json:"ct_type"`
	DiskSize int `json:"disk_size"`
	CoreCount int `json:"core_count"`
	RamSize int `json:"ram_size"`
	OsType string `json:"os_type"`
	OsVersion string `json:"os_version"`
	DisableIP6 string `json:"disableip6"`
	NsApp string `json:"nsapp"`
	Method string `json:"method"`
	PveVersion string `json:"pve_version"`
	Status string `json:"status"`
	RandomID string `json:"random_id"`
	Type string `json:"type"`
	Error string `json:"error"`
	// Temporary field for timestamp migration (PocketBase doesn't allow setting created/updated via API)
	// After migration, run SQL: UPDATE installations SET created = old_created, updated = old_created
	OldCreated string `json:"old_created,omitempty"`
}
|
||||
|
||||
// Summary mirrors the source API's /summary response; only the total
// entry count is needed to compute pagination.
type Summary struct {
	TotalEntries int `json:"total_entries"`
}
|
||||
|
||||
// main drives the one-shot migration: it resolves configuration from the
// environment, authenticates against PocketBase (preferring admin auth so
// legacy timestamps can be preserved), then pages through the legacy API
// and imports each record, printing migrated / skipped / failed totals.
func main() {
	// Setup source URLs
	baseURL := os.Getenv("MIGRATION_SOURCE_URL")
	if baseURL == "" {
		baseURL = defaultSourceAPI
	}
	sourceAPI = baseURL + "/paginated"
	summaryAPI = baseURL + "/summary"

	// Support both POCKETBASE_URL and PB_URL (Coolify uses PB_URL)
	pbURL := os.Getenv("POCKETBASE_URL")
	if pbURL == "" {
		pbURL = os.Getenv("PB_URL")
	}
	if pbURL == "" {
		pbURL = defaultPBURL
	}

	// Support both POCKETBASE_COLLECTION and PB_TARGET_COLLECTION
	pbCollection := os.Getenv("POCKETBASE_COLLECTION")
	if pbCollection == "" {
		pbCollection = os.Getenv("PB_TARGET_COLLECTION")
	}
	if pbCollection == "" {
		pbCollection = "_dev_telemetry_data"
	}

	// Auth collection
	authCollection := os.Getenv("PB_AUTH_COLLECTION")
	if authCollection == "" {
		authCollection = "_dev_telemetry_service"
	}

	// Credentials - prefer admin auth for timestamp preservation
	pbAdminEmail := os.Getenv("PB_ADMIN_EMAIL")
	pbAdminPassword := os.Getenv("PB_ADMIN_PASSWORD")
	pbIdentity := os.Getenv("PB_IDENTITY")
	pbPassword := os.Getenv("PB_PASSWORD")

	fmt.Println("===========================================")
	fmt.Println(" Data Migration to PocketBase")
	fmt.Println("===========================================")
	fmt.Printf("Source API: %s\n", baseURL)
	fmt.Printf("PocketBase URL: %s\n", pbURL)
	fmt.Printf("Collection: %s\n", pbCollection)
	fmt.Println("-------------------------------------------")

	// Authenticate with PocketBase - prefer Admin auth for timestamp support
	if pbAdminEmail != "" && pbAdminPassword != "" {
		fmt.Println("🔐 Authenticating as PocketBase Admin...")
		err := authenticateAdmin(pbURL, pbAdminEmail, pbAdminPassword)
		if err != nil {
			fmt.Printf("❌ Admin authentication failed: %v\n", err)
			os.Exit(1)
		}
		fmt.Println("✅ Admin authentication successful (timestamps will be preserved)")
	} else if pbIdentity != "" && pbPassword != "" {
		fmt.Println("🔐 Authenticating with PocketBase (collection auth)...")
		fmt.Println("⚠️ Note: Timestamps may not be preserved without admin auth")
		err := authenticate(pbURL, authCollection, pbIdentity, pbPassword)
		if err != nil {
			fmt.Printf("❌ Authentication failed: %v\n", err)
			os.Exit(1)
		}
		fmt.Println("✅ Authentication successful")
	} else {
		fmt.Println("⚠️ No credentials provided, trying without auth...")
	}
	fmt.Println("-------------------------------------------")

	// Get total count
	summary, err := getSummary()
	if err != nil {
		fmt.Printf("❌ Failed to get summary: %v\n", err)
		os.Exit(1)
	}
	fmt.Printf("📊 Total entries to migrate: %d\n", summary.TotalEntries)
	fmt.Println("-------------------------------------------")

	// Calculate pages (ceiling division)
	totalPages := (summary.TotalEntries + batchSize - 1) / batchSize

	var totalMigrated, totalFailed, totalSkipped int

	for page := 1; page <= totalPages; page++ {
		fmt.Printf("📦 Fetching page %d/%d (items %d-%d)...\n",
			page, totalPages,
			(page-1)*batchSize+1,
			min(page*batchSize, summary.TotalEntries))

		data, err := fetchPage(page, batchSize)
		if err != nil {
			// A failed page counts all of its (estimated) records as failed.
			fmt.Printf(" ❌ Failed to fetch page %d: %v\n", page, err)
			totalFailed += batchSize
			continue
		}

		for i, record := range data {
			err := importRecord(pbURL, pbCollection, record)
			if err != nil {
				// Duplicates are expected on re-runs; count them as skips.
				if isUniqueViolation(err) {
					totalSkipped++
					continue
				}
				fmt.Printf(" ❌ Failed to import record %d: %v\n", (page-1)*batchSize+i+1, err)
				totalFailed++
				continue
			}
			totalMigrated++
		}

		fmt.Printf(" ✅ Page %d complete (migrated: %d, skipped: %d, failed: %d)\n",
			page, len(data), totalSkipped, totalFailed)

		// Small delay to avoid overwhelming the server
		time.Sleep(100 * time.Millisecond)
	}

	fmt.Println("===========================================")
	fmt.Println(" Migration Complete")
	fmt.Println("===========================================")
	fmt.Printf("✅ Successfully migrated: %d\n", totalMigrated)
	fmt.Printf("⏭️ Skipped (duplicates): %d\n", totalSkipped)
	fmt.Printf("❌ Failed: %d\n", totalFailed)
	fmt.Println("===========================================")
}
|
||||
|
||||
func getSummary() (*Summary, error) {
|
||||
resp, err := http.Get(summaryAPI)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
var summary Summary
|
||||
if err := json.NewDecoder(resp.Body).Decode(&summary); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &summary, nil
|
||||
}
|
||||
|
||||
// authenticateAdmin authenticates as PocketBase admin (required for setting timestamps)
|
||||
func authenticateAdmin(pbURL, email, password string) error {
|
||||
body := map[string]string{
|
||||
"identity": email,
|
||||
"password": password,
|
||||
}
|
||||
jsonData, _ := json.Marshal(body)
|
||||
|
||||
// Try new PocketBase v0.23+ endpoint first (_superusers collection)
|
||||
endpoints := []string{
|
||||
fmt.Sprintf("%s/api/collections/_superusers/auth-with-password", pbURL),
|
||||
fmt.Sprintf("%s/api/admins/auth-with-password", pbURL), // Legacy endpoint
|
||||
}
|
||||
|
||||
client := &http.Client{Timeout: 10 * time.Second}
|
||||
var lastErr error
|
||||
|
||||
for _, url := range endpoints {
|
||||
req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonData))
|
||||
if err != nil {
|
||||
lastErr = err
|
||||
continue
|
||||
}
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
lastErr = err
|
||||
continue
|
||||
}
|
||||
|
||||
if resp.StatusCode == 404 {
|
||||
resp.Body.Close()
|
||||
continue // Try next endpoint
|
||||
}
|
||||
|
||||
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
|
||||
body, _ := io.ReadAll(resp.Body)
|
||||
resp.Body.Close()
|
||||
lastErr = fmt.Errorf("HTTP %d: %s", resp.StatusCode, string(body))
|
||||
continue
|
||||
}
|
||||
|
||||
var result struct {
|
||||
Token string `json:"token"`
|
||||
}
|
||||
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
|
||||
resp.Body.Close()
|
||||
lastErr = err
|
||||
continue
|
||||
}
|
||||
resp.Body.Close()
|
||||
|
||||
if result.Token == "" {
|
||||
lastErr = fmt.Errorf("no token in response")
|
||||
continue
|
||||
}
|
||||
|
||||
authToken = result.Token
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("all auth endpoints failed: %v", lastErr)
|
||||
}
|
||||
|
||||
func authenticate(pbURL, authCollection, identity, password string) error {
|
||||
body := map[string]string{
|
||||
"identity": identity,
|
||||
"password": password,
|
||||
}
|
||||
jsonData, _ := json.Marshal(body)
|
||||
|
||||
url := fmt.Sprintf("%s/api/collections/%s/auth-with-password", pbURL, authCollection)
|
||||
req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonData))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
|
||||
client := &http.Client{Timeout: 10 * time.Second}
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
|
||||
body, _ := io.ReadAll(resp.Body)
|
||||
return fmt.Errorf("HTTP %d: %s", resp.StatusCode, string(body))
|
||||
}
|
||||
|
||||
var result struct {
|
||||
Token string `json:"token"`
|
||||
}
|
||||
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
|
||||
return err
|
||||
}
|
||||
if result.Token == "" {
|
||||
return fmt.Errorf("no token in response")
|
||||
}
|
||||
|
||||
authToken = result.Token
|
||||
return nil
|
||||
}
|
||||
|
||||
func fetchPage(page, limit int) ([]OldDataModel, error) {
|
||||
url := fmt.Sprintf("%s?page=%d&limit=%d", sourceAPI, page, limit)
|
||||
|
||||
resp, err := http.Get(url)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
body, _ := io.ReadAll(resp.Body)
|
||||
return nil, fmt.Errorf("HTTP %d: %s", resp.StatusCode, string(body))
|
||||
}
|
||||
|
||||
var data []OldDataModel
|
||||
if err := json.NewDecoder(resp.Body).Decode(&data); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return data, nil
|
||||
}
|
||||
|
||||
func importRecord(pbURL, collection string, old OldDataModel) error {
|
||||
// Map status: "done" -> "success"
|
||||
status := old.Status
|
||||
switch status {
|
||||
case "done":
|
||||
status = "success"
|
||||
case "installing", "failed", "unknown", "success":
|
||||
// keep as-is
|
||||
default:
|
||||
status = "unknown"
|
||||
}
|
||||
|
||||
// ct_type: 1=unprivileged, 2=privileged in old data
|
||||
// PocketBase might expect 0/1, so normalize to 0 (unprivileged) or 1 (privileged)
|
||||
ctType := old.CtType
|
||||
if ctType <= 1 {
|
||||
ctType = 0 // unprivileged (default)
|
||||
} else {
|
||||
ctType = 1 // privileged/VM
|
||||
}
|
||||
|
||||
// Ensure type is set
|
||||
recordType := old.Type
|
||||
if recordType == "" {
|
||||
recordType = "lxc"
|
||||
}
|
||||
|
||||
// Ensure nsapp is set (required field)
|
||||
nsapp := old.NsApp
|
||||
if nsapp == "" {
|
||||
nsapp = "unknown"
|
||||
}
|
||||
|
||||
record := PBRecord{
|
||||
CtType: ctType,
|
||||
DiskSize: old.DiskSize,
|
||||
CoreCount: old.CoreCount,
|
||||
RamSize: old.RamSize,
|
||||
OsType: old.OsType,
|
||||
OsVersion: old.OsVersion,
|
||||
DisableIP6: old.DisableIP6,
|
||||
NsApp: nsapp,
|
||||
Method: old.Method,
|
||||
PveVersion: old.PveVersion,
|
||||
Status: status,
|
||||
RandomID: old.RandomID,
|
||||
Type: recordType,
|
||||
Error: old.Error,
|
||||
OldCreated: convertTimestamp(old.CreatedAt),
|
||||
}
|
||||
|
||||
jsonData, err := json.Marshal(record)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
url := fmt.Sprintf("%s/api/collections/%s/records", pbURL, collection)
|
||||
req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonData))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
if authToken != "" {
|
||||
req.Header.Set("Authorization", "Bearer "+authToken)
|
||||
}
|
||||
|
||||
client := &http.Client{Timeout: 10 * time.Second}
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusCreated {
|
||||
body, _ := io.ReadAll(resp.Body)
|
||||
return fmt.Errorf("HTTP %d: %s", resp.StatusCode, string(body))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func isUniqueViolation(err error) bool {
|
||||
if err == nil {
|
||||
return false
|
||||
}
|
||||
errStr := err.Error()
|
||||
return contains(errStr, "UNIQUE constraint failed") ||
|
||||
contains(errStr, "duplicate") ||
|
||||
contains(errStr, "already exists") ||
|
||||
contains(errStr, "validation_not_unique")
|
||||
}
|
||||
|
||||
func contains(s, substr string) bool {
|
||||
return len(s) >= len(substr) && (s == substr || len(s) > 0 && containsHelper(s, substr))
|
||||
}
|
||||
|
||||
func containsHelper(s, substr string) bool {
|
||||
for i := 0; i <= len(s)-len(substr); i++ {
|
||||
if s[i:i+len(substr)] == substr {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func min(a, b int) int {
|
||||
if a < b {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// convertTimestamp converts various timestamp formats to PocketBase format
|
||||
// PocketBase expects: "2006-01-02 15:04:05.000Z" or similar
|
||||
func convertTimestamp(ts string) string {
|
||||
if ts == "" {
|
||||
return ""
|
||||
}
|
||||
|
||||
// Try parsing various formats
|
||||
formats := []string{
|
||||
time.RFC3339, // "2006-01-02T15:04:05Z07:00"
|
||||
time.RFC3339Nano, // "2006-01-02T15:04:05.999999999Z07:00"
|
||||
"2006-01-02T15:04:05.000Z", // ISO with milliseconds
|
||||
"2006-01-02T15:04:05Z", // ISO without milliseconds
|
||||
"2006-01-02T15:04:05", // ISO without timezone
|
||||
"2006-01-02 15:04:05", // SQL format
|
||||
"2006-01-02 15:04:05.000", // SQL with ms
|
||||
"2006-01-02 15:04:05.000 UTC", // SQL with UTC
|
||||
"2006-01-02T15:04:05.000+00:00", // ISO with offset
|
||||
}
|
||||
|
||||
var parsed time.Time
|
||||
var err error
|
||||
for _, format := range formats {
|
||||
parsed, err = time.Parse(format, ts)
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
// If all parsing fails, return empty (PocketBase will set current time)
|
||||
fmt.Printf(" ⚠️ Could not parse timestamp: %s\n", ts)
|
||||
return ""
|
||||
}
|
||||
|
||||
// Return in PocketBase format (UTC)
|
||||
return parsed.UTC().Format("2006-01-02 15:04:05.000Z")
|
||||
}
|
||||
1197
misc/data/service.go
Normal file
1197
misc/data/service.go
Normal file
File diff suppressed because it is too large
Load Diff
BIN
misc/data/telemetry-service
Executable file
BIN
misc/data/telemetry-service
Executable file
Binary file not shown.
@ -27,100 +27,90 @@
|
||||
# ------------------------------------------------------------------------------
|
||||
# explain_exit_code()
|
||||
#
|
||||
# - Maps numeric exit codes to human-readable error descriptions
|
||||
# - Supports:
|
||||
# * Generic/Shell errors (1, 2, 126, 127, 128, 130, 137, 139, 143)
|
||||
# * Package manager errors (APT, DPKG: 100, 101, 255)
|
||||
# * Node.js/npm errors (243-249, 254)
|
||||
# * Python/pip/uv errors (210-212)
|
||||
# * PostgreSQL errors (231-234)
|
||||
# * MySQL/MariaDB errors (241-244)
|
||||
# * MongoDB errors (251-254)
|
||||
# * Proxmox custom codes (200-231)
|
||||
# - Returns description string for given exit code
|
||||
# - Canonical version is defined in api.func (sourced before this file)
|
||||
# - This section only provides a fallback if api.func was not loaded
|
||||
# - See api.func SECTION 1 for the authoritative exit code mappings
|
||||
# ------------------------------------------------------------------------------
|
||||
explain_exit_code() {
|
||||
local code="$1"
|
||||
case "$code" in
|
||||
# --- Generic / Shell ---
|
||||
1) echo "General error / Operation not permitted" ;;
|
||||
2) echo "Misuse of shell builtins (e.g. syntax error)" ;;
|
||||
126) echo "Command invoked cannot execute (permission problem?)" ;;
|
||||
127) echo "Command not found" ;;
|
||||
128) echo "Invalid argument to exit" ;;
|
||||
130) echo "Terminated by Ctrl+C (SIGINT)" ;;
|
||||
137) echo "Killed (SIGKILL / Out of memory?)" ;;
|
||||
139) echo "Segmentation fault (core dumped)" ;;
|
||||
143) echo "Terminated (SIGTERM)" ;;
|
||||
|
||||
# --- Package manager / APT / DPKG ---
|
||||
100) echo "APT: Package manager error (broken packages / dependency problems)" ;;
|
||||
101) echo "APT: Configuration error (bad sources.list, malformed config)" ;;
|
||||
255) echo "DPKG: Fatal internal error" ;;
|
||||
|
||||
# --- Node.js / npm / pnpm / yarn ---
|
||||
243) echo "Node.js: Out of memory (JavaScript heap out of memory)" ;;
|
||||
245) echo "Node.js: Invalid command-line option" ;;
|
||||
246) echo "Node.js: Internal JavaScript Parse Error" ;;
|
||||
247) echo "Node.js: Fatal internal error" ;;
|
||||
248) echo "Node.js: Invalid C++ addon / N-API failure" ;;
|
||||
249) echo "Node.js: Inspector error" ;;
|
||||
254) echo "npm/pnpm/yarn: Unknown fatal error" ;;
|
||||
|
||||
# --- Python / pip / uv ---
|
||||
210) echo "Python: Virtualenv / uv environment missing or broken" ;;
|
||||
211) echo "Python: Dependency resolution failed" ;;
|
||||
212) echo "Python: Installation aborted (permissions or EXTERNALLY-MANAGED)" ;;
|
||||
|
||||
# --- PostgreSQL ---
|
||||
231) echo "PostgreSQL: Connection failed (server not running / wrong socket)" ;;
|
||||
232) echo "PostgreSQL: Authentication failed (bad user/password)" ;;
|
||||
233) echo "PostgreSQL: Database does not exist" ;;
|
||||
234) echo "PostgreSQL: Fatal error in query / syntax" ;;
|
||||
|
||||
# --- MySQL / MariaDB ---
|
||||
241) echo "MySQL/MariaDB: Connection failed (server not running / wrong socket)" ;;
|
||||
242) echo "MySQL/MariaDB: Authentication failed (bad user/password)" ;;
|
||||
243) echo "MySQL/MariaDB: Database does not exist" ;;
|
||||
244) echo "MySQL/MariaDB: Fatal error in query / syntax" ;;
|
||||
|
||||
# --- MongoDB ---
|
||||
251) echo "MongoDB: Connection failed (server not running)" ;;
|
||||
252) echo "MongoDB: Authentication failed (bad user/password)" ;;
|
||||
253) echo "MongoDB: Database not found" ;;
|
||||
254) echo "MongoDB: Fatal query error" ;;
|
||||
|
||||
# --- Proxmox Custom Codes ---
|
||||
200) echo "Proxmox: Failed to create lock file" ;;
|
||||
203) echo "Proxmox: Missing CTID variable" ;;
|
||||
204) echo "Proxmox: Missing PCT_OSTYPE variable" ;;
|
||||
205) echo "Proxmox: Invalid CTID (<100)" ;;
|
||||
206) echo "Proxmox: CTID already in use" ;;
|
||||
207) echo "Proxmox: Password contains unescaped special characters" ;;
|
||||
208) echo "Proxmox: Invalid configuration (DNS/MAC/Network format)" ;;
|
||||
209) echo "Proxmox: Container creation failed" ;;
|
||||
210) echo "Proxmox: Cluster not quorate" ;;
|
||||
211) echo "Proxmox: Timeout waiting for template lock" ;;
|
||||
212) echo "Proxmox: Storage type 'iscsidirect' does not support containers (VMs only)" ;;
|
||||
213) echo "Proxmox: Storage type does not support 'rootdir' content" ;;
|
||||
214) echo "Proxmox: Not enough storage space" ;;
|
||||
215) echo "Proxmox: Container created but not listed (ghost state)" ;;
|
||||
216) echo "Proxmox: RootFS entry missing in config" ;;
|
||||
217) echo "Proxmox: Storage not accessible" ;;
|
||||
219) echo "Proxmox: CephFS does not support containers - use RBD" ;;
|
||||
224) echo "Proxmox: PBS storage is for backups only" ;;
|
||||
218) echo "Proxmox: Template file corrupted or incomplete" ;;
|
||||
220) echo "Proxmox: Unable to resolve template path" ;;
|
||||
221) echo "Proxmox: Template file not readable" ;;
|
||||
222) echo "Proxmox: Template download failed" ;;
|
||||
223) echo "Proxmox: Template not available after download" ;;
|
||||
225) echo "Proxmox: No template available for OS/Version" ;;
|
||||
231) echo "Proxmox: LXC stack upgrade failed" ;;
|
||||
|
||||
# --- Default ---
|
||||
*) echo "Unknown error" ;;
|
||||
esac
|
||||
}
|
||||
if ! declare -f explain_exit_code &>/dev/null; then
|
||||
explain_exit_code() {
|
||||
local code="$1"
|
||||
case "$code" in
|
||||
1) echo "General error / Operation not permitted" ;;
|
||||
2) echo "Misuse of shell builtins (e.g. syntax error)" ;;
|
||||
6) echo "curl: DNS resolution failed (could not resolve host)" ;;
|
||||
7) echo "curl: Failed to connect (network unreachable / host down)" ;;
|
||||
22) echo "curl: HTTP error returned (404, 429, 500+)" ;;
|
||||
28) echo "curl: Operation timeout (network slow or server not responding)" ;;
|
||||
35) echo "curl: SSL/TLS handshake failed (certificate error)" ;;
|
||||
100) echo "APT: Package manager error (broken packages / dependency problems)" ;;
|
||||
101) echo "APT: Configuration error (bad sources.list, malformed config)" ;;
|
||||
102) echo "APT: Lock held by another process (dpkg/apt still running)" ;;
|
||||
124) echo "Command timed out (timeout command)" ;;
|
||||
126) echo "Command invoked cannot execute (permission problem?)" ;;
|
||||
127) echo "Command not found" ;;
|
||||
128) echo "Invalid argument to exit" ;;
|
||||
130) echo "Terminated by Ctrl+C (SIGINT)" ;;
|
||||
134) echo "Process aborted (SIGABRT - possibly Node.js heap overflow)" ;;
|
||||
137) echo "Killed (SIGKILL / Out of memory?)" ;;
|
||||
139) echo "Segmentation fault (core dumped)" ;;
|
||||
141) echo "Broken pipe (SIGPIPE - output closed prematurely)" ;;
|
||||
143) echo "Terminated (SIGTERM)" ;;
|
||||
150) echo "Systemd: Service failed to start" ;;
|
||||
151) echo "Systemd: Service unit not found" ;;
|
||||
152) echo "Permission denied (EACCES)" ;;
|
||||
153) echo "Build/compile failed (make/gcc/cmake)" ;;
|
||||
154) echo "Node.js: Native addon build failed (node-gyp)" ;;
|
||||
160) echo "Python: Virtualenv / uv environment missing or broken" ;;
|
||||
161) echo "Python: Dependency resolution failed" ;;
|
||||
162) echo "Python: Installation aborted (permissions or EXTERNALLY-MANAGED)" ;;
|
||||
170) echo "PostgreSQL: Connection failed (server not running / wrong socket)" ;;
|
||||
171) echo "PostgreSQL: Authentication failed (bad user/password)" ;;
|
||||
172) echo "PostgreSQL: Database does not exist" ;;
|
||||
173) echo "PostgreSQL: Fatal error in query / syntax" ;;
|
||||
180) echo "MySQL/MariaDB: Connection failed (server not running / wrong socket)" ;;
|
||||
181) echo "MySQL/MariaDB: Authentication failed (bad user/password)" ;;
|
||||
182) echo "MySQL/MariaDB: Database does not exist" ;;
|
||||
183) echo "MySQL/MariaDB: Fatal error in query / syntax" ;;
|
||||
190) echo "MongoDB: Connection failed (server not running)" ;;
|
||||
191) echo "MongoDB: Authentication failed (bad user/password)" ;;
|
||||
192) echo "MongoDB: Database not found" ;;
|
||||
193) echo "MongoDB: Fatal query error" ;;
|
||||
200) echo "Proxmox: Failed to create lock file" ;;
|
||||
203) echo "Proxmox: Missing CTID variable" ;;
|
||||
204) echo "Proxmox: Missing PCT_OSTYPE variable" ;;
|
||||
205) echo "Proxmox: Invalid CTID (<100)" ;;
|
||||
206) echo "Proxmox: CTID already in use" ;;
|
||||
207) echo "Proxmox: Password contains unescaped special characters" ;;
|
||||
208) echo "Proxmox: Invalid configuration (DNS/MAC/Network format)" ;;
|
||||
209) echo "Proxmox: Container creation failed" ;;
|
||||
210) echo "Proxmox: Cluster not quorate" ;;
|
||||
211) echo "Proxmox: Timeout waiting for template lock" ;;
|
||||
212) echo "Proxmox: Storage type 'iscsidirect' does not support containers (VMs only)" ;;
|
||||
213) echo "Proxmox: Storage type does not support 'rootdir' content" ;;
|
||||
214) echo "Proxmox: Not enough storage space" ;;
|
||||
215) echo "Proxmox: Container created but not listed (ghost state)" ;;
|
||||
216) echo "Proxmox: RootFS entry missing in config" ;;
|
||||
217) echo "Proxmox: Storage not accessible" ;;
|
||||
218) echo "Proxmox: Template file corrupted or incomplete" ;;
|
||||
219) echo "Proxmox: CephFS does not support containers - use RBD" ;;
|
||||
220) echo "Proxmox: Unable to resolve template path" ;;
|
||||
221) echo "Proxmox: Template file not readable" ;;
|
||||
222) echo "Proxmox: Template download failed" ;;
|
||||
223) echo "Proxmox: Template not available after download" ;;
|
||||
224) echo "Proxmox: PBS storage is for backups only" ;;
|
||||
225) echo "Proxmox: No template available for OS/Version" ;;
|
||||
231) echo "Proxmox: LXC stack upgrade failed" ;;
|
||||
243) echo "Node.js: Out of memory (JavaScript heap out of memory)" ;;
|
||||
245) echo "Node.js: Invalid command-line option" ;;
|
||||
246) echo "Node.js: Internal JavaScript Parse Error" ;;
|
||||
247) echo "Node.js: Fatal internal error" ;;
|
||||
248) echo "Node.js: Invalid C++ addon / N-API failure" ;;
|
||||
249) echo "npm/pnpm/yarn: Unknown fatal error" ;;
|
||||
255) echo "DPKG: Fatal internal error" ;;
|
||||
*) echo "Unknown error" ;;
|
||||
esac
|
||||
}
|
||||
fi
|
||||
|
||||
# ==============================================================================
|
||||
# SECTION 2: ERROR HANDLERS
|
||||
@ -197,12 +187,7 @@ error_handler() {
|
||||
|
||||
# Create error flag file with exit code for host detection
|
||||
echo "$exit_code" >"/root/.install-${SESSION_ID:-error}.failed" 2>/dev/null || true
|
||||
|
||||
if declare -f msg_custom >/dev/null 2>&1; then
|
||||
msg_custom "📋" "${YW}" "Log saved to: ${container_log}"
|
||||
else
|
||||
echo -e "${YW}Log saved to:${CL} ${BL}${container_log}${CL}"
|
||||
fi
|
||||
# Log path is shown by host as combined log - no need to show container path
|
||||
else
|
||||
# HOST CONTEXT: Show local log path and offer container cleanup
|
||||
if declare -f msg_custom >/dev/null 2>&1; then
|
||||
|
||||
@ -1778,7 +1778,7 @@ function fetch_and_deploy_gh_release() {
|
||||
local app="$1"
|
||||
local repo="$2"
|
||||
local mode="${3:-tarball}" # tarball | binary | prebuild | singlefile | tag
|
||||
local version="${4:-latest}"
|
||||
local version="${APPLICATION_VERSION:-${4:-latest}}"
|
||||
local target="${5:-/opt/$app}"
|
||||
local asset_pattern="${6:-}"
|
||||
|
||||
@ -1974,6 +1974,12 @@ function fetch_and_deploy_gh_release() {
|
||||
[[ "$arch" == "x86_64" ]] && arch="amd64"
|
||||
[[ "$arch" == "aarch64" ]] && arch="arm64"
|
||||
|
||||
# Get Debian codename for distro-specific packages
|
||||
local codename=""
|
||||
if [[ -f /etc/os-release ]]; then
|
||||
codename=$(grep -oP '(?<=VERSION_CODENAME=).*' /etc/os-release 2>/dev/null || true)
|
||||
fi
|
||||
|
||||
local assets url_match=""
|
||||
assets=$(echo "$json" | jq -r '.assets[].browser_download_url')
|
||||
|
||||
@ -1989,7 +1995,17 @@ function fetch_and_deploy_gh_release() {
|
||||
done
|
||||
fi
|
||||
|
||||
# If no match via explicit pattern, fall back to architecture heuristic
|
||||
# If no match via explicit pattern, try architecture + codename match
|
||||
if [[ -z "$url_match" && -n "$codename" ]]; then
|
||||
for u in $assets; do
|
||||
if [[ "$u" =~ $arch.*$codename.*\.deb$ ]] || [[ "$u" =~ $arch.*-$codename\.deb$ ]] || [[ "$u" =~ ${arch}-${codename}\.deb$ ]] || [[ "$u" =~ ${arch}_${codename}\.deb$ ]]; then
|
||||
url_match="$u"
|
||||
break
|
||||
fi
|
||||
done
|
||||
fi
|
||||
|
||||
# Fallback: architecture heuristic without codename
|
||||
if [[ -z "$url_match" ]]; then
|
||||
for u in $assets; do
|
||||
if [[ "$u" =~ ($arch|amd64|x86_64|aarch64|arm64).*\.deb$ ]]; then
|
||||
@ -2230,7 +2246,7 @@ function fetch_and_deploy_codeberg_release() {
|
||||
local app="$1"
|
||||
local repo="$2"
|
||||
local mode="${3:-tarball}" # tarball | binary | prebuild | singlefile | tag
|
||||
local version="${4:-latest}"
|
||||
local version="${APPLICATION_VERSION:-${4:-latest}}"
|
||||
local target="${5:-/opt/$app}"
|
||||
local asset_pattern="${6:-}"
|
||||
|
||||
|
||||
309
tools/pve/microcode.sh
Normal file
309
tools/pve/microcode.sh
Normal file
@ -0,0 +1,309 @@
|
||||
#!/usr/bin/env bash
|
||||
# Copyright (c) 2021-2026 community-scripts ORG
|
||||
# Author: tteck (tteckster), MickLesk
|
||||
# License: MIT
|
||||
# https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
|
||||
|
||||
function header_info {
|
||||
clear
|
||||
cat <<"EOF"
|
||||
____ __ ____ __
|
||||
/ __ \_________ ________ ______________ _____ / |/ (_)_____________ _________ ____/ /__
|
||||
/ /_/ / ___/ __ \/ ___/ _ \/ ___/ ___/ __ \/ ___/ / /|_/ / / ___/ ___/ __ \/ ___/ __ \/ __ / _ \
|
||||
/ ____/ / / /_/ / /__/ __(__ |__ ) /_/ / / / / / / / /__/ / / /_/ / /__/ /_/ / /_/ / __/
|
||||
/_/ /_/ \____/\___/\___/____/____/\____/_/ /_/ /_/_/\___/_/ \____/\___/\____/\__,_/\___/
|
||||
|
||||
EOF
|
||||
}
|
||||
|
||||
RD=$(echo "\033[01;31m")
|
||||
YW=$(echo "\033[33m")
|
||||
GN=$(echo "\033[1;92m")
|
||||
BL=$(echo "\033[36m")
|
||||
CL=$(echo "\033[m")
|
||||
BFR="\\r\\033[K"
|
||||
HOLD="-"
|
||||
CM="${GN}✓${CL}"
|
||||
CROSS="${RD}✗${CL}"
|
||||
|
||||
msg_info() { echo -ne " ${HOLD} ${YW}$1..."; }
|
||||
msg_ok() { echo -e "${BFR} ${CM} ${GN}$1${CL}"; }
|
||||
msg_error() { echo -e "${BFR} ${CROSS} ${RD}$1${CL}"; }
|
||||
|
||||
header_info
|
||||
|
||||
# Enhanced CPU detection
|
||||
get_cpu_info() {
|
||||
CPU_VENDOR=$(lscpu | grep -oP 'Vendor ID:\s*\K\S+' | head -n 1)
|
||||
CPU_MODEL=$(lscpu | grep -oP 'Model name:\s*\K.*' | head -n 1 | xargs)
|
||||
CPU_FAMILY=$(lscpu | grep -oP 'CPU family:\s*\K\d+' | head -n 1)
|
||||
CPU_MODEL_NUM=$(lscpu | grep -oP 'Model:\s*\K\d+' | head -n 1)
|
||||
CPU_STEPPING=$(lscpu | grep -oP 'Stepping:\s*\K\d+' | head -n 1)
|
||||
|
||||
# Detect CPU generation/architecture
|
||||
CPU_ARCH="Unknown"
|
||||
if [ "$CPU_VENDOR" == "GenuineIntel" ]; then
|
||||
case "$CPU_MODEL_NUM" in
|
||||
# Intel Core Ultra (Meteor Lake)
|
||||
170 | 171 | 172) CPU_ARCH="Meteor Lake (Core Ultra)" ;;
|
||||
# Raptor Lake / Raptor Lake Refresh
|
||||
183 | 186 | 191) CPU_ARCH="Raptor Lake (13th/14th Gen)" ;;
|
||||
# Alder Lake
|
||||
151 | 154 | 167) CPU_ARCH="Alder Lake (12th Gen)" ;;
|
||||
# Rocket Lake
|
||||
167) CPU_ARCH="Rocket Lake (11th Gen)" ;;
|
||||
# Comet Lake
|
||||
165 | 166) CPU_ARCH="Comet Lake (10th Gen)" ;;
|
||||
# Ice Lake
|
||||
125 | 126) CPU_ARCH="Ice Lake (10th Gen)" ;;
|
||||
# Coffee Lake
|
||||
142 | 158) CPU_ARCH="Coffee Lake (8th/9th Gen)" ;;
|
||||
# Skylake / Kaby Lake
|
||||
78 | 94) CPU_ARCH="Skylake/Kaby Lake (6th/7th Gen)" ;;
|
||||
# Xeon Scalable
|
||||
85 | 106 | 108 | 143) CPU_ARCH="Xeon Scalable" ;;
|
||||
# Atom
|
||||
92 | 95 | 122 | 156) CPU_ARCH="Atom" ;;
|
||||
*) CPU_ARCH="Intel (Model $CPU_MODEL_NUM)" ;;
|
||||
esac
|
||||
elif [ "$CPU_VENDOR" == "AuthenticAMD" ]; then
|
||||
case "$CPU_FAMILY" in
|
||||
# Zen 5 (Granite Ridge, Turin)
|
||||
26) CPU_ARCH="Zen 5 (Ryzen 9000 / EPYC Turin)" ;;
|
||||
# Zen 4 (Raphael, Genoa)
|
||||
25)
|
||||
if [ "$CPU_MODEL_NUM" -ge 96 ]; then
|
||||
CPU_ARCH="Zen 4 (Ryzen 7000 / EPYC Genoa)"
|
||||
else
|
||||
CPU_ARCH="Zen 3 (Ryzen 5000 / EPYC Milan)"
|
||||
fi
|
||||
;;
|
||||
# Zen 3
|
||||
25) CPU_ARCH="Zen 3 (Ryzen 5000)" ;;
|
||||
# Zen 2
|
||||
23)
|
||||
if [ "$CPU_MODEL_NUM" -ge 49 ]; then
|
||||
CPU_ARCH="Zen 2 (Ryzen 3000 / EPYC Rome)"
|
||||
else
|
||||
CPU_ARCH="Zen/Zen+ (Ryzen 1000/2000)"
|
||||
fi
|
||||
;;
|
||||
# Older AMD
|
||||
21) CPU_ARCH="Bulldozer/Piledriver" ;;
|
||||
*) CPU_ARCH="AMD (Family $CPU_FAMILY)" ;;
|
||||
esac
|
||||
fi
|
||||
}
|
||||
|
||||
# Get current microcode revision
|
||||
get_current_microcode() {
|
||||
# Try multiple sources for microcode version
|
||||
current_microcode=$(journalctl -k 2>/dev/null | grep -i 'microcode' | grep -oP '(revision|updated.*to|Current revision:)\s*\K0x[0-9a-fA-F]+' | tail -1)
|
||||
|
||||
if [ -z "$current_microcode" ]; then
|
||||
current_microcode=$(dmesg 2>/dev/null | grep -i 'microcode' | grep -oP '0x[0-9a-fA-F]+' | tail -1)
|
||||
fi
|
||||
|
||||
if [ -z "$current_microcode" ]; then
|
||||
# Try reading from CPU directly
|
||||
if [ -f /sys/devices/system/cpu/cpu0/microcode/version ]; then
|
||||
current_microcode=$(cat /sys/devices/system/cpu/cpu0/microcode/version 2>/dev/null)
|
||||
fi
|
||||
fi
|
||||
|
||||
[ -z "$current_microcode" ] && current_microcode="Not detected"
|
||||
}
|
||||
|
||||
# Display CPU information
|
||||
show_cpu_info() {
|
||||
echo -e "\n${BL}╔══════════════════════════════════════════════════════════════╗${CL}"
|
||||
echo -e "${BL}║${CL} ${GN}CPU Information${CL} ${BL}║${CL}"
|
||||
echo -e "${BL}╠══════════════════════════════════════════════════════════════╣${CL}"
|
||||
echo -e "${BL}║${CL} ${YW}Model:${CL} $CPU_MODEL"
|
||||
echo -e "${BL}║${CL} ${YW}Vendor:${CL} $CPU_VENDOR"
|
||||
echo -e "${BL}║${CL} ${YW}Architecture:${CL} $CPU_ARCH"
|
||||
echo -e "${BL}║${CL} ${YW}Family/Model:${CL} $CPU_FAMILY / $CPU_MODEL_NUM (Stepping $CPU_STEPPING)"
|
||||
echo -e "${BL}║${CL} ${YW}Microcode:${CL} $current_microcode"
|
||||
echo -e "${BL}╚══════════════════════════════════════════════════════════════╝${CL}\n"
|
||||
}
|
||||
|
||||
intel() {
|
||||
if ! dpkg -s iucode-tool >/dev/null 2>&1; then
|
||||
msg_info "Installing iucode-tool (Intel microcode updater)"
|
||||
apt-get install -y iucode-tool &>/dev/null
|
||||
msg_ok "Installed iucode-tool"
|
||||
else
|
||||
msg_ok "Intel iucode-tool is already installed"
|
||||
sleep 1
|
||||
fi
|
||||
|
||||
msg_info "Fetching available Intel microcode packages"
|
||||
intel_microcode=$(curl -fsSL "https://ftp.debian.org/debian/pool/non-free-firmware/i/intel-microcode/" | grep -oP 'href="intel-microcode[^"]*amd64\.deb"' | sed 's/href="//;s/"//' | sort -V)
|
||||
|
||||
[ -z "$intel_microcode" ] && {
|
||||
whiptail --backtitle "Proxmox VE Helper Scripts" --title "No Microcode Found" --msgbox "No microcode packages found. Try again later." 10 68
|
||||
msg_error "No microcode packages found"
|
||||
exit 1
|
||||
}
|
||||
msg_ok "Found $(echo "$intel_microcode" | wc -l) packages"
|
||||
|
||||
# Get latest version for recommendation
|
||||
latest_version=$(echo "$intel_microcode" | tail -1)
|
||||
|
||||
MICROCODE_MENU=()
|
||||
MSG_MAX_LENGTH=0
|
||||
|
||||
while read -r ITEM; do
|
||||
[ -z "$ITEM" ] && continue
|
||||
OFFSET=2
|
||||
((${#ITEM} + OFFSET > MSG_MAX_LENGTH)) && MSG_MAX_LENGTH=${#ITEM}+OFFSET
|
||||
# Mark latest as default ON
|
||||
if [ "$ITEM" == "$latest_version" ]; then
|
||||
MICROCODE_MENU+=("$ITEM" "(Latest - Recommended)" "ON")
|
||||
else
|
||||
MICROCODE_MENU+=("$ITEM" "" "OFF")
|
||||
fi
|
||||
done < <(echo "$intel_microcode")
|
||||
|
||||
microcode=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "Intel Microcode - Current: ${current_microcode}" --radiolist "\nCPU: ${CPU_MODEL}\nArchitecture: ${CPU_ARCH}\n\nSelect a microcode package to install:\n" 20 $((MSG_MAX_LENGTH + 65)) 8 "${MICROCODE_MENU[@]}" 3>&1 1>&2 2>&3 | tr -d '"')
|
||||
|
||||
[ -z "$microcode" ] && {
|
||||
whiptail --backtitle "Proxmox VE Helper Scripts" --title "No Microcode Selected" --msgbox "No microcode package selected." 10 68
|
||||
msg_info "Exiting"
|
||||
sleep 1
|
||||
msg_ok "Done"
|
||||
exit 0
|
||||
}
|
||||
|
||||
msg_info "Downloading Intel Microcode Package: $microcode"
|
||||
wget -q "https://ftp.debian.org/debian/pool/non-free-firmware/i/intel-microcode/$microcode" -O "/tmp/$microcode"
|
||||
msg_ok "Downloaded $microcode"
|
||||
|
||||
msg_info "Installing $microcode"
|
||||
dpkg -i "/tmp/$microcode" &>/dev/null
|
||||
msg_ok "Installed $microcode"
|
||||
|
||||
msg_info "Cleaning up"
|
||||
rm -f "/tmp/$microcode"
|
||||
msg_ok "Cleaned"
|
||||
|
||||
# Try to reload microcode without reboot (if supported)
|
||||
if [ -f /sys/devices/system/cpu/microcode/reload ]; then
|
||||
msg_info "Attempting live microcode reload"
|
||||
echo 1 >/sys/devices/system/cpu/microcode/reload 2>/dev/null && msg_ok "Live reload successful" || msg_info "Live reload not supported, reboot required"
|
||||
fi
|
||||
|
||||
# Check new version
|
||||
sleep 1
|
||||
new_microcode=$(cat /sys/devices/system/cpu/cpu0/microcode/version 2>/dev/null || echo "Check after reboot")
|
||||
|
||||
echo -e "\n${GN}╔══════════════════════════════════════════════════════════════╗${CL}"
|
||||
echo -e "${GN}║${CL} ${GN}Installation Complete${CL} ${GN}║${CL}"
|
||||
echo -e "${GN}╠══════════════════════════════════════════════════════════════╣${CL}"
|
||||
echo -e "${GN}║${CL} ${YW}Previous Microcode:${CL} $current_microcode"
|
||||
echo -e "${GN}║${CL} ${YW}New Microcode:${CL} $new_microcode"
|
||||
echo -e "${GN}╚══════════════════════════════════════════════════════════════╝${CL}"
|
||||
echo -e "\n${YW}Note:${CL} A system reboot is recommended to fully apply the microcode update.\n"
|
||||
}
|
||||
|
||||
# Fetch, select, download and install an AMD microcode package from the
# Debian non-free-firmware pool, then attempt a live reload.
# Reads globals: CPU_MODEL, CPU_ARCH, current_microcode, color vars (GN/YW/CL).
# Writes globals: amd_microcode, latest_version, microcode, new_microcode,
# MICROCODE_MENU, MSG_MAX_LENGTH. Exits the script on failure or cancel.
amd() {
  msg_info "Fetching available AMD microcode packages"
  # Scrape the Debian pool index for amd64 .deb packages, oldest -> newest.
  amd_microcode=$(curl -fsSL "https://ftp.debian.org/debian/pool/non-free-firmware/a/amd64-microcode/" | grep -oP 'href="amd64-microcode[^"]*amd64\.deb"' | sed 's/href="//;s/"//' | sort -V)

  [ -z "$amd_microcode" ] && {
    whiptail --backtitle "Proxmox VE Helper Scripts" --title "No Microcode Found" --msgbox "No microcode packages found. Try again later." 10 68
    msg_error "No microcode packages found"
    exit 1
  }
  msg_ok "Found $(echo "$amd_microcode" | wc -l) packages"

  # Get latest version for recommendation (sort -V above puts it last)
  latest_version=$(echo "$amd_microcode" | tail -1)

  MICROCODE_MENU=()
  MSG_MAX_LENGTH=0

  while read -r ITEM; do
    [ -z "$ITEM" ] && continue
    OFFSET=2
    # Track the widest entry so the dialog can be sized to fit.
    # NOTE: assign the evaluated sum, not the literal string "N+OFFSET"
    # (the old form only worked by bash's recursive arithmetic expansion).
    ((${#ITEM} + OFFSET > MSG_MAX_LENGTH)) && MSG_MAX_LENGTH=$((${#ITEM} + OFFSET))
    # Mark latest as default ON
    if [ "$ITEM" == "$latest_version" ]; then
      MICROCODE_MENU+=("$ITEM" "(Latest - Recommended)" "ON")
    else
      MICROCODE_MENU+=("$ITEM" "" "OFF")
    fi
  done < <(echo "$amd_microcode")

  # tr -d '"' strips quoting whiptail may add around the selected tag.
  microcode=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "AMD Microcode - Current: ${current_microcode}" --radiolist "\nCPU: ${CPU_MODEL}\nArchitecture: ${CPU_ARCH}\n\nSelect a microcode package to install:\n" 20 $((MSG_MAX_LENGTH + 65)) 8 "${MICROCODE_MENU[@]}" 3>&1 1>&2 2>&3 | tr -d '"')

  [ -z "$microcode" ] && {
    whiptail --backtitle "Proxmox VE Helper Scripts" --title "No Microcode Selected" --msgbox "No microcode package selected." 10 68
    msg_info "Exiting"
    sleep 1
    msg_ok "Done"
    exit 0
  }

  msg_info "Downloading AMD Microcode Package: $microcode"
  # Abort on download failure instead of feeding an empty file to dpkg.
  if ! wget -q "https://ftp.debian.org/debian/pool/non-free-firmware/a/amd64-microcode/$microcode" -O "/tmp/$microcode"; then
    rm -f "/tmp/$microcode"
    msg_error "Failed to download $microcode"
    exit 1
  fi
  msg_ok "Downloaded $microcode"

  msg_info "Installing $microcode"
  # Check dpkg's exit status; previously a failed install was reported as OK.
  if ! dpkg -i "/tmp/$microcode" &>/dev/null; then
    rm -f "/tmp/$microcode"
    msg_error "Failed to install $microcode"
    exit 1
  fi
  msg_ok "Installed $microcode"

  msg_info "Cleaning up"
  rm -f "/tmp/$microcode"
  msg_ok "Cleaned"

  # Try to reload microcode without reboot (if supported)
  if [ -f /sys/devices/system/cpu/microcode/reload ]; then
    msg_info "Attempting live microcode reload"
    echo 1 >/sys/devices/system/cpu/microcode/reload 2>/dev/null && msg_ok "Live reload successful" || msg_info "Live reload not supported, reboot required"
  fi

  # Check new version
  sleep 1
  new_microcode=$(cat /sys/devices/system/cpu/cpu0/microcode/version 2>/dev/null || echo "Check after reboot")

  echo -e "\n${GN}╔══════════════════════════════════════════════════════════════╗${CL}"
  echo -e "${GN}║${CL}                   ${GN}Installation Complete${CL}                      ${GN}║${CL}"
  echo -e "${GN}╠══════════════════════════════════════════════════════════════╣${CL}"
  echo -e "${GN}║${CL} ${YW}Previous Microcode:${CL} $current_microcode"
  echo -e "${GN}║${CL} ${YW}New Microcode:${CL} $new_microcode"
  echo -e "${GN}╚══════════════════════════════════════════════════════════════╝${CL}"
  echo -e "\n${YW}Note:${CL} A system reboot is recommended to fully apply the microcode update.\n"
}
|
||||
|
||||
# Main script: verify we are on Proxmox VE, detect the CPU, confirm with the
# user, then dispatch to the vendor-specific installer.
if ! command -v pveversion >/dev/null 2>&1; then
  header_info
  msg_error "No PVE Detected!"
  exit 1
fi

# Gather CPU information
msg_info "Detecting CPU"
get_cpu_info
get_current_microcode
msg_ok "CPU detected: $CPU_VENDOR"

# Show CPU info
show_cpu_info

# Confirmation dialog with CPU info; bail out quietly if the user declines.
whiptail --backtitle "Proxmox VE Helper Scripts" --title "Proxmox VE Processor Microcode" --yesno "CPU: ${CPU_MODEL}\nArchitecture: ${CPU_ARCH}\nCurrent Microcode: ${current_microcode}\n\nThis will check for CPU microcode packages with the option to install.\n\nProceed?" 14 70 || {
  msg_info "Cancelled by user"
  exit 0
}

# Dispatch on the CPU vendor string reported by the hardware.
case "$CPU_VENDOR" in
  GenuineIntel)
    intel
    ;;
  AuthenticAMD)
    amd
    ;;
  *)
    msg_error "CPU vendor '${CPU_VENDOR}' is not supported"
    msg_info "Supported vendors: GenuineIntel, AuthenticAMD"
    exit 1
    ;;
esac
||||
Loading…
x
Reference in New Issue
Block a user