CanbiZ (MickLesk) 2026-02-09 19:15:31 +01:00
commit c8bf9059a9
11 changed files with 1914 additions and 37 deletions


@ -1,10 +1,52 @@
FROM golang:1.25-alpine AS build
WORKDIR /src
COPY go.mod go.sum* ./
RUN go mod download 2>/dev/null || true
COPY . .
RUN go build -trimpath -ldflags "-s -w" -o /out/telemetry-ingest .
RUN go build -trimpath -ldflags "-s -w" -o /out/migrate migrate.go
FROM alpine:3.23
RUN apk add --no-cache ca-certificates tzdata
WORKDIR /app
COPY --from=build /out/telemetry-ingest /app/telemetry-ingest
COPY --from=build /out/migrate /app/migrate
COPY entrypoint.sh /app/entrypoint.sh
RUN chmod +x /app/entrypoint.sh /app/migrate
# Service config
ENV LISTEN_ADDR=":8080"
ENV MAX_BODY_BYTES="1024"
ENV RATE_LIMIT_RPM="60"
ENV RATE_BURST="20"
ENV UPSTREAM_TIMEOUT_MS="4000"
ENV ENABLE_REQUEST_LOGGING="false"
# Cache config (optional)
ENV ENABLE_CACHE="true"
ENV CACHE_TTL_SECONDS="60"
ENV ENABLE_REDIS="false"
# ENV REDIS_URL="redis://localhost:6379"
# Alert config (optional)
ENV ALERT_ENABLED="false"
# ENV SMTP_HOST=""
# ENV SMTP_PORT="587"
# ENV SMTP_USER=""
# ENV SMTP_PASSWORD=""
# ENV SMTP_FROM="telemetry@proxmoxved.local"
# ENV SMTP_TO=""
# ENV SMTP_USE_TLS="false"
ENV ALERT_FAILURE_THRESHOLD="20.0"
ENV ALERT_CHECK_INTERVAL_MIN="15"
ENV ALERT_COOLDOWN_MIN="60"
# Migration config (optional)
ENV RUN_MIGRATION="false"
ENV MIGRATION_REQUIRED="false"
ENV MIGRATION_SOURCE_URL="https://api.htl-braunau.at/dev/data"
EXPOSE 8080
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s \
CMD wget -q --spider http://localhost:8080/healthz || exit 1
ENTRYPOINT ["/app/entrypoint.sh"]

misc/data/alerts.go Normal file

@ -0,0 +1,267 @@
package main
import (
"bytes"
"context"
"crypto/tls"
"fmt"
"log"
"net/smtp"
"strings"
"sync"
"time"
)
// AlertConfig holds SMTP alert configuration
type AlertConfig struct {
Enabled bool
SMTPHost string
SMTPPort int
SMTPUser string
SMTPPassword string
SMTPFrom string
SMTPTo []string
UseTLS bool
FailureThreshold float64 // Alert when failure rate exceeds this (e.g., 20.0 = 20%)
CheckInterval time.Duration // How often to check
Cooldown time.Duration // Minimum time between alerts
}
// Alerter handles alerting functionality
type Alerter struct {
cfg AlertConfig
lastAlertAt time.Time
mu sync.Mutex
pb *PBClient
lastStats alertStats
alertHistory []AlertEvent
}
type alertStats struct {
successCount int
failedCount int
checkedAt time.Time
}
// AlertEvent records an alert that was sent
type AlertEvent struct {
Timestamp time.Time `json:"timestamp"`
Type string `json:"type"`
Message string `json:"message"`
FailureRate float64 `json:"failure_rate,omitempty"`
}
// NewAlerter creates a new alerter instance
func NewAlerter(cfg AlertConfig, pb *PBClient) *Alerter {
return &Alerter{
cfg: cfg,
pb: pb,
alertHistory: make([]AlertEvent, 0),
}
}
// Start begins the alert monitoring loop
func (a *Alerter) Start() {
if !a.cfg.Enabled {
log.Println("INFO: alerting disabled")
return
}
if a.cfg.SMTPHost == "" || len(a.cfg.SMTPTo) == 0 {
log.Println("WARN: alerting enabled but SMTP not configured")
return
}
go a.monitorLoop()
log.Printf("INFO: alert monitoring started (threshold: %.1f%%, interval: %v)", a.cfg.FailureThreshold, a.cfg.CheckInterval)
}
func (a *Alerter) monitorLoop() {
ticker := time.NewTicker(a.cfg.CheckInterval)
defer ticker.Stop()
for range ticker.C {
a.checkAndAlert()
}
}
func (a *Alerter) checkAndAlert() {
ctx, cancel := newTimeoutContext(10 * time.Second)
defer cancel()
// Fetch the last 24 hours of data (days=1)
data, err := a.pb.FetchDashboardData(ctx, 1)
if err != nil {
log.Printf("WARN: alert check failed: %v", err)
return
}
// Calculate current failure rate
total := data.SuccessCount + data.FailedCount
if total < 10 {
// Not enough data to determine rate
return
}
failureRate := float64(data.FailedCount) / float64(total) * 100
// Check if we should alert
if failureRate >= a.cfg.FailureThreshold {
a.maybeSendAlert(failureRate, data.FailedCount, total)
}
}
func (a *Alerter) maybeSendAlert(rate float64, failed, total int) {
a.mu.Lock()
defer a.mu.Unlock()
// Check cooldown
if time.Since(a.lastAlertAt) < a.cfg.Cooldown {
return
}
// Send alert
subject := fmt.Sprintf("[ProxmoxVED Alert] High Failure Rate: %.1f%%", rate)
body := fmt.Sprintf(`ProxmoxVE Helper Scripts - Telemetry Alert
High installation failure rate detected!
Current Statistics (last 24h):
- Failure Rate: %.1f%%
- Failed Installations: %d
- Total Installations: %d
- Threshold: %.1f%%
Time: %s
Please check the dashboard for more details.
---
This is an automated alert from the telemetry service.
`, rate, failed, total, a.cfg.FailureThreshold, time.Now().Format(time.RFC1123))
if err := a.sendEmail(subject, body); err != nil {
log.Printf("ERROR: failed to send alert email: %v", err)
return
}
a.lastAlertAt = time.Now()
a.alertHistory = append(a.alertHistory, AlertEvent{
Timestamp: time.Now(),
Type: "high_failure_rate",
Message: fmt.Sprintf("Failure rate %.1f%% exceeded threshold %.1f%%", rate, a.cfg.FailureThreshold),
FailureRate: rate,
})
// Keep only last 100 alerts
if len(a.alertHistory) > 100 {
a.alertHistory = a.alertHistory[len(a.alertHistory)-100:]
}
log.Printf("ALERT: sent high failure rate alert (%.1f%%)", rate)
}
func (a *Alerter) sendEmail(subject, body string) error {
// Build message
var msg bytes.Buffer
msg.WriteString(fmt.Sprintf("From: %s\r\n", a.cfg.SMTPFrom))
msg.WriteString(fmt.Sprintf("To: %s\r\n", strings.Join(a.cfg.SMTPTo, ", ")))
msg.WriteString(fmt.Sprintf("Subject: %s\r\n", subject))
msg.WriteString("MIME-Version: 1.0\r\n")
msg.WriteString("Content-Type: text/plain; charset=UTF-8\r\n")
msg.WriteString("\r\n")
msg.WriteString(body)
addr := fmt.Sprintf("%s:%d", a.cfg.SMTPHost, a.cfg.SMTPPort)
var auth smtp.Auth
if a.cfg.SMTPUser != "" && a.cfg.SMTPPassword != "" {
auth = smtp.PlainAuth("", a.cfg.SMTPUser, a.cfg.SMTPPassword, a.cfg.SMTPHost)
}
if a.cfg.UseTLS {
// TLS connection
tlsConfig := &tls.Config{
ServerName: a.cfg.SMTPHost,
}
conn, err := tls.Dial("tcp", addr, tlsConfig)
if err != nil {
return fmt.Errorf("TLS dial failed: %w", err)
}
defer conn.Close()
client, err := smtp.NewClient(conn, a.cfg.SMTPHost)
if err != nil {
return fmt.Errorf("SMTP client failed: %w", err)
}
defer client.Close()
if auth != nil {
if err := client.Auth(auth); err != nil {
return fmt.Errorf("SMTP auth failed: %w", err)
}
}
if err := client.Mail(a.cfg.SMTPFrom); err != nil {
return fmt.Errorf("SMTP MAIL failed: %w", err)
}
for _, to := range a.cfg.SMTPTo {
if err := client.Rcpt(to); err != nil {
return fmt.Errorf("SMTP RCPT failed: %w", err)
}
}
w, err := client.Data()
if err != nil {
return fmt.Errorf("SMTP DATA failed: %w", err)
}
_, err = w.Write(msg.Bytes())
if err != nil {
return fmt.Errorf("SMTP write failed: %w", err)
}
return w.Close()
}
// Plain connection; smtp.SendMail upgrades via STARTTLS when the server offers it
return smtp.SendMail(addr, auth, a.cfg.SMTPFrom, a.cfg.SMTPTo, msg.Bytes())
}
// GetAlertHistory returns recent alert events
func (a *Alerter) GetAlertHistory() []AlertEvent {
a.mu.Lock()
defer a.mu.Unlock()
result := make([]AlertEvent, len(a.alertHistory))
copy(result, a.alertHistory)
return result
}
// TestAlert sends a test alert email
func (a *Alerter) TestAlert() error {
if !a.cfg.Enabled || a.cfg.SMTPHost == "" {
return fmt.Errorf("alerting not configured")
}
subject := "[ProxmoxVED] Test Alert"
body := fmt.Sprintf(`This is a test alert from ProxmoxVE Helper Scripts telemetry service.
If you received this email, your alert configuration is working correctly.
Time: %s
SMTP Host: %s
Recipients: %s
---
This is an automated test message.
`, time.Now().Format(time.RFC1123), a.cfg.SMTPHost, strings.Join(a.cfg.SMTPTo, ", "))
return a.sendEmail(subject, body)
}
// Helper for timeout context
func newTimeoutContext(d time.Duration) (context.Context, context.CancelFunc) {
return context.WithTimeout(context.Background(), d)
}
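Alerting stays inactive unless ALERT_ENABLED is true and at least an SMTP host and one recipient are configured; the environment variables map 1:1 onto the AlertConfig fields above. A sketch of a working configuration (host and addresses are placeholders), plus a manual test through the endpoint registered in main.go:

  ALERT_ENABLED="true"
  SMTP_HOST="mail.example.com" SMTP_PORT="587" SMTP_USE_TLS="false"
  SMTP_FROM="telemetry@proxmoxved.local" SMTP_TO="admin@example.com"
  ALERT_FAILURE_THRESHOLD="20.0" ALERT_CHECK_INTERVAL_MIN="15" ALERT_COOLDOWN_MIN="60"

  curl -s -X POST http://localhost:8080/api/alerts/test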

misc/data/cache.go Normal file

@ -0,0 +1,158 @@
package main
import (
"context"
"encoding/json"
"log"
"strconv"
"sync"
"time"
"github.com/redis/go-redis/v9"
)
// CacheConfig holds cache configuration
type CacheConfig struct {
RedisURL string
EnableRedis bool
DefaultTTL time.Duration
}
// Cache provides caching functionality with Redis or in-memory fallback
type Cache struct {
redis *redis.Client
useRedis bool
defaultTTL time.Duration
// In-memory fallback
mu sync.RWMutex
memData map[string]cacheEntry
}
type cacheEntry struct {
data []byte
expiresAt time.Time
}
// NewCache creates a new cache instance
func NewCache(cfg CacheConfig) *Cache {
c := &Cache{
defaultTTL: cfg.DefaultTTL,
memData: make(map[string]cacheEntry),
}
if cfg.EnableRedis && cfg.RedisURL != "" {
opts, err := redis.ParseURL(cfg.RedisURL)
if err != nil {
log.Printf("WARN: invalid redis URL, using in-memory cache: %v", err)
return c
}
client := redis.NewClient(opts)
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
defer cancel()
if err := client.Ping(ctx).Err(); err != nil {
log.Printf("WARN: redis connection failed, using in-memory cache: %v", err)
return c
}
c.redis = client
c.useRedis = true
log.Printf("INFO: connected to Redis for caching")
}
// Start cleanup goroutine for in-memory cache
if !c.useRedis {
go c.cleanupLoop()
}
return c
}
func (c *Cache) cleanupLoop() {
ticker := time.NewTicker(5 * time.Minute)
defer ticker.Stop()
for range ticker.C {
c.mu.Lock()
now := time.Now()
for k, v := range c.memData {
if now.After(v.expiresAt) {
delete(c.memData, k)
}
}
c.mu.Unlock()
}
}
// Get retrieves a value from cache
func (c *Cache) Get(ctx context.Context, key string, dest interface{}) bool {
if c.useRedis {
data, err := c.redis.Get(ctx, key).Bytes()
if err != nil {
return false
}
return json.Unmarshal(data, dest) == nil
}
// In-memory fallback
c.mu.RLock()
entry, ok := c.memData[key]
c.mu.RUnlock()
if !ok || time.Now().After(entry.expiresAt) {
return false
}
return json.Unmarshal(entry.data, dest) == nil
}
// Set stores a value in cache
func (c *Cache) Set(ctx context.Context, key string, value interface{}, ttl time.Duration) error {
if ttl == 0 {
ttl = c.defaultTTL
}
data, err := json.Marshal(value)
if err != nil {
return err
}
if c.useRedis {
return c.redis.Set(ctx, key, data, ttl).Err()
}
// In-memory fallback
c.mu.Lock()
c.memData[key] = cacheEntry{
data: data,
expiresAt: time.Now().Add(ttl),
}
c.mu.Unlock()
return nil
}
// Delete removes a key from cache
func (c *Cache) Delete(ctx context.Context, key string) error {
if c.useRedis {
return c.redis.Del(ctx, key).Err()
}
c.mu.Lock()
delete(c.memData, key)
c.mu.Unlock()
return nil
}
// InvalidateDashboard clears dashboard cache
func (c *Cache) InvalidateDashboard(ctx context.Context) {
// Delete all dashboard cache keys
for days := 1; days <= 365; days++ {
_ = c.Delete(ctx, dashboardCacheKey(days))
}
}
// dashboardCacheKey must produce the same "dashboard:<days>" keys the /api/dashboard handler writes
func dashboardCacheKey(days int) string {
return "dashboard:" + strconv.Itoa(days)
}


@ -6,6 +6,7 @@ import (
"fmt" "fmt"
"net/http" "net/http"
"net/url" "net/url"
"strings"
"time" "time"
) )
@ -19,6 +20,10 @@ type DashboardData struct {
TopApps []AppCount `json:"top_apps"`
OsDistribution []OsCount `json:"os_distribution"`
MethodStats []MethodCount `json:"method_stats"`
PveVersions []PveCount `json:"pve_versions"`
TypeStats []TypeCount `json:"type_stats"`
ErrorAnalysis []ErrorGroup `json:"error_analysis"`
FailedApps []AppFailure `json:"failed_apps"`
RecentRecords []TelemetryRecord `json:"recent_records"`
DailyStats []DailyStat `json:"daily_stats"`
}
@ -38,6 +43,29 @@ type MethodCount struct {
Count int `json:"count"`
}
type PveCount struct {
Version string `json:"version"`
Count int `json:"count"`
}
type TypeCount struct {
Type string `json:"type"`
Count int `json:"count"`
}
type ErrorGroup struct {
Pattern string `json:"pattern"`
Count int `json:"count"`
Apps string `json:"apps"` // Comma-separated list of affected apps
}
type AppFailure struct {
App string `json:"app"`
TotalCount int `json:"total_count"`
FailedCount int `json:"failed_count"`
FailureRate float64 `json:"failure_rate"`
}
type DailyStat struct {
Date string `json:"date"`
Success int `json:"success"`
@ -64,8 +92,12 @@ func (p *PBClient) FetchDashboardData(ctx context.Context, days int) (*Dashboard
// Aggregate statistics
appCounts := make(map[string]int)
appFailures := make(map[string]int)
osCounts := make(map[string]int)
methodCounts := make(map[string]int)
pveCounts := make(map[string]int)
typeCounts := make(map[string]int)
errorPatterns := make(map[string]map[string]bool) // pattern -> set of apps
dailySuccess := make(map[string]int)
dailyFailed := make(map[string]int)
@ -73,10 +105,24 @@ func (p *PBClient) FetchDashboardData(ctx context.Context, days int) (*Dashboard
data.TotalInstalls++
switch r.Status {
case "success":
data.SuccessCount++
case "failed":
data.FailedCount++
// Track failed apps
if r.NSAPP != "" {
appFailures[r.NSAPP]++
}
// Group errors by pattern
if r.Error != "" {
pattern := normalizeError(r.Error)
if errorPatterns[pattern] == nil {
errorPatterns[pattern] = make(map[string]bool)
}
if r.NSAPP != "" {
errorPatterns[pattern][r.NSAPP] = true
}
}
case "installing": case "installing":
data.InstallingCount++ data.InstallingCount++
} }
@ -96,10 +142,20 @@ func (p *PBClient) FetchDashboardData(ctx context.Context, days int) (*Dashboard
methodCounts[r.Method]++
}
// Count PVE versions
if r.PveVer != "" {
pveCounts[r.PveVer]++
}
// Count types (LXC vs VM)
if r.Type != "" {
typeCounts[r.Type]++
}
// Daily stats (use Created field if available)
if r.Created != "" {
date := r.Created[:10] // "2026-02-09"
if r.Status == "success" {
dailySuccess[date]++
} else if r.Status == "failed" {
dailyFailed[date]++
@ -117,6 +173,14 @@ func (p *PBClient) FetchDashboardData(ctx context.Context, days int) (*Dashboard
data.TopApps = topN(appCounts, 10)
data.OsDistribution = topNOs(osCounts, 10)
data.MethodStats = topNMethod(methodCounts, 10)
data.PveVersions = topNPve(pveCounts, 10)
data.TypeStats = topNType(typeCounts, 10)
// Error analysis
data.ErrorAnalysis = buildErrorAnalysis(errorPatterns, 10)
// Failed apps with failure rates
data.FailedApps = buildFailedApps(appCounts, appFailures, 10)
// Daily stats for chart
data.DailyStats = buildDailyStats(dailySuccess, dailyFailed, days)
@ -234,6 +298,158 @@ func topNMethod(m map[string]int, n int) []MethodCount {
return result
}
func topNPve(m map[string]int, n int) []PveCount {
result := make([]PveCount, 0, len(m))
for k, v := range m {
result = append(result, PveCount{Version: k, Count: v})
}
for i := 0; i < len(result)-1; i++ {
for j := i + 1; j < len(result); j++ {
if result[j].Count > result[i].Count {
result[i], result[j] = result[j], result[i]
}
}
}
if len(result) > n {
return result[:n]
}
return result
}
func topNType(m map[string]int, n int) []TypeCount {
result := make([]TypeCount, 0, len(m))
for k, v := range m {
result = append(result, TypeCount{Type: k, Count: v})
}
for i := 0; i < len(result)-1; i++ {
for j := i + 1; j < len(result); j++ {
if result[j].Count > result[i].Count {
result[i], result[j] = result[j], result[i]
}
}
}
if len(result) > n {
return result[:n]
}
return result
}
// normalizeError simplifies error messages into patterns for grouping
func normalizeError(err string) string {
err = strings.TrimSpace(err)
if err == "" {
return "unknown"
}
// Normalize common patterns
err = strings.ToLower(err)
// Remove specific numbers, IPs, paths that vary
// Keep it simple for now - just truncate and normalize
if len(err) > 60 {
err = err[:60]
}
// Common error pattern replacements, checked in a fixed order so grouping is deterministic
// (ranging over a map would pick an arbitrary label when several patterns match)
patternChecks := []struct{ pattern, label string }{
{"connection refused", "connection refused"},
{"timeout", "timeout"},
{"no space left", "disk full"},
{"permission denied", "permission denied"},
{"not found", "not found"},
{"failed to download", "download failed"},
{"apt", "apt error"},
{"dpkg", "dpkg error"},
{"curl", "network error"},
{"wget", "network error"},
{"docker", "docker error"},
{"systemctl", "systemd error"},
{"service", "service error"},
}
for _, p := range patternChecks {
if strings.Contains(err, p.pattern) {
return p.label
}
}
// If no pattern matches, return first 40 chars
if len(err) > 40 {
return err[:40] + "..."
}
return err
}
func buildErrorAnalysis(patterns map[string]map[string]bool, n int) []ErrorGroup {
result := make([]ErrorGroup, 0, len(patterns))
for pattern, apps := range patterns {
appList := make([]string, 0, len(apps))
for app := range apps {
appList = append(appList, app)
}
// Limit app list display
appsStr := strings.Join(appList, ", ")
if len(appsStr) > 50 {
appsStr = appsStr[:47] + "..."
}
result = append(result, ErrorGroup{
Pattern: pattern,
Count: len(apps), // Number of unique apps with this error
Apps: appsStr,
})
}
// Sort by count descending
for i := 0; i < len(result)-1; i++ {
for j := i + 1; j < len(result); j++ {
if result[j].Count > result[i].Count {
result[i], result[j] = result[j], result[i]
}
}
}
if len(result) > n {
return result[:n]
}
return result
}
func buildFailedApps(total, failed map[string]int, n int) []AppFailure {
result := make([]AppFailure, 0)
for app, failCount := range failed {
totalCount := total[app]
if totalCount == 0 {
continue
}
rate := float64(failCount) / float64(totalCount) * 100
result = append(result, AppFailure{
App: app,
TotalCount: totalCount,
FailedCount: failCount,
FailureRate: rate,
})
}
// Sort by failure rate descending
for i := 0; i < len(result)-1; i++ {
for j := i + 1; j < len(result); j++ {
if result[j].FailureRate > result[i].FailureRate {
result[i], result[j] = result[j], result[i]
}
}
}
if len(result) > n {
return result[:n]
}
return result
}
func buildDailyStats(success, failed map[string]int, days int) []DailyStat {
result := make([]DailyStat, 0, days)
for i := days - 1; i >= 0; i-- {
@ -254,7 +470,9 @@ func DashboardHTML() string {
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Telemetry Dashboard - ProxmoxVE Helper Scripts</title>
<meta name="description" content="Installation telemetry dashboard for ProxmoxVE Helper Scripts">
<link rel="icon" href="data:image/svg+xml,<svg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 100 100'><text y='.9em' font-size='90'>📊</text></svg>">
<script src="https://cdn.jsdelivr.net/npm/chart.js"></script> <script src="https://cdn.jsdelivr.net/npm/chart.js"></script>
<style> <style>
:root { :root {
@ -271,6 +489,20 @@ func DashboardHTML() string {
--accent-purple: #a371f7;
}
[data-theme="light"] {
--bg-primary: #ffffff;
--bg-secondary: #f6f8fa;
--bg-tertiary: #eaeef2;
--border-color: #d0d7de;
--text-primary: #1f2328;
--text-secondary: #656d76;
--accent-blue: #0969da;
--accent-green: #1a7f37;
--accent-red: #cf222e;
--accent-yellow: #9a6700;
--accent-purple: #8250df;
}
* {
margin: 0;
padding: 0;
@ -468,7 +700,7 @@ func DashboardHTML() string {
font-weight: 500;
}
.status-badge.success { background: rgba(63, 185, 80, 0.2); color: var(--accent-green); }
.status-badge.failed { background: rgba(248, 81, 73, 0.2); color: var(--accent-red); }
.status-badge.installing { background: rgba(210, 153, 34, 0.2); color: var(--accent-yellow); }
@ -493,6 +725,190 @@ func DashboardHTML() string {
font-size: 12px;
color: var(--text-secondary);
}
.footer {
margin-top: 24px;
padding-top: 16px;
border-top: 1px solid var(--border-color);
display: flex;
justify-content: space-between;
align-items: center;
color: var(--text-secondary);
font-size: 12px;
}
.footer a {
color: var(--accent-blue);
text-decoration: none;
}
.footer a:hover {
text-decoration: underline;
}
.export-btn {
background: var(--bg-tertiary);
border-color: var(--border-color);
color: var(--text-primary);
}
.export-btn:hover {
background: var(--bg-secondary);
}
.pve-version-card {
background: var(--bg-secondary);
border: 1px solid var(--border-color);
border-radius: 8px;
padding: 16px;
margin-bottom: 24px;
}
.pve-version-card h3 {
font-size: 14px;
font-weight: 600;
margin-bottom: 12px;
color: var(--text-secondary);
}
.pve-versions {
display: flex;
flex-wrap: wrap;
gap: 8px;
}
.pve-badge {
background: var(--bg-tertiary);
padding: 6px 12px;
border-radius: 16px;
font-size: 12px;
display: flex;
align-items: center;
gap: 6px;
}
.pve-badge .count {
background: var(--accent-purple);
color: #fff;
padding: 2px 6px;
border-radius: 10px;
font-size: 10px;
}
.theme-toggle {
background: var(--bg-tertiary);
border: 1px solid var(--border-color);
color: var(--text-primary);
padding: 8px 12px;
border-radius: 6px;
cursor: pointer;
display: flex;
align-items: center;
gap: 6px;
}
.theme-toggle:hover {
border-color: var(--accent-blue);
}
.error-analysis-card {
background: var(--bg-secondary);
border: 1px solid var(--border-color);
border-radius: 8px;
padding: 16px;
margin-bottom: 24px;
}
.error-analysis-card h3 {
font-size: 14px;
font-weight: 600;
margin-bottom: 12px;
color: var(--text-secondary);
display: flex;
align-items: center;
gap: 8px;
}
.error-list {
display: flex;
flex-direction: column;
gap: 8px;
}
.error-item {
background: var(--bg-tertiary);
border-radius: 6px;
padding: 12px;
display: flex;
justify-content: space-between;
align-items: center;
}
.error-item .pattern {
font-family: monospace;
color: var(--accent-red);
font-size: 13px;
}
.error-item .meta {
font-size: 12px;
color: var(--text-secondary);
}
.error-item .count-badge {
background: var(--accent-red);
color: #fff;
padding: 4px 10px;
border-radius: 12px;
font-size: 12px;
font-weight: 600;
}
.failed-apps-grid {
display: grid;
grid-template-columns: repeat(auto-fill, minmax(200px, 1fr));
gap: 12px;
margin-top: 12px;
}
.failed-app-card {
background: var(--bg-tertiary);
border-radius: 6px;
padding: 12px;
}
.failed-app-card .app-name {
font-weight: 600;
margin-bottom: 4px;
}
.failed-app-card .failure-rate {
font-size: 20px;
font-weight: 600;
color: var(--accent-red);
}
.failed-app-card .details {
font-size: 11px;
color: var(--text-secondary);
}
.pagination {
display: flex;
justify-content: center;
align-items: center;
gap: 8px;
padding: 16px;
}
.pagination button {
padding: 6px 12px;
}
.pagination span {
color: var(--text-secondary);
font-size: 14px;
}
</style>
</head>
<body>
@ -510,8 +926,13 @@ func DashboardHTML() string {
<option value="14">Last 14 days</option> <option value="14">Last 14 days</option>
<option value="30" selected>Last 30 days</option> <option value="30" selected>Last 30 days</option>
<option value="90">Last 90 days</option> <option value="90">Last 90 days</option>
<option value="365">Last year</option>
</select> </select>
<button class="export-btn" onclick="exportCSV()">Export CSV</button>
<button onclick="refreshData()">Refresh</button> <button onclick="refreshData()">Refresh</button>
<button class="theme-toggle" onclick="toggleTheme()">
<span id="themeIcon">🌙</span>
</button>
<span class="last-updated" id="lastUpdated"></span> <span class="last-updated" id="lastUpdated"></span>
</div> </div>
</div> </div>
@ -539,6 +960,17 @@ func DashboardHTML() string {
<div class="label">Success Rate</div> <div class="label">Success Rate</div>
<div class="value rate" id="successRate">-</div> <div class="value rate" id="successRate">-</div>
</div> </div>
<div class="stat-card">
<div class="label">LXC / VM</div>
<div class="value" id="typeStats" style="font-size: 20px;">-</div>
</div>
</div>
<div class="pve-version-card">
<h3>Proxmox VE Versions</h3>
<div class="pve-versions" id="pveVersions">
<span class="loading">Loading...</span>
</div>
</div>
<div class="charts-grid">
@ -577,13 +1009,41 @@ func DashboardHTML() string {
</div>
</div>
<div class="error-analysis-card">
<h3>
<svg width="16" height="16" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2">
<circle cx="12" cy="12" r="10"/>
<line x1="12" y1="8" x2="12" y2="12"/>
<line x1="12" y1="16" x2="12.01" y2="16"/>
</svg>
Error Analysis
</h3>
<div class="error-list" id="errorList">
<span class="loading">Loading...</span>
</div>
</div>
<div class="error-analysis-card">
<h3>
<svg width="16" height="16" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2">
<path d="M10.29 3.86L1.82 18a2 2 0 0 0 1.71 3h16.94a2 2 0 0 0 1.71-3L13.71 3.86a2 2 0 0 0-3.42 0z"/>
<line x1="12" y1="9" x2="12" y2="13"/>
<line x1="12" y1="17" x2="12.01" y2="17"/>
</svg>
Apps with Highest Failure Rates
</h3>
<div class="failed-apps-grid" id="failedAppsGrid">
<span class="loading">Loading...</span>
</div>
</div>
<div class="table-card"> <div class="table-card">
<h3>Recent Installations</h3> <h3>Recent Installations</h3>
<div class="filters"> <div class="filters">
<input type="text" id="filterApp" placeholder="Filter by app..." oninput="filterTable()"> <input type="text" id="filterApp" placeholder="Filter by app..." oninput="filterTable()">
<select id="filterStatus" onchange="filterTable()"> <select id="filterStatus" onchange="filterTable()">
<option value="">All Status</option> <option value="">All Status</option>
<option value="sucess">Success</option> <option value="success">Success</option>
<option value="failed">Failed</option> <option value="failed">Failed</option>
<option value="installing">Installing</option> <option value="installing">Installing</option>
</select> </select>
@ -599,19 +1059,63 @@ func DashboardHTML() string {
<th>OS</th>
<th>Type</th>
<th>Method</th>
<th>Resources</th>
<th>Exit Code</th>
<th>Error</th>
</tr>
</thead>
<tbody id="recordsTable">
<tr><td colspan="8" class="loading">Loading...</td></tr>
</tbody>
</table>
<div class="pagination">
<button onclick="prevPage()" id="prevBtn" disabled> Previous</button>
<span id="pageInfo">Page 1</span>
<button onclick="nextPage()" id="nextBtn">Next </button>
</div>
</div>
<div class="footer">
<div>
<a href="https://github.com/community-scripts/ProxmoxVED" target="_blank">ProxmoxVE Helper Scripts</a>
&bull; Telemetry is anonymous and privacy-friendly
</div>
<div>
<a href="/healthz" target="_blank">Health Check</a> &bull;
<a href="/metrics" target="_blank">Metrics</a> &bull;
<a href="/api/dashboard" target="_blank">API</a>
</div>
</div>
<script>
let charts = {};
let allRecords = [];
let currentPage = 1;
let totalPages = 1;
let currentTheme = localStorage.getItem('theme') || 'dark';
// Apply saved theme on load
if (currentTheme === 'light') {
document.documentElement.setAttribute('data-theme', 'light');
document.getElementById('themeIcon').textContent = '';
}
function toggleTheme() {
if (currentTheme === 'dark') {
document.documentElement.setAttribute('data-theme', 'light');
document.getElementById('themeIcon').textContent = '';
currentTheme = 'light';
} else {
document.documentElement.removeAttribute('data-theme');
document.getElementById('themeIcon').textContent = '🌙';
currentTheme = 'dark';
}
localStorage.setItem('theme', currentTheme);
// Redraw charts with new colors
if (Object.keys(charts).length > 0) {
refreshData();
}
}
const chartColors = {
blue: 'rgba(88, 166, 255, 0.8)',
@ -663,6 +1167,68 @@ func DashboardHTML() string {
document.getElementById('successRate').textContent = data.success_rate.toFixed(1) + '%';
document.getElementById('lastUpdated').textContent = 'Updated: ' + new Date().toLocaleTimeString();
document.getElementById('error').style.display = 'none';
// Type stats (LXC/VM)
if (data.type_stats && data.type_stats.length > 0) {
const lxc = data.type_stats.find(t => t.type === 'lxc');
const vm = data.type_stats.find(t => t.type === 'vm');
document.getElementById('typeStats').textContent =
(lxc ? lxc.count.toLocaleString() : '0') + ' / ' + (vm ? vm.count.toLocaleString() : '0');
}
// PVE Versions
if (data.pve_versions && data.pve_versions.length > 0) {
document.getElementById('pveVersions').innerHTML = data.pve_versions.map(p =>
'<span class="pve-badge">PVE ' + (p.version || 'unknown') + ' <span class="count">' + p.count + '</span></span>'
).join('');
} else {
document.getElementById('pveVersions').innerHTML = '<span>No version data</span>';
}
// Error Analysis
updateErrorAnalysis(data.error_analysis || []);
// Failed Apps
updateFailedApps(data.failed_apps || []);
}
function updateErrorAnalysis(errors) {
const container = document.getElementById('errorList');
if (!errors || errors.length === 0) {
container.innerHTML = '<span class="loading">No errors recorded</span>';
return;
}
container.innerHTML = errors.slice(0, 8).map(e =>
'<div class="error-item">' +
'<div>' +
'<div class="pattern">' + escapeHtml(e.pattern) + '</div>' +
'<div class="meta">Affects: ' + escapeHtml(e.apps) + '</div>' +
'</div>' +
'<span class="count-badge">' + e.count + ' apps</span>' +
'</div>'
).join('');
}
function updateFailedApps(apps) {
const container = document.getElementById('failedAppsGrid');
if (!apps || apps.length === 0) {
container.innerHTML = '<span class="loading">No failures recorded</span>';
return;
}
container.innerHTML = apps.slice(0, 8).map(a =>
'<div class="failed-app-card">' +
'<div class="app-name">' + escapeHtml(a.app) + '</div>' +
'<div class="failure-rate">' + a.failure_rate.toFixed(1) + '%</div>' +
'<div class="details">' + a.failed_count + ' / ' + a.total_count + ' failed</div>' +
'</div>'
).join('');
}
function escapeHtml(str) {
if (!str) return '';
return str.replace(/&/g, '&amp;').replace(/</g, '&lt;').replace(/>/g, '&gt;').replace(/"/g, '&quot;');
}
function updateCharts(data) {
@ -793,39 +1359,109 @@ func DashboardHTML() string {
filterTable();
}
async function fetchPaginatedRecords() {
const status = document.getElementById('filterStatus').value;
const app = document.getElementById('filterApp').value;
const os = document.getElementById('filterOs').value;
try {
let url = '/api/records?page=' + currentPage + '&limit=50';
if (status) url += '&status=' + encodeURIComponent(status);
if (app) url += '&app=' + encodeURIComponent(app);
if (os) url += '&os=' + encodeURIComponent(os);
const response = await fetch(url);
if (!response.ok) throw new Error('Failed to fetch records');
const data = await response.json();
totalPages = data.total_pages || 1;
document.getElementById('pageInfo').textContent = 'Page ' + currentPage + ' of ' + totalPages + ' (' + data.total + ' total)';
document.getElementById('prevBtn').disabled = currentPage <= 1;
document.getElementById('nextBtn').disabled = currentPage >= totalPages;
renderTableRows(data.records || []);
} catch (e) {
console.error('Pagination error:', e);
}
}
function prevPage() {
if (currentPage > 1) {
currentPage--;
fetchPaginatedRecords();
}
}
function nextPage() {
if (currentPage < totalPages) {
currentPage++;
fetchPaginatedRecords();
}
}
function renderTableRows(records) {
const tbody = document.getElementById('recordsTable');
if (records.length === 0) {
tbody.innerHTML = '<tr><td colspan="8" class="loading">No records found</td></tr>';
return;
}
tbody.innerHTML = records.map(r => {
const statusClass = r.status || 'unknown';
const resources = r.core_count || r.ram_size || r.disk_size
? (r.core_count || '?') + 'C / ' + (r.ram_size ? Math.round(r.ram_size/1024) + 'G' : '?') + ' / ' + (r.disk_size || '?') + 'GB'
: '-';
return '<tr>' +
'<td><strong>' + escapeHtml(r.nsapp || '-') + '</strong></td>' +
'<td><span class="status-badge ' + statusClass + '">' + escapeHtml(r.status || '-') + '</span></td>' +
'<td>' + escapeHtml(r.os_type || '-') + ' ' + escapeHtml(r.os_version || '') + '</td>' +
'<td>' + escapeHtml(r.type || '-') + '</td>' +
'<td>' + escapeHtml(r.method || 'default') + '</td>' +
'<td>' + resources + '</td>' +
'<td>' + (r.exit_code || '-') + '</td>' +
'<td title="' + escapeHtml(r.error || '') + '">' +
escapeHtml((r.error || '').slice(0, 40)) + (r.error && r.error.length > 40 ? '...' : '') + '</td>' +
'</tr>';
}).join('');
}
function filterTable() {
currentPage = 1;
fetchPaginatedRecords();
}
function exportCSV() {
if (allRecords.length === 0) {
alert('No data to export');
return;
}
const headers = ['App', 'Status', 'OS Type', 'OS Version', 'Type', 'Method', 'Cores', 'RAM (MB)', 'Disk (GB)', 'Exit Code', 'Error', 'PVE Version'];
const rows = allRecords.map(r => [
r.nsapp || '',
r.status || '',
r.os_type || '',
r.os_version || '',
r.type || '',
r.method || '',
r.core_count || '',
r.ram_size || '',
r.disk_size || '',
r.exit_code || '',
(r.error || '').replace(/,/g, ';'),
r.pve_version || ''
]);
const csv = [headers.join(','), ...rows.map(r => r.join(','))].join('\\n');
const blob = new Blob([csv], { type: 'text/csv' });
const url = URL.createObjectURL(blob);
const a = document.createElement('a');
a.href = url;
a.download = 'telemetry_' + new Date().toISOString().slice(0,10) + '.csv';
a.click();
URL.revokeObjectURL(url);
}
async function refreshData() {
try {
const data = await fetchData();

misc/data/entrypoint.sh Normal file

@ -0,0 +1,55 @@
#!/bin/sh
set -e
echo "============================================="
echo " ProxmoxVED Telemetry Service"
echo "============================================="
# Map Coolify ENV names to migration script names
# Coolify uses PB_URL, PB_TARGET_COLLECTION
export POCKETBASE_URL="${POCKETBASE_URL:-$PB_URL}"
export POCKETBASE_COLLECTION="${POCKETBASE_COLLECTION:-$PB_TARGET_COLLECTION}"
# Run migration if enabled
if [ "$RUN_MIGRATION" = "true" ]; then
echo ""
echo "🔄 Migration mode enabled"
echo " Source: $MIGRATION_SOURCE_URL"
echo " Target: $POCKETBASE_URL"
echo " Collection: $POCKETBASE_COLLECTION"
echo ""
# Wait for PocketBase to be ready
echo "⏳ Waiting for PocketBase to be ready..."
RETRIES=30
until wget -q --spider "$POCKETBASE_URL/api/health" 2>/dev/null; do
RETRIES=$((RETRIES - 1))
if [ $RETRIES -le 0 ]; then
echo "❌ PocketBase not reachable after 30 attempts"
if [ "$MIGRATION_REQUIRED" = "true" ]; then
exit 1
fi
echo "⚠️ Continuing without migration..."
break
fi
echo " Waiting... ($RETRIES attempts left)"
sleep 2
done
if wget -q --spider "$POCKETBASE_URL/api/health" 2>/dev/null; then
echo "✅ PocketBase is ready"
echo ""
echo "🚀 Starting migration..."
/app/migrate || {
if [ "$MIGRATION_REQUIRED" = "true" ]; then
echo "❌ Migration failed!"
exit 1
fi
echo "⚠️ Migration failed, but continuing..."
}
echo ""
fi
fi
echo "🚀 Starting telemetry service..."
exec /app/telemetry-ingest
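Since the entrypoint exports POCKETBASE_URL/POCKETBASE_COLLECTION from the Coolify-style PB_URL/PB_TARGET_COLLECTION, either spelling works. A one-off import might look like the sketch below (image name and URL are placeholders); the container continues into the normal service once the migration finishes:

  docker run -d -p 8080:8080 \
    -e RUN_MIGRATION="true" -e MIGRATION_REQUIRED="true" \
    -e PB_URL="http://pocketbase:8090" -e PB_TARGET_COLLECTION="_dev_telemetry_data" \
    telemetry-ingest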


@ -1,3 +1,10 @@
module telemetry-ingest
go 1.25.5
require github.com/redis/go-redis/v9 v9.7.0
require (
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
)

misc/data/go.sum Normal file

@ -0,0 +1,10 @@
github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs=
github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c=
github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA=
github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/redis/go-redis/v9 v9.7.0 h1:HhLSs+B6O021gwzl+locl0zEDnyNkxMtf/Z3NNBMa9E=
github.com/redis/go-redis/v9 v9.7.0/go.mod h1:f6zhXITC7JUJIlPEiBOTXxJgPLdZcA93GewI7inzyWw=

misc/data/migrate.go Normal file

@ -0,0 +1,366 @@
//go:build ignore
// +build ignore
// Migration script to import data from the old API to PocketBase
// Run with: go run migrate.go
package main
import (
"bytes"
"encoding/json"
"fmt"
"io"
"net/http"
"os"
"time"
)
const (
defaultSourceAPI = "https://api.htl-braunau.at/dev/data"
defaultPBURL = "http://localhost:8090"
batchSize = 100
)
var (
sourceAPI string
summaryAPI string
authToken string // PocketBase auth token
)
// OldDataModel represents the data structure from the old API
type OldDataModel struct {
ID string `json:"id"`
CtType int `json:"ct_type"`
DiskSize int `json:"disk_size"`
CoreCount int `json:"core_count"`
RamSize int `json:"ram_size"`
OsType string `json:"os_type"`
OsVersion string `json:"os_version"`
DisableIP6 string `json:"disableip6"`
NsApp string `json:"nsapp"`
Method string `json:"method"`
CreatedAt string `json:"created_at"`
PveVersion string `json:"pve_version"`
Status string `json:"status"`
RandomID string `json:"random_id"`
Type string `json:"type"`
Error string `json:"error"`
}
// PBRecord represents the PocketBase record format
type PBRecord struct {
CtType int `json:"ct_type"`
DiskSize int `json:"disk_size"`
CoreCount int `json:"core_count"`
RamSize int `json:"ram_size"`
OsType string `json:"os_type"`
OsVersion string `json:"os_version"`
DisableIP6 string `json:"disableip6"`
NsApp string `json:"nsapp"`
Method string `json:"method"`
PveVersion string `json:"pve_version"`
Status string `json:"status"`
RandomID string `json:"random_id"`
Type string `json:"type"`
Error string `json:"error"`
// created_at will be set automatically by PocketBase
}
type Summary struct {
TotalEntries int `json:"total_entries"`
}
func main() {
// Setup source URLs
baseURL := os.Getenv("MIGRATION_SOURCE_URL")
if baseURL == "" {
baseURL = defaultSourceAPI
}
sourceAPI = baseURL + "/paginated"
summaryAPI = baseURL + "/summary"
// Support both POCKETBASE_URL and PB_URL (Coolify uses PB_URL)
pbURL := os.Getenv("POCKETBASE_URL")
if pbURL == "" {
pbURL = os.Getenv("PB_URL")
}
if pbURL == "" {
pbURL = defaultPBURL
}
// Support both POCKETBASE_COLLECTION and PB_TARGET_COLLECTION
pbCollection := os.Getenv("POCKETBASE_COLLECTION")
if pbCollection == "" {
pbCollection = os.Getenv("PB_TARGET_COLLECTION")
}
if pbCollection == "" {
pbCollection = "_dev_telemetry_data"
}
// Auth collection
authCollection := os.Getenv("PB_AUTH_COLLECTION")
if authCollection == "" {
authCollection = "_dev_telemetry_service"
}
// Credentials
pbIdentity := os.Getenv("PB_IDENTITY")
pbPassword := os.Getenv("PB_PASSWORD")
fmt.Println("===========================================")
fmt.Println(" Data Migration to PocketBase")
fmt.Println("===========================================")
fmt.Printf("Source API: %s\n", baseURL)
fmt.Printf("PocketBase URL: %s\n", pbURL)
fmt.Printf("Collection: %s\n", pbCollection)
fmt.Printf("Auth Collection: %s\n", authCollection)
fmt.Println("-------------------------------------------")
// Authenticate with PocketBase
if pbIdentity != "" && pbPassword != "" {
fmt.Println("🔐 Authenticating with PocketBase...")
err := authenticate(pbURL, authCollection, pbIdentity, pbPassword)
if err != nil {
fmt.Printf("❌ Authentication failed: %v\n", err)
os.Exit(1)
}
fmt.Println("✅ Authentication successful")
} else {
fmt.Println("⚠️ No credentials provided, trying without auth...")
}
fmt.Println("-------------------------------------------")
// Get total count
summary, err := getSummary()
if err != nil {
fmt.Printf("❌ Failed to get summary: %v\n", err)
os.Exit(1)
}
fmt.Printf("📊 Total entries to migrate: %d\n", summary.TotalEntries)
fmt.Println("-------------------------------------------")
// Calculate pages
totalPages := (summary.TotalEntries + batchSize - 1) / batchSize
var totalMigrated, totalFailed, totalSkipped int
for page := 1; page <= totalPages; page++ {
fmt.Printf("📦 Fetching page %d/%d (items %d-%d)...\n",
page, totalPages,
(page-1)*batchSize+1,
min(page*batchSize, summary.TotalEntries))
data, err := fetchPage(page, batchSize)
if err != nil {
fmt.Printf(" ❌ Failed to fetch page %d: %v\n", page, err)
totalFailed += batchSize
continue
}
for i, record := range data {
err := importRecord(pbURL, pbCollection, record)
if err != nil {
if isUniqueViolation(err) {
totalSkipped++
continue
}
fmt.Printf(" ❌ Failed to import record %d: %v\n", (page-1)*batchSize+i+1, err)
totalFailed++
continue
}
totalMigrated++
}
fmt.Printf(" ✅ Page %d complete (migrated: %d, skipped: %d, failed: %d)\n",
page, len(data), totalSkipped, totalFailed)
// Small delay to avoid overwhelming the server
time.Sleep(100 * time.Millisecond)
}
fmt.Println("===========================================")
fmt.Println(" Migration Complete")
fmt.Println("===========================================")
fmt.Printf("✅ Successfully migrated: %d\n", totalMigrated)
fmt.Printf("⏭️ Skipped (duplicates): %d\n", totalSkipped)
fmt.Printf("❌ Failed: %d\n", totalFailed)
fmt.Println("===========================================")
}
func getSummary() (*Summary, error) {
resp, err := http.Get(summaryAPI)
if err != nil {
return nil, err
}
defer resp.Body.Close()
var summary Summary
if err := json.NewDecoder(resp.Body).Decode(&summary); err != nil {
return nil, err
}
return &summary, nil
}
func authenticate(pbURL, authCollection, identity, password string) error {
body := map[string]string{
"identity": identity,
"password": password,
}
jsonData, _ := json.Marshal(body)
url := fmt.Sprintf("%s/api/collections/%s/auth-with-password", pbURL, authCollection)
req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonData))
if err != nil {
return err
}
req.Header.Set("Content-Type", "application/json")
client := &http.Client{Timeout: 10 * time.Second}
resp, err := client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
body, _ := io.ReadAll(resp.Body)
return fmt.Errorf("HTTP %d: %s", resp.StatusCode, string(body))
}
var result struct {
Token string `json:"token"`
}
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
return err
}
if result.Token == "" {
return fmt.Errorf("no token in response")
}
authToken = result.Token
return nil
}
func fetchPage(page, limit int) ([]OldDataModel, error) {
url := fmt.Sprintf("%s?page=%d&limit=%d", sourceAPI, page, limit)
resp, err := http.Get(url)
if err != nil {
return nil, err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
body, _ := io.ReadAll(resp.Body)
return nil, fmt.Errorf("HTTP %d: %s", resp.StatusCode, string(body))
}
var data []OldDataModel
if err := json.NewDecoder(resp.Body).Decode(&data); err != nil {
return nil, err
}
return data, nil
}
func importRecord(pbURL, collection string, old OldDataModel) error {
// Map status: "done" -> "success"
status := old.Status
switch status {
case "done":
status = "success"
case "installing", "failed", "unknown", "success":
// keep as-is
default:
status = "unknown"
}
// Ensure ct_type is not 0 (required field)
ctType := old.CtType
if ctType == 0 {
ctType = 1 // default to unprivileged
}
// Ensure type is set
recordType := old.Type
if recordType == "" {
recordType = "lxc"
}
record := PBRecord{
CtType: ctType,
DiskSize: old.DiskSize,
CoreCount: old.CoreCount,
RamSize: old.RamSize,
OsType: old.OsType,
OsVersion: old.OsVersion,
DisableIP6: old.DisableIP6,
NsApp: old.NsApp,
Method: old.Method,
PveVersion: old.PveVersion,
Status: status,
RandomID: old.RandomID,
Type: recordType,
Error: old.Error,
}
jsonData, err := json.Marshal(record)
if err != nil {
return err
}
url := fmt.Sprintf("%s/api/collections/%s/records", pbURL, collection)
req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonData))
if err != nil {
return err
}
req.Header.Set("Content-Type", "application/json")
if authToken != "" {
req.Header.Set("Authorization", "Bearer "+authToken)
}
client := &http.Client{Timeout: 10 * time.Second}
resp, err := client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusCreated {
body, _ := io.ReadAll(resp.Body)
return fmt.Errorf("HTTP %d: %s", resp.StatusCode, string(body))
}
return nil
}
func isUniqueViolation(err error) bool {
if err == nil {
return false
}
errStr := err.Error()
return contains(errStr, "UNIQUE constraint failed") ||
contains(errStr, "duplicate") ||
contains(errStr, "already exists") ||
contains(errStr, "validation_not_unique")
}
func contains(s, substr string) bool {
return len(s) >= len(substr) && (s == substr || len(s) > 0 && containsHelper(s, substr))
}
func containsHelper(s, substr string) bool {
for i := 0; i <= len(s)-len(substr); i++ {
if s[i:i+len(substr)] == substr {
return true
}
}
return false
}
func min(a, b int) int {
if a < b {
return a
}
return b
}
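The tool also runs outside the container; every knob is a plain environment variable read at the top of main(). A sketch against a local PocketBase with authentication (identity and password are placeholders):

  cd misc/data
  POCKETBASE_URL="http://localhost:8090" \
  POCKETBASE_COLLECTION="_dev_telemetry_data" \
  PB_AUTH_COLLECTION="_dev_telemetry_service" \
  PB_IDENTITY="service@example.com" PB_PASSWORD="changeme" \
  go run migrate.go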

misc/data/migrate.sh Executable file

@ -0,0 +1,67 @@
#!/bin/bash
# Migration script to import data from the old API to PocketBase
# Usage: ./migrate.sh [POCKETBASE_URL] [COLLECTION_NAME]
#
# Examples:
# ./migrate.sh # Uses defaults
# ./migrate.sh http://localhost:8090 # Custom PB URL
# ./migrate.sh http://localhost:8090 my_telemetry # Custom URL and collection
set -e
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# Default values
POCKETBASE_URL="${1:-http://localhost:8090}"
POCKETBASE_COLLECTION="${2:-_dev_telemetry_data}"
echo "============================================="
echo " ProxmoxVED Data Migration Tool"
echo "============================================="
echo ""
echo "This script will migrate telemetry data from:"
echo " Source: https://api.htl-braunau.at/dev/data"
echo " Target: $POCKETBASE_URL"
echo " Collection: $POCKETBASE_COLLECTION"
echo ""
# Check if PocketBase is reachable
echo "🔍 Checking PocketBase connection..."
if ! curl -sf "$POCKETBASE_URL/api/health" > /dev/null 2>&1; then
echo "❌ Cannot reach PocketBase at $POCKETBASE_URL"
echo " Make sure PocketBase is running and the URL is correct."
exit 1
fi
echo "✅ PocketBase is reachable"
echo ""
# Check source API
echo "🔍 Checking source API..."
SUMMARY=$(curl -sf "https://api.htl-braunau.at/dev/data/summary" 2>/dev/null || echo "")
if [ -z "$SUMMARY" ]; then
echo "❌ Cannot reach source API"
exit 1
fi
TOTAL=$(echo "$SUMMARY" | grep -o '"total_entries":[0-9]*' | cut -d: -f2)
echo "✅ Source API is reachable ($TOTAL entries available)"
echo ""
# Confirm migration
read -p "⚠️ Do you want to start the migration? [y/N] " -n 1 -r
echo ""
if [[ ! $REPLY =~ ^[Yy]$ ]]; then
echo "Migration cancelled."
exit 0
fi
echo ""
echo "Starting migration..."
echo ""
# Run the Go migration script
cd "$SCRIPT_DIR"
POCKETBASE_URL="$POCKETBASE_URL" POCKETBASE_COLLECTION="$POCKETBASE_COLLECTION" go run migrate.go
echo ""
echo "Migration complete!"


@ -37,6 +37,25 @@ type Config struct {
RateKeyHeader string // e.g. "X-Telemetry-Key"
RequestTimeout time.Duration // upstream timeout
EnableReqLogging bool // default false (GDPR-friendly)
// Cache
RedisURL string
EnableRedis bool
CacheTTL time.Duration
CacheEnabled bool
// Alerts (SMTP)
AlertEnabled bool
SMTPHost string
SMTPPort int
SMTPUser string
SMTPPassword string
SMTPFrom string
SMTPTo []string
SMTPUseTLS bool
AlertFailureThreshold float64
AlertCheckInterval time.Duration
AlertCooldown time.Duration
}
// TelemetryIn matches payload from api.func (bash client)
@ -45,7 +64,7 @@ type TelemetryIn struct {
RandomID string `json:"random_id"` // Session UUID
Type string `json:"type"` // "lxc" or "vm"
NSAPP string `json:"nsapp"` // Application name (e.g., "jellyfin")
Status string `json:"status"` // "installing", "success", "failed", "unknown"
// Container/VM specs
CTType int `json:"ct_type,omitempty"` // 1=unprivileged, 2=privileged/VM
@ -238,6 +257,59 @@ func (p *PBClient) UpdateTelemetryStatus(ctx context.Context, recordID string, u
return nil
}
// FetchRecordsPaginated retrieves records with pagination and optional filters
func (p *PBClient) FetchRecordsPaginated(ctx context.Context, page, limit int, status, app, osType string) ([]TelemetryRecord, int, error) {
if err := p.ensureAuth(ctx); err != nil {
return nil, 0, err
}
// Build filter
var filters []string
if status != "" {
filters = append(filters, fmt.Sprintf("status='%s'", status))
}
if app != "" {
filters = append(filters, fmt.Sprintf("nsapp~'%s'", app))
}
if osType != "" {
filters = append(filters, fmt.Sprintf("os_type='%s'", osType))
}
filterStr := ""
if len(filters) > 0 {
filterStr = "&filter=" + strings.Join(filters, "&&")
}
reqURL := fmt.Sprintf("%s/api/collections/%s/records?sort=-created&page=%d&perPage=%d%s",
p.baseURL, p.targetColl, page, limit, filterStr)
req, err := http.NewRequestWithContext(ctx, http.MethodGet, reqURL, nil)
if err != nil {
return nil, 0, err
}
req.Header.Set("Authorization", "Bearer "+p.token)
resp, err := p.http.Do(req)
if err != nil {
return nil, 0, err
}
defer resp.Body.Close()
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
return nil, 0, fmt.Errorf("pocketbase fetch failed: %s", resp.Status)
}
var result struct {
Items []TelemetryRecord `json:"items"`
TotalItems int `json:"totalItems"`
}
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
return nil, 0, err
}
return result.Items, result.TotalItems, nil
}
// UpsertTelemetry handles both creation and updates intelligently
// - status="installing": Always creates a new record
// - status!="installing": Updates existing record (found by random_id) with status/error/exit_code only
@ -247,7 +319,7 @@ func (p *PBClient) UpsertTelemetry(ctx context.Context, payload TelemetryOut) er
return p.CreateTelemetry(ctx, payload)
}
// For status updates (success/failed/unknown), find and update existing record
recordID, err := p.FindRecordByRandomID(ctx, payload.RandomID)
if err != nil {
// Search failed, log and return error
@ -421,8 +493,8 @@ var (
// Allowed values for 'type' field
allowedType = map[string]bool{"lxc": true, "vm": true}
// Allowed values for 'status' field
allowedStatus = map[string]bool{"installing": true, "success": true, "failed": true, "unknown": true}
// Allowed values for 'os_type' field
allowedOsType = map[string]bool{
@ -536,6 +608,25 @@ func main() {
RateKeyHeader: env("RATE_KEY_HEADER", "X-Telemetry-Key"),
RequestTimeout: time.Duration(envInt("UPSTREAM_TIMEOUT_MS", 4000)) * time.Millisecond,
EnableReqLogging: envBool("ENABLE_REQUEST_LOGGING", false),
// Cache config
RedisURL: env("REDIS_URL", ""),
EnableRedis: envBool("ENABLE_REDIS", false),
CacheTTL: time.Duration(envInt("CACHE_TTL_SECONDS", 60)) * time.Second,
CacheEnabled: envBool("ENABLE_CACHE", true),
// Alert config
AlertEnabled: envBool("ALERT_ENABLED", false),
SMTPHost: env("SMTP_HOST", ""),
SMTPPort: envInt("SMTP_PORT", 587),
SMTPUser: env("SMTP_USER", ""),
SMTPPassword: env("SMTP_PASSWORD", ""),
SMTPFrom: env("SMTP_FROM", "telemetry@proxmoxved.local"),
SMTPTo: splitCSV(env("SMTP_TO", "")),
SMTPUseTLS: envBool("SMTP_USE_TLS", false),
AlertFailureThreshold: envFloat("ALERT_FAILURE_THRESHOLD", 20.0),
AlertCheckInterval: time.Duration(envInt("ALERT_CHECK_INTERVAL_MIN", 15)) * time.Minute,
AlertCooldown: time.Duration(envInt("ALERT_COOLDOWN_MIN", 60)) * time.Minute,
}
var pt *ProxyTrust
@ -550,20 +641,100 @@ func main() {
pb := NewPBClient(cfg)
rl := NewRateLimiter(cfg.RateLimitRPM, cfg.RateBurst)
// Initialize cache
cache := NewCache(CacheConfig{
RedisURL: cfg.RedisURL,
EnableRedis: cfg.EnableRedis,
DefaultTTL: cfg.CacheTTL,
})
// Initialize alerter
alerter := NewAlerter(AlertConfig{
Enabled: cfg.AlertEnabled,
SMTPHost: cfg.SMTPHost,
SMTPPort: cfg.SMTPPort,
SMTPUser: cfg.SMTPUser,
SMTPPassword: cfg.SMTPPassword,
SMTPFrom: cfg.SMTPFrom,
SMTPTo: cfg.SMTPTo,
UseTLS: cfg.SMTPUseTLS,
FailureThreshold: cfg.AlertFailureThreshold,
CheckInterval: cfg.AlertCheckInterval,
Cooldown: cfg.AlertCooldown,
}, pb)
alerter.Start()
mux := http.NewServeMux()
mux.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
// Check PocketBase connectivity
ctx, cancel := context.WithTimeout(r.Context(), 2*time.Second)
defer cancel()
status := map[string]interface{}{
"status": "ok",
"time": time.Now().UTC().Format(time.RFC3339),
}
// Content-Type must be set before WriteHeader, otherwise it is ignored
w.Header().Set("Content-Type", "application/json")
if err := pb.ensureAuth(ctx); err != nil {
status["status"] = "degraded"
status["pocketbase"] = "disconnected"
w.WriteHeader(503)
} else {
status["pocketbase"] = "connected"
w.WriteHeader(200)
}
json.NewEncoder(w).Encode(status)
})
// Dashboard HTML page - serve on root
mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
if r.URL.Path != "/" {
http.NotFound(w, r)
return
}
w.Header().Set("Content-Type", "text/html; charset=utf-8")
w.Header().Set("Cache-Control", "no-cache, no-store, must-revalidate")
_, _ = w.Write([]byte(DashboardHTML()))
})
// Redirect /dashboard to / for backwards compatibility
mux.HandleFunc("/dashboard", func(w http.ResponseWriter, r *http.Request) {
http.Redirect(w, r, "/", http.StatusMovedPermanently)
})
// Prometheus-style metrics endpoint
mux.HandleFunc("/metrics", func(w http.ResponseWriter, r *http.Request) {
ctx, cancel := context.WithTimeout(r.Context(), 5*time.Second)
defer cancel()
data, err := pb.FetchDashboardData(ctx, 1) // Last 24h only for metrics
if err != nil {
http.Error(w, "failed to fetch metrics", http.StatusInternalServerError)
return
}
w.Header().Set("Content-Type", "text/plain; version=0.0.4")
fmt.Fprintf(w, "# HELP telemetry_installs_total Total number of installations\n")
fmt.Fprintf(w, "# TYPE telemetry_installs_total counter\n")
fmt.Fprintf(w, "telemetry_installs_total %d\n\n", data.TotalInstalls)
fmt.Fprintf(w, "# HELP telemetry_installs_success_total Successful installations\n")
fmt.Fprintf(w, "# TYPE telemetry_installs_success_total counter\n")
fmt.Fprintf(w, "telemetry_installs_success_total %d\n\n", data.SuccessCount)
fmt.Fprintf(w, "# HELP telemetry_installs_failed_total Failed installations\n")
fmt.Fprintf(w, "# TYPE telemetry_installs_failed_total counter\n")
fmt.Fprintf(w, "telemetry_installs_failed_total %d\n\n", data.FailedCount)
fmt.Fprintf(w, "# HELP telemetry_installs_pending Current installing count\n")
fmt.Fprintf(w, "# TYPE telemetry_installs_pending gauge\n")
fmt.Fprintf(w, "telemetry_installs_pending %d\n\n", data.InstallingCount)
fmt.Fprintf(w, "# HELP telemetry_success_rate Success rate percentage\n")
fmt.Fprintf(w, "# TYPE telemetry_success_rate gauge\n")
fmt.Fprintf(w, "telemetry_success_rate %.2f\n", data.SuccessRate)
})
// Dashboard API endpoint (with caching)
mux.HandleFunc("/api/dashboard", func(w http.ResponseWriter, r *http.Request) {
days := 30
if d := r.URL.Query().Get("days"); d != "" {
@ -579,6 +750,16 @@ func main() {
ctx, cancel := context.WithTimeout(r.Context(), 10*time.Second)
defer cancel()
// Try cache first
cacheKey := fmt.Sprintf("dashboard:%d", days)
var data *DashboardData
if cfg.CacheEnabled && cache.Get(ctx, cacheKey, &data) {
w.Header().Set("Content-Type", "application/json")
w.Header().Set("X-Cache", "HIT")
json.NewEncoder(w).Encode(data)
return
}
data, err := pb.FetchDashboardData(ctx, days)
if err != nil {
log.Printf("dashboard fetch failed: %v", err)
@ -586,10 +767,86 @@ func main() {
return
}
// Cache the result
if cfg.CacheEnabled {
_ = cache.Set(ctx, cacheKey, data, cfg.CacheTTL)
}
w.Header().Set("Content-Type", "application/json") w.Header().Set("Content-Type", "application/json")
w.Header().Set("X-Cache", "MISS")
json.NewEncoder(w).Encode(data) json.NewEncoder(w).Encode(data)
}) })
// Paginated records API
mux.HandleFunc("/api/records", func(w http.ResponseWriter, r *http.Request) {
page := 1
limit := 50
status := r.URL.Query().Get("status")
app := r.URL.Query().Get("app")
osType := r.URL.Query().Get("os")
if p := r.URL.Query().Get("page"); p != "" {
fmt.Sscanf(p, "%d", &page)
if page < 1 {
page = 1
}
}
if l := r.URL.Query().Get("limit"); l != "" {
fmt.Sscanf(l, "%d", &limit)
if limit < 1 {
limit = 1
}
if limit > 100 {
limit = 100
}
}
ctx, cancel := context.WithTimeout(r.Context(), 10*time.Second)
defer cancel()
records, total, err := pb.FetchRecordsPaginated(ctx, page, limit, status, app, osType)
if err != nil {
log.Printf("records fetch failed: %v", err)
http.Error(w, "failed to fetch records", http.StatusInternalServerError)
return
}
response := map[string]interface{}{
"records": records,
"page": page,
"limit": limit,
"total": total,
"total_pages": (total + limit - 1) / limit,
}
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(response)
})
// Alert history and test endpoints
mux.HandleFunc("/api/alerts", func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(map[string]interface{}{
"enabled": cfg.AlertEnabled,
"history": alerter.GetAlertHistory(),
})
})
mux.HandleFunc("/api/alerts/test", func(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodPost {
http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
return
}
if err := alerter.TestAlert(); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.WriteHeader(http.StatusOK)
w.Write([]byte("test alert sent"))
})
mux.HandleFunc("/telemetry", func(w http.ResponseWriter, r *http.Request) { mux.HandleFunc("/telemetry", func(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodPost { if r.Method != http.MethodPost {
http.Error(w, "method not allowed", http.StatusMethodNotAllowed) http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
@ -741,6 +998,18 @@ func envBool(k string, def bool) bool {
}
return v == "1" || v == "true" || v == "yes" || v == "on"
}
func envFloat(k string, def float64) float64 {
v := os.Getenv(k)
if v == "" {
return def
}
var f float64
if _, err := fmt.Sscanf(v, "%f", &f); err != nil {
return def
}
return f
}
func splitCSV(s string) []string {
s = strings.TrimSpace(s)
if s == "" {

misc/data/telemetry-service Executable file
Binary file not shown.