diff --git a/misc/tools.func b/misc/tools.func index b85a0547..48f406d9 100644 --- a/misc/tools.func +++ b/misc/tools.func @@ -344,11 +344,6 @@ remove_old_tool_version() { npm uninstall -g "$module" >/dev/null 2>&1 || true done || true fi - # Remove tarball installation if exists - if [[ -d "/usr/local/lib/nodejs" ]]; then - rm -rf /usr/local/lib/nodejs - rm -f /usr/local/bin/node /usr/local/bin/npm /usr/local/bin/npx - fi cleanup_legacy_install "nodejs" cleanup_tool_keyrings "nodesource" ;; @@ -470,6 +465,7 @@ manage_tool_repository() { msg_error "Failed to download MongoDB GPG key" return 1 fi + chmod 644 "/etc/apt/keyrings/mongodb-server-${version}.gpg" # Setup repository local distro_codename @@ -579,7 +575,8 @@ EOF msg_error "Failed to download PHP keyring" return 1 } - dpkg -i /tmp/debsuryorg-archive-keyring.deb >/dev/null 2>&1 || { + # Don't use /dev/null redirection for dpkg as it may use background processes + dpkg -i /tmp/debsuryorg-archive-keyring.deb >>"$(get_active_logfile)" 2>&1 || { msg_error "Failed to install PHP keyring" rm -f /tmp/debsuryorg-archive-keyring.deb return 1 @@ -825,6 +822,54 @@ github_api_call() { return 1 } +# ------------------------------------------------------------------------------ +# Codeberg API call with retry logic +# ------------------------------------------------------------------------------ +codeberg_api_call() { + local url="$1" + local output_file="${2:-/dev/stdout}" + local max_retries=3 + local retry_delay=2 + + for attempt in $(seq 1 $max_retries); do + local http_code + http_code=$(curl -fsSL -w "%{http_code}" -o "$output_file" \ + -H "Accept: application/json" \ + "$url" 2>/dev/null || echo "000") + + case "$http_code" in + 200) + return 0 + ;; + 403) + # Rate limit - retry + if [[ $attempt -lt $max_retries ]]; then + msg_warn "Codeberg API rate limit, waiting ${retry_delay}s... 
(attempt $attempt/$max_retries)" + sleep "$retry_delay" + retry_delay=$((retry_delay * 2)) + continue + fi + msg_error "Codeberg API rate limit exceeded." + return 1 + ;; + 404) + msg_error "Codeberg API endpoint not found: $url" + return 1 + ;; + *) + if [[ $attempt -lt $max_retries ]]; then + sleep "$retry_delay" + continue + fi + msg_error "Codeberg API call failed with HTTP $http_code" + return 1 + ;; + esac + done + + return 1 +} + should_upgrade() { local current="$1" local target="$2" @@ -1250,12 +1295,33 @@ setup_deb822_repo() { return 1 } - # Import GPG - curl -fsSL "$gpg_url" | gpg --dearmor --yes -o "/etc/apt/keyrings/${name}.gpg" || { - msg_error "Failed to import GPG key for ${name}" + # Import GPG key (auto-detect binary vs ASCII-armored format) + local tmp_gpg + tmp_gpg=$(mktemp) || return 1 + curl -fsSL "$gpg_url" -o "$tmp_gpg" || { + msg_error "Failed to download GPG key for ${name}" + rm -f "$tmp_gpg" return 1 } + if grep -q "BEGIN PGP" "$tmp_gpg" 2>/dev/null; then + # ASCII-armored — dearmor to binary + gpg --dearmor --yes -o "/etc/apt/keyrings/${name}.gpg" <"$tmp_gpg" || { + msg_error "Failed to dearmor GPG key for ${name}" + rm -f "$tmp_gpg" + return 1 + } + else + # Already in binary GPG format — copy directly + cp "$tmp_gpg" "/etc/apt/keyrings/${name}.gpg" || { + msg_error "Failed to install GPG key for ${name}" + rm -f "$tmp_gpg" + return 1 + } + fi + rm -f "$tmp_gpg" + chmod 644 "/etc/apt/keyrings/${name}.gpg" + # Write deb822 { echo "Types: deb" @@ -1389,6 +1455,37 @@ get_latest_github_release() { echo "$version" } +# ------------------------------------------------------------------------------ +# Get latest Codeberg release version +# ------------------------------------------------------------------------------ +get_latest_codeberg_release() { + local repo="$1" + local strip_v="${2:-true}" + local temp_file=$(mktemp) + + # Codeberg API: get all releases and pick the first non-draft/non-prerelease + if ! 
codeberg_api_call "https://codeberg.org/api/v1/repos/${repo}/releases" "$temp_file"; then + rm -f "$temp_file" + return 1 + fi + + local version + # Codeberg uses same JSON structure but releases endpoint returns array + version=$(jq -r '[.[] | select(.draft==false and .prerelease==false)][0].tag_name // empty' "$temp_file") + + if [[ "$strip_v" == "true" ]]; then + version="${version#v}" + fi + + rm -f "$temp_file" + + if [[ -z "$version" ]]; then + return 1 + fi + + echo "$version" +} + # ------------------------------------------------------------------------------ # Debug logging (only if DEBUG=1) # ------------------------------------------------------------------------------ @@ -1456,7 +1553,8 @@ check_for_gh_release() { local app="$1" local source="$2" local pinned_version_in="${3:-}" # optional - local app_lc="${app,,}" + local app_lc="" + app_lc="$(echo "${app,,}" | tr -d ' ')" local current_file="$HOME/.${app_lc}" msg_info "Checking for update: ${app}" @@ -1469,31 +1567,54 @@ check_for_gh_release() { ensure_dependencies jq + # Build auth header if token is available + local header_args=() + [[ -n "${GITHUB_TOKEN:-}" ]] && header_args=(-H "Authorization: Bearer $GITHUB_TOKEN") + # Try /latest endpoint for non-pinned versions (most efficient) - local releases_json="" + local releases_json="" http_code="" if [[ -z "$pinned_version_in" ]]; then - releases_json=$(curl -fsSL --max-time 20 \ + http_code=$(curl -sSL --max-time 20 -w "%{http_code}" -o /tmp/gh_check.json \ -H 'Accept: application/vnd.github+json' \ -H 'X-GitHub-Api-Version: 2022-11-28' \ - "https://api.github.com/repos/${source}/releases/latest" 2>/dev/null) + "${header_args[@]}" \ + "https://api.github.com/repos/${source}/releases/latest" 2>/dev/null) || true - if [[ $? 
-eq 0 ]] && [[ -n "$releases_json" ]]; then - # Wrap single release in array for consistent processing - releases_json="[$releases_json]" + if [[ "$http_code" == "200" ]] && [[ -s /tmp/gh_check.json ]]; then + releases_json="[$(/dev/null) || true + + if [[ "$http_code" == "200" ]] && [[ -s /tmp/gh_check.json ]]; then + releases_json=$() +# - Queries the Codeberg API for the latest release tag +# - Compares it to a local cached version (~/.) # - If newer, sets global CHECK_UPDATE_RELEASE and returns 0 # # Usage: -# if check_for_gh_tag "immich" "immich-app/immich" "v2.5.2"; then +# if check_for_codeberg_release "autocaliweb" "gelbphoenix/autocaliweb" [optional] "v0.11.3"; then # # trigger update... # fi +# exit 0 +# } (end of update_script not from the function) # # Notes: -# - Requires explicit tag (no 'latest' support - use check_for_gh_release for that) -# - Sets CHECK_UPDATE_RELEASE to the tag name if update is needed +# - Requires `jq` (auto-installed if missing) +# - Does not modify anything, only checks version state +# - Does not support pre-releases # ------------------------------------------------------------------------------ -check_for_gh_tag() { +check_for_codeberg_release() { local app="$1" local source="$2" - local pinned_tag="$3" + local pinned_version_in="${3:-}" # optional local app_lc="${app,,}" local current_file="$HOME/.${app_lc}" - if [[ -z "$pinned_tag" ]]; then - msg_error "check_for_gh_tag requires a pinned tag version" - return 1 - fi - - msg_info "Checking for update: ${app} (tag: ${pinned_tag})" + msg_info "Checking for update: ${app}" # DNS check - if ! getent hosts api.github.com >/dev/null 2>&1; then - msg_error "Network error: cannot resolve api.github.com" + if ! 
getent hosts codeberg.org >/dev/null 2>&1; then + msg_error "Network error: cannot resolve codeberg.org" return 1 fi ensure_dependencies jq - # Check if tag exists via Git refs API - local tag_check - tag_check=$(curl -fsSL --max-time 20 \ - -H 'Accept: application/vnd.github+json' \ - -H 'X-GitHub-Api-Version: 2022-11-28' \ - "https://api.github.com/repos/${source}/git/refs/tags/${pinned_tag}" 2>/dev/null) + # Fetch releases from Codeberg API + local releases_json="" + releases_json=$(curl -fsSL --max-time 20 \ + -H 'Accept: application/json' \ + "https://codeberg.org/api/v1/repos/${source}/releases" 2>/dev/null) || { + msg_error "Unable to fetch releases for ${app}" + return 1 + } - if [[ $? -ne 0 ]] || [[ -z "$tag_check" ]] || echo "$tag_check" | jq -e '.message' &>/dev/null; then - msg_error "Tag ${pinned_tag} not found in ${source}" + mapfile -t raw_tags < <(jq -r '.[] | select(.draft==false and .prerelease==false) | .tag_name' <<<"$releases_json") + if ((${#raw_tags[@]} == 0)); then + msg_error "No stable releases found for ${app}" return 1 fi - local pin_clean="${pinned_tag#v}" + local clean_tags=() + for t in "${raw_tags[@]}"; do + clean_tags+=("${t#v}") + done - # Current installed version + local latest_raw="${raw_tags[0]}" + local latest_clean="${clean_tags[0]}" + + # current installed (stored without v) local current="" if [[ -f "$current_file" ]]; then current="$(<"$current_file")" + else + # Migration: search for any /opt/*_version.txt + local legacy_files + mapfile -t legacy_files < <(find /opt -maxdepth 1 -type f -name "*_version.txt" 2>/dev/null) + if ((${#legacy_files[@]} == 1)); then + current="$(<"${legacy_files[0]}")" + echo "${current#v}" >"$current_file" + rm -f "${legacy_files[0]}" + fi fi current="${current#v}" - if [[ "$current" != "$pin_clean" ]]; then - CHECK_UPDATE_RELEASE="$pinned_tag" - msg_ok "Update available: ${app} ${current:-not installed} → ${pin_clean}" + # Pinned version handling + if [[ -n "$pinned_version_in" ]]; then + 
local pin_clean="${pinned_version_in#v}" + local match_raw="" + for i in "${!clean_tags[@]}"; do + if [[ "${clean_tags[$i]}" == "$pin_clean" ]]; then + match_raw="${raw_tags[$i]}" + break + fi + done + + if [[ -z "$match_raw" ]]; then + msg_error "Pinned version ${pinned_version_in} not found upstream" + return 1 + fi + + if [[ "$current" != "$pin_clean" ]]; then + CHECK_UPDATE_RELEASE="$match_raw" + msg_ok "Update available: ${app} ${current:-not installed} → ${pin_clean}" + return 0 + fi + + msg_ok "No update available: ${app} is already on pinned version (${current})" + return 1 + fi + + # No pinning → use latest + if [[ -z "$current" || "$current" != "$latest_clean" ]]; then + CHECK_UPDATE_RELEASE="$latest_raw" + msg_ok "Update available: ${app} ${current:-not installed} → ${latest_clean}" return 0 fi - msg_ok "No update available: ${app} is already on version (${current})" + msg_ok "No update available: ${app} (${latest_clean})" return 1 } @@ -1692,11 +1855,7 @@ function download_with_progress() { # Content-Length aus HTTP-Header holen local content_length - content_length=$( - curl -fsSLI "$url" 2>/dev/null | - awk '(tolower($1) ~ /^content-length:/) && ($2 + 0 > 0) {print $2+0}' | - tail -1 | tr -cd '[:digit:]' || true - ) + content_length=$(curl -fsSLI "$url" | awk '/Content-Length/ {print $2}' | tr -d '\r' || true) if [[ -z "$content_length" ]]; then if ! curl -fL# -o "$output" "$url"; then @@ -1715,485 +1874,26 @@ function download_with_progress() { # Ensures /usr/local/bin is permanently in system PATH. # # Description: -# - Adds to /etc/profile.d if not present +# - Adds to /etc/profile.d for login shells (SSH, noVNC) +# - Adds to /root/.bashrc for non-login shells (pct enter) # ------------------------------------------------------------------------------ function ensure_usr_local_bin_persist() { - local PROFILE_FILE="/etc/profile.d/custom_path.sh" + # Skip on Proxmox host + command -v pveversion &>/dev/null && return - if [[ ! 
-f "$PROFILE_FILE" ]] && ! command -v pveversion &>/dev/null; then + # Login shells: /etc/profile.d/ + local PROFILE_FILE="/etc/profile.d/custom_path.sh" + if [[ ! -f "$PROFILE_FILE" ]]; then echo 'export PATH="/usr/local/bin:$PATH"' >"$PROFILE_FILE" chmod +x "$PROFILE_FILE" fi -} -# ------------------------------------------------------------------------------ -# Downloads and deploys latest GitHub release (source, binary, tarball, asset). -# -# Description: -# - Fetches latest release metadata from GitHub API -# - Supports the following modes: -# - tarball: Source code tarball (default if omitted) -# - source: Alias for tarball (same behavior) -# - binary: .deb package install (arch-dependent) -# - prebuild: Prebuilt .tar.gz archive (e.g. Go binaries) -# - singlefile: Standalone binary (no archive, direct chmod +x install) -# - Handles download, extraction/installation and version tracking in ~/. -# -# Parameters: -# $1 APP - Application name (used for install path and version file) -# $2 REPO - GitHub repository in form user/repo -# $3 MODE - Release type: -# tarball → source tarball (.tar.gz) -# binary → .deb file (auto-arch matched) -# prebuild → prebuilt archive (e.g. tar.gz) -# singlefile→ standalone binary (chmod +x) -# $4 VERSION - Optional release tag (default: latest) -# $5 TARGET_DIR - Optional install path (default: /opt/) -# $6 ASSET_FILENAME - Required for: -# - prebuild → archive filename or pattern -# - singlefile→ binary filename or pattern -# -# Optional: -# - Set GITHUB_TOKEN env var to increase API rate limit (recommended for CI/CD). -# -# Examples: -# # 1. Minimal: Fetch and deploy source tarball -# fetch_and_deploy_gh_release "myapp" "myuser/myapp" -# -# # 2. Binary install via .deb asset (architecture auto-detected) -# fetch_and_deploy_gh_release "myapp" "myuser/myapp" "binary" -# -# # 3. 
Prebuilt archive (.tar.gz) with asset filename match -# fetch_and_deploy_gh_release "hanko" "teamhanko/hanko" "prebuild" "latest" "/opt/hanko" "hanko_Linux_x86_64.tar.gz" -# -# # 4. Single binary (chmod +x) like Argus, Promtail etc. -# fetch_and_deploy_gh_release "argus" "release-argus/Argus" "singlefile" "0.26.3" "/opt/argus" "Argus-.*linux-amd64" -# -# # 5. Git tag (not a release) - bypasses Release API, fetches tarball directly from tag -# fetch_and_deploy_gh_release "immich" "immich-app/immich" "tag" "v2.5.2" "/opt/immich/source" -# ------------------------------------------------------------------------------ - -function fetch_and_deploy_gh_release() { - local app="$1" - local repo="$2" - local mode="${3:-tarball}" # tarball | binary | prebuild | singlefile | tag - local version="${var_appversion:-${4:-latest}}" - local target="${5:-/opt/$app}" - local asset_pattern="${6:-}" - - local app_lc=$(echo "${app,,}" | tr -d ' ') - local version_file="$HOME/.${app_lc}" - - local api_timeout="--connect-timeout 10 --max-time 60" - local download_timeout="--connect-timeout 15 --max-time 900" - - local current_version="" - [[ -f "$version_file" ]] && current_version=$(<"$version_file") - - ensure_dependencies jq - - ### Tag Mode (bypass Release API) ### - if [[ "$mode" == "tag" ]]; then - if [[ "$version" == "latest" ]]; then - msg_error "Mode 'tag' requires explicit version (not 'latest')" - return 1 - fi - - local tag_name="$version" - [[ "$tag_name" =~ ^v ]] && version="${tag_name:1}" || version="$tag_name" - - if [[ "$current_version" == "$version" ]]; then - $STD msg_ok "$app is already up-to-date (v$version)" - return 0 - fi - - # DNS check - if ! 
getent hosts "github.com" &>/dev/null; then - msg_error "DNS resolution failed for github.com – check /etc/resolv.conf or networking" - return 1 - fi - - local tmpdir - tmpdir=$(mktemp -d) || return 1 - - msg_info "Fetching GitHub tag: $app ($tag_name)" - - local safe_version="${version//@/_}" - safe_version="${safe_version//\//_}" - local filename="${app_lc}-${safe_version}.tar.gz" - local download_success=false - - # For tags with special characters (@, /), use codeload.github.com - if [[ "$tag_name" =~ [@/] ]]; then - local codeload_encoded="${tag_name//@/%40}" - local codeload_url="https://codeload.github.com/$repo/tar.gz/refs/tags/$codeload_encoded" - if curl $download_timeout -fsSL -o "$tmpdir/$filename" "$codeload_url"; then - download_success=true - fi - else - local direct_tarball_url="https://github.com/$repo/archive/refs/tags/${tag_name}.tar.gz" - if curl $download_timeout -fsSL -o "$tmpdir/$filename" "$direct_tarball_url"; then - download_success=true - fi - fi - - if [[ "$download_success" != "true" ]]; then - msg_error "Download failed for $app ($tag_name)" - rm -rf "$tmpdir" - return 1 - fi - - mkdir -p "$target" - if [[ "${CLEAN_INSTALL:-0}" == "1" ]]; then - rm -rf "${target:?}/"* - fi - - tar --no-same-owner -xzf "$tmpdir/$filename" -C "$tmpdir" || { - msg_error "Failed to extract tarball" - rm -rf "$tmpdir" - return 1 - } - - local unpack_dir - unpack_dir=$(find "$tmpdir" -mindepth 1 -maxdepth 1 -type d | head -n1) - - shopt -s dotglob nullglob - cp -r "$unpack_dir"/* "$target/" - shopt -u dotglob nullglob - - echo "$version" >"$version_file" - msg_ok "Deployed: $app ($version)" - rm -rf "$tmpdir" - return 0 + # Non-login shells (pct enter): /root/.bashrc + local BASHRC="/root/.bashrc" + if [[ -f "$BASHRC" ]] && ! 
grep -q '/usr/local/bin' "$BASHRC"; then + echo 'export PATH="/usr/local/bin:$PATH"' >>"$BASHRC" fi - - local api_url="https://api.github.com/repos/$repo/releases" - [[ "$version" != "latest" ]] && api_url="$api_url/tags/$version" || api_url="$api_url/latest" - local header=() - [[ -n "${GITHUB_TOKEN:-}" ]] && header=(-H "Authorization: token $GITHUB_TOKEN") - - # dns pre check - local gh_host - gh_host=$(awk -F/ '{print $3}' <<<"$api_url") - if ! getent hosts "$gh_host" &>/dev/null; then - msg_error "DNS resolution failed for $gh_host – check /etc/resolv.conf or networking" - return 1 - fi - - local max_retries=3 retry_delay=2 attempt=1 success=false resp http_code - - while ((attempt <= max_retries)); do - resp=$(curl $api_timeout -fsSL -w "%{http_code}" -o /tmp/gh_rel.json "${header[@]}" "$api_url") && success=true && break - sleep "$retry_delay" - ((attempt++)) - done - - if ! $success; then - msg_error "Failed to fetch release metadata from $api_url after $max_retries attempts" - return 1 - fi - - http_code="${resp:(-3)}" - [[ "$http_code" != "200" ]] && { - msg_error "GitHub API returned HTTP $http_code" - return 1 - } - - local json tag_name - json=$(/dev/null || uname -m) - [[ "$arch" == "x86_64" ]] && arch="amd64" - [[ "$arch" == "aarch64" ]] && arch="arm64" - - # Get Debian codename for distro-specific packages - local codename="" - if [[ -f /etc/os-release ]]; then - codename=$(grep -oP '(?<=VERSION_CODENAME=).*' /etc/os-release 2>/dev/null || true) - fi - - local assets url_match="" - assets=$(echo "$json" | jq -r '.assets[].browser_download_url') - - # If explicit filename pattern is provided (param $6), match that first - if [[ -n "$asset_pattern" ]]; then - for u in $assets; do - case "${u##*/}" in - $asset_pattern) - url_match="$u" - break - ;; - esac - done - fi - - # If no match via explicit pattern, try architecture + codename match - if [[ -z "$url_match" && -n "$codename" ]]; then - for u in $assets; do - if [[ "$u" =~ $arch.*$codename.*\.deb$ 
]] || [[ "$u" =~ $arch.*-$codename\.deb$ ]] || [[ "$u" =~ ${arch}-${codename}\.deb$ ]] || [[ "$u" =~ ${arch}_${codename}\.deb$ ]]; then - url_match="$u" - break - fi - done - fi - - # Fallback: architecture heuristic without codename - if [[ -z "$url_match" ]]; then - for u in $assets; do - if [[ "$u" =~ ($arch|amd64|x86_64|aarch64|arm64).*\.deb$ ]]; then - url_match="$u" - break - fi - done - fi - - # Fallback: any .deb file - if [[ -z "$url_match" ]]; then - for u in $assets; do - [[ "$u" =~ \.deb$ ]] && url_match="$u" && break - done - fi - - if [[ -z "$url_match" ]]; then - msg_error "No suitable .deb asset found for $app" - rm -rf "$tmpdir" - return 1 - fi - - filename="${url_match##*/}" - curl $download_timeout -fsSL -o "$tmpdir/$filename" "$url_match" || { - msg_error "Download failed: $url_match" - rm -rf "$tmpdir" - return 1 - } - - chmod 644 "$tmpdir/$filename" - $STD apt install -y "$tmpdir/$filename" || { - $STD dpkg -i "$tmpdir/$filename" || { - msg_error "Both apt and dpkg installation failed" - rm -rf "$tmpdir" - return 1 - } - } - - ### Prebuild Mode ### - elif [[ "$mode" == "prebuild" ]]; then - local pattern="${6%\"}" - pattern="${pattern#\"}" - [[ -z "$pattern" ]] && { - msg_error "Mode 'prebuild' requires 6th parameter (asset filename pattern)" - rm -rf "$tmpdir" - return 1 - } - - local asset_url="" - for u in $(echo "$json" | jq -r '.assets[].browser_download_url'); do - filename_candidate="${u##*/}" - case "$filename_candidate" in - $pattern) - asset_url="$u" - break - ;; - esac - done - - [[ -z "$asset_url" ]] && { - msg_error "No asset matching '$pattern' found" - rm -rf "$tmpdir" - return 1 - } - - filename="${asset_url##*/}" - curl $download_timeout -fsSL -o "$tmpdir/$filename" "$asset_url" || { - msg_error "Download failed: $asset_url" - rm -rf "$tmpdir" - return 1 - } - - local unpack_tmp - unpack_tmp=$(mktemp -d) - mkdir -p "$target" - if [[ "${CLEAN_INSTALL:-0}" == "1" ]]; then - rm -rf "${target:?}/"* - fi - - if [[ "$filename" == 
*.zip ]]; then - ensure_dependencies unzip - unzip -q "$tmpdir/$filename" -d "$unpack_tmp" || { - msg_error "Failed to extract ZIP archive" - rm -rf "$tmpdir" "$unpack_tmp" - return 1 - } - elif [[ "$filename" == *.tar.* || "$filename" == *.tgz ]]; then - tar --no-same-owner -xf "$tmpdir/$filename" -C "$unpack_tmp" || { - msg_error "Failed to extract TAR archive" - rm -rf "$tmpdir" "$unpack_tmp" - return 1 - } - else - msg_error "Unsupported archive format: $filename" - rm -rf "$tmpdir" "$unpack_tmp" - return 1 - fi - - local top_dirs - top_dirs=$(find "$unpack_tmp" -mindepth 1 -maxdepth 1 -type d | wc -l) - local top_entries inner_dir - top_entries=$(find "$unpack_tmp" -mindepth 1 -maxdepth 1) - if [[ "$(echo "$top_entries" | wc -l)" -eq 1 && -d "$top_entries" ]]; then - # Strip leading folder - inner_dir="$top_entries" - shopt -s dotglob nullglob - if compgen -G "$inner_dir/*" >/dev/null; then - cp -r "$inner_dir"/* "$target/" || { - msg_error "Failed to copy contents from $inner_dir to $target" - rm -rf "$tmpdir" "$unpack_tmp" - return 1 - } - else - msg_error "Inner directory is empty: $inner_dir" - rm -rf "$tmpdir" "$unpack_tmp" - return 1 - fi - shopt -u dotglob nullglob - else - # Copy all contents - shopt -s dotglob nullglob - if compgen -G "$unpack_tmp/*" >/dev/null; then - cp -r "$unpack_tmp"/* "$target/" || { - msg_error "Failed to copy contents to $target" - rm -rf "$tmpdir" "$unpack_tmp" - return 1 - } - else - msg_error "Unpacked archive is empty" - rm -rf "$tmpdir" "$unpack_tmp" - return 1 - fi - shopt -u dotglob nullglob - fi - - ### Singlefile Mode ### - elif [[ "$mode" == "singlefile" ]]; then - local pattern="${6%\"}" - pattern="${pattern#\"}" - [[ -z "$pattern" ]] && { - msg_error "Mode 'singlefile' requires 6th parameter (asset filename pattern)" - rm -rf "$tmpdir" - return 1 - } - - local asset_url="" - for u in $(echo "$json" | jq -r '.assets[].browser_download_url'); do - filename_candidate="${u##*/}" - case "$filename_candidate" in - 
$pattern) - asset_url="$u" - break - ;; - esac - done - - [[ -z "$asset_url" ]] && { - msg_error "No asset matching '$pattern' found" - rm -rf "$tmpdir" - return 1 - } - - filename="${asset_url##*/}" - mkdir -p "$target" - - local use_filename="${USE_ORIGINAL_FILENAME:-false}" - local target_file="$app" - [[ "$use_filename" == "true" ]] && target_file="$filename" - - curl $download_timeout -fsSL -o "$target/$target_file" "$asset_url" || { - msg_error "Download failed: $asset_url" - rm -rf "$tmpdir" - return 1 - } - - if [[ "$target_file" != *.jar && -f "$target/$target_file" ]]; then - chmod +x "$target/$target_file" - fi - - else - msg_error "Unknown mode: $mode" - rm -rf "$tmpdir" - return 1 - fi - - echo "$version" >"$version_file" - msg_ok "Deployed: $app ($version)" - rm -rf "$tmpdir" } # ------------------------------------------------------------------------------ @@ -2631,47 +2331,553 @@ function fetch_and_deploy_codeberg_release() { } # ------------------------------------------------------------------------------ -# Loads LOCAL_IP from persistent store or detects if missing. +# Downloads and deploys latest GitHub release (source, binary, tarball, asset). # # Description: -# - Loads from /run/local-ip.env or performs runtime lookup +# - Fetches latest release metadata from GitHub API +# - Supports the following modes: +# - tarball: Source code tarball (default if omitted) +# - source: Alias for tarball (same behavior) +# - binary: .deb package install (arch-dependent) +# - prebuild: Prebuilt .tar.gz archive (e.g. Go binaries) +# - singlefile: Standalone binary (no archive, direct chmod +x install) +# - Handles download, extraction/installation and version tracking in ~/. +# +# Parameters: +# $1 APP - Application name (used for install path and version file) +# $2 REPO - GitHub repository in form user/repo +# $3 MODE - Release type: +# tarball → source tarball (.tar.gz) +# binary → .deb file (auto-arch matched) +# prebuild → prebuilt archive (e.g. 
tar.gz) +# singlefile→ standalone binary (chmod +x) +# $4 VERSION - Optional release tag (default: latest) +# $5 TARGET_DIR - Optional install path (default: /opt/) +# $6 ASSET_FILENAME - Required for: +# - prebuild → archive filename or pattern +# - singlefile→ binary filename or pattern +# +# Optional: +# - Set GITHUB_TOKEN env var to increase API rate limit (recommended for CI/CD). +# +# Examples: +# # 1. Minimal: Fetch and deploy source tarball +# fetch_and_deploy_gh_release "myapp" "myuser/myapp" +# +# # 2. Binary install via .deb asset (architecture auto-detected) +# fetch_and_deploy_gh_release "myapp" "myuser/myapp" "binary" +# +# # 3. Prebuilt archive (.tar.gz) with asset filename match +# fetch_and_deploy_gh_release "hanko" "teamhanko/hanko" "prebuild" "latest" "/opt/hanko" "hanko_Linux_x86_64.tar.gz" +# +# # 4. Single binary (chmod +x) like Argus, Promtail etc. +# fetch_and_deploy_gh_release "argus" "release-argus/Argus" "singlefile" "0.26.3" "/opt/argus" "Argus-.*linux-amd64" +# +# Notes: +# - For binary/prebuild/singlefile modes: if the target release has no +# matching asset, the function scans older releases and prompts the user +# (60s timeout, default yes) to use a previous version that has the asset. # ------------------------------------------------------------------------------ -function import_local_ip() { - local IP_FILE="/run/local-ip.env" - if [[ -f "$IP_FILE" ]]; then - # shellcheck disable=SC1090 - source "$IP_FILE" +# ------------------------------------------------------------------------------ +# Scans older GitHub releases for a matching asset when the latest release +# is missing the expected file. Used internally by fetch_and_deploy_gh_release. +# +# Arguments: +# $1 - GitHub repo (owner/repo) +# $2 - mode (binary|prebuild|singlefile) +# $3 - asset_pattern (glob pattern for asset filename) +# $4 - tag to skip (the already-checked release) +# +# Output: +# Prints the release JSON of the first older release that has a matching asset. 
+# Returns 0 on success, 1 if no matching release found or user declined. +# ------------------------------------------------------------------------------ +_gh_scan_older_releases() { + local repo="$1" + local mode="$2" + local asset_pattern="$3" + local skip_tag="$4" + + local header=() + [[ -n "${GITHUB_TOKEN:-}" ]] && header=(-H "Authorization: token $GITHUB_TOKEN") + + local releases_list + releases_list=$(curl --connect-timeout 10 --max-time 30 -fsSL \ + -H 'Accept: application/vnd.github+json' \ + -H 'X-GitHub-Api-Version: 2022-11-28' \ + "${header[@]}" \ + "https://api.github.com/repos/${repo}/releases?per_page=15" 2>/dev/null) || return 1 + + local count + count=$(echo "$releases_list" | jq 'length') + + for ((i = 0; i < count; i++)); do + local rel_tag rel_draft rel_prerelease + rel_tag=$(echo "$releases_list" | jq -r ".[$i].tag_name") + rel_draft=$(echo "$releases_list" | jq -r ".[$i].draft") + rel_prerelease=$(echo "$releases_list" | jq -r ".[$i].prerelease") + + # Skip drafts, prereleases, and the tag we already checked + [[ "$rel_draft" == "true" || "$rel_prerelease" == "true" ]] && continue + [[ "$rel_tag" == "$skip_tag" ]] && continue + + local has_match=false + + if [[ "$mode" == "binary" ]]; then + local arch + arch=$(dpkg --print-architecture 2>/dev/null || uname -m) + [[ "$arch" == "x86_64" ]] && arch="amd64" + [[ "$arch" == "aarch64" ]] && arch="arm64" + + # Check with explicit pattern first, then arch heuristic, then any .deb + if [[ -n "$asset_pattern" ]]; then + has_match=$(echo "$releases_list" | jq -r --arg pat "$asset_pattern" ".[$i].assets[].name" | while read -r name; do + case "$name" in $asset_pattern) + echo true + break + ;; + esac + done) + fi + if [[ "$has_match" != "true" ]]; then + has_match=$(echo "$releases_list" | jq -r ".[$i].assets[].browser_download_url" | grep -qE "($arch|amd64|x86_64|aarch64|arm64).*\.deb$" && echo true) + fi + if [[ "$has_match" != "true" ]]; then + has_match=$(echo "$releases_list" | jq -r 
".[$i].assets[].browser_download_url" | grep -qE '\.deb$' && echo true) + fi + + elif [[ "$mode" == "prebuild" || "$mode" == "singlefile" ]]; then + has_match=$(echo "$releases_list" | jq -r ".[$i].assets[].name" | while read -r name; do + case "$name" in $asset_pattern) + echo true + break + ;; + esac + done) + fi + + if [[ "$has_match" == "true" ]]; then + local rel_version="$rel_tag" + [[ "$rel_tag" =~ ^v ]] && rel_version="${rel_tag:1}" + + local use_fallback="y" + if [[ -t 0 ]]; then + msg_warn "Release ${skip_tag} has no matching asset. Previous release ${rel_tag} has a compatible asset." + read -rp "Use version ${rel_tag} instead? [Y/n] (auto-yes in 60s): " -t 60 use_fallback || use_fallback="y" + use_fallback="${use_fallback:-y}" + fi + + if [[ "${use_fallback,,}" == "y" || "${use_fallback,,}" == "yes" ]]; then + echo "$releases_list" | jq ".[$i]" + return 0 + else + return 1 + fi + fi + done + + return 1 +} + +function fetch_and_deploy_gh_release() { + local app="$1" + local repo="$2" + local mode="${3:-tarball}" # tarball | binary | prebuild | singlefile + local version="${var_appversion:-${4:-latest}}" + local target="${5:-/opt/$app}" + local asset_pattern="${6:-}" + + local app_lc=$(echo "${app,,}" | tr -d ' ') + local version_file="$HOME/.${app_lc}" + + local api_timeout="--connect-timeout 10 --max-time 60" + local download_timeout="--connect-timeout 15 --max-time 900" + + local current_version="" + [[ -f "$version_file" ]] && current_version=$(<"$version_file") + + ensure_dependencies jq + + local api_url="https://api.github.com/repos/$repo/releases" + [[ "$version" != "latest" ]] && api_url="$api_url/tags/$version" || api_url="$api_url/latest" + local header=() + [[ -n "${GITHUB_TOKEN:-}" ]] && header=(-H "Authorization: token $GITHUB_TOKEN") + + # dns pre check + local gh_host + gh_host=$(awk -F/ '{print $3}' <<<"$api_url") + if ! 
getent hosts "$gh_host" &>/dev/null; then + msg_error "DNS resolution failed for $gh_host – check /etc/resolv.conf or networking" + return 1 fi - if [[ -z "${LOCAL_IP:-}" ]]; then - get_current_ip() { - local targets=("8.8.8.8" "1.1.1.1" "192.168.1.1" "10.0.0.1" "172.16.0.1" "default") - local ip + local max_retries=3 retry_delay=2 attempt=1 success=false http_code - for target in "${targets[@]}"; do - if [[ "$target" == "default" ]]; then - ip=$(ip route get 1 2>/dev/null | awk '{for(i=1;i<=NF;i++) if ($i=="src") print $(i+1)}') - else - ip=$(ip route get "$target" 2>/dev/null | awk '{for(i=1;i<=NF;i++) if ($i=="src") print $(i+1)}') - fi - if [[ -n "$ip" ]]; then - echo "$ip" - return 0 - fi - done + while ((attempt <= max_retries)); do + http_code=$(curl $api_timeout -sSL -w "%{http_code}" -o /tmp/gh_rel.json "${header[@]}" "$api_url" 2>/dev/null) || true + if [[ "$http_code" == "200" ]]; then + success=true + break + elif [[ "$http_code" == "403" ]]; then + if ((attempt < max_retries)); then + msg_warn "GitHub API rate limit hit, retrying in ${retry_delay}s... (attempt $attempt/$max_retries)" + sleep "$retry_delay" + retry_delay=$((retry_delay * 2)) + fi + else + sleep "$retry_delay" + fi + ((attempt++)) + done + if ! $success; then + if [[ "$http_code" == "403" ]]; then + msg_error "GitHub API rate limit exceeded (HTTP 403)." 
+ msg_error "To increase the limit, export a GitHub token before running the script:" + msg_error " export GITHUB_TOKEN=\"ghp_your_token_here\"" + else + msg_error "Failed to fetch release metadata from $api_url after $max_retries attempts (HTTP $http_code)" + fi + return 1 + fi + + local json tag_name + json=$(/dev/null || uname -m) + [[ "$arch" == "x86_64" ]] && arch="amd64" + [[ "$arch" == "aarch64" ]] && arch="arm64" + + local assets url_match="" + assets=$(echo "$json" | jq -r '.assets[].browser_download_url') + + # If explicit filename pattern is provided (param $6), match that first + if [[ -n "$asset_pattern" ]]; then + for u in $assets; do + case "${u##*/}" in + $asset_pattern) + url_match="$u" + break + ;; + esac + done + fi + + # If no match via explicit pattern, fall back to architecture heuristic + if [[ -z "$url_match" ]]; then + for u in $assets; do + if [[ "$u" =~ ($arch|amd64|x86_64|aarch64|arm64).*\.deb$ ]]; then + url_match="$u" + break + fi + done + fi + + # Fallback: any .deb file + if [[ -z "$url_match" ]]; then + for u in $assets; do + [[ "$u" =~ \.deb$ ]] && url_match="$u" && break + done + fi + + # Fallback: scan older releases for a matching .deb asset + if [[ -z "$url_match" ]]; then + local fallback_json + if fallback_json=$(_gh_scan_older_releases "$repo" "binary" "$asset_pattern" "$tag_name"); then + json="$fallback_json" + tag_name=$(echo "$json" | jq -r '.tag_name // .name // empty') + [[ "$tag_name" =~ ^v ]] && version="${tag_name:1}" || version="$tag_name" + msg_info "Fetching GitHub release: $app ($version)" + assets=$(echo "$json" | jq -r '.assets[].browser_download_url') + if [[ -n "$asset_pattern" ]]; then + for u in $assets; do + case "${u##*/}" in $asset_pattern) + url_match="$u" + break + ;; + esac + done + fi + if [[ -z "$url_match" ]]; then + for u in $assets; do + if [[ "$u" =~ ($arch|amd64|x86_64|aarch64|arm64).*\.deb$ ]]; then + url_match="$u" + break + fi + done + fi + if [[ -z "$url_match" ]]; then + for u in $assets; 
do + [[ "$u" =~ \.deb$ ]] && url_match="$u" && break + done + fi + fi + fi + + if [[ -z "$url_match" ]]; then + msg_error "No suitable .deb asset found for $app" + rm -rf "$tmpdir" return 1 fi + + filename="${url_match##*/}" + curl $download_timeout -fsSL -o "$tmpdir/$filename" "$url_match" || { + msg_error "Download failed: $url_match" + rm -rf "$tmpdir" + return 1 + } + + chmod 644 "$tmpdir/$filename" + # SYSTEMD_OFFLINE=1 prevents systemd-tmpfiles failures in unprivileged LXC (Debian 13+/systemd 257+) + # Support DPKG_CONFOLD/DPKG_CONFNEW env vars for config file handling during .deb upgrades + local dpkg_opts="" + [[ "${DPKG_FORCE_CONFOLD:-}" == "1" ]] && dpkg_opts="-o Dpkg::Options::=--force-confold" + [[ "${DPKG_FORCE_CONFNEW:-}" == "1" ]] && dpkg_opts="-o Dpkg::Options::=--force-confnew" + DEBIAN_FRONTEND=noninteractive SYSTEMD_OFFLINE=1 $STD apt install -y $dpkg_opts "$tmpdir/$filename" || { + SYSTEMD_OFFLINE=1 $STD dpkg -i "$tmpdir/$filename" || { + msg_error "Both apt and dpkg installation failed" + rm -rf "$tmpdir" + return 1 + } + } + + ### Prebuild Mode ### + elif [[ "$mode" == "prebuild" ]]; then + local pattern="${6%\"}" + pattern="${pattern#\"}" + [[ -z "$pattern" ]] && { + msg_error "Mode 'prebuild' requires 6th parameter (asset filename pattern)" + rm -rf "$tmpdir" + return 1 + } + + local asset_url="" + for u in $(echo "$json" | jq -r '.assets[].browser_download_url'); do + filename_candidate="${u##*/}" + case "$filename_candidate" in + $pattern) + asset_url="$u" + break + ;; + esac + done + + # Fallback: scan older releases for a matching asset + if [[ -z "$asset_url" ]]; then + local fallback_json + if fallback_json=$(_gh_scan_older_releases "$repo" "prebuild" "$pattern" "$tag_name"); then + json="$fallback_json" + tag_name=$(echo "$json" | jq -r '.tag_name // .name // empty') + [[ "$tag_name" =~ ^v ]] && version="${tag_name:1}" || version="$tag_name" + msg_info "Fetching GitHub release: $app ($version)" + for u in $(echo "$json" | jq -r 
'.assets[].browser_download_url'); do + filename_candidate="${u##*/}" + case "$filename_candidate" in $pattern) + asset_url="$u" + break + ;; + esac + done + fi + fi + + [[ -z "$asset_url" ]] && { + msg_error "No asset matching '$pattern' found" + rm -rf "$tmpdir" + return 1 + } + + filename="${asset_url##*/}" + curl $download_timeout -fsSL -o "$tmpdir/$filename" "$asset_url" || { + msg_error "Download failed: $asset_url" + rm -rf "$tmpdir" + return 1 + } + + local unpack_tmp + unpack_tmp=$(mktemp -d) + mkdir -p "$target" + if [[ "${CLEAN_INSTALL:-0}" == "1" ]]; then + rm -rf "${target:?}/"* + fi + + if [[ "$filename" == *.zip ]]; then + ensure_dependencies unzip + unzip -q "$tmpdir/$filename" -d "$unpack_tmp" || { + msg_error "Failed to extract ZIP archive" + rm -rf "$tmpdir" "$unpack_tmp" + return 1 + } + elif [[ "$filename" == *.tar.* || "$filename" == *.tgz || "$filename" == *.txz ]]; then + tar --no-same-owner -xf "$tmpdir/$filename" -C "$unpack_tmp" || { + msg_error "Failed to extract TAR archive" + rm -rf "$tmpdir" "$unpack_tmp" + return 1 + } + else + msg_error "Unsupported archive format: $filename" + rm -rf "$tmpdir" "$unpack_tmp" + return 1 + fi + + local top_dirs + top_dirs=$(find "$unpack_tmp" -mindepth 1 -maxdepth 1 -type d | wc -l) + local top_entries inner_dir + top_entries=$(find "$unpack_tmp" -mindepth 1 -maxdepth 1) + if [[ "$(echo "$top_entries" | wc -l)" -eq 1 && -d "$top_entries" ]]; then + # Strip leading folder + inner_dir="$top_entries" + shopt -s dotglob nullglob + if compgen -G "$inner_dir/*" >/dev/null; then + cp -r "$inner_dir"/* "$target/" || { + msg_error "Failed to copy contents from $inner_dir to $target" + rm -rf "$tmpdir" "$unpack_tmp" + return 1 + } + else + msg_error "Inner directory is empty: $inner_dir" + rm -rf "$tmpdir" "$unpack_tmp" + return 1 + fi + shopt -u dotglob nullglob + else + # Copy all contents + shopt -s dotglob nullglob + if compgen -G "$unpack_tmp/*" >/dev/null; then + cp -r "$unpack_tmp"/* "$target/" || { + 
msg_error "Failed to copy contents to $target" + rm -rf "$tmpdir" "$unpack_tmp" + return 1 + } + else + msg_error "Unpacked archive is empty" + rm -rf "$tmpdir" "$unpack_tmp" + return 1 + fi + shopt -u dotglob nullglob + fi + + ### Singlefile Mode ### + elif [[ "$mode" == "singlefile" ]]; then + local pattern="${6%\"}" + pattern="${pattern#\"}" + [[ -z "$pattern" ]] && { + msg_error "Mode 'singlefile' requires 6th parameter (asset filename pattern)" + rm -rf "$tmpdir" + return 1 + } + + local asset_url="" + for u in $(echo "$json" | jq -r '.assets[].browser_download_url'); do + filename_candidate="${u##*/}" + case "$filename_candidate" in + $pattern) + asset_url="$u" + break + ;; + esac + done + + # Fallback: scan older releases for a matching asset + if [[ -z "$asset_url" ]]; then + local fallback_json + if fallback_json=$(_gh_scan_older_releases "$repo" "singlefile" "$pattern" "$tag_name"); then + json="$fallback_json" + tag_name=$(echo "$json" | jq -r '.tag_name // .name // empty') + [[ "$tag_name" =~ ^v ]] && version="${tag_name:1}" || version="$tag_name" + msg_info "Fetching GitHub release: $app ($version)" + for u in $(echo "$json" | jq -r '.assets[].browser_download_url'); do + filename_candidate="${u##*/}" + case "$filename_candidate" in $pattern) + asset_url="$u" + break + ;; + esac + done + fi + fi + [[ -z "$asset_url" ]] && { + msg_error "No asset matching '$pattern' found" + rm -rf "$tmpdir" + return 1 + } + + filename="${asset_url##*/}" + mkdir -p "$target" + + local use_filename="${USE_ORIGINAL_FILENAME:-false}" + local target_file="$app" + [[ "$use_filename" == "true" ]] && target_file="$filename" + + curl $download_timeout -fsSL -o "$target/$target_file" "$asset_url" || { + msg_error "Download failed: $asset_url" + rm -rf "$tmpdir" + return 1 + } + + if [[ "$target_file" != *.jar && -f "$target/$target_file" ]]; then + chmod +x "$target/$target_file" + fi + + else + msg_error "Unknown mode: $mode" + rm -rf "$tmpdir" + return 1 fi - export LOCAL_IP + 
echo "$version" >"$version_file" + msg_ok "Deployed: $app ($version)" + rm -rf "$tmpdir" } # ------------------------------------------------------------------------------ @@ -3301,6 +3507,7 @@ function setup_hwaccel() { # GPU Selection - Let user choose which GPU(s) to configure # ═══════════════════════════════════════════════════════════════════════════ local -a SELECTED_INDICES=() + local install_nvidia_drivers="yes" if [[ $gpu_count -eq 1 ]]; then # Single GPU - auto-select @@ -3362,6 +3569,30 @@ function setup_hwaccel() { fi fi + # Ask whether to install NVIDIA drivers in the container + local nvidia_selected="no" + for idx in "${SELECTED_INDICES[@]}"; do + if [[ "${GPU_TYPES[$idx]}" == "NVIDIA" ]]; then + nvidia_selected="yes" + break + fi + done + + if [[ "$nvidia_selected" == "yes" ]]; then + if [[ -n "${INSTALL_NVIDIA_DRIVERS:-}" ]]; then + install_nvidia_drivers="${INSTALL_NVIDIA_DRIVERS}" + else + echo "" + msg_custom "🎮" "${GN}" "NVIDIA GPU passthrough detected" + local nvidia_reply="" + read -r -t 60 -p "${TAB3}⚙️ Install NVIDIA driver libraries in the container? 
[Y/n] (auto-yes in 60s): " nvidia_reply || nvidia_reply="" + case "${nvidia_reply,,}" in + n | no) install_nvidia_drivers="no" ;; + *) install_nvidia_drivers="yes" ;; + esac + fi + fi + # ═══════════════════════════════════════════════════════════════════════════ # OS Detection # ═══════════════════════════════════════════════════════════════════════════ @@ -3422,7 +3653,11 @@ function setup_hwaccel() { # NVIDIA GPUs # ───────────────────────────────────────────────────────────────────────── NVIDIA) - _setup_nvidia_gpu "$os_id" "$os_codename" "$os_version" + if [[ "$install_nvidia_drivers" == "yes" ]]; then + _setup_nvidia_gpu "$os_id" "$os_codename" "$os_version" + else + msg_warn "Skipping NVIDIA driver installation (user opted to install manually)" + fi ;; esac done @@ -3449,6 +3684,7 @@ _setup_intel_arc() { $STD apt -y install \ intel-media-va-driver-non-free \ intel-opencl-icd \ + libmfx-gen1.2 \ vainfo \ intel-gpu-tools 2>/dev/null || msg_warn "Some Intel Arc packages failed" @@ -3456,27 +3692,33 @@ _setup_intel_arc() { # Add non-free repos _add_debian_nonfree "$os_codename" - # Arc requires latest drivers - fetch from GitHub - # Order matters: libigdgmm first (dependency), then IGC, then compute-runtime - msg_info "Fetching Intel compute-runtime for Arc support" + # For Trixie/Sid: Fetch latest drivers from GitHub (Debian repo packages may be too old or missing) + # For Bookworm: Use repo packages (GitHub latest requires libstdc++6 >= 13.1, unavailable on Bookworm) + if [[ "$os_codename" == "trixie" || "$os_codename" == "sid" ]]; then + msg_info "Fetching Intel compute-runtime from GitHub for Arc support" - # libigdgmm - bundled in compute-runtime releases (Debian version often too old) - fetch_and_deploy_gh_release "libigdgmm12" "intel/compute-runtime" "binary" "latest" "" "libigdgmm12_*_amd64.deb" || true + # libigdgmm - bundled in compute-runtime releases + fetch_and_deploy_gh_release "libigdgmm12" "intel/compute-runtime" "binary" "latest" "" 
"libigdgmm12_*_amd64.deb" || true - # Intel Graphics Compiler (note: packages have -2 suffix) - fetch_and_deploy_gh_release "intel-igc-core" "intel/intel-graphics-compiler" "binary" "latest" "" "intel-igc-core-2_*_amd64.deb" || true - fetch_and_deploy_gh_release "intel-igc-opencl" "intel/intel-graphics-compiler" "binary" "latest" "" "intel-igc-opencl-2_*_amd64.deb" || true + # Intel Graphics Compiler (note: packages have -2 suffix) + fetch_and_deploy_gh_release "intel-igc-core" "intel/intel-graphics-compiler" "binary" "latest" "" "intel-igc-core-2_*_amd64.deb" || true + fetch_and_deploy_gh_release "intel-igc-opencl" "intel/intel-graphics-compiler" "binary" "latest" "" "intel-igc-opencl-2_*_amd64.deb" || true - # Compute Runtime (depends on IGC and gmmlib) - fetch_and_deploy_gh_release "intel-opencl-icd" "intel/compute-runtime" "binary" "latest" "" "intel-opencl-icd_*_amd64.deb" || true - fetch_and_deploy_gh_release "intel-level-zero-gpu" "intel/compute-runtime" "binary" "latest" "" "libze-intel-gpu1_*_amd64.deb" || true + # Compute Runtime (depends on IGC and gmmlib) + fetch_and_deploy_gh_release "intel-opencl-icd" "intel/compute-runtime" "binary" "latest" "" "intel-opencl-icd_*_amd64.deb" || true + fetch_and_deploy_gh_release "intel-level-zero-gpu" "intel/compute-runtime" "binary" "latest" "" "libze-intel-gpu1_*_amd64.deb" || true + fi $STD apt -y install \ intel-media-va-driver-non-free \ ocl-icd-libopencl1 \ libvpl2 \ + libmfx-gen1.2 \ vainfo \ intel-gpu-tools 2>/dev/null || msg_warn "Some Intel Arc packages failed" + + # Bookworm has compatible versions of these packages in repos + [[ "$os_codename" == "bookworm" ]] && $STD apt -y install intel-opencl-icd libigdgmm12 2>/dev/null || true fi msg_ok "Intel Arc GPU configured" @@ -3805,13 +4047,25 @@ NVIDIA_PIN if $STD apt-get -y -o Dpkg::Options::="--force-confold" install --no-install-recommends $nvidia_pkgs 2>/dev/null; then msg_ok "Installed version-matched NVIDIA libraries" else - # Fallback to Ubuntu repo 
packages - $STD apt-get -y -o Dpkg::Options::="--force-confold" install --no-install-recommends libnvidia-decode libnvidia-encode nvidia-utils 2>/dev/null || msg_warn "NVIDIA installation failed" + # Fallback to Ubuntu repo packages with versioned nvidia-utils + msg_warn "CUDA repo install failed - trying Ubuntu native packages (nvidia-utils-${nvidia_major_version})" + if $STD apt-get -y -o Dpkg::Options::="--force-confold" install --no-install-recommends \ + libnvidia-decode-${nvidia_major_version} libnvidia-encode-${nvidia_major_version} nvidia-utils-${nvidia_major_version} 2>/dev/null; then + msg_ok "Installed Ubuntu NVIDIA packages (${nvidia_major_version})" + else + msg_warn "NVIDIA driver installation failed - please install manually: apt install nvidia-utils-${nvidia_major_version}" + fi fi else msg_warn "No NVIDIA packages for version ${nvidia_major_version}.x in CUDA repo" - # Fallback to Ubuntu repo packages - $STD apt-get -y -o Dpkg::Options::="--force-confold" install --no-install-recommends libnvidia-decode libnvidia-encode nvidia-utils 2>/dev/null || msg_warn "NVIDIA installation failed" + # Fallback to Ubuntu repo packages with versioned nvidia-utils + msg_info "Trying Ubuntu native packages (nvidia-utils-${nvidia_major_version})" + if $STD apt-get -y -o Dpkg::Options::="--force-confold" install --no-install-recommends \ + libnvidia-decode-${nvidia_major_version} libnvidia-encode-${nvidia_major_version} nvidia-utils-${nvidia_major_version} 2>/dev/null; then + msg_ok "Installed Ubuntu NVIDIA packages (${nvidia_major_version})" + else + msg_warn "NVIDIA driver installation failed - please install manually: apt install nvidia-utils-${nvidia_major_version}" + fi fi fi @@ -3836,6 +4090,7 @@ Types: deb URIs: http://deb.debian.org/debian Suites: bullseye bullseye-updates Components: non-free +Signed-By: /usr/share/keyrings/debian-archive-keyring.gpg EOF ;; bookworm) @@ -3844,6 +4099,7 @@ Types: deb URIs: http://deb.debian.org/debian Suites: bookworm 
bookworm-updates Components: non-free non-free-firmware +Signed-By: /usr/share/keyrings/debian-archive-keyring.gpg EOF ;; trixie | sid) @@ -3852,11 +4108,13 @@ Types: deb URIs: http://deb.debian.org/debian Suites: trixie trixie-updates Components: non-free non-free-firmware +Signed-By: /usr/share/keyrings/debian-archive-keyring.gpg Types: deb URIs: http://deb.debian.org/debian-security Suites: trixie-security Components: non-free non-free-firmware +Signed-By: /usr/share/keyrings/debian-archive-keyring.gpg EOF ;; esac @@ -3879,11 +4137,13 @@ Types: deb URIs: http://deb.debian.org/debian Suites: bullseye bullseye-updates Components: non-free +Signed-By: /usr/share/keyrings/debian-archive-keyring.gpg Types: deb URIs: http://deb.debian.org/debian-security Suites: bullseye-security Components: non-free +Signed-By: /usr/share/keyrings/debian-archive-keyring.gpg EOF ;; bookworm) @@ -3892,11 +4152,13 @@ Types: deb URIs: http://deb.debian.org/debian Suites: bookworm bookworm-updates Components: non-free-firmware +Signed-By: /usr/share/keyrings/debian-archive-keyring.gpg Types: deb URIs: http://deb.debian.org/debian-security Suites: bookworm-security Components: non-free-firmware +Signed-By: /usr/share/keyrings/debian-archive-keyring.gpg EOF ;; trixie | sid) @@ -3905,21 +4167,19 @@ Types: deb URIs: http://deb.debian.org/debian Suites: trixie trixie-updates Components: non-free-firmware +Signed-By: /usr/share/keyrings/debian-archive-keyring.gpg Types: deb URIs: http://deb.debian.org/debian-security Suites: trixie-security Components: non-free-firmware +Signed-By: /usr/share/keyrings/debian-archive-keyring.gpg EOF ;; esac $STD apt -y update } -# ══════════════════════════════════════════════════════════════════════════════ -# Helper: Setup GPU device permissions -# ══════════════════════════════════════════════════════════════════════════════ - # ══════════════════════════════════════════════════════════════════════════════ # Helper: Setup GPU device permissions # 
══════════════════════════════════════════════════════════════════════════════ @@ -4222,10 +4482,11 @@ IP_FILE="/run/local-ip.env" mkdir -p "$(dirname "$IP_FILE")" get_current_ip() { - local targets=("8.8.8.8" "1.1.1.1" "192.168.1.1" "10.0.0.1" "172.16.0.1" "default") local ip - for target in "${targets[@]}"; do + # Try IPv4 targets first + local ipv4_targets=("8.8.8.8" "1.1.1.1" "192.168.1.1" "10.0.0.1" "172.16.0.1" "default") + for target in "${ipv4_targets[@]}"; do if [[ "$target" == "default" ]]; then ip=$(ip route get 1 2>/dev/null | awk '{for(i=1;i<=NF;i++) if ($i=="src") print $(i+1)}') else @@ -4237,6 +4498,23 @@ get_current_ip() { fi done + # IPv6 fallback: Try direct interface lookup for eth0 + ip=$(ip -6 addr show eth0 scope global 2>/dev/null | awk '/inet6 / {print $2}' | cut -d/ -f1 | head -n1) + if [[ -n "$ip" && "$ip" =~ : ]]; then + echo "$ip" + return 0 + fi + + # IPv6 fallback: Use routing table with IPv6 targets (Google DNS, Cloudflare DNS) + local ipv6_targets=("2001:4860:4860::8888" "2606:4700:4700::1111") + for target in "${ipv6_targets[@]}"; do + ip=$(ip -6 route get "$target" 2>/dev/null | awk '{for(i=1;i<=NF;i++) if ($i=="src") print $(i+1)}') + if [[ -n "$ip" && "$ip" =~ : ]]; then + echo "$ip" + return 0 + fi + done + return 1 } @@ -4275,57 +4553,37 @@ EOF } # ------------------------------------------------------------------------------ -# Installs or updates MariaDB from official repo. +# Installs or updates MariaDB. # # Description: +# - Uses Debian/Ubuntu distribution packages by default (most reliable) +# - Only uses official MariaDB repository when a specific version is requested # - Detects current MariaDB version and replaces it if necessary # - Preserves existing database data -# - Dynamically determines latest GA version if "latest" is given # # Variables: -# MARIADB_VERSION - MariaDB version to install (e.g. 
10.11, latest) (default: latest) +# MARIADB_VERSION - MariaDB version to install (optional) +# - Not set or "latest": Uses distribution packages (recommended) +# - Specific version (e.g. "11.4", "12.2"): Uses MariaDB official repo # ------------------------------------------------------------------------------ setup_mariadb() { local MARIADB_VERSION="${MARIADB_VERSION:-latest}" + local USE_DISTRO_PACKAGES=false # Ensure non-interactive mode for all apt operations export DEBIAN_FRONTEND=noninteractive export NEEDRESTART_MODE=a export NEEDRESTART_SUSPEND=1 - # Resolve "latest" to actual version - if [[ "$MARIADB_VERSION" == "latest" ]]; then - if ! curl -fsI --max-time 10 http://mirror.mariadb.org/repo/ >/dev/null 2>&1; then - msg_warn "MariaDB mirror not reachable - trying mariadb_repo_setup fallback" - # Try using official mariadb_repo_setup script as fallback - if curl -fsSL --max-time 15 https://r.mariadb.com/downloads/mariadb_repo_setup 2>/dev/null | bash -s -- --skip-verify >/dev/null 2>&1; then - msg_ok "MariaDB repository configured via mariadb_repo_setup" - # Extract version from configured repo - MARIADB_VERSION=$(grep -oP 'repo/\K[0-9]+\.[0-9]+\.[0-9]+' /etc/apt/sources.list.d/mariadb.list 2>/dev/null | head -n1 || echo "12.2") - else - msg_warn "mariadb_repo_setup failed - using hardcoded fallback version" - MARIADB_VERSION="12.2" - fi - else - MARIADB_VERSION=$(curl -fsSL --max-time 15 http://mirror.mariadb.org/repo/ 2>/dev/null | - grep -Eo '[0-9]+\.[0-9]+\.[0-9]+/' | - grep -vE 'rc/|rolling/' | - sed 's|/||' | - sort -Vr | - head -n1 || echo "") - - if [[ -z "$MARIADB_VERSION" ]]; then - msg_warn "Could not parse latest GA MariaDB version from mirror - trying mariadb_repo_setup" - if curl -fsSL --max-time 15 https://r.mariadb.com/downloads/mariadb_repo_setup 2>/dev/null | bash -s -- --skip-verify >/dev/null 2>&1; then - msg_ok "MariaDB repository configured via mariadb_repo_setup" - MARIADB_VERSION=$(grep -oP 'repo/\K[0-9]+\.[0-9]+\.[0-9]+' 
/etc/apt/sources.list.d/mariadb.list 2>/dev/null | head -n1 || echo "12.2") - else - msg_warn "mariadb_repo_setup failed - using hardcoded fallback version" - MARIADB_VERSION="12.2" - fi - fi - fi + # Determine installation method: + # - "latest" or empty: Use distribution packages (avoids mirror issues) + # - Specific version: Use MariaDB official repository + if [[ "$MARIADB_VERSION" == "latest" || -z "$MARIADB_VERSION" ]]; then + USE_DISTRO_PACKAGES=true + msg_info "Setup MariaDB (distribution packages)" + else + msg_info "Setup MariaDB $MARIADB_VERSION (official repository)" fi # Get currently installed version @@ -4333,17 +4591,105 @@ setup_mariadb() { CURRENT_VERSION=$(is_tool_installed "mariadb" 2>/dev/null) || true # Pre-configure debconf to prevent any interactive prompts during install/upgrade - local MARIADB_MAJOR_MINOR - MARIADB_MAJOR_MINOR=$(echo "$MARIADB_VERSION" | awk -F. '{print $1"."$2}') - if [[ -n "$MARIADB_MAJOR_MINOR" ]]; then - debconf-set-selections </dev/null | grep -E "Candidate:" | awk '{print $2}' | grep -oP '^\d+:\K\d+\.\d+\.\d+' || echo "") + + if [[ -n "$DISTRO_VERSION" ]]; then + # Compare versions - if current is higher, keep it + local CURRENT_MAJOR DISTRO_MAJOR + CURRENT_MAJOR=$(echo "$CURRENT_VERSION" | awk -F. '{print $1}') + DISTRO_MAJOR=$(echo "$DISTRO_VERSION" | awk -F. '{print $1}') + + if [[ "$CURRENT_MAJOR" -gt "$DISTRO_MAJOR" ]]; then + msg_warn "MariaDB $CURRENT_VERSION is already installed (higher than distro $DISTRO_VERSION)" + msg_warn "Keeping existing installation to preserve data integrity" + msg_warn "To use distribution packages, manually remove MariaDB first" + _setup_mariadb_runtime_dir + cache_installed_version "mariadb" "$CURRENT_VERSION" + msg_ok "Setup MariaDB $CURRENT_VERSION (existing installation kept)" + return 0 + fi + fi + fi + + # Install or upgrade MariaDB from distribution packages + if ! 
install_packages_with_retry "mariadb-server" "mariadb-client"; then + msg_error "Failed to install MariaDB packages from distribution" + return 1 + fi + + # Get installed version for caching + local INSTALLED_VERSION="" + INSTALLED_VERSION=$(mariadb --version 2>/dev/null | grep -oP '\d+\.\d+\.\d+' | head -n1 || echo "distro") + + # Configure runtime directory and finish + _setup_mariadb_runtime_dir + cache_installed_version "mariadb" "$INSTALLED_VERSION" + msg_ok "Setup MariaDB $INSTALLED_VERSION (distribution packages)" + return 0 + fi + + # ============================================================================ + # OFFICIAL REPOSITORY PATH (only when specific version requested) + # ============================================================================ + + # First, check if there's an old/broken repository that needs cleanup + if [[ -f /etc/apt/sources.list.d/mariadb.sources ]] || [[ -f /etc/apt/sources.list.d/mariadb.list ]]; then + local OLD_REPO_VERSION="" + OLD_REPO_VERSION=$(grep -oP 'repo/\K[0-9]+\.[0-9]+(\.[0-9]+)?' /etc/apt/sources.list.d/mariadb.sources 2>/dev/null || + grep -oP 'repo/\K[0-9]+\.[0-9]+(\.[0-9]+)?' /etc/apt/sources.list.d/mariadb.list 2>/dev/null || echo "") + + # Check if old repo points to a different version + if [[ -n "$OLD_REPO_VERSION" ]] && [[ "${OLD_REPO_VERSION%.*}" != "${MARIADB_VERSION%.*}" ]]; then + msg_info "Cleaning up old MariaDB repository (was: $OLD_REPO_VERSION, requested: $MARIADB_VERSION)" + cleanup_old_repo_files "mariadb" + $STD apt update || msg_warn "APT update had issues, continuing..." 
+ fi fi # Scenario 1: Already installed at target version - just update packages @@ -4384,9 +4730,7 @@ EOF remove_old_tool_version "mariadb" fi - # Scenario 3: Fresh install or version change - msg_info "Setup MariaDB $MARIADB_VERSION" - + # Scenario 3: Fresh install or version change with specific version # Prepare repository (cleanup + validation) prepare_repository_setup "mariadb" || { msg_error "Failed to prepare MariaDB repository" @@ -4414,21 +4758,37 @@ EOF # Install packages with retry logic if ! install_packages_with_retry "mariadb-server" "mariadb-client"; then - # Fallback: try without specific version - msg_warn "Failed to install MariaDB packages from upstream repo, trying distro fallback..." + # Fallback: try distribution packages + msg_warn "Failed to install MariaDB $MARIADB_VERSION from official repo, falling back to distribution packages..." cleanup_old_repo_files "mariadb" $STD apt update || { msg_warn "APT update also failed, continuing with cache" } - install_packages_with_retry "mariadb-server" "mariadb-client" || { - msg_error "Failed to install MariaDB packages (both upstream and distro)" + if install_packages_with_retry "mariadb-server" "mariadb-client"; then + local FALLBACK_VERSION="" + FALLBACK_VERSION=$(mariadb --version 2>/dev/null | grep -oP '\d+\.\d+\.\d+' | head -n1 || echo "distro") + msg_warn "Installed MariaDB $FALLBACK_VERSION from distribution instead of requested $MARIADB_VERSION" + _setup_mariadb_runtime_dir + cache_installed_version "mariadb" "$FALLBACK_VERSION" + msg_ok "Setup MariaDB $FALLBACK_VERSION (fallback to distribution packages)" + return 0 + else + msg_error "Failed to install MariaDB packages (both official repo and distribution)" return 1 - } + fi fi + _setup_mariadb_runtime_dir + cache_installed_version "mariadb" "$MARIADB_VERSION" + msg_ok "Setup MariaDB $MARIADB_VERSION" +} + +# ------------------------------------------------------------------------------ +# Helper function: Configure MariaDB runtime 
directory persistence +# ------------------------------------------------------------------------------ +_setup_mariadb_runtime_dir() { # Configure tmpfiles.d to ensure /run/mysqld directory is created on boot # This fixes the issue where MariaDB fails to start after container reboot - msg_info "Configuring MariaDB runtime directory persistence" # Create tmpfiles.d configuration with error handling if ! printf '# Ensure /run/mysqld directory exists with correct permissions for MariaDB\nd /run/mysqld 0755 mysql mysql -\n' >/etc/tmpfiles.d/mariadb.conf; then @@ -4448,11 +4808,6 @@ EOF msg_warn "mysql user not found - directory created with correct permissions but ownership not set" fi fi - - msg_ok "Configured MariaDB runtime directory persistence" - - cache_installed_version "mariadb" "$MARIADB_VERSION" - msg_ok "Setup MariaDB $MARIADB_VERSION" } # ------------------------------------------------------------------------------ @@ -4805,128 +5160,21 @@ EOF msg_ok "Setup MySQL $MYSQL_VERSION" } -# ------------------------------------------------------------------------------ -# Installs Node.js from official prebuilt binaries (alternative to NodeSource repo). -# -# Description: -# - Downloads official Node.js prebuilt binaries from nodejs.org -# - Extracts to /usr/local and creates symlinks -# - No package manager dependencies, no GPG issues -# -# Arguments: -# $1 - Node.js major version (e.g. 
22, 24) -# -# Returns: -# 0 on success, 1 on failure -# ------------------------------------------------------------------------------ - -function install_nodejs_from_binary() { - local NODE_VERSION="$1" - local ARCH - - # Determine system architecture - ARCH=$(dpkg --print-architecture 2>/dev/null || uname -m) - case "$ARCH" in - amd64 | x86_64) ARCH="x64" ;; - arm64 | aarch64) ARCH="arm64" ;; - armv7l | armhf) ARCH="armv7l" ;; - ppc64le) ARCH="ppc64le" ;; - s390x) ARCH="s390x" ;; - *) - msg_error "Unsupported architecture: $ARCH" - return 1 - ;; - esac - - msg_info "Fetching Node.js v${NODE_VERSION}.x binary metadata" - - # Fetch latest version info from nodejs.org - local LATEST_URL="https://nodejs.org/dist/latest-v${NODE_VERSION}.x/" - local VERSION_INFO - VERSION_INFO=$(curl -fsSL "$LATEST_URL" 2>/dev/null | grep -oP "node-v${NODE_VERSION}\.[0-9]+\.[0-9]+-linux-${ARCH}\.tar\.xz" | head -n1) - - if [[ -z "$VERSION_INFO" ]]; then - msg_error "Could not find Node.js v${NODE_VERSION}.x binary for linux-${ARCH}" - return 1 - fi - - local FULL_VERSION - FULL_VERSION=$(echo "$VERSION_INFO" | grep -oP "v${NODE_VERSION}\.[0-9]+\.[0-9]+") - local TARBALL_NAME="node-${FULL_VERSION}-linux-${ARCH}.tar.xz" - local TARBALL_URL="${LATEST_URL}${TARBALL_NAME}" - local CHECKSUM_URL="${LATEST_URL}SHASUMS256.txt" - - msg_info "Downloading Node.js ${FULL_VERSION} for linux-${ARCH}" - - # Download tarball and checksum - local TEMP_DIR - TEMP_DIR=$(mktemp -d) - trap "rm -rf '$TEMP_DIR'" EXIT - - if ! curl -fsSL -o "${TEMP_DIR}/${TARBALL_NAME}" "$TARBALL_URL"; then - msg_error "Failed to download Node.js binary from $TARBALL_URL" - return 1 - fi - - if ! curl -fsSL -o "${TEMP_DIR}/SHASUMS256.txt" "$CHECKSUM_URL"; then - msg_warn "Could not download checksum file - skipping verification" - else - # Verify checksum - msg_info "Verifying checksum" - cd "$TEMP_DIR" || return 1 - if ! 
grep "$TARBALL_NAME" SHASUMS256.txt | sha256sum -c - >/dev/null 2>&1; then - msg_error "Checksum verification failed for $TARBALL_NAME" - return 1 - fi - msg_ok "Checksum verified" - fi - - # Remove old Node.js installation if exists - if [[ -d "/usr/local/lib/nodejs" ]]; then - msg_info "Removing old Node.js installation" - rm -rf /usr/local/lib/nodejs - fi - - # Extract tarball - msg_info "Installing Node.js ${FULL_VERSION}" - mkdir -p /usr/local/lib/nodejs - tar -xJf "${TEMP_DIR}/${TARBALL_NAME}" -C /usr/local/lib/nodejs --strip-components=1 - - # Create symlinks - ln -sf /usr/local/lib/nodejs/bin/node /usr/local/bin/node - ln -sf /usr/local/lib/nodejs/bin/npm /usr/local/bin/npm - ln -sf /usr/local/lib/nodejs/bin/npx /usr/local/bin/npx - - # Verify installation - if ! /usr/local/bin/node --version >/dev/null 2>&1; then - msg_error "Node.js installation verification failed" - return 1 - fi - - local INSTALLED_VERSION - INSTALLED_VERSION=$(/usr/local/bin/node --version 2>/dev/null) - msg_ok "Installed Node.js ${INSTALLED_VERSION}" - - return 0 -} - # ------------------------------------------------------------------------------ # Installs Node.js and optional global modules. # # Description: -# - Installs specified Node.js version using NodeSource APT repo OR official binaries +# - Installs specified Node.js version using NodeSource APT repo # - Optionally installs or updates global npm modules # # Variables: -# NODE_VERSION - Node.js version to install (default: 24 LTS) -# NODE_MODULE - Comma-separated list of global modules (e.g. "yarn,@vue/cli@5.0.0") -# NODE_INSTALL_METHOD - Installation method: "nodesource" (default) or "binary" +# NODE_VERSION - Node.js version to install (default: 24 LTS) +# NODE_MODULE - Comma-separated list of global modules (e.g. 
"yarn,@vue/cli@5.0.0") # ------------------------------------------------------------------------------ function setup_nodejs() { local NODE_VERSION="${NODE_VERSION:-24}" local NODE_MODULE="${NODE_MODULE:-}" - local NODE_INSTALL_METHOD="${NODE_INSTALL_METHOD:-nodesource}" # ALWAYS clean up legacy installations first (nvm, etc.) to prevent conflicts cleanup_legacy_install "nodejs" @@ -4948,17 +5196,7 @@ function setup_nodejs() { if [[ -n "$CURRENT_NODE_VERSION" && "$CURRENT_NODE_VERSION" == "$NODE_VERSION" ]]; then msg_info "Update Node.js $NODE_VERSION" - # For binary method, check if there's a newer patch version available - if [[ "$NODE_INSTALL_METHOD" == "binary" ]]; then - local CURRENT_FULL_VERSION - CURRENT_FULL_VERSION=$(node -v 2>/dev/null) - msg_info "Current version: $CURRENT_FULL_VERSION - checking for updates" - - # Try to install latest patch version - install_nodejs_from_binary "$NODE_VERSION" || { - msg_warn "Failed to check for Node.js updates, keeping current version" - } - fi + ensure_apt_working || return 1 # Just update npm to latest $STD npm install -g npm@latest 2>/dev/null || true @@ -4971,7 +5209,7 @@ function setup_nodejs() { msg_info "Upgrade Node.js from $CURRENT_NODE_VERSION to $NODE_VERSION" remove_old_tool_version "nodejs" else - msg_info "Setup Node.js $NODE_VERSION (method: $NODE_INSTALL_METHOD)" + msg_info "Setup Node.js $NODE_VERSION" fi # Remove ALL Debian nodejs packages BEFORE adding NodeSource repo @@ -4985,42 +5223,27 @@ function setup_nodejs() { # Remove any APT pinning (not needed) rm -f /etc/apt/preferences.d/nodesource 2>/dev/null || true - # Choose installation method - if [[ "$NODE_INSTALL_METHOD" == "binary" ]]; then - # Method 1: Install from official nodejs.org prebuilt binaries (no repo needed) - msg_info "Installing Node.js from official binaries" - ensure_dependencies curl ca-certificates + # Prepare repository (cleanup + validation) + prepare_repository_setup "nodesource" || { + msg_error "Failed to prepare 
Node.js repository" + return 1 + } - install_nodejs_from_binary "$NODE_VERSION" || { - msg_error "Failed to install Node.js from binary" - return 1 - } - else - # Method 2: Install from NodeSource APT repository (traditional method) - msg_info "Installing Node.js from NodeSource repository" + # Setup NodeSource repository + manage_tool_repository "nodejs" "$NODE_VERSION" "https://deb.nodesource.com/node_${NODE_VERSION}.x" "https://deb.nodesource.com/gpgkey/nodesource-repo.gpg.key" || { + msg_error "Failed to setup Node.js repository" + return 1 + } - # Prepare repository (cleanup + validation) - prepare_repository_setup "nodesource" || { - msg_error "Failed to prepare Node.js repository" - return 1 - } + # Force APT cache refresh after repository setup + $STD apt update - # Setup NodeSource repository - manage_tool_repository "nodejs" "$NODE_VERSION" "https://deb.nodesource.com/node_${NODE_VERSION}.x" "https://deb.nodesource.com/gpgkey/nodesource-repo.gpg.key" || { - msg_error "Failed to setup Node.js repository" - return 1 - } + ensure_dependencies curl ca-certificates gnupg - # Force APT cache refresh after repository setup - $STD apt update - - ensure_dependencies curl ca-certificates gnupg - - install_packages_with_retry "nodejs" || { - msg_error "Failed to install Node.js ${NODE_VERSION} from NodeSource" - return 1 - } - fi + install_packages_with_retry "nodejs" || { + msg_error "Failed to install Node.js ${NODE_VERSION} from NodeSource" + return 1 + } # Verify Node.js was installed correctly if ! 
command -v node >/dev/null 2>&1; then @@ -5124,6 +5347,8 @@ function setup_nodejs() { # - Adds Sury PHP repo if needed # - Installs default and user-defined modules # - Patches php.ini for CLI, Apache, and FPM as needed +# - Handles built-in modules gracefully (e.g., opcache in PHP 8.5+) +# - Skips unavailable packages without failing # # Variables: # PHP_VERSION - PHP version to install (default: 8.4) @@ -5134,6 +5359,17 @@ function setup_nodejs() { # PHP_UPLOAD_MAX_FILESIZE - (default: 128M) # PHP_POST_MAX_SIZE - (default: 128M) # PHP_MAX_EXECUTION_TIME - (default: 300) +# +# Notes on modules: +# - Base modules (always installed): bcmath, cli, curl, gd, intl, mbstring, +# readline, xml, zip, common +# - Extended modules (commonly needed): mysql, sqlite3, pgsql, redis, +# imagick, bz2, ldap, soap, imap, gmp, apcu +# - Some modules are built-in depending on PHP version: +# * PHP 8.5+: opcache is built-in (no separate package) +# * All versions: ctype, fileinfo, iconv, tokenizer, phar, posix, etc. 
+# are part of php-common +# - Unavailable modules are skipped with a warning, not an error # ------------------------------------------------------------------------------ function setup_php() { @@ -5145,23 +5381,69 @@ function setup_php() { DISTRO_ID=$(awk -F= '/^ID=/{print $2}' /etc/os-release | tr -d '"') DISTRO_CODENAME=$(awk -F= '/^VERSION_CODENAME=/{print $2}' /etc/os-release) - local DEFAULT_MODULES="bcmath,cli,curl,gd,intl,mbstring,opcache,readline,xml,zip" - local COMBINED_MODULES + # Parse version for compatibility checks + local PHP_MAJOR="${PHP_VERSION%%.*}" + local PHP_MINOR="${PHP_VERSION#*.}" + PHP_MINOR="${PHP_MINOR%%.*}" + + # Modules that are ALWAYS part of php-common (no separate package needed) + # These are either built-in or virtual packages provided by php-common + local BUILTIN_MODULES="calendar,ctype,exif,ffi,fileinfo,ftp,gettext,iconv,pdo,phar,posix,shmop,sockets,sysvmsg,sysvsem,sysvshm,tokenizer" + + # Modules that became built-in in specific PHP versions + # PHP 8.5+: opcache is now part of the core + local BUILTIN_85="" + if [[ "$PHP_MAJOR" -gt 8 ]] || [[ "$PHP_MAJOR" -eq 8 && "$PHP_MINOR" -ge 5 ]]; then + BUILTIN_85="opcache" + fi + + # Base modules - essential for most PHP applications + # Note: 'common' provides many built-in extensions + local BASE_MODULES="cli,common,bcmath,curl,dom,gd,gmp,intl,mbstring,readline,xml,zip" + + # Add opcache only for PHP < 8.5 (it's built-in starting from 8.5) + if [[ "$PHP_MAJOR" -lt 8 ]] || [[ "$PHP_MAJOR" -eq 8 && "$PHP_MINOR" -lt 5 ]]; then + BASE_MODULES="${BASE_MODULES},opcache" + fi + + # Extended default modules - commonly needed by web applications + # These cover ~90% of typical use cases without bloat + local EXTENDED_MODULES="mysql,sqlite3,pgsql,redis,imagick,bz2,apcu" + + local COMBINED_MODULES="${BASE_MODULES},${EXTENDED_MODULES}" local PHP_MEMORY_LIMIT="${PHP_MEMORY_LIMIT:-512M}" local PHP_UPLOAD_MAX_FILESIZE="${PHP_UPLOAD_MAX_FILESIZE:-128M}" local 
PHP_POST_MAX_SIZE="${PHP_POST_MAX_SIZE:-128M}" local PHP_MAX_EXECUTION_TIME="${PHP_MAX_EXECUTION_TIME:-300}" - # Merge default + user-defined modules + # Merge with user-defined modules if [[ -n "$PHP_MODULE" ]]; then - COMBINED_MODULES="${DEFAULT_MODULES},${PHP_MODULE}" - else - COMBINED_MODULES="${DEFAULT_MODULES}" + COMBINED_MODULES="${COMBINED_MODULES},${PHP_MODULE}" fi + # Filter out built-in modules (they don't have separate packages) + local FILTERED_MODULES="" + IFS=',' read -ra ALL_MODULES <<<"$COMBINED_MODULES" + for mod in "${ALL_MODULES[@]}"; do + mod=$(echo "$mod" | tr -d '[:space:]') + [[ -z "$mod" ]] && continue + + # Skip if it's a known built-in module + if echo ",$BUILTIN_MODULES,$BUILTIN_85," | grep -qi ",$mod,"; then + continue + fi + + # Add to filtered list + if [[ -z "$FILTERED_MODULES" ]]; then + FILTERED_MODULES="$mod" + else + FILTERED_MODULES="${FILTERED_MODULES},$mod" + fi + done + # Deduplicate - COMBINED_MODULES=$(echo "$COMBINED_MODULES" | tr ',' '\n' | awk '!seen[$0]++' | paste -sd, -) + COMBINED_MODULES=$(echo "$FILTERED_MODULES" | tr ',' '\n' | awk '!seen[$0]++' | paste -sd, -) # Get current PHP-CLI version local CURRENT_PHP="" @@ -5198,7 +5480,8 @@ EOF # Ubuntu: Use ondrej/php PPA msg_info "Adding ondrej/php PPA for Ubuntu" $STD apt install -y software-properties-common - $STD add-apt-repository -y ppa:ondrej/php + # Don't use $STD for add-apt-repository as it uses background processes + add-apt-repository -y ppa:ondrej/php >>"$(get_active_logfile)" 2>&1 else # Debian: Use Sury repository manage_tool_repository "php" "$PHP_VERSION" "" "https://packages.sury.org/debsuryorg-archive-keyring.deb" || { @@ -5206,7 +5489,6 @@ EOF return 1 } fi - ensure_apt_working || return 1 $STD apt update @@ -5219,16 +5501,49 @@ EOF return 1 fi - # Build module list - without version pinning (preferences.d handles it) + # Build module list - verify each package exists before adding local MODULE_LIST="php${PHP_VERSION}" + local SKIPPED_MODULES="" 
IFS=',' read -ra MODULES <<<"$COMBINED_MODULES"
  for mod in "${MODULES[@]}"; do
-    MODULE_LIST+=" php${PHP_VERSION}-${mod}"
+    mod=$(echo "$mod" | tr -d '[:space:]')
+    [[ -z "$mod" ]] && continue
+
+    local pkg_name="php${PHP_VERSION}-${mod}"
+
+    # Check if package exists in repository
+    if apt-cache show "$pkg_name" &>/dev/null; then
+      MODULE_LIST+=" $pkg_name"
+    else
+      # Package doesn't exist - could be built-in or renamed
+      if [[ -z "$SKIPPED_MODULES" ]]; then
+        SKIPPED_MODULES="$mod"
+      else
+        SKIPPED_MODULES="${SKIPPED_MODULES}, $mod"
+      fi
+    fi
  done

+  # Log skipped modules (informational, not an error)
+  if [[ -n "$SKIPPED_MODULES" ]]; then
+    msg_info "Skipping unavailable/built-in modules: $SKIPPED_MODULES"
+  fi
+
  if [[ "$PHP_FPM" == "YES" ]]; then
-    MODULE_LIST+=" php${PHP_VERSION}-fpm"
+    if apt-cache show "php${PHP_VERSION}-fpm" &>/dev/null; then
+      MODULE_LIST+=" php${PHP_VERSION}-fpm"
+    else
+      msg_warn "php${PHP_VERSION}-fpm not available"
+    fi
+    # Create systemd override for PHP-FPM to fix runtime directory issues in LXC containers
+    mkdir -p /etc/systemd/system/php${PHP_VERSION}-fpm.service.d/
+    cat <<EOF >/etc/systemd/system/php${PHP_VERSION}-fpm.service.d/override.conf
+[Service]
+RuntimeDirectory=php
+RuntimeDirectoryMode=0755
+EOF
+    $STD systemctl daemon-reload
  fi

  # install apache2 with PHP support if requested
@@ -5247,38 +5562,31 @@ EOF
  # Install PHP packages (pinning via preferences.d ensures correct version)
  msg_info "Installing PHP ${PHP_VERSION} packages"
-  if ! install_packages_with_retry $MODULE_LIST; then
-    msg_warn "Failed to install PHP packages, attempting individual installation"
+
+  # First attempt: Install all verified packages at once
+  if ! $STD apt install -y $MODULE_LIST 2>/dev/null; then
+    msg_warn "Bulk installation failed, attempting individual installation"

    # Install main package first (critical)
-    install_packages_with_retry "php${PHP_VERSION}" || {
+    if !
$STD apt install -y "php${PHP_VERSION}" 2>/dev/null; then msg_error "Failed to install php${PHP_VERSION}" return 1 - } + fi # Try to install Apache module individually if requested if [[ "$PHP_APACHE" == "YES" ]]; then - install_packages_with_retry "libapache2-mod-php${PHP_VERSION}" || { + $STD apt install -y "libapache2-mod-php${PHP_VERSION}" 2>/dev/null || { msg_warn "Could not install libapache2-mod-php${PHP_VERSION}" } fi - # Try to install modules individually - skip those that don't exist - for pkg in "${MODULES[@]}"; do - if apt-cache search "^php${PHP_VERSION}-${pkg}\$" 2>/dev/null | grep -q "^php${PHP_VERSION}-${pkg}"; then - install_packages_with_retry "php${PHP_VERSION}-${pkg}" || { - msg_warn "Could not install php${PHP_VERSION}-${pkg}" - } - fi + # Try to install each package individually + for pkg in $MODULE_LIST; do + [[ "$pkg" == "php${PHP_VERSION}" ]] && continue # Already installed + $STD apt install -y "$pkg" 2>/dev/null || { + msg_warn "Could not install $pkg - continuing without it" + } done - - if [[ "$PHP_FPM" == "YES" ]]; then - if apt-cache search "^php${PHP_VERSION}-fpm\$" 2>/dev/null | grep -q "^php${PHP_VERSION}-fpm"; then - install_packages_with_retry "php${PHP_VERSION}-fpm" || { - msg_warn "Could not install php${PHP_VERSION}-fpm" - } - fi - fi fi cache_installed_version "php" "$PHP_VERSION" @@ -5540,8 +5848,8 @@ function setup_postgresql_db() { fi msg_info "Setting up PostgreSQL Database" - $STD sudo -u postgres psql -c "CREATE ROLE \"$PG_DB_USER\" WITH LOGIN PASSWORD '$PG_DB_PASS';" - $STD sudo -u postgres psql -c "CREATE DATABASE \"$PG_DB_NAME\" WITH OWNER \"$PG_DB_USER\" ENCODING 'UTF8' TEMPLATE template0;" + $STD sudo -u postgres psql -c "CREATE ROLE $PG_DB_USER WITH LOGIN PASSWORD '$PG_DB_PASS';" + $STD sudo -u postgres psql -c "CREATE DATABASE $PG_DB_NAME WITH OWNER $PG_DB_USER ENCODING 'UTF8' TEMPLATE template0;" # Install extensions (comma-separated) if [[ -n "${PG_DB_EXTENSIONS:-}" ]]; then @@ -5554,26 +5862,26 @@ function 
setup_postgresql_db() { # ALTER ROLE settings for Django/Rails compatibility (unless skipped) if [[ "${PG_DB_SKIP_ALTER_ROLE:-}" != "true" ]]; then - $STD sudo -u postgres psql -c "ALTER ROLE \"$PG_DB_USER\" SET client_encoding TO 'utf8';" - $STD sudo -u postgres psql -c "ALTER ROLE \"$PG_DB_USER\" SET default_transaction_isolation TO 'read committed';" - $STD sudo -u postgres psql -c "ALTER ROLE \"$PG_DB_USER\" SET timezone TO 'UTC';" + $STD sudo -u postgres psql -c "ALTER ROLE $PG_DB_USER SET client_encoding TO 'utf8';" + $STD sudo -u postgres psql -c "ALTER ROLE $PG_DB_USER SET default_transaction_isolation TO 'read committed';" + $STD sudo -u postgres psql -c "ALTER ROLE $PG_DB_USER SET timezone TO 'UTC';" fi # Schema permissions (if requested) if [[ "${PG_DB_SCHEMA_PERMS:-}" == "true" ]]; then - $STD sudo -u postgres psql -c "GRANT ALL PRIVILEGES ON DATABASE \"$PG_DB_NAME\" TO \"$PG_DB_USER\";" - $STD sudo -u postgres psql -c "ALTER USER \"$PG_DB_USER\" CREATEDB;" - $STD sudo -u postgres psql -d "$PG_DB_NAME" -c "GRANT ALL ON SCHEMA public TO \"$PG_DB_USER\";" - $STD sudo -u postgres psql -d "$PG_DB_NAME" -c "GRANT CREATE ON SCHEMA public TO \"$PG_DB_USER\";" - $STD sudo -u postgres psql -d "$PG_DB_NAME" -c "ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON TABLES TO \"$PG_DB_USER\";" - $STD sudo -u postgres psql -d "$PG_DB_NAME" -c "ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON SEQUENCES TO \"$PG_DB_USER\";" + $STD sudo -u postgres psql -c "GRANT ALL PRIVILEGES ON DATABASE $PG_DB_NAME TO $PG_DB_USER;" + $STD sudo -u postgres psql -c "ALTER USER $PG_DB_USER CREATEDB;" + $STD sudo -u postgres psql -d "$PG_DB_NAME" -c "GRANT ALL ON SCHEMA public TO $PG_DB_USER;" + $STD sudo -u postgres psql -d "$PG_DB_NAME" -c "GRANT CREATE ON SCHEMA public TO $PG_DB_USER;" + $STD sudo -u postgres psql -d "$PG_DB_NAME" -c "ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON TABLES TO $PG_DB_USER;" + $STD sudo -u postgres psql -d "$PG_DB_NAME" -c "ALTER DEFAULT 
PRIVILEGES IN SCHEMA public GRANT ALL ON SEQUENCES TO $PG_DB_USER;"
  fi

  # Superuser grant (if requested - WARNING!)
  if [[ "${PG_DB_GRANT_SUPERUSER:-}" == "true" ]]; then
    msg_warn "Granting SUPERUSER privilege (security risk!)"
-    $STD sudo -u postgres psql -c "GRANT ALL PRIVILEGES ON DATABASE \"$PG_DB_NAME\" TO \"$PG_DB_USER\";"
-    $STD sudo -u postgres psql -c "ALTER USER \"$PG_DB_USER\" WITH SUPERUSER;"
+    $STD sudo -u postgres psql -c "GRANT ALL PRIVILEGES ON DATABASE $PG_DB_NAME to $PG_DB_USER;"
+    $STD sudo -u postgres psql -c "ALTER USER $PG_DB_USER WITH SUPERUSER;"
  fi

  # Save credentials
@@ -5796,6 +6104,306 @@ function setup_ruby() {
  msg_ok "Setup Ruby $RUBY_VERSION"
}

+# ------------------------------------------------------------------------------
+# Installs or updates MeiliSearch search engine.
+#
+# Description:
+# - Fresh install: Downloads binary, creates config/service, starts
+# - Update: Checks for new release, updates binary if available
+# - Waits for service to be ready before returning
+# - Exports API keys for use by caller
+#
+# Variables:
+#   MEILISEARCH_BIND - Bind address (default: 127.0.0.1:7700)
+#   MEILISEARCH_ENV - Environment: production/development (default: production)
+#   MEILISEARCH_DB_PATH - Database path (default: /var/lib/meilisearch/data)
+#
+# Exports:
+#   MEILISEARCH_MASTER_KEY - The master key for admin access
+#   MEILISEARCH_API_KEY - The default search API key
+#   MEILISEARCH_API_KEY_UID - The UID of the default API key
+#
+# Example (install script):
+#   setup_meilisearch
+#
+# Example (CT update_script):
+#   setup_meilisearch
+# ------------------------------------------------------------------------------
+
+function setup_meilisearch() {
+  local MEILISEARCH_BIND="${MEILISEARCH_BIND:-127.0.0.1:7700}"
+  local MEILISEARCH_ENV="${MEILISEARCH_ENV:-production}"
+  local MEILISEARCH_DB_PATH="${MEILISEARCH_DB_PATH:-/var/lib/meilisearch/data}"
+  local MEILISEARCH_DUMP_DIR="${MEILISEARCH_DUMP_DIR:-/var/lib/meilisearch/dumps}"
+  local MEILISEARCH_SNAPSHOT_DIR="${MEILISEARCH_SNAPSHOT_DIR:-/var/lib/meilisearch/snapshots}"
+
+  # Get bind address for health checks
+  local MEILISEARCH_HOST="${MEILISEARCH_BIND%%:*}"
+  local MEILISEARCH_PORT="${MEILISEARCH_BIND##*:}"
+  [[ "$MEILISEARCH_HOST" == "0.0.0.0" ]] && MEILISEARCH_HOST="127.0.0.1"
+
+  # Update mode: MeiliSearch already installed
+  if [[ -f /usr/bin/meilisearch ]]; then
+    if check_for_gh_release "meilisearch" "meilisearch/meilisearch"; then
+      msg_info "Updating MeiliSearch"
+
+      # Get current and new version for compatibility check
+      local CURRENT_VERSION NEW_VERSION
+      CURRENT_VERSION=$(/usr/bin/meilisearch --version 2>/dev/null | grep -oE '[0-9]+\.[0-9]+\.[0-9]+' | head -1) || CURRENT_VERSION="0.0.0"
+      NEW_VERSION="${CHECK_UPDATE_RELEASE#v}"
+
+      # Extract major.minor for comparison (Meilisearch requires dump/restore between minor versions)
+      local CURRENT_MAJOR_MINOR NEW_MAJOR_MINOR
+      CURRENT_MAJOR_MINOR=$(echo "$CURRENT_VERSION" | cut -d. -f1,2)
+      NEW_MAJOR_MINOR=$(echo "$NEW_VERSION" | cut -d. -f1,2)
+
+      # Determine if migration is needed (different major.minor = incompatible DB format)
+      local NEEDS_MIGRATION=false
+      if [[ "$CURRENT_MAJOR_MINOR" != "$NEW_MAJOR_MINOR" ]]; then
+        NEEDS_MIGRATION=true
+        msg_info "MeiliSearch version change detected (${CURRENT_VERSION} → ${NEW_VERSION}), preparing data migration"
+      fi
+
+      # Read config values for dump/restore
+      local MEILI_HOST MEILI_PORT MEILI_MASTER_KEY MEILI_DUMP_DIR
+      MEILI_HOST="${MEILISEARCH_HOST:-127.0.0.1}"
+      MEILI_PORT="${MEILISEARCH_PORT:-7700}"
+      MEILI_DUMP_DIR="${MEILISEARCH_DUMP_DIR:-/var/lib/meilisearch/dumps}"
+      MEILI_MASTER_KEY=$(grep -E "^master_key\s*=" /etc/meilisearch.toml 2>/dev/null | sed 's/.*=\s*"\(.*\)"/\1/' | tr -d ' ')
+
+      # Create dump before update if migration is needed
+      local DUMP_UID=""
+      if [[ "$NEEDS_MIGRATION" == "true" ]] && [[ -n "$MEILI_MASTER_KEY" ]]; then
+        msg_info "Creating MeiliSearch data dump before upgrade"
+
+        # Trigger dump creation
+        local DUMP_RESPONSE
+        DUMP_RESPONSE=$(curl -s -X POST "http://${MEILI_HOST}:${MEILI_PORT}/dumps" \
+          -H "Authorization: Bearer ${MEILI_MASTER_KEY}" \
+          -H "Content-Type: application/json" 2>/dev/null) || true
+
+        # The initial response only contains taskUid, not dumpUid
+        # dumpUid is only available after the task completes
+        local TASK_UID
+        TASK_UID=$(echo "$DUMP_RESPONSE" | grep -oP '"taskUid":\s*\K[0-9]+' || true)
+
+        if [[ -n "$TASK_UID" ]]; then
+          msg_info "Waiting for dump task ${TASK_UID} to complete..."
+          local MAX_WAIT=120
+          local WAITED=0
+          local TASK_RESULT=""
+
+          while [[ $WAITED -lt $MAX_WAIT ]]; do
+            TASK_RESULT=$(curl -s "http://${MEILI_HOST}:${MEILI_PORT}/tasks/${TASK_UID}" \
+              -H "Authorization: Bearer ${MEILI_MASTER_KEY}" 2>/dev/null) || true
+
+            local TASK_STATUS
+            TASK_STATUS=$(echo "$TASK_RESULT" | grep -oP '"status":\s*"\K[^"]+' || true)
+
+            if [[ "$TASK_STATUS" == "succeeded" ]]; then
+              # Extract dumpUid from the completed task details
+              DUMP_UID=$(echo "$TASK_RESULT" | grep -oP '"dumpUid":\s*"\K[^"]+' || true)
+              if [[ -n "$DUMP_UID" ]]; then
+                msg_ok "MeiliSearch dump created successfully: ${DUMP_UID}"
+              else
+                msg_warn "Dump task succeeded but could not extract dumpUid"
+              fi
+              break
+            elif [[ "$TASK_STATUS" == "failed" ]]; then
+              local ERROR_MSG
+              ERROR_MSG=$(echo "$TASK_RESULT" | grep -oP '"message":\s*"\K[^"]+' || echo "Unknown error")
+              msg_warn "MeiliSearch dump failed: ${ERROR_MSG}"
+              break
+            fi
+            sleep 2
+            WAITED=$((WAITED + 2))
+          done
+
+          if [[ $WAITED -ge $MAX_WAIT ]]; then
+            msg_warn "MeiliSearch dump timed out after ${MAX_WAIT}s"
+          fi
+        else
+          msg_warn "Could not trigger MeiliSearch dump (no taskUid in response)"
+          msg_info "Response was: ${DUMP_RESPONSE:-empty}"
+        fi
+      fi
+
+      # If migration is needed but dump failed, we have options:
+      # 1. Abort the update (safest, but annoying)
+      # 2. Backup data directory and proceed (allows manual recovery)
+      # 3. Just proceed and hope for the best (dangerous)
+      # We choose option 2: backup and proceed with warning
+      if [[ "$NEEDS_MIGRATION" == "true" ]] && [[ -z "$DUMP_UID" ]]; then
+        local MEILI_DB_PATH
+        MEILI_DB_PATH=$(grep -E "^db_path\s*=" /etc/meilisearch.toml 2>/dev/null | sed 's/.*=\s*"\(.*\)"/\1/' | tr -d ' ')
+        MEILI_DB_PATH="${MEILI_DB_PATH:-/var/lib/meilisearch/data}"
+
+        if [[ -d "$MEILI_DB_PATH" ]] && [[ -n "$(ls -A "$MEILI_DB_PATH" 2>/dev/null)" ]]; then
+          local BACKUP_PATH="${MEILI_DB_PATH}.backup.$(date +%Y%m%d%H%M%S)"
+          msg_warn "Backing up MeiliSearch data to ${BACKUP_PATH}"
+          mv "$MEILI_DB_PATH" "$BACKUP_PATH"
+          mkdir -p "$MEILI_DB_PATH"
+          msg_info "Data backed up. After update, you may need to reindex your data."
+          msg_info "Old data is preserved at: ${BACKUP_PATH}"
+        fi
+      fi
+
+      # Stop service and update binary
+      systemctl stop meilisearch
+      fetch_and_deploy_gh_release "meilisearch" "meilisearch/meilisearch" "binary"
+
+      # If migration needed and dump was created, remove old data and import dump
+      if [[ "$NEEDS_MIGRATION" == "true" ]] && [[ -n "$DUMP_UID" ]]; then
+        local MEILI_DB_PATH
+        MEILI_DB_PATH=$(grep -E "^db_path\s*=" /etc/meilisearch.toml 2>/dev/null | sed 's/.*=\s*"\(.*\)"/\1/' | tr -d ' ')
+        MEILI_DB_PATH="${MEILI_DB_PATH:-/var/lib/meilisearch/data}"
+
+        msg_info "Removing old MeiliSearch database for migration"
+        rm -rf "${MEILI_DB_PATH:?}"/*
+
+        # Import dump using CLI flag (this is the supported method)
+        local DUMP_FILE="${MEILI_DUMP_DIR}/${DUMP_UID}.dump"
+        if [[ -f "$DUMP_FILE" ]]; then
+          msg_info "Importing dump: ${DUMP_FILE}"
+
+          # Start meilisearch with --import-dump flag
+          # This is a one-time import that happens during startup
+          /usr/bin/meilisearch --config-file-path /etc/meilisearch.toml --import-dump "$DUMP_FILE" &
+          local MEILI_PID=$!
+
+          # Wait for meilisearch to become healthy (import happens during startup)
+          msg_info "Waiting for MeiliSearch to import and start..."
+          local MAX_WAIT=300
+          local WAITED=0
+          while [[ $WAITED -lt $MAX_WAIT ]]; do
+            if curl -sf "http://${MEILI_HOST}:${MEILI_PORT}/health" &>/dev/null; then
+              msg_ok "MeiliSearch is healthy after import"
+              break
+            fi
+            # Check if process is still running
+            if ! kill -0 $MEILI_PID 2>/dev/null; then
+              msg_warn "MeiliSearch process exited during import"
+              break
+            fi
+            sleep 3
+            WAITED=$((WAITED + 3))
+          done
+
+          # Stop the manual process
+          kill $MEILI_PID 2>/dev/null || true
+          sleep 2
+
+          # Start via systemd for proper management
+          systemctl start meilisearch
+
+          if systemctl is-active --quiet meilisearch; then
+            msg_ok "MeiliSearch migrated successfully"
+          else
+            msg_warn "MeiliSearch failed to start after migration - check logs with: journalctl -u meilisearch"
+          fi
+        else
+          msg_warn "Dump file not found: ${DUMP_FILE}"
+          systemctl start meilisearch
+        fi
+      else
+        systemctl start meilisearch
+      fi
+
+      msg_ok "Updated MeiliSearch"
+    fi
+    return 0
+  fi
+
+  # Fresh install
+  msg_info "Setup MeiliSearch"
+
+  # Install binary
+  fetch_and_deploy_gh_release "meilisearch" "meilisearch/meilisearch" "binary" || {
+    msg_error "Failed to install MeiliSearch binary"
+    return 1
+  }
+
+  # Download default config
+  curl -fsSL https://raw.githubusercontent.com/meilisearch/meilisearch/latest/config.toml -o /etc/meilisearch.toml || {
+    msg_error "Failed to download MeiliSearch config"
+    return 1
+  }
+
+  # Generate master key
+  MEILISEARCH_MASTER_KEY=$(openssl rand -base64 12)
+  export MEILISEARCH_MASTER_KEY
+
+  # Configure
+  sed -i \
+    -e "s|^env =.*|env = \"${MEILISEARCH_ENV}\"|" \
+    -e "s|^# master_key =.*|master_key = \"${MEILISEARCH_MASTER_KEY}\"|" \
+    -e "s|^db_path =.*|db_path = \"${MEILISEARCH_DB_PATH}\"|" \
+    -e "s|^dump_dir =.*|dump_dir = \"${MEILISEARCH_DUMP_DIR}\"|" \
+    -e "s|^snapshot_dir =.*|snapshot_dir = \"${MEILISEARCH_SNAPSHOT_DIR}\"|" \
+    -e 's|^# no_analytics = true|no_analytics = true|' \
+    -e "s|^http_addr =.*|http_addr = \"${MEILISEARCH_BIND}\"|" \
+    /etc/meilisearch.toml
+
+  # Create data directories
+  mkdir -p "${MEILISEARCH_DB_PATH}" "${MEILISEARCH_DUMP_DIR}" "${MEILISEARCH_SNAPSHOT_DIR}"
+
+  # Create systemd service
+  cat <<EOF >/etc/systemd/system/meilisearch.service
+[Unit]
+Description=Meilisearch
+After=network.target
+
+[Service]
+ExecStart=/usr/bin/meilisearch --config-file-path /etc/meilisearch.toml
+Restart=always
+
+[Install]
+WantedBy=multi-user.target
+EOF
+
+  # Enable and start service
+  systemctl daemon-reload
+  systemctl enable -q --now meilisearch
+
+  # Wait for MeiliSearch to be ready (up to 30 seconds)
+  for i in {1..30}; do
+    if curl -s -o /dev/null -w "%{http_code}" "http://${MEILISEARCH_HOST}:${MEILISEARCH_PORT}/health" 2>/dev/null | grep -q "200"; then
+      break
+    fi
+    sleep 1
+  done
+
+  # Verify service is running
+  if ! systemctl is-active --quiet meilisearch; then
+    msg_error "MeiliSearch service failed to start"
+    return 1
+  fi
+
+  # Get API keys with retry logic
+  MEILISEARCH_API_KEY=""
+  for i in {1..10}; do
+    MEILISEARCH_API_KEY=$(curl -s -X GET "http://${MEILISEARCH_HOST}:${MEILISEARCH_PORT}/keys" \
+      -H "Authorization: Bearer ${MEILISEARCH_MASTER_KEY}" 2>/dev/null |
+      grep -o '"key":"[^"]*"' | head -n 1 | sed 's/"key":"//;s/"//') || true
+    [[ -n "$MEILISEARCH_API_KEY" ]] && break
+    sleep 2
+  done
+
+  MEILISEARCH_API_KEY_UID=$(curl -s -X GET "http://${MEILISEARCH_HOST}:${MEILISEARCH_PORT}/keys" \
+    -H "Authorization: Bearer ${MEILISEARCH_MASTER_KEY}" 2>/dev/null |
+    grep -o '"uid":"[^"]*"' | head -n 1 | sed 's/"uid":"//;s/"//') || true
+
+  export MEILISEARCH_API_KEY
+  export MEILISEARCH_API_KEY_UID
+
+  # Cache version
+  local MEILISEARCH_VERSION
+  MEILISEARCH_VERSION=$(/usr/bin/meilisearch --version 2>/dev/null | grep -oE '[0-9]+\.[0-9]+\.[0-9]+' | head -1) || true
+  cache_installed_version "meilisearch" "${MEILISEARCH_VERSION:-unknown}"
+
+  msg_ok "Setup MeiliSearch ${MEILISEARCH_VERSION:-}"
+}
+
+# ------------------------------------------------------------------------------
+#
Installs or upgrades ClickHouse database server. # @@ -6527,20 +7135,20 @@ EOF } # ------------------------------------------------------------------------------ -# Fetch and deploy archive from URL +# Fetch and deploy from URL # Downloads an archive (zip, tar.gz, or .deb) from a URL and extracts/installs it # -# Usage: fetch_and_deploy_archive "url" "directory" +# Usage: fetch_and_deploy_from_url "url" "directory" # url - URL to the archive (zip, tar.gz, or .deb) # directory - Destination path where the archive will be extracted # (not used for .deb packages) # # Examples: -# fetch_and_deploy_archive "https://example.com/app.tar.gz" "/opt/myapp" -# fetch_and_deploy_archive "https://example.com/app.zip" "/opt/myapp" -# fetch_and_deploy_archive "https://example.com/package.deb" "" +# fetch_and_deploy_from_url "https://example.com/app.tar.gz" "/opt/myapp" +# fetch_and_deploy_from_url "https://example.com/app.zip" "/opt/myapp" +# fetch_and_deploy_from_url "https://example.com/package.deb" "" # ------------------------------------------------------------------------------ -function fetch_and_deploy_archive() { +function fetch_and_deploy_from_url() { local url="$1" local directory="$2" @@ -6551,13 +7159,6 @@ function fetch_and_deploy_archive() { local filename="${url##*/}" - local archive_type="zip" - if [[ "$filename" == *.tar.gz || "$filename" == *.tgz ]]; then - archive_type="tar" - elif [[ "$filename" == *.deb ]]; then - archive_type="deb" - fi - msg_info "Downloading from $url" local tmpdir @@ -6572,6 +7173,28 @@ function fetch_and_deploy_archive() { return 1 } + # Auto-detect archive type using file description + local file_desc + file_desc=$(file -b "$tmpdir/$filename") + + local archive_type="unknown" + + if [[ "$file_desc" =~ gzip.*compressed|gzip\ compressed\ data ]]; then + archive_type="tar" + elif [[ "$file_desc" =~ Zip.*archive|ZIP\ archive ]]; then + archive_type="zip" + elif [[ "$file_desc" =~ Debian.*package|Debian\ binary\ package ]]; then + 
archive_type="deb" + elif [[ "$file_desc" =~ POSIX.*tar.*archive|tar\ archive ]]; then + archive_type="tar" + else + msg_error "Unsupported or unknown archive type: $file_desc" + rm -rf "$tmpdir" + return 1 + fi + + msg_info "Detected archive type: $archive_type (file type: $file_desc)" + if [[ "$archive_type" == "deb" ]]; then msg_info "Installing .deb package"