-
-
Save AnnoyingTechnology/95912b4efefc15db37af1bad3c7b4f37 to your computer and use it in GitHub Desktop.
#!/bin/bash
#
# Build Coral TPU kernel module in a throwaway LXC and install it on the host
# WITHOUT DKMS or build deps on the host.
#
# Fail fast: abort on command errors, unset variables, and pipeline failures.
set -euo pipefail

# --- Configuration (adapt storage/template names to your own Proxmox setup) ---
readonly CTID=999                                  # throwaway builder container ID
readonly STORAGE_PATH_BASE="/local-vms"
readonly CT_ROOT="${STORAGE_PATH_BASE}/subvol-${CTID}-disk-0"
readonly TEMPLATE_STORAGE="images"                 # storage holding the CT template tarball
readonly TEMPLATE_NAME="debian-13-standard_13.1-2_amd64.tar.zst"
readonly ROOTFS_STORAGE="local-vms"                # storage for the CT rootfs
readonly HOST_TPU_DIR="/tmp/tpu"                   # build artifacts land here on the host
readonly BRIDGE="vmbr0"
KVER="$(uname -r)"                                 # host kernel the modules are built for
readonly KVER
# --- Preconditions & stale-container cleanup ---------------------------------
# pct/pvesh require root; bail out early otherwise. Diagnostics go to stderr.
if [ "$(id -u)" -ne 0 ]; then
  echo "Run as root." >&2
  exit 1
fi

echo ">>> Using kernel: ${KVER}"
echo ">>> (Info) CT root would be at: ${CT_ROOT}"

mkdir -p "${HOST_TPU_DIR}"

# Remove any leftover builder container from a previous (possibly failed) run.
echo ">>> Cleaning up any old container ${CTID}…"
if pct status "${CTID}" &>/dev/null; then
  pct stop "${CTID}" 2>/dev/null || true
  pct destroy "${CTID}" --force || true
fi
# --- Create & start the throwaway build container ----------------------------
echo ">>> Creating LXC ${CTID}…"
# Quote the -net0 value so the expanded ${BRIDGE} can never be word-split.
pct create "${CTID}" "${TEMPLATE_STORAGE}:vztmpl/${TEMPLATE_NAME}" \
  -hostname tpu-builder \
  -memory 2048 \
  -cores 2 \
  -net0 "name=eth0,bridge=${BRIDGE},ip=dhcp" \
  -rootfs "${ROOTFS_STORAGE}:8" \
  -features nesting=1

echo ">>> Starting LXC ${CTID}…"
pct start "${CTID}"
# --- Build everything inside the container -----------------------------------
# env -i gives the CT shell a clean environment; DEBIAN_FRONTEND suppresses
# interactive debconf prompts. The single-quoted script below is expanded
# INSIDE the CT, so $(uname -r) there resolves in the container — which matches
# the host kernel, because LXC containers share the host's kernel.
echo ">>> Building inside container…"
pct exec "${CTID}" -- env -i LANG=C LC_ALL=C DEBIAN_FRONTEND=noninteractive bash -lc '
set -e
apt update
apt install -y \
  curl gnupg git build-essential ca-certificates \
  libdw1 libelf-dev bc flex bison
# Coral repo — provides the libedgetpu userspace package
mkdir -p /etc/apt/keyrings
curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg \
  | gpg --dearmor -o /etc/apt/keyrings/coral-edgetpu.gpg
echo "deb [signed-by=/etc/apt/keyrings/coral-edgetpu.gpg] https://packages.cloud.google.com/apt coral-edgetpu-stable main" \
  > /etc/apt/sources.list.d/coral-edgetpu.list
# Proxmox repo — provides the matching pve-headers for the running kernel
curl -fsSL https://enterprise.proxmox.com/debian/proxmox-release-trixie.gpg \
  -o /etc/apt/trusted.gpg.d/proxmox-release-trixie.gpg
echo "deb [arch=amd64] http://download.proxmox.com/debian/pve trixie pve-no-subscription" \
  > /etc/apt/sources.list.d/pve.list
apt update
# Download (NOT install) the libedgetpu .deb so it can be pulled to the host later
mkdir -p /usr/local/src/coral
cd /usr/local/src/coral
apt-get download libedgetpu1-std
# Headers & build deps live INSIDE the CT only — the host stays clean
apt install -y "pve-headers-$(uname -r)"
# Build the kernel module; the checked-out PR branch presumably carries fixes
# for recent kernels — NOTE(review): confirm PR #50 is still required upstream
git clone https://github.com/google/gasket-driver.git
cd gasket-driver
git fetch origin pull/50/head:pr-50
git checkout pr-50
# Build from src/ where the module Makefile lives
make -C /lib/modules/$(uname -r)/build M="$(pwd)/src" modules
# We expect apex.ko and gasket.ko in src/
ls -l src/*.ko
'
# --- Copy artifacts out of the CT --------------------------------------------
echo ">>> Copying artifacts out of CT ${CTID} with pct pull…"

# Locate the downloaded libedgetpu .deb inside the container. Using 'ls'
# (rather than echoing an unexpanded glob and grepping for a leftover '*')
# fails cleanly when nothing matched; 'head -n1' guards against multiple
# versions having been downloaded.
LIB_DEB_IN_CT=$(pct exec "${CTID}" -- bash -lc \
  'ls /usr/local/src/coral/libedgetpu1-std_*.deb 2>/dev/null' | tr -d "\r" | head -n1 || true)
if [ -z "${LIB_DEB_IN_CT}" ]; then
  echo "ERROR: libedgetpu1-std_*.deb not found inside container" >&2
  pct stop "${CTID}" 2>/dev/null || true
  pct destroy "${CTID}" --force 2>/dev/null || true
  exit 1
fi

# pct pull requires a FILE destination, not just a directory.
LIB_DEB_BASENAME=$(basename "${LIB_DEB_IN_CT}")
pct pull "${CTID}" "${LIB_DEB_IN_CT}" "${HOST_TPU_DIR}/${LIB_DEB_BASENAME}"

# Pull every built .ko module from src/ in the CT to the host, one per line,
# so paths survive even if word-splitting rules change.
KO_PATHS=$(pct exec "${CTID}" -- bash -lc \
  'ls /usr/local/src/coral/gasket-driver/src/*.ko 2>/dev/null' | tr -d "\r" || true)
if [ -n "${KO_PATHS}" ]; then
  while IFS= read -r ko; do
    [ -n "$ko" ] || continue
    KO_BASENAME=$(basename "${ko}")
    pct pull "${CTID}" "${ko}" "${HOST_TPU_DIR}/${KO_BASENAME}" || true
  done <<< "${KO_PATHS}"
else
  echo ">>> WARNING: No .ko files found to pull (build may have failed?)" >&2
fi
# --- Tear down the throwaway builder CT (best effort, never fatal) -----------
echo ">>> Stopping CT ${CTID}…"
if ! pct stop "${CTID}"; then
  :  # already stopped, or stop failed — destroy below will deal with it
fi
echo ">>> Destroying CT ${CTID}…"
if ! pct destroy "${CTID}" --force; then
  :  # best effort; a leftover CT is cleaned up on the next run anyway
fi
# --- Install artifacts on the host -------------------------------------------
echo ">>> Installing artifacts on host from ${HOST_TPU_DIR}…"
cd "${HOST_TPU_DIR}"
echo "Contents of ${HOST_TPU_DIR}:"
ls -l

# Pick the pulled libedgetpu package; -1 + head keeps this a single path even
# if several versions have accumulated in the artifact directory.
LIB_DEB=$(ls -1 libedgetpu1-std_*.deb 2>/dev/null | head -n1 || true)
if [ -z "${LIB_DEB}" ]; then
  echo "ERROR: libedgetpu1-std_*.deb not found in ${HOST_TPU_DIR}" >&2
  exit 1
fi

# Install lib on host (this is the only host package change)
dpkg -i "${LIB_DEB}"

# Install the freshly built kernel modules for the running kernel.
mkdir -p "/lib/modules/${KVER}/extra"
installed=0
for ko in *.ko; do
  [ -f "$ko" ] || continue   # unmatched glob leaves the literal '*.ko' — skip it
  echo ">>> Installing module $ko"
  cp "$ko" "/lib/modules/${KVER}/extra/"
  installed=$((installed + 1))
done
# Don't silently "succeed" when the build produced nothing to install.
if [ "${installed}" -eq 0 ]; then
  echo ">>> WARNING: no .ko modules were installed for ${KVER}" >&2
fi

# Rebuild module dependency data so modprobe can resolve gasket/apex.
echo ">>> Running depmod…"
depmod "${KVER}"

echo ">>> Trying to load modules…"
if ! modprobe gasket; then
  echo "WARNING: modprobe gasket failed, check dmesg for details." >&2
fi
if ! modprobe apex; then
  echo "WARNING: modprobe apex failed, check dmesg for details." >&2
fi

echo ">>> Done."
echo " - Artifacts live in ${HOST_TPU_DIR}"
echo " - libedgetpu is installed; gasket/apex modules installed for ${KVER}"
Glad you found it useful :)
I think this approach is currently the cleanest.
You don't have an images storage on your cluster. Adapt to your own context.
You won't have local-vms either.
Sorry, I have the same issue. I'm a newbie to Proxmox, so I'm not sure how to proceed or what you mean above by "Adapt to your own context".
It means: adapt to your ZFS pool names / storage names. The first one refers to where your ISO/template archives are stored; the other refers to where LXC/VM disks are stored.
In all honesty, you should probably not be using tools you do not understand at least at a basic level, or run scripts from random people on the internet.
This is a recipe for security disaster.
Getting there, Now sorted, got the storage names sorted, thanks.
The script ran through fine — how do I make the TPU available to Frigate?

Thank you so much for creating this! Such a time saver!
I also changed this line so the next VMID available is used:
CTID=$(pvesh get /cluster/nextid)