@eumel8
Last active February 15, 2026 21:08
gitlab-ci-k8s
# GitLab CI pipeline for kind/k3d end-to-end tests
# Docker 28+ breaks nested container runtimes (k3d/kind) due to procfs hardening.
# Pinned to docker:27-dind intentionally for Helm E2E testing.
image: docker:27-dind

services:
  - name: docker:27-dind
    alias: docker

variables:
  DOCKER_HOST: tcp://docker:2375
  DOCKER_TLS_CERTDIR: ""
  KIND_VERSION: v0.31.0
  KUBECTL_VERSION: v1.34.3
  HELM_VERSION: v4.1.0
  CLUSTER_NAME: ci-cluster
  K3S_VERSION: v1.35.0-k3s3
  K3D_VERSION: v5.8.3

stages:
  - deploy
  - cleanup

before_script:
  # Install packages on the Alpine image
  - apk add --no-cache curl bash
  - docker version
  # Install kind
  - curl -Lo /usr/local/bin/kind https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-linux-amd64
  - chmod +x /usr/local/bin/kind
  # Install kubectl
  - curl -Lo /usr/local/bin/kubectl https://dl.k8s.io/release/${KUBECTL_VERSION}/bin/linux/amd64/kubectl
  - chmod +x /usr/local/bin/kubectl
  # Install helm
  - curl -fsSL https://get.helm.sh/helm-${HELM_VERSION}-linux-amd64.tar.gz | tar -xzv
  - mv linux-amd64/helm /usr/local/bin/
  - rm -rf linux-amd64

kind-cluster:
  stage: deploy
  tags:
    - dind
  parallel:
    matrix:
      - KIND_VERSION: ["v0.31.0", "v0.30.0"]
  script:
    # Kind config
    - |
      cat <<EOF > kind-config.yaml
      kind: Cluster
      apiVersion: kind.x-k8s.io/v1alpha4
      nodes:
        - role: control-plane
        - role: worker
        - role: worker
      networking:
        apiServerAddress: "0.0.0.0"
        apiServerPort: 6443
      kubeadmConfigPatches:
        - |
          kind: ClusterConfiguration
          apiServer:
            certSANs:
              - "docker"
      EOF
    # Create cluster
    - kind create cluster --name ${CLUSTER_NAME} --config kind-config.yaml
    # Replace API endpoint in kubeconfig
    - sed -i -E -e "s/localhost|0\.0\.0\.0/docker/g" "$HOME/.kube/config"
    # Test
    - kubectl cluster-info
    - kubectl wait --for=condition=ready nodes --all --timeout=300s
    - kubectl get nodes -o wide
    # Start application test here
    - echo "app test"

k3d-cluster:
  stage: deploy
  tags:
    - dind
  parallel:
    matrix:
      - K3S_VERSION: ["v1.35.0-k3s3", "v1.34.3-k3s3"]
  script:
    - curl -Lo /usr/local/bin/k3d https://github.com/k3d-io/k3d/releases/download/${K3D_VERSION}/k3d-linux-amd64
    - chmod +x /usr/local/bin/kubectl /usr/local/bin/k3d
    # Create cluster (API directly reachable)
    - |
      k3d cluster create ${CLUSTER_NAME} \
        --image rancher/k3s:${K3S_VERSION} \
        --api-port 6443 \
        --servers 1 \
        --agents 0 \
        --wait
    # Test
    - kubectl cluster-info
    - kubectl wait --for=condition=ready nodes --all --timeout=300s
    - kubectl get nodes -o wide
    # Start application test here
    - echo "app test"

cleanup-kind:
  stage: cleanup
  tags:
    - dind
  needs:
    - kind-cluster
  when: always
  script:
    - kind delete cluster --name ${CLUSTER_NAME}
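
# Note: only the kind cluster gets an explicit teardown above. A matching cleanup job
# for the k3d cluster (a sketch along the same lines; usually optional on ephemeral runners)
# could look like this:
cleanup-k3d:
  stage: cleanup
  tags:
    - dind
  needs:
    - k3d-cluster
  when: always
  script:
    # k3d is only installed inside the k3d-cluster job, so fetch the binary again here
    - curl -Lo /usr/local/bin/k3d https://github.com/k3d-io/k3d/releases/download/${K3D_VERSION}/k3d-linux-amd64
    - chmod +x /usr/local/bin/k3d
    - k3d cluster delete ${CLUSTER_NAME}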
eumel8 commented Feb 15, 2026

kwok/vanilla example

deploy-kwok:
  stage: deploy
  variables:
    # Version pinning for stability
    KWOK_REPO: "kubernetes-sigs/kwok"
    KWOK_VERSION: "v0.7.0"
    ETCD_VERSION: "v3.6.8"
    HELM_VERSION: "v4.1.0"
    # KWOK Configuration
    KWOK_KUBE_CONFIG: /tmp/kubeconfig
    KUBECONFIG: /tmp/kubeconfig
  image: docker.io/ubuntu:24.04
  tags:
    - aws_run_sysbox
  parallel:
    matrix:
      - K8S_VERSION: ["v1.32.12", "v1.33.8", "v1.34.3", "v1.35.1"]
  before_script:
    # 1. Isolate from Host Cluster (Critical for Sysbox/Runner environments)
    - unset KUBERNETES_SERVICE_HOST
    - unset KUBERNETES_SERVICE_PORT
    - echo ${K8S_VERSION}
    # 2. Install System Dependencies
    - apt-get update && apt-get install -y curl ca-certificates git socat jq
    # 3. Install KWOK (Kubernetes WithOut Kubelet)
    - curl -L -o /usr/local/bin/kwokctl "https://github.com/kubernetes-sigs/kwok/releases/download/${KWOK_VERSION}/kwokctl-linux-amd64"
    - curl -L -o /usr/local/bin/kwok "https://github.com/kubernetes-sigs/kwok/releases/download/${KWOK_VERSION}/kwok-linux-amd64"
    - chmod +x /usr/local/bin/kwokctl /usr/local/bin/kwok
    # 4. Install kubectl
    - curl -LO "https://dl.k8s.io/release/${K8S_VERSION}/bin/linux/amd64/kubectl"
    - chmod +x kubectl && mv kubectl /usr/local/bin/
    # 5. Install Kubernetes Control Plane Binaries (API, Controller, Scheduler)
    # KWOK uses these to simulate a full cluster in user-mode
    - curl -L -o /usr/local/bin/kube-apiserver https://dl.k8s.io/release/${K8S_VERSION}/bin/linux/amd64/kube-apiserver
    - curl -L -o /usr/local/bin/kube-controller-manager https://dl.k8s.io/release/${K8S_VERSION}/bin/linux/amd64/kube-controller-manager
    - curl -L -o /usr/local/bin/kube-scheduler https://dl.k8s.io/release/${K8S_VERSION}/bin/linux/amd64/kube-scheduler
    - chmod +x /usr/local/bin/kube-apiserver /usr/local/bin/kube-controller-manager /usr/local/bin/kube-scheduler
    # 6. Install Etcd (Required for Binary Runtime)
    - curl -L https://github.com/etcd-io/etcd/releases/download/${ETCD_VERSION}/etcd-${ETCD_VERSION}-linux-amd64.tar.gz -o etcd.tar.gz
    - tar xzf etcd.tar.gz && mv etcd-${ETCD_VERSION}-linux-amd64/etcd /usr/local/bin/ && rm -rf etcd*
    # 7. Install Helm
    - curl https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash
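    # NOTE: the get-helm-3 script above installs the latest Helm release and does not
    # read the HELM_VERSION variable defined in this job; pin the version separately if needed.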
  script:
    - echo "=== Starting Fake Kubernetes Cluster (KWOK/${K8S_VERSION}) ==="
    # Define Stage Policy: Simulate Pods as Running immediately
    - |
      cat <<EOF > kwok.yaml
      apiVersion: kwok.x-k8s.io/v1alpha1
      kind: Stage
      metadata:
        name: fast-forward
      spec:
        resourceRef:
          apiGroup: v1
          kind: Pod
        selector:
          matchExpressions:
          - key: .metadata.deletionTimestamp
            operator: DoesNotExist
        next:
          statusTemplate: |
            phase: Running
            conditions:
            - type: Ready
              status: "True"
              lastProbeTime: null
              lastTransitionTime: {{ .metadata.creationTimestamp }}
      EOF
    # Start Cluster in Binary Mode (No Docker required!)
    - kwokctl create cluster --runtime binary --kubeconfig $KWOK_KUBE_CONFIG --config kwok.yaml
    - echo "=== Configuring Cluster ==="
    # Create a Fake Node (Required for Scheduling)
    - |
      cat <<EOF | kubectl apply -f -
      apiVersion: v1
      kind: Node
      metadata:
        name: kwok-node-0
        labels:
          beta.kubernetes.io/arch: amd64
          beta.kubernetes.io/os: linux
          kubernetes.io/arch: amd64
          kubernetes.io/hostname: kwok-node-0
          kubernetes.io/os: linux
          kubernetes.io/role: agent
          node-role.kubernetes.io/agent: ""
          type: kwok
        annotations:
          node.alpha.kubernetes.io/ttl: "0"
          kwok.x-k8s.io/node: fake
      status:
        allocatable:
          cpu: 32
          memory: 256Gi
          pods: 110
        capacity:
          cpu: 32
          memory: 256Gi
          pods: 110
        conditions:
        - lastHeartbeatTime: "2023-01-01T00:00:00Z"
          lastTransitionTime: "2023-01-01T00:00:00Z"
          message: kubelet is posting ready status
          reason: KubeletReady
          status: "True"
          type: Ready
      EOF
    # Ensure Stage CRD is present (Fallback for older KWOK versions)
    - kubectl apply -f "https://github.com/kubernetes-sigs/kwok/raw/main/kustomize/crd/bases/kwok.x-k8s.io_stages.yaml" || true
    - kubectl apply -f kwok.yaml || true
    
    - echo "=== Cluster Ready ==="
    - kubectl get nodes
    # EXAMPLE: Installing other controllers (e.g. Ingress, Prometheus)
    # If your chart needs CRDs (e.g. ServiceMonitor), install them here:
    # - kubectl apply -f https://github.com/prometheus-operator/prometheus-operator/releases/download/v0.68.0/bundle.yaml
    # If you need an Ingress Controller fake:
    # - kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/main/deploy/static/provider/kind/deploy.yaml
    # (Note: Ingress pods will stay Pending/Running fake, but API resources will be accepted)
    - echo "=== Running E2E Test ==="
    - helm install e2e-test charts/my-app --wait --timeout 60s || { kubectl get pods; kubectl get events; exit 1;  }
    - echo "=== Verification ==="
    - kubectl get all -A
    - kubectl describe pod -l app=my-app
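    # Optional teardown; CI runners are ephemeral, so this is usually unnecessary:
    # - kwokctl delete cluster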

deploy-vanilla:
  stage: deploy
  image: docker.io/ubuntu:24.04
  tags:
    - aws_run_sysbox
  parallel:
    matrix:
      - K8S_VERSION: ["v1.32.12", "v1.33.8", "v1.34.3", "v1.35.1"]
  variables:
    ETCD_VERSION: "v3.6.8"
    KUBECONFIG: /tmp/kubeconfig
  before_script:
    - unset KUBERNETES_SERVICE_HOST
    - unset KUBERNETES_SERVICE_PORT
    - apt-get update && apt-get install -y curl ca-certificates git socat jq
    # Install kubectl & k8s binaries
    - curl -LO "https://dl.k8s.io/release/${K8S_VERSION}/bin/linux/amd64/kubectl"
    - chmod +x kubectl && mv kubectl /usr/local/bin/
    - curl -L -o /usr/local/bin/kube-apiserver https://dl.k8s.io/release/${K8S_VERSION}/bin/linux/amd64/kube-apiserver
    - curl -L -o /usr/local/bin/kube-controller-manager https://dl.k8s.io/release/${K8S_VERSION}/bin/linux/amd64/kube-controller-manager
    - curl -L -o /usr/local/bin/kube-scheduler https://dl.k8s.io/release/${K8S_VERSION}/bin/linux/amd64/kube-scheduler
    - chmod +x /usr/local/bin/kube-apiserver /usr/local/bin/kube-controller-manager /usr/local/bin/kube-scheduler
    # Install etcd
    - curl -L https://github.com/etcd-io/etcd/releases/download/${ETCD_VERSION}/etcd-${ETCD_VERSION}-linux-amd64.tar.gz -o etcd.tar.gz
    - tar xzf etcd.tar.gz --no-same-owner
    - mv etcd-${ETCD_VERSION}-linux-amd64/etcd /usr/local/bin/
    - rm -rf etcd*
    # Install Helm
    - curl https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash
  script:
    - echo "=== Starting Vanilla Kubernetes Control Plane (${K8S_VERSION}) ==="
    - ls -la /usr/local/bin/
    # 1. Start Etcd
    - mkdir -p /tmp/etcd-data
    - nohup /usr/local/bin/etcd --data-dir /tmp/etcd-data --listen-client-urls http://127.0.0.1:2379 --advertise-client-urls http://127.0.0.1:2379 > /tmp/etcd.log 2>&1 &
    - echo "Wait on Etcd..."
    - |
      timeout 30s bash -c 'until curl -s http://127.0.0.1:2379/health; do echo "Warte..."; sleep 1; done' || (cat /tmp/etcd.log && exit 1)
    # 2. Start API Server
    # Generate CA and Client Certs properly
    - mkdir -p /tmp/certs
    # 2.1 CA Authority
    - openssl genrsa -out /tmp/certs/ca.key 2048
    - openssl req -x509 -new -nodes -key /tmp/certs/ca.key -subj "/CN=kubernetes-ca" -days 365 -out /tmp/certs/ca.crt
    # 2.2 Service Account Key (for signing tokens)
    - openssl genrsa -out /tmp/certs/service-account.key 2048
    - openssl req -new -key /tmp/certs/service-account.key -out /tmp/certs/service-account.csr -subj "/CN=service-account"
    - openssl x509 -req -in /tmp/certs/service-account.csr -CA /tmp/certs/ca.crt -CAkey /tmp/certs/ca.key -CAcreateserial -out /tmp/certs/service-account.crt -days 365
    # 2.3 Admin User (CN=admin, O=system:masters)
    - openssl genrsa -out /tmp/certs/admin.key 2048
    - openssl req -new -key /tmp/certs/admin.key -out /tmp/certs/admin.csr -subj "/CN=admin/O=system:masters"
    - openssl x509 -req -in /tmp/certs/admin.csr -CA /tmp/certs/ca.crt -CAkey /tmp/certs/ca.key -CAcreateserial -out /tmp/certs/admin.crt -days 365
    # 2.4 API Server Start (One-Liner for robustness)
    - nohup /usr/local/bin/kube-apiserver --etcd-servers=http://127.0.0.1:2379 --service-cluster-ip-range=10.0.0.0/16 --cert-dir=/tmp/certs --secure-port=6443 --bind-address=127.0.0.1 --service-account-key-file=/tmp/certs/service-account.key --service-account-signing-key-file=/tmp/certs/service-account.key --service-account-issuer=https://kubernetes.default.svc.cluster.local --client-ca-file=/tmp/certs/ca.crt --disable-admission-plugins=ServiceAccount,NamespaceLifecycle,LimitRanger,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota --authorization-mode=Node,RBAC > /tmp/apiserver.log 2>&1 &
    - echo "Wait on API Server..."
    - |
      timeout 30s bash -c 'until curl -k -s https://127.0.0.1:6443/version; do echo "Warte..."; sleep 1; done' || (cat /tmp/apiserver.log && exit 1)
    # 2.5 Configure kubectl with Admin Cert
    - |
      /usr/local/bin/kubectl config set-cluster local --server=https://127.0.0.1:6443 --insecure-skip-tls-verify=true
      /usr/local/bin/kubectl config set-credentials admin --client-certificate=/tmp/certs/admin.crt --client-key=/tmp/certs/admin.key
      /usr/local/bin/kubectl config set-context local --cluster=local --user=admin
      /usr/local/bin/kubectl config use-context local
    # 3. Start Controller Manager (creates Pods from Deployments)
    - nohup /usr/local/bin/kube-controller-manager --master=https://127.0.0.1:6443 --kubeconfig=/tmp/kubeconfig --service-account-private-key-file=/tmp/certs/service-account.key --root-ca-file=/tmp/certs/service-account.crt > /tmp/controller.log 2>&1 &
    # 4. Start Scheduler (assigns Pods to Nodes)
    - nohup /usr/local/bin/kube-scheduler --master=https://127.0.0.1:6443 --kubeconfig=/tmp/kubeconfig > /tmp/scheduler.log 2>&1 &
    # 5. Start Fake Kubelet (Bash Loop)
    - echo "Starting Fake Kubelet..."
    - |
      (
        # Create Node Object
        cat <<EOF | kubectl apply -f -
        apiVersion: v1
        kind: Node
        metadata:
          name: fake-node-0
          labels:
            kubernetes.io/hostname: fake-node-0
            kubernetes.io/role: agent
        status:
          allocatable:
            cpu: "32"
            memory: "256Gi"
            pods: "110"
          capacity:
            cpu: "32"
            memory: "256Gi"
            pods: "110"
          conditions:
          - type: Ready
            status: "True"
            lastHeartbeatTime: "$(date -u +%FT%TZ)"
            lastTransitionTime: "$(date -u +%FT%TZ)"
            message: "Fake Kubelet is ready"
            reason: "KubeletReady"
      EOF
        while true; do
          # Node Heartbeat (Keep it Ready)
          kubectl patch node fake-node-0 --subresource=status --type=merge -p "{\"status\":{\"conditions\":[{\"type\":\"Ready\",\"status\":\"True\",\"lastHeartbeatTime\":\"$(date -u +%FT%TZ)\"}]}}" >/dev/null 2>&1
          # Pod Lifecycle (Pending -> Running)
          # Find all pods on our node (or pending pods) and set them to Running
          # Note: We use -o json and jq to be robust
          kubectl get pods --all-namespaces --field-selector=status.phase=Pending -o json | jq -r '.items[] | .metadata.name + " " + .metadata.namespace' | while read name namespace; do
             if [ ! -z "$name" ]; then
               echo "Fake Kubelet: Starting pod $name in $namespace..."
               # Patch status to Running and Ready
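               # NOTE: the container name "nginx" below is hardcoded; adjust it to the
               # container name(s) used by your chart so readiness reporting matches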
               kubectl patch pod $name -n $namespace --subresource=status --type=merge -p "{\"status\":{\"phase\":\"Running\",\"conditions\":[{\"type\":\"Ready\",\"status\":\"True\"}],\"containerStatuses\":[{\"name\":\"nginx\",\"ready\":true,\"state\":{\"running\":{\"startedAt\":\"$(date -u +%FT%TZ)\"}}}]}}" >/dev/null 2>&1
             fi
          done
          sleep 2
        done
      ) &
      FAKE_PID=$!
    - echo "=== Cluster Ready ==="
    - kubectl get nodes
    - echo "=== Running E2E Test ==="
    - helm install e2e-test charts/my-app --wait --timeout 60s
    - echo "=== Verification ==="
    - kubectl get all -A
    - kill $FAKE_PID
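  # Optional: dump the control-plane logs for debugging (paths match the nohup redirects above)
  after_script:
    - tail -n 50 /tmp/etcd.log /tmp/apiserver.log /tmp/controller.log /tmp/scheduler.log || true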
