mirror of
https://github.com/ghndrx/k8s-game-2048.git
synced 2026-02-10 06:45:07 +00:00
feat: Implement webhook-based deployment for k3s behind NAT
- Replace SSH/kubectl deployment with secure webhook-based approach - Add comprehensive webhook handler with HMAC signature verification - Support blue-green deployment strategy for production - Implement auto-promotion pipeline: dev → staging → prod - Add health checks using canonical Knative domains only - Include complete deployment documentation and setup scripts Changes: - Updated deploy-dev.yml, deploy-staging.yml, deploy-prod.yml workflows - Added webhook handler Python script with Flask API - Created Kubernetes manifests for webhook system deployment - Added ingress and service configuration for external access - Created setup script for automated webhook system installation - Documented complete webhook-based deployment guide Perfect for k3s clusters behind NAT without direct API access.
This commit is contained in:
170
manifests/webhook/webhook-handler.yaml
Normal file
170
manifests/webhook/webhook-handler.yaml
Normal file
@@ -0,0 +1,170 @@
|
||||
# Namespace, webhook HMAC secret, and handler configuration for the
# webhook-based deployment system.
apiVersion: v1
kind: Namespace
metadata:
  name: webhook-system
  labels:
    name: webhook-system
---
# Shared secret used to verify the HMAC signature on incoming webhooks.
apiVersion: v1
kind: Secret
metadata:
  name: webhook-secret
  namespace: webhook-system
type: Opaque
stringData:
  webhook-secret: "CHANGE_ME_IN_PRODUCTION" # Replace with your actual webhook secret
---
# Non-secret handler configuration (where manifests are mounted in the pod).
apiVersion: v1
kind: ConfigMap
metadata:
  name: webhook-handler-config
  namespace: webhook-system
data:
  MANIFESTS_PATH: "/app/manifests"
---
|
||||
# Webhook handler Deployment: runs the Flask webhook receiver, which drives
# kubectl and the host Docker daemon to perform deployments.
#
# NOTE(review): this pod mounts the host Docker socket and the k3s admin
# kubeconfig and bootstraps kubectl/docker/flask from the network at startup.
# That grants the container effectively full host/cluster control and makes
# startup depend on external downloads — consider a pre-built image and a
# least-privilege kubeconfig. Kept as-is; confirm this is intentional.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: webhook-handler
  namespace: webhook-system
  labels:
    app: webhook-handler
spec:
  replicas: 2 # For high availability
  selector:
    matchLabels:
      app: webhook-handler
  template:
    metadata:
      labels:
        app: webhook-handler
    spec:
      serviceAccountName: webhook-handler
      containers:
        - name: webhook-handler
          image: python:3.11-slim
          ports:
            - containerPort: 8080
              name: http
          env:
            # HMAC secret injected from the Secret created alongside this file.
            - name: WEBHOOK_SECRET
              valueFrom:
                secretKeyRef:
                  name: webhook-secret
                  key: webhook-secret
            - name: MANIFESTS_PATH
              valueFrom:
                configMapKeyRef:
                  name: webhook-handler-config
                  key: MANIFESTS_PATH
          # Bootstrap tooling at container start, then run the mounted script.
          command:
            - /bin/bash
            - -c
            - |
              apt-get update && apt-get install -y curl
              curl -LO "https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl"
              chmod +x kubectl && mv kubectl /usr/local/bin/
              curl -fsSL https://get.docker.com | sh
              pip install flask
              python /app/webhook-handler.py
          volumeMounts:
            - name: webhook-handler-script
              mountPath: /app/webhook-handler.py
              subPath: webhook-handler.py
            - name: manifests
              mountPath: /app/manifests
            - name: docker-socket
              mountPath: /var/run/docker.sock
            # NOTE(review): the kubeconfig volume is a file-typed hostPath;
            # using subPath on it may not mount as intended — verify, or mount
            # the volume directly at /root/.kube/config.
            - name: kubeconfig
              mountPath: /root/.kube/config
              subPath: config
          livenessProbe:
            httpGet:
              path: /health
              port: 8080
            initialDelaySeconds: 30
            periodSeconds: 10
          readinessProbe:
            httpGet:
              path: /health
              port: 8080
            initialDelaySeconds: 5
            periodSeconds: 5
          resources:
            requests:
              memory: "256Mi"
              cpu: "250m"
            limits:
              memory: "512Mi"
              cpu: "500m"
      volumes:
        - name: webhook-handler-script
          configMap:
            name: webhook-handler-script
            defaultMode: 0755
        - name: manifests
          hostPath:
            path: /home/administrator/k8s-game-2048/manifests # Update this path
            type: Directory
        - name: docker-socket
          hostPath:
            path: /var/run/docker.sock
            type: Socket
        - name: kubeconfig
          hostPath:
            path: /etc/rancher/k3s/k3s.yaml # Default k3s kubeconfig location
            type: File
|
||||
---
|
||||
# Cluster-internal Service fronting the webhook handler pods on port 80.
apiVersion: v1
kind: Service
metadata:
  name: webhook-handler
  namespace: webhook-system
  labels:
    app: webhook-handler
spec:
  selector:
    app: webhook-handler
  ports:
    - name: http
      port: 80
      targetPort: 8080
      protocol: TCP
  type: ClusterIP
|
||||
---
|
||||
# RBAC: ServiceAccount for the handler pods plus a ClusterRole/Binding
# granting the permissions the deployment script exercises.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: webhook-handler
  namespace: webhook-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: webhook-handler
rules:
  # Core resources applied/updated when deploying manifests.
  - apiGroups: [""]
    resources: ["namespaces", "secrets", "configmaps", "services"]
    verbs: ["get", "list", "create", "update", "patch", "delete"]
  - apiGroups: ["apps"]
    resources: ["deployments", "replicasets"]
    verbs: ["get", "list", "create", "update", "patch", "delete"]
  # Knative Serving objects patched for image updates and traffic shifting.
  - apiGroups: ["serving.knative.dev"]
    resources: ["services", "revisions"]
    verbs: ["get", "list", "create", "update", "patch", "delete"]
  # Read-only visibility used by the /status endpoint.
  - apiGroups: [""]
    resources: ["events", "pods"]
    verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: webhook-handler
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: webhook-handler
subjects:
  - kind: ServiceAccount
    name: webhook-handler
    namespace: webhook-system
|
||||
42
manifests/webhook/webhook-ingress.yaml
Normal file
42
manifests/webhook/webhook-ingress.yaml
Normal file
@@ -0,0 +1,42 @@
|
||||
# External exposure for the webhook handler: a LoadBalancer Service plus a
# TLS-terminating nginx Ingress with cert-manager-issued certificates.
apiVersion: v1
kind: Service
metadata:
  name: webhook-handler-external
  namespace: webhook-system
  labels:
    app: webhook-handler
spec:
  selector:
    app: webhook-handler
  ports:
    - name: http
      port: 80
      targetPort: 8080
      protocol: TCP
  type: LoadBalancer # Change to NodePort if LoadBalancer is not available
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: webhook-handler-ingress
  namespace: webhook-system
  annotations:
    nginx.ingress.kubernetes.io/ssl-redirect: "true"
    nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
    cert-manager.io/cluster-issuer: "letsencrypt-prod" # Adjust to your cert issuer
spec:
  tls:
    - hosts:
        - webhook.yourdomain.com # Replace with your actual domain
      secretName: webhook-tls
  rules:
    - host: webhook.yourdomain.com # Replace with your actual domain
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: webhook-handler
                port:
                  number: 80
|
||||
288
manifests/webhook/webhook-script-configmap.yaml
Normal file
288
manifests/webhook/webhook-script-configmap.yaml
Normal file
@@ -0,0 +1,288 @@
|
||||
# ConfigMap carrying the webhook handler script; mounted into the handler
# pod at /app/webhook-handler.py via subPath.
apiVersion: v1
kind: ConfigMap
metadata:
  name: webhook-handler-script
  namespace: webhook-system
data:
  webhook-handler.py: |
|
||||
#!/usr/bin/env python3
"""
Webhook deployment handler for k8s-game-2048.

Receives webhook requests from GitHub Actions and deploys to a k3s cluster.
"""

import hashlib
import hmac
import json
import logging
import os
import subprocess
import time
from datetime import datetime

from flask import Flask, request, jsonify

# Flask application and module-level logging setup.
app = Flask(__name__)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# HMAC secret (injected via the webhook-secret Secret) and the root
# directory under which per-environment manifests are mounted.
WEBHOOK_SECRET = os.environ.get('WEBHOOK_SECRET', 'change-me-in-production')
MANIFESTS_PATH = os.environ.get('MANIFESTS_PATH', '/app/manifests')
|
||||
|
||||
def verify_signature(payload, signature):
    """Check the GitHub-style HMAC-SHA256 signature on *payload*.

    *signature* is expected in the ``sha256=<hexdigest>`` header format;
    a missing or empty signature is rejected outright.
    """
    if not signature:
        return False

    digest = hmac.new(
        WEBHOOK_SECRET.encode('utf-8'), payload, hashlib.sha256
    ).hexdigest()
    # Constant-time comparison to avoid leaking information via timing.
    return hmac.compare_digest(f"sha256={digest}", signature)
|
||||
|
||||
def run_command(cmd, **kwargs):
    """Run *cmd* (an argv list) with output captured and logged.

    Returns the CompletedProcess; re-raises subprocess.CalledProcessError
    (after logging stderr) on a non-zero exit status.
    """
    logger.info(f"Running command: {' '.join(cmd)}")
    try:
        completed = subprocess.run(cmd, check=True, capture_output=True, text=True, **kwargs)
    except subprocess.CalledProcessError as exc:
        logger.error(f"Command failed: {exc.stderr}")
        raise
    logger.info(f"Command output: {completed.stdout}")
    return completed
|
||||
|
||||
def pull_image(image):
    """Pre-pull *image* via the host Docker daemon so the node has it cached."""
    logger.info(f"Pulling image: {image}")
    run_command(['docker', 'pull', image])
|
||||
|
||||
def apply_manifests(environment):
    """kubectl-apply every manifest under ``MANIFESTS_PATH/<environment>``.

    Raises FileNotFoundError when the environment directory does not exist.
    """
    target_dir = f"{MANIFESTS_PATH}/{environment}"
    logger.info(f"Applying manifests from: {target_dir}")

    if not os.path.exists(target_dir):
        raise FileNotFoundError(f"Manifest directory not found: {target_dir}")

    run_command(['kubectl', 'apply', '-f', target_dir])
|
||||
|
||||
def update_service_image(service_name, namespace, image):
    """Merge-patch the Knative service template to run *image*."""
    logger.info(f"Updating service {service_name} in namespace {namespace} with image {image}")

    container_spec = {"image": image, "imagePullPolicy": "Always"}
    patch = {"spec": {"template": {"spec": {"containers": [container_spec]}}}}

    run_command([
        'kubectl', 'patch', 'ksvc', service_name,
        '-n', namespace,
        '--type', 'merge',
        '-p', json.dumps(patch)
    ])
|
||||
|
||||
def wait_for_service_ready(service_name, namespace, timeout=300):
    """Block until the Knative service reports Ready, up to *timeout* seconds."""
    logger.info(f"Waiting for service {service_name} to be ready...")

    wait_cmd = [
        'kubectl', 'wait', '--for=condition=Ready',
        f'ksvc/{service_name}',
        '-n', namespace,
        f'--timeout={timeout}s'
    ]
    run_command(wait_cmd)
|
||||
|
||||
def _patch_service_traffic(service_name, namespace, traffic):
    """Merge-patch the ksvc's spec.traffic with the given route list."""
    run_command([
        'kubectl', 'patch', 'ksvc', service_name,
        '-n', namespace,
        '--type', 'merge',
        '-p', json.dumps({"spec": {"traffic": traffic}})
    ])


def implement_blue_green_deployment(service_name, namespace, traffic_split):
    """Shift traffic to the latest revision in three phases (blue-green).

    *traffic_split* may supply ``initial`` and ``intermediate`` percentages
    (defaults 10 and 50); the final phase always routes 100% of traffic to
    the latest revision. A falsy *traffic_split* disables the procedure.
    """
    if not traffic_split:
        return

    logger.info("Starting blue-green deployment...")

    # Discover the newest ready revision that traffic should be shifted to.
    result = run_command([
        'kubectl', 'get', 'ksvc', service_name,
        '-n', namespace,
        '-o', 'jsonpath={.status.latestReadyRevisionName}'
    ])
    latest_revision = result.stdout.strip()

    if not latest_revision:
        logger.warning("No latest revision found, skipping traffic split")
        return

    # Phases 1 and 2: partial traffic to the new revision, with a one-minute
    # soak between shifts. (Previously three copy-pasted patch calls.)
    for phase, key, default in ((1, 'initial', 10), (2, 'intermediate', 50)):
        percent = traffic_split.get(key, default)
        logger.info(f"Phase {phase}: Routing {percent}% traffic to new revision")
        _patch_service_traffic(service_name, namespace, [
            {"revisionName": latest_revision, "percent": percent},
            # NOTE(review): a route with latestRevision: false and no
            # revisionName may be rejected by Knative Serving — confirm
            # against the serving API; kept as in the original payload.
            {"latestRevision": False, "percent": 100 - percent}
        ])
        time.sleep(60)  # Wait 1 minute before the next shift

    # Phase 3: cut everything over to the latest revision.
    logger.info("Phase 3: Routing 100% traffic to new revision")
    _patch_service_traffic(service_name, namespace, [
        {"latestRevision": True, "percent": 100}
    ])
|
||||
|
||||
@app.route('/webhook/deploy', methods=['POST'])
def deploy():
    """Main webhook endpoint: authenticate, validate, and run a deployment.

    Expects a JSON body with required fields environment/image/namespace/
    service_name, plus optional deployment_id, deployment_strategy
    ('rolling' default, or 'blue-green') and traffic_split, authenticated
    via the X-Signature-SHA256 HMAC header.
    """
    try:
        # Verify signature before touching the payload.
        signature = request.headers.get('X-Signature-SHA256')
        if not verify_signature(request.data, signature):
            logger.warning("Invalid webhook signature")
            return jsonify({"error": "Invalid signature"}), 401

        # Parse payload. silent=True makes malformed JSON yield None so we
        # return our own JSON 400 instead of Flask's default HTML error page
        # (request.json raises BadRequest on unparseable bodies).
        data = request.get_json(silent=True)
        if not data:
            return jsonify({"error": "No JSON payload"}), 400

        # Extract deployment details
        environment = data.get('environment')
        image = data.get('image')
        namespace = data.get('namespace')
        service_name = data.get('service_name')
        deployment_id = data.get('deployment_id')
        deployment_strategy = data.get('deployment_strategy', 'rolling')
        traffic_split = data.get('traffic_split')

        # Validate required fields
        required_fields = ['environment', 'image', 'namespace', 'service_name']
        missing_fields = [field for field in required_fields if not data.get(field)]
        if missing_fields:
            return jsonify({"error": f"Missing required fields: {missing_fields}"}), 400

        logger.info(f"Starting deployment {deployment_id}")
        logger.info(f"Environment: {environment}")
        logger.info(f"Image: {image}")
        logger.info(f"Namespace: {namespace}")
        logger.info(f"Service: {service_name}")
        logger.info(f"Strategy: {deployment_strategy}")

        # Step 1: Pull the Docker image
        pull_image(image)

        # Step 2: Apply manifests
        apply_manifests(environment)

        # Step 3: Update service image
        update_service_image(service_name, namespace, image)

        # Step 4: Wait for service to be ready
        wait_for_service_ready(service_name, namespace)

        # Step 5: Apply deployment strategy
        if deployment_strategy == 'blue-green' and traffic_split:
            implement_blue_green_deployment(service_name, namespace, traffic_split)

        logger.info(f"Deployment {deployment_id} completed successfully")

        return jsonify({
            "status": "success",
            "deployment_id": deployment_id,
            "timestamp": datetime.utcnow().isoformat(),
            "environment": environment,
            "image": image,
            "strategy": deployment_strategy
        })

    except FileNotFoundError as e:
        # Missing manifest directory -> 404 with the path in the message.
        logger.error(f"File not found: {e}")
        return jsonify({"error": str(e)}), 404

    except subprocess.CalledProcessError as e:
        # kubectl/docker failure -> surface stderr to the caller.
        logger.error(f"Command failed: {e}")
        return jsonify({"error": f"Command failed: {e.stderr}"}), 500

    except Exception as e:
        logger.error(f"Unexpected error: {e}")
        return jsonify({"error": str(e)}), 500
|
||||
|
||||
@app.route('/health', methods=['GET'])
def health():
    """Health check endpoint used by the container liveness/readiness probes."""
    body = {
        "status": "healthy",
        "timestamp": datetime.utcnow().isoformat(),
        "version": "1.0.0"
    }
    return jsonify(body)
|
||||
|
||||
@app.route('/status', methods=['GET'])
def status():
    """Status endpoint: best-effort report of cluster and handler-pod state."""
    try:
        # Cluster overview plus the webhook handler pods themselves.
        cluster_info = run_command(['kubectl', 'cluster-info']).stdout
        pod_info = run_command([
            'kubectl', 'get', 'pods', '-n', 'webhook-system', '--selector=app=webhook-handler'
        ]).stdout
    except Exception as e:
        return jsonify({
            "status": "error",
            "timestamp": datetime.utcnow().isoformat(),
            "error": str(e)
        })

    return jsonify({
        "status": "operational",
        "timestamp": datetime.utcnow().isoformat(),
        "cluster_info": cluster_info,
        "pod_info": pod_info
    })
|
||||
|
||||
if __name__ == '__main__':
    # Log the effective configuration before serving.
    logger.info("Starting webhook deployment handler...")
    secret_state = 'Yes' if WEBHOOK_SECRET != 'change-me-in-production' else 'No (using default)'
    logger.info(f"Webhook secret configured: {secret_state}")
    logger.info(f"Manifests path: {MANIFESTS_PATH}")

    # Bind on all interfaces; TLS is terminated upstream at the ingress.
    app.run(host='0.0.0.0', port=8080, debug=False)
|
||||
Reference in New Issue
Block a user