Merge pull request 'Phase 6: K8s PoC — create k8s-master + k8s-worker-01 VMs' (#64) from phase6-k8s-vms into main

This commit is contained in:
admin 2026-02-14 08:22:56 +01:00
commit e415a84bff
5 changed files with 335 additions and 0 deletions

View File

@ -0,0 +1,84 @@
# Kubernetes PoC cluster Phase 6
# 2 VMs on NAT bridge vmbr0 (10.10.10.200-201)
# kubeadm + containerd + Calico
#
# Access: DNAT 6443 from bare_srv_1 public IP → k8s-master
# Firewall: Proxmox FW on node level restricts 6443 to control plane IP
#
# Cloud image dependency: proxmox_virtual_environment_download_file.ubuntu_2404_cloud (in main.tf)
locals {
  # VMID / NAT-bridge IP assignment per cluster node.
  k8s_nodes = {
    "k8s-master"    = { vm_id = 300, ip_address = "10.10.10.200" }
    "k8s-worker-01" = { vm_id = 301, ip_address = "10.10.10.201" }
  }
}
# One VM per entry in local.k8s_nodes (k8s-master, k8s-worker-01).
module "k8s_node" {
  for_each = local.k8s_nodes
  source   = "../../modules/k8s-node"

  name       = each.key
  vm_id      = each.value.vm_id
  ip_address = each.value.ip_address

  # Cloud image must be downloaded before the module's VM references it.
  depends_on = [proxmox_virtual_environment_download_file.ubuntu_2404_cloud]
}
# Proxmox node-level FW — allow K8s API + ArgoCD + node_exporter (DNAT'd)
# from the control plane IP only.
resource "proxmox_virtual_environment_firewall_rules" "k8s_api_access" {
  node_name = "georgeops"

  # All four rules share the same shape (inbound TCP from the control
  # plane); a dynamic block removes the copy-paste. The list is ordered —
  # dynamic preserves declaration order, which PVE firewall rules honor.
  dynamic "rule" {
    for_each = [
      { dport = "6443",  comment = "K8s API from control plane (DNAT to k8s-master)" },
      { dport = "30443", comment = "ArgoCD UI from control plane (DNAT to k8s-master)" },
      { dport = "9200",  comment = "k8s-master node_exporter (DNAT)" },
      { dport = "9201",  comment = "k8s-worker-01 node_exporter (DNAT)" },
    ]
    content {
      type    = "in"
      action  = "ACCEPT"
      proto   = "tcp"
      dport   = rule.value.dport
      source  = "78.109.17.180"
      comment = rule.value.comment
    }
  }
}
# Expose VMID + IP for each cluster node created by the module.
output "k8s_nodes" {
  description = "K8s cluster nodes"
  value = {
    for node_name, node in module.k8s_node :
    node_name => { vm_id = node.vm_id, ip_address = node.ip_address }
  }
}

View File

@ -0,0 +1,100 @@
#cloud-config
# K8s node cloud-init — installs containerd + kubeadm + node_exporter
# kubeadm init/join is NOT run here — done manually after boot
# Template variables (rendered by templatefile()): ${hostname}, ${ssh_key}
hostname: ${hostname}
manage_etc_hosts: true
# NOTE(review): root SSH login is used directly — consider a non-root admin
# user with sudo instead.
disable_root: false
users:
  - name: root
    ssh_authorized_keys:
      - ${ssh_key}
    shell: /bin/bash
package_update: true
# Prerequisites for adding the Docker and Kubernetes APT repositories below.
packages:
  - apt-transport-https
  - ca-certificates
  - curl
  - gnupg
write_files:
  # Kernel modules for K8s networking (loaded via modprobe in runcmd)
  - path: /etc/modules-load.d/k8s.conf
    content: |
      overlay
      br_netfilter
  # Sysctl for K8s networking (bridge traffic through iptables + IP forwarding)
  - path: /etc/sysctl.d/99-kubernetes.conf
    content: |
      net.bridge.bridge-nf-call-iptables = 1
      net.bridge.bridge-nf-call-ip6tables = 1
      net.ipv4.ip_forward = 1
  # containerd config — systemd cgroup driver (required for kubeadm)
  - path: /etc/containerd/config.toml
    content: |
      version = 2
      [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
      runtime_type = "io.containerd.runc.v2"
      [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
      SystemdCgroup = true
  # node_exporter systemd unit (binary is installed in runcmd below)
  - path: /etc/systemd/system/node_exporter.service
    content: |
      [Unit]
      Description=Prometheus Node Exporter
      After=network.target
      [Service]
      User=node_exporter
      ExecStart=/usr/local/bin/node_exporter
      Restart=on-failure
      RestartSec=5
      [Install]
      WantedBy=multi-user.target
# runcmd is strictly ordered — each step depends on the previous ones.
runcmd:
  # ── Kernel modules ──
  - modprobe overlay
  - modprobe br_netfilter
  # applies /etc/sysctl.d/99-kubernetes.conf written above
  - sysctl --system
  # ── Disable swap (required for K8s) ──
  - swapoff -a
  # NOTE(review): deletes every fstab line containing "swap" — confirm no
  # unrelated mount lines match that pattern.
  - sed -i '/swap/d' /etc/fstab
  # ── Install containerd from Docker repo ──
  - install -m 0755 -d /etc/apt/keyrings
  - curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc
  - chmod a+r /etc/apt/keyrings/docker.asc
  - echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu $(. /etc/os-release && echo $VERSION_CODENAME) stable" > /etc/apt/sources.list.d/docker.list
  - apt-get update
  - apt-get install -y containerd.io
  # config.toml was written by write_files (earlier boot stage), so the
  # restart below picks up the systemd-cgroup config
  - mkdir -p /etc/containerd
  - systemctl restart containerd
  - systemctl enable containerd
  # ── Install kubeadm, kubelet, kubectl (v1.31) ──
  - curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.31/deb/Release.key | gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
  - echo "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.31/deb/ /" > /etc/apt/sources.list.d/kubernetes.list
  - apt-get update
  - apt-get install -y kubelet kubeadm kubectl
  # hold — cluster upgrades are done deliberately via kubeadm, not apt
  - apt-mark hold kubelet kubeadm kubectl
  # ── Install node_exporter for monitoring ──
  - useradd --no-create-home --shell /bin/false node_exporter
  - curl -fsSL https://github.com/prometheus/node_exporter/releases/download/v1.10.2/node_exporter-1.10.2.linux-amd64.tar.gz -o /tmp/node_exporter.tar.gz
  - tar xzf /tmp/node_exporter.tar.gz -C /tmp
  - cp /tmp/node_exporter-1.10.2.linux-amd64/node_exporter /usr/local/bin/
  - chown node_exporter:node_exporter /usr/local/bin/node_exporter
  - rm -rf /tmp/node_exporter*
  # pick up the unit file written by write_files, then start it
  - systemctl daemon-reload
  - systemctl enable --now node_exporter
  # ── Signal cloud-init completion ──
  - touch /var/lib/cloud/instance/k8s-ready

89
modules/k8s-node/main.tf Normal file
View File

@ -0,0 +1,89 @@
# K8s node module creates a Kubernetes node VM on vmbr0 (NAT)
#
# Resources created:
# 1. Cloud-init snippet (containerd, kubeadm, kubelet, node_exporter)
# 2. VM on NAT bridge vmbr0 (firewall=false — NAT breaks with per-NIC FW)
#
# No per-VM Proxmox firewall — NAT provides isolation.
# Host-level INPUT DROP on vmbr0 prevents VM→host access.
# Provider requirements — the module uses bpg/proxmox resource types only.
terraform {
  required_providers {
    proxmox = {
      source  = "bpg/proxmox"
      version = "~> 0.90" # any 0.9x release; not pinned to an exact patch
    }
  }
}
# Cloud-init snippet — renders the user-data template onto the Proxmox node
# so the VM's initialization block can reference it.
resource "proxmox_virtual_environment_file" "cloud_init" {
  content_type = "snippets"
  datastore_id = "local"
  node_name    = var.node_name

  source_raw {
    file_name = "ci-${var.name}.yaml"
    data = templatefile("${path.module}/cloud-init.yaml.tftpl", {
      hostname = var.name
      ssh_key  = var.ssh_public_key
    })
  }
}
# VM — Ubuntu cloud image on the NAT bridge, provisioned by the cloud-init
# snippet above.
resource "proxmox_virtual_environment_vm" "k8s_node" {
  name      = var.name
  node_name = var.node_name
  vm_id     = var.vm_id
  tags      = ["k8s", "tofu", "ubuntu"]

  stop_on_destroy = true
  started         = true
  on_boot         = true # K8s nodes auto-start on host reboot

  cpu {
    cores = var.cpu_cores
    type  = "x86-64-v2-AES"
  }

  memory {
    dedicated = var.ram_mb
  }

  disk {
    datastore_id = "local"
    # Hardcoded path — a resource reference forces VM replacement (ForceNew)
    file_id     = "local:iso/ubuntu-24.04-cloudimg-amd64.img"
    interface   = "virtio0"
    size        = var.disk_gb
    file_format = "qcow2"
    discard     = "on"
    iothread    = true
  }

  network_device {
    bridge   = "vmbr0"
    firewall = false # NAT bridge — firewall=true creates fwbr and breaks NAT
  }

  initialization {
    datastore_id = "local"
    # This reference creates the dependency on the snippet resource, so the
    # explicit depends_on the original carried was redundant and is removed.
    user_data_file_id = proxmox_virtual_environment_file.cloud_init.id
    ip_config {
      ipv4 {
        address = "${var.ip_address}/24"
        gateway = "10.10.10.1"
      }
    }
    dns {
      servers = ["8.8.8.8", "1.1.1.1"]
    }
  }
}

View File

@ -0,0 +1,16 @@
# K8s node module outputs — identity of the created VM.
output "vm_id" {
  description = "Proxmox VMID"
  value       = proxmox_virtual_environment_vm.k8s_node.vm_id
}

output "name" {
  description = "VM name"
  value       = proxmox_virtual_environment_vm.k8s_node.name
}

# Echoed from the input variable — the provider does not report NAT IPs.
output "ip_address" {
  description = "IP address on NAT bridge"
  value       = var.ip_address
}

View File

@ -0,0 +1,46 @@
# K8s node module variables
# Validation blocks enforce the constraints the descriptions state, so a bad
# value fails at plan time instead of surfacing as a broken VM.

variable "name" {
  description = "VM name (e.g., k8s-master, k8s-worker-01)"
  type        = string
}

variable "vm_id" {
  description = "Proxmox VMID (300+ for K8s nodes)"
  type        = number
  validation {
    condition     = var.vm_id >= 300
    error_message = "K8s node VMIDs start at 300."
  }
}

variable "ip_address" {
  description = "IP on vmbr0 NAT bridge (10.10.10.x)"
  type        = string
  validation {
    condition     = can(regex("^10\\.10\\.10\\.[0-9]{1,3}$", var.ip_address))
    error_message = "ip_address must be a bare IPv4 address in 10.10.10.0/24 (no CIDR suffix — /24 is appended by the module)."
  }
}

variable "cpu_cores" {
  description = "Number of CPU cores"
  type        = number
  default     = 4
}

variable "ram_mb" {
  description = "RAM in MB"
  type        = number
  default     = 8192
}

variable "disk_gb" {
  description = "Disk size in GB"
  type        = number
  default     = 60
}

variable "ssh_public_key" {
  description = "SSH public key for control plane access"
  type        = string
  default     = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDO+Y8ns0RgUfR21POlIVsHD+Lp+x7cUBupqXsyMeVNZ claude@control-plane"
}

variable "node_name" {
  description = "Proxmox node name"
  type        = string
  default     = "georgeops"
}