phase19: cleanup — remove unused ArgoCD apps, convert arch-docs to Deployment

Remove components not needed for PaaS-focused infrastructure:
- argo-rollouts: only used by arch-docs canary, convert to plain Deployment
- oauth2-proxy: was for dev/staging auth (removed in Phase 18)
- nginx-test: test deployment, not needed
- kube-bench: CIS benchmark scanner, not needed for PaaS
- trivy-operator: vulnerability scanner, not needed for PaaS
- drift-check RBAC: drift-check service being removed

arch-docs-prod: rollout.enabled=false → Helm uses Deployment template
This commit is contained in:
Claude 2026-02-24 10:40:13 +01:00
parent cf51494a08
commit 3dc6b0dd68
9 changed files with 1 addition and 378 deletions

View File

@@ -1,50 +0,0 @@
---
# kube-bench: weekly CIS Kubernetes benchmark scan, pinned to the
# control-plane node. Runs as a CronJob; results are emitted as JSON
# to the job's logs.
apiVersion: batch/v1
kind: CronJob
metadata:
  name: kube-bench
  namespace: kube-system
  labels:
    app: kube-bench
spec:
  # Sundays at 05:00.
  schedule: "0 5 * * 0"
  successfulJobsHistoryLimit: 3
  failedJobsHistoryLimit: 1
  jobTemplate:
    spec:
      template:
        metadata:
          labels:
            app: kube-bench
        spec:
          # Host PID namespace so the scanner can observe host processes.
          hostPID: true
          # Pin to (and tolerate) the control-plane node.
          nodeSelector:
            node-role.kubernetes.io/control-plane: ""
          tolerations:
            - key: node-role.kubernetes.io/control-plane
              effect: NoSchedule
          containers:
            - name: kube-bench
              image: aquasec/kube-bench:v0.10.0
              command: ["kube-bench", "run", "--json"]
              # Read-only host mounts the CIS checks inspect.
              volumeMounts:
                - name: var-lib-etcd
                  mountPath: /var/lib/etcd
                  readOnly: true
                - name: etc-kubernetes
                  mountPath: /etc/kubernetes
                  readOnly: true
                - name: usr-bin
                  mountPath: /usr/local/mount-from-host/bin
                  readOnly: true
          restartPolicy: Never
          volumes:
            - name: var-lib-etcd
              hostPath:
                path: /var/lib/etcd
            - name: etc-kubernetes
              hostPath:
                path: /etc/kubernetes
            - name: usr-bin
              hostPath:
                path: /usr/bin
      # No retries — a failed scan just waits for the next schedule.
      backoffLimit: 0

View File

@@ -1,63 +0,0 @@
---
# nginx-test: hardened test Deployment (unprivileged nginx, read-only
# rootfs, all capabilities dropped) plus a ClusterIP Service in front.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-test
  namespace: default
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx-test
  template:
    metadata:
      labels:
        app: nginx-test
    spec:
      securityContext:
        runAsNonRoot: true
        seccompProfile:
          type: RuntimeDefault
      containers:
        - name: nginx
          # Unprivileged image listens on 8080 instead of 80.
          image: nginxinc/nginx-unprivileged:1.27
          ports:
            - containerPort: 8080
          resources:
            requests:
              cpu: 50m
              memory: 64Mi
            limits:
              cpu: 200m
              memory: 128Mi
          securityContext:
            allowPrivilegeEscalation: false
            capabilities:
              drop: ["ALL"]
            readOnlyRootFilesystem: true
          # Writable emptyDirs for the paths nginx needs despite the
          # read-only root filesystem.
          volumeMounts:
            - name: tmp
              mountPath: /tmp
            - name: cache
              mountPath: /var/cache/nginx
            - name: run
              mountPath: /var/run
      volumes:
        - name: tmp
          emptyDir: {}
        - name: cache
          emptyDir: {}
        - name: run
          emptyDir: {}
---
apiVersion: v1
kind: Service
metadata:
  name: nginx-test
  namespace: default
spec:
  selector:
    app: nginx-test
  ports:
    # Service port 80 fronts the unprivileged container port 8080.
    - port: 80
      targetPort: 8080
  type: ClusterIP

View File

@@ -1,67 +0,0 @@
---
# drift-check: service account + long-lived token + read-only pod
# access, bound only in the dev/staging/prod namespaces (a ClusterRole
# referenced from RoleBindings grants namespace-scoped access).
apiVersion: v1
kind: ServiceAccount
metadata:
  name: drift-check
  namespace: kube-system
---
# Long-lived token Secret for out-of-cluster kubectl usage.
apiVersion: v1
kind: Secret
metadata:
  name: drift-check-token
  namespace: kube-system
  annotations:
    kubernetes.io/service-account.name: drift-check
type: kubernetes.io/service-account-token
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: drift-check
rules:
  # kubectl get pods -n {dev,staging,prod} -l ... -o jsonpath
  - apiGroups: [""]
    resources: ["pods"]
    verbs: ["get", "list"]
---
# Namespace-scoped bindings — only dev, staging, prod
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: drift-check
  namespace: dev
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: drift-check
subjects:
  - kind: ServiceAccount
    name: drift-check
    namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: drift-check
  namespace: staging
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: drift-check
subjects:
  - kind: ServiceAccount
    name: drift-check
    namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: drift-check
  namespace: prod
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: drift-check
subjects:
  - kind: ServiceAccount
    name: drift-check
    namespace: kube-system

View File

@@ -1,44 +0,0 @@
---
# Argo CD Application: argo-rollouts controller via the upstream Helm
# chart, dashboard disabled.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: argo-rollouts
  namespace: argocd
  # Cascade-delete managed resources when the Application is removed.
  finalizers:
    - resources-finalizer.argocd.argoproj.io
spec:
  project: default
  source:
    chart: argo-rollouts
    repoURL: https://argoproj.github.io/argo-helm
    targetRevision: "2.39.1"
    helm:
      values: |
        controller:
          replicas: 1
          resources:
            requests:
              cpu: 50m
              memory: 64Mi
            limits:
              cpu: 200m
              memory: 128Mi
        dashboard:
          enabled: false
  destination:
    server: https://kubernetes.default.svc
    namespace: argo-rollouts
  # Don't fight other actors over CRD spec/metadata.
  ignoreDifferences:
    - group: apiextensions.k8s.io
      kind: CustomResourceDefinition
      jsonPointers:
        - /spec
        - /metadata/annotations
        - /metadata/labels
  syncPolicy:
    automated:
      prune: true
      selfHeal: true
    syncOptions:
      - CreateNamespace=true
      - ServerSideApply=true
      - RespectIgnoreDifferences=true

View File

@@ -1,22 +0,0 @@
---
# Argo CD Application: deploys the kube-bench CronJob manifests from
# the in-cluster Gitea repo into kube-system.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: kube-bench
  namespace: argocd
  finalizers:
    - resources-finalizer.argocd.argoproj.io
spec:
  project: default
  source:
    repoURL: http://10.10.10.1:3000/claude/k8s-apps.git
    targetRevision: main
    path: apps/kube-bench
  destination:
    server: https://kubernetes.default.svc
    # kube-system always exists, hence CreateNamespace=false below.
    namespace: kube-system
  syncPolicy:
    automated:
      prune: true
      selfHeal: true
    syncOptions:
      - CreateNamespace=false

View File

@@ -1,17 +0,0 @@
---
# Argo CD Application: nginx-test manifests from the in-cluster Gitea
# repo into the default namespace.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: nginx-test
  namespace: argocd
  finalizers:
    - resources-finalizer.argocd.argoproj.io
spec:
  project: default
  source:
    repoURL: http://10.10.10.1:3000/claude/k8s-apps.git
    # NOTE(review): no targetRevision set (unlike the sibling kube-bench
    # Application, which pins `main`) — Argo CD falls back to HEAD.
    path: apps/nginx-test
  destination:
    server: https://kubernetes.default.svc
    namespace: default
  syncPolicy:
    # Automated sync without prune/selfHeal (both default to false).
    automated: {}

View File

@@ -1,53 +0,0 @@
---
# Argo CD Application: oauth2-proxy fronting Keycloak OIDC for the
# infrastructure realm; deployed into the keycloak namespace.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: oauth2-proxy
  namespace: argocd
  finalizers:
    - resources-finalizer.argocd.argoproj.io
spec:
  project: default
  source:
    chart: oauth2-proxy
    repoURL: https://oauth2-proxy.github.io/manifests
    targetRevision: "7.12.0"
    helm:
      values: |
        replicaCount: 1
        config:
          clientID: "oauth2-proxy"
          # Client secret + cookie secret come from a pre-created Secret.
          existingSecret: oauth2-proxy-secrets
          # configFile is TOML passed through to oauth2-proxy verbatim.
          configFile: |-
            provider = "keycloak-oidc"
            provider_display_name = "Keycloak"
            oidc_issuer_url = "https://keycloak.georgepet.duckdns.org/realms/infrastructure"
            skip_oidc_discovery = true
            login_url = "https://keycloak.georgepet.duckdns.org/realms/infrastructure/protocol/openid-connect/auth"
            redeem_url = "https://keycloak.georgepet.duckdns.org/realms/infrastructure/protocol/openid-connect/token"
            oidc_jwks_url = "https://keycloak.georgepet.duckdns.org/realms/infrastructure/protocol/openid-connect/certs"
            profile_url = "https://keycloak.georgepet.duckdns.org/realms/infrastructure/protocol/openid-connect/userinfo"
            email_domains = ["*"]
            cookie_secure = true
            cookie_domains = [".georgepet.duckdns.org"]
            whitelist_domains = [".georgepet.duckdns.org"]
            set_xauthrequest = true
            set_authorization_header = true
            pass_access_token = true
            skip_provider_button = true
            upstreams = ["static://202"]
            cookie_samesite = "lax"
            allowed_groups = ["/infra-admins", "infra-admins", "/infra-operators", "infra-operators"]
        resources:
          requests:
            cpu: 25m
            memory: 32Mi
          limits:
            cpu: 100m
            memory: 128Mi
  destination:
    server: https://kubernetes.default.svc
    namespace: keycloak
  syncPolicy:
    automated:
      prune: true
      selfHeal: true

View File

@@ -1,54 +0,0 @@
---
# Argo CD Application: trivy-operator vulnerability/config-audit
# scanner via the Aqua Helm chart.
# NOTE(review): the helm.values nesting below is reconstructed from a
# flattened rendering — verify key placement (especially the top-level
# `resources` and `scanJob.resources`) against the chart's values schema.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: trivy-operator
  namespace: argocd
  finalizers:
    - resources-finalizer.argocd.argoproj.io
spec:
  project: default
  source:
    chart: trivy-operator
    repoURL: https://aquasecurity.github.io/helm-charts/
    targetRevision: "0.32.0"
    helm:
      values: |
        trivy:
          ignoreUnfixed: true
          severity: CRITICAL,HIGH
        operator:
          scanJobsConcurrentLimit: 1
          vulnerabilityScannerScanOnlyCurrentRevisions: true
          configAuditScannerEnabled: true
          rbacAssessmentEnabled: false
          infraAssessmentEnabled: false
          clusterComplianceEnabled: false
        resources:
          requests:
            cpu: 50m
            memory: 128Mi
          limits:
            cpu: 200m
            memory: 512Mi
        nodeCollector:
          # Allow the node collector onto the control-plane node.
          tolerations:
            - key: node-role.kubernetes.io/control-plane
              operator: Exists
              effect: NoSchedule
        scanJob:
          resources:
            requests:
              cpu: 50m
              memory: 128Mi
            limits:
              cpu: 200m
              memory: 512Mi
  destination:
    server: https://kubernetes.default.svc
    namespace: trivy-system
  syncPolicy:
    automated:
      prune: true
      selfHeal: true
    syncOptions:
      - CreateNamespace=true

View File

@@ -33,14 +33,7 @@ ingress:
clusterIssuer: letsencrypt-prod
rollout:
enabled: true
steps:
- setWeight: 20
- pause: { duration: 60s }
- setWeight: 50
- pause: { duration: 60s }
analysis:
enabled: true
enabled: false
featureFlags:
enabled: true