Because of a few convenience factors and issues with ingress-nginx, we install traefik as the ingress instead of ingress-nginx.

 

Flow diagram of serving traffic through traefik

 

Create a values file for generating the template with helm

traefik-value-override.yaml

# The "--api.insecure" option is required to reach the dashboard over plain HTTP
additionalArguments:
  - "--api.insecure"

# Enable Prometheus metrics and additional metric label settings
metrics:
  prometheus:
    entryPoint: metrics
    addEntryPointsLabels: true
    addRoutersLabels: true
    addServicesLabels: true
    ## Buckets for latency metrics. Default="0.1,0.3,1.2,5.0"
    #buckets: ""

# Resource requests needed to enable autoscaling
resources:
  requests:
    cpu: 100m
    memory: 200Mi
  
# Additionally expose the dashboard on port 8080
ports:
  traefik:
    port: 8080
    expose:
      default: true

# Enable autoscaling
autoscaling:
  enabled: true
  minReplicas: 1
  maxReplicas: 100
  metrics:
    - type: Resource    
      resource:
        name: cpu
        target:
          averageUtilization: 30
          type: Utilization
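
Before rendering, it can help to compare this override against the chart's default values. A minimal sketch, assuming the chart repository has been added under the alias traefik:

# helm repo add traefik https://traefik.github.io/charts
# helm repo update
# helm show values traefik/traefik | less     # inspect the defaults the override file is layered on top of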

 

Generate the template with helm

# helm template traefik traefik \
--repo https://traefik.github.io/charts \
--dependency-update --include-crds \
-f traefik-value-override.yaml > traefik-deploy.yaml

or

# helm template traefik traefik --dependency-update --include-crds \
  --repo https://traefik.github.io/charts \
  --set-json 'additionalArguments=["--api.insecure"]' \
  --set metrics.prometheus.addEntryPointsLabels="true" \
  --set metrics.prometheus.addRoutersLabels="true" \
  --set metrics.prometheus.addServicesLabels="true" \
  --set resources.requests.cpu="100m" \
  --set resources.requests.memory="200Mi" \
  --set ports.traefik.port=8080 \
  --set ports.traefik.expose.default="true" \
  --set autoscaling.enabled="true" \
  --set autoscaling.minReplicas=1 \
  --set autoscaling.maxReplicas=100 \
  --set-json 'autoscaling.metrics=[{"type": "Resource","resource":{"name":"cpu","target":{"averageUtilization":30,"type":"Utilization"}}}]' > traefik-deploy.yaml
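
Before applying, a quick sanity check of the rendered manifest can catch mistakes early. A minimal sketch (the server-side dry run assumes the cluster is already reachable):

# grep '^kind:' traefik-deploy.yaml | sort | uniq -c     # list the resource kinds that were rendered
# kubectl apply -f traefik-deploy.yaml --dry-run=server  # validate against the API server without persisting anything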

Deploy the traefik template

# kubectl apply -f traefik-deploy.yaml --server-side
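
Because the manifest was rendered with --include-crds, it is worth confirming the CRDs were registered and the controller came up before moving on:

# kubectl get crds | grep traefik              # the chart's CRDs (IngressRoute, Middleware, ...) should be listed
# kubectl rollout status deployment/traefik    # wait for the traefik pods to become ready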

 

Verify the traefik deployment

# kubectl get all | grep traefik
pod/traefik-664df7d9f4-b5khl                                 1/1     Running   0          4m35s
service/traefik                                   LoadBalancer   10.233.9.60     192.168.122.14   8080:31301/TCP,80:31502/TCP,443:30543/TCP   4m36s
deployment.apps/traefik                               1/1     1            1           4m35s
replicaset.apps/traefik-664df7d9f4                               1         1         1       4m35s
horizontalpodautoscaler.autoscaling/traefik                  Deployment/traefik                  7%/30%    1         100       1          4m35s

 

As shown above, traefik is deployed and the LoadBalancer at 192.168.122.14 is exposed on ports 80, 443, and 8080.
Port 8080 also serves the dashboard.

# curl -v http://192.168.122.14:8080
*   Trying 192.168.122.14:8080...
* Connected to 192.168.122.14 (192.168.122.14) port 8080 (#0)
> GET / HTTP/1.1
> Host: 192.168.122.14:8080
> User-Agent: curl/7.81.0
> Accept: */*
>
* Mark bundle as not supporting multiuse
< HTTP/1.1 301 Moved Permanently
< Location: http://192.168.122.14:8080/dashboard/
< Date: Wed, 13 Nov 2024 04:10:06 GMT
< Content-Length: 17
<
* Connection #0 to host 192.168.122.14 left intact
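
Following the redirect, the dashboard UI is served at /dashboard/ and the underlying API under /api/. A quick check (the /api/overview path is assumed from the Traefik v2 API):

# curl -s http://192.168.122.14:8080/dashboard/ | head -c 100    # should return the dashboard HTML
# curl -s http://192.168.122.14:8080/api/overview                # JSON summary from the Traefik API

The Prometheus metrics enabled in the values file are served on a separate metrics entryPoint that is not exposed through the LoadBalancer; they can be checked via port-forward (port 9100 is assumed to be the chart's default metrics port; run the curl in a second terminal):

# kubectl port-forward deployment/traefik 9100:9100
# curl -s http://localhost:9100/metrics | grep traefik_entrypoint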

 

Deploy the app to be served

Deploy an echo server, which answers every request, as the app, using the following files.

echo-server-dpm.yaml - Deployment

---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: echo-server-dpm
  labels:
    app: echo-server
spec:
  replicas: 1
  selector:
    matchLabels:
      app: echo-server
  template:
    metadata:
      labels:
        app: echo-server
    spec:
      containers:
      - name: echo-server-pod
        image: ealen/echo-server
        resources:
          requests:
            cpu: 100m
            memory: 200Mi
        ports:
        - name: http
          containerPort: 80
# kubectl apply -f echo-server-dpm.yaml
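
After applying, wait for the rollout to finish and confirm the pod is running:

# kubectl rollout status deployment/echo-server-dpm
# kubectl get pods -l app=echo-server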

 

echo-server-svc.yaml - Service

---
apiVersion: v1
kind: Service
metadata:
  name: echo-server-svc
  labels:
    app: echo-server
spec:
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: echo-server
# kubectl apply -f echo-server-svc.yaml
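
A quick way to confirm the selector matches the deployment's pods is to look at the service endpoints; the pod IPs should be listed:

# kubectl get endpoints echo-server-svc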

 

echo-server-hpa.yaml - HorizontalPodAutoscaler

---
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: echo-server-hpa
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: echo-server-dpm
  minReplicas: 2
  maxReplicas: 100
  metrics:
  - type: Resource
    resource:
      name: cpu
      target:
        type: Utilization
        averageUtilization: 30
# kubectl apply -f echo-server-hpa.yaml
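
The HPA needs resource metrics from metrics-server; if the following returns CPU and memory figures for the echo server pods, the HPA has what it needs:

# kubectl top pods -l app=echo-server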

 

Verify the configuration

1. HorizontalPodAutoscaler

# kubectl get hpa
NAME              REFERENCE                    TARGETS   MINPODS   MAXPODS   REPLICAS   AGE
echo-server-hpa   Deployment/echo-server-dpm   0%/30%    2         100       2          15m

Check that the deployed HPA is collecting metrics correctly, as shown above.
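
To watch the HPA actually scale out, generate some load against the service. A rough sketch following the standard HPA walkthrough (the load-generator pod name is arbitrary; run the watch in a second terminal):

# kubectl run load-generator --rm -ti --image=busybox --restart=Never -- \
  /bin/sh -c "while true; do wget -q -O- http://echo-server-svc/hello > /dev/null; done"
# kubectl get hpa echo-server-hpa --watch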

2. Service

# kubectl get svc
NAME              TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)   AGE
echo-server-svc   ClusterIP   10.233.28.255   <none>        80/TCP    16m

Check that the Service is configured as shown above.

3. Check the service (connect to an echo server pod and call localhost/hello and echo-server-svc/hello).

root@homemachine:~/echo-server-deployment# kubectl get pod
NAME                               READY   STATUS    RESTARTS   AGE
echo-server-dpm-5f6899f47d-9l954   1/1     Running   0          20m
echo-server-dpm-5f6899f47d-tz64q   1/1     Running   0          17m
metrics-server-55cc5bcdb8-qfkhv    1/1     Running   0          21h


root@homemachine:~/echo-server-deployment# kubectl exec -ti echo-server-dpm-5f6899f47d-9l954 -- /bin/sh
/app # wget -O - localhost/hello
Connecting to localhost ([::1]:80)
writing to stdout
{"host":{"hostname":"localhost","ip":"::1","ips":[]},"http":{"method":"GET","baseUrl":"","originalUrl":"/hello","protocol":"http"},"request":{"params":{"0":"/hello"},"query":{},"cookies":{},"body":{},"headers":{"host":"localhost","user-agent":"Wget","connection":"close"}},"environment":{"PATH":"/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin","HOSTNAME":"echo-server-dpm-5f6899f47d-9l954","NODE_VERSION":"20.11.0","YARN_VERSION":"1.22.19","METRICS_SERVER_SERVICE_HOST":"10.233.20.91","METRICS_SERVER_PORT":"tcp://10.233.20.91:443","KUBERNETES_SERVICE_HOST":"10.233.0.1","KUBERNETES_SERVICE_PORT":"443","KUBERNETES_SERVICE_PORT_HTTPS":"443","KUBERNETES_PORT_443_TCP_PORT":"443","KUBERNETES_PORT_443_TCP_ADDR":"10.233.0.1","METRICS_SERVER_PORT_443_TCP":"tcp://10.233.20.91:443","METRICS_SERVER_PORT_443_TCP_PORT":"443","KUBERNETES_PORT_443_TCP_PROTO":"tcp","METRICS_SERVER_SERVICE_PORT_HTTPS":"443","METRICS_SERVER_PORT_443_TCP_ADDR":"10.233.20.91","KUBERNETES_PORT":"tcp://10.233.0.1:443","KUBERNETES_PORT_443_TCP":"tcp://10.233.0.1:443","METRICS_SERVER_SERVICE_PORT":"443","METRICS_SERVER_PORT_443_TCP_PROTO":"-                    100% |*****************************************************************************|  1146  0:00:00 ETA
written to stdout

/app # wget -O - echo-server-svc/hello
Connecting to echo-server-svc (10.233.28.255:80)
writing to stdout
{"host":{"hostname":"echo-server-svc","ip":"::ffff:10.233.74.70","ips":[]},"http":{"method":"GET","baseUrl":"","originalUrl":"/hello","protocol":"http"},"request":{"params":{"0":"/hello"},"query":{},"cookies":{},"body":{},"headers":{"host":"echo-server-svc","user-agent":"Wget","connection":"close"}},"environment":{"PATH":"/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin","HOSTNAME":"echo-server-dpm-5f6899f47d-tz64q","NODE_VERSION":"20.11.0","YARN_VERSION":"1.22.19","ECHO_SERVER_SVC_PORT":"tcp://10.233.28.255:80","ECHO_SERVER_SVC_PORT_80_TCP_PROTO":"tcp","METRICS_SERVER_SERVICE_HOST":"10.233.20.91","METRICS_SERVER_PORT_443_TCP_PORT":"443","METRICS_SERVER_PORT_443_TCP_ADDR":"10.233.20.91","KUBERNETES_SERVICE_HOST":"10.233.0.1","ECHO_SERVER_SVC_SERVICE_PORT_HTTP":"80","METRICS_SERVER_SERVICE_PORT":"443","METRICS_SERVER_SERVICE_PORT_HTTPS":"443","KUBERNETES_PORT":"tcp://10.233.0.1:443","ECHO_SERVER_SVC_PORT_80_TCP_PORT":"80","ECHO_SERVER_SVC_PORT_80_TCP_ADDR":"10.233.28.255","ECHO_SERVER_SVC_SERVICE_PORT":"80","KUBERNETES_SERVICE_PORT_HTTPS":"443","KUBERNETES_PORT_443_TCP":"tcp://10.233.0.1:443","KUBERNETES_PORT_443_TCP_PROTO":"tcp","KUBERNETES_PORT_443_TCP_ADDR":"10.233.0.1","ECHO_SERVER_SVC_SERVICE_HOST":"10.233.28.255","ECHO_SERVER_SVC_PORT_80_TCP":"tcp://10.233.28.255:80","METRICS_SERVER_PORT":"tcp://10.233.20.91:443","METRICS_SERVER_PORT_443_TCP":"tcp://10.233.20.91:443","METRICS_SERVER_PORT_443_TCP_PROTO":"tcp","KUBERNETES_SERVICE_PORT":"443","KUBERNETES_PORT_44-                    100% |*****************************************************************************|  1534  0:00:00 ETA
written to stdout

If the calls return normally as above, the app has been deployed successfully.

 

Deploy the Ingress configuration for the service app

echo-server-ing.yaml

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: echo-server-ing
spec:
  rules:
  - http:
      paths:
      - path: /hello
        pathType: Prefix
        backend:
          service:
            name: echo-server-svc
            port:
              number: 80
# kubectl apply -f echo-server-ing.yaml
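
Confirm that traefik has picked up the Ingress; depending on the chart's publishedService setting, the ADDRESS column may show the LoadBalancer IP:

# kubectl get ingress echo-server-ing
# kubectl describe ingress echo-server-ing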

 

Check the service configured on the ingress

Call 192.168.122.14/hello, the LoadBalancer IP.

If the result looks like the following, it is working correctly.

# curl 192.168.122.14/hello
{"host":{"hostname":"192.168.122.14","ip":"::ffff:10.233.71.11","ips":[]},"http":{"method":"GET","baseUrl":"","originalUrl":"/hello","protocol":"http"},"request":{"params":{"0":"/hello"},"query":{},"cookies":{},"body":{},"headers":{"host":"192.168.122.14","user-agent":"curl/7.81.0","accept":"*/*","x-forwarded-for":"192.168.122.74","x-forwarded-host":"192.168.122.14","x-forwarded-port":"80","x-forwarded-proto":"http","x-forwarded-server":"traefik-664df7d9f4-b5khl","x-real-ip":"192.168.122.74","accept-encoding":"gzip"}},"environment":{"PATH":"/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin","HOSTNAME":"echo-server-dpm-5b9bcf9d66-8lwxr","NODE_VERSION":"20.11.0","YARN_VERSION":"1.22.19","METRICS_SERVER_PORT_443_TCP":"tcp://10.233.46.146:443","PROMETHEUS_KUBE_PROMETHEUS_ALERTMANAGER_PORT_80_TCP":"tcp://10.233.44.116:80","PROMETHEUS_KUBE_PROMETHEUS_ALERTMANAGER_PORT_80_TCP_ADDR":"10.233.44.116","PROMETHEUS_KUBE_STATE_METRICS_PORT_8080_TCP_PORT":"8080","PROMETHEUS_KUBE_STATE_METRICS_PORT_8080_TCP_ADDR":"10.233.57.55","KUBERNETES_PORT_443_TCP_PORT":"443","PROMETHEUS_GRAFANA_SERVICE_HOST":"10.233.5.154","METRICS_SERVER_SERVICE_HOST":"10.233.46.146","TRAEFIK_SERVICE_PORT_WEB":"80","PROMETHEUS_GRAFANA_PORT_80_TCP_ADDR":"10.233.5.154","PROMETHEUS_KUBE_PROMETHEUS_ALERTMANAGER_PORT_80_TCP_PROTO":"tcp","PROMETHEUS_GRAFANA_PORT_80_TCP":"tcp://10.233.5.154:80","TRAEFIK_PORT_80_TCP":"tcp://10.233.9.60:80","PROMETHEUS_KUBE_PROMETHEUS_ALERTMANAGER_SERVICE_PORT_RELOADER_WEB":"8080","PROMETHEUS_PROMETHEUS_NODE_EXPORTER_PORT_9100_TCP":"tcp://10.233.51.19:9100","PROMETHEUS_KUBE_PROMETHEUS_OPERATOR_PORT":"tcp://10.233.9.250:443","PROMETHEUS_KUBE_PROMETHEUS_OPERATOR_PORT_443_TCP":"tcp://10.233.9.250:443","TRAEFIK_PORT_8080_TCP_ADDR":"10.233.9.60","METRICS_SERVER_PORT_443_TCP_ADDR":"10.233.46.146","KUBERNETES_PORT":"tcp://10.233.0.1:443","KUBERNETES_PORT_443_TCP":"tcp://10.233.0.1:443","PROMETHEUS_KUBE_PROMETHEUS_OPERATOR_PORT_443_TCP_ADDR":"10.233.9.250","TRAEFIK_SERVICE_HOST":"10.233.9.60","TRAEFIK_PORT_443_TCP_ADDR":"10.233.9.60","PROMETHEUS_KUBE_STATE_METRICS_SERVICE_PORT":"8080","PROMETHEUS_KUBE_PROMETHEUS_ALERTMANAGER_SERVICE_HOST":"10.233.44.116","PROMETHEUS_KUBE_STATE_METRICS_SERVICE_HOST":"10.233.57.55","PROMETHEUS_KUBE_STATE_METRICS_SERVICE_PORT_HTTP":"8080","KUBERNETES_PORT_443_TCP_ADDR":"10.233.0.1","PROMETHEUS_GRAFANA_PORT_80_TCP_PORT":"80","PROMETHEUS_KUBE_PROMETHEUS_OPERATOR_SERVICE_PORT":"443","PROMETHEUS_KUBE_PROMETHEUS_OPERATOR_SERVICE_PORT_HTTPS":"443","METRICS_SERVER_PORT":"tcp://10.233.46.146:443","PROMETHEUS_PROMETHEUS_NODE_EXPORTER_PORT_9100_TCP_PORT":"9100","PROMETHEUS_KUBE_PROMETHEUS_PROMETHEUS_PORT_8080_TCP":"tcp://10.233.49.17:8080","PROMETHEUS_KUBE_PROMETHEUS_ALERTMANAGER_SERVICE_PORT_HTTP_WEB":"80","PROMETHEUS_KUBE_STATE_METRICS_PORT":"tcp://10.233.57.55:8080","PROMETHEUS_KUBE_PROMETHEUS_OPERATOR_PORT_443_TCP_PROTO":"tcp","METRICS_SERVER_SERVICE_PORT_HTTPS":"443","PROMETHEUS_KUBE_PROMETHEUS_ALERTMANAGER_SERVICE_PORT":"80","PROMETHEUS_KUBE_PROMETHEUS_ALERTMANAGER_PORT_8080_TCP":"tcp://10.233.44.116:8080","PROMETHEUS_KUBE_STATE_METRICS_PORT_8080_TCP":"tcp://10.233.57.55:8080","PROMETHEUS_GRAFANA_SERVICE_PORT":"80","PROMETHEUS_GRAFANA_SERVICE_PORT_HTTP_WEB":"80","PROMETHEUS_GRAFANA_PORT":"tcp://10.233.5.154:80","TRAEFIK_PORT_443_TCP_PROTO":"tcp","PROMETHEUS_KUBE_PROMETHEUS_PROMETHEUS_SERVICE_HOST":"10.233.49.17","PROMETHEUS_PROMETHEUS_NODE_EXPORTER_SERVICE_HOST":"10.233.51.19","PROMETHEUS_PROMETHEUS_NODE_EXPORTER_SERVICE_PORT":"9100","PROMETHEUS_PROMETHEUS_NODE_EXPORTER_PORT_910
0_TCP_ADDR":"10.233.51.19","PROMETHEUS_KUBE_PROMETHEUS_OPERATOR_SERVICE_HOST":"10.233.9.250","TRAEFIK_SERVICE_PORT_TRAEFIK":"8080","TRAEFIK_SERVICE_PORT_WEBSECURE":"443","TRAEFIK_PORT_80_TCP_PORT":"80","METRICS_SERVER_SERVICE_PORT":"443","PROMETHEUS_KUBE_PROMETHEUS_PROMETHEUS_PORT_80_TCP_PROTO":"tcp","PROMETHEUS_KUBE_PROMETHEUS_PROMETHEUS_PORT_80_TCP_PORT":"80","PROMETHEUS_KUBE_PROMETHEUS_ALERTMANAGER_PORT_8080_TCP_PROTO":"tcp","PROMETHEUS_KUBE_PROMETHEUS_ALERTMANAGER_PORT_8080_TCP_PORT":"8080","KUBERNETES_SERVICE_HOST":"10.233.0.1","TRAEFIK_PORT":"tcp://10.233.9.60:8080","TRAEFIK_PORT_80_TCP_ADDR":"10.233.9.60","PROMETHEUS_PROMETHEUS_NODE_EXPORTER_PORT_9100_TCP_PROTO":"tcp","PROMETHEUS_KUBE_PROMETHEUS_PROMETHEUS_SERVICE_PORT_HTTP_WEB":"80","PROMETHEUS_KUBE_PROMETHEUS_PROMETHEUS_PORT":"tcp://10.233.49.17:80","PROMETHEUS_KUBE_PROMETHEUS_PROMETHEUS_PORT_8080_TCP_ADDR":"10.233.49.17","PROMETHEUS_GRAFANA_PORT_80_TCP_PROTO":"tcp","TRAEFIK_PORT_8080_TCP":"tcp://10.233.9.60:8080","TRAEFIK_PORT_8080_TCP_PROTO":"tcp","TRAEFIK_PORT_80_TCP_PROTO":"tcp","METRICS_SERVER_PORT_443_TCP_PROTO":"tcp","PROMETHEUS_KUBE_PROMETHEUS_PROMETHEUS_PORT_80_TCP_ADDR":"10.233.49.17","PROMETHEUS_KUBE_PROMETHEUS_PROMETHEUS_PORT_8080_TCP_PROTO":"tcp","KUBERNETES_SERVICE_PORT_HTTPS":"443","KUBERNETES_PORT_443_TCP_PROTO":"tcp","TRAEFIK_PORT_443_TCP_PORT":"443","PROMETHEUS_KUBE_PROMETHEUS_PROMETHEUS_PORT_80_TCP":"tcp://10.233.49.17:80","PROMETHEUS_PROMETHEUS_NODE_EXPORTER_SERVICE_PORT_HTTP_METRICS":"9100","PROMETHEUS_PROMETHEUS_NODE_EXPORTER_PORT":"tcp://10.233.51.19:9100","PROMETHEUS_KUBE_PROMETHEUS_ALERTMANAGER_PORT_80_TCP_PORT":"80","METRICS_SERVER_PORT_443_TCP_PORT":"443","PROMETHEUS_KUBE_PROMETHEUS_ALERTMANAGER_PORT_8080_TCP_ADDR":"10.233.44.116","PROMETHEUS_KUBE_STATE_METRICS_PORT_8080_TCP_PROTO":"tcp","KUBERNETES_SERVICE_PORT":"443","PROMETHEUS_KUBE_PROMETHEUS_OPERATOR_PORT_443_TCP_PORT":"443","TRAEFIK_PORT_8080_TCP_PORT":"8080","TRAEFIK_PORT_443_TCP":"tcp://10.233.9.60:443","PROMETHEUS_KUBE_PROMETHEUS_PROMETHEUS_SERVICE_PORT_RELOADER_WEB":"8080","PROMETHEUS_KUBE_PROMETHEUS_PROMETHEUS_PORT_8080_TCP_PORT":"8080","TRAEFIK_SERVICE_PORT":"8080","PROMETHEUS_KUBE_PROMETHEUS_PROMETHEUS_SERVICE_PORT":"80","PROMETHEUS_KUBE_PROMETHEUS_ALERTMANAGER_PORT":"tcp://10.233.44.116:80","HOME":"/root"}}r
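
Since both traefik and the echo server have HPAs, pushing sustained load through the LoadBalancer is a simple end-to-end test; a rough sketch (the request count is arbitrary; run the watch in a second terminal):

# for i in $(seq 1 10000); do curl -s http://192.168.122.14/hello > /dev/null; done
# kubectl get hpa --watch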