# Logging
# Viewing logs in real time
controlplane ~ ➜ kubectl get pods
NAME       READY   STATUS    RESTARTS   AGE
webapp-1   1/1     Running   0          79s
controlplane ~ ➜ kubectl logs -f webapp-1
[2024-04-26 06:42:27,701] INFO in event-simulator: USER3 is viewing page1
[2024-04-26 06:42:28,702] INFO in event-simulator: USER4 is viewing page3
[2024-04-26 06:42:29,704] INFO in event-simulator: USER2 logged out
[2024-04-26 06:42:30,705] INFO in event-simulator: USER1 is viewing page2
[2024-04-26 06:42:31,706] INFO in event-simulator: USER4 is viewing page1
[2024-04-26 06:42:32,708] WARNING in event-simulator: USER5 Failed to Login as the account is locked due to MANY FAILED ATTEMPTS.
[2024-04-26 06:42:32,708] INFO in event-simulator: USER1 is viewing page3
[2024-04-26 06:42:33,709] INFO in event-simulator: USER4 is viewing page3
controlplane ~ ➜ kubectl get pods
NAME       READY   STATUS    RESTARTS   AGE
webapp-1   1/1     Running   0          5m42s
webapp-2   2/2     Running   0          3m33s
# For a Pod with multiple containers, such as webapp-2, you must specify which container to read logs from.
controlplane ~ ➜ kubectl logs webapp-2 simple-webapp
# Monitoring
We need to monitor node-level metrics,
e.g. the number of nodes in the cluster, how many of them are healthy, and their resource performance.
Kubernetes does not ship with a built-in monitoring solution; instead there are open-source options such as Metrics Server, Prometheus, and the Elastic Stack.
# Metrics Server
A lightweight component that collects resource usage data (metrics) from a Kubernetes cluster and exposes it through an API
- Provides real-time metrics such as CPU and memory usage
- API path: /apis/metrics.k8s.io/ (see the query example after this list)
- metrics-server must be installed separately as an add-on:
kubectl apply -f https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml
- Metrics Server only serves current, in-memory data, so for long-term history or detailed monitoring a dedicated monitoring/logging solution such as Prometheus, Grafana, or Loki is the better fit.
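For reference, once metrics-server is registered you can also query the aggregated Metrics API directly; a minimal sketch, assuming the usual v1beta1 group version (jq is optional and only used for pretty-printing):
# Raw queries against the Metrics API served under /apis/metrics.k8s.io/
kubectl get --raw /apis/metrics.k8s.io/v1beta1/nodes | jq .
kubectl get --raw /apis/metrics.k8s.io/v1beta1/namespaces/default/pods | jq .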
# Heapster (legacy)
Kubernetes' original metrics collection tool
- Poorly scalable, complex, and resource-hungry, so not well suited even for simple monitoring
- Requires an external storage backend (e.g. InfluxDB)
☞ Officially deprecated as of Kubernetes 1.11
[Hands-on - Metrics Server]
Each K8S cluster runs a single Metrics Server.
Clone kubernetes-metrics-server, deploy it, and check the results with the top command.
# Deploying metrics-server
# This repository was written for the CKA course labs and is not suitable for real-world (production) use.
controlplane ~ ➜ git clone https://github.com/kodekloudhub/kubernetes-metrics-server.git
Cloning into 'kubernetes-metrics-server'...
remote: Enumerating objects: 31, done.
remote: Counting objects: 100% (19/19), done.
remote: Compressing objects: 100% (19/19), done.
remote: Total 31 (delta 8), reused 0 (delta 0), pack-reused 12
Receiving objects: 100% (31/31), 8.08 KiB | 8.08 MiB/s, done.
Resolving deltas: 100% (10/10), done.
# Option 1: apply the official release manifest
controlplane ~ ➜ kubectl apply -f https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml
serviceaccount/metrics-server created
clusterrole.rbac.authorization.k8s.io/system:aggregated-metrics-reader created
clusterrole.rbac.authorization.k8s.io/system:metrics-server created
rolebinding.rbac.authorization.k8s.io/metrics-server-auth-reader created
clusterrolebinding.rbac.authorization.k8s.io/metrics-server:system:auth-delegator created
clusterrolebinding.rbac.authorization.k8s.io/system:metrics-server created
service/metrics-server created
deployment.apps/metrics-server created
apiservice.apiregistration.k8s.io/v1beta1.metrics.k8s.io created
# Option 2: alternatively, create the resources from the cloned repository
controlplane ~ ➜ cd kubernetes-metrics-server/
kubernetes-metrics-server/ $ ls
aggregated-metrics-reader.yaml  metrics-server-deployment.yaml
auth-delegator.yaml             metrics-server-service.yaml
auth-reader.yaml                README.md
metrics-apiservice.yaml         resource-reader.yaml
kubernetes-metrics-server/ $ kubectl create -f .
clusterrole.rbac.authorization.k8s.io/system:aggregated-metrics-reader created
clusterrolebinding.rbac.authorization.k8s.io/metrics-server:system:auth-delegator created
rolebinding.rbac.authorization.k8s.io/metrics-server-auth-reader created
apiservice.apiregistration.k8s.io/v1beta1.metrics.k8s.io created
serviceaccount/metrics-server created
deployment.apps/metrics-server created
service/metrics-server created
clusterrole.rbac.authorization.k8s.io/system:metrics-server created
clusterrolebinding.rbac.authorization.k8s.io/system:metrics-server created
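After either install path it can take a minute or so before metrics are served; a quick verification sketch, assuming metrics-server runs in the usual kube-system namespace:
# Wait for the Deployment, confirm the APIService is registered, then try top
kubectl -n kube-system rollout status deployment metrics-server
kubectl get apiservice v1beta1.metrics.k8s.io
kubectl top node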
# Checking CPU / memory usage
$ kubectl top node
NAME           CPU(cores)   CPU%   MEMORY(bytes)   MEMORY%
controlplane   239m         0%     1107Mi          0%
node01         24m          0%     283Mi           0%
$ kubectl top pod
NAME       CPU(cores)   MEMORY(bytes)
elephant   14m          32Mi
lion       1m           17Mi
rabbit     99m          252Mi
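To spot the heaviest consumers quickly, the output can be sorted; the --sort-by flag is supported by reasonably recent kubectl versions:
# Sort Pods by CPU or memory usage; -A covers all namespaces
kubectl top pod --sort-by=cpu
kubectl top pod -A --sort-by=memory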
# Viewing the container logs of a running Pod
# Real-time streaming
$ kubectl logs -f <pod name> -n <namespace>
$ kubectl logs -f kube-scheduler-controlplane -n kube-system
I0107 12:21:01.761484 1 serving.go:386] Generated self-signed cert in-memory
W0107 12:21:05.551533 1 requestheader_controller.go:196] Unable to get configmap/extension-apiserver-authentication in kube-system. Usually fixed by 'kubectl create rolebinding -n kube-system ROLEBINDING_NAME --role=extension-apiserver-authentication-reader --serviceaccount=YOUR_NS:YOUR_SA'
W0107 12:21:05.551566 1 authentication.go:370] Error looking up in-cluster authentication configuration: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot get resource "configmaps" in API group "" in the namespace "kube-system"
# Viewing the logs recorded so far
$ kubectl get pods
NAME       READY   STATUS    RESTARTS   AGE
webapp-1   1/1     Running   0          11s
$ kubectl logs webapp-1
[2025-01-07 12:45:53,520] INFO in event-simulator: USER4 is viewing page2
[2025-01-07 12:45:54,521] INFO in event-simulator: USER4 is viewing page2
[2025-01-07 12:45:55,522] INFO in event-simulator: USER4 logged in
[2025-01-07 12:45:56,524] INFO in event-simulator: USER1 is viewing page3
[2025-01-07 12:45:57,524] INFO in event-simulator: USER4 is viewing page2
[2025-01-07 12:45:58,525] WARNING in event-simulator: USER5 Failed to Login as the account is locked due to MANY FAILED ATTEMPTS.
# For a multi-container Pod, specify the container name.
$ kubectl logs -f <pod_name> <container_name>
$ kubectl get pods
NAME       READY   STATUS    RESTARTS   AGE
webapp-2   2/2     Running   0          2m18s   # this Pod has two containers
$ kubectl logs webapp-2 simple-webapp
[2025-01-07 12:47:53,499] INFO in event-simulator: USER2 is viewing page2
[2025-01-07 12:47:54,499] INFO in event-simulator: USER2 logged in
[2025-01-07 12:47:55,500] INFO in event-simulator: USER1 logged in
[2025-01-07 12:47:56,502] INFO in event-simulator: USER3 is viewing page1
[2025-01-07 12:47:57,502] INFO in event-simulator: USER4 is viewing page1
[2025-01-07 12:47:58,504] WARNING in event-simulator: USER5 Failed to Login as the account is locked due to MANY FAILED ATTEMPTS.
[2025-01-07 12:47:58,504] INFO in event-simulator: USER4 is viewing page2
[2025-01-07 12:47:59,505] INFO in event-simulator: USER3 logged in
[2025-01-07 12:48:00,506] INFO in event-simulator: USER3 logged out
[2025-01-07 12:48:01,507] WARNING in event-simulator: USER30 Order failed as the item is OUT OF STOCK.
# Viewing the logs of the previous container after a Pod restart (--previous or -p)
$ kubectl logs webapp-2 simple-webapp --previous
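A few more standard kubectl logs flags that are handy in practice, shown here with the same example Pod:
# Limit output, add timestamps, or read every container in the Pod at once
kubectl logs webapp-2 simple-webapp --tail=20
kubectl logs webapp-2 simple-webapp --since=1h --timestamps
kubectl logs webapp-2 --all-containers=true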