Cloudnet K8s Deploy 4주차 스터디를 진행하며 정리한 글입니다.
Kubespray
https://github.com/kubernetes-sigs/kubespray
GitHub - kubernetes-sigs/kubespray: Deploy a Production Ready Kubernetes Cluster
Deploy a Production Ready Kubernetes Cluster. Contribute to kubernetes-sigs/kubespray development by creating an account on GitHub.
github.com

Ansible 기반으로 쿠버네티스(Kubernetes) 클러스터를 자동으로 설치, 업그레이드, 관리하기 위한 오픈소스 배포 도구
사전 설정
# user 확인
root@k8s-ctr:~# whoami
pwd
root
/root
# Linux Kernel Requirements : 5.8 이상 권장
root@k8s-ctr:~# uname -a
Linux k8s-ctr 6.12.0-55.39.1.el10_0.aarch64 #1 SMP PREEMPT_DYNAMIC Wed Oct 15 11:18:23 EDT 2025 aarch64 GNU/Linux
# Python : 3.10 ~ 3.12 : (참고) bento/rockylinux-9의 경우 3.9
root@k8s-ctr:~# which python && python -V
/usr/bin/python
Python 3.12.9
root@k8s-ctr:~# which python3 && python3 -V
/usr/bin/python3
Python 3.12.9
# pip , git 설치
root@k8s-ctr:~# dnf install -y python3-pip git
root@k8s-ctr:~# which pip && pip -V
which pip3 && pip3 -V
/usr/bin/pip
pip 23.3.2 from /usr/lib/python3.12/site-packages/pip (python 3.12)
/usr/bin/pip3
pip 23.3.2 from /usr/lib/python3.12/site-packages/pip (python 3.12)
# /etc/hosts 확인
root@k8s-ctr:~# ip -br -c -4 addr
lo UNKNOWN 127.0.0.1/8
enp0s8 UP 10.0.2.15/24
enp0s9 UP 192.168.10.10/24
root@k8s-ctr:~# cat /etc/hosts
# Loopback entries; do not change.
# For historical reasons, localhost precedes localhost.localdomain:
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
# See hosts(5) for proper format and other examples:
# 192.168.1.10 foo.example.org foo
# 192.168.1.13 bar.example.org bar
192.168.10.10 k8s-ctr
root@k8s-ctr:~# ping -c 1 k8s-ctr
PING k8s-ctr (192.168.10.10) 56(84) bytes of data.
64 bytes from k8s-ctr (192.168.10.10): icmp_seq=1 ttl=64 time=0.081 ms
--- k8s-ctr ping statistics ---
1 packets transmitted, 1 received, 0% packet loss, time 0ms
rtt min/avg/max/mdev = 0.081/0.081/0.081/0.000 ms
# SSH 접속을 위한 설정
root@k8s-ctr:~# echo "root:qwe123" | chpasswd
root@k8s-ctr:~# cat << EOF >> /etc/ssh/sshd_config
PermitRootLogin yes
PasswordAuthentication yes
EOF
root@k8s-ctr:~# systemctl restart sshd
# Setting SSH Key
root@k8s-ctr:~# ssh-keygen -t rsa -N "" -f /root/.ssh/id_rsa
Generating public/private rsa key pair.
Your identification has been saved in /root/.ssh/id_rsa
Your public key has been saved in /root/.ssh/id_rsa.pub
The key fingerprint is:
SHA256:XcI1pBqBcH9SBfcidaoxqrkg/OY4BpMlCLGgFU+K+t0 root@k8s-ctr
The key's randomart image is:
+---[RSA 3072]----+
|o.o.o.... o+B . |
|++ + ... + = = |
|* . . + O + . |
|+ . B B . |
|.+ S o |
|+o . . o |
| o+ o E |
| o+.. . |
| ..+o . |
+----[SHA256]-----+
root@k8s-ctr:~# ls -l ~/.ssh
total 8
-rw-------. 1 root root 2602 Feb 1 05:14 id_rsa
-rw-r--r--. 1 root root 566 Feb 1 05:14 id_rsa.pub
# ssh-copy-id
root@k8s-ctr:~# ssh-copy-id -o StrictHostKeyChecking=no root@192.168.10.10
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/root/.ssh/id_rsa.pub"
/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
root@192.168.10.10's password:
Number of key(s) added: 1
Now try logging into the machine, with: "ssh -o 'StrictHostKeyChecking=no' 'root@192.168.10.10'"
and check to make sure that only the key(s) you wanted were added.
# ssh 접속 확인 : IP, hostname
root@k8s-ctr:~# cat /root/.ssh/authorized_keys
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDL4YHNWCnsX/64y+O0O02ijzTrEHQpHyyJNZzNjcgxSP3alElK+Sv1S156RP7ZYQTntDiIaoZJQMd1rahijK4YFi8vKv32rug8+vO4NEEI2fr4VztCv0LPEE1L8hya9jhFJsLoQB7Z6eWNgh20ChnaekSwE1LfiPm1rT2cAVqSdNteoTz3za8/dsuhrJYqZrhvvqa3VEHjepUx0fxq0l/6s89OwmjdJsRaNF6L4KQyMRJmeIy4nPqDhwNavhqFt0NYrgoKd++JTXqQtJlPjg/Xm2TXFF+tpRNJzaSPUK8HSWUM3iBa7KVKMZ/1/RMXucq5rd0G60nd9bgR+e4xybcAcaZFgVIn8Lutoql9BkKQGK3E9XYn5mzrElrxVhBYkWTT/V5s3FyE0IWIbsLTFVoFHbjAizP8946Cxf9QTUyge0lvb+Q/6fNecyBmapCgshHSTo/Qwm1vECyF6n9f138rC/QmIFjuqa5Mf3PLJ4Azel0swhEpVOIMonnc06QBtpE= root@k8s-ctr
root@k8s-ctr:~# ssh root@192.168.10.10 hostname
k8s-ctr
root@k8s-ctr:~# ssh -o StrictHostKeyChecking=no root@k8s-ctr hostname
Warning: Permanently added 'k8s-ctr' (ED25519) to the list of known hosts.
k8s-ctr
root@k8s-ctr:~# ssh root@k8s-ctr hostname
k8s-ctr
# Clone Kubespray Repository
root@k8s-ctr:~# git clone -b v2.29.1 https://github.com/kubernetes-sigs/kubespray.git /root/kubespray
root@k8s-ctr:~# cd /root/kubespray
root@k8s-ctr:~/kubespray# pip3 install -r /root/kubespray/requirements.txt
# Ansible 버전 확인
root@k8s-ctr:~/kubespray# which ansible
/usr/local/bin/ansible
root@k8s-ctr:~/kubespray# ansible --version
ansible [core 2.17.14]
config file = /root/kubespray/ansible.cfg
configured module search path = ['/root/kubespray/library']
ansible python module location = /usr/local/lib/python3.12/site-packages/ansible
ansible collection location = /root/.ansible/collections:/usr/share/ansible/collections
executable location = /usr/local/bin/ansible
python version = 3.12.9 (main, Aug 14 2025, 00:00:00) [GCC 14.2.1 20250110 (Red Hat 14.2.1-7)] (/usr/bin/python3)
jinja version = 3.1.6
libyaml = True
Kubespray를 통한 k8s 배포
root@k8s-ctr:~/kubespray# cp -rfp /root/kubespray/inventory/sample /root/kubespray/inventory/mycluster
root@k8s-ctr:~/kubespray# tree inventory/mycluster/
inventory/mycluster/
├── group_vars
│ ├── all
│ │ ├── all.yml
│ │ ├── aws.yml
│ │ ├── azure.yml
│ │ ├── containerd.yml
│ │ ├── coreos.yml
│ │ ├── cri-o.yml
│ │ ├── docker.yml
│ │ ├── etcd.yml
│ │ ├── gcp.yml
│ │ ├── hcloud.yml
│ │ ├── huaweicloud.yml
│ │ ├── oci.yml
│ │ ├── offline.yml
│ │ ├── openstack.yml
│ │ ├── upcloud.yml
│ │ └── vsphere.yml
│ └── k8s_cluster
│ ├── addons.yml
│ ├── k8s-cluster.yml
│ ├── k8s-net-calico.yml
│ ├── k8s-net-cilium.yml
│ ├── k8s-net-custom-cni.yml
│ ├── k8s-net-flannel.yml
│ ├── k8s-net-kube-ovn.yml
│ ├── k8s-net-kube-router.yml
│ ├── k8s-net-macvlan.yml
│ └── kube_control_plane.yml
└── inventory.ini
4 directories, 27 files
# inventory.ini 작성
root@k8s-ctr:~/kubespray# cat << EOF > /root/kubespray/inventory/mycluster/inventory.ini
k8s-ctr ansible_host=192.168.10.10 ip=192.168.10.10
[kube_control_plane]
k8s-ctr
[etcd:children]
kube_control_plane
[kube_node]
k8s-ctr
EOF
root@k8s-ctr:~/kubespray# grep "^[^#]" inventory/mycluster/group_vars/all/all.yml
---
bin_dir: /usr/local/bin
loadbalancer_apiserver_port: 6443
loadbalancer_apiserver_healthcheck_port: 8081
no_proxy_exclude_workers: false
kube_webhook_token_auth: false
kube_webhook_token_auth_url_skip_tls_verify: false
ntp_enabled: false
ntp_manage_config: false
ntp_servers:
- "0.pool.ntp.org iburst"
- "1.pool.ntp.org iburst"
- "2.pool.ntp.org iburst"
- "3.pool.ntp.org iburst"
unsafe_show_logs: false
allow_unsupported_distribution_setup: false
# 테스트할 기능 관련 수정
root@k8s-ctr:~/kubespray# sed -i 's|kube_network_plugin: calico|kube_network_plugin: flannel|g' inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml
sed -i 's|kube_proxy_mode: ipvs|kube_proxy_mode: iptables|g' inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml
sed -i 's|enable_nodelocaldns: true|enable_nodelocaldns: false|g' inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml
sed -i 's|auto_renew_certificates: false|auto_renew_certificates: true|g' inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml
sed -i 's|# auto_renew_certificates_systemd_calendar|auto_renew_certificates_systemd_calendar|g' inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml
grep -iE 'kube_network_plugin:|kube_proxy_mode|enable_nodelocaldns:|^auto_renew_certificates' inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml
kube_network_plugin: flannel
kube_proxy_mode: iptables
enable_nodelocaldns: false
auto_renew_certificates: true
auto_renew_certificates_systemd_calendar: "Mon *-*-1,2,3,4,5,6,7 03:00:00"
## flannel 설정 수정 inventory/mycluster/group_vars/k8s_cluster/k8s-net-flannel.yml
root@k8s-ctr:~/kubespray# cat inventory/mycluster/group_vars/k8s_cluster/k8s-net-flannel.yml
# see roles/network_plugin/flannel/defaults/main.yml
## interface that should be used for flannel operations
## This is actually an inventory cluster-level item
# flannel_interface:
## Select interface that should be used for flannel operations by regexp on Name or IP
## This is actually an inventory cluster-level item
## example: select interface with ip from net 10.0.0.0/23
## single quote and escape backslashes
# flannel_interface_regexp: '10\\.0\\.[0-2]\\.\\d{1,3}'
# You can choose what type of flannel backend to use: 'vxlan', 'host-gw' or 'wireguard'
# please refer to flannel's docs : https://github.com/coreos/flannel/blob/master/README.md
# flannel_backend_type: "vxlan"
# flannel_vxlan_vni: 1
# flannel_vxlan_port: 8472
# flannel_vxlan_direct_routing: false
root@k8s-ctr:~/kubespray# echo "flannel_interface: enp0s9" >> inventory/mycluster/group_vars/k8s_cluster/k8s-net-flannel.yml
root@k8s-ctr:~/kubespray# grep "^[^#]" inventory/mycluster/group_vars/k8s_cluster/k8s-net-flannel.yml
flannel_interface: enp0s9
# 테스트할 기능 관련 수정
root@k8s-ctr:~/kubespray# sed -i 's|helm_enabled: false|helm_enabled: true|g' inventory/mycluster/group_vars/k8s_cluster/addons.yml
sed -i 's|metrics_server_enabled: false|metrics_server_enabled: true|g' inventory/mycluster/group_vars/k8s_cluster/addons.yml
sed -i 's|node_feature_discovery_enabled: false|node_feature_discovery_enabled: true|g' inventory/mycluster/group_vars/k8s_cluster/addons.yml
grep -iE 'helm_enabled:|metrics_server_enabled:|node_feature_discovery_enabled:' inventory/mycluster/group_vars/k8s_cluster/addons.yml
helm_enabled: true
metrics_server_enabled: true
node_feature_discovery_enabled: true
# 기본 환경 정보 출력 저장
root@k8s-ctr:~/kubespray# ip addr | tee -a ip_addr-1.txt
root@k8s-ctr:~/kubespray# ss -tnlp | tee -a ss-1.txt
root@k8s-ctr:~/kubespray# df -hT | tee -a df-1.txt
root@k8s-ctr:~/kubespray# findmnt | tee -a findmnt-1.txt
root@k8s-ctr:~/kubespray# sysctl -a | tee -a sysctl-1.txt
# 지원 버전 정보 확인
root@k8s-ctr:~/kubespray# cat roles/kubespray_defaults/vars/main/checksums.yml | grep -i kube -A40
kubelet_checksums:
arm64:
1.33.7: sha256:3035c44e0d429946d6b4b66c593d371cf5bbbfc85df39d7e2a03c422e4fe404a
1.33.6: sha256:7d8b7c63309cfe2da2331a1ae13cce070b9ba01e487099e7881a4281667c131d
1.33.5: sha256:c6ad0510c089d49244eede2638b4a4ff125258fd29a0649e7eef05c7f79c737f
1.33.4: sha256:623329b1a5f4858e3a5406d3947807b75144f4e71dde11ef1a71362c3a8619cc
1.33.3: sha256:3f69bb32debfaf25fce91aa5e7181e1e32f3550f3257b93c17dfb37bed621a9c
1.33.2: sha256:0fa15aca9b90fe7aef1ed3aad31edd1d9944a8c7aae34162963a6aaaf726e065
1.33.1: sha256:10540261c311ae005b9af514d83c02694e12614406a8524fd2d0bad75296f70d
1.33.0: sha256:ae5a4fc6d733fc28ff198e2d80334e21fcb5c34e76b411c50fff9cb25accf05a
1.32.10: sha256:21cc3d98550d3a23052d649e77956f2557e7f6119ff1e27dc82b852d006136cd
1.32.9: sha256:29037381c79152409adacee83448a2bdb67e113f003613663c7589286200ded8
1.32.8: sha256:d5527714fac08eac4c1ddcbd8a3c6db35f3acd335d43360219d733273b672cce
1.32.7: sha256:b862a8d550875924c8abed6c15ba22564f7e232c239aa6a2e88caf069a0ab548
1.32.6: sha256:b045d4f8f96bf934c894f9704ab2931ffa3c6cf78a8d98e457482a6c455dab6d
1.32.5: sha256:034753a2e308afeb4ce3cf332d38346c6e660252eac93b268fac0e112a56ff46
1.32.4: sha256:91117b71eb2bb3dd79ec3ed444e058a347349108bf661838f53ee30d2a0ff168
1.32.3: sha256:5c3c98e6e0fa35d209595037e05022597954b8d764482417a9588e15218f0fe2
1.32.2: sha256:d74b659bbde5adf919529d079975900e51e10bc807f0fda9dc9f6bb07c4a3a7b
1.32.1: sha256:8e6d0eeedd9f0b8b38d4f600ee167816f71cf4dacfa3d9a9bb6c3561cc884e95
1.32.0: sha256:bda9b2324c96693b38c41ecea051bab4c7c434be5683050b5e19025b50dbc0bf
1.31.14: sha256:e2842f132933b990a8cbc285be3a28ff1cd213fe1a3380e24e37b1d2ce5e0ca6
1.31.13: sha256:37e8f83b7bc4cb1b6f49d99cb0d23c2c692f9782abc4f03aad37cc7bd504af68
1.31.12: sha256:3dab6925a2beb59fbfa7df2897e001af95886145f556cafdbde8c4facd7ca516
1.31.11: sha256:3a0e07fd72709736cd85ce64a2f5505b2bb085fe697417b96ff249febd5357b1
1.31.10: sha256:bdb7b70e6f17e6a6700c275c0a3e3632252cf34bf482b6a9fb8448efe8a0e287
1.31.9: sha256:2debf321e74f430c3832e2426766271f4d51e54927e6ad4be0235d31453dace6
1.31.8: sha256:c071aa506071db5f03a03ea3f406b4250359b08b7ae10eeee3cfb3da05411925
1.31.7: sha256:c6624e9e0bbf31334893f991f9a85c7018d8073c32147f421f6338bc92ac6f33
1.31.6: sha256:79b2bae5f578bae643e44ae1a40c834221983ac8e695c82aad79f2dc96c50ada
1.31.5: sha256:922a96405fdc3ae41e403565d06c5a6c3b733b0c3d0d1d61086b39c6760103d3
1.31.4: sha256:fb6f02f3324a72307acc11998eb5b1c3778167ae165c98f9d49bd011498e72f8
1.31.3: sha256:0ec590052f2d1cee158a789d705ca931cbc2556ceed364c4ad754fd36c61be28
1.31.2: sha256:118e1b0e85357a81557f9264521c083708f295d7c5f954a4113500fd1afca8f8
1.31.1: sha256:fbd98311e96b9dcdd73d1688760d410cc70aefce26272ff2f20eef51a7c0d1da
1.31.0: sha256:b310da449a9d2f8b928cab5ca12a6772617ba421023894e061ca2647e6d9f1c3
amd64:
1.33.7: sha256:2cea40c8c6929330e799f8fc73233a4b61e63f208739669865e2a23a39c3a007
1.33.6: sha256:10cd08fe1f9169fd7520123bcdfff87e37b8a4e21c39481faa382f00355b6973
1.33.5: sha256:8f6106b970259486c5af5cbee404d4f23406d96d99dfb92a6965b299c2a4db0e
1.33.4: sha256:109bd2607b054a477ede31c55ae814eae8e75543126dc4cea40b04424d843489
--
kubectl_checksums:
arm:
1.33.7: sha256:f6b9ac99f4efb406c5184d0a51d9ed896690c80155387007291309cbb8cdd847
1.33.6: sha256:89bcef827ac8662781740d092cff410744c0653d828b68cc14051294fcd717e6
1.33.5: sha256:5a3a416a85cfc9f7a348c0c0e6334b7449e00a57288ab5a57286ccf68a4d06af
1.33.4: sha256:eefd3864ce5440e0ba648b12d53ccffaad97f1c049781b1aa21af6a5278f035f
1.33.3: sha256:0124dba9e9091b872591cabcbaea7df07069cb132d38d95f3c7bc8d5b8b621a9
1.33.2: sha256:f3992382aa0ea21f71a976b6fd6a213781c9b58be60c42013950110cf2184f2a
1.33.1: sha256:6b1cd6e2bf05c6adaa76b952f9c4ea775f5255913974ccdb12145175d4809e93
1.33.0: sha256:bbb4b4906d483f62b0fc3a0aea3ddac942820984679ad11635b81ee881d69ab3
1.32.10: sha256:b42bc77586238b43b8c5cdd06086f1ab00190245dd8b66b28822785b177fbde4
1.32.9: sha256:84629d460b60693ca954e148ce522defd34d18bc5c934836cfaf0268930713dd
1.32.8: sha256:ed54b52631fdf5ecc4ddb12c47df481f84b5890683beaeaa55dc84e43d2cd023
1.32.7: sha256:c5416b59afdf897c4fbf08867c8a32b635f83f26e40980d38233fad6b345e37c
1.32.6: sha256:77fec65c6f08c28f8695de4db877d82d74c881ed3ed110ebfd88cbd4ee3d01dc
1.32.5: sha256:7270e6ac4b82b5e4bd037dccae1631964634214baa66a9548deb5edd3f79de31
1.32.4: sha256:bf28793213039690d018bbfa9bcfcfed76a9aa8e18dc299eced8709ca542fcdd
1.32.3: sha256:f990c878e54e5fac82eac7398ef643acca9807838b19014f1816fa9255b2d3d9
1.32.2: sha256:e1e6a2fd4571cd66c885aa42b290930660d34a7331ffb576fcab9fd1a0941a83
1.32.1: sha256:8ccf69be2578d3a324e9fc7d4f3b29bc9743cc02d72f33ba2d0fe30389014bc8
1.32.0: sha256:6b33ea8c80f785fb07be4d021301199ae9ee4f8d7ea037a8ae544d5a7514684e
1.31.14: sha256:23860bd774ec2c2cb1f409581c236725673c55506409da846a651ec27c2ca15d
1.31.13: sha256:875597876f9dcfb2b3197667c0fbb0691cbef3d9522de22875c1a5c02bc04de5
1.31.12: sha256:8e430e7a192355a60e1398580a861b4724b286ed38ff52a156500d3fae90c583
1.31.11: sha256:7768bb4e1b79ddac982968e47d9e25f357b7e9c0f08039134815a64062d5ea6f
1.31.10: sha256:1f3f644609513ed0c6045638e60fc9e9fb5de39c375719601f565e6ad82b9b85
1.31.9: sha256:54e560eb3ad4b2b0ae95d79d71b2816dfa154b33758e49f2583bec0980f19861
1.31.8: sha256:65fdd04f5171e44620cc4e0b9e0763b1b3d10b2b15c1f7f99b549d36482015d4
1.31.7: sha256:870d919f8ef5f5c608bd69c57893937910de6a8ed2c077fc4f0945375f61734d
1.31.6: sha256:b370a552cd6c9bb5fc42e4e9031b74f35da332f27b585760bacb0d3189d8634d
1.31.5: sha256:cbb4e470751ef8864ade9d008e848f691ac6cbdee320539797a68a5512b9f7f8
1.31.4: sha256:055d1672f63fda86c6dfa5a2354d627f908f68bde6bf8394fdc9a99cadc4de19
1.31.3: sha256:e0d00fbac98e67b774ff1ed9a0e6fc5be5c1f08cc69b0c8b483904ed15ad8c50
1.31.2: sha256:f2a638bdaa4764e82259ed1548ce2c86056e33a3d09147f7f0c2d4ee5b5e300c
1.31.1: sha256:51b178c9362a4fbe35644399f113d7f904d306261953a51c5c0a57676e209fa6
1.31.0: sha256:a4d6292c88c199688a03ea211bea08c8ae29f1794f5deeeef46862088d124baa
arm64:
1.33.7: sha256:fa7ee98fdb6fba92ae05b5e0cde0abd5972b2d9a4a084f7052a1fd0dce6bc1de
1.33.6: sha256:3ab32d945a67a6000ba332bf16382fc3646271da6b7d751608b320819e5b8f38
1.33.5: sha256:6db7c5d846c3b3ddfd39f3137a93fe96af3938860eefdbf2429805ee1656e381
1.33.4: sha256:76cd7a2aa59571519b68c3943521404cbce55dafb7d8866f8d0ea2995b396eef
--
kubeadm_checksums:
arm64:
1.33.7: sha256:b24eeeff288f9565e11a2527e5aed42c21386596110537adb805a5a2a7b3e9ce
1.33.6: sha256:ef80c198ca15a0850660323655ebf5c32cc4ab00da7a5a59efe95e4bcf8503ab
1.33.5: sha256:b1c00657649e35771569d095e531d826bd19baf57bcb53cccf3f91d7d60b7808
1.33.4: sha256:ef471b454d68ee211e279ddeaebde6ee7a8e14b66ae58e0d0184e967c3595892
1.33.3: sha256:bf8ed3bc3952e04f29863c6910ae84b359fe7ac1e642ed4d742ceb396e62c6f2
1.33.2: sha256:21efc1ba54a1cf25ac68208b7dde2e67f6d0331259f432947d83e70b975ad4cc
1.33.1: sha256:5b3e3a1e18d43522fdee0e15be13a42cee316e07ddcf47ef718104836edebb3e
1.33.0: sha256:746c0ee45f4d32ec5046fb10d4354f145ba1ff0c997f9712d46036650ad26340
1.32.10: sha256:a201f246be3d2c35ffa7fc51a1d2596797628f9b1455da52a246b42ce8e1f779
1.32.9: sha256:377349141e865849355140c78063fa2b87443bf1aecb06319be4de4df8dbd918
1.32.8: sha256:8dbd3fa2d94335d763b983caaf2798caae2d4183f6a95ebff28289f2e86edf68
1.32.7: sha256:a2aad7f7b320c3c847dea84c08e977ba8b5c84d4b7102b46ffd09d41af6c4b51
1.32.6: sha256:f786731c37ce6e89e6b71d5a7518e4d1c633337237e3803615056eb4640bfc8e
1.32.5: sha256:2956c694ff2891acdc4690b807f87ab48419b4925d3fad2ac52ace2a1160bd17
1.32.4: sha256:1b9d97b44758dc4da20d31e3b6d46f50af75ac48be887793e16797a43d9c30e7
1.32.3: sha256:f9d007aaf1468ea862ef2a1a1a3f6f34cc57358742ceaff518e1533f5a794181
1.32.2: sha256:fd8a8c1c41d719de703bf49c6f56692dd6477188d8f43dcb77019fd8bc30cbd3
1.32.1: sha256:55a57145708aaa37f716f140ef774ca64b7088b6df5ee8eae182936ad6580328
1.32.0: sha256:5da9746a449a3b8a8312b6dd8c48dcb861036cf394306cfbc66a298ba1e8fbde
1.31.14: sha256:ff9d9351423fd9c7b40a39a9be11df077b1f5a40c85b70349ca0ce55cd4fd336
1.31.13: sha256:30762e5a20eb8a4d52b278fe7d999fd76ab20b63b40cb1e60625bc73c6e11e96
1.31.12: sha256:88fc31963e833d72d1e26159166591aea537d762debb5cc0f0d059fdc717b43b
1.31.11: sha256:73dff62190cd26947a088ceb79d4d039a916091e0c80734e9ddd7b2e0b8efb8b
1.31.10: sha256:01e627449b5f94bc068f7d0680a07abfd118cbf9805c7bce3aea31a46e4a16cc
1.31.9: sha256:d8f5dbb17ce2dead6aedcc700e4293a9395e246079fcdc1772ab9e5cbfeca906
1.31.8: sha256:d0d1a6634e397e4f14b1e5f9b4bd55758ea70bfc114728730d25d563952e453e
1.31.7: sha256:3f95765db3b9ebb0cf2ff213ac3b42a831dd995a48d9a6b1d544137d3f2c3018
1.31.6: sha256:03b6df27c630f6137be129d2cef49dc4da12077381af8d234a92e451ba2a16d2
1.31.5: sha256:971904ff1ac2879d968cac1d4865b7c0ce0d9374506bd17bd0b123981803769b
1.31.4: sha256:4598c2f0c69e60feb47a070376da358f16efe0e1403c6aca97fa8f7ab1d0e7c0
1.31.3: sha256:8113900524bd1c8b3ce0b3ece0d37f96291cbf359946afae58a596319a5575c8
1.31.2: sha256:0f9d231569b3195504f8458415e9b3080e23fb6a749fe7752abfc7a2884efadf
1.31.1: sha256:66195cd53cda3c73c9ae5e49a1352c710c0ea9ce244bbdeb68b917d809f0ea78
1.31.0: sha256:dbeb84862d844d58f67ad6be64021681a314cda162a04e6047f376f2a9ad0226
amd64:
1.33.7: sha256:c10813d54f58ef33bbe6675f3d39c8bd401867743ebc729afdd043265040c31d
1.33.6: sha256:c1b84cb3482dd79e26629012f432541ccb505c17f5073aa1fdbca26b1e4909fd
1.33.5: sha256:6761219749c6c67a56a5668dfe65d669e0c1f34d4b280b72de6d74d47c601f1e
1.33.4: sha256:a109ebcb68e52d3dd605d92f92460c884dcc8b68aebe442404af19b6d9d778ec
# 배포: 아래처럼 반드시 ~/kubespray 디렉토리에서 ansible-playbook을 실행하자!
root@k8s-ctr:~/kubespray# ansible-playbook -i inventory/mycluster/inventory.ini -v cluster.yml -e kube_version="1.33.3" --list-tasks # 배포 전, Task 목록 확인
Using /root/kubespray/ansible.cfg as config file
[WARNING]: Could not match supplied host pattern, ignoring: bastion
[WARNING]: Could not match supplied host pattern, ignoring: k8s_cluster
[WARNING]: Could not match supplied host pattern, ignoring: calico_rr
[WARNING]: Could not match supplied host pattern, ignoring: _kubespray_needs_etcd
playbook: cluster.yml
play #1 (all): Check Ansible version TAGS: [always]
tasks:
Check {{ minimal_ansible_version }} <= Ansible version < {{ maximal_ansible_version }} TAGS: [always, check]
Check that python netaddr is installed TAGS: [always, check]
Check that jinja is not too old (install via pip) TAGS: [always, check]
play #2 (all): Inventory setup and validation TAGS: [always]
tasks:
dynamic_groups : Match needed groups by their old names or definition TAGS: [always]
validate_inventory : Stop if removed tags are used TAGS: [always]
validate_inventory : Stop if kube_control_plane group is empty TAGS: [always]
validate_inventory : Stop if etcd group is empty in external etcd mode TAGS: [always]
validate_inventory : Warn if `kube_network_plugin` is `none TAGS: [always]
validate_inventory : Stop if unsupported version of Kubernetes TAGS: [always]
validate_inventory : Stop if known booleans are set as strings (Use JSON format on CLI: -e "{'key': true }") TAGS: [always]
validate_inventory : Stop if even number of etcd hosts TAGS: [always]
validate_inventory : Guarantee that enough network address space is available for all pods TAGS: [always]
validate_inventory : Stop if RBAC is not enabled when dashboard is enabled TAGS: [always]
validate_inventory : Check cloud_provider value TAGS: [always]
validate_inventory : Check external_cloud_provider value TAGS: [always]
validate_inventory : Check that kube_service_addresses is a network range TAGS: [always]
validate_inventory : Check that kube_pods_subnet is a network range TAGS: [always]
validate_inventory : Check that kube_pods_subnet does not collide with kube_service_addresses TAGS: [always]
validate_inventory : Check that ipv4 IP range is enough for the nodes TAGS: [always]
validate_inventory : Check that kube_service_addresses_ipv6 is a network range TAGS: [always]
validate_inventory : Check that kube_pods_subnet_ipv6 is a network range TAGS: [always]
validate_inventory : Check that kube_pods_subnet_ipv6 does not collide with kube_service_addresses_ipv6 TAGS: [always]
validate_inventory : Check that ipv6 IP range is enough for the nodes TAGS: [always]
validate_inventory : Stop if unsupported options selected TAGS: [always]
validate_inventory : Warn if `enable_dual_stack_networks` is set TAGS: [always]
validate_inventory : Stop if download_localhost is enabled but download_run_once is not TAGS: [always]
validate_inventory : Stop if kata_containers_enabled is enabled when container_manager is docker TAGS: [always]
validate_inventory : Stop if gvisor_enabled is enabled when container_manager is not containerd TAGS: [always]
validate_inventory : Ensure minimum containerd version TAGS: [always]
validate_inventory : Stop if auto_renew_certificates is enabled when certificates are managed externally (kube_external_ca_mode is true) TAGS: [always]
play #3 (bastion[0]): Install bastion ssh config TAGS: []
tasks:
bastion-ssh-config : Set bastion host IP and port TAGS: [bastion, localhost]
bastion-ssh-config : Store the current ansible_user in the real_user fact TAGS: [bastion, localhost]
bastion-ssh-config : Create ssh bastion conf TAGS: [bastion, localhost]
play #4 (k8s_cluster:etcd:calico_rr): Bootstrap hosts for Ansible TAGS: []
tasks:
bootstrap_os : Fetch /etc/os-release TAGS: [bootstrap_os]
bootstrap_os : Include vars TAGS: [bootstrap_os, facts]
bootstrap_os : Include tasks TAGS: [bootstrap_os]
system_packages : Gather OS information TAGS: [bootstrap_os, system-packages]
system_packages : Update package management cache (zypper) - SUSE TAGS: [bootstrap_os, system-packages]
system_packages : Remove legacy docker repo file TAGS: [bootstrap_os, system-packages]
system_packages : Install epel-release on RHEL derivatives TAGS: [bootstrap_os, system-packages]
system_packages : Manage packages TAGS: [bootstrap_os, system-packages]
bootstrap_os : Create remote_tmp for it is used by another module TAGS: [bootstrap_os]
bootstrap_os : Gather facts TAGS: [bootstrap_os]
bootstrap_os : Assign inventory name to unconfigured hostnames (non-CoreOS, non-Flatcar, Suse and ClearLinux, non-Fedora) TAGS: [bootstrap_os]
bootstrap_os : Ensure bash_completion.d folder exists TAGS: [bootstrap_os]
play #5 (k8s_cluster:etcd:calico_rr): Gather facts TAGS: [always]
tasks:
...
play #15 (k8s_cluster): Apply resolv.conf changes now that cluster DNS is up TAGS: []
tasks:
adduser : User | Create User Group TAGS: [kubelet, resolvconf]
adduser : User | Create User TAGS: [kubelet, resolvconf]
root@k8s-ctr:~/kubespray# ANSIBLE_FORCE_COLOR=true ansible-playbook -i inventory/mycluster/inventory.ini -v cluster.yml -e kube_version="1.33.3" | tee kubespray_install.log
Using /root/kubespray/ansible.cfg as config file
PLAY [Check Ansible version] ***************************************************
Sunday 01 February 2026 05:42:20 +0900 (0:00:00.012) 0:00:00.012 *******
TASK [Check 2.17.3 <= Ansible version < 2.18.0] ********************************
ok: [k8s-ctr] => {
"changed": false,
"msg": "All assertions passed"
}
Sunday 01 February 2026 05:42:20 +0900 (0:00:00.018) 0:00:00.031 *******
TASK [Check that python netaddr is installed] **********************************
ok: [k8s-ctr] => {
"changed": false,
"msg": "All assertions passed"
}
Sunday 01 February 2026 05:42:20 +0900 (0:00:00.048) 0:00:00.080 *******
TASK [Check that jinja is not too old (install via pip)] ***********************
ok: [k8s-ctr] => {
"changed": false,
"msg": "All assertions passed"
}
PLAY [Inventory setup and validation] ******************************************
Sunday 01 February 2026 05:42:21 +0900 (0:00:00.022) 0:00:00.102 *******
TASK [dynamic_groups : Match needed groups by their old names or definition] ***
changed: [k8s-ctr] => (item={'key': 'k8s_cluster', 'value': ['kube_node', 'kube_control_plane', 'calico_rr']}) => {"add_group": "k8s_cluster", "ansible_loop_var": "item", "changed": true, "item": {"key": "k8s_cluster", "value": ["kube_node", "kube_control_plane", "calico_rr"]}, "parent_groups": ["all"]}
Sunday 01 February 2026 05:42:21 +0900 (0:00:00.041) 0:00:00.144 *******
TASK [validate_inventory : Stop if removed tags are used] **********************
ok: [k8s-ctr] => {
"changed": false,
"msg": "All assertions passed"
}
Sunday 01 February 2026 05:42:21 +0900 (0:00:00.021) 0:00:00.165 *******
TASK [validate_inventory : Stop if kube_control_plane group is empty] **********
ok: [k8s-ctr] => {
"changed": false,
"msg": "All assertions passed"
}
Sunday 01 February 2026 05:42:21 +0900 (0:00:00.020) 0:00:00.186 *******
TASK [validate_inventory : Stop if etcd group is empty in external etcd mode] ***
ok: [k8s-ctr] => {
"changed": false,
"msg": "All assertions passed"
}
Sunday 01 February 2026 05:42:21 +0900 (0:00:00.021) 0:00:00.207 *******
Sunday 01 February 2026 05:42:21 +0900 (0:00:00.009) 0:00:00.217 *******
TASK [validate_inventory : Stop if unsupported version of Kubernetes] **********
ok: [k8s-ctr] => {
"changed": false,
"msg": "All assertions passed"
}
Sunday 01 February 2026 05:42:21 +0900 (0:00:00.025) 0:00:00.242 *******
TASK [validate_inventory : Stop if known booleans are set as strings (Use JSON format on CLI: -e "{'key': true }")] ***
ok: [k8s-ctr] => {
"changed": false,
"msg": "All assertions passed"
}
Sunday 01 February 2026 05:42:21 +0900 (0:00:00.025) 0:00:00.268 *******
TASK [validate_inventory : Stop if even number of etcd hosts] ******************
ok: [k8s-ctr] => {
"changed": false,
"msg": "All assertions passed"
}
Sunday 01 February 2026 05:42:21 +0900 (0:00:00.024) 0:00:00.292 *******
TASK [validate_inventory : Guarantee that enough network address space is available for all pods] ***
ok: [k8s-ctr] => {
"changed": false,
"msg": "All assertions passed"
}
Sunday 01 February 2026 05:42:21 +0900 (0:00:00.026) 0:00:00.318 *******
Sunday 01 February 2026 05:42:21 +0900 (0:00:00.009) 0:00:00.328 *******
Sunday 01 February 2026 05:42:21 +0900 (0:00:00.010) 0:00:00.339 *******
Sunday 01 February 2026 05:42:21 +0900 (0:00:00.009) 0:00:00.348 *******
TASK [validate_inventory : Check that kube_service_addresses is a network range] ***
ok: [k8s-ctr] => {
"changed": false,
"msg": "All assertions passed"
}
Sunday 01 February 2026 05:42:21 +0900 (0:00:00.049) 0:00:00.398 *******
TASK [validate_inventory : Check that kube_pods_subnet is a network range] *****
ok: [k8s-ctr] => {
"changed": false,
"msg": "All assertions passed"
}
Sunday 01 February 2026 05:42:21 +0900 (0:00:00.048) 0:00:00.446 *******
TASK [validate_inventory : Check that kube_pods_subnet does not collide with kube_service_addresses] ***
ok: [k8s-ctr] => {
"changed": false,
"msg": "All assertions passed"
}
Sunday 01 February 2026 05:42:21 +0900 (0:00:00.051) 0:00:00.497 *******
TASK [validate_inventory : Check that ipv4 IP range is enough for the nodes] ***
ok: [k8s-ctr] => {
"changed": false,
"msg": "All assertions passed"
}
Sunday 01 February 2026 05:42:21 +0900 (0:00:00.048) 0:00:00.546 *******
Sunday 01 February 2026 05:42:21 +0900 (0:00:00.016) 0:00:00.562 *******
Sunday 01 February 2026 05:42:21 +0900 (0:00:00.015) 0:00:00.578 *******
Sunday 01 February 2026 05:42:21 +0900 (0:00:00.014) 0:00:00.592 *******
Sunday 01 February 2026 05:42:21 +0900 (0:00:00.013) 0:00:00.606 *******
TASK [validate_inventory : Stop if unsupported options selected] ***************
ok: [k8s-ctr] => {
"changed": false,
"msg": "All assertions passed"
}
Sunday 01 February 2026 05:42:21 +0900 (0:00:00.025) 0:00:00.631 *******
Sunday 01 February 2026 05:42:21 +0900 (0:00:00.013) 0:00:00.645 *******
Sunday 01 February 2026 05:42:21 +0900 (0:00:00.008) 0:00:00.654 *******
Sunday 01 February 2026 05:42:21 +0900 (0:00:00.009) 0:00:00.663 *******
Sunday 01 February 2026 05:42:21 +0900 (0:00:00.009) 0:00:00.673 *******
TASK [validate_inventory : Ensure minimum containerd version] ******************
ok: [k8s-ctr] => {
"changed": false,
"msg": "All assertions passed"
}
Sunday 01 February 2026 05:42:21 +0900 (0:00:00.023) 0:00:00.696 *******
[WARNING]: Could not match supplied host pattern, ignoring: bastion
[WARNING]: Could not match supplied host pattern, ignoring: calico_rr
PLAY [Install bastion ssh config] **********************************************
skipping: no hosts matched
PLAY [Bootstrap hosts for Ansible] *********************************************
Sunday 01 February 2026 05:42:21 +0900 (0:00:00.019) 0:00:00.715 *******
[WARNING]: raw module does not support the environment keyword
TASK [bootstrap_os : Fetch /etc/os-release] ************************************
...
root@k8s-ctr:~/kubespray# more kubespray_install.log
Using /root/kubespray/ansible.cfg as config file
PLAY [Check Ansible version] ***************************************************
Sunday 01 February 2026 05:42:20 +0900 (0:00:00.012) 0:00:00.012 *******
TASK [Check 2.17.3 <= Ansible version < 2.18.0] ********************************
ok: [k8s-ctr] => {
"changed": false,
"msg": "All assertions passed"
}
Sunday 01 February 2026 05:42:20 +0900 (0:00:00.018) 0:00:00.031 *******
TASK [Check that python netaddr is installed] **********************************
ok: [k8s-ctr] => {
"changed": false,
"msg": "All assertions passed"
}
Sunday 01 February 2026 05:42:20 +0900 (0:00:00.048) 0:00:00.080 *******
TASK [Check that jinja is not too old (install via pip)] ***********************
ok: [k8s-ctr] => {
"changed": false,
"msg": "All assertions passed"
}
PLAY [Inventory setup and validation] ******************************************
root@k8s-ctr:~/kubespray# kubectl get node -v=6
I0201 05:55:54.381070 35626 loader.go:402] Config loaded from file: /root/.kube/config
I0201 05:55:54.381828 35626 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false
I0201 05:55:54.381849 35626 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false
I0201 05:55:54.381854 35626 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false
I0201 05:55:54.381858 35626 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false
I0201 05:55:54.381861 35626 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true
I0201 05:55:54.392143 35626 round_trippers.go:632] "Response" verb="GET" url="https://127.0.0.1:6443/api/v1/nodes?limit=500" status="200 OK" milliseconds=7
NAME STATUS ROLES AGE VERSION
k8s-ctr Ready control-plane 4m7s v1.33.3
root@k8s-ctr:~/kubespray# ip addr | tee -a ip_addr-2.txt
ss -tnlp | tee -a ss-2.txt
df -hT | tee -a df-2.txt
findmnt | tee -a findmnt-2.txt
sysctl -a | tee -a sysctl-2.txt
Ansible Playbook & Role 분석

# cluster.yml은 Kubespray의 메인 플레이북으로, 클러스터 생성의 전체 과정을 정의
root@k8s-ctr:~/kubespray# cat /root/kubespray/cluster.yml
---
- name: Install Kubernetes
ansible.builtin.import_playbook: playbooks/cluster.yml
root@k8s-ctr:~/kubespray# cat kubespray_install.log | grep -E 'PLAY'
PLAY [Check Ansible version] ***************************************************
PLAY [Inventory setup and validation] ******************************************
PLAY [Install bastion ssh config] **********************************************
PLAY [Bootstrap hosts for Ansible] *********************************************
PLAY [Gather facts] ************************************************************
PLAY [Prepare for etcd install] ************************************************
PLAY [Add worker nodes to the etcd play if needed] *****************************
PLAY [Install etcd] ************************************************************
PLAY [Install Kubernetes nodes] ************************************************
PLAY [Install the control plane] ***********************************************
PLAY [Invoke kubeadm and install a CNI] ****************************************
PLAY [Install Calico Route Reflector] ******************************************
PLAY [Patch Kubernetes for Windows] ********************************************
PLAY [Install Kubernetes apps] *************************************************
PLAY [Apply resolv.conf changes now that cluster DNS is up] ********************
PLAY RECAP *********************************************************************
root@k8s-ctr:~/kubespray# cat kubespray_install.log | grep -E 'TASK' | wc -l
563
# playbooks/boilerplate.yml → ansible_version.yml → roles (dynamic_groups, validate_inventory) 소개
root@k8s-ctr:~/kubespray# cat playbooks/boilerplate.yml
---
- name: Check ansible version
import_playbook: ansible_version.yml
# These are inventory compatibility tasks with two purposes:
# - to ensure we keep compatibility with old style group names
# - to reduce inventory boilerplate (defining parent groups / empty groups)
- name: Inventory setup and validation
hosts: all
gather_facts: false
tags: always
roles:
- dynamic_groups
- validate_inventory
- name: Install bastion ssh config
hosts: bastion[0]
gather_facts: false
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray_defaults }
- { role: bastion-ssh-config, tags: ["localhost", "bastion"] }
# kubespray_defaults: Kubespray 전체 play / role / task가 참조하는 ‘최상위 기본 변수 집합’ ← bastion 대상이 아니여서 여기서는 Skip 되긴함
# 여기 값은 언제든 override 가능, 하지만 override 안 하면 전부 여기 기준
root@k8s-ctr:~/kubespray# tree roles/kubespray_defaults
roles/kubespray_defaults
├── defaults
│ └── main
│ ├── download.yml
│ └── main.yml
└── vars
└── main
├── checksums.yml
└── main.yml
5 directories, 4 files
1. 초기화 및 정보 수집 (Boilerplate & Facts)
- 모든 노드에 공통적으로 필요한 설정을 적용하고, 각 서버의 사양 정보를 수집하여 이후 설치 단계에서 변수로 활용합니다.
2. 인프라 및 엔진 준비 (Prepare for etcd & container-engine)
- kubernetes/preinstall: 방화벽, 커널 파라미터, Swap 비활성화 등 K8s 설치를 위한 OS 최적화를 수행합니다.
- container-engine: Docker나 Containerd 같은 컨테이너 런타임을 설치합니다.
- download: 설치에 필요한 모든 바이너리와 이미지들을 미리 다운로드합니다.
3. 데이터 저장소 및 노드 구성 (Etcd & K8s Nodes)
- etcd: 쿠버네티스의 상태 정보를 저장하는 DB 클러스터를 구축합니다.
- kubernetes/node: 모든 노드에 Kubelet, Kube-proxy 등 기초 컴포넌트를 설치합니다.
4. 컨트롤 플레인 및 네트워크 (Control Plane & CNI)
- control-plane: 마스터 노드에 API 서버, 스케줄러 등을 설정합니다.
- kubeadm: kubeadm init 또는 join을 통해 클러스터를 하나로 묶습니다.
- network_plugin: Calico, Flannel 등 CNI를 설치하여 파드 간 통신을 가능하게 합니다.
5. 부가 서비스 설치 (Apps & DNS)
- Ingress Controller, Storage Provisioner 등 클러스터 운영에 필요한 앱들을 배포하고, 최종적으로 노드의 DNS(resolv.conf)가 클러스터 내부 DNS를 바라보도록 수정합니다.
실습 환경 배포 분석
다운로드 파일 경로 확인 : local_release_dir: "/tmp/releases"
root@k8s-ctr:~/kubespray# tree -ug /tmp/releases/
[root root ] /tmp/releases/
├── [root root ] cni-plugins-linux-arm64-1.8.0.tgz
├── [root root ] containerd-2.1.5-linux-arm64.tar.gz
├── [root root ] containerd-rootless-setuptool.sh
├── [root root ] containerd-rootless.sh
├── [root 118 ] crictl
├── [root root ] crictl-1.33.0-linux-arm64.tar.gz
├── [root root ] etcd-3.5.25-linux-arm64.tar.gz
├── [vagrant vagrant ] etcd-v3.5.25-linux-arm64
│ ├── [vagrant vagrant ] Documentation
│ │ ├── [vagrant vagrant ] dev-guide
│ │ │ └── [vagrant vagrant ] apispec
│ │ │ └── [vagrant vagrant ] swagger
│ │ │ ├── [vagrant vagrant ] rpc.swagger.json
│ │ │ ├── [vagrant vagrant ] v3election.swagger.json
│ │ │ └── [vagrant vagrant ] v3lock.swagger.json
│ │ └── [vagrant vagrant ] README.md
│ ├── [vagrant vagrant ] etcd
│ ├── [vagrant vagrant ] etcdctl
│ ├── [vagrant vagrant ] etcdutl
│ ├── [vagrant vagrant ] README-etcdctl.md
│ ├── [vagrant vagrant ] README-etcdutl.md
│ ├── [vagrant vagrant ] README.md
│ └── [vagrant vagrant ] READMEv2-etcdctl.md
├── [root root ] helm-3.18.4
│ ├── [root root ] helm-3.18.4-linux-arm64.tar.gz
│ └── [root 118 ] linux-arm64
│ ├── [root 118 ] helm
│ ├── [root 118 ] LICENSE
│ └── [root 118 ] README.md
├── [root root ] images
├── [root root ] kubeadm-1.33.3-arm64
├── [root root ] kubectl-1.33.3-arm64
├── [root root ] kubelet-1.33.3-arm64
├── [root root ] nerdctl
├── [root root ] nerdctl-2.1.6-linux-arm64.tar.gz
└── [root root ] runc-1.3.4.arm64
9 directories, 28 files
설치된 바이너리 확인 : bin_dir: /usr/local/bin
root@k8s-ctr:~/kubespray# cat inventory/mycluster/group_vars/all/all.yml | grep 'bin_dir'
bin_dir: /usr/local/bin
root@k8s-ctr:~/kubespray# cat inventory/mycluster/group_vars/k8s_cluster/addons.yml | grep helm
helm_enabled: true
root@k8s-ctr:~/kubespray# helm version
version.BuildInfo{Version:"v3.18.4", GitCommit:"d80839cf37d860c8aa9a0503fe463278f26cd5e2", GitTreeState:"clean", GoVersion:"go1.24.4"}
root@k8s-ctr:~/kubespray# etcdctl version
etcdctl version: 3.5.25
API version: 3.5
root@k8s-ctr:~/kubespray# containerd --version
containerd github.com/containerd/containerd/v2 v2.1.5 fcd43222d6b07379a4be9786bda52438f0dd16a1
root@k8s-ctr:~/kubespray# kubeadm version -o yaml
clientVersion:
buildDate: "2025-07-15T18:05:14Z"
compiler: gc
gitCommit: 80779bd6ff08b451e1c165a338a7b69351e9b0b8
gitTreeState: clean
gitVersion: v1.33.3
goVersion: go1.24.4
major: "1"
minor: "33"
platform: linux/arm64
설치된 cni 관련 파일 확인 & kube_owner에 uid로 생성되는 파일 목록 확인
root@k8s-ctr:~/kubespray# cat inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml
---
# Kubernetes configuration dirs and system namespace.
# Those are where all the additional config stuff goes
# the kubernetes normally puts in /srv/kubernetes.
# This puts them in a sane location and namespace.
# Editing those values will almost surely break something.
kube_config_dir: /etc/kubernetes
kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
kube_manifest_dir: "{{ kube_config_dir }}/manifests"
# This is where all the cert scripts and certs will be located
kube_cert_dir: "{{ kube_config_dir }}/ssl"
# This is where all of the bearer tokens will be stored
kube_token_dir: "{{ kube_config_dir }}/tokens"
kube_api_anonymous_auth: true
# Where the binaries will be downloaded.
# Note: ensure that you've enough disk space (about 1G)
local_release_dir: "/tmp/releases"
# Random shifts for retrying failed ops like pushing/downloading
retry_stagger: 5
# This is the user that owns the cluster installation.
kube_owner: kube
...
root@k8s-ctr:~/kubespray# find / -user kube 2>/dev/null
/etc/cni
/etc/cni/net.d
/etc/kubernetes
/etc/kubernetes/manifests
/usr/libexec/kubernetes
/usr/libexec/kubernetes/kubelet-plugins
/usr/libexec/kubernetes/kubelet-plugins/volume
/usr/libexec/kubernetes/kubelet-plugins/volume/exec
/usr/local/bin/kubernetes-scripts
/opt/cni
/opt/cni/bin
/opt/cni/bin/README.md
/opt/cni/bin/static
/opt/cni/bin/host-device
/opt/cni/bin/ipvlan
/opt/cni/bin/dhcp
/opt/cni/bin/LICENSE
/opt/cni/bin/portmap
/opt/cni/bin/tap
/opt/cni/bin/host-local
/opt/cni/bin/vlan
/opt/cni/bin/loopback
/opt/cni/bin/sbr
/opt/cni/bin/firewall
/opt/cni/bin/bandwidth
/opt/cni/bin/bridge
/opt/cni/bin/vrf
/opt/cni/bin/macvlan
/opt/cni/bin/tuning
/opt/cni/bin/dummy
/opt/cni/bin/ptp
root@k8s-ctr:~/kubespray# ls -l /opt
total 0
drwxr-xr-x. 3 kube root 17 Feb 1 05:43 cni
drwx--x--x. 4 root root 28 Feb 1 05:45 containerd
drwxr-xr-x. 7 root root 119 Oct 24 06:12 VBoxGuestAdditions-7.2.4
root@k8s-ctr:~/kubespray# tree -ug /opt/cni
[kube root ] /opt/cni
└── [kube root ] bin
├── [kube root ] bandwidth
├── [kube root ] bridge
├── [kube root ] dhcp
├── [kube root ] dummy
├── [kube root ] firewall
├── [root root ] flannel
├── [kube root ] host-device
├── [kube root ] host-local
├── [kube root ] ipvlan
├── [kube root ] LICENSE
├── [kube root ] loopback
├── [kube root ] macvlan
├── [kube root ] portmap
├── [kube root ] ptp
├── [kube root ] README.md
├── [kube root ] sbr
├── [kube root ] static
├── [kube root ] tap
├── [kube root ] tuning
├── [kube root ] vlan
└── [kube root ] vrf
2 directories, 21 files
root@k8s-ctr:~/kubespray# ls -l /etc | grep cni
drwxr-xr-x. 3 kube root 19 Feb 1 05:43 cni
root@k8s-ctr:~/kubespray# tree -ug /etc/cni
[kube root ] /etc/cni
└── [kube root ] net.d
└── [root root ] 10-flannel.conflist
2 directories, 1 file
root@k8s-ctr:~/kubespray# systemctl list-timers --all --no-pager
NEXT LEFT LAST PASSED UNIT ACTIVATES
Sun 2026-02-01 06:30:02 KST 7min Sun 2026-02-01 02:59:38 KST 1h 31min ago dnf-makecache.timer dnf-makecache.service
Sun 2026-02-01 06:57:58 KST 35min Sun 2026-02-01 05:26:20 KST 56min ago fwupd-refresh.timer fwupd-refresh.service
Mon 2026-02-02 00:01:34 KST 17h Sun 2026-02-01 05:02:59 KST 1h 19min ago fstrim.timer fstrim.service
Mon 2026-02-02 00:36:37 KST 18h Sun 2026-02-01 03:21:21 KST 1h 30min ago logrotate.timer logrotate.service
Mon 2026-02-02 00:52:56 KST 18h Sun 2026-02-01 01:47:45 KST 1h 36min ago plocate-updatedb.timer plocate-updatedb.service
Mon 2026-02-02 03:06:43 KST 20h - - k8s-certs-renew.timer k8s-certs-renew.service
Mon 2026-02-02 04:52:28 KST 22h Sun 2026-02-01 03:21:32 KST 1h 30min ago systemd-tmpfiles-clean.timer systemd-tmpfiles-clean.service
Sun 2026-02-08 01:00:00 KST 6 days Sun 2026-02-01 01:39:28 KST 1h 45min ago raid-check.timer raid-check.service
8 timers listed.
root@k8s-ctr:~/kubespray# systemctl status k8s-certs-renew.timer --no-pager
● k8s-certs-renew.timer - Timer to renew K8S control plane certificates
Loaded: loaded (/etc/systemd/system/k8s-certs-renew.timer; enabled; preset: disabled)
Active: active (waiting) since Sun 2026-02-01 05:51:55 KST; 30min ago
Invocation: fee2a54cd8874d9da45c494616978add
Trigger: Mon 2026-02-02 03:06:43 KST; 20h left
Triggers: ● k8s-certs-renew.service
Feb 01 05:51:55 k8s-ctr systemd[1]: Started k8s-certs-renew.timer - Timer to renew K8S control plane certificates.
root@k8s-ctr:~/kubespray# cat /etc/systemd/system/k8s-certs-renew.timer
[Unit]
Description=Timer to renew K8S control plane certificates
[Timer]
OnCalendar=Mon *-*-1,2,3,4,5,6,7 03:00:00
RandomizedDelaySec=10min
FixedRandomDelay=yes
Persistent=yes
[Install]
WantedBy=multi-user.target
root@k8s-ctr:~/kubespray# systemctl status k8s-certs-renew.service
○ k8s-certs-renew.service - Renew K8S control plane certificates
Loaded: loaded (/etc/systemd/system/k8s-certs-renew.service; static)
Active: inactive (dead)
TriggeredBy: ● k8s-certs-renew.timer
root@k8s-ctr:~/kubespray# cat /usr/local/bin/k8s-certs-renew.sh
#!/bin/bash
# Renew kubeadm-managed control-plane certificates when they approach expiry,
# then restart the static control-plane pods so they load the new certs.
echo "## Check Expiration before renewal ##"
/usr/local/bin/kubeadm certs check-expiration
days_buffer=7 # set a time margin, because we should not renew at the last moment
# NOTE(review): the original unquoted line `calendar=Mon *-*-1,2,3,4,5,6,7 03:00:00`
# made bash treat `calendar=Mon` as an env-prefix and try to execute the glob
# `*-*-1,2,3,4,5,6,7` as a command. The variable is never used below; keep it
# quoted, purely as documentation of the timer's OnCalendar schedule.
calendar="Mon *-*-1,2,3,4,5,6,7 03:00:00"
# Next scheduled run of the renewal timer, as reported by systemd.
next_time=$(systemctl show k8s-certs-renew.timer -p NextElapseUSecRealtime --value)
if [ -z "${next_time}" ]; then
echo "## Skip expiry comparison due to fail to parse next elapse from systemd calendar,do renewal directly ##"
else
current_time=$(date +%s)
# Seconds from now until (next timer run + buffer). A cert whose residualTime
# is below this threshold would expire before the *following* run plus margin,
# so it must be renewed now. (Original comment said "-"; the code adds.)
target_time=$(date -d "${next_time} + ${days_buffer} days" +%s)
expiry_threshold=$(( target_time - current_time ))
# jsonpath filter: select certificates whose residualTime is under the threshold.
expired_certs=$(/usr/local/bin/kubeadm certs check-expiration -o jsonpath="{.certificates[?(@.residualTime<${expiry_threshold}.0)]}")
if [ -z "${expired_certs}" ]; then
echo "## Skip cert renew and K8S container restart, since all residualTimes are beyond threshold ##"
exit 0
fi
fi
echo "## Renewing certificates managed by kubeadm ##"
/usr/local/bin/kubeadm certs renew all
echo "## Restarting control plane pods managed by kubeadm ##"
# Removing the pod sandboxes forces kubelet to recreate the static pods,
# which makes them reload the renewed certificates.
/usr/local/bin/crictl pods --namespace kube-system --name 'kube-scheduler-*|kube-controller-manager-*|kube-apiserver-*|etcd-*' -q | /usr/bin/xargs /usr/local/bin/crictl rmp -f
echo "## Updating /root/.kube/config ##"
cp /etc/kubernetes/admin.conf /root/.kube/config
echo "## Waiting for apiserver to be up again ##"
# bash-only /dev/tcp redirection: succeeds once 127.0.0.1:6443 accepts connections.
until printf "" 2>>/dev/null >>/dev/tcp/127.0.0.1/6443; do sleep 1; done
echo "## Expiration after renewal ##"
/usr/local/bin/kubeadm certs check-expiration
sysctl 관련 작업
root@k8s-ctr:~/kubespray# cat roles/kubernetes/preinstall/tasks/0080-system-configurations.yml
---
# Todo : selinux configuration
- name: Confirm selinux deployed
stat:
path: /etc/selinux/config
get_attributes: false
get_checksum: false
get_mime: false
when:
- ansible_os_family == "RedHat"
- "'Amazon' not in ansible_distribution"
register: slc
- name: Set selinux policy
ansible.posix.selinux:
policy: targeted
state: "{{ preinstall_selinux_state }}"
when:
- ansible_os_family == "RedHat"
- "'Amazon' not in ansible_distribution"
- slc.stat.exists
tags:
- bootstrap_os
- name: Disable IPv6 DNS lookup
lineinfile:
dest: /etc/gai.conf
line: "precedence ::ffff:0:0/96 100"
state: present
create: true
backup: "{{ leave_etc_backup_files }}"
mode: "0644"
when:
- disable_ipv6_dns
- not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"]
tags:
- bootstrap_os
- name: Clean previously used sysctl file locations
file:
path: "/etc/sysctl.d/{{ item }}"
state: absent
with_items:
- ipv4-ip_forward.conf
- bridge-nf-call.conf
- name: Stat sysctl file configuration
stat:
path: "{{ sysctl_file_path }}"
get_attributes: false
get_checksum: false
get_mime: false
register: sysctl_file_stat
tags:
- bootstrap_os
- name: Change sysctl file path to link source if linked
set_fact:
sysctl_file_path: "{{ sysctl_file_stat.stat.lnk_source }}"
when:
- sysctl_file_stat.stat.islnk is defined
- sysctl_file_stat.stat.islnk
tags:
- bootstrap_os
- name: Make sure sysctl file path folder exists
file:
name: "{{ sysctl_file_path | dirname }}"
state: directory
mode: "0755"
- name: Enable ip forwarding
ansible.posix.sysctl:
sysctl_file: "{{ sysctl_file_path }}"
name: net.ipv4.ip_forward
value: "1"
state: present
reload: true
ignoreerrors: "{{ sysctl_ignore_unknown_keys }}"
when: ipv4_stack | bool
- name: Enable ipv6 forwarding
ansible.posix.sysctl:
sysctl_file: "{{ sysctl_file_path }}"
name: net.ipv6.conf.all.forwarding
value: "1"
state: present
reload: true
ignoreerrors: "{{ sysctl_ignore_unknown_keys }}"
when: ipv6_stack | bool
- name: Check if we need to set fs.may_detach_mounts
stat:
path: /proc/sys/fs/may_detach_mounts
get_attributes: false
get_checksum: false
get_mime: false
register: fs_may_detach_mounts
ignore_errors: true # noqa ignore-errors
- name: Set fs.may_detach_mounts if needed
ansible.posix.sysctl:
sysctl_file: "{{ sysctl_file_path }}"
name: fs.may_detach_mounts
value: 1
state: present
reload: true
ignoreerrors: "{{ sysctl_ignore_unknown_keys }}"
when: fs_may_detach_mounts.stat.exists | d(false)
- name: Ensure kubelet expected parameters are set
ansible.posix.sysctl:
sysctl_file: "{{ sysctl_file_path }}"
name: "{{ item.name }}"
value: "{{ item.value }}"
state: present
reload: true
ignoreerrors: "{{ sysctl_ignore_unknown_keys }}"
with_items:
- { name: kernel.keys.root_maxbytes, value: 25000000 }
- { name: kernel.keys.root_maxkeys, value: 1000000 }
- { name: kernel.panic, value: 10 }
- { name: kernel.panic_on_oops, value: 1 }
- { name: vm.overcommit_memory, value: 1 }
- { name: vm.panic_on_oom, value: 0 }
when: kubelet_protect_kernel_defaults | bool
- name: Check dummy module
community.general.modprobe:
name: dummy
state: present
params: 'numdummies=0'
when: enable_nodelocaldns
- name: Set additional sysctl variables
ansible.posix.sysctl:
sysctl_file: "{{ sysctl_file_path }}"
name: "{{ item.name }}"
value: "{{ item.value }}"
state: present
reload: true
ignoreerrors: "{{ sysctl_ignore_unknown_keys }}"
with_items: "{{ additional_sysctl }}"
- name: Disable fapolicyd service
failed_when: false
systemd_service:
name: fapolicyd
state: stopped
enabled: false
when: disable_fapolicyd
root@k8s-ctr:~/kubespray# grep "^[^#]" /etc/sysctl.conf
net.ipv4.ip_forward=1
kernel.keys.root_maxbytes=25000000
kernel.keys.root_maxkeys=1000000
kernel.panic=10
kernel.panic_on_oops=1
vm.overcommit_memory=1
vm.panic_on_oom=0
net.ipv4.ip_local_reserved_ports=30000-32767
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-arptables=1
net.bridge.bridge-nf-call-ip6tables=1
root@k8s-ctr:~/kubespray#
ls -l /etc/sysctl.d/
total 4
lrwxrwxrwx. 1 root root 14 May 18 2025 99-sysctl.conf -> ../sysctl.conf
-rw-r--r--. 1 root root 120 Feb 1 01:39 k8s.conf
kubernetes/preinstall, tags: preinstall : kubelet이 안정적으로 기동되고, kubeadm이 실패하지 않도록 OS 상태를 Kubernetes 친화적으로 만드는 단계
root@k8s-ctr:~/kubespray# tree roles/kubernetes/preinstall/tasks/
roles/kubernetes/preinstall/tasks/
├── 0010-swapoff.yml
├── 0020-set_facts.yml
├── 0040-verify-settings.yml
├── 0050-create_directories.yml
├── 0060-resolvconf.yml
├── 0061-systemd-resolved.yml
├── 0062-networkmanager-unmanaged-devices.yml
├── 0063-networkmanager-dns.yml
├── 0080-system-configurations.yml
├── 0081-ntp-configurations.yml
├── 0100-dhclient-hooks.yml
├── 0110-dhclient-hooks-undo.yml
└── main.yml
1 directory, 13 files
root@k8s-ctr:~/kubespray# cat roles/kubernetes/preinstall/defaults/main.yml
---
# Set to true to allow pre-checks to fail and continue deployment
ignore_assert_errors: false
# Set to false to disable the backup parameter, set to true to accumulate backups of config files.
leave_etc_backup_files: true
nameservers: []
cloud_resolver: []
disable_host_nameservers: false
# Kubespray sets this to true after clusterDNS is running to apply changes to the host resolv.conf
dns_late: false
# Set to true if your network does not support IPv6
# This may be necessary for pulling Docker images from
# GCE docker repository
disable_ipv6_dns: false
# Remove default cluster search domains (``default.svc.{{ dns_domain }}, svc.{{ dns_domain }}``).
remove_default_searchdomains: false
kube_owner: kube
kube_cert_group: kube-cert
kube_config_dir: /etc/kubernetes
kube_cert_dir: "{{ kube_config_dir }}/ssl"
kube_cert_compat_dir: /etc/kubernetes/pki
kubelet_flexvolumes_plugins_dir: /usr/libexec/kubernetes/kubelet-plugins/volume/exec
# Flatcar Container Linux by Kinvolk cloud init config file to define /etc/resolv.conf content
# for hostnet pods and infra needs
resolveconf_cloud_init_conf: /etc/resolveconf_cloud_init.conf
# sysctl_file_path to add sysctl conf to
sysctl_file_path: "/etc/sysctl.d/99-sysctl.conf"
# Minimal memory requirement in MB for safety checks
minimal_node_memory_mb: 1024
minimal_master_memory_mb: 1500
## NTP Settings
# Manage the NTP configuration file.
ntp_manage_config: false
# Specify the NTP servers
# Only takes effect when ntp_manage_config is true.
ntp_servers:
- "0.pool.ntp.org iburst"
- "1.pool.ntp.org iburst"
- "2.pool.ntp.org iburst"
- "3.pool.ntp.org iburst"
# Restrict NTP access to these hosts.
# Only takes effect when ntp_manage_config is true.
ntp_restrict:
- "127.0.0.1"
- "::1"
# Specify whether to filter interfaces
ntp_filter_interface: false
# Specify the interfaces
# Only takes effect when ntp_filter_interface is true
# ntp_interfaces:
# - ignore wildcard
# - listen xxx
# The NTP driftfile path
# Only takes effect when ntp_manage_config is true.
# Default value is `/var/lib/ntp/ntp.drift`, for ntpsec use '/var/lib/ntpsec/ntp.drift'
ntp_driftfile: >-
{% if ntp_package == "ntpsec" -%}
/var/lib/ntpsec/ntp.drift
{%- else -%}
/var/lib/ntp/ntp.drift
{%- endif -%}
# Only takes effect when ntp_manage_config is true.
ntp_tinker_panic: false
# Force sync time immediately after the ntp installed, which is useful in a newly installed system.
ntp_force_sync_immediately: false
# Set the timezone for your server. eg: "Etc/UTC","Etc/GMT-8". If not set, the timezone will not change.
ntp_timezone: ""
# Currently known os distributions
supported_os_distributions:
- 'RedHat'
- 'CentOS'
- 'Fedora'
- 'Ubuntu'
- 'Debian'
- 'Flatcar'
- 'Flatcar Container Linux by Kinvolk'
- 'Suse'
- 'openSUSE Leap'
- 'openSUSE Tumbleweed'
- 'ClearLinux'
- 'OracleLinux'
- 'AlmaLinux'
- 'Rocky'
- 'Amazon'
- 'Kylin Linux Advanced Server'
- 'UnionTech'
- 'UniontechOS'
- 'openEuler'
# Extending some distributions into the redhat os family
redhat_os_family_extensions:
- "UnionTech"
- "UniontechOS"
# Sets DNSStubListener=no, useful if you get "0.0.0.0:53: bind: address already in use"
systemd_resolved_disable_stub_listener: "{{ ansible_os_family in ['Flatcar', 'Flatcar Container Linux by Kinvolk'] }}"
# Used to disable File Access Policy Daemon service.
# If service is enabled, the CNI plugin installation will fail
disable_fapolicyd: true
파드 내에서 too many open files 에러 발생 시: containerd 설정(base runtime spec)을 수정한 뒤 ansible-playbook을 --tags "container-engine" 으로 재실행하여 런타임 설정만 다시 적용
목표는 OS 커널에 프로세스 단위 제한 (ulimit)을 파드에도 적용
# base_runtime_spec
root@k8s-ctr:~/kubespray# cat /etc/containerd/cri-base.json | jq
{
"ociVersion": "1.2.1",
"process": {
"user": {
"uid": 0,
"gid": 0
},
"cwd": "/",
"capabilities": {
"bounding": [
"CAP_CHOWN",
"CAP_DAC_OVERRIDE",
"CAP_FSETID",
"CAP_FOWNER",
"CAP_MKNOD",
"CAP_NET_RAW",
"CAP_SETGID",
"CAP_SETUID",
"CAP_SETFCAP",
"CAP_SETPCAP",
"CAP_NET_BIND_SERVICE",
"CAP_SYS_CHROOT",
"CAP_KILL",
"CAP_AUDIT_WRITE"
],
"effective": [
"CAP_CHOWN",
"CAP_DAC_OVERRIDE",
"CAP_FSETID",
"CAP_FOWNER",
"CAP_MKNOD",
"CAP_NET_RAW",
"CAP_SETGID",
"CAP_SETUID",
"CAP_SETFCAP",
"CAP_SETPCAP",
"CAP_NET_BIND_SERVICE",
"CAP_SYS_CHROOT",
"CAP_KILL",
"CAP_AUDIT_WRITE"
],
"permitted": [
"CAP_CHOWN",
"CAP_DAC_OVERRIDE",
"CAP_FSETID",
"CAP_FOWNER",
"CAP_MKNOD",
"CAP_NET_RAW",
"CAP_SETGID",
"CAP_SETUID",
"CAP_SETFCAP",
"CAP_SETPCAP",
"CAP_NET_BIND_SERVICE",
"CAP_SYS_CHROOT",
"CAP_KILL",
"CAP_AUDIT_WRITE"
]
},
"rlimits": [
{
"type": "RLIMIT_NOFILE",
"hard": 65535,
"soft": 65535
}
],
"noNewPrivileges": true
},
"root": {
"path": "rootfs"
},
"mounts": [
{
"destination": "/proc",
"type": "proc",
"source": "proc",
"options": [
"nosuid",
"noexec",
"nodev"
]
},
{
"destination": "/dev",
"type": "tmpfs",
"source": "tmpfs",
"options": [
"nosuid",
"strictatime",
"mode=755",
"size=65536k"
]
},
{
"destination": "/dev/pts",
"type": "devpts",
"source": "devpts",
"options": [
"nosuid",
"noexec",
"newinstance",
"ptmxmode=0666",
"mode=0620",
"gid=5"
]
},
{
"destination": "/dev/shm",
"type": "tmpfs",
"source": "shm",
"options": [
"nosuid",
"noexec",
"nodev",
"mode=1777",
"size=65536k"
]
},
{
"destination": "/dev/mqueue",
"type": "mqueue",
"source": "mqueue",
"options": [
"nosuid",
"noexec",
"nodev"
]
},
{
"destination": "/sys",
"type": "sysfs",
"source": "sysfs",
"options": [
"nosuid",
"noexec",
"nodev",
"ro"
]
},
{
"destination": "/run",
"type": "tmpfs",
"source": "tmpfs",
"options": [
"nosuid",
"strictatime",
"mode=755",
"size=65536k"
]
}
],
"linux": {
"resources": {
"devices": [
{
"allow": false,
"access": "rwm"
}
]
},
"cgroupsPath": "/default",
"namespaces": [
{
"type": "pid"
},
{
"type": "ipc"
},
{
"type": "uts"
},
{
"type": "mount"
},
{
"type": "network"
}
],
"maskedPaths": [
"/proc/acpi",
"/proc/asound",
"/proc/kcore",
"/proc/keys",
"/proc/latency_stats",
"/proc/timer_list",
"/proc/timer_stats",
"/proc/sched_debug",
"/sys/firmware",
"/sys/devices/virtual/powercap",
"/proc/scsi"
],
"readonlyPaths": [
"/proc/bus",
"/proc/fs",
"/proc/irq",
"/proc/sys",
"/proc/sysrq-trigger"
]
}
}
# 파드 기동하며 확인
root@k8s-ctr:~/kubespray# cat << EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
name: ubuntu
spec:
containers:
- name: ubuntu
image: ubuntu
command: ["sh", "-c", "sleep infinity"]
securityContext:
privileged: true
EOF
pod/ubuntu created
root@k8s-ctr:~/kubespray# kubectl exec -it ubuntu -- sh -c 'ulimit -a'
time(seconds) unlimited
file(blocks) unlimited
data(kbytes) unlimited
stack(kbytes) 8192
coredump(blocks) unlimited
memory(kbytes) unlimited
locked memory(kbytes) unlimited
process unlimited
nofiles 65535
vmemory(kbytes) unlimited
locks unlimited
rtprio 0
### 참고 ###
커널 전역 한계*
├─ fs.file-max
├─ file-nr
└─ inode 캐시
프로세스 한계*
├─ RLIMIT_NOFILE (ulimit -n)
├─ systemd LimitNOFILE
└─ PAM limits.conf
cgroup 한계
├─ pids.max
└─ systemd slice 제한
파일시스템
├─ inode 수
├─ dentry 캐시
└─ mount 옵션
런타임
├─ kubelet / containerd*
├─ JVM / Nginx
└─ epoll / socket 사용량
# 커널 전역 한계
root@k8s-ctr:~/kubespray# sysctl fs.file-max
fs.file-max = 9223372036854775807
root@k8s-ctr:~/kubespray# cat /proc/sys/fs/file-max
9223372036854775807
# 현재 사용량 확인
root@k8s-ctr:~/kubespray# cat /proc/sys/fs/file-nr
2720 0 9223372036854775807
# 프로세스 단위 제한 (사용자 및 프로세스 제한 (Shell/User Level))
root@k8s-ctr:~/kubespray# grep "^[^#]" /etc/security/limits.conf
root@k8s-ctr:~/kubespray# cat /etc/security/limits.conf
# /etc/security/limits.conf
#
#This file sets the resource limits for the users logged in via PAM.
#It does not affect resource limits of the system services.
#
#Also note that configuration files in /etc/security/limits.d directory,
#which are read in alphabetical order, override the settings in this
#file in case the domain is the same or more specific.
#That means, for example, that setting a limit for wildcard domain here
#can be overridden with a wildcard setting in a config file in the
#subdirectory, but a user specific setting here can be overridden only
#with a user specific setting in the subdirectory.
#
#Each line describes a limit for a user in the form:
#
#<domain> <type> <item> <value>
#
#Where:
#<domain> can be:
# - a user name
# - a group name, with @group syntax
# - the wildcard *, for default entry
# - the wildcard %, can be also used with %group syntax,
# for maxlogin limit
#
#<type> can have the two values:
# - "soft" for enforcing the soft limits
# - "hard" for enforcing hard limits
#
#<item> can be one of the following:
# - core - limits the core file size (KB)
# - data - max data size (KB)
# - fsize - maximum filesize (KB)
# - memlock - max locked-in-memory address space (KB)
# - nofile - max number of open file descriptors
# - rss - max resident set size (KB)
# - stack - max stack size (KB)
# - cpu - max CPU time (MIN)
# - nproc - max number of processes
# - as - address space limit (KB)
# - maxlogins - max number of logins for this user
# - maxsyslogins - max number of logins on the system
# - priority - the priority to run user process with
# - locks - max number of file locks the user can hold
# - sigpending - max number of pending signals
# - msgqueue - max memory used by POSIX message queues (bytes)
# - nice - max nice priority allowed to raise to values: [-20, 19]
# - rtprio - max realtime priority
#
#<domain> <type> <item> <value>
#
#* soft core 0
#* hard rss 10000
#@student hard nproc 20
#@faculty soft nproc 20
#@faculty hard nproc 50
#ftp hard nproc 0
#@student - maxlogins 4
# End of file
root@k8s-ctr:~/kubespray# ulimit -a
real-time non-blocking time (microseconds, -R) unlimited
core file size (blocks, -c) unlimited
data seg size (kbytes, -d) unlimited
scheduling priority (-e) 0
file size (blocks, -f) unlimited
pending signals (-i) 15495
max locked memory (kbytes, -l) 8192
max memory size (kbytes, -m) unlimited
open files (-n) 524288
pipe size (512 bytes, -p) 8
POSIX message queues (bytes, -q) 819200
real-time priority (-r) 0
stack size (kbytes, -s) 8192
cpu time (seconds, -t) unlimited
max user processes (-u) 15495
virtual memory (kbytes, -v) unlimited
file locks (-x) unlimited
root@k8s-ctr:~/kubespray# ulimit -n
524288
# systemd 서비스 레벨 제한
root@k8s-ctr:~/kubespray# systemctl show kubelet | grep LimitNOFILE
LimitNOFILE=524288
LimitNOFILESoft=1024
# systemd LimitNOFILE은 524,288 이지만 실제 kubelet 프로세스의 limit은 1,000,000 -> kubelet이 기동 시 스스로 rlimit(NOFILE)을 상향 조정하므로 systemd 값이 아닌 1,000,000이 적용됨 (아래 /proc limits 출력으로 확인).
root@k8s-ctr:~/kubespray# cat /proc/$(pidof kubelet)/limits | grep open
Max open files 1000000 1000000 files
# containerd 프로세스 기준
root@k8s-ctr:~/kubespray# systemctl show containerd | grep LimitNOFILE
LimitNOFILE=1048576
LimitNOFILESoft=1048576
root@k8s-ctr:~/kubespray# cat /proc/$(pidof containerd)/limits | grep open
Max open files 1048576 1048576 files
# 설정 후 적용
# 기본 OCI Spec(Runtime Spec)을 수정(Patch)
root@k8s-ctr:~/kubespray# cat << EOF >> inventory/mycluster/group_vars/all/containerd.yml
containerd_default_base_runtime_spec_patch:
process:
rlimits: []
EOF
# 플레이북 실행
root@k8s-ctr:~/kubespray# ansible-playbook -i inventory/mycluster/inventory.ini -v cluster.yml --tags "container-engine" --limit k8s-ctr -e kube_version="1.33.3"
root@k8s-ctr:~/kubespray# cat /etc/containerd/cri-base.json | jq | grep rlimits
"rlimits": [],
root@k8s-ctr:~/kubespray# systemctl restart containerd.service
root@k8s-ctr:~/kubespray# systemctl status containerd.service --no-pager
● containerd.service - containerd container runtime
Loaded: loaded (/etc/systemd/system/containerd.service; enabled; preset: disabled)
Active: active (running) since Sun 2026-02-01 07:11:21 KST; 2s ago
Invocation: 0573ba291be54b13b5cb4d9cd0e9ce1d
# 파드 삭제 후 재기동
root@k8s-ctr:~/kubespray# kubectl delete pod ubuntu
pod "ubuntu" deleted
root@k8s-ctr:~/kubespray# cat << EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
name: ubuntu
spec:
containers:
- name: ubuntu
image: ubuntu
command: ["sh", "-c", "sleep infinity"]
securityContext:
privileged: true
EOF
pod/ubuntu created
# 적용 확인
root@k8s-ctr:~/kubespray# kubectl exec -it ubuntu -- sh -c 'ulimit -a'
time(seconds) unlimited
file(blocks) unlimited
data(kbytes) unlimited
stack(kbytes) 8192
coredump(blocks) unlimited
memory(kbytes) unlimited
locked memory(kbytes) unlimited
process unlimited
nofiles 1048576 ## 확인
vmemory(kbytes) unlimited
locks unlimited
rtprio 0
다운로드의 경우 공통적으로 download role을 활용함
root@k8s-ctr:~/kubespray# tree roles/download/
roles/download/
├── meta
│ └── main.yml
├── tasks
│ ├── check_pull_required.yml
│ ├── download_container.yml
│ ├── download_file.yml
│ ├── extract_file.yml
│ ├── main.yml
│ ├── prep_download.yml
│ ├── prep_kubeadm_images.yml
│ └── set_container_facts.yml
└── templates
└── kubeadm-images.yaml.j2
4 directories, 10 files
kubeadm 관련 바이너리, 컨테이너 이미지 다운로드
root@k8s-ctr:~/kubespray# cat roles/download/tasks/prep_kubeadm_images.yml
---
- name: Prep_kubeadm_images | Download kubeadm binary
include_tasks: "download_file.yml"
vars:
download: "{{ download_defaults | combine(downloads.kubeadm) }}"
when:
- not skip_downloads | default(false)
- downloads.kubeadm.enabled
- name: Prep_kubeadm_images | Copy kubeadm binary from download dir to system path
copy:
src: "{{ downloads.kubeadm.dest }}"
dest: "{{ bin_dir }}/kubeadm"
mode: "0755"
remote_src: true
- name: Prep_kubeadm_images | Create kubeadm config
template:
src: "kubeadm-images.yaml.j2"
dest: "{{ kube_config_dir }}/kubeadm-images.yaml"
mode: "0644"
validate: "{{ kubeadm_config_validate_enabled | ternary(bin_dir + '/kubeadm config validate --config %s', omit) }}"
when:
- not skip_kubeadm_images | default(false)
- name: Prep_kubeadm_images | Generate list of required images
shell: "set -o pipefail && {{ bin_dir }}/kubeadm config images list --config={{ kube_config_dir }}/kubeadm-images.yaml | grep -Ev 'coredns|pause'"
args:
executable: /bin/bash
register: kubeadm_images_raw
run_once: true
changed_when: false
when:
- not skip_kubeadm_images | default(false)
- name: Prep_kubeadm_images | Parse list of images
vars:
kubeadm_images_list: "{{ kubeadm_images_raw.stdout_lines }}"
set_fact:
kubeadm_image:
key: "kubeadm_{{ (item | regex_replace('^(?:.*\\/)*', '')).split(':')[0] }}"
value:
enabled: true
container: true
repo: "{{ item | regex_replace('^(.*):.*$', '\\1') }}"
tag: "{{ item | regex_replace('^.*:(.*)$', '\\1') }}"
groups:
- k8s_cluster
loop: "{{ kubeadm_images_list | flatten(levels=1) }}"
register: kubeadm_images_cooked
run_once: true
when:
- not skip_kubeadm_images | default(false)
- name: Prep_kubeadm_images | Convert list of images to dict for later use
set_fact:
kubeadm_images: "{{ kubeadm_images_cooked.results | map(attribute='ansible_facts.kubeadm_image') | list | items2dict }}"
run_once: true
when:
- not skip_kubeadm_images | default(false)
root@k8s-ctr:~/kubespray# cat roles/download/templates/kubeadm-images.yaml.j2
apiVersion: kubeadm.k8s.io/{{ kubeadm_config_api_version }}
kind: InitConfiguration
nodeRegistration:
criSocket: {{ cri_socket }}
---
apiVersion: kubeadm.k8s.io/{{ kubeadm_config_api_version }}
kind: ClusterConfiguration
imageRepository: {{ kubeadm_image_repo }}
kubernetesVersion: v{{ kube_version }}
etcd:
{% if etcd_deployment_type == "kubeadm" %}
local:
imageRepository: "{{ etcd_image_repo | regex_replace("/etcd$","") }}"
imageTag: "{{ etcd_image_tag }}"
{% else %}
external:
endpoints:
{% for endpoint in etcd_access_addresses.split(',') %}
- {{ endpoint }}
{% endfor %}
{% endif %}
dns:
imageRepository: {{ coredns_image_repo | regex_replace('/coredns(?!/coredns).*$', '') }}
imageTag: {{ coredns_image_tag }}
root@k8s-ctr:~/kubespray# nerdctl -n k8s.io images
REPOSITORY TAG IMAGE ID CREATED PLATFORM SIZE BLOB SIZE
ubuntu <none> cd1dba651b30 27 minutes ago linux/arm64 107.8MB 28.87MB
<none> <none> cd1dba651b30 27 minutes ago linux/arm64 107.8MB 28.87MB
ubuntu latest cd1dba651b30 27 minutes ago linux/arm64 107.8MB 28.87MB
registry.k8s.io/nfd/node-feature-discovery <none> d3f0fb2d50c2 2 hours ago linux/arm64 219MB 62.6MB
<none> <none> d3f0fb2d50c2 2 hours ago linux/arm64 219MB 62.6MB
registry.k8s.io/nfd/node-feature-discovery v0.16.4 d3f0fb2d50c2 2 hours ago linux/arm64 219MB 62.6MB
<none> <none> c69929cfba9e 2 hours ago linux/arm64 103.1MB 28.2MB
registry.k8s.io/kube-proxy v1.33.3 c69929cfba9e 2 hours ago linux/arm64 103.1MB 28.2MB
<none> <none> f3a2ffdd7483 2 hours ago linux/arm64 73.42MB 19.85MB
registry.k8s.io/kube-scheduler v1.33.3 f3a2ffdd7483 2 hours ago linux/arm64 73.42MB 19.85MB
<none> <none> 96091626e37c 2 hours ago linux/arm64 93.34MB 25.09MB
registry.k8s.io/kube-controller-manager v1.33.3 96091626e37c 2 hours ago linux/arm64 93.34MB 25.09MB
<none> <none> 125a8b488def 2 hours ago linux/arm64 99.89MB 27.35MB
registry.k8s.io/kube-apiserver v1.33.3 125a8b488def 2 hours ago linux/arm64 99.89MB 27.35MB
<none> <none> 89258156d0e9 2 hours ago linux/arm64 82.58MB 20.58MB
registry.k8s.io/metrics-server/metrics-server v0.8.0 89258156d0e9 2 hours ago linux/arm64 82.58MB 20.58MB
<none> <none> 69bf675e3567 2 hours ago linux/arm64 39.98MB 10.43MB
registry.k8s.io/cpa/cluster-proportional-autoscaler v1.8.8 69bf675e3567 2 hours ago linux/arm64 39.98MB 10.43MB
<none> <none> 40384aa1f5ea 2 hours ago linux/arm64 71.2MB 19.15MB
registry.k8s.io/coredns/coredns v1.12.0 40384aa1f5ea 2 hours ago linux/arm64 71.2MB 19.15MB
<none> <none> 30f1c0d78e0a 2 hours ago linux/arm64 52.73MB 21.85MB
nginx 1.28.0-alpine 30f1c0d78e0a 2 hours ago linux/arm64 52.73MB 21.85MB
<none> <none> ee6521f290b2 2 hours ago linux/arm64 516.1kB 265.5kB
registry.k8s.io/pause 3.10 ee6521f290b2 2 hours ago linux/arm64 516.1kB 265.5kB
<none> <none> 39d51a8cf650 2 hours ago linux/arm64 11.39MB 5.136MB
flannel/flannel-cni-plugin v1.7.1-flannel1 39d51a8cf650 2 hours ago linux/arm64 11.39MB 5.136MB
<none> <none> 478ca1ac04e4 2 hours ago linux/arm64 102.6MB 33.08MB
flannel/flannel v0.27.3 478ca1ac04e4 2 hours ago linux/arm64 102.6MB 33.08MB
'스터디 > K8s Deploy' 카테고리의 다른 글
| [K8s Deploy] Kubespray offline 설치 (0) | 2026.02.15 |
|---|---|
| [K8s Deploy] Kubespray HA & Upgrade (0) | 2026.02.04 |
| [K8s Deploy] Kubeadm Deep Dive (0) | 2026.01.24 |
| [K8s Deploy] Ansible 기초 (1) | 2026.01.18 |
| [K8s Deploy] Bootstrap Kubernetes the hard way (0) | 2026.01.10 |