Installing k8s on OrangePi

Prepare the environment

Planning

Software         Version                          Component        Deployment
---------------  -------------------------------  ---------------  ----------
OS               Ubuntu 22.04.4 LTS               containerd       binary
Runtime          containerd 1.7.20, runc 1.1.13   kubelet          binary
                 crictl v1.28.0                   etcd             manifests
Kubernetes       v1.28.12                         KCM              manifests
etcd             3.5.12-0                         apiserver        manifests
coredns          1.9.4                            scheduler        manifests
cilium           v1.16.0                          CoreDNS          Deployment
metrics-server   v0.7.1                           metrics-server   Deployment
istio            1.22.3                           kube-proxy       DaemonSet

GitHub proxy mirror: https://mirror.ghproxy.com/https://xxxxx
GitHub proxy mirror: https://ghproxy.net/https://xxxxx

Kernel tuning

# Load kernel modules
$ sudo modprobe br_netfilter
$ sudo modprobe ip_vs_rr

# Tune kernel parameters (tee instead of `sudo cat >`, which would redirect as the non-root user)
$ sudo tee /etc/sysctl.d/99-k8s.conf <<EOF
fs.file-max=2097152
net.core.netdev_max_backlog=16384
net.ipv4.ip_forward=1
net.ipv4.neigh.default.gc_thresh3=8192
net.ipv4.tcp_wmem=4096 12582912 16777216
kernel.softlockup_panic=1
net.core.somaxconn=32768
net.ipv4.tcp_max_syn_backlog=8096
kernel.softlockup_all_cpu_backtrace=1
net.core.rmem_max=16777216
net.ipv4.tcp_rmem=4096 12582912 16777216
fs.inotify.max_queued_events=16384
fs.inotify.max_user_instances=16384
fs.inotify.max_user_watches=524288
kernel.pid_max=4194303
user.max_user_namespaces=0
vm.max_map_count=262144
net.bridge.bridge-nf-call-iptables=1
net.core.wmem_max=16777216
net.ipv4.neigh.default.gc_thresh2=1024
net.ipv4.tcp_slow_start_after_idle=0

## Alibaba Cloud recommendations
vm.swappiness = 0
kernel.sysrq = 1

net.ipv4.neigh.default.gc_stale_time = 120

# see details in https://help.aliyun.com/knowledge_detail/39428.html
net.ipv4.conf.all.rp_filter = 0
net.ipv4.conf.default.rp_filter = 0
net.ipv4.conf.default.arp_announce = 2
net.ipv4.conf.lo.arp_announce = 2
net.ipv4.conf.all.arp_announce = 2

# see details in https://help.aliyun.com/knowledge_detail/41334.html
net.ipv4.tcp_max_tw_buckets = 5000
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 1024
net.ipv4.tcp_synack_retries = 2
net.ipv4.tcp_slow_start_after_idle = 0
EOF

# Apply immediately (sysctl -p only reads /etc/sysctl.conf; --system also loads /etc/sysctl.d)
$ sudo sysctl --system
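
The two modprobe calls above are lost on reboot. A standard way to persist them is a modules-load.d drop-in (the filename here is my choice):

$ sudo tee /etc/modules-load.d/k8s.conf <<EOF
br_netfilter
ip_vs_rr
EOF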

containerd

https://www.zmq100.cn/2023/12/09/containerdInstall/
https://github.com/containerd/containerd
https://github.com/containerd/containerd/releases
https://github.com/containerd/containerd/blob/main/docs/cri/crictl.md
https://github.com/containerd/containerd/blob/main/docs/getting-started.md

1. Download

$ wget https://github.com/containerd/containerd/releases/download/v1.7.20/containerd-1.7.20-linux-arm64.tar.gz
$ tar Cxzvf /usr containerd-1.7.20-linux-arm64.tar.gz

2. systemd service

# https://raw.githubusercontent.com/containerd/containerd/main/containerd.service
$ cat << EOF > /lib/systemd/system/containerd.service
[Unit]
Description=containerd container runtime
Documentation=https://containerd.io
After=network.target local-fs.target

[Service]
ExecStartPre=-/sbin/modprobe overlay
ExecStart=/usr/bin/containerd

Type=notify
Delegate=yes
KillMode=process
Restart=always
RestartSec=5
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNPROC=infinity
LimitCORE=infinity
LimitNOFILE=1048576
# Comment TasksMax if your systemd version does not support it.
# Only systemd 226 and above support this option.
TasksMax=infinity
OOMScoreAdjust=-999

[Install]
WantedBy=multi-user.target
EOF

3. config.toml

# The default config can be generated with `containerd config default`
$ mkdir -p /etc/containerd
$ cat << EOF > /etc/containerd/config.toml
version = 2

root = "/var/lib/containerd"
state = "/run/containerd"
disabled_plugins = []
required_plugins = ["io.containerd.grpc.v1.cri"]
oom_score = -999

# Alibaba Cloud vendor enhancement configuration
# imports = ["/etc/containerd/alibabacloud.toml"]

[grpc]
  address = "/run/containerd/containerd.sock"
  max_recv_message_size = 16777216
  max_send_message_size = 16777216

[debug]
  address = "/run/containerd/debug.sock"
  level = "info"

[timeouts]
  "io.containerd.timeout.shim.cleanup" = "5s"
  "io.containerd.timeout.shim.load" = "5s"
  "io.containerd.timeout.shim.shutdown" = "3s"
  "io.containerd.timeout.task.state" = "2s"

[plugins]
  [plugins."io.containerd.gc.v1.scheduler"]
    pause_threshold = 0.02
    deletion_threshold = 0
    mutation_threshold = 100
    schedule_delay = "0s"
    startup_delay = "100ms"

  [plugins."io.containerd.grpc.v1.cri"]
    sandbox_image = "docker.m.daocloud.io/registry.k8s.io/pause:3.9"
    ignore_image_defined_volumes = true

    [plugins."io.containerd.grpc.v1.cri".containerd]
      snapshotter = "overlayfs"
      default_runtime_name = "runc"
      disable_snapshot_annotations = true
      discard_unpacked_layers = false

      [plugins."io.containerd.grpc.v1.cri".containerd.runtimes]
        [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
          runtime_type = "io.containerd.runc.v2"
          privileged_without_host_devices = false
          [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
            NoPivotRoot = false
            NoNewKeyring = false
            SystemdCgroup = true

    [plugins."io.containerd.grpc.v1.cri".cni]
      bin_dir = "/opt/cni/bin"
      conf_dir = "/etc/cni/net.d"
      max_conf_num = 1

    [plugins."io.containerd.grpc.v1.cri".registry]
      config_path = "/etc/containerd/cert.d"

  [plugins."io.containerd.internal.v1.opt"]
    path = "/opt/containerd"

  [plugins."io.containerd.internal.v1.restart"]
    interval = "10s"

  [plugins."io.containerd.metadata.v1.bolt"]
    content_sharing_policy = "shared"
EOF

4. Plugin 1: runc

https://github.com/opencontainers/runc
https://github.com/opencontainers/runc/releases

$ sudo wget -O /usr/bin/runc https://github.com/opencontainers/runc/releases/download/v1.1.13/runc.arm64
$ sudo chmod +x /usr/bin/runc

5. Plugin 2: CNI plugins

https://github.com/containernetworking/plugins
https://github.com/containernetworking/plugins/releases
https://www.cni.dev/docs/cnitool/

$ mkdir -p /opt/cni/bin
$ wget https://github.com/containernetworking/plugins/releases/download/v1.5.1/cni-plugins-linux-arm64-v1.5.1.tgz
$ tar Cxzvf /opt/cni/bin cni-plugins-linux-arm64-v1.5.1.tgz

6. Plugin 3: crictl

https://github.com/kubernetes-sigs/cri-tools
https://github.com/kubernetes-sigs/cri-tools/releases
https://github.com/kubernetes-sigs/cri-tools/blob/master/docs/crictl.md

# Download
$ wget https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.28.0/crictl-v1.28.0-linux-arm64.tar.gz
$ sudo tar Czxvf /usr/bin crictl-v1.28.0-linux-arm64.tar.gz
# Configure
$ echo "export CONTAINER_RUNTIME_ENDPOINT=unix:///run/containerd/containerd.sock" >> /etc/profile
$ source /etc/profile

# or

$ cat << EOF > /etc/crictl.yaml
runtime-endpoint: unix:///run/containerd/containerd.sock
#image-endpoint: unix:///run/containerd/containerd.sock
#timeout: 2
#debug: true
#pull-image-on-create: false
EOF

7. Start containerd

$ systemctl daemon-reload
$ systemctl enable --now containerd
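
A quick health pass on the runtime (the crictl endpoint was configured in step 6; output will vary):

$ systemctl is-active containerd
$ ctr version
$ crictl info | head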

Import images and binaries

https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.28.md#v12812

$ wget https://dl.k8s.io/v1.28.12/kubernetes-server-linux-arm64.tar.gz
$ tar xf kubernetes-server-linux-arm64.tar.gz
$ cd /root/kubernetes/server/bin
# ctr import handles one archive per invocation
$ for t in kube-apiserver.tar kube-controller-manager.tar kube-proxy.tar kube-scheduler.tar; do ctr -n k8s.io i import "$t"; done

# Contents of the offline bundle a.tar.gz:
containerd-1.7.20-linux-arm64.tar.gz
cni-plugins-linux-arm64-v1.5.1.tgz
crictl-v1.28.0-linux-arm64.tar.gz
istio-1.22.3-linux-arm64.tar.gz
cilium-linux-arm64.tar.gz
kubectl
kubelet
kubeadm
cilium.tar
quay.io/cilium/cilium-envoy:v1.29.7
quay.io/cilium/operator-generic:v1.16.0
quay.io/cilium/cilium:v1.16.0
istio.tar
docker.io/istio/pilot:1.22.3
docker.io/istio/proxyv2:1.22.3
kubernetes.tar
registry.k8s.io/pause:3.9
docker.io/coredns/coredns:1.9.4
registry.k8s.io/etcd:3.5.12-0
registry.k8s.io/metrics-server/metrics-server:v0.7.1
registry.k8s.io/kube-apiserver-arm64:v1.28.12
registry.k8s.io/kube-controller-manager-arm64:v1.28.12
registry.k8s.io/kube-proxy-arm64:v1.28.12
registry.k8s.io/kube-scheduler-arm64:v1.28.12
# Import the image bundles (again, one archive per invocation)
$ for t in kubernetes.tar istio.tar cilium.tar; do ctr -n k8s.io i import "$t"; done
$ mv kubectl kubelet kubeadm /usr/bin/
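
A short listing confirms the images landed in the k8s.io namespace:

$ ctr -n k8s.io i ls -q | head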

See the earlier binary-deployment writeup:
https://www.zmq100.cn/2023/04/30/binaryInstallKubernetes/

All the certificates

1. CA and other certificates

# Download cfssl
sudo wget -O /usr/bin/cfssl https://github.com/cloudflare/cfssl/releases/download/v1.6.5/cfssl_1.6.5_linux_arm64
sudo wget -O /usr/bin/cfssljson https://github.com/cloudflare/cfssl/releases/download/v1.6.5/cfssljson_1.6.5_linux_arm64
sudo wget -O /usr/bin/cfssl-certinfo https://github.com/cloudflare/cfssl/releases/download/v1.6.5/cfssl-certinfo_1.6.5_linux_arm64
sudo chmod +x /usr/bin/cfssl*


## ETCD
$ mkdir -p /etc/kubernetes/pki/etcd
$ cd /etc/kubernetes/pki/etcd

# CA
$ echo '{"CN":"CA","key":{"algo":"rsa","size":2048}, "ca": {"expiry": "438000h"}}' |
cfssl gencert -initca - | cfssljson -bare ./ca -
echo '{"signing":{"default":{"expiry":"438000h","usages":["signing","key encipherment","server auth","client auth"]}}}' > ./ca-config.json

# etcd-server
echo '{"CN":"'etcd-server'","hosts":[""],"key":{"algo":"rsa","size":2048}}' |
cfssl gencert -config=./ca-config.json -ca=./ca.pem -ca-key=./ca-key.pem -hostname="localhost,orangepizero3,192.168.1.200,127.0.0.1" - | cfssljson -bare etcd-server

# etcd-peer
echo '{"CN":"'etcd-peer'","hosts":[""],"key":{"algo":"rsa","size":2048}}' |
cfssl gencert -config=./ca-config.json -ca=./ca.pem -ca-key=./ca-key.pem -hostname="localhost,orangepizero3,192.168.1.200,127.0.0.1" - | cfssljson -bare etcd-peer

# kube-apiserver-etcd-client
echo '{"CN": "kube-apiserver-etcd-client","hosts":[""],"key": { "algo": "rsa","size": 2048},"names": [{"CN": "kube-apiserver-etcd-client","O": "system:masters"}]}' |
cfssl gencert -config=./ca-config.json -ca=./ca.pem -ca-key=./ca-key.pem - | cfssljson -bare kube-apiserver-etcd-client

## kubernetes
$ mkdir -p /etc/kubernetes/pki
$ cd /etc/kubernetes/pki

# kubernetes CA
echo '{"CN":"kubernetes CA","key":{"algo":"rsa","size":2048}, "ca": {"expiry": "438000h"}}' |
cfssl gencert -initca - | cfssljson -bare ./ca -
echo '{"signing":{"default":{"expiry":"438000h","usages":["signing","key encipherment","server auth","client auth"]}}}' > ./ca-config.json

# kube-apiserver
echo '{"CN":"'kube-apiserver'","hosts":[""],"key":{"algo":"rsa","size":2048}}' |
cfssl gencert -config=./ca-config.json -ca=./ca.pem -ca-key=./ca-key.pem -hostname="apiserver.cluster.local,kubernetes,kubernetes.default,kubernetes.default.svc,kubernetes.default.svc.cluster.local,localhost,orangepizero3,192.168.1.200,127.0.0.1,172.30.0.1" - | cfssljson -bare apiserver

# apiserver-kubelet-client
echo '{"CN": "apiserver-kubelet-client","hosts":[""],"key": { "algo": "rsa","size": 2048},"names": [{"CN": "kube-apiserver-kubelet-client","O": "system:masters"}]}' |
cfssl gencert -config=./ca-config.json -ca=./ca.pem -ca-key=./ca-key.pem - | cfssljson -bare apiserver-kubelet-client

# system:kube-scheduler
echo '{"CN": "system:kube-scheduler","hosts":[""],"key": { "algo": "rsa","size": 2048},"names": [{"CN": "system:kube-scheduler"}]}' |
cfssl gencert -config=./ca-config.json -ca=./ca.pem -ca-key=./ca-key.pem - | cfssljson -bare kube-scheduler

# system:kube-controller-manager
echo '{"CN": "system:kube-controller-manager","hosts":[""],"key": { "algo": "rsa","size": 2048},"names": [{"CN": "system:kube-controller-manager"}]}' |
cfssl gencert -config=./ca-config.json -ca=./ca.pem -ca-key=./ca-key.pem - | cfssljson -bare kube-controller-manager

# kubernetes-admin
echo '{"CN": "kubernetes-admin","hosts":[""],"key": { "algo": "rsa","size": 2048},"names": [{"CN": "kubernetes-admin","O": "system:masters"}]}' |
cfssl gencert -config=./ca-config.json -ca=./ca.pem -ca-key=./ca-key.pem - | cfssljson -bare kubernetes-admin

# front-proxy-ca
echo '{"CN":"front-proxy-ca","key":{"algo":"rsa","size":2048}, "ca": {"expiry": "438000h"}}' |
cfssl gencert -initca - | cfssljson -bare ./front-proxy-ca -
echo '{"signing":{"default":{"expiry":"438000h","usages":["signing","key encipherment","server auth","client auth"]}}}' > ./front-proxy-ca-config.json

# front-proxy-client
echo '{"CN":"front-proxy-client","hosts":[""],"key":{"algo":"rsa","size":2048},"names":[{"CN": "front-proxy-client"}]}' |
cfssl gencert -config=./front-proxy-ca-config.json -ca=./front-proxy-ca.pem -ca-key=./front-proxy-ca-key.pem - | cfssljson -bare front-proxy-client

# service-account-key-file / service-account-signing-key-file
$ openssl genpkey -algorithm RSA -out sa.key
$ openssl rsa -in sa.key -pubout -out sa.pub
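
Optionally, sanity-check a generated cert; for example, confirm the apiserver cert carries the SANs passed via -hostname (either tool works):

$ cfssl-certinfo -cert apiserver.pem | grep -A10 '"sans"'
$ openssl x509 -in apiserver.pem -noout -ext subjectAltName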

2. kubeconfig

1. kubelet.conf
$ sudo tee /etc/kubernetes/kubelet.conf <<EOF
apiVersion: v1
clusters:
- cluster:
    certificate-authority: /etc/kubernetes/pki/ca.pem
    server: https://192.168.1.200:6443
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: system:node:orangepizero3
  name: system:node:orangepizero3@kubernetes
current-context: system:node:orangepizero3@kubernetes
kind: Config
preferences: {}
users:
- name: system:node:orangepizero3
  user:
    client-certificate: /var/lib/kubelet/pki/kubelet-client-current.pem
    client-key: /var/lib/kubelet/pki/kubelet-client-current.pem
EOF

2. scheduler.conf
$ sudo tee /etc/kubernetes/scheduler.conf <<EOF
apiVersion: v1
clusters:
- cluster:
    certificate-authority: /etc/kubernetes/pki/ca.pem
    server: https://192.168.1.200:6443
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: system:kube-scheduler
  name: system:kube-scheduler@kubernetes
current-context: system:kube-scheduler@kubernetes
kind: Config
preferences: {}
users:
- name: system:kube-scheduler
  user:
    client-certificate: /etc/kubernetes/pki/kube-scheduler.pem
    client-key: /etc/kubernetes/pki/kube-scheduler-key.pem
EOF

3. controller-manager.conf
$ sudo tee /etc/kubernetes/controller-manager.conf <<EOF
apiVersion: v1
clusters:
- cluster:
    certificate-authority: /etc/kubernetes/pki/ca.pem
    server: https://192.168.1.200:6443
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: system:kube-controller-manager
  name: system:kube-controller-manager@kubernetes
current-context: system:kube-controller-manager@kubernetes
kind: Config
preferences: {}
users:
- name: system:kube-controller-manager
  user:
    client-certificate: /etc/kubernetes/pki/kube-controller-manager.pem
    client-key: /etc/kubernetes/pki/kube-controller-manager-key.pem
EOF

4. admin.conf
$ sudo tee /etc/kubernetes/admin.conf <<EOF
apiVersion: v1
clusters:
- cluster:
    certificate-authority: /etc/kubernetes/pki/ca.pem
    server: https://192.168.1.200:6443
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: kubernetes-admin
  name: kubernetes-admin@kubernetes
current-context: kubernetes-admin@kubernetes
kind: Config
preferences: {}
users:
- name: kubernetes-admin
  user:
    client-certificate: /etc/kubernetes/pki/kubernetes-admin.pem
    client-key: /etc/kubernetes/pki/kubernetes-admin-key.pem
EOF

5. bootstrap-kubelet.conf
$ sudo tee /etc/kubernetes/bootstrap-kubelet.conf <<EOF
apiVersion: v1
clusters:
- cluster:
    certificate-authority: /etc/kubernetes/pki/ca.pem
    server: https://192.168.1.200:6443
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: kubernetes-admin
  name: kubernetes-admin@kubernetes
current-context: kubernetes-admin@kubernetes
kind: Config
preferences: {}
users:
- name: kubernetes-admin
  user:
    client-certificate: /etc/kubernetes/pki/kubernetes-admin.pem
    client-key: /etc/kubernetes/pki/kubernetes-admin-key.pem
EOF
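
Once the control plane is running (next section), any of these kubeconfigs can be smoke-tested, e.g. with the admin one:

$ kubectl --kubeconfig /etc/kubernetes/admin.conf get --raw /healthz
$ kubectl --kubeconfig /etc/kubernetes/admin.conf get nodes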

manifests: etcd, KCM, apiserver, scheduler

/etc/kubernetes/manifests

1. etcd.yaml

apiVersion: v1
kind: Pod
metadata:
  annotations:
    kubeadm.kubernetes.io/etcd.advertise-client-urls: https://192.168.1.200:2379
  creationTimestamp: null
  labels:
    component: etcd
    tier: control-plane
  name: etcd
  namespace: kube-system
spec:
  containers:
  - command:
    - etcd
    - --advertise-client-urls=https://192.168.1.200:2379
    - --cert-file=/etc/kubernetes/pki/etcd/etcd-server.pem
    - --client-cert-auth=true
    - --data-dir=/var/lib/etcd
    - --experimental-initial-corrupt-check=true
    - --experimental-watch-progress-notify-interval=5s
    - --initial-advertise-peer-urls=https://192.168.1.200:2380
    - --initial-cluster=orangepi=https://192.168.1.200:2380
    - --key-file=/etc/kubernetes/pki/etcd/etcd-server-key.pem
    - --listen-client-urls=https://127.0.0.1:2379,https://192.168.1.200:2379
    - --listen-metrics-urls=http://0.0.0.0:2381
    - --listen-peer-urls=https://192.168.1.200:2380
    - --name=orangepi
    - --peer-cert-file=/etc/kubernetes/pki/etcd/etcd-peer.pem
    - --peer-client-cert-auth=true
    - --peer-key-file=/etc/kubernetes/pki/etcd/etcd-peer-key.pem
    - --peer-trusted-ca-file=/etc/kubernetes/pki/etcd/ca.pem
    - --snapshot-count=10000
    - --trusted-ca-file=/etc/kubernetes/pki/etcd/ca.pem
    image: docker.m.daocloud.io/registry.k8s.io/etcd:3.5.12-0
    imagePullPolicy: IfNotPresent
    livenessProbe:
      failureThreshold: 8
      httpGet:
        host: 0.0.0.0
        path: /health?exclude=NOSPACE&serializable=true
        port: 2381
        scheme: HTTP
      initialDelaySeconds: 10
      periodSeconds: 10
      timeoutSeconds: 15
    name: etcd
    resources:
      requests:
        cpu: 100m
        memory: 100Mi
    startupProbe:
      failureThreshold: 24
      httpGet:
        host: 0.0.0.0
        path: /health?serializable=false
        port: 2381
        scheme: HTTP
      initialDelaySeconds: 10
      periodSeconds: 10
      timeoutSeconds: 15
    volumeMounts:
    - mountPath: /var/lib/etcd
      name: etcd-data
    - mountPath: /etc/kubernetes/pki/etcd
      name: etcd-certs
  hostNetwork: true
  priority: 2000001000
  priorityClassName: system-node-critical
  securityContext:
    seccompProfile:
      type: RuntimeDefault
  volumes:
  - hostPath:
      path: /etc/kubernetes/pki/etcd
      type: DirectoryOrCreate
    name: etcd-certs
  - hostPath:
      path: /var/lib/etcd
      type: DirectoryOrCreate
    name: etcd-data
status: {}
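
Once kubelet picks up the manifest, etcd health can be checked over the plain-HTTP metrics listener configured above (no client certs needed on :2381):

$ curl -s http://127.0.0.1:2381/health
# should report {"health":"true",...}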

2. kube-apiserver.yaml

apiVersion: v1
kind: Pod
metadata:
  annotations:
    kubeadm.kubernetes.io/kube-apiserver.advertise-address.endpoint: 192.168.1.200:6443
  creationTimestamp: null
  labels:
    component: kube-apiserver
    tier: control-plane
  name: kube-apiserver
  namespace: kube-system
spec:
  containers:
  - command:
    - kube-apiserver
    - --advertise-address=192.168.1.200
    - --allow-privileged=true
    - --audit-log-format=json
    - --audit-log-maxage=7
    - --audit-log-maxbackup=10
    - --audit-log-maxsize=100
    - --audit-log-path=/var/log/kubernetes/audit.log
    - --audit-policy-file=/etc/kubernetes/audit-policy.yml
    - --authorization-mode=Node,RBAC
    - --client-ca-file=/etc/kubernetes/pki/ca.pem
    - --enable-admission-plugins=NodeRestriction
    - --enable-aggregator-routing=true
    - --enable-bootstrap-token-auth=true
    - --token-auth-file=/etc/kubernetes/token.csv
    - --etcd-cafile=/etc/kubernetes/pki/etcd/ca.pem
    - --etcd-certfile=/etc/kubernetes/pki/etcd/kube-apiserver-etcd-client.pem
    - --etcd-keyfile=/etc/kubernetes/pki/etcd/kube-apiserver-etcd-client-key.pem
    - --etcd-servers=https://127.0.0.1:2379
    - --feature-gates=
    - --kubelet-client-certificate=/etc/kubernetes/pki/apiserver-kubelet-client.pem
    - --kubelet-client-key=/etc/kubernetes/pki/apiserver-kubelet-client-key.pem
    - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
    - --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.pem
    - --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client-key.pem
    - --requestheader-allowed-names=front-proxy-client
    - --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem
    - --requestheader-extra-headers-prefix=X-Remote-Extra-
    - --requestheader-group-headers=X-Remote-Group
    - --requestheader-username-headers=X-Remote-User
    - --secure-port=6443
    - --service-account-issuer=https://kubernetes.default.svc.cluster.local
    - --service-account-key-file=/etc/kubernetes/pki/sa.pub
    - --service-account-signing-key-file=/etc/kubernetes/pki/sa.key
    - --service-cluster-ip-range=172.30.0.0/16
    - --tls-cert-file=/etc/kubernetes/pki/apiserver.pem
    - --tls-private-key-file=/etc/kubernetes/pki/apiserver-key.pem
    image: registry.k8s.io/kube-apiserver-arm64:v1.28.12
    imagePullPolicy: IfNotPresent
    livenessProbe:
      failureThreshold: 8
      httpGet:
        host: 192.168.1.200
        path: /livez
        port: 6443
        scheme: HTTPS
      initialDelaySeconds: 10
      periodSeconds: 10
      timeoutSeconds: 15
    name: kube-apiserver
    readinessProbe:
      failureThreshold: 3
      httpGet:
        host: 192.168.1.200
        path: /readyz
        port: 6443
        scheme: HTTPS
      periodSeconds: 1
      timeoutSeconds: 15
    resources:
      requests:
        cpu: 250m
    startupProbe:
      failureThreshold: 24
      httpGet:
        host: 192.168.1.200
        path: /livez
        port: 6443
        scheme: HTTPS
      initialDelaySeconds: 10
      periodSeconds: 10
      timeoutSeconds: 15
    volumeMounts:
    - mountPath: /etc/kubernetes
      name: audit
    - mountPath: /var/log/kubernetes
      name: audit-log
    - mountPath: /etc/ssl/certs
      name: ca-certs
      readOnly: true
    - mountPath: /etc/ca-certificates
      name: etc-ca-certificates
      readOnly: true
    - mountPath: /etc/pki
      name: etc-pki
      readOnly: true
    - mountPath: /etc/kubernetes/pki
      name: k8s-certs
      readOnly: true
    - mountPath: /etc/localtime
      name: localtime
      readOnly: true
    - mountPath: /usr/local/share/ca-certificates
      name: usr-local-share-ca-certificates
      readOnly: true
    - mountPath: /usr/share/ca-certificates
      name: usr-share-ca-certificates
      readOnly: true
  hostNetwork: true
  priority: 2000001000
  priorityClassName: system-node-critical
  securityContext:
    seccompProfile:
      type: RuntimeDefault
  volumes:
  - hostPath:
      path: /etc/kubernetes
      type: DirectoryOrCreate
    name: audit
  - hostPath:
      path: /var/log/kubernetes
      type: DirectoryOrCreate
    name: audit-log
  - hostPath:
      path: /etc/ssl/certs
      type: DirectoryOrCreate
    name: ca-certs
  - hostPath:
      path: /etc/ca-certificates
      type: DirectoryOrCreate
    name: etc-ca-certificates
  - hostPath:
      path: /etc/pki
      type: DirectoryOrCreate
    name: etc-pki
  - hostPath:
      path: /etc/kubernetes/pki
      type: DirectoryOrCreate
    name: k8s-certs
  - hostPath:
      path: /etc/localtime
      type: File
    name: localtime
  - hostPath:
      path: /usr/local/share/ca-certificates
      type: DirectoryOrCreate
    name: usr-local-share-ca-certificates
  - hostPath:
      path: /usr/share/ca-certificates
      type: DirectoryOrCreate
    name: usr-share-ca-certificates
status: {}
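
Two files referenced by the flags above are never created elsewhere in this post: /etc/kubernetes/token.csv and /etc/kubernetes/audit-policy.yml. A minimal sketch of each (the token value and the record-metadata-only policy are placeholder choices, not from the original setup):

# bootstrap token file, format: token,user,uid,"group1,group2"
$ echo '0123456789abcdef,kubelet-bootstrap,10001,"system:bootstrappers"' > /etc/kubernetes/token.csv

# minimal audit policy: record request metadata only
$ cat <<EOF > /etc/kubernetes/audit-policy.yml
apiVersion: audit.k8s.io/v1
kind: Policy
rules:
- level: Metadata
EOF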

3. kube-scheduler.yaml

apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    component: kube-scheduler
    tier: control-plane
  name: kube-scheduler
  namespace: kube-system
spec:
  containers:
  - command:
    - kube-scheduler
    - --authentication-kubeconfig=/etc/kubernetes/scheduler.conf
    - --authorization-kubeconfig=/etc/kubernetes/scheduler.conf
    - --bind-address=0.0.0.0
    - --feature-gates=
    - --kubeconfig=/etc/kubernetes/scheduler.conf
    - --leader-elect=false
    image: registry.k8s.io/kube-scheduler-arm64:v1.28.12
    imagePullPolicy: IfNotPresent
    livenessProbe:
      failureThreshold: 8
      httpGet:
        path: /healthz
        port: 10259
        scheme: HTTPS
      initialDelaySeconds: 10
      periodSeconds: 10
      timeoutSeconds: 15
    name: kube-scheduler
    resources:
      requests:
        cpu: 100m
    startupProbe:
      failureThreshold: 24
      httpGet:
        path: /healthz
        port: 10259
        scheme: HTTPS
      initialDelaySeconds: 10
      periodSeconds: 10
      timeoutSeconds: 15
    volumeMounts:
    - mountPath: /etc/kubernetes/scheduler.conf
      name: kubeconfig
      readOnly: true
    - mountPath: /etc/localtime
      name: localtime
      readOnly: true
    - mountPath: /etc/kubernetes/pki
      name: k8s-certs
      readOnly: true
  hostNetwork: true
  priority: 2000001000
  priorityClassName: system-node-critical
  securityContext:
    seccompProfile:
      type: RuntimeDefault
  volumes:
  - hostPath:
      path: /etc/kubernetes/scheduler.conf
      type: FileOrCreate
    name: kubeconfig
  - hostPath:
      path: /etc/localtime
      type: File
    name: localtime
  - hostPath:
      path: /etc/kubernetes/pki
      type: DirectoryOrCreate
    name: k8s-certs
status: {}

4. kube-controller-manager.yaml

apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    component: kube-controller-manager
    tier: control-plane
  name: kube-controller-manager
  namespace: kube-system
spec:
  containers:
  - command:
    - kube-controller-manager
    - --allocate-node-cidrs=true
    - --authentication-kubeconfig=/etc/kubernetes/controller-manager.conf
    - --authorization-kubeconfig=/etc/kubernetes/controller-manager.conf
    - --bind-address=0.0.0.0
    - --client-ca-file=/etc/kubernetes/pki/ca.pem
    - --cluster-cidr=10.10.0.0/16
    - --cluster-name=kubernetes
    - --cluster-signing-cert-file=/etc/kubernetes/pki/ca.pem
    - --cluster-signing-duration=876000h
    - --cluster-signing-key-file=/etc/kubernetes/pki/ca-key.pem
    - --controllers=*,bootstrapsigner,tokencleaner
    - --feature-gates=
    - --kubeconfig=/etc/kubernetes/controller-manager.conf
    - --leader-elect=false
    - --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem
    - --root-ca-file=/etc/kubernetes/pki/ca.pem
    - --service-account-private-key-file=/etc/kubernetes/pki/sa.key
    - --service-cluster-ip-range=172.30.0.0/16
    - --use-service-account-credentials=true
    image: registry.k8s.io/kube-controller-manager-arm64:v1.28.12
    imagePullPolicy: IfNotPresent
    livenessProbe:
      failureThreshold: 8
      httpGet:
        path: /healthz
        port: 10257
        scheme: HTTPS
      initialDelaySeconds: 10
      periodSeconds: 10
      timeoutSeconds: 15
    name: kube-controller-manager
    resources:
      requests:
        cpu: 200m
    startupProbe:
      failureThreshold: 24
      httpGet:
        path: /healthz
        port: 10257
        scheme: HTTPS
      initialDelaySeconds: 10
      periodSeconds: 10
      timeoutSeconds: 15
    volumeMounts:
    - mountPath: /etc/ssl/certs
      name: ca-certs
      readOnly: true
    - mountPath: /etc/ca-certificates
      name: etc-ca-certificates
      readOnly: true
    - mountPath: /etc/pki
      name: etc-pki
      readOnly: true
    - mountPath: /usr/libexec/kubernetes/kubelet-plugins/volume/exec
      name: flexvolume-dir
    - mountPath: /etc/kubernetes/pki
      name: k8s-certs
      readOnly: true
    - mountPath: /etc/kubernetes/controller-manager.conf
      name: kubeconfig
      readOnly: true
    - mountPath: /etc/localtime
      name: localtime
      readOnly: true
    - mountPath: /usr/local/share/ca-certificates
      name: usr-local-share-ca-certificates
      readOnly: true
    - mountPath: /usr/share/ca-certificates
      name: usr-share-ca-certificates
      readOnly: true
  hostNetwork: true
  priority: 2000001000
  priorityClassName: system-node-critical
  securityContext:
    seccompProfile:
      type: RuntimeDefault
  volumes:
  - hostPath:
      path: /etc/ssl/certs
      type: DirectoryOrCreate
    name: ca-certs
  - hostPath:
      path: /etc/ca-certificates
      type: DirectoryOrCreate
    name: etc-ca-certificates
  - hostPath:
      path: /etc/pki
      type: DirectoryOrCreate
    name: etc-pki
  - hostPath:
      path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec
      type: DirectoryOrCreate
    name: flexvolume-dir
  - hostPath:
      path: /etc/kubernetes/pki
      type: DirectoryOrCreate
    name: k8s-certs
  - hostPath:
      path: /etc/kubernetes/controller-manager.conf
      type: FileOrCreate
    name: kubeconfig
  - hostPath:
      path: /etc/localtime
      type: File
    name: localtime
  - hostPath:
      path: /usr/local/share/ca-certificates
      type: DirectoryOrCreate
    name: usr-local-share-ca-certificates
  - hostPath:
      path: /usr/share/ca-certificates
      type: DirectoryOrCreate
    name: usr-share-ca-certificates
status: {}

kubelet configuration and installation

1. /var/lib/kubelet/config.yaml

$ mkdir -p /var/lib/kubelet
$ cat <<EOF > /var/lib/kubelet/config.yaml
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
serializeImagePulls: false
cpuManagerPolicy: none
clusterDomain: cluster.local
clusterDNS:
- 172.30.0.10
tlsCipherSuites:
- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256
- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305
- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384
- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305
- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384
- TLS_RSA_WITH_AES_256_GCM_SHA384
- TLS_RSA_WITH_AES_128_GCM_SHA256
maxPods: 100
podPidsLimit: 16384
containerLogMaxSize: 100Mi
containerLogMaxFiles: 10
evictionHard:
  imagefs.available: 15%
  memory.available: 30Mi
  nodefs.available: 10%
  nodefs.inodesFree: 5%
systemReserved:
  cpu: 10m
  memory: 10Mi
  pid: "10"
kubeReserved:
  cpu: 10m
  memory: 10Mi
  pid: "10"
EOF

2. /etc/systemd/system/kubelet.service

cat <<EOF > /etc/systemd/system/kubelet.service
[Unit]
Description=kubelet: The Kubernetes Node Agent
Documentation=http://kubernetes.io/docs/

[Service]
Restart=always
StartLimitInterval=0
RestartSec=10
Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf"
Environment="KUBELET_SYSTEM_PODS_ARGS=--pod-manifest-path=/etc/kubernetes/manifests"
Environment="KUBELET_LOGLEVEL_ARGS=--v=3"
Environment="KUBELET_AUTHZ_ARGS=--authorization-mode=Webhook --authentication-token-webhook=true --anonymous-auth=false --client-ca-file=/etc/kubernetes/pki/ca.pem"
Environment="KUBELET_RUNTIME_ARGS=--container-runtime-endpoint=/var/run/containerd/containerd.sock"
Environment="KUBELET_CGROUP_ARGS=--cgroup-driver=systemd"
Environment="KUBELET_LABEL_TAINT=--node-labels=zone=china "
Environment="KUBELET_CERTIFICATE_ARGS=--rotate-certificates=true --cert-dir=/var/lib/kubelet/pki"
Environment="KUBELET_ACK_CRITICAL_ARGS=--config=/var/lib/kubelet/config.yaml --hostname-override=orangepizero3 --cluster-dns=172.30.0.10"
# --cloud-provider=external --enable-load-reader=true --enable-controller-attach-detach=true
ExecStart=/usr/bin/kubelet \$KUBELET_KUBECONFIG_ARGS \$KUBELET_SYSTEM_PODS_ARGS \$KUBELET_LOGLEVEL_ARGS \$KUBELET_AUTHZ_ARGS \$KUBELET_RUNTIME_ARGS \$KUBELET_CGROUP_ARGS \$KUBELET_LABEL_TAINT \$KUBELET_CERTIFICATE_ARGS \$KUBELET_ACK_CRITICAL_ARGS \$KUBELET_CUSTOMIZED_ARGS

[Install]
WantedBy=multi-user.target
EOF

3. Start kubelet

$ systemctl daemon-reload
$ systemctl enable --now kubelet
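
If everything is wired up, the four static pods appear within a minute or two; a quick sanity pass (using the admin kubeconfig written earlier):

$ crictl pods
$ kubectl --kubeconfig /etc/kubernetes/admin.conf -n kube-system get pods
$ kubectl --kubeconfig /etc/kubernetes/admin.conf get nodes -o wide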

Addon 1: kube-proxy

apiVersion: v1
kind: ServiceAccount
metadata:
  name: kube-proxy
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: kube-proxy
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kube-proxy
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:bootstrappers:kubeadm:default-node-token
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: kube-proxy
  namespace: kube-system
rules:
- apiGroups:
  - ""
  resourceNames:
  - kube-proxy
  resources:
  - configmaps
  verbs:
  - get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubeproxy:node-proxier
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:node-proxier
subjects:
- kind: ServiceAccount
  name: kube-proxy
  namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
  labels:
    app: kube-proxy
  name: kube-proxy
  namespace: kube-system
data:
  config.conf: |-
    apiVersion: kubeproxy.config.k8s.io/v1alpha1
    kind: KubeProxyConfiguration
    featureGates:
    bindAddress: 0.0.0.0
    clientConnection:
      acceptContentTypes: ""
      burst: 10
      contentType: application/vnd.kubernetes.protobuf
      kubeconfig: /var/lib/kube-proxy/kubeconfig.conf
      qps: 5
    clusterCIDR: 10.10.0.0/16
    configSyncPeriod: 15m0s
    conntrack:
      maxPerCore: 32768
      min: 131072
      tcpCloseWaitTimeout: 1h0m0s
      tcpEstablishedTimeout: 24h0m0s
    mode: ipvs
  kubeconfig.conf: |-
    apiVersion: v1
    kind: Config
    clusters:
    - cluster:
        certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
        server: https://orangepizero3:6443
      name: default
    contexts:
    - context:
        cluster: default
        namespace: default
        user: default
      name: default
    current-context: default
    users:
    - name: default
      user:
        tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  labels:
    k8s-app: kube-proxy
  name: kube-proxy
  namespace: kube-system
spec:
  selector:
    matchLabels:
      k8s-app: kube-proxy
  template:
    metadata:
      labels:
        k8s-app: kube-proxy
    spec:
      containers:
      - command:
        - /usr/local/bin/kube-proxy
        - --config=/var/lib/kube-proxy/config.conf
        - --hostname-override=$(NODE_NAME)
        env:
        - name: NODE_NAME
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: spec.nodeName
        image: registry.k8s.io/kube-proxy-arm64:v1.28.12
        imagePullPolicy: IfNotPresent
        name: kube-proxy
        resources: {}
        securityContext:
          privileged: true
        volumeMounts:
        - mountPath: /var/lib/kube-proxy
          name: kube-proxy
        - mountPath: /run/xtables.lock
          name: xtables-lock
        - mountPath: /lib/modules
          name: lib-modules
          readOnly: true
      dnsPolicy: ClusterFirst
      hostNetwork: true
      nodeSelector:
        kubernetes.io/os: linux
      priorityClassName: system-node-critical
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      serviceAccount: kube-proxy
      serviceAccountName: kube-proxy
      terminationGracePeriodSeconds: 30
      tolerations:
      - operator: Exists
      volumes:
      - configMap:
          defaultMode: 420
          name: kube-proxy
        name: kube-proxy
      - hostPath:
          path: /run/xtables.lock
          type: FileOrCreate
        name: xtables-lock
      - hostPath:
          path: /lib/modules
          type: ""
        name: lib-modules
  updateStrategy:
    rollingUpdate:
      maxSurge: 0
      maxUnavailable: 1
    type: RollingUpdate
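
Save the manifest above (the filename below is arbitrary) and apply it once the apiserver is reachable:

$ kubectl apply -f kube-proxy.yaml
$ kubectl -n kube-system get ds kube-proxy
$ ipvsadm -Ln   # ipvs rules, if ipvsadm is installed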

Addon 2: cilium

https://docs.cilium.io/en/stable/gettingstarted/k8s-install-default/

$ wget https://github.com/cilium/cilium-cli/releases/download/v0.16.13/cilium-linux-arm64.tar.gz
$ tar xf cilium-linux-arm64.tar.gz
$ mv cilium /usr/bin/
$ cilium install --version 1.16.0
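
The CLI can confirm the agent and operator are healthy; the connectivity test is optional and fairly heavy for a small board:

$ cilium status --wait
$ cilium connectivity test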

Addon 3: coredns

https://github.com/coredns/deployment/blob/master/kubernetes/coredns.yaml.sed

apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:coredns
rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  - services
  - pods
  - namespaces
  verbs:
  - list
  - watch
- apiGroups:
  - discovery.k8s.io
  resources:
  - endpointslices
  verbs:
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
data:
  Corefile: |
    .:53 {
        errors
        health {
            lameduck 15s
        }
        ready
        kubeapi
        k8s_event {
            level info error warning
        }
        kubernetes cluster.local in-addr.arpa ip6.arpa {
            fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        forward . /etc/resolv.conf {
            max_concurrent 1000
        }
        cache 30
        loop
        reload
        log
        loadbalance
    }
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/name: "CoreDNS"
    app.kubernetes.io/name: coredns
spec:
  # replicas: not specified here:
  # 1. Default is 1.
  # 2. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: kube-dns
      app.kubernetes.io/name: coredns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
        app.kubernetes.io/name: coredns
    spec:
      priorityClassName: system-cluster-critical
      serviceAccountName: coredns
      tolerations:
      - key: "CriticalAddonsOnly"
        operator: "Exists"
      nodeSelector:
        kubernetes.io/os: linux
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: k8s-app
                operator: In
                values: ["kube-dns"]
            topologyKey: kubernetes.io/hostname
      containers:
      - name: coredns
        image: docker.m.daocloud.io/coredns/coredns:1.9.4
        imagePullPolicy: IfNotPresent
        resources:
          requests:
            cpu: 1m
            memory: 1Mi
        args: [ "-conf", "/etc/coredns/Corefile" ]
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
          readOnly: true
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9153
          name: metrics
          protocol: TCP
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            add:
            - NET_BIND_SERVICE
            drop:
            - all
          readOnlyRootFilesystem: true
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /ready
            port: 8181
            scheme: HTTP
      dnsPolicy: Default
      volumes:
      - name: config-volume
        configMap:
          name: coredns
          items:
          - key: Corefile
            path: Corefile
---
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  annotations:
    prometheus.io/port: "9153"
    prometheus.io/scrape: "true"
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "CoreDNS"
    app.kubernetes.io/name: coredns
spec:
  selector:
    k8s-app: kube-dns
    app.kubernetes.io/name: coredns
  clusterIP: 172.30.0.10
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
  - name: metrics
    port: 9153
    protocol: TCP
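
Apply the manifest (saved e.g. as coredns.yaml) and run a quick lookup from a disposable pod; the busybox image/tag here is an arbitrary choice:

$ kubectl apply -f coredns.yaml
$ kubectl -n kube-system get pods -l k8s-app=kube-dns
$ kubectl run dns-test --rm -it --restart=Never --image=busybox:1.36 -- nslookup kubernetes.default.svc.cluster.local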

Addon 4: metrics-server

# https://github.com/kubernetes-sigs/metrics-server?tab=readme-ov-file#requirements
sudo kubectl apply -f https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml
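
Since the kubelet serving certs in this setup are self-signed, metrics-server will likely reject them; a common (less strict) workaround is adding the --kubelet-insecure-tls flag, then checking kubectl top:

$ kubectl -n kube-system patch deployment metrics-server --type=json \
  -p='[{"op":"add","path":"/spec/template/spec/containers/0/args/-","value":"--kubelet-insecure-tls"}]'
$ kubectl top nodes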

Addon 5: istio

https://github.com/istio/istio/releases/

$ wget https://github.com/istio/istio/releases/download/1.22.3/istio-1.22.3-linux-arm64.tar.gz
$ tar xf istio-1.22.3-linux-arm64.tar.gz
$ mv istio-1.22.3/bin/istioctl /usr/bin/
$ chmod +x /usr/bin/istioctl

# https://istio.io/latest/docs/setup/additional-setup/config-profiles/
$ istioctl install --set profile=default -y   # install
$ istioctl uninstall --purge                  # uninstall
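
A post-install check (istioctl can verify the deployed state against the rendered manifest):

$ istioctl verify-install
$ kubectl -n istio-system get pods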