Install and configure the nodes (run the same steps on every node)
1. Pull the required images from the Aliyun mirror and retag them under the k8s.gcr.io names that kubeadm expects
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.13.1
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.13.1
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.13.1
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.13.1
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.1
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.2.24
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:1.2.6
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.13.1 k8s.gcr.io/kube-apiserver:v1.13.1
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.13.1 k8s.gcr.io/kube-controller-manager:v1.13.1
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.13.1 k8s.gcr.io/kube-scheduler:v1.13.1
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.13.1 k8s.gcr.io/kube-proxy:v1.13.1
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.1 k8s.gcr.io/pause:3.1
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.2.24 k8s.gcr.io/etcd:3.2.24
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:1.2.6 k8s.gcr.io/coredns:1.2.6
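The seven pull/tag pairs above can also be written as one loop; a minimal sketch, assuming the same mirror and image versions:
for img in kube-apiserver:v1.13.1 kube-controller-manager:v1.13.1 kube-scheduler:v1.13.1 kube-proxy:v1.13.1 pause:3.1 etcd:3.2.24 coredns:1.2.6; do
  ## pull from the Aliyun mirror, then retag under the k8s.gcr.io name kubeadm looks for
  docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/$img
  docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/$img k8s.gcr.io/$img
done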
2. Run the join command to add the node to the cluster, for example:
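A sketch of the join command; <master-ip>, <token>, and <hash> below are placeholders. Substitute the values printed by kubeadm init, or regenerate them on a master with kubeadm token create --print-join-command:
## placeholders only: use your cluster's API server address, token, and CA cert hash
kubeadm join <master-ip>:6443 --token <token> --discovery-token-ca-cert-hash sha256:<hash>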
3. Remove a node
## Run on the master node:
kubectl drain k8s07 --delete-local-data --force --ignore-daemonsets
kubectl delete node k8s07
## On k8s07
kubeadm reset
ifconfig cni0 down
ip link delete cni0
ifconfig flannel.1 down
ip link delete flannel.1
rm -rf /var/lib/cni/
## Run on the other nodes
kubectl delete node k8s07
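To confirm the removal, list the remaining nodes from a master:
kubectl get nodes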
4. Enable ipvs in kube-proxy (run on any one master)
## Edit config.conf in the kube-system/kube-proxy ConfigMap and set mode: "ipvs"
[root@k8s01 ~]# kubectl edit cm kube-proxy -n kube-system
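Inside the editor, the relevant fragment of config.conf looks roughly like this; change mode from the empty string (the iptables default) to "ipvs" and leave the surrounding fields as they are:
    kind: KubeProxyConfiguration
    mode: "ipvs"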
## Restart all kube-proxy pods
[root@k8s01 ~]# kubectl get pod -n kube-system | grep kube-proxy | awk '{system("kubectl delete pod "$1" -n kube-system")}'
pod "kube-proxy-8r9bq" deleted
pod "kube-proxy-9k2zn" deleted
pod "kube-proxy-bv2bf" deleted
pod "kube-proxy-rkwg8" deleted
pod "kube-proxy-sq4lt" deleted
pod "kube-proxy-tvhkx" deleted
pod "kube-proxy-x6v57" deleted
## Check kube-proxy status
[root@k8s01 ~]# kubectl get pod -n kube-system | grep kube-proxy
kube-proxy-5r7fp 1/1 Running 0 31s
kube-proxy-895rz 1/1 Running 0 23s
kube-proxy-ggkrw 1/1 Running 0 19s
kube-proxy-gszff 1/1 Running 0 35s
kube-proxy-jl552 1/1 Running 0 60s
kube-proxy-n72bp 1/1 Running 0 83s
kube-proxy-pr7f9 1/1 Running 0 72s
[root@k8s01 ~]# kubectl logs kube-proxy-5r7fp -n kube-system
I0119 15:44:25.451787 1 server_others.go:189] Using ipvs Proxier.
W0119 15:44:25.452270 1 proxier.go:365] IPVS scheduler not specified, use rr by default
I0119 15:44:25.452458 1 server_others.go:216] Tearing down inactive rules.
I0119 15:44:25.503432 1 server.go:464] Version: v1.13.1
I0119 15:44:25.511221 1 conntrack.go:52] Setting nf_conntrack_max to 131072
I0119 15:44:25.511419 1 config.go:202] Starting service config controller
I0119 15:44:25.511433 1 controller_utils.go:1027] Waiting for caches to sync for service config controller
I0119 15:44:25.511457 1 config.go:102] Starting endpoints config controller
I0119 15:44:25.511518 1 controller_utils.go:1027] Waiting for caches to sync for endpoints config controller
I0119 15:44:25.611687 1 controller_utils.go:1034] Caches are synced for service config controller
I0119 15:44:25.611695 1 controller_utils.go:1034] Caches are synced for endpoints config controller
## The "Using ipvs Proxier" line in the log confirms that ipvs mode is enabled.
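As an extra check, the ipvs rule table itself can be inspected, assuming the ipvsadm tool is installed on the node:
## -L lists the virtual servers and their backends, -n keeps addresses numeric
ipvsadm -Ln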