
k8s 1.18.2 single-master cluster installation

Created: 2020-04-23

Machine initialization

1.1 Configure sshd to skip DNS resolution (avoids slow logins caused by reverse DNS lookups)
[root@localhost ~]# vim /etc/ssh/sshd_config
UseDNS no
[root@localhost ~]# systemctl restart sshd
1.2 Disable SELinux
setenforce 0
sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config

1.3 Disable swap (kubelet refuses to start while swap is enabled)
swapoff -a
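swapoff -a only lasts until the next reboot. To make it permanent, the swap entry in /etc/fstab should be commented out as well; a minimal sketch, assuming a standard fstab layout:

# comment out any fstab line mounting swap so it stays off after reboot
sed -i '/ swap / s/^/#/' /etc/fstab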

1.4 Disable firewalld
systemctl stop firewalld
systemctl disable firewalld

1.5 Configure hosts. Note that k8s-apiserver also resolves to the master (192.168.0.125); in a future HA setup it could point at a load balancer instead.
[root@localhost ~]# cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.0.125 k8s-n1 
192.168.0.126 k8s-n2 
192.168.0.127 k8s-n3 
192.168.0.128 k8s-n4 
192.168.0.125 k8s-apiserver 


hostnamectl set-hostname  k8s-n1  # set the hostname on each of the four machines in turn (k8s-n1 .. k8s-n4)
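If root ssh access between the machines is already in place, the hostnames can also be assigned in one loop from a single host (a hypothetical convenience; IPs as in the hosts file above):

for i in 1 2 3 4; do ssh root@192.168.0.$((124+i)) "hostnamectl set-hostname k8s-n${i}"; done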

1.6 Configure yum repositories
wget http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
       http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

1.7 Configure sysctl
echo "net.ipv4.ip_forward = 1" >> /etc/sysctl.conf
echo "net.bridge.bridge-nf-call-ip6tables = 1" >> /etc/sysctl.conf
echo "net.bridge.bridge-nf-call-iptables = 1" >> /etc/sysctl.conf
echo "net.ipv6.conf.all.disable_ipv6 = 1" >> /etc/sysctl.conf
echo "net.ipv6.conf.default.disable_ipv6 = 1" >> /etc/sysctl.conf
echo "net.ipv6.conf.lo.disable_ipv6 = 1" >> /etc/sysctl.conf
echo "net.ipv6.conf.all.forwarding = 1"  >> /etc/sysctl.conf

sysctl -p
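If sysctl -p fails with "No such file or directory" on the net.bridge.* keys, the br_netfilter kernel module is not loaded yet:

modprobe br_netfilter
echo "br_netfilter" > /etc/modules-load.d/br_netfilter.conf   # load again on boot
sysctl -p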

1.8 Install dependency packages
yum install -y yum-utils  device-mapper-persistent-data  lvm2   nfs-utils wget 
Check the available package versions:
[root@localhost ~]# yum list kubeadm docker-ce
Loaded plugins: fastestmirror, langpacks
Loading mirror speeds from cached hostfile
 * base: ftp.sjtu.edu.cn
 * extras: mirrors.aliyun.com
 * updates: mirrors.aliyun.com
Available Packages
docker-ce.x86_64                         3:19.03.8-3.el7                         docker-ce-stable
kubeadm.x86_64                           1.18.2-0                                kubernetes   

Install docker-ce (all machines)

yum install -y docker-ce
sed -i "s#^ExecStart=/usr/bin/dockerd.*#ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock --exec-opt native.cgroupdriver=systemd#g" /usr/lib/systemd/system/docker.service
systemctl daemon-reload
systemctl enable docker
systemctl start docker



cat <<EOF >  /etc/docker/daemon.json
{
    "registry-mirrors": ["https://docker.mirrors.ustc.edu.cn"]
}
EOF
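As an alternative to the sed edit of docker.service above, the cgroup driver can be set in daemon.json itself, which survives docker-ce package upgrades. Do not combine both methods, since dockerd exits when exec-opts is given both as a flag and in the config file; a sketch:

cat <<EOF >  /etc/docker/daemon.json
{
    "registry-mirrors": ["https://docker.mirrors.ustc.edu.cn"],
    "exec-opts": ["native.cgroupdriver=systemd"]
}
EOF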

systemctl restart docker
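To confirm the cgroup driver change took effect:

docker info 2>/dev/null | grep -i cgroup    # expect: Cgroup Driver: systemd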

Install kubelet, kubeadm, and kubectl (all machines)
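A bare yum install pulls whatever version is newest in the repo; to stay on the 1.18.2 release listed earlier, the packages can be pinned explicitly:

yum -y install kubelet-1.18.2-0 kubeadm-1.18.2-0 kubectl-1.18.2-0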

[root@localhost ~]# yum -y install  kubelet kubeadm kubectl
[root@localhost ~]# systemctl enable kubelet && systemctl start kubelet
[root@localhost ~]# echo "192.168.0.125 k8s-apiserver" >> /etc/hosts
[root@localhost ~]# mkdir k8s
[root@localhost ~]# vim k8s/kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
kubernetesVersion: v1.18.2
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
controlPlaneEndpoint: "k8s-apiserver:6443"
networking:
  serviceSubnet: "10.96.0.0/16"
  podSubnet: "10.100.0.1/16"
  dnsDomain: "cluster.local"
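Optionally, pull the control-plane images ahead of time so that kubeadm init itself spends no time downloading:

kubeadm config images pull --config=k8s/kubeadm-config.yaml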

kubeadm init --config=k8s/kubeadm-config.yaml --upload-certs

...... partial output omitted ......
Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:
#<--以下命令用来创建k8s管理节点客户端
  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of the control-plane node running the following command on each as root:
#<--以下命令用来加入多master节点,这里安装单master,用不到
  kubeadm join k8s-apiserver:6443 --token cnnvha.r9xipg4uqko40wbv \
    --discovery-token-ca-cert-hash sha256:ad53046ea0fdced5e342b9387f75dc1661415895183b4021b3eb254cbc1d547e \
    --control-plane --certificate-key 6921039c3859186c1135cbc8fee02d4e638920a3e2adb7da6ffb82977e36d31e

Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use
"kubeadm init phase upload-certs --upload-certs" to reload certs afterward.

Then you can join any number of worker nodes by running the following on each as root:
#<--以下命令用来向集群加入work节点
kubeadm join k8s-apiserver:6443 --token cnnvha.r9xipg4uqko40wbv \
    --discovery-token-ca-cert-hash sha256:ad53046ea0fdced5e342b9387f75dc1661415895183b4021b3eb254cbc1d547e 


[root@localhost ~]# mkdir /root/.kube/
[root@localhost ~]# cp -i /etc/kubernetes/admin.conf $HOME/.kube/config

[root@localhost ~]# kubectl get pods -o wide -n kube-system
NAME                             READY   STATUS    RESTARTS   AGE     IP              NODE     NOMINATED NODE   READINESS GATES
coredns-546565776c-5qzzr         0/1     Pending   0          4m28s   <none>          <none>   <none>           <none>
coredns-546565776c-mqw5k         0/1     Pending   0          4m28s   <none>          <none>   <none>           <none>
etcd-k8s-n1                      1/1     Running   0          4m37s   192.168.0.125   k8s-n1   <none>           <none>
kube-apiserver-k8s-n1            1/1     Running   0          4m37s   192.168.0.125   k8s-n1   <none>           <none>
kube-controller-manager-k8s-n1   1/1     Running   0          4m37s   192.168.0.125   k8s-n1   <none>           <none>
kube-proxy-pvwkj                 1/1     Running   0          4m28s   192.168.0.125   k8s-n1   <none>           <none>
kube-scheduler-k8s-n1            1/1     Running   0          4m37s   192.168.0.125   k8s-n1   <none>           <none>

Install the CNI network plugin. The coredns pods above stay Pending until a pod network add-on is installed; this guide uses Calico.

Official Calico documentation: https://docs.projectcalico.org/v3.13/getting-started/kubernetes/self-managed-onprem/onpremises

[root@localhost ~]# cd k8s
[root@k8s-n1 k8s]# wget https://docs.projectcalico.org/v3.13/manifests/calico.yaml
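Note: the stock v3.13 manifest leaves CALICO_IPV4POOL_CIDR commented out, defaulting to 192.168.0.0/16, which overlaps the hosts' own 192.168.0.x network here. A sketch of uncommenting it and matching the podSubnet configured above, assuming the stock manifest layout:

sed -i 's|# - name: CALICO_IPV4POOL_CIDR|- name: CALICO_IPV4POOL_CIDR|' calico.yaml
sed -i 's|#   value: "192.168.0.0/16"|  value: "10.100.0.0/16"|' calico.yaml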
[root@localhost k8s]# kubectl apply -f calico.yaml 
configmap/calico-config created
customresourcedefinition.apiextensions.k8s.io/bgpconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/bgppeers.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/blockaffinities.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/clusterinformations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/felixconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworksets.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/hostendpoints.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamblocks.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamconfigs.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamhandles.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ippools.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networksets.crd.projectcalico.org created
clusterrole.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrolebinding.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrole.rbac.authorization.k8s.io/calico-node created
clusterrolebinding.rbac.authorization.k8s.io/calico-node created
daemonset.apps/calico-node created
serviceaccount/calico-node created
deployment.apps/calico-kube-controllers created
serviceaccount/calico-kube-controllers created

[root@localhost k8s]# watch kubectl get pod -n kube-system -o wide

Join the remaining worker nodes to the cluster

[root@localhost ~]# kubeadm join k8s-apiserver:6443 --token cnnvha.r9xipg4uqko40wbv --discovery-token-ca-cert-hash sha256:ad53046ea0fdced5e342b9387f75dc1661415895183b4021b3eb254cbc1d547e 
# check node status from the master
[root@localhost k8s]# kubectl get nodes
NAME     STATUS     ROLES    AGE     VERSION
k8s-n1   Ready      master   23m     v1.18.2
k8s-n2   NotReady   <none>   2m37s   v1.18.2
k8s-n3   Ready      <none>   2m26s   v1.18.2
k8s-n4   NotReady   <none>   2m23s   v1.18.2

This completes the cluster installation. Nodes report NotReady until their calico-node pod has pulled its images and started; once every pod in kube-system is Running, all nodes should show Ready.

Removing a worker node

Run kubeadm reset on the node being removed, then kubectl delete node your-node-name on the master.
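A more graceful sequence first drains the node so its pods are rescheduled elsewhere (flags as in kubectl 1.18; k8s-n4 used as an example):

kubectl drain k8s-n4 --ignore-daemonsets --delete-local-data   # on the master
kubeadm reset                                                  # on k8s-n4 itself
kubectl delete node k8s-n4                                     # on the master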

Joining worker nodes to the cluster later (after the 2-hour window)

# generate a fresh join command (including a new token) with:
kubeadm token create --print-join-command
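Tokens created this way expire after 24 hours by default; existing tokens can be inspected with:

kubeadm token list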