https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/
System requirements: at least 2 CPUs and 2 GB of RAM per machine (see the kubeadm install docs above).
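A quick pre-flight check can be scripted; the commands below are stock Linux tools, not part of the kubeadm docs:
# need >= 2 CPUs and >= 2 GB RAM
nproc
free -m
# the API server port must be free on the control plane
ss -lntp | grep 6443 || echo "port 6443 is free"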
Install the Aliyun YUM repos (CentOS Base + EPEL)
mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.backup
curl -o /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo
mv /etc/yum.repos.d/epel.repo /etc/yum.repos.d/epel.repo.backup
mv /etc/yum.repos.d/epel-testing.repo /etc/yum.repos.d/epel-testing.repo.backup
wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo
yum install -y chrony conntrack ipvsadm ipset jq iptables curl sysstat libseccomp wget socat git
Tune kernel parameters
cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.ipv4.ip_forward=1
net.ipv4.tcp_tw_recycle=0
net.ipv4.tcp_tw_reuse=1
net.ipv4.tcp_timestamps=1
net.ipv4.neigh.default.gc_thresh1=1024
net.ipv4.neigh.default.gc_thresh2=2048
net.ipv4.neigh.default.gc_thresh3=4096
vm.swappiness=0
vm.overcommit_memory=1
vm.panic_on_oom=0
vm.max_map_count=262144
fs.inotify.max_user_instances=8192
fs.inotify.max_user_watches=1048576
fs.file-max=52706963
fs.nr_open=52706963
net.ipv6.conf.all.disable_ipv6=1
net.netfilter.nf_conntrack_max=2310720
EOF
sysctl --system
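Note that the net.bridge.* keys only resolve once br_netfilter is loaded (done in the IPVS step below). After that, a quick spot check (not from the original notes):
sysctl net.bridge.bridge-nf-call-iptables net.ipv4.ip_forward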
Load the IPVS kernel modules
cat <<EOF > /etc/sysconfig/modules/ipvs.modules
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules
modprobe br_netfilter
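To verify the modules actually loaded (standard check, not in the original notes):
lsmod | grep -e ip_vs -e nf_conntrack -e br_netfilter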
Disable SELinux
setenforce 0
sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
Disable swap
swapoff -a
sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
Set the default iptables forward policy
systemctl stop firewalld
systemctl disable firewalld
iptables -F && iptables -X && iptables -F -t nat && iptables -X -t nat
iptables -P FORWARD ACCEPT
Disable unused services
systemctl stop postfix && systemctl disable postfix
Install the Aliyun Kubernetes YUM repo
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
Install kube* & Docker
yum install -y kubelet kubeadm kubectl
systemctl enable kubelet && systemctl start kubelet
yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
# remove any older Docker packages first
yum remove docker \
    docker-client \
    docker-client-latest \
    docker-common \
    docker-latest \
    docker-latest-logrotate \
    docker-logrotate \
    docker-engine
yum update -y && yum install -y \
    containerd.io-1.2.13 \
    docker-ce-19.03.8 \
    docker-ce-cli-19.03.8
mkdir /etc/docker
cat > /etc/docker/daemon.json <<EOF
{
  "registry-mirrors": ["https://890une7x.mirror.aliyuncs.com"],
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2",
  "storage-opts": [
    "overlay2.override_kernel_check=true"
  ]
}
EOF
mkdir -p /etc/systemd/system/docker.service.d
systemctl start docker.service
systemctl enable docker.service
systemctl status docker.service
kubectl autocompletion https://kubernetes.io/docs/tasks/tools/install-kubectl/#enabling-shell-autocompletion
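From the linked page, bash completion can be set up like this (assumes the bash-completion package; the commands are from the kubectl docs):
yum install -y bash-completion
source <(kubectl completion bash)                          # current shell
kubectl completion bash > /etc/bash_completion.d/kubectl   # persist for new shells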
kubeadm config
vim kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
kubernetesVersion: v1.18.3
controlPlaneEndpoint: <your-lb-ip>:<port>
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
apiServer:
  timeoutForControlPlane: 4m0s
controllerManager: {}
scheduler: {}
imageRepository: registry.aliyuncs.com/google_containers
networking:
  dnsDomain: cluster.local
  podSubnet: 172.30.0.0/16
  serviceSubnet: 10.96.0.0/12
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
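Before initializing, the exact images this config pins can be listed; `kubeadm config images list` is a stock kubeadm subcommand:
kubeadm config images list --config kubeadm-config.yaml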
kubeadm deploy
# pre-pull the control-plane images (optional, faster init)
kubeadm config images pull --config kubeadm-config.yaml
kubeadm init --config kubeadm-config.yaml --upload-certs
# install the Calico CNI
curl -LO https://docs.projectcalico.org/v3.14/manifests/calico.yaml
kubectl apply -f calico.yaml
# to tear a node / the cluster back down:
kubeadm reset
kubectl delete node <node-name>
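After a successful init, kubeadm prints the kubeconfig setup and the worker join command; they follow this shape (token and hash are placeholders):
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
# on each worker:
kubeadm join <your-lb-ip>:<port> --token <token> --discovery-token-ca-cert-hash sha256:<hash>
kubectl get nodes -o wide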
Rebuild kube-proxy in IPVS mode
# set mode: "ipvs" in the kube-proxy ConfigMap, then recreate the pods
kubectl -n kube-system edit cm kube-proxy
kubectl get pod -n kube-system | grep kube-proxy | awk '{system("kubectl delete pod "$1" -n kube-system")}'
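To confirm kube-proxy really switched to IPVS (ipvsadm was installed earlier; the label matches kubeadm's kube-proxy DaemonSet):
ipvsadm -Ln
kubectl -n kube-system logs -l k8s-app=kube-proxy | grep -i ipvs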
Rebuild Calico in BGP mode
curl -LO https://docs.projectcalico.org/v3.14/manifests/calico.yaml
kubectl apply -f calico.yaml
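The stock manifest defaults to IPIP encapsulation; for pure BGP the usual edit (per the Calico docs, verify against your manifest version) is to set CALICO_IPV4POOL_IPIP to "Never" before applying:
# in calico.yaml, under the calico-node container env:
#   - name: CALICO_IPV4POOL_IPIP
#     value: "Never"
sed -i 's/value: "Always"/value: "Never"/' calico.yaml
kubectl apply -f calico.yaml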
Operations
# switch the default namespace for the current context
kubectl config set-context $(kubectl config current-context) --namespace=<insert-namespace-name-here>
kubectl config view | grep namespace
# create an image-pull secret for a private Harbor registry
kubectl create secret docker-registry boer-harbor --docker-server=harbor.boer.xyz --docker-username=admin --docker-password=Admin@123 --docker-email=<email> --namespace=boer-public
# bootstrap tokens
kubeadm token list
kubeadm token create --print-join-command
# node maintenance
kubectl drain $NODENAME
kubectl uncordon $NODENAME
# docker housekeeping
docker rm $(docker ps -a -f status=exited -q)
docker ps --format "{{.ID}}\t{{.Command}}\t{{.Status}}\t{{.Ports}}"
docker ps --filter "status=exited"
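On real nodes kubectl drain usually needs extra flags; for v1.18 a common invocation is the following (check kubectl drain --help on your version, since --delete-local-data was later renamed):
kubectl drain $NODENAME --ignore-daemonsets --delete-local-data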
etcdctl
curl -LO https://github.com/etcd-io/etcd/releases/download/v3.4.3/etcd-v3.4.3-linux-amd64.tar.gz
tar -zxvf etcd-v3.4.3-linux-amd64.tar.gz && cp etcd-v3.4.3-linux-amd64/etcdctl /usr/local/bin/
alias etcdctl='etcdctl --cacert=/etc/kubernetes/pki/etcd/ca.crt --cert=/etc/kubernetes/pki/etcd/peer.crt --key=/etc/kubernetes/pki/etcd/peer.key'
etcdctl get /registry/namespaces/kube-system -w=json | jq .
etcdctl member list
etcdctl help
# snapshot subcommands (worked example below):
etcdctl snapshot save
etcdctl snapshot status
etcdctl snapshot restore
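A worked backup/restore sketch; the endpoint and paths are illustrative, not from the original notes:
# back up the keyspace to a file
etcdctl --endpoints=https://127.0.0.1:2379 snapshot save /var/backups/etcd-snapshot.db
# inspect the snapshot
etcdctl snapshot status /var/backups/etcd-snapshot.db -w table
# restore into a fresh data dir
etcdctl snapshot restore /var/backups/etcd-snapshot.db --data-dir /var/lib/etcd-restore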
Calicoctl
curl -O -L https://github.com/projectcalico/calicoctl/releases/download/v3.14.1/calicoctl
mv calicoctl /usr/local/bin/
chmod a+x /usr/local/bin/calicoctl
vim /etc/calico/calicoctl.cfg
apiVersion: projectcalico.org/v3
kind: CalicoAPIConfig
metadata:
spec:
  datastoreType: "kubernetes"
  kubeconfig: "/root/.kube/config"
calicoctl get nodes
calicoctl node status
calicoctl get ipPool -o yaml
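Alternatively, calicoctl can take the datastore settings from environment variables instead of the config file (documented calicoctl behavior):
DATASTORE_TYPE=kubernetes KUBECONFIG=~/.kube/config calicoctl get nodes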
Install Helm v2
curl -LO https://get.helm.sh/helm-v2.16.6-linux-amd64.tar.gz
tar -zxvf helm-v2.16.6-linux-amd64.tar.gz && mv linux-amd64/helm /usr/local/bin/helm
vim rbac-config.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: tiller
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: tiller
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
  - kind: ServiceAccount
    name: tiller
    namespace: kube-system
kubectl apply -f rbac-config.yaml
helm init --service-account tiller -i registry.aliyuncs.com/google_containers/tiller:v2.16.6
helm repo list
helm repo add stable https://mirror.azure.cn/kubernetes/charts
helm repo add incubator https://mirror.azure.cn/kubernetes/charts-incubator
helm repo update
helm fetch stable/mysql
helm install stable/mysql
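Before installing charts, confirm Tiller is running (standard Helm v2 checks; the labels are those of the stock tiller-deploy Deployment):
kubectl -n kube-system get pods -l app=helm,name=tiller
helm version   # should print both Client and Server versions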
MetalLB L4
apiVersion: v1
kind: Service
metadata:
  name: theapp-service
  annotations:
    metallb.universe.tf/address-pool: default
  labels:
    app: theapp
spec:
  type: LoadBalancer
  ports:
    - port: 5000
      targetPort: 5000
  selector:
    app: theapp
kubectl get svc
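Once MetalLB hands out an address from its pool, it appears as EXTERNAL-IP; a quick check, with <pool-ip> as a placeholder:
kubectl get svc theapp-service
curl http://<pool-ip>:5000/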
MetalLB (first class) vs NodePort (economy class)
Ingress-Nginx L7
# expose the ingress-nginx controller Service through MetalLB:
metadata:
  annotations:
    metallb.universe.tf/address-pool: default
spec:
  type: LoadBalancer
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: theapp-ingress
  annotations:
    nginx.ingress.kubernetes.io/rewrite-target: /
    nginx.ingress.kubernetes.io/load-balance: "ip_hash"
    nginx.ingress.kubernetes.io/upstream-hash-by: "$request_uri"
spec:
  rules:
    - host: theapp.boer.xyz
      http:
        paths:
          - path: /
            backend:
              serviceName: theapp-service
              servicePort: 5000
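With the controller exposed, the host rule can be tested before DNS exists by forcing the Host header; <ingress-lb-ip> stands for the controller's LoadBalancer address:
curl -H "Host: theapp.boer.xyz" http://<ingress-lb-ip>/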
Uninstall Rancher
kubectl proxy &
NAMESPACE=local
kubectl get namespace $NAMESPACE -o json | jq '.spec = {"finalizers":[]}' > temp.json
curl -k -H "Content-Type: application/json" -X PUT --data-binary @temp.json 127.0.0.1:8001/api/v1/namespaces/$NAMESPACE/finalize
kubectl get customresourcedefinitions | grep cattle.io | awk '{print $1}' | xargs kubectl delete customresourcedefinitions
Model overview