# Many readers reported authentication problems after following this guide. I re-verified it: the configuration itself is correct.
# The cause was 51CTO's markdown rendering: code pasted into the post lost its indentation.
# The error seen in the comments, "error: unable to upgrade connection: Unauthorized",
# comes from an /etc/kubernetes/kubelet-config.yml generated from directly copied code with broken indentation.
# The article has been fixed. To save you some trouble, here is the original note: http://note.youdao.com/noteshare?id=31d9d5db79cc3ae27e72c029b09ac4ab&sub=9489CC3D8A8C44F197A8A421DC7209D7
OS: CentOS 7.5 1804
Kernel: 3.10.0-862.el7.x86_64
Docker version: 18.06.0-ce
Kubernetes version: v1.11
  master 192.168.1.1
  node1  192.168.1.2
  node2  192.168.1.3
etcd version: v3.2.22
  etcd1 192.168.1.4
  etcd2 192.168.1.5
  etcd3 192.168.1.6
For convenience, all commands are executed as the root user.
The following steps only need to be performed on the Kubernetes cluster nodes.
sed -ri 's#(SELINUX=).*#\1disabled#' /etc/selinux/config
setenforce 0
systemctl disable firewalld
systemctl stop firewalld
swapoff -a
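swapoff -a only lasts until the next reboot. A small extra step (assuming a standard /etc/fstab layout) keeps swap disabled permanently:

# Comment out any swap entry in /etc/fstab so swap stays off across reboots
sed -ri 's/^([^#].*[[:space:]]swap[[:space:]].*)$/#\1/' /etc/fstab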
cat </etc/sysctl.d/k8s.confnet.bridge.bridge-nf-call-ip6tables = 1net.bridge.bridge-nf-call-iptables = 1vm.swappiness=0EOFsysctl --system
cat << EOF > /etc/sysconfig/modules/ipvs.modules
#!/bin/bash
ipvs_modules_dir="/usr/lib/modules/\`uname -r\`/kernel/net/netfilter/ipvs"
for i in \`ls \$ipvs_modules_dir | sed -r 's#(.*).ko.xz#\1#'\`; do
    /sbin/modinfo -F filename \$i &> /dev/null
    if [ \$? -eq 0 ]; then
        /sbin/modprobe \$i
    fi
done
EOF
chmod +x /etc/sysconfig/modules/ipvs.modules
bash /etc/sysconfig/modules/ipvs.modules
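To confirm the IPVS kernel modules actually loaded (ip_vs, ip_vs_rr, and friends should show up):

lsmod | grep -e ip_vs -e nf_conntrack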
# Install on the master node only!!!
wget -O /bin/cfssl https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
wget -O /bin/cfssljson https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
wget -O /bin/cfssl-certinfo https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
for cfssl in `ls /bin/cfssl*`;do chmod +x $cfssl;done;
yum install docker-ce
systemctl start docker
cat << EOF > /etc/docker/daemon.json
{
  "registry-mirrors": ["https://registry.docker-cn.com"],
  "live-restore": true,
  "default-shm-size": "128M",
  "bridge": "none",
  "max-concurrent-downloads": 10,
  "oom-score-adjust": -1000,
  "debug": false
}
EOF
systemctl restart docker
# After the restart, run `ip a`; the docker0 interface should be gone ("bridge": "none" disables it)
Perform the following on the master node.
mkdir -pv $HOME/ssl && cd $HOME/ssl
cat > ca-config.json << EOF
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "kubernetes": {
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ],
        "expiry": "87600h"
      }
    }
  }
}
EOF
cat > etcd-ca-csr.json << EOF
{
  "CN": "etcd",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Shenzhen",
      "L": "Shenzhen",
      "O": "etcd",
      "OU": "Etcd Security"
    }
  ]
}
EOF
cat > etcd-csr.json << EOF
{
  "CN": "etcd",
  "hosts": [
    "127.0.0.1",
    "192.168.1.4",
    "192.168.1.5",
    "192.168.1.6"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Shenzhen",
      "L": "Shenzhen",
      "O": "etcd",
      "OU": "Etcd Security"
    }
  ]
}
EOF
# Generate the certificates and copy them to the other etcd nodes
cfssl gencert -initca etcd-ca-csr.json | cfssljson -bare etcd-ca
cfssl gencert -ca=etcd-ca.pem -ca-key=etcd-ca-key.pem -config=ca-config.json -profile=kubernetes etcd-csr.json | cfssljson -bare etcd
mkdir -pv /etc/etcd/ssl
cp etcd*.pem /etc/etcd/ssl
scp -r /etc/etcd 192.168.1.4:/etc/
scp -r /etc/etcd 192.168.1.5:/etc/
scp -r /etc/etcd 192.168.1.6:/etc/
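Optionally, inspect the generated server certificate to confirm all three etcd IPs made it into the SAN list (using the cfssl-certinfo binary installed earlier):

# The "sans" field in the JSON output should list 127.0.0.1 and the three etcd IPs
cfssl-certinfo -cert /etc/etcd/ssl/etcd.pem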
# On etcd1 (192.168.1.4):
yum install -y etcd
cat << EOF > /etc/etcd/etcd.conf
#[Member]
#ETCD_CORS=""
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
#ETCD_WAL_DIR=""
ETCD_LISTEN_PEER_URLS="https://192.168.1.4:2380"
ETCD_LISTEN_CLIENT_URLS="https://127.0.0.1:2379,https://192.168.1.4:2379"
#ETCD_MAX_SNAPSHOTS="5"
#ETCD_MAX_WALS="5"
ETCD_NAME="etcd1"
#ETCD_SNAPSHOT_COUNT="100000"
#ETCD_HEARTBEAT_INTERVAL="100"
#ETCD_ELECTION_TIMEOUT="1000"
#ETCD_QUOTA_BACKEND_BYTES="0"
#ETCD_MAX_REQUEST_BYTES="1572864"
#ETCD_GRPC_KEEPALIVE_MIN_TIME="5s"
#ETCD_GRPC_KEEPALIVE_INTERVAL="2h0m0s"
#ETCD_GRPC_KEEPALIVE_TIMEOUT="20s"
#
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.1.4:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://127.0.0.1:2379,https://192.168.1.4:2379"
#ETCD_DISCOVERY=""
#ETCD_DISCOVERY_FALLBACK="proxy"
#ETCD_DISCOVERY_PROXY=""
#ETCD_DISCOVERY_SRV=""
ETCD_INITIAL_CLUSTER="etcd1=https://192.168.1.4:2380,etcd2=https://192.168.1.5:2380,etcd3=https://192.168.1.6:2380"
ETCD_INITIAL_CLUSTER_TOKEN="BigBoss"
#ETCD_INITIAL_CLUSTER_STATE="new"
#ETCD_STRICT_RECONFIG_CHECK="true"
#ETCD_ENABLE_V2="true"
#
#[Proxy]
#ETCD_PROXY="off"
#ETCD_PROXY_FAILURE_WAIT="5000"
#ETCD_PROXY_REFRESH_INTERVAL="30000"
#ETCD_PROXY_DIAL_TIMEOUT="1000"
#ETCD_PROXY_WRITE_TIMEOUT="5000"
#ETCD_PROXY_READ_TIMEOUT="0"
#
#[Security]
ETCD_CERT_FILE="/etc/etcd/ssl/etcd.pem"
ETCD_KEY_FILE="/etc/etcd/ssl/etcd-key.pem"
#ETCD_CLIENT_CERT_AUTH="false"
ETCD_TRUSTED_CA_FILE="/etc/etcd/ssl/etcd-ca.pem"
#ETCD_AUTO_TLS="false"
ETCD_PEER_CERT_FILE="/etc/etcd/ssl/etcd.pem"
ETCD_PEER_KEY_FILE="/etc/etcd/ssl/etcd-key.pem"
#ETCD_PEER_CLIENT_CERT_AUTH="false"
ETCD_PEER_TRUSTED_CA_FILE="/etc/etcd/ssl/etcd-ca.pem"
#ETCD_PEER_AUTO_TLS="false"
#
#[Logging]
#ETCD_DEBUG="false"
#ETCD_LOG_PACKAGE_LEVELS=""
#ETCD_LOG_OUTPUT="default"
#
#[Unsafe]
#ETCD_FORCE_NEW_CLUSTER="false"
#
#[Version]
#ETCD_VERSION="false"
#ETCD_AUTO_COMPACTION_RETENTION="0"
#
#[Profiling]
#ETCD_ENABLE_PPROF="false"
#ETCD_METRICS="basic"
#
#[Auth]
#ETCD_AUTH_TOKEN="simple"
EOF
chown -R etcd.etcd /etc/etcd
systemctl enable etcd
systemctl start etcd
# On etcd2 (192.168.1.5):
yum install -y etcd
cat << EOF > /etc/etcd/etcd.conf
#[Member]
#ETCD_CORS=""
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
#ETCD_WAL_DIR=""
ETCD_LISTEN_PEER_URLS="https://192.168.1.5:2380"
ETCD_LISTEN_CLIENT_URLS="https://127.0.0.1:2379,https://192.168.1.5:2379"
#ETCD_MAX_SNAPSHOTS="5"
#ETCD_MAX_WALS="5"
ETCD_NAME="etcd2"
#ETCD_SNAPSHOT_COUNT="100000"
#ETCD_HEARTBEAT_INTERVAL="100"
#ETCD_ELECTION_TIMEOUT="1000"
#ETCD_QUOTA_BACKEND_BYTES="0"
#ETCD_MAX_REQUEST_BYTES="1572864"
#ETCD_GRPC_KEEPALIVE_MIN_TIME="5s"
#ETCD_GRPC_KEEPALIVE_INTERVAL="2h0m0s"
#ETCD_GRPC_KEEPALIVE_TIMEOUT="20s"
#
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.1.5:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://127.0.0.1:2379,https://192.168.1.5:2379"
#ETCD_DISCOVERY=""
#ETCD_DISCOVERY_FALLBACK="proxy"
#ETCD_DISCOVERY_PROXY=""
#ETCD_DISCOVERY_SRV=""
ETCD_INITIAL_CLUSTER="etcd1=https://192.168.1.4:2380,etcd2=https://192.168.1.5:2380,etcd3=https://192.168.1.6:2380"
ETCD_INITIAL_CLUSTER_TOKEN="BigBoss"
#ETCD_INITIAL_CLUSTER_STATE="new"
#ETCD_STRICT_RECONFIG_CHECK="true"
#ETCD_ENABLE_V2="true"
#
#[Proxy]
#ETCD_PROXY="off"
#ETCD_PROXY_FAILURE_WAIT="5000"
#ETCD_PROXY_REFRESH_INTERVAL="30000"
#ETCD_PROXY_DIAL_TIMEOUT="1000"
#ETCD_PROXY_WRITE_TIMEOUT="5000"
#ETCD_PROXY_READ_TIMEOUT="0"
#
#[Security]
ETCD_CERT_FILE="/etc/etcd/ssl/etcd.pem"
ETCD_KEY_FILE="/etc/etcd/ssl/etcd-key.pem"
#ETCD_CLIENT_CERT_AUTH="false"
ETCD_TRUSTED_CA_FILE="/etc/etcd/ssl/etcd-ca.pem"
#ETCD_AUTO_TLS="false"
ETCD_PEER_CERT_FILE="/etc/etcd/ssl/etcd.pem"
ETCD_PEER_KEY_FILE="/etc/etcd/ssl/etcd-key.pem"
#ETCD_PEER_CLIENT_CERT_AUTH="false"
ETCD_PEER_TRUSTED_CA_FILE="/etc/etcd/ssl/etcd-ca.pem"
#ETCD_PEER_AUTO_TLS="false"
#
#[Logging]
#ETCD_DEBUG="false"
#ETCD_LOG_PACKAGE_LEVELS=""
#ETCD_LOG_OUTPUT="default"
#
#[Unsafe]
#ETCD_FORCE_NEW_CLUSTER="false"
#
#[Version]
#ETCD_VERSION="false"
#ETCD_AUTO_COMPACTION_RETENTION="0"
#
#[Profiling]
#ETCD_ENABLE_PPROF="false"
#ETCD_METRICS="basic"
#
#[Auth]
#ETCD_AUTH_TOKEN="simple"
EOF
chown -R etcd.etcd /etc/etcd
systemctl enable etcd
systemctl start etcd
# On etcd3 (192.168.1.6):
yum install -y etcd
cat << EOF > /etc/etcd/etcd.conf
#[Member]
#ETCD_CORS=""
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
#ETCD_WAL_DIR=""
ETCD_LISTEN_PEER_URLS="https://192.168.1.6:2380"
ETCD_LISTEN_CLIENT_URLS="https://127.0.0.1:2379,https://192.168.1.6:2379"
#ETCD_MAX_SNAPSHOTS="5"
#ETCD_MAX_WALS="5"
ETCD_NAME="etcd3"
#ETCD_SNAPSHOT_COUNT="100000"
#ETCD_HEARTBEAT_INTERVAL="100"
#ETCD_ELECTION_TIMEOUT="1000"
#ETCD_QUOTA_BACKEND_BYTES="0"
#ETCD_MAX_REQUEST_BYTES="1572864"
#ETCD_GRPC_KEEPALIVE_MIN_TIME="5s"
#ETCD_GRPC_KEEPALIVE_INTERVAL="2h0m0s"
#ETCD_GRPC_KEEPALIVE_TIMEOUT="20s"
#
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.1.6:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://127.0.0.1:2379,https://192.168.1.6:2379"
#ETCD_DISCOVERY=""
#ETCD_DISCOVERY_FALLBACK="proxy"
#ETCD_DISCOVERY_PROXY=""
#ETCD_DISCOVERY_SRV=""
ETCD_INITIAL_CLUSTER="etcd1=https://192.168.1.4:2380,etcd2=https://192.168.1.5:2380,etcd3=https://192.168.1.6:2380"
ETCD_INITIAL_CLUSTER_TOKEN="BigBoss"
#ETCD_INITIAL_CLUSTER_STATE="new"
#ETCD_STRICT_RECONFIG_CHECK="true"
#ETCD_ENABLE_V2="true"
#
#[Proxy]
#ETCD_PROXY="off"
#ETCD_PROXY_FAILURE_WAIT="5000"
#ETCD_PROXY_REFRESH_INTERVAL="30000"
#ETCD_PROXY_DIAL_TIMEOUT="1000"
#ETCD_PROXY_WRITE_TIMEOUT="5000"
#ETCD_PROXY_READ_TIMEOUT="0"
#
#[Security]
ETCD_CERT_FILE="/etc/etcd/ssl/etcd.pem"
ETCD_KEY_FILE="/etc/etcd/ssl/etcd-key.pem"
#ETCD_CLIENT_CERT_AUTH="false"
ETCD_TRUSTED_CA_FILE="/etc/etcd/ssl/etcd-ca.pem"
#ETCD_AUTO_TLS="false"
ETCD_PEER_CERT_FILE="/etc/etcd/ssl/etcd.pem"
ETCD_PEER_KEY_FILE="/etc/etcd/ssl/etcd-key.pem"
#ETCD_PEER_CLIENT_CERT_AUTH="false"
ETCD_PEER_TRUSTED_CA_FILE="/etc/etcd/ssl/etcd-ca.pem"
#ETCD_PEER_AUTO_TLS="false"
#
#[Logging]
#ETCD_DEBUG="false"
#ETCD_LOG_PACKAGE_LEVELS=""
#ETCD_LOG_OUTPUT="default"
#
#[Unsafe]
#ETCD_FORCE_NEW_CLUSTER="false"
#
#[Version]
#ETCD_VERSION="false"
#ETCD_AUTO_COMPACTION_RETENTION="0"
#
#[Profiling]
#ETCD_ENABLE_PPROF="false"
#ETCD_METRICS="basic"
#
#[Auth]
#ETCD_AUTH_TOKEN="simple"
EOF
chown -R etcd.etcd /etc/etcd
systemctl enable etcd
systemctl start etcd
# Run on the etcd1 node
etcdctl --endpoints "https://127.0.0.1:2379" --ca-file=/etc/etcd/ssl/etcd-ca.pem \
  --cert-file=/etc/etcd/ssl/etcd.pem --key-file=/etc/etcd/ssl/etcd-key.pem cluster-health
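The check above uses the v2 API. etcd 3.2 also ships the v3 client; an equivalent health check would look like this (note the v3 flag names differ: --cacert/--cert/--key):

ETCDCTL_API=3 etcdctl --endpoints "https://127.0.0.1:2379" \
  --cacert=/etc/etcd/ssl/etcd-ca.pem --cert=/etc/etcd/ssl/etcd.pem \
  --key=/etc/etcd/ssl/etcd-key.pem endpoint health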
Perform the following on the master node.
mkdir -p $HOME/ssl && cd $HOME/ssl   # -p: the directory already exists from the etcd certificate step
cat > ca-csr.json << EOF
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Shenzhen",
      "L": "Shenzhen",
      "O": "k8s",
      "OU": "System"
    }
  ],
  "ca": {
    "expiry": "87600h"
  }
}
EOF
cfssl gencert -initca ca-csr.json | cfssljson -bare ca
ls ca*.pem
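A quick sanity check of the CA's subject and its 10-year validity window:

openssl x509 -in ca.pem -noout -subject -dates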
# 10.96.0.1 is the first IP in the service-cluster-ip-range configured for kube-apiserver
cat > kube-apiserver-csr.json << EOF
{
  "CN": "kube-apiserver",
  "hosts": [
    "127.0.0.1",
    "192.168.1.1",
    "192.168.1.2",
    "192.168.1.3",
    "10.96.0.1",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Shenzhen",
      "L": "Shenzhen",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-apiserver-csr.json | cfssljson -bare kube-apiserver
ls kube-apiserver*.pem
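If any of these hosts is missing from the certificate, clients will hit TLS verification errors later, so it is worth confirming the SAN list:

openssl x509 -in kube-apiserver.pem -noout -text | grep -A1 'Subject Alternative Name'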
cat > kube-controller-manager-csr.json << EOF
{
  "CN": "system:kube-controller-manager",
  "hosts": [
    "127.0.0.1",
    "192.168.1.1",
    "192.168.1.2",
    "192.168.1.3"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Shenzhen",
      "L": "Shenzhen",
      "O": "system:kube-controller-manager",
      "OU": "System"
    }
  ]
}
EOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager
ls kube-controller-manager*.pem
cat > kube-scheduler-csr.json << EOF
{
  "CN": "system:kube-scheduler",
  "hosts": [
    "127.0.0.1",
    "192.168.1.1",
    "192.168.1.2",
    "192.168.1.3"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Shenzhen",
      "L": "Shenzhen",
      "O": "system:kube-scheduler",
      "OU": "System"
    }
  ]
}
EOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-scheduler-csr.json | cfssljson -bare kube-scheduler
ls kube-scheduler*.pem
cat > kube-proxy-csr.json << EOF
{
  "CN": "system:kube-proxy",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Shenzhen",
      "L": "Shenzhen",
      "O": "system:kube-proxy",
      "OU": "System"
    }
  ]
}
EOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
ls kube-proxy*.pem
cat > admin-csr.json << EOF
{
  "CN": "admin",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Shenzhen",
      "L": "Shenzhen",
      "O": "system:masters",
      "OU": "System"
    }
  ]
}
EOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin
ls admin*.pem
mkdir -pv /etc/kubernetes/pki
cp ca*.pem admin*.pem kube-proxy*.pem kube-scheduler*.pem kube-controller-manager*.pem kube-apiserver*.pem /etc/kubernetes/pki
scp -r /etc/kubernetes 192.168.1.2:/etc/
scp -r /etc/kubernetes 192.168.1.3:/etc/
cd /root
wget https://dl.k8s.io/v1.11.1/kubernetes-server-linux-amd64.tar.gz
tar -xf kubernetes-server-linux-amd64.tar.gz -C /usr/local
mv /usr/local/kubernetes /usr/local/kubernetes-v1.11
ln -s kubernetes-v1.11 /usr/local/kubernetes
cat > /etc/profile.d/kubernetes.sh << EOF
k8s_home=/usr/local/kubernetes
export PATH=\$k8s_home/server/bin:\$PATH
source <(kubectl completion bash)
EOF
source /etc/profile.d/kubernetes.sh
kubectl version
Generate the kubeconfig files
export BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
cat > /etc/kubernetes/token.csv << EOF
${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF
cd /etc/kubernetes
export KUBE_APISERVER="https://192.168.1.1:6443"
kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/pki/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=kubelet-bootstrap.conf
kubectl config set-credentials kubelet-bootstrap \
  --token=${BOOTSTRAP_TOKEN} \
  --kubeconfig=kubelet-bootstrap.conf
kubectl config set-context default \
  --cluster=kubernetes \
  --user=kubelet-bootstrap \
  --kubeconfig=kubelet-bootstrap.conf
kubectl config use-context default --kubeconfig=kubelet-bootstrap.conf
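The token embedded in kubelet-bootstrap.conf must match /etc/kubernetes/token.csv, otherwise kubelets will fail to bootstrap with 401 Unauthorized. A quick sanity check while $BOOTSTRAP_TOKEN is still set in the shell:

# Both greps should print a match; empty output means the files are out of sync
grep "${BOOTSTRAP_TOKEN}" /etc/kubernetes/token.csv
grep "token: ${BOOTSTRAP_TOKEN}" kubelet-bootstrap.conf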
export KUBE_APISERVER="https://192.168.1.1:6443"
kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/pki/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=kube-controller-manager.conf
kubectl config set-credentials kube-controller-manager \
  --client-certificate=/etc/kubernetes/pki/kube-controller-manager.pem \
  --client-key=/etc/kubernetes/pki/kube-controller-manager-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-controller-manager.conf
kubectl config set-context default \
  --cluster=kubernetes \
  --user=kube-controller-manager \
  --kubeconfig=kube-controller-manager.conf
kubectl config use-context default --kubeconfig=kube-controller-manager.conf
export KUBE_APISERVER="https://192.168.1.1:6443"
kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/pki/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=kube-scheduler.conf
kubectl config set-credentials kube-scheduler \
  --client-certificate=/etc/kubernetes/pki/kube-scheduler.pem \
  --client-key=/etc/kubernetes/pki/kube-scheduler-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-scheduler.conf
kubectl config set-context default \
  --cluster=kubernetes \
  --user=kube-scheduler \
  --kubeconfig=kube-scheduler.conf
kubectl config use-context default --kubeconfig=kube-scheduler.conf
export KUBE_APISERVER="https://192.168.1.1:6443"
kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/pki/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=kube-proxy.conf
kubectl config set-credentials kube-proxy \
  --client-certificate=/etc/kubernetes/pki/kube-proxy.pem \
  --client-key=/etc/kubernetes/pki/kube-proxy-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-proxy.conf
kubectl config set-context default \
  --cluster=kubernetes \
  --user=kube-proxy \
  --kubeconfig=kube-proxy.conf
kubectl config use-context default --kubeconfig=kube-proxy.conf
export KUBE_APISERVER="https://192.168.1.1:6443"
kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/pki/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=admin.conf
kubectl config set-credentials admin \
  --client-certificate=/etc/kubernetes/pki/admin.pem \
  --client-key=/etc/kubernetes/pki/admin-key.pem \
  --embed-certs=true \
  --kubeconfig=admin.conf
kubectl config set-context default \
  --cluster=kubernetes \
  --user=admin \
  --kubeconfig=admin.conf
kubectl config use-context default --kubeconfig=admin.conf
scp kubelet-bootstrap.conf kube-proxy.conf 192.168.1.2:/etc/kubernetes
scp kubelet-bootstrap.conf kube-proxy.conf 192.168.1.3:/etc/kubernetes
cd $HOME
Configure and start kube-apiserver
mkdir -pv /etc/kubernetes/pki/etcd
cd /etc/etcd/ssl
cp etcd-ca.pem etcd-key.pem etcd.pem /etc/kubernetes/pki/etcd
openssl genrsa -out /etc/kubernetes/pki/sa.key 2048
openssl rsa -in /etc/kubernetes/pki/sa.key -pubout -out /etc/kubernetes/pki/sa.pub
ls /etc/kubernetes/pki/sa.*
cd $HOME
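sa.key signs service-account tokens (used by kube-controller-manager) and sa.pub verifies them (used by kube-apiserver). To confirm the two files really form a pair, compare their moduli:

# The two digests must be identical
openssl rsa -in /etc/kubernetes/pki/sa.key -noout -modulus | openssl md5
openssl rsa -pubin -in /etc/kubernetes/pki/sa.pub -noout -modulus | openssl md5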
cat > /etc/systemd/system/kube-apiserver.service << EOF
[Unit]
Description=Kubernetes API Service
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/apiserver
ExecStart=/usr/local/kubernetes/server/bin/kube-apiserver \\
        \$KUBE_LOGTOSTDERR \\
        \$KUBE_LOG_LEVEL \\
        \$KUBE_ETCD_ARGS \\
        \$KUBE_API_ADDRESS \\
        \$KUBE_SERVICE_ADDRESSES \\
        \$KUBE_ADMISSION_CONTROL \\
        \$KUBE_APISERVER_ARGS
Restart=on-failure
Type=notify
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF
cat > /etc/kubernetes/config << EOF
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=2"
EOF
cat > /etc/kubernetes/apiserver << EOF
KUBE_API_ADDRESS="--advertise-address=192.168.1.1"
KUBE_ETCD_ARGS="--etcd-servers=https://192.168.1.4:2379,https://192.168.1.5:2379,https://192.168.1.6:2379 --etcd-cafile=/etc/kubernetes/pki/etcd/etcd-ca.pem --etcd-certfile=/etc/kubernetes/pki/etcd/etcd.pem --etcd-keyfile=/etc/kubernetes/pki/etcd/etcd-key.pem"
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.96.0.0/12"
KUBE_ADMISSION_CONTROL="--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
KUBE_APISERVER_ARGS="--allow-privileged=true --authorization-mode=Node,RBAC --enable-bootstrap-token-auth=true --token-auth-file=/etc/kubernetes/token.csv --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/pki/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/pki/kube-apiserver-key.pem --client-ca-file=/etc/kubernetes/pki/ca.pem --service-account-key-file=/etc/kubernetes/pki/sa.pub --enable-swagger-ui=true --secure-port=6443 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --anonymous-auth=false --kubelet-client-certificate=/etc/kubernetes/pki/admin.pem --kubelet-client-key=/etc/kubernetes/pki/admin-key.pem"
EOF
systemctl daemon-reload
systemctl enable kube-apiserver
systemctl start kube-apiserver
systemctl status kube-apiserver
curl -k https://192.168.1.1:6443/
# If you see the following output, the API server is up
# (401 Unauthorized is expected for an unauthenticated request):
{
  "kind": "Status",
  "apiVersion": "v1",
  "metadata": {},
  "status": "Failure",
  "message": "Unauthorized",
  "reason": "Unauthorized",
  "code": 401
}
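To go one step further and make an authenticated request, reuse the admin client certificate generated earlier; this should return version JSON instead of a 401:

curl --cacert /etc/kubernetes/pki/ca.pem \
  --cert /etc/kubernetes/pki/admin.pem \
  --key /etc/kubernetes/pki/admin-key.pem \
  https://192.168.1.1:6443/version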
Configure and start kube-controller-manager
cat > /etc/systemd/system/kube-controller-manager.service << EOF
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/controller-manager
ExecStart=/usr/local/kubernetes/server/bin/kube-controller-manager \\
        \$KUBE_LOGTOSTDERR \\
        \$KUBE_LOG_LEVEL \\
        \$KUBECONFIG \\
        \$KUBE_CONTROLLER_MANAGER_ARGS
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF
cat >/etc/kubernetes/controller-manager<
systemctl daemon-reload
systemctl enable kube-controller-manager
systemctl start kube-controller-manager
systemctl status kube-controller-manager
Configure and start kube-scheduler
cat > /etc/systemd/system/kube-scheduler.service << EOF
[Unit]
Description=Kubernetes Scheduler Plugin
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/scheduler
ExecStart=/usr/local/kubernetes/server/bin/kube-scheduler \\
        \$KUBE_LOGTOSTDERR \\
        \$KUBE_LOG_LEVEL \\
        \$KUBECONFIG \\
        \$KUBE_SCHEDULER_ARGS
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF
cat > /etc/kubernetes/scheduler << EOF
KUBECONFIG="--kubeconfig=/etc/kubernetes/kube-scheduler.conf"
KUBE_SCHEDULER_ARGS="--leader-elect=true --address=127.0.0.1"
EOF
systemctl daemon-reload
systemctl enable kube-scheduler
systemctl start kube-scheduler
systemctl status kube-scheduler
rm -rf $HOME/.kube
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
kubectl get node
kubectl get componentstatuses

[root@master ~]# kubectl get componentstatuses
NAME                 STATUS    MESSAGE              ERROR
controller-manager   Healthy   ok
scheduler            Healthy   ok
etcd-1               Healthy   {"health": "true"}
etcd-0               Healthy   {"health": "true"}
etcd-2               Healthy   {"health": "true"}
kubectl create clusterrolebinding kubelet-bootstrap \
  --clusterrole=system:node-bootstrapper \
  --user=kubelet-bootstrap
Perform the following on the master node.
cd /root
wget https://github.com/containernetworking/plugins/releases/download/v0.7.1/cni-plugins-amd64-v0.7.1.tgz
mkdir /opt/cni/bin -p
tar -xf cni-plugins-amd64-v0.7.1.tgz -C /opt/cni/bin
# Configure the service unit
cat > /etc/systemd/system/kubelet.service << EOF
[Unit]
Description=Kubernetes Kubelet Server
Documentation=https://github.com/kubernetes/kubernetes
After=docker.service
Requires=docker.service

[Service]
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/kubelet
ExecStart=/usr/local/kubernetes/server/bin/kubelet \\
        \$KUBE_LOGTOSTDERR \\
        \$KUBE_LOG_LEVEL \\
        \$KUBELET_CONFIG \\
        \$KUBELET_HOSTNAME \\
        \$KUBELET_POD_INFRA_CONTAINER \\
        \$KUBELET_ARGS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF
cat > /etc/kubernetes/config << EOF
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=2"
EOF
cat > /etc/kubernetes/kubelet << EOF
KUBELET_HOSTNAME="--hostname-override=192.168.1.1"
KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google_containers/pause-amd64:3.1"
KUBELET_CONFIG="--config=/etc/kubernetes/kubelet-config.yml"
KUBELET_ARGS="--bootstrap-kubeconfig=/etc/kubernetes/kubelet-bootstrap.conf --kubeconfig=/etc/kubernetes/kubelet.conf --cert-dir=/etc/kubernetes/pki --network-plugin=cni --cni-bin-dir=/opt/cni/bin --cni-conf-dir=/etc/cni/net.d"
EOF
# Mind the indentation below: a mis-indented kubelet-config.yml is what causes the
# "error: unable to upgrade connection: Unauthorized" problem mentioned at the top
cat > /etc/kubernetes/kubelet-config.yml << EOF
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: 192.168.1.1
port: 10250
cgroupDriver: cgroupfs
clusterDNS:
- 10.96.0.10
clusterDomain: cluster.local.
hairpinMode: promiscuous-bridge
serializeImagePulls: false
authentication:
  x509:
    clientCAFile: /etc/kubernetes/pki/ca.pem
  anonymous:
    enabled: false
  webhook:
    enabled: false
EOF
systemctl daemon-reload
systemctl enable kubelet
systemctl restart kubelet
systemctl status kubelet
Perform the following on node1.
cd /root
wget https://dl.k8s.io/v1.11.1/kubernetes-node-linux-amd64.tar.gz
wget https://github.com/containernetworking/plugins/releases/download/v0.7.1/cni-plugins-amd64-v0.7.1.tgz
tar -xf kubernetes-node-linux-amd64.tar.gz -C /usr/local/
mv /usr/local/kubernetes /usr/local/kubernetes-v1.11
ln -s kubernetes-v1.11 /usr/local/kubernetes
mkdir /opt/cni/bin -p
tar -xf cni-plugins-amd64-v0.7.1.tgz -C /opt/cni/bin
# Configure the systemd service unit
cat > /etc/systemd/system/kubelet.service << EOF
[Unit]
Description=Kubernetes Kubelet Server
Documentation=https://github.com/kubernetes/kubernetes
After=docker.service
Requires=docker.service

[Service]
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/kubelet
ExecStart=/usr/local/kubernetes/node/bin/kubelet \\
        \$KUBE_LOGTOSTDERR \\
        \$KUBE_LOG_LEVEL \\
        \$KUBELET_CONFIG \\
        \$KUBELET_HOSTNAME \\
        \$KUBELET_POD_INFRA_CONTAINER \\
        \$KUBELET_ARGS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF
cat > /etc/kubernetes/config << EOF
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=2"
EOF
cat > /etc/kubernetes/kubelet << EOF
KUBELET_HOSTNAME="--hostname-override=192.168.1.2"
KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google_containers/pause-amd64:3.1"
KUBELET_CONFIG="--config=/etc/kubernetes/kubelet-config.yml"
KUBELET_ARGS="--bootstrap-kubeconfig=/etc/kubernetes/kubelet-bootstrap.conf --kubeconfig=/etc/kubernetes/kubelet.conf --cert-dir=/etc/kubernetes/pki --network-plugin=cni --cni-bin-dir=/opt/cni/bin --cni-conf-dir=/etc/cni/net.d"
EOF
cat > /etc/kubernetes/kubelet-config.yml << EOF
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: 192.168.1.2
port: 10250
cgroupDriver: cgroupfs
clusterDNS:
- 10.96.0.10
clusterDomain: cluster.local.
hairpinMode: promiscuous-bridge
serializeImagePulls: false
authentication:
  x509:
    clientCAFile: /etc/kubernetes/pki/ca.pem
  anonymous:
    enabled: false
  webhook:
    enabled: false
EOF
systemctl daemon-reload
systemctl enable kubelet
systemctl restart kubelet
systemctl status kubelet
Perform the following on node2.
cd /root
wget https://dl.k8s.io/v1.11.1/kubernetes-node-linux-amd64.tar.gz
wget https://github.com/containernetworking/plugins/releases/download/v0.7.1/cni-plugins-amd64-v0.7.1.tgz
tar -xf kubernetes-node-linux-amd64.tar.gz -C /usr/local/
mv /usr/local/kubernetes /usr/local/kubernetes-v1.11
ln -s kubernetes-v1.11 /usr/local/kubernetes
mkdir /opt/cni/bin -p
tar -xf cni-plugins-amd64-v0.7.1.tgz -C /opt/cni/bin
# Configure the systemd service unit
cat > /etc/systemd/system/kubelet.service << EOF
[Unit]
Description=Kubernetes Kubelet Server
Documentation=https://github.com/kubernetes/kubernetes
After=docker.service
Requires=docker.service

[Service]
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/kubelet
ExecStart=/usr/local/kubernetes/node/bin/kubelet \\
        \$KUBE_LOGTOSTDERR \\
        \$KUBE_LOG_LEVEL \\
        \$KUBELET_CONFIG \\
        \$KUBELET_HOSTNAME \\
        \$KUBELET_POD_INFRA_CONTAINER \\
        \$KUBELET_ARGS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF
cat > /etc/kubernetes/config << EOF
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=2"
EOF
cat > /etc/kubernetes/kubelet << EOF
KUBELET_HOSTNAME="--hostname-override=192.168.1.3"
KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google_containers/pause-amd64:3.1"
KUBELET_CONFIG="--config=/etc/kubernetes/kubelet-config.yml"
KUBELET_ARGS="--bootstrap-kubeconfig=/etc/kubernetes/kubelet-bootstrap.conf --kubeconfig=/etc/kubernetes/kubelet.conf --cert-dir=/etc/kubernetes/pki --network-plugin=cni --cni-bin-dir=/opt/cni/bin --cni-conf-dir=/etc/cni/net.d"
EOF
cat > /etc/kubernetes/kubelet-config.yml << EOF
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: 192.168.1.3
port: 10250
cgroupDriver: cgroupfs
clusterDNS:
- 10.96.0.10
clusterDomain: cluster.local.
hairpinMode: promiscuous-bridge
serializeImagePulls: false
authentication:
  x509:
    clientCAFile: /etc/kubernetes/pki/ca.pem
  anonymous:
    enabled: false
  webhook:
    enabled: false
EOF
systemctl daemon-reload
systemctl enable kubelet
systemctl restart kubelet
systemctl status kubelet
# On the master node
kubectl get csr
# Approve the pending CSRs so the nodes join the cluster
kubectl get csr | awk '/node/{print $1}' | xargs kubectl certificate approve
### Example of approving a single CSR:
### kubectl certificate approve node-csr-Yiiv675wUCvQl3HH11jDr0cC9p3kbrXWrxvG3EjWGoE
# Check the nodes
# They are NotReady at this point because the network has not been configured yet
kubectl get nodes

[root@master ~]# kubectl get nodes
NAME          STATUS     ROLES     AGE       VERSION
192.168.1.1   NotReady   <none>    6s        v1.11.1
192.168.1.2   NotReady   <none>    7s        v1.11.1
192.168.1.3   NotReady   <none>    7s        v1.11.1

# On the node machines, check the generated files
ls -l /etc/kubernetes/kubelet.conf
ls -l /etc/kubernetes/pki/kubelet*
Perform the following on the master node.
yum install -y conntrack-tools
cat > /etc/systemd/system/kube-proxy.service << EOF
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/proxy
ExecStart=/usr/local/kubernetes/server/bin/kube-proxy \\
        \$KUBE_LOGTOSTDERR \\
        \$KUBE_LOG_LEVEL \\
        \$KUBECONFIG \\
        \$KUBE_PROXY_ARGS
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF
# Enabling IPVS mainly means setting kube-proxy's --proxy-mode option to ipvs,
# plus --masquerade-all so that iptables assists IPVS
cat > /etc/kubernetes/proxy << EOF
KUBECONFIG="--kubeconfig=/etc/kubernetes/kube-proxy.conf"
KUBE_PROXY_ARGS="--proxy-mode=ipvs --masquerade-all=true --cluster-cidr=10.0.0.0/8"
EOF
systemctl daemon-reload
systemctl enable kube-proxy
systemctl restart kube-proxy
systemctl status kube-proxy
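kube-proxy reports the mode it actually settled on via its metrics endpoint (port 10249 by default); if the IPVS prerequisites were met, this should print ipvs:

curl localhost:10249/proxyMode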
Perform the following on all node machines.
yum install -y conntrack-tools
cat > /etc/systemd/system/kube-proxy.service << EOF
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/proxy
ExecStart=/usr/local/kubernetes/node/bin/kube-proxy \\
        \$KUBE_LOGTOSTDERR \\
        \$KUBE_LOG_LEVEL \\
        \$KUBECONFIG \\
        \$KUBE_PROXY_ARGS
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF
# Enabling IPVS mainly means setting kube-proxy's --proxy-mode option to ipvs,
# plus --masquerade-all so that iptables assists IPVS
cat > /etc/kubernetes/proxy << EOF
KUBECONFIG="--kubeconfig=/etc/kubernetes/kube-proxy.conf"
KUBE_PROXY_ARGS="--proxy-mode=ipvs --masquerade-all=true --cluster-cidr=10.0.0.0/8"
EOF
systemctl daemon-reload
systemctl enable kube-proxy
systemctl restart kube-proxy
systemctl status kube-proxy
Perform the following on the master node.
kubectl label nodes 192.168.1.1 node-role.kubernetes.io/master=
kubectl label nodes 192.168.1.2 node-role.kubernetes.io/node=
kubectl label nodes 192.168.1.3 node-role.kubernetes.io/node=
kubectl taint nodes 192.168.1.1 node-role.kubernetes.io/master=true:NoSchedule
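To confirm the labels and the taint were applied:

kubectl get nodes --show-labels
kubectl describe node 192.168.1.1 | grep -i taint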
# The nodes are still NotReady at this point,
# but the ROLES column now distinguishes master and node
kubectl get node

NAME          STATUS     ROLES     AGE       VERSION
192.168.1.1   NotReady   master    1m        v1.11.1
192.168.1.2   NotReady   node      1m        v1.11.1
192.168.1.3   NotReady   node      1m        v1.11.1
Choose one of the two network add-ons below:
# Option 1: flannel
cd /root/
mkdir flannel
cd flannel
wget https://raw.githubusercontent.com/coreos/flannel/v0.10.0/Documentation/kube-flannel.yml
sed -ri 's#("Network": ")10.244.0.0/16#\110.0.0.0/8#' kube-flannel.yml
# Change the network CIDR in kube-flannel.yml to the one we use
kubectl apply -f .
# Option 2: canal
cd /root/
mkdir canal
cd canal
wget https://docs.projectcalico.org/v3.1/getting-started/kubernetes/installation/hosted/canal/rbac.yaml
wget https://docs.projectcalico.org/v3.1/getting-started/kubernetes/installation/hosted/canal/canal.yaml
sed -ri 's#("Network": ")10.244.0.0/16#\110.0.0.0/8#' canal.yaml
# Change the network CIDR in canal.yaml to the one we use
kubectl apply -f .
kubectl get -n kube-system pod -o wide

[root@master ~]# kubectl get -n kube-system pod -o wide
NAME          READY     STATUS    RESTARTS   AGE       IP            NODE
canal-74zhp   3/3       Running   0          7m        192.168.1.3   192.168.1.3
canal-cmz2p   3/3       Running   0          7m        192.168.1.1   192.168.1.1
canal-mkcg2   3/3       Running   0          7m        192.168.1.2   192.168.1.2
kubectl get node

[root@master ~]# kubectl get node
NAME          STATUS    ROLES     AGE       VERSION
192.168.1.1   Ready     master    5h        v1.11.1
192.168.1.2   Ready     node      5h        v1.11.1
192.168.1.3   Ready     node      5h        v1.11.1
# 10.96.0.10 is the DNS address configured in the kubelet config
# Install CoreDNS
cd /root && mkdir coredns && cd coredns
wget https://raw.githubusercontent.com/coredns/deployment/master/kubernetes/coredns.yaml.sed
wget https://raw.githubusercontent.com/coredns/deployment/master/kubernetes/deploy.sh
chmod +x deploy.sh
./deploy.sh -i 10.96.0.10 > coredns.yml
kubectl apply -f coredns.yml

# Check
kubectl get svc,pods -n kube-system

[root@master coredns]# kubectl get svc,pods -n kube-system
NAME               TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)         AGE
service/kube-dns   ClusterIP   10.96.0.10   <none>        53/UDP,53/TCP   2m

NAME                           READY     STATUS    RESTARTS   AGE
pod/canal-5wkkd                3/3       Running   0          17h
pod/canal-6mhhz                3/3       Running   0          17h
pod/canal-k7ccs                3/3       Running   2          17h
pod/coredns-6975654877-jpqg4   1/1       Running   0          2m
pod/coredns-6975654877-lgz9n   1/1       Running   0          2m
cd /root && mkdir nginx && cd nginx
cat << EOF > nginx.yaml
---
apiVersion: v1
kind: Service
metadata:
  name: nginx
spec:
  selector:
    app: nginx
  type: NodePort
  ports:
  - port: 80
    nodePort: 31000
    name: nginx-port
    targetPort: 80
    protocol: TCP
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      name: nginx
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx
        ports:
        - containerPort: 80
EOF
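The manifest still has to be applied before the tests below will work; this step is implied:

kubectl apply -f nginx.yaml
kubectl get pods,svc -o wide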
kubectl run curl --image=radial/busyboxplus:curl -i --tty
nslookup kubernetes
nslookup nginx
curl nginx
exit

[ root@curl-87b54756-qf7l9:/ ]$ nslookup kubernetes
Server:    10.96.0.10
Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local

Name:      kubernetes
Address 1: 10.96.0.1 kubernetes.default.svc.cluster.local
[ root@curl-87b54756-qf7l9:/ ]$ nslookup nginx
Server:    10.96.0.10
Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local

Name:      nginx
Address 1: 10.105.93.85 nginx.default.svc.cluster.local
[ root@curl-87b54756-qf7l9:/ ]$ curl nginx
Welcome to nginx! ...
[ root@curl-87b54756-qf7l9:/ ]$ exit
Session ended, resume using 'kubectl attach curl-87b54756-qf7l9 -c curl -i -t' command when the pod is running
curl 192.168.1.2:31000

[root@node5 ~]# curl 192.168.1.2:31000
...the standard nginx welcome page is returned:
"Welcome to nginx! If you see this page, the nginx web server is successfully installed and
working. Further configuration is required. For online documentation and support please refer
to nginx.org. Commercial support is available at nginx.com. Thank you for using nginx."
yum install -y ipvsadm
ipvsadm

[root@master ~]# ipvsadm
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  master:31000 rr
  -> 10.0.0.8:http                Masq    1      0          0
  -> 10.0.1.9:http                Masq    1      0          0
TCP  master:31000 rr
  -> 10.0.0.8:http                Masq    1      0          0
  -> 10.0.1.9:http                Masq    1      0          0
TCP  master:31000 rr
  -> 10.0.0.8:http                Masq    1      0          0
  -> 10.0.1.9:http                Masq    1      0          0
TCP  master:https rr
  -> master:sun-sr-https          Masq    1      2          0
TCP  master:domain rr
  -> 10.0.0.3:domain              Masq    1      0          0
  -> 10.0.1.3:domain              Masq    1      0          0
TCP  master:http rr
  -> 10.0.0.8:http                Masq    1      0          0
  -> 10.0.1.9:http                Masq    1      0          0
TCP  localhost:31000 rr
  -> 10.0.0.8:http                Masq    1      0          0
  -> 10.0.1.9:http                Masq    1      0          0
UDP  master:domain rr
  -> 10.0.0.3:domain              Masq    1      0          0
  -> 10.0.1.3:domain              Masq    1      0          0
Reposted from: https://blog.51cto.com/bigboss/2153651