1、kubernetes集群部署
1.1、服務器規劃
1.2、系統初始化
#關閉防火牆
systemctl stop firewalld
systemctl disable firewalld
#關閉selinux
setenforce 0 #臨時
sed -i 's/enforcing/disabled/' /etc/selinux/config #永久
#關閉swap
swapoff -a
sed -i 's/.*swap.*/#&/' /etc/fstab
#添加hosts
# vi /etc/hosts
192.168.1.13 k8s-master1
192.168.1.14 k8s-node1
192.168.1.15 k8s-node2
#分別在各節點設置主機名稱
hostnamectl set-hostname k8s-master1
hostnamectl set-hostname k8s-node1
hostnamectl set-hostname k8s-node2
#同步系統時間
yum install ntpdate -y
ntpdate time.windows.com
# crontab -l
*/5 * * * * ntpdate time.windows.com
1.3、安裝etcd集群(k8s-master1)
1.3.1、生成etcd證書
使用cfssl自籤etcd的證書, 安裝cfssl工具: master節點上傳TLS.tar.gz
tar -xzf TLS.tar.gz
cd TLS
./cfssl.sh
# cat etcd/generate_etcd_cert.sh
cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=www server-csr.json | cfssljson -bare server
# cd /root/TLS/etcd
# cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
# ll *pem
ca-key.pem #ca的私鑰
ca.pem #ca數字證書
告訴ca要為etcd頒發一個證書:
# cat server-csr.json
{
"CN": "etcd",
"hosts": [
"192.168.1.13",
"192.168.1.14",
"192.168.1.15"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "BeiJing",
"ST": "BeiJing"
}
]
}
# cat ca-config.json
{
"signing": {
"default": {
"expiry": "87600h"
},
"profiles": {
"www": {
"expiry": "87600h",
"usages": [
"signing",
"key encipherment",
"server auth",
"client auth"
]
}
}
}
}
生成etcd的證書:
# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=www server-csr.json | cfssljson -bare server
1.3.2、部署etcd三個節點
master上傳安裝包並解壓etcd:
# tar -xzf etcd.tar.gz
# cd etcd/ssl
# rm *pem -rf
修改etcd配置文件:
# cd /root/etcd/cfg
# vi etcd.conf
#[Member]
ETCD_NAME="etcd-1"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.1.13:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.1.13:2379"
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.1.13:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.1.13:2379"
ETCD_INITIAL_CLUSTER="etcd-1=https://192.168.1.13:2380,etcd-2=https://192.168.1.14:2380,etcd-3=https://192.168.1.15:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
# cp /root/TLS/etcd/{ca,server,server-key}.pem /root/etcd/ssl/
# ls ssl/
ca.pem server-key.pem server.pem
# cat /root/etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
[Service]
Type=notify
EnvironmentFile=/opt/etcd/cfg/etcd.conf
ExecStart=/opt/etcd/bin/etcd \\
--name=${ETCD_NAME} \\
--data-dir=${ETCD_DATA_DIR} \\
--listen-peer-urls=${ETCD_LISTEN_PEER_URLS} \\
--listen-client-urls=${ETCD_LISTEN_CLIENT_URLS},http://127.0.0.1:2379 \\
--advertise-client-urls=${ETCD_ADVERTISE_CLIENT_URLS} \\
--initial-advertise-peer-urls=${ETCD_INITIAL_ADVERTISE_PEER_URLS} \\
--initial-cluster=${ETCD_INITIAL_CLUSTER} \\
--initial-cluster-token=${ETCD_INITIAL_CLUSTER_TOKEN} \\
--initial-cluster-state=new \\
--cert-file=/opt/etcd/ssl/server.pem \\
--key-file=/opt/etcd/ssl/server-key.pem \\
--peer-cert-file=/opt/etcd/ssl/server.pem \\
--peer-key-file=/opt/etcd/ssl/server-key.pem \\
--trusted-ca-file=/opt/etcd/ssl/ca.pem \\
--peer-trusted-ca-file=/opt/etcd/ssl/ca.pem
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
# cp -rf etcd /opt
拷貝etcd文件到node1,node2節點機器上:
scp -r /opt/etcd [email protected]:/opt/
scp -r /opt/etcd [email protected]:/opt/
修改node1,node2上的cfg/etcd.conf 中的ip信息;
node1配置:
# cat /opt/etcd/cfg/etcd.conf
#[Member]
ETCD_NAME="etcd-2"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.1.14:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.1.14:2379"
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.1.14:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.1.14:2379"
ETCD_INITIAL_CLUSTER="etcd-1=https://192.168.1.13:2380,etcd-2=https://192.168.1.14:2380,etcd-3=https://192.168.1.15:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
node2配置:
# cat /opt/etcd/cfg/etcd.conf
#[Member]
ETCD_NAME="etcd-3"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.1.15:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.1.15:2379"
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.1.15:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.1.15:2379"
ETCD_INITIAL_CLUSTER="etcd-1=https://192.168.1.13:2380,etcd-2=https://192.168.1.14:2380,etcd-3=https://192.168.1.15:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
拷貝服務啟動文件,到對應的機器目錄下去:
cp etcd.service /usr/lib/systemd/system
scp etcd.service [email protected]:/usr/lib/systemd/system/
scp etcd.service [email protected]:/usr/lib/systemd/system/
啟動etcd服務(三個節點分別執行;第一個節點啟動時會阻塞等待其他節點加入集群,屬正常現象):
systemctl daemon-reload
systemctl start etcd
systemctl enable etcd
1.3.3、查看etcd集群狀態
# /opt/etcd/bin/etcdctl --ca-file=/opt/etcd/ssl/ca.pem --cert-file=/opt/etcd/ssl/server.pem --key-file=/opt/etcd/ssl/server-key.pem --endpoints="https://192.168.1.13:2379,https://192.168.1.14:2379,https://192.168.1.15:2379" cluster-health
1.4、 安裝master節點組件
1.4.1、生成apiserver證書
自籤api-server證書:
# cd TLS/k8s/
# cat kube-proxy-csr.json
{
"CN": "system:kube-proxy",
"hosts": [],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "BeiJing",
"ST": "BeiJing",
"O": "k8s",
"OU": "System"
}
]
}
# cat server-csr.json #node節點不用寫
{
"CN": "kubernetes",
"hosts": [
"10.0.0.1",
"127.0.0.1",
"kubernetes",
"kubernetes.default",
"kubernetes.default.svc",
"kubernetes.default.svc.cluster",
"kubernetes.default.svc.cluster.local",
"192.168.1.10",
"192.168.1.11",
"192.168.1.12",
"192.168.1.13"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "BeiJing",
"ST": "BeiJing",
"O": "k8s",
"OU": "System"
}
]
}
生成證書:
# ./generate_k8s_cert.sh
# ll *pem
-rw------- 1 root root 1675 Oct 9 14:20 ca-key.pem #CA用的證書
-rw-r--r-- 1 root root 1359 Oct 9 14:20 ca.pem
-rw------- 1 root root 1675 Oct 9 14:20 kube-proxy-key.pem #node要用的證書
-rw-r--r-- 1 root root 1403 Oct 9 14:20 kube-proxy.pem
-rw------- 1 root root 1675 Oct 9 14:20 server-key.pem #apiserver要用的證書
-rw-r--r-- 1 root root 1627 Oct 9 14:20 server.pem
1.4.2、部署apiserver,controller-manager和scheduler
在Master節點完成以下操作:
二進制包下載地址:
https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.16.md#v1161
部署kube-apiserver組件: #master上傳k8s-master.tar.gz
# tar -xzf k8s-master.tar.gz
先拷貝證書:
# cd kubernetes
# cp /root/TLS/k8s/*pem ssl/
# rm -rf ssl/kube-proxy*
# cp -rf /root/kubernetes /opt
#cp kube-apiserver.service kube-controller-manager.service kube-scheduler.service /usr/lib/systemd/system
# cd /opt/kubernetes/cfg
# cat kube-apiserver.conf  #(注意:下方行尾的 #註釋 僅為講解說明,寫入實際配置文件時必須刪除,否則引號內的參數會解析出錯)
KUBE_APISERVER_OPTS="--logtostderr=false \\ #輸出日誌
--v=2 \\ #日誌級別
--log-dir=/opt/kubernetes/logs \\ #日誌存儲目錄
--etcd-servers=https://192.168.1.13:2379,https://192.168.1.14:2379,https://192.168.1.15:2379 \\ #etcd服務的地址
--bind-address=192.168.1.13 \\ #apiserver監聽的地址
--secure-port=6443 \\ #apiserver監聽的端口
--advertise-address=192.168.1.13 \\ #apiserver通告地址,與node通信
--allow-privileged=true \\ #允許創建的容器賦予的權限
--service-cluster-ip-range=10.0.0.0/24 \\ #集群內部的service的虛擬ip
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \\ #啟用准入控制插件
--authorization-mode=RBAC,Node \\ #授權模式,指定的用戶賦予相應的權限
--enable-bootstrap-token-auth=true \\ #啟用bootstrap的token認證
--token-auth-file=/opt/kubernetes/cfg/token.csv \\ #token文件
--service-node-port-range=30000-32767 \\ #service 的nodeport的範圍
--kubelet-client-certificate=/opt/kubernetes/ssl/server.pem \\ #訪問kubelet的證書
--kubelet-client-key=/opt/kubernetes/ssl/server-key.pem \\
--tls-cert-file=/opt/kubernetes/ssl/server.pem \\ #配置apiserver使用https的證書
--tls-private-key-file=/opt/kubernetes/ssl/server-key.pem \\
--client-ca-file=/opt/kubernetes/ssl/ca.pem \\
--service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \\
--etcd-cafile=/opt/etcd/ssl/ca.pem \\ #連接etcd的證書
--etcd-certfile=/opt/etcd/ssl/server.pem \\
--etcd-keyfile=/opt/etcd/ssl/server-key.pem \\
--audit-log-maxage=30 \\ #日誌策略
--audit-log-maxbackup=3 \\
--audit-log-maxsize=100 \\
--audit-log-path=/opt/kubernetes/logs/k8s-audit.log"
# cat kube-controller-manager.conf
KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/opt/kubernetes/logs \\
--leader-elect=true \\
--master=127.0.0.1:8080 \\
--address=127.0.0.1 \\
--allocate-node-cidrs=true \\
--cluster-cidr=10.244.0.0/16 \\
--service-cluster-ip-range=10.0.0.0/24 \\
--cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem \\
--cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem \\
--root-ca-file=/opt/kubernetes/ssl/ca.pem \\
--service-account-private-key-file=/opt/kubernetes/ssl/ca-key.pem \\
--experimental-cluster-signing-duration=87600h0m0s"
# cat kube-scheduler.conf
KUBE_SCHEDULER_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/opt/kubernetes/logs \\
--leader-elect \\
--master=127.0.0.1:8080 \\
--address=127.0.0.1"
啟動服務:
systemctl start kube-apiserver
systemctl start kube-controller-manager
systemctl start kube-scheduler
systemctl enable kube-apiserver
systemctl enable kube-controller-manager
systemctl enable kube-scheduler
mv /opt/kubernetes/bin/kubectl /usr/local/bin
kubectl get node
kubectl get cs
1.4.3、啟用TLS Bootstrapping
為kubelet TLS Bootstrapping 授權:
# cat /opt/kubernetes/cfg/token.csv
c47ffb939f5ca36231d9e3121a252940,kubelet-bootstrap,10001,"system:node-bootstrapper"
格式:token,用戶,uid,用戶組
給kubelet-bootstrap授權:
# kubectl create clusterrolebinding kubelet-bootstrap \\
--clusterrole=system:node-bootstrapper \\
--user=kubelet-bootstrap
token也可自行生成替換: #已經設置好,不用配置
head -c 16 /dev/urandom | od -An -t x | tr -d ' '
但apiserver配置的token必須要與node節點bootstrap.kubeconfig配置裡一致。
1.5、 部署k8s node
1.5.1、安裝Docker
二進制包下載地址:
# tar -xzf k8s-node.tar.gz #k8s-node1上傳k8s-node.tar.gz
# tar zxvf docker-18.09.6.tgz
# mv docker/* /usr/bin
# mkdir /etc/docker
# mv daemon.json /etc/docker
# mv docker.service /usr/lib/systemd/system
# systemctl start docker
# systemctl status docker
# systemctl enable docker
# docker info
# docker version
1.5.2、部署kubelet和kube-proxy
拷貝證書到Node1:
#node操作
# mv kubernetes /opt
# mv kubelet.service kube-proxy.service /usr/lib/systemd/system
# master操作
# cd /root/TLS/k8s
# scp ca.pem kube-proxy*.pem [email protected]:/opt/kubernetes/ssl/
# scp ca.pem kube-proxy*.pem [email protected]:/opt/kubernetes/ssl/ #安裝node2
修改以下二個文件中IP地址: #node節點
# cd /opt/kubernetes/cfg
# grep 192 * #修改兩個配置文件ip
bootstrap.kubeconfig: server: https://192.168.1.13:6443
kube-proxy.kubeconfig: server: https://192.168.1.13:6443
# cat kubelet.conf
KUBELET_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/opt/kubernetes/logs \\
--hostname-override=k8s-node1 \\ #node2改為k8s-node2
--network-plugin=cni \\
--kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig \\
--bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig \\
--config=/opt/kubernetes/cfg/kubelet-config.yml \\
--cert-dir=/opt/kubernetes/ssl \\
--pod-infra-container-image=lizhenliang/pause-amd64:3.0"
# cat bootstrap.kubeconfig
apiVersion: v1
clusters:
- cluster:
certificate-authority: /opt/kubernetes/ssl/ca.pem
server: https://192.168.1.13:6443
name: kubernetes
contexts:
- context:
cluster: kubernetes
user: kubelet-bootstrap
name: default
current-context: default
kind: Config
preferences: {}
users:
- name: kubelet-bootstrap
user:
token: c47ffb939f5ca36231d9e3121a252940
# cat token.csv #master查看
c47ffb939f5ca36231d9e3121a252940,kubelet-bootstrap,10001,"system:node-bootstrapper"
# cat kubelet-config.yml
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: 0.0.0.0
port: 10250
readOnlyPort: 10255
cgroupDriver: cgroupfs
clusterDNS:
- 10.0.0.2
clusterDomain: cluster.local
failSwapOn: false
authentication:
anonymous:
enabled: false
webhook:
cacheTTL: 2m0s
enabled: true
x509:
clientCAFile: /opt/kubernetes/ssl/ca.pem
authorization:
mode: Webhook
webhook:
cacheAuthorizedTTL: 5m0s
cacheUnauthorizedTTL: 30s
evictionHard:
imagefs.available: 15%
memory.available: 100Mi
nodefs.available: 10%
nodefs.inodesFree: 5%
maxOpenFiles: 1000000
maxPods: 110
# cat kube-proxy.kubeconfig
apiVersion: v1
clusters:
- cluster:
certificate-authority: /opt/kubernetes/ssl/ca.pem
server: https://192.168.1.13:6443
name: kubernetes
contexts:
- context:
cluster: kubernetes
user: kube-proxy
name: default
current-context: default
kind: Config
preferences: {}
users:
- name: kube-proxy
user:
client-certificate: /opt/kubernetes/ssl/kube-proxy.pem
client-key: /opt/kubernetes/ssl/kube-proxy-key.pem
修改以下兩個文件中主機名:
# grep hostname *
kubelet.conf:--hostname-override=k8s-node1 \\
kube-proxy-config.yml:hostnameOverride: k8s-node1
# systemctl start kubelet
# systemctl start kube-proxy
# systemctl enable kubelet
# systemctl enable kube-proxy
1.5.3、允許給Node頒發證書
在master機器上驗證下:
# kubectl get csr
NAME AGE REQUESTOR CONDITION
node-csr-cJs9g9zyqS6oiTrDRYNVesRfIroo7opoDiMY33fRduo 6m45s kubelet-bootstrap Pending
# kubectl certificate approve node-csr-cJs9g9zyqS6oiTrDRYNVesRfIroo7opoDiMY33fRduo
# kubectl get nodes
1.5.4、部署CNI網絡(node節點安裝)
二進制包下載地址:
# mkdir -p /opt/cni/bin /etc/cni/net.d
# tar -xzf cni-plugins-linux-amd64-v0.8.2.tgz -C /opt/cni/bin/
# scp -r /opt/cni/ [email protected]:/opt/
# mkdir -p /etc/cni/net.d (k8s-node2執行)
確保kubelet啟用CNI:
# cat /opt/kubernetes/cfg/kubelet.conf |grep cni
--network-plugin=cni \\
在Master執行:上傳kube-flannel.yaml
# kubectl apply -f kube-flannel.yaml
# kubectl delete -f kube-flannel.yaml 取消安裝kube-flannel
# kubectl get pods -n kube-system
# kubectl describe pod kube-flannel-ds-amd64-f4rnr -n kube-system 查看pod信息
# kubectl logs kube-flannel-ds-amd64-f4rnr -n kube-system 查看pod日誌
# kubectl get pods -n kube-system -o wide
# kubectl get nodes
1.5.5、授權apiserver訪問kubelet
為提供安全性,kubelet禁止匿名訪問,必須授權才可以。
在master上傳apiserver-to-kubelet-rbac.yaml
# cat /opt/kubernetes/cfg/kubelet-config.yml
……
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 2m0s
    enabled: true
  x509:
    clientCAFile: /opt/kubernetes/ssl/ca.pem
……
授權可以用logs查看pod的日誌:
# kubectl apply -f apiserver-to-kubelet-rbac.yaml
# kubectl get pods -n kube-system
# kubectl get pods -n kube-system -o wide
1.5.6、測試
# kubectl create deployment web --image=nginx:latest #創建
# kubectl delete deployment web #刪除
# kubectl get pods
# kubectl get pods -o wide
# kubectl expose deployment web --port=80 --type=NodePort
# kubectl get pods,svc
http://192.168.1.14:31429 # node節點加端口訪問
1.6、 部署Web UI和DNS
1.6.1、部署Web UI
#官方下載
# wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-beta4/aio/deploy/recommended.yaml
# vi recommended.yaml
……
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  type: NodePort
  ports:
    - port: 443
      targetPort: 8443
      nodePort: 30001
  selector:
    k8s-app: kubernetes-dashboard
……
# kubectl apply -f recommended.yaml
# kubectl apply -f dashboard.yaml #上傳修改過的配置文件
# kubectl get pods -n kubernetes-dashboard
# kubectl get svc -n kubernetes-dashboard
創建service account並綁定默認cluster-admin管理員集群角色:
# cat dashboard-adminuser.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kubernetes-dashboard
# kubectl apply -f dashboard-adminuser.yaml
獲取token:
# kubectl -n kubernetes-dashboard describe secret $(kubectl -n kubernetes-dashboard get secret | grep admin-user | awk '{print $1}')
訪問地址: https://192.168.1.15:30001
使用輸出的token登錄Dashboard。
1.6.2、部署DNS
# kubectl apply -f coredns.yaml #上傳coredns.yaml
# kubectl get pod -n kube-system
測試dns:
# kubectl apply -f bs.yaml
# kubectl get pods
# kubectl exec -it busybox sh
ping 10.0.0.218
nslookup kubernetes
閱讀更多 愛踢人生 的文章