1. Kubernetes cluster upgrade; 2. YAML file summary; 3. etcd client usage, data backup and restore; 4. Common Kubernetes cluster maintenance commands; 5. Resource objects: rc/rs/deployment, service, volume (emptyDir, hostPath, NFS)
1. Kubernetes cluster upgrade
1. Master upgrade: upgrade the masters one at a time. First remove the master from the kube-lb configuration file on the node side and restart the kube-lb service, then upgrade that master by replacing its binaries, and finally edit the node configuration again to add the master back into the load-balancer pool and restart the kube-lb service on the nodes to complete the upgrade.
2. Node upgrade: also one node at a time. To upgrade a node, stop the kubelet and kube-proxy services, replace the node's binaries, and then start kubelet and kube-proxy again.
Master upgrade: on master1, first download the upgrade binaries from GitHub: https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.21.md
root@k8s-master1:/etc/kubeasz# cd /usr/local/src/
root@k8s-master1:/usr/local/src# wget https://dl.k8s.io/v1.21.5/kubernetes.tar.gz
root@k8s-master1:/usr/local/src# wget https://dl.k8s.io/v1.21.5/kubernetes-client-linux-amd64.tar.gz
root@k8s-master1:/usr/local/src# wget https://dl.k8s.io/v1.21.5/kubernetes-server-linux-amd64.tar.gz
root@k8s-master1:/usr/local/src# wget https://dl.k8s.io/v1.21.5/kubernetes-node-linux-amd64.tar.gz
root@k8s-master1:/usr/local/src# tar xf kubernetes-client-linux-amd64.tar.gz
root@k8s-master1:/usr/local/src# tar xf kubernetes-node-linux-amd64.tar.gz
root@k8s-master1:/usr/local/src# tar xf kubernetes-server-linux-amd64.tar.gz
root@k8s-master1:/usr/local/src# tar xf kubernetes.tar.gz
###Upgrade master1 first: comment master1 out of the kube-lb config on each node --> restart kube-lb --> copy the new binaries to the master --> uncomment it again --> restart kube-lb
root@k8s-master1:~# for i in {1..3};do ssh k8s-node$i "sed -i 's/server 192.168.241.51:6443/#server 192.168.241.51:6443/g' /etc/kube-lb/conf/kube-lb.conf && systemctl restart kube-lb.service";done
###Because master1 is the deploy machine itself, just stop the services and copy the binaries over directly
root@k8s-master1:/usr/local/src/kubernetes# systemctl stop kube-apiserver.service kube-controller-manager.service kube-scheduler.service kubelet.service kube-proxy.service
root@k8s-master1:/usr/local/src/kubernetes# \cp server/bin/{kube-apiserver,kube-controller-manager,kube-proxy,kube-scheduler,kubelet,kubectl} /usr/local/bin/
Start the services:
root@k8s-master1:/usr/local/src/kubernetes# systemctl start kube-apiserver.service kube-controller-manager.service kube-scheduler.service kubelet.service kube-proxy.service
#Check the version of master1; it should now be v1.21.5
root@k8s-master1:/usr/local/src/kubernetes# kubectl get node
NAME             STATUS                     ROLES    AGE    VERSION
192.168.241.51   Ready,SchedulingDisabled   master   102m   v1.21.5
192.168.241.52   Ready,SchedulingDisabled   master   102m   v1.21.0
192.168.241.53   Ready,SchedulingDisabled   master   96m    v1.21.0
192.168.241.57   Ready                      node     19m    v1.21.0
192.168.241.58   Ready                      node     101m   v1.21.0
192.168.241.59   Ready                      node     101m   v1.21.0
Add master1 back into the kube-lb config on the nodes and restart the kube-lb service:
root@k8s-master1:/usr/local/src/kubernetes# for i in {1..3};do ssh k8s-node$i "sed -i 's/#server 192.168.241.51:6443/server 192.168.241.51:6443/g' /etc/kube-lb/conf/kube-lb.conf && systemctl restart kube-lb.service";done
Next, upgrade master2 in the same way:
root@k8s-master1:~# for i in {1..3};do ssh k8s-node$i "sed -i 's/server 192.168.241.52:6443/#server 192.168.241.52:6443/g' /etc/kube-lb/conf/kube-lb.conf && systemctl restart kube-lb.service";done
root@k8s-master1:~# ssh k8s-master2 "systemctl stop kube-apiserver.service kube-controller-manager.service kube-scheduler.service kubelet.service kube-proxy.service"
root@k8s-master1:/etc/kubeasz# cd /usr/local/src/kubernetes/
root@k8s-master1:/usr/local/src/kubernetes# scp server/bin/{kube-apiserver,kube-controller-manager,kube-proxy,kube-scheduler,kubelet,kubectl} k8s-master2:/usr/local/bin/
root@k8s-master1:~# ssh k8s-master2 "systemctl start kube-apiserver.service kube-controller-manager.service kube-scheduler.service kubelet.service kube-proxy.service"
root@k8s-master1:/usr/local/src/kubernetes# for i in {1..3};do ssh k8s-node$i "sed -i 's/#server 192.168.241.52:6443/server 192.168.241.52:6443/g' /etc/kube-lb/conf/kube-lb.conf && systemctl restart kube-lb.service";done
root@k8s-master1:/usr/local/src/kubernetes# kubectl get node
NAME             STATUS                     ROLES    AGE    VERSION
192.168.241.51   Ready,SchedulingDisabled   master   112m   v1.21.5
192.168.241.52   Ready,SchedulingDisabled   master   112m   v1.21.5
Upgrade master3:
root@k8s-master1:~# for i in {1..3};do ssh k8s-node$i "sed -i 's/server 192.168.241.53:6443/#server 192.168.241.53:6443/g' /etc/kube-lb/conf/kube-lb.conf && systemctl restart kube-lb.service";done
root@k8s-master1:~# ssh k8s-master3 "systemctl stop kube-apiserver.service kube-controller-manager.service kube-scheduler.service kubelet.service kube-proxy.service"
root@k8s-master1:/etc/kubeasz# cd /usr/local/src/kubernetes/
root@k8s-master1:/usr/local/src/kubernetes# scp server/bin/{kube-apiserver,kube-controller-manager,kube-proxy,kube-scheduler,kubelet,kubectl} k8s-master3:/usr/local/bin/
root@k8s-master1:~# ssh k8s-master3 "systemctl start kube-apiserver.service kube-controller-manager.service kube-scheduler.service kubelet.service kube-proxy.service"
root@k8s-master1:/usr/local/src/kubernetes# for i in {1..3};do ssh k8s-node$i "sed -i 's/#server 192.168.241.53:6443/server 192.168.241.53:6443/g' /etc/kube-lb/conf/kube-lb.conf && systemctl restart kube-lb.service";done
root@k8s-master1:/usr/local/src/kubernetes# kubectl get node
NAME             STATUS                     ROLES    AGE    VERSION
192.168.241.51   Ready,SchedulingDisabled   master   117m   v1.21.5
192.168.241.52   Ready,SchedulingDisabled   master   117m   v1.21.5
192.168.241.53   Ready,SchedulingDisabled   master   111m   v1.21.5
192.168.241.57   Ready                      node     34m    v1.21.0
192.168.241.58   Ready                      node     116m   v1.21.0
192.168.241.59   Ready                      node     116m   v1.21.0
Master upgrade complete!
Node upgrade:
##Just stop kubelet and kube-proxy, copy the new binaries into the executable directory, then start the two services again
root@k8s-master1:/usr/local/src/kubernetes# ssh k8s-node1 "systemctl stop kubelet kube-proxy"
root@k8s-master1:/usr/local/src/kubernetes# scp server/bin/{kubelet,kube-proxy,kubectl} k8s-node1:/usr/local/bin
root@k8s-master1:/usr/local/src/kubernetes# ssh k8s-node1 "systemctl start kubelet kube-proxy"
root@k8s-master1:/usr/local/src/kubernetes# ssh k8s-node2 "systemctl stop kubelet kube-proxy"
root@k8s-master1:/usr/local/src/kubernetes# scp server/bin/{kubelet,kube-proxy,kubectl} k8s-node2:/usr/local/bin
root@k8s-master1:/usr/local/src/kubernetes# ssh k8s-node2 "systemctl start kubelet kube-proxy"
root@k8s-master1:/usr/local/src/kubernetes# ssh k8s-node3 "systemctl stop kubelet kube-proxy"
root@k8s-master1:/usr/local/src/kubernetes# scp server/bin/{kubelet,kube-proxy,kubectl} k8s-node3:/usr/local/bin
root@k8s-master1:/usr/local/src/kubernetes# ssh k8s-node3 "systemctl start kubelet kube-proxy"
root@k8s-master1:/usr/local/src/kubernetes# kubectl get node
NAME             STATUS                     ROLES    AGE    VERSION
192.168.241.51   Ready,SchedulingDisabled   master   123m   v1.21.5
192.168.241.52   Ready,SchedulingDisabled   master   123m   v1.21.5
192.168.241.53   Ready,SchedulingDisabled   master   117m   v1.21.5
192.168.241.57   NotReady                   node     40m    v1.21.5
192.168.241.58   Ready                      node     123m   v1.21.5
192.168.241.59   Ready                      node     123m   v1.21.5
At this point both the masters and the nodes have been upgraded.
2. YAML file summary
YAML is better suited to configuration files, while JSON is better suited to API responses. JSON can also be used for configuration, but it does not support comments. YAML and JSON can be converted into each other.
YAML format (a small example follows this list):
Case sensitive
Indentation expresses hierarchy
Tabs must not be used for indentation; indentation is usually two spaces, and items at the same level must be aligned
Comments are allowed, starting with #
Better suited to configuration files than JSON
Lists are marked with a leading dash: -
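For illustration only (not one of the cluster manifests used later), a minimal snippet that exercises these rules, together with its JSON equivalent:
# two-space indentation expresses hierarchy; '#' starts a comment
app:
  name: web
  replicas: 2
  ports:        # a list: each item starts with '- '
    - 80
    - 443
# the same data as JSON: {"app": {"name": "web", "replicas": 2, "ports": [80, 443]}}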
3. etcd client usage, data backup and restore
etcd is a distributed key-value (KV) storage system.
Run the following on any etcd node:
##etcdctl is the etcd command-line client; get usage help with:
root@k8s-etcd1:~# etcdctl member -h
root@k8s-etcd1:~# etcdctl -h
Check etcd health:
root@k8s-etcd2:~# export NODE_IPS="192.168.241.54 192.168.241.55 192.168.241.56"
root@k8s-etcd2:~# for ip in ${NODE_IPS} ;do ETCDCTL_API=3 /usr/local/bin/etcdctl --endpoints=https://${ip}:2379 --cacert=/etc/kubernetes/ssl/ca.pem --cert=/etc/kubernetes/ssl/etcd.pem --key=/etc/kubernetes/ssl/etcd-key.pem endpoint health;done
Show cluster member information: member list
root@k8s-etcd1:~# etcdctl member list
root@k8s-etcd1:~# etcdctl --write-out=table member list    #etcdctl v3 and later can run this without certificates, but passing them is recommended
ETCDCTL_API=3 /usr/local/bin/etcdctl --endpoints=https://192.168.241.56:2379 --write-out=table member list --cacert=/etc/kubernetes/ssl/ca.pem --cert=/etc/kubernetes/ssl/etcd.pem --key=/etc/kubernetes/ssl/etcd-key.pem
Show detailed node status as a table: endpoint status
export NODE_IPS="192.168.241.54 192.168.241.55 192.168.241.56"
for ip in ${NODE_IPS} ;do ETCDCTL_API=3 /usr/local/bin/etcdctl --endpoints=https://${ip}:2379 --write-out=table --cacert=/etc/kubernetes/ssl/ca.pem --cert=/etc/kubernetes/ssl/etcd.pem --key=/etc/kubernetes/ssl/etcd-key.pem endpoint status;done
You can see that the leader is etcd3. Now stop the etcd service on etcd3:
root@k8s-etcd3:/usr/local/bin# systemctl stop etcd
Check again from etcd1: the leader is no longer etcd3; a new leader is elected automatically.
root@k8s-etcd1:~# for ip in ${NODE_IPS} ;do ETCDCTL_API=3 /usr/local/bin/etcdctl --endpoints=https://${ip}:2379 --write-out=table --cacert=/etc/kubernetes/ssl/ca.pem --cert=/etc/kubernetes/ssl/etcd.pem --key=/etc/kubernetes/ssl/etcd-key.pem endpoint status;done
CRUD operations on etcd data:
    Create / update: put
    Read: get
    Delete: del
Write data:  etcdctl put /huahualin linux
Read data:   etcdctl get /huahualin
Update data: etcdctl put /huahualin centos
Delete data: etcdctl del /huahualin
List all keys:
/usr/local/bin/etcdctl get / --prefix --keys-only
Get the value of a key:
root@k8s-etcd1:~# /usr/local/bin/etcdctl get /registry/services/endpoints/default/kubernetes
For a rough (not exact) idea of how many pods there are, filter the keys:
/usr/local/bin/etcdctl get / --prefix --keys-only|grep pod |wc -l
Query calico-related data in etcd:
/usr/local/bin/etcdctl get / --prefix --keys-only|grep calico
root@k8s-etcd1:~# /usr/local/bin/etcdctl get /calico/ipam/v2/assignment/ipv4/block/10.200.169.128-26
The etcd v3 watch mechanism:
It continuously monitors data and proactively notifies clients whenever a change happens, keeping data synchronized quickly. The etcd v3 watch mechanism supports watching a single fixed key as well as a range of keys. In v2, only keys were kept in memory while values lived on disk, so disk I/O was heavy; compared with v2:
the v3 watch mechanism is more stable and can essentially achieve full data synchronization
remote calls go over gRPC, and long-lived connections improve efficiency noticeably
the directory structure is abandoned in favor of a pure key-value model
Using watch: it monitors data changes in real time; the Kubernetes components already use this through the etcd clients automatically, so we normally do not need to run watch ourselves. Example:
    etcdctl watch /huahualin
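A quick way to see watch in action, as a minimal sketch (using the same sample key /huahualin as above): run the watch in one terminal and write to the key from a second one:
# terminal 1: blocks and prints every change to the key
etcdctl watch /huahualin
# terminal 2: each command below shows up immediately in terminal 1 as a PUT or DELETE event
etcdctl put /huahualin linux
etcdctl put /huahualin centos
etcdctl del /huahualin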
Backup and restore: done with snapshots.
    WAL mechanism: write-ahead log; it records the complete history of data changes and can be used to recover data.
    etcd is a mirrored cluster: as long as every node is replicating normally they all hold the same data, so backing up a single copy is enough, and restoring a single copy is enough. Losing one node does not require a restore; a restore is only needed in the extreme case where every node has lost its data.
    etcd data backup:  ETCDCTL_API=3 /usr/local/bin/etcdctl snapshot save etcd-2021-1014.db
    etcd data restore: ETCDCTL_API=3 /usr/local/bin/etcdctl snapshot restore etcd-2021-1014.db --data-dir=/tmp/etcd    ##The restore target directory must not already exist, otherwise the command fails. The data ultimately has to live in etcd's data directory, usually /var/lib/etcd: stop the etcd service, delete the data directory, restore the data, then start etcd again (see the sketch below).
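Putting that restore sequence together on one etcd node, a minimal sketch (it assumes the default data directory /var/lib/etcd and the snapshot name used above; the certificate flags are omitted, and for a multi-member cluster snapshot restore also needs the matching --name/--initial-cluster flags, or simply use the kubeasz ./ezctl restore shown below):
systemctl stop etcd                      # stop etcd so nothing writes during the restore
rm -rf /var/lib/etcd                     # the target data directory must not exist
ETCDCTL_API=3 /usr/local/bin/etcdctl snapshot restore etcd-2021-1014.db --data-dir=/var/lib/etcd
systemctl start etcd                     # bring etcd back up on the restored data
ETCDCTL_API=3 /usr/local/bin/etcdctl endpoint health    # verify the member is healthy again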
Backing up and restoring etcd data in production with the kubeasz project:
    cd /etc/kubeasz/
    ./ezctl backup k8s-01    #under the hood this connects to one of the etcd nodes and copies the snapshot into the cluster's backup directory on the master (deploy machine)
    Restore: ./ezctl restore k8s-01
        Suppose a pod has been deleted:
            Option 1: kubectl delete pod net-test1 -n default
            Option 2: delete its key in etcd directly, e.g. etcdctl del /registry/x/x/net-test1 (deleting a key is fast)
        Data restore: again, stop the etcd service first to avoid writes, delete the data directory, then restore:
            ./ezctl restore k8s-01
4. Common Kubernetes cluster maintenance commands
kubectl get pod -A -o wide    # list all pods
kubectl get service -A    # list all services
kubectl get nodes -A    # list all nodes
kubectl describe pod pod_name -n ns_name    # if the pod is not in the default namespace, specify the namespace name after -n
If a pod fails to be created, use kubectl logs pod_name -n ns to view the container log.
If that still shows nothing, check the logs on the node:
         cat /var/log/syslog    # look for errors
         cat /var/log/kern.log
Create resources from a YAML file: kubectl create -f file.yaml --save-config --record    # similar in effect to kubectl apply -f file.yaml
kubectl create builds resources from scratch, so the YAML file must contain a complete definition; it cannot update a resource that already exists, so applying changes to something created this way normally means deleting it and creating it again (or using replace). kubectl apply is declarative: it patches only the fields listed in the file, so the YAML can contain just the attributes that need to change, and re-running it updates the live object.
In practice, kubectl apply -f file.yaml is what gets used most of the time.
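A small workflow sketch of the difference (the file name and image tag are just the sample values used later in this section):
kubectl create -f nginx.yaml --save-config --record   # first creation; fails if the Deployment already exists
# edit nginx.yaml and change only what needs updating, e.g. image: nginx:1.16.1 -> nginx:1.17.0
kubectl apply -f nginx.yaml                           # patches only the changed fields on the live object
kubectl rollout status deployment/nginx-deployment    # watch the rolling update finish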
Get a token:
root@k8s-master2:~# kubectl get secrets
NAME                  TYPE                                  DATA   AGE
default-token-7mcjc   kubernetes.io/service-account-token   3      10h
root@k8s-master2:~# kubectl describe secret default-token-7mcjc
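To extract just the token value (for example to log in to the dashboard), a hedged one-liner using the secret name listed above:
kubectl get secret default-token-7mcjc -o jsonpath='{.data.token}' | base64 -d ; echo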
5. Resource objects: rc/rs/deployment, service, volume (emptyDir, hostPath, NFS)
1. Controllers: there have been three generations (a small selector sketch follows this list):
     Replication Controller: the replica controller, RC; the first-generation pod replica controller. It only supports equality-based selectors (=, !=) and mainly controls the number of replicas; it is rarely used now.
     ReplicaSet: the second-generation pod replica controller, RS. Besides the RC-style selector it also supports set-based matching such as in and not in, so it can match a wider range; it also controls replicas.
     Deployment: the third-generation pod replica controller. Under the hood it still drives a ReplicaSet, but it adds more advanced features on top, such as rolling upgrades and rollback. Pods created this way have names made of three parts: deploymentname-replicasetname-podname.
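A minimal illustration of the set-based selector that RS/Deployment accept (the labels are the same ones used in the manifests below):
  selector:
    matchExpressions:                       # set-based selection, RS/Deployment only
    - {key: app, operator: In, values: [ng-deploy-80, ng-rs-81]}
    # equality-based selection (equivalent to what an RC selector can express):
    # matchLabels:
    #   app: ng-deploy-80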
2. Service: ipvsadm -Ln shows the service mappings. There are two access types: access to a service from inside the cluster uses ClusterIP; access to a service from outside the Kubernetes cluster uses NodePort, so it can be reached through the host.
ClusterIP:
cat nginx.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 1
  selector:
    matchExpressions:
    - {key: app, operator: In, values: [ng-deploy-80,ng-rs-81]}
  template:
    metadata:
      labels:
        app: ng-deploy-80
    spec:
      containers:
      - name: ng-deploy-80
        image: nginx:1.16.1
        ports:
        - containerPort: 80
Create the Deployment:  kubectl apply -f nginx.yaml
root@k8s-master1:~/yaml/service# cat service.yaml
apiVersion: v1
kind: Service
metadata:
  name: ng-deploy-80
spec:
  ports:
  - name: http
    port: 80
    targetPort: 80
    protocol: TCP
  type: ClusterIP
  selector:            # required so the service picks up the deployment's pods
    app: ng-deploy-80
Create the service:  kubectl apply -f service.yaml
Check it: on the Kubernetes dashboard (log in to port 30002 on any node, e.g. https://192.168.133.59:30002), open Pods and you can see the corresponding pod.
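Besides the dashboard, the ClusterIP service can also be checked from the command line, a quick sketch (the actual CLUSTER-IP value will differ per cluster):
kubectl get svc ng-deploy-80 -o wide    # note the CLUSTER-IP column
kubectl get ep ng-deploy-80             # the endpoints should list the pod IP on port 80
# from any node inside the cluster, curl the ClusterIP printed above:
# curl http://<CLUSTER-IP>:80/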
NodePort:
Still using the same nginx.yaml as above.
root@k8s-master1:~/yaml/service# cat nodePort-svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: ng-deploy-80
spec:
  ports:
  - name: http
    port: 90
    targetPort: 80
    nodePort: 30012
    protocol: TCP
  type: NodePort
  selector:
    app: ng-deploy-80
Create the service:  kubectl apply -f nodePort-svc.yaml
In a browser, access port 30012 on any node, e.g. http://192.168.133.59:30012/
Access through a load balancer: going through node port 30012 directly is not very convenient, so configure the following on ha1 and ha2:
root@k8s-ha1:~# vim /etc/haproxy/haproxy.cfg
listen huahualin-nginx-80
? bind 192.168.241.62:80
? mode tcp
? server node1 192.168.241.57:30012 check inter 3s fall 3 rise 3
? server node2 192.168.241.58:30012 check inter 3s fall 3 rise 3
? server node3 192.168.241.59:30012 check inter 3s fall 3 rise 3
root@k8s-ha1:~# systemctl restart haproxy.service
Access the service in a browser at http://192.168.241.62:80/ ; you can also resolve a domain name to this address locally and use the domain instead.
3. Volume: multiple types are supported, e.g. nfs, hostPath, emptyDir, cinder, rbd, etc.
Examples: mounting with emptyDir, hostPath, and NFS.
emptyDir: a local temporary volume; it starts as an empty directory and is temporary, so when the container is deleted the data in the emptyDir is deleted with it. The /cache mount point inside the container is created automatically if it does not exist. After creating files under that directory, use kubectl get pods -o wide to find which node the pod runs on, then look under /var/lib/kubelet/pods/ on that node to find this pod's volume and see the files backing /cache.
root@k8s-master1:~/yaml/service# cat nginx.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 1
  selector:
    matchExpressions:
    - {key: app, operator: In, values: [ng-deploy-80,ng-rs-81]}
  template:
    metadata:
      labels:
        app: ng-deploy-80
    spec:
      containers:
      - name: ng-deploy-80
        image: nginx:1.16.1
        ports:
        - containerPort: 80
        volumeMounts:
        - mountPath: /cache          # created automatically inside the container
          name: cache-volume
      volumes:
      - name: cache-volume
        emptyDir: {}
kubectl apply -f nginx.yaml
(For comparison, with hostPath the data is not deleted when the container is deleted; see the hostPath example further below.) Now verify the emptyDir:
root@k8s-master1:~/yaml/volume# kubectl exec -it nginx-deployment-98f46f4cc-2kbjd bash
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
root@nginx-deployment-98f46f4cc-2kbjd:/# cd? /cache/
root@nginx-deployment-98f46f4cc-2kbjd:/cache# echo 333 >> test
##Check which node the pod is on (here it is on .59), then go to that node and check whether the test file was created in the pod's emptyDir
root@k8s-master1:~/yaml/service# kubectl get pods -o wide
NAME                               READY   STATUS    RESTARTS   AGE   IP               NODE             NOMINATED NODE   READINESS GATES
nginx-deployment-98f46f4cc-2kbjd   1/1     Running   0          11h   10.200.107.195   192.168.241.59   <none>           <none>
##On .59, check whether the test file exists under this pod's emptyDir mount:
root@k8s-node3# find /var/lib/kubelet/* -name cache*
/var/lib/kubelet/pods/29767367-c535-491d-b1c0-beaaff531849/plugins/kubernetes.io~empty-dir/cache-volume
/var/lib/kubelet/pods/29767367-c535-491d-b1c0-beaaff531849/volumes/kubernetes.io~empty-dir/cache-volume
cd /var/lib/kubelet/pods/29767367-c535-491d-b1c0-beaaff531849/volumes/kubernetes.io~empty-dir/cache-volume
root@k8s-node3:/var/lib/kubelet/pods/29767367-c535-491d-b1c0-beaaff531849/volumes/kubernetes.io~empty-dir/cache-volume# echo ddd > test
Now check inside the container again and the new string is there as well.
hostPath: data can be persisted, but it cannot be shared; it only exists on the current host. If the pod is deleted it may be rescheduled, quite possibly onto a different host that does not have this hostPath data, so after the container is recreated elsewhere its previous data is effectively lost.
root@k8s-master1:~/yaml/volume# cat hostpath.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 1
  selector:
    matchExpressions:
    - {key: app, operator: In, values: [ng-deploy-80,ng-rs-81]}
  template:
    metadata:
      labels:
        app: ng-deploy-80
    spec:
      containers:
      - name: ng-deploy-80
        image: nginx:1.16.1
        ports:
        - containerPort: 80
        volumeMounts:
        - mountPath: /cache
          name: cache-volume
      volumes:
      - name: cache-volume
        hostPath:
          path: /tmp/cache
Check which node it was created on; the hostPath path /tmp/cache is created automatically:
root@k8s-master1:~/yaml/volume# kubectl get pods -o wide
NAME                                READY   STATUS    RESTARTS   AGE     IP               NODE             NOMINATED NODE   READINESS GATES
nginx-deployment-5cc98d6c56-sdtvc   1/1     Running   0          4m43s   10.200.169.131   192.168.241.58   <none>           <none>
Then check on node .58: /tmp/cache is there:
root@k8s-node2:~# ls /tmp/cache/
From master1, exec into the container and create a file:
root@k8s-master1:~/yaml/volume# kubectl exec -it nginx-deployment-5cc98d6c56-sdtvc? bash
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
root@nginx-deployment-5cc98d6c56-sdtvc:/# echo 123 > /cache/nginx.log
Check node .58 again; the file nginx.log has been created:
root@k8s-node2:~# ls /tmp/cache/
nginx.log
NFS: network-file-system shared storage; multiple pods can mount the same NFS export at the same time.
##On ha01:
  First install NFS on ha01:
apt update
apt install nfs-server
mkdir /data/nfs -p
vi /etc/exports
/data/nfs *(rw,no_root_squash)    # no space is allowed between the client spec and the options in parentheses
systemctl restart nfs-server.service
systemctl enable nfs-server.service
showmount -e    # if the export is listed, it can be mounted
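For pods to mount this export, each Kubernetes node also needs the NFS client utilities, because the kubelet performs the mount on the node itself (a sketch; on Ubuntu the package is nfs-common):
# run on every node (k8s-node1..3)
apt update && apt install -y nfs-common
showmount -e 192.168.241.62    # each node should see /data/nfs in the export list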
###On master1, expose node port 30016:
root@k8s-master1:~/yaml/volume# cat nfs.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 1
  selector:
    matchExpressions:
    - {key: app, operator: In, values: [ng-deploy-80,ng-rs-81]}
  template:
    metadata:
      labels:
        app: ng-deploy-80
    spec:
      containers:
      - name: ng-deploy-80
        image: nginx:1.16.1
        ports:
        - containerPort: 80
        volumeMounts:
        - mountPath: /usr/share/nginx/html/mysite
          name: my-nfs-volume
      volumes:
      - name: my-nfs-volume
        nfs:
          server: 192.168.241.62
          path: /data/nfs
---
apiVersion: v1
kind: Service
metadata:
  name: ng-deploy-80
spec:
  ports:
  - name: http
    port: 81
    targetPort: 80
    nodePort: 30016
    protocol: TCP
  type: NodePort
  selector:
    app: ng-deploy-80
showmount -e ha_ip    #check the shared directories available for mounting; if they are listed, they can be mounted
##On ha01, update the load balancer to the new node port:
root@k8s-ha1:~# cat /etc/haproxy/haproxy.cfg
listen huahualin-nginx-80
? bind 192.168.241.62:80
? mode tcp
? server node1 192.168.241.57:30016 check inter 3s fall 3 rise 3
? server node2 192.168.241.58:30016 check inter 3s fall 3 rise 3
? server node3 192.168.241.59:30016 check inter 3s fall 3 rise 3
Restart the service: systemctl restart haproxy
#Access 192.168.241.62:80 in a browser
Access the dashboard: https://192.168.241.58:30002
  Exec into the newly created container and you can see the mounted directory /usr/share/nginx/html/mysite:
root@nginx-deployment-7964d774d9-ntz6g:/# df -h
Filesystem                Size  Used Avail Use% Mounted on
overlay                    29G   12G   16G  42% /
tmpfs                      64M     0   64M   0% /dev
tmpfs                     975M     0  975M   0% /sys/fs/cgroup
/dev/sda5                  29G   12G   16G  42% /etc/hosts
shm                        64M     0   64M   0% /dev/shm
192.168.241.62:/data/nfs   29G  9.1G   19G  34% /usr/share/nginx/html/mysite
tmpfs                     975M   12K  975M   1% /run/secrets/kubernetes.io/serviceaccount
tmpfs                     975M     0  975M   0% /proc/acpi
tmpfs                     975M     0  975M   0% /proc/scsi
tmpfs                     975M     0  975M   0% /sys/firmware
Put an image file flowers1.jpg into /data/nfs on ha1.
Access 192.168.241.62:80/mysite/flowers1.jpg
The mount is not actually done inside the pod: a container has no kernel of its own, so the volume is mounted on the node and then mapped into the container. Check which node the pod was created on:
root@k8s-master1:~/yaml/volume# kubectl get pod -o wide
NAME                                READY   STATUS    RESTARTS   AGE   IP               NODE             NOMINATED NODE   READINESS GATES
nginx-deployment-7964d774d9-ntz6g   1/1     Running   1          47m   10.200.107.201   192.168.241.59   <none>           <none>
Check on 192.168.241.59:
root@k8s-node3:~# df -Th
192.168.241.62:/data/nfs  nfs4   29G  9.1G   19G  34% /var/lib/kubelet/pods/0cdfdfa7-c8e5-4cad-b5a3-747f931a6a59/volumes/kubernetes.io~nfs/my-nfs-volume
What if you need to mount more than one NFS volume?
  ##On master1, add a second mount for js files: mount the NFS path /data/nfs/js onto /usr/share/nginx/html/js
vi nfs.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 1
  selector:
    matchExpressions:
    - {key: app, operator: In, values: [ng-deploy-80,ng-rs-81]}
  template:
    metadata:
      labels:
        app: ng-deploy-80
    spec:
      containers:
      - name: ng-deploy-80
        image: nginx:1.16.1
        ports:
        - containerPort: 80
        volumeMounts:
        - mountPath: /usr/share/nginx/html/mysite
          name: my-nfs-volume
        - mountPath: /usr/share/nginx/html/js
          name: my-nfs-js
      volumes:
      - name: my-nfs-volume
        nfs:
          server: 192.168.241.62
          path: /data/nfs
      - name: my-nfs-js
        nfs:
          server: 192.168.241.62
          path: /data/nfs/js
---
apiVersion: v1
kind: Service
metadata:
  name: ng-deploy-80
spec:
  ports:
  - name: http
    port: 81
    targetPort: 80
    nodePort: 30016
    protocol: TCP
  type: NodePort
  selector:
    app: ng-deploy-80
kubectl apply -f nfs.yaml
    On ha01, create the /data/nfs/js directory:
    mkdir /data/nfs/js
    Write any static file to stand in for a real js file:  vi 1.js
    On the dashboard, exec into the container and check: both mount points, /usr/share/nginx/html/js and /usr/share/nginx/html/mysite, are there:
root@nginx-deployment-79454b55b8-jbh4s:/# df -h
Filesystem                   Size  Used Avail Use% Mounted on
overlay                       29G   12G   16G  42% /
tmpfs                         64M     0   64M   0% /dev
tmpfs                        975M     0  975M   0% /sys/fs/cgroup
/dev/sda5                     29G   12G   16G  42% /etc/hosts
shm                           64M     0   64M   0% /dev/shm
192.168.241.62:/data/nfs/js   29G  9.1G   19G  34% /usr/share/nginx/html/js
tmpfs                        975M   12K  975M   1% /run/secrets/kubernetes.io/serviceaccount
192.168.241.62:/data/nfs      29G  9.1G   19G  34% /usr/share/nginx/html/mysite
tmpfs                        975M     0  975M   0% /proc/acpi
tmpfs                        975M     0  975M   0% /proc/scsi
tmpfs                        975M     0  975M   0% /sys/firmware
Access it in the browser:
http://192.168.241.62/js/1.js