Creating dynamic storage in the Kubernetes harbor namespace with Ceph RBD

[root@bs-k8s-ceph ~]# ceph osd pool create harbor 128
Error ETIMEDOUT: crush test failed with -110: timed out during smoke test (5 seconds)
// I don't know the root cause of this error; after waiting a little while the same command simply succeeded
[root@bs-k8s-ceph ~]# ceph osd pool create harbor 128
pool harbor created
[root@bs-k8s-ceph ceph]# ceph auth get-or-create client.harbor mon 'allow r' osd 'allow class-read, allow rwx pool=harbor' -o ceph.client.harbor.keyring
[root@bs-k8s-ceph ceph]# ceph auth get client.harbor
exported keyring for client.harbor
[client.harbor]
        key = AQDoCklen6e4NxAAVXmy/PG+R5iH8fNzMhk6Jg==
        caps mon = "allow r"
        caps osd = "allow class-read, allow rwx pool=harbor"
[root@bs-k8s-node01 ~]# ceph auth get-key client.admin | base64
QVFDNmNVSmV2eU8yRnhBQVBxYzE5Mm5PelNnZk5acmg5aEFQYXc9PQ==
[root@bs-k8s-node01 ~]# ceph auth get-key client.harbor | base64
QVFEb0NrbGVuNmU0TnhBQVZYbXkvUEcrUjVpSDhmTnpNaGs2Smc9PQ==
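The two base64 strings above are exactly what the Kubernetes Secrets created later expect in their key field. Before handing the pool over to the provisioner, it can also be worth tagging and initializing it for RBD use. The commands below are not part of the original transcript; they are a minimal sketch assuming a Luminous-or-newer Ceph cluster:

# Optional sanity checks / pool initialization (assumed, not from the original run)
ceph osd pool ls | grep harbor               # the pool exists
ceph auth get client.harbor                  # the client caps look right
ceph osd pool application enable harbor rbd  # tag the pool for RBD (Luminous+)
rbd pool init harbor                         # initialize RBD metadata in the pool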
[root@bs-k8s-master01 ~]# kubectl get nodes
The connection to the server 20.0.0.250:8443 was refused - did you specify the right host or port?
[root@bs-hk-hk01 ~]# systemctl status haproxy
● haproxy.service - HAProxy Load Balancer
   Loaded: loaded (/usr/lib/systemd/system/haproxy.service; enabled; vendor preset: disabled)
   Active: failed (Result: exit-code) since 日 2020-02-16 17:16:43 CST; 12min ago
  Process: 1168 ExecStart=/usr/sbin/haproxy -Ws -f /etc/haproxy/haproxy.cfg -p /var/lib/haproxy/haproxy.pid (code=exited, status=134)
 Main PID: 1168 (code=exited, status=134)

2月 15 20:22:54 bs-hk-hk01 haproxy[1168]: [WARNING] 045/202254 (1184) : Server k8s_api_nodes...ue.
2月 15 20:25:15 bs-hk-hk01 haproxy[1168]: [WARNING] 045/202515 (1183) : Server k8s_api_nodes...ue.
2月 15 20:25:15 bs-hk-hk01 haproxy[1168]: [WARNING] 045/202515 (1184) : Server k8s_api_nodes...ue.
2月 15 20:26:03 bs-hk-hk01 haproxy[1168]: [WARNING] 045/202603 (1184) : Server k8s_api_nodes...ue.
2月 15 20:26:03 bs-hk-hk01 haproxy[1168]: [WARNING] 045/202603 (1183) : Server k8s_api_nodes...ue.
2月 15 20:26:13 bs-hk-hk01 haproxy[1168]: [WARNING] 045/202613 (1183) : Server k8s_api_nodes...ue.
2月 15 20:26:13 bs-hk-hk01 haproxy[1168]: [WARNING] 045/202613 (1184) : Server k8s_api_nodes...ue.
2月 16 17:16:43 bs-hk-hk01 systemd[1]: haproxy.service: main process exited, code=exited, st...n/a
2月 16 17:16:44 bs-hk-hk01 systemd[1]: Unit haproxy.service entered failed state.
2月 16 17:16:44 bs-hk-hk01 systemd[1]: haproxy.service failed.
Hint: Some lines were ellipsized, use -l to show in full.
[root@bs-hk-hk01 ~]# systemctl start haproxy
[root@bs-hk-hk01 ~]# systemctl status haproxy
● haproxy.service - HAProxy Load Balancer
   Loaded: loaded (/usr/lib/systemd/system/haproxy.service; enabled; vendor preset: disabled)
   Active: active (running) since 日 2020-02-16 17:30:03 CST; 1s ago
  Process: 4196 ExecStartPre=/usr/sbin/haproxy -f /etc/haproxy/haproxy.cfg -c -q (code=exited, status=0/SUCCESS)
 Main PID: 4212 (haproxy)
   CGroup: /system.slice/haproxy.service
           ├─4212 /usr/sbin/haproxy -Ws -f /etc/haproxy/haproxy.cfg -p /var/lib/haproxy/haproxy....
           ├─4216 /usr/sbin/haproxy -Ws -f /etc/haproxy/haproxy.cfg -p /var/lib/haproxy/haproxy....
           └─4217 /usr/sbin/haproxy -Ws -f /etc/haproxy/haproxy.cfg -p /var/lib/haproxy/haproxy....

2月 16 17:30:00 bs-hk-hk01 systemd[1]: Starting HAProxy Load Balancer...
2月 16 17:30:03 bs-hk-hk01 systemd[1]: Started HAProxy Load Balancer.
2月 16 17:30:04 bs-hk-hk01 haproxy[4212]: [WARNING] 046/173004 (4212) : config : option for...de.
2月 16 17:30:04 bs-hk-hk01 haproxy[4212]: [WARNING] 046/173004 (4212) : config : option for...de.
2月 16 17:30:04 bs-hk-hk01 haproxy[4212]: [WARNING] 046/173004 (4212) : Proxy stats: in mu...st.
2月 16 17:30:04 bs-hk-hk01 haproxy[4212]: [NOTICE] 046/173004 (4212) : New worker #1 (4216) forked
2月 16 17:30:04 bs-hk-hk01 haproxy[4212]: [NOTICE] 046/173004 (4212) : New worker #2 (4217) forked
Hint: Some lines were ellipsized, use -l to show in full.
[root@bs-hk-hk01 ~]# systemctl enable haproxy
[root@bs-k8s-master01 ~]# kubectl get nodes
NAME              STATUS   ROLES    AGE    VERSION
bs-k8s-master01   Ready    master   7d6h   v1.17.2
bs-k8s-master02   Ready    master   7d6h   v1.17.2
bs-k8s-master03   Ready    master   7d6h   v1.17.2
bs-k8s-node01     Ready    <none>   7d6h   v1.17.2
bs-k8s-node02     Ready    <none>   7d6h   v1.17.2
bs-k8s-node03     Ready    <none>   7d6h   v1.17.2
[root@bs-k8s-master01 ~]# kubectl get pods --all-namespaces
NAMESPACE     NAME                                        READY   STATUS    RESTARTS   AGE
default       rbd-provisioner-75b85f85bd-8ftdm            1/1     Running   11         7d6h
kube-system   calico-node-4jxbp                           1/1     Running   4          7d6h
kube-system   calico-node-7t9cj                           1/1     Running   7          7d6h
kube-system   calico-node-cchgl                           1/1     Running   14         7d6h
kube-system   calico-node-czj76                           1/1     Running   6          7d6h
kube-system   calico-node-lxb2s                           1/1     Running   14         7d6h
kube-system   calico-node-nmg9t                           1/1     Running   8          7d6h
kube-system   coredns-7f9c544f75-bwx9p                    1/1     Running   4          7d6h
kube-system   coredns-7f9c544f75-q58mr                    1/1     Running   3          7d6h
kube-system   dashboard-metrics-scraper-6b66849c9-qtwzx   1/1     Running   2          7d5h
kube-system   etcd-bs-k8s-master01                        1/1     Running   17         7d6h
kube-system   etcd-bs-k8s-master02                        1/1     Running   7          7d6h
kube-system   etcd-bs-k8s-master03                        1/1     Running   32         7d6h
kube-system   kube-apiserver-bs-k8s-master01              1/1     Running   28         7d6h
kube-system   kube-apiserver-bs-k8s-master02              1/1     Running   15         7d6h
kube-system   kube-apiserver-bs-k8s-master03              1/1     Running   62         7d6h
kube-system   kube-controller-manager-bs-k8s-master01     1/1     Running   32         7d6h
kube-system   kube-controller-manager-bs-k8s-master02     1/1     Running   27         7d6h
kube-system   kube-controller-manager-bs-k8s-master03     1/1     Running   31         7d6h
kube-system   kube-proxy-26ffm                            1/1     Running   3          7d6h
kube-system   kube-proxy-298tr                            1/1     Running   5          7d6h
kube-system   kube-proxy-hzsmb                            1/1     Running   3          7d6h
kube-system   kube-proxy-jb4sq                            1/1     Running   4          7d6h
kube-system   kube-proxy-pt94r                            1/1     Running   4          7d6h
kube-system   kube-proxy-wljwv                            1/1     Running   4          7d6h
kube-system   kube-scheduler-bs-k8s-master01              1/1     Running   32         7d6h
kube-system   kube-scheduler-bs-k8s-master02              1/1     Running   21         7d6h
kube-system   kube-scheduler-bs-k8s-master03              1/1     Running   31         7d6h
kube-system   kubernetes-dashboard-887cbd9c6-j7ptq        1/1     Running   22         7d5h
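The "connection refused" above was nothing more than haproxy being down on the load balancer in front of the apiservers. A quick way to confirm the VIP is healthy again, beyond kubectl working, is shown below; this is a sketch and not part of the original transcript (20.0.0.250:8443 is the apiserver VIP used above):

# On the haproxy node: confirm the frontend is listening on the VIP port
ss -tnlp | grep 8443
# From any master or worker: the VIP should answer instead of refusing the connection
curl -k https://20.0.0.250:8443/version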
[root@bs-k8s-master01 harbor]# pwd
/data/k8s/harbor
[root@bs-k8s-master01 rbd]# kubectl apply -f ceph-harbor-namespace.yaml
namespace/harbor created
[root@bs-k8s-master01 rbd]# kubectl get namespaces
NAME              STATUS   AGE
default           Active   7d8h
harbor            Active   16s
kube-node-lease   Active   7d8h
kube-public       Active   7d8h
kube-system       Active   7d8h
[root@bs-k8s-master01 rbd]# cat ceph-harbor-namespace.yaml
##########################################################################
#Author: zisefeizhu
#QQ: 2********0
#Date: 2020-02-16
#FileName: ceph-harbor-namespace.yaml
#URL: https://www.cnblogs.com/zisefeizhu/
#Description: The test script
#Copyright (C): 2020 All rights reserved
##########################################################################
apiVersion: v1
kind: Namespace
metadata:
  name: harbor
[root@bs-k8s-master01 rbd]# kubectl apply -f external-storage-rbd-provisioner.yaml
serviceaccount/rbd-provisioner created
clusterrole.rbac.authorization.k8s.io/rbd-provisioner unchanged
clusterrolebinding.rbac.authorization.k8s.io/rbd-provisioner configured
role.rbac.authorization.k8s.io/rbd-provisioner created
rolebinding.rbac.authorization.k8s.io/rbd-provisioner created
deployment.apps/rbd-provisioner created
[root@bs-k8s-master01 rbd]# kubectl get pods -n harbor -o wide
NAME                               READY   STATUS    RESTARTS   AGE     IP             NODE            NOMINATED NODE   READINESS GATES
rbd-provisioner-75b85f85bd-dhnr4   1/1     Running   0          3m48s   10.209.46.84   bs-k8s-node01   <none>           <none>
[root@bs-k8s-master01 rbd]# cat external-storage-rbd-provisioner.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: rbd-provisioner
  namespace: harbor
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rbd-provisioner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
  - apiGroups: [""]
    resources: ["services"]
    resourceNames: ["kube-dns"]
    verbs: ["list", "get"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rbd-provisioner
subjects:
  - kind: ServiceAccount
    name: rbd-provisioner
    namespace: harbor
roleRef:
  kind: ClusterRole
  name: rbd-provisioner
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: rbd-provisioner
  namespace: harbor
rules:
- apiGroups: [""]
  resources: ["secrets"]
  verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: rbd-provisioner
  namespace: harbor
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: rbd-provisioner
subjects:
- kind: ServiceAccount
  name: rbd-provisioner
  namespace: harbor
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: rbd-provisioner
  namespace: harbor
spec:
  replicas: 1
  selector:
    matchLabels:
      app: rbd-provisioner
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: rbd-provisioner
    spec:
      containers:
      - name: rbd-provisioner
        image: "quay.io/external_storage/rbd-provisioner:latest"
        env:
        - name: PROVISIONER_NAME
          value: ceph.com/rbd
      serviceAccount: rbd-provisioner
[root@bs-k8s-master01 harbor]# kubectl apply -f ceph-harbor-secret.yaml
secret/ceph-harbor-admin-secret created
secret/ceph-harbor-harbor-secret created
[root@bs-k8s-master01 harbor]# kubectl get secret -n harbor
NAME                          TYPE                                  DATA   AGE
ceph-harbor-admin-secret      kubernetes.io/rbd                     1      23s
ceph-harbor-harbor-secret     kubernetes.io/rbd                     1      23s
default-token-8k9gs           kubernetes.io/service-account-token   3      8m49s
rbd-provisioner-token-mhl29   kubernetes.io/service-account-token   3      5m24s
[root@bs-k8s-master01 harbor]# cat ceph-harbor-secret.yaml
##########################################################################
#Author: zisefeizhu
#QQ: 2********0
#Date: 2020-02-16
#FileName: ceph-harbor-secret.yaml
#URL: https://www.cnblogs.com/zisefeizhu/
#Description: The test script
#Copyright (C): 2020 All rights reserved
##########################################################################
apiVersion: v1
kind: Secret
metadata:
  name: ceph-harbor-admin-secret
  namespace: harbor
data:
  key: QVFDNmNVSmV2eU8yRnhBQVBxYzE5Mm5PelNnZk5acmg5aEFQYXc9PQ==
type: kubernetes.io/rbd
---
apiVersion: v1
kind: Secret
metadata:
  name: ceph-harbor-harbor-secret
  namespace: harbor
data:
  key: QVFEb0NrbGVuNmU0TnhBQVZYbXkvUEcrUjVpSDhmTnpNaGs2Smc9PQ==
type: kubernetes.io/rbd
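The key values in ceph-harbor-secret.yaml are simply the base64-encoded output of the ceph auth get-key commands shown earlier. An equivalent imperative way to create the same two Secrets (a sketch, not how they were created above; it assumes the ceph CLI and admin keyring are available on the host running kubectl) would be:

# kubectl base64-encodes literal values itself, so the raw key is passed directly
kubectl -n harbor create secret generic ceph-harbor-admin-secret \
  --type=kubernetes.io/rbd \
  --from-literal=key="$(ceph auth get-key client.admin)"
kubectl -n harbor create secret generic ceph-harbor-harbor-secret \
  --type=kubernetes.io/rbd \
  --from-literal=key="$(ceph auth get-key client.harbor)"

If the provisioner later fails to bind a claim, checking its logs is usually the fastest diagnosis: kubectl -n harbor logs deploy/rbd-provisioner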
[root@bs-k8s-master01 harbor]# kubectl apply -f ceph-harbor-storageclass.yaml
storageclass.storage.k8s.io/ceph-harbor created
[root@bs-k8s-master01 harbor]# kubectl get sc
NAME          PROVISIONER    RECLAIMPOLICY   VOLUMEBINDINGMODE   ALLOWVOLUMEEXPANSION   AGE
ceph-harbor   ceph.com/rbd   Retain          Immediate           false                  11s
ceph-rbd      ceph.com/rbd   Retain          Immediate           false                  25h
[root@bs-k8s-master01 harbor]# cat ceph-harbor-storageclass.yaml
##########################################################################
#Author: zisefeizhu
#QQ: 2********0
#Date: 2020-02-16
#FileName: ceph-harbor-storageclass.yaml
#URL: https://www.cnblogs.com/zisefeizhu/
#Description: The test script
#Copyright (C): 2020 All rights reserved
##########################################################################
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: ceph-harbor
  annotations:
    storageclass.kubernetes.io/is-default-class: "false"
provisioner: ceph.com/rbd
reclaimPolicy: Retain
parameters:
  monitors: 20.0.0.206:6789,20.0.0.207:6789,20.0.0.208:6789
  adminId: admin
  adminSecretName: ceph-harbor-admin-secret
  adminSecretNamespace: harbor
  pool: harbor
  fsType: xfs
  userId: harbor
  userSecretName: ceph-harbor-harbor-secret
  imageFormat: "2"
  imageFeatures: "layering"
[root@bs-k8s-master01 harbor]# kubectl apply -f ceph-harbor-pvc.yaml
persistentvolumeclaim/pvc-ceph-harbor created
[root@bs-k8s-master01 harbor]# kubectl get pv -n harbor
NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                    STORAGECLASS   REASON   AGE
pvc-494a130d-018c-4be3-9b31-e951cc4367a5   20Gi       RWO            Retain           Bound    default/wp-pv-claim      ceph-rbd                23h
pvc-4df6a301-c9f3-4694-8271-d1d0184c00aa   1Gi        RWO            Retain           Bound    harbor/pvc-ceph-harbor   ceph-harbor             6s
pvc-8ffa3182-a2f6-47d9-a71d-ff8e8b379a16   1Gi        RWO            Retain           Bound    default/ceph-pvc         ceph-rbd                26h
pvc-ac7d3a09-123e-4614-886c-cded8822a078   20Gi       RWO            Retain           Bound    default/mysql-pv-claim   ceph-rbd                23h
[root@bs-k8s-master01 harbor]# kubectl get pvc
NAME             STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
ceph-pvc         Bound    pvc-8ffa3182-a2f6-47d9-a71d-ff8e8b379a16   1Gi        RWO            ceph-rbd       26h
mysql-pv-claim   Bound    pvc-ac7d3a09-123e-4614-886c-cded8822a078   20Gi       RWO            ceph-rbd       23h
wp-pv-claim      Bound    pvc-494a130d-018c-4be3-9b31-e951cc4367a5   20Gi       RWO            ceph-rbd       23h
[root@bs-k8s-master01 harbor]# kubectl get pvc -n harbor
NAME              STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
pvc-ceph-harbor   Bound    pvc-4df6a301-c9f3-4694-8271-d1d0184c00aa   1Gi        RWO            ceph-harbor    24s
[root@bs-k8s-master01 harbor]# cat ceph-harbor-pvc.yaml
##########################################################################
#Author: zisefeizhu
#QQ: 2********0
#Date: 2020-02-16
#FileName: ceph-harbor-pvc.yaml
#URL: https://www.cnblogs.com/zisefeizhu/
#Description: The test script
#Copyright (C): 2020 All rights reserved
##########################################################################
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-ceph-harbor
  namespace: harbor
spec:
  storageClassName: ceph-harbor
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
// At this point, dynamic PV provisioning under the harbor namespace is complete.
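As a follow-up check (not part of the original transcript), the Bound claim can be traced back to an actual RBD image in the harbor pool on the Ceph side; the image name is generated by the provisioner (typically kubernetes-dynamic-pvc-<uuid>) and will differ per run:

rbd ls -p harbor                 # should list the dynamically created image
rbd info harbor/<image-name>     # <image-name> is whatever the previous command printed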

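Finally, a hypothetical illustration (not part of the original deployment) of how any workload in the harbor namespace can now mount the claim like a normal volume; the pod name is made up for the example:

cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: pvc-ceph-harbor-test       # hypothetical name, for illustration only
  namespace: harbor
spec:
  containers:
  - name: app
    image: busybox
    command: ["sh", "-c", "df -h /data && sleep 3600"]
    volumeMounts:
    - name: data
      mountPath: /data             # the RBD-backed volume appears here, formatted as xfs per the StorageClass
  volumes:
  - name: data
    persistentVolumeClaim:
      claimName: pvc-ceph-harbor   # the PVC created above
EOF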
 
