Using KubeSphere as a K8s Management Platform

Introduction

KubeSphere is an open-source cluster management system built on top of Kubernetes. Compared with managing a cluster purely through kubectl commands, KubeSphere simplifies day-to-day Kubernetes operations and streamlines the management and development of cloud-native applications.

Official documentation (in Chinese): https://kubesphere.io/zh/docs/

Deployment

The deployment below uses the current latest version, v3.3.2.
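
Before installing, it helps to confirm that the cluster already has a usable StorageClass, since the configuration below references longhorn for both persistence and monitoring. A quick check from any node with kubectl access:

kubectl get storageclass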

Installation

First, download the installer and configuration files from GitHub:

[root@k8s-101 ~]# mkdir -p /hxz393/local/kubesphere/apply
[root@k8s-101 ~]# cd /hxz393/local/kubesphere/apply
[root@k8s-101 apply]# wget https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/kubesphere-installer.yaml
[root@k8s-101 apply]# wget https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/cluster-configuration.yaml

Modify the configuration file as needed; the contents used here are as follows:

[root@k8s-101 ~]# cat /hxz393/local/kubesphere/apply/cluster-configuration.yaml
---
apiVersion: installer.kubesphere.io/v1alpha1
kind: ClusterConfiguration
metadata:
  name: ks-installer
  namespace: kubesphere-system
  labels:
    version: v3.3.2
spec:
  persistence:
    storageClass: "longhorn"   # If there is no default StorageClass in your cluster, you need to specify an existing StorageClass here.
  authentication:
    # adminPassword: ""     # Custom password of the admin user. If the parameter exists but the value is empty, a random password is generated. If the parameter does not exist, P@88w0rd is used.
    jwtSecret: ""           # Keep the jwtSecret consistent with the Host Cluster. Retrieve the jwtSecret by executing "kubectl -n kubesphere-system get cm kubesphere-config -o yaml | grep -v "apiVersion" | grep jwtSecret" on the Host Cluster.
  local_registry: ""        # Add your private registry address if it is needed.
  # dev_tag: ""               # Add your kubesphere image tag you want to install, by default it's same as ks-installer release version.
  etcd:
    monitoring: true        # Enable or disable etcd monitoring dashboard installation. You have to create a Secret for etcd before you enable it.
    endpointIps: "192.168.1.101,192.168.1.102,192.168.1.103"  # etcd cluster EndpointIps. It can be a bunch of IPs here.
    port: 2379              # etcd port.
    tlsEnable: true
  common:
    core:
      console:
        enableMultiLogin: true  # Enable or disable simultaneous logins. It allows different users to log in with the same account at the same time.
        port: 10004
        type: NodePort

    # apiserver:            # Enlarge the apiserver and controller manager's resource requests and limits for the large cluster
    #  resources: {}
    # controllerManager:
    #  resources: {}
    redis:
      enabled: false
      enableHA: false
      volumeSize: 2Gi # Redis PVC size.
    openldap:
      enabled: false
      volumeSize: 2Gi   # openldap PVC size.
    minio:
      volumeSize: 20Gi # Minio PVC size.
    monitoring:
      # type: external   # Whether to specify the external prometheus stack, and need to modify the endpoint at the next line.
      endpoint: http://prometheus-operated.kubesphere-monitoring-system.svc:9090 # Prometheus endpoint to get metrics data.
      GPUMonitoring:     # Enable or disable the GPU-related metrics. If you enable this switch but have no GPU resources, Kubesphere will set it to zero.
        enabled: false
    gpu:                 # Install GPUKinds. The default GPU kind is nvidia.com/gpu. Other GPU kinds can be added here according to your needs.
      kinds:
      - resourceName: "nvidia.com/gpu"
        resourceType: "GPU"
        default: true
    es:   # Storage backend for logging, events and auditing.
      master:
        volumeSize: 1Gi  # The volume size of Elasticsearch master nodes.
        replicas: 1      # The total number of master nodes. Even numbers are not allowed.
      #   resources: {}
      data:
        volumeSize: 10Gi  # The volume size of Elasticsearch data nodes.
        replicas: 1       # The total number of data nodes.
      #   resources: {}
      logMaxAge: 7             # Log retention time in built-in Elasticsearch. It is 7 days by default.
      elkPrefix: logstash      # The string making up index names. The index name will be formatted as ks-<elk_prefix>-log.
      basicAuth:
        enabled: false
        username: ""
        password: ""
      externalElasticsearchHost: ""
      externalElasticsearchPort: ""
  alerting:                # (CPU: 0.1 Core, Memory: 100 MiB) It enables users to customize alerting policies to send messages to receivers in time with different time intervals and alerting levels to choose from.
    enabled: true         # Enable or disable the KubeSphere Alerting System.
    thanosruler:
      replicas: 1
  auditing:                # Provide a security-relevant chronological set of records,recording the sequence of activities happening on the platform, initiated by different tenants.
    enabled: true          # Enable or disable the KubeSphere Auditing Log System.
    # operator:
    #   resources: {}
    # webhook:
    #   resources: {}
  devops:                  # (CPU: 0.47 Core, Memory: 8.6 G) Provide an out-of-the-box CI/CD system based on Jenkins, and automated workflow tools including Source-to-Image & Binary-to-Image.
    enabled: false             # Enable or disable the KubeSphere DevOps System.
    # resources: {}
    jenkinsMemoryLim: 4Gi      # Jenkins memory limit.
    jenkinsMemoryReq: 2Gi   # Jenkins memory request.
    jenkinsVolumeSize: 8Gi     # Jenkins volume size.
  events:                  # Provide a graphical web console for Kubernetes Events exporting, filtering and alerting in multi-tenant Kubernetes clusters.
    enabled: true          # Enable or disable the KubeSphere Events System.
    # operator:
    #   resources: {}
    # exporter:
    #   resources: {}
    ruler:
      enabled: true
      replicas: 1
    #   resources: {}
  logging:                 # (CPU: 57 m, Memory: 2.76 G) Flexible logging functions are provided for log query, collection and management in a unified console. Additional log collectors can be added, such as Elasticsearch, Kafka and Fluentd.
    enabled: true         # Enable or disable the KubeSphere Logging System.
    logsidecar:
      enabled: true
      replicas: 1
      # resources: {}
  metrics_server:                    # (CPU: 56 m, Memory: 44.35 MiB) It enables HPA (Horizontal Pod Autoscaler).
    enabled: true                   # Enable or disable metrics-server.
  monitoring:
    storageClass: "longhorn"                 # If there is an independent StorageClass you need for Prometheus, you can specify it here. The default StorageClass is used by default.
    node_exporter:
      port: 9100
      # resources: {}
    # kube_rbac_proxy:
    #   resources: {}
    # kube_state_metrics:
    #   resources: {}
    prometheus:
      replicas: 1  # Prometheus replicas are responsible for monitoring different segments of data source and providing high availability.
      volumeSize: 5Gi  # Prometheus PVC size.
    #   resources: {}
    #   operator:
    #     resources: {}
    alertmanager:
      replicas: 1          # AlertManager Replicas.
    #   resources: {}
    notification_manager:
    #   resources: {}
    #   operator:
    #     resources: {}
    #   proxy:
    #     resources: {}
    gpu:                           # GPU monitoring-related plug-in installation.
      nvidia_dcgm_exporter:        # Ensure that gpu resources on your hosts can be used normally, otherwise this plug-in will not work properly.
        enabled: false             # Check whether the labels on the GPU hosts contain "nvidia.com/gpu.present=true" to ensure that the DCGM pod is scheduled to these nodes.
        # resources: {}
  multicluster:
    clusterRole: none  # host | member | none  # You can install a solo cluster, or specify it as the Host or Member Cluster.
  network:
    networkpolicy: # Network policies allow network isolation within the same cluster, which means firewalls can be set up between certain instances (Pods).
      # Make sure that the CNI network plugin used by the cluster supports NetworkPolicy. There are a number of CNI network plugins that support NetworkPolicy, including Calico, Cilium, Kube-router, Romana and Weave Net.
      enabled: false # Enable or disable network policies.
    ippool: # Use Pod IP Pools to manage the Pod network address space. Pods to be created can be assigned IP addresses from a Pod IP Pool.
      type: none # Specify "calico" for this field if Calico is used as your CNI plugin. "none" means that Pod IP Pools are disabled.
    topology: # Use Service Topology to view Service-to-Service communication based on Weave Scope.
      type: "weave-scope" # Specify "weave-scope" for this field to enable Service Topology. "none" means that Service Topology is disabled.
  openpitrix: # An App Store that is accessible to all platform tenants. You can use it to manage apps across their entire lifecycle.
    store:
      enabled: false # Enable or disable the KubeSphere App Store.
  servicemesh:         # (0.3 Core, 300 MiB) Provide fine-grained traffic management, observability and tracing, and visualized traffic topology.
    enabled: false     # Base component (pilot). Enable or disable KubeSphere Service Mesh (Istio-based).
    istio:  # Customizing the istio installation configuration, refer to https://istio.io/latest/docs/setup/additional-setup/customize-installation/
      components:
        ingressGateways:
        - name: istio-ingressgateway
          enabled: false
        cni:
          enabled: false
  edgeruntime:          # Add edge nodes to your cluster and deploy workloads on edge nodes.
    enabled: false
    kubeedge:        # kubeedge configurations
      enabled: false
      cloudCore:
        cloudHub:
          advertiseAddress: # At least a public IP address or an IP address which can be accessed by edge nodes must be provided.
            - ""            # Note that once KubeEdge is enabled, CloudCore will malfunction if the address is not provided.
        service:
          cloudhubNodePort: "30000"
          cloudhubQuicNodePort: "30001"
          cloudhubHttpsNodePort: "30002"
          cloudstreamNodePort: "30003"
          tunnelNodePort: "30004"
        # resources: {}
        # hostNetWork: false
      iptables-manager:
        enabled: true 
        mode: "external"
        # resources: {}
      # edgeService:
      #   resources: {}
  gatekeeper:        # Provide admission policy and rule management, A validating (mutating TBA) webhook that enforces CRD-based policies executed by Open Policy Agent.
    enabled: false   # Enable or disable Gatekeeper.
    # controller_manager:
    #   resources: {}
    # audit:
    #   resources: {}
  terminal:
    # image: 'alpine:3.15' # There must be an nsenter program in the image
    timeout: 600         # Container timeout, if set to 0, no timeout will be used. The unit is seconds

Create the Secret used by monitoring to access etcd:

[root@k8s-101 ~]# kubectl create ns kubesphere-monitoring-system
[root@k8s-101 ~]# kubectl -n kubesphere-monitoring-system create secret generic kube-etcd-client-certs  \
--from-file=etcd-client-ca.crt=/etc/kubernetes/pki/etcd/ca.crt  \
--from-file=etcd-client.crt=/etc/kubernetes/pki/etcd/healthcheck-client.crt  \
--from-file=etcd-client.key=/etc/kubernetes/pki/etcd/healthcheck-client.key
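
As an optional sanity check (not part of the official steps), confirm the Secret exists before continuing:

kubectl -n kubesphere-monitoring-system get secret kube-etcd-client-certs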

Start the deployment:

[root@k8s-101 ~]# kubectl apply -f /hxz393/local/kubesphere/apply/kubesphere-installer.yaml
customresourcedefinition.apiextensions.k8s.io/clusterconfigurations.installer.kubesphere.io created
namespace/kubesphere-system created
serviceaccount/ks-installer created
clusterrole.rbac.authorization.k8s.io/ks-installer created
clusterrolebinding.rbac.authorization.k8s.io/ks-installer created
deployment.apps/ks-installer created
[root@k8s-101 ~]# kubectl apply -f /hxz393/local/kubesphere/apply/cluster-configuration.yaml
clusterconfiguration.installer.kubesphere.io/ks-installer created

Continuously follow the ks-installer logs:

[root@k8s-101 ~]# kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-installer -o jsonpath='{.items[0].metadata.name}') -f

Wait until the login account information appears in the logs:

#####################################################

Console: http://192.168.1.101:10004
Account: admin
Password: P@88w0rd
NOTES:

1. After you log into the console, please check the
monitoring status of service components in
“Cluster Management”. If any service is not
ready, please wait patiently until all components
are up and running.
2. Please change the default password after login.

#####################################################
https://kubesphere.io 2023-05-21 23:20:30
#####################################################

This indicates the initial installation has finished, and KubeSphere can now be accessed at the address shown above.
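
As an optional verification from the command line, the console Service (with the NodePort 10004 configured above) and the component Pods can be checked with standard kubectl commands:

kubectl get svc ks-console -n kubesphere-system
kubectl get pod -n kubesphere-system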

Upgrade

Upgrades must be performed one minor version at a time. For example, upgrading to v3.3.0 must be done from v3.2.x:

[root@k8s-101 ~]# kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.3.0/kubesphere-installer.yaml  --force
[root@k8s-101 ~]# kubectl apply -f /hxz393/local/kubesphere/apply/cluster-configuration.yaml
[root@k8s-101 ~]# kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-installer -o jsonpath='{.items[0].metadata.name}') -f
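
To confirm which version is actually running after the upgrade, one possible check (an assumption, not an official step) is to read the image tag of the ks-console Deployment:

kubectl -n kubesphere-system get deploy ks-console -o jsonpath='{.spec.template.spec.containers[0].image}'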

Uninstall

Download the deletion script from the scripts directory of the official repository, make it executable, and run it:

[root@k8s-101 ~]# cd /hxz393/local/kubesphere/apply/
[root@k8s-101 apply]# curl -JLO https://raw.githubusercontent.com/kubesphere/ks-installer/master/scripts/kubesphere-delete.sh
[root@k8s-101 apply]# chmod 744 kubesphere-delete.sh 
[root@k8s-101 apply]# ./kubesphere-delete.sh 
Note:

Delete the KubeSphere cluster, including the module kubesphere-system kubesphere-devops-system kubesphere-devops-worker kubesphere-monitoring-system kubesphere-logging-system openpitrix-system.
Please reconfirm that you want to delete the KubeSphere cluster.  (yes/no) 

If the deletion gets stuck, you can set the resource's finalizers to an empty list. For example, if deleting rulers.events.kubesphere.io hangs, patch its finalizers with kubectl patch:

[root@k8s-101 apply]# kubectl patch crd/rulers.events.kubesphere.io -p '{"metadata":{"finalizers":[]}}' --type=merge
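
To locate other resources that may be blocking the cleanup, listing namespaces stuck in Terminating and the remaining KubeSphere CRDs is a reasonable starting point (illustrative commands, adjust as needed):

kubectl get ns | grep Terminating
kubectl get crd | grep kubesphere.io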

Configuration

Configuration is generally done through the web console.

Changing the Password

After deployment, open the console in a browser. The default login username is admin and the default password is P@88w0rd. Change the administrator password after logging in.

If the cluster administrator password is forgotten, reset it by running a command on the host cluster. The following sets the admin password to cXrHk1111:

[root@k8s-101 apply]# kubectl patch users admin -p '{"spec":{"password":"cXrHk1111"}}' --type='merge' && kubectl annotate users admin iam.kubesphere.io/password-encrypted-

Running the reset command on the master node of another cluster does not take effect:

[root@k8s-101 apply]# kubectl get users
NAME      EMAIL                   STATUS
admin     admin@kubesphere.io     Active
hxz393    hxz393@x2b.net          Active

Creating a User

Create a user hxz393 with view-only permissions.

Under "Platform Management" > "Access Control" > "Platform Roles", create a new platform role viewer:

  • Under Clusters, check the view permissions.
  • Under Access Control, check all view permissions.
  • Click OK to save.

Under "Platform Management" > "Access Control" > "Users", create a new user hxz393:

  • Select viewer as the platform role.
  • Set the email address to hxz393@x2b.net.
  • Set the password to CQ*^[t=>o;F84HfCF]FpaD#H_u&3K.?OBv2NI#Xd,4l-X+Qggl*ke*1[nN*n~O[^
  • Click OK to save.

If a regular user forgets their password, the administrator cannot change it for them; the only option is to delete the user and recreate it.
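
As a sketch, assuming CLI access to the host cluster, the user can also be removed through the users custom resource shown earlier and then recreated in the web console:

kubectl delete users hxz393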

Installing Pluggable Components

Log in with the administrator account, go to "Cluster Management" > "CRDs", search for clusterconfiguration, and open it.

Edit the YAML of ks-installer, set enabled to true for the desired components, and click OK; the installation is then updated in the background.
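
The same change can be made from the command line by editing the ClusterConfiguration directly and then following the installer logs, using the same commands that appear in the multi-cluster section below:

kubectl edit cc ks-installer -n kubesphere-system
kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f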

Configuring Alerts

Under "Platform Management" > "Platform Settings", select "Notification Management" > "Notification Channels" > "DingTalk" to configure sending alert messages to a DingTalk group:

  • Enter the Webhook URL from the DingTalk group robot settings page.

  • Enter the Secret key.

  • In the filter conditions, select the alert levels to include; here all levels are included.

  • Click Send Test Message to verify connectivity, then click OK to save.

Configuring a Domain Name

Add an Nginx configuration so the service can be accessed via the domain kubesphere.x2b.net. Example:

[root@k8s-101 ~]# vi /hxz393/local/nginx/config/conf.d/kubesphere.x2b.net.conf
server {
    listen 80;
    listen 443 ssl;
    server_name kubesphere.x2b.net;

    ssl_certificate   /cert/x2b.net.pem;
    ssl_certificate_key  /cert/x2b.net.key;
    ssl_session_timeout 5m;
    ssl_ciphers ECDHE-RSA-AES128-GCM-SHA256:ECDHE:ECDH:AES:HIGH:!NULL:!aNULL:!MD5:!ADH:!RC4;
    ssl_protocols TLSv1.2 TLSv1.3;
    ssl_prefer_server_ciphers on;
    location / {
        proxy_pass http://ks-console.kubesphere-system/;
        proxy_set_header Host   $host;
        proxy_set_header X-Real-IP      $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    }
    
    location /api/ {
        proxy_http_version 1.1;
        proxy_redirect off;
        proxy_pass http://ks-console.kubesphere-system;
        proxy_set_header    Host $host:$server_port;
        proxy_set_header    Upgrade $http_upgrade;
        proxy_set_header    X-Forwarded-Proto $scheme;
        proxy_set_header    Connection "upgrade"; 
        proxy_set_header    X-Forwarded-For $proxy_add_x_forwarded_for;
    }

    location /apis/monitoring.coreos.com/ {
        proxy_pass http://ks-console.kubesphere-system;
        proxy_redirect off;
        proxy_set_header        Host $host:$server_port;
        proxy_set_header        X-Real-IP $remote_addr;
        proxy_set_header        X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_connect_timeout  3600s;
        proxy_read_timeout  3600s;
        proxy_send_timeout  3600s;
        send_timeout  3600s;
    }

    location /api/v1/ {
        proxy_pass http://ks-console.kubesphere-system;
        # proxy_redirect off;
        proxy_set_header        Host $host:$server_port;
        proxy_set_header        X-Real-IP $remote_addr;
        proxy_set_header        X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_connect_timeout  3600s;
        proxy_read_timeout  3600s;
        proxy_send_timeout  3600s;
        send_timeout  3600s;
    }

    location /api/clusters/ {
        proxy_pass http://ks-console.kubesphere-system;
        # proxy_redirect off;
        proxy_set_header        Host $host:$server_port;
        proxy_set_header        X-Real-IP $remote_addr;
        proxy_set_header        X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_connect_timeout  3600s;
        proxy_read_timeout  3600s;
        proxy_send_timeout  3600s;
        send_timeout  3600s;
    }
	
    location /apis/storage.k8s.io {
        proxy_pass http://ks-console.kubesphere-system;
        proxy_redirect off;
        proxy_set_header        Host $host:$server_port;
        proxy_set_header        X-Real-IP $remote_addr;
        proxy_set_header        X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_connect_timeout  3600s;
        proxy_read_timeout  3600s;
        proxy_send_timeout  3600s;
        send_timeout  3600s;
    }

    location /apis/apps/v1/namespaces/ {
        proxy_http_version 1.1;
        proxy_pass http://ks-console.kubesphere-system;
        proxy_redirect off;
        proxy_set_header        Host $host:$server_port;
        proxy_set_header        X-Real-IP $remote_addr;
        proxy_set_header        X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_connect_timeout  3600s;
        proxy_read_timeout  3600s;
        proxy_send_timeout  3600s;
        send_timeout  3600s;
    }
    
    location /kapis/resources.kubesphere.io/v1alpha2/namespaces {
        proxy_http_version 1.1;
        proxy_pass http://ks-console.kubesphere-system;
        proxy_redirect off;
        proxy_set_header        Host $host:$server_port;
        proxy_set_header        X-Real-IP $remote_addr;
        proxy_set_header        X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_connect_timeout  3600s;
        proxy_read_timeout  3600s;
        proxy_send_timeout  3600s;
    }
    
    location /kapis/resources.kubesphere.io/ {
        proxy_http_version 1.1;
        proxy_redirect off;
        proxy_pass http://ks-console.kubesphere-system;
        proxy_set_header    Host $host:$server_port;
        proxy_set_header    Upgrade $http_upgrade;
        proxy_set_header    X-Forwarded-Proto $scheme;
        proxy_set_header    Connection "upgrade"; 
        proxy_set_header    X-Forwarded-For $proxy_add_x_forwarded_for;
    }

    location /apis/devops.kubesphere.io/ {
        proxy_http_version 1.1;
        proxy_pass http://ks-console.kubesphere-system;
        proxy_redirect off;
        proxy_set_header        Host $host:$server_port;
        proxy_set_header        X-Real-IP $remote_addr;
        proxy_set_header        X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_connect_timeout  3600s;
        proxy_read_timeout  3600s;
        proxy_send_timeout  3600s;
        send_timeout  3600s;
    }

    location /apis/apps/v1/ {
        proxy_http_version 1.1;
        proxy_redirect off;
        proxy_pass http://ks-console.kubesphere-system;
        proxy_set_header    Host $host:$server_port;
        proxy_set_header    Upgrade $http_upgrade;
        proxy_set_header    X-Forwarded-Proto $scheme;
        proxy_set_header    Connection "upgrade"; 
        proxy_set_header    X-Forwarded-For $proxy_add_x_forwarded_for;
    }

    location /apis/ {
        proxy_http_version 1.1;
        proxy_redirect off;
        proxy_pass http://ks-console.kubesphere-system;
        proxy_set_header    Host $host:$server_port;
        proxy_set_header    X-Forwarded-Proto $scheme;
        proxy_set_header    X-Forwarded-For $proxy_add_x_forwarded_for;
    }

    location /api/v1/watch/namespaces {
        proxy_pass http://ks-console.kubesphere-system;
        proxy_set_header    Host $host:$server_port;
        proxy_set_header    Upgrade $http_upgrade;
        proxy_set_header    X-Forwarded-Proto $scheme;
        proxy_set_header    Connection "upgrade"; 
        proxy_set_header    X-Forwarded-For $proxy_add_x_forwarded_for;
    }
	

    location /kapis/terminal.kubesphere.io/ {
        proxy_http_version 1.1;
        proxy_redirect off;
        proxy_pass http://ks-console.kubesphere-system;
        proxy_set_header    Host $host:$server_port;
        proxy_set_header    Upgrade $http_upgrade;
        proxy_set_header    X-Forwarded-Proto $scheme;
        proxy_set_header    Connection "upgrade"; 
        proxy_set_header    X-Forwarded-For $proxy_add_x_forwarded_for;
    }
    location /kapis/clusters/member/terminal.kubesphere.io/ {
        proxy_http_version 1.1;
        proxy_redirect off;
        proxy_pass http://ks-console.kubesphere-system;
        proxy_set_header    Host $host:$server_port;
        proxy_set_header    Upgrade $http_upgrade;
        proxy_set_header    X-Forwarded-Proto $scheme;
        proxy_set_header    Connection "upgrade"; 
        proxy_set_header    X-Forwarded-For $proxy_add_x_forwarded_for;
    }
    location /kapis/clusters/host/terminal.kubesphere.io/ {
        proxy_http_version 1.1;
        proxy_redirect off;
        proxy_pass http://ks-console.kubesphere-system;
        proxy_set_header    Host $host:$server_port;
        proxy_set_header    Upgrade $http_upgrade;
        proxy_set_header    X-Forwarded-Proto $scheme;
        proxy_set_header    Connection "upgrade"; 
        proxy_set_header    X-Forwarded-For $proxy_add_x_forwarded_for;
    }
}

[root@k8s-101 ~]# for i in $(kubectl get pod -n local|grep nginx|awk '{print $1}');do kubectl exec -it -n local $i -- nginx -s reload; done
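
The configuration can also be validated inside the Nginx Pods with nginx -t before reloading, using the same loop:

for i in $(kubectl get pod -n local|grep nginx|awk '{print $1}');do kubectl exec -it -n local $i -- nginx -t; done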

Multi-Cluster

KubeSphere supports multi-cluster management.

Host Cluster Configuration

Also called the Host cluster. Edit the ClusterConfiguration resource and set clusterRole to host:

[root@k8s-250 ~]# kubectl edit cc ks-installer -n kubesphere-system
...
    storageClass: longhorn
  multicluster:
    clusterRole: host
  network:
...
[root@k8s-250 ~]# kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f

Retrieve the jwtSecret used for cluster authentication:

[root@k8s-250 ~]# kubectl -n kubesphere-system get cm kubesphere-config -o yaml | grep -v "apiVersion" | grep jwtSecret
      jwtSecret: MzRbXxHS4LFxfPacw0SUc0lNVdcjtImv

Member Cluster Configuration

On the member cluster, likewise set clusterRole to member and fill in the jwtSecret value from the host cluster:

[root@k8s-master1-pro ~]# kubectl edit cc ks-installer -n kubesphere-system
...
  authentication:
    jwtSecret: MzRbXxHS4LFxfPacw0SUc0lNVdcjtImv
...
  multicluster:
    clusterRole: member
...
[root@k8s-master1-pro ~]# kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f

Importing a Member Cluster

Finally, import the member cluster on the host cluster:

  • Log in to the host cluster console as the administrator; an Add Cluster button now appears on the cluster management page.

  • Click Add, enter a cluster name, select labels, enter a description, and click Next.

  • On the cluster settings page, choose Direct Connection as the connection method. Paste the contents of the $HOME/.kube/config file from the member cluster's master node. Make sure the server address in it is directly reachable from the host cluster (see the sketch after this list).

  • Click Create to finish.
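
To check which server address the kubeconfig points at before pasting it, the address can be printed on the member cluster's master node (standard kubectl, shown as a sketch):

kubectl config view --minify -o jsonpath='{.clusters[0].cluster.server}'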

Once the member cluster has been added successfully, it can be managed from the host cluster.
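
On the host cluster, the imported clusters and their readiness can also be listed through the clusters custom resource created by KubeSphere's multi-cluster component (shown here as an assumed verification step):

kubectl get clusters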