Kubernetes Shared Storage with GlusterFS

Install the GlusterFS client

# install on all k8s nodes
yum install -y glusterfs glusterfs-fuse
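
A quick sanity check that the client installed and that the fuse kernel module is available (glusterfs-fuse mounts depend on it):

glusterfs --version
modprobe fuse && lsmod | grep fuse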

Configure /etc/hosts

172.16.1.228   gfs1
172.16.1.244   gfs2
172.16.1.248   gfs3
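
These names must resolve on every k8s node (and on the heketi host). One way to append them, with a resolution check (a sketch, same IPs as above):

cat >> /etc/hosts <<'EOF'
172.16.1.228   gfs1
172.16.1.244   gfs2
172.16.1.248   gfs3
EOF
getent hosts gfs1 gfs2 gfs3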

Mount a volume

# the mount point must exist before mounting; mongo_volume is assumed to already exist on the cluster
mkdir -p /opt/gfsmg
mount -t glusterfs gfs1:/mongo_volume /opt/gfsmg
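
To survive reboots, the mount can also be recorded in /etc/fstab; the _netdev option defers mounting until the network is up (a sketch, same volume and mount point as above):

echo 'gfs1:/mongo_volume /opt/gfsmg glusterfs defaults,_netdev 0 0' >> /etc/fstab
df -h /opt/gfsmg   # verify: the filesystem column shows gfs1:/mongo_volume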

Deploy heketi

# open the firewall: by default heketi serves its RESTful API on TCP port 8080
vi /etc/sysconfig/iptables
-A INPUT -p tcp -m state --state NEW -m tcp --dport 8080 -j ACCEPT
service iptables restart

# install heketi
yum install -y centos-release-gluster
yum install -y heketi heketi-client

Configure heketi.json
vi /etc/heketi/heketi.json
{
  "_port_comment": "Heketi Server Port Number",
  "port": "8080",

  "_use_auth": "Enable JWT authorization. Please enable for deployment",
  "use_auth": true,

  "_jwt": "Private keys for access",
  "jwt": {
    "_admin": "Admin has access to all APIs",
    "admin": {
      "key": "admin@123"
    },
    "_user": "User only has access to /volumes endpoint",
    "user": {
      "key": "user@123"
    }
  },

  "_glusterfs_comment": "GlusterFS Configuration",
  "glusterfs": {
    "_executor_comment": [
      "Execute plugin. Possible choices: mock, ssh",
      "mock: This setting is used for testing and development.",
      "      It will not send commands to any node.",
      "ssh:  This setting will notify Heketi to ssh to the nodes.",
      "      It will need the values in sshexec to be configured.",
      "kubernetes: Communicate with GlusterFS containers over",
      "            Kubernetes exec api."
    ],
    "executor": "ssh",
    "_sshexec_comment": "SSH username and private key file information",
    "sshexec": {
      "keyfile": "/etc/heketi/heketi_key",
      "user": "root",
      "port": "22",
      "fstab": "/etc/fstab"
    },

    "_kubeexec_comment": "Kubernetes configuration",
    "kubeexec": {
      "host" :"https://kubernetes.host:8443",
      "cert" : "/path/to/crt.file",
      "insecure": false,
      "user": "kubernetes username",
      "password": "password for kubernetes user",
      "namespace": "OpenShift project or Kubernetes namespace",
      "fstab": "Optional: Specify fstab file on node.  Default is /etc/fstab"
    },

    "_db_comment": "Database file name",
    "db": "/var/lib/heketi/heketi.db",

    "_loglevel_comment": [
      "Set log level. Choices are:",
      "  none, critical, error, warning, info, debug",
      "Default is warning"
    ],
    "loglevel" : "warning"
  }
}

# set up passwordless SSH from heketi to the GlusterFS nodes
ssh-keygen -t rsa -q -f /etc/heketi/heketi_key -N ""

# the heketi service runs as the heketi user, which must be able to read the newly generated key, otherwise the service cannot start
chown heketi:heketi /etc/heketi/heketi_key

# distribute the public key to each GlusterFS node
ssh-copy-id -i /etc/heketi/heketi_key.pub root@gfs1
ssh-copy-id -i /etc/heketi/heketi_key.pub root@gfs2
ssh-copy-id -i /etc/heketi/heketi_key.pub root@gfs3
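
Verify that the key grants passwordless access (a quick check; gluster is already installed on the storage nodes):

ssh -i /etc/heketi/heketi_key root@gfs1 'gluster --version'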

# start heketi
# the systemd unit installed by the yum package contains one error:
# in /usr/lib/systemd/system/heketi.service, "-config=/etc/heketi/heketi.json" must be changed to "--config=/etc/heketi/heketi.json";
# otherwise heketi fails to start with "Error: unknown shorthand flag: 'c' in -config=/etc/heketi/heketi.json"
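# a one-line fix (a sketch; assumes the stock ExecStart line), then reload systemd:
sed -i 's| -config=| --config=|' /usr/lib/systemd/system/heketi.service
systemctl daemon-reload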
systemctl enable heketi
systemctl restart heketi
systemctl status heketi

# verify
curl http://localhost:8080/hello
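
The endpoint should answer with a greeting (Hello from Heketi). To also confirm that JWT auth works, run an authenticated heketi-cli command; the cluster list stays empty until the topology is loaded below:

heketi-cli --server http://localhost:8080 --user admin --secret admin@123 cluster list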

Set up the GlusterFS cluster

# the GlusterFS cluster is assembled from the topology.json definition;
# the devices field lists each node's block devices (multiple disks are allowed); they must be raw devices with no filesystem on them
vi /etc/heketi/topology.json
{
    "clusters": [
        {
            "nodes": [
                {
                    "node": {
                        "hostnames": {
                            "manage": [
                                "172.16.1.248"
                            ],
                            "storage": [
                                "172.16.1.248"
                            ]
                        },
                        "zone": 1
                    },
                    "devices": [
                        "/dev/vda2"
                    ]
                },
                {
                    "node": {
                        "hostnames": {
                            "manage": [
                                "172.16.1.244"
                            ],
                            "storage": [
                                "172.16.1.244"
                            ]
                        },
                        "zone": 2
                    },
                    "devices": [
                        "/dev/vda2"
                    ]
                },
                {
                    "node": {
                        "hostnames": {
                            "manage": [
                                "172.16.1.228"
                            ],
                            "storage": [
                                "172.16.1.228"
                            ]
                        },
                        "zone": 3
                    },
                    "devices": [
                        "/dev/vda2"
                    ]
                }
            ]
        }
    ]
}

# assemble the GlusterFS cluster
heketi-cli --server http://localhost:8080 --user admin --secret admin@123 topology load --json=/etc/heketi/topology.json

# view the heketi topology
heketi-cli --server http://localhost:8080 --user admin --secret admin@123 topology info
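
Optionally verify end-to-end provisioning with a throwaway volume (a sketch; the 1 GiB size and replica count are arbitrary, and <volume-id> comes from the create/list output):

heketi-cli --server http://localhost:8080 --user admin --secret admin@123 volume create --size=1 --replica=3
heketi-cli --server http://localhost:8080 --user admin --secret admin@123 volume list
heketi-cli --server http://localhost:8080 --user admin --secret admin@123 volume delete <volume-id>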

Create a StorageClass

apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: gluster-storageclass
provisioner: kubernetes.io/glusterfs
reclaimPolicy: Delete
parameters:
  resturl: "http://172.30.200.80:8080"
  restauthenabled: "true"
  restuser: "admin"
  secretNamespace: "default"
  secretName: "heketi-secret"
  volumetype: "replicate:2"
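
The StorageClass references a heketi-secret that must already exist. A minimal sketch: the in-tree glusterfs provisioner expects the secret type kubernetes.io/glusterfs and a data key named key, holding the base64 of the admin key from heketi.json:

apiVersion: v1
kind: Secret
metadata:
  name: heketi-secret
  namespace: default
type: kubernetes.io/glusterfs
data:
  # echo -n "admin@123" | base64
  key: YWRtaW5AMTIz

Apply both objects (file names are illustrative):

kubectl apply -f heketi-secret.yaml
kubectl apply -f gluster-storageclass.yaml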

Dynamically provision GlusterFS PVs through the StorageClass

For a full usage example, see apps/Mongo集群.md
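
A minimal PVC sketch against the StorageClass above (the name gluster-pvc is illustrative); applying it makes the provisioner ask heketi for a volume and bind a PV automatically:

kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: gluster-pvc
spec:
  storageClassName: gluster-storageclass
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 2Gi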

Manually create a PV backed by GlusterFS

# configure Endpoints (any valid port number works here; the glusterfs volume plugin does not use it)
vi glusterfs-endpoints.json

{
  "kind": "Endpoints",
  "apiVersion": "v1",
  "metadata": {
    "name": "glusterfs-cluster"
  },
  "subsets": [
    {
      "addresses": [
        {
          "ip": "172.16.1.228"
        }
      ],
      "ports": [
        {
          "port": 1984
        }
      ]
    },
    {
      "addresses": [
        {
          "ip": "172.16.1.244"
        }
      ],
      "ports": [
        {
          "port": 1984
        }
      ]
    },
    {
      "addresses": [
        {
          "ip": "172.16.1.248"
        }
      ],
      "ports": [
        {
          "port": 1984
        }
      ]
    }
  ]
}

# apply glusterfs-endpoints.json
kubectl apply -f glusterfs-endpoints.json
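
The upstream Kubernetes GlusterFS example also creates a selector-less Service with the same name so the Endpoints persist; the port just has to match the Endpoints object:

vi glusterfs-service.json

{
  "kind": "Service",
  "apiVersion": "v1",
  "metadata": {
    "name": "glusterfs-cluster"
  },
  "spec": {
    "ports": [
      {
        "port": 1984
      }
    ]
  }
}

kubectl apply -f glusterfs-service.json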

# create the PV
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv001
spec:
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteMany
  glusterfs:
    endpoints: "glusterfs-cluster"
    path: "mongo_volume"
    readOnly: false


# create the PVC (the 8Gi request is satisfied by the 10Gi PV above; a PVC binds only to a PV of equal or larger capacity)
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: pvc001
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 8Gi

Create a Mongo Deployment that uses the GlusterFS PV

cat>mongo-deployment-pvc.yaml<<EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: mongo-app
spec:
  replicas: 1
  selector:
    matchLabels:
      name: mongo-app
  template:
    metadata:
      labels:
        name: mongo-app
    spec:
      containers:
      - name: mongo-app
        image: mongo:4
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 27017
        volumeMounts:
        - name: mongo-pvc
          mountPath: /data/db/
      volumes:
      - name: mongo-pvc
        persistentVolumeClaim:
          claimName: pvc001
---
kind: Service
apiVersion: v1
metadata:
  name: mongo-app
spec:
  type: NodePort
  ports:
  - name: mongo
    port: 27017
    targetPort: 27017
    protocol: TCP
    nodePort: 32627
  selector:
    name: mongo-app
EOF
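
Apply the manifest and check that the Pod starts with the volume mounted; once Running, Mongo is reachable on any node IP at NodePort 32627:

kubectl apply -f mongo-deployment-pvc.yaml
kubectl get pods -l name=mongo-app
kubectl describe pvc pvc001   # shows the bound PV and the consuming Pod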