CEPH CSI Module

The shturval repository provides the shturval-ceph-cephfs (Ceph FS) and shturval-ceph-rbd (Ceph RBD) charts for installation. To install one, select the required chart and click "Install". We recommend creating a new namespace for the chart being installed. In the window that opens, select the required version and, if necessary, create the namespace.

After the version is selected, the available chart values appear on the right-hand side of the screen. The recommended configuration parameters are given below.
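
If you prefer the command line, an equivalent installation might look roughly like this (a sketch only: the repository URL, release names, namespaces, and values file names are assumptions; the values files contain the parameters described below):

# Assumed repository URL -- replace with the actual shturval chart repository.
helm repo add shturval https://charts.example.com/shturval
helm repo update

# Ceph FS chart, installed into its own new namespace
helm install ceph-csi-cephfs shturval/shturval-ceph-cephfs \
  --namespace ceph-csi-cephfs --create-namespace \
  -f cephfs-values.yaml

# Ceph RBD chart, likewise
helm install ceph-csi-rbd shturval/shturval-ceph-rbd \
  --namespace ceph-csi-rbd --create-namespace \
  -f rbd-values.yaml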

CEPH FS

csiConfig:
  - clusterID: "CEPH_CLUSTER_ID"
    monitors:
      - "MONITOR_IP_ADDRESS"
      - "MONITOR_IP_ADDRESS"
      - "MONITOR_IP_ADDRESS"
secret:
  # Specifies whether the secret should be created
  create: true
  name: csi-cephfs-secret
  # Key values correspond to a user name and its key, as defined in the
  # ceph cluster. User ID should have required access to the 'pool'
  # specified in the storage class
  adminID: CEPH_SERVICE_ACCOUNT_ID
  adminKey: CEPH_SERVICE_ACCOUNT_KEY
cephconf: |
  [global]
    auth cluster required = cephx
    auth service required = cephx
    auth client required = cephx

    # Workaround for http://tracker.ceph.com/issues/23446
    fuse_set_user_groups = false

    # ceph-fuse which uses libfuse2 by default has write buffer size of 2KiB
    # adding 'fuse_big_writes = true' option by default to override this limit
    # see https://github.com/ceph/ceph-csi/issues/1928
    fuse_big_writes = true
storageClass:
  # Specifies whether the Storage class should be created
  create: true
  name: csi-cephfs-sc
  # Annotations for the storage class
  # Example:
  # annotations:
  #   storageclass.kubernetes.io/is-default-class: "true"
  annotations: {}

  # String representing a Ceph cluster to provision storage from.
  # Should be unique across all Ceph clusters in use for provisioning,
  # cannot be greater than 36 bytes in length, and should remain immutable for
  # the lifetime of the StorageClass in use.
  clusterID: CEPH_CLUSTER_ID
  # (required) CephFS filesystem name into which the volume shall be created
  # eg: fsName: myfs
  fsName: cephfs
  # (optional) Ceph pool into which volume data shall be stored
  # pool: <cephfs-data-pool>
  # For eg:
  # pool: "replicapool"
  pool: ""
  # (optional) Comma separated string of Ceph-fuse mount options.
  # For eg:
  # fuseMountOptions: debug
  fuseMountOptions: ""
  # (optional) Comma separated string of Cephfs kernel mount options.
  # Check man mount.ceph for mount options. For eg:
  # kernelMountOptions: readdir_max_bytes=1048576,norbytes
  kernelMountOptions: ""
  # (optional) The driver can use either ceph-fuse (fuse) or
  # ceph kernelclient (kernel).
  # If omitted, default volume mounter will be used - this is
  # determined by probing for ceph-fuse and mount.ceph
  # mounter: kernel
  mounter: ""
  # (optional) Prefix to use for naming subvolumes.
  # If omitted, defaults to "csi-vol-".
  # volumeNamePrefix: "foo-bar-"
  volumeNamePrefix: "shturval-"
  # The secrets have to contain user and/or Ceph admin credentials.
  provisionerSecret: csi-cephfs-secret
  # If the Namespaces are not specified, the secrets are assumed to
  # be in the Release namespace.
  provisionerSecretNamespace: ""
  controllerExpandSecret: csi-cephfs-secret
  controllerExpandSecretNamespace: ""
  nodeStageSecret: csi-cephfs-secret
  nodeStageSecretNamespace: ""
  reclaimPolicy: Delete
  allowVolumeExpansion: true
  mountOptions: []
  # Mount Options
  # Example:
  # mountOptions:
  #   - discard
nodeplugin:
  tolerations:
  - key: "role"
    operator: "Equal"
    value: "infra"
    effect: "NoSchedule"
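
With these values applied, the chart creates the csi-cephfs-sc StorageClass and the csi-cephfs-secret secret. A minimal PersistentVolumeClaim that consumes the class might look like this (the claim name and size are arbitrary examples):

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: cephfs-pvc              # arbitrary example name
spec:
  accessModes:
    - ReadWriteMany             # CephFS volumes can be shared across nodes
  resources:
    requests:
      storage: 1Gi
  storageClassName: csi-cephfs-sc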

Service account access requirements

client.kubecephfs mon 'allow r' mgr 'allow rw' mds 'allow rws' osd 'allow rw pool=cephfs_data, allow rw pool=cephfs_metadata'
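
On the Ceph side such an account can be created, for example, with ceph auth get-or-create (the account and pool names follow the example above and should be adjusted to your cluster):

ceph auth get-or-create client.kubecephfs \
  mon 'allow r' \
  mgr 'allow rw' \
  mds 'allow rws' \
  osd 'allow rw pool=cephfs_data, allow rw pool=cephfs_metadata'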

CEPH RBD

csiConfig:
  - clusterID: "CEPH_CLUSTER_ID"
    monitors:
      - "MONITOR_IP_ADDRESS"
      - "MONITOR_IP_ADDRESS"
      - "MONITOR_IP_ADDRESS"
storageClass:
  # Specifies whether the storageclass should be created
  create: true
  name: csi-rbd-sc

  # Use Thick Provisioning for this storage class. Default: false. Enabled if not false.
  thickProvision: false

  # Annotations for the storage class
  # Example:
  # annotations:
  #   storageclass.kubernetes.io/is-default-class: "true"
  annotations: {}

  # (required) String representing a Ceph cluster to provision storage from.
  # Should be unique across all Ceph clusters in use for provisioning,
  # cannot be greater than 36 bytes in length, and should remain immutable for
  # the lifetime of the StorageClass in use.
  clusterID: "CEPH_CLUSTER_ID"

  # (optional) If you want to use erasure coded pool with RBD, you need to
  # create two pools. one erasure coded and one replicated.
  # You need to specify the replicated pool here in the `pool` parameter, it is
  # used for the metadata of the images.
  # The erasure coded pool must be set as the `dataPool` parameter below.
  # dataPool: <ec-data-pool>
  dataPool: ""

  # (required) Ceph pool into which the RBD image shall be created
  # eg: pool: replicapool
  pool: kuberbd

  # (optional) RBD image features, CSI creates image with image-format 2 CSI
  # RBD currently supports `layering`, `journaling`, `exclusive-lock`,
  # `object-map`, `fast-diff`, `deep-flatten` features.
  # Refer https://docs.ceph.com/en/latest/rbd/rbd-config-ref/#image-features
  # for image feature dependencies.
  # imageFeatures: layering,journaling,exclusive-lock,object-map,fast-diff
  imageFeatures: "layering"

  # (optional) Specifies whether to try other mounters in case if the current
  # mounter fails to mount the rbd image for any reason. True means fallback
  # to next mounter, default is set to false.
  # Note: tryOtherMounters is currently useful to fallback from krbd to rbd-nbd
  # in case if any of the specified imageFeatures is not supported by krbd
  # driver on node scheduled for application pod launch, but in the future this
  # should work with any mounter type.
  # tryOtherMounters: false

  # (optional) uncomment the following to use rbd-nbd as mounter
  # on supported nodes
  # mounter: rbd-nbd
  mounter: ""

  # (optional) ceph client log location, eg: rbd-nbd
  # By default host-path /var/log/ceph of node is bind-mounted into
  # csi-rbdplugin pod at /var/log/ceph mount path. This is to configure
  # target bindmount path used inside container for ceph clients logging.
  # See docs/rbd-nbd.md for available configuration options.
  # cephLogDir: /var/log/ceph
  cephLogDir: ""

  # (optional) ceph client log strategy
  # By default, log file belonging to a particular volume will be deleted
  # on unmap, but you can choose to just compress instead of deleting it
  # or even preserve the log file in text format as it is.
  # Available options `remove` or `compress` or `preserve`
  # cephLogStrategy: remove
  cephLogStrategy: ""

  # (optional) Prefix to use for naming RBD images.
  # If omitted, defaults to "csi-vol-".
  # volumeNamePrefix: "foo-bar-"
  volumeNamePrefix: ""

  # (optional) Instruct the plugin it has to encrypt the volume
  # By default it is disabled. Valid values are "true" or "false".
  # A string is expected here, i.e. "true", not true.
  # encrypted: "true"
  encrypted: ""

  # (optional) Use external key management system for encryption passphrases by
  # specifying a unique ID matching KMS ConfigMap. The ID is only used for
  # correlation to configmap entry.
  encryptionKMSID: ""

  # Add topology constrained pools configuration, if topology based pools
  # are setup, and topology constrained provisioning is required.
  # For further information read TODO<doc>
  # topologyConstrainedPools: |
  #   [{"poolName":"pool0",
  #     "dataPool":"ec-pool0" # optional, erasure-coded pool for data
  #     "domainSegments":[
  #       {"domainLabel":"region","value":"east"},
  #       {"domainLabel":"zone","value":"zone1"}]},
  #    {"poolName":"pool1",
  #     "dataPool":"ec-pool1" # optional, erasure-coded pool for data
  #     "domainSegments":[
  #       {"domainLabel":"region","value":"east"},
  #       {"domainLabel":"zone","value":"zone2"}]},
  #    {"poolName":"pool2",
  #     "dataPool":"ec-pool2" # optional, erasure-coded pool for data
  #     "domainSegments":[
  #       {"domainLabel":"region","value":"west"},
  #       {"domainLabel":"zone","value":"zone1"}]}
  #   ]
  topologyConstrainedPools: []

  # (optional) mapOptions is a comma-separated list of map options.
  # For krbd options refer
  # https://docs.ceph.com/docs/master/man/8/rbd/#kernel-rbd-krbd-options
  # For nbd options refer
  # https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options
  # Format:
  # mapOptions: "<mounter>:op1,op2;<mounter>:op1,op2"
  # An empty mounter field is treated as krbd type for compatibility.
  # eg:
  # mapOptions: "krbd:lock_on_read,queue_depth=1024;nbd:try-netlink"
  mapOptions: ""

  # (optional) unmapOptions is a comma-separated list of unmap options.
  # For krbd options refer
  # https://docs.ceph.com/docs/master/man/8/rbd/#kernel-rbd-krbd-options
  # For nbd options refer
  # https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options
  # Format:
  # unmapOptions: "<mounter>:op1,op2;<mounter>:op1,op2"
  # An empty mounter field is treated as krbd type for compatibility.
  # eg:
  # unmapOptions: "krbd:force;nbd:force"
  unmapOptions: ""

  # The secrets have to contain Ceph credentials with required access
  # to the 'pool'.
  provisionerSecret: csi-rbd-secret
  # If Namespaces are left empty, the secrets are assumed to be in the
  # Release namespace.
  provisionerSecretNamespace: ""
  controllerExpandSecret: csi-rbd-secret
  controllerExpandSecretNamespace: ""
  nodeStageSecret: csi-rbd-secret
  nodeStageSecretNamespace: ""
  # Specify the filesystem type of the volume. If not specified,
  # csi-provisioner will set default as `ext4`.
  fstype: ext4
  reclaimPolicy: Delete
  allowVolumeExpansion: true
  mountOptions: []
  # Mount Options
  # Example:
  # mountOptions:
  #   - discard

# Mount the host /etc/selinux inside pods to support
# selinux-enabled filesystems
selinuxMount: true

secret:
  # Specifies whether the secret should be created
  create: true
  name: csi-rbd-secret
  # Key values correspond to a user name and its key, as defined in the
  # ceph cluster. User ID should have required access to the 'pool'
  # specified in the storage class
  userID: CEPH_SERVICE_ACCOUNT_ID
  userKey: CEPH_SERVICE_ACCOUNT_KEY
  # Encryption passphrase
  # encryptionPassphrase: test_passphrase

# This is a sample configmap that helps define a Ceph configuration as required
# by the CSI plugins.
# Sample ceph.conf available at
# https://github.com/ceph/ceph/blob/master/src/sample.ceph.conf Detailed
# documentation is available at
# https://docs.ceph.com/en/latest/rados/configuration/ceph-conf/
cephconf: |
  [global]
    auth_cluster_required = cephx
    auth_service_required = cephx
    auth_client_required = cephx
nodeplugin:
  tolerations:
  - key: "role"
    operator: "Equal"
    value: "infra"
    effect: "NoSchedule"
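
With these values applied, the chart creates the csi-rbd-sc StorageClass and the csi-rbd-secret secret. A minimal PersistentVolumeClaim that consumes the class might look like this (the claim name and size are arbitrary examples):

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: rbd-pvc                 # arbitrary example name
spec:
  accessModes:
    - ReadWriteOnce             # an RBD image is attached to one node at a time
  resources:
    requests:
      storage: 1Gi
  storageClassName: csi-rbd-sc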

Service account access requirements

client.kuberbd mon 'profile rbd' osd 'profile rbd pool=kuberbd'
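
Such an account can be created on the Ceph side, for example, with ceph auth get-or-create (the account and pool names follow the example above and should be adjusted to your cluster):

ceph auth get-or-create client.kuberbd \
  mon 'profile rbd' \
  osd 'profile rbd pool=kuberbd'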

Configuration notes

If several instances of CEPH FS or CEPH RBD need to be connected in the same cluster, do the following (an example is shown after the list):

  1. Install the service instances into different namespaces of the cluster.
  2. Set the driverName parameter to values that differ between the service instances.
  3. Make the port in the nodeplugin.httpMetrics.containerPort parameter unique for each service instance.
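
For instance, a second CEPH RBD instance could be installed with overrides along these lines (the driver name, release name, namespace, port, and file name are arbitrary examples; the remaining values stay as described above):

# rbd-values-2.yaml -- overrides for the second instance
driverName: rbd2.csi.ceph.com      # must differ from the first instance
nodeplugin:
  httpMetrics:
    containerPort: 8081            # unique per instance

# helm install ceph-csi-rbd-2 shturval/shturval-ceph-rbd \
#   --namespace ceph-csi-rbd-2 --create-namespace -f rbd-values-2.yaml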