CEPH RGW with CSI

https://github.com/yandex-cloud/k8s-csi-s3

https://access.redhat.com/documentation/zh-cn/red_hat_ceph_storage/6/html/developer_guide/s3-bucket-operations#s3-bucket-policies_dev

https://access.redhat.com/documentation/zh-cn/red_hat_ceph_storage/6/html-single/object_gateway_guide/index#deploying-the-ceph-object-gateway-using-the-command-line-interface_rgw

https://access.redhat.com/documentation/zh-cn/red_hat_ceph_storage/3/html/object_gateway_guide_for_red_hat_enterprise_linux/rgw-administration-rgw#administrative-data-storage-rgw

Step 1 Deploy Ceph Rgw

复制代码
# enable the rgw mgr module
ceph mgr module enable rgw 

# create the rgw service via the command line
ceph orch apply rgw default-realm default-zone --placement="2  ceph01" --port=8000

注意3.2. 使用命令行界面部署 Ceph 对象网关 Red Hat Ceph Storage 5 | Red Hat Customer Portal

NUMBER_OF_DAEMONS 控制每个主机上部署的 Ceph 对象网关数量。要在不产生额外成本的情况下获得最高性能,请将此值设置为 2。

复制代码
# use yml create rgw
[root@ceph01 ~]# cat rgw.yml 
service_type: rgw
service_id: default
placement:
  count_per_host: 2
  hosts:
   - ceph01
spec:
  rgw_realm: default-realm
  rgw_zone: default-zone
  rgw_frontend_port: 1234

# create radosgw user
radosgw-admin user create --uid=s3 --display-name="object_storage" --system

# get access_key, secret_key
radosgw-admin user info --uid=s3

# install s3cmd to create and manage buckets
yum -y install s3cmd

# set access_key, secret_key, host_base, host_bucket

s3cmd --configure
cat /root/.s3cfg
[default]
access_key = HIHPOUURDQ6SIEBVKTGO
access_token = 
add_encoding_exts = 
add_headers = 
bucket_location = US
ca_certs_file = 
cache_file = 
check_ssl_certificate = True
check_ssl_hostname = True
cloudfront_host = cloudfront.amazonaws.com
connection_max_age = 5
connection_pooling = True
content_disposition = 
content_type = 
default_mime_type = binary/octet-stream
delay_updates = False
delete_after = False
delete_after_fetch = False
delete_removed = False
dry_run = False
enable_multipart = True
encoding = UTF-8
encrypt = False
expiry_date = 
expiry_days = 
expiry_prefix = 
follow_symlinks = False
force = False
get_continue = False
gpg_command = /usr/bin/gpg
gpg_decrypt = %(gpg_command)s -d --verbose --no-use-agent --batch --yes --passphrase-fd %(passphrase_fd)s -o %(output_file)s %(input_file)s
gpg_encrypt = %(gpg_command)s -c --verbose --no-use-agent --batch --yes --passphrase-fd %(passphrase_fd)s -o %(output_file)s %(input_file)s
gpg_passphrase = 
guess_mime_type = True
host_base = http://10.220.9.54:80
#host_bucket = %(bucket)s.s3.amazonaws.com
host_bucket = http://10.220.9.54:80
human_readable_sizes = False
invalidate_default_index_on_cf = False
invalidate_default_index_root_on_cf = True
invalidate_on_cf = False
keep_dirs = False
kms_key = 
limit = -1
limitrate = 0
list_allow_unordered = False
list_md5 = False
log_target_prefix = 
long_listing = False
max_delete = -1
max_retries = 5
mime_type = 
multipart_chunk_size_mb = 15
multipart_copy_chunk_size_mb = 1024
multipart_max_chunks = 10000
preserve_attrs = True
progress_meter = True
proxy_host = 
proxy_port = 0
public_url_use_https = False
put_continue = False
recursive = False
recv_chunk = 65536
reduced_redundancy = False
requester_pays = False
restore_days = 1
restore_priority = Standard
secret_key = WYF14K7MJb6hOt4AALLvxWfN43rnSaYnz9CoT8Ym
send_chunk = 65536
server_side_encryption = False
signature_v2 = False
signurl_use_https = False
simpledb_host = sdb.amazonaws.com
skip_destination_validation = False
skip_existing = False
socket_timeout = 300
ssl_client_cert_file = 
ssl_client_key_file = 
stats = False
stop_on_error = False
storage_class = 
throttle_max = 100
upload_id = 
urlencoding_mode = normal
use_http_expect = False
use_https = False
use_mime_magic = True
verbosity = WARNING
website_endpoint = http://%(bucket)s.s3-website-%(location)s.amazonaws.com/
website_error = 
website_index = index.html

# create default bucket
s3cmd mb s3://my-new-bucket --host=localhost --host-bucket=localhost

Step 2 Install k8s-csi-s3

复制代码
# Download git project
git clone https://github.com/yandex-cloud/k8s-csi-s3.git

1. Create a secret with your S3 credentials
apiVersion: v1
kind: Secret
metadata:
  name: csi-s3-secret
  # Namespace depends on the configuration in the storageclass.yaml
  namespace: kube-system
stringData:
  accessKeyID: <YOUR_ACCESS_KEY_ID>
  secretAccessKey: <YOUR_SECRET_ACCESS_KEY>
  # For AWS set it to "https://s3.<region>.amazonaws.com", for example https://s3.eu-central-1.amazonaws.com
  endpoint: https://storage.yandexcloud.net
  # For AWS set it to AWS region
  #region: ""
The region can be empty if you are using some other S3 compatible storage.

2. Deploy the driver
cd deploy/kubernetes
kubectl create -f provisioner.yaml
kubectl create -f driver.yaml
kubectl create -f csi-s3.yaml

# deploy using pvc-manual.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: manualbucket-with-path
spec:
  storageClassName: csi-s3
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteMany
  claimRef:
    namespace: default
    name: csi-s3-manual-pvc
  csi:
    driver: ru.yandex.s3.csi
    controllerPublishSecretRef:
      name: csi-s3-secret
      namespace: kube-system
    nodePublishSecretRef:
      name: csi-s3-secret
      namespace: kube-system
    nodeStageSecretRef:
      name: csi-s3-secret
      namespace: kube-system
    volumeAttributes:
      capacity: 10Gi
      mounter: geesefs
      options: --memory-limit 1000 --dir-mode 0777 --file-mode 0666
     #options: --memory-limit 1000 --dir-mode 0777 --file-mode 0666 --stat-cache-ttl 0s # --stat-cache-ttl 0s:"no cache,default 1m0s"
    volumeHandle: manualbucket/path # default_bucket
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: csi-s3-manual-pvc
spec:
  # Empty storage class disables dynamic provisioning
  storageClassName: ""
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 10Gi

Step 3 Test & Check

复制代码
Check if the PVC has been bound:

$ kubectl get pvc csi-s3-pvc
NAME         STATUS    VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
csi-s3-pvc   Bound     pvc-c5d4634f-8507-11e8-9f33-0e243832354b   5Gi        RWO            csi-s3         9s
Create a test pod which mounts your volume:

kubectl create -f examples/pod.yaml
If the pod can start, everything should be working.

Test the mount

$ kubectl exec -ti csi-s3-test-nginx -- bash
$ mount | grep fuse
pvc-035763df-0488-4941-9a34-f637292eb95c: on /usr/share/nginx/html/s3 type fuse.geesefs (rw,nosuid,nodev,relatime,user_id=65534,group_id=0,default_permissions,allow_other)
$ touch /usr/share/nginx/html/s3/hello-world.txt
$ ls /usr/share/nginx/html/s3
123  ansible.cfg  hello-world.txt

[root@ceph01 ~]# s3cmd ls s3://my-new-bucket/
2024-04-26 04:47            0  s3://my-new-bucket/123
2024-04-26 03:36          869  s3://my-new-bucket/ansible.cfg
2024-04-26 03:59           17  s3://my-new-bucket/hello-world.txt

Set Bucket Policy

复制代码
[root@sd-ceph01 ~]# cat aps-bucket-policy 
{
  "Version": "2012-10-17",
  "Statement": [{
    "Effect": "Allow",
    "Principal": {"AWS": ["arn:aws:iam::usfolks:user/aps"]},
    "Action": "s3:*", 
    "Resource": [
      "arn:aws:s3:::aps-bucket/*"
    ]
  }]
}
[root@sd-ceph01 ~]# s3cmd setpolicy aps-bucket-policy s3://aps-bucket
s3://aps-bucket/: Policy updated
相关推荐
珂玥c9 小时前
Ceph集群新增osd
ceph
老wang你好1 天前
Ceph分布式存储系统全解析
ceph
一个行走的民14 天前
分布式系统中 Map 增量(Delta)是否需要持久化
ceph
一个行走的民16 天前
BlueStore 核心原理与关键机制
ceph
奋斗的小青年I18 天前
Proxmox VE Ceph 超融合集群落地实战
windows·ceph·vmware·pve·超融合·proxmox
一个行走的民18 天前
深度剖析 Ceph PG 分裂机制:原理、底层、实操、影响、线上避坑(最全完整版)
ceph·算法
一个行走的民18 天前
Ceph 核心概念精讲:彻底搞懂 PG、PGP、pg_num、pgp_num
ceph
Mr.王8351 个月前
Kubernetes宿主机本地盘池化管理
ceph·云原生·容器·kubernetes
一个行走的民1 个月前
CEPH OSD心跳机制
ceph
一个行走的民1 个月前
Ceph PG 状态详解与线上故障处理
网络·ceph