CEPH RGW with CSI

https://github.com/yandex-cloud/k8s-csi-s3

https://access.redhat.com/documentation/zh-cn/red_hat_ceph_storage/6/html/developer_guide/s3-bucket-operations#s3-bucket-policies_dev

https://access.redhat.com/documentation/zh-cn/red_hat_ceph_storage/6/html-single/object_gateway_guide/index#deploying-the-ceph-object-gateway-using-the-command-line-interface_rgw

https://access.redhat.com/documentation/zh-cn/red_hat_ceph_storage/3/html/object_gateway_guide_for_red_hat_enterprise_linux/rgw-administration-rgw#administrative-data-storage-rgw

Step 1 Deploy Ceph Rgw

# enable the rgw mgr module
ceph mgr module enable rgw 

# create rgw using the command line
ceph orch apply rgw default-realm default-zone --placement="2 ceph01" --port=8000

注意:参考 Red Hat Customer Portal《Red Hat Ceph Storage 5》文档 3.2 节"使用命令行界面部署 Ceph 对象网关"。

NUMBER_OF_DAEMONS 控制每个主机上部署的 Ceph 对象网关数量。要在不产生额外成本的情况下获得最高性能,请将此值设置为 2。

# create rgw using a YAML spec file
[root@ceph01 ~]# cat rgw.yml 
service_type: rgw
service_id: default
placement:
  count_per_host: 2
  hosts:
   - ceph01
spec:
  rgw_realm: default-realm
  rgw_zone: default-zone
  rgw_frontend_port: 1234

# create radosgw user
radosgw-admin user create --uid=s3 --display-name="object_storage" --system

# get access_key, secret_key
radosgw-admin user info --uid=s3

# create custom bucket, use s3cmd
yum -y install s3cmd

# set access_key, secret_key, host_base, host_bucket

s3cmd --configure
cat /root/.s3cfg
[default]
access_key = HIHPOUURDQ6SIEBVKTGO
access_token = 
add_encoding_exts = 
add_headers = 
bucket_location = US
ca_certs_file = 
cache_file = 
check_ssl_certificate = True
check_ssl_hostname = True
cloudfront_host = cloudfront.amazonaws.com
connection_max_age = 5
connection_pooling = True
content_disposition = 
content_type = 
default_mime_type = binary/octet-stream
delay_updates = False
delete_after = False
delete_after_fetch = False
delete_removed = False
dry_run = False
enable_multipart = True
encoding = UTF-8
encrypt = False
expiry_date = 
expiry_days = 
expiry_prefix = 
follow_symlinks = False
force = False
get_continue = False
gpg_command = /usr/bin/gpg
gpg_decrypt = %(gpg_command)s -d --verbose --no-use-agent --batch --yes --passphrase-fd %(passphrase_fd)s -o %(output_file)s %(input_file)s
gpg_encrypt = %(gpg_command)s -c --verbose --no-use-agent --batch --yes --passphrase-fd %(passphrase_fd)s -o %(output_file)s %(input_file)s
gpg_passphrase = 
guess_mime_type = True
host_base = 10.220.9.54:80
#host_bucket = %(bucket)s.s3.amazonaws.com
host_bucket = 10.220.9.54:80
human_readable_sizes = False
invalidate_default_index_on_cf = False
invalidate_default_index_root_on_cf = True
invalidate_on_cf = False
keep_dirs = False
kms_key = 
limit = -1
limitrate = 0
list_allow_unordered = False
list_md5 = False
log_target_prefix = 
long_listing = False
max_delete = -1
max_retries = 5
mime_type = 
multipart_chunk_size_mb = 15
multipart_copy_chunk_size_mb = 1024
multipart_max_chunks = 10000
preserve_attrs = True
progress_meter = True
proxy_host = 
proxy_port = 0
public_url_use_https = False
put_continue = False
recursive = False
recv_chunk = 65536
reduced_redundancy = False
requester_pays = False
restore_days = 1
restore_priority = Standard
secret_key = WYF14K7MJb6hOt4AALLvxWfN43rnSaYnz9CoT8Ym
send_chunk = 65536
server_side_encryption = False
signature_v2 = False
signurl_use_https = False
simpledb_host = sdb.amazonaws.com
skip_destination_validation = False
skip_existing = False
socket_timeout = 300
ssl_client_cert_file = 
ssl_client_key_file = 
stats = False
stop_on_error = False
storage_class = 
throttle_max = 100
upload_id = 
urlencoding_mode = normal
use_http_expect = False
use_https = False
use_mime_magic = True
verbosity = WARNING
website_endpoint = http://%(bucket)s.s3-website-%(location)s.amazonaws.com/
website_error = 
website_index = index.html

# create default bucket
s3cmd mb s3://my-new-bucket --host=localhost --host-bucket=localhost

Step 2 Install k8s-csi-s3

# Clone the project repository
git clone https://github.com/yandex-cloud/k8s-csi-s3.git

1. Create a secret with your S3 credentials
apiVersion: v1
kind: Secret
metadata:
  name: csi-s3-secret
  # Namespace depends on the configuration in the storageclass.yaml
  namespace: kube-system
stringData:
  accessKeyID: <YOUR_ACCESS_KEY_ID>
  secretAccessKey: <YOUR_SECRET_ACCESS_KEY>
  # For AWS set it to "https://s3.<region>.amazonaws.com", for example https://s3.eu-central-1.amazonaws.com
  endpoint: https://storage.yandexcloud.net
  # For AWS set it to AWS region
  #region: ""
The region can be empty if you are using some other S3 compatible storage.

2. Deploy the driver
cd deploy/kubernetes
kubectl create -f provisioner.yaml
kubectl create -f driver.yaml
kubectl create -f csi-s3.yaml

# deploy using pvc-manual.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: manualbucket-with-path
spec:
  storageClassName: csi-s3
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteMany
  claimRef:
    namespace: default
    name: csi-s3-manual-pvc
  csi:
    driver: ru.yandex.s3.csi
    controllerPublishSecretRef:
      name: csi-s3-secret
      namespace: kube-system
    nodePublishSecretRef:
      name: csi-s3-secret
      namespace: kube-system
    nodeStageSecretRef:
      name: csi-s3-secret
      namespace: kube-system
    volumeAttributes:
      capacity: 10Gi
      mounter: geesefs
      options: --memory-limit 1000 --dir-mode 0777 --file-mode 0666
      # options: --memory-limit 1000 --dir-mode 0777 --file-mode 0666 --stat-cache-ttl 0s  # --stat-cache-ttl 0s disables the attribute cache (default 1m0s)
    volumeHandle: manualbucket/path # <bucket-name>/<prefix>; the bucket must already exist (e.g. my-new-bucket)
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: csi-s3-manual-pvc
spec:
  # Empty storage class disables dynamic provisioning
  storageClassName: ""
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 10Gi

Step 3 Test & Check

Check if the PVC has been bound:

$ kubectl get pvc csi-s3-manual-pvc
NAME                STATUS    VOLUME                   CAPACITY   ACCESS MODES   STORAGECLASS   AGE
csi-s3-manual-pvc   Bound     manualbucket-with-path   10Gi       RWX                           9s
Create a test pod which mounts your volume:

kubectl create -f examples/pod.yaml
If the pod can start, everything should be working.

Test the mount

$ kubectl exec -ti csi-s3-test-nginx -- bash
$ mount | grep fuse
pvc-035763df-0488-4941-9a34-f637292eb95c: on /usr/share/nginx/html/s3 type fuse.geesefs (rw,nosuid,nodev,relatime,user_id=65534,group_id=0,default_permissions,allow_other)
$ touch /usr/share/nginx/html/s3/hello-world.txt
$ ls /usr/share/nginx/html/s3
123  ansible.cfg  hello-world.txt

[root@ceph01 ~]# s3cmd ls s3://my-new-bucket/
2024-04-26 04:47            0  s3://my-new-bucket/123
2024-04-26 03:36          869  s3://my-new-bucket/ansible.cfg
2024-04-26 03:59           17  s3://my-new-bucket/hello-world.txt

Set Bucket Policy

[root@sd-ceph01 ~]# cat aps-bucket-policy 
{
  "Version": "2012-10-17",
  "Statement": [{
    "Effect": "Allow",
    "Principal": {"AWS": ["arn:aws:iam::usfolks:user/aps"]},
    "Action": "s3:*", 
    "Resource": [
      "arn:aws:s3:::aps-bucket/*"
    ]
  }]
}
[root@sd-ceph01 ~]# s3cmd setpolicy aps-bucket-policy s3://aps-bucket
s3://aps-bucket/: Policy updated
相关推荐
yueyingshaqiu0121 小时前
ubuntu ceph部署
ceph·ubuntu
Bener20082 天前
CEPH client.admin key获取
ceph
Comedy_宁4 天前
分布式NAS集群+ceph+CTDB
linux·分布式·ceph·nas·ctdb
Jiangxl~7 天前
第31讲:K8S StorageClass使用rbd-provisioner驱动与Ceph RBD块存储集成
linux·服务器·网络·ceph·容器·kubernetes·云计算
zhang-ge9 天前
cephadm部署ceph分布式存储集群笔记
笔记·分布式·ceph
手持钩笼引天下9 天前
ceph osd slow io (一):对象存储index osd 的rocksdb性能下降
运维·ceph
Jiangxl~10 天前
第30讲:Ceph集群RBD块存储通过CSI客户端与K8S StorageClass集成
服务器·ceph·阿里云·云原生·容器·kubernetes·云计算
学习等保ing......11 天前
Linux系统下的Swift与Ceph分布式存储解决方案
linux·ceph·swift
千码君201612 天前
Ceph: vdbench 测试ceph存储rbd块设备
ceph
Terry_Tsang14 天前
ceph scrub 错误记录
前端·数据库·ceph