Ceph 13.2.1 Common Command Reference

Creation commands

 $ ceph osd pool create softapp 128
  softapp: pool name
  128: number of placement groups (PGs)
 $ ceph auth add client.softdev mon 'allow r' osd 'allow rw pool=softapp' 
 $ ceph-authtool -n client.softdev --cap osd 'allow rwx' --cap mon 'allow r' /etc/ceph/ceph.keyring
 $ ceph-authtool --create-keyring /etc/ceph/ceph.client.softdev.keyring
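 The steps above can be collapsed: generate a key with its caps straight into a new keyring file, then feed it to the cluster. A minimal sketch, reusing the client.softdev caps granted above:

 $ ceph-authtool --create-keyring /etc/ceph/ceph.client.softdev.keyring --gen-key -n client.softdev --cap mon 'allow r' --cap osd 'allow rw pool=softapp'
 $ ceph auth import -i /etc/ceph/ceph.client.softdev.keyring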

 $ ceph-authtool -C /etc/ceph/ceph.keyring
 $ ceph auth get client.admin -o /etc/ceph/ceph.keyring
 $ ceph-authtool /etc/ceph/ceph.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring
  create an empty keyring, write client.admin's entry into it, then merge another keyring file into it
 $ ceph osd pool rmsnap softdev softdevsnap-1
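 rmsnap removes a pool snapshot; its counterpart for creating one, for completeness, is:

 $ ceph osd pool mksnap softdev softdevsnap-1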

Query commands

 $ ceph auth ls

 $ ceph auth get client.softdev
 $ ceph auth get client.softdev -o client.softdev.keyring

 $ ceph auth print-key client.softdev
 $ ceph --user softdev -s
  run the status check as client.softdev (also a quick way to verify the key works)

 $ ceph osd pool get-quota pool-name
  pool-name: the name of the pool
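 For example, against the softapp pool created above. The output shape below is what 13.2.x prints; the exact values depend on the quotas set:

 $ ceph osd pool get-quota softapp
 quotas for pool 'softapp':
   max objects: N/A
   max bytes  : N/A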
 $ ceph --user softdev pg dump
 $ ceph osd lspools
 $ rados df
 $ ceph osd pool get softapp crush_rule

 $ rbd ls
 $ rbd ls softapp
 $ rbd info softapp/softrbd
 $ rbd trash ls softapp
 $ ceph osd pool get cloud-pool size
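 Every readable pool parameter can also be dumped at once with the all keyword:

 $ ceph osd pool get softapp all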

Modification commands

 $ ceph auth caps client.softdev mon 'allow r' osd 'allow *'
  note: auth caps replaces the user's existing capabilities, it does not append to them
 $ ceph-authtool /etc/ceph/ceph.keyring -n client.softdev --cap osd 'allow rwx' --cap mon 'allow r'
 $ ceph auth import -i /etc/ceph/ceph.client.softdev.keyring
 $ ceph osd pool set softapp pg_num 128
  set the softapp pool's PG count to 128
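 On 13.2.x, data does not actually rebalance until pgp_num is raised to match pg_num:

 $ ceph osd pool set softapp pgp_num 128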
 $ ceph osd pool rename softapp softdevpool

 $ ceph osd pool set-quota softapp max_objects 10000
 $ ceph osd pool set-quota softapp max_bytes 100000000000
  cap the pool at 10000 objects and 100000000000 bytes (100 GB)
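 Setting a quota back to 0 removes it:

 $ ceph osd pool set-quota softapp max_bytes 0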

 $ rbd resize --size 20480 softapp/softrbd
  grow the image (--size is in MB by default)

 $ rbd resize --size 10240 softapp/softrbd --allow-shrink
  shrinking requires the explicit --allow-shrink flag
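 After a resize, provisioned versus actually used space can be compared with:

 $ rbd du softapp/softrbd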
 $ rbd trash mv softapp/softrbd
 $ rbd trash restore softapp/117f6b8b4567
  note: 117f6b8b4567 is the ID of softapp/softrbd, as shown by rbd trash ls softapp
 $ rbd trash restore softapp/117f6b8b4567 --image softrbdnew
  restore under the new name softrbdnew
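 A pool's trash can also be emptied wholesale; worth verifying this subcommand is present on your exact 13.2 build:

 $ rbd trash purge softapp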

Deletion commands

 $ ceph osd pool delete softapp softapp --yes-i-really-really-mean-it
  note: the pool name must be typed twice
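 If the monitors refuse with "pool deletion is disabled", enable it first; with the centralized config store introduced in Mimic:

 $ ceph config set mon mon_allow_pool_delete true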
 $ rbd rm softapp/softrbd
 $ rbd trash rm softapp/117f6b8b4567
  note: trash rm takes the image ID, not the image name
 $ ceph auth del client.softdev

Uploading and deleting object data

 $ rados mkpool datapool
  note: the ceph osd pool create form shown earlier is the more standard way; rados mkpool is dropped in later releases

 $ rados put test-object ./test.txt --pool=datapool

 $ rados -p datapool ls

 $ ceph osd map datapool test-object
  show which PG and OSD set test-object maps to

 $ rados get test-object ./test.fromdatapool.txt --pool=datapool

 $ diff ./test.txt ./test.fromdatapool.txt
 $ rados rm test-object --pool=datapool
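 rados stat is a quick way to confirm the removal; it errors out once the object is gone:

 $ rados -p datapool stat test-object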

Creating an RBD image and mounting it locally

 Command: ceph osd pool create wangzhifeng-pool 128
 Session:
  [root@fcp01 dev]# ceph osd pool create wangzhifeng-pool 128
  pool 'wangzhifeng-pool' created
 Command: rbd create wangzhifeng-image --size 20480 --pool wangzhifeng-pool
 Session:
 [root@fcp01 dev]# rbd create wangzhifeng-image --size 20480 --pool wangzhifeng-pool
 [root@fcp01 dev]#
 Command: rbd map wangzhifeng-image --pool wangzhifeng-pool
 Session:
 [root@fcp01 dev]# rbd map wangzhifeng-image --pool wangzhifeng-pool
 /dev/rbd0
 Command: rbd showmapped
 Session:
 [root@fcp01 dev]# rbd showmapped
 id pool             image             snap device
 0  wangzhifeng-pool wangzhifeng-image -    /dev/rbd0
 Command: mkfs.ext4 /dev/rbd0
 Session:
 [root@fcp01 dev]# mkfs.ext4 /dev/rbd0
 mke2fs 1.42.9 (28-Dec-2013)
 Discarding device blocks: done
 Filesystem label=
 OS type: Linux
 Block size=4096 (log=2)
 Fragment size=4096 (log=2)
 Stride=1024 blocks, Stripe width=1024 blocks
 1310720 inodes, 5242880 blocks
 262144 blocks (5.00%) reserved for the super user
 First data block=0
 Maximum filesystem blocks=2153775104
 160 block groups
 32768 blocks per group, 32768 fragments per group
 8192 inodes per group
 Superblock backups stored on blocks:
        32768, 98304, 163840, 229376, 294912, 819200, 884736, 1605632, 2654208,
        4096000

 Allocating group tables: done
 Writing inode tables: done
 Creating journal (32768 blocks): done
 Writing superblocks and filesystem accounting information: done

 [root@fcp01 dev]#
 Command: mkdir /wangzhifeng-fs
 Session:
 [root@fcp01 dev]# mkdir /wangzhifeng-fs
 [root@fcp01 dev]# ls /wangzhifeng-fs/
 [root@fcp01 dev]#
 Command: mount /dev/rbd0 /wangzhifeng-fs
 Session:
 [root@fcp01 dev]# mount /dev/rbd0 /wangzhifeng-fs
 [root@fcp01 dev]# df -h
 Filesystem               Size  Used Avail Use% Mounted on
 devtmpfs                 3.9G     0  3.9G   0% /dev
 tmpfs                    3.9G     0  3.9G   0% /dev/shm
 tmpfs                    3.9G   18M  3.9G   1% /run
 tmpfs                    3.9G     0  3.9G   0% /sys/fs/cgroup
 /dev/mapper/centos-root   44G  9.0G   35G  21% /
 /dev/sda1               1014M  173M  842M  17% /boot
 tmpfs                    3.9G   24K  3.9G   1% /var/lib/ceph/osd/ceph-0
 overlay                   44G  9.0G   35G  21% 
 tmpfs                    799M     0  799M   0% /run/user/0
 /dev/rbd0                 20G   45M   19G   1% /wangzhifeng-fs
 [root@fcp01 dev]#
 $ rbd unmap /dev/rbd0
 $ rbd remove wangzhifeng-image --pool wangzhifeng-pool
 $ ceph osd pool rm wangzhifeng-pool wangzhifeng-pool --yes-i-really-really-mean-it
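 The mapping above does not survive a reboot. A common way to persist it is the rbdmap service; a minimal sketch, assuming client.admin's keyring sits in the default location:

 # /etc/ceph/rbdmap
 wangzhifeng-pool/wangzhifeng-image id=admin,keyring=/etc/ceph/ceph.client.admin.keyring

 # /etc/fstab (the /dev/rbd/<pool>/<image> symlink is created by udev when the image is mapped)
 /dev/rbd/wangzhifeng-pool/wangzhifeng-image /wangzhifeng-fs ext4 noauto 0 0

 $ systemctl enable rbdmap.service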

Appendix: capability syntax

 TYPE field: one of client, osd, mon, mds
 ID field: the daemon's user name or ID
 allow: precedes each daemon's permission settings
 r: read access
 w: write access
 x: permission to call class methods
 class-read: subset of x; call class read methods
 class-write: subset of x; call class write methods
 *, all: grant read, write, and execute permissions
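 Combining these in one step, ceph auth get-or-create creates the user (if absent) and prints its keyring; the user name below is only an example:

 $ ceph auth get-or-create client.rbduser mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=softapp'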