Create a few pools on the dataprovider node:
ceph osd pool create volumes 32
ceph osd pool create images 32
ceph osd pool create backups 32
ceph osd pool create vms 32
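As a quick sanity check (not part of the original steps), you can confirm the pools exist by listing them:
ceph osd lspools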
Run the following command on the glance node:
sudo yum install python-ceph
On the nova-compute, cinder-backup, and cinder-volume nodes, run the following command:
sudo yum install ceph
Create the leadorceph user on controller and computenode:
sudo useradd -d /home/leadorceph -m leadorceph
sudo passwd leadorceph
echo "leadorceph ALL = (root) NOPASSWD:ALL" | sudo tee /etc/sudoers.d/leadorceph
sudo chmod 0440 /etc/sudoers.d/leadorceph
As the leadorceph user, run sudo visudo and change Defaults requiretty to Defaults:leadorceph !requiretty. Then copy the SSH key to the other nodes:
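Note that ssh-copy-id assumes leadorceph already has an SSH keypair; if it does not, generate one first (a step the original text skips):
ssh-keygen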
ssh-copy-id leadorceph@controllernode
ssh-copy-id leadorceph@computenode
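As a quick check that key-based SSH and passwordless sudo both work, something like the following should print root without prompting for a password:
ssh leadorceph@controllernode sudo whoami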
Run the following commands on the dataprovider node:
ceph auth get-or-create client.cinder mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rwx pool=vms, allow rx pool=images'
ceph auth get-or-create client.glance mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=images'
ceph auth get-or-create client.cinder-backup mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=backups'
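You can inspect the capabilities that were just granted with, for example:
ceph auth get client.cinder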
ceph auth get-or-create client.glance | ssh controllernode sudo tee /etc/ceph/ceph.client.glance.keyring
ssh controllernode sudo chown glance:glance /etc/ceph/ceph.client.glance.keyring
ceph auth get-or-create client.cinder | ssh controllernode sudo tee /etc/ceph/ceph.client.cinder.keyring
ssh controllernode sudo chown cinder:cinder /etc/ceph/ceph.client.cinder.keyring
ceph auth get-or-create client.cinder-backup | ssh controllernode sudo tee /etc/ceph/ceph.client.cinder-backup.keyring
ssh controllernode sudo chown cinder:cinder /etc/ceph/ceph.client.cinder-backup.keyring
ceph auth get-key client.cinder | ssh computenode tee client.cinder.key
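Assuming /etc/ceph/ceph.conf is already present on controllernode, you can verify that the copied credentials actually authenticate against the cluster, e.g.:
ssh controllernode sudo ceph -s --id glance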
On computenode, switch to the leadorceph user (su leadorceph), then generate a UUID for the libvirt secret:
uuidgen
The command above outputs 78f475b1-846f-47ba-8145-9f305de5c516 (your value will differ; use it consistently in every step below).
cat > secret.xml <<EOF
<secret ephemeral='no' private='no'>
  <uuid>78f475b1-846f-47ba-8145-9f305de5c516</uuid>
  <usage type='ceph'>
    <name>client.cinder secret</name>
  </usage>
</secret>
EOF
sudo virsh secret-define --file secret.xml
sudo virsh secret-set-value --secret 78f475b1-846f-47ba-8145-9f305de5c516 --base64 $(cat client.cinder.key) && rm client.cinder.key secret.xml
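To confirm libvirt stored the secret and its value, the standard virsh commands can be used:
sudo virsh secret-list
sudo virsh secret-get-value 78f475b1-846f-47ba-8145-9f305de5c516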
On the controller node, switch to the root user.
Edit the [DEFAULT] section of /etc/glance/glance-api.conf:
default_store = rbd
rbd_store_user = glance
rbd_store_pool = images
rbd_store_chunk_size = 8
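If the crudini utility is available, the same edit can be scripted instead of done by hand (a sketch, assuming the package is installed):
sudo crudini --set /etc/glance/glance-api.conf DEFAULT default_store rbd
sudo crudini --set /etc/glance/glance-api.conf DEFAULT rbd_store_user glance
sudo crudini --set /etc/glance/glance-api.conf DEFAULT rbd_store_pool images
sudo crudini --set /etc/glance/glance-api.conf DEFAULT rbd_store_chunk_size 8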
Add the following to /etc/cinder/cinder.conf:
volume_driver = cinder.volume.drivers.rbd.RBDDriver
rbd_pool = volumes
rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_flatten_volume_from_snapshot = false
rbd_max_clone_depth = 5
rbd_store_chunk_size = 4
rados_connect_timeout = -1
glance_api_version = 2
rbd_user = cinder
rbd_secret_uuid = 78f475b1-846f-47ba-8145-9f305de5c516
backup_driver = cinder.backup.drivers.ceph
backup_ceph_conf = /etc/ceph/ceph.conf
backup_ceph_user = cinder-backup
backup_ceph_chunk_size = 134217728
backup_ceph_pool = backups
backup_ceph_stripe_unit = 0
backup_ceph_stripe_count = 0
restore_discard_excess_bytes = true
Add the following to /etc/nova/nova.conf:
rbd_user = cinder
rbd_secret_uuid = 78f475b1-846f-47ba-8145-9f305de5c516
Edit the Ceph configuration (/etc/ceph/ceph.conf) on computenode, adding a [client] section:
[client]
rbd cache = true
rbd cache writethrough until flush = true
admin socket = /var/run/ceph/$cluster-$type.$id.$pid.$cctid.asok
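The admin socket path above must be writable by the qemu process; if /var/run/ceph does not exist yet, create it first (the ownership shown is an assumption for a CentOS/RHEL compute node):
sudo mkdir -p /var/run/ceph
sudo chown qemu:qemu /var/run/ceph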
Add the following to /etc/nova/nova.conf on computenode:
libvirt_images_type = rbd
libvirt_images_rbd_pool = vms
libvirt_images_rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_user = cinder
rbd_secret_uuid = 78f475b1-846f-47ba-8145-9f305de5c516
libvirt_inject_password = false
libvirt_inject_key = false
libvirt_inject_partition = -2
libvirt_live_migration_flag="VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE,VIR_MIGRATE_PERSIST_DEST"
Finally, restart the affected services on their respective nodes:
sudo service openstack-glance-api restart
sudo service openstack-nova-compute restart
sudo service openstack-cinder-volume restart
sudo service openstack-cinder-backup restart
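If a service fails to come back up, its log is the first place to look; for example (paths assume a default RDO-style layout):
sudo tail -n 50 /var/log/cinder/volume.log
sudo tail -n 50 /var/log/nova/compute.log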
After creating a volume and an image, check the contents of the rados pools on the dataprovider node:
rados -p images ls
rados -p volumes ls
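If the pools are still empty, nothing has been written yet; a test image and volume can be created with the old-style CLIs (the names and file below are hypothetical), after which objects should appear in the listings above:
glance image-create --name test-image --disk-format raw --container-format bare --file ./test.raw
cinder create --display-name test-volume 1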