By shaohua

准备:
1.免密登录
2.格式化磁盘
  mkfs.xfs -f /dev/sdb && mkfs.xfs -f /dev/sdc && mkfs.xfs -f /dev/sdd && mkfs.xfs -f /dev/sde && mkfs.xfs -f /dev/sdf && mkfs.xfs -f /dev/sdg && mkfs.xfs -f /dev/sdh && mkfs.xfs -f /dev/sdi && mkfs.xfs -f /dev/sdj && mkfs.xfs -f /dev/sdk && mkfs.xfs -f /dev/sdl && mkfs.xfs -f /dev/sdm
3.时间同步
  service ntpd restart
  ntpq -p
  ntpstat
安装:
rpm -ivh http://download.ceph.com/rpm-hammer/el7/noarch/ceph-release-1-1.el7.noarch.rpm
mkdir /etc/ceph
cd /etc/ceph/
yum -y install ceph-deploy
ceph-deploy new bops-10-183-93-173 bops-10-183-93-174 bops-10-183-93-175
ceph-deploy install bops-10-183-93-173 bops-10-183-93-174 bops-10-183-93-175 (如果安装不上每个节点都导入以上yum源再执行 yum -y install ceph ceph-radosgw )
ceph-deploy --overwrite-conf mon create-initial
( ceph-deploy disk list bops-10-183-93-173  查看可用于ceph的磁盘列表,特别注意不要使用系统盘 )
ceph-deploy disk zap bops-10-183-93-173:/dev/sdb bops-10-183-93-173:/dev/sdc bops-10-183-93-173:/dev/sdd bops-10-183-93-173:/dev/sde bops-10-183-93-173:/dev/sdf bops-10-183-93-173:/dev/sdg bops-10-183-93-173:/dev/sdh bops-10-183-93-173:/dev/sdi bops-10-183-93-173:/dev/sdj bops-10-183-93-173:/dev/sdk bops-10-183-93-173:/dev/sdl bops-10-183-93-173:/dev/sdm bops-10-183-93-174:/dev/sdb bops-10-183-93-174:/dev/sdc bops-10-183-93-174:/dev/sdd bops-10-183-93-174:/dev/sde bops-10-183-93-174:/dev/sdf bops-10-183-93-174:/dev/sdg bops-10-183-93-174:/dev/sdh bops-10-183-93-174:/dev/sdi bops-10-183-93-174:/dev/sdj bops-10-183-93-174:/dev/sdk bops-10-183-93-174:/dev/sdl bops-10-183-93-174:/dev/sdm bops-10-183-93-175:/dev/sdb bops-10-183-93-175:/dev/sdc bops-10-183-93-175:/dev/sdd bops-10-183-93-175:/dev/sde bops-10-183-93-175:/dev/sdf bops-10-183-93-175:/dev/sdg bops-10-183-93-175:/dev/sdh bops-10-183-93-175:/dev/sdi bops-10-183-93-175:/dev/sdj bops-10-183-93-175:/dev/sdk bops-10-183-93-175:/dev/sdl bops-10-183-93-175:/dev/sdm
ceph-deploy osd create bops-10-183-93-173:/dev/sdb bops-10-183-93-173:/dev/sdc bops-10-183-93-173:/dev/sdd bops-10-183-93-173:/dev/sde bops-10-183-93-173:/dev/sdf bops-10-183-93-173:/dev/sdg bops-10-183-93-173:/dev/sdh bops-10-183-93-173:/dev/sdi bops-10-183-93-173:/dev/sdj bops-10-183-93-173:/dev/sdk bops-10-183-93-173:/dev/sdl bops-10-183-93-173:/dev/sdm bops-10-183-93-174:/dev/sdb bops-10-183-93-174:/dev/sdc bops-10-183-93-174:/dev/sdd bops-10-183-93-174:/dev/sde bops-10-183-93-174:/dev/sdf bops-10-183-93-174:/dev/sdg bops-10-183-93-174:/dev/sdh bops-10-183-93-174:/dev/sdi bops-10-183-93-174:/dev/sdj bops-10-183-93-174:/dev/sdk bops-10-183-93-174:/dev/sdl bops-10-183-93-174:/dev/sdm bops-10-183-93-175:/dev/sdb bops-10-183-93-175:/dev/sdc bops-10-183-93-175:/dev/sdd bops-10-183-93-175:/dev/sde bops-10-183-93-175:/dev/sdf bops-10-183-93-175:/dev/sdg bops-10-183-93-175:/dev/sdh bops-10-183-93-175:/dev/sdi bops-10-183-93-175:/dev/sdj bops-10-183-93-175:/dev/sdk bops-10-183-93-175:/dev/sdl bops-10-183-93-175:/dev/sdm
(如果ceph -s 显示 osd没有up 重启每台服务器)
ceph-deploy --overwrite-conf admin bops-10-183-93-173 bops-10-183-93-174 bops-10-183-93-175

查看ceph集群状态和信息

ceph -s
ceph osd tree
ceph osd pool get rbd pg_num
ceph osd lspools
ceph osd pool get rbd size
ceph osd pool get rbd min_size


ceph osd pool set rbd pg_num 1024
ceph osd pool set rbd pgp_num 1024

ceph-deploy --overwrite-conf mds create bops-10-183-93-173 bops-10-183-93-174 bops-10-183-93-175
( 如果osd数量有异常或者6789端口没起来需重启每台服务器 )

ceph osd pool create cephfs_data 1024
ceph osd pool create cephfs_metadata 1024
ceph fs new leadorfs cephfs_metadata cephfs_data

ceph mds stat
ceph fs ls
ceph osd tree

ceph auth get-or-create client.cephfs mon 'allow r' osd 'allow rwx pool=cephfs_metadata,allow rwx pool=cephfs_data' -o /etc/ceph/client.cephfs.keyring
ceph-authtool -p -n client.cephfs /etc/ceph/client.cephfs.keyring > /etc/ceph/client.cephfs
cat client.cephfs
ceph-deploy --overwrite-conf admin bops-10-183-93-173 bops-10-183-93-174 bops-10-183-93-175

客户端挂载

如果内核>=2.6.34可直接挂载
mkdir -p /mnt/cephfs
获取秘钥
ceph auth get-key client.cephfs
挂载
mount -t ceph 10.183.93.173,10.183.93.174,10.183.93.175:/ /mnt/cephfs -o name=cephfs,secret=AQDpfttY/RjrDhAAzYXiSjTENH4TBYQC1Qvt2w==
(注意: secret 直接写在命令行会泄露到 shell 历史和 mount 输出中,建议改用 secretfile 方式挂载,即 -o name=cephfs,secretfile=/etc/ceph/client.cephfs,秘钥文件已在前面用 ceph-authtool 生成)
如果内核<2.6.34,使用ceph-fuse挂载



块设备挂载
ceph-deploy install  10-149-11-8 --repo-url=http://mirrors.aliyun.com/ceph/rpm-hammer/el7/ --gpg-url=http://mirrors.aliyun.com/centos/RPM-GPG-KEY-CentOS-7
(如果安装不上每个节点都导入以上yum源再执行 yum -y install ceph ceph-radosgw)
ceph-deploy config push 10-149-11-8
ceph auth get-or-create client.rbd mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=rbd'
ceph auth get-or-create client.rbd | ssh root@10-149-11-8 tee /etc/ceph/ceph.client.rbd.keyring
ceph auth list

挂载块设备的客户端

内核>=2.6.34
modprobe rbd
cat /etc/ceph/ceph.client.rbd.keyring >> /etc/ceph/keyring
由于没有使用默认用户client.admin,我们必须提供用户名来连接Ceph集群
ceph -s --name client.rbd

创建Ceph RBD
rbd create rbd1 --size 102400 --name client.rbd
列出rbd镜像,保存块的存储池是"rbd",也可通过rbd命令-p选项指定一个存储池
rbd ls --name client.rbd
rbd ls -p rbd --name client.rbd
rbd list --name client.rbd
查看rbd镜像细节
rbd --image rbd1 info --name client.rbd
映射Ceph块设备
rbd map --image rbd1 --name client.rbd
查看映射的设备
rbd showmapped --name client.rbd
使用块设备
fdisk -l /dev/rbd1
mkfs.xfs /dev/rbd1
mkdir /mnt/ceph-disk1
mount /dev/rbd1 /mnt/ceph-disk1
df -h /mnt/ceph-disk1

开机重启自动映射该块设备,并自动挂载
wget https://raw.githubusercontent.com/ksingh7/ceph-cookbook/master/rbdmap -O /etc/init.d/rbdmap
chmod +x /etc/init.d/rbdmap
chkconfig --add rbdmap
chkconfig --list
修改rbdmap
[root@bops-10-183-93-172 ~]# cat /etc/ceph/rbdmap 
# RbdDevice        Parameters
#poolname/imagename    id=client,keyring=/etc/ceph/ceph.client.keyring
rbd/rbd1                id=rbd,keyring=/etc/ceph/keyring
[root@bops-10-183-93-172 ~]#
修改/etc/fstab,添加
/dev/rbd0     /mnt/ceph-rbd0          xfs     defaults,_netdev 0 0
(注意: 这里是 /dev/rbd0 而不是前面手动 map 得到的 /dev/rbd1 —— 开机时由 rbdmap 按配置文件顺序重新映射,rbdmap 中只有 rbd/rbd1 一个镜像时它会成为 /dev/rbd0;如配置了多个镜像,建议改用 udev 生成的固定路径 /dev/rbd/<pool>/<image>,需自行验证)
mkdir -p /mnt/ceph-rbd0
/etc/init.d/rbdmap start
/etc/init.d/rbdmap status


调整Ceph RBD大小
rbd resize --image rbd1 --size 204800 --name client.rbd
rbd info --image rbd1 --name client.rbd
扩展文件系统来利用增加了的空间,XFS支持在线调整大小
dmesg | grep -i capacity
xfs_growfs -d /mnt/ceph-disk1

如果内核<2.6.34,可通过ceph-fuse客户端挂载
rpm -Uvh http://download.ceph.com/rpm-giant/el6/noarch/ceph-release-1-0.el6.noarch.rpm
yum -y install ceph-fuse
如果安装不上可按如下直接安装
rpm -Uvh http://download.ceph.com/rpm-giant/el6/x86_64/ceph-fuse-0.87.2-0.el6.x86_64.rpm

创建CephFS秘钥文件 /etc/ceph/client.cephfs.keyring内容如下
[client.cephfs]
key = AQDpfttY/RjrDhAAzYXiSjTENH4TBYQC1Qvt2w==


挂载:
ceph-fuse --keyring /etc/ceph/client.cephfs.keyring --name client.cephfs -m 10.149.11.143:6789,10.149.11.144:6789,10.149.11.145:6789 /cephfs


参考网站:
http://www.vpsee.com/2015/07/install-ceph-on-centos-7/
Copyright © opschina.org 2017 with zzlyzq@gmail.com all rights reserved, powered by GitBook。该文件修订时间: 2017-09-23 17:40:08

results matching ""

    No results matching ""