
ceph-deploy: a one-click script to build a Ceph cluster

A quick overview of the machines:

Three hosts running CentOS Linux release 7.5.1804 (Core), minimal install.

The IPs are 10.255.255.141 (ceph-1), 10.255.255.142 (ceph-2), and 10.255.255.143 (ceph-3).
10.255.255.141 is the primary deployment node.

Before deploying, manually set the hostname on each of the three machines:

ceph-1#hostnamectl set-hostname --static ceph-1
ceph-2#hostnamectl set-hostname --static ceph-2
ceph-3#hostnamectl set-hostname --static ceph-3
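
Passwordless SSH from ceph-1 into ceph-2 and ceph-3 must also be set up before running the script (the same commands appear in the script's header comments), for example:

ceph-1#ssh-keygen -t rsa
ceph-1#ssh-copy-id root@ceph-2
ceph-1#ssh-copy-id root@ceph-3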

Then run the following script on the primary node:

#!/bin/bash

#Passwordless SSH to the other nodes must be configured in advance
# ssh-keygen -t rsa
# ssh-copy-id root@ceph-2
# ssh-copy-id root@ceph-3

#Configuration start
#Primary node IP
ceph_1_ip="10.255.255.141"
#Primary node hostname
ceph_1_name="ceph-1"
#Secondary node IP
ceph_2_ip="10.255.255.142"
#Secondary node hostname
ceph_2_name="ceph-2"
#Secondary node IP
ceph_3_ip="10.255.255.143"
#Secondary node hostname
ceph_3_name="ceph-3"
#Subnet (public network)
sub_network="10.255.255.0/24"


############Warning: any volumes mounted on the target disks must be unmounted beforehand, e.g. sudo umount /dev/sdb*
#Node 1: target disk(s) for OSDs
ceph_1_dev=('/dev/sdb')
#Node 2: target disk(s) for OSDs
ceph_2_dev=('/dev/sdb')
#Node 3: target disk(s) for OSDs
ceph_3_dev=('/dev/sdb')

#Node 1: target partitions for BlueStore DB & WAL (optional, used by the commented-out loop below)
# ceph_1_dev_journal=('sdb1' 'sdb2' 'sdb3' 'sdb4')
#Node 2: target partitions for BlueStore DB & WAL
# ceph_2_dev_journal=('sdb1' 'sdb2' 'sdb3' 'sdb4')
#Node 3: target partitions for BlueStore DB & WAL
# ceph_3_dev_journal=('sdb1' 'sdb2' 'sdb3' 'sdb4')
#Configuration end

DEPLOY_DIR=$(cd `dirname $0`; pwd)

echo "配置节点hosts....."
# echo "$ceph_1_ip $ceph_1_name" >> /etc/hosts
# echo "$ceph_2_ip $ceph_2_name" >> /etc/hosts
# echo "$ceph_3_ip $ceph_3_name" >> /etc/hosts
ssh root@$ceph_2_name "echo '$ceph_1_ip $ceph_1_name' >> /etc/hosts;echo '$ceph_2_ip $ceph_2_name' >> /etc/hosts;echo '$ceph_3_ip $ceph_3_name' >> /etc/hosts"
ssh root@$ceph_3_name "echo '$ceph_1_ip $ceph_1_name' >> /etc/hosts;echo '$ceph_2_ip $ceph_2_name' >> /etc/hosts;echo '$ceph_3_ip $ceph_3_name' >> /etc/hosts"

#Disable SELinux
echo "Disabling SELinux....."
sed -i 's/SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
setenforce 0
ssh root@$ceph_2_name "sed -i 's/SELINUX=.*/SELINUX=disabled/' /etc/selinux/config;setenforce 0"
ssh root@$ceph_3_name "sed -i 's/SELINUX=.*/SELINUX=disabled/' /etc/selinux/config;setenforce 0"

#Disable the firewall
echo "Disabling firewalld....."
systemctl stop firewalld && systemctl disable firewalld
ssh root@$ceph_2_name "systemctl stop firewalld;systemctl disable firewalld"
ssh root@$ceph_3_name "systemctl stop firewalld;systemctl disable firewalld"

#Configure the base yum repos
#(originally a per-node manual step; here it runs on the admin node and the repo files are copied to the other nodes below)
echo "Configuring yum repos....."
yum clean all
yum install wget -y
rm -rf /etc/yum.repos.d/*.repo
wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo
sed -i '/aliyuncs/d' /etc/yum.repos.d/CentOS-Base.repo
sed -i '/aliyuncs/d' /etc/yum.repos.d/epel.repo


#Configure the Ceph mirror yum repo
#(done manually, this would mean putting the following content into /etc/yum.repos.d/ceph.repo on every node; here the file is written locally and copied to the other nodes below)
cat <<EOT >> /etc/yum.repos.d/ceph.repo
[ceph]
name=ceph
baseurl=http://mirrors.aliyun.com/ceph/rpm-luminous/el7/x86_64/
gpgcheck=0
[ceph-noarch]
name=cephnoarch
baseurl=http://mirrors.aliyun.com/ceph/rpm-luminous/el7/noarch/
gpgcheck=0
EOT
yum makecache

ssh root@$ceph_2_name "yum clean all"
ssh root@$ceph_3_name "yum clean all"
scp -r /etc/yum.repos.d/* root@$ceph_2_name:/etc/yum.repos.d/
scp -r /etc/yum.repos.d/* root@$ceph_3_name:/etc/yum.repos.d/ 
ssh root@$ceph_2_name "yum makecache"
ssh root@$ceph_3_name "yum makecache"

#Install NTP
echo "Installing NTP....."
yum -y install ntp
systemctl enable ntpd
systemctl restart ntpd
ntpdate -u cn.pool.ntp.org

ssh root@$ceph_2_name "yum install ntp -y"
ssh root@$ceph_3_name "yum install ntp -y"
ssh root@$ceph_2_name "systemctl enable ntpd;systemctl restart ntpd"
ssh root@$ceph_3_name "systemctl enable ntpd;systemctl restart ntpd"
ssh root@$ceph_2_name "ntpdate -u cn.pool.ntp.org"
ssh root@$ceph_3_name "ntpdate -u cn.pool.ntp.org"

#Install Ceph
echo "Installing ceph....."
yum install ceph -y
ssh root@$ceph_2_name "yum install ceph -y"
ssh root@$ceph_3_name "yum install ceph -y"

#Install ceph-deploy
echo "Installing ceph-deploy....."
yum install ceph-deploy -y

#Deploy Ceph
echo "Deploying ceph....."
mkdir -p ~/cluster
cd ~/cluster
CLUSTER_DIR=$(cd `dirname $0`; pwd)
ceph-deploy new $ceph_1_name $ceph_2_name $ceph_3_name
echo "public_network=$sub_network" >> ceph.conf
#echo "osd_crush_update_on_start = false" >> ceph.conf

ceph-deploy mon create-initial
ceph-deploy admin $ceph_1_name $ceph_2_name $ceph_3_name
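#Optional check (my addition, not in the original script): the three monitors should now form a quorum
# ceph mon stat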

#Configure OSDs
echo "Deploying OSDs....."
# index=0
# for dev_name in ${ceph_1_dev[@]}
# do
# ceph-volume lvm zap /dev/$dev_name
# ceph-deploy osd create $ceph_1_name --bluestore --data /dev/$dev_name --block-db /dev/${ceph_1_dev_journal[$index]} --block-wal /dev/${ceph_1_dev_journal[$index+1]}
# index=$[$index+2]
# done
# index=0
# for dev_name in ${ceph_2_dev[@]}
# do
# ssh root@$ceph_2_name "ceph-volume lvm zap /dev/$dev_name"
# ceph-deploy osd create $ceph_2_name --bluestore --data /dev/$dev_name --block-db /dev/${ceph_2_dev_journal[$index]} --block-wal /dev/${ceph_2_dev_journal[$index+1]}
# index=$[$index+2]
# done
# index=0
# for dev_name in ${ceph_3_dev[@]}
# do
# ssh root@$ceph_3_name "ceph-volume lvm zap /dev/$dev_name"
# ceph-deploy osd create $ceph_3_name --bluestore --data /dev/$dev_name --block-db /dev/${ceph_3_dev_journal[$index]} --block-wal /dev/${ceph_3_dev_journal[$index+1]}
# index=$[$index+2]
# done

for dev_name in ${ceph_1_dev[@]}
do
ceph-deploy disk zap $ceph_1_name $dev_name
ceph-deploy osd create --data $dev_name $ceph_1_name
done
for dev_name in ${ceph_2_dev[@]}
do
ceph-deploy disk zap $ceph_2_name $dev_name
ceph-deploy osd create --data $dev_name $ceph_2_name
done
for dev_name in ${ceph_3_dev[@]}
do
ceph-deploy disk zap $ceph_3_name $dev_name
ceph-deploy osd create --data $dev_name $ceph_3_name
done
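#Optional check (my addition, not in the original script): all three OSDs should show up and in
# ceph osd tree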

#Deploy mgr daemons
echo "Deploying mgr....."
ceph-deploy mgr create $ceph_1_name $ceph_2_name $ceph_3_name

#Enable the dashboard module
ceph mgr module enable dashboard
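#Note (my addition, not in the original script): on Luminous the dashboard is served by the active mgr
#on TCP port 7000 by default; "ceph mgr services" prints the exact URL
# ceph mgr services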

#Deploy CephFS MDS daemons
echo "Deploying cephfs....."
ceph-deploy mds create $ceph_1_name $ceph_2_name $ceph_3_name
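#Note (my addition, not in the original script): the MDS daemons stay in standby until a filesystem
#is created (see the commented-out pool/fs commands below)
# ceph mds stat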


# ceph osd pool create cephfs_data 128
# ceph osd pool create cephfs_metadata 64
# ceph fs new cephfs cephfs_metadata cephfs_data

# mkdir /mnt/mycephfs  
# admin_key=`ceph auth get-key client.admin`
# admin_key_base64=`ceph auth get-key client.admin |base64`
# sleep 5
# #wait until the MDS deployment has finished before mounting
# mount -t ceph $ceph_1_name:6789,$ceph_2_name:6789,$ceph_3_name:6789:/ /mnt/mycephfs -o name=admin,secret=$admin_key


#Initialize the rbd pool
ceph osd pool create rbd 128
rbd pool init rbd
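#Example only (my addition; the image name is illustrative): create a test image and map it on this node
# rbd create rbd/test01 --size 1G
# rbd feature disable rbd/test01 object-map fast-diff deep-flatten   #the el7 kernel client does not support these features
# rbd map rbd/test01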

echo "部署完成....."

Reference 1: https://www.jianshu.com/p/5ca7990db0f1
Reference 2: http://docs.ceph.com/docs/mimic/rados/deployment/ceph-deploy-osd/
Reference 3: http://docs.ceph.org.cn/rados/deployment/
Reference 4: https://www.jianshu.com/p/396bf436275a
