Press "Enter" to skip to content

Ceph & Docker Swarm rexray/rbd driver install script

Ceph cluster installation script

The example script targets three machines. Configure the hostname on all three hosts in advance, set up passwordless SSH from the master node to the other two nodes, and then run the script on the master node.
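For reference, the prerequisite steps might look like the following (a minimal sketch, assuming the hostnames ceph-1/ceph-2/ceph-3 used throughout the script):

# on each machine, set its hostname (run the matching line on each node)
hostnamectl set-hostname ceph-1   # node 1
hostnamectl set-hostname ceph-2   # node 2
hostnamectl set-hostname ceph-3   # node 3

# on the master node, generate a key and push it to the other two nodes
ssh-keygen -t rsa
ssh-copy-id root@ceph-2
ssh-copy-id root@ceph-3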

Things in the script you may need to modify:

1. If your cluster is not 3 machines, quite a few places need adjusting, so be careful.

2. Adjust the hostnames in the script.

3. Adjust the host IPs in the script.

4. Adjust the target disks in the script if your OSD disk is not /dev/sdb.

#!/bin/bash

# Passwordless SSH access to the other nodes must be configured in advance:
# ssh-keygen -t rsa
# ssh-copy-id root@ceph-2
# ssh-copy-id root@ceph-3

# configuration start
# master node IP
ceph_1_ip="10.255.255.141"
# master node hostname
ceph_1_name="ceph-1"
# secondary node IP
ceph_2_ip="10.255.255.142"
# secondary node hostname
ceph_2_name="ceph-2"
# secondary node IP
ceph_3_ip="10.255.255.143"
# secondary node hostname
ceph_3_name="ceph-3"
# subnet (Ceph public network)
sub_network="10.255.255.0/24"

# node 1: target disk(s) for the OSDs
ceph_1_dev=('/dev/sdb')
# node 2: target disk(s) for the OSDs
ceph_2_dev=('/dev/sdb')
# node 3: target disk(s) for the OSDs
ceph_3_dev=('/dev/sdb')

# node 1: target partitions for the BlueStore db & wal (only used by the commented-out OSD section below)
ceph_1_dev_journal=('sdb1' 'sdb2' 'sdb3' 'sdb4')
# node 2: target partitions for the BlueStore db & wal
ceph_2_dev_journal=('sdb1' 'sdb2' 'sdb3' 'sdb4')
# node 3: target partitions for the BlueStore db & wal
ceph_3_dev_journal=('sdb1' 'sdb2' 'sdb3' 'sdb4')
# configuration end

DEPLOY_DIR=$(cd `dirname $0`; pwd)

echo "配置节点hosts....."
echo "$ceph_1_ip $ceph_1_name" >> /etc/hosts
echo "$ceph_2_ip $ceph_2_name" >> /etc/hosts
echo "$ceph_3_ip $ceph_3_name" >> /etc/hosts
ssh root@$ceph_2_name "echo '$ceph_1_ip $ceph_1_name' >> /etc/hosts;echo '$ceph_2_ip $ceph_2_name' >> /etc/hosts;echo '$ceph_3_ip $ceph_3_name' >> /etc/hosts"
ssh root@$ceph_3_name "echo '$ceph_1_ip $ceph_1_name' >> /etc/hosts;echo '$ceph_2_ip $ceph_2_name' >> /etc/hosts;echo '$ceph_3_ip $ceph_3_name' >> /etc/hosts"

# disable SELinux
echo "Disabling SELinux....."
sed -i 's/SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
setenforce 0
ssh root@$ceph_2_name "sed -i 's/SELINUX=.*/SELINUX=disabled/' /etc/selinux/config;setenforce 0"
ssh root@$ceph_3_name "sed -i 's/SELINUX=.*/SELINUX=disabled/' /etc/selinux/config;setenforce 0"

# disable the firewall
echo "Disabling the firewall....."
systemctl stop firewalld && systemctl disable firewalld
ssh root@$ceph_2_name "systemctl stop firewalld;systemctl disable firewalld"
ssh root@$ceph_3_name "systemctl stop firewalld;systemctl disable firewalld"

# configure the base yum repos
# (equivalent manual steps, to be run on each node:)
echo "Configuring yum repos....."
# yum clean all
# yum install wget -y
# rm -rf /etc/yum.repos.d/*.repo
# wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
# wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo
# sed -i '/aliyuncs/d' /etc/yum.repos.d/CentOS-Base.repo
# sed -i '/aliyuncs/d' /etc/yum.repos.d/epel.repo


#use curl
rm -rf /etc/yum.repos.d/*.repo
curl -o /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
curl -o /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo
sed -i '/aliyuncs/d' /etc/yum.repos.d/CentOS-Base.repo
sed -i '/aliyuncs/d' /etc/yum.repos.d/epel.repo



# configure the (remote) ceph yum repo
# manual equivalent on each node: edit /etc/yum.repos.d/ceph.repo and paste in the content below
cat <<EOT >> /etc/yum.repos.d/ceph.repo
[ceph]
name=ceph
baseurl=http://mirrors.aliyun.com/ceph/rpm-mimic/el7/x86_64/
gpgcheck=0
[ceph-noarch]
name=cephnoarch
baseurl=http://mirrors.aliyun.com/ceph/rpm-mimic/el7/noarch/
gpgcheck=0
EOT
yum makecache

ssh root@$ceph_2_name "yum clean all"
ssh root@$ceph_3_name "yum clean all"
scp -r /etc/yum.repos.d/* root@$ceph_2_name:/etc/yum.repos.d/
scp -r /etc/yum.repos.d/* root@$ceph_3_name:/etc/yum.repos.d/
ssh root@$ceph_2_name "yum makecache"
ssh root@$ceph_3_name "yum makecache"

# install and start NTP
echo "Installing NTP....."
yum -y install ntp
systemctl enable ntpd
systemctl restart ntpd
ntpdate -u cn.pool.ntp.org

ssh root@$ceph_2_name "yum install ntp -y"
ssh root@$ceph_3_name "yum install ntp -y"
ssh root@$ceph_2_name "systemctl enable ntpd;systemctl restart ntpd"
ssh root@$ceph_3_name "systemctl enable ntpd;systemctl restart ntpd"
ssh root@$ceph_2_name "ntpdate -u cn.pool.ntp.org"
ssh root@$ceph_3_name "ntpdate -u cn.pool.ntp.org"

# install ceph
echo "Installing ceph....."
yum install ceph -y
ssh root@$ceph_2_name "yum install ceph -y"
ssh root@$ceph_3_name "yum install ceph -y"

# install ceph-deploy (master node only)
echo "Installing ceph-deploy....."
yum install ceph-deploy -y

# deploy ceph
echo "Deploying ceph....."
mkdir -p ~/cluster
cd ~/cluster
CLUSTER_DIR=$(cd `dirname $0`; pwd)
ceph-deploy new $ceph_1_name $ceph_2_name $ceph_3_name
echo "public_network=$sub_network" >> ceph.conf
#echo "osd_crush_update_on_start = false" >> ceph.conf

ceph-deploy mon create-initial
ceph-deploy admin $ceph_1_name $ceph_2_name $ceph_3_name

# deploy the OSDs
echo "Deploying OSDs....."
# The commented-out block below is an alternative that puts the BlueStore db/wal on the
# partitions listed in the *_dev_journal arrays; the active loops further down keep
# everything on the data disk.
# index=0
# for dev_name in ${ceph_1_dev[@]}
# do
# ceph-volume lvm zap /dev/$dev_name
# ceph-deploy osd create $ceph_1_name --bluestore --data /dev/$dev_name --block-db /dev/${ceph_1_dev_journal[$index]} --block-wal /dev/${ceph_1_dev_journal[$index+1]}
# index=$[$index+2]
# done
# index=0
# for dev_name in ${ceph_2_dev[@]}
# do
# ssh root@$ceph_2_name "ceph-volume lvm zap /dev/$dev_name"
# ceph-deploy osd create $ceph_2_name --bluestore --data /dev/$dev_name --block-db /dev/${ceph_2_dev_journal[$index]} --block-wal /dev/${ceph_2_dev_journal[$index+1]}
# index=$[$index+2]
# done
# index=0
# for dev_name in ${ceph_3_dev[@]}
# do
# ssh root@$ceph_3_name "ceph-volume lvm zap /dev/$dev_name"
# ceph-deploy osd create $ceph_3_name --bluestore --data /dev/$dev_name --block-db /dev/${ceph_3_dev_journal[$index]} --block-wal /dev/${ceph_3_dev_journal[$index+1]}
# index=$[$index+2]
# done

for dev_name in "${ceph_1_dev[@]}"
do
    ceph-deploy disk zap $ceph_1_name $dev_name
    ceph-deploy osd create --data $dev_name $ceph_1_name
done
for dev_name in "${ceph_2_dev[@]}"
do
    ceph-deploy disk zap $ceph_2_name $dev_name
    ceph-deploy osd create --data $dev_name $ceph_2_name
done
for dev_name in "${ceph_3_dev[@]}"
do
    ceph-deploy disk zap $ceph_3_name $dev_name
    ceph-deploy osd create --data $dev_name $ceph_3_name
done

# deploy the mgr daemons
echo "Deploying mgr....."
ceph-deploy mgr create $ceph_1_name $ceph_2_name $ceph_3_name

# enable the dashboard
ceph mgr module enable dashboard
ceph config set mgr mgr/dashboard/ssl false
ceph config set mgr mgr/dashboard/server_port 7000
ceph dashboard set-login-credentials admin Spring01
ceph dashboard create-self-signed-cert
ceph config set mgr mgr/dashboard/server_addr $ceph_1_ip

# restart the dashboard module so the settings above take effect
ceph mgr module disable dashboard
ceph mgr module enable dashboard
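# The dashboard should now be reachable at http://$ceph_1_ip:7000
# (login admin / Spring01 as configured above).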

# deploy CephFS
echo "Deploying CephFS....."
ceph-deploy mds create $ceph_1_name $ceph_2_name $ceph_3_name
ceph osd pool create cephfs_data 128
ceph osd pool create cephfs_metadata 64
ceph fs new cephfs cephfs_metadata cephfs_data

mkdir /mnt/mycephfs
admin_key=`ceph auth get-key client.admin`
admin_key_base64=`ceph auth get-key client.admin |base64`
# give the MDS daemons a moment to come up before mounting
sleep 5
mount -t ceph $ceph_1_name:6789,$ceph_2_name:6789,$ceph_3_name:6789:/ /mnt/mycephfs -o name=admin,secret=$admin_key
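# Optional sanity check: the CephFS mount should now show the cluster's capacity
df -h /mnt/mycephfs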



# initialize the default rbd pool (PG counts are usually chosen as powers of two, e.g. 64)
pg_num=50
pgp_num=50

ceph osd pool create rbd $pg_num $pgp_num
rbd pool init rbd


echo "部署完成....."

Installing the Ceph client and rexray/rbd driver on the Swarm nodes

The keyring and ceph.conf embedded in the script below must be copied over from the Ceph cluster (the values shown are from the example cluster above and need to be replaced with your own). If security is a concern, create a dedicated Ceph client keyring instead of reusing client.admin.
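For example, they could be pulled from the cluster's master node, or a restricted client could be created on the cluster side (a sketch; the client name client.rexray is only an example, and using a non-admin client may require extra rexray configuration):

# copy the existing config and admin keyring from the cluster
scp root@ceph-1:/etc/ceph/ceph.conf /etc/ceph/
scp root@ceph-1:/etc/ceph/ceph.client.admin.keyring /etc/ceph/

# or, on the cluster, create a dedicated client limited to the rbd pool
ceph auth get-or-create client.rexray mon 'profile rbd' osd 'profile rbd pool=rbd'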

# create ceph config directory
mkdir -p /etc/ceph/

# write the ceph admin keyring (replace the key with the one from your cluster)
tee /etc/ceph/ceph.client.admin.keyring <<-'EOF'
[client.admin]
        key = AQCvmNRbYU1MNxAA6BZR/ORV+kK3e8O7p1dAuQ==
EOF

# write ceph.conf (fsid and mon_host must match your cluster)
tee /etc/ceph/ceph.conf <<-'EOF'
[global]
fsid = 81c49ab9-048a-4c95-b075-1259eb5a2c6d
mon_initial_members = ceph-1, ceph-2, ceph-3
mon_host = 10.255.255.141,10.255.255.142,10.255.255.143
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx

public_network=10.255.255.0/24


EOF


# install ceph-common

yum clean all
yum install wget -y
rm -rf /etc/yum.repos.d/*.repo
wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo
sed -i '/aliyuncs/d' /etc/yum.repos.d/CentOS-Base.repo
sed -i '/aliyuncs/d' /etc/yum.repos.d/epel.repo


# configure the (remote) ceph yum repo
# manual equivalent: edit /etc/yum.repos.d/ceph.repo and paste in the content below
cat <<EOT >> /etc/yum.repos.d/ceph.repo
[ceph]
name=ceph
baseurl=http://mirrors.aliyun.com/ceph/rpm-mimic/el7/x86_64/
gpgcheck=0
[ceph-noarch]
name=cephnoarch
baseurl=http://mirrors.aliyun.com/ceph/rpm-mimic/el7/noarch/
gpgcheck=0
EOT

yum makecache

yum -y install ceph-common


# install rexray
curl -sSL https://rexray.io/install | sh

tee /etc/rexray/config.yml <<-'EOF'
rexray:
  logLevel:        debug
libstorage:
  logging:
    level:         debug
    httpRequests:  true
    httpResponses: true
  service: rbd
rbd:
  defaultPool: rbd
EOF


systemctl start rexray
systemctl enable rexray
systemctl status rexray
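
# Optional sanity check: if rexray can reach the cluster, this should list volumes in the rbd pool
rexray volume ls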

# install rexray/rbd docker plugin
docker plugin install rexray/rbd RBD_DEFAULTPOOL=rbd LINUX_VOLUME_FILEMODE=0777
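
With the plugin installed on each Swarm node, RBD-backed volumes can be created and attached to services. A minimal usage sketch follows (the volume and service names are only examples; the rexray/rbd driver interprets size in GB):

# create a 1 GB RBD-backed volume
docker volume create -d rexray/rbd -o size=1 demo-vol

# use it from a Swarm service; the RBD image is attached on whichever node runs the task
docker service create --name demo \
  --mount type=volume,source=demo-vol,target=/data,volume-driver=rexray/rbd \
  nginx:alpine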