Installing Ceph

Posted by y_zilong

#Configure hostname resolution (a loop for pushing this file to the other nodes is sketched after the block below)
root@ceph-deploy:~# cat /etc/hosts
127.0.0.1	localhost
127.0.1.1	ubuntu.yzl.cn	ubuntu

# The following lines are desirable for IPv6 capable hosts
::1     localhost ip6-localhost ip6-loopback
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters

10.0.0.131 ceph-deploy.example.local ceph-deploy
10.0.0.132 ceph-mon1.example.local ceph-mon1
10.0.0.133 ceph-mon2.example.local ceph-mon2
10.0.0.134 ceph-mon3.example.local ceph-mon3
10.0.0.135 ceph-mgr1.example.local ceph-mgr1
10.0.0.136 ceph-mgr2.example.local ceph-mgr2
10.0.0.137 ceph-node1.example.local ceph-node1
10.0.0.138 ceph-node2.example.local ceph-node2
10.0.0.139 ceph-node3.example.local ceph-node3
10.0.0.140 ceph-node4.example.local ceph-node4
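The same hosts file has to end up on every node. Once SSH access to the other hosts is available (key-based logins are configured later in this walkthrough), it can be pushed with a small loop; a sketch using the hostnames from the table above and assuming root SSH access to each host:

#push /etc/hosts to all other cluster nodes (assumes root SSH access to each host)
for host in ceph-mon{1..3} ceph-mgr{1..2} ceph-node{1..4}; do
    scp /etc/hosts root@${host}:/etc/hosts
done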

#Configure the Ceph APT repository on every node
wget -q -O- 'https://mirrors.tuna.tsinghua.edu.cn/ceph/keys/release.asc' | sudo apt-key add -

#Ubuntu 18.04
echo "deb https://mirrors.tuna.tsinghua.edu.cn/ceph/debian-pacific bionic main" | sudo tee -a /etc/apt/sources.list

#Ubuntu 20.04
echo "deb https://mirrors.tuna.tsinghua.edu.cn/ceph/debian-pacific focal main" | sudo tee -a /etc/apt/sources.list

#refresh the package index after adding the entry for your release
sudo apt update

#Create the Ceph deployment user
A dedicated, unprivileged user is recommended for deploying and running the Ceph cluster.
Create the cephuser account on the ceph-deploy node and on every storage (node), mon and mgr host:
groupadd  -r -g 2022 cephuser && useradd -r -m  -s /bin/bash  -u 2022 -g 2022 cephuser && echo cephuser:123456 | chpasswd

echo "cephuser    ALL=(ALL)       NOPASSWD: ALL" >> /etc/sudoers

#Configure passwordless SSH login
root@ceph-deploy:~# su - cephuser 
cephuser@ceph-deploy:~$ ssh-keygen
cephuser@ceph-deploy:~$ ssh-copy-id cephuser@10.0.0.131
...
cephuser@ceph-deploy:~$ ssh-copy-id cephuser@10.0.0.140
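Copying the key to all ten hosts one by one gets tedious; the same can be done with a loop over the address range used above (each copy prompts for the cephuser password once):

#copy the cephuser public key to 10.0.0.131-140
for i in $(seq 131 140); do
    ssh-copy-id cephuser@10.0.0.${i}
done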

#Python 2 has to be installed separately on each Ubuntu server
root@ceph-deploy:~# apt install python2.7 -y
root@ceph-deploy:~# python2.7
Python 2.7.17 (default, Feb 27 2021, 15:10:58) 
[GCC 7.5.0] on linux2
Type "help", "copyright", "credits" or "license" for more information.
>>> quit()
root@ceph-deploy:~# ln -sv /usr/bin/python2.7 /usr/bin/python2
#Install the Ceph deployment tool
Install ceph-deploy on the deployment server:

root@ceph-deploy:~# apt-cache madison ceph-deploy
ceph-deploy |      2.0.1 | https://mirrors.tuna.tsinghua.edu.cn/ceph/debian-pacific bionic/main amd64 Packages
ceph-deploy |      2.0.1 | https://mirrors.tuna.tsinghua.edu.cn/ceph/debian-pacific bionic/main i386 Packages
ceph-deploy | 1.5.38-0ubuntu1 | https://mirrors.tuna.tsinghua.edu.cn/ubuntu bionic/universe amd64 Packages
ceph-deploy | 1.5.38-0ubuntu1 | https://mirrors.tuna.tsinghua.edu.cn/ubuntu bionic/universe i386 Packages
root@ceph-deploy:~# apt install -y ceph-deploy=2.0.1

#Initialize the mon nodes
Initialize the mon nodes from the admin (ceph-deploy) node:
root@ceph-deploy:~# su - cephuser 
cephuser@ceph-deploy:~$ mkdir ceph-cluster
cephuser@ceph-deploy:~$ id ceph
id: ‘ceph’: no such user

#1/Initialize the cluster base environment
cephuser@ceph-deploy:~$ cd ceph-cluster/
cephuser@ceph-deploy:~/ceph-cluster$ sudo ceph-deploy new --public-network 10.0.0.0/21  --cluster-network 192.168.36.0/21 ceph-mon1.example.local ceph-mon2.example.local ceph-mon3.example.local

#Initialize the node hosts (install the Ceph packages):
cephuser@ceph-deploy:~/ceph-cluster$ sudo ceph-deploy install --no-adjust-repos --nogpgcheck --release pacific ceph-node1 ceph-node2 ceph-node3

#2/Initialize the ceph-mon servers
root@ceph-mon1:~# apt-cache madison ceph-mon
  ceph-mon | 16.2.5-1bionic | https://mirrors.tuna.tsinghua.edu.cn/ceph/debian-pacific bionic/main amd64 Packages
  ceph-mon | 12.2.13-0ubuntu0.18.04.8 | https://mirrors.tuna.tsinghua.edu.cn/ubuntu bionic-updates/main amd64 Packages
  ceph-mon | 12.2.13-0ubuntu0.18.04.4 | https://mirrors.tuna.tsinghua.edu.cn/ubuntu bionic-security/main amd64 Packages
  ceph-mon | 12.2.4-0ubuntu1 | https://mirrors.tuna.tsinghua.edu.cn/ubuntu bionic/main amd64 Packages
root@ceph-mon1:~# apt install ceph-mon=16.2.5-1bionic -y
root@ceph-mon2:~# apt install ceph-mon=16.2.5-1bionic -y
root@ceph-mon3:~# apt install ceph-mon=16.2.5-1bionic -y

cephuser@ceph-deploy:~/ceph-cluster$ cat ceph.conf 
[global]
fsid = e31c4ac0-73bc-4a9f-93fe-c9374ac87919
public_network = 10.0.0.0/21
cluster_network = 192.168.36.0/21
mon_initial_members = ceph-mon1, ceph-mon2, ceph-mon3
mon_host = 10.0.0.132,10.0.0.133,10.0.0.134
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx

cephuser@ceph-deploy:~/ceph-cluster$ sudo ceph-deploy mon   create-initial
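When create-initial completes, ceph-deploy gathers the admin and bootstrap keyrings (ceph.client.admin.keyring and the ceph.bootstrap-* keyrings) into the working directory; a quick sanity check:

cephuser@ceph-deploy:~/ceph-cluster$ ls -1 *.keyring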

#Verify the mon node
root@ceph-mon1:~# ps -ef |grep mon
root        761      1  0 09:45 ?        00:00:00 /usr/lib/accountsservice/accounts-daemon
message+    765      1  0 09:45 ?        00:00:00 /usr/bin/dbus-daemon --system --address=systemd: --nofork --nopidfile --systemd-activation --syslog-only
daemon      810      1  0 09:45 ?        00:00:00 /usr/sbin/atd -f
ceph       5394      1  0 20:48 ?        00:00:00 /usr/bin/ceph-mon -f --cluster ceph --id ceph-mon1 --setuser ceph --setgroup ceph
root       5853   1584  0 20:49 pts/0    00:00:00 grep --color=auto mon
root@ceph-mon1:~# id ceph
uid=64045(ceph) gid=64045(ceph) groups=64045(ceph)

#Set up the ceph account created by the Ceph packages
root@ceph-deploy:~# echo "ceph   ALL=(ALL)       NOPASSWD: ALL" >> /etc/sudoers
cephuser@ceph-deploy:~$ sudo passwd ceph
cephuser@ceph-deploy:~$ ssh-copy-id ceph@10.0.0.131
cephuser@ceph-deploy:~$ ssh-copy-id ceph@10.0.0.132
cephuser@ceph-deploy:~$ ssh-copy-id ceph@10.0.0.133
cephuser@ceph-deploy:~$ ssh-copy-id ceph@10.0.0.134
cephuser@ceph-deploy:~$ ssh-copy-id ceph@10.0.0.137
cephuser@ceph-deploy:~$ ssh-copy-id ceph@10.0.0.138
cephuser@ceph-deploy:~$ ssh-copy-id ceph@10.0.0.139
#Distribute the admin keyring
From the ceph-deploy node, copy the cluster configuration file and the admin keyring to every node that needs to run ceph management commands, so that later administration with the ceph command does not require specifying the ceph-mon address and the ceph.client.admin.keyring file every time. The ceph-mon nodes also need the cluster configuration and authentication files kept in sync.
To manage the cluster from the ceph-deploy node itself:

cephuser@ceph-deploy:~/ceph-cluster$ sudo apt install ceph-common -y  #install the Ceph common client packages first

cephuser@ceph-deploy:~/ceph-cluster$ sudo ceph-deploy admin ceph-deploy #push the config and admin keyring to the deploy node itself
cephuser@ceph-deploy:~/ceph-cluster$ sudo ceph-deploy admin ceph-node1
cephuser@ceph-deploy:~/ceph-cluster$ sudo ceph-deploy admin ceph-node2
cephuser@ceph-deploy:~/ceph-cluster$ sudo ceph-deploy admin ceph-node3
cephuser@ceph-deploy:~/ceph-cluster$ sudo ceph-deploy admin ceph-node4
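ceph-deploy admin also accepts several hostnames at once, so the five pushes above can be collapsed into a single invocation with the same effect:

cephuser@ceph-deploy:~/ceph-cluster$ sudo ceph-deploy admin ceph-deploy ceph-node1 ceph-node2 ceph-node3 ceph-node4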

#Verify the keyring on the ceph node hosts
root@ceph-node1:~# ls -l /etc/ceph/
total 20
drwxr-xr-x  2 root root 4096 Aug 20 21:02 ./
drwxr-xr-x 94 root root 4096 Aug 20 21:05 ../
-rw-------  1 root root  151 Aug 20 21:02 ceph.client.admin.keyring
-rw-r--r--  1 root root  304 Aug 20 21:02 ceph.conf
-rw-r--r--  1 root root   92 Jul  8 22:17 rbdmap
-rw-------  1 root root    0 Aug 20 21:02 tmpaCN4eK

#For security, the keyring file's owner and group default to root. If the cephuser account should also be able to run ceph commands, it needs to be granted access to the keyring:
cephuser@ceph-deploy:~/ceph-cluster$ sudo setfacl -m u:cephuser:rw /etc/ceph/ceph.client.admin.keyring
root@ceph-node1:~# sudo setfacl -m u:cephuser:rw /etc/ceph/ceph.client.admin.keyring
root@ceph-node2:~# sudo setfacl -m u:cephuser:rw /etc/ceph/ceph.client.admin.keyring
root@ceph-node3:~# sudo setfacl -m u:cephuser:rw /etc/ceph/ceph.client.admin.keyring
root@ceph-node4:~# sudo setfacl -m u:cephuser:rw /etc/ceph/ceph.client.admin.keyring
#The mgr nodes also need to be able to read the Ceph configuration files, i.e. the files under /etc/ceph

#Initialize the ceph-mgr nodes:
root@ceph-mgr1:~# apt install ceph-mgr -y
root@ceph-mgr2:~# apt install ceph-mgr -y

#Register the mgr node with the cluster
cephuser@ceph-deploy:~/ceph-cluster$ sudo ceph-deploy mgr create ceph-mgr1
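Only ceph-mgr1 is registered here, yet the ceph -s output below lists ceph-mgr2 as a standby, so it was evidently added the same way; the corresponding command would be:

cephuser@ceph-deploy:~/ceph-cluster$ sudo ceph-deploy mgr create ceph-mgr2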

#Verify the ceph-mgr node
root@ceph-mgr1:~# ps -ef |grep ceph
root       5723      1  0 15:58 ?        00:00:00 /usr/bin/python3.6 /usr/bin/ceph-crash
ceph       8324      1  8 16:31 ?        00:00:07 /usr/bin/ceph-mgr -f --cluster ceph --id ceph-mgr1 --setuser ceph --setgroup ceph
root       8495   1760  0 16:33 pts/1    00:00:00 grep --color=auto ceph
#Disallow insecure communication (global_id reclaim)
cephuser@ceph-deploy:~/ceph-cluster$ ceph -s
  cluster:
    id:     7429e771-83f8-492a-aa2c-375bab844aba
    health: HEALTH_WARN
            mons are allowing insecure global_id reclaim   #insecure global_id reclaim needs to be disabled
            OSD count 0 < osd_pool_default_size 3     #the cluster has fewer than 3 OSDs
 
  services:
    mon: 3 daemons, quorum ceph-mon1,ceph-mon2,ceph-mon3 (age 83m)
    mgr: ceph-mgr1(active, since 3m), standbys: ceph-mgr2
    osd: 0 osds: 0 up, 0 in
 
  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   0 B used, 0 B / 0 B avail
    pgs:     
 

cephuser@ceph-deploy:~/ceph-cluster$ ceph config set mon auth_allow_insecure_global_id_reclaim false
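The new value can also be read back directly before re-checking the cluster status:

cephuser@ceph-deploy:~/ceph-cluster$ ceph config get mon auth_allow_insecure_global_id_reclaim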

cephuser@ceph-deploy:~/ceph-cluster$ ceph -s
  cluster:
    id:     7429e771-83f8-492a-aa2c-375bab844aba
    health: HEALTH_WARN
            OSD count 0 < osd_pool_default_size 3
 
  services:
    mon: 3 daemons, quorum ceph-mon1,ceph-mon2,ceph-mon3 (age 87m)
    mgr: ceph-mgr1(active, since 8m), standbys: ceph-mgr2
    osd: 0 osds: 0 up, 0 in
 
  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   0 B used, 0 B / 0 B avail
    pgs:     
 
cephuser@ceph-deploy:~/ceph-cluster$ 
#Prepare the OSDs
#List the disks on ceph-node1
cephuser@ceph-deploy:~/ceph-cluster$ sudo ceph-deploy disk list ceph-node1
[ceph-node1][INFO  ] Running command: fdisk -l
[ceph-node1][INFO  ] Disk /dev/sda: 60 GiB, 64424509440 bytes, 125829120 sectors
[ceph-node1][INFO  ] Disk /dev/sdc: 100 GiB, 107374182400 bytes, 209715200 sectors
[ceph-node1][INFO  ] Disk /dev/sdb: 100 GiB, 107374182400 bytes, 209715200 sectors
[ceph-node1][INFO  ] Disk /dev/sdf: 100 GiB, 107374182400 bytes, 209715200 sectors
[ceph-node1][INFO  ] Disk /dev/sde: 100 GiB, 107374182400 bytes, 209715200 sectors
[ceph-node1][INFO  ] Disk /dev/sdd: 100 GiB, 107374182400 bytes, 209715200 sectors


cephuser@ceph-deploy:~/ceph-cluster$ ssh-copy-id root@10.0.0.137
cephuser@ceph-deploy:~/ceph-cluster$ ssh-copy-id root@10.0.0.138
cephuser@ceph-deploy:~/ceph-cluster$ ssh-copy-id root@10.0.0.139
cephuser@ceph-deploy:~/ceph-cluster$ ssh-copy-id root@10.0.0.140

#Before zapping the disks, install the basic Ceph runtime on the node hosts from the deploy node (done in the ceph-deploy install step above), then wipe each data disk
cephuser@ceph-deploy:~/ceph-cluster$ sudo ceph-deploy disk zap  ceph-node1 /dev/sdb
cephuser@ceph-deploy:~/ceph-cluster$ cat cachu.sh 
#!/bin/bash
sudo ceph-deploy disk zap  ceph-node1 /dev/sdb
sudo ceph-deploy disk zap  ceph-node1 /dev/sdc
sudo ceph-deploy disk zap  ceph-node1 /dev/sdd
sudo ceph-deploy disk zap  ceph-node1 /dev/sde
sudo ceph-deploy disk zap  ceph-node1 /dev/sdf

sudo ceph-deploy disk zap  ceph-node2 /dev/sdb
sudo ceph-deploy disk zap  ceph-node2 /dev/sdc
sudo ceph-deploy disk zap  ceph-node2 /dev/sdd
sudo ceph-deploy disk zap  ceph-node2 /dev/sde
sudo ceph-deploy disk zap  ceph-node2 /dev/sdf

sudo ceph-deploy disk zap  ceph-node3 /dev/sdb
sudo ceph-deploy disk zap  ceph-node3 /dev/sdc
sudo ceph-deploy disk zap  ceph-node3 /dev/sdd
sudo ceph-deploy disk zap  ceph-node3 /dev/sde
sudo ceph-deploy disk zap  ceph-node3 /dev/sdf

cephuser@ceph-deploy:~/ceph-cluster$ bash cachu.sh
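cachu.sh (and tianjia.sh below) can also be written as nested loops; a sketch assuming the same three node hostnames and five data disks per node:

#!/bin/bash
#zap every data disk on every node (same effect as cachu.sh above)
for node in ceph-node1 ceph-node2 ceph-node3; do
    for disk in /dev/sd{b..f}; do
        sudo ceph-deploy disk zap ${node} ${disk}
    done
done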
#Add the OSDs
cephuser@ceph-deploy:~/ceph-cluster$ cat tianjia.sh 
#!/bin/bash
sudo ceph-deploy osd create ceph-node1 --data /dev/sdb
sudo ceph-deploy osd create ceph-node1 --data /dev/sdc
sudo ceph-deploy osd create ceph-node1 --data /dev/sdd
sudo ceph-deploy osd create ceph-node1 --data /dev/sde
sudo ceph-deploy osd create ceph-node1 --data /dev/sdf

sudo ceph-deploy osd create ceph-node2 --data /dev/sdb
sudo ceph-deploy osd create ceph-node2 --data /dev/sdc
sudo ceph-deploy osd create ceph-node2 --data /dev/sdd
sudo ceph-deploy osd create ceph-node2 --data /dev/sde
sudo ceph-deploy osd create ceph-node2 --data /dev/sdf

sudo ceph-deploy osd create ceph-node3 --data /dev/sdb
sudo ceph-deploy osd create ceph-node3 --data /dev/sdc
sudo ceph-deploy osd create ceph-node3 --data /dev/sdd
sudo ceph-deploy osd create ceph-node3 --data /dev/sde
sudo ceph-deploy osd create ceph-node3 --data /dev/sdf

cephuser@ceph-deploy:~/ceph-cluster$ bash tianjia.sh 
The resulting OSD-to-disk mapping (ceph-node4's five disks were zapped and added the same way, which is how the cluster reaches 20 OSDs):
ceph-osd.0 ---> node1 /dev/sdb
ceph-osd.1 ---> node1 /dev/sdc
ceph-osd.2 ---> node1 /dev/sdd
ceph-osd.3 ---> node1 /dev/sde
ceph-osd.4 ---> node1 /dev/sdf

ceph-osd.5 ---> node2 /dev/sdb 
ceph-osd.6 ---> node2 /dev/sdc
ceph-osd.7 ---> node2 /dev/sdd
ceph-osd.8 ---> node2 /dev/sde
ceph-osd.9 ---> node2 /dev/sdf

ceph-osd.10 ---> node3 /dev/sdb
ceph-osd.11 ---> node3 /dev/sdc
ceph-osd.12 ---> node3 /dev/sdd
ceph-osd.13 ---> node3 /dev/sde
ceph-osd.14 ---> node3 /dev/sdf

ceph-osd.15 ---> node4 /dev/sdb 
ceph-osd.16 ---> node4 /dev/sdc
ceph-osd.17 ---> node4 /dev/sdd
ceph-osd.18 ---> node4 /dev/sde
ceph-osd.19 ---> node4 /dev/sdf

#4/Final verification:
cephuser@ceph-deploy:~/ceph-cluster$ ceph status
  cluster:
    id:     6d76dc80-e391-462a-a4b2-34322f777645
    health: HEALTH_OK
 
  services:
    mon: 3 daemons, quorum ceph-mon1,ceph-mon2,ceph-mon3 (age 13m)
    mgr: ceph-mgr1(active, since 70m), standbys: ceph-mgr2
    osd: 20 osds: 20 up (since 32m), 20 in (since 33m)
 
  data:
    pools:   2 pools, 33 pgs
    objects: 0 objects, 0 B
    usage:   188 MiB used, 2.0 TiB / 2.0 TiB avail
    pgs:     33 active+clean
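As a final check, the OSD-to-host mapping recorded above can be compared against what the cluster itself reports (output omitted here):

cephuser@ceph-deploy:~/ceph-cluster$ ceph osd tree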
