Deploying Ceph, and using it from Kubernetes

# https://blog.frognew.com/2018/04/kubernetes-pv-cephfs.html    Using CephFS as file storage on Kubernetes
# http://int32bit.me/2016/05/19/Ceph-Pool%E6%93%8D%E4%BD%9C%E6%80%BB%E7%BB%93/ Summary of Ceph pool operations

cat >/etc/yum.repos.d/ceph-admin.repo<<EOF
[ceph]
name=ceph
baseurl=http://mirrors.aliyun.com/ceph/rpm-nautilus/el7/x86_64/
gpgcheck=0
priority=1

[ceph-noarch]
name=cephnoarch
baseurl=http://mirrors.aliyun.com/ceph/rpm-nautilus/el7/noarch/
gpgcheck=0
priority=1
EOF
yum install ceph ceph-radosgw -y
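
# ceph-deploy drives every step below but is not installed by the line above;
# on the admin/deploy node it has to be added separately (it ships in the noarch repo configured here)
yum install ceph-deploy -y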

useradd ceph-admin
echo 'admin' | passwd --stdin ceph-admin
echo "ceph-admin ALL = (root) NOPASSWD:ALL" > /etc/sudoers.d/ceph-admin
chmod 0440 /etc/sudoers.d/ceph-admin
#sed -i 's/PasswordAuthentication no/PasswordAuthentication yes/' /etc/ssh/sshd_config
sed -i 's/^Defaults.*requiretty/#Defaults requiretty/' /etc/sudoers # let ceph-deploy sudo over ssh without a tty

# This part should probably be handled with Ansible: regenerate /etc/hosts on all nodes, and re-push the whole file every time a node is added
echo "config the /etc/hosts! "

################### only for admin
#su ceph-admin
#ssh-keygen -t rsa
#cat ~/.ssh/id_rsa.pub # print the public key so it can be pasted into authorized_keys below


################### for every node
# Creating the files with explicit modes avoids permission errors when logging in remotely
su ceph-admin -c 'mkdir -pv /home/ceph-admin/.ssh'
su ceph-admin -c 'chmod 700 /home/ceph-admin/.ssh'
su ceph-admin -c 'echo "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCfpkasU1KMDST1V6crQlz9Qb5PQ+7y6d5/SRg1gHt0B4p2YBwI3S/ga4IYexXcaRnme42cqDPsz3dcUYAm29W9Hy2lxhP/AImXewnF5jULfuKK/ck5eyDj2NbhOXQWfdgNWRNj6TJk8VsGP4sXsRzKeI//ukksiIF9O4w7aSpUngL0FK850svnxQZGG8kdecocAsNbCz3XJXXKjNyhQg6ihV46QkkM1iv89SOaKWD+CuzYfGuYN1qaal0X2yXRBUgPfJHn2rN3HT6Fc74GJcsjhJIV/TdJ6cWp+aCswE92NXcVcgzfnxtWYQmMDFtgc3k6KbgX1x9mo3yTLD/U9tgd [email protected]" >> /home/ceph-admin/.ssh/authorized_keys'
su ceph-admin -c 'chmod 600 /home/ceph-admin/.ssh/authorized_keys'
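
# worth verifying from the admin node that passwordless login works before
# running ceph-deploy; this should print the remote hostname with no password prompt
su ceph-admin -c 'ssh ceph-admin@node1 hostname'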


################### start deploy
# after running "new" below, edit the generated ceph.conf and set: public network = 10.1.2.0/24
ceph-deploy new node1 node2 node3
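# equivalently, append it non-interactively ("new" writes ceph.conf into the current directory):
echo "public network = 10.1.2.0/24" >> ceph.conf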
ceph-deploy mon create-initial # deploy monitors on the nodes declared with "new" above
ceph-deploy admin node1 node2 node3 # push ceph.conf and the admin keyring to each node
ceph-deploy mgr create node1 node2 node3 # create managers; only needed on luminous and newer releases
ceph-deploy osd create --data {device} {ceph-node} # create an OSD; run once per device per node
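# for example, one OSD per node on a spare disk; /dev/sdb is an assumption,
# substitute whatever lsblk shows as the unused device on each host
ceph-deploy osd create --data /dev/sdb node1
ceph-deploy osd create --data /dev/sdb node2
ceph-deploy osd create --data /dev/sdb node3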

ceph -s # check that the cluster came up healthy

ceph-deploy mds create bjht1221 bjht1222 bjht1223 # an MDS is mandatory for CephFS


# switch back to root; ceph-admin is barely needed from here on
ceph osd pool create fs_data 128 # this number deserves thought: with only 3 OSDs, Ceph refuses to create two pools of 128 PGs each
ceph osd pool create fs_meta_data 64 # so the metadata pool has to settle for 64
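# rule of thumb: total PGs across all pools ~= (OSD count * 100) / replica size,
# rounded to a power of two; with 3 OSDs at the default size of 3 that is ~100,
# which is why 128 + 64 already brushes the per-OSD limit. Inspect with:
ceph osd pool get fs_data size   # replica count
ceph osd pool get fs_data pg_num # placement group count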

ceph fs new cephfs fs_meta_data fs_data # create the new CephFS (arguments: fs name, metadata pool, data pool)
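
# quick sanity checks before mounting
ceph fs ls    # should list cephfs with fs_meta_data / fs_data
ceph mds stat # one MDS should report as active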

# mount it somewhere to test; note the kernel client wants the bare key, not the whole keyring file
mkdir -p /mnt/test-ceph
mount -t ceph node1:6789,node2:6789,node3:6789:/ /mnt/test-ceph -o name=admin,secret=$(ceph auth get-key client.admin)
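
# To consume this CephFS from Kubernetes (the frognew link at the top walks
# through it), the classic in-tree approach is a Secret holding the admin key
# plus a PersistentVolume of type cephfs. A minimal sketch, assuming kubectl
# is configured; the names and the size are placeholders:
ceph auth get-key client.admin | base64 # the k8s Secret wants the base64 of the bare key
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Secret
metadata:
  name: ceph-admin-secret
data:
  key: <base64-key-from-above>
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: cephfs-pv
spec:
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteMany
  cephfs:
    monitors:
      - node1:6789
      - node2:6789
      - node3:6789
    user: admin
    secretRef:
      name: ceph-admin-secret
    path: /
EOF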

Permalink: https://omen.ltd/archives/9/