@dyj2017
2017-11-02T10:00:20.000000Z
Tags: ceph, ceph experiment, rbd
# cat /etc/redhat-release
CentOS Linux release 7.3.1611 (Core)
# ceph -v
ceph version 12.2.1 (3e7492b9ada8bdc9a5cd0feafd42fbca27f9c38e) luminous (stable)
For example, suppose we need to recover a block device whose objects all share the prefix rbd_data.1041643c9869. The device is named foo, holds an xfs filesystem, is 1G in size, is mounted at /root/foo/, and contains three files:
[root@node3 ~]# ls foo/
file1.txt file2.txt file3.txt
[root@node3 ~]# cat foo/file2.txt
Ceph
2222222222222222222222222222222222222
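(For context: a test device like this could have been prepared roughly as follows; a minimal sketch, assuming the default rbd pool and that the image maps to /dev/rbd0.)
rbd create foo -p rbd --size 1024    # 1G image; --size is in MB by default
rbd map foo                          # device appears as /dev/rbd0
mkfs.xfs /dev/rbd0
mkdir -p /root/foo && mount /dev/rbd0 /root/foo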
Recovering the foo block device requires the following steps. First, list all objects in the pool to find those with the prefix:
[root@node3 ~]# rados -p rbd ls|sort
rbd_data.1041643c9869.0000000000000000
rbd_data.1041643c9869.0000000000000001
rbd_data.1041643c9869.000000000000001f
rbd_data.1041643c9869.000000000000003e
rbd_data.1041643c9869.000000000000005d
rbd_data.1041643c9869.000000000000007c
rbd_data.1041643c9869.000000000000007d
rbd_data.1041643c9869.000000000000007e
rbd_data.1041643c9869.000000000000009b
rbd_data.1041643c9869.00000000000000ba
rbd_data.1041643c9869.00000000000000d9
rbd_data.1041643c9869.00000000000000f8
rbd_data.1041643c9869.00000000000000ff
rbd_directory
rbd_header.1041643c9869
rbd_id.foo
rbd_info
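The hex suffix at the end of each data-object name is the object's index within the image: with the default 4M (4194304-byte) objects, object N covers the bytes starting at N * 4194304, and missing indices are simply holes in the sparse image. For example, the byte offset of object 0x1f can be computed as:
printf '%d\n' $((0x1f * 4194304))    # prints 130023424
When the image metadata is still readable, the prefix itself can be confirmed with rbd info foo, whose block_name_prefix field shows rbd_data.1041643c9869.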
Next, fetch each object with that prefix from the pool into the local working directory, for example:
[root@node3 ~]# rados -p rbd get rbd_data.1041643c9869.0000000000000000 rbd_data.1041643c9869.0000000000000000
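To download every data object in one pass, a small loop works (a sketch; run it in the directory where the rebuild script will look for the files):
for obj in $(rados -p rbd ls | grep '^rbd_data.1041643c9869'); do
    rados -p rbd get ${obj} ${obj}
done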
[root@node3 ~]# touch mkrbd.sh
[root@node3 ~]# chmod +x mkrbd.sh
[root@node3 ~]# vi mkrbd.sh
Enter the following content:
#!/bin/sh
# Rados object size: the 4M object size noted above
obj_size=4194304
# DD bs value
rebuild_block_size=512
#rbd="${1}"
rbd="foo1"                      # name of the image file to rebuild
#base="${2}"
base="rbd_data.1041643c9869"    # object-name prefix
#rbd_size="${3}"
rbd_size="1073741824"           # original image size in bytes (1G)
base_files=$(ls -1 ${base}.* 2>/dev/null | wc -l | awk '{print $1}')
if [ ${base_files} -lt 1 ]; then
    echo "COULD NOT FIND FILES FOR ${base} IN $(pwd)"
    exit 1
fi
# Create a full-size sparse image. truncate would also work, but the
# goal was to depend on as few tools as possible, and dd was a must.
dd if=/dev/zero of=${rbd} bs=1 count=0 seek=${rbd_size} 2>/dev/null
# For each object file, compute its offset in the image from the hex
# suffix (strtonum requires gawk) and dd it into place.
for file_name in $(ls -1 ${base}.* 2>/dev/null); do
    seek_loc=$(echo ${file_name} | awk -v os=${obj_size} -v rs=${rebuild_block_size} -F. '{print os*strtonum("0x" $NF)/rs}')
    dd conv=notrunc if=${file_name} of=${rbd} seek=${seek_loc} bs=${rebuild_block_size} 2>/dev/null
done
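The script above hardcodes the image name, prefix, and size. To make it reusable, replace the three hardcoded assignments with the commented-out ${1}/${2}/${3} versions and call it as, for example:
./mkrbd.sh foo1 rbd_data.1041643c9869 1073741824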
Running the script produces a file named foo1 in the current directory:
[root@node3 ~]# file foo1
foo1: SGI XFS filesystem data (blksz 4096, inosz 512, v2 dirs)
[root@node3 ~]# du -sh foo1
11M foo1
[root@node3 ~]# ll -h foo1
-rw-r--r-- 1 root root 1.0G Oct 17 16:04 foo1
As you can see, foo1 contains an XFS filesystem; it uses 11M of actual disk space and has an apparent size of 1G.
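Before mounting, the rebuilt filesystem can be sanity-checked without modifying it (assuming xfsprogs is installed; -n means check only):
xfs_repair -n foo1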
[root@node3 ~]# mount foo1 /mnt
mount: wrong fs type, bad option, bad superblock on /dev/loop0,
       missing codepage or helper program, or other error

       In some cases useful info is found in syslog - try
       dmesg | tail or so.
[root@node3 ~]# dmesg|tail
[88818.307314] XFS (rbd0): Mounting V5 Filesystem
[88818.865978] XFS (rbd0): Ending clean mount
[91099.845834] bash (6208): drop_caches: 1
[91492.345582] bash (6208): drop_caches: 1
[93485.275727] libceph: osd2 down
[93485.275739] libceph: osd5 down
[93495.518099] libceph: osd2 up
[93495.518165] libceph: osd5 up
[95288.897917] loop: module loaded
[98449.535689] XFS (loop0): Filesystem has duplicate UUID 313b2d89-f4bc-4ee6-a1d8-a996190222fd - can't mount
Mounting foo1 fails with the error above because foo1 is a copy of the original foo device (/dev/rbd0), so its filesystem UUID is identical to that of the still-mounted /dev/rbd0. Simply umount foo/ first:
[root@node3 ~]# umount foo/
[root@node3 ~]# mount foo1 /mnt
[root@node3 ~]# ls /mnt/
file1.txt file2.txt file3.txt
[root@node3 ~]# cat /mnt/file2.txt
Ceph
2222222222222222222222222222222222222
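Alternatively, XFS's nouuid mount option bypasses the duplicate-UUID check, so the copy can be mounted alongside the original without unmounting it:
mount -o nouuid foo1 /mnt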
As you can see, the contents of the original foo block device have been fully recovered. Of course, the rebuilt image could also simply have been named foo.
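If the recovered image needs to go back into the cluster, rbd import can upload the local file as a new RBD image (the destination name foo_restored here is just an example):
rbd import foo1 rbd/foo_restored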