Environment
VMware Workstation 17 Pro
CentOS Linux release 7.9.2009 (Core)
------ 8 GB memory, 16 cores
------ 100 GB system disk
------ Four 20 GB data disks
Notes
1. With no operating system installed, you can build a software RAID during OS installation and use it as the system disk.
2. During a rebuild, software RAID puts a heavy load on the CPU; it is not recommended in real production environments.
3. Different partitions of the same disk can also form a software RAID (see the sketch after this list).
4. All disks in this environment are SCSI type and thin provisioned.
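As a minimal sketch of note 3 (the partition names here are hypothetical), two partitions of one disk can be mirrored, though this offers no protection if the physical disk itself fails:
# Hypothetical example: /dev/sdf1 and /dev/sdf2 are two partitions on the same disk
mdadm --create /dev/md9 -a yes -l 1 -n 2 /dev/sdf1 /dev/sdf2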
Creating RAID 0 and RAID 1
Current environment
# Check the current disk state
lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sda 8:0 0 100G 0 disk
├─sda1 8:1 0 1G 0 part /boot
└─sda2 8:2 0 99G 0 part
├─centos-root 253:0 0 50G 0 lvm /
├─centos-swap 253:1 0 3.9G 0 lvm [SWAP]
└─centos-home 253:2 0 45.1G 0 lvm /home
sdb 8:16 0 20G 0 disk
sdc 8:32 0 20G 0 disk
sdd 8:48 0 20G 0 disk
sde 8:64 0 20G 0 disk
sr0 11:0 1 4.5G 0 rom /run/media/root/CentOS 7 x86_64
Installing mdadm
# If the system has been through yum update, mdadm is most likely already installed
yum -y install mdadm
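Either of the following confirms that mdadm is present:
rpm -q mdadm
mdadm --version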
Creating the arrays
# Create an array named /dev/md0 (other names may produce an error); mdadm -C is the short form of mdadm --create
# Note: the options are explained above the command, since a comment after a backslash continuation would break the shell
# -a yes : automatically create the RAID device file
# -l 0   : set the RAID level to RAID 0
# -n 2   : use 2 member disks, /dev/sdb and /dev/sdc
mdadm --create /dev/md0 -a yes -l 0 -n 2 /dev/sdb /dev/sdc
# On success you will see:
# mdadm: Defaulting to version 1.2 metadata
# mdadm: array /dev/md0 started.
mdadm --create /dev/md1 -a yes -l 1 -n 2 /dev/sdd /dev/sde
# Enter yes to acknowledge the warning and continue
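The new arrays behave like ordinary block devices. A minimal usage sketch, assuming an XFS filesystem and a hypothetical mount point:
mkfs.xfs /dev/md0        # put a filesystem on the RAID 0 array
mkdir -p /mnt/md0        # hypothetical mount point
mount /dev/md0 /mnt/md0
df -h /mnt/md0           # the ~40G striped capacity should be visible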
Inspecting software RAID information
# View the software RAID information
lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sda 8:0 0 100G 0 disk
├─sda1 8:1 0 1G 0 part /boot
└─sda2 8:2 0 99G 0 part
├─centos-root 253:0 0 50G 0 lvm /
├─centos-swap 253:1 0 3.9G 0 lvm [SWAP]
└─centos-home 253:2 0 45.1G 0 lvm /home
sdb 8:16 0 20G 0 disk
└─md0 9:0 0 40G 0 raid0
sdc 8:32 0 20G 0 disk
└─md0 9:0 0 40G 0 raid0
sdd 8:48 0 20G 0 disk
└─md1 9:1 0 20G 0 raid1
sde 8:64 0 20G 0 disk
└─md1 9:1 0 20G 0 raid1
sr0 11:0 1 4.5G 0 rom /run/media/root/CentOS 7 x86_64
mdadm --detail /dev/md0
# short form: mdadm -D /dev/md0
/dev/md0:
Version : 1.2
Creation Time : Tue Dec 12 05:41:07 2023
Raid Level : raid0
Array Size : 41908224 (39.97 GiB 42.91 GB)
Raid Devices : 2
Total Devices : 2
Persistence : Superblock is persistent
Update Time : Tue Dec 12 05:41:07 2023
State : clean
Active Devices : 2
Working Devices : 2
Failed Devices : 0
Spare Devices : 0
Chunk Size : 512K
Consistency Policy : none
Name : 192.168.8.151:0 (local to host 192.168.8.151)
UUID : cb7e5ace:f809e250:75079d40:21413521
Events : 0
Number Major Minor RaidDevice State
0 8 16 0 active sync /dev/sdb
1 8 32 1 active sync /dev/sdc
# Check the RAID status
cat /proc/mdstat
Personalities : [raid0] [raid1]
md1 : active raid1 sde[1] sdd[0]
20954112 blocks super 1.2 [2/2] [UU]
md0 : active raid0 sdc[1] sdb[0]
41908224 blocks super 1.2 512k chunks
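To have the arrays reassembled automatically at boot, the scan output can be appended to mdadm.conf:
mdadm --detail --scan >> /etc/mdadm.conf   # records one ARRAY line per array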
Stopping and starting arrays; adding and removing disks
# Stop the arrays
mdadm --stop /dev/md0
mdadm --stop /dev/md1
# Reassemble (restart) the array; mdadm reads /etc/mdadm.conf. If the array is not listed there, name the member disks explicitly: mdadm -A /dev/md1 /dev/sdd /dev/sde
mdadm -A /dev/md1
# Wipe the RAID superblock from the former md0 members
mdadm --misc --zero-superblock /dev/sdb /dev/sdc
# This erases the metadata completely so the disks can be reused in a new array
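To confirm the superblocks are really gone, --examine should report no metadata:
mdadm --examine /dev/sdb   # expect: "mdadm: No md superblock detected on /dev/sdb."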
# Simulate a disk failure (-f is short for --fail)
mdadm /dev/md1 -f /dev/sdd
# Check the status
cat /proc/mdstat
Personalities : [raid0] [raid1]
md1 : active raid1 sde[1] sdd[0](F)
20954112 blocks super 1.2 [2/1] [_U]
mdadm -D /dev/md1
/dev/md1:
Version : 1.2
Creation Time : Tue Dec 12 06:43:12 2023
Raid Level : raid1
Array Size : 20954112 (19.98 GiB 21.46 GB)
Used Dev Size : 20954112 (19.98 GiB 21.46 GB)
Raid Devices : 2
Total Devices : 2
Persistence : Superblock is persistent
Update Time : Tue Dec 12 06:47:50 2023
State : clean, degraded
Active Devices : 1
Working Devices : 1
Failed Devices : 1
Spare Devices : 0
# Remove the failed disk
mdadm --manage /dev/md1 --remove /dev/sdd
# Querying again now shows only one disk left
mdadm -D /dev/md1
/dev/md1:
Version : 1.2
Creation Time : Tue Dec 12 06:43:12 2023
Raid Level : raid1
Array Size : 20954112 (19.98 GiB 21.46 GB)
Used Dev Size : 20954112 (19.98 GiB 21.46 GB)
Raid Devices : 2
Total Devices : 1
Persistence : Superblock is persistent
Update Time : Tue Dec 12 06:50:27 2023
State : clean, degraded
Active Devices : 1
Working Devices : 1
Failed Devices : 0
Spare Devices : 0
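Incidentally, the fail and remove steps can be combined into a single invocation; the following is equivalent to the two commands used here:
mdadm /dev/md1 --fail /dev/sdd --remove /dev/sdd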
# Add a healthy disk back in
mdadm --manage /dev/md1 --add /dev/sdc
# Check mdstat again; you can see the disk rebuilding
cat /proc/mdstat
Personalities : [raid0] [raid1]
md1 : active raid1 sdc[2] sde[1]
20954112 blocks super 1.2 [2/1] [_U]
[=>...................] recovery = 8.5% (1800192/20954112) finish=1.4min speed=225024K/sec
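To follow the rebuild in real time instead of re-running the command, watch can refresh mdstat every second:
watch -n 1 cat /proc/mdstat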
# Check again after the rebuild completes
mdadm -D /dev/md1
/dev/md1:
Version : 1.2
Creation Time : Tue Dec 12 06:43:12 2023
Raid Level : raid1
Array Size : 20954112 (19.98 GiB 21.46 GB)
Used Dev Size : 20954112 (19.98 GiB 21.46 GB)
Raid Devices : 2
Total Devices : 2
Persistence : Superblock is persistent
Update Time : Tue Dec 12 06:56:35 2023
State : clean
Active Devices : 2
Working Devices : 2
Failed Devices : 0
Spare Devices : 0
Consistency Policy : resync
# Clean up the environment
mdadm --stop /dev/md1
mdadm --misc --zero-superblock /dev/sdc /dev/sde
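Note that /dev/sdd was removed from md1 earlier but never wiped, so its old superblock is still on disk; mdadm will warn about it when the disk is reused below. Wiping it first avoids the prompt:
mdadm --misc --zero-superblock /dev/sdd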
Creating RAID 5
# Create a RAID 5 array: 2 active disks plus 2 hot spares
mdadm --create /dev/md0 -a yes -l 5 -n 2 -x 2 /dev/sdb /dev/sdc /dev/sdd /dev/sde
# -x specifies the number of hot-spare disks (mdadm accepts -n 2 for RAID 5 as a degenerate case; a typical RAID 5 uses at least 3 active disks)
# Check the status; you can see the initial sync in progress
cat /proc/mdstat
Personalities : [raid0] [raid1] [raid6] [raid5] [raid4]
md0 : active raid5 sdc[4] sde[3](S) sdd[2](S) sdb[0]
20954112 blocks super 1.2 level 5, 512k chunk, algorithm 2 [2/1] [U_]
[==>..................] recovery = 14.3% (3000192/20954112) finish=1.4min speed=200012K/sec
mdadm -D /dev/md0
/dev/md0:
Version : 1.2
Creation Time : Tue Dec 12 07:05:36 2023
Raid Level : raid5
Array Size : 20954112 (19.98 GiB 21.46 GB)
Used Dev Size : 20954112 (19.98 GiB 21.46 GB)
Raid Devices : 2
Total Devices : 4
Persistence : Superblock is persistent
Update Time : Tue Dec 12 07:06:48 2023
State : clean, degraded, recovering
Active Devices : 1
Working Devices : 4
Failed Devices : 0
Spare Devices : 3
Layout : left-symmetric
Chunk Size : 512K
Consistency Policy : resync
Rebuild Status : 69% complete
Name : 192.168.8.151:0 (local to host 192.168.8.151)
UUID : 82e7f291:65e54bf3:d96624ce:964e3637
Events : 12
Number Major Minor RaidDevice State
0 8 16 0 active sync /dev/sdb
4 8 32 1 spare rebuilding /dev/sdc
2 8 48 - spare /dev/sdd
3 8 64 - spare /dev/sde
lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sda 8:0 0 100G 0 disk
├─sda1 8:1 0 1G 0 part /boot
└─sda2 8:2 0 99G 0 part
├─centos-root 253:0 0 50G 0 lvm /
├─centos-swap 253:1 0 3.9G 0 lvm [SWAP]
└─centos-home 253:2 0 45.1G 0 lvm /home
sdb 8:16 0 20G 0 disk
└─md0 9:0 0 20G 0 raid5
sdc 8:32 0 20G 0 disk
└─md0 9:0 0 20G 0 raid5
sdd 8:48 0 20G 0 disk
└─md0 9:0 0 20G 0 raid5
sde 8:64 0 20G 0 disk
└─md0 9:0 0 20G 0 raid5
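The point of the hot spares is automatic takeover: if an active disk fails, a spare is pulled in and rebuilt without manual intervention. A quick sketch for this lab setup (destructive; lab use only):
mdadm /dev/md0 -f /dev/sdb   # fail an active disk
cat /proc/mdstat             # one of the (S) spares switches to rebuilding on its own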
# List the loaded kernel modules; you can see the kernel has modules supporting RAID
lsmod | grep raid
raid456 151196 1
async_raid6_recov 17288 1 raid456
async_memcpy 12768 2 raid456,async_raid6_recov
async_pq 13332 2 raid456,async_raid6_recov
raid6_pq 102527 3 async_pq,raid456,async_raid6_recov
async_xor 13127 3 async_pq,raid456,async_raid6_recov
async_tx 13509 5 async_pq,raid456,async_xor,async_memcpy,async_raid6_recov
raid1 44113 0
raid0 18164 0
libcrc32c 12644 4 xfs,raid456,nf_nat,nf_conntrack
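Details on any of these personalities are available via modinfo, for example:
modinfo raid456 | grep description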