xref: /freebsd/sys/contrib/openzfs/tests/zfs-tests/tests/functional/redundancy/redundancy_draid.ksh (revision 271171e0d97b88ba2a7c3bf750c9672b484c1c13)
1#!/bin/ksh -p
2#
3# CDDL HEADER START
4#
5# The contents of this file are subject to the terms of the
6# Common Development and Distribution License (the "License").
7# You may not use this file except in compliance with the License.
8#
9# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10# or https://opensource.org/licenses/CDDL-1.0.
11# See the License for the specific language governing permissions
12# and limitations under the License.
13#
14# When distributing Covered Code, include this CDDL HEADER in each
15# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16# If applicable, add the following below this CDDL HEADER, with the
17# fields enclosed by brackets "[]" replaced with your own identifying
18# information: Portions Copyright [yyyy] [name of copyright owner]
19#
20# CDDL HEADER END
21#
22
23#
24# Copyright (c) 2020 by vStack. All rights reserved.
25# Copyright (c) 2021 by Delphix. All rights reserved.
26# Copyright (c) 2021 by Lawrence Livermore National Security, LLC.
27#
28
29. $STF_SUITE/include/libtest.shlib
30. $STF_SUITE/tests/functional/redundancy/redundancy.kshlib
31
32#
33# DESCRIPTION:
34#	dRAID should provide redundancy
35#
36# STRATEGY:
37#	1. Create block device files for the test draid pool
38#	2. For each parity value [1..3]
39#	    - create draid pool
40#	    - fill it with some directories/files
41#	    - verify self-healing by overwriting devices
42#	    - verify resilver by replacing devices
43#	    - verify scrub by zeroing devices
44#	    - destroy the draid pool
45
46typeset -r devs=6
47typeset -r dev_size_mb=512
48
49typeset -a disks
50
51prefetch_disable=$(get_tunable PREFETCH_DISABLE)
52
#
# Tear down any surviving pool, remove all backing files (the $devs pool
# devices plus the extra dev-$devs replacement file), and restore the
# prefetch tunable saved at startup.
#
function cleanup
{
	poolexists "$TESTPOOL" && destroy_pool "$TESTPOOL"

	typeset -i n
	for (( n = 0; n <= devs; n++ )); do
		rm -f "$TEST_BASE_DIR/dev-$n"
	done

	set_tunable32 PREFETCH_DISABLE $prefetch_disable
}
63
#
# Verify self-healing: overwrite up to <parity> devices with zeros (while
# the pool is exported, so ZFS cannot interfere), then read every file back
# and confirm the damaged blocks are transparently repaired from redundancy.
# The cycle is run twice against two disjoint sets of devices.
#
function test_selfheal # <pool> <parity> <dir>
{
	typeset pool=$1
	typeset nparity=$2
	typeset dir=$3

	# Export first so the device files can be safely overwritten.
	log_must zpool export $pool

	# Corrupt devices 0..nparity-1.  seek=4 preserves the first 4 MiB,
	# which holds the front vdev labels needed to re-import the pool.
	for (( i=0; i<$nparity; i=i+1 )); do
		log_must dd conv=notrunc if=/dev/zero of=$dir/dev-$i \
		    bs=1M seek=4 count=$(($dev_size_mb-4))
	done

	log_must zpool import -o cachefile=none -d $dir $pool

	# Reading every file triggers self-healing of the blocks read.
	typeset mntpnt=$(get_prop mountpoint $pool/fs)
	log_must eval "find $mntpnt -type f -exec cksum {} + >> /dev/null 2>&1"
	log_must check_pool_status $pool "errors" "No known data errors"

	#
	# Scrub the pool because the find command will only self-heal blocks
	# from the files which were read.  Before overwriting additional
	# devices we need to repair all of the blocks in the pool.
	#
	log_must zpool scrub -w $pool
	log_must check_pool_status $pool "errors" "No known data errors"

	log_must zpool clear $pool

	log_must zpool export $pool

	# Second pass: corrupt the next disjoint set of nparity devices.
	for (( i=$nparity; i<$nparity*2; i=i+1 )); do
		log_must dd conv=notrunc if=/dev/zero of=$dir/dev-$i \
		    bs=1M seek=4 count=$(($dev_size_mb-4))
	done

	log_must zpool import -o cachefile=none -d $dir $pool

	typeset mntpnt=$(get_prop mountpoint $pool/fs)
	log_must eval "find $mntpnt -type f -exec cksum {} + >> /dev/null 2>&1"
	log_must check_pool_status $pool "errors" "No known data errors"

	log_must zpool scrub -w $pool
	log_must check_pool_status $pool "errors" "No known data errors"

	log_must zpool clear $pool
}
111
#
# Verify resilvering: wipe the labels of up to <parity> devices and replace
# them in-place, checking that the resilver completes with no data or
# checksum errors.  The cycle is run twice against two disjoint device sets.
#
function test_resilver # <pool> <parity> <dir>
{
	typeset pool=$1
	typeset nparity=$2
	typeset dir=$3

	# Offline devices 0..nparity-1 so their labels can be cleared.
	for (( i=0; i<$nparity; i=i+1 )); do
		log_must zpool offline $pool $dir/dev-$i
	done

	log_must zpool export $pool

	# labelclear makes the devices look brand new to the import below.
	for (( i=0; i<$nparity; i=i+1 )); do
		log_must zpool labelclear -f $dir/dev-$i
	done

	log_must zpool import -o cachefile=none -d $dir $pool

	# In-place replace; -w waits for the resilver to finish.
	for (( i=0; i<$nparity; i=i+1 )); do
		log_must zpool replace -fw $pool $dir/dev-$i
	done

	log_must check_pool_status $pool "errors" "No known data errors"
	resilver_cksum=$(cksum_pool $pool)
	if [[ $resilver_cksum != 0 ]]; then
		log_must zpool status -v $pool
		log_fail "resilver cksum errors: $resilver_cksum"
	fi

	log_must zpool clear $pool

	# Second pass: same sequence on the next disjoint set of devices.
	for (( i=$nparity; i<$nparity*2; i=i+1 )); do
		log_must zpool offline $pool $dir/dev-$i
	done

	log_must zpool export $pool

	for (( i=$nparity; i<$nparity*2; i=i+1 )); do
		log_must zpool labelclear -f $dir/dev-$i
	done

	log_must zpool import -o cachefile=none -d $dir $pool

	for (( i=$nparity; i<$nparity*2; i=i+1 )); do
		log_must zpool replace -fw $pool $dir/dev-$i
	done

	log_must check_pool_status $pool "errors" "No known data errors"
	resilver_cksum=$(cksum_pool $pool)
	if [[ $resilver_cksum != 0 ]]; then
		log_must zpool status -v $pool
		log_fail "resilver cksum errors: $resilver_cksum"
	fi

	log_must zpool clear $pool
}
168
#
# Verify scrub repair: overwrite up to <parity> devices with zeros while the
# pool is exported, then scrub and confirm all damage is repaired with no
# data errors.  The cycle is run twice against two disjoint device sets.
#
function test_scrub # <pool> <parity> <dir>
{
	typeset pool=$1
	typeset nparity=$2
	typeset dir=$3

	log_must zpool export $pool

	# Corrupt devices 0..nparity-1.  seek=4 preserves the first 4 MiB of
	# front vdev labels so the pool can still be imported.  log_must is
	# required here (as in test_selfheal): if dd silently failed the
	# devices would be intact and the scrub check below would be vacuous.
	for (( i=0; i<$nparity; i=i+1 )); do
		log_must dd conv=notrunc if=/dev/zero of=$dir/dev-$i \
		    bs=1M seek=4 count=$(($dev_size_mb-4))
	done

	log_must zpool import -o cachefile=none -d $dir $pool

	# -w waits for the scrub to complete before checking status.
	log_must zpool scrub -w $pool
	log_must check_pool_status $pool "errors" "No known data errors"

	log_must zpool clear $pool

	log_must zpool export $pool

	# Second pass: corrupt the next disjoint set of nparity devices.
	for (( i=$nparity; i<$nparity*2; i=i+1 )); do
		log_must dd conv=notrunc if=/dev/zero of=$dir/dev-$i \
		    bs=1M seek=4 count=$(($dev_size_mb-4))
	done

	log_must zpool import -o cachefile=none -d $dir $pool

	log_must zpool scrub -w $pool
	log_must check_pool_status $pool "errors" "No known data errors"

	log_must zpool clear $pool
}
203
log_onexit cleanup

# Disable prefetch so reads touch exactly the blocks the tests expect.
log_must set_tunable32 PREFETCH_DISABLE 1

# Disk files which will be used by pool
for i in {0..$(($devs - 1))}; do
	device=$TEST_BASE_DIR/dev-$i
	log_must truncate -s ${dev_size_mb}M $device
	# NOTE: indexing starts at 1 (index 0 is never set); ${disks[@]}
	# expands only the set elements, so all $devs paths are passed.
	disks[${#disks[*]}+1]=$device
done

# Disk file which will be attached as a replacement; sized with
# ${dev_size_mb} so it always matches the pool devices above.
log_must truncate -s ${dev_size_mb}M $TEST_BASE_DIR/dev-$devs

# Exercise every supported dRAID parity level.
for nparity in 1 2 3; do
	raid=draid$nparity
	dir=$TEST_BASE_DIR

	log_must zpool create -O compression=off -f -o cachefile=none $TESTPOOL $raid ${disks[@]}
	# Cache metadata only so file reads always hit the disks.
	log_must zfs set primarycache=metadata $TESTPOOL

	# Populate datasets with varied compression/recordsize settings to
	# diversify the on-disk block layout.
	log_must zfs create $TESTPOOL/fs
	log_must fill_fs /$TESTPOOL/fs 1 512 100 1024 R

	log_must zfs create -o compress=on $TESTPOOL/fs2
	log_must fill_fs /$TESTPOOL/fs2 1 512 100 1024 R

	log_must zfs create -o compress=on -o recordsize=8k $TESTPOOL/fs3
	log_must fill_fs /$TESTPOOL/fs3 1 512 100 1024 R

	typeset pool_size=$(get_pool_prop size $TESTPOOL)

	# Sanity check: the pool survives an export/import round trip.
	log_must zpool export $TESTPOOL
	log_must zpool import -o cachefile=none -d $dir $TESTPOOL

	log_must check_pool_status $TESTPOOL "errors" "No known data errors"

	test_selfheal $TESTPOOL $nparity $dir
	test_resilver $TESTPOOL $nparity $dir
	test_scrub $TESTPOOL $nparity $dir

	log_must zpool destroy "$TESTPOOL"
done

log_pass "draid redundancy test succeeded."
249