/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (C) 2016 Gvozden Nešković. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/types.h>
#include <sys/zio.h>
#include <sys/debug.h>
#include <sys/zfs_debug.h>
#include <sys/vdev_raidz.h>
#include <sys/vdev_raidz_impl.h>
#include <sys/simd.h>

/* Opaque implementation with NULL methods to represent the original methods */
static const raidz_impl_ops_t vdev_raidz_original_impl = {
	.name = "original",
	.is_supported = raidz_will_scalar_work,
};

/* RAIDZ parity op that contains the fastest methods */
static raidz_impl_ops_t vdev_raidz_fastest_impl = {
	.name = "fastest"
};

/* All compiled-in implementations */
static const raidz_impl_ops_t *const raidz_all_maths[] = {
	&vdev_raidz_original_impl,
	&vdev_raidz_scalar_impl,
#if defined(__x86_64) && defined(HAVE_SSE2)	/* only x86_64 for now */
	&vdev_raidz_sse2_impl,
#endif
#if defined(__x86_64) && defined(HAVE_SSSE3)	/* only x86_64 for now */
	&vdev_raidz_ssse3_impl,
#endif
#if defined(__x86_64) && defined(HAVE_AVX2)	/* only x86_64 for now */
	&vdev_raidz_avx2_impl,
#endif
#if defined(__x86_64) && defined(HAVE_AVX512F)	/* only x86_64 for now */
	&vdev_raidz_avx512f_impl,
#endif
#if defined(__x86_64) && defined(HAVE_AVX512BW)	/* only x86_64 for now */
	&vdev_raidz_avx512bw_impl,
#endif
#if defined(__aarch64__) && !defined(__FreeBSD__)
	&vdev_raidz_aarch64_neon_impl,
	&vdev_raidz_aarch64_neonx2_impl,
#endif
#if defined(__powerpc__) && defined(__altivec__)
	&vdev_raidz_powerpc_altivec_impl,
#endif
};

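/*
 * Note (illustrative): the table above goes from the generic implementations
 * to the more specialized SIMD ones.  This ordering matters in user space,
 * where benchmark_raidz() below skips the benchmark and simply treats the
 * last supported entry as the fastest.  For example, on an x86_64 build with
 * AVX2 but without AVX-512 support, raidz_supp_impl[] would end up roughly as
 * { original, scalar, sse2, ssse3, avx2 } and user space would use avx2 as
 * "fastest".
 */
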
/* Indicate that benchmark has been completed */
static boolean_t raidz_math_initialized = B_FALSE;

/* Select raidz implementation */
#define	IMPL_FASTEST	(UINT32_MAX)
#define	IMPL_CYCLE	(UINT32_MAX - 1)
#define	IMPL_ORIGINAL	(0)
#define	IMPL_SCALAR	(1)

#define	RAIDZ_IMPL_READ(i)	(*(volatile uint32_t *) &(i))

static uint32_t zfs_vdev_raidz_impl = IMPL_SCALAR;
static uint32_t user_sel_impl = IMPL_FASTEST;

/* Hold all supported implementations */
static size_t raidz_supp_impl_cnt = 0;
static raidz_impl_ops_t *raidz_supp_impl[ARRAY_SIZE(raidz_all_maths)];

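/*
 * Illustrative note (a sketch, not compiled): zfs_vdev_raidz_impl and
 * user_sel_impl hold either one of the sentinel values above or an index
 * into raidz_supp_impl[].  Assuming, for example, that the original, scalar
 * and sse2 implementations pass is_supported(), the value space looks like:
 *
 *	IMPL_FASTEST  (UINT32_MAX)     -> use vdev_raidz_fastest_impl
 *	IMPL_CYCLE    (UINT32_MAX - 1) -> rotate through raidz_supp_impl[]
 *	0 .. raidz_supp_impl_cnt - 1   -> raidz_supp_impl[i],
 *	                                  e.g. 0 = original, 1 = scalar, 2 = sse2
 *
 * The exact indices depend on which implementations pass is_supported() when
 * benchmark_raidz() runs.
 */
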
#if defined(_KERNEL)
/*
 * kstat values for supported implementations.
 * Values represent the per-disk throughput of an 8 data-disk + parity
 * raidz vdev [B/s].
 */
static raidz_impl_kstat_t raidz_impl_kstats[ARRAY_SIZE(raidz_all_maths) + 1];

/* kstat for benchmarked implementations */
static kstat_t *raidz_math_kstat = NULL;
#endif

/*
 * Returns the RAIDZ operations for raidz_map() parity calculations.  When
 * a SIMD implementation is not allowed in the current context, fall back
 * to the fastest generic implementation.
 */
const raidz_impl_ops_t *
vdev_raidz_math_get_ops(void)
{
	if (!kfpu_allowed())
		return (&vdev_raidz_scalar_impl);

	raidz_impl_ops_t *ops = NULL;
	const uint32_t impl = RAIDZ_IMPL_READ(zfs_vdev_raidz_impl);

	switch (impl) {
	case IMPL_FASTEST:
		ASSERT(raidz_math_initialized);
		ops = &vdev_raidz_fastest_impl;
		break;
	case IMPL_CYCLE:
		/* Cycle through all supported implementations */
		ASSERT(raidz_math_initialized);
		ASSERT3U(raidz_supp_impl_cnt, >, 0);
		static size_t cycle_impl_idx = 0;
		size_t idx = (++cycle_impl_idx) % raidz_supp_impl_cnt;
		ops = raidz_supp_impl[idx];
		break;
	case IMPL_ORIGINAL:
		ops = (raidz_impl_ops_t *)&vdev_raidz_original_impl;
		break;
	case IMPL_SCALAR:
		ops = (raidz_impl_ops_t *)&vdev_raidz_scalar_impl;
		break;
	default:
		ASSERT3U(impl, <, raidz_supp_impl_cnt);
		ASSERT3U(raidz_supp_impl_cnt, >, 0);
		if (impl < ARRAY_SIZE(raidz_all_maths))
			ops = raidz_supp_impl[impl];
		break;
	}

	ASSERT3P(ops, !=, NULL);

	return (ops);
}

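/*
 * Usage sketch (illustrative only; the real caller lives in vdev_raidz.c):
 * the ops pointer returned here is typically cached in the raidz map when it
 * is allocated, so every operation on that map uses one consistent
 * implementation:
 *
 *	rm->rm_ops = vdev_raidz_math_get_ops();
 *
 * Note that in interrupt or other no-FPU contexts kfpu_allowed() is false
 * and the scalar implementation is returned regardless of the selector.
 */
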
/*
 * Select parity generation method for raidz_map
 */
int
vdev_raidz_math_generate(raidz_map_t *rm, raidz_row_t *rr)
{
	raidz_gen_f gen_parity = NULL;

	switch (raidz_parity(rm)) {
		case 1:
			gen_parity = rm->rm_ops->gen[RAIDZ_GEN_P];
			break;
		case 2:
			gen_parity = rm->rm_ops->gen[RAIDZ_GEN_PQ];
			break;
		case 3:
			gen_parity = rm->rm_ops->gen[RAIDZ_GEN_PQR];
			break;
		default:
			gen_parity = NULL;
			cmn_err(CE_PANIC, "invalid RAID-Z configuration %llu",
			    (u_longlong_t)raidz_parity(rm));
			break;
	}

	/* If the method is NULL, fall back to the original implementation */
	if (gen_parity == NULL)
		return (RAIDZ_ORIGINAL_IMPL);

	gen_parity(rr);

	return (0);
}

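/*
 * Caller-side convention (a sketch; see vdev_raidz.c for the real code):
 * a return value of RAIDZ_ORIGINAL_IMPL means no accelerated method is set
 * for this parity count, and the caller is expected to run the original
 * (non-SIMD) parity code instead, roughly:
 *
 *	if (vdev_raidz_math_generate(rm, rr) == RAIDZ_ORIGINAL_IMPL) {
 *		// run the original parity generation routines
 *	}
 */
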
static raidz_rec_f
reconstruct_fun_p_sel(raidz_map_t *rm, const int *parity_valid,
    const int nbaddata)
{
	if (nbaddata == 1 && parity_valid[CODE_P]) {
		return (rm->rm_ops->rec[RAIDZ_REC_P]);
	}
	return ((raidz_rec_f) NULL);
}

static raidz_rec_f
reconstruct_fun_pq_sel(raidz_map_t *rm, const int *parity_valid,
    const int nbaddata)
{
	if (nbaddata == 1) {
		if (parity_valid[CODE_P]) {
			return (rm->rm_ops->rec[RAIDZ_REC_P]);
		} else if (parity_valid[CODE_Q]) {
			return (rm->rm_ops->rec[RAIDZ_REC_Q]);
		}
	} else if (nbaddata == 2 &&
	    parity_valid[CODE_P] && parity_valid[CODE_Q]) {
		return (rm->rm_ops->rec[RAIDZ_REC_PQ]);
	}
	return ((raidz_rec_f) NULL);
}

static raidz_rec_f
reconstruct_fun_pqr_sel(raidz_map_t *rm, const int *parity_valid,
    const int nbaddata)
{
	if (nbaddata == 1) {
		if (parity_valid[CODE_P]) {
			return (rm->rm_ops->rec[RAIDZ_REC_P]);
		} else if (parity_valid[CODE_Q]) {
			return (rm->rm_ops->rec[RAIDZ_REC_Q]);
		} else if (parity_valid[CODE_R]) {
			return (rm->rm_ops->rec[RAIDZ_REC_R]);
		}
	} else if (nbaddata == 2) {
		if (parity_valid[CODE_P] && parity_valid[CODE_Q]) {
			return (rm->rm_ops->rec[RAIDZ_REC_PQ]);
		} else if (parity_valid[CODE_P] && parity_valid[CODE_R]) {
			return (rm->rm_ops->rec[RAIDZ_REC_PR]);
		} else if (parity_valid[CODE_Q] && parity_valid[CODE_R]) {
			return (rm->rm_ops->rec[RAIDZ_REC_QR]);
		}
	} else if (nbaddata == 3 &&
	    parity_valid[CODE_P] && parity_valid[CODE_Q] &&
	    parity_valid[CODE_R]) {
		return (rm->rm_ops->rec[RAIDZ_REC_PQR]);
	}
	return ((raidz_rec_f) NULL);
}

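/*
 * Worked example for the selectors above: with triple parity (PQR), two
 * failed data columns, and only the P and R parity columns intact,
 * reconstruct_fun_pqr_sel() returns rm->rm_ops->rec[RAIDZ_REC_PR].  If the
 * chosen implementation provides no such method (or the parity combination
 * does not match), the NULL result makes vdev_raidz_math_reconstruct()
 * below fall back to RAIDZ_ORIGINAL_IMPL.
 */
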
/*
 * Select data reconstruction method for raidz_map
 * @parity_valid - Parity validity flag
 * @dt           - Failed data index array
 * @nbaddata     - Number of failed data columns
 */
int
vdev_raidz_math_reconstruct(raidz_map_t *rm, raidz_row_t *rr,
    const int *parity_valid, const int *dt, const int nbaddata)
{
	raidz_rec_f rec_fn = NULL;

	switch (raidz_parity(rm)) {
	case PARITY_P:
		rec_fn = reconstruct_fun_p_sel(rm, parity_valid, nbaddata);
		break;
	case PARITY_PQ:
		rec_fn = reconstruct_fun_pq_sel(rm, parity_valid, nbaddata);
		break;
	case PARITY_PQR:
		rec_fn = reconstruct_fun_pqr_sel(rm, parity_valid, nbaddata);
		break;
	default:
		cmn_err(CE_PANIC, "invalid RAID-Z configuration %llu",
		    (u_longlong_t)raidz_parity(rm));
		break;
	}

	if (rec_fn == NULL)
		return (RAIDZ_ORIGINAL_IMPL);
	else
		return (rec_fn(rr, dt));
}

const char *const raidz_gen_name[] = {
	"gen_p", "gen_pq", "gen_pqr"
};
const char *const raidz_rec_name[] = {
	"rec_p", "rec_q", "rec_r",
	"rec_pq", "rec_pr", "rec_qr", "rec_pqr"
};

#if defined(_KERNEL)

#define	RAIDZ_KSTAT_LINE_LEN	(17 + 10*12 + 1)

static int
raidz_math_kstat_headers(char *buf, size_t size)
{
	ASSERT3U(size, >=, RAIDZ_KSTAT_LINE_LEN);

	ssize_t off = snprintf(buf, size, "%-17s", "implementation");

	for (int i = 0; i < ARRAY_SIZE(raidz_gen_name); i++)
		off += snprintf(buf + off, size - off, "%-16s",
		    raidz_gen_name[i]);

	for (int i = 0; i < ARRAY_SIZE(raidz_rec_name); i++)
		off += snprintf(buf + off, size - off, "%-16s",
		    raidz_rec_name[i]);

	(void) snprintf(buf + off, size - off, "\n");

	return (0);
}

static int
raidz_math_kstat_data(char *buf, size_t size, void *data)
{
	raidz_impl_kstat_t *fstat = &raidz_impl_kstats[raidz_supp_impl_cnt];
	raidz_impl_kstat_t *cstat = (raidz_impl_kstat_t *)data;
	ssize_t off = 0;
	int i;

	ASSERT3U(size, >=, RAIDZ_KSTAT_LINE_LEN);

	if (cstat == fstat) {
		off += snprintf(buf + off, size - off, "%-17s", "fastest");

		for (i = 0; i < ARRAY_SIZE(raidz_gen_name); i++) {
			int id = fstat->gen[i];
			off += snprintf(buf + off, size - off, "%-16s",
			    raidz_supp_impl[id]->name);
		}
		for (i = 0; i < ARRAY_SIZE(raidz_rec_name); i++) {
			int id = fstat->rec[i];
			off += snprintf(buf + off, size - off, "%-16s",
			    raidz_supp_impl[id]->name);
		}
	} else {
		ptrdiff_t id = cstat - raidz_impl_kstats;

		off += snprintf(buf + off, size - off, "%-17s",
		    raidz_supp_impl[id]->name);

		for (i = 0; i < ARRAY_SIZE(raidz_gen_name); i++)
			off += snprintf(buf + off, size - off, "%-16llu",
			    (u_longlong_t)cstat->gen[i]);

		for (i = 0; i < ARRAY_SIZE(raidz_rec_name); i++)
			off += snprintf(buf + off, size - off, "%-16llu",
			    (u_longlong_t)cstat->rec[i]);
	}

	(void) snprintf(buf + off, size - off, "\n");

	return (0);
}

static void *
raidz_math_kstat_addr(kstat_t *ksp, loff_t n)
{
	if (n <= raidz_supp_impl_cnt)
		ksp->ks_private = (void *) (raidz_impl_kstats + n);
	else
		ksp->ks_private = NULL;

	return (ksp->ks_private);
}

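/*
 * Example of the resulting kstat output (values and column set abbreviated,
 * purely illustrative), e.g. from /proc/spl/kstat/zfs/vdev_raidz_bench on
 * Linux:
 *
 *	implementation   gen_p           gen_pq          gen_pqr         ...
 *	original         201866738       126432924       98765432        ...
 *	scalar           697116789       400285776       312361532       ...
 *	avx2             3777976628      2778622496      2091536330      ...
 *	fastest          avx2            avx2            avx2            ...
 *
 * Numeric rows are per-disk throughput in bytes per second; the final
 * "fastest" row names the implementation chosen for each method.
 */
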
#define	BENCH_D_COLS	(8ULL)
#define	BENCH_COLS	(BENCH_D_COLS + PARITY_PQR)
#define	BENCH_ZIO_SIZE	(1ULL << SPA_OLD_MAXBLOCKSHIFT)	/* 128 kiB */
#define	BENCH_NS	MSEC2NSEC(1)			/* 1ms */

typedef void (*benchmark_fn)(raidz_map_t *rm, const int fn);

static void
benchmark_gen_impl(raidz_map_t *rm, const int fn)
{
	(void) fn;
	vdev_raidz_generate_parity(rm);
}

static void
benchmark_rec_impl(raidz_map_t *rm, const int fn)
{
	static const int rec_tgt[7][3] = {
		{1, 2, 3},	/* rec_p:   bad QR & D[0]	*/
		{0, 2, 3},	/* rec_q:   bad PR & D[0]	*/
		{0, 1, 3},	/* rec_r:   bad PQ & D[0]	*/
		{2, 3, 4},	/* rec_pq:  bad R  & D[0][1]	*/
		{1, 3, 4},	/* rec_pr:  bad Q  & D[0][1]	*/
		{0, 3, 4},	/* rec_qr:  bad P  & D[0][1]	*/
		{3, 4, 5}	/* rec_pqr: bad    & D[0][1][2] */
	};

	vdev_raidz_reconstruct(rm, rec_tgt[fn], 3);
}

/*
 * Benchmarking of all supported implementations (raidz_supp_impl_cnt)
 * is performed by setting the rm_ops pointer and calling the top level
 * generate/reconstruct methods of bench_rm.
 */
static void
benchmark_raidz_impl(raidz_map_t *bench_rm, const int fn, benchmark_fn bench_fn)
{
	uint64_t run_cnt, speed, best_speed = 0;
	hrtime_t t_start, t_diff;
	raidz_impl_ops_t *curr_impl;
	raidz_impl_kstat_t *fstat = &raidz_impl_kstats[raidz_supp_impl_cnt];
	int impl, i;

	for (impl = 0; impl < raidz_supp_impl_cnt; impl++) {
		/* set an implementation to benchmark */
		curr_impl = raidz_supp_impl[impl];
		bench_rm->rm_ops = curr_impl;

		run_cnt = 0;
		t_start = gethrtime();

		do {
			for (i = 0; i < 5; i++, run_cnt++)
				bench_fn(bench_rm, fn);

			t_diff = gethrtime() - t_start;
		} while (t_diff < BENCH_NS);

		speed = run_cnt * BENCH_ZIO_SIZE * NANOSEC;
		speed /= (t_diff * BENCH_COLS);

		if (bench_fn == benchmark_gen_impl)
			raidz_impl_kstats[impl].gen[fn] = speed;
		else
			raidz_impl_kstats[impl].rec[fn] = speed;

		/* Update fastest implementation method */
		if (speed > best_speed) {
			best_speed = speed;

			if (bench_fn == benchmark_gen_impl) {
				fstat->gen[fn] = impl;
				vdev_raidz_fastest_impl.gen[fn] =
				    curr_impl->gen[fn];
			} else {
				fstat->rec[fn] = impl;
				vdev_raidz_fastest_impl.rec[fn] =
				    curr_impl->rec[fn];
			}
		}
	}
}
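/*
 * Worked example of the speed computation above (numbers are purely
 * illustrative): if one implementation completes run_cnt = 1000 calls over a
 * BENCH_ZIO_SIZE = 128 KiB map in t_diff = 1,000,000 ns, then
 *
 *	speed = run_cnt * BENCH_ZIO_SIZE * NANOSEC / (t_diff * BENCH_COLS)
 *	      = 1000 * 131072 * 10^9 / (10^6 * 11)
 *	      ~ 1.19 * 10^10 B/s
 *
 * i.e. roughly 11.9 GB/s of per-disk throughput reported in the kstat.
 */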
#endif

/*
 * Initialize and benchmark all supported implementations.
 */
static void
benchmark_raidz(void)
{
	raidz_impl_ops_t *curr_impl;
	int i, c;

	/* Move supported impl into raidz_supp_impl */
	for (i = 0, c = 0; i < ARRAY_SIZE(raidz_all_maths); i++) {
		curr_impl = (raidz_impl_ops_t *)raidz_all_maths[i];

		if (curr_impl->init)
			curr_impl->init();

		if (curr_impl->is_supported())
			raidz_supp_impl[c++] = (raidz_impl_ops_t *)curr_impl;
	}
	membar_producer();		/* complete raidz_supp_impl[] init */
	raidz_supp_impl_cnt = c;	/* number of supported impl */

#if defined(_KERNEL)
	abd_t *pabd;
	zio_t *bench_zio = NULL;
	raidz_map_t *bench_rm = NULL;
	uint64_t bench_parity;

	/* Fake a zio and run the benchmark on a warmed-up buffer */
	bench_zio = kmem_zalloc(sizeof (zio_t), KM_SLEEP);
	bench_zio->io_offset = 0;
	bench_zio->io_size = BENCH_ZIO_SIZE; /* only data columns */
	bench_zio->io_abd = abd_alloc_linear(BENCH_ZIO_SIZE, B_TRUE);
	memset(abd_to_buf(bench_zio->io_abd), 0xAA, BENCH_ZIO_SIZE);

	/* Benchmark parity generation methods */
	for (int fn = 0; fn < RAIDZ_GEN_NUM; fn++) {
		bench_parity = fn + 1;
		/* A new raidz_map is needed for each generate_p/q/r */
		bench_rm = vdev_raidz_map_alloc(bench_zio, SPA_MINBLOCKSHIFT,
		    BENCH_D_COLS + bench_parity, bench_parity);

		benchmark_raidz_impl(bench_rm, fn, benchmark_gen_impl);

		vdev_raidz_map_free(bench_rm);
	}

	/* Benchmark data reconstruction methods */
	bench_rm = vdev_raidz_map_alloc(bench_zio, SPA_MINBLOCKSHIFT,
	    BENCH_COLS, PARITY_PQR);

	/* Ensure that fake parity blocks are initialized */
	for (c = 0; c < bench_rm->rm_row[0]->rr_firstdatacol; c++) {
		pabd = bench_rm->rm_row[0]->rr_col[c].rc_abd;
		memset(abd_to_buf(pabd), 0xAA, abd_get_size(pabd));
	}

	for (int fn = 0; fn < RAIDZ_REC_NUM; fn++)
		benchmark_raidz_impl(bench_rm, fn, benchmark_rec_impl);

	vdev_raidz_map_free(bench_rm);

	/* Clean up the bench zio */
	abd_free(bench_zio->io_abd);
	kmem_free(bench_zio, sizeof (zio_t));
#else
	/*
	 * Skip the benchmark in user space to avoid impacting libzpool
	 * consumers (zdb, zhack, zinject, ztest).  The last implementation
	 * is assumed to be the fastest and used by default.
	 */
	memcpy(&vdev_raidz_fastest_impl,
	    raidz_supp_impl[raidz_supp_impl_cnt - 1],
	    sizeof (vdev_raidz_fastest_impl));
	strcpy(vdev_raidz_fastest_impl.name, "fastest");
#endif /* _KERNEL */
}

void
vdev_raidz_math_init(void)
{
	/* Determine the fastest available implementation. */
	benchmark_raidz();

#if defined(_KERNEL)
	/* Install kstats for all implementations */
	raidz_math_kstat = kstat_create("zfs", 0, "vdev_raidz_bench", "misc",
	    KSTAT_TYPE_RAW, 0, KSTAT_FLAG_VIRTUAL);
	if (raidz_math_kstat != NULL) {
		raidz_math_kstat->ks_data = NULL;
		raidz_math_kstat->ks_ndata = UINT32_MAX;
		kstat_set_raw_ops(raidz_math_kstat,
		    raidz_math_kstat_headers,
		    raidz_math_kstat_data,
		    raidz_math_kstat_addr);
		kstat_install(raidz_math_kstat);
	}
#endif

	/* Finish initialization */
	atomic_swap_32(&zfs_vdev_raidz_impl, user_sel_impl);
	raidz_math_initialized = B_TRUE;
}

void
vdev_raidz_math_fini(void)
{
	raidz_impl_ops_t const *curr_impl;

#if defined(_KERNEL)
	if (raidz_math_kstat != NULL) {
		kstat_delete(raidz_math_kstat);
		raidz_math_kstat = NULL;
	}
#endif

	for (int i = 0; i < ARRAY_SIZE(raidz_all_maths); i++) {
		curr_impl = raidz_all_maths[i];
		if (curr_impl->fini)
			curr_impl->fini();
	}
}

static const struct {
	const char *name;
	uint32_t sel;
} math_impl_opts[] = {
		{ "cycle",	IMPL_CYCLE },
		{ "fastest",	IMPL_FASTEST },
		{ "original",	IMPL_ORIGINAL },
		{ "scalar",	IMPL_SCALAR }
};

/*
 * Set the desired raidz implementation.
 *
 * If we are called before init(), the user preference is saved in
 * user_sel_impl and applied in a later init() call.  This happens when the
 * module parameter is specified on module load.  Otherwise, update
 * zfs_vdev_raidz_impl directly.
 *
 * @val		Name of the raidz implementation to use
 */
int
vdev_raidz_impl_set(const char *val)
{
	int err = -EINVAL;
	char req_name[RAIDZ_IMPL_NAME_MAX];
	uint32_t impl = RAIDZ_IMPL_READ(user_sel_impl);
	size_t i;

	/* Sanitize input */
	i = strnlen(val, RAIDZ_IMPL_NAME_MAX);
	if (i == 0 || i == RAIDZ_IMPL_NAME_MAX)
		return (err);

	strlcpy(req_name, val, RAIDZ_IMPL_NAME_MAX);
	while (i > 0 && !!isspace(req_name[i-1]))
		i--;
	req_name[i] = '\0';

	/* Check mandatory options */
	for (i = 0; i < ARRAY_SIZE(math_impl_opts); i++) {
		if (strcmp(req_name, math_impl_opts[i].name) == 0) {
			impl = math_impl_opts[i].sel;
			err = 0;
			break;
		}
	}

	/* Check all supported implementations if init() was already called */
	if (err != 0 && raidz_math_initialized) {
		for (i = 0; i < raidz_supp_impl_cnt; i++) {
			if (strcmp(req_name, raidz_supp_impl[i]->name) == 0) {
				impl = i;
				err = 0;
				break;
			}
		}
	}

	if (err == 0) {
		if (raidz_math_initialized)
			atomic_swap_32(&zfs_vdev_raidz_impl, impl);
		else
			atomic_swap_32(&user_sel_impl, impl);
	}

	return (err);
}

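/*
 * Example usage (illustrative): on Linux the selection is normally made
 * through the zfs_vdev_raidz_impl module parameter defined below, either at
 * module load time or at runtime, e.g.:
 *
 *	modprobe zfs zfs_vdev_raidz_impl=avx2
 *	echo fastest > /sys/module/zfs/parameters/zfs_vdev_raidz_impl
 *
 * Before vdev_raidz_math_init() has run, only the mandatory names
 * ("fastest", "cycle", "original", "scalar") are accepted; the SIMD
 * implementation names become valid once the benchmark has populated
 * raidz_supp_impl[].
 */
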
#if defined(_KERNEL) && defined(__linux__)

static int
zfs_vdev_raidz_impl_set(const char *val, zfs_kernel_param_t *kp)
{
	return (vdev_raidz_impl_set(val));
}

static int
zfs_vdev_raidz_impl_get(char *buffer, zfs_kernel_param_t *kp)
{
	int i, cnt = 0;
	char *fmt;
	const uint32_t impl = RAIDZ_IMPL_READ(zfs_vdev_raidz_impl);

	ASSERT(raidz_math_initialized);

	/* list mandatory options */
	for (i = 0; i < ARRAY_SIZE(math_impl_opts) - 2; i++) {
		fmt = (impl == math_impl_opts[i].sel) ? "[%s] " : "%s ";
		cnt += sprintf(buffer + cnt, fmt, math_impl_opts[i].name);
	}

	/* list all supported implementations */
	for (i = 0; i < raidz_supp_impl_cnt; i++) {
		fmt = (i == impl) ? "[%s] " : "%s ";
		cnt += sprintf(buffer + cnt, fmt, raidz_supp_impl[i]->name);
	}

	return (cnt);
}

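/*
 * Reading the parameter back produces the list built above, with the
 * currently selected implementation in brackets.  A possible (illustrative)
 * result on an AVX2-capable machine:
 *
 *	$ cat /sys/module/zfs/parameters/zfs_vdev_raidz_impl
 *	cycle [fastest] original scalar sse2 ssse3 avx2
 */
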
module_param_call(zfs_vdev_raidz_impl, zfs_vdev_raidz_impl_set,
    zfs_vdev_raidz_impl_get, NULL, 0644);
MODULE_PARM_DESC(zfs_vdev_raidz_impl, "Select raidz implementation.");
#endif
671