xref: /freebsd/sys/contrib/openzfs/module/zfs/vdev_raidz_math.c (revision 61145dc2b94f12f6a47344fb9aac702321880e43)
// SPDX-License-Identifier: CDDL-1.0
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (C) 2016 Gvozden Nešković. All rights reserved.
 */

#include <sys/simd.h>
#include <sys/zfs_context.h>
#include <sys/types.h>
#include <sys/zio.h>
#include <sys/debug.h>
#include <sys/zfs_debug.h>
#include <sys/vdev_raidz.h>
#include <sys/vdev_raidz_impl.h>

/* Opaque implementation with NULL methods to represent original methods */
static const raidz_impl_ops_t vdev_raidz_original_impl = {
	.name = "original",
	.is_supported = raidz_will_scalar_work,
};

/* RAIDZ parity ops structure that holds the fastest methods */
static raidz_impl_ops_t vdev_raidz_fastest_impl = {
	.name = "fastest"
};

/* All compiled in implementations */
static const raidz_impl_ops_t *const raidz_all_maths[] = {
	&vdev_raidz_original_impl,
	&vdev_raidz_scalar_impl,
#if defined(__x86_64) && defined(HAVE_SSE2)	/* only x86_64 for now */
	&vdev_raidz_sse2_impl,
#endif
#if defined(__x86_64) && defined(HAVE_SSSE3)	/* only x86_64 for now */
	&vdev_raidz_ssse3_impl,
#endif
#if defined(__x86_64) && defined(HAVE_AVX2)	/* only x86_64 for now */
	&vdev_raidz_avx2_impl,
#endif
#if defined(__x86_64) && defined(HAVE_AVX512F)	/* only x86_64 for now */
	&vdev_raidz_avx512f_impl,
#endif
#if defined(__x86_64) && defined(HAVE_AVX512BW)	/* only x86_64 for now */
	&vdev_raidz_avx512bw_impl,
#endif
#if defined(__aarch64__) && !defined(__FreeBSD__)
	&vdev_raidz_aarch64_neon_impl,
	&vdev_raidz_aarch64_neonx2_impl,
#endif
#if defined(__powerpc__) && defined(__altivec__)
	&vdev_raidz_powerpc_altivec_impl,
#endif
};

/* Indicate that benchmark has been completed */
static boolean_t raidz_math_initialized = B_FALSE;

/* Select raidz implementation */
#define	IMPL_FASTEST	(UINT32_MAX)
#define	IMPL_CYCLE	(UINT32_MAX - 1)
#define	IMPL_ORIGINAL	(0)
#define	IMPL_SCALAR	(1)

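/*
 * Force a single, untorn load of the selector, which may be updated
 * concurrently via atomic_swap_32().
 */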
#define	RAIDZ_IMPL_READ(i)	(*(volatile uint32_t *) &(i))

uint32_t zfs_vdev_raidz_impl = IMPL_SCALAR;
static uint32_t user_sel_impl = IMPL_FASTEST;

/* Hold all supported implementations */
static size_t raidz_supp_impl_cnt = 0;
static raidz_impl_ops_t *raidz_supp_impl[ARRAY_SIZE(raidz_all_maths)];

#if defined(_KERNEL)
/*
 * kstat values for supported implementations.
 * Values represent the per-disk throughput of an 8 data disk + parity
 * raidz vdev [B/s].
 */
static raidz_impl_kstat_t raidz_impl_kstats[ARRAY_SIZE(raidz_all_maths) + 1];

/* kstat for benchmarked implementations */
static kstat_t *raidz_math_kstat = NULL;
#endif

/*
 * Returns the RAIDZ operations for raidz_map() parity calculations.  When
 * a SIMD implementation is not allowed in the current context, fall back
 * to the fastest generic implementation.
 */
const raidz_impl_ops_t *
vdev_raidz_math_get_ops(void)
{
	if (!kfpu_allowed())
		return (&vdev_raidz_scalar_impl);

	raidz_impl_ops_t *ops = NULL;
	const uint32_t impl = RAIDZ_IMPL_READ(zfs_vdev_raidz_impl);

	switch (impl) {
	case IMPL_FASTEST:
		ASSERT(raidz_math_initialized);
		ops = &vdev_raidz_fastest_impl;
		break;
	case IMPL_CYCLE:
		/* Cycle through all supported implementations */
		ASSERT(raidz_math_initialized);
		ASSERT3U(raidz_supp_impl_cnt, >, 0);
		static size_t cycle_impl_idx = 0;
		size_t idx = (++cycle_impl_idx) % raidz_supp_impl_cnt;
		ops = raidz_supp_impl[idx];
		break;
	case IMPL_ORIGINAL:
		ops = (raidz_impl_ops_t *)&vdev_raidz_original_impl;
		break;
	case IMPL_SCALAR:
		ops = (raidz_impl_ops_t *)&vdev_raidz_scalar_impl;
		break;
	default:
		ASSERT3U(impl, <, raidz_supp_impl_cnt);
		ASSERT3U(raidz_supp_impl_cnt, >, 0);
		if (impl < ARRAY_SIZE(raidz_all_maths))
			ops = raidz_supp_impl[impl];
		break;
	}

	ASSERT3P(ops, !=, NULL);

	return (ops);
}
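
/*
 * Typical caller sketch (assumes the vdev_raidz.c call site shape): the
 * selected ops vector is cached when a raidz map is allocated, e.g.
 *
 *	rm->rm_ops = vdev_raidz_math_get_ops();
 */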

/*
 * Select parity generation method for raidz_map
 */
int
vdev_raidz_math_generate(raidz_map_t *rm, raidz_row_t *rr)
{
	raidz_gen_f gen_parity = NULL;

	switch (raidz_parity(rm)) {
		case 1:
			gen_parity = rm->rm_ops->gen[RAIDZ_GEN_P];
			break;
		case 2:
			gen_parity = rm->rm_ops->gen[RAIDZ_GEN_PQ];
			break;
		case 3:
			gen_parity = rm->rm_ops->gen[RAIDZ_GEN_PQR];
			break;
		default:
			gen_parity = NULL;
			cmn_err(CE_PANIC, "invalid RAID-Z configuration %llu",
			    (u_longlong_t)raidz_parity(rm));
			break;
	}

	/* If the method is NULL, execute the original implementation */
	if (gen_parity == NULL)
		return (RAIDZ_ORIGINAL_IMPL);

	gen_parity(rr);

	return (0);
}
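
/*
 * Caller-side sketch (assumed shape, mirroring vdev_raidz.c): when
 * RAIDZ_ORIGINAL_IMPL is returned, the caller runs the original parity
 * code instead, roughly:
 *
 *	if (vdev_raidz_math_generate(rm, rr) == RAIDZ_ORIGINAL_IMPL) {
 *		... run vdev_raidz_generate_parity_p/pq/pqr(rr) ...
 *	}
 */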

static raidz_rec_f
reconstruct_fun_p_sel(raidz_map_t *rm, const int *parity_valid,
    const int nbaddata)
{
	if (nbaddata == 1 && parity_valid[CODE_P]) {
		return (rm->rm_ops->rec[RAIDZ_REC_P]);
	}
	return ((raidz_rec_f) NULL);
}

static raidz_rec_f
reconstruct_fun_pq_sel(raidz_map_t *rm, const int *parity_valid,
    const int nbaddata)
{
	if (nbaddata == 1) {
		if (parity_valid[CODE_P]) {
			return (rm->rm_ops->rec[RAIDZ_REC_P]);
		} else if (parity_valid[CODE_Q]) {
			return (rm->rm_ops->rec[RAIDZ_REC_Q]);
		}
	} else if (nbaddata == 2 &&
	    parity_valid[CODE_P] && parity_valid[CODE_Q]) {
		return (rm->rm_ops->rec[RAIDZ_REC_PQ]);
	}
	return ((raidz_rec_f) NULL);
}

static raidz_rec_f
reconstruct_fun_pqr_sel(raidz_map_t *rm, const int *parity_valid,
    const int nbaddata)
{
	if (nbaddata == 1) {
		if (parity_valid[CODE_P]) {
			return (rm->rm_ops->rec[RAIDZ_REC_P]);
		} else if (parity_valid[CODE_Q]) {
			return (rm->rm_ops->rec[RAIDZ_REC_Q]);
		} else if (parity_valid[CODE_R]) {
			return (rm->rm_ops->rec[RAIDZ_REC_R]);
		}
	} else if (nbaddata == 2) {
		if (parity_valid[CODE_P] && parity_valid[CODE_Q]) {
			return (rm->rm_ops->rec[RAIDZ_REC_PQ]);
		} else if (parity_valid[CODE_P] && parity_valid[CODE_R]) {
			return (rm->rm_ops->rec[RAIDZ_REC_PR]);
		} else if (parity_valid[CODE_Q] && parity_valid[CODE_R]) {
			return (rm->rm_ops->rec[RAIDZ_REC_QR]);
		}
	} else if (nbaddata == 3 &&
	    parity_valid[CODE_P] && parity_valid[CODE_Q] &&
	    parity_valid[CODE_R]) {
		return (rm->rm_ops->rec[RAIDZ_REC_PQR]);
	}
	return ((raidz_rec_f) NULL);
}

/*
 * Select data reconstruction method for raidz_map
 * @parity_valid - Parity validity flags (one per parity column)
 * @dt           - Failed data index array
 * @nbaddata     - Number of failed data columns
 */
int
vdev_raidz_math_reconstruct(raidz_map_t *rm, raidz_row_t *rr,
    const int *parity_valid, const int *dt, const int nbaddata)
{
	raidz_rec_f rec_fn = NULL;

	switch (raidz_parity(rm)) {
	case PARITY_P:
		rec_fn = reconstruct_fun_p_sel(rm, parity_valid, nbaddata);
		break;
	case PARITY_PQ:
		rec_fn = reconstruct_fun_pq_sel(rm, parity_valid, nbaddata);
		break;
	case PARITY_PQR:
		rec_fn = reconstruct_fun_pqr_sel(rm, parity_valid, nbaddata);
		break;
	default:
		cmn_err(CE_PANIC, "invalid RAID-Z configuration %llu",
		    (u_longlong_t)raidz_parity(rm));
		break;
	}

	if (rec_fn == NULL)
		return (RAIDZ_ORIGINAL_IMPL);
	else
		return (rec_fn(rr, dt));
}

const char *const raidz_gen_name[] = {
	"gen_p", "gen_pq", "gen_pqr"
};
const char *const raidz_rec_name[] = {
	"rec_p", "rec_q", "rec_r",
	"rec_pq", "rec_pr", "rec_qr", "rec_pqr"
};

#if defined(_KERNEL)

#define	RAIDZ_KSTAT_LINE_LEN	(17 + 10*12 + 1)

static int
raidz_math_kstat_headers(char *buf, size_t size)
{
	ASSERT3U(size, >=, RAIDZ_KSTAT_LINE_LEN);

	ssize_t off = kmem_scnprintf(buf, size, "%-17s", "implementation");

	for (int i = 0; i < ARRAY_SIZE(raidz_gen_name); i++)
		off += kmem_scnprintf(buf + off, size - off, "%-16s",
		    raidz_gen_name[i]);

	for (int i = 0; i < ARRAY_SIZE(raidz_rec_name); i++)
		off += kmem_scnprintf(buf + off, size - off, "%-16s",
		    raidz_rec_name[i]);

	(void) kmem_scnprintf(buf + off, size - off, "\n");

	return (0);
}

static int
raidz_math_kstat_data(char *buf, size_t size, void *data)
{
	raidz_impl_kstat_t *fstat = &raidz_impl_kstats[raidz_supp_impl_cnt];
	raidz_impl_kstat_t *cstat = (raidz_impl_kstat_t *)data;
	ssize_t off = 0;
	int i;

	ASSERT3U(size, >=, RAIDZ_KSTAT_LINE_LEN);

	if (cstat == fstat) {
		off += kmem_scnprintf(buf + off, size - off, "%-17s",
		    "fastest");

		for (i = 0; i < ARRAY_SIZE(raidz_gen_name); i++) {
			int id = fstat->gen[i];
			off += kmem_scnprintf(buf + off, size - off, "%-16s",
			    raidz_supp_impl[id]->name);
		}
		for (i = 0; i < ARRAY_SIZE(raidz_rec_name); i++) {
			int id = fstat->rec[i];
			off += kmem_scnprintf(buf + off, size - off, "%-16s",
			    raidz_supp_impl[id]->name);
		}
	} else {
		ptrdiff_t id = cstat - raidz_impl_kstats;

		off += kmem_scnprintf(buf + off, size - off, "%-17s",
		    raidz_supp_impl[id]->name);

		for (i = 0; i < ARRAY_SIZE(raidz_gen_name); i++)
			off += kmem_scnprintf(buf + off, size - off, "%-16llu",
			    (u_longlong_t)cstat->gen[i]);

		for (i = 0; i < ARRAY_SIZE(raidz_rec_name); i++)
			off += kmem_scnprintf(buf + off, size - off, "%-16llu",
			    (u_longlong_t)cstat->rec[i]);
	}

	(void) kmem_scnprintf(buf + off, size - off, "\n");

	return (0);
}

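/*
 * Row addressing: rows 0 .. raidz_supp_impl_cnt - 1 map to the
 * per-implementation results, and the extra row at raidz_supp_impl_cnt is
 * the synthetic "fastest" entry, hence the inclusive bound below.
 */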
static void *
raidz_math_kstat_addr(kstat_t *ksp, loff_t n)
{
	if (n <= raidz_supp_impl_cnt)
		ksp->ks_private = (void *) (raidz_impl_kstats + n);
	else
		ksp->ks_private = NULL;

	return (ksp->ks_private);
}

#define	BENCH_D_COLS	(8ULL)
#define	BENCH_COLS	(BENCH_D_COLS + PARITY_PQR)
#define	BENCH_ZIO_SIZE	(1ULL << SPA_OLD_MAXBLOCKSHIFT)	/* 128 KiB */
#define	BENCH_NS	MSEC2NSEC(1)			/* 1ms */
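/*
 * Worked out: BENCH_COLS is 8 data + 3 parity = 11 columns, so every
 * benchmark iteration pushes one 128 KiB zio through the method under test.
 */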

typedef void (*benchmark_fn)(raidz_map_t *rm, const int fn);

static void
benchmark_gen_impl(raidz_map_t *rm, const int fn)
{
	(void) fn;
	vdev_raidz_generate_parity(rm);
}

static void
benchmark_rec_impl(raidz_map_t *rm, const int fn)
{
	static const int rec_tgt[7][3] = {
		{1, 2, 3},	/* rec_p:   bad QR & D[0]	*/
		{0, 2, 3},	/* rec_q:   bad PR & D[0]	*/
		{0, 1, 3},	/* rec_r:   bad PQ & D[0]	*/
		{2, 3, 4},	/* rec_pq:  bad R  & D[0][1]	*/
		{1, 3, 4},	/* rec_pr:  bad Q  & D[0][1]	*/
		{0, 3, 4},	/* rec_qr:  bad P  & D[0][1]	*/
		{3, 4, 5}	/* rec_pqr: bad    & D[0][1][2] */
	};

	vdev_raidz_reconstruct(rm, rec_tgt[fn], 3);
}

/*
 * Benchmarking of all supported implementations (raidz_supp_impl_cnt)
 * is performed by setting the rm_ops pointer and calling the top level
 * generate/reconstruct methods of bench_rm.
 */
static void
benchmark_raidz_impl(raidz_map_t *bench_rm, const int fn, benchmark_fn bench_fn)
{
	uint64_t run_cnt, speed, best_speed = 0;
	hrtime_t t_start, t_diff;
	raidz_impl_ops_t *curr_impl;
	raidz_impl_kstat_t *fstat = &raidz_impl_kstats[raidz_supp_impl_cnt];
	int impl, i;

	for (impl = 0; impl < raidz_supp_impl_cnt; impl++) {
		/* set an implementation to benchmark */
		curr_impl = raidz_supp_impl[impl];
		bench_rm->rm_ops = curr_impl;

		run_cnt = 0;
		t_start = gethrtime();

		do {
			for (i = 0; i < 5; i++, run_cnt++)
				bench_fn(bench_rm, fn);

			t_diff = gethrtime() - t_start;
		} while (t_diff < BENCH_NS);

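		/*
		 * Per-column throughput in B/s:
		 * (iterations * zio bytes * ns-per-s) / (elapsed ns * columns)
		 */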
		speed = run_cnt * BENCH_ZIO_SIZE * NANOSEC;
		speed /= (t_diff * BENCH_COLS);

		if (bench_fn == benchmark_gen_impl)
			raidz_impl_kstats[impl].gen[fn] = speed;
		else
			raidz_impl_kstats[impl].rec[fn] = speed;

		/* Update fastest implementation method */
		if (speed > best_speed) {
			best_speed = speed;

			if (bench_fn == benchmark_gen_impl) {
				fstat->gen[fn] = impl;
				vdev_raidz_fastest_impl.gen[fn] =
				    curr_impl->gen[fn];
			} else {
				fstat->rec[fn] = impl;
				vdev_raidz_fastest_impl.rec[fn] =
				    curr_impl->rec[fn];
			}
		}
	}
}
#endif

/*
 * Initialize and benchmark all supported implementations.
 */
static void
benchmark_raidz(void)
{
	raidz_impl_ops_t *curr_impl;
	int i, c;

	/* Move supported impl into raidz_supp_impl */
	for (i = 0, c = 0; i < ARRAY_SIZE(raidz_all_maths); i++) {
		curr_impl = (raidz_impl_ops_t *)raidz_all_maths[i];

		if (curr_impl->init)
			curr_impl->init();

		if (curr_impl->is_supported())
			raidz_supp_impl[c++] = (raidz_impl_ops_t *)curr_impl;
	}
	membar_producer();		/* complete raidz_supp_impl[] init */
	raidz_supp_impl_cnt = c;	/* number of supported impl */

#if defined(_KERNEL)
	abd_t *pabd;
	zio_t *bench_zio = NULL;
	raidz_map_t *bench_rm = NULL;
	uint64_t bench_parity;

	/* Fake a zio and run the benchmark on a warmed up buffer */
	bench_zio = kmem_zalloc(sizeof (zio_t), KM_SLEEP);
	bench_zio->io_offset = 0;
	bench_zio->io_size = BENCH_ZIO_SIZE; /* only data columns */
	bench_zio->io_abd = abd_alloc_linear(BENCH_ZIO_SIZE, B_TRUE);
	memset(abd_to_buf(bench_zio->io_abd), 0xAA, BENCH_ZIO_SIZE);

	/* Benchmark parity generation methods */
	for (int fn = 0; fn < RAIDZ_GEN_NUM; fn++) {
		bench_parity = fn + 1;
		/* New raidz_map is needed for each generate_p/q/r */
		bench_rm = vdev_raidz_map_alloc(bench_zio, SPA_MINBLOCKSHIFT,
		    BENCH_D_COLS + bench_parity, bench_parity);

		benchmark_raidz_impl(bench_rm, fn, benchmark_gen_impl);

		vdev_raidz_map_free(bench_rm);
	}

	/* Benchmark data reconstruction methods */
	bench_rm = vdev_raidz_map_alloc(bench_zio, SPA_MINBLOCKSHIFT,
	    BENCH_COLS, PARITY_PQR);

	/* Ensure that fake parity blocks are initialized */
	for (c = 0; c < bench_rm->rm_row[0]->rr_firstdatacol; c++) {
		pabd = bench_rm->rm_row[0]->rr_col[c].rc_abd;
		memset(abd_to_buf(pabd), 0xAA, abd_get_size(pabd));
	}

	for (int fn = 0; fn < RAIDZ_REC_NUM; fn++)
		benchmark_raidz_impl(bench_rm, fn, benchmark_rec_impl);

	vdev_raidz_map_free(bench_rm);

	/* cleanup the bench zio */
	abd_free(bench_zio->io_abd);
	kmem_free(bench_zio, sizeof (zio_t));
#else
	/*
	 * Skip the benchmark in user space to avoid impacting libzpool
	 * consumers (zdb, zhack, zinject, ztest).  The last implementation
	 * is assumed to be the fastest and used by default.
	 */
	memcpy(&vdev_raidz_fastest_impl,
	    raidz_supp_impl[raidz_supp_impl_cnt - 1],
	    sizeof (vdev_raidz_fastest_impl));
	strcpy(vdev_raidz_fastest_impl.name, "fastest");
#endif /* _KERNEL */
}

void
vdev_raidz_math_init(void)
{
	/* Determine the fastest available implementation. */
	benchmark_raidz();

#if defined(_KERNEL)
	/* Install kstats for all implementations */
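	/*
	 * The resulting kstat is typically exposed as
	 * /proc/spl/kstat/zfs/vdev_raidz_bench on Linux and as the
	 * kstat.zfs.misc.vdev_raidz_bench sysctl on FreeBSD.
	 */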
	raidz_math_kstat = kstat_create("zfs", 0, "vdev_raidz_bench", "misc",
	    KSTAT_TYPE_RAW, 0, KSTAT_FLAG_VIRTUAL);
	if (raidz_math_kstat != NULL) {
		raidz_math_kstat->ks_data = NULL;
		raidz_math_kstat->ks_ndata = UINT32_MAX;
		kstat_set_raw_ops(raidz_math_kstat,
		    raidz_math_kstat_headers,
		    raidz_math_kstat_data,
		    raidz_math_kstat_addr);
		kstat_install(raidz_math_kstat);
	}
#endif

	/* Finish initialization */
	atomic_swap_32(&zfs_vdev_raidz_impl, user_sel_impl);
	raidz_math_initialized = B_TRUE;
}

void
vdev_raidz_math_fini(void)
{
	raidz_impl_ops_t const *curr_impl;

#if defined(_KERNEL)
	if (raidz_math_kstat != NULL) {
		kstat_delete(raidz_math_kstat);
		raidz_math_kstat = NULL;
	}
#endif

	for (int i = 0; i < ARRAY_SIZE(raidz_all_maths); i++) {
		curr_impl = raidz_all_maths[i];
		if (curr_impl->fini)
			curr_impl->fini();
	}
}

static const struct {
	const char *name;
	uint32_t sel;
} math_impl_opts[] = {
		{ "cycle",	IMPL_CYCLE },
		{ "fastest",	IMPL_FASTEST },
		{ "original",	IMPL_ORIGINAL },
		{ "scalar",	IMPL_SCALAR }
};

/*
 * Sets the desired raidz implementation.
 *
 * If we are called before init(), the user preference is saved in
 * user_sel_impl and applied in a later init() call. This occurs when the
 * module parameter is specified on module load. Otherwise, update
 * zfs_vdev_raidz_impl directly.
 *
 * @val		Name of raidz implementation to use
 */
int
vdev_raidz_impl_set(const char *val)
{
	int err = -EINVAL;
	char req_name[RAIDZ_IMPL_NAME_MAX];
	uint32_t impl = RAIDZ_IMPL_READ(user_sel_impl);
	size_t i;

	/* sanitize input */
	i = strnlen(val, RAIDZ_IMPL_NAME_MAX);
	if (i == 0 || i == RAIDZ_IMPL_NAME_MAX)
		return (err);

	strlcpy(req_name, val, RAIDZ_IMPL_NAME_MAX);
	while (i > 0 && !!isspace(req_name[i-1]))
		i--;
	req_name[i] = '\0';

	/* Check mandatory options */
	for (i = 0; i < ARRAY_SIZE(math_impl_opts); i++) {
		if (strcmp(req_name, math_impl_opts[i].name) == 0) {
			impl = math_impl_opts[i].sel;
			err = 0;
			break;
		}
	}

	/* check all supported impl if init() was already called */
	if (err != 0 && raidz_math_initialized) {
		/* check all supported implementations */
		for (i = 0; i < raidz_supp_impl_cnt; i++) {
			if (strcmp(req_name, raidz_supp_impl[i]->name) == 0) {
				impl = i;
				err = 0;
				break;
			}
		}
	}

	if (err == 0) {
		if (raidz_math_initialized)
			atomic_swap_32(&zfs_vdev_raidz_impl, impl);
		else
			atomic_swap_32(&user_sel_impl, impl);
	}

	return (err);
}
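
/*
 * Usage sketch (assumed plumbing, not defined in this file): on Linux the
 * selector is wired up as the zfs_vdev_raidz_impl module parameter, so
 *
 *	echo fastest > /sys/module/zfs/parameters/zfs_vdev_raidz_impl
 *
 * ends up here as vdev_raidz_impl_set("fastest").
 */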

#if defined(_KERNEL)

int
vdev_raidz_impl_get(char *buffer, size_t size)
{
	int i, cnt = 0;
	char *fmt;
	const uint32_t impl = RAIDZ_IMPL_READ(zfs_vdev_raidz_impl);

	ASSERT(raidz_math_initialized);

	/* list mandatory options */
	for (i = 0; i < ARRAY_SIZE(math_impl_opts) - 2; i++) {
		fmt = (impl == math_impl_opts[i].sel) ? "[%s] " : "%s ";
		cnt += kmem_scnprintf(buffer + cnt, size - cnt, fmt,
		    math_impl_opts[i].name);
	}

	/* list all supported implementations */
	for (i = 0; i < raidz_supp_impl_cnt; i++) {
		fmt = (i == impl) ? "[%s] " : "%s ";
		cnt += kmem_scnprintf(buffer + cnt, size - cnt, fmt,
		    raidz_supp_impl[i]->name);
	}

	return (cnt);
}

#endif
666