1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or https://opensource.org/licenses/CDDL-1.0. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright (C) 2016 Gvozden Nešković. All rights reserved. 23 */ 24 25 #include <sys/zfs_context.h> 26 #include <sys/types.h> 27 #include <sys/zio.h> 28 #include <sys/debug.h> 29 #include <sys/zfs_debug.h> 30 #include <sys/vdev_raidz.h> 31 #include <sys/vdev_raidz_impl.h> 32 #include <sys/simd.h> 33 34 /* Opaque implementation with NULL methods to represent original methods */ 35 static const raidz_impl_ops_t vdev_raidz_original_impl = { 36 .name = "original", 37 .is_supported = raidz_will_scalar_work, 38 }; 39 40 /* RAIDZ parity op that contain the fastest methods */ 41 static raidz_impl_ops_t vdev_raidz_fastest_impl = { 42 .name = "fastest" 43 }; 44 45 /* All compiled in implementations */ 46 static const raidz_impl_ops_t *const raidz_all_maths[] = { 47 &vdev_raidz_original_impl, 48 &vdev_raidz_scalar_impl, 49 #if defined(__x86_64) && defined(HAVE_SSE2) /* only x86_64 for now */ 50 &vdev_raidz_sse2_impl, 51 #endif 52 #if defined(__x86_64) && defined(HAVE_SSSE3) /* only x86_64 for now */ 53 &vdev_raidz_ssse3_impl, 54 #endif 55 #if defined(__x86_64) && defined(HAVE_AVX2) /* only x86_64 for now */ 56 &vdev_raidz_avx2_impl, 57 #endif 58 #if defined(__x86_64) && defined(HAVE_AVX512F) /* only x86_64 for now */ 59 &vdev_raidz_avx512f_impl, 60 #endif 61 #if defined(__x86_64) && defined(HAVE_AVX512BW) /* only x86_64 for now */ 62 &vdev_raidz_avx512bw_impl, 63 #endif 64 #if defined(__aarch64__) && !defined(__FreeBSD__) 65 &vdev_raidz_aarch64_neon_impl, 66 &vdev_raidz_aarch64_neonx2_impl, 67 #endif 68 #if defined(__powerpc__) && defined(__altivec__) 69 &vdev_raidz_powerpc_altivec_impl, 70 #endif 71 }; 72 73 /* Indicate that benchmark has been completed */ 74 static boolean_t raidz_math_initialized = B_FALSE; 75 76 /* Select raidz implementation */ 77 #define IMPL_FASTEST (UINT32_MAX) 78 #define IMPL_CYCLE (UINT32_MAX - 1) 79 #define IMPL_ORIGINAL (0) 80 #define IMPL_SCALAR (1) 81 82 #define RAIDZ_IMPL_READ(i) (*(volatile uint32_t *) &(i)) 83 84 static uint32_t zfs_vdev_raidz_impl = IMPL_SCALAR; 85 static uint32_t user_sel_impl = IMPL_FASTEST; 86 87 /* Hold all supported implementations */ 88 static size_t raidz_supp_impl_cnt = 0; 89 static raidz_impl_ops_t *raidz_supp_impl[ARRAY_SIZE(raidz_all_maths)]; 90 91 #if defined(_KERNEL) 92 /* 93 * kstats values for supported implementations 94 * Values represent per disk throughput of 8 disk+parity raidz vdev [B/s] 95 */ 96 static raidz_impl_kstat_t raidz_impl_kstats[ARRAY_SIZE(raidz_all_maths) + 1]; 97 98 /* kstat for benchmarked implementations */ 99 static kstat_t *raidz_math_kstat = NULL; 100 #endif 101 102 /* 103 * Returns the RAIDZ operations for raidz_map() parity calculations. When 104 * a SIMD implementation is not allowed in the current context, then fallback 105 * to the fastest generic implementation. 106 */ 107 const raidz_impl_ops_t * 108 vdev_raidz_math_get_ops(void) 109 { 110 if (!kfpu_allowed()) 111 return (&vdev_raidz_scalar_impl); 112 113 raidz_impl_ops_t *ops = NULL; 114 const uint32_t impl = RAIDZ_IMPL_READ(zfs_vdev_raidz_impl); 115 116 switch (impl) { 117 case IMPL_FASTEST: 118 ASSERT(raidz_math_initialized); 119 ops = &vdev_raidz_fastest_impl; 120 break; 121 case IMPL_CYCLE: 122 /* Cycle through all supported implementations */ 123 ASSERT(raidz_math_initialized); 124 ASSERT3U(raidz_supp_impl_cnt, >, 0); 125 static size_t cycle_impl_idx = 0; 126 size_t idx = (++cycle_impl_idx) % raidz_supp_impl_cnt; 127 ops = raidz_supp_impl[idx]; 128 break; 129 case IMPL_ORIGINAL: 130 ops = (raidz_impl_ops_t *)&vdev_raidz_original_impl; 131 break; 132 case IMPL_SCALAR: 133 ops = (raidz_impl_ops_t *)&vdev_raidz_scalar_impl; 134 break; 135 default: 136 ASSERT3U(impl, <, raidz_supp_impl_cnt); 137 ASSERT3U(raidz_supp_impl_cnt, >, 0); 138 if (impl < ARRAY_SIZE(raidz_all_maths)) 139 ops = raidz_supp_impl[impl]; 140 break; 141 } 142 143 ASSERT3P(ops, !=, NULL); 144 145 return (ops); 146 } 147 148 /* 149 * Select parity generation method for raidz_map 150 */ 151 int 152 vdev_raidz_math_generate(raidz_map_t *rm, raidz_row_t *rr) 153 { 154 raidz_gen_f gen_parity = NULL; 155 156 switch (raidz_parity(rm)) { 157 case 1: 158 gen_parity = rm->rm_ops->gen[RAIDZ_GEN_P]; 159 break; 160 case 2: 161 gen_parity = rm->rm_ops->gen[RAIDZ_GEN_PQ]; 162 break; 163 case 3: 164 gen_parity = rm->rm_ops->gen[RAIDZ_GEN_PQR]; 165 break; 166 default: 167 gen_parity = NULL; 168 cmn_err(CE_PANIC, "invalid RAID-Z configuration %llu", 169 (u_longlong_t)raidz_parity(rm)); 170 break; 171 } 172 173 /* if method is NULL execute the original implementation */ 174 if (gen_parity == NULL) 175 return (RAIDZ_ORIGINAL_IMPL); 176 177 gen_parity(rr); 178 179 return (0); 180 } 181 182 static raidz_rec_f 183 reconstruct_fun_p_sel(raidz_map_t *rm, const int *parity_valid, 184 const int nbaddata) 185 { 186 if (nbaddata == 1 && parity_valid[CODE_P]) { 187 return (rm->rm_ops->rec[RAIDZ_REC_P]); 188 } 189 return ((raidz_rec_f) NULL); 190 } 191 192 static raidz_rec_f 193 reconstruct_fun_pq_sel(raidz_map_t *rm, const int *parity_valid, 194 const int nbaddata) 195 { 196 if (nbaddata == 1) { 197 if (parity_valid[CODE_P]) { 198 return (rm->rm_ops->rec[RAIDZ_REC_P]); 199 } else if (parity_valid[CODE_Q]) { 200 return (rm->rm_ops->rec[RAIDZ_REC_Q]); 201 } 202 } else if (nbaddata == 2 && 203 parity_valid[CODE_P] && parity_valid[CODE_Q]) { 204 return (rm->rm_ops->rec[RAIDZ_REC_PQ]); 205 } 206 return ((raidz_rec_f) NULL); 207 } 208 209 static raidz_rec_f 210 reconstruct_fun_pqr_sel(raidz_map_t *rm, const int *parity_valid, 211 const int nbaddata) 212 { 213 if (nbaddata == 1) { 214 if (parity_valid[CODE_P]) { 215 return (rm->rm_ops->rec[RAIDZ_REC_P]); 216 } else if (parity_valid[CODE_Q]) { 217 return (rm->rm_ops->rec[RAIDZ_REC_Q]); 218 } else if (parity_valid[CODE_R]) { 219 return (rm->rm_ops->rec[RAIDZ_REC_R]); 220 } 221 } else if (nbaddata == 2) { 222 if (parity_valid[CODE_P] && parity_valid[CODE_Q]) { 223 return (rm->rm_ops->rec[RAIDZ_REC_PQ]); 224 } else if (parity_valid[CODE_P] && parity_valid[CODE_R]) { 225 return (rm->rm_ops->rec[RAIDZ_REC_PR]); 226 } else if (parity_valid[CODE_Q] && parity_valid[CODE_R]) { 227 return (rm->rm_ops->rec[RAIDZ_REC_QR]); 228 } 229 } else if (nbaddata == 3 && 230 parity_valid[CODE_P] && parity_valid[CODE_Q] && 231 parity_valid[CODE_R]) { 232 return (rm->rm_ops->rec[RAIDZ_REC_PQR]); 233 } 234 return ((raidz_rec_f) NULL); 235 } 236 237 /* 238 * Select data reconstruction method for raidz_map 239 * @parity_valid - Parity validity flag 240 * @dt - Failed data index array 241 * @nbaddata - Number of failed data columns 242 */ 243 int 244 vdev_raidz_math_reconstruct(raidz_map_t *rm, raidz_row_t *rr, 245 const int *parity_valid, const int *dt, const int nbaddata) 246 { 247 raidz_rec_f rec_fn = NULL; 248 249 switch (raidz_parity(rm)) { 250 case PARITY_P: 251 rec_fn = reconstruct_fun_p_sel(rm, parity_valid, nbaddata); 252 break; 253 case PARITY_PQ: 254 rec_fn = reconstruct_fun_pq_sel(rm, parity_valid, nbaddata); 255 break; 256 case PARITY_PQR: 257 rec_fn = reconstruct_fun_pqr_sel(rm, parity_valid, nbaddata); 258 break; 259 default: 260 cmn_err(CE_PANIC, "invalid RAID-Z configuration %llu", 261 (u_longlong_t)raidz_parity(rm)); 262 break; 263 } 264 265 if (rec_fn == NULL) 266 return (RAIDZ_ORIGINAL_IMPL); 267 else 268 return (rec_fn(rr, dt)); 269 } 270 271 const char *const raidz_gen_name[] = { 272 "gen_p", "gen_pq", "gen_pqr" 273 }; 274 const char *const raidz_rec_name[] = { 275 "rec_p", "rec_q", "rec_r", 276 "rec_pq", "rec_pr", "rec_qr", "rec_pqr" 277 }; 278 279 #if defined(_KERNEL) 280 281 #define RAIDZ_KSTAT_LINE_LEN (17 + 10*12 + 1) 282 283 static int 284 raidz_math_kstat_headers(char *buf, size_t size) 285 { 286 ASSERT3U(size, >=, RAIDZ_KSTAT_LINE_LEN); 287 288 ssize_t off = kmem_scnprintf(buf, size, "%-17s", "implementation"); 289 290 for (int i = 0; i < ARRAY_SIZE(raidz_gen_name); i++) 291 off += kmem_scnprintf(buf + off, size - off, "%-16s", 292 raidz_gen_name[i]); 293 294 for (int i = 0; i < ARRAY_SIZE(raidz_rec_name); i++) 295 off += kmem_scnprintf(buf + off, size - off, "%-16s", 296 raidz_rec_name[i]); 297 298 (void) kmem_scnprintf(buf + off, size - off, "\n"); 299 300 return (0); 301 } 302 303 static int 304 raidz_math_kstat_data(char *buf, size_t size, void *data) 305 { 306 raidz_impl_kstat_t *fstat = &raidz_impl_kstats[raidz_supp_impl_cnt]; 307 raidz_impl_kstat_t *cstat = (raidz_impl_kstat_t *)data; 308 ssize_t off = 0; 309 int i; 310 311 ASSERT3U(size, >=, RAIDZ_KSTAT_LINE_LEN); 312 313 if (cstat == fstat) { 314 off += kmem_scnprintf(buf + off, size - off, "%-17s", 315 "fastest"); 316 317 for (i = 0; i < ARRAY_SIZE(raidz_gen_name); i++) { 318 int id = fstat->gen[i]; 319 off += kmem_scnprintf(buf + off, size - off, "%-16s", 320 raidz_supp_impl[id]->name); 321 } 322 for (i = 0; i < ARRAY_SIZE(raidz_rec_name); i++) { 323 int id = fstat->rec[i]; 324 off += kmem_scnprintf(buf + off, size - off, "%-16s", 325 raidz_supp_impl[id]->name); 326 } 327 } else { 328 ptrdiff_t id = cstat - raidz_impl_kstats; 329 330 off += kmem_scnprintf(buf + off, size - off, "%-17s", 331 raidz_supp_impl[id]->name); 332 333 for (i = 0; i < ARRAY_SIZE(raidz_gen_name); i++) 334 off += kmem_scnprintf(buf + off, size - off, "%-16llu", 335 (u_longlong_t)cstat->gen[i]); 336 337 for (i = 0; i < ARRAY_SIZE(raidz_rec_name); i++) 338 off += kmem_scnprintf(buf + off, size - off, "%-16llu", 339 (u_longlong_t)cstat->rec[i]); 340 } 341 342 (void) kmem_scnprintf(buf + off, size - off, "\n"); 343 344 return (0); 345 } 346 347 static void * 348 raidz_math_kstat_addr(kstat_t *ksp, loff_t n) 349 { 350 if (n <= raidz_supp_impl_cnt) 351 ksp->ks_private = (void *) (raidz_impl_kstats + n); 352 else 353 ksp->ks_private = NULL; 354 355 return (ksp->ks_private); 356 } 357 358 #define BENCH_D_COLS (8ULL) 359 #define BENCH_COLS (BENCH_D_COLS + PARITY_PQR) 360 #define BENCH_ZIO_SIZE (1ULL << SPA_OLD_MAXBLOCKSHIFT) /* 128 kiB */ 361 #define BENCH_NS MSEC2NSEC(1) /* 1ms */ 362 363 typedef void (*benchmark_fn)(raidz_map_t *rm, const int fn); 364 365 static void 366 benchmark_gen_impl(raidz_map_t *rm, const int fn) 367 { 368 (void) fn; 369 vdev_raidz_generate_parity(rm); 370 } 371 372 static void 373 benchmark_rec_impl(raidz_map_t *rm, const int fn) 374 { 375 static const int rec_tgt[7][3] = { 376 {1, 2, 3}, /* rec_p: bad QR & D[0] */ 377 {0, 2, 3}, /* rec_q: bad PR & D[0] */ 378 {0, 1, 3}, /* rec_r: bad PQ & D[0] */ 379 {2, 3, 4}, /* rec_pq: bad R & D[0][1] */ 380 {1, 3, 4}, /* rec_pr: bad Q & D[0][1] */ 381 {0, 3, 4}, /* rec_qr: bad P & D[0][1] */ 382 {3, 4, 5} /* rec_pqr: bad & D[0][1][2] */ 383 }; 384 385 vdev_raidz_reconstruct(rm, rec_tgt[fn], 3); 386 } 387 388 /* 389 * Benchmarking of all supported implementations (raidz_supp_impl_cnt) 390 * is performed by setting the rm_ops pointer and calling the top level 391 * generate/reconstruct methods of bench_rm. 392 */ 393 static void 394 benchmark_raidz_impl(raidz_map_t *bench_rm, const int fn, benchmark_fn bench_fn) 395 { 396 uint64_t run_cnt, speed, best_speed = 0; 397 hrtime_t t_start, t_diff; 398 raidz_impl_ops_t *curr_impl; 399 raidz_impl_kstat_t *fstat = &raidz_impl_kstats[raidz_supp_impl_cnt]; 400 int impl, i; 401 402 for (impl = 0; impl < raidz_supp_impl_cnt; impl++) { 403 /* set an implementation to benchmark */ 404 curr_impl = raidz_supp_impl[impl]; 405 bench_rm->rm_ops = curr_impl; 406 407 run_cnt = 0; 408 t_start = gethrtime(); 409 410 do { 411 for (i = 0; i < 5; i++, run_cnt++) 412 bench_fn(bench_rm, fn); 413 414 t_diff = gethrtime() - t_start; 415 } while (t_diff < BENCH_NS); 416 417 speed = run_cnt * BENCH_ZIO_SIZE * NANOSEC; 418 speed /= (t_diff * BENCH_COLS); 419 420 if (bench_fn == benchmark_gen_impl) 421 raidz_impl_kstats[impl].gen[fn] = speed; 422 else 423 raidz_impl_kstats[impl].rec[fn] = speed; 424 425 /* Update fastest implementation method */ 426 if (speed > best_speed) { 427 best_speed = speed; 428 429 if (bench_fn == benchmark_gen_impl) { 430 fstat->gen[fn] = impl; 431 vdev_raidz_fastest_impl.gen[fn] = 432 curr_impl->gen[fn]; 433 } else { 434 fstat->rec[fn] = impl; 435 vdev_raidz_fastest_impl.rec[fn] = 436 curr_impl->rec[fn]; 437 } 438 } 439 } 440 } 441 #endif 442 443 /* 444 * Initialize and benchmark all supported implementations. 445 */ 446 static void 447 benchmark_raidz(void) 448 { 449 raidz_impl_ops_t *curr_impl; 450 int i, c; 451 452 /* Move supported impl into raidz_supp_impl */ 453 for (i = 0, c = 0; i < ARRAY_SIZE(raidz_all_maths); i++) { 454 curr_impl = (raidz_impl_ops_t *)raidz_all_maths[i]; 455 456 if (curr_impl->init) 457 curr_impl->init(); 458 459 if (curr_impl->is_supported()) 460 raidz_supp_impl[c++] = (raidz_impl_ops_t *)curr_impl; 461 } 462 membar_producer(); /* complete raidz_supp_impl[] init */ 463 raidz_supp_impl_cnt = c; /* number of supported impl */ 464 465 #if defined(_KERNEL) 466 abd_t *pabd; 467 zio_t *bench_zio = NULL; 468 raidz_map_t *bench_rm = NULL; 469 uint64_t bench_parity; 470 471 /* Fake a zio and run the benchmark on a warmed up buffer */ 472 bench_zio = kmem_zalloc(sizeof (zio_t), KM_SLEEP); 473 bench_zio->io_offset = 0; 474 bench_zio->io_size = BENCH_ZIO_SIZE; /* only data columns */ 475 bench_zio->io_abd = abd_alloc_linear(BENCH_ZIO_SIZE, B_TRUE); 476 memset(abd_to_buf(bench_zio->io_abd), 0xAA, BENCH_ZIO_SIZE); 477 478 /* Benchmark parity generation methods */ 479 for (int fn = 0; fn < RAIDZ_GEN_NUM; fn++) { 480 bench_parity = fn + 1; 481 /* New raidz_map is needed for each generate_p/q/r */ 482 bench_rm = vdev_raidz_map_alloc(bench_zio, SPA_MINBLOCKSHIFT, 483 BENCH_D_COLS + bench_parity, bench_parity); 484 485 benchmark_raidz_impl(bench_rm, fn, benchmark_gen_impl); 486 487 vdev_raidz_map_free(bench_rm); 488 } 489 490 /* Benchmark data reconstruction methods */ 491 bench_rm = vdev_raidz_map_alloc(bench_zio, SPA_MINBLOCKSHIFT, 492 BENCH_COLS, PARITY_PQR); 493 494 /* Ensure that fake parity blocks are initialized */ 495 for (c = 0; c < bench_rm->rm_row[0]->rr_firstdatacol; c++) { 496 pabd = bench_rm->rm_row[0]->rr_col[c].rc_abd; 497 memset(abd_to_buf(pabd), 0xAA, abd_get_size(pabd)); 498 } 499 500 for (int fn = 0; fn < RAIDZ_REC_NUM; fn++) 501 benchmark_raidz_impl(bench_rm, fn, benchmark_rec_impl); 502 503 vdev_raidz_map_free(bench_rm); 504 505 /* cleanup the bench zio */ 506 abd_free(bench_zio->io_abd); 507 kmem_free(bench_zio, sizeof (zio_t)); 508 #else 509 /* 510 * Skip the benchmark in user space to avoid impacting libzpool 511 * consumers (zdb, zhack, zinject, ztest). The last implementation 512 * is assumed to be the fastest and used by default. 513 */ 514 memcpy(&vdev_raidz_fastest_impl, 515 raidz_supp_impl[raidz_supp_impl_cnt - 1], 516 sizeof (vdev_raidz_fastest_impl)); 517 strcpy(vdev_raidz_fastest_impl.name, "fastest"); 518 #endif /* _KERNEL */ 519 } 520 521 void 522 vdev_raidz_math_init(void) 523 { 524 /* Determine the fastest available implementation. */ 525 benchmark_raidz(); 526 527 #if defined(_KERNEL) 528 /* Install kstats for all implementations */ 529 raidz_math_kstat = kstat_create("zfs", 0, "vdev_raidz_bench", "misc", 530 KSTAT_TYPE_RAW, 0, KSTAT_FLAG_VIRTUAL); 531 if (raidz_math_kstat != NULL) { 532 raidz_math_kstat->ks_data = NULL; 533 raidz_math_kstat->ks_ndata = UINT32_MAX; 534 kstat_set_raw_ops(raidz_math_kstat, 535 raidz_math_kstat_headers, 536 raidz_math_kstat_data, 537 raidz_math_kstat_addr); 538 kstat_install(raidz_math_kstat); 539 } 540 #endif 541 542 /* Finish initialization */ 543 atomic_swap_32(&zfs_vdev_raidz_impl, user_sel_impl); 544 raidz_math_initialized = B_TRUE; 545 } 546 547 void 548 vdev_raidz_math_fini(void) 549 { 550 raidz_impl_ops_t const *curr_impl; 551 552 #if defined(_KERNEL) 553 if (raidz_math_kstat != NULL) { 554 kstat_delete(raidz_math_kstat); 555 raidz_math_kstat = NULL; 556 } 557 #endif 558 559 for (int i = 0; i < ARRAY_SIZE(raidz_all_maths); i++) { 560 curr_impl = raidz_all_maths[i]; 561 if (curr_impl->fini) 562 curr_impl->fini(); 563 } 564 } 565 566 static const struct { 567 const char *name; 568 uint32_t sel; 569 } math_impl_opts[] = { 570 { "cycle", IMPL_CYCLE }, 571 { "fastest", IMPL_FASTEST }, 572 { "original", IMPL_ORIGINAL }, 573 { "scalar", IMPL_SCALAR } 574 }; 575 576 /* 577 * Function sets desired raidz implementation. 578 * 579 * If we are called before init(), user preference will be saved in 580 * user_sel_impl, and applied in later init() call. This occurs when module 581 * parameter is specified on module load. Otherwise, directly update 582 * zfs_vdev_raidz_impl. 583 * 584 * @val Name of raidz implementation to use 585 * @param Unused. 586 */ 587 int 588 vdev_raidz_impl_set(const char *val) 589 { 590 int err = -EINVAL; 591 char req_name[RAIDZ_IMPL_NAME_MAX]; 592 uint32_t impl = RAIDZ_IMPL_READ(user_sel_impl); 593 size_t i; 594 595 /* sanitize input */ 596 i = strnlen(val, RAIDZ_IMPL_NAME_MAX); 597 if (i == 0 || i == RAIDZ_IMPL_NAME_MAX) 598 return (err); 599 600 strlcpy(req_name, val, RAIDZ_IMPL_NAME_MAX); 601 while (i > 0 && !!isspace(req_name[i-1])) 602 i--; 603 req_name[i] = '\0'; 604 605 /* Check mandatory options */ 606 for (i = 0; i < ARRAY_SIZE(math_impl_opts); i++) { 607 if (strcmp(req_name, math_impl_opts[i].name) == 0) { 608 impl = math_impl_opts[i].sel; 609 err = 0; 610 break; 611 } 612 } 613 614 /* check all supported impl if init() was already called */ 615 if (err != 0 && raidz_math_initialized) { 616 /* check all supported implementations */ 617 for (i = 0; i < raidz_supp_impl_cnt; i++) { 618 if (strcmp(req_name, raidz_supp_impl[i]->name) == 0) { 619 impl = i; 620 err = 0; 621 break; 622 } 623 } 624 } 625 626 if (err == 0) { 627 if (raidz_math_initialized) 628 atomic_swap_32(&zfs_vdev_raidz_impl, impl); 629 else 630 atomic_swap_32(&user_sel_impl, impl); 631 } 632 633 return (err); 634 } 635 636 #if defined(_KERNEL) && defined(__linux__) 637 638 static int 639 zfs_vdev_raidz_impl_set(const char *val, zfs_kernel_param_t *kp) 640 { 641 return (vdev_raidz_impl_set(val)); 642 } 643 644 static int 645 zfs_vdev_raidz_impl_get(char *buffer, zfs_kernel_param_t *kp) 646 { 647 int i, cnt = 0; 648 char *fmt; 649 const uint32_t impl = RAIDZ_IMPL_READ(zfs_vdev_raidz_impl); 650 651 ASSERT(raidz_math_initialized); 652 653 /* list mandatory options */ 654 for (i = 0; i < ARRAY_SIZE(math_impl_opts) - 2; i++) { 655 fmt = (impl == math_impl_opts[i].sel) ? "[%s] " : "%s "; 656 cnt += kmem_scnprintf(buffer + cnt, PAGE_SIZE - cnt, fmt, 657 math_impl_opts[i].name); 658 } 659 660 /* list all supported implementations */ 661 for (i = 0; i < raidz_supp_impl_cnt; i++) { 662 fmt = (i == impl) ? "[%s] " : "%s "; 663 cnt += kmem_scnprintf(buffer + cnt, PAGE_SIZE - cnt, fmt, 664 raidz_supp_impl[i]->name); 665 } 666 667 return (cnt); 668 } 669 670 module_param_call(zfs_vdev_raidz_impl, zfs_vdev_raidz_impl_set, 671 zfs_vdev_raidz_impl_get, NULL, 0644); 672 MODULE_PARM_DESC(zfs_vdev_raidz_impl, "Select raidz implementation."); 673 #endif 674