/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident   "%Z%%M% %I% %E% SMI"

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/fs/zfs.h>
#include <sys/fm/fs/zfs.h>

/*
 * Virtual device vector for RAID-Z.
 */

/*
 * We currently allow up to two-way replication (i.e. single-fault
 * reconstruction) models in RAID-Z vdevs.  The blocks in such vdevs
 * must all be multiples of two times the leaf vdev blocksize.
 */
#define VDEV_RAIDZ_ALIGN        2ULL

typedef struct raidz_col {
        uint64_t rc_col;                /* child vdev this column maps to */
        uint64_t rc_offset;             /* device offset of the column */
        uint64_t rc_size;               /* I/O size of the column */
        void *rc_data;                  /* column data buffer */
        int rc_error;                   /* errno for this column's I/O */
        short rc_tried;                 /* did we attempt this column? */
        short rc_skipped;               /* did we skip this column? */
} raidz_col_t;

typedef struct raidz_map {
        uint64_t rm_cols;               /* number of columns in the map */
        uint64_t rm_bigcols;            /* columns carrying an extra sector */
        uint64_t rm_asize;              /* allocated size of the whole map */
        int rm_missing_child;           /* known-missing column, or -1 */
        int rm_firstdatacol;            /* first data column (after parity) */
        raidz_col_t rm_col[1];          /* flexible array of columns */
} raidz_map_t;

static raidz_map_t *
vdev_raidz_map_alloc(zio_t *zio, uint64_t unit_shift, uint64_t dcols)
{
        raidz_map_t *rm;
        /* The starting RAID-Z (parent) vdev sector of the block. */
        uint64_t b = zio->io_offset >> unit_shift;
        /* The zio's size in units of the vdev's minimum sector size. */
        uint64_t s = zio->io_size >> unit_shift;
        /* The first column for this stripe. */
        uint64_t f = b % dcols;
        /* The starting byte offset on each child vdev. */
        uint64_t o = (b / dcols) << unit_shift;
        uint64_t q, r, c, bc, col, acols, coff;
        int firstdatacol;

        /* Sectors each data column holds, before the remainder is spread. */
        q = s / (dcols - 1);

        /* Remaining data sectors, distributed one per column. */
        r = s - q * (dcols - 1);

        /*
         * Number of "big" columns: the r columns that carry one extra
         * sector, plus the parity column whenever there is a remainder.
         */
        bc = r + !!r;

        /* With single parity, column 0 is parity and data begins at 1. */
        firstdatacol = 1;

        /* An I/O smaller than one full row uses fewer columns. */
        acols = (q == 0 ? bc : dcols);

        rm = kmem_alloc(offsetof(raidz_map_t, rm_col[acols]), KM_SLEEP);

        rm->rm_cols = acols;
        rm->rm_bigcols = bc;
        rm->rm_asize = 0;
        rm->rm_missing_child = -1;
        rm->rm_firstdatacol = firstdatacol;

        for (c = 0; c < acols; c++) {
                col = f + c;
                coff = o;
                if (col >= dcols) {
                        col -= dcols;
                        coff += 1ULL << unit_shift;
                }
                rm->rm_col[c].rc_col = col;
                rm->rm_col[c].rc_offset = coff;
                rm->rm_col[c].rc_size = (q + (c < bc)) << unit_shift;
                rm->rm_col[c].rc_data = NULL;
                rm->rm_col[c].rc_error = 0;
                rm->rm_col[c].rc_tried = 0;
                rm->rm_col[c].rc_skipped = 0;
                rm->rm_asize += rm->rm_col[c].rc_size;
        }

        rm->rm_asize = P2ROUNDUP(rm->rm_asize, VDEV_RAIDZ_ALIGN << unit_shift);

        for (c = 0; c < rm->rm_firstdatacol; c++)
                rm->rm_col[c].rc_data = zio_buf_alloc(rm->rm_col[c].rc_size);

        /*
         * Here c == rm_firstdatacol; the caller's data buffer is carved
         * up across the data columns, each pointing just past the last.
         */
        rm->rm_col[c].rc_data = zio->io_data;

        for (c = c + 1; c < acols; c++)
                rm->rm_col[c].rc_data = (char *)rm->rm_col[c - 1].rc_data +
                    rm->rm_col[c - 1].rc_size;

        /*
         * To prevent hot parity disks, switch the parity and data
         * columns every 1MB.
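         *
         * For example: a block at offset 0x80000 has bit 20 clear and
         * keeps parity in column 0's position, while a block at offset
         * 0x180000 has bit 20 set, so columns 0 and 1 trade devices
         * below and the parity buffer lands where the first data column
         * would otherwise go.  Parity writes thus alternate between the
         * first two column positions from one 1MB region to the next.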
         */
        ASSERT(rm->rm_cols >= 2);
        ASSERT(rm->rm_col[0].rc_size == rm->rm_col[1].rc_size);

        if (zio->io_offset & (1ULL << 20)) {
                col = rm->rm_col[0].rc_col;
                o = rm->rm_col[0].rc_offset;
                rm->rm_col[0].rc_col = rm->rm_col[1].rc_col;
                rm->rm_col[0].rc_offset = rm->rm_col[1].rc_offset;
                rm->rm_col[1].rc_col = col;
                rm->rm_col[1].rc_offset = o;
        }

        zio->io_vsd = rm;
        return (rm);
}

static void
vdev_raidz_map_free(zio_t *zio)
{
        raidz_map_t *rm = zio->io_vsd;
        int c;

        for (c = 0; c < rm->rm_firstdatacol; c++)
                zio_buf_free(rm->rm_col[c].rc_data, rm->rm_col[c].rc_size);

        kmem_free(rm, offsetof(raidz_map_t, rm_col[rm->rm_cols]));
        zio->io_vsd = NULL;
}

static void
vdev_raidz_reconstruct(raidz_map_t *rm, int x)
{
        uint64_t *dst, *src, count, xsize, csize;
        int i, c;

        for (c = 0; c < rm->rm_cols; c++) {
                if (c == x)
                        continue;
                src = rm->rm_col[c].rc_data;
                dst = rm->rm_col[x].rc_data;
                csize = rm->rm_col[c].rc_size;
                xsize = rm->rm_col[x].rc_size;
                count = MIN(csize, xsize) / sizeof (uint64_t);
                if (c == !x) {
                        /*
                         * The initial copy happens at either c == 0 or c == 1.
                         * Both of these columns are 'big' columns, so we'll
                         * definitely initialize all of column x.
                         */
                        ASSERT3U(xsize, <=, csize);
                        for (i = 0; i < count; i++)
                                *dst++ = *src++;
                } else {
                        for (i = 0; i < count; i++)
                                *dst++ ^= *src++;
                }
        }
}
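
/*
 * A worked example of the XOR math above (values invented for
 * illustration): with parity P = D0 ^ D1 ^ D2, a missing column is
 * recovered by XOR-ing the survivors, e.g. D1 = P ^ D0 ^ D2, since
 * every surviving term cancels itself (x ^ x == 0).  The same loop
 * also generates parity: "reconstructing" column 0 simply XORs all
 * of the data columns into the parity buffer.
 */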

static int
vdev_raidz_open(vdev_t *vd, uint64_t *asize, uint64_t *ashift)
{
        vdev_t *cvd;
        int c, error;
        int lasterror = 0;
        int numerrors = 0;

        /*
         * XXX -- minimum children should be raid-type-specific
         */
        if (vd->vdev_children < 2) {
                vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
                return (EINVAL);
        }

        for (c = 0; c < vd->vdev_children; c++) {
                cvd = vd->vdev_child[c];

                if ((error = vdev_open(cvd)) != 0) {
                        lasterror = error;
                        numerrors++;
                        continue;
                }

                *asize = MIN(*asize - 1, cvd->vdev_asize - 1) + 1;
                *ashift = MAX(*ashift, cvd->vdev_ashift);
        }

        *asize *= vd->vdev_children;

        if (numerrors > 1) {
                vd->vdev_stat.vs_aux = VDEV_AUX_NO_REPLICAS;
                return (lasterror);
        }

        return (0);
}

static void
vdev_raidz_close(vdev_t *vd)
{
        int c;

        for (c = 0; c < vd->vdev_children; c++)
                vdev_close(vd->vdev_child[c]);
}

static uint64_t
vdev_raidz_asize(vdev_t *vd, uint64_t psize)
{
        uint64_t asize;
        uint64_t ashift = vd->vdev_top->vdev_ashift;
        uint64_t cols = vd->vdev_children;

        asize = ((psize - 1) >> ashift) + 1;
        asize += (asize + cols - 2) / (cols - 1);
        asize = P2ROUNDUP(asize, VDEV_RAIDZ_ALIGN) << ashift;

        return (asize);
}
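
/*
 * A worked example of the arithmetic above (numbers chosen for
 * illustration): for a 4-child group with ashift == 9 and
 * psize == 3072, the data spans ((3072 - 1) >> 9) + 1 == 6 sectors;
 * parity adds (6 + 4 - 2) / (4 - 1) == 2 more, one per 3-sector row;
 * P2ROUNDUP(8, 2) == 8, so the block consumes 8 << 9 == 4096 bytes
 * of RAID-Z group space.
 */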

static void
vdev_raidz_child_done(zio_t *zio)
{
        raidz_col_t *rc = zio->io_private;

        rc->rc_error = zio->io_error;
        rc->rc_tried = 1;
        rc->rc_skipped = 0;
}

static void
vdev_raidz_repair_done(zio_t *zio)
{
        ASSERT(zio->io_private == zio->io_parent);
        vdev_raidz_map_free(zio->io_private);
}

static void
vdev_raidz_io_start(zio_t *zio)
{
        vdev_t *vd = zio->io_vd;
        vdev_t *tvd = vd->vdev_top;
        vdev_t *cvd;
        blkptr_t *bp = zio->io_bp;
        raidz_map_t *rm;
        raidz_col_t *rc;
        int c;

        rm = vdev_raidz_map_alloc(zio, tvd->vdev_ashift, vd->vdev_children);

        ASSERT3U(rm->rm_asize, ==, vdev_psize_to_asize(vd, zio->io_size));

        if (zio->io_type == ZIO_TYPE_WRITE) {

                /*
                 * Generate RAID parity in virtual column 0.
                 */
                vdev_raidz_reconstruct(rm, 0);

                for (c = 0; c < rm->rm_cols; c++) {
                        rc = &rm->rm_col[c];
                        cvd = vd->vdev_child[rc->rc_col];
                        zio_nowait(zio_vdev_child_io(zio, NULL, cvd,
                            rc->rc_offset, rc->rc_data, rc->rc_size,
                            zio->io_type, zio->io_priority, ZIO_FLAG_CANFAIL,
                            vdev_raidz_child_done, rc));
                }
                zio_wait_children_done(zio);
                return;
        }

        ASSERT(zio->io_type == ZIO_TYPE_READ);

        /*
         * Iterate over the columns in reverse order so we visit the
         * parity column last -- if any data column is missing or stale,
         * rm_missing_child is already set by the time we decide whether
         * the parity read is needed.
         */
        for (c = rm->rm_cols - 1; c >= 0; c--) {
                rc = &rm->rm_col[c];
                cvd = vd->vdev_child[rc->rc_col];
                if (vdev_is_dead(cvd)) {
                        rm->rm_missing_child = c;
                        rc->rc_error = ENXIO;
                        rc->rc_tried = 1;       /* don't even try */
                        rc->rc_skipped = 1;
                        continue;
                }
                if (vdev_dtl_contains(&cvd->vdev_dtl_map, bp->blk_birth, 1)) {
                        rm->rm_missing_child = c;
                        rc->rc_error = ESTALE;
                        rc->rc_skipped = 1;
                        continue;
                }
                if (c >= rm->rm_firstdatacol || rm->rm_missing_child != -1 ||
                    (zio->io_flags & ZIO_FLAG_SCRUB)) {
                        zio_nowait(zio_vdev_child_io(zio, NULL, cvd,
                            rc->rc_offset, rc->rc_data, rc->rc_size,
                            zio->io_type, zio->io_priority, ZIO_FLAG_CANFAIL,
                            vdev_raidz_child_done, rc));
                }
        }

        zio_wait_children_done(zio);
}

/*
 * Report a checksum error for a child of a RAID-Z device.
 */
static void
raidz_checksum_error(zio_t *zio, raidz_col_t *rc)
{
        vdev_t *vd = zio->io_vd->vdev_child[rc->rc_col];

        dprintf_bp(zio->io_bp, "imputed checksum error on %s: ",
            vdev_description(vd));

        if (!(zio->io_flags & ZIO_FLAG_SPECULATIVE)) {
                mutex_enter(&vd->vdev_stat_lock);
                vd->vdev_stat.vs_checksum_errors++;
                mutex_exit(&vd->vdev_stat_lock);

                zfs_ereport_post(FM_EREPORT_ZFS_CHECKSUM,
                    zio->io_spa, vd, zio, rc->rc_offset, rc->rc_size);
        }
}

static void
vdev_raidz_io_done(zio_t *zio)
{
        vdev_t *vd = zio->io_vd;
        vdev_t *cvd;
        raidz_map_t *rm = zio->io_vsd;
        raidz_col_t *rc;
        int unexpected_errors = 0;
        int c;

        ASSERT(zio->io_bp != NULL);     /* XXX need to add code to enforce this */

        zio->io_error = 0;
        zio->io_numerrors = 0;

        for (c = 0; c < rm->rm_cols; c++) {
                rc = &rm->rm_col[c];

                /*
                 * We preserve any EIOs because those may be worth retrying;
                 * whereas ECKSUM and ENXIO are more likely to be persistent.
                 */
                if (rc->rc_error) {
                        if (zio->io_error != EIO)
                                zio->io_error = rc->rc_error;
                        if (!rc->rc_skipped)
                                unexpected_errors++;
                        zio->io_numerrors++;
                }
        }

        if (zio->io_type == ZIO_TYPE_WRITE) {
                /*
                 * If this is not a failfast write, and we were able to
                 * write enough columns to reconstruct the data, good enough.
                 */
                /* XXPOLICY */
                if (zio->io_numerrors <= rm->rm_firstdatacol &&
                    !(zio->io_flags & ZIO_FLAG_FAILFAST))
                        zio->io_error = 0;

                vdev_raidz_map_free(zio);
                zio_next_stage(zio);
                return;
        }

        ASSERT(zio->io_type == ZIO_TYPE_READ);

        /*
         * If there were no I/O errors, and the data checksums correctly,
         * the read is complete.
         */
        /* XXPOLICY */
        if (zio->io_numerrors == 0 && zio_checksum_error(zio) == 0) {
                ASSERT(unexpected_errors == 0);
                ASSERT(zio->io_error == 0);

                /*
                 * We know the data's good.  If we read the parity,
                 * verify that it's good as well.  If not, fix it.
                 */
                for (c = 0; c < rm->rm_firstdatacol; c++) {
                        void *orig;
                        rc = &rm->rm_col[c];
                        if (!rc->rc_tried)
                                continue;
                        orig = zio_buf_alloc(rc->rc_size);
                        bcopy(rc->rc_data, orig, rc->rc_size);
                        vdev_raidz_reconstruct(rm, c);
                        if (bcmp(orig, rc->rc_data, rc->rc_size) != 0) {
                                raidz_checksum_error(zio, rc);
                                rc->rc_error = ECKSUM;
                                unexpected_errors++;
                        }
                        zio_buf_free(orig, rc->rc_size);
                }
                goto done;
        }

        /*
         * If there was exactly one I/O error, it's the one we expected,
         * and the reconstructed data checksums, the read is complete.
         * This happens when one child is offline and vdev_fault_assess()
         * knows it, or when one child has stale data and the DTL knows it.
         */
        if (zio->io_numerrors == 1 && (c = rm->rm_missing_child) != -1) {
                rc = &rm->rm_col[c];
                ASSERT(unexpected_errors == 0);
                ASSERT(rc->rc_error == ENXIO || rc->rc_error == ESTALE);
                vdev_raidz_reconstruct(rm, c);
                if (zio_checksum_error(zio) == 0) {
                        zio->io_error = 0;
                        goto done;
                }
        }

        /*
         * This isn't a typical error -- either we got a read error or
         * more than one child claimed a problem.  Read every block we
         * haven't already so we can try combinatorial reconstruction.
         */
        unexpected_errors = 1;
        rm->rm_missing_child = -1;

        for (c = 0; c < rm->rm_cols; c++)
                if (!rm->rm_col[c].rc_tried)
                        break;

        if (c != rm->rm_cols) {
                zio->io_error = 0;
                zio_vdev_io_redone(zio);
                for (c = 0; c < rm->rm_cols; c++) {
                        rc = &rm->rm_col[c];
                        if (rc->rc_tried)
                                continue;
                        zio_nowait(zio_vdev_child_io(zio, NULL,
                            vd->vdev_child[rc->rc_col],
                            rc->rc_offset, rc->rc_data, rc->rc_size,
                            zio->io_type, zio->io_priority, ZIO_FLAG_CANFAIL,
                            vdev_raidz_child_done, rc));
                }
                zio_wait_children_done(zio);
                return;
        }

        /*
         * If there were more errors than parity disks, give up.
         */
        if (zio->io_numerrors > rm->rm_firstdatacol) {
                ASSERT(zio->io_error != 0);
                goto done;
        }

        /*
         * The number of I/O errors is correctable.  Correct them here.
         */
        ASSERT(zio->io_numerrors <= rm->rm_firstdatacol);
        for (c = 0; c < rm->rm_cols; c++) {
                rc = &rm->rm_col[c];
                ASSERT(rc->rc_tried);
                if (rc->rc_error) {
                        vdev_raidz_reconstruct(rm, c);
                        if (zio_checksum_error(zio) == 0)
                                zio->io_error = 0;
                        else
                                zio->io_error = rc->rc_error;
                        goto done;
                }
        }
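
        /*
         * An illustrative walk-through of the loop below (column count
         * invented for the example): with four columns and no reported
         * I/O errors, we assume in turn that column 0, then 1, then 2,
         * then 3 returned silently corrupt data, rebuild that column
         * from the other three, and re-run the block checksum.  The
         * first candidate that checksums wins; otherwise the original
         * data is restored before trying the next column.
         */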

        /*
         * There were no I/O errors, but the data doesn't checksum.
         * Try all permutations to see if we can find one that does.
         */
        ASSERT(zio->io_numerrors == 0);
        for (c = 0; c < rm->rm_cols; c++) {
                void *orig;
                rc = &rm->rm_col[c];

                orig = zio_buf_alloc(rc->rc_size);
                bcopy(rc->rc_data, orig, rc->rc_size);
                vdev_raidz_reconstruct(rm, c);

                if (zio_checksum_error(zio) == 0) {
                        zio_buf_free(orig, rc->rc_size);
                        zio->io_error = 0;
                        /*
                         * If this child didn't know that it returned bad data,
                         * inform it.
                         */
                        if (rc->rc_tried && rc->rc_error == 0)
                                raidz_checksum_error(zio, rc);
                        rc->rc_error = ECKSUM;
                        goto done;
                }

                bcopy(orig, rc->rc_data, rc->rc_size);
                zio_buf_free(orig, rc->rc_size);
        }

        /*
         * All combinations failed to checksum.  Generate checksum ereports
         * for every one.
         */
        zio->io_error = ECKSUM;
        if (!(zio->io_flags & ZIO_FLAG_SPECULATIVE)) {
                for (c = 0; c < rm->rm_cols; c++) {
                        rc = &rm->rm_col[c];
                        zfs_ereport_post(FM_EREPORT_ZFS_CHECKSUM,
                            zio->io_spa, vd->vdev_child[rc->rc_col], zio,
                            rc->rc_offset, rc->rc_size);
                }
        }

done:
        zio_checksum_verified(zio);

        if (zio->io_error == 0 && (spa_mode & FWRITE) &&
            (unexpected_errors || (zio->io_flags & ZIO_FLAG_RESILVER))) {
                zio_t *rio;

                /*
                 * Use the good data we have in hand to repair damaged children.
                 *
                 * We issue all repair I/Os as children of 'rio' to arrange
                 * that vdev_raidz_map_free(zio) will be invoked after all
                 * repairs complete, but before we advance to the next stage.
                 */
                rio = zio_null(zio, zio->io_spa,
                    vdev_raidz_repair_done, zio, ZIO_FLAG_CANFAIL);

                for (c = 0; c < rm->rm_cols; c++) {
                        rc = &rm->rm_col[c];
                        cvd = vd->vdev_child[rc->rc_col];

                        if (rc->rc_error == 0)
                                continue;

                        dprintf("%s resilvered %s @ 0x%llx error %d\n",
                            vdev_description(vd),
                            vdev_description(cvd),
                            zio->io_offset, rc->rc_error);

                        zio_nowait(zio_vdev_child_io(rio, NULL, cvd,
                            rc->rc_offset, rc->rc_data, rc->rc_size,
                            ZIO_TYPE_WRITE, zio->io_priority,
                            ZIO_FLAG_IO_REPAIR | ZIO_FLAG_CANFAIL |
                            ZIO_FLAG_DONT_PROPAGATE, NULL, NULL));
                }

                zio_nowait(rio);
                zio_wait_children_done(zio);
                return;
        }

        vdev_raidz_map_free(zio);
        zio_next_stage(zio);
}

static void
vdev_raidz_state_change(vdev_t *vd, int faulted, int degraded)
{
        if (faulted > 1)
                vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
                    VDEV_AUX_NO_REPLICAS);
        else if (degraded + faulted != 0)
                vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, VDEV_AUX_NONE);
        else
                vdev_set_state(vd, B_FALSE, VDEV_STATE_HEALTHY, VDEV_AUX_NONE);
}

vdev_ops_t vdev_raidz_ops = {
        vdev_raidz_open,
        vdev_raidz_close,
        vdev_raidz_asize,
        vdev_raidz_io_start,
        vdev_raidz_io_done,
        vdev_raidz_state_change,
        VDEV_TYPE_RAIDZ,        /* name of this vdev type */
        B_FALSE                 /* not a leaf vdev */
};
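
/*
 * A brief worked reading of vdev_raidz_state_change() above, with an
 * assumed 3-disk single-parity group for illustration: one faulted or
 * degraded child leaves the group DEGRADED but fully readable, since
 * any one column can be rebuilt from the other two; a second faulted
 * child exceeds the single-fault model described at the top of this
 * file, and the group reports CANT_OPEN with VDEV_AUX_NO_REPLICAS.
 */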