/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011 Alexander Motin <mav@FreeBSD.org>
 * Copyright (c) 2000 - 2008 Søren Schmidt <sos@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/kobj.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/systm.h>
#include <geom/geom.h>
#include <geom/geom_dbg.h>
#include "geom/raid/g_raid.h"
#include "g_raid_md_if.h"

static MALLOC_DEFINE(M_MD_PROMISE, "md_promise_data", "GEOM_RAID Promise metadata");

#define	PROMISE_MAX_DISKS	8
#define	PROMISE_MAX_SUBDISKS	2
#define	PROMISE_META_OFFSET	14

struct promise_raid_disk {
	uint8_t		flags;			/* Subdisk status. */
#define PROMISE_F_VALID		0x01
#define PROMISE_F_ONLINE	0x02
#define PROMISE_F_ASSIGNED	0x04
#define PROMISE_F_SPARE		0x08
#define PROMISE_F_DUPLICATE	0x10
#define PROMISE_F_REDIR		0x20
#define PROMISE_F_DOWN		0x40
#define PROMISE_F_READY		0x80

	uint8_t		number;			/* Position in a volume. */
	uint8_t		channel;		/* ATA channel number. */
	uint8_t		device;			/* ATA device number. */
	uint64_t	id __packed;		/* Subdisk ID. */
} __packed;

struct promise_raid_conf {
	char		promise_id[24];
#define PROMISE_MAGIC		"Promise Technology, Inc."
#define FREEBSD_MAGIC		"FreeBSD ATA driver RAID "

	uint32_t	dummy_0;
	uint64_t	magic_0;
#define PROMISE_MAGIC0(x)	(((uint64_t)(x.channel) << 48) | \
				((uint64_t)(x.device != 0) << 56))
	uint16_t	magic_1;
	uint32_t	magic_2;
	uint8_t		filler1[470];

	uint32_t	integrity;
#define PROMISE_I_VALID		0x00000080

	struct promise_raid_disk	disk;	/* This subdisk info. */
	uint32_t	disk_offset;		/* Subdisk offset. */
	uint32_t	disk_sectors;		/* Subdisk size. */
	uint32_t	disk_rebuild;		/* Rebuild position. */
	uint16_t	generation;		/* Generation number. */
	uint8_t		status;			/* Volume status. */
#define PROMISE_S_VALID		0x01
#define PROMISE_S_ONLINE	0x02
#define PROMISE_S_INITED	0x04
#define PROMISE_S_READY		0x08
#define PROMISE_S_DEGRADED	0x10
#define PROMISE_S_MARKED	0x20
#define PROMISE_S_MIGRATING	0x40
#define PROMISE_S_FUNCTIONAL	0x80

	uint8_t		type;			/* Volume type. */
#define PROMISE_T_RAID0		0x00
#define PROMISE_T_RAID1		0x01
#define PROMISE_T_RAID3		0x02
#define PROMISE_T_RAID5		0x04
#define PROMISE_T_SPAN		0x08
#define PROMISE_T_JBOD		0x10

	uint8_t		total_disks;		/* Disks in this volume. */
	uint8_t		stripe_shift;		/* Strip size. */
	uint8_t		array_width;		/* Number of RAID0 stripes. */
	uint8_t		array_number;		/* Global volume number. */
	uint32_t	total_sectors;		/* Volume size. */
	uint16_t	cylinders;		/* Volume geometry: C. */
	uint8_t		heads;			/* Volume geometry: H. */
	uint8_t		sectors;		/* Volume geometry: S. */
	uint64_t	volume_id __packed;	/* Volume ID. */
	struct promise_raid_disk	disks[PROMISE_MAX_DISKS];
						/* Subdisks in this volume. */
	char		name[32];		/* Volume label. */

	uint32_t	filler2[8];
	uint32_t	magic_3;	/* Something related to rebuild. */
	uint64_t	rebuild_lba64;	/* Per-volume rebuild position. */
	uint32_t	magic_4;
	uint32_t	magic_5;
	uint32_t	total_sectors_high;
	uint8_t		magic_6;
	uint8_t		sector_size;
	uint16_t	magic_7;
	uint32_t	magic_8[31];
	uint32_t	backup_time;
	uint16_t	magic_9;
	uint32_t	disk_offset_high;
	uint32_t	disk_sectors_high;
	uint32_t	disk_rebuild_high;
	uint16_t	magic_10;
	uint32_t	magic_11[3];
	uint32_t	filler3[284];
	uint32_t	checksum;
} __packed;

struct g_raid_md_promise_perdisk {
	int		 pd_updated;
	int		 pd_subdisks;
	struct promise_raid_conf	*pd_meta[PROMISE_MAX_SUBDISKS];
};

struct g_raid_md_promise_pervolume {
	struct promise_raid_conf	*pv_meta;
	uint64_t		 pv_id;
	uint16_t		 pv_generation;
	int			 pv_disks_present;
	int			 pv_started;
	struct callout		 pv_start_co;	/* STARTING state timer. */
};
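
/*
 * On-disk placement, as implied by the offset math in promise_meta_read()
 * and promise_meta_write() below: each metadata record occupies four
 * sectors, the record for the first subdisk starting 63 sectors before the
 * end of the disk and each following record sitting PROMISE_META_OFFSET
 * (14) sectors closer to that end.
 */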

static g_raid_md_create_t g_raid_md_create_promise;
static g_raid_md_taste_t g_raid_md_taste_promise;
static g_raid_md_event_t g_raid_md_event_promise;
static g_raid_md_volume_event_t g_raid_md_volume_event_promise;
static g_raid_md_ctl_t g_raid_md_ctl_promise;
static g_raid_md_write_t g_raid_md_write_promise;
static g_raid_md_fail_disk_t g_raid_md_fail_disk_promise;
static g_raid_md_free_disk_t g_raid_md_free_disk_promise;
static g_raid_md_free_volume_t g_raid_md_free_volume_promise;
static g_raid_md_free_t g_raid_md_free_promise;

static kobj_method_t g_raid_md_promise_methods[] = {
	KOBJMETHOD(g_raid_md_create,	g_raid_md_create_promise),
	KOBJMETHOD(g_raid_md_taste,	g_raid_md_taste_promise),
	KOBJMETHOD(g_raid_md_event,	g_raid_md_event_promise),
	KOBJMETHOD(g_raid_md_volume_event,	g_raid_md_volume_event_promise),
	KOBJMETHOD(g_raid_md_ctl,	g_raid_md_ctl_promise),
	KOBJMETHOD(g_raid_md_write,	g_raid_md_write_promise),
	KOBJMETHOD(g_raid_md_fail_disk,	g_raid_md_fail_disk_promise),
	KOBJMETHOD(g_raid_md_free_disk,	g_raid_md_free_disk_promise),
	KOBJMETHOD(g_raid_md_free_volume,	g_raid_md_free_volume_promise),
	KOBJMETHOD(g_raid_md_free,	g_raid_md_free_promise),
	{ 0, 0 }
};

static struct g_raid_md_class g_raid_md_promise_class = {
	"Promise",
	g_raid_md_promise_methods,
	sizeof(struct g_raid_md_object),
	.mdc_enable = 1,
	.mdc_priority = 100
};

static void
g_raid_md_promise_print(struct promise_raid_conf *meta)
{
	int i;

	if (g_raid_debug < 1)
		return;

	printf("********* ATA Promise Metadata *********\n");
	printf("promise_id <%.24s>\n", meta->promise_id);
	printf("disk %02x %02x %02x %02x %016jx\n",
	    meta->disk.flags, meta->disk.number, meta->disk.channel,
	    meta->disk.device, meta->disk.id);
	printf("disk_offset %u\n", meta->disk_offset);
	printf("disk_sectors %u\n", meta->disk_sectors);
	printf("disk_rebuild %u\n", meta->disk_rebuild);
	printf("generation %u\n", meta->generation);
	printf("status 0x%02x\n", meta->status);
	printf("type %u\n", meta->type);
	printf("total_disks %u\n", meta->total_disks);
	printf("stripe_shift %u\n", meta->stripe_shift);
	printf("array_width %u\n", meta->array_width);
	printf("array_number %u\n", meta->array_number);
	printf("total_sectors %u\n", meta->total_sectors);
	printf("cylinders %u\n", meta->cylinders);
	printf("heads %u\n", meta->heads);
	printf("sectors %u\n", meta->sectors);
	printf("volume_id 0x%016jx\n", meta->volume_id);
	printf("disks:\n");
	for (i = 0; i < PROMISE_MAX_DISKS; i++) {
		printf(" %02x %02x %02x %02x %016jx\n",
		    meta->disks[i].flags, meta->disks[i].number,
		    meta->disks[i].channel, meta->disks[i].device,
		    meta->disks[i].id);
	}
	printf("name <%.32s>\n", meta->name);
	printf("magic_3 0x%08x\n", meta->magic_3);
	printf("rebuild_lba64 %ju\n", meta->rebuild_lba64);
	printf("magic_4 0x%08x\n", meta->magic_4);
	printf("magic_5 0x%08x\n", meta->magic_5);
	printf("total_sectors_high 0x%08x\n", meta->total_sectors_high);
	printf("sector_size %u\n", meta->sector_size);
	printf("backup_time %d\n", meta->backup_time);
	printf("disk_offset_high 0x%08x\n", meta->disk_offset_high);
	printf("disk_sectors_high 0x%08x\n", meta->disk_sectors_high);
	printf("disk_rebuild_high 0x%08x\n", meta->disk_rebuild_high);
	printf("=================================================\n");
}

static struct promise_raid_conf *
promise_meta_copy(struct promise_raid_conf *meta)
{
	struct promise_raid_conf *nmeta;

	nmeta = malloc(sizeof(*nmeta), M_MD_PROMISE, M_WAITOK);
	memcpy(nmeta, meta, sizeof(*nmeta));
	return (nmeta);
}

static int
promise_meta_find_disk(struct promise_raid_conf *meta, uint64_t id)
{
	int pos;

	for (pos = 0; pos < meta->total_disks; pos++) {
		if (meta->disks[pos].id == id)
			return (pos);
	}
	return (-1);
}

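/*
 * Find the largest free extent on a disk. A plain reading of the loop in
 * promise_meta_unused_range(): the last 131072 sectors (64MB at 512-byte
 * sectors) are excluded up front -- the tail area this driver reserves for
 * metadata, and the same amount promise_meta_write_spare() subtracts when
 * sizing a spare -- then every gap before, between and after the nsd
 * recorded subdisk extents is measured, and the largest one is returned
 * via *off and *size.
 */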
static int
promise_meta_unused_range(struct promise_raid_conf **metaarr, int nsd,
    off_t sectors, off_t *off, off_t *size)
{
	off_t coff, csize, tmp;
	int i, j;

	sectors -= 131072;
	*off = 0;
	*size = 0;
	coff = 0;
	csize = sectors;
	i = 0;
	while (1) {
		for (j = 0; j < nsd; j++) {
			tmp = ((off_t)metaarr[j]->disk_offset_high << 32) +
			    metaarr[j]->disk_offset;
			if (tmp >= coff)
				csize = MIN(csize, tmp - coff);
		}
		if (csize > *size) {
			*off = coff;
			*size = csize;
		}
		if (i >= nsd)
			break;
		coff = ((off_t)metaarr[i]->disk_offset_high << 32) +
		    metaarr[i]->disk_offset +
		    ((off_t)metaarr[i]->disk_sectors_high << 32) +
		    metaarr[i]->disk_sectors;
		csize = sectors - coff;
		i++;
	}
	return ((*size > 0) ? 1 : 0);
}

static int
promise_meta_translate_disk(struct g_raid_volume *vol, int md_disk_pos)
{
	int disk_pos, width;

	if (md_disk_pos >= 0 && vol->v_raid_level == G_RAID_VOLUME_RL_RAID1E) {
		width = vol->v_disks_count / 2;
		disk_pos = (md_disk_pos / width) +
		    (md_disk_pos % width) * width;
	} else
		disk_pos = md_disk_pos;
	return (disk_pos);
}

static void
promise_meta_get_name(struct promise_raid_conf *meta, char *buf)
{
	int i;

	strncpy(buf, meta->name, 32);
	buf[32] = 0;
	for (i = 31; i >= 0; i--) {
		if (buf[i] > 0x20)
			break;
		buf[i] = 0;
	}
}

static void
promise_meta_put_name(struct promise_raid_conf *meta, char *buf)
{

	memset(meta->name, 0x20, 32);
	memcpy(meta->name, buf, MIN(strlen(buf), 32));
}

static int
promise_meta_read(struct g_consumer *cp, struct promise_raid_conf **metaarr)
{
	struct g_provider *pp;
	struct promise_raid_conf *meta;
	char *buf;
	int error, i, subdisks;
	uint32_t checksum, *ptr;

	pp = cp->provider;
	subdisks = 0;

	if (pp->sectorsize * 4 < sizeof(*meta))
		return (subdisks);
	if (pp->sectorsize * 4 > maxphys) {
		G_RAID_DEBUG(1, "%s: Blocksize is too big.", pp->name);
		return (subdisks);
	}
next:
	/* Read metadata block. */
	buf = g_read_data(cp, pp->mediasize - pp->sectorsize *
	    (63 - subdisks * PROMISE_META_OFFSET),
	    pp->sectorsize * 4, &error);
	if (buf == NULL) {
		G_RAID_DEBUG(1, "Cannot read metadata from %s (error=%d).",
		    pp->name, error);
		return (subdisks);
	}
	meta = (struct promise_raid_conf *)buf;

	/* Check if this is a Promise RAID struct. */
	if (strncmp(meta->promise_id, PROMISE_MAGIC, strlen(PROMISE_MAGIC)) &&
	    strncmp(meta->promise_id, FREEBSD_MAGIC, strlen(FREEBSD_MAGIC))) {
		if (subdisks == 0)
			G_RAID_DEBUG(1,
			    "Promise signature check failed on %s", pp->name);
		g_free(buf);
		return (subdisks);
	}
	meta = malloc(sizeof(*meta), M_MD_PROMISE, M_WAITOK);
	memcpy(meta, buf, MIN(sizeof(*meta), pp->sectorsize * 4));
	g_free(buf);

	/* Check metadata checksum. */
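	/*
	 * The checksum is the plain 32-bit sum of the first 511 32-bit
	 * words of the 2048-byte block, i.e. of everything that precedes
	 * the checksum field itself.
	 */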
	for (checksum = 0, ptr = (uint32_t *)meta, i = 0; i < 511; i++)
		checksum += *ptr++;
	if (checksum != meta->checksum) {
		G_RAID_DEBUG(1, "Promise checksum check failed on %s", pp->name);
		free(meta, M_MD_PROMISE);
		return (subdisks);
	}

	if ((meta->integrity & PROMISE_I_VALID) == 0) {
		G_RAID_DEBUG(1, "Promise metadata is invalid on %s", pp->name);
		free(meta, M_MD_PROMISE);
		return (subdisks);
	}

	if (meta->total_disks > PROMISE_MAX_DISKS) {
		G_RAID_DEBUG(1, "Wrong number of disks on %s (%d)",
		    pp->name, meta->total_disks);
		free(meta, M_MD_PROMISE);
		return (subdisks);
	}

	/* Remove filler garbage from fields used in newer metadata. */
	if (meta->disk_offset_high == 0x8b8c8d8e &&
	    meta->disk_sectors_high == 0x8788898a &&
	    meta->disk_rebuild_high == 0x83848586) {
		meta->disk_offset_high = 0;
		meta->disk_sectors_high = 0;
		if (meta->disk_rebuild == UINT32_MAX)
			meta->disk_rebuild_high = UINT32_MAX;
		else
			meta->disk_rebuild_high = 0;
		if (meta->total_sectors_high == 0x15161718) {
			meta->total_sectors_high = 0;
			meta->backup_time = 0;
			if (meta->rebuild_lba64 == 0x2122232425262728)
				meta->rebuild_lba64 = UINT64_MAX;
		}
	}
	if (meta->sector_size < 1 || meta->sector_size > 8)
		meta->sector_size = 1;

	/* Save this part and look for next. */
	*metaarr = meta;
	metaarr++;
	subdisks++;
	if (subdisks < PROMISE_MAX_SUBDISKS)
		goto next;

	return (subdisks);
}

static int
promise_meta_write(struct g_consumer *cp,
    struct promise_raid_conf **metaarr, int nsd)
{
	struct g_provider *pp;
	struct promise_raid_conf *meta;
	char *buf;
	off_t off, size;
	int error, i, subdisk, fake;
	uint32_t checksum, *ptr;

	pp = cp->provider;
	subdisk = 0;
	fake = 0;
next:
	buf = malloc(pp->sectorsize * 4, M_MD_PROMISE, M_WAITOK | M_ZERO);
	meta = NULL;
	if (subdisk < nsd) {
		meta = metaarr[subdisk];
	} else if (!fake && promise_meta_unused_range(metaarr, nsd,
	    cp->provider->mediasize / cp->provider->sectorsize,
	    &off, &size)) {
		/* Optionally add record for unused space. */
		meta = (struct promise_raid_conf *)buf;
		memcpy(&meta->promise_id[0], PROMISE_MAGIC,
		    sizeof(PROMISE_MAGIC) - 1);
		meta->dummy_0 = 0x00020000;
		meta->integrity = PROMISE_I_VALID;
		meta->disk.flags = PROMISE_F_ONLINE | PROMISE_F_VALID;
		meta->disk.number = 0xff;
		arc4rand(&meta->disk.id, sizeof(meta->disk.id), 0);
		meta->disk_offset_high = off >> 32;
		meta->disk_offset = (uint32_t)off;
		meta->disk_sectors_high = size >> 32;
		meta->disk_sectors = (uint32_t)size;
		meta->disk_rebuild_high = UINT32_MAX;
		meta->disk_rebuild = UINT32_MAX;
		fake = 1;
	}
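	/*
	 * (A hedged interpretation of the record generated above: it
	 * apparently advertises the unallocated tail of the disk so that
	 * other consumers of Promise metadata do not treat that space as
	 * belonging to any volume; the format is not publicly documented.)
	 */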
	if (meta != NULL) {
		/* Recalculate checksum in case the metadata was changed. */
		meta->checksum = 0;
		for (checksum = 0, ptr = (uint32_t *)meta, i = 0; i < 511; i++)
			checksum += *ptr++;
		meta->checksum = checksum;
		memcpy(buf, meta, MIN(pp->sectorsize * 4, sizeof(*meta)));
	}
	error = g_write_data(cp, pp->mediasize - pp->sectorsize *
	    (63 - subdisk * PROMISE_META_OFFSET),
	    buf, pp->sectorsize * 4);
	if (error != 0) {
		G_RAID_DEBUG(1, "Cannot write metadata to %s (error=%d).",
		    pp->name, error);
	}
	free(buf, M_MD_PROMISE);

	subdisk++;
	if (subdisk < PROMISE_MAX_SUBDISKS)
		goto next;

	return (error);
}

static int
promise_meta_erase(struct g_consumer *cp)
{
	struct g_provider *pp;
	char *buf;
	int error, subdisk;

	pp = cp->provider;
	buf = malloc(4 * pp->sectorsize, M_MD_PROMISE, M_WAITOK | M_ZERO);
	for (subdisk = 0; subdisk < PROMISE_MAX_SUBDISKS; subdisk++) {
		error = g_write_data(cp, pp->mediasize - pp->sectorsize *
		    (63 - subdisk * PROMISE_META_OFFSET),
		    buf, 4 * pp->sectorsize);
		if (error != 0) {
			G_RAID_DEBUG(1, "Cannot erase metadata on %s (error=%d).",
			    pp->name, error);
		}
	}
	free(buf, M_MD_PROMISE);
	return (error);
}

static int
promise_meta_write_spare(struct g_consumer *cp)
{
	struct promise_raid_conf *meta;
	off_t tmp;
	int error;

	meta = malloc(sizeof(*meta), M_MD_PROMISE, M_WAITOK | M_ZERO);
	memcpy(&meta->promise_id[0], PROMISE_MAGIC, sizeof(PROMISE_MAGIC) - 1);
	meta->dummy_0 = 0x00020000;
	meta->integrity = PROMISE_I_VALID;
	meta->disk.flags = PROMISE_F_SPARE | PROMISE_F_ONLINE | PROMISE_F_VALID;
	meta->disk.number = 0xff;
	arc4rand(&meta->disk.id, sizeof(meta->disk.id), 0);
	tmp = cp->provider->mediasize / cp->provider->sectorsize - 131072;
	meta->disk_sectors_high = tmp >> 32;
	meta->disk_sectors = (uint32_t)tmp;
	meta->disk_rebuild_high = UINT32_MAX;
	meta->disk_rebuild = UINT32_MAX;
	error = promise_meta_write(cp, &meta, 1);
	free(meta, M_MD_PROMISE);
	return (error);
}

static struct g_raid_volume *
g_raid_md_promise_get_volume(struct g_raid_softc *sc, uint64_t id)
{
	struct g_raid_volume *vol;
	struct g_raid_md_promise_pervolume *pv;

	TAILQ_FOREACH(vol, &sc->sc_volumes, v_next) {
		pv = vol->v_md_data;
		if (pv->pv_id == id)
			break;
	}
	return (vol);
}

static int
g_raid_md_promise_purge_volumes(struct g_raid_softc *sc)
{
	struct g_raid_volume *vol, *tvol;
	struct g_raid_md_promise_pervolume *pv;
	int i, res;

	res = 0;
	TAILQ_FOREACH_SAFE(vol, &sc->sc_volumes, v_next, tvol) {
		pv = vol->v_md_data;
		if (!pv->pv_started || vol->v_stopping)
			continue;
		for (i = 0; i < vol->v_disks_count; i++) {
			if (vol->v_subdisks[i].sd_state != G_RAID_SUBDISK_S_NONE)
				break;
		}
		if (i >= vol->v_disks_count) {
			g_raid_destroy_volume(vol);
			res = 1;
		}
	}
	return (res);
}

static int
g_raid_md_promise_purge_disks(struct g_raid_softc *sc)
{
	struct g_raid_disk *disk, *tdisk;
	struct g_raid_volume *vol;
	struct g_raid_md_promise_perdisk *pd;
	int i, j, res;

	res = 0;
	TAILQ_FOREACH_SAFE(disk, &sc->sc_disks, d_next, tdisk) {
		if (disk->d_state == G_RAID_DISK_S_SPARE)
			continue;
		pd = (struct g_raid_md_promise_perdisk *)disk->d_md_data;

		/* Scan for deleted volumes. */
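		/*
		 * Records whose volume no longer exists (or is stopping)
		 * are freed and pd_meta[] is compacted; setting pd_updated
		 * makes the surviving records get flushed out the next time
		 * g_raid_md_write_promise() runs.
		 */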
		for (i = 0; i < pd->pd_subdisks; ) {
			vol = g_raid_md_promise_get_volume(sc,
			    pd->pd_meta[i]->volume_id);
			if (vol != NULL && !vol->v_stopping) {
				i++;
				continue;
			}
			free(pd->pd_meta[i], M_MD_PROMISE);
			for (j = i; j < pd->pd_subdisks - 1; j++)
				pd->pd_meta[j] = pd->pd_meta[j + 1];
			pd->pd_meta[pd->pd_subdisks - 1] = NULL;
			pd->pd_subdisks--;
			pd->pd_updated = 1;
		}

		/* If there is no metadata left - erase and delete disk. */
		if (pd->pd_subdisks == 0) {
			promise_meta_erase(disk->d_consumer);
			g_raid_destroy_disk(disk);
			res = 1;
		}
	}
	return (res);
}

static int
g_raid_md_promise_supported(int level, int qual, int disks, int force)
{

	if (disks > PROMISE_MAX_DISKS)
		return (0);
	switch (level) {
	case G_RAID_VOLUME_RL_RAID0:
		if (disks < 1)
			return (0);
		if (!force && disks < 2)
			return (0);
		break;
	case G_RAID_VOLUME_RL_RAID1:
		if (disks < 1)
			return (0);
		if (!force && (disks != 2))
			return (0);
		break;
	case G_RAID_VOLUME_RL_RAID1E:
		if (disks < 2)
			return (0);
		if (disks % 2 != 0)
			return (0);
		if (!force && (disks != 4))
			return (0);
		break;
	case G_RAID_VOLUME_RL_SINGLE:
		if (disks != 1)
			return (0);
		break;
	case G_RAID_VOLUME_RL_CONCAT:
		if (disks < 2)
			return (0);
		break;
	case G_RAID_VOLUME_RL_RAID5:
		if (disks < 3)
			return (0);
		if (qual != G_RAID_VOLUME_RLQ_R5LA)
			return (0);
		break;
	default:
		return (0);
	}
	if (level != G_RAID_VOLUME_RL_RAID5 && qual != G_RAID_VOLUME_RLQ_NONE)
		return (0);
	return (1);
}

static int
g_raid_md_promise_start_disk(struct g_raid_disk *disk, int sdn,
    struct g_raid_volume *vol)
{
	struct g_raid_softc *sc;
	struct g_raid_subdisk *sd;
	struct g_raid_md_promise_perdisk *pd;
	struct g_raid_md_promise_pervolume *pv;
	struct promise_raid_conf *meta;
	off_t eoff, esize, size;
	int disk_pos, md_disk_pos, i, resurrection = 0;

	sc = disk->d_softc;
	pd = (struct g_raid_md_promise_perdisk *)disk->d_md_data;

	pv = vol->v_md_data;
	meta = pv->pv_meta;

	if (sdn >= 0) {
		/* Find disk position in metadata by its serial. */
		md_disk_pos = promise_meta_find_disk(meta, pd->pd_meta[sdn]->disk.id);
		/* For RAID0+1 we need to translate order. */
		disk_pos = promise_meta_translate_disk(vol, md_disk_pos);
	} else {
		md_disk_pos = -1;
		disk_pos = -1;
	}
	if (disk_pos < 0) {
		G_RAID_DEBUG1(1, sc, "Disk %s is not part of the volume %s",
		    g_raid_get_diskname(disk), vol->v_name);
		/* Failed stale disk is useless for us. */
		if (sdn >= 0 &&
		    pd->pd_meta[sdn]->disk.flags & PROMISE_F_DOWN) {
			g_raid_change_disk_state(disk, G_RAID_DISK_S_STALE_FAILED);
			return (0);
		}
		/* If we were given specific metadata subdisk - erase it. */
		if (sdn >= 0) {
			free(pd->pd_meta[sdn], M_MD_PROMISE);
			for (i = sdn; i < pd->pd_subdisks - 1; i++)
				pd->pd_meta[i] = pd->pd_meta[i + 1];
			pd->pd_meta[pd->pd_subdisks - 1] = NULL;
			pd->pd_subdisks--;
		}
		/* If we are in the start process, that's all for now. */
		if (!pv->pv_started)
			goto nofit;
		/*
		 * If we have already started - try to make use of the disk.
		 * Try to replace OFFLINE disks first, then FAILED.
		 */
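		/*
		 * Sketch of what follows: find the largest free extent on
		 * this disk, note the subdisk size the volume uses, pick an
		 * absent or failed slot, and (except for CONCAT volumes,
		 * where any size fits) require the extent to cover that
		 * size.
		 */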
		promise_meta_unused_range(pd->pd_meta, pd->pd_subdisks,
		    disk->d_consumer->provider->mediasize /
		    disk->d_consumer->provider->sectorsize,
		    &eoff, &esize);
		if (esize == 0) {
			G_RAID_DEBUG1(1, sc, "No free space on disk %s",
			    g_raid_get_diskname(disk));
			goto nofit;
		}
		size = INT64_MAX;
		for (i = 0; i < vol->v_disks_count; i++) {
			sd = &vol->v_subdisks[i];
			if (sd->sd_state != G_RAID_SUBDISK_S_NONE)
				size = sd->sd_size;
			if (sd->sd_state <= G_RAID_SUBDISK_S_FAILED &&
			    (disk_pos < 0 ||
			     vol->v_subdisks[i].sd_state < sd->sd_state))
				disk_pos = i;
		}
		if (disk_pos >= 0 &&
		    vol->v_raid_level != G_RAID_VOLUME_RL_CONCAT &&
		    (off_t)esize * 512 < size) {
			G_RAID_DEBUG1(1, sc, "Disk %s free space "
			    "is too small (%ju < %ju)",
			    g_raid_get_diskname(disk),
			    (off_t)esize * 512, size);
			disk_pos = -1;
		}
		if (disk_pos >= 0) {
			if (vol->v_raid_level != G_RAID_VOLUME_RL_CONCAT)
				esize = size / 512;
			/* For RAID0+1 we need to translate order. */
			md_disk_pos = promise_meta_translate_disk(vol, disk_pos);
		} else {
nofit:
			if (pd->pd_subdisks == 0) {
				g_raid_change_disk_state(disk,
				    G_RAID_DISK_S_SPARE);
			}
			return (0);
		}
		G_RAID_DEBUG1(1, sc, "Disk %s takes pos %d in the volume %s",
		    g_raid_get_diskname(disk), disk_pos, vol->v_name);
		resurrection = 1;
	}

	sd = &vol->v_subdisks[disk_pos];

	if (resurrection && sd->sd_disk != NULL) {
		g_raid_change_disk_state(sd->sd_disk,
		    G_RAID_DISK_S_STALE_FAILED);
		TAILQ_REMOVE(&sd->sd_disk->d_subdisks,
		    sd, sd_next);
	}
	vol->v_subdisks[disk_pos].sd_disk = disk;
	TAILQ_INSERT_TAIL(&disk->d_subdisks, sd, sd_next);

	/* Welcome the new disk. */
	if (resurrection)
		g_raid_change_disk_state(disk, G_RAID_DISK_S_ACTIVE);
	else if (meta->disks[md_disk_pos].flags & PROMISE_F_DOWN)
		g_raid_change_disk_state(disk, G_RAID_DISK_S_FAILED);
	else
		g_raid_change_disk_state(disk, G_RAID_DISK_S_ACTIVE);

	if (resurrection) {
		sd->sd_offset = (off_t)eoff * 512;
		sd->sd_size = (off_t)esize * 512;
	} else {
		sd->sd_offset = (((off_t)pd->pd_meta[sdn]->disk_offset_high
		    << 32) + pd->pd_meta[sdn]->disk_offset) * 512;
		sd->sd_size = (((off_t)pd->pd_meta[sdn]->disk_sectors_high
		    << 32) + pd->pd_meta[sdn]->disk_sectors) * 512;
	}

	if (resurrection) {
		/* Stale disk, almost same as new. */
		g_raid_change_subdisk_state(sd,
		    G_RAID_SUBDISK_S_NEW);
	} else if (meta->disks[md_disk_pos].flags & PROMISE_F_DOWN) {
		/* Failed disk. */
		g_raid_change_subdisk_state(sd,
		    G_RAID_SUBDISK_S_FAILED);
	} else if (meta->disks[md_disk_pos].flags & PROMISE_F_REDIR) {
		/* Rebuilding disk. */
		g_raid_change_subdisk_state(sd,
		    G_RAID_SUBDISK_S_REBUILD);
		if (pd->pd_meta[sdn]->generation != meta->generation)
			sd->sd_rebuild_pos = 0;
		else {
			sd->sd_rebuild_pos =
			    (((off_t)pd->pd_meta[sdn]->disk_rebuild_high << 32) +
			    pd->pd_meta[sdn]->disk_rebuild) * 512;
		}
	} else if (!(meta->disks[md_disk_pos].flags & PROMISE_F_ONLINE)) {
		/* Rebuilding disk. */
		g_raid_change_subdisk_state(sd,
		    G_RAID_SUBDISK_S_NEW);
	} else if (pd->pd_meta[sdn]->generation != meta->generation ||
	    (meta->status & PROMISE_S_MARKED)) {
		/* Stale disk or dirty volume (unclean shutdown). */
		g_raid_change_subdisk_state(sd,
		    G_RAID_SUBDISK_S_STALE);
	} else {
		/* Up to date disk. */
		g_raid_change_subdisk_state(sd,
		    G_RAID_SUBDISK_S_ACTIVE);
	}
	g_raid_event_send(sd, G_RAID_SUBDISK_E_NEW,
	    G_RAID_EVENT_SUBDISK);

	return (resurrection);
}

static void
g_raid_md_promise_refill(struct g_raid_softc *sc)
{
	struct g_raid_volume *vol;
	struct g_raid_subdisk *sd;
	struct g_raid_disk *disk;
	struct g_raid_md_object *md;
	struct g_raid_md_promise_perdisk *pd;
	struct g_raid_md_promise_pervolume *pv;
	int update, updated, i, bad;

	md = sc->sc_md;
restart:
	updated = 0;
	TAILQ_FOREACH(vol, &sc->sc_volumes, v_next) {
		pv = vol->v_md_data;
		if (!pv->pv_started || vol->v_stopping)
			continue;

		/* Search for subdisk that needs replacement. */
		bad = 0;
		for (i = 0; i < vol->v_disks_count; i++) {
			sd = &vol->v_subdisks[i];
			if (sd->sd_state == G_RAID_SUBDISK_S_NONE ||
			    sd->sd_state == G_RAID_SUBDISK_S_FAILED)
				bad = 1;
		}
		if (!bad)
			continue;

		G_RAID_DEBUG1(1, sc, "Volume %s is not complete, "
		    "trying to refill.", vol->v_name);

		TAILQ_FOREACH(disk, &sc->sc_disks, d_next) {
			/* Skip failed. */
			if (disk->d_state < G_RAID_DISK_S_SPARE)
				continue;
			/* Skip already used by this volume. */
			for (i = 0; i < vol->v_disks_count; i++) {
				sd = &vol->v_subdisks[i];
				if (sd->sd_disk == disk)
					break;
			}
			if (i < vol->v_disks_count)
				continue;

			/* Try to use disk if it has empty extents. */
			pd = disk->d_md_data;
			if (pd->pd_subdisks < PROMISE_MAX_SUBDISKS) {
				update =
				    g_raid_md_promise_start_disk(disk, -1, vol);
			} else
				update = 0;
			if (update) {
				updated = 1;
				g_raid_md_write_promise(md, vol, NULL, disk);
				break;
			}
		}
	}
	if (updated)
		goto restart;
}

static void
g_raid_md_promise_start(struct g_raid_volume *vol)
{
	struct g_raid_softc *sc;
	struct g_raid_subdisk *sd;
	struct g_raid_disk *disk;
	struct g_raid_md_object *md;
	struct g_raid_md_promise_perdisk *pd;
	struct g_raid_md_promise_pervolume *pv;
	struct promise_raid_conf *meta;
	u_int i;

	sc = vol->v_softc;
	md = sc->sc_md;
	pv = vol->v_md_data;
	meta = pv->pv_meta;

	vol->v_raid_level_qualifier = G_RAID_VOLUME_RLQ_NONE;
	if (meta->type == PROMISE_T_RAID0)
		vol->v_raid_level = G_RAID_VOLUME_RL_RAID0;
	else if (meta->type == PROMISE_T_RAID1) {
		if (meta->array_width == 1)
			vol->v_raid_level = G_RAID_VOLUME_RL_RAID1;
		else
			vol->v_raid_level = G_RAID_VOLUME_RL_RAID1E;
	} else if (meta->type == PROMISE_T_RAID3)
		vol->v_raid_level = G_RAID_VOLUME_RL_RAID3;
	else if (meta->type == PROMISE_T_RAID5) {
		vol->v_raid_level = G_RAID_VOLUME_RL_RAID5;
		vol->v_raid_level_qualifier = G_RAID_VOLUME_RLQ_R5LA;
	} else if (meta->type == PROMISE_T_SPAN)
		vol->v_raid_level = G_RAID_VOLUME_RL_CONCAT;
	else if (meta->type == PROMISE_T_JBOD)
		vol->v_raid_level = G_RAID_VOLUME_RL_SINGLE;
	else
		vol->v_raid_level = G_RAID_VOLUME_RL_UNKNOWN;
	vol->v_strip_size = 512 << meta->stripe_shift; //ZZZ
	vol->v_disks_count = meta->total_disks;
	vol->v_mediasize = (off_t)meta->total_sectors * 512; //ZZZ
	if (meta->total_sectors_high < 256) /* If value looks sane. */
		vol->v_mediasize +=
		    ((off_t)meta->total_sectors_high << 32) * 512; //ZZZ
	vol->v_sectorsize = 512 * meta->sector_size;
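	/*
	 * The per-subdisk offsets/sizes below come from the freshest
	 * metadata copy and apparently serve as defaults only:
	 * g_raid_md_promise_start_disk() re-derives them from each disk's
	 * own record as the disks are placed.
	 */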
	for (i = 0; i < vol->v_disks_count; i++) {
		sd = &vol->v_subdisks[i];
		sd->sd_offset = (((off_t)meta->disk_offset_high << 32) +
		    meta->disk_offset) * 512;
		sd->sd_size = (((off_t)meta->disk_sectors_high << 32) +
		    meta->disk_sectors) * 512;
	}
	g_raid_start_volume(vol);

	/* Make all disks found so far take their places. */
	TAILQ_FOREACH(disk, &sc->sc_disks, d_next) {
		pd = disk->d_md_data;
		for (i = 0; i < pd->pd_subdisks; i++) {
			if (pd->pd_meta[i]->volume_id == meta->volume_id)
				g_raid_md_promise_start_disk(disk, i, vol);
		}
	}

	pv->pv_started = 1;
	callout_stop(&pv->pv_start_co);
	G_RAID_DEBUG1(0, sc, "Volume started.");
	g_raid_md_write_promise(md, vol, NULL, NULL);

	/* Pickup any STALE/SPARE disks to refill array if needed. */
	g_raid_md_promise_refill(sc);

	g_raid_event_send(vol, G_RAID_VOLUME_E_START, G_RAID_EVENT_VOLUME);
}

static void
g_raid_promise_go(void *arg)
{
	struct g_raid_volume *vol;
	struct g_raid_softc *sc;
	struct g_raid_md_promise_pervolume *pv;

	vol = arg;
	pv = vol->v_md_data;
	sc = vol->v_softc;
	if (!pv->pv_started) {
		G_RAID_DEBUG1(0, sc, "Force volume start due to timeout.");
		g_raid_event_send(vol, G_RAID_VOLUME_E_STARTMD,
		    G_RAID_EVENT_VOLUME);
	}
}

static void
g_raid_md_promise_new_disk(struct g_raid_disk *disk)
{
	struct g_raid_softc *sc;
	struct g_raid_md_object *md;
	struct promise_raid_conf *pdmeta;
	struct g_raid_md_promise_perdisk *pd;
	struct g_raid_md_promise_pervolume *pv;
	struct g_raid_volume *vol;
	int i;
	char buf[33];

	sc = disk->d_softc;
	md = sc->sc_md;
	pd = (struct g_raid_md_promise_perdisk *)disk->d_md_data;

	if (pd->pd_subdisks == 0) {
		g_raid_change_disk_state(disk, G_RAID_DISK_S_SPARE);
		g_raid_md_promise_refill(sc);
		return;
	}

	for (i = 0; i < pd->pd_subdisks; i++) {
		pdmeta = pd->pd_meta[i];

		/* Look for volume with matching ID. */
		vol = g_raid_md_promise_get_volume(sc, pdmeta->volume_id);
		if (vol == NULL) {
			promise_meta_get_name(pdmeta, buf);
			vol = g_raid_create_volume(sc, buf, pdmeta->array_number);
			pv = malloc(sizeof(*pv), M_MD_PROMISE, M_WAITOK | M_ZERO);
			pv->pv_id = pdmeta->volume_id;
			vol->v_md_data = pv;
			callout_init(&pv->pv_start_co, 1);
			callout_reset(&pv->pv_start_co,
			    g_raid_start_timeout * hz,
			    g_raid_promise_go, vol);
		} else
			pv = vol->v_md_data;

		/* If we haven't started yet - check metadata freshness. */
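		/*
		 * The (int16_t) cast below makes the comparison wrap-safe:
		 * generations are compared as 16-bit serial numbers, so a
		 * copy whose counter wrapped past 0xffff still compares as
		 * newer.
		 */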
		if (pv->pv_meta == NULL || !pv->pv_started) {
			if (pv->pv_meta == NULL ||
			    ((int16_t)(pdmeta->generation - pv->pv_generation)) > 0) {
				G_RAID_DEBUG1(1, sc, "Newer disk");
				if (pv->pv_meta != NULL)
					free(pv->pv_meta, M_MD_PROMISE);
				pv->pv_meta = promise_meta_copy(pdmeta);
				pv->pv_generation = pv->pv_meta->generation;
				pv->pv_disks_present = 1;
			} else if (pdmeta->generation == pv->pv_generation) {
				pv->pv_disks_present++;
				G_RAID_DEBUG1(1, sc, "Matching disk (%d of %d up)",
				    pv->pv_disks_present,
				    pv->pv_meta->total_disks);
			} else {
				G_RAID_DEBUG1(1, sc, "Older disk");
			}
		}
	}

	for (i = 0; i < pd->pd_subdisks; i++) {
		pdmeta = pd->pd_meta[i];

		/* Look for volume with matching ID. */
		vol = g_raid_md_promise_get_volume(sc, pdmeta->volume_id);
		if (vol == NULL)
			continue;
		pv = vol->v_md_data;

		if (pv->pv_started) {
			if (g_raid_md_promise_start_disk(disk, i, vol))
				g_raid_md_write_promise(md, vol, NULL, NULL);
		} else {
			/* If we collected all needed disks - start array. */
			if (pv->pv_disks_present == pv->pv_meta->total_disks)
				g_raid_md_promise_start(vol);
		}
	}
}

static int
g_raid_md_create_promise(struct g_raid_md_object *md, struct g_class *mp,
    struct g_geom **gp)
{
	struct g_geom *geom;
	struct g_raid_softc *sc;

	/* Search for existing node. */
	LIST_FOREACH(geom, &mp->geom, geom) {
		sc = geom->softc;
		if (sc == NULL)
			continue;
		if (sc->sc_stopping != 0)
			continue;
		if (sc->sc_md->mdo_class != md->mdo_class)
			continue;
		break;
	}
	if (geom != NULL) {
		*gp = geom;
		return (G_RAID_MD_TASTE_EXISTING);
	}

	/* Create new one if not found. */
	sc = g_raid_create_node(mp, "Promise", md);
	if (sc == NULL)
		return (G_RAID_MD_TASTE_FAIL);
	md->mdo_softc = sc;
	*gp = sc->sc_geom;
	return (G_RAID_MD_TASTE_NEW);
}

static int
g_raid_md_taste_promise(struct g_raid_md_object *md, struct g_class *mp,
    struct g_consumer *cp, struct g_geom **gp)
{
	struct g_consumer *rcp;
	struct g_provider *pp;
	struct g_raid_softc *sc;
	struct g_raid_disk *disk;
	struct promise_raid_conf *metaarr[4];
	struct g_raid_md_promise_perdisk *pd;
	struct g_geom *geom;
	int i, j, result, len, subdisks;
	char name[16];
	uint16_t vendor;

	G_RAID_DEBUG(1, "Tasting Promise on %s", cp->provider->name);
	pp = cp->provider;

	/* Read metadata from device. */
	g_topology_unlock();
	vendor = 0xffff;
	len = sizeof(vendor);
	if (pp->geom->rank == 1)
		g_io_getattr("GEOM::hba_vendor", cp, &len, &vendor);
	subdisks = promise_meta_read(cp, metaarr);
	g_topology_lock();
	if (subdisks == 0) {
		if (g_raid_aggressive_spare) {
			if (vendor == 0x105a || vendor == 0x1002) {
				G_RAID_DEBUG(1,
				    "No Promise metadata, forcing spare.");
				goto search;
			} else {
				G_RAID_DEBUG(1,
				    "Promise/ATI vendor mismatch "
				    "0x%04x != 0x105a/0x1002",
				    vendor);
			}
		}
		return (G_RAID_MD_TASTE_FAIL);
	}

	/* Metadata valid. Print it. */
	for (i = 0; i < subdisks; i++)
		g_raid_md_promise_print(metaarr[i]);

	/* Purge meaningless (empty/spare) records. */
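	/*
	 * Member records are written with PROMISE_F_ASSIGNED set, while
	 * spare and free-space records are not (see promise_meta_write()
	 * and promise_meta_write_spare()); that flag is what the filter
	 * below keys on.
	 */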
	for (i = 0; i < subdisks; ) {
		if (metaarr[i]->disk.flags & PROMISE_F_ASSIGNED) {
			i++;
			continue;
		}
		free(metaarr[i], M_MD_PROMISE);
		for (j = i; j < subdisks - 1; j++)
			metaarr[j] = metaarr[j + 1];
		metaarr[subdisks - 1] = NULL;
		subdisks--;
	}

search:
	/* Search for matching node. */
	sc = NULL;
	LIST_FOREACH(geom, &mp->geom, geom) {
		sc = geom->softc;
		if (sc == NULL)
			continue;
		if (sc->sc_stopping != 0)
			continue;
		if (sc->sc_md->mdo_class != md->mdo_class)
			continue;
		break;
	}

	/* Found matching node. */
	if (geom != NULL) {
		G_RAID_DEBUG(1, "Found matching array %s", sc->sc_name);
		result = G_RAID_MD_TASTE_EXISTING;

	} else { /* Not found matching node -- create one. */
		result = G_RAID_MD_TASTE_NEW;
		snprintf(name, sizeof(name), "Promise");
		sc = g_raid_create_node(mp, name, md);
		md->mdo_softc = sc;
		geom = sc->sc_geom;
	}

	/* There is no return after this point, so we close passed consumer. */
	g_access(cp, -1, 0, 0);

	rcp = g_new_consumer(geom);
	rcp->flags |= G_CF_DIRECT_RECEIVE;
	g_attach(rcp, pp);
	if (g_access(rcp, 1, 1, 1) != 0)
		; //goto fail1;

	g_topology_unlock();
	sx_xlock(&sc->sc_lock);

	pd = malloc(sizeof(*pd), M_MD_PROMISE, M_WAITOK | M_ZERO);
	pd->pd_subdisks = subdisks;
	for (i = 0; i < subdisks; i++)
		pd->pd_meta[i] = metaarr[i];
	disk = g_raid_create_disk(sc);
	disk->d_md_data = (void *)pd;
	disk->d_consumer = rcp;
	rcp->private = disk;

	g_raid_get_disk_info(disk);

	g_raid_md_promise_new_disk(disk);

	sx_xunlock(&sc->sc_lock);
	g_topology_lock();
	*gp = geom;
	return (result);
}

static int
g_raid_md_event_promise(struct g_raid_md_object *md,
    struct g_raid_disk *disk, u_int event)
{
	struct g_raid_softc *sc;

	sc = md->mdo_softc;
	if (disk == NULL)
		return (-1);
	switch (event) {
	case G_RAID_DISK_E_DISCONNECTED:
		/* Delete disk. */
		g_raid_change_disk_state(disk, G_RAID_DISK_S_NONE);
		g_raid_destroy_disk(disk);
		g_raid_md_promise_purge_volumes(sc);

		/* Write updated metadata to all disks. */
		g_raid_md_write_promise(md, NULL, NULL, NULL);

		/* Check if anything left. */
		if (g_raid_ndisks(sc, -1) == 0)
			g_raid_destroy_node(sc, 0);
		else
			g_raid_md_promise_refill(sc);
		return (0);
	}
	return (-2);
}

static int
g_raid_md_volume_event_promise(struct g_raid_md_object *md,
    struct g_raid_volume *vol, u_int event)
{
	struct g_raid_md_promise_pervolume *pv;

	pv = (struct g_raid_md_promise_pervolume *)vol->v_md_data;
	switch (event) {
	case G_RAID_VOLUME_E_STARTMD:
		if (!pv->pv_started)
			g_raid_md_promise_start(vol);
		return (0);
	}
	return (-2);
}

static int
g_raid_md_ctl_promise(struct g_raid_md_object *md,
    struct gctl_req *req)
{
	struct g_raid_softc *sc;
	struct g_raid_volume *vol, *vol1;
	struct g_raid_subdisk *sd;
	struct g_raid_disk *disk, *disks[PROMISE_MAX_DISKS];
	struct g_raid_md_promise_perdisk *pd;
	struct g_raid_md_promise_pervolume *pv;
	struct g_consumer *cp;
	struct g_provider *pp;
	char arg[16];
	const char *nodename, *verb, *volname, *levelname, *diskname;
	char *tmp;
	int *nargs, *force;
	off_t esize, offs[PROMISE_MAX_DISKS], size, sectorsize, strip;
	intmax_t *sizearg, *striparg;
	int numdisks, i, len, level, qual;
	int error;

	sc = md->mdo_softc;
	verb = gctl_get_param(req, "verb", NULL);
	nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs));
	error = 0;
	if (strcmp(verb, "label") == 0) {
		if (*nargs < 4) {
			gctl_error(req, "Invalid number of arguments.");
			return (-1);
		}
		volname = gctl_get_asciiparam(req, "arg1");
		if (volname == NULL) {
			gctl_error(req, "No volume name.");
			return (-2);
		}
		levelname = gctl_get_asciiparam(req, "arg2");
		if (levelname == NULL) {
			gctl_error(req, "No RAID level.");
			return (-3);
		}
		if (strcasecmp(levelname, "RAID5") == 0)
			levelname = "RAID5-LA";
		if (g_raid_volume_str2level(levelname, &level, &qual)) {
			gctl_error(req, "Unknown RAID level '%s'.", levelname);
			return (-4);
		}
		numdisks = *nargs - 3;
		force = gctl_get_paraml(req, "force", sizeof(*force));
		if (!g_raid_md_promise_supported(level, qual, numdisks,
		    force ? *force : 0)) {
			gctl_error(req, "Unsupported RAID level "
			    "(0x%02x/0x%02x), or number of disks (%d).",
			    level, qual, numdisks);
			return (-5);
		}

		/* Search for disks, connect them and probe. */
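		/*
		 * Disk names arrive as arg3 and up (e.g., hypothetically,
		 * "graid label Promise vol RAID1 ada0 ada1"); the literal
		 * name "NONE" leaves the corresponding slot empty.
		 */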
		size = INT64_MAX;
		sectorsize = 0;
		bzero(disks, sizeof(disks));
		bzero(offs, sizeof(offs));
		for (i = 0; i < numdisks; i++) {
			snprintf(arg, sizeof(arg), "arg%d", i + 3);
			diskname = gctl_get_asciiparam(req, arg);
			if (diskname == NULL) {
				gctl_error(req, "No disk name (%s).", arg);
				error = -6;
				break;
			}
			if (strcmp(diskname, "NONE") == 0)
				continue;

			TAILQ_FOREACH(disk, &sc->sc_disks, d_next) {
				if (disk->d_consumer != NULL &&
				    disk->d_consumer->provider != NULL &&
				    strcmp(disk->d_consumer->provider->name,
				    diskname) == 0)
					break;
			}
			if (disk != NULL) {
				if (disk->d_state != G_RAID_DISK_S_ACTIVE) {
					gctl_error(req, "Disk '%s' is in a "
					    "wrong state (%s).", diskname,
					    g_raid_disk_state2str(disk->d_state));
					error = -7;
					break;
				}
				pd = disk->d_md_data;
				if (pd->pd_subdisks >= PROMISE_MAX_SUBDISKS) {
					gctl_error(req, "Disk '%s' already "
					    "used by %d volumes.",
					    diskname, pd->pd_subdisks);
					error = -7;
					break;
				}
				pp = disk->d_consumer->provider;
				disks[i] = disk;
				promise_meta_unused_range(pd->pd_meta,
				    pd->pd_subdisks,
				    pp->mediasize / pp->sectorsize,
				    &offs[i], &esize);
				size = MIN(size, (off_t)esize * pp->sectorsize);
				sectorsize = MAX(sectorsize, pp->sectorsize);
				continue;
			}

			g_topology_lock();
			cp = g_raid_open_consumer(sc, diskname);
			if (cp == NULL) {
				gctl_error(req, "Can't open disk '%s'.",
				    diskname);
				g_topology_unlock();
				error = -8;
				break;
			}
			pp = cp->provider;
			pd = malloc(sizeof(*pd), M_MD_PROMISE, M_WAITOK | M_ZERO);
			disk = g_raid_create_disk(sc);
			disk->d_md_data = (void *)pd;
			disk->d_consumer = cp;
			disks[i] = disk;
			cp->private = disk;
			g_topology_unlock();

			g_raid_get_disk_info(disk);

			/* Reserve some space for metadata. */
			size = MIN(size, pp->mediasize - 131072llu * pp->sectorsize);
			sectorsize = MAX(sectorsize, pp->sectorsize);
		}
		if (error != 0) {
			for (i = 0; i < numdisks; i++) {
				if (disks[i] != NULL &&
				    disks[i]->d_state == G_RAID_DISK_S_NONE)
					g_raid_destroy_disk(disks[i]);
			}
			return (error);
		}

		if (sectorsize <= 0) {
			gctl_error(req, "Can't get sector size.");
			return (-8);
		}

		/* Handle size argument. */
		len = sizeof(*sizearg);
		sizearg = gctl_get_param(req, "size", &len);
		if (sizearg != NULL && len == sizeof(*sizearg) &&
		    *sizearg > 0) {
			if (*sizearg > size) {
				gctl_error(req, "Size too big %lld > %lld.",
				    (long long)*sizearg, (long long)size);
				return (-9);
			}
			size = *sizearg;
		}

		/* Handle strip argument. */
		strip = 131072;
		len = sizeof(*striparg);
		striparg = gctl_get_param(req, "strip", &len);
		if (striparg != NULL && len == sizeof(*striparg) &&
		    *striparg > 0) {
			if (*striparg < sectorsize) {
				gctl_error(req, "Strip size too small.");
				return (-10);
			}
			if (*striparg % sectorsize != 0) {
				gctl_error(req, "Incorrect strip size.");
				return (-11);
			}
			strip = *striparg;
		}

		/* Round size down to strip or sector. */
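		/*
		 * A plausible reading of the RAID1E case below: with an odd
		 * number of disks the rotated layout repeats only every two
		 * strips per disk, hence the 2 * strip granularity.
		 */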
		if (level == G_RAID_VOLUME_RL_RAID1 ||
		    level == G_RAID_VOLUME_RL_SINGLE ||
		    level == G_RAID_VOLUME_RL_CONCAT)
			size -= (size % sectorsize);
		else if (level == G_RAID_VOLUME_RL_RAID1E &&
		    (numdisks & 1) != 0)
			size -= (size % (2 * strip));
		else
			size -= (size % strip);
		if (size <= 0) {
			gctl_error(req, "Size too small.");
			return (-13);
		}

		/* We have all we need, create things: volume, ... */
		pv = malloc(sizeof(*pv), M_MD_PROMISE, M_WAITOK | M_ZERO);
		arc4rand(&pv->pv_id, sizeof(pv->pv_id), 0);
		pv->pv_generation = 0;
		pv->pv_started = 1;
		vol = g_raid_create_volume(sc, volname, -1);
		vol->v_md_data = pv;
		vol->v_raid_level = level;
		vol->v_raid_level_qualifier = qual;
		vol->v_strip_size = strip;
		vol->v_disks_count = numdisks;
		if (level == G_RAID_VOLUME_RL_RAID0 ||
		    level == G_RAID_VOLUME_RL_CONCAT ||
		    level == G_RAID_VOLUME_RL_SINGLE)
			vol->v_mediasize = size * numdisks;
		else if (level == G_RAID_VOLUME_RL_RAID1)
			vol->v_mediasize = size;
		else if (level == G_RAID_VOLUME_RL_RAID3 ||
		    level == G_RAID_VOLUME_RL_RAID5)
			vol->v_mediasize = size * (numdisks - 1);
		else { /* RAID1E */
			vol->v_mediasize = ((size * numdisks) / strip / 2) *
			    strip;
		}
		vol->v_sectorsize = sectorsize;
		g_raid_start_volume(vol);

		/* , and subdisks. */
		for (i = 0; i < numdisks; i++) {
			disk = disks[i];
			sd = &vol->v_subdisks[i];
			sd->sd_disk = disk;
			sd->sd_offset = (off_t)offs[i] * 512;
			sd->sd_size = size;
			if (disk == NULL)
				continue;
			TAILQ_INSERT_TAIL(&disk->d_subdisks, sd, sd_next);
			g_raid_change_disk_state(disk,
			    G_RAID_DISK_S_ACTIVE);
			g_raid_change_subdisk_state(sd,
			    G_RAID_SUBDISK_S_ACTIVE);
			g_raid_event_send(sd, G_RAID_SUBDISK_E_NEW,
			    G_RAID_EVENT_SUBDISK);
		}

		/* Write metadata based on created entities. */
		G_RAID_DEBUG1(0, sc, "Array started.");
		g_raid_md_write_promise(md, vol, NULL, NULL);

		/* Pickup any STALE/SPARE disks to refill array if needed. */
		g_raid_md_promise_refill(sc);

		g_raid_event_send(vol, G_RAID_VOLUME_E_START,
		    G_RAID_EVENT_VOLUME);
		return (0);
	}
	if (strcmp(verb, "add") == 0) {
		gctl_error(req, "`add` command is not applicable, "
		    "use `label` instead.");
		return (-99);
	}
	if (strcmp(verb, "delete") == 0) {
		nodename = gctl_get_asciiparam(req, "arg0");
		if (nodename != NULL && strcasecmp(sc->sc_name, nodename) != 0)
			nodename = NULL;

		/* Full node destruction. */
		if (*nargs == 1 && nodename != NULL) {
			/* Check if some volume is still open. */
			force = gctl_get_paraml(req, "force", sizeof(*force));
			if (force != NULL && *force == 0 &&
			    g_raid_nopens(sc) != 0) {
				gctl_error(req, "Some volume is still open.");
				return (-4);
			}

			TAILQ_FOREACH(disk, &sc->sc_disks, d_next) {
				if (disk->d_consumer)
					promise_meta_erase(disk->d_consumer);
			}
			g_raid_destroy_node(sc, 0);
			return (0);
		}

		/* Destroy specified volume. If it was last - all node. */
		if (*nargs > 2) {
			gctl_error(req, "Invalid number of arguments.");
			return (-1);
		}
		volname = gctl_get_asciiparam(req,
		    nodename != NULL ? "arg1" : "arg0");
		if (volname == NULL) {
			gctl_error(req, "No volume name.");
			return (-2);
		}

		/* Search for volume. */
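		/*
		 * The lookup below accepts a volume name, a provider name
		 * (with or without the "raid/" prefix) and, failing those,
		 * a numeric global volume ID.
		 */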
		TAILQ_FOREACH(vol, &sc->sc_volumes, v_next) {
			if (strcmp(vol->v_name, volname) == 0)
				break;
			pp = vol->v_provider;
			if (pp == NULL)
				continue;
			if (strcmp(pp->name, volname) == 0)
				break;
			if (strncmp(pp->name, "raid/", 5) == 0 &&
			    strcmp(pp->name + 5, volname) == 0)
				break;
		}
		if (vol == NULL) {
			i = strtol(volname, &tmp, 10);
			if (verb != volname && tmp[0] == 0) {
				TAILQ_FOREACH(vol, &sc->sc_volumes, v_next) {
					if (vol->v_global_id == i)
						break;
				}
			}
		}
		if (vol == NULL) {
			gctl_error(req, "Volume '%s' not found.", volname);
			return (-3);
		}

		/* Check if volume is still open. */
		force = gctl_get_paraml(req, "force", sizeof(*force));
		if (force != NULL && *force == 0 &&
		    vol->v_provider_open != 0) {
			gctl_error(req, "Volume is still open.");
			return (-4);
		}

		/* Destroy volume and potentially node. */
		i = 0;
		TAILQ_FOREACH(vol1, &sc->sc_volumes, v_next)
			i++;
		if (i >= 2) {
			g_raid_destroy_volume(vol);
			g_raid_md_promise_purge_disks(sc);
			g_raid_md_write_promise(md, NULL, NULL, NULL);
		} else {
			TAILQ_FOREACH(disk, &sc->sc_disks, d_next) {
				if (disk->d_consumer)
					promise_meta_erase(disk->d_consumer);
			}
			g_raid_destroy_node(sc, 0);
		}
		return (0);
	}
	if (strcmp(verb, "remove") == 0 ||
	    strcmp(verb, "fail") == 0) {
		if (*nargs < 2) {
			gctl_error(req, "Invalid number of arguments.");
			return (-1);
		}
		for (i = 1; i < *nargs; i++) {
			snprintf(arg, sizeof(arg), "arg%d", i);
			diskname = gctl_get_asciiparam(req, arg);
			if (diskname == NULL) {
				gctl_error(req, "No disk name (%s).", arg);
				error = -2;
				break;
			}
			if (strncmp(diskname, _PATH_DEV, 5) == 0)
				diskname += 5;

			TAILQ_FOREACH(disk, &sc->sc_disks, d_next) {
				if (disk->d_consumer != NULL &&
				    disk->d_consumer->provider != NULL &&
				    strcmp(disk->d_consumer->provider->name,
				    diskname) == 0)
					break;
			}
			if (disk == NULL) {
				gctl_error(req, "Disk '%s' not found.",
				    diskname);
				error = -3;
				break;
			}

			if (strcmp(verb, "fail") == 0) {
				g_raid_md_fail_disk_promise(md, NULL, disk);
				continue;
			}

			/* Erase metadata on deleting disk and destroy it. */
			promise_meta_erase(disk->d_consumer);
			g_raid_destroy_disk(disk);
		}
		g_raid_md_promise_purge_volumes(sc);

		/* Write updated metadata to remaining disks. */
		g_raid_md_write_promise(md, NULL, NULL, NULL);

		/* Check if anything left. */
		if (g_raid_ndisks(sc, -1) == 0)
			g_raid_destroy_node(sc, 0);
		else
			g_raid_md_promise_refill(sc);
		return (error);
	}
	if (strcmp(verb, "insert") == 0) {
		if (*nargs < 2) {
			gctl_error(req, "Invalid number of arguments.");
			return (-1);
		}
		for (i = 1; i < *nargs; i++) {
			/* Get disk name. */
			snprintf(arg, sizeof(arg), "arg%d", i);
			diskname = gctl_get_asciiparam(req, arg);
			if (diskname == NULL) {
				gctl_error(req, "No disk name (%s).", arg);
				error = -3;
				break;
			}

			/* Try to find provider with specified name. */
			g_topology_lock();
			cp = g_raid_open_consumer(sc, diskname);
			if (cp == NULL) {
				gctl_error(req, "Can't open disk '%s'.",
				    diskname);
				g_topology_unlock();
				error = -4;
				break;
			}
			pp = cp->provider;
			g_topology_unlock();

			pd = malloc(sizeof(*pd), M_MD_PROMISE, M_WAITOK | M_ZERO);

			disk = g_raid_create_disk(sc);
			disk->d_consumer = cp;
			disk->d_md_data = (void *)pd;
			cp->private = disk;

			g_raid_get_disk_info(disk);

			/* Welcome the "new" disk. */
			g_raid_change_disk_state(disk, G_RAID_DISK_S_SPARE);
			promise_meta_write_spare(cp);
			g_raid_md_promise_refill(sc);
		}
		return (error);
	}
	return (-100);
}

static int
g_raid_md_write_promise(struct g_raid_md_object *md, struct g_raid_volume *tvol,
    struct g_raid_subdisk *tsd, struct g_raid_disk *tdisk)
{
	struct g_raid_softc *sc;
	struct g_raid_volume *vol;
	struct g_raid_subdisk *sd;
	struct g_raid_disk *disk;
	struct g_raid_md_promise_perdisk *pd;
	struct g_raid_md_promise_pervolume *pv;
	struct promise_raid_conf *meta;
	off_t rebuild_lba64;
	int i, j, pos, rebuild;

	sc = md->mdo_softc;

	if (sc->sc_stopping == G_RAID_DESTROY_HARD)
		return (0);

	/* Generate new per-volume metadata for affected volumes. */
	TAILQ_FOREACH(vol, &sc->sc_volumes, v_next) {
		if (vol->v_stopping)
			continue;

		/* Skip volumes not related to specified targets. */
		if (tvol != NULL && vol != tvol)
			continue;
		if (tsd != NULL && vol != tsd->sd_volume)
			continue;
		if (tdisk != NULL) {
			for (i = 0; i < vol->v_disks_count; i++) {
				if (vol->v_subdisks[i].sd_disk == tdisk)
					break;
			}
			if (i >= vol->v_disks_count)
				continue;
		}

		pv = (struct g_raid_md_promise_pervolume *)vol->v_md_data;
		pv->pv_generation++;

		meta = malloc(sizeof(*meta), M_MD_PROMISE, M_WAITOK | M_ZERO);
		if (pv->pv_meta != NULL)
			memcpy(meta, pv->pv_meta, sizeof(*meta));
		memcpy(meta->promise_id, PROMISE_MAGIC,
		    sizeof(PROMISE_MAGIC) - 1);
		meta->dummy_0 = 0x00020000;
		meta->integrity = PROMISE_I_VALID;

		meta->generation = pv->pv_generation;
		meta->status = PROMISE_S_VALID | PROMISE_S_ONLINE |
		    PROMISE_S_INITED | PROMISE_S_READY;
		if (vol->v_state <= G_RAID_VOLUME_S_DEGRADED)
			meta->status |= PROMISE_S_DEGRADED;
		if (vol->v_dirty)
			meta->status |= PROMISE_S_MARKED; /* XXX: INVENTED! */
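		/*
		 * Encoding note for the fields below: stripe_shift stores
		 * log2(strip size / 512); since ffs() returns a 1-based bit
		 * index, ffs(v_strip_size / 1024) yields exactly that,
		 * matching the "512 << stripe_shift" decode on the read
		 * side.
		 */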
		if (vol->v_raid_level == G_RAID_VOLUME_RL_RAID0 ||
		    vol->v_raid_level == G_RAID_VOLUME_RL_SINGLE)
			meta->type = PROMISE_T_RAID0;
		else if (vol->v_raid_level == G_RAID_VOLUME_RL_RAID1 ||
		    vol->v_raid_level == G_RAID_VOLUME_RL_RAID1E)
			meta->type = PROMISE_T_RAID1;
		else if (vol->v_raid_level == G_RAID_VOLUME_RL_RAID3)
			meta->type = PROMISE_T_RAID3;
		else if (vol->v_raid_level == G_RAID_VOLUME_RL_RAID5)
			meta->type = PROMISE_T_RAID5;
		else if (vol->v_raid_level == G_RAID_VOLUME_RL_CONCAT)
			meta->type = PROMISE_T_SPAN;
		else
			meta->type = PROMISE_T_JBOD;
		meta->total_disks = vol->v_disks_count;
		meta->stripe_shift = ffs(vol->v_strip_size / 1024);
		meta->array_width = vol->v_disks_count;
		if (vol->v_raid_level == G_RAID_VOLUME_RL_RAID1 ||
		    vol->v_raid_level == G_RAID_VOLUME_RL_RAID1E)
			meta->array_width /= 2;
		meta->array_number = vol->v_global_id;
		meta->total_sectors = vol->v_mediasize / 512;
		meta->total_sectors_high = (vol->v_mediasize / 512) >> 32;
		meta->sector_size = vol->v_sectorsize / 512;
		meta->cylinders = meta->total_sectors / (255 * 63) - 1;
		meta->heads = 254;
		meta->sectors = 63;
		meta->volume_id = pv->pv_id;
		rebuild_lba64 = UINT64_MAX;
		rebuild = 0;
		for (i = 0; i < vol->v_disks_count; i++) {
			sd = &vol->v_subdisks[i];
			/* For RAID0+1 we need to translate order. */
			pos = promise_meta_translate_disk(vol, i);
			meta->disks[pos].flags = PROMISE_F_VALID |
			    PROMISE_F_ASSIGNED;
			if (sd->sd_state == G_RAID_SUBDISK_S_NONE) {
				meta->disks[pos].flags |= 0;
			} else if (sd->sd_state == G_RAID_SUBDISK_S_FAILED) {
				meta->disks[pos].flags |=
				    PROMISE_F_DOWN | PROMISE_F_REDIR;
			} else if (sd->sd_state <= G_RAID_SUBDISK_S_REBUILD) {
				meta->disks[pos].flags |=
				    PROMISE_F_ONLINE | PROMISE_F_REDIR;
				if (sd->sd_state == G_RAID_SUBDISK_S_REBUILD) {
					rebuild_lba64 = MIN(rebuild_lba64,
					    sd->sd_rebuild_pos / 512);
				} else
					rebuild_lba64 = 0;
				rebuild = 1;
			} else {
				meta->disks[pos].flags |= PROMISE_F_ONLINE;
				if (sd->sd_state < G_RAID_SUBDISK_S_ACTIVE) {
					meta->status |= PROMISE_S_MARKED;
					if (sd->sd_state == G_RAID_SUBDISK_S_RESYNC) {
						rebuild_lba64 = MIN(rebuild_lba64,
						    sd->sd_rebuild_pos / 512);
					} else
						rebuild_lba64 = 0;
				}
			}
			if (pv->pv_meta != NULL) {
				meta->disks[pos].id = pv->pv_meta->disks[pos].id;
			} else {
				meta->disks[pos].number = i * 2;
				arc4rand(&meta->disks[pos].id,
				    sizeof(meta->disks[pos].id), 0);
			}
		}
		promise_meta_put_name(meta, vol->v_name);

		/* Try to mimic AMD BIOS rebuild/resync behavior. */
		if (rebuild_lba64 != UINT64_MAX) {
			if (rebuild)
				meta->magic_3 = 0x03040010UL; /* Rebuild? */
			else
				meta->magic_3 = 0x03040008UL; /* Resync? */
			/* Translate from per-disk to per-volume LBA. */
			if (vol->v_raid_level == G_RAID_VOLUME_RL_RAID1 ||
			    vol->v_raid_level == G_RAID_VOLUME_RL_RAID1E) {
				rebuild_lba64 *= meta->array_width;
			} else if (vol->v_raid_level == G_RAID_VOLUME_RL_RAID3 ||
			    vol->v_raid_level == G_RAID_VOLUME_RL_RAID5) {
				rebuild_lba64 *= meta->array_width - 1;
			} else
				rebuild_lba64 = 0;
		} else
			meta->magic_3 = 0x03000000UL;
		meta->rebuild_lba64 = rebuild_lba64;
		meta->magic_4 = 0x04010101UL;

		/* Replace per-volume metadata with new. */
		if (pv->pv_meta != NULL)
			free(pv->pv_meta, M_MD_PROMISE);
		pv->pv_meta = meta;

		/* Copy new metadata to the disks, adding or replacing old. */
		for (i = 0; i < vol->v_disks_count; i++) {
			sd = &vol->v_subdisks[i];
			disk = sd->sd_disk;
			if (disk == NULL)
				continue;
			/* For RAID0+1 we need to translate order. */
			pos = promise_meta_translate_disk(vol, i);
			pd = (struct g_raid_md_promise_perdisk *)disk->d_md_data;
			for (j = 0; j < pd->pd_subdisks; j++) {
				if (pd->pd_meta[j]->volume_id == meta->volume_id)
					break;
			}
			if (j == pd->pd_subdisks)
				pd->pd_subdisks++;
			if (pd->pd_meta[j] != NULL)
				free(pd->pd_meta[j], M_MD_PROMISE);
			pd->pd_meta[j] = promise_meta_copy(meta);
			pd->pd_meta[j]->disk = meta->disks[pos];
			pd->pd_meta[j]->disk.number = pos;
			pd->pd_meta[j]->disk_offset_high =
			    (sd->sd_offset / 512) >> 32;
			pd->pd_meta[j]->disk_offset = sd->sd_offset / 512;
			pd->pd_meta[j]->disk_sectors_high =
			    (sd->sd_size / 512) >> 32;
			pd->pd_meta[j]->disk_sectors = sd->sd_size / 512;
			if (sd->sd_state == G_RAID_SUBDISK_S_REBUILD) {
				pd->pd_meta[j]->disk_rebuild_high =
				    (sd->sd_rebuild_pos / 512) >> 32;
				pd->pd_meta[j]->disk_rebuild =
				    sd->sd_rebuild_pos / 512;
			} else if (sd->sd_state < G_RAID_SUBDISK_S_REBUILD) {
				pd->pd_meta[j]->disk_rebuild_high = 0;
				pd->pd_meta[j]->disk_rebuild = 0;
			} else {
				pd->pd_meta[j]->disk_rebuild_high = UINT32_MAX;
				pd->pd_meta[j]->disk_rebuild = UINT32_MAX;
			}
			pd->pd_updated = 1;
		}
	}

	TAILQ_FOREACH(disk, &sc->sc_disks, d_next) {
		pd = (struct g_raid_md_promise_perdisk *)disk->d_md_data;
		if (disk->d_state != G_RAID_DISK_S_ACTIVE)
			continue;
		if (!pd->pd_updated)
			continue;
		G_RAID_DEBUG(1, "Writing Promise metadata to %s",
		    g_raid_get_diskname(disk));
		for (i = 0; i < pd->pd_subdisks; i++)
			g_raid_md_promise_print(pd->pd_meta[i]);
		promise_meta_write(disk->d_consumer,
		    pd->pd_meta, pd->pd_subdisks);
		pd->pd_updated = 0;
	}

	return (0);
}

static int
g_raid_md_fail_disk_promise(struct g_raid_md_object *md,
    struct g_raid_subdisk *tsd, struct g_raid_disk *tdisk)
{
	struct g_raid_softc *sc;
	struct g_raid_md_promise_perdisk *pd;
	struct g_raid_subdisk *sd;
	int i, pos;

	sc = md->mdo_softc;
	pd = (struct g_raid_md_promise_perdisk *)tdisk->d_md_data;

	/* We can't fail disk that is not a part of array now. */
	if (tdisk->d_state != G_RAID_DISK_S_ACTIVE)
		return (-1);

	/*
	 * Mark disk as failed in metadata and try to write that metadata
	 * to the disk itself to prevent its later resurrection as STALE.
	 */
	if (pd->pd_subdisks > 0 && tdisk->d_consumer != NULL)
		G_RAID_DEBUG(1, "Writing Promise metadata to %s",
		    g_raid_get_diskname(tdisk));
	for (i = 0; i < pd->pd_subdisks; i++) {
		pd->pd_meta[i]->disk.flags |=
		    PROMISE_F_DOWN | PROMISE_F_REDIR;
		pos = pd->pd_meta[i]->disk.number;
		if (pos >= 0 && pos < PROMISE_MAX_DISKS) {
			pd->pd_meta[i]->disks[pos].flags |=
			    PROMISE_F_DOWN | PROMISE_F_REDIR;
		}
		g_raid_md_promise_print(pd->pd_meta[i]);
	}
	if (tdisk->d_consumer != NULL)
		promise_meta_write(tdisk->d_consumer,
		    pd->pd_meta, pd->pd_subdisks);

	/* Change states. */
	g_raid_change_disk_state(tdisk, G_RAID_DISK_S_FAILED);
	TAILQ_FOREACH(sd, &tdisk->d_subdisks, sd_next) {
		g_raid_change_subdisk_state(sd,
		    G_RAID_SUBDISK_S_FAILED);
		g_raid_event_send(sd, G_RAID_SUBDISK_E_FAILED,
		    G_RAID_EVENT_SUBDISK);
	}

	/* Write updated metadata to remaining disks. */
	g_raid_md_write_promise(md, NULL, NULL, tdisk);

	g_raid_md_promise_refill(sc);
	return (0);
}

static int
g_raid_md_free_disk_promise(struct g_raid_md_object *md,
    struct g_raid_disk *disk)
{
	struct g_raid_md_promise_perdisk *pd;
	int i;

	pd = (struct g_raid_md_promise_perdisk *)disk->d_md_data;
	for (i = 0; i < pd->pd_subdisks; i++) {
		if (pd->pd_meta[i] != NULL) {
			free(pd->pd_meta[i], M_MD_PROMISE);
			pd->pd_meta[i] = NULL;
		}
	}
	free(pd, M_MD_PROMISE);
	disk->d_md_data = NULL;
	return (0);
}

static int
g_raid_md_free_volume_promise(struct g_raid_md_object *md,
    struct g_raid_volume *vol)
{
	struct g_raid_md_promise_pervolume *pv;

	pv = (struct g_raid_md_promise_pervolume *)vol->v_md_data;
	if (pv && pv->pv_meta != NULL) {
		free(pv->pv_meta, M_MD_PROMISE);
		pv->pv_meta = NULL;
	}
	if (pv && !pv->pv_started) {
		pv->pv_started = 1;
		callout_stop(&pv->pv_start_co);
	}
	free(pv, M_MD_PROMISE);
	vol->v_md_data = NULL;
	return (0);
}

static int
g_raid_md_free_promise(struct g_raid_md_object *md)
{

	return (0);
}

G_RAID_MD_DECLARE(promise, "Promise");