1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved. 24 */ 25 26 /* 27 * Copyright (c) 2018, Joyent, Inc. 28 */ 29 30 #include <sys/scsi/scsi.h> 31 #include <sys/dktp/cm.h> 32 #include <sys/dktp/quetypes.h> 33 #include <sys/dktp/queue.h> 34 #include <sys/dktp/fctypes.h> 35 #include <sys/dktp/flowctrl.h> 36 #include <sys/dktp/cmdev.h> 37 #include <sys/dkio.h> 38 #include <sys/dktp/tgdk.h> 39 #include <sys/dktp/dadk.h> 40 #include <sys/dktp/bbh.h> 41 #include <sys/dktp/altsctr.h> 42 #include <sys/dktp/cmdk.h> 43 44 #include <sys/stat.h> 45 #include <sys/vtoc.h> 46 #include <sys/file.h> 47 #include <sys/dktp/dadkio.h> 48 #include <sys/aio_req.h> 49 50 #include <sys/cmlb.h> 51 52 /* 53 * Local Static Data 54 */ 55 #ifdef CMDK_DEBUG 56 #define DENT 0x0001 57 #define DIO 0x0002 58 59 static int cmdk_debug = DIO; 60 #endif 61 62 #ifndef TRUE 63 #define TRUE 1 64 #endif 65 66 #ifndef FALSE 67 #define FALSE 0 68 #endif 69 70 /* 71 * NDKMAP is the base number for accessing the fdisk partitions. 
 * c?d?p0 --> cmdk@?,?:q
 */
#define	PARTITION0_INDEX	(NDKMAP + 0)

/* Shorthand accessors for the dadk target object hung off the soft state */
#define	DKTP_DATA		(dkp->dk_tgobjp)->tg_data
#define	DKTP_EXT		(dkp->dk_tgobjp)->tg_ext

/* Per-instance soft state anchor, managed via ddi_soft_state(9F) */
void *cmdk_state;

/*
 * the cmdk_attach_mutex protects cmdk_max_instance in multi-threaded
 * attach situations
 */
static kmutex_t cmdk_attach_mutex;
static int cmdk_max_instance = 0;

/*
 * Panic dumpsys state
 * There is only a single flag that is not mutex locked since
 * the system is prevented from thread switching and cmdk_dump
 * will only be called in a single threaded operation.
 */
static int	cmdk_indump;

/*
 * Local Function Prototypes
 */
static int cmdk_create_obj(dev_info_t *dip, struct cmdk *dkp);
static void cmdk_destroy_obj(dev_info_t *dip, struct cmdk *dkp);
static void cmdkmin(struct buf *bp);
static int cmdkrw(dev_t dev, struct uio *uio, int flag);
static int cmdkarw(dev_t dev, struct aio_req *aio, int flag);

/*
 * Bad Block Handling Functions Prototypes
 */
static void cmdk_bbh_reopen(struct cmdk *dkp);
static opaque_t cmdk_bbh_gethandle(opaque_t bbh_data, struct buf *bp);
static bbh_cookie_t cmdk_bbh_htoc(opaque_t bbh_data, opaque_t handle);
static void cmdk_bbh_freehandle(opaque_t bbh_data, opaque_t handle);
static void cmdk_bbh_close(struct cmdk *dkp);
static void cmdk_bbh_setalts_idx(struct cmdk *dkp);
static int cmdk_bbh_bsearch(struct alts_ent *buf, int cnt, daddr32_t key);

/* Bad-block-handling ops vector handed to dadk via dk_bbh_obj */
static struct bbh_objops cmdk_bbh_ops = {
	nulldev,
	nulldev,
	cmdk_bbh_gethandle,
	cmdk_bbh_htoc,
	cmdk_bbh_freehandle,
	0, 0
};

static int cmdkopen(dev_t *dev_p, int flag, int otyp, cred_t *credp);
static int cmdkclose(dev_t dev, int flag, int otyp, cred_t *credp);
static int cmdkstrategy(struct buf *bp);
static int cmdkdump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk);
static int cmdkioctl(dev_t, int, intptr_t, int, cred_t *, int *);
static int cmdkread(dev_t dev, struct uio *uio, cred_t *credp);
static int cmdkwrite(dev_t dev, struct uio *uio, cred_t *credp);
static int cmdk_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp);
static int cmdkaread(dev_t dev, struct aio_req *aio, cred_t *credp);
static int cmdkawrite(dev_t dev, struct aio_req *aio, cred_t *credp);

/*
 * Device driver ops vector
 */

static struct cb_ops cmdk_cb_ops = {
	cmdkopen,		/* open */
	cmdkclose,		/* close */
	cmdkstrategy,		/* strategy */
	nodev,			/* print */
	cmdkdump,		/* dump */
	cmdkread,		/* read */
	cmdkwrite,		/* write */
	cmdkioctl,		/* ioctl */
	nodev,			/* devmap */
	nodev,			/* mmap */
	nodev,			/* segmap */
	nochpoll,		/* poll */
	cmdk_prop_op,		/* cb_prop_op */
	0,			/* streamtab */
	D_64BIT | D_MP | D_NEW,	/* Driver compatibility flag */
	CB_REV,			/* cb_rev */
	cmdkaread,		/* async read */
	cmdkawrite		/* async write */
};

static int cmdkinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
    void **result);
static int cmdkprobe(dev_info_t *dip);
static int cmdkattach(dev_info_t *dip, ddi_attach_cmd_t cmd);
static int cmdkdetach(dev_info_t *dip, ddi_detach_cmd_t cmd);

static void cmdk_setup_pm(dev_info_t *dip, struct cmdk *dkp);
static int cmdkresume(dev_info_t *dip);
static int cmdksuspend(dev_info_t *dip);
static int cmdkpower(dev_info_t *dip, int component, int level);

struct dev_ops cmdk_ops = {
	DEVO_REV,		/* devo_rev, */
	0,			/* refcnt  */
	cmdkinfo,		/* info */
	nulldev,		/* identify */
	cmdkprobe,		/* probe */
	cmdkattach,		/* attach */
	cmdkdetach,		/* detach */
	nodev,			/* reset */
	&cmdk_cb_ops,		/* driver operations */
	(struct bus_ops *)0,	/* bus operations */
	cmdkpower,		/* power */
	ddi_quiesce_not_needed,	/* quiesce */
};

/*
 * This is the loadable
module wrapper.
 */
#include <sys/modctl.h>

static struct modldrv modldrv = {
	&mod_driverops,		/* Type of module. This one is a driver */
	"Common Direct Access Disk",
	&cmdk_ops,		/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&modldrv, NULL
};

/* Function prototypes for cmlb callbacks */

static int cmdk_lb_rdwr(dev_info_t *dip, uchar_t cmd, void *bufaddr,
    diskaddr_t start, size_t length, void *tg_cookie);

static int cmdk_lb_getinfo(dev_info_t *dip, int cmd, void *arg,
    void *tg_cookie);

static void cmdk_devid_setup(struct cmdk *dkp);
static int cmdk_devid_modser(struct cmdk *dkp);
static int cmdk_get_modser(struct cmdk *dkp, int ioccmd, char *buf, int len);
static int cmdk_devid_fabricate(struct cmdk *dkp);
static int cmdk_devid_read(struct cmdk *dkp);

/* Target ops vector registered with cmlb at attach time */
static cmlb_tg_ops_t cmdk_lb_ops = {
	TG_DK_OPS_VERSION_1,
	cmdk_lb_rdwr,
	cmdk_lb_getinfo
};

/*
 * Return B_TRUE if the partition addressed by dev is open on this
 * instance, either via a layered (OTYP_LYR) open count or via any of
 * the regular open-type bitmasks.  Caller must hold dk_mutex.
 */
static boolean_t
cmdk_isopen(struct cmdk *dkp, dev_t dev)
{
	int		part, otyp;
	ulong_t		partbit;

	ASSERT(MUTEX_HELD((&dkp->dk_mutex)));

	part = CMDKPART(dev);
	partbit = 1 << part;

	/* account for close */
	if (dkp->dk_open_lyr[part] != 0)
		return (B_TRUE);
	for (otyp = 0; otyp < OTYPCNT; otyp++)
		if (dkp->dk_open_reg[otyp] & partbit)
			return (B_TRUE);
	return (B_FALSE);
}

int
_init(void)
{
	int	rval;

	if (rval = ddi_soft_state_init(&cmdk_state, sizeof (struct cmdk), 7))
		return (rval);

	/* unwind soft state if mod_install fails */
	mutex_init(&cmdk_attach_mutex, NULL, MUTEX_DRIVER, NULL);
	if ((rval = mod_install(&modlinkage)) != 0) {
		mutex_destroy(&cmdk_attach_mutex);
		ddi_soft_state_fini(&cmdk_state);
	}
	return (rval);
}

/*
 * This driver deliberately refuses to unload: always report EBUSY.
 */
int
_fini(void)
{
	return (EBUSY);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*
272 * Autoconfiguration Routines 273 */ 274 static int 275 cmdkprobe(dev_info_t *dip) 276 { 277 int instance; 278 int status; 279 struct cmdk *dkp; 280 281 instance = ddi_get_instance(dip); 282 283 if (ddi_get_soft_state(cmdk_state, instance)) 284 return (DDI_PROBE_PARTIAL); 285 286 if (ddi_soft_state_zalloc(cmdk_state, instance) != DDI_SUCCESS) 287 return (DDI_PROBE_PARTIAL); 288 289 if ((dkp = ddi_get_soft_state(cmdk_state, instance)) == NULL) 290 return (DDI_PROBE_PARTIAL); 291 292 mutex_init(&dkp->dk_mutex, NULL, MUTEX_DRIVER, NULL); 293 rw_init(&dkp->dk_bbh_mutex, NULL, RW_DRIVER, NULL); 294 dkp->dk_dip = dip; 295 mutex_enter(&dkp->dk_mutex); 296 297 dkp->dk_dev = makedevice(ddi_driver_major(dip), 298 ddi_get_instance(dip) << CMDK_UNITSHF); 299 300 /* linkage to dadk and strategy */ 301 if (cmdk_create_obj(dip, dkp) != DDI_SUCCESS) { 302 mutex_exit(&dkp->dk_mutex); 303 mutex_destroy(&dkp->dk_mutex); 304 rw_destroy(&dkp->dk_bbh_mutex); 305 ddi_soft_state_free(cmdk_state, instance); 306 return (DDI_PROBE_PARTIAL); 307 } 308 309 status = dadk_probe(DKTP_DATA, KM_NOSLEEP); 310 if (status != DDI_PROBE_SUCCESS) { 311 cmdk_destroy_obj(dip, dkp); /* dadk/strategy linkage */ 312 mutex_exit(&dkp->dk_mutex); 313 mutex_destroy(&dkp->dk_mutex); 314 rw_destroy(&dkp->dk_bbh_mutex); 315 ddi_soft_state_free(cmdk_state, instance); 316 return (status); 317 } 318 319 mutex_exit(&dkp->dk_mutex); 320 #ifdef CMDK_DEBUG 321 if (cmdk_debug & DENT) 322 PRF("cmdkprobe: instance= %d name= `%s`\n", 323 instance, ddi_get_name_addr(dip)); 324 #endif 325 return (status); 326 } 327 328 static int 329 cmdkattach(dev_info_t *dip, ddi_attach_cmd_t cmd) 330 { 331 int instance; 332 struct cmdk *dkp; 333 char *node_type; 334 335 switch (cmd) { 336 case DDI_ATTACH: 337 break; 338 case DDI_RESUME: 339 return (cmdkresume(dip)); 340 default: 341 return (DDI_FAILURE); 342 } 343 344 instance = ddi_get_instance(dip); 345 if (!(dkp = ddi_get_soft_state(cmdk_state, instance))) 346 return (DDI_FAILURE); 347 
348 dkp->dk_pm_level = CMDK_SPINDLE_UNINIT; 349 mutex_init(&dkp->dk_mutex, NULL, MUTEX_DRIVER, NULL); 350 351 mutex_enter(&dkp->dk_mutex); 352 353 /* dadk_attach is an empty function that only returns SUCCESS */ 354 (void) dadk_attach(DKTP_DATA); 355 356 node_type = (DKTP_EXT->tg_nodetype); 357 358 /* 359 * this open allows cmlb to read the device 360 * and determine the label types 361 * so that cmlb can create minor nodes for device 362 */ 363 364 /* open the target disk */ 365 if (dadk_open(DKTP_DATA, 0) != DDI_SUCCESS) 366 goto fail2; 367 368 #ifdef _ILP32 369 { 370 struct tgdk_geom phyg; 371 (void) dadk_getphygeom(DKTP_DATA, &phyg); 372 if ((phyg.g_cap - 1) > DK_MAX_BLOCKS) { 373 (void) dadk_close(DKTP_DATA); 374 goto fail2; 375 } 376 } 377 #endif 378 379 380 /* mark as having opened target */ 381 dkp->dk_flag |= CMDK_TGDK_OPEN; 382 383 cmlb_alloc_handle((cmlb_handle_t *)&dkp->dk_cmlbhandle); 384 385 if (cmlb_attach(dip, 386 &cmdk_lb_ops, 387 DTYPE_DIRECT, /* device_type */ 388 B_FALSE, /* removable */ 389 B_FALSE, /* hot pluggable XXX */ 390 node_type, 391 CMLB_CREATE_ALTSLICE_VTOC_16_DTYPE_DIRECT, /* alter_behaviour */ 392 dkp->dk_cmlbhandle, 393 0) != 0) 394 goto fail1; 395 396 /* Calling validate will create minor nodes according to disk label */ 397 (void) cmlb_validate(dkp->dk_cmlbhandle, 0, 0); 398 399 /* set bbh (Bad Block Handling) */ 400 cmdk_bbh_reopen(dkp); 401 402 /* setup devid string */ 403 cmdk_devid_setup(dkp); 404 405 mutex_enter(&cmdk_attach_mutex); 406 if (instance > cmdk_max_instance) 407 cmdk_max_instance = instance; 408 mutex_exit(&cmdk_attach_mutex); 409 410 mutex_exit(&dkp->dk_mutex); 411 412 /* 413 * Add a zero-length attribute to tell the world we support 414 * kernel ioctls (for layered drivers) 415 */ 416 (void) ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP, 417 DDI_KERNEL_IOCTL, NULL, 0); 418 ddi_report_dev(dip); 419 420 /* 421 * Initialize power management 422 */ 423 mutex_init(&dkp->dk_pm_mutex, NULL, MUTEX_DRIVER, 
NULL); 424 cv_init(&dkp->dk_suspend_cv, NULL, CV_DRIVER, NULL); 425 cmdk_setup_pm(dip, dkp); 426 427 return (DDI_SUCCESS); 428 429 fail1: 430 cmlb_free_handle(&dkp->dk_cmlbhandle); 431 (void) dadk_close(DKTP_DATA); 432 fail2: 433 cmdk_destroy_obj(dip, dkp); 434 rw_destroy(&dkp->dk_bbh_mutex); 435 mutex_exit(&dkp->dk_mutex); 436 mutex_destroy(&dkp->dk_mutex); 437 ddi_soft_state_free(cmdk_state, instance); 438 return (DDI_FAILURE); 439 } 440 441 442 static int 443 cmdkdetach(dev_info_t *dip, ddi_detach_cmd_t cmd) 444 { 445 struct cmdk *dkp; 446 int instance; 447 int max_instance; 448 449 switch (cmd) { 450 case DDI_DETACH: 451 /* return (DDI_FAILURE); */ 452 break; 453 case DDI_SUSPEND: 454 return (cmdksuspend(dip)); 455 default: 456 #ifdef CMDK_DEBUG 457 if (cmdk_debug & DIO) { 458 PRF("cmdkdetach: cmd = %d unknown\n", cmd); 459 } 460 #endif 461 return (DDI_FAILURE); 462 } 463 464 mutex_enter(&cmdk_attach_mutex); 465 max_instance = cmdk_max_instance; 466 mutex_exit(&cmdk_attach_mutex); 467 468 /* check if any instance of driver is open */ 469 for (instance = 0; instance < max_instance; instance++) { 470 dkp = ddi_get_soft_state(cmdk_state, instance); 471 if (!dkp) 472 continue; 473 if (dkp->dk_flag & CMDK_OPEN) 474 return (DDI_FAILURE); 475 } 476 477 instance = ddi_get_instance(dip); 478 if (!(dkp = ddi_get_soft_state(cmdk_state, instance))) 479 return (DDI_SUCCESS); 480 481 mutex_enter(&dkp->dk_mutex); 482 483 /* 484 * The cmdk_part_info call at the end of cmdkattach may have 485 * caused cmdk_reopen to do a TGDK_OPEN, make sure we close on 486 * detach for case when cmdkopen/cmdkclose never occurs. 
 */
	if (dkp->dk_flag & CMDK_TGDK_OPEN) {
		dkp->dk_flag &= ~CMDK_TGDK_OPEN;
		(void) dadk_close(DKTP_DATA);
	}

	cmlb_detach(dkp->dk_cmlbhandle, 0);
	cmlb_free_handle(&dkp->dk_cmlbhandle);
	ddi_prop_remove_all(dip);

	cmdk_destroy_obj(dip, dkp);	/* dadk/strategy linkage */

	/*
	 * free the devid structure if allocated before
	 */
	if (dkp->dk_devid) {
		ddi_devid_free(dkp->dk_devid);
		dkp->dk_devid = NULL;
	}

	mutex_exit(&dkp->dk_mutex);
	mutex_destroy(&dkp->dk_mutex);
	rw_destroy(&dkp->dk_bbh_mutex);
	mutex_destroy(&dkp->dk_pm_mutex);
	cv_destroy(&dkp->dk_suspend_cv);
	ddi_soft_state_free(cmdk_state, instance);

	return (DDI_SUCCESS);
}

/*
 * getinfo(9E): translate a dev_t into the matching dev_info pointer or
 * instance number for the DDI framework.
 */
static int
cmdkinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	dev_t		dev = (dev_t)arg;
	int		instance;
	struct	cmdk	*dkp;

#ifdef lint
	dip = dip;	/* no one ever uses this */
#endif
#ifdef CMDK_DEBUG
	if (cmdk_debug & DENT)
		PRF("cmdkinfo: call\n");
#endif
	instance = CMDKUNIT(dev);

	switch (infocmd) {
	case DDI_INFO_DEVT2DEVINFO:
		if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
			return (DDI_FAILURE);
		*result = (void *) dkp->dk_dip;
		break;
	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)(intptr_t)instance;
		break;
	default:
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}

/*
 * Initialize the power management components
 */
static void
cmdk_setup_pm(dev_info_t *dip, struct cmdk *dkp)
{
	/* NULL terminator is not counted in the element count below */
	char *pm_comp[] = { "NAME=cmdk", "0=off", "1=on", NULL };

	/*
	 * Since the cmdk device does not have the 'reg' property,
	 * cpr will not call its DDI_SUSPEND/DDI_RESUME entries.
	 * The following code is to tell cpr that this device
	 * DOES need to be suspended and resumed.
	 */
	(void) ddi_prop_update_string(DDI_DEV_T_NONE, dip,
	    "pm-hardware-state", "needs-suspend-resume");

	/*
	 * Publish the pm components and try to spin the disk up; on any
	 * failure fall back to marking pm disabled for this instance.
	 */
	if (ddi_prop_update_string_array(DDI_DEV_T_NONE, dip,
	    "pm-components", pm_comp, 3) == DDI_PROP_SUCCESS) {
		if (pm_raise_power(dip, 0, CMDK_SPINDLE_ON) == DDI_SUCCESS) {
			mutex_enter(&dkp->dk_pm_mutex);
			dkp->dk_pm_level = CMDK_SPINDLE_ON;
			dkp->dk_pm_is_enabled = 1;
			mutex_exit(&dkp->dk_pm_mutex);
		} else {
			mutex_enter(&dkp->dk_pm_mutex);
			dkp->dk_pm_level = CMDK_SPINDLE_OFF;
			dkp->dk_pm_is_enabled = 0;
			mutex_exit(&dkp->dk_pm_mutex);
		}
	} else {
		mutex_enter(&dkp->dk_pm_mutex);
		dkp->dk_pm_level = CMDK_SPINDLE_UNINIT;
		dkp->dk_pm_is_enabled = 0;
		mutex_exit(&dkp->dk_pm_mutex);
	}
}

/*
 * suspend routine, it will be run when get the command
 * DDI_SUSPEND at detach(9E) from system power management
 */
static int
cmdksuspend(dev_info_t *dip)
{
	struct cmdk	*dkp;
	int		instance;
	clock_t		count = 0;

	instance = ddi_get_instance(dip);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
		return (DDI_FAILURE);
	mutex_enter(&dkp->dk_mutex);
	if (dkp->dk_flag & CMDK_SUSPEND) {
		mutex_exit(&dkp->dk_mutex);
		return (DDI_SUCCESS);
	}
	dkp->dk_flag |= CMDK_SUSPEND;

	/*
	 * need to wait a while: poll once a second, give up (and undo
	 * the suspend) after roughly a minute of outstanding commands
	 */
	while (dadk_getcmds(DKTP_DATA) != 0) {
		delay(drv_usectohz(1000000));
		if (count > 60) {
			dkp->dk_flag &= ~CMDK_SUSPEND;
			cv_broadcast(&dkp->dk_suspend_cv);
			mutex_exit(&dkp->dk_mutex);
			return (DDI_FAILURE);
		}
		count++;
	}
	mutex_exit(&dkp->dk_mutex);
	return (DDI_SUCCESS);
}

/*
 * resume routine, it will be run when get the command
 * DDI_RESUME at attach(9E) from system power management
 */
static int
cmdkresume(dev_info_t *dip)
{
	struct cmdk	*dkp;
	int		instance;

	instance = ddi_get_instance(dip);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
		return (DDI_FAILURE);
	mutex_enter(&dkp->dk_mutex);
	if (!(dkp->dk_flag & CMDK_SUSPEND)) {
		mutex_exit(&dkp->dk_mutex);
		return (DDI_FAILURE);
	}
	dkp->dk_pm_level = CMDK_SPINDLE_ON;
	dkp->dk_flag &= ~CMDK_SUSPEND;
	/* wake any threads parked in the CMDK_SUSPEND wait loops */
	cv_broadcast(&dkp->dk_suspend_cv);
	mutex_exit(&dkp->dk_mutex);
	return (DDI_SUCCESS);

}

/*
 * power management entry point, it was used to
 * change power management component.
 * Actually, the real hard drive suspend/resume
 * was handled in ata, so this function is not
 * doing any real work other than verifying that
 * the disk is idle.
 */
static int
cmdkpower(dev_info_t *dip, int component, int level)
{
	struct cmdk	*dkp;
	int		instance;

	instance = ddi_get_instance(dip);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)) ||
	    component != 0 || level > CMDK_SPINDLE_ON ||
	    level < CMDK_SPINDLE_OFF) {
		return (DDI_FAILURE);
	}

	mutex_enter(&dkp->dk_pm_mutex);
	if (dkp->dk_pm_is_enabled && dkp->dk_pm_level == level) {
		mutex_exit(&dkp->dk_pm_mutex);
		return (DDI_SUCCESS);
	}
	mutex_exit(&dkp->dk_pm_mutex);

	/* refuse to spin down while commands are outstanding */
	if ((level == CMDK_SPINDLE_OFF) &&
	    (dadk_getcmds(DKTP_DATA) != 0)) {
		return (DDI_FAILURE);
	}

	mutex_enter(&dkp->dk_pm_mutex);
	dkp->dk_pm_level = level;
	mutex_exit(&dkp->dk_pm_mutex);
	return (DDI_SUCCESS);
}

/*
 * prop_op(9E): delegate to cmlb when the instance is attached so
 * partition-size properties reflect the label; otherwise fall back to
 * the default ddi_prop_op.
 */
static int
cmdk_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
    char *name, caddr_t valuep, int *lengthp)
{
	struct	cmdk	*dkp;

#ifdef CMDK_DEBUG
	if (cmdk_debug & DENT)
		PRF("cmdk_prop_op: call\n");
#endif

	dkp = ddi_get_soft_state(cmdk_state, ddi_get_instance(dip));
	if (dkp == NULL)
		return (ddi_prop_op(dev, dip, prop_op, mod_flags,
		    name, valuep, lengthp));

	return (cmlb_prop_op(dkp->dk_cmlbhandle,
	    dev, dip, prop_op, mod_flags, name, valuep, lengthp,
	    CMDKPART(dev), NULL));
}

/*
 * dump routine
 *
 * Panic-dump entry point: writes nblk blocks from addr to the given
 * partition-relative block, bypassing the normal queued I/O path.
 */
static int
cmdkdump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk)
{
	int		instance;
	struct	cmdk	*dkp;
	diskaddr_t	p_lblksrt;
	diskaddr_t	p_lblkcnt;
	struct	buf	local;
	struct	buf	*bp;

#ifdef CMDK_DEBUG
	if (cmdk_debug & DENT)
		PRF("cmdkdump: call\n");
#endif
	instance = CMDKUNIT(dev);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)) || (blkno < 0))
		return (ENXIO);

	if (cmlb_partinfo(
	    dkp->dk_cmlbhandle,
	    CMDKPART(dev),
	    &p_lblkcnt,
	    &p_lblksrt,
	    NULL,
	    NULL,
	    0)) {
		return (ENXIO);
	}

	/* the dump must fit entirely inside the target partition */
	if ((blkno+nblk) > p_lblkcnt)
		return (EINVAL);

	cmdk_indump = 1;	/* Tell disk targets we are panic dumping */

	/* build a throwaway buf on the stack; no biodone/biowait here */
	bp = &local;
	bzero(bp, sizeof (*bp));
	bp->b_flags = B_BUSY;
	bp->b_un.b_addr = addr;
	bp->b_bcount = nblk << SCTRSHFT;
	SET_BP_SEC(bp, ((ulong_t)(p_lblksrt + blkno)));

	(void) dadk_dump(DKTP_DATA, bp);
	return (bp->b_error);
}

/*
 * Copy in the dadkio_rwcmd according to the user's data model.  If needed,
 * convert it for our internal use.
 */
static int
rwcmd_copyin(struct dadkio_rwcmd *rwcmdp, caddr_t inaddr, int flag)
{
	switch (ddi_model_convert_from(flag)) {
	case DDI_MODEL_ILP32: {
		struct dadkio_rwcmd32 cmd32;

		if (ddi_copyin(inaddr, &cmd32,
		    sizeof (struct dadkio_rwcmd32), flag)) {
			return (EFAULT);
		}

		/* widen the 32-bit layout into the native rwcmd */
		rwcmdp->cmd = cmd32.cmd;
		rwcmdp->flags = cmd32.flags;
		rwcmdp->blkaddr = (blkaddr_t)cmd32.blkaddr;
		rwcmdp->buflen = cmd32.buflen;
		rwcmdp->bufaddr = (caddr_t)(intptr_t)cmd32.bufaddr;
		/*
		 * Note: we do not convert the 'status' field,
		 * as it should not contain valid data at this
		 * point.
781 */ 782 bzero(&rwcmdp->status, sizeof (rwcmdp->status)); 783 break; 784 } 785 case DDI_MODEL_NONE: { 786 if (ddi_copyin(inaddr, rwcmdp, 787 sizeof (struct dadkio_rwcmd), flag)) { 788 return (EFAULT); 789 } 790 } 791 } 792 return (0); 793 } 794 795 /* 796 * If necessary, convert the internal rwcmdp and status to the appropriate 797 * data model and copy it out to the user. 798 */ 799 static int 800 rwcmd_copyout(struct dadkio_rwcmd *rwcmdp, caddr_t outaddr, int flag) 801 { 802 switch (ddi_model_convert_from(flag)) { 803 case DDI_MODEL_ILP32: { 804 struct dadkio_rwcmd32 cmd32; 805 806 cmd32.cmd = rwcmdp->cmd; 807 cmd32.flags = rwcmdp->flags; 808 cmd32.blkaddr = rwcmdp->blkaddr; 809 cmd32.buflen = rwcmdp->buflen; 810 ASSERT64(((uintptr_t)rwcmdp->bufaddr >> 32) == 0); 811 cmd32.bufaddr = (caddr32_t)(uintptr_t)rwcmdp->bufaddr; 812 813 cmd32.status.status = rwcmdp->status.status; 814 cmd32.status.resid = rwcmdp->status.resid; 815 cmd32.status.failed_blk_is_valid = 816 rwcmdp->status.failed_blk_is_valid; 817 cmd32.status.failed_blk = rwcmdp->status.failed_blk; 818 cmd32.status.fru_code_is_valid = 819 rwcmdp->status.fru_code_is_valid; 820 cmd32.status.fru_code = rwcmdp->status.fru_code; 821 822 bcopy(rwcmdp->status.add_error_info, 823 cmd32.status.add_error_info, DADKIO_ERROR_INFO_LEN); 824 825 if (ddi_copyout(&cmd32, outaddr, 826 sizeof (struct dadkio_rwcmd32), flag)) 827 return (EFAULT); 828 break; 829 } 830 case DDI_MODEL_NONE: { 831 if (ddi_copyout(rwcmdp, outaddr, 832 sizeof (struct dadkio_rwcmd), flag)) 833 return (EFAULT); 834 } 835 } 836 return (0); 837 } 838 839 /* 840 * ioctl routine 841 */ 842 static int 843 cmdkioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *credp, int *rvalp) 844 { 845 int instance; 846 struct scsi_device *devp; 847 struct cmdk *dkp; 848 char data[NBPSCTR]; 849 850 instance = CMDKUNIT(dev); 851 if (!(dkp = ddi_get_soft_state(cmdk_state, instance))) 852 return (ENXIO); 853 854 mutex_enter(&dkp->dk_mutex); 855 while (dkp->dk_flag & 
CMDK_SUSPEND) { 856 cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex); 857 } 858 mutex_exit(&dkp->dk_mutex); 859 860 bzero(data, sizeof (data)); 861 862 switch (cmd) { 863 864 case DKIOCGMEDIAINFO: { 865 struct dk_minfo media_info; 866 struct tgdk_geom phyg; 867 868 /* dadk_getphygeom always returns success */ 869 (void) dadk_getphygeom(DKTP_DATA, &phyg); 870 871 media_info.dki_lbsize = phyg.g_secsiz; 872 media_info.dki_capacity = phyg.g_cap; 873 media_info.dki_media_type = DK_FIXED_DISK; 874 875 if (ddi_copyout(&media_info, (void *)arg, 876 sizeof (struct dk_minfo), flag)) { 877 return (EFAULT); 878 } else { 879 return (0); 880 } 881 } 882 883 case DKIOCINFO: { 884 struct dk_cinfo *info = (struct dk_cinfo *)data; 885 886 /* controller information */ 887 info->dki_ctype = (DKTP_EXT->tg_ctype); 888 info->dki_cnum = ddi_get_instance(ddi_get_parent(dkp->dk_dip)); 889 (void) strcpy(info->dki_cname, 890 ddi_get_name(ddi_get_parent(dkp->dk_dip))); 891 892 /* Unit Information */ 893 info->dki_unit = ddi_get_instance(dkp->dk_dip); 894 devp = ddi_get_driver_private(dkp->dk_dip); 895 info->dki_slave = (CMDEV_TARG(devp)<<3) | CMDEV_LUN(devp); 896 (void) strcpy(info->dki_dname, ddi_driver_name(dkp->dk_dip)); 897 info->dki_flags = DKI_FMTVOL; 898 info->dki_partition = CMDKPART(dev); 899 900 info->dki_maxtransfer = maxphys / DEV_BSIZE; 901 info->dki_addr = 1; 902 info->dki_space = 0; 903 info->dki_prio = 0; 904 info->dki_vec = 0; 905 906 if (ddi_copyout(data, (void *)arg, sizeof (*info), flag)) 907 return (EFAULT); 908 else 909 return (0); 910 } 911 912 case DKIOCSTATE: { 913 int state; 914 int rval; 915 diskaddr_t p_lblksrt; 916 diskaddr_t p_lblkcnt; 917 918 if (ddi_copyin((void *)arg, &state, sizeof (int), flag)) 919 return (EFAULT); 920 921 /* dadk_check_media blocks until state changes */ 922 if (rval = dadk_check_media(DKTP_DATA, &state)) 923 return (rval); 924 925 if (state == DKIO_INSERTED) { 926 927 if (cmlb_validate(dkp->dk_cmlbhandle, 0, 0) != 0) 928 return (ENXIO); 929 930 
if (cmlb_partinfo(dkp->dk_cmlbhandle, CMDKPART(dev), 931 &p_lblkcnt, &p_lblksrt, NULL, NULL, 0)) 932 return (ENXIO); 933 934 if (p_lblkcnt <= 0) 935 return (ENXIO); 936 } 937 938 if (ddi_copyout(&state, (caddr_t)arg, sizeof (int), flag)) 939 return (EFAULT); 940 941 return (0); 942 } 943 944 /* 945 * is media removable? 946 */ 947 case DKIOCREMOVABLE: { 948 int i; 949 950 i = (DKTP_EXT->tg_rmb) ? 1 : 0; 951 952 if (ddi_copyout(&i, (caddr_t)arg, sizeof (int), flag)) 953 return (EFAULT); 954 955 return (0); 956 } 957 958 case DKIOCADDBAD: 959 /* 960 * This is not an update mechanism to add bad blocks 961 * to the bad block structures stored on disk. 962 * 963 * addbadsec(1M) will update the bad block data on disk 964 * and use this ioctl to force the driver to re-initialize 965 * the list of bad blocks in the driver. 966 */ 967 968 /* start BBH */ 969 cmdk_bbh_reopen(dkp); 970 return (0); 971 972 case DKIOCG_PHYGEOM: 973 case DKIOCG_VIRTGEOM: 974 case DKIOCGGEOM: 975 case DKIOCSGEOM: 976 case DKIOCGAPART: 977 case DKIOCSAPART: 978 case DKIOCGVTOC: 979 case DKIOCSVTOC: 980 case DKIOCPARTINFO: 981 case DKIOCGEXTVTOC: 982 case DKIOCSEXTVTOC: 983 case DKIOCEXTPARTINFO: 984 case DKIOCGMBOOT: 985 case DKIOCSMBOOT: 986 case DKIOCGETEFI: 987 case DKIOCSETEFI: 988 case DKIOCPARTITION: 989 case DKIOCSETEXTPART: 990 { 991 int rc; 992 993 rc = cmlb_ioctl(dkp->dk_cmlbhandle, dev, cmd, arg, flag, 994 credp, rvalp, 0); 995 if (cmd == DKIOCSVTOC || cmd == DKIOCSEXTVTOC) 996 cmdk_devid_setup(dkp); 997 return (rc); 998 } 999 1000 case DIOCTL_RWCMD: { 1001 struct dadkio_rwcmd *rwcmdp; 1002 int status; 1003 1004 rwcmdp = kmem_alloc(sizeof (struct dadkio_rwcmd), KM_SLEEP); 1005 1006 status = rwcmd_copyin(rwcmdp, (caddr_t)arg, flag); 1007 1008 if (status == 0) { 1009 bzero(&(rwcmdp->status), sizeof (struct dadkio_status)); 1010 status = dadk_ioctl(DKTP_DATA, 1011 dev, 1012 cmd, 1013 (uintptr_t)rwcmdp, 1014 flag, 1015 credp, 1016 rvalp); 1017 } 1018 if (status == 0) 1019 status = 
			    rwcmd_copyout(rwcmdp, (caddr_t)arg, flag);

		kmem_free(rwcmdp, sizeof (struct dadkio_rwcmd));
		return (status);
	}

	default:
		/* everything else is passed straight through to dadk */
		return (dadk_ioctl(DKTP_DATA,
		    dev,
		    cmd,
		    arg,
		    flag,
		    credp,
		    rvalp));
	}
}

/*
 * close(9E): drop the layered count or regular open bit for this
 * partition; when the very last open goes away, invalidate the cmlb
 * label state.
 */
/*ARGSUSED1*/
static int
cmdkclose(dev_t dev, int flag, int otyp, cred_t *credp)
{
	int		part;
	ulong_t		partbit;
	int		instance;
	struct	cmdk	*dkp;
	int		lastclose = 1;
	int		i;

	instance = CMDKUNIT(dev);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)) ||
	    (otyp >= OTYPCNT))
		return (ENXIO);

	mutex_enter(&dkp->dk_mutex);

	/* check if device has been opened */
	ASSERT(cmdk_isopen(dkp, dev));
	if (!(dkp->dk_flag & CMDK_OPEN)) {
		mutex_exit(&dkp->dk_mutex);
		return (ENXIO);
	}

	/* block while the instance is suspended */
	while (dkp->dk_flag & CMDK_SUSPEND) {
		cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex);
	}

	part = CMDKPART(dev);
	partbit = 1 << part;

	/* account for close */
	if (otyp == OTYP_LYR) {
		ASSERT(dkp->dk_open_lyr[part] > 0);
		if (dkp->dk_open_lyr[part])
			dkp->dk_open_lyr[part]--;
	} else {
		ASSERT((dkp->dk_open_reg[otyp] & partbit) != 0);
		dkp->dk_open_reg[otyp] &= ~partbit;
	}
	/* an exclusive open always ends with its close */
	dkp->dk_open_exl &= ~partbit;

	/* lastclose stays set only if no layered or regular opens remain */
	for (i = 0; i < CMDK_MAXPART; i++)
		if (dkp->dk_open_lyr[i] != 0) {
			lastclose = 0;
			break;
		}

	if (lastclose)
		for (i = 0; i < OTYPCNT; i++)
			if (dkp->dk_open_reg[i] != 0) {
				lastclose = 0;
				break;
			}

	mutex_exit(&dkp->dk_mutex);

	if (lastclose)
		cmlb_invalidate(dkp->dk_cmlbhandle, 0);

	return (DDI_SUCCESS);
}

/*
 * open(9E): validate the label/partition (unless this is a
 * FNDELAY/FNONBLOCK open), honour read-only media and exclusive-open
 * semantics, then record the open in the per-type accounting.
 */
/*ARGSUSED3*/
static int
cmdkopen(dev_t *dev_p, int flag, int otyp, cred_t *credp)
{
	dev_t		dev = *dev_p;
	int		part;
	ulong_t		partbit;
	int		instance;
	struct	cmdk	*dkp;
	diskaddr_t	p_lblksrt;
	diskaddr_t	p_lblkcnt;
	int		i;
	int		nodelay;

	instance = CMDKUNIT(dev);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
		return (ENXIO);

	if (otyp >= OTYPCNT)
		return (EINVAL);

	/* block while the instance is suspended */
	mutex_enter(&dkp->dk_mutex);
	while (dkp->dk_flag & CMDK_SUSPEND) {
		cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex);
	}
	mutex_exit(&dkp->dk_mutex);

	part = CMDKPART(dev);
	partbit = 1 << part;
	nodelay = (flag & (FNDELAY | FNONBLOCK));

	mutex_enter(&dkp->dk_mutex);

	if (cmlb_validate(dkp->dk_cmlbhandle, 0, 0) != 0) {

		/* fail if not doing non block open */
		if (!nodelay) {
			mutex_exit(&dkp->dk_mutex);
			return (ENXIO);
		}
	} else if (cmlb_partinfo(dkp->dk_cmlbhandle, part, &p_lblkcnt,
	    &p_lblksrt, NULL, NULL, 0) == 0) {

		/* zero-length partitions only open nodelay via char dev */
		if (p_lblkcnt <= 0 && (!nodelay || otyp != OTYP_CHR)) {
			mutex_exit(&dkp->dk_mutex);
			return (ENXIO);
		}
	} else {
		/* fail if not doing non block open */
		if (!nodelay) {
			mutex_exit(&dkp->dk_mutex);
			return (ENXIO);
		}
	}

	if ((DKTP_EXT->tg_rdonly) && (flag & FWRITE)) {
		mutex_exit(&dkp->dk_mutex);
		return (EROFS);
	}

	/* check for part already opened exclusively */
	if (dkp->dk_open_exl & partbit)
		goto excl_open_fail;

	/* check if we can establish exclusive open */
	if (flag & FEXCL) {
		if (dkp->dk_open_lyr[part])
			goto excl_open_fail;
		for (i = 0; i < OTYPCNT; i++) {
			if (dkp->dk_open_reg[i] & partbit)
				goto excl_open_fail;
		}
	}

	/* open will succeed, account for open */
	dkp->dk_flag |= CMDK_OPEN;
	if (otyp == OTYP_LYR)
		dkp->dk_open_lyr[part]++;
	else
		dkp->dk_open_reg[otyp] |= partbit;
	if (flag & FEXCL)
		dkp->dk_open_exl |= partbit;

	mutex_exit(&dkp->dk_mutex);
	return (DDI_SUCCESS);

excl_open_fail:
	mutex_exit(&dkp->dk_mutex);
	return (EBUSY);
}

/*
 * read routine
 */
/*ARGSUSED2*/
static int
cmdkread(dev_t dev, struct uio *uio, cred_t *credp)
{
	return (cmdkrw(dev, uio, B_READ));
}

/*
 * async read routine
 */
/*ARGSUSED2*/
static int
cmdkaread(dev_t dev, struct aio_req *aio, cred_t *credp)
{
	return (cmdkarw(dev, aio, B_READ));
}

/*
 * write routine
 */
/*ARGSUSED2*/
static int
cmdkwrite(dev_t dev, struct uio *uio, cred_t *credp)
{
	return (cmdkrw(dev, uio, B_WRITE));
}

/*
 * async write routine
 */
/*ARGSUSED2*/
static int
cmdkawrite(dev_t dev, struct aio_req *aio, cred_t *credp)
{
	return (cmdkarw(dev, aio, B_WRITE));
}

/*
 * minphys callback for (a)physio: clamp a transfer to the largest
 * record size the driver supports.
 */
static void
cmdkmin(struct buf *bp)
{
	if (bp->b_bcount > DK_MAXRECSIZE)
		bp->b_bcount = DK_MAXRECSIZE;
}

/*
 * Common synchronous read/write path: wait out a suspend, then hand
 * the uio to physio with cmdkstrategy.
 */
static int
cmdkrw(dev_t dev, struct uio *uio, int flag)
{
	int		instance;
	struct	cmdk	*dkp;

	instance = CMDKUNIT(dev);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
		return (ENXIO);

	mutex_enter(&dkp->dk_mutex);
	while (dkp->dk_flag & CMDK_SUSPEND) {
		cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex);
	}
	mutex_exit(&dkp->dk_mutex);

	return (physio(cmdkstrategy, (struct buf *)0, dev, flag, cmdkmin, uio));
}

/*
 * Common asynchronous read/write path: wait out a suspend, then hand
 * the aio request to aphysio with cmdkstrategy.
 */
static int
cmdkarw(dev_t dev, struct aio_req *aio, int flag)
{
	int		instance;
	struct	cmdk	*dkp;

	instance = CMDKUNIT(dev);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
		return (ENXIO);

	mutex_enter(&dkp->dk_mutex);
	while (dkp->dk_flag & CMDK_SUSPEND) {
		cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex);
	}
	mutex_exit(&dkp->dk_mutex);

	return (aphysio(cmdkstrategy, anocancel, dev, flag, cmdkmin, aio));
}

/*
 * strategy routine
 */
static int
cmdkstrategy(struct buf *bp) 1281 { 1282 int instance; 1283 struct cmdk *dkp; 1284 long d_cnt; 1285 diskaddr_t p_lblksrt; 1286 diskaddr_t p_lblkcnt; 1287 1288 instance = CMDKUNIT(bp->b_edev); 1289 if (cmdk_indump || !(dkp = ddi_get_soft_state(cmdk_state, instance)) || 1290 (dkblock(bp) < 0)) { 1291 bp->b_resid = bp->b_bcount; 1292 SETBPERR(bp, ENXIO); 1293 biodone(bp); 1294 return (0); 1295 } 1296 1297 mutex_enter(&dkp->dk_mutex); 1298 ASSERT(cmdk_isopen(dkp, bp->b_edev)); 1299 while (dkp->dk_flag & CMDK_SUSPEND) { 1300 cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex); 1301 } 1302 mutex_exit(&dkp->dk_mutex); 1303 1304 bp->b_flags &= ~(B_DONE|B_ERROR); 1305 bp->b_resid = 0; 1306 bp->av_back = NULL; 1307 1308 /* 1309 * only re-read the vtoc if necessary (force == FALSE) 1310 */ 1311 if (cmlb_partinfo(dkp->dk_cmlbhandle, CMDKPART(bp->b_edev), 1312 &p_lblkcnt, &p_lblksrt, NULL, NULL, 0)) { 1313 SETBPERR(bp, ENXIO); 1314 } 1315 1316 if ((bp->b_bcount & (NBPSCTR-1)) || (dkblock(bp) > p_lblkcnt)) 1317 SETBPERR(bp, ENXIO); 1318 1319 if ((bp->b_flags & B_ERROR) || (dkblock(bp) == p_lblkcnt)) { 1320 bp->b_resid = bp->b_bcount; 1321 biodone(bp); 1322 return (0); 1323 } 1324 1325 d_cnt = bp->b_bcount >> SCTRSHFT; 1326 if ((dkblock(bp) + d_cnt) > p_lblkcnt) { 1327 bp->b_resid = ((dkblock(bp) + d_cnt) - p_lblkcnt) << SCTRSHFT; 1328 bp->b_bcount -= bp->b_resid; 1329 } 1330 1331 SET_BP_SEC(bp, ((ulong_t)(p_lblksrt + dkblock(bp)))); 1332 if (dadk_strategy(DKTP_DATA, bp) != DDI_SUCCESS) { 1333 bp->b_resid += bp->b_bcount; 1334 biodone(bp); 1335 } 1336 return (0); 1337 } 1338 1339 static int 1340 cmdk_create_obj(dev_info_t *dip, struct cmdk *dkp) 1341 { 1342 struct scsi_device *devp; 1343 opaque_t queobjp = NULL; 1344 opaque_t flcobjp = NULL; 1345 char que_keyvalp[64]; 1346 int que_keylen; 1347 char flc_keyvalp[64]; 1348 int flc_keylen; 1349 1350 ASSERT(mutex_owned(&dkp->dk_mutex)); 1351 1352 /* Create linkage to queueing routines based on property */ 1353 que_keylen = sizeof 
(que_keyvalp); 1354 if (ddi_prop_op(DDI_DEV_T_NONE, dip, PROP_LEN_AND_VAL_BUF, 1355 DDI_PROP_CANSLEEP, "queue", que_keyvalp, &que_keylen) != 1356 DDI_PROP_SUCCESS) { 1357 cmn_err(CE_WARN, "cmdk_create_obj: queue property undefined"); 1358 return (DDI_FAILURE); 1359 } 1360 que_keyvalp[que_keylen] = (char)0; 1361 1362 if (strcmp(que_keyvalp, "qfifo") == 0) { 1363 queobjp = (opaque_t)qfifo_create(); 1364 } else if (strcmp(que_keyvalp, "qsort") == 0) { 1365 queobjp = (opaque_t)qsort_create(); 1366 } else { 1367 return (DDI_FAILURE); 1368 } 1369 1370 /* Create linkage to dequeueing routines based on property */ 1371 flc_keylen = sizeof (flc_keyvalp); 1372 if (ddi_prop_op(DDI_DEV_T_NONE, dip, PROP_LEN_AND_VAL_BUF, 1373 DDI_PROP_CANSLEEP, "flow_control", flc_keyvalp, &flc_keylen) != 1374 DDI_PROP_SUCCESS) { 1375 cmn_err(CE_WARN, 1376 "cmdk_create_obj: flow-control property undefined"); 1377 return (DDI_FAILURE); 1378 } 1379 1380 flc_keyvalp[flc_keylen] = (char)0; 1381 1382 if (strcmp(flc_keyvalp, "dsngl") == 0) { 1383 flcobjp = (opaque_t)dsngl_create(); 1384 } else if (strcmp(flc_keyvalp, "dmult") == 0) { 1385 flcobjp = (opaque_t)dmult_create(); 1386 } else { 1387 return (DDI_FAILURE); 1388 } 1389 1390 /* populate bbh_obj object stored in dkp */ 1391 dkp->dk_bbh_obj.bbh_data = dkp; 1392 dkp->dk_bbh_obj.bbh_ops = &cmdk_bbh_ops; 1393 1394 /* create linkage to dadk */ 1395 dkp->dk_tgobjp = (opaque_t)dadk_create(); 1396 1397 devp = ddi_get_driver_private(dip); 1398 (void) dadk_init(DKTP_DATA, devp, flcobjp, queobjp, &dkp->dk_bbh_obj, 1399 NULL); 1400 1401 return (DDI_SUCCESS); 1402 } 1403 1404 static void 1405 cmdk_destroy_obj(dev_info_t *dip, struct cmdk *dkp) 1406 { 1407 char que_keyvalp[64]; 1408 int que_keylen; 1409 char flc_keyvalp[64]; 1410 int flc_keylen; 1411 1412 ASSERT(mutex_owned(&dkp->dk_mutex)); 1413 1414 (void) dadk_free((dkp->dk_tgobjp)); 1415 dkp->dk_tgobjp = NULL; 1416 1417 que_keylen = sizeof (que_keyvalp); 1418 if (ddi_prop_op(DDI_DEV_T_NONE, dip, 
PROP_LEN_AND_VAL_BUF, 1419 DDI_PROP_CANSLEEP, "queue", que_keyvalp, &que_keylen) != 1420 DDI_PROP_SUCCESS) { 1421 cmn_err(CE_WARN, "cmdk_destroy_obj: queue property undefined"); 1422 return; 1423 } 1424 que_keyvalp[que_keylen] = (char)0; 1425 1426 flc_keylen = sizeof (flc_keyvalp); 1427 if (ddi_prop_op(DDI_DEV_T_NONE, dip, PROP_LEN_AND_VAL_BUF, 1428 DDI_PROP_CANSLEEP, "flow_control", flc_keyvalp, &flc_keylen) != 1429 DDI_PROP_SUCCESS) { 1430 cmn_err(CE_WARN, 1431 "cmdk_destroy_obj: flow-control property undefined"); 1432 return; 1433 } 1434 flc_keyvalp[flc_keylen] = (char)0; 1435 } 1436 /*ARGSUSED5*/ 1437 static int 1438 cmdk_lb_rdwr(dev_info_t *dip, uchar_t cmd, void *bufaddr, 1439 diskaddr_t start, size_t count, void *tg_cookie) 1440 { 1441 struct cmdk *dkp; 1442 opaque_t handle; 1443 int rc = 0; 1444 char *bufa; 1445 size_t buflen; 1446 1447 dkp = ddi_get_soft_state(cmdk_state, ddi_get_instance(dip)); 1448 if (dkp == NULL) 1449 return (ENXIO); 1450 1451 if (cmd != TG_READ && cmd != TG_WRITE) 1452 return (EINVAL); 1453 1454 /* buflen must be multiple of 512 */ 1455 buflen = (count + NBPSCTR - 1) & -NBPSCTR; 1456 handle = dadk_iob_alloc(DKTP_DATA, start, buflen, KM_SLEEP); 1457 if (!handle) 1458 return (ENOMEM); 1459 1460 if (cmd == TG_READ) { 1461 bufa = dadk_iob_xfer(DKTP_DATA, handle, B_READ); 1462 if (!bufa) 1463 rc = EIO; 1464 else 1465 bcopy(bufa, bufaddr, count); 1466 } else { 1467 bufa = dadk_iob_htoc(DKTP_DATA, handle); 1468 bcopy(bufaddr, bufa, count); 1469 bufa = dadk_iob_xfer(DKTP_DATA, handle, B_WRITE); 1470 if (!bufa) 1471 rc = EIO; 1472 } 1473 (void) dadk_iob_free(DKTP_DATA, handle); 1474 1475 return (rc); 1476 } 1477 1478 /*ARGSUSED3*/ 1479 static int 1480 cmdk_lb_getinfo(dev_info_t *dip, int cmd, void *arg, void *tg_cookie) 1481 { 1482 1483 struct cmdk *dkp; 1484 struct tgdk_geom phyg; 1485 1486 1487 dkp = ddi_get_soft_state(cmdk_state, ddi_get_instance(dip)); 1488 if (dkp == NULL) 1489 return (ENXIO); 1490 1491 switch (cmd) { 1492 case 
TG_GETPHYGEOM: { 1493 cmlb_geom_t *phygeomp = (cmlb_geom_t *)arg; 1494 1495 /* dadk_getphygeom always returns success */ 1496 (void) dadk_getphygeom(DKTP_DATA, &phyg); 1497 1498 phygeomp->g_capacity = phyg.g_cap; 1499 phygeomp->g_nsect = phyg.g_sec; 1500 phygeomp->g_nhead = phyg.g_head; 1501 phygeomp->g_acyl = phyg.g_acyl; 1502 phygeomp->g_ncyl = phyg.g_cyl; 1503 phygeomp->g_secsize = phyg.g_secsiz; 1504 phygeomp->g_intrlv = 1; 1505 phygeomp->g_rpm = 3600; 1506 1507 return (0); 1508 } 1509 1510 case TG_GETVIRTGEOM: { 1511 cmlb_geom_t *virtgeomp = (cmlb_geom_t *)arg; 1512 diskaddr_t capacity; 1513 1514 (void) dadk_getgeom(DKTP_DATA, &phyg); 1515 capacity = phyg.g_cap; 1516 1517 /* 1518 * If the controller returned us something that doesn't 1519 * really fit into an Int 13/function 8 geometry 1520 * result, just fail the ioctl. See PSARC 1998/313. 1521 */ 1522 if (capacity < 0 || capacity >= 63 * 254 * 1024) 1523 return (EINVAL); 1524 1525 virtgeomp->g_capacity = capacity; 1526 virtgeomp->g_nsect = 63; 1527 virtgeomp->g_nhead = 254; 1528 virtgeomp->g_ncyl = capacity / (63 * 254); 1529 virtgeomp->g_acyl = 0; 1530 virtgeomp->g_secsize = 512; 1531 virtgeomp->g_intrlv = 1; 1532 virtgeomp->g_rpm = 3600; 1533 1534 return (0); 1535 } 1536 1537 case TG_GETCAPACITY: 1538 case TG_GETBLOCKSIZE: 1539 { 1540 1541 /* dadk_getphygeom always returns success */ 1542 (void) dadk_getphygeom(DKTP_DATA, &phyg); 1543 if (cmd == TG_GETCAPACITY) 1544 *(diskaddr_t *)arg = phyg.g_cap; 1545 else 1546 *(uint32_t *)arg = (uint32_t)phyg.g_secsiz; 1547 1548 return (0); 1549 } 1550 1551 case TG_GETATTR: { 1552 tg_attribute_t *tgattribute = (tg_attribute_t *)arg; 1553 if ((DKTP_EXT->tg_rdonly)) 1554 tgattribute->media_is_writable = FALSE; 1555 else 1556 tgattribute->media_is_writable = TRUE; 1557 tgattribute->media_is_rotational = TRUE; 1558 1559 return (0); 1560 } 1561 1562 default: 1563 return (ENOTTY); 1564 } 1565 } 1566 1567 1568 1569 1570 1571 /* 1572 * Create and register the devid. 
1573 * There are 4 different ways we can get a device id: 1574 * 1. Already have one - nothing to do 1575 * 2. Build one from the drive's model and serial numbers 1576 * 3. Read one from the disk (first sector of last track) 1577 * 4. Fabricate one and write it on the disk. 1578 * If any of these succeeds, register the deviceid 1579 */ 1580 static void 1581 cmdk_devid_setup(struct cmdk *dkp) 1582 { 1583 int rc; 1584 1585 /* Try options until one succeeds, or all have failed */ 1586 1587 /* 1. All done if already registered */ 1588 if (dkp->dk_devid != NULL) 1589 return; 1590 1591 /* 2. Build a devid from the model and serial number */ 1592 rc = cmdk_devid_modser(dkp); 1593 if (rc != DDI_SUCCESS) { 1594 /* 3. Read devid from the disk, if present */ 1595 rc = cmdk_devid_read(dkp); 1596 1597 /* 4. otherwise make one up and write it on the disk */ 1598 if (rc != DDI_SUCCESS) 1599 rc = cmdk_devid_fabricate(dkp); 1600 } 1601 1602 /* If we managed to get a devid any of the above ways, register it */ 1603 if (rc == DDI_SUCCESS) 1604 (void) ddi_devid_register(dkp->dk_dip, dkp->dk_devid); 1605 1606 } 1607 1608 /* 1609 * Build a devid from the model and serial number 1610 * Return DDI_SUCCESS or DDI_FAILURE. 1611 */ 1612 static int 1613 cmdk_devid_modser(struct cmdk *dkp) 1614 { 1615 int rc = DDI_FAILURE; 1616 char *hwid; 1617 int modlen; 1618 int serlen; 1619 1620 /* 1621 * device ID is a concatenation of model number, '=', serial number. 
1622 */ 1623 hwid = kmem_alloc(CMDK_HWIDLEN, KM_SLEEP); 1624 modlen = cmdk_get_modser(dkp, DIOCTL_GETMODEL, hwid, CMDK_HWIDLEN); 1625 if (modlen == 0) { 1626 rc = DDI_FAILURE; 1627 goto err; 1628 } 1629 hwid[modlen++] = '='; 1630 serlen = cmdk_get_modser(dkp, DIOCTL_GETSERIAL, 1631 hwid + modlen, CMDK_HWIDLEN - modlen); 1632 if (serlen == 0) { 1633 rc = DDI_FAILURE; 1634 goto err; 1635 } 1636 hwid[modlen + serlen] = 0; 1637 1638 /* Initialize the device ID, trailing NULL not included */ 1639 rc = ddi_devid_init(dkp->dk_dip, DEVID_ATA_SERIAL, modlen + serlen, 1640 hwid, &dkp->dk_devid); 1641 if (rc != DDI_SUCCESS) { 1642 rc = DDI_FAILURE; 1643 goto err; 1644 } 1645 1646 rc = DDI_SUCCESS; 1647 1648 err: 1649 kmem_free(hwid, CMDK_HWIDLEN); 1650 return (rc); 1651 } 1652 1653 static int 1654 cmdk_get_modser(struct cmdk *dkp, int ioccmd, char *buf, int len) 1655 { 1656 dadk_ioc_string_t strarg; 1657 int rval; 1658 char *s; 1659 char ch; 1660 boolean_t ret; 1661 int i; 1662 int tb; 1663 1664 strarg.is_buf = buf; 1665 strarg.is_size = len; 1666 if (dadk_ioctl(DKTP_DATA, 1667 dkp->dk_dev, 1668 ioccmd, 1669 (uintptr_t)&strarg, 1670 FNATIVE | FKIOCTL, 1671 NULL, 1672 &rval) != 0) 1673 return (0); 1674 1675 /* 1676 * valid model/serial string must contain a non-zero non-space 1677 * trim trailing spaces/NULL 1678 */ 1679 ret = B_FALSE; 1680 s = buf; 1681 for (i = 0; i < strarg.is_size; i++) { 1682 ch = *s++; 1683 if (ch != ' ' && ch != '\0') 1684 tb = i + 1; 1685 if (ch != ' ' && ch != '\0' && ch != '0') 1686 ret = B_TRUE; 1687 } 1688 1689 if (ret == B_FALSE) 1690 return (0); 1691 1692 return (tb); 1693 } 1694 1695 /* 1696 * Read a devid from on the first block of the last track of 1697 * the last cylinder. Make sure what we read is a valid devid. 1698 * Return DDI_SUCCESS or DDI_FAILURE. 
1699 */ 1700 static int 1701 cmdk_devid_read(struct cmdk *dkp) 1702 { 1703 diskaddr_t blk; 1704 struct dk_devid *dkdevidp; 1705 uint_t *ip; 1706 int chksum; 1707 int i, sz; 1708 tgdk_iob_handle handle = NULL; 1709 int rc = DDI_FAILURE; 1710 1711 if (cmlb_get_devid_block(dkp->dk_cmlbhandle, &blk, 0)) 1712 goto err; 1713 1714 /* read the devid */ 1715 handle = dadk_iob_alloc(DKTP_DATA, blk, NBPSCTR, KM_SLEEP); 1716 if (handle == NULL) 1717 goto err; 1718 1719 dkdevidp = (struct dk_devid *)dadk_iob_xfer(DKTP_DATA, handle, B_READ); 1720 if (dkdevidp == NULL) 1721 goto err; 1722 1723 /* Validate the revision */ 1724 if ((dkdevidp->dkd_rev_hi != DK_DEVID_REV_MSB) || 1725 (dkdevidp->dkd_rev_lo != DK_DEVID_REV_LSB)) 1726 goto err; 1727 1728 /* Calculate the checksum */ 1729 chksum = 0; 1730 ip = (uint_t *)dkdevidp; 1731 for (i = 0; i < ((NBPSCTR - sizeof (int))/sizeof (int)); i++) 1732 chksum ^= ip[i]; 1733 if (DKD_GETCHKSUM(dkdevidp) != chksum) 1734 goto err; 1735 1736 /* Validate the device id */ 1737 if (ddi_devid_valid((ddi_devid_t)dkdevidp->dkd_devid) != DDI_SUCCESS) 1738 goto err; 1739 1740 /* keep a copy of the device id */ 1741 sz = ddi_devid_sizeof((ddi_devid_t)dkdevidp->dkd_devid); 1742 dkp->dk_devid = kmem_alloc(sz, KM_SLEEP); 1743 bcopy(dkdevidp->dkd_devid, dkp->dk_devid, sz); 1744 1745 rc = DDI_SUCCESS; 1746 1747 err: 1748 if (handle != NULL) 1749 (void) dadk_iob_free(DKTP_DATA, handle); 1750 return (rc); 1751 } 1752 1753 /* 1754 * Create a devid and write it on the first block of the last track of 1755 * the last cylinder. 1756 * Return DDI_SUCCESS or DDI_FAILURE. 
1757 */ 1758 static int 1759 cmdk_devid_fabricate(struct cmdk *dkp) 1760 { 1761 ddi_devid_t devid = NULL; /* devid made by ddi_devid_init */ 1762 struct dk_devid *dkdevidp; /* devid struct stored on disk */ 1763 diskaddr_t blk; 1764 tgdk_iob_handle handle = NULL; 1765 uint_t *ip, chksum; 1766 int i; 1767 int rc = DDI_FAILURE; 1768 1769 if (ddi_devid_init(dkp->dk_dip, DEVID_FAB, 0, NULL, &devid) != 1770 DDI_SUCCESS) 1771 goto err; 1772 1773 if (cmlb_get_devid_block(dkp->dk_cmlbhandle, &blk, 0)) { 1774 /* no device id block address */ 1775 goto err; 1776 } 1777 1778 handle = dadk_iob_alloc(DKTP_DATA, blk, NBPSCTR, KM_SLEEP); 1779 if (!handle) 1780 goto err; 1781 1782 /* Locate the buffer */ 1783 dkdevidp = (struct dk_devid *)dadk_iob_htoc(DKTP_DATA, handle); 1784 1785 /* Fill in the revision */ 1786 bzero(dkdevidp, NBPSCTR); 1787 dkdevidp->dkd_rev_hi = DK_DEVID_REV_MSB; 1788 dkdevidp->dkd_rev_lo = DK_DEVID_REV_LSB; 1789 1790 /* Copy in the device id */ 1791 i = ddi_devid_sizeof(devid); 1792 if (i > DK_DEVID_SIZE) 1793 goto err; 1794 bcopy(devid, dkdevidp->dkd_devid, i); 1795 1796 /* Calculate the chksum */ 1797 chksum = 0; 1798 ip = (uint_t *)dkdevidp; 1799 for (i = 0; i < ((NBPSCTR - sizeof (int))/sizeof (int)); i++) 1800 chksum ^= ip[i]; 1801 1802 /* Fill in the checksum */ 1803 DKD_FORMCHKSUM(chksum, dkdevidp); 1804 1805 /* write the devid */ 1806 (void) dadk_iob_xfer(DKTP_DATA, handle, B_WRITE); 1807 1808 dkp->dk_devid = devid; 1809 1810 rc = DDI_SUCCESS; 1811 1812 err: 1813 if (handle != NULL) 1814 (void) dadk_iob_free(DKTP_DATA, handle); 1815 1816 if (rc != DDI_SUCCESS && devid != NULL) 1817 ddi_devid_free(devid); 1818 1819 return (rc); 1820 } 1821 1822 static void 1823 cmdk_bbh_free_alts(struct cmdk *dkp) 1824 { 1825 if (dkp->dk_alts_hdl) { 1826 (void) dadk_iob_free(DKTP_DATA, dkp->dk_alts_hdl); 1827 kmem_free(dkp->dk_slc_cnt, 1828 NDKMAP * (sizeof (uint32_t) + sizeof (struct alts_ent *))); 1829 dkp->dk_alts_hdl = NULL; 1830 } 1831 } 1832 1833 static void 1834 
cmdk_bbh_reopen(struct cmdk *dkp) 1835 { 1836 tgdk_iob_handle handle = NULL; 1837 diskaddr_t slcb, slcn, slce; 1838 struct alts_parttbl *ap; 1839 struct alts_ent *enttblp; 1840 uint32_t altused; 1841 uint32_t altbase; 1842 uint32_t altlast; 1843 int alts; 1844 uint16_t vtoctag; 1845 int i, j; 1846 1847 /* find slice with V_ALTSCTR tag */ 1848 for (alts = 0; alts < NDKMAP; alts++) { 1849 if (cmlb_partinfo( 1850 dkp->dk_cmlbhandle, 1851 alts, 1852 &slcn, 1853 &slcb, 1854 NULL, 1855 &vtoctag, 1856 0)) { 1857 goto empty; /* no partition table exists */ 1858 } 1859 1860 if (vtoctag == V_ALTSCTR && slcn > 1) 1861 break; 1862 } 1863 if (alts >= NDKMAP) { 1864 goto empty; /* no V_ALTSCTR slice defined */ 1865 } 1866 1867 /* read in ALTS label block */ 1868 handle = dadk_iob_alloc(DKTP_DATA, slcb, NBPSCTR, KM_SLEEP); 1869 if (!handle) { 1870 goto empty; 1871 } 1872 1873 ap = (struct alts_parttbl *)dadk_iob_xfer(DKTP_DATA, handle, B_READ); 1874 if (!ap || (ap->alts_sanity != ALTS_SANITY)) { 1875 goto empty; 1876 } 1877 1878 altused = ap->alts_ent_used; /* number of BB entries */ 1879 altbase = ap->alts_ent_base; /* blk offset from begin slice */ 1880 altlast = ap->alts_ent_end; /* blk offset to last block */ 1881 /* ((altused * sizeof (struct alts_ent) + NBPSCTR - 1) & ~NBPSCTR) */ 1882 1883 if (altused == 0 || 1884 altbase < 1 || 1885 altbase > altlast || 1886 altlast >= slcn) { 1887 goto empty; 1888 } 1889 (void) dadk_iob_free(DKTP_DATA, handle); 1890 1891 /* read in ALTS remapping table */ 1892 handle = dadk_iob_alloc(DKTP_DATA, 1893 slcb + altbase, 1894 (altlast - altbase + 1) << SCTRSHFT, KM_SLEEP); 1895 if (!handle) { 1896 goto empty; 1897 } 1898 1899 enttblp = (struct alts_ent *)dadk_iob_xfer(DKTP_DATA, handle, B_READ); 1900 if (!enttblp) { 1901 goto empty; 1902 } 1903 1904 rw_enter(&dkp->dk_bbh_mutex, RW_WRITER); 1905 1906 /* allocate space for dk_slc_cnt and dk_slc_ent tables */ 1907 if (dkp->dk_slc_cnt == NULL) { 1908 dkp->dk_slc_cnt = kmem_alloc(NDKMAP * 1909 
(sizeof (long) + sizeof (struct alts_ent *)), KM_SLEEP); 1910 } 1911 dkp->dk_slc_ent = (struct alts_ent **)(dkp->dk_slc_cnt + NDKMAP); 1912 1913 /* free previous BB table (if any) */ 1914 if (dkp->dk_alts_hdl) { 1915 (void) dadk_iob_free(DKTP_DATA, dkp->dk_alts_hdl); 1916 dkp->dk_alts_hdl = NULL; 1917 dkp->dk_altused = 0; 1918 } 1919 1920 /* save linkage to new BB table */ 1921 dkp->dk_alts_hdl = handle; 1922 dkp->dk_altused = altused; 1923 1924 /* 1925 * build indexes to BB table by slice 1926 * effectively we have 1927 * struct alts_ent *enttblp[altused]; 1928 * 1929 * uint32_t dk_slc_cnt[NDKMAP]; 1930 * struct alts_ent *dk_slc_ent[NDKMAP]; 1931 */ 1932 for (i = 0; i < NDKMAP; i++) { 1933 if (cmlb_partinfo( 1934 dkp->dk_cmlbhandle, 1935 i, 1936 &slcn, 1937 &slcb, 1938 NULL, 1939 NULL, 1940 0)) { 1941 goto empty1; 1942 } 1943 1944 dkp->dk_slc_cnt[i] = 0; 1945 if (slcn == 0) 1946 continue; /* slice is not allocated */ 1947 1948 /* last block in slice */ 1949 slce = slcb + slcn - 1; 1950 1951 /* find first remap entry in after beginnning of slice */ 1952 for (j = 0; j < altused; j++) { 1953 if (enttblp[j].bad_start + enttblp[j].bad_end >= slcb) 1954 break; 1955 } 1956 dkp->dk_slc_ent[i] = enttblp + j; 1957 1958 /* count remap entrys until end of slice */ 1959 for (; j < altused && enttblp[j].bad_start <= slce; j++) { 1960 dkp->dk_slc_cnt[i] += 1; 1961 } 1962 } 1963 1964 rw_exit(&dkp->dk_bbh_mutex); 1965 return; 1966 1967 empty: 1968 rw_enter(&dkp->dk_bbh_mutex, RW_WRITER); 1969 empty1: 1970 if (handle && handle != dkp->dk_alts_hdl) 1971 (void) dadk_iob_free(DKTP_DATA, handle); 1972 1973 if (dkp->dk_alts_hdl) { 1974 (void) dadk_iob_free(DKTP_DATA, dkp->dk_alts_hdl); 1975 dkp->dk_alts_hdl = NULL; 1976 } 1977 1978 rw_exit(&dkp->dk_bbh_mutex); 1979 } 1980 1981 /*ARGSUSED*/ 1982 static bbh_cookie_t 1983 cmdk_bbh_htoc(opaque_t bbh_data, opaque_t handle) 1984 { 1985 struct bbh_handle *hp; 1986 bbh_cookie_t ckp; 1987 1988 hp = (struct bbh_handle *)handle; 1989 ckp = 
hp->h_cktab + hp->h_idx; 1990 hp->h_idx++; 1991 return (ckp); 1992 } 1993 1994 /*ARGSUSED*/ 1995 static void 1996 cmdk_bbh_freehandle(opaque_t bbh_data, opaque_t handle) 1997 { 1998 struct bbh_handle *hp; 1999 2000 hp = (struct bbh_handle *)handle; 2001 kmem_free(handle, (sizeof (struct bbh_handle) + 2002 (hp->h_totck * (sizeof (struct bbh_cookie))))); 2003 } 2004 2005 2006 /* 2007 * cmdk_bbh_gethandle remaps the bad sectors to alternates. 2008 * There are 7 different cases when the comparison is made 2009 * between the bad sector cluster and the disk section. 2010 * 2011 * bad sector cluster gggggggggggbbbbbbbggggggggggg 2012 * case 1: ddddd 2013 * case 2: -d----- 2014 * case 3: ddddd 2015 * case 4: dddddddddddd 2016 * case 5: ddddddd----- 2017 * case 6: ---ddddddd 2018 * case 7: ddddddd 2019 * 2020 * where: g = good sector, b = bad sector 2021 * d = sector in disk section 2022 * - = disk section may be extended to cover those disk area 2023 */ 2024 2025 static opaque_t 2026 cmdk_bbh_gethandle(opaque_t bbh_data, struct buf *bp) 2027 { 2028 struct cmdk *dkp = (struct cmdk *)bbh_data; 2029 struct bbh_handle *hp; 2030 struct bbh_cookie *ckp; 2031 struct alts_ent *altp; 2032 uint32_t alts_used; 2033 uint32_t part = CMDKPART(bp->b_edev); 2034 daddr32_t lastsec; 2035 long d_count; 2036 int i; 2037 int idx; 2038 int cnt; 2039 2040 if (part >= V_NUMPAR) 2041 return (NULL); 2042 2043 /* 2044 * This if statement is atomic and it will succeed 2045 * if there are no bad blocks (almost always) 2046 * 2047 * so this if is performed outside of the rw_enter for speed 2048 * and then repeated inside the rw_enter for safety 2049 */ 2050 if (!dkp->dk_alts_hdl) { 2051 return (NULL); 2052 } 2053 2054 rw_enter(&dkp->dk_bbh_mutex, RW_READER); 2055 2056 if (dkp->dk_alts_hdl == NULL) { 2057 rw_exit(&dkp->dk_bbh_mutex); 2058 return (NULL); 2059 } 2060 2061 alts_used = dkp->dk_slc_cnt[part]; 2062 if (alts_used == 0) { 2063 rw_exit(&dkp->dk_bbh_mutex); 2064 return (NULL); 2065 } 2066 altp = 
dkp->dk_slc_ent[part]; 2067 2068 /* 2069 * binary search for the largest bad sector index in the alternate 2070 * entry table which overlaps or larger than the starting d_sec 2071 */ 2072 i = cmdk_bbh_bsearch(altp, alts_used, GET_BP_SEC(bp)); 2073 /* if starting sector is > the largest bad sector, return */ 2074 if (i == -1) { 2075 rw_exit(&dkp->dk_bbh_mutex); 2076 return (NULL); 2077 } 2078 /* i is the starting index. Set altp to the starting entry addr */ 2079 altp += i; 2080 2081 d_count = bp->b_bcount >> SCTRSHFT; 2082 lastsec = GET_BP_SEC(bp) + d_count - 1; 2083 2084 /* calculate the number of bad sectors */ 2085 for (idx = i, cnt = 0; idx < alts_used; idx++, altp++, cnt++) { 2086 if (lastsec < altp->bad_start) 2087 break; 2088 } 2089 2090 if (!cnt) { 2091 rw_exit(&dkp->dk_bbh_mutex); 2092 return (NULL); 2093 } 2094 2095 /* calculate the maximum number of reserved cookies */ 2096 cnt <<= 1; 2097 cnt++; 2098 2099 /* allocate the handle */ 2100 hp = (struct bbh_handle *)kmem_zalloc((sizeof (*hp) + 2101 (cnt * sizeof (*ckp))), KM_SLEEP); 2102 2103 hp->h_idx = 0; 2104 hp->h_totck = cnt; 2105 ckp = hp->h_cktab = (struct bbh_cookie *)(hp + 1); 2106 ckp[0].ck_sector = GET_BP_SEC(bp); 2107 ckp[0].ck_seclen = d_count; 2108 2109 altp = dkp->dk_slc_ent[part]; 2110 altp += i; 2111 for (idx = 0; i < alts_used; i++, altp++) { 2112 /* CASE 1: */ 2113 if (lastsec < altp->bad_start) 2114 break; 2115 2116 /* CASE 3: */ 2117 if (ckp[idx].ck_sector > altp->bad_end) 2118 continue; 2119 2120 /* CASE 2 and 7: */ 2121 if ((ckp[idx].ck_sector >= altp->bad_start) && 2122 (lastsec <= altp->bad_end)) { 2123 ckp[idx].ck_sector = altp->good_start + 2124 ckp[idx].ck_sector - altp->bad_start; 2125 break; 2126 } 2127 2128 /* at least one bad sector in our section. break it. 
*/ 2129 /* CASE 5: */ 2130 if ((lastsec >= altp->bad_start) && 2131 (lastsec <= altp->bad_end)) { 2132 ckp[idx+1].ck_seclen = lastsec - altp->bad_start + 1; 2133 ckp[idx].ck_seclen -= ckp[idx+1].ck_seclen; 2134 ckp[idx+1].ck_sector = altp->good_start; 2135 break; 2136 } 2137 /* CASE 6: */ 2138 if ((ckp[idx].ck_sector <= altp->bad_end) && 2139 (ckp[idx].ck_sector >= altp->bad_start)) { 2140 ckp[idx+1].ck_seclen = ckp[idx].ck_seclen; 2141 ckp[idx].ck_seclen = altp->bad_end - 2142 ckp[idx].ck_sector + 1; 2143 ckp[idx+1].ck_seclen -= ckp[idx].ck_seclen; 2144 ckp[idx].ck_sector = altp->good_start + 2145 ckp[idx].ck_sector - altp->bad_start; 2146 idx++; 2147 ckp[idx].ck_sector = altp->bad_end + 1; 2148 continue; /* check rest of section */ 2149 } 2150 2151 /* CASE 4: */ 2152 ckp[idx].ck_seclen = altp->bad_start - ckp[idx].ck_sector; 2153 ckp[idx+1].ck_sector = altp->good_start; 2154 ckp[idx+1].ck_seclen = altp->bad_end - altp->bad_start + 1; 2155 idx += 2; 2156 ckp[idx].ck_sector = altp->bad_end + 1; 2157 ckp[idx].ck_seclen = lastsec - altp->bad_end; 2158 } 2159 2160 rw_exit(&dkp->dk_bbh_mutex); 2161 return ((opaque_t)hp); 2162 } 2163 2164 static int 2165 cmdk_bbh_bsearch(struct alts_ent *buf, int cnt, daddr32_t key) 2166 { 2167 int i; 2168 int ind; 2169 int interval; 2170 int mystatus = -1; 2171 2172 if (!cnt) 2173 return (mystatus); 2174 2175 ind = 1; /* compiler complains about possible uninitialized var */ 2176 for (i = 1; i <= cnt; i <<= 1) 2177 ind = i; 2178 2179 for (interval = ind; interval; ) { 2180 if ((key >= buf[ind-1].bad_start) && 2181 (key <= buf[ind-1].bad_end)) { 2182 return (ind-1); 2183 } else { 2184 interval >>= 1; 2185 if (key < buf[ind-1].bad_start) { 2186 /* record the largest bad sector index */ 2187 mystatus = ind-1; 2188 if (!interval) 2189 break; 2190 ind = ind - interval; 2191 } else { 2192 /* 2193 * if key is larger than the last element 2194 * then break 2195 */ 2196 if ((ind == cnt) || !interval) 2197 break; 2198 if ((ind+interval) <= cnt) 
2199 ind += interval; 2200 } 2201 } 2202 } 2203 return (mystatus); 2204 } 2205