/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/scsi/scsi.h>
#include <sys/dktp/cm.h>
#include <sys/dktp/quetypes.h>
#include <sys/dktp/queue.h>
#include <sys/dktp/fctypes.h>
#include <sys/dktp/flowctrl.h>
#include <sys/dktp/cmdev.h>
#include <sys/dkio.h>
#include <sys/dktp/tgdk.h>
#include <sys/dktp/dadk.h>
#include <sys/dktp/bbh.h>
#include <sys/dktp/altsctr.h>
#include <sys/dktp/cmdk.h>

#include <sys/stat.h>
#include <sys/vtoc.h>
#include <sys/file.h>
#include <sys/dktp/dadkio.h>
#include <sys/aio_req.h>

#include <sys/cmlb.h>

/*
 * Local Static Data
 */
#ifdef CMDK_DEBUG
#define	DENT	0x0001
#define	DIO	0x0002

static	int	cmdk_debug = DIO;
#endif

#ifndef	TRUE
#define	TRUE	1
#endif

#ifndef	FALSE
#define	FALSE	0
#endif

/*
 * NDKMAP is the base number for accessing the fdisk partitions.
 * c?d?p0 --> cmdk@?,?:q
 */
#define	PARTITION0_INDEX	(NDKMAP + 0)

#define	DKTP_DATA		(dkp->dk_tgobjp)->tg_data
#define	DKTP_EXT		(dkp->dk_tgobjp)->tg_ext

/* soft-state anchor for all cmdk instances */
static void *cmdk_state;

/*
 * the cmdk_attach_mutex protects cmdk_max_instance in multi-threaded
 * attach situations
 */
static kmutex_t cmdk_attach_mutex;
static int cmdk_max_instance = 0;

/*
 * Panic dumpsys state
 * There is only a single flag that is not mutex locked since
 * the system is prevented from thread switching and cmdk_dump
 * will only be called in a single threaded operation.
 */
static int	cmdk_indump;

/*
 * Local Function Prototypes
 */
static int cmdk_create_obj(dev_info_t *dip, struct cmdk *dkp);
static void cmdk_destroy_obj(dev_info_t *dip, struct cmdk *dkp);
static void cmdkmin(struct buf *bp);
static int cmdkrw(dev_t dev, struct uio *uio, int flag);
static int cmdkarw(dev_t dev, struct aio_req *aio, int flag);

/*
 * Bad Block Handling Functions Prototypes
 */
static void cmdk_bbh_reopen(struct cmdk *dkp);
static opaque_t cmdk_bbh_gethandle(opaque_t bbh_data, struct buf *bp);
static bbh_cookie_t cmdk_bbh_htoc(opaque_t bbh_data, opaque_t handle);
static void cmdk_bbh_freehandle(opaque_t bbh_data, opaque_t handle);
static void cmdk_bbh_close(struct cmdk *dkp);
static void cmdk_bbh_setalts_idx(struct cmdk *dkp);
static int cmdk_bbh_bsearch(struct alts_ent *buf, int cnt, daddr32_t key);

static struct bbh_objops cmdk_bbh_ops = {
	nulldev,
	nulldev,
	cmdk_bbh_gethandle,
	cmdk_bbh_htoc,
	cmdk_bbh_freehandle,
	0, 0
};

static int cmdkopen(dev_t *dev_p, int flag, int otyp, cred_t *credp);
static int cmdkclose(dev_t dev, int flag, int otyp, cred_t *credp);
static int cmdkstrategy(struct buf *bp);
static int cmdkdump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk);
static int cmdkioctl(dev_t, int, intptr_t, int, cred_t *, int *);
static int cmdkread(dev_t dev, struct uio *uio, cred_t *credp);
static int cmdkwrite(dev_t dev, struct uio *uio, cred_t *credp);
static int cmdk_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp);
static int cmdkaread(dev_t dev, struct aio_req *aio, cred_t *credp);
static int cmdkawrite(dev_t dev, struct aio_req *aio, cred_t *credp);

/*
 * Device driver ops vector
 */

static struct cb_ops cmdk_cb_ops = {
	cmdkopen, 			/* open */
	cmdkclose, 			/* close */
	cmdkstrategy, 			/* strategy */
	nodev, 				/* print */
	cmdkdump, 			/* dump */
	cmdkread, 			/* read */
	cmdkwrite, 			/* write */
	cmdkioctl, 			/* ioctl */
	nodev, 				/* devmap */
	nodev, 				/* mmap */
	nodev, 				/* segmap */
	nochpoll, 			/* poll */
	cmdk_prop_op, 			/* cb_prop_op */
	0, 				/* streamtab  */
	D_64BIT | D_MP | D_NEW, 	/* Driver compatibility flag */
	CB_REV,				/* cb_rev */
	cmdkaread,			/* async read */
	cmdkawrite			/* async write */
};

static int cmdkinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
    void **result);
static int cmdkprobe(dev_info_t *dip);
static int cmdkattach(dev_info_t *dip, ddi_attach_cmd_t cmd);
static int cmdkdetach(dev_info_t *dip, ddi_detach_cmd_t cmd);

static void cmdk_setup_pm(dev_info_t *dip, struct cmdk *dkp);
static int cmdkresume(dev_info_t *dip);
static int cmdksuspend(dev_info_t *dip);
static int cmdkpower(dev_info_t *dip, int component, int level);

struct dev_ops cmdk_ops = {
	DEVO_REV, 		/* devo_rev, */
	0, 			/* refcnt  */
	cmdkinfo,		/* info */
	nulldev, 		/* identify */
	cmdkprobe, 		/* probe */
	cmdkattach, 		/* attach */
	cmdkdetach,		/* detach */
	nodev, 			/* reset */
	&cmdk_cb_ops, 		/* driver operations */
	(struct bus_ops *)0,	/* bus operations */
	cmdkpower,		/* power */
	ddi_quiesce_not_needed,	/* quiesce */
};

/*
 * This is the loadable module wrapper.
 */
#include <sys/modctl.h>

extern struct mod_ops mod_driverops;

static struct modldrv modldrv = {
	&mod_driverops,		/* Type of module. This one is a driver */
	"Common Direct Access Disk",
	&cmdk_ops,		/* driver ops 				*/
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&modldrv, NULL
};

/* Function prototypes for cmlb callbacks */

static int cmdk_lb_rdwr(dev_info_t *dip, uchar_t cmd, void *bufaddr,
    diskaddr_t start, size_t length, void *tg_cookie);

static int cmdk_lb_getinfo(dev_info_t *dip, int cmd, void *arg,
    void *tg_cookie);

static void cmdk_devid_setup(struct cmdk *dkp);
static int cmdk_devid_modser(struct cmdk *dkp);
static int cmdk_get_modser(struct cmdk *dkp, int ioccmd, char *buf, int len);
static int cmdk_devid_fabricate(struct cmdk *dkp);
static int cmdk_devid_read(struct cmdk *dkp);

static cmlb_tg_ops_t cmdk_lb_ops = {
	TG_DK_OPS_VERSION_1,
	cmdk_lb_rdwr,
	cmdk_lb_getinfo
};

/*
 * Return B_TRUE if the partition selected by 'dev' is open through any
 * layered-driver count or any regular open type; caller must hold dk_mutex.
 */
static boolean_t
cmdk_isopen(struct cmdk *dkp, dev_t dev)
{
	int		part, otyp;
	ulong_t		partbit;

	ASSERT(MUTEX_HELD((&dkp->dk_mutex)));

	part = CMDKPART(dev);
	partbit = 1 << part;

	/* a non-zero layered count or a set regular-open bit means open */
	if (dkp->dk_open_lyr[part] != 0)
		return (B_TRUE);
	for (otyp = 0; otyp < OTYPCNT; otyp++)
		if (dkp->dk_open_reg[otyp] & partbit)
			return (B_TRUE);
	return (B_FALSE);
}

int
_init(void)
{
	int 	rval;

	if (rval = ddi_soft_state_init(&cmdk_state, sizeof (struct cmdk), 7))
		return (rval);

	mutex_init(&cmdk_attach_mutex, NULL, MUTEX_DRIVER, NULL);
	if ((rval = mod_install(&modlinkage)) != 0) {
		mutex_destroy(&cmdk_attach_mutex);
		ddi_soft_state_fini(&cmdk_state);
	}
	return (rval);
}

int
_fini(void)
{
	return (EBUSY);

	/*
	 * This has been commented out until cmdk is a true
	 * unloadable module. Right now x86's are panicking on
	 * a diskless reconfig boot.
	 */

#if 0 	/* bugid 1186679 */
	int	rval;

	rval = mod_remove(&modlinkage);
	if (rval != 0)
		return (rval);

	mutex_destroy(&cmdk_attach_mutex);
	ddi_soft_state_fini(&cmdk_state);

	return (0);
#endif
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*
 * Autoconfiguration Routines
 */
static int
cmdkprobe(dev_info_t *dip)
{
	int 	instance;
	int	status;
	struct	cmdk	*dkp;

	instance = ddi_get_instance(dip);

	/* already probed? */
	if (ddi_get_soft_state(cmdk_state, instance))
		return (DDI_PROBE_PARTIAL);

	if ((ddi_soft_state_zalloc(cmdk_state, instance) != DDI_SUCCESS) ||
	    ((dkp = ddi_get_soft_state(cmdk_state, instance)) == NULL))
		return (DDI_PROBE_PARTIAL);

	mutex_init(&dkp->dk_mutex, NULL, MUTEX_DRIVER, NULL);
	rw_init(&dkp->dk_bbh_mutex, NULL, RW_DRIVER, NULL);
	dkp->dk_dip = dip;
	mutex_enter(&dkp->dk_mutex);

	dkp->dk_dev = makedevice(ddi_driver_major(dip),
	    ddi_get_instance(dip) << CMDK_UNITSHF);

	/* linkage to dadk and strategy */
	if (cmdk_create_obj(dip, dkp) != DDI_SUCCESS) {
		mutex_exit(&dkp->dk_mutex);
		mutex_destroy(&dkp->dk_mutex);
		rw_destroy(&dkp->dk_bbh_mutex);
		ddi_soft_state_free(cmdk_state, instance);
		return (DDI_PROBE_PARTIAL);
	}

	status = dadk_probe(DKTP_DATA, KM_NOSLEEP);
	if (status != DDI_PROBE_SUCCESS) {
		cmdk_destroy_obj(dip, dkp);	/* dadk/strategy linkage  */
		mutex_exit(&dkp->dk_mutex);
		mutex_destroy(&dkp->dk_mutex);
		rw_destroy(&dkp->dk_bbh_mutex);
		ddi_soft_state_free(cmdk_state, instance);
		return (status);
	}

	mutex_exit(&dkp->dk_mutex);
#ifdef CMDK_DEBUG
	if (cmdk_debug & DENT)
		PRF("cmdkprobe: instance= %d name= `%s`\n",
		    instance, ddi_get_name_addr(dip));
#endif
	return (status);
}

344 static int 345 cmdkattach(dev_info_t *dip, ddi_attach_cmd_t cmd) 346 { 347 int instance; 348 struct cmdk *dkp; 349 char *node_type; 350 351 switch (cmd) { 352 case DDI_ATTACH: 353 break; 354 case DDI_RESUME: 355 return (cmdkresume(dip)); 356 default: 357 return (DDI_FAILURE); 358 } 359 360 instance = ddi_get_instance(dip); 361 if (!(dkp = ddi_get_soft_state(cmdk_state, instance))) 362 return (DDI_FAILURE); 363 364 dkp->dk_pm_level = CMDK_SPINDLE_UNINIT; 365 mutex_init(&dkp->dk_mutex, NULL, MUTEX_DRIVER, NULL); 366 367 mutex_enter(&dkp->dk_mutex); 368 369 /* dadk_attach is an empty function that only returns SUCCESS */ 370 (void) dadk_attach(DKTP_DATA); 371 372 node_type = (DKTP_EXT->tg_nodetype); 373 374 /* 375 * this open allows cmlb to read the device 376 * and determine the label types 377 * so that cmlb can create minor nodes for device 378 */ 379 380 /* open the target disk */ 381 if (dadk_open(DKTP_DATA, 0) != DDI_SUCCESS) 382 goto fail2; 383 384 #ifdef _ILP32 385 { 386 struct tgdk_geom phyg; 387 (void) dadk_getphygeom(DKTP_DATA, &phyg); 388 if ((phyg.g_cap - 1) > DK_MAX_BLOCKS) { 389 (void) dadk_close(DKTP_DATA); 390 goto fail2; 391 } 392 } 393 #endif 394 395 396 /* mark as having opened target */ 397 dkp->dk_flag |= CMDK_TGDK_OPEN; 398 399 cmlb_alloc_handle((cmlb_handle_t *)&dkp->dk_cmlbhandle); 400 401 if (cmlb_attach(dip, 402 &cmdk_lb_ops, 403 DTYPE_DIRECT, /* device_type */ 404 0, /* removable */ 405 0, /* hot pluggable XXX */ 406 node_type, 407 CMLB_CREATE_ALTSLICE_VTOC_16_DTYPE_DIRECT, /* alter_behaviour */ 408 dkp->dk_cmlbhandle, 409 0) != 0) 410 goto fail1; 411 412 /* Calling validate will create minor nodes according to disk label */ 413 (void) cmlb_validate(dkp->dk_cmlbhandle, 0, 0); 414 415 /* set bbh (Bad Block Handling) */ 416 cmdk_bbh_reopen(dkp); 417 418 /* setup devid string */ 419 cmdk_devid_setup(dkp); 420 421 mutex_enter(&cmdk_attach_mutex); 422 if (instance > cmdk_max_instance) 423 cmdk_max_instance = instance; 424 
mutex_exit(&cmdk_attach_mutex); 425 426 mutex_exit(&dkp->dk_mutex); 427 428 /* 429 * Add a zero-length attribute to tell the world we support 430 * kernel ioctls (for layered drivers) 431 */ 432 (void) ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP, 433 DDI_KERNEL_IOCTL, NULL, 0); 434 ddi_report_dev(dip); 435 436 /* 437 * Initialize power management 438 */ 439 mutex_init(&dkp->dk_pm_mutex, NULL, MUTEX_DRIVER, NULL); 440 cv_init(&dkp->dk_suspend_cv, NULL, CV_DRIVER, NULL); 441 cmdk_setup_pm(dip, dkp); 442 443 return (DDI_SUCCESS); 444 445 fail1: 446 cmlb_free_handle(&dkp->dk_cmlbhandle); 447 (void) dadk_close(DKTP_DATA); 448 fail2: 449 cmdk_destroy_obj(dip, dkp); 450 rw_destroy(&dkp->dk_bbh_mutex); 451 mutex_exit(&dkp->dk_mutex); 452 mutex_destroy(&dkp->dk_mutex); 453 ddi_soft_state_free(cmdk_state, instance); 454 return (DDI_FAILURE); 455 } 456 457 458 static int 459 cmdkdetach(dev_info_t *dip, ddi_detach_cmd_t cmd) 460 { 461 struct cmdk *dkp; 462 int instance; 463 int max_instance; 464 465 switch (cmd) { 466 case DDI_DETACH: 467 /* return (DDI_FAILURE); */ 468 break; 469 case DDI_SUSPEND: 470 return (cmdksuspend(dip)); 471 default: 472 #ifdef CMDK_DEBUG 473 if (cmdk_debug & DIO) { 474 PRF("cmdkdetach: cmd = %d unknown\n", cmd); 475 } 476 #endif 477 return (DDI_FAILURE); 478 } 479 480 mutex_enter(&cmdk_attach_mutex); 481 max_instance = cmdk_max_instance; 482 mutex_exit(&cmdk_attach_mutex); 483 484 /* check if any instance of driver is open */ 485 for (instance = 0; instance < max_instance; instance++) { 486 dkp = ddi_get_soft_state(cmdk_state, instance); 487 if (!dkp) 488 continue; 489 if (dkp->dk_flag & CMDK_OPEN) 490 return (DDI_FAILURE); 491 } 492 493 instance = ddi_get_instance(dip); 494 if (!(dkp = ddi_get_soft_state(cmdk_state, instance))) 495 return (DDI_SUCCESS); 496 497 mutex_enter(&dkp->dk_mutex); 498 499 /* 500 * The cmdk_part_info call at the end of cmdkattach may have 501 * caused cmdk_reopen to do a TGDK_OPEN, make sure we close on 502 * 
detach for case when cmdkopen/cmdkclose never occurs. 503 */ 504 if (dkp->dk_flag & CMDK_TGDK_OPEN) { 505 dkp->dk_flag &= ~CMDK_TGDK_OPEN; 506 (void) dadk_close(DKTP_DATA); 507 } 508 509 cmlb_detach(dkp->dk_cmlbhandle, 0); 510 cmlb_free_handle(&dkp->dk_cmlbhandle); 511 ddi_prop_remove_all(dip); 512 513 cmdk_destroy_obj(dip, dkp); /* dadk/strategy linkage */ 514 mutex_exit(&dkp->dk_mutex); 515 mutex_destroy(&dkp->dk_mutex); 516 rw_destroy(&dkp->dk_bbh_mutex); 517 mutex_destroy(&dkp->dk_pm_mutex); 518 cv_destroy(&dkp->dk_suspend_cv); 519 ddi_soft_state_free(cmdk_state, instance); 520 521 return (DDI_SUCCESS); 522 } 523 524 static int 525 cmdkinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) 526 { 527 dev_t dev = (dev_t)arg; 528 int instance; 529 struct cmdk *dkp; 530 531 #ifdef lint 532 dip = dip; /* no one ever uses this */ 533 #endif 534 #ifdef CMDK_DEBUG 535 if (cmdk_debug & DENT) 536 PRF("cmdkinfo: call\n"); 537 #endif 538 instance = CMDKUNIT(dev); 539 540 switch (infocmd) { 541 case DDI_INFO_DEVT2DEVINFO: 542 if (!(dkp = ddi_get_soft_state(cmdk_state, instance))) 543 return (DDI_FAILURE); 544 *result = (void *) dkp->dk_dip; 545 break; 546 case DDI_INFO_DEVT2INSTANCE: 547 *result = (void *)(intptr_t)instance; 548 break; 549 default: 550 return (DDI_FAILURE); 551 } 552 return (DDI_SUCCESS); 553 } 554 555 /* 556 * Initialize the power management components 557 */ 558 static void 559 cmdk_setup_pm(dev_info_t *dip, struct cmdk *dkp) 560 { 561 char *pm_comp[] = { "NAME=cmdk", "0=off", "1=on", NULL }; 562 563 /* 564 * Since the cmdk device does not the 'reg' property, 565 * cpr will not call its DDI_SUSPEND/DDI_RESUME entries. 566 * The following code is to tell cpr that this device 567 * DOES need to be suspended and resumed. 
568 */ 569 (void) ddi_prop_update_string(DDI_DEV_T_NONE, dip, 570 "pm-hardware-state", "needs-suspend-resume"); 571 572 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, dip, 573 "pm-components", pm_comp, 3) == DDI_PROP_SUCCESS) { 574 if (pm_raise_power(dip, 0, CMDK_SPINDLE_ON) == DDI_SUCCESS) { 575 mutex_enter(&dkp->dk_pm_mutex); 576 dkp->dk_pm_level = CMDK_SPINDLE_ON; 577 dkp->dk_pm_is_enabled = 1; 578 mutex_exit(&dkp->dk_pm_mutex); 579 } else { 580 mutex_enter(&dkp->dk_pm_mutex); 581 dkp->dk_pm_level = CMDK_SPINDLE_OFF; 582 dkp->dk_pm_is_enabled = 0; 583 mutex_exit(&dkp->dk_pm_mutex); 584 } 585 } else { 586 mutex_enter(&dkp->dk_pm_mutex); 587 dkp->dk_pm_level = CMDK_SPINDLE_UNINIT; 588 dkp->dk_pm_is_enabled = 0; 589 mutex_exit(&dkp->dk_pm_mutex); 590 } 591 } 592 593 /* 594 * suspend routine, it will be run when get the command 595 * DDI_SUSPEND at detach(9E) from system power management 596 */ 597 static int 598 cmdksuspend(dev_info_t *dip) 599 { 600 struct cmdk *dkp; 601 int instance; 602 clock_t count = 0; 603 604 instance = ddi_get_instance(dip); 605 if (!(dkp = ddi_get_soft_state(cmdk_state, instance))) 606 return (DDI_FAILURE); 607 mutex_enter(&dkp->dk_mutex); 608 if (dkp->dk_flag & CMDK_SUSPEND) { 609 mutex_exit(&dkp->dk_mutex); 610 return (DDI_SUCCESS); 611 } 612 dkp->dk_flag |= CMDK_SUSPEND; 613 614 /* need to wait a while */ 615 while (dadk_getcmds(DKTP_DATA) != 0) { 616 delay(drv_usectohz(1000000)); 617 if (count > 60) { 618 dkp->dk_flag &= ~CMDK_SUSPEND; 619 cv_broadcast(&dkp->dk_suspend_cv); 620 mutex_exit(&dkp->dk_mutex); 621 return (DDI_FAILURE); 622 } 623 count++; 624 } 625 mutex_exit(&dkp->dk_mutex); 626 return (DDI_SUCCESS); 627 } 628 629 /* 630 * resume routine, it will be run when get the command 631 * DDI_RESUME at attach(9E) from system power management 632 */ 633 static int 634 cmdkresume(dev_info_t *dip) 635 { 636 struct cmdk *dkp; 637 int instance; 638 639 instance = ddi_get_instance(dip); 640 if (!(dkp = ddi_get_soft_state(cmdk_state, 
instance))) 641 return (DDI_FAILURE); 642 mutex_enter(&dkp->dk_mutex); 643 if (!(dkp->dk_flag & CMDK_SUSPEND)) { 644 mutex_exit(&dkp->dk_mutex); 645 return (DDI_FAILURE); 646 } 647 dkp->dk_pm_level = CMDK_SPINDLE_ON; 648 dkp->dk_flag &= ~CMDK_SUSPEND; 649 cv_broadcast(&dkp->dk_suspend_cv); 650 mutex_exit(&dkp->dk_mutex); 651 return (DDI_SUCCESS); 652 653 } 654 655 /* 656 * power management entry point, it was used to 657 * change power management component. 658 * Actually, the real hard drive suspend/resume 659 * was handled in ata, so this function is not 660 * doing any real work other than verifying that 661 * the disk is idle. 662 */ 663 static int 664 cmdkpower(dev_info_t *dip, int component, int level) 665 { 666 struct cmdk *dkp; 667 int instance; 668 669 instance = ddi_get_instance(dip); 670 if (!(dkp = ddi_get_soft_state(cmdk_state, instance)) || 671 component != 0 || level > CMDK_SPINDLE_ON || 672 level < CMDK_SPINDLE_OFF) { 673 return (DDI_FAILURE); 674 } 675 676 mutex_enter(&dkp->dk_pm_mutex); 677 if (dkp->dk_pm_is_enabled && dkp->dk_pm_level == level) { 678 mutex_exit(&dkp->dk_pm_mutex); 679 return (DDI_SUCCESS); 680 } 681 mutex_exit(&dkp->dk_pm_mutex); 682 683 if ((level == CMDK_SPINDLE_OFF) && 684 (dadk_getcmds(DKTP_DATA) != 0)) { 685 return (DDI_FAILURE); 686 } 687 688 mutex_enter(&dkp->dk_pm_mutex); 689 dkp->dk_pm_level = level; 690 mutex_exit(&dkp->dk_pm_mutex); 691 return (DDI_SUCCESS); 692 } 693 694 static int 695 cmdk_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags, 696 char *name, caddr_t valuep, int *lengthp) 697 { 698 struct cmdk *dkp; 699 700 #ifdef CMDK_DEBUG 701 if (cmdk_debug & DENT) 702 PRF("cmdk_prop_op: call\n"); 703 #endif 704 705 dkp = ddi_get_soft_state(cmdk_state, ddi_get_instance(dip)); 706 if (dkp == NULL) 707 return (ddi_prop_op(dev, dip, prop_op, mod_flags, 708 name, valuep, lengthp)); 709 710 return (cmlb_prop_op(dkp->dk_cmlbhandle, 711 dev, dip, prop_op, mod_flags, name, valuep, lengthp, 712 
CMDKPART(dev), NULL)); 713 } 714 715 /* 716 * dump routine 717 */ 718 static int 719 cmdkdump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk) 720 { 721 int instance; 722 struct cmdk *dkp; 723 diskaddr_t p_lblksrt; 724 diskaddr_t p_lblkcnt; 725 struct buf local; 726 struct buf *bp; 727 728 #ifdef CMDK_DEBUG 729 if (cmdk_debug & DENT) 730 PRF("cmdkdump: call\n"); 731 #endif 732 instance = CMDKUNIT(dev); 733 if (!(dkp = ddi_get_soft_state(cmdk_state, instance)) || (blkno < 0)) 734 return (ENXIO); 735 736 if (cmlb_partinfo( 737 dkp->dk_cmlbhandle, 738 CMDKPART(dev), 739 &p_lblkcnt, 740 &p_lblksrt, 741 NULL, 742 NULL, 743 0)) { 744 return (ENXIO); 745 } 746 747 if ((blkno+nblk) > p_lblkcnt) 748 return (EINVAL); 749 750 cmdk_indump = 1; /* Tell disk targets we are panic dumpping */ 751 752 bp = &local; 753 bzero(bp, sizeof (*bp)); 754 bp->b_flags = B_BUSY; 755 bp->b_un.b_addr = addr; 756 bp->b_bcount = nblk << SCTRSHFT; 757 SET_BP_SEC(bp, ((ulong_t)(p_lblksrt + blkno))); 758 759 (void) dadk_dump(DKTP_DATA, bp); 760 return (bp->b_error); 761 } 762 763 /* 764 * Copy in the dadkio_rwcmd according to the user's data model. If needed, 765 * convert it for our internal use. 766 */ 767 static int 768 rwcmd_copyin(struct dadkio_rwcmd *rwcmdp, caddr_t inaddr, int flag) 769 { 770 switch (ddi_model_convert_from(flag)) { 771 case DDI_MODEL_ILP32: { 772 struct dadkio_rwcmd32 cmd32; 773 774 if (ddi_copyin(inaddr, &cmd32, 775 sizeof (struct dadkio_rwcmd32), flag)) { 776 return (EFAULT); 777 } 778 779 rwcmdp->cmd = cmd32.cmd; 780 rwcmdp->flags = cmd32.flags; 781 rwcmdp->blkaddr = (blkaddr_t)cmd32.blkaddr; 782 rwcmdp->buflen = cmd32.buflen; 783 rwcmdp->bufaddr = (caddr_t)(intptr_t)cmd32.bufaddr; 784 /* 785 * Note: we do not convert the 'status' field, 786 * as it should not contain valid data at this 787 * point. 
788 */ 789 bzero(&rwcmdp->status, sizeof (rwcmdp->status)); 790 break; 791 } 792 case DDI_MODEL_NONE: { 793 if (ddi_copyin(inaddr, rwcmdp, 794 sizeof (struct dadkio_rwcmd), flag)) { 795 return (EFAULT); 796 } 797 } 798 } 799 return (0); 800 } 801 802 /* 803 * If necessary, convert the internal rwcmdp and status to the appropriate 804 * data model and copy it out to the user. 805 */ 806 static int 807 rwcmd_copyout(struct dadkio_rwcmd *rwcmdp, caddr_t outaddr, int flag) 808 { 809 switch (ddi_model_convert_from(flag)) { 810 case DDI_MODEL_ILP32: { 811 struct dadkio_rwcmd32 cmd32; 812 813 cmd32.cmd = rwcmdp->cmd; 814 cmd32.flags = rwcmdp->flags; 815 cmd32.blkaddr = rwcmdp->blkaddr; 816 cmd32.buflen = rwcmdp->buflen; 817 ASSERT64(((uintptr_t)rwcmdp->bufaddr >> 32) == 0); 818 cmd32.bufaddr = (caddr32_t)(uintptr_t)rwcmdp->bufaddr; 819 820 cmd32.status.status = rwcmdp->status.status; 821 cmd32.status.resid = rwcmdp->status.resid; 822 cmd32.status.failed_blk_is_valid = 823 rwcmdp->status.failed_blk_is_valid; 824 cmd32.status.failed_blk = rwcmdp->status.failed_blk; 825 cmd32.status.fru_code_is_valid = 826 rwcmdp->status.fru_code_is_valid; 827 cmd32.status.fru_code = rwcmdp->status.fru_code; 828 829 bcopy(rwcmdp->status.add_error_info, 830 cmd32.status.add_error_info, DADKIO_ERROR_INFO_LEN); 831 832 if (ddi_copyout(&cmd32, outaddr, 833 sizeof (struct dadkio_rwcmd32), flag)) 834 return (EFAULT); 835 break; 836 } 837 case DDI_MODEL_NONE: { 838 if (ddi_copyout(rwcmdp, outaddr, 839 sizeof (struct dadkio_rwcmd), flag)) 840 return (EFAULT); 841 } 842 } 843 return (0); 844 } 845 846 /* 847 * ioctl routine 848 */ 849 static int 850 cmdkioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *credp, int *rvalp) 851 { 852 int instance; 853 struct scsi_device *devp; 854 struct cmdk *dkp; 855 char data[NBPSCTR]; 856 857 instance = CMDKUNIT(dev); 858 if (!(dkp = ddi_get_soft_state(cmdk_state, instance))) 859 return (ENXIO); 860 861 mutex_enter(&dkp->dk_mutex); 862 while (dkp->dk_flag & 
CMDK_SUSPEND) { 863 cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex); 864 } 865 mutex_exit(&dkp->dk_mutex); 866 867 bzero(data, sizeof (data)); 868 869 switch (cmd) { 870 871 case DKIOCGMEDIAINFO: { 872 struct dk_minfo media_info; 873 struct tgdk_geom phyg; 874 875 /* dadk_getphygeom always returns success */ 876 (void) dadk_getphygeom(DKTP_DATA, &phyg); 877 878 media_info.dki_lbsize = phyg.g_secsiz; 879 media_info.dki_capacity = phyg.g_cap; 880 media_info.dki_media_type = DK_FIXED_DISK; 881 882 if (ddi_copyout(&media_info, (void *)arg, 883 sizeof (struct dk_minfo), flag)) { 884 return (EFAULT); 885 } else { 886 return (0); 887 } 888 } 889 890 case DKIOCINFO: { 891 struct dk_cinfo *info = (struct dk_cinfo *)data; 892 893 /* controller information */ 894 info->dki_ctype = (DKTP_EXT->tg_ctype); 895 info->dki_cnum = ddi_get_instance(ddi_get_parent(dkp->dk_dip)); 896 (void) strcpy(info->dki_cname, 897 ddi_get_name(ddi_get_parent(dkp->dk_dip))); 898 899 /* Unit Information */ 900 info->dki_unit = ddi_get_instance(dkp->dk_dip); 901 devp = ddi_get_driver_private(dkp->dk_dip); 902 info->dki_slave = (CMDEV_TARG(devp)<<3) | CMDEV_LUN(devp); 903 (void) strcpy(info->dki_dname, ddi_driver_name(dkp->dk_dip)); 904 info->dki_flags = DKI_FMTVOL; 905 info->dki_partition = CMDKPART(dev); 906 907 info->dki_maxtransfer = maxphys / DEV_BSIZE; 908 info->dki_addr = 1; 909 info->dki_space = 0; 910 info->dki_prio = 0; 911 info->dki_vec = 0; 912 913 if (ddi_copyout(data, (void *)arg, sizeof (*info), flag)) 914 return (EFAULT); 915 else 916 return (0); 917 } 918 919 case DKIOCSTATE: { 920 int state; 921 int rval; 922 diskaddr_t p_lblksrt; 923 diskaddr_t p_lblkcnt; 924 925 if (ddi_copyin((void *)arg, &state, sizeof (int), flag)) 926 return (EFAULT); 927 928 /* dadk_check_media blocks until state changes */ 929 if (rval = dadk_check_media(DKTP_DATA, &state)) 930 return (rval); 931 932 if (state == DKIO_INSERTED) { 933 934 if (cmlb_validate(dkp->dk_cmlbhandle, 0, 0) != 0) 935 return (ENXIO); 936 937 
if (cmlb_partinfo(dkp->dk_cmlbhandle, CMDKPART(dev), 938 &p_lblkcnt, &p_lblksrt, NULL, NULL, 0)) 939 return (ENXIO); 940 941 if (p_lblkcnt <= 0) 942 return (ENXIO); 943 } 944 945 if (ddi_copyout(&state, (caddr_t)arg, sizeof (int), flag)) 946 return (EFAULT); 947 948 return (0); 949 } 950 951 /* 952 * is media removable? 953 */ 954 case DKIOCREMOVABLE: { 955 int i; 956 957 i = (DKTP_EXT->tg_rmb) ? 1 : 0; 958 959 if (ddi_copyout(&i, (caddr_t)arg, sizeof (int), flag)) 960 return (EFAULT); 961 962 return (0); 963 } 964 965 case DKIOCADDBAD: 966 /* 967 * This is not an update mechanism to add bad blocks 968 * to the bad block structures stored on disk. 969 * 970 * addbadsec(1M) will update the bad block data on disk 971 * and use this ioctl to force the driver to re-initialize 972 * the list of bad blocks in the driver. 973 */ 974 975 /* start BBH */ 976 cmdk_bbh_reopen(dkp); 977 return (0); 978 979 case DKIOCG_PHYGEOM: 980 case DKIOCG_VIRTGEOM: 981 case DKIOCGGEOM: 982 case DKIOCSGEOM: 983 case DKIOCGAPART: 984 case DKIOCSAPART: 985 case DKIOCGVTOC: 986 case DKIOCSVTOC: 987 case DKIOCPARTINFO: 988 case DKIOCGEXTVTOC: 989 case DKIOCSEXTVTOC: 990 case DKIOCEXTPARTINFO: 991 case DKIOCGMBOOT: 992 case DKIOCSMBOOT: 993 case DKIOCGETEFI: 994 case DKIOCSETEFI: 995 case DKIOCPARTITION: 996 { 997 int rc; 998 999 rc = cmlb_ioctl(dkp->dk_cmlbhandle, dev, cmd, arg, flag, 1000 credp, rvalp, 0); 1001 if (cmd == DKIOCSVTOC || cmd == DKIOCSEXTVTOC) 1002 cmdk_devid_setup(dkp); 1003 return (rc); 1004 } 1005 1006 case DIOCTL_RWCMD: { 1007 struct dadkio_rwcmd *rwcmdp; 1008 int status; 1009 1010 rwcmdp = kmem_alloc(sizeof (struct dadkio_rwcmd), KM_SLEEP); 1011 1012 status = rwcmd_copyin(rwcmdp, (caddr_t)arg, flag); 1013 1014 if (status == 0) { 1015 bzero(&(rwcmdp->status), sizeof (struct dadkio_status)); 1016 status = dadk_ioctl(DKTP_DATA, 1017 dev, 1018 cmd, 1019 (uintptr_t)rwcmdp, 1020 flag, 1021 credp, 1022 rvalp); 1023 } 1024 if (status == 0) 1025 status = rwcmd_copyout(rwcmdp, 
(caddr_t)arg, flag); 1026 1027 kmem_free(rwcmdp, sizeof (struct dadkio_rwcmd)); 1028 return (status); 1029 } 1030 1031 default: 1032 return (dadk_ioctl(DKTP_DATA, 1033 dev, 1034 cmd, 1035 arg, 1036 flag, 1037 credp, 1038 rvalp)); 1039 } 1040 } 1041 1042 /*ARGSUSED1*/ 1043 static int 1044 cmdkclose(dev_t dev, int flag, int otyp, cred_t *credp) 1045 { 1046 int part; 1047 ulong_t partbit; 1048 int instance; 1049 struct cmdk *dkp; 1050 int lastclose = 1; 1051 int i; 1052 1053 instance = CMDKUNIT(dev); 1054 if (!(dkp = ddi_get_soft_state(cmdk_state, instance)) || 1055 (otyp >= OTYPCNT)) 1056 return (ENXIO); 1057 1058 mutex_enter(&dkp->dk_mutex); 1059 1060 /* check if device has been opened */ 1061 ASSERT(cmdk_isopen(dkp, dev)); 1062 if (!(dkp->dk_flag & CMDK_OPEN)) { 1063 mutex_exit(&dkp->dk_mutex); 1064 return (ENXIO); 1065 } 1066 1067 while (dkp->dk_flag & CMDK_SUSPEND) { 1068 cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex); 1069 } 1070 1071 part = CMDKPART(dev); 1072 partbit = 1 << part; 1073 1074 /* account for close */ 1075 if (otyp == OTYP_LYR) { 1076 ASSERT(dkp->dk_open_lyr[part] > 0); 1077 if (dkp->dk_open_lyr[part]) 1078 dkp->dk_open_lyr[part]--; 1079 } else { 1080 ASSERT((dkp->dk_open_reg[otyp] & partbit) != 0); 1081 dkp->dk_open_reg[otyp] &= ~partbit; 1082 } 1083 dkp->dk_open_exl &= ~partbit; 1084 1085 for (i = 0; i < CMDK_MAXPART; i++) 1086 if (dkp->dk_open_lyr[i] != 0) { 1087 lastclose = 0; 1088 break; 1089 } 1090 1091 if (lastclose) 1092 for (i = 0; i < OTYPCNT; i++) 1093 if (dkp->dk_open_reg[i] != 0) { 1094 lastclose = 0; 1095 break; 1096 } 1097 1098 mutex_exit(&dkp->dk_mutex); 1099 1100 if (lastclose) 1101 cmlb_invalidate(dkp->dk_cmlbhandle, 0); 1102 1103 return (DDI_SUCCESS); 1104 } 1105 1106 /*ARGSUSED3*/ 1107 static int 1108 cmdkopen(dev_t *dev_p, int flag, int otyp, cred_t *credp) 1109 { 1110 dev_t dev = *dev_p; 1111 int part; 1112 ulong_t partbit; 1113 int instance; 1114 struct cmdk *dkp; 1115 diskaddr_t p_lblksrt; 1116 diskaddr_t p_lblkcnt; 1117 int 
i; 1118 int nodelay; 1119 1120 instance = CMDKUNIT(dev); 1121 if (!(dkp = ddi_get_soft_state(cmdk_state, instance))) 1122 return (ENXIO); 1123 1124 if (otyp >= OTYPCNT) 1125 return (EINVAL); 1126 1127 mutex_enter(&dkp->dk_mutex); 1128 while (dkp->dk_flag & CMDK_SUSPEND) { 1129 cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex); 1130 } 1131 mutex_exit(&dkp->dk_mutex); 1132 1133 part = CMDKPART(dev); 1134 partbit = 1 << part; 1135 nodelay = (flag & (FNDELAY | FNONBLOCK)); 1136 1137 mutex_enter(&dkp->dk_mutex); 1138 1139 if (cmlb_validate(dkp->dk_cmlbhandle, 0, 0) != 0) { 1140 1141 /* fail if not doing non block open */ 1142 if (!nodelay) { 1143 mutex_exit(&dkp->dk_mutex); 1144 return (ENXIO); 1145 } 1146 } else if (cmlb_partinfo(dkp->dk_cmlbhandle, part, &p_lblkcnt, 1147 &p_lblksrt, NULL, NULL, 0) == 0) { 1148 1149 if (p_lblkcnt <= 0 && (!nodelay || otyp != OTYP_CHR)) { 1150 mutex_exit(&dkp->dk_mutex); 1151 return (ENXIO); 1152 } 1153 } else { 1154 /* fail if not doing non block open */ 1155 if (!nodelay) { 1156 mutex_exit(&dkp->dk_mutex); 1157 return (ENXIO); 1158 } 1159 } 1160 1161 if ((DKTP_EXT->tg_rdonly) && (flag & FWRITE)) { 1162 mutex_exit(&dkp->dk_mutex); 1163 return (EROFS); 1164 } 1165 1166 /* check for part already opend exclusively */ 1167 if (dkp->dk_open_exl & partbit) 1168 goto excl_open_fail; 1169 1170 /* check if we can establish exclusive open */ 1171 if (flag & FEXCL) { 1172 if (dkp->dk_open_lyr[part]) 1173 goto excl_open_fail; 1174 for (i = 0; i < OTYPCNT; i++) { 1175 if (dkp->dk_open_reg[i] & partbit) 1176 goto excl_open_fail; 1177 } 1178 } 1179 1180 /* open will succeed, account for open */ 1181 dkp->dk_flag |= CMDK_OPEN; 1182 if (otyp == OTYP_LYR) 1183 dkp->dk_open_lyr[part]++; 1184 else 1185 dkp->dk_open_reg[otyp] |= partbit; 1186 if (flag & FEXCL) 1187 dkp->dk_open_exl |= partbit; 1188 1189 mutex_exit(&dkp->dk_mutex); 1190 return (DDI_SUCCESS); 1191 1192 excl_open_fail: 1193 mutex_exit(&dkp->dk_mutex); 1194 return (EBUSY); 1195 } 1196 1197 /* 1198 
* read routine 1199 */ 1200 /*ARGSUSED2*/ 1201 static int 1202 cmdkread(dev_t dev, struct uio *uio, cred_t *credp) 1203 { 1204 return (cmdkrw(dev, uio, B_READ)); 1205 } 1206 1207 /* 1208 * async read routine 1209 */ 1210 /*ARGSUSED2*/ 1211 static int 1212 cmdkaread(dev_t dev, struct aio_req *aio, cred_t *credp) 1213 { 1214 return (cmdkarw(dev, aio, B_READ)); 1215 } 1216 1217 /* 1218 * write routine 1219 */ 1220 /*ARGSUSED2*/ 1221 static int 1222 cmdkwrite(dev_t dev, struct uio *uio, cred_t *credp) 1223 { 1224 return (cmdkrw(dev, uio, B_WRITE)); 1225 } 1226 1227 /* 1228 * async write routine 1229 */ 1230 /*ARGSUSED2*/ 1231 static int 1232 cmdkawrite(dev_t dev, struct aio_req *aio, cred_t *credp) 1233 { 1234 return (cmdkarw(dev, aio, B_WRITE)); 1235 } 1236 1237 static void 1238 cmdkmin(struct buf *bp) 1239 { 1240 if (bp->b_bcount > DK_MAXRECSIZE) 1241 bp->b_bcount = DK_MAXRECSIZE; 1242 } 1243 1244 static int 1245 cmdkrw(dev_t dev, struct uio *uio, int flag) 1246 { 1247 int instance; 1248 struct cmdk *dkp; 1249 1250 instance = CMDKUNIT(dev); 1251 if (!(dkp = ddi_get_soft_state(cmdk_state, instance))) 1252 return (ENXIO); 1253 1254 mutex_enter(&dkp->dk_mutex); 1255 while (dkp->dk_flag & CMDK_SUSPEND) { 1256 cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex); 1257 } 1258 mutex_exit(&dkp->dk_mutex); 1259 1260 return (physio(cmdkstrategy, (struct buf *)0, dev, flag, cmdkmin, uio)); 1261 } 1262 1263 static int 1264 cmdkarw(dev_t dev, struct aio_req *aio, int flag) 1265 { 1266 int instance; 1267 struct cmdk *dkp; 1268 1269 instance = CMDKUNIT(dev); 1270 if (!(dkp = ddi_get_soft_state(cmdk_state, instance))) 1271 return (ENXIO); 1272 1273 mutex_enter(&dkp->dk_mutex); 1274 while (dkp->dk_flag & CMDK_SUSPEND) { 1275 cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex); 1276 } 1277 mutex_exit(&dkp->dk_mutex); 1278 1279 return (aphysio(cmdkstrategy, anocancel, dev, flag, cmdkmin, aio)); 1280 } 1281 1282 /* 1283 * strategy routine 1284 */ 1285 static int 1286 cmdkstrategy(struct buf *bp) 1287 
{ 1288 int instance; 1289 struct cmdk *dkp; 1290 long d_cnt; 1291 diskaddr_t p_lblksrt; 1292 diskaddr_t p_lblkcnt; 1293 1294 instance = CMDKUNIT(bp->b_edev); 1295 if (cmdk_indump || !(dkp = ddi_get_soft_state(cmdk_state, instance)) || 1296 (dkblock(bp) < 0)) { 1297 bp->b_resid = bp->b_bcount; 1298 SETBPERR(bp, ENXIO); 1299 biodone(bp); 1300 return (0); 1301 } 1302 1303 mutex_enter(&dkp->dk_mutex); 1304 ASSERT(cmdk_isopen(dkp, bp->b_edev)); 1305 while (dkp->dk_flag & CMDK_SUSPEND) { 1306 cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex); 1307 } 1308 mutex_exit(&dkp->dk_mutex); 1309 1310 bp->b_flags &= ~(B_DONE|B_ERROR); 1311 bp->b_resid = 0; 1312 bp->av_back = NULL; 1313 1314 /* 1315 * only re-read the vtoc if necessary (force == FALSE) 1316 */ 1317 if (cmlb_partinfo(dkp->dk_cmlbhandle, CMDKPART(bp->b_edev), 1318 &p_lblkcnt, &p_lblksrt, NULL, NULL, 0)) { 1319 SETBPERR(bp, ENXIO); 1320 } 1321 1322 if ((bp->b_bcount & (NBPSCTR-1)) || (dkblock(bp) > p_lblkcnt)) 1323 SETBPERR(bp, ENXIO); 1324 1325 if ((bp->b_flags & B_ERROR) || (dkblock(bp) == p_lblkcnt)) { 1326 bp->b_resid = bp->b_bcount; 1327 biodone(bp); 1328 return (0); 1329 } 1330 1331 d_cnt = bp->b_bcount >> SCTRSHFT; 1332 if ((dkblock(bp) + d_cnt) > p_lblkcnt) { 1333 bp->b_resid = ((dkblock(bp) + d_cnt) - p_lblkcnt) << SCTRSHFT; 1334 bp->b_bcount -= bp->b_resid; 1335 } 1336 1337 SET_BP_SEC(bp, ((ulong_t)(p_lblksrt + dkblock(bp)))); 1338 if (dadk_strategy(DKTP_DATA, bp) != DDI_SUCCESS) { 1339 bp->b_resid += bp->b_bcount; 1340 biodone(bp); 1341 } 1342 return (0); 1343 } 1344 1345 static int 1346 cmdk_create_obj(dev_info_t *dip, struct cmdk *dkp) 1347 { 1348 struct scsi_device *devp; 1349 opaque_t queobjp = NULL; 1350 opaque_t flcobjp = NULL; 1351 char que_keyvalp[64]; 1352 int que_keylen; 1353 char flc_keyvalp[64]; 1354 int flc_keylen; 1355 1356 ASSERT(mutex_owned(&dkp->dk_mutex)); 1357 1358 /* Create linkage to queueing routines based on property */ 1359 que_keylen = sizeof (que_keyvalp); 1360 if 
(ddi_prop_op(DDI_DEV_T_NONE, dip, PROP_LEN_AND_VAL_BUF, 1361 DDI_PROP_CANSLEEP, "queue", que_keyvalp, &que_keylen) != 1362 DDI_PROP_SUCCESS) { 1363 cmn_err(CE_WARN, "cmdk_create_obj: queue property undefined"); 1364 return (DDI_FAILURE); 1365 } 1366 que_keyvalp[que_keylen] = (char)0; 1367 1368 if (strcmp(que_keyvalp, "qfifo") == 0) { 1369 queobjp = (opaque_t)qfifo_create(); 1370 } else if (strcmp(que_keyvalp, "qsort") == 0) { 1371 queobjp = (opaque_t)qsort_create(); 1372 } else { 1373 return (DDI_FAILURE); 1374 } 1375 1376 /* Create linkage to dequeueing routines based on property */ 1377 flc_keylen = sizeof (flc_keyvalp); 1378 if (ddi_prop_op(DDI_DEV_T_NONE, dip, PROP_LEN_AND_VAL_BUF, 1379 DDI_PROP_CANSLEEP, "flow_control", flc_keyvalp, &flc_keylen) != 1380 DDI_PROP_SUCCESS) { 1381 cmn_err(CE_WARN, 1382 "cmdk_create_obj: flow-control property undefined"); 1383 return (DDI_FAILURE); 1384 } 1385 1386 flc_keyvalp[flc_keylen] = (char)0; 1387 1388 if (strcmp(flc_keyvalp, "dsngl") == 0) { 1389 flcobjp = (opaque_t)dsngl_create(); 1390 } else if (strcmp(flc_keyvalp, "dmult") == 0) { 1391 flcobjp = (opaque_t)dmult_create(); 1392 } else { 1393 return (DDI_FAILURE); 1394 } 1395 1396 /* populate bbh_obj object stored in dkp */ 1397 dkp->dk_bbh_obj.bbh_data = dkp; 1398 dkp->dk_bbh_obj.bbh_ops = &cmdk_bbh_ops; 1399 1400 /* create linkage to dadk */ 1401 dkp->dk_tgobjp = (opaque_t)dadk_create(); 1402 1403 devp = ddi_get_driver_private(dip); 1404 (void) dadk_init(DKTP_DATA, devp, flcobjp, queobjp, &dkp->dk_bbh_obj, 1405 NULL); 1406 1407 return (DDI_SUCCESS); 1408 } 1409 1410 static void 1411 cmdk_destroy_obj(dev_info_t *dip, struct cmdk *dkp) 1412 { 1413 char que_keyvalp[64]; 1414 int que_keylen; 1415 char flc_keyvalp[64]; 1416 int flc_keylen; 1417 1418 ASSERT(mutex_owned(&dkp->dk_mutex)); 1419 1420 (void) dadk_free((dkp->dk_tgobjp)); 1421 dkp->dk_tgobjp = NULL; 1422 1423 que_keylen = sizeof (que_keyvalp); 1424 if (ddi_prop_op(DDI_DEV_T_NONE, dip, PROP_LEN_AND_VAL_BUF, 1425 
DDI_PROP_CANSLEEP, "queue", que_keyvalp, &que_keylen) != 1426 DDI_PROP_SUCCESS) { 1427 cmn_err(CE_WARN, "cmdk_destroy_obj: queue property undefined"); 1428 return; 1429 } 1430 que_keyvalp[que_keylen] = (char)0; 1431 1432 flc_keylen = sizeof (flc_keyvalp); 1433 if (ddi_prop_op(DDI_DEV_T_NONE, dip, PROP_LEN_AND_VAL_BUF, 1434 DDI_PROP_CANSLEEP, "flow_control", flc_keyvalp, &flc_keylen) != 1435 DDI_PROP_SUCCESS) { 1436 cmn_err(CE_WARN, 1437 "cmdk_destroy_obj: flow-control property undefined"); 1438 return; 1439 } 1440 flc_keyvalp[flc_keylen] = (char)0; 1441 } 1442 /*ARGSUSED5*/ 1443 static int 1444 cmdk_lb_rdwr(dev_info_t *dip, uchar_t cmd, void *bufaddr, 1445 diskaddr_t start, size_t count, void *tg_cookie) 1446 { 1447 struct cmdk *dkp; 1448 opaque_t handle; 1449 int rc = 0; 1450 char *bufa; 1451 1452 dkp = ddi_get_soft_state(cmdk_state, ddi_get_instance(dip)); 1453 if (dkp == NULL) 1454 return (ENXIO); 1455 1456 if (cmd != TG_READ && cmd != TG_WRITE) 1457 return (EINVAL); 1458 1459 /* count must be multiple of 512 */ 1460 count = (count + NBPSCTR - 1) & -NBPSCTR; 1461 handle = dadk_iob_alloc(DKTP_DATA, start, count, KM_SLEEP); 1462 if (!handle) 1463 return (ENOMEM); 1464 1465 if (cmd == TG_READ) { 1466 bufa = dadk_iob_xfer(DKTP_DATA, handle, B_READ); 1467 if (!bufa) 1468 rc = EIO; 1469 else 1470 bcopy(bufa, bufaddr, count); 1471 } else { 1472 bufa = dadk_iob_htoc(DKTP_DATA, handle); 1473 bcopy(bufaddr, bufa, count); 1474 bufa = dadk_iob_xfer(DKTP_DATA, handle, B_WRITE); 1475 if (!bufa) 1476 rc = EIO; 1477 } 1478 (void) dadk_iob_free(DKTP_DATA, handle); 1479 1480 return (rc); 1481 } 1482 1483 /*ARGSUSED3*/ 1484 static int 1485 cmdk_lb_getinfo(dev_info_t *dip, int cmd, void *arg, void *tg_cookie) 1486 { 1487 1488 struct cmdk *dkp; 1489 struct tgdk_geom phyg; 1490 1491 1492 dkp = ddi_get_soft_state(cmdk_state, ddi_get_instance(dip)); 1493 if (dkp == NULL) 1494 return (ENXIO); 1495 1496 switch (cmd) { 1497 case TG_GETPHYGEOM: { 1498 cmlb_geom_t *phygeomp = (cmlb_geom_t 
*)arg; 1499 1500 /* dadk_getphygeom always returns success */ 1501 (void) dadk_getphygeom(DKTP_DATA, &phyg); 1502 1503 phygeomp->g_capacity = phyg.g_cap; 1504 phygeomp->g_nsect = phyg.g_sec; 1505 phygeomp->g_nhead = phyg.g_head; 1506 phygeomp->g_acyl = phyg.g_acyl; 1507 phygeomp->g_ncyl = phyg.g_cyl; 1508 phygeomp->g_secsize = phyg.g_secsiz; 1509 phygeomp->g_intrlv = 1; 1510 phygeomp->g_rpm = 3600; 1511 1512 return (0); 1513 } 1514 1515 case TG_GETVIRTGEOM: { 1516 cmlb_geom_t *virtgeomp = (cmlb_geom_t *)arg; 1517 diskaddr_t capacity; 1518 1519 (void) dadk_getgeom(DKTP_DATA, &phyg); 1520 capacity = phyg.g_cap; 1521 1522 /* 1523 * If the controller returned us something that doesn't 1524 * really fit into an Int 13/function 8 geometry 1525 * result, just fail the ioctl. See PSARC 1998/313. 1526 */ 1527 if (capacity < 0 || capacity >= 63 * 254 * 1024) 1528 return (EINVAL); 1529 1530 virtgeomp->g_capacity = capacity; 1531 virtgeomp->g_nsect = 63; 1532 virtgeomp->g_nhead = 254; 1533 virtgeomp->g_ncyl = capacity / (63 * 254); 1534 virtgeomp->g_acyl = 0; 1535 virtgeomp->g_secsize = 512; 1536 virtgeomp->g_intrlv = 1; 1537 virtgeomp->g_rpm = 3600; 1538 1539 return (0); 1540 } 1541 1542 case TG_GETCAPACITY: 1543 case TG_GETBLOCKSIZE: 1544 { 1545 1546 /* dadk_getphygeom always returns success */ 1547 (void) dadk_getphygeom(DKTP_DATA, &phyg); 1548 if (cmd == TG_GETCAPACITY) 1549 *(diskaddr_t *)arg = phyg.g_cap; 1550 else 1551 *(uint32_t *)arg = (uint32_t)phyg.g_secsiz; 1552 1553 return (0); 1554 } 1555 1556 case TG_GETATTR: { 1557 tg_attribute_t *tgattribute = (tg_attribute_t *)arg; 1558 if ((DKTP_EXT->tg_rdonly)) 1559 tgattribute->media_is_writable = FALSE; 1560 else 1561 tgattribute->media_is_writable = TRUE; 1562 1563 return (0); 1564 } 1565 1566 default: 1567 return (ENOTTY); 1568 } 1569 } 1570 1571 1572 1573 1574 1575 /* 1576 * Create and register the devid. 1577 * There are 4 different ways we can get a device id: 1578 * 1. Already have one - nothing to do 1579 * 2. 
Build one from the drive's model and serial numbers 1580 * 3. Read one from the disk (first sector of last track) 1581 * 4. Fabricate one and write it on the disk. 1582 * If any of these succeeds, register the deviceid 1583 */ 1584 static void 1585 cmdk_devid_setup(struct cmdk *dkp) 1586 { 1587 int rc; 1588 1589 /* Try options until one succeeds, or all have failed */ 1590 1591 /* 1. All done if already registered */ 1592 if (dkp->dk_devid != NULL) 1593 return; 1594 1595 /* 2. Build a devid from the model and serial number */ 1596 rc = cmdk_devid_modser(dkp); 1597 if (rc != DDI_SUCCESS) { 1598 /* 3. Read devid from the disk, if present */ 1599 rc = cmdk_devid_read(dkp); 1600 1601 /* 4. otherwise make one up and write it on the disk */ 1602 if (rc != DDI_SUCCESS) 1603 rc = cmdk_devid_fabricate(dkp); 1604 } 1605 1606 /* If we managed to get a devid any of the above ways, register it */ 1607 if (rc == DDI_SUCCESS) 1608 (void) ddi_devid_register(dkp->dk_dip, dkp->dk_devid); 1609 1610 } 1611 1612 /* 1613 * Build a devid from the model and serial number 1614 * Return DDI_SUCCESS or DDI_FAILURE. 1615 */ 1616 static int 1617 cmdk_devid_modser(struct cmdk *dkp) 1618 { 1619 int rc = DDI_FAILURE; 1620 char *hwid; 1621 int modlen; 1622 int serlen; 1623 1624 /* 1625 * device ID is a concatenation of model number, '=', serial number. 
1626 */ 1627 hwid = kmem_alloc(CMDK_HWIDLEN, KM_SLEEP); 1628 modlen = cmdk_get_modser(dkp, DIOCTL_GETMODEL, hwid, CMDK_HWIDLEN); 1629 if (modlen == 0) { 1630 rc = DDI_FAILURE; 1631 goto err; 1632 } 1633 hwid[modlen++] = '='; 1634 serlen = cmdk_get_modser(dkp, DIOCTL_GETSERIAL, 1635 hwid + modlen, CMDK_HWIDLEN - modlen); 1636 if (serlen == 0) { 1637 rc = DDI_FAILURE; 1638 goto err; 1639 } 1640 hwid[modlen + serlen] = 0; 1641 1642 /* Initialize the device ID, trailing NULL not included */ 1643 rc = ddi_devid_init(dkp->dk_dip, DEVID_ATA_SERIAL, modlen + serlen, 1644 hwid, &dkp->dk_devid); 1645 if (rc != DDI_SUCCESS) { 1646 rc = DDI_FAILURE; 1647 goto err; 1648 } 1649 1650 rc = DDI_SUCCESS; 1651 1652 err: 1653 kmem_free(hwid, CMDK_HWIDLEN); 1654 return (rc); 1655 } 1656 1657 static int 1658 cmdk_get_modser(struct cmdk *dkp, int ioccmd, char *buf, int len) 1659 { 1660 dadk_ioc_string_t strarg; 1661 int rval; 1662 char *s; 1663 char ch; 1664 boolean_t ret; 1665 int i; 1666 int tb; 1667 1668 strarg.is_buf = buf; 1669 strarg.is_size = len; 1670 if (dadk_ioctl(DKTP_DATA, 1671 dkp->dk_dev, 1672 ioccmd, 1673 (uintptr_t)&strarg, 1674 FNATIVE | FKIOCTL, 1675 NULL, 1676 &rval) != 0) 1677 return (0); 1678 1679 /* 1680 * valid model/serial string must contain a non-zero non-space 1681 * trim trailing spaces/NULL 1682 */ 1683 ret = B_FALSE; 1684 s = buf; 1685 for (i = 0; i < strarg.is_size; i++) { 1686 ch = *s++; 1687 if (ch != ' ' && ch != '\0') 1688 tb = i + 1; 1689 if (ch != ' ' && ch != '\0' && ch != '0') 1690 ret = B_TRUE; 1691 } 1692 1693 if (ret == B_FALSE) 1694 return (0); 1695 1696 return (tb); 1697 } 1698 1699 /* 1700 * Read a devid from on the first block of the last track of 1701 * the last cylinder. Make sure what we read is a valid devid. 1702 * Return DDI_SUCCESS or DDI_FAILURE. 
1703 */ 1704 static int 1705 cmdk_devid_read(struct cmdk *dkp) 1706 { 1707 diskaddr_t blk; 1708 struct dk_devid *dkdevidp; 1709 uint_t *ip; 1710 int chksum; 1711 int i, sz; 1712 tgdk_iob_handle handle = NULL; 1713 int rc = DDI_FAILURE; 1714 1715 if (cmlb_get_devid_block(dkp->dk_cmlbhandle, &blk, 0)) 1716 goto err; 1717 1718 /* read the devid */ 1719 handle = dadk_iob_alloc(DKTP_DATA, blk, NBPSCTR, KM_SLEEP); 1720 if (handle == NULL) 1721 goto err; 1722 1723 dkdevidp = (struct dk_devid *)dadk_iob_xfer(DKTP_DATA, handle, B_READ); 1724 if (dkdevidp == NULL) 1725 goto err; 1726 1727 /* Validate the revision */ 1728 if ((dkdevidp->dkd_rev_hi != DK_DEVID_REV_MSB) || 1729 (dkdevidp->dkd_rev_lo != DK_DEVID_REV_LSB)) 1730 goto err; 1731 1732 /* Calculate the checksum */ 1733 chksum = 0; 1734 ip = (uint_t *)dkdevidp; 1735 for (i = 0; i < ((NBPSCTR - sizeof (int))/sizeof (int)); i++) 1736 chksum ^= ip[i]; 1737 if (DKD_GETCHKSUM(dkdevidp) != chksum) 1738 goto err; 1739 1740 /* Validate the device id */ 1741 if (ddi_devid_valid((ddi_devid_t)dkdevidp->dkd_devid) != DDI_SUCCESS) 1742 goto err; 1743 1744 /* keep a copy of the device id */ 1745 sz = ddi_devid_sizeof((ddi_devid_t)dkdevidp->dkd_devid); 1746 dkp->dk_devid = kmem_alloc(sz, KM_SLEEP); 1747 bcopy(dkdevidp->dkd_devid, dkp->dk_devid, sz); 1748 1749 rc = DDI_SUCCESS; 1750 1751 err: 1752 if (handle != NULL) 1753 (void) dadk_iob_free(DKTP_DATA, handle); 1754 return (rc); 1755 } 1756 1757 /* 1758 * Create a devid and write it on the first block of the last track of 1759 * the last cylinder. 1760 * Return DDI_SUCCESS or DDI_FAILURE. 
1761 */ 1762 static int 1763 cmdk_devid_fabricate(struct cmdk *dkp) 1764 { 1765 ddi_devid_t devid = NULL; /* devid made by ddi_devid_init */ 1766 struct dk_devid *dkdevidp; /* devid struct stored on disk */ 1767 diskaddr_t blk; 1768 tgdk_iob_handle handle = NULL; 1769 uint_t *ip, chksum; 1770 int i; 1771 int rc = DDI_FAILURE; 1772 1773 if (ddi_devid_init(dkp->dk_dip, DEVID_FAB, 0, NULL, &devid) != 1774 DDI_SUCCESS) 1775 goto err; 1776 1777 if (cmlb_get_devid_block(dkp->dk_cmlbhandle, &blk, 0)) { 1778 /* no device id block address */ 1779 goto err; 1780 } 1781 1782 handle = dadk_iob_alloc(DKTP_DATA, blk, NBPSCTR, KM_SLEEP); 1783 if (!handle) 1784 goto err; 1785 1786 /* Locate the buffer */ 1787 dkdevidp = (struct dk_devid *)dadk_iob_htoc(DKTP_DATA, handle); 1788 1789 /* Fill in the revision */ 1790 bzero(dkdevidp, NBPSCTR); 1791 dkdevidp->dkd_rev_hi = DK_DEVID_REV_MSB; 1792 dkdevidp->dkd_rev_lo = DK_DEVID_REV_LSB; 1793 1794 /* Copy in the device id */ 1795 i = ddi_devid_sizeof(devid); 1796 if (i > DK_DEVID_SIZE) 1797 goto err; 1798 bcopy(devid, dkdevidp->dkd_devid, i); 1799 1800 /* Calculate the chksum */ 1801 chksum = 0; 1802 ip = (uint_t *)dkdevidp; 1803 for (i = 0; i < ((NBPSCTR - sizeof (int))/sizeof (int)); i++) 1804 chksum ^= ip[i]; 1805 1806 /* Fill in the checksum */ 1807 DKD_FORMCHKSUM(chksum, dkdevidp); 1808 1809 /* write the devid */ 1810 (void) dadk_iob_xfer(DKTP_DATA, handle, B_WRITE); 1811 1812 dkp->dk_devid = devid; 1813 1814 rc = DDI_SUCCESS; 1815 1816 err: 1817 if (handle != NULL) 1818 (void) dadk_iob_free(DKTP_DATA, handle); 1819 1820 if (rc != DDI_SUCCESS && devid != NULL) 1821 ddi_devid_free(devid); 1822 1823 return (rc); 1824 } 1825 1826 static void 1827 cmdk_bbh_free_alts(struct cmdk *dkp) 1828 { 1829 if (dkp->dk_alts_hdl) { 1830 (void) dadk_iob_free(DKTP_DATA, dkp->dk_alts_hdl); 1831 kmem_free(dkp->dk_slc_cnt, 1832 NDKMAP * (sizeof (uint32_t) + sizeof (struct alts_ent *))); 1833 dkp->dk_alts_hdl = NULL; 1834 } 1835 } 1836 1837 static void 1838 
cmdk_bbh_reopen(struct cmdk *dkp) 1839 { 1840 tgdk_iob_handle handle = NULL; 1841 diskaddr_t slcb, slcn, slce; 1842 struct alts_parttbl *ap; 1843 struct alts_ent *enttblp; 1844 uint32_t altused; 1845 uint32_t altbase; 1846 uint32_t altlast; 1847 int alts; 1848 uint16_t vtoctag; 1849 int i, j; 1850 1851 /* find slice with V_ALTSCTR tag */ 1852 for (alts = 0; alts < NDKMAP; alts++) { 1853 if (cmlb_partinfo( 1854 dkp->dk_cmlbhandle, 1855 alts, 1856 &slcn, 1857 &slcb, 1858 NULL, 1859 &vtoctag, 1860 0)) { 1861 goto empty; /* no partition table exists */ 1862 } 1863 1864 if (vtoctag == V_ALTSCTR && slcn > 1) 1865 break; 1866 } 1867 if (alts >= NDKMAP) { 1868 goto empty; /* no V_ALTSCTR slice defined */ 1869 } 1870 1871 /* read in ALTS label block */ 1872 handle = dadk_iob_alloc(DKTP_DATA, slcb, NBPSCTR, KM_SLEEP); 1873 if (!handle) { 1874 goto empty; 1875 } 1876 1877 ap = (struct alts_parttbl *)dadk_iob_xfer(DKTP_DATA, handle, B_READ); 1878 if (!ap || (ap->alts_sanity != ALTS_SANITY)) { 1879 goto empty; 1880 } 1881 1882 altused = ap->alts_ent_used; /* number of BB entries */ 1883 altbase = ap->alts_ent_base; /* blk offset from begin slice */ 1884 altlast = ap->alts_ent_end; /* blk offset to last block */ 1885 /* ((altused * sizeof (struct alts_ent) + NBPSCTR - 1) & ~NBPSCTR) */ 1886 1887 if (altused == 0 || 1888 altbase < 1 || 1889 altbase > altlast || 1890 altlast >= slcn) { 1891 goto empty; 1892 } 1893 (void) dadk_iob_free(DKTP_DATA, handle); 1894 1895 /* read in ALTS remapping table */ 1896 handle = dadk_iob_alloc(DKTP_DATA, 1897 slcb + altbase, 1898 (altlast - altbase + 1) << SCTRSHFT, KM_SLEEP); 1899 if (!handle) { 1900 goto empty; 1901 } 1902 1903 enttblp = (struct alts_ent *)dadk_iob_xfer(DKTP_DATA, handle, B_READ); 1904 if (!enttblp) { 1905 goto empty; 1906 } 1907 1908 rw_enter(&dkp->dk_bbh_mutex, RW_WRITER); 1909 1910 /* allocate space for dk_slc_cnt and dk_slc_ent tables */ 1911 if (dkp->dk_slc_cnt == NULL) { 1912 dkp->dk_slc_cnt = kmem_alloc(NDKMAP * 1913 
(sizeof (long) + sizeof (struct alts_ent *)), KM_SLEEP); 1914 } 1915 dkp->dk_slc_ent = (struct alts_ent **)(dkp->dk_slc_cnt + NDKMAP); 1916 1917 /* free previous BB table (if any) */ 1918 if (dkp->dk_alts_hdl) { 1919 (void) dadk_iob_free(DKTP_DATA, dkp->dk_alts_hdl); 1920 dkp->dk_alts_hdl = NULL; 1921 dkp->dk_altused = 0; 1922 } 1923 1924 /* save linkage to new BB table */ 1925 dkp->dk_alts_hdl = handle; 1926 dkp->dk_altused = altused; 1927 1928 /* 1929 * build indexes to BB table by slice 1930 * effectively we have 1931 * struct alts_ent *enttblp[altused]; 1932 * 1933 * uint32_t dk_slc_cnt[NDKMAP]; 1934 * struct alts_ent *dk_slc_ent[NDKMAP]; 1935 */ 1936 for (i = 0; i < NDKMAP; i++) { 1937 if (cmlb_partinfo( 1938 dkp->dk_cmlbhandle, 1939 i, 1940 &slcn, 1941 &slcb, 1942 NULL, 1943 NULL, 1944 0)) { 1945 goto empty1; 1946 } 1947 1948 dkp->dk_slc_cnt[i] = 0; 1949 if (slcn == 0) 1950 continue; /* slice is not allocated */ 1951 1952 /* last block in slice */ 1953 slce = slcb + slcn - 1; 1954 1955 /* find first remap entry in after beginnning of slice */ 1956 for (j = 0; j < altused; j++) { 1957 if (enttblp[j].bad_start + enttblp[j].bad_end >= slcb) 1958 break; 1959 } 1960 dkp->dk_slc_ent[i] = enttblp + j; 1961 1962 /* count remap entrys until end of slice */ 1963 for (; j < altused && enttblp[j].bad_start <= slce; j++) { 1964 dkp->dk_slc_cnt[i] += 1; 1965 } 1966 } 1967 1968 rw_exit(&dkp->dk_bbh_mutex); 1969 return; 1970 1971 empty: 1972 rw_enter(&dkp->dk_bbh_mutex, RW_WRITER); 1973 empty1: 1974 if (handle && handle != dkp->dk_alts_hdl) 1975 (void) dadk_iob_free(DKTP_DATA, handle); 1976 1977 if (dkp->dk_alts_hdl) { 1978 (void) dadk_iob_free(DKTP_DATA, dkp->dk_alts_hdl); 1979 dkp->dk_alts_hdl = NULL; 1980 } 1981 1982 rw_exit(&dkp->dk_bbh_mutex); 1983 } 1984 1985 /*ARGSUSED*/ 1986 static bbh_cookie_t 1987 cmdk_bbh_htoc(opaque_t bbh_data, opaque_t handle) 1988 { 1989 struct bbh_handle *hp; 1990 bbh_cookie_t ckp; 1991 1992 hp = (struct bbh_handle *)handle; 1993 ckp = 
hp->h_cktab + hp->h_idx; 1994 hp->h_idx++; 1995 return (ckp); 1996 } 1997 1998 /*ARGSUSED*/ 1999 static void 2000 cmdk_bbh_freehandle(opaque_t bbh_data, opaque_t handle) 2001 { 2002 struct bbh_handle *hp; 2003 2004 hp = (struct bbh_handle *)handle; 2005 kmem_free(handle, (sizeof (struct bbh_handle) + 2006 (hp->h_totck * (sizeof (struct bbh_cookie))))); 2007 } 2008 2009 2010 /* 2011 * cmdk_bbh_gethandle remaps the bad sectors to alternates. 2012 * There are 7 different cases when the comparison is made 2013 * between the bad sector cluster and the disk section. 2014 * 2015 * bad sector cluster gggggggggggbbbbbbbggggggggggg 2016 * case 1: ddddd 2017 * case 2: -d----- 2018 * case 3: ddddd 2019 * case 4: dddddddddddd 2020 * case 5: ddddddd----- 2021 * case 6: ---ddddddd 2022 * case 7: ddddddd 2023 * 2024 * where: g = good sector, b = bad sector 2025 * d = sector in disk section 2026 * - = disk section may be extended to cover those disk area 2027 */ 2028 2029 static opaque_t 2030 cmdk_bbh_gethandle(opaque_t bbh_data, struct buf *bp) 2031 { 2032 struct cmdk *dkp = (struct cmdk *)bbh_data; 2033 struct bbh_handle *hp; 2034 struct bbh_cookie *ckp; 2035 struct alts_ent *altp; 2036 uint32_t alts_used; 2037 uint32_t part = CMDKPART(bp->b_edev); 2038 daddr32_t lastsec; 2039 long d_count; 2040 int i; 2041 int idx; 2042 int cnt; 2043 2044 if (part >= V_NUMPAR) 2045 return (NULL); 2046 2047 /* 2048 * This if statement is atomic and it will succeed 2049 * if there are no bad blocks (almost always) 2050 * 2051 * so this if is performed outside of the rw_enter for speed 2052 * and then repeated inside the rw_enter for safety 2053 */ 2054 if (!dkp->dk_alts_hdl) { 2055 return (NULL); 2056 } 2057 2058 rw_enter(&dkp->dk_bbh_mutex, RW_READER); 2059 2060 if (dkp->dk_alts_hdl == NULL) { 2061 rw_exit(&dkp->dk_bbh_mutex); 2062 return (NULL); 2063 } 2064 2065 alts_used = dkp->dk_slc_cnt[part]; 2066 if (alts_used == 0) { 2067 rw_exit(&dkp->dk_bbh_mutex); 2068 return (NULL); 2069 } 2070 altp = 
dkp->dk_slc_ent[part]; 2071 2072 /* 2073 * binary search for the largest bad sector index in the alternate 2074 * entry table which overlaps or larger than the starting d_sec 2075 */ 2076 i = cmdk_bbh_bsearch(altp, alts_used, GET_BP_SEC(bp)); 2077 /* if starting sector is > the largest bad sector, return */ 2078 if (i == -1) { 2079 rw_exit(&dkp->dk_bbh_mutex); 2080 return (NULL); 2081 } 2082 /* i is the starting index. Set altp to the starting entry addr */ 2083 altp += i; 2084 2085 d_count = bp->b_bcount >> SCTRSHFT; 2086 lastsec = GET_BP_SEC(bp) + d_count - 1; 2087 2088 /* calculate the number of bad sectors */ 2089 for (idx = i, cnt = 0; idx < alts_used; idx++, altp++, cnt++) { 2090 if (lastsec < altp->bad_start) 2091 break; 2092 } 2093 2094 if (!cnt) { 2095 rw_exit(&dkp->dk_bbh_mutex); 2096 return (NULL); 2097 } 2098 2099 /* calculate the maximum number of reserved cookies */ 2100 cnt <<= 1; 2101 cnt++; 2102 2103 /* allocate the handle */ 2104 hp = (struct bbh_handle *)kmem_zalloc((sizeof (*hp) + 2105 (cnt * sizeof (*ckp))), KM_SLEEP); 2106 2107 hp->h_idx = 0; 2108 hp->h_totck = cnt; 2109 ckp = hp->h_cktab = (struct bbh_cookie *)(hp + 1); 2110 ckp[0].ck_sector = GET_BP_SEC(bp); 2111 ckp[0].ck_seclen = d_count; 2112 2113 altp = dkp->dk_slc_ent[part]; 2114 altp += i; 2115 for (idx = 0; i < alts_used; i++, altp++) { 2116 /* CASE 1: */ 2117 if (lastsec < altp->bad_start) 2118 break; 2119 2120 /* CASE 3: */ 2121 if (ckp[idx].ck_sector > altp->bad_end) 2122 continue; 2123 2124 /* CASE 2 and 7: */ 2125 if ((ckp[idx].ck_sector >= altp->bad_start) && 2126 (lastsec <= altp->bad_end)) { 2127 ckp[idx].ck_sector = altp->good_start + 2128 ckp[idx].ck_sector - altp->bad_start; 2129 break; 2130 } 2131 2132 /* at least one bad sector in our section. break it. 
*/ 2133 /* CASE 5: */ 2134 if ((lastsec >= altp->bad_start) && 2135 (lastsec <= altp->bad_end)) { 2136 ckp[idx+1].ck_seclen = lastsec - altp->bad_start + 1; 2137 ckp[idx].ck_seclen -= ckp[idx+1].ck_seclen; 2138 ckp[idx+1].ck_sector = altp->good_start; 2139 break; 2140 } 2141 /* CASE 6: */ 2142 if ((ckp[idx].ck_sector <= altp->bad_end) && 2143 (ckp[idx].ck_sector >= altp->bad_start)) { 2144 ckp[idx+1].ck_seclen = ckp[idx].ck_seclen; 2145 ckp[idx].ck_seclen = altp->bad_end - 2146 ckp[idx].ck_sector + 1; 2147 ckp[idx+1].ck_seclen -= ckp[idx].ck_seclen; 2148 ckp[idx].ck_sector = altp->good_start + 2149 ckp[idx].ck_sector - altp->bad_start; 2150 idx++; 2151 ckp[idx].ck_sector = altp->bad_end + 1; 2152 continue; /* check rest of section */ 2153 } 2154 2155 /* CASE 4: */ 2156 ckp[idx].ck_seclen = altp->bad_start - ckp[idx].ck_sector; 2157 ckp[idx+1].ck_sector = altp->good_start; 2158 ckp[idx+1].ck_seclen = altp->bad_end - altp->bad_start + 1; 2159 idx += 2; 2160 ckp[idx].ck_sector = altp->bad_end + 1; 2161 ckp[idx].ck_seclen = lastsec - altp->bad_end; 2162 } 2163 2164 rw_exit(&dkp->dk_bbh_mutex); 2165 return ((opaque_t)hp); 2166 } 2167 2168 static int 2169 cmdk_bbh_bsearch(struct alts_ent *buf, int cnt, daddr32_t key) 2170 { 2171 int i; 2172 int ind; 2173 int interval; 2174 int mystatus = -1; 2175 2176 if (!cnt) 2177 return (mystatus); 2178 2179 ind = 1; /* compiler complains about possible uninitialized var */ 2180 for (i = 1; i <= cnt; i <<= 1) 2181 ind = i; 2182 2183 for (interval = ind; interval; ) { 2184 if ((key >= buf[ind-1].bad_start) && 2185 (key <= buf[ind-1].bad_end)) { 2186 return (ind-1); 2187 } else { 2188 interval >>= 1; 2189 if (key < buf[ind-1].bad_start) { 2190 /* record the largest bad sector index */ 2191 mystatus = ind-1; 2192 if (!interval) 2193 break; 2194 ind = ind - interval; 2195 } else { 2196 /* 2197 * if key is larger than the last element 2198 * then break 2199 */ 2200 if ((ind == cnt) || !interval) 2201 break; 2202 if ((ind+interval) <= cnt) 
2203 ind += interval; 2204 } 2205 } 2206 } 2207 return (mystatus); 2208 } 2209