/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/scsi/scsi.h>
#include <sys/dktp/cm.h>
#include <sys/dktp/quetypes.h>
#include <sys/dktp/queue.h>
#include <sys/dktp/fctypes.h>
#include <sys/dktp/flowctrl.h>
#include <sys/dktp/cmdev.h>
#include <sys/dkio.h>
#include <sys/dktp/tgdk.h>
#include <sys/dktp/dadk.h>
#include <sys/dktp/bbh.h>
#include <sys/dktp/altsctr.h>
#include <sys/dktp/cmdk.h>

#include <sys/stat.h>
#include <sys/vtoc.h>
#include <sys/file.h>
#include <sys/dktp/dadkio.h>
#include <sys/aio_req.h>

#include <sys/cmlb.h>

/*
 * Local Static Data
 */
#ifdef CMDK_DEBUG
#define	DENT	0x0001
#define	DIO	0x0002

static	int	cmdk_debug = DIO;
#endif

#ifndef	TRUE
#define	TRUE	1
#endif

#ifndef	FALSE
#define	FALSE	0
#endif

/*
 * NDKMAP is the base number for accessing the fdisk partitions.
 * c?d?p0 --> cmdk@?,?:q
 */
#define	PARTITION0_INDEX	(NDKMAP + 0)

#define	DKTP_DATA		(dkp->dk_tgobjp)->tg_data
#define	DKTP_EXT		(dkp->dk_tgobjp)->tg_ext

static void *cmdk_state;

/*
 * the cmdk_attach_mutex protects cmdk_max_instance in multi-threaded
 * attach situations
 */
static kmutex_t cmdk_attach_mutex;
static int cmdk_max_instance = 0;

/*
 * Panic dumpsys state
 * There is only a single flag that is not mutex locked since
 * the system is prevented from thread switching and cmdk_dump
 * will only be called in a single threaded operation.
 */
static int	cmdk_indump;

/*
 * Local Function Prototypes
 */
static int cmdk_create_obj(dev_info_t *dip, struct cmdk *dkp);
static void cmdk_destroy_obj(dev_info_t *dip, struct cmdk *dkp);
static void cmdkmin(struct buf *bp);
static int cmdkrw(dev_t dev, struct uio *uio, int flag);
static int cmdkarw(dev_t dev, struct aio_req *aio, int flag);

/*
 * Bad Block Handling Functions Prototypes
 */
static void cmdk_bbh_reopen(struct cmdk *dkp);
static opaque_t cmdk_bbh_gethandle(opaque_t bbh_data, struct buf *bp);
static bbh_cookie_t cmdk_bbh_htoc(opaque_t bbh_data, opaque_t handle);
static void cmdk_bbh_freehandle(opaque_t bbh_data, opaque_t handle);
static void cmdk_bbh_close(struct cmdk *dkp);
static void cmdk_bbh_setalts_idx(struct cmdk *dkp);
static int cmdk_bbh_bsearch(struct alts_ent *buf, int cnt, daddr32_t key);

static struct bbh_objops cmdk_bbh_ops = {
	nulldev,
	nulldev,
	cmdk_bbh_gethandle,
	cmdk_bbh_htoc,
	cmdk_bbh_freehandle,
	0, 0
};

static int cmdkopen(dev_t *dev_p, int flag, int otyp, cred_t *credp);
static int cmdkclose(dev_t dev, int flag, int otyp, cred_t *credp);
static int cmdkstrategy(struct buf *bp);
static int cmdkdump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk);
static int cmdkioctl(dev_t, int, intptr_t, int, cred_t *, int *);
static int cmdkread(dev_t dev, struct uio *uio, cred_t *credp);
static int cmdkwrite(dev_t dev, struct uio *uio, cred_t *credp);
static int cmdk_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp);
static int cmdkaread(dev_t dev, struct aio_req *aio, cred_t *credp);
static int cmdkawrite(dev_t dev, struct aio_req *aio, cred_t *credp);

/*
 * Device driver ops vector
 */

static struct cb_ops cmdk_cb_ops = {
	cmdkopen,		/* open */
	cmdkclose,		/* close */
	cmdkstrategy,		/* strategy */
	nodev,			/* print */
	cmdkdump,		/* dump */
	cmdkread,		/* read */
	cmdkwrite,		/* write */
	cmdkioctl,		/* ioctl */
	nodev,			/* devmap */
	nodev,			/* mmap */
	nodev,			/* segmap */
	nochpoll,		/* poll */
	cmdk_prop_op,		/* cb_prop_op */
	0,			/* streamtab */
	D_64BIT | D_MP | D_NEW,	/* Driver compatibility flag */
	CB_REV,			/* cb_rev */
	cmdkaread,		/* async read */
	cmdkawrite		/* async write */
};

static int cmdkinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
    void **result);
static int cmdkprobe(dev_info_t *dip);
static int cmdkattach(dev_info_t *dip, ddi_attach_cmd_t cmd);
static int cmdkdetach(dev_info_t *dip, ddi_detach_cmd_t cmd);

static void cmdk_setup_pm(dev_info_t *dip, struct cmdk *dkp);
static int cmdkresume(dev_info_t *dip);
static int cmdksuspend(dev_info_t *dip);
static int cmdkpower(dev_info_t *dip, int component, int level);

struct dev_ops cmdk_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* refcnt */
	cmdkinfo,		/* info */
	nulldev,		/* identify */
	cmdkprobe,		/* probe */
	cmdkattach,		/* attach */
	cmdkdetach,		/* detach */
	nodev,			/* reset */
	&cmdk_cb_ops,		/* driver operations */
	(struct bus_ops *)0,	/* bus operations */
	cmdkpower		/* power */
};

/*
 * This is the loadable module wrapper.
 */
#include <sys/modctl.h>

extern struct mod_ops mod_driverops;

static struct modldrv modldrv = {
	&mod_driverops,		/* Type of module. This one is a driver */
	"Common Direct Access Disk %I%",
	&cmdk_ops,		/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&modldrv, NULL
};

/* Function prototypes for cmlb callbacks */

static int cmdk_lb_rdwr(dev_info_t *dip, uchar_t cmd, void *bufaddr,
    diskaddr_t start, size_t length, void *tg_cookie);

static int cmdk_lb_getinfo(dev_info_t *dip, int cmd, void *arg,
    void *tg_cookie);

static void cmdk_devid_setup(struct cmdk *dkp);
static int cmdk_devid_modser(struct cmdk *dkp);
static int cmdk_get_modser(struct cmdk *dkp, int ioccmd, char *buf, int len);
static int cmdk_devid_fabricate(struct cmdk *dkp);
static int cmdk_devid_read(struct cmdk *dkp);

static cmlb_tg_ops_t cmdk_lb_ops = {
	TG_DK_OPS_VERSION_1,
	cmdk_lb_rdwr,
	cmdk_lb_getinfo
};

static boolean_t
cmdk_isopen(struct cmdk *dkp, dev_t dev)
{
	int		part, otyp;
	ulong_t		partbit;

	ASSERT(MUTEX_HELD((&dkp->dk_mutex)));

	part = CMDKPART(dev);
	partbit = 1 << part;

	/* account for close */
	if (dkp->dk_open_lyr[part] != 0)
		return (B_TRUE);
	for (otyp = 0; otyp < OTYPCNT; otyp++)
		if (dkp->dk_open_reg[otyp] & partbit)
			return (B_TRUE);
	return (B_FALSE);
}

int
_init(void)
{
	int	rval;

	if (rval = ddi_soft_state_init(&cmdk_state, sizeof (struct cmdk), 7))
		return (rval);

	mutex_init(&cmdk_attach_mutex, NULL, MUTEX_DRIVER, NULL);
	if ((rval = mod_install(&modlinkage)) != 0) {
		mutex_destroy(&cmdk_attach_mutex);
		ddi_soft_state_fini(&cmdk_state);
	}
	return (rval);
}
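
/*
 * Per ddi_soft_state_init(9F), the final argument above (7) is only a
 * hint for how many per-instance soft-state slots to preallocate; the
 * framework grows the array on demand, so it need not match the actual
 * number of disks.
 */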

int
_fini(void)
{
	return (EBUSY);

	/*
	 * This has been commented out until cmdk is a true
	 * unloadable module. Right now x86's are panicking on
	 * a diskless reconfig boot.
	 */

#if 0	/* bugid 1186679 */
	int rval;

	rval = mod_remove(&modlinkage);
	if (rval != 0)
		return (rval);

	mutex_destroy(&cmdk_attach_mutex);
	ddi_soft_state_fini(&cmdk_state);

	return (0);
#endif
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*
 * Autoconfiguration Routines
 */
static int
cmdkprobe(dev_info_t *dip)
{
	int	instance;
	int	status;
	struct	cmdk	*dkp;

	instance = ddi_get_instance(dip);

	if (ddi_get_soft_state(cmdk_state, instance))
		return (DDI_PROBE_PARTIAL);

	if ((ddi_soft_state_zalloc(cmdk_state, instance) != DDI_SUCCESS) ||
	    ((dkp = ddi_get_soft_state(cmdk_state, instance)) == NULL))
		return (DDI_PROBE_PARTIAL);

	mutex_init(&dkp->dk_mutex, NULL, MUTEX_DRIVER, NULL);
	rw_init(&dkp->dk_bbh_mutex, NULL, RW_DRIVER, NULL);
	dkp->dk_dip = dip;
	mutex_enter(&dkp->dk_mutex);

	dkp->dk_dev = makedevice(ddi_driver_major(dip),
	    ddi_get_instance(dip) << CMDK_UNITSHF);

	/* linkage to dadk and strategy */
	if (cmdk_create_obj(dip, dkp) != DDI_SUCCESS) {
		mutex_exit(&dkp->dk_mutex);
		mutex_destroy(&dkp->dk_mutex);
		rw_destroy(&dkp->dk_bbh_mutex);
		ddi_soft_state_free(cmdk_state, instance);
		return (DDI_PROBE_PARTIAL);
	}

	status = dadk_probe(DKTP_DATA, KM_NOSLEEP);
	if (status != DDI_PROBE_SUCCESS) {
		cmdk_destroy_obj(dip, dkp);	/* dadk/strategy linkage */
		mutex_exit(&dkp->dk_mutex);
		mutex_destroy(&dkp->dk_mutex);
		rw_destroy(&dkp->dk_bbh_mutex);
		ddi_soft_state_free(cmdk_state, instance);
		return (status);
	}

	mutex_exit(&dkp->dk_mutex);
#ifdef CMDK_DEBUG
	if (cmdk_debug & DENT)
		PRF("cmdkprobe: instance= %d name= `%s`\n",
		    instance, ddi_get_name_addr(dip));
#endif
	return (status);
}

static int
cmdkattach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int		instance;
	struct		cmdk *dkp;
	char		*node_type;

	switch (cmd) {
	case DDI_ATTACH:
		break;
	case DDI_RESUME:
		return (cmdkresume(dip));
	default:
		return (DDI_FAILURE);
	}

	instance = ddi_get_instance(dip);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
		return (DDI_FAILURE);

	dkp->dk_pm_level = CMDK_SPINDLE_UNINIT;
	mutex_init(&dkp->dk_mutex, NULL, MUTEX_DRIVER, NULL);

	mutex_enter(&dkp->dk_mutex);

	/* dadk_attach is an empty function that only returns SUCCESS */
	(void) dadk_attach(DKTP_DATA);

	node_type = (DKTP_EXT->tg_nodetype);

	/*
	 * this open allows cmlb to read the device
	 * and determine the label types
	 * so that cmlb can create minor nodes for the device
	 */

	/* open the target disk */
	if (dadk_open(DKTP_DATA, 0) != DDI_SUCCESS)
		goto fail2;

	/* mark as having opened target */
	dkp->dk_flag |= CMDK_TGDK_OPEN;

	cmlb_alloc_handle((cmlb_handle_t *)&dkp->dk_cmlbhandle);

	if (cmlb_attach(dip,
	    &cmdk_lb_ops,
	    DTYPE_DIRECT,		/* device_type */
	    0,				/* removable */
	    0,				/* hot pluggable XXX */
	    node_type,
	    CMLB_CREATE_ALTSLICE_VTOC_16_DTYPE_DIRECT,	/* alter_behaviour */
	    dkp->dk_cmlbhandle,
	    0) != 0)
		goto fail1;

	/* Calling validate will create minor nodes according to disk label */
	(void) cmlb_validate(dkp->dk_cmlbhandle, 0, 0);

	/* set bbh (Bad Block Handling) */
	cmdk_bbh_reopen(dkp);

	/* setup devid string */
	cmdk_devid_setup(dkp);

	mutex_enter(&cmdk_attach_mutex);
	if (instance > cmdk_max_instance)
		cmdk_max_instance = instance;
	mutex_exit(&cmdk_attach_mutex);

	mutex_exit(&dkp->dk_mutex);

	/*
	 * Add a zero-length attribute to tell the world we support
	 * kernel ioctls (for layered drivers)
	 */
	(void) ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
	    DDI_KERNEL_IOCTL, NULL, 0);
	ddi_report_dev(dip);

	/*
	 * Initialize power management
	 */
	mutex_init(&dkp->dk_pm_mutex, NULL, MUTEX_DRIVER, NULL);
	cv_init(&dkp->dk_suspend_cv, NULL, CV_DRIVER, NULL);
	cmdk_setup_pm(dip, dkp);

	return (DDI_SUCCESS);

fail1:
	cmlb_free_handle(&dkp->dk_cmlbhandle);
	(void) dadk_close(DKTP_DATA);
fail2:
	cmdk_destroy_obj(dip, dkp);
	rw_destroy(&dkp->dk_bbh_mutex);
	mutex_exit(&dkp->dk_mutex);
	mutex_destroy(&dkp->dk_mutex);
	ddi_soft_state_free(cmdk_state, instance);
	return (DDI_FAILURE);
}


static int
cmdkdetach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	struct cmdk	*dkp;
	int		instance;
	int		max_instance;

	switch (cmd) {
	case DDI_DETACH:
		/* return (DDI_FAILURE); */
		break;
	case DDI_SUSPEND:
		return (cmdksuspend(dip));
	default:
#ifdef CMDK_DEBUG
		if (cmdk_debug & DIO) {
			PRF("cmdkdetach: cmd = %d unknown\n", cmd);
		}
#endif
		return (DDI_FAILURE);
	}

	mutex_enter(&cmdk_attach_mutex);
	max_instance = cmdk_max_instance;
	mutex_exit(&cmdk_attach_mutex);

	/* check if any instance of driver is open */
	for (instance = 0; instance < max_instance; instance++) {
		dkp = ddi_get_soft_state(cmdk_state, instance);
		if (!dkp)
			continue;
		if (dkp->dk_flag & CMDK_OPEN)
			return (DDI_FAILURE);
	}

	instance = ddi_get_instance(dip);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
		return (DDI_SUCCESS);

	mutex_enter(&dkp->dk_mutex);

	/*
	 * The cmdk_part_info call at the end of cmdkattach may have
	 * caused cmdk_reopen to do a TGDK_OPEN; make sure we close on
	 * detach for the case where cmdkopen/cmdkclose never occurs.
	 */
	if (dkp->dk_flag & CMDK_TGDK_OPEN) {
		dkp->dk_flag &= ~CMDK_TGDK_OPEN;
		(void) dadk_close(DKTP_DATA);
	}

	cmlb_detach(dkp->dk_cmlbhandle, 0);
	cmlb_free_handle(&dkp->dk_cmlbhandle);
	ddi_prop_remove_all(dip);

	cmdk_destroy_obj(dip, dkp);	/* dadk/strategy linkage */
	mutex_exit(&dkp->dk_mutex);
	mutex_destroy(&dkp->dk_mutex);
	rw_destroy(&dkp->dk_bbh_mutex);
	mutex_destroy(&dkp->dk_pm_mutex);
	cv_destroy(&dkp->dk_suspend_cv);
	ddi_soft_state_free(cmdk_state, instance);

	return (DDI_SUCCESS);
}

static int
cmdkinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	dev_t		dev = (dev_t)arg;
	int		instance;
	struct	cmdk	*dkp;

#ifdef lint
	dip = dip;	/* no one ever uses this */
#endif
#ifdef CMDK_DEBUG
	if (cmdk_debug & DENT)
		PRF("cmdkinfo: call\n");
#endif
	instance = CMDKUNIT(dev);

	switch (infocmd) {
	case DDI_INFO_DEVT2DEVINFO:
		if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
			return (DDI_FAILURE);
		*result = (void *) dkp->dk_dip;
		break;
	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)(intptr_t)instance;
		break;
	default:
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}

/*
 * Initialize the power management components
 */
static void
cmdk_setup_pm(dev_info_t *dip, struct cmdk *dkp)
{
	char *pm_comp[] = { "NAME=cmdk", "0=off", "1=on", NULL };

	/*
	 * Since the cmdk device does not have the 'reg' property,
	 * cpr will not call its DDI_SUSPEND/DDI_RESUME entries.
	 * The following code is to tell cpr that this device
	 * DOES need to be suspended and resumed.
	 */
	(void) ddi_prop_update_string(DDI_DEV_T_NONE, dip,
	    "pm-hardware-state", "needs-suspend-resume");

	if (ddi_prop_update_string_array(DDI_DEV_T_NONE, dip,
	    "pm-components", pm_comp, 3) == DDI_PROP_SUCCESS) {
		if (pm_raise_power(dip, 0, CMDK_SPINDLE_ON) == DDI_SUCCESS) {
			mutex_enter(&dkp->dk_pm_mutex);
			dkp->dk_pm_level = CMDK_SPINDLE_ON;
			dkp->dk_pm_is_enabled = 1;
			mutex_exit(&dkp->dk_pm_mutex);
		} else {
			mutex_enter(&dkp->dk_pm_mutex);
			dkp->dk_pm_level = CMDK_SPINDLE_OFF;
			dkp->dk_pm_is_enabled = 0;
			mutex_exit(&dkp->dk_pm_mutex);
		}
	} else {
		mutex_enter(&dkp->dk_pm_mutex);
		dkp->dk_pm_level = CMDK_SPINDLE_UNINIT;
		dkp->dk_pm_is_enabled = 0;
		mutex_exit(&dkp->dk_pm_mutex);
	}
}
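
/*
 * Illustrative summary of the pm setup above: the "pm-components"
 * entries "0=off" and "1=on" define the two valid power levels for
 * component 0, which is why cmdkpower() below only accepts component 0
 * with a level between CMDK_SPINDLE_OFF and CMDK_SPINDLE_ON.
 */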

/*
 * suspend routine; it will be run when the DDI_SUSPEND command is
 * passed to detach(9E) by system power management
 */
static int
cmdksuspend(dev_info_t *dip)
{
	struct cmdk	*dkp;
	int		instance;
	clock_t		count = 0;

	instance = ddi_get_instance(dip);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
		return (DDI_FAILURE);
	mutex_enter(&dkp->dk_mutex);
	if (dkp->dk_flag & CMDK_SUSPEND) {
		mutex_exit(&dkp->dk_mutex);
		return (DDI_SUCCESS);
	}
	dkp->dk_flag |= CMDK_SUSPEND;

	/* need to wait a while */
	while (dadk_getcmds(DKTP_DATA) != 0) {
		delay(drv_usectohz(1000000));
		if (count > 60) {
			dkp->dk_flag &= ~CMDK_SUSPEND;
			cv_broadcast(&dkp->dk_suspend_cv);
			mutex_exit(&dkp->dk_mutex);
			return (DDI_FAILURE);
		}
		count++;
	}
	mutex_exit(&dkp->dk_mutex);
	return (DDI_SUCCESS);
}

/*
 * resume routine; it will be run when the DDI_RESUME command is
 * passed to attach(9E) by system power management
 */
static int
cmdkresume(dev_info_t *dip)
{
	struct cmdk	*dkp;
	int		instance;

	instance = ddi_get_instance(dip);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
		return (DDI_FAILURE);
	mutex_enter(&dkp->dk_mutex);
	if (!(dkp->dk_flag & CMDK_SUSPEND)) {
		mutex_exit(&dkp->dk_mutex);
		return (DDI_FAILURE);
	}
	dkp->dk_pm_level = CMDK_SPINDLE_ON;
	dkp->dk_flag &= ~CMDK_SUSPEND;
	cv_broadcast(&dkp->dk_suspend_cv);
	mutex_exit(&dkp->dk_mutex);
	return (DDI_SUCCESS);
}

/*
 * power management entry point; it is used to change the power level
 * of the power management component.
 * The real hard drive suspend/resume is handled in ata, so this
 * function does no real work other than verifying that the disk
 * is idle.
 */
static int
cmdkpower(dev_info_t *dip, int component, int level)
{
	struct cmdk	*dkp;
	int		instance;

	instance = ddi_get_instance(dip);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)) ||
	    component != 0 || level > CMDK_SPINDLE_ON ||
	    level < CMDK_SPINDLE_OFF) {
		return (DDI_FAILURE);
	}

	mutex_enter(&dkp->dk_pm_mutex);
	if (dkp->dk_pm_is_enabled && dkp->dk_pm_level == level) {
		mutex_exit(&dkp->dk_pm_mutex);
		return (DDI_SUCCESS);
	}
	mutex_exit(&dkp->dk_pm_mutex);

	if ((level == CMDK_SPINDLE_OFF) &&
	    (dadk_getcmds(DKTP_DATA) != 0)) {
		return (DDI_FAILURE);
	}

	mutex_enter(&dkp->dk_pm_mutex);
	dkp->dk_pm_level = level;
	mutex_exit(&dkp->dk_pm_mutex);
	return (DDI_SUCCESS);
}

static int
cmdk_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
    char *name, caddr_t valuep, int *lengthp)
{
	struct	cmdk	*dkp;

#ifdef CMDK_DEBUG
	if (cmdk_debug & DENT)
		PRF("cmdk_prop_op: call\n");
#endif

	dkp = ddi_get_soft_state(cmdk_state, ddi_get_instance(dip));
	if (dkp == NULL)
		return (ddi_prop_op(dev, dip, prop_op, mod_flags,
		    name, valuep, lengthp));

	return (cmlb_prop_op(dkp->dk_cmlbhandle,
	    dev, dip, prop_op, mod_flags, name, valuep, lengthp,
	    CMDKPART(dev), NULL));
}

/*
 * dump routine
 */
static int
cmdkdump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk)
{
	int		instance;
	struct	cmdk	*dkp;
	diskaddr_t	p_lblksrt;
	diskaddr_t	p_lblkcnt;
	struct	buf	local;
	struct	buf	*bp;

#ifdef CMDK_DEBUG
	if (cmdk_debug & DENT)
		PRF("cmdkdump: call\n");
#endif
	instance = CMDKUNIT(dev);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)) || (blkno < 0))
		return (ENXIO);

	if (cmlb_partinfo(
	    dkp->dk_cmlbhandle,
	    CMDKPART(dev),
	    &p_lblkcnt,
	    &p_lblksrt,
	    NULL,
	    NULL,
	    0)) {
		return (ENXIO);
	}

	if ((blkno+nblk) > p_lblkcnt)
		return (EINVAL);

	cmdk_indump = 1;	/* Tell disk targets we are panic dumping */

	bp = &local;
	bzero(bp, sizeof (*bp));
	bp->b_flags = B_BUSY;
	bp->b_un.b_addr = addr;
	bp->b_bcount = nblk << SCTRSHFT;
	SET_BP_SEC(bp, ((ulong_t)(p_lblksrt + blkno)));

	(void) dadk_dump(DKTP_DATA, bp);
	return (bp->b_error);
}
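
/*
 * Illustrative example of the data-model handling below: a 32-bit
 * application issuing DIOCTL_RWCMD against a 64-bit kernel passes a
 * struct dadkio_rwcmd32 whose bufaddr is a 32-bit caddr32_t;
 * ddi_model_convert_from(flag) reports DDI_MODEL_ILP32 in that case,
 * and rwcmd_copyin()/rwcmd_copyout() widen or narrow the fields so the
 * rest of the driver only ever sees the native struct dadkio_rwcmd.
 */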

/*
 * Copy in the dadkio_rwcmd according to the user's data model. If needed,
 * convert it for our internal use.
 */
static int
rwcmd_copyin(struct dadkio_rwcmd *rwcmdp, caddr_t inaddr, int flag)
{
	switch (ddi_model_convert_from(flag)) {
	case DDI_MODEL_ILP32: {
		struct dadkio_rwcmd32 cmd32;

		if (ddi_copyin(inaddr, &cmd32,
		    sizeof (struct dadkio_rwcmd32), flag)) {
			return (EFAULT);
		}

		rwcmdp->cmd = cmd32.cmd;
		rwcmdp->flags = cmd32.flags;
		rwcmdp->blkaddr = (daddr_t)cmd32.blkaddr;
		rwcmdp->buflen = cmd32.buflen;
		rwcmdp->bufaddr = (caddr_t)(intptr_t)cmd32.bufaddr;
		/*
		 * Note: we do not convert the 'status' field,
		 * as it should not contain valid data at this
		 * point.
		 */
		bzero(&rwcmdp->status, sizeof (rwcmdp->status));
		break;
	}
	case DDI_MODEL_NONE: {
		if (ddi_copyin(inaddr, rwcmdp,
		    sizeof (struct dadkio_rwcmd), flag)) {
			return (EFAULT);
		}
	}
	}
	return (0);
}

/*
 * If necessary, convert the internal rwcmdp and status to the appropriate
 * data model and copy it out to the user.
 */
static int
rwcmd_copyout(struct dadkio_rwcmd *rwcmdp, caddr_t outaddr, int flag)
{
	switch (ddi_model_convert_from(flag)) {
	case DDI_MODEL_ILP32: {
		struct dadkio_rwcmd32 cmd32;

		cmd32.cmd = rwcmdp->cmd;
		cmd32.flags = rwcmdp->flags;
		cmd32.blkaddr = rwcmdp->blkaddr;
		cmd32.buflen = rwcmdp->buflen;
		ASSERT64(((uintptr_t)rwcmdp->bufaddr >> 32) == 0);
		cmd32.bufaddr = (caddr32_t)(uintptr_t)rwcmdp->bufaddr;

		cmd32.status.status = rwcmdp->status.status;
		cmd32.status.resid = rwcmdp->status.resid;
		cmd32.status.failed_blk_is_valid =
		    rwcmdp->status.failed_blk_is_valid;
		cmd32.status.failed_blk = rwcmdp->status.failed_blk;
		cmd32.status.fru_code_is_valid =
		    rwcmdp->status.fru_code_is_valid;
		cmd32.status.fru_code = rwcmdp->status.fru_code;

		bcopy(rwcmdp->status.add_error_info,
		    cmd32.status.add_error_info, DADKIO_ERROR_INFO_LEN);

		if (ddi_copyout(&cmd32, outaddr,
		    sizeof (struct dadkio_rwcmd32), flag))
			return (EFAULT);
		break;
	}
	case DDI_MODEL_NONE: {
		if (ddi_copyout(rwcmdp, outaddr,
		    sizeof (struct dadkio_rwcmd), flag))
			return (EFAULT);
	}
	}
	return (0);
}

/*
 * ioctl routine
 */
static int
cmdkioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *credp, int *rvalp)
{
	int		instance;
	struct scsi_device *devp;
	struct cmdk	*dkp;
	char		data[NBPSCTR];

	instance = CMDKUNIT(dev);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
		return (ENXIO);

	mutex_enter(&dkp->dk_mutex);
	while (dkp->dk_flag & CMDK_SUSPEND) {
		cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex);
	}
	mutex_exit(&dkp->dk_mutex);

	bzero(data, sizeof (data));

	switch (cmd) {

	case DKIOCGMEDIAINFO: {
		struct dk_minfo	media_info;
		struct	tgdk_geom phyg;

		/* dadk_getphygeom always returns success */
		(void) dadk_getphygeom(DKTP_DATA, &phyg);

		media_info.dki_lbsize = phyg.g_secsiz;
		media_info.dki_capacity = phyg.g_cap;
		media_info.dki_media_type = DK_FIXED_DISK;

		if (ddi_copyout(&media_info, (void *)arg,
		    sizeof (struct dk_minfo), flag)) {
			return (EFAULT);
		} else {
			return (0);
		}
	}

	case DKIOCINFO: {
		struct dk_cinfo *info = (struct dk_cinfo *)data;

		/* controller information */
		info->dki_ctype = (DKTP_EXT->tg_ctype);
		info->dki_cnum = ddi_get_instance(ddi_get_parent(dkp->dk_dip));
		(void) strcpy(info->dki_cname,
		    ddi_get_name(ddi_get_parent(dkp->dk_dip)));

		/* Unit Information */
		info->dki_unit = ddi_get_instance(dkp->dk_dip);
		devp = ddi_get_driver_private(dkp->dk_dip);
		info->dki_slave = (CMDEV_TARG(devp)<<3) | CMDEV_LUN(devp);
		(void) strcpy(info->dki_dname, ddi_driver_name(dkp->dk_dip));
		info->dki_flags = DKI_FMTVOL;
		info->dki_partition = CMDKPART(dev);

		info->dki_maxtransfer = maxphys / DEV_BSIZE;
		info->dki_addr = 1;
		info->dki_space = 0;
		info->dki_prio = 0;
		info->dki_vec = 0;

		if (ddi_copyout(data, (void *)arg, sizeof (*info), flag))
			return (EFAULT);
		else
			return (0);
	}

	case DKIOCSTATE: {
		int	state;
		int	rval;
		diskaddr_t	p_lblksrt;
		diskaddr_t	p_lblkcnt;

		if (ddi_copyin((void *)arg, &state, sizeof (int), flag))
			return (EFAULT);

		/* dadk_check_media blocks until state changes */
		if (rval = dadk_check_media(DKTP_DATA, &state))
			return (rval);

		if (state == DKIO_INSERTED) {

			if (cmlb_validate(dkp->dk_cmlbhandle, 0, 0) != 0)
				return (ENXIO);

			if (cmlb_partinfo(dkp->dk_cmlbhandle, CMDKPART(dev),
			    &p_lblkcnt, &p_lblksrt, NULL, NULL, 0))
				return (ENXIO);

			if (p_lblkcnt <= 0)
				return (ENXIO);
		}

		if (ddi_copyout(&state, (caddr_t)arg, sizeof (int), flag))
			return (EFAULT);

		return (0);
	}

	/*
	 * is media removable?
	 */
	case DKIOCREMOVABLE: {
		int i;

		i = (DKTP_EXT->tg_rmb) ? 1 : 0;

		if (ddi_copyout(&i, (caddr_t)arg, sizeof (int), flag))
			return (EFAULT);

		return (0);
	}

	case DKIOCADDBAD:
		/*
		 * This is not an update mechanism to add bad blocks
		 * to the bad block structures stored on disk.
		 *
		 * addbadsec(1M) will update the bad block data on disk
		 * and use this ioctl to force the driver to re-initialize
		 * the list of bad blocks in the driver.
		 */

		/* start BBH */
		cmdk_bbh_reopen(dkp);
		return (0);

	case DKIOCG_PHYGEOM:
	case DKIOCG_VIRTGEOM:
	case DKIOCGGEOM:
	case DKIOCSGEOM:
	case DKIOCGAPART:
	case DKIOCSAPART:
	case DKIOCGVTOC:
	case DKIOCSVTOC:
	case DKIOCPARTINFO:
	case DKIOCGMBOOT:
	case DKIOCSMBOOT:
	case DKIOCGETEFI:
	case DKIOCSETEFI:
	case DKIOCPARTITION:
	{
		int rc;

		rc = cmlb_ioctl(dkp->dk_cmlbhandle, dev, cmd, arg, flag,
		    credp, rvalp, 0);
		if (cmd == DKIOCSVTOC)
			cmdk_devid_setup(dkp);
		return (rc);
	}

	case DIOCTL_RWCMD: {
		struct	dadkio_rwcmd *rwcmdp;
		int	status;

		rwcmdp = kmem_alloc(sizeof (struct dadkio_rwcmd), KM_SLEEP);

		status = rwcmd_copyin(rwcmdp, (caddr_t)arg, flag);

		if (status == 0) {
			bzero(&(rwcmdp->status), sizeof (struct dadkio_status));
			status = dadk_ioctl(DKTP_DATA,
			    dev,
			    cmd,
			    (uintptr_t)rwcmdp,
			    flag,
			    credp,
			    rvalp);
		}
		if (status == 0)
			status = rwcmd_copyout(rwcmdp, (caddr_t)arg, flag);

		kmem_free(rwcmdp, sizeof (struct dadkio_rwcmd));
		return (status);
	}

	default:
		return (dadk_ioctl(DKTP_DATA,
		    dev,
		    cmd,
		    arg,
		    flag,
		    credp,
		    rvalp));
	}
}

/*ARGSUSED1*/
static int
cmdkclose(dev_t dev, int flag, int otyp, cred_t *credp)
{
	int		part;
	ulong_t		partbit;
	int		instance;
	struct cmdk	*dkp;
	int		lastclose = 1;
	int		i;

	instance = CMDKUNIT(dev);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)) ||
	    (otyp >= OTYPCNT))
		return (ENXIO);

	mutex_enter(&dkp->dk_mutex);

	/* check if device has been opened */
	ASSERT(cmdk_isopen(dkp, dev));
	if (!(dkp->dk_flag & CMDK_OPEN)) {
		mutex_exit(&dkp->dk_mutex);
		return (ENXIO);
	}

	while (dkp->dk_flag & CMDK_SUSPEND) {
		cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex);
	}

	part = CMDKPART(dev);
	partbit = 1 << part;

	/* account for close */
	if (otyp == OTYP_LYR) {
		ASSERT(dkp->dk_open_lyr[part] > 0);
		if (dkp->dk_open_lyr[part])
			dkp->dk_open_lyr[part]--;
	} else {
		ASSERT((dkp->dk_open_reg[otyp] & partbit) != 0);
		dkp->dk_open_reg[otyp] &= ~partbit;
	}
	dkp->dk_open_exl &= ~partbit;

	for (i = 0; i < CMDK_MAXPART; i++)
		if (dkp->dk_open_lyr[i] != 0) {
			lastclose = 0;
			break;
		}

	if (lastclose)
		for (i = 0; i < OTYPCNT; i++)
			if (dkp->dk_open_reg[i] != 0) {
				lastclose = 0;
				break;
			}

	mutex_exit(&dkp->dk_mutex);

	if (lastclose)
		cmlb_invalidate(dkp->dk_cmlbhandle, 0);

	return (DDI_SUCCESS);
}

/*ARGSUSED3*/
static int
cmdkopen(dev_t *dev_p, int flag, int otyp, cred_t *credp)
{
	dev_t		dev = *dev_p;
	int		part;
	ulong_t		partbit;
	int		instance;
	struct	cmdk	*dkp;
	diskaddr_t	p_lblksrt;
	diskaddr_t	p_lblkcnt;
	int		i;
	int		nodelay;

	instance = CMDKUNIT(dev);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
		return (ENXIO);

	if (otyp >= OTYPCNT)
		return (EINVAL);

	mutex_enter(&dkp->dk_mutex);
	while (dkp->dk_flag & CMDK_SUSPEND) {
		cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex);
	}
	mutex_exit(&dkp->dk_mutex);

	part = CMDKPART(dev);
	partbit = 1 << part;
	nodelay = (flag & (FNDELAY | FNONBLOCK));
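
	/*
	 * Illustrative: tools that must open an unlabeled disk (for
	 * example a format-style utility opening the raw device with
	 * O_NDELAY) rely on nodelay being set here; the label checks
	 * below are then allowed to fail without failing the open.
	 */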
	mutex_enter(&dkp->dk_mutex);

	if (cmlb_validate(dkp->dk_cmlbhandle, 0, 0) != 0) {

		/* fail if not doing non block open */
		if (!nodelay) {
			mutex_exit(&dkp->dk_mutex);
			return (ENXIO);
		}
	} else if (cmlb_partinfo(dkp->dk_cmlbhandle, part, &p_lblkcnt,
	    &p_lblksrt, NULL, NULL, 0) == 0) {

		if (p_lblkcnt <= 0 && (!nodelay || otyp != OTYP_CHR)) {
			mutex_exit(&dkp->dk_mutex);
			return (ENXIO);
		}
	} else {
		/* fail if not doing non block open */
		if (!nodelay) {
			mutex_exit(&dkp->dk_mutex);
			return (ENXIO);
		}
	}

	if ((DKTP_EXT->tg_rdonly) && (flag & FWRITE)) {
		mutex_exit(&dkp->dk_mutex);
		return (EROFS);
	}

	/* check for part already opened exclusively */
	if (dkp->dk_open_exl & partbit)
		goto excl_open_fail;

	/* check if we can establish exclusive open */
	if (flag & FEXCL) {
		if (dkp->dk_open_lyr[part])
			goto excl_open_fail;
		for (i = 0; i < OTYPCNT; i++) {
			if (dkp->dk_open_reg[i] & partbit)
				goto excl_open_fail;
		}
	}

	/* open will succeed, account for open */
	dkp->dk_flag |= CMDK_OPEN;
	if (otyp == OTYP_LYR)
		dkp->dk_open_lyr[part]++;
	else
		dkp->dk_open_reg[otyp] |= partbit;
	if (flag & FEXCL)
		dkp->dk_open_exl |= partbit;

	mutex_exit(&dkp->dk_mutex);
	return (DDI_SUCCESS);

excl_open_fail:
	mutex_exit(&dkp->dk_mutex);
	return (EBUSY);
}

/*
 * read routine
 */
/*ARGSUSED2*/
static int
cmdkread(dev_t dev, struct uio *uio, cred_t *credp)
{
	return (cmdkrw(dev, uio, B_READ));
}

/*
 * async read routine
 */
/*ARGSUSED2*/
static int
cmdkaread(dev_t dev, struct aio_req *aio, cred_t *credp)
{
	return (cmdkarw(dev, aio, B_READ));
}

/*
 * write routine
 */
/*ARGSUSED2*/
static int
cmdkwrite(dev_t dev, struct uio *uio, cred_t *credp)
{
	return (cmdkrw(dev, uio, B_WRITE));
}

/*
 * async write routine
 */
/*ARGSUSED2*/
static int
cmdkawrite(dev_t dev, struct aio_req *aio, cred_t *credp)
{
	return (cmdkarw(dev, aio, B_WRITE));
}

static void
cmdkmin(struct buf *bp)
{
	if (bp->b_bcount > DK_MAXRECSIZE)
		bp->b_bcount = DK_MAXRECSIZE;
}

static int
cmdkrw(dev_t dev, struct uio *uio, int flag)
{
	int		instance;
	struct	cmdk	*dkp;

	instance = CMDKUNIT(dev);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
		return (ENXIO);

	mutex_enter(&dkp->dk_mutex);
	while (dkp->dk_flag & CMDK_SUSPEND) {
		cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex);
	}
	mutex_exit(&dkp->dk_mutex);

	return (physio(cmdkstrategy, (struct buf *)0, dev, flag, cmdkmin, uio));
}

static int
cmdkarw(dev_t dev, struct aio_req *aio, int flag)
{
	int		instance;
	struct	cmdk	*dkp;

	instance = CMDKUNIT(dev);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
		return (ENXIO);

	mutex_enter(&dkp->dk_mutex);
	while (dkp->dk_flag & CMDK_SUSPEND) {
		cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex);
	}
	mutex_exit(&dkp->dk_mutex);

	return (aphysio(cmdkstrategy, anocancel, dev, flag, cmdkmin, aio));
}
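
/*
 * Note: physio(9F) and aphysio() above invoke cmdkmin() as their
 * minphys-style callback, so every buf handed to cmdkstrategy() below
 * has already had b_bcount clamped to DK_MAXRECSIZE.
 */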

/*
 * strategy routine
 */
static int
cmdkstrategy(struct buf *bp)
{
	int		instance;
	struct	cmdk	*dkp;
	long		d_cnt;
	diskaddr_t	p_lblksrt;
	diskaddr_t	p_lblkcnt;

	instance = CMDKUNIT(bp->b_edev);
	if (cmdk_indump || !(dkp = ddi_get_soft_state(cmdk_state, instance)) ||
	    (dkblock(bp) < 0)) {
		bp->b_resid = bp->b_bcount;
		SETBPERR(bp, ENXIO);
		biodone(bp);
		return (0);
	}

	mutex_enter(&dkp->dk_mutex);
	ASSERT(cmdk_isopen(dkp, bp->b_edev));
	while (dkp->dk_flag & CMDK_SUSPEND) {
		cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex);
	}
	mutex_exit(&dkp->dk_mutex);

	bp->b_flags &= ~(B_DONE|B_ERROR);
	bp->b_resid = 0;
	bp->av_back = NULL;

	/*
	 * only re-read the vtoc if necessary (force == FALSE)
	 */
	if (cmlb_partinfo(dkp->dk_cmlbhandle, CMDKPART(bp->b_edev),
	    &p_lblkcnt, &p_lblksrt, NULL, NULL, 0)) {
		SETBPERR(bp, ENXIO);
	}

	if ((bp->b_bcount & (NBPSCTR-1)) || (dkblock(bp) > p_lblkcnt))
		SETBPERR(bp, ENXIO);

	if ((bp->b_flags & B_ERROR) || (dkblock(bp) == p_lblkcnt)) {
		bp->b_resid = bp->b_bcount;
		biodone(bp);
		return (0);
	}

	d_cnt = bp->b_bcount >> SCTRSHFT;
	if ((dkblock(bp) + d_cnt) > p_lblkcnt) {
		bp->b_resid = ((dkblock(bp) + d_cnt) - p_lblkcnt) << SCTRSHFT;
		bp->b_bcount -= bp->b_resid;
	}

	SET_BP_SEC(bp, ((ulong_t)(p_lblksrt + dkblock(bp))));
	if (dadk_strategy(DKTP_DATA, bp) != DDI_SUCCESS) {
		bp->b_resid += bp->b_bcount;
		biodone(bp);
	}
	return (0);
}

static int
cmdk_create_obj(dev_info_t *dip, struct cmdk *dkp)
{
	struct scsi_device	*devp;
	opaque_t		queobjp = NULL;
	opaque_t		flcobjp = NULL;
	char			que_keyvalp[64];
	int			que_keylen;
	char			flc_keyvalp[64];
	int			flc_keylen;

	ASSERT(mutex_owned(&dkp->dk_mutex));

	/* Create linkage to queueing routines based on property */
	que_keylen = sizeof (que_keyvalp);
	if (ddi_prop_op(DDI_DEV_T_NONE, dip, PROP_LEN_AND_VAL_BUF,
	    DDI_PROP_CANSLEEP, "queue", que_keyvalp, &que_keylen) !=
	    DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN, "cmdk_create_obj: queue property undefined");
		return (DDI_FAILURE);
	}
	que_keyvalp[que_keylen] = (char)0;

	if (strcmp(que_keyvalp, "qfifo") == 0) {
		queobjp = (opaque_t)qfifo_create();
	} else if (strcmp(que_keyvalp, "qsort") == 0) {
		queobjp = (opaque_t)qsort_create();
	} else {
		return (DDI_FAILURE);
	}

	/* Create linkage to dequeueing routines based on property */
	flc_keylen = sizeof (flc_keyvalp);
	if (ddi_prop_op(DDI_DEV_T_NONE, dip, PROP_LEN_AND_VAL_BUF,
	    DDI_PROP_CANSLEEP, "flow_control", flc_keyvalp, &flc_keylen) !=
	    DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN,
		    "cmdk_create_obj: flow-control property undefined");
		return (DDI_FAILURE);
	}

	flc_keyvalp[flc_keylen] = (char)0;

	if (strcmp(flc_keyvalp, "dsngl") == 0) {
		flcobjp = (opaque_t)dsngl_create();
	} else if (strcmp(flc_keyvalp, "dmult") == 0) {
		flcobjp = (opaque_t)dmult_create();
	} else {
		return (DDI_FAILURE);
	}

	/* populate bbh_obj object stored in dkp */
	dkp->dk_bbh_obj.bbh_data = dkp;
	dkp->dk_bbh_obj.bbh_ops = &cmdk_bbh_ops;

	/* create linkage to dadk */
	dkp->dk_tgobjp = (opaque_t)dadk_create();

	devp = ddi_get_driver_private(dip);
	(void) dadk_init(DKTP_DATA, devp, flcobjp, queobjp, &dkp->dk_bbh_obj,
	    NULL);

	return (DDI_SUCCESS);
}
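
/*
 * Illustrative property values for cmdk_create_obj() above, taken from
 * its strcmp() branches: the "queue" property must be "qfifo" or
 * "qsort", and the "flow_control" property must be "dsngl" or "dmult",
 * e.g. queue="qsort" flow_control="dmult" in a driver configuration
 * file; any other value makes cmdk_create_obj() return DDI_FAILURE,
 * which in turn fails the probe.
 */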

static void
cmdk_destroy_obj(dev_info_t *dip, struct cmdk *dkp)
{
	char		que_keyvalp[64];
	int		que_keylen;
	char		flc_keyvalp[64];
	int		flc_keylen;

	ASSERT(mutex_owned(&dkp->dk_mutex));

	(void) dadk_free((dkp->dk_tgobjp));
	dkp->dk_tgobjp = NULL;

	que_keylen = sizeof (que_keyvalp);
	if (ddi_prop_op(DDI_DEV_T_NONE, dip, PROP_LEN_AND_VAL_BUF,
	    DDI_PROP_CANSLEEP, "queue", que_keyvalp, &que_keylen) !=
	    DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN, "cmdk_destroy_obj: queue property undefined");
		return;
	}
	que_keyvalp[que_keylen] = (char)0;

	flc_keylen = sizeof (flc_keyvalp);
	if (ddi_prop_op(DDI_DEV_T_NONE, dip, PROP_LEN_AND_VAL_BUF,
	    DDI_PROP_CANSLEEP, "flow_control", flc_keyvalp, &flc_keylen) !=
	    DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN,
		    "cmdk_destroy_obj: flow-control property undefined");
		return;
	}
	flc_keyvalp[flc_keylen] = (char)0;
}
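
/*
 * cmdk_lb_rdwr() below is the TG_READ/TG_WRITE callback handed to cmlb
 * through cmdk_lb_ops. A small worked example of its size rounding
 * (illustrative numbers): (count + NBPSCTR - 1) & -NBPSCTR rounds the
 * byte count up to the next multiple of the 512-byte sector size, so a
 * 513-byte request is turned into a 1024-byte transfer.
 */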

/*ARGSUSED5*/
static int
cmdk_lb_rdwr(dev_info_t *dip, uchar_t cmd, void *bufaddr,
    diskaddr_t start, size_t count, void *tg_cookie)
{
	struct	cmdk	*dkp;
	opaque_t	handle;
	int		rc = 0;
	char		*bufa;

	dkp = ddi_get_soft_state(cmdk_state, ddi_get_instance(dip));
	if (dkp == NULL)
		return (ENXIO);

	if (cmd != TG_READ && cmd != TG_WRITE)
		return (EINVAL);

	/* count must be multiple of 512 */
	count = (count + NBPSCTR - 1) & -NBPSCTR;
	handle = dadk_iob_alloc(DKTP_DATA, start, count, KM_SLEEP);
	if (!handle)
		return (ENOMEM);

	if (cmd == TG_READ) {
		bufa = dadk_iob_xfer(DKTP_DATA, handle, B_READ);
		if (!bufa)
			rc = EIO;
		else
			bcopy(bufa, bufaddr, count);
	} else {
		bufa = dadk_iob_htoc(DKTP_DATA, handle);
		bcopy(bufaddr, bufa, count);
		bufa = dadk_iob_xfer(DKTP_DATA, handle, B_WRITE);
		if (!bufa)
			rc = EIO;
	}
	(void) dadk_iob_free(DKTP_DATA, handle);

	return (rc);
}

/*ARGSUSED3*/
static int
cmdk_lb_getinfo(dev_info_t *dip, int cmd, void *arg, void *tg_cookie)
{
	struct	cmdk	*dkp;
	struct	tgdk_geom phyg;

	dkp = ddi_get_soft_state(cmdk_state, ddi_get_instance(dip));
	if (dkp == NULL)
		return (ENXIO);

	switch (cmd) {
	case TG_GETPHYGEOM: {
		cmlb_geom_t *phygeomp = (cmlb_geom_t *)arg;

		/* dadk_getphygeom always returns success */
		(void) dadk_getphygeom(DKTP_DATA, &phyg);

		phygeomp->g_capacity	= phyg.g_cap;
		phygeomp->g_nsect	= phyg.g_sec;
		phygeomp->g_nhead	= phyg.g_head;
		phygeomp->g_acyl	= phyg.g_acyl;
		phygeomp->g_ncyl	= phyg.g_cyl;
		phygeomp->g_secsize	= phyg.g_secsiz;
		phygeomp->g_intrlv	= 1;
		phygeomp->g_rpm		= 3600;

		return (0);
	}

	case TG_GETVIRTGEOM: {
		cmlb_geom_t *virtgeomp = (cmlb_geom_t *)arg;
		diskaddr_t capacity;

		(void) dadk_getgeom(DKTP_DATA, &phyg);
		capacity = phyg.g_cap;

		/*
		 * If the controller returned us something that doesn't
		 * really fit into an Int 13/function 8 geometry
		 * result, just fail the ioctl. See PSARC 1998/313.
		 */
		if (capacity < 0 || capacity >= 63 * 254 * 1024)
			return (EINVAL);

		virtgeomp->g_capacity	= capacity;
		virtgeomp->g_nsect	= 63;
		virtgeomp->g_nhead	= 254;
		virtgeomp->g_ncyl	= capacity / (63 * 254);
		virtgeomp->g_acyl	= 0;
		virtgeomp->g_secsize	= 512;
		virtgeomp->g_intrlv	= 1;
		virtgeomp->g_rpm	= 3600;

		return (0);
	}

	case TG_GETCAPACITY:
	case TG_GETBLOCKSIZE: {

		/* dadk_getphygeom always returns success */
		(void) dadk_getphygeom(DKTP_DATA, &phyg);
		if (cmd == TG_GETCAPACITY)
			*(diskaddr_t *)arg = phyg.g_cap;
		else
			*(uint32_t *)arg = (uint32_t)phyg.g_secsiz;

		return (0);
	}

	case TG_GETATTR: {
		tg_attribute_t *tgattribute = (tg_attribute_t *)arg;
		if ((DKTP_EXT->tg_rdonly))
			tgattribute->media_is_writable = FALSE;
		else
			tgattribute->media_is_writable = TRUE;

		return (0);
	}

	default:
		return (ENOTTY);
	}
}

/*
 * Create and register the devid.
 * There are 4 different ways we can get a device id:
 *    1. Already have one - nothing to do
 *    2. Build one from the drive's model and serial numbers
 *    3. Read one from the disk (first sector of last track)
 *    4. Fabricate one and write it on the disk.
 * If any of these succeeds, register the devid
 */
static void
cmdk_devid_setup(struct cmdk *dkp)
{
	int	rc;

	/* Try options until one succeeds, or all have failed */

	/* 1. All done if already registered */
	if (dkp->dk_devid != NULL)
		return;

	/* 2. Build a devid from the model and serial number */
	rc = cmdk_devid_modser(dkp);
	if (rc != DDI_SUCCESS) {
		/* 3. Read devid from the disk, if present */
		rc = cmdk_devid_read(dkp);

		/* 4. otherwise make one up and write it on the disk */
		if (rc != DDI_SUCCESS)
			rc = cmdk_devid_fabricate(dkp);
	}

	/* If we managed to get a devid any of the above ways, register it */
	if (rc == DDI_SUCCESS)
		(void) ddi_devid_register(dkp->dk_dip, dkp->dk_devid);
}
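
/*
 * Illustrative example for cmdk_devid_modser() below (hypothetical
 * model/serial values): a drive reporting model "ST380011A" and serial
 * "3JV0ZW9P" yields the hwid string "ST380011A=3JV0ZW9P", which is
 * passed to ddi_devid_init() with type DEVID_ATA_SERIAL.
 */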

/*
 * Build a devid from the model and serial number
 * Return DDI_SUCCESS or DDI_FAILURE.
 */
static int
cmdk_devid_modser(struct cmdk *dkp)
{
	int	rc = DDI_FAILURE;
	char	*hwid;
	int	modlen;
	int	serlen;

	/*
	 * device ID is a concatenation of model number, '=', serial number.
	 */
	hwid = kmem_alloc(CMDK_HWIDLEN, KM_SLEEP);
	modlen = cmdk_get_modser(dkp, DIOCTL_GETMODEL, hwid, CMDK_HWIDLEN);
	if (modlen == 0) {
		rc = DDI_FAILURE;
		goto err;
	}
	hwid[modlen++] = '=';
	serlen = cmdk_get_modser(dkp, DIOCTL_GETSERIAL,
	    hwid + modlen, CMDK_HWIDLEN - modlen);
	if (serlen == 0) {
		rc = DDI_FAILURE;
		goto err;
	}
	hwid[modlen + serlen] = 0;

	/* Initialize the device ID, trailing NULL not included */
	rc = ddi_devid_init(dkp->dk_dip, DEVID_ATA_SERIAL, modlen + serlen,
	    hwid, (ddi_devid_t *)&dkp->dk_devid);
	if (rc != DDI_SUCCESS) {
		rc = DDI_FAILURE;
		goto err;
	}

	rc = DDI_SUCCESS;

err:
	kmem_free(hwid, CMDK_HWIDLEN);
	return (rc);
}

static int
cmdk_get_modser(struct cmdk *dkp, int ioccmd, char *buf, int len)
{
	dadk_ioc_string_t strarg;
	int		rval;
	char		*s;
	char		ch;
	boolean_t	ret;
	int		i;
	int		tb;

	strarg.is_buf = buf;
	strarg.is_size = len;
	if (dadk_ioctl(DKTP_DATA,
	    dkp->dk_dev,
	    ioccmd,
	    (uintptr_t)&strarg,
	    FNATIVE | FKIOCTL,
	    NULL,
	    &rval) != 0)
		return (0);

	/*
	 * a valid model/serial string must contain a non-zero, non-space
	 * character; trim trailing spaces/NULs
	 */
	ret = B_FALSE;
	s = buf;
	for (i = 0; i < strarg.is_size; i++) {
		ch = *s++;
		if (ch != ' ' && ch != '\0')
			tb = i + 1;
		if (ch != ' ' && ch != '\0' && ch != '0')
			ret = B_TRUE;
	}

	if (ret == B_FALSE)
		return (0);

	return (tb);
}
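
/*
 * Sketch of the on-disk sector shared by cmdk_devid_read() and
 * cmdk_devid_fabricate() below (see struct dk_devid in <sys/dkio.h>
 * for the authoritative layout): a two-byte revision (DK_DEVID_REV_MSB,
 * DK_DEVID_REV_LSB), the devid bytes themselves, and a checksum in the
 * last word of the 512-byte block computed by XOR-ing the preceding
 * words, which is what DKD_GETCHKSUM()/DKD_FORMCHKSUM() access.
 */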

/*
 * Read a devid from the first block of the last track of
 * the last cylinder. Make sure what we read is a valid devid.
 * Return DDI_SUCCESS or DDI_FAILURE.
 */
static int
cmdk_devid_read(struct cmdk *dkp)
{
	diskaddr_t	blk;
	struct dk_devid *dkdevidp;
	uint_t		*ip;
	int		chksum;
	int		i, sz;
	tgdk_iob_handle	handle = NULL;
	int		rc = DDI_FAILURE;

	if (cmlb_get_devid_block(dkp->dk_cmlbhandle, &blk, 0))
		goto err;

	/* read the devid */
	handle = dadk_iob_alloc(DKTP_DATA, blk, NBPSCTR, KM_SLEEP);
	if (handle == NULL)
		goto err;

	dkdevidp = (struct dk_devid *)dadk_iob_xfer(DKTP_DATA, handle, B_READ);
	if (dkdevidp == NULL)
		goto err;

	/* Validate the revision */
	if ((dkdevidp->dkd_rev_hi != DK_DEVID_REV_MSB) ||
	    (dkdevidp->dkd_rev_lo != DK_DEVID_REV_LSB))
		goto err;

	/* Calculate the checksum */
	chksum = 0;
	ip = (uint_t *)dkdevidp;
	for (i = 0; i < ((NBPSCTR - sizeof (int))/sizeof (int)); i++)
		chksum ^= ip[i];
	if (DKD_GETCHKSUM(dkdevidp) != chksum)
		goto err;

	/* Validate the device id */
	if (ddi_devid_valid((ddi_devid_t)dkdevidp->dkd_devid) != DDI_SUCCESS)
		goto err;

	/* keep a copy of the device id */
	sz = ddi_devid_sizeof((ddi_devid_t)dkdevidp->dkd_devid);
	dkp->dk_devid = kmem_alloc(sz, KM_SLEEP);
	bcopy(dkdevidp->dkd_devid, dkp->dk_devid, sz);

	rc = DDI_SUCCESS;

err:
	if (handle != NULL)
		(void) dadk_iob_free(DKTP_DATA, handle);
	return (rc);
}

/*
 * Create a devid and write it on the first block of the last track of
 * the last cylinder.
 * Return DDI_SUCCESS or DDI_FAILURE.
 */
static int
cmdk_devid_fabricate(struct cmdk *dkp)
{
	ddi_devid_t	devid = NULL;	/* devid made by ddi_devid_init */
	struct dk_devid	*dkdevidp;	/* devid struct stored on disk */
	diskaddr_t	blk;
	tgdk_iob_handle	handle = NULL;
	uint_t		*ip, chksum;
	int		i;
	int		rc = DDI_FAILURE;

	if (ddi_devid_init(dkp->dk_dip, DEVID_FAB, 0, NULL, &devid) !=
	    DDI_SUCCESS)
		goto err;

	if (cmlb_get_devid_block(dkp->dk_cmlbhandle, &blk, 0)) {
		/* no device id block address */
		goto err;
	}

	handle = dadk_iob_alloc(DKTP_DATA, blk, NBPSCTR, KM_SLEEP);
	if (!handle)
		goto err;

	/* Locate the buffer */
	dkdevidp = (struct dk_devid *)dadk_iob_htoc(DKTP_DATA, handle);

	/* Fill in the revision */
	bzero(dkdevidp, NBPSCTR);
	dkdevidp->dkd_rev_hi = DK_DEVID_REV_MSB;
	dkdevidp->dkd_rev_lo = DK_DEVID_REV_LSB;

	/* Copy in the device id */
	i = ddi_devid_sizeof(devid);
	if (i > DK_DEVID_SIZE)
		goto err;
	bcopy(devid, dkdevidp->dkd_devid, i);

	/* Calculate the chksum */
	chksum = 0;
	ip = (uint_t *)dkdevidp;
	for (i = 0; i < ((NBPSCTR - sizeof (int))/sizeof (int)); i++)
		chksum ^= ip[i];

	/* Fill in the checksum */
	DKD_FORMCHKSUM(chksum, dkdevidp);

	/* write the devid */
	(void) dadk_iob_xfer(DKTP_DATA, handle, B_WRITE);

	dkp->dk_devid = devid;

	rc = DDI_SUCCESS;

err:
	if (handle != NULL)
		(void) dadk_iob_free(DKTP_DATA, handle);

	if (rc != DDI_SUCCESS && devid != NULL)
		ddi_devid_free(devid);

	return (rc);
}

static void
cmdk_bbh_free_alts(struct cmdk *dkp)
{
	if (dkp->dk_alts_hdl) {
		(void) dadk_iob_free(DKTP_DATA, dkp->dk_alts_hdl);
		kmem_free(dkp->dk_slc_cnt,
		    NDKMAP * (sizeof (uint32_t) + sizeof (struct alts_ent *)));
		dkp->dk_alts_hdl = NULL;
	}
}
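
/*
 * Summary of the on-disk bad-block layout walked by cmdk_bbh_reopen()
 * below: the slice tagged V_ALTSCTR starts with an alts_parttbl label
 * block whose alts_sanity word must equal ALTS_SANITY; alts_ent_base
 * and alts_ent_end locate the remap table within the slice, and each
 * alts_ent entry maps the bad sector range [bad_start, bad_end] to a
 * run of good sectors beginning at good_start.
 */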

static void
cmdk_bbh_reopen(struct cmdk *dkp)
{
	tgdk_iob_handle		handle = NULL;
	diskaddr_t		slcb, slcn, slce;
	struct	alts_parttbl	*ap;
	struct	alts_ent	*enttblp;
	uint32_t		altused;
	uint32_t		altbase;
	uint32_t		altlast;
	int			alts;
	uint16_t		vtoctag;
	int			i, j;

	/* find slice with V_ALTSCTR tag */
	for (alts = 0; alts < NDKMAP; alts++) {
		if (cmlb_partinfo(
		    dkp->dk_cmlbhandle,
		    alts,
		    &slcn,
		    &slcb,
		    NULL,
		    &vtoctag,
		    0)) {
			goto empty;	/* no partition table exists */
		}

		if (vtoctag == V_ALTSCTR && slcn > 1)
			break;
	}
	if (alts >= NDKMAP) {
		goto empty;	/* no V_ALTSCTR slice defined */
	}

	/* read in ALTS label block */
	handle = dadk_iob_alloc(DKTP_DATA, slcb, NBPSCTR, KM_SLEEP);
	if (!handle) {
		goto empty;
	}

	ap = (struct alts_parttbl *)dadk_iob_xfer(DKTP_DATA, handle, B_READ);
	if (!ap || (ap->alts_sanity != ALTS_SANITY)) {
		goto empty;
	}

	altused = ap->alts_ent_used;	/* number of BB entries */
	altbase = ap->alts_ent_base;	/* blk offset from begin slice */
	altlast = ap->alts_ent_end;	/* blk offset to last block */
	/* ((altused * sizeof (struct alts_ent) + NBPSCTR - 1) & ~NBPSCTR) */

	if (altused == 0 ||
	    altbase < 1 ||
	    altbase > altlast ||
	    altlast >= slcn) {
		goto empty;
	}
	(void) dadk_iob_free(DKTP_DATA, handle);

	/* read in ALTS remapping table */
	handle = dadk_iob_alloc(DKTP_DATA,
	    slcb + altbase,
	    (altlast - altbase + 1) << SCTRSHFT, KM_SLEEP);
	if (!handle) {
		goto empty;
	}

	enttblp = (struct alts_ent *)dadk_iob_xfer(DKTP_DATA, handle, B_READ);
	if (!enttblp) {
		goto empty;
	}

	rw_enter(&dkp->dk_bbh_mutex, RW_WRITER);

	/* allocate space for dk_slc_cnt and dk_slc_ent tables */
	if (dkp->dk_slc_cnt == NULL) {
		dkp->dk_slc_cnt = kmem_alloc(NDKMAP *
		    (sizeof (uint32_t) + sizeof (struct alts_ent *)), KM_SLEEP);
	}
	dkp->dk_slc_ent = (struct alts_ent **)(dkp->dk_slc_cnt + NDKMAP);

	/* free previous BB table (if any) */
	if (dkp->dk_alts_hdl) {
		(void) dadk_iob_free(DKTP_DATA, dkp->dk_alts_hdl);
		dkp->dk_alts_hdl = NULL;
		dkp->dk_altused = 0;
	}

	/* save linkage to new BB table */
	dkp->dk_alts_hdl = handle;
	dkp->dk_altused = altused;

	/*
	 * build indexes to BB table by slice
	 * effectively we have
	 *	struct alts_ent *enttblp[altused];
	 *
	 *	uint32_t	dk_slc_cnt[NDKMAP];
	 *	struct alts_ent *dk_slc_ent[NDKMAP];
	 */
	for (i = 0; i < NDKMAP; i++) {
		if (cmlb_partinfo(
		    dkp->dk_cmlbhandle,
		    i,
		    &slcn,
		    &slcb,
		    NULL,
		    NULL,
		    0)) {
			goto empty1;
		}

		dkp->dk_slc_cnt[i] = 0;
		if (slcn == 0)
			continue;	/* slice is not allocated */

		/* last block in slice */
		slce = slcb + slcn - 1;

		/* find first remap entry at or after the start of slice */
		for (j = 0; j < altused; j++) {
			if (enttblp[j].bad_start + enttblp[j].bad_end >= slcb)
				break;
		}
		dkp->dk_slc_ent[i] = enttblp + j;

		/* count remap entries until the end of slice */
		for (; j < altused && enttblp[j].bad_start <= slce; j++) {
			dkp->dk_slc_cnt[i] += 1;
		}
	}

	rw_exit(&dkp->dk_bbh_mutex);
	return;

empty:
	rw_enter(&dkp->dk_bbh_mutex, RW_WRITER);
empty1:
	if (handle && handle != dkp->dk_alts_hdl)
		(void) dadk_iob_free(DKTP_DATA, handle);

	if (dkp->dk_alts_hdl) {
		(void) dadk_iob_free(DKTP_DATA, dkp->dk_alts_hdl);
		dkp->dk_alts_hdl = NULL;
	}

	rw_exit(&dkp->dk_bbh_mutex);
}

/*ARGSUSED*/
static bbh_cookie_t
cmdk_bbh_htoc(opaque_t bbh_data, opaque_t handle)
{
	struct	bbh_handle *hp;
	bbh_cookie_t ckp;

	hp = (struct bbh_handle *)handle;
	ckp = hp->h_cktab + hp->h_idx;
	hp->h_idx++;
	return (ckp);
}

/*ARGSUSED*/
static void
cmdk_bbh_freehandle(opaque_t bbh_data, opaque_t handle)
{
	struct	bbh_handle *hp;

	hp = (struct bbh_handle *)handle;
	kmem_free(handle, (sizeof (struct bbh_handle) +
	    (hp->h_totck * (sizeof (struct bbh_cookie)))));
}
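
/*
 * Worked example for cmdk_bbh_gethandle() below (hypothetical sector
 * numbers): a 6-sector request starting at sector 100, with a bad
 * cluster covering sectors 102-103 remapped to good sectors starting
 * at 9000, is split into three cookies (CASE 4):
 *	{ sector 100, len 2 }, { sector 9000, len 2 }, { sector 104, len 2 }
 */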

/*
 * cmdk_bbh_gethandle remaps the bad sectors to alternates.
 * There are 7 different cases when the comparison is made
 * between the bad sector cluster and the disk section.
 *
 *	bad sector cluster	gggggggggggbbbbbbbggggggggggg
 *	case 1:			   ddddd
 *	case 2:				   -d-----
 *	case 3:					     ddddd
 *	case 4:				 dddddddddddd
 *	case 5:			      ddddddd-----
 *	case 6:				  ---ddddddd
 *	case 7:				  ddddddd
 *
 *	where:	g = good sector,	b = bad sector
 *		d = sector in disk section
 *		- = disk section may be extended to cover those disk areas
 */

static opaque_t
cmdk_bbh_gethandle(opaque_t bbh_data, struct buf *bp)
{
	struct cmdk		*dkp = (struct cmdk *)bbh_data;
	struct bbh_handle	*hp;
	struct bbh_cookie	*ckp;
	struct alts_ent		*altp;
	uint32_t		alts_used;
	uint32_t		part = CMDKPART(bp->b_edev);
	daddr32_t		lastsec;
	long			d_count;
	int			i;
	int			idx;
	int			cnt;

	if (part >= V_NUMPAR)
		return (NULL);

	/*
	 * This if statement is atomic and it will succeed
	 * if there are no bad blocks (almost always)
	 *
	 * so this if is performed outside of the rw_enter for speed
	 * and then repeated inside the rw_enter for safety
	 */
	if (!dkp->dk_alts_hdl) {
		return (NULL);
	}

	rw_enter(&dkp->dk_bbh_mutex, RW_READER);

	if (dkp->dk_alts_hdl == NULL) {
		rw_exit(&dkp->dk_bbh_mutex);
		return (NULL);
	}

	alts_used = dkp->dk_slc_cnt[part];
	if (alts_used == 0) {
		rw_exit(&dkp->dk_bbh_mutex);
		return (NULL);
	}
	altp = dkp->dk_slc_ent[part];

	/*
	 * binary search for the largest bad sector index in the alternate
	 * entry table which overlaps or is larger than the starting d_sec
	 */
	i = cmdk_bbh_bsearch(altp, alts_used, GET_BP_SEC(bp));
	/* if starting sector is > the largest bad sector, return */
	if (i == -1) {
		rw_exit(&dkp->dk_bbh_mutex);
		return (NULL);
	}
	/* i is the starting index. Set altp to the starting entry addr */
	altp += i;

	d_count = bp->b_bcount >> SCTRSHFT;
	lastsec = GET_BP_SEC(bp) + d_count - 1;

	/* count the remap entries that may overlap this request */
	for (idx = i, cnt = 0; idx < alts_used; idx++, altp++, cnt++) {
		if (lastsec < altp->bad_start)
			break;
	}

	if (!cnt) {
		rw_exit(&dkp->dk_bbh_mutex);
		return (NULL);
	}

	/* calculate the maximum number of reserved cookies */
	cnt <<= 1;
	cnt++;

	/* allocate the handle */
	hp = (struct bbh_handle *)kmem_zalloc((sizeof (*hp) +
	    (cnt * sizeof (*ckp))), KM_SLEEP);

	hp->h_idx = 0;
	hp->h_totck = cnt;
	ckp = hp->h_cktab = (struct bbh_cookie *)(hp + 1);
	ckp[0].ck_sector = GET_BP_SEC(bp);
	ckp[0].ck_seclen = d_count;

	altp = dkp->dk_slc_ent[part];
	altp += i;
	for (idx = 0; i < alts_used; i++, altp++) {
		/* CASE 1: */
		if (lastsec < altp->bad_start)
			break;

		/* CASE 3: */
		if (ckp[idx].ck_sector > altp->bad_end)
			continue;

		/* CASE 2 and 7: */
		if ((ckp[idx].ck_sector >= altp->bad_start) &&
		    (lastsec <= altp->bad_end)) {
			ckp[idx].ck_sector = altp->good_start +
			    ckp[idx].ck_sector - altp->bad_start;
			break;
		}

		/* at least one bad sector in our section.  break it. */
		/* CASE 5: */
		if ((lastsec >= altp->bad_start) &&
		    (lastsec <= altp->bad_end)) {
			ckp[idx+1].ck_seclen = lastsec - altp->bad_start + 1;
			ckp[idx].ck_seclen -= ckp[idx+1].ck_seclen;
			ckp[idx+1].ck_sector = altp->good_start;
			break;
		}
		/* CASE 6: */
		if ((ckp[idx].ck_sector <= altp->bad_end) &&
		    (ckp[idx].ck_sector >= altp->bad_start)) {
			ckp[idx+1].ck_seclen = ckp[idx].ck_seclen;
			ckp[idx].ck_seclen = altp->bad_end -
			    ckp[idx].ck_sector + 1;
			ckp[idx+1].ck_seclen -= ckp[idx].ck_seclen;
			ckp[idx].ck_sector = altp->good_start +
			    ckp[idx].ck_sector - altp->bad_start;
			idx++;
			ckp[idx].ck_sector = altp->bad_end + 1;
			continue;	/* check rest of section */
		}

		/* CASE 4: */
		ckp[idx].ck_seclen = altp->bad_start - ckp[idx].ck_sector;
		ckp[idx+1].ck_sector = altp->good_start;
		ckp[idx+1].ck_seclen = altp->bad_end - altp->bad_start + 1;
		idx += 2;
		ckp[idx].ck_sector = altp->bad_end + 1;
		ckp[idx].ck_seclen = lastsec - altp->bad_end;
	}

	rw_exit(&dkp->dk_bbh_mutex);
	return ((opaque_t)hp);
}
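
/*
 * Rough guide to cmdk_bbh_bsearch() below: the first loop picks the
 * largest power of two <= cnt as the initial probe index, and each pass
 * halves the stride. The return value is the index of the entry whose
 * [bad_start, bad_end] range contains key, otherwise the index of a
 * nearby entry beyond key (recorded in mystatus), or -1 when key lies
 * past the last entry.
 */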

static int
cmdk_bbh_bsearch(struct alts_ent *buf, int cnt, daddr32_t key)
{
	int	i;
	int	ind;
	int	interval;
	int	mystatus = -1;

	if (!cnt)
		return (mystatus);

	ind = 1;	/* compiler complains about possible uninitialized var */
	for (i = 1; i <= cnt; i <<= 1)
		ind = i;

	for (interval = ind; interval; ) {
		if ((key >= buf[ind-1].bad_start) &&
		    (key <= buf[ind-1].bad_end)) {
			return (ind-1);
		} else {
			interval >>= 1;
			if (key < buf[ind-1].bad_start) {
				/* record the largest bad sector index */
				mystatus = ind-1;
				if (!interval)
					break;
				ind = ind - interval;
			} else {
				/*
				 * if key is larger than the last element
				 * then break
				 */
				if ((ind == cnt) || !interval)
					break;
				if ((ind+interval) <= cnt)
					ind += interval;
			}
		}
	}
	return (mystatus);
}