/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/scsi/scsi.h>
#include <sys/dktp/cm.h>
#include <sys/dktp/quetypes.h>
#include <sys/dktp/queue.h>
#include <sys/dktp/fctypes.h>
#include <sys/dktp/flowctrl.h>
#include <sys/dktp/cmdev.h>
#include <sys/dkio.h>
#include <sys/dktp/tgdk.h>
#include <sys/dktp/dadk.h>
#include <sys/dktp/bbh.h>
#include <sys/dktp/altsctr.h>
#include <sys/dktp/cmdk.h>

#include <sys/stat.h>
#include <sys/vtoc.h>
#include <sys/file.h>
#include <sys/dktp/dadkio.h>
#include <sys/aio_req.h>

#include <sys/cmlb.h>

/*
 * Local Static Data
 */
#ifdef CMDK_DEBUG
#define	DENT	0x0001
#define	DIO	0x0002

static	int	cmdk_debug = DIO;
#endif

#ifndef	TRUE
#define	TRUE	1
#endif

#ifndef	FALSE
#define	FALSE	0
#endif

/*
 * NDKMAP is the base number for accessing the fdisk partitions.
 * c?d?p0 --> cmdk@?,?:q
 */
#define	PARTITION0_INDEX	(NDKMAP + 0)

#define	DKTP_DATA		(dkp->dk_tgobjp)->tg_data
#define	DKTP_EXT		(dkp->dk_tgobjp)->tg_ext

static void *cmdk_state;

/*
 * the cmdk_attach_mutex protects cmdk_max_instance in multi-threaded
 * attach situations
 */
static kmutex_t cmdk_attach_mutex;
static int cmdk_max_instance = 0;

/*
 * Panic dumpsys state
 * There is only a single flag that is not mutex locked since
 * the system is prevented from thread switching and cmdk_dump
 * will only be called in a single threaded operation.
 */
static int	cmdk_indump;

/*
 * Local Function Prototypes
 */
static int cmdk_create_obj(dev_info_t *dip, struct cmdk *dkp);
static void cmdk_destroy_obj(dev_info_t *dip, struct cmdk *dkp);
static void cmdkmin(struct buf *bp);
static int cmdkrw(dev_t dev, struct uio *uio, int flag);
static int cmdkarw(dev_t dev, struct aio_req *aio, int flag);

/*
 * Bad Block Handling Functions Prototypes
 */
static void cmdk_bbh_reopen(struct cmdk *dkp);
static opaque_t cmdk_bbh_gethandle(opaque_t bbh_data, struct buf *bp);
static bbh_cookie_t cmdk_bbh_htoc(opaque_t bbh_data, opaque_t handle);
static void cmdk_bbh_freehandle(opaque_t bbh_data, opaque_t handle);
static void cmdk_bbh_close(struct cmdk *dkp);
static void cmdk_bbh_setalts_idx(struct cmdk *dkp);
static int cmdk_bbh_bsearch(struct alts_ent *buf, int cnt, daddr32_t key);

static struct bbh_objops cmdk_bbh_ops = {
	nulldev,
	nulldev,
	cmdk_bbh_gethandle,
	cmdk_bbh_htoc,
	cmdk_bbh_freehandle,
	0, 0
};

static int cmdkopen(dev_t *dev_p, int flag, int otyp, cred_t *credp);
static int cmdkclose(dev_t dev, int flag, int otyp, cred_t *credp);
static int cmdkstrategy(struct buf *bp);
static int cmdkdump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk);
static int cmdkioctl(dev_t, int, intptr_t, int, cred_t *, int *);
static int cmdkread(dev_t dev, struct uio *uio, cred_t *credp);
static int cmdkwrite(dev_t dev, struct uio *uio, cred_t *credp);
static int cmdk_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp);
static int cmdkaread(dev_t dev, struct aio_req *aio, cred_t *credp);
static int cmdkawrite(dev_t dev, struct aio_req *aio, cred_t *credp);

/*
 * Device driver ops vector
 */

static struct cb_ops cmdk_cb_ops = {
	cmdkopen,			/* open */
	cmdkclose,			/* close */
	cmdkstrategy,			/* strategy */
	nodev,				/* print */
	cmdkdump,			/* dump */
	cmdkread,			/* read */
	cmdkwrite,			/* write */
	cmdkioctl,			/* ioctl */
	nodev,				/* devmap */
	nodev,				/* mmap */
	nodev,				/* segmap */
	nochpoll,			/* poll */
	cmdk_prop_op,			/* cb_prop_op */
	0,				/* streamtab */
	D_64BIT | D_MP | D_NEW,		/* Driver compatibility flag */
	CB_REV,				/* cb_rev */
	cmdkaread,			/* async read */
	cmdkawrite			/* async write */
};

static int cmdkinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
    void **result);
static int cmdkprobe(dev_info_t *dip);
static int cmdkattach(dev_info_t *dip, ddi_attach_cmd_t cmd);
static int cmdkdetach(dev_info_t *dip, ddi_detach_cmd_t cmd);

static void cmdk_setup_pm(dev_info_t *dip, struct cmdk *dkp);
static int cmdkresume(dev_info_t *dip);
static int cmdksuspend(dev_info_t *dip);
static int cmdkpower(dev_info_t *dip, int component, int level);

struct dev_ops cmdk_ops = {
	DEVO_REV,			/* devo_rev */
	0,				/* refcnt */
	cmdkinfo,			/* info */
	nulldev,			/* identify */
	cmdkprobe,			/* probe */
	cmdkattach,			/* attach */
	cmdkdetach,			/* detach */
	nodev,				/* reset */
	&cmdk_cb_ops,			/* driver operations */
	(struct bus_ops *)0,		/* bus operations */
	cmdkpower			/* power */
};

/*
 * This is the loadable module wrapper.
 */
#include <sys/modctl.h>

extern struct mod_ops mod_driverops;

static struct modldrv modldrv = {
	&mod_driverops,		/* Type of module. This one is a driver */
	"Common Direct Access Disk",
	&cmdk_ops,		/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&modldrv, NULL
};

/* Function prototypes for cmlb callbacks */

static int cmdk_lb_rdwr(dev_info_t *dip, uchar_t cmd, void *bufaddr,
    diskaddr_t start, size_t length, void *tg_cookie);

static int cmdk_lb_getinfo(dev_info_t *dip, int cmd, void *arg,
    void *tg_cookie);

static void cmdk_devid_setup(struct cmdk *dkp);
static int cmdk_devid_modser(struct cmdk *dkp);
static int cmdk_get_modser(struct cmdk *dkp, int ioccmd, char *buf, int len);
static int cmdk_devid_fabricate(struct cmdk *dkp);
static int cmdk_devid_read(struct cmdk *dkp);

static cmlb_tg_ops_t cmdk_lb_ops = {
	TG_DK_OPS_VERSION_1,
	cmdk_lb_rdwr,
	cmdk_lb_getinfo
};

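/*
 * Check whether the partition selected by 'dev' is open through any
 * layered or regular open type.  The caller must hold dk_mutex.
 */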
static boolean_t
cmdk_isopen(struct cmdk *dkp, dev_t dev)
{
	int		part, otyp;
	ulong_t		partbit;

	ASSERT(MUTEX_HELD((&dkp->dk_mutex)));

	part = CMDKPART(dev);
	partbit = 1 << part;

	/* layered opens are counted; regular opens are per-type bitmasks */
	if (dkp->dk_open_lyr[part] != 0)
		return (B_TRUE);
	for (otyp = 0; otyp < OTYPCNT; otyp++)
		if (dkp->dk_open_reg[otyp] & partbit)
			return (B_TRUE);
	return (B_FALSE);
}

int
_init(void)
{
	int	rval;

	if (rval = ddi_soft_state_init(&cmdk_state, sizeof (struct cmdk), 7))
		return (rval);

	mutex_init(&cmdk_attach_mutex, NULL, MUTEX_DRIVER, NULL);
	if ((rval = mod_install(&modlinkage)) != 0) {
		mutex_destroy(&cmdk_attach_mutex);
		ddi_soft_state_fini(&cmdk_state);
	}
	return (rval);
}

int
_fini(void)
{
	return (EBUSY);

	/*
	 * This has been commented out until cmdk is a true
	 * unloadable module. Right now x86's are panicking on
	 * a diskless reconfig boot.
	 */

#if 0	/* bugid 1186679 */
	int rval;

	rval = mod_remove(&modlinkage);
	if (rval != 0)
		return (rval);

	mutex_destroy(&cmdk_attach_mutex);
	ddi_soft_state_fini(&cmdk_state);

	return (0);
#endif
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*
 * Autoconfiguration Routines
 */

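/*
 * cmdkprobe: allocate the per-instance soft state and the dadk
 * linkage, then probe for the target device.
 */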
static int
cmdkprobe(dev_info_t *dip)
{
	int		instance;
	int		status;
	struct	cmdk	*dkp;

	instance = ddi_get_instance(dip);

	if (ddi_get_soft_state(cmdk_state, instance))
		return (DDI_PROBE_PARTIAL);

	if ((ddi_soft_state_zalloc(cmdk_state, instance) != DDI_SUCCESS) ||
	    ((dkp = ddi_get_soft_state(cmdk_state, instance)) == NULL))
		return (DDI_PROBE_PARTIAL);

	mutex_init(&dkp->dk_mutex, NULL, MUTEX_DRIVER, NULL);
	rw_init(&dkp->dk_bbh_mutex, NULL, RW_DRIVER, NULL);
	dkp->dk_dip = dip;
	mutex_enter(&dkp->dk_mutex);

	dkp->dk_dev = makedevice(ddi_driver_major(dip),
	    ddi_get_instance(dip) << CMDK_UNITSHF);

	/* linkage to dadk and strategy */
	if (cmdk_create_obj(dip, dkp) != DDI_SUCCESS) {
		mutex_exit(&dkp->dk_mutex);
		mutex_destroy(&dkp->dk_mutex);
		rw_destroy(&dkp->dk_bbh_mutex);
		ddi_soft_state_free(cmdk_state, instance);
		return (DDI_PROBE_PARTIAL);
	}

	status = dadk_probe(DKTP_DATA, KM_NOSLEEP);
	if (status != DDI_PROBE_SUCCESS) {
		cmdk_destroy_obj(dip, dkp);	/* dadk/strategy linkage */
		mutex_exit(&dkp->dk_mutex);
		mutex_destroy(&dkp->dk_mutex);
		rw_destroy(&dkp->dk_bbh_mutex);
		ddi_soft_state_free(cmdk_state, instance);
		return (status);
	}

	mutex_exit(&dkp->dk_mutex);
#ifdef CMDK_DEBUG
	if (cmdk_debug & DENT)
		PRF("cmdkprobe: instance= %d name= `%s`\n",
		    instance, ddi_get_name_addr(dip));
#endif
	return (status);
}

static int
cmdkattach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int		instance;
	struct		cmdk *dkp;
	char		*node_type;

	switch (cmd) {
	case DDI_ATTACH:
		break;
	case DDI_RESUME:
		return (cmdkresume(dip));
	default:
		return (DDI_FAILURE);
	}

	instance = ddi_get_instance(dip);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
		return (DDI_FAILURE);

	dkp->dk_pm_level = CMDK_SPINDLE_UNINIT;
	mutex_init(&dkp->dk_mutex, NULL, MUTEX_DRIVER, NULL);

	mutex_enter(&dkp->dk_mutex);

	/* dadk_attach is an empty function that only returns SUCCESS */
	(void) dadk_attach(DKTP_DATA);

	node_type = (DKTP_EXT->tg_nodetype);

	/*
	 * this open allows cmlb to read the device
	 * and determine the label types
	 * so that cmlb can create minor nodes for the device
	 */

	/* open the target disk */
	if (dadk_open(DKTP_DATA, 0) != DDI_SUCCESS)
		goto fail2;

	/* mark as having opened target */
	dkp->dk_flag |= CMDK_TGDK_OPEN;

	cmlb_alloc_handle((cmlb_handle_t *)&dkp->dk_cmlbhandle);

	if (cmlb_attach(dip,
	    &cmdk_lb_ops,
	    DTYPE_DIRECT,		/* device_type */
	    0,				/* removable */
	    0,				/* hot pluggable XXX */
	    node_type,
	    CMLB_CREATE_ALTSLICE_VTOC_16_DTYPE_DIRECT,	/* alter_behaviour */
	    dkp->dk_cmlbhandle,
	    0) != 0)
		goto fail1;

	/* Calling validate will create minor nodes according to disk label */
	(void) cmlb_validate(dkp->dk_cmlbhandle, 0, 0);

	/* set bbh (Bad Block Handling) */
	cmdk_bbh_reopen(dkp);

	/* setup devid string */
	cmdk_devid_setup(dkp);

	mutex_enter(&cmdk_attach_mutex);
	if (instance > cmdk_max_instance)
		cmdk_max_instance = instance;
	mutex_exit(&cmdk_attach_mutex);

	mutex_exit(&dkp->dk_mutex);

	/*
	 * Add a zero-length attribute to tell the world we support
	 * kernel ioctls (for layered drivers)
	 */
	(void) ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
	    DDI_KERNEL_IOCTL, NULL, 0);
	ddi_report_dev(dip);

	/*
	 * Initialize power management
	 */
	mutex_init(&dkp->dk_pm_mutex, NULL, MUTEX_DRIVER, NULL);
	cv_init(&dkp->dk_suspend_cv, NULL, CV_DRIVER, NULL);
	cmdk_setup_pm(dip, dkp);

	return (DDI_SUCCESS);

fail1:
	cmlb_free_handle(&dkp->dk_cmlbhandle);
	(void) dadk_close(DKTP_DATA);
fail2:
	cmdk_destroy_obj(dip, dkp);
	rw_destroy(&dkp->dk_bbh_mutex);
	mutex_exit(&dkp->dk_mutex);
	mutex_destroy(&dkp->dk_mutex);
	ddi_soft_state_free(cmdk_state, instance);
	return (DDI_FAILURE);
}

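/*
 * cmdkdetach refuses DDI_DETACH while any instance of the driver is
 * still open; DDI_SUSPEND is handed off to cmdksuspend.
 */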
static int
cmdkdetach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	struct cmdk	*dkp;
	int		instance;
	int		max_instance;

	switch (cmd) {
	case DDI_DETACH:
		/* return (DDI_FAILURE); */
		break;
	case DDI_SUSPEND:
		return (cmdksuspend(dip));
	default:
#ifdef CMDK_DEBUG
		if (cmdk_debug & DIO) {
			PRF("cmdkdetach: cmd = %d unknown\n", cmd);
		}
#endif
		return (DDI_FAILURE);
	}

	mutex_enter(&cmdk_attach_mutex);
	max_instance = cmdk_max_instance;
	mutex_exit(&cmdk_attach_mutex);

	/* check if any instance of driver is open */
	for (instance = 0; instance < max_instance; instance++) {
		dkp = ddi_get_soft_state(cmdk_state, instance);
		if (!dkp)
			continue;
		if (dkp->dk_flag & CMDK_OPEN)
			return (DDI_FAILURE);
	}

	instance = ddi_get_instance(dip);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
		return (DDI_SUCCESS);

	mutex_enter(&dkp->dk_mutex);

	/*
	 * The cmdk_part_info call at the end of cmdkattach may have
	 * caused cmdk_reopen to do a TGDK_OPEN, make sure we close on
	 * detach for case when cmdkopen/cmdkclose never occurs.
	 */
	if (dkp->dk_flag & CMDK_TGDK_OPEN) {
		dkp->dk_flag &= ~CMDK_TGDK_OPEN;
		(void) dadk_close(DKTP_DATA);
	}

	cmlb_detach(dkp->dk_cmlbhandle, 0);
	cmlb_free_handle(&dkp->dk_cmlbhandle);
	ddi_prop_remove_all(dip);

	cmdk_destroy_obj(dip, dkp);	/* dadk/strategy linkage */
	mutex_exit(&dkp->dk_mutex);
	mutex_destroy(&dkp->dk_mutex);
	rw_destroy(&dkp->dk_bbh_mutex);
	mutex_destroy(&dkp->dk_pm_mutex);
	cv_destroy(&dkp->dk_suspend_cv);
	ddi_soft_state_free(cmdk_state, instance);

	return (DDI_SUCCESS);
}

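/*
 * cmdkinfo implements getinfo(9E): map a dev_t to the owning
 * dev_info_t or to its instance number.
 */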
static int
cmdkinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	dev_t		dev = (dev_t)arg;
	int		instance;
	struct	cmdk	*dkp;

#ifdef lint
	dip = dip;	/* no one ever uses this */
#endif
#ifdef CMDK_DEBUG
	if (cmdk_debug & DENT)
		PRF("cmdkinfo: call\n");
#endif
	instance = CMDKUNIT(dev);

	switch (infocmd) {
	case DDI_INFO_DEVT2DEVINFO:
		if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
			return (DDI_FAILURE);
		*result = (void *) dkp->dk_dip;
		break;
	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)(intptr_t)instance;
		break;
	default:
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}

/*
 * Initialize the power management components
 */
static void
cmdk_setup_pm(dev_info_t *dip, struct cmdk *dkp)
{
	char *pm_comp[] = { "NAME=cmdk", "0=off", "1=on", NULL };

	/*
	 * Since the cmdk device does not have the 'reg' property,
	 * cpr will not call its DDI_SUSPEND/DDI_RESUME entries.
	 * The following code is to tell cpr that this device
	 * DOES need to be suspended and resumed.
	 */
	(void) ddi_prop_update_string(DDI_DEV_T_NONE, dip,
	    "pm-hardware-state", "needs-suspend-resume");

	if (ddi_prop_update_string_array(DDI_DEV_T_NONE, dip,
	    "pm-components", pm_comp, 3) == DDI_PROP_SUCCESS) {
		if (pm_raise_power(dip, 0, CMDK_SPINDLE_ON) == DDI_SUCCESS) {
			mutex_enter(&dkp->dk_pm_mutex);
			dkp->dk_pm_level = CMDK_SPINDLE_ON;
			dkp->dk_pm_is_enabled = 1;
			mutex_exit(&dkp->dk_pm_mutex);
		} else {
			mutex_enter(&dkp->dk_pm_mutex);
			dkp->dk_pm_level = CMDK_SPINDLE_OFF;
			dkp->dk_pm_is_enabled = 0;
			mutex_exit(&dkp->dk_pm_mutex);
		}
	} else {
		mutex_enter(&dkp->dk_pm_mutex);
		dkp->dk_pm_level = CMDK_SPINDLE_UNINIT;
		dkp->dk_pm_is_enabled = 0;
		mutex_exit(&dkp->dk_pm_mutex);
	}
}

/*
 * suspend routine; it runs when the driver receives DDI_SUSPEND
 * in detach(9E) from system power management
 */
static int
cmdksuspend(dev_info_t *dip)
{
	struct cmdk	*dkp;
	int		instance;
	clock_t		count = 0;

	instance = ddi_get_instance(dip);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
		return (DDI_FAILURE);
	mutex_enter(&dkp->dk_mutex);
	if (dkp->dk_flag & CMDK_SUSPEND) {
		mutex_exit(&dkp->dk_mutex);
		return (DDI_SUCCESS);
	}
	dkp->dk_flag |= CMDK_SUSPEND;

	/* wait for outstanding commands to drain; give up after ~60 seconds */
	while (dadk_getcmds(DKTP_DATA) != 0) {
		delay(drv_usectohz(1000000));
		if (count > 60) {
			dkp->dk_flag &= ~CMDK_SUSPEND;
			cv_broadcast(&dkp->dk_suspend_cv);
			mutex_exit(&dkp->dk_mutex);
			return (DDI_FAILURE);
		}
		count++;
	}
	mutex_exit(&dkp->dk_mutex);
	return (DDI_SUCCESS);
}

/*
 * resume routine; it runs when the driver receives DDI_RESUME
 * in attach(9E) from system power management
 */
static int
cmdkresume(dev_info_t *dip)
{
	struct cmdk	*dkp;
	int		instance;

	instance = ddi_get_instance(dip);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
		return (DDI_FAILURE);
	mutex_enter(&dkp->dk_mutex);
	if (!(dkp->dk_flag & CMDK_SUSPEND)) {
		mutex_exit(&dkp->dk_mutex);
		return (DDI_FAILURE);
	}
	dkp->dk_pm_level = CMDK_SPINDLE_ON;
	dkp->dk_flag &= ~CMDK_SUSPEND;
	cv_broadcast(&dkp->dk_suspend_cv);
	mutex_exit(&dkp->dk_mutex);
	return (DDI_SUCCESS);

}

/*
 * power management entry point; it is used to change the power
 * state of a component.
 * The real hard drive suspend/resume is handled in ata, so this
 * function does no real work other than verifying that the disk
 * is idle before the spindle is powered down.
 */
static int
cmdkpower(dev_info_t *dip, int component, int level)
{
	struct cmdk	*dkp;
	int		instance;

	instance = ddi_get_instance(dip);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)) ||
	    component != 0 || level > CMDK_SPINDLE_ON ||
	    level < CMDK_SPINDLE_OFF) {
		return (DDI_FAILURE);
	}

	mutex_enter(&dkp->dk_pm_mutex);
	if (dkp->dk_pm_is_enabled && dkp->dk_pm_level == level) {
		mutex_exit(&dkp->dk_pm_mutex);
		return (DDI_SUCCESS);
	}
	mutex_exit(&dkp->dk_pm_mutex);

	if ((level == CMDK_SPINDLE_OFF) &&
	    (dadk_getcmds(DKTP_DATA) != 0)) {
		return (DDI_FAILURE);
	}

	mutex_enter(&dkp->dk_pm_mutex);
	dkp->dk_pm_level = level;
	mutex_exit(&dkp->dk_pm_mutex);
	return (DDI_SUCCESS);
}

static int
cmdk_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
    char *name, caddr_t valuep, int *lengthp)
{
	struct	cmdk	*dkp;

#ifdef CMDK_DEBUG
	if (cmdk_debug & DENT)
		PRF("cmdk_prop_op: call\n");
#endif

	dkp = ddi_get_soft_state(cmdk_state, ddi_get_instance(dip));
	if (dkp == NULL)
		return (ddi_prop_op(dev, dip, prop_op, mod_flags,
		    name, valuep, lengthp));

	return (cmlb_prop_op(dkp->dk_cmlbhandle,
	    dev, dip, prop_op, mod_flags, name, valuep, lengthp,
	    CMDKPART(dev), NULL));
}

/*
 * dump routine: called at panic time to write 'nblk' blocks of
 * memory at 'addr' to the dump device
 */
static int
cmdkdump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk)
{
	int		instance;
	struct	cmdk	*dkp;
	diskaddr_t	p_lblksrt;
	diskaddr_t	p_lblkcnt;
	struct	buf	local;
	struct	buf	*bp;

#ifdef CMDK_DEBUG
	if (cmdk_debug & DENT)
		PRF("cmdkdump: call\n");
#endif
	instance = CMDKUNIT(dev);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)) || (blkno < 0))
		return (ENXIO);

	if (cmlb_partinfo(
	    dkp->dk_cmlbhandle,
	    CMDKPART(dev),
	    &p_lblkcnt,
	    &p_lblksrt,
	    NULL,
	    NULL,
	    0)) {
		return (ENXIO);
	}

	if ((blkno+nblk) > p_lblkcnt)
		return (EINVAL);

	cmdk_indump = 1;	/* Tell disk targets we are panic dumping */

	bp = &local;
	bzero(bp, sizeof (*bp));
	bp->b_flags = B_BUSY;
	bp->b_un.b_addr = addr;
	bp->b_bcount = nblk << SCTRSHFT;
	SET_BP_SEC(bp, ((ulong_t)(p_lblksrt + blkno)));

	(void) dadk_dump(DKTP_DATA, bp);
	return (bp->b_error);
}

/*
 * Copy in the dadkio_rwcmd according to the user's data model.  If needed,
 * convert it for our internal use.
 */
static int
rwcmd_copyin(struct dadkio_rwcmd *rwcmdp, caddr_t inaddr, int flag)
{
	switch (ddi_model_convert_from(flag)) {
		case DDI_MODEL_ILP32: {
			struct dadkio_rwcmd32 cmd32;

			if (ddi_copyin(inaddr, &cmd32,
			    sizeof (struct dadkio_rwcmd32), flag)) {
				return (EFAULT);
			}

			rwcmdp->cmd = cmd32.cmd;
			rwcmdp->flags = cmd32.flags;
			rwcmdp->blkaddr = (daddr_t)cmd32.blkaddr;
			rwcmdp->buflen = cmd32.buflen;
			rwcmdp->bufaddr = (caddr_t)(intptr_t)cmd32.bufaddr;
			/*
			 * Note: we do not convert the 'status' field,
			 * as it should not contain valid data at this
			 * point.
			 */
			bzero(&rwcmdp->status, sizeof (rwcmdp->status));
			break;
		}
		case DDI_MODEL_NONE: {
			if (ddi_copyin(inaddr, rwcmdp,
			    sizeof (struct dadkio_rwcmd), flag)) {
				return (EFAULT);
			}
		}
	}
	return (0);
}

/*
 * If necessary, convert the internal rwcmdp and status to the appropriate
 * data model and copy it out to the user.
 */
static int
rwcmd_copyout(struct dadkio_rwcmd *rwcmdp, caddr_t outaddr, int flag)
{
	switch (ddi_model_convert_from(flag)) {
		case DDI_MODEL_ILP32: {
			struct dadkio_rwcmd32 cmd32;

			cmd32.cmd = rwcmdp->cmd;
			cmd32.flags = rwcmdp->flags;
			cmd32.blkaddr = rwcmdp->blkaddr;
			cmd32.buflen = rwcmdp->buflen;
			ASSERT64(((uintptr_t)rwcmdp->bufaddr >> 32) == 0);
			cmd32.bufaddr = (caddr32_t)(uintptr_t)rwcmdp->bufaddr;

			cmd32.status.status = rwcmdp->status.status;
			cmd32.status.resid = rwcmdp->status.resid;
			cmd32.status.failed_blk_is_valid =
			    rwcmdp->status.failed_blk_is_valid;
			cmd32.status.failed_blk = rwcmdp->status.failed_blk;
			cmd32.status.fru_code_is_valid =
			    rwcmdp->status.fru_code_is_valid;
			cmd32.status.fru_code = rwcmdp->status.fru_code;

			bcopy(rwcmdp->status.add_error_info,
			    cmd32.status.add_error_info, DADKIO_ERROR_INFO_LEN);

			if (ddi_copyout(&cmd32, outaddr,
			    sizeof (struct dadkio_rwcmd32), flag))
				return (EFAULT);
			break;
		}
		case DDI_MODEL_NONE: {
			if (ddi_copyout(rwcmdp, outaddr,
			    sizeof (struct dadkio_rwcmd), flag))
				return (EFAULT);
		}
	}
	return (0);
}

/*
 * ioctl routine
 */
static int
cmdkioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *credp, int *rvalp)
{
	int		instance;
	struct scsi_device *devp;
	struct cmdk	*dkp;
	char		data[NBPSCTR];

	instance = CMDKUNIT(dev);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
		return (ENXIO);

	mutex_enter(&dkp->dk_mutex);
	while (dkp->dk_flag & CMDK_SUSPEND) {
		cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex);
	}
	mutex_exit(&dkp->dk_mutex);

	bzero(data, sizeof (data));

	switch (cmd) {

	case DKIOCGMEDIAINFO: {
		struct dk_minfo	media_info;
		struct  tgdk_geom phyg;

		/* dadk_getphygeom always returns success */
		(void) dadk_getphygeom(DKTP_DATA, &phyg);

		media_info.dki_lbsize = phyg.g_secsiz;
		media_info.dki_capacity = phyg.g_cap;
		media_info.dki_media_type = DK_FIXED_DISK;

		if (ddi_copyout(&media_info, (void *)arg,
		    sizeof (struct dk_minfo), flag)) {
			return (EFAULT);
		} else {
			return (0);
		}
	}

	case DKIOCINFO: {
		struct dk_cinfo *info = (struct dk_cinfo *)data;

		/* controller information */
		info->dki_ctype = (DKTP_EXT->tg_ctype);
		info->dki_cnum = ddi_get_instance(ddi_get_parent(dkp->dk_dip));
		(void) strcpy(info->dki_cname,
		    ddi_get_name(ddi_get_parent(dkp->dk_dip)));

		/* Unit Information */
		info->dki_unit = ddi_get_instance(dkp->dk_dip);
		devp = ddi_get_driver_private(dkp->dk_dip);
		info->dki_slave = (CMDEV_TARG(devp)<<3) | CMDEV_LUN(devp);
		(void) strcpy(info->dki_dname, ddi_driver_name(dkp->dk_dip));
		info->dki_flags = DKI_FMTVOL;
		info->dki_partition = CMDKPART(dev);

		info->dki_maxtransfer = maxphys / DEV_BSIZE;
		info->dki_addr = 1;
		info->dki_space = 0;
		info->dki_prio = 0;
		info->dki_vec = 0;

		if (ddi_copyout(data, (void *)arg, sizeof (*info), flag))
			return (EFAULT);
		else
			return (0);
	}

	case DKIOCSTATE: {
		int	state;
		int	rval;
		diskaddr_t	p_lblksrt;
		diskaddr_t	p_lblkcnt;

		if (ddi_copyin((void *)arg, &state, sizeof (int), flag))
			return (EFAULT);

		/* dadk_check_media blocks until state changes */
		if (rval = dadk_check_media(DKTP_DATA, &state))
			return (rval);

		if (state == DKIO_INSERTED) {

			if (cmlb_validate(dkp->dk_cmlbhandle, 0, 0) != 0)
				return (ENXIO);

			if (cmlb_partinfo(dkp->dk_cmlbhandle, CMDKPART(dev),
			    &p_lblkcnt, &p_lblksrt, NULL, NULL, 0))
				return (ENXIO);

			if (p_lblkcnt <= 0)
				return (ENXIO);
		}

		if (ddi_copyout(&state, (caddr_t)arg, sizeof (int), flag))
			return (EFAULT);

		return (0);
	}

	/*
	 * is media removable?
	 */
	case DKIOCREMOVABLE: {
		int i;

		i = (DKTP_EXT->tg_rmb) ? 1 : 0;

		if (ddi_copyout(&i, (caddr_t)arg, sizeof (int), flag))
			return (EFAULT);

		return (0);
	}

	case DKIOCADDBAD:
		/*
		 * This is not an update mechanism to add bad blocks
		 * to the bad block structures stored on disk.
		 *
		 * addbadsec(1M) will update the bad block data on disk
		 * and use this ioctl to force the driver to re-initialize
		 * the list of bad blocks in the driver.
		 */

		/* start BBH */
		cmdk_bbh_reopen(dkp);
		return (0);

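	/*
	 * Partition, geometry, VTOC, and EFI ioctls are handled by
	 * cmlb; DKIOCSVTOC may change the label, so the devid is
	 * re-established afterwards.
	 */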
	case DKIOCG_PHYGEOM:
	case DKIOCG_VIRTGEOM:
	case DKIOCGGEOM:
	case DKIOCSGEOM:
	case DKIOCGAPART:
	case DKIOCSAPART:
	case DKIOCGVTOC:
	case DKIOCSVTOC:
	case DKIOCPARTINFO:
	case DKIOCGMBOOT:
	case DKIOCSMBOOT:
	case DKIOCGETEFI:
	case DKIOCSETEFI:
	case DKIOCPARTITION:
	{
		int rc;

		rc = cmlb_ioctl(dkp->dk_cmlbhandle, dev, cmd, arg, flag,
		    credp, rvalp, 0);
		if (cmd == DKIOCSVTOC)
			cmdk_devid_setup(dkp);
		return (rc);
	}

	case DIOCTL_RWCMD: {
		struct	dadkio_rwcmd *rwcmdp;
		int	status;

		rwcmdp = kmem_alloc(sizeof (struct dadkio_rwcmd), KM_SLEEP);

		status = rwcmd_copyin(rwcmdp, (caddr_t)arg, flag);

		if (status == 0) {
			bzero(&(rwcmdp->status), sizeof (struct dadkio_status));
			status = dadk_ioctl(DKTP_DATA,
			    dev,
			    cmd,
			    (uintptr_t)rwcmdp,
			    flag,
			    credp,
			    rvalp);
		}
		if (status == 0)
			status = rwcmd_copyout(rwcmdp, (caddr_t)arg, flag);

		kmem_free(rwcmdp, sizeof (struct dadkio_rwcmd));
		return (status);
	}

	default:
		return (dadk_ioctl(DKTP_DATA,
		    dev,
		    cmd,
		    arg,
		    flag,
		    credp,
		    rvalp));
	}
}

/*ARGSUSED1*/
static int
cmdkclose(dev_t dev, int flag, int otyp, cred_t *credp)
{
	int		part;
	ulong_t		partbit;
	int		instance;
	struct cmdk	*dkp;
	int		lastclose = 1;
	int		i;

	instance = CMDKUNIT(dev);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)) ||
	    (otyp >= OTYPCNT))
		return (ENXIO);

	mutex_enter(&dkp->dk_mutex);

	/* check if device has been opened */
	ASSERT(cmdk_isopen(dkp, dev));
	if (!(dkp->dk_flag & CMDK_OPEN)) {
		mutex_exit(&dkp->dk_mutex);
		return (ENXIO);
	}

	while (dkp->dk_flag & CMDK_SUSPEND) {
		cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex);
	}

	part = CMDKPART(dev);
	partbit = 1 << part;

	/* account for close */
	if (otyp == OTYP_LYR) {
		ASSERT(dkp->dk_open_lyr[part] > 0);
		if (dkp->dk_open_lyr[part])
			dkp->dk_open_lyr[part]--;
	} else {
		ASSERT((dkp->dk_open_reg[otyp] & partbit) != 0);
		dkp->dk_open_reg[otyp] &= ~partbit;
	}
	dkp->dk_open_exl &= ~partbit;

	for (i = 0; i < CMDK_MAXPART; i++)
		if (dkp->dk_open_lyr[i] != 0) {
			lastclose = 0;
			break;
		}

	if (lastclose)
		for (i = 0; i < OTYPCNT; i++)
			if (dkp->dk_open_reg[i] != 0) {
				lastclose = 0;
				break;
			}

	mutex_exit(&dkp->dk_mutex);

	if (lastclose)
		cmlb_invalidate(dkp->dk_cmlbhandle, 0);

	return (DDI_SUCCESS);
}

/*ARGSUSED3*/
static int
cmdkopen(dev_t *dev_p, int flag, int otyp, cred_t *credp)
{
	dev_t		dev = *dev_p;
	int		part;
	ulong_t		partbit;
	int		instance;
	struct	cmdk	*dkp;
	diskaddr_t	p_lblksrt;
	diskaddr_t	p_lblkcnt;
	int		i;
	int		nodelay;

	instance = CMDKUNIT(dev);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
		return (ENXIO);

	if (otyp >= OTYPCNT)
		return (EINVAL);

	mutex_enter(&dkp->dk_mutex);
	while (dkp->dk_flag & CMDK_SUSPEND) {
		cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex);
	}
	mutex_exit(&dkp->dk_mutex);

	part = CMDKPART(dev);
	partbit = 1 << part;
	nodelay = (flag & (FNDELAY | FNONBLOCK));

	mutex_enter(&dkp->dk_mutex);

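	/*
	 * Validate the label and partition.  A nodelay open
	 * (FNDELAY/FNONBLOCK) may succeed even when the label or
	 * partition cannot be read yet; a normal open may not.
	 */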
	if (cmlb_validate(dkp->dk_cmlbhandle, 0, 0) != 0) {

		/* fail if not doing non block open */
		if (!nodelay) {
			mutex_exit(&dkp->dk_mutex);
			return (ENXIO);
		}
	} else if (cmlb_partinfo(dkp->dk_cmlbhandle, part, &p_lblkcnt,
	    &p_lblksrt, NULL, NULL, 0) == 0) {

		if (p_lblkcnt <= 0 && (!nodelay || otyp != OTYP_CHR)) {
			mutex_exit(&dkp->dk_mutex);
			return (ENXIO);
		}
	} else {
		/* fail if not doing non block open */
		if (!nodelay) {
			mutex_exit(&dkp->dk_mutex);
			return (ENXIO);
		}
	}

	if ((DKTP_EXT->tg_rdonly) && (flag & FWRITE)) {
		mutex_exit(&dkp->dk_mutex);
		return (EROFS);
	}

	/* check for part already opened exclusively */
	if (dkp->dk_open_exl & partbit)
		goto excl_open_fail;

	/* check if we can establish exclusive open */
	if (flag & FEXCL) {
		if (dkp->dk_open_lyr[part])
			goto excl_open_fail;
		for (i = 0; i < OTYPCNT; i++) {
			if (dkp->dk_open_reg[i] & partbit)
				goto excl_open_fail;
		}
	}

	/* open will succeed, account for open */
	dkp->dk_flag |= CMDK_OPEN;
	if (otyp == OTYP_LYR)
		dkp->dk_open_lyr[part]++;
	else
		dkp->dk_open_reg[otyp] |= partbit;
	if (flag & FEXCL)
		dkp->dk_open_exl |= partbit;

	mutex_exit(&dkp->dk_mutex);
	return (DDI_SUCCESS);

excl_open_fail:
	mutex_exit(&dkp->dk_mutex);
	return (EBUSY);
}

/*
 * read routine
 */
/*ARGSUSED2*/
static int
cmdkread(dev_t dev, struct uio *uio, cred_t *credp)
{
	return (cmdkrw(dev, uio, B_READ));
}

/*
 * async read routine
 */
/*ARGSUSED2*/
static int
cmdkaread(dev_t dev, struct aio_req *aio, cred_t *credp)
{
	return (cmdkarw(dev, aio, B_READ));
}

/*
 * write routine
 */
/*ARGSUSED2*/
static int
cmdkwrite(dev_t dev, struct uio *uio, cred_t *credp)
{
	return (cmdkrw(dev, uio, B_WRITE));
}

/*
 * async write routine
 */
/*ARGSUSED2*/
static int
cmdkawrite(dev_t dev, struct aio_req *aio, cred_t *credp)
{
	return (cmdkarw(dev, aio, B_WRITE));
}

static void
cmdkmin(struct buf *bp)
{
	if (bp->b_bcount > DK_MAXRECSIZE)
		bp->b_bcount = DK_MAXRECSIZE;
}

static int
cmdkrw(dev_t dev, struct uio *uio, int flag)
{
	int		instance;
	struct	cmdk	*dkp;

	instance = CMDKUNIT(dev);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
		return (ENXIO);

	mutex_enter(&dkp->dk_mutex);
	while (dkp->dk_flag & CMDK_SUSPEND) {
		cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex);
	}
	mutex_exit(&dkp->dk_mutex);

	return (physio(cmdkstrategy, (struct buf *)0, dev, flag, cmdkmin, uio));
}

static int
cmdkarw(dev_t dev, struct aio_req *aio, int flag)
{
	int		instance;
	struct	cmdk	*dkp;

	instance = CMDKUNIT(dev);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
		return (ENXIO);

	mutex_enter(&dkp->dk_mutex);
	while (dkp->dk_flag & CMDK_SUSPEND) {
		cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex);
	}
	mutex_exit(&dkp->dk_mutex);

	return (aphysio(cmdkstrategy, anocancel, dev, flag, cmdkmin, aio));
}

/*
 * strategy routine
 */
static int
cmdkstrategy(struct buf *bp)
{
	int		instance;
	struct	cmdk	*dkp;
	long		d_cnt;
	diskaddr_t	p_lblksrt;
	diskaddr_t	p_lblkcnt;

	instance = CMDKUNIT(bp->b_edev);
	if (cmdk_indump || !(dkp = ddi_get_soft_state(cmdk_state, instance)) ||
	    (dkblock(bp) < 0)) {
		bp->b_resid = bp->b_bcount;
		SETBPERR(bp, ENXIO);
		biodone(bp);
		return (0);
	}

	mutex_enter(&dkp->dk_mutex);
	ASSERT(cmdk_isopen(dkp, bp->b_edev));
	while (dkp->dk_flag & CMDK_SUSPEND) {
		cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex);
	}
	mutex_exit(&dkp->dk_mutex);

	bp->b_flags &= ~(B_DONE|B_ERROR);
	bp->b_resid = 0;
	bp->av_back = NULL;

	/*
	 * only re-read the vtoc if necessary (force == FALSE)
	 */
	if (cmlb_partinfo(dkp->dk_cmlbhandle, CMDKPART(bp->b_edev),
	    &p_lblkcnt, &p_lblksrt, NULL, NULL, 0)) {
		SETBPERR(bp, ENXIO);
	}

	if ((bp->b_bcount & (NBPSCTR-1)) || (dkblock(bp) > p_lblkcnt))
		SETBPERR(bp, ENXIO);

	if ((bp->b_flags & B_ERROR) || (dkblock(bp) == p_lblkcnt)) {
		bp->b_resid = bp->b_bcount;
		biodone(bp);
		return (0);
	}

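	/*
	 * Clip a request that extends past the end of the partition;
	 * b_resid reports the portion that is not transferred.
	 */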
	d_cnt = bp->b_bcount >> SCTRSHFT;
	if ((dkblock(bp) + d_cnt) > p_lblkcnt) {
		bp->b_resid = ((dkblock(bp) + d_cnt) - p_lblkcnt) << SCTRSHFT;
		bp->b_bcount -= bp->b_resid;
	}

	SET_BP_SEC(bp, ((ulong_t)(p_lblksrt + dkblock(bp))));
	if (dadk_strategy(DKTP_DATA, bp) != DDI_SUCCESS) {
		bp->b_resid += bp->b_bcount;
		biodone(bp);
	}
	return (0);
}

static int
cmdk_create_obj(dev_info_t *dip, struct cmdk *dkp)
{
	struct scsi_device *devp;
	opaque_t	queobjp = NULL;
	opaque_t	flcobjp = NULL;
	char		que_keyvalp[64];
	int		que_keylen;
	char		flc_keyvalp[64];
	int		flc_keylen;

	ASSERT(mutex_owned(&dkp->dk_mutex));

	/* Create linkage to queueing routines based on property */
	que_keylen = sizeof (que_keyvalp);
	if (ddi_prop_op(DDI_DEV_T_NONE, dip, PROP_LEN_AND_VAL_BUF,
	    DDI_PROP_CANSLEEP, "queue", que_keyvalp, &que_keylen) !=
	    DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN, "cmdk_create_obj: queue property undefined");
		return (DDI_FAILURE);
	}
	que_keyvalp[que_keylen] = (char)0;

	if (strcmp(que_keyvalp, "qfifo") == 0) {
		queobjp = (opaque_t)qfifo_create();
	} else if (strcmp(que_keyvalp, "qsort") == 0) {
		queobjp = (opaque_t)qsort_create();
	} else {
		return (DDI_FAILURE);
	}

	/* Create linkage to dequeueing routines based on property */
	flc_keylen = sizeof (flc_keyvalp);
	if (ddi_prop_op(DDI_DEV_T_NONE, dip, PROP_LEN_AND_VAL_BUF,
	    DDI_PROP_CANSLEEP, "flow_control", flc_keyvalp, &flc_keylen) !=
	    DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN,
		    "cmdk_create_obj: flow-control property undefined");
		return (DDI_FAILURE);
	}

	flc_keyvalp[flc_keylen] = (char)0;

	if (strcmp(flc_keyvalp, "dsngl") == 0) {
		flcobjp = (opaque_t)dsngl_create();
	} else if (strcmp(flc_keyvalp, "dmult") == 0) {
		flcobjp = (opaque_t)dmult_create();
	} else {
		return (DDI_FAILURE);
	}

	/* populate bbh_obj object stored in dkp */
	dkp->dk_bbh_obj.bbh_data = dkp;
	dkp->dk_bbh_obj.bbh_ops = &cmdk_bbh_ops;

	/* create linkage to dadk */
	dkp->dk_tgobjp = (opaque_t)dadk_create();

	devp = ddi_get_driver_private(dip);
	(void) dadk_init(DKTP_DATA, devp, flcobjp, queobjp, &dkp->dk_bbh_obj,
	    NULL);

	return (DDI_SUCCESS);
}

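/*
 * Tear down the dadk linkage created by cmdk_create_obj().  The
 * queue and flow-control properties are re-read here only to verify
 * that they are still defined.
 */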
static void
cmdk_destroy_obj(dev_info_t *dip, struct cmdk *dkp)
{
	char		que_keyvalp[64];
	int		que_keylen;
	char		flc_keyvalp[64];
	int		flc_keylen;

	ASSERT(mutex_owned(&dkp->dk_mutex));

	(void) dadk_free((dkp->dk_tgobjp));
	dkp->dk_tgobjp = NULL;

	que_keylen = sizeof (que_keyvalp);
	if (ddi_prop_op(DDI_DEV_T_NONE, dip, PROP_LEN_AND_VAL_BUF,
	    DDI_PROP_CANSLEEP, "queue", que_keyvalp, &que_keylen) !=
	    DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN, "cmdk_destroy_obj: queue property undefined");
		return;
	}
	que_keyvalp[que_keylen] = (char)0;

	flc_keylen = sizeof (flc_keyvalp);
	if (ddi_prop_op(DDI_DEV_T_NONE, dip, PROP_LEN_AND_VAL_BUF,
	    DDI_PROP_CANSLEEP, "flow_control", flc_keyvalp, &flc_keylen) !=
	    DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN,
		    "cmdk_destroy_obj: flow-control property undefined");
		return;
	}
	flc_keyvalp[flc_keylen] = (char)0;
}

/*ARGSUSED5*/
static int
cmdk_lb_rdwr(dev_info_t *dip, uchar_t cmd, void *bufaddr,
    diskaddr_t start, size_t count, void *tg_cookie)
{
	struct	cmdk	*dkp;
	opaque_t	handle;
	int		rc = 0;
	char		*bufa;

	dkp = ddi_get_soft_state(cmdk_state, ddi_get_instance(dip));
	if (dkp == NULL)
		return (ENXIO);

	if (cmd != TG_READ && cmd != TG_WRITE)
		return (EINVAL);

	/* round count up to a multiple of the sector size (NBPSCTR) */
	count = (count + NBPSCTR - 1) & -NBPSCTR;
	handle = dadk_iob_alloc(DKTP_DATA, start, count, KM_SLEEP);
	if (!handle)
		return (ENOMEM);

	if (cmd == TG_READ) {
		bufa = dadk_iob_xfer(DKTP_DATA, handle, B_READ);
		if (!bufa)
			rc = EIO;
		else
			bcopy(bufa, bufaddr, count);
	} else {
		bufa = dadk_iob_htoc(DKTP_DATA, handle);
		bcopy(bufaddr, bufa, count);
		bufa = dadk_iob_xfer(DKTP_DATA, handle, B_WRITE);
		if (!bufa)
			rc = EIO;
	}
	(void) dadk_iob_free(DKTP_DATA, handle);

	return (rc);
}

/*ARGSUSED3*/
static int
cmdk_lb_getinfo(dev_info_t *dip, int cmd, void *arg, void *tg_cookie)
{
	struct cmdk		*dkp;
	struct tgdk_geom	phyg;

	dkp = ddi_get_soft_state(cmdk_state, ddi_get_instance(dip));
	if (dkp == NULL)
		return (ENXIO);

	switch (cmd) {
	case TG_GETPHYGEOM: {
		cmlb_geom_t *phygeomp = (cmlb_geom_t *)arg;

		/* dadk_getphygeom always returns success */
		(void) dadk_getphygeom(DKTP_DATA, &phyg);

		phygeomp->g_capacity	= phyg.g_cap;
		phygeomp->g_nsect	= phyg.g_sec;
		phygeomp->g_nhead	= phyg.g_head;
		phygeomp->g_acyl	= phyg.g_acyl;
		phygeomp->g_ncyl	= phyg.g_cyl;
		phygeomp->g_secsize	= phyg.g_secsiz;
		phygeomp->g_intrlv	= 1;
		phygeomp->g_rpm		= 3600;

		return (0);
	}

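	/*
	 * Fake an Int 13/function 8 style geometry: 63 sectors,
	 * 254 heads, and as many cylinders as the capacity allows.
	 */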
	case TG_GETVIRTGEOM: {
		cmlb_geom_t *virtgeomp = (cmlb_geom_t *)arg;
		diskaddr_t capacity;

		(void) dadk_getgeom(DKTP_DATA, &phyg);
		capacity = phyg.g_cap;

		/*
		 * If the controller returned us something that doesn't
		 * really fit into an Int 13/function 8 geometry
		 * result, just fail the ioctl.  See PSARC 1998/313.
		 */
		if (capacity < 0 || capacity >= 63 * 254 * 1024)
			return (EINVAL);

		virtgeomp->g_capacity	= capacity;
		virtgeomp->g_nsect	= 63;
		virtgeomp->g_nhead	= 254;
		virtgeomp->g_ncyl	= capacity / (63 * 254);
		virtgeomp->g_acyl	= 0;
		virtgeomp->g_secsize	= 512;
		virtgeomp->g_intrlv	= 1;
		virtgeomp->g_rpm	= 3600;

		return (0);
	}

	case TG_GETCAPACITY:
	case TG_GETBLOCKSIZE:
	{

		/* dadk_getphygeom always returns success */
		(void) dadk_getphygeom(DKTP_DATA, &phyg);
		if (cmd == TG_GETCAPACITY)
			*(diskaddr_t *)arg = phyg.g_cap;
		else
			*(uint32_t *)arg = (uint32_t)phyg.g_secsiz;

		return (0);
	}

	case TG_GETATTR: {
		tg_attribute_t *tgattribute = (tg_attribute_t *)arg;
		if ((DKTP_EXT->tg_rdonly))
			tgattribute->media_is_writable = FALSE;
		else
			tgattribute->media_is_writable = TRUE;

		return (0);
	}

	default:
		return (ENOTTY);
	}
}

/*
 * Create and register the devid.
 * There are 4 different ways we can get a device id:
 *    1. Already have one - nothing to do
 *    2. Build one from the drive's model and serial numbers
 *    3. Read one from the disk (first sector of last track)
 *    4. Fabricate one and write it on the disk.
 * If any of these succeeds, register the deviceid
 */
static void
cmdk_devid_setup(struct cmdk *dkp)
{
	int	rc;

	/* Try options until one succeeds, or all have failed */

	/* 1. All done if already registered */
	if (dkp->dk_devid != NULL)
		return;

	/* 2. Build a devid from the model and serial number */
	rc = cmdk_devid_modser(dkp);
	if (rc != DDI_SUCCESS) {
		/* 3. Read devid from the disk, if present */
		rc = cmdk_devid_read(dkp);

		/* 4. otherwise make one up and write it on the disk */
		if (rc != DDI_SUCCESS)
			rc = cmdk_devid_fabricate(dkp);
	}

	/* If we managed to get a devid any of the above ways, register it */
	if (rc == DDI_SUCCESS)
		(void) ddi_devid_register(dkp->dk_dip, dkp->dk_devid);

}

/*
 * Build a devid from the model and serial number
 * Return DDI_SUCCESS or DDI_FAILURE.
 */
static int
cmdk_devid_modser(struct cmdk *dkp)
{
	int	rc = DDI_FAILURE;
	char	*hwid;
	int	modlen;
	int	serlen;

	/*
	 * device ID is a concatenation of model number, '=', serial number.
	 */
	hwid = kmem_alloc(CMDK_HWIDLEN, KM_SLEEP);
	modlen = cmdk_get_modser(dkp, DIOCTL_GETMODEL, hwid, CMDK_HWIDLEN);
	if (modlen == 0) {
		rc = DDI_FAILURE;
		goto err;
	}
	hwid[modlen++] = '=';
	serlen = cmdk_get_modser(dkp, DIOCTL_GETSERIAL,
	    hwid + modlen, CMDK_HWIDLEN - modlen);
	if (serlen == 0) {
		rc = DDI_FAILURE;
		goto err;
	}
	hwid[modlen + serlen] = 0;

	/* Initialize the device ID, trailing NULL not included */
	rc = ddi_devid_init(dkp->dk_dip, DEVID_ATA_SERIAL, modlen + serlen,
	    hwid, (ddi_devid_t *)&dkp->dk_devid);
	if (rc != DDI_SUCCESS) {
		rc = DDI_FAILURE;
		goto err;
	}

	rc = DDI_SUCCESS;

err:
	kmem_free(hwid, CMDK_HWIDLEN);
	return (rc);
}

static int
cmdk_get_modser(struct cmdk *dkp, int ioccmd, char *buf, int len)
{
	dadk_ioc_string_t strarg;
	int		rval;
	char		*s;
	char		ch;
	boolean_t	ret;
	int		i;
	int		tb;

	strarg.is_buf = buf;
	strarg.is_size = len;
	if (dadk_ioctl(DKTP_DATA,
	    dkp->dk_dev,
	    ioccmd,
	    (uintptr_t)&strarg,
	    FNATIVE | FKIOCTL,
	    NULL,
	    &rval) != 0)
		return (0);

	/*
	 * a valid model/serial string must contain a character that is
	 * neither space, NUL, nor '0'; trim trailing spaces/NULs
	 */
	ret = B_FALSE;
	s = buf;
	for (i = 0; i < strarg.is_size; i++) {
		ch = *s++;
		if (ch != ' ' && ch != '\0')
			tb = i + 1;
		if (ch != ' ' && ch != '\0' && ch != '0')
			ret = B_TRUE;
	}

	if (ret == B_FALSE)
		return (0);

	return (tb);
}

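/*
 * On-disk devid layout (struct dk_devid, one sector): a two-byte
 * revision, the devid itself, and a checksum computed as the XOR of
 * every 32-bit word in the sector except the last, which stores the
 * checksum itself.
 */
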
/*
 * Read a devid from the first block of the last track of
 * the last cylinder.  Make sure what we read is a valid devid.
 * Return DDI_SUCCESS or DDI_FAILURE.
 */
static int
cmdk_devid_read(struct cmdk *dkp)
{
	diskaddr_t	blk;
	struct dk_devid *dkdevidp;
	uint_t		*ip;
	int		chksum;
	int		i, sz;
	tgdk_iob_handle	handle = NULL;
	int		rc = DDI_FAILURE;

	if (cmlb_get_devid_block(dkp->dk_cmlbhandle, &blk, 0))
		goto err;

	/* read the devid */
	handle = dadk_iob_alloc(DKTP_DATA, blk, NBPSCTR, KM_SLEEP);
	if (handle == NULL)
		goto err;

	dkdevidp = (struct dk_devid *)dadk_iob_xfer(DKTP_DATA, handle, B_READ);
	if (dkdevidp == NULL)
		goto err;

	/* Validate the revision */
	if ((dkdevidp->dkd_rev_hi != DK_DEVID_REV_MSB) ||
	    (dkdevidp->dkd_rev_lo != DK_DEVID_REV_LSB))
		goto err;

	/* Calculate the checksum */
	chksum = 0;
	ip = (uint_t *)dkdevidp;
	for (i = 0; i < ((NBPSCTR - sizeof (int))/sizeof (int)); i++)
		chksum ^= ip[i];
	if (DKD_GETCHKSUM(dkdevidp) != chksum)
		goto err;

	/* Validate the device id */
	if (ddi_devid_valid((ddi_devid_t)dkdevidp->dkd_devid) != DDI_SUCCESS)
		goto err;

	/* keep a copy of the device id */
	sz = ddi_devid_sizeof((ddi_devid_t)dkdevidp->dkd_devid);
	dkp->dk_devid = kmem_alloc(sz, KM_SLEEP);
	bcopy(dkdevidp->dkd_devid, dkp->dk_devid, sz);

	rc = DDI_SUCCESS;

err:
	if (handle != NULL)
		(void) dadk_iob_free(DKTP_DATA, handle);
	return (rc);
}

/*
 * Create a devid and write it on the first block of the last track of
 * the last cylinder.
 * Return DDI_SUCCESS or DDI_FAILURE.
 */
static int
cmdk_devid_fabricate(struct cmdk *dkp)
{
	ddi_devid_t	devid = NULL;	/* devid made by ddi_devid_init */
	struct dk_devid	*dkdevidp;	/* devid struct stored on disk */
	diskaddr_t	blk;
	tgdk_iob_handle	handle = NULL;
	uint_t		*ip, chksum;
	int		i;
	int		rc = DDI_FAILURE;

	if (ddi_devid_init(dkp->dk_dip, DEVID_FAB, 0, NULL, &devid) !=
	    DDI_SUCCESS)
		goto err;

	if (cmlb_get_devid_block(dkp->dk_cmlbhandle, &blk, 0)) {
		/* no device id block address */
		goto err;
	}

	handle = dadk_iob_alloc(DKTP_DATA, blk, NBPSCTR, KM_SLEEP);
	if (!handle)
		goto err;

	/* Locate the buffer */
	dkdevidp = (struct dk_devid *)dadk_iob_htoc(DKTP_DATA, handle);

	/* Fill in the revision */
	bzero(dkdevidp, NBPSCTR);
	dkdevidp->dkd_rev_hi = DK_DEVID_REV_MSB;
	dkdevidp->dkd_rev_lo = DK_DEVID_REV_LSB;

	/* Copy in the device id */
	i = ddi_devid_sizeof(devid);
	if (i > DK_DEVID_SIZE)
		goto err;
	bcopy(devid, dkdevidp->dkd_devid, i);

	/* Calculate the chksum */
	chksum = 0;
	ip = (uint_t *)dkdevidp;
	for (i = 0; i < ((NBPSCTR - sizeof (int))/sizeof (int)); i++)
		chksum ^= ip[i];

	/* Fill in the checksum */
	DKD_FORMCHKSUM(chksum, dkdevidp);

	/* write the devid */
	(void) dadk_iob_xfer(DKTP_DATA, handle, B_WRITE);

	dkp->dk_devid = devid;

	rc = DDI_SUCCESS;

err:
	if (handle != NULL)
		(void) dadk_iob_free(DKTP_DATA, handle);

	if (rc != DDI_SUCCESS && devid != NULL)
		ddi_devid_free(devid);

	return (rc);
}

static void
cmdk_bbh_free_alts(struct cmdk *dkp)
{
	if (dkp->dk_alts_hdl) {
		(void) dadk_iob_free(DKTP_DATA, dkp->dk_alts_hdl);
		kmem_free(dkp->dk_slc_cnt,
		    NDKMAP * (sizeof (uint32_t) + sizeof (struct alts_ent *)));
		dkp->dk_alts_hdl = NULL;
	}
}

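/*
 * Rebuild the in-core bad-block remap tables from the V_ALTSCTR
 * slice on disk.  Called from cmdkattach() and the DKIOCADDBAD ioctl.
 */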
static void
cmdk_bbh_reopen(struct cmdk *dkp)
{
	tgdk_iob_handle		handle = NULL;
	diskaddr_t		slcb, slcn, slce;
	struct	alts_parttbl	*ap;
	struct	alts_ent	*enttblp;
	uint32_t		altused;
	uint32_t		altbase;
	uint32_t		altlast;
	int			alts;
	uint16_t		vtoctag;
	int			i, j;

	/* find slice with V_ALTSCTR tag */
	for (alts = 0; alts < NDKMAP; alts++) {
		if (cmlb_partinfo(
		    dkp->dk_cmlbhandle,
		    alts,
		    &slcn,
		    &slcb,
		    NULL,
		    &vtoctag,
		    0)) {
			goto empty;	/* no partition table exists */
		}

		if (vtoctag == V_ALTSCTR && slcn > 1)
			break;
	}
	if (alts >= NDKMAP) {
		goto empty;	/* no V_ALTSCTR slice defined */
	}

	/* read in ALTS label block */
	handle = dadk_iob_alloc(DKTP_DATA, slcb, NBPSCTR, KM_SLEEP);
	if (!handle) {
		goto empty;
	}

	ap = (struct alts_parttbl *)dadk_iob_xfer(DKTP_DATA, handle, B_READ);
	if (!ap || (ap->alts_sanity != ALTS_SANITY)) {
		goto empty;
	}

	altused = ap->alts_ent_used;	/* number of BB entries */
	altbase = ap->alts_ent_base;	/* blk offset from begin slice */
	altlast = ap->alts_ent_end;	/* blk offset to last block */
	/* ((altused * sizeof (struct alts_ent) + NBPSCTR - 1) & ~NBPSCTR) */

	if (altused == 0 ||
	    altbase < 1 ||
	    altbase > altlast ||
	    altlast >= slcn) {
		goto empty;
	}
	(void) dadk_iob_free(DKTP_DATA, handle);

	/* read in ALTS remapping table */
	handle = dadk_iob_alloc(DKTP_DATA,
	    slcb + altbase,
	    (altlast - altbase + 1) << SCTRSHFT, KM_SLEEP);
	if (!handle) {
		goto empty;
	}

	enttblp = (struct alts_ent *)dadk_iob_xfer(DKTP_DATA, handle, B_READ);
	if (!enttblp) {
		goto empty;
	}

	rw_enter(&dkp->dk_bbh_mutex, RW_WRITER);

	/* allocate space for dk_slc_cnt and dk_slc_ent tables */
	if (dkp->dk_slc_cnt == NULL) {
		dkp->dk_slc_cnt = kmem_alloc(NDKMAP *
		    (sizeof (uint32_t) + sizeof (struct alts_ent *)), KM_SLEEP);
	}
	dkp->dk_slc_ent = (struct alts_ent **)(dkp->dk_slc_cnt + NDKMAP);

	/* free previous BB table (if any) */
	if (dkp->dk_alts_hdl) {
		(void) dadk_iob_free(DKTP_DATA, dkp->dk_alts_hdl);
		dkp->dk_alts_hdl = NULL;
		dkp->dk_altused = 0;
	}

	/* save linkage to new BB table */
	dkp->dk_alts_hdl = handle;
	dkp->dk_altused = altused;

	/*
	 * build indexes to BB table by slice
	 * effectively we have
	 *	struct alts_ent *enttblp[altused];
	 *
	 *	uint32_t	dk_slc_cnt[NDKMAP];
	 *	struct alts_ent *dk_slc_ent[NDKMAP];
	 */
	for (i = 0; i < NDKMAP; i++) {
		if (cmlb_partinfo(
		    dkp->dk_cmlbhandle,
		    i,
		    &slcn,
		    &slcb,
		    NULL,
		    NULL,
		    0)) {
			goto empty1;
		}

		dkp->dk_slc_cnt[i] = 0;
		if (slcn == 0)
			continue;	/* slice is not allocated */

		/* last block in slice */
		slce = slcb + slcn - 1;

		/* find first remap entry at or after the beginning of slice */
		for (j = 0; j < altused; j++) {
			if (enttblp[j].bad_start + enttblp[j].bad_end >= slcb)
				break;
		}
		dkp->dk_slc_ent[i] = enttblp + j;

		/* count remap entries until end of slice */
		for (; j < altused && enttblp[j].bad_start <= slce; j++) {
			dkp->dk_slc_cnt[i] += 1;
		}
	}

	rw_exit(&dkp->dk_bbh_mutex);
	return;

empty:
	rw_enter(&dkp->dk_bbh_mutex, RW_WRITER);
empty1:
	if (handle && handle != dkp->dk_alts_hdl)
		(void) dadk_iob_free(DKTP_DATA, handle);

	if (dkp->dk_alts_hdl) {
		(void) dadk_iob_free(DKTP_DATA, dkp->dk_alts_hdl);
		dkp->dk_alts_hdl = NULL;
	}

	rw_exit(&dkp->dk_bbh_mutex);
}

/*ARGSUSED*/
static bbh_cookie_t
cmdk_bbh_htoc(opaque_t bbh_data, opaque_t handle)
{
	struct	bbh_handle *hp;
	bbh_cookie_t ckp;

	hp = (struct bbh_handle *)handle;
	ckp = hp->h_cktab + hp->h_idx;
	hp->h_idx++;
	return (ckp);
}

/*ARGSUSED*/
static void
cmdk_bbh_freehandle(opaque_t bbh_data, opaque_t handle)
{
	struct bbh_handle *hp;

	hp = (struct bbh_handle *)handle;
	kmem_free(handle, (sizeof (struct bbh_handle) +
	    (hp->h_totck * (sizeof (struct bbh_cookie)))));
}

/*
 * cmdk_bbh_gethandle remaps the bad sectors to alternates.
 * There are 7 different cases when the comparison is made
 * between the bad sector cluster and the disk section.
 *
 *	bad sector cluster	gggggggggggbbbbbbbggggggggggg
 *	case 1:			   ddddd
 *	case 2:				   -d-----
 *	case 3:					     ddddd
 *	case 4:			         dddddddddddd
 *	case 5:			      ddddddd-----
 *	case 6:			           ---ddddddd
 *	case 7:			           ddddddd
 *
 *	where:	g = good sector,	b = bad sector
 *		d = sector in disk section
 *		- = disk section may be extended to cover those disk areas
 */

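/*
 * A remapped request is described by a list of bbh cookies, each
 * covering a contiguous run of sectors.  In case 4 above, for
 * example, the request is split into three cookies: the good head,
 * the bad cluster redirected to its alternates, and the good tail.
 */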
static opaque_t
cmdk_bbh_gethandle(opaque_t bbh_data, struct buf *bp)
{
	struct cmdk		*dkp = (struct cmdk *)bbh_data;
	struct bbh_handle	*hp;
	struct bbh_cookie	*ckp;
	struct alts_ent		*altp;
	uint32_t		alts_used;
	uint32_t		part = CMDKPART(bp->b_edev);
	daddr32_t		lastsec;
	long			d_count;
	int			i;
	int			idx;
	int			cnt;

	if (part >= V_NUMPAR)
		return (NULL);

	/*
	 * This if statement is atomic and it will succeed
	 * if there are no bad blocks (almost always)
	 *
	 * so this if is performed outside of the rw_enter for speed
	 * and then repeated inside the rw_enter for safety
	 */
	if (!dkp->dk_alts_hdl) {
		return (NULL);
	}

	rw_enter(&dkp->dk_bbh_mutex, RW_READER);

	if (dkp->dk_alts_hdl == NULL) {
		rw_exit(&dkp->dk_bbh_mutex);
		return (NULL);
	}

	alts_used = dkp->dk_slc_cnt[part];
	if (alts_used == 0) {
		rw_exit(&dkp->dk_bbh_mutex);
		return (NULL);
	}
	altp = dkp->dk_slc_ent[part];

	/*
	 * binary search for the largest bad sector index in the alternate
	 * entry table which overlaps or larger than the starting d_sec
	 */
	i = cmdk_bbh_bsearch(altp, alts_used, GET_BP_SEC(bp));
	/* if starting sector is > the largest bad sector, return */
	if (i == -1) {
		rw_exit(&dkp->dk_bbh_mutex);
		return (NULL);
	}
	/* i is the starting index.  Set altp to the starting entry addr */
	altp += i;

	d_count = bp->b_bcount >> SCTRSHFT;
	lastsec = GET_BP_SEC(bp) + d_count - 1;

	/* calculate the number of bad sectors */
	for (idx = i, cnt = 0; idx < alts_used; idx++, altp++, cnt++) {
		if (lastsec < altp->bad_start)
			break;
	}

	if (!cnt) {
		rw_exit(&dkp->dk_bbh_mutex);
		return (NULL);
	}

	/* calculate the maximum number of reserved cookies */
	cnt <<= 1;
	cnt++;

	/* allocate the handle */
	hp = (struct bbh_handle *)kmem_zalloc((sizeof (*hp) +
	    (cnt * sizeof (*ckp))), KM_SLEEP);

	hp->h_idx = 0;
	hp->h_totck = cnt;
	ckp = hp->h_cktab = (struct bbh_cookie *)(hp + 1);
	ckp[0].ck_sector = GET_BP_SEC(bp);
	ckp[0].ck_seclen = d_count;

	altp = dkp->dk_slc_ent[part];
	altp += i;
	for (idx = 0; i < alts_used; i++, altp++) {
		/* CASE 1: */
		if (lastsec < altp->bad_start)
			break;

		/* CASE 3: */
		if (ckp[idx].ck_sector > altp->bad_end)
			continue;

		/* CASE 2 and 7: */
		if ((ckp[idx].ck_sector >= altp->bad_start) &&
		    (lastsec <= altp->bad_end)) {
			ckp[idx].ck_sector = altp->good_start +
			    ckp[idx].ck_sector - altp->bad_start;
			break;
		}

		/* at least one bad sector in our section.  break it. */
		/* CASE 5: */
		if ((lastsec >= altp->bad_start) &&
		    (lastsec <= altp->bad_end)) {
			ckp[idx+1].ck_seclen = lastsec - altp->bad_start + 1;
			ckp[idx].ck_seclen -= ckp[idx+1].ck_seclen;
			ckp[idx+1].ck_sector = altp->good_start;
			break;
		}
		/* CASE 6: */
		if ((ckp[idx].ck_sector <= altp->bad_end) &&
		    (ckp[idx].ck_sector >= altp->bad_start)) {
			ckp[idx+1].ck_seclen = ckp[idx].ck_seclen;
			ckp[idx].ck_seclen = altp->bad_end -
			    ckp[idx].ck_sector + 1;
			ckp[idx+1].ck_seclen -= ckp[idx].ck_seclen;
			ckp[idx].ck_sector = altp->good_start +
			    ckp[idx].ck_sector - altp->bad_start;
			idx++;
			ckp[idx].ck_sector = altp->bad_end + 1;
			continue;	/* check rest of section */
		}

		/* CASE 4: */
		ckp[idx].ck_seclen = altp->bad_start - ckp[idx].ck_sector;
		ckp[idx+1].ck_sector = altp->good_start;
		ckp[idx+1].ck_seclen = altp->bad_end - altp->bad_start + 1;
		idx += 2;
		ckp[idx].ck_sector = altp->bad_end + 1;
		ckp[idx].ck_seclen = lastsec - altp->bad_end;
	}

	rw_exit(&dkp->dk_bbh_mutex);
	return ((opaque_t)hp);
}

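/*
 * Binary search the sorted alternate-entry table.  Return the index
 * of the entry whose bad range contains 'key', else the index of the
 * first entry beyond 'key', or -1 when 'key' lies past every entry.
 */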
static int
cmdk_bbh_bsearch(struct alts_ent *buf, int cnt, daddr32_t key)
{
	int	i;
	int	ind;
	int	interval;
	int	mystatus = -1;

	if (!cnt)
		return (mystatus);

	ind = 1; /* compiler complains about possible uninitialized var */
	for (i = 1; i <= cnt; i <<= 1)
		ind = i;

	for (interval = ind; interval; ) {
		if ((key >= buf[ind-1].bad_start) &&
		    (key <= buf[ind-1].bad_end)) {
			return (ind-1);
		} else {
			interval >>= 1;
			if (key < buf[ind-1].bad_start) {
				/* record the largest bad sector index */
				mystatus = ind-1;
				if (!interval)
					break;
				ind = ind - interval;
			} else {
				/*
				 * if key is larger than the last element
				 * then break
				 */
				if ((ind == cnt) || !interval)
					break;
				if ((ind+interval) <= cnt)
					ind += interval;
			}
		}
	}
	return (mystatus);
}