/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/scsi/scsi.h>
#include <sys/dktp/cm.h>
#include <sys/dktp/quetypes.h>
#include <sys/dktp/queue.h>
#include <sys/dktp/fctypes.h>
#include <sys/dktp/flowctrl.h>
#include <sys/dktp/cmdev.h>
#include <sys/dkio.h>
#include <sys/dktp/tgdk.h>
#include <sys/dktp/dadk.h>
#include <sys/dktp/bbh.h>
#include <sys/dktp/altsctr.h>
#include <sys/dktp/cmdk.h>

#include <sys/stat.h>
#include <sys/vtoc.h>
#include <sys/file.h>
#include <sys/dktp/dadkio.h>
#include <sys/aio_req.h>

#include <sys/cmlb.h>

/*
 * Local Static Data
 */
#ifdef CMDK_DEBUG
#define DENT    0x0001
#define DIO     0x0002

static int cmdk_debug = DIO;
#endif

#ifndef TRUE
#define TRUE    1
#endif

#ifndef FALSE
#define FALSE   0
#endif

/*
 * NDKMAP is the base number for accessing the fdisk partitions.
 * c?d?p0 --> cmdk@?,?:q
 */
#define PARTITION0_INDEX    (NDKMAP + 0)

#define DKTP_DATA           (dkp->dk_tgobjp)->tg_data
#define DKTP_EXT            (dkp->dk_tgobjp)->tg_ext

void *cmdk_state;

/*
 * the cmdk_attach_mutex protects cmdk_max_instance in multi-threaded
 * attach situations
 */
static kmutex_t cmdk_attach_mutex;
static int cmdk_max_instance = 0;
/*
 * Panic dumpsys state
 * There is only a single flag that is not mutex locked since
 * the system is prevented from thread switching and cmdk_dump
 * will only be called in a single threaded operation.
 */
static int cmdk_indump;

/*
 * Local Function Prototypes
 */
static int cmdk_create_obj(dev_info_t *dip, struct cmdk *dkp);
static void cmdk_destroy_obj(dev_info_t *dip, struct cmdk *dkp);
static void cmdkmin(struct buf *bp);
static int cmdkrw(dev_t dev, struct uio *uio, int flag);
static int cmdkarw(dev_t dev, struct aio_req *aio, int flag);

/*
 * Bad Block Handling Function Prototypes
 */
static void cmdk_bbh_reopen(struct cmdk *dkp);
static opaque_t cmdk_bbh_gethandle(opaque_t bbh_data, struct buf *bp);
static bbh_cookie_t cmdk_bbh_htoc(opaque_t bbh_data, opaque_t handle);
static void cmdk_bbh_freehandle(opaque_t bbh_data, opaque_t handle);
static void cmdk_bbh_close(struct cmdk *dkp);
static void cmdk_bbh_setalts_idx(struct cmdk *dkp);
static int cmdk_bbh_bsearch(struct alts_ent *buf, int cnt, daddr32_t key);

static struct bbh_objops cmdk_bbh_ops = {
    nulldev,
    nulldev,
    cmdk_bbh_gethandle,
    cmdk_bbh_htoc,
    cmdk_bbh_freehandle,
    0, 0
};

static int cmdkopen(dev_t *dev_p, int flag, int otyp, cred_t *credp);
static int cmdkclose(dev_t dev, int flag, int otyp, cred_t *credp);
static int cmdkstrategy(struct buf *bp);
static int cmdkdump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk);
static int cmdkioctl(dev_t, int, intptr_t, int, cred_t *, int *);
static int cmdkread(dev_t dev, struct uio *uio, cred_t *credp);
static int cmdkwrite(dev_t dev, struct uio *uio, cred_t *credp);
static int cmdk_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp);
static int cmdkaread(dev_t dev, struct aio_req *aio, cred_t *credp);
static int cmdkawrite(dev_t dev, struct aio_req *aio, cred_t *credp);

/*
 * Device driver ops vector
 */

static struct cb_ops cmdk_cb_ops = {
    cmdkopen,                   /* open */
    cmdkclose,                  /* close */
    cmdkstrategy,               /* strategy */
    nodev,                      /* print */
    cmdkdump,                   /* dump */
    cmdkread,                   /* read */
    cmdkwrite,                  /* write */
    cmdkioctl,                  /* ioctl */
    nodev,                      /* devmap */
    nodev,                      /* mmap */
    nodev,                      /* segmap */
    nochpoll,                   /* poll */
    cmdk_prop_op,               /* cb_prop_op */
    0,                          /* streamtab */
    D_64BIT | D_MP | D_NEW,     /* Driver compatibility flag */
    CB_REV,                     /* cb_rev */
    cmdkaread,                  /* async read */
    cmdkawrite                  /* async write */
};

static int cmdkinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
    void **result);
static int cmdkprobe(dev_info_t *dip);
static int cmdkattach(dev_info_t *dip, ddi_attach_cmd_t cmd);
static int cmdkdetach(dev_info_t *dip, ddi_detach_cmd_t cmd);

static void cmdk_setup_pm(dev_info_t *dip, struct cmdk *dkp);
static int cmdkresume(dev_info_t *dip);
static int cmdksuspend(dev_info_t *dip);
static int cmdkpower(dev_info_t *dip, int component, int level);

struct dev_ops cmdk_ops = {
    DEVO_REV,                   /* devo_rev, */
    0,                          /* refcnt */
    cmdkinfo,                   /* info */
    nulldev,                    /* identify */
    cmdkprobe,                  /* probe */
    cmdkattach,                 /* attach */
    cmdkdetach,                 /* detach */
    nodev,                      /* reset */
    &cmdk_cb_ops,               /* driver operations */
    (struct bus_ops *)0,        /* bus operations */
    cmdkpower,                  /* power */
    ddi_quiesce_not_needed,     /* quiesce */
};

/*
 * This is the loadable module wrapper.
 */
#include <sys/modctl.h>
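/*
 * In the XPV_HVM_DRIVER build no dev_ops are registered through the
 * wrapper below: only a modlmisc is installed, so _init/_fini/_info
 * still resolve, while the modldrv/dev_ops path applies to the
 * bare-metal build alone.
 */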
#ifndef XPV_HVM_DRIVER
static struct modldrv modldrv = {
    &mod_driverops,             /* Type of module. This one is a driver */
    "Common Direct Access Disk",
    &cmdk_ops,                  /* driver ops */
};

static struct modlinkage modlinkage = {
    MODREV_1, (void *)&modldrv, NULL
};

#else /* XPV_HVM_DRIVER */
static struct modlmisc modlmisc = {
    &mod_miscops,               /* Type of module. This one is a misc */
    "HVM Common Direct Access Disk",
};

static struct modlinkage modlinkage = {
    MODREV_1, (void *)&modlmisc, NULL
};

#endif /* XPV_HVM_DRIVER */

/* Function prototypes for cmlb callbacks */

static int cmdk_lb_rdwr(dev_info_t *dip, uchar_t cmd, void *bufaddr,
    diskaddr_t start, size_t length, void *tg_cookie);

static int cmdk_lb_getinfo(dev_info_t *dip, int cmd, void *arg,
    void *tg_cookie);

static void cmdk_devid_setup(struct cmdk *dkp);
static int cmdk_devid_modser(struct cmdk *dkp);
static int cmdk_get_modser(struct cmdk *dkp, int ioccmd, char *buf, int len);
static int cmdk_devid_fabricate(struct cmdk *dkp);
static int cmdk_devid_read(struct cmdk *dkp);

static cmlb_tg_ops_t cmdk_lb_ops = {
    TG_DK_OPS_VERSION_1,
    cmdk_lb_rdwr,
    cmdk_lb_getinfo
};

static boolean_t
cmdk_isopen(struct cmdk *dkp, dev_t dev)
{
    int part, otyp;
    ulong_t partbit;

    ASSERT(MUTEX_HELD((&dkp->dk_mutex)));

    part = CMDKPART(dev);
    partbit = 1 << part;

    /* account for close */
    if (dkp->dk_open_lyr[part] != 0)
        return (B_TRUE);
    for (otyp = 0; otyp < OTYPCNT; otyp++)
        if (dkp->dk_open_reg[otyp] & partbit)
            return (B_TRUE);
    return (B_FALSE);
}

int
_init(void)
{
    int rval;

#ifndef XPV_HVM_DRIVER
    if (rval = ddi_soft_state_init(&cmdk_state, sizeof (struct cmdk), 7))
        return (rval);
#endif /* !XPV_HVM_DRIVER */

    mutex_init(&cmdk_attach_mutex, NULL, MUTEX_DRIVER, NULL);
    if ((rval = mod_install(&modlinkage)) != 0) {
        mutex_destroy(&cmdk_attach_mutex);
#ifndef XPV_HVM_DRIVER
        ddi_soft_state_fini(&cmdk_state);
#endif /* !XPV_HVM_DRIVER */
    }
    return (rval);
}

int
_fini(void)
{
    return (EBUSY);
}

int
_info(struct modinfo *modinfop)
{
    return (mod_info(&modlinkage, modinfop));
}

/*
 * Autoconfiguration Routines
 */
static int
cmdkprobe(dev_info_t *dip)
{
    int instance;
    int status;
    struct cmdk *dkp;

    instance = ddi_get_instance(dip);

#ifndef XPV_HVM_DRIVER
    if (ddi_get_soft_state(cmdk_state, instance))
        return (DDI_PROBE_PARTIAL);

    if (ddi_soft_state_zalloc(cmdk_state, instance) != DDI_SUCCESS)
        return (DDI_PROBE_PARTIAL);
#endif /* !XPV_HVM_DRIVER */

    if ((dkp = ddi_get_soft_state(cmdk_state, instance)) == NULL)
        return (DDI_PROBE_PARTIAL);

    mutex_init(&dkp->dk_mutex, NULL, MUTEX_DRIVER, NULL);
    rw_init(&dkp->dk_bbh_mutex, NULL, RW_DRIVER, NULL);
    dkp->dk_dip = dip;
    mutex_enter(&dkp->dk_mutex);

    dkp->dk_dev = makedevice(ddi_driver_major(dip),
        ddi_get_instance(dip) << CMDK_UNITSHF);

    /* linkage to dadk and strategy */
    if (cmdk_create_obj(dip, dkp) != DDI_SUCCESS) {
        mutex_exit(&dkp->dk_mutex);
        mutex_destroy(&dkp->dk_mutex);
        rw_destroy(&dkp->dk_bbh_mutex);
#ifndef XPV_HVM_DRIVER
        ddi_soft_state_free(cmdk_state, instance);
#endif /* !XPV_HVM_DRIVER */
        return (DDI_PROBE_PARTIAL);
    }

    status = dadk_probe(DKTP_DATA, KM_NOSLEEP);
    if (status != DDI_PROBE_SUCCESS) {
        cmdk_destroy_obj(dip, dkp);     /* dadk/strategy linkage */
        mutex_exit(&dkp->dk_mutex);
        mutex_destroy(&dkp->dk_mutex);
        rw_destroy(&dkp->dk_bbh_mutex);
#ifndef XPV_HVM_DRIVER
        ddi_soft_state_free(cmdk_state, instance);
#endif /* !XPV_HVM_DRIVER */
        return (status);
    }

    mutex_exit(&dkp->dk_mutex);
#ifdef CMDK_DEBUG
    if (cmdk_debug & DENT)
        PRF("cmdkprobe: instance= %d name= `%s`\n",
            instance, ddi_get_name_addr(dip));
#endif
    return (status);
}
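/*
 * Note on the probe/attach split implemented above and below: probe
 * allocates the per-instance soft state, builds the dadk/strategy
 * linkage and issues a non-blocking dadk_probe(); attach then looks
 * the same soft state up again by instance number and layers cmlb,
 * devid and PM setup on top of it, so probe must leave the soft
 * state allocated on success.
 */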
static int
cmdkattach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
    int instance;
    struct cmdk *dkp;
    char *node_type;

    switch (cmd) {
    case DDI_ATTACH:
        break;
    case DDI_RESUME:
        return (cmdkresume(dip));
    default:
        return (DDI_FAILURE);
    }

    instance = ddi_get_instance(dip);
    if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
        return (DDI_FAILURE);

    dkp->dk_pm_level = CMDK_SPINDLE_UNINIT;
    mutex_init(&dkp->dk_mutex, NULL, MUTEX_DRIVER, NULL);

    mutex_enter(&dkp->dk_mutex);

    /* dadk_attach is an empty function that only returns SUCCESS */
    (void) dadk_attach(DKTP_DATA);

    node_type = (DKTP_EXT->tg_nodetype);

    /*
     * this open allows cmlb to read the device
     * and determine the label types
     * so that cmlb can create minor nodes for the device
     */

    /* open the target disk */
    if (dadk_open(DKTP_DATA, 0) != DDI_SUCCESS)
        goto fail2;

#ifdef _ILP32
    {
        struct tgdk_geom phyg;
        (void) dadk_getphygeom(DKTP_DATA, &phyg);
        if ((phyg.g_cap - 1) > DK_MAX_BLOCKS) {
            (void) dadk_close(DKTP_DATA);
            goto fail2;
        }
    }
#endif

    /* mark as having opened target */
    dkp->dk_flag |= CMDK_TGDK_OPEN;

    cmlb_alloc_handle((cmlb_handle_t *)&dkp->dk_cmlbhandle);

    if (cmlb_attach(dip,
        &cmdk_lb_ops,
        DTYPE_DIRECT,           /* device_type */
        B_FALSE,                /* removable */
        B_FALSE,                /* hot pluggable XXX */
        node_type,
        CMLB_CREATE_ALTSLICE_VTOC_16_DTYPE_DIRECT, /* alter_behaviour */
        dkp->dk_cmlbhandle,
        0) != 0)
        goto fail1;

    /* Calling validate will create minor nodes according to disk label */
    (void) cmlb_validate(dkp->dk_cmlbhandle, 0, 0);

    /* set bbh (Bad Block Handling) */
    cmdk_bbh_reopen(dkp);

    /* setup devid string */
    cmdk_devid_setup(dkp);

    mutex_enter(&cmdk_attach_mutex);
    if (instance > cmdk_max_instance)
        cmdk_max_instance = instance;
    mutex_exit(&cmdk_attach_mutex);

    mutex_exit(&dkp->dk_mutex);

    /*
     * Add a zero-length attribute to tell the world we support
     * kernel ioctls (for layered drivers)
     */
    (void) ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
        DDI_KERNEL_IOCTL, NULL, 0);
    ddi_report_dev(dip);

    /*
     * Initialize power management
     */
    mutex_init(&dkp->dk_pm_mutex, NULL, MUTEX_DRIVER, NULL);
    cv_init(&dkp->dk_suspend_cv, NULL, CV_DRIVER, NULL);
    cmdk_setup_pm(dip, dkp);

    return (DDI_SUCCESS);

fail1:
    cmlb_free_handle(&dkp->dk_cmlbhandle);
    (void) dadk_close(DKTP_DATA);
fail2:
    cmdk_destroy_obj(dip, dkp);
    rw_destroy(&dkp->dk_bbh_mutex);
    mutex_exit(&dkp->dk_mutex);
    mutex_destroy(&dkp->dk_mutex);
#ifndef XPV_HVM_DRIVER
    ddi_soft_state_free(cmdk_state, instance);
#endif /* !XPV_HVM_DRIVER */
    return (DDI_FAILURE);
}
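/*
 * The fail1/fail2 labels above unwind in reverse order of setup:
 * fail1 (cmlb_attach failed) frees the cmlb handle and closes the
 * target disk before falling into fail2, which tears down the dadk
 * linkage, the locks and the soft state created by cmdkprobe().
 */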
static int
cmdkdetach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
    struct cmdk *dkp;
    int instance;
    int max_instance;

    switch (cmd) {
    case DDI_DETACH:
        /* return (DDI_FAILURE); */
        break;
    case DDI_SUSPEND:
        return (cmdksuspend(dip));
    default:
#ifdef CMDK_DEBUG
        if (cmdk_debug & DIO) {
            PRF("cmdkdetach: cmd = %d unknown\n", cmd);
        }
#endif
        return (DDI_FAILURE);
    }

    mutex_enter(&cmdk_attach_mutex);
    max_instance = cmdk_max_instance;
    mutex_exit(&cmdk_attach_mutex);

    /* check if any instance of driver is open */
    for (instance = 0; instance < max_instance; instance++) {
        dkp = ddi_get_soft_state(cmdk_state, instance);
        if (!dkp)
            continue;
        if (dkp->dk_flag & CMDK_OPEN)
            return (DDI_FAILURE);
    }

    instance = ddi_get_instance(dip);
    if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
        return (DDI_SUCCESS);

    mutex_enter(&dkp->dk_mutex);

    /*
     * The cmdk_part_info call at the end of cmdkattach may have
     * caused cmdk_reopen to do a TGDK_OPEN; make sure we close on
     * detach for the case when cmdkopen/cmdkclose never occurs.
     */
    if (dkp->dk_flag & CMDK_TGDK_OPEN) {
        dkp->dk_flag &= ~CMDK_TGDK_OPEN;
        (void) dadk_close(DKTP_DATA);
    }

    cmlb_detach(dkp->dk_cmlbhandle, 0);
    cmlb_free_handle(&dkp->dk_cmlbhandle);
    ddi_prop_remove_all(dip);

    cmdk_destroy_obj(dip, dkp);     /* dadk/strategy linkage */
    mutex_exit(&dkp->dk_mutex);
    mutex_destroy(&dkp->dk_mutex);
    rw_destroy(&dkp->dk_bbh_mutex);
    mutex_destroy(&dkp->dk_pm_mutex);
    cv_destroy(&dkp->dk_suspend_cv);
#ifndef XPV_HVM_DRIVER
    ddi_soft_state_free(cmdk_state, instance);
#endif /* !XPV_HVM_DRIVER */

    return (DDI_SUCCESS);
}

static int
cmdkinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
    dev_t dev = (dev_t)arg;
    int instance;
    struct cmdk *dkp;

#ifdef lint
    dip = dip;  /* no one ever uses this */
#endif
#ifdef CMDK_DEBUG
    if (cmdk_debug & DENT)
        PRF("cmdkinfo: call\n");
#endif
    instance = CMDKUNIT(dev);

    switch (infocmd) {
    case DDI_INFO_DEVT2DEVINFO:
        if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
            return (DDI_FAILURE);
        *result = (void *) dkp->dk_dip;
        break;
    case DDI_INFO_DEVT2INSTANCE:
        *result = (void *)(intptr_t)instance;
        break;
    default:
        return (DDI_FAILURE);
    }
    return (DDI_SUCCESS);
}
/*
 * Initialize the power management components
 */
static void
cmdk_setup_pm(dev_info_t *dip, struct cmdk *dkp)
{
    char *pm_comp[] = { "NAME=cmdk", "0=off", "1=on", NULL };

    /*
     * Since the cmdk device does not have the 'reg' property,
     * cpr will not call its DDI_SUSPEND/DDI_RESUME entries.
     * The following code is to tell cpr that this device
     * DOES need to be suspended and resumed.
     */
    (void) ddi_prop_update_string(DDI_DEV_T_NONE, dip,
        "pm-hardware-state", "needs-suspend-resume");

    if (ddi_prop_update_string_array(DDI_DEV_T_NONE, dip,
        "pm-components", pm_comp, 3) == DDI_PROP_SUCCESS) {
        if (pm_raise_power(dip, 0, CMDK_SPINDLE_ON) == DDI_SUCCESS) {
            mutex_enter(&dkp->dk_pm_mutex);
            dkp->dk_pm_level = CMDK_SPINDLE_ON;
            dkp->dk_pm_is_enabled = 1;
            mutex_exit(&dkp->dk_pm_mutex);
        } else {
            mutex_enter(&dkp->dk_pm_mutex);
            dkp->dk_pm_level = CMDK_SPINDLE_OFF;
            dkp->dk_pm_is_enabled = 0;
            mutex_exit(&dkp->dk_pm_mutex);
        }
    } else {
        mutex_enter(&dkp->dk_pm_mutex);
        dkp->dk_pm_level = CMDK_SPINDLE_UNINIT;
        dkp->dk_pm_is_enabled = 0;
        mutex_exit(&dkp->dk_pm_mutex);
    }
}

/*
 * Suspend routine; it is run when the DDI_SUSPEND command is
 * received at detach(9E) from system power management.
 */
static int
cmdksuspend(dev_info_t *dip)
{
    struct cmdk *dkp;
    int instance;
    clock_t count = 0;

    instance = ddi_get_instance(dip);
    if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
        return (DDI_FAILURE);
    mutex_enter(&dkp->dk_mutex);
    if (dkp->dk_flag & CMDK_SUSPEND) {
        mutex_exit(&dkp->dk_mutex);
        return (DDI_SUCCESS);
    }
    dkp->dk_flag |= CMDK_SUSPEND;

    /* need to wait a while */
    while (dadk_getcmds(DKTP_DATA) != 0) {
        delay(drv_usectohz(1000000));
        if (count > 60) {
            dkp->dk_flag &= ~CMDK_SUSPEND;
            cv_broadcast(&dkp->dk_suspend_cv);
            mutex_exit(&dkp->dk_mutex);
            return (DDI_FAILURE);
        }
        count++;
    }
    mutex_exit(&dkp->dk_mutex);
    return (DDI_SUCCESS);
}

/*
 * Resume routine; it is run when the DDI_RESUME command is
 * received at attach(9E) from system power management.
 */
static int
cmdkresume(dev_info_t *dip)
{
    struct cmdk *dkp;
    int instance;

    instance = ddi_get_instance(dip);
    if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
        return (DDI_FAILURE);
    mutex_enter(&dkp->dk_mutex);
    if (!(dkp->dk_flag & CMDK_SUSPEND)) {
        mutex_exit(&dkp->dk_mutex);
        return (DDI_FAILURE);
    }
    dkp->dk_pm_level = CMDK_SPINDLE_ON;
    dkp->dk_flag &= ~CMDK_SUSPEND;
    cv_broadcast(&dkp->dk_suspend_cv);
    mutex_exit(&dkp->dk_mutex);
    return (DDI_SUCCESS);
}
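/*
 * Suspend/resume handshake used throughout this driver: cmdksuspend()
 * sets CMDK_SUSPEND and polls dadk_getcmds() for up to about a minute
 * for outstanding commands to drain; the I/O entry points below block
 * on dk_suspend_cv while the flag is set, and cmdkresume() clears the
 * flag and broadcasts the cv to release them.
 */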
/*
 * Power management entry point; it is used to change the power level
 * of a power management component.  The actual hard drive
 * suspend/resume is handled in ata, so this function does no real
 * work other than verifying that the disk is idle.
 */
static int
cmdkpower(dev_info_t *dip, int component, int level)
{
    struct cmdk *dkp;
    int instance;

    instance = ddi_get_instance(dip);
    if (!(dkp = ddi_get_soft_state(cmdk_state, instance)) ||
        component != 0 || level > CMDK_SPINDLE_ON ||
        level < CMDK_SPINDLE_OFF) {
        return (DDI_FAILURE);
    }

    mutex_enter(&dkp->dk_pm_mutex);
    if (dkp->dk_pm_is_enabled && dkp->dk_pm_level == level) {
        mutex_exit(&dkp->dk_pm_mutex);
        return (DDI_SUCCESS);
    }
    mutex_exit(&dkp->dk_pm_mutex);

    if ((level == CMDK_SPINDLE_OFF) &&
        (dadk_getcmds(DKTP_DATA) != 0)) {
        return (DDI_FAILURE);
    }

    mutex_enter(&dkp->dk_pm_mutex);
    dkp->dk_pm_level = level;
    mutex_exit(&dkp->dk_pm_mutex);
    return (DDI_SUCCESS);
}

static int
cmdk_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
    char *name, caddr_t valuep, int *lengthp)
{
    struct cmdk *dkp;

#ifdef CMDK_DEBUG
    if (cmdk_debug & DENT)
        PRF("cmdk_prop_op: call\n");
#endif

    dkp = ddi_get_soft_state(cmdk_state, ddi_get_instance(dip));
    if (dkp == NULL)
        return (ddi_prop_op(dev, dip, prop_op, mod_flags,
            name, valuep, lengthp));

    return (cmlb_prop_op(dkp->dk_cmlbhandle,
        dev, dip, prop_op, mod_flags, name, valuep, lengthp,
        CMDKPART(dev), NULL));
}

/*
 * dump routine
 */
static int
cmdkdump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk)
{
    int instance;
    struct cmdk *dkp;
    diskaddr_t p_lblksrt;
    diskaddr_t p_lblkcnt;
    struct buf local;
    struct buf *bp;

#ifdef CMDK_DEBUG
    if (cmdk_debug & DENT)
        PRF("cmdkdump: call\n");
#endif
    instance = CMDKUNIT(dev);
    if (!(dkp = ddi_get_soft_state(cmdk_state, instance)) || (blkno < 0))
        return (ENXIO);

    if (cmlb_partinfo(
        dkp->dk_cmlbhandle,
        CMDKPART(dev),
        &p_lblkcnt,
        &p_lblksrt,
        NULL,
        NULL,
        0)) {
        return (ENXIO);
    }

    if ((blkno + nblk) > p_lblkcnt)
        return (EINVAL);

    cmdk_indump = 1;    /* Tell disk targets we are panic dumping */

    bp = &local;
    bzero(bp, sizeof (*bp));
    bp->b_flags = B_BUSY;
    bp->b_un.b_addr = addr;
    bp->b_bcount = nblk << SCTRSHFT;
    SET_BP_SEC(bp, ((ulong_t)(p_lblksrt + blkno)));

    (void) dadk_dump(DKTP_DATA, bp);
    return (bp->b_error);
}
/*
 * Copy in the dadkio_rwcmd according to the user's data model.  If needed,
 * convert it for our internal use.
 */
static int
rwcmd_copyin(struct dadkio_rwcmd *rwcmdp, caddr_t inaddr, int flag)
{
    switch (ddi_model_convert_from(flag)) {
    case DDI_MODEL_ILP32: {
        struct dadkio_rwcmd32 cmd32;

        if (ddi_copyin(inaddr, &cmd32,
            sizeof (struct dadkio_rwcmd32), flag)) {
            return (EFAULT);
        }

        rwcmdp->cmd = cmd32.cmd;
        rwcmdp->flags = cmd32.flags;
        rwcmdp->blkaddr = (blkaddr_t)cmd32.blkaddr;
        rwcmdp->buflen = cmd32.buflen;
        rwcmdp->bufaddr = (caddr_t)(intptr_t)cmd32.bufaddr;
        /*
         * Note: we do not convert the 'status' field,
         * as it should not contain valid data at this
         * point.
         */
        bzero(&rwcmdp->status, sizeof (rwcmdp->status));
        break;
    }
    case DDI_MODEL_NONE: {
        if (ddi_copyin(inaddr, rwcmdp,
            sizeof (struct dadkio_rwcmd), flag)) {
            return (EFAULT);
        }
    }
    }
    return (0);
}

/*
 * If necessary, convert the internal rwcmdp and status to the appropriate
 * data model and copy it out to the user.
 */
static int
rwcmd_copyout(struct dadkio_rwcmd *rwcmdp, caddr_t outaddr, int flag)
{
    switch (ddi_model_convert_from(flag)) {
    case DDI_MODEL_ILP32: {
        struct dadkio_rwcmd32 cmd32;

        cmd32.cmd = rwcmdp->cmd;
        cmd32.flags = rwcmdp->flags;
        cmd32.blkaddr = rwcmdp->blkaddr;
        cmd32.buflen = rwcmdp->buflen;
        ASSERT64(((uintptr_t)rwcmdp->bufaddr >> 32) == 0);
        cmd32.bufaddr = (caddr32_t)(uintptr_t)rwcmdp->bufaddr;

        cmd32.status.status = rwcmdp->status.status;
        cmd32.status.resid = rwcmdp->status.resid;
        cmd32.status.failed_blk_is_valid =
            rwcmdp->status.failed_blk_is_valid;
        cmd32.status.failed_blk = rwcmdp->status.failed_blk;
        cmd32.status.fru_code_is_valid =
            rwcmdp->status.fru_code_is_valid;
        cmd32.status.fru_code = rwcmdp->status.fru_code;

        bcopy(rwcmdp->status.add_error_info,
            cmd32.status.add_error_info, DADKIO_ERROR_INFO_LEN);

        if (ddi_copyout(&cmd32, outaddr,
            sizeof (struct dadkio_rwcmd32), flag))
            return (EFAULT);
        break;
    }
    case DDI_MODEL_NONE: {
        if (ddi_copyout(rwcmdp, outaddr,
            sizeof (struct dadkio_rwcmd), flag))
            return (EFAULT);
    }
    }
    return (0);
}
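/*
 * rwcmd_copyin()/rwcmd_copyout() above bracket the DIOCTL_RWCMD case
 * of cmdkioctl() below: the request is copied in and normalized to
 * the native data model, handed to dadk_ioctl() as a kernel pointer,
 * and only the status portion carries meaningful data back out.
 */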
/*
 * ioctl routine
 */
static int
cmdkioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *credp, int *rvalp)
{
    int instance;
    struct scsi_device *devp;
    struct cmdk *dkp;
    char data[NBPSCTR];

    instance = CMDKUNIT(dev);
    if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
        return (ENXIO);

    mutex_enter(&dkp->dk_mutex);
    while (dkp->dk_flag & CMDK_SUSPEND) {
        cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex);
    }
    mutex_exit(&dkp->dk_mutex);

    bzero(data, sizeof (data));

    switch (cmd) {

    case DKIOCGMEDIAINFO: {
        struct dk_minfo media_info;
        struct tgdk_geom phyg;

        /* dadk_getphygeom always returns success */
        (void) dadk_getphygeom(DKTP_DATA, &phyg);

        media_info.dki_lbsize = phyg.g_secsiz;
        media_info.dki_capacity = phyg.g_cap;
        media_info.dki_media_type = DK_FIXED_DISK;

        if (ddi_copyout(&media_info, (void *)arg,
            sizeof (struct dk_minfo), flag)) {
            return (EFAULT);
        } else {
            return (0);
        }
    }

    case DKIOCINFO: {
        struct dk_cinfo *info = (struct dk_cinfo *)data;

        /* controller information */
        info->dki_ctype = (DKTP_EXT->tg_ctype);
        info->dki_cnum = ddi_get_instance(ddi_get_parent(dkp->dk_dip));
        (void) strcpy(info->dki_cname,
            ddi_get_name(ddi_get_parent(dkp->dk_dip)));

        /* Unit Information */
        info->dki_unit = ddi_get_instance(dkp->dk_dip);
        devp = ddi_get_driver_private(dkp->dk_dip);
        info->dki_slave = (CMDEV_TARG(devp) << 3) | CMDEV_LUN(devp);
        (void) strcpy(info->dki_dname, ddi_driver_name(dkp->dk_dip));
        info->dki_flags = DKI_FMTVOL;
        info->dki_partition = CMDKPART(dev);

        info->dki_maxtransfer = maxphys / DEV_BSIZE;
        info->dki_addr = 1;
        info->dki_space = 0;
        info->dki_prio = 0;
        info->dki_vec = 0;

        if (ddi_copyout(data, (void *)arg, sizeof (*info), flag))
            return (EFAULT);
        else
            return (0);
    }

    case DKIOCSTATE: {
        int state;
        int rval;
        diskaddr_t p_lblksrt;
        diskaddr_t p_lblkcnt;

        if (ddi_copyin((void *)arg, &state, sizeof (int), flag))
            return (EFAULT);

        /* dadk_check_media blocks until state changes */
        if (rval = dadk_check_media(DKTP_DATA, &state))
            return (rval);

        if (state == DKIO_INSERTED) {

            if (cmlb_validate(dkp->dk_cmlbhandle, 0, 0) != 0)
                return (ENXIO);

            if (cmlb_partinfo(dkp->dk_cmlbhandle, CMDKPART(dev),
                &p_lblkcnt, &p_lblksrt, NULL, NULL, 0))
                return (ENXIO);

            if (p_lblkcnt <= 0)
                return (ENXIO);
        }

        if (ddi_copyout(&state, (caddr_t)arg, sizeof (int), flag))
            return (EFAULT);

        return (0);
    }

    /*
     * is media removable?
     */
    case DKIOCREMOVABLE: {
        int i;

        i = (DKTP_EXT->tg_rmb) ? 1 : 0;

        if (ddi_copyout(&i, (caddr_t)arg, sizeof (int), flag))
            return (EFAULT);

        return (0);
    }

    case DKIOCADDBAD:
        /*
         * This is not an update mechanism to add bad blocks
         * to the bad block structures stored on disk.
         *
         * addbadsec(1M) will update the bad block data on disk
         * and use this ioctl to force the driver to re-initialize
         * the list of bad blocks in the driver.
         */

        /* start BBH */
        cmdk_bbh_reopen(dkp);
        return (0);

    case DKIOCG_PHYGEOM:
    case DKIOCG_VIRTGEOM:
    case DKIOCGGEOM:
    case DKIOCSGEOM:
    case DKIOCGAPART:
    case DKIOCSAPART:
    case DKIOCGVTOC:
    case DKIOCSVTOC:
    case DKIOCPARTINFO:
    case DKIOCGEXTVTOC:
    case DKIOCSEXTVTOC:
    case DKIOCEXTPARTINFO:
    case DKIOCGMBOOT:
    case DKIOCSMBOOT:
    case DKIOCGETEFI:
    case DKIOCSETEFI:
    case DKIOCPARTITION:
    {
        int rc;

        rc = cmlb_ioctl(dkp->dk_cmlbhandle, dev, cmd, arg, flag,
            credp, rvalp, 0);
        if (cmd == DKIOCSVTOC || cmd == DKIOCSEXTVTOC)
            cmdk_devid_setup(dkp);
        return (rc);
    }

    case DIOCTL_RWCMD: {
        struct dadkio_rwcmd *rwcmdp;
        int status;

        rwcmdp = kmem_alloc(sizeof (struct dadkio_rwcmd), KM_SLEEP);

        status = rwcmd_copyin(rwcmdp, (caddr_t)arg, flag);

        if (status == 0) {
            bzero(&(rwcmdp->status), sizeof (struct dadkio_status));
            status = dadk_ioctl(DKTP_DATA,
                dev,
                cmd,
                (uintptr_t)rwcmdp,
                flag,
                credp,
                rvalp);
        }
        if (status == 0)
            status = rwcmd_copyout(rwcmdp, (caddr_t)arg, flag);

        kmem_free(rwcmdp, sizeof (struct dadkio_rwcmd));
        return (status);
    }

    default:
        return (dadk_ioctl(DKTP_DATA,
            dev,
            cmd,
            arg,
            flag,
            credp,
            rvalp));
    }
}
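/*
 * Open/close accounting, as used by cmdkopen()/cmdkclose() below and
 * by cmdk_isopen() above: OTYP_LYR opens are counted per partition in
 * dk_open_lyr[], every other otyp is one bit per partition in
 * dk_open_reg[otyp], and dk_open_exl tracks FEXCL holders.  Only when
 * the last open of any kind goes away is the cmlb label invalidated.
 */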
/*ARGSUSED1*/
static int
cmdkclose(dev_t dev, int flag, int otyp, cred_t *credp)
{
    int part;
    ulong_t partbit;
    int instance;
    struct cmdk *dkp;
    int lastclose = 1;
    int i;

    instance = CMDKUNIT(dev);
    if (!(dkp = ddi_get_soft_state(cmdk_state, instance)) ||
        (otyp >= OTYPCNT))
        return (ENXIO);

    mutex_enter(&dkp->dk_mutex);

    /* check if device has been opened */
    ASSERT(cmdk_isopen(dkp, dev));
    if (!(dkp->dk_flag & CMDK_OPEN)) {
        mutex_exit(&dkp->dk_mutex);
        return (ENXIO);
    }

    while (dkp->dk_flag & CMDK_SUSPEND) {
        cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex);
    }

    part = CMDKPART(dev);
    partbit = 1 << part;

    /* account for close */
    if (otyp == OTYP_LYR) {
        ASSERT(dkp->dk_open_lyr[part] > 0);
        if (dkp->dk_open_lyr[part])
            dkp->dk_open_lyr[part]--;
    } else {
        ASSERT((dkp->dk_open_reg[otyp] & partbit) != 0);
        dkp->dk_open_reg[otyp] &= ~partbit;
    }
    dkp->dk_open_exl &= ~partbit;

    for (i = 0; i < CMDK_MAXPART; i++)
        if (dkp->dk_open_lyr[i] != 0) {
            lastclose = 0;
            break;
        }

    if (lastclose)
        for (i = 0; i < OTYPCNT; i++)
            if (dkp->dk_open_reg[i] != 0) {
                lastclose = 0;
                break;
            }

    mutex_exit(&dkp->dk_mutex);

    if (lastclose)
        cmlb_invalidate(dkp->dk_cmlbhandle, 0);

    return (DDI_SUCCESS);
}

/*ARGSUSED3*/
static int
cmdkopen(dev_t *dev_p, int flag, int otyp, cred_t *credp)
{
    dev_t dev = *dev_p;
    int part;
    ulong_t partbit;
    int instance;
    struct cmdk *dkp;
    diskaddr_t p_lblksrt;
    diskaddr_t p_lblkcnt;
    int i;
    int nodelay;

    instance = CMDKUNIT(dev);
    if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
        return (ENXIO);

    if (otyp >= OTYPCNT)
        return (EINVAL);

    mutex_enter(&dkp->dk_mutex);
    while (dkp->dk_flag & CMDK_SUSPEND) {
        cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex);
    }
    mutex_exit(&dkp->dk_mutex);

    part = CMDKPART(dev);
    partbit = 1 << part;
    nodelay = (flag & (FNDELAY | FNONBLOCK));

    mutex_enter(&dkp->dk_mutex);

    if (cmlb_validate(dkp->dk_cmlbhandle, 0, 0) != 0) {

        /* fail if not doing a non-blocking open */
        if (!nodelay) {
            mutex_exit(&dkp->dk_mutex);
            return (ENXIO);
        }
    } else if (cmlb_partinfo(dkp->dk_cmlbhandle, part, &p_lblkcnt,
        &p_lblksrt, NULL, NULL, 0) == 0) {

        if (p_lblkcnt <= 0 && (!nodelay || otyp != OTYP_CHR)) {
            mutex_exit(&dkp->dk_mutex);
            return (ENXIO);
        }
    } else {
        /* fail if not doing a non-blocking open */
        if (!nodelay) {
            mutex_exit(&dkp->dk_mutex);
            return (ENXIO);
        }
    }

    if ((DKTP_EXT->tg_rdonly) && (flag & FWRITE)) {
        mutex_exit(&dkp->dk_mutex);
        return (EROFS);
    }

    /* check for part already opened exclusively */
    if (dkp->dk_open_exl & partbit)
        goto excl_open_fail;

    /* check if we can establish exclusive open */
    if (flag & FEXCL) {
        if (dkp->dk_open_lyr[part])
            goto excl_open_fail;
        for (i = 0; i < OTYPCNT; i++) {
            if (dkp->dk_open_reg[i] & partbit)
                goto excl_open_fail;
        }
    }

    /* open will succeed, account for open */
    dkp->dk_flag |= CMDK_OPEN;
    if (otyp == OTYP_LYR)
        dkp->dk_open_lyr[part]++;
    else
        dkp->dk_open_reg[otyp] |= partbit;
    if (flag & FEXCL)
        dkp->dk_open_exl |= partbit;

    mutex_exit(&dkp->dk_mutex);
    return (DDI_SUCCESS);

excl_open_fail:
    mutex_exit(&dkp->dk_mutex);
    return (EBUSY);
}

/*
 * read routine
 */
/*ARGSUSED2*/
static int
cmdkread(dev_t dev, struct uio *uio, cred_t *credp)
{
    return (cmdkrw(dev, uio, B_READ));
}

/*
 * async read routine
 */
/*ARGSUSED2*/
static int
cmdkaread(dev_t dev, struct aio_req *aio, cred_t *credp)
{
    return (cmdkarw(dev, aio, B_READ));
}

/*
 * write routine
 */
/*ARGSUSED2*/
static int
cmdkwrite(dev_t dev, struct uio *uio, cred_t *credp)
{
    return (cmdkrw(dev, uio, B_WRITE));
}

/*
 * async write routine
 */
/*ARGSUSED2*/
static int
cmdkawrite(dev_t dev, struct aio_req *aio, cred_t *credp)
{
    return (cmdkarw(dev, aio, B_WRITE));
}

static void
cmdkmin(struct buf *bp)
{
    if (bp->b_bcount > DK_MAXRECSIZE)
        bp->b_bcount = DK_MAXRECSIZE;
}
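/*
 * cmdkmin() above is the minphys-style clamp handed to physio() and
 * aphysio() below; it bounds each raw transfer at DK_MAXRECSIZE
 * before the request is queued via cmdkstrategy().
 */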
static int
cmdkrw(dev_t dev, struct uio *uio, int flag)
{
    int instance;
    struct cmdk *dkp;

    instance = CMDKUNIT(dev);
    if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
        return (ENXIO);

    mutex_enter(&dkp->dk_mutex);
    while (dkp->dk_flag & CMDK_SUSPEND) {
        cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex);
    }
    mutex_exit(&dkp->dk_mutex);

    return (physio(cmdkstrategy, (struct buf *)0, dev, flag, cmdkmin, uio));
}

static int
cmdkarw(dev_t dev, struct aio_req *aio, int flag)
{
    int instance;
    struct cmdk *dkp;

    instance = CMDKUNIT(dev);
    if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
        return (ENXIO);

    mutex_enter(&dkp->dk_mutex);
    while (dkp->dk_flag & CMDK_SUSPEND) {
        cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex);
    }
    mutex_exit(&dkp->dk_mutex);

    return (aphysio(cmdkstrategy, anocancel, dev, flag, cmdkmin, aio));
}

/*
 * strategy routine
 */
static int
cmdkstrategy(struct buf *bp)
{
    int instance;
    struct cmdk *dkp;
    long d_cnt;
    diskaddr_t p_lblksrt;
    diskaddr_t p_lblkcnt;

    instance = CMDKUNIT(bp->b_edev);
    if (cmdk_indump || !(dkp = ddi_get_soft_state(cmdk_state, instance)) ||
        (dkblock(bp) < 0)) {
        bp->b_resid = bp->b_bcount;
        SETBPERR(bp, ENXIO);
        biodone(bp);
        return (0);
    }

    mutex_enter(&dkp->dk_mutex);
    ASSERT(cmdk_isopen(dkp, bp->b_edev));
    while (dkp->dk_flag & CMDK_SUSPEND) {
        cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex);
    }
    mutex_exit(&dkp->dk_mutex);

    bp->b_flags &= ~(B_DONE | B_ERROR);
    bp->b_resid = 0;
    bp->av_back = NULL;

    /*
     * only re-read the vtoc if necessary (force == FALSE)
     */
    if (cmlb_partinfo(dkp->dk_cmlbhandle, CMDKPART(bp->b_edev),
        &p_lblkcnt, &p_lblksrt, NULL, NULL, 0)) {
        SETBPERR(bp, ENXIO);
    }

    if ((bp->b_bcount & (NBPSCTR - 1)) || (dkblock(bp) > p_lblkcnt))
        SETBPERR(bp, ENXIO);

    if ((bp->b_flags & B_ERROR) || (dkblock(bp) == p_lblkcnt)) {
        bp->b_resid = bp->b_bcount;
        biodone(bp);
        return (0);
    }

    d_cnt = bp->b_bcount >> SCTRSHFT;
    if ((dkblock(bp) + d_cnt) > p_lblkcnt) {
        bp->b_resid = ((dkblock(bp) + d_cnt) - p_lblkcnt) << SCTRSHFT;
        bp->b_bcount -= bp->b_resid;
    }

    SET_BP_SEC(bp, ((ulong_t)(p_lblksrt + dkblock(bp))));
    if (dadk_strategy(DKTP_DATA, bp) != DDI_SUCCESS) {
        bp->b_resid += bp->b_bcount;
        biodone(bp);
    }
    return (0);
}
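/*
 * Worked example (numbers invented) of the partial-transfer math in
 * cmdkstrategy() above: with p_lblkcnt = 100, dkblock(bp) = 98 and
 * b_bcount = 4 sectors, d_cnt is 4, so b_resid becomes
 * (98 + 4 - 100) << SCTRSHFT, i.e. 2 sectors, and b_bcount is trimmed
 * to the 2 sectors that still fit inside the partition.
 */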
static int
cmdk_create_obj(dev_info_t *dip, struct cmdk *dkp)
{
    struct scsi_device *devp;
    opaque_t queobjp = NULL;
    opaque_t flcobjp = NULL;
    char que_keyvalp[64];
    int que_keylen;
    char flc_keyvalp[64];
    int flc_keylen;

    ASSERT(mutex_owned(&dkp->dk_mutex));

    /* Create linkage to queueing routines based on property */
    que_keylen = sizeof (que_keyvalp);
    if (ddi_prop_op(DDI_DEV_T_NONE, dip, PROP_LEN_AND_VAL_BUF,
        DDI_PROP_CANSLEEP, "queue", que_keyvalp, &que_keylen) !=
        DDI_PROP_SUCCESS) {
        cmn_err(CE_WARN, "cmdk_create_obj: queue property undefined");
        return (DDI_FAILURE);
    }
    que_keyvalp[que_keylen] = (char)0;

    if (strcmp(que_keyvalp, "qfifo") == 0) {
        queobjp = (opaque_t)qfifo_create();
    } else if (strcmp(que_keyvalp, "qsort") == 0) {
        queobjp = (opaque_t)qsort_create();
    } else {
        return (DDI_FAILURE);
    }

    /* Create linkage to dequeueing routines based on property */
    flc_keylen = sizeof (flc_keyvalp);
    if (ddi_prop_op(DDI_DEV_T_NONE, dip, PROP_LEN_AND_VAL_BUF,
        DDI_PROP_CANSLEEP, "flow_control", flc_keyvalp, &flc_keylen) !=
        DDI_PROP_SUCCESS) {
        cmn_err(CE_WARN,
            "cmdk_create_obj: flow-control property undefined");
        return (DDI_FAILURE);
    }

    flc_keyvalp[flc_keylen] = (char)0;

    if (strcmp(flc_keyvalp, "dsngl") == 0) {
        flcobjp = (opaque_t)dsngl_create();
    } else if (strcmp(flc_keyvalp, "dmult") == 0) {
        flcobjp = (opaque_t)dmult_create();
    } else {
        return (DDI_FAILURE);
    }

    /* populate bbh_obj object stored in dkp */
    dkp->dk_bbh_obj.bbh_data = dkp;
    dkp->dk_bbh_obj.bbh_ops = &cmdk_bbh_ops;

    /* create linkage to dadk */
    dkp->dk_tgobjp = (opaque_t)dadk_create();

    devp = ddi_get_driver_private(dip);
    (void) dadk_init(DKTP_DATA, devp, flcobjp, queobjp, &dkp->dk_bbh_obj,
        NULL);

    return (DDI_SUCCESS);
}

static void
cmdk_destroy_obj(dev_info_t *dip, struct cmdk *dkp)
{
    char que_keyvalp[64];
    int que_keylen;
    char flc_keyvalp[64];
    int flc_keylen;

    ASSERT(mutex_owned(&dkp->dk_mutex));

    (void) dadk_free((dkp->dk_tgobjp));
    dkp->dk_tgobjp = NULL;

    que_keylen = sizeof (que_keyvalp);
    if (ddi_prop_op(DDI_DEV_T_NONE, dip, PROP_LEN_AND_VAL_BUF,
        DDI_PROP_CANSLEEP, "queue", que_keyvalp, &que_keylen) !=
        DDI_PROP_SUCCESS) {
        cmn_err(CE_WARN, "cmdk_destroy_obj: queue property undefined");
        return;
    }
    que_keyvalp[que_keylen] = (char)0;

    flc_keylen = sizeof (flc_keyvalp);
    if (ddi_prop_op(DDI_DEV_T_NONE, dip, PROP_LEN_AND_VAL_BUF,
        DDI_PROP_CANSLEEP, "flow_control", flc_keyvalp, &flc_keylen) !=
        DDI_PROP_SUCCESS) {
        cmn_err(CE_WARN,
            "cmdk_destroy_obj: flow-control property undefined");
        return;
    }
    flc_keyvalp[flc_keylen] = (char)0;
}

/*ARGSUSED5*/
static int
cmdk_lb_rdwr(dev_info_t *dip, uchar_t cmd, void *bufaddr,
    diskaddr_t start, size_t count, void *tg_cookie)
{
    struct cmdk *dkp;
    opaque_t handle;
    int rc = 0;
    char *bufa;

    dkp = ddi_get_soft_state(cmdk_state, ddi_get_instance(dip));
    if (dkp == NULL)
        return (ENXIO);

    if (cmd != TG_READ && cmd != TG_WRITE)
        return (EINVAL);

    /* count must be a multiple of 512 */
    count = (count + NBPSCTR - 1) & -NBPSCTR;
    handle = dadk_iob_alloc(DKTP_DATA, start, count, KM_SLEEP);
    if (!handle)
        return (ENOMEM);

    if (cmd == TG_READ) {
        bufa = dadk_iob_xfer(DKTP_DATA, handle, B_READ);
        if (!bufa)
            rc = EIO;
        else
            bcopy(bufa, bufaddr, count);
    } else {
        bufa = dadk_iob_htoc(DKTP_DATA, handle);
        bcopy(bufaddr, bufa, count);
        bufa = dadk_iob_xfer(DKTP_DATA, handle, B_WRITE);
        if (!bufa)
            rc = EIO;
    }
    (void) dadk_iob_free(DKTP_DATA, handle);

    return (rc);
}
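/*
 * cmdk_lb_rdwr() above and cmdk_lb_getinfo() below are the two
 * callbacks handed to cmlb through cmdk_lb_ops: cmlb uses them to
 * read and write label blocks and to query geometry and capacity.
 * Note that the rdwr path rounds every request up to a whole number
 * of 512-byte sectors before staging it through a dadk iob handle.
 */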
/*ARGSUSED3*/
static int
cmdk_lb_getinfo(dev_info_t *dip, int cmd, void *arg, void *tg_cookie)
{
    struct cmdk *dkp;
    struct tgdk_geom phyg;

    dkp = ddi_get_soft_state(cmdk_state, ddi_get_instance(dip));
    if (dkp == NULL)
        return (ENXIO);

    switch (cmd) {
    case TG_GETPHYGEOM: {
        cmlb_geom_t *phygeomp = (cmlb_geom_t *)arg;

        /* dadk_getphygeom always returns success */
        (void) dadk_getphygeom(DKTP_DATA, &phyg);

        phygeomp->g_capacity = phyg.g_cap;
        phygeomp->g_nsect = phyg.g_sec;
        phygeomp->g_nhead = phyg.g_head;
        phygeomp->g_acyl = phyg.g_acyl;
        phygeomp->g_ncyl = phyg.g_cyl;
        phygeomp->g_secsize = phyg.g_secsiz;
        phygeomp->g_intrlv = 1;
        phygeomp->g_rpm = 3600;

        return (0);
    }

    case TG_GETVIRTGEOM: {
        cmlb_geom_t *virtgeomp = (cmlb_geom_t *)arg;
        diskaddr_t capacity;

        (void) dadk_getgeom(DKTP_DATA, &phyg);
        capacity = phyg.g_cap;

        /*
         * If the controller returned us something that doesn't
         * really fit into an Int 13/function 8 geometry
         * result, just fail the ioctl.  See PSARC 1998/313.
         */
        if (capacity < 0 || capacity >= 63 * 254 * 1024)
            return (EINVAL);

        virtgeomp->g_capacity = capacity;
        virtgeomp->g_nsect = 63;
        virtgeomp->g_nhead = 254;
        virtgeomp->g_ncyl = capacity / (63 * 254);
        virtgeomp->g_acyl = 0;
        virtgeomp->g_secsize = 512;
        virtgeomp->g_intrlv = 1;
        virtgeomp->g_rpm = 3600;

        return (0);
    }

    case TG_GETCAPACITY:
    case TG_GETBLOCKSIZE:
    {
        /* dadk_getphygeom always returns success */
        (void) dadk_getphygeom(DKTP_DATA, &phyg);
        if (cmd == TG_GETCAPACITY)
            *(diskaddr_t *)arg = phyg.g_cap;
        else
            *(uint32_t *)arg = (uint32_t)phyg.g_secsiz;

        return (0);
    }

    case TG_GETATTR: {
        tg_attribute_t *tgattribute = (tg_attribute_t *)arg;

        if ((DKTP_EXT->tg_rdonly))
            tgattribute->media_is_writable = FALSE;
        else
            tgattribute->media_is_writable = TRUE;

        return (0);
    }

    default:
        return (ENOTTY);
    }
}

/*
 * Create and register the devid.
 * There are 4 different ways we can get a device id:
 *    1. Already have one - nothing to do
 *    2. Build one from the drive's model and serial numbers
 *    3. Read one from the disk (first sector of last track)
 *    4. Fabricate one and write it on the disk.
 * If any of these succeeds, register the device id.
 */
static void
cmdk_devid_setup(struct cmdk *dkp)
{
    int rc;

    /* Try options until one succeeds, or all have failed */

    /* 1. All done if already registered */
    if (dkp->dk_devid != NULL)
        return;

    /* 2. Build a devid from the model and serial number */
    rc = cmdk_devid_modser(dkp);
    if (rc != DDI_SUCCESS) {
        /* 3. Read devid from the disk, if present */
        rc = cmdk_devid_read(dkp);

        /* 4. otherwise make one up and write it on the disk */
        if (rc != DDI_SUCCESS)
            rc = cmdk_devid_fabricate(dkp);
    }

    /* If we managed to get a devid any of the above ways, register it */
    if (rc == DDI_SUCCESS)
        (void) ddi_devid_register(dkp->dk_dip, dkp->dk_devid);
}
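/*
 * For illustration (values invented): a drive reporting model
 * "ST340014A" and serial "3JX8BY2D" produces the hwid string
 * "ST340014A=3JX8BY2D", which cmdk_devid_modser() below registers
 * as a DEVID_ATA_SERIAL device id, trailing NUL excluded.
 */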
/*
 * Build a devid from the model and serial number.
 * Return DDI_SUCCESS or DDI_FAILURE.
 */
static int
cmdk_devid_modser(struct cmdk *dkp)
{
    int rc = DDI_FAILURE;
    char *hwid;
    int modlen;
    int serlen;

    /*
     * device ID is a concatenation of model number, '=', serial number.
     */
    hwid = kmem_alloc(CMDK_HWIDLEN, KM_SLEEP);
    modlen = cmdk_get_modser(dkp, DIOCTL_GETMODEL, hwid, CMDK_HWIDLEN);
    if (modlen == 0) {
        rc = DDI_FAILURE;
        goto err;
    }
    hwid[modlen++] = '=';
    serlen = cmdk_get_modser(dkp, DIOCTL_GETSERIAL,
        hwid + modlen, CMDK_HWIDLEN - modlen);
    if (serlen == 0) {
        rc = DDI_FAILURE;
        goto err;
    }
    hwid[modlen + serlen] = 0;

    /* Initialize the device ID, trailing NULL not included */
    rc = ddi_devid_init(dkp->dk_dip, DEVID_ATA_SERIAL, modlen + serlen,
        hwid, &dkp->dk_devid);
    if (rc != DDI_SUCCESS) {
        rc = DDI_FAILURE;
        goto err;
    }

    rc = DDI_SUCCESS;

err:
    kmem_free(hwid, CMDK_HWIDLEN);
    return (rc);
}

static int
cmdk_get_modser(struct cmdk *dkp, int ioccmd, char *buf, int len)
{
    dadk_ioc_string_t strarg;
    int rval;
    char *s;
    char ch;
    boolean_t ret;
    int i;
    int tb;

    strarg.is_buf = buf;
    strarg.is_size = len;
    if (dadk_ioctl(DKTP_DATA,
        dkp->dk_dev,
        ioccmd,
        (uintptr_t)&strarg,
        FNATIVE | FKIOCTL,
        NULL,
        &rval) != 0)
        return (0);

    /*
     * A valid model/serial string must contain a non-zero non-space
     * character; trim trailing spaces/NULs.
     */
    ret = B_FALSE;
    s = buf;
    for (i = 0; i < strarg.is_size; i++) {
        ch = *s++;
        if (ch != ' ' && ch != '\0')
            tb = i + 1;
        if (ch != ' ' && ch != '\0' && ch != '0')
            ret = B_TRUE;
    }

    if (ret == B_FALSE)
        return (0);

    return (tb);
}

/*
 * Read a devid from the first block of the last track of
 * the last cylinder.  Make sure what we read is a valid devid.
 * Return DDI_SUCCESS or DDI_FAILURE.
 */
static int
cmdk_devid_read(struct cmdk *dkp)
{
    diskaddr_t blk;
    struct dk_devid *dkdevidp;
    uint_t *ip;
    int chksum;
    int i, sz;
    tgdk_iob_handle handle = NULL;
    int rc = DDI_FAILURE;

    if (cmlb_get_devid_block(dkp->dk_cmlbhandle, &blk, 0))
        goto err;

    /* read the devid */
    handle = dadk_iob_alloc(DKTP_DATA, blk, NBPSCTR, KM_SLEEP);
    if (handle == NULL)
        goto err;

    dkdevidp = (struct dk_devid *)dadk_iob_xfer(DKTP_DATA, handle, B_READ);
    if (dkdevidp == NULL)
        goto err;

    /* Validate the revision */
    if ((dkdevidp->dkd_rev_hi != DK_DEVID_REV_MSB) ||
        (dkdevidp->dkd_rev_lo != DK_DEVID_REV_LSB))
        goto err;

    /* Calculate the checksum */
    chksum = 0;
    ip = (uint_t *)dkdevidp;
    for (i = 0; i < ((NBPSCTR - sizeof (int)) / sizeof (int)); i++)
        chksum ^= ip[i];
    if (DKD_GETCHKSUM(dkdevidp) != chksum)
        goto err;

    /* Validate the device id */
    if (ddi_devid_valid((ddi_devid_t)dkdevidp->dkd_devid) != DDI_SUCCESS)
        goto err;

    /* keep a copy of the device id */
    sz = ddi_devid_sizeof((ddi_devid_t)dkdevidp->dkd_devid);
    dkp->dk_devid = kmem_alloc(sz, KM_SLEEP);
    bcopy(dkdevidp->dkd_devid, dkp->dk_devid, sz);

    rc = DDI_SUCCESS;

err:
    if (handle != NULL)
        (void) dadk_iob_free(DKTP_DATA, handle);
    return (rc);
}
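/*
 * The on-disk devid sector checksum computed above (and again when
 * fabricating below) is the XOR of the first (NBPSCTR - 4) / 4
 * 32-bit words of the sector, i.e. everything except the final word,
 * which is reserved for the checksum itself and accessed through
 * DKD_GETCHKSUM/DKD_FORMCHKSUM.
 */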
/*
 * Create a devid and write it on the first block of the last track of
 * the last cylinder.
 * Return DDI_SUCCESS or DDI_FAILURE.
 */
static int
cmdk_devid_fabricate(struct cmdk *dkp)
{
    ddi_devid_t devid = NULL;       /* devid made by ddi_devid_init */
    struct dk_devid *dkdevidp;      /* devid struct stored on disk */
    diskaddr_t blk;
    tgdk_iob_handle handle = NULL;
    uint_t *ip, chksum;
    int i;
    int rc = DDI_FAILURE;

    if (ddi_devid_init(dkp->dk_dip, DEVID_FAB, 0, NULL, &devid) !=
        DDI_SUCCESS)
        goto err;

    if (cmlb_get_devid_block(dkp->dk_cmlbhandle, &blk, 0)) {
        /* no device id block address */
        goto err;
    }

    handle = dadk_iob_alloc(DKTP_DATA, blk, NBPSCTR, KM_SLEEP);
    if (!handle)
        goto err;

    /* Locate the buffer */
    dkdevidp = (struct dk_devid *)dadk_iob_htoc(DKTP_DATA, handle);

    /* Fill in the revision */
    bzero(dkdevidp, NBPSCTR);
    dkdevidp->dkd_rev_hi = DK_DEVID_REV_MSB;
    dkdevidp->dkd_rev_lo = DK_DEVID_REV_LSB;

    /* Copy in the device id */
    i = ddi_devid_sizeof(devid);
    if (i > DK_DEVID_SIZE)
        goto err;
    bcopy(devid, dkdevidp->dkd_devid, i);

    /* Calculate the chksum */
    chksum = 0;
    ip = (uint_t *)dkdevidp;
    for (i = 0; i < ((NBPSCTR - sizeof (int)) / sizeof (int)); i++)
        chksum ^= ip[i];

    /* Fill in the checksum */
    DKD_FORMCHKSUM(chksum, dkdevidp);

    /* write the devid */
    (void) dadk_iob_xfer(DKTP_DATA, handle, B_WRITE);

    dkp->dk_devid = devid;

    rc = DDI_SUCCESS;

err:
    if (handle != NULL)
        (void) dadk_iob_free(DKTP_DATA, handle);

    if (rc != DDI_SUCCESS && devid != NULL)
        ddi_devid_free(devid);

    return (rc);
}

static void
cmdk_bbh_free_alts(struct cmdk *dkp)
{
    if (dkp->dk_alts_hdl) {
        (void) dadk_iob_free(DKTP_DATA, dkp->dk_alts_hdl);
        kmem_free(dkp->dk_slc_cnt,
            NDKMAP * (sizeof (uint32_t) + sizeof (struct alts_ent *)));
        dkp->dk_alts_hdl = NULL;
    }
}
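/*
 * On-disk layout consumed by cmdk_bbh_reopen() below: the slice
 * tagged V_ALTSCTR begins with an alts_parttbl label block (checked
 * against ALTS_SANITY) whose alts_ent_base/alts_ent_end fields give
 * the block range, relative to the start of that slice, of the
 * bad-block remap table that is read in next.
 */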
static void
cmdk_bbh_reopen(struct cmdk *dkp)
{
    tgdk_iob_handle handle = NULL;
    diskaddr_t slcb, slcn, slce;
    struct alts_parttbl *ap;
    struct alts_ent *enttblp;
    uint32_t altused;
    uint32_t altbase;
    uint32_t altlast;
    int alts;
    uint16_t vtoctag;
    int i, j;

    /* find slice with V_ALTSCTR tag */
    for (alts = 0; alts < NDKMAP; alts++) {
        if (cmlb_partinfo(
            dkp->dk_cmlbhandle,
            alts,
            &slcn,
            &slcb,
            NULL,
            &vtoctag,
            0)) {
            goto empty; /* no partition table exists */
        }

        if (vtoctag == V_ALTSCTR && slcn > 1)
            break;
    }
    if (alts >= NDKMAP) {
        goto empty; /* no V_ALTSCTR slice defined */
    }

    /* read in ALTS label block */
    handle = dadk_iob_alloc(DKTP_DATA, slcb, NBPSCTR, KM_SLEEP);
    if (!handle) {
        goto empty;
    }

    ap = (struct alts_parttbl *)dadk_iob_xfer(DKTP_DATA, handle, B_READ);
    if (!ap || (ap->alts_sanity != ALTS_SANITY)) {
        goto empty;
    }

    altused = ap->alts_ent_used;    /* number of BB entries */
    altbase = ap->alts_ent_base;    /* blk offset from begin slice */
    altlast = ap->alts_ent_end;     /* blk offset to last block */
    /* ((altused * sizeof (struct alts_ent) + NBPSCTR - 1) & ~NBPSCTR) */

    if (altused == 0 ||
        altbase < 1 ||
        altbase > altlast ||
        altlast >= slcn) {
        goto empty;
    }
    (void) dadk_iob_free(DKTP_DATA, handle);

    /* read in ALTS remapping table */
    handle = dadk_iob_alloc(DKTP_DATA,
        slcb + altbase,
        (altlast - altbase + 1) << SCTRSHFT, KM_SLEEP);
    if (!handle) {
        goto empty;
    }

    enttblp = (struct alts_ent *)dadk_iob_xfer(DKTP_DATA, handle, B_READ);
    if (!enttblp) {
        goto empty;
    }

    rw_enter(&dkp->dk_bbh_mutex, RW_WRITER);

    /* allocate space for dk_slc_cnt and dk_slc_ent tables */
    if (dkp->dk_slc_cnt == NULL) {
        dkp->dk_slc_cnt = kmem_alloc(NDKMAP *
            (sizeof (uint32_t) + sizeof (struct alts_ent *)), KM_SLEEP);
    }
    dkp->dk_slc_ent = (struct alts_ent **)(dkp->dk_slc_cnt + NDKMAP);

    /* free previous BB table (if any) */
    if (dkp->dk_alts_hdl) {
        (void) dadk_iob_free(DKTP_DATA, dkp->dk_alts_hdl);
        dkp->dk_alts_hdl = NULL;
        dkp->dk_altused = 0;
    }

    /* save linkage to new BB table */
    dkp->dk_alts_hdl = handle;
    dkp->dk_altused = altused;

    /*
     * build indexes to BB table by slice
     * effectively we have
     *    struct alts_ent *enttblp[altused];
     *
     *    uint32_t        dk_slc_cnt[NDKMAP];
     *    struct alts_ent *dk_slc_ent[NDKMAP];
     */
    for (i = 0; i < NDKMAP; i++) {
        if (cmlb_partinfo(
            dkp->dk_cmlbhandle,
            i,
            &slcn,
            &slcb,
            NULL,
            NULL,
            0)) {
            goto empty1;
        }

        dkp->dk_slc_cnt[i] = 0;
        if (slcn == 0)
            continue;   /* slice is not allocated */

        /* last block in slice */
        slce = slcb + slcn - 1;

        /* find the first remap entry that may overlap this slice */
        for (j = 0; j < altused; j++) {
            if (enttblp[j].bad_start + enttblp[j].bad_end >= slcb)
                break;
        }
        dkp->dk_slc_ent[i] = enttblp + j;

        /* count remap entries until the end of the slice */
        for (; j < altused && enttblp[j].bad_start <= slce; j++) {
            dkp->dk_slc_cnt[i] += 1;
        }
    }

    rw_exit(&dkp->dk_bbh_mutex);
    return;

empty:
    rw_enter(&dkp->dk_bbh_mutex, RW_WRITER);
empty1:
    if (handle && handle != dkp->dk_alts_hdl)
        (void) dadk_iob_free(DKTP_DATA, handle);

    if (dkp->dk_alts_hdl) {
        (void) dadk_iob_free(DKTP_DATA, dkp->dk_alts_hdl);
        dkp->dk_alts_hdl = NULL;
    }

    rw_exit(&dkp->dk_bbh_mutex);
}

/*ARGSUSED*/
static bbh_cookie_t
cmdk_bbh_htoc(opaque_t bbh_data, opaque_t handle)
{
    struct bbh_handle *hp;
    bbh_cookie_t ckp;

    hp = (struct bbh_handle *)handle;
    ckp = hp->h_cktab + hp->h_idx;
    hp->h_idx++;
    return (ckp);
}

/*ARGSUSED*/
static void
cmdk_bbh_freehandle(opaque_t bbh_data, opaque_t handle)
{
    struct bbh_handle *hp;

    hp = (struct bbh_handle *)handle;
    kmem_free(handle, (sizeof (struct bbh_handle) +
        (hp->h_totck * (sizeof (struct bbh_cookie)))));
}
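/*
 * A bbh handle produced by cmdk_bbh_gethandle() below is consumed
 * cookie by cookie: each cmdk_bbh_htoc() call above returns the next
 * (sector, length) cookie from h_cktab and advances h_idx, and
 * cmdk_bbh_freehandle() releases the handle and its cookie array in
 * one allocation.
 */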
/*
 * cmdk_bbh_gethandle remaps the bad sectors to alternates.
 * There are 7 different cases when the comparison is made
 * between the bad sector cluster and the disk section.
 *
 *      bad sector cluster  gggggggggggbbbbbbbggggggggggg
 *      case 1:                ddddd
 *      case 2:                        -d-----
 *      case 3:                                  ddddd
 *      case 4:                       dddddddddddd
 *      case 5:                    ddddddd-----
 *      case 6:                       ---ddddddd
 *      case 7:                        ddddddd
 *
 * where:  g = good sector,  b = bad sector,
 *         d = sector in disk section,
 *         - = disk section may be extended to cover that disk area
 */

static opaque_t
cmdk_bbh_gethandle(opaque_t bbh_data, struct buf *bp)
{
    struct cmdk *dkp = (struct cmdk *)bbh_data;
    struct bbh_handle *hp;
    struct bbh_cookie *ckp;
    struct alts_ent *altp;
    uint32_t alts_used;
    uint32_t part = CMDKPART(bp->b_edev);
    daddr32_t lastsec;
    long d_count;
    int i;
    int idx;
    int cnt;

    if (part >= V_NUMPAR)
        return (NULL);

    /*
     * This if statement is atomic and it will succeed
     * if there are no bad blocks (almost always)
     *
     * so this if is performed outside of the rw_enter for speed
     * and then repeated inside the rw_enter for safety
     */
    if (!dkp->dk_alts_hdl) {
        return (NULL);
    }

    rw_enter(&dkp->dk_bbh_mutex, RW_READER);

    if (dkp->dk_alts_hdl == NULL) {
        rw_exit(&dkp->dk_bbh_mutex);
        return (NULL);
    }

    alts_used = dkp->dk_slc_cnt[part];
    if (alts_used == 0) {
        rw_exit(&dkp->dk_bbh_mutex);
        return (NULL);
    }
    altp = dkp->dk_slc_ent[part];

    /*
     * binary search for the largest bad sector index in the alternate
     * entry table which overlaps or is larger than the starting d_sec
     */
    i = cmdk_bbh_bsearch(altp, alts_used, GET_BP_SEC(bp));
    /* if starting sector is > the largest bad sector, return */
    if (i == -1) {
        rw_exit(&dkp->dk_bbh_mutex);
        return (NULL);
    }
    /* i is the starting index.  Set altp to the starting entry addr */
    altp += i;

    d_count = bp->b_bcount >> SCTRSHFT;
    lastsec = GET_BP_SEC(bp) + d_count - 1;

    /* calculate the number of bad sectors */
    for (idx = i, cnt = 0; idx < alts_used; idx++, altp++, cnt++) {
        if (lastsec < altp->bad_start)
            break;
    }

    if (!cnt) {
        rw_exit(&dkp->dk_bbh_mutex);
        return (NULL);
    }

    /* calculate the maximum number of reserved cookies */
    cnt <<= 1;
    cnt++;

    /* allocate the handle */
    hp = (struct bbh_handle *)kmem_zalloc((sizeof (*hp) +
        (cnt * sizeof (*ckp))), KM_SLEEP);

    hp->h_idx = 0;
    hp->h_totck = cnt;
    ckp = hp->h_cktab = (struct bbh_cookie *)(hp + 1);
    ckp[0].ck_sector = GET_BP_SEC(bp);
    ckp[0].ck_seclen = d_count;

    altp = dkp->dk_slc_ent[part];
    altp += i;
    for (idx = 0; i < alts_used; i++, altp++) {
        /* CASE 1: */
        if (lastsec < altp->bad_start)
            break;

        /* CASE 3: */
        if (ckp[idx].ck_sector > altp->bad_end)
            continue;

        /* CASE 2 and 7: */
        if ((ckp[idx].ck_sector >= altp->bad_start) &&
            (lastsec <= altp->bad_end)) {
            ckp[idx].ck_sector = altp->good_start +
                ckp[idx].ck_sector - altp->bad_start;
            break;
        }

        /* at least one bad sector in our section; split it */
        /* CASE 5: */
        if ((lastsec >= altp->bad_start) &&
            (lastsec <= altp->bad_end)) {
            ckp[idx + 1].ck_seclen = lastsec - altp->bad_start + 1;
            ckp[idx].ck_seclen -= ckp[idx + 1].ck_seclen;
            ckp[idx + 1].ck_sector = altp->good_start;
            break;
        }
        /* CASE 6: */
        if ((ckp[idx].ck_sector <= altp->bad_end) &&
            (ckp[idx].ck_sector >= altp->bad_start)) {
            ckp[idx + 1].ck_seclen = ckp[idx].ck_seclen;
            ckp[idx].ck_seclen = altp->bad_end -
                ckp[idx].ck_sector + 1;
            ckp[idx + 1].ck_seclen -= ckp[idx].ck_seclen;
            ckp[idx].ck_sector = altp->good_start +
                ckp[idx].ck_sector - altp->bad_start;
            idx++;
            ckp[idx].ck_sector = altp->bad_end + 1;
            continue;   /* check rest of section */
        }

        /* CASE 4: */
        ckp[idx].ck_seclen = altp->bad_start - ckp[idx].ck_sector;
        ckp[idx + 1].ck_sector = altp->good_start;
        ckp[idx + 1].ck_seclen = altp->bad_end - altp->bad_start + 1;
        idx += 2;
        ckp[idx].ck_sector = altp->bad_end + 1;
        ckp[idx].ck_seclen = lastsec - altp->bad_end;
    }

    rw_exit(&dkp->dk_bbh_mutex);
    return ((opaque_t)hp);
}
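/*
 * cmdk_bbh_bsearch() below returns the index of the entry whose bad
 * range contains the key, or of the first entry lying beyond the
 * key, or -1 when the key is past every entry; gethandle above
 * relies on that to position altp at the first entry that can
 * overlap the request.
 */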
static int
cmdk_bbh_bsearch(struct alts_ent *buf, int cnt, daddr32_t key)
{
    int i;
    int ind;
    int interval;
    int mystatus = -1;

    if (!cnt)
        return (mystatus);

    ind = 1;    /* compiler complains about possible uninitialized var */
    for (i = 1; i <= cnt; i <<= 1)
        ind = i;

    for (interval = ind; interval; ) {
        if ((key >= buf[ind - 1].bad_start) &&
            (key <= buf[ind - 1].bad_end)) {
            return (ind - 1);
        } else {
            interval >>= 1;
            if (key < buf[ind - 1].bad_start) {
                /* record the largest bad sector index */
                mystatus = ind - 1;
                if (!interval)
                    break;
                ind = ind - interval;
            } else {
                /*
                 * if key is larger than the last element
                 * then break
                 */
                if ((ind == cnt) || !interval)
                    break;
                if ((ind + interval) <= cnt)
                    ind += interval;
            }
        }
    }
    return (mystatus);
}
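/*
 * Worked example for cmdk_bbh_bsearch() above (entries invented):
 * with bad ranges {10-12, 40-41, 90-95}, a key of 41 falls inside the
 * second entry and returns index 1; a key of 50 returns 2, the first
 * entry past it; and a key of 100 lies beyond every range and
 * returns -1.
 */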