/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/scsi/scsi.h>
#include <sys/dktp/cm.h>
#include <sys/dktp/quetypes.h>
#include <sys/dktp/queue.h>
#include <sys/dktp/fctypes.h>
#include <sys/dktp/flowctrl.h>
#include <sys/dktp/cmdev.h>
#include <sys/dkio.h>
#include <sys/dktp/tgdk.h>
#include <sys/dktp/dadk.h>
#include <sys/dktp/bbh.h>
#include <sys/dktp/altsctr.h>
#include <sys/dktp/cmdk.h>

#include <sys/stat.h>
#include <sys/vtoc.h>
#include <sys/file.h>
#include <sys/dktp/dadkio.h>
#include <sys/aio_req.h>

#include <sys/cmlb.h>

/*
 * Local Static Data
 */
#ifdef CMDK_DEBUG
#define	DENT	0x0001
#define	DIO	0x0002

static	int	cmdk_debug = DIO;
#endif

#ifndef	TRUE
#define	TRUE	1
#endif

#ifndef	FALSE
#define	FALSE	0
#endif

/*
 * NDKMAP is the base number for accessing the fdisk partitions.
 * c?d?p0 --> cmdk@?,?:q
 */
#define	PARTITION0_INDEX	(NDKMAP + 0)

#define	DKTP_DATA		(dkp->dk_tgobjp)->tg_data
#define	DKTP_EXT		(dkp->dk_tgobjp)->tg_ext
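
/*
 * DKTP_DATA/DKTP_EXT reach through the tgdk target object into the
 * dadk layer.  Minor numbers encode both instance and partition:
 * cmdkprobe() builds them as (instance << CMDK_UNITSHF), and the
 * CMDKUNIT()/CMDKPART() macros (sys/dktp/cmdk.h) decode a dev_t back
 * into the instance and partition index.
 */
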
static void *cmdk_state;

/*
 * the cmdk_attach_mutex protects cmdk_max_instance in multi-threaded
 * attach situations
 */
static kmutex_t cmdk_attach_mutex;
static int cmdk_max_instance = 0;

/*
 * Panic dumpsys state
 * There is only a single flag that is not mutex locked since
 * the system is prevented from thread switching and cmdk_dump
 * will only be called in a single threaded operation.
 */
static int	cmdk_indump;

/*
 * Local Function Prototypes
 */
static int cmdk_create_obj(dev_info_t *dip, struct cmdk *dkp);
static void cmdk_destroy_obj(dev_info_t *dip, struct cmdk *dkp);
static void cmdkmin(struct buf *bp);
static int cmdkrw(dev_t dev, struct uio *uio, int flag);
static int cmdkarw(dev_t dev, struct aio_req *aio, int flag);

/*
 * Bad Block Handling Functions Prototypes
 */
static void cmdk_bbh_reopen(struct cmdk *dkp);
static opaque_t cmdk_bbh_gethandle(opaque_t bbh_data, struct buf *bp);
static bbh_cookie_t cmdk_bbh_htoc(opaque_t bbh_data, opaque_t handle);
static void cmdk_bbh_freehandle(opaque_t bbh_data, opaque_t handle);
static void cmdk_bbh_close(struct cmdk *dkp);
static void cmdk_bbh_setalts_idx(struct cmdk *dkp);
static int cmdk_bbh_bsearch(struct alts_ent *buf, int cnt, daddr32_t key);

static struct bbh_objops cmdk_bbh_ops = {
	nulldev,
	nulldev,
	cmdk_bbh_gethandle,
	cmdk_bbh_htoc,
	cmdk_bbh_freehandle,
	0, 0
};

static int cmdkopen(dev_t *dev_p, int flag, int otyp, cred_t *credp);
static int cmdkclose(dev_t dev, int flag, int otyp, cred_t *credp);
static int cmdkstrategy(struct buf *bp);
static int cmdkdump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk);
static int cmdkioctl(dev_t, int, intptr_t, int, cred_t *, int *);
static int cmdkread(dev_t dev, struct uio *uio, cred_t *credp);
static int cmdkwrite(dev_t dev, struct uio *uio, cred_t *credp);
static int cmdk_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp);
static int cmdkaread(dev_t dev, struct aio_req *aio, cred_t *credp);
static int cmdkawrite(dev_t dev, struct aio_req *aio, cred_t *credp);

/*
 * Device driver ops vector
 */

static struct cb_ops cmdk_cb_ops = {
	cmdkopen,		/* open */
	cmdkclose,		/* close */
	cmdkstrategy,		/* strategy */
	nodev,			/* print */
	cmdkdump,		/* dump */
	cmdkread,		/* read */
	cmdkwrite,		/* write */
	cmdkioctl,		/* ioctl */
	nodev,			/* devmap */
	nodev,			/* mmap */
	nodev,			/* segmap */
	nochpoll,		/* poll */
	cmdk_prop_op,		/* cb_prop_op */
	0,			/* streamtab */
	D_64BIT | D_MP | D_NEW,	/* Driver compatibility flag */
	CB_REV,			/* cb_rev */
	cmdkaread,		/* async read */
	cmdkawrite		/* async write */
};
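
/*
 * D_MP declares the driver MT-safe (the per-unit dk_mutex and
 * dk_bbh_mutex provide the locking); D_64BIT lets the framework pass
 * 64-bit file offsets through these entry points.
 */
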
static int cmdkinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
    void **result);
static int cmdkprobe(dev_info_t *dip);
static int cmdkattach(dev_info_t *dip, ddi_attach_cmd_t cmd);
static int cmdkdetach(dev_info_t *dip, ddi_detach_cmd_t cmd);

static void cmdk_setup_pm(dev_info_t *dip, struct cmdk *dkp);
static int cmdkresume(dev_info_t *dip);
static int cmdksuspend(dev_info_t *dip);
static int cmdkpower(dev_info_t *dip, int component, int level);

struct dev_ops cmdk_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* refcnt */
	cmdkinfo,		/* info */
	nulldev,		/* identify */
	cmdkprobe,		/* probe */
	cmdkattach,		/* attach */
	cmdkdetach,		/* detach */
	nodev,			/* reset */
	&cmdk_cb_ops,		/* driver operations */
	(struct bus_ops *)0,	/* bus operations */
	cmdkpower		/* power */
};

/*
 * This is the loadable module wrapper.
 */
#include <sys/modctl.h>

extern struct mod_ops mod_driverops;

static struct modldrv modldrv = {
	&mod_driverops,		/* Type of module. This one is a driver */
	"Common Direct Access Disk %I%",
	&cmdk_ops,		/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&modldrv, NULL
};

/* Function prototypes for cmlb callbacks */

static int cmdk_lb_rdwr(dev_info_t *dip, uchar_t cmd, void *bufaddr,
    diskaddr_t start, size_t length, void *tg_cookie);

static int cmdk_lb_getinfo(dev_info_t *dip, int cmd, void *arg,
    void *tg_cookie);

static void cmdk_devid_setup(struct cmdk *dkp);
static int cmdk_devid_modser(struct cmdk *dkp);
static int cmdk_get_modser(struct cmdk *dkp, int ioccmd, char *buf, int len);
static int cmdk_devid_fabricate(struct cmdk *dkp);
static int cmdk_devid_read(struct cmdk *dkp);

static cmlb_tg_ops_t cmdk_lb_ops = {
	TG_DK_OPS_VERSION_1,
	cmdk_lb_rdwr,
	cmdk_lb_getinfo
};

int
_init(void)
{
	int	rval;

	if (rval = ddi_soft_state_init(&cmdk_state, sizeof (struct cmdk), 7))
		return (rval);

	mutex_init(&cmdk_attach_mutex, NULL, MUTEX_DRIVER, NULL);
	if ((rval = mod_install(&modlinkage)) != 0) {
		mutex_destroy(&cmdk_attach_mutex);
		ddi_soft_state_fini(&cmdk_state);
	}
	return (rval);
}

int
_fini(void)
{
	return (EBUSY);

	/*
	 * This has been commented out until cmdk is a true
	 * unloadable module. Right now x86 systems are panicking on
	 * a diskless reconfig boot.
	 */

#if 0	/* bugid 1186679 */
	int	rval;

	rval = mod_remove(&modlinkage);
	if (rval != 0)
		return (rval);

	mutex_destroy(&cmdk_attach_mutex);
	ddi_soft_state_fini(&cmdk_state);

	return (0);
#endif
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*
 * Autoconfiguration Routines
 */
static int
cmdkprobe(dev_info_t *dip)
{
	int	instance;
	int	status;
	struct	cmdk	*dkp;

	instance = ddi_get_instance(dip);

	if (ddi_get_soft_state(cmdk_state, instance))
		return (DDI_PROBE_PARTIAL);

	if ((ddi_soft_state_zalloc(cmdk_state, instance) != DDI_SUCCESS) ||
	    ((dkp = ddi_get_soft_state(cmdk_state, instance)) == NULL))
		return (DDI_PROBE_PARTIAL);

	mutex_init(&dkp->dk_mutex, NULL, MUTEX_DRIVER, NULL);
	rw_init(&dkp->dk_bbh_mutex, NULL, RW_DRIVER, NULL);
	dkp->dk_dip = dip;
	mutex_enter(&dkp->dk_mutex);

	dkp->dk_dev = makedevice(ddi_driver_major(dip),
	    ddi_get_instance(dip) << CMDK_UNITSHF);

	/* linkage to dadk and strategy */
	if (cmdk_create_obj(dip, dkp) != DDI_SUCCESS) {
		mutex_exit(&dkp->dk_mutex);
		mutex_destroy(&dkp->dk_mutex);
		rw_destroy(&dkp->dk_bbh_mutex);
		ddi_soft_state_free(cmdk_state, instance);
		return (DDI_PROBE_PARTIAL);
	}

	status = dadk_probe(DKTP_DATA, KM_NOSLEEP);
	if (status != DDI_PROBE_SUCCESS) {
		cmdk_destroy_obj(dip, dkp);	/* dadk/strategy linkage */
		mutex_exit(&dkp->dk_mutex);
		mutex_destroy(&dkp->dk_mutex);
		rw_destroy(&dkp->dk_bbh_mutex);
		ddi_soft_state_free(cmdk_state, instance);
		return (status);
	}

	mutex_exit(&dkp->dk_mutex);
#ifdef CMDK_DEBUG
	if (cmdk_debug & DENT)
		PRF("cmdkprobe: instance= %d name= `%s`\n",
		    instance, ddi_get_name_addr(dip));
#endif
	return (status);
}
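
/*
 * A successful probe intentionally leaves the soft state and the dadk
 * linkage allocated; cmdkattach() finds them again through
 * ddi_get_soft_state() rather than recreating them.
 */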
static int
cmdkattach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int		instance;
	struct		cmdk *dkp;
	char		*node_type;

	switch (cmd) {
	case DDI_ATTACH:
		break;
	case DDI_RESUME:
		return (cmdkresume(dip));
	default:
		return (DDI_FAILURE);
	}

	instance = ddi_get_instance(dip);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
		return (DDI_FAILURE);

	dkp->dk_pm_level = CMDK_SPINDLE_UNINIT;
	mutex_init(&dkp->dk_mutex, NULL, MUTEX_DRIVER, NULL);

	mutex_enter(&dkp->dk_mutex);

	/* dadk_attach is an empty function that only returns SUCCESS */
	(void) dadk_attach(DKTP_DATA);

	node_type = (DKTP_EXT->tg_nodetype);

	/*
	 * this open allows cmlb to read the device
	 * and determine the label types
	 * so that cmlb can create minor nodes for the device
	 */

	/* open the target disk */
	if (dadk_open(DKTP_DATA, 0) != DDI_SUCCESS)
		goto fail2;

	/* mark as having opened target */
	dkp->dk_flag |= CMDK_TGDK_OPEN;

	cmlb_alloc_handle((cmlb_handle_t *)&dkp->dk_cmlbhandle);

	if (cmlb_attach(dip,
	    &cmdk_lb_ops,
	    DTYPE_DIRECT,		/* device_type */
	    0,				/* removable */
	    0,				/* hot pluggable XXX */
	    node_type,
	    CMLB_CREATE_ALTSLICE_VTOC_16_DTYPE_DIRECT,	/* alter_behaviour */
	    dkp->dk_cmlbhandle,
	    0) != 0)
		goto fail1;

	/* Calling validate will create minor nodes according to disk label */
	(void) cmlb_validate(dkp->dk_cmlbhandle, 0, 0);

	/* set bbh (Bad Block Handling) */
	cmdk_bbh_reopen(dkp);

	/* setup devid string */
	cmdk_devid_setup(dkp);

	mutex_enter(&cmdk_attach_mutex);
	if (instance > cmdk_max_instance)
		cmdk_max_instance = instance;
	mutex_exit(&cmdk_attach_mutex);

	mutex_exit(&dkp->dk_mutex);

	/*
	 * Add a zero-length attribute to tell the world we support
	 * kernel ioctls (for layered drivers)
	 */
	(void) ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
	    DDI_KERNEL_IOCTL, NULL, 0);
	ddi_report_dev(dip);

	/*
	 * Initialize power management
	 */
	mutex_init(&dkp->dk_pm_mutex, NULL, MUTEX_DRIVER, NULL);
	cv_init(&dkp->dk_suspend_cv, NULL, CV_DRIVER, NULL);
	cmdk_setup_pm(dip, dkp);

	return (DDI_SUCCESS);

fail1:
	cmlb_free_handle(&dkp->dk_cmlbhandle);
	(void) dadk_close(DKTP_DATA);
fail2:
	cmdk_destroy_obj(dip, dkp);
	rw_destroy(&dkp->dk_bbh_mutex);
	mutex_exit(&dkp->dk_mutex);
	mutex_destroy(&dkp->dk_mutex);
	ddi_soft_state_free(cmdk_state, instance);
	return (DDI_FAILURE);
}
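
/*
 * The fail labels above unwind in reverse order of setup: fail1 also
 * releases the cmlb handle and the target open before falling into
 * fail2, which tears down the dadk linkage, locks and soft state.
 */
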
static int
cmdkdetach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	struct cmdk	*dkp;
	int		instance;
	int		max_instance;

	switch (cmd) {
	case DDI_DETACH:
		/* return (DDI_FAILURE); */
		break;
	case DDI_SUSPEND:
		return (cmdksuspend(dip));
	default:
#ifdef CMDK_DEBUG
		if (cmdk_debug & DIO) {
			PRF("cmdkdetach: cmd = %d unknown\n", cmd);
		}
#endif
		return (DDI_FAILURE);
	}

	mutex_enter(&cmdk_attach_mutex);
	max_instance = cmdk_max_instance;
	mutex_exit(&cmdk_attach_mutex);

	/* check if any instance of the driver is open */
	for (instance = 0; instance < max_instance; instance++) {
		dkp = ddi_get_soft_state(cmdk_state, instance);
		if (!dkp)
			continue;
		if (dkp->dk_flag & CMDK_OPEN)
			return (DDI_FAILURE);
	}

	instance = ddi_get_instance(dip);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
		return (DDI_SUCCESS);

	mutex_enter(&dkp->dk_mutex);

	/*
	 * The cmdk_part_info call at the end of cmdkattach may have
	 * caused cmdk_reopen to do a TGDK_OPEN; make sure we close on
	 * detach for the case when cmdkopen/cmdkclose never occurs.
	 */
	if (dkp->dk_flag & CMDK_TGDK_OPEN) {
		dkp->dk_flag &= ~CMDK_TGDK_OPEN;
		(void) dadk_close(DKTP_DATA);
	}

	cmlb_detach(dkp->dk_cmlbhandle, 0);
	cmlb_free_handle(&dkp->dk_cmlbhandle);
	ddi_prop_remove_all(dip);

	cmdk_destroy_obj(dip, dkp);	/* dadk/strategy linkage */
	mutex_exit(&dkp->dk_mutex);
	mutex_destroy(&dkp->dk_mutex);
	rw_destroy(&dkp->dk_bbh_mutex);
	mutex_destroy(&dkp->dk_pm_mutex);
	cv_destroy(&dkp->dk_suspend_cv);
	ddi_soft_state_free(cmdk_state, instance);

	return (DDI_SUCCESS);
}

static int
cmdkinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	dev_t		dev = (dev_t)arg;
	int		instance;
	struct	cmdk	*dkp;

#ifdef lint
	dip = dip;	/* no one ever uses this */
#endif
#ifdef CMDK_DEBUG
	if (cmdk_debug & DENT)
		PRF("cmdkinfo: call\n");
#endif
	instance = CMDKUNIT(dev);

	switch (infocmd) {
	case DDI_INFO_DEVT2DEVINFO:
		if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
			return (DDI_FAILURE);
		*result = (void *) dkp->dk_dip;
		break;
	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)(intptr_t)instance;
		break;
	default:
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}

/*
 * Initialize the power management components
 */
static void
cmdk_setup_pm(dev_info_t *dip, struct cmdk *dkp)
{
	char *pm_comp[] = { "NAME=cmdk", "0=off", "1=on", NULL };

	/*
	 * Since the cmdk device does not have the 'reg' property,
	 * cpr will not call its DDI_SUSPEND/DDI_RESUME entries.
	 * The following code is to tell cpr that this device
	 * DOES need to be suspended and resumed.
	 */
	(void) ddi_prop_update_string(DDI_DEV_T_NONE, dip,
	    "pm-hardware-state", "needs-suspend-resume");

	if (ddi_prop_update_string_array(DDI_DEV_T_NONE, dip,
	    "pm-components", pm_comp, 3) == DDI_PROP_SUCCESS) {
		if (pm_raise_power(dip, 0, CMDK_SPINDLE_ON) == DDI_SUCCESS) {
			mutex_enter(&dkp->dk_pm_mutex);
			dkp->dk_pm_level = CMDK_SPINDLE_ON;
			dkp->dk_pm_is_enabled = 1;
			mutex_exit(&dkp->dk_pm_mutex);
		} else {
			mutex_enter(&dkp->dk_pm_mutex);
			dkp->dk_pm_level = CMDK_SPINDLE_OFF;
			dkp->dk_pm_is_enabled = 0;
			mutex_exit(&dkp->dk_pm_mutex);
		}
	} else {
		mutex_enter(&dkp->dk_pm_mutex);
		dkp->dk_pm_level = CMDK_SPINDLE_UNINIT;
		dkp->dk_pm_is_enabled = 0;
		mutex_exit(&dkp->dk_pm_mutex);
	}
}
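
/*
 * The component levels registered above correspond to CMDK_SPINDLE_OFF
 * and CMDK_SPINDLE_ON; cmdkpower() below accepts exactly that range
 * for component 0.
 */
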
/*
 * suspend routine; it is run when the driver receives a DDI_SUSPEND
 * command at detach(9E) from system power management
 */
static int
cmdksuspend(dev_info_t *dip)
{
	struct cmdk	*dkp;
	int		instance;
	clock_t		count = 0;

	instance = ddi_get_instance(dip);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
		return (DDI_FAILURE);
	mutex_enter(&dkp->dk_mutex);
	if (dkp->dk_flag & CMDK_SUSPEND) {
		mutex_exit(&dkp->dk_mutex);
		return (DDI_SUCCESS);
	}
	dkp->dk_flag |= CMDK_SUSPEND;

	/* need to wait a while; give up after about a minute */
	while (dadk_getcmds(DKTP_DATA) != 0) {
		delay(drv_usectohz(1000000));
		if (count > 60) {
			dkp->dk_flag &= ~CMDK_SUSPEND;
			cv_broadcast(&dkp->dk_suspend_cv);
			mutex_exit(&dkp->dk_mutex);
			return (DDI_FAILURE);
		}
		count++;
	}
	mutex_exit(&dkp->dk_mutex);
	return (DDI_SUCCESS);
}

/*
 * resume routine; it is run when the driver receives a DDI_RESUME
 * command at attach(9E) from system power management
 */
static int
cmdkresume(dev_info_t *dip)
{
	struct cmdk	*dkp;
	int		instance;

	instance = ddi_get_instance(dip);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
		return (DDI_FAILURE);
	mutex_enter(&dkp->dk_mutex);
	if (!(dkp->dk_flag & CMDK_SUSPEND)) {
		mutex_exit(&dkp->dk_mutex);
		return (DDI_FAILURE);
	}
	dkp->dk_pm_level = CMDK_SPINDLE_ON;
	dkp->dk_flag &= ~CMDK_SUSPEND;
	cv_broadcast(&dkp->dk_suspend_cv);
	mutex_exit(&dkp->dk_mutex);
	return (DDI_SUCCESS);
}

/*
 * power management entry point; it is used to change the power level
 * of a power management component.
 * The real hard drive suspend/resume is handled in ata, so this
 * function does no real work other than verifying that the disk is
 * idle.
 */
static int
cmdkpower(dev_info_t *dip, int component, int level)
{
	struct cmdk	*dkp;
	int		instance;

	instance = ddi_get_instance(dip);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)) ||
	    component != 0 || level > CMDK_SPINDLE_ON ||
	    level < CMDK_SPINDLE_OFF) {
		return (DDI_FAILURE);
	}

	mutex_enter(&dkp->dk_pm_mutex);
	if (dkp->dk_pm_is_enabled && dkp->dk_pm_level == level) {
		mutex_exit(&dkp->dk_pm_mutex);
		return (DDI_SUCCESS);
	}
	mutex_exit(&dkp->dk_pm_mutex);

	if ((level == CMDK_SPINDLE_OFF) &&
	    (dadk_getcmds(DKTP_DATA) != 0)) {
		return (DDI_FAILURE);
	}

	mutex_enter(&dkp->dk_pm_mutex);
	dkp->dk_pm_level = level;
	mutex_exit(&dkp->dk_pm_mutex);
	return (DDI_SUCCESS);
}

static int
cmdk_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
    char *name, caddr_t valuep, int *lengthp)
{
	struct	cmdk	*dkp;
	diskaddr_t	p_lblksrt;
	diskaddr_t	p_lblkcnt;

#ifdef CMDK_DEBUG
	if (cmdk_debug & DENT)
		PRF("cmdk_prop_op: call\n");
#endif

	dkp = ddi_get_soft_state(cmdk_state, ddi_get_instance(dip));

	/*
	 * Our dynamic properties are all device specific and size oriented.
	 * Requests issued under conditions where size is valid are passed
	 * to ddi_prop_op_nblocks with the size information, otherwise the
	 * request is passed to ddi_prop_op.  Size depends on valid label.
	 */
	if ((dev != DDI_DEV_T_ANY) && (dkp != NULL)) {
		if (!cmlb_partinfo(
		    dkp->dk_cmlbhandle,
		    CMDKPART(dev),
		    &p_lblkcnt,
		    &p_lblksrt,
		    NULL,
		    NULL,
		    0))
			return (ddi_prop_op_nblocks(dev, dip,
			    prop_op, mod_flags,
			    name, valuep, lengthp,
			    (uint64_t)p_lblkcnt));
	}

	return (ddi_prop_op(dev, dip,
	    prop_op, mod_flags,
	    name, valuep, lengthp));
}
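
/*
 * Illustrative consumer view (not code from this file): a layered
 * driver holding an LDI handle on a slice might issue
 *
 *	nblks = ldi_prop_get_int64(lh,
 *	    LDI_DEV_T_ANY | DDI_PROP_DONTPASS, "Nblocks", 0);
 *
 * and that request would be answered by the ddi_prop_op_nblocks()
 * call above.
 */
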
/*
 * dump routine
 */
static int
cmdkdump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk)
{
	int		instance;
	struct	cmdk	*dkp;
	diskaddr_t	p_lblksrt;
	diskaddr_t	p_lblkcnt;
	struct	buf	local;
	struct	buf	*bp;

#ifdef CMDK_DEBUG
	if (cmdk_debug & DENT)
		PRF("cmdkdump: call\n");
#endif
	instance = CMDKUNIT(dev);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)) || (blkno < 0))
		return (ENXIO);

	if (cmlb_partinfo(
	    dkp->dk_cmlbhandle,
	    CMDKPART(dev),
	    &p_lblkcnt,
	    &p_lblksrt,
	    NULL,
	    NULL,
	    0)) {
		return (ENXIO);
	}

	if ((blkno+nblk) > p_lblkcnt)
		return (EINVAL);

	cmdk_indump = 1;	/* Tell disk targets we are panic dumping */

	bp = &local;
	bzero(bp, sizeof (*bp));
	bp->b_flags = B_BUSY;
	bp->b_un.b_addr = addr;
	bp->b_bcount = nblk << SCTRSHFT;
	SET_BP_SEC(bp, ((ulong_t)(p_lblksrt + blkno)));

	(void) dadk_dump(DKTP_DATA, bp);
	return (bp->b_error);
}

/*
 * Copy in the dadkio_rwcmd according to the user's data model.  If needed,
 * convert it for our internal use.
 */
static int
rwcmd_copyin(struct dadkio_rwcmd *rwcmdp, caddr_t inaddr, int flag)
{
	switch (ddi_model_convert_from(flag)) {
	case DDI_MODEL_ILP32: {
		struct dadkio_rwcmd32 cmd32;

		if (ddi_copyin(inaddr, &cmd32,
		    sizeof (struct dadkio_rwcmd32), flag)) {
			return (EFAULT);
		}

		rwcmdp->cmd = cmd32.cmd;
		rwcmdp->flags = cmd32.flags;
		rwcmdp->blkaddr = (daddr_t)cmd32.blkaddr;
		rwcmdp->buflen = cmd32.buflen;
		rwcmdp->bufaddr = (caddr_t)(intptr_t)cmd32.bufaddr;
		/*
		 * Note: we do not convert the 'status' field,
		 * as it should not contain valid data at this
		 * point.
		 */
		bzero(&rwcmdp->status, sizeof (rwcmdp->status));
		break;
	}
	case DDI_MODEL_NONE: {
		if (ddi_copyin(inaddr, rwcmdp,
		    sizeof (struct dadkio_rwcmd), flag)) {
			return (EFAULT);
		}
	}
	}
	return (0);
}

/*
 * If necessary, convert the internal rwcmdp and status to the appropriate
 * data model and copy it out to the user.
 */
static int
rwcmd_copyout(struct dadkio_rwcmd *rwcmdp, caddr_t outaddr, int flag)
{
	switch (ddi_model_convert_from(flag)) {
	case DDI_MODEL_ILP32: {
		struct dadkio_rwcmd32 cmd32;

		cmd32.cmd = rwcmdp->cmd;
		cmd32.flags = rwcmdp->flags;
		cmd32.blkaddr = rwcmdp->blkaddr;
		cmd32.buflen = rwcmdp->buflen;
		ASSERT64(((uintptr_t)rwcmdp->bufaddr >> 32) == 0);
		cmd32.bufaddr = (caddr32_t)(uintptr_t)rwcmdp->bufaddr;

		cmd32.status.status = rwcmdp->status.status;
		cmd32.status.resid = rwcmdp->status.resid;
		cmd32.status.failed_blk_is_valid =
		    rwcmdp->status.failed_blk_is_valid;
		cmd32.status.failed_blk = rwcmdp->status.failed_blk;
		cmd32.status.fru_code_is_valid =
		    rwcmdp->status.fru_code_is_valid;
		cmd32.status.fru_code = rwcmdp->status.fru_code;

		bcopy(rwcmdp->status.add_error_info,
		    cmd32.status.add_error_info, DADKIO_ERROR_INFO_LEN);

		if (ddi_copyout(&cmd32, outaddr,
		    sizeof (struct dadkio_rwcmd32), flag))
			return (EFAULT);
		break;
	}
	case DDI_MODEL_NONE: {
		if (ddi_copyout(rwcmdp, outaddr,
		    sizeof (struct dadkio_rwcmd), flag))
			return (EFAULT);
	}
	}
	return (0);
}
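
/*
 * ddi_model_convert_from() reports DDI_MODEL_ILP32 only on a 64-bit
 * kernel serving a 32-bit application; on a 32-bit kernel the
 * DDI_MODEL_NONE branches above are the only ones taken.
 */
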
/*
 * ioctl routine
 */
static int
cmdkioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *credp, int *rvalp)
{
	int		instance;
	struct scsi_device *devp;
	struct cmdk	*dkp;
	char		data[NBPSCTR];

	instance = CMDKUNIT(dev);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
		return (ENXIO);

	mutex_enter(&dkp->dk_mutex);
	while (dkp->dk_flag & CMDK_SUSPEND) {
		cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex);
	}
	mutex_exit(&dkp->dk_mutex);

	bzero(data, sizeof (data));

	switch (cmd) {

	case DKIOCGMEDIAINFO: {
		struct dk_minfo	media_info;
		struct	tgdk_geom phyg;

		/* dadk_getphygeom always returns success */
		(void) dadk_getphygeom(DKTP_DATA, &phyg);

		media_info.dki_lbsize = phyg.g_secsiz;
		media_info.dki_capacity = phyg.g_cap;
		media_info.dki_media_type = DK_FIXED_DISK;

		if (ddi_copyout(&media_info, (void *)arg,
		    sizeof (struct dk_minfo), flag)) {
			return (EFAULT);
		} else {
			return (0);
		}
	}

	case DKIOCINFO: {
		struct dk_cinfo *info = (struct dk_cinfo *)data;

		/* controller information */
		info->dki_ctype = (DKTP_EXT->tg_ctype);
		info->dki_cnum = ddi_get_instance(ddi_get_parent(dkp->dk_dip));
		(void) strcpy(info->dki_cname,
		    ddi_get_name(ddi_get_parent(dkp->dk_dip)));

		/* Unit Information */
		info->dki_unit = ddi_get_instance(dkp->dk_dip);
		devp = ddi_get_driver_private(dkp->dk_dip);
		info->dki_slave = (CMDEV_TARG(devp)<<3) | CMDEV_LUN(devp);
		(void) strcpy(info->dki_dname, ddi_driver_name(dkp->dk_dip));
		info->dki_flags = DKI_FMTVOL;
		info->dki_partition = CMDKPART(dev);

		info->dki_maxtransfer = maxphys / DEV_BSIZE;
		info->dki_addr = 1;
		info->dki_space = 0;
		info->dki_prio = 0;
		info->dki_vec = 0;

		if (ddi_copyout(data, (void *)arg, sizeof (*info), flag))
			return (EFAULT);
		else
			return (0);
	}

	case DKIOCSTATE: {
		int	state;
		int	rval;
		diskaddr_t	p_lblksrt;
		diskaddr_t	p_lblkcnt;

		if (ddi_copyin((void *)arg, &state, sizeof (int), flag))
			return (EFAULT);

		/* dadk_check_media blocks until state changes */
		if (rval = dadk_check_media(DKTP_DATA, &state))
			return (rval);

		if (state == DKIO_INSERTED) {

			if (cmlb_validate(dkp->dk_cmlbhandle, 0, 0) != 0)
				return (ENXIO);

			if (cmlb_partinfo(dkp->dk_cmlbhandle, CMDKPART(dev),
			    &p_lblkcnt, &p_lblksrt, NULL, NULL, 0))
				return (ENXIO);

			if (p_lblkcnt <= 0)
				return (ENXIO);
		}

		if (ddi_copyout(&state, (caddr_t)arg, sizeof (int), flag))
			return (EFAULT);

		return (0);
	}

	/*
	 * is media removable?
	 */
	case DKIOCREMOVABLE: {
		int i;

		i = (DKTP_EXT->tg_rmb) ? 1 : 0;

		if (ddi_copyout(&i, (caddr_t)arg, sizeof (int), flag))
			return (EFAULT);

		return (0);
	}

	case DKIOCADDBAD:
		/*
		 * This is not an update mechanism to add bad blocks
		 * to the bad block structures stored on disk.
		 *
		 * addbadsec(1M) will update the bad block data on disk
		 * and use this ioctl to force the driver to re-initialize
		 * the list of bad blocks in the driver.
		 */

		/* start BBH */
		cmdk_bbh_reopen(dkp);
		return (0);

	case DKIOCG_PHYGEOM:
	case DKIOCG_VIRTGEOM:
	case DKIOCGGEOM:
	case DKIOCSGEOM:
	case DKIOCGAPART:
	case DKIOCSAPART:
	case DKIOCGVTOC:
	case DKIOCSVTOC:
	case DKIOCPARTINFO:
	case DKIOCGMBOOT:
	case DKIOCSMBOOT:
	case DKIOCGETEFI:
	case DKIOCSETEFI:
	case DKIOCPARTITION:
	{
		int rc;

		rc = cmlb_ioctl(dkp->dk_cmlbhandle, dev, cmd, arg, flag,
		    credp, rvalp, 0);
		if (cmd == DKIOCSVTOC)
			cmdk_devid_setup(dkp);
		return (rc);
	}

	case DIOCTL_RWCMD: {
		struct	dadkio_rwcmd *rwcmdp;
		int	status;

		rwcmdp = kmem_alloc(sizeof (struct dadkio_rwcmd), KM_SLEEP);

		status = rwcmd_copyin(rwcmdp, (caddr_t)arg, flag);

		if (status == 0) {
			bzero(&(rwcmdp->status), sizeof (struct dadkio_status));
			status = dadk_ioctl(DKTP_DATA,
			    dev,
			    cmd,
			    (uintptr_t)rwcmdp,
			    flag,
			    credp,
			    rvalp);
		}
		if (status == 0)
			status = rwcmd_copyout(rwcmdp, (caddr_t)arg, flag);

		kmem_free(rwcmdp, sizeof (struct dadkio_rwcmd));
		return (status);
	}

	default:
		return (dadk_ioctl(DKTP_DATA,
		    dev,
		    cmd,
		    arg,
		    flag,
		    credp,
		    rvalp));
	}
}
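
/*
 * Open/close accounting used by cmdkclose()/cmdkopen() below:
 *	dk_open_lyr[part]	per-partition count of layered (OTYP_LYR)
 *				opens
 *	dk_open_reg[otyp]	bitmask of partitions open for each
 *				regular open type
 *	dk_open_exl		bitmask of partitions opened with FEXCL
 * CMDK_OPEN in dk_flag is set once any open succeeds; the last close
 * of the last partition invalidates the cached cmlb label.
 */
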
/*ARGSUSED1*/
static int
cmdkclose(dev_t dev, int flag, int otyp, cred_t *credp)
{
	int		part;
	ulong_t		partbit;
	int		instance;
	struct cmdk	*dkp;
	int		lastclose = 1;
	int		i;

	instance = CMDKUNIT(dev);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)) ||
	    (otyp >= OTYPCNT))
		return (ENXIO);

	mutex_enter(&dkp->dk_mutex);

	/* check if device has been opened */
	if (!(dkp->dk_flag & CMDK_OPEN)) {
		mutex_exit(&dkp->dk_mutex);
		return (ENXIO);
	}

	while (dkp->dk_flag & CMDK_SUSPEND) {
		cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex);
	}

	part = CMDKPART(dev);
	partbit = 1 << part;

	/* account for close */
	if (otyp == OTYP_LYR) {
		if (dkp->dk_open_lyr[part])
			dkp->dk_open_lyr[part]--;
	} else
		dkp->dk_open_reg[otyp] &= ~partbit;
	dkp->dk_open_exl &= ~partbit;

	for (i = 0; i < CMDK_MAXPART; i++)
		if (dkp->dk_open_lyr[i] != 0) {
			lastclose = 0;
			break;
		}

	if (lastclose)
		for (i = 0; i < OTYPCNT; i++)
			if (dkp->dk_open_reg[i] != 0) {
				lastclose = 0;
				break;
			}

	mutex_exit(&dkp->dk_mutex);

	if (lastclose)
		cmlb_invalidate(dkp->dk_cmlbhandle, 0);

	return (DDI_SUCCESS);
}

/*ARGSUSED3*/
static int
cmdkopen(dev_t *dev_p, int flag, int otyp, cred_t *credp)
{
	dev_t		dev = *dev_p;
	int		part;
	ulong_t		partbit;
	int		instance;
	struct	cmdk	*dkp;
	diskaddr_t	p_lblksrt;
	diskaddr_t	p_lblkcnt;
	int		i;
	int		nodelay;

	instance = CMDKUNIT(dev);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
		return (ENXIO);

	if (otyp >= OTYPCNT)
		return (EINVAL);

	mutex_enter(&dkp->dk_mutex);
	while (dkp->dk_flag & CMDK_SUSPEND) {
		cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex);
	}
	mutex_exit(&dkp->dk_mutex);

	part = CMDKPART(dev);
	partbit = 1 << part;
	nodelay = (flag & (FNDELAY | FNONBLOCK));

	mutex_enter(&dkp->dk_mutex);

	if (cmlb_validate(dkp->dk_cmlbhandle, 0, 0) != 0) {

		/* fail if not doing a non-blocking open */
		if (!nodelay) {
			mutex_exit(&dkp->dk_mutex);
			return (ENXIO);
		}
	} else if (cmlb_partinfo(dkp->dk_cmlbhandle, part, &p_lblkcnt,
	    &p_lblksrt, NULL, NULL, 0) == 0) {

		if (p_lblkcnt <= 0 && (!nodelay || otyp != OTYP_CHR)) {
			mutex_exit(&dkp->dk_mutex);
			return (ENXIO);
		}
	} else {
		/* fail if not doing a non-blocking open */
		if (!nodelay) {
			mutex_exit(&dkp->dk_mutex);
			return (ENXIO);
		}
	}

	if ((DKTP_EXT->tg_rdonly) && (flag & FWRITE)) {
		mutex_exit(&dkp->dk_mutex);
		return (EROFS);
	}

	/* check for partition already opened exclusively */
	if (dkp->dk_open_exl & partbit)
		goto excl_open_fail;

	/* check if we can establish exclusive open */
	if (flag & FEXCL) {
		if (dkp->dk_open_lyr[part])
			goto excl_open_fail;
		for (i = 0; i < OTYPCNT; i++) {
			if (dkp->dk_open_reg[i] & partbit)
				goto excl_open_fail;
		}
	}

	/* open will succeed, account for open */
	dkp->dk_flag |= CMDK_OPEN;
	if (otyp == OTYP_LYR)
		dkp->dk_open_lyr[part]++;
	else
		dkp->dk_open_reg[otyp] |= partbit;
	if (flag & FEXCL)
		dkp->dk_open_exl |= partbit;

	mutex_exit(&dkp->dk_mutex);
	return (DDI_SUCCESS);

excl_open_fail:
	mutex_exit(&dkp->dk_mutex);
	return (EBUSY);
}
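
/*
 * The four read/write entry points below all funnel through
 * physio(9F)/aphysio() with cmdkstrategy() as the block transfer
 * routine and cmdkmin() as the transfer-size clamp.
 */
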
/*
 * read routine
 */
/*ARGSUSED2*/
static int
cmdkread(dev_t dev, struct uio *uio, cred_t *credp)
{
	return (cmdkrw(dev, uio, B_READ));
}

/*
 * async read routine
 */
/*ARGSUSED2*/
static int
cmdkaread(dev_t dev, struct aio_req *aio, cred_t *credp)
{
	return (cmdkarw(dev, aio, B_READ));
}

/*
 * write routine
 */
/*ARGSUSED2*/
static int
cmdkwrite(dev_t dev, struct uio *uio, cred_t *credp)
{
	return (cmdkrw(dev, uio, B_WRITE));
}

/*
 * async write routine
 */
/*ARGSUSED2*/
static int
cmdkawrite(dev_t dev, struct aio_req *aio, cred_t *credp)
{
	return (cmdkarw(dev, aio, B_WRITE));
}

static void
cmdkmin(struct buf *bp)
{
	if (bp->b_bcount > DK_MAXRECSIZE)
		bp->b_bcount = DK_MAXRECSIZE;
}

static int
cmdkrw(dev_t dev, struct uio *uio, int flag)
{
	int		instance;
	struct	cmdk	*dkp;

	instance = CMDKUNIT(dev);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
		return (ENXIO);

	mutex_enter(&dkp->dk_mutex);
	while (dkp->dk_flag & CMDK_SUSPEND) {
		cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex);
	}
	mutex_exit(&dkp->dk_mutex);

	return (physio(cmdkstrategy, (struct buf *)0, dev, flag, cmdkmin, uio));
}

static int
cmdkarw(dev_t dev, struct aio_req *aio, int flag)
{
	int		instance;
	struct	cmdk	*dkp;

	instance = CMDKUNIT(dev);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
		return (ENXIO);

	mutex_enter(&dkp->dk_mutex);
	while (dkp->dk_flag & CMDK_SUSPEND) {
		cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex);
	}
	mutex_exit(&dkp->dk_mutex);

	return (aphysio(cmdkstrategy, anocancel, dev, flag, cmdkmin, aio));
}

/*
 * strategy routine
 */
static int
cmdkstrategy(struct buf *bp)
{
	int		instance;
	struct	cmdk	*dkp;
	long		d_cnt;
	diskaddr_t	p_lblksrt;
	diskaddr_t	p_lblkcnt;

	instance = CMDKUNIT(bp->b_edev);
	if (cmdk_indump || !(dkp = ddi_get_soft_state(cmdk_state, instance)) ||
	    (dkblock(bp) < 0)) {
		bp->b_resid = bp->b_bcount;
		SETBPERR(bp, ENXIO);
		biodone(bp);
		return (0);
	}

	mutex_enter(&dkp->dk_mutex);
	while (dkp->dk_flag & CMDK_SUSPEND) {
		cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex);
	}
	mutex_exit(&dkp->dk_mutex);

	bp->b_flags &= ~(B_DONE|B_ERROR);
	bp->b_resid = 0;
	bp->av_back = NULL;

	/*
	 * only re-read the vtoc if necessary (force == FALSE)
	 */
	if (cmlb_partinfo(dkp->dk_cmlbhandle, CMDKPART(bp->b_edev),
	    &p_lblkcnt, &p_lblksrt, NULL, NULL, 0)) {
		SETBPERR(bp, ENXIO);
	}

	if ((bp->b_bcount & (NBPSCTR-1)) || (dkblock(bp) > p_lblkcnt))
		SETBPERR(bp, ENXIO);

	if ((bp->b_flags & B_ERROR) || (dkblock(bp) == p_lblkcnt)) {
		bp->b_resid = bp->b_bcount;
		biodone(bp);
		return (0);
	}
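
	/*
	 * Trim a request that runs off the end of the partition.
	 * Worked example (illustrative numbers): with p_lblkcnt = 4, a
	 * 3-sector request starting at block 2 overruns by
	 * (2 + 3 - 4) = 1 sector, so b_resid becomes one sector's worth
	 * of bytes and b_bcount is cut back to the 2 sectors that fit.
	 */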
	d_cnt = bp->b_bcount >> SCTRSHFT;
	if ((dkblock(bp) + d_cnt) > p_lblkcnt) {
		bp->b_resid = ((dkblock(bp) + d_cnt) - p_lblkcnt) << SCTRSHFT;
		bp->b_bcount -= bp->b_resid;
	}

	SET_BP_SEC(bp, ((ulong_t)(p_lblksrt + dkblock(bp))));
	if (dadk_strategy(DKTP_DATA, bp) != DDI_SUCCESS) {
		bp->b_resid += bp->b_bcount;
		biodone(bp);
	}
	return (0);
}
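
/*
 * The "queue" and "flow_control" properties read below select the
 * request queueing and flow control objects linked into dadk.
 * Illustrative values (how the properties are delivered, e.g. via a
 * .conf file, is outside this file):
 *
 *	queue="qsort" flow_control="dmult";
 */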
static int
cmdk_create_obj(dev_info_t *dip, struct cmdk *dkp)
{
	struct scsi_device *devp;
	opaque_t	queobjp = NULL;
	opaque_t	flcobjp = NULL;
	char		que_keyvalp[64];
	int		que_keylen;
	char		flc_keyvalp[64];
	int		flc_keylen;

	ASSERT(mutex_owned(&dkp->dk_mutex));

	/* Create linkage to queueing routines based on property */
	que_keylen = sizeof (que_keyvalp);
	if (ddi_prop_op(DDI_DEV_T_NONE, dip, PROP_LEN_AND_VAL_BUF,
	    DDI_PROP_CANSLEEP, "queue", que_keyvalp, &que_keylen) !=
	    DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN, "cmdk_create_obj: queue property undefined");
		return (DDI_FAILURE);
	}
	que_keyvalp[que_keylen] = (char)0;

	if (strcmp(que_keyvalp, "qfifo") == 0) {
		queobjp = (opaque_t)qfifo_create();
	} else if (strcmp(que_keyvalp, "qsort") == 0) {
		queobjp = (opaque_t)qsort_create();
	} else {
		return (DDI_FAILURE);
	}

	/* Create linkage to dequeueing routines based on property */
	flc_keylen = sizeof (flc_keyvalp);
	if (ddi_prop_op(DDI_DEV_T_NONE, dip, PROP_LEN_AND_VAL_BUF,
	    DDI_PROP_CANSLEEP, "flow_control", flc_keyvalp, &flc_keylen) !=
	    DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN,
		    "cmdk_create_obj: flow-control property undefined");
		return (DDI_FAILURE);
	}

	flc_keyvalp[flc_keylen] = (char)0;

	if (strcmp(flc_keyvalp, "dsngl") == 0) {
		flcobjp = (opaque_t)dsngl_create();
	} else if (strcmp(flc_keyvalp, "dmult") == 0) {
		flcobjp = (opaque_t)dmult_create();
	} else {
		return (DDI_FAILURE);
	}

	/* populate bbh_obj object stored in dkp */
	dkp->dk_bbh_obj.bbh_data = dkp;
	dkp->dk_bbh_obj.bbh_ops = &cmdk_bbh_ops;

	/* create linkage to dadk */
	dkp->dk_tgobjp = (opaque_t)dadk_create();

	devp = ddi_get_driver_private(dip);
	(void) dadk_init(DKTP_DATA, devp, flcobjp, queobjp, &dkp->dk_bbh_obj,
	    NULL);

	return (DDI_SUCCESS);
}

static void
cmdk_destroy_obj(dev_info_t *dip, struct cmdk *dkp)
{
	char		que_keyvalp[64];
	int		que_keylen;
	char		flc_keyvalp[64];
	int		flc_keylen;

	ASSERT(mutex_owned(&dkp->dk_mutex));

	(void) dadk_free((dkp->dk_tgobjp));
	dkp->dk_tgobjp = NULL;

	que_keylen = sizeof (que_keyvalp);
	if (ddi_prop_op(DDI_DEV_T_NONE, dip, PROP_LEN_AND_VAL_BUF,
	    DDI_PROP_CANSLEEP, "queue", que_keyvalp, &que_keylen) !=
	    DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN, "cmdk_destroy_obj: queue property undefined");
		return;
	}
	que_keyvalp[que_keylen] = (char)0;

	flc_keylen = sizeof (flc_keyvalp);
	if (ddi_prop_op(DDI_DEV_T_NONE, dip, PROP_LEN_AND_VAL_BUF,
	    DDI_PROP_CANSLEEP, "flow_control", flc_keyvalp, &flc_keylen) !=
	    DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN,
		    "cmdk_destroy_obj: flow-control property undefined");
		return;
	}
	flc_keyvalp[flc_keylen] = (char)0;
}

/*ARGSUSED5*/
static int
cmdk_lb_rdwr(dev_info_t *dip, uchar_t cmd, void *bufaddr,
    diskaddr_t start, size_t count, void *tg_cookie)
{
	struct	cmdk	*dkp;
	opaque_t	handle;
	int		rc = 0;
	char		*bufa;

	dkp = ddi_get_soft_state(cmdk_state, ddi_get_instance(dip));
	if (dkp == NULL)
		return (ENXIO);

	if (cmd != TG_READ && cmd != TG_WRITE)
		return (EINVAL);

	/* count must be multiple of 512 */
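	/*
	 * e.g. with NBPSCTR = 512, a count of 700 rounds up to
	 * (700 + 511) & -512 == 1024.
	 */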
	count = (count + NBPSCTR - 1) & -NBPSCTR;
	handle = dadk_iob_alloc(DKTP_DATA, start, count, KM_SLEEP);
	if (!handle)
		return (ENOMEM);

	if (cmd == TG_READ) {
		bufa = dadk_iob_xfer(DKTP_DATA, handle, B_READ);
		if (!bufa)
			rc = EIO;
		else
			bcopy(bufa, bufaddr, count);
	} else {
		bufa = dadk_iob_htoc(DKTP_DATA, handle);
		bcopy(bufaddr, bufa, count);
		bufa = dadk_iob_xfer(DKTP_DATA, handle, B_WRITE);
		if (!bufa)
			rc = EIO;
	}
	(void) dadk_iob_free(DKTP_DATA, handle);

	return (rc);
}

/*ARGSUSED3*/
static int
cmdk_lb_getinfo(dev_info_t *dip, int cmd, void *arg, void *tg_cookie)
{
	struct	cmdk	*dkp;
	struct	tgdk_geom phyg;

	dkp = ddi_get_soft_state(cmdk_state, ddi_get_instance(dip));
	if (dkp == NULL)
		return (ENXIO);

	switch (cmd) {
	case TG_GETPHYGEOM: {
		cmlb_geom_t *phygeomp = (cmlb_geom_t *)arg;

		/* dadk_getphygeom always returns success */
		(void) dadk_getphygeom(DKTP_DATA, &phyg);

		phygeomp->g_capacity	= phyg.g_cap;
		phygeomp->g_nsect	= phyg.g_sec;
		phygeomp->g_nhead	= phyg.g_head;
		phygeomp->g_acyl	= phyg.g_acyl;
		phygeomp->g_ncyl	= phyg.g_cyl;
		phygeomp->g_secsize	= phyg.g_secsiz;
		phygeomp->g_intrlv	= 1;
		phygeomp->g_rpm		= 3600;

		return (0);
	}

	case TG_GETVIRTGEOM: {
		cmlb_geom_t *virtgeomp = (cmlb_geom_t *)arg;
		diskaddr_t	capacity;

		(void) dadk_getgeom(DKTP_DATA, &phyg);
		capacity = phyg.g_cap;

		/*
		 * If the controller returned us something that doesn't
		 * really fit into an Int 13/function 8 geometry
		 * result, just fail the ioctl.  See PSARC 1998/313.
		 */
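		/*
		 * That ceiling is 63 sectors/track * 254 heads *
		 * 1024 cylinders = 16,386,048 sectors, i.e. roughly
		 * 7.8 GiB with 512-byte sectors (the classic BIOS CHS
		 * limit).
		 */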
		if (capacity < 0 || capacity >= 63 * 254 * 1024)
			return (EINVAL);

		virtgeomp->g_capacity	= capacity;
		virtgeomp->g_nsect	= 63;
		virtgeomp->g_nhead	= 254;
		virtgeomp->g_ncyl	= capacity / (63 * 254);
		virtgeomp->g_acyl	= 0;
		virtgeomp->g_secsize	= 512;
		virtgeomp->g_intrlv	= 1;
		virtgeomp->g_rpm	= 3600;

		return (0);
	}

	case TG_GETCAPACITY:
	case TG_GETBLOCKSIZE:
	{
		/* dadk_getphygeom always returns success */
		(void) dadk_getphygeom(DKTP_DATA, &phyg);
		if (cmd == TG_GETCAPACITY)
			*(diskaddr_t *)arg = phyg.g_cap;
		else
			*(uint32_t *)arg = (uint32_t)phyg.g_secsiz;

		return (0);
	}

	case TG_GETATTR: {
		tg_attribute_t *tgattribute = (tg_attribute_t *)arg;

		if ((DKTP_EXT->tg_rdonly))
			tgattribute->media_is_writable = FALSE;
		else
			tgattribute->media_is_writable = TRUE;

		return (0);
	}

	default:
		return (ENOTTY);
	}
}

/*
 * Create and register the devid.
 * There are 4 different ways we can get a device id:
 *    1. Already have one - nothing to do
 *    2. Build one from the drive's model and serial numbers
 *    3. Read one from the disk (first sector of last track)
 *    4. Fabricate one and write it on the disk.
 * If any of these succeeds, register the device ID
 */
static void
cmdk_devid_setup(struct cmdk *dkp)
{
	int	rc;

	/* Try options until one succeeds, or all have failed */

	/* 1. All done if already registered */
	if (dkp->dk_devid != NULL)
		return;

	/* 2. Build a devid from the model and serial number */
	rc = cmdk_devid_modser(dkp);
	if (rc != DDI_SUCCESS) {
		/* 3. Read devid from the disk, if present */
		rc = cmdk_devid_read(dkp);

		/* 4. otherwise make one up and write it on the disk */
		if (rc != DDI_SUCCESS)
			rc = cmdk_devid_fabricate(dkp);
	}

	/* If we managed to get a devid any of the above ways, register it */
	if (rc == DDI_SUCCESS)
		(void) ddi_devid_register(dkp->dk_dip, dkp->dk_devid);
}

/*
 * Build a devid from the model and serial number
 * Return DDI_SUCCESS or DDI_FAILURE.
 */
static int
cmdk_devid_modser(struct cmdk *dkp)
{
	int	rc = DDI_FAILURE;
	char	*hwid;
	int	modlen;
	int	serlen;

	/*
	 * device ID is a concatenation of model number, '=', serial number.
	 */
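	/*
	 * Illustrative result (made-up strings): a drive reporting
	 * model "XYZ1234" and serial "A98765" yields the hwid
	 * "XYZ1234=A98765".
	 */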
	hwid = kmem_alloc(CMDK_HWIDLEN, KM_SLEEP);
	modlen = cmdk_get_modser(dkp, DIOCTL_GETMODEL, hwid, CMDK_HWIDLEN);
	if (modlen == 0) {
		rc = DDI_FAILURE;
		goto err;
	}
	hwid[modlen++] = '=';
	serlen = cmdk_get_modser(dkp, DIOCTL_GETSERIAL,
	    hwid + modlen, CMDK_HWIDLEN - modlen);
	if (serlen == 0) {
		rc = DDI_FAILURE;
		goto err;
	}
	hwid[modlen + serlen] = 0;

	/* Initialize the device ID, trailing NULL not included */
	rc = ddi_devid_init(dkp->dk_dip, DEVID_ATA_SERIAL, modlen + serlen,
	    hwid, (ddi_devid_t *)&dkp->dk_devid);
	if (rc != DDI_SUCCESS) {
		rc = DDI_FAILURE;
		goto err;
	}

	rc = DDI_SUCCESS;

err:
	kmem_free(hwid, CMDK_HWIDLEN);
	return (rc);
}

static int
cmdk_get_modser(struct cmdk *dkp, int ioccmd, char *buf, int len)
{
	dadk_ioc_string_t strarg;
	int		rval;
	char		*s;
	char		ch;
	boolean_t	ret;
	int		i;
	int		tb;

	strarg.is_buf = buf;
	strarg.is_size = len;
	if (dadk_ioctl(DKTP_DATA,
	    dkp->dk_dev,
	    ioccmd,
	    (uintptr_t)&strarg,
	    FNATIVE | FKIOCTL,
	    NULL,
	    &rval) != 0)
		return (0);

	/*
	 * a valid model/serial string must contain a character other
	 * than space, NUL or '0'; trim trailing spaces/NULs
	 */
	ret = B_FALSE;
	s = buf;
	for (i = 0; i < strarg.is_size; i++) {
		ch = *s++;
		if (ch != ' ' && ch != '\0')
			tb = i + 1;
		if (ch != ' ' && ch != '\0' && ch != '0')
			ret = B_TRUE;
	}

	if (ret == B_FALSE)
		return (0);

	return (tb);
}

/*
 * Read a devid from the first block of the last track of
 * the last cylinder.  Make sure what we read is a valid devid.
 * Return DDI_SUCCESS or DDI_FAILURE.
 */
static int
cmdk_devid_read(struct cmdk *dkp)
{
	diskaddr_t	blk;
	struct dk_devid *dkdevidp;
	uint_t		*ip;
	int		chksum;
	int		i, sz;
	tgdk_iob_handle	handle = NULL;	/* NULL so the err path is safe */
	int		rc = DDI_FAILURE;

	if (cmlb_get_devid_block(dkp->dk_cmlbhandle, &blk, 0))
		goto err;

	/* read the devid */
	handle = dadk_iob_alloc(DKTP_DATA, blk, NBPSCTR, KM_SLEEP);
	if (handle == NULL)
		goto err;

	dkdevidp = (struct dk_devid *)dadk_iob_xfer(DKTP_DATA, handle, B_READ);
	if (dkdevidp == NULL)
		goto err;

	/* Validate the revision */
	if ((dkdevidp->dkd_rev_hi != DK_DEVID_REV_MSB) ||
	    (dkdevidp->dkd_rev_lo != DK_DEVID_REV_LSB))
		goto err;
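
	/*
	 * The on-disk checksum is the XOR of every word in the sector
	 * except the last, which is where DKD_GETCHKSUM()/
	 * DKD_FORMCHKSUM() keep the checksum itself.
	 */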
	/* Calculate the checksum */
	chksum = 0;
	ip = (uint_t *)dkdevidp;
	for (i = 0; i < ((NBPSCTR - sizeof (int))/sizeof (int)); i++)
		chksum ^= ip[i];
	if (DKD_GETCHKSUM(dkdevidp) != chksum)
		goto err;

	/* Validate the device id */
	if (ddi_devid_valid((ddi_devid_t)dkdevidp->dkd_devid) != DDI_SUCCESS)
		goto err;

	/* keep a copy of the device id */
	sz = ddi_devid_sizeof((ddi_devid_t)dkdevidp->dkd_devid);
	dkp->dk_devid = kmem_alloc(sz, KM_SLEEP);
	bcopy(dkdevidp->dkd_devid, dkp->dk_devid, sz);

	rc = DDI_SUCCESS;

err:
	if (handle != NULL)
		(void) dadk_iob_free(DKTP_DATA, handle);
	return (rc);
}

/*
 * Create a devid and write it on the first block of the last track of
 * the last cylinder.
 * Return DDI_SUCCESS or DDI_FAILURE.
 */
static int
cmdk_devid_fabricate(struct cmdk *dkp)
{
	ddi_devid_t	devid = NULL;	/* devid made by ddi_devid_init */
	struct dk_devid	*dkdevidp;	/* devid struct stored on disk */
	diskaddr_t	blk;
	tgdk_iob_handle	handle = NULL;
	uint_t		*ip, chksum;
	int		i;
	int		rc;

	rc = ddi_devid_init(dkp->dk_dip, DEVID_FAB, 0, NULL, &devid);
	if (rc != DDI_SUCCESS)
		goto err;

	if (cmlb_get_devid_block(dkp->dk_cmlbhandle, &blk, 0)) {
		/* no device id block address; free the fabricated devid */
		rc = DDI_FAILURE;
		goto err;
	}

	handle = dadk_iob_alloc(DKTP_DATA, blk, NBPSCTR, KM_SLEEP);
	if (!handle) {
		rc = DDI_FAILURE;
		goto err;
	}

	/* Locate the buffer */
	dkdevidp = (struct dk_devid *)dadk_iob_htoc(DKTP_DATA, handle);

	/* Fill in the revision */
	bzero(dkdevidp, NBPSCTR);
	dkdevidp->dkd_rev_hi = DK_DEVID_REV_MSB;
	dkdevidp->dkd_rev_lo = DK_DEVID_REV_LSB;

	/* Copy in the device id */
	i = ddi_devid_sizeof(devid);
	if (i > DK_DEVID_SIZE) {
		rc = DDI_FAILURE;
		goto err;
	}
	bcopy(devid, dkdevidp->dkd_devid, i);

	/* Calculate the chksum */
	chksum = 0;
	ip = (uint_t *)dkdevidp;
	for (i = 0; i < ((NBPSCTR - sizeof (int))/sizeof (int)); i++)
		chksum ^= ip[i];

	/* Fill in the checksum */
	DKD_FORMCHKSUM(chksum, dkdevidp);

	/* write the devid */
	(void) dadk_iob_xfer(DKTP_DATA, handle, B_WRITE);

	dkp->dk_devid = devid;

	rc = DDI_SUCCESS;

err:
	if (handle != NULL)
		(void) dadk_iob_free(DKTP_DATA, handle);

	if (rc != DDI_SUCCESS && devid != NULL)
		ddi_devid_free(devid);

	return (rc);
}

static void
cmdk_bbh_free_alts(struct cmdk *dkp)
{
	if (dkp->dk_alts_hdl) {
		(void) dadk_iob_free(DKTP_DATA, dkp->dk_alts_hdl);
		kmem_free(dkp->dk_slc_cnt,
		    NDKMAP * (sizeof (uint32_t) + sizeof (struct alts_ent *)));
		dkp->dk_alts_hdl = NULL;
	}
}

static void
cmdk_bbh_reopen(struct cmdk *dkp)
{
	tgdk_iob_handle		handle = NULL;
	diskaddr_t		slcb, slcn, slce;
	struct	alts_parttbl	*ap;
	struct	alts_ent	*enttblp;
	uint32_t		altused;
	uint32_t		altbase;
	uint32_t		altlast;
	int			alts;
	uint16_t		vtoctag;
	int			i, j;

	/* find slice with V_ALTSCTR tag */
	for (alts = 0; alts < NDKMAP; alts++) {
		if (cmlb_partinfo(
		    dkp->dk_cmlbhandle,
		    alts,
		    &slcn,
		    &slcb,
		    NULL,
		    &vtoctag,
		    0)) {
			goto empty;	/* no partition table exists */
		}

		if (vtoctag == V_ALTSCTR && slcn > 1)
			break;
	}
	if (alts >= NDKMAP) {
		goto empty;	/* no V_ALTSCTR slice defined */
	}

	/* read in ALTS label block */
	handle = dadk_iob_alloc(DKTP_DATA, slcb, NBPSCTR, KM_SLEEP);
	if (!handle) {
		goto empty;
	}

	ap = (struct alts_parttbl *)dadk_iob_xfer(DKTP_DATA, handle, B_READ);
	if (!ap || (ap->alts_sanity != ALTS_SANITY)) {
		goto empty;
	}

	altused = ap->alts_ent_used;	/* number of BB entries */
	altbase = ap->alts_ent_base;	/* blk offset from begin slice */
	altlast = ap->alts_ent_end;	/* blk offset to last block */
	/* ((altused * sizeof (struct alts_ent) + NBPSCTR - 1) & ~NBPSCTR) */

	if (altused == 0 ||
	    altbase < 1 ||
	    altbase > altlast ||
	    altlast >= slcn) {
		goto empty;
	}
	(void) dadk_iob_free(DKTP_DATA, handle);

	/* read in ALTS remapping table */
	handle = dadk_iob_alloc(DKTP_DATA,
	    slcb + altbase,
	    (altlast - altbase + 1) << SCTRSHFT, KM_SLEEP);
	if (!handle) {
		goto empty;
	}

	enttblp = (struct alts_ent *)dadk_iob_xfer(DKTP_DATA, handle, B_READ);
	if (!enttblp) {
		goto empty;
	}

	rw_enter(&dkp->dk_bbh_mutex, RW_WRITER);

	/* allocate space for dk_slc_cnt and dk_slc_ent tables */
	if (dkp->dk_slc_cnt == NULL) {
		/* sized to match the kmem_free in cmdk_bbh_free_alts */
		dkp->dk_slc_cnt = kmem_alloc(NDKMAP *
		    (sizeof (uint32_t) + sizeof (struct alts_ent *)), KM_SLEEP);
	}
	dkp->dk_slc_ent = (struct alts_ent **)(dkp->dk_slc_cnt + NDKMAP);

	/* free previous BB table (if any) */
	if (dkp->dk_alts_hdl) {
		(void) dadk_iob_free(DKTP_DATA, dkp->dk_alts_hdl);
		dkp->dk_alts_hdl = NULL;
		dkp->dk_altused = 0;
	}

	/* save linkage to new BB table */
	dkp->dk_alts_hdl = handle;
	dkp->dk_altused = altused;

	/*
	 * build indexes to BB table by slice
	 * effectively we have
	 *	struct alts_ent *enttblp[altused];
	 *
	 *	uint32_t	dk_slc_cnt[NDKMAP];
	 *	struct alts_ent *dk_slc_ent[NDKMAP];
	 */
	for (i = 0; i < NDKMAP; i++) {
		if (cmlb_partinfo(
		    dkp->dk_cmlbhandle,
		    i,
		    &slcn,
		    &slcb,
		    NULL,
		    NULL,
		    0)) {
			goto empty1;
		}

		dkp->dk_slc_cnt[i] = 0;
		if (slcn == 0)
			continue;	/* slice is not allocated */

		/* last block in slice */
		slce = slcb + slcn - 1;

		/* find the first remap entry at or after the start of slice */
		for (j = 0; j < altused; j++) {
			if (enttblp[j].bad_start + enttblp[j].bad_end >= slcb)
				break;
		}
		dkp->dk_slc_ent[i] = enttblp + j;

		/* count remap entries until the end of the slice */
		for (; j < altused && enttblp[j].bad_start <= slce; j++) {
			dkp->dk_slc_cnt[i] += 1;
		}
	}

	rw_exit(&dkp->dk_bbh_mutex);
	return;

empty:
	rw_enter(&dkp->dk_bbh_mutex, RW_WRITER);
empty1:
	if (handle && handle != dkp->dk_alts_hdl)
		(void) dadk_iob_free(DKTP_DATA, handle);

	if (dkp->dk_alts_hdl) {
		(void) dadk_iob_free(DKTP_DATA, dkp->dk_alts_hdl);
		dkp->dk_alts_hdl = NULL;
	}

	rw_exit(&dkp->dk_bbh_mutex);
}

/*ARGSUSED*/
static bbh_cookie_t
cmdk_bbh_htoc(opaque_t bbh_data, opaque_t handle)
{
	struct	bbh_handle *hp;
	bbh_cookie_t ckp;

	hp = (struct bbh_handle *)handle;
	ckp = hp->h_cktab + hp->h_idx;
	hp->h_idx++;
	return (ckp);
}

/*ARGSUSED*/
static void
cmdk_bbh_freehandle(opaque_t bbh_data, opaque_t handle)
{
	struct	bbh_handle *hp;

	hp = (struct bbh_handle *)handle;
	kmem_free(handle, (sizeof (struct bbh_handle) +
	    (hp->h_totck * (sizeof (struct bbh_cookie)))));
}


/*
 * cmdk_bbh_gethandle remaps the bad sectors to alternates.
 * There are 7 different cases when the comparison is made
 * between the bad sector cluster and the disk section.
 *
 *	bad sector cluster	gggggggggggbbbbbbbggggggggggg
 *	case 1:			   ddddd
 *	case 2:				   -d-----
 *	case 3:					     ddddd
 *	case 4:				dddddddddddd
 *	case 5:			      ddddddd-----
 *	case 6:				   ---ddddddd
 *	case 7:				   ddddddd
 *
 * where:  g = good sector,	b = bad sector
 *	   d = sector in disk section
 *	   - = disk section may be extended to cover those disk area
 */
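
/*
 * Worked example for CASE 4 (illustrative numbers): a request for
 * sectors 100-109 crossing a bad range 103-104 that is remapped at
 * good_start 9000 is split into three cookies:
 *	{ sector 100, len 3 }, { sector 9000, len 2 }, { sector 105, len 5 }
 */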
static opaque_t
cmdk_bbh_gethandle(opaque_t bbh_data, struct buf *bp)
{
	struct cmdk		*dkp = (struct cmdk *)bbh_data;
	struct bbh_handle	*hp;
	struct bbh_cookie	*ckp;
	struct alts_ent		*altp;
	uint32_t		alts_used;
	uint32_t		part = CMDKPART(bp->b_edev);
	daddr32_t		lastsec;
	long			d_count;
	int			i;
	int			idx;
	int			cnt;

	if (part >= V_NUMPAR)
		return (NULL);

	/*
	 * This if statement is atomic and it will succeed
	 * if there are no bad blocks (almost always)
	 *
	 * so this if is performed outside of the rw_enter for speed
	 * and then repeated inside the rw_enter for safety
	 */
	if (!dkp->dk_alts_hdl) {
		return (NULL);
	}

	rw_enter(&dkp->dk_bbh_mutex, RW_READER);

	if (dkp->dk_alts_hdl == NULL) {
		rw_exit(&dkp->dk_bbh_mutex);
		return (NULL);
	}

	alts_used = dkp->dk_slc_cnt[part];
	if (alts_used == 0) {
		rw_exit(&dkp->dk_bbh_mutex);
		return (NULL);
	}
	altp = dkp->dk_slc_ent[part];

	/*
	 * binary search the alternate entry table for the entry that
	 * overlaps, or the first that lies above, the starting sector
	 */
	i = cmdk_bbh_bsearch(altp, alts_used, GET_BP_SEC(bp));
	/* if starting sector is > the largest bad sector, return */
	if (i == -1) {
		rw_exit(&dkp->dk_bbh_mutex);
		return (NULL);
	}
	/* i is the starting index.  Set altp to the starting entry addr */
	altp += i;

	d_count = bp->b_bcount >> SCTRSHFT;
	lastsec = GET_BP_SEC(bp) + d_count - 1;

	/* calculate the number of bad sectors */
	for (idx = i, cnt = 0; idx < alts_used; idx++, altp++, cnt++) {
		if (lastsec < altp->bad_start)
			break;
	}

	if (!cnt) {
		rw_exit(&dkp->dk_bbh_mutex);
		return (NULL);
	}

	/* calculate the maximum number of reserved cookies */
	cnt <<= 1;
	cnt++;

	/* allocate the handle */
	hp = (struct bbh_handle *)kmem_zalloc((sizeof (*hp) +
	    (cnt * sizeof (*ckp))), KM_SLEEP);

	hp->h_idx = 0;
	hp->h_totck = cnt;
	ckp = hp->h_cktab = (struct bbh_cookie *)(hp + 1);
	ckp[0].ck_sector = GET_BP_SEC(bp);
	ckp[0].ck_seclen = d_count;

	altp = dkp->dk_slc_ent[part];
	altp += i;
	for (idx = 0; i < alts_used; i++, altp++) {
		/* CASE 1: */
		if (lastsec < altp->bad_start)
			break;

		/* CASE 3: */
		if (ckp[idx].ck_sector > altp->bad_end)
			continue;

		/* CASE 2 and 7: */
		if ((ckp[idx].ck_sector >= altp->bad_start) &&
		    (lastsec <= altp->bad_end)) {
			ckp[idx].ck_sector = altp->good_start +
			    ckp[idx].ck_sector - altp->bad_start;
			break;
		}

		/* at least one bad sector in our section.  break it. */
		/* CASE 5: */
		if ((lastsec >= altp->bad_start) &&
		    (lastsec <= altp->bad_end)) {
			ckp[idx+1].ck_seclen = lastsec - altp->bad_start + 1;
			ckp[idx].ck_seclen -= ckp[idx+1].ck_seclen;
			ckp[idx+1].ck_sector = altp->good_start;
			break;
		}
		/* CASE 6: */
		if ((ckp[idx].ck_sector <= altp->bad_end) &&
		    (ckp[idx].ck_sector >= altp->bad_start)) {
			ckp[idx+1].ck_seclen = ckp[idx].ck_seclen;
			ckp[idx].ck_seclen = altp->bad_end -
			    ckp[idx].ck_sector + 1;
			ckp[idx+1].ck_seclen -= ckp[idx].ck_seclen;
			ckp[idx].ck_sector = altp->good_start +
			    ckp[idx].ck_sector - altp->bad_start;
			idx++;
			ckp[idx].ck_sector = altp->bad_end + 1;
			continue;	/* check rest of section */
		}

		/* CASE 4: */
		ckp[idx].ck_seclen = altp->bad_start - ckp[idx].ck_sector;
		ckp[idx+1].ck_sector = altp->good_start;
		ckp[idx+1].ck_seclen = altp->bad_end - altp->bad_start + 1;
		idx += 2;
		ckp[idx].ck_sector = altp->bad_end + 1;
		ckp[idx].ck_seclen = lastsec - altp->bad_end;
	}

	rw_exit(&dkp->dk_bbh_mutex);
	return ((opaque_t)hp);
}
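
/*
 * Return the index of the entry whose bad range contains key, or the
 * index of the first entry whose range lies entirely above key;
 * return -1 if key is beyond every bad range.  Example (illustrative):
 * for ranges {10-12, 40-44}, key 20 and key 41 both return 1, key 5
 * returns 0, and key 50 returns -1.
 */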
static int
cmdk_bbh_bsearch(struct alts_ent *buf, int cnt, daddr32_t key)
{
	int	i;
	int	ind;
	int	interval;
	int	mystatus = -1;

	if (!cnt)
		return (mystatus);

	ind = 1; /* compiler complains about possible uninitialized var */
	for (i = 1; i <= cnt; i <<= 1)
		ind = i;

	for (interval = ind; interval; ) {
		if ((key >= buf[ind-1].bad_start) &&
		    (key <= buf[ind-1].bad_end)) {
			return (ind-1);
		} else {
			interval >>= 1;
			if (key < buf[ind-1].bad_start) {
				/* record the largest bad sector index */
				mystatus = ind-1;
				if (!interval)
					break;
				ind = ind - interval;
			} else {
				/*
				 * if key is larger than the last element
				 * then break
				 */
				if ((ind == cnt) || !interval)
					break;
				if ((ind+interval) <= cnt)
					ind += interval;
			}
		}
	}
	return (mystatus);
}