1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2007 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 #pragma ident "%Z%%M% %I% %E% SMI" 27 28 #include <sys/scsi/scsi.h> 29 #include <sys/dktp/cm.h> 30 #include <sys/dktp/quetypes.h> 31 #include <sys/dktp/queue.h> 32 #include <sys/dktp/fctypes.h> 33 #include <sys/dktp/flowctrl.h> 34 #include <sys/dktp/cmdev.h> 35 #include <sys/dkio.h> 36 #include <sys/dktp/tgdk.h> 37 #include <sys/dktp/dadk.h> 38 #include <sys/dktp/bbh.h> 39 #include <sys/dktp/altsctr.h> 40 #include <sys/dktp/cmdk.h> 41 42 #include <sys/stat.h> 43 #include <sys/vtoc.h> 44 #include <sys/file.h> 45 #include <sys/dktp/dadkio.h> 46 #include <sys/aio_req.h> 47 48 #include <sys/cmlb.h> 49 50 /* 51 * Local Static Data 52 */ 53 #ifdef CMDK_DEBUG 54 #define DENT 0x0001 55 #define DIO 0x0002 56 57 static int cmdk_debug = DIO; 58 #endif 59 60 #ifndef TRUE 61 #define TRUE 1 62 #endif 63 64 #ifndef FALSE 65 #define FALSE 0 66 #endif 67 68 /* 69 * NDKMAP is the base number for accessing the fdisk partitions. 
 * c?d?p0 --> cmdk@?,?:q
 */
#define	PARTITION0_INDEX	(NDKMAP + 0)

/* Shorthands for the dadk target-driver object stored in the soft state */
#define	DKTP_DATA		(dkp->dk_tgobjp)->tg_data
#define	DKTP_EXT		(dkp->dk_tgobjp)->tg_ext

/* Soft-state anchor for all cmdk instances (see ddi_soft_state(9F)) */
static void *cmdk_state;

/*
 * the cmdk_attach_mutex protects cmdk_max_instance in multi-threaded
 * attach situations
 */
static kmutex_t cmdk_attach_mutex;
static int cmdk_max_instance = 0;

/*
 * Panic dumpsys state
 * There is only a single flag that is not mutex locked since
 * the system is prevented from thread switching and cmdk_dump
 * will only be called in a single threaded operation.
 */
static int	cmdk_indump;

/*
 * Local Function Prototypes
 */
static int cmdk_create_obj(dev_info_t *dip, struct cmdk *dkp);
static void cmdk_destroy_obj(dev_info_t *dip, struct cmdk *dkp);
static void cmdkmin(struct buf *bp);
static int cmdkrw(dev_t dev, struct uio *uio, int flag);
static int cmdkarw(dev_t dev, struct aio_req *aio, int flag);

/*
 * Bad Block Handling Functions Prototypes
 */
static void cmdk_bbh_reopen(struct cmdk *dkp);
static opaque_t cmdk_bbh_gethandle(opaque_t bbh_data, struct buf *bp);
static bbh_cookie_t cmdk_bbh_htoc(opaque_t bbh_data, opaque_t handle);
static void cmdk_bbh_freehandle(opaque_t bbh_data, opaque_t handle);
static void cmdk_bbh_close(struct cmdk *dkp);
static void cmdk_bbh_setalts_idx(struct cmdk *dkp);
static int cmdk_bbh_bsearch(struct alts_ent *buf, int cnt, daddr32_t key);

/*
 * Bad-block-handling ops vector handed to dadk via dk_bbh_obj;
 * init/free slots are nulldev (no-ops).
 */
static struct bbh_objops cmdk_bbh_ops = {
	nulldev,
	nulldev,
	cmdk_bbh_gethandle,
	cmdk_bbh_htoc,
	cmdk_bbh_freehandle,
	0, 0
};

static int cmdkopen(dev_t *dev_p, int flag, int otyp, cred_t *credp);
static int cmdkclose(dev_t dev, int flag, int otyp, cred_t *credp);
static int cmdkstrategy(struct buf *bp);
static int cmdkdump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk);
static int cmdkioctl(dev_t, int, intptr_t, int, cred_t *,
    int *);
static int cmdkread(dev_t dev, struct uio *uio, cred_t *credp);
static int cmdkwrite(dev_t dev, struct uio *uio, cred_t *credp);
static int cmdk_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp);
static int cmdkaread(dev_t dev, struct aio_req *aio, cred_t *credp);
static int cmdkawrite(dev_t dev, struct aio_req *aio, cred_t *credp);

/*
 * Device driver ops vector
 */

static struct cb_ops cmdk_cb_ops = {
	cmdkopen, 		/* open */
	cmdkclose, 		/* close */
	cmdkstrategy, 		/* strategy */
	nodev, 			/* print */
	cmdkdump, 		/* dump */
	cmdkread, 		/* read */
	cmdkwrite, 		/* write */
	cmdkioctl, 		/* ioctl */
	nodev, 			/* devmap */
	nodev, 			/* mmap */
	nodev, 			/* segmap */
	nochpoll, 		/* poll */
	cmdk_prop_op, 		/* cb_prop_op */
	0, 			/* streamtab */
	D_64BIT | D_MP | D_NEW,	/* Driver compatibility flag */
	CB_REV,			/* cb_rev */
	cmdkaread,		/* async read */
	cmdkawrite		/* async write */
};

static int cmdkinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
    void **result);
static int cmdkprobe(dev_info_t *dip);
static int cmdkattach(dev_info_t *dip, ddi_attach_cmd_t cmd);
static int cmdkdetach(dev_info_t *dip, ddi_detach_cmd_t cmd);

struct dev_ops cmdk_ops = {
	DEVO_REV, 		/* devo_rev, */
	0, 			/* refcnt  */
	cmdkinfo,		/* info */
	nulldev, 		/* identify */
	cmdkprobe, 		/* probe */
	cmdkattach, 		/* attach */
	cmdkdetach,		/* detach */
	nodev, 			/* reset */
	&cmdk_cb_ops, 		/* driver operations */
	(struct bus_ops *)0	/* bus operations */
};

/*
 * This is the loadable module wrapper.
 */
#include <sys/modctl.h>

extern struct mod_ops mod_driverops;

static struct modldrv modldrv = {
	&mod_driverops, 	/* Type of module.
 This one is a driver */
	"Common Direct Access Disk %I%",
	&cmdk_ops, 				/* driver ops 		*/
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&modldrv, NULL
};

/* Function prototypes for cmlb callbacks */

static int cmdk_lb_rdwr(dev_info_t *dip, uchar_t cmd, void *bufaddr,
    diskaddr_t start, size_t length, void *tg_cookie);

static int cmdk_lb_getinfo(dev_info_t *dip, int cmd, void *arg,
    void *tg_cookie);

static void cmdk_devid_setup(struct cmdk *dkp);
static int cmdk_devid_modser(struct cmdk *dkp);
static int cmdk_get_modser(struct cmdk *dkp, int ioccmd, char *buf, int len);
static int cmdk_devid_fabricate(struct cmdk *dkp);
static int cmdk_devid_read(struct cmdk *dkp);

/* cmlb target ops vector: raw media access plus geometry/attr queries */
static cmlb_tg_ops_t cmdk_lb_ops = {
	TG_DK_OPS_VERSION_1,
	cmdk_lb_rdwr,
	cmdk_lb_getinfo
};

/*
 * Loadable-module entry point: initialize the per-instance soft state
 * (7 slots pre-allocated) and register with the module framework.
 * On mod_install failure, undo the mutex/soft-state setup.
 */
int
_init(void)
{
	int 	rval;

	if (rval = ddi_soft_state_init(&cmdk_state, sizeof (struct cmdk), 7))
		return (rval);

	mutex_init(&cmdk_attach_mutex, NULL, MUTEX_DRIVER, NULL);
	if ((rval = mod_install(&modlinkage)) != 0) {
		mutex_destroy(&cmdk_attach_mutex);
		ddi_soft_state_fini(&cmdk_state);
	}
	return (rval);
}

/*
 * Module unload entry point: deliberately always refuses (EBUSY);
 * see the comment below about bugid 1186679.
 */
int
_fini(void)
{
	return (EBUSY);

	/*
	 * This has been commented out until cmdk is a true
	 * unloadable module. Right now x86's are panicking on
	 * a diskless reconfig boot.
	 */

#if 0	/* bugid 1186679 */
	int	rval;

	rval = mod_remove(&modlinkage);
	if (rval != 0)
		return (rval);

	mutex_destroy(&cmdk_attach_mutex);
	ddi_soft_state_fini(&cmdk_state);

	return (0);
#endif
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*
 * Autoconfiguration Routines
 */

/*
 * probe(9E): allocate the per-instance soft state, create the dadk
 * target linkage, and ask dadk whether the device responds.  Any
 * failure tears down everything allocated here and reports
 * DDI_PROBE_PARTIAL (or dadk's status).
 */
static int
cmdkprobe(dev_info_t *dip)
{
	int 	instance;
	int	status;
	struct	cmdk	*dkp;

	instance = ddi_get_instance(dip);

	/* already probed: soft state exists for this instance */
	if (ddi_get_soft_state(cmdk_state, instance))
		return (DDI_PROBE_PARTIAL);

	if ((ddi_soft_state_zalloc(cmdk_state, instance) != DDI_SUCCESS) ||
	    ((dkp = ddi_get_soft_state(cmdk_state, instance)) == NULL))
		return (DDI_PROBE_PARTIAL);

	mutex_init(&dkp->dk_mutex, NULL, MUTEX_DRIVER, NULL);
	rw_init(&dkp->dk_bbh_mutex, NULL, RW_DRIVER, NULL);
	dkp->dk_dip = dip;
	mutex_enter(&dkp->dk_mutex);

	dkp->dk_dev = makedevice(ddi_driver_major(dip),
	    ddi_get_instance(dip) << CMDK_UNITSHF);

	/* linkage to dadk and strategy */
	if (cmdk_create_obj(dip, dkp) != DDI_SUCCESS) {
		mutex_exit(&dkp->dk_mutex);
		mutex_destroy(&dkp->dk_mutex);
		rw_destroy(&dkp->dk_bbh_mutex);
		ddi_soft_state_free(cmdk_state, instance);
		return (DDI_PROBE_PARTIAL);
	}

	status = dadk_probe(DKTP_DATA, KM_NOSLEEP);
	if (status != DDI_PROBE_SUCCESS) {
		cmdk_destroy_obj(dip, dkp);	/* dadk/strategy linkage  */
		mutex_exit(&dkp->dk_mutex);
		mutex_destroy(&dkp->dk_mutex);
		rw_destroy(&dkp->dk_bbh_mutex);
		ddi_soft_state_free(cmdk_state, instance);
		return (status);
	}

	mutex_exit(&dkp->dk_mutex);
#ifdef CMDK_DEBUG
	if (cmdk_debug & DENT)
		PRF("cmdkprobe: instance= %d name= `%s`\n",
		    instance, ddi_get_name_addr(dip));
#endif
	return (status);
}

/*
 * attach(9E): open the target through dadk, hand the disk to cmlb for
 * label parsing / minor-node creation, start bad-block handling, and
 * set up the device id.
 */
static int
cmdkattach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int
 		instance;
	struct	cmdk	*dkp;
	char 	*node_type;

	if (cmd != DDI_ATTACH)
		return (DDI_FAILURE);

	instance = ddi_get_instance(dip);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
		return (DDI_FAILURE);

	mutex_enter(&dkp->dk_mutex);

	/* dadk_attach is an empty function that only returns SUCCESS */
	(void) dadk_attach(DKTP_DATA);

	node_type = (DKTP_EXT->tg_nodetype);

	/*
	 * this open allows cmlb to read the device
	 * and determine the label types
	 * so that cmlb can create minor nodes for device
	 */

	/* open the target disk	 */
	if (dadk_open(DKTP_DATA, 0) != DDI_SUCCESS)
		goto fail2;

	/* mark as having opened target */
	dkp->dk_flag |= CMDK_TGDK_OPEN;

	cmlb_alloc_handle((cmlb_handle_t *)&dkp->dk_cmlbhandle);

	if (cmlb_attach(dip,
	    &cmdk_lb_ops,
	    DTYPE_DIRECT,		/* device_type */
	    0,				/* removable */
	    0,				/* hot pluggable XXX */
	    node_type,
	    CMLB_CREATE_ALTSLICE_VTOC_16_DTYPE_DIRECT,	/* alter_behaviour */
	    dkp->dk_cmlbhandle,
	    0) != 0)
		goto fail1;

	/* Calling validate will create minor nodes according to disk label */
	(void) cmlb_validate(dkp->dk_cmlbhandle, 0, 0);

	/* set bbh (Bad Block Handling) */
	cmdk_bbh_reopen(dkp);

	/* setup devid string */
	cmdk_devid_setup(dkp);

	/* record the highest instance seen for cmdkdetach's open-scan */
	mutex_enter(&cmdk_attach_mutex);
	if (instance > cmdk_max_instance)
		cmdk_max_instance = instance;
	mutex_exit(&cmdk_attach_mutex);

	mutex_exit(&dkp->dk_mutex);

	/*
	 * Add a zero-length attribute to tell the world we support
	 * kernel ioctls (for layered drivers)
	 */
	(void) ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
	    DDI_KERNEL_IOCTL, NULL, 0);
	ddi_report_dev(dip);

	return (DDI_SUCCESS);

	/* undo in reverse order of the setup above */
fail1:
	cmlb_free_handle(&dkp->dk_cmlbhandle);
	(void) dadk_close(DKTP_DATA);
fail2:
	cmdk_destroy_obj(dip, dkp);
	rw_destroy(&dkp->dk_bbh_mutex);
	mutex_exit(&dkp->dk_mutex);
	mutex_destroy(&dkp->dk_mutex);
	ddi_soft_state_free(cmdk_state, instance);
	return (DDI_FAILURE);
}


/*
 * detach(9E): refuse while any cmdk instance is open; otherwise close
 * the target, detach from cmlb, and release all per-instance state.
 */
static int
cmdkdetach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	struct cmdk	*dkp;
	int 		instance;
	int		max_instance;

	if (cmd != DDI_DETACH) {
#ifdef CMDK_DEBUG
		if (cmdk_debug & DIO) {
			PRF("cmdkdetach: cmd = %d unknown\n", cmd);
		}
#endif
		return (DDI_FAILURE);
	}

	mutex_enter(&cmdk_attach_mutex);
	max_instance = cmdk_max_instance;
	mutex_exit(&cmdk_attach_mutex);

	/*
	 * check if any instance of driver is open
	 *
	 * NOTE(review): cmdk_max_instance holds the highest instance
	 * number seen by cmdkattach, so `instance < max_instance` looks
	 * like it skips the last instance in this scan — confirm whether
	 * the bound should be `<=`.
	 */
	for (instance = 0; instance < max_instance; instance++) {
		dkp = ddi_get_soft_state(cmdk_state, instance);
		if (!dkp)
			continue;
		if (dkp->dk_flag & CMDK_OPEN)
			return (DDI_FAILURE);
	}

	instance = ddi_get_instance(dip);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
		return (DDI_SUCCESS);

	mutex_enter(&dkp->dk_mutex);

	/*
	 * The cmdk_part_info call at the end of cmdkattach may have
	 * caused cmdk_reopen to do a TGDK_OPEN, make sure we close on
	 * detach for case when cmdkopen/cmdkclose never occurs.
	 */
	if (dkp->dk_flag & CMDK_TGDK_OPEN) {
		dkp->dk_flag &= ~CMDK_TGDK_OPEN;
		(void) dadk_close(DKTP_DATA);
	}

	cmlb_detach(dkp->dk_cmlbhandle, 0);
	cmlb_free_handle(&dkp->dk_cmlbhandle);
	ddi_prop_remove_all(dip);

	cmdk_destroy_obj(dip, dkp);	/* dadk/strategy linkage  */
	mutex_exit(&dkp->dk_mutex);
	mutex_destroy(&dkp->dk_mutex);
	rw_destroy(&dkp->dk_bbh_mutex);
	ddi_soft_state_free(cmdk_state, instance);

	return (DDI_SUCCESS);
}

/*
 * getinfo(9E): translate a dev_t into the owning dev_info or the
 * instance number.
 */
static int
cmdkinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	dev_t		dev = (dev_t)arg;
	int 		instance;
	struct	cmdk	*dkp;

#ifdef lint
	dip = dip;	/* no one ever uses this */
#endif
#ifdef CMDK_DEBUG
	if (cmdk_debug & DENT)
		PRF("cmdkinfo: call\n");
#endif
	instance = CMDKUNIT(dev);

	switch (infocmd) {
		case DDI_INFO_DEVT2DEVINFO:
			if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
				return (DDI_FAILURE);
			*result = (void *) dkp->dk_dip;
			break;
		case DDI_INFO_DEVT2INSTANCE:
			*result = (void *)(intptr_t)instance;
			break;
		default:
			return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}

/*
 * prop_op(9E): answer size-oriented dynamic property requests with the
 * partition's block count when the label is valid; otherwise fall back
 * to the generic ddi_prop_op path.
 */
static int
cmdk_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
    char *name, caddr_t valuep, int *lengthp)
{
	struct	cmdk	*dkp;
	diskaddr_t	p_lblksrt;
	diskaddr_t	p_lblkcnt;

#ifdef CMDK_DEBUG
	if (cmdk_debug & DENT)
		PRF("cmdk_prop_op: call\n");
#endif

	dkp = ddi_get_soft_state(cmdk_state, ddi_get_instance(dip));

	/*
	 * Our dynamic properties are all device specific and size oriented.
	 * Requests issued under conditions where size is valid are passed
	 * to ddi_prop_op_nblocks with the size information, otherwise the
	 * request is passed to ddi_prop_op. Size depends on valid label.
	 */
	if ((dev != DDI_DEV_T_ANY) && (dkp != NULL)) {
		/* a zero return from cmlb_partinfo means a valid label */
		if (!cmlb_partinfo(
		    dkp->dk_cmlbhandle,
		    CMDKPART(dev),
		    &p_lblkcnt,
		    &p_lblksrt,
		    NULL,
		    NULL,
		    0))
			return (ddi_prop_op_nblocks(dev, dip,
			    prop_op, mod_flags,
			    name, valuep, lengthp,
			    (uint64_t)p_lblkcnt));
	}

	return (ddi_prop_op(dev, dip,
	    prop_op, mod_flags,
	    name, valuep, lengthp));
}

/*
 * dump routine
 *
 * dump(9E): write nblk blocks from addr to partition-relative block
 * blkno during panic/dump.  Runs single-threaded; sets cmdk_indump so
 * cmdkstrategy rejects normal I/O.
 */
static int
cmdkdump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk)
{
	int 		instance;
	struct	cmdk	*dkp;
	diskaddr_t	p_lblksrt;
	diskaddr_t	p_lblkcnt;
	struct	buf	local;
	struct	buf	*bp;

#ifdef CMDK_DEBUG
	if (cmdk_debug & DENT)
		PRF("cmdkdump: call\n");
#endif
	instance = CMDKUNIT(dev);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)) || (blkno < 0))
		return (ENXIO);

	if (cmlb_partinfo(
	    dkp->dk_cmlbhandle,
	    CMDKPART(dev),
	    &p_lblkcnt,
	    &p_lblksrt,
	    NULL,
	    NULL,
	    0)) {
		return (ENXIO);
	}

	/* the request must fit entirely within the partition */
	if ((blkno+nblk) > p_lblkcnt)
		return (EINVAL);

	cmdk_indump = 1;	/* Tell disk targets we are panic dumping */

	/* build a throwaway buf on the stack; no biodone/biowait here */
	bp = &local;
	bzero(bp, sizeof (*bp));
	bp->b_flags = B_BUSY;
	bp->b_un.b_addr = addr;
	bp->b_bcount = nblk << SCTRSHFT;
	SET_BP_SEC(bp, ((ulong_t)(p_lblksrt + blkno)));

	(void) dadk_dump(DKTP_DATA, bp);
	return (bp->b_error);
}

/*
 * Copy in the dadkio_rwcmd according to the user's data model.  If needed,
 * convert it for our internal use.
 */
static int
rwcmd_copyin(struct dadkio_rwcmd *rwcmdp, caddr_t inaddr, int flag)
{
	switch (ddi_model_convert_from(flag)) {
		case DDI_MODEL_ILP32: {
			/* 32-bit caller: copy in the ILP32 layout, widen */
			struct dadkio_rwcmd32 cmd32;

			if (ddi_copyin(inaddr, &cmd32,
			    sizeof (struct dadkio_rwcmd32), flag)) {
				return (EFAULT);
			}

			rwcmdp->cmd = cmd32.cmd;
			rwcmdp->flags = cmd32.flags;
			rwcmdp->blkaddr = (daddr_t)cmd32.blkaddr;
			rwcmdp->buflen = cmd32.buflen;
			rwcmdp->bufaddr = (caddr_t)(intptr_t)cmd32.bufaddr;
			/*
			 * Note: we do not convert the 'status' field,
			 * as it should not contain valid data at this
			 * point.
			 */
			bzero(&rwcmdp->status, sizeof (rwcmdp->status));
			break;
		}
		case DDI_MODEL_NONE: {
			/* native caller: layouts match, copy in directly */
			if (ddi_copyin(inaddr, rwcmdp,
			    sizeof (struct dadkio_rwcmd), flag)) {
				return (EFAULT);
			}
		}
	}
	return (0);
}

/*
 * If necessary, convert the internal rwcmdp and status to the appropriate
 * data model and copy it out to the user.
 */
static int
rwcmd_copyout(struct dadkio_rwcmd *rwcmdp, caddr_t outaddr, int flag)
{
	switch (ddi_model_convert_from(flag)) {
		case DDI_MODEL_ILP32: {
			/* 32-bit caller: narrow into the ILP32 layout */
			struct dadkio_rwcmd32 cmd32;

			cmd32.cmd = rwcmdp->cmd;
			cmd32.flags = rwcmdp->flags;
			cmd32.blkaddr = rwcmdp->blkaddr;
			cmd32.buflen = rwcmdp->buflen;
			/* bufaddr must fit in 32 bits for an ILP32 caller */
			ASSERT64(((uintptr_t)rwcmdp->bufaddr >> 32) == 0);
			cmd32.bufaddr = (caddr32_t)(uintptr_t)rwcmdp->bufaddr;

			cmd32.status.status = rwcmdp->status.status;
			cmd32.status.resid = rwcmdp->status.resid;
			cmd32.status.failed_blk_is_valid =
			    rwcmdp->status.failed_blk_is_valid;
			cmd32.status.failed_blk = rwcmdp->status.failed_blk;
			cmd32.status.fru_code_is_valid =
			    rwcmdp->status.fru_code_is_valid;
			cmd32.status.fru_code = rwcmdp->status.fru_code;

			bcopy(rwcmdp->status.add_error_info,
			    cmd32.status.add_error_info, DADKIO_ERROR_INFO_LEN);

			if (ddi_copyout(&cmd32, outaddr,
			    sizeof (struct dadkio_rwcmd32), flag))
				return (EFAULT);
			break;
		}
		case DDI_MODEL_NONE: {
			if (ddi_copyout(rwcmdp, outaddr,
			    sizeof (struct dadkio_rwcmd), flag))
				return (EFAULT);
		}
	}
	return (0);
}

/*
 * ioctl routine
 *
 * Label/partition ioctls are forwarded to cmlb; DIOCTL_RWCMD and any
 * unrecognized command are forwarded to dadk.
 */
static int
cmdkioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *credp, int *rvalp)
{
	int 		instance;
	struct scsi_device *devp;
	struct cmdk	*dkp;
	char 		data[NBPSCTR];	/* scratch sector-sized buffer */

	instance = CMDKUNIT(dev);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
		return (ENXIO);

	bzero(data, sizeof (data));

	switch (cmd) {

	case DKIOCGMEDIAINFO: {
		struct dk_minfo	media_info;
		struct  tgdk_geom phyg;

		/* dadk_getphygeom always returns success */
		(void) dadk_getphygeom(DKTP_DATA, &phyg);

		media_info.dki_lbsize = phyg.g_secsiz;
		media_info.dki_capacity = phyg.g_cap;
		media_info.dki_media_type = DK_FIXED_DISK;

		if (ddi_copyout(&media_info, (void *)arg,
		    sizeof (struct dk_minfo), flag)) {
			return (EFAULT);
		} else {
			return (0);
		}
	}

	case DKIOCINFO: {
		/* build a dk_cinfo in the zeroed scratch buffer */
		struct dk_cinfo *info = (struct dk_cinfo *)data;

		/* controller information */
		info->dki_ctype = (DKTP_EXT->tg_ctype);
		info->dki_cnum = ddi_get_instance(ddi_get_parent(dkp->dk_dip));
		(void) strcpy(info->dki_cname,
		    ddi_get_name(ddi_get_parent(dkp->dk_dip)));

		/* Unit Information */
		info->dki_unit = ddi_get_instance(dkp->dk_dip);
		devp = ddi_get_driver_private(dkp->dk_dip);
		info->dki_slave = (CMDEV_TARG(devp)<<3) | CMDEV_LUN(devp);
		(void) strcpy(info->dki_dname, ddi_driver_name(dkp->dk_dip));
		info->dki_flags = DKI_FMTVOL;
		info->dki_partition = CMDKPART(dev);

		info->dki_maxtransfer = maxphys / DEV_BSIZE;
		info->dki_addr = 1;
		info->dki_space = 0;
		info->dki_prio = 0;
		info->dki_vec = 0;

		if (ddi_copyout(data, (void *)arg, sizeof (*info), flag))
			return (EFAULT);
		else
			return (0);
	}

	case DKIOCSTATE: {
		int	state;
		int	rval;
		diskaddr_t	p_lblksrt;
		diskaddr_t	p_lblkcnt;

		if (ddi_copyin((void *)arg, &state, sizeof (int), flag))
			return (EFAULT);

		/* dadk_check_media blocks until state changes */
		if (rval = dadk_check_media(DKTP_DATA, &state))
			return (rval);

		if (state == DKIO_INSERTED) {

			/* media arrived: require a valid, non-empty slice */
			if (cmlb_validate(dkp->dk_cmlbhandle, 0, 0) != 0)
				return (ENXIO);

			if (cmlb_partinfo(dkp->dk_cmlbhandle, CMDKPART(dev),
			    &p_lblkcnt, &p_lblksrt, NULL, NULL, 0))
				return (ENXIO);

			if (p_lblkcnt <= 0)
				return (ENXIO);
		}

		if (ddi_copyout(&state, (caddr_t)arg, sizeof (int), flag))
			return (EFAULT);

		return (0);
	}

	/*
	 * is media removable?
	 */
	case DKIOCREMOVABLE: {
		int i;

		i = (DKTP_EXT->tg_rmb) ?
 1 : 0;

		if (ddi_copyout(&i, (caddr_t)arg, sizeof (int), flag))
			return (EFAULT);

		return (0);
	}

	case DKIOCADDBAD:
		/*
		 * This is not an update mechanism to add bad blocks
		 * to the bad block structures stored on disk.
		 *
		 * addbadsec(1M) will update the bad block data on disk
		 * and use this ioctl to force the driver to re-initialize
		 * the list of bad blocks in the driver.
		 */

		/* start BBH */
		cmdk_bbh_reopen(dkp);
		return (0);

	case DKIOCG_PHYGEOM:
	case DKIOCG_VIRTGEOM:
	case DKIOCGGEOM:
	case DKIOCSGEOM:
	case DKIOCGAPART:
	case DKIOCSAPART:
	case DKIOCGVTOC:
	case DKIOCSVTOC:
	case DKIOCPARTINFO:
	case DKIOCGMBOOT:
	case DKIOCSMBOOT:
	case DKIOCGETEFI:
	case DKIOCSETEFI:
	case DKIOCPARTITION:
	{
		/* label/partition ioctls are all handled by cmlb */
		int rc;

		rc = cmlb_ioctl(dkp->dk_cmlbhandle, dev, cmd, arg, flag,
		    credp, rvalp, 0);
		/* a new VTOC may change the devid; regenerate it */
		if (cmd == DKIOCSVTOC)
			cmdk_devid_setup(dkp);
		return (rc);
	}

	case DIOCTL_RWCMD: {
		struct	dadkio_rwcmd *rwcmdp;
		int	status;

		rwcmdp = kmem_alloc(sizeof (struct dadkio_rwcmd), KM_SLEEP);

		status = rwcmd_copyin(rwcmdp, (caddr_t)arg, flag);

		if (status == 0) {
			bzero(&(rwcmdp->status), sizeof (struct dadkio_status));
			status = dadk_ioctl(DKTP_DATA,
			    dev,
			    cmd,
			    (uintptr_t)rwcmdp,
			    flag,
			    credp,
			    rvalp);
		}
		if (status == 0)
			status = rwcmd_copyout(rwcmdp, (caddr_t)arg, flag);

		kmem_free(rwcmdp, sizeof (struct dadkio_rwcmd));
		return (status);
	}

	default:
		return (dadk_ioctl(DKTP_DATA,
		    dev,
		    cmd,
		    arg,
		    flag,
		    credp,
		    rvalp));
	}
}

/*
 * close(9E): drop the open accounting for this partition/otyp; on the
 * last close of the whole unit, invalidate the cmlb label state.
 */
/*ARGSUSED1*/
static int
cmdkclose(dev_t dev, int flag, int otyp, cred_t *credp)
{
	int		part;
	ulong_t		partbit;
	int 		instance;
	struct cmdk	*dkp;
	int		lastclose = 1;
	int		i;

	instance = CMDKUNIT(dev);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance))
 ||
	    (otyp >= OTYPCNT))
		return (ENXIO);

	mutex_enter(&dkp->dk_mutex);

	/* check if device has been opened */
	if (!(dkp->dk_flag & CMDK_OPEN)) {
		mutex_exit(&dkp->dk_mutex);
		return (ENXIO);
	}

	part = CMDKPART(dev);
	partbit = 1 << part;

	/* account for close */
	if (otyp == OTYP_LYR) {
		/* layered opens are counted, not bit-mapped */
		if (dkp->dk_open_lyr[part])
			dkp->dk_open_lyr[part]--;
	} else
		dkp->dk_open_reg[otyp] &= ~partbit;
	dkp->dk_open_exl &= ~partbit;

	/* scan all accounting: is this the last close of the unit? */
	for (i = 0; i < CMDK_MAXPART; i++)
		if (dkp->dk_open_lyr[i] != 0) {
			lastclose = 0;
			break;
		}

	if (lastclose)
		for (i = 0; i < OTYPCNT; i++)
			if (dkp->dk_open_reg[i] != 0) {
				lastclose = 0;
				break;
			}

	mutex_exit(&dkp->dk_mutex);

	if (lastclose)
		cmlb_invalidate(dkp->dk_cmlbhandle, 0);

	return (DDI_SUCCESS);
}

/*
 * open(9E): validate the label/partition (unless this is a non-blocking
 * open), enforce read-only and exclusive-open semantics, then record
 * the open in the per-otyp accounting.
 */
/*ARGSUSED3*/
static int
cmdkopen(dev_t *dev_p, int flag, int otyp, cred_t *credp)
{
	dev_t		dev = *dev_p;
	int 		part;
	ulong_t		partbit;
	int 		instance;
	struct	cmdk	*dkp;
	diskaddr_t	p_lblksrt;
	diskaddr_t	p_lblkcnt;
	int		i;
	int		nodelay;

	instance = CMDKUNIT(dev);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
		return (ENXIO);

	if (otyp >= OTYPCNT)
		return (EINVAL);

	part = CMDKPART(dev);
	partbit = 1 << part;
	nodelay = (flag & (FNDELAY | FNONBLOCK));

	mutex_enter(&dkp->dk_mutex);

	if (cmlb_validate(dkp->dk_cmlbhandle, 0, 0) != 0) {

		/* fail if not doing non block open */
		if (!nodelay) {
			mutex_exit(&dkp->dk_mutex);
			return (ENXIO);
		}
	} else if (cmlb_partinfo(dkp->dk_cmlbhandle, part, &p_lblkcnt,
	    &p_lblksrt, NULL, NULL, 0) == 0) {

		/* empty slice: allow only nodelay character opens */
		if (p_lblkcnt <= 0 && (!nodelay || otyp != OTYP_CHR)) {
			mutex_exit(&dkp->dk_mutex);
			return (ENXIO);
		}
	} else {
		/* fail if not doing non block open */
		if (!nodelay) {
			mutex_exit(&dkp->dk_mutex);
			return (ENXIO);
		}
	}

	if ((DKTP_EXT->tg_rdonly) && (flag & FWRITE)) {
		mutex_exit(&dkp->dk_mutex);
		return (EROFS);
	}

	/* check for part already opened exclusively */
	if (dkp->dk_open_exl & partbit)
		goto excl_open_fail;

	/* check if we can establish exclusive open */
	if (flag & FEXCL) {
		if (dkp->dk_open_lyr[part])
			goto excl_open_fail;
		for (i = 0; i < OTYPCNT; i++) {
			if (dkp->dk_open_reg[i] & partbit)
				goto excl_open_fail;
		}
	}

	/* open will succeed, account for open */
	dkp->dk_flag |= CMDK_OPEN;
	if (otyp == OTYP_LYR)
		dkp->dk_open_lyr[part]++;
	else
		dkp->dk_open_reg[otyp] |= partbit;
	if (flag & FEXCL)
		dkp->dk_open_exl |= partbit;

	mutex_exit(&dkp->dk_mutex);
	return (DDI_SUCCESS);

excl_open_fail:
	mutex_exit(&dkp->dk_mutex);
	return (EBUSY);
}

/*
 * read routine
 */
/*ARGSUSED2*/
static int
cmdkread(dev_t dev, struct uio *uio, cred_t *credp)
{
	return (cmdkrw(dev, uio, B_READ));
}

/*
 * async read routine
 */
/*ARGSUSED2*/
static int
cmdkaread(dev_t dev, struct aio_req *aio, cred_t *credp)
{
	return (cmdkarw(dev, aio, B_READ));
}

/*
 * write routine
 */
/*ARGSUSED2*/
static int
cmdkwrite(dev_t dev, struct uio *uio, cred_t *credp)
{
	return (cmdkrw(dev, uio, B_WRITE));
}

/*
 * async write routine
 */
/*ARGSUSED2*/
static int
cmdkawrite(dev_t dev, struct aio_req *aio, cred_t *credp)
{
	return (cmdkarw(dev, aio, B_WRITE));
}

/* minphys(9F)-style clamp handed to physio/aphysio */
static void
cmdkmin(struct buf *bp)
{
	if (bp->b_bcount > DK_MAXRECSIZE)
		bp->b_bcount = DK_MAXRECSIZE;
}

/* common synchronous raw-I/O path (read and write) */
static int
cmdkrw(dev_t dev, struct uio *uio, int flag)
{
	return (physio(cmdkstrategy, (struct buf *)0, dev, flag, cmdkmin, uio));
}

/* common asynchronous raw-I/O path (read and write) */
static int
cmdkarw(dev_t dev, struct
 aio_req *aio, int flag)
{
	return (aphysio(cmdkstrategy, anocancel, dev, flag, cmdkmin, aio));
}

/*
 * strategy routine
 *
 * Validate the request against the partition bounds, translate the
 * partition-relative block to an absolute sector, and hand the buf to
 * dadk.  All I/O is rejected while a panic dump is in progress.
 */
static int
cmdkstrategy(struct buf *bp)
{
	int 		instance;
	struct	cmdk 	*dkp;
	long		d_cnt;
	diskaddr_t	p_lblksrt;
	diskaddr_t	p_lblkcnt;

	instance = CMDKUNIT(bp->b_edev);
	if (cmdk_indump || !(dkp = ddi_get_soft_state(cmdk_state, instance)) ||
	    (dkblock(bp) < 0)) {
		bp->b_resid = bp->b_bcount;
		SETBPERR(bp, ENXIO);
		biodone(bp);
		return (0);
	}

	bp->b_flags &= ~(B_DONE|B_ERROR);
	bp->b_resid = 0;
	bp->av_back = NULL;

	/*
	 * only re-read the vtoc if necessary (force == FALSE)
	 *
	 * NOTE(review): if cmlb_partinfo fails, p_lblkcnt is left
	 * uninitialized yet is still compared below before the B_ERROR
	 * bail-out — confirm this is benign / consider initializing it.
	 */
	if (cmlb_partinfo(dkp->dk_cmlbhandle, CMDKPART(bp->b_edev),
	    &p_lblkcnt, &p_lblksrt, NULL, NULL, 0)) {
		SETBPERR(bp, ENXIO);
	}

	/* reject non-sector-multiple counts and out-of-range blocks */
	if ((bp->b_bcount & (NBPSCTR-1)) || (dkblock(bp) > p_lblkcnt))
		SETBPERR(bp, ENXIO);

	if ((bp->b_flags & B_ERROR) || (dkblock(bp) == p_lblkcnt)) {
		bp->b_resid = bp->b_bcount;
		biodone(bp);
		return (0);
	}

	/* trim a request that runs off the end of the partition */
	d_cnt = bp->b_bcount >> SCTRSHFT;
	if ((dkblock(bp) + d_cnt) > p_lblkcnt) {
		bp->b_resid = ((dkblock(bp) + d_cnt) - p_lblkcnt) << SCTRSHFT;
		bp->b_bcount -= bp->b_resid;
	}

	SET_BP_SEC(bp, ((ulong_t)(p_lblksrt + dkblock(bp))));
	if (dadk_strategy(DKTP_DATA, bp) != DDI_SUCCESS) {
		bp->b_resid += bp->b_bcount;
		biodone(bp);
	}
	return (0);
}

/*
 * Build the dadk target object for this instance: pick queueing and
 * flow-control implementations from the "queue" and "flow_control"
 * driver properties, wire in the bbh ops, and init dadk.
 */
static int
cmdk_create_obj(dev_info_t *dip, struct cmdk *dkp)
{
	struct scsi_device	*devp;
	opaque_t		queobjp = NULL;
	opaque_t		flcobjp = NULL;
	char			que_keyvalp[64];
	int			que_keylen;
	char			flc_keyvalp[64];
	int			flc_keylen;

	ASSERT(mutex_owned(&dkp->dk_mutex));

	/* Create linkage to queueing routines based on property */
	que_keylen = sizeof (que_keyvalp);
	if
 (ddi_prop_op(DDI_DEV_T_NONE, dip, PROP_LEN_AND_VAL_BUF,
	    DDI_PROP_CANSLEEP, "queue", que_keyvalp, &que_keylen) !=
	    DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN, "cmdk_create_obj: queue property undefined");
		return (DDI_FAILURE);
	}
	que_keyvalp[que_keylen] = (char)0;

	if (strcmp(que_keyvalp, "qfifo") == 0) {
		queobjp = (opaque_t)qfifo_create();
	} else if (strcmp(que_keyvalp, "qsort") == 0) {
		queobjp = (opaque_t)qsort_create();
	} else {
		return (DDI_FAILURE);
	}

	/*
	 * Create linkage to dequeueing routines based on property
	 *
	 * NOTE(review): the queobjp created above is not released on the
	 * failure returns below — verify whether this leaks or whether
	 * dadk owns cleanup in these paths.
	 */
	flc_keylen = sizeof (flc_keyvalp);
	if (ddi_prop_op(DDI_DEV_T_NONE, dip, PROP_LEN_AND_VAL_BUF,
	    DDI_PROP_CANSLEEP, "flow_control", flc_keyvalp, &flc_keylen) !=
	    DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN,
		    "cmdk_create_obj: flow-control property undefined");
		return (DDI_FAILURE);
	}

	flc_keyvalp[flc_keylen] = (char)0;

	if (strcmp(flc_keyvalp, "dsngl") == 0) {
		flcobjp = (opaque_t)dsngl_create();
	} else if (strcmp(flc_keyvalp, "dmult") == 0) {
		flcobjp = (opaque_t)dmult_create();
	} else {
		return (DDI_FAILURE);
	}

	/* populate bbh_obj object stored in dkp */
	dkp->dk_bbh_obj.bbh_data = dkp;
	dkp->dk_bbh_obj.bbh_ops = &cmdk_bbh_ops;

	/* create linkage to dadk */
	dkp->dk_tgobjp = (opaque_t)dadk_create();

	devp = ddi_get_driver_private(dip);
	(void) dadk_init(DKTP_DATA, devp, flcobjp, queobjp, &dkp->dk_bbh_obj,
	    NULL);

	return (DDI_SUCCESS);
}

/*
 * Tear down the dadk target object created by cmdk_create_obj.  The
 * property lookups mirror create's, presumably to keep property
 * bookkeeping symmetric — their values are not otherwise used here.
 */
static void
cmdk_destroy_obj(dev_info_t *dip, struct cmdk *dkp)
{
	char		que_keyvalp[64];
	int		que_keylen;
	char		flc_keyvalp[64];
	int		flc_keylen;

	ASSERT(mutex_owned(&dkp->dk_mutex));

	(void) dadk_free((dkp->dk_tgobjp));
	dkp->dk_tgobjp = NULL;

	que_keylen = sizeof (que_keyvalp);
	if (ddi_prop_op(DDI_DEV_T_NONE, dip, PROP_LEN_AND_VAL_BUF,
	    DDI_PROP_CANSLEEP, "queue", que_keyvalp, &que_keylen) !=
	    DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN, "cmdk_destroy_obj: queue property undefined");
		return;
	}
	que_keyvalp[que_keylen] = (char)0;

	flc_keylen = sizeof (flc_keyvalp);
	if (ddi_prop_op(DDI_DEV_T_NONE, dip, PROP_LEN_AND_VAL_BUF,
	    DDI_PROP_CANSLEEP, "flow_control", flc_keyvalp, &flc_keylen) !=
	    DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN,
		    "cmdk_destroy_obj: flow-control property undefined");
		return;
	}
	flc_keyvalp[flc_keylen] = (char)0;
}

/*
 * cmlb callback: read or write raw media on behalf of the label code,
 * staging the data through a dadk iob so the transfer is sector-sized.
 */
/*ARGSUSED5*/
static int
cmdk_lb_rdwr(dev_info_t *dip, uchar_t cmd, void *bufaddr,
    diskaddr_t start, size_t count, void *tg_cookie)
{
	struct	cmdk	*dkp;
	opaque_t	handle;
	int		rc = 0;
	char		*bufa;

	dkp = ddi_get_soft_state(cmdk_state, ddi_get_instance(dip));
	if (dkp == NULL)
		return (ENXIO);

	if (cmd != TG_READ && cmd != TG_WRITE)
		return (EINVAL);

	/*
	 * count must be multiple of 512
	 *
	 * NOTE(review): count is rounded UP here and then used as the
	 * bcopy length below, so a non-multiple-of-512 request touches
	 * bytes past the caller's count in bufaddr — confirm all cmlb
	 * callers pass sector-multiple counts and full-sized buffers.
	 */
	count = (count + NBPSCTR - 1) & -NBPSCTR;
	handle = dadk_iob_alloc(DKTP_DATA, start, count, KM_SLEEP);
	if (!handle)
		return (ENOMEM);

	if (cmd == TG_READ) {
		/* read into the iob, then copy out to the caller */
		bufa = dadk_iob_xfer(DKTP_DATA, handle, B_READ);
		if (!bufa)
			rc = EIO;
		else
			bcopy(bufa, bufaddr, count);
	} else {
		/* copy the caller's data into the iob, then write it */
		bufa = dadk_iob_htoc(DKTP_DATA, handle);
		bcopy(bufaddr, bufa, count);
		bufa = dadk_iob_xfer(DKTP_DATA, handle, B_WRITE);
		if (!bufa)
			rc = EIO;
	}
	(void) dadk_iob_free(DKTP_DATA, handle);

	return (rc);
}

/*
 * cmlb callback: answer geometry, capacity, blocksize, and attribute
 * queries from the label code.
 */
/*ARGSUSED3*/
static int
cmdk_lb_getinfo(dev_info_t *dip, int cmd, void *arg, void *tg_cookie)
{

	struct cmdk		*dkp;
	struct tgdk_geom	phyg;


	dkp = ddi_get_soft_state(cmdk_state, ddi_get_instance(dip));
	if (dkp == NULL)
		return (ENXIO);

	switch (cmd) {
	case TG_GETPHYGEOM: {
		cmlb_geom_t *phygeomp = (cmlb_geom_t
*)arg; 1262 1263 /* dadk_getphygeom always returns success */ 1264 (void) dadk_getphygeom(DKTP_DATA, &phyg); 1265 1266 phygeomp->g_capacity = phyg.g_cap; 1267 phygeomp->g_nsect = phyg.g_sec; 1268 phygeomp->g_nhead = phyg.g_head; 1269 phygeomp->g_acyl = phyg.g_acyl; 1270 phygeomp->g_ncyl = phyg.g_cyl; 1271 phygeomp->g_secsize = phyg.g_secsiz; 1272 phygeomp->g_intrlv = 1; 1273 phygeomp->g_rpm = 3600; 1274 1275 return (0); 1276 } 1277 1278 case TG_GETVIRTGEOM: { 1279 cmlb_geom_t *virtgeomp = (cmlb_geom_t *)arg; 1280 diskaddr_t capacity; 1281 1282 (void) dadk_getgeom(DKTP_DATA, &phyg); 1283 capacity = phyg.g_cap; 1284 1285 /* 1286 * If the controller returned us something that doesn't 1287 * really fit into an Int 13/function 8 geometry 1288 * result, just fail the ioctl. See PSARC 1998/313. 1289 */ 1290 if (capacity < 0 || capacity >= 63 * 254 * 1024) 1291 return (EINVAL); 1292 1293 virtgeomp->g_capacity = capacity; 1294 virtgeomp->g_nsect = 63; 1295 virtgeomp->g_nhead = 254; 1296 virtgeomp->g_ncyl = capacity / (63 * 254); 1297 virtgeomp->g_acyl = 0; 1298 virtgeomp->g_secsize = 512; 1299 virtgeomp->g_intrlv = 1; 1300 virtgeomp->g_rpm = 3600; 1301 1302 return (0); 1303 } 1304 1305 case TG_GETCAPACITY: 1306 case TG_GETBLOCKSIZE: 1307 { 1308 1309 /* dadk_getphygeom always returns success */ 1310 (void) dadk_getphygeom(DKTP_DATA, &phyg); 1311 if (cmd == TG_GETCAPACITY) 1312 *(diskaddr_t *)arg = phyg.g_cap; 1313 else 1314 *(uint32_t *)arg = (uint32_t)phyg.g_secsiz; 1315 1316 return (0); 1317 } 1318 1319 case TG_GETATTR: { 1320 tg_attribute_t *tgattribute = (tg_attribute_t *)arg; 1321 if ((DKTP_EXT->tg_rdonly)) 1322 tgattribute->media_is_writable = FALSE; 1323 else 1324 tgattribute->media_is_writable = TRUE; 1325 1326 return (0); 1327 } 1328 1329 default: 1330 return (ENOTTY); 1331 } 1332 } 1333 1334 1335 1336 1337 1338 /* 1339 * Create and register the devid. 1340 * There are 4 different ways we can get a device id: 1341 * 1. Already have one - nothing to do 1342 * 2. 
Build one from the drive's model and serial numbers 1343 * 3. Read one from the disk (first sector of last track) 1344 * 4. Fabricate one and write it on the disk. 1345 * If any of these succeeds, register the deviceid 1346 */ 1347 static void 1348 cmdk_devid_setup(struct cmdk *dkp) 1349 { 1350 int rc; 1351 1352 /* Try options until one succeeds, or all have failed */ 1353 1354 /* 1. All done if already registered */ 1355 if (dkp->dk_devid != NULL) 1356 return; 1357 1358 /* 2. Build a devid from the model and serial number */ 1359 rc = cmdk_devid_modser(dkp); 1360 if (rc != DDI_SUCCESS) { 1361 /* 3. Read devid from the disk, if present */ 1362 rc = cmdk_devid_read(dkp); 1363 1364 /* 4. otherwise make one up and write it on the disk */ 1365 if (rc != DDI_SUCCESS) 1366 rc = cmdk_devid_fabricate(dkp); 1367 } 1368 1369 /* If we managed to get a devid any of the above ways, register it */ 1370 if (rc == DDI_SUCCESS) 1371 (void) ddi_devid_register(dkp->dk_dip, dkp->dk_devid); 1372 1373 } 1374 1375 /* 1376 * Build a devid from the model and serial number 1377 * Return DDI_SUCCESS or DDI_FAILURE. 1378 */ 1379 static int 1380 cmdk_devid_modser(struct cmdk *dkp) 1381 { 1382 int rc = DDI_FAILURE; 1383 char *hwid; 1384 int modlen; 1385 int serlen; 1386 1387 /* 1388 * device ID is a concatenation of model number, '=', serial number. 
1389 */ 1390 hwid = kmem_alloc(CMDK_HWIDLEN, KM_SLEEP); 1391 modlen = cmdk_get_modser(dkp, DIOCTL_GETMODEL, hwid, CMDK_HWIDLEN); 1392 if (modlen == 0) { 1393 rc = DDI_FAILURE; 1394 goto err; 1395 } 1396 hwid[modlen++] = '='; 1397 serlen = cmdk_get_modser(dkp, DIOCTL_GETSERIAL, 1398 hwid + modlen, CMDK_HWIDLEN - modlen); 1399 if (serlen == 0) { 1400 rc = DDI_FAILURE; 1401 goto err; 1402 } 1403 hwid[modlen + serlen] = 0; 1404 1405 /* Initialize the device ID, trailing NULL not included */ 1406 rc = ddi_devid_init(dkp->dk_dip, DEVID_ATA_SERIAL, modlen + serlen, 1407 hwid, (ddi_devid_t *)&dkp->dk_devid); 1408 if (rc != DDI_SUCCESS) { 1409 rc = DDI_FAILURE; 1410 goto err; 1411 } 1412 1413 rc = DDI_SUCCESS; 1414 1415 err: 1416 kmem_free(hwid, CMDK_HWIDLEN); 1417 return (rc); 1418 } 1419 1420 static int 1421 cmdk_get_modser(struct cmdk *dkp, int ioccmd, char *buf, int len) 1422 { 1423 dadk_ioc_string_t strarg; 1424 int rval; 1425 char *s; 1426 char ch; 1427 boolean_t ret; 1428 int i; 1429 int tb; 1430 1431 strarg.is_buf = buf; 1432 strarg.is_size = len; 1433 if (dadk_ioctl(DKTP_DATA, 1434 dkp->dk_dev, 1435 ioccmd, 1436 (uintptr_t)&strarg, 1437 FNATIVE | FKIOCTL, 1438 NULL, 1439 &rval) != 0) 1440 return (0); 1441 1442 /* 1443 * valid model/serial string must contain a non-zero non-space 1444 * trim trailing spaces/NULL 1445 */ 1446 ret = B_FALSE; 1447 s = buf; 1448 for (i = 0; i < strarg.is_size; i++) { 1449 ch = *s++; 1450 if (ch != ' ' && ch != '\0') 1451 tb = i + 1; 1452 if (ch != ' ' && ch != '\0' && ch != '0') 1453 ret = B_TRUE; 1454 } 1455 1456 if (ret == B_FALSE) 1457 return (0); 1458 1459 return (tb); 1460 } 1461 1462 /* 1463 * Read a devid from on the first block of the last track of 1464 * the last cylinder. Make sure what we read is a valid devid. 1465 * Return DDI_SUCCESS or DDI_FAILURE. 
1466 */ 1467 static int 1468 cmdk_devid_read(struct cmdk *dkp) 1469 { 1470 diskaddr_t blk; 1471 struct dk_devid *dkdevidp; 1472 uint_t *ip; 1473 int chksum; 1474 int i, sz; 1475 tgdk_iob_handle handle; 1476 int rc = DDI_FAILURE; 1477 1478 if (cmlb_get_devid_block(dkp->dk_cmlbhandle, &blk, 0)) 1479 goto err; 1480 1481 /* read the devid */ 1482 handle = dadk_iob_alloc(DKTP_DATA, blk, NBPSCTR, KM_SLEEP); 1483 if (handle == NULL) 1484 goto err; 1485 1486 dkdevidp = (struct dk_devid *)dadk_iob_xfer(DKTP_DATA, handle, B_READ); 1487 if (dkdevidp == NULL) 1488 goto err; 1489 1490 /* Validate the revision */ 1491 if ((dkdevidp->dkd_rev_hi != DK_DEVID_REV_MSB) || 1492 (dkdevidp->dkd_rev_lo != DK_DEVID_REV_LSB)) 1493 goto err; 1494 1495 /* Calculate the checksum */ 1496 chksum = 0; 1497 ip = (uint_t *)dkdevidp; 1498 for (i = 0; i < ((NBPSCTR - sizeof (int))/sizeof (int)); i++) 1499 chksum ^= ip[i]; 1500 if (DKD_GETCHKSUM(dkdevidp) != chksum) 1501 goto err; 1502 1503 /* Validate the device id */ 1504 if (ddi_devid_valid((ddi_devid_t)dkdevidp->dkd_devid) != DDI_SUCCESS) 1505 goto err; 1506 1507 /* keep a copy of the device id */ 1508 sz = ddi_devid_sizeof((ddi_devid_t)dkdevidp->dkd_devid); 1509 dkp->dk_devid = kmem_alloc(sz, KM_SLEEP); 1510 bcopy(dkdevidp->dkd_devid, dkp->dk_devid, sz); 1511 1512 rc = DDI_SUCCESS; 1513 1514 err: 1515 if (handle != NULL) 1516 (void) dadk_iob_free(DKTP_DATA, handle); 1517 return (rc); 1518 } 1519 1520 /* 1521 * Create a devid and write it on the first block of the last track of 1522 * the last cylinder. 1523 * Return DDI_SUCCESS or DDI_FAILURE. 
1524 */ 1525 static int 1526 cmdk_devid_fabricate(struct cmdk *dkp) 1527 { 1528 ddi_devid_t devid = NULL; /* devid made by ddi_devid_init */ 1529 struct dk_devid *dkdevidp; /* devid struct stored on disk */ 1530 diskaddr_t blk; 1531 tgdk_iob_handle handle = NULL; 1532 uint_t *ip, chksum; 1533 int i; 1534 int rc; 1535 1536 rc = ddi_devid_init(dkp->dk_dip, DEVID_FAB, 0, NULL, &devid); 1537 if (rc != DDI_SUCCESS) 1538 goto err; 1539 1540 if (cmlb_get_devid_block(dkp->dk_cmlbhandle, &blk, 0)) { 1541 /* no device id block address */ 1542 return (DDI_FAILURE); 1543 } 1544 1545 handle = dadk_iob_alloc(DKTP_DATA, blk, NBPSCTR, KM_SLEEP); 1546 if (!handle) 1547 goto err; 1548 1549 /* Locate the buffer */ 1550 dkdevidp = (struct dk_devid *)dadk_iob_htoc(DKTP_DATA, handle); 1551 1552 /* Fill in the revision */ 1553 bzero(dkdevidp, NBPSCTR); 1554 dkdevidp->dkd_rev_hi = DK_DEVID_REV_MSB; 1555 dkdevidp->dkd_rev_lo = DK_DEVID_REV_LSB; 1556 1557 /* Copy in the device id */ 1558 i = ddi_devid_sizeof(devid); 1559 if (i > DK_DEVID_SIZE) 1560 goto err; 1561 bcopy(devid, dkdevidp->dkd_devid, i); 1562 1563 /* Calculate the chksum */ 1564 chksum = 0; 1565 ip = (uint_t *)dkdevidp; 1566 for (i = 0; i < ((NBPSCTR - sizeof (int))/sizeof (int)); i++) 1567 chksum ^= ip[i]; 1568 1569 /* Fill in the checksum */ 1570 DKD_FORMCHKSUM(chksum, dkdevidp); 1571 1572 /* write the devid */ 1573 (void) dadk_iob_xfer(DKTP_DATA, handle, B_WRITE); 1574 1575 dkp->dk_devid = devid; 1576 1577 rc = DDI_SUCCESS; 1578 1579 err: 1580 if (handle != NULL) 1581 (void) dadk_iob_free(DKTP_DATA, handle); 1582 1583 if (rc != DDI_SUCCESS && devid != NULL) 1584 ddi_devid_free(devid); 1585 1586 return (rc); 1587 } 1588 1589 static void 1590 cmdk_bbh_free_alts(struct cmdk *dkp) 1591 { 1592 if (dkp->dk_alts_hdl) { 1593 (void) dadk_iob_free(DKTP_DATA, dkp->dk_alts_hdl); 1594 kmem_free(dkp->dk_slc_cnt, 1595 NDKMAP * (sizeof (uint32_t) + sizeof (struct alts_ent *))); 1596 dkp->dk_alts_hdl = NULL; 1597 } 1598 } 1599 1600 static 
void 1601 cmdk_bbh_reopen(struct cmdk *dkp) 1602 { 1603 tgdk_iob_handle handle = NULL; 1604 diskaddr_t slcb, slcn, slce; 1605 struct alts_parttbl *ap; 1606 struct alts_ent *enttblp; 1607 uint32_t altused; 1608 uint32_t altbase; 1609 uint32_t altlast; 1610 int alts; 1611 uint16_t vtoctag; 1612 int i, j; 1613 1614 /* find slice with V_ALTSCTR tag */ 1615 for (alts = 0; alts < NDKMAP; alts++) { 1616 if (cmlb_partinfo( 1617 dkp->dk_cmlbhandle, 1618 alts, 1619 &slcn, 1620 &slcb, 1621 NULL, 1622 &vtoctag, 1623 0)) { 1624 goto empty; /* no partition table exists */ 1625 } 1626 1627 if (vtoctag == V_ALTSCTR && slcn > 1) 1628 break; 1629 } 1630 if (alts >= NDKMAP) { 1631 goto empty; /* no V_ALTSCTR slice defined */ 1632 } 1633 1634 /* read in ALTS label block */ 1635 handle = dadk_iob_alloc(DKTP_DATA, slcb, NBPSCTR, KM_SLEEP); 1636 if (!handle) { 1637 goto empty; 1638 } 1639 1640 ap = (struct alts_parttbl *)dadk_iob_xfer(DKTP_DATA, handle, B_READ); 1641 if (!ap || (ap->alts_sanity != ALTS_SANITY)) { 1642 goto empty; 1643 } 1644 1645 altused = ap->alts_ent_used; /* number of BB entries */ 1646 altbase = ap->alts_ent_base; /* blk offset from begin slice */ 1647 altlast = ap->alts_ent_end; /* blk offset to last block */ 1648 /* ((altused * sizeof (struct alts_ent) + NBPSCTR - 1) & ~NBPSCTR) */ 1649 1650 if (altused == 0 || 1651 altbase < 1 || 1652 altbase > altlast || 1653 altlast >= slcn) { 1654 goto empty; 1655 } 1656 (void) dadk_iob_free(DKTP_DATA, handle); 1657 1658 /* read in ALTS remapping table */ 1659 handle = dadk_iob_alloc(DKTP_DATA, 1660 slcb + altbase, 1661 (altlast - altbase + 1) << SCTRSHFT, KM_SLEEP); 1662 if (!handle) { 1663 goto empty; 1664 } 1665 1666 enttblp = (struct alts_ent *)dadk_iob_xfer(DKTP_DATA, handle, B_READ); 1667 if (!enttblp) { 1668 goto empty; 1669 } 1670 1671 rw_enter(&dkp->dk_bbh_mutex, RW_WRITER); 1672 1673 /* allocate space for dk_slc_cnt and dk_slc_ent tables */ 1674 if (dkp->dk_slc_cnt == NULL) { 1675 dkp->dk_slc_cnt = kmem_alloc(NDKMAP * 
1676 (sizeof (long) + sizeof (struct alts_ent *)), KM_SLEEP); 1677 } 1678 dkp->dk_slc_ent = (struct alts_ent **)(dkp->dk_slc_cnt + NDKMAP); 1679 1680 /* free previous BB table (if any) */ 1681 if (dkp->dk_alts_hdl) { 1682 (void) dadk_iob_free(DKTP_DATA, dkp->dk_alts_hdl); 1683 dkp->dk_alts_hdl = NULL; 1684 dkp->dk_altused = 0; 1685 } 1686 1687 /* save linkage to new BB table */ 1688 dkp->dk_alts_hdl = handle; 1689 dkp->dk_altused = altused; 1690 1691 /* 1692 * build indexes to BB table by slice 1693 * effectively we have 1694 * struct alts_ent *enttblp[altused]; 1695 * 1696 * uint32_t dk_slc_cnt[NDKMAP]; 1697 * struct alts_ent *dk_slc_ent[NDKMAP]; 1698 */ 1699 for (i = 0; i < NDKMAP; i++) { 1700 if (cmlb_partinfo( 1701 dkp->dk_cmlbhandle, 1702 i, 1703 &slcn, 1704 &slcb, 1705 NULL, 1706 NULL, 1707 0)) { 1708 goto empty1; 1709 } 1710 1711 dkp->dk_slc_cnt[i] = 0; 1712 if (slcn == 0) 1713 continue; /* slice is not allocated */ 1714 1715 /* last block in slice */ 1716 slce = slcb + slcn - 1; 1717 1718 /* find first remap entry in after beginnning of slice */ 1719 for (j = 0; j < altused; j++) { 1720 if (enttblp[j].bad_start + enttblp[j].bad_end >= slcb) 1721 break; 1722 } 1723 dkp->dk_slc_ent[i] = enttblp + j; 1724 1725 /* count remap entrys until end of slice */ 1726 for (; j < altused && enttblp[j].bad_start <= slce; j++) { 1727 dkp->dk_slc_cnt[i] += 1; 1728 } 1729 } 1730 1731 rw_exit(&dkp->dk_bbh_mutex); 1732 return; 1733 1734 empty: 1735 rw_enter(&dkp->dk_bbh_mutex, RW_WRITER); 1736 empty1: 1737 if (handle && handle != dkp->dk_alts_hdl) 1738 (void) dadk_iob_free(DKTP_DATA, handle); 1739 1740 if (dkp->dk_alts_hdl) { 1741 (void) dadk_iob_free(DKTP_DATA, dkp->dk_alts_hdl); 1742 dkp->dk_alts_hdl = NULL; 1743 } 1744 1745 rw_exit(&dkp->dk_bbh_mutex); 1746 } 1747 1748 /*ARGSUSED*/ 1749 static bbh_cookie_t 1750 cmdk_bbh_htoc(opaque_t bbh_data, opaque_t handle) 1751 { 1752 struct bbh_handle *hp; 1753 bbh_cookie_t ckp; 1754 1755 hp = (struct bbh_handle *)handle; 1756 ckp = 
hp->h_cktab + hp->h_idx; 1757 hp->h_idx++; 1758 return (ckp); 1759 } 1760 1761 /*ARGSUSED*/ 1762 static void 1763 cmdk_bbh_freehandle(opaque_t bbh_data, opaque_t handle) 1764 { 1765 struct bbh_handle *hp; 1766 1767 hp = (struct bbh_handle *)handle; 1768 kmem_free(handle, (sizeof (struct bbh_handle) + 1769 (hp->h_totck * (sizeof (struct bbh_cookie))))); 1770 } 1771 1772 1773 /* 1774 * cmdk_bbh_gethandle remaps the bad sectors to alternates. 1775 * There are 7 different cases when the comparison is made 1776 * between the bad sector cluster and the disk section. 1777 * 1778 * bad sector cluster gggggggggggbbbbbbbggggggggggg 1779 * case 1: ddddd 1780 * case 2: -d----- 1781 * case 3: ddddd 1782 * case 4: dddddddddddd 1783 * case 5: ddddddd----- 1784 * case 6: ---ddddddd 1785 * case 7: ddddddd 1786 * 1787 * where: g = good sector, b = bad sector 1788 * d = sector in disk section 1789 * - = disk section may be extended to cover those disk area 1790 */ 1791 1792 static opaque_t 1793 cmdk_bbh_gethandle(opaque_t bbh_data, struct buf *bp) 1794 { 1795 struct cmdk *dkp = (struct cmdk *)bbh_data; 1796 struct bbh_handle *hp; 1797 struct bbh_cookie *ckp; 1798 struct alts_ent *altp; 1799 uint32_t alts_used; 1800 uint32_t part = CMDKPART(bp->b_edev); 1801 daddr32_t lastsec; 1802 long d_count; 1803 int i; 1804 int idx; 1805 int cnt; 1806 1807 if (part >= V_NUMPAR) 1808 return (NULL); 1809 1810 /* 1811 * This if statement is atomic and it will succeed 1812 * if there are no bad blocks (almost always) 1813 * 1814 * so this if is performed outside of the rw_enter for speed 1815 * and then repeated inside the rw_enter for safety 1816 */ 1817 if (!dkp->dk_alts_hdl) { 1818 return (NULL); 1819 } 1820 1821 rw_enter(&dkp->dk_bbh_mutex, RW_READER); 1822 1823 if (dkp->dk_alts_hdl == NULL) { 1824 rw_exit(&dkp->dk_bbh_mutex); 1825 return (NULL); 1826 } 1827 1828 alts_used = dkp->dk_slc_cnt[part]; 1829 if (alts_used == 0) { 1830 rw_exit(&dkp->dk_bbh_mutex); 1831 return (NULL); 1832 } 1833 altp = 
dkp->dk_slc_ent[part]; 1834 1835 /* 1836 * binary search for the largest bad sector index in the alternate 1837 * entry table which overlaps or larger than the starting d_sec 1838 */ 1839 i = cmdk_bbh_bsearch(altp, alts_used, GET_BP_SEC(bp)); 1840 /* if starting sector is > the largest bad sector, return */ 1841 if (i == -1) { 1842 rw_exit(&dkp->dk_bbh_mutex); 1843 return (NULL); 1844 } 1845 /* i is the starting index. Set altp to the starting entry addr */ 1846 altp += i; 1847 1848 d_count = bp->b_bcount >> SCTRSHFT; 1849 lastsec = GET_BP_SEC(bp) + d_count - 1; 1850 1851 /* calculate the number of bad sectors */ 1852 for (idx = i, cnt = 0; idx < alts_used; idx++, altp++, cnt++) { 1853 if (lastsec < altp->bad_start) 1854 break; 1855 } 1856 1857 if (!cnt) { 1858 rw_exit(&dkp->dk_bbh_mutex); 1859 return (NULL); 1860 } 1861 1862 /* calculate the maximum number of reserved cookies */ 1863 cnt <<= 1; 1864 cnt++; 1865 1866 /* allocate the handle */ 1867 hp = (struct bbh_handle *)kmem_zalloc((sizeof (*hp) + 1868 (cnt * sizeof (*ckp))), KM_SLEEP); 1869 1870 hp->h_idx = 0; 1871 hp->h_totck = cnt; 1872 ckp = hp->h_cktab = (struct bbh_cookie *)(hp + 1); 1873 ckp[0].ck_sector = GET_BP_SEC(bp); 1874 ckp[0].ck_seclen = d_count; 1875 1876 altp = dkp->dk_slc_ent[part]; 1877 altp += i; 1878 for (idx = 0; i < alts_used; i++, altp++) { 1879 /* CASE 1: */ 1880 if (lastsec < altp->bad_start) 1881 break; 1882 1883 /* CASE 3: */ 1884 if (ckp[idx].ck_sector > altp->bad_end) 1885 continue; 1886 1887 /* CASE 2 and 7: */ 1888 if ((ckp[idx].ck_sector >= altp->bad_start) && 1889 (lastsec <= altp->bad_end)) { 1890 ckp[idx].ck_sector = altp->good_start + 1891 ckp[idx].ck_sector - altp->bad_start; 1892 break; 1893 } 1894 1895 /* at least one bad sector in our section. break it. 
*/ 1896 /* CASE 5: */ 1897 if ((lastsec >= altp->bad_start) && 1898 (lastsec <= altp->bad_end)) { 1899 ckp[idx+1].ck_seclen = lastsec - altp->bad_start + 1; 1900 ckp[idx].ck_seclen -= ckp[idx+1].ck_seclen; 1901 ckp[idx+1].ck_sector = altp->good_start; 1902 break; 1903 } 1904 /* CASE 6: */ 1905 if ((ckp[idx].ck_sector <= altp->bad_end) && 1906 (ckp[idx].ck_sector >= altp->bad_start)) { 1907 ckp[idx+1].ck_seclen = ckp[idx].ck_seclen; 1908 ckp[idx].ck_seclen = altp->bad_end - 1909 ckp[idx].ck_sector + 1; 1910 ckp[idx+1].ck_seclen -= ckp[idx].ck_seclen; 1911 ckp[idx].ck_sector = altp->good_start + 1912 ckp[idx].ck_sector - altp->bad_start; 1913 idx++; 1914 ckp[idx].ck_sector = altp->bad_end + 1; 1915 continue; /* check rest of section */ 1916 } 1917 1918 /* CASE 4: */ 1919 ckp[idx].ck_seclen = altp->bad_start - ckp[idx].ck_sector; 1920 ckp[idx+1].ck_sector = altp->good_start; 1921 ckp[idx+1].ck_seclen = altp->bad_end - altp->bad_start + 1; 1922 idx += 2; 1923 ckp[idx].ck_sector = altp->bad_end + 1; 1924 ckp[idx].ck_seclen = lastsec - altp->bad_end; 1925 } 1926 1927 rw_exit(&dkp->dk_bbh_mutex); 1928 return ((opaque_t)hp); 1929 } 1930 1931 static int 1932 cmdk_bbh_bsearch(struct alts_ent *buf, int cnt, daddr32_t key) 1933 { 1934 int i; 1935 int ind; 1936 int interval; 1937 int mystatus = -1; 1938 1939 if (!cnt) 1940 return (mystatus); 1941 1942 ind = 1; /* compiler complains about possible uninitialized var */ 1943 for (i = 1; i <= cnt; i <<= 1) 1944 ind = i; 1945 1946 for (interval = ind; interval; ) { 1947 if ((key >= buf[ind-1].bad_start) && 1948 (key <= buf[ind-1].bad_end)) { 1949 return (ind-1); 1950 } else { 1951 interval >>= 1; 1952 if (key < buf[ind-1].bad_start) { 1953 /* record the largest bad sector index */ 1954 mystatus = ind-1; 1955 if (!interval) 1956 break; 1957 ind = ind - interval; 1958 } else { 1959 /* 1960 * if key is larger than the last element 1961 * then break 1962 */ 1963 if ((ind == cnt) || !interval) 1964 break; 1965 if ((ind+interval) <= cnt) 
1966 ind += interval; 1967 } 1968 } 1969 } 1970 return (mystatus); 1971 } 1972