/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Layered driver support.
 */

#include <sys/atomic.h>
#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/param.h>
#include <sys/conf.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/buf.h>
#include <sys/cred.h>
#include <sys/uio.h>
#include <sys/vnode.h>
#include <sys/fs/snode.h>
#include <sys/open.h>
#include <sys/kmem.h>
#include <sys/file.h>
#include <sys/bootconf.h>
#include <sys/pathname.h>
#include <sys/bitmap.h>
#include <sys/stat.h>
#include <sys/dditypes.h>
#include <sys/ddi_impldefs.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/esunddi.h>
#include <sys/autoconf.h>
#include <sys/sunldi.h>
#include <sys/sunldi_impl.h>
#include <sys/errno.h>
#include <sys/debug.h>
#include <sys/modctl.h>
#include <sys/var.h>
#include <vm/seg_vn.h>

#include <sys/stropts.h>
#include <sys/strsubr.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/kstr.h>


/*
 * Define macros to manipulate snode, vnode, and open device flags
 */
#define	VTYP_VALID(i)	(((i) == VCHR) || ((i) == VBLK))
#define	VTYP_TO_OTYP(i)	(((i) == VCHR) ? OTYP_CHR : OTYP_BLK)
#define	VTYP_TO_STYP(i)	(((i) == VCHR) ? S_IFCHR : S_IFBLK)

#define	OTYP_VALID(i)	(((i) == OTYP_CHR) || ((i) == OTYP_BLK))
#define	OTYP_TO_VTYP(i)	(((i) == OTYP_CHR) ? VCHR : VBLK)
#define	OTYP_TO_STYP(i)	(((i) == OTYP_CHR) ? S_IFCHR : S_IFBLK)

#define	STYP_VALID(i)	(((i) == S_IFCHR) || ((i) == S_IFBLK))
#define	STYP_TO_VTYP(i)	(((i) == S_IFCHR) ? VCHR : VBLK)

/*
 * Define macros for accessing layered driver hash structures
 */
#define	LH_HASH(vp)		(handle_hash_func(vp) % LH_HASH_SZ)
#define	LI_HASH(mid, dip, dev)	(ident_hash_func(mid, dip, dev) % LI_HASH_SZ)

/*
 * Define layered handle flags used in the lh_type field
 */
#define	LH_STREAM	(0x1)	/* handle to a streams device */
#define	LH_CBDEV	(0x2)	/* handle to a char/block device */

/*
 * Define macro for devid property lookups
 */
#define	DEVID_PROP_FLAGS	(DDI_PROP_DONTPASS | \
    DDI_PROP_TYPE_STRING|DDI_PROP_CANSLEEP)


/*
 * globals
 */
static kmutex_t			ldi_ident_hash_lock[LI_HASH_SZ];
static struct ldi_ident		*ldi_ident_hash[LI_HASH_SZ];

static kmutex_t			ldi_handle_hash_lock[LH_HASH_SZ];
static struct ldi_handle	*ldi_handle_hash[LH_HASH_SZ];
static size_t			ldi_handle_hash_count;
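/*
 * Overview of the data structures above: the LDI keeps its state in two
 * hash tables.  ldi_ident_hash tracks ldi_ident_t consumers, hashed by
 * module id, dip, or dev_t (see LI_HASH), and ldi_handle_hash tracks open
 * ldi_handle_t handles, hashed by the vnode of the underlying device (see
 * LH_HASH).  Each bucket has its own mutex, and ldi_handle_hash_count
 * counts the total number of handles for ldi_usage_count().
 */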
void
ldi_init(void)
{
	int i;

	ldi_handle_hash_count = 0;
	for (i = 0; i < LH_HASH_SZ; i++) {
		mutex_init(&ldi_handle_hash_lock[i], NULL, MUTEX_DEFAULT, NULL);
		ldi_handle_hash[i] = NULL;
	}
	for (i = 0; i < LI_HASH_SZ; i++) {
		mutex_init(&ldi_ident_hash_lock[i], NULL, MUTEX_DEFAULT, NULL);
		ldi_ident_hash[i] = NULL;
	}
}

/*
 * LDI ident manipulation functions
 */
static uint_t
ident_hash_func(modid_t modid, dev_info_t *dip, dev_t dev)
{
	if (dip != NULL) {
		uintptr_t k = (uintptr_t)dip;
		k >>= (int)highbit(sizeof (struct dev_info));
		return ((uint_t)k);
	} else if (dev != DDI_DEV_T_NONE) {
		return (modid + getminor(dev) + getmajor(dev));
	} else {
		return (modid);
	}
}

static struct ldi_ident **
ident_find_ref_nolock(modid_t modid, dev_info_t *dip, dev_t dev, major_t major)
{
	struct ldi_ident	**lipp = NULL;
	uint_t			index = LI_HASH(modid, dip, dev);

	ASSERT(MUTEX_HELD(&ldi_ident_hash_lock[index]));

	for (lipp = &(ldi_ident_hash[index]);
	    (*lipp != NULL);
	    lipp = &((*lipp)->li_next)) {
		if (((*lipp)->li_modid == modid) &&
		    ((*lipp)->li_major == major) &&
		    ((*lipp)->li_dip == dip) &&
		    ((*lipp)->li_dev == dev))
			break;
	}

	ASSERT(lipp != NULL);
	return (lipp);
}

static struct ldi_ident *
ident_alloc(char *mod_name, dev_info_t *dip, dev_t dev, major_t major)
{
	struct ldi_ident	*lip, **lipp;
	modid_t			modid;
	uint_t			index;

	ASSERT(mod_name != NULL);

	/* get the module id */
	modid = mod_name_to_modid(mod_name);
	ASSERT(modid != -1);

	/* allocate a new ident in case we need it */
	lip = kmem_zalloc(sizeof (*lip), KM_SLEEP);

	/* search the hash for a matching ident */
	index = LI_HASH(modid, dip, dev);
	mutex_enter(&ldi_ident_hash_lock[index]);
	lipp = ident_find_ref_nolock(modid, dip, dev, major);

	if (*lipp != NULL) {
		/* we found an ident in the hash */
		ASSERT(strcmp((*lipp)->li_modname, mod_name) == 0);
		(*lipp)->li_ref++;
		mutex_exit(&ldi_ident_hash_lock[index]);
		kmem_free(lip, sizeof (struct ldi_ident));
		return (*lipp);
	}

	/* initialize the new ident */
	lip->li_next = NULL;
	lip->li_ref = 1;
	lip->li_modid = modid;
	lip->li_major = major;
	lip->li_dip = dip;
	lip->li_dev = dev;
	(void) strncpy(lip->li_modname, mod_name, sizeof (lip->li_modname) - 1);

	/* add it to the ident hash */
	lip->li_next = ldi_ident_hash[index];
	ldi_ident_hash[index] = lip;

	mutex_exit(&ldi_ident_hash_lock[index]);
	return (lip);
}

static void
ident_hold(struct ldi_ident *lip)
{
	uint_t			index;

	ASSERT(lip != NULL);
	index = LI_HASH(lip->li_modid, lip->li_dip, lip->li_dev);
	mutex_enter(&ldi_ident_hash_lock[index]);
	ASSERT(lip->li_ref > 0);
	lip->li_ref++;
	mutex_exit(&ldi_ident_hash_lock[index]);
}

static void
ident_release(struct ldi_ident *lip)
{
	struct ldi_ident	**lipp;
	uint_t			index;

	ASSERT(lip != NULL);
	index = LI_HASH(lip->li_modid, lip->li_dip, lip->li_dev);
	mutex_enter(&ldi_ident_hash_lock[index]);

	ASSERT(lip->li_ref > 0);
	if (--lip->li_ref > 0) {
		/* there are more references to this ident */
		mutex_exit(&ldi_ident_hash_lock[index]);
		return;
	}

	/* this was the last reference/open for this ident.  free it. */
	lipp = ident_find_ref_nolock(
	    lip->li_modid, lip->li_dip, lip->li_dev, lip->li_major);

	ASSERT((lipp != NULL) && (*lipp != NULL));
	*lipp = lip->li_next;
	mutex_exit(&ldi_ident_hash_lock[index]);
	kmem_free(lip, sizeof (struct ldi_ident));
}
/*
 * LDI handle manipulation functions
 */
static uint_t
handle_hash_func(void *vp)
{
	uintptr_t k = (uintptr_t)vp;
	k >>= (int)highbit(sizeof (vnode_t));
	return ((uint_t)k);
}

static struct ldi_handle **
handle_find_ref_nolock(vnode_t *vp, struct ldi_ident *ident)
{
	struct ldi_handle	**lhpp = NULL;
	uint_t			index = LH_HASH(vp);

	ASSERT(MUTEX_HELD(&ldi_handle_hash_lock[index]));

	for (lhpp = &(ldi_handle_hash[index]);
	    (*lhpp != NULL);
	    lhpp = &((*lhpp)->lh_next)) {
		if (((*lhpp)->lh_ident == ident) &&
		    ((*lhpp)->lh_vp == vp))
			break;
	}

	ASSERT(lhpp != NULL);
	return (lhpp);
}

static struct ldi_handle *
handle_find(vnode_t *vp, struct ldi_ident *ident)
{
	struct ldi_handle	**lhpp;
	int			index = LH_HASH(vp);

	mutex_enter(&ldi_handle_hash_lock[index]);
	lhpp = handle_find_ref_nolock(vp, ident);
	mutex_exit(&ldi_handle_hash_lock[index]);
	ASSERT(lhpp != NULL);
	return (*lhpp);
}

static struct ldi_handle *
handle_alloc(vnode_t *vp, struct ldi_ident *ident)
{
	struct ldi_handle	*lhp, **lhpp;
	uint_t			index;

	ASSERT((vp != NULL) && (ident != NULL));

	/* allocate a new handle in case we need it */
	lhp = kmem_zalloc(sizeof (*lhp), KM_SLEEP);

	/* search the hash for a matching handle */
	index = LH_HASH(vp);
	mutex_enter(&ldi_handle_hash_lock[index]);
	lhpp = handle_find_ref_nolock(vp, ident);

	if (*lhpp != NULL) {
		/* we found a handle in the hash */
		(*lhpp)->lh_ref++;
		mutex_exit(&ldi_handle_hash_lock[index]);

		LDI_ALLOCFREE((CE_WARN, "ldi handle alloc: dup "
		    "lh=0x%p, ident=0x%p, vp=0x%p, drv=%s, minor=0x%x",
		    (void *)*lhpp, (void *)ident, (void *)vp,
		    mod_major_to_name(getmajor(vp->v_rdev)),
		    getminor(vp->v_rdev)));

		kmem_free(lhp, sizeof (struct ldi_handle));
		return (*lhpp);
	}

	/* initialize the new handle */
	lhp->lh_ref = 1;
	lhp->lh_vp = vp;
	lhp->lh_ident = ident;
	mutex_init(lhp->lh_lock, NULL, MUTEX_DEFAULT, NULL);

	/* set the device type for this handle */
	lhp->lh_type = 0;
	if (STREAMSTAB(getmajor(vp->v_rdev))) {
		ASSERT(vp->v_type == VCHR);
		lhp->lh_type |= LH_STREAM;
	} else {
		lhp->lh_type |= LH_CBDEV;
	}

	/* get holds on other objects */
	ident_hold(ident);
	ASSERT(vp->v_count >= 1);
	VN_HOLD(vp);

	/* add it to the handle hash */
	lhp->lh_next = ldi_handle_hash[index];
	ldi_handle_hash[index] = lhp;
	atomic_add_long(&ldi_handle_hash_count, 1);

	LDI_ALLOCFREE((CE_WARN, "ldi handle alloc: new "
	    "lh=0x%p, ident=0x%p, vp=0x%p, drv=%s, minor=0x%x",
	    (void *)lhp, (void *)ident, (void *)vp,
	    mod_major_to_name(getmajor(vp->v_rdev)),
	    getminor(vp->v_rdev)));

	mutex_exit(&ldi_handle_hash_lock[index]);
	return (lhp);
}

static void
handle_release(struct ldi_handle *lhp)
{
	struct ldi_handle	**lhpp;
	uint_t			index;

	ASSERT(lhp != NULL);

	index = LH_HASH(lhp->lh_vp);
	mutex_enter(&ldi_handle_hash_lock[index]);

	LDI_ALLOCFREE((CE_WARN, "ldi handle release: "
	    "lh=0x%p, ident=0x%p, vp=0x%p, drv=%s, minor=0x%x",
	    (void *)lhp, (void *)lhp->lh_ident, (void *)lhp->lh_vp,
	    mod_major_to_name(getmajor(lhp->lh_vp->v_rdev)),
	    getminor(lhp->lh_vp->v_rdev)));

	ASSERT(lhp->lh_ref > 0);
	if (--lhp->lh_ref > 0) {
		/* there are more references to this handle */
		mutex_exit(&ldi_handle_hash_lock[index]);
		return;
	}

	/* this was the last reference/open for this handle.  free it. */
	lhpp = handle_find_ref_nolock(lhp->lh_vp, lhp->lh_ident);
	ASSERT((lhpp != NULL) && (*lhpp != NULL));
	*lhpp = lhp->lh_next;
	atomic_add_long(&ldi_handle_hash_count, -1);
	mutex_exit(&ldi_handle_hash_lock[index]);

	VN_RELE(lhp->lh_vp);
	ident_release(lhp->lh_ident);
	mutex_destroy(lhp->lh_lock);
	kmem_free(lhp, sizeof (struct ldi_handle));
}

/*
 * LDI event manipulation functions
 */
static void
handle_event_add(ldi_event_t *lep)
{
	struct ldi_handle *lhp = lep->le_lhp;

	ASSERT(lhp != NULL);

	mutex_enter(lhp->lh_lock);
	if (lhp->lh_events == NULL) {
		lhp->lh_events = lep;
		mutex_exit(lhp->lh_lock);
		return;
	}

	lep->le_next = lhp->lh_events;
	lhp->lh_events->le_prev = lep;
	lhp->lh_events = lep;
	mutex_exit(lhp->lh_lock);
}

static void
handle_event_remove(ldi_event_t *lep)
{
	struct ldi_handle *lhp = lep->le_lhp;

	ASSERT(lhp != NULL);

	mutex_enter(lhp->lh_lock);
	if (lep->le_prev)
		lep->le_prev->le_next = lep->le_next;
	if (lep->le_next)
		lep->le_next->le_prev = lep->le_prev;
	if (lhp->lh_events == lep)
		lhp->lh_events = lep->le_next;
	mutex_exit(lhp->lh_lock);
}

static void
i_ldi_callback(dev_info_t *dip, ddi_eventcookie_t event_cookie,
    void *arg, void *bus_impldata)
{
	ldi_event_t *lep = (ldi_event_t *)arg;

	ASSERT(lep != NULL);

	LDI_EVENTCB((CE_NOTE, "%s: dip=0x%p, "
	    "event_cookie=0x%p, ldi_eventp=0x%p", "i_ldi_callback",
	    (void *)dip, (void *)event_cookie, (void *)lep));

	lep->le_handler(lep->le_lhp, event_cookie, lep->le_arg, bus_impldata);
}

/*
 * LDI open helper functions
 */

/* get a vnode to a device by dev_t and otyp */
static int
ldi_vp_from_dev(dev_t dev, int otyp, vnode_t **vpp)
{
	dev_info_t		*dip;
	vnode_t			*vp;

	/* sanity check required input parameters */
	if ((dev == DDI_DEV_T_NONE) || (!OTYP_VALID(otyp)) || (vpp == NULL))
		return (EINVAL);

	if ((dip = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
		return (ENODEV);

	if (STREAMSTAB(getmajor(dev)) && (otyp != OTYP_CHR)) {
		ddi_release_devi(dip);	/* from e_ddi_hold_devi_by_dev */
		return (ENXIO);
	}

	vp = makespecvp(dev, OTYP_TO_VTYP(otyp));
	spec_assoc_vp_with_devi(vp, dip);
	ddi_release_devi(dip);	/* from e_ddi_hold_devi_by_dev */

	*vpp = vp;
	return (0);
}
/* get a vnode to a device by pathname */
static int
ldi_vp_from_name(char *path, vnode_t **vpp)
{
	vnode_t			*vp = NULL;
	int			ret;

	/* sanity check required input parameters */
	if ((path == NULL) || (vpp == NULL))
		return (EINVAL);

	if (modrootloaded) {
		cred_t *saved_cred = curthread->t_cred;

		/* we don't want lookupname to fail because of credentials */
		curthread->t_cred = kcred;

		/*
		 * all lookups should be done in the global zone.  but
		 * lookupnameat() won't actually do this if an absolute
		 * path is passed in.  since the ldi interfaces require an
		 * absolute path we pass lookupnameat() a pointer to
		 * the character after the leading '/' and tell it to
		 * start searching at the current system root directory.
		 */
		ASSERT(*path == '/');
		ret = lookupnameat(path + 1, UIO_SYSSPACE, FOLLOW, NULLVPP,
		    &vp, rootdir);

		/* restore this thread's credentials */
		curthread->t_cred = saved_cred;

		if (ret == 0) {
			if (!vn_matchops(vp, spec_getvnodeops()) ||
			    !VTYP_VALID(vp->v_type)) {
				VN_RELE(vp);
				return (ENXIO);
			}
		}
	}

	if (vp == NULL) {
		dev_info_t	*dip;
		dev_t		dev;
		int		spec_type;

		/*
		 * Root is not mounted, the minor node is not specified,
		 * or an OBP path has been specified.
		 */

		/*
		 * Determine if path can be pruned to produce an
		 * OBP or devfs path for resolve_pathname.
		 */
		if (strncmp(path, "/devices/", 9) == 0)
			path += strlen("/devices");

		/*
		 * if no minor node was specified the DEFAULT minor node
		 * will be returned.  if there is no DEFAULT minor node
		 * one will be fabricated of type S_IFCHR with the minor
		 * number equal to the instance number.
		 */
		ret = resolve_pathname(path, &dip, &dev, &spec_type);
		if (ret != 0)
			return (ENODEV);

		ASSERT(STYP_VALID(spec_type));
		vp = makespecvp(dev, STYP_TO_VTYP(spec_type));
		spec_assoc_vp_with_devi(vp, dip);
		ddi_release_devi(dip);
	}

	*vpp = vp;
	return (0);
}
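/*
 * Illustrative examples of the paths handled above (the device names are
 * hypothetical, not taken from this file): once the root file system is
 * mounted, a path such as "/dev/dsk/c0t0d0s0" is resolved through
 * lookupnameat().  If that does not yield a device vnode, a "/devices/..."
 * path is pruned to its devfs form and handed, like an OBP-style path
 * (e.g. "/pci@0,0/ide@1f,1/ide@0/cmdk@0,0:a"), to resolve_pathname().
 */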
static int
ldi_devid_match(ddi_devid_t devid, dev_info_t *dip, dev_t dev)
{
	char		*devidstr;
	ddi_prop_t	*propp;

	/* convert the devid to a string property */
	if ((devidstr = ddi_devid_str_encode(devid, NULL)) == NULL)
		return (0);

	/*
	 * Search for the devid.  For speed and ease in locking this
	 * code directly uses the property implementation.  See
	 * ddi_common_devid_to_devlist() for a comment as to why.
	 */
	mutex_enter(&(DEVI(dip)->devi_lock));

	/* check if there is a DDI_DEV_T_NONE devid property */
	propp = i_ddi_prop_search(DDI_DEV_T_NONE,
	    DEVID_PROP_NAME, DEVID_PROP_FLAGS, &DEVI(dip)->devi_hw_prop_ptr);
	if (propp != NULL) {
		if (ddi_devid_str_compare(propp->prop_val, devidstr) == 0) {
			/* a DDI_DEV_T_NONE devid exists and matches */
			mutex_exit(&(DEVI(dip)->devi_lock));
			ddi_devid_str_free(devidstr);
			return (1);
		} else {
			/* a DDI_DEV_T_NONE devid exists and doesn't match */
			mutex_exit(&(DEVI(dip)->devi_lock));
			ddi_devid_str_free(devidstr);
			return (0);
		}
	}

	/* check if there is a devt specific devid property */
	propp = i_ddi_prop_search(dev,
	    DEVID_PROP_NAME, DEVID_PROP_FLAGS, &(DEVI(dip)->devi_hw_prop_ptr));
	if (propp != NULL) {
		if (ddi_devid_str_compare(propp->prop_val, devidstr) == 0) {
			/* a devt specific devid exists and matches */
			mutex_exit(&(DEVI(dip)->devi_lock));
			ddi_devid_str_free(devidstr);
			return (1);
		} else {
			/* a devt specific devid exists and doesn't match */
			mutex_exit(&(DEVI(dip)->devi_lock));
			ddi_devid_str_free(devidstr);
			return (0);
		}
	}

	/* we didn't find any devids associated with the device */
	mutex_exit(&(DEVI(dip)->devi_lock));
	ddi_devid_str_free(devidstr);
	return (0);
}

/* get a handle to a device by devid and minor name */
static int
ldi_vp_from_devid(ddi_devid_t devid, char *minor_name, vnode_t **vpp)
{
	dev_info_t		*dip;
	vnode_t			*vp;
	int			ret, i, ndevs, styp;
	dev_t			dev, *devs;

	/* sanity check required input parameters */
	if ((devid == NULL) || (minor_name == NULL) || (vpp == NULL))
		return (EINVAL);

	ret = ddi_lyr_devid_to_devlist(devid, minor_name, &ndevs, &devs);
	if ((ret != DDI_SUCCESS) || (ndevs <= 0))
		return (ENODEV);

	for (i = 0; i < ndevs; i++) {
		dev = devs[i];

		if ((dip = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
			continue;

		/*
		 * now we have to verify that the devid of the disk
		 * still matches what was requested.
		 *
		 * we have to do this because the devid could have
		 * changed between the call to ddi_lyr_devid_to_devlist()
		 * and e_ddi_hold_devi_by_dev().  this is because when
		 * ddi_lyr_devid_to_devlist() returns a list of devts
		 * there is no kind of hold on those devts so a device
		 * could have been replaced out from under us in the
		 * interim.
		 */
		if ((i_ddi_minorname_to_devtspectype(dip, minor_name,
		    NULL, &styp) == DDI_SUCCESS) &&
		    ldi_devid_match(devid, dip, dev))
			break;

		ddi_release_devi(dip);	/* from e_ddi_hold_devi_by_dev() */
	}

	ddi_lyr_free_devlist(devs, ndevs);

	if (i == ndevs)
		return (ENODEV);

	ASSERT(STYP_VALID(styp));
	vp = makespecvp(dev, STYP_TO_VTYP(styp));
	spec_assoc_vp_with_devi(vp, dip);
	ddi_release_devi(dip);	/* from e_ddi_hold_devi_by_dev */

	*vpp = vp;
	return (0);
}

/* given a vnode, open a device */
static int
ldi_open_by_vp(vnode_t **vpp, int flag, cred_t *cr,
    ldi_handle_t *lhp, struct ldi_ident *li)
{
	struct ldi_handle	*nlhp;
	vnode_t			*vp;
	int			err;

	ASSERT((vpp != NULL) && (*vpp != NULL));
	ASSERT((lhp != NULL) && (li != NULL));

	vp = *vpp;
	/* if the vnode passed in is not a device, then bail */
	if (!vn_matchops(vp, spec_getvnodeops()) || !VTYP_VALID(vp->v_type))
		return (ENXIO);

	/*
	 * the caller may have specified a node that
	 * doesn't have cb_ops defined.  the ldi doesn't yet
	 * support opening devices without a valid cb_ops.
	 */
	if (devopsp[getmajor(vp->v_rdev)]->devo_cb_ops == NULL)
		return (ENXIO);

	/* open the device */
	if ((err = VOP_OPEN(&vp, flag | FKLYR, cr)) != 0)
		return (err);

	/* possible clone open, make sure that we still have a spec node */
	ASSERT(vn_matchops(vp, spec_getvnodeops()));

	nlhp = handle_alloc(vp, li);

	if (vp != *vpp) {
		/*
		 * allocating the layered handle took a new hold on the vnode
		 * so we can release the hold that was returned by the clone
		 * open
		 */
		LDI_OPENCLOSE((CE_WARN, "%s: lh=0x%p",
		    "ldi clone open", (void *)nlhp));
	} else {
		LDI_OPENCLOSE((CE_WARN, "%s: lh=0x%p",
		    "ldi open", (void *)nlhp));
	}

	/* Flush back any dirty pages associated with the device. */
	if (nlhp->lh_type & LH_CBDEV) {
		vnode_t *cvp = common_specvp(nlhp->lh_vp);
		dev_t dev = cvp->v_rdev;

		(void) VOP_PUTPAGE(cvp, 0, 0, B_INVAL, kcred);
		bflush(dev);
	}

	*vpp = vp;
	*lhp = (ldi_handle_t)nlhp;
	return (0);
}

/* Call a driver's prop_op(9E) interface */
static int
i_ldi_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int flags, char *name, caddr_t valuep, int *lengthp)
{
	struct dev_ops	*ops = NULL;
	int		res;

	ASSERT((dip != NULL) && (name != NULL));
	ASSERT((prop_op == PROP_LEN) || (valuep != NULL));
	ASSERT(lengthp != NULL);

	/*
	 * we can only be invoked after a driver has been opened and
	 * someone has a layered handle to it, so there had better be
	 * a valid ops vector.
	 */
	ops = DEVI(dip)->devi_ops;
	ASSERT(ops && ops->devo_cb_ops);

	/*
	 * Some nexus drivers incorrectly set cb_prop_op to nodev,
	 * nulldev or even NULL.
	 */
	if ((ops->devo_cb_ops->cb_prop_op == nodev) ||
	    (ops->devo_cb_ops->cb_prop_op == nulldev) ||
	    (ops->devo_cb_ops->cb_prop_op == NULL)) {
		return (DDI_PROP_NOT_FOUND);
	}

	/* check if this is actually a DDI_DEV_T_ANY query */
	if (flags & LDI_DEV_T_ANY) {
		flags &= ~LDI_DEV_T_ANY;
		dev = DDI_DEV_T_ANY;
	}

	res = cdev_prop_op(dev, dip, prop_op, flags, name, valuep, lengthp);
	return (res);
}

static void
i_ldi_prop_op_free(struct prop_driver_data *pdd)
{
	kmem_free(pdd, pdd->pdd_size);
}

static caddr_t
i_ldi_prop_op_alloc(int prop_len)
{
	struct prop_driver_data	*pdd;
	int			pdd_size;

	pdd_size = sizeof (struct prop_driver_data) + prop_len;
	pdd = kmem_alloc(pdd_size, KM_SLEEP);
	pdd->pdd_size = pdd_size;
	pdd->pdd_prop_free = i_ldi_prop_op_free;
	return ((caddr_t)&pdd[1]);
}

/*
 * i_ldi_prop_op_typed() is a wrapper for i_ldi_prop_op that is used
 * by the typed ldi property lookup interfaces.
 */
static int
i_ldi_prop_op_typed(dev_t dev, dev_info_t *dip, int flags, char *name,
    caddr_t *datap, int *lengthp, int elem_size)
{
	caddr_t	prop_val;
	int	prop_len, res;

	ASSERT((dip != NULL) && (name != NULL));
	ASSERT((datap != NULL) && (lengthp != NULL));

	/*
	 * first call the driver's prop_op() interface to allow it
	 * to override default property values.
	 */
	res = i_ldi_prop_op(dev, dip, PROP_LEN,
	    flags | DDI_PROP_DYNAMIC, name, NULL, &prop_len);
	if (res != DDI_PROP_SUCCESS)
		return (DDI_PROP_NOT_FOUND);

	/* sanity check the property length */
	if (prop_len == 0) {
		/*
		 * the ddi typed interfaces don't allow a driver to
		 * create properties with a length of 0.  so we should
		 * prevent drivers from returning 0 length dynamic
		 * properties for typed property lookups.
		 */
		return (DDI_PROP_NOT_FOUND);
	}

	/* sanity check the property length against the element size */
	if (elem_size && ((prop_len % elem_size) != 0))
		return (DDI_PROP_NOT_FOUND);

	/*
	 * got it.  now allocate a prop_driver_data struct so that the
	 * user can free the property via ddi_prop_free().
	 */
	prop_val = i_ldi_prop_op_alloc(prop_len);

	/* lookup the property again, this time get the value */
	res = i_ldi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
	    flags | DDI_PROP_DYNAMIC, name, prop_val, &prop_len);
	if (res != DDI_PROP_SUCCESS) {
		ddi_prop_free(prop_val);
		return (DDI_PROP_NOT_FOUND);
	}

	/* sanity check the property length */
	if (prop_len == 0) {
		ddi_prop_free(prop_val);
		return (DDI_PROP_NOT_FOUND);
	}

	/* sanity check the property length against the element size */
	if (elem_size && ((prop_len % elem_size) != 0)) {
		ddi_prop_free(prop_val);
		return (DDI_PROP_NOT_FOUND);
	}

	/*
	 * return the prop_driver_data struct and, optionally, the length
	 * of the data.
	 */
	*datap = prop_val;
	*lengthp = prop_len;

	return (DDI_PROP_SUCCESS);
}

/*
 * i_check_string looks at a string property and makes sure it's
 * a valid null-terminated string
 */
static int
i_check_string(char *str, int prop_len)
{
	int i;

	ASSERT(str != NULL);

	for (i = 0; i < prop_len; i++) {
		if (str[i] == '\0')
			return (0);
	}
	return (1);
}

/*
 * i_pack_string_array takes a string array property that is represented
 * as a concatenation of strings (with the NULL character included for
 * each string) and converts it into a format that can be returned by
 * ldi_prop_lookup_string_array.
 */
static int
i_pack_string_array(char *str_concat, int prop_len,
    char ***str_arrayp, int *nelemp)
{
	int		i, nelem, pack_size;
	char		**str_array, *strptr;

	/*
	 * first we need to sanity check the input string array.
	 * in essence this can be done by making sure that the last
	 * character of the array passed in is null.  (meaning the last
	 * string in the array is NULL terminated.)
	 */
	if (str_concat[prop_len - 1] != '\0')
		return (1);

	/* now let's count the number of strings in the array */
	for (nelem = i = 0; i < prop_len; i++)
		if (str_concat[i] == '\0')
			nelem++;
	ASSERT(nelem >= 1);

	/* now let's allocate memory for the new packed property */
	pack_size = (sizeof (char *) * (nelem + 1)) + prop_len;
	str_array = (char **)i_ldi_prop_op_alloc(pack_size);

	/* let's copy the actual string data into the new property */
	strptr = (char *)&(str_array[nelem + 1]);
	bcopy(str_concat, strptr, prop_len);

	/* now initialize the string array pointers */
	for (i = 0; i < nelem; i++) {
		str_array[i] = strptr;
		strptr += strlen(strptr) + 1;
	}
	str_array[nelem] = NULL;

	/* set the return values */
	*str_arrayp = str_array;
	*nelemp = nelem;

	return (0);
}
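/*
 * For reference, the buffer built above is laid out as (nelem + 1)
 * pointers followed by a copy of the concatenated string data:
 *
 *	str_array[0..nelem-1]	pointers into the string data below
 *	str_array[nelem]	NULL terminator for the pointer array
 *	"str0\0str1\0..."	copy of str_concat (prop_len bytes)
 *
 * Because the whole buffer comes from i_ldi_prop_op_alloc(), a caller of
 * ldi_prop_lookup_string_array() can free it with ddi_prop_free().
 */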
/*
 * LDI Project private device usage interfaces
 */

/*
 * Get a count of how many devices are currently open by different consumers
 */
int
ldi_usage_count()
{
	return (ldi_handle_hash_count);
}

static void
ldi_usage_walker_tgt_helper(ldi_usage_t *ldi_usage, vnode_t *vp)
{
	dev_info_t	*dip;
	dev_t		dev;

	ASSERT(STYP_VALID(VTYP_TO_STYP(vp->v_type)));

	/* get the target devt */
	dev = vp->v_rdev;

	/* try to get the target dip */
	dip = VTOCS(vp)->s_dip;
	if (dip != NULL) {
		e_ddi_hold_devi(dip);
	} else if (dev != DDI_DEV_T_NONE) {
		dip = e_ddi_hold_devi_by_dev(dev, 0);
	}

	/* set the target information */
	ldi_usage->tgt_name = mod_major_to_name(getmajor(dev));
	ldi_usage->tgt_modid = mod_name_to_modid(ldi_usage->tgt_name);
	ldi_usage->tgt_devt = dev;
	ldi_usage->tgt_spec_type = VTYP_TO_STYP(vp->v_type);
	ldi_usage->tgt_dip = dip;
}


static int
ldi_usage_walker_helper(struct ldi_ident *lip, vnode_t *vp,
    void *arg, int (*callback)(const ldi_usage_t *, void *))
{
	ldi_usage_t	ldi_usage;
	struct devnames	*dnp;
	dev_info_t	*dip;
	major_t		major;
	dev_t		dev;
	int		ret = LDI_USAGE_CONTINUE;

	/* set the target device information */
	ldi_usage_walker_tgt_helper(&ldi_usage, vp);

	/* get the source devt */
	dev = lip->li_dev;

	/* try to get the source dip */
	dip = lip->li_dip;
	if (dip != NULL) {
		e_ddi_hold_devi(dip);
	} else if (dev != DDI_DEV_T_NONE) {
		dip = e_ddi_hold_devi_by_dev(dev, 0);
	}

	/* set the valid source information */
	ldi_usage.src_modid = lip->li_modid;
	ldi_usage.src_name = lip->li_modname;
	ldi_usage.src_devt = dev;
	ldi_usage.src_dip = dip;

	/*
	 * if the source ident represents either:
	 *
	 * - a kernel module (and not a device or device driver)
	 * - a device node
	 *
	 * then we currently have all the info we need to report the
	 * usage information so invoke the callback function.
	 */
	if (((lip->li_major == -1) && (dev == DDI_DEV_T_NONE)) ||
	    (dip != NULL)) {
		ret = callback(&ldi_usage, arg);
		if (dip != NULL)
			ddi_release_devi(dip);
		if (ldi_usage.tgt_dip != NULL)
			ddi_release_devi(ldi_usage.tgt_dip);
		return (ret);
	}

	/*
	 * now this is kinda gross.
	 *
	 * what we do here is attempt to associate every device instance
	 * of the source driver on the system with the open target driver.
	 * we do this because we don't know which instance of the device
	 * could potentially access the lower device so we assume that all
	 * the instances could access it.
	 *
	 * there are two ways we could have gotten here:
	 *
	 * 1) this layered ident represents one created using only a
	 *    major number or a driver module name.  this means that when
	 *    it was created we could not associate it with a particular
	 *    dev_t or device instance.
	 *
	 *    when could this possibly happen you ask?
	 *
	 *    a perfect example of this is streams persistent links.
	 *    when a persistent streams link is formed we can't associate
	 *    the lower device stream with any particular upper device
	 *    stream or instance.  this is because any particular upper
	 *    device stream could be closed, then another could be
	 *    opened with a different dev_t and device instance, and it
	 *    would still have access to the lower linked stream.
	 *
	 *    since any instance of the upper streams driver could
	 *    potentially access the lower stream whenever it wants,
	 *    we represent that here by associating the opened lower
	 *    device with every existing device instance of the upper
	 *    streams driver.
	 *
	 * 2) This case should really never happen but we'll include it
	 *    for completeness.
	 *
	 *    it's possible that we could have gotten here because we
	 *    have a dev_t for the upper device but we couldn't find a
	 *    dip associated with that dev_t.
	 *
	 *    the only types of devices that have a dev_t without an
	 *    associated dip are unbound DLPIv2 network devices.  These
	 *    types of devices exist to be able to attach a stream to any
	 *    instance of a hardware network device.  since these types of
	 *    devices are usually hardware devices they should never
	 *    really have other devices open.
	 */
	if (dev != DDI_DEV_T_NONE)
		major = getmajor(dev);
	else
		major = lip->li_major;

	ASSERT((major >= 0) && (major < devcnt));

	dnp = &devnamesp[major];
	LOCK_DEV_OPS(&dnp->dn_lock);
	dip = dnp->dn_head;
	while ((dip) && (ret == LDI_USAGE_CONTINUE)) {
		e_ddi_hold_devi(dip);
		UNLOCK_DEV_OPS(&dnp->dn_lock);

		/* set the source dip */
		ldi_usage.src_dip = dip;

		/* invoke the callback function */
		ret = callback(&ldi_usage, arg);

		LOCK_DEV_OPS(&dnp->dn_lock);
		ddi_release_devi(dip);
		dip = ddi_get_next(dip);
	}
	UNLOCK_DEV_OPS(&dnp->dn_lock);

	/* if there was a target dip, release it */
	if (ldi_usage.tgt_dip != NULL)
		ddi_release_devi(ldi_usage.tgt_dip);

	return (ret);
}

/*
 * ldi_usage_walker() - this walker reports LDI kernel device usage
 * information via the callback() callback function.  the LDI keeps track
 * of what devices are being accessed in its own internal data structures.
 * this function walks those data structures to determine device usage.
 */
void
ldi_usage_walker(void *arg, int (*callback)(const ldi_usage_t *, void *))
{
	struct ldi_handle	*lhp;
	struct ldi_ident	*lip;
	vnode_t			*vp;
	int			i;
	int			ret = LDI_USAGE_CONTINUE;

	for (i = 0; i < LH_HASH_SZ; i++) {
		mutex_enter(&ldi_handle_hash_lock[i]);

		lhp = ldi_handle_hash[i];
		while ((lhp != NULL) && (ret == LDI_USAGE_CONTINUE)) {
			lip = lhp->lh_ident;
			vp = lhp->lh_vp;

			/* invoke the devinfo callback function */
			ret = ldi_usage_walker_helper(lip, vp, arg, callback);

			lhp = lhp->lh_next;
		}
		mutex_exit(&ldi_handle_hash_lock[i]);

		if (ret != LDI_USAGE_CONTINUE)
			break;
	}
}
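/*
 * A minimal sketch of a walker callback (illustrative only; it relies on
 * nothing beyond the ldi_usage_t fields filled in by the helpers above
 * and is not part of this file's interfaces):
 *
 *	static int
 *	my_usage_cb(const ldi_usage_t *up, void *arg)
 *	{
 *		cmn_err(CE_CONT, "%s -> %s\n", up->src_name, up->tgt_name);
 *		return (LDI_USAGE_CONTINUE);
 *	}
 *
 *	ldi_usage_walker(NULL, my_usage_cb);
 *
 * Returning anything other than LDI_USAGE_CONTINUE terminates the walk.
 */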
/*
 * LDI Project private interfaces (streams linking interfaces)
 *
 * Streams supports a type of built in device layering via linking.
 * Certain types of streams drivers can be streams multiplexors.
 * A streams multiplexor supports the I_LINK/I_PLINK operation.
 * These operations allow other streams devices to be linked under the
 * multiplexor.  By definition all streams multiplexors are devices
 * so this linking is a type of device layering where the multiplexor
 * device is layered on top of the device linked below it.
 */

/*
 * ldi_mlink_lh() is invoked when streams are linked using LDI handles.
 * It is not used for normal I_LINKs and I_PLINKs using file descriptors.
 *
 * The streams framework keeps track of links via the file_t of the lower
 * stream.  The LDI keeps track of devices using a vnode.  In the case
 * of a streams link created via an LDI handle, ldi_mlink_lh() allocates
 * a file_t that the streams framework can use to track the linkage.
 */
int
ldi_mlink_lh(vnode_t *vp, int cmd, intptr_t arg, cred_t *crp, int *rvalp)
{
	struct ldi_handle	*lhp = (struct ldi_handle *)arg;
	vnode_t			*vpdown;
	file_t			*fpdown;
	int			err;

	if (lhp == NULL)
		return (EINVAL);

	vpdown = lhp->lh_vp;
	ASSERT(vn_matchops(vpdown, spec_getvnodeops()));
	ASSERT(cmd == _I_PLINK_LH);

	/*
	 * create a new lower vnode and a file_t that points to it,
	 * streams linking requires a file_t.  falloc() returns with
	 * fpdown locked.
	 */
	VN_HOLD(vpdown);
	(void) falloc(vpdown, FREAD|FWRITE, &fpdown, NULL);
	mutex_exit(&fpdown->f_tlock);

	/* try to establish the link */
	err = mlink_file(vp, I_PLINK, fpdown, crp, rvalp, 1);

	if (err != 0) {
		/* the link failed, free the file_t and release the vnode */
		mutex_enter(&fpdown->f_tlock);
		unfalloc(fpdown);
		VN_RELE(vpdown);
	}

	return (err);
}

/*
 * ldi_mlink_fp() is invoked for all successful streams linkages created
 * via I_LINK and I_PLINK.  ldi_mlink_fp() records the linkage information
 * in its internal state so that the devinfo snapshot code has some
 * observability into streams device linkage information.
 */
void
ldi_mlink_fp(struct stdata *stp, file_t *fpdown, int lhlink, int type)
{
	vnode_t			*vp = fpdown->f_vnode;
	struct snode		*sp, *csp;
	ldi_ident_t		li;
	major_t			major;
	int			ret;

	/* if the lower stream is not a device then return */
	if (!vn_matchops(vp, spec_getvnodeops()))
		return;

	ASSERT(!servicing_interrupt());

	LDI_STREAMS_LNK((CE_NOTE, "%s: linking streams "
	    "stp=0x%p, fpdown=0x%p", "ldi_mlink_fp",
	    (void *)stp, (void *)fpdown));

	sp = VTOS(vp);
	csp = VTOS(sp->s_commonvp);

	/* check if this was a plink via a layered handle */
	if (lhlink) {
		/*
		 * increment the common snode s_count.
		 *
		 * this is done because after the link operation there
		 * are two ways that s_count can be decremented.
		 *
		 * when the layered handle used to create the link is
		 * closed, spec_close() is called and it will decrement
		 * s_count in the common snode.  if we don't increment
		 * s_count here then this could cause spec_close() to
		 * actually close the device while it's still linked
		 * under a multiplexer.
		 *
		 * also, when the lower stream is unlinked, closef() is
		 * called for the file_t associated with this snode.
		 * closef() will call spec_close(), which will decrement
		 * s_count.  if we don't increment s_count here then this
		 * could cause spec_close() to actually close the device
		 * while there may still be valid layered handles
		 * pointing to it.
		 */
		mutex_enter(&csp->s_lock);
		ASSERT(csp->s_count >= 1);
		csp->s_count++;
		mutex_exit(&csp->s_lock);

		/*
		 * decrement the f_count.
		 * this is done because the layered driver framework does
		 * not actually cache a copy of the file_t allocated to
		 * do the link.  this is done here instead of in ldi_mlink_lh()
		 * because there is a window in ldi_mlink_lh() between where
		 * mlink_file() returns and we would decrement the f_count
		 * when the stream could be unlinked.
		 */
		mutex_enter(&fpdown->f_tlock);
		fpdown->f_count--;
		mutex_exit(&fpdown->f_tlock);
	}

	/*
	 * NOTE: here we rely on the streams subsystem not allowing
	 * a stream to be multiplexed more than once.  if this
	 * changes, we break.
	 *
	 * mark the snode/stream as multiplexed
	 */
	mutex_enter(&sp->s_lock);
	ASSERT(!(sp->s_flag & SMUXED));
	sp->s_flag |= SMUXED;
	mutex_exit(&sp->s_lock);

	/* get a layered ident for the upper stream */
	if (type == LINKNORMAL) {
		/*
		 * if the link is not persistent then we can associate
		 * the upper stream with a dev_t.  this is because the
		 * upper stream is associated with a vnode, which is
		 * associated with a dev_t and this binding can't change
		 * during the life of the stream.  since the link isn't
		 * persistent once the stream is destroyed the link is
		 * destroyed.  so the dev_t will be valid for the life
		 * of the link.
		 */
		ret = ldi_ident_from_stream(getendq(stp->sd_wrq), &li);
	} else {
		/*
		 * if the link is persistent we can only associate the
		 * link with a driver (and not a dev_t.)  this is
		 * because subsequent opens of the upper device may result
		 * in a different stream (and dev_t) having access to
		 * the lower stream.
		 *
		 * for example, if the upper stream is closed after the
		 * persistent link operation is completed, a subsequent
		 * open of the upper device will create a new stream which
		 * may have a different dev_t and an unlink operation
		 * can be performed using this new upper stream.
		 */
		ASSERT(type == LINKPERSIST);
		major = getmajor(stp->sd_vnode->v_rdev);
		ret = ldi_ident_from_major(major, &li);
	}

	ASSERT(ret == 0);
	(void) handle_alloc(vp, (struct ldi_ident *)li);
	ldi_ident_release(li);
}

void
ldi_munlink_fp(struct stdata *stp, file_t *fpdown, int type)
{
	struct ldi_handle	*lhp;
	vnode_t			*vp = (vnode_t *)fpdown->f_vnode;
	struct snode		*sp;
	ldi_ident_t		li;
	major_t			major;
	int			ret;

	/* if the lower stream is not a device then return */
	if (!vn_matchops(vp, spec_getvnodeops()))
		return;

	ASSERT(!servicing_interrupt());
	ASSERT((type == LINKNORMAL) || (type == LINKPERSIST));

	LDI_STREAMS_LNK((CE_NOTE, "%s: unlinking streams "
	    "stp=0x%p, fpdown=0x%p", "ldi_munlink_fp",
	    (void *)stp, (void *)fpdown));

	/*
	 * NOTE: here we rely on the streams subsystem not allowing
	 * a stream to be multiplexed more than once.  if this
	 * changes, we break.
	 *
	 * mark the snode/stream as not multiplexed
	 */
	sp = VTOS(vp);
	mutex_enter(&sp->s_lock);
	ASSERT(sp->s_flag & SMUXED);
	sp->s_flag &= ~SMUXED;
	mutex_exit(&sp->s_lock);

	/*
	 * clear the owner for this snode
	 * see the comment in ldi_mlink_fp() for information about how
	 * the ident is allocated
	 */
	if (type == LINKNORMAL) {
		ret = ldi_ident_from_stream(getendq(stp->sd_wrq), &li);
	} else {
		ASSERT(type == LINKPERSIST);
		major = getmajor(stp->sd_vnode->v_rdev);
		ret = ldi_ident_from_major(major, &li);
	}

	ASSERT(ret == 0);
	lhp = handle_find(vp, (struct ldi_ident *)li);
	handle_release(lhp);
	ldi_ident_release(li);
}

/*
 * LDI Consolidation private interfaces
 */
int
ldi_ident_from_mod(struct modlinkage *modlp, ldi_ident_t *lip)
{
	struct modctl		*modp;
	major_t			major;
	char			*name;

	if ((modlp == NULL) || (lip == NULL))
		return (EINVAL);

	ASSERT(!servicing_interrupt());

	modp = mod_getctl(modlp);
	if (modp == NULL)
		return (EINVAL);
	name = modp->mod_modname;
	if (name == NULL)
		return (EINVAL);
	major = mod_name_to_major(name);

	*lip = (ldi_ident_t)ident_alloc(name, NULL, DDI_DEV_T_NONE, major);

	LDI_ALLOCFREE((CE_WARN, "%s: li=0x%p, mod=%s",
	    "ldi_ident_from_mod", (void *)*lip, name));

	return (0);
}

ldi_ident_t
ldi_ident_from_anon()
{
	ldi_ident_t	lip;

	ASSERT(!servicing_interrupt());

	lip = (ldi_ident_t)ident_alloc("genunix", NULL, DDI_DEV_T_NONE, -1);

	LDI_ALLOCFREE((CE_WARN, "%s: li=0x%p, mod=%s",
	    "ldi_ident_from_anon", (void *)lip, "genunix"));

	return (lip);
}


/*
 * LDI Public interfaces
 */
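/*
 * A rough sketch of how a layered consumer typically uses the interfaces
 * below (illustrative only: "dip", the device path, and the uio setup are
 * hypothetical, and a real caller must check every return value):
 *
 *	ldi_ident_t	li;
 *	ldi_handle_t	lh;
 *
 *	(void) ldi_ident_from_dip(dip, &li);
 *	if (ldi_open_by_name("/dev/dsk/c0t0d0s0", FREAD | FWRITE,
 *	    kcred, &lh, li) == 0) {
 *		(void) ldi_read(lh, &uio, kcred);
 *		(void) ldi_close(lh, FREAD | FWRITE, kcred);
 *	}
 *	ldi_ident_release(li);
 *
 * The ident identifies the consumer; the handle represents one open of
 * the underlying device.
 */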
int
ldi_ident_from_stream(struct queue *sq, ldi_ident_t *lip)
{
	struct stdata		*stp;
	dev_t			dev;
	char			*name;

	if ((sq == NULL) || (lip == NULL))
		return (EINVAL);

	ASSERT(!servicing_interrupt());

	stp = sq->q_stream;
	if (!vn_matchops(stp->sd_vnode, spec_getvnodeops()))
		return (EINVAL);

	dev = stp->sd_vnode->v_rdev;
	name = mod_major_to_name(getmajor(dev));
	if (name == NULL)
		return (EINVAL);
	*lip = (ldi_ident_t)ident_alloc(name, NULL, dev, -1);

	LDI_ALLOCFREE((CE_WARN,
	    "%s: li=0x%p, mod=%s, minor=0x%x, stp=0x%p",
	    "ldi_ident_from_stream", (void *)*lip, name, getminor(dev),
	    (void *)stp));

	return (0);
}

int
ldi_ident_from_dev(dev_t dev, ldi_ident_t *lip)
{
	char			*name;

	if (lip == NULL)
		return (EINVAL);

	ASSERT(!servicing_interrupt());

	name = mod_major_to_name(getmajor(dev));
	if (name == NULL)
		return (EINVAL);
	*lip = (ldi_ident_t)ident_alloc(name, NULL, dev, -1);

	LDI_ALLOCFREE((CE_WARN,
	    "%s: li=0x%p, mod=%s, minor=0x%x",
	    "ldi_ident_from_dev", (void *)*lip, name, getminor(dev)));

	return (0);
}

int
ldi_ident_from_dip(dev_info_t *dip, ldi_ident_t *lip)
{
	struct dev_info		*devi = (struct dev_info *)dip;
	char			*name;

	if ((dip == NULL) || (lip == NULL))
		return (EINVAL);

	ASSERT(!servicing_interrupt());

	name = mod_major_to_name(devi->devi_major);
	if (name == NULL)
		return (EINVAL);
	*lip = (ldi_ident_t)ident_alloc(name, dip, DDI_DEV_T_NONE, -1);

	LDI_ALLOCFREE((CE_WARN,
	    "%s: li=0x%p, mod=%s, dip=0x%p",
	    "ldi_ident_from_dip", (void *)*lip, name, (void *)devi));

	return (0);
}

int
ldi_ident_from_major(major_t major, ldi_ident_t *lip)
{
	char			*name;

	if (lip == NULL)
		return (EINVAL);

	ASSERT(!servicing_interrupt());

	name = mod_major_to_name(major);
	if (name == NULL)
		return (EINVAL);
	*lip = (ldi_ident_t)ident_alloc(name, NULL, DDI_DEV_T_NONE, major);

	LDI_ALLOCFREE((CE_WARN,
	    "%s: li=0x%p, mod=%s",
	    "ldi_ident_from_major", (void *)*lip, name));

	return (0);
}

void
ldi_ident_release(ldi_ident_t li)
{
	struct ldi_ident	*ident = (struct ldi_ident *)li;
	char			*name;

	if (li == NULL)
		return;

	ASSERT(!servicing_interrupt());

	name = ident->li_modname;

	LDI_ALLOCFREE((CE_WARN,
	    "%s: li=0x%p, mod=%s",
	    "ldi_ident_release", (void *)li, name));

	ident_release((struct ldi_ident *)li);
}

/* get a handle to a device by dev_t and otyp */
int
ldi_open_by_dev(dev_t *devp, int otyp, int flag, cred_t *cr,
    ldi_handle_t *lhp, ldi_ident_t li)
{
	struct ldi_ident	*lip = (struct ldi_ident *)li;
	int			ret;
	vnode_t			*vp;

	/* sanity check required input parameters */
	if ((devp == NULL) || (!OTYP_VALID(otyp)) || (cr == NULL) ||
	    (lhp == NULL) || (lip == NULL))
		return (EINVAL);

	ASSERT(!servicing_interrupt());

	if ((ret = ldi_vp_from_dev(*devp, otyp, &vp)) != 0)
		return (ret);

	if ((ret = ldi_open_by_vp(&vp, flag, cr, lhp, lip)) == 0) {
		*devp = vp->v_rdev;
	}
	VN_RELE(vp);

	return (ret);
}

/* get a handle to a device by pathname */
int
ldi_open_by_name(char *pathname, int flag, cred_t *cr,
    ldi_handle_t *lhp, ldi_ident_t li)
{
	struct ldi_ident	*lip = (struct ldi_ident *)li;
	int			ret;
	vnode_t			*vp;

	/* sanity check required input parameters */
	if ((pathname == NULL) || (*pathname != '/') ||
	    (cr == NULL) || (lhp == NULL) || (lip == NULL))
		return (EINVAL);

	ASSERT(!servicing_interrupt());

	if ((ret = ldi_vp_from_name(pathname, &vp)) != 0)
		return (ret);

	ret = ldi_open_by_vp(&vp, flag, cr, lhp, lip);
	VN_RELE(vp);

	return (ret);
}

/* get a handle to a device by devid and minor_name */
int
ldi_open_by_devid(ddi_devid_t devid, char *minor_name,
    int flag, cred_t *cr, ldi_handle_t *lhp, ldi_ident_t li)
{
	struct ldi_ident	*lip = (struct ldi_ident *)li;
	int			ret;
	vnode_t			*vp;

	/* sanity check required input parameters */
	if ((minor_name == NULL) || (cr == NULL) ||
	    (lhp == NULL) || (lip == NULL))
		return (EINVAL);

	ASSERT(!servicing_interrupt());

	if ((ret = ldi_vp_from_devid(devid, minor_name, &vp)) != 0)
		return (ret);

	ret = ldi_open_by_vp(&vp, flag, cr, lhp, lip);
	VN_RELE(vp);

	return (ret);
}

int
ldi_close(ldi_handle_t lh, int flag, cred_t *cr)
{
	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
	struct ldi_event	*lep;
	int			err = 0;

	if (lh == NULL)
		return (EINVAL);

	ASSERT(!servicing_interrupt());

	/* Flush back any dirty pages associated with the device. */
	if (handlep->lh_type & LH_CBDEV) {
		vnode_t *cvp = common_specvp(handlep->lh_vp);
		dev_t dev = cvp->v_rdev;

		(void) VOP_PUTPAGE(cvp, 0, 0, B_INVAL, kcred);
		bflush(dev);
	}

	/*
	 * Any event handlers should have been unregistered by the
	 * time ldi_close() is called.  If they haven't then it's a
	 * bug.
	 *
	 * In a debug kernel we'll panic to make the problem obvious.
	 */
	ASSERT(handlep->lh_events == NULL);

	/*
	 * On a production kernel we'll "do the right thing" (unregister
	 * the event handlers) and then complain about having to do the
	 * work ourselves.
	 */
	while ((lep = handlep->lh_events) != NULL) {
		err = 1;
		(void) ldi_remove_event_handler(lh, (ldi_callback_id_t)lep);
	}
	if (err) {
		struct ldi_ident *lip = handlep->lh_ident;
		ASSERT(lip != NULL);
		cmn_err(CE_NOTE, "ldi err: %s "
		    "failed to unregister layered event handlers before "
		    "closing devices", lip->li_modname);
	}

	/* do a layered close on the device */
	err = VOP_CLOSE(handlep->lh_vp, flag | FKLYR, 1, (offset_t)0, cr);

	LDI_OPENCLOSE((CE_WARN, "%s: lh=0x%p", "ldi close", (void *)lh));

	/*
	 * Free the handle even if the device close failed.  why?
	 *
	 * If the device close failed we can't really make assumptions
	 * about the device's state so we shouldn't allow access to the
	 * device via this handle any more.  If the device consumer wants
	 * to access the device again they should open it again.
	 *
	 * This is the same way file/device close failures are handled
	 * in other places like spec_close() and closeandsetf().
	 */
	handle_release(handlep);
	return (err);
}
int
ldi_read(ldi_handle_t lh, struct uio *uiop, cred_t *credp)
{
	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
	vnode_t			*vp;
	dev_t			dev;
	int			ret;

	if (lh == NULL)
		return (EINVAL);

	vp = handlep->lh_vp;
	dev = vp->v_rdev;
	if (handlep->lh_type & LH_CBDEV) {
		ret = cdev_read(dev, uiop, credp);
	} else if (handlep->lh_type & LH_STREAM) {
		ret = strread(vp, uiop, credp);
	} else {
		return (ENOTSUP);
	}
	return (ret);
}

int
ldi_write(ldi_handle_t lh, struct uio *uiop, cred_t *credp)
{
	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
	vnode_t			*vp;
	dev_t			dev;
	int			ret;

	if (lh == NULL)
		return (EINVAL);

	vp = handlep->lh_vp;
	dev = vp->v_rdev;
	if (handlep->lh_type & LH_CBDEV) {
		ret = cdev_write(dev, uiop, credp);
	} else if (handlep->lh_type & LH_STREAM) {
		ret = strwrite(vp, uiop, credp);
	} else {
		return (ENOTSUP);
	}
	return (ret);
}

int
ldi_get_size(ldi_handle_t lh, uint64_t *sizep)
{
	int			otyp;
	uint_t			value;
	int64_t			drv_prop64;
	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
	uint_t			blksize;
	int			blkshift;

	if ((lh == NULL) || (sizep == NULL))
		return (DDI_FAILURE);

	if (handlep->lh_type & LH_STREAM)
		return (DDI_FAILURE);

	/*
	 * Determine device type (char or block).
	 * Character devices support Size/size
	 * property value. Block devices may support
	 * Nblocks/nblocks or Size/size property value.
	 */
	if ((ldi_get_otyp(lh, &otyp)) != 0)
		return (DDI_FAILURE);

	if (otyp == OTYP_BLK) {
		if (ldi_prop_exists(lh,
		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "Nblocks")) {

			drv_prop64 = ldi_prop_get_int64(lh,
			    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
			    "Nblocks", 0);
			blksize = ldi_prop_get_int(lh,
			    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
			    "blksize", DEV_BSIZE);
			if (blksize == DEV_BSIZE)
				blksize = ldi_prop_get_int(lh, LDI_DEV_T_ANY |
				    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
				    "device-blksize", DEV_BSIZE);

			/* blksize must be a power of two */
			ASSERT(BIT_ONLYONESET(blksize));
			blkshift = highbit(blksize) - 1;

			/*
			 * We don't support Nblocks values that don't have
			 * an accurate uint64_t byte count representation.
			 */
			if ((uint64_t)drv_prop64 >= (UINT64_MAX >> blkshift))
				return (DDI_FAILURE);

			*sizep = (uint64_t)
			    (((u_offset_t)drv_prop64) << blkshift);
			return (DDI_SUCCESS);
		}

		if (ldi_prop_exists(lh,
		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "nblocks")) {

			value = ldi_prop_get_int(lh,
			    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
			    "nblocks", 0);
			blksize = ldi_prop_get_int(lh,
			    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
			    "blksize", DEV_BSIZE);
			if (blksize == DEV_BSIZE)
				blksize = ldi_prop_get_int(lh, LDI_DEV_T_ANY |
				    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
				    "device-blksize", DEV_BSIZE);

			/* blksize must be a power of two */
			ASSERT(BIT_ONLYONESET(blksize));
			blkshift = highbit(blksize) - 1;

			/*
			 * We don't support nblocks values that don't have an
			 * accurate uint64_t byte count representation.
			 */
			if ((uint64_t)value >= (UINT64_MAX >> blkshift))
				return (DDI_FAILURE);

			*sizep = (uint64_t)
			    (((u_offset_t)value) << blkshift);
			return (DDI_SUCCESS);
		}
	}

	if (ldi_prop_exists(lh,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "Size")) {

		drv_prop64 = ldi_prop_get_int64(lh,
		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "Size", 0);
		*sizep = (uint64_t)drv_prop64;
		return (DDI_SUCCESS);
	}

	if (ldi_prop_exists(lh,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "size")) {

		value = ldi_prop_get_int(lh,
		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "size", 0);
		*sizep = (uint64_t)value;
		return (DDI_SUCCESS);
	}

	/* unable to determine device size */
	return (DDI_FAILURE);
}
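/*
 * Worked example of the arithmetic above (the property values are made up
 * for illustration): with "Nblocks" = 0x400000 and "blksize" = 512
 * (DEV_BSIZE), blkshift is highbit(512) - 1 = 9, so the reported size is
 * 0x400000 << 9 = 0x80000000 bytes (2 GB).
 */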
1880 */ 1881 if ((mode & FKIOCTL) && (cmd == I_PLINK)) 1882 cmd = _I_PLINK_LH; 1883 1884 ret = strioctl(vp, cmd, arg, mode, copymode, cr, rvalp); 1885 } else { 1886 return (ENOTSUP); 1887 } 1888 1889 return (ret); 1890 } 1891 1892 int 1893 ldi_poll(ldi_handle_t lh, short events, int anyyet, short *reventsp, 1894 struct pollhead **phpp) 1895 { 1896 struct ldi_handle *handlep = (struct ldi_handle *)lh; 1897 vnode_t *vp; 1898 dev_t dev; 1899 int ret; 1900 1901 if (lh == NULL) 1902 return (EINVAL); 1903 1904 vp = handlep->lh_vp; 1905 dev = vp->v_rdev; 1906 if (handlep->lh_type & LH_CBDEV) { 1907 ret = cdev_poll(dev, events, anyyet, reventsp, phpp); 1908 } else if (handlep->lh_type & LH_STREAM) { 1909 ret = strpoll(vp->v_stream, events, anyyet, reventsp, phpp); 1910 } else { 1911 return (ENOTSUP); 1912 } 1913 1914 return (ret); 1915 } 1916 1917 int 1918 ldi_prop_op(ldi_handle_t lh, ddi_prop_op_t prop_op, 1919 int flags, char *name, caddr_t valuep, int *length) 1920 { 1921 struct ldi_handle *handlep = (struct ldi_handle *)lh; 1922 dev_t dev; 1923 dev_info_t *dip; 1924 int ret; 1925 struct snode *csp; 1926 1927 if ((lh == NULL) || (name == NULL) || (strlen(name) == 0)) 1928 return (DDI_PROP_INVAL_ARG); 1929 1930 if ((prop_op != PROP_LEN) && (valuep == NULL)) 1931 return (DDI_PROP_INVAL_ARG); 1932 1933 if (length == NULL) 1934 return (DDI_PROP_INVAL_ARG); 1935 1936 /* 1937 * try to find the associated dip, 1938 * this places a hold on the driver 1939 */ 1940 dev = handlep->lh_vp->v_rdev; 1941 1942 csp = VTOCS(handlep->lh_vp); 1943 mutex_enter(&csp->s_lock); 1944 if ((dip = csp->s_dip) != NULL) 1945 e_ddi_hold_devi(dip); 1946 mutex_exit(&csp->s_lock); 1947 if (dip == NULL) 1948 dip = e_ddi_hold_devi_by_dev(dev, 0); 1949 1950 if (dip == NULL) 1951 return (DDI_PROP_NOT_FOUND); 1952 1953 ret = i_ldi_prop_op(dev, dip, prop_op, flags, name, valuep, length); 1954 ddi_release_devi(dip); 1955 1956 return (ret); 1957 } 1958 1959 int 1960 ldi_strategy(ldi_handle_t lh, struct buf *bp) 1961 { 1962 struct ldi_handle *handlep = (struct ldi_handle *)lh; 1963 dev_t dev; 1964 1965 if ((lh == NULL) || (bp == NULL)) 1966 return (EINVAL); 1967 1968 /* this entry point is only supported for cb devices */ 1969 dev = handlep->lh_vp->v_rdev; 1970 if (!(handlep->lh_type & LH_CBDEV)) 1971 return (ENOTSUP); 1972 1973 bp->b_edev = dev; 1974 bp->b_dev = cmpdev(dev); 1975 return (bdev_strategy(bp)); 1976 } 1977 1978 int 1979 ldi_dump(ldi_handle_t lh, caddr_t addr, daddr_t blkno, int nblk) 1980 { 1981 struct ldi_handle *handlep = (struct ldi_handle *)lh; 1982 dev_t dev; 1983 1984 if (lh == NULL) 1985 return (EINVAL); 1986 1987 /* this entry point is only supported for cb devices */ 1988 dev = handlep->lh_vp->v_rdev; 1989 if (!(handlep->lh_type & LH_CBDEV)) 1990 return (ENOTSUP); 1991 1992 return (bdev_dump(dev, addr, blkno, nblk)); 1993 } 1994 1995 int 1996 ldi_devmap(ldi_handle_t lh, devmap_cookie_t dhp, offset_t off, 1997 size_t len, size_t *maplen, uint_t model) 1998 { 1999 struct ldi_handle *handlep = (struct ldi_handle *)lh; 2000 dev_t dev; 2001 2002 if (lh == NULL) 2003 return (EINVAL); 2004 2005 /* this entry point is only supported for cb devices */ 2006 dev = handlep->lh_vp->v_rdev; 2007 if (!(handlep->lh_type & LH_CBDEV)) 2008 return (ENOTSUP); 2009 2010 return (cdev_devmap(dev, dhp, off, len, maplen, model)); 2011 } 2012 2013 int 2014 ldi_aread(ldi_handle_t lh, struct aio_req *aio_reqp, cred_t *cr) 2015 { 2016 struct ldi_handle *handlep = (struct ldi_handle *)lh; 2017 dev_t dev; 2018 struct cb_ops *cb; 2019 2020 if 
int
ldi_aread(ldi_handle_t lh, struct aio_req *aio_reqp, cred_t *cr)
{
	struct ldi_handle *handlep = (struct ldi_handle *)lh;
	dev_t dev;
	struct cb_ops *cb;

	if (lh == NULL)
		return (EINVAL);

	/* this entry point is only supported for cb devices */
	if (!(handlep->lh_type & LH_CBDEV))
		return (ENOTSUP);

	/*
	 * Kaio is only supported on block devices.
	 */
	dev = handlep->lh_vp->v_rdev;
	cb = devopsp[getmajor(dev)]->devo_cb_ops;
	if (cb->cb_strategy == nodev || cb->cb_strategy == NULL)
		return (ENOTSUP);

	if (cb->cb_aread == NULL)
		return (ENOTSUP);

	return (cb->cb_aread(dev, aio_reqp, cr));
}

int
ldi_awrite(ldi_handle_t lh, struct aio_req *aio_reqp, cred_t *cr)
{
	struct ldi_handle *handlep = (struct ldi_handle *)lh;
	struct cb_ops *cb;
	dev_t dev;

	if (lh == NULL)
		return (EINVAL);

	/* this entry point is only supported for cb devices */
	if (!(handlep->lh_type & LH_CBDEV))
		return (ENOTSUP);

	/*
	 * Kaio is only supported on block devices.
	 */
	dev = handlep->lh_vp->v_rdev;
	cb = devopsp[getmajor(dev)]->devo_cb_ops;
	if (cb->cb_strategy == nodev || cb->cb_strategy == NULL)
		return (ENOTSUP);

	if (cb->cb_awrite == NULL)
		return (ENOTSUP);

	return (cb->cb_awrite(dev, aio_reqp, cr));
}

int
ldi_putmsg(ldi_handle_t lh, mblk_t *smp)
{
	struct ldi_handle *handlep = (struct ldi_handle *)lh;
	int ret;

	if ((lh == NULL) || (smp == NULL))
		return (EINVAL);

	if (!(handlep->lh_type & LH_STREAM)) {
		freemsg(smp);
		return (ENOTSUP);
	}

	/* Send message while honoring flow control */
	ret = kstrputmsg(handlep->lh_vp, smp, NULL, 0, 0,
	    MSG_BAND | MSG_HOLDSIG | MSG_IGNERROR, 0);

	return (ret);
}

int
ldi_getmsg(ldi_handle_t lh, mblk_t **rmp, timestruc_t *timeo)
{
	struct ldi_handle *handlep = (struct ldi_handle *)lh;
	clock_t timout; /* milliseconds */
	uchar_t pri;
	rval_t rval;
	int ret, pflag;

	if (lh == NULL)
		return (EINVAL);

	if (!(handlep->lh_type & LH_STREAM))
		return (ENOTSUP);

	/* Convert the timestruc_t (seconds + nanoseconds) to milliseconds */
	if (timeo != NULL) {
		timout = timeo->tv_sec * 1000 + timeo->tv_nsec / 1000000;
		if (timout > INT_MAX)
			return (EINVAL);
	} else
		timout = -1;

	/* Wait up to timout milliseconds for a message */
	pflag = MSG_ANY;
	pri = 0;
	*rmp = NULL;
	ret = kstrgetmsg(handlep->lh_vp,
	    rmp, NULL, &pri, &pflag, timout, &rval);
	return (ret);
}

int
ldi_get_dev(ldi_handle_t lh, dev_t *devp)
{
	struct ldi_handle *handlep = (struct ldi_handle *)lh;

	if ((lh == NULL) || (devp == NULL))
		return (EINVAL);

	*devp = handlep->lh_vp->v_rdev;
	return (0);
}

int
ldi_get_otyp(ldi_handle_t lh, int *otyp)
{
	struct ldi_handle *handlep = (struct ldi_handle *)lh;

	if ((lh == NULL) || (otyp == NULL))
		return (EINVAL);

	*otyp = VTYP_TO_OTYP(handlep->lh_vp->v_type);
	return (0);
}

int
ldi_get_devid(ldi_handle_t lh, ddi_devid_t *devid)
{
	struct ldi_handle *handlep = (struct ldi_handle *)lh;
	int ret;
	dev_t dev;

	if ((lh == NULL) || (devid == NULL))
		return (EINVAL);

	dev = handlep->lh_vp->v_rdev;

	ret = ddi_lyr_get_devid(dev, devid);
	if (ret != DDI_SUCCESS)
		return (ENOTSUP);

	return (0);
}

int
ldi_get_minor_name(ldi_handle_t lh, char **minor_name)
{
	struct ldi_handle *handlep = (struct ldi_handle *)lh;
	int ret, otyp;
	dev_t dev;

	if ((lh == NULL) || (minor_name == NULL))
		return (EINVAL);

	dev = handlep->lh_vp->v_rdev;
	otyp = VTYP_TO_OTYP(handlep->lh_vp->v_type);

	ret = ddi_lyr_get_minor_name(dev, OTYP_TO_STYP(otyp), minor_name);
	if (ret != DDI_SUCCESS)
		return (ENOTSUP);

	return (0);
}

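/*
 * The ldi_prop_lookup_*() and ldi_prop_get_*() routines below share a
 * common pattern: resolve the dev_info node for the handle (first from the
 * common snode, otherwise by dev_t), placing a hold on the driver; query
 * the driver's prop_op(9E) interface first so it can supply or override
 * property values; then fall back to the generic ddi_prop_*() interfaces
 * before dropping the hold.  If no dev_info node can be found, the lookup
 * proceeds through the generic interfaces with DDI_UNBND_DLPI2 set.
 */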
int
ldi_prop_lookup_int_array(ldi_handle_t lh,
    uint_t flags, char *name, int **data, uint_t *nelements)
{
	struct ldi_handle *handlep = (struct ldi_handle *)lh;
	dev_info_t *dip;
	dev_t dev;
	int res;
	struct snode *csp;

	if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
		return (DDI_PROP_INVAL_ARG);

	dev = handlep->lh_vp->v_rdev;

	csp = VTOCS(handlep->lh_vp);
	mutex_enter(&csp->s_lock);
	if ((dip = csp->s_dip) != NULL)
		e_ddi_hold_devi(dip);
	mutex_exit(&csp->s_lock);
	if (dip == NULL)
		dip = e_ddi_hold_devi_by_dev(dev, 0);

	if (dip == NULL) {
		flags |= DDI_UNBND_DLPI2;
	} else if (flags & LDI_DEV_T_ANY) {
		flags &= ~LDI_DEV_T_ANY;
		dev = DDI_DEV_T_ANY;
	}

	if (dip != NULL) {
		int *prop_val, prop_len;

		res = i_ldi_prop_op_typed(dev, dip, flags, name,
		    (caddr_t *)&prop_val, &prop_len, sizeof (int));

		/* if we got it then return it */
		if (res == DDI_PROP_SUCCESS) {
			*nelements = prop_len / sizeof (int);
			*data = prop_val;

			ddi_release_devi(dip);
			return (res);
		}
	}

	/* call the normal property interfaces */
	res = ddi_prop_lookup_int_array(dev, dip, flags,
	    name, data, nelements);

	if (dip != NULL)
		ddi_release_devi(dip);

	return (res);
}

int
ldi_prop_lookup_int64_array(ldi_handle_t lh,
    uint_t flags, char *name, int64_t **data, uint_t *nelements)
{
	struct ldi_handle *handlep = (struct ldi_handle *)lh;
	dev_info_t *dip;
	dev_t dev;
	int res;
	struct snode *csp;

	if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
		return (DDI_PROP_INVAL_ARG);

	dev = handlep->lh_vp->v_rdev;

	csp = VTOCS(handlep->lh_vp);
	mutex_enter(&csp->s_lock);
	if ((dip = csp->s_dip) != NULL)
		e_ddi_hold_devi(dip);
	mutex_exit(&csp->s_lock);
	if (dip == NULL)
		dip = e_ddi_hold_devi_by_dev(dev, 0);

	if (dip == NULL) {
		flags |= DDI_UNBND_DLPI2;
	} else if (flags & LDI_DEV_T_ANY) {
		flags &= ~LDI_DEV_T_ANY;
		dev = DDI_DEV_T_ANY;
	}

	if (dip != NULL) {
		int64_t *prop_val;
		int prop_len;

		res = i_ldi_prop_op_typed(dev, dip, flags, name,
		    (caddr_t *)&prop_val, &prop_len, sizeof (int64_t));

		/* if we got it then return it */
		if (res == DDI_PROP_SUCCESS) {
			*nelements = prop_len / sizeof (int64_t);
			*data = prop_val;

			ddi_release_devi(dip);
			return (res);
		}
	}

	/* call the normal property interfaces */
	res = ddi_prop_lookup_int64_array(dev, dip, flags,
	    name, data, nelements);

	if (dip != NULL)
		ddi_release_devi(dip);

	return (res);
}

int
ldi_prop_lookup_string_array(ldi_handle_t lh,
    uint_t flags, char *name, char ***data, uint_t *nelements)
{
	struct ldi_handle *handlep = (struct ldi_handle *)lh;
	dev_info_t *dip;
	dev_t dev;
	int res;
	struct snode *csp;

	if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
		return (DDI_PROP_INVAL_ARG);

	dev = handlep->lh_vp->v_rdev;

	csp = VTOCS(handlep->lh_vp);
	mutex_enter(&csp->s_lock);
	if ((dip = csp->s_dip) != NULL)
		e_ddi_hold_devi(dip);
	mutex_exit(&csp->s_lock);
	if (dip == NULL)
		dip = e_ddi_hold_devi_by_dev(dev, 0);

	if (dip == NULL) {
		flags |= DDI_UNBND_DLPI2;
	} else if (flags & LDI_DEV_T_ANY) {
		flags &= ~LDI_DEV_T_ANY;
		dev = DDI_DEV_T_ANY;
	}

	if (dip != NULL) {
		char *prop_val;
		int prop_len;

		res = i_ldi_prop_op_typed(dev, dip, flags, name,
		    (caddr_t *)&prop_val, &prop_len, 0);

		/* if we got it then return it */
		if (res == DDI_PROP_SUCCESS) {
			char **str_array;
			int nelem;

			/*
			 * pack the returned string array into the format
			 * our callers expect
			 */
			if (i_pack_string_array(prop_val, prop_len,
			    &str_array, &nelem) == 0) {

				*data = str_array;
				*nelements = nelem;

				ddi_prop_free(prop_val);
				ddi_release_devi(dip);
				return (res);
			}

			/*
			 * the format of the returned property must have
			 * been bad so throw it out
			 */
			ddi_prop_free(prop_val);
		}
	}

	/* call the normal property interfaces */
	res = ddi_prop_lookup_string_array(dev, dip, flags,
	    name, data, nelements);

	if (dip != NULL)
		ddi_release_devi(dip);

	return (res);
}

int
ldi_prop_lookup_string(ldi_handle_t lh,
    uint_t flags, char *name, char **data)
{
	struct ldi_handle *handlep = (struct ldi_handle *)lh;
	dev_info_t *dip;
	dev_t dev;
	int res;
	struct snode *csp;

	if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
		return (DDI_PROP_INVAL_ARG);

	dev = handlep->lh_vp->v_rdev;

	csp = VTOCS(handlep->lh_vp);
	mutex_enter(&csp->s_lock);
	if ((dip = csp->s_dip) != NULL)
		e_ddi_hold_devi(dip);
	mutex_exit(&csp->s_lock);
	if (dip == NULL)
		dip = e_ddi_hold_devi_by_dev(dev, 0);

	if (dip == NULL) {
		flags |= DDI_UNBND_DLPI2;
	} else if (flags & LDI_DEV_T_ANY) {
		flags &= ~LDI_DEV_T_ANY;
		dev = DDI_DEV_T_ANY;
	}

	if (dip != NULL) {
		char *prop_val;
		int prop_len;

		res = i_ldi_prop_op_typed(dev, dip, flags, name,
		    (caddr_t *)&prop_val, &prop_len, 0);

		/* if we got it then return it */
		if (res == DDI_PROP_SUCCESS) {
			/*
			 * sanity check the value returned.
			 */
			if (i_check_string(prop_val, prop_len)) {
				ddi_prop_free(prop_val);
			} else {
				*data = prop_val;
				ddi_release_devi(dip);
				return (res);
			}
		}
	}

	/* call the normal property interfaces */
	res = ddi_prop_lookup_string(dev, dip, flags, name, data);

	if (dip != NULL)
		ddi_release_devi(dip);

#ifdef DEBUG
	if (res == DDI_PROP_SUCCESS) {
		/*
		 * keep ourselves honest: make sure the framework returns
		 * strings in the same format as we're demanding from drivers.
		 */
		struct prop_driver_data *pdd;
		int pdd_prop_size;

		pdd = ((struct prop_driver_data *)(*data)) - 1;
		pdd_prop_size = pdd->pdd_size -
		    sizeof (struct prop_driver_data);
		ASSERT(i_check_string(*data, pdd_prop_size) == 0);
	}
#endif /* DEBUG */

	return (res);
}

int
ldi_prop_lookup_byte_array(ldi_handle_t lh,
    uint_t flags, char *name, uchar_t **data, uint_t *nelements)
{
	struct ldi_handle *handlep = (struct ldi_handle *)lh;
	dev_info_t *dip;
	dev_t dev;
	int res;
	struct snode *csp;

	if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
		return (DDI_PROP_INVAL_ARG);

	dev = handlep->lh_vp->v_rdev;

	csp = VTOCS(handlep->lh_vp);
	mutex_enter(&csp->s_lock);
	if ((dip = csp->s_dip) != NULL)
		e_ddi_hold_devi(dip);
	mutex_exit(&csp->s_lock);
	if (dip == NULL)
		dip = e_ddi_hold_devi_by_dev(dev, 0);

	if (dip == NULL) {
		flags |= DDI_UNBND_DLPI2;
	} else if (flags & LDI_DEV_T_ANY) {
		flags &= ~LDI_DEV_T_ANY;
		dev = DDI_DEV_T_ANY;
	}

	if (dip != NULL) {
		uchar_t *prop_val;
		int prop_len;

		res = i_ldi_prop_op_typed(dev, dip, flags, name,
		    (caddr_t *)&prop_val, &prop_len, sizeof (uchar_t));

		/* if we got it then return it */
		if (res == DDI_PROP_SUCCESS) {
			*nelements = prop_len / sizeof (uchar_t);
			*data = prop_val;

			ddi_release_devi(dip);
			return (res);
		}
	}

	/* call the normal property interfaces */
	res = ddi_prop_lookup_byte_array(dev, dip, flags,
	    name, data, nelements);

	if (dip != NULL)
		ddi_release_devi(dip);

	return (res);
}

int
ldi_prop_get_int(ldi_handle_t lh,
    uint_t flags, char *name, int defvalue)
{
	struct ldi_handle *handlep = (struct ldi_handle *)lh;
	dev_info_t *dip;
	dev_t dev;
	int res;
	struct snode *csp;

	if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
		return (defvalue);

	dev = handlep->lh_vp->v_rdev;

	csp = VTOCS(handlep->lh_vp);
	mutex_enter(&csp->s_lock);
	if ((dip = csp->s_dip) != NULL)
		e_ddi_hold_devi(dip);
	mutex_exit(&csp->s_lock);
	if (dip == NULL)
		dip = e_ddi_hold_devi_by_dev(dev, 0);

	if (dip == NULL) {
		flags |= DDI_UNBND_DLPI2;
	} else if (flags & LDI_DEV_T_ANY) {
		flags &= ~LDI_DEV_T_ANY;
		dev = DDI_DEV_T_ANY;
	}

	if (dip != NULL) {
		int prop_val;
		int prop_len;

		/*
		 * first call the driver's prop_op interface to allow it
		 * to override default property values.
		 */
		prop_len = sizeof (int);
		res = i_ldi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
		    flags | DDI_PROP_DYNAMIC, name,
		    (caddr_t)&prop_val, &prop_len);

		/* if we got it then return it */
		if ((res == DDI_PROP_SUCCESS) &&
		    (prop_len == sizeof (int))) {
			res = prop_val;
			ddi_release_devi(dip);
			return (res);
		}
	}

	/* call the normal property interfaces */
	res = ddi_prop_get_int(dev, dip, flags, name, defvalue);

	if (dip != NULL)
		ddi_release_devi(dip);

	return (res);
}

int64_t
ldi_prop_get_int64(ldi_handle_t lh,
    uint_t flags, char *name, int64_t defvalue)
{
	struct ldi_handle *handlep = (struct ldi_handle *)lh;
	dev_info_t *dip;
	dev_t dev;
	int64_t res;
	struct snode *csp;

	if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
		return (defvalue);

	dev = handlep->lh_vp->v_rdev;

	csp = VTOCS(handlep->lh_vp);
	mutex_enter(&csp->s_lock);
	if ((dip = csp->s_dip) != NULL)
		e_ddi_hold_devi(dip);
	mutex_exit(&csp->s_lock);
	if (dip == NULL)
		dip = e_ddi_hold_devi_by_dev(dev, 0);

	if (dip == NULL) {
		flags |= DDI_UNBND_DLPI2;
	} else if (flags & LDI_DEV_T_ANY) {
		flags &= ~LDI_DEV_T_ANY;
		dev = DDI_DEV_T_ANY;
	}

	if (dip != NULL) {
		int64_t prop_val;
		int prop_len;

		/*
		 * first call the driver's prop_op interface to allow it
		 * to override default property values.
		 */
		prop_len = sizeof (int64_t);
		res = i_ldi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
		    flags | DDI_PROP_DYNAMIC, name,
		    (caddr_t)&prop_val, &prop_len);

		/* if we got it then return it */
		if ((res == DDI_PROP_SUCCESS) &&
		    (prop_len == sizeof (int64_t))) {
			res = prop_val;
			ddi_release_devi(dip);
			return (res);
		}
	}

	/* call the normal property interfaces */
	res = ddi_prop_get_int64(dev, dip, flags, name, defvalue);

	if (dip != NULL)
		ddi_release_devi(dip);

	return (res);
}

int
ldi_prop_exists(ldi_handle_t lh, uint_t flags, char *name)
{
	struct ldi_handle *handlep = (struct ldi_handle *)lh;
	dev_info_t *dip;
	dev_t dev;
	int res, prop_len;
	struct snode *csp;

	if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
		return (0);

	dev = handlep->lh_vp->v_rdev;

	csp = VTOCS(handlep->lh_vp);
	mutex_enter(&csp->s_lock);
	if ((dip = csp->s_dip) != NULL)
		e_ddi_hold_devi(dip);
	mutex_exit(&csp->s_lock);
	if (dip == NULL)
		dip = e_ddi_hold_devi_by_dev(dev, 0);

	/* if NULL dip, prop does NOT exist */
	if (dip == NULL)
		return (0);

	if (flags & LDI_DEV_T_ANY) {
		flags &= ~LDI_DEV_T_ANY;
		dev = DDI_DEV_T_ANY;
	}

	/*
	 * first call the driver's prop_op interface to allow it
	 * to override default property values.
	 */
	res = i_ldi_prop_op(dev, dip, PROP_LEN,
	    flags | DDI_PROP_DYNAMIC, name, NULL, &prop_len);

	if (res == DDI_PROP_SUCCESS) {
		ddi_release_devi(dip);
		return (1);
	}

	/* call the normal property interfaces */
	res = ddi_prop_exists(dev, dip, flags, name);

	ddi_release_devi(dip);
	return (res);
}

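/*
 * LDI event notification interfaces.  These translate between layered
 * handles and the underlying NDI/DDI event framework: the event cookie is
 * looked up on the device's dev_info node, and handlers registered through
 * ldi_add_event_handler() are recorded in an ldi_event structure linked to
 * the handle and invoked through i_ldi_callback().  None of these routines
 * may be called from interrupt context.
 */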
int
ldi_get_eventcookie(ldi_handle_t lh, char *name, ddi_eventcookie_t *ecp)
{
	struct ldi_handle *handlep = (struct ldi_handle *)lh;
	dev_info_t *dip;
	dev_t dev;
	int res;
	struct snode *csp;

	if ((lh == NULL) || (name == NULL) ||
	    (strlen(name) == 0) || (ecp == NULL)) {
		return (DDI_FAILURE);
	}

	ASSERT(!servicing_interrupt());

	dev = handlep->lh_vp->v_rdev;

	csp = VTOCS(handlep->lh_vp);
	mutex_enter(&csp->s_lock);
	if ((dip = csp->s_dip) != NULL)
		e_ddi_hold_devi(dip);
	mutex_exit(&csp->s_lock);
	if (dip == NULL)
		dip = e_ddi_hold_devi_by_dev(dev, 0);

	if (dip == NULL)
		return (DDI_FAILURE);

	LDI_EVENTCB((CE_NOTE, "%s: event_name=%s, "
	    "dip=0x%p, event_cookiep=0x%p", "ldi_get_eventcookie",
	    name, (void *)dip, (void *)ecp));

	res = ddi_get_eventcookie(dip, name, ecp);

	ddi_release_devi(dip);
	return (res);
}

int
ldi_add_event_handler(ldi_handle_t lh, ddi_eventcookie_t ec,
    void (*handler)(ldi_handle_t, ddi_eventcookie_t, void *, void *),
    void *arg, ldi_callback_id_t *id)
{
	struct ldi_handle *handlep = (struct ldi_handle *)lh;
	struct ldi_event *lep;
	dev_info_t *dip;
	dev_t dev;
	int res;
	struct snode *csp;

	if ((lh == NULL) || (ec == NULL) || (handler == NULL) || (id == NULL))
		return (DDI_FAILURE);

	ASSERT(!servicing_interrupt());

	dev = handlep->lh_vp->v_rdev;

	csp = VTOCS(handlep->lh_vp);
	mutex_enter(&csp->s_lock);
	if ((dip = csp->s_dip) != NULL)
		e_ddi_hold_devi(dip);
	mutex_exit(&csp->s_lock);
	if (dip == NULL)
		dip = e_ddi_hold_devi_by_dev(dev, 0);

	if (dip == NULL)
		return (DDI_FAILURE);

	lep = kmem_zalloc(sizeof (struct ldi_event), KM_SLEEP);
	lep->le_lhp = handlep;
	lep->le_arg = arg;
	lep->le_handler = handler;

	if ((res = ddi_add_event_handler(dip, ec, i_ldi_callback,
	    (void *)lep, &lep->le_id)) != DDI_SUCCESS) {
		LDI_EVENTCB((CE_WARN, "%s: unable to add "
		    "event callback", "ldi_add_event_handler"));
		ddi_release_devi(dip);
		kmem_free(lep, sizeof (struct ldi_event));
		return (res);
	}

	*id = (ldi_callback_id_t)lep;

	LDI_EVENTCB((CE_NOTE, "%s: dip=0x%p, event=0x%p, "
	    "ldi_eventp=0x%p, cb_id=0x%p", "ldi_add_event_handler",
	    (void *)dip, (void *)ec, (void *)lep, (void *)id));

	handle_event_add(lep);
	ddi_release_devi(dip);
	return (res);
}

int
ldi_remove_event_handler(ldi_handle_t lh, ldi_callback_id_t id)
{
	ldi_event_t *lep = (ldi_event_t *)id;
	int res;

	if ((lh == NULL) || (id == NULL))
		return (DDI_FAILURE);

	ASSERT(!servicing_interrupt());

	if ((res = ddi_remove_event_handler(lep->le_id))
	    != DDI_SUCCESS) {
		LDI_EVENTCB((CE_WARN, "%s: unable to remove "
		    "event callback", "ldi_remove_event_handler"));
		return (res);
	}

	handle_event_remove(lep);
	kmem_free(lep, sizeof (struct ldi_event));
	return (res);
}
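/*
 * Illustrative sketch (not part of this module): how a kernel consumer
 * might use the interfaces above to open a device, query its size and
 * close it again.  The function name and device path are hypothetical;
 * the LDI calls themselves are the documented ldi_*(9F) interfaces.
 */
static int
example_ldi_get_device_size(struct modlinkage *modlp, uint64_t *sizep)
{
	ldi_ident_t li;
	ldi_handle_t lh;
	int rv;

	/* obtain an LDI identity for this module */
	if ((rv = ldi_ident_from_mod(modlp, &li)) != 0)
		return (rv);

	/* open the target device by pathname (hypothetical path) */
	rv = ldi_open_by_name("/dev/dsk/c0t0d0s0", FREAD, kcred, &lh, li);
	if (rv != 0) {
		ldi_ident_release(li);
		return (rv);
	}

	/* query the device size through the layered handle */
	if (ldi_get_size(lh, sizep) != DDI_SUCCESS)
		rv = EIO;

	(void) ldi_close(lh, FREAD, kcred);
	ldi_ident_release(li);
	return (rv);
}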