1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright (c) 1990, 2010, Oracle and/or its affiliates. All rights reserved. 24 * Copyright 2022 Garrett D'Amore 25 */ 26 27 #include <sys/note.h> 28 #include <sys/types.h> 29 #include <sys/param.h> 30 #include <sys/systm.h> 31 #include <sys/buf.h> 32 #include <sys/uio.h> 33 #include <sys/cred.h> 34 #include <sys/poll.h> 35 #include <sys/mman.h> 36 #include <sys/kmem.h> 37 #include <sys/model.h> 38 #include <sys/file.h> 39 #include <sys/proc.h> 40 #include <sys/open.h> 41 #include <sys/user.h> 42 #include <sys/t_lock.h> 43 #include <sys/vm.h> 44 #include <sys/stat.h> 45 #include <vm/hat.h> 46 #include <vm/seg.h> 47 #include <vm/seg_vn.h> 48 #include <vm/seg_dev.h> 49 #include <vm/as.h> 50 #include <sys/cmn_err.h> 51 #include <sys/cpuvar.h> 52 #include <sys/debug.h> 53 #include <sys/autoconf.h> 54 #include <sys/sunddi.h> 55 #include <sys/esunddi.h> 56 #include <sys/sunndi.h> 57 #include <sys/kstat.h> 58 #include <sys/conf.h> 59 #include <sys/ddi_impldefs.h> /* include implementation structure defs */ 60 #include <sys/ndi_impldefs.h> /* include prototypes */ 61 #include <sys/ddi_periodic.h> 62 #include <sys/hwconf.h> 
63 #include <sys/pathname.h> 64 #include <sys/modctl.h> 65 #include <sys/epm.h> 66 #include <sys/devctl.h> 67 #include <sys/callb.h> 68 #include <sys/cladm.h> 69 #include <sys/sysevent.h> 70 #include <sys/dacf_impl.h> 71 #include <sys/ddidevmap.h> 72 #include <sys/bootconf.h> 73 #include <sys/disp.h> 74 #include <sys/atomic.h> 75 #include <sys/promif.h> 76 #include <sys/instance.h> 77 #include <sys/sysevent/eventdefs.h> 78 #include <sys/task.h> 79 #include <sys/project.h> 80 #include <sys/taskq.h> 81 #include <sys/devpolicy.h> 82 #include <sys/ctype.h> 83 #include <net/if.h> 84 #include <sys/rctl.h> 85 #include <sys/zone.h> 86 #include <sys/clock_impl.h> 87 #include <sys/ddi.h> 88 #include <sys/modhash.h> 89 #include <sys/sunldi_impl.h> 90 #include <sys/fs/dv_node.h> 91 #include <sys/fs/snode.h> 92 93 extern pri_t minclsyspri; 94 95 extern rctl_hndl_t rc_project_locked_mem; 96 extern rctl_hndl_t rc_zone_locked_mem; 97 98 #ifdef DEBUG 99 static int sunddi_debug = 0; 100 #endif /* DEBUG */ 101 102 /* ddi_umem_unlock miscellaneous */ 103 104 static void i_ddi_umem_unlock_thread_start(void); 105 106 static kmutex_t ddi_umem_unlock_mutex; /* unlock list mutex */ 107 static kcondvar_t ddi_umem_unlock_cv; /* unlock list block/unblock */ 108 static kthread_t *ddi_umem_unlock_thread; 109 /* 110 * The ddi_umem_unlock FIFO list. NULL head pointer indicates empty list. 111 */ 112 static struct ddi_umem_cookie *ddi_umem_unlock_head = NULL; 113 static struct ddi_umem_cookie *ddi_umem_unlock_tail = NULL; 114 115 /* 116 * DDI(Sun) Function and flag definitions: 117 */ 118 119 #if defined(__x86) 120 /* 121 * Used to indicate which entries were chosen from a range. 122 */ 123 char *chosen_reg = "chosen-reg"; 124 #endif 125 126 /* 127 * Function used to ring system console bell 128 */ 129 void (*ddi_console_bell_func)(clock_t duration); 130 131 /* 132 * Creating register mappings and handling interrupts: 133 */ 134 135 /* 136 * Generic ddi_map: Call parent to fulfill request... 
 */

/*
 * ddi_map: generic mapping entry point.  A leaf (or intermediate nexus)
 * asks its parent to create the requested mapping by invoking the
 * parent's bus_map(9E) entry point.
 */
int
ddi_map(dev_info_t *dp, ddi_map_req_t *mp, off_t offset,
    off_t len, caddr_t *addrp)
{
	dev_info_t *pdip;

	ASSERT(dp);
	pdip = (dev_info_t *)DEVI(dp)->devi_parent;
	return ((DEVI(pdip)->devi_ops->devo_bus_ops->bus_map)(pdip,
	    dp, mp, offset, len, addrp));
}

/*
 * ddi_apply_range: (Called by nexi only.)
 * Apply ranges in parent node dp, to child regspec rp...
 */

int
ddi_apply_range(dev_info_t *dp, dev_info_t *rdip, struct regspec *rp)
{
	return (i_ddi_apply_range(dp, rdip, rp));
}

/*
 * ddi_map_regs: map register set 'rnumber' of 'dip' into kernel virtual
 * address space, returning the kernel address through *kaddrp.  On x86
 * a 'chosen-reg' property recording the selected (bus, addr, size)
 * tuple is also published on the node.
 */
int
ddi_map_regs(dev_info_t *dip, uint_t rnumber, caddr_t *kaddrp, off_t offset,
    off_t len)
{
	ddi_map_req_t mr;
#if defined(__x86)
	struct {
		int bus;
		int addr;
		int size;
	} reg, *reglist;
	uint_t length;
	int rc;

	/*
	 * get the 'registers' or the 'reg' property.
	 * We look up the reg property as an array of
	 * int's.
	 */
	rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "registers", (int **)&reglist, &length);
	if (rc != DDI_PROP_SUCCESS)
		rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
		    DDI_PROP_DONTPASS, "reg", (int **)&reglist, &length);
	if (rc == DDI_PROP_SUCCESS) {
		/*
		 * point to the required entry.
		 * NOTE(review): rnumber is not range-checked against
		 * 'length' here; presumably callers pass a valid register
		 * number for this node -- verify against callers.
		 */
		reg = reglist[rnumber];
		reg.addr += offset;
		if (len != 0)
			reg.size = len;
		/*
		 * make a new property containing ONLY the required tuple.
		 */
		if (ddi_prop_update_int_array(DDI_DEV_T_NONE, dip,
		    chosen_reg, (int *)&reg, (sizeof (reg)/sizeof (int)))
		    != DDI_PROP_SUCCESS) {
			cmn_err(CE_WARN, "%s%d: cannot create '%s' "
			    "property", DEVI(dip)->devi_name,
			    DEVI(dip)->devi_instance, chosen_reg);
		}
		/*
		 * free the memory allocated by
		 * ddi_prop_lookup_int_array ().
		 */
		ddi_prop_free((void *)reglist);
	}
#endif
	/* Build a locked, kernel-mapping request by register number. */
	mr.map_op = DDI_MO_MAP_LOCKED;
	mr.map_type = DDI_MT_RNUMBER;
	mr.map_obj.rnumber = rnumber;
	mr.map_prot = PROT_READ | PROT_WRITE;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_handlep = NULL;
	mr.map_vers = DDI_MAP_VERSION;

	/*
	 * Call my parent to map in my regs.
	 */

	return (ddi_map(dip, &mr, offset, len, kaddrp));
}

/*
 * ddi_unmap_regs: undo a ddi_map_regs() mapping and clear *kaddrp.
 * On x86 the 'chosen-reg' property created at map time is removed.
 */
void
ddi_unmap_regs(dev_info_t *dip, uint_t rnumber, caddr_t *kaddrp, off_t offset,
    off_t len)
{
	ddi_map_req_t mr;

	mr.map_op = DDI_MO_UNMAP;
	mr.map_type = DDI_MT_RNUMBER;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_prot = PROT_READ | PROT_WRITE;	/* who cares? */
	mr.map_obj.rnumber = rnumber;
	mr.map_handlep = NULL;
	mr.map_vers = DDI_MAP_VERSION;

	/*
	 * Call my parent to unmap my regs.
	 */

	(void) ddi_map(dip, &mr, offset, len, kaddrp);
	*kaddrp = (caddr_t)0;
#if defined(__x86)
	(void) ddi_prop_remove(DDI_DEV_T_NONE, dip, chosen_reg);
#endif
}

/*
 * ddi_bus_map: default bus_map implementation; defer to the
 * implementation-specific i_ddi_bus_map().
 */
int
ddi_bus_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
    off_t offset, off_t len, caddr_t *vaddrp)
{
	return (i_ddi_bus_map(dip, rdip, mp, offset, len, vaddrp));
}

/*
 * nullbusmap:	The/DDI default bus_map entry point for nexi
 *		not conforming to the reg/range paradigm (i.e. scsi, etc.)
 *		with no HAT/MMU layer to be programmed at this level.
 *
 *		If the call is to map by rnumber, return an error,
 *		otherwise pass anything else up the tree to my parent.
 */
int
nullbusmap(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
    off_t offset, off_t len, caddr_t *vaddrp)
{
	_NOTE(ARGUNUSED(rdip))
	if (mp->map_type == DDI_MT_RNUMBER)
		return (DDI_ME_UNSUPPORTED);

	return (ddi_map(dip, mp, offset, len, vaddrp));
}

/*
 * ddi_rnumber_to_regspec: Not for use by leaf drivers.
279 * Only for use by nexi using the reg/range paradigm. 280 */ 281 struct regspec * 282 ddi_rnumber_to_regspec(dev_info_t *dip, int rnumber) 283 { 284 return (i_ddi_rnumber_to_regspec(dip, rnumber)); 285 } 286 287 288 /* 289 * Note that we allow the dip to be nil because we may be called 290 * prior even to the instantiation of the devinfo tree itself - all 291 * regular leaf and nexus drivers should always use a non-nil dip! 292 * 293 * We treat peek in a somewhat cavalier fashion .. assuming that we'll 294 * simply get a synchronous fault as soon as we touch a missing address. 295 * 296 * Poke is rather more carefully handled because we might poke to a write 297 * buffer, "succeed", then only find some time later that we got an 298 * asynchronous fault that indicated that the address we were writing to 299 * was not really backed by hardware. 300 */ 301 302 static int 303 i_ddi_peekpoke(dev_info_t *devi, ddi_ctl_enum_t cmd, size_t size, 304 void *addr, void *value_p) 305 { 306 union { 307 uint64_t u64; 308 uint32_t u32; 309 uint16_t u16; 310 uint8_t u8; 311 } peekpoke_value; 312 313 peekpoke_ctlops_t peekpoke_args; 314 uint64_t dummy_result; 315 int rval; 316 317 /* Note: size is assumed to be correct; it is not checked. 
*/ 318 peekpoke_args.size = size; 319 peekpoke_args.dev_addr = (uintptr_t)addr; 320 peekpoke_args.handle = NULL; 321 peekpoke_args.repcount = 1; 322 peekpoke_args.flags = 0; 323 324 if (cmd == DDI_CTLOPS_POKE) { 325 switch (size) { 326 case sizeof (uint8_t): 327 peekpoke_value.u8 = *(uint8_t *)value_p; 328 break; 329 case sizeof (uint16_t): 330 peekpoke_value.u16 = *(uint16_t *)value_p; 331 break; 332 case sizeof (uint32_t): 333 peekpoke_value.u32 = *(uint32_t *)value_p; 334 break; 335 case sizeof (uint64_t): 336 peekpoke_value.u64 = *(uint64_t *)value_p; 337 break; 338 } 339 } 340 341 peekpoke_args.host_addr = (uintptr_t)&peekpoke_value.u64; 342 343 if (devi != NULL) 344 rval = ddi_ctlops(devi, devi, cmd, &peekpoke_args, 345 &dummy_result); 346 else 347 rval = peekpoke_mem(cmd, &peekpoke_args); 348 349 /* 350 * A NULL value_p is permitted by ddi_peek(9F); discard the result. 351 */ 352 if ((cmd == DDI_CTLOPS_PEEK) & (value_p != NULL)) { 353 switch (size) { 354 case sizeof (uint8_t): 355 *(uint8_t *)value_p = peekpoke_value.u8; 356 break; 357 case sizeof (uint16_t): 358 *(uint16_t *)value_p = peekpoke_value.u16; 359 break; 360 case sizeof (uint32_t): 361 *(uint32_t *)value_p = peekpoke_value.u32; 362 break; 363 case sizeof (uint64_t): 364 *(uint64_t *)value_p = peekpoke_value.u64; 365 break; 366 } 367 } 368 369 return (rval); 370 } 371 372 /* 373 * Keep ddi_peek() and ddi_poke() in case 3rd parties are calling this. 374 * they shouldn't be, but the 9f manpage kind of pseudo exposes it. 
 */
/*
 * ddi_peek/ddi_poke: size-validating wrappers over i_ddi_peekpoke().
 * Only power-of-two widths up to 64 bits are accepted.
 */
int
ddi_peek(dev_info_t *devi, size_t size, void *addr, void *value_p)
{
	switch (size) {
	case sizeof (uint8_t):
	case sizeof (uint16_t):
	case sizeof (uint32_t):
	case sizeof (uint64_t):
		break;
	default:
		return (DDI_FAILURE);
	}

	return (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, size, addr, value_p));
}

int
ddi_poke(dev_info_t *devi, size_t size, void *addr, void *value_p)
{
	switch (size) {
	case sizeof (uint8_t):
	case sizeof (uint16_t):
	case sizeof (uint32_t):
	case sizeof (uint64_t):
		break;
	default:
		return (DDI_FAILURE);
	}

	return (i_ddi_peekpoke(devi, DDI_CTLOPS_POKE, size, addr, value_p));
}

/*
 * Fixed-width peek variants (ddi_peek8/16/32/64(9F)); the width is
 * implied by the value pointer's type.
 */
int
ddi_peek8(dev_info_t *dip, int8_t *addr, int8_t *val_p)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}

int
ddi_peek16(dev_info_t *dip, int16_t *addr, int16_t *val_p)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}

int
ddi_peek32(dev_info_t *dip, int32_t *addr, int32_t *val_p)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}

int
ddi_peek64(dev_info_t *dip, int64_t *addr, int64_t *val_p)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}

/*
 * Fixed-width poke variants (ddi_poke8/16/32/64(9F)); the value is
 * passed by value and staged in a local for i_ddi_peekpoke().
 */
int
ddi_poke8(dev_info_t *dip, int8_t *addr, int8_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}

int
ddi_poke16(dev_info_t *dip, int16_t *addr, int16_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}

int
ddi_poke32(dev_info_t *dip, int32_t *addr, int32_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}

int
ddi_poke64(dev_info_t *dip, int64_t *addr, int64_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}

/*
 * ddi_peekpokeio() is used primarily by the mem drivers for moving
 * data to and from uio structures via peek and poke.  Note that we
 * use "internal" routines ddi_peek and ddi_poke to make this go
 * slightly faster, avoiding the call overhead ..
 */
int
ddi_peekpokeio(dev_info_t *devi, struct uio *uio, enum uio_rw rw,
    caddr_t addr, size_t len, uint_t xfersize)
{
	int64_t	ibuffer;
	int8_t w8;
	size_t sz;
	int o;

	/* Cap the transfer unit at the native word size. */
	if (xfersize > sizeof (long))
		xfersize = sizeof (long);

	while (len != 0) {
		if ((len | (uintptr_t)addr) & 1) {
			/* Odd address or odd residual: byte at a time. */
			sz = sizeof (int8_t);
			if (rw == UIO_WRITE) {
				if ((o = uwritec(uio)) == -1)
					return (DDI_FAILURE);
				if (ddi_poke8(devi, (int8_t *)addr,
				    (int8_t)o) != DDI_SUCCESS)
					return (DDI_FAILURE);
			} else {
				if (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, sz,
				    (int8_t *)addr, &w8) != DDI_SUCCESS)
					return (DDI_FAILURE);
				if (ureadc(w8, uio))
					return (DDI_FAILURE);
			}
		} else {
			/*
			 * Pick the widest access for which both the
			 * address and remaining length are aligned,
			 * falling through to narrower widths otherwise.
			 */
			switch (xfersize) {
			case sizeof (int64_t):
				if (((len | (uintptr_t)addr) &
				    (sizeof (int64_t) - 1)) == 0) {
					sz = xfersize;
					break;
				}
				/*FALLTHROUGH*/
			case sizeof (int32_t):
				if (((len | (uintptr_t)addr) &
				    (sizeof (int32_t) - 1)) == 0) {
					sz = xfersize;
					break;
				}
				/*FALLTHROUGH*/
			default:
				/*
				 * This still assumes that we might have an
				 * I/O bus out there that permits 16-bit
				 * transfers (and that it would be upset by
				 * 32-bit transfers from such locations).
				 */
				sz = sizeof (int16_t);
				break;
			}

			if (rw == UIO_READ) {
				if (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, sz,
				    addr, &ibuffer) != DDI_SUCCESS)
					return (DDI_FAILURE);
			}

			if (uiomove(&ibuffer, sz, rw, uio))
				return (DDI_FAILURE);

			if (rw == UIO_WRITE) {
				if (i_ddi_peekpoke(devi, DDI_CTLOPS_POKE, sz,
				    addr, &ibuffer) != DDI_SUCCESS)
					return (DDI_FAILURE);
			}
		}
		addr += sz;
		len -= sz;
	}
	return (DDI_SUCCESS);
}

/*
 * These routines are used by drivers that do layered ioctls
 * On sparc, they're implemented in assembler to avoid spilling
 * register windows in the common (copyin) case ..
 */
#if !defined(__sparc)
/*
 * ddi_copyin: copy from 'buf' into 'kernbuf'.  With FKIOCTL the source
 * is already a kernel address, so kcopy() is used instead of copyin().
 * Returns 0 on success, -1 on failure (matching copyin(9F)).
 */
int
ddi_copyin(const void *buf, void *kernbuf, size_t size, int flags)
{
	if (flags & FKIOCTL)
		return (kcopy(buf, kernbuf, size) ? -1 : 0);
	return (copyin(buf, kernbuf, size));
}

/*
 * ddi_copyout: copy from kernel 'buf' out to 'kernbuf' (the target
 * address, user or kernel depending on FKIOCTL).
 */
int
ddi_copyout(const void *buf, void *kernbuf, size_t size, int flags)
{
	if (flags & FKIOCTL)
		return (kcopy(buf, kernbuf, size) ? -1 : 0);
	return (copyout(buf, kernbuf, size));
}
#endif	/* !__sparc */

/*
 * Conversions in nexus pagesize units.  We don't duplicate the
 * 'nil dip' semantics of peek/poke because btopr/btop/ptob are DDI/DKI
 * routines anyway.
 */
/* Bytes to pages (rounding down), in the parent nexus's page units. */
unsigned long
ddi_btop(dev_info_t *dip, unsigned long bytes)
{
	unsigned long pages;

	(void) ddi_ctlops(dip, dip, DDI_CTLOPS_BTOP, &bytes, &pages);
	return (pages);
}

/* Bytes to pages (rounding up), in the parent nexus's page units. */
unsigned long
ddi_btopr(dev_info_t *dip, unsigned long bytes)
{
	unsigned long pages;

	(void) ddi_ctlops(dip, dip, DDI_CTLOPS_BTOPR, &bytes, &pages);
	return (pages);
}

/* Pages to bytes, in the parent nexus's page units. */
unsigned long
ddi_ptob(dev_info_t *dip, unsigned long pages)
{
	unsigned long bytes;

	(void) ddi_ctlops(dip, dip, DDI_CTLOPS_PTOB, &pages, &bytes);
	return (bytes);
}

/*
 * Enter a critical section by raising to spl7; the returned value is
 * the previous priority level, to be handed back to ddi_exit_critical().
 */
unsigned int
ddi_enter_critical(void)
{
	return ((uint_t)spl7());
}

void
ddi_exit_critical(unsigned int spl)
{
	splx((int)spl);
}

/*
 * Nexus ctlops punter
 */

#if !defined(__sparc)
/*
 * Request bus_ctl parent to handle a bus_ctl request
 *
 * (The sparc version is in sparc_ddi.s)
 */
int
ddi_ctlops(dev_info_t *d, dev_info_t *r, ddi_ctl_enum_t op, void *a, void *v)
{
	int (*fp)();

	if (!d || !r)
		return (DDI_FAILURE);

	/* Replace 'd' with its cached bus_ctl parent, if any. */
	if ((d = (dev_info_t *)DEVI(d)->devi_bus_ctl) == NULL)
		return (DDI_FAILURE);

	fp = DEVI(d)->devi_ops->devo_bus_ops->bus_ctl;
	return ((*fp)(d, r, op, a, v));
}

#endif

/*
 * DMA/DVMA setup
 */

#if !defined(__sparc)
/*
 * Request bus_dma_ctl parent to fiddle with a dma request.
 *
 * (The sparc version is in sparc_subr.s)
 */
int
ddi_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
    off_t *offp, size_t *lenp, caddr_t *objp, uint_t flags)
{
	int (*fp)();

	if (dip != ddi_root_node())
		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_ctl;
	fp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_ctl;
	return ((*fp) (dip, rdip, handle, request, offp, lenp, objp, flags));
}
#endif

/*
 * For all DMA control functions, call the DMA control
 * routine and return status.
 *
 * Just plain assume that the parent is to be called.
 * If a nexus driver or a thread outside the framework
 * of a nexus driver or a leaf driver calls these functions,
 * it is up to them to deal with the fact that the parent's
 * bus_dma_ctl function will be the first one called.
 */

#define	HD	((ddi_dma_impl_t *)h)->dmai_rdip

/*
 * This routine is left in place to satisfy link dependencies
 * for any 3rd party nexus drivers that rely on it. It is never
 * called, though.
 */
/*ARGSUSED*/
int
ddi_dma_map(dev_info_t *dip, dev_info_t *rdip,
    struct ddi_dma_req *dmareqp, ddi_dma_handle_t *handlep)
{
	return (DDI_FAILURE);
}

#if !defined(__sparc)

/*
 * The SPARC versions of these routines are done in assembler to
 * save register windows, so they're in sparc_subr.s.
691 */ 692 693 int 694 ddi_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr, 695 int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep) 696 { 697 int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_attr_t *, 698 int (*)(caddr_t), caddr_t, ddi_dma_handle_t *); 699 700 if (dip != ddi_root_node()) 701 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_allochdl; 702 703 funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_allochdl; 704 return ((*funcp)(dip, rdip, attr, waitfp, arg, handlep)); 705 } 706 707 int 708 ddi_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handlep) 709 { 710 int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t); 711 712 if (dip != ddi_root_node()) 713 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_allochdl; 714 715 funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_freehdl; 716 return ((*funcp)(dip, rdip, handlep)); 717 } 718 719 int 720 ddi_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip, 721 ddi_dma_handle_t handle, struct ddi_dma_req *dmareq, 722 ddi_dma_cookie_t *cp, uint_t *ccountp) 723 { 724 int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t, 725 struct ddi_dma_req *, ddi_dma_cookie_t *, uint_t *); 726 727 if (dip != ddi_root_node()) 728 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl; 729 730 funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_bindhdl; 731 return ((*funcp)(dip, rdip, handle, dmareq, cp, ccountp)); 732 } 733 734 int 735 ddi_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip, 736 ddi_dma_handle_t handle) 737 { 738 int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t); 739 740 if (dip != ddi_root_node()) 741 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_unbindhdl; 742 743 funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_unbindhdl; 744 return ((*funcp)(dip, rdip, handle)); 745 } 746 747 748 int 749 ddi_dma_flush(dev_info_t *dip, dev_info_t *rdip, 750 ddi_dma_handle_t handle, off_t off, size_t len, 751 uint_t cache_flags) 752 { 753 int (*funcp)(dev_info_t *, dev_info_t *, 
ddi_dma_handle_t, 754 off_t, size_t, uint_t); 755 756 if (dip != ddi_root_node()) 757 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_flush; 758 759 funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_flush; 760 return ((*funcp)(dip, rdip, handle, off, len, cache_flags)); 761 } 762 763 int 764 ddi_dma_win(dev_info_t *dip, dev_info_t *rdip, 765 ddi_dma_handle_t handle, uint_t win, off_t *offp, 766 size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp) 767 { 768 int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t, 769 uint_t, off_t *, size_t *, ddi_dma_cookie_t *, uint_t *); 770 771 if (dip != ddi_root_node()) 772 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_win; 773 774 funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_win; 775 return ((*funcp)(dip, rdip, handle, win, offp, lenp, 776 cookiep, ccountp)); 777 } 778 779 int 780 ddi_dma_sync(ddi_dma_handle_t h, off_t o, size_t l, uint_t whom) 781 { 782 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)h; 783 dev_info_t *dip, *rdip; 784 int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t, off_t, 785 size_t, uint_t); 786 787 /* 788 * the DMA nexus driver will set DMP_NOSYNC if the 789 * platform does not require any sync operation. For 790 * example if the memory is uncached or consistent 791 * and without any I/O write buffers involved. 
792 */ 793 if ((hp->dmai_rflags & DMP_NOSYNC) == DMP_NOSYNC) 794 return (DDI_SUCCESS); 795 796 dip = rdip = hp->dmai_rdip; 797 if (dip != ddi_root_node()) 798 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_flush; 799 funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_flush; 800 return ((*funcp)(dip, rdip, h, o, l, whom)); 801 } 802 803 int 804 ddi_dma_unbind_handle(ddi_dma_handle_t h) 805 { 806 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)h; 807 dev_info_t *dip, *rdip; 808 int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t); 809 810 dip = rdip = hp->dmai_rdip; 811 if (dip != ddi_root_node()) 812 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_unbindhdl; 813 funcp = DEVI(rdip)->devi_bus_dma_unbindfunc; 814 return ((*funcp)(dip, rdip, h)); 815 } 816 817 #endif /* !__sparc */ 818 819 /* 820 * DMA burst sizes, and transfer minimums 821 */ 822 823 int 824 ddi_dma_burstsizes(ddi_dma_handle_t handle) 825 { 826 ddi_dma_impl_t *dimp = (ddi_dma_impl_t *)handle; 827 828 if (!dimp) 829 return (0); 830 else 831 return (dimp->dmai_burstsizes); 832 } 833 834 /* 835 * Given two DMA attribute structures, apply the attributes 836 * of one to the other, following the rules of attributes 837 * and the wishes of the caller. 838 * 839 * The rules of DMA attribute structures are that you cannot 840 * make things *less* restrictive as you apply one set 841 * of attributes to another. 
 *
 */
void
ddi_dma_attr_merge(ddi_dma_attr_t *attr, ddi_dma_attr_t *mod)
{
	/* Lower bounds rise (MAX); upper bounds fall (MIN). */
	attr->dma_attr_addr_lo =
	    MAX(attr->dma_attr_addr_lo, mod->dma_attr_addr_lo);
	attr->dma_attr_addr_hi =
	    MIN(attr->dma_attr_addr_hi, mod->dma_attr_addr_hi);
	attr->dma_attr_count_max =
	    MIN(attr->dma_attr_count_max, mod->dma_attr_count_max);
	attr->dma_attr_align =
	    MAX(attr->dma_attr_align, mod->dma_attr_align);
	/* Only burst sizes both sides support survive. */
	attr->dma_attr_burstsizes =
	    (uint_t)(attr->dma_attr_burstsizes & mod->dma_attr_burstsizes);
	attr->dma_attr_minxfer =
	    maxbit(attr->dma_attr_minxfer, mod->dma_attr_minxfer);
	attr->dma_attr_maxxfer =
	    MIN(attr->dma_attr_maxxfer, mod->dma_attr_maxxfer);
	attr->dma_attr_seg = MIN(attr->dma_attr_seg, mod->dma_attr_seg);
	attr->dma_attr_sgllen = MIN((uint_t)attr->dma_attr_sgllen,
	    (uint_t)mod->dma_attr_sgllen);
	attr->dma_attr_granular =
	    MAX(attr->dma_attr_granular, mod->dma_attr_granular);
}

/*
 * mmap/segmap interface:
 */

/*
 * ddi_segmap:		setup the default segment driver. Calls the drivers
 *			XXmmap routine to validate the range to be mapped.
 *			Return ENXIO of the range is not valid. Create
 *			a seg_dev segment that contains all of the
 *			necessary information and will reference the
 *			default segment driver routines. It returns zero
 *			on success or non-zero on failure.
 */
int
ddi_segmap(dev_t dev, off_t offset, struct as *asp, caddr_t *addrp, off_t len,
    uint_t prot, uint_t maxprot, uint_t flags, cred_t *credp)
{
	extern int spec_segmap(dev_t, off_t, struct as *, caddr_t *,
	    off_t, uint_t, uint_t, uint_t, struct cred *);

	return (spec_segmap(dev, offset, asp, addrp, len,
	    prot, maxprot, flags, credp));
}

/*
 * ddi_map_fault:	Resolve mappings at fault time.  Used by segment
 *			drivers.  Allows each successive parent to resolve
 *			address translations and add its mappings to the
 *			mapping list supplied in the page structure.  It
 *			returns zero on success	or non-zero on failure.
 */

int
ddi_map_fault(dev_info_t *dip, struct hat *hat, struct seg *seg,
    caddr_t addr, struct devpage *dp, pfn_t pfn, uint_t prot, uint_t lock)
{
	return (i_ddi_map_fault(dip, dip, hat, seg, addr, dp, pfn, prot, lock));
}

/*
 * ddi_device_mapping_check:	Called from ddi_segmap_setup.
 *	Invokes platform specific DDI to determine whether attributes specified
 *	in attr(9s) are	valid for the region of memory that will be made
 *	available for direct access to user process via the mmap(2) system call.
 *
 *	Returns 0 if the region is mappable (with *hat_flags filled in from
 *	the framework), -1 otherwise.
 */
int
ddi_device_mapping_check(dev_t dev, ddi_device_acc_attr_t *accattrp,
    uint_t rnumber, uint_t *hat_flags)
{
	ddi_acc_handle_t handle;
	ddi_map_req_t mr;
	ddi_acc_hdl_t *hp;
	int result;
	dev_info_t *dip;

	/*
	 * we use e_ddi_hold_devi_by_dev to search for the devi.  We
	 * release it immediately since it should already be held by
	 * a devfs vnode.
	 */
	if ((dip =
	    e_ddi_hold_devi_by_dev(dev, E_DDI_HOLD_DEVI_NOATTACH)) == NULL)
		return (-1);
	ddi_release_devi(dip);		/* for e_ddi_hold_devi_by_dev() */

	/*
	 * Allocate and initialize the common elements of data
	 * access handle.
	 */
	handle = impl_acc_hdl_alloc(KM_SLEEP, NULL);
	if (handle == NULL)
		return (-1);

	hp = impl_acc_hdl_get(handle);
	hp->ah_vers = VERS_ACCHDL;
	hp->ah_dip = dip;
	hp->ah_rnumber = rnumber;
	hp->ah_offset = 0;
	hp->ah_len = 0;
	hp->ah_acc = *accattrp;

	/*
	 * Set up the mapping request and call to parent.
	 */
	mr.map_op = DDI_MO_MAP_HANDLE;
	mr.map_type = DDI_MT_RNUMBER;
	mr.map_obj.rnumber = rnumber;
	mr.map_prot = PROT_READ | PROT_WRITE;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_handlep = hp;
	mr.map_vers = DDI_MAP_VERSION;
	result = ddi_map(dip, &mr, 0, 0, NULL);

	/*
	 * Region must be mappable, pick up flags from the framework.
	 */
	*hat_flags = hp->ah_hat_flags;

	impl_acc_hdl_free(handle);

	/*
	 * check for end result.
	 */
	if (result != DDI_SUCCESS)
		return (-1);
	return (0);
}


/*
 * Property functions:	 See also, ddipropdefs.h.
 *
 * These functions are the framework for the property functions,
 * i.e. they support software defined properties.  All implementation
 * specific property handling (i.e.: self-identifying devices and
 * PROM defined properties are handled in the implementation specific
 * functions (defined in ddi_implfuncs.h).
 */

/*
 * nopropop:	Shouldn't be called, right?
 */
int
nopropop(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
    char *name, caddr_t valuep, int *lengthp)
{
	_NOTE(ARGUNUSED(dev, dip, prop_op, mod_flags, name, valuep, lengthp))
	return (DDI_PROP_NOT_FOUND);
}

#ifdef	DDI_PROP_DEBUG
int ddi_prop_debug_flag = 0;

/*
 * Toggle property debugging; returns the previous setting so callers
 * can restore it.
 */
int
ddi_prop_debug(int enable)
{
	int prev = ddi_prop_debug_flag;

	if ((enable != 0) || (prev != 0))
		printf("ddi_prop_debug: debugging %s\n",
		    enable ? "enabled" : "disabled");
	ddi_prop_debug_flag = enable;
	return (prev);
}

#endif	/* DDI_PROP_DEBUG */

/*
 * Search a property list for a match, if found return pointer
 * to matching prop struct, else return NULL.
1018 */ 1019 1020 ddi_prop_t * 1021 i_ddi_prop_search(dev_t dev, char *name, uint_t flags, ddi_prop_t **list_head) 1022 { 1023 ddi_prop_t *propp; 1024 1025 /* 1026 * find the property in child's devinfo: 1027 * Search order defined by this search function is first matching 1028 * property with input dev == DDI_DEV_T_ANY matching any dev or 1029 * dev == propp->prop_dev, name == propp->name, and the correct 1030 * data type as specified in the flags. If a DDI_DEV_T_NONE dev 1031 * value made it this far then it implies a DDI_DEV_T_ANY search. 1032 */ 1033 if (dev == DDI_DEV_T_NONE) 1034 dev = DDI_DEV_T_ANY; 1035 1036 for (propp = *list_head; propp != NULL; propp = propp->prop_next) { 1037 1038 if (!DDI_STRSAME(propp->prop_name, name)) 1039 continue; 1040 1041 if ((dev != DDI_DEV_T_ANY) && (propp->prop_dev != dev)) 1042 continue; 1043 1044 if (((propp->prop_flags & flags) & DDI_PROP_TYPE_MASK) == 0) 1045 continue; 1046 1047 return (propp); 1048 } 1049 1050 return ((ddi_prop_t *)0); 1051 } 1052 1053 /* 1054 * Search for property within devnames structures 1055 */ 1056 ddi_prop_t * 1057 i_ddi_search_global_prop(dev_t dev, char *name, uint_t flags) 1058 { 1059 major_t major; 1060 struct devnames *dnp; 1061 ddi_prop_t *propp; 1062 1063 /* 1064 * Valid dev_t value is needed to index into the 1065 * correct devnames entry, therefore a dev_t 1066 * value of DDI_DEV_T_ANY is not appropriate. 
 */
	ASSERT(dev != DDI_DEV_T_ANY);
	if (dev == DDI_DEV_T_ANY) {
		return ((ddi_prop_t *)0);
	}

	major = getmajor(dev);
	dnp = &(devnamesp[major]);

	/* No global property list registered for this driver: not found. */
	if (dnp->dn_global_prop_ptr == NULL)
		return ((ddi_prop_t *)0);

	LOCK_DEV_OPS(&dnp->dn_lock);

	for (propp = dnp->dn_global_prop_ptr->prop_list;
	    propp != NULL;
	    propp = (ddi_prop_t *)propp->prop_next) {

		if (!DDI_STRSAME(propp->prop_name, name))
			continue;

		/*
		 * Unless the caller asked for "any dev_t" matching
		 * (rootnex-global or LDI lookup), the entry must have been
		 * created for exactly this dev_t.
		 */
		if ((!(flags & DDI_PROP_ROOTNEX_GLOBAL)) &&
		    (!(flags & LDI_DEV_T_ANY)) && (propp->prop_dev != dev))
			continue;

		/* The requested type mask must intersect the entry's type. */
		if (((propp->prop_flags & flags) & DDI_PROP_TYPE_MASK) == 0)
			continue;

		/* Property found, return it */
		UNLOCK_DEV_OPS(&dnp->dn_lock);
		return (propp);
	}

	UNLOCK_DEV_OPS(&dnp->dn_lock);
	return ((ddi_prop_t *)0);
}

static char prop_no_mem_msg[] = "can't allocate memory for ddi property <%s>";

/*
 * ddi_prop_search_global:
 *	Search the global property list within devnames
 *	for the named property.  Return the encoded value.
 *
 * Returns DDI_PROP_SUCCESS with *valuep set to a kmem_alloc'ed copy of the
 * encoded data (caller frees via kmem_free using *lengthp), or
 * DDI_PROP_NOT_FOUND / DDI_PROP_UNDEFINED / DDI_PROP_NO_MEMORY.
 */
static int
i_ddi_prop_search_global(dev_t dev, uint_t flags, char *name,
    void *valuep, uint_t *lengthp)
{
	ddi_prop_t	*propp;
	caddr_t		buffer;

	propp = i_ddi_search_global_prop(dev, name, flags);

	/* Property NOT found, bail */
	if (propp == (ddi_prop_t *)0)
		return (DDI_PROP_NOT_FOUND);

	if (propp->prop_flags & DDI_PROP_UNDEF_IT)
		return (DDI_PROP_UNDEFINED);

	/*
	 * NOTE(review): propp is dereferenced here after dn_lock was dropped
	 * inside i_ddi_search_global_prop(); presumably global property
	 * entries are never freed while the driver is loaded -- confirm.
	 */
	if ((buffer = kmem_alloc(propp->prop_len,
	    (flags & DDI_PROP_CANSLEEP) ?
	    KM_SLEEP : KM_NOSLEEP)) == NULL) {
		cmn_err(CE_CONT, prop_no_mem_msg, name);
		return (DDI_PROP_NO_MEMORY);
	}

	/*
	 * Return the encoded data
	 */
	*(caddr_t *)valuep = buffer;
	*lengthp = propp->prop_len;
	bcopy(propp->prop_val, buffer, propp->prop_len);

	return (DDI_PROP_SUCCESS);
}

/*
 * ddi_prop_search_common:	Lookup and return the encoded value
 *
 * Searches the property lists of dip (and, unless DDI_PROP_DONTPASS is set,
 * iteratively ascends to its ancestors) for the named property, servicing
 * the request according to prop_op (existence / length / value-into-buffer /
 * value-into-allocated-buffer).  valuep/lengthp semantics depend on prop_op;
 * see the ddi_prop_op comment below.
 */
int
ddi_prop_search_common(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    uint_t flags, char *name, void *valuep, uint_t *lengthp)
{
	ddi_prop_t	*propp;
	int		i;
	caddr_t		buffer = NULL;
	caddr_t		prealloc = NULL;
	int		plength = 0;
	dev_info_t	*pdip;
	int		(*bop)();

	/*CONSTANTCONDITION*/
	while (1) {

		mutex_enter(&(DEVI(dip)->devi_lock));


		/*
		 * find the property in child's devinfo:
		 * Search order is:
		 * 1. driver defined properties
		 * 2. system defined properties
		 * 3. driver global properties
		 * 4. boot defined properties
		 */

		propp = i_ddi_prop_search(dev, name, flags,
		    &(DEVI(dip)->devi_drv_prop_ptr));
		if (propp == NULL) {
			propp = i_ddi_prop_search(dev, name, flags,
			    &(DEVI(dip)->devi_sys_prop_ptr));
		}
		if ((propp == NULL) && DEVI(dip)->devi_global_prop_list) {
			propp = i_ddi_prop_search(dev, name, flags,
			    &DEVI(dip)->devi_global_prop_list->prop_list);
		}

		if (propp == NULL) {
			propp = i_ddi_prop_search(dev, name, flags,
			    &(DEVI(dip)->devi_hw_prop_ptr));
		}

		/*
		 * Software property found?
		 */
		if (propp != (ddi_prop_t *)0) {

			/*
			 * If explicit undefine, return now.
			 */
			if (propp->prop_flags & DDI_PROP_UNDEF_IT) {
				mutex_exit(&(DEVI(dip)->devi_lock));
				if (prealloc)
					kmem_free(prealloc, plength);
				return (DDI_PROP_UNDEFINED);
			}

			/*
			 * If we only want to know if it exists, return now
			 */
			if (prop_op == PROP_EXISTS) {
				mutex_exit(&(DEVI(dip)->devi_lock));
				/* PROP_EXISTS never triggers preallocation */
				ASSERT(prealloc == NULL);
				return (DDI_PROP_SUCCESS);
			}

			/*
			 * If length only request or prop length == 0,
			 * service request and return now.
			 */
			if ((prop_op == PROP_LEN) ||(propp->prop_len == 0)) {
				*lengthp = propp->prop_len;

				/*
				 * if prop_op is PROP_LEN_AND_VAL_ALLOC
				 * that means prop_len is 0, so set valuep
				 * also to NULL
				 */
				if (prop_op == PROP_LEN_AND_VAL_ALLOC)
					*(caddr_t *)valuep = NULL;

				mutex_exit(&(DEVI(dip)->devi_lock));
				if (prealloc)
					kmem_free(prealloc, plength);
				return (DDI_PROP_SUCCESS);
			}

			/*
			 * If LEN_AND_VAL_ALLOC and the request can sleep,
			 * drop the mutex, allocate the buffer, and go
			 * through the loop again.  If we already allocated
			 * the buffer, and the size of the property changed,
			 * keep trying...
			 */
			if ((prop_op == PROP_LEN_AND_VAL_ALLOC) &&
			    (flags & DDI_PROP_CANSLEEP)) {
				if (prealloc && (propp->prop_len != plength)) {
					kmem_free(prealloc, plength);
					prealloc = NULL;
				}
				if (prealloc == NULL) {
					plength = propp->prop_len;
					/*
					 * Can't sleep holding devi_lock:
					 * drop it, allocate, and re-search.
					 */
					mutex_exit(&(DEVI(dip)->devi_lock));
					prealloc = kmem_alloc(plength,
					    KM_SLEEP);
					continue;
				}
			}

			/*
			 * Allocate buffer, if required.  Either way,
			 * set `buffer' variable.
			 */
			i = *lengthp;			/* Get callers length */
			*lengthp = propp->prop_len;	/* Set callers length */

			switch (prop_op) {

			case PROP_LEN_AND_VAL_ALLOC:

				if (prealloc == NULL) {
					/* no-sleep path: allocate under lock */
					buffer = kmem_alloc(propp->prop_len,
					    KM_NOSLEEP);
				} else {
					buffer = prealloc;
				}

				if (buffer == NULL) {
					mutex_exit(&(DEVI(dip)->devi_lock));
					cmn_err(CE_CONT, prop_no_mem_msg, name);
					return (DDI_PROP_NO_MEMORY);
				}
				/* Set callers buf ptr */
				*(caddr_t *)valuep = buffer;
				break;

			case PROP_LEN_AND_VAL_BUF:

				/* caller's buffer (length i) must fit */
				if (propp->prop_len > (i)) {
					mutex_exit(&(DEVI(dip)->devi_lock));
					return (DDI_PROP_BUF_TOO_SMALL);
				}

				buffer = valuep;  /* Get callers buf ptr */
				break;

			default:
				break;
			}

			/*
			 * Do the copy.
			 */
			if (buffer != NULL)
				bcopy(propp->prop_val, buffer, propp->prop_len);
			mutex_exit(&(DEVI(dip)->devi_lock));
			return (DDI_PROP_SUCCESS);
		}

		mutex_exit(&(DEVI(dip)->devi_lock));
		if (prealloc)
			kmem_free(prealloc, plength);
		prealloc = NULL;

		/*
		 * Prop not found, call parent bus_ops to deal with possible
		 * h/w layer (possible PROM defined props, etc.) and to
		 * possibly ascend the hierarchy, if allowed by flags.
		 */
		pdip = (dev_info_t *)DEVI(dip)->devi_parent;

		/*
		 * One last call for the root driver PROM props?
		 */
		if (dip == ddi_root_node()) {
			return (ddi_bus_prop_op(dev, dip, dip, prop_op,
			    flags, name, valuep, (int *)lengthp));
		}

		/*
		 * We may have been called to check for properties
		 * within a single devinfo node that has no parent -
		 * see make_prop()
		 */
		if (pdip == NULL) {
			ASSERT((flags &
			    (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM)) ==
			    (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM));
			return (DDI_PROP_NOT_FOUND);
		}

		/*
		 * Instead of recursing, we do iterative calls up the tree.
		 * As a bit of optimization, skip the bus_op level if the
		 * node is a s/w node and if the parent's bus_prop_op function
		 * is `ddi_bus_prop_op', because we know that in this case,
		 * this function does nothing.
		 *
		 * 4225415: If the parent isn't attached, or the child
		 * hasn't been named by the parent yet, use the default
		 * ddi_bus_prop_op as a proxy for the parent.  This
		 * allows property lookups in any child/parent state to
		 * include 'prom' and inherited properties, even when
		 * there are no drivers attached to the child or parent.
		 */

		bop = ddi_bus_prop_op;
		if (i_ddi_devi_attached(pdip) &&
		    (i_ddi_node_state(dip) >= DS_INITIALIZED))
			bop = DEVI(pdip)->devi_ops->devo_bus_ops->bus_prop_op;

		i = DDI_PROP_NOT_FOUND;

		if ((bop != ddi_bus_prop_op) || ndi_dev_is_prom_node(dip)) {
			i = (*bop)(dev, pdip, dip, prop_op,
			    flags | DDI_PROP_DONTPASS,
			    name, valuep, lengthp);
		}

		if ((flags & DDI_PROP_DONTPASS) ||
		    (i != DDI_PROP_NOT_FOUND))
			return (i);

		/* ascend one level and search again */
		dip = pdip;
	}
	/*NOTREACHED*/
}


/*
 * ddi_prop_op: The basic property operator for drivers.
1378 * 1379 * In ddi_prop_op, the type of valuep is interpreted based on prop_op: 1380 * 1381 * prop_op valuep 1382 * ------ ------ 1383 * 1384 * PROP_LEN <unused> 1385 * 1386 * PROP_LEN_AND_VAL_BUF Pointer to callers buffer 1387 * 1388 * PROP_LEN_AND_VAL_ALLOC Address of callers pointer (will be set to 1389 * address of allocated buffer, if successful) 1390 */ 1391 int 1392 ddi_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags, 1393 char *name, caddr_t valuep, int *lengthp) 1394 { 1395 int i; 1396 1397 ASSERT((mod_flags & DDI_PROP_TYPE_MASK) == 0); 1398 1399 /* 1400 * If this was originally an LDI prop lookup then we bail here. 1401 * The reason is that the LDI property lookup interfaces first call 1402 * a drivers prop_op() entry point to allow it to override 1403 * properties. But if we've made it here, then the driver hasn't 1404 * overriden any properties. We don't want to continue with the 1405 * property search here because we don't have any type inforamtion. 1406 * When we return failure, the LDI interfaces will then proceed to 1407 * call the typed property interfaces to look up the property. 1408 */ 1409 if (mod_flags & DDI_PROP_DYNAMIC) 1410 return (DDI_PROP_NOT_FOUND); 1411 1412 /* 1413 * check for pre-typed property consumer asking for typed property: 1414 * see e_ddi_getprop_int64. 1415 */ 1416 if (mod_flags & DDI_PROP_CONSUMER_TYPED) 1417 mod_flags |= DDI_PROP_TYPE_INT64; 1418 mod_flags |= DDI_PROP_TYPE_ANY; 1419 1420 i = ddi_prop_search_common(dev, dip, prop_op, 1421 mod_flags, name, valuep, (uint_t *)lengthp); 1422 if (i == DDI_PROP_FOUND_1275) 1423 return (DDI_PROP_SUCCESS); 1424 return (i); 1425 } 1426 1427 /* 1428 * ddi_prop_op_nblocks_blksize: The basic property operator for drivers that 1429 * maintain size in number of blksize blocks. Provides a dynamic property 1430 * implementation for size oriented properties based on nblocks64 and blksize 1431 * values passed in by the driver. 
 Fallback to ddi_prop_op if the nblocks64
 * is too large.  This interface should not be used with a nblocks64 that
 * represents the driver's idea of how to represent unknown, if nblocks is
 * unknown use ddi_prop_op.
 */
int
ddi_prop_op_nblocks_blksize(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp,
    uint64_t nblocks64, uint_t blksize)
{
	uint64_t size64;
	int	blkshift;

	/* convert block size to shift value; blksize must be a power of 2 */
	ASSERT(BIT_ONLYONESET(blksize));
	blkshift = highbit(blksize) - 1;

	/*
	 * There is no point in supporting nblocks64 values that don't have
	 * an accurate uint64_t byte count representation.
	 */
	if (nblocks64 >= (UINT64_MAX >> blkshift))
		return (ddi_prop_op(dev, dip, prop_op, mod_flags,
		    name, valuep, lengthp));

	/* safe: the guard above ensures this shift cannot overflow */
	size64 = nblocks64 << blkshift;
	return (ddi_prop_op_size_blksize(dev, dip, prop_op, mod_flags,
	    name, valuep, lengthp, size64, blksize));
}

/*
 * ddi_prop_op_nblocks: ddi_prop_op_nblocks_blksize with DEV_BSIZE blksize.
 */
int
ddi_prop_op_nblocks(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp, uint64_t nblocks64)
{
	return (ddi_prop_op_nblocks_blksize(dev, dip, prop_op,
	    mod_flags, name, valuep, lengthp, nblocks64, DEV_BSIZE));
}

/*
 * ddi_prop_op_size_blksize: The basic property operator for block drivers that
 * maintain size in bytes. Provides a of dynamic property implementation for
 * size oriented properties based on size64 value and blksize passed in by the
 * driver.  Fallback to ddi_prop_op if the size64 is too large. This interface
 * should not be used with a size64 that represents the driver's idea of how
 * to represent unknown, if size is unknown use ddi_prop_op.
 *
 * NOTE: the legacy "nblocks"/"size" properties are treated as 32-bit unsigned
 * integers. While the most likely interface to request them ([bc]devi_size)
 * is declared int (signed) there is no enforcement of this, which means we
 * can't enforce limitations here without risking regression.
 */
int
ddi_prop_op_size_blksize(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp, uint64_t size64,
    uint_t blksize)
{
	uint64_t nblocks64;
	int	callers_length;
	caddr_t	buffer;
	int	blkshift;

	/*
	 * This is a kludge to support capture of size(9P) pure dynamic
	 * properties in snapshots for non-cmlb code (without exposing
	 * i_ddi_prop_dyn changes). When everyone uses cmlb, this code
	 * should be removed.
	 */
	if (i_ddi_prop_dyn_driver_get(dip) == NULL) {
		static i_ddi_prop_dyn_t prop_dyn_size[] = {
		    {"Size",	DDI_PROP_TYPE_INT64,	S_IFCHR},
		    {"Nblocks",	DDI_PROP_TYPE_INT64,	S_IFBLK},
		    {NULL}
		};
		i_ddi_prop_dyn_driver_set(dip, prop_dyn_size);
	}

	/* convert block size to shift value; blksize must be a power of 2 */
	ASSERT(BIT_ONLYONESET(blksize));
	blkshift = highbit(blksize) - 1;

	/* compute DEV_BSIZE nblocks value */
	nblocks64 = size64 >> blkshift;

	/* get callers length, establish length of our dynamic properties */
	callers_length = *lengthp;

	/*
	 * Capitalized names are the 64-bit properties; the lower-case legacy
	 * names are only served while the value still fits in 32 bits,
	 * otherwise we fall through to the static property search.
	 */
	if (strcmp(name, "Nblocks") == 0)
		*lengthp = sizeof (uint64_t);
	else if (strcmp(name, "Size") == 0)
		*lengthp = sizeof (uint64_t);
	else if ((strcmp(name, "nblocks") == 0) && (nblocks64 < UINT_MAX))
		*lengthp = sizeof (uint32_t);
	else if ((strcmp(name, "size") == 0) && (size64 < UINT_MAX))
		*lengthp = sizeof (uint32_t);
	else if ((strcmp(name, "blksize") == 0) && (blksize < UINT_MAX))
		*lengthp = sizeof (uint32_t);
	else {
		/* fallback to ddi_prop_op */
		return (ddi_prop_op(dev, dip, prop_op, mod_flags,
		    name, valuep, lengthp));
	}

	/* service request for the length of the property */
	if (prop_op == PROP_LEN)
		return (DDI_PROP_SUCCESS);

	switch (prop_op) {
	case PROP_LEN_AND_VAL_ALLOC:
		if ((buffer = kmem_alloc(*lengthp,
		    (mod_flags & DDI_PROP_CANSLEEP) ?
		    KM_SLEEP : KM_NOSLEEP)) == NULL)
			return (DDI_PROP_NO_MEMORY);

		*(caddr_t *)valuep = buffer;	/* set callers buf ptr */
		break;

	case PROP_LEN_AND_VAL_BUF:
		/* the length of the property and the request must match */
		if (callers_length != *lengthp)
			return (DDI_PROP_INVAL_ARG);

		buffer = valuep;		/* get callers buf ptr */
		break;

	default:
		return (DDI_PROP_INVAL_ARG);
	}

	/* transfer the value into the buffer */
	if (strcmp(name, "Nblocks") == 0)
		*((uint64_t *)buffer) = nblocks64;
	else if (strcmp(name, "Size") == 0)
		*((uint64_t *)buffer) = size64;
	else if (strcmp(name, "nblocks") == 0)
		*((uint32_t *)buffer) = (uint32_t)nblocks64;
	else if (strcmp(name, "size") == 0)
		*((uint32_t *)buffer) = (uint32_t)size64;
	else if (strcmp(name, "blksize") == 0)
		*((uint32_t *)buffer) = (uint32_t)blksize;
	return (DDI_PROP_SUCCESS);
}

/*
 * ddi_prop_op_size: ddi_prop_op_size_blksize with DEV_BSIZE block size.
 */
int
ddi_prop_op_size(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp, uint64_t size64)
{
	return (ddi_prop_op_size_blksize(dev, dip, prop_op,
	    mod_flags, name, valuep, lengthp, size64, DEV_BSIZE));
}

/*
 * Variable length props...
 */

/*
 * ddi_getlongprop:	Get variable length property len+val into a buffer
 *		allocated by property provider via kmem_alloc. Requester
 *		is responsible for freeing returned property via kmem_free.
 *
 *	Arguments:
 *
 *	dev_t:	Input:	dev_t of property.
 *	dip:	Input:	dev_info_t pointer of child.
 *	flags:	Input:	Possible flag modifiers are:
 *		DDI_PROP_DONTPASS:	Don't pass to parent if prop not found.
 *		DDI_PROP_CANSLEEP:	Memory allocation may sleep.
 *	name:	Input:	name of property.
 *	valuep:	Output:	Addr of callers buffer pointer.
 *	lengthp:Output:	*lengthp will contain prop length on exit.
 *
 *	Possible Returns:
 *
 *		DDI_PROP_SUCCESS:	Prop found and returned.
 *		DDI_PROP_NOT_FOUND:	Prop not found
 *		DDI_PROP_UNDEFINED:	Prop explicitly undefined.
 *		DDI_PROP_NO_MEMORY:	Prop found, but unable to alloc mem.
 */

int
ddi_getlongprop(dev_t dev, dev_info_t *dip, int flags,
    char *name, caddr_t valuep, int *lengthp)
{
	/* thin wrapper: allocate-and-return variant of ddi_prop_op */
	return (ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_ALLOC,
	    flags, name, valuep, lengthp));
}

/*
 *
 * ddi_getlongprop_buf:		Get long prop into pre-allocated callers
 *				buffer. (no memory allocation by provider).
 *
 *	dev_t:	Input:	dev_t of property.
 *	dip:	Input:	dev_info_t pointer of child.
 *	flags:	Input:	DDI_PROP_DONTPASS or NULL
 *	name:	Input:	name of property
 *	valuep:	Input:	ptr to callers buffer.
 *	lengthp:I/O:	ptr to length of callers buffer on entry,
 *			actual length of property on exit.
 *
 *	Possible returns:
 *
 *		DDI_PROP_SUCCESS	Prop found and returned
 *		DDI_PROP_NOT_FOUND	Prop not found
 *		DDI_PROP_UNDEFINED	Prop explicitly undefined.
 *		DDI_PROP_BUF_TOO_SMALL	Prop found, callers buf too small,
 *					no value returned, but actual prop
 *					length returned in *lengthp
 *
 */

int
ddi_getlongprop_buf(dev_t dev, dev_info_t *dip, int flags,
    char *name, caddr_t valuep, int *lengthp)
{
	/* thin wrapper: copy-into-caller's-buffer variant of ddi_prop_op */
	return (ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
	    flags, name, valuep, lengthp));
}

/*
 * Integer/boolean sized props.
 *
 * Call is value only... returns found boolean or int sized prop value or
 * defvalue if prop not found or is wrong length or is explicitly undefined.
 * Only flag is DDI_PROP_DONTPASS...
 *
 * By convention, this interface returns boolean (0) sized properties
 * as value (int)1.
 *
 * This never returns an error, if property not found or specifically
 * undefined, the input `defvalue' is returned.
 */

int
ddi_getprop(dev_t dev, dev_info_t *dip, int flags, char *name, int defvalue)
{
	int	propvalue = defvalue;
	int	proplength = sizeof (int);
	int	error;

	error = ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
	    flags, name, (caddr_t)&propvalue, &proplength);

	/* zero-length property exists: boolean convention, report as 1 */
	if ((error == DDI_PROP_SUCCESS) && (proplength == 0))
		propvalue = 1;

	/* on any failure propvalue still holds the caller's defvalue */
	return (propvalue);
}

/*
 * Get prop length interface: flags are 0 or DDI_PROP_DONTPASS
 * if returns DDI_PROP_SUCCESS, length returned in *lengthp.
 */

int
ddi_getproplen(dev_t dev, dev_info_t *dip, int flags, char *name, int *lengthp)
{
	return (ddi_prop_op(dev, dip, PROP_LEN, flags, name, NULL, lengthp));
}

/*
 * Allocate a struct prop_driver_data, along with 'size' bytes
 * for decoded property data.  This structure is freed by
 * calling ddi_prop_free(9F).
1700 */ 1701 static void * 1702 ddi_prop_decode_alloc(size_t size, void (*prop_free)(struct prop_driver_data *)) 1703 { 1704 struct prop_driver_data *pdd; 1705 1706 /* 1707 * Allocate a structure with enough memory to store the decoded data. 1708 */ 1709 pdd = kmem_zalloc(sizeof (struct prop_driver_data) + size, KM_SLEEP); 1710 pdd->pdd_size = (sizeof (struct prop_driver_data) + size); 1711 pdd->pdd_prop_free = prop_free; 1712 1713 /* 1714 * Return a pointer to the location to put the decoded data. 1715 */ 1716 return ((void *)((caddr_t)pdd + sizeof (struct prop_driver_data))); 1717 } 1718 1719 /* 1720 * Allocated the memory needed to store the encoded data in the property 1721 * handle. 1722 */ 1723 static int 1724 ddi_prop_encode_alloc(prop_handle_t *ph, size_t size) 1725 { 1726 /* 1727 * If size is zero, then set data to NULL and size to 0. This 1728 * is a boolean property. 1729 */ 1730 if (size == 0) { 1731 ph->ph_size = 0; 1732 ph->ph_data = NULL; 1733 ph->ph_cur_pos = NULL; 1734 ph->ph_save_pos = NULL; 1735 } else { 1736 if (ph->ph_flags == DDI_PROP_DONTSLEEP) { 1737 ph->ph_data = kmem_zalloc(size, KM_NOSLEEP); 1738 if (ph->ph_data == NULL) 1739 return (DDI_PROP_NO_MEMORY); 1740 } else 1741 ph->ph_data = kmem_zalloc(size, KM_SLEEP); 1742 ph->ph_size = size; 1743 ph->ph_cur_pos = ph->ph_data; 1744 ph->ph_save_pos = ph->ph_data; 1745 } 1746 return (DDI_PROP_SUCCESS); 1747 } 1748 1749 /* 1750 * Free the space allocated by the lookup routines. Each lookup routine 1751 * returns a pointer to the decoded data to the driver. The driver then 1752 * passes this pointer back to us. This data actually lives in a struct 1753 * prop_driver_data. We use negative indexing to find the beginning of 1754 * the structure and then free the entire structure using the size and 1755 * the free routine stored in the structure. 
1756 */ 1757 void 1758 ddi_prop_free(void *datap) 1759 { 1760 struct prop_driver_data *pdd; 1761 1762 /* 1763 * Get the structure 1764 */ 1765 pdd = (struct prop_driver_data *) 1766 ((caddr_t)datap - sizeof (struct prop_driver_data)); 1767 /* 1768 * Call the free routine to free it 1769 */ 1770 (*pdd->pdd_prop_free)(pdd); 1771 } 1772 1773 /* 1774 * Free the data associated with an array of ints, 1775 * allocated with ddi_prop_decode_alloc(). 1776 */ 1777 static void 1778 ddi_prop_free_ints(struct prop_driver_data *pdd) 1779 { 1780 kmem_free(pdd, pdd->pdd_size); 1781 } 1782 1783 /* 1784 * Free a single string property or a single string contained within 1785 * the argv style return value of an array of strings. 1786 */ 1787 static void 1788 ddi_prop_free_string(struct prop_driver_data *pdd) 1789 { 1790 kmem_free(pdd, pdd->pdd_size); 1791 1792 } 1793 1794 /* 1795 * Free an array of strings. 1796 */ 1797 static void 1798 ddi_prop_free_strings(struct prop_driver_data *pdd) 1799 { 1800 kmem_free(pdd, pdd->pdd_size); 1801 } 1802 1803 /* 1804 * Free the data associated with an array of bytes. 1805 */ 1806 static void 1807 ddi_prop_free_bytes(struct prop_driver_data *pdd) 1808 { 1809 kmem_free(pdd, pdd->pdd_size); 1810 } 1811 1812 /* 1813 * Reset the current location pointer in the property handle to the 1814 * beginning of the data. 1815 */ 1816 void 1817 ddi_prop_reset_pos(prop_handle_t *ph) 1818 { 1819 ph->ph_cur_pos = ph->ph_data; 1820 ph->ph_save_pos = ph->ph_data; 1821 } 1822 1823 /* 1824 * Restore the current location pointer in the property handle to the 1825 * saved position. 1826 */ 1827 void 1828 ddi_prop_save_pos(prop_handle_t *ph) 1829 { 1830 ph->ph_save_pos = ph->ph_cur_pos; 1831 } 1832 1833 /* 1834 * Save the location that the current location pointer is pointing to.. 
 */
void
ddi_prop_restore_pos(prop_handle_t *ph)
{
	/* rewind the cursor to the position saved by ddi_prop_save_pos() */
	ph->ph_cur_pos = ph->ph_save_pos;
}

/*
 * Property encode/decode functions
 */

/*
 * Decode a single integer property
 */
static int
ddi_prop_fm_decode_int(prop_handle_t *ph, void *data, uint_t *nelements)
{
	int	i;
	int	tmp;

	/*
	 * If there is nothing to decode return an error
	 */
	if (ph->ph_size == 0)
		return (DDI_PROP_END_OF_DATA);

	/*
	 * Decode the property as a single integer and return it
	 * in data if we were able to decode it.
	 */
	i = DDI_PROP_INT(ph, DDI_PROP_CMD_DECODE, &tmp);
	if (i < DDI_PROP_RESULT_OK) {
		switch (i) {
		case DDI_PROP_RESULT_EOF:
			return (DDI_PROP_END_OF_DATA);

		case DDI_PROP_RESULT_ERROR:
			return (DDI_PROP_CANNOT_DECODE);
		}
	}

	*(int *)data = tmp;
	*nelements = 1;
	return (DDI_PROP_SUCCESS);
}

/*
 * Decode a single 64 bit integer property
 */
static int
ddi_prop_fm_decode_int64(prop_handle_t *ph, void *data, uint_t *nelements)
{
	int	i;
	int64_t	tmp;

	/*
	 * If there is nothing to decode return an error
	 */
	if (ph->ph_size == 0)
		return (DDI_PROP_END_OF_DATA);

	/*
	 * Decode the property as a single integer and return it
	 * in data if we were able to decode it.
	 */
	i = DDI_PROP_INT64(ph, DDI_PROP_CMD_DECODE, &tmp);
	if (i < DDI_PROP_RESULT_OK) {
		switch (i) {
		case DDI_PROP_RESULT_EOF:
			return (DDI_PROP_END_OF_DATA);

		case DDI_PROP_RESULT_ERROR:
			return (DDI_PROP_CANNOT_DECODE);
		}
	}

	*(int64_t *)data = tmp;
	*nelements = 1;
	return (DDI_PROP_SUCCESS);
}

/*
 * Decode an array of integers property
 */
static int
ddi_prop_fm_decode_ints(prop_handle_t *ph, void *data, uint_t *nelements)
{
	int	i;
	int	cnt = 0;
	int	*tmp;
	int	*intp;
	int	n;

	/*
	 * Figure out how many array elements there are by going through the
	 * data without decoding it first and counting.
	 */
	for (;;) {
		i = DDI_PROP_INT(ph, DDI_PROP_CMD_SKIP, NULL);
		if (i < 0)
			break;
		cnt++;
	}

	/*
	 * If there are no elements return an error
	 */
	if (cnt == 0)
		return (DDI_PROP_END_OF_DATA);

	/*
	 * If we cannot skip through the data, we cannot decode it
	 */
	if (i == DDI_PROP_RESULT_ERROR)
		return (DDI_PROP_CANNOT_DECODE);

	/*
	 * Reset the data pointer to the beginning of the encoded data
	 */
	ddi_prop_reset_pos(ph);

	/*
	 * Allocated memory to store the decoded value in.
	 */
	intp = ddi_prop_decode_alloc((cnt * sizeof (int)),
	    ddi_prop_free_ints);

	/*
	 * Decode each element and place it in the space we just allocated
	 */
	tmp = intp;
	for (n = 0; n < cnt; n++, tmp++) {
		i = DDI_PROP_INT(ph, DDI_PROP_CMD_DECODE, tmp);
		if (i < DDI_PROP_RESULT_OK) {
			/*
			 * Free the space we just allocated
			 * and return an error.
			 */
			ddi_prop_free(intp);
			switch (i) {
			case DDI_PROP_RESULT_EOF:
				return (DDI_PROP_END_OF_DATA);

			case DDI_PROP_RESULT_ERROR:
				return (DDI_PROP_CANNOT_DECODE);
			}
		}
	}

	*nelements = cnt;
	*(int **)data = intp;

	return (DDI_PROP_SUCCESS);
}

/*
 * Decode a 64 bit integer array property
 */
static int
ddi_prop_fm_decode_int64_array(prop_handle_t *ph, void *data, uint_t *nelements)
{
	int	i;
	int	n;
	int	cnt = 0;
	int64_t	*tmp;
	int64_t	*intp;

	/*
	 * Count the number of array elements by going
	 * through the data without decoding it.
	 */
	for (;;) {
		i = DDI_PROP_INT64(ph, DDI_PROP_CMD_SKIP, NULL);
		if (i < 0)
			break;
		cnt++;
	}

	/*
	 * If there are no elements return an error
	 */
	if (cnt == 0)
		return (DDI_PROP_END_OF_DATA);

	/*
	 * If we cannot skip through the data, we cannot decode it
	 */
	if (i == DDI_PROP_RESULT_ERROR)
		return (DDI_PROP_CANNOT_DECODE);

	/*
	 * Reset the data pointer to the beginning of the encoded data
	 */
	ddi_prop_reset_pos(ph);

	/*
	 * Allocate memory to store the decoded value.
	 */
	intp = ddi_prop_decode_alloc((cnt * sizeof (int64_t)),
	    ddi_prop_free_ints);

	/*
	 * Decode each element and place it in the space allocated
	 */
	tmp = intp;
	for (n = 0; n < cnt; n++, tmp++) {
		i = DDI_PROP_INT64(ph, DDI_PROP_CMD_DECODE, tmp);
		if (i < DDI_PROP_RESULT_OK) {
			/*
			 * Free the space we just allocated
			 * and return an error.
			 */
			ddi_prop_free(intp);
			switch (i) {
			case DDI_PROP_RESULT_EOF:
				return (DDI_PROP_END_OF_DATA);

			case DDI_PROP_RESULT_ERROR:
				return (DDI_PROP_CANNOT_DECODE);
			}
		}
	}

	*nelements = cnt;
	*(int64_t **)data = intp;

	return (DDI_PROP_SUCCESS);
}

/*
 * Encode an array of integers property (Can be one element)
 */
int
ddi_prop_fm_encode_ints(prop_handle_t *ph, void *data, uint_t nelements)
{
	int	i;
	int	*tmp;
	int	cnt;
	int	size;

	/*
	 * If there is no data, we cannot do anything
	 */
	if (nelements == 0)
		return (DDI_PROP_CANNOT_ENCODE);

	/*
	 * Get the size of an encoded int.
	 */
	size = DDI_PROP_INT(ph, DDI_PROP_CMD_GET_ESIZE, NULL);

	if (size < DDI_PROP_RESULT_OK) {
		switch (size) {
		case DDI_PROP_RESULT_EOF:
			return (DDI_PROP_END_OF_DATA);

		case DDI_PROP_RESULT_ERROR:
			return (DDI_PROP_CANNOT_ENCODE);
		}
	}

	/*
	 * Allocate space in the handle to store the encoded int.
	 */
	if (ddi_prop_encode_alloc(ph, size * nelements) !=
	    DDI_PROP_SUCCESS)
		return (DDI_PROP_NO_MEMORY);

	/*
	 * Encode the array of ints.
	 */
	tmp = (int *)data;
	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
		i = DDI_PROP_INT(ph, DDI_PROP_CMD_ENCODE, tmp);
		if (i < DDI_PROP_RESULT_OK) {
			switch (i) {
			case DDI_PROP_RESULT_EOF:
				return (DDI_PROP_END_OF_DATA);

			case DDI_PROP_RESULT_ERROR:
				return (DDI_PROP_CANNOT_ENCODE);
			}
		}
	}

	return (DDI_PROP_SUCCESS);
}


/*
 * Encode a 64 bit integer array property
 */
int
ddi_prop_fm_encode_int64(prop_handle_t *ph, void *data, uint_t nelements)
{
	int i;
	int cnt;
	int size;
	int64_t *tmp;

	/*
	 * If there is no data, we cannot do anything
	 */
	if (nelements == 0)
		return (DDI_PROP_CANNOT_ENCODE);

	/*
	 * Get the size of an encoded 64 bit int.
	 */
	size = DDI_PROP_INT64(ph, DDI_PROP_CMD_GET_ESIZE, NULL);

	if (size < DDI_PROP_RESULT_OK) {
		switch (size) {
		case DDI_PROP_RESULT_EOF:
			return (DDI_PROP_END_OF_DATA);

		case DDI_PROP_RESULT_ERROR:
			return (DDI_PROP_CANNOT_ENCODE);
		}
	}

	/*
	 * Allocate space in the handle to store the encoded int.
	 */
	if (ddi_prop_encode_alloc(ph, size * nelements) !=
	    DDI_PROP_SUCCESS)
		return (DDI_PROP_NO_MEMORY);

	/*
	 * Encode the array of ints.
	 */
	tmp = (int64_t *)data;
	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
		i = DDI_PROP_INT64(ph, DDI_PROP_CMD_ENCODE, tmp);
		if (i < DDI_PROP_RESULT_OK) {
			switch (i) {
			case DDI_PROP_RESULT_EOF:
				return (DDI_PROP_END_OF_DATA);

			case DDI_PROP_RESULT_ERROR:
				return (DDI_PROP_CANNOT_ENCODE);
			}
		}
	}

	return (DDI_PROP_SUCCESS);
}

/*
 * Decode a single string property
 */
static int
ddi_prop_fm_decode_string(prop_handle_t *ph, void *data, uint_t *nelements)
{
	char		*tmp;
	char		*str;
	int	i;
	int	size;

	/*
	 * If there is nothing to decode return an error
	 */
	if (ph->ph_size == 0)
		return (DDI_PROP_END_OF_DATA);

	/*
	 * Get the decoded size of the encoded string.
	 */
	size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
	if (size < DDI_PROP_RESULT_OK) {
		switch (size) {
		case DDI_PROP_RESULT_EOF:
			return (DDI_PROP_END_OF_DATA);

		case DDI_PROP_RESULT_ERROR:
			return (DDI_PROP_CANNOT_DECODE);
		}
	}

	/*
	 * Allocated memory to store the decoded value in.
	 */
	str = ddi_prop_decode_alloc((size_t)size, ddi_prop_free_string);

	/* GET_DSIZE advanced the cursor; rewind before the real decode */
	ddi_prop_reset_pos(ph);

	/*
	 * Decode the str and place it in the space we just allocated
	 */
	tmp = str;
	i = DDI_PROP_STR(ph, DDI_PROP_CMD_DECODE, tmp);
	if (i < DDI_PROP_RESULT_OK) {
		/*
		 * Free the space we just allocated
		 * and return an error.
		 */
		ddi_prop_free(str);
		switch (i) {
		case DDI_PROP_RESULT_EOF:
			return (DDI_PROP_END_OF_DATA);

		case DDI_PROP_RESULT_ERROR:
			return (DDI_PROP_CANNOT_DECODE);
		}
	}

	*(char **)data = str;
	*nelements = 1;

	return (DDI_PROP_SUCCESS);
}

/*
 * Decode an array of strings.
 */
int
ddi_prop_fm_decode_strings(prop_handle_t *ph, void *data, uint_t *nelements)
{
	int		cnt = 0;
	char		**strs;
	char		**tmp;
	char		*ptr;
	int	i;
	int	n;
	int	size;
	size_t	nbytes;

	/*
	 * Figure out how many array elements there are by going through the
	 * data without decoding it first and counting.
	 */
	for (;;) {
		i = DDI_PROP_STR(ph, DDI_PROP_CMD_SKIP, NULL);
		if (i < 0)
			break;
		cnt++;
	}

	/*
	 * If there are no elements return an error
	 */
	if (cnt == 0)
		return (DDI_PROP_END_OF_DATA);

	/*
	 * If we cannot skip through the data, we cannot decode it
	 */
	if (i == DDI_PROP_RESULT_ERROR)
		return (DDI_PROP_CANNOT_DECODE);

	/*
	 * Reset the data pointer to the beginning of the encoded data
	 */
	ddi_prop_reset_pos(ph);

	/*
	 * Figure out how much memory we need for the sum total
	 * (the argv-style pointer vector, a NULL terminator slot, plus
	 * the string bytes themselves).
	 */
	nbytes = (cnt + 1) * sizeof (char *);

	for (n = 0; n < cnt; n++) {
		/*
		 * Get the decoded size of the current encoded string.
		 * (Nothing has been allocated yet, so the early returns
		 * below do not leak.)
		 */
		size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
		if (size < DDI_PROP_RESULT_OK) {
			switch (size) {
			case DDI_PROP_RESULT_EOF:
				return (DDI_PROP_END_OF_DATA);

			case DDI_PROP_RESULT_ERROR:
				return (DDI_PROP_CANNOT_DECODE);
			}
		}

		nbytes += size;
	}

	/*
	 * Allocate memory in which to store the decoded strings.
	 */
	strs = ddi_prop_decode_alloc(nbytes, ddi_prop_free_strings);

	/*
	 * Set up pointers for each string by figuring out yet
	 * again how long each string is.
	 */
	ddi_prop_reset_pos(ph);
	ptr = (caddr_t)strs + ((cnt + 1) * sizeof (char *));
	for (tmp = strs, n = 0; n < cnt; n++, tmp++) {
		/*
		 * Get the decoded size of the current encoded string.
		 */
		size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
		if (size < DDI_PROP_RESULT_OK) {
			ddi_prop_free(strs);
			switch (size) {
			case DDI_PROP_RESULT_EOF:
				return (DDI_PROP_END_OF_DATA);

			case DDI_PROP_RESULT_ERROR:
				return (DDI_PROP_CANNOT_DECODE);
			}
		}

		*tmp = ptr;
		ptr += size;
	}

	/*
	 * String array is terminated by a NULL
	 */
	*tmp = NULL;

	/*
	 * Finally, we can decode each string
	 */
	ddi_prop_reset_pos(ph);
	for (tmp = strs, n = 0; n < cnt; n++, tmp++) {
		i = DDI_PROP_STR(ph, DDI_PROP_CMD_DECODE, *tmp);
		if (i < DDI_PROP_RESULT_OK) {
			/*
			 * Free the space we just allocated
			 * and return an error
			 */
			ddi_prop_free(strs);
			switch (i) {
			case DDI_PROP_RESULT_EOF:
				return (DDI_PROP_END_OF_DATA);

			case DDI_PROP_RESULT_ERROR:
				return (DDI_PROP_CANNOT_DECODE);
			}
		}
	}

	*(char ***)data = strs;
	*nelements = cnt;

	return (DDI_PROP_SUCCESS);
}

/*
 * Encode a string.
 */
int
ddi_prop_fm_encode_string(prop_handle_t *ph, void *data, uint_t nelements)
{
	char		**tmp;
	int	size;
	int	i;

	/*
	 * If there is no data, we cannot do anything
	 */
	if (nelements == 0)
		return (DDI_PROP_CANNOT_ENCODE);

	/*
	 * Get the size of the encoded string.
	 */
	tmp = (char **)data;
	size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_ESIZE, *tmp);
	if (size < DDI_PROP_RESULT_OK) {
		switch (size) {
		case DDI_PROP_RESULT_EOF:
			return (DDI_PROP_END_OF_DATA);

		case DDI_PROP_RESULT_ERROR:
			return (DDI_PROP_CANNOT_ENCODE);
		}
	}

	/*
	 * Allocate space in the handle to store the encoded string.
2410 */ 2411 if (ddi_prop_encode_alloc(ph, size) != DDI_PROP_SUCCESS) 2412 return (DDI_PROP_NO_MEMORY); 2413 2414 ddi_prop_reset_pos(ph); 2415 2416 /* 2417 * Encode the string. 2418 */ 2419 tmp = (char **)data; 2420 i = DDI_PROP_STR(ph, DDI_PROP_CMD_ENCODE, *tmp); 2421 if (i < DDI_PROP_RESULT_OK) { 2422 switch (i) { 2423 case DDI_PROP_RESULT_EOF: 2424 return (DDI_PROP_END_OF_DATA); 2425 2426 case DDI_PROP_RESULT_ERROR: 2427 return (DDI_PROP_CANNOT_ENCODE); 2428 } 2429 } 2430 2431 return (DDI_PROP_SUCCESS); 2432 } 2433 2434 2435 /* 2436 * Encode an array of strings. 2437 */ 2438 int 2439 ddi_prop_fm_encode_strings(prop_handle_t *ph, void *data, uint_t nelements) 2440 { 2441 int cnt = 0; 2442 char **tmp; 2443 int size; 2444 uint_t total_size; 2445 int i; 2446 2447 /* 2448 * If there is no data, we cannot do anything 2449 */ 2450 if (nelements == 0) 2451 return (DDI_PROP_CANNOT_ENCODE); 2452 2453 /* 2454 * Get the total size required to encode all the strings. 2455 */ 2456 total_size = 0; 2457 tmp = (char **)data; 2458 for (cnt = 0; cnt < nelements; cnt++, tmp++) { 2459 size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_ESIZE, *tmp); 2460 if (size < DDI_PROP_RESULT_OK) { 2461 switch (size) { 2462 case DDI_PROP_RESULT_EOF: 2463 return (DDI_PROP_END_OF_DATA); 2464 2465 case DDI_PROP_RESULT_ERROR: 2466 return (DDI_PROP_CANNOT_ENCODE); 2467 } 2468 } 2469 total_size += (uint_t)size; 2470 } 2471 2472 /* 2473 * Allocate space in the handle to store the encoded strings. 2474 */ 2475 if (ddi_prop_encode_alloc(ph, total_size) != DDI_PROP_SUCCESS) 2476 return (DDI_PROP_NO_MEMORY); 2477 2478 ddi_prop_reset_pos(ph); 2479 2480 /* 2481 * Encode the array of strings. 
2482 */ 2483 tmp = (char **)data; 2484 for (cnt = 0; cnt < nelements; cnt++, tmp++) { 2485 i = DDI_PROP_STR(ph, DDI_PROP_CMD_ENCODE, *tmp); 2486 if (i < DDI_PROP_RESULT_OK) { 2487 switch (i) { 2488 case DDI_PROP_RESULT_EOF: 2489 return (DDI_PROP_END_OF_DATA); 2490 2491 case DDI_PROP_RESULT_ERROR: 2492 return (DDI_PROP_CANNOT_ENCODE); 2493 } 2494 } 2495 } 2496 2497 return (DDI_PROP_SUCCESS); 2498 } 2499 2500 2501 /* 2502 * Decode an array of bytes. 2503 */ 2504 static int 2505 ddi_prop_fm_decode_bytes(prop_handle_t *ph, void *data, uint_t *nelements) 2506 { 2507 uchar_t *tmp; 2508 int nbytes; 2509 int i; 2510 2511 /* 2512 * If there are no elements return an error 2513 */ 2514 if (ph->ph_size == 0) 2515 return (DDI_PROP_END_OF_DATA); 2516 2517 /* 2518 * Get the size of the encoded array of bytes. 2519 */ 2520 nbytes = DDI_PROP_BYTES(ph, DDI_PROP_CMD_GET_DSIZE, 2521 data, ph->ph_size); 2522 if (nbytes < DDI_PROP_RESULT_OK) { 2523 switch (nbytes) { 2524 case DDI_PROP_RESULT_EOF: 2525 return (DDI_PROP_END_OF_DATA); 2526 2527 case DDI_PROP_RESULT_ERROR: 2528 return (DDI_PROP_CANNOT_DECODE); 2529 } 2530 } 2531 2532 /* 2533 * Allocated memory to store the decoded value in. 2534 */ 2535 tmp = ddi_prop_decode_alloc(nbytes, ddi_prop_free_bytes); 2536 2537 /* 2538 * Decode each element and place it in the space we just allocated 2539 */ 2540 i = DDI_PROP_BYTES(ph, DDI_PROP_CMD_DECODE, tmp, nbytes); 2541 if (i < DDI_PROP_RESULT_OK) { 2542 /* 2543 * Free the space we just allocated 2544 * and return an error 2545 */ 2546 ddi_prop_free(tmp); 2547 switch (i) { 2548 case DDI_PROP_RESULT_EOF: 2549 return (DDI_PROP_END_OF_DATA); 2550 2551 case DDI_PROP_RESULT_ERROR: 2552 return (DDI_PROP_CANNOT_DECODE); 2553 } 2554 } 2555 2556 *(uchar_t **)data = tmp; 2557 *nelements = nbytes; 2558 2559 return (DDI_PROP_SUCCESS); 2560 } 2561 2562 /* 2563 * Encode an array of bytes. 
2564 */ 2565 int 2566 ddi_prop_fm_encode_bytes(prop_handle_t *ph, void *data, uint_t nelements) 2567 { 2568 int size; 2569 int i; 2570 2571 /* 2572 * If there are no elements, then this is a boolean property, 2573 * so just create a property handle with no data and return. 2574 */ 2575 if (nelements == 0) { 2576 (void) ddi_prop_encode_alloc(ph, 0); 2577 return (DDI_PROP_SUCCESS); 2578 } 2579 2580 /* 2581 * Get the size of the encoded array of bytes. 2582 */ 2583 size = DDI_PROP_BYTES(ph, DDI_PROP_CMD_GET_ESIZE, (uchar_t *)data, 2584 nelements); 2585 if (size < DDI_PROP_RESULT_OK) { 2586 switch (size) { 2587 case DDI_PROP_RESULT_EOF: 2588 return (DDI_PROP_END_OF_DATA); 2589 2590 case DDI_PROP_RESULT_ERROR: 2591 return (DDI_PROP_CANNOT_DECODE); 2592 } 2593 } 2594 2595 /* 2596 * Allocate space in the handle to store the encoded bytes. 2597 */ 2598 if (ddi_prop_encode_alloc(ph, (uint_t)size) != DDI_PROP_SUCCESS) 2599 return (DDI_PROP_NO_MEMORY); 2600 2601 /* 2602 * Encode the array of bytes. 2603 */ 2604 i = DDI_PROP_BYTES(ph, DDI_PROP_CMD_ENCODE, (uchar_t *)data, 2605 nelements); 2606 if (i < DDI_PROP_RESULT_OK) { 2607 switch (i) { 2608 case DDI_PROP_RESULT_EOF: 2609 return (DDI_PROP_END_OF_DATA); 2610 2611 case DDI_PROP_RESULT_ERROR: 2612 return (DDI_PROP_CANNOT_ENCODE); 2613 } 2614 } 2615 2616 return (DDI_PROP_SUCCESS); 2617 } 2618 2619 /* 2620 * OBP 1275 integer, string and byte operators. 
2621 * 2622 * DDI_PROP_CMD_DECODE: 2623 * 2624 * DDI_PROP_RESULT_ERROR: cannot decode the data 2625 * DDI_PROP_RESULT_EOF: end of data 2626 * DDI_PROP_OK: data was decoded 2627 * 2628 * DDI_PROP_CMD_ENCODE: 2629 * 2630 * DDI_PROP_RESULT_ERROR: cannot encode the data 2631 * DDI_PROP_RESULT_EOF: end of data 2632 * DDI_PROP_OK: data was encoded 2633 * 2634 * DDI_PROP_CMD_SKIP: 2635 * 2636 * DDI_PROP_RESULT_ERROR: cannot skip the data 2637 * DDI_PROP_RESULT_EOF: end of data 2638 * DDI_PROP_OK: data was skipped 2639 * 2640 * DDI_PROP_CMD_GET_ESIZE: 2641 * 2642 * DDI_PROP_RESULT_ERROR: cannot get encoded size 2643 * DDI_PROP_RESULT_EOF: end of data 2644 * > 0: the encoded size 2645 * 2646 * DDI_PROP_CMD_GET_DSIZE: 2647 * 2648 * DDI_PROP_RESULT_ERROR: cannot get decoded size 2649 * DDI_PROP_RESULT_EOF: end of data 2650 * > 0: the decoded size 2651 */ 2652 2653 /* 2654 * OBP 1275 integer operator 2655 * 2656 * OBP properties are a byte stream of data, so integers may not be 2657 * properly aligned. Therefore we need to copy them one byte at a time. 2658 */ 2659 int 2660 ddi_prop_1275_int(prop_handle_t *ph, uint_t cmd, int *data) 2661 { 2662 int i; 2663 2664 switch (cmd) { 2665 case DDI_PROP_CMD_DECODE: 2666 /* 2667 * Check that there is encoded data 2668 */ 2669 if (ph->ph_cur_pos == NULL || ph->ph_size == 0) 2670 return (DDI_PROP_RESULT_ERROR); 2671 if (ph->ph_flags & PH_FROM_PROM) { 2672 i = MIN(ph->ph_size, PROP_1275_INT_SIZE); 2673 if ((int *)ph->ph_cur_pos > ((int *)ph->ph_data + 2674 ph->ph_size - i)) 2675 return (DDI_PROP_RESULT_ERROR); 2676 } else { 2677 if (ph->ph_size < sizeof (int) || 2678 ((int *)ph->ph_cur_pos > ((int *)ph->ph_data + 2679 ph->ph_size - sizeof (int)))) 2680 return (DDI_PROP_RESULT_ERROR); 2681 } 2682 2683 /* 2684 * Copy the integer, using the implementation-specific 2685 * copy function if the property is coming from the PROM. 
2686 */ 2687 if (ph->ph_flags & PH_FROM_PROM) { 2688 *data = impl_ddi_prop_int_from_prom( 2689 (uchar_t *)ph->ph_cur_pos, 2690 (ph->ph_size < PROP_1275_INT_SIZE) ? 2691 ph->ph_size : PROP_1275_INT_SIZE); 2692 } else { 2693 bcopy(ph->ph_cur_pos, data, sizeof (int)); 2694 } 2695 2696 /* 2697 * Move the current location to the start of the next 2698 * bit of undecoded data. 2699 */ 2700 ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos + 2701 PROP_1275_INT_SIZE; 2702 return (DDI_PROP_RESULT_OK); 2703 2704 case DDI_PROP_CMD_ENCODE: 2705 /* 2706 * Check that there is room to encoded the data 2707 */ 2708 if (ph->ph_cur_pos == NULL || ph->ph_size == 0 || 2709 ph->ph_size < PROP_1275_INT_SIZE || 2710 ((int *)ph->ph_cur_pos > ((int *)ph->ph_data + 2711 ph->ph_size - sizeof (int)))) 2712 return (DDI_PROP_RESULT_ERROR); 2713 2714 /* 2715 * Encode the integer into the byte stream one byte at a 2716 * time. 2717 */ 2718 bcopy(data, ph->ph_cur_pos, sizeof (int)); 2719 2720 /* 2721 * Move the current location to the start of the next bit of 2722 * space where we can store encoded data. 2723 */ 2724 ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos + PROP_1275_INT_SIZE; 2725 return (DDI_PROP_RESULT_OK); 2726 2727 case DDI_PROP_CMD_SKIP: 2728 /* 2729 * Check that there is encoded data 2730 */ 2731 if (ph->ph_cur_pos == NULL || ph->ph_size == 0 || 2732 ph->ph_size < PROP_1275_INT_SIZE) 2733 return (DDI_PROP_RESULT_ERROR); 2734 2735 2736 if ((caddr_t)ph->ph_cur_pos == 2737 (caddr_t)ph->ph_data + ph->ph_size) { 2738 return (DDI_PROP_RESULT_EOF); 2739 } else if ((caddr_t)ph->ph_cur_pos > 2740 (caddr_t)ph->ph_data + ph->ph_size) { 2741 return (DDI_PROP_RESULT_EOF); 2742 } 2743 2744 /* 2745 * Move the current location to the start of the next bit of 2746 * undecoded data. 
2747 */ 2748 ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos + PROP_1275_INT_SIZE; 2749 return (DDI_PROP_RESULT_OK); 2750 2751 case DDI_PROP_CMD_GET_ESIZE: 2752 /* 2753 * Return the size of an encoded integer on OBP 2754 */ 2755 return (PROP_1275_INT_SIZE); 2756 2757 case DDI_PROP_CMD_GET_DSIZE: 2758 /* 2759 * Return the size of a decoded integer on the system. 2760 */ 2761 return (sizeof (int)); 2762 2763 default: 2764 #ifdef DEBUG 2765 panic("ddi_prop_1275_int: %x impossible", cmd); 2766 /*NOTREACHED*/ 2767 #else 2768 return (DDI_PROP_RESULT_ERROR); 2769 #endif /* DEBUG */ 2770 } 2771 } 2772 2773 /* 2774 * 64 bit integer operator. 2775 * 2776 * This is an extension, defined by Sun, to the 1275 integer 2777 * operator. This routine handles the encoding/decoding of 2778 * 64 bit integer properties. 2779 */ 2780 int 2781 ddi_prop_int64_op(prop_handle_t *ph, uint_t cmd, int64_t *data) 2782 { 2783 2784 switch (cmd) { 2785 case DDI_PROP_CMD_DECODE: 2786 /* 2787 * Check that there is encoded data 2788 */ 2789 if (ph->ph_cur_pos == NULL || ph->ph_size == 0) 2790 return (DDI_PROP_RESULT_ERROR); 2791 if (ph->ph_flags & PH_FROM_PROM) { 2792 return (DDI_PROP_RESULT_ERROR); 2793 } else { 2794 if (ph->ph_size < sizeof (int64_t) || 2795 ((int64_t *)ph->ph_cur_pos > 2796 ((int64_t *)ph->ph_data + 2797 ph->ph_size - sizeof (int64_t)))) 2798 return (DDI_PROP_RESULT_ERROR); 2799 } 2800 /* 2801 * Copy the integer, using the implementation-specific 2802 * copy function if the property is coming from the PROM. 2803 */ 2804 if (ph->ph_flags & PH_FROM_PROM) { 2805 return (DDI_PROP_RESULT_ERROR); 2806 } else { 2807 bcopy(ph->ph_cur_pos, data, sizeof (int64_t)); 2808 } 2809 2810 /* 2811 * Move the current location to the start of the next 2812 * bit of undecoded data. 
2813 */ 2814 ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos + 2815 sizeof (int64_t); 2816 return (DDI_PROP_RESULT_OK); 2817 2818 case DDI_PROP_CMD_ENCODE: 2819 /* 2820 * Check that there is room to encoded the data 2821 */ 2822 if (ph->ph_cur_pos == NULL || ph->ph_size == 0 || 2823 ph->ph_size < sizeof (int64_t) || 2824 ((int64_t *)ph->ph_cur_pos > ((int64_t *)ph->ph_data + 2825 ph->ph_size - sizeof (int64_t)))) 2826 return (DDI_PROP_RESULT_ERROR); 2827 2828 /* 2829 * Encode the integer into the byte stream one byte at a 2830 * time. 2831 */ 2832 bcopy(data, ph->ph_cur_pos, sizeof (int64_t)); 2833 2834 /* 2835 * Move the current location to the start of the next bit of 2836 * space where we can store encoded data. 2837 */ 2838 ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos + 2839 sizeof (int64_t); 2840 return (DDI_PROP_RESULT_OK); 2841 2842 case DDI_PROP_CMD_SKIP: 2843 /* 2844 * Check that there is encoded data 2845 */ 2846 if (ph->ph_cur_pos == NULL || ph->ph_size == 0 || 2847 ph->ph_size < sizeof (int64_t)) 2848 return (DDI_PROP_RESULT_ERROR); 2849 2850 if ((caddr_t)ph->ph_cur_pos == 2851 (caddr_t)ph->ph_data + ph->ph_size) { 2852 return (DDI_PROP_RESULT_EOF); 2853 } else if ((caddr_t)ph->ph_cur_pos > 2854 (caddr_t)ph->ph_data + ph->ph_size) { 2855 return (DDI_PROP_RESULT_EOF); 2856 } 2857 2858 /* 2859 * Move the current location to the start of 2860 * the next bit of undecoded data. 2861 */ 2862 ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos + 2863 sizeof (int64_t); 2864 return (DDI_PROP_RESULT_OK); 2865 2866 case DDI_PROP_CMD_GET_ESIZE: 2867 /* 2868 * Return the size of an encoded integer on OBP 2869 */ 2870 return (sizeof (int64_t)); 2871 2872 case DDI_PROP_CMD_GET_DSIZE: 2873 /* 2874 * Return the size of a decoded integer on the system. 
2875 */ 2876 return (sizeof (int64_t)); 2877 2878 default: 2879 #ifdef DEBUG 2880 panic("ddi_prop_int64_op: %x impossible", cmd); 2881 /*NOTREACHED*/ 2882 #else 2883 return (DDI_PROP_RESULT_ERROR); 2884 #endif /* DEBUG */ 2885 } 2886 } 2887 2888 /* 2889 * OBP 1275 string operator. 2890 * 2891 * OBP strings are NULL terminated. 2892 */ 2893 int 2894 ddi_prop_1275_string(prop_handle_t *ph, uint_t cmd, char *data) 2895 { 2896 int n; 2897 char *p; 2898 char *end; 2899 2900 switch (cmd) { 2901 case DDI_PROP_CMD_DECODE: 2902 /* 2903 * Check that there is encoded data 2904 */ 2905 if (ph->ph_cur_pos == NULL || ph->ph_size == 0) { 2906 return (DDI_PROP_RESULT_ERROR); 2907 } 2908 2909 /* 2910 * Match DDI_PROP_CMD_GET_DSIZE logic for when to stop and 2911 * how to NULL terminate result. 2912 */ 2913 p = (char *)ph->ph_cur_pos; 2914 end = (char *)ph->ph_data + ph->ph_size; 2915 if (p >= end) 2916 return (DDI_PROP_RESULT_EOF); 2917 2918 while (p < end) { 2919 *data++ = *p; 2920 if (*p++ == 0) { /* NULL from OBP */ 2921 ph->ph_cur_pos = p; 2922 return (DDI_PROP_RESULT_OK); 2923 } 2924 } 2925 2926 /* 2927 * If OBP did not NULL terminate string, which happens 2928 * (at least) for 'true'/'false' boolean values, account for 2929 * the space and store null termination on decode. 2930 */ 2931 ph->ph_cur_pos = p; 2932 *data = 0; 2933 return (DDI_PROP_RESULT_OK); 2934 2935 case DDI_PROP_CMD_ENCODE: 2936 /* 2937 * Check that there is room to encoded the data 2938 */ 2939 if (ph->ph_cur_pos == NULL || ph->ph_size == 0) { 2940 return (DDI_PROP_RESULT_ERROR); 2941 } 2942 2943 n = strlen(data) + 1; 2944 if ((char *)ph->ph_cur_pos > ((char *)ph->ph_data + 2945 ph->ph_size - n)) { 2946 return (DDI_PROP_RESULT_ERROR); 2947 } 2948 2949 /* 2950 * Copy the NULL terminated string 2951 */ 2952 bcopy(data, ph->ph_cur_pos, n); 2953 2954 /* 2955 * Move the current location to the start of the next bit of 2956 * space where we can store encoded data. 
2957 */ 2958 ph->ph_cur_pos = (char *)ph->ph_cur_pos + n; 2959 return (DDI_PROP_RESULT_OK); 2960 2961 case DDI_PROP_CMD_SKIP: 2962 /* 2963 * Check that there is encoded data 2964 */ 2965 if (ph->ph_cur_pos == NULL || ph->ph_size == 0) { 2966 return (DDI_PROP_RESULT_ERROR); 2967 } 2968 2969 /* 2970 * Return the string length plus one for the NULL 2971 * We know the size of the property, we need to 2972 * ensure that the string is properly formatted, 2973 * since we may be looking up random OBP data. 2974 */ 2975 p = (char *)ph->ph_cur_pos; 2976 end = (char *)ph->ph_data + ph->ph_size; 2977 if (p >= end) 2978 return (DDI_PROP_RESULT_EOF); 2979 2980 while (p < end) { 2981 if (*p++ == 0) { /* NULL from OBP */ 2982 ph->ph_cur_pos = p; 2983 return (DDI_PROP_RESULT_OK); 2984 } 2985 } 2986 2987 /* 2988 * Accommodate the fact that OBP does not always NULL 2989 * terminate strings. 2990 */ 2991 ph->ph_cur_pos = p; 2992 return (DDI_PROP_RESULT_OK); 2993 2994 case DDI_PROP_CMD_GET_ESIZE: 2995 /* 2996 * Return the size of the encoded string on OBP. 2997 */ 2998 return (strlen(data) + 1); 2999 3000 case DDI_PROP_CMD_GET_DSIZE: 3001 /* 3002 * Return the string length plus one for the NULL. 3003 * We know the size of the property, we need to 3004 * ensure that the string is properly formatted, 3005 * since we may be looking up random OBP data. 3006 */ 3007 p = (char *)ph->ph_cur_pos; 3008 end = (char *)ph->ph_data + ph->ph_size; 3009 if (p >= end) 3010 return (DDI_PROP_RESULT_EOF); 3011 3012 for (n = 0; p < end; n++) { 3013 if (*p++ == 0) { /* NULL from OBP */ 3014 ph->ph_cur_pos = p; 3015 return (n + 1); 3016 } 3017 } 3018 3019 /* 3020 * If OBP did not NULL terminate string, which happens for 3021 * 'true'/'false' boolean values, account for the space 3022 * to store null termination here. 
3023 */ 3024 ph->ph_cur_pos = p; 3025 return (n + 1); 3026 3027 default: 3028 #ifdef DEBUG 3029 panic("ddi_prop_1275_string: %x impossible", cmd); 3030 /*NOTREACHED*/ 3031 #else 3032 return (DDI_PROP_RESULT_ERROR); 3033 #endif /* DEBUG */ 3034 } 3035 } 3036 3037 /* 3038 * OBP 1275 byte operator 3039 * 3040 * Caller must specify the number of bytes to get. OBP encodes bytes 3041 * as a byte so there is a 1-to-1 translation. 3042 */ 3043 int 3044 ddi_prop_1275_bytes(prop_handle_t *ph, uint_t cmd, uchar_t *data, 3045 uint_t nelements) 3046 { 3047 switch (cmd) { 3048 case DDI_PROP_CMD_DECODE: 3049 /* 3050 * Check that there is encoded data 3051 */ 3052 if (ph->ph_cur_pos == NULL || ph->ph_size == 0 || 3053 ph->ph_size < nelements || 3054 ((char *)ph->ph_cur_pos > ((char *)ph->ph_data + 3055 ph->ph_size - nelements))) 3056 return (DDI_PROP_RESULT_ERROR); 3057 3058 /* 3059 * Copy out the bytes 3060 */ 3061 bcopy(ph->ph_cur_pos, data, nelements); 3062 3063 /* 3064 * Move the current location 3065 */ 3066 ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements; 3067 return (DDI_PROP_RESULT_OK); 3068 3069 case DDI_PROP_CMD_ENCODE: 3070 /* 3071 * Check that there is room to encode the data 3072 */ 3073 if (ph->ph_cur_pos == NULL || ph->ph_size == 0 || 3074 ph->ph_size < nelements || 3075 ((char *)ph->ph_cur_pos > ((char *)ph->ph_data + 3076 ph->ph_size - nelements))) 3077 return (DDI_PROP_RESULT_ERROR); 3078 3079 /* 3080 * Copy in the bytes 3081 */ 3082 bcopy(data, ph->ph_cur_pos, nelements); 3083 3084 /* 3085 * Move the current location to the start of the next bit of 3086 * space where we can store encoded data. 
3087 */ 3088 ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements; 3089 return (DDI_PROP_RESULT_OK); 3090 3091 case DDI_PROP_CMD_SKIP: 3092 /* 3093 * Check that there is encoded data 3094 */ 3095 if (ph->ph_cur_pos == NULL || ph->ph_size == 0 || 3096 ph->ph_size < nelements) 3097 return (DDI_PROP_RESULT_ERROR); 3098 3099 if ((char *)ph->ph_cur_pos > ((char *)ph->ph_data + 3100 ph->ph_size - nelements)) 3101 return (DDI_PROP_RESULT_EOF); 3102 3103 /* 3104 * Move the current location 3105 */ 3106 ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements; 3107 return (DDI_PROP_RESULT_OK); 3108 3109 case DDI_PROP_CMD_GET_ESIZE: 3110 /* 3111 * The size in bytes of the encoded size is the 3112 * same as the decoded size provided by the caller. 3113 */ 3114 return (nelements); 3115 3116 case DDI_PROP_CMD_GET_DSIZE: 3117 /* 3118 * Just return the number of bytes specified by the caller. 3119 */ 3120 return (nelements); 3121 3122 default: 3123 #ifdef DEBUG 3124 panic("ddi_prop_1275_bytes: %x impossible", cmd); 3125 /*NOTREACHED*/ 3126 #else 3127 return (DDI_PROP_RESULT_ERROR); 3128 #endif /* DEBUG */ 3129 } 3130 } 3131 3132 /* 3133 * Used for properties that come from the OBP, hardware configuration files, 3134 * or that are created by calls to ddi_prop_update(9F). 3135 */ 3136 static struct prop_handle_ops prop_1275_ops = { 3137 ddi_prop_1275_int, 3138 ddi_prop_1275_string, 3139 ddi_prop_1275_bytes, 3140 ddi_prop_int64_op 3141 }; 3142 3143 3144 /* 3145 * Interface to create/modify a managed property on child's behalf... 3146 * Flags interpreted are: 3147 * DDI_PROP_CANSLEEP: Allow memory allocation to sleep. 3148 * DDI_PROP_SYSTEM_DEF: Manipulate system list rather than driver list. 3149 * 3150 * Use same dev_t when modifying or undefining a property. 3151 * Search for properties with DDI_DEV_T_ANY to match first named 3152 * property on the list. 3153 * 3154 * Properties are stored LIFO and subsequently will match the first 3155 * `matching' instance. 
 */

/*
 * ddi_prop_add:	Add a software defined property
 */

/*
 * define to get a new ddi_prop_t.
 * km_flags are KM_SLEEP or KM_NOSLEEP.
 */
#define	DDI_NEW_PROP_T(km_flags)	\
	(kmem_zalloc(sizeof (ddi_prop_t), km_flags))

static int
ddi_prop_add(dev_t dev, dev_info_t *dip, int flags,
    char *name, caddr_t value, int length)
{
	ddi_prop_t *new_propp, *propp;
	/* Default to the driver property list; overridden by flags below. */
	ddi_prop_t **list_head = &(DEVI(dip)->devi_drv_prop_ptr);
	int km_flags = KM_NOSLEEP;
	int name_buf_len;

	/*
	 * If dev_t is DDI_DEV_T_ANY or name's length is zero return error.
	 */
	if (dev == DDI_DEV_T_ANY || name == (char *)0 || strlen(name) == 0)
		return (DDI_PROP_INVAL_ARG);

	if (flags & DDI_PROP_CANSLEEP)
		km_flags = KM_SLEEP;

	if (flags & DDI_PROP_SYSTEM_DEF)
		list_head = &(DEVI(dip)->devi_sys_prop_ptr);
	else if (flags & DDI_PROP_HW_DEF)
		list_head = &(DEVI(dip)->devi_hw_prop_ptr);

	if ((new_propp = DDI_NEW_PROP_T(km_flags)) == NULL) {
		cmn_err(CE_CONT, prop_no_mem_msg, name);
		return (DDI_PROP_NO_MEMORY);
	}

	/*
	 * If dev is major number 0, then we need to do a ddi_name_to_major
	 * to get the real major number for the device.  This needs to be
	 * done because some drivers need to call ddi_prop_create in their
	 * attach routines but they don't have a dev.  By creating the dev
	 * ourself if the major number is 0, drivers will not have to know
	 * what their major number is.  They can just create a dev with major
	 * number 0 and pass it in.  For device 0, we will be doing a little
	 * extra work by recreating the same dev that we already have, but
	 * its the price you pay :-).
	 *
	 * This fixes bug #1098060.
	 */
	if (getmajor(dev) == DDI_MAJOR_T_UNKNOWN) {
		new_propp->prop_dev =
		    makedevice(ddi_name_to_major(DEVI(dip)->devi_binding_name),
		    getminor(dev));
	} else
		new_propp->prop_dev = dev;

	/*
	 * Allocate space for property name and copy it in...
	 */
	name_buf_len = strlen(name) + 1;
	new_propp->prop_name = kmem_alloc(name_buf_len, km_flags);
	if (new_propp->prop_name == 0) {
		kmem_free(new_propp, sizeof (ddi_prop_t));
		cmn_err(CE_CONT, prop_no_mem_msg, name);
		return (DDI_PROP_NO_MEMORY);
	}
	bcopy(name, new_propp->prop_name, name_buf_len);

	/*
	 * Set the property type
	 */
	new_propp->prop_flags = flags & DDI_PROP_TYPE_MASK;

	/*
	 * Set length and value ONLY if not an explicit property undefine:
	 * NOTE: value and length are zero for explicit undefines.
	 */
	if (flags & DDI_PROP_UNDEF_IT) {
		new_propp->prop_flags |= DDI_PROP_UNDEF_IT;
	} else {
		if ((new_propp->prop_len = length) != 0) {
			new_propp->prop_val = kmem_alloc(length, km_flags);
			if (new_propp->prop_val == 0) {
				/* Unwind the name allocation on failure. */
				kmem_free(new_propp->prop_name, name_buf_len);
				kmem_free(new_propp, sizeof (ddi_prop_t));
				cmn_err(CE_CONT, prop_no_mem_msg, name);
				return (DDI_PROP_NO_MEMORY);
			}
			bcopy(value, new_propp->prop_val, length);
		}
	}

	/*
	 * Link property into beginning of list. (Properties are LIFO order.)
	 */
	mutex_enter(&(DEVI(dip)->devi_lock));
	propp = *list_head;
	new_propp->prop_next = propp;
	*list_head = new_propp;
	mutex_exit(&(DEVI(dip)->devi_lock));
	return (DDI_PROP_SUCCESS);
}


/*
 * ddi_prop_change:	Modify a software managed property value
 *
 *			Set new length and value if found.
 *			returns DDI_PROP_INVAL_ARG if dev is DDI_DEV_T_ANY or
 *			input name is the NULL string.
 *			returns DDI_PROP_NO_MEMORY if unable to allocate memory
 *
 *			Note: an undef can be modified to be a define,
 *			(you can't go the other way.)
 */
static int
ddi_prop_change(dev_t dev, dev_info_t *dip, int flags,
    char *name, caddr_t value, int length)
{
	ddi_prop_t *propp;
	ddi_prop_t **ppropp;
	caddr_t p = NULL;

	if ((dev == DDI_DEV_T_ANY) || (name == NULL) || (strlen(name) == 0))
		return (DDI_PROP_INVAL_ARG);

	/*
	 * Preallocate buffer, even if we don't need it...
	 * (Done before taking devi_lock so a KM_SLEEP allocation
	 * cannot block while the lock is held.)
	 */
	if (length != 0) {
		p = kmem_alloc(length, (flags & DDI_PROP_CANSLEEP) ?
		    KM_SLEEP : KM_NOSLEEP);
		if (p == NULL) {
			cmn_err(CE_CONT, prop_no_mem_msg, name);
			return (DDI_PROP_NO_MEMORY);
		}
	}

	/*
	 * If the dev_t value contains DDI_MAJOR_T_UNKNOWN for the major
	 * number, a real dev_t value should be created based upon the dip's
	 * binding driver.  See ddi_prop_add...
	 */
	if (getmajor(dev) == DDI_MAJOR_T_UNKNOWN)
		dev = makedevice(
		    ddi_name_to_major(DEVI(dip)->devi_binding_name),
		    getminor(dev));

	/*
	 * Check to see if the property exists.  If so we modify it.
	 * Else we create it by calling ddi_prop_add().
	 */
	mutex_enter(&(DEVI(dip)->devi_lock));
	ppropp = &DEVI(dip)->devi_drv_prop_ptr;
	if (flags & DDI_PROP_SYSTEM_DEF)
		ppropp = &DEVI(dip)->devi_sys_prop_ptr;
	else if (flags & DDI_PROP_HW_DEF)
		ppropp = &DEVI(dip)->devi_hw_prop_ptr;

	if ((propp = i_ddi_prop_search(dev, name, flags, ppropp)) != NULL) {
		/*
		 * Found it: replace the old value with the preallocated
		 * buffer (which may be NULL for a zero-length value) and
		 * free the old value.
		 */
		if (length != 0)
			bcopy(value, p, length);

		if (propp->prop_len != 0)
			kmem_free(propp->prop_val, propp->prop_len);

		propp->prop_len = length;
		propp->prop_val = p;
		/* A modify turns a previous explicit undef into a define. */
		propp->prop_flags &= ~DDI_PROP_UNDEF_IT;
		mutex_exit(&(DEVI(dip)->devi_lock));
		return (DDI_PROP_SUCCESS);
	}

	/* Not found: discard the preallocated buffer and create it. */
	mutex_exit(&(DEVI(dip)->devi_lock));
	if (length != 0)
		kmem_free(p, length);

	return (ddi_prop_add(dev, dip, flags, name, value, length));
}

/*
 * Common update routine used to update and encode a property.  Creates
 * a property handle, calls the property encode routine, figures out if
 * the property already exists and updates if it does.  Otherwise it
 * creates if it does not exist.
 */
int
ddi_prop_update_common(dev_t match_dev, dev_info_t *dip, int flags,
    char *name, void *data, uint_t nelements,
    int (*prop_create)(prop_handle_t *, void *data, uint_t nelements))
{
	prop_handle_t ph;
	int rval;
	uint_t ourflags;

	/*
	 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
	 * return error.
	 */
	if (match_dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0)
		return (DDI_PROP_INVAL_ARG);

	/*
	 * Create the handle
	 */
	ph.ph_data = NULL;
	ph.ph_cur_pos = NULL;
	ph.ph_save_pos = NULL;
	ph.ph_size = 0;
	ph.ph_ops = &prop_1275_ops;

	/*
	 * ourflags:
	 * For compatibility with the old interfaces.  The old interfaces
	 * didn't sleep by default and slept when the flag was set.  These
	 * interfaces do the opposite.  So the old interfaces now set the
	 * DDI_PROP_DONTSLEEP flag by default which tells us not to sleep.
	 *
	 * ph.ph_flags:
	 * Blocked data or unblocked data allocation
	 * for ph.ph_data in ddi_prop_encode_alloc()
	 */
	if (flags & DDI_PROP_DONTSLEEP) {
		ourflags = flags;
		ph.ph_flags = DDI_PROP_DONTSLEEP;
	} else {
		ourflags = flags | DDI_PROP_CANSLEEP;
		ph.ph_flags = DDI_PROP_CANSLEEP;
	}

	/*
	 * Encode the data and store it in the property handle by
	 * calling the prop_encode routine.
	 */
	if ((rval = (*prop_create)(&ph, data, nelements)) !=
	    DDI_PROP_SUCCESS) {
		if (rval == DDI_PROP_NO_MEMORY)
			cmn_err(CE_CONT, prop_no_mem_msg, name);
		if (ph.ph_size != 0)
			kmem_free(ph.ph_data, ph.ph_size);
		return (rval);
	}

	/*
	 * The old interfaces use a stacking approach to creating
	 * properties.  If we are being called from the old interfaces,
	 * the DDI_PROP_STACK_CREATE flag will be set, so we just do a
	 * create without checking.
	 */
	if (flags & DDI_PROP_STACK_CREATE) {
		rval = ddi_prop_add(match_dev, dip,
		    ourflags, name, ph.ph_data, ph.ph_size);
	} else {
		rval = ddi_prop_change(match_dev, dip,
		    ourflags, name, ph.ph_data, ph.ph_size);
	}

	/*
	 * Free the encoded data allocated in the prop_encode routine.
	 * (ddi_prop_add/ddi_prop_change made their own copy.)
	 */
	if (ph.ph_size != 0)
		kmem_free(ph.ph_data, ph.ph_size);

	return (rval);
}


/*
 * ddi_prop_create:	Define a managed property:
 *			See above for details.
 */

int
ddi_prop_create(dev_t dev, dev_info_t *dip, int flag,
    char *name, caddr_t value, int length)
{
	/* Obsolete interface: defaults to no-sleep unless CANSLEEP given. */
	if (!(flag & DDI_PROP_CANSLEEP)) {
		flag |= DDI_PROP_DONTSLEEP;
#ifdef DDI_PROP_DEBUG
		if (length != 0)
			cmn_err(CE_NOTE, "!ddi_prop_create: interface obsolete,"
			    "use ddi_prop_update (prop = %s, node = %s%d)",
			    name, ddi_driver_name(dip), ddi_get_instance(dip));
#endif /* DDI_PROP_DEBUG */
	}
	/* Driver-list property: stacked (LIFO) create, untyped bytes. */
	flag &= ~DDI_PROP_SYSTEM_DEF;
	flag |= DDI_PROP_STACK_CREATE | DDI_PROP_TYPE_ANY;
	return (ddi_prop_update_common(dev, dip, flag, name,
	    value, length, ddi_prop_fm_encode_bytes));
}

int
e_ddi_prop_create(dev_t dev, dev_info_t *dip, int flag,
    char *name, caddr_t value, int length)
{
	/* Same as ddi_prop_create() but targets the system property list. */
	if (!(flag & DDI_PROP_CANSLEEP))
		flag |= DDI_PROP_DONTSLEEP;
	flag |= DDI_PROP_SYSTEM_DEF | DDI_PROP_STACK_CREATE | DDI_PROP_TYPE_ANY;
	return (ddi_prop_update_common(dev, dip, flag,
	    name, value, length, ddi_prop_fm_encode_bytes));
}

int
ddi_prop_modify(dev_t dev, dev_info_t *dip, int flag,
    char *name, caddr_t value, int length)
{
	ASSERT((flag & DDI_PROP_TYPE_MASK) == 0);

	/*
	 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
	 * return error.
	 */
	if (dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0)
		return (DDI_PROP_INVAL_ARG);

	if (!(flag & DDI_PROP_CANSLEEP))
		flag |= DDI_PROP_DONTSLEEP;
	flag &= ~DDI_PROP_SYSTEM_DEF;
	/* Modify only: the property must already exist (and not in PROM). */
	if (ddi_prop_exists(dev, dip, (flag | DDI_PROP_NOTPROM), name) == 0)
		return (DDI_PROP_NOT_FOUND);

	return (ddi_prop_update_common(dev, dip,
	    (flag | DDI_PROP_TYPE_BYTE), name,
	    value, length, ddi_prop_fm_encode_bytes));
}

int
e_ddi_prop_modify(dev_t dev, dev_info_t *dip, int flag,
    char *name, caddr_t value, int length)
{
	ASSERT((flag & DDI_PROP_TYPE_MASK) == 0);

	/*
	 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
	 * return error.
	 */
	if (dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0)
		return (DDI_PROP_INVAL_ARG);

	/* Modify only: the system-defined property must already exist. */
	if (ddi_prop_exists(dev, dip, (flag | DDI_PROP_SYSTEM_DEF), name) == 0)
		return (DDI_PROP_NOT_FOUND);

	if (!(flag & DDI_PROP_CANSLEEP))
		flag |= DDI_PROP_DONTSLEEP;
	return (ddi_prop_update_common(dev, dip,
	    (flag | DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_BYTE),
	    name, value, length, ddi_prop_fm_encode_bytes));
}


/*
 * Common lookup routine used to lookup and decode a property.
 * Creates a property handle, searches for the raw encoded data,
 * fills in the handle, and calls the property decode functions
 * passed in.
 *
 * This routine is not static because ddi_bus_prop_op() which lives in
 * ddi_impl.c calls it.  No driver should be calling this routine.
 */
int
ddi_prop_lookup_common(dev_t match_dev, dev_info_t *dip,
    uint_t flags, char *name, void *data, uint_t *nelements,
    int (*prop_decoder)(prop_handle_t *, void *data, uint_t *nelements))
{
	int rval;
	uint_t ourflags;
	prop_handle_t ph;

	if ((match_dev == DDI_DEV_T_NONE) ||
	    (name == NULL) || (strlen(name) == 0))
		return (DDI_PROP_INVAL_ARG);

	ourflags = (flags & DDI_PROP_DONTSLEEP) ? flags :
	    flags | DDI_PROP_CANSLEEP;

	/*
	 * Get the encoded data
	 */
	bzero(&ph, sizeof (prop_handle_t));

	if ((flags & DDI_UNBND_DLPI2) || (flags & DDI_PROP_ROOTNEX_GLOBAL)) {
		/*
		 * For rootnex and unbound dlpi style-2 devices, index into
		 * the devnames' array and search the global
		 * property list.
		 */
		ourflags &= ~DDI_UNBND_DLPI2;
		rval = i_ddi_prop_search_global(match_dev,
		    ourflags, name, &ph.ph_data, &ph.ph_size);
	} else {
		rval = ddi_prop_search_common(match_dev, dip,
		    PROP_LEN_AND_VAL_ALLOC, ourflags, name,
		    &ph.ph_data, &ph.ph_size);

	}

	if (rval != DDI_PROP_SUCCESS && rval != DDI_PROP_FOUND_1275) {
		ASSERT(ph.ph_data == NULL);
		ASSERT(ph.ph_size == 0);
		return (rval);
	}

	/*
	 * If the encoded data came from OBP or software,
	 * use the 1275 OBP decode/encode routines.
	 * PH_FROM_PROM selects the PROM-aware integer decoding.
	 */
	ph.ph_cur_pos = ph.ph_data;
	ph.ph_save_pos = ph.ph_data;
	ph.ph_ops = &prop_1275_ops;
	ph.ph_flags = (rval == DDI_PROP_FOUND_1275) ? PH_FROM_PROM : 0;

	rval = (*prop_decoder)(&ph, data, nelements);

	/*
	 * Free the encoded data
	 */
	if (ph.ph_size != 0)
		kmem_free(ph.ph_data, ph.ph_size);

	return (rval);
}

/*
 * Lookup and return an array of composite properties.  The driver must
 * provide the decode routine.
3597 */ 3598 int 3599 ddi_prop_lookup(dev_t match_dev, dev_info_t *dip, 3600 uint_t flags, char *name, void *data, uint_t *nelements, 3601 int (*prop_decoder)(prop_handle_t *, void *data, uint_t *nelements)) 3602 { 3603 return (ddi_prop_lookup_common(match_dev, dip, 3604 (flags | DDI_PROP_TYPE_COMPOSITE), name, 3605 data, nelements, prop_decoder)); 3606 } 3607 3608 /* 3609 * Return 1 if a property exists (no type checking done). 3610 * Return 0 if it does not exist. 3611 */ 3612 int 3613 ddi_prop_exists(dev_t match_dev, dev_info_t *dip, uint_t flags, char *name) 3614 { 3615 int i; 3616 uint_t x = 0; 3617 3618 i = ddi_prop_search_common(match_dev, dip, PROP_EXISTS, 3619 flags | DDI_PROP_TYPE_MASK, name, NULL, &x); 3620 return (i == DDI_PROP_SUCCESS || i == DDI_PROP_FOUND_1275); 3621 } 3622 3623 3624 /* 3625 * Update an array of composite properties. The driver must 3626 * provide the encode routine. 3627 */ 3628 int 3629 ddi_prop_update(dev_t match_dev, dev_info_t *dip, 3630 char *name, void *data, uint_t nelements, 3631 int (*prop_create)(prop_handle_t *, void *data, uint_t nelements)) 3632 { 3633 return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_COMPOSITE, 3634 name, data, nelements, prop_create)); 3635 } 3636 3637 /* 3638 * Get a single integer or boolean property and return it. 3639 * If the property does not exists, or cannot be decoded, 3640 * then return the defvalue passed in. 3641 * 3642 * This routine always succeeds. 
 */
int
ddi_prop_get_int(dev_t match_dev, dev_info_t *dip, uint_t flags,
    char *name, int defvalue)
{
	int data;
	uint_t nelements;
	int rval;

	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
#ifdef DEBUG
		if (dip != NULL) {
			cmn_err(CE_WARN, "ddi_prop_get_int: invalid flag"
			    " 0x%x (prop = %s, node = %s%d)", flags,
			    name, ddi_driver_name(dip), ddi_get_instance(dip));
		}
#endif /* DEBUG */
		/*
		 * Strip the unsupported bits and continue.  Note that this
		 * mask also clears DDI_PROP_ROOTNEX_GLOBAL, even though it
		 * is accepted by the check above.
		 */
		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
	}

	if ((rval = ddi_prop_lookup_common(match_dev, dip,
	    (flags | DDI_PROP_TYPE_INT), name, &data, &nelements,
	    ddi_prop_fm_decode_int)) != DDI_PROP_SUCCESS) {
		/* A zero-length (boolean) property reads as 1. */
		if (rval == DDI_PROP_END_OF_DATA)
			data = 1;
		else
			data = defvalue;
	}
	return (data);
}

/*
 * Get a single 64 bit integer or boolean property and return it.
 * If the property does not exist, or cannot be decoded,
 * then return the defvalue passed in.
 *
 * This routine always succeeds.
 */
int64_t
ddi_prop_get_int64(dev_t match_dev, dev_info_t *dip, uint_t flags,
    char *name, int64_t defvalue)
{
	int64_t data;
	uint_t nelements;
	int rval;

	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
#ifdef DEBUG
		if (dip != NULL) {
			cmn_err(CE_WARN, "ddi_prop_get_int64: invalid flag"
			    " 0x%x (prop = %s, node = %s%d)", flags,
			    name, ddi_driver_name(dip), ddi_get_instance(dip));
		}
#endif /* DEBUG */
		/*
		 * NOTE(review): unlike ddi_prop_get_int(), invalid flags
		 * here return the error code DDI_PROP_INVAL_ARG as the
		 * property "value" instead of stripping the bad bits.
		 */
		return (DDI_PROP_INVAL_ARG);
	}

	if ((rval = ddi_prop_lookup_common(match_dev, dip,
	    (flags | DDI_PROP_TYPE_INT64 | DDI_PROP_NOTPROM),
	    name, &data, &nelements, ddi_prop_fm_decode_int64))
	    != DDI_PROP_SUCCESS) {
		/* A zero-length (boolean) property reads as 1. */
		if (rval == DDI_PROP_END_OF_DATA)
			data = 1;
		else
			data = defvalue;
	}
	return (data);
}

/*
 * Get an array of integer property
 */
int
ddi_prop_lookup_int_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
    char *name, int **data, uint_t *nelements)
{
	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
#ifdef DEBUG
		if (dip != NULL) {
			cmn_err(CE_WARN, "ddi_prop_lookup_int_array: "
			    "invalid flag 0x%x (prop = %s, node = %s%d)",
			    flags, name, ddi_driver_name(dip),
			    ddi_get_instance(dip));
		}
#endif /* DEBUG */
		/* Strip unsupported flag bits and continue the lookup. */
		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
	}

	return (ddi_prop_lookup_common(match_dev, dip,
	    (flags | DDI_PROP_TYPE_INT), name, data,
	    nelements, ddi_prop_fm_decode_ints));
}

/*
 * Get an array of 64 bit integer properties
 */
int
ddi_prop_lookup_int64_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
    char *name, int64_t **data, uint_t *nelements)
{
	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
#ifdef DEBUG
		if (dip != NULL) {
			cmn_err(CE_WARN, "ddi_prop_lookup_int64_array: "
			    "invalid flag 0x%x (prop = %s, node = %s%d)",
			    flags, name, ddi_driver_name(dip),
			    ddi_get_instance(dip));
		}
#endif /* DEBUG */
		/* Invalid flags are an error for the 64-bit variant. */
		return (DDI_PROP_INVAL_ARG);
	}

	return (ddi_prop_lookup_common(match_dev, dip,
	    (flags | DDI_PROP_TYPE_INT64 | DDI_PROP_NOTPROM),
	    name, data, nelements, ddi_prop_fm_decode_int64_array));
}

/*
 * Update a single integer property.  If the property exists on the drivers
 * property list it updates, else it creates it.
 */
int
ddi_prop_update_int(dev_t match_dev, dev_info_t *dip,
    char *name, int data)
{
	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT,
	    name, &data, 1, ddi_prop_fm_encode_ints));
}

/*
 * Update a single 64 bit integer property.
 * Update the driver property list if it exists, else create it.
 */
int
ddi_prop_update_int64(dev_t match_dev, dev_info_t *dip,
    char *name, int64_t data)
{
	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT64,
	    name, &data, 1, ddi_prop_fm_encode_int64));
}

/*
 * e_ddi_prop_update_int: system-defined variant of ddi_prop_update_int().
 */
int
e_ddi_prop_update_int(dev_t match_dev, dev_info_t *dip,
    char *name, int data)
{
	return (ddi_prop_update_common(match_dev, dip,
	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT,
	    name, &data, 1, ddi_prop_fm_encode_ints));
}

/*
 * e_ddi_prop_update_int64: system-defined variant of ddi_prop_update_int64().
 */
int
e_ddi_prop_update_int64(dev_t match_dev, dev_info_t *dip,
    char *name, int64_t data)
{
	return (ddi_prop_update_common(match_dev, dip,
	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT64,
	    name, &data, 1, ddi_prop_fm_encode_int64));
}

/*
 * Update an array of integer property.  If the property exists on the drivers
 * property list it updates, else it creates it.
3811 */ 3812 int 3813 ddi_prop_update_int_array(dev_t match_dev, dev_info_t *dip, 3814 char *name, int *data, uint_t nelements) 3815 { 3816 return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT, 3817 name, data, nelements, ddi_prop_fm_encode_ints)); 3818 } 3819 3820 /* 3821 * Update an array of 64 bit integer properties. 3822 * Update the driver property list if it exists, else create it. 3823 */ 3824 int 3825 ddi_prop_update_int64_array(dev_t match_dev, dev_info_t *dip, 3826 char *name, int64_t *data, uint_t nelements) 3827 { 3828 return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT64, 3829 name, data, nelements, ddi_prop_fm_encode_int64)); 3830 } 3831 3832 int 3833 e_ddi_prop_update_int64_array(dev_t match_dev, dev_info_t *dip, 3834 char *name, int64_t *data, uint_t nelements) 3835 { 3836 return (ddi_prop_update_common(match_dev, dip, 3837 DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT64, 3838 name, data, nelements, ddi_prop_fm_encode_int64)); 3839 } 3840 3841 int 3842 e_ddi_prop_update_int_array(dev_t match_dev, dev_info_t *dip, 3843 char *name, int *data, uint_t nelements) 3844 { 3845 return (ddi_prop_update_common(match_dev, dip, 3846 DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT, 3847 name, data, nelements, ddi_prop_fm_encode_ints)); 3848 } 3849 3850 /* 3851 * Get a single string property. 
 */
int
ddi_prop_lookup_string(dev_t match_dev, dev_info_t *dip, uint_t flags,
    char *name, char **data)
{
	uint_t x;	/* element count; single strings discard it */

	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
#ifdef DEBUG
		if (dip != NULL) {
			cmn_err(CE_WARN, "%s: invalid flag 0x%x "
			    "(prop = %s, node = %s%d); invalid bits ignored",
			    "ddi_prop_lookup_string", flags, name,
			    ddi_driver_name(dip), ddi_get_instance(dip));
		}
#endif /* DEBUG */
		/* Strip unsupported flag bits and continue the lookup. */
		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
	}

	return (ddi_prop_lookup_common(match_dev, dip,
	    (flags | DDI_PROP_TYPE_STRING), name, data,
	    &x, ddi_prop_fm_decode_string));
}

/*
 * Get an array of strings property.
 */
int
ddi_prop_lookup_string_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
    char *name, char ***data, uint_t *nelements)
{
	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
#ifdef DEBUG
		if (dip != NULL) {
			cmn_err(CE_WARN, "ddi_prop_lookup_string_array: "
			    "invalid flag 0x%x (prop = %s, node = %s%d)",
			    flags, name, ddi_driver_name(dip),
			    ddi_get_instance(dip));
		}
#endif /* DEBUG */
		/* Strip unsupported flag bits and continue the lookup. */
		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
	}

	return (ddi_prop_lookup_common(match_dev, dip,
	    (flags | DDI_PROP_TYPE_STRING), name, data,
	    nelements, ddi_prop_fm_decode_strings));
}

/*
 * Update a single string property.
3906 */ 3907 int 3908 ddi_prop_update_string(dev_t match_dev, dev_info_t *dip, 3909 char *name, char *data) 3910 { 3911 return (ddi_prop_update_common(match_dev, dip, 3912 DDI_PROP_TYPE_STRING, name, &data, 1, 3913 ddi_prop_fm_encode_string)); 3914 } 3915 3916 int 3917 e_ddi_prop_update_string(dev_t match_dev, dev_info_t *dip, 3918 char *name, char *data) 3919 { 3920 return (ddi_prop_update_common(match_dev, dip, 3921 DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_STRING, 3922 name, &data, 1, ddi_prop_fm_encode_string)); 3923 } 3924 3925 3926 /* 3927 * Update an array of strings property. 3928 */ 3929 int 3930 ddi_prop_update_string_array(dev_t match_dev, dev_info_t *dip, 3931 char *name, char **data, uint_t nelements) 3932 { 3933 return (ddi_prop_update_common(match_dev, dip, 3934 DDI_PROP_TYPE_STRING, name, data, nelements, 3935 ddi_prop_fm_encode_strings)); 3936 } 3937 3938 int 3939 e_ddi_prop_update_string_array(dev_t match_dev, dev_info_t *dip, 3940 char *name, char **data, uint_t nelements) 3941 { 3942 return (ddi_prop_update_common(match_dev, dip, 3943 DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_STRING, 3944 name, data, nelements, 3945 ddi_prop_fm_encode_strings)); 3946 } 3947 3948 3949 /* 3950 * Get an array of bytes property. 
 */
int
ddi_prop_lookup_byte_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
    char *name, uchar_t **data, uint_t *nelements)
{
	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
#ifdef DEBUG
		if (dip != NULL) {
			cmn_err(CE_WARN, "ddi_prop_lookup_byte_array: "
			    " invalid flag 0x%x (prop = %s, node = %s%d)",
			    flags, name, ddi_driver_name(dip),
			    ddi_get_instance(dip));
		}
#endif /* DEBUG */
		/* Strip unsupported flag bits and continue the lookup. */
		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
	}

	return (ddi_prop_lookup_common(match_dev, dip,
	    (flags | DDI_PROP_TYPE_BYTE), name, data,
	    nelements, ddi_prop_fm_decode_bytes));
}

/*
 * Update an array of bytes property.  An empty array is rejected since
 * a zero-length byte property cannot be meaningfully encoded.
 */
int
ddi_prop_update_byte_array(dev_t match_dev, dev_info_t *dip,
    char *name, uchar_t *data, uint_t nelements)
{
	if (nelements == 0)
		return (DDI_PROP_INVAL_ARG);

	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_BYTE,
	    name, data, nelements, ddi_prop_fm_encode_bytes));
}


/*
 * e_ddi_prop_update_byte_array: system-defined variant of
 * ddi_prop_update_byte_array().
 */
int
e_ddi_prop_update_byte_array(dev_t match_dev, dev_info_t *dip,
    char *name, uchar_t *data, uint_t nelements)
{
	if (nelements == 0)
		return (DDI_PROP_INVAL_ARG);

	return (ddi_prop_update_common(match_dev, dip,
	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_BYTE,
	    name, data, nelements, ddi_prop_fm_encode_bytes));
}


/*
 * ddi_prop_remove_common:	Undefine a managed property:
 *			Input dev_t must match dev_t when defined.
 *			Returns DDI_PROP_NOT_FOUND, possibly.
 *			DDI_PROP_INVAL_ARG is also possible if dev is
 *			DDI_DEV_T_ANY or incoming name is the NULL string.
 */
int
ddi_prop_remove_common(dev_t dev, dev_info_t *dip, char *name, int flag)
{
	ddi_prop_t	**list_head = &(DEVI(dip)->devi_drv_prop_ptr);
	ddi_prop_t	*propp;
	ddi_prop_t	*lastpropp = NULL;

	if ((dev == DDI_DEV_T_ANY) || (name == (char *)0) ||
	    (strlen(name) == 0)) {
		return (DDI_PROP_INVAL_ARG);
	}

	/* Select the property list named by the flag bits. */
	if (flag & DDI_PROP_SYSTEM_DEF)
		list_head = &(DEVI(dip)->devi_sys_prop_ptr);
	else if (flag & DDI_PROP_HW_DEF)
		list_head = &(DEVI(dip)->devi_hw_prop_ptr);

	mutex_enter(&(DEVI(dip)->devi_lock));

	for (propp = *list_head; propp != NULL; propp = propp->prop_next) {
		if (DDI_STRSAME(propp->prop_name, name) &&
		    (dev == propp->prop_dev)) {
			/*
			 * Unlink this propp allowing for it to
			 * be first in the list:
			 */

			if (lastpropp == NULL)
				*list_head = propp->prop_next;
			else
				lastpropp->prop_next = propp->prop_next;

			/* The node is unlinked; free it after dropping lock. */
			mutex_exit(&(DEVI(dip)->devi_lock));

			/*
			 * Free memory and return...
			 */
			kmem_free(propp->prop_name,
			    strlen(propp->prop_name) + 1);
			if (propp->prop_len != 0)
				kmem_free(propp->prop_val, propp->prop_len);
			kmem_free(propp, sizeof (ddi_prop_t));
			return (DDI_PROP_SUCCESS);
		}
		lastpropp = propp;
	}
	mutex_exit(&(DEVI(dip)->devi_lock));
	return (DDI_PROP_NOT_FOUND);
}

/*
 * ddi_prop_remove: remove a driver-defined property.
 */
int
ddi_prop_remove(dev_t dev, dev_info_t *dip, char *name)
{
	return (ddi_prop_remove_common(dev, dip, name, 0));
}

/*
 * e_ddi_prop_remove: remove a system-defined property.
 */
int
e_ddi_prop_remove(dev_t dev, dev_info_t *dip, char *name)
{
	return (ddi_prop_remove_common(dev, dip, name, DDI_PROP_SYSTEM_DEF));
}

/*
 * e_ddi_prop_list_delete: remove a list of properties
 *	Note that the caller needs to provide the required protection
 *	(eg. devi_lock if these properties are still attached to a devi)
 */
void
e_ddi_prop_list_delete(ddi_prop_t *props)
{
	i_ddi_prop_list_delete(props);
}

/*
 * ddi_prop_remove_all_common:
 *	Used before unloading a driver to remove
 *	all properties. (undefines all dev_t's props.)
 *	Also removes `explicitly undefined' props.
 *	No errors possible.
 */
void
ddi_prop_remove_all_common(dev_info_t *dip, int flag)
{
	ddi_prop_t	**list_head;

	mutex_enter(&(DEVI(dip)->devi_lock));
	if (flag & DDI_PROP_SYSTEM_DEF) {
		list_head = &(DEVI(dip)->devi_sys_prop_ptr);
	} else if (flag & DDI_PROP_HW_DEF) {
		list_head = &(DEVI(dip)->devi_hw_prop_ptr);
	} else {
		list_head = &(DEVI(dip)->devi_drv_prop_ptr);
	}
	i_ddi_prop_list_delete(*list_head);
	*list_head = NULL;
	mutex_exit(&(DEVI(dip)->devi_lock));
}


/*
 * ddi_prop_remove_all:		Remove all driver prop definitions.
 */

void
ddi_prop_remove_all(dev_info_t *dip)
{
	/* Clear the driver's dynamic property list as well. */
	i_ddi_prop_dyn_driver_set(dip, NULL);
	ddi_prop_remove_all_common(dip, 0);
}

/*
 * e_ddi_prop_remove_all:	Remove all system prop definitions.
 */

void
e_ddi_prop_remove_all(dev_info_t *dip)
{
	ddi_prop_remove_all_common(dip, (int)DDI_PROP_SYSTEM_DEF);
}


/*
 * ddi_prop_undefine:	Explicitly undefine a property.  Property
 *			searches which match this property return
 *			the error code DDI_PROP_UNDEFINED.
 *
 *			Use ddi_prop_remove to negate effect of
 *			ddi_prop_undefine
 *
 *			See above for error returns.
4140 */ 4141 4142 int 4143 ddi_prop_undefine(dev_t dev, dev_info_t *dip, int flag, char *name) 4144 { 4145 if (!(flag & DDI_PROP_CANSLEEP)) 4146 flag |= DDI_PROP_DONTSLEEP; 4147 flag |= DDI_PROP_STACK_CREATE | DDI_PROP_UNDEF_IT | DDI_PROP_TYPE_ANY; 4148 return (ddi_prop_update_common(dev, dip, flag, 4149 name, NULL, 0, ddi_prop_fm_encode_bytes)); 4150 } 4151 4152 int 4153 e_ddi_prop_undefine(dev_t dev, dev_info_t *dip, int flag, char *name) 4154 { 4155 if (!(flag & DDI_PROP_CANSLEEP)) 4156 flag |= DDI_PROP_DONTSLEEP; 4157 flag |= DDI_PROP_SYSTEM_DEF | DDI_PROP_STACK_CREATE | 4158 DDI_PROP_UNDEF_IT | DDI_PROP_TYPE_ANY; 4159 return (ddi_prop_update_common(dev, dip, flag, 4160 name, NULL, 0, ddi_prop_fm_encode_bytes)); 4161 } 4162 4163 /* 4164 * Support for gathering dynamic properties in devinfo snapshot. 4165 */ 4166 void 4167 i_ddi_prop_dyn_driver_set(dev_info_t *dip, i_ddi_prop_dyn_t *dp) 4168 { 4169 DEVI(dip)->devi_prop_dyn_driver = dp; 4170 } 4171 4172 i_ddi_prop_dyn_t * 4173 i_ddi_prop_dyn_driver_get(dev_info_t *dip) 4174 { 4175 return (DEVI(dip)->devi_prop_dyn_driver); 4176 } 4177 4178 void 4179 i_ddi_prop_dyn_parent_set(dev_info_t *dip, i_ddi_prop_dyn_t *dp) 4180 { 4181 DEVI(dip)->devi_prop_dyn_parent = dp; 4182 } 4183 4184 i_ddi_prop_dyn_t * 4185 i_ddi_prop_dyn_parent_get(dev_info_t *dip) 4186 { 4187 return (DEVI(dip)->devi_prop_dyn_parent); 4188 } 4189 4190 void 4191 i_ddi_prop_dyn_cache_invalidate(dev_info_t *dip, i_ddi_prop_dyn_t *dp) 4192 { 4193 /* for now we invalidate the entire cached snapshot */ 4194 if (dip && dp) 4195 i_ddi_di_cache_invalidate(); 4196 } 4197 4198 /* ARGSUSED */ 4199 void 4200 ddi_prop_cache_invalidate(dev_t dev, dev_info_t *dip, char *name, int flags) 4201 { 4202 /* for now we invalidate the entire cached snapshot */ 4203 i_ddi_di_cache_invalidate(); 4204 } 4205 4206 4207 /* 4208 * Code to search hardware layer (PROM), if it exists, on behalf of child. 
 *
 * if input dip != child_dip, then call is on behalf of child
 * to search PROM, do it via ddi_prop_search_common() and ascend only
 * if allowed.
 *
 * if input dip == ch_dip (child_dip), call is on behalf of root driver,
 * to search for PROM defined props only.
 *
 * Note that the PROM search is done only if the requested dev
 * is either DDI_DEV_T_ANY or DDI_DEV_T_NONE. PROM properties
 * have no associated dev, thus are automatically associated with
 * DDI_DEV_T_NONE.
 *
 * Modifying flag DDI_PROP_NOTPROM inhibits the search in the h/w layer.
 *
 * Returns DDI_PROP_FOUND_1275 if found to indicate to framework
 * that the property resides in the prom.
 */
int
impl_ddi_bus_prop_op(dev_t dev, dev_info_t *dip, dev_info_t *ch_dip,
    ddi_prop_op_t prop_op, int mod_flags,
    char *name, caddr_t valuep, int *lengthp)
{
	int len;
	caddr_t buffer = NULL;

	/*
	 * If requested dev is DDI_DEV_T_NONE or DDI_DEV_T_ANY, then
	 * look in caller's PROM if it's a self identifying device...
	 *
	 * Note that this is very similar to ddi_prop_op, but we
	 * search the PROM instead of the s/w defined properties,
	 * and we are called on by the parent driver to do this for
	 * the child.
	 */

	if (((dev == DDI_DEV_T_NONE) || (dev == DDI_DEV_T_ANY)) &&
	    ndi_dev_is_prom_node(ch_dip) &&
	    ((mod_flags & DDI_PROP_NOTPROM) == 0)) {
		len = prom_getproplen((pnode_t)DEVI(ch_dip)->devi_nodeid, name);
		if (len == -1) {
			return (DDI_PROP_NOT_FOUND);
		}

		/*
		 * If exists only request, we're done
		 */
		if (prop_op == PROP_EXISTS) {
			return (DDI_PROP_FOUND_1275);
		}

		/*
		 * If length only request or prop length == 0, get out
		 */
		if ((prop_op == PROP_LEN) || (len == 0)) {
			*lengthp = len;
			return (DDI_PROP_FOUND_1275);
		}

		/*
		 * Allocate buffer if required... (either way `buffer'
		 * is receiving address).
		 */

		switch (prop_op) {

		case PROP_LEN_AND_VAL_ALLOC:

			buffer = kmem_alloc((size_t)len,
			    mod_flags & DDI_PROP_CANSLEEP ?
			    KM_SLEEP : KM_NOSLEEP);
			if (buffer == NULL) {
				return (DDI_PROP_NO_MEMORY);
			}
			/* Hand the allocated buffer back to the caller. */
			*(caddr_t *)valuep = buffer;
			break;

		case PROP_LEN_AND_VAL_BUF:

			/* Caller's buffer must hold the whole value. */
			if (len > (*lengthp)) {
				*lengthp = len;
				return (DDI_PROP_BUF_TOO_SMALL);
			}

			buffer = valuep;
			break;

		default:
			break;
		}

		/*
		 * Call the PROM function to do the copy.
		 */
		(void) prom_getprop((pnode_t)DEVI(ch_dip)->devi_nodeid,
		    name, buffer);

		*lengthp = len; /* return the actual length to the caller */
		(void) impl_fix_props(dip, ch_dip, name, len, buffer);
		return (DDI_PROP_FOUND_1275);
	}

	return (DDI_PROP_NOT_FOUND);
}

/*
 * The ddi_bus_prop_op default bus nexus prop op function.
 *
 * Code to search hardware layer (PROM), if it exists,
 * on behalf of child, then, if appropriate, ascend and check
 * my own software defined properties...
 */
int
ddi_bus_prop_op(dev_t dev, dev_info_t *dip, dev_info_t *ch_dip,
    ddi_prop_op_t prop_op, int mod_flags,
    char *name, caddr_t valuep, int *lengthp)
{
	int error;

	/* Try the hardware (PROM) layer first on behalf of the child. */
	error = impl_ddi_bus_prop_op(dev, dip, ch_dip, prop_op, mod_flags,
	    name, valuep, lengthp);

	if (error == DDI_PROP_SUCCESS || error == DDI_PROP_FOUND_1275 ||
	    error == DDI_PROP_BUF_TOO_SMALL)
		return (error);

	if (error == DDI_PROP_NO_MEMORY) {
		cmn_err(CE_CONT, prop_no_mem_msg, name);
		return (DDI_PROP_NO_MEMORY);
	}

	/*
	 * Check the 'options' node as a last resort
	 */
	if ((mod_flags & DDI_PROP_DONTPASS) != 0)
		return (DDI_PROP_NOT_FOUND);

	if (ch_dip == ddi_root_node()) {
		/*
		 * As a last resort, when we've reached
		 * the top and still haven't found the
		 * property, see if the desired property
		 * is attached to the options node.
		 *
		 * The options dip is attached right after boot.
		 */
		ASSERT(options_dip != NULL);
		/*
		 * Force the "don't pass" flag to *just* see
		 * what the options node has to offer.
		 */
		return (ddi_prop_search_common(dev, options_dip, prop_op,
		    mod_flags|DDI_PROP_DONTPASS, name, valuep,
		    (uint_t *)lengthp));
	}

	/*
	 * Otherwise, continue search with parent's s/w defined properties...
	 * NOTE: Using `dip' in following call increments the level.
	 */

	return (ddi_prop_search_common(dev, dip, prop_op, mod_flags,
	    name, valuep, (uint_t *)lengthp));
}

/*
 * External property functions used by other parts of the kernel...
 */

/*
 * e_ddi_getlongprop: See comments for ddi_get_longprop.
4380 */ 4381 4382 int 4383 e_ddi_getlongprop(dev_t dev, vtype_t type, char *name, int flags, 4384 caddr_t valuep, int *lengthp) 4385 { 4386 _NOTE(ARGUNUSED(type)) 4387 dev_info_t *devi; 4388 ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_ALLOC; 4389 int error; 4390 4391 if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL) 4392 return (DDI_PROP_NOT_FOUND); 4393 4394 error = cdev_prop_op(dev, devi, prop_op, flags, name, valuep, lengthp); 4395 ddi_release_devi(devi); 4396 return (error); 4397 } 4398 4399 /* 4400 * e_ddi_getlongprop_buf: See comments for ddi_getlongprop_buf. 4401 */ 4402 4403 int 4404 e_ddi_getlongprop_buf(dev_t dev, vtype_t type, char *name, int flags, 4405 caddr_t valuep, int *lengthp) 4406 { 4407 _NOTE(ARGUNUSED(type)) 4408 dev_info_t *devi; 4409 ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_BUF; 4410 int error; 4411 4412 if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL) 4413 return (DDI_PROP_NOT_FOUND); 4414 4415 error = cdev_prop_op(dev, devi, prop_op, flags, name, valuep, lengthp); 4416 ddi_release_devi(devi); 4417 return (error); 4418 } 4419 4420 /* 4421 * e_ddi_getprop: See comments for ddi_getprop. 4422 */ 4423 int 4424 e_ddi_getprop(dev_t dev, vtype_t type, char *name, int flags, int defvalue) 4425 { 4426 _NOTE(ARGUNUSED(type)) 4427 dev_info_t *devi; 4428 ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_BUF; 4429 int propvalue = defvalue; 4430 int proplength = sizeof (int); 4431 int error; 4432 4433 if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL) 4434 return (defvalue); 4435 4436 error = cdev_prop_op(dev, devi, prop_op, 4437 flags, name, (caddr_t)&propvalue, &proplength); 4438 ddi_release_devi(devi); 4439 4440 if ((error == DDI_PROP_SUCCESS) && (proplength == 0)) 4441 propvalue = 1; 4442 4443 return (propvalue); 4444 } 4445 4446 /* 4447 * e_ddi_getprop_int64: 4448 * 4449 * This is a typed interfaces, but predates typed properties. With the 4450 * introduction of typed properties the framework tries to ensure 4451 * consistent use of typed interfaces. 
 * This is why TYPE_INT64 is not
 * part of TYPE_ANY.  E_ddi_getprop_int64 is a special case where a
 * typed interface invokes legacy (non-typed) interfaces:
 * cdev_prop_op(), prop_op(9E), ddi_prop_op(9F)).  In this case the
 * fact that TYPE_INT64 is not part of TYPE_ANY matters.  To support
 * this type of lookup as a single operation we invoke the legacy
 * non-typed interfaces with the special CONSUMER_TYPED bit set.  The
 * framework ddi_prop_op(9F) implementation is expected to check for
 * CONSUMER_TYPED and, if set, expand type bits beyond TYPE_ANY
 * (currently TYPE_INT64).
 */
int64_t
e_ddi_getprop_int64(dev_t dev, vtype_t type, char *name,
    int flags, int64_t defvalue)
{
	_NOTE(ARGUNUSED(type))
	dev_info_t	*devi;
	ddi_prop_op_t	prop_op = PROP_LEN_AND_VAL_BUF;
	int64_t		propvalue = defvalue;
	int		proplength = sizeof (propvalue);
	int		error;

	/* Hold the device so it cannot detach while we query it. */
	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
		return (defvalue);

	error = cdev_prop_op(dev, devi, prop_op, flags |
	    DDI_PROP_CONSUMER_TYPED, name, (caddr_t)&propvalue, &proplength);
	ddi_release_devi(devi);

	/* A zero-length property is a boolean: report it as 1. */
	if ((error == DDI_PROP_SUCCESS) && (proplength == 0))
		propvalue = 1;

	return (propvalue);
}

/*
 * e_ddi_getproplen: See comments for ddi_getproplen.
4488 */ 4489 int 4490 e_ddi_getproplen(dev_t dev, vtype_t type, char *name, int flags, int *lengthp) 4491 { 4492 _NOTE(ARGUNUSED(type)) 4493 dev_info_t *devi; 4494 ddi_prop_op_t prop_op = PROP_LEN; 4495 int error; 4496 4497 if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL) 4498 return (DDI_PROP_NOT_FOUND); 4499 4500 error = cdev_prop_op(dev, devi, prop_op, flags, name, NULL, lengthp); 4501 ddi_release_devi(devi); 4502 return (error); 4503 } 4504 4505 /* 4506 * Routines to get at elements of the dev_info structure 4507 */ 4508 4509 /* 4510 * ddi_binding_name: Return the driver binding name of the devinfo node 4511 * This is the name the OS used to bind the node to a driver. 4512 */ 4513 char * 4514 ddi_binding_name(dev_info_t *dip) 4515 { 4516 return (DEVI(dip)->devi_binding_name); 4517 } 4518 4519 /* 4520 * ddi_driver_major: Return the major number of the driver that 4521 * the supplied devinfo is bound to. If not yet bound, 4522 * DDI_MAJOR_T_NONE. 4523 * 4524 * When used by the driver bound to 'devi', this 4525 * function will reliably return the driver major number. 4526 * Other ways of determining the driver major number, such as 4527 * major = ddi_name_to_major(ddi_get_name(devi)); 4528 * major = ddi_name_to_major(ddi_binding_name(devi)); 4529 * can return a different result as the driver/alias binding 4530 * can change dynamically, and thus should be avoided. 4531 */ 4532 major_t 4533 ddi_driver_major(dev_info_t *devi) 4534 { 4535 return (DEVI(devi)->devi_major); 4536 } 4537 4538 /* 4539 * ddi_driver_name: Return the normalized driver name. this is the 4540 * actual driver name 4541 */ 4542 const char * 4543 ddi_driver_name(dev_info_t *devi) 4544 { 4545 major_t major; 4546 4547 if ((major = ddi_driver_major(devi)) != DDI_MAJOR_T_NONE) 4548 return (ddi_major_to_name(major)); 4549 4550 return (ddi_node_name(devi)); 4551 } 4552 4553 /* 4554 * i_ddi_set_binding_name: Set binding name. 4555 * 4556 * Set the binding name to the given name. 
4557 * This routine is for use by the ddi implementation, not by drivers. 4558 */ 4559 void 4560 i_ddi_set_binding_name(dev_info_t *dip, char *name) 4561 { 4562 DEVI(dip)->devi_binding_name = name; 4563 4564 } 4565 4566 /* 4567 * ddi_get_name: A synonym of ddi_binding_name() ... returns a name 4568 * the implementation has used to bind the node to a driver. 4569 */ 4570 char * 4571 ddi_get_name(dev_info_t *dip) 4572 { 4573 return (DEVI(dip)->devi_binding_name); 4574 } 4575 4576 /* 4577 * ddi_node_name: Return the name property of the devinfo node 4578 * This may differ from ddi_binding_name if the node name 4579 * does not define a binding to a driver (i.e. generic names). 4580 */ 4581 char * 4582 ddi_node_name(dev_info_t *dip) 4583 { 4584 return (DEVI(dip)->devi_node_name); 4585 } 4586 4587 4588 /* 4589 * ddi_get_nodeid: Get nodeid stored in dev_info structure. 4590 */ 4591 int 4592 ddi_get_nodeid(dev_info_t *dip) 4593 { 4594 return (DEVI(dip)->devi_nodeid); 4595 } 4596 4597 int 4598 ddi_get_instance(dev_info_t *dip) 4599 { 4600 return (DEVI(dip)->devi_instance); 4601 } 4602 4603 struct dev_ops * 4604 ddi_get_driver(dev_info_t *dip) 4605 { 4606 return (DEVI(dip)->devi_ops); 4607 } 4608 4609 void 4610 ddi_set_driver(dev_info_t *dip, struct dev_ops *devo) 4611 { 4612 DEVI(dip)->devi_ops = devo; 4613 } 4614 4615 /* 4616 * ddi_set_driver_private/ddi_get_driver_private: 4617 * Get/set device driver private data in devinfo. 
 */
void
ddi_set_driver_private(dev_info_t *dip, void *data)
{
	DEVI(dip)->devi_driver_data = data;
}

void *
ddi_get_driver_private(dev_info_t *dip)
{
	return (DEVI(dip)->devi_driver_data);
}

/*
 * ddi_get_parent, ddi_get_child, ddi_get_next_sibling
 */

dev_info_t *
ddi_get_parent(dev_info_t *dip)
{
	return ((dev_info_t *)DEVI(dip)->devi_parent);
}

dev_info_t *
ddi_get_child(dev_info_t *dip)
{
	return ((dev_info_t *)DEVI(dip)->devi_child);
}

dev_info_t *
ddi_get_next_sibling(dev_info_t *dip)
{
	return ((dev_info_t *)DEVI(dip)->devi_sibling);
}

dev_info_t *
ddi_get_next(dev_info_t *dip)
{
	return ((dev_info_t *)DEVI(dip)->devi_next);
}

void
ddi_set_next(dev_info_t *dip, dev_info_t *nextdip)
{
	DEVI(dip)->devi_next = DEVI(nextdip);
}

/*
 * ddi_root_node:		Return root node of devinfo tree
 */

dev_info_t *
ddi_root_node(void)
{
	extern dev_info_t *top_devinfo;

	return (top_devinfo);
}

/*
 * Miscellaneous functions:
 */

/*
 * Implementation specific hooks
 */

void
ddi_report_dev(dev_info_t *d)
{
	char *b;

	(void) ddi_ctlops(d, d, DDI_CTLOPS_REPORTDEV, (void *)0, (void *)0);

	/*
	 * If this devinfo node has cb_ops, it's implicitly accessible from
	 * userland, so we print its full name together with the instance
	 * number 'abbreviation' that the driver may use internally.
	 */
	if (DEVI(d)->devi_ops->devo_cb_ops != (struct cb_ops *)0 &&
	    (b = kmem_zalloc(MAXPATHLEN, KM_NOSLEEP))) {
		cmn_err(CE_CONT, "?%s%d is %s\n",
		    ddi_driver_name(d), ddi_get_instance(d),
		    ddi_pathname(d, b));
		kmem_free(b, MAXPATHLEN);
	}
}

/*
 * ddi_ctlops() is described in the assembler not to buy a new register
 * window when it's called and can reduce cost in climbing the device tree
 * without using the tail call optimization.
 */
int
ddi_dev_regsize(dev_info_t *dev, uint_t rnumber, off_t *result)
{
	int ret;

	ret = ddi_ctlops(dev, dev, DDI_CTLOPS_REGSIZE,
	    (void *)&rnumber, (void *)result);

	/* Normalize any non-success ctlops return to DDI_FAILURE. */
	return (ret == DDI_SUCCESS ? DDI_SUCCESS : DDI_FAILURE);
}

int
ddi_dev_nregs(dev_info_t *dev, int *result)
{
	return (ddi_ctlops(dev, dev, DDI_CTLOPS_NREGS, 0, (void *)result));
}

int
ddi_dev_is_sid(dev_info_t *d)
{
	return (ddi_ctlops(d, d, DDI_CTLOPS_SIDDEV, (void *)0, (void *)0));
}

int
ddi_slaveonly(dev_info_t *d)
{
	return (ddi_ctlops(d, d, DDI_CTLOPS_SLAVEONLY, (void *)0, (void *)0));
}

int
ddi_dev_affinity(dev_info_t *a, dev_info_t *b)
{
	return (ddi_ctlops(a, a, DDI_CTLOPS_AFFINITY, (void *)b, (void *)0));
}

/*
 * ddi_streams_driver: DDI_SUCCESS when the attached driver exposes a
 * STREAMS entry (cb_str) via its cb_ops, DDI_FAILURE otherwise.
 */
int
ddi_streams_driver(dev_info_t *dip)
{
	if (i_ddi_devi_attached(dip) &&
	    (DEVI(dip)->devi_ops->devo_cb_ops != NULL) &&
	    (DEVI(dip)->devi_ops->devo_cb_ops->cb_str != NULL))
		return (DDI_SUCCESS);
	return (DDI_FAILURE);
}

/*
 * callback free list
 */

static int ncallbacks;
static int nc_low = 170;
static int nc_med = 512;
static int nc_high = 2048;
static struct ddi_callback *callbackq;
static struct ddi_callback *callbackqfree;

/*
 * set/run callback lists
 */
struct	cbstats	{
	kstat_named_t	cb_asked;
	kstat_named_t	cb_new;
	kstat_named_t	cb_run;
	kstat_named_t	cb_delete;
	kstat_named_t	cb_maxreq;
	kstat_named_t	cb_maxlist;
	kstat_named_t	cb_alloc;
	kstat_named_t	cb_runouts;
	kstat_named_t	cb_L2;
	kstat_named_t	cb_grow;
} cbstats = {
	{"asked", KSTAT_DATA_UINT32},
	{"new", KSTAT_DATA_UINT32},
	{"run", KSTAT_DATA_UINT32},
	{"delete", KSTAT_DATA_UINT32},
	{"maxreq", KSTAT_DATA_UINT32},
	{"maxlist", KSTAT_DATA_UINT32},
	{"alloc", KSTAT_DATA_UINT32},
	{"runouts", KSTAT_DATA_UINT32},
	{"L2", KSTAT_DATA_UINT32},
	{"grow", KSTAT_DATA_UINT32},
};

/* Shorthand accessors for the kstat counters above. */
#define	nc_asked	cb_asked.value.ui32
#define	nc_new		cb_new.value.ui32
#define	nc_run		cb_run.value.ui32
#define	nc_delete	cb_delete.value.ui32
#define	nc_maxreq	cb_maxreq.value.ui32
#define	nc_maxlist	cb_maxlist.value.ui32
#define	nc_alloc	cb_alloc.value.ui32
#define	nc_runouts	cb_runouts.value.ui32
#define	nc_L2		cb_L2.value.ui32
#define	nc_grow		cb_grow.value.ui32

static kmutex_t ddi_callback_mutex;

/*
 * callbacks are handled using a L1/L2 cache. The L1 cache
 * comes out of kmem_cache_alloc and can expand/shrink dynamically. If
 * we can't get callbacks from the L1 cache [because pageout is doing
 * I/O at the time freemem is 0], we allocate callbacks out of the
 * L2 cache. The L2 cache is static and depends on the memory size.
4813 * [We might also count the number of devices at probe time and 4814 * allocate one structure per device and adjust for deferred attach] 4815 */ 4816 void 4817 impl_ddi_callback_init(void) 4818 { 4819 int i; 4820 uint_t physmegs; 4821 kstat_t *ksp; 4822 4823 physmegs = physmem >> (20 - PAGESHIFT); 4824 if (physmegs < 48) { 4825 ncallbacks = nc_low; 4826 } else if (physmegs < 128) { 4827 ncallbacks = nc_med; 4828 } else { 4829 ncallbacks = nc_high; 4830 } 4831 4832 /* 4833 * init free list 4834 */ 4835 callbackq = kmem_zalloc( 4836 ncallbacks * sizeof (struct ddi_callback), KM_SLEEP); 4837 for (i = 0; i < ncallbacks-1; i++) 4838 callbackq[i].c_nfree = &callbackq[i+1]; 4839 callbackqfree = callbackq; 4840 4841 /* init kstats */ 4842 if (ksp = kstat_create("unix", 0, "cbstats", "misc", KSTAT_TYPE_NAMED, 4843 sizeof (cbstats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL)) { 4844 ksp->ks_data = (void *) &cbstats; 4845 kstat_install(ksp); 4846 } 4847 4848 } 4849 4850 static void 4851 callback_insert(int (*funcp)(caddr_t), caddr_t arg, uintptr_t *listid, 4852 int count) 4853 { 4854 struct ddi_callback *list, *marker, *new; 4855 size_t size = sizeof (struct ddi_callback); 4856 4857 list = marker = (struct ddi_callback *)*listid; 4858 while (list != NULL) { 4859 if (list->c_call == funcp && list->c_arg == arg) { 4860 list->c_count += count; 4861 return; 4862 } 4863 marker = list; 4864 list = list->c_nlist; 4865 } 4866 new = kmem_alloc(size, KM_NOSLEEP); 4867 if (new == NULL) { 4868 new = callbackqfree; 4869 if (new == NULL) { 4870 new = kmem_alloc_tryhard(sizeof (struct ddi_callback), 4871 &size, KM_NOSLEEP | KM_PANIC); 4872 cbstats.nc_grow++; 4873 } else { 4874 callbackqfree = new->c_nfree; 4875 cbstats.nc_L2++; 4876 } 4877 } 4878 if (marker != NULL) { 4879 marker->c_nlist = new; 4880 } else { 4881 *listid = (uintptr_t)new; 4882 } 4883 new->c_size = size; 4884 new->c_nlist = NULL; 4885 new->c_call = funcp; 4886 new->c_arg = arg; 4887 new->c_count = count; 4888 
cbstats.nc_new++; 4889 cbstats.nc_alloc++; 4890 if (cbstats.nc_alloc > cbstats.nc_maxlist) 4891 cbstats.nc_maxlist = cbstats.nc_alloc; 4892 } 4893 4894 void 4895 ddi_set_callback(int (*funcp)(caddr_t), caddr_t arg, uintptr_t *listid) 4896 { 4897 mutex_enter(&ddi_callback_mutex); 4898 cbstats.nc_asked++; 4899 if ((cbstats.nc_asked - cbstats.nc_run) > cbstats.nc_maxreq) 4900 cbstats.nc_maxreq = (cbstats.nc_asked - cbstats.nc_run); 4901 (void) callback_insert(funcp, arg, listid, 1); 4902 mutex_exit(&ddi_callback_mutex); 4903 } 4904 4905 static void 4906 real_callback_run(void *Queue) 4907 { 4908 int (*funcp)(caddr_t); 4909 caddr_t arg; 4910 int count, rval; 4911 uintptr_t *listid; 4912 struct ddi_callback *list, *marker; 4913 int check_pending = 1; 4914 int pending = 0; 4915 4916 do { 4917 mutex_enter(&ddi_callback_mutex); 4918 listid = Queue; 4919 list = (struct ddi_callback *)*listid; 4920 if (list == NULL) { 4921 mutex_exit(&ddi_callback_mutex); 4922 return; 4923 } 4924 if (check_pending) { 4925 marker = list; 4926 while (marker != NULL) { 4927 pending += marker->c_count; 4928 marker = marker->c_nlist; 4929 } 4930 check_pending = 0; 4931 } 4932 ASSERT(pending > 0); 4933 ASSERT(list->c_count > 0); 4934 funcp = list->c_call; 4935 arg = list->c_arg; 4936 count = list->c_count; 4937 *(uintptr_t *)Queue = (uintptr_t)list->c_nlist; 4938 if (list >= &callbackq[0] && 4939 list <= &callbackq[ncallbacks-1]) { 4940 list->c_nfree = callbackqfree; 4941 callbackqfree = list; 4942 } else 4943 kmem_free(list, list->c_size); 4944 4945 cbstats.nc_delete++; 4946 cbstats.nc_alloc--; 4947 mutex_exit(&ddi_callback_mutex); 4948 4949 do { 4950 if ((rval = (*funcp)(arg)) == 0) { 4951 pending -= count; 4952 mutex_enter(&ddi_callback_mutex); 4953 (void) callback_insert(funcp, arg, listid, 4954 count); 4955 cbstats.nc_runouts++; 4956 } else { 4957 pending--; 4958 mutex_enter(&ddi_callback_mutex); 4959 cbstats.nc_run++; 4960 } 4961 mutex_exit(&ddi_callback_mutex); 4962 } while (rval != 0 && 
(--count > 0)); 4963 } while (pending > 0); 4964 } 4965 4966 void 4967 ddi_run_callback(uintptr_t *listid) 4968 { 4969 softcall(real_callback_run, listid); 4970 } 4971 4972 /* 4973 * ddi_periodic_t 4974 * ddi_periodic_add(void (*func)(void *), void *arg, hrtime_t interval, 4975 * int level) 4976 * 4977 * INTERFACE LEVEL 4978 * Solaris DDI specific (Solaris DDI) 4979 * 4980 * PARAMETERS 4981 * func: the callback function 4982 * 4983 * The callback function will be invoked. The function is invoked 4984 * in kernel context if the argument level passed is the zero. 4985 * Otherwise it's invoked in interrupt context at the specified 4986 * level. 4987 * 4988 * arg: the argument passed to the callback function 4989 * 4990 * interval: interval time 4991 * 4992 * level : callback interrupt level 4993 * 4994 * If the value is the zero, the callback function is invoked 4995 * in kernel context. If the value is more than the zero, but 4996 * less than or equal to ten, the callback function is invoked in 4997 * interrupt context at the specified interrupt level, which may 4998 * be used for real time applications. 4999 * 5000 * This value must be in range of 0-10, which can be a numeric 5001 * number or a pre-defined macro (DDI_IPL_0, ... , DDI_IPL_10). 5002 * 5003 * DESCRIPTION 5004 * ddi_periodic_add(9F) schedules the specified function to be 5005 * periodically invoked in the interval time. 5006 * 5007 * As well as timeout(9F), the exact time interval over which the function 5008 * takes effect cannot be guaranteed, but the value given is a close 5009 * approximation. 5010 * 5011 * Drivers waiting on behalf of processes with real-time constraints must 5012 * pass non-zero value with the level argument to ddi_periodic_add(9F). 5013 * 5014 * RETURN VALUES 5015 * ddi_periodic_add(9F) returns a non-zero opaque value (ddi_periodic_t), 5016 * which must be used for ddi_periodic_delete(9F) to specify the request. 
5017 * 5018 * CONTEXT 5019 * ddi_periodic_add(9F) can be called in user or kernel context, but 5020 * it cannot be called in interrupt context, which is different from 5021 * timeout(9F). 5022 */ 5023 ddi_periodic_t 5024 ddi_periodic_add(void (*func)(void *), void *arg, hrtime_t interval, int level) 5025 { 5026 /* 5027 * Sanity check of the argument level. 5028 */ 5029 if (level < DDI_IPL_0 || level > DDI_IPL_10) 5030 cmn_err(CE_PANIC, 5031 "ddi_periodic_add: invalid interrupt level (%d).", level); 5032 5033 /* 5034 * Sanity check of the context. ddi_periodic_add() cannot be 5035 * called in either interrupt context or high interrupt context. 5036 */ 5037 if (servicing_interrupt()) 5038 cmn_err(CE_PANIC, 5039 "ddi_periodic_add: called in (high) interrupt context."); 5040 5041 return ((ddi_periodic_t)i_timeout(func, arg, interval, level)); 5042 } 5043 5044 /* 5045 * void 5046 * ddi_periodic_delete(ddi_periodic_t req) 5047 * 5048 * INTERFACE LEVEL 5049 * Solaris DDI specific (Solaris DDI) 5050 * 5051 * PARAMETERS 5052 * req: ddi_periodic_t opaque value ddi_periodic_add(9F) returned 5053 * previously. 5054 * 5055 * DESCRIPTION 5056 * ddi_periodic_delete(9F) cancels the ddi_periodic_add(9F) request 5057 * previously requested. 5058 * 5059 * ddi_periodic_delete(9F) will not return until the pending request 5060 * is canceled or executed. 5061 * 5062 * As well as untimeout(9F), calling ddi_periodic_delete(9F) for a 5063 * timeout which is either running on another CPU, or has already 5064 * completed causes no problems. However, unlike untimeout(9F), there is 5065 * no restrictions on the lock which might be held across the call to 5066 * ddi_periodic_delete(9F). 5067 * 5068 * Drivers should be structured with the understanding that the arrival of 5069 * both an interrupt and a timeout for that interrupt can occasionally 5070 * occur, in either order. 
5071 * 5072 * CONTEXT 5073 * ddi_periodic_delete(9F) can be called in user or kernel context, but 5074 * it cannot be called in interrupt context, which is different from 5075 * untimeout(9F). 5076 */ 5077 void 5078 ddi_periodic_delete(ddi_periodic_t req) 5079 { 5080 /* 5081 * Sanity check of the context. ddi_periodic_delete() cannot be 5082 * called in either interrupt context or high interrupt context. 5083 */ 5084 if (servicing_interrupt()) 5085 cmn_err(CE_PANIC, 5086 "ddi_periodic_delete: called in (high) interrupt context."); 5087 5088 i_untimeout((timeout_t)req); 5089 } 5090 5091 dev_info_t * 5092 nodevinfo(dev_t dev, int otyp) 5093 { 5094 _NOTE(ARGUNUSED(dev, otyp)) 5095 return ((dev_info_t *)0); 5096 } 5097 5098 /* 5099 * A driver should support its own getinfo(9E) entry point. This function 5100 * is provided as a convenience for ON drivers that don't expect their 5101 * getinfo(9E) entry point to be called. A driver that uses this must not 5102 * call ddi_create_minor_node. 5103 */ 5104 int 5105 ddi_no_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) 5106 { 5107 _NOTE(ARGUNUSED(dip, infocmd, arg, result)) 5108 return (DDI_FAILURE); 5109 } 5110 5111 /* 5112 * A driver should support its own getinfo(9E) entry point. This function 5113 * is provided as a convenience for ON drivers that where the minor number 5114 * is the instance. Drivers that do not have 1:1 mapping must implement 5115 * their own getinfo(9E) function. 
5116 */ 5117 int 5118 ddi_getinfo_1to1(dev_info_t *dip, ddi_info_cmd_t infocmd, 5119 void *arg, void **result) 5120 { 5121 _NOTE(ARGUNUSED(dip)) 5122 int instance; 5123 5124 if (infocmd != DDI_INFO_DEVT2INSTANCE) 5125 return (DDI_FAILURE); 5126 5127 instance = getminor((dev_t)(uintptr_t)arg); 5128 *result = (void *)(uintptr_t)instance; 5129 return (DDI_SUCCESS); 5130 } 5131 5132 int 5133 ddifail(dev_info_t *devi, ddi_attach_cmd_t cmd) 5134 { 5135 _NOTE(ARGUNUSED(devi, cmd)) 5136 return (DDI_FAILURE); 5137 } 5138 5139 int 5140 ddi_no_dma_map(dev_info_t *dip, dev_info_t *rdip, 5141 struct ddi_dma_req *dmareqp, ddi_dma_handle_t *handlep) 5142 { 5143 _NOTE(ARGUNUSED(dip, rdip, dmareqp, handlep)) 5144 return (DDI_DMA_NOMAPPING); 5145 } 5146 5147 int 5148 ddi_no_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr, 5149 int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep) 5150 { 5151 _NOTE(ARGUNUSED(dip, rdip, attr, waitfp, arg, handlep)) 5152 return (DDI_DMA_BADATTR); 5153 } 5154 5155 int 5156 ddi_no_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, 5157 ddi_dma_handle_t handle) 5158 { 5159 _NOTE(ARGUNUSED(dip, rdip, handle)) 5160 return (DDI_FAILURE); 5161 } 5162 5163 int 5164 ddi_no_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip, 5165 ddi_dma_handle_t handle, struct ddi_dma_req *dmareq, 5166 ddi_dma_cookie_t *cp, uint_t *ccountp) 5167 { 5168 _NOTE(ARGUNUSED(dip, rdip, handle, dmareq, cp, ccountp)) 5169 return (DDI_DMA_NOMAPPING); 5170 } 5171 5172 int 5173 ddi_no_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip, 5174 ddi_dma_handle_t handle) 5175 { 5176 _NOTE(ARGUNUSED(dip, rdip, handle)) 5177 return (DDI_FAILURE); 5178 } 5179 5180 int 5181 ddi_no_dma_flush(dev_info_t *dip, dev_info_t *rdip, 5182 ddi_dma_handle_t handle, off_t off, size_t len, 5183 uint_t cache_flags) 5184 { 5185 _NOTE(ARGUNUSED(dip, rdip, handle, off, len, cache_flags)) 5186 return (DDI_FAILURE); 5187 } 5188 5189 int 5190 ddi_no_dma_win(dev_info_t *dip, dev_info_t *rdip, 5191 
ddi_dma_handle_t handle, uint_t win, off_t *offp, 5192 size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp) 5193 { 5194 _NOTE(ARGUNUSED(dip, rdip, handle, win, offp, lenp, cookiep, ccountp)) 5195 return (DDI_FAILURE); 5196 } 5197 5198 int 5199 ddi_no_dma_mctl(dev_info_t *dip, dev_info_t *rdip, 5200 ddi_dma_handle_t handle, enum ddi_dma_ctlops request, 5201 off_t *offp, size_t *lenp, caddr_t *objp, uint_t flags) 5202 { 5203 _NOTE(ARGUNUSED(dip, rdip, handle, request, offp, lenp, objp, flags)) 5204 return (DDI_FAILURE); 5205 } 5206 5207 void 5208 ddivoid(void) 5209 {} 5210 5211 int 5212 nochpoll(dev_t dev, short events, int anyyet, short *reventsp, 5213 struct pollhead **pollhdrp) 5214 { 5215 _NOTE(ARGUNUSED(dev, events, anyyet, reventsp, pollhdrp)) 5216 return (ENXIO); 5217 } 5218 5219 cred_t * 5220 ddi_get_cred(void) 5221 { 5222 return (CRED()); 5223 } 5224 5225 clock_t 5226 ddi_get_lbolt(void) 5227 { 5228 return ((clock_t)lbolt_hybrid()); 5229 } 5230 5231 int64_t 5232 ddi_get_lbolt64(void) 5233 { 5234 return (lbolt_hybrid()); 5235 } 5236 5237 time_t 5238 ddi_get_time(void) 5239 { 5240 time_t now; 5241 5242 if ((now = gethrestime_sec()) == 0) { 5243 timestruc_t ts; 5244 mutex_enter(&tod_lock); 5245 ts = tod_get(); 5246 mutex_exit(&tod_lock); 5247 return (ts.tv_sec); 5248 } else { 5249 return (now); 5250 } 5251 } 5252 5253 pid_t 5254 ddi_get_pid(void) 5255 { 5256 return (ttoproc(curthread)->p_pid); 5257 } 5258 5259 kt_did_t 5260 ddi_get_kt_did(void) 5261 { 5262 return (curthread->t_did); 5263 } 5264 5265 /* 5266 * This function returns B_TRUE if the caller can reasonably expect that a call 5267 * to cv_wait_sig(9F), cv_timedwait_sig(9F), or qwait_sig(9F) could be awakened 5268 * by user-level signal. If it returns B_FALSE, then the caller should use 5269 * other means to make certain that the wait will not hang "forever." 5270 * 5271 * It does not check the signal mask, nor for reception of any particular 5272 * signal. 
5273 * 5274 * Currently, a thread can receive a signal if it's not a kernel thread and it 5275 * is not in the middle of exit(2) tear-down. Threads that are in that 5276 * tear-down effectively convert cv_wait_sig to cv_wait, cv_timedwait_sig to 5277 * cv_timedwait, and qwait_sig to qwait. 5278 */ 5279 boolean_t 5280 ddi_can_receive_sig(void) 5281 { 5282 proc_t *pp; 5283 5284 if (curthread->t_proc_flag & TP_LWPEXIT) 5285 return (B_FALSE); 5286 if ((pp = ttoproc(curthread)) == NULL) 5287 return (B_FALSE); 5288 return (pp->p_as != &kas); 5289 } 5290 5291 /* 5292 * Swap bytes in 16-bit [half-]words 5293 */ 5294 void 5295 swab(void *src, void *dst, size_t nbytes) 5296 { 5297 uchar_t *pf = (uchar_t *)src; 5298 uchar_t *pt = (uchar_t *)dst; 5299 uchar_t tmp; 5300 int nshorts; 5301 5302 nshorts = nbytes >> 1; 5303 5304 while (--nshorts >= 0) { 5305 tmp = *pf++; 5306 *pt++ = *pf++; 5307 *pt++ = tmp; 5308 } 5309 } 5310 5311 static void 5312 ddi_append_minor_node(dev_info_t *ddip, struct ddi_minor_data *dmdp) 5313 { 5314 int circ; 5315 struct ddi_minor_data *dp; 5316 5317 ndi_devi_enter(ddip, &circ); 5318 if ((dp = DEVI(ddip)->devi_minor) == (struct ddi_minor_data *)NULL) { 5319 DEVI(ddip)->devi_minor = dmdp; 5320 } else { 5321 while (dp->next != (struct ddi_minor_data *)NULL) 5322 dp = dp->next; 5323 dp->next = dmdp; 5324 } 5325 ndi_devi_exit(ddip, circ); 5326 } 5327 5328 static int 5329 i_log_devfs_minor_create(dev_info_t *dip, char *minor_name) 5330 { 5331 int se_flag; 5332 int kmem_flag; 5333 int se_err; 5334 char *pathname, *class_name; 5335 sysevent_t *ev = NULL; 5336 sysevent_id_t eid; 5337 sysevent_value_t se_val; 5338 sysevent_attr_list_t *ev_attr_list = NULL; 5339 5340 /* determine interrupt context */ 5341 se_flag = (servicing_interrupt()) ? SE_NOSLEEP : SE_SLEEP; 5342 kmem_flag = (se_flag == SE_SLEEP) ? 
KM_SLEEP : KM_NOSLEEP; 5343 5344 i_ddi_di_cache_invalidate(); 5345 5346 #ifdef DEBUG 5347 if ((se_flag == SE_NOSLEEP) && sunddi_debug) { 5348 cmn_err(CE_CONT, "ddi_create_minor_node: called from " 5349 "interrupt level by driver %s", 5350 ddi_driver_name(dip)); 5351 } 5352 #endif /* DEBUG */ 5353 5354 ev = sysevent_alloc(EC_DEVFS, ESC_DEVFS_MINOR_CREATE, EP_DDI, se_flag); 5355 if (ev == NULL) { 5356 goto fail; 5357 } 5358 5359 pathname = kmem_alloc(MAXPATHLEN, kmem_flag); 5360 if (pathname == NULL) { 5361 sysevent_free(ev); 5362 goto fail; 5363 } 5364 5365 (void) ddi_pathname(dip, pathname); 5366 ASSERT(strlen(pathname)); 5367 se_val.value_type = SE_DATA_TYPE_STRING; 5368 se_val.value.sv_string = pathname; 5369 if (sysevent_add_attr(&ev_attr_list, DEVFS_PATHNAME, 5370 &se_val, se_flag) != 0) { 5371 kmem_free(pathname, MAXPATHLEN); 5372 sysevent_free(ev); 5373 goto fail; 5374 } 5375 kmem_free(pathname, MAXPATHLEN); 5376 5377 /* add the device class attribute */ 5378 if ((class_name = i_ddi_devi_class(dip)) != NULL) { 5379 se_val.value_type = SE_DATA_TYPE_STRING; 5380 se_val.value.sv_string = class_name; 5381 if (sysevent_add_attr(&ev_attr_list, 5382 DEVFS_DEVI_CLASS, &se_val, SE_SLEEP) != 0) { 5383 sysevent_free_attr(ev_attr_list); 5384 goto fail; 5385 } 5386 } 5387 5388 /* 5389 * allow for NULL minor names 5390 */ 5391 if (minor_name != NULL) { 5392 se_val.value.sv_string = minor_name; 5393 if (sysevent_add_attr(&ev_attr_list, DEVFS_MINOR_NAME, 5394 &se_val, se_flag) != 0) { 5395 sysevent_free_attr(ev_attr_list); 5396 sysevent_free(ev); 5397 goto fail; 5398 } 5399 } 5400 5401 if (sysevent_attach_attributes(ev, ev_attr_list) != 0) { 5402 sysevent_free_attr(ev_attr_list); 5403 sysevent_free(ev); 5404 goto fail; 5405 } 5406 5407 if ((se_err = log_sysevent(ev, se_flag, &eid)) != 0) { 5408 if (se_err == SE_NO_TRANSPORT) { 5409 cmn_err(CE_WARN, "/devices or /dev may not be current " 5410 "for driver %s (%s). 
Run devfsadm -i %s", 5411 ddi_driver_name(dip), "syseventd not responding", 5412 ddi_driver_name(dip)); 5413 } else { 5414 sysevent_free(ev); 5415 goto fail; 5416 } 5417 } 5418 5419 sysevent_free(ev); 5420 return (DDI_SUCCESS); 5421 fail: 5422 cmn_err(CE_WARN, "/devices or /dev may not be current " 5423 "for driver %s. Run devfsadm -i %s", 5424 ddi_driver_name(dip), ddi_driver_name(dip)); 5425 return (DDI_SUCCESS); 5426 } 5427 5428 /* 5429 * failing to remove a minor node is not of interest 5430 * therefore we do not generate an error message 5431 */ 5432 static int 5433 i_log_devfs_minor_remove(dev_info_t *dip, char *minor_name) 5434 { 5435 char *pathname, *class_name; 5436 sysevent_t *ev; 5437 sysevent_id_t eid; 5438 sysevent_value_t se_val; 5439 sysevent_attr_list_t *ev_attr_list = NULL; 5440 5441 /* 5442 * only log ddi_remove_minor_node() calls outside the scope 5443 * of attach/detach reconfigurations and when the dip is 5444 * still initialized. 5445 */ 5446 if (DEVI_IS_ATTACHING(dip) || DEVI_IS_DETACHING(dip) || 5447 (i_ddi_node_state(dip) < DS_INITIALIZED)) { 5448 return (DDI_SUCCESS); 5449 } 5450 5451 i_ddi_di_cache_invalidate(); 5452 5453 ev = sysevent_alloc(EC_DEVFS, ESC_DEVFS_MINOR_REMOVE, EP_DDI, SE_SLEEP); 5454 if (ev == NULL) { 5455 return (DDI_SUCCESS); 5456 } 5457 5458 pathname = kmem_alloc(MAXPATHLEN, KM_SLEEP); 5459 if (pathname == NULL) { 5460 sysevent_free(ev); 5461 return (DDI_SUCCESS); 5462 } 5463 5464 (void) ddi_pathname(dip, pathname); 5465 ASSERT(strlen(pathname)); 5466 se_val.value_type = SE_DATA_TYPE_STRING; 5467 se_val.value.sv_string = pathname; 5468 if (sysevent_add_attr(&ev_attr_list, DEVFS_PATHNAME, 5469 &se_val, SE_SLEEP) != 0) { 5470 kmem_free(pathname, MAXPATHLEN); 5471 sysevent_free(ev); 5472 return (DDI_SUCCESS); 5473 } 5474 5475 kmem_free(pathname, MAXPATHLEN); 5476 5477 /* 5478 * allow for NULL minor names 5479 */ 5480 if (minor_name != NULL) { 5481 se_val.value.sv_string = minor_name; 5482 if 
(sysevent_add_attr(&ev_attr_list, DEVFS_MINOR_NAME, 5483 &se_val, SE_SLEEP) != 0) { 5484 sysevent_free_attr(ev_attr_list); 5485 goto fail; 5486 } 5487 } 5488 5489 if ((class_name = i_ddi_devi_class(dip)) != NULL) { 5490 /* add the device class, driver name and instance attributes */ 5491 5492 se_val.value_type = SE_DATA_TYPE_STRING; 5493 se_val.value.sv_string = class_name; 5494 if (sysevent_add_attr(&ev_attr_list, 5495 DEVFS_DEVI_CLASS, &se_val, SE_SLEEP) != 0) { 5496 sysevent_free_attr(ev_attr_list); 5497 goto fail; 5498 } 5499 5500 se_val.value_type = SE_DATA_TYPE_STRING; 5501 se_val.value.sv_string = (char *)ddi_driver_name(dip); 5502 if (sysevent_add_attr(&ev_attr_list, 5503 DEVFS_DRIVER_NAME, &se_val, SE_SLEEP) != 0) { 5504 sysevent_free_attr(ev_attr_list); 5505 goto fail; 5506 } 5507 5508 se_val.value_type = SE_DATA_TYPE_INT32; 5509 se_val.value.sv_int32 = ddi_get_instance(dip); 5510 if (sysevent_add_attr(&ev_attr_list, 5511 DEVFS_INSTANCE, &se_val, SE_SLEEP) != 0) { 5512 sysevent_free_attr(ev_attr_list); 5513 goto fail; 5514 } 5515 5516 } 5517 5518 if (sysevent_attach_attributes(ev, ev_attr_list) != 0) { 5519 sysevent_free_attr(ev_attr_list); 5520 } else { 5521 (void) log_sysevent(ev, SE_SLEEP, &eid); 5522 } 5523 fail: 5524 sysevent_free(ev); 5525 return (DDI_SUCCESS); 5526 } 5527 5528 /* 5529 * Derive the device class of the node. 5530 * Device class names aren't defined yet. Until this is done we use 5531 * devfs event subclass names as device class names. 
5532 */ 5533 static int 5534 derive_devi_class(dev_info_t *dip, const char *node_type, int flag) 5535 { 5536 int rv = DDI_SUCCESS; 5537 5538 if (i_ddi_devi_class(dip) == NULL) { 5539 if (strncmp(node_type, DDI_NT_BLOCK, 5540 sizeof (DDI_NT_BLOCK) - 1) == 0 && 5541 (node_type[sizeof (DDI_NT_BLOCK) - 1] == '\0' || 5542 node_type[sizeof (DDI_NT_BLOCK) - 1] == ':') && 5543 strcmp(node_type, DDI_NT_FD) != 0) { 5544 5545 rv = i_ddi_set_devi_class(dip, ESC_DISK, flag); 5546 5547 } else if (strncmp(node_type, DDI_NT_NET, 5548 sizeof (DDI_NT_NET) - 1) == 0 && 5549 (node_type[sizeof (DDI_NT_NET) - 1] == '\0' || 5550 node_type[sizeof (DDI_NT_NET) - 1] == ':')) { 5551 5552 rv = i_ddi_set_devi_class(dip, ESC_NETWORK, flag); 5553 5554 } else if (strncmp(node_type, DDI_NT_PRINTER, 5555 sizeof (DDI_NT_PRINTER) - 1) == 0 && 5556 (node_type[sizeof (DDI_NT_PRINTER) - 1] == '\0' || 5557 node_type[sizeof (DDI_NT_PRINTER) - 1] == ':')) { 5558 5559 rv = i_ddi_set_devi_class(dip, ESC_PRINTER, flag); 5560 5561 } else if (strncmp(node_type, DDI_PSEUDO, 5562 sizeof (DDI_PSEUDO) -1) == 0 && 5563 (strncmp(ESC_LOFI, ddi_node_name(dip), 5564 sizeof (ESC_LOFI) -1) == 0)) { 5565 rv = i_ddi_set_devi_class(dip, ESC_LOFI, flag); 5566 } 5567 } 5568 5569 return (rv); 5570 } 5571 5572 /* 5573 * Check compliance with PSARC 2003/375: 5574 * 5575 * The name must contain only characters a-z, A-Z, 0-9 or _ and it must not 5576 * exceed IFNAMSIZ (16) characters in length. 5577 */ 5578 static boolean_t 5579 verify_name(const char *name) 5580 { 5581 size_t len = strlen(name); 5582 const char *cp; 5583 5584 if (len == 0 || len > IFNAMSIZ) 5585 return (B_FALSE); 5586 5587 for (cp = name; *cp != '\0'; cp++) { 5588 if (!isalnum(*cp) && *cp != '_') 5589 return (B_FALSE); 5590 } 5591 5592 return (B_TRUE); 5593 } 5594 5595 /* 5596 * ddi_create_minor_common: Create a ddi_minor_data structure and 5597 * attach it to the given devinfo node. 
5598 */ 5599 5600 static int 5601 ddi_create_minor_common(dev_info_t *dip, const char *name, int spec_type, 5602 minor_t minor_num, const char *node_type, int flag, ddi_minor_type mtype, 5603 const char *read_priv, const char *write_priv, mode_t priv_mode) 5604 { 5605 struct ddi_minor_data *dmdp; 5606 major_t major; 5607 5608 if (spec_type != S_IFCHR && spec_type != S_IFBLK) 5609 return (DDI_FAILURE); 5610 5611 if (name == NULL) 5612 return (DDI_FAILURE); 5613 5614 /* 5615 * Log a message if the minor number the driver is creating 5616 * is not expressible on the on-disk filesystem (currently 5617 * this is limited to 18 bits both by UFS). The device can 5618 * be opened via devfs, but not by device special files created 5619 * via mknod(). 5620 */ 5621 if (minor_num > L_MAXMIN32) { 5622 cmn_err(CE_WARN, 5623 "%s%d:%s minor 0x%x too big for 32-bit applications", 5624 ddi_driver_name(dip), ddi_get_instance(dip), 5625 name, minor_num); 5626 return (DDI_FAILURE); 5627 } 5628 5629 /* dip must be bound and attached */ 5630 major = ddi_driver_major(dip); 5631 ASSERT(major != DDI_MAJOR_T_NONE); 5632 5633 /* 5634 * Default node_type to DDI_PSEUDO and issue notice in debug mode 5635 */ 5636 if (node_type == NULL) { 5637 node_type = DDI_PSEUDO; 5638 NDI_CONFIG_DEBUG((CE_NOTE, "!illegal node_type NULL for %s%d " 5639 " minor node %s; default to DDI_PSEUDO", 5640 ddi_driver_name(dip), ddi_get_instance(dip), name)); 5641 } 5642 5643 /* 5644 * If the driver is a network driver, ensure that the name falls within 5645 * the interface naming constraints specified by PSARC/2003/375. 
5646 */ 5647 if (strcmp(node_type, DDI_NT_NET) == 0) { 5648 if (!verify_name(name)) 5649 return (DDI_FAILURE); 5650 5651 if (mtype == DDM_MINOR) { 5652 struct devnames *dnp = &devnamesp[major]; 5653 5654 /* Mark driver as a network driver */ 5655 LOCK_DEV_OPS(&dnp->dn_lock); 5656 dnp->dn_flags |= DN_NETWORK_DRIVER; 5657 5658 /* 5659 * If this minor node is created during the device 5660 * attachment, this is a physical network device. 5661 * Mark the driver as a physical network driver. 5662 */ 5663 if (DEVI_IS_ATTACHING(dip)) 5664 dnp->dn_flags |= DN_NETWORK_PHYSDRIVER; 5665 UNLOCK_DEV_OPS(&dnp->dn_lock); 5666 } 5667 } 5668 5669 if (mtype == DDM_MINOR) { 5670 if (derive_devi_class(dip, node_type, KM_NOSLEEP) != 5671 DDI_SUCCESS) 5672 return (DDI_FAILURE); 5673 } 5674 5675 /* 5676 * Take care of minor number information for the node. 5677 */ 5678 5679 if ((dmdp = kmem_zalloc(sizeof (struct ddi_minor_data), 5680 KM_NOSLEEP)) == NULL) { 5681 return (DDI_FAILURE); 5682 } 5683 if ((dmdp->ddm_name = i_ddi_strdup(name, KM_NOSLEEP)) == NULL) { 5684 kmem_free(dmdp, sizeof (struct ddi_minor_data)); 5685 return (DDI_FAILURE); 5686 } 5687 dmdp->dip = dip; 5688 dmdp->ddm_dev = makedevice(major, minor_num); 5689 dmdp->ddm_spec_type = spec_type; 5690 dmdp->ddm_node_type = node_type; 5691 dmdp->type = mtype; 5692 if (flag & CLONE_DEV) { 5693 dmdp->type = DDM_ALIAS; 5694 dmdp->ddm_dev = makedevice(ddi_driver_major(clone_dip), major); 5695 } 5696 if (flag & PRIVONLY_DEV) { 5697 dmdp->ddm_flags |= DM_NO_FSPERM; 5698 } 5699 if (read_priv || write_priv) { 5700 dmdp->ddm_node_priv = 5701 devpolicy_priv_by_name(read_priv, write_priv); 5702 } 5703 dmdp->ddm_priv_mode = priv_mode; 5704 5705 ddi_append_minor_node(dip, dmdp); 5706 5707 /* 5708 * only log ddi_create_minor_node() calls which occur 5709 * outside the scope of attach(9e)/detach(9e) reconfigurations 5710 */ 5711 if (!(DEVI_IS_ATTACHING(dip) || DEVI_IS_DETACHING(dip)) && 5712 mtype != DDM_INTERNAL_PATH) { 5713 (void) 
i_log_devfs_minor_create(dip, dmdp->ddm_name); 5714 } 5715 5716 /* 5717 * Check if any dacf rules match the creation of this minor node 5718 */ 5719 dacfc_match_create_minor(name, node_type, dip, dmdp, flag); 5720 return (DDI_SUCCESS); 5721 } 5722 5723 int 5724 ddi_create_minor_node(dev_info_t *dip, const char *name, int spec_type, 5725 minor_t minor_num, const char *node_type, int flag) 5726 { 5727 return (ddi_create_minor_common(dip, name, spec_type, minor_num, 5728 node_type, flag, DDM_MINOR, NULL, NULL, 0)); 5729 } 5730 5731 int 5732 ddi_create_priv_minor_node(dev_info_t *dip, const char *name, int spec_type, 5733 minor_t minor_num, const char *node_type, int flag, 5734 const char *rdpriv, const char *wrpriv, mode_t priv_mode) 5735 { 5736 return (ddi_create_minor_common(dip, name, spec_type, minor_num, 5737 node_type, flag, DDM_MINOR, rdpriv, wrpriv, priv_mode)); 5738 } 5739 5740 int 5741 ddi_create_default_minor_node(dev_info_t *dip, const char *name, int spec_type, 5742 minor_t minor_num, const char *node_type, int flag) 5743 { 5744 return (ddi_create_minor_common(dip, name, spec_type, minor_num, 5745 node_type, flag, DDM_DEFAULT, NULL, NULL, 0)); 5746 } 5747 5748 /* 5749 * Internal (non-ddi) routine for drivers to export names known 5750 * to the kernel (especially ddi_pathname_to_dev_t and friends) 5751 * but not exported externally to /dev 5752 */ 5753 int 5754 ddi_create_internal_pathname(dev_info_t *dip, char *name, int spec_type, 5755 minor_t minor_num) 5756 { 5757 return (ddi_create_minor_common(dip, name, spec_type, minor_num, 5758 "internal", 0, DDM_INTERNAL_PATH, NULL, NULL, 0)); 5759 } 5760 5761 void 5762 ddi_remove_minor_node(dev_info_t *dip, const char *name) 5763 { 5764 int circ; 5765 struct ddi_minor_data *dmdp, *dmdp1; 5766 struct ddi_minor_data **dmdp_prev; 5767 5768 ndi_devi_enter(dip, &circ); 5769 dmdp_prev = &DEVI(dip)->devi_minor; 5770 dmdp = DEVI(dip)->devi_minor; 5771 while (dmdp != NULL) { 5772 dmdp1 = dmdp->next; 5773 if ((name == 
NULL || (dmdp->ddm_name != NULL && 5774 strcmp(name, dmdp->ddm_name) == 0))) { 5775 if (dmdp->ddm_name != NULL) { 5776 if (dmdp->type != DDM_INTERNAL_PATH) 5777 (void) i_log_devfs_minor_remove(dip, 5778 dmdp->ddm_name); 5779 kmem_free(dmdp->ddm_name, 5780 strlen(dmdp->ddm_name) + 1); 5781 } 5782 /* 5783 * Release device privilege, if any. 5784 * Release dacf client data associated with this minor 5785 * node by storing NULL. 5786 */ 5787 if (dmdp->ddm_node_priv) 5788 dpfree(dmdp->ddm_node_priv); 5789 dacf_store_info((dacf_infohdl_t)dmdp, NULL); 5790 kmem_free(dmdp, sizeof (struct ddi_minor_data)); 5791 *dmdp_prev = dmdp1; 5792 /* 5793 * OK, we found it, so get out now -- if we drive on, 5794 * we will strcmp against garbage. See 1139209. 5795 */ 5796 if (name != NULL) 5797 break; 5798 } else { 5799 dmdp_prev = &dmdp->next; 5800 } 5801 dmdp = dmdp1; 5802 } 5803 ndi_devi_exit(dip, circ); 5804 } 5805 5806 5807 int 5808 ddi_in_panic() 5809 { 5810 return (panicstr != NULL); 5811 } 5812 5813 5814 /* 5815 * Find first bit set in a mask (returned counting from 1 up) 5816 */ 5817 5818 int 5819 ddi_ffs(long mask) 5820 { 5821 return (ffs(mask)); 5822 } 5823 5824 /* 5825 * Find last bit set. Take mask and clear 5826 * all but the most significant bit, and 5827 * then let ffs do the rest of the work. 5828 * 5829 * Algorithm courtesy of Steve Chessin. 5830 */ 5831 5832 int 5833 ddi_fls(long mask) 5834 { 5835 while (mask) { 5836 long nx; 5837 5838 if ((nx = (mask & (mask - 1))) == 0) 5839 break; 5840 mask = nx; 5841 } 5842 return (ffs(mask)); 5843 } 5844 5845 /* 5846 * The ddi_soft_state_* routines comprise generic storage management utilities 5847 * for driver soft state structures (in "the old days," this was done with 5848 * statically sized array - big systems and dynamic loading and unloading 5849 * make heap allocation more attractive). 5850 */ 5851 5852 /* 5853 * Allocate a set of pointers to 'n_items' objects of size 'size' 5854 * bytes. 
Each pointer is initialized to nil. 5855 * 5856 * The 'size' and 'n_items' values are stashed in the opaque 5857 * handle returned to the caller. 5858 * 5859 * This implementation interprets 'set of pointers' to mean 'array 5860 * of pointers' but note that nothing in the interface definition 5861 * precludes an implementation that uses, for example, a linked list. 5862 * However there should be a small efficiency gain from using an array 5863 * at lookup time. 5864 * 5865 * NOTE As an optimization, we make our growable array allocations in 5866 * powers of two (bytes), since that's how much kmem_alloc (currently) 5867 * gives us anyway. It should save us some free/realloc's .. 5868 * 5869 * As a further optimization, we make the growable array start out 5870 * with MIN_N_ITEMS in it. 5871 */ 5872 5873 #define MIN_N_ITEMS 8 /* 8 void *'s == 32 bytes */ 5874 5875 int 5876 ddi_soft_state_init(void **state_p, size_t size, size_t n_items) 5877 { 5878 i_ddi_soft_state *ss; 5879 5880 if (state_p == NULL || size == 0) 5881 return (EINVAL); 5882 5883 ss = kmem_zalloc(sizeof (*ss), KM_SLEEP); 5884 mutex_init(&ss->lock, NULL, MUTEX_DRIVER, NULL); 5885 ss->size = size; 5886 5887 if (n_items < MIN_N_ITEMS) 5888 ss->n_items = MIN_N_ITEMS; 5889 else { 5890 int bitlog; 5891 5892 if ((bitlog = ddi_fls(n_items)) == ddi_ffs(n_items)) 5893 bitlog--; 5894 ss->n_items = 1 << bitlog; 5895 } 5896 5897 ASSERT(ss->n_items >= n_items); 5898 5899 ss->array = kmem_zalloc(ss->n_items * sizeof (void *), KM_SLEEP); 5900 5901 *state_p = ss; 5902 return (0); 5903 } 5904 5905 /* 5906 * Allocate a state structure of size 'size' to be associated 5907 * with item 'item'. 5908 * 5909 * In this implementation, the array is extended to 5910 * allow the requested offset, if needed. 
5911 */ 5912 int 5913 ddi_soft_state_zalloc(void *state, int item) 5914 { 5915 i_ddi_soft_state *ss = (i_ddi_soft_state *)state; 5916 void **array; 5917 void *new_element; 5918 5919 if ((state == NULL) || (item < 0)) 5920 return (DDI_FAILURE); 5921 5922 mutex_enter(&ss->lock); 5923 if (ss->size == 0) { 5924 mutex_exit(&ss->lock); 5925 cmn_err(CE_WARN, "ddi_soft_state_zalloc: bad handle: %s", 5926 mod_containing_pc(caller())); 5927 return (DDI_FAILURE); 5928 } 5929 5930 array = ss->array; /* NULL if ss->n_items == 0 */ 5931 ASSERT(ss->n_items != 0 && array != NULL); 5932 5933 /* 5934 * refuse to tread on an existing element 5935 */ 5936 if (item < ss->n_items && array[item] != NULL) { 5937 mutex_exit(&ss->lock); 5938 return (DDI_FAILURE); 5939 } 5940 5941 /* 5942 * Allocate a new element to plug in 5943 */ 5944 new_element = kmem_zalloc(ss->size, KM_SLEEP); 5945 5946 /* 5947 * Check if the array is big enough, if not, grow it. 5948 */ 5949 if (item >= ss->n_items) { 5950 void **new_array; 5951 size_t new_n_items; 5952 struct i_ddi_soft_state *dirty; 5953 5954 /* 5955 * Allocate a new array of the right length, copy 5956 * all the old pointers to the new array, then 5957 * if it exists at all, put the old array on the 5958 * dirty list. 5959 * 5960 * Note that we can't kmem_free() the old array. 5961 * 5962 * Why -- well the 'get' operation is 'mutex-free', so we 5963 * can't easily catch a suspended thread that is just about 5964 * to dereference the array we just grew out of. So we 5965 * cons up a header and put it on a list of 'dirty' 5966 * pointer arrays. (Dirty in the sense that there may 5967 * be suspended threads somewhere that are in the middle 5968 * of referencing them). Fortunately, we -can- garbage 5969 * collect it all at ddi_soft_state_fini time. 5970 */ 5971 new_n_items = ss->n_items; 5972 while (new_n_items < (1 + item)) 5973 new_n_items <<= 1; /* double array size .. */ 5974 5975 ASSERT(new_n_items >= (1 + item)); /* sanity check! 
*/ 5976 5977 new_array = kmem_zalloc(new_n_items * sizeof (void *), 5978 KM_SLEEP); 5979 /* 5980 * Copy the pointers into the new array 5981 */ 5982 bcopy(array, new_array, ss->n_items * sizeof (void *)); 5983 5984 /* 5985 * Save the old array on the dirty list 5986 */ 5987 dirty = kmem_zalloc(sizeof (*dirty), KM_SLEEP); 5988 dirty->array = ss->array; 5989 dirty->n_items = ss->n_items; 5990 dirty->next = ss->next; 5991 ss->next = dirty; 5992 5993 ss->array = (array = new_array); 5994 ss->n_items = new_n_items; 5995 } 5996 5997 ASSERT(array != NULL && item < ss->n_items && array[item] == NULL); 5998 5999 array[item] = new_element; 6000 6001 mutex_exit(&ss->lock); 6002 return (DDI_SUCCESS); 6003 } 6004 6005 /* 6006 * Fetch a pointer to the allocated soft state structure. 6007 * 6008 * This is designed to be cheap. 6009 * 6010 * There's an argument that there should be more checking for 6011 * nil pointers and out of bounds on the array.. but we do a lot 6012 * of that in the alloc/free routines. 6013 * 6014 * An array has the convenience that we don't need to lock read-access 6015 * to it c.f. a linked list. However our "expanding array" strategy 6016 * means that we should hold a readers lock on the i_ddi_soft_state 6017 * structure. 6018 * 6019 * However, from a performance viewpoint, we need to do it without 6020 * any locks at all -- this also makes it a leaf routine. The algorithm 6021 * is 'lock-free' because we only discard the pointer arrays at 6022 * ddi_soft_state_fini() time. 6023 */ 6024 void * 6025 ddi_get_soft_state(void *state, int item) 6026 { 6027 i_ddi_soft_state *ss = (i_ddi_soft_state *)state; 6028 6029 ASSERT((ss != NULL) && (item >= 0)); 6030 6031 if (item < ss->n_items && ss->array != NULL) 6032 return (ss->array[item]); 6033 return (NULL); 6034 } 6035 6036 /* 6037 * Free the state structure corresponding to 'item.' Freeing an 6038 * element that has either gone or was never allocated is not 6039 * considered an error. 
Note that we free the state structure, but 6040 * we don't shrink our pointer array, or discard 'dirty' arrays, 6041 * since even a few pointers don't really waste too much memory. 6042 * 6043 * Passing an item number that is out of bounds, or a null pointer will 6044 * provoke an error message. 6045 */ 6046 void 6047 ddi_soft_state_free(void *state, int item) 6048 { 6049 i_ddi_soft_state *ss = (i_ddi_soft_state *)state; 6050 void **array; 6051 void *element; 6052 static char msg[] = "ddi_soft_state_free:"; 6053 6054 if (ss == NULL) { 6055 cmn_err(CE_WARN, "%s null handle: %s", 6056 msg, mod_containing_pc(caller())); 6057 return; 6058 } 6059 6060 element = NULL; 6061 6062 mutex_enter(&ss->lock); 6063 6064 if ((array = ss->array) == NULL || ss->size == 0) { 6065 cmn_err(CE_WARN, "%s bad handle: %s", 6066 msg, mod_containing_pc(caller())); 6067 } else if (item < 0 || item >= ss->n_items) { 6068 cmn_err(CE_WARN, "%s item %d not in range [0..%lu]: %s", 6069 msg, item, ss->n_items - 1, mod_containing_pc(caller())); 6070 } else if (array[item] != NULL) { 6071 element = array[item]; 6072 array[item] = NULL; 6073 } 6074 6075 mutex_exit(&ss->lock); 6076 6077 if (element) 6078 kmem_free(element, ss->size); 6079 } 6080 6081 /* 6082 * Free the entire set of pointers, and any 6083 * soft state structures contained therein. 6084 * 6085 * Note that we don't grab the ss->lock mutex, even though 6086 * we're inspecting the various fields of the data structure. 6087 * 6088 * There is an implicit assumption that this routine will 6089 * never run concurrently with any of the above on this 6090 * particular state structure i.e. by the time the driver 6091 * calls this routine, there should be no other threads 6092 * running in the driver. 
6093 */ 6094 void 6095 ddi_soft_state_fini(void **state_p) 6096 { 6097 i_ddi_soft_state *ss, *dirty; 6098 int item; 6099 static char msg[] = "ddi_soft_state_fini:"; 6100 6101 if (state_p == NULL || 6102 (ss = (i_ddi_soft_state *)(*state_p)) == NULL) { 6103 cmn_err(CE_WARN, "%s null handle: %s", 6104 msg, mod_containing_pc(caller())); 6105 return; 6106 } 6107 6108 if (ss->size == 0) { 6109 cmn_err(CE_WARN, "%s bad handle: %s", 6110 msg, mod_containing_pc(caller())); 6111 return; 6112 } 6113 6114 if (ss->n_items > 0) { 6115 for (item = 0; item < ss->n_items; item++) 6116 ddi_soft_state_free(ss, item); 6117 kmem_free(ss->array, ss->n_items * sizeof (void *)); 6118 } 6119 6120 /* 6121 * Now delete any dirty arrays from previous 'grow' operations 6122 */ 6123 for (dirty = ss->next; dirty; dirty = ss->next) { 6124 ss->next = dirty->next; 6125 kmem_free(dirty->array, dirty->n_items * sizeof (void *)); 6126 kmem_free(dirty, sizeof (*dirty)); 6127 } 6128 6129 mutex_destroy(&ss->lock); 6130 kmem_free(ss, sizeof (*ss)); 6131 6132 *state_p = NULL; 6133 } 6134 6135 #define SS_N_ITEMS_PER_HASH 16 6136 #define SS_MIN_HASH_SZ 16 6137 #define SS_MAX_HASH_SZ 4096 6138 6139 int 6140 ddi_soft_state_bystr_init(ddi_soft_state_bystr **state_p, size_t size, 6141 int n_items) 6142 { 6143 i_ddi_soft_state_bystr *sss; 6144 int hash_sz; 6145 6146 ASSERT(state_p && size && n_items); 6147 if ((state_p == NULL) || (size == 0) || (n_items == 0)) 6148 return (EINVAL); 6149 6150 /* current implementation is based on hash, convert n_items to hash */ 6151 hash_sz = n_items / SS_N_ITEMS_PER_HASH; 6152 if (hash_sz < SS_MIN_HASH_SZ) 6153 hash_sz = SS_MIN_HASH_SZ; 6154 else if (hash_sz > SS_MAX_HASH_SZ) 6155 hash_sz = SS_MAX_HASH_SZ; 6156 6157 /* allocate soft_state pool */ 6158 sss = kmem_zalloc(sizeof (*sss), KM_SLEEP); 6159 sss->ss_size = size; 6160 sss->ss_mod_hash = mod_hash_create_strhash("soft_state_bystr", 6161 hash_sz, mod_hash_null_valdtor); 6162 *state_p = (ddi_soft_state_bystr *)sss; 6163 
return (0); 6164 } 6165 6166 int 6167 ddi_soft_state_bystr_zalloc(ddi_soft_state_bystr *state, const char *str) 6168 { 6169 i_ddi_soft_state_bystr *sss = (i_ddi_soft_state_bystr *)state; 6170 void *sso; 6171 char *dup_str; 6172 6173 ASSERT(sss && str && sss->ss_mod_hash); 6174 if ((sss == NULL) || (str == NULL) || (sss->ss_mod_hash == NULL)) 6175 return (DDI_FAILURE); 6176 sso = kmem_zalloc(sss->ss_size, KM_SLEEP); 6177 dup_str = i_ddi_strdup((char *)str, KM_SLEEP); 6178 if (mod_hash_insert(sss->ss_mod_hash, 6179 (mod_hash_key_t)dup_str, (mod_hash_val_t)sso) == 0) 6180 return (DDI_SUCCESS); 6181 6182 /* 6183 * The only error from an strhash insert is caused by a duplicate key. 6184 * We refuse to tread on an existing elements, so free and fail. 6185 */ 6186 kmem_free(dup_str, strlen(dup_str) + 1); 6187 kmem_free(sso, sss->ss_size); 6188 return (DDI_FAILURE); 6189 } 6190 6191 void * 6192 ddi_soft_state_bystr_get(ddi_soft_state_bystr *state, const char *str) 6193 { 6194 i_ddi_soft_state_bystr *sss = (i_ddi_soft_state_bystr *)state; 6195 void *sso; 6196 6197 ASSERT(sss && str && sss->ss_mod_hash); 6198 if ((sss == NULL) || (str == NULL) || (sss->ss_mod_hash == NULL)) 6199 return (NULL); 6200 6201 if (mod_hash_find(sss->ss_mod_hash, 6202 (mod_hash_key_t)str, (mod_hash_val_t *)&sso) == 0) 6203 return (sso); 6204 return (NULL); 6205 } 6206 6207 void 6208 ddi_soft_state_bystr_free(ddi_soft_state_bystr *state, const char *str) 6209 { 6210 i_ddi_soft_state_bystr *sss = (i_ddi_soft_state_bystr *)state; 6211 void *sso; 6212 6213 ASSERT(sss && str && sss->ss_mod_hash); 6214 if ((sss == NULL) || (str == NULL) || (sss->ss_mod_hash == NULL)) 6215 return; 6216 6217 (void) mod_hash_remove(sss->ss_mod_hash, 6218 (mod_hash_key_t)str, (mod_hash_val_t *)&sso); 6219 kmem_free(sso, sss->ss_size); 6220 } 6221 6222 void 6223 ddi_soft_state_bystr_fini(ddi_soft_state_bystr **state_p) 6224 { 6225 i_ddi_soft_state_bystr *sss; 6226 6227 ASSERT(state_p); 6228 if (state_p == NULL) 6229 return; 
6230 6231 sss = (i_ddi_soft_state_bystr *)(*state_p); 6232 if (sss == NULL) 6233 return; 6234 6235 ASSERT(sss->ss_mod_hash); 6236 if (sss->ss_mod_hash) { 6237 mod_hash_destroy_strhash(sss->ss_mod_hash); 6238 sss->ss_mod_hash = NULL; 6239 } 6240 6241 kmem_free(sss, sizeof (*sss)); 6242 *state_p = NULL; 6243 } 6244 6245 /* 6246 * The ddi_strid_* routines provide string-to-index management utilities. 6247 */ 6248 /* allocate and initialize an strid set */ 6249 int 6250 ddi_strid_init(ddi_strid **strid_p, int n_items) 6251 { 6252 i_ddi_strid *ss; 6253 int hash_sz; 6254 6255 if (strid_p == NULL) 6256 return (DDI_FAILURE); 6257 6258 /* current implementation is based on hash, convert n_items to hash */ 6259 hash_sz = n_items / SS_N_ITEMS_PER_HASH; 6260 if (hash_sz < SS_MIN_HASH_SZ) 6261 hash_sz = SS_MIN_HASH_SZ; 6262 else if (hash_sz > SS_MAX_HASH_SZ) 6263 hash_sz = SS_MAX_HASH_SZ; 6264 6265 ss = kmem_alloc(sizeof (*ss), KM_SLEEP); 6266 ss->strid_chunksz = n_items; 6267 ss->strid_spacesz = n_items; 6268 ss->strid_space = id_space_create("strid", 1, n_items); 6269 ss->strid_bystr = mod_hash_create_strhash("strid_bystr", hash_sz, 6270 mod_hash_null_valdtor); 6271 ss->strid_byid = mod_hash_create_idhash("strid_byid", hash_sz, 6272 mod_hash_null_valdtor); 6273 *strid_p = (ddi_strid *)ss; 6274 return (DDI_SUCCESS); 6275 } 6276 6277 /* allocate an id mapping within the specified set for str, return id */ 6278 static id_t 6279 i_ddi_strid_alloc(ddi_strid *strid, char *str) 6280 { 6281 i_ddi_strid *ss = (i_ddi_strid *)strid; 6282 id_t id; 6283 char *s; 6284 6285 ASSERT(ss && str); 6286 if ((ss == NULL) || (str == NULL)) 6287 return (0); 6288 6289 /* 6290 * Allocate an id using VM_FIRSTFIT in order to keep allocated id 6291 * range as compressed as possible. This is important to minimize 6292 * the amount of space used when the id is used as a ddi_soft_state 6293 * index by the caller. 
6294 * 6295 * If the id list is exhausted, increase the size of the list 6296 * by the chuck size specified in ddi_strid_init and reattempt 6297 * the allocation 6298 */ 6299 if ((id = id_allocff_nosleep(ss->strid_space)) == (id_t)-1) { 6300 id_space_extend(ss->strid_space, ss->strid_spacesz, 6301 ss->strid_spacesz + ss->strid_chunksz); 6302 ss->strid_spacesz += ss->strid_chunksz; 6303 if ((id = id_allocff_nosleep(ss->strid_space)) == (id_t)-1) 6304 return (0); 6305 } 6306 6307 /* 6308 * NOTE: since we create and destroy in unison we can save space by 6309 * using bystr key as the byid value. This means destroy must occur 6310 * in (byid, bystr) order. 6311 */ 6312 s = i_ddi_strdup(str, KM_SLEEP); 6313 if (mod_hash_insert(ss->strid_bystr, (mod_hash_key_t)s, 6314 (mod_hash_val_t)(intptr_t)id) != 0) { 6315 ddi_strid_free(strid, id); 6316 return (0); 6317 } 6318 if (mod_hash_insert(ss->strid_byid, (mod_hash_key_t)(intptr_t)id, 6319 (mod_hash_val_t)s) != 0) { 6320 ddi_strid_free(strid, id); 6321 return (0); 6322 } 6323 6324 /* NOTE: s if freed on mod_hash_destroy by mod_hash_strval_dtor */ 6325 return (id); 6326 } 6327 6328 /* allocate an id mapping within the specified set for str, return id */ 6329 id_t 6330 ddi_strid_alloc(ddi_strid *strid, char *str) 6331 { 6332 return (i_ddi_strid_alloc(strid, str)); 6333 } 6334 6335 /* return the id within the specified strid given the str */ 6336 id_t 6337 ddi_strid_str2id(ddi_strid *strid, char *str) 6338 { 6339 i_ddi_strid *ss = (i_ddi_strid *)strid; 6340 id_t id = 0; 6341 mod_hash_val_t hv; 6342 6343 ASSERT(ss && str); 6344 if (ss && str && (mod_hash_find(ss->strid_bystr, 6345 (mod_hash_key_t)str, &hv) == 0)) 6346 id = (int)(intptr_t)hv; 6347 return (id); 6348 } 6349 6350 /* return str within the specified strid given the id */ 6351 char * 6352 ddi_strid_id2str(ddi_strid *strid, id_t id) 6353 { 6354 i_ddi_strid *ss = (i_ddi_strid *)strid; 6355 char *str = NULL; 6356 mod_hash_val_t hv; 6357 6358 ASSERT(ss && id > 0); 6359 if 
(ss && (id > 0) && (mod_hash_find(ss->strid_byid, 6360 (mod_hash_key_t)(uintptr_t)id, &hv) == 0)) 6361 str = (char *)hv; 6362 return (str); 6363 } 6364 6365 /* free the id mapping within the specified strid */ 6366 void 6367 ddi_strid_free(ddi_strid *strid, id_t id) 6368 { 6369 i_ddi_strid *ss = (i_ddi_strid *)strid; 6370 char *str; 6371 6372 ASSERT(ss && id > 0); 6373 if ((ss == NULL) || (id <= 0)) 6374 return; 6375 6376 /* bystr key is byid value: destroy order must be (byid, bystr) */ 6377 str = ddi_strid_id2str(strid, id); 6378 (void) mod_hash_destroy(ss->strid_byid, (mod_hash_key_t)(uintptr_t)id); 6379 id_free(ss->strid_space, id); 6380 6381 if (str) 6382 (void) mod_hash_destroy(ss->strid_bystr, (mod_hash_key_t)str); 6383 } 6384 6385 /* destroy the strid set */ 6386 void 6387 ddi_strid_fini(ddi_strid **strid_p) 6388 { 6389 i_ddi_strid *ss; 6390 6391 ASSERT(strid_p); 6392 if (strid_p == NULL) 6393 return; 6394 6395 ss = (i_ddi_strid *)(*strid_p); 6396 if (ss == NULL) 6397 return; 6398 6399 /* bystr key is byid value: destroy order must be (byid, bystr) */ 6400 if (ss->strid_byid) 6401 mod_hash_destroy_hash(ss->strid_byid); 6402 if (ss->strid_byid) 6403 mod_hash_destroy_hash(ss->strid_bystr); 6404 if (ss->strid_space) 6405 id_space_destroy(ss->strid_space); 6406 kmem_free(ss, sizeof (*ss)); 6407 *strid_p = NULL; 6408 } 6409 6410 /* 6411 * This sets the devi_addr entry in the dev_info structure 'dip' to 'name'. 6412 * Storage is double buffered to prevent updates during devi_addr use - 6413 * double buffering is adaquate for reliable ddi_deviname() consumption. 6414 * The double buffer is not freed until dev_info structure destruction 6415 * (by i_ddi_free_node). 
6416 */ 6417 void 6418 ddi_set_name_addr(dev_info_t *dip, char *name) 6419 { 6420 char *buf = DEVI(dip)->devi_addr_buf; 6421 char *newaddr; 6422 6423 if (buf == NULL) { 6424 buf = kmem_zalloc(2 * MAXNAMELEN, KM_SLEEP); 6425 DEVI(dip)->devi_addr_buf = buf; 6426 } 6427 6428 if (name) { 6429 ASSERT(strlen(name) < MAXNAMELEN); 6430 newaddr = (DEVI(dip)->devi_addr == buf) ? 6431 (buf + MAXNAMELEN) : buf; 6432 (void) strlcpy(newaddr, name, MAXNAMELEN); 6433 } else 6434 newaddr = NULL; 6435 6436 DEVI(dip)->devi_addr = newaddr; 6437 } 6438 6439 char * 6440 ddi_get_name_addr(dev_info_t *dip) 6441 { 6442 return (DEVI(dip)->devi_addr); 6443 } 6444 6445 void 6446 ddi_set_parent_data(dev_info_t *dip, void *pd) 6447 { 6448 DEVI(dip)->devi_parent_data = pd; 6449 } 6450 6451 void * 6452 ddi_get_parent_data(dev_info_t *dip) 6453 { 6454 return (DEVI(dip)->devi_parent_data); 6455 } 6456 6457 /* 6458 * ddi_name_to_major: returns the major number of a named module, 6459 * derived from the current driver alias binding. 6460 * 6461 * Caveat: drivers should avoid the use of this function, in particular 6462 * together with ddi_get_name/ddi_binding name, as per 6463 * major = ddi_name_to_major(ddi_get_name(devi)); 6464 * ddi_name_to_major() relies on the state of the device/alias binding, 6465 * which can and does change dynamically as aliases are administered 6466 * over time. An attached device instance cannot rely on the major 6467 * number returned by ddi_name_to_major() to match its own major number. 6468 * 6469 * For driver use, ddi_driver_major() reliably returns the major number 6470 * for the module to which the device was bound at attach time over 6471 * the life of the instance. 6472 * major = ddi_driver_major(dev_info_t *) 6473 */ 6474 major_t 6475 ddi_name_to_major(char *name) 6476 { 6477 return (mod_name_to_major(name)); 6478 } 6479 6480 /* 6481 * ddi_major_to_name: Returns the module name bound to a major number. 
6482 */ 6483 char * 6484 ddi_major_to_name(major_t major) 6485 { 6486 return (mod_major_to_name(major)); 6487 } 6488 6489 /* 6490 * Return the name of the devinfo node pointed at by 'dip' in the buffer 6491 * pointed at by 'name.' A devinfo node is named as a result of calling 6492 * ddi_initchild(). 6493 * 6494 * Note: the driver must be held before calling this function! 6495 */ 6496 char * 6497 ddi_deviname(dev_info_t *dip, char *name) 6498 { 6499 char *addrname; 6500 char none = '\0'; 6501 6502 if (dip == ddi_root_node()) { 6503 *name = '\0'; 6504 return (name); 6505 } 6506 6507 if (i_ddi_node_state(dip) < DS_BOUND) { 6508 addrname = &none; 6509 } else { 6510 /* 6511 * Use ddi_get_name_addr() without checking state so we get 6512 * a unit-address if we are called after ddi_set_name_addr() 6513 * by nexus DDI_CTL_INITCHILD code, but before completing 6514 * node promotion to DS_INITIALIZED. We currently have 6515 * two situations where we are called in this state: 6516 * o For framework processing of a path-oriented alias. 6517 * o If a SCSA nexus driver calls ddi_devid_register() 6518 * from it's tran_tgt_init(9E) implementation. 6519 */ 6520 addrname = ddi_get_name_addr(dip); 6521 if (addrname == NULL) 6522 addrname = &none; 6523 } 6524 6525 if (*addrname == '\0') { 6526 (void) sprintf(name, "/%s", ddi_node_name(dip)); 6527 } else { 6528 (void) sprintf(name, "/%s@%s", ddi_node_name(dip), addrname); 6529 } 6530 6531 return (name); 6532 } 6533 6534 /* 6535 * Spits out the name of device node, typically name@addr, for a given node, 6536 * using the driver name, not the nodename. 6537 * 6538 * Used by match_parent. Not to be used elsewhere. 
6539 */ 6540 char * 6541 i_ddi_parname(dev_info_t *dip, char *name) 6542 { 6543 char *addrname; 6544 6545 if (dip == ddi_root_node()) { 6546 *name = '\0'; 6547 return (name); 6548 } 6549 6550 ASSERT(i_ddi_node_state(dip) >= DS_INITIALIZED); 6551 6552 if (*(addrname = ddi_get_name_addr(dip)) == '\0') 6553 (void) sprintf(name, "%s", ddi_binding_name(dip)); 6554 else 6555 (void) sprintf(name, "%s@%s", ddi_binding_name(dip), addrname); 6556 return (name); 6557 } 6558 6559 static char * 6560 pathname_work(dev_info_t *dip, char *path) 6561 { 6562 char *bp; 6563 6564 if (dip == ddi_root_node()) { 6565 *path = '\0'; 6566 return (path); 6567 } 6568 (void) pathname_work(ddi_get_parent(dip), path); 6569 bp = path + strlen(path); 6570 (void) ddi_deviname(dip, bp); 6571 return (path); 6572 } 6573 6574 char * 6575 ddi_pathname(dev_info_t *dip, char *path) 6576 { 6577 return (pathname_work(dip, path)); 6578 } 6579 6580 char * 6581 ddi_pathname_minor(struct ddi_minor_data *dmdp, char *path) 6582 { 6583 if (dmdp->dip == NULL) 6584 *path = '\0'; 6585 else { 6586 (void) ddi_pathname(dmdp->dip, path); 6587 if (dmdp->ddm_name) { 6588 (void) strcat(path, ":"); 6589 (void) strcat(path, dmdp->ddm_name); 6590 } 6591 } 6592 return (path); 6593 } 6594 6595 static char * 6596 pathname_work_obp(dev_info_t *dip, char *path) 6597 { 6598 char *bp; 6599 char *obp_path; 6600 6601 /* 6602 * look up the "obp-path" property, return the path if it exists 6603 */ 6604 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, 6605 "obp-path", &obp_path) == DDI_PROP_SUCCESS) { 6606 (void) strcpy(path, obp_path); 6607 ddi_prop_free(obp_path); 6608 return (path); 6609 } 6610 6611 /* 6612 * stop at root, no obp path 6613 */ 6614 if (dip == ddi_root_node()) { 6615 return (NULL); 6616 } 6617 6618 obp_path = pathname_work_obp(ddi_get_parent(dip), path); 6619 if (obp_path == NULL) 6620 return (NULL); 6621 6622 /* 6623 * append our component to parent's obp path 6624 */ 6625 bp = path + strlen(path); 6626 
if (*(bp - 1) != '/') 6627 (void) strcat(bp++, "/"); 6628 (void) ddi_deviname(dip, bp); 6629 return (path); 6630 } 6631 6632 /* 6633 * return the 'obp-path' based path for the given node, or NULL if the node 6634 * does not have a different obp path. NOTE: Unlike ddi_pathname, this 6635 * function can't be called from interrupt context (since we need to 6636 * lookup a string property). 6637 */ 6638 char * 6639 ddi_pathname_obp(dev_info_t *dip, char *path) 6640 { 6641 ASSERT(!servicing_interrupt()); 6642 if (dip == NULL || path == NULL) 6643 return (NULL); 6644 6645 /* split work into a separate function to aid debugging */ 6646 return (pathname_work_obp(dip, path)); 6647 } 6648 6649 int 6650 ddi_pathname_obp_set(dev_info_t *dip, char *component) 6651 { 6652 dev_info_t *pdip; 6653 char *obp_path = NULL; 6654 int rc = DDI_FAILURE; 6655 6656 if (dip == NULL) 6657 return (DDI_FAILURE); 6658 6659 obp_path = kmem_zalloc(MAXPATHLEN, KM_SLEEP); 6660 6661 pdip = ddi_get_parent(dip); 6662 6663 if (ddi_pathname_obp(pdip, obp_path) == NULL) { 6664 (void) ddi_pathname(pdip, obp_path); 6665 } 6666 6667 if (component) { 6668 (void) strncat(obp_path, "/", MAXPATHLEN); 6669 (void) strncat(obp_path, component, MAXPATHLEN); 6670 } 6671 rc = ndi_prop_update_string(DDI_DEV_T_NONE, dip, "obp-path", 6672 obp_path); 6673 6674 if (obp_path) 6675 kmem_free(obp_path, MAXPATHLEN); 6676 6677 return (rc); 6678 } 6679 6680 /* 6681 * Given a dev_t, return the pathname of the corresponding device in the 6682 * buffer pointed at by "path." The buffer is assumed to be large enough 6683 * to hold the pathname of the device (MAXPATHLEN). 6684 * 6685 * The pathname of a device is the pathname of the devinfo node to which 6686 * the device "belongs," concatenated with the character ':' and the name 6687 * of the minor node corresponding to the dev_t. If spec_type is 0 then 6688 * just the pathname of the devinfo node is returned without driving attach 6689 * of that node. 
For a non-zero spec_type, an attach is performed and a 6690 * search of the minor list occurs. 6691 * 6692 * It is possible that the path associated with the dev_t is not 6693 * currently available in the devinfo tree. In order to have a 6694 * dev_t, a device must have been discovered before, which means 6695 * that the path is always in the instance tree. The one exception 6696 * to this is if the dev_t is associated with a pseudo driver, in 6697 * which case the device must exist on the pseudo branch of the 6698 * devinfo tree as a result of parsing .conf files. 6699 */ 6700 int 6701 ddi_dev_pathname(dev_t devt, int spec_type, char *path) 6702 { 6703 int circ; 6704 major_t major = getmajor(devt); 6705 int instance; 6706 dev_info_t *dip; 6707 char *minorname; 6708 char *drvname; 6709 6710 if (major >= devcnt) 6711 goto fail; 6712 if (major == clone_major) { 6713 /* clone has no minor nodes, manufacture the path here */ 6714 if ((drvname = ddi_major_to_name(getminor(devt))) == NULL) 6715 goto fail; 6716 6717 (void) snprintf(path, MAXPATHLEN, "%s:%s", CLONE_PATH, drvname); 6718 return (DDI_SUCCESS); 6719 } 6720 6721 /* extract instance from devt (getinfo(9E) DDI_INFO_DEVT2INSTANCE). */ 6722 if ((instance = dev_to_instance(devt)) == -1) 6723 goto fail; 6724 6725 /* reconstruct the path given the major/instance */ 6726 if (e_ddi_majorinstance_to_path(major, instance, path) != DDI_SUCCESS) 6727 goto fail; 6728 6729 /* if spec_type given we must drive attach and search minor nodes */ 6730 if ((spec_type == S_IFCHR) || (spec_type == S_IFBLK)) { 6731 /* attach the path so we can search minors */ 6732 if ((dip = e_ddi_hold_devi_by_path(path, 0)) == NULL) 6733 goto fail; 6734 6735 /* Add minorname to path. 
*/ 6736 ndi_devi_enter(dip, &circ); 6737 minorname = i_ddi_devtspectype_to_minorname(dip, 6738 devt, spec_type); 6739 if (minorname) { 6740 (void) strcat(path, ":"); 6741 (void) strcat(path, minorname); 6742 } 6743 ndi_devi_exit(dip, circ); 6744 ddi_release_devi(dip); 6745 if (minorname == NULL) 6746 goto fail; 6747 } 6748 ASSERT(strlen(path) < MAXPATHLEN); 6749 return (DDI_SUCCESS); 6750 6751 fail: *path = 0; 6752 return (DDI_FAILURE); 6753 } 6754 6755 /* 6756 * Given a major number and an instance, return the path. 6757 * This interface does NOT drive attach. 6758 */ 6759 int 6760 e_ddi_majorinstance_to_path(major_t major, int instance, char *path) 6761 { 6762 struct devnames *dnp; 6763 dev_info_t *dip; 6764 6765 if ((major >= devcnt) || (instance == -1)) { 6766 *path = 0; 6767 return (DDI_FAILURE); 6768 } 6769 6770 /* look for the major/instance in the instance tree */ 6771 if (e_ddi_instance_majorinstance_to_path(major, instance, 6772 path) == DDI_SUCCESS) { 6773 ASSERT(strlen(path) < MAXPATHLEN); 6774 return (DDI_SUCCESS); 6775 } 6776 6777 /* 6778 * Not in instance tree, find the instance on the per driver list and 6779 * construct path to instance via ddi_pathname(). This is how paths 6780 * down the 'pseudo' branch are constructed. 6781 */ 6782 dnp = &(devnamesp[major]); 6783 LOCK_DEV_OPS(&(dnp->dn_lock)); 6784 for (dip = dnp->dn_head; dip; 6785 dip = (dev_info_t *)DEVI(dip)->devi_next) { 6786 /* Skip if instance does not match. */ 6787 if (DEVI(dip)->devi_instance != instance) 6788 continue; 6789 6790 /* 6791 * An ndi_hold_devi() does not prevent DS_INITIALIZED->DS_BOUND 6792 * node demotion, so it is not an effective way of ensuring 6793 * that the ddi_pathname result has a unit-address. Instead, 6794 * we reverify the node state after calling ddi_pathname(). 
6795 */ 6796 if (i_ddi_node_state(dip) >= DS_INITIALIZED) { 6797 (void) ddi_pathname(dip, path); 6798 if (i_ddi_node_state(dip) < DS_INITIALIZED) 6799 continue; 6800 UNLOCK_DEV_OPS(&(dnp->dn_lock)); 6801 ASSERT(strlen(path) < MAXPATHLEN); 6802 return (DDI_SUCCESS); 6803 } 6804 } 6805 UNLOCK_DEV_OPS(&(dnp->dn_lock)); 6806 6807 /* can't reconstruct the path */ 6808 *path = 0; 6809 return (DDI_FAILURE); 6810 } 6811 6812 #define GLD_DRIVER_PPA "SUNW,gld_v0_ppa" 6813 6814 /* 6815 * Given the dip for a network interface return the ppa for that interface. 6816 * 6817 * In all cases except GLD v0 drivers, the ppa == instance. 6818 * In the case of GLD v0 drivers, the ppa is equal to the attach order. 6819 * So for these drivers when the attach routine calls gld_register(), 6820 * the GLD framework creates an integer property called "gld_driver_ppa" 6821 * that can be queried here. 6822 * 6823 * The only time this function is used is when a system is booting over nfs. 6824 * In this case the system has to resolve the pathname of the boot device 6825 * to it's ppa. 6826 */ 6827 int 6828 i_ddi_devi_get_ppa(dev_info_t *dip) 6829 { 6830 return (ddi_prop_get_int(DDI_DEV_T_ANY, dip, 6831 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, 6832 GLD_DRIVER_PPA, ddi_get_instance(dip))); 6833 } 6834 6835 /* 6836 * i_ddi_devi_set_ppa() should only be called from gld_register() 6837 * and only for GLD v0 drivers 6838 */ 6839 void 6840 i_ddi_devi_set_ppa(dev_info_t *dip, int ppa) 6841 { 6842 (void) e_ddi_prop_update_int(DDI_DEV_T_NONE, dip, GLD_DRIVER_PPA, ppa); 6843 } 6844 6845 6846 /* 6847 * Private DDI Console bell functions. 
6848 */ 6849 void 6850 ddi_ring_console_bell(clock_t duration) 6851 { 6852 if (ddi_console_bell_func != NULL) 6853 (*ddi_console_bell_func)(duration); 6854 } 6855 6856 void 6857 ddi_set_console_bell(void (*bellfunc)(clock_t duration)) 6858 { 6859 ddi_console_bell_func = bellfunc; 6860 } 6861 6862 int 6863 ddi_dma_alloc_handle(dev_info_t *dip, ddi_dma_attr_t *attr, 6864 int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep) 6865 { 6866 int (*funcp)() = ddi_dma_allochdl; 6867 ddi_dma_attr_t dma_attr; 6868 struct bus_ops *bop; 6869 6870 if (attr == (ddi_dma_attr_t *)0) 6871 return (DDI_DMA_BADATTR); 6872 6873 dma_attr = *attr; 6874 6875 bop = DEVI(dip)->devi_ops->devo_bus_ops; 6876 if (bop && bop->bus_dma_allochdl) 6877 funcp = bop->bus_dma_allochdl; 6878 6879 return ((*funcp)(dip, dip, &dma_attr, waitfp, arg, handlep)); 6880 } 6881 6882 void 6883 ddi_dma_free_handle(ddi_dma_handle_t *handlep) 6884 { 6885 ddi_dma_handle_t h = *handlep; 6886 (void) ddi_dma_freehdl(HD, HD, h); 6887 } 6888 6889 static uintptr_t dma_mem_list_id = 0; 6890 6891 6892 int 6893 ddi_dma_mem_alloc(ddi_dma_handle_t handle, size_t length, 6894 ddi_device_acc_attr_t *accattrp, uint_t flags, 6895 int (*waitfp)(caddr_t), caddr_t arg, caddr_t *kaddrp, 6896 size_t *real_length, ddi_acc_handle_t *handlep) 6897 { 6898 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle; 6899 dev_info_t *dip = hp->dmai_rdip; 6900 ddi_acc_hdl_t *ap; 6901 ddi_dma_attr_t *attrp = &hp->dmai_attr; 6902 uint_t sleepflag, xfermodes; 6903 int (*fp)(caddr_t); 6904 int rval; 6905 6906 if (waitfp == DDI_DMA_SLEEP) 6907 fp = (int (*)())KM_SLEEP; 6908 else if (waitfp == DDI_DMA_DONTWAIT) 6909 fp = (int (*)())KM_NOSLEEP; 6910 else 6911 fp = waitfp; 6912 *handlep = impl_acc_hdl_alloc(fp, arg); 6913 if (*handlep == NULL) 6914 return (DDI_FAILURE); 6915 6916 /* check if the cache attributes are supported */ 6917 if (i_ddi_check_cache_attr(flags) == B_FALSE) 6918 return (DDI_FAILURE); 6919 6920 /* 6921 * Transfer the meaningful bits to 
xfermodes. 6922 * Double-check if the 3rd party driver correctly sets the bits. 6923 * If not, set DDI_DMA_STREAMING to keep compatibility. 6924 */ 6925 xfermodes = flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING); 6926 if (xfermodes == 0) { 6927 xfermodes = DDI_DMA_STREAMING; 6928 } 6929 6930 /* 6931 * initialize the common elements of data access handle 6932 */ 6933 ap = impl_acc_hdl_get(*handlep); 6934 ap->ah_vers = VERS_ACCHDL; 6935 ap->ah_dip = dip; 6936 ap->ah_offset = 0; 6937 ap->ah_len = 0; 6938 ap->ah_xfermodes = flags; 6939 ap->ah_acc = *accattrp; 6940 6941 sleepflag = ((waitfp == DDI_DMA_SLEEP) ? 1 : 0); 6942 if (xfermodes == DDI_DMA_CONSISTENT) { 6943 rval = i_ddi_mem_alloc(dip, attrp, length, sleepflag, 6944 flags, accattrp, kaddrp, NULL, ap); 6945 *real_length = length; 6946 } else { 6947 rval = i_ddi_mem_alloc(dip, attrp, length, sleepflag, 6948 flags, accattrp, kaddrp, real_length, ap); 6949 } 6950 if (rval == DDI_SUCCESS) { 6951 ap->ah_len = (off_t)(*real_length); 6952 ap->ah_addr = *kaddrp; 6953 } else { 6954 impl_acc_hdl_free(*handlep); 6955 *handlep = (ddi_acc_handle_t)NULL; 6956 if (waitfp != DDI_DMA_SLEEP && waitfp != DDI_DMA_DONTWAIT) { 6957 ddi_set_callback(waitfp, arg, &dma_mem_list_id); 6958 } 6959 rval = DDI_FAILURE; 6960 } 6961 return (rval); 6962 } 6963 6964 void 6965 ddi_dma_mem_free(ddi_acc_handle_t *handlep) 6966 { 6967 ddi_acc_hdl_t *ap; 6968 6969 ap = impl_acc_hdl_get(*handlep); 6970 ASSERT(ap); 6971 6972 i_ddi_mem_free((caddr_t)ap->ah_addr, ap); 6973 6974 /* 6975 * free the handle 6976 */ 6977 impl_acc_hdl_free(*handlep); 6978 *handlep = (ddi_acc_handle_t)NULL; 6979 6980 if (dma_mem_list_id != 0) { 6981 ddi_run_callback(&dma_mem_list_id); 6982 } 6983 } 6984 6985 int 6986 ddi_dma_buf_bind_handle(ddi_dma_handle_t handle, struct buf *bp, 6987 uint_t flags, int (*waitfp)(caddr_t), caddr_t arg, 6988 ddi_dma_cookie_t *cookiep, uint_t *ccountp) 6989 { 6990 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle; 6991 dev_info_t *dip, *rdip; 6992 
struct ddi_dma_req dmareq; 6993 int (*funcp)(); 6994 ddi_dma_cookie_t cookie; 6995 uint_t count; 6996 6997 if (cookiep == NULL) 6998 cookiep = &cookie; 6999 7000 if (ccountp == NULL) 7001 ccountp = &count; 7002 7003 dmareq.dmar_flags = flags; 7004 dmareq.dmar_fp = waitfp; 7005 dmareq.dmar_arg = arg; 7006 dmareq.dmar_object.dmao_size = (uint_t)bp->b_bcount; 7007 7008 if (bp->b_flags & B_PAGEIO) { 7009 dmareq.dmar_object.dmao_type = DMA_OTYP_PAGES; 7010 dmareq.dmar_object.dmao_obj.pp_obj.pp_pp = bp->b_pages; 7011 dmareq.dmar_object.dmao_obj.pp_obj.pp_offset = 7012 (uint_t)(((uintptr_t)bp->b_un.b_addr) & MMU_PAGEOFFSET); 7013 } else { 7014 dmareq.dmar_object.dmao_obj.virt_obj.v_addr = bp->b_un.b_addr; 7015 if (bp->b_flags & B_SHADOW) { 7016 dmareq.dmar_object.dmao_obj.virt_obj.v_priv = 7017 bp->b_shadow; 7018 dmareq.dmar_object.dmao_type = DMA_OTYP_BUFVADDR; 7019 } else { 7020 dmareq.dmar_object.dmao_type = 7021 (bp->b_flags & (B_PHYS | B_REMAPPED)) ? 7022 DMA_OTYP_BUFVADDR : DMA_OTYP_VADDR; 7023 dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL; 7024 } 7025 7026 /* 7027 * If the buffer has no proc pointer, or the proc 7028 * struct has the kernel address space, or the buffer has 7029 * been marked B_REMAPPED (meaning that it is now 7030 * mapped into the kernel's address space), then 7031 * the address space is kas (kernel address space). 
7032 */ 7033 if ((bp->b_proc == NULL) || (bp->b_proc->p_as == &kas) || 7034 (bp->b_flags & B_REMAPPED)) { 7035 dmareq.dmar_object.dmao_obj.virt_obj.v_as = 0; 7036 } else { 7037 dmareq.dmar_object.dmao_obj.virt_obj.v_as = 7038 bp->b_proc->p_as; 7039 } 7040 } 7041 7042 dip = rdip = hp->dmai_rdip; 7043 if (dip != ddi_root_node()) 7044 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl; 7045 funcp = DEVI(rdip)->devi_bus_dma_bindfunc; 7046 return ((*funcp)(dip, rdip, handle, &dmareq, cookiep, ccountp)); 7047 } 7048 7049 int 7050 ddi_dma_addr_bind_handle(ddi_dma_handle_t handle, struct as *as, 7051 caddr_t addr, size_t len, uint_t flags, int (*waitfp)(caddr_t), 7052 caddr_t arg, ddi_dma_cookie_t *cookiep, uint_t *ccountp) 7053 { 7054 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle; 7055 dev_info_t *dip, *rdip; 7056 struct ddi_dma_req dmareq; 7057 int (*funcp)(); 7058 ddi_dma_cookie_t cookie; 7059 uint_t count; 7060 7061 if (len == (uint_t)0) { 7062 return (DDI_DMA_NOMAPPING); 7063 } 7064 7065 if (cookiep == NULL) 7066 cookiep = &cookie; 7067 7068 if (ccountp == NULL) 7069 ccountp = &count; 7070 7071 dmareq.dmar_flags = flags; 7072 dmareq.dmar_fp = waitfp; 7073 dmareq.dmar_arg = arg; 7074 dmareq.dmar_object.dmao_size = len; 7075 dmareq.dmar_object.dmao_type = DMA_OTYP_VADDR; 7076 dmareq.dmar_object.dmao_obj.virt_obj.v_as = as; 7077 dmareq.dmar_object.dmao_obj.virt_obj.v_addr = addr; 7078 dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL; 7079 7080 dip = rdip = hp->dmai_rdip; 7081 if (dip != ddi_root_node()) 7082 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl; 7083 funcp = DEVI(rdip)->devi_bus_dma_bindfunc; 7084 return ((*funcp)(dip, rdip, handle, &dmareq, cookiep, ccountp)); 7085 } 7086 7087 void 7088 ddi_dma_nextcookie(ddi_dma_handle_t handle, ddi_dma_cookie_t *cookiep) 7089 { 7090 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle; 7091 ddi_dma_cookie_t *cp; 7092 7093 if (hp->dmai_curcookie >= hp->dmai_ncookies) { 7094 panic("ddi_dma_nextcookie() called too many times 
on handle %p", 7095 hp); 7096 } 7097 7098 cp = hp->dmai_cookie; 7099 ASSERT(cp); 7100 7101 cookiep->dmac_notused = cp->dmac_notused; 7102 cookiep->dmac_type = cp->dmac_type; 7103 cookiep->dmac_address = cp->dmac_address; 7104 cookiep->dmac_size = cp->dmac_size; 7105 hp->dmai_cookie++; 7106 hp->dmai_curcookie++; 7107 } 7108 7109 int 7110 ddi_dma_ncookies(ddi_dma_handle_t handle) 7111 { 7112 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle; 7113 7114 return (hp->dmai_ncookies); 7115 } 7116 7117 const ddi_dma_cookie_t * 7118 ddi_dma_cookie_iter(ddi_dma_handle_t handle, const ddi_dma_cookie_t *iter) 7119 { 7120 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle; 7121 const ddi_dma_cookie_t *base, *end; 7122 7123 if (hp->dmai_ncookies == 0) { 7124 return (NULL); 7125 } 7126 7127 base = hp->dmai_cookie - hp->dmai_curcookie; 7128 end = base + hp->dmai_ncookies; 7129 if (iter == NULL) { 7130 return (base); 7131 } 7132 7133 if ((uintptr_t)iter < (uintptr_t)base || 7134 (uintptr_t)iter >= (uintptr_t)end) { 7135 return (NULL); 7136 } 7137 7138 iter++; 7139 if (iter == end) { 7140 return (NULL); 7141 } 7142 7143 return (iter); 7144 } 7145 7146 const ddi_dma_cookie_t * 7147 ddi_dma_cookie_get(ddi_dma_handle_t handle, uint_t index) 7148 { 7149 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle; 7150 const ddi_dma_cookie_t *base; 7151 7152 if (index >= hp->dmai_ncookies) { 7153 return (NULL); 7154 } 7155 7156 base = hp->dmai_cookie - hp->dmai_curcookie; 7157 return (base + index); 7158 } 7159 7160 const ddi_dma_cookie_t * 7161 ddi_dma_cookie_one(ddi_dma_handle_t handle) 7162 { 7163 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle; 7164 const ddi_dma_cookie_t *base; 7165 7166 if (hp->dmai_ncookies != 1) { 7167 panic("ddi_dma_cookie_one() called with improper handle %p", 7168 hp); 7169 } 7170 ASSERT3P(hp->dmai_cookie, !=, NULL); 7171 7172 base = hp->dmai_cookie - hp->dmai_curcookie; 7173 return (base); 7174 } 7175 7176 int 7177 ddi_dma_numwin(ddi_dma_handle_t handle, uint_t *nwinp) 7178 { 7179 
ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle; 7180 if ((hp->dmai_rflags & DDI_DMA_PARTIAL) == 0) { 7181 return (DDI_FAILURE); 7182 } else { 7183 *nwinp = hp->dmai_nwin; 7184 return (DDI_SUCCESS); 7185 } 7186 } 7187 7188 int 7189 ddi_dma_getwin(ddi_dma_handle_t h, uint_t win, off_t *offp, 7190 size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp) 7191 { 7192 int (*funcp)() = ddi_dma_win; 7193 struct bus_ops *bop; 7194 ddi_dma_cookie_t cookie; 7195 uint_t count; 7196 7197 bop = DEVI(HD)->devi_ops->devo_bus_ops; 7198 if (bop && bop->bus_dma_win) 7199 funcp = bop->bus_dma_win; 7200 7201 if (cookiep == NULL) 7202 cookiep = &cookie; 7203 7204 if (ccountp == NULL) 7205 ccountp = &count; 7206 7207 return ((*funcp)(HD, HD, h, win, offp, lenp, cookiep, ccountp)); 7208 } 7209 7210 int 7211 ddi_dma_set_sbus64(ddi_dma_handle_t h, ulong_t burstsizes) 7212 { 7213 return (ddi_dma_mctl(HD, HD, h, DDI_DMA_SET_SBUS64, 0, 7214 &burstsizes, 0, 0)); 7215 } 7216 7217 int 7218 i_ddi_dma_fault_check(ddi_dma_impl_t *hp) 7219 { 7220 return (hp->dmai_fault); 7221 } 7222 7223 int 7224 ddi_check_dma_handle(ddi_dma_handle_t handle) 7225 { 7226 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle; 7227 int (*check)(ddi_dma_impl_t *); 7228 7229 if ((check = hp->dmai_fault_check) == NULL) 7230 check = i_ddi_dma_fault_check; 7231 7232 return (((*check)(hp) == DDI_SUCCESS) ? 
DDI_SUCCESS : DDI_FAILURE); 7233 } 7234 7235 void 7236 i_ddi_dma_set_fault(ddi_dma_handle_t handle) 7237 { 7238 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle; 7239 void (*notify)(ddi_dma_impl_t *); 7240 7241 if (!hp->dmai_fault) { 7242 hp->dmai_fault = 1; 7243 if ((notify = hp->dmai_fault_notify) != NULL) 7244 (*notify)(hp); 7245 } 7246 } 7247 7248 void 7249 i_ddi_dma_clr_fault(ddi_dma_handle_t handle) 7250 { 7251 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle; 7252 void (*notify)(ddi_dma_impl_t *); 7253 7254 if (hp->dmai_fault) { 7255 hp->dmai_fault = 0; 7256 if ((notify = hp->dmai_fault_notify) != NULL) 7257 (*notify)(hp); 7258 } 7259 } 7260 7261 /* 7262 * register mapping routines. 7263 */ 7264 int 7265 ddi_regs_map_setup(dev_info_t *dip, uint_t rnumber, caddr_t *addrp, 7266 offset_t offset, offset_t len, ddi_device_acc_attr_t *accattrp, 7267 ddi_acc_handle_t *handle) 7268 { 7269 ddi_map_req_t mr; 7270 ddi_acc_hdl_t *hp; 7271 int result; 7272 7273 /* 7274 * Allocate and initialize the common elements of data access handle. 7275 */ 7276 *handle = impl_acc_hdl_alloc(KM_SLEEP, NULL); 7277 hp = impl_acc_hdl_get(*handle); 7278 hp->ah_vers = VERS_ACCHDL; 7279 hp->ah_dip = dip; 7280 hp->ah_rnumber = rnumber; 7281 hp->ah_offset = offset; 7282 hp->ah_len = len; 7283 hp->ah_acc = *accattrp; 7284 7285 /* 7286 * Set up the mapping request and call to parent. 
7287 */ 7288 mr.map_op = DDI_MO_MAP_LOCKED; 7289 mr.map_type = DDI_MT_RNUMBER; 7290 mr.map_obj.rnumber = rnumber; 7291 mr.map_prot = PROT_READ | PROT_WRITE; 7292 mr.map_flags = DDI_MF_KERNEL_MAPPING; 7293 mr.map_handlep = hp; 7294 mr.map_vers = DDI_MAP_VERSION; 7295 result = ddi_map(dip, &mr, offset, len, addrp); 7296 7297 /* 7298 * check for end result 7299 */ 7300 if (result != DDI_SUCCESS) { 7301 impl_acc_hdl_free(*handle); 7302 *handle = (ddi_acc_handle_t)NULL; 7303 } else { 7304 hp->ah_addr = *addrp; 7305 } 7306 7307 return (result); 7308 } 7309 7310 void 7311 ddi_regs_map_free(ddi_acc_handle_t *handlep) 7312 { 7313 ddi_map_req_t mr; 7314 ddi_acc_hdl_t *hp; 7315 7316 hp = impl_acc_hdl_get(*handlep); 7317 ASSERT(hp); 7318 7319 mr.map_op = DDI_MO_UNMAP; 7320 mr.map_type = DDI_MT_RNUMBER; 7321 mr.map_obj.rnumber = hp->ah_rnumber; 7322 mr.map_prot = PROT_READ | PROT_WRITE; 7323 mr.map_flags = DDI_MF_KERNEL_MAPPING; 7324 mr.map_handlep = hp; 7325 mr.map_vers = DDI_MAP_VERSION; 7326 7327 /* 7328 * Call my parent to unmap my regs. 
7329 */ 7330 (void) ddi_map(hp->ah_dip, &mr, hp->ah_offset, 7331 hp->ah_len, &hp->ah_addr); 7332 /* 7333 * free the handle 7334 */ 7335 impl_acc_hdl_free(*handlep); 7336 *handlep = (ddi_acc_handle_t)NULL; 7337 } 7338 7339 int 7340 ddi_device_zero(ddi_acc_handle_t handle, caddr_t dev_addr, size_t bytecount, 7341 ssize_t dev_advcnt, uint_t dev_datasz) 7342 { 7343 uint8_t *b; 7344 uint16_t *w; 7345 uint32_t *l; 7346 uint64_t *ll; 7347 7348 /* check for total byte count is multiple of data transfer size */ 7349 if (bytecount != ((bytecount / dev_datasz) * dev_datasz)) 7350 return (DDI_FAILURE); 7351 7352 switch (dev_datasz) { 7353 case DDI_DATA_SZ01_ACC: 7354 for (b = (uint8_t *)dev_addr; 7355 bytecount != 0; bytecount -= 1, b += dev_advcnt) 7356 ddi_put8(handle, b, 0); 7357 break; 7358 case DDI_DATA_SZ02_ACC: 7359 for (w = (uint16_t *)dev_addr; 7360 bytecount != 0; bytecount -= 2, w += dev_advcnt) 7361 ddi_put16(handle, w, 0); 7362 break; 7363 case DDI_DATA_SZ04_ACC: 7364 for (l = (uint32_t *)dev_addr; 7365 bytecount != 0; bytecount -= 4, l += dev_advcnt) 7366 ddi_put32(handle, l, 0); 7367 break; 7368 case DDI_DATA_SZ08_ACC: 7369 for (ll = (uint64_t *)dev_addr; 7370 bytecount != 0; bytecount -= 8, ll += dev_advcnt) 7371 ddi_put64(handle, ll, 0x0ll); 7372 break; 7373 default: 7374 return (DDI_FAILURE); 7375 } 7376 return (DDI_SUCCESS); 7377 } 7378 7379 int 7380 ddi_device_copy( 7381 ddi_acc_handle_t src_handle, caddr_t src_addr, ssize_t src_advcnt, 7382 ddi_acc_handle_t dest_handle, caddr_t dest_addr, ssize_t dest_advcnt, 7383 size_t bytecount, uint_t dev_datasz) 7384 { 7385 uint8_t *b_src, *b_dst; 7386 uint16_t *w_src, *w_dst; 7387 uint32_t *l_src, *l_dst; 7388 uint64_t *ll_src, *ll_dst; 7389 7390 /* check for total byte count is multiple of data transfer size */ 7391 if (bytecount != ((bytecount / dev_datasz) * dev_datasz)) 7392 return (DDI_FAILURE); 7393 7394 switch (dev_datasz) { 7395 case DDI_DATA_SZ01_ACC: 7396 b_src = (uint8_t *)src_addr; 7397 b_dst = (uint8_t 
*)dest_addr; 7398 7399 for (; bytecount != 0; bytecount -= 1) { 7400 ddi_put8(dest_handle, b_dst, 7401 ddi_get8(src_handle, b_src)); 7402 b_dst += dest_advcnt; 7403 b_src += src_advcnt; 7404 } 7405 break; 7406 case DDI_DATA_SZ02_ACC: 7407 w_src = (uint16_t *)src_addr; 7408 w_dst = (uint16_t *)dest_addr; 7409 7410 for (; bytecount != 0; bytecount -= 2) { 7411 ddi_put16(dest_handle, w_dst, 7412 ddi_get16(src_handle, w_src)); 7413 w_dst += dest_advcnt; 7414 w_src += src_advcnt; 7415 } 7416 break; 7417 case DDI_DATA_SZ04_ACC: 7418 l_src = (uint32_t *)src_addr; 7419 l_dst = (uint32_t *)dest_addr; 7420 7421 for (; bytecount != 0; bytecount -= 4) { 7422 ddi_put32(dest_handle, l_dst, 7423 ddi_get32(src_handle, l_src)); 7424 l_dst += dest_advcnt; 7425 l_src += src_advcnt; 7426 } 7427 break; 7428 case DDI_DATA_SZ08_ACC: 7429 ll_src = (uint64_t *)src_addr; 7430 ll_dst = (uint64_t *)dest_addr; 7431 7432 for (; bytecount != 0; bytecount -= 8) { 7433 ddi_put64(dest_handle, ll_dst, 7434 ddi_get64(src_handle, ll_src)); 7435 ll_dst += dest_advcnt; 7436 ll_src += src_advcnt; 7437 } 7438 break; 7439 default: 7440 return (DDI_FAILURE); 7441 } 7442 return (DDI_SUCCESS); 7443 } 7444 7445 #define swap16(value) \ 7446 ((((value) & 0xff) << 8) | ((value) >> 8)) 7447 7448 #define swap32(value) \ 7449 (((uint32_t)swap16((uint16_t)((value) & 0xffff)) << 16) | \ 7450 (uint32_t)swap16((uint16_t)((value) >> 16))) 7451 7452 #define swap64(value) \ 7453 (((uint64_t)swap32((uint32_t)((value) & 0xffffffff)) \ 7454 << 32) | \ 7455 (uint64_t)swap32((uint32_t)((value) >> 32))) 7456 7457 uint16_t 7458 ddi_swap16(uint16_t value) 7459 { 7460 return (swap16(value)); 7461 } 7462 7463 uint32_t 7464 ddi_swap32(uint32_t value) 7465 { 7466 return (swap32(value)); 7467 } 7468 7469 uint64_t 7470 ddi_swap64(uint64_t value) 7471 { 7472 return (swap64(value)); 7473 } 7474 7475 /* 7476 * Convert a binding name to a driver name. 
7477 * A binding name is the name used to determine the driver for a 7478 * device - it may be either an alias for the driver or the name 7479 * of the driver itself. 7480 */ 7481 char * 7482 i_binding_to_drv_name(char *bname) 7483 { 7484 major_t major_no; 7485 7486 ASSERT(bname != NULL); 7487 7488 if ((major_no = ddi_name_to_major(bname)) == -1) 7489 return (NULL); 7490 return (ddi_major_to_name(major_no)); 7491 } 7492 7493 /* 7494 * Search for minor name that has specified dev_t and spec_type. 7495 * If spec_type is zero then any dev_t match works. Since we 7496 * are returning a pointer to the minor name string, we require the 7497 * caller to do the locking. 7498 */ 7499 char * 7500 i_ddi_devtspectype_to_minorname(dev_info_t *dip, dev_t dev, int spec_type) 7501 { 7502 struct ddi_minor_data *dmdp; 7503 7504 /* 7505 * The did layered driver currently intentionally returns a 7506 * devinfo ptr for an underlying sd instance based on a did 7507 * dev_t. In this case it is not an error. 7508 * 7509 * The did layered driver is associated with Sun Cluster. 7510 */ 7511 ASSERT((ddi_driver_major(dip) == getmajor(dev)) || 7512 (strcmp(ddi_major_to_name(getmajor(dev)), "did") == 0)); 7513 7514 ASSERT(DEVI_BUSY_OWNED(dip)); 7515 for (dmdp = DEVI(dip)->devi_minor; dmdp; dmdp = dmdp->next) { 7516 if (((dmdp->type == DDM_MINOR) || 7517 (dmdp->type == DDM_INTERNAL_PATH) || 7518 (dmdp->type == DDM_DEFAULT)) && 7519 (dmdp->ddm_dev == dev) && 7520 ((((spec_type & (S_IFCHR|S_IFBLK))) == 0) || 7521 (dmdp->ddm_spec_type == spec_type))) 7522 return (dmdp->ddm_name); 7523 } 7524 7525 return (NULL); 7526 } 7527 7528 /* 7529 * Find the devt and spectype of the specified minor_name. 7530 * Return DDI_FAILURE if minor_name not found. Since we are 7531 * returning everything via arguments we can do the locking. 
 */
int
i_ddi_minorname_to_devtspectype(dev_info_t *dip, char *minor_name,
    dev_t *devtp, int *spectypep)
{
	int circ;
	struct ddi_minor_data *dmdp;

	/* deal with clone minor nodes */
	if (dip == clone_dip) {
		major_t major;
		/*
		 * Make sure minor_name is a STREAMS driver.
		 * We load the driver but don't attach to any instances.
		 */

		major = ddi_name_to_major(minor_name);
		if (major == DDI_MAJOR_T_NONE)
			return (DDI_FAILURE);

		/* hold only long enough to inspect the STREAMS table */
		if (ddi_hold_driver(major) == NULL)
			return (DDI_FAILURE);

		if (STREAMSTAB(major) == NULL) {
			ddi_rele_driver(major);
			return (DDI_FAILURE);
		}
		ddi_rele_driver(major);

		/* clone dev_t encodes the target driver's major as minor */
		if (devtp)
			*devtp = makedevice(clone_major, (minor_t)major);

		if (spectypep)
			*spectypep = S_IFCHR;

		return (DDI_SUCCESS);
	}

	/* normal case: walk the node's minor list under the devi lock */
	ndi_devi_enter(dip, &circ);
	for (dmdp = DEVI(dip)->devi_minor; dmdp; dmdp = dmdp->next) {
		if (((dmdp->type != DDM_MINOR) &&
		    (dmdp->type != DDM_INTERNAL_PATH) &&
		    (dmdp->type != DDM_DEFAULT)) ||
		    strcmp(minor_name, dmdp->ddm_name))
			continue;

		if (devtp)
			*devtp = dmdp->ddm_dev;

		if (spectypep)
			*spectypep = dmdp->ddm_spec_type;

		ndi_devi_exit(dip, circ);
		return (DDI_SUCCESS);
	}
	ndi_devi_exit(dip, circ);

	return (DDI_FAILURE);
}

/* serialize generation-number allocation for DEVID_FAB devids */
static kmutex_t devid_gen_mutex;
static short devid_gen_number;

#ifdef DEBUG

/* knobs for deliberately corrupting devids under test (see below) */
static int devid_register_corrupt = 0;
static int devid_register_corrupt_major = 0;
static int devid_register_corrupt_hint = 0;
static int devid_register_corrupt_hint_major = 0;

static int devid_lyr_debug = 0;

#define	DDI_DEBUG_DEVID_DEVTS(msg, ndevs, devs)		\
	if (devid_lyr_debug)					\
		ddi_debug_devid_devts(msg, ndevs, devs)

#else

#define	DDI_DEBUG_DEVID_DEVTS(msg, ndevs, devs)

#endif /* DEBUG */


#ifdef DEBUG

/* Print a list of dev_ts under a message header. */
static void
ddi_debug_devid_devts(char *msg, int ndevs, dev_t *devs)
{
	int i;

	cmn_err(CE_CONT, "%s:\n", msg);
	for (i = 0; i < ndevs; i++) {
		cmn_err(CE_CONT, "    0x%lx\n", devs[i]);
	}
}

/* Print a list of device paths under a message header. */
static void
ddi_debug_devid_paths(char *msg, int npaths, char **paths)
{
	int i;

	cmn_err(CE_CONT, "%s:\n", msg);
	for (i = 0; i < npaths; i++) {
		cmn_err(CE_CONT, "    %s\n", paths[i]);
	}
}

/* Print the dev_ts associated with one device path. */
static void
ddi_debug_devid_devts_per_path(char *path, int ndevs, dev_t *devs)
{
	int i;

	cmn_err(CE_CONT, "dev_ts per path %s\n", path);
	for (i = 0; i < ndevs; i++) {
		cmn_err(CE_CONT, "    0x%lx\n", devs[i]);
	}
}

#endif /* DEBUG */

/*
 * Register device id into DDI framework.
 * Must be called when the driver is bound.
 */
static int
i_ddi_devid_register(dev_info_t *dip, ddi_devid_t devid)
{
	impl_devid_t *i_devid = (impl_devid_t *)devid;
	size_t driver_len;
	const char *driver_name;
	char *devid_str;
	major_t major;

	if ((dip == NULL) ||
	    ((major = ddi_driver_major(dip)) == DDI_MAJOR_T_NONE))
		return (DDI_FAILURE);

	/* verify that the devid is valid */
	if (ddi_devid_valid(devid) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/* Updating driver name hint in devid */
	driver_name = ddi_driver_name(dip);
	driver_len = strlen(driver_name);
	if (driver_len > DEVID_HINT_SIZE) {
		/* Pick up last four characters of driver name */
		driver_name += driver_len - DEVID_HINT_SIZE;
		driver_len = DEVID_HINT_SIZE;
	}
	bzero(i_devid->did_driver, DEVID_HINT_SIZE);
	bcopy(driver_name, i_devid->did_driver, driver_len);

#ifdef DEBUG
	/* Corrupt the devid for testing. */
	if (devid_register_corrupt)
		i_devid->did_id[0] += devid_register_corrupt;
	if (devid_register_corrupt_major &&
	    (major == devid_register_corrupt_major))
		i_devid->did_id[0] += 1;
	if (devid_register_corrupt_hint)
		i_devid->did_driver[0] += devid_register_corrupt_hint;
	if (devid_register_corrupt_hint_major &&
	    (major == devid_register_corrupt_hint_major))
		i_devid->did_driver[0] += 1;
#endif /* DEBUG */

	/* encode the devid as a string */
	if ((devid_str = ddi_devid_str_encode(devid, NULL)) == NULL)
		return (DDI_FAILURE);

	/* add string as a string property */
	if (ndi_prop_update_string(DDI_DEV_T_NONE, dip,
	    DEVID_PROP_NAME, devid_str) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s%d: devid property update failed",
		    ddi_driver_name(dip), ddi_get_instance(dip));
		ddi_devid_str_free(devid_str);
		return (DDI_FAILURE);
	}

	/* keep pointer to devid string for interrupt context fma code */
	if (DEVI(dip)->devi_devid_str)
		ddi_devid_str_free(DEVI(dip)->devi_devid_str);
	DEVI(dip)->devi_devid_str = devid_str;
	return (DDI_SUCCESS);
}

/*
 * Public wrapper: register the devid and additionally enter it into
 * the devid-to-path cache, recording DEVI_CACHED_DEVID on success.
 */
int
ddi_devid_register(dev_info_t *dip, ddi_devid_t devid)
{
	int rval;

	rval = i_ddi_devid_register(dip, devid);
	if (rval == DDI_SUCCESS) {
		/*
		 * Register devid in devid-to-path cache
		 */
		if (e_devid_cache_register(dip, devid) == DDI_SUCCESS) {
			mutex_enter(&DEVI(dip)->devi_lock);
			DEVI(dip)->devi_flags |= DEVI_CACHED_DEVID;
			mutex_exit(&DEVI(dip)->devi_lock);
		} else if (ddi_get_name_addr(dip)) {
			/*
			 * We only expect cache_register DDI_FAILURE when we
			 * can't form the full path because of NULL devi_addr.
			 */
			cmn_err(CE_WARN, "%s%d: failed to cache devid",
			    ddi_driver_name(dip), ddi_get_instance(dip));
		}
	} else {
		cmn_err(CE_WARN, "%s%d: failed to register devid",
		    ddi_driver_name(dip), ddi_get_instance(dip));
	}
	return (rval);
}

/*
 * Remove (unregister) device id from DDI framework.
 * Must be called when device is detached.
 */
static void
i_ddi_devid_unregister(dev_info_t *dip)
{
	if (DEVI(dip)->devi_devid_str) {
		ddi_devid_str_free(DEVI(dip)->devi_devid_str);
		DEVI(dip)->devi_devid_str = NULL;
	}

	/* remove the devid property */
	(void) ndi_prop_remove(DDI_DEV_T_NONE, dip, DEVID_PROP_NAME);
}

/*
 * Public wrapper: drop the cached-devid flag, remove the devid from the
 * devid-to-path cache, then unregister it from the framework.
 */
void
ddi_devid_unregister(dev_info_t *dip)
{
	mutex_enter(&DEVI(dip)->devi_lock);
	DEVI(dip)->devi_flags &= ~DEVI_CACHED_DEVID;
	mutex_exit(&DEVI(dip)->devi_lock);
	e_devid_cache_unregister(dip);
	i_ddi_devid_unregister(dip);
}

/*
 * Allocate and initialize a device id.
 */
int
ddi_devid_init(
	dev_info_t	*dip,
	ushort_t	devid_type,
	ushort_t	nbytes,
	void		*id,
	ddi_devid_t	*ret_devid)
{
	impl_devid_t	*i_devid;
	int		sz = sizeof (*i_devid) + nbytes - sizeof (char);
	int		driver_len;
	const char	*driver_name;

	switch (devid_type) {
	case DEVID_SCSI3_WWN:
		/*FALLTHRU*/
	case DEVID_SCSI_SERIAL:
		/*FALLTHRU*/
	case DEVID_ATA_SERIAL:
		/*FALLTHRU*/
	case DEVID_ENCAP:
		/* caller-supplied id types require non-empty id data */
		if (nbytes == 0)
			return (DDI_FAILURE);
		if (id == NULL)
			return (DDI_FAILURE);
		break;
	case DEVID_FAB:
		/* fabricated ids take no caller data; size is fixed below */
		if (nbytes != 0)
			return (DDI_FAILURE);
		if (id != NULL)
			return (DDI_FAILURE);
		nbytes = sizeof (int) +
		    sizeof (struct timeval32) + sizeof (short);
		sz += nbytes;
		break;
	default:
		return (DDI_FAILURE);
	}

	if ((i_devid = kmem_zalloc(sz, KM_SLEEP)) == NULL)
		return (DDI_FAILURE);

	i_devid->did_magic_hi = DEVID_MAGIC_MSB;
	i_devid->did_magic_lo = DEVID_MAGIC_LSB;
	i_devid->did_rev_hi = DEVID_REV_MSB;
	i_devid->did_rev_lo = DEVID_REV_LSB;
	DEVID_FORMTYPE(i_devid, devid_type);
	DEVID_FORMLEN(i_devid, nbytes);

	/* Fill in driver name hint */
	driver_name = ddi_driver_name(dip);
	driver_len = strlen(driver_name);
	if (driver_len > DEVID_HINT_SIZE) {
		/* Pick up last four characters of driver name */
		driver_name += driver_len - DEVID_HINT_SIZE;
		driver_len = DEVID_HINT_SIZE;
	}

	bcopy(driver_name, i_devid->did_driver, driver_len);

	/* Fill in id field */
	if (devid_type == DEVID_FAB) {
		char		*cp;
		uint32_t	hostid;
		struct timeval32 timestamp32;
		int		i;
		int		*ip;
		short		gen;

		/* increase the generation number */
		mutex_enter(&devid_gen_mutex);
		gen = devid_gen_number++;
		mutex_exit(&devid_gen_mutex);

		cp = i_devid->did_id;

		/* Fill in host id (big-endian byte ordering) */
		hostid = zone_get_hostid(NULL);
		*cp++ = hibyte(hiword(hostid));
		*cp++ = lobyte(hiword(hostid));
		*cp++ = hibyte(loword(hostid));
		*cp++ = lobyte(loword(hostid));

		/*
		 * Fill in timestamp (big-endian byte ordering)
		 *
		 * (Note that the format may have to be changed
		 * before 2038 comes around, though it's arguably
		 * unique enough as it is..)
		 */
		uniqtime32(&timestamp32);
		ip = (int *)&timestamp32;
		for (i = 0;
		    i < sizeof (timestamp32) / sizeof (int); i++, ip++) {
			int	val;
			val = *ip;
			*cp++ = hibyte(hiword(val));
			*cp++ = lobyte(hiword(val));
			*cp++ = hibyte(loword(val));
			*cp++ = lobyte(loword(val));
		}

		/* fill in the generation number */
		*cp++ = hibyte(gen);
		*cp++ = lobyte(gen);
	} else
		bcopy(id, i_devid->did_id, nbytes);

	/* return device id */
	*ret_devid = (ddi_devid_t)i_devid;
	return (DDI_SUCCESS);
}

/* Retrieve the devid of dip (any dev_t); see i_ddi_devi_get_devid(). */
int
ddi_devid_get(dev_info_t *dip, ddi_devid_t *ret_devid)
{
	return (i_ddi_devi_get_devid(DDI_DEV_T_ANY, dip, ret_devid));
}

/*
 * Look up the devid property on dip (dev_t-specific first, then any
 * dev_t) and decode it to binary form in *ret_devid.  The caller owns
 * the returned devid.
 */
int
i_ddi_devi_get_devid(dev_t dev, dev_info_t *dip, ddi_devid_t *ret_devid)
{
	char		*devidstr;

	ASSERT(dev != DDI_DEV_T_NONE);

	/* look up the property, devt specific first */
	if (ddi_prop_lookup_string(dev, dip, DDI_PROP_DONTPASS,
	    DEVID_PROP_NAME, &devidstr) != DDI_PROP_SUCCESS) {
		if ((dev == DDI_DEV_T_ANY) ||
		    (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip,
		    DDI_PROP_DONTPASS, DEVID_PROP_NAME, &devidstr) !=
		    DDI_PROP_SUCCESS)) {
			return (DDI_FAILURE);
		}
	}

	/* convert to binary form */
	if (ddi_devid_str_decode(devidstr, ret_devid, NULL) == -1) {
		ddi_prop_free(devidstr);
		return (DDI_FAILURE);
	}
	ddi_prop_free(devidstr);
	return (DDI_SUCCESS);
}

/*
 * Return a copy of the device id for dev_t
 */
int
ddi_lyr_get_devid(dev_t dev, ddi_devid_t *ret_devid)
{
	dev_info_t	*dip;
	int		rval;

	/* get the dip */
	if ((dip = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
		return (DDI_FAILURE);

	rval = i_ddi_devi_get_devid(dev, dip, ret_devid);

	ddi_release_devi(dip);		/* e_ddi_hold_devi_by_dev() */
	return (rval);
}

/*
 * Return a copy of the minor name for dev_t and spec_type
 */
int
ddi_lyr_get_minor_name(dev_t dev, int spec_type, char **minor_name)
{
	char		*buf;
	int		circ;
	dev_info_t	*dip;
	char		*nm;
	int		rval;

	if ((dip = e_ddi_hold_devi_by_dev(dev, 0)) == NULL) {
		*minor_name = NULL;
		return (DDI_FAILURE);
	}

	/* Find the minor name and copy into max size buf */
	buf = kmem_alloc(MAXNAMELEN, KM_SLEEP);
	ndi_devi_enter(dip, &circ);
	nm = i_ddi_devtspectype_to_minorname(dip, dev, spec_type);
	if (nm)
		(void) strcpy(buf, nm);
	ndi_devi_exit(dip, circ);
	ddi_release_devi(dip);	/* e_ddi_hold_devi_by_dev() */

	if (nm) {
		/* duplicate into min size buf for return result */
		*minor_name = i_ddi_strdup(buf, KM_SLEEP);
		rval = DDI_SUCCESS;
	} else {
		*minor_name = NULL;
		rval = DDI_FAILURE;
	}

	/* free max size buf and return */
	kmem_free(buf, MAXNAMELEN);
	return (rval);
}

/*
 * Resolve a devid (plus optional minor_name) to the list of matching
 * dev_ts via the devid cache, running devid discovery once if the
 * first cache lookup misses.  On DDI_SUCCESS the caller frees the list
 * with ddi_lyr_free_devlist().
 */
int
ddi_lyr_devid_to_devlist(
	ddi_devid_t	devid,
	char		*minor_name,
	int		*retndevs,
	dev_t		**retdevs)
{
	ASSERT(ddi_devid_valid(devid) == DDI_SUCCESS);

	if (e_devid_cache_to_devt_list(devid, minor_name,
	    retndevs, retdevs) == DDI_SUCCESS) {
		ASSERT(*retndevs > 0);
		DDI_DEBUG_DEVID_DEVTS("ddi_lyr_devid_to_devlist",
		    *retndevs, *retdevs);
		return (DDI_SUCCESS);
	}

	if (e_ddi_devid_discovery(devid) == DDI_FAILURE) {
		return (DDI_FAILURE);
	}

	/* retry the cache after discovery has repopulated it */
	if (e_devid_cache_to_devt_list(devid, minor_name,
	    retndevs, retdevs) == DDI_SUCCESS) {
		ASSERT(*retndevs > 0);
		DDI_DEBUG_DEVID_DEVTS("ddi_lyr_devid_to_devlist",
		    *retndevs, *retdevs);
		return (DDI_SUCCESS);
	}

	return (DDI_FAILURE);
}

/* Free a dev_t list returned by ddi_lyr_devid_to_devlist(). */
void
ddi_lyr_free_devlist(dev_t *devlist, int ndevs)
{
	kmem_free(devlist, sizeof (dev_t) * ndevs);
}

/*
 * Note: This will need to be fixed if we ever allow processes to
 * have more than one data model per exec.
 */
model_t
ddi_mmap_get_model(void)
{
	return (get_udatamodel());
}

/* Normalize a data-model value to its DDI model bits. */
model_t
ddi_model_convert_from(model_t model)
{
	return ((model & DDI_MODEL_MASK) & ~DDI_MODEL_NATIVE);
}

/*
 * ddi interfaces managing storage and retrieval of eventcookies.
 */

/*
 * Invoke bus nexus driver's implementation of the
 * (*bus_remove_eventcall)() interface to remove a registered
 * callback handler for "event".
 */
int
ddi_remove_event_handler(ddi_callback_id_t id)
{
	ndi_event_callbacks_t *cb = (ndi_event_callbacks_t *)id;
	dev_info_t *ddip;

	ASSERT(cb);
	if (!cb) {
		return (DDI_FAILURE);
	}

	ddip = NDI_EVENT_DDIP(cb->ndi_evtcb_cookie);
	return (ndi_busop_remove_eventcall(ddip, id));
}

/*
 * Invoke bus nexus driver's implementation of the
 * (*bus_add_eventcall)() interface to register a callback handler
 * for "event".
 */
int
ddi_add_event_handler(dev_info_t *dip, ddi_eventcookie_t event,
    void (*handler)(dev_info_t *, ddi_eventcookie_t, void *, void *),
    void *arg, ddi_callback_id_t *id)
{
	return (ndi_busop_add_eventcall(dip, dip, event, handler, arg, id));
}


/*
 * Return a handle for event "name" by calling up the device tree
 * hierarchy via (*bus_get_eventcookie)() interface until claimed
 * by a bus nexus or top of dev_info tree is reached.
 */
int
ddi_get_eventcookie(dev_info_t *dip, char *name,
    ddi_eventcookie_t *event_cookiep)
{
	return (ndi_busop_get_eventcookie(dip, dip,
	    name, event_cookiep));
}

/*
 * This procedure is provided as the general callback function when
 * umem_lockmemory calls as_add_callback for long term memory locking.
 * When as_unmap, as_setprot, or as_free encounter segments which have
 * locked memory, this callback will be invoked.
 */
void
umem_lock_undo(struct as *as, void *arg, uint_t event)
{
	_NOTE(ARGUNUSED(as, event))
	struct ddi_umem_cookie *cp = (struct ddi_umem_cookie *)arg;

	/*
	 * Call the cleanup function.  Decrement the cookie reference
	 * count, if it goes to zero, return the memory for the cookie.
	 * The i_ddi_umem_unlock for this cookie may or may not have been
	 * called already.  It is the responsibility of the caller of
	 * umem_lockmemory to handle the case of the cleanup routine
	 * being called after a ddi_umem_unlock for the cookie
	 * was called.
	 */

	(*cp->callbacks.cbo_umem_lock_cleanup)((ddi_umem_cookie_t)cp);

	/* remove the cookie if reference goes to zero */
	if (atomic_dec_ulong_nv((ulong_t *)(&(cp->cook_refcnt))) == 0) {
		kmem_free(cp, sizeof (struct ddi_umem_cookie));
	}
}

/*
 * The following two Consolidation Private routines provide generic
 * interfaces to increase/decrease the amount of device-locked memory.
 *
 * To keep project_rele and project_hold consistent, i_ddi_decr_locked_memory()
 * must be called every time i_ddi_incr_locked_memory() is called.
 */
int
/* ARGSUSED */
i_ddi_incr_locked_memory(proc_t *procp, rctl_qty_t inc)
{
	ASSERT(procp != NULL);
	mutex_enter(&procp->p_lock);
	if (rctl_incr_locked_mem(procp, NULL, inc, 1)) {
		mutex_exit(&procp->p_lock);
		return (ENOMEM);
	}
	mutex_exit(&procp->p_lock);
	return (0);
}

/*
 * To keep project_rele and project_hold consistent, i_ddi_incr_locked_memory()
 * must be called every time i_ddi_decr_locked_memory() is called.
 */
/* ARGSUSED */
void
i_ddi_decr_locked_memory(proc_t *procp, rctl_qty_t dec)
{
	ASSERT(procp != NULL);
	mutex_enter(&procp->p_lock);
	rctl_decr_locked_mem(procp, NULL, dec, 1);
	mutex_exit(&procp->p_lock);
}

/*
 * The cookie->upd_max_lock_rctl flag is used to determine if we should
 * charge device locked memory to the max-locked-memory rctl.  Tracking
 * device locked memory causes the rctl locks to get hot under high-speed
 * I/O such as RDSv3 over IB.  If there is no max-locked-memory rctl limit,
 * we bypass charging the locked memory to the rctl altogether.  The cookie's
 * flag tells us if the rctl value should be updated when unlocking the memory,
 * in case the rctl gets changed after the memory was locked.  Any device
 * locked memory in that rare case will not be counted toward the rctl limit.
 *
 * When tracking the locked memory, the kproject_t parameter is always NULL
 * in the code paths:
 *	i_ddi_incr_locked_memory -> rctl_incr_locked_mem
 *	i_ddi_decr_locked_memory -> rctl_decr_locked_mem
 * Thus, we always use the tk_proj member to check the projp setting.
 */
static void
init_lockedmem_rctl_flag(struct ddi_umem_cookie *cookie)
{
	proc_t		*p;
	kproject_t	*projp;
	zone_t		*zonep;

	ASSERT(cookie);
	p = cookie->procp;
	ASSERT(p);

	zonep = p->p_zone;
	projp = p->p_task->tk_proj;

	ASSERT(zonep);
	ASSERT(projp);

	/* UINT64_MAX on both limits means "no rctl limit configured" */
	if (zonep->zone_locked_mem_ctl == UINT64_MAX &&
	    projp->kpj_data.kpd_locked_mem_ctl == UINT64_MAX)
		cookie->upd_max_lock_rctl = 0;
	else
		cookie->upd_max_lock_rctl = 1;
}

/*
 * This routine checks if the max-locked-memory resource ctl is
 * exceeded, if not increments it, grabs a hold on the project.
 * Returns 0 if successful otherwise returns error code
 */
static int
umem_incr_devlockmem(struct ddi_umem_cookie *cookie)
{
	proc_t		*procp;
	int		ret;

	ASSERT(cookie);
	/* skip accounting entirely when no rctl limit is in effect */
	if (cookie->upd_max_lock_rctl == 0)
		return (0);

	procp = cookie->procp;
	ASSERT(procp);

	if ((ret = i_ddi_incr_locked_memory(procp,
	    cookie->size)) != 0) {
		return (ret);
	}
	return (0);
}

/*
 * Decrements the max-locked-memory resource ctl and releases
 * the hold on the project that was acquired during umem_incr_devlockmem
 */
static void
umem_decr_devlockmem(struct ddi_umem_cookie *cookie)
{
	proc_t		*proc;

	if (cookie->upd_max_lock_rctl == 0)
		return;

	proc = (proc_t *)cookie->procp;
	if (!proc)
		return;

	i_ddi_decr_locked_memory(proc, cookie->size);
}

/*
 * A consolidation private function which is essentially equivalent to
 * ddi_umem_lock but with the addition of arguments ops_vector and procp.
 * A call to as_add_callback is done if DDI_UMEMLOCK_LONGTERM is set, and
 * the ops_vector is valid.
 *
 * Lock the virtual address range in the current process and create a
 * ddi_umem_cookie (of type UMEM_LOCKED).
This can be used to pass to 8243 * ddi_umem_iosetup to create a buf or do devmap_umem_setup/remap to export 8244 * to user space. 8245 * 8246 * Note: The resource control accounting currently uses a full charge model 8247 * in other words attempts to lock the same/overlapping areas of memory 8248 * will deduct the full size of the buffer from the projects running 8249 * counter for the device locked memory. 8250 * 8251 * addr, size should be PAGESIZE aligned 8252 * 8253 * flags - DDI_UMEMLOCK_READ, DDI_UMEMLOCK_WRITE or both 8254 * identifies whether the locked memory will be read or written or both 8255 * DDI_UMEMLOCK_LONGTERM must be set when the locking will 8256 * be maintained for an indefinitely long period (essentially permanent), 8257 * rather than for what would be required for a typical I/O completion. 8258 * When DDI_UMEMLOCK_LONGTERM is set, umem_lockmemory will return EFAULT 8259 * if the memory pertains to a regular file which is mapped MAP_SHARED. 8260 * This is to prevent a deadlock if a file truncation is attempted after 8261 * after the locking is done. 8262 * 8263 * Returns 0 on success 8264 * EINVAL - for invalid parameters 8265 * EPERM, ENOMEM and other error codes returned by as_pagelock 8266 * ENOMEM - is returned if the current request to lock memory exceeds 8267 * *.max-locked-memory resource control value. 
8268 * EFAULT - memory pertains to a regular file mapped shared and 8269 * and DDI_UMEMLOCK_LONGTERM flag is set 8270 * EAGAIN - could not start the ddi_umem_unlock list processing thread 8271 */ 8272 int 8273 umem_lockmemory(caddr_t addr, size_t len, int flags, ddi_umem_cookie_t *cookie, 8274 struct umem_callback_ops *ops_vector, 8275 proc_t *procp) 8276 { 8277 int error; 8278 struct ddi_umem_cookie *p; 8279 void (*driver_callback)() = NULL; 8280 struct as *as; 8281 struct seg *seg; 8282 vnode_t *vp; 8283 8284 /* Allow device drivers to not have to reference "curproc" */ 8285 if (procp == NULL) 8286 procp = curproc; 8287 as = procp->p_as; 8288 *cookie = NULL; /* in case of any error return */ 8289 8290 /* These are the only three valid flags */ 8291 if ((flags & ~(DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE | 8292 DDI_UMEMLOCK_LONGTERM)) != 0) 8293 return (EINVAL); 8294 8295 /* At least one (can be both) of the two access flags must be set */ 8296 if ((flags & (DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) == 0) 8297 return (EINVAL); 8298 8299 /* addr and len must be page-aligned */ 8300 if (((uintptr_t)addr & PAGEOFFSET) != 0) 8301 return (EINVAL); 8302 8303 if ((len & PAGEOFFSET) != 0) 8304 return (EINVAL); 8305 8306 /* 8307 * For longterm locking a driver callback must be specified; if 8308 * not longterm then a callback is optional. 8309 */ 8310 if (ops_vector != NULL) { 8311 if (ops_vector->cbo_umem_callback_version != 8312 UMEM_CALLBACK_VERSION) 8313 return (EINVAL); 8314 else 8315 driver_callback = ops_vector->cbo_umem_lock_cleanup; 8316 } 8317 if ((driver_callback == NULL) && (flags & DDI_UMEMLOCK_LONGTERM)) 8318 return (EINVAL); 8319 8320 /* 8321 * Call i_ddi_umem_unlock_thread_start if necessary. It will 8322 * be called on first ddi_umem_lock or umem_lockmemory call. 
8323 */ 8324 if (ddi_umem_unlock_thread == NULL) 8325 i_ddi_umem_unlock_thread_start(); 8326 8327 /* Allocate memory for the cookie */ 8328 p = kmem_zalloc(sizeof (struct ddi_umem_cookie), KM_SLEEP); 8329 8330 /* Convert the flags to seg_rw type */ 8331 if (flags & DDI_UMEMLOCK_WRITE) { 8332 p->s_flags = S_WRITE; 8333 } else { 8334 p->s_flags = S_READ; 8335 } 8336 8337 /* Store procp in cookie for later iosetup/unlock */ 8338 p->procp = (void *)procp; 8339 8340 /* 8341 * Store the struct as pointer in cookie for later use by 8342 * ddi_umem_unlock. The proc->p_as will be stale if ddi_umem_unlock 8343 * is called after relvm is called. 8344 */ 8345 p->asp = as; 8346 8347 /* 8348 * The size field is needed for lockmem accounting. 8349 */ 8350 p->size = len; 8351 init_lockedmem_rctl_flag(p); 8352 8353 if (umem_incr_devlockmem(p) != 0) { 8354 /* 8355 * The requested memory cannot be locked 8356 */ 8357 kmem_free(p, sizeof (struct ddi_umem_cookie)); 8358 *cookie = (ddi_umem_cookie_t)NULL; 8359 return (ENOMEM); 8360 } 8361 8362 /* Lock the pages corresponding to addr, len in memory */ 8363 error = as_pagelock(as, &(p->pparray), addr, len, p->s_flags); 8364 if (error != 0) { 8365 umem_decr_devlockmem(p); 8366 kmem_free(p, sizeof (struct ddi_umem_cookie)); 8367 *cookie = (ddi_umem_cookie_t)NULL; 8368 return (error); 8369 } 8370 8371 /* 8372 * For longterm locking the addr must pertain to a seg_vn segment or 8373 * or a seg_spt segment. 8374 * If the segment pertains to a regular file, it cannot be 8375 * mapped MAP_SHARED. 8376 * This is to prevent a deadlock if a file truncation is attempted 8377 * after the locking is done. 8378 * Doing this after as_pagelock guarantees persistence of the as; if 8379 * an unacceptable segment is found, the cleanup includes calling 8380 * as_pageunlock before returning EFAULT. 8381 * 8382 * segdev is allowed here as it is already locked. 
This allows 8383 * for memory exported by drivers through mmap() (which is already 8384 * locked) to be allowed for LONGTERM. 8385 */ 8386 if (flags & DDI_UMEMLOCK_LONGTERM) { 8387 extern struct seg_ops segspt_shmops; 8388 extern struct seg_ops segdev_ops; 8389 AS_LOCK_ENTER(as, RW_READER); 8390 for (seg = as_segat(as, addr); ; seg = AS_SEGNEXT(as, seg)) { 8391 if (seg == NULL || seg->s_base > addr + len) 8392 break; 8393 if (seg->s_ops == &segdev_ops) 8394 continue; 8395 if (((seg->s_ops != &segvn_ops) && 8396 (seg->s_ops != &segspt_shmops)) || 8397 ((SEGOP_GETVP(seg, addr, &vp) == 0 && 8398 vp != NULL && vp->v_type == VREG) && 8399 (SEGOP_GETTYPE(seg, addr) & MAP_SHARED))) { 8400 as_pageunlock(as, p->pparray, 8401 addr, len, p->s_flags); 8402 AS_LOCK_EXIT(as); 8403 umem_decr_devlockmem(p); 8404 kmem_free(p, sizeof (struct ddi_umem_cookie)); 8405 *cookie = (ddi_umem_cookie_t)NULL; 8406 return (EFAULT); 8407 } 8408 } 8409 AS_LOCK_EXIT(as); 8410 } 8411 8412 8413 /* Initialize the fields in the ddi_umem_cookie */ 8414 p->cvaddr = addr; 8415 p->type = UMEM_LOCKED; 8416 if (driver_callback != NULL) { 8417 /* i_ddi_umem_unlock and umem_lock_undo may need the cookie */ 8418 p->cook_refcnt = 2; 8419 p->callbacks = *ops_vector; 8420 } else { 8421 /* only i_ddi_umme_unlock needs the cookie */ 8422 p->cook_refcnt = 1; 8423 } 8424 8425 *cookie = (ddi_umem_cookie_t)p; 8426 8427 /* 8428 * If a driver callback was specified, add an entry to the 8429 * as struct callback list. The as_pagelock above guarantees 8430 * the persistence of as. 
8431 */ 8432 if (driver_callback) { 8433 error = as_add_callback(as, umem_lock_undo, p, AS_ALL_EVENT, 8434 addr, len, KM_SLEEP); 8435 if (error != 0) { 8436 as_pageunlock(as, p->pparray, 8437 addr, len, p->s_flags); 8438 umem_decr_devlockmem(p); 8439 kmem_free(p, sizeof (struct ddi_umem_cookie)); 8440 *cookie = (ddi_umem_cookie_t)NULL; 8441 } 8442 } 8443 return (error); 8444 } 8445 8446 /* 8447 * Unlock the pages locked by ddi_umem_lock or umem_lockmemory and free 8448 * the cookie. Called from i_ddi_umem_unlock_thread. 8449 */ 8450 8451 static void 8452 i_ddi_umem_unlock(struct ddi_umem_cookie *p) 8453 { 8454 uint_t rc; 8455 8456 /* 8457 * There is no way to determine whether a callback to 8458 * umem_lock_undo was registered via as_add_callback. 8459 * (i.e. umem_lockmemory was called with DDI_MEMLOCK_LONGTERM and 8460 * a valid callback function structure.) as_delete_callback 8461 * is called to delete a possible registered callback. If the 8462 * return from as_delete_callbacks is AS_CALLBACK_DELETED, it 8463 * indicates that there was a callback registered, and that is was 8464 * successfully deleted. Thus, the cookie reference count 8465 * will never be decremented by umem_lock_undo. Just return the 8466 * memory for the cookie, since both users of the cookie are done. 8467 * A return of AS_CALLBACK_NOTFOUND indicates a callback was 8468 * never registered. A return of AS_CALLBACK_DELETE_DEFERRED 8469 * indicates that callback processing is taking place and, and 8470 * umem_lock_undo is, or will be, executing, and thus decrementing 8471 * the cookie reference count when it is complete. 8472 * 8473 * This needs to be done before as_pageunlock so that the 8474 * persistence of as is guaranteed because of the locked pages. 8475 * 8476 */ 8477 rc = as_delete_callback(p->asp, p); 8478 8479 8480 /* 8481 * The proc->p_as will be stale if i_ddi_umem_unlock is called 8482 * after relvm is called so use p->asp. 
8483 */ 8484 as_pageunlock(p->asp, p->pparray, p->cvaddr, p->size, p->s_flags); 8485 8486 /* 8487 * Now that we have unlocked the memory decrement the 8488 * *.max-locked-memory rctl 8489 */ 8490 umem_decr_devlockmem(p); 8491 8492 if (rc == AS_CALLBACK_DELETED) { 8493 /* umem_lock_undo will not happen, return the cookie memory */ 8494 ASSERT(p->cook_refcnt == 2); 8495 kmem_free(p, sizeof (struct ddi_umem_cookie)); 8496 } else { 8497 /* 8498 * umem_undo_lock may happen if as_delete_callback returned 8499 * AS_CALLBACK_DELETE_DEFERRED. In that case, decrement the 8500 * reference count, atomically, and return the cookie 8501 * memory if the reference count goes to zero. The only 8502 * other value for rc is AS_CALLBACK_NOTFOUND. In that 8503 * case, just return the cookie memory. 8504 */ 8505 if ((rc != AS_CALLBACK_DELETE_DEFERRED) || 8506 (atomic_dec_ulong_nv((ulong_t *)(&(p->cook_refcnt))) 8507 == 0)) { 8508 kmem_free(p, sizeof (struct ddi_umem_cookie)); 8509 } 8510 } 8511 } 8512 8513 /* 8514 * i_ddi_umem_unlock_thread - deferred ddi_umem_unlock list handler. 8515 * 8516 * Call i_ddi_umem_unlock for entries in the ddi_umem_unlock list 8517 * until it is empty. Then, wait for more to be added. This thread is awoken 8518 * via calls to ddi_umem_unlock. 
8519 */ 8520 8521 static void 8522 i_ddi_umem_unlock_thread(void) 8523 { 8524 struct ddi_umem_cookie *ret_cookie; 8525 callb_cpr_t cprinfo; 8526 8527 /* process the ddi_umem_unlock list */ 8528 CALLB_CPR_INIT(&cprinfo, &ddi_umem_unlock_mutex, 8529 callb_generic_cpr, "unlock_thread"); 8530 for (;;) { 8531 mutex_enter(&ddi_umem_unlock_mutex); 8532 if (ddi_umem_unlock_head != NULL) { /* list not empty */ 8533 ret_cookie = ddi_umem_unlock_head; 8534 /* take if off the list */ 8535 if ((ddi_umem_unlock_head = 8536 ddi_umem_unlock_head->unl_forw) == NULL) { 8537 ddi_umem_unlock_tail = NULL; 8538 } 8539 mutex_exit(&ddi_umem_unlock_mutex); 8540 /* unlock the pages in this cookie */ 8541 (void) i_ddi_umem_unlock(ret_cookie); 8542 } else { /* list is empty, wait for next ddi_umem_unlock */ 8543 CALLB_CPR_SAFE_BEGIN(&cprinfo); 8544 cv_wait(&ddi_umem_unlock_cv, &ddi_umem_unlock_mutex); 8545 CALLB_CPR_SAFE_END(&cprinfo, &ddi_umem_unlock_mutex); 8546 mutex_exit(&ddi_umem_unlock_mutex); 8547 } 8548 } 8549 /* ddi_umem_unlock_thread does not exit */ 8550 /* NOTREACHED */ 8551 } 8552 8553 /* 8554 * Start the thread that will process the ddi_umem_unlock list if it is 8555 * not already started (i_ddi_umem_unlock_thread). 8556 */ 8557 static void 8558 i_ddi_umem_unlock_thread_start(void) 8559 { 8560 mutex_enter(&ddi_umem_unlock_mutex); 8561 if (ddi_umem_unlock_thread == NULL) { 8562 ddi_umem_unlock_thread = thread_create(NULL, 0, 8563 i_ddi_umem_unlock_thread, NULL, 0, &p0, 8564 TS_RUN, minclsyspri); 8565 } 8566 mutex_exit(&ddi_umem_unlock_mutex); 8567 } 8568 8569 /* 8570 * Lock the virtual address range in the current process and create a 8571 * ddi_umem_cookie (of type UMEM_LOCKED). This can be used to pass to 8572 * ddi_umem_iosetup to create a buf or do devmap_umem_setup/remap to export 8573 * to user space. 
8574 * 8575 * Note: The resource control accounting currently uses a full charge model 8576 * in other words attempts to lock the same/overlapping areas of memory 8577 * will deduct the full size of the buffer from the projects running 8578 * counter for the device locked memory. This applies to umem_lockmemory too. 8579 * 8580 * addr, size should be PAGESIZE aligned 8581 * flags - DDI_UMEMLOCK_READ, DDI_UMEMLOCK_WRITE or both 8582 * identifies whether the locked memory will be read or written or both 8583 * 8584 * Returns 0 on success 8585 * EINVAL - for invalid parameters 8586 * EPERM, ENOMEM and other error codes returned by as_pagelock 8587 * ENOMEM - is returned if the current request to lock memory exceeds 8588 * *.max-locked-memory resource control value. 8589 * EAGAIN - could not start the ddi_umem_unlock list processing thread 8590 */ 8591 int 8592 ddi_umem_lock(caddr_t addr, size_t len, int flags, ddi_umem_cookie_t *cookie) 8593 { 8594 int error; 8595 struct ddi_umem_cookie *p; 8596 8597 *cookie = NULL; /* in case of any error return */ 8598 8599 /* These are the only two valid flags */ 8600 if ((flags & ~(DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) != 0) { 8601 return (EINVAL); 8602 } 8603 8604 /* At least one of the two flags (or both) must be set */ 8605 if ((flags & (DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) == 0) { 8606 return (EINVAL); 8607 } 8608 8609 /* addr and len must be page-aligned */ 8610 if (((uintptr_t)addr & PAGEOFFSET) != 0) { 8611 return (EINVAL); 8612 } 8613 8614 if ((len & PAGEOFFSET) != 0) { 8615 return (EINVAL); 8616 } 8617 8618 /* 8619 * Call i_ddi_umem_unlock_thread_start if necessary. It will 8620 * be called on first ddi_umem_lock or umem_lockmemory call. 
8621 */ 8622 if (ddi_umem_unlock_thread == NULL) 8623 i_ddi_umem_unlock_thread_start(); 8624 8625 /* Allocate memory for the cookie */ 8626 p = kmem_zalloc(sizeof (struct ddi_umem_cookie), KM_SLEEP); 8627 8628 /* Convert the flags to seg_rw type */ 8629 if (flags & DDI_UMEMLOCK_WRITE) { 8630 p->s_flags = S_WRITE; 8631 } else { 8632 p->s_flags = S_READ; 8633 } 8634 8635 /* Store curproc in cookie for later iosetup/unlock */ 8636 p->procp = (void *)curproc; 8637 8638 /* 8639 * Store the struct as pointer in cookie for later use by 8640 * ddi_umem_unlock. The proc->p_as will be stale if ddi_umem_unlock 8641 * is called after relvm is called. 8642 */ 8643 p->asp = curproc->p_as; 8644 /* 8645 * The size field is needed for lockmem accounting. 8646 */ 8647 p->size = len; 8648 init_lockedmem_rctl_flag(p); 8649 8650 if (umem_incr_devlockmem(p) != 0) { 8651 /* 8652 * The requested memory cannot be locked 8653 */ 8654 kmem_free(p, sizeof (struct ddi_umem_cookie)); 8655 *cookie = (ddi_umem_cookie_t)NULL; 8656 return (ENOMEM); 8657 } 8658 8659 /* Lock the pages corresponding to addr, len in memory */ 8660 error = as_pagelock(((proc_t *)p->procp)->p_as, &(p->pparray), 8661 addr, len, p->s_flags); 8662 if (error != 0) { 8663 umem_decr_devlockmem(p); 8664 kmem_free(p, sizeof (struct ddi_umem_cookie)); 8665 *cookie = (ddi_umem_cookie_t)NULL; 8666 return (error); 8667 } 8668 8669 /* Initialize the fields in the ddi_umem_cookie */ 8670 p->cvaddr = addr; 8671 p->type = UMEM_LOCKED; 8672 p->cook_refcnt = 1; 8673 8674 *cookie = (ddi_umem_cookie_t)p; 8675 return (error); 8676 } 8677 8678 /* 8679 * Add the cookie to the ddi_umem_unlock list. Pages will be 8680 * unlocked by i_ddi_umem_unlock_thread. 
8681 */ 8682 8683 void 8684 ddi_umem_unlock(ddi_umem_cookie_t cookie) 8685 { 8686 struct ddi_umem_cookie *p = (struct ddi_umem_cookie *)cookie; 8687 8688 ASSERT(p->type == UMEM_LOCKED); 8689 ASSERT(CPU_ON_INTR(CPU) == 0); /* cannot be high level */ 8690 ASSERT(ddi_umem_unlock_thread != NULL); 8691 8692 p->unl_forw = (struct ddi_umem_cookie *)NULL; /* end of list */ 8693 /* 8694 * Queue the unlock request and notify i_ddi_umem_unlock thread 8695 * if it's called in the interrupt context. Otherwise, unlock pages 8696 * immediately. 8697 */ 8698 if (servicing_interrupt()) { 8699 /* queue the unlock request and notify the thread */ 8700 mutex_enter(&ddi_umem_unlock_mutex); 8701 if (ddi_umem_unlock_head == NULL) { 8702 ddi_umem_unlock_head = ddi_umem_unlock_tail = p; 8703 cv_broadcast(&ddi_umem_unlock_cv); 8704 } else { 8705 ddi_umem_unlock_tail->unl_forw = p; 8706 ddi_umem_unlock_tail = p; 8707 } 8708 mutex_exit(&ddi_umem_unlock_mutex); 8709 } else { 8710 /* unlock the pages right away */ 8711 (void) i_ddi_umem_unlock(p); 8712 } 8713 } 8714 8715 /* 8716 * Create a buf structure from a ddi_umem_cookie 8717 * cookie - is a ddi_umem_cookie for from ddi_umem_lock and ddi_umem_alloc 8718 * (only UMEM_LOCKED & KMEM_NON_PAGEABLE types supported) 8719 * off, len - identifies the portion of the memory represented by the cookie 8720 * that the buf points to. 8721 * NOTE: off, len need to follow the alignment/size restrictions of the 8722 * device (dev) that this buf will be passed to. Some devices 8723 * will accept unrestricted alignment/size, whereas others (such as 8724 * st) require some block-size alignment/size. 
It is the caller's 8725 * responsibility to ensure that the alignment/size restrictions 8726 * are met (we cannot assert as we do not know the restrictions) 8727 * 8728 * direction - is one of B_READ or B_WRITE and needs to be compatible with 8729 * the flags used in ddi_umem_lock 8730 * 8731 * The following three arguments are used to initialize fields in the 8732 * buf structure and are uninterpreted by this routine. 8733 * 8734 * dev 8735 * blkno 8736 * iodone 8737 * 8738 * sleepflag - is one of DDI_UMEM_SLEEP or DDI_UMEM_NOSLEEP 8739 * 8740 * Returns a buf structure pointer on success (to be freed by freerbuf) 8741 * NULL on any parameter error or memory alloc failure 8742 * 8743 */ 8744 struct buf * 8745 ddi_umem_iosetup(ddi_umem_cookie_t cookie, off_t off, size_t len, 8746 int direction, dev_t dev, daddr_t blkno, 8747 int (*iodone)(struct buf *), int sleepflag) 8748 { 8749 struct ddi_umem_cookie *p = (struct ddi_umem_cookie *)cookie; 8750 struct buf *bp; 8751 8752 /* 8753 * check for valid cookie offset, len 8754 */ 8755 if ((off + len) > p->size) { 8756 return (NULL); 8757 } 8758 8759 if (len > p->size) { 8760 return (NULL); 8761 } 8762 8763 /* direction has to be one of B_READ or B_WRITE */ 8764 if ((direction != B_READ) && (direction != B_WRITE)) { 8765 return (NULL); 8766 } 8767 8768 /* These are the only two valid sleepflags */ 8769 if ((sleepflag != DDI_UMEM_SLEEP) && (sleepflag != DDI_UMEM_NOSLEEP)) { 8770 return (NULL); 8771 } 8772 8773 /* 8774 * Only cookies of type UMEM_LOCKED and KMEM_NON_PAGEABLE are supported 8775 */ 8776 if ((p->type != UMEM_LOCKED) && (p->type != KMEM_NON_PAGEABLE)) { 8777 return (NULL); 8778 } 8779 8780 /* If type is KMEM_NON_PAGEABLE procp is NULL */ 8781 ASSERT((p->type == KMEM_NON_PAGEABLE) ? 
8782 (p->procp == NULL) : (p->procp != NULL)); 8783 8784 bp = kmem_alloc(sizeof (struct buf), sleepflag); 8785 if (bp == NULL) { 8786 return (NULL); 8787 } 8788 bioinit(bp); 8789 8790 bp->b_flags = B_BUSY | B_PHYS | direction; 8791 bp->b_edev = dev; 8792 bp->b_lblkno = blkno; 8793 bp->b_iodone = iodone; 8794 bp->b_bcount = len; 8795 bp->b_proc = (proc_t *)p->procp; 8796 ASSERT(((uintptr_t)(p->cvaddr) & PAGEOFFSET) == 0); 8797 bp->b_un.b_addr = (caddr_t)((uintptr_t)(p->cvaddr) + off); 8798 if (p->pparray != NULL) { 8799 bp->b_flags |= B_SHADOW; 8800 ASSERT(((uintptr_t)(p->cvaddr) & PAGEOFFSET) == 0); 8801 bp->b_shadow = p->pparray + btop(off); 8802 } 8803 return (bp); 8804 } 8805 8806 /* 8807 * Fault-handling and related routines 8808 */ 8809 8810 ddi_devstate_t 8811 ddi_get_devstate(dev_info_t *dip) 8812 { 8813 if (DEVI_IS_DEVICE_OFFLINE(dip)) 8814 return (DDI_DEVSTATE_OFFLINE); 8815 else if (DEVI_IS_DEVICE_DOWN(dip) || DEVI_IS_BUS_DOWN(dip)) 8816 return (DDI_DEVSTATE_DOWN); 8817 else if (DEVI_IS_BUS_QUIESCED(dip)) 8818 return (DDI_DEVSTATE_QUIESCED); 8819 else if (DEVI_IS_DEVICE_DEGRADED(dip)) 8820 return (DDI_DEVSTATE_DEGRADED); 8821 else 8822 return (DDI_DEVSTATE_UP); 8823 } 8824 8825 void 8826 ddi_dev_report_fault(dev_info_t *dip, ddi_fault_impact_t impact, 8827 ddi_fault_location_t location, const char *message) 8828 { 8829 struct ddi_fault_event_data fd; 8830 ddi_eventcookie_t ec; 8831 8832 /* 8833 * Assemble all the information into a fault-event-data structure 8834 */ 8835 fd.f_dip = dip; 8836 fd.f_impact = impact; 8837 fd.f_location = location; 8838 fd.f_message = message; 8839 fd.f_oldstate = ddi_get_devstate(dip); 8840 8841 /* 8842 * Get eventcookie from defining parent. 
8843 */ 8844 if (ddi_get_eventcookie(dip, DDI_DEVI_FAULT_EVENT, &ec) != 8845 DDI_SUCCESS) 8846 return; 8847 8848 (void) ndi_post_event(dip, dip, ec, &fd); 8849 } 8850 8851 char * 8852 i_ddi_devi_class(dev_info_t *dip) 8853 { 8854 return (DEVI(dip)->devi_device_class); 8855 } 8856 8857 int 8858 i_ddi_set_devi_class(dev_info_t *dip, const char *devi_class, int flag) 8859 { 8860 struct dev_info *devi = DEVI(dip); 8861 8862 mutex_enter(&devi->devi_lock); 8863 8864 if (devi->devi_device_class) 8865 kmem_free(devi->devi_device_class, 8866 strlen(devi->devi_device_class) + 1); 8867 8868 if ((devi->devi_device_class = i_ddi_strdup(devi_class, flag)) 8869 != NULL) { 8870 mutex_exit(&devi->devi_lock); 8871 return (DDI_SUCCESS); 8872 } 8873 8874 mutex_exit(&devi->devi_lock); 8875 8876 return (DDI_FAILURE); 8877 } 8878 8879 8880 /* 8881 * Task Queues DDI interfaces. 8882 */ 8883 8884 /* ARGSUSED */ 8885 ddi_taskq_t * 8886 ddi_taskq_create(dev_info_t *dip, const char *name, int nthreads, 8887 pri_t pri, uint_t cflags) 8888 { 8889 char full_name[TASKQ_NAMELEN]; 8890 const char *tq_name; 8891 int nodeid = 0; 8892 8893 if (dip == NULL) 8894 tq_name = name; 8895 else { 8896 nodeid = ddi_get_instance(dip); 8897 8898 if (name == NULL) 8899 name = "tq"; 8900 8901 (void) snprintf(full_name, sizeof (full_name), "%s_%s", 8902 ddi_driver_name(dip), name); 8903 8904 tq_name = full_name; 8905 } 8906 8907 return ((ddi_taskq_t *)taskq_create_instance(tq_name, nodeid, nthreads, 8908 pri == TASKQ_DEFAULTPRI ? minclsyspri : pri, 8909 nthreads, INT_MAX, TASKQ_PREPOPULATE)); 8910 } 8911 8912 void 8913 ddi_taskq_destroy(ddi_taskq_t *tq) 8914 { 8915 taskq_destroy((taskq_t *)tq); 8916 } 8917 8918 int 8919 ddi_taskq_dispatch(ddi_taskq_t *tq, void (* func)(void *), 8920 void *arg, uint_t dflags) 8921 { 8922 taskqid_t id = taskq_dispatch((taskq_t *)tq, func, arg, 8923 dflags == DDI_SLEEP ? TQ_SLEEP : TQ_NOSLEEP); 8924 8925 return (id != TASKQID_INVALID ? 
DDI_SUCCESS : DDI_FAILURE); 8926 } 8927 8928 void 8929 ddi_taskq_wait(ddi_taskq_t *tq) 8930 { 8931 taskq_wait((taskq_t *)tq); 8932 } 8933 8934 void 8935 ddi_taskq_suspend(ddi_taskq_t *tq) 8936 { 8937 taskq_suspend((taskq_t *)tq); 8938 } 8939 8940 boolean_t 8941 ddi_taskq_suspended(ddi_taskq_t *tq) 8942 { 8943 return (taskq_suspended((taskq_t *)tq)); 8944 } 8945 8946 void 8947 ddi_taskq_resume(ddi_taskq_t *tq) 8948 { 8949 taskq_resume((taskq_t *)tq); 8950 } 8951 8952 int 8953 ddi_parse( 8954 const char *ifname, 8955 char *alnum, 8956 uint_t *nump) 8957 { 8958 const char *p; 8959 int l; 8960 ulong_t num; 8961 boolean_t nonum = B_TRUE; 8962 char c; 8963 8964 l = strlen(ifname); 8965 for (p = ifname + l; p != ifname; l--) { 8966 c = *--p; 8967 if (!isdigit(c)) { 8968 (void) strlcpy(alnum, ifname, l + 1); 8969 if (ddi_strtoul(p + 1, NULL, 10, &num) != 0) 8970 return (DDI_FAILURE); 8971 break; 8972 } 8973 nonum = B_FALSE; 8974 } 8975 if (l == 0 || nonum) 8976 return (DDI_FAILURE); 8977 8978 *nump = num; 8979 return (DDI_SUCCESS); 8980 } 8981 8982 /* 8983 * Default initialization function for drivers that don't need to quiesce. 8984 */ 8985 /* ARGSUSED */ 8986 int 8987 ddi_quiesce_not_needed(dev_info_t *dip) 8988 { 8989 return (DDI_SUCCESS); 8990 } 8991 8992 /* 8993 * Initialization function for drivers that should implement quiesce() 8994 * but haven't yet. 
8995 */ 8996 /* ARGSUSED */ 8997 int 8998 ddi_quiesce_not_supported(dev_info_t *dip) 8999 { 9000 return (DDI_FAILURE); 9001 } 9002 9003 char * 9004 ddi_strdup(const char *str, int flag) 9005 { 9006 int n; 9007 char *ptr; 9008 9009 ASSERT(str != NULL); 9010 ASSERT((flag == KM_SLEEP) || (flag == KM_NOSLEEP)); 9011 9012 n = strlen(str); 9013 if ((ptr = kmem_alloc(n + 1, flag)) == NULL) 9014 return (NULL); 9015 bcopy(str, ptr, n + 1); 9016 return (ptr); 9017 } 9018 9019 char * 9020 strdup(const char *str) 9021 { 9022 return (ddi_strdup(str, KM_SLEEP)); 9023 } 9024 9025 void 9026 strfree(char *str) 9027 { 9028 ASSERT(str != NULL); 9029 kmem_free(str, strlen(str) + 1); 9030 } 9031 9032 /* 9033 * Generic DDI callback interfaces. 9034 */ 9035 9036 int 9037 ddi_cb_register(dev_info_t *dip, ddi_cb_flags_t flags, ddi_cb_func_t cbfunc, 9038 void *arg1, void *arg2, ddi_cb_handle_t *ret_hdlp) 9039 { 9040 ddi_cb_t *cbp; 9041 9042 ASSERT(dip != NULL); 9043 ASSERT(DDI_CB_FLAG_VALID(flags)); 9044 ASSERT(cbfunc != NULL); 9045 ASSERT(ret_hdlp != NULL); 9046 9047 /* Sanity check the context */ 9048 ASSERT(!servicing_interrupt()); 9049 if (servicing_interrupt()) 9050 return (DDI_FAILURE); 9051 9052 /* Validate parameters */ 9053 if ((dip == NULL) || !DDI_CB_FLAG_VALID(flags) || 9054 (cbfunc == NULL) || (ret_hdlp == NULL)) 9055 return (DDI_EINVAL); 9056 9057 /* Check for previous registration */ 9058 if (DEVI(dip)->devi_cb_p != NULL) 9059 return (DDI_EALREADY); 9060 9061 /* Allocate and initialize callback */ 9062 cbp = kmem_zalloc(sizeof (ddi_cb_t), KM_SLEEP); 9063 cbp->cb_dip = dip; 9064 cbp->cb_func = cbfunc; 9065 cbp->cb_arg1 = arg1; 9066 cbp->cb_arg2 = arg2; 9067 cbp->cb_flags = flags; 9068 DEVI(dip)->devi_cb_p = cbp; 9069 9070 /* If adding an IRM callback, notify IRM */ 9071 if (flags & DDI_CB_FLAG_INTR) 9072 i_ddi_irm_set_cb(dip, B_TRUE); 9073 9074 *ret_hdlp = (ddi_cb_handle_t)&(DEVI(dip)->devi_cb_p); 9075 return (DDI_SUCCESS); 9076 } 9077 9078 int 9079 
ddi_cb_unregister(ddi_cb_handle_t hdl) 9080 { 9081 ddi_cb_t *cbp; 9082 dev_info_t *dip; 9083 9084 ASSERT(hdl != NULL); 9085 9086 /* Sanity check the context */ 9087 ASSERT(!servicing_interrupt()); 9088 if (servicing_interrupt()) 9089 return (DDI_FAILURE); 9090 9091 /* Validate parameters */ 9092 if ((hdl == NULL) || ((cbp = *(ddi_cb_t **)hdl) == NULL) || 9093 ((dip = cbp->cb_dip) == NULL)) 9094 return (DDI_EINVAL); 9095 9096 /* If removing an IRM callback, notify IRM */ 9097 if (cbp->cb_flags & DDI_CB_FLAG_INTR) 9098 i_ddi_irm_set_cb(dip, B_FALSE); 9099 9100 /* Destroy the callback */ 9101 kmem_free(cbp, sizeof (ddi_cb_t)); 9102 DEVI(dip)->devi_cb_p = NULL; 9103 9104 return (DDI_SUCCESS); 9105 } 9106 9107 /* 9108 * Platform independent DR routines 9109 */ 9110 9111 static int 9112 ndi2errno(int n) 9113 { 9114 int err = 0; 9115 9116 switch (n) { 9117 case NDI_NOMEM: 9118 err = ENOMEM; 9119 break; 9120 case NDI_BUSY: 9121 err = EBUSY; 9122 break; 9123 case NDI_FAULT: 9124 err = EFAULT; 9125 break; 9126 case NDI_FAILURE: 9127 err = EIO; 9128 break; 9129 case NDI_SUCCESS: 9130 break; 9131 case NDI_BADHANDLE: 9132 default: 9133 err = EINVAL; 9134 break; 9135 } 9136 return (err); 9137 } 9138 9139 /* 9140 * Prom tree node list 9141 */ 9142 struct ptnode { 9143 pnode_t nodeid; 9144 struct ptnode *next; 9145 }; 9146 9147 /* 9148 * Prom tree walk arg 9149 */ 9150 struct pta { 9151 dev_info_t *pdip; 9152 devi_branch_t *bp; 9153 uint_t flags; 9154 dev_info_t *fdip; 9155 struct ptnode *head; 9156 }; 9157 9158 static void 9159 visit_node(pnode_t nodeid, struct pta *ap) 9160 { 9161 struct ptnode **nextp; 9162 int (*select)(pnode_t, void *, uint_t); 9163 9164 ASSERT(nodeid != OBP_NONODE && nodeid != OBP_BADNODE); 9165 9166 select = ap->bp->create.prom_branch_select; 9167 9168 ASSERT(select); 9169 9170 if (select(nodeid, ap->bp->arg, 0) == DDI_SUCCESS) { 9171 9172 for (nextp = &ap->head; *nextp; nextp = &(*nextp)->next) 9173 ; 9174 9175 *nextp = kmem_zalloc(sizeof (struct ptnode), 
KM_SLEEP); 9176 9177 (*nextp)->nodeid = nodeid; 9178 } 9179 9180 if ((ap->flags & DEVI_BRANCH_CHILD) == DEVI_BRANCH_CHILD) 9181 return; 9182 9183 nodeid = prom_childnode(nodeid); 9184 while (nodeid != OBP_NONODE && nodeid != OBP_BADNODE) { 9185 visit_node(nodeid, ap); 9186 nodeid = prom_nextnode(nodeid); 9187 } 9188 } 9189 9190 /* 9191 * NOTE: The caller of this function must check for device contracts 9192 * or LDI callbacks against this dip before setting the dip offline. 9193 */ 9194 static int 9195 set_infant_dip_offline(dev_info_t *dip, void *arg) 9196 { 9197 char *path = (char *)arg; 9198 9199 ASSERT(dip); 9200 ASSERT(arg); 9201 9202 if (i_ddi_node_state(dip) >= DS_ATTACHED) { 9203 (void) ddi_pathname(dip, path); 9204 cmn_err(CE_WARN, "Attempt to set offline flag on attached " 9205 "node: %s", path); 9206 return (DDI_FAILURE); 9207 } 9208 9209 mutex_enter(&(DEVI(dip)->devi_lock)); 9210 if (!DEVI_IS_DEVICE_OFFLINE(dip)) 9211 DEVI_SET_DEVICE_OFFLINE(dip); 9212 mutex_exit(&(DEVI(dip)->devi_lock)); 9213 9214 return (DDI_SUCCESS); 9215 } 9216 9217 typedef struct result { 9218 char *path; 9219 int result; 9220 } result_t; 9221 9222 static int 9223 dip_set_offline(dev_info_t *dip, void *arg) 9224 { 9225 int end; 9226 result_t *resp = (result_t *)arg; 9227 9228 ASSERT(dip); 9229 ASSERT(resp); 9230 9231 /* 9232 * We stop the walk if e_ddi_offline_notify() returns 9233 * failure, because this implies that one or more consumers 9234 * (either LDI or contract based) has blocked the offline. 9235 * So there is no point in conitnuing the walk 9236 */ 9237 if (e_ddi_offline_notify(dip) == DDI_FAILURE) { 9238 resp->result = DDI_FAILURE; 9239 return (DDI_WALK_TERMINATE); 9240 } 9241 9242 /* 9243 * If set_infant_dip_offline() returns failure, it implies 9244 * that we failed to set a particular dip offline. This 9245 * does not imply that the offline as a whole should fail. 9246 * We want to do the best we can, so we continue the walk. 
9247 */ 9248 if (set_infant_dip_offline(dip, resp->path) == DDI_SUCCESS) 9249 end = DDI_SUCCESS; 9250 else 9251 end = DDI_FAILURE; 9252 9253 e_ddi_offline_finalize(dip, end); 9254 9255 return (DDI_WALK_CONTINUE); 9256 } 9257 9258 /* 9259 * The call to e_ddi_offline_notify() exists for the 9260 * unlikely error case that a branch we are trying to 9261 * create already exists and has device contracts or LDI 9262 * event callbacks against it. 9263 * 9264 * We allow create to succeed for such branches only if 9265 * no constraints block the offline. 9266 */ 9267 static int 9268 branch_set_offline(dev_info_t *dip, char *path) 9269 { 9270 int circ; 9271 int end; 9272 result_t res; 9273 9274 9275 if (e_ddi_offline_notify(dip) == DDI_FAILURE) { 9276 return (DDI_FAILURE); 9277 } 9278 9279 if (set_infant_dip_offline(dip, path) == DDI_SUCCESS) 9280 end = DDI_SUCCESS; 9281 else 9282 end = DDI_FAILURE; 9283 9284 e_ddi_offline_finalize(dip, end); 9285 9286 if (end == DDI_FAILURE) 9287 return (DDI_FAILURE); 9288 9289 res.result = DDI_SUCCESS; 9290 res.path = path; 9291 9292 ndi_devi_enter(dip, &circ); 9293 ddi_walk_devs(ddi_get_child(dip), dip_set_offline, &res); 9294 ndi_devi_exit(dip, circ); 9295 9296 return (res.result); 9297 } 9298 9299 /*ARGSUSED*/ 9300 static int 9301 create_prom_branch(void *arg, int has_changed) 9302 { 9303 int circ; 9304 int exists, rv; 9305 pnode_t nodeid; 9306 struct ptnode *tnp; 9307 dev_info_t *dip; 9308 struct pta *ap = arg; 9309 devi_branch_t *bp; 9310 char *path; 9311 9312 ASSERT(ap); 9313 ASSERT(ap->fdip == NULL); 9314 ASSERT(ap->pdip && ndi_dev_is_prom_node(ap->pdip)); 9315 9316 bp = ap->bp; 9317 9318 nodeid = ddi_get_nodeid(ap->pdip); 9319 if (nodeid == OBP_NONODE || nodeid == OBP_BADNODE) { 9320 cmn_err(CE_WARN, "create_prom_branch: invalid " 9321 "nodeid: 0x%x", nodeid); 9322 return (EINVAL); 9323 } 9324 9325 ap->head = NULL; 9326 9327 nodeid = prom_childnode(nodeid); 9328 while (nodeid != OBP_NONODE && nodeid != OBP_BADNODE) { 9329 
visit_node(nodeid, ap); 9330 nodeid = prom_nextnode(nodeid); 9331 } 9332 9333 if (ap->head == NULL) 9334 return (ENODEV); 9335 9336 path = kmem_alloc(MAXPATHLEN, KM_SLEEP); 9337 rv = 0; 9338 while ((tnp = ap->head) != NULL) { 9339 ap->head = tnp->next; 9340 9341 ndi_devi_enter(ap->pdip, &circ); 9342 9343 /* 9344 * Check if the branch already exists. 9345 */ 9346 exists = 0; 9347 dip = e_ddi_nodeid_to_dip(tnp->nodeid); 9348 if (dip != NULL) { 9349 exists = 1; 9350 9351 /* Parent is held busy, so release hold */ 9352 ndi_rele_devi(dip); 9353 #ifdef DEBUG 9354 cmn_err(CE_WARN, "create_prom_branch: dip(%p) exists" 9355 " for nodeid 0x%x", (void *)dip, tnp->nodeid); 9356 #endif 9357 } else { 9358 dip = i_ddi_create_branch(ap->pdip, tnp->nodeid); 9359 } 9360 9361 kmem_free(tnp, sizeof (struct ptnode)); 9362 9363 /* 9364 * Hold the branch if it is not already held 9365 */ 9366 if (dip && !exists) { 9367 e_ddi_branch_hold(dip); 9368 } 9369 9370 ASSERT(dip == NULL || e_ddi_branch_held(dip)); 9371 9372 /* 9373 * Set all dips in the newly created branch offline so that 9374 * only a "configure" operation can attach 9375 * the branch 9376 */ 9377 if (dip == NULL || branch_set_offline(dip, path) 9378 == DDI_FAILURE) { 9379 ndi_devi_exit(ap->pdip, circ); 9380 rv = EIO; 9381 continue; 9382 } 9383 9384 ASSERT(ddi_get_parent(dip) == ap->pdip); 9385 9386 ndi_devi_exit(ap->pdip, circ); 9387 9388 if (ap->flags & DEVI_BRANCH_CONFIGURE) { 9389 int error = e_ddi_branch_configure(dip, &ap->fdip, 0); 9390 if (error && rv == 0) 9391 rv = error; 9392 } 9393 9394 /* 9395 * Invoke devi_branch_callback() (if it exists) only for 9396 * newly created branches 9397 */ 9398 if (bp->devi_branch_callback && !exists) 9399 bp->devi_branch_callback(dip, bp->arg, 0); 9400 } 9401 9402 kmem_free(path, MAXPATHLEN); 9403 9404 return (rv); 9405 } 9406 9407 static int 9408 sid_node_create(dev_info_t *pdip, devi_branch_t *bp, dev_info_t **rdipp) 9409 { 9410 int rv, circ, len; 9411 int i, flags, ret; 9412 
dev_info_t *dip; 9413 char *nbuf; 9414 char *path; 9415 static const char *noname = "<none>"; 9416 9417 ASSERT(pdip); 9418 ASSERT(DEVI_BUSY_OWNED(pdip)); 9419 9420 flags = 0; 9421 9422 /* 9423 * Creating the root of a branch ? 9424 */ 9425 if (rdipp) { 9426 *rdipp = NULL; 9427 flags = DEVI_BRANCH_ROOT; 9428 } 9429 9430 ndi_devi_alloc_sleep(pdip, (char *)noname, DEVI_SID_NODEID, &dip); 9431 rv = bp->create.sid_branch_create(dip, bp->arg, flags); 9432 9433 nbuf = kmem_alloc(OBP_MAXDRVNAME, KM_SLEEP); 9434 9435 if (rv == DDI_WALK_ERROR) { 9436 cmn_err(CE_WARN, "e_ddi_branch_create: Error setting" 9437 " properties on devinfo node %p", (void *)dip); 9438 goto fail; 9439 } 9440 9441 len = OBP_MAXDRVNAME; 9442 if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, 9443 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "name", nbuf, &len) 9444 != DDI_PROP_SUCCESS) { 9445 cmn_err(CE_WARN, "e_ddi_branch_create: devinfo node %p has" 9446 "no name property", (void *)dip); 9447 goto fail; 9448 } 9449 9450 ASSERT(i_ddi_node_state(dip) == DS_PROTO); 9451 if (ndi_devi_set_nodename(dip, nbuf, 0) != NDI_SUCCESS) { 9452 cmn_err(CE_WARN, "e_ddi_branch_create: cannot set name (%s)" 9453 " for devinfo node %p", nbuf, (void *)dip); 9454 goto fail; 9455 } 9456 9457 kmem_free(nbuf, OBP_MAXDRVNAME); 9458 9459 /* 9460 * Ignore bind failures just like boot does 9461 */ 9462 (void) ndi_devi_bind_driver(dip, 0); 9463 9464 switch (rv) { 9465 case DDI_WALK_CONTINUE: 9466 case DDI_WALK_PRUNESIB: 9467 ndi_devi_enter(dip, &circ); 9468 9469 i = DDI_WALK_CONTINUE; 9470 for (; i == DDI_WALK_CONTINUE; ) { 9471 i = sid_node_create(dip, bp, NULL); 9472 } 9473 9474 ASSERT(i == DDI_WALK_ERROR || i == DDI_WALK_PRUNESIB); 9475 if (i == DDI_WALK_ERROR) 9476 rv = i; 9477 /* 9478 * If PRUNESIB stop creating siblings 9479 * of dip's child. Subsequent walk behavior 9480 * is determined by rv returned by dip. 
9481 */ 9482 9483 ndi_devi_exit(dip, circ); 9484 break; 9485 case DDI_WALK_TERMINATE: 9486 /* 9487 * Don't create children and ask our parent 9488 * to not create siblings either. 9489 */ 9490 rv = DDI_WALK_PRUNESIB; 9491 break; 9492 case DDI_WALK_PRUNECHILD: 9493 /* 9494 * Don't create children, but ask parent to continue 9495 * with siblings. 9496 */ 9497 rv = DDI_WALK_CONTINUE; 9498 break; 9499 default: 9500 ASSERT(0); 9501 break; 9502 } 9503 9504 if (rdipp) 9505 *rdipp = dip; 9506 9507 /* 9508 * Set device offline - only the "configure" op should cause an attach. 9509 * Note that it is safe to set the dip offline without checking 9510 * for either device contract or layered driver (LDI) based constraints 9511 * since there cannot be any contracts or LDI opens of this device. 9512 * This is because this node is a newly created dip with the parent busy 9513 * held, so no other thread can come in and attach this dip. A dip that 9514 * has never been attached cannot have contracts since by definition 9515 * a device contract (an agreement between a process and a device minor 9516 * node) can only be created against a device that has minor nodes 9517 * i.e is attached. Similarly an LDI open will only succeed if the 9518 * dip is attached. We assert below that the dip is not attached. 
9519 */ 9520 ASSERT(i_ddi_node_state(dip) < DS_ATTACHED); 9521 path = kmem_alloc(MAXPATHLEN, KM_SLEEP); 9522 ret = set_infant_dip_offline(dip, path); 9523 ASSERT(ret == DDI_SUCCESS); 9524 kmem_free(path, MAXPATHLEN); 9525 9526 return (rv); 9527 fail: 9528 (void) ndi_devi_free(dip); 9529 kmem_free(nbuf, OBP_MAXDRVNAME); 9530 return (DDI_WALK_ERROR); 9531 } 9532 9533 static int 9534 create_sid_branch( 9535 dev_info_t *pdip, 9536 devi_branch_t *bp, 9537 dev_info_t **dipp, 9538 uint_t flags) 9539 { 9540 int rv = 0, state = DDI_WALK_CONTINUE; 9541 dev_info_t *rdip; 9542 9543 while (state == DDI_WALK_CONTINUE) { 9544 int circ; 9545 9546 ndi_devi_enter(pdip, &circ); 9547 9548 state = sid_node_create(pdip, bp, &rdip); 9549 if (rdip == NULL) { 9550 ndi_devi_exit(pdip, circ); 9551 ASSERT(state == DDI_WALK_ERROR); 9552 break; 9553 } 9554 9555 e_ddi_branch_hold(rdip); 9556 9557 ndi_devi_exit(pdip, circ); 9558 9559 if (flags & DEVI_BRANCH_CONFIGURE) { 9560 int error = e_ddi_branch_configure(rdip, dipp, 0); 9561 if (error && rv == 0) 9562 rv = error; 9563 } 9564 9565 /* 9566 * devi_branch_callback() is optional 9567 */ 9568 if (bp->devi_branch_callback) 9569 bp->devi_branch_callback(rdip, bp->arg, 0); 9570 } 9571 9572 ASSERT(state == DDI_WALK_ERROR || state == DDI_WALK_PRUNESIB); 9573 9574 return (state == DDI_WALK_ERROR ? EIO : rv); 9575 } 9576 9577 int 9578 e_ddi_branch_create( 9579 dev_info_t *pdip, 9580 devi_branch_t *bp, 9581 dev_info_t **dipp, 9582 uint_t flags) 9583 { 9584 int prom_devi, sid_devi, error; 9585 9586 if (pdip == NULL || bp == NULL || bp->type == 0) 9587 return (EINVAL); 9588 9589 prom_devi = (bp->type == DEVI_BRANCH_PROM) ? 1 : 0; 9590 sid_devi = (bp->type == DEVI_BRANCH_SID) ? 
	    1 : 0;

	/* Exactly one branch type must be selected, with its callback set */
	if (prom_devi && bp->create.prom_branch_select == NULL)
		return (EINVAL);
	else if (sid_devi && bp->create.sid_branch_create == NULL)
		return (EINVAL);
	else if (!prom_devi && !sid_devi)
		return (EINVAL);

	if (flags & DEVI_BRANCH_EVENT)
		return (EINVAL);

	if (prom_devi) {
		struct pta pta = {0};

		pta.pdip = pdip;
		pta.bp = bp;
		pta.flags = flags;

		/* Serialize against PROM tree changes during the walk */
		error = prom_tree_access(create_prom_branch, &pta, NULL);

		if (dipp)
			*dipp = pta.fdip;
		else if (pta.fdip)
			ndi_rele_devi(pta.fdip);
	} else {
		error = create_sid_branch(pdip, bp, dipp, flags);
	}

	return (error);
}

/*
 * Configure (online and attach) a branch previously created by
 * e_ddi_branch_create().  The branch must be held (e_ddi_branch_hold).
 * On failure, if dipp is non-NULL, a held rdip is returned through it.
 * Returns an errno value (0 on success).
 */
int
e_ddi_branch_configure(dev_info_t *rdip, dev_info_t **dipp, uint_t flags)
{
	int		rv;
	char		*devnm;
	dev_info_t	*pdip;

	if (dipp)
		*dipp = NULL;

	/*
	 * NOTE(review): "flags != 0" already rejects every nonzero flags
	 * value, so the (flags & DEVI_BRANCH_EVENT) test is redundant.
	 */
	if (rdip == NULL || flags != 0 || (flags & DEVI_BRANCH_EVENT))
		return (EINVAL);

	pdip = ddi_get_parent(rdip);

	ndi_hold_devi(pdip);

	if (!e_ddi_branch_held(rdip)) {
		ndi_rele_devi(pdip);
		cmn_err(CE_WARN, "e_ddi_branch_configure: "
		    "dip(%p) not held", (void *)rdip);
		return (EINVAL);
	}

	if (i_ddi_node_state(rdip) < DS_INITIALIZED) {
		/*
		 * First attempt to bind a driver. If we fail, return
		 * success (On some platforms, dips for some device
		 * types (CPUs) may not have a driver)
		 */
		if (ndi_devi_bind_driver(rdip, 0) != NDI_SUCCESS) {
			ndi_rele_devi(pdip);
			return (0);
		}

		if (ddi_initchild(pdip, rdip) != DDI_SUCCESS) {
			rv = NDI_FAILURE;
			goto out;
		}
	}

	ASSERT(i_ddi_node_state(rdip) >= DS_INITIALIZED);

	devnm = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);

	(void) ddi_deviname(rdip, devnm);

	/* ddi_deviname() prepends '/', hence devnm+1 below */
	if ((rv = ndi_devi_config_one(pdip, devnm+1, &rdip,
	    NDI_DEVI_ONLINE | NDI_CONFIG)) == NDI_SUCCESS) {
		/* release hold from ndi_devi_config_one() */
		ndi_rele_devi(rdip);
	}

	kmem_free(devnm, MAXNAMELEN + 1);
out:
	if (rv != NDI_SUCCESS && dipp && rdip) {
		ndi_hold_devi(rdip);
		*dipp = rdip;
	}
	ndi_rele_devi(pdip);
	return (ndi2errno(rv));
}

/*
 * Place a "branch hold" on rdip: sets DEVI_BRANCH_HELD and takes one
 * devi_ref reference.  Warns (and does nothing more) if the branch is
 * already held.
 */
void
e_ddi_branch_hold(dev_info_t *rdip)
{
	if (e_ddi_branch_held(rdip)) {
		cmn_err(CE_WARN, "e_ddi_branch_hold: branch already held");
		return;
	}

	mutex_enter(&DEVI(rdip)->devi_lock);
	if ((DEVI(rdip)->devi_flags & DEVI_BRANCH_HELD) == 0) {
		DEVI(rdip)->devi_flags |= DEVI_BRANCH_HELD;
		DEVI(rdip)->devi_ref++;
	}
	ASSERT(DEVI(rdip)->devi_ref > 0);
	mutex_exit(&DEVI(rdip)->devi_lock);
}

/*
 * Returns 1 if rdip carries a branch hold (flag set and at least one
 * reference), 0 otherwise.
 */
int
e_ddi_branch_held(dev_info_t *rdip)
{
	int rv = 0;

	mutex_enter(&DEVI(rdip)->devi_lock);
	if ((DEVI(rdip)->devi_flags & DEVI_BRANCH_HELD) &&
	    DEVI(rdip)->devi_ref > 0) {
		rv = 1;
	}
	mutex_exit(&DEVI(rdip)->devi_lock);

	return (rv);
}

/*
 * Drop the branch hold: clears DEVI_BRANCH_HELD and releases the
 * reference taken by e_ddi_branch_hold().
 */
void
e_ddi_branch_rele(dev_info_t *rdip)
{
	mutex_enter(&DEVI(rdip)->devi_lock);
	DEVI(rdip)->devi_flags &= ~DEVI_BRANCH_HELD;
	DEVI(rdip)->devi_ref--;
	mutex_exit(&DEVI(rdip)->devi_lock);
}

/*
 * Unconfigure (and with DEVI_BRANCH_DESTROY, remove) a held branch.
 * The caller must NOT hold the parent busy - devfs_clean() below can
 * deadlock otherwise.  Returns an errno value.
 */
int
e_ddi_branch_unconfigure(
	dev_info_t *rdip,
	dev_info_t **dipp,
	uint_t flags)
{
	int	circ, rv;
	int	destroy;
	char	*devnm;
	uint_t	nflags;
	dev_info_t *pdip;

	if (dipp)
		*dipp = NULL;

	if (rdip == NULL)
		return (EINVAL);

	pdip = ddi_get_parent(rdip);

	ASSERT(pdip);

	/*
	 * Check if caller holds pdip busy - can cause deadlocks during
	 * devfs_clean()
	 */
	if (DEVI_BUSY_OWNED(pdip)) {
		cmn_err(CE_WARN, "e_ddi_branch_unconfigure: failed: parent"
		    " devinfo node(%p) is busy held", (void *)pdip);
		return (EINVAL);
	}

	destroy = (flags & DEVI_BRANCH_DESTROY) ? 1 : 0;

	devnm = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);

	ndi_devi_enter(pdip, &circ);
	(void) ddi_deviname(rdip, devnm);
	ndi_devi_exit(pdip, circ);

	/*
	 * ddi_deviname() returns a component name with / prepended.
	 */
	(void) devfs_clean(pdip, devnm + 1, DV_CLEAN_FORCE);

	ndi_devi_enter(pdip, &circ);

	/*
	 * Recreate device name as it may have changed state (init/uninit)
	 * when parent busy lock was dropped for devfs_clean()
	 */
	(void) ddi_deviname(rdip, devnm);

	if (!e_ddi_branch_held(rdip)) {
		kmem_free(devnm, MAXNAMELEN + 1);
		ndi_devi_exit(pdip, circ);
		cmn_err(CE_WARN, "e_ddi_%s_branch: dip(%p) not held",
		    destroy ? "destroy" : "unconfigure", (void *)rdip);
		return (EINVAL);
	}

	/*
	 * Release hold on the branch. This is ok since we are holding the
	 * parent busy. If rdip is not removed, we must do a hold on the
	 * branch before returning.
9791 */ 9792 e_ddi_branch_rele(rdip); 9793 9794 nflags = NDI_DEVI_OFFLINE; 9795 if (destroy || (flags & DEVI_BRANCH_DESTROY)) { 9796 nflags |= NDI_DEVI_REMOVE; 9797 destroy = 1; 9798 } else { 9799 nflags |= NDI_UNCONFIG; /* uninit but don't remove */ 9800 } 9801 9802 if (flags & DEVI_BRANCH_EVENT) 9803 nflags |= NDI_POST_EVENT; 9804 9805 if (i_ddi_devi_attached(pdip) && 9806 (i_ddi_node_state(rdip) >= DS_INITIALIZED)) { 9807 rv = ndi_devi_unconfig_one(pdip, devnm+1, dipp, nflags); 9808 } else { 9809 rv = e_ddi_devi_unconfig(rdip, dipp, nflags); 9810 if (rv == NDI_SUCCESS) { 9811 ASSERT(!destroy || ddi_get_child(rdip) == NULL); 9812 rv = ndi_devi_offline(rdip, nflags); 9813 } 9814 } 9815 9816 if (!destroy || rv != NDI_SUCCESS) { 9817 /* The dip still exists, so do a hold */ 9818 e_ddi_branch_hold(rdip); 9819 } 9820 9821 kmem_free(devnm, MAXNAMELEN + 1); 9822 ndi_devi_exit(pdip, circ); 9823 return (ndi2errno(rv)); 9824 } 9825 9826 int 9827 e_ddi_branch_destroy(dev_info_t *rdip, dev_info_t **dipp, uint_t flag) 9828 { 9829 return (e_ddi_branch_unconfigure(rdip, dipp, 9830 flag|DEVI_BRANCH_DESTROY)); 9831 } 9832 9833 /* 9834 * Number of chains for hash table 9835 */ 9836 #define NUMCHAINS 17 9837 9838 /* 9839 * Devinfo busy arg 9840 */ 9841 struct devi_busy { 9842 int dv_total; 9843 int s_total; 9844 mod_hash_t *dv_hash; 9845 mod_hash_t *s_hash; 9846 int (*callback)(dev_info_t *, void *, uint_t); 9847 void *arg; 9848 }; 9849 9850 static int 9851 visit_dip(dev_info_t *dip, void *arg) 9852 { 9853 uintptr_t sbusy, dvbusy, ref; 9854 struct devi_busy *bsp = arg; 9855 9856 ASSERT(bsp->callback); 9857 9858 /* 9859 * A dip cannot be busy if its reference count is 0 9860 */ 9861 if ((ref = e_ddi_devi_holdcnt(dip)) == 0) { 9862 return (bsp->callback(dip, bsp->arg, 0)); 9863 } 9864 9865 if (mod_hash_find(bsp->dv_hash, dip, (mod_hash_val_t *)&dvbusy)) 9866 dvbusy = 0; 9867 9868 /* 9869 * To catch device opens currently maintained on specfs common snodes. 
9870 */ 9871 if (mod_hash_find(bsp->s_hash, dip, (mod_hash_val_t *)&sbusy)) 9872 sbusy = 0; 9873 9874 #ifdef DEBUG 9875 if (ref < sbusy || ref < dvbusy) { 9876 cmn_err(CE_WARN, "dip(%p): sopen = %lu, dvopen = %lu " 9877 "dip ref = %lu\n", (void *)dip, sbusy, dvbusy, ref); 9878 } 9879 #endif 9880 9881 dvbusy = (sbusy > dvbusy) ? sbusy : dvbusy; 9882 9883 return (bsp->callback(dip, bsp->arg, dvbusy)); 9884 } 9885 9886 static int 9887 visit_snode(struct snode *sp, void *arg) 9888 { 9889 uintptr_t sbusy; 9890 dev_info_t *dip; 9891 int count; 9892 struct devi_busy *bsp = arg; 9893 9894 ASSERT(sp); 9895 9896 /* 9897 * The stable lock is held. This prevents 9898 * the snode and its associated dip from 9899 * going away. 9900 */ 9901 dip = NULL; 9902 count = spec_devi_open_count(sp, &dip); 9903 9904 if (count <= 0) 9905 return (DDI_WALK_CONTINUE); 9906 9907 ASSERT(dip); 9908 9909 if (mod_hash_remove(bsp->s_hash, dip, (mod_hash_val_t *)&sbusy)) 9910 sbusy = count; 9911 else 9912 sbusy += count; 9913 9914 if (mod_hash_insert(bsp->s_hash, dip, (mod_hash_val_t)sbusy)) { 9915 cmn_err(CE_WARN, "%s: s_hash insert failed: dip=0x%p, " 9916 "sbusy = %lu", "e_ddi_branch_referenced", 9917 (void *)dip, sbusy); 9918 } 9919 9920 bsp->s_total += count; 9921 9922 return (DDI_WALK_CONTINUE); 9923 } 9924 9925 static void 9926 visit_dvnode(struct dv_node *dv, void *arg) 9927 { 9928 uintptr_t dvbusy; 9929 uint_t count; 9930 struct vnode *vp; 9931 struct devi_busy *bsp = arg; 9932 9933 ASSERT(dv && dv->dv_devi); 9934 9935 vp = DVTOV(dv); 9936 9937 mutex_enter(&vp->v_lock); 9938 count = vp->v_count; 9939 mutex_exit(&vp->v_lock); 9940 9941 if (!count) 9942 return; 9943 9944 if (mod_hash_remove(bsp->dv_hash, dv->dv_devi, 9945 (mod_hash_val_t *)&dvbusy)) 9946 dvbusy = count; 9947 else 9948 dvbusy += count; 9949 9950 if (mod_hash_insert(bsp->dv_hash, dv->dv_devi, 9951 (mod_hash_val_t)dvbusy)) { 9952 cmn_err(CE_WARN, "%s: dv_hash insert failed: dip=0x%p, " 9953 "dvbusy=%lu", 
"e_ddi_branch_referenced", 9954 (void *)dv->dv_devi, dvbusy); 9955 } 9956 9957 bsp->dv_total += count; 9958 } 9959 9960 /* 9961 * Returns reference count on success or -1 on failure. 9962 */ 9963 int 9964 e_ddi_branch_referenced( 9965 dev_info_t *rdip, 9966 int (*callback)(dev_info_t *dip, void *arg, uint_t ref), 9967 void *arg) 9968 { 9969 int circ; 9970 char *path; 9971 dev_info_t *pdip; 9972 struct devi_busy bsa = {0}; 9973 9974 ASSERT(rdip); 9975 9976 path = kmem_alloc(MAXPATHLEN, KM_SLEEP); 9977 9978 ndi_hold_devi(rdip); 9979 9980 pdip = ddi_get_parent(rdip); 9981 9982 ASSERT(pdip); 9983 9984 /* 9985 * Check if caller holds pdip busy - can cause deadlocks during 9986 * devfs_walk() 9987 */ 9988 if (!e_ddi_branch_held(rdip) || DEVI_BUSY_OWNED(pdip)) { 9989 cmn_err(CE_WARN, "e_ddi_branch_referenced: failed: " 9990 "devinfo branch(%p) not held or parent busy held", 9991 (void *)rdip); 9992 ndi_rele_devi(rdip); 9993 kmem_free(path, MAXPATHLEN); 9994 return (-1); 9995 } 9996 9997 ndi_devi_enter(pdip, &circ); 9998 (void) ddi_pathname(rdip, path); 9999 ndi_devi_exit(pdip, circ); 10000 10001 bsa.dv_hash = mod_hash_create_ptrhash("dv_node busy hash", NUMCHAINS, 10002 mod_hash_null_valdtor, sizeof (struct dev_info)); 10003 10004 bsa.s_hash = mod_hash_create_ptrhash("snode busy hash", NUMCHAINS, 10005 mod_hash_null_valdtor, sizeof (struct snode)); 10006 10007 if (devfs_walk(path, visit_dvnode, &bsa)) { 10008 cmn_err(CE_WARN, "e_ddi_branch_referenced: " 10009 "devfs walk failed for: %s", path); 10010 kmem_free(path, MAXPATHLEN); 10011 bsa.s_total = bsa.dv_total = -1; 10012 goto out; 10013 } 10014 10015 kmem_free(path, MAXPATHLEN); 10016 10017 /* 10018 * Walk the snode table to detect device opens, which are currently 10019 * maintained on specfs common snodes. 
10020 */ 10021 spec_snode_walk(visit_snode, &bsa); 10022 10023 if (callback == NULL) 10024 goto out; 10025 10026 bsa.callback = callback; 10027 bsa.arg = arg; 10028 10029 if (visit_dip(rdip, &bsa) == DDI_WALK_CONTINUE) { 10030 ndi_devi_enter(rdip, &circ); 10031 ddi_walk_devs(ddi_get_child(rdip), visit_dip, &bsa); 10032 ndi_devi_exit(rdip, circ); 10033 } 10034 10035 out: 10036 ndi_rele_devi(rdip); 10037 mod_hash_destroy_ptrhash(bsa.s_hash); 10038 mod_hash_destroy_ptrhash(bsa.dv_hash); 10039 return (bsa.s_total > bsa.dv_total ? bsa.s_total : bsa.dv_total); 10040 } 10041