1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 #include <sys/note.h> 28 #include <sys/types.h> 29 #include <sys/param.h> 30 #include <sys/systm.h> 31 #include <sys/buf.h> 32 #include <sys/uio.h> 33 #include <sys/cred.h> 34 #include <sys/poll.h> 35 #include <sys/mman.h> 36 #include <sys/kmem.h> 37 #include <sys/model.h> 38 #include <sys/file.h> 39 #include <sys/proc.h> 40 #include <sys/open.h> 41 #include <sys/user.h> 42 #include <sys/t_lock.h> 43 #include <sys/vm.h> 44 #include <sys/stat.h> 45 #include <vm/hat.h> 46 #include <vm/seg.h> 47 #include <vm/seg_vn.h> 48 #include <vm/seg_dev.h> 49 #include <vm/as.h> 50 #include <sys/cmn_err.h> 51 #include <sys/cpuvar.h> 52 #include <sys/debug.h> 53 #include <sys/autoconf.h> 54 #include <sys/sunddi.h> 55 #include <sys/esunddi.h> 56 #include <sys/sunndi.h> 57 #include <sys/kstat.h> 58 #include <sys/conf.h> 59 #include <sys/ddi_impldefs.h> /* include implementation structure defs */ 60 #include <sys/ndi_impldefs.h> /* include prototypes */ 61 #include <sys/ddi_timer.h> 62 #include <sys/hwconf.h> 63 #include 
<sys/pathname.h> 64 #include <sys/modctl.h> 65 #include <sys/epm.h> 66 #include <sys/devctl.h> 67 #include <sys/callb.h> 68 #include <sys/cladm.h> 69 #include <sys/sysevent.h> 70 #include <sys/dacf_impl.h> 71 #include <sys/ddidevmap.h> 72 #include <sys/bootconf.h> 73 #include <sys/disp.h> 74 #include <sys/atomic.h> 75 #include <sys/promif.h> 76 #include <sys/instance.h> 77 #include <sys/sysevent/eventdefs.h> 78 #include <sys/task.h> 79 #include <sys/project.h> 80 #include <sys/taskq.h> 81 #include <sys/devpolicy.h> 82 #include <sys/ctype.h> 83 #include <net/if.h> 84 #include <sys/rctl.h> 85 #include <sys/zone.h> 86 87 extern pri_t minclsyspri; 88 89 extern rctl_hndl_t rc_project_locked_mem; 90 extern rctl_hndl_t rc_zone_locked_mem; 91 92 #ifdef DEBUG 93 static int sunddi_debug = 0; 94 #endif /* DEBUG */ 95 96 /* ddi_umem_unlock miscellaneous */ 97 98 static void i_ddi_umem_unlock_thread_start(void); 99 100 static kmutex_t ddi_umem_unlock_mutex; /* unlock list mutex */ 101 static kcondvar_t ddi_umem_unlock_cv; /* unlock list block/unblock */ 102 static kthread_t *ddi_umem_unlock_thread; 103 /* 104 * The ddi_umem_unlock FIFO list. NULL head pointer indicates empty list. 105 */ 106 static struct ddi_umem_cookie *ddi_umem_unlock_head = NULL; 107 static struct ddi_umem_cookie *ddi_umem_unlock_tail = NULL; 108 109 110 /* 111 * DDI(Sun) Function and flag definitions: 112 */ 113 114 #if defined(__x86) 115 /* 116 * Used to indicate which entries were chosen from a range. 117 */ 118 char *chosen_reg = "chosen-reg"; 119 #endif 120 121 /* 122 * Function used to ring system console bell 123 */ 124 void (*ddi_console_bell_func)(clock_t duration); 125 126 /* 127 * Creating register mappings and handling interrupts: 128 */ 129 130 /* 131 * Generic ddi_map: Call parent to fulfill request... 
 */

int
ddi_map(dev_info_t *dp, ddi_map_req_t *mp, off_t offset,
    off_t len, caddr_t *addrp)
{
	dev_info_t *pdip;

	ASSERT(dp);
	/* Delegate the mapping request to the parent nexus bus_map(9E) op. */
	pdip = (dev_info_t *)DEVI(dp)->devi_parent;
	return ((DEVI(pdip)->devi_ops->devo_bus_ops->bus_map)(pdip,
	    dp, mp, offset, len, addrp));
}

/*
 * ddi_apply_range: (Called by nexi only.)
 * Apply ranges in parent node dp, to child regspec rp...
 */

int
ddi_apply_range(dev_info_t *dp, dev_info_t *rdip, struct regspec *rp)
{
	return (i_ddi_apply_range(dp, rdip, rp));
}

/*
 * Map register set "rnumber" of dip into kernel virtual address space.
 * On x86 the selected reg tuple (adjusted by offset/len) is first
 * recorded in the "chosen-reg" property before asking the parent to map.
 */
int
ddi_map_regs(dev_info_t *dip, uint_t rnumber, caddr_t *kaddrp, off_t offset,
    off_t len)
{
	ddi_map_req_t mr;
#if defined(__x86)
	struct {
		int bus;
		int addr;
		int size;
	} reg, *reglist;
	uint_t length;
	int rc;

	/*
	 * get the 'registers' or the 'reg' property.
	 * We look up the reg property as an array of
	 * int's.
	 */
	rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "registers", (int **)&reglist, &length);
	if (rc != DDI_PROP_SUCCESS)
		rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
		    DDI_PROP_DONTPASS, "reg", (int **)&reglist, &length);
	if (rc == DDI_PROP_SUCCESS) {
		/*
		 * point to the required entry.
		 * NOTE(review): rnumber is not range-checked against
		 * length here -- presumably validated by the caller;
		 * confirm against callers.
		 */
		reg = reglist[rnumber];
		reg.addr += offset;
		if (len != 0)
			reg.size = len;
		/*
		 * make a new property containing ONLY the required tuple.
		 */
		if (ddi_prop_update_int_array(DDI_DEV_T_NONE, dip,
		    chosen_reg, (int *)&reg, (sizeof (reg)/sizeof (int)))
		    != DDI_PROP_SUCCESS) {
			cmn_err(CE_WARN, "%s%d: cannot create '%s' "
			    "property", DEVI(dip)->devi_name,
			    DEVI(dip)->devi_instance, chosen_reg);
		}
		/*
		 * free the memory allocated by
		 * ddi_prop_lookup_int_array ().
		 */
		ddi_prop_free((void *)reglist);
	}
#endif
	/* Build a locked kernel-mapping request keyed by register number. */
	mr.map_op = DDI_MO_MAP_LOCKED;
	mr.map_type = DDI_MT_RNUMBER;
	mr.map_obj.rnumber = rnumber;
	mr.map_prot = PROT_READ | PROT_WRITE;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_handlep = NULL;
	mr.map_vers = DDI_MAP_VERSION;

	/*
	 * Call my parent to map in my regs.
	 */

	return (ddi_map(dip, &mr, offset, len, kaddrp));
}

/*
 * Undo a ddi_map_regs() mapping; clears *kaddrp and, on x86, removes
 * the "chosen-reg" property created at map time.
 */
void
ddi_unmap_regs(dev_info_t *dip, uint_t rnumber, caddr_t *kaddrp, off_t offset,
    off_t len)
{
	ddi_map_req_t mr;

	mr.map_op = DDI_MO_UNMAP;
	mr.map_type = DDI_MT_RNUMBER;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_prot = PROT_READ | PROT_WRITE;	/* who cares? */
	mr.map_obj.rnumber = rnumber;
	mr.map_handlep = NULL;
	mr.map_vers = DDI_MAP_VERSION;

	/*
	 * Call my parent to unmap my regs.
	 */

	(void) ddi_map(dip, &mr, offset, len, kaddrp);
	*kaddrp = (caddr_t)0;
#if defined(__x86)
	(void) ddi_prop_remove(DDI_DEV_T_NONE, dip, chosen_reg);
#endif
}

/* Default bus_map(9E) implementation: defer to the common framework code. */
int
ddi_bus_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
    off_t offset, off_t len, caddr_t *vaddrp)
{
	return (i_ddi_bus_map(dip, rdip, mp, offset, len, vaddrp));
}

/*
 * nullbusmap:	The/DDI default bus_map entry point for nexi
 *		not conforming to the reg/range paradigm (i.e. scsi, etc.)
 *		with no HAT/MMU layer to be programmed at this level.
 *
 *		If the call is to map by rnumber, return an error,
 *		otherwise pass anything else up the tree to my parent.
 */
int
nullbusmap(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
    off_t offset, off_t len, caddr_t *vaddrp)
{
	_NOTE(ARGUNUSED(rdip))
	if (mp->map_type == DDI_MT_RNUMBER)
		return (DDI_ME_UNSUPPORTED);

	return (ddi_map(dip, mp, offset, len, vaddrp));
}

/*
 * ddi_rnumber_to_regspec: Not for use by leaf drivers.
274 * Only for use by nexi using the reg/range paradigm. 275 */ 276 struct regspec * 277 ddi_rnumber_to_regspec(dev_info_t *dip, int rnumber) 278 { 279 return (i_ddi_rnumber_to_regspec(dip, rnumber)); 280 } 281 282 283 /* 284 * Note that we allow the dip to be nil because we may be called 285 * prior even to the instantiation of the devinfo tree itself - all 286 * regular leaf and nexus drivers should always use a non-nil dip! 287 * 288 * We treat peek in a somewhat cavalier fashion .. assuming that we'll 289 * simply get a synchronous fault as soon as we touch a missing address. 290 * 291 * Poke is rather more carefully handled because we might poke to a write 292 * buffer, "succeed", then only find some time later that we got an 293 * asynchronous fault that indicated that the address we were writing to 294 * was not really backed by hardware. 295 */ 296 297 static int 298 i_ddi_peekpoke(dev_info_t *devi, ddi_ctl_enum_t cmd, size_t size, 299 void *addr, void *value_p) 300 { 301 union { 302 uint64_t u64; 303 uint32_t u32; 304 uint16_t u16; 305 uint8_t u8; 306 } peekpoke_value; 307 308 peekpoke_ctlops_t peekpoke_args; 309 uint64_t dummy_result; 310 int rval; 311 312 /* Note: size is assumed to be correct; it is not checked. 
*/ 313 peekpoke_args.size = size; 314 peekpoke_args.dev_addr = (uintptr_t)addr; 315 peekpoke_args.handle = NULL; 316 peekpoke_args.repcount = 1; 317 peekpoke_args.flags = 0; 318 319 if (cmd == DDI_CTLOPS_POKE) { 320 switch (size) { 321 case sizeof (uint8_t): 322 peekpoke_value.u8 = *(uint8_t *)value_p; 323 break; 324 case sizeof (uint16_t): 325 peekpoke_value.u16 = *(uint16_t *)value_p; 326 break; 327 case sizeof (uint32_t): 328 peekpoke_value.u32 = *(uint32_t *)value_p; 329 break; 330 case sizeof (uint64_t): 331 peekpoke_value.u64 = *(uint64_t *)value_p; 332 break; 333 } 334 } 335 336 peekpoke_args.host_addr = (uintptr_t)&peekpoke_value.u64; 337 338 if (devi != NULL) 339 rval = ddi_ctlops(devi, devi, cmd, &peekpoke_args, 340 &dummy_result); 341 else 342 rval = peekpoke_mem(cmd, &peekpoke_args); 343 344 /* 345 * A NULL value_p is permitted by ddi_peek(9F); discard the result. 346 */ 347 if ((cmd == DDI_CTLOPS_PEEK) & (value_p != NULL)) { 348 switch (size) { 349 case sizeof (uint8_t): 350 *(uint8_t *)value_p = peekpoke_value.u8; 351 break; 352 case sizeof (uint16_t): 353 *(uint16_t *)value_p = peekpoke_value.u16; 354 break; 355 case sizeof (uint32_t): 356 *(uint32_t *)value_p = peekpoke_value.u32; 357 break; 358 case sizeof (uint64_t): 359 *(uint64_t *)value_p = peekpoke_value.u64; 360 break; 361 } 362 } 363 364 return (rval); 365 } 366 367 /* 368 * Keep ddi_peek() and ddi_poke() in case 3rd parties are calling this. 369 * they shouldn't be, but the 9f manpage kind of pseudo exposes it. 
370 */ 371 int 372 ddi_peek(dev_info_t *devi, size_t size, void *addr, void *value_p) 373 { 374 switch (size) { 375 case sizeof (uint8_t): 376 case sizeof (uint16_t): 377 case sizeof (uint32_t): 378 case sizeof (uint64_t): 379 break; 380 default: 381 return (DDI_FAILURE); 382 } 383 384 return (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, size, addr, value_p)); 385 } 386 387 int 388 ddi_poke(dev_info_t *devi, size_t size, void *addr, void *value_p) 389 { 390 switch (size) { 391 case sizeof (uint8_t): 392 case sizeof (uint16_t): 393 case sizeof (uint32_t): 394 case sizeof (uint64_t): 395 break; 396 default: 397 return (DDI_FAILURE); 398 } 399 400 return (i_ddi_peekpoke(devi, DDI_CTLOPS_POKE, size, addr, value_p)); 401 } 402 403 int 404 ddi_peek8(dev_info_t *dip, int8_t *addr, int8_t *val_p) 405 { 406 return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr, 407 val_p)); 408 } 409 410 int 411 ddi_peek16(dev_info_t *dip, int16_t *addr, int16_t *val_p) 412 { 413 return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr, 414 val_p)); 415 } 416 417 int 418 ddi_peek32(dev_info_t *dip, int32_t *addr, int32_t *val_p) 419 { 420 return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr, 421 val_p)); 422 } 423 424 int 425 ddi_peek64(dev_info_t *dip, int64_t *addr, int64_t *val_p) 426 { 427 return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr, 428 val_p)); 429 } 430 431 432 /* 433 * We need to separate the old interfaces from the new ones and leave them 434 * in here for a while. Previous versions of the OS defined the new interfaces 435 * to the old interfaces. This way we can fix things up so that we can 436 * eventually remove these interfaces. 437 * e.g. A 3rd party module/driver using ddi_peek8 and built against S10 438 * or earlier will actually have a reference to ddi_peekc in the binary. 
 */
#ifdef _ILP32
/* Legacy ILP32 aliases for the fixed-width peek routines (see above). */
int
ddi_peekc(dev_info_t *dip, int8_t *addr, int8_t *val_p)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}

int
ddi_peeks(dev_info_t *dip, int16_t *addr, int16_t *val_p)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}

int
ddi_peekl(dev_info_t *dip, int32_t *addr, int32_t *val_p)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}

int
ddi_peekd(dev_info_t *dip, int64_t *addr, int64_t *val_p)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}
#endif /* _ILP32 */

int
ddi_poke8(dev_info_t *dip, int8_t *addr, int8_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}

int
ddi_poke16(dev_info_t *dip, int16_t *addr, int16_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}

int
ddi_poke32(dev_info_t *dip, int32_t *addr, int32_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}

int
ddi_poke64(dev_info_t *dip, int64_t *addr, int64_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}

/*
 * We need to separate the old interfaces from the new ones and leave them
 * in here for a while. Previous versions of the OS defined the new interfaces
 * to the old interfaces. This way we can fix things up so that we can
 * eventually remove these interfaces.
 * e.g. A 3rd party module/driver using ddi_poke8 and built against S10
 * or earlier will actually have a reference to ddi_pokec in the binary.
 */
#ifdef _ILP32
/* Legacy ILP32 aliases for the fixed-width poke routines (see above). */
int
ddi_pokec(dev_info_t *dip, int8_t *addr, int8_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}

int
ddi_pokes(dev_info_t *dip, int16_t *addr, int16_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}

int
ddi_pokel(dev_info_t *dip, int32_t *addr, int32_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}

int
ddi_poked(dev_info_t *dip, int64_t *addr, int64_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}
#endif /* _ILP32 */

/*
 * ddi_peekpokeio() is used primarily by the mem drivers for moving
 * data to and from uio structures via peek and poke.  Note that we
 * use "internal" routines ddi_peek and ddi_poke to make this go
 * slightly faster, avoiding the call overhead ..
 */
int
ddi_peekpokeio(dev_info_t *devi, struct uio *uio, enum uio_rw rw,
    caddr_t addr, size_t len, uint_t xfersize)
{
	int64_t	ibuffer;
	int8_t w8;
	size_t sz;
	int o;

	/* Never transfer more than a long at a time. */
	if (xfersize > sizeof (long))
		xfersize = sizeof (long);

	while (len != 0) {
		/*
		 * Odd address or odd residual length: fall back to a
		 * single-byte transfer for this iteration.
		 */
		if ((len | (uintptr_t)addr) & 1) {
			sz = sizeof (int8_t);
			if (rw == UIO_WRITE) {
				if ((o = uwritec(uio)) == -1)
					return (DDI_FAILURE);
				if (ddi_poke8(devi, (int8_t *)addr,
				    (int8_t)o) != DDI_SUCCESS)
					return (DDI_FAILURE);
			} else {
				if (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, sz,
				    (int8_t *)addr, &w8) != DDI_SUCCESS)
					return (DDI_FAILURE);
				if (ureadc(w8, uio))
					return (DDI_FAILURE);
			}
		} else {
			/*
			 * Pick the widest transfer size (up to xfersize)
			 * for which both the address and remaining length
			 * are suitably aligned; cases deliberately fall
			 * through to the next-narrower size.
			 */
			switch (xfersize) {
			case sizeof (int64_t):
				if (((len | (uintptr_t)addr) &
				    (sizeof (int64_t) - 1)) == 0) {
					sz = xfersize;
					break;
				}
				/*FALLTHROUGH*/
			case sizeof (int32_t):
				if (((len | (uintptr_t)addr) &
				    (sizeof (int32_t) - 1)) == 0) {
					sz = xfersize;
					break;
				}
				/*FALLTHROUGH*/
			default:
				/*
				 * This still assumes that we might have an
				 * I/O bus out there that permits 16-bit
				 * transfers (and that it would be upset by
				 * 32-bit transfers from such locations).
				 */
				sz = sizeof (int16_t);
				break;
			}

			if (rw == UIO_READ) {
				if (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, sz,
				    addr, &ibuffer) != DDI_SUCCESS)
					return (DDI_FAILURE);
			}

			if (uiomove(&ibuffer, sz, rw, uio))
				return (DDI_FAILURE);

			if (rw == UIO_WRITE) {
				if (i_ddi_peekpoke(devi, DDI_CTLOPS_POKE, sz,
				    addr, &ibuffer) != DDI_SUCCESS)
					return (DDI_FAILURE);
			}
		}
		addr += sz;
		len -= sz;
	}
	return (DDI_SUCCESS);
}

/*
 * These routines are used by drivers that do layered ioctls
 * On sparc, they're implemented in assembler to avoid spilling
 * register windows in the common (copyin) case ..
 */
#if !defined(__sparc)
/* Copy in from caller; FKIOCTL means the "user" buffer is kernel memory. */
int
ddi_copyin(const void *buf, void *kernbuf, size_t size, int flags)
{
	if (flags & FKIOCTL)
		return (kcopy(buf, kernbuf, size) ? -1 : 0);
	return (copyin(buf, kernbuf, size));
}

/* Copy out to caller; FKIOCTL means the "user" buffer is kernel memory. */
int
ddi_copyout(const void *buf, void *kernbuf, size_t size, int flags)
{
	if (flags & FKIOCTL)
		return (kcopy(buf, kernbuf, size) ? -1 : 0);
	return (copyout(buf, kernbuf, size));
}
#endif /* !__sparc */

/*
 * Conversions in nexus pagesize units.  We don't duplicate the
 * 'nil dip' semantics of peek/poke because btopr/btop/ptob are DDI/DKI
 * routines anyway.
 */
/* Bytes -> pages, rounded down, using the nexus's notion of page size. */
unsigned long
ddi_btop(dev_info_t *dip, unsigned long bytes)
{
	unsigned long pages;

	(void) ddi_ctlops(dip, dip, DDI_CTLOPS_BTOP, &bytes, &pages);
	return (pages);
}

/* Bytes -> pages, rounded up, using the nexus's notion of page size. */
unsigned long
ddi_btopr(dev_info_t *dip, unsigned long bytes)
{
	unsigned long pages;

	(void) ddi_ctlops(dip, dip, DDI_CTLOPS_BTOPR, &bytes, &pages);
	return (pages);
}

/* Pages -> bytes using the nexus's notion of page size. */
unsigned long
ddi_ptob(dev_info_t *dip, unsigned long pages)
{
	unsigned long bytes;

	(void) ddi_ctlops(dip, dip, DDI_CTLOPS_PTOB, &pages, &bytes);
	return (bytes);
}

/* Raise to spl7 and return the previous priority for ddi_exit_critical(). */
unsigned int
ddi_enter_critical(void)
{
	return ((uint_t)spl7());
}

/* Restore the priority saved by ddi_enter_critical(). */
void
ddi_exit_critical(unsigned int spl)
{
	splx((int)spl);
}

/*
 * Nexus ctlops punter
 */

#if !defined(__sparc)
/*
 * Request bus_ctl parent to handle a bus_ctl request
 *
 * (The sparc version is in sparc_ddi.s)
 */
int
ddi_ctlops(dev_info_t *d, dev_info_t *r, ddi_ctl_enum_t op, void *a, void *v)
{
	int (*fp)();

	if (!d || !r)
		return (DDI_FAILURE);

	/* Walk up to the devinfo node that services bus_ctl for us. */
	if ((d = (dev_info_t *)DEVI(d)->devi_bus_ctl) == NULL)
		return (DDI_FAILURE);

	fp = DEVI(d)->devi_ops->devo_bus_ops->bus_ctl;
	return ((*fp)(d, r, op, a, v));
}

#endif

/*
 * DMA/DVMA setup
 */

#if defined(__sparc)
static ddi_dma_lim_t standard_limits = {
	(uint_t)0,	/* addr_t dlim_addr_lo */
	(uint_t)-1,	/* addr_t dlim_addr_hi */
	(uint_t)-1,	/* uint_t dlim_cntr_max */
	(uint_t)1,	/* uint_t dlim_burstsizes */
	(uint_t)1,	/* uint_t dlim_minxfer */
	0		/* uint_t dlim_dmaspeed */
};
#elif defined(__x86)
static ddi_dma_lim_t standard_limits = {
	(uint_t)0,		/* addr_t dlim_addr_lo */
	(uint_t)0xffffff,	/* addr_t dlim_addr_hi */
	(uint_t)0,		/* uint_t dlim_cntr_max */
	(uint_t)0x00000001,	/* uint_t dlim_burstsizes */
	(uint_t)DMA_UNIT_8,	/* uint_t dlim_minxfer */
	(uint_t)0,		/* uint_t dlim_dmaspeed */
	(uint_t)0x86<<24+0,	/* uint_t dlim_version */
	(uint_t)0xffff,		/* uint_t dlim_adreg_max */
	(uint_t)0xffff,		/* uint_t dlim_ctreg_max */
	(uint_t)512,		/* uint_t dlim_granular */
	(int)1,			/* int dlim_sgllen */
	(uint_t)0xffffffff	/* uint_t dlim_reqsizes */
};

#endif

/* Legacy DMA setup: map a pre-built ddi_dma_req through the parent nexus. */
int
ddi_dma_setup(dev_info_t *dip, struct ddi_dma_req *dmareqp,
    ddi_dma_handle_t *handlep)
{
	int (*funcp)() = ddi_dma_map;
	struct bus_ops *bop;
#if defined(__sparc)
	auto ddi_dma_lim_t dma_lim;

	/* Substitute the standard limits if the caller supplied none. */
	if (dmareqp->dmar_limits == (ddi_dma_lim_t *)0) {
		dma_lim = standard_limits;
	} else {
		dma_lim = *dmareqp->dmar_limits;
	}
	dmareqp->dmar_limits = &dma_lim;
#endif
#if defined(__x86)
	/* x86 requires explicit limits from the caller. */
	if (dmareqp->dmar_limits == (ddi_dma_lim_t *)0)
		return (DDI_FAILURE);
#endif

	/*
	 * Handle the case that the requester is both a leaf
	 * and a nexus driver simultaneously by calling the
	 * requester's bus_dma_map function directly instead
	 * of ddi_dma_map.
	 */
	bop = DEVI(dip)->devi_ops->devo_bus_ops;
	if (bop && bop->bus_dma_map)
		funcp = bop->bus_dma_map;
	return ((*funcp)(dip, dip, dmareqp, handlep));
}

/* Legacy DMA setup for a virtual-address range in address space 'as'. */
int
ddi_dma_addr_setup(dev_info_t *dip, struct as *as, caddr_t addr, size_t len,
    uint_t flags, int (*waitfp)(), caddr_t arg,
    ddi_dma_lim_t *limits, ddi_dma_handle_t *handlep)
{
	int (*funcp)() = ddi_dma_map;
	ddi_dma_lim_t dma_lim;
	struct ddi_dma_req dmareq;
	struct bus_ops *bop;

	if (len == 0) {
		return (DDI_DMA_NOMAPPING);
	}
	/* Substitute the standard limits if the caller supplied none. */
	if (limits == (ddi_dma_lim_t *)0) {
		dma_lim = standard_limits;
	} else {
		dma_lim = *limits;
	}
	dmareq.dmar_limits = &dma_lim;
	dmareq.dmar_flags = flags;
	dmareq.dmar_fp = waitfp;
	dmareq.dmar_arg = arg;
	dmareq.dmar_object.dmao_size = len;
	dmareq.dmar_object.dmao_type = DMA_OTYP_VADDR;
	dmareq.dmar_object.dmao_obj.virt_obj.v_as = as;
	dmareq.dmar_object.dmao_obj.virt_obj.v_addr = addr;
	dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;

	/*
	 * Handle the case that the requester is both a leaf
	 * and a nexus driver simultaneously by calling the
	 * requester's bus_dma_map function directly instead
	 * of ddi_dma_map.
	 */
	bop = DEVI(dip)->devi_ops->devo_bus_ops;
	if (bop && bop->bus_dma_map)
		funcp = bop->bus_dma_map;

	return ((*funcp)(dip, dip, &dmareq, handlep));
}

/* Legacy DMA setup for a struct buf; derives the DMA object from bp. */
int
ddi_dma_buf_setup(dev_info_t *dip, struct buf *bp, uint_t flags,
    int (*waitfp)(), caddr_t arg, ddi_dma_lim_t *limits,
    ddi_dma_handle_t *handlep)
{
	int (*funcp)() = ddi_dma_map;
	ddi_dma_lim_t dma_lim;
	struct ddi_dma_req dmareq;
	struct bus_ops *bop;

	/* Substitute the standard limits if the caller supplied none. */
	if (limits == (ddi_dma_lim_t *)0) {
		dma_lim = standard_limits;
	} else {
		dma_lim = *limits;
	}
	dmareq.dmar_limits = &dma_lim;
	dmareq.dmar_flags = flags;
	dmareq.dmar_fp = waitfp;
	dmareq.dmar_arg = arg;
	dmareq.dmar_object.dmao_size = (uint_t)bp->b_bcount;

	if (bp->b_flags & B_PAGEIO) {
		/* Buffer is a page list rather than a mapped address. */
		dmareq.dmar_object.dmao_type = DMA_OTYP_PAGES;
		dmareq.dmar_object.dmao_obj.pp_obj.pp_pp = bp->b_pages;
		dmareq.dmar_object.dmao_obj.pp_obj.pp_offset =
		    (uint_t)(((uintptr_t)bp->b_un.b_addr) & MMU_PAGEOFFSET);
	} else {
		dmareq.dmar_object.dmao_type = DMA_OTYP_BUFVADDR;
		dmareq.dmar_object.dmao_obj.virt_obj.v_addr = bp->b_un.b_addr;
		if (bp->b_flags & B_SHADOW) {
			dmareq.dmar_object.dmao_obj.virt_obj.v_priv =
			    bp->b_shadow;
		} else {
			dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;
		}

		/*
		 * If the buffer has no proc pointer, or the proc
		 * struct has the kernel address space, or the buffer has
		 * been marked B_REMAPPED (meaning that it is now
		 * mapped into the kernel's address space), then
		 * the address space is kas (kernel address space).
		 */
		if ((bp->b_proc == NULL) || (bp->b_proc->p_as == &kas) ||
		    (bp->b_flags & B_REMAPPED)) {
			dmareq.dmar_object.dmao_obj.virt_obj.v_as = 0;
		} else {
			dmareq.dmar_object.dmao_obj.virt_obj.v_as =
			    bp->b_proc->p_as;
		}
	}

	/*
	 * Handle the case that the requester is both a leaf
	 * and a nexus driver simultaneously by calling the
	 * requester's bus_dma_map function directly instead
	 * of ddi_dma_map.
	 */
	bop = DEVI(dip)->devi_ops->devo_bus_ops;
	if (bop && bop->bus_dma_map)
		funcp = bop->bus_dma_map;

	return ((*funcp)(dip, dip, &dmareq, handlep));
}

#if !defined(__sparc)
/*
 * Request bus_dma_ctl parent to fiddle with a dma request.
 *
 * (The sparc version is in sparc_subr.s)
 */
int
ddi_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
    off_t *offp, size_t *lenp, caddr_t *objp, uint_t flags)
{
	int (*fp)();

	dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_ctl;
	fp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_ctl;
	return ((*fp) (dip, rdip, handle, request, offp, lenp, objp, flags));
}
#endif

/*
 * For all DMA control functions, call the DMA control
 * routine and return status.
 *
 * Just plain assume that the parent is to be called.
 * If a nexus driver or a thread outside the framework
 * of a nexus driver or a leaf driver calls these functions,
 * it is up to them to deal with the fact that the parent's
 * bus_dma_ctl function will be the first one called.
 */

/* Requester dip recorded in the DMA handle; used as both dip and rdip. */
#define	HD	((ddi_dma_impl_t *)h)->dmai_rdip

int
ddi_dma_kvaddrp(ddi_dma_handle_t h, off_t off, size_t len, caddr_t *kp)
{
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_KVADDR, &off, &len, kp, 0));
}

int
ddi_dma_htoc(ddi_dma_handle_t h, off_t o, ddi_dma_cookie_t *c)
{
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_HTOC, &o, 0, (caddr_t *)c, 0));
}

int
ddi_dma_coff(ddi_dma_handle_t h, ddi_dma_cookie_t *c, off_t *o)
{
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_COFF,
	    (off_t *)c, 0, (caddr_t *)o, 0));
}

int
ddi_dma_movwin(ddi_dma_handle_t h, off_t *o, size_t *l, ddi_dma_cookie_t *c)
{
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_MOVWIN, o,
	    l, (caddr_t *)c, 0));
}

/* Report the current window; only valid for DDI_DMA_PARTIAL mappings. */
int
ddi_dma_curwin(ddi_dma_handle_t h, off_t *o, size_t *l)
{
	if ((((ddi_dma_impl_t *)h)->dmai_rflags & DDI_DMA_PARTIAL) == 0)
		return (DDI_FAILURE);
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_REPWIN, o, l, 0, 0));
}

int
ddi_dma_nextwin(ddi_dma_handle_t h, ddi_dma_win_t win,
    ddi_dma_win_t *nwin)
{
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_NEXTWIN, (off_t *)&win, 0,
	    (caddr_t *)nwin, 0));
}

int
ddi_dma_nextseg(ddi_dma_win_t win, ddi_dma_seg_t seg, ddi_dma_seg_t *nseg)
{
	/* A window doubles as a handle for the mctl interface. */
	ddi_dma_handle_t h = (ddi_dma_handle_t)win;

	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_NEXTSEG, (off_t *)&win,
	    (size_t *)&seg, (caddr_t *)nseg, 0));
}

#if (defined(__i386) && !defined(__amd64)) || defined(__sparc)
/*
 * This routine is Obsolete and should be removed from ALL architectures
 * in a future release of Solaris.
 *
 * It is deliberately NOT ported to amd64; please fix the code that
 * depends on this routine to use ddi_dma_nextcookie(9F).
 *
 * NOTE: even though we fixed the pointer through a 32-bit param issue (the fix
 * is a side effect to some other cleanup), we're still not going to support
 * this interface on x64.
 */
int
ddi_dma_segtocookie(ddi_dma_seg_t seg, off_t *o, off_t *l,
    ddi_dma_cookie_t *cookiep)
{
	/* A segment doubles as a handle for the mctl interface. */
	ddi_dma_handle_t h = (ddi_dma_handle_t)seg;

	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_SEGTOC, o, (size_t *)l,
	    (caddr_t *)cookiep, 0));
}
#endif /* (__i386 && !__amd64) || __sparc */

#if !defined(__sparc)

/*
 * The SPARC versions of these routines are done in assembler to
 * save register windows, so they're in sparc_subr.s.
 */

/* Route a legacy DMA map request to the ancestor providing bus_dma_map. */
int
ddi_dma_map(dev_info_t *dip, dev_info_t *rdip,
    struct ddi_dma_req *dmareqp, ddi_dma_handle_t *handlep)
{
	dev_info_t *hdip;
	int (*funcp)(dev_info_t *, dev_info_t *, struct ddi_dma_req *,
	    ddi_dma_handle_t *);

	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_map;

	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_map;
	return ((*funcp)(hdip, rdip, dmareqp, handlep));
}

/* Allocate a DMA handle via the ancestor providing bus_dma_allochdl. */
int
ddi_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
    int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
{
	dev_info_t *hdip;
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_attr_t *,
	    int (*)(caddr_t), caddr_t, ddi_dma_handle_t *);

	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_allochdl;

	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_allochdl;
	return ((*funcp)(hdip, rdip, attr, waitfp, arg, handlep));
}

/*
 * Free a DMA handle.  Note: routed via devi_bus_dma_allochdl -- the
 * same ancestor that allocated the handle provides bus_dma_freehdl.
 */
int
ddi_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handlep)
{
	dev_info_t *hdip;
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t);

	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_allochdl;

	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_freehdl;
	return ((*funcp)(hdip, rdip, handlep));
}

int
ddi_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
    ddi_dma_cookie_t *cp, uint_t *ccountp)
{
	dev_info_t *hdip;
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
	    struct ddi_dma_req *, ddi_dma_cookie_t *, uint_t *);

	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl;

	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_bindhdl;
	return ((*funcp)(hdip, rdip, handle, dmareq, cp, ccountp));
}

int
ddi_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle)
{
	dev_info_t *hdip;
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t);

	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_unbindhdl;

	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_unbindhdl;
	return ((*funcp)(hdip, rdip, handle));
}


int
ddi_dma_flush(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, off_t off, size_t len,
    uint_t cache_flags)
{
	dev_info_t *hdip;
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
	    off_t, size_t, uint_t);

	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_flush;

	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_flush;
	return ((*funcp)(hdip, rdip, handle, off, len, cache_flags));
}

int
ddi_dma_win(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, uint_t win, off_t *offp,
    size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	dev_info_t *hdip;
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
	    uint_t, off_t *, size_t *, ddi_dma_cookie_t *, uint_t *);

	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_win;

	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_win;
	return ((*funcp)(hdip, rdip, handle, win, offp, lenp,
	    cookiep, ccountp));
}

int
ddi_dma_sync(ddi_dma_handle_t h, off_t o, size_t l, uint_t whom)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)h;
	dev_info_t *hdip, *dip;
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t, off_t,
	    size_t, uint_t);

	/*
	 * the DMA nexus driver will set DMP_NOSYNC if the
	 * platform does not require any sync operation. For
	 * example if the memory is uncached or consistent
	 * and without any I/O write buffers involved.
	 */
	if ((hp->dmai_rflags & DMP_NOSYNC) == DMP_NOSYNC)
		return (DDI_SUCCESS);

	/* Sync is implemented via the ancestor's bus_dma_flush op. */
	dip = hp->dmai_rdip;
	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_flush;
	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_flush;
	return ((*funcp)(hdip, dip, h, o, l, whom));
}

int
ddi_dma_unbind_handle(ddi_dma_handle_t h)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)h;
	dev_info_t *hdip, *dip;
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t);

	dip = hp->dmai_rdip;
	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_unbindhdl;
	/* Cached unbind function pointer, not the bus_ops slot. */
	funcp = DEVI(dip)->devi_bus_dma_unbindfunc;
	return ((*funcp)(hdip, dip, h));
}

#endif /* !__sparc */

/* Legacy: release a mapping made by the old ddi_dma_setup() family. */
int
ddi_dma_free(ddi_dma_handle_t h)
{
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_FREE, 0, 0, 0, 0));
}

/* Legacy: allocate an I/O parameter block within the given DMA limits. */
int
ddi_iopb_alloc(dev_info_t *dip, ddi_dma_lim_t *limp, uint_t len, caddr_t *iopbp)
{
	ddi_dma_lim_t defalt;
	size_t size = len;

	/* Substitute the standard limits if the caller supplied none. */
	if (!limp) {
		defalt = standard_limits;
		limp = &defalt;
	}
	return (i_ddi_mem_alloc_lim(dip, limp, size, 0, 0, 0,
	    iopbp, NULL, NULL));
}

void
ddi_iopb_free(caddr_t iopb)
{
	i_ddi_mem_free(iopb, NULL);
}

/* Legacy: allocate DMA-able memory within the given DMA limits. */
int
ddi_mem_alloc(dev_info_t *dip, ddi_dma_lim_t *limits, uint_t length,
    uint_t flags, caddr_t *kaddrp, uint_t *real_length)
{
	ddi_dma_lim_t defalt;
	size_t size = length;

	/* Substitute the standard limits if the caller supplied none. */
	if (!limits) {
		defalt = standard_limits;
		limits = &defalt;
	}
	return (i_ddi_mem_alloc_lim(dip, limits, size, flags & 0x1,
	    1, 0, kaddrp, real_length, NULL));
}

void
ddi_mem_free(caddr_t kaddr)
{
	i_ddi_mem_free(kaddr, NULL);
}

/*
 * DMA attributes, alignment, burst sizes, and transfer minimums
 */
int
ddi_dma_get_attr(ddi_dma_handle_t handle, ddi_dma_attr_t *attrp)
{
	ddi_dma_impl_t *dimp = (ddi_dma_impl_t *)handle;

	if (attrp == NULL)
		return (DDI_FAILURE);
	*attrp = dimp->dmai_attr;
	return (DDI_SUCCESS);
}

int
ddi_dma_burstsizes(ddi_dma_handle_t handle)
{
	ddi_dma_impl_t *dimp = (ddi_dma_impl_t *)handle;

	if (!dimp)
		return (0);
	else
		return (dimp->dmai_burstsizes);
}

/* Derive device alignment and minimum-effect size from the burst sizes. */
int
ddi_dma_devalign(ddi_dma_handle_t handle, uint_t *alignment, uint_t *mineffect)
{
	ddi_dma_impl_t *dimp = (ddi_dma_impl_t *)handle;

	if (!dimp || !alignment || !mineffect)
		return (DDI_FAILURE);
	if (!(dimp->dmai_rflags & DDI_DMA_SBUS_64BIT)) {
		*alignment = 1 << ddi_ffs(dimp->dmai_burstsizes);
	} else {
		/* 64-bit SBus: high-order bits encode 64-bit burst sizes. */
		if (dimp->dmai_burstsizes & 0xff0000) {
			*alignment = 1 << ddi_ffs(dimp->dmai_burstsizes >> 16);
		} else {
			*alignment = 1 << ddi_ffs(dimp->dmai_burstsizes);
		}
	}
	*mineffect = dimp->dmai_minxfer;
	return (DDI_SUCCESS);
}

/*
 * Negotiate minimum I/O transfer size with the parent; both the input
 * and result must be powers of two or 0 is returned.
 */
int
ddi_iomin(dev_info_t *a, int i, int stream)
{
	int r;

	/*
	 * Make sure that the initial value is sane
	 */
	if (i & (i - 1))
		return (0);
	if (i == 0)
		i = (stream) ? 4 : 1;

	r = ddi_ctlops(a, a,
	    DDI_CTLOPS_IOMIN, (void *)(uintptr_t)stream, (void *)&i);
	if (r != DDI_SUCCESS || (i & (i - 1)))
		return (0);
	return (i);
}

/*
 * Given two DMA attribute structures, apply the attributes
 * of one to the other, following the rules of attributes
 * and the wishes of the caller.
 *
 * The rules of DMA attribute structures are that you cannot
 * make things *less* restrictive as you apply one set
 * of attributes to another.
 *
 */
void
ddi_dma_attr_merge(ddi_dma_attr_t *attr, ddi_dma_attr_t *mod)
{
	attr->dma_attr_addr_lo =
	    MAX(attr->dma_attr_addr_lo, mod->dma_attr_addr_lo);
	attr->dma_attr_addr_hi =
	    MIN(attr->dma_attr_addr_hi, mod->dma_attr_addr_hi);
	attr->dma_attr_count_max =
	    MIN(attr->dma_attr_count_max, mod->dma_attr_count_max);
	attr->dma_attr_align =
	    MAX(attr->dma_attr_align, mod->dma_attr_align);
	attr->dma_attr_burstsizes =
	    (uint_t)(attr->dma_attr_burstsizes & mod->dma_attr_burstsizes);
	attr->dma_attr_minxfer =
	    maxbit(attr->dma_attr_minxfer, mod->dma_attr_minxfer);
	attr->dma_attr_maxxfer =
	    MIN(attr->dma_attr_maxxfer, mod->dma_attr_maxxfer);
	attr->dma_attr_seg = MIN(attr->dma_attr_seg, mod->dma_attr_seg);
	attr->dma_attr_sgllen = MIN((uint_t)attr->dma_attr_sgllen,
	    (uint_t)mod->dma_attr_sgllen);
	attr->dma_attr_granular =
	    MAX(attr->dma_attr_granular, mod->dma_attr_granular);
}

/*
 * mmap/segmap interface:
 */

/*
 * ddi_segmap:		setup the default segment driver. Calls the drivers
 *			XXmmap routine to validate the range to be mapped.
 *			Return ENXIO of the range is not valid. Create
 *			a seg_dev segment that contains all of the
 *			necessary information and will reference the
 *			default segment driver routines. It returns zero
 *			on success or non-zero on failure.
 */
int
ddi_segmap(dev_t dev, off_t offset, struct as *asp, caddr_t *addrp, off_t len,
    uint_t prot, uint_t maxprot, uint_t flags, cred_t *credp)
{
	extern int spec_segmap(dev_t, off_t, struct as *, caddr_t *,
	    off_t, uint_t, uint_t, uint_t, struct cred *);

	/* Delegate entirely to the specfs segmap implementation. */
	return (spec_segmap(dev, offset, asp, addrp, len,
	    prot, maxprot, flags, credp));
}

/*
 * ddi_map_fault:	Resolve mappings at fault time.  Used by segment
 *			drivers.  Allows each successive parent to resolve
 *			address translations and add its mappings to the
 *			mapping list supplied in the page structure.  It
 *			returns zero on success	or non-zero on failure.
 */

int
ddi_map_fault(dev_info_t *dip, struct hat *hat, struct seg *seg,
    caddr_t addr, struct devpage *dp, pfn_t pfn, uint_t prot, uint_t lock)
{
	return (i_ddi_map_fault(dip, dip, hat, seg, addr, dp, pfn, prot, lock));
}

/*
 * ddi_device_mapping_check:	Called from ddi_segmap_setup.
 *	Invokes platform specific DDI to determine whether attributes specified
 *	in attr(9s) are valid for the region of memory that will be made
 *	available for direct access to user process via the mmap(2) system call.
 *
 *	Returns 0 when the region is mappable (and sets *hat_flags), -1 on
 *	any failure.
 */
int
ddi_device_mapping_check(dev_t dev, ddi_device_acc_attr_t *accattrp,
    uint_t rnumber, uint_t *hat_flags)
{
	ddi_acc_handle_t handle;
	ddi_map_req_t mr;
	ddi_acc_hdl_t *hp;
	int result;
	dev_info_t *dip;

	/*
	 * we use e_ddi_hold_devi_by_dev to search for the devi.  We
	 * release it immediately since it should already be held by
	 * a devfs vnode.
	 */
	if ((dip =
	    e_ddi_hold_devi_by_dev(dev, E_DDI_HOLD_DEVI_NOATTACH)) == NULL)
		return (-1);
	ddi_release_devi(dip);		/* for e_ddi_hold_devi_by_dev() */

	/*
	 * Allocate and initialize the common elements of data
	 * access handle.
	 */
	handle = impl_acc_hdl_alloc(KM_SLEEP, NULL);
	if (handle == NULL)
		return (-1);

	hp = impl_acc_hdl_get(handle);
	hp->ah_vers = VERS_ACCHDL;
	hp->ah_dip = dip;
	hp->ah_rnumber = rnumber;
	hp->ah_offset = 0;
	hp->ah_len = 0;
	hp->ah_acc = *accattrp;

	/*
	 * Set up the mapping request and call to parent.
	 */
	mr.map_op = DDI_MO_MAP_HANDLE;
	mr.map_type = DDI_MT_RNUMBER;
	mr.map_obj.rnumber = rnumber;
	mr.map_prot = PROT_READ | PROT_WRITE;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_handlep = hp;
	mr.map_vers = DDI_MAP_VERSION;
	result = ddi_map(dip, &mr, 0, 0, NULL);

	/*
	 * Region must be mappable, pick up flags from the framework.
	 */
	*hat_flags = hp->ah_hat_flags;

	impl_acc_hdl_free(handle);

	/*
	 * check for end result.
	 */
	if (result != DDI_SUCCESS)
		return (-1);
	return (0);
}


/*
 * Property functions:	 See also, ddipropdefs.h.
 *
 * These functions are the framework for the property functions,
 * i.e. they support software defined properties.  All implementation
 * specific property handling (i.e.: self-identifying devices and
 * PROM defined properties are handled in the implementation specific
 * functions (defined in ddi_implfuncs.h).
 */

/*
 * nopropop:	Shouldn't be called, right?
 */
int
nopropop(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
    char *name, caddr_t valuep, int *lengthp)
{
	_NOTE(ARGUNUSED(dev, dip, prop_op, mod_flags, name, valuep, lengthp))
	return (DDI_PROP_NOT_FOUND);
}

#ifdef	DDI_PROP_DEBUG
int ddi_prop_debug_flag = 0;

/*
 * Enable/disable property debugging; returns the previous setting.
 * A transition in either direction is logged to the console.
 */
int
ddi_prop_debug(int enable)
{
	int prev = ddi_prop_debug_flag;

	if ((enable != 0) || (prev != 0))
		printf("ddi_prop_debug: debugging %s\n",
		    enable ? "enabled" : "disabled");
	ddi_prop_debug_flag = enable;
	return (prev);
}

#endif	/* DDI_PROP_DEBUG */

/*
 * Search a property list for a match, if found return pointer
 * to matching prop struct, else return NULL.
 *
 * Caller is expected to hold whatever lock protects *list_head.
 */

ddi_prop_t *
i_ddi_prop_search(dev_t dev, char *name, uint_t flags, ddi_prop_t **list_head)
{
	ddi_prop_t *propp;

	/*
	 * find the property in child's devinfo:
	 * Search order defined by this search function is first matching
	 * property with input dev == DDI_DEV_T_ANY matching any dev or
	 * dev == propp->prop_dev, name == propp->name, and the correct
	 * data type as specified in the flags.  If a DDI_DEV_T_NONE dev
	 * value made it this far then it implies a DDI_DEV_T_ANY search.
	 */
	if (dev == DDI_DEV_T_NONE)
		dev = DDI_DEV_T_ANY;

	for (propp = *list_head; propp != NULL; propp = propp->prop_next)  {

		if (!DDI_STRSAME(propp->prop_name, name))
			continue;

		if ((dev != DDI_DEV_T_ANY) && (propp->prop_dev != dev))
			continue;

		if (((propp->prop_flags & flags) & DDI_PROP_TYPE_MASK) == 0)
			continue;

		return (propp);
	}

	return ((ddi_prop_t *)0);
}

/*
 * Search for property within devnames structures
 */
ddi_prop_t *
i_ddi_search_global_prop(dev_t dev, char *name, uint_t flags)
{
	major_t major;
	struct devnames *dnp;
	ddi_prop_t *propp;

	/*
	 * Valid dev_t value is needed to index into the
	 * correct devnames entry, therefore a dev_t
	 * value of DDI_DEV_T_ANY is not appropriate.
	 */
	ASSERT(dev != DDI_DEV_T_ANY);
	if (dev == DDI_DEV_T_ANY) {
		return ((ddi_prop_t *)0);
	}

	major = getmajor(dev);
	dnp = &(devnamesp[major]);

	if (dnp->dn_global_prop_ptr == NULL)
		return ((ddi_prop_t *)0);

	LOCK_DEV_OPS(&dnp->dn_lock);

	for (propp = dnp->dn_global_prop_ptr->prop_list;
	    propp != NULL;
	    propp = (ddi_prop_t *)propp->prop_next) {

		if (!DDI_STRSAME(propp->prop_name, name))
			continue;

		/*
		 * Rootnex-global and LDI any-dev searches match regardless
		 * of the property's dev_t; otherwise the dev must match.
		 */
		if ((!(flags & DDI_PROP_ROOTNEX_GLOBAL)) &&
		    (!(flags & LDI_DEV_T_ANY)) && (propp->prop_dev != dev))
			continue;

		if (((propp->prop_flags & flags) & DDI_PROP_TYPE_MASK) == 0)
			continue;

		/* Property found, return it */
		UNLOCK_DEV_OPS(&dnp->dn_lock);
		return (propp);
	}

	UNLOCK_DEV_OPS(&dnp->dn_lock);
	return ((ddi_prop_t *)0);
}

static char prop_no_mem_msg[] = "can't allocate memory for ddi property <%s>";

/*
 * ddi_prop_search_global:
 *	Search the global property list within devnames
 *	for the named property.  Return the encoded value.
 *	On success the caller owns the buffer returned via *valuep
 *	(propp->prop_len bytes, allocated with kmem_alloc).
 */
static int
i_ddi_prop_search_global(dev_t dev, uint_t flags, char *name,
    void *valuep, uint_t *lengthp)
{
	ddi_prop_t *propp;
	caddr_t buffer;

	propp = i_ddi_search_global_prop(dev, name, flags);

	/* Property NOT found, bail */
	if (propp == (ddi_prop_t *)0)
		return (DDI_PROP_NOT_FOUND);

	if (propp->prop_flags & DDI_PROP_UNDEF_IT)
		return (DDI_PROP_UNDEFINED);

	if ((buffer = kmem_alloc(propp->prop_len,
	    (flags & DDI_PROP_CANSLEEP) ? KM_SLEEP : KM_NOSLEEP)) == NULL) {
		cmn_err(CE_CONT, prop_no_mem_msg, name);
		return (DDI_PROP_NO_MEMORY);
	}

	/*
	 * Return the encoded data
	 */
	*(caddr_t *)valuep = buffer;
	*lengthp = propp->prop_len;
	bcopy(propp->prop_val, buffer, propp->prop_len);

	return (DDI_PROP_SUCCESS);
}

/*
 * ddi_prop_search_common:	Lookup and return the encoded value
 *
 * Walks the property lists of dip and, unless DDI_PROP_DONTPASS is set,
 * iterates up the devinfo tree.  The devi_lock of the node being examined
 * is held while its lists are searched and is dropped before any sleeping
 * allocation; the loop then restarts because the property may have changed
 * while the lock was dropped.
 */
int
ddi_prop_search_common(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    uint_t flags, char *name, void *valuep, uint_t *lengthp)
{
	ddi_prop_t	*propp;
	int		i;
	caddr_t		buffer;
	caddr_t		prealloc = NULL;
	int		plength = 0;
	dev_info_t	*pdip;
	int		(*bop)();

	/*CONSTANTCONDITION*/
	while (1)  {

		mutex_enter(&(DEVI(dip)->devi_lock));


		/*
		 * find the property in child's devinfo:
		 * Search order is:
		 *	1. driver defined properties
		 *	2. system defined properties
		 *	3. driver global properties
		 *	4. boot defined properties
		 */

		propp = i_ddi_prop_search(dev, name, flags,
		    &(DEVI(dip)->devi_drv_prop_ptr));
		if (propp == NULL)  {
			propp = i_ddi_prop_search(dev, name, flags,
			    &(DEVI(dip)->devi_sys_prop_ptr));
		}
		if ((propp == NULL) && DEVI(dip)->devi_global_prop_list) {
			propp = i_ddi_prop_search(dev, name, flags,
			    &DEVI(dip)->devi_global_prop_list->prop_list);
		}

		if (propp == NULL)  {
			propp = i_ddi_prop_search(dev, name, flags,
			    &(DEVI(dip)->devi_hw_prop_ptr));
		}

		/*
		 * Software property found?
		 */
		if (propp != (ddi_prop_t *)0)  {

			/*
			 * If explicit undefine, return now.
			 */
			if (propp->prop_flags & DDI_PROP_UNDEF_IT) {
				mutex_exit(&(DEVI(dip)->devi_lock));
				if (prealloc)
					kmem_free(prealloc, plength);
				return (DDI_PROP_UNDEFINED);
			}

			/*
			 * If we only want to know if it exists, return now
			 */
			if (prop_op == PROP_EXISTS) {
				mutex_exit(&(DEVI(dip)->devi_lock));
				ASSERT(prealloc == NULL);
				return (DDI_PROP_SUCCESS);
			}

			/*
			 * If length only request or prop length == 0,
			 * service request and return now.
			 */
			if ((prop_op == PROP_LEN) ||(propp->prop_len == 0)) {
				*lengthp = propp->prop_len;

				/*
				 * if prop_op is PROP_LEN_AND_VAL_ALLOC
				 * that means prop_len is 0, so set valuep
				 * also to NULL
				 */
				if (prop_op == PROP_LEN_AND_VAL_ALLOC)
					*(caddr_t *)valuep = NULL;

				mutex_exit(&(DEVI(dip)->devi_lock));
				if (prealloc)
					kmem_free(prealloc, plength);
				return (DDI_PROP_SUCCESS);
			}

			/*
			 * If LEN_AND_VAL_ALLOC and the request can sleep,
			 * drop the mutex, allocate the buffer, and go
			 * through the loop again.  If we already allocated
			 * the buffer, and the size of the property changed,
			 * keep trying...
			 */
			if ((prop_op == PROP_LEN_AND_VAL_ALLOC) &&
			    (flags & DDI_PROP_CANSLEEP))  {
				if (prealloc && (propp->prop_len != plength)) {
					kmem_free(prealloc, plength);
					prealloc = NULL;
				}
				if (prealloc == NULL)  {
					plength = propp->prop_len;
					mutex_exit(&(DEVI(dip)->devi_lock));
					prealloc = kmem_alloc(plength,
					    KM_SLEEP);
					continue;
				}
			}

			/*
			 * Allocate buffer, if required.  Either way,
			 * set `buffer' variable.
			 */
			i = *lengthp;			/* Get callers length */
			*lengthp = propp->prop_len;	/* Set callers length */

			switch (prop_op) {

			case PROP_LEN_AND_VAL_ALLOC:

				if (prealloc == NULL) {
					buffer = kmem_alloc(propp->prop_len,
					    KM_NOSLEEP);
				} else {
					buffer = prealloc;
				}

				if (buffer == NULL)  {
					mutex_exit(&(DEVI(dip)->devi_lock));
					cmn_err(CE_CONT, prop_no_mem_msg, name);
					return (DDI_PROP_NO_MEMORY);
				}
				/* Set callers buf ptr */
				*(caddr_t *)valuep = buffer;
				break;

			case PROP_LEN_AND_VAL_BUF:

				if (propp->prop_len > (i)) {
					mutex_exit(&(DEVI(dip)->devi_lock));
					return (DDI_PROP_BUF_TOO_SMALL);
				}

				buffer = valuep;  /* Get callers buf ptr */
				break;

			default:
				break;
			}

			/*
			 * Do the copy.
			 */
			bcopy(propp->prop_val, buffer, propp->prop_len);
			mutex_exit(&(DEVI(dip)->devi_lock));
			return (DDI_PROP_SUCCESS);
		}

		mutex_exit(&(DEVI(dip)->devi_lock));
		if (prealloc)
			kmem_free(prealloc, plength);
		prealloc = NULL;

		/*
		 * Prop not found, call parent bus_ops to deal with possible
		 * h/w layer (possible PROM defined props, etc.) and to
		 * possibly ascend the hierarchy, if allowed by flags.
		 */
		pdip = (dev_info_t *)DEVI(dip)->devi_parent;

		/*
		 * One last call for the root driver PROM props?
		 */
		if (dip == ddi_root_node())  {
			return (ddi_bus_prop_op(dev, dip, dip, prop_op,
			    flags, name, valuep, (int *)lengthp));
		}

		/*
		 * We may have been called to check for properties
		 * within a single devinfo node that has no parent -
		 * see make_prop()
		 */
		if (pdip == NULL) {
			ASSERT((flags &
			    (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM)) ==
			    (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM));
			return (DDI_PROP_NOT_FOUND);
		}

		/*
		 * Instead of recursing, we do iterative calls up the tree.
		 * As a bit of optimization, skip the bus_op level if the
		 * node is a s/w node and if the parent's bus_prop_op function
		 * is `ddi_bus_prop_op', because we know that in this case,
		 * this function does nothing.
		 *
		 * 4225415: If the parent isn't attached, or the child
		 * hasn't been named by the parent yet, use the default
		 * ddi_bus_prop_op as a proxy for the parent.  This
		 * allows property lookups in any child/parent state to
		 * include 'prom' and inherited properties, even when
		 * there are no drivers attached to the child or parent.
		 */

		bop = ddi_bus_prop_op;
		if (i_ddi_devi_attached(pdip) &&
		    (i_ddi_node_state(dip) >= DS_INITIALIZED))
			bop = DEVI(pdip)->devi_ops->devo_bus_ops->bus_prop_op;

		i = DDI_PROP_NOT_FOUND;

		if ((bop != ddi_bus_prop_op) || ndi_dev_is_prom_node(dip)) {
			i = (*bop)(dev, pdip, dip, prop_op,
			    flags | DDI_PROP_DONTPASS,
			    name, valuep, lengthp);
		}

		if ((flags & DDI_PROP_DONTPASS) ||
		    (i != DDI_PROP_NOT_FOUND))
			return (i);

		dip = pdip;
	}
	/*NOTREACHED*/
}


/*
 * ddi_prop_op: The basic property operator for drivers.
1780 * 1781 * In ddi_prop_op, the type of valuep is interpreted based on prop_op: 1782 * 1783 * prop_op valuep 1784 * ------ ------ 1785 * 1786 * PROP_LEN <unused> 1787 * 1788 * PROP_LEN_AND_VAL_BUF Pointer to callers buffer 1789 * 1790 * PROP_LEN_AND_VAL_ALLOC Address of callers pointer (will be set to 1791 * address of allocated buffer, if successful) 1792 */ 1793 int 1794 ddi_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags, 1795 char *name, caddr_t valuep, int *lengthp) 1796 { 1797 int i; 1798 1799 ASSERT((mod_flags & DDI_PROP_TYPE_MASK) == 0); 1800 1801 /* 1802 * If this was originally an LDI prop lookup then we bail here. 1803 * The reason is that the LDI property lookup interfaces first call 1804 * a drivers prop_op() entry point to allow it to override 1805 * properties. But if we've made it here, then the driver hasn't 1806 * overriden any properties. We don't want to continue with the 1807 * property search here because we don't have any type inforamtion. 1808 * When we return failure, the LDI interfaces will then proceed to 1809 * call the typed property interfaces to look up the property. 1810 */ 1811 if (mod_flags & DDI_PROP_DYNAMIC) 1812 return (DDI_PROP_NOT_FOUND); 1813 1814 /* 1815 * check for pre-typed property consumer asking for typed property: 1816 * see e_ddi_getprop_int64. 1817 */ 1818 if (mod_flags & DDI_PROP_CONSUMER_TYPED) 1819 mod_flags |= DDI_PROP_TYPE_INT64; 1820 mod_flags |= DDI_PROP_TYPE_ANY; 1821 1822 i = ddi_prop_search_common(dev, dip, prop_op, 1823 mod_flags, name, valuep, (uint_t *)lengthp); 1824 if (i == DDI_PROP_FOUND_1275) 1825 return (DDI_PROP_SUCCESS); 1826 return (i); 1827 } 1828 1829 /* 1830 * ddi_prop_op_nblocks_blksize: The basic property operator for drivers that 1831 * maintain size in number of blksize blocks. Provides a dynamic property 1832 * implementation for size oriented properties based on nblocks64 and blksize 1833 * values passed in by the driver. 
Fallback to ddi_prop_op if the nblocks64 1834 * is too large. This interface should not be used with a nblocks64 that 1835 * represents the driver's idea of how to represent unknown, if nblocks is 1836 * unknown use ddi_prop_op. 1837 */ 1838 int 1839 ddi_prop_op_nblocks_blksize(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, 1840 int mod_flags, char *name, caddr_t valuep, int *lengthp, 1841 uint64_t nblocks64, uint_t blksize) 1842 { 1843 uint64_t size64; 1844 int blkshift; 1845 1846 /* convert block size to shift value */ 1847 ASSERT(BIT_ONLYONESET(blksize)); 1848 blkshift = highbit(blksize) - 1; 1849 1850 /* 1851 * There is no point in supporting nblocks64 values that don't have 1852 * an accurate uint64_t byte count representation. 1853 */ 1854 if (nblocks64 >= (UINT64_MAX >> blkshift)) 1855 return (ddi_prop_op(dev, dip, prop_op, mod_flags, 1856 name, valuep, lengthp)); 1857 1858 size64 = nblocks64 << blkshift; 1859 return (ddi_prop_op_size_blksize(dev, dip, prop_op, mod_flags, 1860 name, valuep, lengthp, size64, blksize)); 1861 } 1862 1863 /* 1864 * ddi_prop_op_nblocks: ddi_prop_op_nblocks_blksize with DEV_BSIZE blksize. 1865 */ 1866 int 1867 ddi_prop_op_nblocks(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, 1868 int mod_flags, char *name, caddr_t valuep, int *lengthp, uint64_t nblocks64) 1869 { 1870 return (ddi_prop_op_nblocks_blksize(dev, dip, prop_op, 1871 mod_flags, name, valuep, lengthp, nblocks64, DEV_BSIZE)); 1872 } 1873 1874 /* 1875 * ddi_prop_op_size_blksize: The basic property operator for block drivers that 1876 * maintain size in bytes. Provides a of dynamic property implementation for 1877 * size oriented properties based on size64 value and blksize passed in by the 1878 * driver. Fallback to ddi_prop_op if the size64 is too large. This interface 1879 * should not be used with a size64 that represents the driver's idea of how 1880 * to represent unknown, if size is unknown use ddi_prop_op. 
1881 * 1882 * NOTE: the legacy "nblocks"/"size" properties are treated as 32-bit unsigned 1883 * integers. While the most likely interface to request them ([bc]devi_size) 1884 * is declared int (signed) there is no enforcement of this, which means we 1885 * can't enforce limitations here without risking regression. 1886 */ 1887 int 1888 ddi_prop_op_size_blksize(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, 1889 int mod_flags, char *name, caddr_t valuep, int *lengthp, uint64_t size64, 1890 uint_t blksize) 1891 { 1892 uint64_t nblocks64; 1893 int callers_length; 1894 caddr_t buffer; 1895 int blkshift; 1896 1897 /* 1898 * This is a kludge to support capture of size(9P) pure dynamic 1899 * properties in snapshots for non-cmlb code (without exposing 1900 * i_ddi_prop_dyn changes). When everyone uses cmlb, this code 1901 * should be removed. 1902 */ 1903 if (i_ddi_prop_dyn_driver_get(dip) == NULL) { 1904 static i_ddi_prop_dyn_t prop_dyn_size[] = { 1905 {"Size", DDI_PROP_TYPE_INT64, S_IFCHR}, 1906 {"Nblocks", DDI_PROP_TYPE_INT64, S_IFBLK}, 1907 {NULL} 1908 }; 1909 i_ddi_prop_dyn_driver_set(dip, prop_dyn_size); 1910 } 1911 1912 /* convert block size to shift value */ 1913 ASSERT(BIT_ONLYONESET(blksize)); 1914 blkshift = highbit(blksize) - 1; 1915 1916 /* compute DEV_BSIZE nblocks value */ 1917 nblocks64 = size64 >> blkshift; 1918 1919 /* get callers length, establish length of our dynamic properties */ 1920 callers_length = *lengthp; 1921 1922 if (strcmp(name, "Nblocks") == 0) 1923 *lengthp = sizeof (uint64_t); 1924 else if (strcmp(name, "Size") == 0) 1925 *lengthp = sizeof (uint64_t); 1926 else if ((strcmp(name, "nblocks") == 0) && (nblocks64 < UINT_MAX)) 1927 *lengthp = sizeof (uint32_t); 1928 else if ((strcmp(name, "size") == 0) && (size64 < UINT_MAX)) 1929 *lengthp = sizeof (uint32_t); 1930 else if ((strcmp(name, "blksize") == 0) && (blksize < UINT_MAX)) 1931 *lengthp = sizeof (uint32_t); 1932 else { 1933 /* fallback to ddi_prop_op */ 1934 return (ddi_prop_op(dev, 
dip, prop_op, mod_flags, 1935 name, valuep, lengthp)); 1936 } 1937 1938 /* service request for the length of the property */ 1939 if (prop_op == PROP_LEN) 1940 return (DDI_PROP_SUCCESS); 1941 1942 switch (prop_op) { 1943 case PROP_LEN_AND_VAL_ALLOC: 1944 if ((buffer = kmem_alloc(*lengthp, 1945 (mod_flags & DDI_PROP_CANSLEEP) ? 1946 KM_SLEEP : KM_NOSLEEP)) == NULL) 1947 return (DDI_PROP_NO_MEMORY); 1948 1949 *(caddr_t *)valuep = buffer; /* set callers buf ptr */ 1950 break; 1951 1952 case PROP_LEN_AND_VAL_BUF: 1953 /* the length of the property and the request must match */ 1954 if (callers_length != *lengthp) 1955 return (DDI_PROP_INVAL_ARG); 1956 1957 buffer = valuep; /* get callers buf ptr */ 1958 break; 1959 1960 default: 1961 return (DDI_PROP_INVAL_ARG); 1962 } 1963 1964 /* transfer the value into the buffer */ 1965 if (strcmp(name, "Nblocks") == 0) 1966 *((uint64_t *)buffer) = nblocks64; 1967 else if (strcmp(name, "Size") == 0) 1968 *((uint64_t *)buffer) = size64; 1969 else if (strcmp(name, "nblocks") == 0) 1970 *((uint32_t *)buffer) = (uint32_t)nblocks64; 1971 else if (strcmp(name, "size") == 0) 1972 *((uint32_t *)buffer) = (uint32_t)size64; 1973 else if (strcmp(name, "blksize") == 0) 1974 *((uint32_t *)buffer) = (uint32_t)blksize; 1975 return (DDI_PROP_SUCCESS); 1976 } 1977 1978 /* 1979 * ddi_prop_op_size: ddi_prop_op_size_blksize with DEV_BSIZE block size. 1980 */ 1981 int 1982 ddi_prop_op_size(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, 1983 int mod_flags, char *name, caddr_t valuep, int *lengthp, uint64_t size64) 1984 { 1985 return (ddi_prop_op_size_blksize(dev, dip, prop_op, 1986 mod_flags, name, valuep, lengthp, size64, DEV_BSIZE)); 1987 } 1988 1989 /* 1990 * Variable length props... 1991 */ 1992 1993 /* 1994 * ddi_getlongprop: Get variable length property len+val into a buffer 1995 * allocated by property provider via kmem_alloc. Requester 1996 * is responsible for freeing returned property via kmem_free. 
1997 * 1998 * Arguments: 1999 * 2000 * dev_t: Input: dev_t of property. 2001 * dip: Input: dev_info_t pointer of child. 2002 * flags: Input: Possible flag modifiers are: 2003 * DDI_PROP_DONTPASS: Don't pass to parent if prop not found. 2004 * DDI_PROP_CANSLEEP: Memory allocation may sleep. 2005 * name: Input: name of property. 2006 * valuep: Output: Addr of callers buffer pointer. 2007 * lengthp:Output: *lengthp will contain prop length on exit. 2008 * 2009 * Possible Returns: 2010 * 2011 * DDI_PROP_SUCCESS: Prop found and returned. 2012 * DDI_PROP_NOT_FOUND: Prop not found 2013 * DDI_PROP_UNDEFINED: Prop explicitly undefined. 2014 * DDI_PROP_NO_MEMORY: Prop found, but unable to alloc mem. 2015 */ 2016 2017 int 2018 ddi_getlongprop(dev_t dev, dev_info_t *dip, int flags, 2019 char *name, caddr_t valuep, int *lengthp) 2020 { 2021 return (ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_ALLOC, 2022 flags, name, valuep, lengthp)); 2023 } 2024 2025 /* 2026 * 2027 * ddi_getlongprop_buf: Get long prop into pre-allocated callers 2028 * buffer. (no memory allocation by provider). 2029 * 2030 * dev_t: Input: dev_t of property. 2031 * dip: Input: dev_info_t pointer of child. 2032 * flags: Input: DDI_PROP_DONTPASS or NULL 2033 * name: Input: name of property 2034 * valuep: Input: ptr to callers buffer. 2035 * lengthp:I/O: ptr to length of callers buffer on entry, 2036 * actual length of property on exit. 2037 * 2038 * Possible returns: 2039 * 2040 * DDI_PROP_SUCCESS Prop found and returned 2041 * DDI_PROP_NOT_FOUND Prop not found 2042 * DDI_PROP_UNDEFINED Prop explicitly undefined. 
2043 * DDI_PROP_BUF_TOO_SMALL Prop found, callers buf too small, 2044 * no value returned, but actual prop 2045 * length returned in *lengthp 2046 * 2047 */ 2048 2049 int 2050 ddi_getlongprop_buf(dev_t dev, dev_info_t *dip, int flags, 2051 char *name, caddr_t valuep, int *lengthp) 2052 { 2053 return (ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF, 2054 flags, name, valuep, lengthp)); 2055 } 2056 2057 /* 2058 * Integer/boolean sized props. 2059 * 2060 * Call is value only... returns found boolean or int sized prop value or 2061 * defvalue if prop not found or is wrong length or is explicitly undefined. 2062 * Only flag is DDI_PROP_DONTPASS... 2063 * 2064 * By convention, this interface returns boolean (0) sized properties 2065 * as value (int)1. 2066 * 2067 * This never returns an error, if property not found or specifically 2068 * undefined, the input `defvalue' is returned. 2069 */ 2070 2071 int 2072 ddi_getprop(dev_t dev, dev_info_t *dip, int flags, char *name, int defvalue) 2073 { 2074 int propvalue = defvalue; 2075 int proplength = sizeof (int); 2076 int error; 2077 2078 error = ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF, 2079 flags, name, (caddr_t)&propvalue, &proplength); 2080 2081 if ((error == DDI_PROP_SUCCESS) && (proplength == 0)) 2082 propvalue = 1; 2083 2084 return (propvalue); 2085 } 2086 2087 /* 2088 * Get prop length interface: flags are 0 or DDI_PROP_DONTPASS 2089 * if returns DDI_PROP_SUCCESS, length returned in *lengthp. 2090 */ 2091 2092 int 2093 ddi_getproplen(dev_t dev, dev_info_t *dip, int flags, char *name, int *lengthp) 2094 { 2095 return (ddi_prop_op(dev, dip, PROP_LEN, flags, name, NULL, lengthp)); 2096 } 2097 2098 /* 2099 * Allocate a struct prop_driver_data, along with 'size' bytes 2100 * for decoded property data. This structure is freed by 2101 * calling ddi_prop_free(9F). 
2102 */ 2103 static void * 2104 ddi_prop_decode_alloc(size_t size, void (*prop_free)(struct prop_driver_data *)) 2105 { 2106 struct prop_driver_data *pdd; 2107 2108 /* 2109 * Allocate a structure with enough memory to store the decoded data. 2110 */ 2111 pdd = kmem_zalloc(sizeof (struct prop_driver_data) + size, KM_SLEEP); 2112 pdd->pdd_size = (sizeof (struct prop_driver_data) + size); 2113 pdd->pdd_prop_free = prop_free; 2114 2115 /* 2116 * Return a pointer to the location to put the decoded data. 2117 */ 2118 return ((void *)((caddr_t)pdd + sizeof (struct prop_driver_data))); 2119 } 2120 2121 /* 2122 * Allocated the memory needed to store the encoded data in the property 2123 * handle. 2124 */ 2125 static int 2126 ddi_prop_encode_alloc(prop_handle_t *ph, size_t size) 2127 { 2128 /* 2129 * If size is zero, then set data to NULL and size to 0. This 2130 * is a boolean property. 2131 */ 2132 if (size == 0) { 2133 ph->ph_size = 0; 2134 ph->ph_data = NULL; 2135 ph->ph_cur_pos = NULL; 2136 ph->ph_save_pos = NULL; 2137 } else { 2138 if (ph->ph_flags == DDI_PROP_DONTSLEEP) { 2139 ph->ph_data = kmem_zalloc(size, KM_NOSLEEP); 2140 if (ph->ph_data == NULL) 2141 return (DDI_PROP_NO_MEMORY); 2142 } else 2143 ph->ph_data = kmem_zalloc(size, KM_SLEEP); 2144 ph->ph_size = size; 2145 ph->ph_cur_pos = ph->ph_data; 2146 ph->ph_save_pos = ph->ph_data; 2147 } 2148 return (DDI_PROP_SUCCESS); 2149 } 2150 2151 /* 2152 * Free the space allocated by the lookup routines. Each lookup routine 2153 * returns a pointer to the decoded data to the driver. The driver then 2154 * passes this pointer back to us. This data actually lives in a struct 2155 * prop_driver_data. We use negative indexing to find the beginning of 2156 * the structure and then free the entire structure using the size and 2157 * the free routine stored in the structure. 
2158 */ 2159 void 2160 ddi_prop_free(void *datap) 2161 { 2162 struct prop_driver_data *pdd; 2163 2164 /* 2165 * Get the structure 2166 */ 2167 pdd = (struct prop_driver_data *) 2168 ((caddr_t)datap - sizeof (struct prop_driver_data)); 2169 /* 2170 * Call the free routine to free it 2171 */ 2172 (*pdd->pdd_prop_free)(pdd); 2173 } 2174 2175 /* 2176 * Free the data associated with an array of ints, 2177 * allocated with ddi_prop_decode_alloc(). 2178 */ 2179 static void 2180 ddi_prop_free_ints(struct prop_driver_data *pdd) 2181 { 2182 kmem_free(pdd, pdd->pdd_size); 2183 } 2184 2185 /* 2186 * Free a single string property or a single string contained within 2187 * the argv style return value of an array of strings. 2188 */ 2189 static void 2190 ddi_prop_free_string(struct prop_driver_data *pdd) 2191 { 2192 kmem_free(pdd, pdd->pdd_size); 2193 2194 } 2195 2196 /* 2197 * Free an array of strings. 2198 */ 2199 static void 2200 ddi_prop_free_strings(struct prop_driver_data *pdd) 2201 { 2202 kmem_free(pdd, pdd->pdd_size); 2203 } 2204 2205 /* 2206 * Free the data associated with an array of bytes. 2207 */ 2208 static void 2209 ddi_prop_free_bytes(struct prop_driver_data *pdd) 2210 { 2211 kmem_free(pdd, pdd->pdd_size); 2212 } 2213 2214 /* 2215 * Reset the current location pointer in the property handle to the 2216 * beginning of the data. 2217 */ 2218 void 2219 ddi_prop_reset_pos(prop_handle_t *ph) 2220 { 2221 ph->ph_cur_pos = ph->ph_data; 2222 ph->ph_save_pos = ph->ph_data; 2223 } 2224 2225 /* 2226 * Restore the current location pointer in the property handle to the 2227 * saved position. 2228 */ 2229 void 2230 ddi_prop_save_pos(prop_handle_t *ph) 2231 { 2232 ph->ph_save_pos = ph->ph_cur_pos; 2233 } 2234 2235 /* 2236 * Save the location that the current location pointer is pointing to.. 
 */
void
ddi_prop_restore_pos(prop_handle_t *ph)
{
        ph->ph_cur_pos = ph->ph_save_pos;
}

/*
 * Property encode/decode functions
 */

/*
 * Decode a single integer property.
 * Returns DDI_PROP_SUCCESS with *data/*nelements set, or
 * DDI_PROP_END_OF_DATA / DDI_PROP_CANNOT_DECODE on failure.
 */
static int
ddi_prop_fm_decode_int(prop_handle_t *ph, void *data, uint_t *nelements)
{
        int i;
        int tmp;

        /*
         * If there is nothing to decode return an error
         */
        if (ph->ph_size == 0)
                return (DDI_PROP_END_OF_DATA);

        /*
         * Decode the property as a single integer and return it
         * in data if we were able to decode it.
         */
        i = DDI_PROP_INT(ph, DDI_PROP_CMD_DECODE, &tmp);
        if (i < DDI_PROP_RESULT_OK) {
                switch (i) {
                case DDI_PROP_RESULT_EOF:
                        return (DDI_PROP_END_OF_DATA);

                case DDI_PROP_RESULT_ERROR:
                        return (DDI_PROP_CANNOT_DECODE);
                }
        }

        *(int *)data = tmp;
        *nelements = 1;
        return (DDI_PROP_SUCCESS);
}

/*
 * Decode a single 64 bit integer property.
 * Same contract as ddi_prop_fm_decode_int, using the int64 operator.
 */
static int
ddi_prop_fm_decode_int64(prop_handle_t *ph, void *data, uint_t *nelements)
{
        int i;
        int64_t tmp;

        /*
         * If there is nothing to decode return an error
         */
        if (ph->ph_size == 0)
                return (DDI_PROP_END_OF_DATA);

        /*
         * Decode the property as a single integer and return it
         * in data if we were able to decode it.
         */
        i = DDI_PROP_INT64(ph, DDI_PROP_CMD_DECODE, &tmp);
        if (i < DDI_PROP_RESULT_OK) {
                switch (i) {
                case DDI_PROP_RESULT_EOF:
                        return (DDI_PROP_END_OF_DATA);

                case DDI_PROP_RESULT_ERROR:
                        return (DDI_PROP_CANNOT_DECODE);
                }
        }

        *(int64_t *)data = tmp;
        *nelements = 1;
        return (DDI_PROP_SUCCESS);
}

/*
 * Decode an array of integers property.
 * Pass 1 counts the elements by skipping through the encoded data;
 * pass 2 (after resetting the cursor) decodes each element into a
 * freshly allocated array returned via *data.  On success the caller
 * owns the array and must release it with ddi_prop_free().
 */
static int
ddi_prop_fm_decode_ints(prop_handle_t *ph, void *data, uint_t *nelements)
{
        int i;
        int cnt = 0;
        int *tmp;
        int *intp;
        int n;

        /*
         * Figure out how many array elements there are by going through the
         * data without decoding it first and counting.
         */
        for (;;) {
                i = DDI_PROP_INT(ph, DDI_PROP_CMD_SKIP, NULL);
                if (i < 0)
                        break;
                cnt++;
        }

        /*
         * If there are no elements return an error
         */
        if (cnt == 0)
                return (DDI_PROP_END_OF_DATA);

        /*
         * If we cannot skip through the data, we cannot decode it
         */
        if (i == DDI_PROP_RESULT_ERROR)
                return (DDI_PROP_CANNOT_DECODE);

        /*
         * Reset the data pointer to the beginning of the encoded data
         */
        ddi_prop_reset_pos(ph);

        /*
         * Allocate memory to store the decoded value in.
         */
        intp = ddi_prop_decode_alloc((cnt * sizeof (int)),
            ddi_prop_free_ints);

        /*
         * Decode each element and place it in the space we just allocated
         */
        tmp = intp;
        for (n = 0; n < cnt; n++, tmp++) {
                i = DDI_PROP_INT(ph, DDI_PROP_CMD_DECODE, tmp);
                if (i < DDI_PROP_RESULT_OK) {
                        /*
                         * Free the space we just allocated
                         * and return an error.
                         */
                        ddi_prop_free(intp);
                        switch (i) {
                        case DDI_PROP_RESULT_EOF:
                                return (DDI_PROP_END_OF_DATA);

                        case DDI_PROP_RESULT_ERROR:
                                return (DDI_PROP_CANNOT_DECODE);
                        }
                }
        }

        *nelements = cnt;
        *(int **)data = intp;

        return (DDI_PROP_SUCCESS);
}

/*
 * Decode a 64 bit integer array property.
 * Same two-pass structure as ddi_prop_fm_decode_ints.  The int64 array
 * shares ddi_prop_free_ints as its free callback (the callback only
 * needs pdd_size, so this is safe).
 */
static int
ddi_prop_fm_decode_int64_array(prop_handle_t *ph, void *data, uint_t *nelements)
{
        int i;
        int n;
        int cnt = 0;
        int64_t *tmp;
        int64_t *intp;

        /*
         * Count the number of array elements by going
         * through the data without decoding it.
         */
        for (;;) {
                i = DDI_PROP_INT64(ph, DDI_PROP_CMD_SKIP, NULL);
                if (i < 0)
                        break;
                cnt++;
        }

        /*
         * If there are no elements return an error
         */
        if (cnt == 0)
                return (DDI_PROP_END_OF_DATA);

        /*
         * If we cannot skip through the data, we cannot decode it
         */
        if (i == DDI_PROP_RESULT_ERROR)
                return (DDI_PROP_CANNOT_DECODE);

        /*
         * Reset the data pointer to the beginning of the encoded data
         */
        ddi_prop_reset_pos(ph);

        /*
         * Allocate memory to store the decoded value.
         */
        intp = ddi_prop_decode_alloc((cnt * sizeof (int64_t)),
            ddi_prop_free_ints);

        /*
         * Decode each element and place it in the space allocated
         */
        tmp = intp;
        for (n = 0; n < cnt; n++, tmp++) {
                i = DDI_PROP_INT64(ph, DDI_PROP_CMD_DECODE, tmp);
                if (i < DDI_PROP_RESULT_OK) {
                        /*
                         * Free the space we just allocated
                         * and return an error.
                         */
                        ddi_prop_free(intp);
                        switch (i) {
                        case DDI_PROP_RESULT_EOF:
                                return (DDI_PROP_END_OF_DATA);

                        case DDI_PROP_RESULT_ERROR:
                                return (DDI_PROP_CANNOT_DECODE);
                        }
                }
        }

        *nelements = cnt;
        *(int64_t **)data = intp;

        return (DDI_PROP_SUCCESS);
}

/*
 * Encode an array of integers property (Can be one element).
 * Sizes the handle buffer from the operator's GET_ESIZE answer, then
 * encodes each element in turn.
 */
int
ddi_prop_fm_encode_ints(prop_handle_t *ph, void *data, uint_t nelements)
{
        int i;
        int *tmp;
        int cnt;
        int size;

        /*
         * If there is no data, we cannot do anything
         */
        if (nelements == 0)
                return (DDI_PROP_CANNOT_ENCODE);

        /*
         * Get the size of an encoded int.
         */
        size = DDI_PROP_INT(ph, DDI_PROP_CMD_GET_ESIZE, NULL);

        if (size < DDI_PROP_RESULT_OK) {
                switch (size) {
                case DDI_PROP_RESULT_EOF:
                        return (DDI_PROP_END_OF_DATA);

                case DDI_PROP_RESULT_ERROR:
                        return (DDI_PROP_CANNOT_ENCODE);
                }
        }

        /*
         * Allocate space in the handle to store the encoded int.
         */
        if (ddi_prop_encode_alloc(ph, size * nelements) !=
            DDI_PROP_SUCCESS)
                return (DDI_PROP_NO_MEMORY);

        /*
         * Encode the array of ints.
         */
        tmp = (int *)data;
        for (cnt = 0; cnt < nelements; cnt++, tmp++) {
                i = DDI_PROP_INT(ph, DDI_PROP_CMD_ENCODE, tmp);
                if (i < DDI_PROP_RESULT_OK) {
                        switch (i) {
                        case DDI_PROP_RESULT_EOF:
                                return (DDI_PROP_END_OF_DATA);

                        case DDI_PROP_RESULT_ERROR:
                                return (DDI_PROP_CANNOT_ENCODE);
                        }
                }
        }

        return (DDI_PROP_SUCCESS);
}


/*
 * Encode a 64 bit integer array property.
 * Mirrors ddi_prop_fm_encode_ints using the int64 operator.
 */
int
ddi_prop_fm_encode_int64(prop_handle_t *ph, void *data, uint_t nelements)
{
        int i;
        int cnt;
        int size;
        int64_t *tmp;

        /*
         * If there is no data, we cannot do anything
         */
        if (nelements == 0)
                return (DDI_PROP_CANNOT_ENCODE);

        /*
         * Get the size of an encoded 64 bit int.
         */
        size = DDI_PROP_INT64(ph, DDI_PROP_CMD_GET_ESIZE, NULL);

        if (size < DDI_PROP_RESULT_OK) {
                switch (size) {
                case DDI_PROP_RESULT_EOF:
                        return (DDI_PROP_END_OF_DATA);

                case DDI_PROP_RESULT_ERROR:
                        return (DDI_PROP_CANNOT_ENCODE);
                }
        }

        /*
         * Allocate space in the handle to store the encoded int.
         */
        if (ddi_prop_encode_alloc(ph, size * nelements) !=
            DDI_PROP_SUCCESS)
                return (DDI_PROP_NO_MEMORY);

        /*
         * Encode the array of ints.
         */
        tmp = (int64_t *)data;
        for (cnt = 0; cnt < nelements; cnt++, tmp++) {
                i = DDI_PROP_INT64(ph, DDI_PROP_CMD_ENCODE, tmp);
                if (i < DDI_PROP_RESULT_OK) {
                        switch (i) {
                        case DDI_PROP_RESULT_EOF:
                                return (DDI_PROP_END_OF_DATA);

                        case DDI_PROP_RESULT_ERROR:
                                return (DDI_PROP_CANNOT_ENCODE);
                        }
                }
        }

        return (DDI_PROP_SUCCESS);
}

/*
 * Decode a single string property.
 * Queries the decoded size first, allocates, rewinds the cursor, then
 * decodes into the new buffer.  On success the caller owns *data and
 * must release it with ddi_prop_free().
 */
static int
ddi_prop_fm_decode_string(prop_handle_t *ph, void *data, uint_t *nelements)
{
        char *tmp;
        char *str;
        int i;
        int size;

        /*
         * If there is nothing to decode return an error
         */
        if (ph->ph_size == 0)
                return (DDI_PROP_END_OF_DATA);

        /*
         * Get the decoded size of the encoded string.
         */
        size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
        if (size < DDI_PROP_RESULT_OK) {
                switch (size) {
                case DDI_PROP_RESULT_EOF:
                        return (DDI_PROP_END_OF_DATA);

                case DDI_PROP_RESULT_ERROR:
                        return (DDI_PROP_CANNOT_DECODE);
                }
        }

        /*
         * Allocate memory to store the decoded value in.
         */
        str = ddi_prop_decode_alloc((size_t)size, ddi_prop_free_string);

        /* GET_DSIZE advanced the cursor; rewind before decoding. */
        ddi_prop_reset_pos(ph);

        /*
         * Decode the str and place it in the space we just allocated
         */
        tmp = str;
        i = DDI_PROP_STR(ph, DDI_PROP_CMD_DECODE, tmp);
        if (i < DDI_PROP_RESULT_OK) {
                /*
                 * Free the space we just allocated
                 * and return an error.
                 */
                ddi_prop_free(str);
                switch (i) {
                case DDI_PROP_RESULT_EOF:
                        return (DDI_PROP_END_OF_DATA);

                case DDI_PROP_RESULT_ERROR:
                        return (DDI_PROP_CANNOT_DECODE);
                }
        }

        *(char **)data = str;
        *nelements = 1;

        return (DDI_PROP_SUCCESS);
}

/*
 * Decode an array of strings.
 */
int
ddi_prop_fm_decode_strings(prop_handle_t *ph, void *data, uint_t *nelements)
{
        int cnt = 0;
        char **strs;
        char **tmp;
        char *ptr;
        int i;
        int n;
        int size;
        size_t nbytes;

        /*
         * Three passes over the encoded data:
         *   1. count the strings (SKIP),
         *   2. sum their decoded sizes to size one allocation that holds
         *      both the NULL-terminated argv-style pointer vector and the
         *      string bytes themselves,
         *   3. decode each string into its slot.
         * The cursor is rewound with ddi_prop_reset_pos() between passes.
         */

        /*
         * Figure out how many array elements there are by going through the
         * data without decoding it first and counting.
         */
        for (;;) {
                i = DDI_PROP_STR(ph, DDI_PROP_CMD_SKIP, NULL);
                if (i < 0)
                        break;
                cnt++;
        }

        /*
         * If there are no elements return an error
         */
        if (cnt == 0)
                return (DDI_PROP_END_OF_DATA);

        /*
         * If we cannot skip through the data, we cannot decode it
         */
        if (i == DDI_PROP_RESULT_ERROR)
                return (DDI_PROP_CANNOT_DECODE);

        /*
         * Reset the data pointer to the beginning of the encoded data
         */
        ddi_prop_reset_pos(ph);

        /*
         * Figure out how much memory we need for the sum total
         */
        nbytes = (cnt + 1) * sizeof (char *);

        for (n = 0; n < cnt; n++) {
                /*
                 * Get the decoded size of the current encoded string.
                 */
                size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
                if (size < DDI_PROP_RESULT_OK) {
                        switch (size) {
                        case DDI_PROP_RESULT_EOF:
                                return (DDI_PROP_END_OF_DATA);

                        case DDI_PROP_RESULT_ERROR:
                                return (DDI_PROP_CANNOT_DECODE);
                        }
                }

                nbytes += size;
        }

        /*
         * Allocate memory in which to store the decoded strings.
         */
        strs = ddi_prop_decode_alloc(nbytes, ddi_prop_free_strings);

        /*
         * Set up pointers for each string by figuring out yet
         * again how long each string is.
         */
        ddi_prop_reset_pos(ph);
        ptr = (caddr_t)strs + ((cnt + 1) * sizeof (char *));
        for (tmp = strs, n = 0; n < cnt; n++, tmp++) {
                /*
                 * Get the decoded size of the current encoded string.
                 */
                size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
                if (size < DDI_PROP_RESULT_OK) {
                        ddi_prop_free(strs);
                        switch (size) {
                        case DDI_PROP_RESULT_EOF:
                                return (DDI_PROP_END_OF_DATA);

                        case DDI_PROP_RESULT_ERROR:
                                return (DDI_PROP_CANNOT_DECODE);
                        }
                }

                *tmp = ptr;
                ptr += size;
        }

        /*
         * String array is terminated by a NULL
         */
        *tmp = NULL;

        /*
         * Finally, we can decode each string
         */
        ddi_prop_reset_pos(ph);
        for (tmp = strs, n = 0; n < cnt; n++, tmp++) {
                i = DDI_PROP_STR(ph, DDI_PROP_CMD_DECODE, *tmp);
                if (i < DDI_PROP_RESULT_OK) {
                        /*
                         * Free the space we just allocated
                         * and return an error
                         */
                        ddi_prop_free(strs);
                        switch (i) {
                        case DDI_PROP_RESULT_EOF:
                                return (DDI_PROP_END_OF_DATA);

                        case DDI_PROP_RESULT_ERROR:
                                return (DDI_PROP_CANNOT_DECODE);
                        }
                }
        }

        *(char ***)data = strs;
        *nelements = cnt;

        return (DDI_PROP_SUCCESS);
}

/*
 * Encode a string.
 * 'data' is a pointer to the string pointer (char **); sizes the handle
 * buffer from GET_ESIZE, rewinds, then encodes.
 */
int
ddi_prop_fm_encode_string(prop_handle_t *ph, void *data, uint_t nelements)
{
        char **tmp;
        int size;
        int i;

        /*
         * If there is no data, we cannot do anything
         */
        if (nelements == 0)
                return (DDI_PROP_CANNOT_ENCODE);

        /*
         * Get the size of the encoded string.
         */
        tmp = (char **)data;
        size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_ESIZE, *tmp);
        if (size < DDI_PROP_RESULT_OK) {
                switch (size) {
                case DDI_PROP_RESULT_EOF:
                        return (DDI_PROP_END_OF_DATA);

                case DDI_PROP_RESULT_ERROR:
                        return (DDI_PROP_CANNOT_ENCODE);
                }
        }

        /*
         * Allocate space in the handle to store the encoded string.
         */
        if (ddi_prop_encode_alloc(ph, size) != DDI_PROP_SUCCESS)
                return (DDI_PROP_NO_MEMORY);

        ddi_prop_reset_pos(ph);

        /*
         * Encode the string.
         */
        tmp = (char **)data;
        i = DDI_PROP_STR(ph, DDI_PROP_CMD_ENCODE, *tmp);
        if (i < DDI_PROP_RESULT_OK) {
                switch (i) {
                case DDI_PROP_RESULT_EOF:
                        return (DDI_PROP_END_OF_DATA);

                case DDI_PROP_RESULT_ERROR:
                        return (DDI_PROP_CANNOT_ENCODE);
                }
        }

        return (DDI_PROP_SUCCESS);
}


/*
 * Encode an array of strings.
 * 'data' is an array of string pointers (char **); two passes: sum the
 * encoded sizes, allocate, rewind, then encode each string.
 */
int
ddi_prop_fm_encode_strings(prop_handle_t *ph, void *data, uint_t nelements)
{
        int cnt = 0;
        char **tmp;
        int size;
        uint_t total_size;
        int i;

        /*
         * If there is no data, we cannot do anything
         */
        if (nelements == 0)
                return (DDI_PROP_CANNOT_ENCODE);

        /*
         * Get the total size required to encode all the strings.
         */
        total_size = 0;
        tmp = (char **)data;
        for (cnt = 0; cnt < nelements; cnt++, tmp++) {
                size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_ESIZE, *tmp);
                if (size < DDI_PROP_RESULT_OK) {
                        switch (size) {
                        case DDI_PROP_RESULT_EOF:
                                return (DDI_PROP_END_OF_DATA);

                        case DDI_PROP_RESULT_ERROR:
                                return (DDI_PROP_CANNOT_ENCODE);
                        }
                }
                total_size += (uint_t)size;
        }

        /*
         * Allocate space in the handle to store the encoded strings.
         */
        if (ddi_prop_encode_alloc(ph, total_size) != DDI_PROP_SUCCESS)
                return (DDI_PROP_NO_MEMORY);

        ddi_prop_reset_pos(ph);

        /*
         * Encode the array of strings.
         */
        tmp = (char **)data;
        for (cnt = 0; cnt < nelements; cnt++, tmp++) {
                i = DDI_PROP_STR(ph, DDI_PROP_CMD_ENCODE, *tmp);
                if (i < DDI_PROP_RESULT_OK) {
                        switch (i) {
                        case DDI_PROP_RESULT_EOF:
                                return (DDI_PROP_END_OF_DATA);

                        case DDI_PROP_RESULT_ERROR:
                                return (DDI_PROP_CANNOT_ENCODE);
                        }
                }
        }

        return (DDI_PROP_SUCCESS);
}


/*
 * Decode an array of bytes.
 * On success *data points at a freshly allocated byte buffer (owned by
 * the caller, released with ddi_prop_free()) and *nelements is the byte
 * count.
 */
static int
ddi_prop_fm_decode_bytes(prop_handle_t *ph, void *data, uint_t *nelements)
{
        uchar_t *tmp;
        int nbytes;
        int i;

        /*
         * If there are no elements return an error
         */
        if (ph->ph_size == 0)
                return (DDI_PROP_END_OF_DATA);

        /*
         * Get the size of the encoded array of bytes.
         */
        nbytes = DDI_PROP_BYTES(ph, DDI_PROP_CMD_GET_DSIZE,
            data, ph->ph_size);
        if (nbytes < DDI_PROP_RESULT_OK) {
                switch (nbytes) {
                case DDI_PROP_RESULT_EOF:
                        return (DDI_PROP_END_OF_DATA);

                case DDI_PROP_RESULT_ERROR:
                        return (DDI_PROP_CANNOT_DECODE);
                }
        }

        /*
         * Allocate memory to store the decoded value in.
         */
        tmp = ddi_prop_decode_alloc(nbytes, ddi_prop_free_bytes);

        /*
         * Decode each element and place it in the space we just allocated
         */
        i = DDI_PROP_BYTES(ph, DDI_PROP_CMD_DECODE, tmp, nbytes);
        if (i < DDI_PROP_RESULT_OK) {
                /*
                 * Free the space we just allocated
                 * and return an error
                 */
                ddi_prop_free(tmp);
                switch (i) {
                case DDI_PROP_RESULT_EOF:
                        return (DDI_PROP_END_OF_DATA);

                case DDI_PROP_RESULT_ERROR:
                        return (DDI_PROP_CANNOT_DECODE);
                }
        }

        *(uchar_t **)data = tmp;
        *nelements = nbytes;

        return (DDI_PROP_SUCCESS);
}

/*
 * Encode an array of bytes.
2966 */ 2967 int 2968 ddi_prop_fm_encode_bytes(prop_handle_t *ph, void *data, uint_t nelements) 2969 { 2970 int size; 2971 int i; 2972 2973 /* 2974 * If there are no elements, then this is a boolean property, 2975 * so just create a property handle with no data and return. 2976 */ 2977 if (nelements == 0) { 2978 (void) ddi_prop_encode_alloc(ph, 0); 2979 return (DDI_PROP_SUCCESS); 2980 } 2981 2982 /* 2983 * Get the size of the encoded array of bytes. 2984 */ 2985 size = DDI_PROP_BYTES(ph, DDI_PROP_CMD_GET_ESIZE, (uchar_t *)data, 2986 nelements); 2987 if (size < DDI_PROP_RESULT_OK) { 2988 switch (size) { 2989 case DDI_PROP_RESULT_EOF: 2990 return (DDI_PROP_END_OF_DATA); 2991 2992 case DDI_PROP_RESULT_ERROR: 2993 return (DDI_PROP_CANNOT_DECODE); 2994 } 2995 } 2996 2997 /* 2998 * Allocate space in the handle to store the encoded bytes. 2999 */ 3000 if (ddi_prop_encode_alloc(ph, (uint_t)size) != DDI_PROP_SUCCESS) 3001 return (DDI_PROP_NO_MEMORY); 3002 3003 /* 3004 * Encode the array of bytes. 3005 */ 3006 i = DDI_PROP_BYTES(ph, DDI_PROP_CMD_ENCODE, (uchar_t *)data, 3007 nelements); 3008 if (i < DDI_PROP_RESULT_OK) { 3009 switch (i) { 3010 case DDI_PROP_RESULT_EOF: 3011 return (DDI_PROP_END_OF_DATA); 3012 3013 case DDI_PROP_RESULT_ERROR: 3014 return (DDI_PROP_CANNOT_ENCODE); 3015 } 3016 } 3017 3018 return (DDI_PROP_SUCCESS); 3019 } 3020 3021 /* 3022 * OBP 1275 integer, string and byte operators. 
 *
 * DDI_PROP_CMD_DECODE:
 *
 *	DDI_PROP_RESULT_ERROR:		cannot decode the data
 *	DDI_PROP_RESULT_EOF:		end of data
 *	DDI_PROP_OK:			data was decoded
 *
 * DDI_PROP_CMD_ENCODE:
 *
 *	DDI_PROP_RESULT_ERROR:		cannot encode the data
 *	DDI_PROP_RESULT_EOF:		end of data
 *	DDI_PROP_OK:			data was encoded
 *
 * DDI_PROP_CMD_SKIP:
 *
 *	DDI_PROP_RESULT_ERROR:		cannot skip the data
 *	DDI_PROP_RESULT_EOF:		end of data
 *	DDI_PROP_OK:			data was skipped
 *
 * DDI_PROP_CMD_GET_ESIZE:
 *
 *	DDI_PROP_RESULT_ERROR:		cannot get encoded size
 *	DDI_PROP_RESULT_EOF:		end of data
 *	> 0:				the encoded size
 *
 * DDI_PROP_CMD_GET_DSIZE:
 *
 *	DDI_PROP_RESULT_ERROR:		cannot get decoded size
 *	DDI_PROP_RESULT_EOF:		end of data
 *	> 0:				the decoded size
 */

/*
 * OBP 1275 integer operator
 *
 * OBP properties are a byte stream of data, so integers may not be
 * properly aligned.  Therefore we need to copy them one byte at a time.
 */
int
ddi_prop_1275_int(prop_handle_t *ph, uint_t cmd, int *data)
{
        int i;

        switch (cmd) {
        case DDI_PROP_CMD_DECODE:
                /*
                 * Check that there is encoded data
                 */
                if (ph->ph_cur_pos == NULL || ph->ph_size == 0)
                        return (DDI_PROP_RESULT_ERROR);
                if (ph->ph_flags & PH_FROM_PROM) {
                        i = MIN(ph->ph_size, PROP_1275_INT_SIZE);
                        /*
                         * NOTE(review): the bounds comparisons below perform
                         * arithmetic on (int *) so the ph_size offset is
                         * scaled by sizeof (int) -- this looks like a
                         * longstanding off-scale check; preserved as-is.
                         */
                        if ((int *)ph->ph_cur_pos > ((int *)ph->ph_data +
                            ph->ph_size - i))
                                return (DDI_PROP_RESULT_ERROR);
                } else {
                        if (ph->ph_size < sizeof (int) ||
                            ((int *)ph->ph_cur_pos > ((int *)ph->ph_data +
                            ph->ph_size - sizeof (int))))
                                return (DDI_PROP_RESULT_ERROR);
                }

                /*
                 * Copy the integer, using the implementation-specific
                 * copy function if the property is coming from the PROM.
                 */
                if (ph->ph_flags & PH_FROM_PROM) {
                        *data = impl_ddi_prop_int_from_prom(
                            (uchar_t *)ph->ph_cur_pos,
                            (ph->ph_size < PROP_1275_INT_SIZE) ?
                            ph->ph_size : PROP_1275_INT_SIZE);
                } else {
                        bcopy(ph->ph_cur_pos, data, sizeof (int));
                }

                /*
                 * Move the current location to the start of the next
                 * bit of undecoded data.
                 */
                ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
                    PROP_1275_INT_SIZE;
                return (DDI_PROP_RESULT_OK);

        case DDI_PROP_CMD_ENCODE:
                /*
                 * Check that there is room to encode the data
                 */
                if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
                    ph->ph_size < PROP_1275_INT_SIZE ||
                    ((int *)ph->ph_cur_pos > ((int *)ph->ph_data +
                    ph->ph_size - sizeof (int))))
                        return (DDI_PROP_RESULT_ERROR);

                /*
                 * Encode the integer into the byte stream one byte at a
                 * time.
                 */
                bcopy(data, ph->ph_cur_pos, sizeof (int));

                /*
                 * Move the current location to the start of the next bit of
                 * space where we can store encoded data.
                 */
                ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos + PROP_1275_INT_SIZE;
                return (DDI_PROP_RESULT_OK);

        case DDI_PROP_CMD_SKIP:
                /*
                 * Check that there is encoded data
                 */
                if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
                    ph->ph_size < PROP_1275_INT_SIZE)
                        return (DDI_PROP_RESULT_ERROR);


                /* At or past the end of the buffer: nothing left to skip. */
                if ((caddr_t)ph->ph_cur_pos ==
                    (caddr_t)ph->ph_data + ph->ph_size) {
                        return (DDI_PROP_RESULT_EOF);
                } else if ((caddr_t)ph->ph_cur_pos >
                    (caddr_t)ph->ph_data + ph->ph_size) {
                        return (DDI_PROP_RESULT_EOF);
                }

                /*
                 * Move the current location to the start of the next bit of
                 * undecoded data.
                 */
                ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos + PROP_1275_INT_SIZE;
                return (DDI_PROP_RESULT_OK);

        case DDI_PROP_CMD_GET_ESIZE:
                /*
                 * Return the size of an encoded integer on OBP
                 */
                return (PROP_1275_INT_SIZE);

        case DDI_PROP_CMD_GET_DSIZE:
                /*
                 * Return the size of a decoded integer on the system.
                 */
                return (sizeof (int));

        default:
#ifdef DEBUG
                panic("ddi_prop_1275_int: %x impossible", cmd);
                /*NOTREACHED*/
#else
                return (DDI_PROP_RESULT_ERROR);
#endif /* DEBUG */
        }
}

/*
 * 64 bit integer operator.
 *
 * This is an extension, defined by Sun, to the 1275 integer
 * operator.  This routine handles the encoding/decoding of
 * 64 bit integer properties.  64-bit values are never sourced from
 * the PROM: any PH_FROM_PROM handle is rejected with an error.
 */
int
ddi_prop_int64_op(prop_handle_t *ph, uint_t cmd, int64_t *data)
{

        switch (cmd) {
        case DDI_PROP_CMD_DECODE:
                /*
                 * Check that there is encoded data
                 */
                if (ph->ph_cur_pos == NULL || ph->ph_size == 0)
                        return (DDI_PROP_RESULT_ERROR);
                if (ph->ph_flags & PH_FROM_PROM) {
                        /* 64-bit ints are not supported from the PROM. */
                        return (DDI_PROP_RESULT_ERROR);
                } else {
                        if (ph->ph_size < sizeof (int64_t) ||
                            ((int64_t *)ph->ph_cur_pos >
                            ((int64_t *)ph->ph_data +
                            ph->ph_size - sizeof (int64_t))))
                                return (DDI_PROP_RESULT_ERROR);
                }
                /*
                 * Copy the integer, using the implementation-specific
                 * copy function if the property is coming from the PROM.
                 * (The PH_FROM_PROM branch below is unreachable: that case
                 * already returned above.  Kept as defensive code.)
                 */
                if (ph->ph_flags & PH_FROM_PROM) {
                        return (DDI_PROP_RESULT_ERROR);
                } else {
                        bcopy(ph->ph_cur_pos, data, sizeof (int64_t));
                }

                /*
                 * Move the current location to the start of the next
                 * bit of undecoded data.
                 */
                ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
                    sizeof (int64_t);
                return (DDI_PROP_RESULT_OK);

        case DDI_PROP_CMD_ENCODE:
                /*
                 * Check that there is room to encode the data
                 */
                if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
                    ph->ph_size < sizeof (int64_t) ||
                    ((int64_t *)ph->ph_cur_pos > ((int64_t *)ph->ph_data +
                    ph->ph_size - sizeof (int64_t))))
                        return (DDI_PROP_RESULT_ERROR);

                /*
                 * Encode the integer into the byte stream one byte at a
                 * time.
                 */
                bcopy(data, ph->ph_cur_pos, sizeof (int64_t));

                /*
                 * Move the current location to the start of the next bit of
                 * space where we can store encoded data.
                 */
                ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
                    sizeof (int64_t);
                return (DDI_PROP_RESULT_OK);

        case DDI_PROP_CMD_SKIP:
                /*
                 * Check that there is encoded data
                 */
                if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
                    ph->ph_size < sizeof (int64_t))
                        return (DDI_PROP_RESULT_ERROR);

                /* At or past the end of the buffer: nothing left to skip. */
                if ((caddr_t)ph->ph_cur_pos ==
                    (caddr_t)ph->ph_data + ph->ph_size) {
                        return (DDI_PROP_RESULT_EOF);
                } else if ((caddr_t)ph->ph_cur_pos >
                    (caddr_t)ph->ph_data + ph->ph_size) {
                        return (DDI_PROP_RESULT_EOF);
                }

                /*
                 * Move the current location to the start of
                 * the next bit of undecoded data.
                 */
                ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
                    sizeof (int64_t);
                return (DDI_PROP_RESULT_OK);

        case DDI_PROP_CMD_GET_ESIZE:
                /*
                 * Return the size of an encoded integer on OBP
                 */
                return (sizeof (int64_t));

        case DDI_PROP_CMD_GET_DSIZE:
                /*
                 * Return the size of a decoded integer on the system.
                 */
                return (sizeof (int64_t));

        default:
#ifdef DEBUG
                panic("ddi_prop_int64_op: %x impossible", cmd);
                /*NOTREACHED*/
#else
                return (DDI_PROP_RESULT_ERROR);
#endif /* DEBUG */
        }
}

/*
 * OBP 1275 string operator.
 *
 * OBP strings are NULL terminated.
 */
int
ddi_prop_1275_string(prop_handle_t *ph, uint_t cmd, char *data)
{
        int n;
        char *p;
        char *end;

        switch (cmd) {
        case DDI_PROP_CMD_DECODE:
                /*
                 * Check that there is encoded data
                 */
                if (ph->ph_cur_pos == NULL || ph->ph_size == 0) {
                        return (DDI_PROP_RESULT_ERROR);
                }

                /*
                 * Match DDI_PROP_CMD_GET_DSIZE logic for when to stop and
                 * how to NULL terminate result.
                 */
                p = (char *)ph->ph_cur_pos;
                end = (char *)ph->ph_data + ph->ph_size;
                if (p >= end)
                        return (DDI_PROP_RESULT_EOF);

                while (p < end) {
                        *data++ = *p;
                        if (*p++ == 0) {	/* NULL from OBP */
                                ph->ph_cur_pos = p;
                                return (DDI_PROP_RESULT_OK);
                        }
                }

                /*
                 * If OBP did not NULL terminate string, which happens
                 * (at least) for 'true'/'false' boolean values, account for
                 * the space and store null termination on decode.
                 */
                ph->ph_cur_pos = p;
                *data = 0;
                return (DDI_PROP_RESULT_OK);

        case DDI_PROP_CMD_ENCODE:
                /*
                 * Check that there is room to encode the data
                 */
                if (ph->ph_cur_pos == NULL || ph->ph_size == 0) {
                        return (DDI_PROP_RESULT_ERROR);
                }

                n = strlen(data) + 1;
                if ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
                    ph->ph_size - n)) {
                        return (DDI_PROP_RESULT_ERROR);
                }

                /*
                 * Copy the NULL terminated string
                 */
                bcopy(data, ph->ph_cur_pos, n);

                /*
                 * Move the current location to the start of the next bit of
                 * space where we can store encoded data.
                 */
                ph->ph_cur_pos = (char *)ph->ph_cur_pos + n;
                return (DDI_PROP_RESULT_OK);

        case DDI_PROP_CMD_SKIP:
                /*
                 * Check that there is encoded data
                 */
                if (ph->ph_cur_pos == NULL || ph->ph_size == 0) {
                        return (DDI_PROP_RESULT_ERROR);
                }

                /*
                 * Return the string length plus one for the NULL
                 * We know the size of the property, we need to
                 * ensure that the string is properly formatted,
                 * since we may be looking up random OBP data.
                 */
                p = (char *)ph->ph_cur_pos;
                end = (char *)ph->ph_data + ph->ph_size;
                if (p >= end)
                        return (DDI_PROP_RESULT_EOF);

                while (p < end) {
                        if (*p++ == 0) {	/* NULL from OBP */
                                ph->ph_cur_pos = p;
                                return (DDI_PROP_RESULT_OK);
                        }
                }

                /*
                 * Accommodate the fact that OBP does not always NULL
                 * terminate strings.
                 */
                ph->ph_cur_pos = p;
                return (DDI_PROP_RESULT_OK);

        case DDI_PROP_CMD_GET_ESIZE:
                /*
                 * Return the size of the encoded string on OBP.
                 */
                return (strlen(data) + 1);

        case DDI_PROP_CMD_GET_DSIZE:
                /*
                 * Return the string length plus one for the NULL.
                 * We know the size of the property, we need to
                 * ensure that the string is properly formatted,
                 * since we may be looking up random OBP data.
                 */
                p = (char *)ph->ph_cur_pos;
                end = (char *)ph->ph_data + ph->ph_size;
                if (p >= end)
                        return (DDI_PROP_RESULT_EOF);

                for (n = 0; p < end; n++) {
                        if (*p++ == 0) {	/* NULL from OBP */
                                ph->ph_cur_pos = p;
                                return (n + 1);
                        }
                }

                /*
                 * If OBP did not NULL terminate string, which happens for
                 * 'true'/'false' boolean values, account for the space
                 * to store null termination here.
                 */
                ph->ph_cur_pos = p;
                return (n + 1);

        default:
#ifdef DEBUG
                panic("ddi_prop_1275_string: %x impossible", cmd);
                /*NOTREACHED*/
#else
                return (DDI_PROP_RESULT_ERROR);
#endif /* DEBUG */
        }
}

/*
 * OBP 1275 byte operator
 *
 * Caller must specify the number of bytes to get.  OBP encodes bytes
 * as a byte so there is a 1-to-1 translation.
 */
int
ddi_prop_1275_bytes(prop_handle_t *ph, uint_t cmd, uchar_t *data,
    uint_t nelements)
{
        switch (cmd) {
        case DDI_PROP_CMD_DECODE:
                /*
                 * Check that there is encoded data
                 */
                if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
                    ph->ph_size < nelements ||
                    ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
                    ph->ph_size - nelements)))
                        return (DDI_PROP_RESULT_ERROR);

                /*
                 * Copy out the bytes
                 */
                bcopy(ph->ph_cur_pos, data, nelements);

                /*
                 * Move the current location
                 */
                ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements;
                return (DDI_PROP_RESULT_OK);

        case DDI_PROP_CMD_ENCODE:
                /*
                 * Check that there is room to encode the data
                 */
                if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
                    ph->ph_size < nelements ||
                    ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
                    ph->ph_size - nelements)))
                        return (DDI_PROP_RESULT_ERROR);

                /*
                 * Copy in the bytes
                 */
                bcopy(data, ph->ph_cur_pos, nelements);

                /*
                 * Move the current location to the start of the next bit of
                 * space where we can store encoded data.
                 */
                ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements;
                return (DDI_PROP_RESULT_OK);

        case DDI_PROP_CMD_SKIP:
                /*
                 * Check that there is encoded data
                 */
                if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
                    ph->ph_size < nelements)
                        return (DDI_PROP_RESULT_ERROR);

                if ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
                    ph->ph_size - nelements))
                        return (DDI_PROP_RESULT_EOF);

                /*
                 * Move the current location
                 */
                ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements;
                return (DDI_PROP_RESULT_OK);

        case DDI_PROP_CMD_GET_ESIZE:
                /*
                 * The size in bytes of the encoded size is the
                 * same as the decoded size provided by the caller.
                 */
                return (nelements);

        case DDI_PROP_CMD_GET_DSIZE:
                /*
                 * Just return the number of bytes specified by the caller.
                 */
                return (nelements);

        default:
#ifdef DEBUG
                panic("ddi_prop_1275_bytes: %x impossible", cmd);
                /*NOTREACHED*/
#else
                return (DDI_PROP_RESULT_ERROR);
#endif /* DEBUG */
        }
}

/*
 * Used for properties that come from the OBP, hardware configuration files,
 * or that are created by calls to ddi_prop_update(9F).
 */
static struct prop_handle_ops prop_1275_ops = {
        ddi_prop_1275_int,
        ddi_prop_1275_string,
        ddi_prop_1275_bytes,
        ddi_prop_int64_op
};


/*
 * Interface to create/modify a managed property on child's behalf...
 * Flags interpreted are:
 * DDI_PROP_CANSLEEP:	Allow memory allocation to sleep.
 * DDI_PROP_SYSTEM_DEF:	Manipulate system list rather than driver list.
 *
 * Use same dev_t when modifying or undefining a property.
 * Search for properties with DDI_DEV_T_ANY to match first named
 * property on the list.
 *
 * Properties are stored LIFO and subsequently will match the first
 * `matching' instance.
 */

/*
 * ddi_prop_add: Add a software defined property
 */

/*
 * define to get a new ddi_prop_t.
 * km_flags are KM_SLEEP or KM_NOSLEEP.
 */

#define	DDI_NEW_PROP_T(km_flags)	\
	(kmem_zalloc(sizeof (ddi_prop_t), km_flags))

static int
ddi_prop_add(dev_t dev, dev_info_t *dip, int flags,
    char *name, caddr_t value, int length)
{
	ddi_prop_t	*new_propp, *propp;
	ddi_prop_t	**list_head = &(DEVI(dip)->devi_drv_prop_ptr);
	int		km_flags = KM_NOSLEEP;
	int		name_buf_len;

	/*
	 * If dev_t is DDI_DEV_T_ANY or name's length is zero return error.
	 */

	if (dev == DDI_DEV_T_ANY || name == (char *)0 || strlen(name) == 0)
		return (DDI_PROP_INVAL_ARG);

	if (flags & DDI_PROP_CANSLEEP)
		km_flags = KM_SLEEP;

	/* Select the target list: system, hardware, or (default) driver */
	if (flags & DDI_PROP_SYSTEM_DEF)
		list_head = &(DEVI(dip)->devi_sys_prop_ptr);
	else if (flags & DDI_PROP_HW_DEF)
		list_head = &(DEVI(dip)->devi_hw_prop_ptr);

	if ((new_propp = DDI_NEW_PROP_T(km_flags)) == NULL) {
		cmn_err(CE_CONT, prop_no_mem_msg, name);
		return (DDI_PROP_NO_MEMORY);
	}

	/*
	 * If dev is major number 0, then we need to do a ddi_name_to_major
	 * to get the real major number for the device.  This needs to be
	 * done because some drivers need to call ddi_prop_create in their
	 * attach routines but they don't have a dev.  By creating the dev
	 * ourself if the major number is 0, drivers will not have to know
	 * what their major number is.  They can just create a dev with major
	 * number 0 and pass it in.  For device 0, we will be doing a little
	 * extra work by recreating the same dev that we already have, but
	 * it's the price you pay :-).
	 *
	 * This fixes bug #1098060.
	 */
	if (getmajor(dev) == DDI_MAJOR_T_UNKNOWN) {
		new_propp->prop_dev =
		    makedevice(ddi_name_to_major(DEVI(dip)->devi_binding_name),
		    getminor(dev));
	} else
		new_propp->prop_dev = dev;

	/*
	 * Allocate space for property name and copy it in...
	 */

	name_buf_len = strlen(name) + 1;
	new_propp->prop_name = kmem_alloc(name_buf_len, km_flags);
	if (new_propp->prop_name == 0) {
		kmem_free(new_propp, sizeof (ddi_prop_t));
		cmn_err(CE_CONT, prop_no_mem_msg, name);
		return (DDI_PROP_NO_MEMORY);
	}
	bcopy(name, new_propp->prop_name, name_buf_len);

	/*
	 * Set the property type
	 */
	new_propp->prop_flags = flags & DDI_PROP_TYPE_MASK;

	/*
	 * Set length and value ONLY if not an explicit property undefine:
	 * NOTE: value and length are zero for explicit undefines.
	 */

	if (flags & DDI_PROP_UNDEF_IT) {
		new_propp->prop_flags |= DDI_PROP_UNDEF_IT;
	} else {
		if ((new_propp->prop_len = length) != 0) {
			new_propp->prop_val = kmem_alloc(length, km_flags);
			if (new_propp->prop_val == 0) {
				kmem_free(new_propp->prop_name, name_buf_len);
				kmem_free(new_propp, sizeof (ddi_prop_t));
				cmn_err(CE_CONT, prop_no_mem_msg, name);
				return (DDI_PROP_NO_MEMORY);
			}
			bcopy(value, new_propp->prop_val, length);
		}
	}

	/*
	 * Link property into beginning of list. (Properties are LIFO order.)
	 * devi_lock is held only for the list-link itself; all allocation
	 * and copying above is done outside the lock.
	 */

	mutex_enter(&(DEVI(dip)->devi_lock));
	propp = *list_head;
	new_propp->prop_next = propp;
	*list_head = new_propp;
	mutex_exit(&(DEVI(dip)->devi_lock));
	return (DDI_PROP_SUCCESS);
}


/*
 * ddi_prop_change: Modify a software managed property value
 *
 * Set new length and value if found.
 * returns DDI_PROP_INVAL_ARG if dev is DDI_DEV_T_ANY or
 * input name is the NULL string.
 * returns DDI_PROP_NO_MEMORY if unable to allocate memory
 *
 * Note: an undef can be modified to be a define,
 * (you can't go the other way.)
 */

static int
ddi_prop_change(dev_t dev, dev_info_t *dip, int flags,
    char *name, caddr_t value, int length)
{
	ddi_prop_t	*propp;
	ddi_prop_t	**ppropp;
	caddr_t		p = NULL;

	if ((dev == DDI_DEV_T_ANY) || (name == NULL) || (strlen(name) == 0))
		return (DDI_PROP_INVAL_ARG);

	/*
	 * Preallocate buffer, even if we don't need it...
	 * (done before taking devi_lock so the allocation may sleep).
	 */
	if (length != 0) {
		p = kmem_alloc(length, (flags & DDI_PROP_CANSLEEP) ?
		    KM_SLEEP : KM_NOSLEEP);
		if (p == NULL) {
			cmn_err(CE_CONT, prop_no_mem_msg, name);
			return (DDI_PROP_NO_MEMORY);
		}
	}

	/*
	 * If the dev_t value contains DDI_MAJOR_T_UNKNOWN for the major
	 * number, a real dev_t value should be created based upon the dip's
	 * binding driver.  See ddi_prop_add...
	 */
	if (getmajor(dev) == DDI_MAJOR_T_UNKNOWN)
		dev = makedevice(
		    ddi_name_to_major(DEVI(dip)->devi_binding_name),
		    getminor(dev));

	/*
	 * Check to see if the property exists.  If so we modify it.
	 * Else we create it by calling ddi_prop_add().
	 */
	mutex_enter(&(DEVI(dip)->devi_lock));
	ppropp = &DEVI(dip)->devi_drv_prop_ptr;
	if (flags & DDI_PROP_SYSTEM_DEF)
		ppropp = &DEVI(dip)->devi_sys_prop_ptr;
	else if (flags & DDI_PROP_HW_DEF)
		ppropp = &DEVI(dip)->devi_hw_prop_ptr;

	if ((propp = i_ddi_prop_search(dev, name, flags, ppropp)) != NULL) {
		/*
		 * Property found: copy the new value into the preallocated
		 * buffer, free the old value, and hand ownership of `p'
		 * to the property.
		 */
		if (length != 0)
			bcopy(value, p, length);

		if (propp->prop_len != 0)
			kmem_free(propp->prop_val, propp->prop_len);

		propp->prop_len = length;
		propp->prop_val = p;
		propp->prop_flags &= ~DDI_PROP_UNDEF_IT;
		mutex_exit(&(DEVI(dip)->devi_lock));
		return (DDI_PROP_SUCCESS);
	}

	/* Not found: discard the preallocated buffer and create instead */
	mutex_exit(&(DEVI(dip)->devi_lock));
	if (length != 0)
		kmem_free(p, length);

	return (ddi_prop_add(dev, dip, flags, name, value, length));
}

/*
 * Common update routine used to update and encode a property.  Creates
 * a property handle, calls the property encode routine, figures out if
 * the property already exists and updates if it does.  Otherwise it
 * creates if it does not exist.
 */
int
ddi_prop_update_common(dev_t match_dev, dev_info_t *dip, int flags,
    char *name, void *data, uint_t nelements,
    int (*prop_create)(prop_handle_t *, void *data, uint_t nelements))
{
	prop_handle_t	ph;
	int		rval;
	uint_t		ourflags;

	/*
	 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
	 * return error.
	 */
	if (match_dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0)
		return (DDI_PROP_INVAL_ARG);

	/*
	 * Create the handle
	 */
	ph.ph_data = NULL;
	ph.ph_cur_pos = NULL;
	ph.ph_save_pos = NULL;
	ph.ph_size = 0;
	ph.ph_ops = &prop_1275_ops;

	/*
	 * ourflags:
	 * For compatibility with the old interfaces.  The old interfaces
	 * didn't sleep by default and slept when the flag was set.  These
	 * interfaces do the opposite.  So the old interfaces now set the
	 * DDI_PROP_DONTSLEEP flag by default which tells us not to sleep.
	 *
	 * ph.ph_flags:
	 * Blocked data or unblocked data allocation
	 * for ph.ph_data in ddi_prop_encode_alloc()
	 */
	if (flags & DDI_PROP_DONTSLEEP) {
		ourflags = flags;
		ph.ph_flags = DDI_PROP_DONTSLEEP;
	} else {
		ourflags = flags | DDI_PROP_CANSLEEP;
		ph.ph_flags = DDI_PROP_CANSLEEP;
	}

	/*
	 * Encode the data and store it in the property handle by
	 * calling the prop_encode routine.
	 */
	if ((rval = (*prop_create)(&ph, data, nelements)) !=
	    DDI_PROP_SUCCESS) {
		if (rval == DDI_PROP_NO_MEMORY)
			cmn_err(CE_CONT, prop_no_mem_msg, name);
		if (ph.ph_size != 0)
			kmem_free(ph.ph_data, ph.ph_size);
		return (rval);
	}

	/*
	 * The old interfaces use a stacking approach to creating
	 * properties.  If we are being called from the old interfaces,
	 * the DDI_PROP_STACK_CREATE flag will be set, so we just do a
	 * create without checking.
	 */
	if (flags & DDI_PROP_STACK_CREATE) {
		rval = ddi_prop_add(match_dev, dip,
		    ourflags, name, ph.ph_data, ph.ph_size);
	} else {
		rval = ddi_prop_change(match_dev, dip,
		    ourflags, name, ph.ph_data, ph.ph_size);
	}

	/*
	 * Free the encoded data allocated in the prop_encode routine.
	 * (ddi_prop_add/ddi_prop_change made their own copy.)
	 */
	if (ph.ph_size != 0)
		kmem_free(ph.ph_data, ph.ph_size);

	return (rval);
}


/*
 * ddi_prop_create:	Define a managed property:
 *			See above for details.
 */

int
ddi_prop_create(dev_t dev, dev_info_t *dip, int flag,
    char *name, caddr_t value, int length)
{
	/* Obsolete interface: defaults to no-sleep allocation */
	if (!(flag & DDI_PROP_CANSLEEP)) {
		flag |= DDI_PROP_DONTSLEEP;
#ifdef DDI_PROP_DEBUG
		if (length != 0)
			cmn_err(CE_NOTE, "!ddi_prop_create: interface obsolete,"
			    "use ddi_prop_update (prop = %s, node = %s%d)",
			    name, ddi_driver_name(dip), ddi_get_instance(dip));
#endif /* DDI_PROP_DEBUG */
	}
	flag &= ~DDI_PROP_SYSTEM_DEF;
	flag |= DDI_PROP_STACK_CREATE | DDI_PROP_TYPE_ANY;
	return (ddi_prop_update_common(dev, dip, flag, name,
	    value, length, ddi_prop_fm_encode_bytes));
}

int
e_ddi_prop_create(dev_t dev, dev_info_t *dip, int flag,
    char *name, caddr_t value, int length)
{
	/* Same as ddi_prop_create but targets the system property list */
	if (!(flag & DDI_PROP_CANSLEEP))
		flag |= DDI_PROP_DONTSLEEP;
	flag |= DDI_PROP_SYSTEM_DEF | DDI_PROP_STACK_CREATE | DDI_PROP_TYPE_ANY;
	return (ddi_prop_update_common(dev, dip, flag,
	    name, value, length, ddi_prop_fm_encode_bytes));
}

int
ddi_prop_modify(dev_t dev, dev_info_t *dip, int flag,
    char *name, caddr_t value, int length)
{
	ASSERT((flag & DDI_PROP_TYPE_MASK) == 0);

	/*
	 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
	 * return error.
	 */
	if (dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0)
		return (DDI_PROP_INVAL_ARG);

	if (!(flag & DDI_PROP_CANSLEEP))
		flag |= DDI_PROP_DONTSLEEP;
	flag &= ~DDI_PROP_SYSTEM_DEF;
	/*
	 * Only modify an existing software property; DDI_PROP_NOTPROM
	 * keeps the existence check out of the h/w (PROM) layer.
	 */
	if (ddi_prop_exists(dev, dip, (flag | DDI_PROP_NOTPROM), name) == 0)
		return (DDI_PROP_NOT_FOUND);

	return (ddi_prop_update_common(dev, dip,
	    (flag | DDI_PROP_TYPE_BYTE), name,
	    value, length, ddi_prop_fm_encode_bytes));
}

int
e_ddi_prop_modify(dev_t dev, dev_info_t *dip, int flag,
    char *name, caddr_t value, int length)
{
	ASSERT((flag & DDI_PROP_TYPE_MASK) == 0);

	/*
	 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
	 * return error.
	 */
	if (dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0)
		return (DDI_PROP_INVAL_ARG);

	/* System-defined variant: property must already exist */
	if (ddi_prop_exists(dev, dip, (flag | DDI_PROP_SYSTEM_DEF), name) == 0)
		return (DDI_PROP_NOT_FOUND);

	if (!(flag & DDI_PROP_CANSLEEP))
		flag |= DDI_PROP_DONTSLEEP;
	return (ddi_prop_update_common(dev, dip,
	    (flag | DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_BYTE),
	    name, value, length, ddi_prop_fm_encode_bytes));
}


/*
 * Common lookup routine used to lookup and decode a property.
 * Creates a property handle, searches for the raw encoded data,
 * fills in the handle, and calls the property decode functions
 * passed in.
 *
 * This routine is not static because ddi_bus_prop_op() which lives in
 * ddi_impl.c calls it.  No driver should be calling this routine.
 */
int
ddi_prop_lookup_common(dev_t match_dev, dev_info_t *dip,
    uint_t flags, char *name, void *data, uint_t *nelements,
    int (*prop_decoder)(prop_handle_t *, void *data, uint_t *nelements))
{
	int		rval;
	uint_t		ourflags;
	prop_handle_t	ph;

	if ((match_dev == DDI_DEV_T_NONE) ||
	    (name == NULL) || (strlen(name) == 0))
		return (DDI_PROP_INVAL_ARG);

	/* Default to sleeping allocations unless the caller forbids it */
	ourflags = (flags & DDI_PROP_DONTSLEEP) ? flags :
	    flags | DDI_PROP_CANSLEEP;

	/*
	 * Get the encoded data
	 */
	bzero(&ph, sizeof (prop_handle_t));

	if ((flags & DDI_UNBND_DLPI2) || (flags & DDI_PROP_ROOTNEX_GLOBAL)) {
		/*
		 * For rootnex and unbound dlpi style-2 devices, index into
		 * the devnames' array and search the global
		 * property list.
		 */
		ourflags &= ~DDI_UNBND_DLPI2;
		rval = i_ddi_prop_search_global(match_dev,
		    ourflags, name, &ph.ph_data, &ph.ph_size);
	} else {
		rval = ddi_prop_search_common(match_dev, dip,
		    PROP_LEN_AND_VAL_ALLOC, ourflags, name,
		    &ph.ph_data, &ph.ph_size);

	}

	if (rval != DDI_PROP_SUCCESS && rval != DDI_PROP_FOUND_1275) {
		ASSERT(ph.ph_data == NULL);
		ASSERT(ph.ph_size == 0);
		return (rval);
	}

	/*
	 * If the encoded data came from an OBP or software source,
	 * use the 1275 OBP decode/encode routines.  PH_FROM_PROM is
	 * recorded when the search found the data in the PROM.
	 */
	ph.ph_cur_pos = ph.ph_data;
	ph.ph_save_pos = ph.ph_data;
	ph.ph_ops = &prop_1275_ops;
	ph.ph_flags = (rval == DDI_PROP_FOUND_1275) ? PH_FROM_PROM : 0;

	rval = (*prop_decoder)(&ph, data, nelements);

	/*
	 * Free the encoded data
	 */
	if (ph.ph_size != 0)
		kmem_free(ph.ph_data, ph.ph_size);

	return (rval);
}

/*
 * Lookup and return an array of composite properties.  The driver must
 * provide the decode routine.
 */
int
ddi_prop_lookup(dev_t match_dev, dev_info_t *dip,
    uint_t flags, char *name, void *data, uint_t *nelements,
    int (*prop_decoder)(prop_handle_t *, void *data, uint_t *nelements))
{
	return (ddi_prop_lookup_common(match_dev, dip,
	    (flags | DDI_PROP_TYPE_COMPOSITE), name,
	    data, nelements, prop_decoder));
}

/*
 * Return 1 if a property exists (no type checking done).
 * Return 0 if it does not exist.
 */
int
ddi_prop_exists(dev_t match_dev, dev_info_t *dip, uint_t flags, char *name)
{
	int	i;
	uint_t	x = 0;

	/* Setting all DDI_PROP_TYPE_MASK bits matches a property of any type */
	i = ddi_prop_search_common(match_dev, dip, PROP_EXISTS,
	    flags | DDI_PROP_TYPE_MASK, name, NULL, &x);
	return (i == DDI_PROP_SUCCESS || i == DDI_PROP_FOUND_1275);
}


/*
 * Update an array of composite properties.  The driver must
 * provide the encode routine.
 */
int
ddi_prop_update(dev_t match_dev, dev_info_t *dip,
    char *name, void *data, uint_t nelements,
    int (*prop_create)(prop_handle_t *, void *data, uint_t nelements))
{
	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_COMPOSITE,
	    name, data, nelements, prop_create));
}

/*
 * Get a single integer or boolean property and return it.
 * If the property does not exist, or cannot be decoded,
 * then return the defvalue passed in.
 *
 * This routine always succeeds.
 */
int
ddi_prop_get_int(dev_t match_dev, dev_info_t *dip, uint_t flags,
    char *name, int defvalue)
{
	int	data;
	uint_t	nelements;
	int	rval;

	/* Invalid flag bits are reported (DEBUG) and stripped, not fatal */
	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
#ifdef DEBUG
		if (dip != NULL) {
			cmn_err(CE_WARN, "ddi_prop_get_int: invalid flag"
			    " 0x%x (prop = %s, node = %s%d)", flags,
			    name, ddi_driver_name(dip), ddi_get_instance(dip));
		}
#endif /* DEBUG */
		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
	}

	if ((rval = ddi_prop_lookup_common(match_dev, dip,
	    (flags | DDI_PROP_TYPE_INT), name, &data, &nelements,
	    ddi_prop_fm_decode_int)) != DDI_PROP_SUCCESS) {
		/*
		 * END_OF_DATA yields 1 — presumably a zero-length
		 * (boolean) property counts as "present/true"; any
		 * other failure yields the caller's default.
		 */
		if (rval == DDI_PROP_END_OF_DATA)
			data = 1;
		else
			data = defvalue;
	}
	return (data);
}

/*
 * Get a single 64 bit integer or boolean property and return it.
 * If the property does not exist, or cannot be decoded,
 * then return the defvalue passed in.
 *
 * This routine always succeeds.
 */
int64_t
ddi_prop_get_int64(dev_t match_dev, dev_info_t *dip, uint_t flags,
    char *name, int64_t defvalue)
{
	int64_t	data;
	uint_t	nelements;
	int	rval;

	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
#ifdef DEBUG
		if (dip != NULL) {
			cmn_err(CE_WARN, "ddi_prop_get_int64: invalid flag"
			    " 0x%x (prop = %s, node = %s%d)", flags,
			    name, ddi_driver_name(dip), ddi_get_instance(dip));
		}
#endif /* DEBUG */
		/*
		 * NOTE(review): unlike ddi_prop_get_int (which strips the
		 * invalid bits and continues), this returns the error code
		 * DDI_PROP_INVAL_ARG as the property value, despite the
		 * "always succeeds" contract above — confirm intended.
		 */
		return (DDI_PROP_INVAL_ARG);
	}

	/* 64-bit properties are never fetched from the PROM (NOTPROM) */
	if ((rval = ddi_prop_lookup_common(match_dev, dip,
	    (flags | DDI_PROP_TYPE_INT64 | DDI_PROP_NOTPROM),
	    name, &data, &nelements, ddi_prop_fm_decode_int64))
	    != DDI_PROP_SUCCESS) {
		if (rval == DDI_PROP_END_OF_DATA)
			data = 1;
		else
			data = defvalue;
	}
	return (data);
}

/*
 * Get an array of integer property
 */
int
ddi_prop_lookup_int_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
    char *name, int **data, uint_t *nelements)
{
	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
#ifdef DEBUG
		if (dip != NULL) {
			cmn_err(CE_WARN, "ddi_prop_lookup_int_array: "
			    "invalid flag 0x%x (prop = %s, node = %s%d)",
			    flags, name, ddi_driver_name(dip),
			    ddi_get_instance(dip));
		}
#endif /* DEBUG */
		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
	}

	return (ddi_prop_lookup_common(match_dev, dip,
	    (flags | DDI_PROP_TYPE_INT), name, data,
	    nelements, ddi_prop_fm_decode_ints));
}

/*
 * Get an array of 64 bit integer properties
 */
int
ddi_prop_lookup_int64_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
    char *name, int64_t **data, uint_t *nelements)
{
	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
#ifdef DEBUG
		if (dip != NULL) {
			cmn_err(CE_WARN, "ddi_prop_lookup_int64_array: "
			    "invalid flag 0x%x (prop = %s, node = %s%d)",
			    flags, name, ddi_driver_name(dip),
			    ddi_get_instance(dip));
		}
#endif /* DEBUG */
		return (DDI_PROP_INVAL_ARG);
	}

	return (ddi_prop_lookup_common(match_dev, dip,
	    (flags | DDI_PROP_TYPE_INT64 | DDI_PROP_NOTPROM),
	    name, data, nelements, ddi_prop_fm_decode_int64_array));
}

/*
 * Update a single integer property.  If the property exists on the drivers
 * property list it updates, else it creates it.
 */
int
ddi_prop_update_int(dev_t match_dev, dev_info_t *dip,
    char *name, int data)
{
	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT,
	    name, &data, 1, ddi_prop_fm_encode_ints));
}

/*
 * Update a single 64 bit integer property.
 * Update the driver property list if it exists, else create it.
 */
int
ddi_prop_update_int64(dev_t match_dev, dev_info_t *dip,
    char *name, int64_t data)
{
	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT64,
	    name, &data, 1, ddi_prop_fm_encode_int64));
}

/* System-defined (DDI_PROP_SYSTEM_DEF) variant of ddi_prop_update_int */
int
e_ddi_prop_update_int(dev_t match_dev, dev_info_t *dip,
    char *name, int data)
{
	return (ddi_prop_update_common(match_dev, dip,
	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT,
	    name, &data, 1, ddi_prop_fm_encode_ints));
}

/* System-defined (DDI_PROP_SYSTEM_DEF) variant of ddi_prop_update_int64 */
int
e_ddi_prop_update_int64(dev_t match_dev, dev_info_t *dip,
    char *name, int64_t data)
{
	return (ddi_prop_update_common(match_dev, dip,
	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT64,
	    name, &data, 1, ddi_prop_fm_encode_int64));
}

/*
 * Update an array of integer property.  If the property exists on the drivers
 * property list it updates, else it creates it.
4213 */ 4214 int 4215 ddi_prop_update_int_array(dev_t match_dev, dev_info_t *dip, 4216 char *name, int *data, uint_t nelements) 4217 { 4218 return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT, 4219 name, data, nelements, ddi_prop_fm_encode_ints)); 4220 } 4221 4222 /* 4223 * Update an array of 64 bit integer properties. 4224 * Update the driver property list if it exists, else create it. 4225 */ 4226 int 4227 ddi_prop_update_int64_array(dev_t match_dev, dev_info_t *dip, 4228 char *name, int64_t *data, uint_t nelements) 4229 { 4230 return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT64, 4231 name, data, nelements, ddi_prop_fm_encode_int64)); 4232 } 4233 4234 int 4235 e_ddi_prop_update_int64_array(dev_t match_dev, dev_info_t *dip, 4236 char *name, int64_t *data, uint_t nelements) 4237 { 4238 return (ddi_prop_update_common(match_dev, dip, 4239 DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT64, 4240 name, data, nelements, ddi_prop_fm_encode_int64)); 4241 } 4242 4243 int 4244 e_ddi_prop_update_int_array(dev_t match_dev, dev_info_t *dip, 4245 char *name, int *data, uint_t nelements) 4246 { 4247 return (ddi_prop_update_common(match_dev, dip, 4248 DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT, 4249 name, data, nelements, ddi_prop_fm_encode_ints)); 4250 } 4251 4252 /* 4253 * Get a single string property. 
 */
int
ddi_prop_lookup_string(dev_t match_dev, dev_info_t *dip, uint_t flags,
    char *name, char **data)
{
	uint_t	x;	/* element count from decoder; discarded */

	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
#ifdef DEBUG
		if (dip != NULL) {
			cmn_err(CE_WARN, "%s: invalid flag 0x%x "
			    "(prop = %s, node = %s%d); invalid bits ignored",
			    "ddi_prop_lookup_string", flags, name,
			    ddi_driver_name(dip), ddi_get_instance(dip));
		}
#endif /* DEBUG */
		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
	}

	return (ddi_prop_lookup_common(match_dev, dip,
	    (flags | DDI_PROP_TYPE_STRING), name, data,
	    &x, ddi_prop_fm_decode_string));
}

/*
 * Get an array of strings property.
 */
int
ddi_prop_lookup_string_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
    char *name, char ***data, uint_t *nelements)
{
	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
#ifdef DEBUG
		if (dip != NULL) {
			cmn_err(CE_WARN, "ddi_prop_lookup_string_array: "
			    "invalid flag 0x%x (prop = %s, node = %s%d)",
			    flags, name, ddi_driver_name(dip),
			    ddi_get_instance(dip));
		}
#endif /* DEBUG */
		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
	}

	return (ddi_prop_lookup_common(match_dev, dip,
	    (flags | DDI_PROP_TYPE_STRING), name, data,
	    nelements, ddi_prop_fm_decode_strings));
}

/*
 * Update a single string property.
4308 */ 4309 int 4310 ddi_prop_update_string(dev_t match_dev, dev_info_t *dip, 4311 char *name, char *data) 4312 { 4313 return (ddi_prop_update_common(match_dev, dip, 4314 DDI_PROP_TYPE_STRING, name, &data, 1, 4315 ddi_prop_fm_encode_string)); 4316 } 4317 4318 int 4319 e_ddi_prop_update_string(dev_t match_dev, dev_info_t *dip, 4320 char *name, char *data) 4321 { 4322 return (ddi_prop_update_common(match_dev, dip, 4323 DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_STRING, 4324 name, &data, 1, ddi_prop_fm_encode_string)); 4325 } 4326 4327 4328 /* 4329 * Update an array of strings property. 4330 */ 4331 int 4332 ddi_prop_update_string_array(dev_t match_dev, dev_info_t *dip, 4333 char *name, char **data, uint_t nelements) 4334 { 4335 return (ddi_prop_update_common(match_dev, dip, 4336 DDI_PROP_TYPE_STRING, name, data, nelements, 4337 ddi_prop_fm_encode_strings)); 4338 } 4339 4340 int 4341 e_ddi_prop_update_string_array(dev_t match_dev, dev_info_t *dip, 4342 char *name, char **data, uint_t nelements) 4343 { 4344 return (ddi_prop_update_common(match_dev, dip, 4345 DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_STRING, 4346 name, data, nelements, 4347 ddi_prop_fm_encode_strings)); 4348 } 4349 4350 4351 /* 4352 * Get an array of bytes property. 
 */
int
ddi_prop_lookup_byte_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
    char *name, uchar_t **data, uint_t *nelements)
{
	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
#ifdef DEBUG
		if (dip != NULL) {
			/*
			 * NOTE(review): message has a stray leading space
			 * before "invalid"; left as-is to preserve output.
			 */
			cmn_err(CE_WARN, "ddi_prop_lookup_byte_array: "
			    " invalid flag 0x%x (prop = %s, node = %s%d)",
			    flags, name, ddi_driver_name(dip),
			    ddi_get_instance(dip));
		}
#endif /* DEBUG */
		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
	}

	return (ddi_prop_lookup_common(match_dev, dip,
	    (flags | DDI_PROP_TYPE_BYTE), name, data,
	    nelements, ddi_prop_fm_decode_bytes));
}

/*
 * Update an array of bytes property.
 */
int
ddi_prop_update_byte_array(dev_t match_dev, dev_info_t *dip,
    char *name, uchar_t *data, uint_t nelements)
{
	/* Zero-length byte arrays are not allowed */
	if (nelements == 0)
		return (DDI_PROP_INVAL_ARG);

	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_BYTE,
	    name, data, nelements, ddi_prop_fm_encode_bytes));
}


/* System-defined variant of ddi_prop_update_byte_array */
int
e_ddi_prop_update_byte_array(dev_t match_dev, dev_info_t *dip,
    char *name, uchar_t *data, uint_t nelements)
{
	if (nelements == 0)
		return (DDI_PROP_INVAL_ARG);

	return (ddi_prop_update_common(match_dev, dip,
	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_BYTE,
	    name, data, nelements, ddi_prop_fm_encode_bytes));
}


/*
 * ddi_prop_remove_common:	Undefine a managed property:
 *			Input dev_t must match dev_t when defined.
 *			Returns DDI_PROP_NOT_FOUND, possibly.
 *			DDI_PROP_INVAL_ARG is also possible if dev is
 *			DDI_DEV_T_ANY or incoming name is the NULL string.
 */
int
ddi_prop_remove_common(dev_t dev, dev_info_t *dip, char *name, int flag)
{
	ddi_prop_t	**list_head = &(DEVI(dip)->devi_drv_prop_ptr);
	ddi_prop_t	*propp;
	ddi_prop_t	*lastpropp = NULL;

	if ((dev == DDI_DEV_T_ANY) || (name == (char *)0) ||
	    (strlen(name) == 0)) {
		return (DDI_PROP_INVAL_ARG);
	}

	/* Select the target list: system, hardware, or (default) driver */
	if (flag & DDI_PROP_SYSTEM_DEF)
		list_head = &(DEVI(dip)->devi_sys_prop_ptr);
	else if (flag & DDI_PROP_HW_DEF)
		list_head = &(DEVI(dip)->devi_hw_prop_ptr);

	mutex_enter(&(DEVI(dip)->devi_lock));

	for (propp = *list_head; propp != NULL; propp = propp->prop_next) {
		if (DDI_STRSAME(propp->prop_name, name) &&
		    (dev == propp->prop_dev)) {
			/*
			 * Unlink this propp allowing for it to
			 * be first in the list:
			 */

			if (lastpropp == NULL)
				*list_head = propp->prop_next;
			else
				lastpropp->prop_next = propp->prop_next;

			/* Drop the lock before freeing; propp is unlinked */
			mutex_exit(&(DEVI(dip)->devi_lock));

			/*
			 * Free memory and return...
			 */
			kmem_free(propp->prop_name,
			    strlen(propp->prop_name) + 1);
			if (propp->prop_len != 0)
				kmem_free(propp->prop_val, propp->prop_len);
			kmem_free(propp, sizeof (ddi_prop_t));
			return (DDI_PROP_SUCCESS);
		}
		lastpropp = propp;
	}
	mutex_exit(&(DEVI(dip)->devi_lock));
	return (DDI_PROP_NOT_FOUND);
}

/* Remove from the driver property list */
int
ddi_prop_remove(dev_t dev, dev_info_t *dip, char *name)
{
	return (ddi_prop_remove_common(dev, dip, name, 0));
}

/* Remove from the system property list */
int
e_ddi_prop_remove(dev_t dev, dev_info_t *dip, char *name)
{
	return (ddi_prop_remove_common(dev, dip, name, DDI_PROP_SYSTEM_DEF));
}

/*
 * e_ddi_prop_list_delete: remove a list of properties
 *	Note that the caller needs to provide the required protection
 *	(eg.
 *	devi_lock if these properties are still attached to a devi)
 */
void
e_ddi_prop_list_delete(ddi_prop_t *props)
{
	i_ddi_prop_list_delete(props);
}

/*
 * ddi_prop_remove_all_common:
 *	Used before unloading a driver to remove
 *	all properties. (undefines all dev_t's props.)
 *	Also removes `explicitly undefined' props.
 *	No errors possible.
 */
void
ddi_prop_remove_all_common(dev_info_t *dip, int flag)
{
	ddi_prop_t	**list_head;

	/* Whole list is deleted and its head cleared under devi_lock */
	mutex_enter(&(DEVI(dip)->devi_lock));
	if (flag & DDI_PROP_SYSTEM_DEF) {
		list_head = &(DEVI(dip)->devi_sys_prop_ptr);
	} else if (flag & DDI_PROP_HW_DEF) {
		list_head = &(DEVI(dip)->devi_hw_prop_ptr);
	} else {
		list_head = &(DEVI(dip)->devi_drv_prop_ptr);
	}
	i_ddi_prop_list_delete(*list_head);
	*list_head = NULL;
	mutex_exit(&(DEVI(dip)->devi_lock));
}


/*
 * ddi_prop_remove_all:		Remove all driver prop definitions.
 */

void
ddi_prop_remove_all(dev_info_t *dip)
{
	/* Also detach any dynamic-property vector registered by the driver */
	i_ddi_prop_dyn_driver_set(dip, NULL);
	ddi_prop_remove_all_common(dip, 0);
}

/*
 * e_ddi_prop_remove_all:	Remove all system prop definitions.
 */

void
e_ddi_prop_remove_all(dev_info_t *dip)
{
	ddi_prop_remove_all_common(dip, (int)DDI_PROP_SYSTEM_DEF);
}


/*
 * ddi_prop_undefine:	Explicitly undefine a property.  Property
 *			searches which match this property return
 *			the error code DDI_PROP_UNDEFINED.
 *
 *			Use ddi_prop_remove to negate effect of
 *			ddi_prop_undefine
 *
 *			See above for error returns.
 */

int
ddi_prop_undefine(dev_t dev, dev_info_t *dip, int flag, char *name)
{
	if (!(flag & DDI_PROP_CANSLEEP))
		flag |= DDI_PROP_DONTSLEEP;
	/* UNDEF_IT records the undefine; no value/length is stored */
	flag |= DDI_PROP_STACK_CREATE | DDI_PROP_UNDEF_IT | DDI_PROP_TYPE_ANY;
	return (ddi_prop_update_common(dev, dip, flag,
	    name, NULL, 0, ddi_prop_fm_encode_bytes));
}

int
e_ddi_prop_undefine(dev_t dev, dev_info_t *dip, int flag, char *name)
{
	if (!(flag & DDI_PROP_CANSLEEP))
		flag |= DDI_PROP_DONTSLEEP;
	flag |= DDI_PROP_SYSTEM_DEF | DDI_PROP_STACK_CREATE |
	    DDI_PROP_UNDEF_IT | DDI_PROP_TYPE_ANY;
	return (ddi_prop_update_common(dev, dip, flag,
	    name, NULL, 0, ddi_prop_fm_encode_bytes));
}

/*
 * Support for gathering dynamic properties in devinfo snapshot.
 * These simply stash/fetch the dynamic-property vectors on the devinfo.
 */
void
i_ddi_prop_dyn_driver_set(dev_info_t *dip, i_ddi_prop_dyn_t *dp)
{
	DEVI(dip)->devi_prop_dyn_driver = dp;
}

i_ddi_prop_dyn_t *
i_ddi_prop_dyn_driver_get(dev_info_t *dip)
{
	return (DEVI(dip)->devi_prop_dyn_driver);
}

void
i_ddi_prop_dyn_parent_set(dev_info_t *dip, i_ddi_prop_dyn_t *dp)
{
	DEVI(dip)->devi_prop_dyn_parent = dp;
}

i_ddi_prop_dyn_t *
i_ddi_prop_dyn_parent_get(dev_info_t *dip)
{
	return (DEVI(dip)->devi_prop_dyn_parent);
}

void
i_ddi_prop_dyn_cache_invalidate(dev_info_t *dip, i_ddi_prop_dyn_t *dp)
{
	/*
	 * for now we invalidate the entire cached snapshot;
	 * only when both a node and a dynamic vector are given.
	 */
	if (dip && dp)
		i_ddi_di_cache_invalidate(KM_SLEEP);
}

/* ARGSUSED */
void
ddi_prop_cache_invalidate(dev_t dev, dev_info_t *dip, char *name, int flags)
{
	/* for now we invalidate the entire cached snapshot */
	i_ddi_di_cache_invalidate(KM_SLEEP);
}


/*
 * Code to search hardware layer (PROM), if it exists, on behalf of child.
 *
 * if input dip != child_dip, then call is on behalf of child
 * to search PROM, do it via ddi_prop_search_common() and ascend only
 * if allowed.
 *
 * if input dip == ch_dip (child_dip), call is on behalf of root driver,
 * to search for PROM defined props only.
 *
 * Note that the PROM search is done only if the requested dev
 * is either DDI_DEV_T_ANY or DDI_DEV_T_NONE. PROM properties
 * have no associated dev, thus are automatically associated with
 * DDI_DEV_T_NONE.
 *
 * Modifying flag DDI_PROP_NOTPROM inhibits the search in the h/w layer.
 *
 * Returns DDI_PROP_FOUND_1275 if found to indicate to framework
 * that the property resides in the prom.
 */
int
impl_ddi_bus_prop_op(dev_t dev, dev_info_t *dip, dev_info_t *ch_dip,
    ddi_prop_op_t prop_op, int mod_flags,
    char *name, caddr_t valuep, int *lengthp)
{
	int	len;
	caddr_t buffer;

	/*
	 * If requested dev is DDI_DEV_T_NONE or DDI_DEV_T_ANY, then
	 * look in caller's PROM if it's a self identifying device...
	 *
	 * Note that this is very similar to ddi_prop_op, but we
	 * search the PROM instead of the s/w defined properties,
	 * and we are called on by the parent driver to do this for
	 * the child.
	 */

	if (((dev == DDI_DEV_T_NONE) || (dev == DDI_DEV_T_ANY)) &&
	    ndi_dev_is_prom_node(ch_dip) &&
	    ((mod_flags & DDI_PROP_NOTPROM) == 0)) {
		len = prom_getproplen((pnode_t)DEVI(ch_dip)->devi_nodeid, name);
		if (len == -1) {
			return (DDI_PROP_NOT_FOUND);
		}

		/*
		 * If exists only request, we're done
		 */
		if (prop_op == PROP_EXISTS) {
			return (DDI_PROP_FOUND_1275);
		}

		/*
		 * If length only request or prop length == 0, get out
		 */
		if ((prop_op == PROP_LEN) || (len == 0)) {
			*lengthp = len;
			return (DDI_PROP_FOUND_1275);
		}

		/*
		 * Allocate buffer if required... (either way `buffer'
		 * is receiving address).
		 *
		 * NOTE(review): a prop_op other than the two cases below
		 * falls through with `buffer' uninitialized before the
		 * prom_getprop() call; callers are expected to pass only
		 * PROP_LEN_AND_VAL_ALLOC or PROP_LEN_AND_VAL_BUF here.
		 */

		switch (prop_op) {

		case PROP_LEN_AND_VAL_ALLOC:

			buffer = kmem_alloc((size_t)len,
			    mod_flags & DDI_PROP_CANSLEEP ?
			    KM_SLEEP : KM_NOSLEEP);
			if (buffer == NULL) {
				return (DDI_PROP_NO_MEMORY);
			}
			*(caddr_t *)valuep = buffer;
			break;

		case PROP_LEN_AND_VAL_BUF:

			if (len > (*lengthp)) {
				*lengthp = len;
				return (DDI_PROP_BUF_TOO_SMALL);
			}

			buffer = valuep;
			break;

		default:
			break;
		}

		/*
		 * Call the PROM function to do the copy.
		 */
		(void) prom_getprop((pnode_t)DEVI(ch_dip)->devi_nodeid,
		    name, buffer);

		*lengthp = len; /* return the actual length to the caller */
		(void) impl_fix_props(dip, ch_dip, name, len, buffer);
		return (DDI_PROP_FOUND_1275);
	}

	return (DDI_PROP_NOT_FOUND);
}

/*
 * The ddi_bus_prop_op default bus nexus prop op function.
 *
 * Code to search hardware layer (PROM), if it exists,
 * on behalf of child, then, if appropriate, ascend and check
 * my own software defined properties...
 */
int
ddi_bus_prop_op(dev_t dev, dev_info_t *dip, dev_info_t *ch_dip,
    ddi_prop_op_t prop_op, int mod_flags,
    char *name, caddr_t valuep, int *lengthp)
{
	int error;

	/* first try the PROM on behalf of the child */
	error = impl_ddi_bus_prop_op(dev, dip, ch_dip, prop_op, mod_flags,
	    name, valuep, lengthp);

	if (error == DDI_PROP_SUCCESS || error == DDI_PROP_FOUND_1275 ||
	    error == DDI_PROP_BUF_TOO_SMALL)
		return (error);

	if (error == DDI_PROP_NO_MEMORY) {
		cmn_err(CE_CONT, prop_no_mem_msg, name);
		return (DDI_PROP_NO_MEMORY);
	}

	/*
	 * Check the 'options' node as a last resort
	 */
	if ((mod_flags & DDI_PROP_DONTPASS) != 0)
		return (DDI_PROP_NOT_FOUND);

	if (ch_dip == ddi_root_node()) {
		/*
		 * As a last resort, when we've reached
		 * the top and still haven't found the
		 * property, see if the desired property
		 * is attached to the options node.
		 *
		 * The options dip is attached right after boot.
		 */
		ASSERT(options_dip != NULL);
		/*
		 * Force the "don't pass" flag to *just* see
		 * what the options node has to offer.
		 */
		return (ddi_prop_search_common(dev, options_dip, prop_op,
		    mod_flags|DDI_PROP_DONTPASS, name, valuep,
		    (uint_t *)lengthp));
	}

	/*
	 * Otherwise, continue search with parent's s/w defined properties...
	 * NOTE: Using `dip' in following call increments the level.
	 */

	return (ddi_prop_search_common(dev, dip, prop_op, mod_flags,
	    name, valuep, (uint_t *)lengthp));
}

/*
 * External property functions used by other parts of the kernel...
 */

/*
 * e_ddi_getlongprop: See comments for ddi_get_longprop.
 */

int
e_ddi_getlongprop(dev_t dev, vtype_t type, char *name, int flags,
    caddr_t valuep, int *lengthp)
{
	_NOTE(ARGUNUSED(type))
	dev_info_t *devi;
	ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_ALLOC;
	int error;

	/* hold the devinfo node while we use it; bail if dev is unknown */
	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
		return (DDI_PROP_NOT_FOUND);

	error = cdev_prop_op(dev, devi, prop_op, flags, name, valuep, lengthp);
	ddi_release_devi(devi);
	return (error);
}

/*
 * e_ddi_getlongprop_buf: See comments for ddi_getlongprop_buf.
 */

int
e_ddi_getlongprop_buf(dev_t dev, vtype_t type, char *name, int flags,
    caddr_t valuep, int *lengthp)
{
	_NOTE(ARGUNUSED(type))
	dev_info_t *devi;
	ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_BUF;
	int error;

	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
		return (DDI_PROP_NOT_FOUND);

	error = cdev_prop_op(dev, devi, prop_op, flags, name, valuep, lengthp);
	ddi_release_devi(devi);
	return (error);
}

/*
 * e_ddi_getprop: See comments for ddi_getprop.
 */
int
e_ddi_getprop(dev_t dev, vtype_t type, char *name, int flags, int defvalue)
{
	_NOTE(ARGUNUSED(type))
	dev_info_t *devi;
	ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_BUF;
	int propvalue = defvalue;
	int proplength = sizeof (int);
	int error;

	/* if the dev can't be resolved just hand back the default */
	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
		return (defvalue);

	error = cdev_prop_op(dev, devi, prop_op,
	    flags, name, (caddr_t)&propvalue, &proplength);
	ddi_release_devi(devi);

	/* a zero-length property is a boolean: present means "true" (1) */
	if ((error == DDI_PROP_SUCCESS) && (proplength == 0))
		propvalue = 1;

	return (propvalue);
}

/*
 * e_ddi_getprop_int64:
 *
 * This is a typed interface, but predates typed properties. With the
 * introduction of typed properties the framework tries to ensure
 * consistent use of typed interfaces.
 * This is why TYPE_INT64 is not
 * part of TYPE_ANY. E_ddi_getprop_int64 is a special case where a
 * typed interface invokes legacy (non-typed) interfaces:
 * cdev_prop_op(), prop_op(9E), ddi_prop_op(9F)). In this case the
 * fact that TYPE_INT64 is not part of TYPE_ANY matters. To support
 * this type of lookup as a single operation we invoke the legacy
 * non-typed interfaces with the special CONSUMER_TYPED bit set. The
 * framework ddi_prop_op(9F) implementation is expected to check for
 * CONSUMER_TYPED and, if set, expand type bits beyond TYPE_ANY
 * (currently TYPE_INT64).
 */
int64_t
e_ddi_getprop_int64(dev_t dev, vtype_t type, char *name,
    int flags, int64_t defvalue)
{
	_NOTE(ARGUNUSED(type))
	dev_info_t *devi;
	ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_BUF;
	int64_t propvalue = defvalue;
	int proplength = sizeof (propvalue);
	int error;

	/* if the dev can't be resolved just hand back the default */
	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
		return (defvalue);

	error = cdev_prop_op(dev, devi, prop_op, flags |
	    DDI_PROP_CONSUMER_TYPED, name, (caddr_t)&propvalue, &proplength);
	ddi_release_devi(devi);

	/* a zero-length property is a boolean: present means "true" (1) */
	if ((error == DDI_PROP_SUCCESS) && (proplength == 0))
		propvalue = 1;

	return (propvalue);
}

/*
 * e_ddi_getproplen: See comments for ddi_getproplen.
 */
int
e_ddi_getproplen(dev_t dev, vtype_t type, char *name, int flags, int *lengthp)
{
	_NOTE(ARGUNUSED(type))
	dev_info_t *devi;
	ddi_prop_op_t prop_op = PROP_LEN;
	int error;

	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
		return (DDI_PROP_NOT_FOUND);

	error = cdev_prop_op(dev, devi, prop_op, flags, name, NULL, lengthp);
	ddi_release_devi(devi);
	return (error);
}

/*
 * Routines to get at elements of the dev_info structure
 */

/*
 * ddi_binding_name: Return the driver binding name of the devinfo node
 *		This is the name the OS used to bind the node to a driver.
 */
char *
ddi_binding_name(dev_info_t *dip)
{
	return (DEVI(dip)->devi_binding_name);
}

/*
 * ddi_driver_major: Return the major number of the driver that
 *	the supplied devinfo is bound to. If not yet bound,
 *	DDI_MAJOR_T_NONE.
 *
 * When used by the driver bound to 'devi', this
 * function will reliably return the driver major number.
 * Other ways of determining the driver major number, such as
 *	major = ddi_name_to_major(ddi_get_name(devi));
 *	major = ddi_name_to_major(ddi_binding_name(devi));
 * can return a different result as the driver/alias binding
 * can change dynamically, and thus should be avoided.
 */
major_t
ddi_driver_major(dev_info_t *devi)
{
	return (DEVI(devi)->devi_major);
}

/*
 * ddi_driver_name: Return the normalized driver name. this is the
 *		actual driver name
 */
const char *
ddi_driver_name(dev_info_t *devi)
{
	major_t major;

	/* prefer the bound driver's name; fall back to the node name */
	if ((major = ddi_driver_major(devi)) != DDI_MAJOR_T_NONE)
		return (ddi_major_to_name(major));

	return (ddi_node_name(devi));
}

/*
 * i_ddi_set_binding_name:	Set binding name.
 *
 *	Set the binding name to the given name.
 *	This routine is for use by the ddi implementation, not by drivers.
 */
void
i_ddi_set_binding_name(dev_info_t *dip, char *name)
{
	/* NOTE(review): stores the caller's pointer; no copy is made */
	DEVI(dip)->devi_binding_name = name;

}

/*
 * ddi_get_name: A synonym of ddi_binding_name() ... returns a name
 * the implementation has used to bind the node to a driver.
 */
char *
ddi_get_name(dev_info_t *dip)
{
	return (DEVI(dip)->devi_binding_name);
}

/*
 * ddi_node_name: Return the name property of the devinfo node
 *		This may differ from ddi_binding_name if the node name
 *		does not define a binding to a driver (i.e. generic names).
 */
char *
ddi_node_name(dev_info_t *dip)
{
	return (DEVI(dip)->devi_node_name);
}


/*
 * ddi_get_nodeid:	Get nodeid stored in dev_info structure.
 */
int
ddi_get_nodeid(dev_info_t *dip)
{
	return (DEVI(dip)->devi_nodeid);
}

/* return the instance number assigned to this devinfo node */
int
ddi_get_instance(dev_info_t *dip)
{
	return (DEVI(dip)->devi_instance);
}

/* return the dev_ops vector of the driver bound to this node */
struct dev_ops *
ddi_get_driver(dev_info_t *dip)
{
	return (DEVI(dip)->devi_ops);
}

/* set the dev_ops vector for this node (ddi implementation use) */
void
ddi_set_driver(dev_info_t *dip, struct dev_ops *devo)
{
	DEVI(dip)->devi_ops = devo;
}

/*
 * ddi_set_driver_private/ddi_get_driver_private:
 * Get/set device driver private data in devinfo.
 */
void
ddi_set_driver_private(dev_info_t *dip, void *data)
{
	DEVI(dip)->devi_driver_data = data;
}

void *
ddi_get_driver_private(dev_info_t *dip)
{
	return (DEVI(dip)->devi_driver_data);
}

/*
 * ddi_get_parent, ddi_get_child, ddi_get_next_sibling
 * Simple devinfo tree walkers; no locking is performed here, so the
 * caller is responsible for ensuring the tree is stable.
 */

dev_info_t *
ddi_get_parent(dev_info_t *dip)
{
	return ((dev_info_t *)DEVI(dip)->devi_parent);
}

dev_info_t *
ddi_get_child(dev_info_t *dip)
{
	return ((dev_info_t *)DEVI(dip)->devi_child);
}

dev_info_t *
ddi_get_next_sibling(dev_info_t *dip)
{
	return ((dev_info_t *)DEVI(dip)->devi_sibling);
}

dev_info_t *
ddi_get_next(dev_info_t *dip)
{
	return ((dev_info_t *)DEVI(dip)->devi_next);
}

void
ddi_set_next(dev_info_t *dip, dev_info_t *nextdip)
{
	DEVI(dip)->devi_next = DEVI(nextdip);
}

/*
 * ddi_root_node:		Return root node of devinfo tree
 */

dev_info_t *
ddi_root_node(void)
{
	extern dev_info_t *top_devinfo;

	return (top_devinfo);
}

/*
 * Miscellaneous functions:
 */

/*
 * Implementation specific hooks
 */

void
ddi_report_dev(dev_info_t *d)
{
	char *b;

	(void) ddi_ctlops(d, d, DDI_CTLOPS_REPORTDEV, (void *)0, (void *)0);

	/*
	 * If this devinfo node has cb_ops, it's implicitly accessible from
	 * userland, so we print its full name together with the instance
	 * number 'abbreviation' that the driver may use internally.
	 */
	if (DEVI(d)->devi_ops->devo_cb_ops != (struct cb_ops *)0 &&
	    (b = kmem_zalloc(MAXPATHLEN, KM_NOSLEEP))) {
		cmn_err(CE_CONT, "?%s%d is %s\n",
		    ddi_driver_name(d), ddi_get_instance(d),
		    ddi_pathname(d, b));
		kmem_free(b, MAXPATHLEN);
	}
}

/*
 * ddi_ctlops() is described in the assembler not to buy a new register
 * window when it's called and can reduce cost in climbing the device tree
 * without using the tail call optimization.
 */
int
ddi_dev_regsize(dev_info_t *dev, uint_t rnumber, off_t *result)
{
	int ret;

	ret = ddi_ctlops(dev, dev, DDI_CTLOPS_REGSIZE,
	    (void *)&rnumber, (void *)result);

	/* normalize any non-success ctlops result to DDI_FAILURE */
	return (ret == DDI_SUCCESS ? DDI_SUCCESS : DDI_FAILURE);
}

int
ddi_dev_nregs(dev_info_t *dev, int *result)
{
	return (ddi_ctlops(dev, dev, DDI_CTLOPS_NREGS, 0, (void *)result));
}

/* is this a self-identifying device? */
int
ddi_dev_is_sid(dev_info_t *d)
{
	return (ddi_ctlops(d, d, DDI_CTLOPS_SIDDEV, (void *)0, (void *)0));
}

int
ddi_slaveonly(dev_info_t *d)
{
	return (ddi_ctlops(d, d, DDI_CTLOPS_SLAVEONLY, (void *)0, (void *)0));
}

int
ddi_dev_affinity(dev_info_t *a, dev_info_t *b)
{
	return (ddi_ctlops(a, a, DDI_CTLOPS_AFFINITY, (void *)b, (void *)0));
}

/*
 * ddi_streams_driver: DDI_SUCCESS iff dip is attached and its cb_ops
 * has a STREAMS table (cb_str).
 */
int
ddi_streams_driver(dev_info_t *dip)
{
	if (i_ddi_devi_attached(dip) &&
	    (DEVI(dip)->devi_ops->devo_cb_ops != NULL) &&
	    (DEVI(dip)->devi_ops->devo_cb_ops->cb_str != NULL))
		return (DDI_SUCCESS);
	return (DDI_FAILURE);
}

/*
 * callback free list
 */

static int ncallbacks;		/* size of static (L2) callback pool */
static int nc_low = 170;	/* pool size for small-memory machines */
static int nc_med = 512;
static int nc_high = 2048;
static struct ddi_callback *callbackq;		/* the static pool itself */
static struct ddi_callback *callbackqfree;	/* head of its free list */

/*
 * set/run callback lists
 */
struct cbstats {
	kstat_named_t cb_asked;
	kstat_named_t cb_new;
	kstat_named_t cb_run;
	kstat_named_t cb_delete;
	kstat_named_t cb_maxreq;
	kstat_named_t cb_maxlist;
	kstat_named_t cb_alloc;
	kstat_named_t cb_runouts;
	kstat_named_t cb_L2;
	kstat_named_t cb_grow;
} cbstats = {
	{"asked", KSTAT_DATA_UINT32},
	{"new", KSTAT_DATA_UINT32},
	{"run", KSTAT_DATA_UINT32},
	{"delete", KSTAT_DATA_UINT32},
	{"maxreq", KSTAT_DATA_UINT32},
	{"maxlist", KSTAT_DATA_UINT32},
	{"alloc", KSTAT_DATA_UINT32},
	{"runouts", KSTAT_DATA_UINT32},
	{"L2", KSTAT_DATA_UINT32},
	{"grow", KSTAT_DATA_UINT32},
};

/* shorthand accessors for the kstat counters above */
#define	nc_asked	cb_asked.value.ui32
#define	nc_new		cb_new.value.ui32
#define	nc_run		cb_run.value.ui32
#define	nc_delete	cb_delete.value.ui32
#define	nc_maxreq	cb_maxreq.value.ui32
#define	nc_maxlist	cb_maxlist.value.ui32
#define	nc_alloc	cb_alloc.value.ui32
#define	nc_runouts	cb_runouts.value.ui32
#define	nc_L2		cb_L2.value.ui32
#define	nc_grow		cb_grow.value.ui32

/* protects the callback lists, free list, and cbstats counters */
static kmutex_t ddi_callback_mutex;

/*
 * callbacks are handled using a L1/L2 cache. The L1 cache
 * comes out of kmem_cache_alloc and can expand/shrink dynamically. If
 * we can't get callbacks from the L1 cache [because pageout is doing
 * I/O at the time freemem is 0], we allocate callbacks out of the
 * L2 cache. The L2 cache is static and depends on the memory size.
 * [We might also count the number of devices at probe time and
 * allocate one structure per device and adjust for deferred attach]
 */
void
impl_ddi_callback_init(void)
{
	int i;
	uint_t physmegs;
	kstat_t *ksp;

	/* scale the static (L2) pool by physical memory size */
	physmegs = physmem >> (20 - PAGESHIFT);
	if (physmegs < 48) {
		ncallbacks = nc_low;
	} else if (physmegs < 128) {
		ncallbacks = nc_med;
	} else {
		ncallbacks = nc_high;
	}

	/*
	 * init free list
	 */
	callbackq = kmem_zalloc(
	    ncallbacks * sizeof (struct ddi_callback), KM_SLEEP);
	for (i = 0; i < ncallbacks-1; i++)
		callbackq[i].c_nfree = &callbackq[i+1];
	/* last element's c_nfree stays NULL (zalloc), terminating the list */
	callbackqfree = callbackq;

	/* init kstats */
	if (ksp = kstat_create("unix", 0, "cbstats", "misc", KSTAT_TYPE_NAMED,
	    sizeof (cbstats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL)) {
		ksp->ks_data = (void *) &cbstats;
		kstat_install(ksp);
	}

}

/*
 * callback_insert: add (funcp, arg) to the callback list `*listid',
 * coalescing with an existing entry for the same function/argument by
 * bumping its count. Caller must hold ddi_callback_mutex.
 */
static void
callback_insert(int (*funcp)(caddr_t), caddr_t arg, uintptr_t *listid,
	int count)
{
	struct ddi_callback *list, *marker, *new;
	size_t size = sizeof (struct ddi_callback);

	list = marker = (struct ddi_callback *)*listid;
	while (list != NULL) {
		if (list->c_call == funcp && list->c_arg == arg) {
			list->c_count += count;
			return;
		}
		marker = list;
		list = list->c_nlist;
	}
	/* L1: ordinary kmem allocation */
	new = kmem_alloc(size, KM_NOSLEEP);
	if (new == NULL) {
		/* L2: fall back to the static pool */
		new = callbackqfree;
		if (new == NULL) {
			/* last resort: tryhard allocation; panics on failure */
			new = kmem_alloc_tryhard(sizeof (struct ddi_callback),
			    &size, KM_NOSLEEP | KM_PANIC);
			cbstats.nc_grow++;
		} else {
			callbackqfree = new->c_nfree;
			cbstats.nc_L2++;
		}
	}
	if (marker != NULL) {
		marker->c_nlist = new;
	} else {
		*listid = (uintptr_t)new;
	}
	new->c_size = size;
	new->c_nlist = NULL;
	new->c_call = funcp;
	new->c_arg = arg;
	new->c_count = count;
	cbstats.nc_new++;
	cbstats.nc_alloc++;
	if (cbstats.nc_alloc > cbstats.nc_maxlist)
		cbstats.nc_maxlist = cbstats.nc_alloc;
}

/*
 * ddi_set_callback: register a callback to be retried later (via
 * ddi_run_callback) when the resource funcp needs becomes available.
 */
void
ddi_set_callback(int (*funcp)(caddr_t), caddr_t arg, uintptr_t *listid)
{
	mutex_enter(&ddi_callback_mutex);
	cbstats.nc_asked++;
	if ((cbstats.nc_asked - cbstats.nc_run) > cbstats.nc_maxreq)
		cbstats.nc_maxreq = (cbstats.nc_asked - cbstats.nc_run);
	(void) callback_insert(funcp, arg, listid, 1);
	mutex_exit(&ddi_callback_mutex);
}

/*
 * real_callback_run: softcall worker that drains the callback list
 * `Queue'. Each entry is removed and its function invoked (unlocked)
 * up to c_count times; a zero return means the callback could not make
 * progress and it is re-inserted for a later run.
 */
static void
real_callback_run(void *Queue)
{
	int (*funcp)(caddr_t);
	caddr_t arg;
	int count, rval;
	uintptr_t *listid;
	struct ddi_callback *list, *marker;
	int check_pending = 1;
	int pending = 0;

	do {
		mutex_enter(&ddi_callback_mutex);
		listid = Queue;
		list = (struct ddi_callback *)*listid;
		if (list == NULL) {
			mutex_exit(&ddi_callback_mutex);
			return;
		}
		if (check_pending) {
			/* tally outstanding requests once, on first pass */
			marker = list;
			while (marker != NULL) {
				pending += marker->c_count;
				marker = marker->c_nlist;
			}
			check_pending = 0;
		}
		ASSERT(pending > 0);
		ASSERT(list->c_count > 0);
		funcp = list->c_call;
		arg = list->c_arg;
		count = list->c_count;
		*(uintptr_t *)Queue = (uintptr_t)list->c_nlist;
		/* return pool entries to the L2 free list, else kmem_free */
		if (list >= &callbackq[0] &&
		    list <= &callbackq[ncallbacks-1]) {
			list->c_nfree = callbackqfree;
			callbackqfree = list;
		} else
			kmem_free(list, list->c_size);

		cbstats.nc_delete++;
		cbstats.nc_alloc--;
		mutex_exit(&ddi_callback_mutex);

		do {
			if ((rval = (*funcp)(arg)) == 0) {
				/* no progress: requeue remaining count */
				pending -= count;
				mutex_enter(&ddi_callback_mutex);
				(void) callback_insert(funcp, arg, listid,
				    count);
				cbstats.nc_runouts++;
			} else {
				pending--;
				mutex_enter(&ddi_callback_mutex);
				cbstats.nc_run++;
			}
			mutex_exit(&ddi_callback_mutex);
		} while (rval != 0 && (--count > 0));
	} while (pending > 0);
}

/*
 * ddi_run_callback: schedule the list to be drained from softcall level.
 */
void
ddi_run_callback(uintptr_t *listid)
{
	softcall(real_callback_run, listid);
}

/*
 * ddi_periodic_t
 * ddi_periodic_add(void (*func)(void *), void *arg, hrtime_t interval,
 *     int level)
 *
 * INTERFACE LEVEL
 *      Solaris DDI specific (Solaris DDI)
 *
 * PARAMETERS
 *      func: the callback function
 *
 *            The callback function will be invoked. The function is invoked
 *            in kernel context if the argument level passed is the zero.
 *            Otherwise it's invoked in interrupt context at the specified
 *            level.
 *
 *       arg: the argument passed to the callback function
 *
 *  interval: interval time
 *
 *    level : callback interrupt level
 *
 *            If the value is the zero, the callback function is invoked
 *            in kernel context. If the value is more than the zero, but
 *            less than or equal to ten, the callback function is invoked in
 *            interrupt context at the specified interrupt level, which may
 *            be used for real time applications.
 *
 *            This value must be in range of 0-10, which can be a numeric
 *            number or a pre-defined macro (DDI_IPL_0, ... , DDI_IPL_10).
 *
 * DESCRIPTION
 *      ddi_periodic_add(9F) schedules the specified function to be
 *      periodically invoked in the interval time.
 *
 *      As well as timeout(9F), the exact time interval over which the function
 *      takes effect cannot be guaranteed, but the value given is a close
 *      approximation.
 *
 *      Drivers waiting on behalf of processes with real-time constraints must
 *      pass non-zero value with the level argument to ddi_periodic_add(9F).
 *
 * RETURN VALUES
 *      ddi_periodic_add(9F) returns a non-zero opaque value (ddi_periodic_t),
 *      which must be used for ddi_periodic_delete(9F) to specify the request.
 *
 * CONTEXT
 *      ddi_periodic_add(9F) can be called in user or kernel context, but
 *      it cannot be called in interrupt context, which is different from
 *      timeout(9F).
 */
ddi_periodic_t
ddi_periodic_add(void (*func)(void *), void *arg, hrtime_t interval, int level)
{
	/*
	 * Sanity check of the argument level.
	 */
	if (level < DDI_IPL_0 || level > DDI_IPL_10)
		cmn_err(CE_PANIC,
		    "ddi_periodic_add: invalid interrupt level (%d).", level);

	/*
	 * Sanity check of the context. ddi_periodic_add() cannot be
	 * called in either interrupt context or high interrupt context.
	 */
	if (servicing_interrupt())
		cmn_err(CE_PANIC,
		    "ddi_periodic_add: called in (high) interrupt context.");

	return ((ddi_periodic_t)i_timeout(func, arg, interval, level));
}

/*
 * void
 * ddi_periodic_delete(ddi_periodic_t req)
 *
 * INTERFACE LEVEL
 *     Solaris DDI specific (Solaris DDI)
 *
 * PARAMETERS
 *     req: ddi_periodic_t opaque value ddi_periodic_add(9F) returned
 *     previously.
 *
 * DESCRIPTION
 *     ddi_periodic_delete(9F) cancels the ddi_periodic_add(9F) request
 *     previously requested.
 *
 *     ddi_periodic_delete(9F) will not return until the pending request
 *     is canceled or executed.
 *
 *     As well as untimeout(9F), calling ddi_periodic_delete(9F) for a
 *     timeout which is either running on another CPU, or has already
 *     completed causes no problems. However, unlike untimeout(9F), there is
 *     no restrictions on the lock which might be held across the call to
 *     ddi_periodic_delete(9F).
 *
 *     Drivers should be structured with the understanding that the arrival of
 *     both an interrupt and a timeout for that interrupt can occasionally
 *     occur, in either order.
 *
 * CONTEXT
 *     ddi_periodic_delete(9F) can be called in user or kernel context, but
 *     it cannot be called in interrupt context, which is different from
 *     untimeout(9F).
 */
void
ddi_periodic_delete(ddi_periodic_t req)
{
	/*
	 * Sanity check of the context. ddi_periodic_delete() cannot be
	 * called in either interrupt context or high interrupt context.
	 */
	if (servicing_interrupt())
		cmn_err(CE_PANIC,
		    "ddi_periodic_delete: called in (high) interrupt context.");

	i_untimeout((timeout_t)req);
}

/*
 * nodevinfo: getinfo(9E) stub that always reports "no devinfo node".
 */
dev_info_t *
nodevinfo(dev_t dev, int otyp)
{
	_NOTE(ARGUNUSED(dev, otyp))
	return ((dev_info_t *)0);
}

/*
 * A driver should support its own getinfo(9E) entry point. This function
 * is provided as a convenience for ON drivers that don't expect their
 * getinfo(9E) entry point to be called. A driver that uses this must not
 * call ddi_create_minor_node.
 */
int
ddi_no_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	_NOTE(ARGUNUSED(dip, infocmd, arg, result))
	return (DDI_FAILURE);
}

/*
 * A driver should support its own getinfo(9E) entry point. This function
 * is provided as a convenience for ON drivers that where the minor number
 * is the instance. Drivers that do not have 1:1 mapping must implement
 * their own getinfo(9E) function.
 */
int
ddi_getinfo_1to1(dev_info_t *dip, ddi_info_cmd_t infocmd,
    void *arg, void **result)
{
	_NOTE(ARGUNUSED(dip))
	int	instance;

	if (infocmd != DDI_INFO_DEVT2INSTANCE)
		return (DDI_FAILURE);

	/* minor number == instance by the 1:1 convention */
	instance = getminor((dev_t)(uintptr_t)arg);
	*result = (void *)(uintptr_t)instance;
	return (DDI_SUCCESS);
}

/* attach(9E)/detach(9E) stub that always fails */
int
ddifail(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	_NOTE(ARGUNUSED(devi, cmd))
	return (DDI_FAILURE);
}

/*
 * Stub DMA bus ops for nexi that do not support DMA; each simply
 * returns the appropriate failure code.
 */
int
ddi_no_dma_map(dev_info_t *dip, dev_info_t *rdip,
    struct ddi_dma_req *dmareqp, ddi_dma_handle_t *handlep)
{
	_NOTE(ARGUNUSED(dip, rdip, dmareqp, handlep))
	return (DDI_DMA_NOMAPPING);
}

int
ddi_no_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
    int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
{
	_NOTE(ARGUNUSED(dip, rdip, attr, waitfp, arg, handlep))
	return (DDI_DMA_BADATTR);
}

int
ddi_no_dma_freehdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle)
{
	_NOTE(ARGUNUSED(dip, rdip, handle))
	return (DDI_FAILURE);
}

int
ddi_no_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
    ddi_dma_cookie_t *cp, uint_t *ccountp)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, dmareq, cp, ccountp))
	return (DDI_DMA_NOMAPPING);
}

int
ddi_no_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle)
{
	_NOTE(ARGUNUSED(dip, rdip, handle))
	return (DDI_FAILURE);
}

int
ddi_no_dma_flush(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, off_t off, size_t len,
    uint_t cache_flags)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, off, len, cache_flags))
	return (DDI_FAILURE);
}

int
ddi_no_dma_win(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, uint_t win, off_t *offp,
    size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, win, offp, lenp, cookiep, ccountp))
	return (DDI_FAILURE);
}

int
ddi_no_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
    off_t *offp, size_t *lenp, caddr_t *objp, uint_t flags)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, request, offp, lenp, objp, flags))
	return (DDI_FAILURE);
}

/* no-op stub */
void
ddivoid(void)
{}

/* chpoll(9E) stub for drivers that do not support polling */
int
nochpoll(dev_t dev, short events, int anyyet, short *reventsp,
    struct pollhead **pollhdrp)
{
	_NOTE(ARGUNUSED(dev, events, anyyet, reventsp, pollhdrp))
	return (ENXIO);
}

/* return credentials of the current thread */
cred_t *
ddi_get_cred(void)
{
	return (CRED());
}

/* return the current lbolt (clock ticks since boot) */
clock_t
ddi_get_lbolt(void)
{
	return (lbolt);
}

/*
 * ddi_get_time: current time in seconds; falls back to reading the
 * time-of-day hardware if gethrestime_sec() reports zero.
 */
time_t
ddi_get_time(void)
{
	time_t	now;

	if ((now = gethrestime_sec()) == 0) {
		timestruc_t ts;
		mutex_enter(&tod_lock);
		ts = tod_get();
		mutex_exit(&tod_lock);
		return (ts.tv_sec);
	} else {
		return (now);
	}
}

/* return the process id of the current thread's process */
pid_t
ddi_get_pid(void)
{
	return (ttoproc(curthread)->p_pid);
}

/* return the current thread's id */
kt_did_t
ddi_get_kt_did(void)
{
	return (curthread->t_did);
}

/*
 * This function returns B_TRUE if the caller can reasonably expect that a call
 * to cv_wait_sig(9F), cv_timedwait_sig(9F), or qwait_sig(9F) could be awakened
 * by user-level signal. If it returns B_FALSE, then the caller should use
 * other means to make certain that the wait will not hang "forever."
 *
 * It does not check the signal mask, nor for reception of any particular
 * signal.
 *
 * Currently, a thread can receive a signal if it's not a kernel thread and it
 * is not in the middle of exit(2) tear-down.
Threads that are in that 5672 * tear-down effectively convert cv_wait_sig to cv_wait, cv_timedwait_sig to 5673 * cv_timedwait, and qwait_sig to qwait. 5674 */ 5675 boolean_t 5676 ddi_can_receive_sig(void) 5677 { 5678 proc_t *pp; 5679 5680 if (curthread->t_proc_flag & TP_LWPEXIT) 5681 return (B_FALSE); 5682 if ((pp = ttoproc(curthread)) == NULL) 5683 return (B_FALSE); 5684 return (pp->p_as != &kas); 5685 } 5686 5687 /* 5688 * Swap bytes in 16-bit [half-]words 5689 */ 5690 void 5691 swab(void *src, void *dst, size_t nbytes) 5692 { 5693 uchar_t *pf = (uchar_t *)src; 5694 uchar_t *pt = (uchar_t *)dst; 5695 uchar_t tmp; 5696 int nshorts; 5697 5698 nshorts = nbytes >> 1; 5699 5700 while (--nshorts >= 0) { 5701 tmp = *pf++; 5702 *pt++ = *pf++; 5703 *pt++ = tmp; 5704 } 5705 } 5706 5707 static void 5708 ddi_append_minor_node(dev_info_t *ddip, struct ddi_minor_data *dmdp) 5709 { 5710 int circ; 5711 struct ddi_minor_data *dp; 5712 5713 ndi_devi_enter(ddip, &circ); 5714 if ((dp = DEVI(ddip)->devi_minor) == (struct ddi_minor_data *)NULL) { 5715 DEVI(ddip)->devi_minor = dmdp; 5716 } else { 5717 while (dp->next != (struct ddi_minor_data *)NULL) 5718 dp = dp->next; 5719 dp->next = dmdp; 5720 } 5721 ndi_devi_exit(ddip, circ); 5722 } 5723 5724 /* 5725 * Part of the obsolete SunCluster DDI Hooks. 5726 * Keep for binary compatibility 5727 */ 5728 minor_t 5729 ddi_getiminor(dev_t dev) 5730 { 5731 return (getminor(dev)); 5732 } 5733 5734 static int 5735 i_log_devfs_minor_create(dev_info_t *dip, char *minor_name) 5736 { 5737 int se_flag; 5738 int kmem_flag; 5739 int se_err; 5740 char *pathname, *class_name; 5741 sysevent_t *ev = NULL; 5742 sysevent_id_t eid; 5743 sysevent_value_t se_val; 5744 sysevent_attr_list_t *ev_attr_list = NULL; 5745 5746 /* determine interrupt context */ 5747 se_flag = (servicing_interrupt()) ? SE_NOSLEEP : SE_SLEEP; 5748 kmem_flag = (se_flag == SE_SLEEP) ? 
KM_SLEEP : KM_NOSLEEP; 5749 5750 i_ddi_di_cache_invalidate(kmem_flag); 5751 5752 #ifdef DEBUG 5753 if ((se_flag == SE_NOSLEEP) && sunddi_debug) { 5754 cmn_err(CE_CONT, "ddi_create_minor_node: called from " 5755 "interrupt level by driver %s", 5756 ddi_driver_name(dip)); 5757 } 5758 #endif /* DEBUG */ 5759 5760 ev = sysevent_alloc(EC_DEVFS, ESC_DEVFS_MINOR_CREATE, EP_DDI, se_flag); 5761 if (ev == NULL) { 5762 goto fail; 5763 } 5764 5765 pathname = kmem_alloc(MAXPATHLEN, kmem_flag); 5766 if (pathname == NULL) { 5767 sysevent_free(ev); 5768 goto fail; 5769 } 5770 5771 (void) ddi_pathname(dip, pathname); 5772 ASSERT(strlen(pathname)); 5773 se_val.value_type = SE_DATA_TYPE_STRING; 5774 se_val.value.sv_string = pathname; 5775 if (sysevent_add_attr(&ev_attr_list, DEVFS_PATHNAME, 5776 &se_val, se_flag) != 0) { 5777 kmem_free(pathname, MAXPATHLEN); 5778 sysevent_free(ev); 5779 goto fail; 5780 } 5781 kmem_free(pathname, MAXPATHLEN); 5782 5783 /* add the device class attribute */ 5784 if ((class_name = i_ddi_devi_class(dip)) != NULL) { 5785 se_val.value_type = SE_DATA_TYPE_STRING; 5786 se_val.value.sv_string = class_name; 5787 if (sysevent_add_attr(&ev_attr_list, 5788 DEVFS_DEVI_CLASS, &se_val, SE_SLEEP) != 0) { 5789 sysevent_free_attr(ev_attr_list); 5790 goto fail; 5791 } 5792 } 5793 5794 /* 5795 * allow for NULL minor names 5796 */ 5797 if (minor_name != NULL) { 5798 se_val.value.sv_string = minor_name; 5799 if (sysevent_add_attr(&ev_attr_list, DEVFS_MINOR_NAME, 5800 &se_val, se_flag) != 0) { 5801 sysevent_free_attr(ev_attr_list); 5802 sysevent_free(ev); 5803 goto fail; 5804 } 5805 } 5806 5807 if (sysevent_attach_attributes(ev, ev_attr_list) != 0) { 5808 sysevent_free_attr(ev_attr_list); 5809 sysevent_free(ev); 5810 goto fail; 5811 } 5812 5813 if ((se_err = log_sysevent(ev, se_flag, &eid)) != 0) { 5814 if (se_err == SE_NO_TRANSPORT) { 5815 cmn_err(CE_WARN, "/devices or /dev may not be current " 5816 "for driver %s (%s). 
Run devfsadm -i %s", 5817 ddi_driver_name(dip), "syseventd not responding", 5818 ddi_driver_name(dip)); 5819 } else { 5820 sysevent_free(ev); 5821 goto fail; 5822 } 5823 } 5824 5825 sysevent_free(ev); 5826 return (DDI_SUCCESS); 5827 fail: 5828 cmn_err(CE_WARN, "/devices or /dev may not be current " 5829 "for driver %s. Run devfsadm -i %s", 5830 ddi_driver_name(dip), ddi_driver_name(dip)); 5831 return (DDI_SUCCESS); 5832 } 5833 5834 /* 5835 * failing to remove a minor node is not of interest 5836 * therefore we do not generate an error message 5837 */ 5838 static int 5839 i_log_devfs_minor_remove(dev_info_t *dip, char *minor_name) 5840 { 5841 char *pathname, *class_name; 5842 sysevent_t *ev; 5843 sysevent_id_t eid; 5844 sysevent_value_t se_val; 5845 sysevent_attr_list_t *ev_attr_list = NULL; 5846 5847 /* 5848 * only log ddi_remove_minor_node() calls outside the scope 5849 * of attach/detach reconfigurations and when the dip is 5850 * still initialized. 5851 */ 5852 if (DEVI_IS_ATTACHING(dip) || DEVI_IS_DETACHING(dip) || 5853 (i_ddi_node_state(dip) < DS_INITIALIZED)) { 5854 return (DDI_SUCCESS); 5855 } 5856 5857 i_ddi_di_cache_invalidate(KM_SLEEP); 5858 5859 ev = sysevent_alloc(EC_DEVFS, ESC_DEVFS_MINOR_REMOVE, EP_DDI, SE_SLEEP); 5860 if (ev == NULL) { 5861 return (DDI_SUCCESS); 5862 } 5863 5864 pathname = kmem_alloc(MAXPATHLEN, KM_SLEEP); 5865 if (pathname == NULL) { 5866 sysevent_free(ev); 5867 return (DDI_SUCCESS); 5868 } 5869 5870 (void) ddi_pathname(dip, pathname); 5871 ASSERT(strlen(pathname)); 5872 se_val.value_type = SE_DATA_TYPE_STRING; 5873 se_val.value.sv_string = pathname; 5874 if (sysevent_add_attr(&ev_attr_list, DEVFS_PATHNAME, 5875 &se_val, SE_SLEEP) != 0) { 5876 kmem_free(pathname, MAXPATHLEN); 5877 sysevent_free(ev); 5878 return (DDI_SUCCESS); 5879 } 5880 5881 kmem_free(pathname, MAXPATHLEN); 5882 5883 /* 5884 * allow for NULL minor names 5885 */ 5886 if (minor_name != NULL) { 5887 se_val.value.sv_string = minor_name; 5888 if 
(sysevent_add_attr(&ev_attr_list, DEVFS_MINOR_NAME, 5889 &se_val, SE_SLEEP) != 0) { 5890 sysevent_free_attr(ev_attr_list); 5891 goto fail; 5892 } 5893 } 5894 5895 if ((class_name = i_ddi_devi_class(dip)) != NULL) { 5896 /* add the device class, driver name and instance attributes */ 5897 5898 se_val.value_type = SE_DATA_TYPE_STRING; 5899 se_val.value.sv_string = class_name; 5900 if (sysevent_add_attr(&ev_attr_list, 5901 DEVFS_DEVI_CLASS, &se_val, SE_SLEEP) != 0) { 5902 sysevent_free_attr(ev_attr_list); 5903 goto fail; 5904 } 5905 5906 se_val.value_type = SE_DATA_TYPE_STRING; 5907 se_val.value.sv_string = (char *)ddi_driver_name(dip); 5908 if (sysevent_add_attr(&ev_attr_list, 5909 DEVFS_DRIVER_NAME, &se_val, SE_SLEEP) != 0) { 5910 sysevent_free_attr(ev_attr_list); 5911 goto fail; 5912 } 5913 5914 se_val.value_type = SE_DATA_TYPE_INT32; 5915 se_val.value.sv_int32 = ddi_get_instance(dip); 5916 if (sysevent_add_attr(&ev_attr_list, 5917 DEVFS_INSTANCE, &se_val, SE_SLEEP) != 0) { 5918 sysevent_free_attr(ev_attr_list); 5919 goto fail; 5920 } 5921 5922 } 5923 5924 if (sysevent_attach_attributes(ev, ev_attr_list) != 0) { 5925 sysevent_free_attr(ev_attr_list); 5926 } else { 5927 (void) log_sysevent(ev, SE_SLEEP, &eid); 5928 } 5929 fail: 5930 sysevent_free(ev); 5931 return (DDI_SUCCESS); 5932 } 5933 5934 /* 5935 * Derive the device class of the node. 5936 * Device class names aren't defined yet. Until this is done we use 5937 * devfs event subclass names as device class names. 
 */
/*
 * derive_devi_class: derive and record the device class for a node from
 * its node_type string, but only if no class has been set yet.
 *
 * A class prefix only matches when it is followed by '\0' or ':' in
 * node_type, so a prefix cannot accidentally match a longer, unrelated
 * type name. 'flag' is the allocation flag (KM_SLEEP/KM_NOSLEEP) passed
 * through to i_ddi_set_devi_class(); its return value is propagated.
 */
static int
derive_devi_class(dev_info_t *dip, char *node_type, int flag)
{
        int rv = DDI_SUCCESS;

        if (i_ddi_devi_class(dip) == NULL) {
                if (strncmp(node_type, DDI_NT_BLOCK,
                    sizeof (DDI_NT_BLOCK) - 1) == 0 &&
                    (node_type[sizeof (DDI_NT_BLOCK) - 1] == '\0' ||
                    node_type[sizeof (DDI_NT_BLOCK) - 1] == ':') &&
                    strcmp(node_type, DDI_NT_FD) != 0) {

                        /* a block device that is not a floppy: "disk" */
                        rv = i_ddi_set_devi_class(dip, ESC_DISK, flag);

                } else if (strncmp(node_type, DDI_NT_NET,
                    sizeof (DDI_NT_NET) - 1) == 0 &&
                    (node_type[sizeof (DDI_NT_NET) - 1] == '\0' ||
                    node_type[sizeof (DDI_NT_NET) - 1] == ':')) {

                        rv = i_ddi_set_devi_class(dip, ESC_NETWORK, flag);

                } else if (strncmp(node_type, DDI_NT_PRINTER,
                    sizeof (DDI_NT_PRINTER) - 1) == 0 &&
                    (node_type[sizeof (DDI_NT_PRINTER) - 1] == '\0' ||
                    node_type[sizeof (DDI_NT_PRINTER) - 1] == ':')) {

                        rv = i_ddi_set_devi_class(dip, ESC_PRINTER, flag);

                } else if (strncmp(node_type, DDI_PSEUDO,
                    sizeof (DDI_PSEUDO) -1) == 0 &&
                    (strncmp(ESC_LOFI, ddi_node_name(dip),
                    sizeof (ESC_LOFI) -1) == 0)) {
                        /* pseudo node whose node name matches lofi */
                        rv = i_ddi_set_devi_class(dip, ESC_LOFI, flag);
                }
        }

        return (rv);
}

/*
 * Check compliance with PSARC 2003/375:
 *
 * The name must contain only characters a-z, A-Z, 0-9 or _ and it must not
 * exceed IFNAMSIZ (16) characters in length.
 */
static boolean_t
verify_name(char *name)
{
        size_t len = strlen(name);
        char *cp;

        if (len == 0 || len > IFNAMSIZ)
                return (B_FALSE);

        /*
         * NOTE(review): *cp is plain char; a negative value passed to a
         * userland isalnum() would be undefined -- presumably the kernel
         * ctype macro tolerates it, but confirm before reusing this check.
         */
        for (cp = name; *cp != '\0'; cp++) {
                if (!isalnum(*cp) && *cp != '_')
                        return (B_FALSE);
        }

        return (B_TRUE);
}

/*
 * ddi_create_minor_common: Create a ddi_minor_data structure and
 * attach it to the given devinfo node.
 */

int
ddi_create_minor_common(dev_info_t *dip, char *name, int spec_type,
    minor_t minor_num, char *node_type, int flag, ddi_minor_type mtype,
    const char *read_priv, const char *write_priv, mode_t priv_mode)
{
        struct ddi_minor_data *dmdp;
        major_t major;

        /* only character and block special nodes are supported */
        if (spec_type != S_IFCHR && spec_type != S_IFBLK)
                return (DDI_FAILURE);

        if (name == NULL)
                return (DDI_FAILURE);

        /*
         * Log a message if the minor number the driver is creating
         * is not expressible on the on-disk filesystem (currently
         * this is limited to 18 bits both by UFS). The device can
         * be opened via devfs, but not by device special files created
         * via mknod().
         */
        if (minor_num > L_MAXMIN32) {
                cmn_err(CE_WARN,
                    "%s%d:%s minor 0x%x too big for 32-bit applications",
                    ddi_driver_name(dip), ddi_get_instance(dip),
                    name, minor_num);
                return (DDI_FAILURE);
        }

        /* dip must be bound and attached */
        major = ddi_driver_major(dip);
        ASSERT(major != DDI_MAJOR_T_NONE);

        /*
         * Default node_type to DDI_PSEUDO and issue notice in debug mode
         */
        if (node_type == NULL) {
                node_type = DDI_PSEUDO;
                NDI_CONFIG_DEBUG((CE_NOTE, "!illegal node_type NULL for %s%d "
                    " minor node %s; default to DDI_PSEUDO",
                    ddi_driver_name(dip), ddi_get_instance(dip), name));
        }

        /*
         * If the driver is a network driver, ensure that the name falls within
         * the interface naming constraints specified by PSARC/2003/375.
         */
        if (strcmp(node_type, DDI_NT_NET) == 0) {
                if (!verify_name(name))
                        return (DDI_FAILURE);

                if (mtype == DDM_MINOR) {
                        struct devnames *dnp = &devnamesp[major];

                        /* Mark driver as a network driver */
                        LOCK_DEV_OPS(&dnp->dn_lock);
                        dnp->dn_flags |= DN_NETWORK_DRIVER;
                        UNLOCK_DEV_OPS(&dnp->dn_lock);
                }
        }

        /* derive a device class for exported (DDM_MINOR) nodes only */
        if (mtype == DDM_MINOR) {
                if (derive_devi_class(dip, node_type, KM_NOSLEEP) !=
                    DDI_SUCCESS)
                        return (DDI_FAILURE);
        }

        /*
         * Take care of minor number information for the node.
         * KM_NOSLEEP is used so callers in no-block contexts are safe;
         * allocation failure is reported as DDI_FAILURE.
         */
        if ((dmdp = kmem_zalloc(sizeof (struct ddi_minor_data),
            KM_NOSLEEP)) == NULL) {
                return (DDI_FAILURE);
        }
        if ((dmdp->ddm_name = i_ddi_strdup(name, KM_NOSLEEP)) == NULL) {
                kmem_free(dmdp, sizeof (struct ddi_minor_data));
                return (DDI_FAILURE);
        }
        dmdp->dip = dip;
        dmdp->ddm_dev = makedevice(major, minor_num);
        dmdp->ddm_spec_type = spec_type;
        dmdp->ddm_node_type = node_type;
        dmdp->type = mtype;
        if (flag & CLONE_DEV) {
                /* clone entries are aliased through the clone driver */
                dmdp->type = DDM_ALIAS;
                dmdp->ddm_dev = makedevice(ddi_driver_major(clone_dip), major);
        }
        if (flag & PRIVONLY_DEV) {
                dmdp->ddm_flags |= DM_NO_FSPERM;
        }
        if (read_priv || write_priv) {
                dmdp->ddm_node_priv =
                    devpolicy_priv_by_name(read_priv, write_priv);
        }
        dmdp->ddm_priv_mode = priv_mode;

        ddi_append_minor_node(dip, dmdp);

        /*
         * only log ddi_create_minor_node() calls which occur
         * outside the scope of attach(9e)/detach(9e) reconfigurations
         */
        if (!(DEVI_IS_ATTACHING(dip) || DEVI_IS_DETACHING(dip)) &&
            mtype != DDM_INTERNAL_PATH) {
                (void) i_log_devfs_minor_create(dip, name);
        }

        /*
         * Check if any dacf rules match the creation of this minor node
         */
        dacfc_match_create_minor(name, node_type, dip, dmdp, flag);
        return (DDI_SUCCESS);
}

int
ddi_create_minor_node(dev_info_t *dip, char *name, int spec_type,
    minor_t minor_num, char *node_type, int flag)
{
        /* standard exported minor node: no device privileges */
        return (ddi_create_minor_common(dip, name, spec_type, minor_num,
            node_type, flag, DDM_MINOR, NULL, NULL, 0));
}

/*
 * As ddi_create_minor_node(), but additionally associates read/write
 * device privileges and a privileged access mode with the node.
 */
int
ddi_create_priv_minor_node(dev_info_t *dip, char *name, int spec_type,
    minor_t minor_num, char *node_type, int flag,
    const char *rdpriv, const char *wrpriv, mode_t priv_mode)
{
        return (ddi_create_minor_common(dip, name, spec_type, minor_num,
            node_type, flag, DDM_MINOR, rdpriv, wrpriv, priv_mode));
}

/*
 * As ddi_create_minor_node(), but marks the node as the default
 * (DDM_DEFAULT) minor for the instance.
 */
int
ddi_create_default_minor_node(dev_info_t *dip, char *name, int spec_type,
    minor_t minor_num, char *node_type, int flag)
{
        return (ddi_create_minor_common(dip, name, spec_type, minor_num,
            node_type, flag, DDM_DEFAULT, NULL, NULL, 0));
}

/*
 * Internal (non-ddi) routine for drivers to export names known
 * to the kernel (especially ddi_pathname_to_dev_t and friends)
 * but not exported externally to /dev
 */
int
ddi_create_internal_pathname(dev_info_t *dip, char *name, int spec_type,
    minor_t minor_num)
{
        return (ddi_create_minor_common(dip, name, spec_type, minor_num,
            "internal", 0, DDM_INTERNAL_PATH, NULL, NULL, 0));
}

/*
 * Remove the minor node named 'name' from 'dip'; a NULL name removes
 * every minor node on the dip. Removal of exported nodes is logged via
 * i_log_devfs_minor_remove() (outside attach/detach only -- that check
 * lives in the logging routine itself).
 */
void
ddi_remove_minor_node(dev_info_t *dip, char *name)
{
        int circ;
        struct ddi_minor_data *dmdp, *dmdp1;
        struct ddi_minor_data **dmdp_prev;

        ndi_devi_enter(dip, &circ);
        dmdp_prev = &DEVI(dip)->devi_minor;
        dmdp = DEVI(dip)->devi_minor;
        while (dmdp != NULL) {
                /* dmdp1 saved up front: dmdp is freed inside the branch */
                dmdp1 = dmdp->next;
                if ((name == NULL || (dmdp->ddm_name != NULL &&
                    strcmp(name, dmdp->ddm_name) == 0))) {
                        if (dmdp->ddm_name != NULL) {
                                if (dmdp->type != DDM_INTERNAL_PATH)
                                        (void) i_log_devfs_minor_remove(dip,
                                            dmdp->ddm_name);
                                kmem_free(dmdp->ddm_name,
                                    strlen(dmdp->ddm_name) + 1);
                        }
                        /*
                         * Release device privilege, if any.
                         * Release dacf client data associated with this minor
                         * node by storing NULL.
                         */
                        if (dmdp->ddm_node_priv)
                                dpfree(dmdp->ddm_node_priv);
                        dacf_store_info((dacf_infohdl_t)dmdp, NULL);
                        kmem_free(dmdp, sizeof (struct ddi_minor_data));
                        *dmdp_prev = dmdp1;
                        /*
                         * OK, we found it, so get out now -- if we drive on,
                         * we will strcmp against garbage. See 1139209.
                         */
                        if (name != NULL)
                                break;
                } else {
                        dmdp_prev = &dmdp->next;
                }
                dmdp = dmdp1;
        }
        ndi_devi_exit(dip, circ);
}


/* Return non-zero when the system is in the middle of a panic. */
int
ddi_in_panic()
{
        return (panicstr != NULL);
}


/*
 * Find first bit set in a mask (returned counting from 1 up)
 */

int
ddi_ffs(long mask)
{
        return (ffs(mask));
}

/*
 * Find last bit set. Take mask and clear
 * all but the most significant bit, and
 * then let ffs do the rest of the work.
 *
 * Algorithm courtesy of Steve Chessin.
 */

int
ddi_fls(long mask)
{
        /* clearing mask & (mask - 1) repeatedly strips low-order bits */
        while (mask) {
                long nx;

                if ((nx = (mask & (mask - 1))) == 0)
                        break;
                mask = nx;
        }
        return (ffs(mask));
}

/*
 * The next five routines comprise generic storage management utilities
 * for driver soft state structures (in "the old days," this was done
 * with a statically sized array - big systems and dynamic loading
 * and unloading make heap allocation more attractive)
 */

/*
 * Allocate a set of pointers to 'n_items' objects of size 'size'
 * bytes. Each pointer is initialized to nil.
 *
 * The 'size' and 'n_items' values are stashed in the opaque
 * handle returned to the caller.
 *
 * This implementation interprets 'set of pointers' to mean 'array
 * of pointers' but note that nothing in the interface definition
 * precludes an implementation that uses, for example, a linked list.
 * However there should be a small efficiency gain from using an array
 * at lookup time.
 *
 * NOTE As an optimization, we make our growable array allocations in
 * powers of two (bytes), since that's how much kmem_alloc (currently)
 * gives us anyway. It should save us some free/realloc's ..
 *
 * As a further optimization, we make the growable array start out
 * with MIN_N_ITEMS in it.
 */

#define MIN_N_ITEMS     8       /* 8 void *'s == 32 bytes */

int
ddi_soft_state_init(void **state_p, size_t size, size_t n_items)
{
        struct i_ddi_soft_state *ss;

        /* reject a null/already-initialized handle or zero-sized state */
        if (state_p == NULL || *state_p != NULL || size == 0)
                return (EINVAL);

        ss = kmem_zalloc(sizeof (*ss), KM_SLEEP);
        mutex_init(&ss->lock, NULL, MUTEX_DRIVER, NULL);
        ss->size = size;

        if (n_items < MIN_N_ITEMS)
                ss->n_items = MIN_N_ITEMS;
        else {
                int bitlog;

                /*
                 * Round n_items up to the next power of two; an exact
                 * power of two (fls == ffs) is kept as-is.
                 */
                if ((bitlog = ddi_fls(n_items)) == ddi_ffs(n_items))
                        bitlog--;
                ss->n_items = 1 << bitlog;
        }

        ASSERT(ss->n_items >= n_items);

        ss->array = kmem_zalloc(ss->n_items * sizeof (void *), KM_SLEEP);

        *state_p = ss;

        return (0);
}


/*
 * Allocate a state structure of size 'size' to be associated
 * with item 'item'.
 *
 * In this implementation, the array is extended to
 * allow the requested offset, if needed.
 */
int
ddi_soft_state_zalloc(void *state, int item)
{
        struct i_ddi_soft_state *ss;
        void **array;
        void *new_element;

        if ((ss = state) == NULL || item < 0)
                return (DDI_FAILURE);

        mutex_enter(&ss->lock);
        if (ss->size == 0) {
                mutex_exit(&ss->lock);
                cmn_err(CE_WARN, "ddi_soft_state_zalloc: bad handle: %s",
                    mod_containing_pc(caller()));
                return (DDI_FAILURE);
        }

        array = ss->array;      /* NULL if ss->n_items == 0 */
        ASSERT(ss->n_items != 0 && array != NULL);

        /*
         * refuse to tread on an existing element
         */
        if (item < ss->n_items && array[item] != NULL) {
                mutex_exit(&ss->lock);
                return (DDI_FAILURE);
        }

        /*
         * Allocate a new element to plug in
         */
        new_element = kmem_zalloc(ss->size, KM_SLEEP);

        /*
         * Check if the array is big enough, if not, grow it.
         */
        if (item >= ss->n_items) {
                void **new_array;
                size_t new_n_items;
                struct i_ddi_soft_state *dirty;

                /*
                 * Allocate a new array of the right length, copy
                 * all the old pointers to the new array, then
                 * if it exists at all, put the old array on the
                 * dirty list.
                 *
                 * Note that we can't kmem_free() the old array.
                 *
                 * Why -- well the 'get' operation is 'mutex-free', so we
                 * can't easily catch a suspended thread that is just about
                 * to dereference the array we just grew out of. So we
                 * cons up a header and put it on a list of 'dirty'
                 * pointer arrays. (Dirty in the sense that there may
                 * be suspended threads somewhere that are in the middle
                 * of referencing them). Fortunately, we -can- garbage
                 * collect it all at ddi_soft_state_fini time.
                 */
                new_n_items = ss->n_items;
                while (new_n_items < (1 + item))
                        new_n_items <<= 1;      /* double array size .. */

                ASSERT(new_n_items >= (1 + item));      /* sanity check! */

                new_array = kmem_zalloc(new_n_items * sizeof (void *),
                    KM_SLEEP);
                /*
                 * Copy the pointers into the new array
                 */
                bcopy(array, new_array, ss->n_items * sizeof (void *));

                /*
                 * Save the old array on the dirty list
                 */
                dirty = kmem_zalloc(sizeof (*dirty), KM_SLEEP);
                dirty->array = ss->array;
                dirty->n_items = ss->n_items;
                dirty->next = ss->next;
                ss->next = dirty;

                ss->array = (array = new_array);
                ss->n_items = new_n_items;
        }

        ASSERT(array != NULL && item < ss->n_items && array[item] == NULL);

        array[item] = new_element;

        mutex_exit(&ss->lock);
        return (DDI_SUCCESS);
}


/*
 * Fetch a pointer to the allocated soft state structure.
 *
 * This is designed to be cheap.
 *
 * There's an argument that there should be more checking for
 * nil pointers and out of bounds on the array.. but we do a lot
 * of that in the alloc/free routines.
 *
 * An array has the convenience that we don't need to lock read-access
 * to it c.f. a linked list. However our "expanding array" strategy
 * means that we should hold a readers lock on the i_ddi_soft_state
 * structure.
 *
 * However, from a performance viewpoint, we need to do it without
 * any locks at all -- this also makes it a leaf routine. The algorithm
 * is 'lock-free' because we only discard the pointer arrays at
 * ddi_soft_state_fini() time.
 */
void *
ddi_get_soft_state(void *state, int item)
{
        struct i_ddi_soft_state *ss = state;

        ASSERT(ss != NULL && item >= 0);

        if (item < ss->n_items && ss->array != NULL)
                return (ss->array[item]);
        return (NULL);
}

/*
 * Free the state structure corresponding to 'item.' Freeing an
 * element that has either gone or was never allocated is not
 * considered an error.
 * Note that we free the state structure, but
 * we don't shrink our pointer array, or discard 'dirty' arrays,
 * since even a few pointers don't really waste too much memory.
 *
 * Passing an item number that is out of bounds, or a null pointer will
 * provoke an error message.
 */
void
ddi_soft_state_free(void *state, int item)
{
        struct i_ddi_soft_state *ss;
        void **array;
        void *element;
        static char msg[] = "ddi_soft_state_free:";

        if ((ss = state) == NULL) {
                cmn_err(CE_WARN, "%s null handle: %s",
                    msg, mod_containing_pc(caller()));
                return;
        }

        element = NULL;

        mutex_enter(&ss->lock);

        if ((array = ss->array) == NULL || ss->size == 0) {
                cmn_err(CE_WARN, "%s bad handle: %s",
                    msg, mod_containing_pc(caller()));
        } else if (item < 0 || item >= ss->n_items) {
                cmn_err(CE_WARN, "%s item %d not in range [0..%lu]: %s",
                    msg, item, ss->n_items - 1, mod_containing_pc(caller()));
        } else if (array[item] != NULL) {
                /* detach the element under the lock; free it outside */
                element = array[item];
                array[item] = NULL;
        }

        mutex_exit(&ss->lock);

        if (element)
                kmem_free(element, ss->size);
}


/*
 * Free the entire set of pointers, and any
 * soft state structures contained therein.
 *
 * Note that we don't grab the ss->lock mutex, even though
 * we're inspecting the various fields of the data structure.
 *
 * There is an implicit assumption that this routine will
 * never run concurrently with any of the above on this
 * particular state structure i.e. by the time the driver
 * calls this routine, there should be no other threads
 * running in the driver.
 */
void
ddi_soft_state_fini(void **state_p)
{
        struct i_ddi_soft_state *ss, *dirty;
        int item;
        static char msg[] = "ddi_soft_state_fini:";

        if (state_p == NULL || (ss = *state_p) == NULL) {
                cmn_err(CE_WARN, "%s null handle: %s",
                    msg, mod_containing_pc(caller()));
                return;
        }

        if (ss->size == 0) {
                cmn_err(CE_WARN, "%s bad handle: %s",
                    msg, mod_containing_pc(caller()));
                return;
        }

        if (ss->n_items > 0) {
                for (item = 0; item < ss->n_items; item++)
                        ddi_soft_state_free(ss, item);
                kmem_free(ss->array, ss->n_items * sizeof (void *));
        }

        /*
         * Now delete any dirty arrays from previous 'grow' operations
         */
        for (dirty = ss->next; dirty; dirty = ss->next) {
                ss->next = dirty->next;
                kmem_free(dirty->array, dirty->n_items * sizeof (void *));
                kmem_free(dirty, sizeof (*dirty));
        }

        mutex_destroy(&ss->lock);
        kmem_free(ss, sizeof (*ss));

        *state_p = NULL;
}

/*
 * This sets the devi_addr entry in the dev_info structure 'dip' to 'name'.
 * Storage is double buffered to prevent updates during devi_addr use -
 * double buffering is adequate for reliable ddi_deviname() consumption.
 * The double buffer is not freed until dev_info structure destruction
 * (by i_ddi_free_node).
 */
void
ddi_set_name_addr(dev_info_t *dip, char *name)
{
        char *buf = DEVI(dip)->devi_addr_buf;
        char *newaddr;

        /* lazily allocate the two-slot (2 * MAXNAMELEN) buffer */
        if (buf == NULL) {
                buf = kmem_zalloc(2 * MAXNAMELEN, KM_SLEEP);
                DEVI(dip)->devi_addr_buf = buf;
        }

        if (name) {
                ASSERT(strlen(name) < MAXNAMELEN);
                /* write into whichever half is not currently published */
                newaddr = (DEVI(dip)->devi_addr == buf) ?
                    (buf + MAXNAMELEN) : buf;
                (void) strlcpy(newaddr, name, MAXNAMELEN);
        } else
                newaddr = NULL;

        DEVI(dip)->devi_addr = newaddr;
}

/* Return the unit-address last set by ddi_set_name_addr(), or NULL. */
char *
ddi_get_name_addr(dev_info_t *dip)
{
        return (DEVI(dip)->devi_addr);
}

/* Attach parent-private data to 'dip'. */
void
ddi_set_parent_data(dev_info_t *dip, void *pd)
{
        DEVI(dip)->devi_parent_data = pd;
}

/* Retrieve the parent-private data previously attached to 'dip'. */
void *
ddi_get_parent_data(dev_info_t *dip)
{
        return (DEVI(dip)->devi_parent_data);
}

/*
 * ddi_name_to_major: returns the major number of a named module,
 * derived from the current driver alias binding.
 *
 * Caveat: drivers should avoid the use of this function, in particular
 * together with ddi_get_name/ddi_binding name, as per
 * major = ddi_name_to_major(ddi_get_name(devi));
 * ddi_name_to_major() relies on the state of the device/alias binding,
 * which can and does change dynamically as aliases are administered
 * over time. An attached device instance cannot rely on the major
 * number returned by ddi_name_to_major() to match its own major number.
 *
 * For driver use, ddi_driver_major() reliably returns the major number
 * for the module to which the device was bound at attach time over
 * the life of the instance.
 * major = ddi_driver_major(dev_info_t *)
 */
major_t
ddi_name_to_major(char *name)
{
        return (mod_name_to_major(name));
}

/*
 * ddi_major_to_name: Returns the module name bound to a major number.
 */
char *
ddi_major_to_name(major_t major)
{
        return (mod_major_to_name(major));
}

/*
 * Return the name of the devinfo node pointed at by 'dip' in the buffer
 * pointed at by 'name.' A devinfo node is named as a result of calling
 * ddi_initchild().
 *
 * Note: the driver must be held before calling this function!
 */
char *
ddi_deviname(dev_info_t *dip, char *name)
{
        char *addrname;
        char none = '\0';

        /* the root node is named by the empty string */
        if (dip == ddi_root_node()) {
                *name = '\0';
                return (name);
        }

        if (i_ddi_node_state(dip) < DS_BOUND) {
                addrname = &none;
        } else {
                /*
                 * Use ddi_get_name_addr() without checking state so we get
                 * a unit-address if we are called after ddi_set_name_addr()
                 * by nexus DDI_CTL_INITCHILD code, but before completing
                 * node promotion to DS_INITIALIZED. We currently have
                 * two situations where we are called in this state:
                 * o For framework processing of a path-oriented alias.
                 * o If a SCSA nexus driver calls ddi_devid_register()
                 *   from it's tran_tgt_init(9E) implementation.
                 */
                addrname = ddi_get_name_addr(dip);
                if (addrname == NULL)
                        addrname = &none;
        }

        /* format is "/nodename" or "/nodename@unit-address" */
        if (*addrname == '\0') {
                (void) sprintf(name, "/%s", ddi_node_name(dip));
        } else {
                (void) sprintf(name, "/%s@%s", ddi_node_name(dip), addrname);
        }

        return (name);
}

/*
 * Spits out the name of device node, typically name@addr, for a given node,
 * using the driver name, not the nodename.
 *
 * Used by match_parent. Not to be used elsewhere.
6665 */ 6666 char * 6667 i_ddi_parname(dev_info_t *dip, char *name) 6668 { 6669 char *addrname; 6670 6671 if (dip == ddi_root_node()) { 6672 *name = '\0'; 6673 return (name); 6674 } 6675 6676 ASSERT(i_ddi_node_state(dip) >= DS_INITIALIZED); 6677 6678 if (*(addrname = ddi_get_name_addr(dip)) == '\0') 6679 (void) sprintf(name, "%s", ddi_binding_name(dip)); 6680 else 6681 (void) sprintf(name, "%s@%s", ddi_binding_name(dip), addrname); 6682 return (name); 6683 } 6684 6685 static char * 6686 pathname_work(dev_info_t *dip, char *path) 6687 { 6688 char *bp; 6689 6690 if (dip == ddi_root_node()) { 6691 *path = '\0'; 6692 return (path); 6693 } 6694 (void) pathname_work(ddi_get_parent(dip), path); 6695 bp = path + strlen(path); 6696 (void) ddi_deviname(dip, bp); 6697 return (path); 6698 } 6699 6700 char * 6701 ddi_pathname(dev_info_t *dip, char *path) 6702 { 6703 return (pathname_work(dip, path)); 6704 } 6705 6706 char * 6707 ddi_pathname_minor(struct ddi_minor_data *dmdp, char *path) 6708 { 6709 if (dmdp->dip == NULL) 6710 *path = '\0'; 6711 else { 6712 (void) ddi_pathname(dmdp->dip, path); 6713 if (dmdp->ddm_name) { 6714 (void) strcat(path, ":"); 6715 (void) strcat(path, dmdp->ddm_name); 6716 } 6717 } 6718 return (path); 6719 } 6720 6721 static char * 6722 pathname_work_obp(dev_info_t *dip, char *path) 6723 { 6724 char *bp; 6725 char *obp_path; 6726 6727 /* 6728 * look up the "obp-path" property, return the path if it exists 6729 */ 6730 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, 6731 "obp-path", &obp_path) == DDI_PROP_SUCCESS) { 6732 (void) strcpy(path, obp_path); 6733 ddi_prop_free(obp_path); 6734 return (path); 6735 } 6736 6737 /* 6738 * stop at root, no obp path 6739 */ 6740 if (dip == ddi_root_node()) { 6741 return (NULL); 6742 } 6743 6744 obp_path = pathname_work_obp(ddi_get_parent(dip), path); 6745 if (obp_path == NULL) 6746 return (NULL); 6747 6748 /* 6749 * append our component to parent's obp path 6750 */ 6751 bp = path + strlen(path); 6752 
if (*(bp - 1) != '/') 6753 (void) strcat(bp++, "/"); 6754 (void) ddi_deviname(dip, bp); 6755 return (path); 6756 } 6757 6758 /* 6759 * return the 'obp-path' based path for the given node, or NULL if the node 6760 * does not have a different obp path. NOTE: Unlike ddi_pathname, this 6761 * function can't be called from interrupt context (since we need to 6762 * lookup a string property). 6763 */ 6764 char * 6765 ddi_pathname_obp(dev_info_t *dip, char *path) 6766 { 6767 ASSERT(!servicing_interrupt()); 6768 if (dip == NULL || path == NULL) 6769 return (NULL); 6770 6771 /* split work into a separate function to aid debugging */ 6772 return (pathname_work_obp(dip, path)); 6773 } 6774 6775 int 6776 ddi_pathname_obp_set(dev_info_t *dip, char *component) 6777 { 6778 dev_info_t *pdip; 6779 char *obp_path = NULL; 6780 int rc = DDI_FAILURE; 6781 6782 if (dip == NULL) 6783 return (DDI_FAILURE); 6784 6785 obp_path = kmem_zalloc(MAXPATHLEN, KM_SLEEP); 6786 6787 pdip = ddi_get_parent(dip); 6788 6789 if (ddi_pathname_obp(pdip, obp_path) == NULL) { 6790 (void) ddi_pathname(pdip, obp_path); 6791 } 6792 6793 if (component) { 6794 (void) strncat(obp_path, "/", MAXPATHLEN); 6795 (void) strncat(obp_path, component, MAXPATHLEN); 6796 } 6797 rc = ndi_prop_update_string(DDI_DEV_T_NONE, dip, "obp-path", 6798 obp_path); 6799 6800 if (obp_path) 6801 kmem_free(obp_path, MAXPATHLEN); 6802 6803 return (rc); 6804 } 6805 6806 /* 6807 * Given a dev_t, return the pathname of the corresponding device in the 6808 * buffer pointed at by "path." The buffer is assumed to be large enough 6809 * to hold the pathname of the device (MAXPATHLEN). 6810 * 6811 * The pathname of a device is the pathname of the devinfo node to which 6812 * the device "belongs," concatenated with the character ':' and the name 6813 * of the minor node corresponding to the dev_t. If spec_type is 0 then 6814 * just the pathname of the devinfo node is returned without driving attach 6815 * of that node. 
 * For a non-zero spec_type, an attach is performed and a
 * search of the minor list occurs.
 *
 * It is possible that the path associated with the dev_t is not
 * currently available in the devinfo tree. In order to have a
 * dev_t, a device must have been discovered before, which means
 * that the path is always in the instance tree. The one exception
 * to this is if the dev_t is associated with a pseudo driver, in
 * which case the device must exist on the pseudo branch of the
 * devinfo tree as a result of parsing .conf files.
 */
int
ddi_dev_pathname(dev_t devt, int spec_type, char *path)
{
        int circ;
        major_t major = getmajor(devt);
        int instance;
        dev_info_t *dip;
        char *minorname;
        char *drvname;

        if (major >= devcnt)
                goto fail;
        if (major == clone_major) {
                /* clone has no minor nodes, manufacture the path here */
                if ((drvname = ddi_major_to_name(getminor(devt))) == NULL)
                        goto fail;

                (void) snprintf(path, MAXPATHLEN, "%s:%s", CLONE_PATH, drvname);
                return (DDI_SUCCESS);
        }

        /* extract instance from devt (getinfo(9E) DDI_INFO_DEVT2INSTANCE). */
        if ((instance = dev_to_instance(devt)) == -1)
                goto fail;

        /* reconstruct the path given the major/instance */
        if (e_ddi_majorinstance_to_path(major, instance, path) != DDI_SUCCESS)
                goto fail;

        /* if spec_type given we must drive attach and search minor nodes */
        if ((spec_type == S_IFCHR) || (spec_type == S_IFBLK)) {
                /* attach the path so we can search minors */
                if ((dip = e_ddi_hold_devi_by_path(path, 0)) == NULL)
                        goto fail;

                /* Add minorname to path. */
                ndi_devi_enter(dip, &circ);
                minorname = i_ddi_devtspectype_to_minorname(dip,
                    devt, spec_type);
                if (minorname) {
                        (void) strcat(path, ":");
                        (void) strcat(path, minorname);
                }
                ndi_devi_exit(dip, circ);
                ddi_release_devi(dip);
                if (minorname == NULL)
                        goto fail;
        }
        ASSERT(strlen(path) < MAXPATHLEN);
        return (DDI_SUCCESS);

fail:   *path = 0;
        return (DDI_FAILURE);
}

/*
 * Given a major number and an instance, return the path.
 * This interface does NOT drive attach.
 */
int
e_ddi_majorinstance_to_path(major_t major, int instance, char *path)
{
        struct devnames *dnp;
        dev_info_t *dip;

        if ((major >= devcnt) || (instance == -1)) {
                *path = 0;
                return (DDI_FAILURE);
        }

        /* look for the major/instance in the instance tree */
        if (e_ddi_instance_majorinstance_to_path(major, instance,
            path) == DDI_SUCCESS) {
                ASSERT(strlen(path) < MAXPATHLEN);
                return (DDI_SUCCESS);
        }

        /*
         * Not in instance tree, find the instance on the per driver list and
         * construct path to instance via ddi_pathname(). This is how paths
         * down the 'pseudo' branch are constructed.
         */
        dnp = &(devnamesp[major]);
        LOCK_DEV_OPS(&(dnp->dn_lock));
        for (dip = dnp->dn_head; dip;
            dip = (dev_info_t *)DEVI(dip)->devi_next) {
                /* Skip if instance does not match. */
                if (DEVI(dip)->devi_instance != instance)
                        continue;

                /*
                 * An ndi_hold_devi() does not prevent DS_INITIALIZED->DS_BOUND
                 * node demotion, so it is not an effective way of ensuring
                 * that the ddi_pathname result has a unit-address. Instead,
                 * we reverify the node state after calling ddi_pathname().
                 */
                if (i_ddi_node_state(dip) >= DS_INITIALIZED) {
                        (void) ddi_pathname(dip, path);
                        if (i_ddi_node_state(dip) < DS_INITIALIZED)
                                continue;
                        UNLOCK_DEV_OPS(&(dnp->dn_lock));
                        ASSERT(strlen(path) < MAXPATHLEN);
                        return (DDI_SUCCESS);
                }
        }
        UNLOCK_DEV_OPS(&(dnp->dn_lock));

        /* can't reconstruct the path */
        *path = 0;
        return (DDI_FAILURE);
}

#define GLD_DRIVER_PPA "SUNW,gld_v0_ppa"

/*
 * Given the dip for a network interface return the ppa for that interface.
 *
 * In all cases except GLD v0 drivers, the ppa == instance.
 * In the case of GLD v0 drivers, the ppa is equal to the attach order.
 * So for these drivers when the attach routine calls gld_register(),
 * the GLD framework creates an integer property called "gld_driver_ppa"
 * that can be queried here.
 *
 * The only time this function is used is when a system is booting over nfs.
 * In this case the system has to resolve the pathname of the boot device
 * to it's ppa.
 */
int
i_ddi_devi_get_ppa(dev_info_t *dip)
{
        /* falls back to the instance number when the property is absent */
        return (ddi_prop_get_int(DDI_DEV_T_ANY, dip,
            DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
            GLD_DRIVER_PPA, ddi_get_instance(dip)));
}

/*
 * i_ddi_devi_set_ppa() should only be called from gld_register()
 * and only for GLD v0 drivers
 */
void
i_ddi_devi_set_ppa(dev_info_t *dip, int ppa)
{
        (void) e_ddi_prop_update_int(DDI_DEV_T_NONE, dip, GLD_DRIVER_PPA, ppa);
}


/*
 * Private DDI Console bell functions.
6974 */ 6975 void 6976 ddi_ring_console_bell(clock_t duration) 6977 { 6978 if (ddi_console_bell_func != NULL) 6979 (*ddi_console_bell_func)(duration); 6980 } 6981 6982 void 6983 ddi_set_console_bell(void (*bellfunc)(clock_t duration)) 6984 { 6985 ddi_console_bell_func = bellfunc; 6986 } 6987 6988 int 6989 ddi_dma_alloc_handle(dev_info_t *dip, ddi_dma_attr_t *attr, 6990 int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep) 6991 { 6992 int (*funcp)() = ddi_dma_allochdl; 6993 ddi_dma_attr_t dma_attr; 6994 struct bus_ops *bop; 6995 6996 if (attr == (ddi_dma_attr_t *)0) 6997 return (DDI_DMA_BADATTR); 6998 6999 dma_attr = *attr; 7000 7001 bop = DEVI(dip)->devi_ops->devo_bus_ops; 7002 if (bop && bop->bus_dma_allochdl) 7003 funcp = bop->bus_dma_allochdl; 7004 7005 return ((*funcp)(dip, dip, &dma_attr, waitfp, arg, handlep)); 7006 } 7007 7008 void 7009 ddi_dma_free_handle(ddi_dma_handle_t *handlep) 7010 { 7011 ddi_dma_handle_t h = *handlep; 7012 (void) ddi_dma_freehdl(HD, HD, h); 7013 } 7014 7015 static uintptr_t dma_mem_list_id = 0; 7016 7017 7018 int 7019 ddi_dma_mem_alloc(ddi_dma_handle_t handle, size_t length, 7020 ddi_device_acc_attr_t *accattrp, uint_t flags, 7021 int (*waitfp)(caddr_t), caddr_t arg, caddr_t *kaddrp, 7022 size_t *real_length, ddi_acc_handle_t *handlep) 7023 { 7024 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle; 7025 dev_info_t *dip = hp->dmai_rdip; 7026 ddi_acc_hdl_t *ap; 7027 ddi_dma_attr_t *attrp = &hp->dmai_attr; 7028 uint_t sleepflag, xfermodes; 7029 int (*fp)(caddr_t); 7030 int rval; 7031 7032 if (waitfp == DDI_DMA_SLEEP) 7033 fp = (int (*)())KM_SLEEP; 7034 else if (waitfp == DDI_DMA_DONTWAIT) 7035 fp = (int (*)())KM_NOSLEEP; 7036 else 7037 fp = waitfp; 7038 *handlep = impl_acc_hdl_alloc(fp, arg); 7039 if (*handlep == NULL) 7040 return (DDI_FAILURE); 7041 7042 /* check if the cache attributes are supported */ 7043 if (i_ddi_check_cache_attr(flags) == B_FALSE) 7044 return (DDI_FAILURE); 7045 7046 /* 7047 * Transfer the meaningful bits to 
xfermodes. 7048 * Double-check if the 3rd party driver correctly sets the bits. 7049 * If not, set DDI_DMA_STREAMING to keep compatibility. 7050 */ 7051 xfermodes = flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING); 7052 if (xfermodes == 0) { 7053 xfermodes = DDI_DMA_STREAMING; 7054 } 7055 7056 /* 7057 * initialize the common elements of data access handle 7058 */ 7059 ap = impl_acc_hdl_get(*handlep); 7060 ap->ah_vers = VERS_ACCHDL; 7061 ap->ah_dip = dip; 7062 ap->ah_offset = 0; 7063 ap->ah_len = 0; 7064 ap->ah_xfermodes = flags; 7065 ap->ah_acc = *accattrp; 7066 7067 sleepflag = ((waitfp == DDI_DMA_SLEEP) ? 1 : 0); 7068 if (xfermodes == DDI_DMA_CONSISTENT) { 7069 rval = i_ddi_mem_alloc(dip, attrp, length, sleepflag, 7070 flags, accattrp, kaddrp, NULL, ap); 7071 *real_length = length; 7072 } else { 7073 rval = i_ddi_mem_alloc(dip, attrp, length, sleepflag, 7074 flags, accattrp, kaddrp, real_length, ap); 7075 } 7076 if (rval == DDI_SUCCESS) { 7077 ap->ah_len = (off_t)(*real_length); 7078 ap->ah_addr = *kaddrp; 7079 } else { 7080 impl_acc_hdl_free(*handlep); 7081 *handlep = (ddi_acc_handle_t)NULL; 7082 if (waitfp != DDI_DMA_SLEEP && waitfp != DDI_DMA_DONTWAIT) { 7083 ddi_set_callback(waitfp, arg, &dma_mem_list_id); 7084 } 7085 rval = DDI_FAILURE; 7086 } 7087 return (rval); 7088 } 7089 7090 void 7091 ddi_dma_mem_free(ddi_acc_handle_t *handlep) 7092 { 7093 ddi_acc_hdl_t *ap; 7094 7095 ap = impl_acc_hdl_get(*handlep); 7096 ASSERT(ap); 7097 7098 i_ddi_mem_free((caddr_t)ap->ah_addr, ap); 7099 7100 /* 7101 * free the handle 7102 */ 7103 impl_acc_hdl_free(*handlep); 7104 *handlep = (ddi_acc_handle_t)NULL; 7105 7106 if (dma_mem_list_id != 0) { 7107 ddi_run_callback(&dma_mem_list_id); 7108 } 7109 } 7110 7111 int 7112 ddi_dma_buf_bind_handle(ddi_dma_handle_t handle, struct buf *bp, 7113 uint_t flags, int (*waitfp)(caddr_t), caddr_t arg, 7114 ddi_dma_cookie_t *cookiep, uint_t *ccountp) 7115 { 7116 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle; 7117 dev_info_t *hdip, *dip; 7118 
struct ddi_dma_req dmareq; 7119 int (*funcp)(); 7120 7121 dmareq.dmar_flags = flags; 7122 dmareq.dmar_fp = waitfp; 7123 dmareq.dmar_arg = arg; 7124 dmareq.dmar_object.dmao_size = (uint_t)bp->b_bcount; 7125 7126 if (bp->b_flags & B_PAGEIO) { 7127 dmareq.dmar_object.dmao_type = DMA_OTYP_PAGES; 7128 dmareq.dmar_object.dmao_obj.pp_obj.pp_pp = bp->b_pages; 7129 dmareq.dmar_object.dmao_obj.pp_obj.pp_offset = 7130 (uint_t)(((uintptr_t)bp->b_un.b_addr) & MMU_PAGEOFFSET); 7131 } else { 7132 dmareq.dmar_object.dmao_obj.virt_obj.v_addr = bp->b_un.b_addr; 7133 if (bp->b_flags & B_SHADOW) { 7134 dmareq.dmar_object.dmao_obj.virt_obj.v_priv = 7135 bp->b_shadow; 7136 dmareq.dmar_object.dmao_type = DMA_OTYP_BUFVADDR; 7137 } else { 7138 dmareq.dmar_object.dmao_type = 7139 (bp->b_flags & (B_PHYS | B_REMAPPED)) ? 7140 DMA_OTYP_BUFVADDR : DMA_OTYP_VADDR; 7141 dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL; 7142 } 7143 7144 /* 7145 * If the buffer has no proc pointer, or the proc 7146 * struct has the kernel address space, or the buffer has 7147 * been marked B_REMAPPED (meaning that it is now 7148 * mapped into the kernel's address space), then 7149 * the address space is kas (kernel address space). 
7150 */ 7151 if ((bp->b_proc == NULL) || (bp->b_proc->p_as == &kas) || 7152 (bp->b_flags & B_REMAPPED)) { 7153 dmareq.dmar_object.dmao_obj.virt_obj.v_as = 0; 7154 } else { 7155 dmareq.dmar_object.dmao_obj.virt_obj.v_as = 7156 bp->b_proc->p_as; 7157 } 7158 } 7159 7160 dip = hp->dmai_rdip; 7161 hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl; 7162 funcp = DEVI(dip)->devi_bus_dma_bindfunc; 7163 return ((*funcp)(hdip, dip, handle, &dmareq, cookiep, ccountp)); 7164 } 7165 7166 int 7167 ddi_dma_addr_bind_handle(ddi_dma_handle_t handle, struct as *as, 7168 caddr_t addr, size_t len, uint_t flags, int (*waitfp)(caddr_t), 7169 caddr_t arg, ddi_dma_cookie_t *cookiep, uint_t *ccountp) 7170 { 7171 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle; 7172 dev_info_t *hdip, *dip; 7173 struct ddi_dma_req dmareq; 7174 int (*funcp)(); 7175 7176 if (len == (uint_t)0) { 7177 return (DDI_DMA_NOMAPPING); 7178 } 7179 dmareq.dmar_flags = flags; 7180 dmareq.dmar_fp = waitfp; 7181 dmareq.dmar_arg = arg; 7182 dmareq.dmar_object.dmao_size = len; 7183 dmareq.dmar_object.dmao_type = DMA_OTYP_VADDR; 7184 dmareq.dmar_object.dmao_obj.virt_obj.v_as = as; 7185 dmareq.dmar_object.dmao_obj.virt_obj.v_addr = addr; 7186 dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL; 7187 7188 dip = hp->dmai_rdip; 7189 hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl; 7190 funcp = DEVI(dip)->devi_bus_dma_bindfunc; 7191 return ((*funcp)(hdip, dip, handle, &dmareq, cookiep, ccountp)); 7192 } 7193 7194 void 7195 ddi_dma_nextcookie(ddi_dma_handle_t handle, ddi_dma_cookie_t *cookiep) 7196 { 7197 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle; 7198 ddi_dma_cookie_t *cp; 7199 7200 cp = hp->dmai_cookie; 7201 ASSERT(cp); 7202 7203 cookiep->dmac_notused = cp->dmac_notused; 7204 cookiep->dmac_type = cp->dmac_type; 7205 cookiep->dmac_address = cp->dmac_address; 7206 cookiep->dmac_size = cp->dmac_size; 7207 hp->dmai_cookie++; 7208 } 7209 7210 int 7211 ddi_dma_numwin(ddi_dma_handle_t handle, uint_t *nwinp) 7212 { 7213 
ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle; 7214 if ((hp->dmai_rflags & DDI_DMA_PARTIAL) == 0) { 7215 return (DDI_FAILURE); 7216 } else { 7217 *nwinp = hp->dmai_nwin; 7218 return (DDI_SUCCESS); 7219 } 7220 } 7221 7222 int 7223 ddi_dma_getwin(ddi_dma_handle_t h, uint_t win, off_t *offp, 7224 size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp) 7225 { 7226 int (*funcp)() = ddi_dma_win; 7227 struct bus_ops *bop; 7228 7229 bop = DEVI(HD)->devi_ops->devo_bus_ops; 7230 if (bop && bop->bus_dma_win) 7231 funcp = bop->bus_dma_win; 7232 7233 return ((*funcp)(HD, HD, h, win, offp, lenp, cookiep, ccountp)); 7234 } 7235 7236 int 7237 ddi_dma_set_sbus64(ddi_dma_handle_t h, ulong_t burstsizes) 7238 { 7239 return (ddi_dma_mctl(HD, HD, h, DDI_DMA_SET_SBUS64, 0, 7240 &burstsizes, 0, 0)); 7241 } 7242 7243 int 7244 i_ddi_dma_fault_check(ddi_dma_impl_t *hp) 7245 { 7246 return (hp->dmai_fault); 7247 } 7248 7249 int 7250 ddi_check_dma_handle(ddi_dma_handle_t handle) 7251 { 7252 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle; 7253 int (*check)(ddi_dma_impl_t *); 7254 7255 if ((check = hp->dmai_fault_check) == NULL) 7256 check = i_ddi_dma_fault_check; 7257 7258 return (((*check)(hp) == DDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE); 7259 } 7260 7261 void 7262 i_ddi_dma_set_fault(ddi_dma_handle_t handle) 7263 { 7264 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle; 7265 void (*notify)(ddi_dma_impl_t *); 7266 7267 if (!hp->dmai_fault) { 7268 hp->dmai_fault = 1; 7269 if ((notify = hp->dmai_fault_notify) != NULL) 7270 (*notify)(hp); 7271 } 7272 } 7273 7274 void 7275 i_ddi_dma_clr_fault(ddi_dma_handle_t handle) 7276 { 7277 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle; 7278 void (*notify)(ddi_dma_impl_t *); 7279 7280 if (hp->dmai_fault) { 7281 hp->dmai_fault = 0; 7282 if ((notify = hp->dmai_fault_notify) != NULL) 7283 (*notify)(hp); 7284 } 7285 } 7286 7287 /* 7288 * register mapping routines. 
 */
/*
 * Map device register set 'rnumber' into kernel virtual space and
 * return its kernel address via *addrp and an access handle via
 * *handle.  On failure the handle is freed and *handle set to NULL.
 */
int
ddi_regs_map_setup(dev_info_t *dip, uint_t rnumber, caddr_t *addrp,
    offset_t offset, offset_t len, ddi_device_acc_attr_t *accattrp,
    ddi_acc_handle_t *handle)
{
	ddi_map_req_t mr;
	ddi_acc_hdl_t *hp;
	int result;

	/*
	 * Allocate and initialize the common elements of data access handle.
	 */
	*handle = impl_acc_hdl_alloc(KM_SLEEP, NULL);
	hp = impl_acc_hdl_get(*handle);
	hp->ah_vers = VERS_ACCHDL;
	hp->ah_dip = dip;
	hp->ah_rnumber = rnumber;
	hp->ah_offset = offset;
	hp->ah_len = len;
	hp->ah_acc = *accattrp;

	/*
	 * Set up the mapping request and call to parent.
	 */
	mr.map_op = DDI_MO_MAP_LOCKED;
	mr.map_type = DDI_MT_RNUMBER;
	mr.map_obj.rnumber = rnumber;
	mr.map_prot = PROT_READ | PROT_WRITE;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_handlep = hp;
	mr.map_vers = DDI_MAP_VERSION;
	result = ddi_map(dip, &mr, offset, len, addrp);

	/*
	 * check for end result
	 */
	if (result != DDI_SUCCESS) {
		/* free the handle on failure so nothing leaks */
		impl_acc_hdl_free(*handle);
		*handle = (ddi_acc_handle_t)NULL;
	} else {
		hp->ah_addr = *addrp;
	}

	return (result);
}

/*
 * Undo ddi_regs_map_setup(): ask the parent to unmap the registers,
 * then free the access handle.
 */
void
ddi_regs_map_free(ddi_acc_handle_t *handlep)
{
	ddi_map_req_t mr;
	ddi_acc_hdl_t *hp;

	hp = impl_acc_hdl_get(*handlep);
	ASSERT(hp);

	/* rebuild the original mapping request with DDI_MO_UNMAP */
	mr.map_op = DDI_MO_UNMAP;
	mr.map_type = DDI_MT_RNUMBER;
	mr.map_obj.rnumber = hp->ah_rnumber;
	mr.map_prot = PROT_READ | PROT_WRITE;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_handlep = hp;
	mr.map_vers = DDI_MAP_VERSION;

	/*
	 * Call my parent to unmap my regs.
	 */
	(void) ddi_map(hp->ah_dip, &mr, hp->ah_offset,
	    hp->ah_len, &hp->ah_addr);
	/*
	 * free the handle
	 */
	impl_acc_hdl_free(*handlep);
	*handlep = (ddi_acc_handle_t)NULL;
}

/*
 * Zero-fill a device region through an access handle.
 *
 * bytecount must be a multiple of the transfer size dev_datasz;
 * after each store the device pointer advances by dev_advcnt
 * elements of that size (not bytes).
 */
int
ddi_device_zero(ddi_acc_handle_t handle, caddr_t dev_addr, size_t bytecount,
    ssize_t dev_advcnt, uint_t dev_datasz)
{
	uint8_t *b;
	uint16_t *w;
	uint32_t *l;
	uint64_t *ll;

	/* check for total byte count is multiple of data transfer size */
	if (bytecount != ((bytecount / dev_datasz) * dev_datasz))
		return (DDI_FAILURE);

	switch (dev_datasz) {
	case DDI_DATA_SZ01_ACC:
		for (b = (uint8_t *)dev_addr;
		    bytecount != 0; bytecount -= 1, b += dev_advcnt)
			ddi_put8(handle, b, 0);
		break;
	case DDI_DATA_SZ02_ACC:
		for (w = (uint16_t *)dev_addr;
		    bytecount != 0; bytecount -= 2, w += dev_advcnt)
			ddi_put16(handle, w, 0);
		break;
	case DDI_DATA_SZ04_ACC:
		for (l = (uint32_t *)dev_addr;
		    bytecount != 0; bytecount -= 4, l += dev_advcnt)
			ddi_put32(handle, l, 0);
		break;
	case DDI_DATA_SZ08_ACC:
		for (ll = (uint64_t *)dev_addr;
		    bytecount != 0; bytecount -= 8, ll += dev_advcnt)
			ddi_put64(handle, ll, 0x0ll);
		break;
	default:
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}

/*
 * Copy data between two device regions through their access handles.
 *
 * bytecount must be a multiple of dev_datasz; the source and
 * destination pointers advance by src_advcnt/dest_advcnt elements
 * of that size after every transfer.
 */
int
ddi_device_copy(
    ddi_acc_handle_t src_handle, caddr_t src_addr, ssize_t src_advcnt,
    ddi_acc_handle_t dest_handle, caddr_t dest_addr, ssize_t dest_advcnt,
    size_t bytecount, uint_t dev_datasz)
{
	uint8_t *b_src, *b_dst;
	uint16_t *w_src, *w_dst;
	uint32_t *l_src, *l_dst;
	uint64_t *ll_src, *ll_dst;

	/* check for total byte count is multiple of data transfer size */
	if (bytecount != ((bytecount / dev_datasz) * dev_datasz))
		return (DDI_FAILURE);

	switch (dev_datasz) {
	case DDI_DATA_SZ01_ACC:
		b_src = (uint8_t *)src_addr;
		b_dst = (uint8_t *)dest_addr;

		for (; bytecount != 0; bytecount -= 1) {
			ddi_put8(dest_handle, b_dst,
			    ddi_get8(src_handle, b_src));
			b_dst += dest_advcnt;
			b_src += src_advcnt;
		}
		break;
	case DDI_DATA_SZ02_ACC:
		w_src = (uint16_t *)src_addr;
		w_dst = (uint16_t *)dest_addr;

		for (; bytecount != 0; bytecount -= 2) {
			ddi_put16(dest_handle, w_dst,
			    ddi_get16(src_handle, w_src));
			w_dst += dest_advcnt;
			w_src += src_advcnt;
		}
		break;
	case DDI_DATA_SZ04_ACC:
		l_src = (uint32_t *)src_addr;
		l_dst = (uint32_t *)dest_addr;

		for (; bytecount != 0; bytecount -= 4) {
			ddi_put32(dest_handle, l_dst,
			    ddi_get32(src_handle, l_src));
			l_dst += dest_advcnt;
			l_src += src_advcnt;
		}
		break;
	case DDI_DATA_SZ08_ACC:
		ll_src = (uint64_t *)src_addr;
		ll_dst = (uint64_t *)dest_addr;

		for (; bytecount != 0; bytecount -= 8) {
			ddi_put64(dest_handle, ll_dst,
			    ddi_get64(src_handle, ll_src));
			ll_dst += dest_advcnt;
			ll_src += src_advcnt;
		}
		break;
	default:
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}

/* byte-swap helpers used by the ddi_swapNN() functions below */
#define	swap16(value)  \
	((((value) & 0xff) << 8) | ((value) >> 8))

#define	swap32(value)	\
	(((uint32_t)swap16((uint16_t)((value) & 0xffff)) << 16) | \
	(uint32_t)swap16((uint16_t)((value) >> 16)))

#define	swap64(value)	\
	(((uint64_t)swap32((uint32_t)((value) & 0xffffffff)) \
	    << 32) | \
	(uint64_t)swap32((uint32_t)((value) >> 32)))

/* reverse the byte order of a 16-bit value */
uint16_t
ddi_swap16(uint16_t value)
{
	return (swap16(value));
}

/* reverse the byte order of a 32-bit value */
uint32_t
ddi_swap32(uint32_t value)
{
	return (swap32(value));
}

/* reverse the byte order of a 64-bit value */
uint64_t
ddi_swap64(uint64_t value)
{
	return (swap64(value));
}

/*
 * Convert a binding name to a driver name.
7503 * A binding name is the name used to determine the driver for a 7504 * device - it may be either an alias for the driver or the name 7505 * of the driver itself. 7506 */ 7507 char * 7508 i_binding_to_drv_name(char *bname) 7509 { 7510 major_t major_no; 7511 7512 ASSERT(bname != NULL); 7513 7514 if ((major_no = ddi_name_to_major(bname)) == -1) 7515 return (NULL); 7516 return (ddi_major_to_name(major_no)); 7517 } 7518 7519 /* 7520 * Search for minor name that has specified dev_t and spec_type. 7521 * If spec_type is zero then any dev_t match works. Since we 7522 * are returning a pointer to the minor name string, we require the 7523 * caller to do the locking. 7524 */ 7525 char * 7526 i_ddi_devtspectype_to_minorname(dev_info_t *dip, dev_t dev, int spec_type) 7527 { 7528 struct ddi_minor_data *dmdp; 7529 7530 /* 7531 * The did layered driver currently intentionally returns a 7532 * devinfo ptr for an underlying sd instance based on a did 7533 * dev_t. In this case it is not an error. 7534 * 7535 * The did layered driver is associated with Sun Cluster. 7536 */ 7537 ASSERT((ddi_driver_major(dip) == getmajor(dev)) || 7538 (strcmp(ddi_major_to_name(getmajor(dev)), "did") == 0)); 7539 7540 ASSERT(DEVI_BUSY_OWNED(dip)); 7541 for (dmdp = DEVI(dip)->devi_minor; dmdp; dmdp = dmdp->next) { 7542 if (((dmdp->type == DDM_MINOR) || 7543 (dmdp->type == DDM_INTERNAL_PATH) || 7544 (dmdp->type == DDM_DEFAULT)) && 7545 (dmdp->ddm_dev == dev) && 7546 ((((spec_type & (S_IFCHR|S_IFBLK))) == 0) || 7547 (dmdp->ddm_spec_type == spec_type))) 7548 return (dmdp->ddm_name); 7549 } 7550 7551 return (NULL); 7552 } 7553 7554 /* 7555 * Find the devt and spectype of the specified minor_name. 7556 * Return DDI_FAILURE if minor_name not found. Since we are 7557 * returning everything via arguments we can do the locking. 
 */
int
i_ddi_minorname_to_devtspectype(dev_info_t *dip, char *minor_name,
    dev_t *devtp, int *spectypep)
{
	int			circ;
	struct ddi_minor_data	*dmdp;

	/* deal with clone minor nodes */
	if (dip == clone_dip) {
		major_t	major;
		/*
		 * Make sure minor_name is a STREAMS driver.
		 * We load the driver but don't attach to any instances.
		 */

		major = ddi_name_to_major(minor_name);
		if (major == DDI_MAJOR_T_NONE)
			return (DDI_FAILURE);

		if (ddi_hold_driver(major) == NULL)
			return (DDI_FAILURE);

		/* STREAMSTAB is non-NULL only for STREAMS drivers */
		if (STREAMSTAB(major) == NULL) {
			ddi_rele_driver(major);
			return (DDI_FAILURE);
		}
		ddi_rele_driver(major);

		/* clone dev_t encodes the target driver's major as minor */
		if (devtp)
			*devtp = makedevice(clone_major, (minor_t)major);

		if (spectypep)
			*spectypep = S_IFCHR;

		return (DDI_SUCCESS);
	}

	/* walk dip's minor list under the devinfo tree lock */
	ndi_devi_enter(dip, &circ);
	for (dmdp = DEVI(dip)->devi_minor; dmdp; dmdp = dmdp->next) {
		if (((dmdp->type != DDM_MINOR) &&
		    (dmdp->type != DDM_INTERNAL_PATH) &&
		    (dmdp->type != DDM_DEFAULT)) ||
		    strcmp(minor_name, dmdp->ddm_name))
			continue;

		if (devtp)
			*devtp = dmdp->ddm_dev;

		if (spectypep)
			*spectypep = dmdp->ddm_spec_type;

		ndi_devi_exit(dip, circ);
		return (DDI_SUCCESS);
	}
	ndi_devi_exit(dip, circ);

	return (DDI_FAILURE);
}

/* serializes generation-number updates for DEVID_FAB devids */
static kmutex_t devid_gen_mutex;
static short	devid_gen_number;

#ifdef DEBUG

/* tunables used to deliberately corrupt registered devids for testing */
static int	devid_register_corrupt = 0;
static int	devid_register_corrupt_major = 0;
static int	devid_register_corrupt_hint = 0;
static int	devid_register_corrupt_hint_major = 0;

static int	devid_lyr_debug = 0;

#define	DDI_DEBUG_DEVID_DEVTS(msg, ndevs, devs)		\
	if (devid_lyr_debug)				\
		ddi_debug_devid_devts(msg, ndevs, devs)

#else

#define	DDI_DEBUG_DEVID_DEVTS(msg, ndevs, devs)

#endif	/* DEBUG
 */


#ifdef DEBUG

/* dump a list of dev_ts for devid debugging */
static void
ddi_debug_devid_devts(char *msg, int ndevs, dev_t *devs)
{
	int i;

	cmn_err(CE_CONT, "%s:\n", msg);
	for (i = 0; i < ndevs; i++) {
		cmn_err(CE_CONT, " 0x%lx\n", devs[i]);
	}
}

/* dump a list of device paths for devid debugging */
static void
ddi_debug_devid_paths(char *msg, int npaths, char **paths)
{
	int i;

	cmn_err(CE_CONT, "%s:\n", msg);
	for (i = 0; i < npaths; i++) {
		cmn_err(CE_CONT, " %s\n", paths[i]);
	}
}

/* dump the dev_ts found under a single path for devid debugging */
static void
ddi_debug_devid_devts_per_path(char *path, int ndevs, dev_t *devs)
{
	int i;

	cmn_err(CE_CONT, "dev_ts per path %s\n", path);
	for (i = 0; i < ndevs; i++) {
		cmn_err(CE_CONT, " 0x%lx\n", devs[i]);
	}
}

#endif	/* DEBUG */

/*
 * Register device id into DDI framework.
 * Must be called when device is attached.
 */
static int
i_ddi_devid_register(dev_info_t *dip, ddi_devid_t devid)
{
	impl_devid_t	*i_devid = (impl_devid_t *)devid;
	size_t		driver_len;
	const char	*driver_name;
	char		*devid_str;
	major_t		major;

	if ((dip == NULL) ||
	    ((major = ddi_driver_major(dip)) == DDI_MAJOR_T_NONE))
		return (DDI_FAILURE);

	/* verify that the devid is valid */
	if (ddi_devid_valid(devid) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/* Updating driver name hint in devid */
	driver_name = ddi_driver_name(dip);
	driver_len = strlen(driver_name);
	if (driver_len > DEVID_HINT_SIZE) {
		/* Pick up last four characters of driver name */
		driver_name += driver_len - DEVID_HINT_SIZE;
		driver_len = DEVID_HINT_SIZE;
	}
	bzero(i_devid->did_driver, DEVID_HINT_SIZE);
	bcopy(driver_name, i_devid->did_driver, driver_len);

#ifdef DEBUG
	/* Corrupt the devid for testing. */
	if (devid_register_corrupt)
		i_devid->did_id[0] += devid_register_corrupt;
	if (devid_register_corrupt_major &&
	    (major == devid_register_corrupt_major))
		i_devid->did_id[0] += 1;
	if (devid_register_corrupt_hint)
		i_devid->did_driver[0] += devid_register_corrupt_hint;
	if (devid_register_corrupt_hint_major &&
	    (major == devid_register_corrupt_hint_major))
		i_devid->did_driver[0] += 1;
#endif /* DEBUG */

	/* encode the devid as a string */
	if ((devid_str = ddi_devid_str_encode(devid, NULL)) == NULL)
		return (DDI_FAILURE);

	/* add string as a string property */
	if (ndi_prop_update_string(DDI_DEV_T_NONE, dip,
	    DEVID_PROP_NAME, devid_str) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s%d: devid property update failed",
		    ddi_driver_name(dip), ddi_get_instance(dip));
		ddi_devid_str_free(devid_str);
		return (DDI_FAILURE);
	}

	/* keep pointer to devid string for interrupt context fma code */
	if (DEVI(dip)->devi_devid_str)
		ddi_devid_str_free(DEVI(dip)->devi_devid_str);
	DEVI(dip)->devi_devid_str = devid_str;
	return (DDI_SUCCESS);
}

/*
 * Public entry: register the devid, then enter it into the
 * devid-to-path cache and mark the node DEVI_REGISTERED_DEVID on
 * success.  Warnings are logged on either failure.
 */
int
ddi_devid_register(dev_info_t *dip, ddi_devid_t devid)
{
	int rval;

	rval = i_ddi_devid_register(dip, devid);
	if (rval == DDI_SUCCESS) {
		/*
		 * Register devid in devid-to-path cache
		 */
		if (e_devid_cache_register(dip, devid) == DDI_SUCCESS) {
			mutex_enter(&DEVI(dip)->devi_lock);
			DEVI(dip)->devi_flags |= DEVI_REGISTERED_DEVID;
			mutex_exit(&DEVI(dip)->devi_lock);
		} else {
			cmn_err(CE_WARN, "%s%d: failed to cache devid",
			    ddi_driver_name(dip), ddi_get_instance(dip));
		}
	} else {
		cmn_err(CE_WARN, "%s%d: failed to register devid",
		    ddi_driver_name(dip), ddi_get_instance(dip));
	}
	return (rval);
}

/*
 * Remove (unregister) device id from DDI framework.
 * Must be called when device is detached.
 */
static void
i_ddi_devid_unregister(dev_info_t *dip)
{
	/* drop the cached devid string kept for interrupt-context use */
	if (DEVI(dip)->devi_devid_str) {
		ddi_devid_str_free(DEVI(dip)->devi_devid_str);
		DEVI(dip)->devi_devid_str = NULL;
	}

	/* remove the devid property */
	(void) ndi_prop_remove(DDI_DEV_T_NONE, dip, DEVID_PROP_NAME);
}

/*
 * Public entry: clear DEVI_REGISTERED_DEVID, remove the devid from
 * the devid-to-path cache and then from the framework.
 */
void
ddi_devid_unregister(dev_info_t *dip)
{
	mutex_enter(&DEVI(dip)->devi_lock);
	DEVI(dip)->devi_flags &= ~DEVI_REGISTERED_DEVID;
	mutex_exit(&DEVI(dip)->devi_lock);
	e_devid_cache_unregister(dip);
	i_ddi_devid_unregister(dip);
}

/*
 * Allocate and initialize a device id.
 */
int
ddi_devid_init(
	dev_info_t	*dip,
	ushort_t	devid_type,
	ushort_t	nbytes,
	void		*id,
	ddi_devid_t	*ret_devid)
{
	impl_devid_t	*i_devid;
	/* impl_devid_t already contains one byte of did_id storage */
	int		sz = sizeof (*i_devid) + nbytes - sizeof (char);
	int		driver_len;
	const char	*driver_name;

	switch (devid_type) {
	case DEVID_SCSI3_WWN:
		/*FALLTHRU*/
	case DEVID_SCSI_SERIAL:
		/*FALLTHRU*/
	case DEVID_ATA_SERIAL:
		/*FALLTHRU*/
	case DEVID_ENCAP:
		/* these types require caller-supplied id bytes */
		if (nbytes == 0)
			return (DDI_FAILURE);
		if (id == NULL)
			return (DDI_FAILURE);
		break;
	case DEVID_FAB:
		/* fabricated ids are generated here; no caller data allowed */
		if (nbytes != 0)
			return (DDI_FAILURE);
		if (id != NULL)
			return (DDI_FAILURE);
		/* hostid + timestamp + generation number */
		nbytes = sizeof (int) +
		    sizeof (struct timeval32) + sizeof (short);
		sz += nbytes;
		break;
	default:
		return (DDI_FAILURE);
	}

	if ((i_devid = kmem_zalloc(sz, KM_SLEEP)) == NULL)
		return (DDI_FAILURE);

	i_devid->did_magic_hi = DEVID_MAGIC_MSB;
	i_devid->did_magic_lo = DEVID_MAGIC_LSB;
	i_devid->did_rev_hi = DEVID_REV_MSB;
	i_devid->did_rev_lo = DEVID_REV_LSB;
	DEVID_FORMTYPE(i_devid, devid_type);
	DEVID_FORMLEN(i_devid, nbytes);

	/* Fill in driver name hint */
	driver_name = ddi_driver_name(dip);
	driver_len = strlen(driver_name);
	if (driver_len > DEVID_HINT_SIZE) {
		/* Pick up last four characters of driver name */
		driver_name += driver_len - DEVID_HINT_SIZE;
		driver_len = DEVID_HINT_SIZE;
	}

	bcopy(driver_name, i_devid->did_driver, driver_len);

	/* Fill in id field */
	if (devid_type == DEVID_FAB) {
		char		*cp;
		uint32_t	hostid;
		struct timeval32 timestamp32;
		int		i;
		int		*ip;
		short		gen;

		/* increase the generation number */
		mutex_enter(&devid_gen_mutex);
		gen = devid_gen_number++;
		mutex_exit(&devid_gen_mutex);

		cp = i_devid->did_id;

		/* Fill in host id (big-endian byte ordering) */
		hostid = zone_get_hostid(NULL);
		*cp++ = hibyte(hiword(hostid));
		*cp++ = lobyte(hiword(hostid));
		*cp++ = hibyte(loword(hostid));
		*cp++ = lobyte(loword(hostid));

		/*
		 * Fill in timestamp (big-endian byte ordering)
		 *
		 * (Note that the format may have to be changed
		 * before 2038 comes around, though it's arguably
		 * unique enough as it is..)
		 */
		uniqtime32(&timestamp32);
		ip = (int *)&timestamp32;
		for (i = 0;
		    i < sizeof (timestamp32) / sizeof (int); i++, ip++) {
			int	val;
			val = *ip;
			*cp++ = hibyte(hiword(val));
			*cp++ = lobyte(hiword(val));
			*cp++ = hibyte(loword(val));
			*cp++ = lobyte(loword(val));
		}

		/* fill in the generation number */
		*cp++ = hibyte(gen);
		*cp++ = lobyte(gen);
	} else
		bcopy(id, i_devid->did_id, nbytes);

	/* return device id */
	*ret_devid = (ddi_devid_t)i_devid;
	return (DDI_SUCCESS);
}

/* Return the devid registered on dip, looked up via any dev_t. */
int
ddi_devid_get(dev_info_t *dip, ddi_devid_t *ret_devid)
{
	return (i_ddi_devi_get_devid(DDI_DEV_T_ANY, dip, ret_devid));
}

/*
 * Look up the devid property on dip (dev_t-specific first, then any
 * dev_t) and decode it into binary form via *ret_devid.
 */
int
i_ddi_devi_get_devid(dev_t dev, dev_info_t *dip, ddi_devid_t *ret_devid)
{
	char		*devidstr;

	ASSERT(dev != DDI_DEV_T_NONE);

	/* look up the property, devt specific first */
	if (ddi_prop_lookup_string(dev, dip, DDI_PROP_DONTPASS,
	    DEVID_PROP_NAME, &devidstr) != DDI_PROP_SUCCESS) {
		if ((dev == DDI_DEV_T_ANY) ||
		    (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip,
		    DDI_PROP_DONTPASS, DEVID_PROP_NAME, &devidstr) !=
		    DDI_PROP_SUCCESS)) {
			return (DDI_FAILURE);
		}
	}

	/* convert to binary form */
	if (ddi_devid_str_decode(devidstr, ret_devid, NULL) == -1) {
		ddi_prop_free(devidstr);
		return (DDI_FAILURE);
	}
	ddi_prop_free(devidstr);
	return (DDI_SUCCESS);
}

/*
 * Return a copy of the device id for dev_t
 */
int
ddi_lyr_get_devid(dev_t dev, ddi_devid_t *ret_devid)
{
	dev_info_t	*dip;
	int		rval;

	/* get the dip */
	if ((dip = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
		return (DDI_FAILURE);

	rval = i_ddi_devi_get_devid(dev, dip, ret_devid);

	ddi_release_devi(dip);		/* e_ddi_hold_devi_by_dev() */
	return (rval);
}

/*
 * Return a copy of the minor name for
 * dev_t and spec_type
 */
int
ddi_lyr_get_minor_name(dev_t dev, int spec_type, char **minor_name)
{
	char		*buf;
	int		circ;
	dev_info_t	*dip;
	char		*nm;
	int		rval;

	if ((dip = e_ddi_hold_devi_by_dev(dev, 0)) == NULL) {
		*minor_name = NULL;
		return (DDI_FAILURE);
	}

	/* Find the minor name and copy into max size buf */
	buf = kmem_alloc(MAXNAMELEN, KM_SLEEP);
	ndi_devi_enter(dip, &circ);
	nm = i_ddi_devtspectype_to_minorname(dip, dev, spec_type);
	/* copy while holding the tree lock — nm points into the minor list */
	if (nm)
		(void) strcpy(buf, nm);
	ndi_devi_exit(dip, circ);
	ddi_release_devi(dip);		/* e_ddi_hold_devi_by_dev() */

	if (nm) {
		/* duplicate into min size buf for return result */
		*minor_name = i_ddi_strdup(buf, KM_SLEEP);
		rval = DDI_SUCCESS;
	} else {
		*minor_name = NULL;
		rval = DDI_FAILURE;
	}

	/* free max size buf and return */
	kmem_free(buf, MAXNAMELEN);
	return (rval);
}

/*
 * Translate a devid plus minor name into the list of matching dev_ts.
 * On a cache miss, drive devid discovery once and retry.  The caller
 * frees the returned list with ddi_lyr_free_devlist().
 */
int
ddi_lyr_devid_to_devlist(
	ddi_devid_t	devid,
	char		*minor_name,
	int		*retndevs,
	dev_t		**retdevs)
{
	ASSERT(ddi_devid_valid(devid) == DDI_SUCCESS);

	if (e_devid_cache_to_devt_list(devid, minor_name,
	    retndevs, retdevs) == DDI_SUCCESS) {
		ASSERT(*retndevs > 0);
		DDI_DEBUG_DEVID_DEVTS("ddi_lyr_devid_to_devlist",
		    *retndevs, *retdevs);
		return (DDI_SUCCESS);
	}

	if (e_ddi_devid_discovery(devid) == DDI_FAILURE) {
		return (DDI_FAILURE);
	}

	/* discovery may have populated the cache — retry the lookup */
	if (e_devid_cache_to_devt_list(devid, minor_name,
	    retndevs, retdevs) == DDI_SUCCESS) {
		ASSERT(*retndevs > 0);
		DDI_DEBUG_DEVID_DEVTS("ddi_lyr_devid_to_devlist",
		    *retndevs, *retdevs);
		return (DDI_SUCCESS);
	}

	return (DDI_FAILURE);
}

/* free a dev_t list returned by ddi_lyr_devid_to_devlist() */
void
ddi_lyr_free_devlist(dev_t *devlist, int ndevs)
{
	kmem_free(devlist, sizeof (dev_t) * ndevs);
}

/*
 * Note: This will need to be fixed if we ever
 * allow processes to
 * have more than one data model per exec.
 */
model_t
ddi_mmap_get_model(void)
{
	/* data model of the current user process */
	return (get_udatamodel());
}

model_t
ddi_model_convert_from(model_t model)
{
	return ((model & DDI_MODEL_MASK) & ~DDI_MODEL_NATIVE);
}

/*
 * ddi interfaces managing storage and retrieval of eventcookies.
 */

/*
 * Invoke bus nexus driver's implementation of the
 * (*bus_remove_eventcall)() interface to remove a registered
 * callback handler for "event".
 */
int
ddi_remove_event_handler(ddi_callback_id_t id)
{
	ndi_event_callbacks_t *cb = (ndi_event_callbacks_t *)id;
	dev_info_t *ddip;

	ASSERT(cb);
	/* defensive check for non-DEBUG kernels where ASSERT is a no-op */
	if (!cb) {
		return (DDI_FAILURE);
	}

	ddip = NDI_EVENT_DDIP(cb->ndi_evtcb_cookie);
	return (ndi_busop_remove_eventcall(ddip, id));
}

/*
 * Invoke bus nexus driver's implementation of the
 * (*bus_add_eventcall)() interface to register a callback handler
 * for "event".
 */
int
ddi_add_event_handler(dev_info_t *dip, ddi_eventcookie_t event,
    void (*handler)(dev_info_t *, ddi_eventcookie_t, void *, void *),
    void *arg, ddi_callback_id_t *id)
{
	return (ndi_busop_add_eventcall(dip, dip, event, handler, arg, id));
}


/*
 * Return a handle for event "name" by calling up the device tree
 * hierarchy via (*bus_get_eventcookie)() interface until claimed
 * by a bus nexus or top of dev_info tree is reached.
 */
int
ddi_get_eventcookie(dev_info_t *dip, char *name,
    ddi_eventcookie_t *event_cookiep)
{
	return (ndi_busop_get_eventcookie(dip, dip,
	    name, event_cookiep));
}

/*
 * This procedure is provided as the general callback function when
 * umem_lockmemory calls as_add_callback for long term memory locking.
 * When as_unmap, as_setprot, or as_free encounter segments which have
 * locked memory, this callback will be invoked.
 */
void
umem_lock_undo(struct as *as, void *arg, uint_t event)
{
	_NOTE(ARGUNUSED(as, event))
	struct ddi_umem_cookie *cp = (struct ddi_umem_cookie *)arg;

	/*
	 * Call the cleanup function.  Decrement the cookie reference
	 * count, if it goes to zero, return the memory for the cookie.
	 * The i_ddi_umem_unlock for this cookie may or may not have been
	 * called already.  It is the responsibility of the caller of
	 * umem_lockmemory to handle the case of the cleanup routine
	 * being called after a ddi_umem_unlock for the cookie
	 * was called.
	 */

	(*cp->callbacks.cbo_umem_lock_cleanup)((ddi_umem_cookie_t)cp);

	/* remove the cookie if reference goes to zero */
	if (atomic_add_long_nv((ulong_t *)(&(cp->cook_refcnt)), -1) == 0) {
		kmem_free(cp, sizeof (struct ddi_umem_cookie));
	}
}

/*
 * The following two Consolidation Private routines provide generic
 * interfaces to increase/decrease the amount of device-locked memory.
 *
 * To keep project_rele and project_hold consistent, i_ddi_decr_locked_memory()
 * must be called every time i_ddi_incr_locked_memory() is called.
 */
int
/* ARGSUSED */
i_ddi_incr_locked_memory(proc_t *procp, rctl_qty_t inc)
{
	ASSERT(procp != NULL);
	mutex_enter(&procp->p_lock);
	/* fails when the locked-memory resource control would be exceeded */
	if (rctl_incr_locked_mem(procp, NULL, inc, 1)) {
		mutex_exit(&procp->p_lock);
		return (ENOMEM);
	}
	mutex_exit(&procp->p_lock);
	return (0);
}

/*
 * To keep project_rele and project_hold consistent, i_ddi_incr_locked_memory()
 * must be called every time i_ddi_decr_locked_memory() is called.
 */
/* ARGSUSED */
void
i_ddi_decr_locked_memory(proc_t *procp, rctl_qty_t dec)
{
	/* Return "dec" bytes of locked-memory charge; inverse of incr. */
	ASSERT(procp != NULL);
	mutex_enter(&procp->p_lock);
	rctl_decr_locked_mem(procp, NULL, dec, 1);
	mutex_exit(&procp->p_lock);
}

/*
 * This routine checks if the max-locked-memory resource ctl is
 * exceeded, if not increments it, grabs a hold on the project.
 * Returns 0 if successful otherwise returns error code
 */
static int
umem_incr_devlockmem(struct ddi_umem_cookie *cookie)
{
	proc_t		*procp;
	int		ret;

	ASSERT(cookie);
	procp = cookie->procp;
	ASSERT(procp);

	/* Charge the full cookie size against the owning process */
	if ((ret = i_ddi_incr_locked_memory(procp,
	    cookie->size)) != 0) {
		return (ret);
	}
	return (0);
}

/*
 * Decrements the max-locked-memory resource ctl and releases
 * the hold on the project that was acquired during umem_incr_devlockmem
 */
static void
umem_decr_devlockmem(struct ddi_umem_cookie *cookie)
{
	proc_t		*proc;

	proc = (proc_t *)cookie->procp;
	/* Nothing to undo if the cookie never recorded a process */
	if (!proc)
		return;

	i_ddi_decr_locked_memory(proc, cookie->size);
}

/*
 * A consolidation private function which is essentially equivalent to
 * ddi_umem_lock but with the addition of arguments ops_vector and procp.
 * A call to as_add_callback is done if DDI_UMEMLOCK_LONGTERM is set, and
 * the ops_vector is valid.
 *
 * Lock the virtual address range in the current process and create a
 * ddi_umem_cookie (of type UMEM_LOCKED). This can be used to pass to
 * ddi_umem_iosetup to create a buf or do devmap_umem_setup/remap to export
 * to user space.
 *
 * Note: The resource control accounting currently uses a full charge model;
 * in other words, attempts to lock the same/overlapping areas of memory
 * will deduct the full size of the buffer from the project's running
 * counter for the device locked memory.
 *
 * addr, size should be PAGESIZE aligned
 *
 * flags - DDI_UMEMLOCK_READ, DDI_UMEMLOCK_WRITE or both
 *	identifies whether the locked memory will be read or written or both
 *	DDI_UMEMLOCK_LONGTERM must be set when the locking will
 *	be maintained for an indefinitely long period (essentially permanent),
 *	rather than for what would be required for a typical I/O completion.
 *	When DDI_UMEMLOCK_LONGTERM is set, umem_lockmemory will return EFAULT
 *	if the memory pertains to a regular file which is mapped MAP_SHARED.
 *	This is to prevent a deadlock if a file truncation is attempted
 *	after the locking is done.
 *
 * Returns 0 on success
 *	EINVAL - for invalid parameters
 *	EPERM, ENOMEM and other error codes returned by as_pagelock
 *	ENOMEM - is returned if the current request to lock memory exceeds
 *	*.max-locked-memory resource control value.
 * EFAULT - memory pertains to a regular file mapped shared
 *	and DDI_UMEMLOCK_LONGTERM flag is set
 * EAGAIN - could not start the ddi_umem_unlock list processing thread
 */
int
umem_lockmemory(caddr_t addr, size_t len, int flags, ddi_umem_cookie_t *cookie,
    struct umem_callback_ops *ops_vector,
    proc_t *procp)
{
	int	error;
	struct ddi_umem_cookie *p;
	void	(*driver_callback)() = NULL;
	struct as *as = procp->p_as;
	struct seg	*seg;
	vnode_t		*vp;

	*cookie = NULL;		/* in case of any error return */

	/* These are the only three valid flags */
	if ((flags & ~(DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE |
	    DDI_UMEMLOCK_LONGTERM)) != 0)
		return (EINVAL);

	/* At least one (can be both) of the two access flags must be set */
	if ((flags & (DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) == 0)
		return (EINVAL);

	/* addr and len must be page-aligned */
	if (((uintptr_t)addr & PAGEOFFSET) != 0)
		return (EINVAL);

	if ((len & PAGEOFFSET) != 0)
		return (EINVAL);

	/*
	 * For longterm locking a driver callback must be specified; if
	 * not longterm then a callback is optional.
	 */
	if (ops_vector != NULL) {
		if (ops_vector->cbo_umem_callback_version !=
		    UMEM_CALLBACK_VERSION)
			return (EINVAL);
		else
			driver_callback = ops_vector->cbo_umem_lock_cleanup;
	}
	if ((driver_callback == NULL) && (flags & DDI_UMEMLOCK_LONGTERM))
		return (EINVAL);

	/*
	 * Call i_ddi_umem_unlock_thread_start if necessary.  It will
	 * be called on first ddi_umem_lock or umem_lockmemory call.
	 */
	if (ddi_umem_unlock_thread == NULL)
		i_ddi_umem_unlock_thread_start();

	/* Allocate memory for the cookie */
	p = kmem_zalloc(sizeof (struct ddi_umem_cookie), KM_SLEEP);

	/* Convert the flags to seg_rw type */
	if (flags & DDI_UMEMLOCK_WRITE) {
		p->s_flags = S_WRITE;
	} else {
		p->s_flags = S_READ;
	}

	/* Store procp in cookie for later iosetup/unlock */
	p->procp = (void *)procp;

	/*
	 * Store the struct as pointer in cookie for later use by
	 * ddi_umem_unlock. The proc->p_as will be stale if ddi_umem_unlock
	 * is called after relvm is called.
	 */
	p->asp = as;

	/*
	 * The size field is needed for lockmem accounting.
	 */
	p->size = len;

	if (umem_incr_devlockmem(p) != 0) {
		/*
		 * The requested memory cannot be locked
		 */
		kmem_free(p, sizeof (struct ddi_umem_cookie));
		*cookie = (ddi_umem_cookie_t)NULL;
		return (ENOMEM);
	}

	/* Lock the pages corresponding to addr, len in memory */
	error = as_pagelock(as, &(p->pparray), addr, len, p->s_flags);
	if (error != 0) {
		umem_decr_devlockmem(p);
		kmem_free(p, sizeof (struct ddi_umem_cookie));
		*cookie = (ddi_umem_cookie_t)NULL;
		return (error);
	}

	/*
	 * For longterm locking the addr must pertain to a seg_vn segment
	 * or a seg_spt segment.
	 * If the segment pertains to a regular file, it cannot be
	 * mapped MAP_SHARED.
	 * This is to prevent a deadlock if a file truncation is attempted
	 * after the locking is done.
	 * Doing this after as_pagelock guarantees persistence of the as; if
	 * an unacceptable segment is found, the cleanup includes calling
	 * as_pageunlock before returning EFAULT.
	 */
	if (flags & DDI_UMEMLOCK_LONGTERM) {
		extern struct seg_ops segspt_shmops;
		AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
		for (seg = as_segat(as, addr); ; seg = AS_SEGNEXT(as, seg)) {
			if (seg == NULL || seg->s_base > addr + len)
				break;
			/*
			 * Reject: wrong segment driver, or a MAP_SHARED
			 * mapping of a regular file (VREG).
			 */
			if (((seg->s_ops != &segvn_ops) &&
			    (seg->s_ops != &segspt_shmops)) ||
			    ((SEGOP_GETVP(seg, addr, &vp) == 0 &&
			    vp != NULL && vp->v_type == VREG) &&
			    (SEGOP_GETTYPE(seg, addr) & MAP_SHARED))) {
				as_pageunlock(as, p->pparray,
				    addr, len, p->s_flags);
				AS_LOCK_EXIT(as, &as->a_lock);
				umem_decr_devlockmem(p);
				kmem_free(p, sizeof (struct ddi_umem_cookie));
				*cookie = (ddi_umem_cookie_t)NULL;
				return (EFAULT);
			}
		}
		AS_LOCK_EXIT(as, &as->a_lock);
	}


	/* Initialize the fields in the ddi_umem_cookie */
	p->cvaddr = addr;
	p->type = UMEM_LOCKED;
	if (driver_callback != NULL) {
		/* i_ddi_umem_unlock and umem_lock_undo may need the cookie */
		p->cook_refcnt = 2;
		p->callbacks = *ops_vector;
	} else {
		/* only i_ddi_umem_unlock needs the cookie */
		p->cook_refcnt = 1;
	}

	*cookie = (ddi_umem_cookie_t)p;

	/*
	 * If a driver callback was specified, add an entry to the
	 * as struct callback list. The as_pagelock above guarantees
	 * the persistence of as.
	 */
	if (driver_callback) {
		error = as_add_callback(as, umem_lock_undo, p, AS_ALL_EVENT,
		    addr, len, KM_SLEEP);
		if (error != 0) {
			as_pageunlock(as, p->pparray,
			    addr, len, p->s_flags);
			umem_decr_devlockmem(p);
			kmem_free(p, sizeof (struct ddi_umem_cookie));
			*cookie = (ddi_umem_cookie_t)NULL;
		}
	}
	return (error);
}

/*
 * Unlock the pages locked by ddi_umem_lock or umem_lockmemory and free
 * the cookie. Called from i_ddi_umem_unlock_thread.
 */

static void
i_ddi_umem_unlock(struct ddi_umem_cookie *p)
{
	uint_t	rc;

	/*
	 * There is no way to determine whether a callback to
	 * umem_lock_undo was registered via as_add_callback.
	 * (i.e. umem_lockmemory was called with DDI_MEMLOCK_LONGTERM and
	 * a valid callback function structure.) as_delete_callback
	 * is called to delete a possible registered callback.  If the
	 * return from as_delete_callbacks is AS_CALLBACK_DELETED, it
	 * indicates that there was a callback registered, and that it was
	 * successfully deleted.  Thus, the cookie reference count
	 * will never be decremented by umem_lock_undo.  Just return the
	 * memory for the cookie, since both users of the cookie are done.
	 * A return of AS_CALLBACK_NOTFOUND indicates a callback was
	 * never registered.  A return of AS_CALLBACK_DELETE_DEFERRED
	 * indicates that callback processing is taking place and
	 * umem_lock_undo is, or will be, executing, and thus decrementing
	 * the cookie reference count when it is complete.
	 *
	 * This needs to be done before as_pageunlock so that the
	 * persistence of as is guaranteed because of the locked pages.
	 *
	 */
	rc = as_delete_callback(p->asp, p);


	/*
	 * The proc->p_as will be stale if i_ddi_umem_unlock is called
	 * after relvm is called so use p->asp.
	 */
	as_pageunlock(p->asp, p->pparray, p->cvaddr, p->size, p->s_flags);

	/*
	 * Now that we have unlocked the memory decrement the
	 * *.max-locked-memory rctl
	 */
	umem_decr_devlockmem(p);

	if (rc == AS_CALLBACK_DELETED) {
		/* umem_lock_undo will not happen, return the cookie memory */
		ASSERT(p->cook_refcnt == 2);
		kmem_free(p, sizeof (struct ddi_umem_cookie));
	} else {
		/*
		 * umem_lock_undo may happen if as_delete_callback returned
		 * AS_CALLBACK_DELETE_DEFERRED.  In that case, decrement the
		 * reference count, atomically, and return the cookie
		 * memory if the reference count goes to zero.  The only
		 * other value for rc is AS_CALLBACK_NOTFOUND.  In that
		 * case, just return the cookie memory.
		 */
		if ((rc != AS_CALLBACK_DELETE_DEFERRED) ||
		    (atomic_add_long_nv((ulong_t *)(&(p->cook_refcnt)), -1)
		    == 0)) {
			kmem_free(p, sizeof (struct ddi_umem_cookie));
		}
	}
}

/*
 * i_ddi_umem_unlock_thread - deferred ddi_umem_unlock list handler.
 *
 * Call i_ddi_umem_unlock for entries in the ddi_umem_unlock list
 * until it is empty.  Then, wait for more to be added.  This thread is awoken
 * via calls to ddi_umem_unlock.
 */

static void
i_ddi_umem_unlock_thread(void)
{
	struct ddi_umem_cookie	*ret_cookie;
	callb_cpr_t	cprinfo;

	/* process the ddi_umem_unlock list */
	CALLB_CPR_INIT(&cprinfo, &ddi_umem_unlock_mutex,
	    callb_generic_cpr, "unlock_thread");
	for (;;) {
		mutex_enter(&ddi_umem_unlock_mutex);
		if (ddi_umem_unlock_head != NULL) {	/* list not empty */
			ret_cookie = ddi_umem_unlock_head;
			/* take it off the list */
			if ((ddi_umem_unlock_head =
			    ddi_umem_unlock_head->unl_forw) == NULL) {
				ddi_umem_unlock_tail = NULL;
			}
			/* drop the mutex before the (blocking) unlock work */
			mutex_exit(&ddi_umem_unlock_mutex);
			/* unlock the pages in this cookie */
			(void) i_ddi_umem_unlock(ret_cookie);
		} else {	/* list is empty, wait for next ddi_umem_unlock */
			CALLB_CPR_SAFE_BEGIN(&cprinfo);
			cv_wait(&ddi_umem_unlock_cv, &ddi_umem_unlock_mutex);
			CALLB_CPR_SAFE_END(&cprinfo, &ddi_umem_unlock_mutex);
			mutex_exit(&ddi_umem_unlock_mutex);
		}
	}
	/* ddi_umem_unlock_thread does not exit */
	/* NOTREACHED */
}

/*
 * Start the thread that will process the ddi_umem_unlock list if it is
 * not already started (i_ddi_umem_unlock_thread).
 */
static void
i_ddi_umem_unlock_thread_start(void)
{
	/* Serialize against concurrent first callers of ddi_umem_lock */
	mutex_enter(&ddi_umem_unlock_mutex);
	if (ddi_umem_unlock_thread == NULL) {
		ddi_umem_unlock_thread = thread_create(NULL, 0,
		    i_ddi_umem_unlock_thread, NULL, 0, &p0,
		    TS_RUN, minclsyspri);
	}
	mutex_exit(&ddi_umem_unlock_mutex);
}

/*
 * Lock the virtual address range in the current process and create a
 * ddi_umem_cookie (of type UMEM_LOCKED). This can be used to pass to
 * ddi_umem_iosetup to create a buf or do devmap_umem_setup/remap to export
 * to user space.
 *
 * Note: The resource control accounting currently uses a full charge model
 * in other words attempts to lock the same/overlapping areas of memory
 * will deduct the full size of the buffer from the projects running
 * counter for the device locked memory. This applies to umem_lockmemory too.
 *
 * addr, size should be PAGESIZE aligned
 * flags - DDI_UMEMLOCK_READ, DDI_UMEMLOCK_WRITE or both
 *	identifies whether the locked memory will be read or written or both
 *
 * Returns 0 on success
 *	EINVAL - for invalid parameters
 *	EPERM, ENOMEM and other error codes returned by as_pagelock
 *	ENOMEM - is returned if the current request to lock memory exceeds
 *	*.max-locked-memory resource control value.
 * EAGAIN - could not start the ddi_umem_unlock list processing thread
 */
int
ddi_umem_lock(caddr_t addr, size_t len, int flags, ddi_umem_cookie_t *cookie)
{
	int	error;
	struct ddi_umem_cookie *p;

	*cookie = NULL;		/* in case of any error return */

	/* These are the only two valid flags */
	if ((flags & ~(DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) != 0) {
		return (EINVAL);
	}

	/* At least one of the two flags (or both) must be set */
	if ((flags & (DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) == 0) {
		return (EINVAL);
	}

	/* addr and len must be page-aligned */
	if (((uintptr_t)addr & PAGEOFFSET) != 0) {
		return (EINVAL);
	}

	if ((len & PAGEOFFSET) != 0) {
		return (EINVAL);
	}

	/*
	 * Call i_ddi_umem_unlock_thread_start if necessary.  It will
	 * be called on first ddi_umem_lock or umem_lockmemory call.
	 */
	if (ddi_umem_unlock_thread == NULL)
		i_ddi_umem_unlock_thread_start();

	/* Allocate memory for the cookie */
	p = kmem_zalloc(sizeof (struct ddi_umem_cookie), KM_SLEEP);

	/* Convert the flags to seg_rw type */
	if (flags & DDI_UMEMLOCK_WRITE) {
		p->s_flags = S_WRITE;
	} else {
		p->s_flags = S_READ;
	}

	/* Store curproc in cookie for later iosetup/unlock */
	p->procp = (void *)curproc;

	/*
	 * Store the struct as pointer in cookie for later use by
	 * ddi_umem_unlock. The proc->p_as will be stale if ddi_umem_unlock
	 * is called after relvm is called.
	 */
	p->asp = curproc->p_as;
	/*
	 * The size field is needed for lockmem accounting.
	 */
	p->size = len;

	if (umem_incr_devlockmem(p) != 0) {
		/*
		 * The requested memory cannot be locked
		 */
		kmem_free(p, sizeof (struct ddi_umem_cookie));
		*cookie = (ddi_umem_cookie_t)NULL;
		return (ENOMEM);
	}

	/* Lock the pages corresponding to addr, len in memory */
	error = as_pagelock(((proc_t *)p->procp)->p_as, &(p->pparray),
	    addr, len, p->s_flags);
	if (error != 0) {
		umem_decr_devlockmem(p);
		kmem_free(p, sizeof (struct ddi_umem_cookie));
		*cookie = (ddi_umem_cookie_t)NULL;
		return (error);
	}

	/* Initialize the fields in the ddi_umem_cookie */
	p->cvaddr = addr;
	p->type = UMEM_LOCKED;
	/* No driver callback here, so only i_ddi_umem_unlock uses the cookie */
	p->cook_refcnt = 1;

	*cookie = (ddi_umem_cookie_t)p;
	return (error);
}

/*
 * Add the cookie to the ddi_umem_unlock list.  Pages will be
 * unlocked by i_ddi_umem_unlock_thread.
 */

void
ddi_umem_unlock(ddi_umem_cookie_t cookie)
{
	struct	ddi_umem_cookie *p = (struct ddi_umem_cookie *)cookie;

	ASSERT(p->type == UMEM_LOCKED);
	ASSERT(CPU_ON_INTR(CPU) == 0); /* cannot be high level */
	ASSERT(ddi_umem_unlock_thread != NULL);

	p->unl_forw = (struct ddi_umem_cookie *)NULL;	/* end of list */
	/*
	 * Queue the unlock request and notify i_ddi_umem_unlock thread
	 * if it's called in the interrupt context. Otherwise, unlock pages
	 * immediately.
	 */
	if (servicing_interrupt()) {
		/* queue the unlock request and notify the thread */
		mutex_enter(&ddi_umem_unlock_mutex);
		if (ddi_umem_unlock_head == NULL) {
			ddi_umem_unlock_head = ddi_umem_unlock_tail = p;
			cv_broadcast(&ddi_umem_unlock_cv);
		} else {
			ddi_umem_unlock_tail->unl_forw = p;
			ddi_umem_unlock_tail = p;
		}
		mutex_exit(&ddi_umem_unlock_mutex);
	} else {
		/* unlock the pages right away */
		(void) i_ddi_umem_unlock(p);
	}
}

/*
 * Create a buf structure from a ddi_umem_cookie
 * cookie - is a ddi_umem_cookie for from ddi_umem_lock and ddi_umem_alloc
 *		(only UMEM_LOCKED & KMEM_NON_PAGEABLE types supported)
 * off, len - identifies the portion of the memory represented by the cookie
 *		that the buf points to.
 *	NOTE: off, len need to follow the alignment/size restrictions of the
 *		device (dev) that this buf will be passed to. Some devices
 *		will accept unrestricted alignment/size, whereas others (such as
 *		st) require some block-size alignment/size. It is the caller's
 *		responsibility to ensure that the alignment/size restrictions
 *		are met (we cannot assert as we do not know the restrictions)
 *
 * direction - is one of B_READ or B_WRITE and needs to be compatible with
 *		the flags used in ddi_umem_lock
 *
 * The following three arguments are used to initialize fields in the
 * buf structure and are uninterpreted by this routine.
 *
 * dev
 * blkno
 * iodone
 *
 * sleepflag - is one of DDI_UMEM_SLEEP or DDI_UMEM_NOSLEEP
 *
 * Returns a buf structure pointer on success (to be freed by freerbuf)
 *	NULL on any parameter error or memory alloc failure
 *
 */
struct buf *
ddi_umem_iosetup(ddi_umem_cookie_t cookie, off_t off, size_t len,
    int direction, dev_t dev, daddr_t blkno,
    int (*iodone)(struct buf *), int sleepflag)
{
	struct ddi_umem_cookie *p = (struct ddi_umem_cookie *)cookie;
	struct buf *bp;

	/*
	 * check for valid cookie offset, len
	 */
	if ((off + len) > p->size) {
		return (NULL);
	}

	if (len > p->size) {
		return (NULL);
	}

	/* direction has to be one of B_READ or B_WRITE */
	if ((direction != B_READ) && (direction != B_WRITE)) {
		return (NULL);
	}

	/* These are the only two valid sleepflags */
	if ((sleepflag != DDI_UMEM_SLEEP) && (sleepflag != DDI_UMEM_NOSLEEP)) {
		return (NULL);
	}

	/*
	 * Only cookies of type UMEM_LOCKED and KMEM_NON_PAGEABLE are supported
	 */
	if ((p->type != UMEM_LOCKED) && (p->type != KMEM_NON_PAGEABLE)) {
		return (NULL);
	}

	/* If type is KMEM_NON_PAGEABLE procp is NULL */
	ASSERT((p->type == KMEM_NON_PAGEABLE) ?
	    (p->procp == NULL) : (p->procp != NULL));

	bp = kmem_alloc(sizeof (struct buf), sleepflag);
	if (bp == NULL) {
		/* only possible with DDI_UMEM_NOSLEEP */
		return (NULL);
	}
	bioinit(bp);

	bp->b_flags = B_BUSY | B_PHYS | direction;
	bp->b_edev = dev;
	bp->b_lblkno = blkno;
	bp->b_iodone = iodone;
	bp->b_bcount = len;
	bp->b_proc = (proc_t *)p->procp;
	ASSERT(((uintptr_t)(p->cvaddr) & PAGEOFFSET) == 0);
	bp->b_un.b_addr = (caddr_t)((uintptr_t)(p->cvaddr) + off);
	if (p->pparray != NULL) {
		/* pre-locked page list: export it as a shadow buf */
		bp->b_flags |= B_SHADOW;
		ASSERT(((uintptr_t)(p->cvaddr) & PAGEOFFSET) == 0);
		bp->b_shadow = p->pparray + btop(off);
	}
	return (bp);
}

/*
 * Fault-handling and related routines
 */

/*
 * Map the devinfo node's state flags onto the coarse DDI device-state
 * enumeration; checks are ordered from most to least severe.
 */
ddi_devstate_t
ddi_get_devstate(dev_info_t *dip)
{
	if (DEVI_IS_DEVICE_OFFLINE(dip))
		return (DDI_DEVSTATE_OFFLINE);
	else if (DEVI_IS_DEVICE_DOWN(dip) || DEVI_IS_BUS_DOWN(dip))
		return (DDI_DEVSTATE_DOWN);
	else if (DEVI_IS_BUS_QUIESCED(dip))
		return (DDI_DEVSTATE_QUIESCED);
	else if (DEVI_IS_DEVICE_DEGRADED(dip))
		return (DDI_DEVSTATE_DEGRADED);
	else
		return (DDI_DEVSTATE_UP);
}

void
ddi_dev_report_fault(dev_info_t *dip, ddi_fault_impact_t impact,
    ddi_fault_location_t location, const char *message)
{
	struct ddi_fault_event_data fd;
	ddi_eventcookie_t ec;

	/*
	 * Assemble all the information into a fault-event-data structure
	 */
	fd.f_dip = dip;
	fd.f_impact = impact;
	fd.f_location = location;
	fd.f_message = message;
	fd.f_oldstate = ddi_get_devstate(dip);

	/*
	 * Get eventcookie from defining parent.
8806 */ 8807 if (ddi_get_eventcookie(dip, DDI_DEVI_FAULT_EVENT, &ec) != 8808 DDI_SUCCESS) 8809 return; 8810 8811 (void) ndi_post_event(dip, dip, ec, &fd); 8812 } 8813 8814 char * 8815 i_ddi_devi_class(dev_info_t *dip) 8816 { 8817 return (DEVI(dip)->devi_device_class); 8818 } 8819 8820 int 8821 i_ddi_set_devi_class(dev_info_t *dip, char *devi_class, int flag) 8822 { 8823 struct dev_info *devi = DEVI(dip); 8824 8825 mutex_enter(&devi->devi_lock); 8826 8827 if (devi->devi_device_class) 8828 kmem_free(devi->devi_device_class, 8829 strlen(devi->devi_device_class) + 1); 8830 8831 if ((devi->devi_device_class = i_ddi_strdup(devi_class, flag)) 8832 != NULL) { 8833 mutex_exit(&devi->devi_lock); 8834 return (DDI_SUCCESS); 8835 } 8836 8837 mutex_exit(&devi->devi_lock); 8838 8839 return (DDI_FAILURE); 8840 } 8841 8842 8843 /* 8844 * Task Queues DDI interfaces. 8845 */ 8846 8847 /* ARGSUSED */ 8848 ddi_taskq_t * 8849 ddi_taskq_create(dev_info_t *dip, const char *name, int nthreads, 8850 pri_t pri, uint_t cflags) 8851 { 8852 char full_name[TASKQ_NAMELEN]; 8853 const char *tq_name; 8854 int nodeid = 0; 8855 8856 if (dip == NULL) 8857 tq_name = name; 8858 else { 8859 nodeid = ddi_get_instance(dip); 8860 8861 if (name == NULL) 8862 name = "tq"; 8863 8864 (void) snprintf(full_name, sizeof (full_name), "%s_%s", 8865 ddi_driver_name(dip), name); 8866 8867 tq_name = full_name; 8868 } 8869 8870 return ((ddi_taskq_t *)taskq_create_instance(tq_name, nodeid, nthreads, 8871 pri == TASKQ_DEFAULTPRI ? minclsyspri : pri, 8872 nthreads, INT_MAX, TASKQ_PREPOPULATE)); 8873 } 8874 8875 void 8876 ddi_taskq_destroy(ddi_taskq_t *tq) 8877 { 8878 taskq_destroy((taskq_t *)tq); 8879 } 8880 8881 int 8882 ddi_taskq_dispatch(ddi_taskq_t *tq, void (* func)(void *), 8883 void *arg, uint_t dflags) 8884 { 8885 taskqid_t id = taskq_dispatch((taskq_t *)tq, func, arg, 8886 dflags == DDI_SLEEP ? TQ_SLEEP : TQ_NOSLEEP); 8887 8888 return (id != 0 ? 
DDI_SUCCESS : DDI_FAILURE); 8889 } 8890 8891 void 8892 ddi_taskq_wait(ddi_taskq_t *tq) 8893 { 8894 taskq_wait((taskq_t *)tq); 8895 } 8896 8897 void 8898 ddi_taskq_suspend(ddi_taskq_t *tq) 8899 { 8900 taskq_suspend((taskq_t *)tq); 8901 } 8902 8903 boolean_t 8904 ddi_taskq_suspended(ddi_taskq_t *tq) 8905 { 8906 return (taskq_suspended((taskq_t *)tq)); 8907 } 8908 8909 void 8910 ddi_taskq_resume(ddi_taskq_t *tq) 8911 { 8912 taskq_resume((taskq_t *)tq); 8913 } 8914 8915 int 8916 ddi_parse( 8917 const char *ifname, 8918 char *alnum, 8919 uint_t *nump) 8920 { 8921 const char *p; 8922 int l; 8923 ulong_t num; 8924 boolean_t nonum = B_TRUE; 8925 char c; 8926 8927 l = strlen(ifname); 8928 for (p = ifname + l; p != ifname; l--) { 8929 c = *--p; 8930 if (!isdigit(c)) { 8931 (void) strlcpy(alnum, ifname, l + 1); 8932 if (ddi_strtoul(p + 1, NULL, 10, &num) != 0) 8933 return (DDI_FAILURE); 8934 break; 8935 } 8936 nonum = B_FALSE; 8937 } 8938 if (l == 0 || nonum) 8939 return (DDI_FAILURE); 8940 8941 *nump = num; 8942 return (DDI_SUCCESS); 8943 } 8944 8945 /* 8946 * Default initialization function for drivers that don't need to quiesce. 8947 */ 8948 /* ARGSUSED */ 8949 int 8950 ddi_quiesce_not_needed(dev_info_t *dip) 8951 { 8952 return (DDI_SUCCESS); 8953 } 8954 8955 /* 8956 * Initialization function for drivers that should implement quiesce() 8957 * but haven't yet. 8958 */ 8959 /* ARGSUSED */ 8960 int 8961 ddi_quiesce_not_supported(dev_info_t *dip) 8962 { 8963 return (DDI_FAILURE); 8964 } 8965 8966 /* 8967 * Generic DDI callback interfaces. 
 */

/*
 * Register the single per-devinfo generic DDI callback.  Validates the
 * arguments (ASSERTs in DEBUG, runtime checks otherwise), refuses
 * interrupt context, and rejects a second registration on the same dip.
 * On success *ret_hdlp receives an opaque handle for ddi_cb_unregister.
 */
int
ddi_cb_register(dev_info_t *dip, ddi_cb_flags_t flags, ddi_cb_func_t cbfunc,
    void *arg1, void *arg2, ddi_cb_handle_t *ret_hdlp)
{
	ddi_cb_t	*cbp;

	ASSERT(dip != NULL);
	ASSERT(DDI_CB_FLAG_VALID(flags));
	ASSERT(cbfunc != NULL);
	ASSERT(ret_hdlp != NULL);

	/* Sanity check the context */
	ASSERT(!servicing_interrupt());
	if (servicing_interrupt())
		return (DDI_FAILURE);

	/* Validate parameters */
	if ((dip == NULL) || !DDI_CB_FLAG_VALID(flags) ||
	    (cbfunc == NULL) || (ret_hdlp == NULL))
		return (DDI_EINVAL);

	/* Check for previous registration */
	if (DEVI(dip)->devi_cb_p != NULL)
		return (DDI_EALREADY);

	/* Allocate and initialize callback */
	cbp = kmem_zalloc(sizeof (ddi_cb_t), KM_SLEEP);
	cbp->cb_dip = dip;
	cbp->cb_func = cbfunc;
	cbp->cb_arg1 = arg1;
	cbp->cb_arg2 = arg2;
	cbp->cb_flags = flags;
	DEVI(dip)->devi_cb_p = cbp;

	/* If adding an IRM callback, notify IRM */
	if (flags & DDI_CB_FLAG_INTR)
		i_ddi_irm_set_cb(dip, B_TRUE);

	/* handle is the address of the devinfo's callback pointer */
	*ret_hdlp = (ddi_cb_handle_t)&(DEVI(dip)->devi_cb_p);
	return (DDI_SUCCESS);
}

/*
 * Tear down a callback registered via ddi_cb_register.  Refuses
 * interrupt context; notifies IRM first if the callback handled
 * interrupt-resource events, then frees the callback structure.
 */
int
ddi_cb_unregister(ddi_cb_handle_t hdl)
{
	ddi_cb_t	*cbp;
	dev_info_t	*dip;

	ASSERT(hdl != NULL);

	/* Sanity check the context */
	ASSERT(!servicing_interrupt());
	if (servicing_interrupt())
		return (DDI_FAILURE);

	/* Validate parameters */
	if ((hdl == NULL) || ((cbp = *(ddi_cb_t **)hdl) == NULL) ||
	    ((dip = cbp->cb_dip) == NULL))
		return (DDI_EINVAL);

	/* If removing an IRM callback, notify IRM */
	if (cbp->cb_flags & DDI_CB_FLAG_INTR)
		i_ddi_irm_set_cb(dip, B_FALSE);

	/* Destroy the callback */
	kmem_free(cbp, sizeof (ddi_cb_t));
	DEVI(dip)->devi_cb_p = NULL;

	return (DDI_SUCCESS);
}