1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2007 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 #pragma ident "%Z%%M% %I% %E% SMI" 28 29 #include <sys/note.h> 30 #include <sys/types.h> 31 #include <sys/param.h> 32 #include <sys/systm.h> 33 #include <sys/buf.h> 34 #include <sys/uio.h> 35 #include <sys/cred.h> 36 #include <sys/poll.h> 37 #include <sys/mman.h> 38 #include <sys/kmem.h> 39 #include <sys/model.h> 40 #include <sys/file.h> 41 #include <sys/proc.h> 42 #include <sys/open.h> 43 #include <sys/user.h> 44 #include <sys/t_lock.h> 45 #include <sys/vm.h> 46 #include <sys/stat.h> 47 #include <vm/hat.h> 48 #include <vm/seg.h> 49 #include <vm/seg_vn.h> 50 #include <vm/seg_dev.h> 51 #include <vm/as.h> 52 #include <sys/cmn_err.h> 53 #include <sys/cpuvar.h> 54 #include <sys/debug.h> 55 #include <sys/autoconf.h> 56 #include <sys/sunddi.h> 57 #include <sys/esunddi.h> 58 #include <sys/sunndi.h> 59 #include <sys/kstat.h> 60 #include <sys/conf.h> 61 #include <sys/ddi_impldefs.h> /* include implementation structure defs */ 62 #include <sys/ndi_impldefs.h> /* include prototypes */ 63 #include <sys/ddi_timer.h> 64 
#include <sys/hwconf.h> 65 #include <sys/pathname.h> 66 #include <sys/modctl.h> 67 #include <sys/epm.h> 68 #include <sys/devctl.h> 69 #include <sys/callb.h> 70 #include <sys/cladm.h> 71 #include <sys/sysevent.h> 72 #include <sys/dacf_impl.h> 73 #include <sys/ddidevmap.h> 74 #include <sys/bootconf.h> 75 #include <sys/disp.h> 76 #include <sys/atomic.h> 77 #include <sys/promif.h> 78 #include <sys/instance.h> 79 #include <sys/sysevent/eventdefs.h> 80 #include <sys/task.h> 81 #include <sys/project.h> 82 #include <sys/taskq.h> 83 #include <sys/devpolicy.h> 84 #include <sys/ctype.h> 85 #include <net/if.h> 86 #include <sys/rctl.h> 87 88 extern pri_t minclsyspri; 89 90 extern rctl_hndl_t rc_project_locked_mem; 91 extern rctl_hndl_t rc_zone_locked_mem; 92 93 #ifdef DEBUG 94 static int sunddi_debug = 0; 95 #endif /* DEBUG */ 96 97 /* ddi_umem_unlock miscellaneous */ 98 99 static void i_ddi_umem_unlock_thread_start(void); 100 101 static kmutex_t ddi_umem_unlock_mutex; /* unlock list mutex */ 102 static kcondvar_t ddi_umem_unlock_cv; /* unlock list block/unblock */ 103 static kthread_t *ddi_umem_unlock_thread; 104 /* 105 * The ddi_umem_unlock FIFO list. NULL head pointer indicates empty list. 106 */ 107 static struct ddi_umem_cookie *ddi_umem_unlock_head = NULL; 108 static struct ddi_umem_cookie *ddi_umem_unlock_tail = NULL; 109 110 111 /* 112 * DDI(Sun) Function and flag definitions: 113 */ 114 115 #if defined(__x86) 116 /* 117 * Used to indicate which entries were chosen from a range. 118 */ 119 char *chosen_reg = "chosen-reg"; 120 #endif 121 122 /* 123 * Function used to ring system console bell 124 */ 125 void (*ddi_console_bell_func)(clock_t duration); 126 127 /* 128 * Creating register mappings and handling interrupts: 129 */ 130 131 /* 132 * Generic ddi_map: Call parent to fulfill request... 
 */

int
ddi_map(dev_info_t *dp, ddi_map_req_t *mp, off_t offset,
    off_t len, caddr_t *addrp)
{
	dev_info_t *pdip;

	/* Delegate the mapping request to the parent nexus's bus_map op. */
	ASSERT(dp);
	pdip = (dev_info_t *)DEVI(dp)->devi_parent;
	return ((DEVI(pdip)->devi_ops->devo_bus_ops->bus_map)(pdip,
	    dp, mp, offset, len, addrp));
}

/*
 * ddi_apply_range: (Called by nexi only.)
 * Apply ranges in parent node dp, to child regspec rp...
 */

int
ddi_apply_range(dev_info_t *dp, dev_info_t *rdip, struct regspec *rp)
{
	return (i_ddi_apply_range(dp, rdip, rp));
}

/*
 * Map register set 'rnumber' of 'dip' into kernel virtual memory.
 * On x86 a single 'chosen-reg' property describing exactly the tuple
 * being mapped is first published on the node (see chosen_reg above);
 * mapping itself is then delegated to the parent via ddi_map().
 */
int
ddi_map_regs(dev_info_t *dip, uint_t rnumber, caddr_t *kaddrp, off_t offset,
    off_t len)
{
	ddi_map_req_t mr;
#if defined(__x86)
	struct {
		int bus;
		int addr;
		int size;
	} reg, *reglist;
	uint_t length;
	int rc;

	/*
	 * get the 'registers' or the 'reg' property.
	 * We look up the reg property as an array of
	 * int's.
	 */
	rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "registers", (int **)&reglist, &length);
	if (rc != DDI_PROP_SUCCESS)
		rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
		    DDI_PROP_DONTPASS, "reg", (int **)&reglist, &length);
	if (rc == DDI_PROP_SUCCESS) {
		/*
		 * point to the required entry.
		 * NOTE(review): rnumber is assumed to be a valid index
		 * into reglist (length/3 entries) - not range-checked here.
		 */
		reg = reglist[rnumber];
		reg.addr += offset;
		if (len != 0)
			reg.size = len;
		/*
		 * make a new property containing ONLY the required tuple.
		 */
		if (ddi_prop_update_int_array(DDI_DEV_T_NONE, dip,
		    chosen_reg, (int *)&reg, (sizeof (reg)/sizeof (int)))
		    != DDI_PROP_SUCCESS) {
			cmn_err(CE_WARN, "%s%d: cannot create '%s' "
			    "property", DEVI(dip)->devi_name,
			    DEVI(dip)->devi_instance, chosen_reg);
		}
		/*
		 * free the memory allocated by
		 * ddi_prop_lookup_int_array ().
		 */
		ddi_prop_free((void *)reglist);
	}
#endif
	mr.map_op = DDI_MO_MAP_LOCKED;
	mr.map_type = DDI_MT_RNUMBER;
	mr.map_obj.rnumber = rnumber;
	mr.map_prot = PROT_READ | PROT_WRITE;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_handlep = NULL;
	mr.map_vers = DDI_MAP_VERSION;

	/*
	 * Call my parent to map in my regs.
	 */

	return (ddi_map(dip, &mr, offset, len, kaddrp));
}

/*
 * Undo a ddi_map_regs(): unmap the registers, NULL the caller's pointer,
 * and (x86) remove the 'chosen-reg' property published at map time.
 */
void
ddi_unmap_regs(dev_info_t *dip, uint_t rnumber, caddr_t *kaddrp, off_t offset,
    off_t len)
{
	ddi_map_req_t mr;

	mr.map_op = DDI_MO_UNMAP;
	mr.map_type = DDI_MT_RNUMBER;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_prot = PROT_READ | PROT_WRITE;	/* who cares? */
	mr.map_obj.rnumber = rnumber;
	mr.map_handlep = NULL;
	mr.map_vers = DDI_MAP_VERSION;

	/*
	 * Call my parent to unmap my regs.
	 */

	(void) ddi_map(dip, &mr, offset, len, kaddrp);
	*kaddrp = (caddr_t)0;
#if defined(__x86)
	(void) ddi_prop_remove(DDI_DEV_T_NONE, dip, chosen_reg);
#endif
}

int
ddi_bus_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
    off_t offset, off_t len, caddr_t *vaddrp)
{
	return (i_ddi_bus_map(dip, rdip, mp, offset, len, vaddrp));
}

/*
 * nullbusmap:	The/DDI default bus_map entry point for nexi
 *		not conforming to the reg/range paradigm (i.e. scsi, etc.)
 *		with no HAT/MMU layer to be programmed at this level.
 *
 *		If the call is to map by rnumber, return an error,
 *		otherwise pass anything else up the tree to my parent.
 */
int
nullbusmap(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
    off_t offset, off_t len, caddr_t *vaddrp)
{
	_NOTE(ARGUNUSED(rdip))
	if (mp->map_type == DDI_MT_RNUMBER)
		return (DDI_ME_UNSUPPORTED);

	return (ddi_map(dip, mp, offset, len, vaddrp));
}

/*
 * ddi_rnumber_to_regspec: Not for use by leaf drivers.
275 * Only for use by nexi using the reg/range paradigm. 276 */ 277 struct regspec * 278 ddi_rnumber_to_regspec(dev_info_t *dip, int rnumber) 279 { 280 return (i_ddi_rnumber_to_regspec(dip, rnumber)); 281 } 282 283 284 /* 285 * Note that we allow the dip to be nil because we may be called 286 * prior even to the instantiation of the devinfo tree itself - all 287 * regular leaf and nexus drivers should always use a non-nil dip! 288 * 289 * We treat peek in a somewhat cavalier fashion .. assuming that we'll 290 * simply get a synchronous fault as soon as we touch a missing address. 291 * 292 * Poke is rather more carefully handled because we might poke to a write 293 * buffer, "succeed", then only find some time later that we got an 294 * asynchronous fault that indicated that the address we were writing to 295 * was not really backed by hardware. 296 */ 297 298 static int 299 i_ddi_peekpoke(dev_info_t *devi, ddi_ctl_enum_t cmd, size_t size, 300 void *addr, void *value_p) 301 { 302 union { 303 uint64_t u64; 304 uint32_t u32; 305 uint16_t u16; 306 uint8_t u8; 307 } peekpoke_value; 308 309 peekpoke_ctlops_t peekpoke_args; 310 uint64_t dummy_result; 311 int rval; 312 313 /* Note: size is assumed to be correct; it is not checked. 
*/ 314 peekpoke_args.size = size; 315 peekpoke_args.dev_addr = (uintptr_t)addr; 316 peekpoke_args.handle = NULL; 317 peekpoke_args.repcount = 1; 318 peekpoke_args.flags = 0; 319 320 if (cmd == DDI_CTLOPS_POKE) { 321 switch (size) { 322 case sizeof (uint8_t): 323 peekpoke_value.u8 = *(uint8_t *)value_p; 324 break; 325 case sizeof (uint16_t): 326 peekpoke_value.u16 = *(uint16_t *)value_p; 327 break; 328 case sizeof (uint32_t): 329 peekpoke_value.u32 = *(uint32_t *)value_p; 330 break; 331 case sizeof (uint64_t): 332 peekpoke_value.u64 = *(uint64_t *)value_p; 333 break; 334 } 335 } 336 337 peekpoke_args.host_addr = (uintptr_t)&peekpoke_value.u64; 338 339 if (devi != NULL) 340 rval = ddi_ctlops(devi, devi, cmd, &peekpoke_args, 341 &dummy_result); 342 else 343 rval = peekpoke_mem(cmd, &peekpoke_args); 344 345 /* 346 * A NULL value_p is permitted by ddi_peek(9F); discard the result. 347 */ 348 if ((cmd == DDI_CTLOPS_PEEK) & (value_p != NULL)) { 349 switch (size) { 350 case sizeof (uint8_t): 351 *(uint8_t *)value_p = peekpoke_value.u8; 352 break; 353 case sizeof (uint16_t): 354 *(uint16_t *)value_p = peekpoke_value.u16; 355 break; 356 case sizeof (uint32_t): 357 *(uint32_t *)value_p = peekpoke_value.u32; 358 break; 359 case sizeof (uint64_t): 360 *(uint64_t *)value_p = peekpoke_value.u64; 361 break; 362 } 363 } 364 365 return (rval); 366 } 367 368 /* 369 * Keep ddi_peek() and ddi_poke() in case 3rd parties are calling this. 370 * they shouldn't be, but the 9f manpage kind of pseudo exposes it. 
371 */ 372 int 373 ddi_peek(dev_info_t *devi, size_t size, void *addr, void *value_p) 374 { 375 switch (size) { 376 case sizeof (uint8_t): 377 case sizeof (uint16_t): 378 case sizeof (uint32_t): 379 case sizeof (uint64_t): 380 break; 381 default: 382 return (DDI_FAILURE); 383 } 384 385 return (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, size, addr, value_p)); 386 } 387 388 int 389 ddi_poke(dev_info_t *devi, size_t size, void *addr, void *value_p) 390 { 391 switch (size) { 392 case sizeof (uint8_t): 393 case sizeof (uint16_t): 394 case sizeof (uint32_t): 395 case sizeof (uint64_t): 396 break; 397 default: 398 return (DDI_FAILURE); 399 } 400 401 return (i_ddi_peekpoke(devi, DDI_CTLOPS_POKE, size, addr, value_p)); 402 } 403 404 int 405 ddi_peek8(dev_info_t *dip, int8_t *addr, int8_t *val_p) 406 { 407 return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr, 408 val_p)); 409 } 410 411 int 412 ddi_peek16(dev_info_t *dip, int16_t *addr, int16_t *val_p) 413 { 414 return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr, 415 val_p)); 416 } 417 418 int 419 ddi_peek32(dev_info_t *dip, int32_t *addr, int32_t *val_p) 420 { 421 return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr, 422 val_p)); 423 } 424 425 int 426 ddi_peek64(dev_info_t *dip, int64_t *addr, int64_t *val_p) 427 { 428 return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr, 429 val_p)); 430 } 431 432 433 /* 434 * We need to separate the old interfaces from the new ones and leave them 435 * in here for a while. Previous versions of the OS defined the new interfaces 436 * to the old interfaces. This way we can fix things up so that we can 437 * eventually remove these interfaces. 438 * e.g. A 3rd party module/driver using ddi_peek8 and built against S10 439 * or earlier will actually have a reference to ddi_peekc in the binary. 
440 */ 441 #ifdef _ILP32 442 int 443 ddi_peekc(dev_info_t *dip, int8_t *addr, int8_t *val_p) 444 { 445 return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr, 446 val_p)); 447 } 448 449 int 450 ddi_peeks(dev_info_t *dip, int16_t *addr, int16_t *val_p) 451 { 452 return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr, 453 val_p)); 454 } 455 456 int 457 ddi_peekl(dev_info_t *dip, int32_t *addr, int32_t *val_p) 458 { 459 return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr, 460 val_p)); 461 } 462 463 int 464 ddi_peekd(dev_info_t *dip, int64_t *addr, int64_t *val_p) 465 { 466 return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr, 467 val_p)); 468 } 469 #endif /* _ILP32 */ 470 471 int 472 ddi_poke8(dev_info_t *dip, int8_t *addr, int8_t val) 473 { 474 return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val)); 475 } 476 477 int 478 ddi_poke16(dev_info_t *dip, int16_t *addr, int16_t val) 479 { 480 return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val)); 481 } 482 483 int 484 ddi_poke32(dev_info_t *dip, int32_t *addr, int32_t val) 485 { 486 return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val)); 487 } 488 489 int 490 ddi_poke64(dev_info_t *dip, int64_t *addr, int64_t val) 491 { 492 return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val)); 493 } 494 495 /* 496 * We need to separate the old interfaces from the new ones and leave them 497 * in here for a while. Previous versions of the OS defined the new interfaces 498 * to the old interfaces. This way we can fix things up so that we can 499 * eventually remove these interfaces. 500 * e.g. A 3rd party module/driver using ddi_poke8 and built against S10 501 * or earlier will actually have a reference to ddi_pokec in the binary. 
 */
#ifdef _ILP32
int
ddi_pokec(dev_info_t *dip, int8_t *addr, int8_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}

int
ddi_pokes(dev_info_t *dip, int16_t *addr, int16_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}

int
ddi_pokel(dev_info_t *dip, int32_t *addr, int32_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}

int
ddi_poked(dev_info_t *dip, int64_t *addr, int64_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}
#endif /* _ILP32 */

/*
 * ddi_peekpokeio() is used primarily by the mem drivers for moving
 * data to and from uio structures via peek and poke.  Note that we
 * use "internal" routines ddi_peek and ddi_poke to make this go
 * slightly faster, avoiding the call overhead ..
 */
int
ddi_peekpokeio(dev_info_t *devi, struct uio *uio, enum uio_rw rw,
    caddr_t addr, size_t len, uint_t xfersize)
{
	int64_t	ibuffer;
	int8_t w8;
	size_t sz;
	int o;

	/* Clamp the per-iteration quantum to the native word size. */
	if (xfersize > sizeof (long))
		xfersize = sizeof (long);

	while (len != 0) {
		if ((len | (uintptr_t)addr) & 1) {
			/* Odd address or odd residual: one byte at a time. */
			sz = sizeof (int8_t);
			if (rw == UIO_WRITE) {
				if ((o = uwritec(uio)) == -1)
					return (DDI_FAILURE);
				if (ddi_poke8(devi, (int8_t *)addr,
				    (int8_t)o) != DDI_SUCCESS)
					return (DDI_FAILURE);
			} else {
				if (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, sz,
				    (int8_t *)addr, &w8) != DDI_SUCCESS)
					return (DDI_FAILURE);
				if (ureadc(w8, uio))
					return (DDI_FAILURE);
			}
		} else {
			/*
			 * Pick the widest transfer size (<= xfersize) for
			 * which both the remaining length and the address
			 * are suitably aligned; cases deliberately fall
			 * through to the next narrower width.
			 */
			switch (xfersize) {
			case sizeof (int64_t):
				if (((len | (uintptr_t)addr) &
				    (sizeof (int64_t) - 1)) == 0) {
					sz = xfersize;
					break;
				}
				/*FALLTHROUGH*/
			case sizeof (int32_t):
				if (((len | (uintptr_t)addr) &
				    (sizeof (int32_t) - 1)) == 0) {
					sz = xfersize;
					break;
				}
				/*FALLTHROUGH*/
			default:
				/*
				 * This still assumes that we might have an
				 * I/O bus out there that permits 16-bit
				 * transfers (and that it would be upset by
				 * 32-bit transfers from such locations).
				 */
				sz = sizeof (int16_t);
				break;
			}

			if (rw == UIO_READ) {
				if (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, sz,
				    addr, &ibuffer) != DDI_SUCCESS)
					return (DDI_FAILURE);
			}

			/* Move between ibuffer and the caller's uio. */
			if (uiomove(&ibuffer, sz, rw, uio))
				return (DDI_FAILURE);

			if (rw == UIO_WRITE) {
				if (i_ddi_peekpoke(devi, DDI_CTLOPS_POKE, sz,
				    addr, &ibuffer) != DDI_SUCCESS)
					return (DDI_FAILURE);
			}
		}
		addr += sz;
		len -= sz;
	}
	return (DDI_SUCCESS);
}

/*
 * These routines are used by drivers that do layered ioctls
 * On sparc, they're implemented in assembler to avoid spilling
 * register windows in the common (copyin)  case ..
 */
#if !defined(__sparc)
int
ddi_copyin(const void *buf, void *kernbuf, size_t size, int flags)
{
	/* FKIOCTL: ioctl originated in the kernel, so a plain kcopy works. */
	if (flags & FKIOCTL)
		return (kcopy(buf, kernbuf, size) ? -1 : 0);
	return (copyin(buf, kernbuf, size));
}

int
ddi_copyout(const void *buf, void *kernbuf, size_t size, int flags)
{
	/* FKIOCTL: ioctl originated in the kernel, so a plain kcopy works. */
	if (flags & FKIOCTL)
		return (kcopy(buf, kernbuf, size) ? -1 : 0);
	return (copyout(buf, kernbuf, size));
}
#endif	/* !__sparc */

/*
 * Conversions in nexus pagesize units.  We don't duplicate the
 * 'nil dip' semantics of peek/poke because btopr/btop/ptob are DDI/DKI
 * routines anyway.
 */
unsigned long
ddi_btop(dev_info_t *dip, unsigned long bytes)
{
	unsigned long pages;

	/* Ask the parent nexus to convert bytes to pages (truncating). */
	(void) ddi_ctlops(dip, dip, DDI_CTLOPS_BTOP, &bytes, &pages);
	return (pages);
}

unsigned long
ddi_btopr(dev_info_t *dip, unsigned long bytes)
{
	unsigned long pages;

	/* Bytes to pages, rounding up any partial page. */
	(void) ddi_ctlops(dip, dip, DDI_CTLOPS_BTOPR, &bytes, &pages);
	return (pages);
}

unsigned long
ddi_ptob(dev_info_t *dip, unsigned long pages)
{
	unsigned long bytes;

	(void) ddi_ctlops(dip, dip, DDI_CTLOPS_PTOB, &pages, &bytes);
	return (bytes);
}

unsigned int
ddi_enter_critical(void)
{
	/* Raise spl to level 7; caller must pass the result to exit. */
	return ((uint_t)spl7());
}

void
ddi_exit_critical(unsigned int spl)
{
	/* Restore the spl saved by ddi_enter_critical(). */
	splx((int)spl);
}

/*
 * Nexus ctlops punter
 */

#if !defined(__sparc)
/*
 * Request bus_ctl parent to handle a bus_ctl request
 *
 * (The sparc version is in sparc_ddi.s)
 */
int
ddi_ctlops(dev_info_t *d, dev_info_t *r, ddi_ctl_enum_t op, void *a, void *v)
{
	int (*fp)();

	if (!d || !r)
		return (DDI_FAILURE);

	/* devi_bus_ctl caches the nearest ancestor implementing bus_ctl. */
	if ((d = (dev_info_t *)DEVI(d)->devi_bus_ctl) == NULL)
		return (DDI_FAILURE);

	fp = DEVI(d)->devi_ops->devo_bus_ops->bus_ctl;
	return ((*fp)(d, r, op, a, v));
}

#endif

/*
 * DMA/DVMA setup
 */

#if defined(__sparc)
static ddi_dma_lim_t standard_limits = {
	(uint_t)0,	/* addr_t dlim_addr_lo */
	(uint_t)-1,	/* addr_t dlim_addr_hi */
	(uint_t)-1,	/* uint_t dlim_cntr_max */
	(uint_t)1,	/* uint_t dlim_burstsizes */
	(uint_t)1,	/* uint_t dlim_minxfer */
	0		/* uint_t dlim_dmaspeed */
};
#elif defined(__x86)
static ddi_dma_lim_t standard_limits = {
	(uint_t)0,		/* addr_t dlim_addr_lo */
	(uint_t)0xffffff,	/* addr_t dlim_addr_hi */
	(uint_t)0,		/* uint_t dlim_cntr_max */
	(uint_t)0x00000001,	/* uint_t dlim_burstsizes */
	(uint_t)DMA_UNIT_8,	/* uint_t dlim_minxfer */
	(uint_t)0,		/* uint_t dlim_dmaspeed */
	/* NOTE: + binds tighter than <<, so this is 0x86 << 24 */
	(uint_t)0x86<<24+0,	/* uint_t dlim_version */
	(uint_t)0xffff,		/* uint_t dlim_adreg_max */
	(uint_t)0xffff,		/* uint_t dlim_ctreg_max */
	(uint_t)512,		/* uint_t dlim_granular */
	(int)1,			/* int dlim_sgllen */
	(uint_t)0xffffffff	/* uint_t dlim_reqsizes */
};

#endif

/*
 * Map a DMA request described by dmareqp.  If the caller supplied no
 * limits, the platform's standard_limits are substituted (sparc); on
 * x86 limits are mandatory.
 */
int
ddi_dma_setup(dev_info_t *dip, struct ddi_dma_req *dmareqp,
    ddi_dma_handle_t *handlep)
{
	int (*funcp)() = ddi_dma_map;
	struct bus_ops *bop;
#if defined(__sparc)
	auto ddi_dma_lim_t dma_lim;

	/* Work on a local copy so the caller's limits are never modified. */
	if (dmareqp->dmar_limits == (ddi_dma_lim_t *)0) {
		dma_lim = standard_limits;
	} else {
		dma_lim = *dmareqp->dmar_limits;
	}
	dmareqp->dmar_limits = &dma_lim;
#endif
#if defined(__x86)
	if (dmareqp->dmar_limits == (ddi_dma_lim_t *)0)
		return (DDI_FAILURE);
#endif

	/*
	 * Handle the case that the requester is both a leaf
	 * and a nexus driver simultaneously by calling the
	 * requester's bus_dma_map function directly instead
	 * of ddi_dma_map.
	 */
	bop = DEVI(dip)->devi_ops->devo_bus_ops;
	if (bop && bop->bus_dma_map)
		funcp = bop->bus_dma_map;
	return ((*funcp)(dip, dip, dmareqp, handlep));
}

/*
 * Build a ddi_dma_req for a virtual-address range [addr, addr+len) in
 * address space 'as' and map it.  Zero-length requests are rejected.
 */
int
ddi_dma_addr_setup(dev_info_t *dip, struct as *as, caddr_t addr, size_t len,
    uint_t flags, int (*waitfp)(), caddr_t arg,
    ddi_dma_lim_t *limits, ddi_dma_handle_t *handlep)
{
	int (*funcp)() = ddi_dma_map;
	ddi_dma_lim_t dma_lim;
	struct ddi_dma_req dmareq;
	struct bus_ops *bop;

	if (len == 0) {
		return (DDI_DMA_NOMAPPING);
	}
	if (limits == (ddi_dma_lim_t *)0) {
		dma_lim = standard_limits;
	} else {
		dma_lim = *limits;
	}
	dmareq.dmar_limits = &dma_lim;
	dmareq.dmar_flags = flags;
	dmareq.dmar_fp = waitfp;
	dmareq.dmar_arg = arg;
	dmareq.dmar_object.dmao_size = len;
	dmareq.dmar_object.dmao_type = DMA_OTYP_VADDR;
	dmareq.dmar_object.dmao_obj.virt_obj.v_as = as;
	dmareq.dmar_object.dmao_obj.virt_obj.v_addr = addr;
	dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;

	/*
	 * Handle the case that the requester is both a leaf
	 * and a nexus driver simultaneously by calling the
	 * requester's bus_dma_map function directly instead
	 * of ddi_dma_map.
	 */
	bop = DEVI(dip)->devi_ops->devo_bus_ops;
	if (bop && bop->bus_dma_map)
		funcp = bop->bus_dma_map;

	return ((*funcp)(dip, dip, &dmareq, handlep));
}

/*
 * Build a ddi_dma_req from a buf(9S) structure and map it.  Handles
 * page-list (B_PAGEIO), shadow-page (B_SHADOW) and plain vaddr bufs.
 */
int
ddi_dma_buf_setup(dev_info_t *dip, struct buf *bp, uint_t flags,
    int (*waitfp)(), caddr_t arg, ddi_dma_lim_t *limits,
    ddi_dma_handle_t *handlep)
{
	int (*funcp)() = ddi_dma_map;
	ddi_dma_lim_t dma_lim;
	struct ddi_dma_req dmareq;
	struct bus_ops *bop;

	if (limits == (ddi_dma_lim_t *)0) {
		dma_lim = standard_limits;
	} else {
		dma_lim = *limits;
	}
	dmareq.dmar_limits = &dma_lim;
	dmareq.dmar_flags = flags;
	dmareq.dmar_fp = waitfp;
	dmareq.dmar_arg = arg;
	dmareq.dmar_object.dmao_size = (uint_t)bp->b_bcount;

	if (bp->b_flags & B_PAGEIO) {
		/* Buffer is described by a page list, not a mapping. */
		dmareq.dmar_object.dmao_type = DMA_OTYP_PAGES;
		dmareq.dmar_object.dmao_obj.pp_obj.pp_pp = bp->b_pages;
		dmareq.dmar_object.dmao_obj.pp_obj.pp_offset =
		    (uint_t)(((uintptr_t)bp->b_un.b_addr) & MMU_PAGEOFFSET);
	} else {
		dmareq.dmar_object.dmao_type = DMA_OTYP_BUFVADDR;
		dmareq.dmar_object.dmao_obj.virt_obj.v_addr = bp->b_un.b_addr;
		if (bp->b_flags & B_SHADOW) {
			dmareq.dmar_object.dmao_obj.virt_obj.v_priv =
			    bp->b_shadow;
		} else {
			dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;
		}

		/*
		 * If the buffer has no proc pointer, or the proc
		 * struct has the kernel address space, or the buffer has
		 * been marked B_REMAPPED (meaning that it is now
		 * mapped into the kernel's address space), then
		 * the address space is kas (kernel address space).
		 */
		if ((bp->b_proc == NULL) || (bp->b_proc->p_as == &kas) ||
		    (bp->b_flags & B_REMAPPED)) {
			dmareq.dmar_object.dmao_obj.virt_obj.v_as = 0;
		} else {
			dmareq.dmar_object.dmao_obj.virt_obj.v_as =
			    bp->b_proc->p_as;
		}
	}

	/*
	 * Handle the case that the requester is both a leaf
	 * and a nexus driver simultaneously by calling the
	 * requester's bus_dma_map function directly instead
	 * of ddi_dma_map.
	 */
	bop = DEVI(dip)->devi_ops->devo_bus_ops;
	if (bop && bop->bus_dma_map)
		funcp = bop->bus_dma_map;

	return ((*funcp)(dip, dip, &dmareq, handlep));
}

#if !defined(__sparc)
/*
 * Request bus_dma_ctl parent to fiddle with a dma request.
 *
 * (The sparc version is in sparc_subr.s)
 */
int
ddi_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
    off_t *offp, size_t *lenp, caddr_t *objp, uint_t flags)
{
	int (*fp)();

	/* Dispatch to the nearest ancestor implementing bus_dma_ctl. */
	dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_ctl;
	fp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_ctl;
	return ((*fp) (dip, rdip, handle, request, offp, lenp, objp, flags));
}
#endif

/*
 * For all DMA control functions, call the DMA control
 * routine and return status.
 *
 * Just plain assume that the parent is to be called.
 * If a nexus driver or a thread outside the framework
 * of a nexus driver or a leaf driver calls these functions,
 * it is up to them to deal with the fact that the parent's
 * bus_dma_ctl function will be the first one called.
903 */ 904 905 #define HD ((ddi_dma_impl_t *)h)->dmai_rdip 906 907 int 908 ddi_dma_kvaddrp(ddi_dma_handle_t h, off_t off, size_t len, caddr_t *kp) 909 { 910 return (ddi_dma_mctl(HD, HD, h, DDI_DMA_KVADDR, &off, &len, kp, 0)); 911 } 912 913 int 914 ddi_dma_htoc(ddi_dma_handle_t h, off_t o, ddi_dma_cookie_t *c) 915 { 916 return (ddi_dma_mctl(HD, HD, h, DDI_DMA_HTOC, &o, 0, (caddr_t *)c, 0)); 917 } 918 919 int 920 ddi_dma_coff(ddi_dma_handle_t h, ddi_dma_cookie_t *c, off_t *o) 921 { 922 return (ddi_dma_mctl(HD, HD, h, DDI_DMA_COFF, 923 (off_t *)c, 0, (caddr_t *)o, 0)); 924 } 925 926 int 927 ddi_dma_movwin(ddi_dma_handle_t h, off_t *o, size_t *l, ddi_dma_cookie_t *c) 928 { 929 return (ddi_dma_mctl(HD, HD, h, DDI_DMA_MOVWIN, o, 930 l, (caddr_t *)c, 0)); 931 } 932 933 int 934 ddi_dma_curwin(ddi_dma_handle_t h, off_t *o, size_t *l) 935 { 936 if ((((ddi_dma_impl_t *)h)->dmai_rflags & DDI_DMA_PARTIAL) == 0) 937 return (DDI_FAILURE); 938 return (ddi_dma_mctl(HD, HD, h, DDI_DMA_REPWIN, o, l, 0, 0)); 939 } 940 941 int 942 ddi_dma_nextwin(ddi_dma_handle_t h, ddi_dma_win_t win, 943 ddi_dma_win_t *nwin) 944 { 945 return (ddi_dma_mctl(HD, HD, h, DDI_DMA_NEXTWIN, (off_t *)&win, 0, 946 (caddr_t *)nwin, 0)); 947 } 948 949 int 950 ddi_dma_nextseg(ddi_dma_win_t win, ddi_dma_seg_t seg, ddi_dma_seg_t *nseg) 951 { 952 ddi_dma_handle_t h = (ddi_dma_handle_t)win; 953 954 return (ddi_dma_mctl(HD, HD, h, DDI_DMA_NEXTSEG, (off_t *)&win, 955 (size_t *)&seg, (caddr_t *)nseg, 0)); 956 } 957 958 #if (defined(__i386) && !defined(__amd64)) || defined(__sparc) 959 /* 960 * This routine is Obsolete and should be removed from ALL architectures 961 * in a future release of Solaris. 962 * 963 * It is deliberately NOT ported to amd64; please fix the code that 964 * depends on this routine to use ddi_dma_nextcookie(9F). 
965 * 966 * NOTE: even though we fixed the pointer through a 32-bit param issue (the fix 967 * is a side effect to some other cleanup), we're still not going to support 968 * this interface on x64. 969 */ 970 int 971 ddi_dma_segtocookie(ddi_dma_seg_t seg, off_t *o, off_t *l, 972 ddi_dma_cookie_t *cookiep) 973 { 974 ddi_dma_handle_t h = (ddi_dma_handle_t)seg; 975 976 return (ddi_dma_mctl(HD, HD, h, DDI_DMA_SEGTOC, o, (size_t *)l, 977 (caddr_t *)cookiep, 0)); 978 } 979 #endif /* (__i386 && !__amd64) || __sparc */ 980 981 #if !defined(__sparc) 982 983 /* 984 * The SPARC versions of these routines are done in assembler to 985 * save register windows, so they're in sparc_subr.s. 986 */ 987 988 int 989 ddi_dma_map(dev_info_t *dip, dev_info_t *rdip, 990 struct ddi_dma_req *dmareqp, ddi_dma_handle_t *handlep) 991 { 992 dev_info_t *hdip; 993 int (*funcp)(dev_info_t *, dev_info_t *, struct ddi_dma_req *, 994 ddi_dma_handle_t *); 995 996 hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_map; 997 998 funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_map; 999 return ((*funcp)(hdip, rdip, dmareqp, handlep)); 1000 } 1001 1002 int 1003 ddi_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr, 1004 int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep) 1005 { 1006 dev_info_t *hdip; 1007 int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_attr_t *, 1008 int (*)(caddr_t), caddr_t, ddi_dma_handle_t *); 1009 1010 hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_allochdl; 1011 1012 funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_allochdl; 1013 return ((*funcp)(hdip, rdip, attr, waitfp, arg, handlep)); 1014 } 1015 1016 int 1017 ddi_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handlep) 1018 { 1019 dev_info_t *hdip; 1020 int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t); 1021 1022 hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_allochdl; 1023 1024 funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_freehdl; 1025 return ((*funcp)(hdip, rdip, 
handlep)); 1026 } 1027 1028 int 1029 ddi_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip, 1030 ddi_dma_handle_t handle, struct ddi_dma_req *dmareq, 1031 ddi_dma_cookie_t *cp, uint_t *ccountp) 1032 { 1033 dev_info_t *hdip; 1034 int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t, 1035 struct ddi_dma_req *, ddi_dma_cookie_t *, uint_t *); 1036 1037 hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl; 1038 1039 funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_bindhdl; 1040 return ((*funcp)(hdip, rdip, handle, dmareq, cp, ccountp)); 1041 } 1042 1043 int 1044 ddi_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip, 1045 ddi_dma_handle_t handle) 1046 { 1047 dev_info_t *hdip; 1048 int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t); 1049 1050 hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_unbindhdl; 1051 1052 funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_unbindhdl; 1053 return ((*funcp)(hdip, rdip, handle)); 1054 } 1055 1056 1057 int 1058 ddi_dma_flush(dev_info_t *dip, dev_info_t *rdip, 1059 ddi_dma_handle_t handle, off_t off, size_t len, 1060 uint_t cache_flags) 1061 { 1062 dev_info_t *hdip; 1063 int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t, 1064 off_t, size_t, uint_t); 1065 1066 hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_flush; 1067 1068 funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_flush; 1069 return ((*funcp)(hdip, rdip, handle, off, len, cache_flags)); 1070 } 1071 1072 int 1073 ddi_dma_win(dev_info_t *dip, dev_info_t *rdip, 1074 ddi_dma_handle_t handle, uint_t win, off_t *offp, 1075 size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp) 1076 { 1077 dev_info_t *hdip; 1078 int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t, 1079 uint_t, off_t *, size_t *, ddi_dma_cookie_t *, uint_t *); 1080 1081 hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_win; 1082 1083 funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_win; 1084 return ((*funcp)(hdip, rdip, handle, win, offp, lenp, 1085 cookiep, ccountp)); 1086 } 1087 1088 int 1089 
ddi_dma_sync(ddi_dma_handle_t h, off_t o, size_t l, uint_t whom)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)h;
	dev_info_t *hdip, *dip;
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t, off_t,
	    size_t, uint_t);

	/*
	 * the DMA nexus driver will set DMP_NOSYNC if the
	 * platform does not require any sync operation. For
	 * example if the memory is uncached or consistent
	 * and without any I/O write buffers involved.
	 */
	if ((hp->dmai_rflags & DMP_NOSYNC) == DMP_NOSYNC)
		return (DDI_SUCCESS);

	/* Dispatch to the flushing nexus cached in the requester's devinfo */
	dip = hp->dmai_rdip;
	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_flush;
	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_flush;
	return ((*funcp)(hdip, dip, h, o, l, whom));
}

/*
 * Unbind a DMA handle: dispatch to the unbind nexus/function cached
 * in the requester's devinfo node at bind time.
 */
int
ddi_dma_unbind_handle(ddi_dma_handle_t h)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)h;
	dev_info_t *hdip, *dip;
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t);

	dip = hp->dmai_rdip;
	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_unbindhdl;
	funcp = DEVI(dip)->devi_bus_dma_unbindfunc;
	return ((*funcp)(hdip, dip, h));
}

#endif	/* !__sparc */

/*
 * Release a DMA handle via the DDI_DMA_FREE miscellaneous control op.
 */
int
ddi_dma_free(ddi_dma_handle_t h)
{
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_FREE, 0, 0, 0, 0));
}

/*
 * Allocate an I/O parameter block of 'len' bytes, honoring the caller's
 * DMA limits; a NULL limp selects the standard_limits defaults.
 */
int
ddi_iopb_alloc(dev_info_t *dip, ddi_dma_lim_t *limp, uint_t len, caddr_t *iopbp)
{
	ddi_dma_lim_t defalt;
	size_t size = len;

	if (!limp) {
		defalt = standard_limits;
		limp = &defalt;
	}
	return (i_ddi_mem_alloc_lim(dip, limp, size, 0, 0, 0,
	    iopbp, NULL, NULL));
}

/*
 * Free memory obtained from ddi_iopb_alloc().
 */
void
ddi_iopb_free(caddr_t iopb)
{
	i_ddi_mem_free(iopb, NULL);
}

/*
 * Allocate 'length' bytes of DMA-able memory, honoring the caller's
 * DMA limits; a NULL limits pointer selects the standard_limits
 * defaults.  Only the low bit of 'flags' is passed through to the
 * allocator — NOTE(review): confirm its meaning against
 * i_ddi_mem_alloc_lim().
 */
int
ddi_mem_alloc(dev_info_t *dip, ddi_dma_lim_t *limits, uint_t length,
    uint_t flags, caddr_t *kaddrp, uint_t *real_length)
{
	ddi_dma_lim_t defalt;
	size_t size = length;

	if (!limits) {
		defalt = standard_limits;
		limits = &defalt;
	}
	return (i_ddi_mem_alloc_lim(dip, limits, size, flags & 0x1,
	    1, 0, kaddrp, real_length, NULL));
}

/*
 * Free memory obtained from ddi_mem_alloc().
 */
void
ddi_mem_free(caddr_t kaddr)
{
	i_ddi_mem_free(kaddr, NULL);
}

/*
 * DMA attributes, alignment, burst sizes, and transfer minimums
 */

/*
 * Copy the DMA attributes out of a handle into the caller's buffer.
 */
int
ddi_dma_get_attr(ddi_dma_handle_t handle, ddi_dma_attr_t *attrp)
{
	ddi_dma_impl_t *dimp = (ddi_dma_impl_t *)handle;

	if (attrp == NULL)
		return (DDI_FAILURE);
	*attrp = dimp->dmai_attr;
	return (DDI_SUCCESS);
}

/*
 * Return the burst size bitmask of a handle (0 for a NULL handle).
 */
int
ddi_dma_burstsizes(ddi_dma_handle_t handle)
{
	ddi_dma_impl_t *dimp = (ddi_dma_impl_t *)handle;

	if (!dimp)
		return (0);
	else
		return (dimp->dmai_burstsizes);
}

/*
 * Report the device alignment (derived from the lowest set burst size
 * bit) and the minimum effective transfer size for a handle.  For
 * SBus 64-bit handles the 64-bit burst bits (bits 16-23), when present,
 * take precedence.
 */
int
ddi_dma_devalign(ddi_dma_handle_t handle, uint_t *alignment, uint_t *mineffect)
{
	ddi_dma_impl_t *dimp = (ddi_dma_impl_t *)handle;

	if (!dimp || !alignment || !mineffect)
		return (DDI_FAILURE);
	if (!(dimp->dmai_rflags & DDI_DMA_SBUS_64BIT)) {
		*alignment = 1 << ddi_ffs(dimp->dmai_burstsizes);
	} else {
		if (dimp->dmai_burstsizes & 0xff0000) {
			*alignment = 1 << ddi_ffs(dimp->dmai_burstsizes >> 16);
		} else {
			*alignment = 1 << ddi_ffs(dimp->dmai_burstsizes);
		}
	}
	*mineffect = dimp->dmai_minxfer;
	return (DDI_SUCCESS);
}

/*
 * Negotiate a minimum I/O transfer size with the parent via the
 * DDI_CTLOPS_IOMIN control op.  The initial value must be a power of
 * two (or zero, which defaults to 4 for streaming and 1 otherwise);
 * returns 0 if the initial or negotiated value is not a power of two.
 */
int
ddi_iomin(dev_info_t *a, int i, int stream)
{
	int r;

	/*
	 * Make sure that the initial value is sane
	 */
	if (i & (i - 1))
		return (0);
	if (i == 0)
		i = (stream) ? 4 : 1;

	r = ddi_ctlops(a, a,
	    DDI_CTLOPS_IOMIN, (void *)(uintptr_t)stream, (void *)&i);
	if (r != DDI_SUCCESS || (i & (i - 1)))
		return (0);
	return (i);
}

/*
 * Given two DMA attribute structures, apply the attributes
 * of one to the other, following the rules of attributes
 * and the wishes of the caller.
 *
 * The rules of DMA attribute structures are that you cannot
 * make things *less* restrictive as you apply one set
 * of attributes to another.
 *
 */
void
ddi_dma_attr_merge(ddi_dma_attr_t *attr, ddi_dma_attr_t *mod)
{
	attr->dma_attr_addr_lo =
	    MAX(attr->dma_attr_addr_lo, mod->dma_attr_addr_lo);
	attr->dma_attr_addr_hi =
	    MIN(attr->dma_attr_addr_hi, mod->dma_attr_addr_hi);
	attr->dma_attr_count_max =
	    MIN(attr->dma_attr_count_max, mod->dma_attr_count_max);
	attr->dma_attr_align =
	    MAX(attr->dma_attr_align, mod->dma_attr_align);
	attr->dma_attr_burstsizes =
	    (uint_t)(attr->dma_attr_burstsizes & mod->dma_attr_burstsizes);
	attr->dma_attr_minxfer =
	    maxbit(attr->dma_attr_minxfer, mod->dma_attr_minxfer);
	attr->dma_attr_maxxfer =
	    MIN(attr->dma_attr_maxxfer, mod->dma_attr_maxxfer);
	attr->dma_attr_seg = MIN(attr->dma_attr_seg, mod->dma_attr_seg);
	attr->dma_attr_sgllen = MIN((uint_t)attr->dma_attr_sgllen,
	    (uint_t)mod->dma_attr_sgllen);
	attr->dma_attr_granular =
	    MAX(attr->dma_attr_granular, mod->dma_attr_granular);
}

/*
 * mmap/segmap interface:
 */

/*
 * ddi_segmap:		setup the default segment driver. Calls the drivers
 *			XXmmap routine to validate the range to be mapped.
 *			Return ENXIO if the range is not valid.  Create
 *			a seg_dev segment that contains all of the
 *			necessary information and will reference the
 *			default segment driver routines. It returns zero
 *			on success or non-zero on failure.
 */
int
ddi_segmap(dev_t dev, off_t offset, struct as *asp, caddr_t *addrp, off_t len,
    uint_t prot, uint_t maxprot, uint_t flags, cred_t *credp)
{
	extern int spec_segmap(dev_t, off_t, struct as *, caddr_t *,
	    off_t, uint_t, uint_t, uint_t, struct cred *);

	/* Delegate to the specfs default segmap implementation */
	return (spec_segmap(dev, offset, asp, addrp, len,
	    prot, maxprot, flags, credp));
}

/*
 * ddi_map_fault:	Resolve mappings at fault time.  Used by segment
 *			drivers.  Allows each successive parent to resolve
 *			address translations and add its mappings to the
 *			mapping list supplied in the page structure.  It
 *			returns zero on success or non-zero on failure.
 */

int
ddi_map_fault(dev_info_t *dip, struct hat *hat, struct seg *seg,
    caddr_t addr, struct devpage *dp, pfn_t pfn, uint_t prot, uint_t lock)
{
	return (i_ddi_map_fault(dip, dip, hat, seg, addr, dp, pfn, prot, lock));
}

/*
 * ddi_device_mapping_check:	Called from ddi_segmap_setup.
 *	Invokes platform specific DDI to determine whether attributes specified
 *	in attr(9s) are valid for the region of memory that will be made
 *	available for direct access to user process via the mmap(2) system
 *	call.  Returns 0 if the mapping is valid (with *hat_flags filled in),
 *	-1 otherwise.
 */
int
ddi_device_mapping_check(dev_t dev, ddi_device_acc_attr_t *accattrp,
    uint_t rnumber, uint_t *hat_flags)
{
	ddi_acc_handle_t handle;
	ddi_map_req_t mr;
	ddi_acc_hdl_t *hp;
	int result;
	dev_info_t *dip;

	/*
	 * we use e_ddi_hold_devi_by_dev to search for the devi.  We
	 * release it immediately since it should already be held by
	 * a devfs vnode.
	 */
	if ((dip =
	    e_ddi_hold_devi_by_dev(dev, E_DDI_HOLD_DEVI_NOATTACH)) == NULL)
		return (-1);
	ddi_release_devi(dip);		/* for e_ddi_hold_devi_by_dev() */

	/*
	 * Allocate and initialize the common elements of data
	 * access handle.
	 */
	handle = impl_acc_hdl_alloc(KM_SLEEP, NULL);
	if (handle == NULL)
		return (-1);

	hp = impl_acc_hdl_get(handle);
	hp->ah_vers = VERS_ACCHDL;
	hp->ah_dip = dip;
	hp->ah_rnumber = rnumber;
	hp->ah_offset = 0;
	hp->ah_len = 0;
	hp->ah_acc = *accattrp;

	/*
	 * Set up the mapping request and call to parent.
	 */
	mr.map_op = DDI_MO_MAP_HANDLE;
	mr.map_type = DDI_MT_RNUMBER;
	mr.map_obj.rnumber = rnumber;
	mr.map_prot = PROT_READ | PROT_WRITE;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_handlep = hp;
	mr.map_vers = DDI_MAP_VERSION;
	result = ddi_map(dip, &mr, 0, 0, NULL);

	/*
	 * Region must be mappable, pick up flags from the framework.
	 */
	*hat_flags = hp->ah_hat_flags;

	impl_acc_hdl_free(handle);

	/*
	 * check for end result.
	 */
	if (result != DDI_SUCCESS)
		return (-1);
	return (0);
}


/*
 * Property functions:	 See also, ddipropdefs.h.
 *
 * These functions are the framework for the property functions,
 * i.e. they support software defined properties.  All implementation
 * specific property handling (i.e.: self-identifying devices and
 * PROM defined properties) are handled in the implementation specific
 * functions (defined in ddi_implfuncs.h).
 */

/*
 * nopropop:	Shouldn't be called, right?
 */
int
nopropop(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
    char *name, caddr_t valuep, int *lengthp)
{
	_NOTE(ARGUNUSED(dev, dip, prop_op, mod_flags, name, valuep, lengthp))
	return (DDI_PROP_NOT_FOUND);
}

#ifdef	DDI_PROP_DEBUG
int ddi_prop_debug_flag = 0;

/*
 * Toggle property-debug logging; returns the previous setting.
 */
int
ddi_prop_debug(int enable)
{
	int prev = ddi_prop_debug_flag;

	if ((enable != 0) || (prev != 0))
		printf("ddi_prop_debug: debugging %s\n",
		    enable ?
"enabled" : "disabled"); 1413 ddi_prop_debug_flag = enable; 1414 return (prev); 1415 } 1416 1417 #endif /* DDI_PROP_DEBUG */ 1418 1419 /* 1420 * Search a property list for a match, if found return pointer 1421 * to matching prop struct, else return NULL. 1422 */ 1423 1424 ddi_prop_t * 1425 i_ddi_prop_search(dev_t dev, char *name, uint_t flags, ddi_prop_t **list_head) 1426 { 1427 ddi_prop_t *propp; 1428 1429 /* 1430 * find the property in child's devinfo: 1431 * Search order defined by this search function is first matching 1432 * property with input dev == DDI_DEV_T_ANY matching any dev or 1433 * dev == propp->prop_dev, name == propp->name, and the correct 1434 * data type as specified in the flags. If a DDI_DEV_T_NONE dev 1435 * value made it this far then it implies a DDI_DEV_T_ANY search. 1436 */ 1437 if (dev == DDI_DEV_T_NONE) 1438 dev = DDI_DEV_T_ANY; 1439 1440 for (propp = *list_head; propp != NULL; propp = propp->prop_next) { 1441 1442 if (!DDI_STRSAME(propp->prop_name, name)) 1443 continue; 1444 1445 if ((dev != DDI_DEV_T_ANY) && (propp->prop_dev != dev)) 1446 continue; 1447 1448 if (((propp->prop_flags & flags) & DDI_PROP_TYPE_MASK) == 0) 1449 continue; 1450 1451 return (propp); 1452 } 1453 1454 return ((ddi_prop_t *)0); 1455 } 1456 1457 /* 1458 * Search for property within devnames structures 1459 */ 1460 ddi_prop_t * 1461 i_ddi_search_global_prop(dev_t dev, char *name, uint_t flags) 1462 { 1463 major_t major; 1464 struct devnames *dnp; 1465 ddi_prop_t *propp; 1466 1467 /* 1468 * Valid dev_t value is needed to index into the 1469 * correct devnames entry, therefore a dev_t 1470 * value of DDI_DEV_T_ANY is not appropriate. 
1471 */ 1472 ASSERT(dev != DDI_DEV_T_ANY); 1473 if (dev == DDI_DEV_T_ANY) { 1474 return ((ddi_prop_t *)0); 1475 } 1476 1477 major = getmajor(dev); 1478 dnp = &(devnamesp[major]); 1479 1480 if (dnp->dn_global_prop_ptr == NULL) 1481 return ((ddi_prop_t *)0); 1482 1483 LOCK_DEV_OPS(&dnp->dn_lock); 1484 1485 for (propp = dnp->dn_global_prop_ptr->prop_list; 1486 propp != NULL; 1487 propp = (ddi_prop_t *)propp->prop_next) { 1488 1489 if (!DDI_STRSAME(propp->prop_name, name)) 1490 continue; 1491 1492 if ((!(flags & LDI_DEV_T_ANY)) && (propp->prop_dev != dev)) 1493 continue; 1494 1495 if (((propp->prop_flags & flags) & DDI_PROP_TYPE_MASK) == 0) 1496 continue; 1497 1498 /* Property found, return it */ 1499 UNLOCK_DEV_OPS(&dnp->dn_lock); 1500 return (propp); 1501 } 1502 1503 UNLOCK_DEV_OPS(&dnp->dn_lock); 1504 return ((ddi_prop_t *)0); 1505 } 1506 1507 static char prop_no_mem_msg[] = "can't allocate memory for ddi property <%s>"; 1508 1509 /* 1510 * ddi_prop_search_global: 1511 * Search the global property list within devnames 1512 * for the named property. Return the encoded value. 1513 */ 1514 static int 1515 i_ddi_prop_search_global(dev_t dev, uint_t flags, char *name, 1516 void *valuep, uint_t *lengthp) 1517 { 1518 ddi_prop_t *propp; 1519 caddr_t buffer; 1520 1521 propp = i_ddi_search_global_prop(dev, name, flags); 1522 1523 /* Property NOT found, bail */ 1524 if (propp == (ddi_prop_t *)0) 1525 return (DDI_PROP_NOT_FOUND); 1526 1527 if (propp->prop_flags & DDI_PROP_UNDEF_IT) 1528 return (DDI_PROP_UNDEFINED); 1529 1530 if ((buffer = kmem_alloc(propp->prop_len, 1531 (flags & DDI_PROP_CANSLEEP) ? 
	    KM_SLEEP : KM_NOSLEEP)) == NULL) {
		cmn_err(CE_CONT, prop_no_mem_msg, name);
		return (DDI_PROP_NO_MEMORY);
	}

	/*
	 * Return the encoded data
	 */
	*(caddr_t *)valuep = buffer;
	*lengthp = propp->prop_len;
	bcopy(propp->prop_val, buffer, propp->prop_len);

	return (DDI_PROP_SUCCESS);
}

/*
 * ddi_prop_search_common:	Lookup and return the encoded value
 *
 * Searches the node's own property lists (driver, system, driver-global,
 * then boot/hw), and if not found iterates up the devinfo tree via the
 * parents' bus_prop_op functions.  The node's devi_lock is held while
 * its lists are examined and while copying out a found value; it is
 * dropped before any sleeping allocation or before calling up the tree.
 */
int
ddi_prop_search_common(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    uint_t flags, char *name, void *valuep, uint_t *lengthp)
{
	ddi_prop_t	*propp;
	int		i;
	caddr_t		buffer;
	caddr_t		prealloc = NULL;
	int		plength = 0;
	dev_info_t	*pdip;
	int		(*bop)();

	/*CONSTANTCONDITION*/
	while (1)  {

		mutex_enter(&(DEVI(dip)->devi_lock));


		/*
		 * find the property in child's devinfo:
		 * Search order is:
		 *	1. driver defined properties
		 *	2. system defined properties
		 *	3. driver global properties
		 *	4. boot defined properties
		 */

		propp = i_ddi_prop_search(dev, name, flags,
		    &(DEVI(dip)->devi_drv_prop_ptr));
		if (propp == NULL)  {
			propp = i_ddi_prop_search(dev, name, flags,
			    &(DEVI(dip)->devi_sys_prop_ptr));
		}
		if ((propp == NULL) && DEVI(dip)->devi_global_prop_list) {
			propp = i_ddi_prop_search(dev, name, flags,
			    &DEVI(dip)->devi_global_prop_list->prop_list);
		}

		if (propp == NULL)  {
			propp = i_ddi_prop_search(dev, name, flags,
			    &(DEVI(dip)->devi_hw_prop_ptr));
		}

		/*
		 * Software property found?
		 */
		if (propp != (ddi_prop_t *)0)  {

			/*
			 * If explicit undefine, return now.
			 */
			if (propp->prop_flags & DDI_PROP_UNDEF_IT) {
				mutex_exit(&(DEVI(dip)->devi_lock));
				if (prealloc)
					kmem_free(prealloc, plength);
				return (DDI_PROP_UNDEFINED);
			}

			/*
			 * If we only want to know if it exists, return now
			 */
			if (prop_op == PROP_EXISTS) {
				mutex_exit(&(DEVI(dip)->devi_lock));
				ASSERT(prealloc == NULL);
				return (DDI_PROP_SUCCESS);
			}

			/*
			 * If length only request or prop length == 0,
			 * service request and return now.
			 */
			if ((prop_op == PROP_LEN) ||(propp->prop_len == 0)) {
				*lengthp = propp->prop_len;

				/*
				 * if prop_op is PROP_LEN_AND_VAL_ALLOC
				 * that means prop_len is 0, so set valuep
				 * also to NULL
				 */
				if (prop_op == PROP_LEN_AND_VAL_ALLOC)
					*(caddr_t *)valuep = NULL;

				mutex_exit(&(DEVI(dip)->devi_lock));
				if (prealloc)
					kmem_free(prealloc, plength);
				return (DDI_PROP_SUCCESS);
			}

			/*
			 * If LEN_AND_VAL_ALLOC and the request can sleep,
			 * drop the mutex, allocate the buffer, and go
			 * through the loop again.  If we already allocated
			 * the buffer, and the size of the property changed,
			 * keep trying...
			 */
			if ((prop_op == PROP_LEN_AND_VAL_ALLOC) &&
			    (flags & DDI_PROP_CANSLEEP))  {
				if (prealloc && (propp->prop_len != plength)) {
					kmem_free(prealloc, plength);
					prealloc = NULL;
				}
				if (prealloc == NULL)  {
					plength = propp->prop_len;
					mutex_exit(&(DEVI(dip)->devi_lock));
					prealloc = kmem_alloc(plength,
					    KM_SLEEP);
					continue;
				}
			}

			/*
			 * Allocate buffer, if required.  Either way,
			 * set `buffer' variable.
			 */
			i = *lengthp;			/* Get callers length */
			*lengthp = propp->prop_len;	/* Set callers length */

			switch (prop_op) {

			case PROP_LEN_AND_VAL_ALLOC:

				if (prealloc == NULL) {
					/* NOSLEEP: devi_lock is still held */
					buffer = kmem_alloc(propp->prop_len,
					    KM_NOSLEEP);
				} else {
					buffer = prealloc;
				}

				if (buffer == NULL)  {
					mutex_exit(&(DEVI(dip)->devi_lock));
					cmn_err(CE_CONT, prop_no_mem_msg, name);
					return (DDI_PROP_NO_MEMORY);
				}
				/* Set callers buf ptr */
				*(caddr_t *)valuep = buffer;
				break;

			case PROP_LEN_AND_VAL_BUF:

				if (propp->prop_len > (i)) {
					mutex_exit(&(DEVI(dip)->devi_lock));
					return (DDI_PROP_BUF_TOO_SMALL);
				}

				buffer = valuep;  /* Get callers buf ptr */
				break;

			default:
				break;
			}

			/*
			 * Do the copy.
			 */
			bcopy(propp->prop_val, buffer, propp->prop_len);
			mutex_exit(&(DEVI(dip)->devi_lock));
			return (DDI_PROP_SUCCESS);
		}

		mutex_exit(&(DEVI(dip)->devi_lock));
		if (prealloc)
			kmem_free(prealloc, plength);
		prealloc = NULL;

		/*
		 * Prop not found, call parent bus_ops to deal with possible
		 * h/w layer (possible PROM defined props, etc.) and to
		 * possibly ascend the hierarchy, if allowed by flags.
		 */
		pdip = (dev_info_t *)DEVI(dip)->devi_parent;

		/*
		 * One last call for the root driver PROM props?
		 */
		if (dip == ddi_root_node())  {
			return (ddi_bus_prop_op(dev, dip, dip, prop_op,
			    flags, name, valuep, (int *)lengthp));
		}

		/*
		 * We may have been called to check for properties
		 * within a single devinfo node that has no parent -
		 * see make_prop()
		 */
		if (pdip == NULL) {
			ASSERT((flags &
			    (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM)) ==
			    (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM));
			return (DDI_PROP_NOT_FOUND);
		}

		/*
		 * Instead of recursing, we do iterative calls up the tree.
		 * As a bit of optimization, skip the bus_op level if the
		 * node is a s/w node and if the parent's bus_prop_op function
		 * is `ddi_bus_prop_op', because we know that in this case,
		 * this function does nothing.
		 *
		 * 4225415: If the parent isn't attached, or the child
		 * hasn't been named by the parent yet, use the default
		 * ddi_bus_prop_op as a proxy for the parent.  This
		 * allows property lookups in any child/parent state to
		 * include 'prom' and inherited properties, even when
		 * there are no drivers attached to the child or parent.
		 */

		bop = ddi_bus_prop_op;
		if (i_ddi_devi_attached(pdip) &&
		    (i_ddi_node_state(dip) >= DS_INITIALIZED))
			bop = DEVI(pdip)->devi_ops->devo_bus_ops->bus_prop_op;

		i = DDI_PROP_NOT_FOUND;

		if ((bop != ddi_bus_prop_op) || ndi_dev_is_prom_node(dip)) {
			i = (*bop)(dev, pdip, dip, prop_op,
			    flags | DDI_PROP_DONTPASS,
			    name, valuep, lengthp);
		}

		if ((flags & DDI_PROP_DONTPASS) ||
		    (i != DDI_PROP_NOT_FOUND))
			return (i);

		dip = pdip;
	}
	/*NOTREACHED*/
}


/*
 * ddi_prop_op: The basic property operator for drivers.
 *
 * In ddi_prop_op, the type of valuep is interpreted based on prop_op:
 *
 *	prop_op			valuep
 *	------			------
 *
 *	PROP_LEN		<unused>
 *
 *	PROP_LEN_AND_VAL_BUF	Pointer to callers buffer
 *
 *	PROP_LEN_AND_VAL_ALLOC	Address of callers pointer (will be set to
 *				address of allocated buffer, if successful)
 */
int
ddi_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
    char *name, caddr_t valuep, int *lengthp)
{
	int	i;

	/* callers of this untyped interface must not pass type bits */
	ASSERT((mod_flags & DDI_PROP_TYPE_MASK) == 0);

	/*
	 * If this was originally an LDI prop lookup then we bail here.
	 * The reason is that the LDI property lookup interfaces first call
	 * a drivers prop_op() entry point to allow it to override
	 * properties.  But if we've made it here, then the driver hasn't
	 * overridden any properties.  We don't want to continue with the
	 * property search here because we don't have any type information.
	 * When we return failure, the LDI interfaces will then proceed to
	 * call the typed property interfaces to look up the property.
	 */
	if (mod_flags & DDI_PROP_DYNAMIC)
		return (DDI_PROP_NOT_FOUND);

	/*
	 * check for pre-typed property consumer asking for typed property:
	 * see e_ddi_getprop_int64.
	 */
	if (mod_flags & DDI_PROP_CONSUMER_TYPED)
		mod_flags |= DDI_PROP_TYPE_INT64;
	mod_flags |= DDI_PROP_TYPE_ANY;

	i = ddi_prop_search_common(dev, dip, prop_op,
	    mod_flags, name, valuep, (uint_t *)lengthp);
	if (i == DDI_PROP_FOUND_1275)
		return (DDI_PROP_SUCCESS);
	return (i);
}

/*
 * ddi_prop_op_nblocks_blksize: The basic property operator for drivers that
 * maintain size in number of blksize blocks.  Provides a dynamic property
 * implementation for size oriented properties based on nblocks64 and blksize
 * values passed in by the driver.  Fallback to ddi_prop_op if the nblocks64
 * is too large.  This interface should not be used with a nblocks64 that
 * represents the driver's idea of how to represent unknown, if nblocks is
 * unknown use ddi_prop_op.
 */
int
ddi_prop_op_nblocks_blksize(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp,
    uint64_t nblocks64, uint_t blksize)
{
	uint64_t size64;
	int	blkshift;

	/* convert block size to shift value */
	ASSERT(BIT_ONLYONESET(blksize));
	blkshift = highbit(blksize) - 1;

	/*
	 * There is no point in supporting nblocks64 values that don't have
	 * an accurate uint64_t byte count representation.
	 */
	if (nblocks64 >= (UINT64_MAX >> blkshift))
		return (ddi_prop_op(dev, dip, prop_op, mod_flags,
		    name, valuep, lengthp));

	size64 = nblocks64 << blkshift;
	return (ddi_prop_op_size_blksize(dev, dip, prop_op, mod_flags,
	    name, valuep, lengthp, size64, blksize));
}

/*
 * ddi_prop_op_nblocks: ddi_prop_op_nblocks_blksize with DEV_BSIZE blksize.
 */
int
ddi_prop_op_nblocks(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp, uint64_t nblocks64)
{
	return (ddi_prop_op_nblocks_blksize(dev, dip, prop_op,
	    mod_flags, name, valuep, lengthp, nblocks64, DEV_BSIZE));
}

/*
 * ddi_prop_op_size_blksize: The basic property operator for block drivers that
 * maintain size in bytes.  Provides a dynamic property implementation for
 * size oriented properties based on size64 value and blksize passed in by the
 * driver.  Fallback to ddi_prop_op if the size64 is too large.  This interface
 * should not be used with a size64 that represents the driver's idea of how
 * to represent unknown, if size is unknown use ddi_prop_op.
 *
 * NOTE: the legacy "nblocks"/"size" properties are treated as 32-bit unsigned
 * integers.  While the most likely interface to request them ([bc]devi_size)
 * is declared int (signed) there is no enforcement of this, which means we
 * can't enforce limitations here without risking regression.
 */
int
ddi_prop_op_size_blksize(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp, uint64_t size64,
    uint_t blksize)
{
	uint64_t nblocks64;
	int	callers_length;
	caddr_t	buffer;
	int	blkshift;

	/* convert block size to shift value */
	ASSERT(BIT_ONLYONESET(blksize));
	blkshift = highbit(blksize) - 1;

	/* compute nblocks value in units of blksize */
	nblocks64 = size64 >> blkshift;

	/* get callers length, establish length of our dynamic properties */
	callers_length = *lengthp;

	if (strcmp(name, "Nblocks") == 0)
		*lengthp = sizeof (uint64_t);
	else if (strcmp(name, "Size") == 0)
		*lengthp = sizeof (uint64_t);
	else if ((strcmp(name, "nblocks") == 0) && (nblocks64 < UINT_MAX))
		*lengthp = sizeof (uint32_t);
	else if ((strcmp(name, "size") == 0) && (size64 < UINT_MAX))
		*lengthp = sizeof (uint32_t);
	else if ((strcmp(name, "blksize") == 0) && (blksize < UINT_MAX))
		*lengthp = sizeof (uint32_t);
	else {
		/* fallback to ddi_prop_op */
		return (ddi_prop_op(dev, dip, prop_op, mod_flags,
		    name, valuep, lengthp));
	}

	/* service request for the length of the property */
	if (prop_op == PROP_LEN)
		return (DDI_PROP_SUCCESS);

	/* the length of the property and the request must match */
	if (callers_length != *lengthp)
		return (DDI_PROP_INVAL_ARG);

	switch (prop_op) {
	case PROP_LEN_AND_VAL_ALLOC:
		if ((buffer = kmem_alloc(*lengthp,
		    (mod_flags & DDI_PROP_CANSLEEP) ?
		    KM_SLEEP : KM_NOSLEEP)) == NULL)
			return (DDI_PROP_NO_MEMORY);

		*(caddr_t *)valuep = buffer;	/* set callers buf ptr */
		break;

	case PROP_LEN_AND_VAL_BUF:
		buffer = valuep;		/* get callers buf ptr */
		break;

	default:
		return (DDI_PROP_INVAL_ARG);
	}

	/* transfer the value into the buffer */
	if (strcmp(name, "Nblocks") == 0)
		*((uint64_t *)buffer) = nblocks64;
	else if (strcmp(name, "Size") == 0)
		*((uint64_t *)buffer) = size64;
	else if (strcmp(name, "nblocks") == 0)
		*((uint32_t *)buffer) = (uint32_t)nblocks64;
	else if (strcmp(name, "size") == 0)
		*((uint32_t *)buffer) = (uint32_t)size64;
	else if (strcmp(name, "blksize") == 0)
		*((uint32_t *)buffer) = (uint32_t)blksize;
	return (DDI_PROP_SUCCESS);
}

/*
 * ddi_prop_op_size: ddi_prop_op_size_blksize with DEV_BSIZE block size.
 */
int
ddi_prop_op_size(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp, uint64_t size64)
{
	return (ddi_prop_op_size_blksize(dev, dip, prop_op,
	    mod_flags, name, valuep, lengthp, size64, DEV_BSIZE));
}

/*
 * Variable length props...
 */

/*
 * ddi_getlongprop:	Get variable length property len+val into a buffer
 *		allocated by property provider via kmem_alloc. Requester
 *		is responsible for freeing returned property via kmem_free.
 *
 *	Arguments:
 *
 *	dev_t:	Input:	dev_t of property.
 *	dip:	Input:	dev_info_t pointer of child.
 *	flags:	Input:	Possible flag modifiers are:
 *		DDI_PROP_DONTPASS:	Don't pass to parent if prop not found.
 *		DDI_PROP_CANSLEEP:	Memory allocation may sleep.
 *	name:	Input:	name of property.
 *	valuep:	Output:	Addr of callers buffer pointer.
 *	lengthp: Output: *lengthp will contain prop length on exit.
 *
 *	Possible Returns:
 *
 *		DDI_PROP_SUCCESS:	Prop found and returned.
 *		DDI_PROP_NOT_FOUND:	Prop not found
 *		DDI_PROP_UNDEFINED:	Prop explicitly undefined.
 *		DDI_PROP_NO_MEMORY:	Prop found, but unable to alloc mem.
 */

int
ddi_getlongprop(dev_t dev, dev_info_t *dip, int flags,
    char *name, caddr_t valuep, int *lengthp)
{
	return (ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_ALLOC,
	    flags, name, valuep, lengthp));
}

/*
 *
 * ddi_getlongprop_buf:		Get long prop into pre-allocated callers
 *				buffer. (no memory allocation by provider).
 *
 *	dev_t:	Input:	dev_t of property.
 *	dip:	Input:	dev_info_t pointer of child.
 *	flags:	Input:	DDI_PROP_DONTPASS or NULL
 *	name:	Input:	name of property
 *	valuep:	Input:	ptr to callers buffer.
 *	lengthp:I/O:	ptr to length of callers buffer on entry,
 *			actual length of property on exit.
 *
 *	Possible returns:
 *
 *		DDI_PROP_SUCCESS	Prop found and returned
 *		DDI_PROP_NOT_FOUND	Prop not found
 *		DDI_PROP_UNDEFINED	Prop explicitly undefined.
 *		DDI_PROP_BUF_TOO_SMALL	Prop found, callers buf too small,
 *					no value returned, but actual prop
 *					length returned in *lengthp
 *
 */

int
ddi_getlongprop_buf(dev_t dev, dev_info_t *dip, int flags,
    char *name, caddr_t valuep, int *lengthp)
{
	return (ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
	    flags, name, valuep, lengthp));
}

/*
 * Integer/boolean sized props.
 *
 * Call is value only... returns found boolean or int sized prop value or
 * defvalue if prop not found or is wrong length or is explicitly undefined.
 * Only flag is DDI_PROP_DONTPASS...
 *
 * By convention, this interface returns boolean (0) sized properties
 * as value (int)1.
 *
 * This never returns an error, if property not found or specifically
 * undefined, the input `defvalue' is returned.
 */

int
ddi_getprop(dev_t dev, dev_info_t *dip, int flags, char *name, int defvalue)
{
	int	propvalue = defvalue;
	int	proplength = sizeof (int);
	int	error;

	error = ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
	    flags, name, (caddr_t)&propvalue, &proplength);

	/* zero-length (boolean) property: present means (int)1 */
	if ((error == DDI_PROP_SUCCESS) && (proplength == 0))
		propvalue = 1;

	return (propvalue);
}

/*
 * Get prop length interface: flags are 0 or DDI_PROP_DONTPASS
 * if returns DDI_PROP_SUCCESS, length returned in *lengthp.
 */

int
ddi_getproplen(dev_t dev, dev_info_t *dip, int flags, char *name, int *lengthp)
{
	return (ddi_prop_op(dev, dip, PROP_LEN, flags, name, NULL, lengthp));
}

/*
 * Allocate a struct prop_driver_data, along with 'size' bytes
 * for decoded property data.  This structure is freed by
 * calling ddi_prop_free(9F).
 */
static void *
ddi_prop_decode_alloc(size_t size, void (*prop_free)(struct prop_driver_data *))
{
	struct prop_driver_data *pdd;

	/*
	 * Allocate a structure with enough memory to store the decoded data.
	 */
	pdd = kmem_zalloc(sizeof (struct prop_driver_data) + size, KM_SLEEP);
	pdd->pdd_size = (sizeof (struct prop_driver_data) + size);
	pdd->pdd_prop_free = prop_free;

	/*
	 * Return a pointer to the location to put the decoded data.
	 */
	return ((void *)((caddr_t)pdd + sizeof (struct prop_driver_data)));
}

/*
 * Allocate the memory needed to store the encoded data in the property
 * handle.
 */
static int
ddi_prop_encode_alloc(prop_handle_t *ph, size_t size)
{
	/*
	 * If size is zero, then set data to NULL and size to 0.  This
	 * is a boolean property.
	 */
	if (size == 0) {
		ph->ph_size = 0;
		ph->ph_data = NULL;
		ph->ph_cur_pos = NULL;
		ph->ph_save_pos = NULL;
	} else {
		if (ph->ph_flags == DDI_PROP_DONTSLEEP) {
			ph->ph_data = kmem_zalloc(size, KM_NOSLEEP);
			if (ph->ph_data == NULL)
				return (DDI_PROP_NO_MEMORY);
		} else
			ph->ph_data = kmem_zalloc(size, KM_SLEEP);
		ph->ph_size = size;
		ph->ph_cur_pos = ph->ph_data;
		ph->ph_save_pos = ph->ph_data;
	}
	return (DDI_PROP_SUCCESS);
}

/*
 * Free the space allocated by the lookup routines.  Each lookup routine
 * returns a pointer to the decoded data to the driver.  The driver then
 * passes this pointer back to us.  This data actually lives in a struct
 * prop_driver_data.  We use negative indexing to find the beginning of
 * the structure and then free the entire structure using the size and
 * the free routine stored in the structure.
 */
void
ddi_prop_free(void *datap)
{
	struct prop_driver_data *pdd;

	/*
	 * Get the structure
	 */
	pdd = (struct prop_driver_data *)
	    ((caddr_t)datap - sizeof (struct prop_driver_data));
	/*
	 * Call the free routine to free it
	 */
	(*pdd->pdd_prop_free)(pdd);
}

/*
 * Free the data associated with an array of ints,
 * allocated with ddi_prop_decode_alloc().
 */
static void
ddi_prop_free_ints(struct prop_driver_data *pdd)
{
	kmem_free(pdd, pdd->pdd_size);
}

/*
 * Free a single string property or a single string contained within
 * the argv style return value of an array of strings.
 */
static void
ddi_prop_free_string(struct prop_driver_data *pdd)
{
	kmem_free(pdd, pdd->pdd_size);

}

/*
 * Free an array of strings.
 */
static void
ddi_prop_free_strings(struct prop_driver_data *pdd)
{
	kmem_free(pdd, pdd->pdd_size);
}

/*
 * Free the data associated with an array of bytes.
 */
static void
ddi_prop_free_bytes(struct prop_driver_data *pdd)
{
	kmem_free(pdd, pdd->pdd_size);
}

/*
 * Reset the current location pointer in the property handle to the
 * beginning of the data.
 */
void
ddi_prop_reset_pos(prop_handle_t *ph)
{
	ph->ph_cur_pos = ph->ph_data;
	ph->ph_save_pos = ph->ph_data;
}

/*
 * Save the location that the current location pointer is pointing to.
 * (The original comments on this function and ddi_prop_restore_pos
 * were swapped; corrected to match the code.)
 */
void
ddi_prop_save_pos(prop_handle_t *ph)
{
	ph->ph_save_pos = ph->ph_cur_pos;
}

/*
 * Restore the current location pointer in the property handle to the
 * saved position.
 */
void
ddi_prop_restore_pos(prop_handle_t *ph)
{
	ph->ph_cur_pos = ph->ph_save_pos;
}

/*
 * Property encode/decode functions
 */

/*
 * Decode a single integer property
 */
static int
ddi_prop_fm_decode_int(prop_handle_t *ph, void *data, uint_t *nelements)
{
	int	i;
	int	tmp;

	/*
	 * If there is nothing to decode return an error
	 */
	if (ph->ph_size == 0)
		return (DDI_PROP_END_OF_DATA);

	/*
	 * Decode the property as a single integer and return it
	 * in data if we were able to decode it.
	 */
	i = DDI_PROP_INT(ph, DDI_PROP_CMD_DECODE, &tmp);
	if (i < DDI_PROP_RESULT_OK) {
		switch (i) {
		case DDI_PROP_RESULT_EOF:
			return (DDI_PROP_END_OF_DATA);

		case DDI_PROP_RESULT_ERROR:
			return (DDI_PROP_CANNOT_DECODE);
		}
	}

	*(int *)data = tmp;
	*nelements = 1;
	return (DDI_PROP_SUCCESS);
}

/*
 * Decode a single 64 bit integer property.
 * Mirrors ddi_prop_fm_decode_int() but uses the int64 operator.
 */
static int
ddi_prop_fm_decode_int64(prop_handle_t *ph, void *data, uint_t *nelements)
{
	int i;
	int64_t tmp;

	/*
	 * If there is nothing to decode return an error
	 */
	if (ph->ph_size == 0)
		return (DDI_PROP_END_OF_DATA);

	/*
	 * Decode the property as a single integer and return it
	 * in data if we were able to decode it.
	 */
	i = DDI_PROP_INT64(ph, DDI_PROP_CMD_DECODE, &tmp);
	if (i < DDI_PROP_RESULT_OK) {
		switch (i) {
		case DDI_PROP_RESULT_EOF:
			return (DDI_PROP_END_OF_DATA);

		case DDI_PROP_RESULT_ERROR:
			return (DDI_PROP_CANNOT_DECODE);
		}
	}

	*(int64_t *)data = tmp;
	*nelements = 1;
	return (DDI_PROP_SUCCESS);
}

/*
 * Decode an array of integers property.
 * On success *(int **)data points at a ddi_prop_decode_alloc()'d array
 * (caller frees with ddi_prop_free()) and *nelements is the count.
 */
static int
ddi_prop_fm_decode_ints(prop_handle_t *ph, void *data, uint_t *nelements)
{
	int i;
	int cnt = 0;
	int *tmp;
	int *intp;
	int n;

	/*
	 * Figure out how many array elements there are by going through the
	 * data without decoding it first and counting.
	 */
	for (;;) {
		i = DDI_PROP_INT(ph, DDI_PROP_CMD_SKIP, NULL);
		if (i < 0)
			break;
		cnt++;
	}

	/*
	 * If there are no elements return an error
	 */
	if (cnt == 0)
		return (DDI_PROP_END_OF_DATA);

	/*
	 * If we cannot skip through the data, we cannot decode it
	 */
	if (i == DDI_PROP_RESULT_ERROR)
		return (DDI_PROP_CANNOT_DECODE);

	/*
	 * Reset the data pointer to the beginning of the encoded data
	 */
	ddi_prop_reset_pos(ph);

	/*
	 * Allocate memory to store the decoded value in.
	 * NOTE(review): no NULL check -- assumes ddi_prop_decode_alloc()
	 * sleeps until memory is available; confirm against its definition.
	 */
	intp = ddi_prop_decode_alloc((cnt * sizeof (int)),
	    ddi_prop_free_ints);

	/*
	 * Decode each element and place it in the space we just allocated
	 */
	tmp = intp;
	for (n = 0; n < cnt; n++, tmp++) {
		i = DDI_PROP_INT(ph, DDI_PROP_CMD_DECODE, tmp);
		if (i < DDI_PROP_RESULT_OK) {
			/*
			 * Free the space we just allocated
			 * and return an error.
			 */
			ddi_prop_free(intp);
			switch (i) {
			case DDI_PROP_RESULT_EOF:
				return (DDI_PROP_END_OF_DATA);

			case DDI_PROP_RESULT_ERROR:
				return (DDI_PROP_CANNOT_DECODE);
			}
		}
	}

	*nelements = cnt;
	*(int **)data = intp;

	return (DDI_PROP_SUCCESS);
}

/*
 * Decode a 64 bit integer array property.
 * Same structure as ddi_prop_fm_decode_ints() with the int64 operator.
 */
static int
ddi_prop_fm_decode_int64_array(prop_handle_t *ph, void *data, uint_t *nelements)
{
	int i;
	int n;
	int cnt = 0;
	int64_t *tmp;
	int64_t *intp;

	/*
	 * Count the number of array elements by going
	 * through the data without decoding it.
	 */
	for (;;) {
		i = DDI_PROP_INT64(ph, DDI_PROP_CMD_SKIP, NULL);
		if (i < 0)
			break;
		cnt++;
	}

	/*
	 * If there are no elements return an error
	 */
	if (cnt == 0)
		return (DDI_PROP_END_OF_DATA);

	/*
	 * If we cannot skip through the data, we cannot decode it
	 */
	if (i == DDI_PROP_RESULT_ERROR)
		return (DDI_PROP_CANNOT_DECODE);

	/*
	 * Reset the data pointer to the beginning of the encoded data
	 */
	ddi_prop_reset_pos(ph);

	/*
	 * Allocate memory to store the decoded value.
	 * (ddi_prop_free_ints is reused here: it simply kmem_free()s the
	 * whole allocation, so it works for int64 arrays too.)
	 */
	intp = ddi_prop_decode_alloc((cnt * sizeof (int64_t)),
	    ddi_prop_free_ints);

	/*
	 * Decode each element and place it in the space allocated
	 */
	tmp = intp;
	for (n = 0; n < cnt; n++, tmp++) {
		i = DDI_PROP_INT64(ph, DDI_PROP_CMD_DECODE, tmp);
		if (i < DDI_PROP_RESULT_OK) {
			/*
			 * Free the space we just allocated
			 * and return an error.
			 */
			ddi_prop_free(intp);
			switch (i) {
			case DDI_PROP_RESULT_EOF:
				return (DDI_PROP_END_OF_DATA);

			case DDI_PROP_RESULT_ERROR:
				return (DDI_PROP_CANNOT_DECODE);
			}
		}
	}

	*nelements = cnt;
	*(int64_t **)data = intp;

	return (DDI_PROP_SUCCESS);
}

/*
 * Encode an array of integers property (can be one element).
 * 'data' points at the int array; 'nelements' is the element count.
 * Allocates encode space in the handle, then encodes each element.
 */
int
ddi_prop_fm_encode_ints(prop_handle_t *ph, void *data, uint_t nelements)
{
	int i;
	int *tmp;
	int cnt;
	int size;

	/*
	 * If there is no data, we cannot do anything
	 */
	if (nelements == 0)
		return (DDI_PROP_CANNOT_ENCODE);

	/*
	 * Get the size of an encoded int.
	 */
	size = DDI_PROP_INT(ph, DDI_PROP_CMD_GET_ESIZE, NULL);

	if (size < DDI_PROP_RESULT_OK) {
		switch (size) {
		case DDI_PROP_RESULT_EOF:
			return (DDI_PROP_END_OF_DATA);

		case DDI_PROP_RESULT_ERROR:
			return (DDI_PROP_CANNOT_ENCODE);
		}
	}

	/*
	 * Allocate space in the handle to store the encoded ints.
	 */
	if (ddi_prop_encode_alloc(ph, size * nelements) !=
	    DDI_PROP_SUCCESS)
		return (DDI_PROP_NO_MEMORY);

	/*
	 * Encode the array of ints.
	 */
	tmp = (int *)data;
	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
		i = DDI_PROP_INT(ph, DDI_PROP_CMD_ENCODE, tmp);
		if (i < DDI_PROP_RESULT_OK) {
			switch (i) {
			case DDI_PROP_RESULT_EOF:
				return (DDI_PROP_END_OF_DATA);

			case DDI_PROP_RESULT_ERROR:
				return (DDI_PROP_CANNOT_ENCODE);
			}
		}
	}

	return (DDI_PROP_SUCCESS);
}


/*
 * Encode a 64 bit integer array property (can be one element).
 * Mirrors ddi_prop_fm_encode_ints() with the int64 operator.
 */
int
ddi_prop_fm_encode_int64(prop_handle_t *ph, void *data, uint_t nelements)
{
	int i;
	int cnt;
	int size;
	int64_t *tmp;

	/*
	 * If there is no data, we cannot do anything
	 */
	if (nelements == 0)
		return (DDI_PROP_CANNOT_ENCODE);

	/*
	 * Get the size of an encoded 64 bit int.
	 */
	size = DDI_PROP_INT64(ph, DDI_PROP_CMD_GET_ESIZE, NULL);

	if (size < DDI_PROP_RESULT_OK) {
		switch (size) {
		case DDI_PROP_RESULT_EOF:
			return (DDI_PROP_END_OF_DATA);

		case DDI_PROP_RESULT_ERROR:
			return (DDI_PROP_CANNOT_ENCODE);
		}
	}

	/*
	 * Allocate space in the handle to store the encoded ints.
	 */
	if (ddi_prop_encode_alloc(ph, size * nelements) !=
	    DDI_PROP_SUCCESS)
		return (DDI_PROP_NO_MEMORY);

	/*
	 * Encode the array of ints.
	 */
	tmp = (int64_t *)data;
	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
		i = DDI_PROP_INT64(ph, DDI_PROP_CMD_ENCODE, tmp);
		if (i < DDI_PROP_RESULT_OK) {
			switch (i) {
			case DDI_PROP_RESULT_EOF:
				return (DDI_PROP_END_OF_DATA);

			case DDI_PROP_RESULT_ERROR:
				return (DDI_PROP_CANNOT_ENCODE);
			}
		}
	}

	return (DDI_PROP_SUCCESS);
}

/*
 * Decode a single string property.
 * On success *(char **)data points at a ddi_prop_decode_alloc()'d,
 * NUL-terminated copy (caller frees with ddi_prop_free()) and
 * *nelements is 1.
 */
static int
ddi_prop_fm_decode_string(prop_handle_t *ph, void *data, uint_t *nelements)
{
	char *tmp;
	char *str;
	int i;
	int size;

	/*
	 * If there is nothing to decode return an error
	 */
	if (ph->ph_size == 0)
		return (DDI_PROP_END_OF_DATA);

	/*
	 * Get the decoded size of the encoded string.
	 */
	size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
	if (size < DDI_PROP_RESULT_OK) {
		switch (size) {
		case DDI_PROP_RESULT_EOF:
			return (DDI_PROP_END_OF_DATA);

		case DDI_PROP_RESULT_ERROR:
			return (DDI_PROP_CANNOT_DECODE);
		}
	}

	/*
	 * Allocate memory to store the decoded value in.
	 * NOTE(review): no NULL check -- assumes ddi_prop_decode_alloc()
	 * sleeps; confirm against its definition.
	 */
	str = ddi_prop_decode_alloc((size_t)size, ddi_prop_free_string);

	/* GET_DSIZE advanced ph_cur_pos; rewind before decoding. */
	ddi_prop_reset_pos(ph);

	/*
	 * Decode the str and place it in the space we just allocated
	 */
	tmp = str;
	i = DDI_PROP_STR(ph, DDI_PROP_CMD_DECODE, tmp);
	if (i < DDI_PROP_RESULT_OK) {
		/*
		 * Free the space we just allocated
		 * and return an error.
		 */
		ddi_prop_free(str);
		switch (i) {
		case DDI_PROP_RESULT_EOF:
			return (DDI_PROP_END_OF_DATA);

		case DDI_PROP_RESULT_ERROR:
			return (DDI_PROP_CANNOT_DECODE);
		}
	}

	*(char **)data = str;
	*nelements = 1;

	return (DDI_PROP_SUCCESS);
}

/*
 * Decode an array of strings.
 */
int
ddi_prop_fm_decode_strings(prop_handle_t *ph, void *data, uint_t *nelements)
{
	int cnt = 0;
	char **strs;
	char **tmp;
	char *ptr;
	int i;
	int n;
	int size;
	size_t nbytes;

	/*
	 * Figure out how many array elements there are by going through the
	 * data without decoding it first and counting.
	 */
	for (;;) {
		i = DDI_PROP_STR(ph, DDI_PROP_CMD_SKIP, NULL);
		if (i < 0)
			break;
		cnt++;
	}

	/*
	 * If there are no elements return an error
	 */
	if (cnt == 0)
		return (DDI_PROP_END_OF_DATA);

	/*
	 * If we cannot skip through the data, we cannot decode it
	 */
	if (i == DDI_PROP_RESULT_ERROR)
		return (DDI_PROP_CANNOT_DECODE);

	/*
	 * Reset the data pointer to the beginning of the encoded data
	 */
	ddi_prop_reset_pos(ph);

	/*
	 * Figure out how much memory we need for the sum total:
	 * a NULL-terminated argv-style pointer vector followed by
	 * the string bytes themselves, all in one allocation.
	 */
	nbytes = (cnt + 1) * sizeof (char *);

	for (n = 0; n < cnt; n++) {
		/*
		 * Get the decoded size of the current encoded string.
		 */
		size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
		if (size < DDI_PROP_RESULT_OK) {
			switch (size) {
			case DDI_PROP_RESULT_EOF:
				return (DDI_PROP_END_OF_DATA);

			case DDI_PROP_RESULT_ERROR:
				return (DDI_PROP_CANNOT_DECODE);
			}
		}

		nbytes += size;
	}

	/*
	 * Allocate memory in which to store the decoded strings.
	 */
	strs = ddi_prop_decode_alloc(nbytes, ddi_prop_free_strings);

	/*
	 * Set up pointers for each string by figuring out yet
	 * again how long each string is.
	 */
	ddi_prop_reset_pos(ph);
	ptr = (caddr_t)strs + ((cnt + 1) * sizeof (char *));
	for (tmp = strs, n = 0; n < cnt; n++, tmp++) {
		/*
		 * Get the decoded size of the current encoded string.
		 */
		size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
		if (size < DDI_PROP_RESULT_OK) {
			ddi_prop_free(strs);
			switch (size) {
			case DDI_PROP_RESULT_EOF:
				return (DDI_PROP_END_OF_DATA);

			case DDI_PROP_RESULT_ERROR:
				return (DDI_PROP_CANNOT_DECODE);
			}
		}

		*tmp = ptr;
		ptr += size;
	}

	/*
	 * String array is terminated by a NULL
	 */
	*tmp = NULL;

	/*
	 * Finally, we can decode each string
	 */
	ddi_prop_reset_pos(ph);
	for (tmp = strs, n = 0; n < cnt; n++, tmp++) {
		i = DDI_PROP_STR(ph, DDI_PROP_CMD_DECODE, *tmp);
		if (i < DDI_PROP_RESULT_OK) {
			/*
			 * Free the space we just allocated
			 * and return an error
			 */
			ddi_prop_free(strs);
			switch (i) {
			case DDI_PROP_RESULT_EOF:
				return (DDI_PROP_END_OF_DATA);

			case DDI_PROP_RESULT_ERROR:
				return (DDI_PROP_CANNOT_DECODE);
			}
		}
	}

	*(char ***)data = strs;
	*nelements = cnt;

	return (DDI_PROP_SUCCESS);
}

/*
 * Encode a single string.
 * 'data' is a char ** pointing at the NUL-terminated string to encode.
 */
int
ddi_prop_fm_encode_string(prop_handle_t *ph, void *data, uint_t nelements)
{
	char **tmp;
	int size;
	int i;

	/*
	 * If there is no data, we cannot do anything
	 */
	if (nelements == 0)
		return (DDI_PROP_CANNOT_ENCODE);

	/*
	 * Get the size of the encoded string.
	 */
	tmp = (char **)data;
	size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_ESIZE, *tmp);
	if (size < DDI_PROP_RESULT_OK) {
		switch (size) {
		case DDI_PROP_RESULT_EOF:
			return (DDI_PROP_END_OF_DATA);

		case DDI_PROP_RESULT_ERROR:
			return (DDI_PROP_CANNOT_ENCODE);
		}
	}

	/*
	 * Allocate space in the handle to store the encoded string.
	 */
	if (ddi_prop_encode_alloc(ph, size) != DDI_PROP_SUCCESS)
		return (DDI_PROP_NO_MEMORY);

	ddi_prop_reset_pos(ph);

	/*
	 * Encode the string.
	 */
	tmp = (char **)data;
	i = DDI_PROP_STR(ph, DDI_PROP_CMD_ENCODE, *tmp);
	if (i < DDI_PROP_RESULT_OK) {
		switch (i) {
		case DDI_PROP_RESULT_EOF:
			return (DDI_PROP_END_OF_DATA);

		case DDI_PROP_RESULT_ERROR:
			return (DDI_PROP_CANNOT_ENCODE);
		}
	}

	return (DDI_PROP_SUCCESS);
}


/*
 * Encode an array of strings.
 * 'data' is a char ** vector of 'nelements' NUL-terminated strings.
 */
int
ddi_prop_fm_encode_strings(prop_handle_t *ph, void *data, uint_t nelements)
{
	int cnt = 0;
	char **tmp;
	int size;
	uint_t total_size;
	int i;

	/*
	 * If there is no data, we cannot do anything
	 */
	if (nelements == 0)
		return (DDI_PROP_CANNOT_ENCODE);

	/*
	 * Get the total size required to encode all the strings.
	 */
	total_size = 0;
	tmp = (char **)data;
	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
		size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_ESIZE, *tmp);
		if (size < DDI_PROP_RESULT_OK) {
			switch (size) {
			case DDI_PROP_RESULT_EOF:
				return (DDI_PROP_END_OF_DATA);

			case DDI_PROP_RESULT_ERROR:
				return (DDI_PROP_CANNOT_ENCODE);
			}
		}
		total_size += (uint_t)size;
	}

	/*
	 * Allocate space in the handle to store the encoded strings.
	 */
	if (ddi_prop_encode_alloc(ph, total_size) != DDI_PROP_SUCCESS)
		return (DDI_PROP_NO_MEMORY);

	ddi_prop_reset_pos(ph);

	/*
	 * Encode the array of strings.
	 */
	tmp = (char **)data;
	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
		i = DDI_PROP_STR(ph, DDI_PROP_CMD_ENCODE, *tmp);
		if (i < DDI_PROP_RESULT_OK) {
			switch (i) {
			case DDI_PROP_RESULT_EOF:
				return (DDI_PROP_END_OF_DATA);

			case DDI_PROP_RESULT_ERROR:
				return (DDI_PROP_CANNOT_ENCODE);
			}
		}
	}

	return (DDI_PROP_SUCCESS);
}


/*
 * Decode an array of bytes.
 * On success *(uchar_t **)data points at a ddi_prop_decode_alloc()'d
 * buffer (caller frees with ddi_prop_free()) and *nelements is the
 * byte count.  Note that 'data' is used both as an input (passed to
 * GET_DSIZE) and as the output pointer location.
 */
static int
ddi_prop_fm_decode_bytes(prop_handle_t *ph, void *data, uint_t *nelements)
{
	uchar_t *tmp;
	int nbytes;
	int i;

	/*
	 * If there are no elements return an error
	 */
	if (ph->ph_size == 0)
		return (DDI_PROP_END_OF_DATA);

	/*
	 * Get the size of the encoded array of bytes.
	 */
	nbytes = DDI_PROP_BYTES(ph, DDI_PROP_CMD_GET_DSIZE,
	    data, ph->ph_size);
	if (nbytes < DDI_PROP_RESULT_OK) {
		switch (nbytes) {
		case DDI_PROP_RESULT_EOF:
			return (DDI_PROP_END_OF_DATA);

		case DDI_PROP_RESULT_ERROR:
			return (DDI_PROP_CANNOT_DECODE);
		}
	}

	/*
	 * Allocate memory to store the decoded value in.
	 */
	tmp = ddi_prop_decode_alloc(nbytes, ddi_prop_free_bytes);

	/*
	 * Decode each element and place it in the space we just allocated
	 */
	i = DDI_PROP_BYTES(ph, DDI_PROP_CMD_DECODE, tmp, nbytes);
	if (i < DDI_PROP_RESULT_OK) {
		/*
		 * Free the space we just allocated
		 * and return an error
		 */
		ddi_prop_free(tmp);
		switch (i) {
		case DDI_PROP_RESULT_EOF:
			return (DDI_PROP_END_OF_DATA);

		case DDI_PROP_RESULT_ERROR:
			return (DDI_PROP_CANNOT_DECODE);
		}
	}

	*(uchar_t **)data = tmp;
	*nelements = nbytes;

	return (DDI_PROP_SUCCESS);
}

/*
 * Encode an array of bytes.
2951 */ 2952 int 2953 ddi_prop_fm_encode_bytes(prop_handle_t *ph, void *data, uint_t nelements) 2954 { 2955 int size; 2956 int i; 2957 2958 /* 2959 * If there are no elements, then this is a boolean property, 2960 * so just create a property handle with no data and return. 2961 */ 2962 if (nelements == 0) { 2963 (void) ddi_prop_encode_alloc(ph, 0); 2964 return (DDI_PROP_SUCCESS); 2965 } 2966 2967 /* 2968 * Get the size of the encoded array of bytes. 2969 */ 2970 size = DDI_PROP_BYTES(ph, DDI_PROP_CMD_GET_ESIZE, (uchar_t *)data, 2971 nelements); 2972 if (size < DDI_PROP_RESULT_OK) { 2973 switch (size) { 2974 case DDI_PROP_RESULT_EOF: 2975 return (DDI_PROP_END_OF_DATA); 2976 2977 case DDI_PROP_RESULT_ERROR: 2978 return (DDI_PROP_CANNOT_DECODE); 2979 } 2980 } 2981 2982 /* 2983 * Allocate space in the handle to store the encoded bytes. 2984 */ 2985 if (ddi_prop_encode_alloc(ph, (uint_t)size) != DDI_PROP_SUCCESS) 2986 return (DDI_PROP_NO_MEMORY); 2987 2988 /* 2989 * Encode the array of bytes. 2990 */ 2991 i = DDI_PROP_BYTES(ph, DDI_PROP_CMD_ENCODE, (uchar_t *)data, 2992 nelements); 2993 if (i < DDI_PROP_RESULT_OK) { 2994 switch (i) { 2995 case DDI_PROP_RESULT_EOF: 2996 return (DDI_PROP_END_OF_DATA); 2997 2998 case DDI_PROP_RESULT_ERROR: 2999 return (DDI_PROP_CANNOT_ENCODE); 3000 } 3001 } 3002 3003 return (DDI_PROP_SUCCESS); 3004 } 3005 3006 /* 3007 * OBP 1275 integer, string and byte operators. 
 *
 * DDI_PROP_CMD_DECODE:
 *
 *	DDI_PROP_RESULT_ERROR:		cannot decode the data
 *	DDI_PROP_RESULT_EOF:		end of data
 *	DDI_PROP_OK:			data was decoded
 *
 * DDI_PROP_CMD_ENCODE:
 *
 *	DDI_PROP_RESULT_ERROR:		cannot encode the data
 *	DDI_PROP_RESULT_EOF:		end of data
 *	DDI_PROP_OK:			data was encoded
 *
 * DDI_PROP_CMD_SKIP:
 *
 *	DDI_PROP_RESULT_ERROR:		cannot skip the data
 *	DDI_PROP_RESULT_EOF:		end of data
 *	DDI_PROP_OK:			data was skipped
 *
 * DDI_PROP_CMD_GET_ESIZE:
 *
 *	DDI_PROP_RESULT_ERROR:		cannot get encoded size
 *	DDI_PROP_RESULT_EOF:		end of data
 *	> 0:				the encoded size
 *
 * DDI_PROP_CMD_GET_DSIZE:
 *
 *	DDI_PROP_RESULT_ERROR:		cannot get decoded size
 *	DDI_PROP_RESULT_EOF:		end of data
 *	> 0:				the decoded size
 */

/*
 * OBP 1275 integer operator
 *
 * OBP properties are a byte stream of data, so integers may not be
 * properly aligned.  Therefore we need to copy them one byte at a time.
 */
int
ddi_prop_1275_int(prop_handle_t *ph, uint_t cmd, int *data)
{
	int i;

	switch (cmd) {
	case DDI_PROP_CMD_DECODE:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0)
			return (DDI_PROP_RESULT_ERROR);
		/*
		 * NOTE(review): the bounds checks below do arithmetic on
		 * (int *) while ph_size and i are byte counts, so the
		 * offset is scaled by sizeof (int).  This looks stricter
		 * than intended -- confirm before relying on the exact
		 * rejection boundary.
		 */
		if (ph->ph_flags & PH_FROM_PROM) {
			i = MIN(ph->ph_size, PROP_1275_INT_SIZE);
			if ((int *)ph->ph_cur_pos > ((int *)ph->ph_data +
			    ph->ph_size - i))
				return (DDI_PROP_RESULT_ERROR);
		} else {
			if (ph->ph_size < sizeof (int) ||
			    ((int *)ph->ph_cur_pos > ((int *)ph->ph_data +
			    ph->ph_size - sizeof (int))))
				return (DDI_PROP_RESULT_ERROR);
		}

		/*
		 * Copy the integer, using the implementation-specific
		 * copy function if the property is coming from the PROM.
		 */
		if (ph->ph_flags & PH_FROM_PROM) {
			*data = impl_ddi_prop_int_from_prom(
			    (uchar_t *)ph->ph_cur_pos,
			    (ph->ph_size < PROP_1275_INT_SIZE) ?
			    ph->ph_size : PROP_1275_INT_SIZE);
		} else {
			bcopy(ph->ph_cur_pos, data, sizeof (int));
		}

		/*
		 * Move the current location to the start of the next
		 * bit of undecoded data.
		 */
		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
		    PROP_1275_INT_SIZE;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_ENCODE:
		/*
		 * Check that there is room to encode the data
		 * (same (int *) arithmetic caveat as above).
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
		    ph->ph_size < PROP_1275_INT_SIZE ||
		    ((int *)ph->ph_cur_pos > ((int *)ph->ph_data +
		    ph->ph_size - sizeof (int))))
			return (DDI_PROP_RESULT_ERROR);

		/*
		 * Encode the integer into the byte stream.
		 */
		bcopy(data, ph->ph_cur_pos, sizeof (int));

		/*
		 * Move the current location to the start of the next bit of
		 * space where we can store encoded data.
		 */
		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos + PROP_1275_INT_SIZE;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_SKIP:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
		    ph->ph_size < PROP_1275_INT_SIZE)
			return (DDI_PROP_RESULT_ERROR);


		if ((caddr_t)ph->ph_cur_pos ==
		    (caddr_t)ph->ph_data + ph->ph_size) {
			return (DDI_PROP_RESULT_EOF);
		} else if ((caddr_t)ph->ph_cur_pos >
		    (caddr_t)ph->ph_data + ph->ph_size) {
			return (DDI_PROP_RESULT_EOF);
		}

		/*
		 * Move the current location to the start of the next bit of
		 * undecoded data.
		 */
		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos + PROP_1275_INT_SIZE;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_GET_ESIZE:
		/*
		 * Return the size of an encoded integer on OBP
		 */
		return (PROP_1275_INT_SIZE);

	case DDI_PROP_CMD_GET_DSIZE:
		/*
		 * Return the size of a decoded integer on the system.
		 */
		return (sizeof (int));

	default:
#ifdef DEBUG
		panic("ddi_prop_1275_int: %x impossible", cmd);
		/*NOTREACHED*/
#else
		return (DDI_PROP_RESULT_ERROR);
#endif	/* DEBUG */
	}
}

/*
 * 64 bit integer operator.
 *
 * This is an extension, defined by Sun, to the 1275 integer
 * operator.  This routine handles the encoding/decoding of
 * 64 bit integer properties.  PROM-sourced properties are
 * rejected: there is no 64-bit PROM decode path.
 */
int
ddi_prop_int64_op(prop_handle_t *ph, uint_t cmd, int64_t *data)
{

	switch (cmd) {
	case DDI_PROP_CMD_DECODE:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0)
			return (DDI_PROP_RESULT_ERROR);
		if (ph->ph_flags & PH_FROM_PROM) {
			/* 64-bit ints cannot come from the PROM. */
			return (DDI_PROP_RESULT_ERROR);
		} else {
			/*
			 * NOTE(review): (int64_t *) arithmetic against the
			 * byte count ph_size scales the offset by 8 --
			 * same caveat as ddi_prop_1275_int() above.
			 */
			if (ph->ph_size < sizeof (int64_t) ||
			    ((int64_t *)ph->ph_cur_pos >
			    ((int64_t *)ph->ph_data +
			    ph->ph_size - sizeof (int64_t))))
				return (DDI_PROP_RESULT_ERROR);
		}
		/*
		 * Copy the integer (the PROM branch is unreachable here;
		 * it was rejected above).
		 */
		if (ph->ph_flags & PH_FROM_PROM) {
			return (DDI_PROP_RESULT_ERROR);
		} else {
			bcopy(ph->ph_cur_pos, data, sizeof (int64_t));
		}

		/*
		 * Move the current location to the start of the next
		 * bit of undecoded data.
		 */
		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
		    sizeof (int64_t);
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_ENCODE:
		/*
		 * Check that there is room to encode the data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
		    ph->ph_size < sizeof (int64_t) ||
		    ((int64_t *)ph->ph_cur_pos > ((int64_t *)ph->ph_data +
		    ph->ph_size - sizeof (int64_t))))
			return (DDI_PROP_RESULT_ERROR);

		/*
		 * Encode the integer into the byte stream.
		 */
		bcopy(data, ph->ph_cur_pos, sizeof (int64_t));

		/*
		 * Move the current location to the start of the next bit of
		 * space where we can store encoded data.
		 */
		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
		    sizeof (int64_t);
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_SKIP:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
		    ph->ph_size < sizeof (int64_t))
			return (DDI_PROP_RESULT_ERROR);

		if ((caddr_t)ph->ph_cur_pos ==
		    (caddr_t)ph->ph_data + ph->ph_size) {
			return (DDI_PROP_RESULT_EOF);
		} else if ((caddr_t)ph->ph_cur_pos >
		    (caddr_t)ph->ph_data + ph->ph_size) {
			return (DDI_PROP_RESULT_EOF);
		}

		/*
		 * Move the current location to the start of
		 * the next bit of undecoded data.
		 */
		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
		    sizeof (int64_t);
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_GET_ESIZE:
		/*
		 * Return the size of an encoded 64 bit integer.
		 */
		return (sizeof (int64_t));

	case DDI_PROP_CMD_GET_DSIZE:
		/*
		 * Return the size of a decoded 64 bit integer.
		 */
		return (sizeof (int64_t));

	default:
#ifdef DEBUG
		panic("ddi_prop_int64_op: %x impossible", cmd);
		/*NOTREACHED*/
#else
		return (DDI_PROP_RESULT_ERROR);
#endif	/* DEBUG */
	}
}

/*
 * OBP 1275 string operator.
 *
 * OBP strings are NULL terminated.
 */
int
ddi_prop_1275_string(prop_handle_t *ph, uint_t cmd, char *data)
{
	int n;
	char *p;
	char *end;

	switch (cmd) {
	case DDI_PROP_CMD_DECODE:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0) {
			return (DDI_PROP_RESULT_ERROR);
		}

		/*
		 * Match DDI_PROP_CMD_GET_DSIZE logic for when to stop and
		 * how to NULL terminate result.
		 */
		p = (char *)ph->ph_cur_pos;
		end = (char *)ph->ph_data + ph->ph_size;
		if (p >= end)
			return (DDI_PROP_RESULT_EOF);

		while (p < end) {
			*data++ = *p;
			if (*p++ == 0) {	/* NULL from OBP */
				ph->ph_cur_pos = p;
				return (DDI_PROP_RESULT_OK);
			}
		}

		/*
		 * If OBP did not NULL terminate string, which happens
		 * (at least) for 'true'/'false' boolean values, account for
		 * the space and store null termination on decode.
		 */
		ph->ph_cur_pos = p;
		*data = 0;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_ENCODE:
		/*
		 * Check that there is room to encode the data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0) {
			return (DDI_PROP_RESULT_ERROR);
		}

		n = strlen(data) + 1;
		if ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
		    ph->ph_size - n)) {
			return (DDI_PROP_RESULT_ERROR);
		}

		/*
		 * Copy the NULL terminated string
		 */
		bcopy(data, ph->ph_cur_pos, n);

		/*
		 * Move the current location to the start of the next bit of
		 * space where we can store encoded data.
		 */
		ph->ph_cur_pos = (char *)ph->ph_cur_pos + n;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_SKIP:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0) {
			return (DDI_PROP_RESULT_ERROR);
		}

		/*
		 * Skip over one string.  We know the size of the property,
		 * but we must scan for the terminator since we may be
		 * looking at random OBP data.
		 */
		p = (char *)ph->ph_cur_pos;
		end = (char *)ph->ph_data + ph->ph_size;
		if (p >= end)
			return (DDI_PROP_RESULT_EOF);

		while (p < end) {
			if (*p++ == 0) {	/* NULL from OBP */
				ph->ph_cur_pos = p;
				return (DDI_PROP_RESULT_OK);
			}
		}

		/*
		 * Accommodate the fact that OBP does not always NULL
		 * terminate strings.
		 */
		ph->ph_cur_pos = p;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_GET_ESIZE:
		/*
		 * Return the size of the encoded string on OBP.
		 */
		return (strlen(data) + 1);

	case DDI_PROP_CMD_GET_DSIZE:
		/*
		 * Return the string length plus one for the NULL.
		 * We know the size of the property, we need to
		 * ensure that the string is properly formatted,
		 * since we may be looking up random OBP data.
		 */
		p = (char *)ph->ph_cur_pos;
		end = (char *)ph->ph_data + ph->ph_size;
		if (p >= end)
			return (DDI_PROP_RESULT_EOF);

		for (n = 0; p < end; n++) {
			if (*p++ == 0) {	/* NULL from OBP */
				ph->ph_cur_pos = p;
				return (n + 1);
			}
		}

		/*
		 * If OBP did not NULL terminate string, which happens for
		 * 'true'/'false' boolean values, account for the space
		 * to store null termination here.
		 */
		ph->ph_cur_pos = p;
		return (n + 1);

	default:
#ifdef DEBUG
		panic("ddi_prop_1275_string: %x impossible", cmd);
		/*NOTREACHED*/
#else
		return (DDI_PROP_RESULT_ERROR);
#endif	/* DEBUG */
	}
}

/*
 * OBP 1275 byte operator
 *
 * Caller must specify the number of bytes to get.  OBP encodes bytes
 * as a byte so there is a 1-to-1 translation.
 */
int
ddi_prop_1275_bytes(prop_handle_t *ph, uint_t cmd, uchar_t *data,
    uint_t nelements)
{
	switch (cmd) {
	case DDI_PROP_CMD_DECODE:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
		    ph->ph_size < nelements ||
		    ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
		    ph->ph_size - nelements)))
			return (DDI_PROP_RESULT_ERROR);

		/*
		 * Copy out the bytes
		 */
		bcopy(ph->ph_cur_pos, data, nelements);

		/*
		 * Move the current location
		 */
		ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_ENCODE:
		/*
		 * Check that there is room to encode the data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
		    ph->ph_size < nelements ||
		    ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
		    ph->ph_size - nelements)))
			return (DDI_PROP_RESULT_ERROR);

		/*
		 * Copy in the bytes
		 */
		bcopy(data, ph->ph_cur_pos, nelements);

		/*
		 * Move the current location to the start of the next bit of
		 * space where we can store encoded data.
		 */
		ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_SKIP:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
		    ph->ph_size < nelements)
			return (DDI_PROP_RESULT_ERROR);

		if ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
		    ph->ph_size - nelements))
			return (DDI_PROP_RESULT_EOF);

		/*
		 * Move the current location
		 */
		ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_GET_ESIZE:
		/*
		 * The size in bytes of the encoded size is the
		 * same as the decoded size provided by the caller.
		 */
		return (nelements);

	case DDI_PROP_CMD_GET_DSIZE:
		/*
		 * Just return the number of bytes specified by the caller.
		 */
		return (nelements);

	default:
#ifdef DEBUG
		panic("ddi_prop_1275_bytes: %x impossible", cmd);
		/*NOTREACHED*/
#else
		return (DDI_PROP_RESULT_ERROR);
#endif	/* DEBUG */
	}
}

/*
 * Operator vector used for properties that come from the OBP, hardware
 * configuration files, or that are created by calls to
 * ddi_prop_update(9F).  Order: int, string, bytes, int64 ops.
 */
static struct prop_handle_ops prop_1275_ops = {
	ddi_prop_1275_int,
	ddi_prop_1275_string,
	ddi_prop_1275_bytes,
	ddi_prop_int64_op
};


/*
 * Interface to create/modify a managed property on child's behalf...
 * Flags interpreted are:
 *	DDI_PROP_CANSLEEP:	Allow memory allocation to sleep.
 *	DDI_PROP_SYSTEM_DEF:	Manipulate system list rather than driver list.
 *
 * Use same dev_t when modifying or undefining a property.
 * Search for properties with DDI_DEV_T_ANY to match first named
 * property on the list.
 *
 * Properties are stored LIFO and subsequently will match the first
 * `matching' instance.
 */

/*
 * ddi_prop_add:	Add a software defined property
 */

/*
 * define to get a new ddi_prop_t.
 * km_flags are KM_SLEEP or KM_NOSLEEP.
 */

#define	DDI_NEW_PROP_T(km_flags)	\
	(kmem_zalloc(sizeof (ddi_prop_t), km_flags))

/*
 * Allocate a new ddi_prop_t, copy in the name/value, and link it at the
 * head of the selected per-devinfo property list (driver, system, or
 * hardware list depending on flags).  Returns DDI_PROP_SUCCESS,
 * DDI_PROP_INVAL_ARG, or DDI_PROP_NO_MEMORY.
 */
static int
ddi_prop_add(dev_t dev, dev_info_t *dip, int flags,
    char *name, caddr_t value, int length)
{
	ddi_prop_t	*new_propp, *propp;
	ddi_prop_t	**list_head = &(DEVI(dip)->devi_drv_prop_ptr);
	int		km_flags = KM_NOSLEEP;
	int		name_buf_len;

	/*
	 * If dev_t is DDI_DEV_T_ANY or name's length is zero return error.
	 */

	if (dev == DDI_DEV_T_ANY || name == (char *)0 || strlen(name) == 0)
		return (DDI_PROP_INVAL_ARG);

	if (flags & DDI_PROP_CANSLEEP)
		km_flags = KM_SLEEP;

	/* System and hardware properties live on separate lists. */
	if (flags & DDI_PROP_SYSTEM_DEF)
		list_head = &(DEVI(dip)->devi_sys_prop_ptr);
	else if (flags & DDI_PROP_HW_DEF)
		list_head = &(DEVI(dip)->devi_hw_prop_ptr);

	if ((new_propp = DDI_NEW_PROP_T(km_flags)) == NULL)  {
		cmn_err(CE_CONT, prop_no_mem_msg, name);
		return (DDI_PROP_NO_MEMORY);
	}

	/*
	 * If dev is major number 0, then we need to do a ddi_name_to_major
	 * to get the real major number for the device.  This needs to be
	 * done because some drivers need to call ddi_prop_create in their
	 * attach routines but they don't have a dev.  By creating the dev
	 * ourself if the major number is 0, drivers will not have to know what
	 * their major number.  They can just create a dev with major number
	 * 0 and pass it in.  For device 0, we will be doing a little extra
	 * work by recreating the same dev that we already have, but its the
	 * price you pay :-).
	 *
	 * This fixes bug #1098060.
	 */
	if (getmajor(dev) == DDI_MAJOR_T_UNKNOWN) {
		new_propp->prop_dev =
		    makedevice(ddi_name_to_major(DEVI(dip)->devi_binding_name),
		    getminor(dev));
	} else
		new_propp->prop_dev = dev;

	/*
	 * Allocate space for property name and copy it in...
	 * (name_buf_len includes the terminating NUL.)
	 */

	name_buf_len = strlen(name) + 1;
	new_propp->prop_name = kmem_alloc(name_buf_len, km_flags);
	if (new_propp->prop_name == 0)  {
		kmem_free(new_propp, sizeof (ddi_prop_t));
		cmn_err(CE_CONT, prop_no_mem_msg, name);
		return (DDI_PROP_NO_MEMORY);
	}
	bcopy(name, new_propp->prop_name, name_buf_len);

	/*
	 * Set the property type
	 */
	new_propp->prop_flags = flags & DDI_PROP_TYPE_MASK;

	/*
	 * Set length and value ONLY if not an explicit property undefine:
	 * NOTE: value and length are zero for explicit undefines.
	 */

	if (flags & DDI_PROP_UNDEF_IT) {
		new_propp->prop_flags |= DDI_PROP_UNDEF_IT;
	} else {
		if ((new_propp->prop_len = length) != 0) {
			new_propp->prop_val = kmem_alloc(length, km_flags);
			if (new_propp->prop_val == 0)  {
				kmem_free(new_propp->prop_name, name_buf_len);
				kmem_free(new_propp, sizeof (ddi_prop_t));
				cmn_err(CE_CONT, prop_no_mem_msg, name);
				return (DDI_PROP_NO_MEMORY);
			}
			bcopy(value, new_propp->prop_val, length);
		}
	}

	/*
	 * Link property into beginning of list. (Properties are LIFO order.)
	 * All allocation was done above so only list linkage happens under
	 * devi_lock.
	 */

	mutex_enter(&(DEVI(dip)->devi_lock));
	propp = *list_head;
	new_propp->prop_next = propp;
	*list_head = new_propp;
	mutex_exit(&(DEVI(dip)->devi_lock));
	return (DDI_PROP_SUCCESS);
}


/*
 * ddi_prop_change:	Modify a software managed property value
 *
 *			Set new length and value if found.
 *			returns DDI_PROP_INVAL_ARG if dev is DDI_DEV_T_ANY or
 *			input name is the NULL string.
 *			returns DDI_PROP_NO_MEMORY if unable to allocate memory
 *
 *			Note: an undef can be modified to be a define,
 *			(you can't go the other way.)
 */

static int
ddi_prop_change(dev_t dev, dev_info_t *dip, int flags,
    char *name, caddr_t value, int length)
{
	ddi_prop_t	*propp;
	ddi_prop_t	**ppropp;
	caddr_t		p = NULL;

	if ((dev == DDI_DEV_T_ANY) || (name == NULL) || (strlen(name) == 0))
		return (DDI_PROP_INVAL_ARG);

	/*
	 * Preallocate buffer, even if we don't need it...
	 * (done up front, before devi_lock is taken below — presumably so
	 * no allocation happens while holding the mutex; verify against
	 * devi_lock usage rules)
	 */
	if (length != 0)  {
		p = kmem_alloc(length, (flags & DDI_PROP_CANSLEEP) ?
		    KM_SLEEP : KM_NOSLEEP);
		if (p == NULL)  {
			cmn_err(CE_CONT, prop_no_mem_msg, name);
			return (DDI_PROP_NO_MEMORY);
		}
	}

	/*
	 * If the dev_t value contains DDI_MAJOR_T_UNKNOWN for the major
	 * number, a real dev_t value should be created based upon the dip's
	 * binding driver.  See ddi_prop_add...
	 */
	if (getmajor(dev) == DDI_MAJOR_T_UNKNOWN)
		dev = makedevice(
		    ddi_name_to_major(DEVI(dip)->devi_binding_name),
		    getminor(dev));

	/*
	 * Check to see if the property exists.  If so we modify it.
	 * Else we create it by calling ddi_prop_add().
	 */
	mutex_enter(&(DEVI(dip)->devi_lock));
	ppropp = &DEVI(dip)->devi_drv_prop_ptr;
	if (flags & DDI_PROP_SYSTEM_DEF)
		ppropp = &DEVI(dip)->devi_sys_prop_ptr;
	else if (flags & DDI_PROP_HW_DEF)
		ppropp = &DEVI(dip)->devi_hw_prop_ptr;

	if ((propp = i_ddi_prop_search(dev, name, flags, ppropp)) != NULL) {
		/*
		 * Found: fill the preallocated buffer, free the old value,
		 * and swap the new one in.  Clearing DDI_PROP_UNDEF_IT is
		 * what lets an undef become a define (see header comment).
		 */
		if (length != 0)
			bcopy(value, p, length);

		if (propp->prop_len != 0)
			kmem_free(propp->prop_val, propp->prop_len);

		propp->prop_len = length;
		propp->prop_val = p;
		propp->prop_flags &= ~DDI_PROP_UNDEF_IT;
		mutex_exit(&(DEVI(dip)->devi_lock));
		return (DDI_PROP_SUCCESS);
	}

	/* Not found: release the unused buffer and create the property. */
	mutex_exit(&(DEVI(dip)->devi_lock));
	if (length != 0)
		kmem_free(p, length);

	return (ddi_prop_add(dev, dip, flags, name, value, length));
}

/*
 * Common update routine used to update and encode a property.  Creates
 * a property handle, calls the property encode routine, figures out if
 * the property already exists and updates if it does.  Otherwise it
 * creates if it does not exist.
 */
int
ddi_prop_update_common(dev_t match_dev, dev_info_t *dip, int flags,
    char *name, void *data, uint_t nelements,
    int (*prop_create)(prop_handle_t *, void *data, uint_t nelements))
{
	prop_handle_t	ph;
	int		rval;
	uint_t		ourflags;

	/*
	 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
	 * return error.
	 */
	if (match_dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0)
		return (DDI_PROP_INVAL_ARG);

	/*
	 * Create the handle
	 */
	ph.ph_data = NULL;
	ph.ph_cur_pos = NULL;
	ph.ph_save_pos = NULL;
	ph.ph_size = 0;
	ph.ph_ops = &prop_1275_ops;

	/*
	 * ourflags:
	 * For compatibility with the old interfaces.  The old interfaces
	 * didn't sleep by default and slept when the flag was set.  These
	 * interfaces do the opposite.  So the old interfaces now set the
	 * DDI_PROP_DONTSLEEP flag by default which tells us not to sleep.
	 *
	 * ph.ph_flags:
	 * Blocked data or unblocked data allocation
	 * for ph.ph_data in ddi_prop_encode_alloc()
	 */
	if (flags & DDI_PROP_DONTSLEEP) {
		ourflags = flags;
		ph.ph_flags = DDI_PROP_DONTSLEEP;
	} else {
		ourflags = flags | DDI_PROP_CANSLEEP;
		ph.ph_flags = DDI_PROP_CANSLEEP;
	}

	/*
	 * Encode the data and store it in the property handle by
	 * calling the prop_encode routine.
	 */
	if ((rval = (*prop_create)(&ph, data, nelements)) !=
	    DDI_PROP_SUCCESS) {
		if (rval == DDI_PROP_NO_MEMORY)
			cmn_err(CE_CONT, prop_no_mem_msg, name);
		if (ph.ph_size != 0)
			kmem_free(ph.ph_data, ph.ph_size);
		return (rval);
	}

	/*
	 * The old interfaces use a stacking approach to creating
	 * properties.  If we are being called from the old interfaces,
	 * the DDI_PROP_STACK_CREATE flag will be set, so we just do a
	 * create without checking.
	 */
	if (flags & DDI_PROP_STACK_CREATE) {
		rval = ddi_prop_add(match_dev, dip,
		    ourflags, name, ph.ph_data, ph.ph_size);
	} else {
		rval = ddi_prop_change(match_dev, dip,
		    ourflags, name, ph.ph_data, ph.ph_size);
	}

	/*
	 * Free the encoded data allocated in the prop_encode routine.
	 * (ddi_prop_add/ddi_prop_change made their own copy above.)
	 */
	if (ph.ph_size != 0)
		kmem_free(ph.ph_data, ph.ph_size);

	return (rval);
}


/*
 * ddi_prop_create:	Define a managed property:
 *			See above for details.
 */

/*
 * Obsolete byte-property create interface; new drivers should use
 * ddi_prop_update(9F).  Stacks a new definition (DDI_PROP_STACK_CREATE)
 * on the driver property list.
 */
int
ddi_prop_create(dev_t dev, dev_info_t *dip, int flag,
    char *name, caddr_t value, int length)
{
	if (!(flag & DDI_PROP_CANSLEEP)) {
		flag |= DDI_PROP_DONTSLEEP;
#ifdef DDI_PROP_DEBUG
		if (length != 0)
			cmn_err(CE_NOTE, "!ddi_prop_create: interface obsolete,"
			    "use ddi_prop_update (prop = %s, node = %s%d)",
			    name, ddi_driver_name(dip), ddi_get_instance(dip));
#endif /* DDI_PROP_DEBUG */
	}
	flag &= ~DDI_PROP_SYSTEM_DEF;
	return (ddi_prop_update_common(dev, dip,
	    (flag | DDI_PROP_STACK_CREATE | DDI_PROP_TYPE_ANY), name,
	    value, length, ddi_prop_fm_encode_bytes));
}

/*
 * System-defined variant of ddi_prop_create: same stacking create, but
 * on the system property list (DDI_PROP_SYSTEM_DEF).
 */
int
e_ddi_prop_create(dev_t dev, dev_info_t *dip, int flag,
    char *name, caddr_t value, int length)
{
	if (!(flag & DDI_PROP_CANSLEEP))
		flag |= DDI_PROP_DONTSLEEP;
	return (ddi_prop_update_common(dev, dip,
	    (flag | DDI_PROP_SYSTEM_DEF | DDI_PROP_STACK_CREATE |
	    DDI_PROP_TYPE_ANY),
	    name, value, length, ddi_prop_fm_encode_bytes));
}

/*
 * Modify an existing driver-list byte property; fails with
 * DDI_PROP_NOT_FOUND rather than creating a missing property.
 */
int
ddi_prop_modify(dev_t dev, dev_info_t *dip, int flag,
    char *name, caddr_t value, int length)
{
	/* Caller must not pass type bits; the type is fixed to BYTE below. */
	ASSERT((flag & DDI_PROP_TYPE_MASK) == 0);

	/*
	 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
	 * return error.
	 */
	if (dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0)
		return (DDI_PROP_INVAL_ARG);

	if (!(flag & DDI_PROP_CANSLEEP))
		flag |= DDI_PROP_DONTSLEEP;
	flag &= ~DDI_PROP_SYSTEM_DEF;
	if (ddi_prop_exists(dev, dip, (flag | DDI_PROP_NOTPROM), name) == 0)
		return (DDI_PROP_NOT_FOUND);

	return (ddi_prop_update_common(dev, dip,
	    (flag | DDI_PROP_TYPE_BYTE), name,
	    value, length, ddi_prop_fm_encode_bytes));
}

/*
 * System-defined variant of ddi_prop_modify (system property list).
 */
int
e_ddi_prop_modify(dev_t dev, dev_info_t *dip, int flag,
    char *name, caddr_t value, int length)
{
	ASSERT((flag & DDI_PROP_TYPE_MASK) == 0);

	/*
	 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
	 * return error.
	 */
	if (dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0)
		return (DDI_PROP_INVAL_ARG);

	if (ddi_prop_exists(dev, dip, (flag | DDI_PROP_SYSTEM_DEF), name) == 0)
		return (DDI_PROP_NOT_FOUND);

	if (!(flag & DDI_PROP_CANSLEEP))
		flag |= DDI_PROP_DONTSLEEP;
	return (ddi_prop_update_common(dev, dip,
	    (flag | DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_BYTE),
	    name, value, length, ddi_prop_fm_encode_bytes));
}


/*
 * Common lookup routine used to lookup and decode a property.
 * Creates a property handle, searches for the raw encoded data,
 * fills in the handle, and calls the property decode functions
 * passed in.
 *
 * This routine is not static because ddi_bus_prop_op() which lives in
 * ddi_impl.c calls it.  No driver should be calling this routine.
 */
int
ddi_prop_lookup_common(dev_t match_dev, dev_info_t *dip,
    uint_t flags, char *name, void *data, uint_t *nelements,
    int (*prop_decoder)(prop_handle_t *, void *data, uint_t *nelements))
{
	int		rval;
	uint_t		ourflags;
	prop_handle_t	ph;

	if ((match_dev == DDI_DEV_T_NONE) ||
	    (name == NULL) || (strlen(name) == 0))
		return (DDI_PROP_INVAL_ARG);

	/* Lookups sleep by default unless the caller forbids it. */
	ourflags = (flags & DDI_PROP_DONTSLEEP) ? flags :
	    flags | DDI_PROP_CANSLEEP;

	/*
	 * Get the encoded data
	 */
	bzero(&ph, sizeof (prop_handle_t));

	if (flags & DDI_UNBND_DLPI2) {
		/*
		 * For unbound dlpi style-2 devices, index into
		 * the devnames' array and search the global
		 * property list.
		 */
		ourflags &= ~DDI_UNBND_DLPI2;
		rval = i_ddi_prop_search_global(match_dev,
		    ourflags, name, &ph.ph_data, &ph.ph_size);
	} else {
		rval = ddi_prop_search_common(match_dev, dip,
		    PROP_LEN_AND_VAL_ALLOC, ourflags, name,
		    &ph.ph_data, &ph.ph_size);

	}

	if (rval != DDI_PROP_SUCCESS && rval != DDI_PROP_FOUND_1275) {
		ASSERT(ph.ph_data == NULL);
		ASSERT(ph.ph_size == 0);
		return (rval);
	}

	/*
	 * If the encoded data came from a OBP or software
	 * use the 1275 OBP decode/encode routines.
	 */
	ph.ph_cur_pos = ph.ph_data;
	ph.ph_save_pos = ph.ph_data;
	ph.ph_ops = &prop_1275_ops;
	ph.ph_flags = (rval == DDI_PROP_FOUND_1275) ? PH_FROM_PROM : 0;

	rval = (*prop_decoder)(&ph, data, nelements);

	/*
	 * Free the encoded data; the decoder made its own copy
	 * for the caller.
	 */
	if (ph.ph_size != 0)
		kmem_free(ph.ph_data, ph.ph_size);

	return (rval);
}

/*
 * Lookup and return an array of composite properties.  The driver must
 * provide the decode routine.
 */
int
ddi_prop_lookup(dev_t match_dev, dev_info_t *dip,
    uint_t flags, char *name, void *data, uint_t *nelements,
    int (*prop_decoder)(prop_handle_t *, void *data, uint_t *nelements))
{
	return (ddi_prop_lookup_common(match_dev, dip,
	    (flags | DDI_PROP_TYPE_COMPOSITE), name,
	    data, nelements, prop_decoder));
}

/*
 * Return 1 if a property exists (no type checking done).
 * Return 0 if it does not exist.
 */
int
ddi_prop_exists(dev_t match_dev, dev_info_t *dip, uint_t flags, char *name)
{
	int	i;
	uint_t	x = 0;

	/* PROP_EXISTS only tests presence; no value is returned. */
	i = ddi_prop_search_common(match_dev, dip, PROP_EXISTS,
	    flags | DDI_PROP_TYPE_MASK, name, NULL, &x);
	return (i == DDI_PROP_SUCCESS || i == DDI_PROP_FOUND_1275);
}


/*
 * Update an array of composite properties.  The driver must
 * provide the encode routine.
 */
int
ddi_prop_update(dev_t match_dev, dev_info_t *dip,
    char *name, void *data, uint_t nelements,
    int (*prop_create)(prop_handle_t *, void *data, uint_t nelements))
{
	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_COMPOSITE,
	    name, data, nelements, prop_create));
}

/*
 * Get a single integer or boolean property and return it.
 * If the property does not exists, or cannot be decoded,
 * then return the defvalue passed in.
 *
 * This routine always succeeds.
4031 */ 4032 int 4033 ddi_prop_get_int(dev_t match_dev, dev_info_t *dip, uint_t flags, 4034 char *name, int defvalue) 4035 { 4036 int data; 4037 uint_t nelements; 4038 int rval; 4039 4040 if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM | 4041 LDI_DEV_T_ANY | DDI_UNBND_DLPI2)) { 4042 #ifdef DEBUG 4043 if (dip != NULL) { 4044 cmn_err(CE_WARN, "ddi_prop_get_int: invalid flag" 4045 " 0x%x (prop = %s, node = %s%d)", flags, 4046 name, ddi_driver_name(dip), ddi_get_instance(dip)); 4047 } 4048 #endif /* DEBUG */ 4049 flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM | 4050 LDI_DEV_T_ANY | DDI_UNBND_DLPI2; 4051 } 4052 4053 if ((rval = ddi_prop_lookup_common(match_dev, dip, 4054 (flags | DDI_PROP_TYPE_INT), name, &data, &nelements, 4055 ddi_prop_fm_decode_int)) != DDI_PROP_SUCCESS) { 4056 if (rval == DDI_PROP_END_OF_DATA) 4057 data = 1; 4058 else 4059 data = defvalue; 4060 } 4061 return (data); 4062 } 4063 4064 /* 4065 * Get a single 64 bit integer or boolean property and return it. 4066 * If the property does not exists, or cannot be decoded, 4067 * then return the defvalue passed in. 4068 * 4069 * This routine always succeeds. 
 */
int64_t
ddi_prop_get_int64(dev_t match_dev, dev_info_t *dip, uint_t flags,
    char *name, int64_t defvalue)
{
	int64_t	data;
	uint_t	nelements;
	int	rval;

	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2)) {
#ifdef DEBUG
		if (dip != NULL) {
			cmn_err(CE_WARN, "ddi_prop_get_int64: invalid flag"
			    " 0x%x (prop = %s, node = %s%d)", flags,
			    name, ddi_driver_name(dip), ddi_get_instance(dip));
		}
#endif /* DEBUG */
		/*
		 * NOTE(review): unlike ddi_prop_get_int(), bad flags here
		 * return the error code DDI_PROP_INVAL_ARG (as an int64_t)
		 * instead of stripping the bits and continuing — this
		 * contradicts the "always succeeds" header comment above;
		 * confirm whether callers depend on this.
		 */
		return (DDI_PROP_INVAL_ARG);
	}

	/* 64-bit properties are software-only: PROM search is inhibited. */
	if ((rval = ddi_prop_lookup_common(match_dev, dip,
	    (flags | DDI_PROP_TYPE_INT64 | DDI_PROP_NOTPROM),
	    name, &data, &nelements, ddi_prop_fm_decode_int64))
	    != DDI_PROP_SUCCESS) {
		/* Zero-length (boolean) property decodes to 1. */
		if (rval == DDI_PROP_END_OF_DATA)
			data = 1;
		else
			data = defvalue;
	}
	return (data);
}

/*
 * Get an array of integer property
 */
int
ddi_prop_lookup_int_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
    char *name, int **data, uint_t *nelements)
{
	/* Strip (and, on DEBUG, warn about) any flag bits not valid here. */
	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2)) {
#ifdef DEBUG
		if (dip != NULL) {
			cmn_err(CE_WARN, "ddi_prop_lookup_int_array: "
			    "invalid flag 0x%x (prop = %s, node = %s%d)",
			    flags, name, ddi_driver_name(dip),
			    ddi_get_instance(dip));
		}
#endif /* DEBUG */
		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
	}

	return (ddi_prop_lookup_common(match_dev, dip,
	    (flags | DDI_PROP_TYPE_INT), name, data,
	    nelements, ddi_prop_fm_decode_ints));
}

/*
 * Get an array of 64 bit integer properties
 */
int
ddi_prop_lookup_int64_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
    char *name, int64_t **data, uint_t *nelements)
{
	/*
	 * NOTE(review): bad flags return DDI_PROP_INVAL_ARG here rather
	 * than being stripped (matches ddi_prop_get_int64, differs from
	 * the 32-bit array lookup above).
	 */
	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2)) {
#ifdef DEBUG
		if (dip != NULL) {
			cmn_err(CE_WARN, "ddi_prop_lookup_int64_array: "
			    "invalid flag 0x%x (prop = %s, node = %s%d)",
			    flags, name, ddi_driver_name(dip),
			    ddi_get_instance(dip));
		}
#endif /* DEBUG */
		return (DDI_PROP_INVAL_ARG);
	}

	/* 64-bit properties are software-only: PROM search is inhibited. */
	return (ddi_prop_lookup_common(match_dev, dip,
	    (flags | DDI_PROP_TYPE_INT64 | DDI_PROP_NOTPROM),
	    name, data, nelements, ddi_prop_fm_decode_int64_array));
}

/*
 * Update a single integer property.  If the property exists on the drivers
 * property list it updates, else it creates it.
 */
int
ddi_prop_update_int(dev_t match_dev, dev_info_t *dip,
    char *name, int data)
{
	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT,
	    name, &data, 1, ddi_prop_fm_encode_ints));
}

/*
 * Update a single 64 bit integer property.
 * Update the driver property list if it exists, else create it.
 */
int
ddi_prop_update_int64(dev_t match_dev, dev_info_t *dip,
    char *name, int64_t data)
{
	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT64,
	    name, &data, 1, ddi_prop_fm_encode_int64));
}

/*
 * System-defined (DDI_PROP_SYSTEM_DEF) variant of ddi_prop_update_int.
 */
int
e_ddi_prop_update_int(dev_t match_dev, dev_info_t *dip,
    char *name, int data)
{
	return (ddi_prop_update_common(match_dev, dip,
	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT,
	    name, &data, 1, ddi_prop_fm_encode_ints));
}

/*
 * System-defined (DDI_PROP_SYSTEM_DEF) variant of ddi_prop_update_int64.
 */
int
e_ddi_prop_update_int64(dev_t match_dev, dev_info_t *dip,
    char *name, int64_t data)
{
	return (ddi_prop_update_common(match_dev, dip,
	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT64,
	    name, &data, 1, ddi_prop_fm_encode_int64));
}

/*
 * Update an array of integer property.  If the property exists on the drivers
 * property list it updates, else it creates it.
4199 */ 4200 int 4201 ddi_prop_update_int_array(dev_t match_dev, dev_info_t *dip, 4202 char *name, int *data, uint_t nelements) 4203 { 4204 return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT, 4205 name, data, nelements, ddi_prop_fm_encode_ints)); 4206 } 4207 4208 /* 4209 * Update an array of 64 bit integer properties. 4210 * Update the driver property list if it exists, else create it. 4211 */ 4212 int 4213 ddi_prop_update_int64_array(dev_t match_dev, dev_info_t *dip, 4214 char *name, int64_t *data, uint_t nelements) 4215 { 4216 return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT64, 4217 name, data, nelements, ddi_prop_fm_encode_int64)); 4218 } 4219 4220 int 4221 e_ddi_prop_update_int64_array(dev_t match_dev, dev_info_t *dip, 4222 char *name, int64_t *data, uint_t nelements) 4223 { 4224 return (ddi_prop_update_common(match_dev, dip, 4225 DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT64, 4226 name, data, nelements, ddi_prop_fm_encode_int64)); 4227 } 4228 4229 int 4230 e_ddi_prop_update_int_array(dev_t match_dev, dev_info_t *dip, 4231 char *name, int *data, uint_t nelements) 4232 { 4233 return (ddi_prop_update_common(match_dev, dip, 4234 DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT, 4235 name, data, nelements, ddi_prop_fm_encode_ints)); 4236 } 4237 4238 /* 4239 * Get a single string property. 
 */
int
ddi_prop_lookup_string(dev_t match_dev, dev_info_t *dip, uint_t flags,
    char *name, char **data)
{
	uint_t x;

	/* Strip (and, on DEBUG, warn about) any flag bits not valid here. */
	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2)) {
#ifdef DEBUG
		if (dip != NULL) {
			cmn_err(CE_WARN, "%s: invalid flag 0x%x "
			    "(prop = %s, node = %s%d); invalid bits ignored",
			    "ddi_prop_lookup_string", flags, name,
			    ddi_driver_name(dip), ddi_get_instance(dip));
		}
#endif /* DEBUG */
		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
	}

	return (ddi_prop_lookup_common(match_dev, dip,
	    (flags | DDI_PROP_TYPE_STRING), name, data,
	    &x, ddi_prop_fm_decode_string));
}

/*
 * Get an array of strings property.
 */
int
ddi_prop_lookup_string_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
    char *name, char ***data, uint_t *nelements)
{
	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2)) {
#ifdef DEBUG
		if (dip != NULL) {
			cmn_err(CE_WARN, "ddi_prop_lookup_string_array: "
			    "invalid flag 0x%x (prop = %s, node = %s%d)",
			    flags, name, ddi_driver_name(dip),
			    ddi_get_instance(dip));
		}
#endif /* DEBUG */
		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
	}

	return (ddi_prop_lookup_common(match_dev, dip,
	    (flags | DDI_PROP_TYPE_STRING), name, data,
	    nelements, ddi_prop_fm_decode_strings));
}

/*
 * Update a single string property.
 */
int
ddi_prop_update_string(dev_t match_dev, dev_info_t *dip,
    char *name, char *data)
{
	/* Note: the encoder takes &data — a one-element array of char *. */
	return (ddi_prop_update_common(match_dev, dip,
	    DDI_PROP_TYPE_STRING, name, &data, 1,
	    ddi_prop_fm_encode_string));
}

/*
 * System-defined (DDI_PROP_SYSTEM_DEF) variant of ddi_prop_update_string.
 */
int
e_ddi_prop_update_string(dev_t match_dev, dev_info_t *dip,
    char *name, char *data)
{
	return (ddi_prop_update_common(match_dev, dip,
	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_STRING,
	    name, &data, 1, ddi_prop_fm_encode_string));
}


/*
 * Update an array of strings property.
 */
int
ddi_prop_update_string_array(dev_t match_dev, dev_info_t *dip,
    char *name, char **data, uint_t nelements)
{
	return (ddi_prop_update_common(match_dev, dip,
	    DDI_PROP_TYPE_STRING, name, data, nelements,
	    ddi_prop_fm_encode_strings));
}

/*
 * System-defined (DDI_PROP_SYSTEM_DEF) variant of
 * ddi_prop_update_string_array.
 */
int
e_ddi_prop_update_string_array(dev_t match_dev, dev_info_t *dip,
    char *name, char **data, uint_t nelements)
{
	return (ddi_prop_update_common(match_dev, dip,
	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_STRING,
	    name, data, nelements,
	    ddi_prop_fm_encode_strings));
}


/*
 * Get an array of bytes property.
4339 */ 4340 int 4341 ddi_prop_lookup_byte_array(dev_t match_dev, dev_info_t *dip, uint_t flags, 4342 char *name, uchar_t **data, uint_t *nelements) 4343 { 4344 if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM | 4345 LDI_DEV_T_ANY | DDI_UNBND_DLPI2)) { 4346 #ifdef DEBUG 4347 if (dip != NULL) { 4348 cmn_err(CE_WARN, "ddi_prop_lookup_byte_array: " 4349 " invalid flag 0x%x (prop = %s, node = %s%d)", 4350 flags, name, ddi_driver_name(dip), 4351 ddi_get_instance(dip)); 4352 } 4353 #endif /* DEBUG */ 4354 flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM | 4355 LDI_DEV_T_ANY | DDI_UNBND_DLPI2; 4356 } 4357 4358 return (ddi_prop_lookup_common(match_dev, dip, 4359 (flags | DDI_PROP_TYPE_BYTE), name, data, 4360 nelements, ddi_prop_fm_decode_bytes)); 4361 } 4362 4363 /* 4364 * Update an array of bytes property. 4365 */ 4366 int 4367 ddi_prop_update_byte_array(dev_t match_dev, dev_info_t *dip, 4368 char *name, uchar_t *data, uint_t nelements) 4369 { 4370 if (nelements == 0) 4371 return (DDI_PROP_INVAL_ARG); 4372 4373 return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_BYTE, 4374 name, data, nelements, ddi_prop_fm_encode_bytes)); 4375 } 4376 4377 4378 int 4379 e_ddi_prop_update_byte_array(dev_t match_dev, dev_info_t *dip, 4380 char *name, uchar_t *data, uint_t nelements) 4381 { 4382 if (nelements == 0) 4383 return (DDI_PROP_INVAL_ARG); 4384 4385 return (ddi_prop_update_common(match_dev, dip, 4386 DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_BYTE, 4387 name, data, nelements, ddi_prop_fm_encode_bytes)); 4388 } 4389 4390 4391 /* 4392 * ddi_prop_remove_common: Undefine a managed property: 4393 * Input dev_t must match dev_t when defined. 4394 * Returns DDI_PROP_NOT_FOUND, possibly. 4395 * DDI_PROP_INVAL_ARG is also possible if dev is 4396 * DDI_DEV_T_ANY or incoming name is the NULL string. 
 */
int
ddi_prop_remove_common(dev_t dev, dev_info_t *dip, char *name, int flag)
{
	ddi_prop_t	**list_head = &(DEVI(dip)->devi_drv_prop_ptr);
	ddi_prop_t	*propp;
	ddi_prop_t	*lastpropp = NULL;

	if ((dev == DDI_DEV_T_ANY) || (name == (char *)0) ||
	    (strlen(name) == 0)) {
		return (DDI_PROP_INVAL_ARG);
	}

	/* Select the driver, system, or hardware property list. */
	if (flag & DDI_PROP_SYSTEM_DEF)
		list_head = &(DEVI(dip)->devi_sys_prop_ptr);
	else if (flag & DDI_PROP_HW_DEF)
		list_head = &(DEVI(dip)->devi_hw_prop_ptr);

	mutex_enter(&(DEVI(dip)->devi_lock));

	for (propp = *list_head; propp != NULL; propp = propp->prop_next)  {
		if (DDI_STRSAME(propp->prop_name, name) &&
		    (dev == propp->prop_dev)) {
			/*
			 * Unlink this propp allowing for it to
			 * be first in the list:
			 */

			if (lastpropp == NULL)
				*list_head = propp->prop_next;
			else
				lastpropp->prop_next = propp->prop_next;

			mutex_exit(&(DEVI(dip)->devi_lock));

			/*
			 * Free memory and return...
			 * (safe after dropping devi_lock: the node is
			 * already unlinked from the list)
			 */
			kmem_free(propp->prop_name,
			    strlen(propp->prop_name) + 1);
			if (propp->prop_len != 0)
				kmem_free(propp->prop_val, propp->prop_len);
			kmem_free(propp, sizeof (ddi_prop_t));
			return (DDI_PROP_SUCCESS);
		}
		lastpropp = propp;
	}
	mutex_exit(&(DEVI(dip)->devi_lock));
	return (DDI_PROP_NOT_FOUND);
}

/*
 * Remove a property from the driver property list.
 */
int
ddi_prop_remove(dev_t dev, dev_info_t *dip, char *name)
{
	return (ddi_prop_remove_common(dev, dip, name, 0));
}

/*
 * Remove a property from the system property list.
 */
int
e_ddi_prop_remove(dev_t dev, dev_info_t *dip, char *name)
{
	return (ddi_prop_remove_common(dev, dip, name, DDI_PROP_SYSTEM_DEF));
}

/*
 * e_ddi_prop_list_delete: remove a list of properties
 *	Note that the caller needs to provide the required protection
 *	(eg. devi_lock if these properties are still attached to a devi)
 */
void
e_ddi_prop_list_delete(ddi_prop_t *props)
{
	i_ddi_prop_list_delete(props);
}

/*
 * ddi_prop_remove_all_common:
 *	Used before unloading a driver to remove
 *	all properties. (undefines all dev_t's props.)
 *	Also removes `explicitly undefined' props.
 *	No errors possible.
 */
void
ddi_prop_remove_all_common(dev_info_t *dip, int flag)
{
	ddi_prop_t	**list_head;

	mutex_enter(&(DEVI(dip)->devi_lock));
	if (flag & DDI_PROP_SYSTEM_DEF) {
		list_head = &(DEVI(dip)->devi_sys_prop_ptr);
	} else if (flag & DDI_PROP_HW_DEF) {
		list_head = &(DEVI(dip)->devi_hw_prop_ptr);
	} else {
		list_head = &(DEVI(dip)->devi_drv_prop_ptr);
	}
	i_ddi_prop_list_delete(*list_head);
	*list_head = NULL;
	mutex_exit(&(DEVI(dip)->devi_lock));
}


/*
 * ddi_prop_remove_all:		Remove all driver prop definitions.
 */

void
ddi_prop_remove_all(dev_info_t *dip)
{
	ddi_prop_remove_all_common(dip, 0);
}

/*
 * e_ddi_prop_remove_all:	Remove all system prop definitions.
 */

void
e_ddi_prop_remove_all(dev_info_t *dip)
{
	ddi_prop_remove_all_common(dip, (int)DDI_PROP_SYSTEM_DEF);
}


/*
 * ddi_prop_undefine:	Explicitly undefine a property.  Property
 *			searches which match this property return
 *			the error code DDI_PROP_UNDEFINED.
 *
 *			Use ddi_prop_remove to negate effect of
 *			ddi_prop_undefine
 *
 *			See above for error returns.
 */

int
ddi_prop_undefine(dev_t dev, dev_info_t *dip, int flag, char *name)
{
	if (!(flag & DDI_PROP_CANSLEEP))
		flag |= DDI_PROP_DONTSLEEP;
	/* Stack a zero-length UNDEF_IT entry on the driver list. */
	return (ddi_prop_update_common(dev, dip,
	    (flag | DDI_PROP_STACK_CREATE | DDI_PROP_UNDEF_IT |
	    DDI_PROP_TYPE_ANY), name, NULL, 0, ddi_prop_fm_encode_bytes));
}

/*
 * System-defined (DDI_PROP_SYSTEM_DEF) variant of ddi_prop_undefine.
 */
int
e_ddi_prop_undefine(dev_t dev, dev_info_t *dip, int flag, char *name)
{
	if (!(flag & DDI_PROP_CANSLEEP))
		flag |= DDI_PROP_DONTSLEEP;
	return (ddi_prop_update_common(dev, dip,
	    (flag | DDI_PROP_SYSTEM_DEF | DDI_PROP_STACK_CREATE |
	    DDI_PROP_UNDEF_IT | DDI_PROP_TYPE_ANY),
	    name, NULL, 0, ddi_prop_fm_encode_bytes));
}

/*
 * Code to search hardware layer (PROM), if it exists, on behalf of child.
 *
 * if input dip != child_dip, then call is on behalf of child
 * to search PROM, do it via ddi_prop_search_common() and ascend only
 * if allowed.
 *
 * if input dip == ch_dip (child_dip), call is on behalf of root driver,
 * to search for PROM defined props only.
 *
 * Note that the PROM search is done only if the requested dev
 * is either DDI_DEV_T_ANY or DDI_DEV_T_NONE. PROM properties
 * have no associated dev, thus are automatically associated with
 * DDI_DEV_T_NONE.
 *
 * Modifying flag DDI_PROP_NOTPROM inhibits the search in the h/w layer.
 *
 * Returns DDI_PROP_FOUND_1275 if found to indicate to framework
 * that the property resides in the prom.
 */
int
impl_ddi_bus_prop_op(dev_t dev, dev_info_t *dip, dev_info_t *ch_dip,
    ddi_prop_op_t prop_op, int mod_flags,
    char *name, caddr_t valuep, int *lengthp)
{
	int	len;
	caddr_t buffer;

	/*
	 * If requested dev is DDI_DEV_T_NONE or DDI_DEV_T_ANY, then
	 * look in caller's PROM if it's a self identifying device...
	 *
	 * Note that this is very similar to ddi_prop_op, but we
	 * search the PROM instead of the s/w defined properties,
	 * and we are called on by the parent driver to do this for
	 * the child.
	 */

	if (((dev == DDI_DEV_T_NONE) || (dev == DDI_DEV_T_ANY)) &&
	    ndi_dev_is_prom_node(ch_dip) &&
	    ((mod_flags & DDI_PROP_NOTPROM) == 0)) {
		len = prom_getproplen((pnode_t)DEVI(ch_dip)->devi_nodeid, name);
		if (len == -1) {
			return (DDI_PROP_NOT_FOUND);
		}

		/*
		 * If exists only request, we're done
		 */
		if (prop_op == PROP_EXISTS) {
			return (DDI_PROP_FOUND_1275);
		}

		/*
		 * If length only request or prop length == 0, get out
		 */
		if ((prop_op == PROP_LEN) || (len == 0)) {
			*lengthp = len;
			return (DDI_PROP_FOUND_1275);
		}

		/*
		 * Allocate buffer if required... (either way `buffer'
		 * is receiving address).
		 */

		switch (prop_op) {

		case PROP_LEN_AND_VAL_ALLOC:

			buffer = kmem_alloc((size_t)len,
			    mod_flags & DDI_PROP_CANSLEEP ?
			    KM_SLEEP : KM_NOSLEEP);
			if (buffer == NULL) {
				return (DDI_PROP_NO_MEMORY);
			}
			/* Caller owns and must free the allocated buffer. */
			*(caddr_t *)valuep = buffer;
			break;

		case PROP_LEN_AND_VAL_BUF:

			if (len > (*lengthp)) {
				*lengthp = len;
				return (DDI_PROP_BUF_TOO_SMALL);
			}

			buffer = valuep;
			break;

		default:
			break;
		}

		/*
		 * Call the PROM function to do the copy.
		 */
		(void) prom_getprop((pnode_t)DEVI(ch_dip)->devi_nodeid,
		    name, buffer);

		*lengthp = len; /* return the actual length to the caller */
		(void) impl_fix_props(dip, ch_dip, name, len, buffer);
		return (DDI_PROP_FOUND_1275);
	}

	return (DDI_PROP_NOT_FOUND);
}

/*
 * The ddi_bus_prop_op default bus nexus prop op function.
4659 * 4660 * Code to search hardware layer (PROM), if it exists, 4661 * on behalf of child, then, if appropriate, ascend and check 4662 * my own software defined properties... 4663 */ 4664 int 4665 ddi_bus_prop_op(dev_t dev, dev_info_t *dip, dev_info_t *ch_dip, 4666 ddi_prop_op_t prop_op, int mod_flags, 4667 char *name, caddr_t valuep, int *lengthp) 4668 { 4669 int error; 4670 4671 error = impl_ddi_bus_prop_op(dev, dip, ch_dip, prop_op, mod_flags, 4672 name, valuep, lengthp); 4673 4674 if (error == DDI_PROP_SUCCESS || error == DDI_PROP_FOUND_1275 || 4675 error == DDI_PROP_BUF_TOO_SMALL) 4676 return (error); 4677 4678 if (error == DDI_PROP_NO_MEMORY) { 4679 cmn_err(CE_CONT, prop_no_mem_msg, name); 4680 return (DDI_PROP_NO_MEMORY); 4681 } 4682 4683 /* 4684 * Check the 'options' node as a last resort 4685 */ 4686 if ((mod_flags & DDI_PROP_DONTPASS) != 0) 4687 return (DDI_PROP_NOT_FOUND); 4688 4689 if (ch_dip == ddi_root_node()) { 4690 /* 4691 * As a last resort, when we've reached 4692 * the top and still haven't found the 4693 * property, see if the desired property 4694 * is attached to the options node. 4695 * 4696 * The options dip is attached right after boot. 4697 */ 4698 ASSERT(options_dip != NULL); 4699 /* 4700 * Force the "don't pass" flag to *just* see 4701 * what the options node has to offer. 4702 */ 4703 return (ddi_prop_search_common(dev, options_dip, prop_op, 4704 mod_flags|DDI_PROP_DONTPASS, name, valuep, 4705 (uint_t *)lengthp)); 4706 } 4707 4708 /* 4709 * Otherwise, continue search with parent's s/w defined properties... 4710 * NOTE: Using `dip' in following call increments the level. 4711 */ 4712 4713 return (ddi_prop_search_common(dev, dip, prop_op, mod_flags, 4714 name, valuep, (uint_t *)lengthp)); 4715 } 4716 4717 /* 4718 * External property functions used by other parts of the kernel... 4719 */ 4720 4721 /* 4722 * e_ddi_getlongprop: See comments for ddi_get_longprop. 
4723 */ 4724 4725 int 4726 e_ddi_getlongprop(dev_t dev, vtype_t type, char *name, int flags, 4727 caddr_t valuep, int *lengthp) 4728 { 4729 _NOTE(ARGUNUSED(type)) 4730 dev_info_t *devi; 4731 ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_ALLOC; 4732 int error; 4733 4734 if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL) 4735 return (DDI_PROP_NOT_FOUND); 4736 4737 error = cdev_prop_op(dev, devi, prop_op, flags, name, valuep, lengthp); 4738 ddi_release_devi(devi); 4739 return (error); 4740 } 4741 4742 /* 4743 * e_ddi_getlongprop_buf: See comments for ddi_getlongprop_buf. 4744 */ 4745 4746 int 4747 e_ddi_getlongprop_buf(dev_t dev, vtype_t type, char *name, int flags, 4748 caddr_t valuep, int *lengthp) 4749 { 4750 _NOTE(ARGUNUSED(type)) 4751 dev_info_t *devi; 4752 ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_BUF; 4753 int error; 4754 4755 if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL) 4756 return (DDI_PROP_NOT_FOUND); 4757 4758 error = cdev_prop_op(dev, devi, prop_op, flags, name, valuep, lengthp); 4759 ddi_release_devi(devi); 4760 return (error); 4761 } 4762 4763 /* 4764 * e_ddi_getprop: See comments for ddi_getprop. 4765 */ 4766 int 4767 e_ddi_getprop(dev_t dev, vtype_t type, char *name, int flags, int defvalue) 4768 { 4769 _NOTE(ARGUNUSED(type)) 4770 dev_info_t *devi; 4771 ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_BUF; 4772 int propvalue = defvalue; 4773 int proplength = sizeof (int); 4774 int error; 4775 4776 if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL) 4777 return (defvalue); 4778 4779 error = cdev_prop_op(dev, devi, prop_op, 4780 flags, name, (caddr_t)&propvalue, &proplength); 4781 ddi_release_devi(devi); 4782 4783 if ((error == DDI_PROP_SUCCESS) && (proplength == 0)) 4784 propvalue = 1; 4785 4786 return (propvalue); 4787 } 4788 4789 /* 4790 * e_ddi_getprop_int64: 4791 * 4792 * This is a typed interfaces, but predates typed properties. With the 4793 * introduction of typed properties the framework tries to ensure 4794 * consistent use of typed interfaces. 
This is why TYPE_INT64 is not 4795 * part of TYPE_ANY. E_ddi_getprop_int64 is a special case where a 4796 * typed interface invokes legacy (non-typed) interfaces: 4797 * cdev_prop_op(), prop_op(9E), ddi_prop_op(9F)). In this case the 4798 * fact that TYPE_INT64 is not part of TYPE_ANY matters. To support 4799 * this type of lookup as a single operation we invoke the legacy 4800 * non-typed interfaces with the special CONSUMER_TYPED bit set. The 4801 * framework ddi_prop_op(9F) implementation is expected to check for 4802 * CONSUMER_TYPED and, if set, expand type bits beyond TYPE_ANY 4803 * (currently TYPE_INT64). 4804 */ 4805 int64_t 4806 e_ddi_getprop_int64(dev_t dev, vtype_t type, char *name, 4807 int flags, int64_t defvalue) 4808 { 4809 _NOTE(ARGUNUSED(type)) 4810 dev_info_t *devi; 4811 ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_BUF; 4812 int64_t propvalue = defvalue; 4813 int proplength = sizeof (propvalue); 4814 int error; 4815 4816 if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL) 4817 return (defvalue); 4818 4819 error = cdev_prop_op(dev, devi, prop_op, flags | 4820 DDI_PROP_CONSUMER_TYPED, name, (caddr_t)&propvalue, &proplength); 4821 ddi_release_devi(devi); 4822 4823 if ((error == DDI_PROP_SUCCESS) && (proplength == 0)) 4824 propvalue = 1; 4825 4826 return (propvalue); 4827 } 4828 4829 /* 4830 * e_ddi_getproplen: See comments for ddi_getproplen. 
4831 */ 4832 int 4833 e_ddi_getproplen(dev_t dev, vtype_t type, char *name, int flags, int *lengthp) 4834 { 4835 _NOTE(ARGUNUSED(type)) 4836 dev_info_t *devi; 4837 ddi_prop_op_t prop_op = PROP_LEN; 4838 int error; 4839 4840 if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL) 4841 return (DDI_PROP_NOT_FOUND); 4842 4843 error = cdev_prop_op(dev, devi, prop_op, flags, name, NULL, lengthp); 4844 ddi_release_devi(devi); 4845 return (error); 4846 } 4847 4848 /* 4849 * Routines to get at elements of the dev_info structure 4850 */ 4851 4852 /* 4853 * ddi_binding_name: Return the driver binding name of the devinfo node 4854 * This is the name the OS used to bind the node to a driver. 4855 */ 4856 char * 4857 ddi_binding_name(dev_info_t *dip) 4858 { 4859 return (DEVI(dip)->devi_binding_name); 4860 } 4861 4862 /* 4863 * ddi_driver_major: Return the major number of the driver that 4864 * the supplied devinfo is bound to (-1 if none) 4865 */ 4866 major_t 4867 ddi_driver_major(dev_info_t *devi) 4868 { 4869 return (DEVI(devi)->devi_major); 4870 } 4871 4872 /* 4873 * ddi_driver_name: Return the normalized driver name. this is the 4874 * actual driver name 4875 */ 4876 const char * 4877 ddi_driver_name(dev_info_t *devi) 4878 { 4879 major_t major; 4880 4881 if ((major = ddi_driver_major(devi)) != (major_t)-1) 4882 return (ddi_major_to_name(major)); 4883 4884 return (ddi_node_name(devi)); 4885 } 4886 4887 /* 4888 * i_ddi_set_binding_name: Set binding name. 4889 * 4890 * Set the binding name to the given name. 4891 * This routine is for use by the ddi implementation, not by drivers. 4892 */ 4893 void 4894 i_ddi_set_binding_name(dev_info_t *dip, char *name) 4895 { 4896 DEVI(dip)->devi_binding_name = name; 4897 4898 } 4899 4900 /* 4901 * ddi_get_name: A synonym of ddi_binding_name() ... returns a name 4902 * the implementation has used to bind the node to a driver. 
 */
char *
ddi_get_name(dev_info_t *dip)
{
	return (DEVI(dip)->devi_binding_name);
}

/*
 * ddi_node_name: Return the name property of the devinfo node
 * This may differ from ddi_binding_name if the node name
 * does not define a binding to a driver (i.e. generic names).
 */
char *
ddi_node_name(dev_info_t *dip)
{
	return (DEVI(dip)->devi_node_name);
}


/*
 * ddi_get_nodeid: Get nodeid stored in dev_info structure.
 */
int
ddi_get_nodeid(dev_info_t *dip)
{
	return (DEVI(dip)->devi_nodeid);
}

/*
 * ddi_get_instance: Return the instance number stored in the
 * dev_info structure.
 */
int
ddi_get_instance(dev_info_t *dip)
{
	return (DEVI(dip)->devi_instance);
}

/*
 * ddi_get_driver: Return the dev_ops vector stored in the dev_info
 * structure.
 */
struct dev_ops *
ddi_get_driver(dev_info_t *dip)
{
	return (DEVI(dip)->devi_ops);
}

/*
 * ddi_set_driver: Store the given dev_ops vector in the dev_info
 * structure.
 */
void
ddi_set_driver(dev_info_t *dip, struct dev_ops *devo)
{
	DEVI(dip)->devi_ops = devo;
}

/*
 * ddi_set_driver_private/ddi_get_driver_private:
 * Get/set device driver private data in devinfo.
4952 */ 4953 void 4954 ddi_set_driver_private(dev_info_t *dip, void *data) 4955 { 4956 DEVI(dip)->devi_driver_data = data; 4957 } 4958 4959 void * 4960 ddi_get_driver_private(dev_info_t *dip) 4961 { 4962 return (DEVI(dip)->devi_driver_data); 4963 } 4964 4965 /* 4966 * ddi_get_parent, ddi_get_child, ddi_get_next_sibling 4967 */ 4968 4969 dev_info_t * 4970 ddi_get_parent(dev_info_t *dip) 4971 { 4972 return ((dev_info_t *)DEVI(dip)->devi_parent); 4973 } 4974 4975 dev_info_t * 4976 ddi_get_child(dev_info_t *dip) 4977 { 4978 return ((dev_info_t *)DEVI(dip)->devi_child); 4979 } 4980 4981 dev_info_t * 4982 ddi_get_next_sibling(dev_info_t *dip) 4983 { 4984 return ((dev_info_t *)DEVI(dip)->devi_sibling); 4985 } 4986 4987 dev_info_t * 4988 ddi_get_next(dev_info_t *dip) 4989 { 4990 return ((dev_info_t *)DEVI(dip)->devi_next); 4991 } 4992 4993 void 4994 ddi_set_next(dev_info_t *dip, dev_info_t *nextdip) 4995 { 4996 DEVI(dip)->devi_next = DEVI(nextdip); 4997 } 4998 4999 /* 5000 * ddi_root_node: Return root node of devinfo tree 5001 */ 5002 5003 dev_info_t * 5004 ddi_root_node(void) 5005 { 5006 extern dev_info_t *top_devinfo; 5007 5008 return (top_devinfo); 5009 } 5010 5011 /* 5012 * Miscellaneous functions: 5013 */ 5014 5015 /* 5016 * Implementation specific hooks 5017 */ 5018 5019 void 5020 ddi_report_dev(dev_info_t *d) 5021 { 5022 char *b; 5023 5024 (void) ddi_ctlops(d, d, DDI_CTLOPS_REPORTDEV, (void *)0, (void *)0); 5025 5026 /* 5027 * If this devinfo node has cb_ops, it's implicitly accessible from 5028 * userland, so we print its full name together with the instance 5029 * number 'abbreviation' that the driver may use internally. 
5030 */ 5031 if (DEVI(d)->devi_ops->devo_cb_ops != (struct cb_ops *)0 && 5032 (b = kmem_zalloc(MAXPATHLEN, KM_NOSLEEP))) { 5033 cmn_err(CE_CONT, "?%s%d is %s\n", 5034 ddi_driver_name(d), ddi_get_instance(d), 5035 ddi_pathname(d, b)); 5036 kmem_free(b, MAXPATHLEN); 5037 } 5038 } 5039 5040 /* 5041 * ddi_ctlops() is described in the assembler not to buy a new register 5042 * window when it's called and can reduce cost in climbing the device tree 5043 * without using the tail call optimization. 5044 */ 5045 int 5046 ddi_dev_regsize(dev_info_t *dev, uint_t rnumber, off_t *result) 5047 { 5048 int ret; 5049 5050 ret = ddi_ctlops(dev, dev, DDI_CTLOPS_REGSIZE, 5051 (void *)&rnumber, (void *)result); 5052 5053 return (ret == DDI_SUCCESS ? DDI_SUCCESS : DDI_FAILURE); 5054 } 5055 5056 int 5057 ddi_dev_nregs(dev_info_t *dev, int *result) 5058 { 5059 return (ddi_ctlops(dev, dev, DDI_CTLOPS_NREGS, 0, (void *)result)); 5060 } 5061 5062 int 5063 ddi_dev_is_sid(dev_info_t *d) 5064 { 5065 return (ddi_ctlops(d, d, DDI_CTLOPS_SIDDEV, (void *)0, (void *)0)); 5066 } 5067 5068 int 5069 ddi_slaveonly(dev_info_t *d) 5070 { 5071 return (ddi_ctlops(d, d, DDI_CTLOPS_SLAVEONLY, (void *)0, (void *)0)); 5072 } 5073 5074 int 5075 ddi_dev_affinity(dev_info_t *a, dev_info_t *b) 5076 { 5077 return (ddi_ctlops(a, a, DDI_CTLOPS_AFFINITY, (void *)b, (void *)0)); 5078 } 5079 5080 int 5081 ddi_streams_driver(dev_info_t *dip) 5082 { 5083 if (i_ddi_devi_attached(dip) && 5084 (DEVI(dip)->devi_ops->devo_cb_ops != NULL) && 5085 (DEVI(dip)->devi_ops->devo_cb_ops->cb_str != NULL)) 5086 return (DDI_SUCCESS); 5087 return (DDI_FAILURE); 5088 } 5089 5090 /* 5091 * callback free list 5092 */ 5093 5094 static int ncallbacks; 5095 static int nc_low = 170; 5096 static int nc_med = 512; 5097 static int nc_high = 2048; 5098 static struct ddi_callback *callbackq; 5099 static struct ddi_callback *callbackqfree; 5100 5101 /* 5102 * set/run callback lists 5103 */ 5104 struct cbstats { 5105 kstat_named_t cb_asked; 5106 
kstat_named_t cb_new; 5107 kstat_named_t cb_run; 5108 kstat_named_t cb_delete; 5109 kstat_named_t cb_maxreq; 5110 kstat_named_t cb_maxlist; 5111 kstat_named_t cb_alloc; 5112 kstat_named_t cb_runouts; 5113 kstat_named_t cb_L2; 5114 kstat_named_t cb_grow; 5115 } cbstats = { 5116 {"asked", KSTAT_DATA_UINT32}, 5117 {"new", KSTAT_DATA_UINT32}, 5118 {"run", KSTAT_DATA_UINT32}, 5119 {"delete", KSTAT_DATA_UINT32}, 5120 {"maxreq", KSTAT_DATA_UINT32}, 5121 {"maxlist", KSTAT_DATA_UINT32}, 5122 {"alloc", KSTAT_DATA_UINT32}, 5123 {"runouts", KSTAT_DATA_UINT32}, 5124 {"L2", KSTAT_DATA_UINT32}, 5125 {"grow", KSTAT_DATA_UINT32}, 5126 }; 5127 5128 #define nc_asked cb_asked.value.ui32 5129 #define nc_new cb_new.value.ui32 5130 #define nc_run cb_run.value.ui32 5131 #define nc_delete cb_delete.value.ui32 5132 #define nc_maxreq cb_maxreq.value.ui32 5133 #define nc_maxlist cb_maxlist.value.ui32 5134 #define nc_alloc cb_alloc.value.ui32 5135 #define nc_runouts cb_runouts.value.ui32 5136 #define nc_L2 cb_L2.value.ui32 5137 #define nc_grow cb_grow.value.ui32 5138 5139 static kmutex_t ddi_callback_mutex; 5140 5141 /* 5142 * callbacks are handled using a L1/L2 cache. The L1 cache 5143 * comes out of kmem_cache_alloc and can expand/shrink dynamically. If 5144 * we can't get callbacks from the L1 cache [because pageout is doing 5145 * I/O at the time freemem is 0], we allocate callbacks out of the 5146 * L2 cache. The L2 cache is static and depends on the memory size. 
5147 * [We might also count the number of devices at probe time and 5148 * allocate one structure per device and adjust for deferred attach] 5149 */ 5150 void 5151 impl_ddi_callback_init(void) 5152 { 5153 int i; 5154 uint_t physmegs; 5155 kstat_t *ksp; 5156 5157 physmegs = physmem >> (20 - PAGESHIFT); 5158 if (physmegs < 48) { 5159 ncallbacks = nc_low; 5160 } else if (physmegs < 128) { 5161 ncallbacks = nc_med; 5162 } else { 5163 ncallbacks = nc_high; 5164 } 5165 5166 /* 5167 * init free list 5168 */ 5169 callbackq = kmem_zalloc( 5170 ncallbacks * sizeof (struct ddi_callback), KM_SLEEP); 5171 for (i = 0; i < ncallbacks-1; i++) 5172 callbackq[i].c_nfree = &callbackq[i+1]; 5173 callbackqfree = callbackq; 5174 5175 /* init kstats */ 5176 if (ksp = kstat_create("unix", 0, "cbstats", "misc", KSTAT_TYPE_NAMED, 5177 sizeof (cbstats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL)) { 5178 ksp->ks_data = (void *) &cbstats; 5179 kstat_install(ksp); 5180 } 5181 5182 } 5183 5184 static void 5185 callback_insert(int (*funcp)(caddr_t), caddr_t arg, uintptr_t *listid, 5186 int count) 5187 { 5188 struct ddi_callback *list, *marker, *new; 5189 size_t size = sizeof (struct ddi_callback); 5190 5191 list = marker = (struct ddi_callback *)*listid; 5192 while (list != NULL) { 5193 if (list->c_call == funcp && list->c_arg == arg) { 5194 list->c_count += count; 5195 return; 5196 } 5197 marker = list; 5198 list = list->c_nlist; 5199 } 5200 new = kmem_alloc(size, KM_NOSLEEP); 5201 if (new == NULL) { 5202 new = callbackqfree; 5203 if (new == NULL) { 5204 new = kmem_alloc_tryhard(sizeof (struct ddi_callback), 5205 &size, KM_NOSLEEP | KM_PANIC); 5206 cbstats.nc_grow++; 5207 } else { 5208 callbackqfree = new->c_nfree; 5209 cbstats.nc_L2++; 5210 } 5211 } 5212 if (marker != NULL) { 5213 marker->c_nlist = new; 5214 } else { 5215 *listid = (uintptr_t)new; 5216 } 5217 new->c_size = size; 5218 new->c_nlist = NULL; 5219 new->c_call = funcp; 5220 new->c_arg = arg; 5221 new->c_count = count; 5222 
cbstats.nc_new++; 5223 cbstats.nc_alloc++; 5224 if (cbstats.nc_alloc > cbstats.nc_maxlist) 5225 cbstats.nc_maxlist = cbstats.nc_alloc; 5226 } 5227 5228 void 5229 ddi_set_callback(int (*funcp)(caddr_t), caddr_t arg, uintptr_t *listid) 5230 { 5231 mutex_enter(&ddi_callback_mutex); 5232 cbstats.nc_asked++; 5233 if ((cbstats.nc_asked - cbstats.nc_run) > cbstats.nc_maxreq) 5234 cbstats.nc_maxreq = (cbstats.nc_asked - cbstats.nc_run); 5235 (void) callback_insert(funcp, arg, listid, 1); 5236 mutex_exit(&ddi_callback_mutex); 5237 } 5238 5239 static void 5240 real_callback_run(void *Queue) 5241 { 5242 int (*funcp)(caddr_t); 5243 caddr_t arg; 5244 int count, rval; 5245 uintptr_t *listid; 5246 struct ddi_callback *list, *marker; 5247 int check_pending = 1; 5248 int pending = 0; 5249 5250 do { 5251 mutex_enter(&ddi_callback_mutex); 5252 listid = Queue; 5253 list = (struct ddi_callback *)*listid; 5254 if (list == NULL) { 5255 mutex_exit(&ddi_callback_mutex); 5256 return; 5257 } 5258 if (check_pending) { 5259 marker = list; 5260 while (marker != NULL) { 5261 pending += marker->c_count; 5262 marker = marker->c_nlist; 5263 } 5264 check_pending = 0; 5265 } 5266 ASSERT(pending > 0); 5267 ASSERT(list->c_count > 0); 5268 funcp = list->c_call; 5269 arg = list->c_arg; 5270 count = list->c_count; 5271 *(uintptr_t *)Queue = (uintptr_t)list->c_nlist; 5272 if (list >= &callbackq[0] && 5273 list <= &callbackq[ncallbacks-1]) { 5274 list->c_nfree = callbackqfree; 5275 callbackqfree = list; 5276 } else 5277 kmem_free(list, list->c_size); 5278 5279 cbstats.nc_delete++; 5280 cbstats.nc_alloc--; 5281 mutex_exit(&ddi_callback_mutex); 5282 5283 do { 5284 if ((rval = (*funcp)(arg)) == 0) { 5285 pending -= count; 5286 mutex_enter(&ddi_callback_mutex); 5287 (void) callback_insert(funcp, arg, listid, 5288 count); 5289 cbstats.nc_runouts++; 5290 } else { 5291 pending--; 5292 mutex_enter(&ddi_callback_mutex); 5293 cbstats.nc_run++; 5294 } 5295 mutex_exit(&ddi_callback_mutex); 5296 } while (rval != 0 && 
(--count > 0)); 5297 } while (pending > 0); 5298 } 5299 5300 void 5301 ddi_run_callback(uintptr_t *listid) 5302 { 5303 softcall(real_callback_run, listid); 5304 } 5305 5306 /* 5307 * ddi_periodic_t 5308 * ddi_periodic_add(void (*func)(void *), void *arg, hrtime_t interval, 5309 * int level) 5310 * 5311 * INTERFACE LEVEL 5312 * Solaris DDI specific (Solaris DDI) 5313 * 5314 * PARAMETERS 5315 * func: the callback function 5316 * 5317 * The callback function will be invoked. The function is invoked 5318 * in kernel context if the argument level passed is the zero. 5319 * Otherwise it's invoked in interrupt context at the specified 5320 * level. 5321 * 5322 * arg: the argument passed to the callback function 5323 * 5324 * interval: interval time 5325 * 5326 * level : callback interrupt level 5327 * 5328 * If the value is the zero, the callback function is invoked 5329 * in kernel context. If the value is more than the zero, but 5330 * less than or equal to ten, the callback function is invoked in 5331 * interrupt context at the specified interrupt level, which may 5332 * be used for real time applications. 5333 * 5334 * This value must be in range of 0-10, which can be a numeric 5335 * number or a pre-defined macro (DDI_IPL_0, ... , DDI_IPL_10). 5336 * 5337 * DESCRIPTION 5338 * ddi_periodic_add(9F) schedules the specified function to be 5339 * periodically invoked in the interval time. 5340 * 5341 * As well as timeout(9F), the exact time interval over which the function 5342 * takes effect cannot be guaranteed, but the value given is a close 5343 * approximation. 5344 * 5345 * Drivers waiting on behalf of processes with real-time constraints must 5346 * pass non-zero value with the level argument to ddi_periodic_add(9F). 5347 * 5348 * RETURN VALUES 5349 * ddi_periodic_add(9F) returns a non-zero opaque value (ddi_periodic_t), 5350 * which must be used for ddi_periodic_delete(9F) to specify the request. 
 *
 * CONTEXT
 * ddi_periodic_add(9F) can be called in user or kernel context, but
 * it cannot be called in interrupt context, which is different from
 * timeout(9F).
 */
ddi_periodic_t
ddi_periodic_add(void (*func)(void *), void *arg, hrtime_t interval, int level)
{
	/*
	 * Sanity check of the argument level.
	 */
	if (level < DDI_IPL_0 || level > DDI_IPL_10)
		cmn_err(CE_PANIC,
		    "ddi_periodic_add: invalid interrupt level (%d).", level);

	/*
	 * Sanity check of the context. ddi_periodic_add() cannot be
	 * called in either interrupt context or high interrupt context.
	 */
	if (servicing_interrupt())
		cmn_err(CE_PANIC,
		    "ddi_periodic_add: called in (high) interrupt context.");

	/* Hand the request to the common timer layer (ddi_timer). */
	return ((ddi_periodic_t)i_timeout(func, arg, interval, level));
}

/*
 * void
 * ddi_periodic_delete(ddi_periodic_t req)
 *
 * INTERFACE LEVEL
 * Solaris DDI specific (Solaris DDI)
 *
 * PARAMETERS
 * req: ddi_periodic_t opaque value ddi_periodic_add(9F) returned
 * previously.
 *
 * DESCRIPTION
 * ddi_periodic_delete(9F) cancels the ddi_periodic_add(9F) request
 * previously requested.
 *
 * ddi_periodic_delete(9F) will not return until the pending request
 * is canceled or executed.
 *
 * As well as untimeout(9F), calling ddi_periodic_delete(9F) for a
 * timeout which is either running on another CPU, or has already
 * completed causes no problems. However, unlike untimeout(9F), there is
 * no restrictions on the lock which might be held across the call to
 * ddi_periodic_delete(9F).
 *
 * Drivers should be structured with the understanding that the arrival of
 * both an interrupt and a timeout for that interrupt can occasionally
 * occur, in either order.
5405 * 5406 * CONTEXT 5407 * ddi_periodic_delete(9F) can be called in user or kernel context, but 5408 * it cannot be called in interrupt context, which is different from 5409 * untimeout(9F). 5410 */ 5411 void 5412 ddi_periodic_delete(ddi_periodic_t req) 5413 { 5414 /* 5415 * Sanity check of the context. ddi_periodic_delete() cannot be 5416 * called in either interrupt context or high interrupt context. 5417 */ 5418 if (servicing_interrupt()) 5419 cmn_err(CE_PANIC, 5420 "ddi_periodic_delete: called in (high) interrupt context."); 5421 5422 i_untimeout((timeout_t)req); 5423 } 5424 5425 dev_info_t * 5426 nodevinfo(dev_t dev, int otyp) 5427 { 5428 _NOTE(ARGUNUSED(dev, otyp)) 5429 return ((dev_info_t *)0); 5430 } 5431 5432 /* 5433 * A driver should support its own getinfo(9E) entry point. This function 5434 * is provided as a convenience for ON drivers that don't expect their 5435 * getinfo(9E) entry point to be called. A driver that uses this must not 5436 * call ddi_create_minor_node. 5437 */ 5438 int 5439 ddi_no_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) 5440 { 5441 _NOTE(ARGUNUSED(dip, infocmd, arg, result)) 5442 return (DDI_FAILURE); 5443 } 5444 5445 /* 5446 * A driver should support its own getinfo(9E) entry point. This function 5447 * is provided as a convenience for ON drivers that where the minor number 5448 * is the instance. Drivers that do not have 1:1 mapping must implement 5449 * their own getinfo(9E) function. 
 */
int
ddi_getinfo_1to1(dev_info_t *dip, ddi_info_cmd_t infocmd,
    void *arg, void **result)
{
	_NOTE(ARGUNUSED(dip))
	int instance;

	if (infocmd != DDI_INFO_DEVT2INSTANCE)
		return (DDI_FAILURE);

	/* 1:1 mapping: the minor number is the instance number. */
	instance = getminor((dev_t)(uintptr_t)arg);
	*result = (void *)(uintptr_t)instance;
	return (DDI_SUCCESS);
}

/*
 * ddifail: attach/detach entry point stub that always fails.
 */
int
ddifail(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	_NOTE(ARGUNUSED(devi, cmd))
	return (DDI_FAILURE);
}

/*
 * ddi_no_dma_*: stub DMA bus ops for nexi that do not support DMA.
 * Each returns the appropriate DDI DMA failure code.
 */
int
ddi_no_dma_map(dev_info_t *dip, dev_info_t *rdip,
    struct ddi_dma_req *dmareqp, ddi_dma_handle_t *handlep)
{
	_NOTE(ARGUNUSED(dip, rdip, dmareqp, handlep))
	return (DDI_DMA_NOMAPPING);
}

int
ddi_no_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
    int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
{
	_NOTE(ARGUNUSED(dip, rdip, attr, waitfp, arg, handlep))
	return (DDI_DMA_BADATTR);
}

int
ddi_no_dma_freehdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle)
{
	_NOTE(ARGUNUSED(dip, rdip, handle))
	return (DDI_FAILURE);
}

int
ddi_no_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
    ddi_dma_cookie_t *cp, uint_t *ccountp)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, dmareq, cp, ccountp))
	return (DDI_DMA_NOMAPPING);
}

int
ddi_no_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle)
{
	_NOTE(ARGUNUSED(dip, rdip, handle))
	return (DDI_FAILURE);
}

int
ddi_no_dma_flush(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, off_t off, size_t len,
    uint_t cache_flags)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, off, len, cache_flags))
	return (DDI_FAILURE);
}

int
ddi_no_dma_win(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, uint_t win, off_t *offp,
    size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, win, offp, lenp, cookiep, ccountp))
	return (DDI_FAILURE);
}

int
ddi_no_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
    off_t *offp, size_t *lenp, caddr_t *objp, uint_t flags)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, request, offp, lenp, objp, flags))
	return (DDI_FAILURE);
}

/*
 * ddivoid: no-op entry point stub.
 */
void
ddivoid(void)
{}

/*
 * nochpoll: chpoll(9E) stub for drivers that do not support polling.
 */
int
nochpoll(dev_t dev, short events, int anyyet, short *reventsp,
    struct pollhead **pollhdrp)
{
	_NOTE(ARGUNUSED(dev, events, anyyet, reventsp, pollhdrp))
	return (ENXIO);
}

/*
 * ddi_get_cred: Return the credentials of the current thread.
 */
cred_t *
ddi_get_cred(void)
{
	return (CRED());
}

/*
 * ddi_get_lbolt: Return the current value of the lbolt tick counter.
 */
clock_t
ddi_get_lbolt(void)
{
	return (lbolt);
}

/*
 * ddi_get_time: Return the current time in seconds.
 */
time_t
ddi_get_time(void)
{
	time_t now;

	/*
	 * If gethrestime_sec() reads as zero (presumably the
	 * high-resolution clock has not been set yet — e.g. early in
	 * boot; confirm against clock initialization), fall back to
	 * reading the time-of-day source under tod_lock.
	 */
	if ((now = gethrestime_sec()) == 0) {
		timestruc_t ts;
		mutex_enter(&tod_lock);
		ts = tod_get();
		mutex_exit(&tod_lock);
		return (ts.tv_sec);
	} else {
		return (now);
	}
}

/*
 * ddi_get_pid: Return the process id of the current process.
 */
pid_t
ddi_get_pid(void)
{
	return (ttoproc(curthread)->p_pid);
}

/*
 * ddi_get_kt_did: Return the id of the current kernel thread.
 */
kt_did_t
ddi_get_kt_did(void)
{
	return (curthread->t_did);
}

/*
 * This function returns B_TRUE if the caller can reasonably expect that a call
 * to cv_wait_sig(9F), cv_timedwait_sig(9F), or qwait_sig(9F) could be awakened
 * by user-level signal. If it returns B_FALSE, then the caller should use
 * other means to make certain that the wait will not hang "forever."
 *
 * It does not check the signal mask, nor for reception of any particular
 * signal.
 *
 * Currently, a thread can receive a signal if it's not a kernel thread and it
 * is not in the middle of exit(2) tear-down.
Threads that are in that 5604 * tear-down effectively convert cv_wait_sig to cv_wait, cv_timedwait_sig to 5605 * cv_timedwait, and qwait_sig to qwait. 5606 */ 5607 boolean_t 5608 ddi_can_receive_sig(void) 5609 { 5610 proc_t *pp; 5611 5612 if (curthread->t_proc_flag & TP_LWPEXIT) 5613 return (B_FALSE); 5614 if ((pp = ttoproc(curthread)) == NULL) 5615 return (B_FALSE); 5616 return (pp->p_as != &kas); 5617 } 5618 5619 /* 5620 * Swap bytes in 16-bit [half-]words 5621 */ 5622 void 5623 swab(void *src, void *dst, size_t nbytes) 5624 { 5625 uchar_t *pf = (uchar_t *)src; 5626 uchar_t *pt = (uchar_t *)dst; 5627 uchar_t tmp; 5628 int nshorts; 5629 5630 nshorts = nbytes >> 1; 5631 5632 while (--nshorts >= 0) { 5633 tmp = *pf++; 5634 *pt++ = *pf++; 5635 *pt++ = tmp; 5636 } 5637 } 5638 5639 static void 5640 ddi_append_minor_node(dev_info_t *ddip, struct ddi_minor_data *dmdp) 5641 { 5642 struct ddi_minor_data *dp; 5643 5644 mutex_enter(&(DEVI(ddip)->devi_lock)); 5645 i_devi_enter(ddip, DEVI_S_MD_UPDATE, DEVI_S_MD_UPDATE, 1); 5646 5647 if ((dp = DEVI(ddip)->devi_minor) == (struct ddi_minor_data *)NULL) { 5648 DEVI(ddip)->devi_minor = dmdp; 5649 } else { 5650 while (dp->next != (struct ddi_minor_data *)NULL) 5651 dp = dp->next; 5652 dp->next = dmdp; 5653 } 5654 5655 i_devi_exit(ddip, DEVI_S_MD_UPDATE, 1); 5656 mutex_exit(&(DEVI(ddip)->devi_lock)); 5657 } 5658 5659 /* 5660 * Part of the obsolete SunCluster DDI Hooks. 5661 * Keep for binary compatibility 5662 */ 5663 minor_t 5664 ddi_getiminor(dev_t dev) 5665 { 5666 return (getminor(dev)); 5667 } 5668 5669 static int 5670 i_log_devfs_minor_create(dev_info_t *dip, char *minor_name) 5671 { 5672 int se_flag; 5673 int kmem_flag; 5674 int se_err; 5675 char *pathname, *class_name; 5676 sysevent_t *ev = NULL; 5677 sysevent_id_t eid; 5678 sysevent_value_t se_val; 5679 sysevent_attr_list_t *ev_attr_list = NULL; 5680 5681 /* determine interrupt context */ 5682 se_flag = (servicing_interrupt()) ? 
SE_NOSLEEP : SE_SLEEP; 5683 kmem_flag = (se_flag == SE_SLEEP) ? KM_SLEEP : KM_NOSLEEP; 5684 5685 i_ddi_di_cache_invalidate(kmem_flag); 5686 5687 #ifdef DEBUG 5688 if ((se_flag == SE_NOSLEEP) && sunddi_debug) { 5689 cmn_err(CE_CONT, "ddi_create_minor_node: called from " 5690 "interrupt level by driver %s", 5691 ddi_driver_name(dip)); 5692 } 5693 #endif /* DEBUG */ 5694 5695 ev = sysevent_alloc(EC_DEVFS, ESC_DEVFS_MINOR_CREATE, EP_DDI, se_flag); 5696 if (ev == NULL) { 5697 goto fail; 5698 } 5699 5700 pathname = kmem_alloc(MAXPATHLEN, kmem_flag); 5701 if (pathname == NULL) { 5702 sysevent_free(ev); 5703 goto fail; 5704 } 5705 5706 (void) ddi_pathname(dip, pathname); 5707 ASSERT(strlen(pathname)); 5708 se_val.value_type = SE_DATA_TYPE_STRING; 5709 se_val.value.sv_string = pathname; 5710 if (sysevent_add_attr(&ev_attr_list, DEVFS_PATHNAME, 5711 &se_val, se_flag) != 0) { 5712 kmem_free(pathname, MAXPATHLEN); 5713 sysevent_free(ev); 5714 goto fail; 5715 } 5716 kmem_free(pathname, MAXPATHLEN); 5717 5718 /* add the device class attribute */ 5719 if ((class_name = i_ddi_devi_class(dip)) != NULL) { 5720 se_val.value_type = SE_DATA_TYPE_STRING; 5721 se_val.value.sv_string = class_name; 5722 if (sysevent_add_attr(&ev_attr_list, 5723 DEVFS_DEVI_CLASS, &se_val, SE_SLEEP) != 0) { 5724 sysevent_free_attr(ev_attr_list); 5725 goto fail; 5726 } 5727 } 5728 5729 /* 5730 * allow for NULL minor names 5731 */ 5732 if (minor_name != NULL) { 5733 se_val.value.sv_string = minor_name; 5734 if (sysevent_add_attr(&ev_attr_list, DEVFS_MINOR_NAME, 5735 &se_val, se_flag) != 0) { 5736 sysevent_free_attr(ev_attr_list); 5737 sysevent_free(ev); 5738 goto fail; 5739 } 5740 } 5741 5742 if (sysevent_attach_attributes(ev, ev_attr_list) != 0) { 5743 sysevent_free_attr(ev_attr_list); 5744 sysevent_free(ev); 5745 goto fail; 5746 } 5747 5748 if ((se_err = log_sysevent(ev, se_flag, &eid)) != 0) { 5749 if (se_err == SE_NO_TRANSPORT) { 5750 cmn_err(CE_WARN, "/devices or /dev may not be current " 5751 "for driver 
%s (%s). Run devfsadm -i %s", 5752 ddi_driver_name(dip), "syseventd not responding", 5753 ddi_driver_name(dip)); 5754 } else { 5755 sysevent_free(ev); 5756 goto fail; 5757 } 5758 } 5759 5760 sysevent_free(ev); 5761 return (DDI_SUCCESS); 5762 fail: 5763 cmn_err(CE_WARN, "/devices or /dev may not be current " 5764 "for driver %s. Run devfsadm -i %s", 5765 ddi_driver_name(dip), ddi_driver_name(dip)); 5766 return (DDI_SUCCESS); 5767 } 5768 5769 /* 5770 * failing to remove a minor node is not of interest 5771 * therefore we do not generate an error message 5772 */ 5773 static int 5774 i_log_devfs_minor_remove(dev_info_t *dip, char *minor_name) 5775 { 5776 char *pathname, *class_name; 5777 sysevent_t *ev; 5778 sysevent_id_t eid; 5779 sysevent_value_t se_val; 5780 sysevent_attr_list_t *ev_attr_list = NULL; 5781 5782 /* 5783 * only log ddi_remove_minor_node() calls outside the scope 5784 * of attach/detach reconfigurations and when the dip is 5785 * still initialized. 5786 */ 5787 if (DEVI_IS_ATTACHING(dip) || DEVI_IS_DETACHING(dip) || 5788 (i_ddi_node_state(dip) < DS_INITIALIZED)) { 5789 return (DDI_SUCCESS); 5790 } 5791 5792 i_ddi_di_cache_invalidate(KM_SLEEP); 5793 5794 ev = sysevent_alloc(EC_DEVFS, ESC_DEVFS_MINOR_REMOVE, EP_DDI, SE_SLEEP); 5795 if (ev == NULL) { 5796 return (DDI_SUCCESS); 5797 } 5798 5799 pathname = kmem_alloc(MAXPATHLEN, KM_SLEEP); 5800 if (pathname == NULL) { 5801 sysevent_free(ev); 5802 return (DDI_SUCCESS); 5803 } 5804 5805 (void) ddi_pathname(dip, pathname); 5806 ASSERT(strlen(pathname)); 5807 se_val.value_type = SE_DATA_TYPE_STRING; 5808 se_val.value.sv_string = pathname; 5809 if (sysevent_add_attr(&ev_attr_list, DEVFS_PATHNAME, 5810 &se_val, SE_SLEEP) != 0) { 5811 kmem_free(pathname, MAXPATHLEN); 5812 sysevent_free(ev); 5813 return (DDI_SUCCESS); 5814 } 5815 5816 kmem_free(pathname, MAXPATHLEN); 5817 5818 /* 5819 * allow for NULL minor names 5820 */ 5821 if (minor_name != NULL) { 5822 se_val.value.sv_string = minor_name; 5823 if 
(sysevent_add_attr(&ev_attr_list, DEVFS_MINOR_NAME, 5824 &se_val, SE_SLEEP) != 0) { 5825 sysevent_free_attr(ev_attr_list); 5826 goto fail; 5827 } 5828 } 5829 5830 if ((class_name = i_ddi_devi_class(dip)) != NULL) { 5831 /* add the device class, driver name and instance attributes */ 5832 5833 se_val.value_type = SE_DATA_TYPE_STRING; 5834 se_val.value.sv_string = class_name; 5835 if (sysevent_add_attr(&ev_attr_list, 5836 DEVFS_DEVI_CLASS, &se_val, SE_SLEEP) != 0) { 5837 sysevent_free_attr(ev_attr_list); 5838 goto fail; 5839 } 5840 5841 se_val.value_type = SE_DATA_TYPE_STRING; 5842 se_val.value.sv_string = (char *)ddi_driver_name(dip); 5843 if (sysevent_add_attr(&ev_attr_list, 5844 DEVFS_DRIVER_NAME, &se_val, SE_SLEEP) != 0) { 5845 sysevent_free_attr(ev_attr_list); 5846 goto fail; 5847 } 5848 5849 se_val.value_type = SE_DATA_TYPE_INT32; 5850 se_val.value.sv_int32 = ddi_get_instance(dip); 5851 if (sysevent_add_attr(&ev_attr_list, 5852 DEVFS_INSTANCE, &se_val, SE_SLEEP) != 0) { 5853 sysevent_free_attr(ev_attr_list); 5854 goto fail; 5855 } 5856 5857 } 5858 5859 if (sysevent_attach_attributes(ev, ev_attr_list) != 0) { 5860 sysevent_free_attr(ev_attr_list); 5861 } else { 5862 (void) log_sysevent(ev, SE_SLEEP, &eid); 5863 } 5864 fail: 5865 sysevent_free(ev); 5866 return (DDI_SUCCESS); 5867 } 5868 5869 /* 5870 * Derive the device class of the node. 5871 * Device class names aren't defined yet. Until this is done we use 5872 * devfs event subclass names as device class names. 
5873 */ 5874 static int 5875 derive_devi_class(dev_info_t *dip, char *node_type, int flag) 5876 { 5877 int rv = DDI_SUCCESS; 5878 5879 if (i_ddi_devi_class(dip) == NULL) { 5880 if (strncmp(node_type, DDI_NT_BLOCK, 5881 sizeof (DDI_NT_BLOCK) - 1) == 0 && 5882 (node_type[sizeof (DDI_NT_BLOCK) - 1] == '\0' || 5883 node_type[sizeof (DDI_NT_BLOCK) - 1] == ':') && 5884 strcmp(node_type, DDI_NT_FD) != 0) { 5885 5886 rv = i_ddi_set_devi_class(dip, ESC_DISK, flag); 5887 5888 } else if (strncmp(node_type, DDI_NT_NET, 5889 sizeof (DDI_NT_NET) - 1) == 0 && 5890 (node_type[sizeof (DDI_NT_NET) - 1] == '\0' || 5891 node_type[sizeof (DDI_NT_NET) - 1] == ':')) { 5892 5893 rv = i_ddi_set_devi_class(dip, ESC_NETWORK, flag); 5894 5895 } else if (strncmp(node_type, DDI_NT_PRINTER, 5896 sizeof (DDI_NT_PRINTER) - 1) == 0 && 5897 (node_type[sizeof (DDI_NT_PRINTER) - 1] == '\0' || 5898 node_type[sizeof (DDI_NT_PRINTER) - 1] == ':')) { 5899 5900 rv = i_ddi_set_devi_class(dip, ESC_PRINTER, flag); 5901 5902 } else if (strncmp(node_type, DDI_PSEUDO, 5903 sizeof (DDI_PSEUDO) -1) == 0 && 5904 (strncmp(ESC_LOFI, ddi_node_name(dip), 5905 sizeof (ESC_LOFI) -1) == 0)) { 5906 rv = i_ddi_set_devi_class(dip, ESC_LOFI, flag); 5907 } 5908 } 5909 5910 return (rv); 5911 } 5912 5913 /* 5914 * Check compliance with PSARC 2003/375: 5915 * 5916 * The name must contain only characters a-z, A-Z, 0-9 or _ and it must not 5917 * exceed IFNAMSIZ (16) characters in length. 5918 */ 5919 static boolean_t 5920 verify_name(char *name) 5921 { 5922 size_t len = strlen(name); 5923 char *cp; 5924 5925 if (len == 0 || len > IFNAMSIZ) 5926 return (B_FALSE); 5927 5928 for (cp = name; *cp != '\0'; cp++) { 5929 if (!isalnum(*cp) && *cp != '_') 5930 return (B_FALSE); 5931 } 5932 5933 return (B_TRUE); 5934 } 5935 5936 /* 5937 * ddi_create_minor_common: Create a ddi_minor_data structure and 5938 * attach it to the given devinfo node. 
5939 */ 5940 5941 int 5942 ddi_create_minor_common(dev_info_t *dip, char *name, int spec_type, 5943 minor_t minor_num, char *node_type, int flag, ddi_minor_type mtype, 5944 const char *read_priv, const char *write_priv, mode_t priv_mode) 5945 { 5946 struct ddi_minor_data *dmdp; 5947 major_t major; 5948 5949 if (spec_type != S_IFCHR && spec_type != S_IFBLK) 5950 return (DDI_FAILURE); 5951 5952 if (name == NULL) 5953 return (DDI_FAILURE); 5954 5955 /* 5956 * Log a message if the minor number the driver is creating 5957 * is not expressible on the on-disk filesystem (currently 5958 * this is limited to 18 bits both by UFS). The device can 5959 * be opened via devfs, but not by device special files created 5960 * via mknod(). 5961 */ 5962 if (minor_num > L_MAXMIN32) { 5963 cmn_err(CE_WARN, 5964 "%s%d:%s minor 0x%x too big for 32-bit applications", 5965 ddi_driver_name(dip), ddi_get_instance(dip), 5966 name, minor_num); 5967 return (DDI_FAILURE); 5968 } 5969 5970 /* dip must be bound and attached */ 5971 major = ddi_driver_major(dip); 5972 ASSERT(major != (major_t)-1); 5973 5974 /* 5975 * Default node_type to DDI_PSEUDO and issue notice in debug mode 5976 */ 5977 if (node_type == NULL) { 5978 node_type = DDI_PSEUDO; 5979 NDI_CONFIG_DEBUG((CE_NOTE, "!illegal node_type NULL for %s%d " 5980 " minor node %s; default to DDI_PSEUDO", 5981 ddi_driver_name(dip), ddi_get_instance(dip), name)); 5982 } 5983 5984 /* 5985 * If the driver is a network driver, ensure that the name falls within 5986 * the interface naming constraints specified by PSARC/2003/375. 
5987 */ 5988 if (strcmp(node_type, DDI_NT_NET) == 0) { 5989 if (!verify_name(name)) 5990 return (DDI_FAILURE); 5991 5992 if (mtype == DDM_MINOR) { 5993 struct devnames *dnp = &devnamesp[major]; 5994 5995 /* Mark driver as a network driver */ 5996 LOCK_DEV_OPS(&dnp->dn_lock); 5997 dnp->dn_flags |= DN_NETWORK_DRIVER; 5998 UNLOCK_DEV_OPS(&dnp->dn_lock); 5999 } 6000 } 6001 6002 if (mtype == DDM_MINOR) { 6003 if (derive_devi_class(dip, node_type, KM_NOSLEEP) != 6004 DDI_SUCCESS) 6005 return (DDI_FAILURE); 6006 } 6007 6008 /* 6009 * Take care of minor number information for the node. 6010 */ 6011 6012 if ((dmdp = kmem_zalloc(sizeof (struct ddi_minor_data), 6013 KM_NOSLEEP)) == NULL) { 6014 return (DDI_FAILURE); 6015 } 6016 if ((dmdp->ddm_name = i_ddi_strdup(name, KM_NOSLEEP)) == NULL) { 6017 kmem_free(dmdp, sizeof (struct ddi_minor_data)); 6018 return (DDI_FAILURE); 6019 } 6020 dmdp->dip = dip; 6021 dmdp->ddm_dev = makedevice(major, minor_num); 6022 dmdp->ddm_spec_type = spec_type; 6023 dmdp->ddm_node_type = node_type; 6024 dmdp->type = mtype; 6025 if (flag & CLONE_DEV) { 6026 dmdp->type = DDM_ALIAS; 6027 dmdp->ddm_dev = makedevice(ddi_driver_major(clone_dip), major); 6028 } 6029 if (flag & PRIVONLY_DEV) { 6030 dmdp->ddm_flags |= DM_NO_FSPERM; 6031 } 6032 if (read_priv || write_priv) { 6033 dmdp->ddm_node_priv = 6034 devpolicy_priv_by_name(read_priv, write_priv); 6035 } 6036 dmdp->ddm_priv_mode = priv_mode; 6037 6038 ddi_append_minor_node(dip, dmdp); 6039 6040 /* 6041 * only log ddi_create_minor_node() calls which occur 6042 * outside the scope of attach(9e)/detach(9e) reconfigurations 6043 */ 6044 if (!(DEVI_IS_ATTACHING(dip) || DEVI_IS_DETACHING(dip)) && 6045 mtype != DDM_INTERNAL_PATH) { 6046 (void) i_log_devfs_minor_create(dip, name); 6047 } 6048 6049 /* 6050 * Check if any dacf rules match the creation of this minor node 6051 */ 6052 dacfc_match_create_minor(name, node_type, dip, dmdp, flag); 6053 return (DDI_SUCCESS); 6054 } 6055 6056 int 6057 
ddi_create_minor_node(dev_info_t *dip, char *name, int spec_type, 6058 minor_t minor_num, char *node_type, int flag) 6059 { 6060 return (ddi_create_minor_common(dip, name, spec_type, minor_num, 6061 node_type, flag, DDM_MINOR, NULL, NULL, 0)); 6062 } 6063 6064 int 6065 ddi_create_priv_minor_node(dev_info_t *dip, char *name, int spec_type, 6066 minor_t minor_num, char *node_type, int flag, 6067 const char *rdpriv, const char *wrpriv, mode_t priv_mode) 6068 { 6069 return (ddi_create_minor_common(dip, name, spec_type, minor_num, 6070 node_type, flag, DDM_MINOR, rdpriv, wrpriv, priv_mode)); 6071 } 6072 6073 int 6074 ddi_create_default_minor_node(dev_info_t *dip, char *name, int spec_type, 6075 minor_t minor_num, char *node_type, int flag) 6076 { 6077 return (ddi_create_minor_common(dip, name, spec_type, minor_num, 6078 node_type, flag, DDM_DEFAULT, NULL, NULL, 0)); 6079 } 6080 6081 /* 6082 * Internal (non-ddi) routine for drivers to export names known 6083 * to the kernel (especially ddi_pathname_to_dev_t and friends) 6084 * but not exported externally to /dev 6085 */ 6086 int 6087 ddi_create_internal_pathname(dev_info_t *dip, char *name, int spec_type, 6088 minor_t minor_num) 6089 { 6090 return (ddi_create_minor_common(dip, name, spec_type, minor_num, 6091 "internal", 0, DDM_INTERNAL_PATH, NULL, NULL, 0)); 6092 } 6093 6094 void 6095 ddi_remove_minor_node(dev_info_t *dip, char *name) 6096 { 6097 struct ddi_minor_data *dmdp, *dmdp1; 6098 struct ddi_minor_data **dmdp_prev; 6099 6100 mutex_enter(&(DEVI(dip)->devi_lock)); 6101 i_devi_enter(dip, DEVI_S_MD_UPDATE, DEVI_S_MD_UPDATE, 1); 6102 6103 dmdp_prev = &DEVI(dip)->devi_minor; 6104 dmdp = DEVI(dip)->devi_minor; 6105 while (dmdp != NULL) { 6106 dmdp1 = dmdp->next; 6107 if ((name == NULL || (dmdp->ddm_name != NULL && 6108 strcmp(name, dmdp->ddm_name) == 0))) { 6109 if (dmdp->ddm_name != NULL) { 6110 if (dmdp->type != DDM_INTERNAL_PATH) 6111 (void) i_log_devfs_minor_remove(dip, 6112 dmdp->ddm_name); 6113 
kmem_free(dmdp->ddm_name, 6114 strlen(dmdp->ddm_name) + 1); 6115 } 6116 /* 6117 * Release device privilege, if any. 6118 * Release dacf client data associated with this minor 6119 * node by storing NULL. 6120 */ 6121 if (dmdp->ddm_node_priv) 6122 dpfree(dmdp->ddm_node_priv); 6123 dacf_store_info((dacf_infohdl_t)dmdp, NULL); 6124 kmem_free(dmdp, sizeof (struct ddi_minor_data)); 6125 *dmdp_prev = dmdp1; 6126 /* 6127 * OK, we found it, so get out now -- if we drive on, 6128 * we will strcmp against garbage. See 1139209. 6129 */ 6130 if (name != NULL) 6131 break; 6132 } else { 6133 dmdp_prev = &dmdp->next; 6134 } 6135 dmdp = dmdp1; 6136 } 6137 6138 i_devi_exit(dip, DEVI_S_MD_UPDATE, 1); 6139 mutex_exit(&(DEVI(dip)->devi_lock)); 6140 } 6141 6142 6143 int 6144 ddi_in_panic() 6145 { 6146 return (panicstr != NULL); 6147 } 6148 6149 6150 /* 6151 * Find first bit set in a mask (returned counting from 1 up) 6152 */ 6153 6154 int 6155 ddi_ffs(long mask) 6156 { 6157 return (ffs(mask)); 6158 } 6159 6160 /* 6161 * Find last bit set. Take mask and clear 6162 * all but the most significant bit, and 6163 * then let ffs do the rest of the work. 6164 * 6165 * Algorithm courtesy of Steve Chessin. 6166 */ 6167 6168 int 6169 ddi_fls(long mask) 6170 { 6171 while (mask) { 6172 long nx; 6173 6174 if ((nx = (mask & (mask - 1))) == 0) 6175 break; 6176 mask = nx; 6177 } 6178 return (ffs(mask)); 6179 } 6180 6181 /* 6182 * The next five routines comprise generic storage management utilities 6183 * for driver soft state structures (in "the old days," this was done 6184 * with a statically sized array - big systems and dynamic loading 6185 * and unloading make heap allocation more attractive) 6186 */ 6187 6188 /* 6189 * Allocate a set of pointers to 'n_items' objects of size 'size' 6190 * bytes. Each pointer is initialized to nil. 6191 * 6192 * The 'size' and 'n_items' values are stashed in the opaque 6193 * handle returned to the caller. 
6194 * 6195 * This implementation interprets 'set of pointers' to mean 'array 6196 * of pointers' but note that nothing in the interface definition 6197 * precludes an implementation that uses, for example, a linked list. 6198 * However there should be a small efficiency gain from using an array 6199 * at lookup time. 6200 * 6201 * NOTE As an optimization, we make our growable array allocations in 6202 * powers of two (bytes), since that's how much kmem_alloc (currently) 6203 * gives us anyway. It should save us some free/realloc's .. 6204 * 6205 * As a further optimization, we make the growable array start out 6206 * with MIN_N_ITEMS in it. 6207 */ 6208 6209 #define MIN_N_ITEMS 8 /* 8 void *'s == 32 bytes */ 6210 6211 int 6212 ddi_soft_state_init(void **state_p, size_t size, size_t n_items) 6213 { 6214 struct i_ddi_soft_state *ss; 6215 6216 if (state_p == NULL || *state_p != NULL || size == 0) 6217 return (EINVAL); 6218 6219 ss = kmem_zalloc(sizeof (*ss), KM_SLEEP); 6220 mutex_init(&ss->lock, NULL, MUTEX_DRIVER, NULL); 6221 ss->size = size; 6222 6223 if (n_items < MIN_N_ITEMS) 6224 ss->n_items = MIN_N_ITEMS; 6225 else { 6226 int bitlog; 6227 6228 if ((bitlog = ddi_fls(n_items)) == ddi_ffs(n_items)) 6229 bitlog--; 6230 ss->n_items = 1 << bitlog; 6231 } 6232 6233 ASSERT(ss->n_items >= n_items); 6234 6235 ss->array = kmem_zalloc(ss->n_items * sizeof (void *), KM_SLEEP); 6236 6237 *state_p = ss; 6238 6239 return (0); 6240 } 6241 6242 6243 /* 6244 * Allocate a state structure of size 'size' to be associated 6245 * with item 'item'. 6246 * 6247 * In this implementation, the array is extended to 6248 * allow the requested offset, if needed. 
6249 */ 6250 int 6251 ddi_soft_state_zalloc(void *state, int item) 6252 { 6253 struct i_ddi_soft_state *ss; 6254 void **array; 6255 void *new_element; 6256 6257 if ((ss = state) == NULL || item < 0) 6258 return (DDI_FAILURE); 6259 6260 mutex_enter(&ss->lock); 6261 if (ss->size == 0) { 6262 mutex_exit(&ss->lock); 6263 cmn_err(CE_WARN, "ddi_soft_state_zalloc: bad handle: %s", 6264 mod_containing_pc(caller())); 6265 return (DDI_FAILURE); 6266 } 6267 6268 array = ss->array; /* NULL if ss->n_items == 0 */ 6269 ASSERT(ss->n_items != 0 && array != NULL); 6270 6271 /* 6272 * refuse to tread on an existing element 6273 */ 6274 if (item < ss->n_items && array[item] != NULL) { 6275 mutex_exit(&ss->lock); 6276 return (DDI_FAILURE); 6277 } 6278 6279 /* 6280 * Allocate a new element to plug in 6281 */ 6282 new_element = kmem_zalloc(ss->size, KM_SLEEP); 6283 6284 /* 6285 * Check if the array is big enough, if not, grow it. 6286 */ 6287 if (item >= ss->n_items) { 6288 void **new_array; 6289 size_t new_n_items; 6290 struct i_ddi_soft_state *dirty; 6291 6292 /* 6293 * Allocate a new array of the right length, copy 6294 * all the old pointers to the new array, then 6295 * if it exists at all, put the old array on the 6296 * dirty list. 6297 * 6298 * Note that we can't kmem_free() the old array. 6299 * 6300 * Why -- well the 'get' operation is 'mutex-free', so we 6301 * can't easily catch a suspended thread that is just about 6302 * to dereference the array we just grew out of. So we 6303 * cons up a header and put it on a list of 'dirty' 6304 * pointer arrays. (Dirty in the sense that there may 6305 * be suspended threads somewhere that are in the middle 6306 * of referencing them). Fortunately, we -can- garbage 6307 * collect it all at ddi_soft_state_fini time. 6308 */ 6309 new_n_items = ss->n_items; 6310 while (new_n_items < (1 + item)) 6311 new_n_items <<= 1; /* double array size .. */ 6312 6313 ASSERT(new_n_items >= (1 + item)); /* sanity check! 
*/ 6314 6315 new_array = kmem_zalloc(new_n_items * sizeof (void *), 6316 KM_SLEEP); 6317 /* 6318 * Copy the pointers into the new array 6319 */ 6320 bcopy(array, new_array, ss->n_items * sizeof (void *)); 6321 6322 /* 6323 * Save the old array on the dirty list 6324 */ 6325 dirty = kmem_zalloc(sizeof (*dirty), KM_SLEEP); 6326 dirty->array = ss->array; 6327 dirty->n_items = ss->n_items; 6328 dirty->next = ss->next; 6329 ss->next = dirty; 6330 6331 ss->array = (array = new_array); 6332 ss->n_items = new_n_items; 6333 } 6334 6335 ASSERT(array != NULL && item < ss->n_items && array[item] == NULL); 6336 6337 array[item] = new_element; 6338 6339 mutex_exit(&ss->lock); 6340 return (DDI_SUCCESS); 6341 } 6342 6343 6344 /* 6345 * Fetch a pointer to the allocated soft state structure. 6346 * 6347 * This is designed to be cheap. 6348 * 6349 * There's an argument that there should be more checking for 6350 * nil pointers and out of bounds on the array.. but we do a lot 6351 * of that in the alloc/free routines. 6352 * 6353 * An array has the convenience that we don't need to lock read-access 6354 * to it c.f. a linked list. However our "expanding array" strategy 6355 * means that we should hold a readers lock on the i_ddi_soft_state 6356 * structure. 6357 * 6358 * However, from a performance viewpoint, we need to do it without 6359 * any locks at all -- this also makes it a leaf routine. The algorithm 6360 * is 'lock-free' because we only discard the pointer arrays at 6361 * ddi_soft_state_fini() time. 6362 */ 6363 void * 6364 ddi_get_soft_state(void *state, int item) 6365 { 6366 struct i_ddi_soft_state *ss = state; 6367 6368 ASSERT(ss != NULL && item >= 0); 6369 6370 if (item < ss->n_items && ss->array != NULL) 6371 return (ss->array[item]); 6372 return (NULL); 6373 } 6374 6375 /* 6376 * Free the state structure corresponding to 'item.' Freeing an 6377 * element that has either gone or was never allocated is not 6378 * considered an error. 
Note that we free the state structure, but 6379 * we don't shrink our pointer array, or discard 'dirty' arrays, 6380 * since even a few pointers don't really waste too much memory. 6381 * 6382 * Passing an item number that is out of bounds, or a null pointer will 6383 * provoke an error message. 6384 */ 6385 void 6386 ddi_soft_state_free(void *state, int item) 6387 { 6388 struct i_ddi_soft_state *ss; 6389 void **array; 6390 void *element; 6391 static char msg[] = "ddi_soft_state_free:"; 6392 6393 if ((ss = state) == NULL) { 6394 cmn_err(CE_WARN, "%s null handle: %s", 6395 msg, mod_containing_pc(caller())); 6396 return; 6397 } 6398 6399 element = NULL; 6400 6401 mutex_enter(&ss->lock); 6402 6403 if ((array = ss->array) == NULL || ss->size == 0) { 6404 cmn_err(CE_WARN, "%s bad handle: %s", 6405 msg, mod_containing_pc(caller())); 6406 } else if (item < 0 || item >= ss->n_items) { 6407 cmn_err(CE_WARN, "%s item %d not in range [0..%lu]: %s", 6408 msg, item, ss->n_items - 1, mod_containing_pc(caller())); 6409 } else if (array[item] != NULL) { 6410 element = array[item]; 6411 array[item] = NULL; 6412 } 6413 6414 mutex_exit(&ss->lock); 6415 6416 if (element) 6417 kmem_free(element, ss->size); 6418 } 6419 6420 6421 /* 6422 * Free the entire set of pointers, and any 6423 * soft state structures contained therein. 6424 * 6425 * Note that we don't grab the ss->lock mutex, even though 6426 * we're inspecting the various fields of the data structure. 6427 * 6428 * There is an implicit assumption that this routine will 6429 * never run concurrently with any of the above on this 6430 * particular state structure i.e. by the time the driver 6431 * calls this routine, there should be no other threads 6432 * running in the driver. 
6433 */ 6434 void 6435 ddi_soft_state_fini(void **state_p) 6436 { 6437 struct i_ddi_soft_state *ss, *dirty; 6438 int item; 6439 static char msg[] = "ddi_soft_state_fini:"; 6440 6441 if (state_p == NULL || (ss = *state_p) == NULL) { 6442 cmn_err(CE_WARN, "%s null handle: %s", 6443 msg, mod_containing_pc(caller())); 6444 return; 6445 } 6446 6447 if (ss->size == 0) { 6448 cmn_err(CE_WARN, "%s bad handle: %s", 6449 msg, mod_containing_pc(caller())); 6450 return; 6451 } 6452 6453 if (ss->n_items > 0) { 6454 for (item = 0; item < ss->n_items; item++) 6455 ddi_soft_state_free(ss, item); 6456 kmem_free(ss->array, ss->n_items * sizeof (void *)); 6457 } 6458 6459 /* 6460 * Now delete any dirty arrays from previous 'grow' operations 6461 */ 6462 for (dirty = ss->next; dirty; dirty = ss->next) { 6463 ss->next = dirty->next; 6464 kmem_free(dirty->array, dirty->n_items * sizeof (void *)); 6465 kmem_free(dirty, sizeof (*dirty)); 6466 } 6467 6468 mutex_destroy(&ss->lock); 6469 kmem_free(ss, sizeof (*ss)); 6470 6471 *state_p = NULL; 6472 } 6473 6474 /* 6475 * This sets the devi_addr entry in the dev_info structure 'dip' to 'name'. 6476 * Storage is double buffered to prevent updates during devi_addr use - 6477 * double buffering is adaquate for reliable ddi_deviname() consumption. 6478 * The double buffer is not freed until dev_info structure destruction 6479 * (by i_ddi_free_node). 6480 */ 6481 void 6482 ddi_set_name_addr(dev_info_t *dip, char *name) 6483 { 6484 char *buf = DEVI(dip)->devi_addr_buf; 6485 char *newaddr; 6486 6487 if (buf == NULL) { 6488 buf = kmem_zalloc(2 * MAXNAMELEN, KM_SLEEP); 6489 DEVI(dip)->devi_addr_buf = buf; 6490 } 6491 6492 if (name) { 6493 ASSERT(strlen(name) < MAXNAMELEN); 6494 newaddr = (DEVI(dip)->devi_addr == buf) ? 
6495 (buf + MAXNAMELEN) : buf; 6496 (void) strlcpy(newaddr, name, MAXNAMELEN); 6497 } else 6498 newaddr = NULL; 6499 6500 DEVI(dip)->devi_addr = newaddr; 6501 } 6502 6503 char * 6504 ddi_get_name_addr(dev_info_t *dip) 6505 { 6506 return (DEVI(dip)->devi_addr); 6507 } 6508 6509 void 6510 ddi_set_parent_data(dev_info_t *dip, void *pd) 6511 { 6512 DEVI(dip)->devi_parent_data = pd; 6513 } 6514 6515 void * 6516 ddi_get_parent_data(dev_info_t *dip) 6517 { 6518 return (DEVI(dip)->devi_parent_data); 6519 } 6520 6521 /* 6522 * ddi_name_to_major: Returns the major number of a module given its name. 6523 */ 6524 major_t 6525 ddi_name_to_major(char *name) 6526 { 6527 return (mod_name_to_major(name)); 6528 } 6529 6530 /* 6531 * ddi_major_to_name: Returns the module name bound to a major number. 6532 */ 6533 char * 6534 ddi_major_to_name(major_t major) 6535 { 6536 return (mod_major_to_name(major)); 6537 } 6538 6539 /* 6540 * Return the name of the devinfo node pointed at by 'dip' in the buffer 6541 * pointed at by 'name.' A devinfo node is named as a result of calling 6542 * ddi_initchild(). 6543 * 6544 * Note: the driver must be held before calling this function! 6545 */ 6546 char * 6547 ddi_deviname(dev_info_t *dip, char *name) 6548 { 6549 char *addrname; 6550 char none = '\0'; 6551 6552 if (dip == ddi_root_node()) { 6553 *name = '\0'; 6554 return (name); 6555 } 6556 6557 if (i_ddi_node_state(dip) < DS_BOUND) { 6558 addrname = &none; 6559 } else { 6560 /* 6561 * Use ddi_get_name_addr() without checking state so we get 6562 * a unit-address if we are called after ddi_set_name_addr() 6563 * by nexus DDI_CTL_INITCHILD code, but before completing 6564 * node promotion to DS_INITIALIZED. We currently have 6565 * two situations where we are called in this state: 6566 * o For framework processing of a path-oriented alias. 6567 * o If a SCSA nexus driver calls ddi_devid_register() 6568 * from it's tran_tgt_init(9E) implementation. 
6569 */ 6570 addrname = ddi_get_name_addr(dip); 6571 if (addrname == NULL) 6572 addrname = &none; 6573 } 6574 6575 if (*addrname == '\0') { 6576 (void) sprintf(name, "/%s", ddi_node_name(dip)); 6577 } else { 6578 (void) sprintf(name, "/%s@%s", ddi_node_name(dip), addrname); 6579 } 6580 6581 return (name); 6582 } 6583 6584 /* 6585 * Spits out the name of device node, typically name@addr, for a given node, 6586 * using the driver name, not the nodename. 6587 * 6588 * Used by match_parent. Not to be used elsewhere. 6589 */ 6590 char * 6591 i_ddi_parname(dev_info_t *dip, char *name) 6592 { 6593 char *addrname; 6594 6595 if (dip == ddi_root_node()) { 6596 *name = '\0'; 6597 return (name); 6598 } 6599 6600 ASSERT(i_ddi_node_state(dip) >= DS_INITIALIZED); 6601 6602 if (*(addrname = ddi_get_name_addr(dip)) == '\0') 6603 (void) sprintf(name, "%s", ddi_binding_name(dip)); 6604 else 6605 (void) sprintf(name, "%s@%s", ddi_binding_name(dip), addrname); 6606 return (name); 6607 } 6608 6609 static char * 6610 pathname_work(dev_info_t *dip, char *path) 6611 { 6612 char *bp; 6613 6614 if (dip == ddi_root_node()) { 6615 *path = '\0'; 6616 return (path); 6617 } 6618 (void) pathname_work(ddi_get_parent(dip), path); 6619 bp = path + strlen(path); 6620 (void) ddi_deviname(dip, bp); 6621 return (path); 6622 } 6623 6624 char * 6625 ddi_pathname(dev_info_t *dip, char *path) 6626 { 6627 return (pathname_work(dip, path)); 6628 } 6629 6630 /* 6631 * Given a dev_t, return the pathname of the corresponding device in the 6632 * buffer pointed at by "path." The buffer is assumed to be large enough 6633 * to hold the pathname of the device (MAXPATHLEN). 6634 * 6635 * The pathname of a device is the pathname of the devinfo node to which 6636 * the device "belongs," concatenated with the character ':' and the name 6637 * of the minor node corresponding to the dev_t. If spec_type is 0 then 6638 * just the pathname of the devinfo node is returned without driving attach 6639 * of that node. 
For a non-zero spec_type, an attach is performed and a 6640 * search of the minor list occurs. 6641 * 6642 * It is possible that the path associated with the dev_t is not 6643 * currently available in the devinfo tree. In order to have a 6644 * dev_t, a device must have been discovered before, which means 6645 * that the path is always in the instance tree. The one exception 6646 * to this is if the dev_t is associated with a pseudo driver, in 6647 * which case the device must exist on the pseudo branch of the 6648 * devinfo tree as a result of parsing .conf files. 6649 */ 6650 int 6651 ddi_dev_pathname(dev_t devt, int spec_type, char *path) 6652 { 6653 major_t major = getmajor(devt); 6654 int instance; 6655 dev_info_t *dip; 6656 char *minorname; 6657 char *drvname; 6658 6659 if (major >= devcnt) 6660 goto fail; 6661 if (major == clone_major) { 6662 /* clone has no minor nodes, manufacture the path here */ 6663 if ((drvname = ddi_major_to_name(getminor(devt))) == NULL) 6664 goto fail; 6665 6666 (void) snprintf(path, MAXPATHLEN, "%s:%s", CLONE_PATH, drvname); 6667 return (DDI_SUCCESS); 6668 } 6669 6670 /* extract instance from devt (getinfo(9E) DDI_INFO_DEVT2INSTANCE). */ 6671 if ((instance = dev_to_instance(devt)) == -1) 6672 goto fail; 6673 6674 /* reconstruct the path given the major/instance */ 6675 if (e_ddi_majorinstance_to_path(major, instance, path) != DDI_SUCCESS) 6676 goto fail; 6677 6678 /* if spec_type given we must drive attach and search minor nodes */ 6679 if ((spec_type == S_IFCHR) || (spec_type == S_IFBLK)) { 6680 /* attach the path so we can search minors */ 6681 if ((dip = e_ddi_hold_devi_by_path(path, 0)) == NULL) 6682 goto fail; 6683 6684 /* Add minorname to path. 
*/ 6685 mutex_enter(&(DEVI(dip)->devi_lock)); 6686 minorname = i_ddi_devtspectype_to_minorname(dip, 6687 devt, spec_type); 6688 if (minorname) { 6689 (void) strcat(path, ":"); 6690 (void) strcat(path, minorname); 6691 } 6692 mutex_exit(&(DEVI(dip)->devi_lock)); 6693 ddi_release_devi(dip); 6694 if (minorname == NULL) 6695 goto fail; 6696 } 6697 ASSERT(strlen(path) < MAXPATHLEN); 6698 return (DDI_SUCCESS); 6699 6700 fail: *path = 0; 6701 return (DDI_FAILURE); 6702 } 6703 6704 /* 6705 * Given a major number and an instance, return the path. 6706 * This interface does NOT drive attach. 6707 */ 6708 int 6709 e_ddi_majorinstance_to_path(major_t major, int instance, char *path) 6710 { 6711 struct devnames *dnp; 6712 dev_info_t *dip; 6713 6714 if ((major >= devcnt) || (instance == -1)) { 6715 *path = 0; 6716 return (DDI_FAILURE); 6717 } 6718 6719 /* look for the major/instance in the instance tree */ 6720 if (e_ddi_instance_majorinstance_to_path(major, instance, 6721 path) == DDI_SUCCESS) { 6722 ASSERT(strlen(path) < MAXPATHLEN); 6723 return (DDI_SUCCESS); 6724 } 6725 6726 /* 6727 * Not in instance tree, find the instance on the per driver list and 6728 * construct path to instance via ddi_pathname(). This is how paths 6729 * down the 'pseudo' branch are constructed. 6730 */ 6731 dnp = &(devnamesp[major]); 6732 LOCK_DEV_OPS(&(dnp->dn_lock)); 6733 for (dip = dnp->dn_head; dip; 6734 dip = (dev_info_t *)DEVI(dip)->devi_next) { 6735 /* Skip if instance does not match. */ 6736 if (DEVI(dip)->devi_instance != instance) 6737 continue; 6738 6739 /* 6740 * An ndi_hold_devi() does not prevent DS_INITIALIZED->DS_BOUND 6741 * node demotion, so it is not an effective way of ensuring 6742 * that the ddi_pathname result has a unit-address. Instead, 6743 * we reverify the node state after calling ddi_pathname(). 
6744 */ 6745 if (i_ddi_node_state(dip) >= DS_INITIALIZED) { 6746 (void) ddi_pathname(dip, path); 6747 if (i_ddi_node_state(dip) < DS_INITIALIZED) 6748 continue; 6749 UNLOCK_DEV_OPS(&(dnp->dn_lock)); 6750 ASSERT(strlen(path) < MAXPATHLEN); 6751 return (DDI_SUCCESS); 6752 } 6753 } 6754 UNLOCK_DEV_OPS(&(dnp->dn_lock)); 6755 6756 /* can't reconstruct the path */ 6757 *path = 0; 6758 return (DDI_FAILURE); 6759 } 6760 6761 #define GLD_DRIVER_PPA "SUNW,gld_v0_ppa" 6762 6763 /* 6764 * Given the dip for a network interface return the ppa for that interface. 6765 * 6766 * In all cases except GLD v0 drivers, the ppa == instance. 6767 * In the case of GLD v0 drivers, the ppa is equal to the attach order. 6768 * So for these drivers when the attach routine calls gld_register(), 6769 * the GLD framework creates an integer property called "gld_driver_ppa" 6770 * that can be queried here. 6771 * 6772 * The only time this function is used is when a system is booting over nfs. 6773 * In this case the system has to resolve the pathname of the boot device 6774 * to it's ppa. 6775 */ 6776 int 6777 i_ddi_devi_get_ppa(dev_info_t *dip) 6778 { 6779 return (ddi_prop_get_int(DDI_DEV_T_ANY, dip, 6780 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, 6781 GLD_DRIVER_PPA, ddi_get_instance(dip))); 6782 } 6783 6784 /* 6785 * i_ddi_devi_set_ppa() should only be called from gld_register() 6786 * and only for GLD v0 drivers 6787 */ 6788 void 6789 i_ddi_devi_set_ppa(dev_info_t *dip, int ppa) 6790 { 6791 (void) e_ddi_prop_update_int(DDI_DEV_T_NONE, dip, GLD_DRIVER_PPA, ppa); 6792 } 6793 6794 6795 /* 6796 * Private DDI Console bell functions. 
6797 */ 6798 void 6799 ddi_ring_console_bell(clock_t duration) 6800 { 6801 if (ddi_console_bell_func != NULL) 6802 (*ddi_console_bell_func)(duration); 6803 } 6804 6805 void 6806 ddi_set_console_bell(void (*bellfunc)(clock_t duration)) 6807 { 6808 ddi_console_bell_func = bellfunc; 6809 } 6810 6811 int 6812 ddi_dma_alloc_handle(dev_info_t *dip, ddi_dma_attr_t *attr, 6813 int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep) 6814 { 6815 int (*funcp)() = ddi_dma_allochdl; 6816 ddi_dma_attr_t dma_attr; 6817 struct bus_ops *bop; 6818 6819 if (attr == (ddi_dma_attr_t *)0) 6820 return (DDI_DMA_BADATTR); 6821 6822 dma_attr = *attr; 6823 6824 bop = DEVI(dip)->devi_ops->devo_bus_ops; 6825 if (bop && bop->bus_dma_allochdl) 6826 funcp = bop->bus_dma_allochdl; 6827 6828 return ((*funcp)(dip, dip, &dma_attr, waitfp, arg, handlep)); 6829 } 6830 6831 void 6832 ddi_dma_free_handle(ddi_dma_handle_t *handlep) 6833 { 6834 ddi_dma_handle_t h = *handlep; 6835 (void) ddi_dma_freehdl(HD, HD, h); 6836 } 6837 6838 static uintptr_t dma_mem_list_id = 0; 6839 6840 6841 int 6842 ddi_dma_mem_alloc(ddi_dma_handle_t handle, size_t length, 6843 ddi_device_acc_attr_t *accattrp, uint_t flags, 6844 int (*waitfp)(caddr_t), caddr_t arg, caddr_t *kaddrp, 6845 size_t *real_length, ddi_acc_handle_t *handlep) 6846 { 6847 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle; 6848 dev_info_t *dip = hp->dmai_rdip; 6849 ddi_acc_hdl_t *ap; 6850 ddi_dma_attr_t *attrp = &hp->dmai_attr; 6851 uint_t sleepflag, xfermodes; 6852 int (*fp)(caddr_t); 6853 int rval; 6854 6855 if (waitfp == DDI_DMA_SLEEP) 6856 fp = (int (*)())KM_SLEEP; 6857 else if (waitfp == DDI_DMA_DONTWAIT) 6858 fp = (int (*)())KM_NOSLEEP; 6859 else 6860 fp = waitfp; 6861 *handlep = impl_acc_hdl_alloc(fp, arg); 6862 if (*handlep == NULL) 6863 return (DDI_FAILURE); 6864 6865 /* check if the cache attributes are supported */ 6866 if (i_ddi_check_cache_attr(flags) == B_FALSE) 6867 return (DDI_FAILURE); 6868 6869 /* 6870 * Transfer the meaningful bits to 
xfermodes. 6871 * Double-check if the 3rd party driver correctly sets the bits. 6872 * If not, set DDI_DMA_STREAMING to keep compatibility. 6873 */ 6874 xfermodes = flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING); 6875 if (xfermodes == 0) { 6876 xfermodes = DDI_DMA_STREAMING; 6877 } 6878 6879 /* 6880 * initialize the common elements of data access handle 6881 */ 6882 ap = impl_acc_hdl_get(*handlep); 6883 ap->ah_vers = VERS_ACCHDL; 6884 ap->ah_dip = dip; 6885 ap->ah_offset = 0; 6886 ap->ah_len = 0; 6887 ap->ah_xfermodes = flags; 6888 ap->ah_acc = *accattrp; 6889 6890 sleepflag = ((waitfp == DDI_DMA_SLEEP) ? 1 : 0); 6891 if (xfermodes == DDI_DMA_CONSISTENT) { 6892 rval = i_ddi_mem_alloc(dip, attrp, length, sleepflag, 6893 flags, accattrp, kaddrp, NULL, ap); 6894 *real_length = length; 6895 } else { 6896 rval = i_ddi_mem_alloc(dip, attrp, length, sleepflag, 6897 flags, accattrp, kaddrp, real_length, ap); 6898 } 6899 if (rval == DDI_SUCCESS) { 6900 ap->ah_len = (off_t)(*real_length); 6901 ap->ah_addr = *kaddrp; 6902 } else { 6903 impl_acc_hdl_free(*handlep); 6904 *handlep = (ddi_acc_handle_t)NULL; 6905 if (waitfp != DDI_DMA_SLEEP && waitfp != DDI_DMA_DONTWAIT) { 6906 ddi_set_callback(waitfp, arg, &dma_mem_list_id); 6907 } 6908 rval = DDI_FAILURE; 6909 } 6910 return (rval); 6911 } 6912 6913 void 6914 ddi_dma_mem_free(ddi_acc_handle_t *handlep) 6915 { 6916 ddi_acc_hdl_t *ap; 6917 6918 ap = impl_acc_hdl_get(*handlep); 6919 ASSERT(ap); 6920 6921 i_ddi_mem_free((caddr_t)ap->ah_addr, ap); 6922 6923 /* 6924 * free the handle 6925 */ 6926 impl_acc_hdl_free(*handlep); 6927 *handlep = (ddi_acc_handle_t)NULL; 6928 6929 if (dma_mem_list_id != 0) { 6930 ddi_run_callback(&dma_mem_list_id); 6931 } 6932 } 6933 6934 int 6935 ddi_dma_buf_bind_handle(ddi_dma_handle_t handle, struct buf *bp, 6936 uint_t flags, int (*waitfp)(caddr_t), caddr_t arg, 6937 ddi_dma_cookie_t *cookiep, uint_t *ccountp) 6938 { 6939 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle; 6940 dev_info_t *hdip, *dip; 6941 
struct ddi_dma_req dmareq; 6942 int (*funcp)(); 6943 6944 dmareq.dmar_flags = flags; 6945 dmareq.dmar_fp = waitfp; 6946 dmareq.dmar_arg = arg; 6947 dmareq.dmar_object.dmao_size = (uint_t)bp->b_bcount; 6948 6949 if (bp->b_flags & B_PAGEIO) { 6950 dmareq.dmar_object.dmao_type = DMA_OTYP_PAGES; 6951 dmareq.dmar_object.dmao_obj.pp_obj.pp_pp = bp->b_pages; 6952 dmareq.dmar_object.dmao_obj.pp_obj.pp_offset = 6953 (uint_t)(((uintptr_t)bp->b_un.b_addr) & MMU_PAGEOFFSET); 6954 } else { 6955 dmareq.dmar_object.dmao_obj.virt_obj.v_addr = bp->b_un.b_addr; 6956 if (bp->b_flags & B_SHADOW) { 6957 dmareq.dmar_object.dmao_obj.virt_obj.v_priv = 6958 bp->b_shadow; 6959 dmareq.dmar_object.dmao_type = DMA_OTYP_BUFVADDR; 6960 } else { 6961 dmareq.dmar_object.dmao_type = 6962 (bp->b_flags & (B_PHYS | B_REMAPPED)) ? 6963 DMA_OTYP_BUFVADDR : DMA_OTYP_VADDR; 6964 dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL; 6965 } 6966 6967 /* 6968 * If the buffer has no proc pointer, or the proc 6969 * struct has the kernel address space, or the buffer has 6970 * been marked B_REMAPPED (meaning that it is now 6971 * mapped into the kernel's address space), then 6972 * the address space is kas (kernel address space). 
6973 */ 6974 if ((bp->b_proc == NULL) || (bp->b_proc->p_as == &kas) || 6975 (bp->b_flags & B_REMAPPED)) { 6976 dmareq.dmar_object.dmao_obj.virt_obj.v_as = 0; 6977 } else { 6978 dmareq.dmar_object.dmao_obj.virt_obj.v_as = 6979 bp->b_proc->p_as; 6980 } 6981 } 6982 6983 dip = hp->dmai_rdip; 6984 hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl; 6985 funcp = DEVI(dip)->devi_bus_dma_bindfunc; 6986 return ((*funcp)(hdip, dip, handle, &dmareq, cookiep, ccountp)); 6987 } 6988 6989 int 6990 ddi_dma_addr_bind_handle(ddi_dma_handle_t handle, struct as *as, 6991 caddr_t addr, size_t len, uint_t flags, int (*waitfp)(caddr_t), 6992 caddr_t arg, ddi_dma_cookie_t *cookiep, uint_t *ccountp) 6993 { 6994 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle; 6995 dev_info_t *hdip, *dip; 6996 struct ddi_dma_req dmareq; 6997 int (*funcp)(); 6998 6999 if (len == (uint_t)0) { 7000 return (DDI_DMA_NOMAPPING); 7001 } 7002 dmareq.dmar_flags = flags; 7003 dmareq.dmar_fp = waitfp; 7004 dmareq.dmar_arg = arg; 7005 dmareq.dmar_object.dmao_size = len; 7006 dmareq.dmar_object.dmao_type = DMA_OTYP_VADDR; 7007 dmareq.dmar_object.dmao_obj.virt_obj.v_as = as; 7008 dmareq.dmar_object.dmao_obj.virt_obj.v_addr = addr; 7009 dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL; 7010 7011 dip = hp->dmai_rdip; 7012 hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl; 7013 funcp = DEVI(dip)->devi_bus_dma_bindfunc; 7014 return ((*funcp)(hdip, dip, handle, &dmareq, cookiep, ccountp)); 7015 } 7016 7017 void 7018 ddi_dma_nextcookie(ddi_dma_handle_t handle, ddi_dma_cookie_t *cookiep) 7019 { 7020 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle; 7021 ddi_dma_cookie_t *cp; 7022 7023 cp = hp->dmai_cookie; 7024 ASSERT(cp); 7025 7026 cookiep->dmac_notused = cp->dmac_notused; 7027 cookiep->dmac_type = cp->dmac_type; 7028 cookiep->dmac_address = cp->dmac_address; 7029 cookiep->dmac_size = cp->dmac_size; 7030 hp->dmai_cookie++; 7031 } 7032 7033 int 7034 ddi_dma_numwin(ddi_dma_handle_t handle, uint_t *nwinp) 7035 { 7036 
ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle; 7037 if ((hp->dmai_rflags & DDI_DMA_PARTIAL) == 0) { 7038 return (DDI_FAILURE); 7039 } else { 7040 *nwinp = hp->dmai_nwin; 7041 return (DDI_SUCCESS); 7042 } 7043 } 7044 7045 int 7046 ddi_dma_getwin(ddi_dma_handle_t h, uint_t win, off_t *offp, 7047 size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp) 7048 { 7049 int (*funcp)() = ddi_dma_win; 7050 struct bus_ops *bop; 7051 7052 bop = DEVI(HD)->devi_ops->devo_bus_ops; 7053 if (bop && bop->bus_dma_win) 7054 funcp = bop->bus_dma_win; 7055 7056 return ((*funcp)(HD, HD, h, win, offp, lenp, cookiep, ccountp)); 7057 } 7058 7059 int 7060 ddi_dma_set_sbus64(ddi_dma_handle_t h, ulong_t burstsizes) 7061 { 7062 return (ddi_dma_mctl(HD, HD, h, DDI_DMA_SET_SBUS64, 0, 7063 &burstsizes, 0, 0)); 7064 } 7065 7066 int 7067 i_ddi_dma_fault_check(ddi_dma_impl_t *hp) 7068 { 7069 return (hp->dmai_fault); 7070 } 7071 7072 int 7073 ddi_check_dma_handle(ddi_dma_handle_t handle) 7074 { 7075 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle; 7076 int (*check)(ddi_dma_impl_t *); 7077 7078 if ((check = hp->dmai_fault_check) == NULL) 7079 check = i_ddi_dma_fault_check; 7080 7081 return (((*check)(hp) == DDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE); 7082 } 7083 7084 void 7085 i_ddi_dma_set_fault(ddi_dma_handle_t handle) 7086 { 7087 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle; 7088 void (*notify)(ddi_dma_impl_t *); 7089 7090 if (!hp->dmai_fault) { 7091 hp->dmai_fault = 1; 7092 if ((notify = hp->dmai_fault_notify) != NULL) 7093 (*notify)(hp); 7094 } 7095 } 7096 7097 void 7098 i_ddi_dma_clr_fault(ddi_dma_handle_t handle) 7099 { 7100 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle; 7101 void (*notify)(ddi_dma_impl_t *); 7102 7103 if (hp->dmai_fault) { 7104 hp->dmai_fault = 0; 7105 if ((notify = hp->dmai_fault_notify) != NULL) 7106 (*notify)(hp); 7107 } 7108 } 7109 7110 /* 7111 * register mapping routines. 
 */
/*
 * Map register set 'rnumber' of dip for kernel access and return the
 * mapped kernel virtual address in *addrp along with a data access
 * handle in *handle.  The mapping request is forwarded up the device
 * tree via ddi_map().  On failure the access handle is freed and
 * *handle is set to NULL.
 */
int
ddi_regs_map_setup(dev_info_t *dip, uint_t rnumber, caddr_t *addrp,
    offset_t offset, offset_t len, ddi_device_acc_attr_t *accattrp,
    ddi_acc_handle_t *handle)
{
	ddi_map_req_t mr;
	ddi_acc_hdl_t *hp;
	int result;

	/*
	 * Allocate and initialize the common elements of data access handle.
	 */
	*handle = impl_acc_hdl_alloc(KM_SLEEP, NULL);
	hp = impl_acc_hdl_get(*handle);
	hp->ah_vers = VERS_ACCHDL;
	hp->ah_dip = dip;
	hp->ah_rnumber = rnumber;
	hp->ah_offset = offset;
	hp->ah_len = len;
	hp->ah_acc = *accattrp;

	/*
	 * Set up the mapping request and call to parent.
	 */
	mr.map_op = DDI_MO_MAP_LOCKED;
	mr.map_type = DDI_MT_RNUMBER;
	mr.map_obj.rnumber = rnumber;
	mr.map_prot = PROT_READ | PROT_WRITE;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_handlep = hp;
	mr.map_vers = DDI_MAP_VERSION;
	result = ddi_map(dip, &mr, offset, len, addrp);

	/*
	 * check for end result
	 */
	if (result != DDI_SUCCESS) {
		impl_acc_hdl_free(*handle);
		*handle = (ddi_acc_handle_t)NULL;
	} else {
		hp->ah_addr = *addrp;
	}

	return (result);
}

/*
 * Undo a ddi_regs_map_setup(): issue a DDI_MO_UNMAP request to the
 * parent using the register number, offset, and length recorded in the
 * access handle, then free the handle and NULL out *handlep.
 */
void
ddi_regs_map_free(ddi_acc_handle_t *handlep)
{
	ddi_map_req_t mr;
	ddi_acc_hdl_t *hp;

	hp = impl_acc_hdl_get(*handlep);
	ASSERT(hp);

	mr.map_op = DDI_MO_UNMAP;
	mr.map_type = DDI_MT_RNUMBER;
	mr.map_obj.rnumber = hp->ah_rnumber;
	mr.map_prot = PROT_READ | PROT_WRITE;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_handlep = hp;
	mr.map_vers = DDI_MAP_VERSION;

	/*
	 * Call my parent to unmap my regs.
	 */
	(void) ddi_map(hp->ah_dip, &mr, hp->ah_offset,
	    hp->ah_len, &hp->ah_addr);
	/*
	 * free the handle
	 */
	impl_acc_hdl_free(*handlep);
	*handlep = (ddi_acc_handle_t)NULL;
}

/*
 * Zero-fill device memory through an access handle, writing
 * dev_datasz-sized units.  dev_advcnt is the per-write pointer
 * advance in *elements* (so 0 repeats the same location and negative
 * values walk backwards).  bytecount must be a multiple of the
 * data size.
 */
int
ddi_device_zero(ddi_acc_handle_t handle, caddr_t dev_addr, size_t bytecount,
    ssize_t dev_advcnt, uint_t dev_datasz)
{
	uint8_t *b;
	uint16_t *w;
	uint32_t *l;
	uint64_t *ll;

	/* check for total byte count is multiple of data transfer size */
	if (bytecount != ((bytecount / dev_datasz) * dev_datasz))
		return (DDI_FAILURE);

	switch (dev_datasz) {
	case DDI_DATA_SZ01_ACC:
		for (b = (uint8_t *)dev_addr;
		    bytecount != 0; bytecount -= 1, b += dev_advcnt)
			ddi_put8(handle, b, 0);
		break;
	case DDI_DATA_SZ02_ACC:
		for (w = (uint16_t *)dev_addr;
		    bytecount != 0; bytecount -= 2, w += dev_advcnt)
			ddi_put16(handle, w, 0);
		break;
	case DDI_DATA_SZ04_ACC:
		for (l = (uint32_t *)dev_addr;
		    bytecount != 0; bytecount -= 4, l += dev_advcnt)
			ddi_put32(handle, l, 0);
		break;
	case DDI_DATA_SZ08_ACC:
		for (ll = (uint64_t *)dev_addr;
		    bytecount != 0; bytecount -= 8, ll += dev_advcnt)
			ddi_put64(handle, ll, 0x0ll);
		break;
	default:
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}

/*
 * Copy between two device memory regions through their access handles,
 * one dev_datasz-sized unit at a time.  src_advcnt/dest_advcnt are
 * per-transfer pointer advances in elements (may be zero or negative).
 * bytecount must be a multiple of the data size.
 */
int
ddi_device_copy(
    ddi_acc_handle_t src_handle, caddr_t src_addr, ssize_t src_advcnt,
    ddi_acc_handle_t dest_handle, caddr_t dest_addr, ssize_t dest_advcnt,
    size_t bytecount, uint_t dev_datasz)
{
	uint8_t *b_src, *b_dst;
	uint16_t *w_src, *w_dst;
	uint32_t *l_src, *l_dst;
	uint64_t *ll_src, *ll_dst;

	/* check for total byte count is multiple of data transfer size */
	if (bytecount != ((bytecount / dev_datasz) * dev_datasz))
		return (DDI_FAILURE);

	switch (dev_datasz) {
	case DDI_DATA_SZ01_ACC:
		b_src = (uint8_t *)src_addr;
		b_dst = (uint8_t *)dest_addr;

		for (; bytecount != 0; bytecount -= 1) {
			ddi_put8(dest_handle, b_dst,
			    ddi_get8(src_handle, b_src));
			b_dst += dest_advcnt;
			b_src += src_advcnt;
		}
		break;
	case DDI_DATA_SZ02_ACC:
		w_src = (uint16_t *)src_addr;
		w_dst = (uint16_t *)dest_addr;

		for (; bytecount != 0; bytecount -= 2) {
			ddi_put16(dest_handle, w_dst,
			    ddi_get16(src_handle, w_src));
			w_dst += dest_advcnt;
			w_src += src_advcnt;
		}
		break;
	case DDI_DATA_SZ04_ACC:
		l_src = (uint32_t *)src_addr;
		l_dst = (uint32_t *)dest_addr;

		for (; bytecount != 0; bytecount -= 4) {
			ddi_put32(dest_handle, l_dst,
			    ddi_get32(src_handle, l_src));
			l_dst += dest_advcnt;
			l_src += src_advcnt;
		}
		break;
	case DDI_DATA_SZ08_ACC:
		ll_src = (uint64_t *)src_addr;
		ll_dst = (uint64_t *)dest_addr;

		for (; bytecount != 0; bytecount -= 8) {
			ddi_put64(dest_handle, ll_dst,
			    ddi_get64(src_handle, ll_src));
			ll_dst += dest_advcnt;
			ll_src += src_advcnt;
		}
		break;
	default:
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}

/* byte-swap helpers: reverse byte order of 16/32/64-bit values */
#define	swap16(value)  \
	((((value) & 0xff) << 8) | ((value) >> 8))

#define	swap32(value)	\
	(((uint32_t)swap16((uint16_t)((value) & 0xffff)) << 16) | \
	(uint32_t)swap16((uint16_t)((value) >> 16)))

#define	swap64(value)	\
	(((uint64_t)swap32((uint32_t)((value) & 0xffffffff)) \
	    << 32) | \
	(uint64_t)swap32((uint32_t)((value) >> 32)))

/* Return 'value' with its two bytes exchanged. */
uint16_t
ddi_swap16(uint16_t value)
{
	return (swap16(value));
}

/* Return 'value' with its four bytes reversed. */
uint32_t
ddi_swap32(uint32_t value)
{
	return (swap32(value));
}

/* Return 'value' with its eight bytes reversed. */
uint64_t
ddi_swap64(uint64_t value)
{
	return (swap64(value));
}

/*
 * Convert a binding name to a driver name.
7326 * A binding name is the name used to determine the driver for a 7327 * device - it may be either an alias for the driver or the name 7328 * of the driver itself. 7329 */ 7330 char * 7331 i_binding_to_drv_name(char *bname) 7332 { 7333 major_t major_no; 7334 7335 ASSERT(bname != NULL); 7336 7337 if ((major_no = ddi_name_to_major(bname)) == -1) 7338 return (NULL); 7339 return (ddi_major_to_name(major_no)); 7340 } 7341 7342 /* 7343 * Search for minor name that has specified dev_t and spec_type. 7344 * If spec_type is zero then any dev_t match works. Since we 7345 * are returning a pointer to the minor name string, we require the 7346 * caller to do the locking. 7347 */ 7348 char * 7349 i_ddi_devtspectype_to_minorname(dev_info_t *dip, dev_t dev, int spec_type) 7350 { 7351 struct ddi_minor_data *dmdp; 7352 7353 /* 7354 * The did layered driver currently intentionally returns a 7355 * devinfo ptr for an underlying sd instance based on a did 7356 * dev_t. In this case it is not an error. 7357 * 7358 * The did layered driver is associated with Sun Cluster. 7359 */ 7360 ASSERT((ddi_driver_major(dip) == getmajor(dev)) || 7361 (strcmp(ddi_major_to_name(getmajor(dev)), "did") == 0)); 7362 ASSERT(MUTEX_HELD(&(DEVI(dip)->devi_lock))); 7363 7364 for (dmdp = DEVI(dip)->devi_minor; dmdp; dmdp = dmdp->next) { 7365 if (((dmdp->type == DDM_MINOR) || 7366 (dmdp->type == DDM_INTERNAL_PATH) || 7367 (dmdp->type == DDM_DEFAULT)) && 7368 (dmdp->ddm_dev == dev) && 7369 ((((spec_type & (S_IFCHR|S_IFBLK))) == 0) || 7370 (dmdp->ddm_spec_type == spec_type))) 7371 return (dmdp->ddm_name); 7372 } 7373 7374 return (NULL); 7375 } 7376 7377 /* 7378 * Find the devt and spectype of the specified minor_name. 7379 * Return DDI_FAILURE if minor_name not found. Since we are 7380 * returning everything via arguments we can do the locking. 
7381 */ 7382 int 7383 i_ddi_minorname_to_devtspectype(dev_info_t *dip, char *minor_name, 7384 dev_t *devtp, int *spectypep) 7385 { 7386 struct ddi_minor_data *dmdp; 7387 7388 /* deal with clone minor nodes */ 7389 if (dip == clone_dip) { 7390 major_t major; 7391 /* 7392 * Make sure minor_name is a STREAMS driver. 7393 * We load the driver but don't attach to any instances. 7394 */ 7395 7396 major = ddi_name_to_major(minor_name); 7397 if (major == (major_t)-1) 7398 return (DDI_FAILURE); 7399 7400 if (ddi_hold_driver(major) == NULL) 7401 return (DDI_FAILURE); 7402 7403 if (STREAMSTAB(major) == NULL) { 7404 ddi_rele_driver(major); 7405 return (DDI_FAILURE); 7406 } 7407 ddi_rele_driver(major); 7408 7409 if (devtp) 7410 *devtp = makedevice(clone_major, (minor_t)major); 7411 7412 if (spectypep) 7413 *spectypep = S_IFCHR; 7414 7415 return (DDI_SUCCESS); 7416 } 7417 7418 ASSERT(!MUTEX_HELD(&(DEVI(dip)->devi_lock))); 7419 mutex_enter(&(DEVI(dip)->devi_lock)); 7420 7421 for (dmdp = DEVI(dip)->devi_minor; dmdp; dmdp = dmdp->next) { 7422 if (((dmdp->type != DDM_MINOR) && 7423 (dmdp->type != DDM_INTERNAL_PATH) && 7424 (dmdp->type != DDM_DEFAULT)) || 7425 strcmp(minor_name, dmdp->ddm_name)) 7426 continue; 7427 7428 if (devtp) 7429 *devtp = dmdp->ddm_dev; 7430 7431 if (spectypep) 7432 *spectypep = dmdp->ddm_spec_type; 7433 7434 mutex_exit(&(DEVI(dip)->devi_lock)); 7435 return (DDI_SUCCESS); 7436 } 7437 7438 mutex_exit(&(DEVI(dip)->devi_lock)); 7439 return (DDI_FAILURE); 7440 } 7441 7442 extern char hw_serial[]; 7443 static kmutex_t devid_gen_mutex; 7444 static short devid_gen_number; 7445 7446 #ifdef DEBUG 7447 7448 static int devid_register_corrupt = 0; 7449 static int devid_register_corrupt_major = 0; 7450 static int devid_register_corrupt_hint = 0; 7451 static int devid_register_corrupt_hint_major = 0; 7452 7453 static int devid_lyr_debug = 0; 7454 7455 #define DDI_DEBUG_DEVID_DEVTS(msg, ndevs, devs) \ 7456 if (devid_lyr_debug) \ 7457 ddi_debug_devid_devts(msg, ndevs, devs) 

#else

#define	DDI_DEBUG_DEVID_DEVTS(msg, ndevs, devs)

#endif	/* DEBUG */


#ifdef	DEBUG

/* print a list of dev_t values, one per line, prefixed by msg */
static void
ddi_debug_devid_devts(char *msg, int ndevs, dev_t *devs)
{
	int i;

	cmn_err(CE_CONT, "%s:\n", msg);
	for (i = 0; i < ndevs; i++) {
		cmn_err(CE_CONT, "    0x%lx\n", devs[i]);
	}
}

/* print a list of path strings, one per line, prefixed by msg */
static void
ddi_debug_devid_paths(char *msg, int npaths, char **paths)
{
	int i;

	cmn_err(CE_CONT, "%s:\n", msg);
	for (i = 0; i < npaths; i++) {
		cmn_err(CE_CONT, "    %s\n", paths[i]);
	}
}

/* print the dev_t values associated with a single path */
static void
ddi_debug_devid_devts_per_path(char *path, int ndevs, dev_t *devs)
{
	int i;

	cmn_err(CE_CONT, "dev_ts per path %s\n", path);
	for (i = 0; i < ndevs; i++) {
		cmn_err(CE_CONT, "    0x%lx\n", devs[i]);
	}
}

#endif	/* DEBUG */

/*
 * Register device id into DDI framework.
 * Must be called when device is attached.
 *
 * Stamps the trailing bytes of the driver name into the devid's hint
 * field, encodes the devid as a string, and stores it as the
 * DEVID_PROP_NAME string property on dip.
 */
static int
i_ddi_devid_register(dev_info_t *dip, ddi_devid_t devid)
{
	impl_devid_t	*i_devid = (impl_devid_t *)devid;
	size_t		driver_len;
	const char	*driver_name;
	char		*devid_str;
	major_t		major;

	if ((dip == NULL) ||
	    ((major = ddi_driver_major(dip)) == (major_t)-1))
		return (DDI_FAILURE);

	/* verify that the devid is valid */
	if (ddi_devid_valid(devid) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/* Updating driver name hint in devid */
	driver_name = ddi_driver_name(dip);
	driver_len = strlen(driver_name);
	if (driver_len > DEVID_HINT_SIZE) {
		/* Pick up last four characters of driver name */
		driver_name += driver_len - DEVID_HINT_SIZE;
		driver_len = DEVID_HINT_SIZE;
	}
	bzero(i_devid->did_driver, DEVID_HINT_SIZE);
	bcopy(driver_name, i_devid->did_driver, driver_len);

#ifdef DEBUG
	/* Corrupt the devid for testing. */
	if (devid_register_corrupt)
		i_devid->did_id[0] += devid_register_corrupt;
	if (devid_register_corrupt_major &&
	    (major == devid_register_corrupt_major))
		i_devid->did_id[0] += 1;
	if (devid_register_corrupt_hint)
		i_devid->did_driver[0] += devid_register_corrupt_hint;
	if (devid_register_corrupt_hint_major &&
	    (major == devid_register_corrupt_hint_major))
		i_devid->did_driver[0] += 1;
#endif /* DEBUG */

	/* encode the devid as a string */
	if ((devid_str = ddi_devid_str_encode(devid, NULL)) == NULL)
		return (DDI_FAILURE);

	/* add string as a string property */
	if (ndi_prop_update_string(DDI_DEV_T_NONE, dip,
	    DEVID_PROP_NAME, devid_str) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s%d: devid property update failed",
		    ddi_driver_name(dip), ddi_get_instance(dip));
		ddi_devid_str_free(devid_str);
		return (DDI_FAILURE);
	}

	ddi_devid_str_free(devid_str);

#ifdef DEVID_COMPATIBILITY
	/*
	 * marker for devinfo snapshot compatibility.
	 * This code gets deleted when di_devid is gone from libdevid
	 */
	DEVI(dip)->devi_devid = DEVID_COMPATIBILITY;
#endif /* DEVID_COMPATIBILITY */
	return (DDI_SUCCESS);
}

/*
 * Public devid registration: register with the framework, then record
 * the devid in the devid-to-path cache and set DEVI_REGISTERED_DEVID.
 * A cache failure is warned about but does not fail the registration.
 */
int
ddi_devid_register(dev_info_t *dip, ddi_devid_t devid)
{
	int rval;

	rval = i_ddi_devid_register(dip, devid);
	if (rval == DDI_SUCCESS) {
		/*
		 * Register devid in devid-to-path cache
		 */
		if (e_devid_cache_register(dip, devid) == DDI_SUCCESS) {
			mutex_enter(&DEVI(dip)->devi_lock);
			DEVI(dip)->devi_flags |= DEVI_REGISTERED_DEVID;
			mutex_exit(&DEVI(dip)->devi_lock);
		} else {
			cmn_err(CE_WARN, "%s%d: failed to cache devid",
			    ddi_driver_name(dip), ddi_get_instance(dip));
		}
	} else {
		cmn_err(CE_WARN, "%s%d: failed to register devid",
		    ddi_driver_name(dip), ddi_get_instance(dip));
	}
	return (rval);
}

/*
 * Remove (unregister) device id from DDI framework.
 * Must be called when device is detached.
 */
static void
i_ddi_devid_unregister(dev_info_t *dip)
{
#ifdef DEVID_COMPATIBILITY
	/*
	 * marker for micro release devinfo snapshot compatibility.
	 * This code gets deleted for the minor release.
	 */
	DEVI(dip)->devi_devid = NULL;		/* unset DEVID_PROP */
#endif /* DEVID_COMPATIBILITY */

	/* remove the devid property */
	(void) ndi_prop_remove(DDI_DEV_T_NONE, dip, DEVID_PROP_NAME);
}

/*
 * Public devid unregistration: clear DEVI_REGISTERED_DEVID, drop the
 * devid from the cache, and remove the devid property.
 */
void
ddi_devid_unregister(dev_info_t *dip)
{
	mutex_enter(&DEVI(dip)->devi_lock);
	DEVI(dip)->devi_flags &= ~DEVI_REGISTERED_DEVID;
	mutex_exit(&DEVI(dip)->devi_lock);
	e_devid_cache_unregister(dip);
	i_ddi_devid_unregister(dip);
}

/*
 * Allocate and initialize a device id.
 */
int
ddi_devid_init(
	dev_info_t	*dip,
	ushort_t	devid_type,
	ushort_t	nbytes,
	void		*id,
	ddi_devid_t	*ret_devid)
{
	impl_devid_t	*i_devid;
	/* header size plus payload; did_id's declared byte is subtracted */
	int		sz = sizeof (*i_devid) + nbytes - sizeof (char);
	int		driver_len;
	const char	*driver_name;

	switch (devid_type) {
	case DEVID_SCSI3_WWN:
		/*FALLTHRU*/
	case DEVID_SCSI_SERIAL:
		/*FALLTHRU*/
	case DEVID_ATA_SERIAL:
		/*FALLTHRU*/
	case DEVID_ENCAP:
		/* caller-supplied id bytes are required for these types */
		if (nbytes == 0)
			return (DDI_FAILURE);
		if (id == NULL)
			return (DDI_FAILURE);
		break;
	case DEVID_FAB:
		/* fabricated devids are generated here; no id may be given */
		if (nbytes != 0)
			return (DDI_FAILURE);
		if (id != NULL)
			return (DDI_FAILURE);
		/* payload: hostid + 32-bit timestamp + generation number */
		nbytes = sizeof (int) +
		    sizeof (struct timeval32) + sizeof (short);
		sz += nbytes;
		break;
	default:
		return (DDI_FAILURE);
	}

	if ((i_devid = kmem_zalloc(sz, KM_SLEEP)) == NULL)
		return (DDI_FAILURE);

	i_devid->did_magic_hi = DEVID_MAGIC_MSB;
	i_devid->did_magic_lo = DEVID_MAGIC_LSB;
	i_devid->did_rev_hi = DEVID_REV_MSB;
	i_devid->did_rev_lo = DEVID_REV_LSB;
	DEVID_FORMTYPE(i_devid, devid_type);
	DEVID_FORMLEN(i_devid, nbytes);

	/* Fill in driver name hint */
	driver_name = ddi_driver_name(dip);
	driver_len = strlen(driver_name);
	if (driver_len > DEVID_HINT_SIZE) {
		/* Pick up last four characters of driver name */
		driver_name += driver_len - DEVID_HINT_SIZE;
		driver_len = DEVID_HINT_SIZE;
	}

	bcopy(driver_name, i_devid->did_driver, driver_len);

	/* Fill in id field */
	if (devid_type == DEVID_FAB) {
		char		*cp;
		int		hostid;
		char		*hostid_cp = &hw_serial[0];
		struct timeval32 timestamp32;
		int		i;
		int		*ip;
		short		gen;

		/* increase the generation number */
		mutex_enter(&devid_gen_mutex);
		gen = devid_gen_number++;
		mutex_exit(&devid_gen_mutex);

		cp = i_devid->did_id;

		/* Fill in host id (big-endian byte ordering) */
		hostid = stoi(&hostid_cp);
		*cp++ = hibyte(hiword(hostid));
		*cp++ = lobyte(hiword(hostid));
		*cp++ = hibyte(loword(hostid));
		*cp++ = lobyte(loword(hostid));

		/*
		 * Fill in timestamp (big-endian byte ordering)
		 *
		 * (Note that the format may have to be changed
		 * before 2038 comes around, though it's arguably
		 * unique enough as it is..)
		 */
		uniqtime32(&timestamp32);
		ip = (int *)&timestamp32;
		for (i = 0;
		    i < sizeof (timestamp32) / sizeof (int); i++, ip++) {
			int	val;
			val = *ip;
			*cp++ = hibyte(hiword(val));
			*cp++ = lobyte(hiword(val));
			*cp++ = hibyte(loword(val));
			*cp++ = lobyte(loword(val));
		}

		/* fill in the generation number */
		*cp++ = hibyte(gen);
		*cp++ = lobyte(gen);
	} else
		bcopy(id, i_devid->did_id, nbytes);

	/* return device id */
	*ret_devid = (ddi_devid_t)i_devid;
	return (DDI_SUCCESS);
}

/*
 * Return the devid registered on dip (any dev_t) in *ret_devid.
 */
int
ddi_devid_get(dev_info_t *dip, ddi_devid_t *ret_devid)
{
	return (i_ddi_devi_get_devid(DDI_DEV_T_ANY, dip, ret_devid));
}

/*
 * Look up the devid string property on dip — first dev-specific, then
 * falling back to DDI_DEV_T_ANY — and decode it into binary form.
 */
int
i_ddi_devi_get_devid(dev_t dev, dev_info_t *dip, ddi_devid_t *ret_devid)
{
	char		*devidstr;

	ASSERT(dev != DDI_DEV_T_NONE);

	/* look up the property, devt specific first */
	if (ddi_prop_lookup_string(dev, dip, DDI_PROP_DONTPASS,
	    DEVID_PROP_NAME, &devidstr) != DDI_PROP_SUCCESS) {
		if ((dev == DDI_DEV_T_ANY) ||
		    (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip,
		    DDI_PROP_DONTPASS, DEVID_PROP_NAME, &devidstr) !=
		    DDI_PROP_SUCCESS)) {
			return (DDI_FAILURE);
		}
	}

	/* convert to binary form */
	if (ddi_devid_str_decode(devidstr, ret_devid, NULL) == -1) {
		ddi_prop_free(devidstr);
		return (DDI_FAILURE);
	}
	ddi_prop_free(devidstr);
	return (DDI_SUCCESS);
}

/*
 * Return a copy of the device id for dev_t
 */
int
ddi_lyr_get_devid(dev_t dev, ddi_devid_t *ret_devid)
{
	dev_info_t	*dip;
	int		rval;

	/* get the dip */
	if ((dip = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
		return (DDI_FAILURE);

	rval = i_ddi_devi_get_devid(dev, dip, ret_devid);

	ddi_release_devi(dip);		/* e_ddi_hold_devi_by_dev() */
	return (rval);
}

/*
 * Return a copy of the minor name for dev_t and spec_type
 *
 * The copy is allocated with KM_SLEEP, which means devi_lock must be
 * dropped around the allocation; the lookup is re-done after
 * re-acquiring the lock and retried if the name's length changed.
 * The caller owns (and must kmem_free) *minor_name on success.
 */
int
ddi_lyr_get_minor_name(dev_t dev, int spec_type, char **minor_name)
{
	dev_info_t	*dip;
	char		*nm;
	size_t		alloc_sz, sz;

	if ((dip = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
		return (DDI_FAILURE);

	mutex_enter(&(DEVI(dip)->devi_lock));

	if ((nm = i_ddi_devtspectype_to_minorname(dip,
	    dev, spec_type)) == NULL) {
		mutex_exit(&(DEVI(dip)->devi_lock));
		ddi_release_devi(dip);	/* e_ddi_hold_devi_by_dev() */
		return (DDI_FAILURE);
	}

	/* make a copy */
	alloc_sz = strlen(nm) + 1;
retry:
	/* drop lock to allocate memory */
	mutex_exit(&(DEVI(dip)->devi_lock));
	*minor_name = kmem_alloc(alloc_sz, KM_SLEEP);
	mutex_enter(&(DEVI(dip)->devi_lock));

	/* re-check things, since we dropped the lock */
	if ((nm = i_ddi_devtspectype_to_minorname(dip,
	    dev, spec_type)) == NULL) {
		mutex_exit(&(DEVI(dip)->devi_lock));
		kmem_free(*minor_name, alloc_sz);
		*minor_name = NULL;
		ddi_release_devi(dip);	/* e_ddi_hold_devi_by_dev() */
		return (DDI_FAILURE);
	}

	/* verify size is the same */
	sz = strlen(nm) + 1;
	if (alloc_sz != sz) {
		kmem_free(*minor_name, alloc_sz);
		alloc_sz = sz;
		goto retry;
	}

	/* sz == alloc_sz - make a copy */
	(void) strcpy(*minor_name, nm);

	mutex_exit(&(DEVI(dip)->devi_lock));
	ddi_release_devi(dip);	/* e_ddi_hold_devi_by_dev() */
	return (DDI_SUCCESS);
}

/*
 * Resolve a devid (plus optional minor_name) to the list of matching
 * dev_t values.  Tries the devid cache first; if that misses, runs
 * devid discovery and tries the cache once more.  The returned list
 * must be freed with ddi_lyr_free_devlist().
 */
int
ddi_lyr_devid_to_devlist(
	ddi_devid_t	devid,
	char		*minor_name,
	int		*retndevs,
	dev_t		**retdevs)
{
	ASSERT(ddi_devid_valid(devid) == DDI_SUCCESS);

	if (e_devid_cache_to_devt_list(devid, minor_name,
	    retndevs, retdevs) == DDI_SUCCESS) {
		ASSERT(*retndevs > 0);
		DDI_DEBUG_DEVID_DEVTS("ddi_lyr_devid_to_devlist",
		    *retndevs, *retdevs);
		return (DDI_SUCCESS);
	}

	if (e_ddi_devid_discovery(devid) == DDI_FAILURE) {
		return (DDI_FAILURE);
	}

	if (e_devid_cache_to_devt_list(devid, minor_name,
	    retndevs, retdevs) == DDI_SUCCESS) {
		ASSERT(*retndevs > 0);
		DDI_DEBUG_DEVID_DEVTS("ddi_lyr_devid_to_devlist",
		    *retndevs, *retdevs);
		return (DDI_SUCCESS);
	}

	return (DDI_FAILURE);
}

/*
 * Free a dev_t list returned by ddi_lyr_devid_to_devlist().
 */
void
ddi_lyr_free_devlist(dev_t *devlist, int ndevs)
{
	kmem_free(devlist, sizeof (dev_t) * ndevs);
}

/*
 * Note: This will need to be fixed if we ever allow processes to
 * have more than one data model per exec.
 */
model_t
ddi_mmap_get_model(void)
{
	return (get_udatamodel());
}

/*
 * Reduce a caller-supplied model value to its data-model bits,
 * relative to the native model.
 */
model_t
ddi_model_convert_from(model_t model)
{
	return ((model & DDI_MODEL_MASK) & ~DDI_MODEL_NATIVE);
}

/*
 * ddi interfaces managing storage and retrieval of eventcookies.
 */

/*
 * Invoke bus nexus driver's implementation of the
 * (*bus_remove_eventcall)() interface to remove a registered
 * callback handler for "event".
7915 */ 7916 int 7917 ddi_remove_event_handler(ddi_callback_id_t id) 7918 { 7919 ndi_event_callbacks_t *cb = (ndi_event_callbacks_t *)id; 7920 dev_info_t *ddip; 7921 7922 ASSERT(cb); 7923 if (!cb) { 7924 return (DDI_FAILURE); 7925 } 7926 7927 ddip = NDI_EVENT_DDIP(cb->ndi_evtcb_cookie); 7928 return (ndi_busop_remove_eventcall(ddip, id)); 7929 } 7930 7931 /* 7932 * Invoke bus nexus driver's implementation of the 7933 * (*bus_add_eventcall)() interface to register a callback handler 7934 * for "event". 7935 */ 7936 int 7937 ddi_add_event_handler(dev_info_t *dip, ddi_eventcookie_t event, 7938 void (*handler)(dev_info_t *, ddi_eventcookie_t, void *, void *), 7939 void *arg, ddi_callback_id_t *id) 7940 { 7941 return (ndi_busop_add_eventcall(dip, dip, event, handler, arg, id)); 7942 } 7943 7944 7945 /* 7946 * Return a handle for event "name" by calling up the device tree 7947 * hierarchy via (*bus_get_eventcookie)() interface until claimed 7948 * by a bus nexus or top of dev_info tree is reached. 
 */
int
ddi_get_eventcookie(dev_info_t *dip, char *name,
    ddi_eventcookie_t *event_cookiep)
{
	return (ndi_busop_get_eventcookie(dip, dip,
	    name, event_cookiep));
}

/*
 * single thread access to dev_info node and set state
 */
void
i_devi_enter(dev_info_t *dip, uint_t s_mask, uint_t w_mask, int has_lock)
{
	if (!has_lock)
		mutex_enter(&(DEVI(dip)->devi_lock));

	ASSERT(mutex_owned(&(DEVI(dip)->devi_lock)));

	/*
	 * wait until state(s) have been changed
	 */
	while ((DEVI(dip)->devi_state & w_mask) != 0) {
		cv_wait(&(DEVI(dip)->devi_cv), &(DEVI(dip)->devi_lock));
	}
	/* claim ownership by setting the requested state bits */
	DEVI(dip)->devi_state |= s_mask;

	if (!has_lock)
		mutex_exit(&(DEVI(dip)->devi_lock));
}

/*
 * Release state bits claimed by i_devi_enter() and wake any waiters.
 */
void
i_devi_exit(dev_info_t *dip, uint_t c_mask, int has_lock)
{
	if (!has_lock)
		mutex_enter(&(DEVI(dip)->devi_lock));

	ASSERT(mutex_owned(&(DEVI(dip)->devi_lock)));

	/*
	 * clear the state(s) and wakeup any threads waiting
	 * for state change
	 */
	DEVI(dip)->devi_state &= ~c_mask;
	cv_broadcast(&(DEVI(dip)->devi_cv));

	if (!has_lock)
		mutex_exit(&(DEVI(dip)->devi_lock));
}

/*
 * This procedure is provided as the general callback function when
 * umem_lockmemory calls as_add_callback for long term memory locking.
 * When as_unmap, as_setprot, or as_free encounter segments which have
 * locked memory, this callback will be invoked.
 */
void
umem_lock_undo(struct as *as, void *arg, uint_t event)
{
	_NOTE(ARGUNUSED(as, event))
	struct ddi_umem_cookie *cp = (struct ddi_umem_cookie *)arg;

	/*
	 * Call the cleanup function.  Decrement the cookie reference
	 * count, if it goes to zero, return the memory for the cookie.
	 * The i_ddi_umem_unlock for this cookie may or may not have been
	 * called already.  It is the responsibility of the caller of
	 * umem_lockmemory to handle the case of the cleanup routine
	 * being called after a ddi_umem_unlock for the cookie
	 * was called.
	 */

	(*cp->callbacks.cbo_umem_lock_cleanup)((ddi_umem_cookie_t)cp);

	/* remove the cookie if reference goes to zero */
	if (atomic_add_long_nv((ulong_t *)(&(cp->cook_refcnt)), -1) == 0) {
		kmem_free(cp, sizeof (struct ddi_umem_cookie));
	}
}

/*
 * The following two Consolidation Private routines provide generic
 * interfaces to increase/decrease the amount of device-locked memory.
 *
 * To keep project_rele and project_hold consistent, i_ddi_decr_locked_memory()
 * must be called every time i_ddi_incr_locked_memory() is called.
 */
int
/* ARGSUSED */
i_ddi_incr_locked_memory(proc_t *procp, rctl_qty_t inc)
{
	ASSERT(procp != NULL);
	mutex_enter(&procp->p_lock);
	/* charge 'inc' against the process's locked-memory rctl */
	if (rctl_incr_locked_mem(procp, NULL, inc, 1)) {
		mutex_exit(&procp->p_lock);
		return (ENOMEM);
	}
	mutex_exit(&procp->p_lock);
	return (0);
}

/*
 * To keep project_rele and project_hold consistent, i_ddi_incr_locked_memory()
 * must be called every time i_ddi_decr_locked_memory() is called.
 */
/* ARGSUSED */
void
i_ddi_decr_locked_memory(proc_t *procp, rctl_qty_t dec)
{
	ASSERT(procp != NULL);
	mutex_enter(&procp->p_lock);
	rctl_decr_locked_mem(procp, NULL, dec, 1);
	mutex_exit(&procp->p_lock);
}

/*
 * This routine checks if the max-locked-memory resource ctl is
 * exceeded, if not increments it, grabs a hold on the project.
8068 * Returns 0 if successful otherwise returns error code 8069 */ 8070 static int 8071 umem_incr_devlockmem(struct ddi_umem_cookie *cookie) 8072 { 8073 proc_t *procp; 8074 int ret; 8075 8076 ASSERT(cookie); 8077 procp = cookie->procp; 8078 ASSERT(procp); 8079 8080 if ((ret = i_ddi_incr_locked_memory(procp, 8081 cookie->size)) != 0) { 8082 return (ret); 8083 } 8084 return (0); 8085 } 8086 8087 /* 8088 * Decrements the max-locked-memory resource ctl and releases 8089 * the hold on the project that was acquired during umem_incr_devlockmem 8090 */ 8091 static void 8092 umem_decr_devlockmem(struct ddi_umem_cookie *cookie) 8093 { 8094 proc_t *proc; 8095 8096 proc = (proc_t *)cookie->procp; 8097 if (!proc) 8098 return; 8099 8100 i_ddi_decr_locked_memory(proc, cookie->size); 8101 } 8102 8103 /* 8104 * A consolidation private function which is essentially equivalent to 8105 * ddi_umem_lock but with the addition of arguments ops_vector and procp. 8106 * A call to as_add_callback is done if DDI_UMEMLOCK_LONGTERM is set, and 8107 * the ops_vector is valid. 8108 * 8109 * Lock the virtual address range in the current process and create a 8110 * ddi_umem_cookie (of type UMEM_LOCKED). This can be used to pass to 8111 * ddi_umem_iosetup to create a buf or do devmap_umem_setup/remap to export 8112 * to user space. 8113 * 8114 * Note: The resource control accounting currently uses a full charge model 8115 * in other words attempts to lock the same/overlapping areas of memory 8116 * will deduct the full size of the buffer from the projects running 8117 * counter for the device locked memory. 
 *
 * addr, size should be PAGESIZE aligned
 *
 * flags - DDI_UMEMLOCK_READ, DDI_UMEMLOCK_WRITE or both
 *	identifies whether the locked memory will be read or written or both
 *	DDI_UMEMLOCK_LONGTERM must be set when the locking will
 *	be maintained for an indefinitely long period (essentially permanent),
 *	rather than for what would be required for a typical I/O completion.
 *	When DDI_UMEMLOCK_LONGTERM is set, umem_lockmemory will return EFAULT
 *	if the memory pertains to a regular file which is mapped MAP_SHARED.
 *	This is to prevent a deadlock if a file truncation is attempted
 *	after the locking is done.
 *
 * Returns 0 on success
 *	EINVAL - for invalid parameters
 *	EPERM, ENOMEM and other error codes returned by as_pagelock
 *	ENOMEM - is returned if the current request to lock memory exceeds
 *		*.max-locked-memory resource control value.
 *	EFAULT - memory pertains to a regular file mapped shared and
 *		DDI_UMEMLOCK_LONGTERM flag is set
 *	EAGAIN - could not start the ddi_umem_unlock list processing thread
 */
int
umem_lockmemory(caddr_t addr, size_t len, int flags, ddi_umem_cookie_t *cookie,
    struct umem_callback_ops *ops_vector,
    proc_t *procp)
{
	int	error;
	struct ddi_umem_cookie *p;
	void	(*driver_callback)() = NULL;
	struct as *as = procp->p_as;
	struct seg	*seg;
	vnode_t	*vp;

	*cookie = NULL;		/* in case of any error return */

	/* These are the only three valid flags */
	if ((flags & ~(DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE |
	    DDI_UMEMLOCK_LONGTERM)) != 0)
		return (EINVAL);

	/* At least one (can be both) of the two access flags must be set */
	if ((flags & (DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) == 0)
		return (EINVAL);

	/* addr and len must be page-aligned */
	if (((uintptr_t)addr & PAGEOFFSET) != 0)
		return (EINVAL);

	if ((len & PAGEOFFSET) != 0)
		return (EINVAL);

	/*
	 * For longterm locking a driver callback must be specified; if
	 * not longterm then a callback is optional.
	 */
	if (ops_vector != NULL) {
		if (ops_vector->cbo_umem_callback_version !=
		    UMEM_CALLBACK_VERSION)
			return (EINVAL);
		else
			driver_callback = ops_vector->cbo_umem_lock_cleanup;
	}
	if ((driver_callback == NULL) && (flags & DDI_UMEMLOCK_LONGTERM))
		return (EINVAL);

	/*
	 * Call i_ddi_umem_unlock_thread_start if necessary.  It will
	 * be called on first ddi_umem_lock or umem_lockmemory call.
	 */
	if (ddi_umem_unlock_thread == NULL)
		i_ddi_umem_unlock_thread_start();

	/* Allocate memory for the cookie */
	p = kmem_zalloc(sizeof (struct ddi_umem_cookie), KM_SLEEP);

	/* Convert the flags to seg_rw type */
	if (flags & DDI_UMEMLOCK_WRITE) {
		p->s_flags = S_WRITE;
	} else {
		p->s_flags = S_READ;
	}

	/* Store procp in cookie for later iosetup/unlock */
	p->procp = (void *)procp;

	/*
	 * Store the struct as pointer in cookie for later use by
	 * ddi_umem_unlock.  The proc->p_as will be stale if ddi_umem_unlock
	 * is called after relvm is called.
	 */
	p->asp = as;

	/*
	 * The size field is needed for lockmem accounting.
	 */
	p->size = len;

	/* Charge the locked-memory rctl before actually locking any pages */
	if (umem_incr_devlockmem(p) != 0) {
		/*
		 * The requested memory cannot be locked
		 */
		kmem_free(p, sizeof (struct ddi_umem_cookie));
		*cookie = (ddi_umem_cookie_t)NULL;
		return (ENOMEM);
	}

	/* Lock the pages corresponding to addr, len in memory */
	error = as_pagelock(as, &(p->pparray), addr, len, p->s_flags);
	if (error != 0) {
		umem_decr_devlockmem(p);
		kmem_free(p, sizeof (struct ddi_umem_cookie));
		*cookie = (ddi_umem_cookie_t)NULL;
		return (error);
	}

	/*
	 * For longterm locking the addr must pertain to a seg_vn segment
	 * or a seg_spt segment.
	 * If the segment pertains to a regular file, it cannot be
	 * mapped MAP_SHARED.
	 * This is to prevent a deadlock if a file truncation is attempted
	 * after the locking is done.
	 * Doing this after as_pagelock guarantees persistence of the as; if
	 * an unacceptable segment is found, the cleanup includes calling
	 * as_pageunlock before returning EFAULT.
	 */
	if (flags & DDI_UMEMLOCK_LONGTERM) {
		extern struct seg_ops segspt_shmops;
		AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
		/* Walk every segment overlapping [addr, addr + len) */
		for (seg = as_segat(as, addr); ; seg = AS_SEGNEXT(as, seg)) {
			if (seg == NULL || seg->s_base > addr + len)
				break;
			if (((seg->s_ops != &segvn_ops) &&
			    (seg->s_ops != &segspt_shmops)) ||
			    ((SEGOP_GETVP(seg, addr, &vp) == 0 &&
			    vp != NULL && vp->v_type == VREG) &&
			    (SEGOP_GETTYPE(seg, addr) & MAP_SHARED))) {
				as_pageunlock(as, p->pparray,
				    addr, len, p->s_flags);
				AS_LOCK_EXIT(as, &as->a_lock);
				umem_decr_devlockmem(p);
				kmem_free(p, sizeof (struct ddi_umem_cookie));
				*cookie = (ddi_umem_cookie_t)NULL;
				return (EFAULT);
			}
		}
		AS_LOCK_EXIT(as, &as->a_lock);
	}


	/* Initialize the fields in the ddi_umem_cookie */
	p->cvaddr = addr;
	p->type = UMEM_LOCKED;
	if (driver_callback != NULL) {
		/* i_ddi_umem_unlock and umem_lock_undo may need the cookie */
		p->cook_refcnt = 2;
		p->callbacks = *ops_vector;
	} else {
		/* only i_ddi_umem_unlock needs the cookie */
		p->cook_refcnt = 1;
	}

	*cookie = (ddi_umem_cookie_t)p;

	/*
	 * If a driver callback was specified, add an entry to the
	 * as struct callback list.  The as_pagelock above guarantees
	 * the persistence of as.
	 */
	if (driver_callback) {
		error = as_add_callback(as, umem_lock_undo, p, AS_ALL_EVENT,
		    addr, len, KM_SLEEP);
		if (error != 0) {
			/* undo everything; caller sees the as_add_callback error */
			as_pageunlock(as, p->pparray,
			    addr, len, p->s_flags);
			umem_decr_devlockmem(p);
			kmem_free(p, sizeof (struct ddi_umem_cookie));
			*cookie = (ddi_umem_cookie_t)NULL;
		}
	}
	return (error);
}

/*
 * Unlock the pages locked by ddi_umem_lock or umem_lockmemory and free
 * the cookie.  Called from i_ddi_umem_unlock_thread.
 */

static void
i_ddi_umem_unlock(struct ddi_umem_cookie *p)
{
	uint_t	rc;

	/*
	 * There is no way to determine whether a callback to
	 * umem_lock_undo was registered via as_add_callback.
	 * (i.e. umem_lockmemory was called with DDI_MEMLOCK_LONGTERM and
	 * a valid callback function structure.)  as_delete_callback
	 * is called to delete a possible registered callback.  If the
	 * return from as_delete_callbacks is AS_CALLBACK_DELETED, it
	 * indicates that there was a callback registered, and that it was
	 * successfully deleted.  Thus, the cookie reference count
	 * will never be decremented by umem_lock_undo.  Just return the
	 * memory for the cookie, since both users of the cookie are done.
	 * A return of AS_CALLBACK_NOTFOUND indicates a callback was
	 * never registered.  A return of AS_CALLBACK_DELETE_DEFERRED
	 * indicates that callback processing is taking place and
	 * umem_lock_undo is, or will be, executing, and thus decrementing
	 * the cookie reference count when it is complete.
	 *
	 * This needs to be done before as_pageunlock so that the
	 * persistence of as is guaranteed because of the locked pages.
	 *
	 */
	rc = as_delete_callback(p->asp, p);


	/*
	 * The proc->p_as will be stale if i_ddi_umem_unlock is called
	 * after relvm is called so use p->asp.
	 */
	as_pageunlock(p->asp, p->pparray, p->cvaddr, p->size, p->s_flags);

	/*
	 * Now that we have unlocked the memory decrement the
	 * *.max-locked-memory rctl
	 */
	umem_decr_devlockmem(p);

	if (rc == AS_CALLBACK_DELETED) {
		/* umem_lock_undo will not happen, return the cookie memory */
		ASSERT(p->cook_refcnt == 2);
		kmem_free(p, sizeof (struct ddi_umem_cookie));
	} else {
		/*
		 * umem_lock_undo may happen if as_delete_callback returned
		 * AS_CALLBACK_DELETE_DEFERRED.  In that case, decrement the
		 * reference count, atomically, and return the cookie
		 * memory if the reference count goes to zero.  The only
		 * other value for rc is AS_CALLBACK_NOTFOUND.  In that
		 * case, just return the cookie memory.
		 */
		if ((rc != AS_CALLBACK_DELETE_DEFERRED) ||
		    (atomic_add_long_nv((ulong_t *)(&(p->cook_refcnt)), -1)
		    == 0)) {
			kmem_free(p, sizeof (struct ddi_umem_cookie));
		}
	}
}

/*
 * i_ddi_umem_unlock_thread - deferred ddi_umem_unlock list handler.
 *
 * Call i_ddi_umem_unlock for entries in the ddi_umem_unlock list
 * until it is empty.  Then, wait for more to be added.  This thread is awoken
 * via calls to ddi_umem_unlock.
 */

static void
i_ddi_umem_unlock_thread(void)
{
	struct ddi_umem_cookie	*ret_cookie;
	callb_cpr_t	cprinfo;

	/* process the ddi_umem_unlock list */
	CALLB_CPR_INIT(&cprinfo, &ddi_umem_unlock_mutex,
	    callb_generic_cpr, "unlock_thread");
	for (;;) {
		mutex_enter(&ddi_umem_unlock_mutex);
		if (ddi_umem_unlock_head != NULL) {	/* list not empty */
			ret_cookie = ddi_umem_unlock_head;
			/* take it off the list */
			if ((ddi_umem_unlock_head =
			    ddi_umem_unlock_head->unl_forw) == NULL) {
				ddi_umem_unlock_tail = NULL;
			}
			mutex_exit(&ddi_umem_unlock_mutex);
			/* unlock the pages in this cookie; drop the mutex first */
			(void) i_ddi_umem_unlock(ret_cookie);
		} else {	/* list is empty, wait for next ddi_umem_unlock */
			CALLB_CPR_SAFE_BEGIN(&cprinfo);
			cv_wait(&ddi_umem_unlock_cv, &ddi_umem_unlock_mutex);
			CALLB_CPR_SAFE_END(&cprinfo, &ddi_umem_unlock_mutex);
			mutex_exit(&ddi_umem_unlock_mutex);
		}
	}
	/* ddi_umem_unlock_thread does not exit */
	/* NOTREACHED */
}

/*
 * Start the thread that will process the ddi_umem_unlock list if it is
 * not already started (i_ddi_umem_unlock_thread).
 */
static void
i_ddi_umem_unlock_thread_start(void)
{
	/*
	 * NOTE(review): callers (ddi_umem_lock, umem_lockmemory) test
	 * ddi_umem_unlock_thread for NULL without the mutex; the mutex
	 * taken here is what makes thread creation happen at most once.
	 */
	mutex_enter(&ddi_umem_unlock_mutex);
	if (ddi_umem_unlock_thread == NULL) {
		ddi_umem_unlock_thread = thread_create(NULL, 0,
		    i_ddi_umem_unlock_thread, NULL, 0, &p0,
		    TS_RUN, minclsyspri);
	}
	mutex_exit(&ddi_umem_unlock_mutex);
}

/*
 * Lock the virtual address range in the current process and create a
 * ddi_umem_cookie (of type UMEM_LOCKED). This can be used to pass to
 * ddi_umem_iosetup to create a buf or do devmap_umem_setup/remap to export
 * to user space.
 *
 * Note: The resource control accounting currently uses a full charge model
 * in other words attempts to lock the same/overlapping areas of memory
 * will deduct the full size of the buffer from the projects running
 * counter for the device locked memory. This applies to umem_lockmemory too.
 *
 * addr, size should be PAGESIZE aligned
 * flags - DDI_UMEMLOCK_READ, DDI_UMEMLOCK_WRITE or both
 *	identifies whether the locked memory will be read or written or both
 *
 * Returns 0 on success
 *	EINVAL - for invalid parameters
 *	EPERM, ENOMEM and other error codes returned by as_pagelock
 *	ENOMEM - is returned if the current request to lock memory exceeds
 *		*.max-locked-memory resource control value.
 *	EAGAIN - could not start the ddi_umem_unlock list processing thread
 */
int
ddi_umem_lock(caddr_t addr, size_t len, int flags, ddi_umem_cookie_t *cookie)
{
	int	error;
	struct ddi_umem_cookie *p;

	*cookie = NULL;		/* in case of any error return */

	/* These are the only two valid flags */
	if ((flags & ~(DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) != 0) {
		return (EINVAL);
	}

	/* At least one of the two flags (or both) must be set */
	if ((flags & (DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) == 0) {
		return (EINVAL);
	}

	/* addr and len must be page-aligned */
	if (((uintptr_t)addr & PAGEOFFSET) != 0) {
		return (EINVAL);
	}

	if ((len & PAGEOFFSET) != 0) {
		return (EINVAL);
	}

	/*
	 * Call i_ddi_umem_unlock_thread_start if necessary.  It will
	 * be called on first ddi_umem_lock or umem_lockmemory call.
	 */
	if (ddi_umem_unlock_thread == NULL)
		i_ddi_umem_unlock_thread_start();

	/* Allocate memory for the cookie */
	p = kmem_zalloc(sizeof (struct ddi_umem_cookie), KM_SLEEP);

	/* Convert the flags to seg_rw type */
	if (flags & DDI_UMEMLOCK_WRITE) {
		p->s_flags = S_WRITE;
	} else {
		p->s_flags = S_READ;
	}

	/* Store curproc in cookie for later iosetup/unlock */
	p->procp = (void *)curproc;

	/*
	 * Store the struct as pointer in cookie for later use by
	 * ddi_umem_unlock.  The proc->p_as will be stale if ddi_umem_unlock
	 * is called after relvm is called.
	 */
	p->asp = curproc->p_as;
	/*
	 * The size field is needed for lockmem accounting.
	 */
	p->size = len;

	/* Charge the locked-memory rctl before locking any pages */
	if (umem_incr_devlockmem(p) != 0) {
		/*
		 * The requested memory cannot be locked
		 */
		kmem_free(p, sizeof (struct ddi_umem_cookie));
		*cookie = (ddi_umem_cookie_t)NULL;
		return (ENOMEM);
	}

	/*
	 * Lock the pages corresponding to addr, len in memory.
	 * (p->procp is curproc here, so this is the same as as p->asp.)
	 */
	error = as_pagelock(((proc_t *)p->procp)->p_as, &(p->pparray),
	    addr, len, p->s_flags);
	if (error != 0) {
		umem_decr_devlockmem(p);
		kmem_free(p, sizeof (struct ddi_umem_cookie));
		*cookie = (ddi_umem_cookie_t)NULL;
		return (error);
	}

	/* Initialize the fields in the ddi_umem_cookie */
	p->cvaddr = addr;
	p->type = UMEM_LOCKED;
	/* no driver callback is possible here, so only one cookie user */
	p->cook_refcnt = 1;

	*cookie = (ddi_umem_cookie_t)p;
	return (error);
}

/*
 * Add the cookie to the ddi_umem_unlock list.  Pages will be
 * unlocked by i_ddi_umem_unlock_thread.
 */

void
ddi_umem_unlock(ddi_umem_cookie_t cookie)
{
	struct ddi_umem_cookie	*p = (struct ddi_umem_cookie *)cookie;

	ASSERT(p->type == UMEM_LOCKED);
	ASSERT(CPU_ON_INTR(CPU) == 0); /* cannot be high level */
	ASSERT(ddi_umem_unlock_thread != NULL);

	p->unl_forw = (struct ddi_umem_cookie *)NULL;	/* end of list */
	/*
	 * Queue the unlock request and notify i_ddi_umem_unlock thread
	 * if it's called in the interrupt context.  Otherwise, unlock pages
	 * immediately.
	 */
	if (servicing_interrupt()) {
		/* queue the unlock request and notify the thread */
		mutex_enter(&ddi_umem_unlock_mutex);
		if (ddi_umem_unlock_head == NULL) {
			/* list was empty: the thread may be asleep, wake it */
			ddi_umem_unlock_head = ddi_umem_unlock_tail = p;
			cv_broadcast(&ddi_umem_unlock_cv);
		} else {
			ddi_umem_unlock_tail->unl_forw = p;
			ddi_umem_unlock_tail = p;
		}
		mutex_exit(&ddi_umem_unlock_mutex);
	} else {
		/* unlock the pages right away */
		(void) i_ddi_umem_unlock(p);
	}
}

/*
 * Create a buf structure from a ddi_umem_cookie
 * cookie - is a ddi_umem_cookie from ddi_umem_lock and ddi_umem_alloc
 *		(only UMEM_LOCKED & KMEM_NON_PAGEABLE types supported)
 * off, len - identifies the portion of the memory represented by the cookie
 *		that the buf points to.
 *	NOTE: off, len need to follow the alignment/size restrictions of the
 *		device (dev) that this buf will be passed to.  Some devices
 *		will accept unrestricted alignment/size, whereas others (such as
 *		st) require some block-size alignment/size.  It is the caller's
 *		responsibility to ensure that the alignment/size restrictions
 *		are met (we cannot assert as we do not know the restrictions)
 *
 * direction - is one of B_READ or B_WRITE and needs to be compatible with
 *		the flags used in ddi_umem_lock
 *
 * The following three arguments are used to initialize fields in the
 * buf structure and are uninterpreted by this routine.
8588 * 8589 * dev 8590 * blkno 8591 * iodone 8592 * 8593 * sleepflag - is one of DDI_UMEM_SLEEP or DDI_UMEM_NOSLEEP 8594 * 8595 * Returns a buf structure pointer on success (to be freed by freerbuf) 8596 * NULL on any parameter error or memory alloc failure 8597 * 8598 */ 8599 struct buf * 8600 ddi_umem_iosetup(ddi_umem_cookie_t cookie, off_t off, size_t len, 8601 int direction, dev_t dev, daddr_t blkno, 8602 int (*iodone)(struct buf *), int sleepflag) 8603 { 8604 struct ddi_umem_cookie *p = (struct ddi_umem_cookie *)cookie; 8605 struct buf *bp; 8606 8607 /* 8608 * check for valid cookie offset, len 8609 */ 8610 if ((off + len) > p->size) { 8611 return (NULL); 8612 } 8613 8614 if (len > p->size) { 8615 return (NULL); 8616 } 8617 8618 /* direction has to be one of B_READ or B_WRITE */ 8619 if ((direction != B_READ) && (direction != B_WRITE)) { 8620 return (NULL); 8621 } 8622 8623 /* These are the only two valid sleepflags */ 8624 if ((sleepflag != DDI_UMEM_SLEEP) && (sleepflag != DDI_UMEM_NOSLEEP)) { 8625 return (NULL); 8626 } 8627 8628 /* 8629 * Only cookies of type UMEM_LOCKED and KMEM_NON_PAGEABLE are supported 8630 */ 8631 if ((p->type != UMEM_LOCKED) && (p->type != KMEM_NON_PAGEABLE)) { 8632 return (NULL); 8633 } 8634 8635 /* If type is KMEM_NON_PAGEABLE procp is NULL */ 8636 ASSERT((p->type == KMEM_NON_PAGEABLE) ? 
8637 (p->procp == NULL) : (p->procp != NULL)); 8638 8639 bp = kmem_alloc(sizeof (struct buf), sleepflag); 8640 if (bp == NULL) { 8641 return (NULL); 8642 } 8643 bioinit(bp); 8644 8645 bp->b_flags = B_BUSY | B_PHYS | direction; 8646 bp->b_edev = dev; 8647 bp->b_lblkno = blkno; 8648 bp->b_iodone = iodone; 8649 bp->b_bcount = len; 8650 bp->b_proc = (proc_t *)p->procp; 8651 ASSERT(((uintptr_t)(p->cvaddr) & PAGEOFFSET) == 0); 8652 bp->b_un.b_addr = (caddr_t)((uintptr_t)(p->cvaddr) + off); 8653 if (p->pparray != NULL) { 8654 bp->b_flags |= B_SHADOW; 8655 ASSERT(((uintptr_t)(p->cvaddr) & PAGEOFFSET) == 0); 8656 bp->b_shadow = p->pparray + btop(off); 8657 } 8658 return (bp); 8659 } 8660 8661 /* 8662 * Fault-handling and related routines 8663 */ 8664 8665 ddi_devstate_t 8666 ddi_get_devstate(dev_info_t *dip) 8667 { 8668 if (DEVI_IS_DEVICE_OFFLINE(dip)) 8669 return (DDI_DEVSTATE_OFFLINE); 8670 else if (DEVI_IS_DEVICE_DOWN(dip) || DEVI_IS_BUS_DOWN(dip)) 8671 return (DDI_DEVSTATE_DOWN); 8672 else if (DEVI_IS_BUS_QUIESCED(dip)) 8673 return (DDI_DEVSTATE_QUIESCED); 8674 else if (DEVI_IS_DEVICE_DEGRADED(dip)) 8675 return (DDI_DEVSTATE_DEGRADED); 8676 else 8677 return (DDI_DEVSTATE_UP); 8678 } 8679 8680 void 8681 ddi_dev_report_fault(dev_info_t *dip, ddi_fault_impact_t impact, 8682 ddi_fault_location_t location, const char *message) 8683 { 8684 struct ddi_fault_event_data fd; 8685 ddi_eventcookie_t ec; 8686 8687 /* 8688 * Assemble all the information into a fault-event-data structure 8689 */ 8690 fd.f_dip = dip; 8691 fd.f_impact = impact; 8692 fd.f_location = location; 8693 fd.f_message = message; 8694 fd.f_oldstate = ddi_get_devstate(dip); 8695 8696 /* 8697 * Get eventcookie from defining parent. 
8698 */ 8699 if (ddi_get_eventcookie(dip, DDI_DEVI_FAULT_EVENT, &ec) != 8700 DDI_SUCCESS) 8701 return; 8702 8703 (void) ndi_post_event(dip, dip, ec, &fd); 8704 } 8705 8706 char * 8707 i_ddi_devi_class(dev_info_t *dip) 8708 { 8709 return (DEVI(dip)->devi_device_class); 8710 } 8711 8712 int 8713 i_ddi_set_devi_class(dev_info_t *dip, char *devi_class, int flag) 8714 { 8715 struct dev_info *devi = DEVI(dip); 8716 8717 mutex_enter(&devi->devi_lock); 8718 8719 if (devi->devi_device_class) 8720 kmem_free(devi->devi_device_class, 8721 strlen(devi->devi_device_class) + 1); 8722 8723 if ((devi->devi_device_class = i_ddi_strdup(devi_class, flag)) 8724 != NULL) { 8725 mutex_exit(&devi->devi_lock); 8726 return (DDI_SUCCESS); 8727 } 8728 8729 mutex_exit(&devi->devi_lock); 8730 8731 return (DDI_FAILURE); 8732 } 8733 8734 8735 /* 8736 * Task Queues DDI interfaces. 8737 */ 8738 8739 /* ARGSUSED */ 8740 ddi_taskq_t * 8741 ddi_taskq_create(dev_info_t *dip, const char *name, int nthreads, 8742 pri_t pri, uint_t cflags) 8743 { 8744 char full_name[TASKQ_NAMELEN]; 8745 const char *tq_name; 8746 int nodeid = 0; 8747 8748 if (dip == NULL) 8749 tq_name = name; 8750 else { 8751 nodeid = ddi_get_instance(dip); 8752 8753 if (name == NULL) 8754 name = "tq"; 8755 8756 (void) snprintf(full_name, sizeof (full_name), "%s_%s", 8757 ddi_driver_name(dip), name); 8758 8759 tq_name = full_name; 8760 } 8761 8762 return ((ddi_taskq_t *)taskq_create_instance(tq_name, nodeid, nthreads, 8763 pri == TASKQ_DEFAULTPRI ? minclsyspri : pri, 8764 nthreads, INT_MAX, TASKQ_PREPOPULATE)); 8765 } 8766 8767 void 8768 ddi_taskq_destroy(ddi_taskq_t *tq) 8769 { 8770 taskq_destroy((taskq_t *)tq); 8771 } 8772 8773 int 8774 ddi_taskq_dispatch(ddi_taskq_t *tq, void (* func)(void *), 8775 void *arg, uint_t dflags) 8776 { 8777 taskqid_t id = taskq_dispatch((taskq_t *)tq, func, arg, 8778 dflags == DDI_SLEEP ? TQ_SLEEP : TQ_NOSLEEP); 8779 8780 return (id != 0 ? 
DDI_SUCCESS : DDI_FAILURE); 8781 } 8782 8783 void 8784 ddi_taskq_wait(ddi_taskq_t *tq) 8785 { 8786 taskq_wait((taskq_t *)tq); 8787 } 8788 8789 void 8790 ddi_taskq_suspend(ddi_taskq_t *tq) 8791 { 8792 taskq_suspend((taskq_t *)tq); 8793 } 8794 8795 boolean_t 8796 ddi_taskq_suspended(ddi_taskq_t *tq) 8797 { 8798 return (taskq_suspended((taskq_t *)tq)); 8799 } 8800 8801 void 8802 ddi_taskq_resume(ddi_taskq_t *tq) 8803 { 8804 taskq_resume((taskq_t *)tq); 8805 } 8806 8807 int 8808 ddi_parse( 8809 const char *ifname, 8810 char *alnum, 8811 uint_t *nump) 8812 { 8813 const char *p; 8814 int l; 8815 ulong_t num; 8816 boolean_t nonum = B_TRUE; 8817 char c; 8818 8819 l = strlen(ifname); 8820 for (p = ifname + l; p != ifname; l--) { 8821 c = *--p; 8822 if (!isdigit(c)) { 8823 (void) strlcpy(alnum, ifname, l + 1); 8824 if (ddi_strtoul(p + 1, NULL, 10, &num) != 0) 8825 return (DDI_FAILURE); 8826 break; 8827 } 8828 nonum = B_FALSE; 8829 } 8830 if (l == 0 || nonum) 8831 return (DDI_FAILURE); 8832 8833 *nump = num; 8834 return (DDI_SUCCESS); 8835 } 8836