1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2006 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 #pragma ident "%Z%%M% %I% %E% SMI" 28 29 #include <sys/note.h> 30 #include <sys/types.h> 31 #include <sys/param.h> 32 #include <sys/systm.h> 33 #include <sys/buf.h> 34 #include <sys/uio.h> 35 #include <sys/cred.h> 36 #include <sys/poll.h> 37 #include <sys/mman.h> 38 #include <sys/kmem.h> 39 #include <sys/model.h> 40 #include <sys/file.h> 41 #include <sys/proc.h> 42 #include <sys/open.h> 43 #include <sys/user.h> 44 #include <sys/t_lock.h> 45 #include <sys/vm.h> 46 #include <sys/stat.h> 47 #include <vm/hat.h> 48 #include <vm/seg.h> 49 #include <vm/seg_vn.h> 50 #include <vm/seg_dev.h> 51 #include <vm/as.h> 52 #include <sys/cmn_err.h> 53 #include <sys/cpuvar.h> 54 #include <sys/debug.h> 55 #include <sys/autoconf.h> 56 #include <sys/sunddi.h> 57 #include <sys/esunddi.h> 58 #include <sys/sunndi.h> 59 #include <sys/kstat.h> 60 #include <sys/conf.h> 61 #include <sys/ddi_impldefs.h> /* include implementation structure defs */ 62 #include <sys/ndi_impldefs.h> /* include prototypes */ 63 #include <sys/hwconf.h> 64 
#include <sys/pathname.h>
#include <sys/modctl.h>
#include <sys/epm.h>
#include <sys/devctl.h>
#include <sys/callb.h>
#include <sys/cladm.h>
#include <sys/sysevent.h>
#include <sys/dacf_impl.h>
#include <sys/ddidevmap.h>
#include <sys/bootconf.h>
#include <sys/disp.h>
#include <sys/atomic.h>
#include <sys/promif.h>
#include <sys/instance.h>
#include <sys/sysevent/eventdefs.h>
#include <sys/task.h>
#include <sys/project.h>
#include <sys/taskq.h>
#include <sys/devpolicy.h>
#include <sys/ctype.h>
#include <net/if.h>

extern pri_t minclsyspri;

extern rctl_hndl_t rc_project_devlockmem;

#ifdef DEBUG
static int sunddi_debug = 0;
#endif /* DEBUG */

/* ddi_umem_unlock miscellaneous */

/* Starts the background thread that drains the ddi_umem_unlock FIFO. */
static void i_ddi_umem_unlock_thread_start(void);

static kmutex_t ddi_umem_unlock_mutex; /* unlock list mutex */
static kcondvar_t ddi_umem_unlock_cv; /* unlock list block/unblock */
static kthread_t *ddi_umem_unlock_thread;
/*
 * The ddi_umem_unlock FIFO list. NULL head pointer indicates empty list.
 */
static struct ddi_umem_cookie *ddi_umem_unlock_head = NULL;
static struct ddi_umem_cookie *ddi_umem_unlock_tail = NULL;

/*
 * This lock protects the project.max-device-locked-memory counter.
 * When both p_lock (proc_t) and this lock need to be acquired, p_lock
 * should be acquired first (lock ordering; reversing it risks deadlock).
 */
static kmutex_t umem_devlockmem_rctl_lock;


/*
 * DDI(Sun) Function and flag definitions:
 */

#if defined(__x86)
/*
 * Used to indicate which entries were chosen from a range.
 */
char *chosen_reg = "chosen-reg";
#endif

/*
 * Function used to ring system console bell
 */
void (*ddi_console_bell_func)(clock_t duration);

/*
 * Creating register mappings and handling interrupts:
 */

/*
 * Generic ddi_map: Call parent to fulfill request...
 */

/*
 * Forward a mapping request to this node's parent nexus, which owns the
 * bus-specific bus_map implementation.
 */
int
ddi_map(dev_info_t *dp, ddi_map_req_t *mp, off_t offset,
    off_t len, caddr_t *addrp)
{
	dev_info_t *pdip;

	ASSERT(dp);
	pdip = (dev_info_t *)DEVI(dp)->devi_parent;
	return ((DEVI(pdip)->devi_ops->devo_bus_ops->bus_map)(pdip,
	    dp, mp, offset, len, addrp));
}

/*
 * ddi_apply_range: (Called by nexi only.)
 * Apply ranges in parent node dp, to child regspec rp...
 */

int
ddi_apply_range(dev_info_t *dp, dev_info_t *rdip, struct regspec *rp)
{
	return (i_ddi_apply_range(dp, rdip, rp));
}

/*
 * Map register set 'rnumber' of 'dip' into kernel virtual address space;
 * *kaddrp receives the mapped address.  On x86 this also publishes a
 * "chosen-reg" property describing the single selected reg tuple.
 */
int
ddi_map_regs(dev_info_t *dip, uint_t rnumber, caddr_t *kaddrp, off_t offset,
    off_t len)
{
	ddi_map_req_t mr;
#if defined(__x86)
	struct {
		int bus;
		int addr;
		int size;
	} reg, *reglist;
	uint_t length;
	int rc;

	/*
	 * get the 'registers' or the 'reg' property.
	 * We look up the reg property as an array of
	 * int's.
	 */
	rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "registers", (int **)&reglist, &length);
	if (rc != DDI_PROP_SUCCESS)
		rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
		    DDI_PROP_DONTPASS, "reg", (int **)&reglist, &length);
	if (rc == DDI_PROP_SUCCESS) {
		/*
		 * point to the required entry.
		 * NOTE(review): rnumber is not range-checked against
		 * 'length' here — assumed valid by the caller; confirm.
		 */
		reg = reglist[rnumber];
		reg.addr += offset;
		if (len != 0)
			reg.size = len;
		/*
		 * make a new property containing ONLY the required tuple.
		 */
		if (ddi_prop_update_int_array(DDI_DEV_T_NONE, dip,
		    chosen_reg, (int *)&reg, (sizeof (reg)/sizeof (int)))
		    != DDI_PROP_SUCCESS) {
			cmn_err(CE_WARN, "%s%d: cannot create '%s' "
			    "property", DEVI(dip)->devi_name,
			    DEVI(dip)->devi_instance, chosen_reg);
		}
		/*
		 * free the memory allocated by
		 * ddi_prop_lookup_int_array ().
		 */
		ddi_prop_free((void *)reglist);
	}
#endif
	mr.map_op = DDI_MO_MAP_LOCKED;
	mr.map_type = DDI_MT_RNUMBER;
	mr.map_obj.rnumber = rnumber;
	mr.map_prot = PROT_READ | PROT_WRITE;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_handlep = NULL;
	mr.map_vers = DDI_MAP_VERSION;

	/*
	 * Call my parent to map in my regs.
	 */

	return (ddi_map(dip, &mr, offset, len, kaddrp));
}

/*
 * Undo a ddi_map_regs() mapping: ask the parent to unmap and clear the
 * caller's kernel address.  On x86 the "chosen-reg" property is removed.
 */
void
ddi_unmap_regs(dev_info_t *dip, uint_t rnumber, caddr_t *kaddrp, off_t offset,
    off_t len)
{
	ddi_map_req_t mr;

	mr.map_op = DDI_MO_UNMAP;
	mr.map_type = DDI_MT_RNUMBER;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_prot = PROT_READ | PROT_WRITE;	/* who cares? */
	mr.map_obj.rnumber = rnumber;
	mr.map_handlep = NULL;
	mr.map_vers = DDI_MAP_VERSION;

	/*
	 * Call my parent to unmap my regs.
	 */

	(void) ddi_map(dip, &mr, offset, len, kaddrp);
	*kaddrp = (caddr_t)0;
#if defined(__x86)
	(void) ddi_prop_remove(DDI_DEV_T_NONE, dip, chosen_reg);
#endif
}

/* Default bus_map entry for nexi: delegate to the implementation layer. */
int
ddi_bus_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
    off_t offset, off_t len, caddr_t *vaddrp)
{
	return (i_ddi_bus_map(dip, rdip, mp, offset, len, vaddrp));
}

/*
 * nullbusmap:	The/DDI default bus_map entry point for nexi
 *		not conforming to the reg/range paradigm (i.e. scsi, etc.)
 *		with no HAT/MMU layer to be programmed at this level.
 *
 *		If the call is to map by rnumber, return an error,
 *		otherwise pass anything else up the tree to my parent.
 */
int
nullbusmap(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
    off_t offset, off_t len, caddr_t *vaddrp)
{
	_NOTE(ARGUNUSED(rdip))
	if (mp->map_type == DDI_MT_RNUMBER)
		return (DDI_ME_UNSUPPORTED);

	return (ddi_map(dip, mp, offset, len, vaddrp));
}

/*
 * ddi_rnumber_to_regspec: Not for use by leaf drivers.
279 * Only for use by nexi using the reg/range paradigm. 280 */ 281 struct regspec * 282 ddi_rnumber_to_regspec(dev_info_t *dip, int rnumber) 283 { 284 return (i_ddi_rnumber_to_regspec(dip, rnumber)); 285 } 286 287 288 /* 289 * Note that we allow the dip to be nil because we may be called 290 * prior even to the instantiation of the devinfo tree itself - all 291 * regular leaf and nexus drivers should always use a non-nil dip! 292 * 293 * We treat peek in a somewhat cavalier fashion .. assuming that we'll 294 * simply get a synchronous fault as soon as we touch a missing address. 295 * 296 * Poke is rather more carefully handled because we might poke to a write 297 * buffer, "succeed", then only find some time later that we got an 298 * asynchronous fault that indicated that the address we were writing to 299 * was not really backed by hardware. 300 */ 301 302 static int 303 i_ddi_peekpoke(dev_info_t *devi, ddi_ctl_enum_t cmd, size_t size, 304 void *addr, void *value_p) 305 { 306 union { 307 uint64_t u64; 308 uint32_t u32; 309 uint16_t u16; 310 uint8_t u8; 311 } peekpoke_value; 312 313 peekpoke_ctlops_t peekpoke_args; 314 uint64_t dummy_result; 315 int rval; 316 317 /* Note: size is assumed to be correct; it is not checked. 
*/ 318 peekpoke_args.size = size; 319 peekpoke_args.dev_addr = (uintptr_t)addr; 320 peekpoke_args.handle = NULL; 321 peekpoke_args.repcount = 1; 322 peekpoke_args.flags = 0; 323 324 if (cmd == DDI_CTLOPS_POKE) { 325 switch (size) { 326 case sizeof (uint8_t): 327 peekpoke_value.u8 = *(uint8_t *)value_p; 328 break; 329 case sizeof (uint16_t): 330 peekpoke_value.u16 = *(uint16_t *)value_p; 331 break; 332 case sizeof (uint32_t): 333 peekpoke_value.u32 = *(uint32_t *)value_p; 334 break; 335 case sizeof (uint64_t): 336 peekpoke_value.u64 = *(uint64_t *)value_p; 337 break; 338 } 339 } 340 341 peekpoke_args.host_addr = (uintptr_t)&peekpoke_value.u64; 342 343 if (devi != NULL) 344 rval = ddi_ctlops(devi, devi, cmd, &peekpoke_args, 345 &dummy_result); 346 else 347 rval = peekpoke_mem(cmd, &peekpoke_args); 348 349 /* 350 * A NULL value_p is permitted by ddi_peek(9F); discard the result. 351 */ 352 if ((cmd == DDI_CTLOPS_PEEK) & (value_p != NULL)) { 353 switch (size) { 354 case sizeof (uint8_t): 355 *(uint8_t *)value_p = peekpoke_value.u8; 356 break; 357 case sizeof (uint16_t): 358 *(uint16_t *)value_p = peekpoke_value.u16; 359 break; 360 case sizeof (uint32_t): 361 *(uint32_t *)value_p = peekpoke_value.u32; 362 break; 363 case sizeof (uint64_t): 364 *(uint64_t *)value_p = peekpoke_value.u64; 365 break; 366 } 367 } 368 369 return (rval); 370 } 371 372 /* 373 * Keep ddi_peek() and ddi_poke() in case 3rd parties are calling this. 374 * they shouldn't be, but the 9f manpage kind of pseudo exposes it. 
 */
/*
 * Generic peek: validate that 'size' is one of the supported access
 * widths (1/2/4/8 bytes), then perform a single peek.
 */
int
ddi_peek(dev_info_t *devi, size_t size, void *addr, void *value_p)
{
	switch (size) {
	case sizeof (uint8_t):
	case sizeof (uint16_t):
	case sizeof (uint32_t):
	case sizeof (uint64_t):
		break;
	default:
		return (DDI_FAILURE);
	}

	return (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, size, addr, value_p));
}

/*
 * Generic poke: same width validation as ddi_peek(), then a single poke.
 * Unlike peek, value_p supplies the datum and must be non-NULL.
 */
int
ddi_poke(dev_info_t *devi, size_t size, void *addr, void *value_p)
{
	switch (size) {
	case sizeof (uint8_t):
	case sizeof (uint16_t):
	case sizeof (uint32_t):
	case sizeof (uint64_t):
		break;
	default:
		return (DDI_FAILURE);
	}

	return (i_ddi_peekpoke(devi, DDI_CTLOPS_POKE, size, addr, value_p));
}

/* Fixed-width peek wrappers: the access size is implied by the type. */

int
ddi_peek8(dev_info_t *dip, int8_t *addr, int8_t *val_p)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}

int
ddi_peek16(dev_info_t *dip, int16_t *addr, int16_t *val_p)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}

int
ddi_peek32(dev_info_t *dip, int32_t *addr, int32_t *val_p)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}

int
ddi_peek64(dev_info_t *dip, int64_t *addr, int64_t *val_p)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}


/*
 * We need to separate the old interfaces from the new ones and leave them
 * in here for a while. Previous versions of the OS defined the new interfaces
 * to the old interfaces. This way we can fix things up so that we can
 * eventually remove these interfaces.
 * e.g. A 3rd party module/driver using ddi_peek8 and built against S10
 * or earlier will actually have a reference to ddi_peekc in the binary.
 */
#ifdef _ILP32
/*
 * Legacy 32-bit-only aliases for the fixed-width peeks, retained for
 * binary compatibility with drivers built against S10 or earlier
 * (which reference ddi_peekc/ddi_peeks/ddi_peekl/ddi_peekd by name).
 */
int
ddi_peekc(dev_info_t *dip, int8_t *addr, int8_t *val_p)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}

int
ddi_peeks(dev_info_t *dip, int16_t *addr, int16_t *val_p)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}

int
ddi_peekl(dev_info_t *dip, int32_t *addr, int32_t *val_p)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}

int
ddi_peekd(dev_info_t *dip, int64_t *addr, int64_t *val_p)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}
#endif /* _ILP32 */

/* Fixed-width poke wrappers: the value is passed by value, poked by address. */

int
ddi_poke8(dev_info_t *dip, int8_t *addr, int8_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}

int
ddi_poke16(dev_info_t *dip, int16_t *addr, int16_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}

int
ddi_poke32(dev_info_t *dip, int32_t *addr, int32_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}

int
ddi_poke64(dev_info_t *dip, int64_t *addr, int64_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}

/*
 * We need to separate the old interfaces from the new ones and leave them
 * in here for a while. Previous versions of the OS defined the new interfaces
 * to the old interfaces. This way we can fix things up so that we can
 * eventually remove these interfaces.
 * e.g. A 3rd party module/driver using ddi_poke8 and built against S10
 * or earlier will actually have a reference to ddi_pokec in the binary.
 */
#ifdef _ILP32
/*
 * Legacy 32-bit-only aliases for the fixed-width pokes (binary
 * compatibility — see the comment above).
 */
int
ddi_pokec(dev_info_t *dip, int8_t *addr, int8_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}

int
ddi_pokes(dev_info_t *dip, int16_t *addr, int16_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}

int
ddi_pokel(dev_info_t *dip, int32_t *addr, int32_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}

int
ddi_poked(dev_info_t *dip, int64_t *addr, int64_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}
#endif /* _ILP32 */

/*
 * ddi_peekpokeio() is used primarily by the mem drivers for moving
 * data to and from uio structures via peek and poke.  Note that we
 * use "internal" routines ddi_peek and ddi_poke to make this go
 * slightly faster, avoiding the call overhead ..
 */
int
ddi_peekpokeio(dev_info_t *devi, struct uio *uio, enum uio_rw rw,
    caddr_t addr, size_t len, uint_t xfersize)
{
	int64_t	ibuffer;
	int8_t w8;
	size_t sz;
	int o;

	/* Clamp the requested transfer unit to the native word size. */
	if (xfersize > sizeof (long))
		xfersize = sizeof (long);

	while (len != 0) {
		/*
		 * Odd address or odd residual length: fall back to a
		 * single byte transfer for this iteration.
		 */
		if ((len | (uintptr_t)addr) & 1) {
			sz = sizeof (int8_t);
			if (rw == UIO_WRITE) {
				if ((o = uwritec(uio)) == -1)
					return (DDI_FAILURE);
				if (ddi_poke8(devi, (int8_t *)addr,
				    (int8_t)o) != DDI_SUCCESS)
					return (DDI_FAILURE);
			} else {
				if (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, sz,
				    (int8_t *)addr, &w8) != DDI_SUCCESS)
					return (DDI_FAILURE);
				if (ureadc(w8, uio))
					return (DDI_FAILURE);
			}
		} else {
			/*
			 * Pick the widest access that both the remaining
			 * length and the current address are aligned for,
			 * capped at xfersize; each case deliberately falls
			 * through to the next narrower width.
			 */
			switch (xfersize) {
			case sizeof (int64_t):
				if (((len | (uintptr_t)addr) &
				    (sizeof (int64_t) - 1)) == 0) {
					sz = xfersize;
					break;
				}
				/*FALLTHROUGH*/
			case sizeof (int32_t):
				if (((len | (uintptr_t)addr) &
				    (sizeof (int32_t) - 1)) == 0) {
					sz = xfersize;
					break;
				}
				/*FALLTHROUGH*/
			default:
				/*
				 * This still assumes that we might have an
				 * I/O bus out there that permits 16-bit
				 * transfers (and that it would be upset by
				 * 32-bit transfers from such locations).
				 */
				sz = sizeof (int16_t);
				break;
			}

			if (rw == UIO_READ) {
				if (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, sz,
				    addr, &ibuffer) != DDI_SUCCESS)
					return (DDI_FAILURE);
			}

			if (uiomove(&ibuffer, sz, rw, uio))
				return (DDI_FAILURE);

			if (rw == UIO_WRITE) {
				if (i_ddi_peekpoke(devi, DDI_CTLOPS_POKE, sz,
				    addr, &ibuffer) != DDI_SUCCESS)
					return (DDI_FAILURE);
			}
		}
		addr += sz;
		len -= sz;
	}
	return (DDI_SUCCESS);
}

/*
 * These routines are used by drivers that do layered ioctls
 * On sparc, they're implemented in assembler to avoid spilling
 * register windows in the common (copyin) case ..
 */
#if !defined(__sparc)
/*
 * Copy from caller 'buf' into kernel 'kernbuf'.  With FKIOCTL the source
 * is already a kernel address, so use kcopy (returns -1 on fault) instead
 * of copyin.
 */
int
ddi_copyin(const void *buf, void *kernbuf, size_t size, int flags)
{
	if (flags & FKIOCTL)
		return (kcopy(buf, kernbuf, size) ? -1 : 0);
	return (copyin(buf, kernbuf, size));
}

/*
 * Copy kernel 'buf' out to 'kernbuf' (the caller's destination — the
 * parameter naming is historical: for copyout 'kernbuf' is the user
 * address unless FKIOCTL marks it as a kernel address).
 */
int
ddi_copyout(const void *buf, void *kernbuf, size_t size, int flags)
{
	if (flags & FKIOCTL)
		return (kcopy(buf, kernbuf, size) ? -1 : 0);
	return (copyout(buf, kernbuf, size));
}
#endif /* !__sparc */

/*
 * Conversions in nexus pagesize units.  We don't duplicate the
 * 'nil dip' semantics of peek/poke because btopr/btop/ptob are DDI/DKI
 * routines anyway.
 */
/* Bytes-to-pages (truncating), in the parent nexus's page size. */
unsigned long
ddi_btop(dev_info_t *dip, unsigned long bytes)
{
	unsigned long pages;

	(void) ddi_ctlops(dip, dip, DDI_CTLOPS_BTOP, &bytes, &pages);
	return (pages);
}

/* Bytes-to-pages, rounding up, in the parent nexus's page size. */
unsigned long
ddi_btopr(dev_info_t *dip, unsigned long bytes)
{
	unsigned long pages;

	(void) ddi_ctlops(dip, dip, DDI_CTLOPS_BTOPR, &bytes, &pages);
	return (pages);
}

/* Pages-to-bytes, in the parent nexus's page size. */
unsigned long
ddi_ptob(dev_info_t *dip, unsigned long pages)
{
	unsigned long bytes;

	(void) ddi_ctlops(dip, dip, DDI_CTLOPS_PTOB, &pages, &bytes);
	return (bytes);
}

/*
 * Raise to the highest interrupt level; returns the previous priority
 * for ddi_exit_critical().
 */
unsigned int
ddi_enter_critical(void)
{
	return ((uint_t)spl7());
}

/* Restore the interrupt priority saved by ddi_enter_critical(). */
void
ddi_exit_critical(unsigned int spl)
{
	splx((int)spl);
}

/*
 * Nexus ctlops punter
 */

#if !defined(__sparc)
/*
 * Request bus_ctl parent to handle a bus_ctl request
 *
 * (The sparc version is in sparc_ddi.s)
 */
int
ddi_ctlops(dev_info_t *d, dev_info_t *r, ddi_ctl_enum_t op, void *a, void *v)
{
	int (*fp)();

	if (!d || !r)
		return (DDI_FAILURE);

	/* Resolve the cached bus_ctl parent for this node. */
	if ((d = (dev_info_t *)DEVI(d)->devi_bus_ctl) == NULL)
		return (DDI_FAILURE);

	fp = DEVI(d)->devi_ops->devo_bus_ops->bus_ctl;
	return ((*fp)(d, r, op, a, v));
}

#endif

/*
 * DMA/DVMA setup
 */

#if defined(__sparc)
static ddi_dma_lim_t standard_limits = {
	(uint_t)0,	/* addr_t dlim_addr_lo */
	(uint_t)-1,	/* addr_t dlim_addr_hi */
	(uint_t)-1,	/* uint_t dlim_cntr_max */
	(uint_t)1,	/* uint_t dlim_burstsizes */
	(uint_t)1,	/* uint_t dlim_minxfer */
	0		/* uint_t dlim_dmaspeed */
};
#elif defined(__x86)
static ddi_dma_lim_t standard_limits = {
	(uint_t)0,		/* addr_t dlim_addr_lo */
	(uint_t)0xffffff,	/* addr_t dlim_addr_hi */
	(uint_t)0,		/* uint_t dlim_cntr_max */
	(uint_t)0x00000001,	/* uint_t dlim_burstsizes */
	(uint_t)DMA_UNIT_8,	/* uint_t dlim_minxfer */
	(uint_t)0,		/* uint_t dlim_dmaspeed */
	/*
	 * Precedence note: '+' binds tighter than '<<', so this parses
	 * as 0x86 << (24 + 0) == 0x86 << 24, which matches the intent.
	 */
	(uint_t)0x86<<24+0,	/* uint_t dlim_version */
	(uint_t)0xffff,		/* uint_t dlim_adreg_max */
	(uint_t)0xffff,		/* uint_t dlim_ctreg_max */
	(uint_t)512,		/* uint_t dlim_granular */
	(int)1,			/* int dlim_sgllen */
	(uint_t)0xffffffff	/* uint_t dlim_reqsizes */
};

#endif

/*
 * Old-style DMA setup: map the object described by dmareqp and return a
 * DMA handle.  On sparc a missing limits structure is defaulted; on x86
 * limits are mandatory.
 */
int
ddi_dma_setup(dev_info_t *dip, struct ddi_dma_req *dmareqp,
    ddi_dma_handle_t *handlep)
{
	int (*funcp)() = ddi_dma_map;
	struct bus_ops *bop;
#if defined(__sparc)
	auto ddi_dma_lim_t dma_lim;

	if (dmareqp->dmar_limits == (ddi_dma_lim_t *)0) {
		dma_lim = standard_limits;
	} else {
		dma_lim = *dmareqp->dmar_limits;
	}
	dmareqp->dmar_limits = &dma_lim;
#endif
#if defined(__x86)
	if (dmareqp->dmar_limits == (ddi_dma_lim_t *)0)
		return (DDI_FAILURE);
#endif

	/*
	 * Handle the case that the requester is both a leaf
	 * and a nexus driver simultaneously by calling the
	 * requester's bus_dma_map function directly instead
	 * of ddi_dma_map.
	 */
	bop = DEVI(dip)->devi_ops->devo_bus_ops;
	if (bop && bop->bus_dma_map)
		funcp = bop->bus_dma_map;
	return ((*funcp)(dip, dip, dmareqp, handlep));
}

/*
 * Old-style DMA setup for a virtual address range [addr, addr+len) in
 * address space 'as' (NULL/kas for kernel memory).
 */
int
ddi_dma_addr_setup(dev_info_t *dip, struct as *as, caddr_t addr, size_t len,
    uint_t flags, int (*waitfp)(), caddr_t arg,
    ddi_dma_lim_t *limits, ddi_dma_handle_t *handlep)
{
	int (*funcp)() = ddi_dma_map;
	ddi_dma_lim_t dma_lim;
	struct ddi_dma_req dmareq;
	struct bus_ops *bop;

	if (len == 0) {
		return (DDI_DMA_NOMAPPING);
	}
	if (limits == (ddi_dma_lim_t *)0) {
		dma_lim = standard_limits;
	} else {
		dma_lim = *limits;
	}
	dmareq.dmar_limits = &dma_lim;
	dmareq.dmar_flags = flags;
	dmareq.dmar_fp = waitfp;
	dmareq.dmar_arg = arg;
	dmareq.dmar_object.dmao_size = len;
	dmareq.dmar_object.dmao_type = DMA_OTYP_VADDR;
	dmareq.dmar_object.dmao_obj.virt_obj.v_as = as;
	dmareq.dmar_object.dmao_obj.virt_obj.v_addr = addr;
	dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;

	/*
	 * Handle the case that the requester is both a leaf
	 * and a nexus driver simultaneously by calling the
	 * requester's bus_dma_map function directly instead
	 * of ddi_dma_map.
	 */
	bop = DEVI(dip)->devi_ops->devo_bus_ops;
	if (bop && bop->bus_dma_map)
		funcp = bop->bus_dma_map;

	return ((*funcp)(dip, dip, &dmareq, handlep));
}

/*
 * Old-style DMA setup for a struct buf.  Translates the buf's state
 * (B_PAGEIO page list, B_SHADOW page array, or plain virtual address
 * plus owning address space) into a ddi_dma_req.
 */
int
ddi_dma_buf_setup(dev_info_t *dip, struct buf *bp, uint_t flags,
    int (*waitfp)(), caddr_t arg, ddi_dma_lim_t *limits,
    ddi_dma_handle_t *handlep)
{
	int (*funcp)() = ddi_dma_map;
	ddi_dma_lim_t dma_lim;
	struct ddi_dma_req dmareq;
	struct bus_ops *bop;

	if (limits == (ddi_dma_lim_t *)0) {
		dma_lim = standard_limits;
	} else {
		dma_lim = *limits;
	}
	dmareq.dmar_limits = &dma_lim;
	dmareq.dmar_flags = flags;
	dmareq.dmar_fp = waitfp;
	dmareq.dmar_arg = arg;
	dmareq.dmar_object.dmao_size = (uint_t)bp->b_bcount;

	if (bp->b_flags & B_PAGEIO) {
		dmareq.dmar_object.dmao_type = DMA_OTYP_PAGES;
		dmareq.dmar_object.dmao_obj.pp_obj.pp_pp = bp->b_pages;
		dmareq.dmar_object.dmao_obj.pp_obj.pp_offset =
		    (uint_t)(((uintptr_t)bp->b_un.b_addr) & MMU_PAGEOFFSET);
	} else {
		dmareq.dmar_object.dmao_type = DMA_OTYP_BUFVADDR;
		dmareq.dmar_object.dmao_obj.virt_obj.v_addr = bp->b_un.b_addr;
		if (bp->b_flags & B_SHADOW) {
			dmareq.dmar_object.dmao_obj.virt_obj.v_priv =
			    bp->b_shadow;
		} else {
			dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;
		}

		/*
		 * If the buffer has no proc pointer, or the proc
		 * struct has the kernel address space, or the buffer has
		 * been marked B_REMAPPED (meaning that it is now
		 * mapped into the kernel's address space), then
		 * the address space is kas (kernel address space).
		 */
		if ((bp->b_proc == NULL) || (bp->b_proc->p_as == &kas) ||
		    (bp->b_flags & B_REMAPPED)) {
			dmareq.dmar_object.dmao_obj.virt_obj.v_as = 0;
		} else {
			dmareq.dmar_object.dmao_obj.virt_obj.v_as =
			    bp->b_proc->p_as;
		}
	}

	/*
	 * Handle the case that the requester is both a leaf
	 * and a nexus driver simultaneously by calling the
	 * requester's bus_dma_map function directly instead
	 * of ddi_dma_map.
	 */
	bop = DEVI(dip)->devi_ops->devo_bus_ops;
	if (bop && bop->bus_dma_map)
		funcp = bop->bus_dma_map;

	return ((*funcp)(dip, dip, &dmareq, handlep));
}

#if !defined(__sparc)
/*
 * Request bus_dma_ctl parent to fiddle with a dma request.
 *
 * (The sparc version is in sparc_subr.s)
 */
int
ddi_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
    off_t *offp, size_t *lenp, caddr_t *objp, uint_t flags)
{
	int (*fp)();

	dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_ctl;
	fp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_ctl;
	return ((*fp) (dip, rdip, handle, request, offp, lenp, objp, flags));
}
#endif

/*
 * For all DMA control functions, call the DMA control
 * routine and return status.
 *
 * Just plain assume that the parent is to be called.
 * If a nexus driver or a thread outside the framework
 * of a nexus driver or a leaf driver calls these functions,
 * it is up to them to deal with the fact that the parent's
 * bus_dma_ctl function will be the first one called.
907 */ 908 909 #define HD ((ddi_dma_impl_t *)h)->dmai_rdip 910 911 int 912 ddi_dma_kvaddrp(ddi_dma_handle_t h, off_t off, size_t len, caddr_t *kp) 913 { 914 return (ddi_dma_mctl(HD, HD, h, DDI_DMA_KVADDR, &off, &len, kp, 0)); 915 } 916 917 int 918 ddi_dma_htoc(ddi_dma_handle_t h, off_t o, ddi_dma_cookie_t *c) 919 { 920 return (ddi_dma_mctl(HD, HD, h, DDI_DMA_HTOC, &o, 0, (caddr_t *)c, 0)); 921 } 922 923 int 924 ddi_dma_coff(ddi_dma_handle_t h, ddi_dma_cookie_t *c, off_t *o) 925 { 926 return (ddi_dma_mctl(HD, HD, h, DDI_DMA_COFF, 927 (off_t *)c, 0, (caddr_t *)o, 0)); 928 } 929 930 int 931 ddi_dma_movwin(ddi_dma_handle_t h, off_t *o, size_t *l, ddi_dma_cookie_t *c) 932 { 933 return (ddi_dma_mctl(HD, HD, h, DDI_DMA_MOVWIN, o, 934 l, (caddr_t *)c, 0)); 935 } 936 937 int 938 ddi_dma_curwin(ddi_dma_handle_t h, off_t *o, size_t *l) 939 { 940 if ((((ddi_dma_impl_t *)h)->dmai_rflags & DDI_DMA_PARTIAL) == 0) 941 return (DDI_FAILURE); 942 return (ddi_dma_mctl(HD, HD, h, DDI_DMA_REPWIN, o, l, 0, 0)); 943 } 944 945 int 946 ddi_dma_nextwin(ddi_dma_handle_t h, ddi_dma_win_t win, 947 ddi_dma_win_t *nwin) 948 { 949 return (ddi_dma_mctl(HD, HD, h, DDI_DMA_NEXTWIN, (off_t *)&win, 0, 950 (caddr_t *)nwin, 0)); 951 } 952 953 int 954 ddi_dma_nextseg(ddi_dma_win_t win, ddi_dma_seg_t seg, ddi_dma_seg_t *nseg) 955 { 956 ddi_dma_handle_t h = (ddi_dma_handle_t)win; 957 958 return (ddi_dma_mctl(HD, HD, h, DDI_DMA_NEXTSEG, (off_t *)&win, 959 (size_t *)&seg, (caddr_t *)nseg, 0)); 960 } 961 962 #if (defined(__i386) && !defined(__amd64)) || defined(__sparc) 963 /* 964 * This routine is Obsolete and should be removed from ALL architectures 965 * in a future release of Solaris. 966 * 967 * It is deliberately NOT ported to amd64; please fix the code that 968 * depends on this routine to use ddi_dma_nextcookie(9F). 
 *
 * NOTE: even though we fixed the pointer through a 32-bit param issue (the fix
 * is a side effect to some other cleanup), we're still not going to support
 * this interface on x64.
 */
/* Obsolete: translate a DMA segment into an offset/length/cookie triple. */
int
ddi_dma_segtocookie(ddi_dma_seg_t seg, off_t *o, off_t *l,
    ddi_dma_cookie_t *cookiep)
{
	ddi_dma_handle_t h = (ddi_dma_handle_t)seg;

	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_SEGTOC, o, (size_t *)l,
	    (caddr_t *)cookiep, 0));
}
#endif /* (__i386 && !__amd64) || __sparc */

#if !defined(__sparc)

/*
 * The SPARC versions of these routines are done in assembler to
 * save register windows, so they're in sparc_subr.s.
 *
 * Each wrapper below resolves the cached bus-op parent for the given
 * operation (devi_bus_dma_*) and dispatches to that nexus's bus op.
 */

int
ddi_dma_map(dev_info_t *dip, dev_info_t *rdip,
    struct ddi_dma_req *dmareqp, ddi_dma_handle_t *handlep)
{
	dev_info_t *hdip;
	int (*funcp)(dev_info_t *, dev_info_t *, struct ddi_dma_req *,
	    ddi_dma_handle_t *);

	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_map;

	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_map;
	return ((*funcp)(hdip, rdip, dmareqp, handlep));
}

int
ddi_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
    int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
{
	dev_info_t *hdip;
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_attr_t *,
	    int (*)(caddr_t), caddr_t, ddi_dma_handle_t *);

	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_allochdl;

	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_allochdl;
	return ((*funcp)(hdip, rdip, attr, waitfp, arg, handlep));
}

int
ddi_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handlep)
{
	dev_info_t *hdip;
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t);

	/*
	 * NOTE(review): the parent is looked up via devi_bus_dma_allochdl
	 * (not a freehdl-specific cache) — presumably alloc and free
	 * resolve to the same nexus; confirm before changing.
	 */
	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_allochdl;

	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_freehdl;
	return ((*funcp)(hdip, rdip, handlep));
}

int
ddi_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
    ddi_dma_cookie_t *cp, uint_t *ccountp)
{
	dev_info_t *hdip;
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
	    struct ddi_dma_req *, ddi_dma_cookie_t *, uint_t *);

	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl;

	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_bindhdl;
	return ((*funcp)(hdip, rdip, handle, dmareq, cp, ccountp));
}

int
ddi_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle)
{
	dev_info_t *hdip;
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t);

	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_unbindhdl;

	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_unbindhdl;
	return ((*funcp)(hdip, rdip, handle));
}


int
ddi_dma_flush(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, off_t off, size_t len,
    uint_t cache_flags)
{
	dev_info_t *hdip;
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
	    off_t, size_t, uint_t);

	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_flush;

	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_flush;
	return ((*funcp)(hdip, rdip, handle, off, len, cache_flags));
}

int
ddi_dma_win(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, uint_t win, off_t *offp,
    size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	dev_info_t *hdip;
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
	    uint_t, off_t *, size_t *, ddi_dma_cookie_t *, uint_t *);

	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_win;

	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_win;
	return ((*funcp)(hdip, rdip, handle, win, offp, lenp,
	    cookiep, ccountp));
}

/*
 * Synchronize [o, o+l) of the mapping for 'whom' (CPU or device),
 * dispatched through the parent's bus_dma_flush op.
 */
int
ddi_dma_sync(ddi_dma_handle_t h, off_t o, size_t l, uint_t whom)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)h;
	dev_info_t *hdip, *dip;
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t, off_t,
	    size_t, uint_t);

	/*
	 * the DMA nexus driver will set DMP_NOSYNC if the
	 * platform does not require any sync operation. For
	 * example if the memory is uncached or consistent
	 * and without any I/O write buffers involved.
	 */
	if ((hp->dmai_rflags & DMP_NOSYNC) == DMP_NOSYNC)
		return (DDI_SUCCESS);

	dip = hp->dmai_rdip;
	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_flush;
	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_flush;
	return ((*funcp)(hdip, dip, h, o, l, whom));
}

/* Unbind a handle via the cached unbind function/parent pair. */
int
ddi_dma_unbind_handle(ddi_dma_handle_t h)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)h;
	dev_info_t *hdip, *dip;
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t);

	dip = hp->dmai_rdip;
	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_unbindhdl;
	funcp = DEVI(dip)->devi_bus_dma_unbindfunc;
	return ((*funcp)(hdip, dip, h));
}

#endif /* !__sparc */

/* Old-style: release the mapping behind handle 'h'. */
int
ddi_dma_free(ddi_dma_handle_t h)
{
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_FREE, 0, 0, 0, 0));
}

/*
 * Allocate 'len' bytes of IOPB (I/O parameter block) memory subject to
 * the given DMA limits (defaults to standard_limits when limp is NULL).
 */
int
ddi_iopb_alloc(dev_info_t *dip, ddi_dma_lim_t *limp, uint_t len, caddr_t *iopbp)
{
	ddi_dma_lim_t defalt;
	size_t size = len;

	if (!limp) {
		defalt = standard_limits;
		limp = &defalt;
	}
	return (i_ddi_mem_alloc_lim(dip, limp, size, 0, 0, 0,
	    iopbp, NULL, NULL));
}

/* Free memory obtained from ddi_iopb_alloc(). */
void
ddi_iopb_free(caddr_t iopb)
{
	i_ddi_mem_free(iopb, NULL);
}

/*
 * Allocate 'length' bytes of DMA-able memory; *real_length receives the
 * actual (possibly rounded-up) size.  Only the low bit of 'flags' is
 * passed through (sleep/nosleep).
 */
int
ddi_mem_alloc(dev_info_t *dip, ddi_dma_lim_t *limits, uint_t length,
    uint_t flags, caddr_t *kaddrp, uint_t *real_length)
{
	ddi_dma_lim_t defalt;
	size_t size = length;

	if (!limits) {
		defalt = standard_limits;
		limits = &defalt;
	}
	return (i_ddi_mem_alloc_lim(dip, limits, size, flags & 0x1,
	    1, 0, kaddrp, real_length, NULL));
}

/* Free memory obtained from ddi_mem_alloc(). */
void
ddi_mem_free(caddr_t kaddr)
{
	i_ddi_mem_free(kaddr, NULL);
}

/*
 * DMA attributes, alignment, burst sizes, and transfer minimums
 */
int
ddi_dma_get_attr(ddi_dma_handle_t handle, ddi_dma_attr_t *attrp)
{
	ddi_dma_impl_t *dimp = (ddi_dma_impl_t *)handle;

	if (attrp == NULL)
		return (DDI_FAILURE);
	*attrp = dimp->dmai_attr;
	return (DDI_SUCCESS);
}

/* Return the burst-size bitmask of the handle (0 for a NULL handle). */
int
ddi_dma_burstsizes(ddi_dma_handle_t handle)
{
	ddi_dma_impl_t *dimp = (ddi_dma_impl_t *)handle;

	if (!dimp)
		return (0);
	else
		return (dimp->dmai_burstsizes);
}

/*
 * Derive the device's alignment and minimum-effect sizes from the
 * handle's burst sizes.  For SBus 64-bit handles the upper 16 bits of
 * the burstsize word (when set) carry the 64-bit burst encodings.
 */
int
ddi_dma_devalign(ddi_dma_handle_t handle, uint_t *alignment, uint_t *mineffect)
{
	ddi_dma_impl_t *dimp = (ddi_dma_impl_t *)handle;

	if (!dimp || !alignment || !mineffect)
		return (DDI_FAILURE);
	if (!(dimp->dmai_rflags & DDI_DMA_SBUS_64BIT)) {
		*alignment = 1 << ddi_ffs(dimp->dmai_burstsizes);
	} else {
		if (dimp->dmai_burstsizes & 0xff0000) {
			*alignment = 1 << ddi_ffs(dimp->dmai_burstsizes >> 16);
		} else {
			*alignment = 1 << ddi_ffs(dimp->dmai_burstsizes);
		}
	}
	*mineffect = dimp->dmai_minxfer;
	return (DDI_SUCCESS);
}

/*
 * Negotiate a minimum transfer size with the parent nexus.  'i' must be
 * zero or a power of two; returns the agreed value, or 0 on failure or
 * if the result is not a power of two.
 */
int
ddi_iomin(dev_info_t *a, int i, int stream)
{
	int r;

	/*
	 * Make sure that the initial value is sane
	 */
	if (i & (i - 1))
		return (0);
	if (i == 0)
		i = (stream) ? 4 : 1;

	r = ddi_ctlops(a, a,
	    DDI_CTLOPS_IOMIN, (void *)(uintptr_t)stream, (void *)&i);
	if (r != DDI_SUCCESS || (i & (i - 1)))
		return (0);
	return (i);
}

/*
 * Given two DMA attribute structures, apply the attributes
 * of one to the other, following the rules of attributes
 * and the wishes of the caller.
 *
 * The rules of DMA attribute structures are that you cannot
 * make things *less* restrictive as you apply one set
 * of attributes to another.
 *
 */
void
ddi_dma_attr_merge(ddi_dma_attr_t *attr, ddi_dma_attr_t *mod)
{
	attr->dma_attr_addr_lo =
	    MAX(attr->dma_attr_addr_lo, mod->dma_attr_addr_lo);
	attr->dma_attr_addr_hi =
	    MIN(attr->dma_attr_addr_hi, mod->dma_attr_addr_hi);
	attr->dma_attr_count_max =
	    MIN(attr->dma_attr_count_max, mod->dma_attr_count_max);
	attr->dma_attr_align =
	    MAX(attr->dma_attr_align, mod->dma_attr_align);
	/* Burst sizes: only those supported by both survive. */
	attr->dma_attr_burstsizes =
	    (uint_t)(attr->dma_attr_burstsizes & mod->dma_attr_burstsizes);
	attr->dma_attr_minxfer =
	    maxbit(attr->dma_attr_minxfer, mod->dma_attr_minxfer);
	attr->dma_attr_maxxfer =
	    MIN(attr->dma_attr_maxxfer, mod->dma_attr_maxxfer);
	attr->dma_attr_seg = MIN(attr->dma_attr_seg, mod->dma_attr_seg);
	attr->dma_attr_sgllen = MIN((uint_t)attr->dma_attr_sgllen,
	    (uint_t)mod->dma_attr_sgllen);
	attr->dma_attr_granular =
	    MAX(attr->dma_attr_granular, mod->dma_attr_granular);
}

/*
 * mmap/segmap interface:
 */

/*
 * ddi_segmap:	setup the default segment driver. Calls the drivers
 *		XXmmap routine to validate the range to be mapped.
 *		Return ENXIO if the range is not valid.  Create
 *		a seg_dev segment that contains all of the
 *		necessary information and will reference the
 *		default segment driver routines. It returns zero
 *		on success or non-zero on failure.
 */
int
ddi_segmap(dev_t dev, off_t offset, struct as *asp, caddr_t *addrp, off_t len,
    uint_t prot, uint_t maxprot, uint_t flags, cred_t *credp)
{
	extern int spec_segmap(dev_t, off_t, struct as *, caddr_t *,
	    off_t, uint_t, uint_t, uint_t, struct cred *);

	/* Delegate directly to the specfs segmap routine. */
	return (spec_segmap(dev, offset, asp, addrp, len,
	    prot, maxprot, flags, credp));
}

/*
 * ddi_map_fault:	Resolve mappings at fault time.  Used by segment
 *			drivers. Allows each successive parent to resolve
 *			address translations and add its mappings to the
 *			mapping list supplied in the page structure. It
 *			returns zero on success or non-zero on failure.
 */

int
ddi_map_fault(dev_info_t *dip, struct hat *hat, struct seg *seg,
    caddr_t addr, struct devpage *dp, pfn_t pfn, uint_t prot, uint_t lock)
{
	return (i_ddi_map_fault(dip, dip, hat, seg, addr, dp, pfn, prot, lock));
}

/*
 * ddi_device_mapping_check:	Called from ddi_segmap_setup.
 *	Invokes platform specific DDI to determine whether attributes specified
 *	in attr(9s) are valid for the region of memory that will be made
 *	available for direct access to user process via the mmap(2) system
 *	call.  Returns 0 if the region is mappable (with *hat_flags filled
 *	in by the framework), -1 otherwise.
 */
int
ddi_device_mapping_check(dev_t dev, ddi_device_acc_attr_t *accattrp,
    uint_t rnumber, uint_t *hat_flags)
{
	ddi_acc_handle_t handle;
	ddi_map_req_t mr;
	ddi_acc_hdl_t *hp;
	int result;
	dev_info_t *dip;

	/*
	 * we use e_ddi_hold_devi_by_dev to search for the devi.  We
	 * release it immediately since it should already be held by
	 * a devfs vnode.
	 */
	if ((dip =
	    e_ddi_hold_devi_by_dev(dev, E_DDI_HOLD_DEVI_NOATTACH)) == NULL)
		return (-1);
	ddi_release_devi(dip);	/* for e_ddi_hold_devi_by_dev() */

	/*
	 * Allocate and initialize the common elements of data
	 * access handle.
	 */
	handle = impl_acc_hdl_alloc(KM_SLEEP, NULL);
	if (handle == NULL)
		return (-1);

	hp = impl_acc_hdl_get(handle);
	hp->ah_vers = VERS_ACCHDL;
	hp->ah_dip = dip;
	hp->ah_rnumber = rnumber;
	hp->ah_offset = 0;
	hp->ah_len = 0;
	hp->ah_acc = *accattrp;

	/*
	 * Set up the mapping request and call to parent.
	 */
	mr.map_op = DDI_MO_MAP_HANDLE;
	mr.map_type = DDI_MT_RNUMBER;
	mr.map_obj.rnumber = rnumber;
	mr.map_prot = PROT_READ | PROT_WRITE;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_handlep = hp;
	mr.map_vers = DDI_MAP_VERSION;
	result = ddi_map(dip, &mr, 0, 0, NULL);

	/*
	 * Region must be mappable, pick up flags from the framework.
	 */
	*hat_flags = hp->ah_hat_flags;

	impl_acc_hdl_free(handle);

	/*
	 * check for end result.
	 */
	if (result != DDI_SUCCESS)
		return (-1);
	return (0);
}


/*
 * Property functions:	 See also, ddipropdefs.h.
 *
 * These functions are the framework for the property functions,
 * i.e. they support software defined properties.  All implementation
 * specific property handling (i.e.: self-identifying devices and
 * PROM defined properties are handled in the implementation specific
 * functions (defined in ddi_implfuncs.h).
 */

/*
 * nopropop:	Shouldn't be called, right?
 */
int
nopropop(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
    char *name, caddr_t valuep, int *lengthp)
{
	_NOTE(ARGUNUSED(dev, dip, prop_op, mod_flags, name, valuep, lengthp))
	return (DDI_PROP_NOT_FOUND);
}

#ifdef	DDI_PROP_DEBUG
int ddi_prop_debug_flag = 0;

/*
 * ddi_prop_debug: Toggle property debugging; returns the previous
 * setting.  A message is printed on any transition to or from enabled.
 */
int
ddi_prop_debug(int enable)
{
	int prev = ddi_prop_debug_flag;

	if ((enable != 0) || (prev != 0))
		printf("ddi_prop_debug: debugging %s\n",
		    enable ? "enabled" : "disabled");
	ddi_prop_debug_flag = enable;
	return (prev);
}

#endif	/* DDI_PROP_DEBUG */

/*
 * Search a property list for a match, if found return pointer
 * to matching prop struct, else return NULL.
 */

ddi_prop_t *
i_ddi_prop_search(dev_t dev, char *name, uint_t flags, ddi_prop_t **list_head)
{
	ddi_prop_t	*propp;

	/*
	 * find the property in child's devinfo:
	 * Search order defined by this search function is first matching
	 * property with input dev == DDI_DEV_T_ANY matching any dev or
	 * dev == propp->prop_dev, name == propp->name, and the correct
	 * data type as specified in the flags.  If a DDI_DEV_T_NONE dev
	 * value made it this far then it implies a DDI_DEV_T_ANY search.
	 */
	if (dev == DDI_DEV_T_NONE)
		dev = DDI_DEV_T_ANY;

	for (propp = *list_head; propp != NULL; propp = propp->prop_next)  {

		if (!DDI_STRSAME(propp->prop_name, name))
			continue;

		if ((dev != DDI_DEV_T_ANY) && (propp->prop_dev != dev))
			continue;

		/* require at least one requested type bit to match */
		if (((propp->prop_flags & flags) & DDI_PROP_TYPE_MASK) == 0)
			continue;

		return (propp);
	}

	return ((ddi_prop_t *)0);
}

/*
 * Search for property within devnames structures
 */
ddi_prop_t *
i_ddi_search_global_prop(dev_t dev, char *name, uint_t flags)
{
	major_t		major;
	struct devnames	*dnp;
	ddi_prop_t	*propp;

	/*
	 * Valid dev_t value is needed to index into the
	 * correct devnames entry, therefore a dev_t
	 * value of DDI_DEV_T_ANY is not appropriate.
	 */
	ASSERT(dev != DDI_DEV_T_ANY);
	if (dev == DDI_DEV_T_ANY) {
		return ((ddi_prop_t *)0);
	}

	major = getmajor(dev);
	dnp = &(devnamesp[major]);

	if (dnp->dn_global_prop_ptr == NULL)
		return ((ddi_prop_t *)0);

	LOCK_DEV_OPS(&dnp->dn_lock);

	for (propp = dnp->dn_global_prop_ptr->prop_list;
	    propp != NULL;
	    propp = (ddi_prop_t *)propp->prop_next) {

		if (!DDI_STRSAME(propp->prop_name, name))
			continue;

		/* LDI_DEV_T_ANY lookups match any dev; otherwise exact */
		if ((!(flags & LDI_DEV_T_ANY)) && (propp->prop_dev != dev))
			continue;

		if (((propp->prop_flags & flags) & DDI_PROP_TYPE_MASK) == 0)
			continue;

		/* Property found, return it */
		UNLOCK_DEV_OPS(&dnp->dn_lock);
		return (propp);
	}

	UNLOCK_DEV_OPS(&dnp->dn_lock);
	return ((ddi_prop_t *)0);
}

static char prop_no_mem_msg[] = "can't allocate memory for ddi property <%s>";

/*
 * ddi_prop_search_global:
 *	Search the global property list within devnames
 *	for the named property.  Return the encoded value.
 */
static int
i_ddi_prop_search_global(dev_t dev, uint_t flags, char *name,
    void *valuep, uint_t *lengthp)
{
	ddi_prop_t	*propp;
	caddr_t		buffer;

	propp =  i_ddi_search_global_prop(dev, name, flags);

	/* Property NOT found, bail */
	if (propp == (ddi_prop_t *)0)
		return (DDI_PROP_NOT_FOUND);

	if (propp->prop_flags & DDI_PROP_UNDEF_IT)
		return (DDI_PROP_UNDEFINED);

	if ((buffer = kmem_alloc(propp->prop_len,
	    (flags & DDI_PROP_CANSLEEP) ? KM_SLEEP : KM_NOSLEEP)) == NULL) {
		cmn_err(CE_CONT, prop_no_mem_msg, name);
		return (DDI_PROP_NO_MEMORY);
	}

	/*
	 * Return the encoded data
	 */
	*(caddr_t *)valuep = buffer;
	*lengthp = propp->prop_len;
	bcopy(propp->prop_val, buffer, propp->prop_len);

	return (DDI_PROP_SUCCESS);
}

/*
 * ddi_prop_search_common:	Lookup and return the encoded value
 *
 * The while(1) loop exists for two reasons: to retry after dropping
 * devi_lock for a KM_SLEEP preallocation, and to walk up the devinfo
 * tree iteratively (dip = pdip at the bottom) instead of recursing.
 */
int
ddi_prop_search_common(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    uint_t flags, char *name, void *valuep, uint_t *lengthp)
{
	ddi_prop_t	*propp;
	int		i;
	caddr_t		buffer;
	caddr_t		prealloc = NULL;
	int		plength = 0;
	dev_info_t	*pdip;
	int		(*bop)();

	/*CONSTANTCONDITION*/
	while (1)  {

		mutex_enter(&(DEVI(dip)->devi_lock));


		/*
		 * find the property in child's devinfo:
		 * Search order is:
		 * 1. driver defined properties
		 * 2. system defined properties
		 * 3. driver global properties
		 * 4. boot defined properties
		 */

		propp = i_ddi_prop_search(dev, name, flags,
		    &(DEVI(dip)->devi_drv_prop_ptr));
		if (propp == NULL)  {
			propp = i_ddi_prop_search(dev, name, flags,
			    &(DEVI(dip)->devi_sys_prop_ptr));
		}
		if ((propp == NULL) && DEVI(dip)->devi_global_prop_list) {
			propp = i_ddi_prop_search(dev, name, flags,
			    &DEVI(dip)->devi_global_prop_list->prop_list);
		}

		if (propp == NULL)  {
			propp = i_ddi_prop_search(dev, name, flags,
			    &(DEVI(dip)->devi_hw_prop_ptr));
		}

		/*
		 * Software property found?
		 */
		if (propp != (ddi_prop_t *)0)  {

			/*
			 * If explicit undefine, return now.
			 */
			if (propp->prop_flags & DDI_PROP_UNDEF_IT) {
				mutex_exit(&(DEVI(dip)->devi_lock));
				if (prealloc)
					kmem_free(prealloc, plength);
				return (DDI_PROP_UNDEFINED);
			}

			/*
			 * If we only want to know if it exists, return now
			 */
			if (prop_op == PROP_EXISTS) {
				mutex_exit(&(DEVI(dip)->devi_lock));
				ASSERT(prealloc == NULL);
				return (DDI_PROP_SUCCESS);
			}

			/*
			 * If length only request or prop length == 0,
			 * service request and return now.
			 */
			if ((prop_op == PROP_LEN) ||(propp->prop_len == 0)) {
				*lengthp = propp->prop_len;

				/*
				 * if prop_op is PROP_LEN_AND_VAL_ALLOC
				 * that means prop_len is 0, so set valuep
				 * also to NULL
				 */
				if (prop_op == PROP_LEN_AND_VAL_ALLOC)
					*(caddr_t *)valuep = NULL;

				mutex_exit(&(DEVI(dip)->devi_lock));
				if (prealloc)
					kmem_free(prealloc, plength);
				return (DDI_PROP_SUCCESS);
			}

			/*
			 * If LEN_AND_VAL_ALLOC and the request can sleep,
			 * drop the mutex, allocate the buffer, and go
			 * through the loop again.  If we already allocated
			 * the buffer, and the size of the property changed,
			 * keep trying...
			 */
			if ((prop_op == PROP_LEN_AND_VAL_ALLOC) &&
			    (flags & DDI_PROP_CANSLEEP))  {
				if (prealloc && (propp->prop_len != plength)) {
					kmem_free(prealloc, plength);
					prealloc = NULL;
				}
				if (prealloc == NULL)  {
					plength = propp->prop_len;
					mutex_exit(&(DEVI(dip)->devi_lock));
					prealloc = kmem_alloc(plength,
					    KM_SLEEP);
					continue;
				}
			}

			/*
			 * Allocate buffer, if required.  Either way,
			 * set `buffer' variable.
			 */
			i = *lengthp;			/* Get callers length */
			*lengthp = propp->prop_len;	/* Set callers length */

			switch (prop_op) {

			case PROP_LEN_AND_VAL_ALLOC:

				if (prealloc == NULL) {
					buffer = kmem_alloc(propp->prop_len,
					    KM_NOSLEEP);
				} else {
					buffer = prealloc;
				}

				if (buffer == NULL)  {
					mutex_exit(&(DEVI(dip)->devi_lock));
					cmn_err(CE_CONT, prop_no_mem_msg, name);
					return (DDI_PROP_NO_MEMORY);
				}
				/* Set callers buf ptr */
				*(caddr_t *)valuep = buffer;
				break;

			case PROP_LEN_AND_VAL_BUF:

				if (propp->prop_len > (i)) {
					mutex_exit(&(DEVI(dip)->devi_lock));
					return (DDI_PROP_BUF_TOO_SMALL);
				}

				buffer = valuep;  /* Get callers buf ptr */
				break;

			default:
				/*
				 * NOTE(review): an unexpected prop_op falls
				 * through with `buffer' uninitialized and is
				 * then used by bcopy below; callers appear to
				 * only pass the enumerated ops — confirm.
				 */
				break;
			}

			/*
			 * Do the copy.
			 */
			bcopy(propp->prop_val, buffer, propp->prop_len);
			mutex_exit(&(DEVI(dip)->devi_lock));
			return (DDI_PROP_SUCCESS);
		}

		mutex_exit(&(DEVI(dip)->devi_lock));
		if (prealloc)
			kmem_free(prealloc, plength);
		prealloc = NULL;

		/*
		 * Prop not found, call parent bus_ops to deal with possible
		 * h/w layer (possible PROM defined props, etc.) and to
		 * possibly ascend the hierarchy, if allowed by flags.
		 */
		pdip = (dev_info_t *)DEVI(dip)->devi_parent;

		/*
		 * One last call for the root driver PROM props?
		 */
		if (dip == ddi_root_node())  {
			return (ddi_bus_prop_op(dev, dip, dip, prop_op,
			    flags, name, valuep, (int *)lengthp));
		}

		/*
		 * We may have been called to check for properties
		 * within a single devinfo node that has no parent -
		 * see make_prop()
		 */
		if (pdip == NULL) {
			ASSERT((flags &
			    (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM)) ==
			    (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM));
			return (DDI_PROP_NOT_FOUND);
		}

		/*
		 * Instead of recursing, we do iterative calls up the tree.
		 * As a bit of optimization, skip the bus_op level if the
		 * node is a s/w node and if the parent's bus_prop_op function
		 * is `ddi_bus_prop_op', because we know that in this case,
		 * this function does nothing.
		 *
		 * 4225415: If the parent isn't attached, or the child
		 * hasn't been named by the parent yet, use the default
		 * ddi_bus_prop_op as a proxy for the parent.  This
		 * allows property lookups in any child/parent state to
		 * include 'prom' and inherited properties, even when
		 * there are no drivers attached to the child or parent.
		 */

		bop = ddi_bus_prop_op;
		if (i_ddi_devi_attached(pdip) &&
		    (i_ddi_node_state(dip) >= DS_INITIALIZED))
			bop = DEVI(pdip)->devi_ops->devo_bus_ops->bus_prop_op;

		i = DDI_PROP_NOT_FOUND;

		if ((bop != ddi_bus_prop_op) || ndi_dev_is_prom_node(dip)) {
			i = (*bop)(dev, pdip, dip, prop_op,
			    flags | DDI_PROP_DONTPASS,
			    name, valuep, lengthp);
		}

		if ((flags & DDI_PROP_DONTPASS) ||
		    (i != DDI_PROP_NOT_FOUND))
			return (i);

		/* ascend one level and search again */
		dip = pdip;
	}
	/*NOTREACHED*/
}


/*
 * ddi_prop_op: The basic property operator for drivers.
1784 * 1785 * In ddi_prop_op, the type of valuep is interpreted based on prop_op: 1786 * 1787 * prop_op valuep 1788 * ------ ------ 1789 * 1790 * PROP_LEN <unused> 1791 * 1792 * PROP_LEN_AND_VAL_BUF Pointer to callers buffer 1793 * 1794 * PROP_LEN_AND_VAL_ALLOC Address of callers pointer (will be set to 1795 * address of allocated buffer, if successful) 1796 */ 1797 int 1798 ddi_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags, 1799 char *name, caddr_t valuep, int *lengthp) 1800 { 1801 int i; 1802 1803 ASSERT((mod_flags & DDI_PROP_TYPE_MASK) == 0); 1804 1805 /* 1806 * If this was originally an LDI prop lookup then we bail here. 1807 * The reason is that the LDI property lookup interfaces first call 1808 * a drivers prop_op() entry point to allow it to override 1809 * properties. But if we've made it here, then the driver hasn't 1810 * overriden any properties. We don't want to continue with the 1811 * property search here because we don't have any type inforamtion. 1812 * When we return failure, the LDI interfaces will then proceed to 1813 * call the typed property interfaces to look up the property. 1814 */ 1815 if (mod_flags & DDI_PROP_DYNAMIC) 1816 return (DDI_PROP_NOT_FOUND); 1817 1818 /* 1819 * check for pre-typed property consumer asking for typed property: 1820 * see e_ddi_getprop_int64. 1821 */ 1822 if (mod_flags & DDI_PROP_CONSUMER_TYPED) 1823 mod_flags |= DDI_PROP_TYPE_INT64; 1824 mod_flags |= DDI_PROP_TYPE_ANY; 1825 1826 i = ddi_prop_search_common(dev, dip, prop_op, 1827 mod_flags, name, valuep, (uint_t *)lengthp); 1828 if (i == DDI_PROP_FOUND_1275) 1829 return (DDI_PROP_SUCCESS); 1830 return (i); 1831 } 1832 1833 /* 1834 * ddi_prop_op_nblocks: The basic property operator for drivers that maintain 1835 * size in number of DEV_BSIZE blocks. Provides a dynamic property 1836 * implementation for size oriented properties based on nblocks64 values passed 1837 * in by the driver. Fallback to ddi_prop_op if the nblocks64 is too large. 
1838 * This interface should not be used with a nblocks64 that represents the 1839 * driver's idea of how to represent unknown, if nblocks is unknown use 1840 * ddi_prop_op. 1841 */ 1842 int 1843 ddi_prop_op_nblocks(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, 1844 int mod_flags, char *name, caddr_t valuep, int *lengthp, uint64_t nblocks64) 1845 { 1846 uint64_t size64; 1847 1848 /* 1849 * There is no point in supporting nblocks64 values that don't have 1850 * an accurate uint64_t byte count representation. 1851 */ 1852 if (nblocks64 >= (UINT64_MAX >> DEV_BSHIFT)) 1853 return (ddi_prop_op(dev, dip, prop_op, mod_flags, 1854 name, valuep, lengthp)); 1855 1856 size64 = nblocks64 << DEV_BSHIFT; 1857 return (ddi_prop_op_size(dev, dip, prop_op, mod_flags, 1858 name, valuep, lengthp, size64)); 1859 } 1860 1861 /* 1862 * ddi_prop_op_size: The basic property operator for drivers that maintain size 1863 * in bytes. Provides a of dynamic property implementation for size oriented 1864 * properties based on size64 values passed in by the driver. Fallback to 1865 * ddi_prop_op if the size64 is too large. This interface should not be used 1866 * with a size64 that represents the driver's idea of how to represent unknown, 1867 * if size is unknown use ddi_prop_op. 1868 * 1869 * NOTE: the legacy "nblocks"/"size" properties are treated as 32-bit unsigned 1870 * integers. While the most likely interface to request them ([bc]devi_size) 1871 * is declared int (signed) there is no enforcement of this, which means we 1872 * can't enforce limitations here without risking regression. 
1873 */ 1874 int 1875 ddi_prop_op_size(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, 1876 int mod_flags, char *name, caddr_t valuep, int *lengthp, uint64_t size64) 1877 { 1878 uint64_t nblocks64; 1879 int callers_length; 1880 caddr_t buffer; 1881 1882 /* compute DEV_BSIZE nblocks value */ 1883 nblocks64 = lbtodb(size64); 1884 1885 /* get callers length, establish length of our dynamic properties */ 1886 callers_length = *lengthp; 1887 1888 if (strcmp(name, "Nblocks") == 0) 1889 *lengthp = sizeof (uint64_t); 1890 else if (strcmp(name, "Size") == 0) 1891 *lengthp = sizeof (uint64_t); 1892 else if ((strcmp(name, "nblocks") == 0) && (nblocks64 < UINT_MAX)) 1893 *lengthp = sizeof (uint32_t); 1894 else if ((strcmp(name, "size") == 0) && (size64 < UINT_MAX)) 1895 *lengthp = sizeof (uint32_t); 1896 else { 1897 /* fallback to ddi_prop_op */ 1898 return (ddi_prop_op(dev, dip, prop_op, mod_flags, 1899 name, valuep, lengthp)); 1900 } 1901 1902 /* service request for the length of the property */ 1903 if (prop_op == PROP_LEN) 1904 return (DDI_PROP_SUCCESS); 1905 1906 /* the length of the property and the request must match */ 1907 if (callers_length != *lengthp) 1908 return (DDI_PROP_INVAL_ARG); 1909 1910 switch (prop_op) { 1911 case PROP_LEN_AND_VAL_ALLOC: 1912 if ((buffer = kmem_alloc(*lengthp, 1913 (mod_flags & DDI_PROP_CANSLEEP) ? 
1914 KM_SLEEP : KM_NOSLEEP)) == NULL) 1915 return (DDI_PROP_NO_MEMORY); 1916 1917 *(caddr_t *)valuep = buffer; /* set callers buf ptr */ 1918 break; 1919 1920 case PROP_LEN_AND_VAL_BUF: 1921 buffer = valuep; /* get callers buf ptr */ 1922 break; 1923 1924 default: 1925 return (DDI_PROP_INVAL_ARG); 1926 } 1927 1928 /* transfer the value into the buffer */ 1929 if (strcmp(name, "Nblocks") == 0) 1930 *((uint64_t *)buffer) = nblocks64; 1931 else if (strcmp(name, "Size") == 0) 1932 *((uint64_t *)buffer) = size64; 1933 else if (strcmp(name, "nblocks") == 0) 1934 *((uint32_t *)buffer) = (uint32_t)nblocks64; 1935 else if (strcmp(name, "size") == 0) 1936 *((uint32_t *)buffer) = (uint32_t)size64; 1937 return (DDI_PROP_SUCCESS); 1938 } 1939 1940 /* 1941 * Variable length props... 1942 */ 1943 1944 /* 1945 * ddi_getlongprop: Get variable length property len+val into a buffer 1946 * allocated by property provider via kmem_alloc. Requester 1947 * is responsible for freeing returned property via kmem_free. 1948 * 1949 * Arguments: 1950 * 1951 * dev_t: Input: dev_t of property. 1952 * dip: Input: dev_info_t pointer of child. 1953 * flags: Input: Possible flag modifiers are: 1954 * DDI_PROP_DONTPASS: Don't pass to parent if prop not found. 1955 * DDI_PROP_CANSLEEP: Memory allocation may sleep. 1956 * name: Input: name of property. 1957 * valuep: Output: Addr of callers buffer pointer. 1958 * lengthp:Output: *lengthp will contain prop length on exit. 1959 * 1960 * Possible Returns: 1961 * 1962 * DDI_PROP_SUCCESS: Prop found and returned. 1963 * DDI_PROP_NOT_FOUND: Prop not found 1964 * DDI_PROP_UNDEFINED: Prop explicitly undefined. 1965 * DDI_PROP_NO_MEMORY: Prop found, but unable to alloc mem. 
 */

int
ddi_getlongprop(dev_t dev, dev_info_t *dip, int flags,
    char *name, caddr_t valuep, int *lengthp)
{
	/* PROP_LEN_AND_VAL_ALLOC: the framework allocates the result buffer */
	return (ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_ALLOC,
	    flags, name, valuep, lengthp));
}

/*
 *
 * ddi_getlongprop_buf:		Get long prop into pre-allocated callers
 *				buffer. (no memory allocation by provider).
 *
 *	dev_t:	Input:	dev_t of property.
 *	dip:	Input:	dev_info_t pointer of child.
 *	flags:	Input:	DDI_PROP_DONTPASS or NULL
 *	name:	Input:	name of property
 *	valuep:	Input:	ptr to callers buffer.
 *	lengthp:I/O:	ptr to length of callers buffer on entry,
 *			actual length of property on exit.
 *
 *	Possible returns:
 *
 *	DDI_PROP_SUCCESS	Prop found and returned
 *	DDI_PROP_NOT_FOUND	Prop not found
 *	DDI_PROP_UNDEFINED	Prop explicitly undefined.
 *	DDI_PROP_BUF_TOO_SMALL	Prop found, callers buf too small,
 *				no value returned, but actual prop
 *				length returned in *lengthp
 *
 */

int
ddi_getlongprop_buf(dev_t dev, dev_info_t *dip, int flags,
    char *name, caddr_t valuep, int *lengthp)
{
	/* PROP_LEN_AND_VAL_BUF: caller supplies the buffer and its length */
	return (ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
	    flags, name, valuep, lengthp));
}

/*
 * Integer/boolean sized props.
 *
 * Call is value only... returns found boolean or int sized prop value or
 * defvalue if prop not found or is wrong length or is explicitly undefined.
 * Only flag is DDI_PROP_DONTPASS...
 *
 * By convention, this interface returns boolean (0) sized properties
 * as value (int)1.
 *
 * This never returns an error, if property not found or specifically
 * undefined, the input `defvalue' is returned.
 */

int
ddi_getprop(dev_t dev, dev_info_t *dip, int flags, char *name, int defvalue)
{
	int	propvalue = defvalue;
	int	proplength = sizeof (int);
	int	error;

	error = ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
	    flags, name, (caddr_t)&propvalue, &proplength);

	/*
	 * By convention, a zero-length (boolean) property is reported
	 * as the value (int)1.
	 */
	if ((error == DDI_PROP_SUCCESS) && (proplength == 0))
		propvalue = 1;

	return (propvalue);
}

/*
 * Get prop length interface: flags are 0 or DDI_PROP_DONTPASS
 * if returns DDI_PROP_SUCCESS, length returned in *lengthp.
 */

int
ddi_getproplen(dev_t dev, dev_info_t *dip, int flags, char *name, int *lengthp)
{
	return (ddi_prop_op(dev, dip, PROP_LEN, flags, name, NULL, lengthp));
}

/*
 * Allocate a struct prop_driver_data, along with 'size' bytes
 * for decoded property data.  This structure is freed by
 * calling ddi_prop_free(9F).
 */
static void *
ddi_prop_decode_alloc(size_t size, void (*prop_free)(struct prop_driver_data *))
{
	struct prop_driver_data *pdd;

	/*
	 * Allocate a structure with enough memory to store the decoded data.
	 */
	pdd = kmem_zalloc(sizeof (struct prop_driver_data) + size, KM_SLEEP);
	pdd->pdd_size = (sizeof (struct prop_driver_data) + size);
	pdd->pdd_prop_free = prop_free;

	/*
	 * Return a pointer to the location to put the decoded data.
	 * The prop_driver_data header sits immediately before it; see
	 * ddi_prop_free() which recovers the header by negative indexing.
	 */
	return ((void *)((caddr_t)pdd + sizeof (struct prop_driver_data)));
}

/*
 * Allocated the memory needed to store the encoded data in the property
 * handle.
 */
static int
ddi_prop_encode_alloc(prop_handle_t *ph, size_t size)
{
	/*
	 * If size is zero, then set data to NULL and size to 0.  This
	 * is a boolean property.
	 */
	if (size == 0) {
		ph->ph_size = 0;
		ph->ph_data = NULL;
		ph->ph_cur_pos = NULL;
		ph->ph_save_pos = NULL;
	} else {
		/* DDI_PROP_DONTSLEEP callers may see an allocation failure */
		if (ph->ph_flags == DDI_PROP_DONTSLEEP) {
			ph->ph_data = kmem_zalloc(size, KM_NOSLEEP);
			if (ph->ph_data == NULL)
				return (DDI_PROP_NO_MEMORY);
		} else
			ph->ph_data = kmem_zalloc(size, KM_SLEEP);
		ph->ph_size = size;
		ph->ph_cur_pos = ph->ph_data;
		ph->ph_save_pos = ph->ph_data;
	}
	return (DDI_PROP_SUCCESS);
}

/*
 * Free the space allocated by the lookup routines.  Each lookup routine
 * returns a pointer to the decoded data to the driver.  The driver then
 * passes this pointer back to us.  This data actually lives in a struct
 * prop_driver_data.  We use negative indexing to find the beginning of
 * the structure and then free the entire structure using the size and
 * the free routine stored in the structure.
 */
void
ddi_prop_free(void *datap)
{
	struct prop_driver_data *pdd;

	/*
	 * Get the structure
	 */
	pdd = (struct prop_driver_data *)
	    ((caddr_t)datap - sizeof (struct prop_driver_data));
	/*
	 * Call the free routine to free it
	 */
	(*pdd->pdd_prop_free)(pdd);
}

/*
 * Free the data associated with an array of ints,
 * allocated with ddi_prop_decode_alloc().
 */
static void
ddi_prop_free_ints(struct prop_driver_data *pdd)
{
	kmem_free(pdd, pdd->pdd_size);
}

/*
 * Free a single string property or a single string contained within
 * the argv style return value of an array of strings.
 */
static void
ddi_prop_free_string(struct prop_driver_data *pdd)
{
	kmem_free(pdd, pdd->pdd_size);

}

/*
 * Free an array of strings.
2149 */ 2150 static void 2151 ddi_prop_free_strings(struct prop_driver_data *pdd) 2152 { 2153 kmem_free(pdd, pdd->pdd_size); 2154 } 2155 2156 /* 2157 * Free the data associated with an array of bytes. 2158 */ 2159 static void 2160 ddi_prop_free_bytes(struct prop_driver_data *pdd) 2161 { 2162 kmem_free(pdd, pdd->pdd_size); 2163 } 2164 2165 /* 2166 * Reset the current location pointer in the property handle to the 2167 * beginning of the data. 2168 */ 2169 void 2170 ddi_prop_reset_pos(prop_handle_t *ph) 2171 { 2172 ph->ph_cur_pos = ph->ph_data; 2173 ph->ph_save_pos = ph->ph_data; 2174 } 2175 2176 /* 2177 * Restore the current location pointer in the property handle to the 2178 * saved position. 2179 */ 2180 void 2181 ddi_prop_save_pos(prop_handle_t *ph) 2182 { 2183 ph->ph_save_pos = ph->ph_cur_pos; 2184 } 2185 2186 /* 2187 * Save the location that the current location pointer is pointing to.. 2188 */ 2189 void 2190 ddi_prop_restore_pos(prop_handle_t *ph) 2191 { 2192 ph->ph_cur_pos = ph->ph_save_pos; 2193 } 2194 2195 /* 2196 * Property encode/decode functions 2197 */ 2198 2199 /* 2200 * Decode a single integer property 2201 */ 2202 static int 2203 ddi_prop_fm_decode_int(prop_handle_t *ph, void *data, uint_t *nelements) 2204 { 2205 int i; 2206 int tmp; 2207 2208 /* 2209 * If there is nothing to decode return an error 2210 */ 2211 if (ph->ph_size == 0) 2212 return (DDI_PROP_END_OF_DATA); 2213 2214 /* 2215 * Decode the property as a single integer and return it 2216 * in data if we were able to decode it. 
2217 */ 2218 i = DDI_PROP_INT(ph, DDI_PROP_CMD_DECODE, &tmp); 2219 if (i < DDI_PROP_RESULT_OK) { 2220 switch (i) { 2221 case DDI_PROP_RESULT_EOF: 2222 return (DDI_PROP_END_OF_DATA); 2223 2224 case DDI_PROP_RESULT_ERROR: 2225 return (DDI_PROP_CANNOT_DECODE); 2226 } 2227 } 2228 2229 *(int *)data = tmp; 2230 *nelements = 1; 2231 return (DDI_PROP_SUCCESS); 2232 } 2233 2234 /* 2235 * Decode a single 64 bit integer property 2236 */ 2237 static int 2238 ddi_prop_fm_decode_int64(prop_handle_t *ph, void *data, uint_t *nelements) 2239 { 2240 int i; 2241 int64_t tmp; 2242 2243 /* 2244 * If there is nothing to decode return an error 2245 */ 2246 if (ph->ph_size == 0) 2247 return (DDI_PROP_END_OF_DATA); 2248 2249 /* 2250 * Decode the property as a single integer and return it 2251 * in data if we were able to decode it. 2252 */ 2253 i = DDI_PROP_INT64(ph, DDI_PROP_CMD_DECODE, &tmp); 2254 if (i < DDI_PROP_RESULT_OK) { 2255 switch (i) { 2256 case DDI_PROP_RESULT_EOF: 2257 return (DDI_PROP_END_OF_DATA); 2258 2259 case DDI_PROP_RESULT_ERROR: 2260 return (DDI_PROP_CANNOT_DECODE); 2261 } 2262 } 2263 2264 *(int64_t *)data = tmp; 2265 *nelements = 1; 2266 return (DDI_PROP_SUCCESS); 2267 } 2268 2269 /* 2270 * Decode an array of integers property 2271 */ 2272 static int 2273 ddi_prop_fm_decode_ints(prop_handle_t *ph, void *data, uint_t *nelements) 2274 { 2275 int i; 2276 int cnt = 0; 2277 int *tmp; 2278 int *intp; 2279 int n; 2280 2281 /* 2282 * Figure out how many array elements there are by going through the 2283 * data without decoding it first and counting. 
	 */
	for (;;) {
		i = DDI_PROP_INT(ph, DDI_PROP_CMD_SKIP, NULL);
		if (i < 0)
			break;
		cnt++;
	}

	/*
	 * If there are no elements return an error
	 */
	if (cnt == 0)
		return (DDI_PROP_END_OF_DATA);

	/*
	 * If we cannot skip through the data, we cannot decode it
	 */
	if (i == DDI_PROP_RESULT_ERROR)
		return (DDI_PROP_CANNOT_DECODE);

	/*
	 * Reset the data pointer to the beginning of the encoded data
	 */
	ddi_prop_reset_pos(ph);

	/*
	 * Allocate memory to store the decoded values in.
	 */
	intp = ddi_prop_decode_alloc((cnt * sizeof (int)),
	    ddi_prop_free_ints);

	/*
	 * Decode each element and place it in the space we just allocated
	 */
	tmp = intp;
	for (n = 0; n < cnt; n++, tmp++) {
		i = DDI_PROP_INT(ph, DDI_PROP_CMD_DECODE, tmp);
		if (i < DDI_PROP_RESULT_OK) {
			/*
			 * Free the space we just allocated
			 * and return an error.
			 */
			ddi_prop_free(intp);
			switch (i) {
			case DDI_PROP_RESULT_EOF:
				return (DDI_PROP_END_OF_DATA);

			case DDI_PROP_RESULT_ERROR:
				return (DDI_PROP_CANNOT_DECODE);
			}
		}
	}

	*nelements = cnt;
	*(int **)data = intp;

	return (DDI_PROP_SUCCESS);
}

/*
 * Decode a 64 bit integer array property.
 * On success, *(int64_t **)data receives a freshly allocated array
 * (release with ddi_prop_free) and *nelements receives its length.
 */
static int
ddi_prop_fm_decode_int64_array(prop_handle_t *ph, void *data, uint_t *nelements)
{
	int	i;
	int	n;
	int	cnt = 0;
	int64_t	*tmp;
	int64_t	*intp;

	/*
	 * Count the number of array elements by going
	 * through the data without decoding it.
	 */
	for (;;) {
		i = DDI_PROP_INT64(ph, DDI_PROP_CMD_SKIP, NULL);
		if (i < 0)
			break;
		cnt++;
	}

	/*
	 * If there are no elements return an error
	 */
	if (cnt == 0)
		return (DDI_PROP_END_OF_DATA);

	/*
	 * If we cannot skip through the data, we cannot decode it
	 */
	if (i == DDI_PROP_RESULT_ERROR)
		return (DDI_PROP_CANNOT_DECODE);

	/*
	 * Reset the data pointer to the beginning of the encoded data
	 */
	ddi_prop_reset_pos(ph);

	/*
	 * Allocate memory to store the decoded value.
	 */
	intp = ddi_prop_decode_alloc((cnt * sizeof (int64_t)),
	    ddi_prop_free_ints);

	/*
	 * Decode each element and place it in the space allocated
	 */
	tmp = intp;
	for (n = 0; n < cnt; n++, tmp++) {
		i = DDI_PROP_INT64(ph, DDI_PROP_CMD_DECODE, tmp);
		if (i < DDI_PROP_RESULT_OK) {
			/*
			 * Free the space we just allocated
			 * and return an error.
			 */
			ddi_prop_free(intp);
			switch (i) {
			case DDI_PROP_RESULT_EOF:
				return (DDI_PROP_END_OF_DATA);

			case DDI_PROP_RESULT_ERROR:
				return (DDI_PROP_CANNOT_DECODE);
			}
		}
	}

	*nelements = cnt;
	*(int64_t **)data = intp;

	return (DDI_PROP_SUCCESS);
}

/*
 * Encode an array of integers property (Can be one element)
 */
int
ddi_prop_fm_encode_ints(prop_handle_t *ph, void *data, uint_t nelements)
{
	int	i;
	int	*tmp;
	int	cnt;
	int	size;

	/*
	 * If there is no data, we cannot do anything
	 */
	if (nelements == 0)
		return (DDI_PROP_CANNOT_ENCODE);

	/*
	 * Get the size of an encoded int.
	 */
	size = DDI_PROP_INT(ph, DDI_PROP_CMD_GET_ESIZE, NULL);

	if (size < DDI_PROP_RESULT_OK) {
		switch (size) {
		case DDI_PROP_RESULT_EOF:
			return (DDI_PROP_END_OF_DATA);

		case DDI_PROP_RESULT_ERROR:
			return (DDI_PROP_CANNOT_ENCODE);
		}
	}

	/*
	 * Allocate space in the handle to store the encoded int.
	 */
	if (ddi_prop_encode_alloc(ph, size * nelements) !=
	    DDI_PROP_SUCCESS)
		return (DDI_PROP_NO_MEMORY);

	/*
	 * Encode the array of ints.
	 */
	tmp = (int *)data;
	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
		i = DDI_PROP_INT(ph, DDI_PROP_CMD_ENCODE, tmp);
		if (i < DDI_PROP_RESULT_OK) {
			switch (i) {
			case DDI_PROP_RESULT_EOF:
				return (DDI_PROP_END_OF_DATA);

			case DDI_PROP_RESULT_ERROR:
				return (DDI_PROP_CANNOT_ENCODE);
			}
		}
	}

	return (DDI_PROP_SUCCESS);
}


/*
 * Encode a 64 bit integer array property
 */
int
ddi_prop_fm_encode_int64(prop_handle_t *ph, void *data, uint_t nelements)
{
	int	i;
	int	cnt;
	int	size;
	int64_t	*tmp;

	/*
	 * If there is no data, we cannot do anything
	 */
	if (nelements == 0)
		return (DDI_PROP_CANNOT_ENCODE);

	/*
	 * Get the size of an encoded 64 bit int.
	 */
	size = DDI_PROP_INT64(ph, DDI_PROP_CMD_GET_ESIZE, NULL);

	if (size < DDI_PROP_RESULT_OK) {
		switch (size) {
		case DDI_PROP_RESULT_EOF:
			return (DDI_PROP_END_OF_DATA);

		case DDI_PROP_RESULT_ERROR:
			return (DDI_PROP_CANNOT_ENCODE);
		}
	}

	/*
	 * Allocate space in the handle to store the encoded int.
	 */
	if (ddi_prop_encode_alloc(ph, size * nelements) !=
	    DDI_PROP_SUCCESS)
		return (DDI_PROP_NO_MEMORY);

	/*
	 * Encode the array of ints.
	 */
	tmp = (int64_t *)data;
	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
		i = DDI_PROP_INT64(ph, DDI_PROP_CMD_ENCODE, tmp);
		if (i < DDI_PROP_RESULT_OK) {
			switch (i) {
			case DDI_PROP_RESULT_EOF:
				return (DDI_PROP_END_OF_DATA);

			case DDI_PROP_RESULT_ERROR:
				return (DDI_PROP_CANNOT_ENCODE);
			}
		}
	}

	return (DDI_PROP_SUCCESS);
}

/*
 * Decode a single string property.
 * On success, *(char **)data receives a freshly allocated NUL-terminated
 * string (release with ddi_prop_free) and *nelements is set to 1.
 */
static int
ddi_prop_fm_decode_string(prop_handle_t *ph, void *data, uint_t *nelements)
{
	char	*tmp;
	char	*str;
	int	i;
	int	size;

	/*
	 * If there is nothing to decode return an error
	 */
	if (ph->ph_size == 0)
		return (DDI_PROP_END_OF_DATA);

	/*
	 * Get the decoded size of the encoded string.
	 */
	size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
	if (size < DDI_PROP_RESULT_OK) {
		switch (size) {
		case DDI_PROP_RESULT_EOF:
			return (DDI_PROP_END_OF_DATA);

		case DDI_PROP_RESULT_ERROR:
			return (DDI_PROP_CANNOT_DECODE);
		}
	}

	/*
	 * Allocate memory to store the decoded value in.
	 */
	str = ddi_prop_decode_alloc((size_t)size, ddi_prop_free_string);

	/* GET_DSIZE advanced the position; rewind before decoding */
	ddi_prop_reset_pos(ph);

	/*
	 * Decode the string and place it in the space we just allocated
	 */
	tmp = str;
	i = DDI_PROP_STR(ph, DDI_PROP_CMD_DECODE, tmp);
	if (i < DDI_PROP_RESULT_OK) {
		/*
		 * Free the space we just allocated
		 * and return an error.
		 */
		ddi_prop_free(str);
		switch (i) {
		case DDI_PROP_RESULT_EOF:
			return (DDI_PROP_END_OF_DATA);

		case DDI_PROP_RESULT_ERROR:
			return (DDI_PROP_CANNOT_DECODE);
		}
	}

	*(char **)data = str;
	*nelements = 1;

	return (DDI_PROP_SUCCESS);
}

/*
 * Decode an array of strings.
 */
int
ddi_prop_fm_decode_strings(prop_handle_t *ph, void *data, uint_t *nelements)
{
	int	cnt = 0;
	char	**strs;
	char	**tmp;
	char	*ptr;
	int	i;
	int	n;
	int	size;
	size_t	nbytes;

	/*
	 * Figure out how many array elements there are by going through the
	 * data without decoding it first and counting.
	 */
	for (;;) {
		i = DDI_PROP_STR(ph, DDI_PROP_CMD_SKIP, NULL);
		if (i < 0)
			break;
		cnt++;
	}

	/*
	 * If there are no elements return an error
	 */
	if (cnt == 0)
		return (DDI_PROP_END_OF_DATA);

	/*
	 * If we cannot skip through the data, we cannot decode it
	 */
	if (i == DDI_PROP_RESULT_ERROR)
		return (DDI_PROP_CANNOT_DECODE);

	/*
	 * Reset the data pointer to the beginning of the encoded data
	 */
	ddi_prop_reset_pos(ph);

	/*
	 * Figure out how much memory we need for the sum total:
	 * a NULL-terminated pointer vector followed by the string bytes.
	 */
	nbytes = (cnt + 1) * sizeof (char *);

	for (n = 0; n < cnt; n++) {
		/*
		 * Get the decoded size of the current encoded string.
		 */
		size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
		if (size < DDI_PROP_RESULT_OK) {
			switch (size) {
			case DDI_PROP_RESULT_EOF:
				return (DDI_PROP_END_OF_DATA);

			case DDI_PROP_RESULT_ERROR:
				return (DDI_PROP_CANNOT_DECODE);
			}
		}

		nbytes += size;
	}

	/*
	 * Allocate memory in which to store the decoded strings.
	 */
	strs = ddi_prop_decode_alloc(nbytes, ddi_prop_free_strings);

	/*
	 * Set up pointers for each string by figuring out yet
	 * again how long each string is.
	 */
	ddi_prop_reset_pos(ph);
	ptr = (caddr_t)strs + ((cnt + 1) * sizeof (char *));
	for (tmp = strs, n = 0; n < cnt; n++, tmp++) {
		/*
		 * Get the decoded size of the current encoded string.
		 */
		size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
		if (size < DDI_PROP_RESULT_OK) {
			ddi_prop_free(strs);
			switch (size) {
			case DDI_PROP_RESULT_EOF:
				return (DDI_PROP_END_OF_DATA);

			case DDI_PROP_RESULT_ERROR:
				return (DDI_PROP_CANNOT_DECODE);
			}
		}

		*tmp = ptr;
		ptr += size;
	}

	/*
	 * String array is terminated by a NULL
	 */
	*tmp = NULL;

	/*
	 * Finally, we can decode each string
	 */
	ddi_prop_reset_pos(ph);
	for (tmp = strs, n = 0; n < cnt; n++, tmp++) {
		i = DDI_PROP_STR(ph, DDI_PROP_CMD_DECODE, *tmp);
		if (i < DDI_PROP_RESULT_OK) {
			/*
			 * Free the space we just allocated
			 * and return an error
			 */
			ddi_prop_free(strs);
			switch (i) {
			case DDI_PROP_RESULT_EOF:
				return (DDI_PROP_END_OF_DATA);

			case DDI_PROP_RESULT_ERROR:
				return (DDI_PROP_CANNOT_DECODE);
			}
		}
	}

	*(char ***)data = strs;
	*nelements = cnt;

	return (DDI_PROP_SUCCESS);
}

/*
 * Encode a string.
 */
int
ddi_prop_fm_encode_string(prop_handle_t *ph, void *data, uint_t nelements)
{
	char	**tmp;
	int	size;
	int	i;

	/*
	 * If there is no data, we cannot do anything
	 */
	if (nelements == 0)
		return (DDI_PROP_CANNOT_ENCODE);

	/*
	 * Get the size of the encoded string.
	 */
	tmp = (char **)data;
	size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_ESIZE, *tmp);
	if (size < DDI_PROP_RESULT_OK) {
		switch (size) {
		case DDI_PROP_RESULT_EOF:
			return (DDI_PROP_END_OF_DATA);

		case DDI_PROP_RESULT_ERROR:
			return (DDI_PROP_CANNOT_ENCODE);
		}
	}

	/*
	 * Allocate space in the handle to store the encoded string.
	 */
	if (ddi_prop_encode_alloc(ph, size) != DDI_PROP_SUCCESS)
		return (DDI_PROP_NO_MEMORY);

	ddi_prop_reset_pos(ph);

	/*
	 * Encode the string.
	 */
	tmp = (char **)data;
	i = DDI_PROP_STR(ph, DDI_PROP_CMD_ENCODE, *tmp);
	if (i < DDI_PROP_RESULT_OK) {
		switch (i) {
		case DDI_PROP_RESULT_EOF:
			return (DDI_PROP_END_OF_DATA);

		case DDI_PROP_RESULT_ERROR:
			return (DDI_PROP_CANNOT_ENCODE);
		}
	}

	return (DDI_PROP_SUCCESS);
}


/*
 * Encode an array of strings.
 */
int
ddi_prop_fm_encode_strings(prop_handle_t *ph, void *data, uint_t nelements)
{
	int	cnt = 0;
	char	**tmp;
	int	size;
	uint_t	total_size;
	int	i;

	/*
	 * If there is no data, we cannot do anything
	 */
	if (nelements == 0)
		return (DDI_PROP_CANNOT_ENCODE);

	/*
	 * Get the total size required to encode all the strings.
	 */
	total_size = 0;
	tmp = (char **)data;
	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
		size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_ESIZE, *tmp);
		if (size < DDI_PROP_RESULT_OK) {
			switch (size) {
			case DDI_PROP_RESULT_EOF:
				return (DDI_PROP_END_OF_DATA);

			case DDI_PROP_RESULT_ERROR:
				return (DDI_PROP_CANNOT_ENCODE);
			}
		}
		total_size += (uint_t)size;
	}

	/*
	 * Allocate space in the handle to store the encoded strings.
	 */
	if (ddi_prop_encode_alloc(ph, total_size) != DDI_PROP_SUCCESS)
		return (DDI_PROP_NO_MEMORY);

	ddi_prop_reset_pos(ph);

	/*
	 * Encode the array of strings.
	 */
	tmp = (char **)data;
	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
		i = DDI_PROP_STR(ph, DDI_PROP_CMD_ENCODE, *tmp);
		if (i < DDI_PROP_RESULT_OK) {
			switch (i) {
			case DDI_PROP_RESULT_EOF:
				return (DDI_PROP_END_OF_DATA);

			case DDI_PROP_RESULT_ERROR:
				return (DDI_PROP_CANNOT_ENCODE);
			}
		}
	}

	return (DDI_PROP_SUCCESS);
}


/*
 * Decode an array of bytes.
 * On success, *(uchar_t **)data receives a freshly allocated buffer
 * (release with ddi_prop_free) and *nelements receives the byte count.
 */
static int
ddi_prop_fm_decode_bytes(prop_handle_t *ph, void *data, uint_t *nelements)
{
	uchar_t	*tmp;
	int	nbytes;
	int	i;

	/*
	 * If there are no elements return an error
	 */
	if (ph->ph_size == 0)
		return (DDI_PROP_END_OF_DATA);

	/*
	 * Get the size of the encoded array of bytes.
	 */
	nbytes = DDI_PROP_BYTES(ph, DDI_PROP_CMD_GET_DSIZE,
	    data, ph->ph_size);
	if (nbytes < DDI_PROP_RESULT_OK) {
		switch (nbytes) {
		case DDI_PROP_RESULT_EOF:
			return (DDI_PROP_END_OF_DATA);

		case DDI_PROP_RESULT_ERROR:
			return (DDI_PROP_CANNOT_DECODE);
		}
	}

	/*
	 * Allocate memory to store the decoded value in.
	 */
	tmp = ddi_prop_decode_alloc(nbytes, ddi_prop_free_bytes);

	/*
	 * Decode each element and place it in the space we just allocated
	 */
	i = DDI_PROP_BYTES(ph, DDI_PROP_CMD_DECODE, tmp, nbytes);
	if (i < DDI_PROP_RESULT_OK) {
		/*
		 * Free the space we just allocated
		 * and return an error
		 */
		ddi_prop_free(tmp);
		switch (i) {
		case DDI_PROP_RESULT_EOF:
			return (DDI_PROP_END_OF_DATA);

		case DDI_PROP_RESULT_ERROR:
			return (DDI_PROP_CANNOT_DECODE);
		}
	}

	*(uchar_t **)data = tmp;
	*nelements = nbytes;

	return (DDI_PROP_SUCCESS);
}

/*
 * Encode an array of bytes.
2917 */ 2918 int 2919 ddi_prop_fm_encode_bytes(prop_handle_t *ph, void *data, uint_t nelements) 2920 { 2921 int size; 2922 int i; 2923 2924 /* 2925 * If there are no elements, then this is a boolean property, 2926 * so just create a property handle with no data and return. 2927 */ 2928 if (nelements == 0) { 2929 (void) ddi_prop_encode_alloc(ph, 0); 2930 return (DDI_PROP_SUCCESS); 2931 } 2932 2933 /* 2934 * Get the size of the encoded array of bytes. 2935 */ 2936 size = DDI_PROP_BYTES(ph, DDI_PROP_CMD_GET_ESIZE, (uchar_t *)data, 2937 nelements); 2938 if (size < DDI_PROP_RESULT_OK) { 2939 switch (size) { 2940 case DDI_PROP_RESULT_EOF: 2941 return (DDI_PROP_END_OF_DATA); 2942 2943 case DDI_PROP_RESULT_ERROR: 2944 return (DDI_PROP_CANNOT_DECODE); 2945 } 2946 } 2947 2948 /* 2949 * Allocate space in the handle to store the encoded bytes. 2950 */ 2951 if (ddi_prop_encode_alloc(ph, (uint_t)size) != DDI_PROP_SUCCESS) 2952 return (DDI_PROP_NO_MEMORY); 2953 2954 /* 2955 * Encode the array of bytes. 2956 */ 2957 i = DDI_PROP_BYTES(ph, DDI_PROP_CMD_ENCODE, (uchar_t *)data, 2958 nelements); 2959 if (i < DDI_PROP_RESULT_OK) { 2960 switch (i) { 2961 case DDI_PROP_RESULT_EOF: 2962 return (DDI_PROP_END_OF_DATA); 2963 2964 case DDI_PROP_RESULT_ERROR: 2965 return (DDI_PROP_CANNOT_ENCODE); 2966 } 2967 } 2968 2969 return (DDI_PROP_SUCCESS); 2970 } 2971 2972 /* 2973 * OBP 1275 integer, string and byte operators. 
 *
 * DDI_PROP_CMD_DECODE:
 *
 *	DDI_PROP_RESULT_ERROR:		cannot decode the data
 *	DDI_PROP_RESULT_EOF:		end of data
 *	DDI_PROP_RESULT_OK:		data was decoded
 *
 * DDI_PROP_CMD_ENCODE:
 *
 *	DDI_PROP_RESULT_ERROR:		cannot encode the data
 *	DDI_PROP_RESULT_EOF:		end of data
 *	DDI_PROP_RESULT_OK:		data was encoded
 *
 * DDI_PROP_CMD_SKIP:
 *
 *	DDI_PROP_RESULT_ERROR:		cannot skip the data
 *	DDI_PROP_RESULT_EOF:		end of data
 *	DDI_PROP_RESULT_OK:		data was skipped
 *
 * DDI_PROP_CMD_GET_ESIZE:
 *
 *	DDI_PROP_RESULT_ERROR:		cannot get encoded size
 *	DDI_PROP_RESULT_EOF:		end of data
 *	> 0:				the encoded size
 *
 * DDI_PROP_CMD_GET_DSIZE:
 *
 *	DDI_PROP_RESULT_ERROR:		cannot get decoded size
 *	DDI_PROP_RESULT_EOF:		end of data
 *	> 0:				the decoded size
 */

/*
 * OBP 1275 integer operator
 *
 * OBP properties are a byte stream of data, so integers may not be
 * properly aligned.  Therefore we need to copy them one byte at a time.
 */
int
ddi_prop_1275_int(prop_handle_t *ph, uint_t cmd, int *data)
{
	int	i;

	switch (cmd) {
	case DDI_PROP_CMD_DECODE:
		/*
		 * Check that there is encoded data.
		 *
		 * NOTE(review): the bounds tests below cast ph_cur_pos and
		 * ph_data to int *, so the "ph_size - i" arithmetic is
		 * scaled by sizeof (int); presumably ph_size is in bytes —
		 * long-standing behavior, preserved as-is.
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0)
			return (DDI_PROP_RESULT_ERROR);
		if (ph->ph_flags & PH_FROM_PROM) {
			i = MIN(ph->ph_size, PROP_1275_INT_SIZE);
			if ((int *)ph->ph_cur_pos > ((int *)ph->ph_data +
			    ph->ph_size - i))
				return (DDI_PROP_RESULT_ERROR);
		} else {
			if (ph->ph_size < sizeof (int) ||
			    ((int *)ph->ph_cur_pos > ((int *)ph->ph_data +
			    ph->ph_size - sizeof (int))))
				return (DDI_PROP_RESULT_ERROR);
		}

		/*
		 * Copy the integer, using the implementation-specific
		 * copy function if the property is coming from the PROM.
		 */
		if (ph->ph_flags & PH_FROM_PROM) {
			*data = impl_ddi_prop_int_from_prom(
			    (uchar_t *)ph->ph_cur_pos,
			    (ph->ph_size < PROP_1275_INT_SIZE) ?
			    ph->ph_size : PROP_1275_INT_SIZE);
		} else {
			bcopy(ph->ph_cur_pos, data, sizeof (int));
		}

		/*
		 * Move the current location to the start of the next
		 * bit of undecoded data.
		 */
		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
		    PROP_1275_INT_SIZE;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_ENCODE:
		/*
		 * Check that there is room to encode the data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
		    ph->ph_size < PROP_1275_INT_SIZE ||
		    ((int *)ph->ph_cur_pos > ((int *)ph->ph_data +
		    ph->ph_size - sizeof (int))))
			return (DDI_PROP_RESULT_ERROR);

		/*
		 * Encode the integer into the byte stream one byte at a
		 * time.
		 */
		bcopy(data, ph->ph_cur_pos, sizeof (int));

		/*
		 * Move the current location to the start of the next bit of
		 * space where we can store encoded data.
		 */
		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos + PROP_1275_INT_SIZE;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_SKIP:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
		    ph->ph_size < PROP_1275_INT_SIZE)
			return (DDI_PROP_RESULT_ERROR);


		/* At or past the end of the buffer: nothing left to skip */
		if ((caddr_t)ph->ph_cur_pos ==
		    (caddr_t)ph->ph_data + ph->ph_size) {
			return (DDI_PROP_RESULT_EOF);
		} else if ((caddr_t)ph->ph_cur_pos >
		    (caddr_t)ph->ph_data + ph->ph_size) {
			return (DDI_PROP_RESULT_EOF);
		}

		/*
		 * Move the current location to the start of the next bit of
		 * undecoded data.
		 */
		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos + PROP_1275_INT_SIZE;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_GET_ESIZE:
		/*
		 * Return the size of an encoded integer on OBP
		 */
		return (PROP_1275_INT_SIZE);

	case DDI_PROP_CMD_GET_DSIZE:
		/*
		 * Return the size of a decoded integer on the system.
		 */
		return (sizeof (int));

	default:
#ifdef DEBUG
		panic("ddi_prop_1275_int: %x impossible", cmd);
		/*NOTREACHED*/
#else
		return (DDI_PROP_RESULT_ERROR);
#endif	/* DEBUG */
	}
}

/*
 * 64 bit integer operator.
 *
 * This is an extension, defined by Sun, to the 1275 integer
 * operator.  This routine handles the encoding/decoding of
 * 64 bit integer properties.
 */
int
ddi_prop_int64_op(prop_handle_t *ph, uint_t cmd, int64_t *data)
{

	switch (cmd) {
	case DDI_PROP_CMD_DECODE:
		/*
		 * Check that there is encoded data.
		 * 64-bit decode from the PROM is not supported.
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0)
			return (DDI_PROP_RESULT_ERROR);
		if (ph->ph_flags & PH_FROM_PROM) {
			return (DDI_PROP_RESULT_ERROR);
		} else {
			if (ph->ph_size < sizeof (int64_t) ||
			    ((int64_t *)ph->ph_cur_pos >
			    ((int64_t *)ph->ph_data +
			    ph->ph_size - sizeof (int64_t))))
				return (DDI_PROP_RESULT_ERROR);
		}
		/*
		 * Copy the integer, using the implementation-specific
		 * copy function if the property is coming from the PROM.
		 */
		if (ph->ph_flags & PH_FROM_PROM) {
			return (DDI_PROP_RESULT_ERROR);
		} else {
			bcopy(ph->ph_cur_pos, data, sizeof (int64_t));
		}

		/*
		 * Move the current location to the start of the next
		 * bit of undecoded data.
		 */
		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
		    sizeof (int64_t);
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_ENCODE:
		/*
		 * Check that there is room to encode the data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
		    ph->ph_size < sizeof (int64_t) ||
		    ((int64_t *)ph->ph_cur_pos > ((int64_t *)ph->ph_data +
		    ph->ph_size - sizeof (int64_t))))
			return (DDI_PROP_RESULT_ERROR);

		/*
		 * Encode the integer into the byte stream one byte at a
		 * time.
		 */
		bcopy(data, ph->ph_cur_pos, sizeof (int64_t));

		/*
		 * Move the current location to the start of the next bit of
		 * space where we can store encoded data.
		 */
		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
		    sizeof (int64_t);
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_SKIP:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
		    ph->ph_size < sizeof (int64_t))
			return (DDI_PROP_RESULT_ERROR);

		/* At or past the end of the buffer: nothing left to skip */
		if ((caddr_t)ph->ph_cur_pos ==
		    (caddr_t)ph->ph_data + ph->ph_size) {
			return (DDI_PROP_RESULT_EOF);
		} else if ((caddr_t)ph->ph_cur_pos >
		    (caddr_t)ph->ph_data + ph->ph_size) {
			return (DDI_PROP_RESULT_EOF);
		}

		/*
		 * Move the current location to the start of
		 * the next bit of undecoded data.
		 */
		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
		    sizeof (int64_t);
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_GET_ESIZE:
		/*
		 * Return the size of an encoded integer on OBP
		 */
		return (sizeof (int64_t));

	case DDI_PROP_CMD_GET_DSIZE:
		/*
		 * Return the size of a decoded integer on the system.
		 */
		return (sizeof (int64_t));

	default:
#ifdef DEBUG
		panic("ddi_prop_int64_op: %x impossible", cmd);
		/*NOTREACHED*/
#else
		return (DDI_PROP_RESULT_ERROR);
#endif	/* DEBUG */
	}
}

/*
 * OBP 1275 string operator.
 *
 * OBP strings are NULL terminated.
 */
int
ddi_prop_1275_string(prop_handle_t *ph, uint_t cmd, char *data)
{
	int	n;
	char	*p;
	char	*end;

	switch (cmd) {
	case DDI_PROP_CMD_DECODE:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0) {
			return (DDI_PROP_RESULT_ERROR);
		}

		/*
		 * Match DDI_PROP_CMD_GET_DSIZE logic for when to stop and
		 * how to NULL terminate result.
		 */
		p = (char *)ph->ph_cur_pos;
		end = (char *)ph->ph_data + ph->ph_size;
		if (p >= end)
			return (DDI_PROP_RESULT_EOF);

		while (p < end) {
			*data++ = *p;
			if (*p++ == 0) {	/* NULL from OBP */
				ph->ph_cur_pos = p;
				return (DDI_PROP_RESULT_OK);
			}
		}

		/*
		 * If OBP did not NULL terminate string, which happens
		 * (at least) for 'true'/'false' boolean values, account for
		 * the space and store null termination on decode.
		 */
		ph->ph_cur_pos = p;
		*data = 0;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_ENCODE:
		/*
		 * Check that there is room to encode the data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0) {
			return (DDI_PROP_RESULT_ERROR);
		}

		n = strlen(data) + 1;
		if ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
		    ph->ph_size - n)) {
			return (DDI_PROP_RESULT_ERROR);
		}

		/*
		 * Copy the NULL terminated string
		 */
		bcopy(data, ph->ph_cur_pos, n);

		/*
		 * Move the current location to the start of the next bit of
		 * space where we can store encoded data.
		 */
		ph->ph_cur_pos = (char *)ph->ph_cur_pos + n;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_SKIP:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0) {
			return (DDI_PROP_RESULT_ERROR);
		}

		/*
		 * Return the string length plus one for the NULL.
		 * We know the size of the property, we need to
		 * ensure that the string is properly formatted,
		 * since we may be looking up random OBP data.
		 */
		p = (char *)ph->ph_cur_pos;
		end = (char *)ph->ph_data + ph->ph_size;
		if (p >= end)
			return (DDI_PROP_RESULT_EOF);

		while (p < end) {
			if (*p++ == 0) {	/* NULL from OBP */
				ph->ph_cur_pos = p;
				return (DDI_PROP_RESULT_OK);
			}
		}

		/*
		 * Accommodate the fact that OBP does not always NULL
		 * terminate strings.
		 */
		ph->ph_cur_pos = p;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_GET_ESIZE:
		/*
		 * Return the size of the encoded string on OBP.
		 */
		return (strlen(data) + 1);

	case DDI_PROP_CMD_GET_DSIZE:
		/*
		 * Return the string length plus one for the NULL.
		 * We know the size of the property, we need to
		 * ensure that the string is properly formatted,
		 * since we may be looking up random OBP data.
		 */
		p = (char *)ph->ph_cur_pos;
		end = (char *)ph->ph_data + ph->ph_size;
		if (p >= end)
			return (DDI_PROP_RESULT_EOF);

		for (n = 0; p < end; n++) {
			if (*p++ == 0) {	/* NULL from OBP */
				ph->ph_cur_pos = p;
				return (n + 1);
			}
		}

		/*
		 * If OBP did not NULL terminate string, which happens for
		 * 'true'/'false' boolean values, account for the space
		 * to store null termination here.
		 */
		ph->ph_cur_pos = p;
		return (n + 1);

	default:
#ifdef DEBUG
		panic("ddi_prop_1275_string: %x impossible", cmd);
		/*NOTREACHED*/
#else
		return (DDI_PROP_RESULT_ERROR);
#endif	/* DEBUG */
	}
}

/*
 * OBP 1275 byte operator
 *
 * Caller must specify the number of bytes to get.  OBP encodes bytes
 * as a byte so there is a 1-to-1 translation.
 */
int
ddi_prop_1275_bytes(prop_handle_t *ph, uint_t cmd, uchar_t *data,
	uint_t nelements)
{
	switch (cmd) {
	case DDI_PROP_CMD_DECODE:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
		    ph->ph_size < nelements ||
		    ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
		    ph->ph_size - nelements)))
			return (DDI_PROP_RESULT_ERROR);

		/*
		 * Copy out the bytes
		 */
		bcopy(ph->ph_cur_pos, data, nelements);

		/*
		 * Move the current location
		 */
		ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_ENCODE:
		/*
		 * Check that there is room to encode the data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
		    ph->ph_size < nelements ||
		    ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
		    ph->ph_size - nelements)))
			return (DDI_PROP_RESULT_ERROR);

		/*
		 * Copy in the bytes
		 */
		bcopy(data, ph->ph_cur_pos, nelements);

		/*
		 * Move the current location to the start of the next bit of
		 * space where we can store encoded data.
		 */
		ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_SKIP:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
		    ph->ph_size < nelements)
			return (DDI_PROP_RESULT_ERROR);

		/* Not enough bytes left to skip over */
		if ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
		    ph->ph_size - nelements))
			return (DDI_PROP_RESULT_EOF);

		/*
		 * Move the current location
		 */
		ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_GET_ESIZE:
		/*
		 * The size in bytes of the encoded size is the
		 * same as the decoded size provided by the caller.
		 */
		return (nelements);

	case DDI_PROP_CMD_GET_DSIZE:
		/*
		 * Just return the number of bytes specified by the caller.
		 */
		return (nelements);

	default:
#ifdef DEBUG
		panic("ddi_prop_1275_bytes: %x impossible", cmd);
		/*NOTREACHED*/
#else
		return (DDI_PROP_RESULT_ERROR);
#endif	/* DEBUG */
	}
}

/*
 * Used for properties that come from the OBP, hardware configuration files,
 * or that are created by calls to ddi_prop_update(9F).
 */
static struct prop_handle_ops prop_1275_ops = {
	ddi_prop_1275_int,
	ddi_prop_1275_string,
	ddi_prop_1275_bytes,
	ddi_prop_int64_op
};


/*
 * Interface to create/modify a managed property on child's behalf...
 * Flags interpreted are:
 *	DDI_PROP_CANSLEEP:	Allow memory allocation to sleep.
 *	DDI_PROP_SYSTEM_DEF:	Manipulate system list rather than driver list.
 *
 * Use same dev_t when modifying or undefining a property.
 * Search for properties with DDI_DEV_T_ANY to match first named
 * property on the list.
 *
 * Properties are stored LIFO and subsequently will match the first
 * `matching' instance.
 */

/*
 * ddi_prop_add:	Add a software defined property
 */

/*
 * define to get a new ddi_prop_t.
 * km_flags are KM_SLEEP or KM_NOSLEEP.
 */

#define	DDI_NEW_PROP_T(km_flags)	\
	(kmem_zalloc(sizeof (ddi_prop_t), km_flags))

static int
ddi_prop_add(dev_t dev, dev_info_t *dip, int flags,
    char *name, caddr_t value, int length)
{
	ddi_prop_t	*new_propp, *propp;
	ddi_prop_t	**list_head = &(DEVI(dip)->devi_drv_prop_ptr);
	int		km_flags = KM_NOSLEEP;
	int		name_buf_len;

	/*
	 * If dev_t is DDI_DEV_T_ANY or name's length is zero return error.
	 */

	if (dev == DDI_DEV_T_ANY || name == (char *)0 || strlen(name) == 0)
		return (DDI_PROP_INVAL_ARG);

	if (flags & DDI_PROP_CANSLEEP)
		km_flags = KM_SLEEP;

	/* Choose which property list (driver, system or hardware) to use */
	if (flags & DDI_PROP_SYSTEM_DEF)
		list_head = &(DEVI(dip)->devi_sys_prop_ptr);
	else if (flags & DDI_PROP_HW_DEF)
		list_head = &(DEVI(dip)->devi_hw_prop_ptr);

	if ((new_propp = DDI_NEW_PROP_T(km_flags)) == NULL) {
		cmn_err(CE_CONT, prop_no_mem_msg, name);
		return (DDI_PROP_NO_MEMORY);
	}

	/*
	 * If dev is major number 0, then we need to do a ddi_name_to_major
	 * to get the real major number for the device.  This needs to be
	 * done because some drivers need to call ddi_prop_create in their
	 * attach routines but they don't have a dev.  By creating the dev
	 * ourself if the major number is 0, drivers will not have to know what
	 * their major number.  They can just create a dev with major number
	 * 0 and pass it in.  For device 0, we will be doing a little extra
	 * work by recreating the same dev that we already have, but its the
	 * price you pay :-).
	 *
	 * This fixes bug #1098060.
	 */
	if (getmajor(dev) == DDI_MAJOR_T_UNKNOWN) {
		new_propp->prop_dev =
		    makedevice(ddi_name_to_major(DEVI(dip)->devi_binding_name),
		    getminor(dev));
	} else
		new_propp->prop_dev = dev;

	/*
	 * Allocate space for property name and copy it in...
	 */

	name_buf_len = strlen(name) + 1;
	new_propp->prop_name = kmem_alloc(name_buf_len, km_flags);
	if (new_propp->prop_name == 0) {
		kmem_free(new_propp, sizeof (ddi_prop_t));
		cmn_err(CE_CONT, prop_no_mem_msg, name);
		return (DDI_PROP_NO_MEMORY);
	}
	bcopy(name, new_propp->prop_name, name_buf_len);

	/*
	 * Set the property type
	 */
	new_propp->prop_flags = flags & DDI_PROP_TYPE_MASK;

	/*
	 * Set length and value ONLY if not an explicit property undefine:
	 * NOTE: value and length are zero for explicit undefines.
	 */

	if (flags & DDI_PROP_UNDEF_IT) {
		new_propp->prop_flags |= DDI_PROP_UNDEF_IT;
	} else {
		if ((new_propp->prop_len = length) != 0) {
			new_propp->prop_val = kmem_alloc(length, km_flags);
			if (new_propp->prop_val == 0) {
				kmem_free(new_propp->prop_name, name_buf_len);
				kmem_free(new_propp, sizeof (ddi_prop_t));
				cmn_err(CE_CONT, prop_no_mem_msg, name);
				return (DDI_PROP_NO_MEMORY);
			}
			bcopy(value, new_propp->prop_val, length);
		}
	}

	/*
	 * Link property into beginning of list. (Properties are LIFO order.)
	 */

	mutex_enter(&(DEVI(dip)->devi_lock));
	propp = *list_head;
	new_propp->prop_next = propp;
	*list_head = new_propp;
	mutex_exit(&(DEVI(dip)->devi_lock));
	return (DDI_PROP_SUCCESS);
}


/*
 * ddi_prop_change:	Modify a software managed property value
 *
 *			Set new length and value if found.
 *			returns DDI_PROP_INVAL_ARG if dev is DDI_DEV_T_ANY or
 *			input name is the NULL string.
3629 * returns DDI_PROP_NO_MEMORY if unable to allocate memory 3630 * 3631 * Note: an undef can be modified to be a define, 3632 * (you can't go the other way.) 3633 */ 3634 3635 static int 3636 ddi_prop_change(dev_t dev, dev_info_t *dip, int flags, 3637 char *name, caddr_t value, int length) 3638 { 3639 ddi_prop_t *propp; 3640 ddi_prop_t **ppropp; 3641 caddr_t p = NULL; 3642 3643 if ((dev == DDI_DEV_T_ANY) || (name == NULL) || (strlen(name) == 0)) 3644 return (DDI_PROP_INVAL_ARG); 3645 3646 /* 3647 * Preallocate buffer, even if we don't need it... 3648 */ 3649 if (length != 0) { 3650 p = kmem_alloc(length, (flags & DDI_PROP_CANSLEEP) ? 3651 KM_SLEEP : KM_NOSLEEP); 3652 if (p == NULL) { 3653 cmn_err(CE_CONT, prop_no_mem_msg, name); 3654 return (DDI_PROP_NO_MEMORY); 3655 } 3656 } 3657 3658 /* 3659 * If the dev_t value contains DDI_MAJOR_T_UNKNOWN for the major 3660 * number, a real dev_t value should be created based upon the dip's 3661 * binding driver. See ddi_prop_add... 3662 */ 3663 if (getmajor(dev) == DDI_MAJOR_T_UNKNOWN) 3664 dev = makedevice( 3665 ddi_name_to_major(DEVI(dip)->devi_binding_name), 3666 getminor(dev)); 3667 3668 /* 3669 * Check to see if the property exists. If so we modify it. 3670 * Else we create it by calling ddi_prop_add(). 3671 */ 3672 mutex_enter(&(DEVI(dip)->devi_lock)); 3673 ppropp = &DEVI(dip)->devi_drv_prop_ptr; 3674 if (flags & DDI_PROP_SYSTEM_DEF) 3675 ppropp = &DEVI(dip)->devi_sys_prop_ptr; 3676 else if (flags & DDI_PROP_HW_DEF) 3677 ppropp = &DEVI(dip)->devi_hw_prop_ptr; 3678 3679 if ((propp = i_ddi_prop_search(dev, name, flags, ppropp)) != NULL) { 3680 /* 3681 * Need to reallocate buffer? If so, do it 3682 * carefully (reuse same space if new prop 3683 * is same size and non-NULL sized). 
3684 */ 3685 if (length != 0) 3686 bcopy(value, p, length); 3687 3688 if (propp->prop_len != 0) 3689 kmem_free(propp->prop_val, propp->prop_len); 3690 3691 propp->prop_len = length; 3692 propp->prop_val = p; 3693 propp->prop_flags &= ~DDI_PROP_UNDEF_IT; 3694 mutex_exit(&(DEVI(dip)->devi_lock)); 3695 return (DDI_PROP_SUCCESS); 3696 } 3697 3698 mutex_exit(&(DEVI(dip)->devi_lock)); 3699 if (length != 0) 3700 kmem_free(p, length); 3701 3702 return (ddi_prop_add(dev, dip, flags, name, value, length)); 3703 } 3704 3705 /* 3706 * Common update routine used to update and encode a property. Creates 3707 * a property handle, calls the property encode routine, figures out if 3708 * the property already exists and updates if it does. Otherwise it 3709 * creates if it does not exist. 3710 */ 3711 int 3712 ddi_prop_update_common(dev_t match_dev, dev_info_t *dip, int flags, 3713 char *name, void *data, uint_t nelements, 3714 int (*prop_create)(prop_handle_t *, void *data, uint_t nelements)) 3715 { 3716 prop_handle_t ph; 3717 int rval; 3718 uint_t ourflags; 3719 3720 /* 3721 * If dev_t is DDI_DEV_T_ANY or name's length is zero, 3722 * return error. 3723 */ 3724 if (match_dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0) 3725 return (DDI_PROP_INVAL_ARG); 3726 3727 /* 3728 * Create the handle 3729 */ 3730 ph.ph_data = NULL; 3731 ph.ph_cur_pos = NULL; 3732 ph.ph_save_pos = NULL; 3733 ph.ph_size = 0; 3734 ph.ph_ops = &prop_1275_ops; 3735 3736 /* 3737 * ourflags: 3738 * For compatibility with the old interfaces. The old interfaces 3739 * didn't sleep by default and slept when the flag was set. These 3740 * interfaces to the opposite. So the old interfaces now set the 3741 * DDI_PROP_DONTSLEEP flag by default which tells us not to sleep. 
3742 * 3743 * ph.ph_flags: 3744 * Blocked data or unblocked data allocation 3745 * for ph.ph_data in ddi_prop_encode_alloc() 3746 */ 3747 if (flags & DDI_PROP_DONTSLEEP) { 3748 ourflags = flags; 3749 ph.ph_flags = DDI_PROP_DONTSLEEP; 3750 } else { 3751 ourflags = flags | DDI_PROP_CANSLEEP; 3752 ph.ph_flags = DDI_PROP_CANSLEEP; 3753 } 3754 3755 /* 3756 * Encode the data and store it in the property handle by 3757 * calling the prop_encode routine. 3758 */ 3759 if ((rval = (*prop_create)(&ph, data, nelements)) != 3760 DDI_PROP_SUCCESS) { 3761 if (rval == DDI_PROP_NO_MEMORY) 3762 cmn_err(CE_CONT, prop_no_mem_msg, name); 3763 if (ph.ph_size != 0) 3764 kmem_free(ph.ph_data, ph.ph_size); 3765 return (rval); 3766 } 3767 3768 /* 3769 * The old interfaces use a stacking approach to creating 3770 * properties. If we are being called from the old interfaces, 3771 * the DDI_PROP_STACK_CREATE flag will be set, so we just do a 3772 * create without checking. 3773 */ 3774 if (flags & DDI_PROP_STACK_CREATE) { 3775 rval = ddi_prop_add(match_dev, dip, 3776 ourflags, name, ph.ph_data, ph.ph_size); 3777 } else { 3778 rval = ddi_prop_change(match_dev, dip, 3779 ourflags, name, ph.ph_data, ph.ph_size); 3780 } 3781 3782 /* 3783 * Free the encoded data allocated in the prop_encode routine. 3784 */ 3785 if (ph.ph_size != 0) 3786 kmem_free(ph.ph_data, ph.ph_size); 3787 3788 return (rval); 3789 } 3790 3791 3792 /* 3793 * ddi_prop_create: Define a managed property: 3794 * See above for details. 
3795 */ 3796 3797 int 3798 ddi_prop_create(dev_t dev, dev_info_t *dip, int flag, 3799 char *name, caddr_t value, int length) 3800 { 3801 if (!(flag & DDI_PROP_CANSLEEP)) { 3802 flag |= DDI_PROP_DONTSLEEP; 3803 #ifdef DDI_PROP_DEBUG 3804 if (length != 0) 3805 cmn_err(CE_NOTE, "!ddi_prop_create: interface obsolete," 3806 "use ddi_prop_update (prop = %s, node = %s%d)", 3807 name, ddi_driver_name(dip), ddi_get_instance(dip)); 3808 #endif /* DDI_PROP_DEBUG */ 3809 } 3810 flag &= ~DDI_PROP_SYSTEM_DEF; 3811 return (ddi_prop_update_common(dev, dip, 3812 (flag | DDI_PROP_STACK_CREATE | DDI_PROP_TYPE_ANY), name, 3813 value, length, ddi_prop_fm_encode_bytes)); 3814 } 3815 3816 int 3817 e_ddi_prop_create(dev_t dev, dev_info_t *dip, int flag, 3818 char *name, caddr_t value, int length) 3819 { 3820 if (!(flag & DDI_PROP_CANSLEEP)) 3821 flag |= DDI_PROP_DONTSLEEP; 3822 return (ddi_prop_update_common(dev, dip, 3823 (flag | DDI_PROP_SYSTEM_DEF | DDI_PROP_STACK_CREATE | 3824 DDI_PROP_TYPE_ANY), 3825 name, value, length, ddi_prop_fm_encode_bytes)); 3826 } 3827 3828 int 3829 ddi_prop_modify(dev_t dev, dev_info_t *dip, int flag, 3830 char *name, caddr_t value, int length) 3831 { 3832 ASSERT((flag & DDI_PROP_TYPE_MASK) == 0); 3833 3834 /* 3835 * If dev_t is DDI_DEV_T_ANY or name's length is zero, 3836 * return error. 
3837 */ 3838 if (dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0) 3839 return (DDI_PROP_INVAL_ARG); 3840 3841 if (!(flag & DDI_PROP_CANSLEEP)) 3842 flag |= DDI_PROP_DONTSLEEP; 3843 flag &= ~DDI_PROP_SYSTEM_DEF; 3844 if (ddi_prop_exists(dev, dip, (flag | DDI_PROP_NOTPROM), name) == 0) 3845 return (DDI_PROP_NOT_FOUND); 3846 3847 return (ddi_prop_update_common(dev, dip, 3848 (flag | DDI_PROP_TYPE_BYTE), name, 3849 value, length, ddi_prop_fm_encode_bytes)); 3850 } 3851 3852 int 3853 e_ddi_prop_modify(dev_t dev, dev_info_t *dip, int flag, 3854 char *name, caddr_t value, int length) 3855 { 3856 ASSERT((flag & DDI_PROP_TYPE_MASK) == 0); 3857 3858 /* 3859 * If dev_t is DDI_DEV_T_ANY or name's length is zero, 3860 * return error. 3861 */ 3862 if (dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0) 3863 return (DDI_PROP_INVAL_ARG); 3864 3865 if (ddi_prop_exists(dev, dip, (flag | DDI_PROP_SYSTEM_DEF), name) == 0) 3866 return (DDI_PROP_NOT_FOUND); 3867 3868 if (!(flag & DDI_PROP_CANSLEEP)) 3869 flag |= DDI_PROP_DONTSLEEP; 3870 return (ddi_prop_update_common(dev, dip, 3871 (flag | DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_BYTE), 3872 name, value, length, ddi_prop_fm_encode_bytes)); 3873 } 3874 3875 3876 /* 3877 * Common lookup routine used to lookup and decode a property. 3878 * Creates a property handle, searches for the raw encoded data, 3879 * fills in the handle, and calls the property decode functions 3880 * passed in. 3881 * 3882 * This routine is not static because ddi_bus_prop_op() which lives in 3883 * ddi_impl.c calls it. No driver should be calling this routine. 
3884 */ 3885 int 3886 ddi_prop_lookup_common(dev_t match_dev, dev_info_t *dip, 3887 uint_t flags, char *name, void *data, uint_t *nelements, 3888 int (*prop_decoder)(prop_handle_t *, void *data, uint_t *nelements)) 3889 { 3890 int rval; 3891 uint_t ourflags; 3892 prop_handle_t ph; 3893 3894 if ((match_dev == DDI_DEV_T_NONE) || 3895 (name == NULL) || (strlen(name) == 0)) 3896 return (DDI_PROP_INVAL_ARG); 3897 3898 ourflags = (flags & DDI_PROP_DONTSLEEP) ? flags : 3899 flags | DDI_PROP_CANSLEEP; 3900 3901 /* 3902 * Get the encoded data 3903 */ 3904 bzero(&ph, sizeof (prop_handle_t)); 3905 3906 if (flags & DDI_UNBND_DLPI2) { 3907 /* 3908 * For unbound dlpi style-2 devices, index into 3909 * the devnames' array and search the global 3910 * property list. 3911 */ 3912 ourflags &= ~DDI_UNBND_DLPI2; 3913 rval = i_ddi_prop_search_global(match_dev, 3914 ourflags, name, &ph.ph_data, &ph.ph_size); 3915 } else { 3916 rval = ddi_prop_search_common(match_dev, dip, 3917 PROP_LEN_AND_VAL_ALLOC, ourflags, name, 3918 &ph.ph_data, &ph.ph_size); 3919 3920 } 3921 3922 if (rval != DDI_PROP_SUCCESS && rval != DDI_PROP_FOUND_1275) { 3923 ASSERT(ph.ph_data == NULL); 3924 ASSERT(ph.ph_size == 0); 3925 return (rval); 3926 } 3927 3928 /* 3929 * If the encoded data came from a OBP or software 3930 * use the 1275 OBP decode/encode routines. 3931 */ 3932 ph.ph_cur_pos = ph.ph_data; 3933 ph.ph_save_pos = ph.ph_data; 3934 ph.ph_ops = &prop_1275_ops; 3935 ph.ph_flags = (rval == DDI_PROP_FOUND_1275) ? PH_FROM_PROM : 0; 3936 3937 rval = (*prop_decoder)(&ph, data, nelements); 3938 3939 /* 3940 * Free the encoded data 3941 */ 3942 if (ph.ph_size != 0) 3943 kmem_free(ph.ph_data, ph.ph_size); 3944 3945 return (rval); 3946 } 3947 3948 /* 3949 * Lookup and return an array of composite properties. The driver must 3950 * provide the decode routine. 
3951 */ 3952 int 3953 ddi_prop_lookup(dev_t match_dev, dev_info_t *dip, 3954 uint_t flags, char *name, void *data, uint_t *nelements, 3955 int (*prop_decoder)(prop_handle_t *, void *data, uint_t *nelements)) 3956 { 3957 return (ddi_prop_lookup_common(match_dev, dip, 3958 (flags | DDI_PROP_TYPE_COMPOSITE), name, 3959 data, nelements, prop_decoder)); 3960 } 3961 3962 /* 3963 * Return 1 if a property exists (no type checking done). 3964 * Return 0 if it does not exist. 3965 */ 3966 int 3967 ddi_prop_exists(dev_t match_dev, dev_info_t *dip, uint_t flags, char *name) 3968 { 3969 int i; 3970 uint_t x = 0; 3971 3972 i = ddi_prop_search_common(match_dev, dip, PROP_EXISTS, 3973 flags | DDI_PROP_TYPE_MASK, name, NULL, &x); 3974 return (i == DDI_PROP_SUCCESS || i == DDI_PROP_FOUND_1275); 3975 } 3976 3977 3978 /* 3979 * Update an array of composite properties. The driver must 3980 * provide the encode routine. 3981 */ 3982 int 3983 ddi_prop_update(dev_t match_dev, dev_info_t *dip, 3984 char *name, void *data, uint_t nelements, 3985 int (*prop_create)(prop_handle_t *, void *data, uint_t nelements)) 3986 { 3987 return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_COMPOSITE, 3988 name, data, nelements, prop_create)); 3989 } 3990 3991 /* 3992 * Get a single integer or boolean property and return it. 3993 * If the property does not exists, or cannot be decoded, 3994 * then return the defvalue passed in. 3995 * 3996 * This routine always succeeds. 
3997 */ 3998 int 3999 ddi_prop_get_int(dev_t match_dev, dev_info_t *dip, uint_t flags, 4000 char *name, int defvalue) 4001 { 4002 int data; 4003 uint_t nelements; 4004 int rval; 4005 4006 if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM | 4007 LDI_DEV_T_ANY | DDI_UNBND_DLPI2)) { 4008 #ifdef DEBUG 4009 if (dip != NULL) { 4010 cmn_err(CE_WARN, "ddi_prop_get_int: invalid flag" 4011 " 0x%x (prop = %s, node = %s%d)", flags, 4012 name, ddi_driver_name(dip), ddi_get_instance(dip)); 4013 } 4014 #endif /* DEBUG */ 4015 flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM | 4016 LDI_DEV_T_ANY | DDI_UNBND_DLPI2; 4017 } 4018 4019 if ((rval = ddi_prop_lookup_common(match_dev, dip, 4020 (flags | DDI_PROP_TYPE_INT), name, &data, &nelements, 4021 ddi_prop_fm_decode_int)) != DDI_PROP_SUCCESS) { 4022 if (rval == DDI_PROP_END_OF_DATA) 4023 data = 1; 4024 else 4025 data = defvalue; 4026 } 4027 return (data); 4028 } 4029 4030 /* 4031 * Get a single 64 bit integer or boolean property and return it. 4032 * If the property does not exists, or cannot be decoded, 4033 * then return the defvalue passed in. 4034 * 4035 * This routine always succeeds. 
4036 */ 4037 int64_t 4038 ddi_prop_get_int64(dev_t match_dev, dev_info_t *dip, uint_t flags, 4039 char *name, int64_t defvalue) 4040 { 4041 int64_t data; 4042 uint_t nelements; 4043 int rval; 4044 4045 if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM | 4046 LDI_DEV_T_ANY | DDI_UNBND_DLPI2)) { 4047 #ifdef DEBUG 4048 if (dip != NULL) { 4049 cmn_err(CE_WARN, "ddi_prop_get_int64: invalid flag" 4050 " 0x%x (prop = %s, node = %s%d)", flags, 4051 name, ddi_driver_name(dip), ddi_get_instance(dip)); 4052 } 4053 #endif /* DEBUG */ 4054 return (DDI_PROP_INVAL_ARG); 4055 } 4056 4057 if ((rval = ddi_prop_lookup_common(match_dev, dip, 4058 (flags | DDI_PROP_TYPE_INT64 | DDI_PROP_NOTPROM), 4059 name, &data, &nelements, ddi_prop_fm_decode_int64)) 4060 != DDI_PROP_SUCCESS) { 4061 if (rval == DDI_PROP_END_OF_DATA) 4062 data = 1; 4063 else 4064 data = defvalue; 4065 } 4066 return (data); 4067 } 4068 4069 /* 4070 * Get an array of integer property 4071 */ 4072 int 4073 ddi_prop_lookup_int_array(dev_t match_dev, dev_info_t *dip, uint_t flags, 4074 char *name, int **data, uint_t *nelements) 4075 { 4076 if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM | 4077 LDI_DEV_T_ANY | DDI_UNBND_DLPI2)) { 4078 #ifdef DEBUG 4079 if (dip != NULL) { 4080 cmn_err(CE_WARN, "ddi_prop_lookup_int_array: " 4081 "invalid flag 0x%x (prop = %s, node = %s%d)", 4082 flags, name, ddi_driver_name(dip), 4083 ddi_get_instance(dip)); 4084 } 4085 #endif /* DEBUG */ 4086 flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM | 4087 LDI_DEV_T_ANY | DDI_UNBND_DLPI2; 4088 } 4089 4090 return (ddi_prop_lookup_common(match_dev, dip, 4091 (flags | DDI_PROP_TYPE_INT), name, data, 4092 nelements, ddi_prop_fm_decode_ints)); 4093 } 4094 4095 /* 4096 * Get an array of 64 bit integer properties 4097 */ 4098 int 4099 ddi_prop_lookup_int64_array(dev_t match_dev, dev_info_t *dip, uint_t flags, 4100 char *name, int64_t **data, uint_t *nelements) 4101 { 4102 if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM | 4103 LDI_DEV_T_ANY | 
DDI_UNBND_DLPI2)) { 4104 #ifdef DEBUG 4105 if (dip != NULL) { 4106 cmn_err(CE_WARN, "ddi_prop_lookup_int64_array: " 4107 "invalid flag 0x%x (prop = %s, node = %s%d)", 4108 flags, name, ddi_driver_name(dip), 4109 ddi_get_instance(dip)); 4110 } 4111 #endif /* DEBUG */ 4112 return (DDI_PROP_INVAL_ARG); 4113 } 4114 4115 return (ddi_prop_lookup_common(match_dev, dip, 4116 (flags | DDI_PROP_TYPE_INT64 | DDI_PROP_NOTPROM), 4117 name, data, nelements, ddi_prop_fm_decode_int64_array)); 4118 } 4119 4120 /* 4121 * Update a single integer property. If the property exists on the drivers 4122 * property list it updates, else it creates it. 4123 */ 4124 int 4125 ddi_prop_update_int(dev_t match_dev, dev_info_t *dip, 4126 char *name, int data) 4127 { 4128 return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT, 4129 name, &data, 1, ddi_prop_fm_encode_ints)); 4130 } 4131 4132 /* 4133 * Update a single 64 bit integer property. 4134 * Update the driver property list if it exists, else create it. 4135 */ 4136 int 4137 ddi_prop_update_int64(dev_t match_dev, dev_info_t *dip, 4138 char *name, int64_t data) 4139 { 4140 return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT64, 4141 name, &data, 1, ddi_prop_fm_encode_int64)); 4142 } 4143 4144 int 4145 e_ddi_prop_update_int(dev_t match_dev, dev_info_t *dip, 4146 char *name, int data) 4147 { 4148 return (ddi_prop_update_common(match_dev, dip, 4149 DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT, 4150 name, &data, 1, ddi_prop_fm_encode_ints)); 4151 } 4152 4153 int 4154 e_ddi_prop_update_int64(dev_t match_dev, dev_info_t *dip, 4155 char *name, int64_t data) 4156 { 4157 return (ddi_prop_update_common(match_dev, dip, 4158 DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT64, 4159 name, &data, 1, ddi_prop_fm_encode_int64)); 4160 } 4161 4162 /* 4163 * Update an array of integer property. If the property exists on the drivers 4164 * property list it updates, else it creates it. 
4165 */ 4166 int 4167 ddi_prop_update_int_array(dev_t match_dev, dev_info_t *dip, 4168 char *name, int *data, uint_t nelements) 4169 { 4170 return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT, 4171 name, data, nelements, ddi_prop_fm_encode_ints)); 4172 } 4173 4174 /* 4175 * Update an array of 64 bit integer properties. 4176 * Update the driver property list if it exists, else create it. 4177 */ 4178 int 4179 ddi_prop_update_int64_array(dev_t match_dev, dev_info_t *dip, 4180 char *name, int64_t *data, uint_t nelements) 4181 { 4182 return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT64, 4183 name, data, nelements, ddi_prop_fm_encode_int64)); 4184 } 4185 4186 int 4187 e_ddi_prop_update_int64_array(dev_t match_dev, dev_info_t *dip, 4188 char *name, int64_t *data, uint_t nelements) 4189 { 4190 return (ddi_prop_update_common(match_dev, dip, 4191 DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT64, 4192 name, data, nelements, ddi_prop_fm_encode_int64)); 4193 } 4194 4195 int 4196 e_ddi_prop_update_int_array(dev_t match_dev, dev_info_t *dip, 4197 char *name, int *data, uint_t nelements) 4198 { 4199 return (ddi_prop_update_common(match_dev, dip, 4200 DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT, 4201 name, data, nelements, ddi_prop_fm_encode_ints)); 4202 } 4203 4204 /* 4205 * Get a single string property. 
4206 */ 4207 int 4208 ddi_prop_lookup_string(dev_t match_dev, dev_info_t *dip, uint_t flags, 4209 char *name, char **data) 4210 { 4211 uint_t x; 4212 4213 if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM | 4214 LDI_DEV_T_ANY | DDI_UNBND_DLPI2)) { 4215 #ifdef DEBUG 4216 if (dip != NULL) { 4217 cmn_err(CE_WARN, "%s: invalid flag 0x%x " 4218 "(prop = %s, node = %s%d); invalid bits ignored", 4219 "ddi_prop_lookup_string", flags, name, 4220 ddi_driver_name(dip), ddi_get_instance(dip)); 4221 } 4222 #endif /* DEBUG */ 4223 flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM | 4224 LDI_DEV_T_ANY | DDI_UNBND_DLPI2; 4225 } 4226 4227 return (ddi_prop_lookup_common(match_dev, dip, 4228 (flags | DDI_PROP_TYPE_STRING), name, data, 4229 &x, ddi_prop_fm_decode_string)); 4230 } 4231 4232 /* 4233 * Get an array of strings property. 4234 */ 4235 int 4236 ddi_prop_lookup_string_array(dev_t match_dev, dev_info_t *dip, uint_t flags, 4237 char *name, char ***data, uint_t *nelements) 4238 { 4239 if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM | 4240 LDI_DEV_T_ANY | DDI_UNBND_DLPI2)) { 4241 #ifdef DEBUG 4242 if (dip != NULL) { 4243 cmn_err(CE_WARN, "ddi_prop_lookup_string_array: " 4244 "invalid flag 0x%x (prop = %s, node = %s%d)", 4245 flags, name, ddi_driver_name(dip), 4246 ddi_get_instance(dip)); 4247 } 4248 #endif /* DEBUG */ 4249 flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM | 4250 LDI_DEV_T_ANY | DDI_UNBND_DLPI2; 4251 } 4252 4253 return (ddi_prop_lookup_common(match_dev, dip, 4254 (flags | DDI_PROP_TYPE_STRING), name, data, 4255 nelements, ddi_prop_fm_decode_strings)); 4256 } 4257 4258 /* 4259 * Update a single string property. 
4260 */ 4261 int 4262 ddi_prop_update_string(dev_t match_dev, dev_info_t *dip, 4263 char *name, char *data) 4264 { 4265 return (ddi_prop_update_common(match_dev, dip, 4266 DDI_PROP_TYPE_STRING, name, &data, 1, 4267 ddi_prop_fm_encode_string)); 4268 } 4269 4270 int 4271 e_ddi_prop_update_string(dev_t match_dev, dev_info_t *dip, 4272 char *name, char *data) 4273 { 4274 return (ddi_prop_update_common(match_dev, dip, 4275 DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_STRING, 4276 name, &data, 1, ddi_prop_fm_encode_string)); 4277 } 4278 4279 4280 /* 4281 * Update an array of strings property. 4282 */ 4283 int 4284 ddi_prop_update_string_array(dev_t match_dev, dev_info_t *dip, 4285 char *name, char **data, uint_t nelements) 4286 { 4287 return (ddi_prop_update_common(match_dev, dip, 4288 DDI_PROP_TYPE_STRING, name, data, nelements, 4289 ddi_prop_fm_encode_strings)); 4290 } 4291 4292 int 4293 e_ddi_prop_update_string_array(dev_t match_dev, dev_info_t *dip, 4294 char *name, char **data, uint_t nelements) 4295 { 4296 return (ddi_prop_update_common(match_dev, dip, 4297 DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_STRING, 4298 name, data, nelements, 4299 ddi_prop_fm_encode_strings)); 4300 } 4301 4302 4303 /* 4304 * Get an array of bytes property. 
4305 */ 4306 int 4307 ddi_prop_lookup_byte_array(dev_t match_dev, dev_info_t *dip, uint_t flags, 4308 char *name, uchar_t **data, uint_t *nelements) 4309 { 4310 if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM | 4311 LDI_DEV_T_ANY | DDI_UNBND_DLPI2)) { 4312 #ifdef DEBUG 4313 if (dip != NULL) { 4314 cmn_err(CE_WARN, "ddi_prop_lookup_byte_array: " 4315 " invalid flag 0x%x (prop = %s, node = %s%d)", 4316 flags, name, ddi_driver_name(dip), 4317 ddi_get_instance(dip)); 4318 } 4319 #endif /* DEBUG */ 4320 flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM | 4321 LDI_DEV_T_ANY | DDI_UNBND_DLPI2; 4322 } 4323 4324 return (ddi_prop_lookup_common(match_dev, dip, 4325 (flags | DDI_PROP_TYPE_BYTE), name, data, 4326 nelements, ddi_prop_fm_decode_bytes)); 4327 } 4328 4329 /* 4330 * Update an array of bytes property. 4331 */ 4332 int 4333 ddi_prop_update_byte_array(dev_t match_dev, dev_info_t *dip, 4334 char *name, uchar_t *data, uint_t nelements) 4335 { 4336 if (nelements == 0) 4337 return (DDI_PROP_INVAL_ARG); 4338 4339 return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_BYTE, 4340 name, data, nelements, ddi_prop_fm_encode_bytes)); 4341 } 4342 4343 4344 int 4345 e_ddi_prop_update_byte_array(dev_t match_dev, dev_info_t *dip, 4346 char *name, uchar_t *data, uint_t nelements) 4347 { 4348 if (nelements == 0) 4349 return (DDI_PROP_INVAL_ARG); 4350 4351 return (ddi_prop_update_common(match_dev, dip, 4352 DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_BYTE, 4353 name, data, nelements, ddi_prop_fm_encode_bytes)); 4354 } 4355 4356 4357 /* 4358 * ddi_prop_remove_common: Undefine a managed property: 4359 * Input dev_t must match dev_t when defined. 4360 * Returns DDI_PROP_NOT_FOUND, possibly. 4361 * DDI_PROP_INVAL_ARG is also possible if dev is 4362 * DDI_DEV_T_ANY or incoming name is the NULL string. 
4363 */ 4364 int 4365 ddi_prop_remove_common(dev_t dev, dev_info_t *dip, char *name, int flag) 4366 { 4367 ddi_prop_t **list_head = &(DEVI(dip)->devi_drv_prop_ptr); 4368 ddi_prop_t *propp; 4369 ddi_prop_t *lastpropp = NULL; 4370 4371 if ((dev == DDI_DEV_T_ANY) || (name == (char *)0) || 4372 (strlen(name) == 0)) { 4373 return (DDI_PROP_INVAL_ARG); 4374 } 4375 4376 if (flag & DDI_PROP_SYSTEM_DEF) 4377 list_head = &(DEVI(dip)->devi_sys_prop_ptr); 4378 else if (flag & DDI_PROP_HW_DEF) 4379 list_head = &(DEVI(dip)->devi_hw_prop_ptr); 4380 4381 mutex_enter(&(DEVI(dip)->devi_lock)); 4382 4383 for (propp = *list_head; propp != NULL; propp = propp->prop_next) { 4384 if (DDI_STRSAME(propp->prop_name, name) && 4385 (dev == propp->prop_dev)) { 4386 /* 4387 * Unlink this propp allowing for it to 4388 * be first in the list: 4389 */ 4390 4391 if (lastpropp == NULL) 4392 *list_head = propp->prop_next; 4393 else 4394 lastpropp->prop_next = propp->prop_next; 4395 4396 mutex_exit(&(DEVI(dip)->devi_lock)); 4397 4398 /* 4399 * Free memory and return... 4400 */ 4401 kmem_free(propp->prop_name, 4402 strlen(propp->prop_name) + 1); 4403 if (propp->prop_len != 0) 4404 kmem_free(propp->prop_val, propp->prop_len); 4405 kmem_free(propp, sizeof (ddi_prop_t)); 4406 return (DDI_PROP_SUCCESS); 4407 } 4408 lastpropp = propp; 4409 } 4410 mutex_exit(&(DEVI(dip)->devi_lock)); 4411 return (DDI_PROP_NOT_FOUND); 4412 } 4413 4414 int 4415 ddi_prop_remove(dev_t dev, dev_info_t *dip, char *name) 4416 { 4417 return (ddi_prop_remove_common(dev, dip, name, 0)); 4418 } 4419 4420 int 4421 e_ddi_prop_remove(dev_t dev, dev_info_t *dip, char *name) 4422 { 4423 return (ddi_prop_remove_common(dev, dip, name, DDI_PROP_SYSTEM_DEF)); 4424 } 4425 4426 /* 4427 * e_ddi_prop_list_delete: remove a list of properties 4428 * Note that the caller needs to provide the required protection 4429 * (eg. 
devi_lock if these properties are still attached to a devi) 4430 */ 4431 void 4432 e_ddi_prop_list_delete(ddi_prop_t *props) 4433 { 4434 i_ddi_prop_list_delete(props); 4435 } 4436 4437 /* 4438 * ddi_prop_remove_all_common: 4439 * Used before unloading a driver to remove 4440 * all properties. (undefines all dev_t's props.) 4441 * Also removes `explicitly undefined' props. 4442 * No errors possible. 4443 */ 4444 void 4445 ddi_prop_remove_all_common(dev_info_t *dip, int flag) 4446 { 4447 ddi_prop_t **list_head; 4448 4449 mutex_enter(&(DEVI(dip)->devi_lock)); 4450 if (flag & DDI_PROP_SYSTEM_DEF) { 4451 list_head = &(DEVI(dip)->devi_sys_prop_ptr); 4452 } else if (flag & DDI_PROP_HW_DEF) { 4453 list_head = &(DEVI(dip)->devi_hw_prop_ptr); 4454 } else { 4455 list_head = &(DEVI(dip)->devi_drv_prop_ptr); 4456 } 4457 i_ddi_prop_list_delete(*list_head); 4458 *list_head = NULL; 4459 mutex_exit(&(DEVI(dip)->devi_lock)); 4460 } 4461 4462 4463 /* 4464 * ddi_prop_remove_all: Remove all driver prop definitions. 4465 */ 4466 4467 void 4468 ddi_prop_remove_all(dev_info_t *dip) 4469 { 4470 ddi_prop_remove_all_common(dip, 0); 4471 } 4472 4473 /* 4474 * e_ddi_prop_remove_all: Remove all system prop definitions. 4475 */ 4476 4477 void 4478 e_ddi_prop_remove_all(dev_info_t *dip) 4479 { 4480 ddi_prop_remove_all_common(dip, (int)DDI_PROP_SYSTEM_DEF); 4481 } 4482 4483 4484 /* 4485 * ddi_prop_undefine: Explicitly undefine a property. Property 4486 * searches which match this property return 4487 * the error code DDI_PROP_UNDEFINED. 4488 * 4489 * Use ddi_prop_remove to negate effect of 4490 * ddi_prop_undefine 4491 * 4492 * See above for error returns. 
4493 */ 4494 4495 int 4496 ddi_prop_undefine(dev_t dev, dev_info_t *dip, int flag, char *name) 4497 { 4498 if (!(flag & DDI_PROP_CANSLEEP)) 4499 flag |= DDI_PROP_DONTSLEEP; 4500 return (ddi_prop_update_common(dev, dip, 4501 (flag | DDI_PROP_STACK_CREATE | DDI_PROP_UNDEF_IT | 4502 DDI_PROP_TYPE_ANY), name, NULL, 0, ddi_prop_fm_encode_bytes)); 4503 } 4504 4505 int 4506 e_ddi_prop_undefine(dev_t dev, dev_info_t *dip, int flag, char *name) 4507 { 4508 if (!(flag & DDI_PROP_CANSLEEP)) 4509 flag |= DDI_PROP_DONTSLEEP; 4510 return (ddi_prop_update_common(dev, dip, 4511 (flag | DDI_PROP_SYSTEM_DEF | DDI_PROP_STACK_CREATE | 4512 DDI_PROP_UNDEF_IT | DDI_PROP_TYPE_ANY), 4513 name, NULL, 0, ddi_prop_fm_encode_bytes)); 4514 } 4515 4516 /* 4517 * Code to search hardware layer (PROM), if it exists, on behalf of child. 4518 * 4519 * if input dip != child_dip, then call is on behalf of child 4520 * to search PROM, do it via ddi_prop_search_common() and ascend only 4521 * if allowed. 4522 * 4523 * if input dip == ch_dip (child_dip), call is on behalf of root driver, 4524 * to search for PROM defined props only. 4525 * 4526 * Note that the PROM search is done only if the requested dev 4527 * is either DDI_DEV_T_ANY or DDI_DEV_T_NONE. PROM properties 4528 * have no associated dev, thus are automatically associated with 4529 * DDI_DEV_T_NONE. 4530 * 4531 * Modifying flag DDI_PROP_NOTPROM inhibits the search in the h/w layer. 4532 * 4533 * Returns DDI_PROP_FOUND_1275 if found to indicate to framework 4534 * that the property resides in the prom. 4535 */ 4536 int 4537 impl_ddi_bus_prop_op(dev_t dev, dev_info_t *dip, dev_info_t *ch_dip, 4538 ddi_prop_op_t prop_op, int mod_flags, 4539 char *name, caddr_t valuep, int *lengthp) 4540 { 4541 int len; 4542 caddr_t buffer; 4543 4544 /* 4545 * If requested dev is DDI_DEV_T_NONE or DDI_DEV_T_ANY, then 4546 * look in caller's PROM if it's a self identifying device... 
4547 * 4548 * Note that this is very similar to ddi_prop_op, but we 4549 * search the PROM instead of the s/w defined properties, 4550 * and we are called on by the parent driver to do this for 4551 * the child. 4552 */ 4553 4554 if (((dev == DDI_DEV_T_NONE) || (dev == DDI_DEV_T_ANY)) && 4555 ndi_dev_is_prom_node(ch_dip) && 4556 ((mod_flags & DDI_PROP_NOTPROM) == 0)) { 4557 len = prom_getproplen((pnode_t)DEVI(ch_dip)->devi_nodeid, name); 4558 if (len == -1) { 4559 return (DDI_PROP_NOT_FOUND); 4560 } 4561 4562 /* 4563 * If exists only request, we're done 4564 */ 4565 if (prop_op == PROP_EXISTS) { 4566 return (DDI_PROP_FOUND_1275); 4567 } 4568 4569 /* 4570 * If length only request or prop length == 0, get out 4571 */ 4572 if ((prop_op == PROP_LEN) || (len == 0)) { 4573 *lengthp = len; 4574 return (DDI_PROP_FOUND_1275); 4575 } 4576 4577 /* 4578 * Allocate buffer if required... (either way `buffer' 4579 * is receiving address). 4580 */ 4581 4582 switch (prop_op) { 4583 4584 case PROP_LEN_AND_VAL_ALLOC: 4585 4586 buffer = kmem_alloc((size_t)len, 4587 mod_flags & DDI_PROP_CANSLEEP ? 4588 KM_SLEEP : KM_NOSLEEP); 4589 if (buffer == NULL) { 4590 return (DDI_PROP_NO_MEMORY); 4591 } 4592 *(caddr_t *)valuep = buffer; 4593 break; 4594 4595 case PROP_LEN_AND_VAL_BUF: 4596 4597 if (len > (*lengthp)) { 4598 *lengthp = len; 4599 return (DDI_PROP_BUF_TOO_SMALL); 4600 } 4601 4602 buffer = valuep; 4603 break; 4604 4605 default: 4606 break; 4607 } 4608 4609 /* 4610 * Call the PROM function to do the copy. 4611 */ 4612 (void) prom_getprop((pnode_t)DEVI(ch_dip)->devi_nodeid, 4613 name, buffer); 4614 4615 *lengthp = len; /* return the actual length to the caller */ 4616 (void) impl_fix_props(dip, ch_dip, name, len, buffer); 4617 return (DDI_PROP_FOUND_1275); 4618 } 4619 4620 return (DDI_PROP_NOT_FOUND); 4621 } 4622 4623 /* 4624 * The ddi_bus_prop_op default bus nexus prop op function. 
4625 * 4626 * Code to search hardware layer (PROM), if it exists, 4627 * on behalf of child, then, if appropriate, ascend and check 4628 * my own software defined properties... 4629 */ 4630 int 4631 ddi_bus_prop_op(dev_t dev, dev_info_t *dip, dev_info_t *ch_dip, 4632 ddi_prop_op_t prop_op, int mod_flags, 4633 char *name, caddr_t valuep, int *lengthp) 4634 { 4635 int error; 4636 4637 error = impl_ddi_bus_prop_op(dev, dip, ch_dip, prop_op, mod_flags, 4638 name, valuep, lengthp); 4639 4640 if (error == DDI_PROP_SUCCESS || error == DDI_PROP_FOUND_1275 || 4641 error == DDI_PROP_BUF_TOO_SMALL) 4642 return (error); 4643 4644 if (error == DDI_PROP_NO_MEMORY) { 4645 cmn_err(CE_CONT, prop_no_mem_msg, name); 4646 return (DDI_PROP_NO_MEMORY); 4647 } 4648 4649 /* 4650 * Check the 'options' node as a last resort 4651 */ 4652 if ((mod_flags & DDI_PROP_DONTPASS) != 0) 4653 return (DDI_PROP_NOT_FOUND); 4654 4655 if (ch_dip == ddi_root_node()) { 4656 /* 4657 * As a last resort, when we've reached 4658 * the top and still haven't found the 4659 * property, see if the desired property 4660 * is attached to the options node. 4661 * 4662 * The options dip is attached right after boot. 4663 */ 4664 ASSERT(options_dip != NULL); 4665 /* 4666 * Force the "don't pass" flag to *just* see 4667 * what the options node has to offer. 4668 */ 4669 return (ddi_prop_search_common(dev, options_dip, prop_op, 4670 mod_flags|DDI_PROP_DONTPASS, name, valuep, 4671 (uint_t *)lengthp)); 4672 } 4673 4674 /* 4675 * Otherwise, continue search with parent's s/w defined properties... 4676 * NOTE: Using `dip' in following call increments the level. 4677 */ 4678 4679 return (ddi_prop_search_common(dev, dip, prop_op, mod_flags, 4680 name, valuep, (uint_t *)lengthp)); 4681 } 4682 4683 /* 4684 * External property functions used by other parts of the kernel... 4685 */ 4686 4687 /* 4688 * e_ddi_getlongprop: See comments for ddi_get_longprop. 
4689 */ 4690 4691 int 4692 e_ddi_getlongprop(dev_t dev, vtype_t type, char *name, int flags, 4693 caddr_t valuep, int *lengthp) 4694 { 4695 _NOTE(ARGUNUSED(type)) 4696 dev_info_t *devi; 4697 ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_ALLOC; 4698 int error; 4699 4700 if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL) 4701 return (DDI_PROP_NOT_FOUND); 4702 4703 error = cdev_prop_op(dev, devi, prop_op, flags, name, valuep, lengthp); 4704 ddi_release_devi(devi); 4705 return (error); 4706 } 4707 4708 /* 4709 * e_ddi_getlongprop_buf: See comments for ddi_getlongprop_buf. 4710 */ 4711 4712 int 4713 e_ddi_getlongprop_buf(dev_t dev, vtype_t type, char *name, int flags, 4714 caddr_t valuep, int *lengthp) 4715 { 4716 _NOTE(ARGUNUSED(type)) 4717 dev_info_t *devi; 4718 ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_BUF; 4719 int error; 4720 4721 if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL) 4722 return (DDI_PROP_NOT_FOUND); 4723 4724 error = cdev_prop_op(dev, devi, prop_op, flags, name, valuep, lengthp); 4725 ddi_release_devi(devi); 4726 return (error); 4727 } 4728 4729 /* 4730 * e_ddi_getprop: See comments for ddi_getprop. 4731 */ 4732 int 4733 e_ddi_getprop(dev_t dev, vtype_t type, char *name, int flags, int defvalue) 4734 { 4735 _NOTE(ARGUNUSED(type)) 4736 dev_info_t *devi; 4737 ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_BUF; 4738 int propvalue = defvalue; 4739 int proplength = sizeof (int); 4740 int error; 4741 4742 if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL) 4743 return (defvalue); 4744 4745 error = cdev_prop_op(dev, devi, prop_op, 4746 flags, name, (caddr_t)&propvalue, &proplength); 4747 ddi_release_devi(devi); 4748 4749 if ((error == DDI_PROP_SUCCESS) && (proplength == 0)) 4750 propvalue = 1; 4751 4752 return (propvalue); 4753 } 4754 4755 /* 4756 * e_ddi_getprop_int64: 4757 * 4758 * This is a typed interfaces, but predates typed properties. With the 4759 * introduction of typed properties the framework tries to ensure 4760 * consistent use of typed interfaces. 
This is why TYPE_INT64 is not 4761 * part of TYPE_ANY. E_ddi_getprop_int64 is a special case where a 4762 * typed interface invokes legacy (non-typed) interfaces: 4763 * cdev_prop_op(), prop_op(9E), ddi_prop_op(9F)). In this case the 4764 * fact that TYPE_INT64 is not part of TYPE_ANY matters. To support 4765 * this type of lookup as a single operation we invoke the legacy 4766 * non-typed interfaces with the special CONSUMER_TYPED bit set. The 4767 * framework ddi_prop_op(9F) implementation is expected to check for 4768 * CONSUMER_TYPED and, if set, expand type bits beyond TYPE_ANY 4769 * (currently TYPE_INT64). 4770 */ 4771 int64_t 4772 e_ddi_getprop_int64(dev_t dev, vtype_t type, char *name, 4773 int flags, int64_t defvalue) 4774 { 4775 _NOTE(ARGUNUSED(type)) 4776 dev_info_t *devi; 4777 ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_BUF; 4778 int64_t propvalue = defvalue; 4779 int proplength = sizeof (propvalue); 4780 int error; 4781 4782 if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL) 4783 return (defvalue); 4784 4785 error = cdev_prop_op(dev, devi, prop_op, flags | 4786 DDI_PROP_CONSUMER_TYPED, name, (caddr_t)&propvalue, &proplength); 4787 ddi_release_devi(devi); 4788 4789 if ((error == DDI_PROP_SUCCESS) && (proplength == 0)) 4790 propvalue = 1; 4791 4792 return (propvalue); 4793 } 4794 4795 /* 4796 * e_ddi_getproplen: See comments for ddi_getproplen. 
4797 */ 4798 int 4799 e_ddi_getproplen(dev_t dev, vtype_t type, char *name, int flags, int *lengthp) 4800 { 4801 _NOTE(ARGUNUSED(type)) 4802 dev_info_t *devi; 4803 ddi_prop_op_t prop_op = PROP_LEN; 4804 int error; 4805 4806 if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL) 4807 return (DDI_PROP_NOT_FOUND); 4808 4809 error = cdev_prop_op(dev, devi, prop_op, flags, name, NULL, lengthp); 4810 ddi_release_devi(devi); 4811 return (error); 4812 } 4813 4814 /* 4815 * Routines to get at elements of the dev_info structure 4816 */ 4817 4818 /* 4819 * ddi_binding_name: Return the driver binding name of the devinfo node 4820 * This is the name the OS used to bind the node to a driver. 4821 */ 4822 char * 4823 ddi_binding_name(dev_info_t *dip) 4824 { 4825 return (DEVI(dip)->devi_binding_name); 4826 } 4827 4828 /* 4829 * ddi_driver_major: Return the major number of the driver that 4830 * the supplied devinfo is bound to (-1 if none) 4831 */ 4832 major_t 4833 ddi_driver_major(dev_info_t *devi) 4834 { 4835 return (DEVI(devi)->devi_major); 4836 } 4837 4838 /* 4839 * ddi_driver_name: Return the normalized driver name. this is the 4840 * actual driver name 4841 */ 4842 const char * 4843 ddi_driver_name(dev_info_t *devi) 4844 { 4845 major_t major; 4846 4847 if ((major = ddi_driver_major(devi)) != (major_t)-1) 4848 return (ddi_major_to_name(major)); 4849 4850 return (ddi_node_name(devi)); 4851 } 4852 4853 /* 4854 * i_ddi_set_binding_name: Set binding name. 4855 * 4856 * Set the binding name to the given name. 4857 * This routine is for use by the ddi implementation, not by drivers. 4858 */ 4859 void 4860 i_ddi_set_binding_name(dev_info_t *dip, char *name) 4861 { 4862 DEVI(dip)->devi_binding_name = name; 4863 4864 } 4865 4866 /* 4867 * ddi_get_name: A synonym of ddi_binding_name() ... returns a name 4868 * the implementation has used to bind the node to a driver. 
4869 */ 4870 char * 4871 ddi_get_name(dev_info_t *dip) 4872 { 4873 return (DEVI(dip)->devi_binding_name); 4874 } 4875 4876 /* 4877 * ddi_node_name: Return the name property of the devinfo node 4878 * This may differ from ddi_binding_name if the node name 4879 * does not define a binding to a driver (i.e. generic names). 4880 */ 4881 char * 4882 ddi_node_name(dev_info_t *dip) 4883 { 4884 return (DEVI(dip)->devi_node_name); 4885 } 4886 4887 4888 /* 4889 * ddi_get_nodeid: Get nodeid stored in dev_info structure. 4890 */ 4891 int 4892 ddi_get_nodeid(dev_info_t *dip) 4893 { 4894 return (DEVI(dip)->devi_nodeid); 4895 } 4896 4897 int 4898 ddi_get_instance(dev_info_t *dip) 4899 { 4900 return (DEVI(dip)->devi_instance); 4901 } 4902 4903 struct dev_ops * 4904 ddi_get_driver(dev_info_t *dip) 4905 { 4906 return (DEVI(dip)->devi_ops); 4907 } 4908 4909 void 4910 ddi_set_driver(dev_info_t *dip, struct dev_ops *devo) 4911 { 4912 DEVI(dip)->devi_ops = devo; 4913 } 4914 4915 /* 4916 * ddi_set_driver_private/ddi_get_driver_private: 4917 * Get/set device driver private data in devinfo. 
 */
/* Stash a driver-private pointer on the devinfo node. */
void
ddi_set_driver_private(dev_info_t *dip, void *data)
{
	DEVI(dip)->devi_driver_data = data;
}

/* Retrieve the driver-private pointer set by ddi_set_driver_private(). */
void *
ddi_get_driver_private(dev_info_t *dip)
{
	return (DEVI(dip)->devi_driver_data);
}

/*
 * ddi_get_parent, ddi_get_child, ddi_get_next_sibling
 */

dev_info_t *
ddi_get_parent(dev_info_t *dip)
{
	return ((dev_info_t *)DEVI(dip)->devi_parent);
}

dev_info_t *
ddi_get_child(dev_info_t *dip)
{
	return ((dev_info_t *)DEVI(dip)->devi_child);
}

dev_info_t *
ddi_get_next_sibling(dev_info_t *dip)
{
	return ((dev_info_t *)DEVI(dip)->devi_sibling);
}

/* Generic 'next' link; see ddi_set_next() below. */
dev_info_t *
ddi_get_next(dev_info_t *dip)
{
	return ((dev_info_t *)DEVI(dip)->devi_next);
}

void
ddi_set_next(dev_info_t *dip, dev_info_t *nextdip)
{
	DEVI(dip)->devi_next = DEVI(nextdip);
}

/*
 * ddi_root_node: Return root node of devinfo tree
 */

dev_info_t *
ddi_root_node(void)
{
	extern dev_info_t *top_devinfo;

	return (top_devinfo);
}

/*
 * Miscellaneous functions:
 */

/*
 * Implementation specific hooks
 */

void
ddi_report_dev(dev_info_t *d)
{
	char *b;

	/* Let the parent nexus report the device first. */
	(void) ddi_ctlops(d, d, DDI_CTLOPS_REPORTDEV, (void *)0, (void *)0);

	/*
	 * If this devinfo node has cb_ops, it's implicitly accessible from
	 * userland, so we print its full name together with the instance
	 * number 'abbreviation' that the driver may use internally.
	 */
	if (DEVI(d)->devi_ops->devo_cb_ops != (struct cb_ops *)0 &&
	    (b = kmem_zalloc(MAXPATHLEN, KM_NOSLEEP))) {
		cmn_err(CE_CONT, "?%s%d is %s\n",
		    ddi_driver_name(d), ddi_get_instance(d),
		    ddi_pathname(d, b));
		kmem_free(b, MAXPATHLEN);
	}
}

/*
 * ddi_ctlops() is described in the assembler not to buy a new register
 * window when it's called and can reduce cost in climbing the device tree
 * without using the tail call optimization.
 */
int
ddi_dev_regsize(dev_info_t *dev, uint_t rnumber, off_t *result)
{
	int ret;

	ret = ddi_ctlops(dev, dev, DDI_CTLOPS_REGSIZE,
	    (void *)&rnumber, (void *)result);

	/* Normalize any non-success code to DDI_FAILURE for callers. */
	return (ret == DDI_SUCCESS ? DDI_SUCCESS : DDI_FAILURE);
}

/* Number of register sets for the device, via the parent nexus. */
int
ddi_dev_nregs(dev_info_t *dev, int *result)
{
	return (ddi_ctlops(dev, dev, DDI_CTLOPS_NREGS, 0, (void *)result));
}

/* Ask the parent nexus whether the device is self-identifying. */
int
ddi_dev_is_sid(dev_info_t *d)
{
	return (ddi_ctlops(d, d, DDI_CTLOPS_SIDDEV, (void *)0, (void *)0));
}

/* Ask the parent nexus whether the device is slave-only. */
int
ddi_slaveonly(dev_info_t *d)
{
	return (ddi_ctlops(d, d, DDI_CTLOPS_SLAVEONLY, (void *)0, (void *)0));
}

/* Ask the parent nexus about affinity between two devices. */
int
ddi_dev_affinity(dev_info_t *a, dev_info_t *b)
{
	return (ddi_ctlops(a, a, DDI_CTLOPS_AFFINITY, (void *)b, (void *)0));
}

/*
 * A driver is a STREAMS driver iff it is attached and its cb_ops
 * carries a non-NULL streamtab pointer.
 */
int
ddi_streams_driver(dev_info_t *dip)
{
	if (i_ddi_devi_attached(dip) &&
	    (DEVI(dip)->devi_ops->devo_cb_ops != NULL) &&
	    (DEVI(dip)->devi_ops->devo_cb_ops->cb_str != NULL))
		return (DDI_SUCCESS);
	return (DDI_FAILURE);
}

/*
 * callback free list
 */

static int ncallbacks;		/* size of the static (L2) callback pool */
static int nc_low = 170;	/* pool size for small-memory machines */
static int nc_med = 512;	/* pool size for mid-size machines */
static int nc_high = 2048;	/* pool size for large machines */
static struct ddi_callback *callbackq;		/* the static pool itself */
static struct ddi_callback *callbackqfree;	/* free list into the pool */

/*
 * set/run callback lists
 */
struct cbstats {
	kstat_named_t cb_asked;
	kstat_named_t cb_new;
	kstat_named_t cb_run;
	kstat_named_t cb_delete;
	kstat_named_t cb_maxreq;
	kstat_named_t cb_maxlist;
	kstat_named_t cb_alloc;
	kstat_named_t cb_runouts;
	kstat_named_t cb_L2;
	kstat_named_t cb_grow;
} cbstats = {
	{"asked", KSTAT_DATA_UINT32},
	{"new", KSTAT_DATA_UINT32},
	{"run", KSTAT_DATA_UINT32},
	{"delete", KSTAT_DATA_UINT32},
	{"maxreq", KSTAT_DATA_UINT32},
	{"maxlist", KSTAT_DATA_UINT32},
	{"alloc", KSTAT_DATA_UINT32},
	{"runouts", KSTAT_DATA_UINT32},
	{"L2", KSTAT_DATA_UINT32},
	{"grow", KSTAT_DATA_UINT32},
};

/* Shorthand accessors for the 32-bit counter values inside cbstats. */
#define	nc_asked	cb_asked.value.ui32
#define	nc_new		cb_new.value.ui32
#define	nc_run		cb_run.value.ui32
#define	nc_delete	cb_delete.value.ui32
#define	nc_maxreq	cb_maxreq.value.ui32
#define	nc_maxlist	cb_maxlist.value.ui32
#define	nc_alloc	cb_alloc.value.ui32
#define	nc_runouts	cb_runouts.value.ui32
#define	nc_L2		cb_L2.value.ui32
#define	nc_grow		cb_grow.value.ui32

/* Protects the callback lists, free list, and cbstats counters. */
static kmutex_t ddi_callback_mutex;

/*
 * callbacks are handled using a L1/L2 cache. The L1 cache
 * comes out of kmem_cache_alloc and can expand/shrink dynamically. If
 * we can't get callbacks from the L1 cache [because pageout is doing
 * I/O at the time freemem is 0], we allocate callbacks out of the
 * L2 cache. The L2 cache is static and depends on the memory size.
 * [We might also count the number of devices at probe time and
 * allocate one structure per device and adjust for deferred attach]
 */
void
impl_ddi_callback_init(void)
{
	int i;
	uint_t physmegs;
	kstat_t *ksp;

	/* Size the static L2 pool by physical memory (in megabytes). */
	physmegs = physmem >> (20 - PAGESHIFT);
	if (physmegs < 48) {
		ncallbacks = nc_low;
	} else if (physmegs < 128) {
		ncallbacks = nc_med;
	} else {
		ncallbacks = nc_high;
	}

	/*
	 * init free list
	 */
	callbackq = kmem_zalloc(
	    ncallbacks * sizeof (struct ddi_callback), KM_SLEEP);
	for (i = 0; i < ncallbacks-1; i++)
		callbackq[i].c_nfree = &callbackq[i+1];
	callbackqfree = callbackq;

	/* init kstats */
	if (ksp = kstat_create("unix", 0, "cbstats", "misc", KSTAT_TYPE_NAMED,
	    sizeof (cbstats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL)) {
		ksp->ks_data = (void *) &cbstats;
		kstat_install(ksp);
	}

}

/*
 * Add (funcp, arg) to the callback list headed by *listid, coalescing
 * with an existing entry for the same function/argument pair if present.
 * Caller must hold ddi_callback_mutex.
 */
static void
callback_insert(int (*funcp)(caddr_t), caddr_t arg, uintptr_t *listid,
    int count)
{
	struct ddi_callback *list, *marker, *new;
	size_t size = sizeof (struct ddi_callback);

	/* Coalesce with an existing entry for the same (funcp, arg). */
	list = marker = (struct ddi_callback *)*listid;
	while (list != NULL) {
		if (list->c_call == funcp && list->c_arg == arg) {
			list->c_count += count;
			return;
		}
		marker = list;
		list = list->c_nlist;
	}
	/*
	 * L1 (kmem) allocation first; fall back to the static L2 pool,
	 * and as a last resort to kmem_alloc_tryhard (KM_PANIC).
	 */
	new = kmem_alloc(size, KM_NOSLEEP);
	if (new == NULL) {
		new = callbackqfree;
		if (new == NULL) {
			new = kmem_alloc_tryhard(sizeof (struct ddi_callback),
			    &size, KM_NOSLEEP | KM_PANIC);
			cbstats.nc_grow++;
		} else {
			callbackqfree = new->c_nfree;
			cbstats.nc_L2++;
		}
	}
	/* Link at the tail (marker) or make it the new list head. */
	if (marker != NULL) {
		marker->c_nlist = new;
	} else {
		*listid = (uintptr_t)new;
	}
	new->c_size = size;
	new->c_nlist = NULL;
	new->c_call = funcp;
	new->c_arg = arg;
	new->c_count = count;
	cbstats.nc_new++;
	cbstats.nc_alloc++;
	if (cbstats.nc_alloc > cbstats.nc_maxlist)
		cbstats.nc_maxlist = cbstats.nc_alloc;
}

/* Register a callback to be run later via ddi_run_callback(). */
void
ddi_set_callback(int (*funcp)(caddr_t), caddr_t arg, uintptr_t *listid)
{
	mutex_enter(&ddi_callback_mutex);
	cbstats.nc_asked++;
	if ((cbstats.nc_asked - cbstats.nc_run) > cbstats.nc_maxreq)
		cbstats.nc_maxreq = (cbstats.nc_asked - cbstats.nc_run);
	(void) callback_insert(funcp, arg, listid, 1);
	mutex_exit(&ddi_callback_mutex);
}

/*
 * Drain the callback list headed by *Queue (softcall handler).
 * Each entry is unlinked and freed (back to the L2 pool if it came
 * from there) before its function is invoked without the mutex held;
 * a callback returning 0 is re-inserted to be retried later.
 */
static void
real_callback_run(void *Queue)
{
	int (*funcp)(caddr_t);
	caddr_t arg;
	int count, rval;
	uintptr_t *listid;
	struct ddi_callback *list, *marker;
	int check_pending = 1;
	int pending = 0;

	do {
		mutex_enter(&ddi_callback_mutex);
		listid = Queue;
		list = (struct ddi_callback *)*listid;
		if (list == NULL) {
			mutex_exit(&ddi_callback_mutex);
			return;
		}
		/* On first pass, total up the outstanding request count. */
		if (check_pending) {
			marker = list;
			while (marker != NULL) {
				pending += marker->c_count;
				marker = marker->c_nlist;
			}
			check_pending = 0;
		}
		ASSERT(pending > 0);
		ASSERT(list->c_count > 0);
		funcp = list->c_call;
		arg = list->c_arg;
		count = list->c_count;
		*(uintptr_t *)Queue = (uintptr_t)list->c_nlist;
		/* Return pool entries to the L2 free list, others to kmem. */
		if (list >= &callbackq[0] &&
		    list <= &callbackq[ncallbacks-1]) {
			list->c_nfree = callbackqfree;
			callbackqfree = list;
		} else
			kmem_free(list, list->c_size);

		cbstats.nc_delete++;
		cbstats.nc_alloc--;
		mutex_exit(&ddi_callback_mutex);

		/*
		 * Run the callback (unlocked).  A zero return means the
		 * resource is still unavailable: re-queue the remaining
		 * count and stop retrying this entry for now.
		 */
		do {
			if ((rval = (*funcp)(arg)) == 0) {
				pending -= count;
				mutex_enter(&ddi_callback_mutex);
				(void) callback_insert(funcp, arg, listid,
				    count);
				cbstats.nc_runouts++;
			} else {
				pending--;
				mutex_enter(&ddi_callback_mutex);
				cbstats.nc_run++;
			}
			mutex_exit(&ddi_callback_mutex);
		} while (rval != 0 && (--count > 0));
	} while (pending > 0);
}

/* Schedule the callback list to be drained from softcall context. */
void
ddi_run_callback(uintptr_t *listid)
{
	softcall(real_callback_run, listid);
}

/* Stub getinfo helper: always reports "no devinfo". */
dev_info_t *
nodevinfo(dev_t dev, int otyp)
{
	_NOTE(ARGUNUSED(dev, otyp))
	return ((dev_info_t *)0);
}

/*
 * A driver should support its own getinfo(9E) entry point. This function
 * is provided as a convenience for ON drivers that don't expect their
 * getinfo(9E) entry point to be called. A driver that uses this must not
 * call ddi_create_minor_node.
 */
int
ddi_no_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	_NOTE(ARGUNUSED(dip, infocmd, arg, result))
	return (DDI_FAILURE);
}

/*
 * A driver should support its own getinfo(9E) entry point. This function
 * is provided as a convenience for ON drivers where the minor number
 * is the instance. Drivers that do not have 1:1 mapping must implement
 * their own getinfo(9E) function.
 */
int
ddi_getinfo_1to1(dev_info_t *dip, ddi_info_cmd_t infocmd,
    void *arg, void **result)
{
	_NOTE(ARGUNUSED(dip))
	int instance;

	if (infocmd != DDI_INFO_DEVT2INSTANCE)
		return (DDI_FAILURE);

	/* Minor number IS the instance for drivers using this helper. */
	instance = getminor((dev_t)(uintptr_t)arg);
	*result = (void *)(uintptr_t)instance;
	return (DDI_SUCCESS);
}

/* Attach/detach stub that always fails. */
int
ddifail(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	_NOTE(ARGUNUSED(devi, cmd))
	return (DDI_FAILURE);
}

/* DMA-op stubs for nexi that do not support DMA mapping. */
int
ddi_no_dma_map(dev_info_t *dip, dev_info_t *rdip,
    struct ddi_dma_req *dmareqp, ddi_dma_handle_t *handlep)
{
	_NOTE(ARGUNUSED(dip, rdip, dmareqp, handlep))
	return (DDI_DMA_NOMAPPING);
}

int
ddi_no_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
    int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
{
	_NOTE(ARGUNUSED(dip, rdip, attr, waitfp, arg, handlep))
	return (DDI_DMA_BADATTR);
}

int
ddi_no_dma_freehdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle)
{
	_NOTE(ARGUNUSED(dip, rdip, handle))
	return (DDI_FAILURE);
}

int
ddi_no_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
    ddi_dma_cookie_t *cp, uint_t *ccountp)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, dmareq, cp, ccountp))
	return (DDI_DMA_NOMAPPING);
}

int
ddi_no_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle)
{
	_NOTE(ARGUNUSED(dip, rdip, handle))
	return (DDI_FAILURE);
}

int
ddi_no_dma_flush(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, off_t off, size_t len,
    uint_t cache_flags)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, off, len, cache_flags))
	return (DDI_FAILURE);
}

int
ddi_no_dma_win(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, uint_t win, off_t *offp,
    size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, win, offp, lenp, cookiep, ccountp))
	return (DDI_FAILURE);
}

int
ddi_no_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
    off_t *offp, size_t *lenp, caddr_t *objp, uint_t flags)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, request, offp, lenp, objp, flags))
	return (DDI_FAILURE);
}

/* Generic do-nothing entry point. */
void
ddivoid(void)
{}

/* chpoll stub for drivers that do not support polling. */
int
nochpoll(dev_t dev, short events, int anyyet, short *reventsp,
    struct pollhead **pollhdrp)
{
	_NOTE(ARGUNUSED(dev, events, anyyet, reventsp, pollhdrp))
	return (ENXIO);
}

/* Return the credentials of the current thread. */
cred_t *
ddi_get_cred(void)
{
	return (CRED());
}

/* Return the current lbolt (clock ticks since boot). */
clock_t
ddi_get_lbolt(void)
{
	return (lbolt);
}

/*
 * Return wall-clock seconds; fall back to the TOD chip if the
 * cached high-resolution time is not yet initialized (returns 0).
 */
time_t
ddi_get_time(void)
{
	time_t now;

	if ((now = gethrestime_sec()) == 0) {
		timestruc_t ts;
		mutex_enter(&tod_lock);
		ts = tod_get();
		mutex_exit(&tod_lock);
		return (ts.tv_sec);
	} else {
		return (now);
	}
}

/* Return the process id of the current thread's process. */
pid_t
ddi_get_pid(void)
{
	return (ttoproc(curthread)->p_pid);
}

/* Return the current thread's kernel thread id. */
kt_did_t
ddi_get_kt_did(void)
{
	return (curthread->t_did);
}

/*
 * This function returns B_TRUE if the caller can reasonably expect that a call
 * to cv_wait_sig(9F), cv_timedwait_sig(9F), or qwait_sig(9F) could be awakened
 * by user-level signal.  If it returns B_FALSE, then the caller should use
 * other means to make certain that the wait will not hang "forever."
 *
 * It does not check the signal mask, nor for reception of any particular
 * signal.
 *
 * Currently, a thread can receive a signal if it's not a kernel thread and it
 * is not in the middle of exit(2) tear-down.
 * Threads that are in that
 * tear-down effectively convert cv_wait_sig to cv_wait, cv_timedwait_sig to
 * cv_timedwait, and qwait_sig to qwait.
 */
boolean_t
ddi_can_receive_sig(void)
{
	proc_t *pp;

	if (curthread->t_proc_flag & TP_LWPEXIT)
		return (B_FALSE);
	if ((pp = ttoproc(curthread)) == NULL)
		return (B_FALSE);
	/* Kernel threads share the kernel address space (kas). */
	return (pp->p_as != &kas);
}

/*
 * Swap bytes in 16-bit [half-]words
 * (a trailing odd byte, if any, is ignored).
 */
void
swab(void *src, void *dst, size_t nbytes)
{
	uchar_t *pf = (uchar_t *)src;
	uchar_t *pt = (uchar_t *)dst;
	uchar_t tmp;
	int nshorts;

	nshorts = nbytes >> 1;

	while (--nshorts >= 0) {
		tmp = *pf++;
		*pt++ = *pf++;
		*pt++ = tmp;
	}
}

/*
 * Append a minor-data record to the tail of the node's minor list,
 * under devi_lock and the DEVI_S_MD_UPDATE busy state.
 */
static void
ddi_append_minor_node(dev_info_t *ddip, struct ddi_minor_data *dmdp)
{
	struct ddi_minor_data *dp;

	mutex_enter(&(DEVI(ddip)->devi_lock));
	i_devi_enter(ddip, DEVI_S_MD_UPDATE, DEVI_S_MD_UPDATE, 1);

	if ((dp = DEVI(ddip)->devi_minor) == (struct ddi_minor_data *)NULL) {
		DEVI(ddip)->devi_minor = dmdp;
	} else {
		while (dp->next != (struct ddi_minor_data *)NULL)
			dp = dp->next;
		dp->next = dmdp;
	}

	i_devi_exit(ddip, DEVI_S_MD_UPDATE, 1);
	mutex_exit(&(DEVI(ddip)->devi_lock));
}

/*
 * Part of the obsolete SunCluster DDI Hooks.
 * Keep for binary compatibility
 */
minor_t
ddi_getiminor(dev_t dev)
{
	return (getminor(dev));
}

/*
 * Post an ESC_DEVFS_MINOR_CREATE sysevent for a newly created minor
 * node.  Always returns DDI_SUCCESS: a lost event only means /dev may
 * be stale, which the warning message tells the administrator to fix.
 */
static int
i_log_devfs_minor_create(dev_info_t *dip, char *minor_name)
{
	int se_flag;
	int kmem_flag;
	int se_err;
	char *pathname;
	sysevent_t *ev = NULL;
	sysevent_id_t eid;
	sysevent_value_t se_val;
	sysevent_attr_list_t *ev_attr_list = NULL;

	/* determine interrupt context */
	se_flag = (servicing_interrupt()) ? SE_NOSLEEP : SE_SLEEP;
	kmem_flag = (se_flag == SE_SLEEP) ? KM_SLEEP : KM_NOSLEEP;

	i_ddi_di_cache_invalidate(kmem_flag);

#ifdef DEBUG
	if ((se_flag == SE_NOSLEEP) && sunddi_debug) {
		cmn_err(CE_CONT, "ddi_create_minor_node: called from "
		    "interrupt level by driver %s",
		    ddi_driver_name(dip));
	}
#endif /* DEBUG */

	ev = sysevent_alloc(EC_DEVFS, ESC_DEVFS_MINOR_CREATE, EP_DDI, se_flag);
	if (ev == NULL) {
		goto fail;
	}

	pathname = kmem_alloc(MAXPATHLEN, kmem_flag);
	if (pathname == NULL) {
		sysevent_free(ev);
		goto fail;
	}

	(void) ddi_pathname(dip, pathname);
	ASSERT(strlen(pathname));
	se_val.value_type = SE_DATA_TYPE_STRING;
	se_val.value.sv_string = pathname;
	if (sysevent_add_attr(&ev_attr_list, DEVFS_PATHNAME,
	    &se_val, se_flag) != 0) {
		kmem_free(pathname, MAXPATHLEN);
		sysevent_free(ev);
		goto fail;
	}
	kmem_free(pathname, MAXPATHLEN);

	/*
	 * allow for NULL minor names
	 */
	if (minor_name != NULL) {
		se_val.value.sv_string = minor_name;
		if (sysevent_add_attr(&ev_attr_list, DEVFS_MINOR_NAME,
		    &se_val, se_flag) != 0) {
			sysevent_free_attr(ev_attr_list);
			sysevent_free(ev);
			goto fail;
		}
	}

	if (sysevent_attach_attributes(ev, ev_attr_list) != 0) {
		sysevent_free_attr(ev_attr_list);
		sysevent_free(ev);
		goto fail;
	}

	if ((se_err = log_sysevent(ev, se_flag, &eid)) != 0) {
		if (se_err == SE_NO_TRANSPORT) {
			cmn_err(CE_WARN, "/devices or /dev may not be current "
			    "for driver %s (%s). Run devfsadm -i %s",
			    ddi_driver_name(dip), "syseventd not responding",
			    ddi_driver_name(dip));
		} else {
			sysevent_free(ev);
			goto fail;
		}
	}

	sysevent_free(ev);
	return (DDI_SUCCESS);
fail:
	cmn_err(CE_WARN, "/devices or /dev may not be current "
	    "for driver %s. Run devfsadm -i %s",
	    ddi_driver_name(dip), ddi_driver_name(dip));
	return (DDI_SUCCESS);
}

/*
 * failing to remove a minor node is not of interest
 * therefore we do not generate an error message
 */
static int
i_log_devfs_minor_remove(dev_info_t *dip, char *minor_name)
{
	char *pathname;
	sysevent_t *ev;
	sysevent_id_t eid;
	sysevent_value_t se_val;
	sysevent_attr_list_t *ev_attr_list = NULL;

	/*
	 * only log ddi_remove_minor_node() calls outside the scope
	 * of attach/detach reconfigurations and when the dip is
	 * still initialized.
	 */
	if (DEVI_IS_ATTACHING(dip) || DEVI_IS_DETACHING(dip) ||
	    (i_ddi_node_state(dip) < DS_INITIALIZED)) {
		return (DDI_SUCCESS);
	}

	i_ddi_di_cache_invalidate(KM_SLEEP);

	ev = sysevent_alloc(EC_DEVFS, ESC_DEVFS_MINOR_REMOVE, EP_DDI, SE_SLEEP);
	if (ev == NULL) {
		return (DDI_SUCCESS);
	}

	pathname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	if (pathname == NULL) {
		sysevent_free(ev);
		return (DDI_SUCCESS);
	}

	(void) ddi_pathname(dip, pathname);
	ASSERT(strlen(pathname));
	se_val.value_type = SE_DATA_TYPE_STRING;
	se_val.value.sv_string = pathname;
	if (sysevent_add_attr(&ev_attr_list, DEVFS_PATHNAME,
	    &se_val, SE_SLEEP) != 0) {
		kmem_free(pathname, MAXPATHLEN);
		sysevent_free(ev);
		return (DDI_SUCCESS);
	}

	kmem_free(pathname, MAXPATHLEN);

	/*
	 * allow for NULL minor names
	 */
	if (minor_name != NULL) {
		se_val.value.sv_string = minor_name;
		if (sysevent_add_attr(&ev_attr_list, DEVFS_MINOR_NAME,
		    &se_val, SE_SLEEP) != 0) {
			sysevent_free_attr(ev_attr_list);
			goto fail;
		}
	}

	if (sysevent_attach_attributes(ev, ev_attr_list) != 0) {
		sysevent_free_attr(ev_attr_list);
	} else {
		(void) log_sysevent(ev, SE_SLEEP, &eid);
	}
fail:
sysevent_free(ev); 5673 return (DDI_SUCCESS); 5674 } 5675 5676 /* 5677 * Derive the device class of the node. 5678 * Device class names aren't defined yet. Until this is done we use 5679 * devfs event subclass names as device class names. 5680 */ 5681 static int 5682 derive_devi_class(dev_info_t *dip, char *node_type, int flag) 5683 { 5684 int rv = DDI_SUCCESS; 5685 5686 if (i_ddi_devi_class(dip) == NULL) { 5687 if (strncmp(node_type, DDI_NT_BLOCK, 5688 sizeof (DDI_NT_BLOCK) - 1) == 0 && 5689 (node_type[sizeof (DDI_NT_BLOCK) - 1] == '\0' || 5690 node_type[sizeof (DDI_NT_BLOCK) - 1] == ':') && 5691 strcmp(node_type, DDI_NT_FD) != 0) { 5692 5693 rv = i_ddi_set_devi_class(dip, ESC_DISK, flag); 5694 5695 } else if (strncmp(node_type, DDI_NT_NET, 5696 sizeof (DDI_NT_NET) - 1) == 0 && 5697 (node_type[sizeof (DDI_NT_NET) - 1] == '\0' || 5698 node_type[sizeof (DDI_NT_NET) - 1] == ':')) { 5699 5700 rv = i_ddi_set_devi_class(dip, ESC_NETWORK, flag); 5701 5702 } else if (strncmp(node_type, DDI_NT_PRINTER, 5703 sizeof (DDI_NT_PRINTER) - 1) == 0 && 5704 (node_type[sizeof (DDI_NT_PRINTER) - 1] == '\0' || 5705 node_type[sizeof (DDI_NT_PRINTER) - 1] == ':')) { 5706 5707 rv = i_ddi_set_devi_class(dip, ESC_PRINTER, flag); 5708 } 5709 } 5710 5711 return (rv); 5712 } 5713 5714 /* 5715 * Check compliance with PSARC 2003/375: 5716 * 5717 * The name must contain only characters a-z, A-Z, 0-9 or _ and it must not 5718 * exceed IFNAMSIZ (16) characters in length. 5719 */ 5720 static boolean_t 5721 verify_name(char *name) 5722 { 5723 size_t len = strlen(name); 5724 char *cp; 5725 5726 if (len == 0 || len > IFNAMSIZ) 5727 return (B_FALSE); 5728 5729 for (cp = name; *cp != '\0'; cp++) { 5730 if (!isalnum(*cp) && *cp != '_') 5731 return (B_FALSE); 5732 } 5733 5734 return (B_TRUE); 5735 } 5736 5737 /* 5738 * ddi_create_minor_common: Create a ddi_minor_data structure and 5739 * attach it to the given devinfo node. 
 */

int
ddi_create_minor_common(dev_info_t *dip, char *name, int spec_type,
    minor_t minor_num, char *node_type, int flag, ddi_minor_type mtype,
    const char *read_priv, const char *write_priv, mode_t priv_mode)
{
	struct ddi_minor_data *dmdp;
	major_t major;

	/* Only character and block special nodes are valid. */
	if (spec_type != S_IFCHR && spec_type != S_IFBLK)
		return (DDI_FAILURE);

	if (name == NULL)
		return (DDI_FAILURE);

	/*
	 * Log a message if the minor number the driver is creating
	 * is not expressible on the on-disk filesystem (currently
	 * this is limited to 18 bits both by UFS). The device can
	 * be opened via devfs, but not by device special files created
	 * via mknod().
	 */
	if (minor_num > L_MAXMIN32) {
		cmn_err(CE_WARN,
		    "%s%d:%s minor 0x%x too big for 32-bit applications",
		    ddi_driver_name(dip), ddi_get_instance(dip),
		    name, minor_num);
		return (DDI_FAILURE);
	}

	/* dip must be bound and attached */
	major = ddi_driver_major(dip);
	ASSERT(major != (major_t)-1);

	/*
	 * Default node_type to DDI_PSEUDO and issue notice in debug mode
	 */
	if (node_type == NULL) {
		node_type = DDI_PSEUDO;
		NDI_CONFIG_DEBUG((CE_NOTE, "!illegal node_type NULL for %s%d "
		    " minor node %s; default to DDI_PSEUDO",
		    ddi_driver_name(dip), ddi_get_instance(dip), name));
	}

	/*
	 * If the driver is a network driver, ensure that the name falls within
	 * the interface naming constraints specified by PSARC/2003/375.
	 */
	if (strcmp(node_type, DDI_NT_NET) == 0) {
		if (!verify_name(name))
			return (DDI_FAILURE);

		if (mtype == DDM_MINOR) {
			struct devnames *dnp = &devnamesp[major];

			/* Mark driver as a network driver */
			LOCK_DEV_OPS(&dnp->dn_lock);
			dnp->dn_flags |= DN_NETWORK_DRIVER;
			UNLOCK_DEV_OPS(&dnp->dn_lock);
		}
	}

	if (mtype == DDM_MINOR) {
		if (derive_devi_class(dip, node_type, KM_NOSLEEP) !=
		    DDI_SUCCESS)
			return (DDI_FAILURE);
	}

	/*
	 * Take care of minor number information for the node.
	 */

	if ((dmdp = kmem_zalloc(sizeof (struct ddi_minor_data),
	    KM_NOSLEEP)) == NULL) {
		return (DDI_FAILURE);
	}
	if ((dmdp->ddm_name = i_ddi_strdup(name, KM_NOSLEEP)) == NULL) {
		kmem_free(dmdp, sizeof (struct ddi_minor_data));
		return (DDI_FAILURE);
	}
	dmdp->dip = dip;
	dmdp->ddm_dev = makedevice(major, minor_num);
	dmdp->ddm_spec_type = spec_type;
	dmdp->ddm_node_type = node_type;
	dmdp->type = mtype;
	/* Clone devices alias through the clone driver's major number. */
	if (flag & CLONE_DEV) {
		dmdp->type = DDM_ALIAS;
		dmdp->ddm_dev = makedevice(ddi_driver_major(clone_dip), major);
	}
	if (flag & PRIVONLY_DEV) {
		dmdp->ddm_flags |= DM_NO_FSPERM;
	}
	if (read_priv || write_priv) {
		dmdp->ddm_node_priv =
		    devpolicy_priv_by_name(read_priv, write_priv);
	}
	dmdp->ddm_priv_mode = priv_mode;

	ddi_append_minor_node(dip, dmdp);

	/*
	 * only log ddi_create_minor_node() calls which occur
	 * outside the scope of attach(9e)/detach(9e) reconfigurations
	 */
	if (!(DEVI_IS_ATTACHING(dip) || DEVI_IS_DETACHING(dip)) &&
	    mtype != DDM_INTERNAL_PATH) {
		(void) i_log_devfs_minor_create(dip, name);
	}

	/*
	 * Check if any dacf rules match the creation of this minor node
	 */
	dacfc_match_create_minor(name, node_type, dip, dmdp, flag);
	return (DDI_SUCCESS);
}

int
ddi_create_minor_node(dev_info_t *dip, char *name, int spec_type,
    minor_t minor_num, char *node_type, int flag)
{
	/* Public entry: ordinary minor node, no extra privileges. */
	return (ddi_create_minor_common(dip, name, spec_type, minor_num,
	    node_type, flag, DDM_MINOR, NULL, NULL, 0));
}

/* Minor node with device-policy privilege requirements attached. */
int
ddi_create_priv_minor_node(dev_info_t *dip, char *name, int spec_type,
    minor_t minor_num, char *node_type, int flag,
    const char *rdpriv, const char *wrpriv, mode_t priv_mode)
{
	return (ddi_create_minor_common(dip, name, spec_type, minor_num,
	    node_type, flag, DDM_MINOR, rdpriv, wrpriv, priv_mode));
}

/* Default minor node (DDM_DEFAULT) used when no explicit minor is opened. */
int
ddi_create_default_minor_node(dev_info_t *dip, char *name, int spec_type,
    minor_t minor_num, char *node_type, int flag)
{
	return (ddi_create_minor_common(dip, name, spec_type, minor_num,
	    node_type, flag, DDM_DEFAULT, NULL, NULL, 0));
}

/*
 * Internal (non-ddi) routine for drivers to export names known
 * to the kernel (especially ddi_pathname_to_dev_t and friends)
 * but not exported externally to /dev
 */
int
ddi_create_internal_pathname(dev_info_t *dip, char *name, int spec_type,
    minor_t minor_num)
{
	return (ddi_create_minor_common(dip, name, spec_type, minor_num,
	    "internal", 0, DDM_INTERNAL_PATH, NULL, NULL, 0));
}

/*
 * Remove the named minor node from dip's minor list (all nodes when
 * name is NULL), freeing each record and its associated privilege and
 * dacf data, under devi_lock and the DEVI_S_MD_UPDATE busy state.
 */
void
ddi_remove_minor_node(dev_info_t *dip, char *name)
{
	struct ddi_minor_data *dmdp, *dmdp1;
	struct ddi_minor_data **dmdp_prev;

	mutex_enter(&(DEVI(dip)->devi_lock));
	i_devi_enter(dip, DEVI_S_MD_UPDATE, DEVI_S_MD_UPDATE, 1);

	dmdp_prev = &DEVI(dip)->devi_minor;
	dmdp = DEVI(dip)->devi_minor;
	while (dmdp != NULL) {
		dmdp1 = dmdp->next;
		if ((name == NULL || (dmdp->ddm_name != NULL &&
		    strcmp(name, dmdp->ddm_name) == 0))) {
			if (dmdp->ddm_name != NULL) {
				if (dmdp->type != DDM_INTERNAL_PATH)
					(void) i_log_devfs_minor_remove(dip,
					    dmdp->ddm_name);
				kmem_free(dmdp->ddm_name,
				    strlen(dmdp->ddm_name) + 1);
			}
			/*
			 * Release device privilege, if any.
			 * Release dacf client data associated with this minor
			 * node by storing NULL.
			 */
			if (dmdp->ddm_node_priv)
				dpfree(dmdp->ddm_node_priv);
			dacf_store_info((dacf_infohdl_t)dmdp, NULL);
			kmem_free(dmdp, sizeof (struct ddi_minor_data));
			*dmdp_prev = dmdp1;
			/*
			 * OK, we found it, so get out now -- if we drive on,
			 * we will strcmp against garbage. See 1139209.
			 */
			if (name != NULL)
				break;
		} else {
			dmdp_prev = &dmdp->next;
		}
		dmdp = dmdp1;
	}

	i_devi_exit(dip, DEVI_S_MD_UPDATE, 1);
	mutex_exit(&(DEVI(dip)->devi_lock));
}


/* Nonzero when the system is in the middle of a panic. */
int
ddi_in_panic()
{
	return (panicstr != NULL);
}


/*
 * Find first bit set in a mask (returned counting from 1 up)
 */

int
ddi_ffs(long mask)
{
	return (ffs(mask));
}

/*
 * Find last bit set. Take mask and clear
 * all but the most significant bit, and
 * then let ffs do the rest of the work.
 *
 * Algorithm courtesy of Steve Chessin.
 */

int
ddi_fls(long mask)
{
	while (mask) {
		long nx;

		/* mask & (mask - 1) clears the lowest set bit. */
		if ((nx = (mask & (mask - 1))) == 0)
			break;
		mask = nx;
	}
	return (ffs(mask));
}

/*
 * The next five routines comprise generic storage management utilities
 * for driver soft state structures (in "the old days," this was done
 * with a statically sized array - big systems and dynamic loading
 * and unloading make heap allocation more attractive)
 */

/*
 * Allocate a set of pointers to 'n_items' objects of size 'size'
 * bytes.  Each pointer is initialized to nil.
 *
 * The 'size' and 'n_items' values are stashed in the opaque
 * handle returned to the caller.
5995 * 5996 * This implementation interprets 'set of pointers' to mean 'array 5997 * of pointers' but note that nothing in the interface definition 5998 * precludes an implementation that uses, for example, a linked list. 5999 * However there should be a small efficiency gain from using an array 6000 * at lookup time. 6001 * 6002 * NOTE As an optimization, we make our growable array allocations in 6003 * powers of two (bytes), since that's how much kmem_alloc (currently) 6004 * gives us anyway. It should save us some free/realloc's .. 6005 * 6006 * As a further optimization, we make the growable array start out 6007 * with MIN_N_ITEMS in it. 6008 */ 6009 6010 #define MIN_N_ITEMS 8 /* 8 void *'s == 32 bytes */ 6011 6012 int 6013 ddi_soft_state_init(void **state_p, size_t size, size_t n_items) 6014 { 6015 struct i_ddi_soft_state *ss; 6016 6017 if (state_p == NULL || *state_p != NULL || size == 0) 6018 return (EINVAL); 6019 6020 ss = kmem_zalloc(sizeof (*ss), KM_SLEEP); 6021 mutex_init(&ss->lock, NULL, MUTEX_DRIVER, NULL); 6022 ss->size = size; 6023 6024 if (n_items < MIN_N_ITEMS) 6025 ss->n_items = MIN_N_ITEMS; 6026 else { 6027 int bitlog; 6028 6029 if ((bitlog = ddi_fls(n_items)) == ddi_ffs(n_items)) 6030 bitlog--; 6031 ss->n_items = 1 << bitlog; 6032 } 6033 6034 ASSERT(ss->n_items >= n_items); 6035 6036 ss->array = kmem_zalloc(ss->n_items * sizeof (void *), KM_SLEEP); 6037 6038 *state_p = ss; 6039 6040 return (0); 6041 } 6042 6043 6044 /* 6045 * Allocate a state structure of size 'size' to be associated 6046 * with item 'item'. 6047 * 6048 * In this implementation, the array is extended to 6049 * allow the requested offset, if needed. 
 */
int
ddi_soft_state_zalloc(void *state, int item)
{
	struct i_ddi_soft_state *ss;
	void **array;
	void *new_element;

	/* NULL handle or negative index is a caller error. */
	if ((ss = state) == NULL || item < 0)
		return (DDI_FAILURE);

	mutex_enter(&ss->lock);
	if (ss->size == 0) {
		mutex_exit(&ss->lock);
		cmn_err(CE_WARN, "ddi_soft_state_zalloc: bad handle: %s",
		    mod_containing_pc(caller()));
		return (DDI_FAILURE);
	}

	array = ss->array;	/* NULL if ss->n_items == 0 */
	ASSERT(ss->n_items != 0 && array != NULL);

	/*
	 * refuse to tread on an existing element
	 */
	if (item < ss->n_items && array[item] != NULL) {
		mutex_exit(&ss->lock);
		return (DDI_FAILURE);
	}

	/*
	 * Allocate a new element to plug in
	 */
	new_element = kmem_zalloc(ss->size, KM_SLEEP);

	/*
	 * Check if the array is big enough, if not, grow it.
	 */
	if (item >= ss->n_items) {
		void **new_array;
		size_t new_n_items;
		struct i_ddi_soft_state *dirty;

		/*
		 * Allocate a new array of the right length, copy
		 * all the old pointers to the new array, then
		 * if it exists at all, put the old array on the
		 * dirty list.
		 *
		 * Note that we can't kmem_free() the old array.
		 *
		 * Why -- well the 'get' operation is 'mutex-free', so we
		 * can't easily catch a suspended thread that is just about
		 * to dereference the array we just grew out of.  So we
		 * cons up a header and put it on a list of 'dirty'
		 * pointer arrays.  (Dirty in the sense that there may
		 * be suspended threads somewhere that are in the middle
		 * of referencing them).  Fortunately, we -can- garbage
		 * collect it all at ddi_soft_state_fini time.
		 */
		new_n_items = ss->n_items;
		while (new_n_items < (1 + item))
			new_n_items <<= 1;	/* double array size .. */

		ASSERT(new_n_items >= (1 + item));	/* sanity check! */

		new_array = kmem_zalloc(new_n_items * sizeof (void *),
		    KM_SLEEP);
		/*
		 * Copy the pointers into the new array
		 */
		bcopy(array, new_array, ss->n_items * sizeof (void *));

		/*
		 * Save the old array on the dirty list
		 */
		dirty = kmem_zalloc(sizeof (*dirty), KM_SLEEP);
		dirty->array = ss->array;
		dirty->n_items = ss->n_items;
		dirty->next = ss->next;
		ss->next = dirty;

		ss->array = (array = new_array);
		ss->n_items = new_n_items;
	}

	ASSERT(array != NULL && item < ss->n_items && array[item] == NULL);

	array[item] = new_element;

	mutex_exit(&ss->lock);
	return (DDI_SUCCESS);
}


/*
 * Fetch a pointer to the allocated soft state structure.
 *
 * This is designed to be cheap.
 *
 * There's an argument that there should be more checking for
 * nil pointers and out of bounds on the array.. but we do a lot
 * of that in the alloc/free routines.
 *
 * An array has the convenience that we don't need to lock read-access
 * to it c.f. a linked list.  However our "expanding array" strategy
 * means that we should hold a readers lock on the i_ddi_soft_state
 * structure.
 *
 * However, from a performance viewpoint, we need to do it without
 * any locks at all -- this also makes it a leaf routine.  The algorithm
 * is 'lock-free' because we only discard the pointer arrays at
 * ddi_soft_state_fini() time.
 */
void *
ddi_get_soft_state(void *state, int item)
{
	struct i_ddi_soft_state *ss = state;

	ASSERT(ss != NULL && item >= 0);

	/* Unlocked read -- see the lock-free rationale above. */
	if (item < ss->n_items && ss->array != NULL)
		return (ss->array[item]);
	return (NULL);
}

/*
 * Free the state structure corresponding to 'item.'  Freeing an
 * element that has either gone or was never allocated is not
 * considered an error.  Note that we free the state structure, but
 * we don't shrink our pointer array, or discard 'dirty' arrays,
 * since even a few pointers don't really waste too much memory.
 *
 * Passing an item number that is out of bounds, or a null pointer will
 * provoke an error message.
 */
void
ddi_soft_state_free(void *state, int item)
{
	struct i_ddi_soft_state *ss;
	void **array;
	void *element;
	static char msg[] = "ddi_soft_state_free:";

	if ((ss = state) == NULL) {
		cmn_err(CE_WARN, "%s null handle: %s",
		    msg, mod_containing_pc(caller()));
		return;
	}

	element = NULL;

	mutex_enter(&ss->lock);

	if ((array = ss->array) == NULL || ss->size == 0) {
		cmn_err(CE_WARN, "%s bad handle: %s",
		    msg, mod_containing_pc(caller()));
	} else if (item < 0 || item >= ss->n_items) {
		cmn_err(CE_WARN, "%s item %d not in range [0..%lu]: %s",
		    msg, item, ss->n_items - 1, mod_containing_pc(caller()));
	} else if (array[item] != NULL) {
		/* detach the element under the lock, free it outside */
		element = array[item];
		array[item] = NULL;
	}

	mutex_exit(&ss->lock);

	/* ss->size is fixed after init, so reading it unlocked here is safe */
	if (element)
		kmem_free(element, ss->size);
}


/*
 * Free the entire set of pointers, and any
 * soft state structures contained therein.
 *
 * Note that we don't grab the ss->lock mutex, even though
 * we're inspecting the various fields of the data structure.
 *
 * There is an implicit assumption that this routine will
 * never run concurrently with any of the above on this
 * particular state structure i.e. by the time the driver
 * calls this routine, there should be no other threads
 * running in the driver.
 */
void
ddi_soft_state_fini(void **state_p)
{
	struct i_ddi_soft_state *ss, *dirty;
	int item;
	static char msg[] = "ddi_soft_state_fini:";

	if (state_p == NULL || (ss = *state_p) == NULL) {
		cmn_err(CE_WARN, "%s null handle: %s",
		    msg, mod_containing_pc(caller()));
		return;
	}

	if (ss->size == 0) {
		cmn_err(CE_WARN, "%s bad handle: %s",
		    msg, mod_containing_pc(caller()));
		return;
	}

	/* free every remaining element, then the current pointer array */
	if (ss->n_items > 0) {
		for (item = 0; item < ss->n_items; item++)
			ddi_soft_state_free(ss, item);
		kmem_free(ss->array, ss->n_items * sizeof (void *));
	}

	/*
	 * Now delete any dirty arrays from previous 'grow' operations
	 */
	for (dirty = ss->next; dirty; dirty = ss->next) {
		ss->next = dirty->next;
		kmem_free(dirty->array, dirty->n_items * sizeof (void *));
		kmem_free(dirty, sizeof (*dirty));
	}

	mutex_destroy(&ss->lock);
	kmem_free(ss, sizeof (*ss));

	*state_p = NULL;
}

/*
 * This sets the devi_addr entry in the dev_info structure 'dip' to 'name'.
 * Storage is double buffered to prevent updates during devi_addr use -
 * double buffering is adequate for reliable ddi_deviname() consumption.
 * The double buffer is not freed until dev_info structure destruction
 * (by i_ddi_free_node).
 */
void
ddi_set_name_addr(dev_info_t *dip, char *name)
{
	char *buf = DEVI(dip)->devi_addr_buf;
	char *newaddr;

	/* lazily allocate the two MAXNAMELEN halves of the double buffer */
	if (buf == NULL) {
		buf = kmem_zalloc(2 * MAXNAMELEN, KM_SLEEP);
		DEVI(dip)->devi_addr_buf = buf;
	}

	if (name) {
		ASSERT(strlen(name) < MAXNAMELEN);
		/* write into whichever half devi_addr is NOT pointing at */
		newaddr = (DEVI(dip)->devi_addr == buf) ?
		    (buf + MAXNAMELEN) : buf;
		(void) strlcpy(newaddr, name, MAXNAMELEN);
	} else
		newaddr = NULL;

	DEVI(dip)->devi_addr = newaddr;
}

/* Return the current unit-address string of 'dip' (may be NULL). */
char *
ddi_get_name_addr(dev_info_t *dip)
{
	return (DEVI(dip)->devi_addr);
}

/* Stash parent-private data on the node. */
void
ddi_set_parent_data(dev_info_t *dip, void *pd)
{
	DEVI(dip)->devi_parent_data = pd;
}

/* Retrieve parent-private data from the node. */
void *
ddi_get_parent_data(dev_info_t *dip)
{
	return (DEVI(dip)->devi_parent_data);
}

/*
 * ddi_name_to_major: Returns the major number of a module given its name.
 */
major_t
ddi_name_to_major(char *name)
{
	return (mod_name_to_major(name));
}

/*
 * ddi_major_to_name: Returns the module name bound to a major number.
 */
char *
ddi_major_to_name(major_t major)
{
	return (mod_major_to_name(major));
}

/*
 * Return the name of the devinfo node pointed at by 'dip' in the buffer
 * pointed at by 'name.'  A devinfo node is named as a result of calling
 * ddi_initchild().
 *
 * Note: the driver must be held before calling this function!
 */
char *
ddi_deviname(dev_info_t *dip, char *name)
{
	char *addrname;
	char none = '\0';

	if (dip == ddi_root_node()) {
		*name = '\0';
		return (name);
	}

	/* nodes below DS_INITIALIZED have no unit address yet */
	if (i_ddi_node_state(dip) < DS_INITIALIZED) {
		addrname = &none;
	} else {
		addrname = ddi_get_name_addr(dip);
	}

	if (*addrname == '\0') {
		(void) sprintf(name, "/%s", ddi_node_name(dip));
	} else {
		(void) sprintf(name, "/%s@%s", ddi_node_name(dip), addrname);
	}

	return (name);
}

/*
 * Spits out the name of device node, typically name@addr, for a given node,
 * using the driver name, not the nodename.
 *
 * Used by match_parent. Not to be used elsewhere.
 */
char *
i_ddi_parname(dev_info_t *dip, char *name)
{
	char *addrname;

	if (dip == ddi_root_node()) {
		*name = '\0';
		return (name);
	}

	/* caller must have initialized the node; it has a unit address */
	ASSERT(i_ddi_node_state(dip) >= DS_INITIALIZED);

	if (*(addrname = ddi_get_name_addr(dip)) == '\0')
		(void) sprintf(name, "%s", ddi_binding_name(dip));
	else
		(void) sprintf(name, "%s@%s", ddi_binding_name(dip), addrname);
	return (name);
}

/*
 * Recursive worker for ddi_pathname(): build the path of each ancestor
 * first, then append this node's /name@addr component at the end.
 */
static char *
pathname_work(dev_info_t *dip, char *path)
{
	char *bp;

	if (dip == ddi_root_node()) {
		*path = '\0';
		return (path);
	}
	(void) pathname_work(ddi_get_parent(dip), path);
	bp = path + strlen(path);
	(void) ddi_deviname(dip, bp);
	return (path);
}

/*
 * Return the full /devices-style pathname of 'dip' in the caller-supplied
 * buffer 'path' (assumed large enough -- presumably MAXPATHLEN).
 */
char *
ddi_pathname(dev_info_t *dip, char *path)
{
	return (pathname_work(dip, path));
}

/*
 * Given a dev_t, return the pathname of the corresponding device in the
 * buffer pointed at by "path."  The buffer is assumed to be large enough
 * to hold the pathname of the device (MAXPATHLEN).
 *
 * The pathname of a device is the pathname of the devinfo node to which
 * the device "belongs," concatenated with the character ':' and the name
 * of the minor node corresponding to the dev_t.  If spec_type is 0 then
 * just the pathname of the devinfo node is returned without driving attach
 * of that node.  For a non-zero spec_type, an attach is performed and a
 * search of the minor list occurs.
 *
 * It is possible that the path associated with the dev_t is not
 * currently available in the devinfo tree.  In order to have a
 * dev_t, a device must have been discovered before, which means
 * that the path is always in the instance tree.
 * The one exception
 * to this is if the dev_t is associated with a pseudo driver, in
 * which case the device must exist on the pseudo branch of the
 * devinfo tree as a result of parsing .conf files.
 */
int
ddi_dev_pathname(dev_t devt, int spec_type, char *path)
{
	major_t major = getmajor(devt);
	int instance;
	dev_info_t *dip;
	char *minorname;
	char *drvname;

	if (major >= devcnt)
		goto fail;
	if (major == clone_major) {
		/* clone has no minor nodes, manufacture the path here */
		if ((drvname = ddi_major_to_name(getminor(devt))) == NULL)
			goto fail;

		(void) snprintf(path, MAXPATHLEN, "%s:%s", CLONE_PATH, drvname);
		return (DDI_SUCCESS);
	}

	/* extract instance from devt (getinfo(9E) DDI_INFO_DEVT2INSTANCE). */
	if ((instance = dev_to_instance(devt)) == -1)
		goto fail;

	/* reconstruct the path given the major/instance */
	if (e_ddi_majorinstance_to_path(major, instance, path) != DDI_SUCCESS)
		goto fail;

	/* if spec_type given we must drive attach and search minor nodes */
	if ((spec_type == S_IFCHR) || (spec_type == S_IFBLK)) {
		/* attach the path so we can search minors */
		if ((dip = e_ddi_hold_devi_by_path(path, 0)) == NULL)
			goto fail;

		/* Add minorname to path. */
		mutex_enter(&(DEVI(dip)->devi_lock));
		minorname = i_ddi_devtspectype_to_minorname(dip,
		    devt, spec_type);
		if (minorname) {
			(void) strcat(path, ":");
			(void) strcat(path, minorname);
		}
		mutex_exit(&(DEVI(dip)->devi_lock));
		ddi_release_devi(dip);
		if (minorname == NULL)
			goto fail;
	}
	ASSERT(strlen(path) < MAXPATHLEN);
	return (DDI_SUCCESS);

fail:	*path = 0;
	return (DDI_FAILURE);
}

/*
 * Given a major number and an instance, return the path.
 * This interface does NOT drive attach.
 */
int
e_ddi_majorinstance_to_path(major_t major, int instance, char *path)
{
	struct devnames *dnp;
	dev_info_t *dip;

	if ((major >= devcnt) || (instance == -1)) {
		*path = 0;
		return (DDI_FAILURE);
	}

	/* look for the major/instance in the instance tree */
	if (e_ddi_instance_majorinstance_to_path(major, instance,
	    path) == DDI_SUCCESS) {
		ASSERT(strlen(path) < MAXPATHLEN);
		return (DDI_SUCCESS);
	}

	/*
	 * Not in instance tree, find the instance on the per driver list and
	 * construct path to instance via ddi_pathname(). This is how paths
	 * down the 'pseudo' branch are constructed.
	 */
	dnp = &(devnamesp[major]);
	LOCK_DEV_OPS(&(dnp->dn_lock));
	for (dip = dnp->dn_head; dip;
	    dip = (dev_info_t *)DEVI(dip)->devi_next) {
		/* Skip if instance does not match. */
		if (DEVI(dip)->devi_instance != instance)
			continue;

		/*
		 * An ndi_hold_devi() does not prevent DS_INITIALIZED->DS_BOUND
		 * node demotion, so it is not an effective way of ensuring
		 * that the ddi_pathname result has a unit-address.  Instead,
		 * we reverify the node state after calling ddi_pathname().
		 */
		if (i_ddi_node_state(dip) >= DS_INITIALIZED) {
			(void) ddi_pathname(dip, path);
			if (i_ddi_node_state(dip) < DS_INITIALIZED)
				continue;
			UNLOCK_DEV_OPS(&(dnp->dn_lock));
			ASSERT(strlen(path) < MAXPATHLEN);
			return (DDI_SUCCESS);
		}
	}
	UNLOCK_DEV_OPS(&(dnp->dn_lock));

	/* can't reconstruct the path */
	*path = 0;
	return (DDI_FAILURE);
}

#define	GLD_DRIVER_PPA		"SUNW,gld_v0_ppa"

/*
 * Given the dip for a network interface return the ppa for that interface.
 *
 * In all cases except GLD v0 drivers, the ppa == instance.
 * In the case of GLD v0 drivers, the ppa is equal to the attach order.
6557 * So for these drivers when the attach routine calls gld_register(), 6558 * the GLD framework creates an integer property called "gld_driver_ppa" 6559 * that can be queried here. 6560 * 6561 * The only time this function is used is when a system is booting over nfs. 6562 * In this case the system has to resolve the pathname of the boot device 6563 * to it's ppa. 6564 */ 6565 int 6566 i_ddi_devi_get_ppa(dev_info_t *dip) 6567 { 6568 return (ddi_prop_get_int(DDI_DEV_T_ANY, dip, 6569 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, 6570 GLD_DRIVER_PPA, ddi_get_instance(dip))); 6571 } 6572 6573 /* 6574 * i_ddi_devi_set_ppa() should only be called from gld_register() 6575 * and only for GLD v0 drivers 6576 */ 6577 void 6578 i_ddi_devi_set_ppa(dev_info_t *dip, int ppa) 6579 { 6580 (void) e_ddi_prop_update_int(DDI_DEV_T_NONE, dip, GLD_DRIVER_PPA, ppa); 6581 } 6582 6583 6584 /* 6585 * Private DDI Console bell functions. 6586 */ 6587 void 6588 ddi_ring_console_bell(clock_t duration) 6589 { 6590 if (ddi_console_bell_func != NULL) 6591 (*ddi_console_bell_func)(duration); 6592 } 6593 6594 void 6595 ddi_set_console_bell(void (*bellfunc)(clock_t duration)) 6596 { 6597 ddi_console_bell_func = bellfunc; 6598 } 6599 6600 int 6601 ddi_dma_alloc_handle(dev_info_t *dip, ddi_dma_attr_t *attr, 6602 int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep) 6603 { 6604 int (*funcp)() = ddi_dma_allochdl; 6605 ddi_dma_attr_t dma_attr; 6606 struct bus_ops *bop; 6607 6608 if (attr == (ddi_dma_attr_t *)0) 6609 return (DDI_DMA_BADATTR); 6610 6611 dma_attr = *attr; 6612 6613 bop = DEVI(dip)->devi_ops->devo_bus_ops; 6614 if (bop && bop->bus_dma_allochdl) 6615 funcp = bop->bus_dma_allochdl; 6616 6617 return ((*funcp)(dip, dip, &dma_attr, waitfp, arg, handlep)); 6618 } 6619 6620 void 6621 ddi_dma_free_handle(ddi_dma_handle_t *handlep) 6622 { 6623 ddi_dma_handle_t h = *handlep; 6624 (void) ddi_dma_freehdl(HD, HD, h); 6625 } 6626 6627 static uintptr_t dma_mem_list_id = 0; 6628 6629 6630 int 6631 
ddi_dma_mem_alloc(ddi_dma_handle_t handle, size_t length, 6632 ddi_device_acc_attr_t *accattrp, uint_t flags, 6633 int (*waitfp)(caddr_t), caddr_t arg, caddr_t *kaddrp, 6634 size_t *real_length, ddi_acc_handle_t *handlep) 6635 { 6636 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle; 6637 dev_info_t *dip = hp->dmai_rdip; 6638 ddi_acc_hdl_t *ap; 6639 ddi_dma_attr_t *attrp = &hp->dmai_attr; 6640 uint_t sleepflag, xfermodes; 6641 int (*fp)(caddr_t); 6642 int rval; 6643 6644 if (waitfp == DDI_DMA_SLEEP) 6645 fp = (int (*)())KM_SLEEP; 6646 else if (waitfp == DDI_DMA_DONTWAIT) 6647 fp = (int (*)())KM_NOSLEEP; 6648 else 6649 fp = waitfp; 6650 *handlep = impl_acc_hdl_alloc(fp, arg); 6651 if (*handlep == NULL) 6652 return (DDI_FAILURE); 6653 6654 /* check if the cache attributes are supported */ 6655 if (i_ddi_check_cache_attr(flags) == B_FALSE) 6656 return (DDI_FAILURE); 6657 6658 /* 6659 * Transfer the meaningful bits to xfermodes. 6660 * Double-check if the 3rd party driver correctly sets the bits. 6661 * If not, set DDI_DMA_STREAMING to keep compatibility. 6662 */ 6663 xfermodes = flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING); 6664 if (xfermodes == 0) { 6665 xfermodes = DDI_DMA_STREAMING; 6666 } 6667 6668 /* 6669 * initialize the common elements of data access handle 6670 */ 6671 ap = impl_acc_hdl_get(*handlep); 6672 ap->ah_vers = VERS_ACCHDL; 6673 ap->ah_dip = dip; 6674 ap->ah_offset = 0; 6675 ap->ah_len = 0; 6676 ap->ah_xfermodes = flags; 6677 ap->ah_acc = *accattrp; 6678 6679 sleepflag = ((waitfp == DDI_DMA_SLEEP) ? 
1 : 0); 6680 if (xfermodes == DDI_DMA_CONSISTENT) { 6681 rval = i_ddi_mem_alloc(dip, attrp, length, sleepflag, 6682 flags, accattrp, kaddrp, NULL, ap); 6683 *real_length = length; 6684 } else { 6685 rval = i_ddi_mem_alloc(dip, attrp, length, sleepflag, 6686 flags, accattrp, kaddrp, real_length, ap); 6687 } 6688 if (rval == DDI_SUCCESS) { 6689 ap->ah_len = (off_t)(*real_length); 6690 ap->ah_addr = *kaddrp; 6691 } else { 6692 impl_acc_hdl_free(*handlep); 6693 *handlep = (ddi_acc_handle_t)NULL; 6694 if (waitfp != DDI_DMA_SLEEP && waitfp != DDI_DMA_DONTWAIT) { 6695 ddi_set_callback(waitfp, arg, &dma_mem_list_id); 6696 } 6697 rval = DDI_FAILURE; 6698 } 6699 return (rval); 6700 } 6701 6702 void 6703 ddi_dma_mem_free(ddi_acc_handle_t *handlep) 6704 { 6705 ddi_acc_hdl_t *ap; 6706 6707 ap = impl_acc_hdl_get(*handlep); 6708 ASSERT(ap); 6709 6710 i_ddi_mem_free((caddr_t)ap->ah_addr, ap); 6711 6712 /* 6713 * free the handle 6714 */ 6715 impl_acc_hdl_free(*handlep); 6716 *handlep = (ddi_acc_handle_t)NULL; 6717 6718 if (dma_mem_list_id != 0) { 6719 ddi_run_callback(&dma_mem_list_id); 6720 } 6721 } 6722 6723 int 6724 ddi_dma_buf_bind_handle(ddi_dma_handle_t handle, struct buf *bp, 6725 uint_t flags, int (*waitfp)(caddr_t), caddr_t arg, 6726 ddi_dma_cookie_t *cookiep, uint_t *ccountp) 6727 { 6728 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle; 6729 dev_info_t *hdip, *dip; 6730 struct ddi_dma_req dmareq; 6731 int (*funcp)(); 6732 6733 dmareq.dmar_flags = flags; 6734 dmareq.dmar_fp = waitfp; 6735 dmareq.dmar_arg = arg; 6736 dmareq.dmar_object.dmao_size = (uint_t)bp->b_bcount; 6737 6738 if (bp->b_flags & B_PAGEIO) { 6739 dmareq.dmar_object.dmao_type = DMA_OTYP_PAGES; 6740 dmareq.dmar_object.dmao_obj.pp_obj.pp_pp = bp->b_pages; 6741 dmareq.dmar_object.dmao_obj.pp_obj.pp_offset = 6742 (uint_t)(((uintptr_t)bp->b_un.b_addr) & MMU_PAGEOFFSET); 6743 } else { 6744 dmareq.dmar_object.dmao_obj.virt_obj.v_addr = bp->b_un.b_addr; 6745 if (bp->b_flags & B_SHADOW) { 6746 
dmareq.dmar_object.dmao_obj.virt_obj.v_priv = 6747 bp->b_shadow; 6748 dmareq.dmar_object.dmao_type = DMA_OTYP_BUFVADDR; 6749 } else { 6750 dmareq.dmar_object.dmao_type = 6751 (bp->b_flags & (B_PHYS | B_REMAPPED)) ? 6752 DMA_OTYP_BUFVADDR : DMA_OTYP_VADDR; 6753 dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL; 6754 } 6755 6756 /* 6757 * If the buffer has no proc pointer, or the proc 6758 * struct has the kernel address space, or the buffer has 6759 * been marked B_REMAPPED (meaning that it is now 6760 * mapped into the kernel's address space), then 6761 * the address space is kas (kernel address space). 6762 */ 6763 if ((bp->b_proc == NULL) || (bp->b_proc->p_as == &kas) || 6764 (bp->b_flags & B_REMAPPED)) { 6765 dmareq.dmar_object.dmao_obj.virt_obj.v_as = 0; 6766 } else { 6767 dmareq.dmar_object.dmao_obj.virt_obj.v_as = 6768 bp->b_proc->p_as; 6769 } 6770 } 6771 6772 dip = hp->dmai_rdip; 6773 hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl; 6774 funcp = DEVI(dip)->devi_bus_dma_bindfunc; 6775 return ((*funcp)(hdip, dip, handle, &dmareq, cookiep, ccountp)); 6776 } 6777 6778 int 6779 ddi_dma_addr_bind_handle(ddi_dma_handle_t handle, struct as *as, 6780 caddr_t addr, size_t len, uint_t flags, int (*waitfp)(caddr_t), 6781 caddr_t arg, ddi_dma_cookie_t *cookiep, uint_t *ccountp) 6782 { 6783 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle; 6784 dev_info_t *hdip, *dip; 6785 struct ddi_dma_req dmareq; 6786 int (*funcp)(); 6787 6788 if (len == (uint_t)0) { 6789 return (DDI_DMA_NOMAPPING); 6790 } 6791 dmareq.dmar_flags = flags; 6792 dmareq.dmar_fp = waitfp; 6793 dmareq.dmar_arg = arg; 6794 dmareq.dmar_object.dmao_size = len; 6795 dmareq.dmar_object.dmao_type = DMA_OTYP_VADDR; 6796 dmareq.dmar_object.dmao_obj.virt_obj.v_as = as; 6797 dmareq.dmar_object.dmao_obj.virt_obj.v_addr = addr; 6798 dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL; 6799 6800 dip = hp->dmai_rdip; 6801 hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl; 6802 funcp = 
DEVI(dip)->devi_bus_dma_bindfunc; 6803 return ((*funcp)(hdip, dip, handle, &dmareq, cookiep, ccountp)); 6804 } 6805 6806 void 6807 ddi_dma_nextcookie(ddi_dma_handle_t handle, ddi_dma_cookie_t *cookiep) 6808 { 6809 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle; 6810 ddi_dma_cookie_t *cp; 6811 6812 cp = hp->dmai_cookie; 6813 ASSERT(cp); 6814 6815 cookiep->dmac_notused = cp->dmac_notused; 6816 cookiep->dmac_type = cp->dmac_type; 6817 cookiep->dmac_address = cp->dmac_address; 6818 cookiep->dmac_size = cp->dmac_size; 6819 hp->dmai_cookie++; 6820 } 6821 6822 int 6823 ddi_dma_numwin(ddi_dma_handle_t handle, uint_t *nwinp) 6824 { 6825 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle; 6826 if ((hp->dmai_rflags & DDI_DMA_PARTIAL) == 0) { 6827 return (DDI_FAILURE); 6828 } else { 6829 *nwinp = hp->dmai_nwin; 6830 return (DDI_SUCCESS); 6831 } 6832 } 6833 6834 int 6835 ddi_dma_getwin(ddi_dma_handle_t h, uint_t win, off_t *offp, 6836 size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp) 6837 { 6838 int (*funcp)() = ddi_dma_win; 6839 struct bus_ops *bop; 6840 6841 bop = DEVI(HD)->devi_ops->devo_bus_ops; 6842 if (bop && bop->bus_dma_win) 6843 funcp = bop->bus_dma_win; 6844 6845 return ((*funcp)(HD, HD, h, win, offp, lenp, cookiep, ccountp)); 6846 } 6847 6848 int 6849 ddi_dma_set_sbus64(ddi_dma_handle_t h, ulong_t burstsizes) 6850 { 6851 return (ddi_dma_mctl(HD, HD, h, DDI_DMA_SET_SBUS64, 0, 6852 &burstsizes, 0, 0)); 6853 } 6854 6855 int 6856 i_ddi_dma_fault_check(ddi_dma_impl_t *hp) 6857 { 6858 return (hp->dmai_fault); 6859 } 6860 6861 int 6862 ddi_check_dma_handle(ddi_dma_handle_t handle) 6863 { 6864 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle; 6865 int (*check)(ddi_dma_impl_t *); 6866 6867 if ((check = hp->dmai_fault_check) == NULL) 6868 check = i_ddi_dma_fault_check; 6869 6870 return (((*check)(hp) == DDI_SUCCESS) ? 
DDI_SUCCESS : DDI_FAILURE); 6871 } 6872 6873 void 6874 i_ddi_dma_set_fault(ddi_dma_handle_t handle) 6875 { 6876 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle; 6877 void (*notify)(ddi_dma_impl_t *); 6878 6879 if (!hp->dmai_fault) { 6880 hp->dmai_fault = 1; 6881 if ((notify = hp->dmai_fault_notify) != NULL) 6882 (*notify)(hp); 6883 } 6884 } 6885 6886 void 6887 i_ddi_dma_clr_fault(ddi_dma_handle_t handle) 6888 { 6889 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle; 6890 void (*notify)(ddi_dma_impl_t *); 6891 6892 if (hp->dmai_fault) { 6893 hp->dmai_fault = 0; 6894 if ((notify = hp->dmai_fault_notify) != NULL) 6895 (*notify)(hp); 6896 } 6897 } 6898 6899 /* 6900 * register mapping routines. 6901 */ 6902 int 6903 ddi_regs_map_setup(dev_info_t *dip, uint_t rnumber, caddr_t *addrp, 6904 offset_t offset, offset_t len, ddi_device_acc_attr_t *accattrp, 6905 ddi_acc_handle_t *handle) 6906 { 6907 ddi_map_req_t mr; 6908 ddi_acc_hdl_t *hp; 6909 int result; 6910 6911 /* 6912 * Allocate and initialize the common elements of data access handle. 6913 */ 6914 *handle = impl_acc_hdl_alloc(KM_SLEEP, NULL); 6915 hp = impl_acc_hdl_get(*handle); 6916 hp->ah_vers = VERS_ACCHDL; 6917 hp->ah_dip = dip; 6918 hp->ah_rnumber = rnumber; 6919 hp->ah_offset = offset; 6920 hp->ah_len = len; 6921 hp->ah_acc = *accattrp; 6922 6923 /* 6924 * Set up the mapping request and call to parent. 
6925 */ 6926 mr.map_op = DDI_MO_MAP_LOCKED; 6927 mr.map_type = DDI_MT_RNUMBER; 6928 mr.map_obj.rnumber = rnumber; 6929 mr.map_prot = PROT_READ | PROT_WRITE; 6930 mr.map_flags = DDI_MF_KERNEL_MAPPING; 6931 mr.map_handlep = hp; 6932 mr.map_vers = DDI_MAP_VERSION; 6933 result = ddi_map(dip, &mr, offset, len, addrp); 6934 6935 /* 6936 * check for end result 6937 */ 6938 if (result != DDI_SUCCESS) { 6939 impl_acc_hdl_free(*handle); 6940 *handle = (ddi_acc_handle_t)NULL; 6941 } else { 6942 hp->ah_addr = *addrp; 6943 } 6944 6945 return (result); 6946 } 6947 6948 void 6949 ddi_regs_map_free(ddi_acc_handle_t *handlep) 6950 { 6951 ddi_map_req_t mr; 6952 ddi_acc_hdl_t *hp; 6953 6954 hp = impl_acc_hdl_get(*handlep); 6955 ASSERT(hp); 6956 6957 mr.map_op = DDI_MO_UNMAP; 6958 mr.map_type = DDI_MT_RNUMBER; 6959 mr.map_obj.rnumber = hp->ah_rnumber; 6960 mr.map_prot = PROT_READ | PROT_WRITE; 6961 mr.map_flags = DDI_MF_KERNEL_MAPPING; 6962 mr.map_handlep = hp; 6963 mr.map_vers = DDI_MAP_VERSION; 6964 6965 /* 6966 * Call my parent to unmap my regs. 
6967 */ 6968 (void) ddi_map(hp->ah_dip, &mr, hp->ah_offset, 6969 hp->ah_len, &hp->ah_addr); 6970 /* 6971 * free the handle 6972 */ 6973 impl_acc_hdl_free(*handlep); 6974 *handlep = (ddi_acc_handle_t)NULL; 6975 } 6976 6977 int 6978 ddi_device_zero(ddi_acc_handle_t handle, caddr_t dev_addr, size_t bytecount, 6979 ssize_t dev_advcnt, uint_t dev_datasz) 6980 { 6981 uint8_t *b; 6982 uint16_t *w; 6983 uint32_t *l; 6984 uint64_t *ll; 6985 6986 /* check for total byte count is multiple of data transfer size */ 6987 if (bytecount != ((bytecount / dev_datasz) * dev_datasz)) 6988 return (DDI_FAILURE); 6989 6990 switch (dev_datasz) { 6991 case DDI_DATA_SZ01_ACC: 6992 for (b = (uint8_t *)dev_addr; 6993 bytecount != 0; bytecount -= 1, b += dev_advcnt) 6994 ddi_put8(handle, b, 0); 6995 break; 6996 case DDI_DATA_SZ02_ACC: 6997 for (w = (uint16_t *)dev_addr; 6998 bytecount != 0; bytecount -= 2, w += dev_advcnt) 6999 ddi_put16(handle, w, 0); 7000 break; 7001 case DDI_DATA_SZ04_ACC: 7002 for (l = (uint32_t *)dev_addr; 7003 bytecount != 0; bytecount -= 4, l += dev_advcnt) 7004 ddi_put32(handle, l, 0); 7005 break; 7006 case DDI_DATA_SZ08_ACC: 7007 for (ll = (uint64_t *)dev_addr; 7008 bytecount != 0; bytecount -= 8, ll += dev_advcnt) 7009 ddi_put64(handle, ll, 0x0ll); 7010 break; 7011 default: 7012 return (DDI_FAILURE); 7013 } 7014 return (DDI_SUCCESS); 7015 } 7016 7017 int 7018 ddi_device_copy( 7019 ddi_acc_handle_t src_handle, caddr_t src_addr, ssize_t src_advcnt, 7020 ddi_acc_handle_t dest_handle, caddr_t dest_addr, ssize_t dest_advcnt, 7021 size_t bytecount, uint_t dev_datasz) 7022 { 7023 uint8_t *b_src, *b_dst; 7024 uint16_t *w_src, *w_dst; 7025 uint32_t *l_src, *l_dst; 7026 uint64_t *ll_src, *ll_dst; 7027 7028 /* check for total byte count is multiple of data transfer size */ 7029 if (bytecount != ((bytecount / dev_datasz) * dev_datasz)) 7030 return (DDI_FAILURE); 7031 7032 switch (dev_datasz) { 7033 case DDI_DATA_SZ01_ACC: 7034 b_src = (uint8_t *)src_addr; 7035 b_dst = (uint8_t 
*)dest_addr; 7036 7037 for (; bytecount != 0; bytecount -= 1) { 7038 ddi_put8(dest_handle, b_dst, 7039 ddi_get8(src_handle, b_src)); 7040 b_dst += dest_advcnt; 7041 b_src += src_advcnt; 7042 } 7043 break; 7044 case DDI_DATA_SZ02_ACC: 7045 w_src = (uint16_t *)src_addr; 7046 w_dst = (uint16_t *)dest_addr; 7047 7048 for (; bytecount != 0; bytecount -= 2) { 7049 ddi_put16(dest_handle, w_dst, 7050 ddi_get16(src_handle, w_src)); 7051 w_dst += dest_advcnt; 7052 w_src += src_advcnt; 7053 } 7054 break; 7055 case DDI_DATA_SZ04_ACC: 7056 l_src = (uint32_t *)src_addr; 7057 l_dst = (uint32_t *)dest_addr; 7058 7059 for (; bytecount != 0; bytecount -= 4) { 7060 ddi_put32(dest_handle, l_dst, 7061 ddi_get32(src_handle, l_src)); 7062 l_dst += dest_advcnt; 7063 l_src += src_advcnt; 7064 } 7065 break; 7066 case DDI_DATA_SZ08_ACC: 7067 ll_src = (uint64_t *)src_addr; 7068 ll_dst = (uint64_t *)dest_addr; 7069 7070 for (; bytecount != 0; bytecount -= 8) { 7071 ddi_put64(dest_handle, ll_dst, 7072 ddi_get64(src_handle, ll_src)); 7073 ll_dst += dest_advcnt; 7074 ll_src += src_advcnt; 7075 } 7076 break; 7077 default: 7078 return (DDI_FAILURE); 7079 } 7080 return (DDI_SUCCESS); 7081 } 7082 7083 #define swap16(value) \ 7084 ((((value) & 0xff) << 8) | ((value) >> 8)) 7085 7086 #define swap32(value) \ 7087 (((uint32_t)swap16((uint16_t)((value) & 0xffff)) << 16) | \ 7088 (uint32_t)swap16((uint16_t)((value) >> 16))) 7089 7090 #define swap64(value) \ 7091 (((uint64_t)swap32((uint32_t)((value) & 0xffffffff)) \ 7092 << 32) | \ 7093 (uint64_t)swap32((uint32_t)((value) >> 32))) 7094 7095 uint16_t 7096 ddi_swap16(uint16_t value) 7097 { 7098 return (swap16(value)); 7099 } 7100 7101 uint32_t 7102 ddi_swap32(uint32_t value) 7103 { 7104 return (swap32(value)); 7105 } 7106 7107 uint64_t 7108 ddi_swap64(uint64_t value) 7109 { 7110 return (swap64(value)); 7111 } 7112 7113 /* 7114 * Convert a binding name to a driver name. 
7115 * A binding name is the name used to determine the driver for a 7116 * device - it may be either an alias for the driver or the name 7117 * of the driver itself. 7118 */ 7119 char * 7120 i_binding_to_drv_name(char *bname) 7121 { 7122 major_t major_no; 7123 7124 ASSERT(bname != NULL); 7125 7126 if ((major_no = ddi_name_to_major(bname)) == -1) 7127 return (NULL); 7128 return (ddi_major_to_name(major_no)); 7129 } 7130 7131 /* 7132 * Search for minor name that has specified dev_t and spec_type. 7133 * If spec_type is zero then any dev_t match works. Since we 7134 * are returning a pointer to the minor name string, we require the 7135 * caller to do the locking. 7136 */ 7137 char * 7138 i_ddi_devtspectype_to_minorname(dev_info_t *dip, dev_t dev, int spec_type) 7139 { 7140 struct ddi_minor_data *dmdp; 7141 7142 /* 7143 * The did layered driver currently intentionally returns a 7144 * devinfo ptr for an underlying sd instance based on a did 7145 * dev_t. In this case it is not an error. 7146 * 7147 * The did layered driver is associated with Sun Cluster. 7148 */ 7149 ASSERT((ddi_driver_major(dip) == getmajor(dev)) || 7150 (strcmp(ddi_major_to_name(getmajor(dev)), "did") == 0)); 7151 ASSERT(MUTEX_HELD(&(DEVI(dip)->devi_lock))); 7152 7153 for (dmdp = DEVI(dip)->devi_minor; dmdp; dmdp = dmdp->next) { 7154 if (((dmdp->type == DDM_MINOR) || 7155 (dmdp->type == DDM_INTERNAL_PATH) || 7156 (dmdp->type == DDM_DEFAULT)) && 7157 (dmdp->ddm_dev == dev) && 7158 ((((spec_type & (S_IFCHR|S_IFBLK))) == 0) || 7159 (dmdp->ddm_spec_type == spec_type))) 7160 return (dmdp->ddm_name); 7161 } 7162 7163 return (NULL); 7164 } 7165 7166 /* 7167 * Find the devt and spectype of the specified minor_name. 7168 * Return DDI_FAILURE if minor_name not found. Since we are 7169 * returning everything via arguments we can do the locking. 
7170 */ 7171 int 7172 i_ddi_minorname_to_devtspectype(dev_info_t *dip, char *minor_name, 7173 dev_t *devtp, int *spectypep) 7174 { 7175 struct ddi_minor_data *dmdp; 7176 7177 /* deal with clone minor nodes */ 7178 if (dip == clone_dip) { 7179 major_t major; 7180 /* 7181 * Make sure minor_name is a STREAMS driver. 7182 * We load the driver but don't attach to any instances. 7183 */ 7184 7185 major = ddi_name_to_major(minor_name); 7186 if (major == (major_t)-1) 7187 return (DDI_FAILURE); 7188 7189 if (ddi_hold_driver(major) == NULL) 7190 return (DDI_FAILURE); 7191 7192 if (STREAMSTAB(major) == NULL) { 7193 ddi_rele_driver(major); 7194 return (DDI_FAILURE); 7195 } 7196 ddi_rele_driver(major); 7197 7198 if (devtp) 7199 *devtp = makedevice(clone_major, (minor_t)major); 7200 7201 if (spectypep) 7202 *spectypep = S_IFCHR; 7203 7204 return (DDI_SUCCESS); 7205 } 7206 7207 ASSERT(!MUTEX_HELD(&(DEVI(dip)->devi_lock))); 7208 mutex_enter(&(DEVI(dip)->devi_lock)); 7209 7210 for (dmdp = DEVI(dip)->devi_minor; dmdp; dmdp = dmdp->next) { 7211 if (((dmdp->type != DDM_MINOR) && 7212 (dmdp->type != DDM_INTERNAL_PATH) && 7213 (dmdp->type != DDM_DEFAULT)) || 7214 strcmp(minor_name, dmdp->ddm_name)) 7215 continue; 7216 7217 if (devtp) 7218 *devtp = dmdp->ddm_dev; 7219 7220 if (spectypep) 7221 *spectypep = dmdp->ddm_spec_type; 7222 7223 mutex_exit(&(DEVI(dip)->devi_lock)); 7224 return (DDI_SUCCESS); 7225 } 7226 7227 mutex_exit(&(DEVI(dip)->devi_lock)); 7228 return (DDI_FAILURE); 7229 } 7230 7231 extern char hw_serial[]; 7232 static kmutex_t devid_gen_mutex; 7233 static short devid_gen_number; 7234 7235 #ifdef DEBUG 7236 7237 static int devid_register_corrupt = 0; 7238 static int devid_register_corrupt_major = 0; 7239 static int devid_register_corrupt_hint = 0; 7240 static int devid_register_corrupt_hint_major = 0; 7241 7242 static int devid_lyr_debug = 0; 7243 7244 #define DDI_DEBUG_DEVID_DEVTS(msg, ndevs, devs) \ 7245 if (devid_lyr_debug) \ 7246 ddi_debug_devid_devts(msg, ndevs, devs) 

#else

#define	DDI_DEBUG_DEVID_DEVTS(msg, ndevs, devs)

#endif /* DEBUG */


#ifdef DEBUG

/* Debug aid: dump a list of dev_ts under a caller-supplied heading. */
static void
ddi_debug_devid_devts(char *msg, int ndevs, dev_t *devs)
{
	int i;

	cmn_err(CE_CONT, "%s:\n", msg);
	for (i = 0; i < ndevs; i++) {
		cmn_err(CE_CONT, " 0x%lx\n", devs[i]);
	}
}

/* Debug aid: dump a list of device paths under a caller-supplied heading. */
static void
ddi_debug_devid_paths(char *msg, int npaths, char **paths)
{
	int i;

	cmn_err(CE_CONT, "%s:\n", msg);
	for (i = 0; i < npaths; i++) {
		cmn_err(CE_CONT, " %s\n", paths[i]);
	}
}

/* Debug aid: dump the dev_ts associated with a single device path. */
static void
ddi_debug_devid_devts_per_path(char *path, int ndevs, dev_t *devs)
{
	int i;

	cmn_err(CE_CONT, "dev_ts per path %s\n", path);
	for (i = 0; i < ndevs; i++) {
		cmn_err(CE_CONT, " 0x%lx\n", devs[i]);
	}
}

#endif /* DEBUG */

/*
 * Register device id into DDI framework.
 * Must be called when device is attached.
 *
 * Validates the devid, stamps the driver-name hint into it, and stores
 * its string encoding as the DEVID_PROP_NAME property on dip.
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
i_ddi_devid_register(dev_info_t *dip, ddi_devid_t devid)
{
	impl_devid_t	*i_devid = (impl_devid_t *)devid;
	size_t		driver_len;
	const char	*driver_name;
	char		*devid_str;
	major_t		major;

	if ((dip == NULL) ||
	    ((major = ddi_driver_major(dip)) == (major_t)-1))
		return (DDI_FAILURE);

	/* verify that the devid is valid */
	if (ddi_devid_valid(devid) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/* Updating driver name hint in devid */
	driver_name = ddi_driver_name(dip);
	driver_len = strlen(driver_name);
	if (driver_len > DEVID_HINT_SIZE) {
		/* Pick up last four characters of driver name */
		driver_name += driver_len - DEVID_HINT_SIZE;
		driver_len = DEVID_HINT_SIZE;
	}
	bzero(i_devid->did_driver, DEVID_HINT_SIZE);
	bcopy(driver_name, i_devid->did_driver, driver_len);

#ifdef DEBUG
	/*
	 * Corrupt the devid for testing.  The devid_register_corrupt*
	 * tunables deliberately perturb the id or hint bytes so the
	 * devid-mismatch recovery paths can be exercised.
	 */
	if (devid_register_corrupt)
		i_devid->did_id[0] += devid_register_corrupt;
	if (devid_register_corrupt_major &&
	    (major == devid_register_corrupt_major))
		i_devid->did_id[0] += 1;
	if (devid_register_corrupt_hint)
		i_devid->did_driver[0] += devid_register_corrupt_hint;
	if (devid_register_corrupt_hint_major &&
	    (major == devid_register_corrupt_hint_major))
		i_devid->did_driver[0] += 1;
#endif /* DEBUG */

	/* encode the devid as a string */
	if ((devid_str = ddi_devid_str_encode(devid, NULL)) == NULL)
		return (DDI_FAILURE);

	/* add string as a string property */
	if (ndi_prop_update_string(DDI_DEV_T_NONE, dip,
	    DEVID_PROP_NAME, devid_str) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s%d: devid property update failed",
		    ddi_driver_name(dip), ddi_get_instance(dip));
		ddi_devid_str_free(devid_str);
		return (DDI_FAILURE);
	}

	ddi_devid_str_free(devid_str);

#ifdef DEVID_COMPATIBILITY
	/*
	 * marker for devinfo snapshot compatibility.
	 * This code gets deleted when di_devid is gone from libdevid
	 */
	DEVI(dip)->devi_devid = DEVID_COMPATIBILITY;
#endif /* DEVID_COMPATIBILITY */
	return (DDI_SUCCESS);
}

/*
 * Public wrapper: register the devid on the dip and enter it into the
 * devid-to-path cache; sets DEVI_REGISTERED_DEVID on cache success.
 * Registration itself still succeeds even if caching fails (warning
 * logged), matching the return of i_ddi_devid_register().
 */
int
ddi_devid_register(dev_info_t *dip, ddi_devid_t devid)
{
	int rval;

	rval = i_ddi_devid_register(dip, devid);
	if (rval == DDI_SUCCESS) {
		/*
		 * Register devid in devid-to-path cache
		 */
		if (e_devid_cache_register(dip, devid) == DDI_SUCCESS) {
			mutex_enter(&DEVI(dip)->devi_lock);
			DEVI(dip)->devi_flags |= DEVI_REGISTERED_DEVID;
			mutex_exit(&DEVI(dip)->devi_lock);
		} else {
			cmn_err(CE_WARN, "%s%d: failed to cache devid",
			    ddi_driver_name(dip), ddi_get_instance(dip));
		}
	} else {
		cmn_err(CE_WARN, "%s%d: failed to register devid",
		    ddi_driver_name(dip), ddi_get_instance(dip));
	}
	return (rval);
}

/*
 * Remove (unregister) device id from DDI framework.
 * Must be called when device is detached.
 */
static void
i_ddi_devid_unregister(dev_info_t *dip)
{
#ifdef DEVID_COMPATIBILITY
	/*
	 * marker for micro release devinfo snapshot compatibility.
	 * This code gets deleted for the minor release.
	 */
	DEVI(dip)->devi_devid = NULL;		/* unset DEVID_PROP */
#endif /* DEVID_COMPATIBILITY */

	/* remove the devid property */
	(void) ndi_prop_remove(DDI_DEV_T_NONE, dip, DEVID_PROP_NAME);
}

/*
 * Public wrapper: clear DEVI_REGISTERED_DEVID, drop the devid from the
 * devid-to-path cache, then remove the devid property from the dip.
 */
void
ddi_devid_unregister(dev_info_t *dip)
{
	mutex_enter(&DEVI(dip)->devi_lock);
	DEVI(dip)->devi_flags &= ~DEVI_REGISTERED_DEVID;
	mutex_exit(&DEVI(dip)->devi_lock);
	e_devid_cache_unregister(dip);
	i_ddi_devid_unregister(dip);
}

/*
 * Allocate and initialize a device id.
7419 */ 7420 int 7421 ddi_devid_init( 7422 dev_info_t *dip, 7423 ushort_t devid_type, 7424 ushort_t nbytes, 7425 void *id, 7426 ddi_devid_t *ret_devid) 7427 { 7428 impl_devid_t *i_devid; 7429 int sz = sizeof (*i_devid) + nbytes - sizeof (char); 7430 int driver_len; 7431 const char *driver_name; 7432 7433 switch (devid_type) { 7434 case DEVID_SCSI3_WWN: 7435 /*FALLTHRU*/ 7436 case DEVID_SCSI_SERIAL: 7437 /*FALLTHRU*/ 7438 case DEVID_ATA_SERIAL: 7439 /*FALLTHRU*/ 7440 case DEVID_ENCAP: 7441 if (nbytes == 0) 7442 return (DDI_FAILURE); 7443 if (id == NULL) 7444 return (DDI_FAILURE); 7445 break; 7446 case DEVID_FAB: 7447 if (nbytes != 0) 7448 return (DDI_FAILURE); 7449 if (id != NULL) 7450 return (DDI_FAILURE); 7451 nbytes = sizeof (int) + 7452 sizeof (struct timeval32) + sizeof (short); 7453 sz += nbytes; 7454 break; 7455 default: 7456 return (DDI_FAILURE); 7457 } 7458 7459 if ((i_devid = kmem_zalloc(sz, KM_SLEEP)) == NULL) 7460 return (DDI_FAILURE); 7461 7462 i_devid->did_magic_hi = DEVID_MAGIC_MSB; 7463 i_devid->did_magic_lo = DEVID_MAGIC_LSB; 7464 i_devid->did_rev_hi = DEVID_REV_MSB; 7465 i_devid->did_rev_lo = DEVID_REV_LSB; 7466 DEVID_FORMTYPE(i_devid, devid_type); 7467 DEVID_FORMLEN(i_devid, nbytes); 7468 7469 /* Fill in driver name hint */ 7470 driver_name = ddi_driver_name(dip); 7471 driver_len = strlen(driver_name); 7472 if (driver_len > DEVID_HINT_SIZE) { 7473 /* Pick up last four characters of driver name */ 7474 driver_name += driver_len - DEVID_HINT_SIZE; 7475 driver_len = DEVID_HINT_SIZE; 7476 } 7477 7478 bcopy(driver_name, i_devid->did_driver, driver_len); 7479 7480 /* Fill in id field */ 7481 if (devid_type == DEVID_FAB) { 7482 char *cp; 7483 int hostid; 7484 char *hostid_cp = &hw_serial[0]; 7485 struct timeval32 timestamp32; 7486 int i; 7487 int *ip; 7488 short gen; 7489 7490 /* increase the generation number */ 7491 mutex_enter(&devid_gen_mutex); 7492 gen = devid_gen_number++; 7493 mutex_exit(&devid_gen_mutex); 7494 7495 cp = i_devid->did_id; 7496 7497 
/* Fill in host id (big-endian byte ordering) */ 7498 hostid = stoi(&hostid_cp); 7499 *cp++ = hibyte(hiword(hostid)); 7500 *cp++ = lobyte(hiword(hostid)); 7501 *cp++ = hibyte(loword(hostid)); 7502 *cp++ = lobyte(loword(hostid)); 7503 7504 /* 7505 * Fill in timestamp (big-endian byte ordering) 7506 * 7507 * (Note that the format may have to be changed 7508 * before 2038 comes around, though it's arguably 7509 * unique enough as it is..) 7510 */ 7511 uniqtime32(×tamp32); 7512 ip = (int *)×tamp32; 7513 for (i = 0; 7514 i < sizeof (timestamp32) / sizeof (int); i++, ip++) { 7515 int val; 7516 val = *ip; 7517 *cp++ = hibyte(hiword(val)); 7518 *cp++ = lobyte(hiword(val)); 7519 *cp++ = hibyte(loword(val)); 7520 *cp++ = lobyte(loword(val)); 7521 } 7522 7523 /* fill in the generation number */ 7524 *cp++ = hibyte(gen); 7525 *cp++ = lobyte(gen); 7526 } else 7527 bcopy(id, i_devid->did_id, nbytes); 7528 7529 /* return device id */ 7530 *ret_devid = (ddi_devid_t)i_devid; 7531 return (DDI_SUCCESS); 7532 } 7533 7534 int 7535 i_ddi_devi_get_devid(dev_t dev, dev_info_t *dip, ddi_devid_t *ret_devid) 7536 { 7537 char *devidstr; 7538 7539 ASSERT(dev != DDI_DEV_T_NONE); 7540 7541 /* look up the property, devt specific first */ 7542 if (ddi_prop_lookup_string(dev, dip, DDI_PROP_DONTPASS, 7543 DEVID_PROP_NAME, &devidstr) != DDI_PROP_SUCCESS) { 7544 if ((dev == DDI_DEV_T_ANY) || 7545 (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 7546 DDI_PROP_DONTPASS, DEVID_PROP_NAME, &devidstr) != 7547 DDI_PROP_SUCCESS)) { 7548 return (DDI_FAILURE); 7549 } 7550 } 7551 7552 /* convert to binary form */ 7553 if (ddi_devid_str_decode(devidstr, ret_devid, NULL) == -1) { 7554 ddi_prop_free(devidstr); 7555 return (DDI_FAILURE); 7556 } 7557 ddi_prop_free(devidstr); 7558 return (DDI_SUCCESS); 7559 } 7560 7561 /* 7562 * Return a copy of the device id for dev_t 7563 */ 7564 int 7565 ddi_lyr_get_devid(dev_t dev, ddi_devid_t *ret_devid) 7566 { 7567 dev_info_t *dip; 7568 int rval; 7569 7570 /* get the dip */ 7571 if 
	    ((dip = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
		return (DDI_FAILURE);

	rval = i_ddi_devi_get_devid(dev, dip, ret_devid);

	ddi_release_devi(dip);		/* e_ddi_hold_devi_by_dev() */
	return (rval);
}

/*
 * Return a copy of the minor name for dev_t and spec_type
 *
 * The minor name is looked up under devi_lock, and the copy is made
 * with the lock dropped (kmem_alloc can sleep); the lookup is repeated
 * after reacquiring the lock in case the minor list changed, retrying
 * if the name length changed.  Caller frees *minor_name.
 */
int
ddi_lyr_get_minor_name(dev_t dev, int spec_type, char **minor_name)
{
	dev_info_t	*dip;
	char		*nm;
	size_t		alloc_sz, sz;

	if ((dip = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
		return (DDI_FAILURE);

	mutex_enter(&(DEVI(dip)->devi_lock));

	if ((nm = i_ddi_devtspectype_to_minorname(dip,
	    dev, spec_type)) == NULL) {
		mutex_exit(&(DEVI(dip)->devi_lock));
		ddi_release_devi(dip);	/* e_ddi_hold_devi_by_dev() */
		return (DDI_FAILURE);
	}

	/* make a copy */
	alloc_sz = strlen(nm) + 1;
retry:
	/* drop lock to allocate memory */
	mutex_exit(&(DEVI(dip)->devi_lock));
	*minor_name = kmem_alloc(alloc_sz, KM_SLEEP);
	mutex_enter(&(DEVI(dip)->devi_lock));

	/* re-check things, since we dropped the lock */
	if ((nm = i_ddi_devtspectype_to_minorname(dip,
	    dev, spec_type)) == NULL) {
		mutex_exit(&(DEVI(dip)->devi_lock));
		kmem_free(*minor_name, alloc_sz);
		*minor_name = NULL;
		ddi_release_devi(dip);	/* e_ddi_hold_devi_by_dev() */
		return (DDI_FAILURE);
	}

	/* verify size is the same */
	sz = strlen(nm) + 1;
	if (alloc_sz != sz) {
		kmem_free(*minor_name, alloc_sz);
		alloc_sz = sz;
		goto retry;
	}

	/* sz == alloc_sz - make a copy */
	(void) strcpy(*minor_name, nm);

	mutex_exit(&(DEVI(dip)->devi_lock));
	ddi_release_devi(dip);	/* e_ddi_hold_devi_by_dev() */
	return (DDI_SUCCESS);
}

/*
 * Map a devid (plus optional minor name) to the list of matching
 * dev_ts via the devid cache; on a cache miss, trigger devid
 * discovery and retry once.  On success the caller frees the list
 * with ddi_lyr_free_devlist().
 */
int
ddi_lyr_devid_to_devlist(
	ddi_devid_t	devid,
	char		*minor_name,
	int		*retndevs,
	dev_t		**retdevs)
{
	ASSERT(ddi_devid_valid(devid) == DDI_SUCCESS);

	if (e_devid_cache_to_devt_list(devid, minor_name,
	    retndevs, retdevs) == DDI_SUCCESS) {
		ASSERT(*retndevs > 0);
		DDI_DEBUG_DEVID_DEVTS("ddi_lyr_devid_to_devlist",
		    *retndevs, *retdevs);
		return (DDI_SUCCESS);
	}

	if (e_ddi_devid_discovery(devid) == DDI_FAILURE) {
		return (DDI_FAILURE);
	}

	if (e_devid_cache_to_devt_list(devid, minor_name,
	    retndevs, retdevs) == DDI_SUCCESS) {
		ASSERT(*retndevs > 0);
		DDI_DEBUG_DEVID_DEVTS("ddi_lyr_devid_to_devlist",
		    *retndevs, *retdevs);
		return (DDI_SUCCESS);
	}

	return (DDI_FAILURE);
}

/* Free a dev_t list returned by ddi_lyr_devid_to_devlist(). */
void
ddi_lyr_free_devlist(dev_t *devlist, int ndevs)
{
	kmem_free(devlist, sizeof (dev_t) * ndevs);
}

/*
 * Note: This will need to be fixed if we ever allow processes to
 * have more than one data model per exec.
 */
model_t
ddi_mmap_get_model(void)
{
	return (get_udatamodel());
}

/* Strip the "native" bit from a data-model value, keeping ILP32/LP64. */
model_t
ddi_model_convert_from(model_t model)
{
	return ((model & DDI_MODEL_MASK) & ~DDI_MODEL_NATIVE);
}

/*
 * ddi interfaces managing storage and retrieval of eventcookies.
 */

/*
 * Invoke bus nexus driver's implementation of the
 * (*bus_remove_eventcall)() interface to remove a registered
 * callback handler for "event".
 */
int
ddi_remove_event_handler(ddi_callback_id_t id)
{
	ndi_event_callbacks_t *cb = (ndi_event_callbacks_t *)id;
	dev_info_t *ddip;

	ASSERT(cb);
	if (!cb) {
		return (DDI_FAILURE);
	}

	ddip = NDI_EVENT_DDIP(cb->ndi_evtcb_cookie);
	return (ndi_busop_remove_eventcall(ddip, id));
}

/*
 * Invoke bus nexus driver's implementation of the
 * (*bus_add_eventcall)() interface to register a callback handler
 * for "event".
 */
int
ddi_add_event_handler(dev_info_t *dip, ddi_eventcookie_t event,
    void (*handler)(dev_info_t *, ddi_eventcookie_t, void *, void *),
    void *arg, ddi_callback_id_t *id)
{
	return (ndi_busop_add_eventcall(dip, dip, event, handler, arg, id));
}


/*
 * Return a handle for event "name" by calling up the device tree
 * hierarchy via (*bus_get_eventcookie)() interface until claimed
 * by a bus nexus or top of dev_info tree is reached.
 */
int
ddi_get_eventcookie(dev_info_t *dip, char *name,
    ddi_eventcookie_t *event_cookiep)
{
	return (ndi_busop_get_eventcookie(dip, dip,
	    name, event_cookiep));
}

/*
 * single thread access to dev_info node and set state
 *
 * Blocks until none of the states in w_mask are set on the node,
 * then sets the states in s_mask.  has_lock indicates the caller
 * already holds devi_lock.
 */
void
i_devi_enter(dev_info_t *dip, uint_t s_mask, uint_t w_mask, int has_lock)
{
	if (!has_lock)
		mutex_enter(&(DEVI(dip)->devi_lock));

	ASSERT(mutex_owned(&(DEVI(dip)->devi_lock)));

	/*
	 * wait until state(s) have been changed
	 */
	while ((DEVI(dip)->devi_state & w_mask) != 0) {
		cv_wait(&(DEVI(dip)->devi_cv), &(DEVI(dip)->devi_lock));
	}
	DEVI(dip)->devi_state |= s_mask;

	if (!has_lock)
		mutex_exit(&(DEVI(dip)->devi_lock));
}

/*
 * Counterpart of i_devi_enter: clear the states in c_mask and wake
 * all waiters.  has_lock indicates the caller already holds devi_lock.
 */
void
i_devi_exit(dev_info_t *dip, uint_t c_mask, int has_lock)
{
	if (!has_lock)
		mutex_enter(&(DEVI(dip)->devi_lock));

	ASSERT(mutex_owned(&(DEVI(dip)->devi_lock)));

	/*
	 * clear the state(s) and wakeup any threads waiting
	 * for state change
	 */
	DEVI(dip)->devi_state &= ~c_mask;
	cv_broadcast(&(DEVI(dip)->devi_cv));

	if (!has_lock)
		mutex_exit(&(DEVI(dip)->devi_lock));
}

/*
 * This procedure is provided as the general callback function when
 * umem_lockmemory calls as_add_callback for long term memory locking.
 * When as_unmap, as_setprot, or as_free encounter segments which have
 * locked memory, this callback will be invoked.
 */
void
umem_lock_undo(struct as *as, void *arg, uint_t event)
{
	_NOTE(ARGUNUSED(as, event))
	struct ddi_umem_cookie *cp = (struct ddi_umem_cookie *)arg;

	/*
	 * Call the cleanup function.  Decrement the cookie reference
	 * count, if it goes to zero, return the memory for the cookie.
	 * The i_ddi_umem_unlock for this cookie may or may not have been
	 * called already.  It is the responsibility of the caller of
	 * umem_lockmemory to handle the case of the cleanup routine
	 * being called after a ddi_umem_unlock for the cookie
	 * was called.
	 */

	(*cp->callbacks.cbo_umem_lock_cleanup)((ddi_umem_cookie_t)cp);

	/* remove the cookie if reference goes to zero */
	if (atomic_add_long_nv((ulong_t *)(&(cp->cook_refcnt)), -1) == 0) {
		kmem_free(cp, sizeof (struct ddi_umem_cookie));
	}
}

/*
 * The following two Consolidation Private routines provide generic
 * interfaces to increase/decrease the amount of device-locked memory.
 *
 * To keep project_rele and project_hold consistent, i_ddi_decr_locked_memory()
 * must be called every time i_ddi_incr_locked_memory() is called.
 */
int
/* ARGSUSED */
i_ddi_incr_locked_memory(proc_t *procp, task_t *taskp,
    kproject_t *projectp, zone_t *zonep, rctl_qty_t inc)
{
	kproject_t *projp;

	ASSERT(procp);
	ASSERT(mutex_owned(&procp->p_lock));

	/* charge the project of the calling process's current task */
	projp = procp->p_task->tk_proj;
	mutex_enter(&umem_devlockmem_rctl_lock);
	/*
	 * Test if the requested memory can be locked without exceeding the
	 * limits.
	 */
	if (rctl_test(rc_project_devlockmem, projp->kpj_rctls,
	    procp, inc, RCA_SAFE) & RCT_DENY) {
		mutex_exit(&umem_devlockmem_rctl_lock);
		return (ENOMEM);
	}
	projp->kpj_data.kpd_devlockmem += inc;
	mutex_exit(&umem_devlockmem_rctl_lock);
	/*
	 * Grab a hold on the project.
	 */
	(void) project_hold(projp);

	return (0);
}

/*
 * To keep project_rele and project_hold consistent, i_ddi_incr_locked_memory()
 * must be called every time i_ddi_decr_locked_memory() is called.
 */
/* ARGSUSED */
void
i_ddi_decr_locked_memory(proc_t *procp, task_t *taskp,
    kproject_t *projectp, zone_t *zonep, rctl_qty_t dec)
{
	ASSERT(projectp);

	mutex_enter(&umem_devlockmem_rctl_lock);
	projectp->kpj_data.kpd_devlockmem -= dec;
	mutex_exit(&umem_devlockmem_rctl_lock);

	/*
	 * Release the project pointer reference acquired in
	 * i_ddi_incr_locked_memory().
	 */
	(void) project_rele(projectp);
}

/*
 * This routine checks if the max-device-locked-memory resource ctl is
 * exceeded, if not increments it, grabs a hold on the project.
 * Returns 0 if successful otherwise returns error code
 */
static int
umem_incr_devlockmem(struct ddi_umem_cookie *cookie)
{
	proc_t		*procp;
	int		ret;

	ASSERT(cookie);
	procp = cookie->procp;
	ASSERT(procp);

	mutex_enter(&procp->p_lock);

	if ((ret = i_ddi_incr_locked_memory(procp, NULL,
	    NULL, NULL, cookie->size)) != 0) {
		mutex_exit(&procp->p_lock);
		return (ret);
	}

	/*
	 * save the project pointer in the
	 * umem cookie, project pointer already
	 * hold in i_ddi_incr_locked_memory
	 */
	cookie->lockmem_proj = (void *)procp->p_task->tk_proj;
	mutex_exit(&procp->p_lock);

	return (0);
}

/*
 * Decrements the max-device-locked-memory resource ctl and releases
 * the hold on the project that was acquired during umem_incr_devlockmem
 */
static void
umem_decr_devlockmem(struct ddi_umem_cookie *cookie)
{
	kproject_t	*projp;

	/* no-op if umem_incr_devlockmem never charged this cookie */
	if (!cookie->lockmem_proj)
		return;

	projp = (kproject_t *)cookie->lockmem_proj;
	i_ddi_decr_locked_memory(NULL, NULL, projp, NULL, cookie->size);

	cookie->lockmem_proj = NULL;
}

/*
 * A consolidation private function which is essentially equivalent to
 * ddi_umem_lock but with the addition of arguments ops_vector and procp.
 * A call to as_add_callback is done if DDI_UMEMLOCK_LONGTERM is set, and
 * the ops_vector is valid.
 *
 * Lock the virtual address range in the current process and create a
 * ddi_umem_cookie (of type UMEM_LOCKED). This can be used to pass to
 * ddi_umem_iosetup to create a buf or do devmap_umem_setup/remap to export
 * to user space.
 *
 * Note: The resource control accounting currently uses a full charge model
 * in other words attempts to lock the same/overlapping areas of memory
 * will deduct the full size of the buffer from the projects running
 * counter for the device locked memory.
 *
 * addr, size should be PAGESIZE aligned
 *
 * flags - DDI_UMEMLOCK_READ, DDI_UMEMLOCK_WRITE or both
 *	identifies whether the locked memory will be read or written or both
 *	DDI_UMEMLOCK_LONGTERM  must be set when the locking will
 * be maintained for an indefinitely long period (essentially permanent),
 * rather than for what would be required for a typical I/O completion.
 * When DDI_UMEMLOCK_LONGTERM is set, umem_lockmemory will return EFAULT
 * if the memory pertains to a regular file which is mapped MAP_SHARED.
 * This is to prevent a deadlock if a file truncation is attempted
 * after the locking is done.
 *
 * Returns 0 on success
 *	EINVAL - for invalid parameters
 *	EPERM, ENOMEM and other error codes returned by as_pagelock
 *	ENOMEM - is returned if the current request to lock memory exceeds
 *		project.max-device-locked-memory resource control value.
 *	EFAULT - memory pertains to a regular file mapped shared and
 *		and DDI_UMEMLOCK_LONGTERM flag is set
 *	EAGAIN - could not start the ddi_umem_unlock list processing thread
 */
int
umem_lockmemory(caddr_t addr, size_t len, int flags, ddi_umem_cookie_t *cookie,
    struct umem_callback_ops *ops_vector,
    proc_t *procp)
{
	int	error;
	struct ddi_umem_cookie *p;
	void	(*driver_callback)() = NULL;
	struct as *as = procp->p_as;
	struct seg		*seg;
	vnode_t			*vp;

	*cookie = NULL;		/* in case of any error return */

	/* These are the only three valid flags */
	if ((flags & ~(DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE |
	    DDI_UMEMLOCK_LONGTERM)) != 0)
		return (EINVAL);

	/* At least one (can be both) of the two access flags must be set */
	if ((flags & (DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) == 0)
		return (EINVAL);

	/* addr and len must be page-aligned */
	if (((uintptr_t)addr & PAGEOFFSET) != 0)
		return (EINVAL);

	if ((len & PAGEOFFSET) != 0)
		return (EINVAL);

	/*
	 * For longterm locking a driver callback must be specified; if
	 * not longterm then a callback is optional.
	 */
	if (ops_vector != NULL) {
		if (ops_vector->cbo_umem_callback_version !=
		    UMEM_CALLBACK_VERSION)
			return (EINVAL);
		else
			driver_callback = ops_vector->cbo_umem_lock_cleanup;
	}
	if ((driver_callback == NULL) && (flags & DDI_UMEMLOCK_LONGTERM))
		return (EINVAL);

	/*
	 * Call i_ddi_umem_unlock_thread_start if necessary.  It will
	 * be called on first ddi_umem_lock or umem_lockmemory call.
	 */
	if (ddi_umem_unlock_thread == NULL)
		i_ddi_umem_unlock_thread_start();

	/* Allocate memory for the cookie */
	p = kmem_zalloc(sizeof (struct ddi_umem_cookie), KM_SLEEP);

	/* Convert the flags to seg_rw type */
	if (flags & DDI_UMEMLOCK_WRITE) {
		p->s_flags = S_WRITE;
	} else {
		p->s_flags = S_READ;
	}

	/* Store procp in cookie for later iosetup/unlock */
	p->procp = (void *)procp;

	/*
	 * Store the struct as pointer in cookie for later use by
	 * ddi_umem_unlock.  The proc->p_as will be stale if ddi_umem_unlock
	 * is called after relvm is called.
	 */
	p->asp = as;

	/*
	 * The size field is needed for lockmem accounting.
	 */
	p->size = len;

	if (umem_incr_devlockmem(p) != 0) {
		/*
		 * The requested memory cannot be locked
		 */
		kmem_free(p, sizeof (struct ddi_umem_cookie));
		*cookie = (ddi_umem_cookie_t)NULL;
		return (ENOMEM);
	}
	/*
	 * umem_incr_devlockmem stashes the project ptr into the
	 * cookie. This is needed during unlock since that can
	 * happen in a non-USER context
	 */
	ASSERT(p->lockmem_proj);

	/* Lock the pages corresponding to addr, len in memory */
	error = as_pagelock(as, &(p->pparray), addr, len, p->s_flags);
	if (error != 0) {
		umem_decr_devlockmem(p);
		kmem_free(p, sizeof (struct ddi_umem_cookie));
		*cookie = (ddi_umem_cookie_t)NULL;
		return (error);
	}

	/*
	 * For longterm locking the addr must pertain to a seg_vn segment or
	 * or a seg_spt segment.
	 * If the segment pertains to a regular file, it cannot be
	 * mapped MAP_SHARED.
	 * This is to prevent a deadlock if a file truncation is attempted
	 * after the locking is done.
	 * Doing this after as_pagelock guarantees persistence of the as; if
	 * an unacceptable segment is found, the cleanup includes calling
	 * as_pageunlock before returning EFAULT.
	 */
	if (flags & DDI_UMEMLOCK_LONGTERM) {
		extern  struct seg_ops segspt_shmops;
		AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
		for (seg = as_segat(as, addr); ; seg = AS_SEGNEXT(as, seg)) {
			if (seg == NULL || seg->s_base > addr + len)
				break;
			if (((seg->s_ops != &segvn_ops) &&
			    (seg->s_ops != &segspt_shmops)) ||
			    ((SEGOP_GETVP(seg, addr, &vp) == 0 &&
			    vp != NULL && vp->v_type == VREG) &&
			    (SEGOP_GETTYPE(seg, addr) & MAP_SHARED))) {
				as_pageunlock(as, p->pparray,
				    addr, len, p->s_flags);
				AS_LOCK_EXIT(as, &as->a_lock);
				umem_decr_devlockmem(p);
				kmem_free(p, sizeof (struct ddi_umem_cookie));
				*cookie = (ddi_umem_cookie_t)NULL;
				return (EFAULT);
			}
		}
		AS_LOCK_EXIT(as, &as->a_lock);
	}


	/* Initialize the fields in the ddi_umem_cookie */
	p->cvaddr = addr;
	p->type = UMEM_LOCKED;
	if (driver_callback != NULL) {
		/* i_ddi_umem_unlock and umem_lock_undo may need the cookie */
		p->cook_refcnt = 2;
		p->callbacks = *ops_vector;
	} else {
		/* only i_ddi_umem_unlock needs the cookie */
		p->cook_refcnt = 1;
	}

	*cookie = (ddi_umem_cookie_t)p;

	/*
	 * If a driver callback was specified, add an entry to the
	 * as struct callback list. The as_pagelock above guarantees
	 * the persistence of as.
	 */
	if (driver_callback) {
		error = as_add_callback(as, umem_lock_undo, p, AS_ALL_EVENT,
		    addr, len, KM_SLEEP);
		if (error != 0) {
			as_pageunlock(as, p->pparray,
			    addr, len, p->s_flags);
			umem_decr_devlockmem(p);
			kmem_free(p, sizeof (struct ddi_umem_cookie));
			*cookie = (ddi_umem_cookie_t)NULL;
		}
	}
	return (error);
}

/*
 * Unlock the pages locked by ddi_umem_lock or umem_lockmemory and free
 * the cookie.  Called from i_ddi_umem_unlock_thread.
 */

static void
i_ddi_umem_unlock(struct ddi_umem_cookie *p)
{
	uint_t	rc;

	/*
	 * There is no way to determine whether a callback to
	 * umem_lock_undo was registered via as_add_callback.
	 * (i.e. umem_lockmemory was called with DDI_MEMLOCK_LONGTERM and
	 * a valid callback function structure.)  as_delete_callback
	 * is called to delete a possible registered callback.  If the
	 * return from as_delete_callbacks is AS_CALLBACK_DELETED, it
	 * indicates that there was a callback registered, and that it was
	 * successfully deleted.  Thus, the cookie reference count
	 * will never be decremented by umem_lock_undo.  Just return the
	 * memory for the cookie, since both users of the cookie are done.
	 * A return of AS_CALLBACK_NOTFOUND indicates a callback was
	 * never registered.  A return of AS_CALLBACK_DELETE_DEFERRED
	 * indicates that callback processing is taking place and
	 * umem_lock_undo is, or will be, executing, and thus decrementing
	 * the cookie reference count when it is complete.
	 *
	 * This needs to be done before as_pageunlock so that the
	 * persistence of as is guaranteed because of the locked pages.
	 *
	 */
	rc = as_delete_callback(p->asp, p);


	/*
	 * The proc->p_as will be stale if i_ddi_umem_unlock is called
	 * after relvm is called so use p->asp.
	 */
	as_pageunlock(p->asp, p->pparray, p->cvaddr, p->size, p->s_flags);

	/*
	 * Now that we have unlocked the memory decrement the
	 * max-device-locked-memory rctl
	 */
	umem_decr_devlockmem(p);

	if (rc == AS_CALLBACK_DELETED) {
		/* umem_lock_undo will not happen, return the cookie memory */
		ASSERT(p->cook_refcnt == 2);
		kmem_free(p, sizeof (struct ddi_umem_cookie));
	} else {
		/*
		 * umem_lock_undo may happen if as_delete_callback returned
		 * AS_CALLBACK_DELETE_DEFERRED.  In that case, decrement the
		 * reference count, atomically, and return the cookie
		 * memory if the reference count goes to zero.  The only
		 * other value for rc is AS_CALLBACK_NOTFOUND.  In that
		 * case, just return the cookie memory.
		 */
		if ((rc != AS_CALLBACK_DELETE_DEFERRED) ||
		    (atomic_add_long_nv((ulong_t *)(&(p->cook_refcnt)), -1)
		    == 0)) {
			kmem_free(p, sizeof (struct ddi_umem_cookie));
		}
	}
}

/*
 * i_ddi_umem_unlock_thread - deferred ddi_umem_unlock list handler.
 *
 * Call i_ddi_umem_unlock for entries in the ddi_umem_unlock list
 * until it is empty.  Then, wait for more to be added.  This thread is awoken
 * via calls to ddi_umem_unlock.
 */

static void
i_ddi_umem_unlock_thread(void)
{
	struct ddi_umem_cookie	*ret_cookie;
	callb_cpr_t	cprinfo;

	/*
	 * Process the ddi_umem_unlock list forever.  The CPR callback
	 * registration lets this kernel thread park safely across
	 * checkpoint/resume while it is blocked in cv_wait below.
	 */
	CALLB_CPR_INIT(&cprinfo, &ddi_umem_unlock_mutex,
	    callb_generic_cpr, "unlock_thread");
	for (;;) {
		mutex_enter(&ddi_umem_unlock_mutex);
		if (ddi_umem_unlock_head != NULL) {	/* list not empty */
			ret_cookie = ddi_umem_unlock_head;
			/* take it off the list */
			if ((ddi_umem_unlock_head =
			    ddi_umem_unlock_head->unl_forw) == NULL) {
				ddi_umem_unlock_tail = NULL;
			}
			/* drop the list lock before the (slow) unlock work */
			mutex_exit(&ddi_umem_unlock_mutex);
			/* unlock the pages in this cookie */
			(void) i_ddi_umem_unlock(ret_cookie);
		} else {	/* list is empty, wait for next ddi_umem_unlock */
			CALLB_CPR_SAFE_BEGIN(&cprinfo);
			cv_wait(&ddi_umem_unlock_cv, &ddi_umem_unlock_mutex);
			CALLB_CPR_SAFE_END(&cprinfo, &ddi_umem_unlock_mutex);
			mutex_exit(&ddi_umem_unlock_mutex);
		}
	}
	/* ddi_umem_unlock_thread does not exit */
	/* NOTREACHED */
}

/*
 * Start the thread that will process the ddi_umem_unlock list if it is
 * not already started (i_ddi_umem_unlock_thread).  The check-then-create
 * is done under ddi_umem_unlock_mutex so only one thread is ever created.
 */
static void
i_ddi_umem_unlock_thread_start(void)
{
	mutex_enter(&ddi_umem_unlock_mutex);
	if (ddi_umem_unlock_thread == NULL) {
		ddi_umem_unlock_thread = thread_create(NULL, 0,
		    i_ddi_umem_unlock_thread, NULL, 0, &p0,
		    TS_RUN, minclsyspri);
	}
	mutex_exit(&ddi_umem_unlock_mutex);
}

/*
 * Lock the virtual address range in the current process and create a
 * ddi_umem_cookie (of type UMEM_LOCKED). This can be used to pass to
 * ddi_umem_iosetup to create a buf or do devmap_umem_setup/remap to export
 * to user space.
 *
 * Note: The resource control accounting currently uses a full charge model
 * in other words attempts to lock the same/overlapping areas of memory
 * will deduct the full size of the buffer from the projects running
 * counter for the device locked memory. This applies to umem_lockmemory too.
 *
 * addr, size should be PAGESIZE aligned
 * flags - DDI_UMEMLOCK_READ, DDI_UMEMLOCK_WRITE or both
 *	identifies whether the locked memory will be read or written or both
 *
 * Returns 0 on success
 *	EINVAL - for invalid parameters
 *	EPERM, ENOMEM and other error codes returned by as_pagelock
 *	ENOMEM - is returned if the current request to lock memory exceeds
 *	    project.max-device-locked-memory resource control value.
 *	EAGAIN - could not start the ddi_umem_unlock list processing thread
 */
int
ddi_umem_lock(caddr_t addr, size_t len, int flags, ddi_umem_cookie_t *cookie)
{
	int	error;
	struct ddi_umem_cookie *p;

	*cookie = NULL;		/* in case of any error return */

	/* These are the only two valid flags */
	if ((flags & ~(DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) != 0) {
		return (EINVAL);
	}

	/* At least one of the two flags (or both) must be set */
	if ((flags & (DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) == 0) {
		return (EINVAL);
	}

	/* addr and len must be page-aligned */
	if (((uintptr_t)addr & PAGEOFFSET) != 0) {
		return (EINVAL);
	}

	if ((len & PAGEOFFSET) != 0) {
		return (EINVAL);
	}

	/*
	 * Call i_ddi_umem_unlock_thread_start if necessary.  It will
	 * be called on first ddi_umem_lock or umem_lockmemory call.
 */
	if (ddi_umem_unlock_thread == NULL)
		i_ddi_umem_unlock_thread_start();

	/* Allocate memory for the cookie */
	p = kmem_zalloc(sizeof (struct ddi_umem_cookie), KM_SLEEP);

	/* Convert the flags to seg_rw type */
	if (flags & DDI_UMEMLOCK_WRITE) {
		p->s_flags = S_WRITE;
	} else {
		p->s_flags = S_READ;
	}

	/* Store curproc in cookie for later iosetup/unlock */
	p->procp = (void *)curproc;

	/*
	 * Store the struct as pointer in cookie for later use by
	 * ddi_umem_unlock. The proc->p_as will be stale if ddi_umem_unlock
	 * is called after relvm is called.
	 */
	p->asp = curproc->p_as;
	/*
	 * The size field is needed for lockmem accounting.
	 */
	p->size = len;

	if (umem_incr_devlockmem(p) != 0) {
		/*
		 * The requested memory cannot be locked: the project's
		 * max-device-locked-memory rctl would be exceeded.
		 */
		kmem_free(p, sizeof (struct ddi_umem_cookie));
		*cookie = (ddi_umem_cookie_t)NULL;
		return (ENOMEM);
	}
	/*
	 * umem_incr_devlockmem stashes the project ptr into the
	 * cookie. This is needed during unlock since that can
	 * happen in a non-USER context
	 */
	ASSERT(p->lockmem_proj);

	/* Lock the pages corresponding to addr, len in memory */
	error = as_pagelock(((proc_t *)p->procp)->p_as, &(p->pparray),
	    addr, len, p->s_flags);
	if (error != 0) {
		/* undo the rctl charge and cookie allocation on failure */
		umem_decr_devlockmem(p);
		kmem_free(p, sizeof (struct ddi_umem_cookie));
		*cookie = (ddi_umem_cookie_t)NULL;
		return (error);
	}

	/* Initialize the fields in the ddi_umem_cookie */
	p->cvaddr = addr;
	p->type = UMEM_LOCKED;
	p->cook_refcnt = 1;

	*cookie = (ddi_umem_cookie_t)p;
	return (error);
}

/*
 * Add the cookie to the ddi_umem_unlock list. Pages will be
 * unlocked by i_ddi_umem_unlock_thread.
 */

void
ddi_umem_unlock(ddi_umem_cookie_t cookie)
{
	struct	ddi_umem_cookie *p = (struct ddi_umem_cookie *)cookie;

	ASSERT(p->type == UMEM_LOCKED);
	ASSERT(CPU_ON_INTR(CPU) == 0); /* cannot be high level */
	ASSERT(ddi_umem_unlock_thread != NULL);

	p->unl_forw = (struct ddi_umem_cookie *)NULL;	/* end of list */
	/*
	 * Queue the unlock request and notify i_ddi_umem_unlock thread
	 * if it's called in the interrupt context. Otherwise, unlock pages
	 * immediately.
	 */
	if (servicing_interrupt()) {
		/* queue the unlock request and notify the thread */
		mutex_enter(&ddi_umem_unlock_mutex);
		if (ddi_umem_unlock_head == NULL) {
			/* list was empty: the thread may be asleep, wake it */
			ddi_umem_unlock_head = ddi_umem_unlock_tail = p;
			cv_broadcast(&ddi_umem_unlock_cv);
		} else {
			ddi_umem_unlock_tail->unl_forw = p;
			ddi_umem_unlock_tail = p;
		}
		mutex_exit(&ddi_umem_unlock_mutex);
	} else {
		/* unlock the pages right away */
		(void) i_ddi_umem_unlock(p);
	}
}

/*
 * Create a buf structure from a ddi_umem_cookie
 * cookie - is a ddi_umem_cookie from ddi_umem_lock and ddi_umem_alloc
 *		(only UMEM_LOCKED & KMEM_NON_PAGEABLE types supported)
 * off, len - identifies the portion of the memory represented by the cookie
 *		that the buf points to.
 *	NOTE: off, len need to follow the alignment/size restrictions of the
 *		device (dev) that this buf will be passed to. Some devices
 *		will accept unrestricted alignment/size, whereas others (such as
 *		st) require some block-size alignment/size.
It is the caller's
 *		responsibility to ensure that the alignment/size restrictions
 *		are met (we cannot assert as we do not know the restrictions)
 *
 * direction - is one of B_READ or B_WRITE and needs to be compatible with
 *		the flags used in ddi_umem_lock
 *
 * The following three arguments are used to initialize fields in the
 * buf structure and are uninterpreted by this routine.
 *
 * dev
 * blkno
 * iodone
 *
 * sleepflag - is one of DDI_UMEM_SLEEP or DDI_UMEM_NOSLEEP
 *
 * Returns a buf structure pointer on success (to be freed by freerbuf)
 *	NULL on any parameter error or memory alloc failure
 *
 */
struct buf *
ddi_umem_iosetup(ddi_umem_cookie_t cookie, off_t off, size_t len,
	int direction, dev_t dev, daddr_t blkno,
	int (*iodone)(struct buf *), int sleepflag)
{
	struct ddi_umem_cookie *p = (struct ddi_umem_cookie *)cookie;
	struct buf *bp;

	/*
	 * check for valid cookie offset, len
	 *
	 * NOTE(review): off is a signed off_t and is not checked for a
	 * negative value here; a negative off would pass the (off + len)
	 * check below — presumably callers never pass one. TODO confirm.
	 */
	if ((off + len) > p->size) {
		return (NULL);
	}

	/* also catches (off + len) wrap-around for very large len */
	if (len > p->size) {
		return (NULL);
	}

	/* direction has to be one of B_READ or B_WRITE */
	if ((direction != B_READ) && (direction != B_WRITE)) {
		return (NULL);
	}

	/* These are the only two valid sleepflags */
	if ((sleepflag != DDI_UMEM_SLEEP) && (sleepflag != DDI_UMEM_NOSLEEP)) {
		return (NULL);
	}

	/*
	 * Only cookies of type UMEM_LOCKED and KMEM_NON_PAGEABLE are supported
	 */
	if ((p->type != UMEM_LOCKED) && (p->type != KMEM_NON_PAGEABLE)) {
		return (NULL);
	}

	/* If type is KMEM_NON_PAGEABLE procp is NULL */
	ASSERT((p->type == KMEM_NON_PAGEABLE) ?
	    (p->procp == NULL) : (p->procp != NULL));

	bp = kmem_alloc(sizeof (struct buf), sleepflag);
	if (bp == NULL) {
		return (NULL);
	}
	bioinit(bp);

	/* fill in the buf to describe the cookie's [off, off+len) window */
	bp->b_flags = B_BUSY | B_PHYS | direction;
	bp->b_edev = dev;
	bp->b_lblkno = blkno;
	bp->b_iodone = iodone;
	bp->b_bcount = len;
	bp->b_proc = (proc_t *)p->procp;
	ASSERT(((uintptr_t)(p->cvaddr) & PAGEOFFSET) == 0);
	bp->b_un.b_addr = (caddr_t)((uintptr_t)(p->cvaddr) + off);
	if (p->pparray != NULL) {
		/* locked cookies carry a page array; export it as a shadow */
		bp->b_flags |= B_SHADOW;
		ASSERT(((uintptr_t)(p->cvaddr) & PAGEOFFSET) == 0);
		bp->b_shadow = p->pparray + btop(off);
	}
	return (bp);
}

/*
 * Fault-handling and related routines
 */

/*
 * Map a dev_info node's status flags onto the coarse DDI device-state
 * values; the first matching condition (in severity order) wins.
 */
ddi_devstate_t
ddi_get_devstate(dev_info_t *dip)
{
	if (DEVI_IS_DEVICE_OFFLINE(dip))
		return (DDI_DEVSTATE_OFFLINE);
	else if (DEVI_IS_DEVICE_DOWN(dip) || DEVI_IS_BUS_DOWN(dip))
		return (DDI_DEVSTATE_DOWN);
	else if (DEVI_IS_BUS_QUIESCED(dip))
		return (DDI_DEVSTATE_QUIESCED);
	else if (DEVI_IS_DEVICE_DEGRADED(dip))
		return (DDI_DEVSTATE_DEGRADED);
	else
		return (DDI_DEVSTATE_UP);
}

/*
 * Report a fault against a device: package the impact/location/message
 * into a fault event and post it via the NDI event framework.
 */
void
ddi_dev_report_fault(dev_info_t *dip, ddi_fault_impact_t impact,
	ddi_fault_location_t location, const char *message)
{
	struct ddi_fault_event_data fd;
	ddi_eventcookie_t ec;

	/*
	 * Assemble all the information into a fault-event-data structure
	 */
	fd.f_dip = dip;
	fd.f_impact = impact;
	fd.f_location = location;
	fd.f_message = message;
	fd.f_oldstate = ddi_get_devstate(dip);

	/*
	 * Get eventcookie from defining parent.
	 */
	if (ddi_get_eventcookie(dip, DDI_DEVI_FAULT_EVENT, &ec) !=
	    DDI_SUCCESS)
		return;

	(void) ndi_post_event(dip, dip, ec, &fd);
}

/*
 * Return the node's device-class string (may be NULL if never set).
 */
char *
i_ddi_devi_class(dev_info_t *dip)
{
	return (DEVI(dip)->devi_device_class);
}

/*
 * Replace the node's device-class string with a copy of devi_class.
 * flag is the kmem allocation flag (KM_SLEEP/KM_NOSLEEP) passed to
 * i_ddi_strdup.  Returns DDI_SUCCESS, or DDI_FAILURE if the copy
 * could not be allocated (the old class string is freed either way).
 *
 * NOTE(review): with KM_SLEEP the i_ddi_strdup below can block while
 * devi_lock is held — fine for an adaptive mutex, but verify no caller
 * holds devi_lock requirements that forbid sleeping here.
 */
int
i_ddi_set_devi_class(dev_info_t *dip, char *devi_class, int flag)
{
	struct dev_info *devi = DEVI(dip);

	mutex_enter(&devi->devi_lock);

	if (devi->devi_device_class)
		kmem_free(devi->devi_device_class,
		    strlen(devi->devi_device_class) + 1);

	if ((devi->devi_device_class = i_ddi_strdup(devi_class, flag))
	    != NULL) {
		mutex_exit(&devi->devi_lock);
		return (DDI_SUCCESS);
	}

	mutex_exit(&devi->devi_lock);

	return (DDI_FAILURE);
}


/*
 * Task Queues DDI interfaces.
 */

/*
 * Create a DDI task queue.  When dip is non-NULL the queue is named
 * "<drivername>_<name>" and instanced by the driver instance number;
 * otherwise name is used as-is.  TASKQ_DEFAULTPRI maps to minclsyspri.
 */
/* ARGSUSED */
ddi_taskq_t *
ddi_taskq_create(dev_info_t *dip, const char *name, int nthreads,
    pri_t pri, uint_t cflags)
{
	char full_name[TASKQ_NAMELEN];
	const char *tq_name;
	int nodeid = 0;

	if (dip == NULL)
		tq_name = name;
	else {
		nodeid = ddi_get_instance(dip);

		if (name == NULL)
			name = "tq";

		(void) snprintf(full_name, sizeof (full_name), "%s_%s",
		    ddi_driver_name(dip), name);

		tq_name = full_name;
	}

	/* minalloc = nthreads, maxalloc = INT_MAX, prepopulated entries */
	return ((ddi_taskq_t *)taskq_create_instance(tq_name, nodeid, nthreads,
	    pri == TASKQ_DEFAULTPRI ? minclsyspri : pri,
	    nthreads, INT_MAX, TASKQ_PREPOPULATE));
}

/*
 * Destroy a task queue created by ddi_taskq_create.
 */
void
ddi_taskq_destroy(ddi_taskq_t *tq)
{
	taskq_destroy((taskq_t *)tq);
}

/*
 * Dispatch func(arg) onto the task queue.  dflags DDI_SLEEP blocks for
 * resources; otherwise the dispatch may fail and DDI_FAILURE is returned.
 */
int
ddi_taskq_dispatch(ddi_taskq_t *tq, void (* func)(void *),
	void *arg, uint_t dflags)
{
	taskqid_t id = taskq_dispatch((taskq_t *)tq, func, arg,
	    dflags == DDI_SLEEP ? TQ_SLEEP : TQ_NOSLEEP);

	return (id != 0 ?
DDI_SUCCESS : DDI_FAILURE); 8615 } 8616 8617 void 8618 ddi_taskq_wait(ddi_taskq_t *tq) 8619 { 8620 taskq_wait((taskq_t *)tq); 8621 } 8622 8623 void 8624 ddi_taskq_suspend(ddi_taskq_t *tq) 8625 { 8626 taskq_suspend((taskq_t *)tq); 8627 } 8628 8629 boolean_t 8630 ddi_taskq_suspended(ddi_taskq_t *tq) 8631 { 8632 return (taskq_suspended((taskq_t *)tq)); 8633 } 8634 8635 void 8636 ddi_taskq_resume(ddi_taskq_t *tq) 8637 { 8638 taskq_resume((taskq_t *)tq); 8639 } 8640 8641 int 8642 ddi_parse( 8643 const char *ifname, 8644 char *alnum, 8645 uint_t *nump) 8646 { 8647 const char *p; 8648 int l; 8649 ulong_t num; 8650 boolean_t nonum = B_TRUE; 8651 char c; 8652 8653 l = strlen(ifname); 8654 for (p = ifname + l; p != ifname; l--) { 8655 c = *--p; 8656 if (!isdigit(c)) { 8657 (void) strlcpy(alnum, ifname, l + 1); 8658 if (ddi_strtoul(p + 1, NULL, 10, &num) != 0) 8659 return (DDI_FAILURE); 8660 break; 8661 } 8662 nonum = B_FALSE; 8663 } 8664 if (l == 0 || nonum) 8665 return (DDI_FAILURE); 8666 8667 *nump = num; 8668 return (DDI_SUCCESS); 8669 } 8670