1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 1990, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright 2014 Garrett D'Amore <garrett@damore.org>
25 * Copyright 2023 MNX Cloud, Inc.
26 */
27
28 #include <sys/note.h>
29 #include <sys/types.h>
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/buf.h>
33 #include <sys/uio.h>
34 #include <sys/cred.h>
35 #include <sys/poll.h>
36 #include <sys/mman.h>
37 #include <sys/kmem.h>
38 #include <sys/model.h>
39 #include <sys/file.h>
40 #include <sys/proc.h>
41 #include <sys/open.h>
42 #include <sys/user.h>
43 #include <sys/t_lock.h>
44 #include <sys/vm.h>
45 #include <sys/stat.h>
46 #include <vm/hat.h>
47 #include <vm/seg.h>
48 #include <vm/seg_vn.h>
49 #include <vm/seg_dev.h>
50 #include <vm/as.h>
51 #include <sys/cmn_err.h>
52 #include <sys/cpuvar.h>
53 #include <sys/debug.h>
54 #include <sys/autoconf.h>
55 #include <sys/sunddi.h>
56 #include <sys/esunddi.h>
57 #include <sys/sunndi.h>
58 #include <sys/kstat.h>
59 #include <sys/conf.h>
60 #include <sys/ddi_impldefs.h> /* include implementation structure defs */
61 #include <sys/ndi_impldefs.h> /* include prototypes */
62 #include <sys/ddi_periodic.h>
63 #include <sys/hwconf.h>
64 #include <sys/pathname.h>
65 #include <sys/modctl.h>
66 #include <sys/epm.h>
67 #include <sys/devctl.h>
68 #include <sys/callb.h>
69 #include <sys/cladm.h>
70 #include <sys/sysevent.h>
71 #include <sys/dacf_impl.h>
72 #include <sys/ddidevmap.h>
73 #include <sys/bootconf.h>
74 #include <sys/disp.h>
75 #include <sys/atomic.h>
76 #include <sys/promif.h>
77 #include <sys/instance.h>
78 #include <sys/sysevent/eventdefs.h>
79 #include <sys/task.h>
80 #include <sys/project.h>
81 #include <sys/taskq.h>
82 #include <sys/devpolicy.h>
83 #include <sys/ctype.h>
84 #include <net/if.h>
85 #include <sys/rctl.h>
86 #include <sys/zone.h>
87 #include <sys/clock_impl.h>
88 #include <sys/ddi.h>
89 #include <sys/modhash.h>
90 #include <sys/sunldi_impl.h>
91 #include <sys/fs/dv_node.h>
92 #include <sys/fs/snode.h>
93
94 extern pri_t minclsyspri;
95
96 extern rctl_hndl_t rc_project_locked_mem;
97 extern rctl_hndl_t rc_zone_locked_mem;
98
99 #ifdef DEBUG
100 static int sunddi_debug = 0;
101 #endif /* DEBUG */
102
103 /* ddi_umem_unlock miscellaneous */
104
105 static void i_ddi_umem_unlock_thread_start(void);
106
107 static kmutex_t ddi_umem_unlock_mutex; /* unlock list mutex */
108 static kcondvar_t ddi_umem_unlock_cv; /* unlock list block/unblock */
109 static kthread_t *ddi_umem_unlock_thread;
110 /*
111 * The ddi_umem_unlock FIFO list. NULL head pointer indicates empty list.
112 */
113 static struct ddi_umem_cookie *ddi_umem_unlock_head = NULL;
114 static struct ddi_umem_cookie *ddi_umem_unlock_tail = NULL;
115
116 /*
117 * DDI(Sun) Function and flag definitions:
118 */
119
120 #if defined(__x86)
121 /*
122 * Used to indicate which entries were chosen from a range.
123 */
124 char *chosen_reg = "chosen-reg";
125 #endif
126
127 /*
128 * Function used to ring system console bell
129 */
130 void (*ddi_console_bell_func)(clock_t duration);
131
132 /*
133 * Creating register mappings and handling interrupts:
134 */
135
136 /*
137 * Generic ddi_map: Call parent to fulfill request...
138 */
139
140 int
ddi_map(dev_info_t * dp,ddi_map_req_t * mp,off_t offset,off_t len,caddr_t * addrp)141 ddi_map(dev_info_t *dp, ddi_map_req_t *mp, off_t offset,
142 off_t len, caddr_t *addrp)
143 {
144 dev_info_t *pdip;
145
146 ASSERT(dp);
147 pdip = (dev_info_t *)DEVI(dp)->devi_parent;
148 return ((DEVI(pdip)->devi_ops->devo_bus_ops->bus_map)(pdip,
149 dp, mp, offset, len, addrp));
150 }
151
152 /*
153 * ddi_apply_range: (Called by nexi only.)
154 * Apply ranges in parent node dp, to child regspec rp...
155 */
156
157 int
ddi_apply_range(dev_info_t * dp,dev_info_t * rdip,struct regspec * rp)158 ddi_apply_range(dev_info_t *dp, dev_info_t *rdip, struct regspec *rp)
159 {
160 return (i_ddi_apply_range(dp, rdip, rp));
161 }
162
/*
 * Map register set 'rnumber' of 'dip' into kernel virtual space, returning
 * the address through kaddrp.  On x86, the chosen (bus, addr, size) tuple
 * is also recorded in a 'chosen-reg' property (adjusted by 'offset'/'len')
 * before asking the parent nexus to perform the actual mapping.
 */
int
ddi_map_regs(dev_info_t *dip, uint_t rnumber, caddr_t *kaddrp, off_t offset,
    off_t len)
{
	ddi_map_req_t mr;
#if defined(__x86)
	struct {
		int bus;
		int addr;
		int size;
	} reg, *reglist;
	uint_t length;
	int rc;

	/*
	 * get the 'registers' or the 'reg' property.
	 * We look up the reg property as an array of
	 * int's.
	 */
	rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "registers", (int **)&reglist, &length);
	if (rc != DDI_PROP_SUCCESS)
		rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
		    DDI_PROP_DONTPASS, "reg", (int **)&reglist, &length);
	if (rc == DDI_PROP_SUCCESS) {
		/*
		 * point to the required entry.
		 * NOTE(review): rnumber is not validated against 'length';
		 * presumably callers guarantee it is in range — confirm.
		 */
		reg = reglist[rnumber];
		reg.addr += offset;
		if (len != 0)
			reg.size = len;
		/*
		 * make a new property containing ONLY the required tuple.
		 */
		if (ddi_prop_update_int_array(DDI_DEV_T_NONE, dip,
		    chosen_reg, (int *)&reg, (sizeof (reg)/sizeof (int)))
		    != DDI_PROP_SUCCESS) {
			/* Non-fatal: mapping proceeds without the property. */
			cmn_err(CE_WARN, "%s%d: cannot create '%s' "
			    "property", DEVI(dip)->devi_name,
			    DEVI(dip)->devi_instance, chosen_reg);
		}
		/*
		 * free the memory allocated by
		 * ddi_prop_lookup_int_array ().
		 */
		ddi_prop_free((void *)reglist);
	}
#endif
	mr.map_op = DDI_MO_MAP_LOCKED;
	mr.map_type = DDI_MT_RNUMBER;
	mr.map_obj.rnumber = rnumber;
	mr.map_prot = PROT_READ | PROT_WRITE;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_handlep = NULL;
	mr.map_vers = DDI_MAP_VERSION;

	/*
	 * Call my parent to map in my regs.
	 */

	return (ddi_map(dip, &mr, offset, len, kaddrp));
}
226
227 void
ddi_unmap_regs(dev_info_t * dip,uint_t rnumber,caddr_t * kaddrp,off_t offset,off_t len)228 ddi_unmap_regs(dev_info_t *dip, uint_t rnumber, caddr_t *kaddrp, off_t offset,
229 off_t len)
230 {
231 ddi_map_req_t mr;
232
233 mr.map_op = DDI_MO_UNMAP;
234 mr.map_type = DDI_MT_RNUMBER;
235 mr.map_flags = DDI_MF_KERNEL_MAPPING;
236 mr.map_prot = PROT_READ | PROT_WRITE; /* who cares? */
237 mr.map_obj.rnumber = rnumber;
238 mr.map_handlep = NULL;
239 mr.map_vers = DDI_MAP_VERSION;
240
241 /*
242 * Call my parent to unmap my regs.
243 */
244
245 (void) ddi_map(dip, &mr, offset, len, kaddrp);
246 *kaddrp = (caddr_t)0;
247 #if defined(__x86)
248 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip, chosen_reg);
249 #endif
250 }
251
252 int
ddi_bus_map(dev_info_t * dip,dev_info_t * rdip,ddi_map_req_t * mp,off_t offset,off_t len,caddr_t * vaddrp)253 ddi_bus_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
254 off_t offset, off_t len, caddr_t *vaddrp)
255 {
256 return (i_ddi_bus_map(dip, rdip, mp, offset, len, vaddrp));
257 }
258
259 /*
 * nullbusmap: The DDI default bus_map entry point for nexi
261 * not conforming to the reg/range paradigm (i.e. scsi, etc.)
262 * with no HAT/MMU layer to be programmed at this level.
263 *
264 * If the call is to map by rnumber, return an error,
265 * otherwise pass anything else up the tree to my parent.
266 */
267 int
nullbusmap(dev_info_t * dip,dev_info_t * rdip,ddi_map_req_t * mp,off_t offset,off_t len,caddr_t * vaddrp)268 nullbusmap(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
269 off_t offset, off_t len, caddr_t *vaddrp)
270 {
271 _NOTE(ARGUNUSED(rdip))
272 if (mp->map_type == DDI_MT_RNUMBER)
273 return (DDI_ME_UNSUPPORTED);
274
275 return (ddi_map(dip, mp, offset, len, vaddrp));
276 }
277
278 /*
279 * ddi_rnumber_to_regspec: Not for use by leaf drivers.
280 * Only for use by nexi using the reg/range paradigm.
281 */
282 struct regspec *
ddi_rnumber_to_regspec(dev_info_t * dip,int rnumber)283 ddi_rnumber_to_regspec(dev_info_t *dip, int rnumber)
284 {
285 return (i_ddi_rnumber_to_regspec(dip, rnumber));
286 }
287
288
289 /*
290 * Note that we allow the dip to be nil because we may be called
291 * prior even to the instantiation of the devinfo tree itself - all
292 * regular leaf and nexus drivers should always use a non-nil dip!
293 *
294 * We treat peek in a somewhat cavalier fashion .. assuming that we'll
295 * simply get a synchronous fault as soon as we touch a missing address.
296 *
297 * Poke is rather more carefully handled because we might poke to a write
298 * buffer, "succeed", then only find some time later that we got an
299 * asynchronous fault that indicated that the address we were writing to
300 * was not really backed by hardware.
301 */
302
303 static int
i_ddi_peekpoke(dev_info_t * devi,ddi_ctl_enum_t cmd,size_t size,void * addr,void * value_p)304 i_ddi_peekpoke(dev_info_t *devi, ddi_ctl_enum_t cmd, size_t size,
305 void *addr, void *value_p)
306 {
307 union {
308 uint64_t u64;
309 uint32_t u32;
310 uint16_t u16;
311 uint8_t u8;
312 } peekpoke_value;
313
314 peekpoke_ctlops_t peekpoke_args;
315 uint64_t dummy_result;
316 int rval;
317
318 /* Note: size is assumed to be correct; it is not checked. */
319 peekpoke_args.size = size;
320 peekpoke_args.dev_addr = (uintptr_t)addr;
321 peekpoke_args.handle = NULL;
322 peekpoke_args.repcount = 1;
323 peekpoke_args.flags = 0;
324
325 if (cmd == DDI_CTLOPS_POKE) {
326 switch (size) {
327 case sizeof (uint8_t):
328 peekpoke_value.u8 = *(uint8_t *)value_p;
329 break;
330 case sizeof (uint16_t):
331 peekpoke_value.u16 = *(uint16_t *)value_p;
332 break;
333 case sizeof (uint32_t):
334 peekpoke_value.u32 = *(uint32_t *)value_p;
335 break;
336 case sizeof (uint64_t):
337 peekpoke_value.u64 = *(uint64_t *)value_p;
338 break;
339 }
340 }
341
342 peekpoke_args.host_addr = (uintptr_t)&peekpoke_value.u64;
343
344 if (devi != NULL)
345 rval = ddi_ctlops(devi, devi, cmd, &peekpoke_args,
346 &dummy_result);
347 else
348 rval = peekpoke_mem(cmd, &peekpoke_args);
349
350 /*
351 * A NULL value_p is permitted by ddi_peek(9F); discard the result.
352 */
353 if ((cmd == DDI_CTLOPS_PEEK) & (value_p != NULL)) {
354 switch (size) {
355 case sizeof (uint8_t):
356 *(uint8_t *)value_p = peekpoke_value.u8;
357 break;
358 case sizeof (uint16_t):
359 *(uint16_t *)value_p = peekpoke_value.u16;
360 break;
361 case sizeof (uint32_t):
362 *(uint32_t *)value_p = peekpoke_value.u32;
363 break;
364 case sizeof (uint64_t):
365 *(uint64_t *)value_p = peekpoke_value.u64;
366 break;
367 }
368 }
369
370 return (rval);
371 }
372
373 /*
374 * Keep ddi_peek() and ddi_poke() in case 3rd parties are calling this.
375 * they shouldn't be, but the 9f manpage kind of pseudo exposes it.
376 */
377 int
ddi_peek(dev_info_t * devi,size_t size,void * addr,void * value_p)378 ddi_peek(dev_info_t *devi, size_t size, void *addr, void *value_p)
379 {
380 switch (size) {
381 case sizeof (uint8_t):
382 case sizeof (uint16_t):
383 case sizeof (uint32_t):
384 case sizeof (uint64_t):
385 break;
386 default:
387 return (DDI_FAILURE);
388 }
389
390 return (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, size, addr, value_p));
391 }
392
393 int
ddi_poke(dev_info_t * devi,size_t size,void * addr,void * value_p)394 ddi_poke(dev_info_t *devi, size_t size, void *addr, void *value_p)
395 {
396 switch (size) {
397 case sizeof (uint8_t):
398 case sizeof (uint16_t):
399 case sizeof (uint32_t):
400 case sizeof (uint64_t):
401 break;
402 default:
403 return (DDI_FAILURE);
404 }
405
406 return (i_ddi_peekpoke(devi, DDI_CTLOPS_POKE, size, addr, value_p));
407 }
408
409 int
ddi_peek8(dev_info_t * dip,int8_t * addr,int8_t * val_p)410 ddi_peek8(dev_info_t *dip, int8_t *addr, int8_t *val_p)
411 {
412 return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
413 val_p));
414 }
415
416 int
ddi_peek16(dev_info_t * dip,int16_t * addr,int16_t * val_p)417 ddi_peek16(dev_info_t *dip, int16_t *addr, int16_t *val_p)
418 {
419 return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
420 val_p));
421 }
422
423 int
ddi_peek32(dev_info_t * dip,int32_t * addr,int32_t * val_p)424 ddi_peek32(dev_info_t *dip, int32_t *addr, int32_t *val_p)
425 {
426 return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
427 val_p));
428 }
429
430 int
ddi_peek64(dev_info_t * dip,int64_t * addr,int64_t * val_p)431 ddi_peek64(dev_info_t *dip, int64_t *addr, int64_t *val_p)
432 {
433 return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
434 val_p));
435 }
436
437
438 /*
439 * We need to separate the old interfaces from the new ones and leave them
440 * in here for a while. Previous versions of the OS defined the new interfaces
441 * to the old interfaces. This way we can fix things up so that we can
442 * eventually remove these interfaces.
443 * e.g. A 3rd party module/driver using ddi_peek8 and built against S10
444 * or earlier will actually have a reference to ddi_peekc in the binary.
445 */
446 #ifdef _ILP32
447 int
ddi_peekc(dev_info_t * dip,int8_t * addr,int8_t * val_p)448 ddi_peekc(dev_info_t *dip, int8_t *addr, int8_t *val_p)
449 {
450 return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
451 val_p));
452 }
453
454 int
ddi_peeks(dev_info_t * dip,int16_t * addr,int16_t * val_p)455 ddi_peeks(dev_info_t *dip, int16_t *addr, int16_t *val_p)
456 {
457 return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
458 val_p));
459 }
460
461 int
ddi_peekl(dev_info_t * dip,int32_t * addr,int32_t * val_p)462 ddi_peekl(dev_info_t *dip, int32_t *addr, int32_t *val_p)
463 {
464 return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
465 val_p));
466 }
467
468 int
ddi_peekd(dev_info_t * dip,int64_t * addr,int64_t * val_p)469 ddi_peekd(dev_info_t *dip, int64_t *addr, int64_t *val_p)
470 {
471 return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
472 val_p));
473 }
474 #endif /* _ILP32 */
475
476 int
ddi_poke8(dev_info_t * dip,int8_t * addr,int8_t val)477 ddi_poke8(dev_info_t *dip, int8_t *addr, int8_t val)
478 {
479 return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
480 }
481
482 int
ddi_poke16(dev_info_t * dip,int16_t * addr,int16_t val)483 ddi_poke16(dev_info_t *dip, int16_t *addr, int16_t val)
484 {
485 return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
486 }
487
488 int
ddi_poke32(dev_info_t * dip,int32_t * addr,int32_t val)489 ddi_poke32(dev_info_t *dip, int32_t *addr, int32_t val)
490 {
491 return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
492 }
493
494 int
ddi_poke64(dev_info_t * dip,int64_t * addr,int64_t val)495 ddi_poke64(dev_info_t *dip, int64_t *addr, int64_t val)
496 {
497 return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
498 }
499
500 /*
501 * We need to separate the old interfaces from the new ones and leave them
502 * in here for a while. Previous versions of the OS defined the new interfaces
503 * to the old interfaces. This way we can fix things up so that we can
504 * eventually remove these interfaces.
505 * e.g. A 3rd party module/driver using ddi_poke8 and built against S10
506 * or earlier will actually have a reference to ddi_pokec in the binary.
507 */
508 #ifdef _ILP32
509 int
ddi_pokec(dev_info_t * dip,int8_t * addr,int8_t val)510 ddi_pokec(dev_info_t *dip, int8_t *addr, int8_t val)
511 {
512 return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
513 }
514
515 int
ddi_pokes(dev_info_t * dip,int16_t * addr,int16_t val)516 ddi_pokes(dev_info_t *dip, int16_t *addr, int16_t val)
517 {
518 return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
519 }
520
521 int
ddi_pokel(dev_info_t * dip,int32_t * addr,int32_t val)522 ddi_pokel(dev_info_t *dip, int32_t *addr, int32_t val)
523 {
524 return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
525 }
526
527 int
ddi_poked(dev_info_t * dip,int64_t * addr,int64_t val)528 ddi_poked(dev_info_t *dip, int64_t *addr, int64_t val)
529 {
530 return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
531 }
532 #endif /* _ILP32 */
533
534 /*
535 * ddi_peekpokeio() is used primarily by the mem drivers for moving
536 * data to and from uio structures via peek and poke. Note that we
537 * use "internal" routines ddi_peek and ddi_poke to make this go
538 * slightly faster, avoiding the call overhead ..
539 */
/*
 * Move 'len' bytes between device address 'addr' and the caller's uio,
 * using peek (UIO_READ) or poke (UIO_WRITE) operations of at most
 * 'xfersize' bytes each, degrading the width to honor the alignment of
 * both the remaining length and the device address.
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
int
ddi_peekpokeio(dev_info_t *devi, struct uio *uio, enum uio_rw rw,
    caddr_t addr, size_t len, uint_t xfersize)
{
	int64_t	ibuffer;	/* widest scratch cell for multi-byte moves */
	int8_t w8;
	size_t sz;
	int o;

	/* Never transfer wider than the native word. */
	if (xfersize > sizeof (long))
		xfersize = sizeof (long);

	while (len != 0) {
		/* Odd length or odd address forces byte-at-a-time. */
		if ((len | (uintptr_t)addr) & 1) {
			sz = sizeof (int8_t);
			if (rw == UIO_WRITE) {
				if ((o = uwritec(uio)) == -1)
					return (DDI_FAILURE);
				if (ddi_poke8(devi, (int8_t *)addr,
				    (int8_t)o) != DDI_SUCCESS)
					return (DDI_FAILURE);
			} else {
				if (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, sz,
				    (int8_t *)addr, &w8) != DDI_SUCCESS)
					return (DDI_FAILURE);
				if (ureadc(w8, uio))
					return (DDI_FAILURE);
			}
		} else {
			/*
			 * Pick the widest transfer that both the remaining
			 * length and the current address are aligned for,
			 * capped at xfersize.
			 */
			switch (xfersize) {
			case sizeof (int64_t):
				if (((len | (uintptr_t)addr) &
				    (sizeof (int64_t) - 1)) == 0) {
					sz = xfersize;
					break;
				}
				/*FALLTHROUGH*/
			case sizeof (int32_t):
				if (((len | (uintptr_t)addr) &
				    (sizeof (int32_t) - 1)) == 0) {
					sz = xfersize;
					break;
				}
				/*FALLTHROUGH*/
			default:
				/*
				 * This still assumes that we might have an
				 * I/O bus out there that permits 16-bit
				 * transfers (and that it would be upset by
				 * 32-bit transfers from such locations).
				 */
				sz = sizeof (int16_t);
				break;
			}

			/* Read: device -> ibuffer -> uio. */
			if (rw == UIO_READ) {
				if (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, sz,
				    addr, &ibuffer) != DDI_SUCCESS)
					return (DDI_FAILURE);
			}

			if (uiomove(&ibuffer, sz, rw, uio))
				return (DDI_FAILURE);

			/* Write: uio -> ibuffer -> device. */
			if (rw == UIO_WRITE) {
				if (i_ddi_peekpoke(devi, DDI_CTLOPS_POKE, sz,
				    addr, &ibuffer) != DDI_SUCCESS)
					return (DDI_FAILURE);
			}
		}
		addr += sz;
		len -= sz;
	}
	return (DDI_SUCCESS);
}
615
616 /*
617 * These routines are used by drivers that do layered ioctls
618 * On sparc, they're implemented in assembler to avoid spilling
619 * register windows in the common (copyin) case ..
620 */
621 #if !defined(__sparc)
622 int
ddi_copyin(const void * buf,void * kernbuf,size_t size,int flags)623 ddi_copyin(const void *buf, void *kernbuf, size_t size, int flags)
624 {
625 if (flags & FKIOCTL)
626 return (kcopy(buf, kernbuf, size) ? -1 : 0);
627 return (copyin(buf, kernbuf, size));
628 }
629
630 int
ddi_copyout(const void * buf,void * kernbuf,size_t size,int flags)631 ddi_copyout(const void *buf, void *kernbuf, size_t size, int flags)
632 {
633 if (flags & FKIOCTL)
634 return (kcopy(buf, kernbuf, size) ? -1 : 0);
635 return (copyout(buf, kernbuf, size));
636 }
637 #endif /* !__sparc */
638
639 /*
640 * Conversions in nexus pagesize units. We don't duplicate the
641 * 'nil dip' semantics of peek/poke because btopr/btop/ptob are DDI/DKI
642 * routines anyway.
643 */
644 unsigned long
ddi_btop(dev_info_t * dip,unsigned long bytes)645 ddi_btop(dev_info_t *dip, unsigned long bytes)
646 {
647 unsigned long pages;
648
649 (void) ddi_ctlops(dip, dip, DDI_CTLOPS_BTOP, &bytes, &pages);
650 return (pages);
651 }
652
653 unsigned long
ddi_btopr(dev_info_t * dip,unsigned long bytes)654 ddi_btopr(dev_info_t *dip, unsigned long bytes)
655 {
656 unsigned long pages;
657
658 (void) ddi_ctlops(dip, dip, DDI_CTLOPS_BTOPR, &bytes, &pages);
659 return (pages);
660 }
661
662 unsigned long
ddi_ptob(dev_info_t * dip,unsigned long pages)663 ddi_ptob(dev_info_t *dip, unsigned long pages)
664 {
665 unsigned long bytes;
666
667 (void) ddi_ctlops(dip, dip, DDI_CTLOPS_PTOB, &pages, &bytes);
668 return (bytes);
669 }
670
671 unsigned int
ddi_enter_critical(void)672 ddi_enter_critical(void)
673 {
674 return ((uint_t)spl7());
675 }
676
void
ddi_exit_critical(unsigned int spl)
{
	/* Restore the spl saved by ddi_enter_critical(). */
	splx((int)spl);
}
682
683 /*
684 * Nexus ctlops punter
685 */
686
687 #if !defined(__sparc)
688 /*
689 * Request bus_ctl parent to handle a bus_ctl request
690 *
691 * (The sparc version is in sparc_ddi.s)
692 */
693 int
ddi_ctlops(dev_info_t * d,dev_info_t * r,ddi_ctl_enum_t op,void * a,void * v)694 ddi_ctlops(dev_info_t *d, dev_info_t *r, ddi_ctl_enum_t op, void *a, void *v)
695 {
696 int (*fp)();
697
698 if (!d || !r)
699 return (DDI_FAILURE);
700
701 if ((d = (dev_info_t *)DEVI(d)->devi_bus_ctl) == NULL)
702 return (DDI_FAILURE);
703
704 fp = DEVI(d)->devi_ops->devo_bus_ops->bus_ctl;
705 return ((*fp)(d, r, op, a, v));
706 }
707
708 #endif
709
710 /*
711 * DMA/DVMA setup
712 */
713
714 #if !defined(__sparc)
715 /*
716 * Request bus_dma_ctl parent to fiddle with a dma request.
717 *
718 * (The sparc version is in sparc_subr.s)
719 */
720 int
ddi_dma_mctl(dev_info_t * dip,dev_info_t * rdip,ddi_dma_handle_t handle,enum ddi_dma_ctlops request,off_t * offp,size_t * lenp,caddr_t * objp,uint_t flags)721 ddi_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
722 ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
723 off_t *offp, size_t *lenp, caddr_t *objp, uint_t flags)
724 {
725 int (*fp)();
726
727 if (dip != ddi_root_node())
728 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_ctl;
729 fp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_ctl;
730 return ((*fp) (dip, rdip, handle, request, offp, lenp, objp, flags));
731 }
732 #endif
733
734 /*
735 * For all DMA control functions, call the DMA control
736 * routine and return status.
737 *
738 * Just plain assume that the parent is to be called.
739 * If a nexus driver or a thread outside the framework
740 * of a nexus driver or a leaf driver calls these functions,
741 * it is up to them to deal with the fact that the parent's
742 * bus_dma_ctl function will be the first one called.
743 */
744
745 #define HD ((ddi_dma_impl_t *)h)->dmai_rdip
746
747 /*
748 * This routine is left in place to satisfy link dependencies
749 * for any 3rd party nexus drivers that rely on it. It is never
750 * called, though.
751 */
/*ARGSUSED*/
int
ddi_dma_map(dev_info_t *dip, dev_info_t *rdip,
    struct ddi_dma_req *dmareqp, ddi_dma_handle_t *handlep)
{
	/* Dead stub kept only for link compatibility; always fails. */
	return (DDI_FAILURE);
}
759
760 #if !defined(__sparc)
761
762 /*
763 * The SPARC versions of these routines are done in assembler to
764 * save register windows, so they're in sparc_subr.s.
765 */
766
767 int
ddi_dma_allochdl(dev_info_t * dip,dev_info_t * rdip,ddi_dma_attr_t * attr,int (* waitfp)(caddr_t),caddr_t arg,ddi_dma_handle_t * handlep)768 ddi_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
769 int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
770 {
771 int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_attr_t *,
772 int (*)(caddr_t), caddr_t, ddi_dma_handle_t *);
773
774 if (dip != ddi_root_node())
775 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_allochdl;
776
777 funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_allochdl;
778 return ((*funcp)(dip, rdip, attr, waitfp, arg, handlep));
779 }
780
/*
 * Free a DMA handle by calling the parent nexus's bus_dma_freehdl entry.
 */
int
ddi_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handlep)
{
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t);

	/*
	 * NOTE(review): this walks via devi_bus_dma_allochdl while the
	 * other handle routines use their own cached parent pointer
	 * (e.g. devi_bus_dma_unbindhdl).  Presumably alloc and free
	 * resolve to the same parent — confirm against ndi_impldefs.
	 */
	if (dip != ddi_root_node())
		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_allochdl;

	funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_freehdl;
	return ((*funcp)(dip, rdip, handlep));
}
792
793 int
ddi_dma_bindhdl(dev_info_t * dip,dev_info_t * rdip,ddi_dma_handle_t handle,struct ddi_dma_req * dmareq,ddi_dma_cookie_t * cp,uint_t * ccountp)794 ddi_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
795 ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
796 ddi_dma_cookie_t *cp, uint_t *ccountp)
797 {
798 int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
799 struct ddi_dma_req *, ddi_dma_cookie_t *, uint_t *);
800
801 if (dip != ddi_root_node())
802 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl;
803
804 funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_bindhdl;
805 return ((*funcp)(dip, rdip, handle, dmareq, cp, ccountp));
806 }
807
808 int
ddi_dma_unbindhdl(dev_info_t * dip,dev_info_t * rdip,ddi_dma_handle_t handle)809 ddi_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
810 ddi_dma_handle_t handle)
811 {
812 int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t);
813
814 if (dip != ddi_root_node())
815 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_unbindhdl;
816
817 funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_unbindhdl;
818 return ((*funcp)(dip, rdip, handle));
819 }
820
821
822 int
ddi_dma_flush(dev_info_t * dip,dev_info_t * rdip,ddi_dma_handle_t handle,off_t off,size_t len,uint_t cache_flags)823 ddi_dma_flush(dev_info_t *dip, dev_info_t *rdip,
824 ddi_dma_handle_t handle, off_t off, size_t len,
825 uint_t cache_flags)
826 {
827 int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
828 off_t, size_t, uint_t);
829
830 if (dip != ddi_root_node())
831 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_flush;
832
833 funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_flush;
834 return ((*funcp)(dip, rdip, handle, off, len, cache_flags));
835 }
836
837 int
ddi_dma_win(dev_info_t * dip,dev_info_t * rdip,ddi_dma_handle_t handle,uint_t win,off_t * offp,size_t * lenp,ddi_dma_cookie_t * cookiep,uint_t * ccountp)838 ddi_dma_win(dev_info_t *dip, dev_info_t *rdip,
839 ddi_dma_handle_t handle, uint_t win, off_t *offp,
840 size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
841 {
842 int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
843 uint_t, off_t *, size_t *, ddi_dma_cookie_t *, uint_t *);
844
845 if (dip != ddi_root_node())
846 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_win;
847
848 funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_win;
849 return ((*funcp)(dip, rdip, handle, win, offp, lenp,
850 cookiep, ccountp));
851 }
852
853 int
ddi_dma_sync(ddi_dma_handle_t h,off_t o,size_t l,uint_t whom)854 ddi_dma_sync(ddi_dma_handle_t h, off_t o, size_t l, uint_t whom)
855 {
856 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)h;
857 dev_info_t *dip, *rdip;
858 int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t, off_t,
859 size_t, uint_t);
860
861 /*
862 * the DMA nexus driver will set DMP_NOSYNC if the
863 * platform does not require any sync operation. For
864 * example if the memory is uncached or consistent
865 * and without any I/O write buffers involved.
866 */
867 if ((hp->dmai_rflags & DMP_NOSYNC) == DMP_NOSYNC)
868 return (DDI_SUCCESS);
869
870 dip = rdip = hp->dmai_rdip;
871 if (dip != ddi_root_node())
872 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_flush;
873 funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_flush;
874 return ((*funcp)(dip, rdip, h, o, l, whom));
875 }
876
/*
 * Unbind a DMA handle using only the handle itself: recover the
 * requesting dip from the handle, walk to the unbindhdl parent, and
 * invoke the unbind function cached on the requesting node.
 */
int
ddi_dma_unbind_handle(ddi_dma_handle_t h)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)h;
	dev_info_t *dip, *rdip;
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t);

	dip = rdip = hp->dmai_rdip;
	if (dip != ddi_root_node())
		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_unbindhdl;
	/*
	 * Note: unlike ddi_dma_unbindhdl(), the function pointer comes
	 * from rdip's cached devi_bus_dma_unbindfunc rather than from
	 * dip's bus_ops vector.
	 */
	funcp = DEVI(rdip)->devi_bus_dma_unbindfunc;
	return ((*funcp)(dip, rdip, h));
}
890
891 #endif /* !__sparc */
892
893 /*
894 * DMA burst sizes, and transfer minimums
895 */
896
897 int
ddi_dma_burstsizes(ddi_dma_handle_t handle)898 ddi_dma_burstsizes(ddi_dma_handle_t handle)
899 {
900 ddi_dma_impl_t *dimp = (ddi_dma_impl_t *)handle;
901
902 if (!dimp)
903 return (0);
904 else
905 return (dimp->dmai_burstsizes);
906 }
907
908 /*
909 * Given two DMA attribute structures, apply the attributes
910 * of one to the other, following the rules of attributes
911 * and the wishes of the caller.
912 *
913 * The rules of DMA attribute structures are that you cannot
914 * make things *less* restrictive as you apply one set
915 * of attributes to another.
916 *
917 */
void
ddi_dma_attr_merge(ddi_dma_attr_t *attr, ddi_dma_attr_t *mod)
{
	/* Address window: keep the intersection of the two ranges. */
	attr->dma_attr_addr_lo =
	    MAX(attr->dma_attr_addr_lo, mod->dma_attr_addr_lo);
	attr->dma_attr_addr_hi =
	    MIN(attr->dma_attr_addr_hi, mod->dma_attr_addr_hi);
	/* Per-cookie count: the smaller maximum wins. */
	attr->dma_attr_count_max =
	    MIN(attr->dma_attr_count_max, mod->dma_attr_count_max);
	/* Alignment: the stricter (larger) alignment wins. */
	attr->dma_attr_align =
	    MAX(attr->dma_attr_align, mod->dma_attr_align);
	/* Burst sizes: only those both sides support remain. */
	attr->dma_attr_burstsizes =
	    (uint_t)(attr->dma_attr_burstsizes & mod->dma_attr_burstsizes);
	attr->dma_attr_minxfer =
	    maxbit(attr->dma_attr_minxfer, mod->dma_attr_minxfer);
	attr->dma_attr_maxxfer =
	    MIN(attr->dma_attr_maxxfer, mod->dma_attr_maxxfer);
	/* Segment boundary: the tighter constraint wins. */
	attr->dma_attr_seg = MIN(attr->dma_attr_seg, mod->dma_attr_seg);
	/* Scatter/gather list length: the shorter limit wins. */
	attr->dma_attr_sgllen = MIN((uint_t)attr->dma_attr_sgllen,
	    (uint_t)mod->dma_attr_sgllen);
	/* Granularity: the coarser (larger) granularity wins. */
	attr->dma_attr_granular =
	    MAX(attr->dma_attr_granular, mod->dma_attr_granular);
}
941
942 /*
943 * mmap/segmap interface:
944 */
945
946 /*
947 * ddi_segmap: setup the default segment driver. Calls the drivers
948 * XXmmap routine to validate the range to be mapped.
949 * Return ENXIO of the range is not valid. Create
950 * a seg_dev segment that contains all of the
951 * necessary information and will reference the
952 * default segment driver routines. It returns zero
953 * on success or non-zero on failure.
954 */
955 int
ddi_segmap(dev_t dev,off_t offset,struct as * asp,caddr_t * addrp,off_t len,uint_t prot,uint_t maxprot,uint_t flags,cred_t * credp)956 ddi_segmap(dev_t dev, off_t offset, struct as *asp, caddr_t *addrp, off_t len,
957 uint_t prot, uint_t maxprot, uint_t flags, cred_t *credp)
958 {
959 extern int spec_segmap(dev_t, off_t, struct as *, caddr_t *,
960 off_t, uint_t, uint_t, uint_t, struct cred *);
961
962 return (spec_segmap(dev, offset, asp, addrp, len,
963 prot, maxprot, flags, credp));
964 }
965
966 /*
967 * ddi_map_fault: Resolve mappings at fault time. Used by segment
968 * drivers. Allows each successive parent to resolve
969 * address translations and add its mappings to the
970 * mapping list supplied in the page structure. It
971 * returns zero on success or non-zero on failure.
972 */
973
974 int
ddi_map_fault(dev_info_t * dip,struct hat * hat,struct seg * seg,caddr_t addr,struct devpage * dp,pfn_t pfn,uint_t prot,uint_t lock)975 ddi_map_fault(dev_info_t *dip, struct hat *hat, struct seg *seg,
976 caddr_t addr, struct devpage *dp, pfn_t pfn, uint_t prot, uint_t lock)
977 {
978 return (i_ddi_map_fault(dip, dip, hat, seg, addr, dp, pfn, prot, lock));
979 }
980
981 /*
982 * ddi_device_mapping_check: Called from ddi_segmap_setup.
983 * Invokes platform specific DDI to determine whether attributes specified
984 * in attr(9s) are valid for the region of memory that will be made
985 * available for direct access to user process via the mmap(2) system call.
986 */
int
ddi_device_mapping_check(dev_t dev, ddi_device_acc_attr_t *accattrp,
    uint_t rnumber, uint_t *hat_flags)
{
	ddi_acc_handle_t handle;
	ddi_map_req_t mr;
	ddi_acc_hdl_t *hp;
	int result;
	dev_info_t *dip;

	/*
	 * we use e_ddi_hold_devi_by_dev to search for the devi. We
	 * release it immediately since it should already be held by
	 * a devfs vnode.
	 */
	if ((dip =
	    e_ddi_hold_devi_by_dev(dev, E_DDI_HOLD_DEVI_NOATTACH)) == NULL)
		return (-1);
	ddi_release_devi(dip);		/* for e_ddi_hold_devi_by_dev() */

	/*
	 * Allocate and initialize the common elements of data
	 * access handle.
	 */
	handle = impl_acc_hdl_alloc(KM_SLEEP, NULL);
	if (handle == NULL)
		return (-1);

	hp = impl_acc_hdl_get(handle);
	hp->ah_vers = VERS_ACCHDL;
	hp->ah_dip = dip;
	hp->ah_rnumber = rnumber;
	hp->ah_offset = 0;
	hp->ah_len = 0;
	hp->ah_acc = *accattrp;

	/*
	 * Set up the mapping request and call to parent.
	 * DDI_MO_MAP_HANDLE validates the attributes without creating
	 * an actual kernel mapping.
	 */
	mr.map_op = DDI_MO_MAP_HANDLE;
	mr.map_type = DDI_MT_RNUMBER;
	mr.map_obj.rnumber = rnumber;
	mr.map_prot = PROT_READ | PROT_WRITE;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_handlep = hp;
	mr.map_vers = DDI_MAP_VERSION;
	result = ddi_map(dip, &mr, 0, 0, NULL);

	/*
	 * Region must be mappable, pick up flags from the framework.
	 * NOTE(review): *hat_flags is written even when ddi_map() failed;
	 * presumably callers ignore it on a -1 return — confirm.
	 */
	*hat_flags = hp->ah_hat_flags;

	impl_acc_hdl_free(handle);

	/*
	 * check for end result.
	 */
	if (result != DDI_SUCCESS)
		return (-1);
	return (0);
}
1049
1050
1051 /*
1052 * Property functions: See also, ddipropdefs.h.
1053 *
1054 * These functions are the framework for the property functions,
1055 * i.e. they support software defined properties. All implementation
1056 * specific property handling (i.e.: self-identifying devices and
1057 * PROM defined properties are handled in the implementation specific
1058 * functions (defined in ddi_implfuncs.h).
1059 */
1060
1061 /*
1062 * nopropop: Shouldn't be called, right?
1063 */
int
nopropop(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
    char *name, caddr_t valuep, int *lengthp)
{
	/*
	 * Stub cb_prop_op(9E) entry point for drivers with no properties:
	 * all arguments are ignored and the lookup always misses.
	 */
	_NOTE(ARGUNUSED(dev, dip, prop_op, mod_flags, name, valuep, lengthp))
	return (DDI_PROP_NOT_FOUND);
}
1071
1072 #ifdef DDI_PROP_DEBUG
1073 int ddi_prop_debug_flag = 0;
1074
1075 int
ddi_prop_debug(int enable)1076 ddi_prop_debug(int enable)
1077 {
1078 int prev = ddi_prop_debug_flag;
1079
1080 if ((enable != 0) || (prev != 0))
1081 printf("ddi_prop_debug: debugging %s\n",
1082 enable ? "enabled" : "disabled");
1083 ddi_prop_debug_flag = enable;
1084 return (prev);
1085 }
1086
1087 #endif /* DDI_PROP_DEBUG */
1088
1089 /*
1090 * Search a property list for a match, if found return pointer
1091 * to matching prop struct, else return NULL.
1092 */
1093
1094 ddi_prop_t *
i_ddi_prop_search(dev_t dev,char * name,uint_t flags,ddi_prop_t ** list_head)1095 i_ddi_prop_search(dev_t dev, char *name, uint_t flags, ddi_prop_t **list_head)
1096 {
1097 ddi_prop_t *propp;
1098
1099 /*
1100 * find the property in child's devinfo:
1101 * Search order defined by this search function is first matching
1102 * property with input dev == DDI_DEV_T_ANY matching any dev or
1103 * dev == propp->prop_dev, name == propp->name, and the correct
1104 * data type as specified in the flags. If a DDI_DEV_T_NONE dev
1105 * value made it this far then it implies a DDI_DEV_T_ANY search.
1106 */
1107 if (dev == DDI_DEV_T_NONE)
1108 dev = DDI_DEV_T_ANY;
1109
1110 for (propp = *list_head; propp != NULL; propp = propp->prop_next) {
1111
1112 if (!DDI_STRSAME(propp->prop_name, name))
1113 continue;
1114
1115 if ((dev != DDI_DEV_T_ANY) && (propp->prop_dev != dev))
1116 continue;
1117
1118 if (((propp->prop_flags & flags) & DDI_PROP_TYPE_MASK) == 0)
1119 continue;
1120
1121 return (propp);
1122 }
1123
1124 return ((ddi_prop_t *)0);
1125 }
1126
1127 /*
1128 * Search for property within devnames structures
1129 */
ddi_prop_t *
i_ddi_search_global_prop(dev_t dev, char *name, uint_t flags)
{
	major_t major;
	struct devnames *dnp;
	ddi_prop_t *propp;

	/*
	 * Valid dev_t value is needed to index into the
	 * correct devnames entry, therefore a dev_t
	 * value of DDI_DEV_T_ANY is not appropriate.
	 */
	ASSERT(dev != DDI_DEV_T_ANY);
	if (dev == DDI_DEV_T_ANY) {
		return ((ddi_prop_t *)0);
	}

	major = getmajor(dev);
	dnp = &(devnamesp[major]);

	/* No global property list for this driver: nothing to search. */
	if (dnp->dn_global_prop_ptr == NULL)
		return ((ddi_prop_t *)0);

	/* Walk the list under dn_lock; the lock is dropped before return. */
	LOCK_DEV_OPS(&dnp->dn_lock);

	for (propp = dnp->dn_global_prop_ptr->prop_list;
	    propp != NULL;
	    propp = (ddi_prop_t *)propp->prop_next) {

		if (!DDI_STRSAME(propp->prop_name, name))
			continue;

		/*
		 * dev_t matching is skipped for rootnex-global lookups and
		 * for LDI "any dev" lookups; otherwise the entry must have
		 * been created for exactly this dev_t.
		 */
		if ((!(flags & DDI_PROP_ROOTNEX_GLOBAL)) &&
		    (!(flags & LDI_DEV_T_ANY)) && (propp->prop_dev != dev))
			continue;

		/* Requested data type bits must overlap the property's. */
		if (((propp->prop_flags & flags) & DDI_PROP_TYPE_MASK) == 0)
			continue;

		/* Property found, return it */
		UNLOCK_DEV_OPS(&dnp->dn_lock);
		return (propp);
	}

	UNLOCK_DEV_OPS(&dnp->dn_lock);
	return ((ddi_prop_t *)0);
}
1177
/* cmn_err(9F) format string for property allocation failures. */
static char prop_no_mem_msg[] = "can't allocate memory for ddi property <%s>";
1179
1180 /*
1181 * ddi_prop_search_global:
1182 * Search the global property list within devnames
1183 * for the named property. Return the encoded value.
1184 */
1185 static int
i_ddi_prop_search_global(dev_t dev,uint_t flags,char * name,void * valuep,uint_t * lengthp)1186 i_ddi_prop_search_global(dev_t dev, uint_t flags, char *name,
1187 void *valuep, uint_t *lengthp)
1188 {
1189 ddi_prop_t *propp;
1190 caddr_t buffer;
1191
1192 propp = i_ddi_search_global_prop(dev, name, flags);
1193
1194 /* Property NOT found, bail */
1195 if (propp == (ddi_prop_t *)0)
1196 return (DDI_PROP_NOT_FOUND);
1197
1198 if (propp->prop_flags & DDI_PROP_UNDEF_IT)
1199 return (DDI_PROP_UNDEFINED);
1200
1201 if ((buffer = kmem_alloc(propp->prop_len,
1202 (flags & DDI_PROP_CANSLEEP) ? KM_SLEEP : KM_NOSLEEP)) == NULL) {
1203 cmn_err(CE_CONT, prop_no_mem_msg, name);
1204 return (DDI_PROP_NO_MEMORY);
1205 }
1206
1207 /*
1208 * Return the encoded data
1209 */
1210 *(caddr_t *)valuep = buffer;
1211 *lengthp = propp->prop_len;
1212 bcopy(propp->prop_val, buffer, propp->prop_len);
1213
1214 return (DDI_PROP_SUCCESS);
1215 }
1216
1217 /*
1218 * ddi_prop_search_common: Lookup and return the encoded value
1219 */
int
ddi_prop_search_common(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    uint_t flags, char *name, void *valuep, uint_t *lengthp)
{
	ddi_prop_t *propp;
	int i;
	caddr_t buffer;
	caddr_t prealloc = NULL;	/* buffer allocated while unlocked */
	int plength = 0;		/* size of prealloc, for kmem_free */
	dev_info_t *pdip;
	int (*bop)();

	/*
	 * The loop serves two purposes: it restarts after dropping
	 * devi_lock to pre-allocate a sleeping buffer, and it walks up
	 * the devinfo tree (dip = pdip at the bottom) when the property
	 * is not found on the current node.
	 */
	/*CONSTANTCONDITION*/
	while (1) {

		mutex_enter(&(DEVI(dip)->devi_lock));


		/*
		 * find the property in child's devinfo:
		 * Search order is:
		 * 1. driver defined properties
		 * 2. system defined properties
		 * 3. driver global properties
		 * 4. boot defined properties
		 */

		propp = i_ddi_prop_search(dev, name, flags,
		    &(DEVI(dip)->devi_drv_prop_ptr));
		if (propp == NULL) {
			propp = i_ddi_prop_search(dev, name, flags,
			    &(DEVI(dip)->devi_sys_prop_ptr));
		}
		if ((propp == NULL) && DEVI(dip)->devi_global_prop_list) {
			propp = i_ddi_prop_search(dev, name, flags,
			    &DEVI(dip)->devi_global_prop_list->prop_list);
		}

		if (propp == NULL) {
			propp = i_ddi_prop_search(dev, name, flags,
			    &(DEVI(dip)->devi_hw_prop_ptr));
		}

		/*
		 * Software property found?
		 */
		if (propp != (ddi_prop_t *)0) {

			/*
			 * If explicit undefine, return now.
			 */
			if (propp->prop_flags & DDI_PROP_UNDEF_IT) {
				mutex_exit(&(DEVI(dip)->devi_lock));
				if (prealloc)
					kmem_free(prealloc, plength);
				return (DDI_PROP_UNDEFINED);
			}

			/*
			 * If we only want to know if it exists, return now
			 */
			if (prop_op == PROP_EXISTS) {
				mutex_exit(&(DEVI(dip)->devi_lock));
				ASSERT(prealloc == NULL);
				return (DDI_PROP_SUCCESS);
			}

			/*
			 * If length only request or prop length == 0,
			 * service request and return now.
			 */
			if ((prop_op == PROP_LEN) ||(propp->prop_len == 0)) {
				*lengthp = propp->prop_len;

				/*
				 * if prop_op is PROP_LEN_AND_VAL_ALLOC
				 * that means prop_len is 0, so set valuep
				 * also to NULL
				 */
				if (prop_op == PROP_LEN_AND_VAL_ALLOC)
					*(caddr_t *)valuep = NULL;

				mutex_exit(&(DEVI(dip)->devi_lock));
				if (prealloc)
					kmem_free(prealloc, plength);
				return (DDI_PROP_SUCCESS);
			}

			/*
			 * If LEN_AND_VAL_ALLOC and the request can sleep,
			 * drop the mutex, allocate the buffer, and go
			 * through the loop again.  If we already allocated
			 * the buffer, and the size of the property changed,
			 * keep trying...
			 */
			if ((prop_op == PROP_LEN_AND_VAL_ALLOC) &&
			    (flags & DDI_PROP_CANSLEEP)) {
				if (prealloc && (propp->prop_len != plength)) {
					kmem_free(prealloc, plength);
					prealloc = NULL;
				}
				if (prealloc == NULL) {
					plength = propp->prop_len;
					mutex_exit(&(DEVI(dip)->devi_lock));
					/*
					 * propp may be stale after the lock
					 * is dropped; the restarted search
					 * re-validates it.
					 */
					prealloc = kmem_alloc(plength,
					    KM_SLEEP);
					continue;
				}
			}

			/*
			 * Allocate buffer, if required.  Either way,
			 * set `buffer' variable.
			 */
			i = *lengthp;			/* Get callers length */
			*lengthp = propp->prop_len;	/* Set callers length */

			switch (prop_op) {

			case PROP_LEN_AND_VAL_ALLOC:

				if (prealloc == NULL) {
					buffer = kmem_alloc(propp->prop_len,
					    KM_NOSLEEP);
				} else {
					buffer = prealloc;
				}

				if (buffer == NULL) {
					mutex_exit(&(DEVI(dip)->devi_lock));
					cmn_err(CE_CONT, prop_no_mem_msg, name);
					return (DDI_PROP_NO_MEMORY);
				}
				/* Set callers buf ptr */
				*(caddr_t *)valuep = buffer;
				break;

			case PROP_LEN_AND_VAL_BUF:

				if (propp->prop_len > (i)) {
					mutex_exit(&(DEVI(dip)->devi_lock));
					return (DDI_PROP_BUF_TOO_SMALL);
				}

				buffer = valuep;  /* Get callers buf ptr */
				break;

			default:
				break;
			}

			/*
			 * Do the copy.
			 */
			bcopy(propp->prop_val, buffer, propp->prop_len);
			mutex_exit(&(DEVI(dip)->devi_lock));
			return (DDI_PROP_SUCCESS);
		}

		mutex_exit(&(DEVI(dip)->devi_lock));
		if (prealloc)
			kmem_free(prealloc, plength);
		prealloc = NULL;

		/*
		 * Prop not found, call parent bus_ops to deal with possible
		 * h/w layer (possible PROM defined props, etc.) and to
		 * possibly ascend the hierarchy, if allowed by flags.
		 */
		pdip = (dev_info_t *)DEVI(dip)->devi_parent;

		/*
		 * One last call for the root driver PROM props?
		 */
		if (dip == ddi_root_node()) {
			return (ddi_bus_prop_op(dev, dip, dip, prop_op,
			    flags, name, valuep, (int *)lengthp));
		}

		/*
		 * We may have been called to check for properties
		 * within a single devinfo node that has no parent -
		 * see make_prop()
		 */
		if (pdip == NULL) {
			ASSERT((flags &
			    (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM)) ==
			    (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM));
			return (DDI_PROP_NOT_FOUND);
		}

		/*
		 * Instead of recursing, we do iterative calls up the tree.
		 * As a bit of optimization, skip the bus_op level if the
		 * node is a s/w node and if the parent's bus_prop_op function
		 * is `ddi_bus_prop_op', because we know that in this case,
		 * this function does nothing.
		 *
		 * 4225415: If the parent isn't attached, or the child
		 * hasn't been named by the parent yet, use the default
		 * ddi_bus_prop_op as a proxy for the parent.  This
		 * allows property lookups in any child/parent state to
		 * include 'prom' and inherited properties, even when
		 * there are no drivers attached to the child or parent.
		 */

		bop = ddi_bus_prop_op;
		if (i_ddi_devi_attached(pdip) &&
		    (i_ddi_node_state(dip) >= DS_INITIALIZED))
			bop = DEVI(pdip)->devi_ops->devo_bus_ops->bus_prop_op;

		i = DDI_PROP_NOT_FOUND;

		if ((bop != ddi_bus_prop_op) || ndi_dev_is_prom_node(dip)) {
			i = (*bop)(dev, pdip, dip, prop_op,
			    flags | DDI_PROP_DONTPASS,
			    name, valuep, lengthp);
		}

		if ((flags & DDI_PROP_DONTPASS) ||
		    (i != DDI_PROP_NOT_FOUND))
			return (i);

		/* Not found here and passing is allowed: try the parent. */
		dip = pdip;
	}
	/*NOTREACHED*/
}
1447
1448
1449 /*
1450 * ddi_prop_op: The basic property operator for drivers.
1451 *
1452 * In ddi_prop_op, the type of valuep is interpreted based on prop_op:
1453 *
1454 * prop_op valuep
1455 * ------ ------
1456 *
1457 * PROP_LEN <unused>
1458 *
1459 * PROP_LEN_AND_VAL_BUF Pointer to callers buffer
1460 *
1461 * PROP_LEN_AND_VAL_ALLOC Address of callers pointer (will be set to
1462 * address of allocated buffer, if successful)
1463 */
1464 int
ddi_prop_op(dev_t dev,dev_info_t * dip,ddi_prop_op_t prop_op,int mod_flags,char * name,caddr_t valuep,int * lengthp)1465 ddi_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
1466 char *name, caddr_t valuep, int *lengthp)
1467 {
1468 int i;
1469
1470 ASSERT((mod_flags & DDI_PROP_TYPE_MASK) == 0);
1471
1472 /*
1473 * If this was originally an LDI prop lookup then we bail here.
1474 * The reason is that the LDI property lookup interfaces first call
1475 * a drivers prop_op() entry point to allow it to override
1476 * properties. But if we've made it here, then the driver hasn't
1477 * overriden any properties. We don't want to continue with the
1478 * property search here because we don't have any type inforamtion.
1479 * When we return failure, the LDI interfaces will then proceed to
1480 * call the typed property interfaces to look up the property.
1481 */
1482 if (mod_flags & DDI_PROP_DYNAMIC)
1483 return (DDI_PROP_NOT_FOUND);
1484
1485 /*
1486 * check for pre-typed property consumer asking for typed property:
1487 * see e_ddi_getprop_int64.
1488 */
1489 if (mod_flags & DDI_PROP_CONSUMER_TYPED)
1490 mod_flags |= DDI_PROP_TYPE_INT64;
1491 mod_flags |= DDI_PROP_TYPE_ANY;
1492
1493 i = ddi_prop_search_common(dev, dip, prop_op,
1494 mod_flags, name, valuep, (uint_t *)lengthp);
1495 if (i == DDI_PROP_FOUND_1275)
1496 return (DDI_PROP_SUCCESS);
1497 return (i);
1498 }
1499
1500 /*
1501 * ddi_prop_op_nblocks_blksize: The basic property operator for drivers that
1502 * maintain size in number of blksize blocks. Provides a dynamic property
1503 * implementation for size oriented properties based on nblocks64 and blksize
1504 * values passed in by the driver. Fallback to ddi_prop_op if the nblocks64
1505 * is too large. This interface should not be used with a nblocks64 that
1506 * represents the driver's idea of how to represent unknown, if nblocks is
1507 * unknown use ddi_prop_op.
1508 */
1509 int
ddi_prop_op_nblocks_blksize(dev_t dev,dev_info_t * dip,ddi_prop_op_t prop_op,int mod_flags,char * name,caddr_t valuep,int * lengthp,uint64_t nblocks64,uint_t blksize)1510 ddi_prop_op_nblocks_blksize(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
1511 int mod_flags, char *name, caddr_t valuep, int *lengthp,
1512 uint64_t nblocks64, uint_t blksize)
1513 {
1514 uint64_t size64;
1515 int blkshift;
1516
1517 /* convert block size to shift value */
1518 ASSERT(BIT_ONLYONESET(blksize));
1519 blkshift = highbit(blksize) - 1;
1520
1521 /*
1522 * There is no point in supporting nblocks64 values that don't have
1523 * an accurate uint64_t byte count representation.
1524 */
1525 if (nblocks64 >= (UINT64_MAX >> blkshift))
1526 return (ddi_prop_op(dev, dip, prop_op, mod_flags,
1527 name, valuep, lengthp));
1528
1529 size64 = nblocks64 << blkshift;
1530 return (ddi_prop_op_size_blksize(dev, dip, prop_op, mod_flags,
1531 name, valuep, lengthp, size64, blksize));
1532 }
1533
1534 /*
1535 * ddi_prop_op_nblocks: ddi_prop_op_nblocks_blksize with DEV_BSIZE blksize.
1536 */
int
ddi_prop_op_nblocks(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp, uint64_t nblocks64)
{
	/* Convenience wrapper: block size fixed at DEV_BSIZE. */
	return (ddi_prop_op_nblocks_blksize(dev, dip, prop_op,
	    mod_flags, name, valuep, lengthp, nblocks64, DEV_BSIZE));
}
1544
1545 /*
1546 * ddi_prop_op_size_blksize: The basic property operator for block drivers that
1547 * maintain size in bytes. Provides a of dynamic property implementation for
1548 * size oriented properties based on size64 value and blksize passed in by the
1549 * driver. Fallback to ddi_prop_op if the size64 is too large. This interface
1550 * should not be used with a size64 that represents the driver's idea of how
1551 * to represent unknown, if size is unknown use ddi_prop_op.
1552 *
1553 * NOTE: the legacy "nblocks"/"size" properties are treated as 32-bit unsigned
1554 * integers. While the most likely interface to request them ([bc]devi_size)
1555 * is declared int (signed) there is no enforcement of this, which means we
1556 * can't enforce limitations here without risking regression.
1557 */
int
ddi_prop_op_size_blksize(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp, uint64_t size64,
    uint_t blksize)
{
	uint64_t nblocks64;
	int callers_length;
	caddr_t buffer;
	int blkshift;

	/*
	 * This is a kludge to support capture of size(9P) pure dynamic
	 * properties in snapshots for non-cmlb code (without exposing
	 * i_ddi_prop_dyn changes).  When everyone uses cmlb, this code
	 * should be removed.
	 */
	if (i_ddi_prop_dyn_driver_get(dip) == NULL) {
		static i_ddi_prop_dyn_t prop_dyn_size[] = {
			{"Size", DDI_PROP_TYPE_INT64, S_IFCHR},
			{"Nblocks", DDI_PROP_TYPE_INT64, S_IFBLK},
			{NULL}
		};
		i_ddi_prop_dyn_driver_set(dip, prop_dyn_size);
	}

	/* convert block size to shift value */
	ASSERT(BIT_ONLYONESET(blksize));
	blkshift = highbit(blksize) - 1;

	/* compute the nblocks value in units of the caller's blksize */
	nblocks64 = size64 >> blkshift;

	/* get callers length, establish length of our dynamic properties */
	callers_length = *lengthp;

	/*
	 * The 64-bit "Size"/"Nblocks" properties are always served; the
	 * legacy 32-bit "nblocks"/"size"/"blksize" ones only when the
	 * value fits (see the block comment above about uint limits).
	 */
	if (strcmp(name, "Nblocks") == 0)
		*lengthp = sizeof (uint64_t);
	else if (strcmp(name, "Size") == 0)
		*lengthp = sizeof (uint64_t);
	else if ((strcmp(name, "nblocks") == 0) && (nblocks64 < UINT_MAX))
		*lengthp = sizeof (uint32_t);
	else if ((strcmp(name, "size") == 0) && (size64 < UINT_MAX))
		*lengthp = sizeof (uint32_t);
	else if ((strcmp(name, "blksize") == 0) && (blksize < UINT_MAX))
		*lengthp = sizeof (uint32_t);
	else {
		/* fallback to ddi_prop_op */
		return (ddi_prop_op(dev, dip, prop_op, mod_flags,
		    name, valuep, lengthp));
	}

	/* service request for the length of the property */
	if (prop_op == PROP_LEN)
		return (DDI_PROP_SUCCESS);

	switch (prop_op) {
	case PROP_LEN_AND_VAL_ALLOC:
		if ((buffer = kmem_alloc(*lengthp,
		    (mod_flags & DDI_PROP_CANSLEEP) ?
		    KM_SLEEP : KM_NOSLEEP)) == NULL)
			return (DDI_PROP_NO_MEMORY);

		*(caddr_t *)valuep = buffer;	/* set callers buf ptr */
		break;

	case PROP_LEN_AND_VAL_BUF:
		/* the length of the property and the request must match */
		if (callers_length != *lengthp)
			return (DDI_PROP_INVAL_ARG);

		buffer = valuep;		/* get callers buf ptr */
		break;

	default:
		return (DDI_PROP_INVAL_ARG);
	}

	/* transfer the value into the buffer */
	if (strcmp(name, "Nblocks") == 0)
		*((uint64_t *)buffer) = nblocks64;
	else if (strcmp(name, "Size") == 0)
		*((uint64_t *)buffer) = size64;
	else if (strcmp(name, "nblocks") == 0)
		*((uint32_t *)buffer) = (uint32_t)nblocks64;
	else if (strcmp(name, "size") == 0)
		*((uint32_t *)buffer) = (uint32_t)size64;
	else if (strcmp(name, "blksize") == 0)
		*((uint32_t *)buffer) = (uint32_t)blksize;
	return (DDI_PROP_SUCCESS);
}
1648
1649 /*
1650 * ddi_prop_op_size: ddi_prop_op_size_blksize with DEV_BSIZE block size.
1651 */
int
ddi_prop_op_size(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp, uint64_t size64)
{
	/* Convenience wrapper: block size fixed at DEV_BSIZE. */
	return (ddi_prop_op_size_blksize(dev, dip, prop_op,
	    mod_flags, name, valuep, lengthp, size64, DEV_BSIZE));
}
1659
1660 /*
1661 * Variable length props...
1662 */
1663
1664 /*
1665 * ddi_getlongprop: Get variable length property len+val into a buffer
1666 * allocated by property provider via kmem_alloc. Requester
1667 * is responsible for freeing returned property via kmem_free.
1668 *
1669 * Arguments:
1670 *
1671 * dev_t: Input: dev_t of property.
1672 * dip: Input: dev_info_t pointer of child.
1673 * flags: Input: Possible flag modifiers are:
1674 * DDI_PROP_DONTPASS: Don't pass to parent if prop not found.
1675 * DDI_PROP_CANSLEEP: Memory allocation may sleep.
1676 * name: Input: name of property.
1677 * valuep: Output: Addr of callers buffer pointer.
1678 * lengthp:Output: *lengthp will contain prop length on exit.
1679 *
1680 * Possible Returns:
1681 *
1682 * DDI_PROP_SUCCESS: Prop found and returned.
1683 * DDI_PROP_NOT_FOUND: Prop not found
1684 * DDI_PROP_UNDEFINED: Prop explicitly undefined.
1685 * DDI_PROP_NO_MEMORY: Prop found, but unable to alloc mem.
1686 */
1687
int
ddi_getlongprop(dev_t dev, dev_info_t *dip, int flags,
    char *name, caddr_t valuep, int *lengthp)
{
	/*
	 * Variable-length lookup: the property framework kmem_allocs the
	 * result buffer; the caller is responsible for kmem_freeing it.
	 */
	return (ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_ALLOC,
	    flags, name, valuep, lengthp));
}
1695
1696 /*
1697 *
1698 * ddi_getlongprop_buf: Get long prop into pre-allocated callers
1699 * buffer. (no memory allocation by provider).
1700 *
1701 * dev_t: Input: dev_t of property.
1702 * dip: Input: dev_info_t pointer of child.
1703 * flags: Input: DDI_PROP_DONTPASS or NULL
1704 * name: Input: name of property
1705 * valuep: Input: ptr to callers buffer.
1706 * lengthp:I/O: ptr to length of callers buffer on entry,
1707 * actual length of property on exit.
1708 *
1709 * Possible returns:
1710 *
1711 * DDI_PROP_SUCCESS Prop found and returned
1712 * DDI_PROP_NOT_FOUND Prop not found
1713 * DDI_PROP_UNDEFINED Prop explicitly undefined.
1714 * DDI_PROP_BUF_TOO_SMALL Prop found, callers buf too small,
1715 * no value returned, but actual prop
1716 * length returned in *lengthp
1717 *
1718 */
1719
int
ddi_getlongprop_buf(dev_t dev, dev_info_t *dip, int flags,
    char *name, caddr_t valuep, int *lengthp)
{
	/*
	 * Buffered lookup: the value is copied into the caller-supplied
	 * buffer at valuep; *lengthp carries the buffer size in and the
	 * actual property length out.
	 */
	return (ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
	    flags, name, valuep, lengthp));
}
1727
1728 /*
1729 * Integer/boolean sized props.
1730 *
1731 * Call is value only... returns found boolean or int sized prop value or
1732 * defvalue if prop not found or is wrong length or is explicitly undefined.
1733 * Only flag is DDI_PROP_DONTPASS...
1734 *
1735 * By convention, this interface returns boolean (0) sized properties
1736 * as value (int)1.
1737 *
1738 * This never returns an error, if property not found or specifically
1739 * undefined, the input `defvalue' is returned.
1740 */
1741
1742 int
ddi_getprop(dev_t dev,dev_info_t * dip,int flags,char * name,int defvalue)1743 ddi_getprop(dev_t dev, dev_info_t *dip, int flags, char *name, int defvalue)
1744 {
1745 int propvalue = defvalue;
1746 int proplength = sizeof (int);
1747 int error;
1748
1749 error = ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
1750 flags, name, (caddr_t)&propvalue, &proplength);
1751
1752 if ((error == DDI_PROP_SUCCESS) && (proplength == 0))
1753 propvalue = 1;
1754
1755 return (propvalue);
1756 }
1757
1758 /*
1759 * Get prop length interface: flags are 0 or DDI_PROP_DONTPASS
1760 * if returns DDI_PROP_SUCCESS, length returned in *lengthp.
1761 */
1762
int
ddi_getproplen(dev_t dev, dev_info_t *dip, int flags, char *name, int *lengthp)
{
	/* Length-only query; no value buffer is used for PROP_LEN. */
	return (ddi_prop_op(dev, dip, PROP_LEN, flags, name, NULL, lengthp));
}
1768
1769 /*
1770 * Allocate a struct prop_driver_data, along with 'size' bytes
1771 * for decoded property data. This structure is freed by
1772 * calling ddi_prop_free(9F).
1773 */
1774 static void *
ddi_prop_decode_alloc(size_t size,void (* prop_free)(struct prop_driver_data *))1775 ddi_prop_decode_alloc(size_t size, void (*prop_free)(struct prop_driver_data *))
1776 {
1777 struct prop_driver_data *pdd;
1778
1779 /*
1780 * Allocate a structure with enough memory to store the decoded data.
1781 */
1782 pdd = kmem_zalloc(sizeof (struct prop_driver_data) + size, KM_SLEEP);
1783 pdd->pdd_size = (sizeof (struct prop_driver_data) + size);
1784 pdd->pdd_prop_free = prop_free;
1785
1786 /*
1787 * Return a pointer to the location to put the decoded data.
1788 */
1789 return ((void *)((caddr_t)pdd + sizeof (struct prop_driver_data)));
1790 }
1791
1792 /*
1793 * Allocated the memory needed to store the encoded data in the property
1794 * handle.
1795 */
1796 static int
ddi_prop_encode_alloc(prop_handle_t * ph,size_t size)1797 ddi_prop_encode_alloc(prop_handle_t *ph, size_t size)
1798 {
1799 /*
1800 * If size is zero, then set data to NULL and size to 0. This
1801 * is a boolean property.
1802 */
1803 if (size == 0) {
1804 ph->ph_size = 0;
1805 ph->ph_data = NULL;
1806 ph->ph_cur_pos = NULL;
1807 ph->ph_save_pos = NULL;
1808 } else {
1809 if (ph->ph_flags == DDI_PROP_DONTSLEEP) {
1810 ph->ph_data = kmem_zalloc(size, KM_NOSLEEP);
1811 if (ph->ph_data == NULL)
1812 return (DDI_PROP_NO_MEMORY);
1813 } else
1814 ph->ph_data = kmem_zalloc(size, KM_SLEEP);
1815 ph->ph_size = size;
1816 ph->ph_cur_pos = ph->ph_data;
1817 ph->ph_save_pos = ph->ph_data;
1818 }
1819 return (DDI_PROP_SUCCESS);
1820 }
1821
1822 /*
1823 * Free the space allocated by the lookup routines. Each lookup routine
1824 * returns a pointer to the decoded data to the driver. The driver then
1825 * passes this pointer back to us. This data actually lives in a struct
1826 * prop_driver_data. We use negative indexing to find the beginning of
1827 * the structure and then free the entire structure using the size and
1828 * the free routine stored in the structure.
1829 */
void
ddi_prop_free(void *datap)
{
	struct prop_driver_data *pdd;

	/*
	 * Get the structure: back up from the driver-visible data to the
	 * prop_driver_data header placed in front of it by
	 * ddi_prop_decode_alloc().
	 */
	pdd = (struct prop_driver_data *)
	    ((caddr_t)datap - sizeof (struct prop_driver_data));
	/*
	 * Call the free routine recorded there to release the whole
	 * allocation (header plus data).
	 */
	(*pdd->pdd_prop_free)(pdd);
}
1845
1846 /*
1847 * Free the data associated with an array of ints,
1848 * allocated with ddi_prop_decode_alloc().
1849 */
static void
ddi_prop_free_ints(struct prop_driver_data *pdd)
{
	/* pdd_size covers the header plus the decoded ints that follow. */
	kmem_free(pdd, pdd->pdd_size);
}
1855
1856 /*
1857 * Free a single string property or a single string contained within
1858 * the argv style return value of an array of strings.
1859 */
static void
ddi_prop_free_string(struct prop_driver_data *pdd)
{
	/* pdd_size covers the header plus the decoded string data. */
	kmem_free(pdd, pdd->pdd_size);

}
1866
1867 /*
1868 * Free an array of strings.
1869 */
static void
ddi_prop_free_strings(struct prop_driver_data *pdd)
{
	/* pdd_size covers the header plus the argv-style string array. */
	kmem_free(pdd, pdd->pdd_size);
}
1875
1876 /*
1877 * Free the data associated with an array of bytes.
1878 */
static void
ddi_prop_free_bytes(struct prop_driver_data *pdd)
{
	/* pdd_size covers the header plus the decoded byte array. */
	kmem_free(pdd, pdd->pdd_size);
}
1884
1885 /*
1886 * Reset the current location pointer in the property handle to the
1887 * beginning of the data.
1888 */
void
ddi_prop_reset_pos(prop_handle_t *ph)
{
	/* Rewind both the current and the saved cursor to the data head. */
	ph->ph_cur_pos = ph->ph_data;
	ph->ph_save_pos = ph->ph_data;
}
1895
1896 /*
1897 * Restore the current location pointer in the property handle to the
1898 * saved position.
1899 */
1900 void
ddi_prop_save_pos(prop_handle_t * ph)1901 ddi_prop_save_pos(prop_handle_t *ph)
1902 {
1903 ph->ph_save_pos = ph->ph_cur_pos;
1904 }
1905
1906 /*
1907 * Save the location that the current location pointer is pointing to..
1908 */
1909 void
ddi_prop_restore_pos(prop_handle_t * ph)1910 ddi_prop_restore_pos(prop_handle_t *ph)
1911 {
1912 ph->ph_cur_pos = ph->ph_save_pos;
1913 }
1914
1915 /*
1916 * Property encode/decode functions
1917 */
1918
1919 /*
1920 * Decode a single integer property
1921 */
1922 static int
ddi_prop_fm_decode_int(prop_handle_t * ph,void * data,uint_t * nelements)1923 ddi_prop_fm_decode_int(prop_handle_t *ph, void *data, uint_t *nelements)
1924 {
1925 int i;
1926 int tmp;
1927
1928 /*
1929 * If there is nothing to decode return an error
1930 */
1931 if (ph->ph_size == 0)
1932 return (DDI_PROP_END_OF_DATA);
1933
1934 /*
1935 * Decode the property as a single integer and return it
1936 * in data if we were able to decode it.
1937 */
1938 i = DDI_PROP_INT(ph, DDI_PROP_CMD_DECODE, &tmp);
1939 if (i < DDI_PROP_RESULT_OK) {
1940 switch (i) {
1941 case DDI_PROP_RESULT_EOF:
1942 return (DDI_PROP_END_OF_DATA);
1943
1944 case DDI_PROP_RESULT_ERROR:
1945 return (DDI_PROP_CANNOT_DECODE);
1946 }
1947 }
1948
1949 *(int *)data = tmp;
1950 *nelements = 1;
1951 return (DDI_PROP_SUCCESS);
1952 }
1953
1954 /*
1955 * Decode a single 64 bit integer property
1956 */
1957 static int
ddi_prop_fm_decode_int64(prop_handle_t * ph,void * data,uint_t * nelements)1958 ddi_prop_fm_decode_int64(prop_handle_t *ph, void *data, uint_t *nelements)
1959 {
1960 int i;
1961 int64_t tmp;
1962
1963 /*
1964 * If there is nothing to decode return an error
1965 */
1966 if (ph->ph_size == 0)
1967 return (DDI_PROP_END_OF_DATA);
1968
1969 /*
1970 * Decode the property as a single integer and return it
1971 * in data if we were able to decode it.
1972 */
1973 i = DDI_PROP_INT64(ph, DDI_PROP_CMD_DECODE, &tmp);
1974 if (i < DDI_PROP_RESULT_OK) {
1975 switch (i) {
1976 case DDI_PROP_RESULT_EOF:
1977 return (DDI_PROP_END_OF_DATA);
1978
1979 case DDI_PROP_RESULT_ERROR:
1980 return (DDI_PROP_CANNOT_DECODE);
1981 }
1982 }
1983
1984 *(int64_t *)data = tmp;
1985 *nelements = 1;
1986 return (DDI_PROP_SUCCESS);
1987 }
1988
1989 /*
1990 * Decode an array of integers property
1991 */
1992 static int
ddi_prop_fm_decode_ints(prop_handle_t * ph,void * data,uint_t * nelements)1993 ddi_prop_fm_decode_ints(prop_handle_t *ph, void *data, uint_t *nelements)
1994 {
1995 int i;
1996 int cnt = 0;
1997 int *tmp;
1998 int *intp;
1999 int n;
2000
2001 /*
2002 * Figure out how many array elements there are by going through the
2003 * data without decoding it first and counting.
2004 */
2005 for (;;) {
2006 i = DDI_PROP_INT(ph, DDI_PROP_CMD_SKIP, NULL);
2007 if (i < 0)
2008 break;
2009 cnt++;
2010 }
2011
2012 /*
2013 * If there are no elements return an error
2014 */
2015 if (cnt == 0)
2016 return (DDI_PROP_END_OF_DATA);
2017
2018 /*
2019 * If we cannot skip through the data, we cannot decode it
2020 */
2021 if (i == DDI_PROP_RESULT_ERROR)
2022 return (DDI_PROP_CANNOT_DECODE);
2023
2024 /*
2025 * Reset the data pointer to the beginning of the encoded data
2026 */
2027 ddi_prop_reset_pos(ph);
2028
2029 /*
2030 * Allocated memory to store the decoded value in.
2031 */
2032 intp = ddi_prop_decode_alloc((cnt * sizeof (int)),
2033 ddi_prop_free_ints);
2034
2035 /*
2036 * Decode each element and place it in the space we just allocated
2037 */
2038 tmp = intp;
2039 for (n = 0; n < cnt; n++, tmp++) {
2040 i = DDI_PROP_INT(ph, DDI_PROP_CMD_DECODE, tmp);
2041 if (i < DDI_PROP_RESULT_OK) {
2042 /*
2043 * Free the space we just allocated
2044 * and return an error.
2045 */
2046 ddi_prop_free(intp);
2047 switch (i) {
2048 case DDI_PROP_RESULT_EOF:
2049 return (DDI_PROP_END_OF_DATA);
2050
2051 case DDI_PROP_RESULT_ERROR:
2052 return (DDI_PROP_CANNOT_DECODE);
2053 }
2054 }
2055 }
2056
2057 *nelements = cnt;
2058 *(int **)data = intp;
2059
2060 return (DDI_PROP_SUCCESS);
2061 }
2062
2063 /*
2064 * Decode a 64 bit integer array property
2065 */
2066 static int
ddi_prop_fm_decode_int64_array(prop_handle_t * ph,void * data,uint_t * nelements)2067 ddi_prop_fm_decode_int64_array(prop_handle_t *ph, void *data, uint_t *nelements)
2068 {
2069 int i;
2070 int n;
2071 int cnt = 0;
2072 int64_t *tmp;
2073 int64_t *intp;
2074
2075 /*
2076 * Count the number of array elements by going
2077 * through the data without decoding it.
2078 */
2079 for (;;) {
2080 i = DDI_PROP_INT64(ph, DDI_PROP_CMD_SKIP, NULL);
2081 if (i < 0)
2082 break;
2083 cnt++;
2084 }
2085
2086 /*
2087 * If there are no elements return an error
2088 */
2089 if (cnt == 0)
2090 return (DDI_PROP_END_OF_DATA);
2091
2092 /*
2093 * If we cannot skip through the data, we cannot decode it
2094 */
2095 if (i == DDI_PROP_RESULT_ERROR)
2096 return (DDI_PROP_CANNOT_DECODE);
2097
2098 /*
2099 * Reset the data pointer to the beginning of the encoded data
2100 */
2101 ddi_prop_reset_pos(ph);
2102
2103 /*
2104 * Allocate memory to store the decoded value.
2105 */
2106 intp = ddi_prop_decode_alloc((cnt * sizeof (int64_t)),
2107 ddi_prop_free_ints);
2108
2109 /*
2110 * Decode each element and place it in the space allocated
2111 */
2112 tmp = intp;
2113 for (n = 0; n < cnt; n++, tmp++) {
2114 i = DDI_PROP_INT64(ph, DDI_PROP_CMD_DECODE, tmp);
2115 if (i < DDI_PROP_RESULT_OK) {
2116 /*
2117 * Free the space we just allocated
2118 * and return an error.
2119 */
2120 ddi_prop_free(intp);
2121 switch (i) {
2122 case DDI_PROP_RESULT_EOF:
2123 return (DDI_PROP_END_OF_DATA);
2124
2125 case DDI_PROP_RESULT_ERROR:
2126 return (DDI_PROP_CANNOT_DECODE);
2127 }
2128 }
2129 }
2130
2131 *nelements = cnt;
2132 *(int64_t **)data = intp;
2133
2134 return (DDI_PROP_SUCCESS);
2135 }
2136
2137 /*
2138 * Encode an array of integers property (Can be one element)
2139 */
2140 int
ddi_prop_fm_encode_ints(prop_handle_t * ph,void * data,uint_t nelements)2141 ddi_prop_fm_encode_ints(prop_handle_t *ph, void *data, uint_t nelements)
2142 {
2143 int i;
2144 int *tmp;
2145 int cnt;
2146 int size;
2147
2148 /*
2149 * If there is no data, we cannot do anything
2150 */
2151 if (nelements == 0)
2152 return (DDI_PROP_CANNOT_ENCODE);
2153
2154 /*
2155 * Get the size of an encoded int.
2156 */
2157 size = DDI_PROP_INT(ph, DDI_PROP_CMD_GET_ESIZE, NULL);
2158
2159 if (size < DDI_PROP_RESULT_OK) {
2160 switch (size) {
2161 case DDI_PROP_RESULT_EOF:
2162 return (DDI_PROP_END_OF_DATA);
2163
2164 case DDI_PROP_RESULT_ERROR:
2165 return (DDI_PROP_CANNOT_ENCODE);
2166 }
2167 }
2168
2169 /*
2170 * Allocate space in the handle to store the encoded int.
2171 */
2172 if (ddi_prop_encode_alloc(ph, size * nelements) !=
2173 DDI_PROP_SUCCESS)
2174 return (DDI_PROP_NO_MEMORY);
2175
2176 /*
2177 * Encode the array of ints.
2178 */
2179 tmp = (int *)data;
2180 for (cnt = 0; cnt < nelements; cnt++, tmp++) {
2181 i = DDI_PROP_INT(ph, DDI_PROP_CMD_ENCODE, tmp);
2182 if (i < DDI_PROP_RESULT_OK) {
2183 switch (i) {
2184 case DDI_PROP_RESULT_EOF:
2185 return (DDI_PROP_END_OF_DATA);
2186
2187 case DDI_PROP_RESULT_ERROR:
2188 return (DDI_PROP_CANNOT_ENCODE);
2189 }
2190 }
2191 }
2192
2193 return (DDI_PROP_SUCCESS);
2194 }
2195
2196
2197 /*
2198 * Encode a 64 bit integer array property
2199 */
2200 int
ddi_prop_fm_encode_int64(prop_handle_t * ph,void * data,uint_t nelements)2201 ddi_prop_fm_encode_int64(prop_handle_t *ph, void *data, uint_t nelements)
2202 {
2203 int i;
2204 int cnt;
2205 int size;
2206 int64_t *tmp;
2207
2208 /*
2209 * If there is no data, we cannot do anything
2210 */
2211 if (nelements == 0)
2212 return (DDI_PROP_CANNOT_ENCODE);
2213
2214 /*
2215 * Get the size of an encoded 64 bit int.
2216 */
2217 size = DDI_PROP_INT64(ph, DDI_PROP_CMD_GET_ESIZE, NULL);
2218
2219 if (size < DDI_PROP_RESULT_OK) {
2220 switch (size) {
2221 case DDI_PROP_RESULT_EOF:
2222 return (DDI_PROP_END_OF_DATA);
2223
2224 case DDI_PROP_RESULT_ERROR:
2225 return (DDI_PROP_CANNOT_ENCODE);
2226 }
2227 }
2228
2229 /*
2230 * Allocate space in the handle to store the encoded int.
2231 */
2232 if (ddi_prop_encode_alloc(ph, size * nelements) !=
2233 DDI_PROP_SUCCESS)
2234 return (DDI_PROP_NO_MEMORY);
2235
2236 /*
2237 * Encode the array of ints.
2238 */
2239 tmp = (int64_t *)data;
2240 for (cnt = 0; cnt < nelements; cnt++, tmp++) {
2241 i = DDI_PROP_INT64(ph, DDI_PROP_CMD_ENCODE, tmp);
2242 if (i < DDI_PROP_RESULT_OK) {
2243 switch (i) {
2244 case DDI_PROP_RESULT_EOF:
2245 return (DDI_PROP_END_OF_DATA);
2246
2247 case DDI_PROP_RESULT_ERROR:
2248 return (DDI_PROP_CANNOT_ENCODE);
2249 }
2250 }
2251 }
2252
2253 return (DDI_PROP_SUCCESS);
2254 }
2255
2256 /*
2257 * Decode a single string property
2258 */
2259 static int
ddi_prop_fm_decode_string(prop_handle_t * ph,void * data,uint_t * nelements)2260 ddi_prop_fm_decode_string(prop_handle_t *ph, void *data, uint_t *nelements)
2261 {
2262 char *tmp;
2263 char *str;
2264 int i;
2265 int size;
2266
2267 /*
2268 * If there is nothing to decode return an error
2269 */
2270 if (ph->ph_size == 0)
2271 return (DDI_PROP_END_OF_DATA);
2272
2273 /*
2274 * Get the decoded size of the encoded string.
2275 */
2276 size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
2277 if (size < DDI_PROP_RESULT_OK) {
2278 switch (size) {
2279 case DDI_PROP_RESULT_EOF:
2280 return (DDI_PROP_END_OF_DATA);
2281
2282 case DDI_PROP_RESULT_ERROR:
2283 return (DDI_PROP_CANNOT_DECODE);
2284 }
2285 }
2286
2287 /*
2288 * Allocated memory to store the decoded value in.
2289 */
2290 str = ddi_prop_decode_alloc((size_t)size, ddi_prop_free_string);
2291
2292 ddi_prop_reset_pos(ph);
2293
2294 /*
2295 * Decode the str and place it in the space we just allocated
2296 */
2297 tmp = str;
2298 i = DDI_PROP_STR(ph, DDI_PROP_CMD_DECODE, tmp);
2299 if (i < DDI_PROP_RESULT_OK) {
2300 /*
2301 * Free the space we just allocated
2302 * and return an error.
2303 */
2304 ddi_prop_free(str);
2305 switch (i) {
2306 case DDI_PROP_RESULT_EOF:
2307 return (DDI_PROP_END_OF_DATA);
2308
2309 case DDI_PROP_RESULT_ERROR:
2310 return (DDI_PROP_CANNOT_DECODE);
2311 }
2312 }
2313
2314 *(char **)data = str;
2315 *nelements = 1;
2316
2317 return (DDI_PROP_SUCCESS);
2318 }
2319
/*
 * Decode an array of strings.
 *
 * On success *data points to a single allocation laid out as a
 * NULL-terminated vector of (cnt + 1) char pointers followed by the
 * string bodies themselves; the whole thing is released with one
 * ddi_prop_free().  *nelements is set to the number of strings.
 */
int
ddi_prop_fm_decode_strings(prop_handle_t *ph, void *data, uint_t *nelements)
{
	int cnt = 0;
	char **strs;
	char **tmp;
	char *ptr;
	int i;
	int n;
	int size;
	size_t nbytes;

	/*
	 * Figure out how many array elements there are by going through the
	 * data without decoding it first and counting.
	 */
	for (;;) {
		i = DDI_PROP_STR(ph, DDI_PROP_CMD_SKIP, NULL);
		if (i < 0)
			break;
		cnt++;
	}

	/*
	 * If there are no elements return an error
	 */
	if (cnt == 0)
		return (DDI_PROP_END_OF_DATA);

	/*
	 * If we cannot skip through the data, we cannot decode it
	 */
	if (i == DDI_PROP_RESULT_ERROR)
		return (DDI_PROP_CANNOT_DECODE);

	/*
	 * Reset the data pointer to the beginning of the encoded data
	 */
	ddi_prop_reset_pos(ph);

	/*
	 * Figure out how much memory we need for the sum total:
	 * the pointer vector (including its NULL terminator slot)
	 * plus the decoded length of every string.
	 */
	nbytes = (cnt + 1) * sizeof (char *);

	for (n = 0; n < cnt; n++) {
		/*
		 * Get the decoded size of the current encoded string.
		 * Note: GET_DSIZE also advances ph_cur_pos past the
		 * string, which is what steps this loop forward.
		 */
		size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
		if (size < DDI_PROP_RESULT_OK) {
			switch (size) {
			case DDI_PROP_RESULT_EOF:
				return (DDI_PROP_END_OF_DATA);

			case DDI_PROP_RESULT_ERROR:
				return (DDI_PROP_CANNOT_DECODE);
			}
		}

		nbytes += size;
	}

	/*
	 * Allocate memory in which to store the decoded strings.
	 */
	strs = ddi_prop_decode_alloc(nbytes, ddi_prop_free_strings);

	/*
	 * Set up pointers for each string by figuring out yet
	 * again how long each string is.  Each *tmp is aimed at the
	 * spot in the trailing byte area where that string will land.
	 */
	ddi_prop_reset_pos(ph);
	ptr = (caddr_t)strs + ((cnt + 1) * sizeof (char *));
	for (tmp = strs, n = 0; n < cnt; n++, tmp++) {
		/*
		 * Get the decoded size of the current encoded string.
		 */
		size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
		if (size < DDI_PROP_RESULT_OK) {
			ddi_prop_free(strs);
			switch (size) {
			case DDI_PROP_RESULT_EOF:
				return (DDI_PROP_END_OF_DATA);

			case DDI_PROP_RESULT_ERROR:
				return (DDI_PROP_CANNOT_DECODE);
			}
		}

		*tmp = ptr;
		ptr += size;
	}

	/*
	 * String array is terminated by a NULL
	 */
	*tmp = NULL;

	/*
	 * Finally, we can decode each string
	 */
	ddi_prop_reset_pos(ph);
	for (tmp = strs, n = 0; n < cnt; n++, tmp++) {
		i = DDI_PROP_STR(ph, DDI_PROP_CMD_DECODE, *tmp);
		if (i < DDI_PROP_RESULT_OK) {
			/*
			 * Free the space we just allocated
			 * and return an error
			 */
			ddi_prop_free(strs);
			switch (i) {
			case DDI_PROP_RESULT_EOF:
				return (DDI_PROP_END_OF_DATA);

			case DDI_PROP_RESULT_ERROR:
				return (DDI_PROP_CANNOT_DECODE);
			}
		}
	}

	*(char ***)data = strs;
	*nelements = cnt;

	return (DDI_PROP_SUCCESS);
}
2449
2450 /*
2451 * Encode a string.
2452 */
2453 int
ddi_prop_fm_encode_string(prop_handle_t * ph,void * data,uint_t nelements)2454 ddi_prop_fm_encode_string(prop_handle_t *ph, void *data, uint_t nelements)
2455 {
2456 char **tmp;
2457 int size;
2458 int i;
2459
2460 /*
2461 * If there is no data, we cannot do anything
2462 */
2463 if (nelements == 0)
2464 return (DDI_PROP_CANNOT_ENCODE);
2465
2466 /*
2467 * Get the size of the encoded string.
2468 */
2469 tmp = (char **)data;
2470 size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_ESIZE, *tmp);
2471 if (size < DDI_PROP_RESULT_OK) {
2472 switch (size) {
2473 case DDI_PROP_RESULT_EOF:
2474 return (DDI_PROP_END_OF_DATA);
2475
2476 case DDI_PROP_RESULT_ERROR:
2477 return (DDI_PROP_CANNOT_ENCODE);
2478 }
2479 }
2480
2481 /*
2482 * Allocate space in the handle to store the encoded string.
2483 */
2484 if (ddi_prop_encode_alloc(ph, size) != DDI_PROP_SUCCESS)
2485 return (DDI_PROP_NO_MEMORY);
2486
2487 ddi_prop_reset_pos(ph);
2488
2489 /*
2490 * Encode the string.
2491 */
2492 tmp = (char **)data;
2493 i = DDI_PROP_STR(ph, DDI_PROP_CMD_ENCODE, *tmp);
2494 if (i < DDI_PROP_RESULT_OK) {
2495 switch (i) {
2496 case DDI_PROP_RESULT_EOF:
2497 return (DDI_PROP_END_OF_DATA);
2498
2499 case DDI_PROP_RESULT_ERROR:
2500 return (DDI_PROP_CANNOT_ENCODE);
2501 }
2502 }
2503
2504 return (DDI_PROP_SUCCESS);
2505 }
2506
2507
2508 /*
2509 * Encode an array of strings.
2510 */
2511 int
ddi_prop_fm_encode_strings(prop_handle_t * ph,void * data,uint_t nelements)2512 ddi_prop_fm_encode_strings(prop_handle_t *ph, void *data, uint_t nelements)
2513 {
2514 int cnt = 0;
2515 char **tmp;
2516 int size;
2517 uint_t total_size;
2518 int i;
2519
2520 /*
2521 * If there is no data, we cannot do anything
2522 */
2523 if (nelements == 0)
2524 return (DDI_PROP_CANNOT_ENCODE);
2525
2526 /*
2527 * Get the total size required to encode all the strings.
2528 */
2529 total_size = 0;
2530 tmp = (char **)data;
2531 for (cnt = 0; cnt < nelements; cnt++, tmp++) {
2532 size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_ESIZE, *tmp);
2533 if (size < DDI_PROP_RESULT_OK) {
2534 switch (size) {
2535 case DDI_PROP_RESULT_EOF:
2536 return (DDI_PROP_END_OF_DATA);
2537
2538 case DDI_PROP_RESULT_ERROR:
2539 return (DDI_PROP_CANNOT_ENCODE);
2540 }
2541 }
2542 total_size += (uint_t)size;
2543 }
2544
2545 /*
2546 * Allocate space in the handle to store the encoded strings.
2547 */
2548 if (ddi_prop_encode_alloc(ph, total_size) != DDI_PROP_SUCCESS)
2549 return (DDI_PROP_NO_MEMORY);
2550
2551 ddi_prop_reset_pos(ph);
2552
2553 /*
2554 * Encode the array of strings.
2555 */
2556 tmp = (char **)data;
2557 for (cnt = 0; cnt < nelements; cnt++, tmp++) {
2558 i = DDI_PROP_STR(ph, DDI_PROP_CMD_ENCODE, *tmp);
2559 if (i < DDI_PROP_RESULT_OK) {
2560 switch (i) {
2561 case DDI_PROP_RESULT_EOF:
2562 return (DDI_PROP_END_OF_DATA);
2563
2564 case DDI_PROP_RESULT_ERROR:
2565 return (DDI_PROP_CANNOT_ENCODE);
2566 }
2567 }
2568 }
2569
2570 return (DDI_PROP_SUCCESS);
2571 }
2572
2573
2574 /*
2575 * Decode an array of bytes.
2576 */
2577 static int
ddi_prop_fm_decode_bytes(prop_handle_t * ph,void * data,uint_t * nelements)2578 ddi_prop_fm_decode_bytes(prop_handle_t *ph, void *data, uint_t *nelements)
2579 {
2580 uchar_t *tmp;
2581 int nbytes;
2582 int i;
2583
2584 /*
2585 * If there are no elements return an error
2586 */
2587 if (ph->ph_size == 0)
2588 return (DDI_PROP_END_OF_DATA);
2589
2590 /*
2591 * Get the size of the encoded array of bytes.
2592 */
2593 nbytes = DDI_PROP_BYTES(ph, DDI_PROP_CMD_GET_DSIZE,
2594 data, ph->ph_size);
2595 if (nbytes < DDI_PROP_RESULT_OK) {
2596 switch (nbytes) {
2597 case DDI_PROP_RESULT_EOF:
2598 return (DDI_PROP_END_OF_DATA);
2599
2600 case DDI_PROP_RESULT_ERROR:
2601 return (DDI_PROP_CANNOT_DECODE);
2602 }
2603 }
2604
2605 /*
2606 * Allocated memory to store the decoded value in.
2607 */
2608 tmp = ddi_prop_decode_alloc(nbytes, ddi_prop_free_bytes);
2609
2610 /*
2611 * Decode each element and place it in the space we just allocated
2612 */
2613 i = DDI_PROP_BYTES(ph, DDI_PROP_CMD_DECODE, tmp, nbytes);
2614 if (i < DDI_PROP_RESULT_OK) {
2615 /*
2616 * Free the space we just allocated
2617 * and return an error
2618 */
2619 ddi_prop_free(tmp);
2620 switch (i) {
2621 case DDI_PROP_RESULT_EOF:
2622 return (DDI_PROP_END_OF_DATA);
2623
2624 case DDI_PROP_RESULT_ERROR:
2625 return (DDI_PROP_CANNOT_DECODE);
2626 }
2627 }
2628
2629 *(uchar_t **)data = tmp;
2630 *nelements = nbytes;
2631
2632 return (DDI_PROP_SUCCESS);
2633 }
2634
2635 /*
2636 * Encode an array of bytes.
2637 */
2638 int
ddi_prop_fm_encode_bytes(prop_handle_t * ph,void * data,uint_t nelements)2639 ddi_prop_fm_encode_bytes(prop_handle_t *ph, void *data, uint_t nelements)
2640 {
2641 int size;
2642 int i;
2643
2644 /*
2645 * If there are no elements, then this is a boolean property,
2646 * so just create a property handle with no data and return.
2647 */
2648 if (nelements == 0) {
2649 (void) ddi_prop_encode_alloc(ph, 0);
2650 return (DDI_PROP_SUCCESS);
2651 }
2652
2653 /*
2654 * Get the size of the encoded array of bytes.
2655 */
2656 size = DDI_PROP_BYTES(ph, DDI_PROP_CMD_GET_ESIZE, (uchar_t *)data,
2657 nelements);
2658 if (size < DDI_PROP_RESULT_OK) {
2659 switch (size) {
2660 case DDI_PROP_RESULT_EOF:
2661 return (DDI_PROP_END_OF_DATA);
2662
2663 case DDI_PROP_RESULT_ERROR:
2664 return (DDI_PROP_CANNOT_DECODE);
2665 }
2666 }
2667
2668 /*
2669 * Allocate space in the handle to store the encoded bytes.
2670 */
2671 if (ddi_prop_encode_alloc(ph, (uint_t)size) != DDI_PROP_SUCCESS)
2672 return (DDI_PROP_NO_MEMORY);
2673
2674 /*
2675 * Encode the array of bytes.
2676 */
2677 i = DDI_PROP_BYTES(ph, DDI_PROP_CMD_ENCODE, (uchar_t *)data,
2678 nelements);
2679 if (i < DDI_PROP_RESULT_OK) {
2680 switch (i) {
2681 case DDI_PROP_RESULT_EOF:
2682 return (DDI_PROP_END_OF_DATA);
2683
2684 case DDI_PROP_RESULT_ERROR:
2685 return (DDI_PROP_CANNOT_ENCODE);
2686 }
2687 }
2688
2689 return (DDI_PROP_SUCCESS);
2690 }
2691
2692 /*
2693 * OBP 1275 integer, string and byte operators.
2694 *
 * DDI_PROP_CMD_DECODE:
 *
 *	DDI_PROP_RESULT_ERROR:		cannot decode the data
 *	DDI_PROP_RESULT_EOF:		end of data
 *	DDI_PROP_RESULT_OK:		data was decoded
 *
 * DDI_PROP_CMD_ENCODE:
 *
 *	DDI_PROP_RESULT_ERROR:		cannot encode the data
 *	DDI_PROP_RESULT_EOF:		end of data
 *	DDI_PROP_RESULT_OK:		data was encoded
 *
 * DDI_PROP_CMD_SKIP:
 *
 *	DDI_PROP_RESULT_ERROR:		cannot skip the data
 *	DDI_PROP_RESULT_EOF:		end of data
 *	DDI_PROP_RESULT_OK:		data was skipped
2712 *
2713 * DDI_PROP_CMD_GET_ESIZE:
2714 *
2715 * DDI_PROP_RESULT_ERROR: cannot get encoded size
2716 * DDI_PROP_RESULT_EOF: end of data
2717 * > 0: the encoded size
2718 *
2719 * DDI_PROP_CMD_GET_DSIZE:
2720 *
2721 * DDI_PROP_RESULT_ERROR: cannot get decoded size
2722 * DDI_PROP_RESULT_EOF: end of data
2723 * > 0: the decoded size
2724 */
2725
2726 /*
2727 * OBP 1275 integer operator
2728 *
2729 * OBP properties are a byte stream of data, so integers may not be
2730 * properly aligned. Therefore we need to copy them one byte at a time.
2731 */
2732 int
ddi_prop_1275_int(prop_handle_t * ph,uint_t cmd,int * data)2733 ddi_prop_1275_int(prop_handle_t *ph, uint_t cmd, int *data)
2734 {
2735 int i;
2736
2737 switch (cmd) {
2738 case DDI_PROP_CMD_DECODE:
2739 /*
2740 * Check that there is encoded data
2741 */
2742 if (ph->ph_cur_pos == NULL || ph->ph_size == 0)
2743 return (DDI_PROP_RESULT_ERROR);
2744 if (ph->ph_flags & PH_FROM_PROM) {
2745 i = MIN(ph->ph_size, PROP_1275_INT_SIZE);
2746 if ((int *)ph->ph_cur_pos > ((int *)ph->ph_data +
2747 ph->ph_size - i))
2748 return (DDI_PROP_RESULT_ERROR);
2749 } else {
2750 if (ph->ph_size < sizeof (int) ||
2751 ((int *)ph->ph_cur_pos > ((int *)ph->ph_data +
2752 ph->ph_size - sizeof (int))))
2753 return (DDI_PROP_RESULT_ERROR);
2754 }
2755
2756 /*
2757 * Copy the integer, using the implementation-specific
2758 * copy function if the property is coming from the PROM.
2759 */
2760 if (ph->ph_flags & PH_FROM_PROM) {
2761 *data = impl_ddi_prop_int_from_prom(
2762 (uchar_t *)ph->ph_cur_pos,
2763 (ph->ph_size < PROP_1275_INT_SIZE) ?
2764 ph->ph_size : PROP_1275_INT_SIZE);
2765 } else {
2766 bcopy(ph->ph_cur_pos, data, sizeof (int));
2767 }
2768
2769 /*
2770 * Move the current location to the start of the next
2771 * bit of undecoded data.
2772 */
2773 ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
2774 PROP_1275_INT_SIZE;
2775 return (DDI_PROP_RESULT_OK);
2776
2777 case DDI_PROP_CMD_ENCODE:
2778 /*
2779 * Check that there is room to encoded the data
2780 */
2781 if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
2782 ph->ph_size < PROP_1275_INT_SIZE ||
2783 ((int *)ph->ph_cur_pos > ((int *)ph->ph_data +
2784 ph->ph_size - sizeof (int))))
2785 return (DDI_PROP_RESULT_ERROR);
2786
2787 /*
2788 * Encode the integer into the byte stream one byte at a
2789 * time.
2790 */
2791 bcopy(data, ph->ph_cur_pos, sizeof (int));
2792
2793 /*
2794 * Move the current location to the start of the next bit of
2795 * space where we can store encoded data.
2796 */
2797 ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos + PROP_1275_INT_SIZE;
2798 return (DDI_PROP_RESULT_OK);
2799
2800 case DDI_PROP_CMD_SKIP:
2801 /*
2802 * Check that there is encoded data
2803 */
2804 if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
2805 ph->ph_size < PROP_1275_INT_SIZE)
2806 return (DDI_PROP_RESULT_ERROR);
2807
2808
2809 if ((caddr_t)ph->ph_cur_pos ==
2810 (caddr_t)ph->ph_data + ph->ph_size) {
2811 return (DDI_PROP_RESULT_EOF);
2812 } else if ((caddr_t)ph->ph_cur_pos >
2813 (caddr_t)ph->ph_data + ph->ph_size) {
2814 return (DDI_PROP_RESULT_EOF);
2815 }
2816
2817 /*
2818 * Move the current location to the start of the next bit of
2819 * undecoded data.
2820 */
2821 ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos + PROP_1275_INT_SIZE;
2822 return (DDI_PROP_RESULT_OK);
2823
2824 case DDI_PROP_CMD_GET_ESIZE:
2825 /*
2826 * Return the size of an encoded integer on OBP
2827 */
2828 return (PROP_1275_INT_SIZE);
2829
2830 case DDI_PROP_CMD_GET_DSIZE:
2831 /*
2832 * Return the size of a decoded integer on the system.
2833 */
2834 return (sizeof (int));
2835
2836 default:
2837 #ifdef DEBUG
2838 panic("ddi_prop_1275_int: %x impossible", cmd);
2839 /*NOTREACHED*/
2840 #else
2841 return (DDI_PROP_RESULT_ERROR);
2842 #endif /* DEBUG */
2843 }
2844 }
2845
2846 /*
2847 * 64 bit integer operator.
2848 *
2849 * This is an extension, defined by Sun, to the 1275 integer
2850 * operator. This routine handles the encoding/decoding of
2851 * 64 bit integer properties.
2852 */
2853 int
ddi_prop_int64_op(prop_handle_t * ph,uint_t cmd,int64_t * data)2854 ddi_prop_int64_op(prop_handle_t *ph, uint_t cmd, int64_t *data)
2855 {
2856
2857 switch (cmd) {
2858 case DDI_PROP_CMD_DECODE:
2859 /*
2860 * Check that there is encoded data
2861 */
2862 if (ph->ph_cur_pos == NULL || ph->ph_size == 0)
2863 return (DDI_PROP_RESULT_ERROR);
2864 if (ph->ph_flags & PH_FROM_PROM) {
2865 return (DDI_PROP_RESULT_ERROR);
2866 } else {
2867 if (ph->ph_size < sizeof (int64_t) ||
2868 ((int64_t *)ph->ph_cur_pos >
2869 ((int64_t *)ph->ph_data +
2870 ph->ph_size - sizeof (int64_t))))
2871 return (DDI_PROP_RESULT_ERROR);
2872 }
2873 /*
2874 * Copy the integer, using the implementation-specific
2875 * copy function if the property is coming from the PROM.
2876 */
2877 if (ph->ph_flags & PH_FROM_PROM) {
2878 return (DDI_PROP_RESULT_ERROR);
2879 } else {
2880 bcopy(ph->ph_cur_pos, data, sizeof (int64_t));
2881 }
2882
2883 /*
2884 * Move the current location to the start of the next
2885 * bit of undecoded data.
2886 */
2887 ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
2888 sizeof (int64_t);
2889 return (DDI_PROP_RESULT_OK);
2890
2891 case DDI_PROP_CMD_ENCODE:
2892 /*
2893 * Check that there is room to encoded the data
2894 */
2895 if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
2896 ph->ph_size < sizeof (int64_t) ||
2897 ((int64_t *)ph->ph_cur_pos > ((int64_t *)ph->ph_data +
2898 ph->ph_size - sizeof (int64_t))))
2899 return (DDI_PROP_RESULT_ERROR);
2900
2901 /*
2902 * Encode the integer into the byte stream one byte at a
2903 * time.
2904 */
2905 bcopy(data, ph->ph_cur_pos, sizeof (int64_t));
2906
2907 /*
2908 * Move the current location to the start of the next bit of
2909 * space where we can store encoded data.
2910 */
2911 ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
2912 sizeof (int64_t);
2913 return (DDI_PROP_RESULT_OK);
2914
2915 case DDI_PROP_CMD_SKIP:
2916 /*
2917 * Check that there is encoded data
2918 */
2919 if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
2920 ph->ph_size < sizeof (int64_t))
2921 return (DDI_PROP_RESULT_ERROR);
2922
2923 if ((caddr_t)ph->ph_cur_pos ==
2924 (caddr_t)ph->ph_data + ph->ph_size) {
2925 return (DDI_PROP_RESULT_EOF);
2926 } else if ((caddr_t)ph->ph_cur_pos >
2927 (caddr_t)ph->ph_data + ph->ph_size) {
2928 return (DDI_PROP_RESULT_EOF);
2929 }
2930
2931 /*
2932 * Move the current location to the start of
2933 * the next bit of undecoded data.
2934 */
2935 ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
2936 sizeof (int64_t);
2937 return (DDI_PROP_RESULT_OK);
2938
2939 case DDI_PROP_CMD_GET_ESIZE:
2940 /*
2941 * Return the size of an encoded integer on OBP
2942 */
2943 return (sizeof (int64_t));
2944
2945 case DDI_PROP_CMD_GET_DSIZE:
2946 /*
2947 * Return the size of a decoded integer on the system.
2948 */
2949 return (sizeof (int64_t));
2950
2951 default:
2952 #ifdef DEBUG
2953 panic("ddi_prop_int64_op: %x impossible", cmd);
2954 /*NOTREACHED*/
2955 #else
2956 return (DDI_PROP_RESULT_ERROR);
2957 #endif /* DEBUG */
2958 }
2959 }
2960
/*
 * OBP 1275 string operator.
 *
 * OBP strings are NULL terminated — usually.  Some values (e.g. the
 * 'true'/'false' booleans) arrive without a terminator, so every case
 * below also copes with a string that runs to the end of the property.
 */
int
ddi_prop_1275_string(prop_handle_t *ph, uint_t cmd, char *data)
{
	int n;
	char *p;
	char *end;

	switch (cmd) {
	case DDI_PROP_CMD_DECODE:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0) {
			return (DDI_PROP_RESULT_ERROR);
		}

		/*
		 * Match DDI_PROP_CMD_GET_DSIZE logic for when to stop and
		 * how to NULL terminate result.
		 */
		p = (char *)ph->ph_cur_pos;
		end = (char *)ph->ph_data + ph->ph_size;
		if (p >= end)
			return (DDI_PROP_RESULT_EOF);

		/* Copy up to and including the NUL, never past 'end'. */
		while (p < end) {
			*data++ = *p;
			if (*p++ == 0) {	/* NULL from OBP */
				ph->ph_cur_pos = p;
				return (DDI_PROP_RESULT_OK);
			}
		}

		/*
		 * If OBP did not NULL terminate string, which happens
		 * (at least) for 'true'/'false' boolean values, account for
		 * the space and store null termination on decode.
		 */
		ph->ph_cur_pos = p;
		*data = 0;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_ENCODE:
		/*
		 * Check that there is room to encoded the data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0) {
			return (DDI_PROP_RESULT_ERROR);
		}

		/* Room for the string plus its terminator must remain. */
		n = strlen(data) + 1;
		if ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
		    ph->ph_size - n)) {
			return (DDI_PROP_RESULT_ERROR);
		}

		/*
		 * Copy the NULL terminated string
		 */
		bcopy(data, ph->ph_cur_pos, n);

		/*
		 * Move the current location to the start of the next bit of
		 * space where we can store encoded data.
		 */
		ph->ph_cur_pos = (char *)ph->ph_cur_pos + n;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_SKIP:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0) {
			return (DDI_PROP_RESULT_ERROR);
		}

		/*
		 * Walk forward to just past the terminating NULL.
		 * We know the size of the property, we need to
		 * ensure that the string is properly formatted,
		 * since we may be looking up random OBP data.
		 */
		p = (char *)ph->ph_cur_pos;
		end = (char *)ph->ph_data + ph->ph_size;
		if (p >= end)
			return (DDI_PROP_RESULT_EOF);

		while (p < end) {
			if (*p++ == 0) {	/* NULL from OBP */
				ph->ph_cur_pos = p;
				return (DDI_PROP_RESULT_OK);
			}
		}

		/*
		 * Accommodate the fact that OBP does not always NULL
		 * terminate strings.
		 */
		ph->ph_cur_pos = p;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_GET_ESIZE:
		/*
		 * Return the size of the encoded string on OBP.
		 */
		return (strlen(data) + 1);

	case DDI_PROP_CMD_GET_DSIZE:
		/*
		 * Return the string length plus one for the NULL.
		 * We know the size of the property, we need to
		 * ensure that the string is properly formatted,
		 * since we may be looking up random OBP data.
		 *
		 * NOTE: unlike the other GET_DSIZE operators, this one
		 * also advances ph_cur_pos past the string, so callers
		 * can size several strings in sequence.
		 */
		p = (char *)ph->ph_cur_pos;
		end = (char *)ph->ph_data + ph->ph_size;
		if (p >= end)
			return (DDI_PROP_RESULT_EOF);

		for (n = 0; p < end; n++) {
			if (*p++ == 0) {	/* NULL from OBP */
				ph->ph_cur_pos = p;
				return (n + 1);
			}
		}

		/*
		 * If OBP did not NULL terminate string, which happens for
		 * 'true'/'false' boolean values, account for the space
		 * to store null termination here.
		 */
		ph->ph_cur_pos = p;
		return (n + 1);

	default:
#ifdef DEBUG
		panic("ddi_prop_1275_string: %x impossible", cmd);
		/*NOTREACHED*/
#else
		return (DDI_PROP_RESULT_ERROR);
#endif	/* DEBUG */
	}
}
3109
3110 /*
3111 * OBP 1275 byte operator
3112 *
3113 * Caller must specify the number of bytes to get. OBP encodes bytes
3114 * as a byte so there is a 1-to-1 translation.
3115 */
3116 int
ddi_prop_1275_bytes(prop_handle_t * ph,uint_t cmd,uchar_t * data,uint_t nelements)3117 ddi_prop_1275_bytes(prop_handle_t *ph, uint_t cmd, uchar_t *data,
3118 uint_t nelements)
3119 {
3120 switch (cmd) {
3121 case DDI_PROP_CMD_DECODE:
3122 /*
3123 * Check that there is encoded data
3124 */
3125 if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
3126 ph->ph_size < nelements ||
3127 ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
3128 ph->ph_size - nelements)))
3129 return (DDI_PROP_RESULT_ERROR);
3130
3131 /*
3132 * Copy out the bytes
3133 */
3134 bcopy(ph->ph_cur_pos, data, nelements);
3135
3136 /*
3137 * Move the current location
3138 */
3139 ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements;
3140 return (DDI_PROP_RESULT_OK);
3141
3142 case DDI_PROP_CMD_ENCODE:
3143 /*
3144 * Check that there is room to encode the data
3145 */
3146 if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
3147 ph->ph_size < nelements ||
3148 ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
3149 ph->ph_size - nelements)))
3150 return (DDI_PROP_RESULT_ERROR);
3151
3152 /*
3153 * Copy in the bytes
3154 */
3155 bcopy(data, ph->ph_cur_pos, nelements);
3156
3157 /*
3158 * Move the current location to the start of the next bit of
3159 * space where we can store encoded data.
3160 */
3161 ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements;
3162 return (DDI_PROP_RESULT_OK);
3163
3164 case DDI_PROP_CMD_SKIP:
3165 /*
3166 * Check that there is encoded data
3167 */
3168 if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
3169 ph->ph_size < nelements)
3170 return (DDI_PROP_RESULT_ERROR);
3171
3172 if ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
3173 ph->ph_size - nelements))
3174 return (DDI_PROP_RESULT_EOF);
3175
3176 /*
3177 * Move the current location
3178 */
3179 ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements;
3180 return (DDI_PROP_RESULT_OK);
3181
3182 case DDI_PROP_CMD_GET_ESIZE:
3183 /*
3184 * The size in bytes of the encoded size is the
3185 * same as the decoded size provided by the caller.
3186 */
3187 return (nelements);
3188
3189 case DDI_PROP_CMD_GET_DSIZE:
3190 /*
3191 * Just return the number of bytes specified by the caller.
3192 */
3193 return (nelements);
3194
3195 default:
3196 #ifdef DEBUG
3197 panic("ddi_prop_1275_bytes: %x impossible", cmd);
3198 /*NOTREACHED*/
3199 #else
3200 return (DDI_PROP_RESULT_ERROR);
3201 #endif /* DEBUG */
3202 }
3203 }
3204
/*
 * Operator vector used for properties that come from the OBP, hardware
 * configuration files, or that are created by calls to
 * ddi_prop_update(9F).  Slot order matches struct prop_handle_ops:
 * int, string, byte-array, then 64-bit int (Sun extension) operators.
 */
static struct prop_handle_ops prop_1275_ops = {
	ddi_prop_1275_int,	/* integer encode/decode/skip/size */
	ddi_prop_1275_string,	/* string encode/decode/skip/size */
	ddi_prop_1275_bytes,	/* byte-array encode/decode/skip/size */
	ddi_prop_int64_op	/* 64-bit integer ops (never from PROM) */
};
3215
3216
3217 /*
3218 * Interface to create/modify a managed property on child's behalf...
3219 * Flags interpreted are:
3220 * DDI_PROP_CANSLEEP: Allow memory allocation to sleep.
3221 * DDI_PROP_SYSTEM_DEF: Manipulate system list rather than driver list.
3222 *
3223 * Use same dev_t when modifying or undefining a property.
3224 * Search for properties with DDI_DEV_T_ANY to match first named
3225 * property on the list.
3226 *
3227 * Properties are stored LIFO and subsequently will match the first
3228 * `matching' instance.
3229 */
3230
3231 /*
3232 * ddi_prop_add: Add a software defined property
3233 */
3234
/*
 * Allocate a new, zero-filled ddi_prop_t.
 * km_flags are KM_SLEEP or KM_NOSLEEP; with KM_NOSLEEP the result may
 * be NULL and callers must check for that.
 */
#define	DDI_NEW_PROP_T(km_flags)	\
	(kmem_zalloc(sizeof (ddi_prop_t), km_flags))
3242
3243 static int
ddi_prop_add(dev_t dev,dev_info_t * dip,int flags,char * name,caddr_t value,int length)3244 ddi_prop_add(dev_t dev, dev_info_t *dip, int flags,
3245 char *name, caddr_t value, int length)
3246 {
3247 ddi_prop_t *new_propp, *propp;
3248 ddi_prop_t **list_head = &(DEVI(dip)->devi_drv_prop_ptr);
3249 int km_flags = KM_NOSLEEP;
3250 int name_buf_len;
3251
3252 /*
3253 * If dev_t is DDI_DEV_T_ANY or name's length is zero return error.
3254 */
3255
3256 if (dev == DDI_DEV_T_ANY || name == (char *)0 || strlen(name) == 0)
3257 return (DDI_PROP_INVAL_ARG);
3258
3259 if (flags & DDI_PROP_CANSLEEP)
3260 km_flags = KM_SLEEP;
3261
3262 if (flags & DDI_PROP_SYSTEM_DEF)
3263 list_head = &(DEVI(dip)->devi_sys_prop_ptr);
3264 else if (flags & DDI_PROP_HW_DEF)
3265 list_head = &(DEVI(dip)->devi_hw_prop_ptr);
3266
3267 if ((new_propp = DDI_NEW_PROP_T(km_flags)) == NULL) {
3268 cmn_err(CE_CONT, prop_no_mem_msg, name);
3269 return (DDI_PROP_NO_MEMORY);
3270 }
3271
3272 /*
3273 * If dev is major number 0, then we need to do a ddi_name_to_major
3274 * to get the real major number for the device. This needs to be
3275 * done because some drivers need to call ddi_prop_create in their
3276 * attach routines but they don't have a dev. By creating the dev
3277 * ourself if the major number is 0, drivers will not have to know what
3278 * their major number. They can just create a dev with major number
3279 * 0 and pass it in. For device 0, we will be doing a little extra
3280 * work by recreating the same dev that we already have, but its the
3281 * price you pay :-).
3282 *
3283 * This fixes bug #1098060.
3284 */
3285 if (getmajor(dev) == DDI_MAJOR_T_UNKNOWN) {
3286 new_propp->prop_dev =
3287 makedevice(ddi_name_to_major(DEVI(dip)->devi_binding_name),
3288 getminor(dev));
3289 } else
3290 new_propp->prop_dev = dev;
3291
3292 /*
3293 * Allocate space for property name and copy it in...
3294 */
3295
3296 name_buf_len = strlen(name) + 1;
3297 new_propp->prop_name = kmem_alloc(name_buf_len, km_flags);
3298 if (new_propp->prop_name == 0) {
3299 kmem_free(new_propp, sizeof (ddi_prop_t));
3300 cmn_err(CE_CONT, prop_no_mem_msg, name);
3301 return (DDI_PROP_NO_MEMORY);
3302 }
3303 bcopy(name, new_propp->prop_name, name_buf_len);
3304
3305 /*
3306 * Set the property type
3307 */
3308 new_propp->prop_flags = flags & DDI_PROP_TYPE_MASK;
3309
3310 /*
3311 * Set length and value ONLY if not an explicit property undefine:
3312 * NOTE: value and length are zero for explicit undefines.
3313 */
3314
3315 if (flags & DDI_PROP_UNDEF_IT) {
3316 new_propp->prop_flags |= DDI_PROP_UNDEF_IT;
3317 } else {
3318 if ((new_propp->prop_len = length) != 0) {
3319 new_propp->prop_val = kmem_alloc(length, km_flags);
3320 if (new_propp->prop_val == 0) {
3321 kmem_free(new_propp->prop_name, name_buf_len);
3322 kmem_free(new_propp, sizeof (ddi_prop_t));
3323 cmn_err(CE_CONT, prop_no_mem_msg, name);
3324 return (DDI_PROP_NO_MEMORY);
3325 }
3326 bcopy(value, new_propp->prop_val, length);
3327 }
3328 }
3329
3330 /*
3331 * Link property into beginning of list. (Properties are LIFO order.)
3332 */
3333
3334 mutex_enter(&(DEVI(dip)->devi_lock));
3335 propp = *list_head;
3336 new_propp->prop_next = propp;
3337 *list_head = new_propp;
3338 mutex_exit(&(DEVI(dip)->devi_lock));
3339 return (DDI_PROP_SUCCESS);
3340 }
3341
3342
3343 /*
3344 * ddi_prop_change: Modify a software managed property value
3345 *
3346 * Set new length and value if found.
3347 * returns DDI_PROP_INVAL_ARG if dev is DDI_DEV_T_ANY or
3348 * input name is the NULL string.
3349 * returns DDI_PROP_NO_MEMORY if unable to allocate memory
3350 *
3351 * Note: an undef can be modified to be a define,
3352 * (you can't go the other way.)
3353 */
3354
static int
ddi_prop_change(dev_t dev, dev_info_t *dip, int flags,
    char *name, caddr_t value, int length)
{
	ddi_prop_t *propp;
	ddi_prop_t **ppropp;
	caddr_t p = NULL;

	if ((dev == DDI_DEV_T_ANY) || (name == NULL) || (strlen(name) == 0))
		return (DDI_PROP_INVAL_ARG);

	/*
	 * Preallocate the replacement value buffer before taking devi_lock,
	 * even though we may not need it (the property may not exist yet).
	 */
	if (length != 0) {
		p = kmem_alloc(length, (flags & DDI_PROP_CANSLEEP) ?
		    KM_SLEEP : KM_NOSLEEP);
		if (p == NULL) {
			cmn_err(CE_CONT, prop_no_mem_msg, name);
			return (DDI_PROP_NO_MEMORY);
		}
	}

	/*
	 * If the dev_t value contains DDI_MAJOR_T_UNKNOWN for the major
	 * number, a real dev_t value should be created based upon the dip's
	 * binding driver. See ddi_prop_add...
	 */
	if (getmajor(dev) == DDI_MAJOR_T_UNKNOWN)
		dev = makedevice(
		    ddi_name_to_major(DEVI(dip)->devi_binding_name),
		    getminor(dev));

	/*
	 * Check to see if the property exists. If so we modify it.
	 * Else we create it by calling ddi_prop_add().
	 */
	mutex_enter(&(DEVI(dip)->devi_lock));
	ppropp = &DEVI(dip)->devi_drv_prop_ptr;
	if (flags & DDI_PROP_SYSTEM_DEF)
		ppropp = &DEVI(dip)->devi_sys_prop_ptr;
	else if (flags & DDI_PROP_HW_DEF)
		ppropp = &DEVI(dip)->devi_hw_prop_ptr;

	if ((propp = i_ddi_prop_search(dev, name, flags, ppropp)) != NULL) {
		/*
		 * Found: copy the new value into the preallocated buffer,
		 * free the old value, and swap the buffer in.  Clearing
		 * DDI_PROP_UNDEF_IT converts an explicit undef into a
		 * define (the reverse is not done here).
		 */
		if (length != 0)
			bcopy(value, p, length);

		if (propp->prop_len != 0)
			kmem_free(propp->prop_val, propp->prop_len);

		propp->prop_len = length;
		propp->prop_val = p;
		propp->prop_flags &= ~DDI_PROP_UNDEF_IT;
		mutex_exit(&(DEVI(dip)->devi_lock));
		return (DDI_PROP_SUCCESS);
	}

	/* Not found: discard the preallocated buffer and create fresh. */
	mutex_exit(&(DEVI(dip)->devi_lock));
	if (length != 0)
		kmem_free(p, length);

	return (ddi_prop_add(dev, dip, flags, name, value, length));
}
3424
3425 /*
3426 * Common update routine used to update and encode a property. Creates
3427 * a property handle, calls the property encode routine, figures out if
3428 * the property already exists and updates if it does. Otherwise it
3429 * creates if it does not exist.
3430 */
int
ddi_prop_update_common(dev_t match_dev, dev_info_t *dip, int flags,
    char *name, void *data, uint_t nelements,
    int (*prop_create)(prop_handle_t *, void *data, uint_t nelements))
{
	prop_handle_t ph;
	int rval;
	uint_t ourflags;

	/*
	 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
	 * return error.
	 */
	if (match_dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0)
		return (DDI_PROP_INVAL_ARG);

	/*
	 * Create the handle (encode via the 1275 ops vector).
	 */
	ph.ph_data = NULL;
	ph.ph_cur_pos = NULL;
	ph.ph_save_pos = NULL;
	ph.ph_size = 0;
	ph.ph_ops = &prop_1275_ops;

	/*
	 * ourflags:
	 * For compatibility with the old interfaces. The old interfaces
	 * didn't sleep by default and slept when the flag was set. These
	 * interfaces do the opposite. So the old interfaces now set the
	 * DDI_PROP_DONTSLEEP flag by default which tells us not to sleep.
	 *
	 * ph.ph_flags:
	 * Blocked data or unblocked data allocation
	 * for ph.ph_data in ddi_prop_encode_alloc()
	 */
	if (flags & DDI_PROP_DONTSLEEP) {
		ourflags = flags;
		ph.ph_flags = DDI_PROP_DONTSLEEP;
	} else {
		ourflags = flags | DDI_PROP_CANSLEEP;
		ph.ph_flags = DDI_PROP_CANSLEEP;
	}

	/*
	 * Encode the data and store it in the property handle by
	 * calling the prop_encode routine.
	 */
	if ((rval = (*prop_create)(&ph, data, nelements)) !=
	    DDI_PROP_SUCCESS) {
		if (rval == DDI_PROP_NO_MEMORY)
			cmn_err(CE_CONT, prop_no_mem_msg, name);
		if (ph.ph_size != 0)
			kmem_free(ph.ph_data, ph.ph_size);
		return (rval);
	}

	/*
	 * The old interfaces use a stacking approach to creating
	 * properties. If we are being called from the old interfaces,
	 * the DDI_PROP_STACK_CREATE flag will be set, so we just do a
	 * create without checking.
	 */
	if (flags & DDI_PROP_STACK_CREATE) {
		rval = ddi_prop_add(match_dev, dip,
		    ourflags, name, ph.ph_data, ph.ph_size);
	} else {
		rval = ddi_prop_change(match_dev, dip,
		    ourflags, name, ph.ph_data, ph.ph_size);
	}

	/*
	 * Free the encoded data allocated in the prop_encode routine;
	 * ddi_prop_add/ddi_prop_change made their own copy of it.
	 */
	if (ph.ph_size != 0)
		kmem_free(ph.ph_data, ph.ph_size);

	return (rval);
}
3510
3511
3512 /*
3513 * ddi_prop_create: Define a managed property:
3514 * See above for details.
3515 */
3516
3517 int
ddi_prop_create(dev_t dev,dev_info_t * dip,int flag,char * name,caddr_t value,int length)3518 ddi_prop_create(dev_t dev, dev_info_t *dip, int flag,
3519 char *name, caddr_t value, int length)
3520 {
3521 if (!(flag & DDI_PROP_CANSLEEP)) {
3522 flag |= DDI_PROP_DONTSLEEP;
3523 #ifdef DDI_PROP_DEBUG
3524 if (length != 0)
3525 cmn_err(CE_NOTE, "!ddi_prop_create: interface obsolete,"
3526 "use ddi_prop_update (prop = %s, node = %s%d)",
3527 name, ddi_driver_name(dip), ddi_get_instance(dip));
3528 #endif /* DDI_PROP_DEBUG */
3529 }
3530 flag &= ~DDI_PROP_SYSTEM_DEF;
3531 flag |= DDI_PROP_STACK_CREATE | DDI_PROP_TYPE_ANY;
3532 return (ddi_prop_update_common(dev, dip, flag, name,
3533 value, length, ddi_prop_fm_encode_bytes));
3534 }
3535
3536 int
e_ddi_prop_create(dev_t dev,dev_info_t * dip,int flag,char * name,caddr_t value,int length)3537 e_ddi_prop_create(dev_t dev, dev_info_t *dip, int flag,
3538 char *name, caddr_t value, int length)
3539 {
3540 if (!(flag & DDI_PROP_CANSLEEP))
3541 flag |= DDI_PROP_DONTSLEEP;
3542 flag |= DDI_PROP_SYSTEM_DEF | DDI_PROP_STACK_CREATE | DDI_PROP_TYPE_ANY;
3543 return (ddi_prop_update_common(dev, dip, flag,
3544 name, value, length, ddi_prop_fm_encode_bytes));
3545 }
3546
3547 int
ddi_prop_modify(dev_t dev,dev_info_t * dip,int flag,char * name,caddr_t value,int length)3548 ddi_prop_modify(dev_t dev, dev_info_t *dip, int flag,
3549 char *name, caddr_t value, int length)
3550 {
3551 ASSERT((flag & DDI_PROP_TYPE_MASK) == 0);
3552
3553 /*
3554 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
3555 * return error.
3556 */
3557 if (dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0)
3558 return (DDI_PROP_INVAL_ARG);
3559
3560 if (!(flag & DDI_PROP_CANSLEEP))
3561 flag |= DDI_PROP_DONTSLEEP;
3562 flag &= ~DDI_PROP_SYSTEM_DEF;
3563 if (ddi_prop_exists(dev, dip, (flag | DDI_PROP_NOTPROM), name) == 0)
3564 return (DDI_PROP_NOT_FOUND);
3565
3566 return (ddi_prop_update_common(dev, dip,
3567 (flag | DDI_PROP_TYPE_BYTE), name,
3568 value, length, ddi_prop_fm_encode_bytes));
3569 }
3570
3571 int
e_ddi_prop_modify(dev_t dev,dev_info_t * dip,int flag,char * name,caddr_t value,int length)3572 e_ddi_prop_modify(dev_t dev, dev_info_t *dip, int flag,
3573 char *name, caddr_t value, int length)
3574 {
3575 ASSERT((flag & DDI_PROP_TYPE_MASK) == 0);
3576
3577 /*
3578 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
3579 * return error.
3580 */
3581 if (dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0)
3582 return (DDI_PROP_INVAL_ARG);
3583
3584 if (ddi_prop_exists(dev, dip, (flag | DDI_PROP_SYSTEM_DEF), name) == 0)
3585 return (DDI_PROP_NOT_FOUND);
3586
3587 if (!(flag & DDI_PROP_CANSLEEP))
3588 flag |= DDI_PROP_DONTSLEEP;
3589 return (ddi_prop_update_common(dev, dip,
3590 (flag | DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_BYTE),
3591 name, value, length, ddi_prop_fm_encode_bytes));
3592 }
3593
3594
3595 /*
3596 * Common lookup routine used to lookup and decode a property.
3597 * Creates a property handle, searches for the raw encoded data,
3598 * fills in the handle, and calls the property decode functions
3599 * passed in.
3600 *
3601 * This routine is not static because ddi_bus_prop_op() which lives in
3602 * ddi_impl.c calls it. No driver should be calling this routine.
3603 */
int
ddi_prop_lookup_common(dev_t match_dev, dev_info_t *dip,
    uint_t flags, char *name, void *data, uint_t *nelements,
    int (*prop_decoder)(prop_handle_t *, void *data, uint_t *nelements))
{
	int rval;
	uint_t ourflags;
	prop_handle_t ph;

	/*
	 * Note: DDI_DEV_T_ANY is a legal match_dev for lookups; only
	 * DDI_DEV_T_NONE and NULL/empty names are rejected here.
	 */
	if ((match_dev == DDI_DEV_T_NONE) ||
	    (name == NULL) || (strlen(name) == 0))
		return (DDI_PROP_INVAL_ARG);

	/* Default to sleeping allocations unless the caller forbids it. */
	ourflags = (flags & DDI_PROP_DONTSLEEP) ? flags :
	    flags | DDI_PROP_CANSLEEP;

	/*
	 * Get the encoded data
	 */
	bzero(&ph, sizeof (prop_handle_t));

	if ((flags & DDI_UNBND_DLPI2) || (flags & DDI_PROP_ROOTNEX_GLOBAL)) {
		/*
		 * For rootnex and unbound dlpi style-2 devices, index into
		 * the devnames' array and search the global
		 * property list.
		 */
		ourflags &= ~DDI_UNBND_DLPI2;
		rval = i_ddi_prop_search_global(match_dev,
		    ourflags, name, &ph.ph_data, &ph.ph_size);
	} else {
		rval = ddi_prop_search_common(match_dev, dip,
		    PROP_LEN_AND_VAL_ALLOC, ourflags, name,
		    &ph.ph_data, &ph.ph_size);

	}

	if (rval != DDI_PROP_SUCCESS && rval != DDI_PROP_FOUND_1275) {
		ASSERT(ph.ph_data == NULL);
		ASSERT(ph.ph_size == 0);
		return (rval);
	}

	/*
	 * If the encoded data came from a OBP or software
	 * use the 1275 OBP decode/encode routines.
	 * PH_FROM_PROM flags PROM-sourced data for the decoder.
	 */
	ph.ph_cur_pos = ph.ph_data;
	ph.ph_save_pos = ph.ph_data;
	ph.ph_ops = &prop_1275_ops;
	ph.ph_flags = (rval == DDI_PROP_FOUND_1275) ? PH_FROM_PROM : 0;

	/* Decode into the caller's storage via the supplied routine. */
	rval = (*prop_decoder)(&ph, data, nelements);

	/*
	 * Free the encoded data
	 */
	if (ph.ph_size != 0)
		kmem_free(ph.ph_data, ph.ph_size);

	return (rval);
}
3666
3667 /*
3668 * Lookup and return an array of composite properties. The driver must
3669 * provide the decode routine.
3670 */
3671 int
ddi_prop_lookup(dev_t match_dev,dev_info_t * dip,uint_t flags,char * name,void * data,uint_t * nelements,int (* prop_decoder)(prop_handle_t *,void * data,uint_t * nelements))3672 ddi_prop_lookup(dev_t match_dev, dev_info_t *dip,
3673 uint_t flags, char *name, void *data, uint_t *nelements,
3674 int (*prop_decoder)(prop_handle_t *, void *data, uint_t *nelements))
3675 {
3676 return (ddi_prop_lookup_common(match_dev, dip,
3677 (flags | DDI_PROP_TYPE_COMPOSITE), name,
3678 data, nelements, prop_decoder));
3679 }
3680
3681 /*
3682 * Return 1 if a property exists (no type checking done).
3683 * Return 0 if it does not exist.
3684 */
3685 int
ddi_prop_exists(dev_t match_dev,dev_info_t * dip,uint_t flags,char * name)3686 ddi_prop_exists(dev_t match_dev, dev_info_t *dip, uint_t flags, char *name)
3687 {
3688 int i;
3689 uint_t x = 0;
3690
3691 i = ddi_prop_search_common(match_dev, dip, PROP_EXISTS,
3692 flags | DDI_PROP_TYPE_MASK, name, NULL, &x);
3693 return (i == DDI_PROP_SUCCESS || i == DDI_PROP_FOUND_1275);
3694 }
3695
3696
3697 /*
3698 * Update an array of composite properties. The driver must
3699 * provide the encode routine.
3700 */
3701 int
ddi_prop_update(dev_t match_dev,dev_info_t * dip,char * name,void * data,uint_t nelements,int (* prop_create)(prop_handle_t *,void * data,uint_t nelements))3702 ddi_prop_update(dev_t match_dev, dev_info_t *dip,
3703 char *name, void *data, uint_t nelements,
3704 int (*prop_create)(prop_handle_t *, void *data, uint_t nelements))
3705 {
3706 return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_COMPOSITE,
3707 name, data, nelements, prop_create));
3708 }
3709
3710 /*
3711 * Get a single integer or boolean property and return it.
3712 * If the property does not exists, or cannot be decoded,
3713 * then return the defvalue passed in.
3714 *
3715 * This routine always succeeds.
3716 */
3717 int
ddi_prop_get_int(dev_t match_dev,dev_info_t * dip,uint_t flags,char * name,int defvalue)3718 ddi_prop_get_int(dev_t match_dev, dev_info_t *dip, uint_t flags,
3719 char *name, int defvalue)
3720 {
3721 int data;
3722 uint_t nelements;
3723 int rval;
3724
3725 if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
3726 LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
3727 #ifdef DEBUG
3728 if (dip != NULL) {
3729 cmn_err(CE_WARN, "ddi_prop_get_int: invalid flag"
3730 " 0x%x (prop = %s, node = %s%d)", flags,
3731 name, ddi_driver_name(dip), ddi_get_instance(dip));
3732 }
3733 #endif /* DEBUG */
3734 flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
3735 LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
3736 }
3737
3738 if ((rval = ddi_prop_lookup_common(match_dev, dip,
3739 (flags | DDI_PROP_TYPE_INT), name, &data, &nelements,
3740 ddi_prop_fm_decode_int)) != DDI_PROP_SUCCESS) {
3741 if (rval == DDI_PROP_END_OF_DATA)
3742 data = 1;
3743 else
3744 data = defvalue;
3745 }
3746 return (data);
3747 }
3748
3749 /*
3750 * Get a single 64 bit integer or boolean property and return it.
3751 * If the property does not exists, or cannot be decoded,
3752 * then return the defvalue passed in.
3753 *
3754 * This routine always succeeds.
3755 */
int64_t
ddi_prop_get_int64(dev_t match_dev, dev_info_t *dip, uint_t flags,
    char *name, int64_t defvalue)
{
	int64_t data;
	uint_t nelements;
	int rval;

	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
#ifdef DEBUG
		if (dip != NULL) {
			cmn_err(CE_WARN, "ddi_prop_get_int64: invalid flag"
			    " 0x%x (prop = %s, node = %s%d)", flags,
			    name, ddi_driver_name(dip), ddi_get_instance(dip));
		}
#endif /* DEBUG */
		/*
		 * NOTE(review): unlike ddi_prop_get_int(), which strips the
		 * bad bits and continues, this returns DDI_PROP_INVAL_ARG —
		 * despite the "always succeeds" note above, the caller gets
		 * an error code in place of a property value here.
		 */
		return (DDI_PROP_INVAL_ARG);
	}

	/*
	 * DDI_PROP_NOTPROM is forced on, presumably because 64-bit int
	 * properties are software-defined only — TODO confirm.
	 */
	if ((rval = ddi_prop_lookup_common(match_dev, dip,
	    (flags | DDI_PROP_TYPE_INT64 | DDI_PROP_NOTPROM),
	    name, &data, &nelements, ddi_prop_fm_decode_int64))
	    != DDI_PROP_SUCCESS) {
		/* A zero-length (boolean) property reports as 1. */
		if (rval == DDI_PROP_END_OF_DATA)
			data = 1;
		else
			data = defvalue;
	}
	return (data);
}
3787
3788 /*
3789 * Get an array of integer property
3790 */
3791 int
ddi_prop_lookup_int_array(dev_t match_dev,dev_info_t * dip,uint_t flags,char * name,int ** data,uint_t * nelements)3792 ddi_prop_lookup_int_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
3793 char *name, int **data, uint_t *nelements)
3794 {
3795 if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
3796 LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
3797 #ifdef DEBUG
3798 if (dip != NULL) {
3799 cmn_err(CE_WARN, "ddi_prop_lookup_int_array: "
3800 "invalid flag 0x%x (prop = %s, node = %s%d)",
3801 flags, name, ddi_driver_name(dip),
3802 ddi_get_instance(dip));
3803 }
3804 #endif /* DEBUG */
3805 flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
3806 LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
3807 }
3808
3809 return (ddi_prop_lookup_common(match_dev, dip,
3810 (flags | DDI_PROP_TYPE_INT), name, data,
3811 nelements, ddi_prop_fm_decode_ints));
3812 }
3813
3814 /*
3815 * Get an array of 64 bit integer properties
3816 */
3817 int
ddi_prop_lookup_int64_array(dev_t match_dev,dev_info_t * dip,uint_t flags,char * name,int64_t ** data,uint_t * nelements)3818 ddi_prop_lookup_int64_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
3819 char *name, int64_t **data, uint_t *nelements)
3820 {
3821 if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
3822 LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
3823 #ifdef DEBUG
3824 if (dip != NULL) {
3825 cmn_err(CE_WARN, "ddi_prop_lookup_int64_array: "
3826 "invalid flag 0x%x (prop = %s, node = %s%d)",
3827 flags, name, ddi_driver_name(dip),
3828 ddi_get_instance(dip));
3829 }
3830 #endif /* DEBUG */
3831 return (DDI_PROP_INVAL_ARG);
3832 }
3833
3834 return (ddi_prop_lookup_common(match_dev, dip,
3835 (flags | DDI_PROP_TYPE_INT64 | DDI_PROP_NOTPROM),
3836 name, data, nelements, ddi_prop_fm_decode_int64_array));
3837 }
3838
3839 /*
3840 * Update a single integer property. If the property exists on the drivers
3841 * property list it updates, else it creates it.
3842 */
3843 int
ddi_prop_update_int(dev_t match_dev,dev_info_t * dip,char * name,int data)3844 ddi_prop_update_int(dev_t match_dev, dev_info_t *dip,
3845 char *name, int data)
3846 {
3847 return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT,
3848 name, &data, 1, ddi_prop_fm_encode_ints));
3849 }
3850
3851 /*
3852 * Update a single 64 bit integer property.
3853 * Update the driver property list if it exists, else create it.
3854 */
3855 int
ddi_prop_update_int64(dev_t match_dev,dev_info_t * dip,char * name,int64_t data)3856 ddi_prop_update_int64(dev_t match_dev, dev_info_t *dip,
3857 char *name, int64_t data)
3858 {
3859 return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT64,
3860 name, &data, 1, ddi_prop_fm_encode_int64));
3861 }
3862
3863 int
e_ddi_prop_update_int(dev_t match_dev,dev_info_t * dip,char * name,int data)3864 e_ddi_prop_update_int(dev_t match_dev, dev_info_t *dip,
3865 char *name, int data)
3866 {
3867 return (ddi_prop_update_common(match_dev, dip,
3868 DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT,
3869 name, &data, 1, ddi_prop_fm_encode_ints));
3870 }
3871
3872 int
e_ddi_prop_update_int64(dev_t match_dev,dev_info_t * dip,char * name,int64_t data)3873 e_ddi_prop_update_int64(dev_t match_dev, dev_info_t *dip,
3874 char *name, int64_t data)
3875 {
3876 return (ddi_prop_update_common(match_dev, dip,
3877 DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT64,
3878 name, &data, 1, ddi_prop_fm_encode_int64));
3879 }
3880
3881 /*
3882 * Update an array of integer property. If the property exists on the drivers
3883 * property list it updates, else it creates it.
3884 */
3885 int
ddi_prop_update_int_array(dev_t match_dev,dev_info_t * dip,char * name,int * data,uint_t nelements)3886 ddi_prop_update_int_array(dev_t match_dev, dev_info_t *dip,
3887 char *name, int *data, uint_t nelements)
3888 {
3889 return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT,
3890 name, data, nelements, ddi_prop_fm_encode_ints));
3891 }
3892
3893 /*
3894 * Update an array of 64 bit integer properties.
3895 * Update the driver property list if it exists, else create it.
3896 */
3897 int
ddi_prop_update_int64_array(dev_t match_dev,dev_info_t * dip,char * name,int64_t * data,uint_t nelements)3898 ddi_prop_update_int64_array(dev_t match_dev, dev_info_t *dip,
3899 char *name, int64_t *data, uint_t nelements)
3900 {
3901 return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT64,
3902 name, data, nelements, ddi_prop_fm_encode_int64));
3903 }
3904
3905 int
e_ddi_prop_update_int64_array(dev_t match_dev,dev_info_t * dip,char * name,int64_t * data,uint_t nelements)3906 e_ddi_prop_update_int64_array(dev_t match_dev, dev_info_t *dip,
3907 char *name, int64_t *data, uint_t nelements)
3908 {
3909 return (ddi_prop_update_common(match_dev, dip,
3910 DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT64,
3911 name, data, nelements, ddi_prop_fm_encode_int64));
3912 }
3913
3914 int
e_ddi_prop_update_int_array(dev_t match_dev,dev_info_t * dip,char * name,int * data,uint_t nelements)3915 e_ddi_prop_update_int_array(dev_t match_dev, dev_info_t *dip,
3916 char *name, int *data, uint_t nelements)
3917 {
3918 return (ddi_prop_update_common(match_dev, dip,
3919 DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT,
3920 name, data, nelements, ddi_prop_fm_encode_ints));
3921 }
3922
3923 /*
3924 * Get a single string property.
3925 */
3926 int
ddi_prop_lookup_string(dev_t match_dev,dev_info_t * dip,uint_t flags,char * name,char ** data)3927 ddi_prop_lookup_string(dev_t match_dev, dev_info_t *dip, uint_t flags,
3928 char *name, char **data)
3929 {
3930 uint_t x;
3931
3932 if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
3933 LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
3934 #ifdef DEBUG
3935 if (dip != NULL) {
3936 cmn_err(CE_WARN, "%s: invalid flag 0x%x "
3937 "(prop = %s, node = %s%d); invalid bits ignored",
3938 "ddi_prop_lookup_string", flags, name,
3939 ddi_driver_name(dip), ddi_get_instance(dip));
3940 }
3941 #endif /* DEBUG */
3942 flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
3943 LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
3944 }
3945
3946 return (ddi_prop_lookup_common(match_dev, dip,
3947 (flags | DDI_PROP_TYPE_STRING), name, data,
3948 &x, ddi_prop_fm_decode_string));
3949 }
3950
3951 /*
3952 * Get an array of strings property.
3953 */
3954 int
ddi_prop_lookup_string_array(dev_t match_dev,dev_info_t * dip,uint_t flags,char * name,char *** data,uint_t * nelements)3955 ddi_prop_lookup_string_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
3956 char *name, char ***data, uint_t *nelements)
3957 {
3958 if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
3959 LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
3960 #ifdef DEBUG
3961 if (dip != NULL) {
3962 cmn_err(CE_WARN, "ddi_prop_lookup_string_array: "
3963 "invalid flag 0x%x (prop = %s, node = %s%d)",
3964 flags, name, ddi_driver_name(dip),
3965 ddi_get_instance(dip));
3966 }
3967 #endif /* DEBUG */
3968 flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
3969 LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
3970 }
3971
3972 return (ddi_prop_lookup_common(match_dev, dip,
3973 (flags | DDI_PROP_TYPE_STRING), name, data,
3974 nelements, ddi_prop_fm_decode_strings));
3975 }
3976
3977 /*
3978 * Update a single string property.
3979 */
3980 int
ddi_prop_update_string(dev_t match_dev,dev_info_t * dip,char * name,char * data)3981 ddi_prop_update_string(dev_t match_dev, dev_info_t *dip,
3982 char *name, char *data)
3983 {
3984 return (ddi_prop_update_common(match_dev, dip,
3985 DDI_PROP_TYPE_STRING, name, &data, 1,
3986 ddi_prop_fm_encode_string));
3987 }
3988
3989 int
e_ddi_prop_update_string(dev_t match_dev,dev_info_t * dip,char * name,char * data)3990 e_ddi_prop_update_string(dev_t match_dev, dev_info_t *dip,
3991 char *name, char *data)
3992 {
3993 return (ddi_prop_update_common(match_dev, dip,
3994 DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_STRING,
3995 name, &data, 1, ddi_prop_fm_encode_string));
3996 }
3997
3998
3999 /*
4000 * Update an array of strings property.
4001 */
4002 int
ddi_prop_update_string_array(dev_t match_dev,dev_info_t * dip,char * name,char ** data,uint_t nelements)4003 ddi_prop_update_string_array(dev_t match_dev, dev_info_t *dip,
4004 char *name, char **data, uint_t nelements)
4005 {
4006 return (ddi_prop_update_common(match_dev, dip,
4007 DDI_PROP_TYPE_STRING, name, data, nelements,
4008 ddi_prop_fm_encode_strings));
4009 }
4010
4011 int
e_ddi_prop_update_string_array(dev_t match_dev,dev_info_t * dip,char * name,char ** data,uint_t nelements)4012 e_ddi_prop_update_string_array(dev_t match_dev, dev_info_t *dip,
4013 char *name, char **data, uint_t nelements)
4014 {
4015 return (ddi_prop_update_common(match_dev, dip,
4016 DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_STRING,
4017 name, data, nelements,
4018 ddi_prop_fm_encode_strings));
4019 }
4020
4021
4022 /*
4023 * Get an array of bytes property.
4024 */
4025 int
ddi_prop_lookup_byte_array(dev_t match_dev,dev_info_t * dip,uint_t flags,char * name,uchar_t ** data,uint_t * nelements)4026 ddi_prop_lookup_byte_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
4027 char *name, uchar_t **data, uint_t *nelements)
4028 {
4029 if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4030 LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
4031 #ifdef DEBUG
4032 if (dip != NULL) {
4033 cmn_err(CE_WARN, "ddi_prop_lookup_byte_array: "
4034 " invalid flag 0x%x (prop = %s, node = %s%d)",
4035 flags, name, ddi_driver_name(dip),
4036 ddi_get_instance(dip));
4037 }
4038 #endif /* DEBUG */
4039 flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4040 LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
4041 }
4042
4043 return (ddi_prop_lookup_common(match_dev, dip,
4044 (flags | DDI_PROP_TYPE_BYTE), name, data,
4045 nelements, ddi_prop_fm_decode_bytes));
4046 }
4047
4048 /*
4049 * Update an array of bytes property.
4050 */
4051 int
ddi_prop_update_byte_array(dev_t match_dev,dev_info_t * dip,char * name,uchar_t * data,uint_t nelements)4052 ddi_prop_update_byte_array(dev_t match_dev, dev_info_t *dip,
4053 char *name, uchar_t *data, uint_t nelements)
4054 {
4055 if (nelements == 0)
4056 return (DDI_PROP_INVAL_ARG);
4057
4058 return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_BYTE,
4059 name, data, nelements, ddi_prop_fm_encode_bytes));
4060 }
4061
4062
4063 int
e_ddi_prop_update_byte_array(dev_t match_dev,dev_info_t * dip,char * name,uchar_t * data,uint_t nelements)4064 e_ddi_prop_update_byte_array(dev_t match_dev, dev_info_t *dip,
4065 char *name, uchar_t *data, uint_t nelements)
4066 {
4067 if (nelements == 0)
4068 return (DDI_PROP_INVAL_ARG);
4069
4070 return (ddi_prop_update_common(match_dev, dip,
4071 DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_BYTE,
4072 name, data, nelements, ddi_prop_fm_encode_bytes));
4073 }
4074
4075
4076 /*
4077 * ddi_prop_remove_common: Undefine a managed property:
4078 * Input dev_t must match dev_t when defined.
4079 * Returns DDI_PROP_NOT_FOUND, possibly.
4080 * DDI_PROP_INVAL_ARG is also possible if dev is
4081 * DDI_DEV_T_ANY or incoming name is the NULL string.
4082 */
int
ddi_prop_remove_common(dev_t dev, dev_info_t *dip, char *name, int flag)
{
	ddi_prop_t **list_head = &(DEVI(dip)->devi_drv_prop_ptr);
	ddi_prop_t *propp;
	ddi_prop_t *lastpropp = NULL;

	if ((dev == DDI_DEV_T_ANY) || (name == (char *)0) ||
	    (strlen(name) == 0)) {
		return (DDI_PROP_INVAL_ARG);
	}

	/* Select the list named by flag: system, hardware, or driver. */
	if (flag & DDI_PROP_SYSTEM_DEF)
		list_head = &(DEVI(dip)->devi_sys_prop_ptr);
	else if (flag & DDI_PROP_HW_DEF)
		list_head = &(DEVI(dip)->devi_hw_prop_ptr);

	mutex_enter(&(DEVI(dip)->devi_lock));

	/* Find the first entry matching both name and exact dev_t. */
	for (propp = *list_head; propp != NULL; propp = propp->prop_next) {
		if (DDI_STRSAME(propp->prop_name, name) &&
		    (dev == propp->prop_dev)) {
			/*
			 * Unlink this propp allowing for it to
			 * be first in the list:
			 */

			if (lastpropp == NULL)
				*list_head = propp->prop_next;
			else
				lastpropp->prop_next = propp->prop_next;

			/* Drop the lock before freeing; propp is private. */
			mutex_exit(&(DEVI(dip)->devi_lock));

			/*
			 * Free memory and return...
			 */
			kmem_free(propp->prop_name,
			    strlen(propp->prop_name) + 1);
			if (propp->prop_len != 0)
				kmem_free(propp->prop_val, propp->prop_len);
			kmem_free(propp, sizeof (ddi_prop_t));
			return (DDI_PROP_SUCCESS);
		}
		lastpropp = propp;
	}
	mutex_exit(&(DEVI(dip)->devi_lock));
	return (DDI_PROP_NOT_FOUND);
}
4132
4133 int
ddi_prop_remove(dev_t dev,dev_info_t * dip,char * name)4134 ddi_prop_remove(dev_t dev, dev_info_t *dip, char *name)
4135 {
4136 return (ddi_prop_remove_common(dev, dip, name, 0));
4137 }
4138
4139 int
e_ddi_prop_remove(dev_t dev,dev_info_t * dip,char * name)4140 e_ddi_prop_remove(dev_t dev, dev_info_t *dip, char *name)
4141 {
4142 return (ddi_prop_remove_common(dev, dip, name, DDI_PROP_SYSTEM_DEF));
4143 }
4144
4145 /*
4146 * e_ddi_prop_list_delete: remove a list of properties
4147 * Note that the caller needs to provide the required protection
4148 * (eg. devi_lock if these properties are still attached to a devi)
4149 */
void
e_ddi_prop_list_delete(ddi_prop_t *props)
{
	/* Thin public wrapper over the internal list deletion routine. */
	i_ddi_prop_list_delete(props);
}
4155
4156 /*
4157 * ddi_prop_remove_all_common:
4158 * Used before unloading a driver to remove
4159 * all properties. (undefines all dev_t's props.)
4160 * Also removes `explicitly undefined' props.
4161 * No errors possible.
4162 */
4163 void
ddi_prop_remove_all_common(dev_info_t * dip,int flag)4164 ddi_prop_remove_all_common(dev_info_t *dip, int flag)
4165 {
4166 ddi_prop_t **list_head;
4167
4168 mutex_enter(&(DEVI(dip)->devi_lock));
4169 if (flag & DDI_PROP_SYSTEM_DEF) {
4170 list_head = &(DEVI(dip)->devi_sys_prop_ptr);
4171 } else if (flag & DDI_PROP_HW_DEF) {
4172 list_head = &(DEVI(dip)->devi_hw_prop_ptr);
4173 } else {
4174 list_head = &(DEVI(dip)->devi_drv_prop_ptr);
4175 }
4176 i_ddi_prop_list_delete(*list_head);
4177 *list_head = NULL;
4178 mutex_exit(&(DEVI(dip)->devi_lock));
4179 }
4180
4181
4182 /*
4183 * ddi_prop_remove_all: Remove all driver prop definitions.
4184 */
4185
void
ddi_prop_remove_all(dev_info_t *dip)
{
	/* Clear the dynamic-property callback before dropping the list. */
	i_ddi_prop_dyn_driver_set(dip, NULL);
	ddi_prop_remove_all_common(dip, 0);
}
4192
4193 /*
4194 * e_ddi_prop_remove_all: Remove all system prop definitions.
4195 */
4196
4197 void
e_ddi_prop_remove_all(dev_info_t * dip)4198 e_ddi_prop_remove_all(dev_info_t *dip)
4199 {
4200 ddi_prop_remove_all_common(dip, (int)DDI_PROP_SYSTEM_DEF);
4201 }
4202
4203
4204 /*
4205 * ddi_prop_undefine: Explicitly undefine a property. Property
4206 * searches which match this property return
4207 * the error code DDI_PROP_UNDEFINED.
4208 *
4209 * Use ddi_prop_remove to negate effect of
4210 * ddi_prop_undefine
4211 *
4212 * See above for error returns.
4213 */
4214
4215 int
ddi_prop_undefine(dev_t dev,dev_info_t * dip,int flag,char * name)4216 ddi_prop_undefine(dev_t dev, dev_info_t *dip, int flag, char *name)
4217 {
4218 if (!(flag & DDI_PROP_CANSLEEP))
4219 flag |= DDI_PROP_DONTSLEEP;
4220 flag |= DDI_PROP_STACK_CREATE | DDI_PROP_UNDEF_IT | DDI_PROP_TYPE_ANY;
4221 return (ddi_prop_update_common(dev, dip, flag,
4222 name, NULL, 0, ddi_prop_fm_encode_bytes));
4223 }
4224
4225 int
e_ddi_prop_undefine(dev_t dev,dev_info_t * dip,int flag,char * name)4226 e_ddi_prop_undefine(dev_t dev, dev_info_t *dip, int flag, char *name)
4227 {
4228 if (!(flag & DDI_PROP_CANSLEEP))
4229 flag |= DDI_PROP_DONTSLEEP;
4230 flag |= DDI_PROP_SYSTEM_DEF | DDI_PROP_STACK_CREATE |
4231 DDI_PROP_UNDEF_IT | DDI_PROP_TYPE_ANY;
4232 return (ddi_prop_update_common(dev, dip, flag,
4233 name, NULL, 0, ddi_prop_fm_encode_bytes));
4234 }
4235
4236 /*
4237 * Support for gathering dynamic properties in devinfo snapshot.
4238 */
4239 void
i_ddi_prop_dyn_driver_set(dev_info_t * dip,i_ddi_prop_dyn_t * dp)4240 i_ddi_prop_dyn_driver_set(dev_info_t *dip, i_ddi_prop_dyn_t *dp)
4241 {
4242 DEVI(dip)->devi_prop_dyn_driver = dp;
4243 }
4244
4245 i_ddi_prop_dyn_t *
i_ddi_prop_dyn_driver_get(dev_info_t * dip)4246 i_ddi_prop_dyn_driver_get(dev_info_t *dip)
4247 {
4248 return (DEVI(dip)->devi_prop_dyn_driver);
4249 }
4250
4251 void
i_ddi_prop_dyn_parent_set(dev_info_t * dip,i_ddi_prop_dyn_t * dp)4252 i_ddi_prop_dyn_parent_set(dev_info_t *dip, i_ddi_prop_dyn_t *dp)
4253 {
4254 DEVI(dip)->devi_prop_dyn_parent = dp;
4255 }
4256
4257 i_ddi_prop_dyn_t *
i_ddi_prop_dyn_parent_get(dev_info_t * dip)4258 i_ddi_prop_dyn_parent_get(dev_info_t *dip)
4259 {
4260 return (DEVI(dip)->devi_prop_dyn_parent);
4261 }
4262
4263 void
i_ddi_prop_dyn_cache_invalidate(dev_info_t * dip,i_ddi_prop_dyn_t * dp)4264 i_ddi_prop_dyn_cache_invalidate(dev_info_t *dip, i_ddi_prop_dyn_t *dp)
4265 {
4266 /* for now we invalidate the entire cached snapshot */
4267 if (dip && dp)
4268 i_ddi_di_cache_invalidate();
4269 }
4270
4271 /* ARGSUSED */
4272 void
ddi_prop_cache_invalidate(dev_t dev,dev_info_t * dip,char * name,int flags)4273 ddi_prop_cache_invalidate(dev_t dev, dev_info_t *dip, char *name, int flags)
4274 {
4275 /* for now we invalidate the entire cached snapshot */
4276 i_ddi_di_cache_invalidate();
4277 }
4278
4279
4280 /*
4281 * Code to search hardware layer (PROM), if it exists, on behalf of child.
4282 *
4283 * if input dip != child_dip, then call is on behalf of child
4284 * to search PROM, do it via ddi_prop_search_common() and ascend only
4285 * if allowed.
4286 *
4287 * if input dip == ch_dip (child_dip), call is on behalf of root driver,
4288 * to search for PROM defined props only.
4289 *
4290 * Note that the PROM search is done only if the requested dev
4291 * is either DDI_DEV_T_ANY or DDI_DEV_T_NONE. PROM properties
4292 * have no associated dev, thus are automatically associated with
4293 * DDI_DEV_T_NONE.
4294 *
4295 * Modifying flag DDI_PROP_NOTPROM inhibits the search in the h/w layer.
4296 *
4297 * Returns DDI_PROP_FOUND_1275 if found to indicate to framework
4298 * that the property resides in the prom.
4299 */
int
impl_ddi_bus_prop_op(dev_t dev, dev_info_t *dip, dev_info_t *ch_dip,
    ddi_prop_op_t prop_op, int mod_flags,
    char *name, caddr_t valuep, int *lengthp)
{
	int len;
	caddr_t buffer;

	/*
	 * If requested dev is DDI_DEV_T_NONE or DDI_DEV_T_ANY, then
	 * look in caller's PROM if it's a self identifying device...
	 *
	 * Note that this is very similar to ddi_prop_op, but we
	 * search the PROM instead of the s/w defined properties,
	 * and we are called on by the parent driver to do this for
	 * the child.
	 */

	if (((dev == DDI_DEV_T_NONE) || (dev == DDI_DEV_T_ANY)) &&
	    ndi_dev_is_prom_node(ch_dip) &&
	    ((mod_flags & DDI_PROP_NOTPROM) == 0)) {
		/* -1 means the PROM has no property of this name. */
		len = prom_getproplen((pnode_t)DEVI(ch_dip)->devi_nodeid, name);
		if (len == -1) {
			return (DDI_PROP_NOT_FOUND);
		}

		/*
		 * If exists only request, we're done
		 */
		if (prop_op == PROP_EXISTS) {
			return (DDI_PROP_FOUND_1275);
		}

		/*
		 * If length only request or prop length == 0, get out
		 */
		if ((prop_op == PROP_LEN) || (len == 0)) {
			*lengthp = len;
			return (DDI_PROP_FOUND_1275);
		}

		/*
		 * Allocate buffer if required... (either way `buffer'
		 * is receiving address).
		 *
		 * NOTE(review): only PROP_LEN_AND_VAL_ALLOC and
		 * PROP_LEN_AND_VAL_BUF can reach this switch (the other
		 * prop_op values returned above); the default case would
		 * leave `buffer' uninitialized if a new prop_op were ever
		 * added without handling it here.
		 */

		switch (prop_op) {

		case PROP_LEN_AND_VAL_ALLOC:

			/* Sleep on allocation only if the caller allows it. */
			buffer = kmem_alloc((size_t)len,
			    mod_flags & DDI_PROP_CANSLEEP ?
			    KM_SLEEP : KM_NOSLEEP);
			if (buffer == NULL) {
				return (DDI_PROP_NO_MEMORY);
			}
			/* Caller receives (and must free) the buffer. */
			*(caddr_t *)valuep = buffer;
			break;

		case PROP_LEN_AND_VAL_BUF:

			/* Caller's buffer must hold the whole value. */
			if (len > (*lengthp)) {
				*lengthp = len;
				return (DDI_PROP_BUF_TOO_SMALL);
			}

			buffer = valuep;
			break;

		default:
			break;
		}

		/*
		 * Call the PROM function to do the copy.
		 */
		(void) prom_getprop((pnode_t)DEVI(ch_dip)->devi_nodeid,
		    name, buffer);

		*lengthp = len; /* return the actual length to the caller */
		(void) impl_fix_props(dip, ch_dip, name, len, buffer);
		return (DDI_PROP_FOUND_1275);
	}

	return (DDI_PROP_NOT_FOUND);
}
4386
4387 /*
4388 * The ddi_bus_prop_op default bus nexus prop op function.
4389 *
4390 * Code to search hardware layer (PROM), if it exists,
4391 * on behalf of child, then, if appropriate, ascend and check
4392 * my own software defined properties...
4393 */
int
ddi_bus_prop_op(dev_t dev, dev_info_t *dip, dev_info_t *ch_dip,
    ddi_prop_op_t prop_op, int mod_flags,
    char *name, caddr_t valuep, int *lengthp)
{
	int error;

	/* First look for the property in the child's PROM node. */
	error = impl_ddi_bus_prop_op(dev, dip, ch_dip, prop_op, mod_flags,
	    name, valuep, lengthp);

	/* Any definitive answer ends the search here. */
	if (error == DDI_PROP_SUCCESS || error == DDI_PROP_FOUND_1275 ||
	    error == DDI_PROP_BUF_TOO_SMALL)
		return (error);

	if (error == DDI_PROP_NO_MEMORY) {
		cmn_err(CE_CONT, prop_no_mem_msg, name);
		return (DDI_PROP_NO_MEMORY);
	}

	/*
	 * Check the 'options' node as a last resort
	 */
	if ((mod_flags & DDI_PROP_DONTPASS) != 0)
		return (DDI_PROP_NOT_FOUND);

	if (ch_dip == ddi_root_node()) {
		/*
		 * As a last resort, when we've reached
		 * the top and still haven't found the
		 * property, see if the desired property
		 * is attached to the options node.
		 *
		 * The options dip is attached right after boot.
		 */
		ASSERT(options_dip != NULL);
		/*
		 * Force the "don't pass" flag to *just* see
		 * what the options node has to offer.
		 */
		return (ddi_prop_search_common(dev, options_dip, prop_op,
		    mod_flags|DDI_PROP_DONTPASS, name, valuep,
		    (uint_t *)lengthp));
	}

	/*
	 * Otherwise, continue search with parent's s/w defined properties...
	 * NOTE: Using `dip' in following call increments the level.
	 */

	return (ddi_prop_search_common(dev, dip, prop_op, mod_flags,
	    name, valuep, (uint_t *)lengthp));
}
4446
4447 /*
4448 * External property functions used by other parts of the kernel...
4449 */
4450
4451 /*
4452 * e_ddi_getlongprop: See comments for ddi_get_longprop.
4453 */
4454
4455 int
e_ddi_getlongprop(dev_t dev,vtype_t type,char * name,int flags,caddr_t valuep,int * lengthp)4456 e_ddi_getlongprop(dev_t dev, vtype_t type, char *name, int flags,
4457 caddr_t valuep, int *lengthp)
4458 {
4459 _NOTE(ARGUNUSED(type))
4460 dev_info_t *devi;
4461 ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_ALLOC;
4462 int error;
4463
4464 if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4465 return (DDI_PROP_NOT_FOUND);
4466
4467 error = cdev_prop_op(dev, devi, prop_op, flags, name, valuep, lengthp);
4468 ddi_release_devi(devi);
4469 return (error);
4470 }
4471
4472 /*
4473 * e_ddi_getlongprop_buf: See comments for ddi_getlongprop_buf.
4474 */
4475
4476 int
e_ddi_getlongprop_buf(dev_t dev,vtype_t type,char * name,int flags,caddr_t valuep,int * lengthp)4477 e_ddi_getlongprop_buf(dev_t dev, vtype_t type, char *name, int flags,
4478 caddr_t valuep, int *lengthp)
4479 {
4480 _NOTE(ARGUNUSED(type))
4481 dev_info_t *devi;
4482 ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_BUF;
4483 int error;
4484
4485 if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4486 return (DDI_PROP_NOT_FOUND);
4487
4488 error = cdev_prop_op(dev, devi, prop_op, flags, name, valuep, lengthp);
4489 ddi_release_devi(devi);
4490 return (error);
4491 }
4492
4493 /*
4494 * e_ddi_getprop: See comments for ddi_getprop.
4495 */
4496 int
e_ddi_getprop(dev_t dev,vtype_t type,char * name,int flags,int defvalue)4497 e_ddi_getprop(dev_t dev, vtype_t type, char *name, int flags, int defvalue)
4498 {
4499 _NOTE(ARGUNUSED(type))
4500 dev_info_t *devi;
4501 ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_BUF;
4502 int propvalue = defvalue;
4503 int proplength = sizeof (int);
4504 int error;
4505
4506 if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4507 return (defvalue);
4508
4509 error = cdev_prop_op(dev, devi, prop_op,
4510 flags, name, (caddr_t)&propvalue, &proplength);
4511 ddi_release_devi(devi);
4512
4513 if ((error == DDI_PROP_SUCCESS) && (proplength == 0))
4514 propvalue = 1;
4515
4516 return (propvalue);
4517 }
4518
4519 /*
4520 * e_ddi_getprop_int64:
4521 *
4522 * This is a typed interfaces, but predates typed properties. With the
4523 * introduction of typed properties the framework tries to ensure
4524 * consistent use of typed interfaces. This is why TYPE_INT64 is not
4525 * part of TYPE_ANY. E_ddi_getprop_int64 is a special case where a
4526 * typed interface invokes legacy (non-typed) interfaces:
4527 * cdev_prop_op(), prop_op(9E), ddi_prop_op(9F)). In this case the
4528 * fact that TYPE_INT64 is not part of TYPE_ANY matters. To support
4529 * this type of lookup as a single operation we invoke the legacy
4530 * non-typed interfaces with the special CONSUMER_TYPED bit set. The
4531 * framework ddi_prop_op(9F) implementation is expected to check for
4532 * CONSUMER_TYPED and, if set, expand type bits beyond TYPE_ANY
4533 * (currently TYPE_INT64).
4534 */
4535 int64_t
e_ddi_getprop_int64(dev_t dev,vtype_t type,char * name,int flags,int64_t defvalue)4536 e_ddi_getprop_int64(dev_t dev, vtype_t type, char *name,
4537 int flags, int64_t defvalue)
4538 {
4539 _NOTE(ARGUNUSED(type))
4540 dev_info_t *devi;
4541 ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_BUF;
4542 int64_t propvalue = defvalue;
4543 int proplength = sizeof (propvalue);
4544 int error;
4545
4546 if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4547 return (defvalue);
4548
4549 error = cdev_prop_op(dev, devi, prop_op, flags |
4550 DDI_PROP_CONSUMER_TYPED, name, (caddr_t)&propvalue, &proplength);
4551 ddi_release_devi(devi);
4552
4553 if ((error == DDI_PROP_SUCCESS) && (proplength == 0))
4554 propvalue = 1;
4555
4556 return (propvalue);
4557 }
4558
4559 /*
4560 * e_ddi_getproplen: See comments for ddi_getproplen.
4561 */
4562 int
e_ddi_getproplen(dev_t dev,vtype_t type,char * name,int flags,int * lengthp)4563 e_ddi_getproplen(dev_t dev, vtype_t type, char *name, int flags, int *lengthp)
4564 {
4565 _NOTE(ARGUNUSED(type))
4566 dev_info_t *devi;
4567 ddi_prop_op_t prop_op = PROP_LEN;
4568 int error;
4569
4570 if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4571 return (DDI_PROP_NOT_FOUND);
4572
4573 error = cdev_prop_op(dev, devi, prop_op, flags, name, NULL, lengthp);
4574 ddi_release_devi(devi);
4575 return (error);
4576 }
4577
4578 /*
4579 * Routines to get at elements of the dev_info structure
4580 */
4581
4582 /*
4583 * ddi_binding_name: Return the driver binding name of the devinfo node
4584 * This is the name the OS used to bind the node to a driver.
4585 */
4586 char *
ddi_binding_name(dev_info_t * dip)4587 ddi_binding_name(dev_info_t *dip)
4588 {
4589 return (DEVI(dip)->devi_binding_name);
4590 }
4591
4592 /*
4593 * ddi_driver_major: Return the major number of the driver that
4594 * the supplied devinfo is bound to. If not yet bound,
4595 * DDI_MAJOR_T_NONE.
4596 *
4597 * When used by the driver bound to 'devi', this
4598 * function will reliably return the driver major number.
4599 * Other ways of determining the driver major number, such as
4600 * major = ddi_name_to_major(ddi_get_name(devi));
4601 * major = ddi_name_to_major(ddi_binding_name(devi));
4602 * can return a different result as the driver/alias binding
4603 * can change dynamically, and thus should be avoided.
4604 */
4605 major_t
ddi_driver_major(dev_info_t * devi)4606 ddi_driver_major(dev_info_t *devi)
4607 {
4608 return (DEVI(devi)->devi_major);
4609 }
4610
4611 /*
4612 * ddi_driver_name: Return the normalized driver name. this is the
4613 * actual driver name
4614 */
4615 const char *
ddi_driver_name(dev_info_t * devi)4616 ddi_driver_name(dev_info_t *devi)
4617 {
4618 major_t major;
4619
4620 if ((major = ddi_driver_major(devi)) != DDI_MAJOR_T_NONE)
4621 return (ddi_major_to_name(major));
4622
4623 return (ddi_node_name(devi));
4624 }
4625
4626 /*
4627 * i_ddi_set_binding_name: Set binding name.
4628 *
4629 * Set the binding name to the given name.
4630 * This routine is for use by the ddi implementation, not by drivers.
4631 */
4632 void
i_ddi_set_binding_name(dev_info_t * dip,char * name)4633 i_ddi_set_binding_name(dev_info_t *dip, char *name)
4634 {
4635 DEVI(dip)->devi_binding_name = name;
4636
4637 }
4638
4639 /*
4640 * ddi_get_name: A synonym of ddi_binding_name() ... returns a name
4641 * the implementation has used to bind the node to a driver.
4642 */
4643 char *
ddi_get_name(dev_info_t * dip)4644 ddi_get_name(dev_info_t *dip)
4645 {
4646 return (DEVI(dip)->devi_binding_name);
4647 }
4648
4649 /*
4650 * ddi_node_name: Return the name property of the devinfo node
4651 * This may differ from ddi_binding_name if the node name
4652 * does not define a binding to a driver (i.e. generic names).
4653 */
4654 char *
ddi_node_name(dev_info_t * dip)4655 ddi_node_name(dev_info_t *dip)
4656 {
4657 return (DEVI(dip)->devi_node_name);
4658 }
4659
4660
4661 /*
4662 * ddi_get_nodeid: Get nodeid stored in dev_info structure.
4663 */
4664 int
ddi_get_nodeid(dev_info_t * dip)4665 ddi_get_nodeid(dev_info_t *dip)
4666 {
4667 return (DEVI(dip)->devi_nodeid);
4668 }
4669
4670 int
ddi_get_instance(dev_info_t * dip)4671 ddi_get_instance(dev_info_t *dip)
4672 {
4673 return (DEVI(dip)->devi_instance);
4674 }
4675
4676 struct dev_ops *
ddi_get_driver(dev_info_t * dip)4677 ddi_get_driver(dev_info_t *dip)
4678 {
4679 return (DEVI(dip)->devi_ops);
4680 }
4681
4682 void
ddi_set_driver(dev_info_t * dip,struct dev_ops * devo)4683 ddi_set_driver(dev_info_t *dip, struct dev_ops *devo)
4684 {
4685 DEVI(dip)->devi_ops = devo;
4686 }
4687
4688 /*
4689 * ddi_set_driver_private/ddi_get_driver_private:
4690 * Get/set device driver private data in devinfo.
4691 */
4692 void
ddi_set_driver_private(dev_info_t * dip,void * data)4693 ddi_set_driver_private(dev_info_t *dip, void *data)
4694 {
4695 DEVI(dip)->devi_driver_data = data;
4696 }
4697
4698 void *
ddi_get_driver_private(dev_info_t * dip)4699 ddi_get_driver_private(dev_info_t *dip)
4700 {
4701 return (DEVI(dip)->devi_driver_data);
4702 }
4703
4704 /*
4705 * ddi_get_parent, ddi_get_child, ddi_get_next_sibling
4706 */
4707
4708 dev_info_t *
ddi_get_parent(dev_info_t * dip)4709 ddi_get_parent(dev_info_t *dip)
4710 {
4711 return ((dev_info_t *)DEVI(dip)->devi_parent);
4712 }
4713
4714 dev_info_t *
ddi_get_child(dev_info_t * dip)4715 ddi_get_child(dev_info_t *dip)
4716 {
4717 return ((dev_info_t *)DEVI(dip)->devi_child);
4718 }
4719
4720 dev_info_t *
ddi_get_next_sibling(dev_info_t * dip)4721 ddi_get_next_sibling(dev_info_t *dip)
4722 {
4723 return ((dev_info_t *)DEVI(dip)->devi_sibling);
4724 }
4725
4726 dev_info_t *
ddi_get_next(dev_info_t * dip)4727 ddi_get_next(dev_info_t *dip)
4728 {
4729 return ((dev_info_t *)DEVI(dip)->devi_next);
4730 }
4731
4732 void
ddi_set_next(dev_info_t * dip,dev_info_t * nextdip)4733 ddi_set_next(dev_info_t *dip, dev_info_t *nextdip)
4734 {
4735 DEVI(dip)->devi_next = DEVI(nextdip);
4736 }
4737
4738 /*
4739 * ddi_root_node: Return root node of devinfo tree
4740 */
4741
4742 dev_info_t *
ddi_root_node(void)4743 ddi_root_node(void)
4744 {
4745 extern dev_info_t *top_devinfo;
4746
4747 return (top_devinfo);
4748 }
4749
4750 /*
4751 * Miscellaneous functions:
4752 */
4753
4754 /*
4755 * Implementation specific hooks
4756 */
4757
4758 void
ddi_report_dev(dev_info_t * d)4759 ddi_report_dev(dev_info_t *d)
4760 {
4761 char *b;
4762
4763 (void) ddi_ctlops(d, d, DDI_CTLOPS_REPORTDEV, (void *)0, (void *)0);
4764
4765 /*
4766 * If this devinfo node has cb_ops, it's implicitly accessible from
4767 * userland, so we print its full name together with the instance
4768 * number 'abbreviation' that the driver may use internally.
4769 */
4770 if (DEVI(d)->devi_ops->devo_cb_ops != (struct cb_ops *)0 &&
4771 (b = kmem_zalloc(MAXPATHLEN, KM_NOSLEEP))) {
4772 cmn_err(CE_CONT, "?%s%d is %s\n",
4773 ddi_driver_name(d), ddi_get_instance(d),
4774 ddi_pathname(d, b));
4775 kmem_free(b, MAXPATHLEN);
4776 }
4777 }
4778
4779 /*
4780 * ddi_ctlops() is described in the assembler not to buy a new register
4781 * window when it's called and can reduce cost in climbing the device tree
4782 * without using the tail call optimization.
4783 */
4784 int
ddi_dev_regsize(dev_info_t * dev,uint_t rnumber,off_t * result)4785 ddi_dev_regsize(dev_info_t *dev, uint_t rnumber, off_t *result)
4786 {
4787 int ret;
4788
4789 ret = ddi_ctlops(dev, dev, DDI_CTLOPS_REGSIZE,
4790 (void *)&rnumber, (void *)result);
4791
4792 return (ret == DDI_SUCCESS ? DDI_SUCCESS : DDI_FAILURE);
4793 }
4794
4795 int
ddi_dev_nregs(dev_info_t * dev,int * result)4796 ddi_dev_nregs(dev_info_t *dev, int *result)
4797 {
4798 return (ddi_ctlops(dev, dev, DDI_CTLOPS_NREGS, 0, (void *)result));
4799 }
4800
4801 int
ddi_dev_is_sid(dev_info_t * d)4802 ddi_dev_is_sid(dev_info_t *d)
4803 {
4804 return (ddi_ctlops(d, d, DDI_CTLOPS_SIDDEV, (void *)0, (void *)0));
4805 }
4806
4807 int
ddi_slaveonly(dev_info_t * d)4808 ddi_slaveonly(dev_info_t *d)
4809 {
4810 return (ddi_ctlops(d, d, DDI_CTLOPS_SLAVEONLY, (void *)0, (void *)0));
4811 }
4812
4813 int
ddi_dev_affinity(dev_info_t * a,dev_info_t * b)4814 ddi_dev_affinity(dev_info_t *a, dev_info_t *b)
4815 {
4816 return (ddi_ctlops(a, a, DDI_CTLOPS_AFFINITY, (void *)b, (void *)0));
4817 }
4818
4819 int
ddi_streams_driver(dev_info_t * dip)4820 ddi_streams_driver(dev_info_t *dip)
4821 {
4822 if (i_ddi_devi_attached(dip) &&
4823 (DEVI(dip)->devi_ops->devo_cb_ops != NULL) &&
4824 (DEVI(dip)->devi_ops->devo_cb_ops->cb_str != NULL))
4825 return (DDI_SUCCESS);
4826 return (DDI_FAILURE);
4827 }
4828
4829 /*
4830 * callback free list
4831 */
4832
static int ncallbacks;			/* number of entries in the static pool */
static int nc_low = 170;		/* pool size when physmem < 48MB */
static int nc_med = 512;		/* pool size when physmem < 128MB */
static int nc_high = 2048;		/* pool size otherwise */
static struct ddi_callback *callbackq;		/* the static (L2) pool */
static struct ddi_callback *callbackqfree;	/* free list within the pool */
4839
4840 /*
4841 * set/run callback lists
4842 */
struct cbstats {
	kstat_named_t cb_asked;		/* callback requests received */
	kstat_named_t cb_new;		/* new list entries created */
	kstat_named_t cb_run;		/* callbacks that ran successfully */
	kstat_named_t cb_delete;	/* list entries removed */
	kstat_named_t cb_maxreq;	/* high-water mark: outstanding asks */
	kstat_named_t cb_maxlist;	/* high-water mark: allocated entries */
	kstat_named_t cb_alloc;		/* entries currently allocated */
	kstat_named_t cb_runouts;	/* callbacks returning 0 (re-queued) */
	kstat_named_t cb_L2;		/* allocations served from the L2 pool */
	kstat_named_t cb_grow;		/* forced kmem_alloc_tryhard growths */
} cbstats = {
	{"asked", KSTAT_DATA_UINT32},
	{"new", KSTAT_DATA_UINT32},
	{"run", KSTAT_DATA_UINT32},
	{"delete", KSTAT_DATA_UINT32},
	{"maxreq", KSTAT_DATA_UINT32},
	{"maxlist", KSTAT_DATA_UINT32},
	{"alloc", KSTAT_DATA_UINT32},
	{"runouts", KSTAT_DATA_UINT32},
	{"L2", KSTAT_DATA_UINT32},
	{"grow", KSTAT_DATA_UINT32},
};
4866
/* Shorthand accessors for the 32-bit counters in cbstats above. */
#define	nc_asked	cb_asked.value.ui32
#define	nc_new		cb_new.value.ui32
#define	nc_run		cb_run.value.ui32
#define	nc_delete	cb_delete.value.ui32
#define	nc_maxreq	cb_maxreq.value.ui32
#define	nc_maxlist	cb_maxlist.value.ui32
#define	nc_alloc	cb_alloc.value.ui32
#define	nc_runouts	cb_runouts.value.ui32
#define	nc_L2		cb_L2.value.ui32
#define	nc_grow		cb_grow.value.ui32

/* Protects cbstats, the callback lists, and the L2 free list. */
static kmutex_t ddi_callback_mutex;
4879
4880 /*
4881 * callbacks are handled using a L1/L2 cache. The L1 cache
4882 * comes out of kmem_cache_alloc and can expand/shrink dynamically. If
4883 * we can't get callbacks from the L1 cache [because pageout is doing
4884 * I/O at the time freemem is 0], we allocate callbacks out of the
4885 * L2 cache. The L2 cache is static and depends on the memory size.
4886 * [We might also count the number of devices at probe time and
4887 * allocate one structure per device and adjust for deferred attach]
4888 */
4889 void
impl_ddi_callback_init(void)4890 impl_ddi_callback_init(void)
4891 {
4892 int i;
4893 uint_t physmegs;
4894 kstat_t *ksp;
4895
4896 physmegs = physmem >> (20 - PAGESHIFT);
4897 if (physmegs < 48) {
4898 ncallbacks = nc_low;
4899 } else if (physmegs < 128) {
4900 ncallbacks = nc_med;
4901 } else {
4902 ncallbacks = nc_high;
4903 }
4904
4905 /*
4906 * init free list
4907 */
4908 callbackq = kmem_zalloc(
4909 ncallbacks * sizeof (struct ddi_callback), KM_SLEEP);
4910 for (i = 0; i < ncallbacks-1; i++)
4911 callbackq[i].c_nfree = &callbackq[i+1];
4912 callbackqfree = callbackq;
4913
4914 /* init kstats */
4915 if (ksp = kstat_create("unix", 0, "cbstats", "misc", KSTAT_TYPE_NAMED,
4916 sizeof (cbstats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL)) {
4917 ksp->ks_data = (void *) &cbstats;
4918 kstat_install(ksp);
4919 }
4920
4921 }
4922
/*
 * Record a callback request on the list anchored at *listid, coalescing
 * duplicate (funcp, arg) entries by bumping their count.  Entry storage
 * comes from kmem (L1), then the static pool (L2), then
 * kmem_alloc_tryhard() as a last resort.  Caller must hold
 * ddi_callback_mutex (both callers in this file do).
 */
static void
callback_insert(int (*funcp)(caddr_t), caddr_t arg, uintptr_t *listid,
    int count)
{
	struct ddi_callback *list, *marker, *new;
	size_t size = sizeof (struct ddi_callback);

	/* Coalesce with an existing entry for the same function/arg. */
	list = marker = (struct ddi_callback *)*listid;
	while (list != NULL) {
		if (list->c_call == funcp && list->c_arg == arg) {
			list->c_count += count;
			return;
		}
		marker = list;
		list = list->c_nlist;
	}
	new = kmem_alloc(size, KM_NOSLEEP);
	if (new == NULL) {
		/* L1 failed: fall back to the static L2 free list. */
		new = callbackqfree;
		if (new == NULL) {
			/* Both exhausted: force an allocation (may panic). */
			new = kmem_alloc_tryhard(sizeof (struct ddi_callback),
			    &size, KM_NOSLEEP | KM_PANIC);
			cbstats.nc_grow++;
		} else {
			callbackqfree = new->c_nfree;
			cbstats.nc_L2++;
		}
	}
	/* Append after the last entry (marker), or start the list. */
	if (marker != NULL) {
		marker->c_nlist = new;
	} else {
		*listid = (uintptr_t)new;
	}
	new->c_size = size;	/* remembered for kmem_free on removal */
	new->c_nlist = NULL;
	new->c_call = funcp;
	new->c_arg = arg;
	new->c_count = count;
	cbstats.nc_new++;
	cbstats.nc_alloc++;
	if (cbstats.nc_alloc > cbstats.nc_maxlist)
		cbstats.nc_maxlist = cbstats.nc_alloc;
}
4966
4967 void
ddi_set_callback(int (* funcp)(caddr_t),caddr_t arg,uintptr_t * listid)4968 ddi_set_callback(int (*funcp)(caddr_t), caddr_t arg, uintptr_t *listid)
4969 {
4970 mutex_enter(&ddi_callback_mutex);
4971 cbstats.nc_asked++;
4972 if ((cbstats.nc_asked - cbstats.nc_run) > cbstats.nc_maxreq)
4973 cbstats.nc_maxreq = (cbstats.nc_asked - cbstats.nc_run);
4974 (void) callback_insert(funcp, arg, listid, 1);
4975 mutex_exit(&ddi_callback_mutex);
4976 }
4977
/*
 * Softcall handler: drain the callback list anchored at *Queue.  Each
 * entry's function is invoked without the mutex held; a return of 0
 * means "still out of resources" and the entry is re-queued, while a
 * non-zero return counts one request as satisfied.
 */
static void
real_callback_run(void *Queue)
{
	int (*funcp)(caddr_t);
	caddr_t arg;
	int count, rval;
	uintptr_t *listid;
	struct ddi_callback *list, *marker;
	int check_pending = 1;
	int pending = 0;

	do {
		mutex_enter(&ddi_callback_mutex);
		listid = Queue;
		list = (struct ddi_callback *)*listid;
		if (list == NULL) {
			mutex_exit(&ddi_callback_mutex);
			return;
		}
		if (check_pending) {
			/* Sum outstanding requests once, on first pass. */
			marker = list;
			while (marker != NULL) {
				pending += marker->c_count;
				marker = marker->c_nlist;
			}
			check_pending = 0;
		}
		ASSERT(pending > 0);
		ASSERT(list->c_count > 0);
		funcp = list->c_call;
		arg = list->c_arg;
		count = list->c_count;
		/* Unlink the head entry and recycle it (L2 pool or kmem). */
		*(uintptr_t *)Queue = (uintptr_t)list->c_nlist;
		if (list >= &callbackq[0] &&
		    list <= &callbackq[ncallbacks-1]) {
			list->c_nfree = callbackqfree;
			callbackqfree = list;
		} else
			kmem_free(list, list->c_size);

		cbstats.nc_delete++;
		cbstats.nc_alloc--;
		mutex_exit(&ddi_callback_mutex);

		/* Run the callback up to `count' times, unlocked. */
		do {
			if ((rval = (*funcp)(arg)) == 0) {
				/* Blocked again: re-queue remaining asks. */
				pending -= count;
				mutex_enter(&ddi_callback_mutex);
				(void) callback_insert(funcp, arg, listid,
				    count);
				cbstats.nc_runouts++;
			} else {
				pending--;
				mutex_enter(&ddi_callback_mutex);
				cbstats.nc_run++;
			}
			mutex_exit(&ddi_callback_mutex);
		} while (rval != 0 && (--count > 0));
	} while (pending > 0);
}
5038
5039 void
ddi_run_callback(uintptr_t * listid)5040 ddi_run_callback(uintptr_t *listid)
5041 {
5042 softcall(real_callback_run, listid);
5043 }
5044
5045 /*
5046 * ddi_periodic_t
5047 * ddi_periodic_add(void (*func)(void *), void *arg, hrtime_t interval,
5048 * int level)
5049 *
5050 * INTERFACE LEVEL
5051 * Solaris DDI specific (Solaris DDI)
5052 *
5053 * PARAMETERS
5054 * func: the callback function
5055 *
5056 * The callback function will be invoked. The function is invoked
5057 * in kernel context if the argument level passed is the zero.
5058 * Otherwise it's invoked in interrupt context at the specified
5059 * level.
5060 *
5061 * arg: the argument passed to the callback function
5062 *
5063 * interval: interval time
5064 *
5065 * level : callback interrupt level
5066 *
5067 * If the value is the zero, the callback function is invoked
5068 * in kernel context. If the value is more than the zero, but
5069 * less than or equal to ten, the callback function is invoked in
5070 * interrupt context at the specified interrupt level, which may
5071 * be used for real time applications.
5072 *
5073 * This value must be in range of 0-10, which can be a numeric
5074 * number or a pre-defined macro (DDI_IPL_0, ... , DDI_IPL_10).
5075 *
5076 * DESCRIPTION
5077 * ddi_periodic_add(9F) schedules the specified function to be
5078 * periodically invoked in the interval time.
5079 *
5080 * As well as timeout(9F), the exact time interval over which the function
5081 * takes effect cannot be guaranteed, but the value given is a close
5082 * approximation.
5083 *
5084 * Drivers waiting on behalf of processes with real-time constraints must
5085 * pass non-zero value with the level argument to ddi_periodic_add(9F).
5086 *
5087 * RETURN VALUES
5088 * ddi_periodic_add(9F) returns a non-zero opaque value (ddi_periodic_t),
5089 * which must be used for ddi_periodic_delete(9F) to specify the request.
5090 *
5091 * CONTEXT
5092 * ddi_periodic_add(9F) can be called in user or kernel context, but
5093 * it cannot be called in interrupt context, which is different from
5094 * timeout(9F).
5095 */
5096 ddi_periodic_t
ddi_periodic_add(void (* func)(void *),void * arg,hrtime_t interval,int level)5097 ddi_periodic_add(void (*func)(void *), void *arg, hrtime_t interval, int level)
5098 {
5099 /*
5100 * Sanity check of the argument level.
5101 */
5102 if (level < DDI_IPL_0 || level > DDI_IPL_10)
5103 cmn_err(CE_PANIC,
5104 "ddi_periodic_add: invalid interrupt level (%d).", level);
5105
5106 /*
5107 * Sanity check of the context. ddi_periodic_add() cannot be
5108 * called in either interrupt context or high interrupt context.
5109 */
5110 if (servicing_interrupt())
5111 cmn_err(CE_PANIC,
5112 "ddi_periodic_add: called in (high) interrupt context.");
5113
5114 return ((ddi_periodic_t)i_timeout(func, arg, interval, level));
5115 }
5116
5117 /*
5118 * void
5119 * ddi_periodic_delete(ddi_periodic_t req)
5120 *
5121 * INTERFACE LEVEL
5122 * Solaris DDI specific (Solaris DDI)
5123 *
5124 * PARAMETERS
5125 * req: ddi_periodic_t opaque value ddi_periodic_add(9F) returned
5126 * previously.
5127 *
5128 * DESCRIPTION
5129 * ddi_periodic_delete(9F) cancels the ddi_periodic_add(9F) request
5130 * previously requested.
5131 *
5132 * ddi_periodic_delete(9F) will not return until the pending request
5133 * is canceled or executed.
5134 *
5135 * As well as untimeout(9F), calling ddi_periodic_delete(9F) for a
5136 * timeout which is either running on another CPU, or has already
5137 * completed causes no problems. However, unlike untimeout(9F), there is
5138 * no restrictions on the lock which might be held across the call to
5139 * ddi_periodic_delete(9F).
5140 *
5141 * Drivers should be structured with the understanding that the arrival of
5142 * both an interrupt and a timeout for that interrupt can occasionally
5143 * occur, in either order.
5144 *
5145 * CONTEXT
5146 * ddi_periodic_delete(9F) can be called in user or kernel context, but
5147 * it cannot be called in interrupt context, which is different from
5148 * untimeout(9F).
5149 */
5150 void
ddi_periodic_delete(ddi_periodic_t req)5151 ddi_periodic_delete(ddi_periodic_t req)
5152 {
5153 /*
5154 * Sanity check of the context. ddi_periodic_delete() cannot be
5155 * called in either interrupt context or high interrupt context.
5156 */
5157 if (servicing_interrupt())
5158 cmn_err(CE_PANIC,
5159 "ddi_periodic_delete: called in (high) interrupt context.");
5160
5161 i_untimeout((timeout_t)req);
5162 }
5163
5164 dev_info_t *
nodevinfo(dev_t dev,int otyp)5165 nodevinfo(dev_t dev, int otyp)
5166 {
5167 _NOTE(ARGUNUSED(dev, otyp))
5168 return ((dev_info_t *)0);
5169 }
5170
5171 /*
5172 * A driver should support its own getinfo(9E) entry point. This function
5173 * is provided as a convenience for ON drivers that don't expect their
5174 * getinfo(9E) entry point to be called. A driver that uses this must not
5175 * call ddi_create_minor_node.
5176 */
5177 int
ddi_no_info(dev_info_t * dip,ddi_info_cmd_t infocmd,void * arg,void ** result)5178 ddi_no_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
5179 {
5180 _NOTE(ARGUNUSED(dip, infocmd, arg, result))
5181 return (DDI_FAILURE);
5182 }
5183
5184 /*
5185 * A driver should support its own getinfo(9E) entry point. This function
5186 * is provided as a convenience for ON drivers that where the minor number
5187 * is the instance. Drivers that do not have 1:1 mapping must implement
5188 * their own getinfo(9E) function.
5189 */
5190 int
ddi_getinfo_1to1(dev_info_t * dip,ddi_info_cmd_t infocmd,void * arg,void ** result)5191 ddi_getinfo_1to1(dev_info_t *dip, ddi_info_cmd_t infocmd,
5192 void *arg, void **result)
5193 {
5194 _NOTE(ARGUNUSED(dip))
5195 int instance;
5196
5197 if (infocmd != DDI_INFO_DEVT2INSTANCE)
5198 return (DDI_FAILURE);
5199
5200 instance = getminor((dev_t)(uintptr_t)arg);
5201 *result = (void *)(uintptr_t)instance;
5202 return (DDI_SUCCESS);
5203 }
5204
5205 int
ddifail(dev_info_t * devi,ddi_attach_cmd_t cmd)5206 ddifail(dev_info_t *devi, ddi_attach_cmd_t cmd)
5207 {
5208 _NOTE(ARGUNUSED(devi, cmd))
5209 return (DDI_FAILURE);
5210 }
5211
5212 int
ddi_no_dma_map(dev_info_t * dip,dev_info_t * rdip,struct ddi_dma_req * dmareqp,ddi_dma_handle_t * handlep)5213 ddi_no_dma_map(dev_info_t *dip, dev_info_t *rdip,
5214 struct ddi_dma_req *dmareqp, ddi_dma_handle_t *handlep)
5215 {
5216 _NOTE(ARGUNUSED(dip, rdip, dmareqp, handlep))
5217 return (DDI_DMA_NOMAPPING);
5218 }
5219
5220 int
ddi_no_dma_allochdl(dev_info_t * dip,dev_info_t * rdip,ddi_dma_attr_t * attr,int (* waitfp)(caddr_t),caddr_t arg,ddi_dma_handle_t * handlep)5221 ddi_no_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
5222 int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
5223 {
5224 _NOTE(ARGUNUSED(dip, rdip, attr, waitfp, arg, handlep))
5225 return (DDI_DMA_BADATTR);
5226 }
5227
5228 int
ddi_no_dma_freehdl(dev_info_t * dip,dev_info_t * rdip,ddi_dma_handle_t handle)5229 ddi_no_dma_freehdl(dev_info_t *dip, dev_info_t *rdip,
5230 ddi_dma_handle_t handle)
5231 {
5232 _NOTE(ARGUNUSED(dip, rdip, handle))
5233 return (DDI_FAILURE);
5234 }
5235
5236 int
ddi_no_dma_bindhdl(dev_info_t * dip,dev_info_t * rdip,ddi_dma_handle_t handle,struct ddi_dma_req * dmareq,ddi_dma_cookie_t * cp,uint_t * ccountp)5237 ddi_no_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
5238 ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
5239 ddi_dma_cookie_t *cp, uint_t *ccountp)
5240 {
5241 _NOTE(ARGUNUSED(dip, rdip, handle, dmareq, cp, ccountp))
5242 return (DDI_DMA_NOMAPPING);
5243 }
5244
5245 int
ddi_no_dma_unbindhdl(dev_info_t * dip,dev_info_t * rdip,ddi_dma_handle_t handle)5246 ddi_no_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
5247 ddi_dma_handle_t handle)
5248 {
5249 _NOTE(ARGUNUSED(dip, rdip, handle))
5250 return (DDI_FAILURE);
5251 }
5252
5253 int
ddi_no_dma_flush(dev_info_t * dip,dev_info_t * rdip,ddi_dma_handle_t handle,off_t off,size_t len,uint_t cache_flags)5254 ddi_no_dma_flush(dev_info_t *dip, dev_info_t *rdip,
5255 ddi_dma_handle_t handle, off_t off, size_t len,
5256 uint_t cache_flags)
5257 {
5258 _NOTE(ARGUNUSED(dip, rdip, handle, off, len, cache_flags))
5259 return (DDI_FAILURE);
5260 }
5261
5262 int
ddi_no_dma_win(dev_info_t * dip,dev_info_t * rdip,ddi_dma_handle_t handle,uint_t win,off_t * offp,size_t * lenp,ddi_dma_cookie_t * cookiep,uint_t * ccountp)5263 ddi_no_dma_win(dev_info_t *dip, dev_info_t *rdip,
5264 ddi_dma_handle_t handle, uint_t win, off_t *offp,
5265 size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
5266 {
5267 _NOTE(ARGUNUSED(dip, rdip, handle, win, offp, lenp, cookiep, ccountp))
5268 return (DDI_FAILURE);
5269 }
5270
5271 int
ddi_no_dma_mctl(dev_info_t * dip,dev_info_t * rdip,ddi_dma_handle_t handle,enum ddi_dma_ctlops request,off_t * offp,size_t * lenp,caddr_t * objp,uint_t flags)5272 ddi_no_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
5273 ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
5274 off_t *offp, size_t *lenp, caddr_t *objp, uint_t flags)
5275 {
5276 _NOTE(ARGUNUSED(dip, rdip, handle, request, offp, lenp, objp, flags))
5277 return (DDI_FAILURE);
5278 }
5279
/* No-op entry point. */
void
ddivoid(void)
{
}
5283
5284 int
nochpoll(dev_t dev,short events,int anyyet,short * reventsp,struct pollhead ** pollhdrp)5285 nochpoll(dev_t dev, short events, int anyyet, short *reventsp,
5286 struct pollhead **pollhdrp)
5287 {
5288 _NOTE(ARGUNUSED(dev, events, anyyet, reventsp, pollhdrp))
5289 return (ENXIO);
5290 }
5291
5292 cred_t *
ddi_get_cred(void)5293 ddi_get_cred(void)
5294 {
5295 return (CRED());
5296 }
5297
5298 clock_t
ddi_get_lbolt(void)5299 ddi_get_lbolt(void)
5300 {
5301 return ((clock_t)lbolt_hybrid());
5302 }
5303
5304 int64_t
ddi_get_lbolt64(void)5305 ddi_get_lbolt64(void)
5306 {
5307 return (lbolt_hybrid());
5308 }
5309
5310 time_t
ddi_get_time(void)5311 ddi_get_time(void)
5312 {
5313 time_t now;
5314
5315 if ((now = gethrestime_sec()) == 0) {
5316 timestruc_t ts;
5317 mutex_enter(&tod_lock);
5318 ts = tod_get();
5319 mutex_exit(&tod_lock);
5320 return (ts.tv_sec);
5321 } else {
5322 return (now);
5323 }
5324 }
5325
5326 pid_t
ddi_get_pid(void)5327 ddi_get_pid(void)
5328 {
5329 return (ttoproc(curthread)->p_pid);
5330 }
5331
5332 kt_did_t
ddi_get_kt_did(void)5333 ddi_get_kt_did(void)
5334 {
5335 return (curthread->t_did);
5336 }
5337
5338 /*
5339 * This function returns B_TRUE if the caller can reasonably expect that a call
5340 * to cv_wait_sig(9F), cv_timedwait_sig(9F), or qwait_sig(9F) could be awakened
5341 * by user-level signal. If it returns B_FALSE, then the caller should use
5342 * other means to make certain that the wait will not hang "forever."
5343 *
5344 * It does not check the signal mask, nor for reception of any particular
5345 * signal.
5346 *
5347 * Currently, a thread can receive a signal if it's not a kernel thread and it
5348 * is not in the middle of exit(2) tear-down. Threads that are in that
5349 * tear-down effectively convert cv_wait_sig to cv_wait, cv_timedwait_sig to
5350 * cv_timedwait, and qwait_sig to qwait.
5351 */
5352 boolean_t
ddi_can_receive_sig(void)5353 ddi_can_receive_sig(void)
5354 {
5355 proc_t *pp;
5356
5357 if (curthread->t_proc_flag & TP_LWPEXIT)
5358 return (B_FALSE);
5359 if ((pp = ttoproc(curthread)) == NULL)
5360 return (B_FALSE);
5361 return (pp->p_as != &kas);
5362 }
5363
/*
 * Swap the bytes within each 16-bit [half-]word of 'src', writing the
 * result to 'dst'.  An odd trailing byte, if any, is ignored.
 */
void
swab(void *src, void *dst, size_t nbytes)
{
	unsigned char *from = (unsigned char *)src;
	unsigned char *to = (unsigned char *)dst;
	int nshorts = nbytes >> 1;

	while (nshorts-- > 0) {
		unsigned char lo = from[0];

		to[0] = from[1];
		to[1] = lo;
		from += 2;
		to += 2;
	}
}
5383
5384 static void
ddi_append_minor_node(dev_info_t * ddip,struct ddi_minor_data * dmdp)5385 ddi_append_minor_node(dev_info_t *ddip, struct ddi_minor_data *dmdp)
5386 {
5387 int circ;
5388 struct ddi_minor_data *dp;
5389
5390 ndi_devi_enter(ddip, &circ);
5391 if ((dp = DEVI(ddip)->devi_minor) == (struct ddi_minor_data *)NULL) {
5392 DEVI(ddip)->devi_minor = dmdp;
5393 } else {
5394 while (dp->next != (struct ddi_minor_data *)NULL)
5395 dp = dp->next;
5396 dp->next = dmdp;
5397 }
5398 ndi_devi_exit(ddip, circ);
5399 }
5400
5401 /*
5402 * Part of the obsolete SunCluster DDI Hooks.
5403 * Keep for binary compatibility
5404 */
5405 minor_t
ddi_getiminor(dev_t dev)5406 ddi_getiminor(dev_t dev)
5407 {
5408 return (getminor(dev));
5409 }
5410
5411 static int
i_log_devfs_minor_create(dev_info_t * dip,char * minor_name)5412 i_log_devfs_minor_create(dev_info_t *dip, char *minor_name)
5413 {
5414 int se_flag;
5415 int kmem_flag;
5416 int se_err;
5417 char *pathname, *class_name;
5418 sysevent_t *ev = NULL;
5419 sysevent_id_t eid;
5420 sysevent_value_t se_val;
5421 sysevent_attr_list_t *ev_attr_list = NULL;
5422
5423 /* determine interrupt context */
5424 se_flag = (servicing_interrupt()) ? SE_NOSLEEP : SE_SLEEP;
5425 kmem_flag = (se_flag == SE_SLEEP) ? KM_SLEEP : KM_NOSLEEP;
5426
5427 i_ddi_di_cache_invalidate();
5428
5429 #ifdef DEBUG
5430 if ((se_flag == SE_NOSLEEP) && sunddi_debug) {
5431 cmn_err(CE_CONT, "ddi_create_minor_node: called from "
5432 "interrupt level by driver %s",
5433 ddi_driver_name(dip));
5434 }
5435 #endif /* DEBUG */
5436
5437 ev = sysevent_alloc(EC_DEVFS, ESC_DEVFS_MINOR_CREATE, EP_DDI, se_flag);
5438 if (ev == NULL) {
5439 goto fail;
5440 }
5441
5442 pathname = kmem_alloc(MAXPATHLEN, kmem_flag);
5443 if (pathname == NULL) {
5444 sysevent_free(ev);
5445 goto fail;
5446 }
5447
5448 (void) ddi_pathname(dip, pathname);
5449 ASSERT(strlen(pathname));
5450 se_val.value_type = SE_DATA_TYPE_STRING;
5451 se_val.value.sv_string = pathname;
5452 if (sysevent_add_attr(&ev_attr_list, DEVFS_PATHNAME,
5453 &se_val, se_flag) != 0) {
5454 kmem_free(pathname, MAXPATHLEN);
5455 sysevent_free(ev);
5456 goto fail;
5457 }
5458 kmem_free(pathname, MAXPATHLEN);
5459
5460 /* add the device class attribute */
5461 if ((class_name = i_ddi_devi_class(dip)) != NULL) {
5462 se_val.value_type = SE_DATA_TYPE_STRING;
5463 se_val.value.sv_string = class_name;
5464 if (sysevent_add_attr(&ev_attr_list,
5465 DEVFS_DEVI_CLASS, &se_val, SE_SLEEP) != 0) {
5466 sysevent_free_attr(ev_attr_list);
5467 goto fail;
5468 }
5469 }
5470
5471 /*
5472 * allow for NULL minor names
5473 */
5474 if (minor_name != NULL) {
5475 se_val.value.sv_string = minor_name;
5476 if (sysevent_add_attr(&ev_attr_list, DEVFS_MINOR_NAME,
5477 &se_val, se_flag) != 0) {
5478 sysevent_free_attr(ev_attr_list);
5479 sysevent_free(ev);
5480 goto fail;
5481 }
5482 }
5483
5484 if (sysevent_attach_attributes(ev, ev_attr_list) != 0) {
5485 sysevent_free_attr(ev_attr_list);
5486 sysevent_free(ev);
5487 goto fail;
5488 }
5489
5490 if ((se_err = log_sysevent(ev, se_flag, &eid)) != 0) {
5491 if (se_err == SE_NO_TRANSPORT) {
5492 cmn_err(CE_WARN, "/devices or /dev may not be current "
5493 "for driver %s (%s). Run devfsadm -i %s",
5494 ddi_driver_name(dip), "syseventd not responding",
5495 ddi_driver_name(dip));
5496 } else {
5497 sysevent_free(ev);
5498 goto fail;
5499 }
5500 }
5501
5502 sysevent_free(ev);
5503 return (DDI_SUCCESS);
5504 fail:
5505 cmn_err(CE_WARN, "/devices or /dev may not be current "
5506 "for driver %s. Run devfsadm -i %s",
5507 ddi_driver_name(dip), ddi_driver_name(dip));
5508 return (DDI_SUCCESS);
5509 }
5510
/*
 * Log an EC_DEVFS/ESC_DEVFS_MINOR_REMOVE sysevent for a removed minor node.
 * Failing to remove a minor node is not of interest, therefore we do not
 * generate an error message; every exit path returns DDI_SUCCESS.
 */
static int
i_log_devfs_minor_remove(dev_info_t *dip, char *minor_name)
{
	char *pathname, *class_name;
	sysevent_t *ev;
	sysevent_id_t eid;
	sysevent_value_t se_val;
	sysevent_attr_list_t *ev_attr_list = NULL;

	/*
	 * only log ddi_remove_minor_node() calls outside the scope
	 * of attach/detach reconfigurations and when the dip is
	 * still initialized.
	 */
	if (DEVI_IS_ATTACHING(dip) || DEVI_IS_DETACHING(dip) ||
	    (i_ddi_node_state(dip) < DS_INITIALIZED)) {
		return (DDI_SUCCESS);
	}

	i_ddi_di_cache_invalidate();

	/* SE_SLEEP throughout: this path is never entered from interrupt */
	ev = sysevent_alloc(EC_DEVFS, ESC_DEVFS_MINOR_REMOVE, EP_DDI, SE_SLEEP);
	if (ev == NULL) {
		return (DDI_SUCCESS);
	}

	/* KM_SLEEP should not fail; NULL check kept for extra safety */
	pathname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	if (pathname == NULL) {
		sysevent_free(ev);
		return (DDI_SUCCESS);
	}

	/* /devices path of the node the minor belonged to */
	(void) ddi_pathname(dip, pathname);
	ASSERT(strlen(pathname));
	se_val.value_type = SE_DATA_TYPE_STRING;
	se_val.value.sv_string = pathname;
	if (sysevent_add_attr(&ev_attr_list, DEVFS_PATHNAME,
	    &se_val, SE_SLEEP) != 0) {
		kmem_free(pathname, MAXPATHLEN);
		sysevent_free(ev);
		return (DDI_SUCCESS);
	}

	/* the attribute list now holds its own copy of the string */
	kmem_free(pathname, MAXPATHLEN);

	/*
	 * allow for NULL minor names
	 */
	if (minor_name != NULL) {
		/* value_type is still SE_DATA_TYPE_STRING from above */
		se_val.value.sv_string = minor_name;
		if (sysevent_add_attr(&ev_attr_list, DEVFS_MINOR_NAME,
		    &se_val, SE_SLEEP) != 0) {
			sysevent_free_attr(ev_attr_list);
			goto fail;
		}
	}

	if ((class_name = i_ddi_devi_class(dip)) != NULL) {
		/* add the device class, driver name and instance attributes */

		se_val.value_type = SE_DATA_TYPE_STRING;
		se_val.value.sv_string = class_name;
		if (sysevent_add_attr(&ev_attr_list,
		    DEVFS_DEVI_CLASS, &se_val, SE_SLEEP) != 0) {
			sysevent_free_attr(ev_attr_list);
			goto fail;
		}

		se_val.value_type = SE_DATA_TYPE_STRING;
		se_val.value.sv_string = (char *)ddi_driver_name(dip);
		if (sysevent_add_attr(&ev_attr_list,
		    DEVFS_DRIVER_NAME, &se_val, SE_SLEEP) != 0) {
			sysevent_free_attr(ev_attr_list);
			goto fail;
		}

		se_val.value_type = SE_DATA_TYPE_INT32;
		se_val.value.sv_int32 = ddi_get_instance(dip);
		if (sysevent_add_attr(&ev_attr_list,
		    DEVFS_INSTANCE, &se_val, SE_SLEEP) != 0) {
			sysevent_free_attr(ev_attr_list);
			goto fail;
		}

	}

	/* on successful attach the event owns the attribute list */
	if (sysevent_attach_attributes(ev, ev_attr_list) != 0) {
		sysevent_free_attr(ev_attr_list);
	} else {
		(void) log_sysevent(ev, SE_SLEEP, &eid);
	}
fail:
	sysevent_free(ev);
	return (DDI_SUCCESS);
}
5610
5611 /*
5612 * Derive the device class of the node.
5613 * Device class names aren't defined yet. Until this is done we use
5614 * devfs event subclass names as device class names.
5615 */
5616 static int
derive_devi_class(dev_info_t * dip,char * node_type,int flag)5617 derive_devi_class(dev_info_t *dip, char *node_type, int flag)
5618 {
5619 int rv = DDI_SUCCESS;
5620
5621 if (i_ddi_devi_class(dip) == NULL) {
5622 if (strncmp(node_type, DDI_NT_BLOCK,
5623 sizeof (DDI_NT_BLOCK) - 1) == 0 &&
5624 (node_type[sizeof (DDI_NT_BLOCK) - 1] == '\0' ||
5625 node_type[sizeof (DDI_NT_BLOCK) - 1] == ':') &&
5626 strcmp(node_type, DDI_NT_FD) != 0) {
5627
5628 rv = i_ddi_set_devi_class(dip, ESC_DISK, flag);
5629
5630 } else if (strncmp(node_type, DDI_NT_NET,
5631 sizeof (DDI_NT_NET) - 1) == 0 &&
5632 (node_type[sizeof (DDI_NT_NET) - 1] == '\0' ||
5633 node_type[sizeof (DDI_NT_NET) - 1] == ':')) {
5634
5635 rv = i_ddi_set_devi_class(dip, ESC_NETWORK, flag);
5636
5637 } else if (strncmp(node_type, DDI_NT_PRINTER,
5638 sizeof (DDI_NT_PRINTER) - 1) == 0 &&
5639 (node_type[sizeof (DDI_NT_PRINTER) - 1] == '\0' ||
5640 node_type[sizeof (DDI_NT_PRINTER) - 1] == ':')) {
5641
5642 rv = i_ddi_set_devi_class(dip, ESC_PRINTER, flag);
5643
5644 } else if (strncmp(node_type, DDI_PSEUDO,
5645 sizeof (DDI_PSEUDO) -1) == 0 &&
5646 (strncmp(ESC_LOFI, ddi_node_name(dip),
5647 sizeof (ESC_LOFI) -1) == 0)) {
5648 rv = i_ddi_set_devi_class(dip, ESC_LOFI, flag);
5649 }
5650 }
5651
5652 return (rv);
5653 }
5654
5655 /*
5656 * Check compliance with PSARC 2003/375:
5657 *
5658 * The name must contain only characters a-z, A-Z, 0-9 or _ and it must not
5659 * exceed IFNAMSIZ (16) characters in length.
5660 */
5661 static boolean_t
verify_name(char * name)5662 verify_name(char *name)
5663 {
5664 size_t len = strlen(name);
5665 char *cp;
5666
5667 if (len == 0 || len > IFNAMSIZ)
5668 return (B_FALSE);
5669
5670 for (cp = name; *cp != '\0'; cp++) {
5671 if (!isalnum(*cp) && *cp != '_')
5672 return (B_FALSE);
5673 }
5674
5675 return (B_TRUE);
5676 }
5677
/*
 * ddi_create_minor_common:	Create a ddi_minor_data structure and
 *				attach it to the given devinfo node.
 *
 * Common implementation behind ddi_create_minor_node(9F) and its
 * privileged/default/internal variants; 'mtype' selects the variant.
 * Returns DDI_SUCCESS or DDI_FAILURE.  Uses KM_NOSLEEP allocations so it
 * is safe to call from contexts that must not block.
 */

int
ddi_create_minor_common(dev_info_t *dip, char *name, int spec_type,
    minor_t minor_num, char *node_type, int flag, ddi_minor_type mtype,
    const char *read_priv, const char *write_priv, mode_t priv_mode)
{
	struct ddi_minor_data *dmdp;
	major_t major;

	/* only character and block special nodes are valid */
	if (spec_type != S_IFCHR && spec_type != S_IFBLK)
		return (DDI_FAILURE);

	if (name == NULL)
		return (DDI_FAILURE);

	/*
	 * Log a message if the minor number the driver is creating
	 * is not expressible on the on-disk filesystem (currently
	 * this is limited to 18 bits both by UFS). The device can
	 * be opened via devfs, but not by device special files created
	 * via mknod().
	 */
	if (minor_num > L_MAXMIN32) {
		cmn_err(CE_WARN,
		    "%s%d:%s minor 0x%x too big for 32-bit applications",
		    ddi_driver_name(dip), ddi_get_instance(dip),
		    name, minor_num);
		return (DDI_FAILURE);
	}

	/* dip must be bound and attached */
	major = ddi_driver_major(dip);
	ASSERT(major != DDI_MAJOR_T_NONE);

	/*
	 * Default node_type to DDI_PSEUDO and issue notice in debug mode
	 */
	if (node_type == NULL) {
		node_type = DDI_PSEUDO;
		NDI_CONFIG_DEBUG((CE_NOTE, "!illegal node_type NULL for %s%d "
		    " minor node %s; default to DDI_PSEUDO",
		    ddi_driver_name(dip), ddi_get_instance(dip), name));
	}

	/*
	 * If the driver is a network driver, ensure that the name falls within
	 * the interface naming constraints specified by PSARC/2003/375.
	 */
	if (strcmp(node_type, DDI_NT_NET) == 0) {
		if (!verify_name(name))
			return (DDI_FAILURE);

		if (mtype == DDM_MINOR) {
			struct devnames *dnp = &devnamesp[major];

			/* Mark driver as a network driver */
			LOCK_DEV_OPS(&dnp->dn_lock);
			dnp->dn_flags |= DN_NETWORK_DRIVER;

			/*
			 * If this minor node is created during the device
			 * attachment, this is a physical network device.
			 * Mark the driver as a physical network driver.
			 */
			if (DEVI_IS_ATTACHING(dip))
				dnp->dn_flags |= DN_NETWORK_PHYSDRIVER;
			UNLOCK_DEV_OPS(&dnp->dn_lock);
		}
	}

	/* derive and record the node's device class, if not already set */
	if (mtype == DDM_MINOR) {
		if (derive_devi_class(dip, node_type, KM_NOSLEEP) !=
		    DDI_SUCCESS)
			return (DDI_FAILURE);
	}

	/*
	 * Take care of minor number information for the node.
	 */

	if ((dmdp = kmem_zalloc(sizeof (struct ddi_minor_data),
	    KM_NOSLEEP)) == NULL) {
		return (DDI_FAILURE);
	}
	if ((dmdp->ddm_name = i_ddi_strdup(name, KM_NOSLEEP)) == NULL) {
		kmem_free(dmdp, sizeof (struct ddi_minor_data));
		return (DDI_FAILURE);
	}
	dmdp->dip = dip;
	dmdp->ddm_dev = makedevice(major, minor_num);
	dmdp->ddm_spec_type = spec_type;
	dmdp->ddm_node_type = node_type;
	dmdp->type = mtype;
	if (flag & CLONE_DEV) {
		/* clone devices are opened via the clone driver's major */
		dmdp->type = DDM_ALIAS;
		dmdp->ddm_dev = makedevice(ddi_driver_major(clone_dip), major);
	}
	if (flag & PRIVONLY_DEV) {
		dmdp->ddm_flags |= DM_NO_FSPERM;
	}
	if (read_priv || write_priv) {
		dmdp->ddm_node_priv =
		    devpolicy_priv_by_name(read_priv, write_priv);
	}
	dmdp->ddm_priv_mode = priv_mode;

	ddi_append_minor_node(dip, dmdp);

	/*
	 * only log ddi_create_minor_node() calls which occur
	 * outside the scope of attach(9e)/detach(9e) reconfigurations
	 */
	if (!(DEVI_IS_ATTACHING(dip) || DEVI_IS_DETACHING(dip)) &&
	    mtype != DDM_INTERNAL_PATH) {
		(void) i_log_devfs_minor_create(dip, name);
	}

	/*
	 * Check if any dacf rules match the creation of this minor node
	 */
	dacfc_match_create_minor(name, node_type, dip, dmdp, flag);
	return (DDI_SUCCESS);
}
5805
5806 int
ddi_create_minor_node(dev_info_t * dip,char * name,int spec_type,minor_t minor_num,char * node_type,int flag)5807 ddi_create_minor_node(dev_info_t *dip, char *name, int spec_type,
5808 minor_t minor_num, char *node_type, int flag)
5809 {
5810 return (ddi_create_minor_common(dip, name, spec_type, minor_num,
5811 node_type, flag, DDM_MINOR, NULL, NULL, 0));
5812 }
5813
5814 int
ddi_create_priv_minor_node(dev_info_t * dip,char * name,int spec_type,minor_t minor_num,char * node_type,int flag,const char * rdpriv,const char * wrpriv,mode_t priv_mode)5815 ddi_create_priv_minor_node(dev_info_t *dip, char *name, int spec_type,
5816 minor_t minor_num, char *node_type, int flag,
5817 const char *rdpriv, const char *wrpriv, mode_t priv_mode)
5818 {
5819 return (ddi_create_minor_common(dip, name, spec_type, minor_num,
5820 node_type, flag, DDM_MINOR, rdpriv, wrpriv, priv_mode));
5821 }
5822
5823 int
ddi_create_default_minor_node(dev_info_t * dip,char * name,int spec_type,minor_t minor_num,char * node_type,int flag)5824 ddi_create_default_minor_node(dev_info_t *dip, char *name, int spec_type,
5825 minor_t minor_num, char *node_type, int flag)
5826 {
5827 return (ddi_create_minor_common(dip, name, spec_type, minor_num,
5828 node_type, flag, DDM_DEFAULT, NULL, NULL, 0));
5829 }
5830
5831 /*
5832 * Internal (non-ddi) routine for drivers to export names known
5833 * to the kernel (especially ddi_pathname_to_dev_t and friends)
5834 * but not exported externally to /dev
5835 */
5836 int
ddi_create_internal_pathname(dev_info_t * dip,char * name,int spec_type,minor_t minor_num)5837 ddi_create_internal_pathname(dev_info_t *dip, char *name, int spec_type,
5838 minor_t minor_num)
5839 {
5840 return (ddi_create_minor_common(dip, name, spec_type, minor_num,
5841 "internal", 0, DDM_INTERNAL_PATH, NULL, NULL, 0));
5842 }
5843
/*
 * Remove the named minor node from 'dip' (or, when name is NULL, remove
 * ALL minor nodes on the dip).  For each removed node: log a sysevent
 * (unless internal), release its name, device privilege, and dacf client
 * data, then free the record.  Removal of a non-existent node is silently
 * ignored.
 */
void
ddi_remove_minor_node(dev_info_t *dip, char *name)
{
	int circ;
	struct ddi_minor_data *dmdp, *dmdp1;
	struct ddi_minor_data **dmdp_prev;

	ndi_devi_enter(dip, &circ);
	/* dmdp_prev tracks the link to patch when the current node is freed */
	dmdp_prev = &DEVI(dip)->devi_minor;
	dmdp = DEVI(dip)->devi_minor;
	while (dmdp != NULL) {
		/* capture successor before dmdp may be freed below */
		dmdp1 = dmdp->next;
		if ((name == NULL || (dmdp->ddm_name != NULL &&
		    strcmp(name, dmdp->ddm_name) == 0))) {
			if (dmdp->ddm_name != NULL) {
				/* internal paths are invisible to userland */
				if (dmdp->type != DDM_INTERNAL_PATH)
					(void) i_log_devfs_minor_remove(dip,
					    dmdp->ddm_name);
				kmem_free(dmdp->ddm_name,
				    strlen(dmdp->ddm_name) + 1);
			}
			/*
			 * Release device privilege, if any.
			 * Release dacf client data associated with this minor
			 * node by storing NULL.
			 */
			if (dmdp->ddm_node_priv)
				dpfree(dmdp->ddm_node_priv);
			dacf_store_info((dacf_infohdl_t)dmdp, NULL);
			kmem_free(dmdp, sizeof (struct ddi_minor_data));
			/* unlink the freed node from the list */
			*dmdp_prev = dmdp1;
			/*
			 * OK, we found it, so get out now -- if we drive on,
			 * we will strcmp against garbage. See 1139209.
			 */
			if (name != NULL)
				break;
		} else {
			dmdp_prev = &dmdp->next;
		}
		dmdp = dmdp1;
	}
	ndi_devi_exit(dip, circ);
}
5888
5889
5890 int
ddi_in_panic()5891 ddi_in_panic()
5892 {
5893 return (panicstr != NULL);
5894 }
5895
/*
 * Find first bit set in a mask; bit positions are counted from 1 up,
 * and 0 is returned for an empty mask.
 */
int
ddi_ffs(long mask)
{
	return (ffs(mask));
}
5906
/*
 * Find last bit set. Repeatedly clear the lowest set bit (mask & (mask-1))
 * until exactly one bit remains, then let ffs report its 1-based position.
 * Returns 0 for an empty mask.
 *
 * Algorithm courtesy of Steve Chessin.
 */
int
ddi_fls(long mask)
{
	for (;;) {
		long stripped;

		if (mask == 0)
			break;
		stripped = mask & (mask - 1);
		if (stripped == 0)
			break;
		mask = stripped;
	}
	return (ffs(mask));
}
5927
5928 /*
5929 * The ddi_soft_state_* routines comprise generic storage management utilities
5930 * for driver soft state structures (in "the old days," this was done with
5931 * statically sized array - big systems and dynamic loading and unloading
5932 * make heap allocation more attractive).
5933 */
5934
5935 /*
5936 * Allocate a set of pointers to 'n_items' objects of size 'size'
5937 * bytes. Each pointer is initialized to nil.
5938 *
5939 * The 'size' and 'n_items' values are stashed in the opaque
5940 * handle returned to the caller.
5941 *
5942 * This implementation interprets 'set of pointers' to mean 'array
5943 * of pointers' but note that nothing in the interface definition
5944 * precludes an implementation that uses, for example, a linked list.
5945 * However there should be a small efficiency gain from using an array
5946 * at lookup time.
5947 *
5948 * NOTE As an optimization, we make our growable array allocations in
5949 * powers of two (bytes), since that's how much kmem_alloc (currently)
5950 * gives us anyway. It should save us some free/realloc's ..
5951 *
5952 * As a further optimization, we make the growable array start out
5953 * with MIN_N_ITEMS in it.
5954 */
5955
5956 #define MIN_N_ITEMS 8 /* 8 void *'s == 32 bytes */
5957
/*
 * Initialize a soft state set: allocate the handle, its lock, and an
 * initial pointer array sized to at least n_items (rounded up to a power
 * of two, minimum MIN_N_ITEMS).  Returns 0 on success, EINVAL on bad
 * arguments.  Sleeps for memory.
 */
int
ddi_soft_state_init(void **state_p, size_t size, size_t n_items)
{
	i_ddi_soft_state *ss;

	if (state_p == NULL || size == 0)
		return (EINVAL);

	ss = kmem_zalloc(sizeof (*ss), KM_SLEEP);
	mutex_init(&ss->lock, NULL, MUTEX_DRIVER, NULL);
	ss->size = size;

	if (n_items < MIN_N_ITEMS)
		ss->n_items = MIN_N_ITEMS;
	else {
		int bitlog;

		/*
		 * Round n_items up to a power of two: if n_items already is
		 * one (highest set bit == lowest set bit), keep it by
		 * backing bitlog off by one before shifting.
		 */
		if ((bitlog = ddi_fls(n_items)) == ddi_ffs(n_items))
			bitlog--;
		ss->n_items = 1 << bitlog;
	}

	ASSERT(ss->n_items >= n_items);

	ss->array = kmem_zalloc(ss->n_items * sizeof (void *), KM_SLEEP);

	*state_p = ss;
	return (0);
}
5987
5988 /*
5989 * Allocate a state structure of size 'size' to be associated
5990 * with item 'item'.
5991 *
5992 * In this implementation, the array is extended to
5993 * allow the requested offset, if needed.
5994 */
int
ddi_soft_state_zalloc(void *state, int item)
{
	i_ddi_soft_state *ss = (i_ddi_soft_state *)state;
	void **array;
	void *new_element;

	if ((state == NULL) || (item < 0))
		return (DDI_FAILURE);

	mutex_enter(&ss->lock);
	/* size == 0 means the handle was never initialized (or was fini'd) */
	if (ss->size == 0) {
		mutex_exit(&ss->lock);
		cmn_err(CE_WARN, "ddi_soft_state_zalloc: bad handle: %s",
		    mod_containing_pc(caller()));
		return (DDI_FAILURE);
	}

	array = ss->array;	/* NULL if ss->n_items == 0 */
	ASSERT(ss->n_items != 0 && array != NULL);

	/*
	 * refuse to tread on an existing element
	 */
	if (item < ss->n_items && array[item] != NULL) {
		mutex_exit(&ss->lock);
		return (DDI_FAILURE);
	}

	/*
	 * Allocate a new element to plug in
	 */
	new_element = kmem_zalloc(ss->size, KM_SLEEP);

	/*
	 * Check if the array is big enough, if not, grow it.
	 */
	if (item >= ss->n_items) {
		void **new_array;
		size_t new_n_items;
		struct i_ddi_soft_state *dirty;

		/*
		 * Allocate a new array of the right length, copy
		 * all the old pointers to the new array, then
		 * if it exists at all, put the old array on the
		 * dirty list.
		 *
		 * Note that we can't kmem_free() the old array.
		 *
		 * Why -- well the 'get' operation is 'mutex-free', so we
		 * can't easily catch a suspended thread that is just about
		 * to dereference the array we just grew out of.  So we
		 * cons up a header and put it on a list of 'dirty'
		 * pointer arrays.  (Dirty in the sense that there may
		 * be suspended threads somewhere that are in the middle
		 * of referencing them).  Fortunately, we -can- garbage
		 * collect it all at ddi_soft_state_fini time.
		 */
		new_n_items = ss->n_items;
		/* double until the array can index 'item' */
		while (new_n_items < (1 + item))
			new_n_items <<= 1;	/* double array size .. */

		ASSERT(new_n_items >= (1 + item));	/* sanity check! */

		new_array = kmem_zalloc(new_n_items * sizeof (void *),
		    KM_SLEEP);
		/*
		 * Copy the pointers into the new array
		 */
		bcopy(array, new_array, ss->n_items * sizeof (void *));

		/*
		 * Save the old array on the dirty list
		 */
		dirty = kmem_zalloc(sizeof (*dirty), KM_SLEEP);
		dirty->array = ss->array;
		dirty->n_items = ss->n_items;
		dirty->next = ss->next;
		ss->next = dirty;

		/* publish the grown array; lock-free readers see old or new */
		ss->array = (array = new_array);
		ss->n_items = new_n_items;
	}

	ASSERT(array != NULL && item < ss->n_items && array[item] == NULL);

	array[item] = new_element;

	mutex_exit(&ss->lock);
	return (DDI_SUCCESS);
}
6087
6088 /*
6089 * Fetch a pointer to the allocated soft state structure.
6090 *
6091 * This is designed to be cheap.
6092 *
6093 * There's an argument that there should be more checking for
6094 * nil pointers and out of bounds on the array.. but we do a lot
6095 * of that in the alloc/free routines.
6096 *
6097 * An array has the convenience that we don't need to lock read-access
6098 * to it c.f. a linked list. However our "expanding array" strategy
6099 * means that we should hold a readers lock on the i_ddi_soft_state
6100 * structure.
6101 *
6102 * However, from a performance viewpoint, we need to do it without
6103 * any locks at all -- this also makes it a leaf routine. The algorithm
6104 * is 'lock-free' because we only discard the pointer arrays at
6105 * ddi_soft_state_fini() time.
6106 */
void *
ddi_get_soft_state(void *state, int item)
{
	i_ddi_soft_state *ss = (i_ddi_soft_state *)state;

	ASSERT((ss != NULL) && (item >= 0));

	/*
	 * Deliberately lock-free (see the block comment above): safe because
	 * grown-out-of arrays are only discarded at ddi_soft_state_fini().
	 * Returns NULL for an out-of-range or never-allocated item.
	 */
	if (item < ss->n_items && ss->array != NULL)
		return (ss->array[item]);
	return (NULL);
}
6118
6119 /*
6120 * Free the state structure corresponding to 'item.' Freeing an
6121 * element that has either gone or was never allocated is not
6122 * considered an error. Note that we free the state structure, but
6123 * we don't shrink our pointer array, or discard 'dirty' arrays,
6124 * since even a few pointers don't really waste too much memory.
6125 *
6126 * Passing an item number that is out of bounds, or a null pointer will
6127 * provoke an error message.
6128 */
void
ddi_soft_state_free(void *state, int item)
{
	i_ddi_soft_state *ss = (i_ddi_soft_state *)state;
	void **array;
	void *element;
	static char msg[] = "ddi_soft_state_free:";

	if (ss == NULL) {
		cmn_err(CE_WARN, "%s null handle: %s",
		    msg, mod_containing_pc(caller()));
		return;
	}

	element = NULL;

	mutex_enter(&ss->lock);

	if ((array = ss->array) == NULL || ss->size == 0) {
		cmn_err(CE_WARN, "%s bad handle: %s",
		    msg, mod_containing_pc(caller()));
	} else if (item < 0 || item >= ss->n_items) {
		cmn_err(CE_WARN, "%s item %d not in range [0..%lu]: %s",
		    msg, item, ss->n_items - 1, mod_containing_pc(caller()));
	} else if (array[item] != NULL) {
		/* detach under the lock; a gone/never-allocated item is OK */
		element = array[item];
		array[item] = NULL;
	}

	mutex_exit(&ss->lock);

	/* free outside the lock; ss->size is immutable after init */
	if (element)
		kmem_free(element, ss->size);
}
6163
6164 /*
6165 * Free the entire set of pointers, and any
6166 * soft state structures contained therein.
6167 *
6168 * Note that we don't grab the ss->lock mutex, even though
6169 * we're inspecting the various fields of the data structure.
6170 *
6171 * There is an implicit assumption that this routine will
6172 * never run concurrently with any of the above on this
6173 * particular state structure i.e. by the time the driver
6174 * calls this routine, there should be no other threads
6175 * running in the driver.
6176 */
void
ddi_soft_state_fini(void **state_p)
{
	i_ddi_soft_state *ss, *dirty;
	int item;
	static char msg[] = "ddi_soft_state_fini:";

	if (state_p == NULL ||
	    (ss = (i_ddi_soft_state *)(*state_p)) == NULL) {
		cmn_err(CE_WARN, "%s null handle: %s",
		    msg, mod_containing_pc(caller()));
		return;
	}

	/* size == 0 means the handle was never initialized */
	if (ss->size == 0) {
		cmn_err(CE_WARN, "%s bad handle: %s",
		    msg, mod_containing_pc(caller()));
		return;
	}

	/* free every remaining element, then the current pointer array */
	if (ss->n_items > 0) {
		for (item = 0; item < ss->n_items; item++)
			ddi_soft_state_free(ss, item);
		kmem_free(ss->array, ss->n_items * sizeof (void *));
	}

	/*
	 * Now delete any dirty arrays from previous 'grow' operations
	 */
	for (dirty = ss->next; dirty; dirty = ss->next) {
		ss->next = dirty->next;
		kmem_free(dirty->array, dirty->n_items * sizeof (void *));
		kmem_free(dirty, sizeof (*dirty));
	}

	mutex_destroy(&ss->lock);
	kmem_free(ss, sizeof (*ss));

	*state_p = NULL;
}
6217
6218 #define SS_N_ITEMS_PER_HASH 16
6219 #define SS_MIN_HASH_SZ 16
6220 #define SS_MAX_HASH_SZ 4096
6221
6222 int
ddi_soft_state_bystr_init(ddi_soft_state_bystr ** state_p,size_t size,int n_items)6223 ddi_soft_state_bystr_init(ddi_soft_state_bystr **state_p, size_t size,
6224 int n_items)
6225 {
6226 i_ddi_soft_state_bystr *sss;
6227 int hash_sz;
6228
6229 ASSERT(state_p && size && n_items);
6230 if ((state_p == NULL) || (size == 0) || (n_items == 0))
6231 return (EINVAL);
6232
6233 /* current implementation is based on hash, convert n_items to hash */
6234 hash_sz = n_items / SS_N_ITEMS_PER_HASH;
6235 if (hash_sz < SS_MIN_HASH_SZ)
6236 hash_sz = SS_MIN_HASH_SZ;
6237 else if (hash_sz > SS_MAX_HASH_SZ)
6238 hash_sz = SS_MAX_HASH_SZ;
6239
6240 /* allocate soft_state pool */
6241 sss = kmem_zalloc(sizeof (*sss), KM_SLEEP);
6242 sss->ss_size = size;
6243 sss->ss_mod_hash = mod_hash_create_strhash("soft_state_bystr",
6244 hash_sz, mod_hash_null_valdtor);
6245 *state_p = (ddi_soft_state_bystr *)sss;
6246 return (0);
6247 }
6248
6249 int
ddi_soft_state_bystr_zalloc(ddi_soft_state_bystr * state,const char * str)6250 ddi_soft_state_bystr_zalloc(ddi_soft_state_bystr *state, const char *str)
6251 {
6252 i_ddi_soft_state_bystr *sss = (i_ddi_soft_state_bystr *)state;
6253 void *sso;
6254 char *dup_str;
6255
6256 ASSERT(sss && str && sss->ss_mod_hash);
6257 if ((sss == NULL) || (str == NULL) || (sss->ss_mod_hash == NULL))
6258 return (DDI_FAILURE);
6259 sso = kmem_zalloc(sss->ss_size, KM_SLEEP);
6260 dup_str = i_ddi_strdup((char *)str, KM_SLEEP);
6261 if (mod_hash_insert(sss->ss_mod_hash,
6262 (mod_hash_key_t)dup_str, (mod_hash_val_t)sso) == 0)
6263 return (DDI_SUCCESS);
6264
6265 /*
6266 * The only error from an strhash insert is caused by a duplicate key.
6267 * We refuse to tread on an existing elements, so free and fail.
6268 */
6269 kmem_free(dup_str, strlen(dup_str) + 1);
6270 kmem_free(sso, sss->ss_size);
6271 return (DDI_FAILURE);
6272 }
6273
6274 void *
ddi_soft_state_bystr_get(ddi_soft_state_bystr * state,const char * str)6275 ddi_soft_state_bystr_get(ddi_soft_state_bystr *state, const char *str)
6276 {
6277 i_ddi_soft_state_bystr *sss = (i_ddi_soft_state_bystr *)state;
6278 void *sso;
6279
6280 ASSERT(sss && str && sss->ss_mod_hash);
6281 if ((sss == NULL) || (str == NULL) || (sss->ss_mod_hash == NULL))
6282 return (NULL);
6283
6284 if (mod_hash_find(sss->ss_mod_hash,
6285 (mod_hash_key_t)str, (mod_hash_val_t *)&sso) == 0)
6286 return (sso);
6287 return (NULL);
6288 }
6289
6290 void
ddi_soft_state_bystr_free(ddi_soft_state_bystr * state,const char * str)6291 ddi_soft_state_bystr_free(ddi_soft_state_bystr *state, const char *str)
6292 {
6293 i_ddi_soft_state_bystr *sss = (i_ddi_soft_state_bystr *)state;
6294 void *sso;
6295
6296 ASSERT(sss && str && sss->ss_mod_hash);
6297 if ((sss == NULL) || (str == NULL) || (sss->ss_mod_hash == NULL))
6298 return;
6299
6300 (void) mod_hash_remove(sss->ss_mod_hash,
6301 (mod_hash_key_t)str, (mod_hash_val_t *)&sso);
6302 kmem_free(sso, sss->ss_size);
6303 }
6304
6305 void
ddi_soft_state_bystr_fini(ddi_soft_state_bystr ** state_p)6306 ddi_soft_state_bystr_fini(ddi_soft_state_bystr **state_p)
6307 {
6308 i_ddi_soft_state_bystr *sss;
6309
6310 ASSERT(state_p);
6311 if (state_p == NULL)
6312 return;
6313
6314 sss = (i_ddi_soft_state_bystr *)(*state_p);
6315 if (sss == NULL)
6316 return;
6317
6318 ASSERT(sss->ss_mod_hash);
6319 if (sss->ss_mod_hash) {
6320 mod_hash_destroy_strhash(sss->ss_mod_hash);
6321 sss->ss_mod_hash = NULL;
6322 }
6323
6324 kmem_free(sss, sizeof (*sss));
6325 *state_p = NULL;
6326 }
6327
6328 /*
6329 * The ddi_strid_* routines provide string-to-index management utilities.
6330 */
6331 /* allocate and initialize an strid set */
6332 int
ddi_strid_init(ddi_strid ** strid_p,int n_items)6333 ddi_strid_init(ddi_strid **strid_p, int n_items)
6334 {
6335 i_ddi_strid *ss;
6336 int hash_sz;
6337
6338 if (strid_p == NULL)
6339 return (DDI_FAILURE);
6340
6341 /* current implementation is based on hash, convert n_items to hash */
6342 hash_sz = n_items / SS_N_ITEMS_PER_HASH;
6343 if (hash_sz < SS_MIN_HASH_SZ)
6344 hash_sz = SS_MIN_HASH_SZ;
6345 else if (hash_sz > SS_MAX_HASH_SZ)
6346 hash_sz = SS_MAX_HASH_SZ;
6347
6348 ss = kmem_alloc(sizeof (*ss), KM_SLEEP);
6349 ss->strid_chunksz = n_items;
6350 ss->strid_spacesz = n_items;
6351 ss->strid_space = id_space_create("strid", 1, n_items);
6352 ss->strid_bystr = mod_hash_create_strhash("strid_bystr", hash_sz,
6353 mod_hash_null_valdtor);
6354 ss->strid_byid = mod_hash_create_idhash("strid_byid", hash_sz,
6355 mod_hash_null_valdtor);
6356 *strid_p = (ddi_strid *)ss;
6357 return (DDI_SUCCESS);
6358 }
6359
6360 /* allocate an id mapping within the specified set for str, return id */
6361 static id_t
i_ddi_strid_alloc(ddi_strid * strid,char * str)6362 i_ddi_strid_alloc(ddi_strid *strid, char *str)
6363 {
6364 i_ddi_strid *ss = (i_ddi_strid *)strid;
6365 id_t id;
6366 char *s;
6367
6368 ASSERT(ss && str);
6369 if ((ss == NULL) || (str == NULL))
6370 return (0);
6371
6372 /*
6373 * Allocate an id using VM_FIRSTFIT in order to keep allocated id
6374 * range as compressed as possible. This is important to minimize
6375 * the amount of space used when the id is used as a ddi_soft_state
6376 * index by the caller.
6377 *
6378 * If the id list is exhausted, increase the size of the list
6379 * by the chuck size specified in ddi_strid_init and reattempt
6380 * the allocation
6381 */
6382 if ((id = id_allocff_nosleep(ss->strid_space)) == (id_t)-1) {
6383 id_space_extend(ss->strid_space, ss->strid_spacesz,
6384 ss->strid_spacesz + ss->strid_chunksz);
6385 ss->strid_spacesz += ss->strid_chunksz;
6386 if ((id = id_allocff_nosleep(ss->strid_space)) == (id_t)-1)
6387 return (0);
6388 }
6389
6390 /*
6391 * NOTE: since we create and destroy in unison we can save space by
6392 * using bystr key as the byid value. This means destroy must occur
6393 * in (byid, bystr) order.
6394 */
6395 s = i_ddi_strdup(str, KM_SLEEP);
6396 if (mod_hash_insert(ss->strid_bystr, (mod_hash_key_t)s,
6397 (mod_hash_val_t)(intptr_t)id) != 0) {
6398 ddi_strid_free(strid, id);
6399 return (0);
6400 }
6401 if (mod_hash_insert(ss->strid_byid, (mod_hash_key_t)(intptr_t)id,
6402 (mod_hash_val_t)s) != 0) {
6403 ddi_strid_free(strid, id);
6404 return (0);
6405 }
6406
6407 /* NOTE: s if freed on mod_hash_destroy by mod_hash_strval_dtor */
6408 return (id);
6409 }
6410
6411 /* allocate an id mapping within the specified set for str, return id */
6412 id_t
ddi_strid_alloc(ddi_strid * strid,char * str)6413 ddi_strid_alloc(ddi_strid *strid, char *str)
6414 {
6415 return (i_ddi_strid_alloc(strid, str));
6416 }
6417
6418 /* return the id within the specified strid given the str */
6419 id_t
ddi_strid_str2id(ddi_strid * strid,char * str)6420 ddi_strid_str2id(ddi_strid *strid, char *str)
6421 {
6422 i_ddi_strid *ss = (i_ddi_strid *)strid;
6423 id_t id = 0;
6424 mod_hash_val_t hv;
6425
6426 ASSERT(ss && str);
6427 if (ss && str && (mod_hash_find(ss->strid_bystr,
6428 (mod_hash_key_t)str, &hv) == 0))
6429 id = (int)(intptr_t)hv;
6430 return (id);
6431 }
6432
6433 /* return str within the specified strid given the id */
6434 char *
ddi_strid_id2str(ddi_strid * strid,id_t id)6435 ddi_strid_id2str(ddi_strid *strid, id_t id)
6436 {
6437 i_ddi_strid *ss = (i_ddi_strid *)strid;
6438 char *str = NULL;
6439 mod_hash_val_t hv;
6440
6441 ASSERT(ss && id > 0);
6442 if (ss && (id > 0) && (mod_hash_find(ss->strid_byid,
6443 (mod_hash_key_t)(uintptr_t)id, &hv) == 0))
6444 str = (char *)hv;
6445 return (str);
6446 }
6447
6448 /* free the id mapping within the specified strid */
/* free the id mapping within the specified strid */
void
ddi_strid_free(ddi_strid *strid, id_t id)
{
	i_ddi_strid *ss = (i_ddi_strid *)strid;
	char *str;

	ASSERT(ss && id > 0);
	if ((ss == NULL) || (id <= 0))
		return;

	/* bystr key is byid value: destroy order must be (byid, bystr) */
	str = ddi_strid_id2str(strid, id);
	(void) mod_hash_destroy(ss->strid_byid, (mod_hash_key_t)(uintptr_t)id);
	id_free(ss->strid_space, id);

	/* the strhash key destructor frees the string copy itself */
	if (str)
		(void) mod_hash_destroy(ss->strid_bystr, (mod_hash_key_t)str);
}
6467
6468 /* destroy the strid set */
6469 void
ddi_strid_fini(ddi_strid ** strid_p)6470 ddi_strid_fini(ddi_strid **strid_p)
6471 {
6472 i_ddi_strid *ss;
6473
6474 ASSERT(strid_p);
6475 if (strid_p == NULL)
6476 return;
6477
6478 ss = (i_ddi_strid *)(*strid_p);
6479 if (ss == NULL)
6480 return;
6481
6482 /* bystr key is byid value: destroy order must be (byid, bystr) */
6483 if (ss->strid_byid)
6484 mod_hash_destroy_hash(ss->strid_byid);
6485 if (ss->strid_byid)
6486 mod_hash_destroy_hash(ss->strid_bystr);
6487 if (ss->strid_space)
6488 id_space_destroy(ss->strid_space);
6489 kmem_free(ss, sizeof (*ss));
6490 *strid_p = NULL;
6491 }
6492
6493 /*
6494 * This sets the devi_addr entry in the dev_info structure 'dip' to 'name'.
6495 * Storage is double buffered to prevent updates during devi_addr use -
 * double buffering is adequate for reliable ddi_deviname() consumption.
6497 * The double buffer is not freed until dev_info structure destruction
6498 * (by i_ddi_free_node).
6499 */
void
ddi_set_name_addr(dev_info_t *dip, char *name)
{
	char *buf = DEVI(dip)->devi_addr_buf;
	char *newaddr;

	/* Lazily allocate the two MAXNAMELEN halves of the double buffer. */
	if (buf == NULL) {
		buf = kmem_zalloc(2 * MAXNAMELEN, KM_SLEEP);
		DEVI(dip)->devi_addr_buf = buf;
	}

	if (name) {
		ASSERT(strlen(name) < MAXNAMELEN);
		/*
		 * Copy into whichever half devi_addr does NOT currently
		 * point at, so a reader holding the old pointer still sees
		 * a stable string while we switch.
		 */
		newaddr = (DEVI(dip)->devi_addr == buf) ?
		    (buf + MAXNAMELEN) : buf;
		(void) strlcpy(newaddr, name, MAXNAMELEN);
	} else
		newaddr = NULL;

	DEVI(dip)->devi_addr = newaddr;
}
6521
6522 char *
ddi_get_name_addr(dev_info_t * dip)6523 ddi_get_name_addr(dev_info_t *dip)
6524 {
6525 return (DEVI(dip)->devi_addr);
6526 }
6527
6528 void
ddi_set_parent_data(dev_info_t * dip,void * pd)6529 ddi_set_parent_data(dev_info_t *dip, void *pd)
6530 {
6531 DEVI(dip)->devi_parent_data = pd;
6532 }
6533
6534 void *
ddi_get_parent_data(dev_info_t * dip)6535 ddi_get_parent_data(dev_info_t *dip)
6536 {
6537 return (DEVI(dip)->devi_parent_data);
6538 }
6539
6540 /*
6541 * ddi_name_to_major: returns the major number of a named module,
6542 * derived from the current driver alias binding.
6543 *
6544 * Caveat: drivers should avoid the use of this function, in particular
6545 * together with ddi_get_name/ddi_binding name, as per
6546 * major = ddi_name_to_major(ddi_get_name(devi));
6547 * ddi_name_to_major() relies on the state of the device/alias binding,
6548 * which can and does change dynamically as aliases are administered
6549 * over time. An attached device instance cannot rely on the major
6550 * number returned by ddi_name_to_major() to match its own major number.
6551 *
6552 * For driver use, ddi_driver_major() reliably returns the major number
6553 * for the module to which the device was bound at attach time over
6554 * the life of the instance.
6555 * major = ddi_driver_major(dev_info_t *)
6556 */
6557 major_t
ddi_name_to_major(char * name)6558 ddi_name_to_major(char *name)
6559 {
6560 return (mod_name_to_major(name));
6561 }
6562
6563 /*
6564 * ddi_major_to_name: Returns the module name bound to a major number.
6565 */
6566 char *
ddi_major_to_name(major_t major)6567 ddi_major_to_name(major_t major)
6568 {
6569 return (mod_major_to_name(major));
6570 }
6571
6572 /*
6573 * Return the name of the devinfo node pointed at by 'dip' in the buffer
6574 * pointed at by 'name.' A devinfo node is named as a result of calling
6575 * ddi_initchild().
6576 *
6577 * Note: the driver must be held before calling this function!
6578 */
char *
ddi_deviname(dev_info_t *dip, char *name)
{
	char *addrname;
	char none = '\0';

	/* The root node contributes the empty string to a pathname. */
	if (dip == ddi_root_node()) {
		*name = '\0';
		return (name);
	}

	if (i_ddi_node_state(dip) < DS_BOUND) {
		/* unbound nodes have no unit-address yet */
		addrname = &none;
	} else {
		/*
		 * Use ddi_get_name_addr() without checking state so we get
		 * a unit-address if we are called after ddi_set_name_addr()
		 * by nexus DDI_CTL_INITCHILD code, but before completing
		 * node promotion to DS_INITIALIZED.  We currently have
		 * two situations where we are called in this state:
		 *   o For framework processing of a path-oriented alias.
		 *   o If a SCSA nexus driver calls ddi_devid_register()
		 *     from its tran_tgt_init(9E) implementation.
		 */
		addrname = ddi_get_name_addr(dip);
		if (addrname == NULL)
			addrname = &none;
	}

	/*
	 * NOTE(review): sprintf() is unbounded; the caller must supply a
	 * buffer large enough for "/<node>@<addr>" -- confirm buffer
	 * sizing at call sites before relying on this.
	 */
	if (*addrname == '\0') {
		(void) sprintf(name, "/%s", ddi_node_name(dip));
	} else {
		(void) sprintf(name, "/%s@%s", ddi_node_name(dip), addrname);
	}

	return (name);
}
6616
6617 /*
6618 * Spits out the name of device node, typically name@addr, for a given node,
6619 * using the driver name, not the nodename.
6620 *
6621 * Used by match_parent. Not to be used elsewhere.
6622 */
6623 char *
i_ddi_parname(dev_info_t * dip,char * name)6624 i_ddi_parname(dev_info_t *dip, char *name)
6625 {
6626 char *addrname;
6627
6628 if (dip == ddi_root_node()) {
6629 *name = '\0';
6630 return (name);
6631 }
6632
6633 ASSERT(i_ddi_node_state(dip) >= DS_INITIALIZED);
6634
6635 if (*(addrname = ddi_get_name_addr(dip)) == '\0')
6636 (void) sprintf(name, "%s", ddi_binding_name(dip));
6637 else
6638 (void) sprintf(name, "%s@%s", ddi_binding_name(dip), addrname);
6639 return (name);
6640 }
6641
6642 static char *
pathname_work(dev_info_t * dip,char * path)6643 pathname_work(dev_info_t *dip, char *path)
6644 {
6645 char *bp;
6646
6647 if (dip == ddi_root_node()) {
6648 *path = '\0';
6649 return (path);
6650 }
6651 (void) pathname_work(ddi_get_parent(dip), path);
6652 bp = path + strlen(path);
6653 (void) ddi_deviname(dip, bp);
6654 return (path);
6655 }
6656
6657 char *
ddi_pathname(dev_info_t * dip,char * path)6658 ddi_pathname(dev_info_t *dip, char *path)
6659 {
6660 return (pathname_work(dip, path));
6661 }
6662
6663 char *
ddi_pathname_minor(struct ddi_minor_data * dmdp,char * path)6664 ddi_pathname_minor(struct ddi_minor_data *dmdp, char *path)
6665 {
6666 if (dmdp->dip == NULL)
6667 *path = '\0';
6668 else {
6669 (void) ddi_pathname(dmdp->dip, path);
6670 if (dmdp->ddm_name) {
6671 (void) strcat(path, ":");
6672 (void) strcat(path, dmdp->ddm_name);
6673 }
6674 }
6675 return (path);
6676 }
6677
static char *
pathname_work_obp(dev_info_t *dip, char *path)
{
	char *bp;
	char *obp_path;

	/*
	 * look up the "obp-path" property, return the path if it exists
	 */
	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "obp-path", &obp_path) == DDI_PROP_SUCCESS) {
		(void) strcpy(path, obp_path);
		ddi_prop_free(obp_path);
		return (path);
	}

	/*
	 * stop at root, no obp path
	 */
	if (dip == ddi_root_node()) {
		return (NULL);
	}

	/* recurse: some ancestor must contribute the leading obp path */
	obp_path = pathname_work_obp(ddi_get_parent(dip), path);
	if (obp_path == NULL)
		return (NULL);

	/*
	 * append our component to parent's obp path
	 *
	 * NOTE(review): the *(bp - 1) read assumes the parent path is
	 * non-empty.  That appears to hold because an ancestor only
	 * succeeds by copying an "obp-path" property value -- confirm
	 * that property can never be the empty string.
	 */
	bp = path + strlen(path);
	if (*(bp - 1) != '/')
		(void) strcat(bp++, "/");
	(void) ddi_deviname(dip, bp);
	return (path);
}
6714
6715 /*
6716 * return the 'obp-path' based path for the given node, or NULL if the node
6717 * does not have a different obp path. NOTE: Unlike ddi_pathname, this
6718 * function can't be called from interrupt context (since we need to
6719 * lookup a string property).
6720 */
6721 char *
ddi_pathname_obp(dev_info_t * dip,char * path)6722 ddi_pathname_obp(dev_info_t *dip, char *path)
6723 {
6724 ASSERT(!servicing_interrupt());
6725 if (dip == NULL || path == NULL)
6726 return (NULL);
6727
6728 /* split work into a separate function to aid debugging */
6729 return (pathname_work_obp(dip, path));
6730 }
6731
6732 int
ddi_pathname_obp_set(dev_info_t * dip,char * component)6733 ddi_pathname_obp_set(dev_info_t *dip, char *component)
6734 {
6735 dev_info_t *pdip;
6736 char *obp_path = NULL;
6737 int rc = DDI_FAILURE;
6738
6739 if (dip == NULL)
6740 return (DDI_FAILURE);
6741
6742 obp_path = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
6743
6744 pdip = ddi_get_parent(dip);
6745
6746 if (ddi_pathname_obp(pdip, obp_path) == NULL) {
6747 (void) ddi_pathname(pdip, obp_path);
6748 }
6749
6750 if (component) {
6751 (void) strncat(obp_path, "/", MAXPATHLEN);
6752 (void) strncat(obp_path, component, MAXPATHLEN);
6753 }
6754 rc = ndi_prop_update_string(DDI_DEV_T_NONE, dip, "obp-path",
6755 obp_path);
6756
6757 if (obp_path)
6758 kmem_free(obp_path, MAXPATHLEN);
6759
6760 return (rc);
6761 }
6762
6763 /*
6764 * Given a dev_t, return the pathname of the corresponding device in the
6765 * buffer pointed at by "path." The buffer is assumed to be large enough
6766 * to hold the pathname of the device (MAXPATHLEN).
6767 *
6768 * The pathname of a device is the pathname of the devinfo node to which
6769 * the device "belongs," concatenated with the character ':' and the name
6770 * of the minor node corresponding to the dev_t. If spec_type is 0 then
6771 * just the pathname of the devinfo node is returned without driving attach
6772 * of that node. For a non-zero spec_type, an attach is performed and a
6773 * search of the minor list occurs.
6774 *
6775 * It is possible that the path associated with the dev_t is not
6776 * currently available in the devinfo tree. In order to have a
6777 * dev_t, a device must have been discovered before, which means
6778 * that the path is always in the instance tree. The one exception
6779 * to this is if the dev_t is associated with a pseudo driver, in
6780 * which case the device must exist on the pseudo branch of the
6781 * devinfo tree as a result of parsing .conf files.
6782 */
int
ddi_dev_pathname(dev_t devt, int spec_type, char *path)
{
	int circ;
	major_t major = getmajor(devt);
	int instance;
	dev_info_t *dip;
	char *minorname;
	char *drvname;

	/* an out-of-range major can never name a device */
	if (major >= devcnt)
		goto fail;
	if (major == clone_major) {
		/* clone has no minor nodes, manufacture the path here */
		if ((drvname = ddi_major_to_name(getminor(devt))) == NULL)
			goto fail;

		(void) snprintf(path, MAXPATHLEN, "%s:%s", CLONE_PATH, drvname);
		return (DDI_SUCCESS);
	}

	/* extract instance from devt (getinfo(9E) DDI_INFO_DEVT2INSTANCE). */
	if ((instance = dev_to_instance(devt)) == -1)
		goto fail;

	/* reconstruct the path given the major/instance */
	if (e_ddi_majorinstance_to_path(major, instance, path) != DDI_SUCCESS)
		goto fail;

	/* if spec_type given we must drive attach and search minor nodes */
	if ((spec_type == S_IFCHR) || (spec_type == S_IFBLK)) {
		/* attach the path so we can search minors */
		if ((dip = e_ddi_hold_devi_by_path(path, 0)) == NULL)
			goto fail;

		/* Add minorname to path, under the per-node enter lock. */
		ndi_devi_enter(dip, &circ);
		minorname = i_ddi_devtspectype_to_minorname(dip,
		    devt, spec_type);
		if (minorname) {
			(void) strcat(path, ":");
			(void) strcat(path, minorname);
		}
		ndi_devi_exit(dip, circ);
		ddi_release_devi(dip);
		if (minorname == NULL)
			goto fail;
	}
	ASSERT(strlen(path) < MAXPATHLEN);
	return (DDI_SUCCESS);

	/* on any failure return an empty path */
fail:	*path = 0;
	return (DDI_FAILURE);
}
6837
6838 /*
6839 * Given a major number and an instance, return the path.
6840 * This interface does NOT drive attach.
6841 */
int
e_ddi_majorinstance_to_path(major_t major, int instance, char *path)
{
	struct devnames *dnp;
	dev_info_t *dip;

	if ((major >= devcnt) || (instance == -1)) {
		*path = 0;
		return (DDI_FAILURE);
	}

	/* look for the major/instance in the instance tree */
	if (e_ddi_instance_majorinstance_to_path(major, instance,
	    path) == DDI_SUCCESS) {
		ASSERT(strlen(path) < MAXPATHLEN);
		return (DDI_SUCCESS);
	}

	/*
	 * Not in instance tree, find the instance on the per driver list and
	 * construct path to instance via ddi_pathname().  This is how paths
	 * down the 'pseudo' branch are constructed.
	 */
	dnp = &(devnamesp[major]);
	LOCK_DEV_OPS(&(dnp->dn_lock));
	for (dip = dnp->dn_head; dip;
	    dip = (dev_info_t *)DEVI(dip)->devi_next) {
		/* Skip if instance does not match. */
		if (DEVI(dip)->devi_instance != instance)
			continue;

		/*
		 * An ndi_hold_devi() does not prevent DS_INITIALIZED->DS_BOUND
		 * node demotion, so it is not an effective way of ensuring
		 * that the ddi_pathname result has a unit-address.  Instead,
		 * we reverify the node state after calling ddi_pathname().
		 */
		if (i_ddi_node_state(dip) >= DS_INITIALIZED) {
			(void) ddi_pathname(dip, path);
			/* demoted while we formatted: keep searching */
			if (i_ddi_node_state(dip) < DS_INITIALIZED)
				continue;
			UNLOCK_DEV_OPS(&(dnp->dn_lock));
			ASSERT(strlen(path) < MAXPATHLEN);
			return (DDI_SUCCESS);
		}
	}
	UNLOCK_DEV_OPS(&(dnp->dn_lock));

	/* can't reconstruct the path */
	*path = 0;
	return (DDI_FAILURE);
}
6894
6895 #define GLD_DRIVER_PPA "SUNW,gld_v0_ppa"
6896
6897 /*
6898 * Given the dip for a network interface return the ppa for that interface.
6899 *
6900 * In all cases except GLD v0 drivers, the ppa == instance.
6901 * In the case of GLD v0 drivers, the ppa is equal to the attach order.
6902 * So for these drivers when the attach routine calls gld_register(),
6903 * the GLD framework creates an integer property called "gld_driver_ppa"
6904 * that can be queried here.
6905 *
6906 * The only time this function is used is when a system is booting over nfs.
6907 * In this case the system has to resolve the pathname of the boot device
 * to its ppa.
6909 */
6910 int
i_ddi_devi_get_ppa(dev_info_t * dip)6911 i_ddi_devi_get_ppa(dev_info_t *dip)
6912 {
6913 return (ddi_prop_get_int(DDI_DEV_T_ANY, dip,
6914 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
6915 GLD_DRIVER_PPA, ddi_get_instance(dip)));
6916 }
6917
6918 /*
6919 * i_ddi_devi_set_ppa() should only be called from gld_register()
6920 * and only for GLD v0 drivers
6921 */
6922 void
i_ddi_devi_set_ppa(dev_info_t * dip,int ppa)6923 i_ddi_devi_set_ppa(dev_info_t *dip, int ppa)
6924 {
6925 (void) e_ddi_prop_update_int(DDI_DEV_T_NONE, dip, GLD_DRIVER_PPA, ppa);
6926 }
6927
6928
6929 /*
6930 * Private DDI Console bell functions.
6931 */
6932 void
ddi_ring_console_bell(clock_t duration)6933 ddi_ring_console_bell(clock_t duration)
6934 {
6935 if (ddi_console_bell_func != NULL)
6936 (*ddi_console_bell_func)(duration);
6937 }
6938
6939 void
ddi_set_console_bell(void (* bellfunc)(clock_t duration))6940 ddi_set_console_bell(void (*bellfunc)(clock_t duration))
6941 {
6942 ddi_console_bell_func = bellfunc;
6943 }
6944
6945 int
ddi_dma_alloc_handle(dev_info_t * dip,ddi_dma_attr_t * attr,int (* waitfp)(caddr_t),caddr_t arg,ddi_dma_handle_t * handlep)6946 ddi_dma_alloc_handle(dev_info_t *dip, ddi_dma_attr_t *attr,
6947 int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
6948 {
6949 int (*funcp)() = ddi_dma_allochdl;
6950 ddi_dma_attr_t dma_attr;
6951 struct bus_ops *bop;
6952
6953 if (attr == (ddi_dma_attr_t *)0)
6954 return (DDI_DMA_BADATTR);
6955
6956 dma_attr = *attr;
6957
6958 bop = DEVI(dip)->devi_ops->devo_bus_ops;
6959 if (bop && bop->bus_dma_allochdl)
6960 funcp = bop->bus_dma_allochdl;
6961
6962 return ((*funcp)(dip, dip, &dma_attr, waitfp, arg, handlep));
6963 }
6964
void
ddi_dma_free_handle(ddi_dma_handle_t *handlep)
{
	/*
	 * NOTE(review): the local must be named 'h' -- the HD macro
	 * (defined earlier in this file) appears to expand in terms of a
	 * variable named 'h'; confirm against the macro definition.
	 */
	ddi_dma_handle_t h = *handlep;
	(void) ddi_dma_freehdl(HD, HD, h);
}
6971
6972 static uintptr_t dma_mem_list_id = 0;
6973
6974
6975 int
ddi_dma_mem_alloc(ddi_dma_handle_t handle,size_t length,ddi_device_acc_attr_t * accattrp,uint_t flags,int (* waitfp)(caddr_t),caddr_t arg,caddr_t * kaddrp,size_t * real_length,ddi_acc_handle_t * handlep)6976 ddi_dma_mem_alloc(ddi_dma_handle_t handle, size_t length,
6977 ddi_device_acc_attr_t *accattrp, uint_t flags,
6978 int (*waitfp)(caddr_t), caddr_t arg, caddr_t *kaddrp,
6979 size_t *real_length, ddi_acc_handle_t *handlep)
6980 {
6981 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
6982 dev_info_t *dip = hp->dmai_rdip;
6983 ddi_acc_hdl_t *ap;
6984 ddi_dma_attr_t *attrp = &hp->dmai_attr;
6985 uint_t sleepflag, xfermodes;
6986 int (*fp)(caddr_t);
6987 int rval;
6988
6989 if (waitfp == DDI_DMA_SLEEP)
6990 fp = (int (*)())KM_SLEEP;
6991 else if (waitfp == DDI_DMA_DONTWAIT)
6992 fp = (int (*)())KM_NOSLEEP;
6993 else
6994 fp = waitfp;
6995 *handlep = impl_acc_hdl_alloc(fp, arg);
6996 if (*handlep == NULL)
6997 return (DDI_FAILURE);
6998
6999 /* check if the cache attributes are supported */
7000 if (i_ddi_check_cache_attr(flags) == B_FALSE)
7001 return (DDI_FAILURE);
7002
7003 /*
7004 * Transfer the meaningful bits to xfermodes.
7005 * Double-check if the 3rd party driver correctly sets the bits.
7006 * If not, set DDI_DMA_STREAMING to keep compatibility.
7007 */
7008 xfermodes = flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING);
7009 if (xfermodes == 0) {
7010 xfermodes = DDI_DMA_STREAMING;
7011 }
7012
7013 /*
7014 * initialize the common elements of data access handle
7015 */
7016 ap = impl_acc_hdl_get(*handlep);
7017 ap->ah_vers = VERS_ACCHDL;
7018 ap->ah_dip = dip;
7019 ap->ah_offset = 0;
7020 ap->ah_len = 0;
7021 ap->ah_xfermodes = flags;
7022 ap->ah_acc = *accattrp;
7023
7024 sleepflag = ((waitfp == DDI_DMA_SLEEP) ? 1 : 0);
7025 if (xfermodes == DDI_DMA_CONSISTENT) {
7026 rval = i_ddi_mem_alloc(dip, attrp, length, sleepflag,
7027 flags, accattrp, kaddrp, NULL, ap);
7028 *real_length = length;
7029 } else {
7030 rval = i_ddi_mem_alloc(dip, attrp, length, sleepflag,
7031 flags, accattrp, kaddrp, real_length, ap);
7032 }
7033 if (rval == DDI_SUCCESS) {
7034 ap->ah_len = (off_t)(*real_length);
7035 ap->ah_addr = *kaddrp;
7036 } else {
7037 impl_acc_hdl_free(*handlep);
7038 *handlep = (ddi_acc_handle_t)NULL;
7039 if (waitfp != DDI_DMA_SLEEP && waitfp != DDI_DMA_DONTWAIT) {
7040 ddi_set_callback(waitfp, arg, &dma_mem_list_id);
7041 }
7042 rval = DDI_FAILURE;
7043 }
7044 return (rval);
7045 }
7046
void
ddi_dma_mem_free(ddi_acc_handle_t *handlep)
{
	ddi_acc_hdl_t *ap;

	ap = impl_acc_hdl_get(*handlep);
	ASSERT(ap);

	/* release the DMA memory described by the access handle */
	i_ddi_mem_free((caddr_t)ap->ah_addr, ap);

	/*
	 * free the handle
	 */
	impl_acc_hdl_free(*handlep);
	*handlep = (ddi_acc_handle_t)NULL;

	/* wake any allocators queued by ddi_dma_mem_alloc() failures */
	if (dma_mem_list_id != 0) {
		ddi_run_callback(&dma_mem_list_id);
	}
}
7067
int
ddi_dma_buf_bind_handle(ddi_dma_handle_t handle, struct buf *bp,
    uint_t flags, int (*waitfp)(caddr_t), caddr_t arg,
    ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
	dev_info_t *dip, *rdip;
	struct ddi_dma_req dmareq;
	int (*funcp)();

	/* describe the buf(9S) as a DMA request object */
	dmareq.dmar_flags = flags;
	dmareq.dmar_fp = waitfp;
	dmareq.dmar_arg = arg;
	dmareq.dmar_object.dmao_size = (uint_t)bp->b_bcount;

	if (bp->b_flags & B_PAGEIO) {
		/* paged I/O: the object is a page list, plus page offset */
		dmareq.dmar_object.dmao_type = DMA_OTYP_PAGES;
		dmareq.dmar_object.dmao_obj.pp_obj.pp_pp = bp->b_pages;
		dmareq.dmar_object.dmao_obj.pp_obj.pp_offset =
		    (uint_t)(((uintptr_t)bp->b_un.b_addr) & MMU_PAGEOFFSET);
	} else {
		dmareq.dmar_object.dmao_obj.virt_obj.v_addr = bp->b_un.b_addr;
		if (bp->b_flags & B_SHADOW) {
			/* shadow page list accompanies the virtual address */
			dmareq.dmar_object.dmao_obj.virt_obj.v_priv =
			    bp->b_shadow;
			dmareq.dmar_object.dmao_type = DMA_OTYP_BUFVADDR;
		} else {
			dmareq.dmar_object.dmao_type =
			    (bp->b_flags & (B_PHYS | B_REMAPPED)) ?
			    DMA_OTYP_BUFVADDR : DMA_OTYP_VADDR;
			dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;
		}

		/*
		 * If the buffer has no proc pointer, or the proc
		 * struct has the kernel address space, or the buffer has
		 * been marked B_REMAPPED (meaning that it is now
		 * mapped into the kernel's address space), then
		 * the address space is kas (kernel address space).
		 */
		if ((bp->b_proc == NULL) || (bp->b_proc->p_as == &kas) ||
		    (bp->b_flags & B_REMAPPED)) {
			dmareq.dmar_object.dmao_obj.virt_obj.v_as = 0;
		} else {
			dmareq.dmar_object.dmao_obj.virt_obj.v_as =
			    bp->b_proc->p_as;
		}
	}

	/* dispatch to the bindhdl implementation cached on the node */
	dip = rdip = hp->dmai_rdip;
	if (dip != ddi_root_node())
		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl;
	funcp = DEVI(rdip)->devi_bus_dma_bindfunc;
	return ((*funcp)(dip, rdip, handle, &dmareq, cookiep, ccountp));
}
7123
7124 int
ddi_dma_addr_bind_handle(ddi_dma_handle_t handle,struct as * as,caddr_t addr,size_t len,uint_t flags,int (* waitfp)(caddr_t),caddr_t arg,ddi_dma_cookie_t * cookiep,uint_t * ccountp)7125 ddi_dma_addr_bind_handle(ddi_dma_handle_t handle, struct as *as,
7126 caddr_t addr, size_t len, uint_t flags, int (*waitfp)(caddr_t),
7127 caddr_t arg, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
7128 {
7129 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7130 dev_info_t *dip, *rdip;
7131 struct ddi_dma_req dmareq;
7132 int (*funcp)();
7133
7134 if (len == (uint_t)0) {
7135 return (DDI_DMA_NOMAPPING);
7136 }
7137 dmareq.dmar_flags = flags;
7138 dmareq.dmar_fp = waitfp;
7139 dmareq.dmar_arg = arg;
7140 dmareq.dmar_object.dmao_size = len;
7141 dmareq.dmar_object.dmao_type = DMA_OTYP_VADDR;
7142 dmareq.dmar_object.dmao_obj.virt_obj.v_as = as;
7143 dmareq.dmar_object.dmao_obj.virt_obj.v_addr = addr;
7144 dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;
7145
7146 dip = rdip = hp->dmai_rdip;
7147 if (dip != ddi_root_node())
7148 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl;
7149 funcp = DEVI(rdip)->devi_bus_dma_bindfunc;
7150 return ((*funcp)(dip, rdip, handle, &dmareq, cookiep, ccountp));
7151 }
7152
7153 void
ddi_dma_nextcookie(ddi_dma_handle_t handle,ddi_dma_cookie_t * cookiep)7154 ddi_dma_nextcookie(ddi_dma_handle_t handle, ddi_dma_cookie_t *cookiep)
7155 {
7156 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7157 ddi_dma_cookie_t *cp;
7158
7159 cp = hp->dmai_cookie;
7160 ASSERT(cp);
7161
7162 cookiep->dmac_notused = cp->dmac_notused;
7163 cookiep->dmac_type = cp->dmac_type;
7164 cookiep->dmac_address = cp->dmac_address;
7165 cookiep->dmac_size = cp->dmac_size;
7166 hp->dmai_cookie++;
7167 }
7168
7169 int
ddi_dma_numwin(ddi_dma_handle_t handle,uint_t * nwinp)7170 ddi_dma_numwin(ddi_dma_handle_t handle, uint_t *nwinp)
7171 {
7172 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7173 if ((hp->dmai_rflags & DDI_DMA_PARTIAL) == 0) {
7174 return (DDI_FAILURE);
7175 } else {
7176 *nwinp = hp->dmai_nwin;
7177 return (DDI_SUCCESS);
7178 }
7179 }
7180
7181 int
ddi_dma_getwin(ddi_dma_handle_t h,uint_t win,off_t * offp,size_t * lenp,ddi_dma_cookie_t * cookiep,uint_t * ccountp)7182 ddi_dma_getwin(ddi_dma_handle_t h, uint_t win, off_t *offp,
7183 size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
7184 {
7185 int (*funcp)() = ddi_dma_win;
7186 struct bus_ops *bop;
7187
7188 bop = DEVI(HD)->devi_ops->devo_bus_ops;
7189 if (bop && bop->bus_dma_win)
7190 funcp = bop->bus_dma_win;
7191
7192 return ((*funcp)(HD, HD, h, win, offp, lenp, cookiep, ccountp));
7193 }
7194
int
ddi_dma_set_sbus64(ddi_dma_handle_t h, ulong_t burstsizes)
{
	/* request 64-bit SBus transfers via the generic DMA mctl entry */
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_SET_SBUS64, 0,
	    &burstsizes, 0, 0));
}
7201
int
i_ddi_dma_fault_check(ddi_dma_impl_t *hp)
{
	/* default fault check: report the handle's sticky fault flag */
	return (hp->dmai_fault);
}
7207
7208 int
ddi_check_dma_handle(ddi_dma_handle_t handle)7209 ddi_check_dma_handle(ddi_dma_handle_t handle)
7210 {
7211 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7212 int (*check)(ddi_dma_impl_t *);
7213
7214 if ((check = hp->dmai_fault_check) == NULL)
7215 check = i_ddi_dma_fault_check;
7216
7217 return (((*check)(hp) == DDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
7218 }
7219
7220 void
i_ddi_dma_set_fault(ddi_dma_handle_t handle)7221 i_ddi_dma_set_fault(ddi_dma_handle_t handle)
7222 {
7223 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7224 void (*notify)(ddi_dma_impl_t *);
7225
7226 if (!hp->dmai_fault) {
7227 hp->dmai_fault = 1;
7228 if ((notify = hp->dmai_fault_notify) != NULL)
7229 (*notify)(hp);
7230 }
7231 }
7232
/*
 * Clear the fault state of a DMA handle.  On the 1 -> 0 transition
 * only, the handle's fault-notify callback (if registered) is invoked;
 * clearing an unfaulted handle is a no-op.
 */
void
i_ddi_dma_clr_fault(ddi_dma_handle_t handle)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
	void (*notify)(ddi_dma_impl_t *);

	if (hp->dmai_fault) {
		hp->dmai_fault = 0;
		if ((notify = hp->dmai_fault_notify) != NULL)
			(*notify)(hp);
	}
}
7245
7246 /*
7247 * register mapping routines.
7248 */
/*
 * Map register set "rnumber" of dip into kernel virtual address space.
 * On success, *addrp holds the mapped kernel address and *handle an
 * access handle describing the mapping; on failure *handle is NULL.
 * The handle is allocated with KM_SLEEP, so this may block.
 */
int
ddi_regs_map_setup(dev_info_t *dip, uint_t rnumber, caddr_t *addrp,
    offset_t offset, offset_t len, ddi_device_acc_attr_t *accattrp,
    ddi_acc_handle_t *handle)
{
	ddi_map_req_t mr;
	ddi_acc_hdl_t *hp;
	int result;

	/*
	 * Allocate and initialize the common elements of data access handle.
	 */
	*handle = impl_acc_hdl_alloc(KM_SLEEP, NULL);
	hp = impl_acc_hdl_get(*handle);
	hp->ah_vers = VERS_ACCHDL;
	hp->ah_dip = dip;
	hp->ah_rnumber = rnumber;
	hp->ah_offset = offset;
	hp->ah_len = len;
	hp->ah_acc = *accattrp;		/* copy caller's access attributes */

	/*
	 * Set up the mapping request and call to parent.
	 */
	mr.map_op = DDI_MO_MAP_LOCKED;
	mr.map_type = DDI_MT_RNUMBER;
	mr.map_obj.rnumber = rnumber;
	mr.map_prot = PROT_READ | PROT_WRITE;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_handlep = hp;
	mr.map_vers = DDI_MAP_VERSION;
	result = ddi_map(dip, &mr, offset, len, addrp);

	/*
	 * check for end result: free the handle on failure, otherwise
	 * remember the mapped address for the matching unmap.
	 */
	if (result != DDI_SUCCESS) {
		impl_acc_hdl_free(*handle);
		*handle = (ddi_acc_handle_t)NULL;
	} else {
		hp->ah_addr = *addrp;
	}

	return (result);
}
7294
/*
 * Undo a successful ddi_regs_map_setup(): ask the parent nexus to
 * unmap the registers recorded in the access handle, then free the
 * handle and NULL out the caller's copy.
 */
void
ddi_regs_map_free(ddi_acc_handle_t *handlep)
{
	ddi_map_req_t mr;
	ddi_acc_hdl_t *hp;

	hp = impl_acc_hdl_get(*handlep);
	ASSERT(hp);

	/* rebuild the original mapping request, but as an unmap */
	mr.map_op = DDI_MO_UNMAP;
	mr.map_type = DDI_MT_RNUMBER;
	mr.map_obj.rnumber = hp->ah_rnumber;
	mr.map_prot = PROT_READ | PROT_WRITE;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_handlep = hp;
	mr.map_vers = DDI_MAP_VERSION;

	/*
	 * Call my parent to unmap my regs.
	 */
	(void) ddi_map(hp->ah_dip, &mr, hp->ah_offset,
	    hp->ah_len, &hp->ah_addr);
	/*
	 * free the handle
	 */
	impl_acc_hdl_free(*handlep);
	*handlep = (ddi_acc_handle_t)NULL;
}
7323
/*
 * Zero "bytecount" bytes of device memory through an access handle, in
 * units of dev_datasz.  After each store the device address advances by
 * dev_advcnt *elements* (may be zero for a FIFO-style register, or
 * negative).  Returns DDI_FAILURE when bytecount is not a multiple of
 * dev_datasz or the size code is unrecognized.
 */
int
ddi_device_zero(ddi_acc_handle_t handle, caddr_t dev_addr, size_t bytecount,
    ssize_t dev_advcnt, uint_t dev_datasz)
{
	uint8_t *b;
	uint16_t *w;
	uint32_t *l;
	uint64_t *ll;

	/* check for total byte count is multiple of data transfer size */
	if (bytecount != ((bytecount / dev_datasz) * dev_datasz))
		return (DDI_FAILURE);

	switch (dev_datasz) {
	case DDI_DATA_SZ01_ACC:
		for (b = (uint8_t *)dev_addr;
		    bytecount != 0; bytecount -= 1, b += dev_advcnt)
			ddi_put8(handle, b, 0);
		break;
	case DDI_DATA_SZ02_ACC:
		for (w = (uint16_t *)dev_addr;
		    bytecount != 0; bytecount -= 2, w += dev_advcnt)
			ddi_put16(handle, w, 0);
		break;
	case DDI_DATA_SZ04_ACC:
		for (l = (uint32_t *)dev_addr;
		    bytecount != 0; bytecount -= 4, l += dev_advcnt)
			ddi_put32(handle, l, 0);
		break;
	case DDI_DATA_SZ08_ACC:
		for (ll = (uint64_t *)dev_addr;
		    bytecount != 0; bytecount -= 8, ll += dev_advcnt)
			ddi_put64(handle, ll, 0x0ll);
		break;
	default:
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}
7363
/*
 * Copy "bytecount" bytes between two device (or device/host) regions
 * through their access handles, in units of dev_datasz.  After each
 * element transfer the source and destination addresses advance by
 * src_advcnt/dest_advcnt *elements* respectively (either may be zero
 * or negative).  Returns DDI_FAILURE when bytecount is not a multiple
 * of dev_datasz or the size code is unrecognized.
 */
int
ddi_device_copy(
    ddi_acc_handle_t src_handle, caddr_t src_addr, ssize_t src_advcnt,
    ddi_acc_handle_t dest_handle, caddr_t dest_addr, ssize_t dest_advcnt,
    size_t bytecount, uint_t dev_datasz)
{
	uint8_t *b_src, *b_dst;
	uint16_t *w_src, *w_dst;
	uint32_t *l_src, *l_dst;
	uint64_t *ll_src, *ll_dst;

	/* check for total byte count is multiple of data transfer size */
	if (bytecount != ((bytecount / dev_datasz) * dev_datasz))
		return (DDI_FAILURE);

	switch (dev_datasz) {
	case DDI_DATA_SZ01_ACC:
		b_src = (uint8_t *)src_addr;
		b_dst = (uint8_t *)dest_addr;

		for (; bytecount != 0; bytecount -= 1) {
			ddi_put8(dest_handle, b_dst,
			    ddi_get8(src_handle, b_src));
			b_dst += dest_advcnt;
			b_src += src_advcnt;
		}
		break;
	case DDI_DATA_SZ02_ACC:
		w_src = (uint16_t *)src_addr;
		w_dst = (uint16_t *)dest_addr;

		for (; bytecount != 0; bytecount -= 2) {
			ddi_put16(dest_handle, w_dst,
			    ddi_get16(src_handle, w_src));
			w_dst += dest_advcnt;
			w_src += src_advcnt;
		}
		break;
	case DDI_DATA_SZ04_ACC:
		l_src = (uint32_t *)src_addr;
		l_dst = (uint32_t *)dest_addr;

		for (; bytecount != 0; bytecount -= 4) {
			ddi_put32(dest_handle, l_dst,
			    ddi_get32(src_handle, l_src));
			l_dst += dest_advcnt;
			l_src += src_advcnt;
		}
		break;
	case DDI_DATA_SZ08_ACC:
		ll_src = (uint64_t *)src_addr;
		ll_dst = (uint64_t *)dest_addr;

		for (; bytecount != 0; bytecount -= 8) {
			ddi_put64(dest_handle, ll_dst,
			    ddi_get64(src_handle, ll_src));
			ll_dst += dest_advcnt;
			ll_src += src_advcnt;
		}
		break;
	default:
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}
7429
/* byte-swap a 16-bit value */
#define	swap16(value)  \
	((((value) & 0xff) << 8) | ((value) >> 8))

/* byte-swap a 32-bit value, composed from two 16-bit swaps */
#define	swap32(value)	\
	(((uint32_t)swap16((uint16_t)((value) & 0xffff)) << 16) | \
	(uint32_t)swap16((uint16_t)((value) >> 16)))

/* byte-swap a 64-bit value, composed from two 32-bit swaps */
#define	swap64(value)	\
	(((uint64_t)swap32((uint32_t)((value) & 0xffffffff)) \
	    << 32) | \
	(uint64_t)swap32((uint32_t)((value) >> 32)))
/*
 * Return "value" with its two bytes exchanged.
 */
uint16_t
ddi_swap16(uint16_t value)
{
	return ((uint16_t)(((value & 0xff) << 8) | (value >> 8)));
}
7447
/*
 * Return "value" with its four bytes reversed.
 */
uint32_t
ddi_swap32(uint32_t value)
{
	return (((value & 0x000000ffU) << 24) |
	    ((value & 0x0000ff00U) << 8) |
	    ((value & 0x00ff0000U) >> 8) |
	    (value >> 24));
}
7453
/*
 * Return "value" with its eight bytes reversed, one byte at a time.
 */
uint64_t
ddi_swap64(uint64_t value)
{
	uint64_t swapped = 0;
	int i;

	for (i = 0; i < 8; i++) {
		swapped = (swapped << 8) | (value & 0xff);
		value >>= 8;
	}
	return (swapped);
}
7459
7460 /*
7461 * Convert a binding name to a driver name.
7462 * A binding name is the name used to determine the driver for a
7463 * device - it may be either an alias for the driver or the name
7464 * of the driver itself.
7465 */
7466 char *
i_binding_to_drv_name(char * bname)7467 i_binding_to_drv_name(char *bname)
7468 {
7469 major_t major_no;
7470
7471 ASSERT(bname != NULL);
7472
7473 if ((major_no = ddi_name_to_major(bname)) == -1)
7474 return (NULL);
7475 return (ddi_major_to_name(major_no));
7476 }
7477
7478 /*
7479 * Search for minor name that has specified dev_t and spec_type.
7480 * If spec_type is zero then any dev_t match works. Since we
7481 * are returning a pointer to the minor name string, we require the
7482 * caller to do the locking.
7483 */
7484 char *
i_ddi_devtspectype_to_minorname(dev_info_t * dip,dev_t dev,int spec_type)7485 i_ddi_devtspectype_to_minorname(dev_info_t *dip, dev_t dev, int spec_type)
7486 {
7487 struct ddi_minor_data *dmdp;
7488
7489 /*
7490 * The did layered driver currently intentionally returns a
7491 * devinfo ptr for an underlying sd instance based on a did
7492 * dev_t. In this case it is not an error.
7493 *
7494 * The did layered driver is associated with Sun Cluster.
7495 */
7496 ASSERT((ddi_driver_major(dip) == getmajor(dev)) ||
7497 (strcmp(ddi_major_to_name(getmajor(dev)), "did") == 0));
7498
7499 ASSERT(DEVI_BUSY_OWNED(dip));
7500 for (dmdp = DEVI(dip)->devi_minor; dmdp; dmdp = dmdp->next) {
7501 if (((dmdp->type == DDM_MINOR) ||
7502 (dmdp->type == DDM_INTERNAL_PATH) ||
7503 (dmdp->type == DDM_DEFAULT)) &&
7504 (dmdp->ddm_dev == dev) &&
7505 ((((spec_type & (S_IFCHR|S_IFBLK))) == 0) ||
7506 (dmdp->ddm_spec_type == spec_type)))
7507 return (dmdp->ddm_name);
7508 }
7509
7510 return (NULL);
7511 }
7512
7513 /*
7514 * Find the devt and spectype of the specified minor_name.
7515 * Return DDI_FAILURE if minor_name not found. Since we are
7516 * returning everything via arguments we can do the locking.
7517 */
int
i_ddi_minorname_to_devtspectype(dev_info_t *dip, char *minor_name,
    dev_t *devtp, int *spectypep)
{
	int circ;
	struct ddi_minor_data *dmdp;

	/* deal with clone minor nodes */
	if (dip == clone_dip) {
		major_t major;
		/*
		 * Make sure minor_name is a STREAMS driver.
		 * We load the driver but don't attach to any instances.
		 */

		major = ddi_name_to_major(minor_name);
		if (major == DDI_MAJOR_T_NONE)
			return (DDI_FAILURE);

		if (ddi_hold_driver(major) == NULL)
			return (DDI_FAILURE);

		/* reject non-STREAMS drivers, then drop the hold */
		if (STREAMSTAB(major) == NULL) {
			ddi_rele_driver(major);
			return (DDI_FAILURE);
		}
		ddi_rele_driver(major);

		/* clone dev_t: clone major with the target's major as minor */
		if (devtp)
			*devtp = makedevice(clone_major, (minor_t)major);

		if (spectypep)
			*spectypep = S_IFCHR;

		return (DDI_SUCCESS);
	}

	/* walk dip's minor list under the devinfo lock */
	ndi_devi_enter(dip, &circ);
	for (dmdp = DEVI(dip)->devi_minor; dmdp; dmdp = dmdp->next) {
		if (((dmdp->type != DDM_MINOR) &&
		    (dmdp->type != DDM_INTERNAL_PATH) &&
		    (dmdp->type != DDM_DEFAULT)) ||
		    strcmp(minor_name, dmdp->ddm_name))
			continue;

		if (devtp)
			*devtp = dmdp->ddm_dev;

		if (spectypep)
			*spectypep = dmdp->ddm_spec_type;

		ndi_devi_exit(dip, circ);
		return (DDI_SUCCESS);
	}
	ndi_devi_exit(dip, circ);

	/* minor_name not found on dip */
	return (DDI_FAILURE);
}
7576
/* serializes allocation of generation numbers for DEVID_FAB devids */
static kmutex_t devid_gen_mutex;
/* monotonically increasing generation number for DEVID_FAB devids */
static short devid_gen_number;

#ifdef DEBUG

/* Tunables to deliberately corrupt registered devids for testing. */
static int devid_register_corrupt = 0;
static int devid_register_corrupt_major = 0;
static int devid_register_corrupt_hint = 0;
static int devid_register_corrupt_hint_major = 0;

/* when nonzero, trace devid-layer dev_t lists via cmn_err */
static int devid_lyr_debug = 0;

#define	DDI_DEBUG_DEVID_DEVTS(msg, ndevs, devs)		\
	if (devid_lyr_debug)				\
		ddi_debug_devid_devts(msg, ndevs, devs)

#else

#define	DDI_DEBUG_DEVID_DEVTS(msg, ndevs, devs)

#endif	/* DEBUG */
7598
7599
7600 #ifdef DEBUG
7601
/*
 * Debug helper: print a caption followed by each dev_t in the list.
 */
static void
ddi_debug_devid_devts(char *msg, int ndevs, dev_t *devs)
{
	int i;

	cmn_err(CE_CONT, "%s:\n", msg);
	for (i = 0; i < ndevs; i++) {
		cmn_err(CE_CONT, "    0x%lx\n", devs[i]);
	}
}
7612
/*
 * Debug helper: print a caption followed by each path string.
 */
static void
ddi_debug_devid_paths(char *msg, int npaths, char **paths)
{
	int i;

	cmn_err(CE_CONT, "%s:\n", msg);
	for (i = 0; i < npaths; i++) {
		cmn_err(CE_CONT, "    %s\n", paths[i]);
	}
}
7623
/*
 * Debug helper: print the dev_ts associated with a single device path.
 */
static void
ddi_debug_devid_devts_per_path(char *path, int ndevs, dev_t *devs)
{
	int i;

	cmn_err(CE_CONT, "dev_ts per path %s\n", path);
	for (i = 0; i < ndevs; i++) {
		cmn_err(CE_CONT, "    0x%lx\n", devs[i]);
	}
}
7634
7635 #endif /* DEBUG */
7636
7637 /*
7638 * Register device id into DDI framework.
7639 * Must be called when the driver is bound.
7640 */
static int
i_ddi_devid_register(dev_info_t *dip, ddi_devid_t devid)
{
	impl_devid_t	*i_devid = (impl_devid_t *)devid;
	size_t		driver_len;
	const char	*driver_name;
	char		*devid_str;
	major_t		major;

	/* must have a dip that is bound to a driver */
	if ((dip == NULL) ||
	    ((major = ddi_driver_major(dip)) == DDI_MAJOR_T_NONE))
		return (DDI_FAILURE);

	/* verify that the devid is valid */
	if (ddi_devid_valid(devid) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Updating driver name hint in devid.  The hint field holds at
	 * most DEVID_HINT_SIZE characters and is not NUL-terminated.
	 */
	driver_name = ddi_driver_name(dip);
	driver_len = strlen(driver_name);
	if (driver_len > DEVID_HINT_SIZE) {
		/* Pick up last four characters of driver name */
		driver_name += driver_len - DEVID_HINT_SIZE;
		driver_len = DEVID_HINT_SIZE;
	}
	bzero(i_devid->did_driver, DEVID_HINT_SIZE);
	bcopy(driver_name, i_devid->did_driver, driver_len);

#ifdef DEBUG
	/* Corrupt the devid for testing. */
	if (devid_register_corrupt)
		i_devid->did_id[0] += devid_register_corrupt;
	if (devid_register_corrupt_major &&
	    (major == devid_register_corrupt_major))
		i_devid->did_id[0] += 1;
	if (devid_register_corrupt_hint)
		i_devid->did_driver[0] += devid_register_corrupt_hint;
	if (devid_register_corrupt_hint_major &&
	    (major == devid_register_corrupt_hint_major))
		i_devid->did_driver[0] += 1;
#endif /* DEBUG */

	/* encode the devid as a string */
	if ((devid_str = ddi_devid_str_encode(devid, NULL)) == NULL)
		return (DDI_FAILURE);

	/* add string as a string property */
	if (ndi_prop_update_string(DDI_DEV_T_NONE, dip,
	    DEVID_PROP_NAME, devid_str) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s%d: devid property update failed",
		    ddi_driver_name(dip), ddi_get_instance(dip));
		ddi_devid_str_free(devid_str);
		return (DDI_FAILURE);
	}

	/* keep pointer to devid string for interrupt context fma code */
	if (DEVI(dip)->devi_devid_str)
		ddi_devid_str_free(DEVI(dip)->devi_devid_str);
	DEVI(dip)->devi_devid_str = devid_str;
	return (DDI_SUCCESS);
}
7702
/*
 * Public entry point: register a devid with the DDI framework and, on
 * success, also enter it in the devid-to-path cache.  Failures are
 * reported via cmn_err; the framework registration result is returned.
 */
int
ddi_devid_register(dev_info_t *dip, ddi_devid_t devid)
{
	int rval;

	rval = i_ddi_devid_register(dip, devid);
	if (rval == DDI_SUCCESS) {
		/*
		 * Register devid in devid-to-path cache
		 */
		if (e_devid_cache_register(dip, devid) == DDI_SUCCESS) {
			/* flag that the devid is cached (under devi_lock) */
			mutex_enter(&DEVI(dip)->devi_lock);
			DEVI(dip)->devi_flags |= DEVI_CACHED_DEVID;
			mutex_exit(&DEVI(dip)->devi_lock);
		} else if (ddi_get_name_addr(dip)) {
			/*
			 * We only expect cache_register DDI_FAILURE when we
			 * can't form the full path because of NULL devi_addr.
			 */
			cmn_err(CE_WARN, "%s%d: failed to cache devid",
			    ddi_driver_name(dip), ddi_get_instance(dip));
		}
	} else {
		cmn_err(CE_WARN, "%s%d: failed to register devid",
		    ddi_driver_name(dip), ddi_get_instance(dip));
	}
	return (rval);
}
7731
7732 /*
7733 * Remove (unregister) device id from DDI framework.
7734 * Must be called when device is detached.
7735 */
7736 static void
i_ddi_devid_unregister(dev_info_t * dip)7737 i_ddi_devid_unregister(dev_info_t *dip)
7738 {
7739 if (DEVI(dip)->devi_devid_str) {
7740 ddi_devid_str_free(DEVI(dip)->devi_devid_str);
7741 DEVI(dip)->devi_devid_str = NULL;
7742 }
7743
7744 /* remove the devid property */
7745 (void) ndi_prop_remove(DDI_DEV_T_NONE, dip, DEVID_PROP_NAME);
7746 }
7747
/*
 * Public entry point: remove a devid from the devid cache and the DDI
 * framework.  Clears the DEVI_CACHED_DEVID flag under devi_lock first.
 */
void
ddi_devid_unregister(dev_info_t *dip)
{
	mutex_enter(&DEVI(dip)->devi_lock);
	DEVI(dip)->devi_flags &= ~DEVI_CACHED_DEVID;
	mutex_exit(&DEVI(dip)->devi_lock);
	e_devid_cache_unregister(dip);
	i_ddi_devid_unregister(dip);
}
7757
7758 /*
7759 * Allocate and initialize a device id.
7760 */
int
ddi_devid_init(
	dev_info_t	*dip,
	ushort_t	devid_type,	/* devid type (DEVID_*) */
	ushort_t	nbytes,		/* length of id; 0 for DEVID_FAB */
	void		*id,		/* id bytes; NULL for DEVID_FAB */
	ddi_devid_t	*ret_devid)
{
	impl_devid_t	*i_devid;
	/* header size plus id payload; did_id's first char is in the struct */
	int		sz = sizeof (*i_devid) + nbytes - sizeof (char);
	int		driver_len;
	const char	*driver_name;

	switch (devid_type) {
	case DEVID_SCSI3_WWN:
		/*FALLTHRU*/
	case DEVID_SCSI_SERIAL:
		/*FALLTHRU*/
	case DEVID_ATA_SERIAL:
		/*FALLTHRU*/
	case DEVID_ENCAP:
		/* caller-supplied id types require a non-empty id */
		if (nbytes == 0)
			return (DDI_FAILURE);
		if (id == NULL)
			return (DDI_FAILURE);
		break;
	case DEVID_FAB:
		/* fabricated ids are built here; caller supplies nothing */
		if (nbytes != 0)
			return (DDI_FAILURE);
		if (id != NULL)
			return (DDI_FAILURE);
		/* hostid + timestamp + generation number */
		nbytes = sizeof (int) +
		    sizeof (struct timeval32) + sizeof (short);
		sz += nbytes;
		break;
	default:
		return (DDI_FAILURE);
	}

	/* NOTE(review): KM_SLEEP allocations do not fail; check is benign */
	if ((i_devid = kmem_zalloc(sz, KM_SLEEP)) == NULL)
		return (DDI_FAILURE);

	/* fill in the fixed devid header */
	i_devid->did_magic_hi = DEVID_MAGIC_MSB;
	i_devid->did_magic_lo = DEVID_MAGIC_LSB;
	i_devid->did_rev_hi = DEVID_REV_MSB;
	i_devid->did_rev_lo = DEVID_REV_LSB;
	DEVID_FORMTYPE(i_devid, devid_type);
	DEVID_FORMLEN(i_devid, nbytes);

	/* Fill in driver name hint (at most DEVID_HINT_SIZE chars) */
	driver_name = ddi_driver_name(dip);
	driver_len = strlen(driver_name);
	if (driver_len > DEVID_HINT_SIZE) {
		/* Pick up last four characters of driver name */
		driver_name += driver_len - DEVID_HINT_SIZE;
		driver_len = DEVID_HINT_SIZE;
	}

	bcopy(driver_name, i_devid->did_driver, driver_len);

	/* Fill in id field */
	if (devid_type == DEVID_FAB) {
		char		*cp;
		uint32_t	hostid;
		struct timeval32 timestamp32;
		int		i;
		int		*ip;
		short		gen;

		/* increase the generation number */
		mutex_enter(&devid_gen_mutex);
		gen = devid_gen_number++;
		mutex_exit(&devid_gen_mutex);

		cp = i_devid->did_id;

		/* Fill in host id (big-endian byte ordering) */
		hostid = zone_get_hostid(NULL);
		*cp++ = hibyte(hiword(hostid));
		*cp++ = lobyte(hiword(hostid));
		*cp++ = hibyte(loword(hostid));
		*cp++ = lobyte(loword(hostid));

		/*
		 * Fill in timestamp (big-endian byte ordering)
		 *
		 * (Note that the format may have to be changed
		 * before 2038 comes around, though it's arguably
		 * unique enough as it is..)
		 */
		uniqtime32(&timestamp32);
		ip = (int *)&timestamp32;
		for (i = 0;
		    i < sizeof (timestamp32) / sizeof (int); i++, ip++) {
			int	val;
			val = *ip;
			*cp++ = hibyte(hiword(val));
			*cp++ = lobyte(hiword(val));
			*cp++ = hibyte(loword(val));
			*cp++ = lobyte(loword(val));
		}

		/* fill in the generation number */
		*cp++ = hibyte(gen);
		*cp++ = lobyte(gen);
	} else
		bcopy(id, i_devid->did_id, nbytes);

	/* return device id */
	*ret_devid = (ddi_devid_t)i_devid;
	return (DDI_SUCCESS);
}
7873
/*
 * Return a copy of dip's devid by decoding its devid property.
 */
int
ddi_devid_get(dev_info_t *dip, ddi_devid_t *ret_devid)
{
	return (i_ddi_devi_get_devid(DDI_DEV_T_ANY, dip, ret_devid));
}
7879
/*
 * Look up dip's devid string property (preferring a dev_t-specific
 * value, falling back to DDI_DEV_T_ANY) and decode it into binary
 * form for the caller.  The caller owns the returned devid.
 */
int
i_ddi_devi_get_devid(dev_t dev, dev_info_t *dip, ddi_devid_t *ret_devid)
{
	char		*devidstr;

	ASSERT(dev != DDI_DEV_T_NONE);

	/* look up the property, devt specific first */
	if (ddi_prop_lookup_string(dev, dip, DDI_PROP_DONTPASS,
	    DEVID_PROP_NAME, &devidstr) != DDI_PROP_SUCCESS) {
		/* retry wildcard only when the first lookup wasn't one */
		if ((dev == DDI_DEV_T_ANY) ||
		    (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip,
		    DDI_PROP_DONTPASS, DEVID_PROP_NAME, &devidstr) !=
		    DDI_PROP_SUCCESS)) {
			return (DDI_FAILURE);
		}
	}

	/* convert to binary form */
	if (ddi_devid_str_decode(devidstr, ret_devid, NULL) == -1) {
		ddi_prop_free(devidstr);
		return (DDI_FAILURE);
	}
	ddi_prop_free(devidstr);
	return (DDI_SUCCESS);
}
7906
7907 /*
7908 * Return a copy of the device id for dev_t
7909 */
int
ddi_lyr_get_devid(dev_t dev, ddi_devid_t *ret_devid)
{
	dev_info_t	*dip;
	int		rval;

	/* get the dip (hold released below) */
	if ((dip = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
		return (DDI_FAILURE);

	rval = i_ddi_devi_get_devid(dev, dip, ret_devid);

	ddi_release_devi(dip);		/* e_ddi_hold_devi_by_dev() */
	return (rval);
}
7925
7926 /*
7927 * Return a copy of the minor name for dev_t and spec_type
7928 */
int
ddi_lyr_get_minor_name(dev_t dev, int spec_type, char **minor_name)
{
	char		*buf;
	int		circ;
	dev_info_t	*dip;
	char		*nm;
	int		rval;

	if ((dip = e_ddi_hold_devi_by_dev(dev, 0)) == NULL) {
		*minor_name = NULL;
		return (DDI_FAILURE);
	}

	/*
	 * Find the minor name and copy into max size buf.  The copy is
	 * made while the devinfo is held busy because the name returned
	 * by i_ddi_devtspectype_to_minorname points into the minor list.
	 */
	buf = kmem_alloc(MAXNAMELEN, KM_SLEEP);
	ndi_devi_enter(dip, &circ);
	nm = i_ddi_devtspectype_to_minorname(dip, dev, spec_type);
	if (nm)
		(void) strcpy(buf, nm);
	ndi_devi_exit(dip, circ);
	ddi_release_devi(dip);	/* e_ddi_hold_devi_by_dev() */

	if (nm) {
		/* duplicate into min size buf for return result */
		*minor_name = i_ddi_strdup(buf, KM_SLEEP);
		rval = DDI_SUCCESS;
	} else {
		*minor_name = NULL;
		rval = DDI_FAILURE;
	}

	/* free max size buf and return */
	kmem_free(buf, MAXNAMELEN);
	return (rval);
}
7965
/*
 * Resolve a devid (plus optional minor name) into the list of dev_ts
 * it currently maps to.  First tries the devid cache; if that misses,
 * runs devid discovery (which may attach devices) and retries.  On
 * success the caller frees the list with ddi_lyr_free_devlist().
 */
int
ddi_lyr_devid_to_devlist(
	ddi_devid_t	devid,
	char		*minor_name,
	int		*retndevs,
	dev_t		**retdevs)
{
	ASSERT(ddi_devid_valid(devid) == DDI_SUCCESS);

	/* fast path: the devid is already in the cache */
	if (e_devid_cache_to_devt_list(devid, minor_name,
	    retndevs, retdevs) == DDI_SUCCESS) {
		ASSERT(*retndevs > 0);
		DDI_DEBUG_DEVID_DEVTS("ddi_lyr_devid_to_devlist",
		    *retndevs, *retdevs);
		return (DDI_SUCCESS);
	}

	/* slow path: discover devices that may carry this devid */
	if (e_ddi_devid_discovery(devid) == DDI_FAILURE) {
		return (DDI_FAILURE);
	}

	if (e_devid_cache_to_devt_list(devid, minor_name,
	    retndevs, retdevs) == DDI_SUCCESS) {
		ASSERT(*retndevs > 0);
		DDI_DEBUG_DEVID_DEVTS("ddi_lyr_devid_to_devlist",
		    *retndevs, *retdevs);
		return (DDI_SUCCESS);
	}

	return (DDI_FAILURE);
}
7997
/*
 * Free a dev_t list returned by ddi_lyr_devid_to_devlist().
 */
void
ddi_lyr_free_devlist(dev_t *devlist, int ndevs)
{
	kmem_free(devlist, sizeof (dev_t) * ndevs);
}
8003
8004 /*
8005 * Note: This will need to be fixed if we ever allow processes to
8006 * have more than one data model per exec.
8007 */
/*
 * Return the data model (ILP32/LP64) of the current user process.
 */
model_t
ddi_mmap_get_model(void)
{
	return (get_udatamodel());
}
8013
/*
 * Normalize a model_t for comparison: keep only the data-model bits
 * and strip the "native" flag.
 */
model_t
ddi_model_convert_from(model_t model)
{
	return ((model & DDI_MODEL_MASK) & ~DDI_MODEL_NATIVE);
}
8019
8020 /*
8021 * ddi interfaces managing storage and retrieval of eventcookies.
8022 */
8023
8024 /*
8025 * Invoke bus nexus driver's implementation of the
8026 * (*bus_remove_eventcall)() interface to remove a registered
8027 * callback handler for "event".
8028 */
int
ddi_remove_event_handler(ddi_callback_id_t id)
{
	ndi_event_callbacks_t *cb = (ndi_event_callbacks_t *)id;
	dev_info_t *ddip;

	/* a NULL id is a caller bug; fail gracefully on non-DEBUG kernels */
	ASSERT(cb);
	if (!cb) {
		return (DDI_FAILURE);
	}

	/* recover the owning dip from the cookie and unregister there */
	ddip = NDI_EVENT_DDIP(cb->ndi_evtcb_cookie);
	return (ndi_busop_remove_eventcall(ddip, id));
}
8043
8044 /*
8045 * Invoke bus nexus driver's implementation of the
8046 * (*bus_add_eventcall)() interface to register a callback handler
8047 * for "event".
8048 */
/*
 * Register "handler" for "event" on dip via the bus nexus
 * (*bus_add_eventcall)() interface; *id receives the callback id
 * used later with ddi_remove_event_handler().
 */
int
ddi_add_event_handler(dev_info_t *dip, ddi_eventcookie_t event,
    void (*handler)(dev_info_t *, ddi_eventcookie_t, void *, void *),
    void *arg, ddi_callback_id_t *id)
{
	return (ndi_busop_add_eventcall(dip, dip, event, handler, arg, id));
}
8056
8057
8058 /*
8059 * Return a handle for event "name" by calling up the device tree
8060 * hierarchy via (*bus_get_eventcookie)() interface until claimed
8061 * by a bus nexus or top of dev_info tree is reached.
8062 */
/*
 * Resolve event "name" to a cookie by walking up the device tree via
 * the (*bus_get_eventcookie)() interface until a nexus claims it.
 */
int
ddi_get_eventcookie(dev_info_t *dip, char *name,
    ddi_eventcookie_t *event_cookiep)
{
	return (ndi_busop_get_eventcookie(dip, dip,
	    name, event_cookiep));
}
8070
8071 /*
8072 * This procedure is provided as the general callback function when
8073 * umem_lockmemory calls as_add_callback for long term memory locking.
8074 * When as_unmap, as_setprot, or as_free encounter segments which have
8075 * locked memory, this callback will be invoked.
8076 */
void
umem_lock_undo(struct as *as, void *arg, uint_t event)
{
	_NOTE(ARGUNUSED(as, event))
	struct ddi_umem_cookie *cp = (struct ddi_umem_cookie *)arg;

	/*
	 * Call the cleanup function.  Decrement the cookie reference
	 * count, if it goes to zero, return the memory for the cookie.
	 * The i_ddi_umem_unlock for this cookie may or may not have been
	 * called already.  It is the responsibility of the caller of
	 * umem_lockmemory to handle the case of the cleanup routine
	 * being called after a ddi_umem_unlock for the cookie
	 * was called.
	 */

	(*cp->callbacks.cbo_umem_lock_cleanup)((ddi_umem_cookie_t)cp);

	/* remove the cookie if reference goes to zero */
	if (atomic_dec_ulong_nv((ulong_t *)(&(cp->cook_refcnt))) == 0) {
		kmem_free(cp, sizeof (struct ddi_umem_cookie));
	}
}
8100
8101 /*
8102 * The following two Consolidation Private routines provide generic
8103 * interfaces to increase/decrease the amount of device-locked memory.
8104 *
8105 * To keep project_rele and project_hold consistent, i_ddi_decr_locked_memory()
8106 * must be called every time i_ddi_incr_locked_memory() is called.
8107 */
/*
 * Charge "inc" bytes of device-locked memory against procp's
 * locked-memory rctl.  Returns 0 on success, or ENOMEM when the rctl
 * limit would be exceeded.  Every successful call must be balanced by
 * a matching i_ddi_decr_locked_memory() call.
 */
int
/* ARGSUSED */
i_ddi_incr_locked_memory(proc_t *procp, rctl_qty_t inc)
{
	ASSERT(procp != NULL);
	mutex_enter(&procp->p_lock);	/* rctl ops require p_lock */
	if (rctl_incr_locked_mem(procp, NULL, inc, 1)) {
		mutex_exit(&procp->p_lock);
		return (ENOMEM);
	}
	mutex_exit(&procp->p_lock);
	return (0);
}
8121
8122 /*
8123 * To keep project_rele and project_hold consistent, i_ddi_incr_locked_memory()
8124 * must be called every time i_ddi_decr_locked_memory() is called.
8125 */
/*
 * Return "dec" bytes of device-locked memory previously charged by
 * i_ddi_incr_locked_memory() to procp's locked-memory rctl.
 */
/* ARGSUSED */
void
i_ddi_decr_locked_memory(proc_t *procp, rctl_qty_t dec)
{
	ASSERT(procp != NULL);
	mutex_enter(&procp->p_lock);	/* rctl ops require p_lock */
	rctl_decr_locked_mem(procp, NULL, dec, 1);
	mutex_exit(&procp->p_lock);
}
8135
8136 /*
8137 * The cookie->upd_max_lock_rctl flag is used to determine if we should
8138 * charge device locked memory to the max-locked-memory rctl. Tracking
8139 * device locked memory causes the rctl locks to get hot under high-speed
8140 * I/O such as RDSv3 over IB. If there is no max-locked-memory rctl limit,
8141 * we bypass charging the locked memory to the rctl altogether. The cookie's
8142 * flag tells us if the rctl value should be updated when unlocking the memory,
8143 * in case the rctl gets changed after the memory was locked. Any device
8144 * locked memory in that rare case will not be counted toward the rctl limit.
8145 *
8146 * When tracking the locked memory, the kproject_t parameter is always NULL
8147 * in the code paths:
8148 * i_ddi_incr_locked_memory -> rctl_incr_locked_mem
8149 * i_ddi_decr_locked_memory -> rctl_decr_locked_mem
8150 * Thus, we always use the tk_proj member to check the projp setting.
8151 */
static void
init_lockedmem_rctl_flag(struct ddi_umem_cookie *cookie)
{
	proc_t		*p;
	kproject_t	*projp;
	zone_t		*zonep;

	ASSERT(cookie);
	p = cookie->procp;
	ASSERT(p);

	zonep = p->p_zone;
	projp = p->p_task->tk_proj;

	ASSERT(zonep);
	ASSERT(projp);

	/*
	 * Skip rctl accounting entirely when neither the zone nor the
	 * project has a locked-memory limit set (see block comment above).
	 */
	if (zonep->zone_locked_mem_ctl == UINT64_MAX &&
	    projp->kpj_data.kpd_locked_mem_ctl == UINT64_MAX)
		cookie->upd_max_lock_rctl = 0;
	else
		cookie->upd_max_lock_rctl = 1;
}
8175
8176 /*
8177 * This routine checks if the max-locked-memory resource ctl is
8178 * exceeded, if not increments it, grabs a hold on the project.
8179 * Returns 0 if successful otherwise returns error code
8180 */
8181 static int
umem_incr_devlockmem(struct ddi_umem_cookie * cookie)8182 umem_incr_devlockmem(struct ddi_umem_cookie *cookie)
8183 {
8184 proc_t *procp;
8185 int ret;
8186
8187 ASSERT(cookie);
8188 if (cookie->upd_max_lock_rctl == 0)
8189 return (0);
8190
8191 procp = cookie->procp;
8192 ASSERT(procp);
8193
8194 if ((ret = i_ddi_incr_locked_memory(procp,
8195 cookie->size)) != 0) {
8196 return (ret);
8197 }
8198 return (0);
8199 }
8200
8201 /*
8202 * Decrements the max-locked-memory resource ctl and releases
8203 * the hold on the project that was acquired during umem_incr_devlockmem
8204 */
static void
umem_decr_devlockmem(struct ddi_umem_cookie *cookie)
{
	proc_t		*proc;

	/* nothing was ever charged to the rctl for this cookie */
	if (cookie->upd_max_lock_rctl == 0)
		return;

	proc = (proc_t *)cookie->procp;
	if (!proc)
		return;

	i_ddi_decr_locked_memory(proc, cookie->size);
}
8219
8220 /*
8221 * A consolidation private function which is essentially equivalent to
8222 * ddi_umem_lock but with the addition of arguments ops_vector and procp.
8223 * A call to as_add_callback is done if DDI_UMEMLOCK_LONGTERM is set, and
8224 * the ops_vector is valid.
8225 *
8226 * Lock the virtual address range in the current process and create a
8227 * ddi_umem_cookie (of type UMEM_LOCKED). This can be used to pass to
8228 * ddi_umem_iosetup to create a buf or do devmap_umem_setup/remap to export
8229 * to user space.
8230 *
8231 * Note: The resource control accounting currently uses a full charge model
8232 * in other words attempts to lock the same/overlapping areas of memory
8233 * will deduct the full size of the buffer from the projects running
8234 * counter for the device locked memory.
8235 *
8236 * addr, size should be PAGESIZE aligned
8237 *
8238 * flags - DDI_UMEMLOCK_READ, DDI_UMEMLOCK_WRITE or both
8239 * identifies whether the locked memory will be read or written or both
8240 * DDI_UMEMLOCK_LONGTERM must be set when the locking will
8241 * be maintained for an indefinitely long period (essentially permanent),
8242 * rather than for what would be required for a typical I/O completion.
8243 * When DDI_UMEMLOCK_LONGTERM is set, umem_lockmemory will return EFAULT
8244 * if the memory pertains to a regular file which is mapped MAP_SHARED.
8245 * This is to prevent a deadlock if a file truncation is attempted after
8246 * after the locking is done.
8247 *
8248 * Returns 0 on success
8249 * EINVAL - for invalid parameters
8250 * EPERM, ENOMEM and other error codes returned by as_pagelock
8251 * ENOMEM - is returned if the current request to lock memory exceeds
8252 * *.max-locked-memory resource control value.
8253 * EFAULT - memory pertains to a regular file mapped shared and
8254 * and DDI_UMEMLOCK_LONGTERM flag is set
8255 * EAGAIN - could not start the ddi_umem_unlock list processing thread
8256 */
int
umem_lockmemory(caddr_t addr, size_t len, int flags, ddi_umem_cookie_t *cookie,
    struct umem_callback_ops *ops_vector,
    proc_t *procp)
{
	int error;
	struct ddi_umem_cookie *p;
	void (*driver_callback)() = NULL;
	struct as *as;
	struct seg *seg;
	vnode_t *vp;

	/* Allow device drivers to not have to reference "curproc" */
	if (procp == NULL)
		procp = curproc;
	as = procp->p_as;
	*cookie = NULL;		/* in case of any error return */

	/* These are the only three valid flags */
	if ((flags & ~(DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE |
	    DDI_UMEMLOCK_LONGTERM)) != 0)
		return (EINVAL);

	/* At least one (can be both) of the two access flags must be set */
	if ((flags & (DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) == 0)
		return (EINVAL);

	/* addr and len must be page-aligned */
	if (((uintptr_t)addr & PAGEOFFSET) != 0)
		return (EINVAL);

	if ((len & PAGEOFFSET) != 0)
		return (EINVAL);

	/*
	 * For longterm locking a driver callback must be specified; if
	 * not longterm then a callback is optional.
	 */
	if (ops_vector != NULL) {
		if (ops_vector->cbo_umem_callback_version !=
		    UMEM_CALLBACK_VERSION)
			return (EINVAL);
		else
			driver_callback = ops_vector->cbo_umem_lock_cleanup;
	}
	if ((driver_callback == NULL) && (flags & DDI_UMEMLOCK_LONGTERM))
		return (EINVAL);

	/*
	 * Call i_ddi_umem_unlock_thread_start if necessary.  It will
	 * be called on first ddi_umem_lock or umem_lockmemory call.
	 */
	if (ddi_umem_unlock_thread == NULL)
		i_ddi_umem_unlock_thread_start();

	/* Allocate memory for the cookie */
	p = kmem_zalloc(sizeof (struct ddi_umem_cookie), KM_SLEEP);

	/* Convert the flags to seg_rw type */
	if (flags & DDI_UMEMLOCK_WRITE) {
		p->s_flags = S_WRITE;
	} else {
		p->s_flags = S_READ;
	}

	/* Store procp in cookie for later iosetup/unlock */
	p->procp = (void *)procp;

	/*
	 * Store the struct as pointer in cookie for later use by
	 * ddi_umem_unlock. The proc->p_as will be stale if ddi_umem_unlock
	 * is called after relvm is called.
	 */
	p->asp = as;

	/*
	 * The size field is needed for lockmem accounting.
	 */
	p->size = len;
	init_lockedmem_rctl_flag(p);

	if (umem_incr_devlockmem(p) != 0) {
		/*
		 * The requested memory cannot be locked
		 */
		kmem_free(p, sizeof (struct ddi_umem_cookie));
		*cookie = (ddi_umem_cookie_t)NULL;
		return (ENOMEM);
	}

	/* Lock the pages corresponding to addr, len in memory */
	error = as_pagelock(as, &(p->pparray), addr, len, p->s_flags);
	if (error != 0) {
		umem_decr_devlockmem(p);
		kmem_free(p, sizeof (struct ddi_umem_cookie));
		*cookie = (ddi_umem_cookie_t)NULL;
		return (error);
	}

	/*
	 * For longterm locking the addr must pertain to a seg_vn segment
	 * or a seg_spt segment.
	 * If the segment pertains to a regular file, it cannot be
	 * mapped MAP_SHARED.
	 * This is to prevent a deadlock if a file truncation is attempted
	 * after the locking is done.
	 * Doing this after as_pagelock guarantees persistence of the as; if
	 * an unacceptable segment is found, the cleanup includes calling
	 * as_pageunlock before returning EFAULT.
	 *
	 * segdev is allowed here as it is already locked.  This allows
	 * for memory exported by drivers through mmap() (which is already
	 * locked) to be allowed for LONGTERM.
	 */
	if (flags & DDI_UMEMLOCK_LONGTERM) {
		extern struct seg_ops segspt_shmops;
		extern struct seg_ops segdev_ops;
		AS_LOCK_ENTER(as, RW_READER);
		for (seg = as_segat(as, addr); ; seg = AS_SEGNEXT(as, seg)) {
			/* Walk every segment overlapping [addr, addr + len) */
			if (seg == NULL || seg->s_base > addr + len)
				break;
			if (seg->s_ops == &segdev_ops)
				continue;
			if (((seg->s_ops != &segvn_ops) &&
			    (seg->s_ops != &segspt_shmops)) ||
			    ((SEGOP_GETVP(seg, addr, &vp) == 0 &&
			    vp != NULL && vp->v_type == VREG) &&
			    (SEGOP_GETTYPE(seg, addr) & MAP_SHARED))) {
				as_pageunlock(as, p->pparray,
				    addr, len, p->s_flags);
				AS_LOCK_EXIT(as);
				umem_decr_devlockmem(p);
				kmem_free(p, sizeof (struct ddi_umem_cookie));
				*cookie = (ddi_umem_cookie_t)NULL;
				return (EFAULT);
			}
		}
		AS_LOCK_EXIT(as);
	}


	/* Initialize the fields in the ddi_umem_cookie */
	p->cvaddr = addr;
	p->type = UMEM_LOCKED;
	if (driver_callback != NULL) {
		/* i_ddi_umem_unlock and umem_lock_undo may need the cookie */
		p->cook_refcnt = 2;
		p->callbacks = *ops_vector;
	} else {
		/* only i_ddi_umem_unlock needs the cookie */
		p->cook_refcnt = 1;
	}

	*cookie = (ddi_umem_cookie_t)p;

	/*
	 * If a driver callback was specified, add an entry to the
	 * as struct callback list. The as_pagelock above guarantees
	 * the persistence of as.
	 */
	if (driver_callback) {
		error = as_add_callback(as, umem_lock_undo, p, AS_ALL_EVENT,
		    addr, len, KM_SLEEP);
		if (error != 0) {
			as_pageunlock(as, p->pparray,
			    addr, len, p->s_flags);
			umem_decr_devlockmem(p);
			kmem_free(p, sizeof (struct ddi_umem_cookie));
			*cookie = (ddi_umem_cookie_t)NULL;
		}
	}
	return (error);
}
8430
8431 /*
8432 * Unlock the pages locked by ddi_umem_lock or umem_lockmemory and free
8433 * the cookie. Called from i_ddi_umem_unlock_thread.
8434 */
8435
static void
i_ddi_umem_unlock(struct ddi_umem_cookie *p)
{
	uint_t rc;

	/*
	 * There is no way to determine whether a callback to
	 * umem_lock_undo was registered via as_add_callback.
	 * (i.e. umem_lockmemory was called with DDI_UMEMLOCK_LONGTERM and
	 * a valid callback function structure.) as_delete_callback
	 * is called to delete a possible registered callback. If the
	 * return from as_delete_callbacks is AS_CALLBACK_DELETED, it
	 * indicates that there was a callback registered, and that it was
	 * successfully deleted. Thus, the cookie reference count
	 * will never be decremented by umem_lock_undo. Just return the
	 * memory for the cookie, since both users of the cookie are done.
	 * A return of AS_CALLBACK_NOTFOUND indicates a callback was
	 * never registered. A return of AS_CALLBACK_DELETE_DEFERRED
	 * indicates that callback processing is taking place and
	 * umem_lock_undo is, or will be, executing, and thus decrementing
	 * the cookie reference count when it is complete.
	 *
	 * This needs to be done before as_pageunlock so that the
	 * persistence of as is guaranteed because of the locked pages.
	 *
	 */
	rc = as_delete_callback(p->asp, p);


	/*
	 * The proc->p_as will be stale if i_ddi_umem_unlock is called
	 * after relvm is called so use p->asp.
	 */
	as_pageunlock(p->asp, p->pparray, p->cvaddr, p->size, p->s_flags);

	/*
	 * Now that we have unlocked the memory decrement the
	 * *.max-locked-memory rctl
	 */
	umem_decr_devlockmem(p);

	if (rc == AS_CALLBACK_DELETED) {
		/* umem_lock_undo will not happen, return the cookie memory */
		ASSERT(p->cook_refcnt == 2);
		kmem_free(p, sizeof (struct ddi_umem_cookie));
	} else {
		/*
		 * umem_lock_undo may happen if as_delete_callback returned
		 * AS_CALLBACK_DELETE_DEFERRED. In that case, decrement the
		 * reference count, atomically, and return the cookie
		 * memory if the reference count goes to zero. The only
		 * other value for rc is AS_CALLBACK_NOTFOUND. In that
		 * case, just return the cookie memory.
		 */
		if ((rc != AS_CALLBACK_DELETE_DEFERRED) ||
		    (atomic_dec_ulong_nv((ulong_t *)(&(p->cook_refcnt)))
		    == 0)) {
			kmem_free(p, sizeof (struct ddi_umem_cookie));
		}
	}
}
8497
8498 /*
8499 * i_ddi_umem_unlock_thread - deferred ddi_umem_unlock list handler.
8500 *
8501 * Call i_ddi_umem_unlock for entries in the ddi_umem_unlock list
8502 * until it is empty. Then, wait for more to be added. This thread is awoken
8503 * via calls to ddi_umem_unlock.
8504 */
8505
static void
i_ddi_umem_unlock_thread(void)
{
	struct ddi_umem_cookie *ret_cookie;
	callb_cpr_t cprinfo;

	/* process the ddi_umem_unlock list */
	CALLB_CPR_INIT(&cprinfo, &ddi_umem_unlock_mutex,
	    callb_generic_cpr, "unlock_thread");
	for (;;) {
		mutex_enter(&ddi_umem_unlock_mutex);
		if (ddi_umem_unlock_head != NULL) {	/* list not empty */
			ret_cookie = ddi_umem_unlock_head;
			/* take it off the list */
			if ((ddi_umem_unlock_head =
			    ddi_umem_unlock_head->unl_forw) == NULL) {
				ddi_umem_unlock_tail = NULL;
			}
			mutex_exit(&ddi_umem_unlock_mutex);
			/* unlock the pages in this cookie */
			(void) i_ddi_umem_unlock(ret_cookie);
		} else {	/* list is empty, wait for next ddi_umem_unlock */
			/* CPR-safe wait: mark safe while blocked on the cv */
			CALLB_CPR_SAFE_BEGIN(&cprinfo);
			cv_wait(&ddi_umem_unlock_cv, &ddi_umem_unlock_mutex);
			CALLB_CPR_SAFE_END(&cprinfo, &ddi_umem_unlock_mutex);
			mutex_exit(&ddi_umem_unlock_mutex);
		}
	}
	/* ddi_umem_unlock_thread does not exit */
	/* NOTREACHED */
}
8537
8538 /*
8539 * Start the thread that will process the ddi_umem_unlock list if it is
8540 * not already started (i_ddi_umem_unlock_thread).
8541 */
static void
i_ddi_umem_unlock_thread_start(void)
{
	/*
	 * The mutex serializes concurrent first-callers so only one
	 * unlock-list thread is ever created.
	 */
	mutex_enter(&ddi_umem_unlock_mutex);
	if (ddi_umem_unlock_thread == NULL) {
		ddi_umem_unlock_thread = thread_create(NULL, 0,
		    i_ddi_umem_unlock_thread, NULL, 0, &p0,
		    TS_RUN, minclsyspri);
	}
	mutex_exit(&ddi_umem_unlock_mutex);
}
8553
8554 /*
8555 * Lock the virtual address range in the current process and create a
8556 * ddi_umem_cookie (of type UMEM_LOCKED). This can be used to pass to
8557 * ddi_umem_iosetup to create a buf or do devmap_umem_setup/remap to export
8558 * to user space.
8559 *
8560 * Note: The resource control accounting currently uses a full charge model
8561 * in other words attempts to lock the same/overlapping areas of memory
8562 * will deduct the full size of the buffer from the projects running
8563 * counter for the device locked memory. This applies to umem_lockmemory too.
8564 *
8565 * addr, size should be PAGESIZE aligned
8566 * flags - DDI_UMEMLOCK_READ, DDI_UMEMLOCK_WRITE or both
8567 * identifies whether the locked memory will be read or written or both
8568 *
8569 * Returns 0 on success
8570 * EINVAL - for invalid parameters
8571 * EPERM, ENOMEM and other error codes returned by as_pagelock
8572 * ENOMEM - is returned if the current request to lock memory exceeds
8573 * *.max-locked-memory resource control value.
8574 * EAGAIN - could not start the ddi_umem_unlock list processing thread
8575 */
int
ddi_umem_lock(caddr_t addr, size_t len, int flags, ddi_umem_cookie_t *cookie)
{
	int error;
	struct ddi_umem_cookie *p;

	*cookie = NULL;		/* in case of any error return */

	/* These are the only two valid flags */
	if ((flags & ~(DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) != 0) {
		return (EINVAL);
	}

	/* At least one of the two flags (or both) must be set */
	if ((flags & (DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) == 0) {
		return (EINVAL);
	}

	/* addr and len must be page-aligned */
	if (((uintptr_t)addr & PAGEOFFSET) != 0) {
		return (EINVAL);
	}

	if ((len & PAGEOFFSET) != 0) {
		return (EINVAL);
	}

	/*
	 * Call i_ddi_umem_unlock_thread_start if necessary.  It will
	 * be called on first ddi_umem_lock or umem_lockmemory call.
	 */
	if (ddi_umem_unlock_thread == NULL)
		i_ddi_umem_unlock_thread_start();

	/* Allocate memory for the cookie */
	p = kmem_zalloc(sizeof (struct ddi_umem_cookie), KM_SLEEP);

	/* Convert the flags to seg_rw type */
	if (flags & DDI_UMEMLOCK_WRITE) {
		p->s_flags = S_WRITE;
	} else {
		p->s_flags = S_READ;
	}

	/* Store curproc in cookie for later iosetup/unlock */
	p->procp = (void *)curproc;

	/*
	 * Store the struct as pointer in cookie for later use by
	 * ddi_umem_unlock. The proc->p_as will be stale if ddi_umem_unlock
	 * is called after relvm is called.
	 */
	p->asp = curproc->p_as;
	/*
	 * The size field is needed for lockmem accounting.
	 */
	p->size = len;
	init_lockedmem_rctl_flag(p);

	if (umem_incr_devlockmem(p) != 0) {
		/*
		 * The requested memory cannot be locked
		 */
		kmem_free(p, sizeof (struct ddi_umem_cookie));
		*cookie = (ddi_umem_cookie_t)NULL;
		return (ENOMEM);
	}

	/* Lock the pages corresponding to addr, len in memory */
	error = as_pagelock(((proc_t *)p->procp)->p_as, &(p->pparray),
	    addr, len, p->s_flags);
	if (error != 0) {
		/* Undo the rctl charge taken above before bailing out */
		umem_decr_devlockmem(p);
		kmem_free(p, sizeof (struct ddi_umem_cookie));
		*cookie = (ddi_umem_cookie_t)NULL;
		return (error);
	}

	/* Initialize the fields in the ddi_umem_cookie */
	p->cvaddr = addr;
	p->type = UMEM_LOCKED;
	p->cook_refcnt = 1;

	*cookie = (ddi_umem_cookie_t)p;
	return (error);
}
8662
8663 /*
8664 * Add the cookie to the ddi_umem_unlock list. Pages will be
8665 * unlocked by i_ddi_umem_unlock_thread.
8666 */
8667
void
ddi_umem_unlock(ddi_umem_cookie_t cookie)
{
	struct ddi_umem_cookie *p = (struct ddi_umem_cookie *)cookie;

	ASSERT(p->type == UMEM_LOCKED);
	ASSERT(CPU_ON_INTR(CPU) == 0); /* cannot be high level */
	ASSERT(ddi_umem_unlock_thread != NULL);

	p->unl_forw = (struct ddi_umem_cookie *)NULL;	/* end of list */
	/*
	 * Queue the unlock request and notify i_ddi_umem_unlock thread
	 * if it's called in the interrupt context. Otherwise, unlock pages
	 * immediately.
	 */
	if (servicing_interrupt()) {
		/* queue the unlock request and notify the thread */
		mutex_enter(&ddi_umem_unlock_mutex);
		if (ddi_umem_unlock_head == NULL) {
			/* list was empty: seed it and wake the worker */
			ddi_umem_unlock_head = ddi_umem_unlock_tail = p;
			cv_broadcast(&ddi_umem_unlock_cv);
		} else {
			ddi_umem_unlock_tail->unl_forw = p;
			ddi_umem_unlock_tail = p;
		}
		mutex_exit(&ddi_umem_unlock_mutex);
	} else {
		/* unlock the pages right away */
		(void) i_ddi_umem_unlock(p);
	}
}
8699
8700 /*
8701 * Create a buf structure from a ddi_umem_cookie
 * cookie - is a ddi_umem_cookie from ddi_umem_lock or ddi_umem_alloc
8703 * (only UMEM_LOCKED & KMEM_NON_PAGEABLE types supported)
8704 * off, len - identifies the portion of the memory represented by the cookie
8705 * that the buf points to.
8706 * NOTE: off, len need to follow the alignment/size restrictions of the
8707 * device (dev) that this buf will be passed to. Some devices
8708 * will accept unrestricted alignment/size, whereas others (such as
8709 * st) require some block-size alignment/size. It is the caller's
8710 * responsibility to ensure that the alignment/size restrictions
8711 * are met (we cannot assert as we do not know the restrictions)
8712 *
8713 * direction - is one of B_READ or B_WRITE and needs to be compatible with
8714 * the flags used in ddi_umem_lock
8715 *
8716 * The following three arguments are used to initialize fields in the
8717 * buf structure and are uninterpreted by this routine.
8718 *
8719 * dev
8720 * blkno
8721 * iodone
8722 *
8723 * sleepflag - is one of DDI_UMEM_SLEEP or DDI_UMEM_NOSLEEP
8724 *
8725 * Returns a buf structure pointer on success (to be freed by freerbuf)
8726 * NULL on any parameter error or memory alloc failure
8727 *
8728 */
8729 struct buf *
ddi_umem_iosetup(ddi_umem_cookie_t cookie,off_t off,size_t len,int direction,dev_t dev,daddr_t blkno,int (* iodone)(struct buf *),int sleepflag)8730 ddi_umem_iosetup(ddi_umem_cookie_t cookie, off_t off, size_t len,
8731 int direction, dev_t dev, daddr_t blkno,
8732 int (*iodone)(struct buf *), int sleepflag)
8733 {
8734 struct ddi_umem_cookie *p = (struct ddi_umem_cookie *)cookie;
8735 struct buf *bp;
8736
8737 /*
8738 * check for valid cookie offset, len
8739 */
8740 if ((off + len) > p->size) {
8741 return (NULL);
8742 }
8743
8744 if (len > p->size) {
8745 return (NULL);
8746 }
8747
8748 /* direction has to be one of B_READ or B_WRITE */
8749 if ((direction != B_READ) && (direction != B_WRITE)) {
8750 return (NULL);
8751 }
8752
8753 /* These are the only two valid sleepflags */
8754 if ((sleepflag != DDI_UMEM_SLEEP) && (sleepflag != DDI_UMEM_NOSLEEP)) {
8755 return (NULL);
8756 }
8757
8758 /*
8759 * Only cookies of type UMEM_LOCKED and KMEM_NON_PAGEABLE are supported
8760 */
8761 if ((p->type != UMEM_LOCKED) && (p->type != KMEM_NON_PAGEABLE)) {
8762 return (NULL);
8763 }
8764
8765 /* If type is KMEM_NON_PAGEABLE procp is NULL */
8766 ASSERT((p->type == KMEM_NON_PAGEABLE) ?
8767 (p->procp == NULL) : (p->procp != NULL));
8768
8769 bp = kmem_alloc(sizeof (struct buf), sleepflag);
8770 if (bp == NULL) {
8771 return (NULL);
8772 }
8773 bioinit(bp);
8774
8775 bp->b_flags = B_BUSY | B_PHYS | direction;
8776 bp->b_edev = dev;
8777 bp->b_lblkno = blkno;
8778 bp->b_iodone = iodone;
8779 bp->b_bcount = len;
8780 bp->b_proc = (proc_t *)p->procp;
8781 ASSERT(((uintptr_t)(p->cvaddr) & PAGEOFFSET) == 0);
8782 bp->b_un.b_addr = (caddr_t)((uintptr_t)(p->cvaddr) + off);
8783 if (p->pparray != NULL) {
8784 bp->b_flags |= B_SHADOW;
8785 ASSERT(((uintptr_t)(p->cvaddr) & PAGEOFFSET) == 0);
8786 bp->b_shadow = p->pparray + btop(off);
8787 }
8788 return (bp);
8789 }
8790
8791 /*
8792 * Fault-handling and related routines
8793 */
8794
8795 ddi_devstate_t
ddi_get_devstate(dev_info_t * dip)8796 ddi_get_devstate(dev_info_t *dip)
8797 {
8798 if (DEVI_IS_DEVICE_OFFLINE(dip))
8799 return (DDI_DEVSTATE_OFFLINE);
8800 else if (DEVI_IS_DEVICE_DOWN(dip) || DEVI_IS_BUS_DOWN(dip))
8801 return (DDI_DEVSTATE_DOWN);
8802 else if (DEVI_IS_BUS_QUIESCED(dip))
8803 return (DDI_DEVSTATE_QUIESCED);
8804 else if (DEVI_IS_DEVICE_DEGRADED(dip))
8805 return (DDI_DEVSTATE_DEGRADED);
8806 else
8807 return (DDI_DEVSTATE_UP);
8808 }
8809
8810 void
ddi_dev_report_fault(dev_info_t * dip,ddi_fault_impact_t impact,ddi_fault_location_t location,const char * message)8811 ddi_dev_report_fault(dev_info_t *dip, ddi_fault_impact_t impact,
8812 ddi_fault_location_t location, const char *message)
8813 {
8814 struct ddi_fault_event_data fd;
8815 ddi_eventcookie_t ec;
8816
8817 /*
8818 * Assemble all the information into a fault-event-data structure
8819 */
8820 fd.f_dip = dip;
8821 fd.f_impact = impact;
8822 fd.f_location = location;
8823 fd.f_message = message;
8824 fd.f_oldstate = ddi_get_devstate(dip);
8825
8826 /*
8827 * Get eventcookie from defining parent.
8828 */
8829 if (ddi_get_eventcookie(dip, DDI_DEVI_FAULT_EVENT, &ec) !=
8830 DDI_SUCCESS)
8831 return;
8832
8833 (void) ndi_post_event(dip, dip, ec, &fd);
8834 }
8835
/*
 * Return the device class string stored on the devinfo node
 * (may be NULL if no class has been set).
 */
char *
i_ddi_devi_class(dev_info_t *dip)
{
	return (DEVI(dip)->devi_device_class);
}
8841
8842 int
i_ddi_set_devi_class(dev_info_t * dip,char * devi_class,int flag)8843 i_ddi_set_devi_class(dev_info_t *dip, char *devi_class, int flag)
8844 {
8845 struct dev_info *devi = DEVI(dip);
8846
8847 mutex_enter(&devi->devi_lock);
8848
8849 if (devi->devi_device_class)
8850 kmem_free(devi->devi_device_class,
8851 strlen(devi->devi_device_class) + 1);
8852
8853 if ((devi->devi_device_class = i_ddi_strdup(devi_class, flag))
8854 != NULL) {
8855 mutex_exit(&devi->devi_lock);
8856 return (DDI_SUCCESS);
8857 }
8858
8859 mutex_exit(&devi->devi_lock);
8860
8861 return (DDI_FAILURE);
8862 }
8863
8864
8865 /*
8866 * Task Queues DDI interfaces.
8867 */
8868
8869 /* ARGSUSED */
8870 ddi_taskq_t *
ddi_taskq_create(dev_info_t * dip,const char * name,int nthreads,pri_t pri,uint_t cflags)8871 ddi_taskq_create(dev_info_t *dip, const char *name, int nthreads,
8872 pri_t pri, uint_t cflags)
8873 {
8874 char full_name[TASKQ_NAMELEN];
8875 const char *tq_name;
8876 int nodeid = 0;
8877
8878 if (dip == NULL)
8879 tq_name = name;
8880 else {
8881 nodeid = ddi_get_instance(dip);
8882
8883 if (name == NULL)
8884 name = "tq";
8885
8886 (void) snprintf(full_name, sizeof (full_name), "%s_%s",
8887 ddi_driver_name(dip), name);
8888
8889 tq_name = full_name;
8890 }
8891
8892 return ((ddi_taskq_t *)taskq_create_instance(tq_name, nodeid, nthreads,
8893 pri == TASKQ_DEFAULTPRI ? minclsyspri : pri,
8894 nthreads, INT_MAX, TASKQ_PREPOPULATE));
8895 }
8896
/* Destroy a taskq created by ddi_taskq_create(). */
void
ddi_taskq_destroy(ddi_taskq_t *tq)
{
	taskq_destroy((taskq_t *)tq);
}
8902
8903 int
ddi_taskq_dispatch(ddi_taskq_t * tq,void (* func)(void *),void * arg,uint_t dflags)8904 ddi_taskq_dispatch(ddi_taskq_t *tq, void (* func)(void *),
8905 void *arg, uint_t dflags)
8906 {
8907 taskqid_t id = taskq_dispatch((taskq_t *)tq, func, arg,
8908 dflags == DDI_SLEEP ? TQ_SLEEP : TQ_NOSLEEP);
8909
8910 return (id != 0 ? DDI_SUCCESS : DDI_FAILURE);
8911 }
8912
/* Wait for all previously dispatched tasks on the taskq to complete. */
void
ddi_taskq_wait(ddi_taskq_t *tq)
{
	taskq_wait((taskq_t *)tq);
}
8918
/* Suspend execution of tasks on the taskq. */
void
ddi_taskq_suspend(ddi_taskq_t *tq)
{
	taskq_suspend((taskq_t *)tq);
}
8924
/* Return B_TRUE if the taskq is currently suspended. */
boolean_t
ddi_taskq_suspended(ddi_taskq_t *tq)
{
	return (taskq_suspended((taskq_t *)tq));
}
8930
/* Resume execution of tasks on a previously suspended taskq. */
void
ddi_taskq_resume(ddi_taskq_t *tq)
{
	taskq_resume((taskq_t *)tq);
}
8936
/*
 * Split an interface name such as "e1000g10" into its driver-name
 * part ("e1000g", copied into alnum) and its PPA/instance number
 * (10, stored in *nump).  Returns DDI_SUCCESS or DDI_FAILURE.
 */
int
ddi_parse(const char *ifname, char *alnum, uint_t *nump)
{
	/*
	 * Cap "alnum" size at LIFNAMSIZ, as callers use that in most/all
	 * cases.
	 */
	return (ddi_parse_dlen(ifname, alnum, LIFNAMSIZ, nump));
}
8946
/*
 * Length-aware form of ddi_parse(): split ifname into a driver-name
 * prefix (copied, NUL-terminated, into alnum of alnumsize bytes) and
 * a trailing decimal PPA number (stored in *nump).
 *
 * Fails (DDI_FAILURE) when ifname has no trailing digits, when it is
 * all digits (no driver name), when the prefix plus terminator does
 * not fit in alnumsize, or when the number cannot be parsed.
 */
int
ddi_parse_dlen(const char *ifname, char *alnum, size_t alnumsize, uint_t *nump)
{
	const char *p;
	int copy_len;
	ulong_t num;
	boolean_t nonum = B_TRUE;
	char c;

	/* Scan backward from the end until the first non-digit. */
	copy_len = strlen(ifname);
	for (p = ifname + copy_len; p != ifname; copy_len--) {
		c = *--p;
		if (!isdigit(c)) {
			/*
			 * At this point, copy_len is the length of ifname
			 * WITHOUT the PPA number. For "e1000g10" copy_len is 6.
			 *
			 * We must first make sure we HAVE a PPA, and we
			 * aren't exceeding alnumsize with copy_len and a '\0'
			 * terminator...
			 */
			int copy_len_nul = copy_len + 1;

			if (nonum || alnumsize < copy_len_nul)
				return (DDI_FAILURE);

			/*
			 * ... then we abuse strlcpy() to copy over the
			 * driver name portion AND '\0'-terminate it.
			 */
			(void) strlcpy(alnum, ifname, copy_len_nul);
			if (ddi_strtoul(p + 1, NULL, 10, &num) != 0)
				return (DDI_FAILURE);
			break;
		}
		/* Saw at least one digit, so a PPA exists. */
		nonum = B_FALSE;
	}

	/* copy_len == 0 means empty input or all digits: no driver name. */
	if (copy_len == 0)
		return (DDI_FAILURE);

	*nump = num;
	return (DDI_SUCCESS);
}
8991
8992 /*
8993 * Default initialization function for drivers that don't need to quiesce.
8994 */
8995 /* ARGSUSED */
/* Quiesce entry point for drivers that have nothing to quiesce. */
int
ddi_quiesce_not_needed(dev_info_t *dip)
{
	return (DDI_SUCCESS);
}
9001
9002 /*
9003 * Initialization function for drivers that should implement quiesce()
9004 * but haven't yet.
9005 */
9006 /* ARGSUSED */
/* Quiesce entry point for drivers that should implement it but don't. */
int
ddi_quiesce_not_supported(dev_info_t *dip)
{
	return (DDI_FAILURE);
}
9012
9013 char *
ddi_strdup(const char * str,int flag)9014 ddi_strdup(const char *str, int flag)
9015 {
9016 int n;
9017 char *ptr;
9018
9019 ASSERT(str != NULL);
9020 ASSERT((flag == KM_SLEEP) || (flag == KM_NOSLEEP));
9021
9022 n = strlen(str);
9023 if ((ptr = kmem_alloc(n + 1, flag)) == NULL)
9024 return (NULL);
9025 bcopy(str, ptr, n + 1);
9026 return (ptr);
9027 }
9028
/* Kernel strdup(): always sleeps for memory, so never returns NULL. */
char *
strdup(const char *str)
{
	return (ddi_strdup(str, KM_SLEEP));
}
9034
/* Free a string allocated by strdup()/ddi_strdup(). */
void
strfree(char *str)
{
	ASSERT(str != NULL);
	kmem_free(str, strlen(str) + 1);
}
9041
9042 /*
9043 * Generic DDI callback interfaces.
9044 */
9045
/*
 * Register a generic DDI callback for a device.  Only one callback may
 * be registered per dip (DDI_EALREADY otherwise).  Must not be called
 * from interrupt context.  On success the opaque handle returned via
 * ret_hdlp can later be passed to ddi_cb_unregister().
 */
int
ddi_cb_register(dev_info_t *dip, ddi_cb_flags_t flags, ddi_cb_func_t cbfunc,
    void *arg1, void *arg2, ddi_cb_handle_t *ret_hdlp)
{
	ddi_cb_t *cbp;

	ASSERT(dip != NULL);
	ASSERT(DDI_CB_FLAG_VALID(flags));
	ASSERT(cbfunc != NULL);
	ASSERT(ret_hdlp != NULL);

	/* Sanity check the context */
	ASSERT(!servicing_interrupt());
	if (servicing_interrupt())
		return (DDI_FAILURE);

	/* Validate parameters */
	if ((dip == NULL) || !DDI_CB_FLAG_VALID(flags) ||
	    (cbfunc == NULL) || (ret_hdlp == NULL))
		return (DDI_EINVAL);

	/* Check for previous registration */
	if (DEVI(dip)->devi_cb_p != NULL)
		return (DDI_EALREADY);

	/* Allocate and initialize callback */
	cbp = kmem_zalloc(sizeof (ddi_cb_t), KM_SLEEP);
	cbp->cb_dip = dip;
	cbp->cb_func = cbfunc;
	cbp->cb_arg1 = arg1;
	cbp->cb_arg2 = arg2;
	cbp->cb_flags = flags;
	DEVI(dip)->devi_cb_p = cbp;

	/* If adding an IRM callback, notify IRM */
	if (flags & DDI_CB_FLAG_INTR)
		i_ddi_irm_set_cb(dip, B_TRUE);

	/* The handle is the address of the devinfo's callback pointer. */
	*ret_hdlp = (ddi_cb_handle_t)&(DEVI(dip)->devi_cb_p);
	return (DDI_SUCCESS);
}
9087
/*
 * Unregister a callback previously registered with ddi_cb_register().
 * Must not be called from interrupt context.  Frees the callback
 * record and clears the devinfo's callback pointer.
 */
int
ddi_cb_unregister(ddi_cb_handle_t hdl)
{
	ddi_cb_t *cbp;
	dev_info_t *dip;

	ASSERT(hdl != NULL);

	/* Sanity check the context */
	ASSERT(!servicing_interrupt());
	if (servicing_interrupt())
		return (DDI_FAILURE);

	/* Validate parameters */
	if ((hdl == NULL) || ((cbp = *(ddi_cb_t **)hdl) == NULL) ||
	    ((dip = cbp->cb_dip) == NULL))
		return (DDI_EINVAL);

	/* If removing an IRM callback, notify IRM */
	if (cbp->cb_flags & DDI_CB_FLAG_INTR)
		i_ddi_irm_set_cb(dip, B_FALSE);

	/* Destroy the callback */
	kmem_free(cbp, sizeof (ddi_cb_t));
	DEVI(dip)->devi_cb_p = NULL;

	return (DDI_SUCCESS);
}
9116
9117 /*
9118 * Platform independent DR routines
9119 */
9120
9121 static int
ndi2errno(int n)9122 ndi2errno(int n)
9123 {
9124 int err = 0;
9125
9126 switch (n) {
9127 case NDI_NOMEM:
9128 err = ENOMEM;
9129 break;
9130 case NDI_BUSY:
9131 err = EBUSY;
9132 break;
9133 case NDI_FAULT:
9134 err = EFAULT;
9135 break;
9136 case NDI_FAILURE:
9137 err = EIO;
9138 break;
9139 case NDI_SUCCESS:
9140 break;
9141 case NDI_BADHANDLE:
9142 default:
9143 err = EINVAL;
9144 break;
9145 }
9146 return (err);
9147 }
9148
/*
 * Prom tree node list: singly-linked list of PROM node ids collected
 * by visit_node() and consumed by create_prom_branch().
 */
struct ptnode {
	pnode_t nodeid;		/* PROM node id of a selected node */
	struct ptnode *next;	/* next selected node, or NULL */
};
9156
/*
 * Prom tree walk arg: shared state threaded through the PROM tree
 * walk (visit_node/create_prom_branch).
 */
struct pta {
	dev_info_t *pdip;	/* parent dip under which to create branches */
	devi_branch_t *bp;	/* caller's branch descriptor and callbacks */
	uint_t flags;		/* DEVI_BRANCH_* flags */
	dev_info_t *fdip;	/* set by e_ddi_branch_configure on failure */
	struct ptnode *head;	/* list of selected PROM nodes */
};
9167
/*
 * Recursively walk the PROM tree rooted at nodeid, appending every
 * node accepted by the caller's prom_branch_select() predicate to the
 * ap->head list.  With DEVI_BRANCH_CHILD set, only the node itself is
 * considered and its children are not descended into.
 */
static void
visit_node(pnode_t nodeid, struct pta *ap)
{
	struct ptnode **nextp;
	int (*select)(pnode_t, void *, uint_t);

	ASSERT(nodeid != OBP_NONODE && nodeid != OBP_BADNODE);

	select = ap->bp->create.prom_branch_select;

	ASSERT(select);

	if (select(nodeid, ap->bp->arg, 0) == DDI_SUCCESS) {

		/* Walk to the tail of the list to preserve visit order. */
		for (nextp = &ap->head; *nextp; nextp = &(*nextp)->next)
			;

		*nextp = kmem_zalloc(sizeof (struct ptnode), KM_SLEEP);

		(*nextp)->nodeid = nodeid;
	}

	if ((ap->flags & DEVI_BRANCH_CHILD) == DEVI_BRANCH_CHILD)
		return;

	/* Recurse into each child of this node. */
	nodeid = prom_childnode(nodeid);
	while (nodeid != OBP_NONODE && nodeid != OBP_BADNODE) {
		visit_node(nodeid, ap);
		nodeid = prom_nextnode(nodeid);
	}
}
9199
9200 /*
9201 * NOTE: The caller of this function must check for device contracts
9202 * or LDI callbacks against this dip before setting the dip offline.
9203 */
static int
set_infant_dip_offline(dev_info_t *dip, void *arg)
{
	/* arg is a caller-supplied MAXPATHLEN scratch buffer */
	char *path = (char *)arg;

	ASSERT(dip);
	ASSERT(arg);

	/* Refuse to mark an already-attached node offline. */
	if (i_ddi_node_state(dip) >= DS_ATTACHED) {
		(void) ddi_pathname(dip, path);
		cmn_err(CE_WARN, "Attempt to set offline flag on attached "
		    "node: %s", path);
		return (DDI_FAILURE);
	}

	mutex_enter(&(DEVI(dip)->devi_lock));
	if (!DEVI_IS_DEVICE_OFFLINE(dip))
		DEVI_SET_DEVICE_OFFLINE(dip);
	mutex_exit(&(DEVI(dip)->devi_lock));

	return (DDI_SUCCESS);
}
9226
/* Walk state shared with dip_set_offline(). */
typedef struct result {
	char *path;	/* scratch buffer for pathname reporting */
	int result;	/* overall walk outcome (DDI_SUCCESS/DDI_FAILURE) */
} result_t;
9231
/*
 * ddi_walk_devs() callback: mark one dip offline, bracketing the
 * attempt with the e_ddi_offline_notify/finalize protocol so LDI and
 * contract consumers get a chance to veto.
 */
static int
dip_set_offline(dev_info_t *dip, void *arg)
{
	int end;
	result_t *resp = (result_t *)arg;

	ASSERT(dip);
	ASSERT(resp);

	/*
	 * We stop the walk if e_ddi_offline_notify() returns
	 * failure, because this implies that one or more consumers
	 * (either LDI or contract based) has blocked the offline.
	 * So there is no point in continuing the walk
	 */
	if (e_ddi_offline_notify(dip) == DDI_FAILURE) {
		resp->result = DDI_FAILURE;
		return (DDI_WALK_TERMINATE);
	}

	/*
	 * If set_infant_dip_offline() returns failure, it implies
	 * that we failed to set a particular dip offline. This
	 * does not imply that the offline as a whole should fail.
	 * We want to do the best we can, so we continue the walk.
	 */
	if (set_infant_dip_offline(dip, resp->path) == DDI_SUCCESS)
		end = DDI_SUCCESS;
	else
		end = DDI_FAILURE;

	e_ddi_offline_finalize(dip, end);

	return (DDI_WALK_CONTINUE);
}
9267
9268 /*
9269 * The call to e_ddi_offline_notify() exists for the
9270 * unlikely error case that a branch we are trying to
9271 * create already exists and has device contracts or LDI
9272 * event callbacks against it.
9273 *
9274 * We allow create to succeed for such branches only if
9275 * no constraints block the offline.
9276 */
static int
branch_set_offline(dev_info_t *dip, char *path)
{
	int circ;
	int end;
	result_t res;


	/* First offline the branch root itself, honoring vetoes. */
	if (e_ddi_offline_notify(dip) == DDI_FAILURE) {
		return (DDI_FAILURE);
	}

	if (set_infant_dip_offline(dip, path) == DDI_SUCCESS)
		end = DDI_SUCCESS;
	else
		end = DDI_FAILURE;

	e_ddi_offline_finalize(dip, end);

	if (end == DDI_FAILURE)
		return (DDI_FAILURE);

	/* Then walk all descendants, offlining each in turn. */
	res.result = DDI_SUCCESS;
	res.path = path;

	ndi_devi_enter(dip, &circ);
	ddi_walk_devs(ddi_get_child(dip), dip_set_offline, &res);
	ndi_devi_exit(dip, circ);

	return (res.result);
}
9308
9309 /*ARGSUSED*/
/*
 * Build devinfo branches for PROM subtree nodes selected by the
 * caller's predicate.  For each selected node: create (or find) the
 * branch, hold it, mark it offline so only an explicit configure can
 * attach it, optionally configure it, and invoke the caller's branch
 * callback for newly created branches.  Returns 0 or the first errno
 * encountered (later nodes are still processed).
 */
static int
create_prom_branch(void *arg, int has_changed)
{
	int circ;
	int exists, rv;
	pnode_t nodeid;
	struct ptnode *tnp;
	dev_info_t *dip;
	struct pta *ap = arg;
	devi_branch_t *bp;
	char *path;

	ASSERT(ap);
	ASSERT(ap->fdip == NULL);
	ASSERT(ap->pdip && ndi_dev_is_prom_node(ap->pdip));

	bp = ap->bp;

	nodeid = ddi_get_nodeid(ap->pdip);
	if (nodeid == OBP_NONODE || nodeid == OBP_BADNODE) {
		cmn_err(CE_WARN, "create_prom_branch: invalid "
		    "nodeid: 0x%x", nodeid);
		return (EINVAL);
	}

	ap->head = NULL;

	/* Collect matching PROM nodes under the parent into ap->head. */
	nodeid = prom_childnode(nodeid);
	while (nodeid != OBP_NONODE && nodeid != OBP_BADNODE) {
		visit_node(nodeid, ap);
		nodeid = prom_nextnode(nodeid);
	}

	if (ap->head == NULL)
		return (ENODEV);

	path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	rv = 0;
	while ((tnp = ap->head) != NULL) {
		ap->head = tnp->next;

		ndi_devi_enter(ap->pdip, &circ);

		/*
		 * Check if the branch already exists.
		 */
		exists = 0;
		dip = e_ddi_nodeid_to_dip(tnp->nodeid);
		if (dip != NULL) {
			exists = 1;

			/* Parent is held busy, so release hold */
			ndi_rele_devi(dip);
#ifdef	DEBUG
			cmn_err(CE_WARN, "create_prom_branch: dip(%p) exists"
			    " for nodeid 0x%x", (void *)dip, tnp->nodeid);
#endif
		} else {
			dip = i_ddi_create_branch(ap->pdip, tnp->nodeid);
		}

		kmem_free(tnp, sizeof (struct ptnode));

		/*
		 * Hold the branch if it is not already held
		 */
		if (dip && !exists) {
			e_ddi_branch_hold(dip);
		}

		ASSERT(dip == NULL || e_ddi_branch_held(dip));

		/*
		 * Set all dips in the newly created branch offline so that
		 * only a "configure" operation can attach
		 * the branch
		 */
		if (dip == NULL || branch_set_offline(dip, path)
		    == DDI_FAILURE) {
			ndi_devi_exit(ap->pdip, circ);
			rv = EIO;
			continue;
		}

		ASSERT(ddi_get_parent(dip) == ap->pdip);

		ndi_devi_exit(ap->pdip, circ);

		if (ap->flags & DEVI_BRANCH_CONFIGURE) {
			/* Remember the first configure error only. */
			int error = e_ddi_branch_configure(dip, &ap->fdip, 0);
			if (error && rv == 0)
				rv = error;
		}

		/*
		 * Invoke devi_branch_callback() (if it exists) only for
		 * newly created branches
		 */
		if (bp->devi_branch_callback && !exists)
			bp->devi_branch_callback(dip, bp->arg, 0);
	}

	kmem_free(path, MAXPATHLEN);

	return (rv);
}
9416
/*
 * Create one self-identifying (SID) devinfo node under pdip by invoking
 * the caller-supplied sid_branch_create() callback to name/populate it,
 * then recurse to create its children.  If rdipp is non-NULL, this node
 * is the root of a new branch (flags = DEVI_BRANCH_ROOT) and *rdipp is
 * set to the created node (or NULL on early failure).
 *
 * The callback's DDI_WALK_* result drives the recursion, and the value
 * returned to our caller tells it whether to keep creating siblings:
 * DDI_WALK_PRUNESIB or DDI_WALK_ERROR stop the caller's sibling loop.
 *
 * pdip must be held busy by the caller (asserted below).
 */
static int
sid_node_create(dev_info_t *pdip, devi_branch_t *bp, dev_info_t **rdipp)
{
	int rv, circ, len;
	int i, flags, ret;
	dev_info_t *dip;
	char *nbuf;
	char *path;
	static const char *noname = "<none>";

	ASSERT(pdip);
	ASSERT(DEVI_BUSY_OWNED(pdip));

	flags = 0;

	/*
	 * Creating the root of a branch ?
	 */
	if (rdipp) {
		*rdipp = NULL;
		flags = DEVI_BRANCH_ROOT;
	}

	/* Allocate a placeholder node; the callback supplies the real name */
	ndi_devi_alloc_sleep(pdip, (char *)noname, DEVI_SID_NODEID, &dip);
	rv = bp->create.sid_branch_create(dip, bp->arg, flags);

	nbuf = kmem_alloc(OBP_MAXDRVNAME, KM_SLEEP);

	if (rv == DDI_WALK_ERROR) {
		cmn_err(CE_WARN, "e_ddi_branch_create: Error setting"
		    " properties on devinfo node %p", (void *)dip);
		goto fail;
	}

	/* The callback must have set a "name" property on the new node */
	len = OBP_MAXDRVNAME;
	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "name", nbuf, &len)
	    != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN, "e_ddi_branch_create: devinfo node %p has"
		    "no name property", (void *)dip);
		goto fail;
	}

	ASSERT(i_ddi_node_state(dip) == DS_PROTO);
	if (ndi_devi_set_nodename(dip, nbuf, 0) != NDI_SUCCESS) {
		cmn_err(CE_WARN, "e_ddi_branch_create: cannot set name (%s)"
		    " for devinfo node %p", nbuf, (void *)dip);
		goto fail;
	}

	kmem_free(nbuf, OBP_MAXDRVNAME);

	/*
	 * Ignore bind failures just like boot does
	 */
	(void) ndi_devi_bind_driver(dip, 0);

	switch (rv) {
	case DDI_WALK_CONTINUE:
	case DDI_WALK_PRUNESIB:
		/* Recurse: create children of dip until told to stop */
		ndi_devi_enter(dip, &circ);

		i = DDI_WALK_CONTINUE;
		for (; i == DDI_WALK_CONTINUE; ) {
			i = sid_node_create(dip, bp, NULL);
		}

		ASSERT(i == DDI_WALK_ERROR || i == DDI_WALK_PRUNESIB);
		if (i == DDI_WALK_ERROR)
			rv = i;
		/*
		 * If PRUNESIB stop creating siblings
		 * of dip's child. Subsequent walk behavior
		 * is determined by rv returned by dip.
		 */

		ndi_devi_exit(dip, circ);
		break;
	case DDI_WALK_TERMINATE:
		/*
		 * Don't create children and ask our parent
		 * to not create siblings either.
		 */
		rv = DDI_WALK_PRUNESIB;
		break;
	case DDI_WALK_PRUNECHILD:
		/*
		 * Don't create children, but ask parent to continue
		 * with siblings.
		 */
		rv = DDI_WALK_CONTINUE;
		break;
	default:
		ASSERT(0);
		break;
	}

	if (rdipp)
		*rdipp = dip;

	/*
	 * Set device offline - only the "configure" op should cause an attach.
	 * Note that it is safe to set the dip offline without checking
	 * for either device contract or layered driver (LDI) based constraints
	 * since there cannot be any contracts or LDI opens of this device.
	 * This is because this node is a newly created dip with the parent busy
	 * held, so no other thread can come in and attach this dip. A dip that
	 * has never been attached cannot have contracts since by definition
	 * a device contract (an agreement between a process and a device minor
	 * node) can only be created against a device that has minor nodes
	 * i.e is attached. Similarly an LDI open will only succeed if the
	 * dip is attached. We assert below that the dip is not attached.
	 */
	ASSERT(i_ddi_node_state(dip) < DS_ATTACHED);
	path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	ret = set_infant_dip_offline(dip, path);
	ASSERT(ret == DDI_SUCCESS);
	kmem_free(path, MAXPATHLEN);

	return (rv);
fail:
	/* Undo the allocation above; dip was never made visible */
	(void) ndi_devi_free(dip);
	kmem_free(nbuf, OBP_MAXDRVNAME);
	return (DDI_WALK_ERROR);
}
9542
9543 static int
create_sid_branch(dev_info_t * pdip,devi_branch_t * bp,dev_info_t ** dipp,uint_t flags)9544 create_sid_branch(
9545 dev_info_t *pdip,
9546 devi_branch_t *bp,
9547 dev_info_t **dipp,
9548 uint_t flags)
9549 {
9550 int rv = 0, state = DDI_WALK_CONTINUE;
9551 dev_info_t *rdip;
9552
9553 while (state == DDI_WALK_CONTINUE) {
9554 int circ;
9555
9556 ndi_devi_enter(pdip, &circ);
9557
9558 state = sid_node_create(pdip, bp, &rdip);
9559 if (rdip == NULL) {
9560 ndi_devi_exit(pdip, circ);
9561 ASSERT(state == DDI_WALK_ERROR);
9562 break;
9563 }
9564
9565 e_ddi_branch_hold(rdip);
9566
9567 ndi_devi_exit(pdip, circ);
9568
9569 if (flags & DEVI_BRANCH_CONFIGURE) {
9570 int error = e_ddi_branch_configure(rdip, dipp, 0);
9571 if (error && rv == 0)
9572 rv = error;
9573 }
9574
9575 /*
9576 * devi_branch_callback() is optional
9577 */
9578 if (bp->devi_branch_callback)
9579 bp->devi_branch_callback(rdip, bp->arg, 0);
9580 }
9581
9582 ASSERT(state == DDI_WALK_ERROR || state == DDI_WALK_PRUNESIB);
9583
9584 return (state == DDI_WALK_ERROR ? EIO : rv);
9585 }
9586
9587 int
e_ddi_branch_create(dev_info_t * pdip,devi_branch_t * bp,dev_info_t ** dipp,uint_t flags)9588 e_ddi_branch_create(
9589 dev_info_t *pdip,
9590 devi_branch_t *bp,
9591 dev_info_t **dipp,
9592 uint_t flags)
9593 {
9594 int prom_devi, sid_devi, error;
9595
9596 if (pdip == NULL || bp == NULL || bp->type == 0)
9597 return (EINVAL);
9598
9599 prom_devi = (bp->type == DEVI_BRANCH_PROM) ? 1 : 0;
9600 sid_devi = (bp->type == DEVI_BRANCH_SID) ? 1 : 0;
9601
9602 if (prom_devi && bp->create.prom_branch_select == NULL)
9603 return (EINVAL);
9604 else if (sid_devi && bp->create.sid_branch_create == NULL)
9605 return (EINVAL);
9606 else if (!prom_devi && !sid_devi)
9607 return (EINVAL);
9608
9609 if (flags & DEVI_BRANCH_EVENT)
9610 return (EINVAL);
9611
9612 if (prom_devi) {
9613 struct pta pta = {0};
9614
9615 pta.pdip = pdip;
9616 pta.bp = bp;
9617 pta.flags = flags;
9618
9619 error = prom_tree_access(create_prom_branch, &pta, NULL);
9620
9621 if (dipp)
9622 *dipp = pta.fdip;
9623 else if (pta.fdip)
9624 ndi_rele_devi(pta.fdip);
9625 } else {
9626 error = create_sid_branch(pdip, bp, dipp, flags);
9627 }
9628
9629 return (error);
9630 }
9631
9632 int
e_ddi_branch_configure(dev_info_t * rdip,dev_info_t ** dipp,uint_t flags)9633 e_ddi_branch_configure(dev_info_t *rdip, dev_info_t **dipp, uint_t flags)
9634 {
9635 int rv;
9636 char *devnm;
9637 dev_info_t *pdip;
9638
9639 if (dipp)
9640 *dipp = NULL;
9641
9642 if (rdip == NULL || flags != 0 || (flags & DEVI_BRANCH_EVENT))
9643 return (EINVAL);
9644
9645 pdip = ddi_get_parent(rdip);
9646
9647 ndi_hold_devi(pdip);
9648
9649 if (!e_ddi_branch_held(rdip)) {
9650 ndi_rele_devi(pdip);
9651 cmn_err(CE_WARN, "e_ddi_branch_configure: "
9652 "dip(%p) not held", (void *)rdip);
9653 return (EINVAL);
9654 }
9655
9656 if (i_ddi_node_state(rdip) < DS_INITIALIZED) {
9657 /*
9658 * First attempt to bind a driver. If we fail, return
9659 * success (On some platforms, dips for some device
9660 * types (CPUs) may not have a driver)
9661 */
9662 if (ndi_devi_bind_driver(rdip, 0) != NDI_SUCCESS) {
9663 ndi_rele_devi(pdip);
9664 return (0);
9665 }
9666
9667 if (ddi_initchild(pdip, rdip) != DDI_SUCCESS) {
9668 rv = NDI_FAILURE;
9669 goto out;
9670 }
9671 }
9672
9673 ASSERT(i_ddi_node_state(rdip) >= DS_INITIALIZED);
9674
9675 devnm = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
9676
9677 (void) ddi_deviname(rdip, devnm);
9678
9679 if ((rv = ndi_devi_config_one(pdip, devnm+1, &rdip,
9680 NDI_DEVI_ONLINE | NDI_CONFIG)) == NDI_SUCCESS) {
9681 /* release hold from ndi_devi_config_one() */
9682 ndi_rele_devi(rdip);
9683 }
9684
9685 kmem_free(devnm, MAXNAMELEN + 1);
9686 out:
9687 if (rv != NDI_SUCCESS && dipp && rdip) {
9688 ndi_hold_devi(rdip);
9689 *dipp = rdip;
9690 }
9691 ndi_rele_devi(pdip);
9692 return (ndi2errno(rv));
9693 }
9694
9695 void
e_ddi_branch_hold(dev_info_t * rdip)9696 e_ddi_branch_hold(dev_info_t *rdip)
9697 {
9698 if (e_ddi_branch_held(rdip)) {
9699 cmn_err(CE_WARN, "e_ddi_branch_hold: branch already held");
9700 return;
9701 }
9702
9703 mutex_enter(&DEVI(rdip)->devi_lock);
9704 if ((DEVI(rdip)->devi_flags & DEVI_BRANCH_HELD) == 0) {
9705 DEVI(rdip)->devi_flags |= DEVI_BRANCH_HELD;
9706 DEVI(rdip)->devi_ref++;
9707 }
9708 ASSERT(DEVI(rdip)->devi_ref > 0);
9709 mutex_exit(&DEVI(rdip)->devi_lock);
9710 }
9711
9712 int
e_ddi_branch_held(dev_info_t * rdip)9713 e_ddi_branch_held(dev_info_t *rdip)
9714 {
9715 int rv = 0;
9716
9717 mutex_enter(&DEVI(rdip)->devi_lock);
9718 if ((DEVI(rdip)->devi_flags & DEVI_BRANCH_HELD) &&
9719 DEVI(rdip)->devi_ref > 0) {
9720 rv = 1;
9721 }
9722 mutex_exit(&DEVI(rdip)->devi_lock);
9723
9724 return (rv);
9725 }
9726
9727 void
e_ddi_branch_rele(dev_info_t * rdip)9728 e_ddi_branch_rele(dev_info_t *rdip)
9729 {
9730 mutex_enter(&DEVI(rdip)->devi_lock);
9731 DEVI(rdip)->devi_flags &= ~DEVI_BRANCH_HELD;
9732 DEVI(rdip)->devi_ref--;
9733 mutex_exit(&DEVI(rdip)->devi_lock);
9734 }
9735
9736 int
e_ddi_branch_unconfigure(dev_info_t * rdip,dev_info_t ** dipp,uint_t flags)9737 e_ddi_branch_unconfigure(
9738 dev_info_t *rdip,
9739 dev_info_t **dipp,
9740 uint_t flags)
9741 {
9742 int circ, rv;
9743 int destroy;
9744 char *devnm;
9745 uint_t nflags;
9746 dev_info_t *pdip;
9747
9748 if (dipp)
9749 *dipp = NULL;
9750
9751 if (rdip == NULL)
9752 return (EINVAL);
9753
9754 pdip = ddi_get_parent(rdip);
9755
9756 ASSERT(pdip);
9757
9758 /*
9759 * Check if caller holds pdip busy - can cause deadlocks during
9760 * devfs_clean()
9761 */
9762 if (DEVI_BUSY_OWNED(pdip)) {
9763 cmn_err(CE_WARN, "e_ddi_branch_unconfigure: failed: parent"
9764 " devinfo node(%p) is busy held", (void *)pdip);
9765 return (EINVAL);
9766 }
9767
9768 destroy = (flags & DEVI_BRANCH_DESTROY) ? 1 : 0;
9769
9770 devnm = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
9771
9772 ndi_devi_enter(pdip, &circ);
9773 (void) ddi_deviname(rdip, devnm);
9774 ndi_devi_exit(pdip, circ);
9775
9776 /*
9777 * ddi_deviname() returns a component name with / prepended.
9778 */
9779 (void) devfs_clean(pdip, devnm + 1, DV_CLEAN_FORCE);
9780
9781 ndi_devi_enter(pdip, &circ);
9782
9783 /*
9784 * Recreate device name as it may have changed state (init/uninit)
9785 * when parent busy lock was dropped for devfs_clean()
9786 */
9787 (void) ddi_deviname(rdip, devnm);
9788
9789 if (!e_ddi_branch_held(rdip)) {
9790 kmem_free(devnm, MAXNAMELEN + 1);
9791 ndi_devi_exit(pdip, circ);
9792 cmn_err(CE_WARN, "e_ddi_%s_branch: dip(%p) not held",
9793 destroy ? "destroy" : "unconfigure", (void *)rdip);
9794 return (EINVAL);
9795 }
9796
9797 /*
9798 * Release hold on the branch. This is ok since we are holding the
9799 * parent busy. If rdip is not removed, we must do a hold on the
9800 * branch before returning.
9801 */
9802 e_ddi_branch_rele(rdip);
9803
9804 nflags = NDI_DEVI_OFFLINE;
9805 if (destroy || (flags & DEVI_BRANCH_DESTROY)) {
9806 nflags |= NDI_DEVI_REMOVE;
9807 destroy = 1;
9808 } else {
9809 nflags |= NDI_UNCONFIG; /* uninit but don't remove */
9810 }
9811
9812 if (flags & DEVI_BRANCH_EVENT)
9813 nflags |= NDI_POST_EVENT;
9814
9815 if (i_ddi_devi_attached(pdip) &&
9816 (i_ddi_node_state(rdip) >= DS_INITIALIZED)) {
9817 rv = ndi_devi_unconfig_one(pdip, devnm+1, dipp, nflags);
9818 } else {
9819 rv = e_ddi_devi_unconfig(rdip, dipp, nflags);
9820 if (rv == NDI_SUCCESS) {
9821 ASSERT(!destroy || ddi_get_child(rdip) == NULL);
9822 rv = ndi_devi_offline(rdip, nflags);
9823 }
9824 }
9825
9826 if (!destroy || rv != NDI_SUCCESS) {
9827 /* The dip still exists, so do a hold */
9828 e_ddi_branch_hold(rdip);
9829 }
9830 out:
9831 kmem_free(devnm, MAXNAMELEN + 1);
9832 ndi_devi_exit(pdip, circ);
9833 return (ndi2errno(rv));
9834 }
9835
9836 int
e_ddi_branch_destroy(dev_info_t * rdip,dev_info_t ** dipp,uint_t flag)9837 e_ddi_branch_destroy(dev_info_t *rdip, dev_info_t **dipp, uint_t flag)
9838 {
9839 return (e_ddi_branch_unconfigure(rdip, dipp,
9840 flag|DEVI_BRANCH_DESTROY));
9841 }
9842
/*
 * Number of chains for hash table
 */
#define	NUMCHAINS	17

/*
 * Devinfo busy arg - walk state shared by e_ddi_branch_referenced()
 * and its visit_dvnode()/visit_snode()/visit_dip() helpers.
 */
struct devi_busy {
	int dv_total;		/* sum of dv_node vnode counts seen */
	int s_total;		/* sum of snode open counts seen */
	mod_hash_t *dv_hash;	/* per-dip dv_node vnode-count totals */
	mod_hash_t *s_hash;	/* per-dip snode open-count totals */
	int (*callback)(dev_info_t *, void *, uint_t); /* per-dip callback */
	void *arg;		/* opaque argument passed to callback */
};
9859
9860 static int
visit_dip(dev_info_t * dip,void * arg)9861 visit_dip(dev_info_t *dip, void *arg)
9862 {
9863 uintptr_t sbusy, dvbusy, ref;
9864 struct devi_busy *bsp = arg;
9865
9866 ASSERT(bsp->callback);
9867
9868 /*
9869 * A dip cannot be busy if its reference count is 0
9870 */
9871 if ((ref = e_ddi_devi_holdcnt(dip)) == 0) {
9872 return (bsp->callback(dip, bsp->arg, 0));
9873 }
9874
9875 if (mod_hash_find(bsp->dv_hash, dip, (mod_hash_val_t *)&dvbusy))
9876 dvbusy = 0;
9877
9878 /*
9879 * To catch device opens currently maintained on specfs common snodes.
9880 */
9881 if (mod_hash_find(bsp->s_hash, dip, (mod_hash_val_t *)&sbusy))
9882 sbusy = 0;
9883
9884 #ifdef DEBUG
9885 if (ref < sbusy || ref < dvbusy) {
9886 cmn_err(CE_WARN, "dip(%p): sopen = %lu, dvopen = %lu "
9887 "dip ref = %lu\n", (void *)dip, sbusy, dvbusy, ref);
9888 }
9889 #endif
9890
9891 dvbusy = (sbusy > dvbusy) ? sbusy : dvbusy;
9892
9893 return (bsp->callback(dip, bsp->arg, dvbusy));
9894 }
9895
/*
 * Per-snode walker for spec_snode_walk(): accumulate the snode's device
 * open count into the per-dip total kept in bsp->s_hash, and add it to
 * the running bsp->s_total.
 */
static int
visit_snode(struct snode *sp, void *arg)
{
	uintptr_t sbusy;
	dev_info_t *dip;
	int count;
	struct devi_busy *bsp = arg;

	ASSERT(sp);

	/*
	 * The stable lock is held. This prevents
	 * the snode and its associated dip from
	 * going away.
	 */
	dip = NULL;
	count = spec_devi_open_count(sp, &dip);

	if (count <= 0)
		return (DDI_WALK_CONTINUE);

	ASSERT(dip);

	/*
	 * Accumulate per-dip totals: remove any existing entry for dip
	 * (mod_hash_remove() is nonzero when none exists), add this
	 * snode's count, then reinsert.
	 */
	if (mod_hash_remove(bsp->s_hash, dip, (mod_hash_val_t *)&sbusy))
		sbusy = count;
	else
		sbusy += count;

	if (mod_hash_insert(bsp->s_hash, dip, (mod_hash_val_t)sbusy)) {
		cmn_err(CE_WARN, "%s: s_hash insert failed: dip=0x%p, "
		    "sbusy = %lu", "e_ddi_branch_referenced",
		    (void *)dip, sbusy);
	}

	bsp->s_total += count;

	return (DDI_WALK_CONTINUE);
}
9934
9935 static void
visit_dvnode(struct dv_node * dv,void * arg)9936 visit_dvnode(struct dv_node *dv, void *arg)
9937 {
9938 uintptr_t dvbusy;
9939 uint_t count;
9940 struct vnode *vp;
9941 struct devi_busy *bsp = arg;
9942
9943 ASSERT(dv && dv->dv_devi);
9944
9945 vp = DVTOV(dv);
9946
9947 mutex_enter(&vp->v_lock);
9948 count = vp->v_count;
9949 mutex_exit(&vp->v_lock);
9950
9951 if (!count)
9952 return;
9953
9954 if (mod_hash_remove(bsp->dv_hash, dv->dv_devi,
9955 (mod_hash_val_t *)&dvbusy))
9956 dvbusy = count;
9957 else
9958 dvbusy += count;
9959
9960 if (mod_hash_insert(bsp->dv_hash, dv->dv_devi,
9961 (mod_hash_val_t)dvbusy)) {
9962 cmn_err(CE_WARN, "%s: dv_hash insert failed: dip=0x%p, "
9963 "dvbusy=%lu", "e_ddi_branch_referenced",
9964 (void *)dv->dv_devi, dvbusy);
9965 }
9966
9967 bsp->dv_total += count;
9968 }
9969
/*
 * Returns reference count on success or -1 on failure.
 *
 * Computes how busy the branch rooted at rdip is by walking its devfs
 * dv_nodes (vnode hold counts) and the specfs snode table (device open
 * counts), then optionally invokes 'callback' for rdip and every dip
 * below it with that dip's busy count.  The branch must be held and the
 * caller must not hold the parent busy (devfs_walk() can deadlock).
 * The returned value is the larger of the two walk totals.
 */
int
e_ddi_branch_referenced(
	dev_info_t *rdip,
	int (*callback)(dev_info_t *dip, void *arg, uint_t ref),
	void *arg)
{
	int circ;
	char *path;
	dev_info_t *pdip;
	struct devi_busy bsa = {0};

	ASSERT(rdip);

	path = kmem_alloc(MAXPATHLEN, KM_SLEEP);

	/* Keep rdip from going away for the duration of the walks */
	ndi_hold_devi(rdip);

	pdip = ddi_get_parent(rdip);

	ASSERT(pdip);

	/*
	 * Check if caller holds pdip busy - can cause deadlocks during
	 * devfs_walk()
	 */
	if (!e_ddi_branch_held(rdip) || DEVI_BUSY_OWNED(pdip)) {
		cmn_err(CE_WARN, "e_ddi_branch_referenced: failed: "
		    "devinfo branch(%p) not held or parent busy held",
		    (void *)rdip);
		ndi_rele_devi(rdip);
		kmem_free(path, MAXPATHLEN);
		return (-1);
	}

	/* Snapshot rdip's pathname with the parent held busy */
	ndi_devi_enter(pdip, &circ);
	(void) ddi_pathname(rdip, path);
	ndi_devi_exit(pdip, circ);

	/* Per-dip accumulators keyed by devinfo pointer */
	bsa.dv_hash = mod_hash_create_ptrhash("dv_node busy hash", NUMCHAINS,
	    mod_hash_null_valdtor, sizeof (struct dev_info));

	bsa.s_hash = mod_hash_create_ptrhash("snode busy hash", NUMCHAINS,
	    mod_hash_null_valdtor, sizeof (struct snode));

	if (devfs_walk(path, visit_dvnode, &bsa)) {
		cmn_err(CE_WARN, "e_ddi_branch_referenced: "
		    "devfs walk failed for: %s", path);
		kmem_free(path, MAXPATHLEN);
		/* Force the -1 failure return from the common exit path */
		bsa.s_total = bsa.dv_total = -1;
		goto out;
	}

	kmem_free(path, MAXPATHLEN);

	/*
	 * Walk the snode table to detect device opens, which are currently
	 * maintained on specfs common snodes.
	 */
	spec_snode_walk(visit_snode, &bsa);

	if (callback == NULL)
		goto out;

	bsa.callback = callback;
	bsa.arg = arg;

	/* Report rdip first; descend only if the callback says continue */
	if (visit_dip(rdip, &bsa) == DDI_WALK_CONTINUE) {
		ndi_devi_enter(rdip, &circ);
		ddi_walk_devs(ddi_get_child(rdip), visit_dip, &bsa);
		ndi_devi_exit(rdip, circ);
	}

out:
	ndi_rele_devi(rdip);
	mod_hash_destroy_ptrhash(bsa.s_hash);
	mod_hash_destroy_ptrhash(bsa.dv_hash);
	return (bsa.s_total > bsa.dv_total ? bsa.s_total : bsa.dv_total);
}
10051