xref: /titanic_44/usr/src/uts/common/io/ppp/sppptun/sppptun.c (revision ee5416c9d7e449233197d5d20bc6b81e4ff091b2)
1 /*
2  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
3  * Use is subject to license terms.
4  */
5 
6 #pragma ident	"%Z%%M%	%I%	%E% SMI"
7 
8 #include <sys/types.h>
9 #include <sys/debug.h>
10 #include <sys/param.h>
11 #include <sys/stat.h>
12 #include <sys/systm.h>
13 #include <sys/socket.h>
14 #include <sys/stream.h>
15 #include <sys/stropts.h>
16 #include <sys/errno.h>
17 #include <sys/time.h>
18 #include <sys/cmn_err.h>
19 #include <sys/conf.h>
20 #include <sys/dlpi.h>
21 #include <sys/ddi.h>
22 #include <sys/kstat.h>
23 #include <sys/strsun.h>
24 #include <sys/bitmap.h>
25 #include <sys/sysmacros.h>
26 #include <sys/note.h>
27 #include <sys/policy.h>
28 #include <net/ppp_defs.h>
29 #include <net/pppio.h>
30 #include <net/sppptun.h>
31 #include <net/pppoe.h>
32 #include <netinet/in.h>
33 
34 #include "s_common.h"
35 #include "sppptun_mod.h"
36 #include "sppptun_impl.h"
37 
38 #define	NTUN_INITIAL 16			/* Initial number of sppptun slots */
39 #define	NTUN_PERCENT 5			/* Percent of memory to use */
40 
41 /*
42  * This is used to tag official Solaris sources.  Please do not define
43  * "INTERNAL_BUILD" when building this software outside of Sun
44  * Microsystems.
45  */
46 #ifdef INTERNAL_BUILD
47 /* MODINFO is limited to 32 characters. */
48 const char sppptun_driver_description[] = "PPP 4.0 tunnel driver v%I%";
49 const char sppptun_module_description[] = "PPP 4.0 tunnel module v%I%";
50 #else
51 const char sppptun_driver_description[] = "ANU PPP tundrv $Revision: $";
52 const char sppptun_module_description[] = "ANU PPP tunmod $Revision: $";
53 
54 /* LINTED */
55 static const char buildtime[] = "Built " __DATE__ " at " __TIME__
56 #ifdef DEBUG
57 " DEBUG"
58 #endif
59 "\n";
60 #endif
61 
62 /*
63  * Tunable values; these are similar to the values used in ptms_conf.c.
64  * Override these settings via /etc/system.
65  */
66 uint_t	sppptun_cnt = 0;		/* Minimum number of tunnels */
67 size_t	sppptun_max_pty = 0;		/* Maximum number of tunnels */
68 uint_t	sppptun_init_cnt = NTUN_INITIAL; /* Initial number of tunnel slots */
69 uint_t	sppptun_pctofmem = NTUN_PERCENT; /* Percent of memory to use */
70 
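/*
 * Destination address format handed to DLPI in DL_UNITDATA_REQ: the
 * peer MAC address followed by the SAP (Ethertype).
 */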
71 typedef struct ether_dest_s {
72 	ether_addr_t addr;
73 	ushort_t type;
74 } ether_dest_t;
75 
76 /* Allows unaligned access. */
77 #define	GETLONG(x)	(((x)[0]<<24)|((x)[1]<<16)|((x)[2]<<8)|(x)[3])
78 
79 static const char *tll_kstats_list[] = { TLL_KSTATS_NAMES };
80 static const char *tcl_kstats_list[] = { TCL_KSTATS_NAMES };
81 
82 #define	KREF(p, m, vn)	p->m.vn.value.ui64
83 #define	KINCR(p, m, vn)	++KREF(p, m, vn)
84 #define	KDECR(p, m, vn)	--KREF(p, m, vn)
85 
86 #define	KLINCR(vn)	KINCR(tll, tll_kstats, vn)
87 #define	KLDECR(vn)	KDECR(tll, tll_kstats, vn)
88 
89 #define	KCINCR(vn)	KINCR(tcl, tcl_kstats, vn)
90 #define	KCDECR(vn)	KDECR(tcl, tcl_kstats, vn)
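
/*
 * The KLINCR/KLDECR forms update the lower-layer (tll_kstats) counters
 * and KCINCR/KCDECR the per-client (tcl_kstats) counters; both assume
 * that a local variable named "tll" or "tcl" is in scope at the point
 * of use.
 */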
91 
92 #define	DBGTSIDE(t)	((((tuncl_t *)(t))->tcl_flags & TCLF_ISCLIENT) ? \
93 				"device" : "module")
94 #define	DBGQSIDE(q)	(DBGTSIDE((q)->q_ptr))
95 
96 static int	sppptun_open(queue_t *, dev_t *, int, int, cred_t *);
97 static int	sppptun_close(queue_t *);
98 static void	sppptun_urput(queue_t *, mblk_t *);
99 static void	sppptun_uwput(queue_t *, mblk_t *);
100 static int	sppptun_ursrv(queue_t *);
101 static int	sppptun_uwsrv(queue_t *);
102 static void	sppptun_lrput(queue_t *, mblk_t *);
103 static void	sppptun_lwput(queue_t *, mblk_t *);
104 
105 /*
106  * This is the hash table of clients.  Clients are the programs that
107  * open /dev/sppptun as a device.  There may be a large number of
108  * these; one per tunneled PPP session.
109  *
110  * Note: slots are offset from minor node value by 1 because
111  * vmem_alloc returns 0 for failure.
112  *
113  * The tcl_slots array entries are modified only when exclusive on
114  * both inner and outer perimeters.  This ensures that threads on
115  * shared perimeters always view this as unchanging memory with no
116  * need to lock around accesses.  (Specifically, the tcl_slots array
117  * is modified by entry to sppptun_open, sppptun_close, and _fini.)
118  */
119 static tuncl_t **tcl_slots = NULL;	/* Slots for tuncl_t */
120 static size_t tcl_nslots = 0;		/* Size of slot array */
121 static size_t tcl_minormax = 0;		/* Maximum number of tunnels */
122 static size_t tcl_inuse = 0;		/* # of tunnels currently allocated */
123 static krwlock_t tcl_rwlock;
124 static struct kmem_cache *tcl_cache = NULL;	/* tunnel cache */
125 static vmem_t *tcl_minor_arena = NULL; /* Arena for device minors */
126 
127 /*
128  * This is the simple list of lower layers.  For PPPoE, there is one
129  * of these per Ethernet interface.  Lower layers are established by
130  * "plumbing" -- using I_PLINK to connect the tunnel multiplexor to
131  * the physical interface.
132  */
133 static struct qelem tunll_list;
134 static int tunll_index;
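
/*
 * Illustrative sketch only (not part of the driver): a tunnel-server
 * daemon typically brings a lower layer into existence along these
 * lines, assuming a PPPoE service on a hypothetical NIC "hme0":
 *
 *	fd = open("/dev/hme0", O_RDWR);		(then DLPI attach/bind)
 *	ioctl(fd, I_PUSH, "sppptun");		(push this module)
 *	... PPPTUN_SNAME / PPPTUN_SINFO via I_STR to name and style it ...
 *	ioctl(ctlfd, I_PLINK, fd);		(ctlfd is /dev/sppptun)
 *
 * The exact sequence used by the daemon may differ; the point is that
 * pushing the module (sppptun_open with MODOPEN) creates the tunll_t
 * on this list, and I_PLINK then links the stream under the driver.
 */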
135 
136 /* Test value; if all zeroes, then address hasn't been set yet. */
137 static const ether_addr_t zero_mac_addr = { 0, 0, 0, 0, 0, 0 };
138 
139 #define	MIN_SET_FASTPATH_UNITDATAREQ_SIZE	\
140 	(sizeof (dl_unitdata_req_t) + 4)
141 
142 #define	TUN_MI_ID	2104	/* officially allocated module ID */
143 #define	TUN_MI_MINPSZ	(0)
144 #define	TUN_MI_MAXPSZ	(PPP_MAXMTU)
145 #define	TUN_MI_HIWAT	(PPP_MTU * 8)
146 #define	TUN_MI_LOWAT	(128)
147 
148 static struct module_info sppptun_modinfo = {
149 	TUN_MI_ID,		/* mi_idnum */
150 	PPP_TUN_NAME,		/* mi_idname */
151 	TUN_MI_MINPSZ,		/* mi_minpsz */
152 	TUN_MI_MAXPSZ,		/* mi_maxpsz */
153 	TUN_MI_HIWAT,		/* mi_hiwat */
154 	TUN_MI_LOWAT		/* mi_lowat */
155 };
156 
157 static struct qinit sppptun_urinit = {
158 	(int (*)())sppptun_urput, /* qi_putp */
159 	sppptun_ursrv,		/* qi_srvp */
160 	sppptun_open,		/* qi_qopen */
161 	sppptun_close,		/* qi_qclose */
162 	NULL,			/* qi_qadmin */
163 	&sppptun_modinfo,	/* qi_minfo */
164 	NULL			/* qi_mstat */
165 };
166 
167 static struct qinit sppptun_uwinit = {
168 	(int (*)())sppptun_uwput, /* qi_putp */
169 	sppptun_uwsrv,		/* qi_srvp */
170 	NULL,			/* qi_qopen */
171 	NULL,			/* qi_qclose */
172 	NULL,			/* qi_qadmin */
173 	&sppptun_modinfo,	/* qi_minfo */
174 	NULL			/* qi_mstat */
175 };
176 
177 static struct qinit sppptun_lrinit = {
178 	(int (*)())sppptun_lrput, /* qi_putp */
179 	NULL,			/* qi_srvp */
180 	NULL,			/* qi_qopen */
181 	NULL,			/* qi_qclose */
182 	NULL,			/* qi_qadmin */
183 	&sppptun_modinfo,	/* qi_minfo */
184 	NULL			/* qi_mstat */
185 };
186 
187 static struct qinit sppptun_lwinit = {
188 	(int (*)())sppptun_lwput, /* qi_putp */
189 	NULL,			/* qi_srvp */
190 	NULL,			/* qi_qopen */
191 	NULL,			/* qi_qclose */
192 	NULL,			/* qi_qadmin */
193 	&sppptun_modinfo,	/* qi_minfo */
194 	NULL			/* qi_mstat */
195 };
196 
197 /*
198  * This is referenced in sppptun_mod.c.
199  */
200 struct streamtab sppptun_tab = {
201 	&sppptun_urinit,	/* st_rdinit */
202 	&sppptun_uwinit,	/* st_wrinit */
203 	&sppptun_lrinit,	/* st_muxrinit */
204 	&sppptun_lwinit		/* st_muxwrinit */
205 };
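
/*
 * The st_rdinit/st_wrinit pair above serves the upper streams -- both
 * /dev/sppptun clients and instances of this module pushed over a NIC
 * -- while st_muxrinit/st_muxwrinit serve streams I_PLINKed beneath
 * the driver.
 */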
206 
207 /*
208  * Nifty packet dumper; copied from pppd AIX 4.1 port.  This routine
209  * dumps the raw received and transmitted data through syslog.  This
210  * allows debug of communications problems without resorting to a line
211  * analyzer.
212  *
213  * The expression "3*BYTES_PER_LINE" used frequently here is the width
214  * of the hex display; each byte printed takes two hex digits plus a space.
215  */
216 #define	BYTES_PER_LINE	8
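
/*
 * One frame thus produces output roughly like this (byte values below
 * are arbitrary, for illustration only):
 *
 *	sppp0: sent 8 bytes
 *	sppp0: ff 03 c0 21 01 01 00 0e   ...!....
 */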
217 static void
218 sppp_dump_frame(int unit, mblk_t *mptr, const char *msg)
219 {
220 	/*
221 	 * Buffer is big enough for hex digits, two spaces, ASCII output,
222 	 * and one NUL byte.
223 	 */
224 	char buf[3 * BYTES_PER_LINE + 2 + BYTES_PER_LINE + 1];
225 	uchar_t *rptr, *eptr;
226 	int i, chr;
227 	char *bp;
228 	static const char digits[] = "0123456789abcdef";
229 
230 	cmn_err(CE_CONT, "!sppp%d: %s %ld bytes\n", unit, msg, msgsize(mptr));
231 	i = 0;
232 	bp = buf;
233 	/* Add filler spaces between hex output and ASCII */
234 	buf[3 * BYTES_PER_LINE] = ' ';
235 	buf[3 * BYTES_PER_LINE + 1] = ' ';
236 	/* Add NUL byte at end */
237 	buf[sizeof (buf) - 1] = '\0';
238 	while (mptr != NULL) {
239 		rptr = mptr->b_rptr; /* get pointer to beginning  */
240 		eptr = mptr->b_wptr;
241 		while (rptr < eptr) {
242 			chr = *rptr++;
243 			/* convert byte to ascii hex */
244 			*bp++ = digits[chr >> 4];
245 			*bp++ = digits[chr & 0xf];
246 			*bp++ = ' ';
247 			/* Insert ASCII past hex output and filler */
248 			buf[3 * BYTES_PER_LINE + 2 + i] =
249 			    (chr >= 0x20 && chr <= 0x7E) ? (char)chr : '.';
250 			i++;
251 			if (i >= BYTES_PER_LINE) {
252 				cmn_err(CE_CONT, "!sppp%d: %s\n", unit,
253 				    buf);
254 				bp = buf;
255 				i = 0;
256 			}
257 		}
258 		mptr = mptr->b_cont;
259 	}
260 	if (bp > buf) {
261 		/* fill over unused hex display positions */
262 		while (bp < buf + 3 * BYTES_PER_LINE)
263 			*bp++ = ' ';
264 		/* terminate ASCII string at right position */
265 		buf[3 * BYTES_PER_LINE + 2 + i] = '\0';
266 		cmn_err(CE_CONT, "!sppp%d: %s\n", unit, buf);
267 	}
268 }
269 
270 /*
271  * Allocate another slot table twice as large as the original one
272  * (limited to global maximum).  Migrate all tunnels to the new slot
273  * table and free the original one.  Assumes we're exclusive on both
274  * inner and outer perimeters, and thus there are no other users of
275  * the tcl_slots array.
276  */
277 static minor_t
278 tcl_grow(void)
279 {
280 	minor_t old_size = tcl_nslots;
281 	minor_t new_size = 2 * old_size;
282 	tuncl_t **tcl_old = tcl_slots;
283 	tuncl_t **tcl_new;
284 	void  *vaddr;			/* vmem_add return value */
285 
286 	ASSERT(RW_LOCK_HELD(&tcl_rwlock));
287 
288 	/* Allocate the new tuncl_t slot array */
289 	tcl_new = kmem_zalloc(new_size * sizeof (tuncl_t *), KM_NOSLEEP);
290 	if (tcl_new == NULL)
291 		return ((minor_t)0);
292 
293 	/* Increase clone index space */
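	/*
	 * The minor arena is 1-based (minor 0 means failure), so it
	 * currently covers [1, old_size]; add [old_size + 1, new_size].
	 */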
294 	vaddr = vmem_add(tcl_minor_arena, (void*)((uintptr_t)old_size + 1),
295 	    new_size - old_size, VM_NOSLEEP);
296 
297 	if (vaddr == NULL) {
298 		kmem_free(tcl_new, new_size * sizeof (tuncl_t *));
299 		return ((minor_t)0);
300 	}
301 
302 	/* Migrate tuncl_t entries to a new location */
303 	tcl_nslots = new_size;
304 	bcopy(tcl_old, tcl_new, old_size * sizeof (tuncl_t *));
305 	tcl_slots = tcl_new;
306 	kmem_free(tcl_old, old_size * sizeof (tuncl_t *));
307 
308 	/* Allocate minor number and return it */
309 	return ((minor_t)(uintptr_t)vmem_alloc(tcl_minor_arena, 1, VM_NOSLEEP));
310 }
311 
312 /*
313  * Allocate new minor number and tunnel client entry.  Returns the new
314  * entry or NULL if no memory or maximum number of entries reached.
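 * A positive "wantminor" requests that specific minor number; any other
 * value lets the minor arena pick a free one.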
315  * Assumes we're exclusive on both inner and outer perimeters, and
316  * thus there are no other users of the tcl_slots array.
317  */
318 static tuncl_t *
319 tuncl_alloc(int wantminor)
320 {
321 	minor_t dminor;
322 	tuncl_t *tcl = NULL;
323 
324 	rw_enter(&tcl_rwlock, RW_WRITER);
325 
326 	ASSERT(tcl_slots != NULL);
327 
328 	/*
329 	 * Always try to allocate a new tunnel while the sppptun_cnt
330 	 * minimum has not yet been reached.  Once it has been reached,
331 	 * the maximum is determined by either the user-specified value
332 	 * (if non-zero) or our memory estimate, whichever is less.
333 	 */
334 	if (tcl_inuse >= sppptun_cnt) {
335 		/*
336 		 * The system has reached the required minimum number of
337 		 * tunnels, so check the denial-of-service limits.
338 		 *
339 		 * Use the user-imposed maximum, if configured, or the
340 		 * calculated memory constraint, whichever is smaller.
341 		 */
342 		size_t user_max = (sppptun_max_pty == 0 ? tcl_minormax :
343 		    min(sppptun_max_pty, tcl_minormax));
344 
345 		/* Do not try to allocate more than allowed */
346 		if (tcl_inuse >= user_max) {
347 			rw_exit(&tcl_rwlock);
348 			return (NULL);
349 		}
350 	}
351 	tcl_inuse++;
352 
353 	/*
354 	 * Allocate a new minor number.  If this fails, all slots are
355 	 * busy and we need to grow the slot array.
356 	 */
357 	if (wantminor <= 0) {
358 		dminor = (minor_t)(uintptr_t)vmem_alloc(tcl_minor_arena, 1,
359 		    VM_NOSLEEP);
360 		if (dminor == 0) {
361 			/* Grow the cache and retry allocation */
362 			dminor = tcl_grow();
363 		}
364 	} else {
365 		dminor = (minor_t)(uintptr_t)vmem_xalloc(tcl_minor_arena, 1,
366 		    0, 0, 0, (void *)(uintptr_t)wantminor,
367 		    (void *)((uintptr_t)wantminor+1), VM_NOSLEEP);
368 		if (dminor != 0 && dminor != wantminor) {
369 			vmem_free(tcl_minor_arena, (void *)(uintptr_t)dminor,
370 			    1);
371 			dminor = 0;
372 		}
373 	}
374 
375 	if (dminor == 0) {
376 		/* Could not allocate a minor number */
377 		tcl_inuse--;
378 		rw_exit(&tcl_rwlock);
379 		return (NULL);
380 	}
381 
382 	tcl = kmem_cache_alloc(tcl_cache, KM_NOSLEEP);
383 	if (tcl == NULL) {
384 		/* Not enough memory - this entry can't be used now. */
385 		vmem_free(tcl_minor_arena, (void *)(uintptr_t)dminor, 1);
386 		tcl_inuse--;
387 	} else {
388 		bzero(tcl, sizeof (*tcl));
389 		tcl->tcl_lsessid = dminor;
390 		ASSERT(tcl_slots[dminor - 1] == NULL);
391 		tcl_slots[dminor - 1] = tcl;
392 	}
393 
394 	rw_exit(&tcl_rwlock);
395 	return (tcl);
396 }
397 
398 /*
399  * This routine frees an upper level (client) stream by removing it
400  * from the minor number pool and freeing the state structure storage.
401  * Assumes we're exclusive on both inner and outer perimeters, and
402  * thus there are no other concurrent users of the tcl_slots array or
403  * of any entry in that array.
404  */
405 static void
406 tuncl_free(tuncl_t *tcl)
407 {
408 	rw_enter(&tcl_rwlock, RW_WRITER);
409 	ASSERT(tcl->tcl_lsessid <= tcl_nslots);
410 	ASSERT(tcl_slots[tcl->tcl_lsessid - 1] == tcl);
411 	ASSERT(tcl_inuse > 0);
412 	tcl_inuse--;
413 	tcl_slots[tcl->tcl_lsessid - 1] = NULL;
414 
415 	if (tcl->tcl_ksp != NULL) {
416 		kstat_delete(tcl->tcl_ksp);
417 		tcl->tcl_ksp = NULL;
418 	}
419 
420 	/* Return minor number to the pool of minors */
421 	vmem_free(tcl_minor_arena, (void *)(uintptr_t)tcl->tcl_lsessid, 1);
422 
423 	/* Return tuncl_t to the cache */
424 	kmem_cache_free(tcl_cache, tcl);
425 	rw_exit(&tcl_rwlock);
426 }
427 
428 /*
429  * Get tuncl_t structure by minor number.  Returns NULL when minor is
430  * out of range.  Note that lookup of tcl pointers (and use of those
431  * pointers) is safe because modification is done only when exclusive
432  * on both inner and outer perimeters.
433  */
434 static tuncl_t *
435 tcl_by_minor(minor_t dminor)
436 {
437 	tuncl_t *tcl = NULL;
438 
439 	if ((dminor >= 1) && (dminor <= tcl_nslots) && tcl_slots != NULL) {
440 		tcl = tcl_slots[dminor - 1];
441 	}
442 
443 	return (tcl);
444 }
445 
446 /*
447  * Set up kstats for upper or lower stream.
448  */
449 static kstat_t *
450 kstat_setup(kstat_named_t *knt, const char **names, int nstat,
451     const char *modname, int unitnum)
452 {
453 	kstat_t *ksp;
454 	char unitname[KSTAT_STRLEN];
455 	int i;
456 
457 	for (i = 0; i < nstat; i++) {
458 		kstat_set_string(knt[i].name, names[i]);
459 		knt[i].data_type = KSTAT_DATA_UINT64;
460 	}
461 	(void) sprintf(unitname, "%s%d", modname, unitnum);
462 	ksp = kstat_create(modname, unitnum, unitname, "net",
463 	    KSTAT_TYPE_NAMED, nstat, KSTAT_FLAG_VIRTUAL);
464 	if (ksp != NULL) {
465 		ksp->ks_data = (void *)knt;
466 		kstat_install(ksp);
467 	}
468 	return (ksp);
469 }
470 
471 /*
472  * sppptun_open()
473  *
474  * MT-Perimeters:
475  *    exclusive inner, exclusive outer.
476  *
477  * Description:
478  *    Common open procedure for module and driver.
479  */
480 static int
481 sppptun_open(queue_t *q, dev_t *devp, int oflag, int sflag, cred_t *credp)
482 {
483 	_NOTE(ARGUNUSED(oflag))
484 	ASSERT(q != NULL);
485 
486 	DBGENTRY((CE_CONT, "sppptun_open as %s",
487 	    (sflag & MODOPEN) ? "module" : "device"));
488 
489 	/* Allow a re-open */
490 	if (q->q_ptr != NULL)
491 		return (0);
492 
493 	/* On the off chance that we're on our way out, just return an error */
494 	if (tcl_slots == NULL) {
495 		DBGERROR((CE_CONT, "tcl_slots is NULL on open\n"));
496 		return (EINVAL);
497 	}
498 
499 	if (sflag & MODOPEN) {
500 		tunll_t *tll;
501 		char *cp;
502 
503 		/* ordinary users have no need to push this module */
504 		if (secpolicy_net_config(credp, B_FALSE) != 0)
505 			return (EPERM);
506 
507 		tll = kmem_zalloc(sizeof (tunll_t), KM_SLEEP);
508 
509 		tll->tll_index = tunll_index++;
510 
511 		tll->tll_wq = WR(q);
512 
513 		/* Insert at end of list */
514 		insque(&tll->tll_next, tunll_list.q_back);
515 		q->q_ptr = WR(q)->q_ptr = tll;
516 
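		/*
		 * Default to PPPoE addressing; a later PPPTUN_SINFO may
		 * change the style and address length for this lower
		 * stream.
		 */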
517 		tll->tll_style = PTS_PPPOE;
518 		tll->tll_alen = sizeof (tll->tll_lcladdr.pta_pppoe);
519 
520 		tll->tll_ksp = kstat_setup((kstat_named_t *)&tll->tll_kstats,
521 		    tll_kstats_list, Dim(tll_kstats_list), "tll",
522 		    tll->tll_index);
523 
524 		/*
525 		 * Find the name of the driver somewhere beneath us.
526 		 * Note that we have no driver under us until after
527 		 * qprocson().
528 		 */
529 		qprocson(q);
530 		for (q = WR(q); q->q_next != NULL; q = q->q_next)
531 			;
532 		cp = NULL;
533 		if (q->q_qinfo != NULL && q->q_qinfo->qi_minfo != NULL)
534 			cp = q->q_qinfo->qi_minfo->mi_idname;
535 		if (cp != NULL && *cp == '\0')
536 			cp = NULL;
537 
538 		/* Set initial name; user should overwrite. */
539 		if (cp == NULL)
540 			(void) snprintf(tll->tll_name, sizeof (tll->tll_name),
541 			    PPP_TUN_NAME "%d", tll->tll_index);
542 		else
543 			(void) snprintf(tll->tll_name, sizeof (tll->tll_name),
544 			    "%s:tun%d", cp, tll->tll_index);
545 	} else {
546 		tuncl_t	*tcl;
547 
548 		ASSERT(devp != NULL);
549 		if (sflag & CLONEOPEN) {
550 			tcl = tuncl_alloc(-1);
551 		} else {
552 			minor_t mn;
553 
554 			/*
555 			 * Non-clone open (i.e., mknod with a defined
556 			 * minor number) is supported for testing
557 			 * purposes so that 'arbitrary' minor numbers
558 			 * can be used.
559 			 */
560 			mn = getminor(*devp);
561 			if (mn == 0 || (tcl = tcl_by_minor(mn)) != NULL) {
562 				return (EPERM);
563 			}
564 			tcl = tuncl_alloc(mn);
565 		}
566 		if (tcl == NULL)
567 			return (ENOSR);
568 		tcl->tcl_rq = q;		/* save read queue pointer */
569 		tcl->tcl_flags |= TCLF_ISCLIENT;	/* mark as client side */
570 
571 		q->q_ptr = WR(q)->q_ptr = (caddr_t)tcl;
572 		*devp = makedevice(getmajor(*devp), tcl->tcl_lsessid);
573 
574 		tcl->tcl_ksp = kstat_setup((kstat_named_t *)&tcl->tcl_kstats,
575 		    tcl_kstats_list, Dim(tcl_kstats_list), "tcl",
576 		    tcl->tcl_lsessid);
577 
578 		qprocson(q);
579 	}
580 	return (0);
581 }
582 
583 /*
584  * Create an appropriate control message for this client event.
585  */
586 static mblk_t *
587 make_control(tuncl_t *tclabout, tunll_t *tllabout, int action, tuncl_t *tclto)
588 {
589 	struct ppptun_control *ptc;
590 	mblk_t *mp = allocb(sizeof (*ptc), BPRI_HI);
591 
592 	if (mp != NULL) {
593 		MTYPE(mp) = M_PROTO;
594 		ptc = (struct ppptun_control *)mp->b_wptr;
595 		mp->b_wptr += sizeof (*ptc);
596 		if (tclabout != NULL) {
597 			ptc->ptc_rsessid = tclabout->tcl_rsessid;
598 			ptc->ptc_address = tclabout->tcl_address;
599 		} else {
600 			bzero(ptc, sizeof (*ptc));
601 		}
602 		ptc->ptc_discrim = tclto->tcl_ctlval;
603 		ptc->ptc_action = action;
604 		(void) strncpy(ptc->ptc_name, tllabout->tll_name,
605 		    sizeof (ptc->ptc_name));
606 	}
607 	return (mp);
608 }
609 
610 /*
611  * Send an appropriate control message up this client session.
612  */
613 static void
614 send_control(tuncl_t *tclabout, tunll_t *tllabout, int action, tuncl_t *tcl)
615 {
616 	mblk_t *mp;
617 
618 	if (tcl->tcl_rq != NULL) {
619 		mp = make_control(tclabout, tllabout, action, tcl);
620 		if (mp != NULL) {
621 			KCINCR(cks_octrl_spec);
622 			putnext(tcl->tcl_rq, mp);
623 		}
624 	}
625 }
626 
627 /*
628  * If a lower stream is being unplumbed, then the upper streams
629  * connected to this lower stream must be disconnected.  This routine
630  * accomplishes this by sending M_HANGUP to data streams and M_PROTO
631  * messages to control streams.  This is called by vmem_walk, and
632  * handles a span of minor node numbers.
633  *
634  * No need to update lks_clients here; the lower stream is on its way
635  * out.
636  */
637 static void
638 tclvm_remove_tll(void *arg, void *firstv, size_t numv)
639 {
640 	tunll_t *tll = (tunll_t *)arg;
641 	int minorn = (int)(uintptr_t)firstv;
642 	int minormax = minorn + numv;
643 	tuncl_t *tcl;
644 	mblk_t *mp;
645 
646 	while (minorn < minormax) {
647 		tcl = tcl_slots[minorn - 1];
648 		ASSERT(tcl != NULL);
649 		if (tcl->tcl_data_tll == tll && tcl->tcl_rq != NULL) {
650 			tcl->tcl_data_tll = NULL;
651 			mp = allocb(0, BPRI_HI);
652 			if (mp != NULL) {
653 				MTYPE(mp) = M_HANGUP;
654 				putnext(tcl->tcl_rq, mp);
655 				if (tcl->tcl_ctrl_tll == tll)
656 					tcl->tcl_ctrl_tll = NULL;
657 			}
658 		}
659 		if (tcl->tcl_ctrl_tll == tll) {
660 			send_control(tcl, tll, PTCA_UNPLUMB, tcl);
661 			tcl->tcl_ctrl_tll = NULL;
662 		}
663 		minorn++;
664 	}
665 }
666 
667 /*
668  * sppptun_close()
669  *
670  * MT-Perimeters:
671  *    exclusive inner, exclusive outer.
672  *
673  * Description:
674  *    Common close procedure for module and driver.
675  */
676 static int
677 sppptun_close(queue_t *q)
678 {
679 	int err;
680 	void *qptr;
681 	tunll_t *tll;
682 	tuncl_t *tcl;
683 
684 	ASSERT(q != NULL);
685 	qptr = q->q_ptr;
686 	ASSERT(qptr != NULL);
687 
688 	err = 0;
689 	tll = (tunll_t *)qptr;
690 	if (!(tll->tll_flags & TLLF_NOTLOWER)) {
691 		/* q_next is set on modules */
692 		ASSERT(WR(q)->q_next != NULL);
693 
694 		/* unlink any clients using this lower layer. */
695 		vmem_walk(tcl_minor_arena, VMEM_ALLOC, tclvm_remove_tll, tll);
696 
697 		/* tell daemon that this has been removed. */
698 		if ((tcl = tll->tll_defcl) != NULL)
699 			send_control(NULL, tll, PTCA_UNPLUMB, tcl);
700 
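		/*
		 * Wake the write-side service routine so that it sends
		 * the queued unbind/detach messages (see tll_close_req),
		 * then wait for it to mark that work as done.
		 */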
701 		tll->tll_flags |= TLLF_CLOSING;
702 		while (!(tll->tll_flags & TLLF_CLOSE_DONE)) {
703 			qenable(tll->tll_wq);
704 			qwait(tll->tll_wq);
705 		}
706 		tll->tll_error = 0;
707 		while (!(tll->tll_flags & TLLF_SHUTDOWN_DONE)) {
708 			if (!qwait_sig(tll->tll_wq))
709 				break;
710 		}
711 
712 		qprocsoff(q);
713 		q->q_ptr = WR(q)->q_ptr = NULL;
714 		tll->tll_wq = NULL;
715 		remque(&tll->tll_next);
716 		err = tll->tll_error;
717 		if (tll->tll_ksp != NULL)
718 			kstat_delete(tll->tll_ksp);
719 		kmem_free(tll, sizeof (*tll));
720 	} else {
721 		tcl = (tuncl_t *)tll;
722 
723 		/* devices are end of line; no q_next. */
724 		ASSERT(WR(q)->q_next == NULL);
725 
726 		qprocsoff(q);
727 		DBGNORMAL((CE_CONT, "session %d closed", tcl->tcl_rsessid));
728 		tcl->tcl_rq = NULL;
729 		q->q_ptr = WR(q)->q_ptr = NULL;
730 
731 		tll = TO_TLL(tunll_list.q_forw);
732 		while (tll != TO_TLL(&tunll_list)) {
733 			if (tll->tll_defcl == tcl)
734 				tll->tll_defcl = NULL;
735 			if (tll->tll_lastcl == tcl)
736 				tll->tll_lastcl = NULL;
737 			tll = TO_TLL(tll->tll_next);
738 		}
739 		/*
740 		 * If this was a normal session, then tell the daemon.
741 		 */
742 		if (!(tcl->tcl_flags & TCLF_DAEMON) &&
743 		    (tll = tcl->tcl_ctrl_tll) != NULL &&
744 		    tll->tll_defcl != NULL) {
745 			DBGERROR((CE_CONT, "unexpected client disconnect"));
746 			send_control(tcl, tll, PTCA_DISCONNECT,
747 			    tll->tll_defcl);
748 		}
749 
750 		/* Update statistics for references being dropped. */
751 		if ((tll = tcl->tcl_data_tll) != NULL) {
752 			KLDECR(lks_clients);
753 		}
754 		if ((tll = tcl->tcl_ctrl_tll) != NULL) {
755 			KLDECR(lks_clients);
756 		}
757 
758 		tuncl_free(tcl);
759 	}
760 
761 	return (err);
762 }
763 
764 /*
765  * Allocate and initialize a DLPI or TPI template of the specified
766  * length.
767  */
768 static mblk_t *
769 pi_alloc(size_t len, int prim)
770 {
771 	mblk_t	*mp;
772 
773 	mp = allocb(len, BPRI_MED);
774 	if (mp != NULL) {
775 		MTYPE(mp) = M_PROTO;
776 		mp->b_wptr = mp->b_rptr + len;
777 		bzero(mp->b_rptr, len);
778 		*(int *)mp->b_rptr = prim;
779 	}
780 	return (mp);
781 }
782 
783 #define	dlpi_alloc(l, p)	pi_alloc((l), (p))
784 
785 /*
786  * Prepend some room to an mblk.  Try to reuse the existing buffer, if
787  * at all possible, rather than allocating a new one.  (Fast-path
788  * output should be able to use this.)
789  *
790  * (XXX why isn't this a library function ...?)
791  */
792 static mblk_t *
793 prependb(mblk_t *mp, size_t len, size_t align)
794 {
795 	mblk_t *newmp;
796 
797 
798 	if (align == 0)
799 		align = 8;
800 	if (DB_REF(mp) > 1 || mp->b_datap->db_base+len > mp->b_rptr ||
801 	    ((uint_t)((uintptr_t)mp->b_rptr - len) % align) != 0) {
802 		if ((newmp = allocb(len, BPRI_LO)) == NULL) {
803 			freemsg(mp);
804 			return (NULL);
805 		}
806 		newmp->b_wptr = newmp->b_rptr + len;
807 		newmp->b_cont = mp;
808 		return (newmp);
809 	}
810 	mp->b_rptr -= len;
811 	return (mp);
812 }
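
/*
 * Typical use within this driver: sppptun_outpkt() calls
 * prependb(mp, sizeof (poep_t) + len, POE_HDR_ALIGN) to lay a PPPoE
 * header in front of an outbound PPP frame, reusing the existing data
 * block whenever it has enough aligned headroom.
 */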
813 
814 /*
815  * sppptun_outpkt()
816  *
817  * MT-Perimeters:
818  *	shared inner, shared outer (if called from sppptun_uwput),
819  *	exclusive inner, shared outer (if called from sppptun_uwsrv).
820  *
821  * Description:
822  *    Called from sppptun_uwput or sppptun_uwsrv when processing a
823  *    M_DATA, M_PROTO, or M_PCPROTO message.  For all cases, it tries
824  *    to prepare the data to be sent to the module below this driver
825  *    if there is a lower stream linked underneath.  If no lower
826  *    stream exists, then the data will be discarded and an ENXIO
827  *    error returned.
828  *
829  * Returns:
830  *	pointer to queue if caller should do putnext, otherwise
831  *	*mpp != NULL if message should be enqueued, otherwise
832  *	*mpp == NULL if message is gone.
833  */
834 static queue_t *
835 sppptun_outpkt(queue_t *q, mblk_t **mpp)
836 {
837 	mblk_t *mp;
838 	tuncl_t *tcl;
839 	tunll_t *tll;
840 	mblk_t *encmb;
841 	mblk_t *datamb;
842 	dl_unitdata_req_t *dur;
843 	queue_t *lowerq;
844 	poep_t *poep;
845 	int len;
846 	ether_dest_t *edestp;
847 	enum { luNone, luCopy, luSend } loopup;
848 	boolean_t isdata;
849 	struct ppptun_control *ptc;
850 
851 	ASSERT(mpp != NULL);
852 	mp = *mpp;
853 	ASSERT(mp != NULL);
854 	ASSERT(q != NULL);
855 	tcl = (tuncl_t *)q->q_ptr;
856 	ASSERT(tcl != NULL);
857 
858 	*mpp = NULL;
859 	if (!(tcl->tcl_flags & TCLF_ISCLIENT)) {
860 		DBGERROR((CE_CONT, "discard data sent on lower stream\n"));
861 		merror(q, mp, EINVAL);
862 		return (NULL);
863 	}
864 
865 	isdata = (MTYPE(mp) == M_DATA);
866 	if (isdata) {
867 		tll = tcl->tcl_data_tll;
868 		ptc = NULL;
869 	} else {
870 		/*
871 		 * If data are unaligned or otherwise unsuitable, then
872 		 * discard.
873 		 */
874 		if (MBLKL(mp) != sizeof (*ptc) || DB_REF(mp) > 1 ||
875 		    !IS_P2ALIGNED(mp->b_rptr, sizeof (ptc))) {
876 			KCINCR(cks_octrl_drop);
877 			DBGERROR((CE_CONT, "discard bad control message\n"));
878 			merror(q, mp, EINVAL);
879 			return (NULL);
880 		}
881 		ptc = (struct ppptun_control *)mp->b_rptr;
882 
883 		/* Set stream discriminator value if not yet set. */
884 		if (tcl->tcl_ctlval == 0)
885 			tcl->tcl_ctlval = ptc->ptc_discrim;
886 
887 		/* If this is a test message, then reply to caller. */
888 		if (ptc->ptc_action == PTCA_TEST) {
889 			DBGNORMAL((CE_CONT, "sending test reply"));
890 			if (mp->b_cont != NULL) {
891 				freemsg(mp->b_cont);
892 				mp->b_cont = NULL;
893 			}
894 			ptc->ptc_discrim = tcl->tcl_ctlval;
895 			putnext(RD(q), mp);
896 			return (NULL);
897 		}
898 
899 		/* If this one isn't for us, then discard it */
900 		if (tcl->tcl_ctlval != ptc->ptc_discrim) {
901 			DBGNORMAL((CE_CONT, "Discriminator %X != %X; ignoring",
902 			    tcl->tcl_ctlval, ptc->ptc_discrim));
903 			freemsg(mp);
904 			return (NULL);
905 		}
906 
907 		/* Don't allow empty control packets. */
908 		if (mp->b_cont == NULL) {
909 			KCINCR(cks_octrl_drop);
910 			merror(q, mp, EINVAL);
911 			return (NULL);
912 		}
913 		tll = tcl->tcl_ctrl_tll;
914 	}
915 
916 	if (tll == NULL || (lowerq = tll->tll_wq) == NULL) {
917 		DBGERROR((CE_CONT, "can't send; no %s on %X\n",
918 		    tll == NULL ? "attached lower layer" : "output queue",
919 		    tll == NULL ? (unsigned)tcl : (unsigned)tll));
920 		merror(q, mp, ENXIO);
921 		if (isdata) {
922 			tcl->tcl_stats.ppp_oerrors++;
923 		} else {
924 			KCINCR(cks_octrl_drop);
925 		}
926 		return (NULL);
927 	}
928 
929 	/*
930 	 * If so, then try to send it down.  The lower queue is only
931 	 * ever detached while holding an exclusive lock on the whole
932 	 * driver, so we can be confident that the lower queue is
933 	 * still there.
934 	 */
935 	if (!bcanputnext(lowerq, mp->b_band)) {
936 		DBGNORMAL((CE_CONT, "can't send; !canputnext\n"));
937 		*mpp = mp;
938 		return (NULL);
939 	}
940 
941 	/*
942 	 * Note: DLPI and TPI expect that the first buffer contains
943 	 * the control (unitdata-req) header, destination address, and
944 	 * nothing else.  Any protocol headers must go in the next
945 	 * buffer.
946 	 */
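	/*
	 * For PPPoE data, the chain built below therefore looks like:
	 *
	 *	encmb: [ dl_unitdata_req_t | ether_dest_t ]
	 *	    b_cont -> mp: [ poep_t | PPP protocol + payload ]
	 */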
947 	loopup = luNone;
948 	encmb = NULL;
949 	if (isdata) {
950 		if (tll->tll_alen != 0 &&
951 		    bcmp(&tcl->tcl_address, &tll->tll_lcladdr,
952 			tll->tll_alen) == 0)
953 			loopup = luSend;
954 		switch (tll->tll_style) {
955 		case PTS_PPPOE:
956 			/* Strip address and control fields if present. */
957 			if (mp->b_rptr[0] == 0xFF) {
958 				if (MBLKL(mp) < 3) {
959 					encmb = msgpullup(mp, 3);
960 					freemsg(mp);
961 					if ((mp = encmb) == NULL)
962 						break;
963 				}
964 				mp->b_rptr += 2;
965 			}
966 			/* Broadcasting data is probably not a good idea. */
967 			if (tcl->tcl_address.pta_pppoe.ptma_mac[0] & 1)
968 				break;
969 			encmb = dlpi_alloc(sizeof (*dur) + sizeof (*edestp),
970 			    DL_UNITDATA_REQ);
971 			if (encmb == NULL)
972 				break;
973 
974 			dur = (dl_unitdata_req_t *)encmb->b_rptr;
975 			dur->dl_dest_addr_length = sizeof (*edestp);
976 			dur->dl_dest_addr_offset = sizeof (*dur);
977 			edestp = (ether_dest_t *)(dur + 1);
978 			ether_copy(tcl->tcl_address.pta_pppoe.ptma_mac,
979 			    edestp->addr);
980 			/* DLPI SAPs are in host byte order! */
981 			edestp->type = ETHERTYPE_PPPOES;
982 
983 			/* Make sure the protocol field isn't compressed. */
984 			len = (*mp->b_rptr & 1);
985 			mp = prependb(mp, sizeof (*poep) + len, POE_HDR_ALIGN);
986 			if (mp == NULL)
987 				break;
988 			poep = (poep_t *)mp->b_rptr;
989 			poep->poep_version_type = POE_VERSION;
990 			poep->poep_code = POECODE_DATA;
991 			poep->poep_session_id = htons(tcl->tcl_rsessid);
992 			poep->poep_length = htons(msgsize(mp) -
993 			    sizeof (*poep));
994 			if (len > 0)
995 				*(char *)(poep + 1) = '\0';
996 			break;
997 
998 		default:
999 			ASSERT(0);
1000 		}
1001 	} else {
1002 		/*
1003 		 * Control side encapsulation.
1004 		 */
1005 		if (bcmp(&ptc->ptc_address, &tll->tll_lcladdr, tll->tll_alen)
1006 		    == 0)
1007 			loopup = luSend;
1008 		datamb = mp->b_cont;
1009 		switch (tll->tll_style) {
1010 		case PTS_PPPOE:
1011 			/*
1012 			 * Don't allow a loopback session to establish
1013 			 * itself.  PPPoE is broken; it uses only one
1014 			 * session ID for both data directions, so the
1015 			 * loopback data path can simply never work.
1016 			 */
1017 			if (loopup == luSend &&
1018 			    ((poep_t *)datamb->b_rptr)->poep_code ==
1019 			    POECODE_PADR)
1020 				break;
1021 			encmb = dlpi_alloc(sizeof (*dur) + sizeof (*edestp),
1022 			    DL_UNITDATA_REQ);
1023 			if (encmb == NULL)
1024 				break;
1025 			dur = (dl_unitdata_req_t *)encmb->b_rptr;
1026 			dur->dl_dest_addr_length = sizeof (*edestp);
1027 			dur->dl_dest_addr_offset = sizeof (*dur);
1028 
1029 			edestp = (ether_dest_t *)(dur + 1);
1030 			/* DLPI SAPs are in host byte order! */
1031 			edestp->type = ETHERTYPE_PPPOED;
1032 
1033 			/*
1034 			 * If destination isn't set yet, then we have to
1035 			 * allow anything at all.  Otherwise, force use
1036 			 * of configured peer address.
1037 			 */
1038 			if (bcmp(tcl->tcl_address.pta_pppoe.ptma_mac,
1039 			    zero_mac_addr, sizeof (zero_mac_addr)) == 0 ||
1040 				(tcl->tcl_flags & TCLF_DAEMON)) {
1041 				ether_copy(ptc->ptc_address.pta_pppoe.ptma_mac,
1042 				    edestp->addr);
1043 			} else {
1044 				ether_copy(tcl->tcl_address.pta_pppoe.ptma_mac,
1045 				    edestp->addr);
1046 			}
1047 			/* Reflect multicast/broadcast back up. */
1048 			if (edestp->addr[0] & 1)
1049 				loopup = luCopy;
1050 			break;
1051 
1052 		case PTS_PPTP:
1053 			/*
1054 			 * PPTP's control side is actually done over
1055 			 * separate TCP connections.
1056 			 */
1057 		default:
1058 			ASSERT(0);
1059 		}
1060 		freeb(mp);
1061 		mp = datamb;
1062 	}
1063 	if (mp == NULL || encmb == NULL) {
1064 		DBGERROR((CE_CONT, "output failure\n"));
1065 		freemsg(mp);
1066 		freemsg(encmb);
1067 		if (isdata) {
1068 			tcl->tcl_stats.ppp_oerrors++;
1069 		} else {
1070 			KCINCR(cks_octrl_drop);
1071 			KLINCR(lks_octrl_drop);
1072 		}
1073 		lowerq = NULL;
1074 	} else {
1075 		if (tcl->tcl_flags & TCLF_DEBUG)
1076 			sppp_dump_frame(tcl->tcl_unit, mp,
1077 			    isdata ? "sent" : "sctl");
1078 		if (isdata) {
1079 			tcl->tcl_stats.ppp_obytes += msgsize(mp);
1080 			tcl->tcl_stats.ppp_opackets++;
1081 		} else {
1082 			KCINCR(cks_octrls);
1083 			KLINCR(lks_octrls);
1084 		}
1085 		if (encmb != mp)
1086 			encmb->b_cont = mp;
1087 		switch (loopup) {
1088 		case luNone:
1089 			*mpp = encmb;
1090 			break;
1091 		case luCopy:
1092 			mp = copymsg(encmb);
1093 			if (mp != NULL)
1094 				sppptun_urput(RD(lowerq), mp);
1095 			*mpp = encmb;
1096 			break;
1097 		case luSend:
1098 			sppptun_urput(RD(lowerq), encmb);
1099 			lowerq = NULL;
1100 			break;
1101 		}
1102 	}
1103 	return (lowerq);
1104 }
1105 
1106 /*
1107  * Enqueue a message to be sent when the lower stream is closed.  This
1108  * is done so that we're guaranteed that we always have the necessary
1109  * resources to properly detach ourselves from the system.  (If we
1110  * waited until the close was done to allocate these messages, then
1111  * the message allocation could fail, and we'd be unable to properly
1112  * detach.)
1113  */
1114 static void
1115 save_for_close(tunll_t *tll, mblk_t *mp)
1116 {
1117 	mblk_t *onc;
1118 
1119 	if ((onc = tll->tll_onclose) == NULL)
1120 		tll->tll_onclose = mp;
1121 	else {
1122 		while (onc->b_next != NULL)
1123 			onc = onc->b_next;
1124 		onc->b_next = mp;
1125 	}
1126 }
1127 
1128 /*
1129  * Given the lower stream name, locate the state structure.  Note that
1130  * lookup of tll pointers (and use of those pointers) is safe because
1131  * modification is done only when exclusive on both inner and outer
1132  * perimeters.
1133  */
1134 static tunll_t *
1135 tll_lookup_on_name(char *dname)
1136 {
1137 	tunll_t *tll;
1138 
1139 	tll = TO_TLL(tunll_list.q_forw);
1140 	for (; tll != TO_TLL(&tunll_list); tll = TO_TLL(tll->tll_next))
1141 		if (strcmp(dname, tll->tll_name) == 0)
1142 			return (tll);
1143 	return (NULL);
1144 }
1145 
1146 /*
1147  * sppptun_inner_ioctl()
1148  *
1149  * MT-Perimeters:
1150  *    exclusive inner, shared outer.
1151  *
1152  * Description:
1153  *    Called by qwriter from sppptun_ioctl as the result of receiving
1154  *    a handled ioctl.
1155  */
1156 static void
1157 sppptun_inner_ioctl(queue_t *q, mblk_t *mp)
1158 {
1159 	struct iocblk *iop;
1160 	int rc = 0;
1161 	int len = 0;
1162 	int i;
1163 	tuncl_t *tcl;
1164 	tunll_t *tll;
1165 	union ppptun_name *ptn;
1166 	struct ppptun_info *pti;
1167 	struct ppptun_peer *ptp;
1168 	mblk_t *mptmp;
1169 	ppptun_atype *pap;
1170 	struct ppp_stats64 *psp;
1171 
1172 	ASSERT(q != NULL);
1173 	ASSERT(q->q_ptr != NULL);
1174 	ASSERT(mp != NULL);
1175 	ASSERT(mp->b_rptr != NULL);
1176 
1177 	iop = (struct iocblk *)mp->b_rptr;
1178 	tcl = NULL;
1179 	tll = (tunll_t *)q->q_ptr;
1180 	if (tll->tll_flags & TLLF_NOTLOWER) {
1181 		tcl = (tuncl_t *)tll;
1182 		tll = NULL;
1183 	}
1184 
1185 	switch (iop->ioc_cmd) {
1186 	case PPPIO_DEBUG:
1187 		/* Client (device) side only */
1188 		if (tcl == NULL || iop->ioc_count != sizeof (uint32_t) ||
1189 		    mp->b_cont == NULL) {
1190 			DBGERROR((CE_CONT, "bad PPPIO_DEBUG"));
1191 			rc = EINVAL;
1192 			break;
1193 		}
1194 
1195 		/* just one type of debug so far */
1196 		i = *(uint32_t *)mp->b_cont->b_rptr;
1197 		if (i != PPPDBG_LOG + PPPDBG_AHDLC)
1198 			rc = EINVAL;
1199 		else
1200 			tcl->tcl_flags |= TCLF_DEBUG;
1201 		break;
1202 
1203 	case PPPIO_GETSTAT:
1204 		rc = EINVAL;
1205 		break;
1206 
1207 	case PPPIO_GETSTAT64:
1208 		/* Client (device) side only */
1209 		if (tcl == NULL) {
1210 			DBGERROR((CE_CONT, "bad PPPIO_GETSTAT64"));
1211 			rc = EINVAL;
1212 			break;
1213 		}
1214 		mptmp = allocb(sizeof (*psp), BPRI_HI);
1215 		if (mptmp == NULL) {
1216 			rc = ENOSR;
1217 			break;
1218 		}
1219 		freemsg(mp->b_cont);
1220 		mp->b_cont = mptmp;
1221 
1222 		psp = (struct ppp_stats64 *)mptmp->b_wptr;
1223 		bzero((caddr_t)psp, sizeof (*psp));
1224 		psp->p = tcl->tcl_stats;
1225 
1226 		len = sizeof (*psp);
1227 		break;
1228 
1229 	case PPPTUN_SNAME:
1230 		/* This is done on the *module* (lower level) side. */
1231 		if (tll == NULL || mp->b_cont == NULL ||
1232 		    iop->ioc_count != sizeof (*ptn) ||
1233 		    *mp->b_cont->b_rptr == '\0') {
1234 			rc = EINVAL;
1235 			break;
1236 		}
1237 
1238 		ptn = (union ppptun_name *)mp->b_cont->b_rptr;
1239 		ptn->ptn_name[sizeof (ptn->ptn_name) - 1] = '\0';
1240 
1241 		if ((tll = tll_lookup_on_name(ptn->ptn_name)) != NULL) {
1242 			rc = EEXIST;
1243 			break;
1244 		}
1245 		tll = (tunll_t *)q->q_ptr;
1246 		(void) strcpy(tll->tll_name, ptn->ptn_name);
1247 		break;
1248 
1249 	case PPPTUN_GNAME:
1250 		/* This is done on the *module* (lower level) side. */
1251 		if (tll == NULL) {
1252 			rc = EINVAL;
1253 			break;
1254 		}
1255 		if (mp->b_cont != NULL)
1256 			freemsg(mp->b_cont);
1257 		if ((mp->b_cont = allocb(sizeof (*ptn), BPRI_HI)) == NULL) {
1258 			rc = ENOSR;
1259 			break;
1260 		}
1261 		ptn = (union ppptun_name *)mp->b_cont->b_rptr;
1262 		bcopy(tll->tll_name, ptn->ptn_name, sizeof (ptn->ptn_name));
1263 		len = sizeof (*ptn);
1264 		break;
1265 
1266 	case PPPTUN_SINFO:
1267 	case PPPTUN_GINFO:
1268 		/* Either side */
1269 		if (mp->b_cont == NULL || iop->ioc_count != sizeof (*pti)) {
1270 			DBGERROR((CE_CONT, "missing ioctl data"));
1271 			rc = EINVAL;
1272 			break;
1273 		}
1274 		pti = (struct ppptun_info *)mp->b_cont->b_rptr;
1275 		if (pti->pti_name[0] != '\0')
1276 			tll = tll_lookup_on_name(pti->pti_name);
1277 		if (tll == NULL) {
1278 			/* Driver (client) side must have name */
1279 			if (tcl != NULL && pti->pti_name[0] == '\0') {
1280 				DBGERROR((CE_CONT,
1281 				    "null sinfo name on driver"));
1282 				rc = EINVAL;
1283 			} else
1284 				rc = ESRCH;
1285 			break;
1286 		}
1287 		if (iop->ioc_cmd == PPPTUN_GINFO) {
1288 			pti->pti_muxid = tll->tll_muxid;
1289 			pti->pti_style = tll->tll_style;
1290 			len = sizeof (*pti);
1291 			break;
1292 		}
1293 		tll->tll_muxid = pti->pti_muxid;
1294 		tll->tll_style = pti->pti_style;
1295 		switch (tll->tll_style) {
1296 		case PTS_PPPOE:		/* DLPI type */
1297 			tll->tll_alen = sizeof (tll->tll_lcladdr.pta_pppoe);
1298 			mptmp = dlpi_alloc(sizeof (dl_unbind_req_t),
1299 			    DL_UNBIND_REQ);
1300 			if (mptmp == NULL) {
1301 				rc = ENOSR;
1302 				break;
1303 			}
1304 			save_for_close(tll, mptmp);
1305 			mptmp = dlpi_alloc(sizeof (dl_detach_req_t),
1306 			    DL_DETACH_REQ);
1307 			if (mptmp == NULL) {
1308 				rc = ENOSR;
1309 				break;
1310 			}
1311 			save_for_close(tll, mptmp);
1312 			break;
1313 		default:
1314 			DBGERROR((CE_CONT, "bad style %d driver",
1315 			    tll->tll_style));
1316 			tll->tll_style = PTS_NONE;
1317 			tll->tll_alen = 0;
1318 			rc = EINVAL;
1319 			break;
1320 		}
1321 		break;
1322 
1323 	case PPPTUN_GNNAME:
1324 		/* This can be done on either side. */
1325 		if (mp->b_cont == NULL || iop->ioc_count < sizeof (uint32_t)) {
1326 			rc = EINVAL;
1327 			break;
1328 		}
1329 		ptn = (union ppptun_name *)mp->b_cont->b_rptr;
1330 		i = ptn->ptn_index;
1331 		tll = TO_TLL(tunll_list.q_forw);
1332 		while (--i >= 0 && tll != TO_TLL(&tunll_list))
1333 			tll = TO_TLL(tll->tll_next);
1334 		if (tll != TO_TLL(&tunll_list)) {
1335 			bcopy(tll->tll_name, ptn->ptn_name,
1336 			    sizeof (ptn->ptn_name));
1337 		} else {
1338 			bzero(ptn, sizeof (*ptn));
1339 		}
1340 		len = sizeof (*ptn);
1341 		break;
1342 
1343 	case PPPTUN_LCLADDR:
1344 		/* This is done on the *module* (lower level) side. */
1345 		if (tll == NULL || mp->b_cont == NULL) {
1346 			rc = EINVAL;
1347 			break;
1348 		}
1349 
1350 		pap = &tll->tll_lcladdr;
1351 		len = tll->tll_alen;
1352 		if (len == 0 || len > iop->ioc_count) {
1353 			rc = EINVAL;
1354 			break;
1355 		}
1356 		bcopy(mp->b_cont->b_rptr, pap, len);
1357 		len = 0;
1358 		break;
1359 
1360 	case PPPTUN_SPEER:
1361 		/* Client (device) side only; before SDATA */
1362 		if (tcl == NULL || mp->b_cont == NULL ||
1363 		    iop->ioc_count != sizeof (*ptp)) {
1364 			DBGERROR((CE_CONT, "bad speer ioctl"));
1365 			rc = EINVAL;
1366 			break;
1367 		}
1368 		if (tcl->tcl_data_tll != NULL) {
1369 			DBGERROR((CE_CONT, "data link already set"));
1370 			rc = EINVAL;
1371 			break;
1372 		}
1373 		ptp = (struct ppptun_peer *)mp->b_cont->b_rptr;
1374 		/* Once set, the style cannot change. */
1375 		if (tcl->tcl_style != PTS_NONE &&
1376 		    tcl->tcl_style != ptp->ptp_style) {
1377 			DBGERROR((CE_CONT, "bad style; %d != %d",
1378 			    tcl->tcl_style, ptp->ptp_style));
1379 			rc = EINVAL;
1380 			break;
1381 		}
1382 		if (ptp->ptp_flags & PTPF_DAEMON) {
1383 			/* User requests registration for tunnel 0 */
1384 			if ((tcl->tcl_flags & TCLF_SPEER_DONE) ||
1385 			    ptp->ptp_ltunid != 0 || ptp->ptp_rtunid != 0 ||
1386 			    ptp->ptp_lsessid != 0 || ptp->ptp_rsessid != 0) {
1387 				rc = EINVAL;
1388 				break;
1389 			}
1390 			tcl->tcl_flags |= TCLF_DAEMON;
1391 		} else {
1392 			/* Normal client connection */
1393 			if (tcl->tcl_flags & TCLF_DAEMON) {
1394 				DBGERROR((CE_CONT, "can't change to daemon"));
1395 				rc = EINVAL;
1396 				break;
1397 			}
1398 			if (ptp->ptp_lsessid != 0 &&
1399 			    ptp->ptp_lsessid != tcl->tcl_lsessid) {
1400 				DBGERROR((CE_CONT, "bad lsessid; %d != %d",
1401 				    ptp->ptp_lsessid, tcl->tcl_lsessid));
1402 				rc = EINVAL;
1403 				break;
1404 			}
1405 			/*
1406 			 * If we're reassigning the peer data, then
1407 			 * the previous assignment must have been for
1408 			 * a client control connection.  Check that.
1409 			 */
1410 			if ((tcl->tcl_flags & TCLF_SPEER_DONE) &&
1411 			    ((tcl->tcl_ltunid != 0 &&
1412 				tcl->tcl_ltunid != ptp->ptp_ltunid) ||
1413 				(tcl->tcl_rtunid != 0 &&
1414 				    tcl->tcl_rtunid != ptp->ptp_rtunid) ||
1415 				(tcl->tcl_rsessid != 0 &&
1416 				    tcl->tcl_rsessid != ptp->ptp_rsessid))) {
1417 				DBGERROR((CE_CONT, "can't change parameters"));
1418 				rc = EINVAL;
1419 				break;
1420 			}
1421 			if ((tcl->tcl_ltunid = ptp->ptp_ltunid) == 0 &&
1422 			    tcl->tcl_style == PTS_L2FTP)
1423 				tcl->tcl_ltunid = ptp->ptp_lsessid;
1424 			tcl->tcl_rtunid = ptp->ptp_rtunid;
1425 			tcl->tcl_rsessid = ptp->ptp_rsessid;
1426 			DBGNORMAL((CE_CONT, "set session ID to %d",
1427 			    tcl->tcl_rsessid));
1428 		}
1429 		tcl->tcl_flags |= TCLF_SPEER_DONE;
1430 		tcl->tcl_style = ptp->ptp_style;
1431 		tcl->tcl_address = ptp->ptp_address;
1432 		goto fill_in_peer;
1433 
1434 	case PPPTUN_GPEER:
1435 		/* Client (device) side only */
1436 		if (tcl == NULL) {
1437 			rc = EINVAL;
1438 			break;
1439 		}
1440 		if (mp->b_cont != NULL)
1441 			freemsg(mp->b_cont);
1442 		mp->b_cont = allocb(sizeof (*ptp), BPRI_HI);
1443 		if (mp->b_cont == NULL) {
1444 			rc = ENOSR;
1445 			break;
1446 		}
1447 		ptp = (struct ppptun_peer *)mp->b_cont->b_rptr;
1448 	fill_in_peer:
1449 		ptp->ptp_style = tcl->tcl_style;
1450 		ptp->ptp_flags = (tcl->tcl_flags & TCLF_DAEMON) ? PTPF_DAEMON :
1451 		    0;
1452 		ptp->ptp_ltunid = tcl->tcl_ltunid;
1453 		ptp->ptp_rtunid = tcl->tcl_rtunid;
1454 		ptp->ptp_lsessid = tcl->tcl_lsessid;
1455 		ptp->ptp_rsessid = tcl->tcl_rsessid;
1456 		ptp->ptp_address = tcl->tcl_address;
1457 		len = sizeof (*ptp);
1458 		break;
1459 
1460 	case PPPTUN_SDATA:
1461 	case PPPTUN_SCTL:
1462 		/* Client (device) side only; must do SPEER first */
1463 		if (tcl == NULL || mp->b_cont == NULL ||
1464 		    iop->ioc_count != sizeof (*ptn) ||
1465 		    *mp->b_cont->b_rptr == '\0') {
1466 			DBGERROR((CE_CONT, "bad ioctl data"));
1467 			rc = EINVAL;
1468 			break;
1469 		}
1470 		if (!(tcl->tcl_flags & TCLF_SPEER_DONE)) {
1471 			DBGERROR((CE_CONT, "speer not yet done"));
1472 			rc = EINVAL;
1473 			break;
1474 		}
1475 		ptn = (union ppptun_name *)mp->b_cont->b_rptr;
1476 		ptn->ptn_name[sizeof (ptn->ptn_name) - 1] = '\0';
1477 		tll = tll_lookup_on_name(ptn->ptn_name);
1478 		if (tll == NULL) {
1479 			DBGERROR((CE_CONT, "cannot locate \"%s\"",
1480 			    ptn->ptn_name));
1481 			rc = ESRCH;
1482 			break;
1483 		}
1484 		if (tll->tll_style != tcl->tcl_style) {
1485 			DBGERROR((CE_CONT, "user style %d doesn't match lower"
1486 			    " style %d", tcl->tcl_style, tll->tll_style));
1487 			rc = ENXIO;
1488 			break;
1489 		}
1490 		if (iop->ioc_cmd == PPPTUN_SDATA) {
1491 			if (tcl->tcl_data_tll != NULL) {
1492 				DBGERROR((CE_CONT, "data link already set"));
1493 				rc = EEXIST;
1494 				break;
1495 			}
1496 			/* server daemons cannot use regular data */
1497 			if (tcl->tcl_flags & TCLF_DAEMON) {
1498 				DBGERROR((CE_CONT, "daemon has no data"));
1499 				rc = EINVAL;
1500 				break;
1501 			}
1502 			tcl->tcl_data_tll = tll;
1503 		} else if (tcl->tcl_flags & TCLF_DAEMON) {
1504 			if (tll->tll_defcl != NULL && tll->tll_defcl != tcl) {
1505 				DBGERROR((CE_CONT,
1506 				    "control link already set"));
1507 				rc = EEXIST;
1508 				break;
1509 			}
1510 			tll->tll_defcl = tcl;
1511 			if (tcl->tcl_ctrl_tll != NULL) {
1512 				KDECR(tcl->tcl_ctrl_tll, tll_kstats,
1513 				    lks_clients);
1514 			}
1515 			tcl->tcl_ctrl_tll = tll;
1516 		} else {
1517 			if (tcl->tcl_ctrl_tll != NULL) {
1518 				DBGERROR((CE_CONT,
1519 				    "control link already set"));
1520 				rc = EEXIST;
1521 				break;
1522 			}
1523 			tcl->tcl_ctrl_tll = tll;
1524 		}
1525 		KLINCR(lks_clients);
1526 		break;
1527 
1528 	case PPPTUN_GDATA:
1529 	case PPPTUN_GCTL:
1530 		/* Client (device) side only */
1531 		if (tcl == NULL) {
1532 			rc = EINVAL;
1533 			break;
1534 		}
1535 		if (mp->b_cont != NULL)
1536 			freemsg(mp->b_cont);
1537 		mp->b_cont = allocb(sizeof (*ptn), BPRI_HI);
1538 		if (mp->b_cont == NULL) {
1539 			rc = ENOSR;
1540 			break;
1541 		}
1542 		ptn = (union ppptun_name *)mp->b_cont->b_rptr;
1543 		if (iop->ioc_cmd == PPPTUN_GDATA)
1544 			tll = tcl->tcl_data_tll;
1545 		else
1546 			tll = tcl->tcl_ctrl_tll;
1547 		if (tll == NULL)
1548 			bzero(ptn, sizeof (*ptn));
1549 		else
1550 			bcopy(tll->tll_name, ptn->ptn_name,
1551 			    sizeof (ptn->ptn_name));
1552 		len = sizeof (*ptn);
1553 		break;
1554 
1555 	case PPPTUN_DCTL:
1556 		/* Client (device) side daemon mode only */
1557 		if (tcl == NULL || mp->b_cont == NULL ||
1558 		    iop->ioc_count != sizeof (*ptn) ||
1559 		    !(tcl->tcl_flags & TCLF_DAEMON)) {
1560 			rc = EINVAL;
1561 			break;
1562 		}
1563 		ptn = (union ppptun_name *)mp->b_cont->b_rptr;
1564 		ptn->ptn_name[sizeof (ptn->ptn_name) - 1] = '\0';
1565 		tll = tll_lookup_on_name(ptn->ptn_name);
1566 		if (tll == NULL || tll->tll_defcl != tcl) {
1567 			DBGERROR((CE_CONT, "cannot locate \"%s\"",
1568 			    ptn->ptn_name));
1569 			rc = ESRCH;
1570 			break;
1571 		}
1572 		tll->tll_defcl = NULL;
1573 		break;
1574 
1575 	default:
1576 		/* Caller should already have checked command value */
1577 		ASSERT(0);
1578 	}
1579 	if (rc != 0) {
1580 		DBGERROR((CE_CONT, "error %d for ioctl %X", rc, iop->ioc_cmd));
1581 		miocnak(q, mp, 0, rc);
1582 	} else {
1583 		if (len > 0)
1584 			mp->b_cont->b_wptr = mp->b_cont->b_rptr + len;
1585 		miocack(q, mp, len, 0);
1586 	}
1587 }
1588 
1589 /*
1590  * sppptun_ioctl()
1591  *
1592  * MT-Perimeters:
1593  *    shared inner, shared outer.
1594  *
1595  * Description:
1596  *    Called by sppptun_uwput as the result of receiving a M_IOCTL command.
1597  */
1598 static void
1599 sppptun_ioctl(queue_t *q, mblk_t *mp)
1600 {
1601 	struct iocblk *iop;
1602 	int rc = 0;
1603 	int len = 0;
1604 	uint32_t val = 0;
1605 	tunll_t *tll;
1606 
1607 	ASSERT(q != NULL);
1608 	ASSERT(q->q_ptr != NULL);
1609 	ASSERT(mp != NULL);
1610 	ASSERT(mp->b_rptr != NULL);
1611 
1612 	iop = (struct iocblk *)mp->b_rptr;
1613 
1614 	DBGNORMAL((CE_CONT, "got ioctl %X\n", iop->ioc_cmd));
1615 	switch (iop->ioc_cmd) {
1616 	case PPPIO_DEBUG:
1617 	case PPPIO_GETSTAT:
1618 	case PPPIO_GETSTAT64:
1619 	case PPPTUN_SNAME:
1620 	case PPPTUN_GNAME:
1621 	case PPPTUN_SINFO:
1622 	case PPPTUN_GINFO:
1623 	case PPPTUN_GNNAME:
1624 	case PPPTUN_LCLADDR:
1625 	case PPPTUN_SPEER:
1626 	case PPPTUN_GPEER:
1627 	case PPPTUN_SDATA:
1628 	case PPPTUN_GDATA:
1629 	case PPPTUN_SCTL:
1630 	case PPPTUN_GCTL:
1631 	case PPPTUN_DCTL:
1632 		qwriter(q, mp, sppptun_inner_ioctl, PERIM_INNER);
1633 		return;
1634 
1635 	case PPPIO_GCLEAN:	/* always clean */
1636 		val = RCV_B7_1 | RCV_B7_0 | RCV_ODDP | RCV_EVNP;
1637 		len = sizeof (uint32_t);
1638 		break;
1639 
1640 	case PPPIO_GTYPE:	/* we look like an async driver. */
1641 		val = PPPTYP_AHDLC;
1642 		len = sizeof (uint32_t);
1643 		break;
1644 
1645 	case PPPIO_CFLAGS:	/* never compress headers */
1646 		val = 0;
1647 		len = sizeof (uint32_t);
1648 		break;
1649 
1650 		/* quietly ack PPP things we don't need to do. */
1651 	case PPPIO_XFCS:
1652 	case PPPIO_RFCS:
1653 	case PPPIO_XACCM:
1654 	case PPPIO_RACCM:
1655 	case PPPIO_LASTMOD:
1656 	case PPPIO_MUX:
1657 	case I_PLINK:
1658 	case I_PUNLINK:
1659 	case I_LINK:
1660 	case I_UNLINK:
1661 		break;
1662 
1663 	default:
1664 		tll = (tunll_t *)q->q_ptr;
1665 		if (!(tll->tll_flags & TLLF_NOTLOWER)) {
1666 			/* module side; pass this through. */
1667 			putnext(q, mp);
1668 			return;
1669 		}
1670 		rc = EINVAL;
1671 		break;
1672 	}
1673 	if (rc == 0 && len == sizeof (uint32_t)) {
1674 		if (mp->b_cont != NULL)
1675 			freemsg(mp->b_cont);
1676 		mp->b_cont = allocb(sizeof (uint32_t), BPRI_HI);
1677 		if (mp->b_cont == NULL) {
1678 			rc = ENOSR;
1679 		} else {
1680 			*(uint32_t *)mp->b_cont->b_wptr = val;
1681 			mp->b_cont->b_wptr += sizeof (uint32_t);
1682 		}
1683 	}
1684 	if (rc == 0) {
1685 		miocack(q, mp, len, 0);
1686 	} else {
1687 		DBGERROR((CE_CONT, "error %d for ioctl %X", rc, iop->ioc_cmd));
1688 		miocnak(q, mp, 0, rc);
1689 	}
1690 }
1691 
1692 /*
1693  * sppptun_inner_mctl()
1694  *
1695  * MT-Perimeters:
1696  *    exclusive inner, shared outer.
1697  *
1698  * Description:
1699  *    Called by qwriter (via sppptun_uwput) as the result of receiving
1700  *    an M_CTL.  Called only on the client (driver) side.
1701  */
1702 static void
1703 sppptun_inner_mctl(queue_t *q, mblk_t *mp)
1704 {
1705 	int msglen;
1706 	tuncl_t *tcl;
1707 
1708 	ASSERT(q != NULL && mp != NULL);
1709 	ASSERT(q->q_ptr != NULL && mp->b_rptr != NULL);
1710 
1711 	tcl = (tuncl_t *)q->q_ptr;
1712 
1713 	if (!(tcl->tcl_flags & TCLF_ISCLIENT)) {
1714 		freemsg(mp);
1715 		return;
1716 	}
1717 
1718 	msglen = MBLKL(mp);
1719 	switch (*mp->b_rptr) {
1720 	case PPPCTL_UNIT:
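		/*
		 * The unit number arrives either as a single byte (2-byte
		 * message) or as the second 32-bit word (8-byte message).
		 */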
1721 		if (msglen == 2)
1722 			tcl->tcl_unit = mp->b_rptr[1];
1723 		else if (msglen == 8)
1724 			tcl->tcl_unit = ((uint32_t *)mp->b_rptr)[1];
1725 		break;
1726 	}
1727 	freemsg(mp);
1728 }
1729 
1730 /*
1731  * sppptun_uwput()
1732  *
1733  * MT-Perimeters:
1734  *    shared inner, shared outer.
1735  *
1736  * Description:
1737  *	Regular output data and controls pass through here.
1738  */
1739 static void
1740 sppptun_uwput(queue_t *q, mblk_t *mp)
1741 {
1742 	queue_t *nextq;
1743 	tuncl_t *tcl;
1744 
1745 	ASSERT(q != NULL);
1746 	ASSERT(q->q_ptr != NULL);
1747 	ASSERT(mp != NULL);
1748 	ASSERT(mp->b_rptr != NULL);
1749 
1750 	DBGENTRY((CE_CONT, "sppptun_uwput as %s", DBGQSIDE(q)));
1751 
1752 	switch (MTYPE(mp)) {
1753 	case M_DATA:
1754 	case M_PROTO:
1755 	case M_PCPROTO:
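		/*
		 * If nothing is already queued, try to send the packet
		 * directly; otherwise queue it (preserving order) for
		 * sppptun_uwsrv to handle.
		 */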
1756 		if (q->q_first == NULL &&
1757 		    (nextq = sppptun_outpkt(q, &mp)) != NULL) {
1758 			putnext(nextq, mp);
1759 		} else if (mp != NULL && !putq(q, mp)) {
1760 			freemsg(mp);
1761 		}
1762 		break;
1763 	case M_IOCTL:
1764 		sppptun_ioctl(q, mp);
1765 		break;
1766 	case M_CTL:
1767 		qwriter(q, mp, sppptun_inner_mctl, PERIM_INNER);
1768 		break;
1769 	default:
1770 		tcl = (tuncl_t *)q->q_ptr;
1771 		/*
1772 		 * If we're the driver, then discard unknown junk.
1773 		 * Otherwise, if we're the module, then forward along.
1774 		 */
1775 		if (tcl->tcl_flags & TCLF_ISCLIENT)
1776 			freemsg(mp);
1777 		else
1778 			putnext(q, mp);
1779 		break;
1780 	}
1781 }
1782 
1783 /*
1784  * Send a DLPI/TPI control message to the driver but make sure there
1785  * is only one outstanding message.  Uses tll_msg_pending to tell when
1786  * it must queue.  sppptun_urput calls message_done() when an ACK or a
1787  * NAK is received to process the next queued message.
1788  */
1789 static void
1790 message_send(tunll_t *tll, mblk_t *mp)
1791 {
1792 	mblk_t **mpp;
1793 
1794 	if (tll->tll_msg_pending) {
1795 		/* Must queue message. Tail insertion */
1796 		mpp = &tll->tll_msg_deferred;
1797 		while (*mpp != NULL)
1798 			mpp = &((*mpp)->b_next);
1799 		*mpp = mp;
1800 		return;
1801 	}
1802 	tll->tll_msg_pending = 1;
1803 	putnext(tll->tll_wq, mp);
1804 }
1805 
1806 /*
1807  * Called when an DLPI/TPI control message has been acked or nacked to
1808  * send down the next queued message (if any).
1809  */
1810 static void
1811 message_done(tunll_t *tll)
1812 {
1813 	mblk_t *mp;
1814 
1815 	ASSERT(tll->tll_msg_pending);
1816 	tll->tll_msg_pending = 0;
1817 	mp = tll->tll_msg_deferred;
1818 	if (mp != NULL) {
1819 		tll->tll_msg_deferred = mp->b_next;
1820 		mp->b_next = NULL;
1821 		tll->tll_msg_pending = 1;
1822 		putnext(tll->tll_wq, mp);
1823 	}
1824 }
1825 
1826 /*
1827  * Send down queued "close" messages to lower stream.  These were
1828  * enqueued right after the stream was originally allocated, when the
1829  * tll_style was set by PPPTUN_SINFO.
1830  */
1831 static int
1832 tll_close_req(tunll_t *tll)
1833 {
1834 	mblk_t *mb, *mbnext;
1835 
1836 	if ((mb = tll->tll_onclose) == NULL)
1837 		tll->tll_flags |= TLLF_SHUTDOWN_DONE;
1838 	else {
1839 		tll->tll_onclose = NULL;
1840 		while (mb != NULL) {
1841 			mbnext = mb->b_next;
1842 			mb->b_next = NULL;
1843 			message_send(tll, mb);
1844 			mb = mbnext;
1845 		}
1846 	}
1847 	return (0);
1848 }
1849 
1850 /*
1851  * sppptun_uwsrv()
1852  *
1853  * MT-Perimeters:
1854  *    exclusive inner, shared outer.
1855  *
1856  * Description:
1857  *    Upper write-side service procedure.  In addition to the usual
1858  *    STREAMS queue service handling, this routine also handles the
1859  *    transmission of the unbind/detach messages to the lower stream
1860  *    driver when a lower stream is being closed.  (See the use of
1861  *    qenable/qwait in sppptun_close().)
1862  */
1863 static int
1864 sppptun_uwsrv(queue_t *q)
1865 {
1866 	tuncl_t	*tcl;
1867 	mblk_t *mp;
1868 	queue_t *nextq;
1869 
1870 	ASSERT(q != NULL);
1871 	ASSERT(q->q_ptr != NULL);
1872 
1873 	tcl = (tuncl_t *)q->q_ptr;
1874 	if (!(tcl->tcl_flags & TCLF_ISCLIENT)) {
1875 		tunll_t *tll = (tunll_t *)tcl;
1876 		if ((tll->tll_flags & (TLLF_CLOSING|TLLF_CLOSE_DONE)) ==
1877 		    TLLF_CLOSING) {
1878 			DBGPLUMB((CE_CONT, "sending close req\n"));
1879 			tll->tll_error = tll_close_req(tll);
1880 			tll->tll_flags |= TLLF_CLOSE_DONE;
1881 		}
1882 		return (0);
1883 	}
1884 
1885 	while ((mp = getq(q)) != NULL) {
1886 		if ((nextq = sppptun_outpkt(q, &mp)) != NULL) {
1887 			putnext(nextq, mp);
1888 		} else if (mp != NULL) {
1889 			(void) putbq(q, mp);
1890 			break;
1891 		}
1892 	}
1893 	return (0);
1894 }
1895 
1896 /*
1897  * sppptun_lwput()
1898  *
1899  * MT-Perimeters:
1900  *    shared inner, shared outer.
1901  *
1902  * Description:
1903  *    Lower write-side put procedure.  Nothing should be sending
1904  *    packets down this stream.
1905  */
1906 static void
1907 sppptun_lwput(queue_t *q, mblk_t *mp)
1908 {
1909 	ASSERT(q != NULL);
1910 	ASSERT(mp != NULL);
1911 	DBGENTRY((CE_CONT, "sppptun_lwput as %s", DBGQSIDE(q)));
1912 
1913 	switch (MTYPE(mp)) {
1914 	case M_PROTO:
1915 		putnext(q, mp);
1916 		break;
1917 	default:
1918 		freemsg(mp);
1919 		break;
1920 	}
1921 }
1922 
1923 /*
1924  * sppptun_lrput()
1925  *
1926  * MT-Perimeters:
1927  *    shared inner, shared outer.
1928  *
1929  * Description:
1930  *    Lower read-side put procedure.  Forwards arriving messages upward.
1931  */
1932 static void
1933 sppptun_lrput(queue_t *q, mblk_t *mp)
1934 {
1935 	tuncl_t *tcl;
1936 
1937 	ASSERT(q != NULL);
1938 	ASSERT(mp != NULL);
1939 
1940 	DBGENTRY((CE_CONT, "sppptun_lrput as %s", DBGQSIDE(q)));
1941 
1942 	switch (MTYPE(mp)) {
1943 	case M_IOCTL:
1944 		DBGERROR((CE_CONT, "unexpected lrput ioctl"));
1945 		miocnak(q, mp, 0, EINVAL);
1946 		return;
1947 	case M_FLUSH:
1948 		if (*mp->b_rptr & FLUSHR) {
1949 			flushq(q, FLUSHDATA);
1950 		}
1951 		if (*mp->b_rptr & FLUSHW) {
1952 			*mp->b_rptr &= ~FLUSHR;
1953 			qreply(q, mp);
1954 		} else {
1955 			freemsg(mp);
1956 		}
1957 		return;
1958 	}
1959 	/*
1960 	 * Try to forward the message to the put procedure for the upper
1961 	 * control stream for this lower stream. If there are already messages
1962 	 * queued here, queue this one up to preserve message ordering.
1963 	 */
1964 	if ((tcl = (tuncl_t *)q->q_ptr) == NULL || tcl->tcl_rq == NULL) {
1965 		freemsg(mp);
1966 		return;
1967 	}
1968 	if (queclass(mp) == QPCTL ||
1969 	    (q->q_first == NULL && canput(tcl->tcl_rq))) {
1970 		put(tcl->tcl_rq, mp);
1971 	} else {
1972 		if (!putq(q, mp))
1973 			freemsg(mp);
1974 	}
1975 }
1976 
1977 /*
1978  * MT-Perimeters:
1979  *    shared inner, shared outer.
1980  *
1981  *    Handle non-data DLPI messages.  Used with PPPoE, which runs over
1982  *    Ethernet only.
1983  */
1984 static void
1985 urput_dlpi(queue_t *q, mblk_t *mp)
1986 {
1987 	int err;
1988 	union DL_primitives *dlp = (union DL_primitives *)mp->b_rptr;
1989 	tunll_t *tll = (tunll_t *)q->q_ptr;
1990 
1991 	DBGNORMAL((CE_CONT, "received DLPI primitive %d\n",
1992 	    dlp->dl_primitive));
1993 	switch (dlp->dl_primitive) {
1994 	case DL_UDERROR_IND:
1995 		DBGERROR((CE_CONT, "uderror:  unix %d, dlpi %d\n",
1996 		    dlp->uderror_ind.dl_unix_errno,
1997 		    dlp->uderror_ind.dl_errno));
1998 		break;
1999 
2000 	case DL_ERROR_ACK:
2001 		DBGERROR((CE_CONT,
2002 		    "error-ack: unix %d, dlpi %d on primitive %d\n",
2003 		    dlp->error_ack.dl_unix_errno,
2004 		    dlp->error_ack.dl_errno,
2005 		    dlp->error_ack.dl_error_primitive));
2006 		err = dlp->error_ack.dl_unix_errno ?
2007 		    dlp->error_ack.dl_unix_errno : ENXIO;
2008 		switch (dlp->error_ack.dl_error_primitive) {
2009 		case DL_UNBIND_REQ:
2010 			message_done(tll);
2011 			break;
2012 		case DL_DETACH_REQ:
2013 			message_done(tll);
2014 			tll->tll_error = err;
2015 			DBGPLUMB((CE_CONT, "detach error %d; shutdown done\n",
2016 			    err));
2017 			tll->tll_flags |= TLLF_SHUTDOWN_DONE;
2018 			break;
2019 		case DL_PHYS_ADDR_REQ:
2020 			message_done(tll);
2021 			break;
2022 		case DL_INFO_REQ:
2023 		case DL_ATTACH_REQ:
2024 		case DL_BIND_REQ:
2025 			message_done(tll);
2026 			tll->tll_error = err;
2027 			DBGPLUMB((CE_CONT, "bind error %d\n", err));
2028 			break;
2029 		}
2030 		break;
2031 
2032 	case DL_INFO_ACK:
2033 		message_done(tll);
2034 		break;
2035 
2036 	case DL_BIND_ACK:
2037 		message_done(tll);
2038 		DBGPLUMB((CE_CONT, "bind ack\n"));
2039 		break;
2040 
2041 	case DL_PHYS_ADDR_ACK:
2042 		break;
2043 
2044 	case DL_OK_ACK:
2045 		switch (dlp->ok_ack.dl_correct_primitive) {
2046 		case DL_UNBIND_REQ:
2047 			message_done(tll);
2048 			break;
2049 		case DL_DETACH_REQ:
2050 			DBGPLUMB((CE_CONT, "detach ack; shutdown done\n"));
2051 			tll->tll_flags |= TLLF_SHUTDOWN_DONE;
2052 			break;
2053 		case DL_ATTACH_REQ:
2054 			message_done(tll);
2055 			DBGPLUMB((CE_CONT, "attach ack\n"));
2056 			break;
2057 		}
2058 		break;
2059 	}
2060 	freemsg(mp);
2061 }
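
/*
 * For reference, the primitives handled above correspond roughly to
 * the standard DLPI plumbing and teardown exchanges (a summary, not
 * an exhaustive list):
 *
 *	DL_INFO_REQ		->	DL_INFO_ACK
 *	DL_ATTACH_REQ		->	DL_OK_ACK
 *	DL_BIND_REQ		->	DL_BIND_ACK
 *	DL_PHYS_ADDR_REQ	->	DL_PHYS_ADDR_ACK
 *	DL_UNBIND_REQ		->	DL_OK_ACK
 *	DL_DETACH_REQ		->	DL_OK_ACK	(shutdown done)
 *
 * Most replies (or their DL_ERROR_ACK counterparts) feed back into
 * the deferred-message machinery via message_done() so that the next
 * queued request can be sent down.
 */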
2062 
2063 /* Search structure used with PPPoE only; see tclvm_pppoe_search(). */
2064 struct poedat {
2065 	uint_t sessid;
2066 	tunll_t *tll;
2067 	void *srcaddr;
2068 	int isdata;
2069 	tuncl_t *tcl;
2070 };
2071 
2072 /*
2073  * This function is called by vmem_walk from within sppptun_recv.  It
2074  * iterates over a span of allocated minor node numbers to search for
2075  * the appropriate lower stream, session ID, and peer MAC address.
2076  *
2077  * (This is necessary due to a design flaw in the PPPoE protocol
2078  * itself.  The protocol assigns session IDs from the server side
2079  * only.  Both server and client use the same number.  Thus, if there
2080  * are multiple clients on a single host, there can be session ID
2081  * conflicts between servers and there's no way to detangle them
2082  * except by looking at the remote MAC address.)
2083  *
2084  * (This could have been handled by linking together sessions that
2085  * differ only in the remote MAC address.  This isn't done because it
2086  * would involve extra per-session storage and it's very unlikely that
2087  * PPPoE would be used this way.)
2088  */
2089 static void
2090 tclvm_pppoe_search(void *arg, void *firstv, size_t numv)
2091 {
2092 	struct poedat *poedat = (struct poedat *)arg;
2093 	int minorn = (int)(uintptr_t)firstv;
2094 	int minormax = minorn + numv;
2095 	tuncl_t *tcl;
2096 
2097 	if (poedat->tcl != NULL)
2098 		return;
2099 	while (minorn < minormax) {
2100 		tcl = tcl_slots[minorn - 1];
2101 		ASSERT(tcl != NULL);
2102 		if (tcl->tcl_rsessid == poedat->sessid &&
2103 		    ((!poedat->isdata && tcl->tcl_ctrl_tll == poedat->tll) ||
2104 			(poedat->isdata &&
2105 			    tcl->tcl_data_tll == poedat->tll)) &&
2106 		    bcmp(tcl->tcl_address.pta_pppoe.ptma_mac,
2107 			poedat->srcaddr,
2108 			sizeof (tcl->tcl_address.pta_pppoe.ptma_mac)) == 0) {
2109 			poedat->tcl = tcl;
2110 			break;
2111 		}
2112 		minorn++;
2113 	}
2114 }
2115 
2116 /*
2117  * sppptun_recv()
2118  *
2119  * MT-Perimeters:
2120  *    shared inner, shared outer.
2121  *
2122  * Description:
2123  *    Receive function called by sppptun_urput, which is called when
2124  *    the lower read-side put or service procedure sends a message
2125  *    upstream to a device user (PPP).  It attempts to find an
2126  *    appropriate queue on the module above us (depending on what the
2127  *    associated upper stream for the protocol would be), and if that
2128  *    isn't possible, it falls back to an upper control stream.
2129  *    Returns a pointer to the upper queue_t, or NULL if the message
2130  *    has been discarded.
2131  *
2132  * About demultiplexing:
2133  *
2134  *	All four protocols (L2F, PPTP, L2TP, and PPPoE) support a
2135  *	locally assigned ID for demultiplexing incoming traffic.  For
2136  *	L2F, this is called the Client ID, for PPTP the Call ID, for
2137  *	L2TP the Session ID, and for PPPoE the SESSION_ID.  This is a
2138  *	16-bit number for all four protocols, and is used to directly
2139  *	index into a list of upper streams.  With the upper stream in
2140  *	hand, we verify that this is the right stream and deliver the
2141  *	data.
2142  *
2143  *	L2TP has a Tunnel ID, which represents a bundle of PPP
2144  *	sessions between the peers.  Because we always assign unique
2145  *	session ID numbers, we merely check that the given ID matches
2146  *	the assigned ID for the upper stream.
2147  *
2148  *	L2F has a Multiplex ID, which is unique per connection.  It
2149  *	does not have L2TP's concept of multiple-connections-within-
2150  *	a-tunnel.  The same checking is done.
2151  *
2152  *	PPPoE is a horribly broken protocol.  Only one ID is assigned
2153  *	per connection.  The client must somehow demultiplex based on
2154  *	an ID number assigned by the server.  It's not necessarily
2155  *	unique.  The search is done based on {ID,peerEthernet} (using
2156  *	tcl_rsessid) for all packet types except PADI and PADS.
2157  *
2158  *	Neither PPPoE nor PPTP supports additional ID numbers.
2159  *
2160  *	Both L2F and L2TP come in over UDP.  They are distinguished by
2161  *	looking at the version field in the encapsulation header --
2162  *	001 for L2F and 010 for L2TP.
2163  */
2164 static queue_t *
2165 sppptun_recv(queue_t *q, mblk_t **mpp, void *destaddr, void *srcaddr)
2166 {
2167 	mblk_t *mp;
2168 	tunll_t *tll;
2169 	tuncl_t *tcl;
2170 	int sessid;
2171 	int remlen;
2172 	int msglen;
2173 	int isdata;
2174 	int i;
2175 	ushort_t *usp;
2176 	uchar_t *ucp;
2177 	poep_t *poep;
2178 	mblk_t *mnew;
2179 	ppptun_atype *pap;
2180 
2181 	_NOTE(ARGUNUSED(destaddr))
2182 	ASSERT(mpp != NULL);
2183 	mp = *mpp;
2184 	ASSERT(q != NULL);
2185 	ASSERT(q->q_ptr != NULL);
2186 	ASSERT(mp != NULL);
2187 
2188 	tll = (tunll_t *)q->q_ptr;
2189 	ASSERT(!(tll->tll_flags & TLLF_NOTLOWER));
2190 
2191 	/*
2192 	 * First, extract a session ID number.  All protocols have
2193 	 * this.
2194 	 */
2195 	usp = (ushort_t *)mp->b_rptr;
2196 	ucp = (uchar_t *)mp->b_rptr;
2197 	tcl = NULL;
2198 	switch (tll->tll_style) {
2199 	case PTS_PPPOE:
2200 		poep = (poep_t *)usp;
2201 		if (poep->poep_version_type != POE_VERSION)
2202 			break;
2203 		isdata = (poep->poep_code == POECODE_DATA);
2204 		sessid = ntohs(poep->poep_session_id);
2205 		remlen = sizeof (*poep);
2206 		msglen = ntohs(poep->poep_length);
2207 		i = poep->poep_code;
2208 		if (i == POECODE_PADI || i == POECODE_PADR) {
2209 			/* These go to the server daemon only. */
2210 			tcl = tll->tll_defcl;
2211 		} else if (i == POECODE_PADO || i == POECODE_PADS) {
2212 			/*
2213 			 * These go to a client only, and are demuxed
2214 			 * by the Host-Uniq field (into which we stuff
2215 			 * our local ID number when generating
2216 			 * PADI/PADR).
2217 			 */
2218 			ucp += sizeof (*poep);
2219 			i = msglen;
2220 			while (i > POET_HDRLEN) {
2221 				if (POET_GET_TYPE(ucp) == POETT_END) {
2222 					i = 0;
2223 					break;
2224 				}
2225 				if (POET_GET_TYPE(ucp) == POETT_UNIQ &&
2226 				    POET_GET_LENG(ucp) >= sizeof (uint32_t))
2227 					break;
2228 				i -= POET_GET_LENG(ucp) + POET_HDRLEN;
2229 				ucp = POET_NEXT(ucp);
2230 			}
2231 			if (i >= POET_HDRLEN + 4)
2232 				sessid = GETLONG(ucp + POET_HDRLEN);
2233 			tcl = tcl_by_minor((minor_t)sessid);
2234 		} else {
2235 			/*
2236 			 * Try minor number as session ID first, since
2237 			 * it's used that way on the server side.  It's
2238 			 * not used that way on the client, though, so
2239 			 * this might not work.  If this isn't the
2240 			 * right one, then try the tll cache.  If
2241 			 * neither is right, then search all open
2242 			 * clients.  Did I mention that the PPPoE
2243 			 * protocol is badly designed?
2244 			 */
2245 			tcl = tcl_by_minor((minor_t)sessid);
2246 			if (tcl == NULL ||
2247 			    (!isdata && tcl->tcl_ctrl_tll != tll) ||
2248 			    (isdata && tcl->tcl_data_tll != tll) ||
2249 			    sessid != tcl->tcl_rsessid ||
2250 			    bcmp(srcaddr, tcl->tcl_address.pta_pppoe.ptma_mac,
2251 			sizeof (tcl->tcl_address.pta_pppoe.ptma_mac)) != 0)
2252 				tcl = tll->tll_lastcl;
2253 			if (tcl == NULL ||
2254 			    (!isdata && tcl->tcl_ctrl_tll != tll) ||
2255 			    (isdata && tcl->tcl_data_tll != tll) ||
2256 			    sessid != tcl->tcl_rsessid ||
2257 			    bcmp(srcaddr, tcl->tcl_address.pta_pppoe.ptma_mac,
2258 			sizeof (tcl->tcl_address.pta_pppoe.ptma_mac)) != 0)
2259 				tcl = NULL;
2260 			if (tcl == NULL && sessid != 0) {
2261 				struct poedat poedat;
2262 
2263 				/*
2264 				 * Slow mode.  Too bad.  If you don't like it,
2265 				 * you can always choose a better protocol.
2266 				 */
2267 				poedat.sessid = sessid;
2268 				poedat.tll = tll;
2269 				poedat.srcaddr = srcaddr;
2270 				poedat.tcl = NULL;
2271 				poedat.isdata = isdata;
2272 				vmem_walk(tcl_minor_arena, VMEM_ALLOC,
2273 				    tclvm_pppoe_search, &poedat);
2274 				KLINCR(lks_walks);
2275 				if ((tcl = poedat.tcl) != NULL) {
2276 					tll->tll_lastcl = tcl;
2277 					KCINCR(cks_walks);
2278 				}
2279 			}
2280 		}
2281 		break;
2282 	}
2283 
2284 	if (tcl == NULL || tcl->tcl_rq == NULL) {
2285 		DBGERROR((CE_CONT, "discard; session %d %s", sessid,
2286 		    tcl == NULL ? "not found" : "terminated"));
2287 		if (tcl == NULL) {
2288 			KLINCR(lks_in_nomatch);
2289 		}
2290 		if (isdata) {
2291 			KLINCR(lks_indata_drops);
2292 			if (tcl != NULL)
2293 				tcl->tcl_stats.ppp_ierrors++;
2294 		} else {
2295 			KLINCR(lks_inctrl_drops);
2296 			if (tcl != NULL) {
2297 				KCINCR(cks_inctrl_drops);
2298 			}
2299 		}
2300 		freemsg(mp);
2301 		return (NULL);
2302 	}
2303 
2304 	if (tcl->tcl_data_tll == tll && isdata) {
2305 		if (!adjmsg(mp, remlen) ||
2306 		    (i = msgsize(mp)) < msglen ||
2307 		    (i > msglen && !adjmsg(mp, msglen - i))) {
2308 			KLINCR(lks_indata_drops);
2309 			tcl->tcl_stats.ppp_ierrors++;
2310 			freemsg(mp);
2311 			return (NULL);
2312 		}
2313 		/* XXX -- address/control handling in pppd needs help. */
2314 		if (*mp->b_rptr != 0xFF) {
2315 			if ((mp = prependb(mp, 2, 1)) == NULL) {
2316 				KLINCR(lks_indata_drops);
2317 				tcl->tcl_stats.ppp_ierrors++;
2318 				return (NULL);
2319 			}
2320 			mp->b_rptr[0] = 0xFF;
2321 			mp->b_rptr[1] = 0x03;
2322 		}
2323 		MTYPE(mp) = M_DATA;
2324 		if (tcl->tcl_flags & TCLF_DEBUG)
2325 			sppp_dump_frame(tcl->tcl_unit, mp, "rcvd");
2326 		tcl->tcl_stats.ppp_ibytes += msgsize(mp);
2327 		tcl->tcl_stats.ppp_ipackets++;
2328 		KLINCR(lks_indata);
2329 	} else {
2330 		if (isdata || tcl->tcl_ctrl_tll != tll ||
2331 		    (mnew = make_control(tcl, tll, PTCA_CONTROL, tcl)) ==
2332 		    NULL) {
2333 			KLINCR(lks_inctrl_drops);
2334 			KCINCR(cks_inctrl_drops);
2335 			freemsg(mp);
2336 			return (NULL);
2337 		}
2338 		if (tcl->tcl_flags & TCLF_DEBUG)
2339 			sppp_dump_frame(tcl->tcl_unit, mp, "rctl");
2340 		/* Fix up source address; peer might not be set yet. */
2341 		pap = &((struct ppptun_control *)mnew->b_rptr)->ptc_address;
2342 		bcopy(srcaddr, pap->pta_pppoe.ptma_mac,
2343 		    sizeof (pap->pta_pppoe.ptma_mac));
2344 		mnew->b_cont = mp;
2345 		mp = mnew;
2346 		KLINCR(lks_inctrls);
2347 		KCINCR(cks_inctrls);
2348 	}
2349 	*mpp = mp;
2350 	return (tcl->tcl_rq);
2351 }
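
/*
 * For readers following the PPPoE parsing in sppptun_recv() above:
 * the poep_t definition lives in <net/pppoe.h>; as a rough sketch,
 * the on-the-wire header it describes (RFC 2516) looks like
 *
 *	struct {
 *		uint8_t  version_type;	4-bit version, 4-bit type
 *		uint8_t  code;		PADI/PADO/PADR/PADS/PADT or 0 (data)
 *		uint16_t session_id;	network byte order
 *		uint16_t length;	payload length, network byte order
 *	};
 *
 * Control packets carry a list of tags after the header, each with a
 * 16-bit type and a 16-bit length (POET_HDRLEN bytes) followed by its
 * data; the Host-Uniq (POETT_UNIQ) tag is the one searched for when
 * demultiplexing PADO and PADS replies.
 */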
2352 
2353 /*
2354  * sppptun_urput()
2355  *
2356  * MT-Perimeters:
2357  *    shared inner, shared outer.
2358  *
2359  * Description:
2360  *    Upper read-side put procedure.  Messages from the underlying
2361  *    lower stream driver arrive here.  See sppptun_recv for the
2362  *    demultiplexing logic.
2363  */
2364 static void
2365 sppptun_urput(queue_t *q, mblk_t *mp)
2366 {
2367 	union DL_primitives *dlprim;
2368 	mblk_t *mpnext;
2369 	tunll_t *tll;
2370 	queue_t *nextq;
2371 
2372 	DBGENTRY((CE_CONT, "sppptun_urput as %s", DBGQSIDE(q)));
2373 
2374 	ASSERT(q != NULL);
2375 	ASSERT(q->q_ptr != NULL);
2376 	ASSERT(mp != NULL);
2377 	tll = (tunll_t *)q->q_ptr;
2378 	ASSERT(!(tll->tll_flags & TLLF_NOTLOWER));
2379 
2380 	switch (MTYPE(mp)) {
2381 	case M_DATA:
2382 		/*
2383 		 * When we're bound over IP, data arrives here.  The
2384 		 * packet starts with the IP header itself.
2385 		 */
2386 		if ((nextq = sppptun_recv(q, &mp, NULL, NULL)) != NULL)
2387 			putnext(nextq, mp);
2388 		break;
2389 
2390 	case M_PROTO:
2391 	case M_PCPROTO:
2392 		/* Data arrives here for UDP or raw Ethernet, not IP. */
2393 		switch (tll->tll_style) {
2394 			/* PPTP control messages are over TCP only. */
2395 		case PTS_PPTP:
2396 		default:
2397 			ASSERT(0);	/* how'd that happen? */
2398 			break;
2399 
2400 		case PTS_PPPOE:		/* DLPI message */
2401 			dlprim = (union DL_primitives *)mp->b_rptr;
2402 			switch (dlprim->dl_primitive) {
2403 			case DL_UNITDATA_IND:
2404 				mpnext = mp->b_cont;
2405 				MTYPE(mpnext) = M_DATA;
2406 				nextq = sppptun_recv(q, &mpnext,
2407 				    mp->b_rptr +
2408 				    dlprim->unitdata_ind.dl_dest_addr_offset,
2409 				    mp->b_rptr +
2410 				    dlprim->unitdata_ind.dl_src_addr_offset);
2411 				if (nextq != NULL)
2412 					putnext(nextq, mpnext);
2413 				freeb(mp);
2414 				break;
2415 
2416 			/* For loopback support. */
2417 			case DL_UNITDATA_REQ:
2418 				mpnext = mp->b_cont;
2419 				MTYPE(mpnext) = M_DATA;
2420 				nextq = sppptun_recv(q, &mpnext,
2421 				    mp->b_rptr +
2422 				    dlprim->unitdata_req.dl_dest_addr_offset,
2423 				    tll->tll_lcladdr.pta_pppoe.ptma_mac);
2424 				if (nextq != NULL)
2425 					putnext(nextq, mpnext);
2426 				freeb(mp);
2427 				break;
2428 
2429 			default:
2430 				urput_dlpi(q, mp);
2431 				break;
2432 			}
2433 			break;
2434 		}
2435 		break;
2436 
2437 	default:
2438 		freemsg(mp);
2439 		break;
2440 	}
2441 }
2442 
2443 /*
2444  * sppptun_ursrv()
2445  *
2446  * MT-Perimeters:
2447  *    exclusive inner, shared outer.
2448  *
2449  * Description:
2450  *    Upper read-side service procedure.  This procedure services the
2451  *    client streams.  We get here when the client (PPP) above us has
2452  *    asserted flow control; queued messages drain once it lifts.
2453  */
2454 static int
2455 sppptun_ursrv(queue_t *q)
2456 {
2457 	mblk_t		*mp;
2458 
2459 	ASSERT(q != NULL);
2460 	ASSERT(q->q_ptr != NULL);
2461 
2462 	while ((mp = getq(q)) != NULL) {
2463 		if (canputnext(q)) {
2464 			putnext(q, mp);
2465 		} else {
2466 			(void) putbq(q, mp);
2467 			break;
2468 		}
2469 	}
2470 	return (0);
2471 }
2472 
2473 /*
2474  * Dummy constructor/destructor functions for kmem_cache_create.
2475  * They are no-ops: no constructed state is kept in cached tuncl_t
2476  * entries, so each structure is set up from scratch when allocated.
2477  */
2478 
2479 /*ARGSUSED*/
2480 static int
2481 tcl_constructor(void *maddr, void *arg, int kmflags)
2482 {
2483 	return (0);
2484 }
2485 
2486 /*ARGSUSED*/
2487 static void
2488 tcl_destructor(void *maddr, void *arg)
2489 {
2490 }
2491 
2492 /*
2493  * Total size occupied by one tunnel client.  Each tunnel client
2494  * consumes one pointer in the tcl_slots array, one tuncl_t structure,
2495  * and two messages preallocated for close.
2496  */
2497 #define	TUNCL_SIZE (sizeof (tuncl_t) + sizeof (tuncl_t *) + \
2498 			2 * sizeof (dblk_t))
2499 
2500 /*
2501  * Clear all bits of x except the highest bit
2502  */
2503 #define	truncate(x) 	((x) <= 2 ? (x) : (1 << (highbit(x) - 1)))
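
/*
 * Worked example for the sizing math in sppptun_tcl_init() below
 * (illustrative numbers only): if the memory-based estimate were
 * i = 1500, then truncate(1500) = 1024 (only the high bit kept);
 * since 1500 != 1024 the result is doubled to 2048, i.e. the
 * estimate rounded up to the next power of two.
 */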
2504 
2505 /*
2506  * This function initializes the module's global state; at present that
2507  * is just the doubly linked list of lower streams (tunll_list).
2508  *
2509  * Called by sppptun_mod.c:_init() before installing the module.
2510  */
2511 void
2512 sppptun_init(void)
2513 {
2514 	tunll_list.q_forw = tunll_list.q_back = &tunll_list;
2515 }
2516 
2517 /*
2518  * This function allocates the initial internal storage for the
2519  * sppptun driver.
2520  *
2521  * Called by sppptun_mod.c:_init() after installing the module.
2522  */
2523 void
2524 sppptun_tcl_init(void)
2525 {
2526 	uint_t i, j;
2527 
2528 	rw_init(&tcl_rwlock, NULL, RW_DRIVER, NULL);
2529 	rw_enter(&tcl_rwlock, RW_WRITER);
2530 	tcl_nslots = sppptun_init_cnt;
2531 	tcl_slots = kmem_zalloc(tcl_nslots * sizeof (tuncl_t *), KM_SLEEP);
2532 
2533 	tcl_cache = kmem_cache_create("sppptun_map", sizeof (tuncl_t), 0,
2534 	    tcl_constructor, tcl_destructor, NULL, NULL, NULL, 0);
2535 
2536 	/* Allocate integer space for minor numbers */
2537 	tcl_minor_arena = vmem_create("sppptun_minor", (void *)1, tcl_nslots,
2538 	    1, NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER);
2539 
2540 	/*
2541 	 * Calculate the available number of tunnels, i.e. how many we can
2542 	 * allocate using sppptun_pctofmem percent of available memory.
2543 	 * The value is rounded up to the nearest power of 2.
2544 	 */
2545 	i = (sppptun_pctofmem * kmem_maxavail()) / (100 * TUNCL_SIZE);
2546 	j = truncate(i);	/* i with non-high bits stripped */
2547 	if (i != j)
2548 		j *= 2;
2549 	tcl_minormax = j;
2550 	rw_exit(&tcl_rwlock);
2551 }
2552 
2553 /*
2554  * This function checks that there are no plumbed streams or other users.
2555  *
2556  * Called by sppptun_mod.c:_fini().  Assumes that we're exclusive on
2557  * both perimeters.
2558  */
2559 int
2560 sppptun_tcl_fintest(void)
2561 {
2562 	if (tunll_list.q_forw != &tunll_list || tcl_inuse > 0) {
2563 		DBGERROR((CE_CONT,
2564 		    "_fini: return busy; plumbed and %d in use\n",
2565 		    tcl_inuse));
2566 		return (EBUSY);
2567 	}
2568 	return (0);
2569 }
2570 
2571 /*
2572  * If no lower streams are plumbed, then this function deallocates all
2573  * internal storage in preparation for unload.
2574  *
2575  * Called by sppptun_mod.c:_fini().  Assumes that we're exclusive on
2576  * both perimeters.
2577  */
2578 void
2579 sppptun_tcl_fini(void)
2580 {
2581 	if (tcl_minor_arena != NULL) {
2582 		vmem_destroy(tcl_minor_arena);
2583 		tcl_minor_arena = NULL;
2584 	}
2585 	if (tcl_cache != NULL) {
2586 		kmem_cache_destroy(tcl_cache);
2587 		tcl_cache = NULL;
2588 	}
2589 	kmem_free(tcl_slots, tcl_nslots * sizeof (tuncl_t *));
2590 	tcl_slots = NULL;
2591 	rw_destroy(&tcl_rwlock);
2592 	ASSERT(tcl_slots == NULL);
2593 	ASSERT(tcl_cache == NULL);
2594 	ASSERT(tcl_minor_arena == NULL);
2595 }
2596