xref: /illumos-gate/usr/src/uts/common/io/ppp/sppp/sppp.c (revision 72d3dbb9ab4481606cb93caca98ba3b3a8eb6ce2)
1 /*
2  * sppp.c - Solaris STREAMS PPP multiplexing pseudo-driver
3  *
4  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
5  * Use is subject to license terms.
6  * Copyright (c) 2016 by Delphix. All rights reserved.
7  *
8  * Permission to use, copy, modify, and distribute this software and its
9  * documentation is hereby granted, provided that the above copyright
10  * notice appears in all copies.
11  *
12  * SUN MAKES NO REPRESENTATION OR WARRANTIES ABOUT THE SUITABILITY OF
13  * THE SOFTWARE, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
14  * TO THE IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
15  * PARTICULAR PURPOSE, OR NON-INFRINGEMENT.  SUN SHALL NOT BE LIABLE FOR
16  * ANY DAMAGES SUFFERED BY LICENSEE AS A RESULT OF USING, MODIFYING OR
17  * DISTRIBUTING THIS SOFTWARE OR ITS DERIVATIVES
18  *
19  * Copyright (c) 1994 The Australian National University.
20  * All rights reserved.
21  *
22  * Permission to use, copy, modify, and distribute this software and its
23  * documentation is hereby granted, provided that the above copyright
24  * notice appears in all copies.  This software is provided without any
25  * warranty, express or implied. The Australian National University
26  * makes no representations about the suitability of this software for
27  * any purpose.
28  *
29  * IN NO EVENT SHALL THE AUSTRALIAN NATIONAL UNIVERSITY BE LIABLE TO ANY
30  * PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
31  * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF
32  * THE AUSTRALIAN NATIONAL UNIVERSITY HAS BEEN ADVISED OF THE POSSIBILITY
33  * OF SUCH DAMAGE.
34  *
35  * THE AUSTRALIAN NATIONAL UNIVERSITY SPECIFICALLY DISCLAIMS ANY WARRANTIES,
36  * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
37  * AND FITNESS FOR A PARTICULAR PURPOSE.  THE SOFTWARE PROVIDED HEREUNDER IS
38  * ON AN "AS IS" BASIS, AND THE AUSTRALIAN NATIONAL UNIVERSITY HAS NO
39  * OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS,
40  * OR MODIFICATIONS.
41  *
42  * This driver is derived from the original SVR4 STREAMS PPP driver
43  * originally written by Paul Mackerras <paul.mackerras@cs.anu.edu.au>.
44  *
45  * Adi Masputra <adi.masputra@sun.com> rewrote and restructured the code
46  * for improved performance and scalability.
47  */
48 
49 #define	RCSID	"$Id: sppp.c,v 1.0 2000/05/08 01:10:12 masputra Exp $"
50 
51 #include <sys/types.h>
52 #include <sys/debug.h>
53 #include <sys/param.h>
54 #include <sys/stat.h>
55 #include <sys/stream.h>
56 #include <sys/stropts.h>
57 #include <sys/sysmacros.h>
58 #include <sys/errno.h>
59 #include <sys/time.h>
60 #include <sys/cmn_err.h>
61 #include <sys/kmem.h>
62 #include <sys/conf.h>
63 #include <sys/dlpi.h>
64 #include <sys/ddi.h>
65 #include <sys/kstat.h>
66 #include <sys/strsun.h>
67 #include <sys/ethernet.h>
68 #include <sys/policy.h>
69 #include <sys/zone.h>
70 #include <net/ppp_defs.h>
71 #include <net/pppio.h>
72 #include "sppp.h"
73 #include "s_common.h"
74 
75 /*
76  * This is used to tag official Solaris sources.  Please do not define
77  * "INTERNAL_BUILD" when building this software outside of Sun Microsystems.
78  */
79 #ifdef INTERNAL_BUILD
80 /* MODINFO is limited to 32 characters. */
81 const char sppp_module_description[] = "PPP 4.0 mux";
82 #else /* INTERNAL_BUILD */
83 const char sppp_module_description[] = "ANU PPP mux";
84 
85 /* LINTED */
86 static const char buildtime[] = "Built " __DATE__ " at " __TIME__
87 #ifdef DEBUG
88 " DEBUG"
89 #endif
90 "\n";
91 #endif /* INTERNAL_BUILD */
92 
93 static void	sppp_inner_ioctl(queue_t *, mblk_t *);
94 static void	sppp_outer_ioctl(queue_t *, mblk_t *);
95 static queue_t	*sppp_send(queue_t *, mblk_t **, spppstr_t *);
96 static queue_t	*sppp_recv(queue_t *, mblk_t **, spppstr_t *);
97 static void	sppp_recv_nondata(queue_t *, mblk_t *, spppstr_t *);
98 static queue_t	*sppp_outpkt(queue_t *, mblk_t **, int, spppstr_t *);
99 static spppstr_t *sppp_inpkt(queue_t *, mblk_t *, spppstr_t *);
100 static int	sppp_kstat_update(kstat_t *, int);
101 static void 	sppp_release_pkts(sppa_t *, uint16_t);
102 
103 /*
104  * sps_list contains the list of active per-stream instance state structures
105  * ordered on the minor device number (see sppp.h for details). All streams
106  * opened to this driver are threaded together in this list.
107  */
108 static spppstr_t *sps_list = NULL;
109 /*
110  * ppa_list contains the list of active per-attachment instance state
111  * structures ordered on the ppa id number (see sppp.h for details). All of
112  * the ppa structures created once per PPPIO_NEWPPA ioctl are threaded together
113  * in this list. There is exactly one ppa structure for a given PPP interface,
114  * and multiple sps streams (upper streams) may share a ppa by performing
115  * an attachment explicitly (PPPIO_ATTACH) or implicitly (DL_ATTACH_REQ).
116  */
117 static sppa_t *ppa_list = NULL;
118 
119 static const char *kstats_names[] = { SPPP_KSTATS_NAMES };
120 static const char *kstats64_names[] = { SPPP_KSTATS64_NAMES };
121 
122 /*
123  * map proto (which is an IANA defined ppp network protocol) to
124  * a bit position indicated by NP_* in ppa_npflag
125  */
126 static uint32_t
127 sppp_ppp2np(uint16_t proto)
128 {
129 	switch (proto) {
130 	case PPP_IP:
131 		return (NP_IP);
132 	case PPP_IPV6:
133 		return (NP_IPV6);
134 	default:
135 		return (0);
136 	}
137 }
138 
139 /*
140  * sppp_open()
141  *
142  * MT-Perimeters:
143  *    exclusive inner, exclusive outer.
144  *
145  * Description:
146  *    Common open procedure for module.
147  */
148 /* ARGSUSED */
149 int
150 sppp_open(queue_t *q, dev_t *devp, int oflag, int sflag, cred_t *credp)
151 {
152 	spppstr_t	*sps;
153 	spppstr_t	**nextmn;
154 	minor_t		mn;
155 
156 	ASSERT(q != NULL && devp != NULL);
157 	ASSERT(sflag != MODOPEN);
158 
159 	if (q->q_ptr != NULL) {
160 		return (0);		/* already open */
161 	}
162 	if (sflag != CLONEOPEN) {
163 		return (OPENFAIL);
164 	}
165 	/*
166 	 * The sps list is sorted using the minor number as the key. The
167 	 * following code walks the list to find the lowest valued minor
168 	 * number available to be used.
169 	 */
170 	mn = 0;
171 	for (nextmn = &sps_list; (sps = *nextmn) != NULL;
172 	    nextmn = &sps->sps_nextmn) {
173 		if (sps->sps_mn_id != mn) {
174 			break;
175 		}
176 		++mn;
177 	}
178 	sps = (spppstr_t *)kmem_zalloc(sizeof (spppstr_t), KM_SLEEP);
179 	ASSERT(sps != NULL);		/* KM_SLEEP must never return NULL */
180 	sps->sps_nextmn = *nextmn;	/* insert stream in global list */
181 	*nextmn = sps;
182 	sps->sps_mn_id = mn;		/* save minor id for this stream */
183 	sps->sps_rq = q;		/* save read queue pointer */
184 	sps->sps_sap = -1;		/* no sap bound to stream */
185 	sps->sps_dlstate = DL_UNATTACHED; /* dlpi state is unattached */
186 	sps->sps_npmode = NPMODE_DROP;	/* drop all packets initially */
187 	sps->sps_zoneid = crgetzoneid(credp);
188 	q->q_ptr = WR(q)->q_ptr = (caddr_t)sps;
189 	/*
190 	 * We explicitly disable the automatic queue scheduling for the
191 	 * write-side to obtain complete control over queuing during transmit.
192 	 * Packets will be queued at the upper write queue and the service
193 	 * routine will not be called until it gets scheduled by having the
194 	 * lower write service routine call the qenable(WR(uq)) for all streams
195 	 * attached to the same ppa instance.
196 	 */
197 	noenable(WR(q));
198 	*devp = makedevice(getmajor(*devp), mn);
199 	qprocson(q);
200 	return (0);
201 }
202 
203 /*
204  * Free storage used by a PPA.  This is not called until the last PPA
205  * user closes their connection or reattaches to a different PPA.
206  */
207 static void
208 sppp_free_ppa(sppa_t *ppa)
209 {
210 	sppa_t **nextppa;
211 
212 	ASSERT(ppa->ppa_refcnt == 1);
213 	if (ppa->ppa_kstats != NULL) {
214 		kstat_delete(ppa->ppa_kstats);
215 		ppa->ppa_kstats = NULL;
216 	}
217 	mutex_destroy(&ppa->ppa_sta_lock);
218 	mutex_destroy(&ppa->ppa_npmutex);
219 	rw_destroy(&ppa->ppa_sib_lock);
220 	nextppa = &ppa_list;
221 	while (*nextppa != NULL) {
222 		if (*nextppa == ppa) {
223 			*nextppa = ppa->ppa_nextppa;
224 			break;
225 		}
226 		nextppa = &(*nextppa)->ppa_nextppa;
227 	}
228 	kmem_free(ppa, sizeof (*ppa));
229 }
230 
231 /*
232  * Create a new PPA.  Caller must be exclusive on outer perimeter.
233  */
234 sppa_t *
235 sppp_create_ppa(uint32_t ppa_id, zoneid_t zoneid)
236 {
237 	sppa_t *ppa;
238 	sppa_t *curppa;
239 	sppa_t **availppa;
240 	char unit[32];		/* Unit name */
241 	const char **cpp;
242 	kstat_t *ksp;
243 	kstat_named_t *knt;
244 
245 	/*
246 	 * NOTE: unit *must* be named for the driver
247 	 * name plus the ppa number so that netstat
248 	 * can find the statistics.
249 	 */
250 	(void) sprintf(unit, "%s" "%d", PPP_DRV_NAME, ppa_id);
251 	/*
252 	 * Make sure we can allocate a buffer to
253 	 * contain the ppa to be sent upstream, as
254 	 * well as the actual ppa structure and its
255 	 * associated kstat structure.
256 	 */
257 	ppa = (sppa_t *)kmem_zalloc(sizeof (sppa_t),
258 	    KM_NOSLEEP);
259 	ksp = kstat_create(PPP_DRV_NAME, ppa_id, unit, "net", KSTAT_TYPE_NAMED,
260 	    sizeof (sppp_kstats_t) / sizeof (kstat_named_t), 0);
261 
262 	if (ppa == NULL || ksp == NULL) {
263 		if (ppa != NULL) {
264 			kmem_free(ppa, sizeof (sppa_t));
265 		}
266 		if (ksp != NULL) {
267 			kstat_delete(ksp);
268 		}
269 		return (NULL);
270 	}
271 	ppa->ppa_kstats = ksp;		/* chain kstat structure */
272 	ppa->ppa_ppa_id = ppa_id;	/* record ppa id */
273 	ppa->ppa_zoneid = zoneid;	/* zone that owns this PPA */
274 	ppa->ppa_mtu = PPP_MAXMTU;	/* 65535-(PPP_HDRLEN+PPP_FCSLEN) */
275 	ppa->ppa_mru = PPP_MAXMRU;	/* 65000 */
276 
277 	mutex_init(&ppa->ppa_sta_lock, NULL, MUTEX_DRIVER, NULL);
278 	mutex_init(&ppa->ppa_npmutex, NULL, MUTEX_DRIVER, NULL);
279 	rw_init(&ppa->ppa_sib_lock, NULL, RW_DRIVER, NULL);
280 
281 	/*
282 	 * Prepare and install kstat counters.  Note that for netstat
283 	 * -i to work, there needs to be "ipackets", "opackets",
284 	 * "ierrors", and "oerrors" kstat named variables.
285 	 */
286 	knt = (kstat_named_t *)ksp->ks_data;
287 	for (cpp = kstats_names; cpp < kstats_names + Dim(kstats_names);
288 	    cpp++) {
289 		kstat_named_init(knt, *cpp, KSTAT_DATA_UINT32);
290 		knt++;
291 	}
292 	for (cpp = kstats64_names; cpp < kstats64_names + Dim(kstats64_names);
293 	    cpp++) {
294 		kstat_named_init(knt, *cpp, KSTAT_DATA_UINT64);
295 		knt++;
296 	}
297 	ksp->ks_update = sppp_kstat_update;
298 	ksp->ks_private = (void *)ppa;
299 	kstat_install(ksp);
300 
301 	/* link to the next ppa and insert into global list */
302 	availppa = &ppa_list;
303 	while ((curppa = *availppa) != NULL) {
304 		if (ppa_id < curppa->ppa_ppa_id)
305 			break;
306 		availppa = &curppa->ppa_nextppa;
307 	}
308 	ppa->ppa_nextppa = *availppa;
309 	*availppa = ppa;
310 	return (ppa);
311 }
312 
313 /*
314  * sppp_close()
315  *
316  * MT-Perimeters:
317  *    exclusive inner, exclusive outer.
318  *
319  * Description:
320  *    Common close procedure for module.
321  */
int
sppp_close(queue_t *q)
{
	spppstr_t	*sps;
	spppstr_t	**nextmn;
	spppstr_t	*sib;
	sppa_t		*ppa;
	mblk_t		*mp;

	ASSERT(q != NULL && q->q_ptr != NULL);
	sps = (spppstr_t *)q->q_ptr;
	qprocsoff(q);

	ppa = sps->sps_ppa;
	if (ppa == NULL) {
		/* Never attached to a ppa; just unlink from sps_list. */
		ASSERT(!IS_SPS_CONTROL(sps));
		goto close_unattached;
	}
	if (IS_SPS_CONTROL(sps)) {
		/* Closing the control stream tears down the whole ppa. */
		uint32_t	cnt = 0;

		ASSERT(ppa != NULL);
		ASSERT(ppa->ppa_ctl == sps);
		ppa->ppa_ctl = NULL;
		/*
		 * STREAMS framework always issues I_UNLINK prior to close,
		 * since we only allow I_LINK under the control stream.
		 * A given ppa structure has at most one lower stream pointed
		 * by the ppa_lower_wq field, because we only allow a single
		 * linkage (I_LINK) to be done on the control stream.
		 */
		ASSERT(ppa->ppa_lower_wq == NULL);
		/*
		 * Walk through all of sibling streams attached to this ppa,
		 * and remove all references to this ppa. We have exclusive
		 * access for the entire driver here, so there's no need
		 * to hold ppa_sib_lock.
		 */
		cnt++;	/* account for the control stream's own reference */
		sib = ppa->ppa_streams;
		while (sib != NULL) {
			ASSERT(ppa == sib->sps_ppa);
			sib->sps_npmode = NPMODE_DROP;
			sib->sps_flags &= ~(SPS_PIOATTACH | SPS_CACHED);
			/*
			 * There should be a preallocated hangup
			 * message here.  Fetch it and send it up to
			 * the stream head.  This will cause IP to
			 * mark the interface as "down."
			 */
			if ((mp = sib->sps_hangup) != NULL) {
				sib->sps_hangup = NULL;
				/*
				 * M_HANGUP works with IP, but snoop
				 * is lame and requires M_ERROR.  Send
				 * up a clean error code instead.
				 *
				 * XXX if snoop is fixed, fix this, too.
				 */
				MTYPE(mp) = M_ERROR;
				*mp->b_wptr++ = ENXIO;
				putnext(sib->sps_rq, mp);
			}
			/* Let the sibling's write service drain and drop. */
			qenable(WR(sib->sps_rq));
			cnt++;
			sib = sib->sps_nextsib;
		}
		/* Every attached stream (plus control) holds one reference. */
		ASSERT(ppa->ppa_refcnt == cnt);
	} else {
		/* A non-control (data) stream is detaching from the ppa. */
		ASSERT(ppa->ppa_streams != NULL);
		ASSERT(ppa->ppa_ctl != sps);
		mp = NULL;
		/* Invalidate the fastpath cache for the sap going away. */
		if (sps->sps_sap == PPP_IP) {
			ppa->ppa_ip_cache = NULL;
			mp = create_lsmsg(PPP_LINKSTAT_IPV4_UNBOUND);
		} else if (sps->sps_sap == PPP_IPV6) {
			ppa->ppa_ip6_cache = NULL;
			mp = create_lsmsg(PPP_LINKSTAT_IPV6_UNBOUND);
		}
		/* Tell the daemon the bad news. */
		if (mp != NULL && ppa->ppa_ctl != NULL &&
		    (sps->sps_npmode == NPMODE_PASS ||
		    sps->sps_npmode == NPMODE_QUEUE)) {
			putnext(ppa->ppa_ctl->sps_rq, mp);
		} else {
			freemsg(mp);
		}
		/*
		 * Walk through all of sibling streams attached to the
		 * same ppa, and remove this stream from the sibling
		 * streams list. We have exclusive access for the
		 * entire driver here, so there's no need to hold
		 * ppa_sib_lock.
		 */
		sib = ppa->ppa_streams;
		if (sib == sps) {
			ppa->ppa_streams = sps->sps_nextsib;
		} else {
			while (sib->sps_nextsib != NULL) {
				if (sib->sps_nextsib == sps) {
					sib->sps_nextsib = sps->sps_nextsib;
					break;
				}
				sib = sib->sps_nextsib;
			}
		}
		sps->sps_nextsib = NULL;
		freemsg(sps->sps_hangup);
		sps->sps_hangup = NULL;
		/*
		 * Check if this is a promiscuous stream. If the SPS_PROMISC
		 * bit is still set, it means that the stream is closed without
		 * ever having issued DL_DETACH_REQ or DL_PROMISCOFF_REQ.
		 * In this case, we simply decrement the promiscuous counter,
		 * and it's safe to do it without holding ppa_sib_lock since
		 * we're exclusive (inner and outer) at this point.
		 */
		if (IS_SPS_PROMISC(sps)) {
			ASSERT(ppa->ppa_promicnt > 0);
			ppa->ppa_promicnt--;
		}
	}
	/* If we're the only one left, then delete now. */
	if (ppa->ppa_refcnt <= 1)
		sppp_free_ppa(ppa);
	else
		ppa->ppa_refcnt--;
close_unattached:
	q->q_ptr = WR(q)->q_ptr = NULL;
	/* Unlink this stream from the global, minor-sorted sps_list. */
	for (nextmn = &sps_list; *nextmn != NULL;
	    nextmn = &(*nextmn)->sps_nextmn) {
		if (*nextmn == sps) {
			*nextmn = sps->sps_nextmn;
			break;
		}
	}
	kmem_free(sps, sizeof (spppstr_t));
	return (0);
}
461 
/*
 * sppp_ioctl()
 *
 * Description:
 *    Handles the PPPIO_NPMODE, PPPIO_GIDLE, PPPIO_GTYPE, PPPIO_GETSTAT64
 *    and PPPIO_GETCSTAT ioctls on behalf of sppp_uwput().  Each request is
 *    either serviced locally (then acked/naked at the bottom), queued on
 *    this stream's write queue when a previously forwarded ioctl is still
 *    outstanding (SPS_IOCQ), or forwarded down the lower stream, in which
 *    case the M_IOCACK/M_IOCNAK is expected to come from below.
 */
static void
sppp_ioctl(struct queue *q, mblk_t *mp)
{
	spppstr_t	*sps;
	spppstr_t	*nextsib;
	sppa_t		*ppa;
	struct iocblk	*iop;
	mblk_t		*nmp;
	enum NPmode	npmode;
	struct ppp_idle	*pip;
	struct ppp_stats64 *psp;
	struct ppp_comp_stats *pcsp;
	hrtime_t	hrtime;
	int		sap;
	int		count = 0;	/* bytes returned in the ACK */
	int		error = EINVAL;	/* default: nak with EINVAL */

	sps = (spppstr_t *)q->q_ptr;
	ppa = sps->sps_ppa;

	iop = (struct iocblk *)mp->b_rptr;
	switch (iop->ioc_cmd) {
	case PPPIO_NPMODE:
		/* Set the network-protocol mode of a sibling stream. */
		if (!IS_SPS_CONTROL(sps)) {
			break;		/* return EINVAL */
		} else if (iop->ioc_count != 2 * sizeof (uint32_t) ||
		    (mp->b_cont == NULL)) {
			error = EPROTO;
			break;
		}
		ASSERT(ppa != NULL);
		ASSERT(mp->b_cont->b_rptr != NULL);
		ASSERT(sps->sps_npmode == NPMODE_PASS);
		/* Payload is two uint32_ts: the sap and the new mode. */
		sap = ((uint32_t *)mp->b_cont->b_rptr)[0];
		npmode = (enum NPmode)((uint32_t *)mp->b_cont->b_rptr)[1];
		/*
		 * Walk the sibling streams which belong to the same
		 * ppa, and try to find a stream with matching sap
		 * number.
		 */
		rw_enter(&ppa->ppa_sib_lock, RW_WRITER);
		for (nextsib = ppa->ppa_streams; nextsib != NULL;
		    nextsib = nextsib->sps_nextsib) {
			if (nextsib->sps_sap == sap) {
				break;	/* found it */
			}
		}
		if (nextsib == NULL) {
			rw_exit(&ppa->ppa_sib_lock);
			break;		/* return EINVAL */
		} else {
			nextsib->sps_npmode = npmode;
			/*
			 * Leaving NPMODE_QUEUE: kick the stream's write
			 * service so any queued packets move again.
			 */
			if ((nextsib->sps_npmode != NPMODE_QUEUE) &&
			    (WR(nextsib->sps_rq)->q_first != NULL)) {
				qenable(WR(nextsib->sps_rq));
			}
		}
		rw_exit(&ppa->ppa_sib_lock);
		error = 0;	/* return success */
		break;
	case PPPIO_GIDLE:
		/* Report transmit/receive idle time, in seconds. */
		if (ppa == NULL) {
			ASSERT(!IS_SPS_CONTROL(sps));
			error = ENOLINK;
			break;
		} else if (!IS_PPA_TIMESTAMP(ppa)) {
			/* Only valid if PPPIO_USETIMESTAMP was enabled. */
			break;		/* return EINVAL */
		}
		if ((nmp = allocb(sizeof (struct ppp_idle),
		    BPRI_MED)) == NULL) {
			mutex_enter(&ppa->ppa_sta_lock);
			ppa->ppa_allocbfail++;
			mutex_exit(&ppa->ppa_sta_lock);
			error = ENOSR;
			break;
		}
		if (mp->b_cont != NULL) {
			freemsg(mp->b_cont);
		}
		mp->b_cont = nmp;
		pip = (struct ppp_idle *)nmp->b_wptr;
		nmp->b_wptr += sizeof (struct ppp_idle);
		/*
		 * Get current timestamp and subtract the tx and rx
		 * timestamps to get the actual idle time to be
		 * returned.
		 */
		hrtime = gethrtime();
		pip->xmit_idle = (hrtime - ppa->ppa_lasttx) / 1000000000ul;
		pip->recv_idle = (hrtime - ppa->ppa_lastrx) / 1000000000ul;
		count = msgsize(nmp);
		error = 0;
		break;		/* return success (error is 0) */
	case PPPIO_GTYPE:
		/* Identify this driver to the requestor. */
		nmp = allocb(sizeof (uint32_t), BPRI_MED);
		if (nmp == NULL) {
			error = ENOSR;
			break;
		}
		if (mp->b_cont != NULL) {
			freemsg(mp->b_cont);
		}
		mp->b_cont = nmp;
		/*
		 * Let the requestor know that we are the PPP
		 * multiplexer (PPPTYP_MUX).
		 */
		*(uint32_t *)nmp->b_wptr = PPPTYP_MUX;
		nmp->b_wptr += sizeof (uint32_t);
		count = msgsize(nmp);
		error = 0;		/* return success */
		break;
	case PPPIO_GETSTAT64:
		/* Return 64-bit link statistics. */
		if (ppa == NULL) {
			break;		/* return EINVAL */
		} else if ((ppa->ppa_lower_wq != NULL) &&
		    !IS_PPA_LASTMOD(ppa)) {
			/* Not last module: forward the request downstream. */
			mutex_enter(&ppa->ppa_sta_lock);
			/*
			 * We match sps_ioc_id on the M_IOC{ACK,NAK},
			 * so if the response hasn't come back yet,
			 * new ioctls must be queued instead.
			 */
			if (IS_SPS_IOCQ(sps)) {
				mutex_exit(&ppa->ppa_sta_lock);
				if (!putq(q, mp)) {
					error = EAGAIN;
					break;
				}
				return;
			} else {
				ppa->ppa_ioctlsfwd++;
				/*
				 * Record the ioctl CMD & ID - this will be
				 * used to check the ACK or NAK responses
				 * coming from below.
				 */
				sps->sps_ioc_id = iop->ioc_id;
				sps->sps_flags |= SPS_IOCQ;
				mutex_exit(&ppa->ppa_sta_lock);
			}
			putnext(ppa->ppa_lower_wq, mp);
			return;	/* don't ack or nak the request */
		}
		nmp = allocb(sizeof (*psp), BPRI_MED);
		if (nmp == NULL) {
			mutex_enter(&ppa->ppa_sta_lock);
			ppa->ppa_allocbfail++;
			mutex_exit(&ppa->ppa_sta_lock);
			error = ENOSR;
			break;
		}
		if (mp->b_cont != NULL) {
			freemsg(mp->b_cont);
		}
		mp->b_cont = nmp;
		psp = (struct ppp_stats64 *)nmp->b_wptr;
		/*
		 * Copy the contents of ppp_stats64 structure for this
		 * ppa and return them to the caller.
		 */
		mutex_enter(&ppa->ppa_sta_lock);
		bcopy(&ppa->ppa_stats, psp, sizeof (*psp));
		mutex_exit(&ppa->ppa_sta_lock);
		nmp->b_wptr += sizeof (*psp);
		count = sizeof (*psp);
		error = 0;		/* return success */
		break;
	case PPPIO_GETCSTAT:
		/* Return compression statistics (always zero here). */
		if (ppa == NULL) {
			break;		/* return EINVAL */
		} else if ((ppa->ppa_lower_wq != NULL) &&
		    !IS_PPA_LASTMOD(ppa)) {
			mutex_enter(&ppa->ppa_sta_lock);
			/*
			 * See comments in PPPIO_GETSTAT64 case
			 * in sppp_ioctl().
			 */
			if (IS_SPS_IOCQ(sps)) {
				mutex_exit(&ppa->ppa_sta_lock);
				if (!putq(q, mp)) {
					error = EAGAIN;
					break;
				}
				return;
			} else {
				ppa->ppa_ioctlsfwd++;
				/*
				 * Record the ioctl CMD & ID - this will be
				 * used to check the ACK or NAK responses
				 * coming from below.
				 */
				sps->sps_ioc_id = iop->ioc_id;
				sps->sps_flags |= SPS_IOCQ;
				mutex_exit(&ppa->ppa_sta_lock);
			}
			putnext(ppa->ppa_lower_wq, mp);
			return;	/* don't ack or nak the request */
		}
		nmp = allocb(sizeof (struct ppp_comp_stats), BPRI_MED);
		if (nmp == NULL) {
			mutex_enter(&ppa->ppa_sta_lock);
			ppa->ppa_allocbfail++;
			mutex_exit(&ppa->ppa_sta_lock);
			error = ENOSR;
			break;
		}
		if (mp->b_cont != NULL) {
			freemsg(mp->b_cont);
		}
		mp->b_cont = nmp;
		pcsp = (struct ppp_comp_stats *)nmp->b_wptr;
		nmp->b_wptr += sizeof (struct ppp_comp_stats);
		bzero((caddr_t)pcsp, sizeof (struct ppp_comp_stats));
		count = msgsize(nmp);
		error = 0;		/* return success */
		break;
	}

	if (error == 0) {
		/* Success; tell the user. */
		miocack(q, mp, count, 0);
	} else {
		/* Failure; send error back upstream. */
		miocnak(q, mp, 0, error);
	}
}
689 
690 /*
691  * sppp_uwput()
692  *
693  * MT-Perimeters:
694  *    shared inner, shared outer.
695  *
696  * Description:
697  *    Upper write-side put procedure. Messages from above arrive here.
698  */
void
sppp_uwput(queue_t *q, mblk_t *mp)
{
	queue_t		*nextq;
	spppstr_t	*sps;
	sppa_t		*ppa;
	struct iocblk	*iop;
	int		error;

	ASSERT(q != NULL && q->q_ptr != NULL);
	ASSERT(mp != NULL && mp->b_rptr != NULL);
	sps = (spppstr_t *)q->q_ptr;
	ppa = sps->sps_ppa;

	switch (MTYPE(mp)) {
	case M_PCPROTO:
	case M_PROTO:
		if (IS_SPS_CONTROL(sps)) {
			ASSERT(ppa != NULL);
			/*
			 * Intentionally change this to a high priority
			 * message so it doesn't get queued up. M_PROTO is
			 * specifically used for signalling between pppd and its
			 * kernel-level component(s), such as ppptun, so we
			 * make sure that it doesn't get queued up behind
			 * data messages.
			 */
			MTYPE(mp) = M_PCPROTO;
			if ((ppa->ppa_lower_wq != NULL) &&
			    canputnext(ppa->ppa_lower_wq)) {
				mutex_enter(&ppa->ppa_sta_lock);
				ppa->ppa_mctlsfwd++;
				mutex_exit(&ppa->ppa_sta_lock);
				putnext(ppa->ppa_lower_wq, mp);
			} else {
				/* No lower stream or it's flow-blocked. */
				mutex_enter(&ppa->ppa_sta_lock);
				ppa->ppa_mctlsfwderr++;
				mutex_exit(&ppa->ppa_sta_lock);
				freemsg(mp);
			}
		} else {
			/* DLPI messages on non-control streams. */
			(void) sppp_mproto(q, mp, sps);
			return;
		}
		break;
	case M_DATA:
		/* sppp_send() returns the queue to forward to, or NULL. */
		if ((nextq = sppp_send(q, &mp, sps)) != NULL)
			putnext(nextq, mp);
		break;
	case M_IOCTL:
		error = EINVAL;
		iop = (struct iocblk *)mp->b_rptr;
		switch (iop->ioc_cmd) {
		case DLIOCRAW:
		case DL_IOC_HDR_INFO:
		case PPPIO_ATTACH:
		case PPPIO_DEBUG:
		case PPPIO_DETACH:
		case PPPIO_LASTMOD:
		case PPPIO_MRU:
		case PPPIO_MTU:
		case PPPIO_USETIMESTAMP:
		case PPPIO_BLOCKNP:
		case PPPIO_UNBLOCKNP:
			/* These need exclusive access at the inner perimeter. */
			qwriter(q, mp, sppp_inner_ioctl, PERIM_INNER);
			return;
		case I_LINK:
		case I_UNLINK:
		case PPPIO_NEWPPA:
			/* These need exclusive access at the outer perimeter. */
			qwriter(q, mp, sppp_outer_ioctl, PERIM_OUTER);
			return;
		case PPPIO_NPMODE:
		case PPPIO_GIDLE:
		case PPPIO_GTYPE:
		case PPPIO_GETSTAT64:
		case PPPIO_GETCSTAT:
			/*
			 * These require additional auto variables to
			 * handle, so (for optimization reasons)
			 * they're moved off to a separate function.
			 */
			sppp_ioctl(q, mp);
			return;
		case PPPIO_GETSTAT:
			break;			/* 32 bit interface gone */
		default:
			/*
			 * Unknown ioctl: forward downstream if privileged
			 * and a lower stream exists; otherwise nak it.
			 */
			if (iop->ioc_cr == NULL ||
			    secpolicy_ppp_config(iop->ioc_cr) != 0) {
				error = EPERM;
				break;
			} else if ((ppa == NULL) ||
			    (ppa->ppa_lower_wq == NULL)) {
				break;		/* return EINVAL */
			}
			mutex_enter(&ppa->ppa_sta_lock);
			/*
			 * See comments in PPPIO_GETSTAT64 case
			 * in sppp_ioctl().
			 */
			if (IS_SPS_IOCQ(sps)) {
				mutex_exit(&ppa->ppa_sta_lock);
				if (!putq(q, mp)) {
					error = EAGAIN;
					break;
				}
				return;
			} else {
				ppa->ppa_ioctlsfwd++;
				/*
				 * Record the ioctl CMD & ID -
				 * this will be used to check the
				 * ACK or NAK responses coming from below.
				 */
				sps->sps_ioc_id = iop->ioc_id;
				sps->sps_flags |= SPS_IOCQ;
				mutex_exit(&ppa->ppa_sta_lock);
			}
			putnext(ppa->ppa_lower_wq, mp);
			return;		/* don't ack or nak the request */
		}
		/* Failure; send error back upstream. */
		miocnak(q, mp, 0, error);
		break;
	case M_FLUSH:
		if (*mp->b_rptr & FLUSHW) {
			flushq(q, FLUSHDATA);
		}
		if (*mp->b_rptr & FLUSHR) {
			*mp->b_rptr &= ~FLUSHW;
			qreply(q, mp);
		} else {
			freemsg(mp);
		}
		break;
	default:
		freemsg(mp);
		break;
	}
}
838 
839 /*
840  * sppp_uwsrv()
841  *
842  * MT-Perimeters:
843  *    exclusive inner, shared outer.
844  *
845  * Description:
846  *    Upper write-side service procedure. Note that this procedure does
847  *    not get called when a message is placed on our write-side queue, since
848  *    automatic queue scheduling has been turned off by noenable() when
849  *    the queue was opened. We do this on purpose, as we explicitly control
850  *    the write-side queue. Therefore, this procedure gets called when
851  *    the lower write service procedure qenable() the upper write stream queue.
852  */
void
sppp_uwsrv(queue_t *q)
{
	spppstr_t	*sps;
	sppa_t		*ppa;
	mblk_t		*mp;
	queue_t		*nextq;
	struct iocblk	*iop;

	ASSERT(q != NULL && q->q_ptr != NULL);
	sps = (spppstr_t *)q->q_ptr;

	while ((mp = getq(q)) != NULL) {
		if (MTYPE(mp) == M_IOCTL) {
			/*
			 * A previously queued ioctl (see sppp_ioctl()):
			 * forward it downstream now, unless another
			 * forwarded ioctl is still awaiting its response.
			 */
			ppa = sps->sps_ppa;
			if ((ppa == NULL) || (ppa->ppa_lower_wq == NULL)) {
				miocnak(q, mp, 0, EINVAL);
				continue;
			}

			iop = (struct iocblk *)mp->b_rptr;
			mutex_enter(&ppa->ppa_sta_lock);
			/*
			 * See comments in PPPIO_GETSTAT64 case
			 * in sppp_ioctl().
			 */
			if (IS_SPS_IOCQ(sps)) {
				/*
				 * Still waiting for the outstanding reply;
				 * put the ioctl back and stop.  If putbq
				 * fails we must nak, or the caller hangs.
				 */
				mutex_exit(&ppa->ppa_sta_lock);
				if (putbq(q, mp) == 0)
					miocnak(q, mp, 0, EAGAIN);
				break;
			} else {
				ppa->ppa_ioctlsfwd++;
				sps->sps_ioc_id = iop->ioc_id;
				sps->sps_flags |= SPS_IOCQ;
				mutex_exit(&ppa->ppa_sta_lock);
				putnext(ppa->ppa_lower_wq, mp);
			}
		} else if ((nextq =
		    sppp_outpkt(q, &mp, msgdsize(mp), sps)) == NULL) {
			/*
			 * No destination queue.  If mp survived (flow
			 * blocked), requeue it and stop; if sppp_outpkt()
			 * consumed it, just continue with the next message.
			 */
			if (mp != NULL) {
				if (putbq(q, mp) == 0)
					freemsg(mp);
				break;
			}
		} else {
			putnext(nextq, mp);
		}
	}
}
903 
904 void
905 sppp_remove_ppa(spppstr_t *sps)
906 {
907 	spppstr_t *nextsib;
908 	sppa_t *ppa = sps->sps_ppa;
909 
910 	rw_enter(&ppa->ppa_sib_lock, RW_WRITER);
911 	if (ppa->ppa_refcnt <= 1) {
912 		rw_exit(&ppa->ppa_sib_lock);
913 		sppp_free_ppa(ppa);
914 	} else {
915 		nextsib = ppa->ppa_streams;
916 		if (nextsib == sps) {
917 			ppa->ppa_streams = sps->sps_nextsib;
918 		} else {
919 			while (nextsib->sps_nextsib != NULL) {
920 				if (nextsib->sps_nextsib == sps) {
921 					nextsib->sps_nextsib =
922 					    sps->sps_nextsib;
923 					break;
924 				}
925 				nextsib = nextsib->sps_nextsib;
926 			}
927 		}
928 		ppa->ppa_refcnt--;
929 		/*
930 		 * And if this stream was marked as promiscuous
931 		 * (SPS_PROMISC), then we need to update the
932 		 * promiscuous streams count. This should only happen
933 		 * when DL_DETACH_REQ is issued prior to marking the
934 		 * stream as non-promiscuous, through
935 		 * DL_PROMISCOFF_REQ request.
936 		 */
937 		if (IS_SPS_PROMISC(sps)) {
938 			ASSERT(ppa->ppa_promicnt > 0);
939 			ppa->ppa_promicnt--;
940 		}
941 		rw_exit(&ppa->ppa_sib_lock);
942 	}
943 	sps->sps_nextsib = NULL;
944 	sps->sps_ppa = NULL;
945 	freemsg(sps->sps_hangup);
946 	sps->sps_hangup = NULL;
947 }
948 
949 sppa_t *
950 sppp_find_ppa(uint32_t ppa_id)
951 {
952 	sppa_t *ppa;
953 
954 	for (ppa = ppa_list; ppa != NULL; ppa = ppa->ppa_nextppa) {
955 		if (ppa->ppa_ppa_id == ppa_id) {
956 			break;	/* found the ppa */
957 		}
958 	}
959 	return (ppa);
960 }
961 
962 /*
963  * sppp_inner_ioctl()
964  *
965  * MT-Perimeters:
966  *    exclusive inner, shared outer
967  *
968  * Description:
969  *    Called by sppp_uwput as a result of receiving ioctls which require
970  *    an exclusive access at the inner perimeter.
971  */
972 static void
973 sppp_inner_ioctl(queue_t *q, mblk_t *mp)
974 {
975 	spppstr_t	*sps;
976 	sppa_t		*ppa;
977 	struct iocblk	*iop;
978 	mblk_t		*nmp;
979 	int		error = EINVAL;
980 	int		count = 0;
981 	int		dbgcmd;
982 	int		mru, mtu;
983 	uint32_t	ppa_id;
984 	hrtime_t	hrtime;
985 	uint16_t	proto;
986 
987 	ASSERT(q != NULL && q->q_ptr != NULL);
988 	ASSERT(mp != NULL && mp->b_rptr != NULL);
989 
990 	sps = (spppstr_t *)q->q_ptr;
991 	ppa = sps->sps_ppa;
992 	iop = (struct iocblk *)mp->b_rptr;
993 	switch (iop->ioc_cmd) {
994 	case DLIOCRAW:
995 		if (IS_SPS_CONTROL(sps)) {
996 			break;		/* return EINVAL */
997 		}
998 		sps->sps_flags |= SPS_RAWDATA;
999 		error = 0;		/* return success */
1000 		break;
1001 	case DL_IOC_HDR_INFO:
1002 		if (IS_SPS_CONTROL(sps)) {
1003 			break;		/* return EINVAL */
1004 		} else if ((mp->b_cont == NULL) ||
1005 		    *((t_uscalar_t *)mp->b_cont->b_rptr) != DL_UNITDATA_REQ ||
1006 		    (MBLKL(mp->b_cont) < (sizeof (dl_unitdata_req_t) +
1007 		    SPPP_ADDRL))) {
1008 			error = EPROTO;
1009 			break;
1010 		} else if (ppa == NULL) {
1011 			error = ENOLINK;
1012 			break;
1013 		}
1014 		if ((nmp = allocb(PPP_HDRLEN, BPRI_MED)) == NULL) {
1015 			mutex_enter(&ppa->ppa_sta_lock);
1016 			ppa->ppa_allocbfail++;
1017 			mutex_exit(&ppa->ppa_sta_lock);
1018 			error = ENOMEM;
1019 			break;
1020 		}
1021 		*(uchar_t *)nmp->b_wptr++ = PPP_ALLSTATIONS;
1022 		*(uchar_t *)nmp->b_wptr++ = PPP_UI;
1023 		*(uchar_t *)nmp->b_wptr++ = sps->sps_sap >> 8;
1024 		*(uchar_t *)nmp->b_wptr++ = sps->sps_sap & 0xff;
1025 		ASSERT(MBLKL(nmp) == PPP_HDRLEN);
1026 
1027 		linkb(mp, nmp);
1028 		sps->sps_flags |= SPS_FASTPATH;
1029 		error = 0;		/* return success */
1030 		count = msgsize(nmp);
1031 		break;
1032 	case PPPIO_ATTACH:
1033 		if (IS_SPS_CONTROL(sps) || IS_SPS_PIOATTACH(sps) ||
1034 		    (sps->sps_dlstate != DL_UNATTACHED) ||
1035 		    (iop->ioc_count != sizeof (uint32_t))) {
1036 			break;		/* return EINVAL */
1037 		} else if (mp->b_cont == NULL) {
1038 			error = EPROTO;
1039 			break;
1040 		}
1041 		ASSERT(mp->b_cont->b_rptr != NULL);
1042 		/* If there's something here, it's detached. */
1043 		if (ppa != NULL) {
1044 			sppp_remove_ppa(sps);
1045 		}
1046 		ppa_id = *(uint32_t *)mp->b_cont->b_rptr;
1047 		ppa = sppp_find_ppa(ppa_id);
1048 		/*
1049 		 * If we can't find it, then it's either because the requestor
1050 		 * has supplied a wrong ppa_id to be attached to, or because
1051 		 * the control stream for the specified ppa_id has been closed
1052 		 * before we get here.
1053 		 */
1054 		if (ppa == NULL) {
1055 			error = ENOENT;
1056 			break;
1057 		}
1058 		if (iop->ioc_cr == NULL ||
1059 		    ppa->ppa_zoneid != crgetzoneid(iop->ioc_cr)) {
1060 			error = EPERM;
1061 			break;
1062 		}
1063 		/*
1064 		 * Preallocate the hangup message so that we're always
1065 		 * able to send this upstream in the event of a
1066 		 * catastrophic failure.
1067 		 */
1068 		if ((sps->sps_hangup = allocb(1, BPRI_MED)) == NULL) {
1069 			error = ENOSR;
1070 			break;
1071 		}
1072 		/*
1073 		 * There are two ways to attach a stream to a ppa: one is
1074 		 * through DLPI (DL_ATTACH_REQ) and the other is through
1075 		 * PPPIO_ATTACH. This is why we need to distinguish whether or
1076 		 * not a stream was allocated via PPPIO_ATTACH, so that we can
1077 		 * properly detach it when we receive PPPIO_DETACH ioctl
1078 		 * request.
1079 		 */
1080 		sps->sps_flags |= SPS_PIOATTACH;
1081 		sps->sps_ppa = ppa;
1082 		/*
1083 		 * Add this stream to the head of the list of sibling streams
1084 		 * which belong to the same ppa as specified.
1085 		 */
1086 		rw_enter(&ppa->ppa_sib_lock, RW_WRITER);
1087 		ppa->ppa_refcnt++;
1088 		sps->sps_nextsib = ppa->ppa_streams;
1089 		ppa->ppa_streams = sps;
1090 		rw_exit(&ppa->ppa_sib_lock);
1091 		error = 0;		/* return success */
1092 		break;
1093 	case PPPIO_BLOCKNP:
1094 	case PPPIO_UNBLOCKNP:
1095 		if (iop->ioc_cr == NULL ||
1096 		    secpolicy_ppp_config(iop->ioc_cr) != 0) {
1097 			error = EPERM;
1098 			break;
1099 		}
1100 		error = miocpullup(mp, sizeof (uint16_t));
1101 		if (error != 0)
1102 			break;
1103 		ASSERT(mp->b_cont->b_rptr != NULL);
1104 		proto = *(uint16_t *)mp->b_cont->b_rptr;
1105 		if (iop->ioc_cmd == PPPIO_BLOCKNP) {
1106 			uint32_t npflagpos = sppp_ppp2np(proto);
1107 			/*
1108 			 * Mark proto as blocked in ppa_npflag until the
1109 			 * corresponding queues for proto have been plumbed.
1110 			 */
1111 			if (npflagpos != 0) {
1112 				mutex_enter(&ppa->ppa_npmutex);
1113 				ppa->ppa_npflag |= (1 << npflagpos);
1114 				mutex_exit(&ppa->ppa_npmutex);
1115 			} else {
1116 				error = EINVAL;
1117 			}
1118 		} else {
1119 			/*
1120 			 * reset ppa_npflag and release proto
1121 			 * packets that were being held in control queue.
1122 			 */
1123 			sppp_release_pkts(ppa, proto);
1124 		}
1125 		break;
1126 	case PPPIO_DEBUG:
1127 		if (iop->ioc_cr == NULL ||
1128 		    secpolicy_ppp_config(iop->ioc_cr) != 0) {
1129 			error = EPERM;
1130 			break;
1131 		} else if (iop->ioc_count != sizeof (uint32_t)) {
1132 			break;		/* return EINVAL */
1133 		} else if (mp->b_cont == NULL) {
1134 			error = EPROTO;
1135 			break;
1136 		}
1137 		ASSERT(mp->b_cont->b_rptr != NULL);
1138 		dbgcmd = *(uint32_t *)mp->b_cont->b_rptr;
1139 		/*
1140 		 * We accept PPPDBG_LOG + PPPDBG_DRIVER value as an indication
1141 		 * that SPS_KDEBUG needs to be enabled for this upper stream.
1142 		 */
1143 		if (dbgcmd == PPPDBG_LOG + PPPDBG_DRIVER) {
1144 			sps->sps_flags |= SPS_KDEBUG;
1145 			error = 0;	/* return success */
1146 			break;
1147 		}
1148 		/*
1149 		 * Otherwise, for any other values, we send them down only if
1150 		 * there is an attachment and if the attachment has something
1151 		 * linked underneath it.
1152 		 */
1153 		if ((ppa == NULL) || (ppa->ppa_lower_wq == NULL)) {
1154 			error = ENOLINK;
1155 			break;
1156 		}
1157 		mutex_enter(&ppa->ppa_sta_lock);
1158 		/*
1159 		 * See comments in PPPIO_GETSTAT64 case
1160 		 * in sppp_ioctl().
1161 		 */
1162 		if (IS_SPS_IOCQ(sps)) {
1163 			mutex_exit(&ppa->ppa_sta_lock);
1164 			if (!putq(q, mp)) {
1165 				error = EAGAIN;
1166 				break;
1167 			}
1168 			return;
1169 		} else {
1170 			ppa->ppa_ioctlsfwd++;
1171 			/*
1172 			 * Record the ioctl CMD & ID -
1173 			 * this will be used to check the
1174 			 * ACK or NAK responses coming from below.
1175 			 */
1176 			sps->sps_ioc_id = iop->ioc_id;
1177 			sps->sps_flags |= SPS_IOCQ;
1178 			mutex_exit(&ppa->ppa_sta_lock);
1179 		}
1180 		putnext(ppa->ppa_lower_wq, mp);
1181 		return;			/* don't ack or nak the request */
1182 	case PPPIO_DETACH:
1183 		if (!IS_SPS_PIOATTACH(sps)) {
1184 			break;		/* return EINVAL */
1185 		}
1186 		/*
1187 		 * The SPS_PIOATTACH flag set on the stream tells us that
1188 		 * the ppa field is still valid. In the event that the control
1189 		 * stream be closed prior to this stream's detachment, the
1190 		 * SPS_PIOATTACH flag would have been cleared from this stream
1191 		 * during close; in that case we won't get here.
1192 		 */
1193 		ASSERT(ppa != NULL);
1194 		ASSERT(ppa->ppa_ctl != sps);
1195 		ASSERT(sps->sps_dlstate == DL_UNATTACHED);
1196 
1197 		/*
1198 		 * We don't actually detach anything until the stream is
1199 		 * closed or reattached.
1200 		 */
1201 
1202 		sps->sps_flags &= ~SPS_PIOATTACH;
1203 		error = 0;		/* return success */
1204 		break;
1205 	case PPPIO_LASTMOD:
1206 		if (!IS_SPS_CONTROL(sps)) {
1207 			break;		/* return EINVAL */
1208 		}
1209 		ASSERT(ppa != NULL);
1210 		ppa->ppa_flags |= PPA_LASTMOD;
1211 		error = 0;		/* return success */
1212 		break;
1213 	case PPPIO_MRU:
1214 		if (!IS_SPS_CONTROL(sps) ||
1215 		    (iop->ioc_count != sizeof (uint32_t))) {
1216 			break;		/* return EINVAL */
1217 		} else if (mp->b_cont == NULL) {
1218 			error = EPROTO;
1219 			break;
1220 		}
1221 		ASSERT(ppa != NULL);
1222 		ASSERT(mp->b_cont->b_rptr != NULL);
1223 		mru = *(uint32_t *)mp->b_cont->b_rptr;
1224 		if ((mru <= 0) || (mru > PPP_MAXMRU)) {
1225 			error = EPROTO;
1226 			break;
1227 		}
1228 		if (mru < PPP_MRU) {
1229 			mru = PPP_MRU;
1230 		}
1231 		ppa->ppa_mru = (uint16_t)mru;
1232 		/*
1233 		 * If there's something beneath this driver for the ppa, then
1234 		 * inform it (or them) of the MRU size. Only do this is we
1235 		 * are not the last PPP module on the stream.
1236 		 */
1237 		if (!IS_PPA_LASTMOD(ppa) && (ppa->ppa_lower_wq != NULL)) {
1238 			(void) putctl4(ppa->ppa_lower_wq, M_CTL, PPPCTL_MRU,
1239 			    mru);
1240 		}
1241 		error = 0;		/* return success */
1242 		break;
1243 	case PPPIO_MTU:
1244 		if (!IS_SPS_CONTROL(sps) ||
1245 		    (iop->ioc_count != sizeof (uint32_t))) {
1246 			break;		/* return EINVAL */
1247 		} else if (mp->b_cont == NULL) {
1248 			error = EPROTO;
1249 			break;
1250 		}
1251 		ASSERT(ppa != NULL);
1252 		ASSERT(mp->b_cont->b_rptr != NULL);
1253 		mtu = *(uint32_t *)mp->b_cont->b_rptr;
1254 		if ((mtu <= 0) || (mtu > PPP_MAXMTU)) {
1255 			error = EPROTO;
1256 			break;
1257 		}
1258 		ppa->ppa_mtu = (uint16_t)mtu;
1259 		/*
1260 		 * If there's something beneath this driver for the ppa, then
1261 		 * inform it (or them) of the MTU size. Only do this if we
1262 		 * are not the last PPP module on the stream.
1263 		 */
1264 		if (!IS_PPA_LASTMOD(ppa) && (ppa->ppa_lower_wq != NULL)) {
1265 			(void) putctl4(ppa->ppa_lower_wq, M_CTL, PPPCTL_MTU,
1266 			    mtu);
1267 		}
1268 		error = 0;		/* return success */
1269 		break;
1270 	case PPPIO_USETIMESTAMP:
1271 		if (!IS_SPS_CONTROL(sps)) {
1272 			break;		/* return EINVAL */
1273 		}
1274 		if (!IS_PPA_TIMESTAMP(ppa)) {
1275 			hrtime = gethrtime();
1276 			ppa->ppa_lasttx = ppa->ppa_lastrx = hrtime;
1277 			ppa->ppa_flags |= PPA_TIMESTAMP;
1278 		}
1279 		error = 0;
1280 		break;
1281 	}
1282 
1283 	if (error == 0) {
1284 		/* Success; tell the user */
1285 		miocack(q, mp, count, 0);
1286 	} else {
1287 		/* Failure; send error back upstream */
1288 		miocnak(q, mp, 0, error);
1289 	}
1290 }
1291 
1292 /*
1293  * sppp_outer_ioctl()
1294  *
1295  * MT-Perimeters:
1296  *    exclusive inner, exclusive outer
1297  *
1298  * Description:
1299  *    Called by sppp_uwput as a result of receiving ioctls which require
1300  *    an exclusive access at the outer perimeter.
1301  */
static void
sppp_outer_ioctl(queue_t *q, mblk_t *mp)
{
	spppstr_t	*sps = q->q_ptr;
	spppstr_t	*nextsib;
	queue_t		*lwq;
	sppa_t		*ppa;
	struct iocblk	*iop;
	int		error = EINVAL;	/* default: nak with EINVAL */
	int		count = 0;	/* data bytes returned in the ack */
	uint32_t	ppa_id;
	mblk_t		*nmp;
	zoneid_t	zoneid;

	/* NOTE(review): redundant re-assignment; sps already initialized above. */
	sps = (spppstr_t *)q->q_ptr;
	ppa = sps->sps_ppa;
	iop = (struct iocblk *)mp->b_rptr;
	switch (iop->ioc_cmd) {
	case I_LINK:
		/* Only the control stream may link a driver beneath us. */
		if (!IS_SPS_CONTROL(sps)) {
			break;		/* return EINVAL */
		} else if (ppa->ppa_lower_wq != NULL) {
			/* At most one lower stream per ppa. */
			error = EEXIST;
			break;
		}
		ASSERT(ppa->ppa_ctl != NULL);
		ASSERT(sps->sps_npmode == NPMODE_PASS);
		ASSERT(mp->b_cont != NULL && mp->b_cont->b_rptr != NULL);

		lwq = ((struct linkblk *)mp->b_cont->b_rptr)->l_qbot;
		ASSERT(lwq != NULL);

		/* Record the lower write queue; its q_ptr points back to us. */
		ppa->ppa_lower_wq = lwq;
		lwq->q_ptr = RD(lwq)->q_ptr = (caddr_t)ppa;
		/*
		 * Unblock upper network streams which now feed this lower
		 * stream. We don't need to hold ppa_sib_lock here, since we
		 * are writer at the outer perimeter.
		 */
		if (WR(sps->sps_rq)->q_first != NULL)
			qenable(WR(sps->sps_rq));
		for (nextsib = ppa->ppa_streams; nextsib != NULL;
		    nextsib = nextsib->sps_nextsib) {
			nextsib->sps_npmode = NPMODE_PASS;
			if (WR(nextsib->sps_rq)->q_first != NULL) {
				qenable(WR(nextsib->sps_rq));
			}
		}

		/*
		 * Also unblock (run once) our lower read-side queue.  This is
		 * where packets received while doing the I_LINK may be
		 * languishing; see sppp_lrsrv.
		 */
		qenable(RD(lwq));

		/*
		 * Send useful information down to the modules which are now
		 * linked below this driver (for this particular ppa). Only
		 * do this if we are not the last PPP module on the stream.
		 */
		if (!IS_PPA_LASTMOD(ppa)) {
			(void) putctl8(lwq, M_CTL, PPPCTL_UNIT,
			    ppa->ppa_ppa_id);
			(void) putctl4(lwq, M_CTL, PPPCTL_MRU, ppa->ppa_mru);
			(void) putctl4(lwq, M_CTL, PPPCTL_MTU, ppa->ppa_mtu);
		}

		if (IS_SPS_KDEBUG(sps)) {
			SPDEBUG(PPP_DRV_NAME
			    "/%d: I_LINK lwq=0x%p sps=0x%p flags=0x%b ppa=0x%p "
			    "flags=0x%b\n", sps->sps_mn_id,
			    (void *)ppa->ppa_lower_wq, (void *)sps,
			    sps->sps_flags, SPS_FLAGS_STR,
			    (void *)ppa, ppa->ppa_flags,
			    PPA_FLAGS_STR);
		}
		error = 0;		/* return success */
		break;
	case I_UNLINK:
		ASSERT(IS_SPS_CONTROL(sps));
		ASSERT(ppa != NULL);
		lwq = ppa->ppa_lower_wq;
		ASSERT(mp->b_cont != NULL && mp->b_cont->b_rptr != NULL);
		ASSERT(lwq == ((struct linkblk *)mp->b_cont->b_rptr)->l_qbot);

		if (IS_SPS_KDEBUG(sps)) {
			SPDEBUG(PPP_DRV_NAME
			    "/%d: I_UNLINK lwq=0x%p sps=0x%p flags=0x%b "
			    "ppa=0x%p flags=0x%b\n", sps->sps_mn_id,
			    (void *)lwq, (void *)sps, sps->sps_flags,
			    SPS_FLAGS_STR, (void *)ppa, ppa->ppa_flags,
			    PPA_FLAGS_STR);
		}
		/*
		 * While accessing the outer perimeter exclusively, we
		 * disassociate our ppa's lower_wq from the lower stream linked
		 * beneath us, and we also disassociate our control stream from
		 * the q_ptr of the lower stream.
		 */
		lwq->q_ptr = RD(lwq)->q_ptr = NULL;
		ppa->ppa_lower_wq = NULL;
		/*
		 * Unblock streams which now feed back up the control stream,
		 * and acknowledge the request. We don't need to hold
		 * ppa_sib_lock here, since we are writer at the outer
		 * perimeter.
		 */
		if (WR(sps->sps_rq)->q_first != NULL)
			qenable(WR(sps->sps_rq));
		for (nextsib = ppa->ppa_streams; nextsib != NULL;
		    nextsib = nextsib->sps_nextsib) {
			if (WR(nextsib->sps_rq)->q_first != NULL) {
				qenable(WR(nextsib->sps_rq));
			}
		}
		error = 0;		/* return success */
		break;
	case PPPIO_NEWPPA:
		/*
		 * Do sanity check to ensure that we don't accept PPPIO_NEWPPA
		 * on a stream which DLPI is used (since certain DLPI messages
		 * will cause state transition reflected in sps_dlstate,
		 * changing it from its default DL_UNATTACHED value). In other
		 * words, we won't allow a network/snoop stream to become
		 * a control stream.
		 */
		if (iop->ioc_cr == NULL ||
		    secpolicy_ppp_config(iop->ioc_cr) != 0) {
			error = EPERM;
			break;
		} else if (IS_SPS_CONTROL(sps) || IS_SPS_PIOATTACH(sps) ||
		    (ppa != NULL) || (sps->sps_dlstate != DL_UNATTACHED)) {
			break;		/* return EINVAL */
		}
		/* Get requested unit number (if any) */
		if (iop->ioc_count == sizeof (uint32_t) && mp->b_cont != NULL)
			ppa_id = *(uint32_t *)mp->b_cont->b_rptr;
		else
			ppa_id = 0;
		/* Get mblk to use for response message */
		nmp = allocb(sizeof (uint32_t), BPRI_MED);
		if (nmp == NULL) {
			error = ENOSR;
			break;
		}
		if (mp->b_cont != NULL) {
			freemsg(mp->b_cont);
		}
		mp->b_cont = nmp;		/* chain our response mblk */
		/*
		 * Walk the global ppa list and determine the lowest
		 * available ppa_id number to be used.
		 */
		if (ppa_id == (uint32_t)-1)
			ppa_id = 0;
		zoneid = crgetzoneid(iop->ioc_cr);
		for (ppa = ppa_list; ppa != NULL; ppa = ppa->ppa_nextppa) {
			/*
			 * ppa_id == (uint32_t)-2 requests reuse of an
			 * existing, control-less ppa in the caller's zone;
			 * otherwise find the lowest unused unit >= ppa_id.
			 */
			if (ppa_id == (uint32_t)-2) {
				if (ppa->ppa_ctl == NULL &&
				    ppa->ppa_zoneid == zoneid)
					break;
			} else {
				if (ppa_id < ppa->ppa_ppa_id)
					break;
				if (ppa_id == ppa->ppa_ppa_id)
					++ppa_id;
			}
		}
		if (ppa_id == (uint32_t)-2) {
			if (ppa == NULL) {
				/* No reusable ppa found in this zone. */
				error = ENXIO;
				break;
			}
			/* Clear timestamp and lastmod flags */
			ppa->ppa_flags = 0;
		} else {
			ppa = sppp_create_ppa(ppa_id, zoneid);
			if (ppa == NULL) {
				error = ENOMEM;
				break;
			}
		}

		sps->sps_ppa = ppa;		/* chain the ppa structure */
		sps->sps_npmode = NPMODE_PASS;	/* network packets may travel */
		sps->sps_flags |= SPS_CONTROL;	/* this is the control stream */

		ppa->ppa_refcnt++;		/* new PPA reference */
		ppa->ppa_ctl = sps;		/* back ptr to upper stream */
		/*
		 * Return the newly created ppa_id to the requestor and
		 * acknowledge the request.
		 */
		*(uint32_t *)nmp->b_wptr = ppa->ppa_ppa_id;
		nmp->b_wptr += sizeof (uint32_t);

		if (IS_SPS_KDEBUG(sps)) {
			SPDEBUG(PPP_DRV_NAME
			    "/%d: PPPIO_NEWPPA ppa_id=%d sps=0x%p flags=0x%b "
			    "ppa=0x%p flags=0x%b\n", sps->sps_mn_id, ppa_id,
			    (void *)sps, sps->sps_flags, SPS_FLAGS_STR,
			    (void *)ppa, ppa->ppa_flags,
			    PPA_FLAGS_STR);
		}
		count = msgsize(nmp);
		error = 0;
		break;
	}

	if (error == 0) {
		/* Success; tell the user. */
		miocack(q, mp, count, 0);
	} else {
		/* Failure; send error back upstream. */
		miocnak(q, mp, 0, error);
	}
}
1520 
1521 /*
1522  * sppp_send()
1523  *
1524  * MT-Perimeters:
1525  *    shared inner, shared outer.
1526  *
1527  * Description:
1528  *    Called by sppp_uwput to handle M_DATA message type.  Returns
1529  *    queue_t for putnext, or NULL to mean that the packet was
1530  *    handled internally.
1531  */
static queue_t *
sppp_send(queue_t *q, mblk_t **mpp, spppstr_t *sps)
{
	mblk_t	*mp;
	sppa_t	*ppa;
	int	is_promisc;	/* nonzero if any promiscuous siblings exist */
	int	msize;		/* total data size of the outbound message */
	int	error = 0;
	queue_t	*nextq;

	ASSERT(mpp != NULL);
	mp = *mpp;
	ASSERT(q != NULL && q->q_ptr != NULL);
	ASSERT(mp != NULL && mp->b_rptr != NULL);
	ASSERT(sps != NULL);
	ASSERT(q->q_ptr == sps);
	/*
	 * We only let M_DATA through if the sender is either the control
	 * stream (for PPP control packets) or one of the network streams
	 * (for IP packets) in IP fastpath mode. If this stream is not attached
	 * to any ppas, then discard data coming down through this stream.
	 */
	ppa = sps->sps_ppa;
	if (ppa == NULL) {
		ASSERT(!IS_SPS_CONTROL(sps));
		error = ENOLINK;
	} else if (!IS_SPS_CONTROL(sps) && !IS_SPS_FASTPATH(sps)) {
		error = EPROTO;
	}
	if (error != 0) {
		/* merror frees mp and sends M_ERROR upstream. */
		merror(q, mp, error);
		return (NULL);
	}
	msize = msgdsize(mp);
	if (msize > (ppa->ppa_mtu + PPP_HDRLEN)) {
		/* Log, and send it anyway */
		mutex_enter(&ppa->ppa_sta_lock);
		ppa->ppa_otoolongs++;
		mutex_exit(&ppa->ppa_sta_lock);
	} else if (msize < PPP_HDRLEN) {
		/*
		 * Log, and send it anyway. We log it because we get things
		 * in M_DATA form here, which tells us that the sender is
		 * either IP in fastpath transmission mode, or pppd. In both
		 * cases, they are currently expected to send the 4-bytes
		 * PPP header in front of any possible payloads.
		 */
		mutex_enter(&ppa->ppa_sta_lock);
		ppa->ppa_orunts++;
		mutex_exit(&ppa->ppa_sta_lock);
	}

	if (IS_SPS_KDEBUG(sps)) {
		SPDEBUG(PPP_DRV_NAME
		    "/%d: M_DATA send (%d bytes) sps=0x%p flags=0x%b "
		    "ppa=0x%p flags=0x%b\n", sps->sps_mn_id, msize,
		    (void *)sps, sps->sps_flags, SPS_FLAGS_STR,
		    (void *)ppa, ppa->ppa_flags, PPA_FLAGS_STR);
	}
	/*
	 * Should there be any promiscuous stream(s), send the data up
	 * for each promiscuous stream that we recognize. Make sure that
	 * for fastpath, we skip the PPP header in the M_DATA mblk. We skip
	 * the control stream as we obviously never allow the control stream
	 * to become promiscuous and bind to PPP_ALLSAP.
	 */
	rw_enter(&ppa->ppa_sib_lock, RW_READER);
	is_promisc = sps->sps_ppa->ppa_promicnt;
	if (is_promisc) {
		ASSERT(ppa->ppa_streams != NULL);
		sppp_dlprsendup(ppa->ppa_streams, mp, sps->sps_sap, B_TRUE);
	}
	rw_exit(&ppa->ppa_sib_lock);
	/*
	 * Only time-stamp the packet with hrtime if the upper stream
	 * is configured to do so.  PPP control (negotiation) messages
	 * are never considered link activity; only data is activity.
	 */
	if (!IS_SPS_CONTROL(sps) && IS_PPA_TIMESTAMP(ppa)) {
		ppa->ppa_lasttx = gethrtime();
	}
	/*
	 * If there's already a message in the write-side service queue,
	 * then queue this message there as well, otherwise, try to send
	 * it down to the module immediately below us.
	 */
	if (q->q_first != NULL ||
	    (nextq = sppp_outpkt(q, mpp, msize, sps)) == NULL) {
		/* sppp_outpkt may have consumed or freed the message. */
		mp = *mpp;
		if (mp != NULL && putq(q, mp) == 0) {
			mutex_enter(&ppa->ppa_sta_lock);
			ppa->ppa_oqdropped++;
			mutex_exit(&ppa->ppa_sta_lock);
			freemsg(mp);
		}
		return (NULL);
	}
	return (nextq);
}
1631 
1632 /*
1633  * sppp_outpkt()
1634  *
1635  * MT-Perimeters:
1636  *    shared inner, shared outer (if called from sppp_wput, sppp_dlunitdatareq).
1637  *    exclusive inner, shared outer (if called from sppp_wsrv).
1638  *
1639  * Description:
1640  *    Called from 1) sppp_uwput when processing a M_DATA fastpath message,
1641  *    or 2) sppp_uwsrv when processing the upper write-side service queue.
1642  *    For both cases, it prepares to send the data to the module below
1643  *    this driver if there is a lower stream linked underneath. If none, then
1644  *    the data will be sent upstream via the control channel to pppd.
1645  *
1646  * Returns:
1647  *	Non-NULL queue_t if message should be sent now, otherwise
1648  *	if *mpp == NULL, then message was freed, otherwise put *mpp
1649  *	(back) on the queue.  (Does not do putq/putbq, since it's
1650  *	called both from srv and put procedures.)
1651  */
static queue_t *
sppp_outpkt(queue_t *q, mblk_t **mpp, int msize, spppstr_t *sps)
{
	mblk_t		*mp;
	sppa_t		*ppa;
	enum NPmode	npmode;
	mblk_t		*mpnew;	/* link-status notification for pppd */

	ASSERT(mpp != NULL);
	mp = *mpp;
	ASSERT(q != NULL && q->q_ptr != NULL);
	ASSERT(mp != NULL && mp->b_rptr != NULL);
	ASSERT(sps != NULL);

	ppa = sps->sps_ppa;
	npmode = sps->sps_npmode;

	if (npmode == NPMODE_QUEUE) {
		ASSERT(!IS_SPS_CONTROL(sps));
		/* *mpp left intact: caller will putq/putbq it. */
		return (NULL);	/* queue it for later */
	} else if (ppa == NULL || ppa->ppa_ctl == NULL ||
	    npmode == NPMODE_DROP || npmode == NPMODE_ERROR) {
		/*
		 * This can not be the control stream, as it must always have
		 * a valid ppa, and its npmode must always be NPMODE_PASS.
		 */
		ASSERT(!IS_SPS_CONTROL(sps));
		if (npmode == NPMODE_DROP) {
			freemsg(mp);
		} else {
			/*
			 * If we no longer have the control stream, or if the
			 * mode is set to NPMODE_ERROR, then we need to tell IP
			 * that the interface need to be marked as down. In
			 * other words, we tell IP to be quiescent.
			 */
			merror(q, mp, EPROTO);
		}
		*mpp = NULL;	/* message consumed either way */
		return (NULL);	/* don't queue it */
	}
	/*
	 * Do we have a driver stream linked underneath ? If not, we need to
	 * notify pppd that the link needs to be brought up and configure
	 * this upper stream to drop subsequent outgoing packets. This is
	 * for demand-dialing, in which case pppd has done the IP plumbing
	 * but hasn't linked the driver stream underneath us. Therefore, when
	 * a packet is sent down the IP interface, a notification message
	 * will be sent up the control stream to pppd in order for it to
	 * establish the physical link. The driver stream is then expected
	 * to be linked underneath after physical link establishment is done.
	 */
	if (ppa->ppa_lower_wq == NULL) {
		ASSERT(ppa->ppa_ctl != NULL);
		ASSERT(ppa->ppa_ctl->sps_rq != NULL);

		*mpp = NULL;
		mpnew = create_lsmsg(PPP_LINKSTAT_NEEDUP);
		if (mpnew == NULL) {
			freemsg(mp);
			mutex_enter(&ppa->ppa_sta_lock);
			ppa->ppa_allocbfail++;
			mutex_exit(&ppa->ppa_sta_lock);
			return (NULL);	/* don't queue it */
		}
		/* Include the data in the message for logging. */
		mpnew->b_cont = mp;
		mutex_enter(&ppa->ppa_sta_lock);
		ppa->ppa_lsneedup++;
		mutex_exit(&ppa->ppa_sta_lock);
		/*
		 * We need to set the mode to NPMODE_DROP, but should only
		 * do so when this stream is not the control stream.
		 */
		if (!IS_SPS_CONTROL(sps)) {
			sps->sps_npmode = NPMODE_DROP;
		}
		putnext(ppa->ppa_ctl->sps_rq, mpnew);
		return (NULL);	/* don't queue it */
	}
	/*
	 * If so, then try to send it down. The lower queue is only ever
	 * detached while holding an exclusive lock on the whole driver,
	 * so we can be confident that the lower queue is still there.
	 */
	if (bcanputnext(ppa->ppa_lower_wq, mp->b_band)) {
		mutex_enter(&ppa->ppa_sta_lock);
		ppa->ppa_stats.p.ppp_opackets++;
		if (IS_SPS_CONTROL(sps)) {
			ppa->ppa_opkt_ctl++;
		}
		ppa->ppa_stats.p.ppp_obytes += msize;
		mutex_exit(&ppa->ppa_sta_lock);
		return (ppa->ppa_lower_wq);	/* don't queue it */
	}
	return (NULL);	/* queue it for later */
}
1749 
1750 /*
1751  * sppp_lwsrv()
1752  *
1753  * MT-Perimeters:
1754  *    exclusive inner, shared outer.
1755  *
1756  * Description:
1757  *    Lower write-side service procedure. No messages are ever placed on
1758  *    the write queue here, this just back-enables all upper write side
1759  *    service procedures.
1760  */
1761 void
1762 sppp_lwsrv(queue_t *q)
1763 {
1764 	sppa_t		*ppa;
1765 	spppstr_t	*nextsib;
1766 
1767 	ASSERT(q != NULL && q->q_ptr != NULL);
1768 	ppa = (sppa_t *)q->q_ptr;
1769 	ASSERT(ppa != NULL);
1770 
1771 	rw_enter(&ppa->ppa_sib_lock, RW_READER);
1772 	if ((nextsib = ppa->ppa_ctl) != NULL &&
1773 	    WR(nextsib->sps_rq)->q_first != NULL)
1774 		qenable(WR(nextsib->sps_rq));
1775 	for (nextsib = ppa->ppa_streams; nextsib != NULL;
1776 	    nextsib = nextsib->sps_nextsib) {
1777 		if (WR(nextsib->sps_rq)->q_first != NULL) {
1778 			qenable(WR(nextsib->sps_rq));
1779 		}
1780 	}
1781 	rw_exit(&ppa->ppa_sib_lock);
1782 }
1783 
1784 /*
1785  * sppp_lrput()
1786  *
1787  * MT-Perimeters:
1788  *    shared inner, shared outer.
1789  *
1790  * Description:
1791  *    Lower read-side put procedure. Messages from below get here.
1792  *    Data messages are handled separately to limit stack usage
1793  *    going into IP.
1794  *
1795  *    Note that during I_UNLINK processing, it's possible for a downstream
1796  *    message to enable upstream data (due to pass_wput() removing the
1797  *    SQ_BLOCKED flag), and thus we must protect against a NULL sppa pointer.
1798  *    In this case, the only thing above us is passthru, and we might as well
1799  *    discard.
1800  */
1801 void
1802 sppp_lrput(queue_t *q, mblk_t *mp)
1803 {
1804 	sppa_t		*ppa;
1805 	spppstr_t	*sps;
1806 
1807 	if ((ppa = q->q_ptr) == NULL) {
1808 		freemsg(mp);
1809 		return;
1810 	}
1811 
1812 	sps = ppa->ppa_ctl;
1813 
1814 	if (MTYPE(mp) != M_DATA) {
1815 		sppp_recv_nondata(q, mp, sps);
1816 	} else if (sps == NULL) {
1817 		freemsg(mp);
1818 	} else if ((q = sppp_recv(q, &mp, sps)) != NULL) {
1819 		putnext(q, mp);
1820 	}
1821 }
1822 
1823 /*
1824  * sppp_lrsrv()
1825  *
1826  * MT-Perimeters:
1827  *    exclusive inner, shared outer.
1828  *
1829  * Description:
1830  *    Lower read-side service procedure.  This is run once after the I_LINK
1831  *    occurs in order to clean up any packets that came in while we were
1832  *    transferring in the lower stream.  Otherwise, it's not used.
1833  */
1834 void
1835 sppp_lrsrv(queue_t *q)
1836 {
1837 	mblk_t *mp;
1838 
1839 	while ((mp = getq(q)) != NULL)
1840 		sppp_lrput(q, mp);
1841 }
1842 
1843 /*
1844  * sppp_recv_nondata()
1845  *
1846  * MT-Perimeters:
1847  *    shared inner, shared outer.
1848  *
1849  * Description:
1850  *    All received non-data messages come through here.
1851  */
1852 static void
1853 sppp_recv_nondata(queue_t *q, mblk_t *mp, spppstr_t *ctlsps)
1854 {
1855 	sppa_t		*ppa;
1856 	spppstr_t	*destsps;
1857 	struct iocblk	*iop;
1858 
1859 	ppa = (sppa_t *)q->q_ptr;
1860 	ctlsps = ppa->ppa_ctl;
1861 
1862 	switch (MTYPE(mp)) {
1863 	case M_CTL:
1864 		mutex_enter(&ppa->ppa_sta_lock);
1865 		if (*mp->b_rptr == PPPCTL_IERROR) {
1866 			ppa->ppa_stats.p.ppp_ierrors++;
1867 			ppa->ppa_ierr_low++;
1868 			ppa->ppa_mctlsknown++;
1869 		} else if (*mp->b_rptr == PPPCTL_OERROR) {
1870 			ppa->ppa_stats.p.ppp_oerrors++;
1871 			ppa->ppa_oerr_low++;
1872 			ppa->ppa_mctlsknown++;
1873 		} else {
1874 			ppa->ppa_mctlsunknown++;
1875 		}
1876 		mutex_exit(&ppa->ppa_sta_lock);
1877 		freemsg(mp);
1878 		break;
1879 	case M_IOCTL:
1880 		miocnak(q, mp, 0, EINVAL);
1881 		break;
1882 	case M_IOCACK:
1883 	case M_IOCNAK:
1884 		iop = (struct iocblk *)mp->b_rptr;
1885 		ASSERT(iop != NULL);
1886 		/*
1887 		 * Attempt to match up the response with the stream that the
1888 		 * request came from. If ioc_id doesn't match the one that we
1889 		 * recorded, then discard this message.
1890 		 */
1891 		rw_enter(&ppa->ppa_sib_lock, RW_READER);
1892 		if ((destsps = ctlsps) == NULL ||
1893 		    destsps->sps_ioc_id != iop->ioc_id) {
1894 			destsps = ppa->ppa_streams;
1895 			while (destsps != NULL) {
1896 				if (destsps->sps_ioc_id == iop->ioc_id) {
1897 					break;	/* found the upper stream */
1898 				}
1899 				destsps = destsps->sps_nextsib;
1900 			}
1901 		}
1902 		rw_exit(&ppa->ppa_sib_lock);
1903 		if (destsps == NULL) {
1904 			mutex_enter(&ppa->ppa_sta_lock);
1905 			ppa->ppa_ioctlsfwderr++;
1906 			mutex_exit(&ppa->ppa_sta_lock);
1907 			freemsg(mp);
1908 			break;
1909 		}
1910 		mutex_enter(&ppa->ppa_sta_lock);
1911 		ppa->ppa_ioctlsfwdok++;
1912 
1913 		/*
1914 		 * Clear SPS_IOCQ and enable the lower write side queue,
1915 		 * this would allow the upper stream service routine
1916 		 * to start processing the queue for pending messages.
1917 		 * sppp_lwsrv -> sppp_uwsrv.
1918 		 */
1919 		destsps->sps_flags &= ~SPS_IOCQ;
1920 		mutex_exit(&ppa->ppa_sta_lock);
1921 		qenable(WR(destsps->sps_rq));
1922 
1923 		putnext(destsps->sps_rq, mp);
1924 		break;
1925 	case M_HANGUP:
1926 		/*
1927 		 * Free the original mblk_t. We don't really want to send
1928 		 * a M_HANGUP message upstream, so we need to translate this
1929 		 * message into something else.
1930 		 */
1931 		freemsg(mp);
1932 		if (ctlsps == NULL)
1933 			break;
1934 		mp = create_lsmsg(PPP_LINKSTAT_HANGUP);
1935 		if (mp == NULL) {
1936 			mutex_enter(&ppa->ppa_sta_lock);
1937 			ppa->ppa_allocbfail++;
1938 			mutex_exit(&ppa->ppa_sta_lock);
1939 			break;
1940 		}
1941 		mutex_enter(&ppa->ppa_sta_lock);
1942 		ppa->ppa_lsdown++;
1943 		mutex_exit(&ppa->ppa_sta_lock);
1944 		putnext(ctlsps->sps_rq, mp);
1945 		break;
1946 	case M_FLUSH:
1947 		if (*mp->b_rptr & FLUSHR) {
1948 			flushq(q, FLUSHDATA);
1949 		}
1950 		if (*mp->b_rptr & FLUSHW) {
1951 			*mp->b_rptr &= ~FLUSHR;
1952 			qreply(q, mp);
1953 		} else {
1954 			freemsg(mp);
1955 		}
1956 		break;
1957 	default:
1958 		if (ctlsps != NULL &&
1959 		    (queclass(mp) == QPCTL) || canputnext(ctlsps->sps_rq)) {
1960 			putnext(ctlsps->sps_rq, mp);
1961 		} else {
1962 			mutex_enter(&ppa->ppa_sta_lock);
1963 			ppa->ppa_iqdropped++;
1964 			mutex_exit(&ppa->ppa_sta_lock);
1965 			freemsg(mp);
1966 		}
1967 		break;
1968 	}
1969 }
1970 
1971 /*
1972  * sppp_recv()
1973  *
1974  * MT-Perimeters:
1975  *    shared inner, shared outer.
1976  *
1977  * Description:
1978  *    Receive function called by sppp_lrput.  Finds appropriate
1979  *    receive stream and does accounting.
1980  */
static queue_t *
sppp_recv(queue_t *q, mblk_t **mpp, spppstr_t *ctlsps)
{
	mblk_t		*mp;
	int		len;
	sppa_t		*ppa;
	spppstr_t	*destsps;
	mblk_t		*zmp;
	uint32_t	npflagpos;

	ASSERT(mpp != NULL);
	mp = *mpp;
	ASSERT(q != NULL && q->q_ptr != NULL);
	ASSERT(mp != NULL && mp->b_rptr != NULL);
	ASSERT(ctlsps != NULL);
	ASSERT(IS_SPS_CONTROL(ctlsps));
	ppa = ctlsps->sps_ppa;
	ASSERT(ppa != NULL && ppa->ppa_ctl != NULL);

	/* Account for every received byte, even frames dropped below. */
	len = msgdsize(mp);
	mutex_enter(&ppa->ppa_sta_lock);
	ppa->ppa_stats.p.ppp_ibytes += len;
	mutex_exit(&ppa->ppa_sta_lock);
	/*
	 * If the entire data size of the mblk is less than the length of the
	 * PPP header, then free it. We can't do much with such message anyway,
	 * since we can't really determine what the PPP protocol type is.
	 */
	if (len < PPP_HDRLEN) {
		/* Log, and free it */
		mutex_enter(&ppa->ppa_sta_lock);
		ppa->ppa_irunts++;
		mutex_exit(&ppa->ppa_sta_lock);
		freemsg(mp);
		return (NULL);
	} else if (len > (ppa->ppa_mru + PPP_HDRLEN)) {
		/* Log, and accept it anyway */
		mutex_enter(&ppa->ppa_sta_lock);
		ppa->ppa_itoolongs++;
		mutex_exit(&ppa->ppa_sta_lock);
	}
	/*
	 * We need at least be able to read the PPP protocol from the header,
	 * so if the first message block is too small, then we concatenate the
	 * rest of the following blocks into one message.
	 */
	if (MBLKL(mp) < PPP_HDRLEN) {
		zmp = msgpullup(mp, PPP_HDRLEN);
		freemsg(mp);	/* original chain is consumed either way */
		mp = zmp;
		if (mp == NULL) {
			/* Pullup allocation failed; the packet is gone. */
			mutex_enter(&ppa->ppa_sta_lock);
			ppa->ppa_allocbfail++;
			mutex_exit(&ppa->ppa_sta_lock);
			return (NULL);
		}
		*mpp = mp;	/* hand the pulled-up copy back to the caller */
	}
	/*
	 * Hold this packet on the control queue while the matching
	 * network-layer upper stream for the PPP protocol (sap) has not yet
	 * been plumbed and configured; sppp_release_pkts() delivers the held
	 * packets once the protocol is unblocked.
	 */
	npflagpos = sppp_ppp2np(PPP_PROTOCOL(mp->b_rptr));
	mutex_enter(&ppa->ppa_npmutex);
	if (npflagpos != 0 && (ppa->ppa_npflag & (1 << npflagpos))) {
		/*
		 * proto is currently blocked; Hold up to 4 packets
		 * in the kernel.  Drop on overflow or putq failure.
		 */
		if (ppa->ppa_holdpkts[npflagpos] > 3 ||
		    putq(ctlsps->sps_rq, mp) == 0)
			freemsg(mp);
		else
			ppa->ppa_holdpkts[npflagpos]++;
		mutex_exit(&ppa->ppa_npmutex);
		return (NULL);
	}
	mutex_exit(&ppa->ppa_npmutex);
	/*
	 * Try to find a matching network-layer upper stream for the specified
	 * PPP protocol (sap), and if none is found, send this frame up the
	 * control stream.
	 */
	destsps = sppp_inpkt(q, mp, ctlsps);
	if (destsps == NULL) {
		/* No bound upper stream; deliver on the control stream. */
		mutex_enter(&ppa->ppa_sta_lock);
		ppa->ppa_ipkt_ctl++;
		mutex_exit(&ppa->ppa_sta_lock);
		if (canputnext(ctlsps->sps_rq)) {
			if (IS_SPS_KDEBUG(ctlsps)) {
				SPDEBUG(PPP_DRV_NAME
				    "/%d: M_DATA recv (%d bytes) sps=0x%p "
				    "flags=0x%b ppa=0x%p flags=0x%b\n",
				    ctlsps->sps_mn_id, len, (void *)ctlsps,
				    ctlsps->sps_flags, SPS_FLAGS_STR,
				    (void *)ppa, ppa->ppa_flags,
				    PPA_FLAGS_STR);
			}
			return (ctlsps->sps_rq);
		} else {
			/* Control stream is flow-controlled; drop. */
			mutex_enter(&ppa->ppa_sta_lock);
			ppa->ppa_iqdropped++;
			mutex_exit(&ppa->ppa_sta_lock);
			freemsg(mp);
			return (NULL);
		}
	}
	if (canputnext(destsps->sps_rq)) {
		if (IS_SPS_KDEBUG(destsps)) {
			SPDEBUG(PPP_DRV_NAME
			    "/%d: M_DATA recv (%d bytes) sps=0x%p flags=0x%b "
			    "ppa=0x%p flags=0x%b\n", destsps->sps_mn_id, len,
			    (void *)destsps, destsps->sps_flags,
			    SPS_FLAGS_STR, (void *)ppa, ppa->ppa_flags,
			    PPA_FLAGS_STR);
		}
		/*
		 * If fastpath is enabled on the network-layer stream, then
		 * make sure we skip over the PPP header, otherwise, we wrap
		 * the message in a DLPI message.
		 */
		if (IS_SPS_FASTPATH(destsps)) {
			/* *mpp still points at mp; only b_rptr moved. */
			mp->b_rptr += PPP_HDRLEN;
			return (destsps->sps_rq);
		} else {
			spppstr_t *uqs = (spppstr_t *)destsps->sps_rq->q_ptr;
			ASSERT(uqs != NULL);
			mp->b_rptr += PPP_HDRLEN;
			mp = sppp_dladdud(uqs, mp, uqs->sps_sap, B_FALSE);
			if (mp != NULL) {
				*mpp = mp;	/* caller putnext()s the DLPI msg */
				return (destsps->sps_rq);
			} else {
				mutex_enter(&ppa->ppa_sta_lock);
				ppa->ppa_allocbfail++;
				mutex_exit(&ppa->ppa_sta_lock);
				/* mp already freed by sppp_dladdud */
				return (NULL);
			}
		}
	} else {
		/* Destination stream is flow-controlled; drop. */
		mutex_enter(&ppa->ppa_sta_lock);
		ppa->ppa_iqdropped++;
		mutex_exit(&ppa->ppa_sta_lock);
		freemsg(mp);
		return (NULL);
	}
}
2130 
2131 /*
2132  * sppp_inpkt()
2133  *
2134  * MT-Perimeters:
2135  *    shared inner, shared outer.
2136  *
2137  * Description:
2138  *    Find the destination upper stream for the received packet, called
2139  *    from sppp_recv.
2140  *
2141  * Returns:
2142  *    ptr to destination upper network stream, or NULL for control stream.
2143  */
2144 /* ARGSUSED */
2145 static spppstr_t *
2146 sppp_inpkt(queue_t *q, mblk_t *mp, spppstr_t *ctlsps)
2147 {
2148 	spppstr_t	*destsps = NULL;
2149 	sppa_t		*ppa;
2150 	uint16_t	proto;
2151 	int		is_promisc;
2152 
2153 	ASSERT(q != NULL && q->q_ptr != NULL);
2154 	ASSERT(mp != NULL && mp->b_rptr != NULL);
2155 	ASSERT(IS_SPS_CONTROL(ctlsps));
2156 	ppa = ctlsps->sps_ppa;
2157 	ASSERT(ppa != NULL);
2158 	/*
2159 	 * From RFC 1661 (Section 2):
2160 	 *
2161 	 * The Protocol field is one or two octets, and its value identifies
2162 	 * the datagram encapsulated in the Information field of the packet.
2163 	 * The field is transmitted and received most significant octet first.
2164 	 *
2165 	 * The structure of this field is consistent with the ISO 3309
2166 	 * extension mechanism for address fields.  All Protocols MUST be odd;
2167 	 * the least significant bit of the least significant octet MUST equal
2168 	 * "1".  Also, all Protocols MUST be assigned such that the least
2169 	 * significant bit of the most significant octet equals "0". Frames
2170 	 * received which don't comply with these rules MUST be treated as
2171 	 * having an unrecognized Protocol.
2172 	 *
2173 	 * Protocol field values in the "0***" to "3***" range identify the
2174 	 * network-layer protocol of specific packets, and values in the
2175 	 * "8***" to "b***" range identify packets belonging to the associated
2176 	 * Network Control Protocols (NCPs), if any.
2177 	 *
2178 	 * Protocol field values in the "4***" to "7***" range are used for
2179 	 * protocols with low volume traffic which have no associated NCP.
2180 	 * Protocol field values in the "c***" to "f***" range identify packets
2181 	 * as link-layer Control Protocols (such as LCP).
2182 	 */
2183 	proto = PPP_PROTOCOL(mp->b_rptr);
2184 	mutex_enter(&ppa->ppa_sta_lock);
2185 	ppa->ppa_stats.p.ppp_ipackets++;
2186 	mutex_exit(&ppa->ppa_sta_lock);
2187 	/*
2188 	 * We check if this is not a network-layer protocol, and if so,
2189 	 * then send this packet up the control stream.
2190 	 */
2191 	if (proto > 0x7fff) {
2192 		goto inpkt_done;	/* send it up the control stream */
2193 	}
2194 	/*
2195 	 * Try to grab the destination upper stream from the network-layer
2196 	 * stream cache for this ppa for PPP_IP (0x0021) or PPP_IPV6 (0x0057)
2197 	 * protocol types. Otherwise, if the type is not known to the cache,
2198 	 * or if its sap can't be matched with any of the upper streams, then
2199 	 * send this packet up the control stream so that it can be rejected.
2200 	 */
2201 	if (proto == PPP_IP) {
2202 		destsps = ppa->ppa_ip_cache;
2203 	} else if (proto == PPP_IPV6) {
2204 		destsps = ppa->ppa_ip6_cache;
2205 	}
2206 	/*
2207 	 * Toss this one away up the control stream if there's no matching sap;
2208 	 * this way the protocol can be rejected (destsps is NULL).
2209 	 */
2210 
2211 inpkt_done:
2212 	/*
2213 	 * Only time-stamp the packet with hrtime if the upper stream
2214 	 * is configured to do so.  PPP control (negotiation) messages
2215 	 * are never considered link activity; only data is activity.
2216 	 */
2217 	if (destsps != NULL && IS_PPA_TIMESTAMP(ppa)) {
2218 		ppa->ppa_lastrx = gethrtime();
2219 	}
2220 	/*
2221 	 * Should there be any promiscuous stream(s), send the data up for
2222 	 * each promiscuous stream that we recognize. We skip the control
2223 	 * stream as we obviously never allow the control stream to become
2224 	 * promiscous and bind to PPP_ALLSAP.
2225 	 */
2226 	rw_enter(&ppa->ppa_sib_lock, RW_READER);
2227 	is_promisc = ppa->ppa_promicnt;
2228 	if (is_promisc) {
2229 		ASSERT(ppa->ppa_streams != NULL);
2230 		sppp_dlprsendup(ppa->ppa_streams, mp, proto, B_TRUE);
2231 	}
2232 	rw_exit(&ppa->ppa_sib_lock);
2233 	return (destsps);
2234 }
2235 
2236 /*
2237  * sppp_kstat_update()
2238  *
2239  * Description:
2240  *    Update per-ppa kstat interface statistics.
2241  */
2242 static int
2243 sppp_kstat_update(kstat_t *ksp, int rw)
2244 {
2245 	register sppa_t		*ppa;
2246 	register sppp_kstats_t	*pppkp;
2247 	register struct pppstat64 *sp;
2248 
2249 	if (rw == KSTAT_WRITE) {
2250 		return (EACCES);
2251 	}
2252 
2253 	ppa = (sppa_t *)ksp->ks_private;
2254 	ASSERT(ppa != NULL);
2255 
2256 	pppkp = (sppp_kstats_t *)ksp->ks_data;
2257 	sp = &ppa->ppa_stats.p;
2258 
2259 	mutex_enter(&ppa->ppa_sta_lock);
2260 	pppkp->allocbfail.value.ui32	= ppa->ppa_allocbfail;
2261 	pppkp->mctlsfwd.value.ui32	= ppa->ppa_mctlsfwd;
2262 	pppkp->mctlsfwderr.value.ui32	= ppa->ppa_mctlsfwderr;
2263 	pppkp->rbytes.value.ui32	= sp->ppp_ibytes;
2264 	pppkp->rbytes64.value.ui64	= sp->ppp_ibytes;
2265 	pppkp->ierrors.value.ui32	= sp->ppp_ierrors;
2266 	pppkp->ierrors_lower.value.ui32	= ppa->ppa_ierr_low;
2267 	pppkp->ioctlsfwd.value.ui32	= ppa->ppa_ioctlsfwd;
2268 	pppkp->ioctlsfwdok.value.ui32	= ppa->ppa_ioctlsfwdok;
2269 	pppkp->ioctlsfwderr.value.ui32	= ppa->ppa_ioctlsfwderr;
2270 	pppkp->ipackets.value.ui32	= sp->ppp_ipackets;
2271 	pppkp->ipackets64.value.ui64	= sp->ppp_ipackets;
2272 	pppkp->ipackets_ctl.value.ui32	= ppa->ppa_ipkt_ctl;
2273 	pppkp->iqdropped.value.ui32	= ppa->ppa_iqdropped;
2274 	pppkp->irunts.value.ui32	= ppa->ppa_irunts;
2275 	pppkp->itoolongs.value.ui32	= ppa->ppa_itoolongs;
2276 	pppkp->lsneedup.value.ui32	= ppa->ppa_lsneedup;
2277 	pppkp->lsdown.value.ui32	= ppa->ppa_lsdown;
2278 	pppkp->mctlsknown.value.ui32	= ppa->ppa_mctlsknown;
2279 	pppkp->mctlsunknown.value.ui32	= ppa->ppa_mctlsunknown;
2280 	pppkp->obytes.value.ui32	= sp->ppp_obytes;
2281 	pppkp->obytes64.value.ui64	= sp->ppp_obytes;
2282 	pppkp->oerrors.value.ui32	= sp->ppp_oerrors;
2283 	pppkp->oerrors_lower.value.ui32	= ppa->ppa_oerr_low;
2284 	pppkp->opackets.value.ui32	= sp->ppp_opackets;
2285 	pppkp->opackets64.value.ui64	= sp->ppp_opackets;
2286 	pppkp->opackets_ctl.value.ui32	= ppa->ppa_opkt_ctl;
2287 	pppkp->oqdropped.value.ui32	= ppa->ppa_oqdropped;
2288 	pppkp->otoolongs.value.ui32	= ppa->ppa_otoolongs;
2289 	pppkp->orunts.value.ui32	= ppa->ppa_orunts;
2290 	mutex_exit(&ppa->ppa_sta_lock);
2291 
2292 	return (0);
2293 }
2294 
2295 /*
2296  * Turn off proto in ppa_npflag to indicate that
2297  * the corresponding network protocol has been plumbed.
2298  * Release proto packets that were being held in the control
2299  * queue in anticipation of this event.
2300  */
2301 static void
2302 sppp_release_pkts(sppa_t *ppa, uint16_t proto)
2303 {
2304 	uint32_t npflagpos = sppp_ppp2np(proto);
2305 	int count;
2306 	mblk_t *mp;
2307 	uint16_t mp_proto;
2308 	queue_t *q;
2309 	spppstr_t *destsps;
2310 
2311 	ASSERT(ppa != NULL);
2312 
2313 	if (npflagpos == 0 || (ppa->ppa_npflag & (1 << npflagpos)) == 0)
2314 		return;
2315 
2316 	mutex_enter(&ppa->ppa_npmutex);
2317 	ppa->ppa_npflag &= ~(1 << npflagpos);
2318 	count = ppa->ppa_holdpkts[npflagpos];
2319 	ppa->ppa_holdpkts[npflagpos] = 0;
2320 	mutex_exit(&ppa->ppa_npmutex);
2321 
2322 	q = ppa->ppa_ctl->sps_rq;
2323 
2324 	while (count > 0) {
2325 		mp = getq(q);
2326 		ASSERT(mp != NULL);
2327 
2328 		mp_proto = PPP_PROTOCOL(mp->b_rptr);
2329 		if (mp_proto !=  proto) {
2330 			(void) putq(q, mp);
2331 			continue;
2332 		}
2333 		count--;
2334 		destsps = NULL;
2335 		if (mp_proto == PPP_IP) {
2336 			destsps = ppa->ppa_ip_cache;
2337 		} else if (mp_proto == PPP_IPV6) {
2338 			destsps = ppa->ppa_ip6_cache;
2339 		}
2340 		ASSERT(destsps != NULL);
2341 
2342 		if (IS_SPS_FASTPATH(destsps)) {
2343 			mp->b_rptr += PPP_HDRLEN;
2344 		} else {
2345 			spppstr_t *uqs = (spppstr_t *)destsps->sps_rq->q_ptr;
2346 			ASSERT(uqs != NULL);
2347 			mp->b_rptr += PPP_HDRLEN;
2348 			mp = sppp_dladdud(uqs, mp, uqs->sps_sap, B_FALSE);
2349 			if (mp == NULL) {
2350 				mutex_enter(&ppa->ppa_sta_lock);
2351 				ppa->ppa_allocbfail++;
2352 				mutex_exit(&ppa->ppa_sta_lock);
2353 				/* mp already freed by sppp_dladdud */
2354 				continue;
2355 			}
2356 		}
2357 
2358 		if (canputnext(destsps->sps_rq)) {
2359 			putnext(destsps->sps_rq, mp);
2360 		} else {
2361 			mutex_enter(&ppa->ppa_sta_lock);
2362 			ppa->ppa_iqdropped++;
2363 			mutex_exit(&ppa->ppa_sta_lock);
2364 			freemsg(mp);
2365 			continue;
2366 		}
2367 	}
2368 }
2369