xref: /illumos-gate/usr/src/uts/common/io/ppp/sppp/sppp.c (revision b8767451d156f585534afac0bf22721810d0dc63)
1 /*
2  * sppp.c - Solaris STREAMS PPP multiplexing pseudo-driver
3  *
4  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
5  * Use is subject to license terms.
6  * Copyright (c) 2016 by Delphix. All rights reserved.
7  * Copyright 2019, Joyent, Inc.
8  *
9  * Permission to use, copy, modify, and distribute this software and its
10  * documentation is hereby granted, provided that the above copyright
11  * notice appears in all copies.
12  *
13  * SUN MAKES NO REPRESENTATION OR WARRANTIES ABOUT THE SUITABILITY OF
14  * THE SOFTWARE, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
15  * TO THE IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
16  * PARTICULAR PURPOSE, OR NON-INFRINGEMENT.  SUN SHALL NOT BE LIABLE FOR
17  * ANY DAMAGES SUFFERED BY LICENSEE AS A RESULT OF USING, MODIFYING OR
18  * DISTRIBUTING THIS SOFTWARE OR ITS DERIVATIVES
19  *
20  * Copyright (c) 1994 The Australian National University.
21  * All rights reserved.
22  *
23  * Permission to use, copy, modify, and distribute this software and its
24  * documentation is hereby granted, provided that the above copyright
25  * notice appears in all copies.  This software is provided without any
26  * warranty, express or implied. The Australian National University
27  * makes no representations about the suitability of this software for
28  * any purpose.
29  *
30  * IN NO EVENT SHALL THE AUSTRALIAN NATIONAL UNIVERSITY BE LIABLE TO ANY
31  * PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
32  * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF
33  * THE AUSTRALIAN NATIONAL UNIVERSITY HAS BEEN ADVISED OF THE POSSIBILITY
34  * OF SUCH DAMAGE.
35  *
36  * THE AUSTRALIAN NATIONAL UNIVERSITY SPECIFICALLY DISCLAIMS ANY WARRANTIES,
37  * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
38  * AND FITNESS FOR A PARTICULAR PURPOSE.  THE SOFTWARE PROVIDED HEREUNDER IS
39  * ON AN "AS IS" BASIS, AND THE AUSTRALIAN NATIONAL UNIVERSITY HAS NO
40  * OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS,
41  * OR MODIFICATIONS.
42  *
43  * This driver is derived from the original SVR4 STREAMS PPP driver
44  * originally written by Paul Mackerras <paul.mackerras@cs.anu.edu.au>.
45  *
46  * Adi Masputra <adi.masputra@sun.com> rewrote and restructured the code
47  * for improved performance and scalability.
48  */
49 
50 #define	RCSID	"$Id: sppp.c,v 1.0 2000/05/08 01:10:12 masputra Exp $"
51 
52 #include <sys/types.h>
53 #include <sys/debug.h>
54 #include <sys/param.h>
55 #include <sys/stat.h>
56 #include <sys/stream.h>
57 #include <sys/stropts.h>
58 #include <sys/sysmacros.h>
59 #include <sys/errno.h>
60 #include <sys/time.h>
61 #include <sys/cmn_err.h>
62 #include <sys/kmem.h>
63 #include <sys/conf.h>
64 #include <sys/dlpi.h>
65 #include <sys/ddi.h>
66 #include <sys/kstat.h>
67 #include <sys/strsun.h>
68 #include <sys/ethernet.h>
69 #include <sys/policy.h>
70 #include <sys/zone.h>
71 #include <net/ppp_defs.h>
72 #include <net/pppio.h>
73 #include "sppp.h"
74 #include "s_common.h"
75 
76 /*
77  * This is used to tag official Solaris sources.  Please do not define
78  * "INTERNAL_BUILD" when building this software outside of Sun Microsystems.
79  */
80 #ifdef INTERNAL_BUILD
81 /* MODINFO is limited to 32 characters. */
82 const char sppp_module_description[] = "PPP 4.0 mux";
83 #else /* INTERNAL_BUILD */
84 const char sppp_module_description[] = "ANU PPP mux";
85 
86 /* LINTED */
87 static const char buildtime[] = "Built " __DATE__ " at " __TIME__
88 #ifdef DEBUG
89 " DEBUG"
90 #endif
91 "\n";
92 #endif /* INTERNAL_BUILD */
93 
94 static void	sppp_inner_ioctl(queue_t *, mblk_t *);
95 static void	sppp_outer_ioctl(queue_t *, mblk_t *);
96 static queue_t	*sppp_send(queue_t *, mblk_t **, spppstr_t *);
97 static queue_t	*sppp_recv(queue_t *, mblk_t **, spppstr_t *);
98 static void	sppp_recv_nondata(queue_t *, mblk_t *, spppstr_t *);
99 static queue_t	*sppp_outpkt(queue_t *, mblk_t **, int, spppstr_t *);
100 static spppstr_t *sppp_inpkt(queue_t *, mblk_t *, spppstr_t *);
101 static int	sppp_kstat_update(kstat_t *, int);
102 static void	sppp_release_pkts(sppa_t *, uint16_t);
103 
104 /*
105  * sps_list contains the list of active per-stream instance state structures
106  * ordered on the minor device number (see sppp.h for details). All streams
107  * opened to this driver are threaded together in this list.
108  */
109 static spppstr_t *sps_list = NULL;
110 /*
111  * ppa_list contains the list of active per-attachment instance state
112  * structures ordered on the ppa id number (see sppp.h for details). All of
113  * the ppa structures created once per PPPIO_NEWPPA ioctl are threaded together
114  * in this list. There is exactly one ppa structure for a given PPP interface,
115  * and multiple sps streams (upper streams) may share a ppa by performing
116  * an attachment explicitly (PPPIO_ATTACH) or implicitly (DL_ATTACH_REQ).
117  */
118 static sppa_t *ppa_list = NULL;
119 
120 static const char *kstats_names[] = { SPPP_KSTATS_NAMES };
121 static const char *kstats64_names[] = { SPPP_KSTATS64_NAMES };
122 
123 /*
124  * map proto (which is an IANA defined ppp network protocol) to
125  * a bit position indicated by NP_* in ppa_npflag
126  */
127 static uint32_t
128 sppp_ppp2np(uint16_t proto)
129 {
130 	switch (proto) {
131 	case PPP_IP:
132 		return (NP_IP);
133 	case PPP_IPV6:
134 		return (NP_IPV6);
135 	default:
136 		return (0);
137 	}
138 }
139 
140 /*
141  * sppp_open()
142  *
143  * MT-Perimeters:
144  *    exclusive inner, exclusive outer.
145  *
146  * Description:
147  *    Common open procedure for module.
148  */
149 /* ARGSUSED */
150 int
151 sppp_open(queue_t *q, dev_t *devp, int oflag, int sflag, cred_t *credp)
152 {
153 	spppstr_t	*sps;
154 	spppstr_t	**nextmn;
155 	minor_t		mn;
156 
157 	ASSERT(q != NULL && devp != NULL);
158 	ASSERT(sflag != MODOPEN);
159 
160 	if (q->q_ptr != NULL) {
161 		return (0);		/* already open */
162 	}
163 	if (sflag != CLONEOPEN) {
164 		return (OPENFAIL);
165 	}
166 	/*
167 	 * The sps list is sorted using the minor number as the key. The
168 	 * following code walks the list to find the lowest valued minor
169 	 * number available to be used.
170 	 */
171 	mn = 0;
172 	for (nextmn = &sps_list; (sps = *nextmn) != NULL;
173 	    nextmn = &sps->sps_nextmn) {
174 		if (sps->sps_mn_id != mn) {
175 			break;
176 		}
177 		++mn;
178 	}
179 	sps = (spppstr_t *)kmem_zalloc(sizeof (spppstr_t), KM_SLEEP);
180 	ASSERT(sps != NULL);		/* KM_SLEEP must never return NULL */
181 	sps->sps_nextmn = *nextmn;	/* insert stream in global list */
182 	*nextmn = sps;
183 	sps->sps_mn_id = mn;		/* save minor id for this stream */
184 	sps->sps_rq = q;		/* save read queue pointer */
185 	sps->sps_sap = -1;		/* no sap bound to stream */
186 	sps->sps_dlstate = DL_UNATTACHED; /* dlpi state is unattached */
187 	sps->sps_npmode = NPMODE_DROP;	/* drop all packets initially */
188 	sps->sps_zoneid = crgetzoneid(credp);
189 	q->q_ptr = WR(q)->q_ptr = (caddr_t)sps;
190 	/*
191 	 * We explicitly disable the automatic queue scheduling for the
192 	 * write-side to obtain complete control over queuing during transmit.
193 	 * Packets will be queued at the upper write queue and the service
194 	 * routine will not be called until it gets scheduled by having the
195 	 * lower write service routine call the qenable(WR(uq)) for all streams
196 	 * attached to the same ppa instance.
197 	 */
198 	noenable(WR(q));
199 	*devp = makedevice(getmajor(*devp), mn);
200 	qprocson(q);
201 	return (0);
202 }
203 
204 /*
205  * Free storage used by a PPA.  This is not called until the last PPA
206  * user closes their connection or reattaches to a different PPA.
207  */
208 static void
209 sppp_free_ppa(sppa_t *ppa)
210 {
211 	sppa_t **nextppa;
212 
213 	ASSERT(ppa->ppa_refcnt == 1);
214 	if (ppa->ppa_kstats != NULL) {
215 		kstat_delete(ppa->ppa_kstats);
216 		ppa->ppa_kstats = NULL;
217 	}
218 	mutex_destroy(&ppa->ppa_sta_lock);
219 	mutex_destroy(&ppa->ppa_npmutex);
220 	rw_destroy(&ppa->ppa_sib_lock);
221 	nextppa = &ppa_list;
222 	while (*nextppa != NULL) {
223 		if (*nextppa == ppa) {
224 			*nextppa = ppa->ppa_nextppa;
225 			break;
226 		}
227 		nextppa = &(*nextppa)->ppa_nextppa;
228 	}
229 	kmem_free(ppa, sizeof (*ppa));
230 }
231 
232 /*
233  * Create a new PPA.  Caller must be exclusive on outer perimeter.
234  */
235 sppa_t *
236 sppp_create_ppa(uint32_t ppa_id, zoneid_t zoneid)
237 {
238 	sppa_t *ppa;
239 	sppa_t *curppa;
240 	sppa_t **availppa;
241 	char unit[32];		/* Unit name */
242 	const char **cpp;
243 	kstat_t *ksp;
244 	kstat_named_t *knt;
245 
246 	/*
247 	 * NOTE: unit *must* be named for the driver
248 	 * name plus the ppa number so that netstat
249 	 * can find the statistics.
250 	 */
251 	(void) sprintf(unit, "%s" "%d", PPP_DRV_NAME, ppa_id);
252 	/*
253 	 * Make sure we can allocate a buffer to
254 	 * contain the ppa to be sent upstream, as
255 	 * well as the actual ppa structure and its
256 	 * associated kstat structure.
257 	 */
258 	ppa = (sppa_t *)kmem_zalloc(sizeof (sppa_t),
259 	    KM_NOSLEEP);
260 	ksp = kstat_create(PPP_DRV_NAME, ppa_id, unit, "net", KSTAT_TYPE_NAMED,
261 	    sizeof (sppp_kstats_t) / sizeof (kstat_named_t), 0);
262 
263 	if (ppa == NULL || ksp == NULL) {
264 		if (ppa != NULL) {
265 			kmem_free(ppa, sizeof (sppa_t));
266 		}
267 		if (ksp != NULL) {
268 			kstat_delete(ksp);
269 		}
270 		return (NULL);
271 	}
272 	ppa->ppa_kstats = ksp;		/* chain kstat structure */
273 	ppa->ppa_ppa_id = ppa_id;	/* record ppa id */
274 	ppa->ppa_zoneid = zoneid;	/* zone that owns this PPA */
275 	ppa->ppa_mtu = PPP_MAXMTU;	/* 65535-(PPP_HDRLEN+PPP_FCSLEN) */
276 	ppa->ppa_mru = PPP_MAXMRU;	/* 65000 */
277 
278 	mutex_init(&ppa->ppa_sta_lock, NULL, MUTEX_DRIVER, NULL);
279 	mutex_init(&ppa->ppa_npmutex, NULL, MUTEX_DRIVER, NULL);
280 	rw_init(&ppa->ppa_sib_lock, NULL, RW_DRIVER, NULL);
281 
282 	/*
283 	 * Prepare and install kstat counters.  Note that for netstat
284 	 * -i to work, there needs to be "ipackets", "opackets",
285 	 * "ierrors", and "oerrors" kstat named variables.
286 	 */
287 	knt = (kstat_named_t *)ksp->ks_data;
288 	for (cpp = kstats_names; cpp < kstats_names + Dim(kstats_names);
289 	    cpp++) {
290 		kstat_named_init(knt, *cpp, KSTAT_DATA_UINT32);
291 		knt++;
292 	}
293 	for (cpp = kstats64_names; cpp < kstats64_names + Dim(kstats64_names);
294 	    cpp++) {
295 		kstat_named_init(knt, *cpp, KSTAT_DATA_UINT64);
296 		knt++;
297 	}
298 	ksp->ks_update = sppp_kstat_update;
299 	ksp->ks_private = (void *)ppa;
300 	kstat_install(ksp);
301 
302 	/* link to the next ppa and insert into global list */
303 	availppa = &ppa_list;
304 	while ((curppa = *availppa) != NULL) {
305 		if (ppa_id < curppa->ppa_ppa_id)
306 			break;
307 		availppa = &curppa->ppa_nextppa;
308 	}
309 	ppa->ppa_nextppa = *availppa;
310 	*availppa = ppa;
311 	return (ppa);
312 }
313 
314 /*
315  * sppp_close()
316  *
317  * MT-Perimeters:
318  *    exclusive inner, exclusive outer.
319  *
320  * Description:
321  *    Common close procedure for module.
322  */
323 /* ARGSUSED */
324 int
325 sppp_close(queue_t *q, int flags __unused, cred_t *credp __unused)
326 {
327 	spppstr_t	*sps;
328 	spppstr_t	**nextmn;
329 	spppstr_t	*sib;
330 	sppa_t		*ppa;
331 	mblk_t		*mp;
332 
333 	ASSERT(q != NULL && q->q_ptr != NULL);
334 	sps = (spppstr_t *)q->q_ptr;
335 	qprocsoff(q);
336 
337 	ppa = sps->sps_ppa;
338 	if (ppa == NULL) {
339 		ASSERT(!IS_SPS_CONTROL(sps));
340 		goto close_unattached;
341 	}
342 	if (IS_SPS_CONTROL(sps)) {
343 		uint32_t	cnt = 0;
344 
345 		ASSERT(ppa != NULL);
346 		ASSERT(ppa->ppa_ctl == sps);
347 		ppa->ppa_ctl = NULL;
348 		/*
349 		 * STREAMS framework always issues I_UNLINK prior to close,
350 		 * since we only allow I_LINK under the control stream.
351 		 * A given ppa structure has at most one lower stream pointed
352 		 * by the ppa_lower_wq field, because we only allow a single
353 		 * linkage (I_LINK) to be done on the control stream.
354 		 */
355 		ASSERT(ppa->ppa_lower_wq == NULL);
356 		/*
357 		 * Walk through all of sibling streams attached to this ppa,
358 		 * and remove all references to this ppa. We have exclusive
359 		 * access for the entire driver here, so there's no need
360 		 * to hold ppa_sib_lock.
361 		 */
362 		cnt++;
363 		sib = ppa->ppa_streams;
364 		while (sib != NULL) {
365 			ASSERT(ppa == sib->sps_ppa);
366 			sib->sps_npmode = NPMODE_DROP;
367 			sib->sps_flags &= ~(SPS_PIOATTACH | SPS_CACHED);
368 			/*
369 			 * There should be a preallocated hangup
370 			 * message here.  Fetch it and send it up to
371 			 * the stream head.  This will cause IP to
372 			 * mark the interface as "down."
373 			 */
374 			if ((mp = sib->sps_hangup) != NULL) {
375 				sib->sps_hangup = NULL;
376 				/*
377 				 * M_HANGUP works with IP, but snoop
378 				 * is lame and requires M_ERROR.  Send
379 				 * up a clean error code instead.
380 				 *
381 				 * XXX if snoop is fixed, fix this, too.
382 				 */
383 				MTYPE(mp) = M_ERROR;
384 				*mp->b_wptr++ = ENXIO;
385 				putnext(sib->sps_rq, mp);
386 			}
387 			qenable(WR(sib->sps_rq));
388 			cnt++;
389 			sib = sib->sps_nextsib;
390 		}
391 		ASSERT(ppa->ppa_refcnt == cnt);
392 	} else {
393 		ASSERT(ppa->ppa_streams != NULL);
394 		ASSERT(ppa->ppa_ctl != sps);
395 		mp = NULL;
396 		if (sps->sps_sap == PPP_IP) {
397 			ppa->ppa_ip_cache = NULL;
398 			mp = create_lsmsg(PPP_LINKSTAT_IPV4_UNBOUND);
399 		} else if (sps->sps_sap == PPP_IPV6) {
400 			ppa->ppa_ip6_cache = NULL;
401 			mp = create_lsmsg(PPP_LINKSTAT_IPV6_UNBOUND);
402 		}
403 		/* Tell the daemon the bad news. */
404 		if (mp != NULL && ppa->ppa_ctl != NULL &&
405 		    (sps->sps_npmode == NPMODE_PASS ||
406 		    sps->sps_npmode == NPMODE_QUEUE)) {
407 			putnext(ppa->ppa_ctl->sps_rq, mp);
408 		} else {
409 			freemsg(mp);
410 		}
411 		/*
412 		 * Walk through all of sibling streams attached to the
413 		 * same ppa, and remove this stream from the sibling
414 		 * streams list. We have exclusive access for the
415 		 * entire driver here, so there's no need to hold
416 		 * ppa_sib_lock.
417 		 */
418 		sib = ppa->ppa_streams;
419 		if (sib == sps) {
420 			ppa->ppa_streams = sps->sps_nextsib;
421 		} else {
422 			while (sib->sps_nextsib != NULL) {
423 				if (sib->sps_nextsib == sps) {
424 					sib->sps_nextsib = sps->sps_nextsib;
425 					break;
426 				}
427 				sib = sib->sps_nextsib;
428 			}
429 		}
430 		sps->sps_nextsib = NULL;
431 		freemsg(sps->sps_hangup);
432 		sps->sps_hangup = NULL;
433 		/*
434 		 * Check if this is a promiscous stream. If the SPS_PROMISC bit
435 		 * is still set, it means that the stream is closed without
436 		 * ever having issued DL_DETACH_REQ or DL_PROMISCOFF_REQ.
437 		 * In this case, we simply decrement the promiscous counter,
438 		 * and it's safe to do it without holding ppa_sib_lock since
439 		 * we're exclusive (inner and outer) at this point.
440 		 */
441 		if (IS_SPS_PROMISC(sps)) {
442 			ASSERT(ppa->ppa_promicnt > 0);
443 			ppa->ppa_promicnt--;
444 		}
445 	}
446 	/* If we're the only one left, then delete now. */
447 	if (ppa->ppa_refcnt <= 1)
448 		sppp_free_ppa(ppa);
449 	else
450 		ppa->ppa_refcnt--;
451 close_unattached:
452 	q->q_ptr = WR(q)->q_ptr = NULL;
453 	for (nextmn = &sps_list; *nextmn != NULL;
454 	    nextmn = &(*nextmn)->sps_nextmn) {
455 		if (*nextmn == sps) {
456 			*nextmn = sps->sps_nextmn;
457 			break;
458 		}
459 	}
460 	kmem_free(sps, sizeof (spppstr_t));
461 	return (0);
462 }
463 
/*
 * sppp_ioctl()
 *
 * Handle the subset of ioctls (PPPIO_NPMODE, PPPIO_GIDLE, PPPIO_GTYPE,
 * PPPIO_GETSTAT64, PPPIO_GETCSTAT) that need extra local state; called
 * from sppp_uwput().  Acks the message with miocack() on success or
 * miocnak() on failure, except when the request is forwarded to the
 * lower stream (or queued behind a pending forwarded ioctl), in which
 * case the response comes back asynchronously and we return early.
 */
static void
sppp_ioctl(struct queue *q, mblk_t *mp)
{
	spppstr_t	*sps;
	spppstr_t	*nextsib;
	sppa_t		*ppa;
	struct iocblk	*iop;
	mblk_t		*nmp;
	enum NPmode	npmode;
	struct ppp_idle	*pip;
	struct ppp_stats64 *psp;
	struct ppp_comp_stats *pcsp;
	hrtime_t	hrtime;
	int		sap;
	int		count = 0;	/* bytes returned in M_IOCACK */
	int		error = EINVAL;	/* default NAK code */

	sps = (spppstr_t *)q->q_ptr;
	ppa = sps->sps_ppa;

	iop = (struct iocblk *)mp->b_rptr;
	switch (iop->ioc_cmd) {
	case PPPIO_NPMODE:
		/* Set the network-protocol mode of a sibling stream. */
		if (!IS_SPS_CONTROL(sps)) {
			break;		/* return EINVAL */
		} else if (iop->ioc_count != 2 * sizeof (uint32_t) ||
		    (mp->b_cont == NULL)) {
			error = EPROTO;
			break;
		}
		ASSERT(ppa != NULL);
		ASSERT(mp->b_cont->b_rptr != NULL);
		ASSERT(sps->sps_npmode == NPMODE_PASS);
		sap = ((uint32_t *)mp->b_cont->b_rptr)[0];
		npmode = (enum NPmode)((uint32_t *)mp->b_cont->b_rptr)[1];
		/*
		 * Walk the sibling streams which belong to the same
		 * ppa, and try to find a stream with matching sap
		 * number.
		 */
		rw_enter(&ppa->ppa_sib_lock, RW_WRITER);
		for (nextsib = ppa->ppa_streams; nextsib != NULL;
		    nextsib = nextsib->sps_nextsib) {
			if (nextsib->sps_sap == sap) {
				break;	/* found it */
			}
		}
		if (nextsib == NULL) {
			rw_exit(&ppa->ppa_sib_lock);
			break;		/* return EINVAL */
		} else {
			nextsib->sps_npmode = npmode;
			/*
			 * If the mode no longer queues, kick the stream's
			 * write service routine so held packets move.
			 */
			if ((nextsib->sps_npmode != NPMODE_QUEUE) &&
			    (WR(nextsib->sps_rq)->q_first != NULL)) {
				qenable(WR(nextsib->sps_rq));
			}
		}
		rw_exit(&ppa->ppa_sib_lock);
		error = 0;	/* return success */
		break;
	case PPPIO_GIDLE:
		/* Report transmit/receive idle time in seconds. */
		if (ppa == NULL) {
			ASSERT(!IS_SPS_CONTROL(sps));
			error = ENOLINK;
			break;
		} else if (!IS_PPA_TIMESTAMP(ppa)) {
			break;		/* return EINVAL */
		}
		if ((nmp = allocb(sizeof (struct ppp_idle),
		    BPRI_MED)) == NULL) {
			mutex_enter(&ppa->ppa_sta_lock);
			ppa->ppa_allocbfail++;
			mutex_exit(&ppa->ppa_sta_lock);
			error = ENOSR;
			break;
		}
		if (mp->b_cont != NULL) {
			freemsg(mp->b_cont);
		}
		mp->b_cont = nmp;
		pip = (struct ppp_idle *)nmp->b_wptr;
		nmp->b_wptr += sizeof (struct ppp_idle);
		/*
		 * Get current timestamp and subtract the tx and rx
		 * timestamps to get the actual idle time to be
		 * returned.  Timestamps are hrtime (ns); divide to
		 * convert to seconds.
		 */
		hrtime = gethrtime();
		pip->xmit_idle = (hrtime - ppa->ppa_lasttx) / 1000000000ul;
		pip->recv_idle = (hrtime - ppa->ppa_lastrx) / 1000000000ul;
		count = msgsize(nmp);
		error = 0;
		break;		/* return success (error is 0) */
	case PPPIO_GTYPE:
		nmp = allocb(sizeof (uint32_t), BPRI_MED);
		if (nmp == NULL) {
			error = ENOSR;
			break;
		}
		if (mp->b_cont != NULL) {
			freemsg(mp->b_cont);
		}
		mp->b_cont = nmp;
		/*
		 * Let the requestor know that we are the PPP
		 * multiplexer (PPPTYP_MUX).
		 */
		*(uint32_t *)nmp->b_wptr = PPPTYP_MUX;
		nmp->b_wptr += sizeof (uint32_t);
		count = msgsize(nmp);
		error = 0;		/* return success */
		break;
	case PPPIO_GETSTAT64:
		if (ppa == NULL) {
			break;		/* return EINVAL */
		} else if ((ppa->ppa_lower_wq != NULL) &&
		    !IS_PPA_LASTMOD(ppa)) {
			/*
			 * Not the last PPP-aware module; forward the
			 * request down to the lower stream instead.
			 */
			mutex_enter(&ppa->ppa_sta_lock);
			/*
			 * We match sps_ioc_id on the M_IOC{ACK,NAK},
			 * so if the response hasn't come back yet,
			 * new ioctls must be queued instead.
			 */
			if (IS_SPS_IOCQ(sps)) {
				mutex_exit(&ppa->ppa_sta_lock);
				if (!putq(q, mp)) {
					error = EAGAIN;
					break;
				}
				return;
			} else {
				ppa->ppa_ioctlsfwd++;
				/*
				 * Record the ioctl CMD & ID - this will be
				 * used to check the ACK or NAK responses
				 * coming from below.
				 */
				sps->sps_ioc_id = iop->ioc_id;
				sps->sps_flags |= SPS_IOCQ;
				mutex_exit(&ppa->ppa_sta_lock);
			}
			putnext(ppa->ppa_lower_wq, mp);
			return;	/* don't ack or nak the request */
		}
		nmp = allocb(sizeof (*psp), BPRI_MED);
		if (nmp == NULL) {
			mutex_enter(&ppa->ppa_sta_lock);
			ppa->ppa_allocbfail++;
			mutex_exit(&ppa->ppa_sta_lock);
			error = ENOSR;
			break;
		}
		if (mp->b_cont != NULL) {
			freemsg(mp->b_cont);
		}
		mp->b_cont = nmp;
		psp = (struct ppp_stats64 *)nmp->b_wptr;
		/*
		 * Copy the contents of ppp_stats64 structure for this
		 * ppa and return them to the caller.
		 */
		mutex_enter(&ppa->ppa_sta_lock);
		bcopy(&ppa->ppa_stats, psp, sizeof (*psp));
		mutex_exit(&ppa->ppa_sta_lock);
		nmp->b_wptr += sizeof (*psp);
		count = sizeof (*psp);
		error = 0;		/* return success */
		break;
	case PPPIO_GETCSTAT:
		if (ppa == NULL) {
			break;		/* return EINVAL */
		} else if ((ppa->ppa_lower_wq != NULL) &&
		    !IS_PPA_LASTMOD(ppa)) {
			mutex_enter(&ppa->ppa_sta_lock);
			/*
			 * See comments in PPPIO_GETSTAT64 case
			 * in sppp_ioctl().
			 */
			if (IS_SPS_IOCQ(sps)) {
				mutex_exit(&ppa->ppa_sta_lock);
				if (!putq(q, mp)) {
					error = EAGAIN;
					break;
				}
				return;
			} else {
				ppa->ppa_ioctlsfwd++;
				/*
				 * Record the ioctl CMD & ID - this will be
				 * used to check the ACK or NAK responses
				 * coming from below.
				 */
				sps->sps_ioc_id = iop->ioc_id;
				sps->sps_flags |= SPS_IOCQ;
				mutex_exit(&ppa->ppa_sta_lock);
			}
			putnext(ppa->ppa_lower_wq, mp);
			return;	/* don't ack or nak the request */
		}
		/*
		 * We are the last PPP module: there is no compression
		 * below us, so answer with a zeroed-out stats block.
		 */
		nmp = allocb(sizeof (struct ppp_comp_stats), BPRI_MED);
		if (nmp == NULL) {
			mutex_enter(&ppa->ppa_sta_lock);
			ppa->ppa_allocbfail++;
			mutex_exit(&ppa->ppa_sta_lock);
			error = ENOSR;
			break;
		}
		if (mp->b_cont != NULL) {
			freemsg(mp->b_cont);
		}
		mp->b_cont = nmp;
		pcsp = (struct ppp_comp_stats *)nmp->b_wptr;
		nmp->b_wptr += sizeof (struct ppp_comp_stats);
		bzero((caddr_t)pcsp, sizeof (struct ppp_comp_stats));
		count = msgsize(nmp);
		error = 0;		/* return success */
		break;
	}

	if (error == 0) {
		/* Success; tell the user. */
		miocack(q, mp, count, 0);
	} else {
		/* Failure; send error back upstream. */
		miocnak(q, mp, 0, error);
	}
}
691 
692 /*
693  * sppp_uwput()
694  *
695  * MT-Perimeters:
696  *    shared inner, shared outer.
697  *
698  * Description:
699  *    Upper write-side put procedure. Messages from above arrive here.
700  */
701 int
702 sppp_uwput(queue_t *q, mblk_t *mp)
703 {
704 	queue_t		*nextq;
705 	spppstr_t	*sps;
706 	sppa_t		*ppa;
707 	struct iocblk	*iop;
708 	int		error;
709 
710 	ASSERT(q != NULL && q->q_ptr != NULL);
711 	ASSERT(mp != NULL && mp->b_rptr != NULL);
712 	sps = (spppstr_t *)q->q_ptr;
713 	ppa = sps->sps_ppa;
714 
715 	switch (MTYPE(mp)) {
716 	case M_PCPROTO:
717 	case M_PROTO:
718 		if (IS_SPS_CONTROL(sps)) {
719 			ASSERT(ppa != NULL);
720 			/*
721 			 * Intentionally change this to a high priority
722 			 * message so it doesn't get queued up. M_PROTO is
723 			 * specifically used for signalling between pppd and its
724 			 * kernel-level component(s), such as ppptun, so we
725 			 * make sure that it doesn't get queued up behind
726 			 * data messages.
727 			 */
728 			MTYPE(mp) = M_PCPROTO;
729 			if ((ppa->ppa_lower_wq != NULL) &&
730 			    canputnext(ppa->ppa_lower_wq)) {
731 				mutex_enter(&ppa->ppa_sta_lock);
732 				ppa->ppa_mctlsfwd++;
733 				mutex_exit(&ppa->ppa_sta_lock);
734 				putnext(ppa->ppa_lower_wq, mp);
735 			} else {
736 				mutex_enter(&ppa->ppa_sta_lock);
737 				ppa->ppa_mctlsfwderr++;
738 				mutex_exit(&ppa->ppa_sta_lock);
739 				freemsg(mp);
740 			}
741 		} else {
742 			(void) sppp_mproto(q, mp, sps);
743 			return (0);
744 		}
745 		break;
746 	case M_DATA:
747 		if ((nextq = sppp_send(q, &mp, sps)) != NULL)
748 			putnext(nextq, mp);
749 		break;
750 	case M_IOCTL:
751 		error = EINVAL;
752 		iop = (struct iocblk *)mp->b_rptr;
753 		switch (iop->ioc_cmd) {
754 		case DLIOCRAW:
755 		case DL_IOC_HDR_INFO:
756 		case PPPIO_ATTACH:
757 		case PPPIO_DEBUG:
758 		case PPPIO_DETACH:
759 		case PPPIO_LASTMOD:
760 		case PPPIO_MRU:
761 		case PPPIO_MTU:
762 		case PPPIO_USETIMESTAMP:
763 		case PPPIO_BLOCKNP:
764 		case PPPIO_UNBLOCKNP:
765 			qwriter(q, mp, sppp_inner_ioctl, PERIM_INNER);
766 			return (0);
767 		case I_LINK:
768 		case I_UNLINK:
769 		case PPPIO_NEWPPA:
770 			qwriter(q, mp, sppp_outer_ioctl, PERIM_OUTER);
771 			return (0);
772 		case PPPIO_NPMODE:
773 		case PPPIO_GIDLE:
774 		case PPPIO_GTYPE:
775 		case PPPIO_GETSTAT64:
776 		case PPPIO_GETCSTAT:
777 			/*
778 			 * These require additional auto variables to
779 			 * handle, so (for optimization reasons)
780 			 * they're moved off to a separate function.
781 			 */
782 			sppp_ioctl(q, mp);
783 			return (0);
784 		case PPPIO_GETSTAT:
785 			break;			/* 32 bit interface gone */
786 		default:
787 			if (iop->ioc_cr == NULL ||
788 			    secpolicy_ppp_config(iop->ioc_cr) != 0) {
789 				error = EPERM;
790 				break;
791 			} else if ((ppa == NULL) ||
792 			    (ppa->ppa_lower_wq == NULL)) {
793 				break;		/* return EINVAL */
794 			}
795 			mutex_enter(&ppa->ppa_sta_lock);
796 			/*
797 			 * See comments in PPPIO_GETSTAT64 case
798 			 * in sppp_ioctl().
799 			 */
800 			if (IS_SPS_IOCQ(sps)) {
801 				mutex_exit(&ppa->ppa_sta_lock);
802 				if (!putq(q, mp)) {
803 					error = EAGAIN;
804 					break;
805 				}
806 				return (0);
807 			} else {
808 				ppa->ppa_ioctlsfwd++;
809 				/*
810 				 * Record the ioctl CMD & ID -
811 				 * this will be used to check the
812 				 * ACK or NAK responses coming from below.
813 				 */
814 				sps->sps_ioc_id = iop->ioc_id;
815 				sps->sps_flags |= SPS_IOCQ;
816 				mutex_exit(&ppa->ppa_sta_lock);
817 			}
818 			putnext(ppa->ppa_lower_wq, mp);
819 			return (0);	/* don't ack or nak the request */
820 		}
821 		/* Failure; send error back upstream. */
822 		miocnak(q, mp, 0, error);
823 		break;
824 	case M_FLUSH:
825 		if (*mp->b_rptr & FLUSHW) {
826 			flushq(q, FLUSHDATA);
827 		}
828 		if (*mp->b_rptr & FLUSHR) {
829 			*mp->b_rptr &= ~FLUSHW;
830 			qreply(q, mp);
831 		} else {
832 			freemsg(mp);
833 		}
834 		break;
835 	default:
836 		freemsg(mp);
837 		break;
838 	}
839 	return (0);
840 }
841 
842 /*
843  * sppp_uwsrv()
844  *
845  * MT-Perimeters:
846  *    exclusive inner, shared outer.
847  *
848  * Description:
849  *    Upper write-side service procedure. Note that this procedure does
850  *    not get called when a message is placed on our write-side queue, since
851  *    automatic queue scheduling has been turned off by noenable() when
852  *    the queue was opened. We do this on purpose, as we explicitly control
853  *    the write-side queue. Therefore, this procedure gets called when
854  *    the lower write service procedure qenable() the upper write stream queue.
855  */
856 int
857 sppp_uwsrv(queue_t *q)
858 {
859 	spppstr_t	*sps;
860 	sppa_t		*ppa;
861 	mblk_t		*mp;
862 	queue_t		*nextq;
863 	struct iocblk	*iop;
864 
865 	ASSERT(q != NULL && q->q_ptr != NULL);
866 	sps = (spppstr_t *)q->q_ptr;
867 
868 	while ((mp = getq(q)) != NULL) {
869 		if (MTYPE(mp) == M_IOCTL) {
870 			ppa = sps->sps_ppa;
871 			if ((ppa == NULL) || (ppa->ppa_lower_wq == NULL)) {
872 				miocnak(q, mp, 0, EINVAL);
873 				continue;
874 			}
875 
876 			iop = (struct iocblk *)mp->b_rptr;
877 			mutex_enter(&ppa->ppa_sta_lock);
878 			/*
879 			 * See comments in PPPIO_GETSTAT64 case
880 			 * in sppp_ioctl().
881 			 */
882 			if (IS_SPS_IOCQ(sps)) {
883 				mutex_exit(&ppa->ppa_sta_lock);
884 				if (putbq(q, mp) == 0)
885 					miocnak(q, mp, 0, EAGAIN);
886 				break;
887 			} else {
888 				ppa->ppa_ioctlsfwd++;
889 				sps->sps_ioc_id = iop->ioc_id;
890 				sps->sps_flags |= SPS_IOCQ;
891 				mutex_exit(&ppa->ppa_sta_lock);
892 				putnext(ppa->ppa_lower_wq, mp);
893 			}
894 		} else if ((nextq =
895 		    sppp_outpkt(q, &mp, msgdsize(mp), sps)) == NULL) {
896 			if (mp != NULL) {
897 				if (putbq(q, mp) == 0)
898 					freemsg(mp);
899 				break;
900 			}
901 		} else {
902 			putnext(nextq, mp);
903 		}
904 	}
905 	return (0);
906 }
907 
/*
 * Detach a stream from its ppa: drop the stream's reference, unlink it
 * from the ppa's sibling list (freeing the ppa outright if this was the
 * last reference), fix up the promiscuous count if the stream was still
 * marked promiscuous, and clear the stream's ppa linkage and
 * preallocated hangup message.
 */
void
sppp_remove_ppa(spppstr_t *sps)
{
	spppstr_t *nextsib;
	sppa_t *ppa = sps->sps_ppa;

	rw_enter(&ppa->ppa_sib_lock, RW_WRITER);
	if (ppa->ppa_refcnt <= 1) {
		/* Last user; tear the whole ppa down. */
		rw_exit(&ppa->ppa_sib_lock);
		sppp_free_ppa(ppa);
	} else {
		/* Unlink this stream from the sibling list. */
		nextsib = ppa->ppa_streams;
		if (nextsib == sps) {
			ppa->ppa_streams = sps->sps_nextsib;
		} else {
			while (nextsib->sps_nextsib != NULL) {
				if (nextsib->sps_nextsib == sps) {
					nextsib->sps_nextsib =
					    sps->sps_nextsib;
					break;
				}
				nextsib = nextsib->sps_nextsib;
			}
		}
		ppa->ppa_refcnt--;
		/*
		 * And if this stream was marked as promiscuous
		 * (SPS_PROMISC), then we need to update the
		 * promiscuous streams count. This should only happen
		 * when DL_DETACH_REQ is issued prior to marking the
		 * stream as non-promiscuous, through
		 * DL_PROMISCOFF_REQ request.
		 */
		if (IS_SPS_PROMISC(sps)) {
			ASSERT(ppa->ppa_promicnt > 0);
			ppa->ppa_promicnt--;
		}
		rw_exit(&ppa->ppa_sib_lock);
	}
	sps->sps_nextsib = NULL;
	sps->sps_ppa = NULL;
	freemsg(sps->sps_hangup);
	sps->sps_hangup = NULL;
}
952 
953 sppa_t *
954 sppp_find_ppa(uint32_t ppa_id)
955 {
956 	sppa_t *ppa;
957 
958 	for (ppa = ppa_list; ppa != NULL; ppa = ppa->ppa_nextppa) {
959 		if (ppa->ppa_ppa_id == ppa_id) {
960 			break;	/* found the ppa */
961 		}
962 	}
963 	return (ppa);
964 }
965 
966 /*
967  * sppp_inner_ioctl()
968  *
969  * MT-Perimeters:
970  *    exclusive inner, shared outer
971  *
972  * Description:
973  *    Called by sppp_uwput as a result of receiving ioctls which require
974  *    an exclusive access at the inner perimeter.
975  */
static void
sppp_inner_ioctl(queue_t *q, mblk_t *mp)
{
	spppstr_t	*sps;
	sppa_t		*ppa;
	struct iocblk	*iop;
	mblk_t		*nmp;
	int		error = EINVAL;	/* default reply is NAK/EINVAL */
	int		count = 0;	/* ack byte count for miocack */
	int		dbgcmd;
	int		mru, mtu;
	uint32_t	ppa_id;
	hrtime_t	hrtime;
	uint16_t	proto;

	ASSERT(q != NULL && q->q_ptr != NULL);
	ASSERT(mp != NULL && mp->b_rptr != NULL);

	sps = (spppstr_t *)q->q_ptr;
	ppa = sps->sps_ppa;
	iop = (struct iocblk *)mp->b_rptr;
	switch (iop->ioc_cmd) {
	case DLIOCRAW:
		/* Put a non-control stream into raw (M_DATA) mode. */
		if (IS_SPS_CONTROL(sps)) {
			break;		/* return EINVAL */
		}
		sps->sps_flags |= SPS_RAWDATA;
		error = 0;		/* return success */
		break;
	case DL_IOC_HDR_INFO:
		/*
		 * IP fastpath request: validate the attached DL_UNITDATA_REQ,
		 * then reply with a prebuilt 4-byte PPP header so that IP can
		 * prepend it directly to outbound M_DATA packets.
		 */
		if (IS_SPS_CONTROL(sps)) {
			break;		/* return EINVAL */
		} else if ((mp->b_cont == NULL) ||
		    *((t_uscalar_t *)mp->b_cont->b_rptr) != DL_UNITDATA_REQ ||
		    (MBLKL(mp->b_cont) < (sizeof (dl_unitdata_req_t) +
		    SPPP_ADDRL))) {
			error = EPROTO;
			break;
		} else if (ppa == NULL) {
			error = ENOLINK;
			break;
		}
		if ((nmp = allocb(PPP_HDRLEN, BPRI_MED)) == NULL) {
			mutex_enter(&ppa->ppa_sta_lock);
			ppa->ppa_allocbfail++;
			mutex_exit(&ppa->ppa_sta_lock);
			error = ENOMEM;
			break;
		}
		/* Canned PPP header: address, control, then 16-bit sap. */
		*(uchar_t *)nmp->b_wptr++ = PPP_ALLSTATIONS;
		*(uchar_t *)nmp->b_wptr++ = PPP_UI;
		*(uchar_t *)nmp->b_wptr++ = sps->sps_sap >> 8;
		*(uchar_t *)nmp->b_wptr++ = sps->sps_sap & 0xff;
		ASSERT(MBLKL(nmp) == PPP_HDRLEN);

		linkb(mp, nmp);
		sps->sps_flags |= SPS_FASTPATH;
		error = 0;		/* return success */
		count = msgsize(nmp);
		break;
	case PPPIO_ATTACH:
		/* Attach this (non-DLPI, non-control) stream to a ppa. */
		if (IS_SPS_CONTROL(sps) || IS_SPS_PIOATTACH(sps) ||
		    (sps->sps_dlstate != DL_UNATTACHED) ||
		    (iop->ioc_count != sizeof (uint32_t))) {
			break;		/* return EINVAL */
		} else if (mp->b_cont == NULL) {
			error = EPROTO;
			break;
		}
		ASSERT(mp->b_cont->b_rptr != NULL);
		/* If there's something here, it's detached. */
		if (ppa != NULL) {
			sppp_remove_ppa(sps);
		}
		ppa_id = *(uint32_t *)mp->b_cont->b_rptr;
		ppa = sppp_find_ppa(ppa_id);
		/*
		 * If we can't find it, then it's either because the requestor
		 * has supplied a wrong ppa_id to be attached to, or because
		 * the control stream for the specified ppa_id has been closed
		 * before we get here.
		 */
		if (ppa == NULL) {
			error = ENOENT;
			break;
		}
		/* Cross-zone attaches are not permitted. */
		if (iop->ioc_cr == NULL ||
		    ppa->ppa_zoneid != crgetzoneid(iop->ioc_cr)) {
			error = EPERM;
			break;
		}
		/*
		 * Preallocate the hangup message so that we're always
		 * able to send this upstream in the event of a
		 * catastrophic failure.
		 */
		if ((sps->sps_hangup = allocb(1, BPRI_MED)) == NULL) {
			error = ENOSR;
			break;
		}
		/*
		 * There are two ways to attach a stream to a ppa: one is
		 * through DLPI (DL_ATTACH_REQ) and the other is through
		 * PPPIO_ATTACH. This is why we need to distinguish whether or
		 * not a stream was allocated via PPPIO_ATTACH, so that we can
		 * properly detach it when we receive PPPIO_DETACH ioctl
		 * request.
		 */
		sps->sps_flags |= SPS_PIOATTACH;
		sps->sps_ppa = ppa;
		/*
		 * Add this stream to the head of the list of sibling streams
		 * which belong to the same ppa as specified.
		 */
		rw_enter(&ppa->ppa_sib_lock, RW_WRITER);
		ppa->ppa_refcnt++;
		sps->sps_nextsib = ppa->ppa_streams;
		ppa->ppa_streams = sps;
		rw_exit(&ppa->ppa_sib_lock);
		error = 0;		/* return success */
		break;
	case PPPIO_BLOCKNP:
	case PPPIO_UNBLOCKNP:
		/*
		 * Block or unblock delivery of packets for a given network
		 * protocol; requires PPP configuration privilege.
		 *
		 * NOTE(review): ppa is dereferenced below without a NULL
		 * check; this appears to assume the ioctl only arrives on an
		 * attached stream -- confirm callers guarantee this.
		 */
		if (iop->ioc_cr == NULL ||
		    secpolicy_ppp_config(iop->ioc_cr) != 0) {
			error = EPERM;
			break;
		}
		error = miocpullup(mp, sizeof (uint16_t));
		if (error != 0)
			break;
		ASSERT(mp->b_cont->b_rptr != NULL);
		proto = *(uint16_t *)mp->b_cont->b_rptr;
		if (iop->ioc_cmd == PPPIO_BLOCKNP) {
			uint32_t npflagpos = sppp_ppp2np(proto);
			/*
			 * Mark proto as blocked in ppa_npflag until the
			 * corresponding queues for proto have been plumbed.
			 */
			if (npflagpos != 0) {
				mutex_enter(&ppa->ppa_npmutex);
				ppa->ppa_npflag |= (1 << npflagpos);
				mutex_exit(&ppa->ppa_npmutex);
			} else {
				error = EINVAL;
			}
		} else {
			/*
			 * reset ppa_npflag and release proto
			 * packets that were being held in control queue.
			 */
			sppp_release_pkts(ppa, proto);
		}
		break;
	case PPPIO_DEBUG:
		if (iop->ioc_cr == NULL ||
		    secpolicy_ppp_config(iop->ioc_cr) != 0) {
			error = EPERM;
			break;
		} else if (iop->ioc_count != sizeof (uint32_t)) {
			break;		/* return EINVAL */
		} else if (mp->b_cont == NULL) {
			error = EPROTO;
			break;
		}
		ASSERT(mp->b_cont->b_rptr != NULL);
		dbgcmd = *(uint32_t *)mp->b_cont->b_rptr;
		/*
		 * We accept PPPDBG_LOG + PPPDBG_DRIVER value as an indication
		 * that SPS_KDEBUG needs to be enabled for this upper stream.
		 */
		if (dbgcmd == PPPDBG_LOG + PPPDBG_DRIVER) {
			sps->sps_flags |= SPS_KDEBUG;
			error = 0;	/* return success */
			break;
		}
		/*
		 * Otherwise, for any other values, we send them down only if
		 * there is an attachment and if the attachment has something
		 * linked underneath it.
		 */
		if ((ppa == NULL) || (ppa->ppa_lower_wq == NULL)) {
			error = ENOLINK;
			break;
		}
		mutex_enter(&ppa->ppa_sta_lock);
		/*
		 * See comments in PPPIO_GETSTAT64 case
		 * in sppp_ioctl().
		 */
		if (IS_SPS_IOCQ(sps)) {
			/*
			 * Another ioctl is already outstanding below; park
			 * this one on our own queue until the ACK/NAK for
			 * the earlier one re-enables us.
			 */
			mutex_exit(&ppa->ppa_sta_lock);
			if (!putq(q, mp)) {
				error = EAGAIN;
				break;
			}
			return;
		} else {
			ppa->ppa_ioctlsfwd++;
			/*
			 * Record the ioctl CMD & ID -
			 * this will be used to check the
			 * ACK or NAK responses coming from below.
			 */
			sps->sps_ioc_id = iop->ioc_id;
			sps->sps_flags |= SPS_IOCQ;
			mutex_exit(&ppa->ppa_sta_lock);
		}
		putnext(ppa->ppa_lower_wq, mp);
		return;			/* don't ack or nak the request */
	case PPPIO_DETACH:
		if (!IS_SPS_PIOATTACH(sps)) {
			break;		/* return EINVAL */
		}
		/*
		 * The SPS_PIOATTACH flag set on the stream tells us that
		 * the ppa field is still valid. In the event that the control
		 * stream be closed prior to this stream's detachment, the
		 * SPS_PIOATTACH flag would have been cleared from this stream
		 * during close; in that case we won't get here.
		 */
		ASSERT(ppa != NULL);
		ASSERT(ppa->ppa_ctl != sps);
		ASSERT(sps->sps_dlstate == DL_UNATTACHED);

		/*
		 * We don't actually detach anything until the stream is
		 * closed or reattached.
		 */

		sps->sps_flags &= ~SPS_PIOATTACH;
		error = 0;		/* return success */
		break;
	case PPPIO_LASTMOD:
		/* Mark this ppa as having no PPP modules linked below us. */
		if (!IS_SPS_CONTROL(sps)) {
			break;		/* return EINVAL */
		}
		ASSERT(ppa != NULL);
		ppa->ppa_flags |= PPA_LASTMOD;
		error = 0;		/* return success */
		break;
	case PPPIO_MRU:
		/* Set receive-side MRU; control stream only. */
		if (!IS_SPS_CONTROL(sps) ||
		    (iop->ioc_count != sizeof (uint32_t))) {
			break;		/* return EINVAL */
		} else if (mp->b_cont == NULL) {
			error = EPROTO;
			break;
		}
		ASSERT(ppa != NULL);
		ASSERT(mp->b_cont->b_rptr != NULL);
		mru = *(uint32_t *)mp->b_cont->b_rptr;
		if ((mru <= 0) || (mru > PPP_MAXMRU)) {
			error = EPROTO;
			break;
		}
		/* Clamp up to the protocol-mandated minimum MRU. */
		if (mru < PPP_MRU) {
			mru = PPP_MRU;
		}
		ppa->ppa_mru = (uint16_t)mru;
		/*
		 * If there's something beneath this driver for the ppa, then
		 * inform it (or them) of the MRU size. Only do this is we
		 * are not the last PPP module on the stream.
		 */
		if (!IS_PPA_LASTMOD(ppa) && (ppa->ppa_lower_wq != NULL)) {
			(void) putctl4(ppa->ppa_lower_wq, M_CTL, PPPCTL_MRU,
			    mru);
		}
		error = 0;		/* return success */
		break;
	case PPPIO_MTU:
		/* Set transmit-side MTU; control stream only. */
		if (!IS_SPS_CONTROL(sps) ||
		    (iop->ioc_count != sizeof (uint32_t))) {
			break;		/* return EINVAL */
		} else if (mp->b_cont == NULL) {
			error = EPROTO;
			break;
		}
		ASSERT(ppa != NULL);
		ASSERT(mp->b_cont->b_rptr != NULL);
		mtu = *(uint32_t *)mp->b_cont->b_rptr;
		if ((mtu <= 0) || (mtu > PPP_MAXMTU)) {
			error = EPROTO;
			break;
		}
		ppa->ppa_mtu = (uint16_t)mtu;
		/*
		 * If there's something beneath this driver for the ppa, then
		 * inform it (or them) of the MTU size. Only do this if we
		 * are not the last PPP module on the stream.
		 */
		if (!IS_PPA_LASTMOD(ppa) && (ppa->ppa_lower_wq != NULL)) {
			(void) putctl4(ppa->ppa_lower_wq, M_CTL, PPPCTL_MTU,
			    mtu);
		}
		error = 0;		/* return success */
		break;
	case PPPIO_USETIMESTAMP:
		/* Enable per-ppa tx/rx activity timestamping (idempotent). */
		if (!IS_SPS_CONTROL(sps)) {
			break;		/* return EINVAL */
		}
		if (!IS_PPA_TIMESTAMP(ppa)) {
			hrtime = gethrtime();
			ppa->ppa_lasttx = ppa->ppa_lastrx = hrtime;
			ppa->ppa_flags |= PPA_TIMESTAMP;
		}
		error = 0;
		break;
	}

	if (error == 0) {
		/* Success; tell the user */
		miocack(q, mp, count, 0);
	} else {
		/* Failure; send error back upstream */
		miocnak(q, mp, 0, error);
	}
}
1295 
1296 /*
1297  * sppp_outer_ioctl()
1298  *
1299  * MT-Perimeters:
1300  *    exclusive inner, exclusive outer
1301  *
1302  * Description:
1303  *    Called by sppp_uwput as a result of receiving ioctls which require
1304  *    an exclusive access at the outer perimeter.
1305  */
1306 static void
1307 sppp_outer_ioctl(queue_t *q, mblk_t *mp)
1308 {
1309 	spppstr_t	*sps = q->q_ptr;
1310 	spppstr_t	*nextsib;
1311 	queue_t		*lwq;
1312 	sppa_t		*ppa;
1313 	struct iocblk	*iop;
1314 	int		error = EINVAL;
1315 	int		count = 0;
1316 	uint32_t	ppa_id;
1317 	mblk_t		*nmp;
1318 	zoneid_t	zoneid;
1319 
1320 	sps = (spppstr_t *)q->q_ptr;
1321 	ppa = sps->sps_ppa;
1322 	iop = (struct iocblk *)mp->b_rptr;
1323 	switch (iop->ioc_cmd) {
1324 	case I_LINK:
1325 		if (!IS_SPS_CONTROL(sps)) {
1326 			break;		/* return EINVAL */
1327 		} else if (ppa->ppa_lower_wq != NULL) {
1328 			error = EEXIST;
1329 			break;
1330 		}
1331 		ASSERT(ppa->ppa_ctl != NULL);
1332 		ASSERT(sps->sps_npmode == NPMODE_PASS);
1333 		ASSERT(mp->b_cont != NULL && mp->b_cont->b_rptr != NULL);
1334 
1335 		lwq = ((struct linkblk *)mp->b_cont->b_rptr)->l_qbot;
1336 		ASSERT(lwq != NULL);
1337 
1338 		ppa->ppa_lower_wq = lwq;
1339 		lwq->q_ptr = RD(lwq)->q_ptr = (caddr_t)ppa;
1340 		/*
1341 		 * Unblock upper network streams which now feed this lower
1342 		 * stream. We don't need to hold ppa_sib_lock here, since we
1343 		 * are writer at the outer perimeter.
1344 		 */
1345 		if (WR(sps->sps_rq)->q_first != NULL)
1346 			qenable(WR(sps->sps_rq));
1347 		for (nextsib = ppa->ppa_streams; nextsib != NULL;
1348 		    nextsib = nextsib->sps_nextsib) {
1349 			nextsib->sps_npmode = NPMODE_PASS;
1350 			if (WR(nextsib->sps_rq)->q_first != NULL) {
1351 				qenable(WR(nextsib->sps_rq));
1352 			}
1353 		}
1354 
1355 		/*
1356 		 * Also unblock (run once) our lower read-side queue.  This is
1357 		 * where packets received while doing the I_LINK may be
1358 		 * languishing; see sppp_lrsrv.
1359 		 */
1360 		qenable(RD(lwq));
1361 
1362 		/*
1363 		 * Send useful information down to the modules which are now
1364 		 * linked below this driver (for this particular ppa). Only
1365 		 * do this if we are not the last PPP module on the stream.
1366 		 */
1367 		if (!IS_PPA_LASTMOD(ppa)) {
1368 			(void) putctl8(lwq, M_CTL, PPPCTL_UNIT,
1369 			    ppa->ppa_ppa_id);
1370 			(void) putctl4(lwq, M_CTL, PPPCTL_MRU, ppa->ppa_mru);
1371 			(void) putctl4(lwq, M_CTL, PPPCTL_MTU, ppa->ppa_mtu);
1372 		}
1373 
1374 		if (IS_SPS_KDEBUG(sps)) {
1375 			SPDEBUG(PPP_DRV_NAME
1376 			    "/%d: I_LINK lwq=0x%p sps=0x%p flags=0x%b ppa=0x%p "
1377 			    "flags=0x%b\n", sps->sps_mn_id,
1378 			    (void *)ppa->ppa_lower_wq, (void *)sps,
1379 			    sps->sps_flags, SPS_FLAGS_STR,
1380 			    (void *)ppa, ppa->ppa_flags,
1381 			    PPA_FLAGS_STR);
1382 		}
1383 		error = 0;		/* return success */
1384 		break;
1385 	case I_UNLINK:
1386 		ASSERT(IS_SPS_CONTROL(sps));
1387 		ASSERT(ppa != NULL);
1388 		lwq = ppa->ppa_lower_wq;
1389 		ASSERT(mp->b_cont != NULL && mp->b_cont->b_rptr != NULL);
1390 		ASSERT(lwq == ((struct linkblk *)mp->b_cont->b_rptr)->l_qbot);
1391 
1392 		if (IS_SPS_KDEBUG(sps)) {
1393 			SPDEBUG(PPP_DRV_NAME
1394 			    "/%d: I_UNLINK lwq=0x%p sps=0x%p flags=0x%b "
1395 			    "ppa=0x%p flags=0x%b\n", sps->sps_mn_id,
1396 			    (void *)lwq, (void *)sps, sps->sps_flags,
1397 			    SPS_FLAGS_STR, (void *)ppa, ppa->ppa_flags,
1398 			    PPA_FLAGS_STR);
1399 		}
1400 		/*
1401 		 * While accessing the outer perimeter exclusively, we
1402 		 * disassociate our ppa's lower_wq from the lower stream linked
1403 		 * beneath us, and we also disassociate our control stream from
1404 		 * the q_ptr of the lower stream.
1405 		 */
1406 		lwq->q_ptr = RD(lwq)->q_ptr = NULL;
1407 		ppa->ppa_lower_wq = NULL;
1408 		/*
1409 		 * Unblock streams which now feed back up the control stream,
1410 		 * and acknowledge the request. We don't need to hold
1411 		 * ppa_sib_lock here, since we are writer at the outer
1412 		 * perimeter.
1413 		 */
1414 		if (WR(sps->sps_rq)->q_first != NULL)
1415 			qenable(WR(sps->sps_rq));
1416 		for (nextsib = ppa->ppa_streams; nextsib != NULL;
1417 		    nextsib = nextsib->sps_nextsib) {
1418 			if (WR(nextsib->sps_rq)->q_first != NULL) {
1419 				qenable(WR(nextsib->sps_rq));
1420 			}
1421 		}
1422 		error = 0;		/* return success */
1423 		break;
1424 	case PPPIO_NEWPPA:
1425 		/*
1426 		 * Do sanity check to ensure that we don't accept PPPIO_NEWPPA
1427 		 * on a stream which DLPI is used (since certain DLPI messages
1428 		 * will cause state transition reflected in sps_dlstate,
1429 		 * changing it from its default DL_UNATTACHED value). In other
1430 		 * words, we won't allow a network/snoop stream to become
1431 		 * a control stream.
1432 		 */
1433 		if (iop->ioc_cr == NULL ||
1434 		    secpolicy_ppp_config(iop->ioc_cr) != 0) {
1435 			error = EPERM;
1436 			break;
1437 		} else if (IS_SPS_CONTROL(sps) || IS_SPS_PIOATTACH(sps) ||
1438 		    (ppa != NULL) || (sps->sps_dlstate != DL_UNATTACHED)) {
1439 			break;		/* return EINVAL */
1440 		}
1441 		/* Get requested unit number (if any) */
1442 		if (iop->ioc_count == sizeof (uint32_t) && mp->b_cont != NULL)
1443 			ppa_id = *(uint32_t *)mp->b_cont->b_rptr;
1444 		else
1445 			ppa_id = 0;
1446 		/* Get mblk to use for response message */
1447 		nmp = allocb(sizeof (uint32_t), BPRI_MED);
1448 		if (nmp == NULL) {
1449 			error = ENOSR;
1450 			break;
1451 		}
1452 		if (mp->b_cont != NULL) {
1453 			freemsg(mp->b_cont);
1454 		}
1455 		mp->b_cont = nmp;		/* chain our response mblk */
1456 		/*
1457 		 * Walk the global ppa list and determine the lowest
1458 		 * available ppa_id number to be used.
1459 		 */
1460 		if (ppa_id == (uint32_t)-1)
1461 			ppa_id = 0;
1462 		zoneid = crgetzoneid(iop->ioc_cr);
1463 		for (ppa = ppa_list; ppa != NULL; ppa = ppa->ppa_nextppa) {
1464 			if (ppa_id == (uint32_t)-2) {
1465 				if (ppa->ppa_ctl == NULL &&
1466 				    ppa->ppa_zoneid == zoneid)
1467 					break;
1468 			} else {
1469 				if (ppa_id < ppa->ppa_ppa_id)
1470 					break;
1471 				if (ppa_id == ppa->ppa_ppa_id)
1472 					++ppa_id;
1473 			}
1474 		}
1475 		if (ppa_id == (uint32_t)-2) {
1476 			if (ppa == NULL) {
1477 				error = ENXIO;
1478 				break;
1479 			}
1480 			/* Clear timestamp and lastmod flags */
1481 			ppa->ppa_flags = 0;
1482 		} else {
1483 			ppa = sppp_create_ppa(ppa_id, zoneid);
1484 			if (ppa == NULL) {
1485 				error = ENOMEM;
1486 				break;
1487 			}
1488 		}
1489 
1490 		sps->sps_ppa = ppa;		/* chain the ppa structure */
1491 		sps->sps_npmode = NPMODE_PASS;	/* network packets may travel */
1492 		sps->sps_flags |= SPS_CONTROL;	/* this is the control stream */
1493 
1494 		ppa->ppa_refcnt++;		/* new PPA reference */
1495 		ppa->ppa_ctl = sps;		/* back ptr to upper stream */
1496 		/*
1497 		 * Return the newly created ppa_id to the requestor and
1498 		 * acnowledge the request.
1499 		 */
1500 		*(uint32_t *)nmp->b_wptr = ppa->ppa_ppa_id;
1501 		nmp->b_wptr += sizeof (uint32_t);
1502 
1503 		if (IS_SPS_KDEBUG(sps)) {
1504 			SPDEBUG(PPP_DRV_NAME
1505 			    "/%d: PPPIO_NEWPPA ppa_id=%d sps=0x%p flags=0x%b "
1506 			    "ppa=0x%p flags=0x%b\n", sps->sps_mn_id, ppa_id,
1507 			    (void *)sps, sps->sps_flags, SPS_FLAGS_STR,
1508 			    (void *)ppa, ppa->ppa_flags,
1509 			    PPA_FLAGS_STR);
1510 		}
1511 		count = msgsize(nmp);
1512 		error = 0;
1513 		break;
1514 	}
1515 
1516 	if (error == 0) {
1517 		/* Success; tell the user. */
1518 		miocack(q, mp, count, 0);
1519 	} else {
1520 		/* Failure; send error back upstream. */
1521 		miocnak(q, mp, 0, error);
1522 	}
1523 }
1524 
1525 /*
1526  * sppp_send()
1527  *
1528  * MT-Perimeters:
1529  *    shared inner, shared outer.
1530  *
1531  * Description:
1532  *    Called by sppp_uwput to handle M_DATA message type.  Returns
1533  *    queue_t for putnext, or NULL to mean that the packet was
1534  *    handled internally.
1535  */
static queue_t *
sppp_send(queue_t *q, mblk_t **mpp, spppstr_t *sps)
{
	mblk_t	*mp;
	sppa_t	*ppa;
	int	is_promisc;
	int	msize;
	int	error = 0;
	queue_t	*nextq;

	ASSERT(mpp != NULL);
	mp = *mpp;
	ASSERT(q != NULL && q->q_ptr != NULL);
	ASSERT(mp != NULL && mp->b_rptr != NULL);
	ASSERT(sps != NULL);
	ASSERT(q->q_ptr == sps);
	/*
	 * We only let M_DATA through if the sender is either the control
	 * stream (for PPP control packets) or one of the network streams
	 * (for IP packets) in IP fastpath mode. If this stream is not attached
	 * to any ppas, then discard data coming down through this stream.
	 */
	ppa = sps->sps_ppa;
	if (ppa == NULL) {
		ASSERT(!IS_SPS_CONTROL(sps));
		error = ENOLINK;
	} else if (!IS_SPS_CONTROL(sps) && !IS_SPS_FASTPATH(sps)) {
		error = EPROTO;
	}
	if (error != 0) {
		/* Reject the message upstream with M_ERROR semantics. */
		merror(q, mp, error);
		return (NULL);
	}
	msize = msgdsize(mp);
	if (msize > (ppa->ppa_mtu + PPP_HDRLEN)) {
		/* Log, and send it anyway */
		mutex_enter(&ppa->ppa_sta_lock);
		ppa->ppa_otoolongs++;
		mutex_exit(&ppa->ppa_sta_lock);
	} else if (msize < PPP_HDRLEN) {
		/*
		 * Log, and send it anyway. We log it because we get things
		 * in M_DATA form here, which tells us that the sender is
		 * either IP in fastpath transmission mode, or pppd. In both
		 * cases, they are currently expected to send the 4-bytes
		 * PPP header in front of any possible payloads.
		 */
		mutex_enter(&ppa->ppa_sta_lock);
		ppa->ppa_orunts++;
		mutex_exit(&ppa->ppa_sta_lock);
	}

	if (IS_SPS_KDEBUG(sps)) {
		SPDEBUG(PPP_DRV_NAME
		    "/%d: M_DATA send (%d bytes) sps=0x%p flags=0x%b "
		    "ppa=0x%p flags=0x%b\n", sps->sps_mn_id, msize,
		    (void *)sps, sps->sps_flags, SPS_FLAGS_STR,
		    (void *)ppa, ppa->ppa_flags, PPA_FLAGS_STR);
	}
	/*
	 * Should there be any promiscuous stream(s), send the data up
	 * for each promiscuous stream that we recognize. Make sure that
	 * for fastpath, we skip the PPP header in the M_DATA mblk. We skip
	 * the control stream as we obviously never allow the control stream
	 * to become promiscous and bind to PPP_ALLSAP.
	 */
	rw_enter(&ppa->ppa_sib_lock, RW_READER);
	is_promisc = sps->sps_ppa->ppa_promicnt;
	if (is_promisc) {
		ASSERT(ppa->ppa_streams != NULL);
		sppp_dlprsendup(ppa->ppa_streams, mp, sps->sps_sap, B_TRUE);
	}
	rw_exit(&ppa->ppa_sib_lock);
	/*
	 * Only time-stamp the packet with hrtime if the upper stream
	 * is configured to do so.  PPP control (negotiation) messages
	 * are never considered link activity; only data is activity.
	 */
	if (!IS_SPS_CONTROL(sps) && IS_PPA_TIMESTAMP(ppa)) {
		ppa->ppa_lasttx = gethrtime();
	}
	/*
	 * If there's already a message in the write-side service queue,
	 * then queue this message there as well, otherwise, try to send
	 * it down to the module immediately below us.
	 */
	if (q->q_first != NULL ||
	    (nextq = sppp_outpkt(q, mpp, msize, sps)) == NULL) {
		/*
		 * sppp_outpkt may have consumed or freed the message (it
		 * sets *mpp = NULL in that case); only queue what remains.
		 */
		mp = *mpp;
		if (mp != NULL && putq(q, mp) == 0) {
			/* putq failed: count the drop and free the message. */
			mutex_enter(&ppa->ppa_sta_lock);
			ppa->ppa_oqdropped++;
			mutex_exit(&ppa->ppa_sta_lock);
			freemsg(mp);
		}
		return (NULL);
	}
	return (nextq);
}
1635 
1636 /*
1637  * sppp_outpkt()
1638  *
1639  * MT-Perimeters:
1640  *    shared inner, shared outer (if called from sppp_wput, sppp_dlunitdatareq).
1641  *    exclusive inner, shared outer (if called from sppp_wsrv).
1642  *
1643  * Description:
1644  *    Called from 1) sppp_uwput when processing a M_DATA fastpath message,
1645  *    or 2) sppp_uwsrv when processing the upper write-side service queue.
1646  *    For both cases, it prepares to send the data to the module below
1647  *    this driver if there is a lower stream linked underneath. If none, then
1648  *    the data will be sent upstream via the control channel to pppd.
1649  *
1650  * Returns:
1651  *	Non-NULL queue_t if message should be sent now, otherwise
1652  *	if *mpp == NULL, then message was freed, otherwise put *mpp
1653  *	(back) on the queue.  (Does not do putq/putbq, since it's
1654  *	called both from srv and put procedures.)
1655  */
static queue_t *
sppp_outpkt(queue_t *q, mblk_t **mpp, int msize, spppstr_t *sps)
{
	mblk_t		*mp;
	sppa_t		*ppa;
	enum NPmode	npmode;
	mblk_t		*mpnew;

	ASSERT(mpp != NULL);
	mp = *mpp;
	ASSERT(q != NULL && q->q_ptr != NULL);
	ASSERT(mp != NULL && mp->b_rptr != NULL);
	ASSERT(sps != NULL);

	ppa = sps->sps_ppa;
	npmode = sps->sps_npmode;

	if (npmode == NPMODE_QUEUE) {
		/* Caller keeps *mpp and queues it; see function contract. */
		ASSERT(!IS_SPS_CONTROL(sps));
		return (NULL);	/* queue it for later */
	} else if (ppa == NULL || ppa->ppa_ctl == NULL ||
	    npmode == NPMODE_DROP || npmode == NPMODE_ERROR) {
		/*
		 * This can not be the control stream, as it must always have
		 * a valid ppa, and its npmode must always be NPMODE_PASS.
		 */
		ASSERT(!IS_SPS_CONTROL(sps));
		if (npmode == NPMODE_DROP) {
			freemsg(mp);
		} else {
			/*
			 * If we no longer have the control stream, or if the
			 * mode is set to NPMODE_ERROR, then we need to tell IP
			 * that the interface need to be marked as down. In
			 * other words, we tell IP to be quiescent.
			 */
			merror(q, mp, EPROTO);
		}
		*mpp = NULL;	/* message consumed; don't requeue it */
		return (NULL);	/* don't queue it */
	}
	/*
	 * Do we have a driver stream linked underneath ? If not, we need to
	 * notify pppd that the link needs to be brought up and configure
	 * this upper stream to drop subsequent outgoing packets. This is
	 * for demand-dialing, in which case pppd has done the IP plumbing
	 * but hasn't linked the driver stream underneath us. Therefore, when
	 * a packet is sent down the IP interface, a notification message
	 * will be sent up the control stream to pppd in order for it to
	 * establish the physical link. The driver stream is then expected
	 * to be linked underneath after physical link establishment is done.
	 */
	if (ppa->ppa_lower_wq == NULL) {
		ASSERT(ppa->ppa_ctl != NULL);
		ASSERT(ppa->ppa_ctl->sps_rq != NULL);

		*mpp = NULL;	/* we take ownership of the message */
		mpnew = create_lsmsg(PPP_LINKSTAT_NEEDUP);
		if (mpnew == NULL) {
			freemsg(mp);
			mutex_enter(&ppa->ppa_sta_lock);
			ppa->ppa_allocbfail++;
			mutex_exit(&ppa->ppa_sta_lock);
			return (NULL);	/* don't queue it */
		}
		/* Include the data in the message for logging. */
		mpnew->b_cont = mp;
		mutex_enter(&ppa->ppa_sta_lock);
		ppa->ppa_lsneedup++;
		mutex_exit(&ppa->ppa_sta_lock);
		/*
		 * We need to set the mode to NPMODE_DROP, but should only
		 * do so when this stream is not the control stream.
		 */
		if (!IS_SPS_CONTROL(sps)) {
			sps->sps_npmode = NPMODE_DROP;
		}
		/* Wake pppd via the control stream's read side. */
		putnext(ppa->ppa_ctl->sps_rq, mpnew);
		return (NULL);	/* don't queue it */
	}
	/*
	 * If so, then try to send it down. The lower queue is only ever
	 * detached while holding an exclusive lock on the whole driver,
	 * so we can be confident that the lower queue is still there.
	 */
	if (bcanputnext(ppa->ppa_lower_wq, mp->b_band)) {
		mutex_enter(&ppa->ppa_sta_lock);
		ppa->ppa_stats.p.ppp_opackets++;
		if (IS_SPS_CONTROL(sps)) {
			ppa->ppa_opkt_ctl++;
		}
		ppa->ppa_stats.p.ppp_obytes += msize;
		mutex_exit(&ppa->ppa_sta_lock);
		return (ppa->ppa_lower_wq);	/* don't queue it */
	}
	return (NULL);	/* queue it for later */
}
1753 
1754 /*
1755  * sppp_lwsrv()
1756  *
1757  * MT-Perimeters:
1758  *    exclusive inner, shared outer.
1759  *
1760  * Description:
1761  *    Lower write-side service procedure. No messages are ever placed on
1762  *    the write queue here, this just back-enables all upper write side
1763  *    service procedures.
1764  */
1765 int
1766 sppp_lwsrv(queue_t *q)
1767 {
1768 	sppa_t		*ppa;
1769 	spppstr_t	*nextsib;
1770 
1771 	ASSERT(q != NULL && q->q_ptr != NULL);
1772 	ppa = (sppa_t *)q->q_ptr;
1773 	ASSERT(ppa != NULL);
1774 
1775 	rw_enter(&ppa->ppa_sib_lock, RW_READER);
1776 	if ((nextsib = ppa->ppa_ctl) != NULL &&
1777 	    WR(nextsib->sps_rq)->q_first != NULL)
1778 		qenable(WR(nextsib->sps_rq));
1779 	for (nextsib = ppa->ppa_streams; nextsib != NULL;
1780 	    nextsib = nextsib->sps_nextsib) {
1781 		if (WR(nextsib->sps_rq)->q_first != NULL) {
1782 			qenable(WR(nextsib->sps_rq));
1783 		}
1784 	}
1785 	rw_exit(&ppa->ppa_sib_lock);
1786 	return (0);
1787 }
1788 
1789 /*
1790  * sppp_lrput()
1791  *
1792  * MT-Perimeters:
1793  *    shared inner, shared outer.
1794  *
1795  * Description:
1796  *    Lower read-side put procedure. Messages from below get here.
1797  *    Data messages are handled separately to limit stack usage
1798  *    going into IP.
1799  *
1800  *    Note that during I_UNLINK processing, it's possible for a downstream
1801  *    message to enable upstream data (due to pass_wput() removing the
1802  *    SQ_BLOCKED flag), and thus we must protect against a NULL sppa pointer.
1803  *    In this case, the only thing above us is passthru, and we might as well
1804  *    discard.
1805  */
1806 int
1807 sppp_lrput(queue_t *q, mblk_t *mp)
1808 {
1809 	sppa_t		*ppa;
1810 	spppstr_t	*sps;
1811 
1812 	if ((ppa = q->q_ptr) == NULL) {
1813 		freemsg(mp);
1814 		return (0);
1815 	}
1816 
1817 	sps = ppa->ppa_ctl;
1818 
1819 	if (MTYPE(mp) != M_DATA) {
1820 		sppp_recv_nondata(q, mp, sps);
1821 	} else if (sps == NULL) {
1822 		freemsg(mp);
1823 	} else if ((q = sppp_recv(q, &mp, sps)) != NULL) {
1824 		putnext(q, mp);
1825 	}
1826 	return (0);
1827 }
1828 
1829 /*
1830  * sppp_lrsrv()
1831  *
1832  * MT-Perimeters:
1833  *    exclusive inner, shared outer.
1834  *
1835  * Description:
1836  *    Lower read-side service procedure.  This is run once after the I_LINK
1837  *    occurs in order to clean up any packets that came in while we were
1838  *    transferring in the lower stream.  Otherwise, it's not used.
1839  */
1840 int
1841 sppp_lrsrv(queue_t *q)
1842 {
1843 	mblk_t *mp;
1844 
1845 	while ((mp = getq(q)) != NULL)
1846 		(void) sppp_lrput(q, mp);
1847 	return (0);
1848 }
1849 
1850 /*
1851  * sppp_recv_nondata()
1852  *
1853  * MT-Perimeters:
1854  *    shared inner, shared outer.
1855  *
1856  * Description:
1857  *    All received non-data messages come through here.
1858  */
1859 static void
1860 sppp_recv_nondata(queue_t *q, mblk_t *mp, spppstr_t *ctlsps)
1861 {
1862 	sppa_t		*ppa;
1863 	spppstr_t	*destsps;
1864 	struct iocblk	*iop;
1865 
1866 	ppa = (sppa_t *)q->q_ptr;
1867 	ctlsps = ppa->ppa_ctl;
1868 
1869 	switch (MTYPE(mp)) {
1870 	case M_CTL:
1871 		mutex_enter(&ppa->ppa_sta_lock);
1872 		if (*mp->b_rptr == PPPCTL_IERROR) {
1873 			ppa->ppa_stats.p.ppp_ierrors++;
1874 			ppa->ppa_ierr_low++;
1875 			ppa->ppa_mctlsknown++;
1876 		} else if (*mp->b_rptr == PPPCTL_OERROR) {
1877 			ppa->ppa_stats.p.ppp_oerrors++;
1878 			ppa->ppa_oerr_low++;
1879 			ppa->ppa_mctlsknown++;
1880 		} else {
1881 			ppa->ppa_mctlsunknown++;
1882 		}
1883 		mutex_exit(&ppa->ppa_sta_lock);
1884 		freemsg(mp);
1885 		break;
1886 	case M_IOCTL:
1887 		miocnak(q, mp, 0, EINVAL);
1888 		break;
1889 	case M_IOCACK:
1890 	case M_IOCNAK:
1891 		iop = (struct iocblk *)mp->b_rptr;
1892 		ASSERT(iop != NULL);
1893 		/*
1894 		 * Attempt to match up the response with the stream that the
1895 		 * request came from. If ioc_id doesn't match the one that we
1896 		 * recorded, then discard this message.
1897 		 */
1898 		rw_enter(&ppa->ppa_sib_lock, RW_READER);
1899 		if ((destsps = ctlsps) == NULL ||
1900 		    destsps->sps_ioc_id != iop->ioc_id) {
1901 			destsps = ppa->ppa_streams;
1902 			while (destsps != NULL) {
1903 				if (destsps->sps_ioc_id == iop->ioc_id) {
1904 					break;	/* found the upper stream */
1905 				}
1906 				destsps = destsps->sps_nextsib;
1907 			}
1908 		}
1909 		rw_exit(&ppa->ppa_sib_lock);
1910 		if (destsps == NULL) {
1911 			mutex_enter(&ppa->ppa_sta_lock);
1912 			ppa->ppa_ioctlsfwderr++;
1913 			mutex_exit(&ppa->ppa_sta_lock);
1914 			freemsg(mp);
1915 			break;
1916 		}
1917 		mutex_enter(&ppa->ppa_sta_lock);
1918 		ppa->ppa_ioctlsfwdok++;
1919 
1920 		/*
1921 		 * Clear SPS_IOCQ and enable the lower write side queue,
1922 		 * this would allow the upper stream service routine
1923 		 * to start processing the queue for pending messages.
1924 		 * sppp_lwsrv -> sppp_uwsrv.
1925 		 */
1926 		destsps->sps_flags &= ~SPS_IOCQ;
1927 		mutex_exit(&ppa->ppa_sta_lock);
1928 		qenable(WR(destsps->sps_rq));
1929 
1930 		putnext(destsps->sps_rq, mp);
1931 		break;
1932 	case M_HANGUP:
1933 		/*
1934 		 * Free the original mblk_t. We don't really want to send
1935 		 * a M_HANGUP message upstream, so we need to translate this
1936 		 * message into something else.
1937 		 */
1938 		freemsg(mp);
1939 		if (ctlsps == NULL)
1940 			break;
1941 		mp = create_lsmsg(PPP_LINKSTAT_HANGUP);
1942 		if (mp == NULL) {
1943 			mutex_enter(&ppa->ppa_sta_lock);
1944 			ppa->ppa_allocbfail++;
1945 			mutex_exit(&ppa->ppa_sta_lock);
1946 			break;
1947 		}
1948 		mutex_enter(&ppa->ppa_sta_lock);
1949 		ppa->ppa_lsdown++;
1950 		mutex_exit(&ppa->ppa_sta_lock);
1951 		putnext(ctlsps->sps_rq, mp);
1952 		break;
1953 	case M_FLUSH:
1954 		if (*mp->b_rptr & FLUSHR) {
1955 			flushq(q, FLUSHDATA);
1956 		}
1957 		if (*mp->b_rptr & FLUSHW) {
1958 			*mp->b_rptr &= ~FLUSHR;
1959 			qreply(q, mp);
1960 		} else {
1961 			freemsg(mp);
1962 		}
1963 		break;
1964 	default:
1965 		if (ctlsps != NULL &&
1966 		    (queclass(mp) == QPCTL) || canputnext(ctlsps->sps_rq)) {
1967 			putnext(ctlsps->sps_rq, mp);
1968 		} else {
1969 			mutex_enter(&ppa->ppa_sta_lock);
1970 			ppa->ppa_iqdropped++;
1971 			mutex_exit(&ppa->ppa_sta_lock);
1972 			freemsg(mp);
1973 		}
1974 		break;
1975 	}
1976 }
1977 
1978 /*
1979  * sppp_recv()
1980  *
1981  * MT-Perimeters:
1982  *    shared inner, shared outer.
1983  *
1984  * Description:
1985  *    Receive function called by sppp_lrput.  Finds appropriate
1986  *    receive stream and does accounting.
1987  */
static queue_t *
sppp_recv(queue_t *q, mblk_t **mpp, spppstr_t *ctlsps)
{
	mblk_t		*mp;
	int		len;
	sppa_t		*ppa;
	spppstr_t	*destsps;
	mblk_t		*zmp;
	uint32_t	npflagpos;

	ASSERT(mpp != NULL);
	mp = *mpp;
	ASSERT(q != NULL && q->q_ptr != NULL);
	ASSERT(mp != NULL && mp->b_rptr != NULL);
	ASSERT(ctlsps != NULL);
	ASSERT(IS_SPS_CONTROL(ctlsps));
	ppa = ctlsps->sps_ppa;
	ASSERT(ppa != NULL && ppa->ppa_ctl != NULL);

	/* Account for the received bytes before any check below can drop mp */
	len = msgdsize(mp);
	mutex_enter(&ppa->ppa_sta_lock);
	ppa->ppa_stats.p.ppp_ibytes += len;
	mutex_exit(&ppa->ppa_sta_lock);
	/*
	 * If the entire data size of the mblk is less than the length of the
	 * PPP header, then free it. We can't do much with such message anyway,
	 * since we can't really determine what the PPP protocol type is.
	 */
	if (len < PPP_HDRLEN) {
		/* Log, and free it */
		mutex_enter(&ppa->ppa_sta_lock);
		ppa->ppa_irunts++;
		mutex_exit(&ppa->ppa_sta_lock);
		freemsg(mp);
		return (NULL);
	} else if (len > (ppa->ppa_mru + PPP_HDRLEN)) {
		/* Log, and accept it anyway */
		mutex_enter(&ppa->ppa_sta_lock);
		ppa->ppa_itoolongs++;
		mutex_exit(&ppa->ppa_sta_lock);
	}
	/*
	 * We need at least be able to read the PPP protocol from the header,
	 * so if the first message block is too small, then we concatenate the
	 * rest of the following blocks into one message.
	 */
	if (MBLKL(mp) < PPP_HDRLEN) {
		zmp = msgpullup(mp, PPP_HDRLEN);
		freemsg(mp);	/* msgpullup copies; original is now done with */
		mp = zmp;
		if (mp == NULL) {
			mutex_enter(&ppa->ppa_sta_lock);
			ppa->ppa_allocbfail++;
			mutex_exit(&ppa->ppa_sta_lock);
			return (NULL);
		}
		*mpp = mp;	/* make the replacement visible to the caller */
	}
	/*
	 * If the matching network-layer upper stream for this packet's PPP
	 * protocol (sap) has not yet been plumbed and configured, hold the
	 * packet on the control stream's read queue until it has been (the
	 * held packets are released by sppp_release_pkts()).
	 */
	npflagpos = sppp_ppp2np(PPP_PROTOCOL(mp->b_rptr));
	mutex_enter(&ppa->ppa_npmutex);
	if (npflagpos != 0 && (ppa->ppa_npflag & (1 << npflagpos))) {
		/*
		 * proto is currently blocked; Hold up to 4 packets
		 * in the kernel.
		 */
		if (ppa->ppa_holdpkts[npflagpos] > 3 ||
		    putq(ctlsps->sps_rq, mp) == 0)
			freemsg(mp);
		else
			ppa->ppa_holdpkts[npflagpos]++;
		mutex_exit(&ppa->ppa_npmutex);
		return (NULL);
	}
	mutex_exit(&ppa->ppa_npmutex);
	/*
	 * Try to find a matching network-layer upper stream for the specified
	 * PPP protocol (sap), and if none is found, send this frame up the
	 * control stream.
	 */
	destsps = sppp_inpkt(q, mp, ctlsps);
	if (destsps == NULL) {
		mutex_enter(&ppa->ppa_sta_lock);
		ppa->ppa_ipkt_ctl++;
		mutex_exit(&ppa->ppa_sta_lock);
		if (canputnext(ctlsps->sps_rq)) {
			if (IS_SPS_KDEBUG(ctlsps)) {
				SPDEBUG(PPP_DRV_NAME
				    "/%d: M_DATA recv (%d bytes) sps=0x%p "
				    "flags=0x%b ppa=0x%p flags=0x%b\n",
				    ctlsps->sps_mn_id, len, (void *)ctlsps,
				    ctlsps->sps_flags, SPS_FLAGS_STR,
				    (void *)ppa, ppa->ppa_flags,
				    PPA_FLAGS_STR);
			}
			return (ctlsps->sps_rq);
		} else {
			/* Control stream is flow-blocked; drop and count it */
			mutex_enter(&ppa->ppa_sta_lock);
			ppa->ppa_iqdropped++;
			mutex_exit(&ppa->ppa_sta_lock);
			freemsg(mp);
			return (NULL);
		}
	}
	if (canputnext(destsps->sps_rq)) {
		if (IS_SPS_KDEBUG(destsps)) {
			SPDEBUG(PPP_DRV_NAME
			    "/%d: M_DATA recv (%d bytes) sps=0x%p flags=0x%b "
			    "ppa=0x%p flags=0x%b\n", destsps->sps_mn_id, len,
			    (void *)destsps, destsps->sps_flags,
			    SPS_FLAGS_STR, (void *)ppa, ppa->ppa_flags,
			    PPA_FLAGS_STR);
		}
		/*
		 * If fastpath is enabled on the network-layer stream, then
		 * make sure we skip over the PPP header, otherwise, we wrap
		 * the message in a DLPI message.
		 */
		if (IS_SPS_FASTPATH(destsps)) {
			mp->b_rptr += PPP_HDRLEN;
			return (destsps->sps_rq);
		} else {
			spppstr_t *uqs = (spppstr_t *)destsps->sps_rq->q_ptr;
			ASSERT(uqs != NULL);
			mp->b_rptr += PPP_HDRLEN;
			mp = sppp_dladdud(uqs, mp, uqs->sps_sap, B_FALSE);
			if (mp != NULL) {
				*mpp = mp;	/* DLPI-wrapped replacement */
				return (destsps->sps_rq);
			} else {
				mutex_enter(&ppa->ppa_sta_lock);
				ppa->ppa_allocbfail++;
				mutex_exit(&ppa->ppa_sta_lock);
				/* mp already freed by sppp_dladdud */
				return (NULL);
			}
		}
	} else {
		/* Destination upper stream is flow-blocked; drop the packet */
		mutex_enter(&ppa->ppa_sta_lock);
		ppa->ppa_iqdropped++;
		mutex_exit(&ppa->ppa_sta_lock);
		freemsg(mp);
		return (NULL);
	}
}
2137 
2138 /*
2139  * sppp_inpkt()
2140  *
2141  * MT-Perimeters:
2142  *    shared inner, shared outer.
2143  *
2144  * Description:
2145  *    Find the destination upper stream for the received packet, called
2146  *    from sppp_recv.
2147  *
2148  * Returns:
2149  *    ptr to destination upper network stream, or NULL for control stream.
2150  */
2151 /* ARGSUSED */
2152 static spppstr_t *
2153 sppp_inpkt(queue_t *q, mblk_t *mp, spppstr_t *ctlsps)
2154 {
2155 	spppstr_t	*destsps = NULL;
2156 	sppa_t		*ppa;
2157 	uint16_t	proto;
2158 	int		is_promisc;
2159 
2160 	ASSERT(q != NULL && q->q_ptr != NULL);
2161 	ASSERT(mp != NULL && mp->b_rptr != NULL);
2162 	ASSERT(IS_SPS_CONTROL(ctlsps));
2163 	ppa = ctlsps->sps_ppa;
2164 	ASSERT(ppa != NULL);
2165 	/*
2166 	 * From RFC 1661 (Section 2):
2167 	 *
2168 	 * The Protocol field is one or two octets, and its value identifies
2169 	 * the datagram encapsulated in the Information field of the packet.
2170 	 * The field is transmitted and received most significant octet first.
2171 	 *
2172 	 * The structure of this field is consistent with the ISO 3309
2173 	 * extension mechanism for address fields.  All Protocols MUST be odd;
2174 	 * the least significant bit of the least significant octet MUST equal
2175 	 * "1".  Also, all Protocols MUST be assigned such that the least
2176 	 * significant bit of the most significant octet equals "0". Frames
2177 	 * received which don't comply with these rules MUST be treated as
2178 	 * having an unrecognized Protocol.
2179 	 *
2180 	 * Protocol field values in the "0***" to "3***" range identify the
2181 	 * network-layer protocol of specific packets, and values in the
2182 	 * "8***" to "b***" range identify packets belonging to the associated
2183 	 * Network Control Protocols (NCPs), if any.
2184 	 *
2185 	 * Protocol field values in the "4***" to "7***" range are used for
2186 	 * protocols with low volume traffic which have no associated NCP.
2187 	 * Protocol field values in the "c***" to "f***" range identify packets
2188 	 * as link-layer Control Protocols (such as LCP).
2189 	 */
2190 	proto = PPP_PROTOCOL(mp->b_rptr);
2191 	mutex_enter(&ppa->ppa_sta_lock);
2192 	ppa->ppa_stats.p.ppp_ipackets++;
2193 	mutex_exit(&ppa->ppa_sta_lock);
2194 	/*
2195 	 * We check if this is not a network-layer protocol, and if so,
2196 	 * then send this packet up the control stream.
2197 	 */
2198 	if (proto > 0x7fff) {
2199 		goto inpkt_done;	/* send it up the control stream */
2200 	}
2201 	/*
2202 	 * Try to grab the destination upper stream from the network-layer
2203 	 * stream cache for this ppa for PPP_IP (0x0021) or PPP_IPV6 (0x0057)
2204 	 * protocol types. Otherwise, if the type is not known to the cache,
2205 	 * or if its sap can't be matched with any of the upper streams, then
2206 	 * send this packet up the control stream so that it can be rejected.
2207 	 */
2208 	if (proto == PPP_IP) {
2209 		destsps = ppa->ppa_ip_cache;
2210 	} else if (proto == PPP_IPV6) {
2211 		destsps = ppa->ppa_ip6_cache;
2212 	}
2213 	/*
2214 	 * Toss this one away up the control stream if there's no matching sap;
2215 	 * this way the protocol can be rejected (destsps is NULL).
2216 	 */
2217 
2218 inpkt_done:
2219 	/*
2220 	 * Only time-stamp the packet with hrtime if the upper stream
2221 	 * is configured to do so.  PPP control (negotiation) messages
2222 	 * are never considered link activity; only data is activity.
2223 	 */
2224 	if (destsps != NULL && IS_PPA_TIMESTAMP(ppa)) {
2225 		ppa->ppa_lastrx = gethrtime();
2226 	}
2227 	/*
2228 	 * Should there be any promiscuous stream(s), send the data up for
2229 	 * each promiscuous stream that we recognize. We skip the control
2230 	 * stream as we obviously never allow the control stream to become
2231 	 * promiscous and bind to PPP_ALLSAP.
2232 	 */
2233 	rw_enter(&ppa->ppa_sib_lock, RW_READER);
2234 	is_promisc = ppa->ppa_promicnt;
2235 	if (is_promisc) {
2236 		ASSERT(ppa->ppa_streams != NULL);
2237 		sppp_dlprsendup(ppa->ppa_streams, mp, proto, B_TRUE);
2238 	}
2239 	rw_exit(&ppa->ppa_sib_lock);
2240 	return (destsps);
2241 }
2242 
2243 /*
2244  * sppp_kstat_update()
2245  *
2246  * Description:
2247  *    Update per-ppa kstat interface statistics.
2248  */
2249 static int
2250 sppp_kstat_update(kstat_t *ksp, int rw)
2251 {
2252 	register sppa_t		*ppa;
2253 	register sppp_kstats_t	*pppkp;
2254 	register struct pppstat64 *sp;
2255 
2256 	if (rw == KSTAT_WRITE) {
2257 		return (EACCES);
2258 	}
2259 
2260 	ppa = (sppa_t *)ksp->ks_private;
2261 	ASSERT(ppa != NULL);
2262 
2263 	pppkp = (sppp_kstats_t *)ksp->ks_data;
2264 	sp = &ppa->ppa_stats.p;
2265 
2266 	mutex_enter(&ppa->ppa_sta_lock);
2267 	pppkp->allocbfail.value.ui32	= ppa->ppa_allocbfail;
2268 	pppkp->mctlsfwd.value.ui32	= ppa->ppa_mctlsfwd;
2269 	pppkp->mctlsfwderr.value.ui32	= ppa->ppa_mctlsfwderr;
2270 	pppkp->rbytes.value.ui32	= sp->ppp_ibytes;
2271 	pppkp->rbytes64.value.ui64	= sp->ppp_ibytes;
2272 	pppkp->ierrors.value.ui32	= sp->ppp_ierrors;
2273 	pppkp->ierrors_lower.value.ui32	= ppa->ppa_ierr_low;
2274 	pppkp->ioctlsfwd.value.ui32	= ppa->ppa_ioctlsfwd;
2275 	pppkp->ioctlsfwdok.value.ui32	= ppa->ppa_ioctlsfwdok;
2276 	pppkp->ioctlsfwderr.value.ui32	= ppa->ppa_ioctlsfwderr;
2277 	pppkp->ipackets.value.ui32	= sp->ppp_ipackets;
2278 	pppkp->ipackets64.value.ui64	= sp->ppp_ipackets;
2279 	pppkp->ipackets_ctl.value.ui32	= ppa->ppa_ipkt_ctl;
2280 	pppkp->iqdropped.value.ui32	= ppa->ppa_iqdropped;
2281 	pppkp->irunts.value.ui32	= ppa->ppa_irunts;
2282 	pppkp->itoolongs.value.ui32	= ppa->ppa_itoolongs;
2283 	pppkp->lsneedup.value.ui32	= ppa->ppa_lsneedup;
2284 	pppkp->lsdown.value.ui32	= ppa->ppa_lsdown;
2285 	pppkp->mctlsknown.value.ui32	= ppa->ppa_mctlsknown;
2286 	pppkp->mctlsunknown.value.ui32	= ppa->ppa_mctlsunknown;
2287 	pppkp->obytes.value.ui32	= sp->ppp_obytes;
2288 	pppkp->obytes64.value.ui64	= sp->ppp_obytes;
2289 	pppkp->oerrors.value.ui32	= sp->ppp_oerrors;
2290 	pppkp->oerrors_lower.value.ui32	= ppa->ppa_oerr_low;
2291 	pppkp->opackets.value.ui32	= sp->ppp_opackets;
2292 	pppkp->opackets64.value.ui64	= sp->ppp_opackets;
2293 	pppkp->opackets_ctl.value.ui32	= ppa->ppa_opkt_ctl;
2294 	pppkp->oqdropped.value.ui32	= ppa->ppa_oqdropped;
2295 	pppkp->otoolongs.value.ui32	= ppa->ppa_otoolongs;
2296 	pppkp->orunts.value.ui32	= ppa->ppa_orunts;
2297 	mutex_exit(&ppa->ppa_sta_lock);
2298 
2299 	return (0);
2300 }
2301 
2302 /*
2303  * Turn off proto in ppa_npflag to indicate that
2304  * the corresponding network protocol has been plumbed.
2305  * Release proto packets that were being held in the control
2306  * queue in anticipation of this event.
2307  */
static void
sppp_release_pkts(sppa_t *ppa, uint16_t proto)
{
	uint32_t npflagpos = sppp_ppp2np(proto);
	int count;
	mblk_t *mp;
	uint16_t mp_proto;
	queue_t *q;
	spppstr_t *destsps;

	ASSERT(ppa != NULL);

	/*
	 * Nothing to do if this protocol is not one we track, or if it
	 * was never blocked.  NOTE(review): ppa_npflag is read here
	 * without ppa_npmutex, unlike the locked reads in sppp_recv();
	 * presumably the caller's context makes that benign — confirm.
	 */
	if (npflagpos == 0 || (ppa->ppa_npflag & (1 << npflagpos)) == 0)
		return;

	/* Unblock the protocol and claim the count of held packets. */
	mutex_enter(&ppa->ppa_npmutex);
	ppa->ppa_npflag &= ~(1 << npflagpos);
	count = ppa->ppa_holdpkts[npflagpos];
	ppa->ppa_holdpkts[npflagpos] = 0;
	mutex_exit(&ppa->ppa_npmutex);

	/* Held packets live on the control stream's read queue. */
	q = ppa->ppa_ctl->sps_rq;

	while (count > 0) {
		mp = getq(q);
		ASSERT(mp != NULL);

		/*
		 * Packets held for other (still-blocked) protocols are
		 * cycled back to the tail of the queue; only packets of
		 * the released protocol are counted down and sent up.
		 */
		mp_proto = PPP_PROTOCOL(mp->b_rptr);
		if (mp_proto !=  proto) {
			(void) putq(q, mp);
			continue;
		}
		count--;
		destsps = NULL;
		if (mp_proto == PPP_IP) {
			destsps = ppa->ppa_ip_cache;
		} else if (mp_proto == PPP_IPV6) {
			destsps = ppa->ppa_ip6_cache;
		}
		ASSERT(destsps != NULL);

		/*
		 * Same delivery convention as sppp_recv(): fastpath
		 * streams get the raw payload past the PPP header,
		 * others get a DLPI unitdata wrapper.
		 */
		if (IS_SPS_FASTPATH(destsps)) {
			mp->b_rptr += PPP_HDRLEN;
		} else {
			spppstr_t *uqs = (spppstr_t *)destsps->sps_rq->q_ptr;
			ASSERT(uqs != NULL);
			mp->b_rptr += PPP_HDRLEN;
			mp = sppp_dladdud(uqs, mp, uqs->sps_sap, B_FALSE);
			if (mp == NULL) {
				mutex_enter(&ppa->ppa_sta_lock);
				ppa->ppa_allocbfail++;
				mutex_exit(&ppa->ppa_sta_lock);
				/* mp already freed by sppp_dladdud */
				continue;
			}
		}

		if (canputnext(destsps->sps_rq)) {
			putnext(destsps->sps_rq, mp);
		} else {
			/* Upper stream is flow-blocked; drop and count */
			mutex_enter(&ppa->ppa_sta_lock);
			ppa->ppa_iqdropped++;
			mutex_exit(&ppa->ppa_sta_lock);
			freemsg(mp);
			continue;
		}
	}
}
2376