xref: /titanic_51/usr/src/uts/common/io/ppp/sppp/sppp.c (revision 148434217c040ea38dc844384f6ba68d9b325906)
1 /*
2  * sppp.c - Solaris STREAMS PPP multiplexing pseudo-driver
3  *
4  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
5  * Use is subject to license terms.
6  *
7  * Permission to use, copy, modify, and distribute this software and its
8  * documentation is hereby granted, provided that the above copyright
9  * notice appears in all copies.
10  *
11  * SUN MAKES NO REPRESENTATION OR WARRANTIES ABOUT THE SUITABILITY OF
12  * THE SOFTWARE, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
13  * TO THE IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
14  * PARTICULAR PURPOSE, OR NON-INFRINGEMENT.  SUN SHALL NOT BE LIABLE FOR
15  * ANY DAMAGES SUFFERED BY LICENSEE AS A RESULT OF USING, MODIFYING OR
16  * DISTRIBUTING THIS SOFTWARE OR ITS DERIVATIVES
17  *
18  * Copyright (c) 1994 The Australian National University.
19  * All rights reserved.
20  *
21  * Permission to use, copy, modify, and distribute this software and its
22  * documentation is hereby granted, provided that the above copyright
23  * notice appears in all copies.  This software is provided without any
24  * warranty, express or implied. The Australian National University
25  * makes no representations about the suitability of this software for
26  * any purpose.
27  *
28  * IN NO EVENT SHALL THE AUSTRALIAN NATIONAL UNIVERSITY BE LIABLE TO ANY
29  * PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
30  * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF
31  * THE AUSTRALIAN NATIONAL UNIVERSITY HAS BEEN ADVISED OF THE POSSIBILITY
32  * OF SUCH DAMAGE.
33  *
34  * THE AUSTRALIAN NATIONAL UNIVERSITY SPECIFICALLY DISCLAIMS ANY WARRANTIES,
35  * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
36  * AND FITNESS FOR A PARTICULAR PURPOSE.  THE SOFTWARE PROVIDED HEREUNDER IS
37  * ON AN "AS IS" BASIS, AND THE AUSTRALIAN NATIONAL UNIVERSITY HAS NO
38  * OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS,
39  * OR MODIFICATIONS.
40  *
41  * This driver is derived from the original SVR4 STREAMS PPP driver
42  * originally written by Paul Mackerras <paul.mackerras@cs.anu.edu.au>.
43  *
44  * Adi Masputra <adi.masputra@sun.com> rewrote and restructured the code
45  * for improved performance and scalability.
46  */
47 
48 #define	RCSID	"$Id: sppp.c,v 1.0 2000/05/08 01:10:12 masputra Exp $"
49 
50 #include <sys/types.h>
51 #include <sys/debug.h>
52 #include <sys/param.h>
53 #include <sys/stat.h>
54 #include <sys/stream.h>
55 #include <sys/stropts.h>
56 #include <sys/sysmacros.h>
57 #include <sys/errno.h>
58 #include <sys/time.h>
59 #include <sys/cmn_err.h>
60 #include <sys/kmem.h>
61 #include <sys/conf.h>
62 #include <sys/dlpi.h>
63 #include <sys/ddi.h>
64 #include <sys/kstat.h>
65 #include <sys/strsun.h>
66 #include <sys/ethernet.h>
67 #include <sys/policy.h>
68 #include <net/ppp_defs.h>
69 #include <net/pppio.h>
70 #include "sppp.h"
71 #include "s_common.h"
72 
73 /*
74  * This is used to tag official Solaris sources.  Please do not define
75  * "INTERNAL_BUILD" when building this software outside of Sun Microsystems.
76  */
77 #ifdef INTERNAL_BUILD
78 /* MODINFO is limited to 32 characters. */
79 const char sppp_module_description[] = "PPP 4.0 mux";
80 #else /* INTERNAL_BUILD */
81 const char sppp_module_description[] = "ANU PPP mux";
82 
83 /* LINTED */
84 static const char buildtime[] = "Built " __DATE__ " at " __TIME__
85 #ifdef DEBUG
86 " DEBUG"
87 #endif
88 "\n";
89 #endif /* INTERNAL_BUILD */
90 
91 static void	sppp_inner_ioctl(queue_t *, mblk_t *);
92 static void	sppp_outer_ioctl(queue_t *, mblk_t *);
93 static queue_t	*sppp_send(queue_t *, mblk_t **, spppstr_t *);
94 static queue_t	*sppp_recv(queue_t *, mblk_t **, spppstr_t *);
95 static void	sppp_recv_nondata(queue_t *, mblk_t *, spppstr_t *);
96 static queue_t	*sppp_outpkt(queue_t *, mblk_t **, int, spppstr_t *);
97 static spppstr_t *sppp_inpkt(queue_t *, mblk_t *, spppstr_t *);
98 static int	sppp_kstat_update(kstat_t *, int);
99 static void 	sppp_release_pkts(sppa_t *, uint16_t);
100 
101 /*
102  * sps_list contains the list of active per-stream instance state structures
103  * ordered on the minor device number (see sppp.h for details). All streams
104  * opened to this driver are threaded together in this list.
105  */
106 static spppstr_t *sps_list = NULL;
107 /*
108  * ppa_list contains the list of active per-attachment instance state
109  * structures ordered on the ppa id number (see sppp.h for details). All of
110  * the ppa structures created once per PPPIO_NEWPPA ioctl are threaded together
111  * in this list. There is exactly one ppa structure for a given PPP interface,
112  * and multiple sps streams (upper streams) may share a ppa by performing
113  * an attachment explicitly (PPPIO_ATTACH) or implicitly (DL_ATTACH_REQ).
114  */
115 static sppa_t *ppa_list = NULL;
116 
117 static const char *kstats_names[] = { SPPP_KSTATS_NAMES };
118 static const char *kstats64_names[] = { SPPP_KSTATS64_NAMES };
119 
120 /*
121  * map proto (which is an IANA defined ppp network protocol) to
122  * a bit position indicated by NP_* in ppa_npflag
123  */
124 static uint32_t
125 sppp_ppp2np(uint16_t proto)
126 {
127 	switch (proto) {
128 	case PPP_IP:
129 		return (NP_IP);
130 	case PPP_IPV6:
131 		return (NP_IPV6);
132 	default:
133 		return (0);
134 	}
135 }
136 
/*
 * sppp_open()
 *
 * MT-Perimeters:
 *    exclusive inner, exclusive outer.
 *
 * Description:
 *    Common open procedure for module.  Only clone opens are accepted;
 *    the new stream gets the lowest unused minor number, is linked into
 *    the global sps_list, and starts life unattached (no sap, DLPI state
 *    DL_UNATTACHED, all network packets dropped).
 */
/* ARGSUSED */
int
sppp_open(queue_t *q, dev_t *devp, int oflag, int sflag, cred_t *credp)
{
	spppstr_t	*sps;
	spppstr_t	**nextmn;
	minor_t		mn;

	ASSERT(q != NULL && devp != NULL);
	ASSERT(sflag != MODOPEN);

	if (q->q_ptr != NULL) {
		return (0);		/* already open */
	}
	/* Only clone opens are supported; opening a specific minor fails. */
	if (sflag != CLONEOPEN) {
		return (OPENFAIL);
	}
	/*
	 * The sps list is sorted using the minor number as the key. The
	 * following code walks the list to find the lowest valued minor
	 * number available to be used.
	 */
	mn = 0;
	for (nextmn = &sps_list; (sps = *nextmn) != NULL;
	    nextmn = &sps->sps_nextmn) {
		if (sps->sps_mn_id != mn) {
			break;
		}
		++mn;
	}
	sps = (spppstr_t *)kmem_zalloc(sizeof (spppstr_t), KM_SLEEP);
	ASSERT(sps != NULL);		/* KM_SLEEP must never return NULL */
	sps->sps_nextmn = *nextmn;	/* insert stream in global list */
	*nextmn = sps;
	sps->sps_mn_id = mn;		/* save minor id for this stream */
	sps->sps_rq = q;		/* save read queue pointer */
	sps->sps_sap = -1;		/* no sap bound to stream */
	sps->sps_dlstate = DL_UNATTACHED; /* dlpi state is unattached */
	sps->sps_npmode = NPMODE_DROP;	/* drop all packets initially */
	q->q_ptr = WR(q)->q_ptr = (caddr_t)sps;
	/*
	 * We explicitly disable the automatic queue scheduling for the
	 * write-side to obtain complete control over queuing during transmit.
	 * Packets will be queued at the upper write queue and the service
	 * routine will not be called until it gets scheduled by having the
	 * lower write service routine call the qenable(WR(uq)) for all streams
	 * attached to the same ppa instance.
	 */
	noenable(WR(q));
	*devp = makedevice(getmajor(*devp), mn);
	qprocson(q);
	return (0);
}
199 
200 /*
201  * Free storage used by a PPA.  This is not called until the last PPA
202  * user closes his connection or reattaches to a different PPA.
203  */
204 static void
205 sppp_free_ppa(sppa_t *ppa)
206 {
207 	sppa_t **nextppa;
208 
209 	ASSERT(ppa->ppa_refcnt == 1);
210 	if (ppa->ppa_kstats != NULL) {
211 		kstat_delete(ppa->ppa_kstats);
212 		ppa->ppa_kstats = NULL;
213 	}
214 	mutex_destroy(&ppa->ppa_sta_lock);
215 	mutex_destroy(&ppa->ppa_npmutex);
216 	rw_destroy(&ppa->ppa_sib_lock);
217 	nextppa = &ppa_list;
218 	while (*nextppa != NULL) {
219 		if (*nextppa == ppa) {
220 			*nextppa = ppa->ppa_nextppa;
221 			break;
222 		}
223 		nextppa = &(*nextppa)->ppa_nextppa;
224 	}
225 	kmem_free(ppa, sizeof (*ppa));
226 }
227 
/*
 * Create a new PPA.  Caller must be exclusive on outer perimeter.
 * Allocates the sppa_t and its kstat set, initializes the locks and
 * default MTU/MRU, and inserts the structure into ppa_list, which is
 * kept sorted by ppa id.  Returns NULL on allocation failure.
 */
sppa_t *
sppp_create_ppa(uint32_t ppa_id)
{
	sppa_t *ppa;
	sppa_t *curppa;
	sppa_t **availppa;
	char unit[32];		/* Unit name */
	const char **cpp;
	kstat_t *ksp;
	kstat_named_t *knt;

	/*
	 * NOTE: unit *must* be named for the driver
	 * name plus the ppa number so that netstat
	 * can find the statistics.
	 */
	(void) sprintf(unit, "%s" "%d", PPP_DRV_NAME, ppa_id);
	/*
	 * Make sure we can allocate a buffer to
	 * contain the ppa to be sent upstream, as
	 * well as the actual ppa structure and its
	 * associated kstat structure.
	 */
	ppa = (sppa_t *)kmem_zalloc(sizeof (sppa_t),
	    KM_NOSLEEP);
	ksp = kstat_create(PPP_DRV_NAME, ppa_id, unit, "net", KSTAT_TYPE_NAMED,
	    sizeof (sppp_kstats_t) / sizeof (kstat_named_t), 0);

	/* Both allocations must succeed; undo whichever one worked. */
	if (ppa == NULL || ksp == NULL) {
		if (ppa != NULL) {
			kmem_free(ppa, sizeof (sppa_t));
		}
		if (ksp != NULL) {
			kstat_delete(ksp);
		}
		return (NULL);
	}
	ppa->ppa_kstats = ksp;		/* chain kstat structure */
	ppa->ppa_ppa_id = ppa_id;	/* record ppa id */
	ppa->ppa_mtu = PPP_MAXMTU;	/* 65535-(PPP_HDRLEN+PPP_FCSLEN) */
	ppa->ppa_mru = PPP_MAXMRU;	/* 65000 */

	mutex_init(&ppa->ppa_sta_lock, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&ppa->ppa_npmutex, NULL, MUTEX_DRIVER, NULL);
	rw_init(&ppa->ppa_sib_lock, NULL, RW_DRIVER, NULL);

	/*
	 * Prepare and install kstat counters.  Note that for netstat
	 * -i to work, there needs to be "ipackets", "opackets",
	 * "ierrors", and "oerrors" kstat named variables.
	 */
	knt = (kstat_named_t *)ksp->ks_data;
	for (cpp = kstats_names; cpp < kstats_names + Dim(kstats_names);
	    cpp++) {
		kstat_named_init(knt, *cpp, KSTAT_DATA_UINT32);
		knt++;
	}
	/* The 64-bit counters follow the 32-bit ones in ks_data. */
	for (cpp = kstats64_names; cpp < kstats64_names + Dim(kstats64_names);
	    cpp++) {
		kstat_named_init(knt, *cpp, KSTAT_DATA_UINT64);
		knt++;
	}
	ksp->ks_update = sppp_kstat_update;
	ksp->ks_private = (void *)ppa;
	kstat_install(ksp);

	/* link to the next ppa and insert into global list */
	availppa = &ppa_list;
	while ((curppa = *availppa) != NULL) {
		if (ppa_id < curppa->ppa_ppa_id)
			break;
		availppa = &curppa->ppa_nextppa;
	}
	ppa->ppa_nextppa = *availppa;
	*availppa = ppa;
	return (ppa);
}
308 
/*
 * sppp_close()
 *
 * MT-Perimeters:
 *    exclusive inner, exclusive outer.
 *
 * Description:
 *    Common close procedure for module.  Handles three cases: an
 *    unattached stream (just unlink and free), the control stream
 *    (detach every sibling stream and notify them via the preallocated
 *    hangup message), and an ordinary attached stream (unlink from the
 *    sibling list and tell the control stream's daemon).  The ppa is
 *    freed here when the closing stream holds its last reference.
 */
int
sppp_close(queue_t *q)
{
	spppstr_t	*sps;
	spppstr_t	**nextmn;
	spppstr_t	*sib;
	sppa_t		*ppa;
	mblk_t		*mp;

	ASSERT(q != NULL && q->q_ptr != NULL);
	sps = (spppstr_t *)q->q_ptr;
	qprocsoff(q);

	ppa = sps->sps_ppa;
	if (ppa == NULL) {
		ASSERT(!IS_SPS_CONTROL(sps));
		goto close_unattached;
	}
	if (IS_SPS_CONTROL(sps)) {
		/* cnt tallies references: one for this control stream plus */
		/* one per sibling stream, checked against ppa_refcnt below. */
		uint32_t	cnt = 0;

		ASSERT(ppa != NULL);
		ASSERT(ppa->ppa_ctl == sps);
		ppa->ppa_ctl = NULL;
		/*
		 * STREAMS framework always issues I_UNLINK prior to close,
		 * since we only allow I_LINK under the control stream.
		 * A given ppa structure has at most one lower stream pointed
		 * by the ppa_lower_wq field, because we only allow a single
		 * linkage (I_LINK) to be done on the control stream.
		 */
		ASSERT(ppa->ppa_lower_wq == NULL);
		/*
		 * Walk through all of sibling streams attached to this ppa,
		 * and remove all references to this ppa. We have exclusive
		 * access for the entire driver here, so there's no need
		 * to hold ppa_sib_lock.
		 */
		cnt++;
		sib = ppa->ppa_streams;
		while (sib != NULL) {
			ASSERT(ppa == sib->sps_ppa);
			sib->sps_npmode = NPMODE_DROP;
			sib->sps_flags &= ~(SPS_PIOATTACH | SPS_CACHED);
			/*
			 * There should be a preallocated hangup
			 * message here.  Fetch it and send it up to
			 * the stream head.  This will cause IP to
			 * mark the interface as "down."
			 */
			if ((mp = sib->sps_hangup) != NULL) {
				sib->sps_hangup = NULL;
				/*
				 * M_HANGUP works with IP, but snoop
				 * is lame and requires M_ERROR.  Send
				 * up a clean error code instead.
				 *
				 * XXX if snoop is fixed, fix this, too.
				 */
				MTYPE(mp) = M_ERROR;
				*mp->b_wptr++ = ENXIO;
				putnext(sib->sps_rq, mp);
			}
			/* Let the sibling's service routine drain/drop. */
			qenable(WR(sib->sps_rq));
			cnt++;
			sib = sib->sps_nextsib;
		}
		ASSERT(ppa->ppa_refcnt == cnt);
	} else {
		ASSERT(ppa->ppa_streams != NULL);
		ASSERT(ppa->ppa_ctl != sps);
		mp = NULL;
		/* Invalidate any cached fast-path stream for this sap. */
		if (sps->sps_sap == PPP_IP) {
			ppa->ppa_ip_cache = NULL;
			mp = create_lsmsg(PPP_LINKSTAT_IPV4_UNBOUND);
		} else if (sps->sps_sap == PPP_IPV6) {
			ppa->ppa_ip6_cache = NULL;
			mp = create_lsmsg(PPP_LINKSTAT_IPV6_UNBOUND);
		}
		/* Tell the daemon the bad news. */
		if (mp != NULL && ppa->ppa_ctl != NULL &&
		    (sps->sps_npmode == NPMODE_PASS ||
		    sps->sps_npmode == NPMODE_QUEUE)) {
			putnext(ppa->ppa_ctl->sps_rq, mp);
		} else {
			freemsg(mp);
		}
		/*
		 * Walk through all of sibling streams attached to the
		 * same ppa, and remove this stream from the sibling
		 * streams list. We have exclusive access for the
		 * entire driver here, so there's no need to hold
		 * ppa_sib_lock.
		 */
		sib = ppa->ppa_streams;
		if (sib == sps) {
			ppa->ppa_streams = sps->sps_nextsib;
		} else {
			while (sib->sps_nextsib != NULL) {
				if (sib->sps_nextsib == sps) {
					sib->sps_nextsib = sps->sps_nextsib;
					break;
				}
				sib = sib->sps_nextsib;
			}
		}
		sps->sps_nextsib = NULL;
		freemsg(sps->sps_hangup);
		sps->sps_hangup = NULL;
		/*
		 * Check if this is a promiscous stream. If the SPS_PROMISC bit
		 * is still set, it means that the stream is closed without
		 * ever having issued DL_DETACH_REQ or DL_PROMISCOFF_REQ.
		 * In this case, we simply decrement the promiscous counter,
		 * and it's safe to do it without holding ppa_sib_lock since
		 * we're exclusive (inner and outer) at this point.
		 */
		if (IS_SPS_PROMISC(sps)) {
			ASSERT(ppa->ppa_promicnt > 0);
			ppa->ppa_promicnt--;
		}
	}
	/* If we're the only one left, then delete now. */
	if (ppa->ppa_refcnt <= 1)
		sppp_free_ppa(ppa);
	else
		ppa->ppa_refcnt--;
close_unattached:
	/* Unlink this stream from the global minor-ordered sps_list. */
	q->q_ptr = WR(q)->q_ptr = NULL;
	for (nextmn = &sps_list; *nextmn != NULL;
	    nextmn = &(*nextmn)->sps_nextmn) {
		if (*nextmn == sps) {
			*nextmn = sps->sps_nextmn;
			break;
		}
	}
	kmem_free(sps, sizeof (spppstr_t));
	return (0);
}
457 
/*
 * sppp_ioctl()
 *
 * MT-Perimeters:
 *    shared inner, shared outer (called from sppp_uwput).
 *
 * Description:
 *    Handles the M_IOCTL commands that need extra automatic variables
 *    (PPPIO_NPMODE, PPPIO_GIDLE, PPPIO_GTYPE, PPPIO_GETSTAT64,
 *    PPPIO_GETCSTAT); sppp_uwput dispatches them here.  Each case either
 *    acks/naks the request in place, or forwards it to the lower stream
 *    and returns without replying (the reply comes back later and is
 *    matched via sps_ioc_id).
 */
static void
sppp_ioctl(struct queue *q, mblk_t *mp)
{
	spppstr_t	*sps;
	spppstr_t	*nextsib;
	sppa_t		*ppa;
	struct iocblk	*iop;
	mblk_t		*nmp;
	enum NPmode	npmode;
	struct ppp_idle	*pip;
	struct ppp_stats64 *psp;
	struct ppp_comp_stats *pcsp;
	hrtime_t	hrtime;
	int		sap;
	int		count = 0;
	int		error = EINVAL;

	sps = (spppstr_t *)q->q_ptr;
	ppa = sps->sps_ppa;

	iop = (struct iocblk *)mp->b_rptr;
	switch (iop->ioc_cmd) {
	case PPPIO_NPMODE:
		/* Only the control stream may change a sibling's npmode. */
		if (!IS_SPS_CONTROL(sps)) {
			break;		/* return EINVAL */
		} else if (iop->ioc_count != 2 * sizeof (uint32_t) ||
		    (mp->b_cont == NULL)) {
			error = EPROTO;
			break;
		}
		ASSERT(ppa != NULL);
		ASSERT(mp->b_cont->b_rptr != NULL);
		ASSERT(sps->sps_npmode == NPMODE_PASS);
		/* Payload is two uint32_t values: sap, then new mode. */
		sap = ((uint32_t *)mp->b_cont->b_rptr)[0];
		npmode = (enum NPmode)((uint32_t *)mp->b_cont->b_rptr)[1];
		/*
		 * Walk the sibling streams which belong to the same
		 * ppa, and try to find a stream with matching sap
		 * number.
		 */
		rw_enter(&ppa->ppa_sib_lock, RW_WRITER);
		for (nextsib = ppa->ppa_streams; nextsib != NULL;
		    nextsib = nextsib->sps_nextsib) {
			if (nextsib->sps_sap == sap) {
				break;	/* found it */
			}
		}
		if (nextsib == NULL) {
			rw_exit(&ppa->ppa_sib_lock);
			break;		/* return EINVAL */
		} else {
			nextsib->sps_npmode = npmode;
			/* Kick the queue so packets held under NPMODE_QUEUE */
			/* get reprocessed under the new mode. */
			if ((nextsib->sps_npmode != NPMODE_QUEUE) &&
			    (WR(nextsib->sps_rq)->q_first != NULL)) {
				qenable(WR(nextsib->sps_rq));
			}
		}
		rw_exit(&ppa->ppa_sib_lock);
		error = 0;	/* return success */
		break;
	case PPPIO_GIDLE:
		if (ppa == NULL) {
			ASSERT(!IS_SPS_CONTROL(sps));
			error = ENOLINK;
			break;
		} else if (!IS_PPA_TIMESTAMP(ppa)) {
			/* PPPIO_USETIMESTAMP was never issued on this ppa. */
			break;		/* return EINVAL */
		}
		if ((nmp = allocb(sizeof (struct ppp_idle),
		    BPRI_MED)) == NULL) {
			mutex_enter(&ppa->ppa_sta_lock);
			ppa->ppa_allocbfail++;
			mutex_exit(&ppa->ppa_sta_lock);
			error = ENOSR;
			break;
		}
		if (mp->b_cont != NULL) {
			freemsg(mp->b_cont);
		}
		mp->b_cont = nmp;
		pip = (struct ppp_idle *)nmp->b_wptr;
		nmp->b_wptr += sizeof (struct ppp_idle);
		/*
		 * Get current timestamp and subtract the tx and rx
		 * timestamps to get the actual idle time to be
		 * returned.  Division by 10^9 converts the hrtime
		 * difference to seconds.
		 */
		hrtime = gethrtime();
		pip->xmit_idle = (hrtime - ppa->ppa_lasttx) / 1000000000ul;
		pip->recv_idle = (hrtime - ppa->ppa_lastrx) / 1000000000ul;
		count = msgsize(nmp);
		error = 0;
		break;		/* return success (error is 0) */
	case PPPIO_GTYPE:
		nmp = allocb(sizeof (uint32_t), BPRI_MED);
		if (nmp == NULL) {
			error = ENOSR;
			break;
		}
		if (mp->b_cont != NULL) {
			freemsg(mp->b_cont);
		}
		mp->b_cont = nmp;
		/*
		 * Let the requestor know that we are the PPP
		 * multiplexer (PPPTYP_MUX).
		 */
		*(uint32_t *)nmp->b_wptr = PPPTYP_MUX;
		nmp->b_wptr += sizeof (uint32_t);
		count = msgsize(nmp);
		error = 0;		/* return success */
		break;
	case PPPIO_GETSTAT64:
		if (ppa == NULL) {
			break;		/* return EINVAL */
		} else if ((ppa->ppa_lower_wq != NULL) &&
		    !IS_PPA_LASTMOD(ppa)) {
			/* Not last module: forward the request downstream. */
			mutex_enter(&ppa->ppa_sta_lock);
			/*
			 * We match sps_ioc_id on the M_IOC{ACK,NAK},
			 * so if the response hasn't come back yet,
			 * new ioctls must be queued instead.
			 */
			if (IS_SPS_IOCQ(sps)) {
				mutex_exit(&ppa->ppa_sta_lock);
				if (!putq(q, mp)) {
					error = EAGAIN;
					break;
				}
				return;
			} else {
				ppa->ppa_ioctlsfwd++;
				/*
				 * Record the ioctl CMD & ID - this will be
				 * used to check the ACK or NAK responses
				 * coming from below.
				 */
				sps->sps_ioc_id = iop->ioc_id;
				sps->sps_flags |= SPS_IOCQ;
				mutex_exit(&ppa->ppa_sta_lock);
			}
			putnext(ppa->ppa_lower_wq, mp);
			return;	/* don't ack or nak the request */
		}
		nmp = allocb(sizeof (*psp), BPRI_MED);
		if (nmp == NULL) {
			mutex_enter(&ppa->ppa_sta_lock);
			ppa->ppa_allocbfail++;
			mutex_exit(&ppa->ppa_sta_lock);
			error = ENOSR;
			break;
		}
		if (mp->b_cont != NULL) {
			freemsg(mp->b_cont);
		}
		mp->b_cont = nmp;
		psp = (struct ppp_stats64 *)nmp->b_wptr;
		/*
		 * Copy the contents of ppp_stats64 structure for this
		 * ppa and return them to the caller.
		 */
		mutex_enter(&ppa->ppa_sta_lock);
		bcopy(&ppa->ppa_stats, psp, sizeof (*psp));
		mutex_exit(&ppa->ppa_sta_lock);
		nmp->b_wptr += sizeof (*psp);
		count = sizeof (*psp);
		error = 0;		/* return success */
		break;
	case PPPIO_GETCSTAT:
		if (ppa == NULL) {
			break;		/* return EINVAL */
		} else if ((ppa->ppa_lower_wq != NULL) &&
		    !IS_PPA_LASTMOD(ppa)) {
			mutex_enter(&ppa->ppa_sta_lock);
			/*
			 * See comments in PPPIO_GETSTAT64 case
			 * in sppp_ioctl().
			 */
			if (IS_SPS_IOCQ(sps)) {
				mutex_exit(&ppa->ppa_sta_lock);
				if (!putq(q, mp)) {
					error = EAGAIN;
					break;
				}
				return;
			} else {
				ppa->ppa_ioctlsfwd++;
				/*
				 * Record the ioctl CMD & ID - this will be
				 * used to check the ACK or NAK responses
				 * coming from below.
				 */
				sps->sps_ioc_id = iop->ioc_id;
				sps->sps_flags |= SPS_IOCQ;
				mutex_exit(&ppa->ppa_sta_lock);
			}
			putnext(ppa->ppa_lower_wq, mp);
			return;	/* don't ack or nak the request */
		}
		nmp = allocb(sizeof (struct ppp_comp_stats), BPRI_MED);
		if (nmp == NULL) {
			mutex_enter(&ppa->ppa_sta_lock);
			ppa->ppa_allocbfail++;
			mutex_exit(&ppa->ppa_sta_lock);
			error = ENOSR;
			break;
		}
		if (mp->b_cont != NULL) {
			freemsg(mp->b_cont);
		}
		mp->b_cont = nmp;
		pcsp = (struct ppp_comp_stats *)nmp->b_wptr;
		nmp->b_wptr += sizeof (struct ppp_comp_stats);
		/* No compression stats kept here; return all zeroes. */
		bzero((caddr_t)pcsp, sizeof (struct ppp_comp_stats));
		count = msgsize(nmp);
		error = 0;		/* return success */
		break;
	}

	if (error == 0) {
		/* Success; tell the user. */
		miocack(q, mp, count, 0);
	} else {
		/* Failure; send error back upstream. */
		miocnak(q, mp, 0, error);
	}
}
685 
/*
 * sppp_uwput()
 *
 * MT-Perimeters:
 *    shared inner, shared outer.
 *
 * Description:
 *    Upper write-side put procedure. Messages from above arrive here.
 *    M_PROTO/M_PCPROTO on the control stream are forwarded below;
 *    M_DATA goes through sppp_send; M_IOCTLs are dispatched either to
 *    qwriter (for commands needing exclusive access), to sppp_ioctl,
 *    or forwarded to the lower stream.
 */
void
sppp_uwput(queue_t *q, mblk_t *mp)
{
	queue_t		*nextq;
	spppstr_t	*sps;
	sppa_t		*ppa;
	struct iocblk	*iop;
	int		error;

	ASSERT(q != NULL && q->q_ptr != NULL);
	ASSERT(mp != NULL && mp->b_rptr != NULL);
	sps = (spppstr_t *)q->q_ptr;
	ppa = sps->sps_ppa;

	switch (MTYPE(mp)) {
	case M_PCPROTO:
	case M_PROTO:
		if (IS_SPS_CONTROL(sps)) {
			ASSERT(ppa != NULL);
			/*
			 * Intentionally change this to a high priority
			 * message so it doesn't get queued up. M_PROTO is
			 * specifically used for signalling between pppd and its
			 * kernel-level component(s), such as ppptun, so we
			 * make sure that it doesn't get queued up behind
			 * data messages.
			 */
			MTYPE(mp) = M_PCPROTO;
			if ((ppa->ppa_lower_wq != NULL) &&
			    canputnext(ppa->ppa_lower_wq)) {
				mutex_enter(&ppa->ppa_sta_lock);
				ppa->ppa_mctlsfwd++;
				mutex_exit(&ppa->ppa_sta_lock);
				putnext(ppa->ppa_lower_wq, mp);
			} else {
				/* No lower stream or it's flow-controlled; */
				/* count the drop and discard the message. */
				mutex_enter(&ppa->ppa_sta_lock);
				ppa->ppa_mctlsfwderr++;
				mutex_exit(&ppa->ppa_sta_lock);
				freemsg(mp);
			}
		} else {
			(void) sppp_mproto(q, mp, sps);
			return;
		}
		break;
	case M_DATA:
		/* sppp_send consumes mp on failure (returns NULL). */
		if ((nextq = sppp_send(q, &mp, sps)) != NULL)
			putnext(nextq, mp);
		break;
	case M_IOCTL:
		error = EINVAL;
		iop = (struct iocblk *)mp->b_rptr;
		switch (iop->ioc_cmd) {
		case DLIOCRAW:
		case DL_IOC_HDR_INFO:
		case PPPIO_ATTACH:
		case PPPIO_DEBUG:
		case PPPIO_DETACH:
		case PPPIO_LASTMOD:
		case PPPIO_MRU:
		case PPPIO_MTU:
		case PPPIO_USETIMESTAMP:
		case PPPIO_BLOCKNP:
		case PPPIO_UNBLOCKNP:
			/* These need exclusive inner perimeter access. */
			qwriter(q, mp, sppp_inner_ioctl, PERIM_INNER);
			return;
		case I_LINK:
		case I_UNLINK:
		case PPPIO_NEWPPA:
			/* These need exclusive outer perimeter access. */
			qwriter(q, mp, sppp_outer_ioctl, PERIM_OUTER);
			return;
		case PPPIO_NPMODE:
		case PPPIO_GIDLE:
		case PPPIO_GTYPE:
		case PPPIO_GETSTAT64:
		case PPPIO_GETCSTAT:
			/*
			 * These require additional auto variables to
			 * handle, so (for optimization reasons)
			 * they're moved off to a separate function.
			 */
			sppp_ioctl(q, mp);
			return;
		case PPPIO_GETSTAT:
			break;			/* 32 bit interface gone */
		default:
			if (iop->ioc_cr == NULL ||
			    secpolicy_net_config(iop->ioc_cr, B_FALSE) != 0) {
				error = EPERM;
				break;
			} else if ((ppa == NULL) ||
			    (ppa->ppa_lower_wq == NULL)) {
				break;		/* return EINVAL */
			}
			mutex_enter(&ppa->ppa_sta_lock);
			/*
			 * See comments in PPPIO_GETSTAT64 case
			 * in sppp_ioctl().
			 */
			if (IS_SPS_IOCQ(sps)) {
				mutex_exit(&ppa->ppa_sta_lock);
				if (!putq(q, mp)) {
					error = EAGAIN;
					break;
				}
				return;
			} else {
				ppa->ppa_ioctlsfwd++;
				/*
				 * Record the ioctl CMD & ID -
				 * this will be used to check the
				 * ACK or NAK responses coming from below.
				 */
				sps->sps_ioc_id = iop->ioc_id;
				sps->sps_flags |= SPS_IOCQ;
				mutex_exit(&ppa->ppa_sta_lock);
			}
			putnext(ppa->ppa_lower_wq, mp);
			return;		/* don't ack or nak the request */
		}
		/* Failure; send error back upstream. */
		miocnak(q, mp, 0, error);
		break;
	case M_FLUSH:
		if (*mp->b_rptr & FLUSHW) {
			flushq(q, FLUSHDATA);
		}
		if (*mp->b_rptr & FLUSHR) {
			/* Reflect the flush back down the read side. */
			*mp->b_rptr &= ~FLUSHW;
			qreply(q, mp);
		} else {
			freemsg(mp);
		}
		break;
	default:
		freemsg(mp);
		break;
	}
}
834 
/*
 * sppp_uwsrv()
 *
 * MT-Perimeters:
 *    exclusive inner, shared outer.
 *
 * Description:
 *    Upper write-side service procedure. Note that this procedure does
 *    not get called when a message is placed on our write-side queue, since
 *    automatic queue scheduling has been turned off by noenable() when
 *    the queue was opened. We do this on purpose, as we explicitly control
 *    the write-side queue. Therefore, this procedure gets called when
 *    the lower write service procedure qenable() the upper write stream queue.
 *    Drains queued M_IOCTLs (deferred while a previous ioctl was pending
 *    below, see SPS_IOCQ) and data messages via sppp_outpkt.
 */
void
sppp_uwsrv(queue_t *q)
{
	spppstr_t	*sps;
	sppa_t		*ppa;
	mblk_t		*mp;
	queue_t		*nextq;
	struct iocblk	*iop;

	ASSERT(q != NULL && q->q_ptr != NULL);
	sps = (spppstr_t *)q->q_ptr;

	while ((mp = getq(q)) != NULL) {
		if (MTYPE(mp) == M_IOCTL) {
			ppa = sps->sps_ppa;
			if ((ppa == NULL) || (ppa->ppa_lower_wq == NULL)) {
				miocnak(q, mp, 0, EINVAL);
				continue;
			}

			iop = (struct iocblk *)mp->b_rptr;
			mutex_enter(&ppa->ppa_sta_lock);
			/*
			 * See comments in PPPIO_GETSTAT64 case
			 * in sppp_ioctl().
			 */
			if (IS_SPS_IOCQ(sps)) {
				/* Still waiting on the previous reply; put */
				/* the ioctl back and stop for now (nak it */
				/* only if it cannot be requeued). */
				mutex_exit(&ppa->ppa_sta_lock);
				if (putbq(q, mp) == 0)
					miocnak(q, mp, 0, EAGAIN);
				break;
			} else {
				ppa->ppa_ioctlsfwd++;
				sps->sps_ioc_id = iop->ioc_id;
				sps->sps_flags |= SPS_IOCQ;
				mutex_exit(&ppa->ppa_sta_lock);
				putnext(ppa->ppa_lower_wq, mp);
			}
		} else if ((nextq =
		    sppp_outpkt(q, &mp, msgdsize(mp), sps)) == NULL) {
			/* mp != NULL means flow-controlled: requeue and */
			/* stop; mp == NULL means sppp_outpkt consumed it. */
			if (mp != NULL) {
				if (putbq(q, mp) == 0)
					freemsg(mp);
				break;
			}
		} else {
			putnext(nextq, mp);
		}
	}
}
899 
/*
 * sppp_remove_ppa()
 *
 * Description:
 *    Detach the given stream from its ppa: either free the ppa outright
 *    when this stream holds the last reference, or unlink the stream
 *    from the ppa's sibling list (under ppa_sib_lock) and drop the
 *    reference count.  Also clears the stream's ppa pointer and frees
 *    the preallocated hangup message.
 */
void
sppp_remove_ppa(spppstr_t *sps)
{
	spppstr_t *nextsib;
	sppa_t *ppa = sps->sps_ppa;

	rw_enter(&ppa->ppa_sib_lock, RW_WRITER);
	if (ppa->ppa_refcnt <= 1) {
		/* Last reference; release the lock before destroying it. */
		rw_exit(&ppa->ppa_sib_lock);
		sppp_free_ppa(ppa);
	} else {
		/* Unlink sps from the singly-linked sibling list. */
		nextsib = ppa->ppa_streams;
		if (nextsib == sps) {
			ppa->ppa_streams = sps->sps_nextsib;
		} else {
			while (nextsib->sps_nextsib != NULL) {
				if (nextsib->sps_nextsib == sps) {
					nextsib->sps_nextsib =
					    sps->sps_nextsib;
					break;
				}
				nextsib = nextsib->sps_nextsib;
			}
		}
		ppa->ppa_refcnt--;
		/*
		 * And if this stream was marked as promiscuous
		 * (SPS_PROMISC), then we need to update the
		 * promiscuous streams count. This should only happen
		 * when DL_DETACH_REQ is issued prior to marking the
		 * stream as non-promiscuous, through
		 * DL_PROMISCOFF_REQ request.
		 */
		if (IS_SPS_PROMISC(sps)) {
			ASSERT(ppa->ppa_promicnt > 0);
			ppa->ppa_promicnt--;
		}
		rw_exit(&ppa->ppa_sib_lock);
	}
	sps->sps_nextsib = NULL;
	sps->sps_ppa = NULL;
	freemsg(sps->sps_hangup);
	sps->sps_hangup = NULL;
}
944 
945 sppa_t *
946 sppp_find_ppa(uint32_t ppa_id)
947 {
948 	sppa_t *ppa;
949 
950 	for (ppa = ppa_list; ppa != NULL; ppa = ppa->ppa_nextppa) {
951 		if (ppa->ppa_ppa_id == ppa_id) {
952 			break;	/* found the ppa */
953 		}
954 	}
955 	return (ppa);
956 }
957 
958 /*
959  * sppp_inner_ioctl()
960  *
961  * MT-Perimeters:
962  *    exclusive inner, shared outer
963  *
964  * Description:
965  *    Called by sppp_uwput as a result of receiving ioctls which require
966  *    an exclusive access at the inner perimeter.
967  */
968 static void
969 sppp_inner_ioctl(queue_t *q, mblk_t *mp)
970 {
971 	spppstr_t	*sps;
972 	sppa_t		*ppa;
973 	struct iocblk	*iop;
974 	mblk_t		*nmp;
975 	int		error = EINVAL;
976 	int		count = 0;
977 	int		dbgcmd;
978 	int		mru, mtu;
979 	uint32_t	ppa_id;
980 	hrtime_t	hrtime;
981 	uint16_t	proto;
982 
983 	ASSERT(q != NULL && q->q_ptr != NULL);
984 	ASSERT(mp != NULL && mp->b_rptr != NULL);
985 
986 	sps = (spppstr_t *)q->q_ptr;
987 	ppa = sps->sps_ppa;
988 	iop = (struct iocblk *)mp->b_rptr;
989 	switch (iop->ioc_cmd) {
990 	case DLIOCRAW:
991 		if (IS_SPS_CONTROL(sps)) {
992 			break;		/* return EINVAL */
993 		}
994 		sps->sps_flags |= SPS_RAWDATA;
995 		error = 0;		/* return success */
996 		break;
997 	case DL_IOC_HDR_INFO:
998 		if (IS_SPS_CONTROL(sps)) {
999 			break;		/* return EINVAL */
1000 		} else if ((mp->b_cont == NULL) ||
1001 		    *((t_uscalar_t *)mp->b_cont->b_rptr) != DL_UNITDATA_REQ ||
1002 		    (MBLKL(mp->b_cont) < (sizeof (dl_unitdata_req_t) +
1003 		    SPPP_ADDRL))) {
1004 			error = EPROTO;
1005 			break;
1006 		} else if (ppa == NULL) {
1007 			error = ENOLINK;
1008 			break;
1009 		}
1010 		if ((nmp = allocb(PPP_HDRLEN, BPRI_MED)) == NULL) {
1011 			mutex_enter(&ppa->ppa_sta_lock);
1012 			ppa->ppa_allocbfail++;
1013 			mutex_exit(&ppa->ppa_sta_lock);
1014 			error = ENOMEM;
1015 			break;
1016 		}
1017 		*(uchar_t *)nmp->b_wptr++ = PPP_ALLSTATIONS;
1018 		*(uchar_t *)nmp->b_wptr++ = PPP_UI;
1019 		*(uchar_t *)nmp->b_wptr++ = sps->sps_sap >> 8;
1020 		*(uchar_t *)nmp->b_wptr++ = sps->sps_sap & 0xff;
1021 		ASSERT(MBLKL(nmp) == PPP_HDRLEN);
1022 
1023 		linkb(mp, nmp);
1024 		sps->sps_flags |= SPS_FASTPATH;
1025 		error = 0;		/* return success */
1026 		count = msgsize(nmp);
1027 		break;
1028 	case PPPIO_ATTACH:
1029 		if (IS_SPS_CONTROL(sps) || IS_SPS_PIOATTACH(sps) ||
1030 		    (sps->sps_dlstate != DL_UNATTACHED) ||
1031 		    (iop->ioc_count != sizeof (uint32_t))) {
1032 			break;		/* return EINVAL */
1033 		} else if (mp->b_cont == NULL) {
1034 			error = EPROTO;
1035 			break;
1036 		}
1037 		ASSERT(mp->b_cont->b_rptr != NULL);
1038 		/* If there's something here, it's detached. */
1039 		if (ppa != NULL) {
1040 			sppp_remove_ppa(sps);
1041 		}
1042 		ppa_id = *(uint32_t *)mp->b_cont->b_rptr;
1043 		ppa = sppp_find_ppa(ppa_id);
1044 		/*
1045 		 * If we can't find it, then it's either because the requestor
1046 		 * has supplied a wrong ppa_id to be attached to, or because
1047 		 * the control stream for the specified ppa_id has been closed
1048 		 * before we get here.
1049 		 */
1050 		if (ppa == NULL) {
1051 			error = ENOENT;
1052 			break;
1053 		}
1054 		/*
1055 		 * Preallocate the hangup message so that we're always
1056 		 * able to send this upstream in the event of a
1057 		 * catastrophic failure.
1058 		 */
1059 		if ((sps->sps_hangup = allocb(1, BPRI_MED)) == NULL) {
1060 			error = ENOSR;
1061 			break;
1062 		}
1063 		/*
1064 		 * There are two ways to attach a stream to a ppa: one is
1065 		 * through DLPI (DL_ATTACH_REQ) and the other is through
1066 		 * PPPIO_ATTACH. This is why we need to distinguish whether or
1067 		 * not a stream was allocated via PPPIO_ATTACH, so that we can
1068 		 * properly detach it when we receive PPPIO_DETACH ioctl
1069 		 * request.
1070 		 */
1071 		sps->sps_flags |= SPS_PIOATTACH;
1072 		sps->sps_ppa = ppa;
1073 		/*
1074 		 * Add this stream to the head of the list of sibling streams
1075 		 * which belong to the same ppa as specified.
1076 		 */
1077 		rw_enter(&ppa->ppa_sib_lock, RW_WRITER);
1078 		ppa->ppa_refcnt++;
1079 		sps->sps_nextsib = ppa->ppa_streams;
1080 		ppa->ppa_streams = sps;
1081 		rw_exit(&ppa->ppa_sib_lock);
1082 		error = 0;		/* return success */
1083 		break;
1084 	case PPPIO_BLOCKNP:
1085 	case PPPIO_UNBLOCKNP:
1086 		if (iop->ioc_cr == NULL ||
1087 		    secpolicy_net_config(iop->ioc_cr, B_FALSE) != 0) {
1088 			error = EPERM;
1089 			break;
1090 		}
1091 		error = miocpullup(mp, sizeof (uint16_t));
1092 		if (error != 0)
1093 			break;
1094 		ASSERT(mp->b_cont->b_rptr != NULL);
1095 		proto = *(uint16_t *)mp->b_cont->b_rptr;
1096 		if (iop->ioc_cmd == PPPIO_BLOCKNP) {
1097 			uint32_t npflagpos = sppp_ppp2np(proto);
1098 			/*
1099 			 * Mark proto as blocked in ppa_npflag until the
1100 			 * corresponding queues for proto have been plumbed.
1101 			 */
1102 			if (npflagpos != 0) {
1103 				mutex_enter(&ppa->ppa_npmutex);
1104 				ppa->ppa_npflag |= (1 << npflagpos);
1105 				mutex_exit(&ppa->ppa_npmutex);
1106 			} else {
1107 				error = EINVAL;
1108 			}
1109 		} else {
1110 			/*
1111 			 * reset ppa_npflag and release proto
1112 			 * packets that were being held in control queue.
1113 			 */
1114 			sppp_release_pkts(ppa, proto);
1115 		}
1116 		break;
1117 	case PPPIO_DEBUG:
1118 		if (iop->ioc_cr == NULL ||
1119 		    secpolicy_net_config(iop->ioc_cr, B_FALSE) != 0) {
1120 			error = EPERM;
1121 			break;
1122 		} else if (iop->ioc_count != sizeof (uint32_t)) {
1123 			break;		/* return EINVAL */
1124 		} else if (mp->b_cont == NULL) {
1125 			error = EPROTO;
1126 			break;
1127 		}
1128 		ASSERT(mp->b_cont->b_rptr != NULL);
1129 		dbgcmd = *(uint32_t *)mp->b_cont->b_rptr;
1130 		/*
1131 		 * We accept PPPDBG_LOG + PPPDBG_DRIVER value as an indication
1132 		 * that SPS_KDEBUG needs to be enabled for this upper stream.
1133 		 */
1134 		if (dbgcmd == PPPDBG_LOG + PPPDBG_DRIVER) {
1135 			sps->sps_flags |= SPS_KDEBUG;
1136 			error = 0;	/* return success */
1137 			break;
1138 		}
1139 		/*
1140 		 * Otherwise, for any other values, we send them down only if
1141 		 * there is an attachment and if the attachment has something
1142 		 * linked underneath it.
1143 		 */
1144 		if ((ppa == NULL) || (ppa->ppa_lower_wq == NULL)) {
1145 			error = ENOLINK;
1146 			break;
1147 		}
1148 		mutex_enter(&ppa->ppa_sta_lock);
1149 		/*
1150 		 * See comments in PPPIO_GETSTAT64 case
1151 		 * in sppp_ioctl().
1152 		 */
1153 		if (IS_SPS_IOCQ(sps)) {
1154 			mutex_exit(&ppa->ppa_sta_lock);
1155 			if (!putq(q, mp)) {
1156 				error = EAGAIN;
1157 				break;
1158 			}
1159 			return;
1160 		} else {
1161 			ppa->ppa_ioctlsfwd++;
1162 			/*
1163 			 * Record the ioctl CMD & ID -
1164 			 * this will be used to check the
1165 			 * ACK or NAK responses coming from below.
1166 			 */
1167 			sps->sps_ioc_id = iop->ioc_id;
1168 			sps->sps_flags |= SPS_IOCQ;
1169 			mutex_exit(&ppa->ppa_sta_lock);
1170 		}
1171 		putnext(ppa->ppa_lower_wq, mp);
1172 		return;			/* don't ack or nak the request */
1173 	case PPPIO_DETACH:
1174 		if (!IS_SPS_PIOATTACH(sps)) {
1175 			break;		/* return EINVAL */
1176 		}
1177 		/*
1178 		 * The SPS_PIOATTACH flag set on the stream tells us that
1179 		 * the ppa field is still valid. In the event that the control
1180 		 * stream be closed prior to this stream's detachment, the
1181 		 * SPS_PIOATTACH flag would have been cleared from this stream
1182 		 * during close; in that case we won't get here.
1183 		 */
1184 		ASSERT(ppa != NULL);
1185 		ASSERT(ppa->ppa_ctl != sps);
1186 		ASSERT(sps->sps_dlstate == DL_UNATTACHED);
1187 
1188 		/*
1189 		 * We don't actually detach anything until the stream is
1190 		 * closed or reattached.
1191 		 */
1192 
1193 		sps->sps_flags &= ~SPS_PIOATTACH;
1194 		error = 0;		/* return success */
1195 		break;
1196 	case PPPIO_LASTMOD:
1197 		if (!IS_SPS_CONTROL(sps)) {
1198 			break;		/* return EINVAL */
1199 		}
1200 		ASSERT(ppa != NULL);
1201 		ppa->ppa_flags |= PPA_LASTMOD;
1202 		error = 0;		/* return success */
1203 		break;
1204 	case PPPIO_MRU:
1205 		if (!IS_SPS_CONTROL(sps) ||
1206 		    (iop->ioc_count != sizeof (uint32_t))) {
1207 			break;		/* return EINVAL */
1208 		} else if (mp->b_cont == NULL) {
1209 			error = EPROTO;
1210 			break;
1211 		}
1212 		ASSERT(ppa != NULL);
1213 		ASSERT(mp->b_cont->b_rptr != NULL);
1214 		mru = *(uint32_t *)mp->b_cont->b_rptr;
1215 		if ((mru <= 0) || (mru > PPP_MAXMRU)) {
1216 			error = EPROTO;
1217 			break;
1218 		}
1219 		if (mru < PPP_MRU) {
1220 			mru = PPP_MRU;
1221 		}
1222 		ppa->ppa_mru = (uint16_t)mru;
1223 		/*
1224 		 * If there's something beneath this driver for the ppa, then
		 * inform it (or them) of the MRU size. Only do this if we
1226 		 * are not the last PPP module on the stream.
1227 		 */
1228 		if (!IS_PPA_LASTMOD(ppa) && (ppa->ppa_lower_wq != NULL)) {
1229 			(void) putctl4(ppa->ppa_lower_wq, M_CTL, PPPCTL_MRU,
1230 			    mru);
1231 		}
1232 		error = 0;		/* return success */
1233 		break;
1234 	case PPPIO_MTU:
1235 		if (!IS_SPS_CONTROL(sps) ||
1236 		    (iop->ioc_count != sizeof (uint32_t))) {
1237 			break;		/* return EINVAL */
1238 		} else if (mp->b_cont == NULL) {
1239 			error = EPROTO;
1240 			break;
1241 		}
1242 		ASSERT(ppa != NULL);
1243 		ASSERT(mp->b_cont->b_rptr != NULL);
1244 		mtu = *(uint32_t *)mp->b_cont->b_rptr;
1245 		if ((mtu <= 0) || (mtu > PPP_MAXMTU)) {
1246 			error = EPROTO;
1247 			break;
1248 		}
1249 		ppa->ppa_mtu = (uint16_t)mtu;
1250 		/*
1251 		 * If there's something beneath this driver for the ppa, then
1252 		 * inform it (or them) of the MTU size. Only do this if we
1253 		 * are not the last PPP module on the stream.
1254 		 */
1255 		if (!IS_PPA_LASTMOD(ppa) && (ppa->ppa_lower_wq != NULL)) {
1256 			(void) putctl4(ppa->ppa_lower_wq, M_CTL, PPPCTL_MTU,
1257 			    mtu);
1258 		}
1259 		error = 0;		/* return success */
1260 		break;
1261 	case PPPIO_USETIMESTAMP:
1262 		if (!IS_SPS_CONTROL(sps)) {
1263 			break;		/* return EINVAL */
1264 		}
1265 		if (!IS_PPA_TIMESTAMP(ppa)) {
1266 			hrtime = gethrtime();
1267 			ppa->ppa_lasttx = ppa->ppa_lastrx = hrtime;
1268 			ppa->ppa_flags |= PPA_TIMESTAMP;
1269 		}
1270 		error = 0;
1271 		break;
1272 	}
1273 
1274 	if (error == 0) {
1275 		/* Success; tell the user */
1276 		miocack(q, mp, count, 0);
1277 	} else {
1278 		/* Failure; send error back upstream */
1279 		miocnak(q, mp, 0, error);
1280 	}
1281 }
1282 
1283 /*
1284  * sppp_outer_ioctl()
1285  *
1286  * MT-Perimeters:
1287  *    exclusive inner, exclusive outer
1288  *
1289  * Description:
1290  *    Called by sppp_uwput as a result of receiving ioctls which require
1291  *    an exclusive access at the outer perimeter.
1292  */
static void
sppp_outer_ioctl(queue_t *q, mblk_t *mp)
{
	spppstr_t	*sps;		/* upper stream issuing the ioctl */
	spppstr_t	*nextsib;	/* cursor for sibling-stream walks */
	queue_t		*lwq;		/* lower write queue from the linkblk */
	sppa_t		*ppa;
	struct iocblk	*iop;
	int		error = EINVAL;	/* nak with EINVAL unless a case succeeds */
	int		count = 0;	/* payload byte count returned on ack */
	uint32_t	ppa_id;
	mblk_t		*nmp;

	ASSERT(q != NULL && q->q_ptr != NULL);
	ASSERT(mp != NULL && mp->b_rptr != NULL);

	sps = (spppstr_t *)q->q_ptr;
	ppa = sps->sps_ppa;		/* NULL if this stream is unattached */
	iop = (struct iocblk *)mp->b_rptr;
	switch (iop->ioc_cmd) {
	case I_LINK:
		/*
		 * Link a lower (driver) stream beneath this ppa. Only the
		 * control stream may do this, and only when nothing is
		 * linked below already.
		 */
		if (!IS_SPS_CONTROL(sps)) {
			break;		/* return EINVAL */
		} else if (ppa->ppa_lower_wq != NULL) {
			error = EEXIST;
			break;
		}
		ASSERT(ppa->ppa_ctl != NULL);
		ASSERT(sps->sps_npmode == NPMODE_PASS);
		ASSERT(mp->b_cont != NULL && mp->b_cont->b_rptr != NULL);

		/* The I_LINK linkblk carries the new lower write queue. */
		lwq = ((struct linkblk *)mp->b_cont->b_rptr)->l_qbot;
		ASSERT(lwq != NULL);

		ppa->ppa_lower_wq = lwq;
		/* Both halves of the lower stream point back at the ppa. */
		lwq->q_ptr = RD(lwq)->q_ptr = (caddr_t)ppa;
		/*
		 * Unblock upper network streams which now feed this lower
		 * stream. We don't need to hold ppa_sib_lock here, since we
		 * are writer at the outer perimeter.
		 */
		if (WR(sps->sps_rq)->q_first != NULL)
			qenable(WR(sps->sps_rq));
		for (nextsib = ppa->ppa_streams; nextsib != NULL;
		    nextsib = nextsib->sps_nextsib) {
			nextsib->sps_npmode = NPMODE_PASS;
			if (WR(nextsib->sps_rq)->q_first != NULL) {
				qenable(WR(nextsib->sps_rq));
			}
		}
		/*
		 * Send useful information down to the modules which are now
		 * linked below this driver (for this particular ppa). Only
		 * do this if we are not the last PPP module on the stream.
		 */
		if (!IS_PPA_LASTMOD(ppa)) {
			(void) putctl8(lwq, M_CTL, PPPCTL_UNIT,
			    ppa->ppa_ppa_id);
			(void) putctl4(lwq, M_CTL, PPPCTL_MRU, ppa->ppa_mru);
			(void) putctl4(lwq, M_CTL, PPPCTL_MTU, ppa->ppa_mtu);
		}

		if (IS_SPS_KDEBUG(sps)) {
			SPDEBUG(PPP_DRV_NAME
			    "/%d: I_LINK lwq=0x%p sps=0x%p flags=0x%b ppa=0x%p "
			    "flags=0x%b\n", sps->sps_mn_id,
			    (void *)ppa->ppa_lower_wq, (void *)sps,
			    sps->sps_flags, SPS_FLAGS_STR,
			    (void *)ppa, ppa->ppa_flags,
			    PPA_FLAGS_STR);
		}
		error = 0;		/* return success */
		break;
	case I_UNLINK:
		ASSERT(IS_SPS_CONTROL(sps));
		ASSERT(ppa != NULL);
		lwq = ppa->ppa_lower_wq;
		ASSERT(mp->b_cont != NULL && mp->b_cont->b_rptr != NULL);
		ASSERT(lwq == ((struct linkblk *)mp->b_cont->b_rptr)->l_qbot);

		if (IS_SPS_KDEBUG(sps)) {
			SPDEBUG(PPP_DRV_NAME
			    "/%d: I_UNLINK lwq=0x%p sps=0x%p flags=0x%b "
			    "ppa=0x%p flags=0x%b\n", sps->sps_mn_id,
			    (void *)lwq, (void *)sps, sps->sps_flags,
			    SPS_FLAGS_STR, (void *)ppa, ppa->ppa_flags,
			    PPA_FLAGS_STR);
		}
		/*
		 * While accessing the outer perimeter exclusively, we
		 * disassociate our ppa's lower_wq from the lower stream linked
		 * beneath us, and we also disassociate our control stream from
		 * the q_ptr of the lower stream.
		 */
		lwq->q_ptr = RD(lwq)->q_ptr = NULL;
		ppa->ppa_lower_wq = NULL;
		/*
		 * Unblock streams which now feed back up the control stream,
		 * and acknowledge the request. We don't need to hold
		 * ppa_sib_lock here, since we are writer at the outer
		 * perimeter.
		 */
		if (WR(sps->sps_rq)->q_first != NULL)
			qenable(WR(sps->sps_rq));
		for (nextsib = ppa->ppa_streams; nextsib != NULL;
		    nextsib = nextsib->sps_nextsib) {
			if (WR(nextsib->sps_rq)->q_first != NULL) {
				qenable(WR(nextsib->sps_rq));
			}
		}
		error = 0;		/* return success */
		break;
	case PPPIO_NEWPPA:
		/*
		 * Do sanity check to ensure that we don't accept PPPIO_NEWPPA
		 * on a stream which DLPI is used (since certain DLPI messages
		 * will cause state transition reflected in sps_dlstate,
		 * changing it from its default DL_UNATTACHED value). In other
		 * words, we won't allow a network/snoop stream to become
		 * a control stream.
		 */
		if (iop->ioc_cr == NULL ||
		    secpolicy_net_config(iop->ioc_cr, B_FALSE) != 0) {
			error = EPERM;
			break;
		} else if (IS_SPS_CONTROL(sps) || IS_SPS_PIOATTACH(sps) ||
		    (ppa != NULL) || (sps->sps_dlstate != DL_UNATTACHED)) {
			break;		/* return EINVAL */
		}
		/* Get requested unit number (if any) */
		if (iop->ioc_count == sizeof (uint32_t) && mp->b_cont != NULL)
			ppa_id = *(uint32_t *)mp->b_cont->b_rptr;
		else
			ppa_id = 0;
		/* Get mblk to use for response message */
		nmp = allocb(sizeof (uint32_t), BPRI_MED);
		if (nmp == NULL) {
			error = ENOSR;
			break;
		}
		if (mp->b_cont != NULL) {
			freemsg(mp->b_cont);
		}
		mp->b_cont = nmp;		/* chain our response mblk */
		/*
		 * Walk the global ppa list and determine the lowest
		 * available ppa_id number to be used.  A requested id of
		 * (uint32_t)-2 instead searches for an existing ppa whose
		 * control stream has gone away, so it can be reused below.
		 */
		if (ppa_id == (uint32_t)-1)
			ppa_id = 0;
		for (ppa = ppa_list; ppa != NULL; ppa = ppa->ppa_nextppa) {
			if (ppa_id == (uint32_t)-2) {
				if (ppa->ppa_ctl == NULL)
					break;
			} else {
				if (ppa_id < ppa->ppa_ppa_id)
					break;
				if (ppa_id == ppa->ppa_ppa_id)
					++ppa_id;
			}
		}
		if (ppa_id == (uint32_t)-2) {
			if (ppa == NULL) {
				/* No orphaned ppa available to adopt. */
				error = ENXIO;
				break;
			}
			/* Clear timestamp and lastmod flags */
			ppa->ppa_flags = 0;
		} else {
			ppa = sppp_create_ppa(ppa_id);
			if (ppa == NULL) {
				error = ENOMEM;
				break;
			}
		}

		sps->sps_ppa = ppa;		/* chain the ppa structure */
		sps->sps_npmode = NPMODE_PASS;	/* network packets may travel */
		sps->sps_flags |= SPS_CONTROL;	/* this is the control stream */

		ppa->ppa_refcnt++;		/* new PPA reference */
		ppa->ppa_ctl = sps;		/* back ptr to upper stream */
		/*
		 * Return the newly created ppa_id to the requestor and
		 * acknowledge the request.
		 */
		*(uint32_t *)nmp->b_wptr = ppa->ppa_ppa_id;
		nmp->b_wptr += sizeof (uint32_t);

		if (IS_SPS_KDEBUG(sps)) {
			SPDEBUG(PPP_DRV_NAME
			    "/%d: PPPIO_NEWPPA ppa_id=%d sps=0x%p flags=0x%b "
			    "ppa=0x%p flags=0x%b\n", sps->sps_mn_id, ppa_id,
			    (void *)sps, sps->sps_flags, SPS_FLAGS_STR,
			    (void *)ppa, ppa->ppa_flags,
			    PPA_FLAGS_STR);
		}
		count = msgsize(nmp);
		error = 0;
		break;
	}

	if (error == 0) {
		/* Success; tell the user. */
		miocack(q, mp, count, 0);
	} else {
		/* Failure; send error back upstream. */
		miocnak(q, mp, 0, error);
	}
}
1503 
1504 /*
1505  * sppp_send()
1506  *
1507  * MT-Perimeters:
1508  *    shared inner, shared outer.
1509  *
1510  * Description:
1511  *    Called by sppp_uwput to handle M_DATA message type.  Returns
1512  *    queue_t for putnext, or NULL to mean that the packet was
1513  *    handled internally.
1514  */
static queue_t *
sppp_send(queue_t *q, mblk_t **mpp, spppstr_t *sps)
{
	mblk_t	*mp;
	sppa_t	*ppa;
	int	is_promisc;	/* count of promiscuous siblings on the ppa */
	int	msize;		/* total data bytes in the message */
	int	error = 0;
	queue_t	*nextq;

	ASSERT(mpp != NULL);
	mp = *mpp;
	ASSERT(q != NULL && q->q_ptr != NULL);
	ASSERT(mp != NULL && mp->b_rptr != NULL);
	ASSERT(sps != NULL);
	ASSERT(q->q_ptr == sps);
	/*
	 * We only let M_DATA through if the sender is either the control
	 * stream (for PPP control packets) or one of the network streams
	 * (for IP packets) in IP fastpath mode. If this stream is not attached
	 * to any ppas, then discard data coming down through this stream.
	 */
	ppa = sps->sps_ppa;
	if (ppa == NULL) {
		ASSERT(!IS_SPS_CONTROL(sps));
		error = ENOLINK;
	} else if (!IS_SPS_CONTROL(sps) && !IS_SPS_FASTPATH(sps)) {
		error = EPROTO;
	}
	if (error != 0) {
		/* Bounce the message back upstream as an M_ERROR. */
		merror(q, mp, error);
		return (NULL);
	}
	msize = msgdsize(mp);
	if (msize > (ppa->ppa_mtu + PPP_HDRLEN)) {
		/* Log, and send it anyway */
		mutex_enter(&ppa->ppa_sta_lock);
		ppa->ppa_otoolongs++;
		mutex_exit(&ppa->ppa_sta_lock);
	} else if (msize < PPP_HDRLEN) {
		/*
		 * Log, and send it anyway. We log it because we get things
		 * in M_DATA form here, which tells us that the sender is
		 * either IP in fastpath transmission mode, or pppd. In both
		 * cases, they are currently expected to send the 4-bytes
		 * PPP header in front of any possible payloads.
		 */
		mutex_enter(&ppa->ppa_sta_lock);
		ppa->ppa_orunts++;
		mutex_exit(&ppa->ppa_sta_lock);
	}

	if (IS_SPS_KDEBUG(sps)) {
		SPDEBUG(PPP_DRV_NAME
		    "/%d: M_DATA send (%d bytes) sps=0x%p flags=0x%b "
		    "ppa=0x%p flags=0x%b\n", sps->sps_mn_id, msize,
		    (void *)sps, sps->sps_flags, SPS_FLAGS_STR,
		    (void *)ppa, ppa->ppa_flags, PPA_FLAGS_STR);
	}
	/*
	 * Should there be any promiscuous stream(s), send the data up
	 * for each promiscuous stream that we recognize. Make sure that
	 * for fastpath, we skip the PPP header in the M_DATA mblk. We skip
	 * the control stream as we obviously never allow the control stream
	 * to become promiscuous and bind to PPP_ALLSAP.
	 */
	rw_enter(&ppa->ppa_sib_lock, RW_READER);
	is_promisc = sps->sps_ppa->ppa_promicnt;
	if (is_promisc) {
		ASSERT(ppa->ppa_streams != NULL);
		sppp_dlprsendup(ppa->ppa_streams, mp, sps->sps_sap, B_TRUE);
	}
	rw_exit(&ppa->ppa_sib_lock);
	/*
	 * Only time-stamp the packet with hrtime if the upper stream
	 * is configured to do so.  PPP control (negotiation) messages
	 * are never considered link activity; only data is activity.
	 */
	if (!IS_SPS_CONTROL(sps) && IS_PPA_TIMESTAMP(ppa)) {
		ppa->ppa_lasttx = gethrtime();
	}
	/*
	 * If there's already a message in the write-side service queue,
	 * then queue this message there as well, otherwise, try to send
	 * it down to the module immediately below us.
	 */
	if (q->q_first != NULL ||
	    (nextq = sppp_outpkt(q, mpp, msize, sps)) == NULL) {
		/* sppp_outpkt may have consumed or freed the message. */
		mp = *mpp;
		if (mp != NULL && putq(q, mp) == 0) {
			mutex_enter(&ppa->ppa_sta_lock);
			ppa->ppa_oqdropped++;
			mutex_exit(&ppa->ppa_sta_lock);
			freemsg(mp);
		}
		return (NULL);
	}
	return (nextq);
}
1614 
1615 /*
1616  * sppp_outpkt()
1617  *
1618  * MT-Perimeters:
1619  *    shared inner, shared outer (if called from sppp_wput, sppp_dlunitdatareq).
1620  *    exclusive inner, shared outer (if called from sppp_wsrv).
1621  *
1622  * Description:
1623  *    Called from 1) sppp_uwput when processing a M_DATA fastpath message,
1624  *    or 2) sppp_uwsrv when processing the upper write-side service queue.
1625  *    For both cases, it prepares to send the data to the module below
1626  *    this driver if there is a lower stream linked underneath. If none, then
1627  *    the data will be sent upstream via the control channel to pppd.
1628  *
1629  * Returns:
1630  *	Non-NULL queue_t if message should be sent now, otherwise
1631  *	if *mpp == NULL, then message was freed, otherwise put *mpp
1632  *	(back) on the queue.  (Does not do putq/putbq, since it's
1633  *	called both from srv and put procedures.)
1634  */
static queue_t *
sppp_outpkt(queue_t *q, mblk_t **mpp, int msize, spppstr_t *sps)
{
	mblk_t		*mp;
	sppa_t		*ppa;
	enum NPmode	npmode;		/* network-protocol mode of this stream */
	mblk_t		*mpnew;		/* link-status notification for pppd */

	ASSERT(mpp != NULL);
	mp = *mpp;
	ASSERT(q != NULL && q->q_ptr != NULL);
	ASSERT(mp != NULL && mp->b_rptr != NULL);
	ASSERT(sps != NULL);

	ppa = sps->sps_ppa;
	npmode = sps->sps_npmode;

	if (npmode == NPMODE_QUEUE) {
		/* Leave *mpp set so the caller re-queues the message. */
		ASSERT(!IS_SPS_CONTROL(sps));
		return (NULL);	/* queue it for later */
	} else if (ppa == NULL || ppa->ppa_ctl == NULL ||
	    npmode == NPMODE_DROP || npmode == NPMODE_ERROR) {
		/*
		 * This can not be the control stream, as it must always have
		 * a valid ppa, and its npmode must always be NPMODE_PASS.
		 */
		ASSERT(!IS_SPS_CONTROL(sps));
		if (npmode == NPMODE_DROP) {
			freemsg(mp);
		} else {
			/*
			 * If we no longer have the control stream, or if the
			 * mode is set to NPMODE_ERROR, then we need to tell IP
			 * that the interface need to be marked as down. In
			 * other words, we tell IP to be quiescent.
			 */
			merror(q, mp, EPROTO);
		}
		*mpp = NULL;
		return (NULL);	/* don't queue it */
	}
	/*
	 * Do we have a driver stream linked underneath ? If not, we need to
	 * notify pppd that the link needs to be brought up and configure
	 * this upper stream to drop subsequent outgoing packets. This is
	 * for demand-dialing, in which case pppd has done the IP plumbing
	 * but hasn't linked the driver stream underneath us. Therefore, when
	 * a packet is sent down the IP interface, a notification message
	 * will be sent up the control stream to pppd in order for it to
	 * establish the physical link. The driver stream is then expected
	 * to be linked underneath after physical link establishment is done.
	 */
	if (ppa->ppa_lower_wq == NULL) {
		ASSERT(ppa->ppa_ctl != NULL);
		ASSERT(ppa->ppa_ctl->sps_rq != NULL);

		*mpp = NULL;
		mpnew = create_lsmsg(PPP_LINKSTAT_NEEDUP);
		if (mpnew == NULL) {
			freemsg(mp);
			mutex_enter(&ppa->ppa_sta_lock);
			ppa->ppa_allocbfail++;
			mutex_exit(&ppa->ppa_sta_lock);
			return (NULL);	/* don't queue it */
		}
		/* Include the data in the message for logging. */
		mpnew->b_cont = mp;
		mutex_enter(&ppa->ppa_sta_lock);
		ppa->ppa_lsneedup++;
		mutex_exit(&ppa->ppa_sta_lock);
		/*
		 * We need to set the mode to NPMODE_DROP, but should only
		 * do so when this stream is not the control stream.
		 */
		if (!IS_SPS_CONTROL(sps)) {
			sps->sps_npmode = NPMODE_DROP;
		}
		putnext(ppa->ppa_ctl->sps_rq, mpnew);
		return (NULL);	/* don't queue it */
	}
	/*
	 * If so, then try to send it down. The lower queue is only ever
	 * detached while holding an exclusive lock on the whole driver,
	 * so we can be confident that the lower queue is still there.
	 */
	if (bcanputnext(ppa->ppa_lower_wq, mp->b_band)) {
		mutex_enter(&ppa->ppa_sta_lock);
		ppa->ppa_stats.p.ppp_opackets++;
		if (IS_SPS_CONTROL(sps)) {
			ppa->ppa_opkt_ctl++;
		}
		ppa->ppa_stats.p.ppp_obytes += msize;
		mutex_exit(&ppa->ppa_sta_lock);
		return (ppa->ppa_lower_wq);	/* don't queue it */
	}
	return (NULL);	/* queue it for later */
}
1732 
1733 /*
1734  * sppp_lwsrv()
1735  *
1736  * MT-Perimeters:
1737  *    exclusive inner, shared outer.
1738  *
1739  * Description:
1740  *    Lower write-side service procedure. No messages are ever placed on
1741  *    the write queue here, this just back-enables all upper write side
1742  *    service procedures.
1743  */
1744 void
1745 sppp_lwsrv(queue_t *q)
1746 {
1747 	sppa_t		*ppa;
1748 	spppstr_t	*nextsib;
1749 
1750 	ASSERT(q != NULL && q->q_ptr != NULL);
1751 	ppa = (sppa_t *)q->q_ptr;
1752 	ASSERT(ppa != NULL);
1753 
1754 	rw_enter(&ppa->ppa_sib_lock, RW_READER);
1755 	if ((nextsib = ppa->ppa_ctl) != NULL &&
1756 	    WR(nextsib->sps_rq)->q_first != NULL)
1757 		qenable(WR(nextsib->sps_rq));
1758 	for (nextsib = ppa->ppa_streams; nextsib != NULL;
1759 	    nextsib = nextsib->sps_nextsib) {
1760 		if (WR(nextsib->sps_rq)->q_first != NULL) {
1761 			qenable(WR(nextsib->sps_rq));
1762 		}
1763 	}
1764 	rw_exit(&ppa->ppa_sib_lock);
1765 }
1766 
1767 /*
1768  * sppp_lrput()
1769  *
1770  * MT-Perimeters:
1771  *    shared inner, shared outer.
1772  *
1773  * Description:
1774  *    Lower read-side put procedure. Messages from below get here.
1775  *    Data messages are handled separately to limit stack usage
1776  *    going into IP.
1777  *
1778  *    Note that during I_UNLINK processing, it's possible for a downstream
1779  *    message to enable upstream data (due to pass_wput() removing the
1780  *    SQ_BLOCKED flag), and thus we must protect against a NULL sppa pointer.
1781  *    In this case, the only thing above us is passthru, and we might as well
1782  *    discard.
1783  */
1784 void
1785 sppp_lrput(queue_t *q, mblk_t *mp)
1786 {
1787 	sppa_t		*ppa;
1788 	spppstr_t	*sps;
1789 
1790 	if ((ppa = q->q_ptr) == NULL) {
1791 		freemsg(mp);
1792 		return;
1793 	}
1794 
1795 	sps = ppa->ppa_ctl;
1796 
1797 	if (MTYPE(mp) != M_DATA) {
1798 		sppp_recv_nondata(q, mp, sps);
1799 	} else if (sps == NULL) {
1800 		freemsg(mp);
1801 	} else if ((q = sppp_recv(q, &mp, sps)) != NULL) {
1802 		putnext(q, mp);
1803 	}
1804 }
1805 
1806 /*
1807  * sppp_recv_nondata()
1808  *
1809  * MT-Perimeters:
1810  *    shared inner, shared outer.
1811  *
1812  * Description:
1813  *    All received non-data messages come through here.
1814  */
1815 static void
1816 sppp_recv_nondata(queue_t *q, mblk_t *mp, spppstr_t *ctlsps)
1817 {
1818 	sppa_t		*ppa;
1819 	spppstr_t	*destsps;
1820 	struct iocblk	*iop;
1821 
1822 	ppa = (sppa_t *)q->q_ptr;
1823 	ctlsps = ppa->ppa_ctl;
1824 
1825 	switch (MTYPE(mp)) {
1826 	case M_CTL:
1827 		mutex_enter(&ppa->ppa_sta_lock);
1828 		if (*mp->b_rptr == PPPCTL_IERROR) {
1829 			ppa->ppa_stats.p.ppp_ierrors++;
1830 			ppa->ppa_ierr_low++;
1831 			ppa->ppa_mctlsknown++;
1832 		} else if (*mp->b_rptr == PPPCTL_OERROR) {
1833 			ppa->ppa_stats.p.ppp_oerrors++;
1834 			ppa->ppa_oerr_low++;
1835 			ppa->ppa_mctlsknown++;
1836 		} else {
1837 			ppa->ppa_mctlsunknown++;
1838 		}
1839 		mutex_exit(&ppa->ppa_sta_lock);
1840 		freemsg(mp);
1841 		break;
1842 	case M_IOCTL:
1843 		miocnak(q, mp, 0, EINVAL);
1844 		break;
1845 	case M_IOCACK:
1846 	case M_IOCNAK:
1847 		iop = (struct iocblk *)mp->b_rptr;
1848 		ASSERT(iop != NULL);
1849 		/*
1850 		 * Attempt to match up the response with the stream that the
1851 		 * request came from. If ioc_id doesn't match the one that we
1852 		 * recorded, then discard this message.
1853 		 */
1854 		rw_enter(&ppa->ppa_sib_lock, RW_READER);
1855 		if ((destsps = ctlsps) == NULL ||
1856 		    destsps->sps_ioc_id != iop->ioc_id) {
1857 			destsps = ppa->ppa_streams;
1858 			while (destsps != NULL) {
1859 				if (destsps->sps_ioc_id == iop->ioc_id) {
1860 					break;	/* found the upper stream */
1861 				}
1862 				destsps = destsps->sps_nextsib;
1863 			}
1864 		}
1865 		rw_exit(&ppa->ppa_sib_lock);
1866 		if (destsps == NULL) {
1867 			mutex_enter(&ppa->ppa_sta_lock);
1868 			ppa->ppa_ioctlsfwderr++;
1869 			mutex_exit(&ppa->ppa_sta_lock);
1870 			freemsg(mp);
1871 			break;
1872 		}
1873 		mutex_enter(&ppa->ppa_sta_lock);
1874 		ppa->ppa_ioctlsfwdok++;
1875 
1876 		/*
1877 		 * Clear SPS_IOCQ and enable the lower write side queue,
1878 		 * this would allow the upper stream service routine
1879 		 * to start processing the queue for pending messages.
1880 		 * sppp_lwsrv -> sppp_uwsrv.
1881 		 */
1882 		destsps->sps_flags &= ~SPS_IOCQ;
1883 		mutex_exit(&ppa->ppa_sta_lock);
1884 		qenable(WR(destsps->sps_rq));
1885 
1886 		putnext(destsps->sps_rq, mp);
1887 		break;
1888 	case M_HANGUP:
1889 		/*
1890 		 * Free the original mblk_t. We don't really want to send
1891 		 * a M_HANGUP message upstream, so we need to translate this
1892 		 * message into something else.
1893 		 */
1894 		freemsg(mp);
1895 		if (ctlsps == NULL)
1896 			break;
1897 		mp = create_lsmsg(PPP_LINKSTAT_HANGUP);
1898 		if (mp == NULL) {
1899 			mutex_enter(&ppa->ppa_sta_lock);
1900 			ppa->ppa_allocbfail++;
1901 			mutex_exit(&ppa->ppa_sta_lock);
1902 			break;
1903 		}
1904 		mutex_enter(&ppa->ppa_sta_lock);
1905 		ppa->ppa_lsdown++;
1906 		mutex_exit(&ppa->ppa_sta_lock);
1907 		putnext(ctlsps->sps_rq, mp);
1908 		break;
1909 	case M_FLUSH:
1910 		if (*mp->b_rptr & FLUSHR) {
1911 			flushq(q, FLUSHDATA);
1912 		}
1913 		if (*mp->b_rptr & FLUSHW) {
1914 			*mp->b_rptr &= ~FLUSHR;
1915 			qreply(q, mp);
1916 		} else {
1917 			freemsg(mp);
1918 		}
1919 		break;
1920 	default:
1921 		if (ctlsps != NULL &&
1922 		    (queclass(mp) == QPCTL) || canputnext(ctlsps->sps_rq)) {
1923 			putnext(ctlsps->sps_rq, mp);
1924 		} else {
1925 			mutex_enter(&ppa->ppa_sta_lock);
1926 			ppa->ppa_iqdropped++;
1927 			mutex_exit(&ppa->ppa_sta_lock);
1928 			freemsg(mp);
1929 		}
1930 		break;
1931 	}
1932 }
1933 
1934 /*
1935  * sppp_recv()
1936  *
1937  * MT-Perimeters:
1938  *    shared inner, shared outer.
1939  *
1940  * Description:
1941  *    Receive function called by sppp_lrput.  Finds appropriate
1942  *    receive stream and does accounting.
1943  */
1944 static queue_t *
1945 sppp_recv(queue_t *q, mblk_t **mpp, spppstr_t *ctlsps)
1946 {
1947 	mblk_t		*mp;
1948 	int		len;
1949 	sppa_t		*ppa;
1950 	spppstr_t	*destsps;
1951 	mblk_t		*zmp;
1952 	uint32_t	npflagpos;
1953 
1954 	ASSERT(mpp != NULL);
1955 	mp = *mpp;
1956 	ASSERT(q != NULL && q->q_ptr != NULL);
1957 	ASSERT(mp != NULL && mp->b_rptr != NULL);
1958 	ASSERT(ctlsps != NULL);
1959 	ASSERT(IS_SPS_CONTROL(ctlsps));
1960 	ppa = ctlsps->sps_ppa;
1961 	ASSERT(ppa != NULL && ppa->ppa_ctl != NULL);
1962 
1963 	len = msgdsize(mp);
1964 	mutex_enter(&ppa->ppa_sta_lock);
1965 	ppa->ppa_stats.p.ppp_ibytes += len;
1966 	mutex_exit(&ppa->ppa_sta_lock);
1967 	/*
1968 	 * If the entire data size of the mblk is less than the length of the
1969 	 * PPP header, then free it. We can't do much with such message anyway,
1970 	 * since we can't really determine what the PPP protocol type is.
1971 	 */
1972 	if (len < PPP_HDRLEN) {
1973 		/* Log, and free it */
1974 		mutex_enter(&ppa->ppa_sta_lock);
1975 		ppa->ppa_irunts++;
1976 		mutex_exit(&ppa->ppa_sta_lock);
1977 		freemsg(mp);
1978 		return (NULL);
1979 	} else if (len > (ppa->ppa_mru + PPP_HDRLEN)) {
1980 		/* Log, and accept it anyway */
1981 		mutex_enter(&ppa->ppa_sta_lock);
1982 		ppa->ppa_itoolongs++;
1983 		mutex_exit(&ppa->ppa_sta_lock);
1984 	}
1985 	/*
1986 	 * We need at least be able to read the PPP protocol from the header,
1987 	 * so if the first message block is too small, then we concatenate the
1988 	 * rest of the following blocks into one message.
1989 	 */
1990 	if (MBLKL(mp) < PPP_HDRLEN) {
1991 		zmp = msgpullup(mp, PPP_HDRLEN);
1992 		freemsg(mp);
1993 		mp = zmp;
1994 		if (mp == NULL) {
1995 			mutex_enter(&ppa->ppa_sta_lock);
1996 			ppa->ppa_allocbfail++;
1997 			mutex_exit(&ppa->ppa_sta_lock);
1998 			return (NULL);
1999 		}
2000 		*mpp = mp;
2001 	}
	/*
	 * If the matching network-layer upper stream for this PPP
	 * protocol (sap) has not yet been plumbed and configured, hold
	 * the packet on the control queue until it is.
	 */
2007 	npflagpos = sppp_ppp2np(PPP_PROTOCOL(mp->b_rptr));
2008 	mutex_enter(&ppa->ppa_npmutex);
2009 	if (npflagpos != 0 && (ppa->ppa_npflag & (1 << npflagpos))) {
2010 		/*
2011 		 * proto is currently blocked; Hold up to 4 packets
2012 		 * in the kernel.
2013 		 */
2014 		if (ppa->ppa_holdpkts[npflagpos] > 3 ||
2015 		    putq(ctlsps->sps_rq, mp) == 0)
2016 			freemsg(mp);
2017 		else
2018 			ppa->ppa_holdpkts[npflagpos]++;
2019 		mutex_exit(&ppa->ppa_npmutex);
2020 		return (NULL);
2021 	}
2022 	mutex_exit(&ppa->ppa_npmutex);
2023 	/*
2024 	 * Try to find a matching network-layer upper stream for the specified
2025 	 * PPP protocol (sap), and if none is found, send this frame up the
2026 	 * control stream.
2027 	 */
2028 	destsps = sppp_inpkt(q, mp, ctlsps);
2029 	if (destsps == NULL) {
2030 		mutex_enter(&ppa->ppa_sta_lock);
2031 		ppa->ppa_ipkt_ctl++;
2032 		mutex_exit(&ppa->ppa_sta_lock);
2033 		if (canputnext(ctlsps->sps_rq)) {
2034 			if (IS_SPS_KDEBUG(ctlsps)) {
2035 				SPDEBUG(PPP_DRV_NAME
2036 				    "/%d: M_DATA recv (%d bytes) sps=0x%p "
2037 				    "flags=0x%b ppa=0x%p flags=0x%b\n",
2038 				    ctlsps->sps_mn_id, len, (void *)ctlsps,
2039 				    ctlsps->sps_flags, SPS_FLAGS_STR,
2040 				    (void *)ppa, ppa->ppa_flags,
2041 				    PPA_FLAGS_STR);
2042 			}
2043 			return (ctlsps->sps_rq);
2044 		} else {
2045 			mutex_enter(&ppa->ppa_sta_lock);
2046 			ppa->ppa_iqdropped++;
2047 			mutex_exit(&ppa->ppa_sta_lock);
2048 			freemsg(mp);
2049 			return (NULL);
2050 		}
2051 	}
2052 	if (canputnext(destsps->sps_rq)) {
2053 		if (IS_SPS_KDEBUG(destsps)) {
2054 			SPDEBUG(PPP_DRV_NAME
2055 			    "/%d: M_DATA recv (%d bytes) sps=0x%p flags=0x%b "
2056 			    "ppa=0x%p flags=0x%b\n", destsps->sps_mn_id, len,
2057 			    (void *)destsps, destsps->sps_flags,
2058 			    SPS_FLAGS_STR, (void *)ppa, ppa->ppa_flags,
2059 			    PPA_FLAGS_STR);
2060 		}
2061 		/*
2062 		 * If fastpath is enabled on the network-layer stream, then
2063 		 * make sure we skip over the PPP header, otherwise, we wrap
2064 		 * the message in a DLPI message.
2065 		 */
2066 		if (IS_SPS_FASTPATH(destsps)) {
2067 			mp->b_rptr += PPP_HDRLEN;
2068 			return (destsps->sps_rq);
2069 		} else {
2070 			spppstr_t *uqs = (spppstr_t *)destsps->sps_rq->q_ptr;
2071 			ASSERT(uqs != NULL);
2072 			mp->b_rptr += PPP_HDRLEN;
2073 			mp = sppp_dladdud(uqs, mp, uqs->sps_sap, B_FALSE);
2074 			if (mp != NULL) {
2075 				*mpp = mp;
2076 				return (destsps->sps_rq);
2077 			} else {
2078 				mutex_enter(&ppa->ppa_sta_lock);
2079 				ppa->ppa_allocbfail++;
2080 				mutex_exit(&ppa->ppa_sta_lock);
2081 				/* mp already freed by sppp_dladdud */
2082 				return (NULL);
2083 			}
2084 		}
2085 	} else {
2086 		mutex_enter(&ppa->ppa_sta_lock);
2087 		ppa->ppa_iqdropped++;
2088 		mutex_exit(&ppa->ppa_sta_lock);
2089 		freemsg(mp);
2090 		return (NULL);
2091 	}
2092 }
2093 
2094 /*
2095  * sppp_inpkt()
2096  *
2097  * MT-Perimeters:
2098  *    shared inner, shared outer.
2099  *
2100  * Description:
2101  *    Find the destination upper stream for the received packet, called
2102  *    from sppp_recv.
2103  *
2104  * Returns:
2105  *    ptr to destination upper network stream, or NULL for control stream.
2106  */
2107 /* ARGSUSED */
2108 static spppstr_t *
2109 sppp_inpkt(queue_t *q, mblk_t *mp, spppstr_t *ctlsps)
2110 {
2111 	spppstr_t	*destsps = NULL;
2112 	sppa_t		*ppa;
2113 	uint16_t	proto;
2114 	int		is_promisc;
2115 
2116 	ASSERT(q != NULL && q->q_ptr != NULL);
2117 	ASSERT(mp != NULL && mp->b_rptr != NULL);
2118 	ASSERT(IS_SPS_CONTROL(ctlsps));
2119 	ppa = ctlsps->sps_ppa;
2120 	ASSERT(ppa != NULL);
2121 	/*
2122 	 * From RFC 1661 (Section 2):
2123 	 *
2124 	 * The Protocol field is one or two octets, and its value identifies
2125 	 * the datagram encapsulated in the Information field of the packet.
2126 	 * The field is transmitted and received most significant octet first.
2127 	 *
2128 	 * The structure of this field is consistent with the ISO 3309
2129 	 * extension mechanism for address fields.  All Protocols MUST be odd;
2130 	 * the least significant bit of the least significant octet MUST equal
2131 	 * "1".  Also, all Protocols MUST be assigned such that the least
2132 	 * significant bit of the most significant octet equals "0". Frames
2133 	 * received which don't comply with these rules MUST be treated as
2134 	 * having an unrecognized Protocol.
2135 	 *
2136 	 * Protocol field values in the "0***" to "3***" range identify the
2137 	 * network-layer protocol of specific packets, and values in the
2138 	 * "8***" to "b***" range identify packets belonging to the associated
2139 	 * Network Control Protocols (NCPs), if any.
2140 	 *
2141 	 * Protocol field values in the "4***" to "7***" range are used for
2142 	 * protocols with low volume traffic which have no associated NCP.
2143 	 * Protocol field values in the "c***" to "f***" range identify packets
2144 	 * as link-layer Control Protocols (such as LCP).
2145 	 */
2146 	proto = PPP_PROTOCOL(mp->b_rptr);
2147 	mutex_enter(&ppa->ppa_sta_lock);
2148 	ppa->ppa_stats.p.ppp_ipackets++;
2149 	mutex_exit(&ppa->ppa_sta_lock);
2150 	/*
2151 	 * We check if this is not a network-layer protocol, and if so,
2152 	 * then send this packet up the control stream.
2153 	 */
2154 	if (proto > 0x7fff) {
2155 		goto inpkt_done;	/* send it up the control stream */
2156 	}
2157 	/*
2158 	 * Try to grab the destination upper stream from the network-layer
2159 	 * stream cache for this ppa for PPP_IP (0x0021) or PPP_IPV6 (0x0057)
2160 	 * protocol types. Otherwise, if the type is not known to the cache,
2161 	 * or if its sap can't be matched with any of the upper streams, then
2162 	 * send this packet up the control stream so that it can be rejected.
2163 	 */
2164 	if (proto == PPP_IP) {
2165 		destsps = ppa->ppa_ip_cache;
2166 	} else if (proto == PPP_IPV6) {
2167 		destsps = ppa->ppa_ip6_cache;
2168 	}
2169 	/*
2170 	 * Toss this one away up the control stream if there's no matching sap;
2171 	 * this way the protocol can be rejected (destsps is NULL).
2172 	 */
2173 
2174 inpkt_done:
2175 	/*
2176 	 * Only time-stamp the packet with hrtime if the upper stream
2177 	 * is configured to do so.  PPP control (negotiation) messages
2178 	 * are never considered link activity; only data is activity.
2179 	 */
2180 	if (destsps != NULL && IS_PPA_TIMESTAMP(ppa)) {
2181 		ppa->ppa_lastrx = gethrtime();
2182 	}
2183 	/*
2184 	 * Should there be any promiscuous stream(s), send the data up for
2185 	 * each promiscuous stream that we recognize. We skip the control
2186 	 * stream as we obviously never allow the control stream to become
2187 	 * promiscous and bind to PPP_ALLSAP.
2188 	 */
2189 	rw_enter(&ppa->ppa_sib_lock, RW_READER);
2190 	is_promisc = ppa->ppa_promicnt;
2191 	if (is_promisc) {
2192 		ASSERT(ppa->ppa_streams != NULL);
2193 		sppp_dlprsendup(ppa->ppa_streams, mp, proto, B_TRUE);
2194 	}
2195 	rw_exit(&ppa->ppa_sib_lock);
2196 	return (destsps);
2197 }
2198 
2199 /*
2200  * sppp_kstat_update()
2201  *
2202  * Description:
2203  *    Update per-ppa kstat interface statistics.
2204  */
2205 static int
2206 sppp_kstat_update(kstat_t *ksp, int rw)
2207 {
2208 	register sppa_t		*ppa;
2209 	register sppp_kstats_t	*pppkp;
2210 	register struct pppstat64 *sp;
2211 
2212 	if (rw == KSTAT_WRITE) {
2213 		return (EACCES);
2214 	}
2215 
2216 	ppa = (sppa_t *)ksp->ks_private;
2217 	ASSERT(ppa != NULL);
2218 
2219 	pppkp = (sppp_kstats_t *)ksp->ks_data;
2220 	sp = &ppa->ppa_stats.p;
2221 
2222 	mutex_enter(&ppa->ppa_sta_lock);
2223 	pppkp->allocbfail.value.ui32	= ppa->ppa_allocbfail;
2224 	pppkp->mctlsfwd.value.ui32	= ppa->ppa_mctlsfwd;
2225 	pppkp->mctlsfwderr.value.ui32	= ppa->ppa_mctlsfwderr;
2226 	pppkp->rbytes.value.ui32	= sp->ppp_ibytes;
2227 	pppkp->rbytes64.value.ui64	= sp->ppp_ibytes;
2228 	pppkp->ierrors.value.ui32	= sp->ppp_ierrors;
2229 	pppkp->ierrors_lower.value.ui32	= ppa->ppa_ierr_low;
2230 	pppkp->ioctlsfwd.value.ui32	= ppa->ppa_ioctlsfwd;
2231 	pppkp->ioctlsfwdok.value.ui32	= ppa->ppa_ioctlsfwdok;
2232 	pppkp->ioctlsfwderr.value.ui32	= ppa->ppa_ioctlsfwderr;
2233 	pppkp->ipackets.value.ui32	= sp->ppp_ipackets;
2234 	pppkp->ipackets64.value.ui64	= sp->ppp_ipackets;
2235 	pppkp->ipackets_ctl.value.ui32	= ppa->ppa_ipkt_ctl;
2236 	pppkp->iqdropped.value.ui32	= ppa->ppa_iqdropped;
2237 	pppkp->irunts.value.ui32	= ppa->ppa_irunts;
2238 	pppkp->itoolongs.value.ui32	= ppa->ppa_itoolongs;
2239 	pppkp->lsneedup.value.ui32	= ppa->ppa_lsneedup;
2240 	pppkp->lsdown.value.ui32	= ppa->ppa_lsdown;
2241 	pppkp->mctlsknown.value.ui32	= ppa->ppa_mctlsknown;
2242 	pppkp->mctlsunknown.value.ui32	= ppa->ppa_mctlsunknown;
2243 	pppkp->obytes.value.ui32	= sp->ppp_obytes;
2244 	pppkp->obytes64.value.ui64	= sp->ppp_obytes;
2245 	pppkp->oerrors.value.ui32	= sp->ppp_oerrors;
2246 	pppkp->oerrors_lower.value.ui32	= ppa->ppa_oerr_low;
2247 	pppkp->opackets.value.ui32	= sp->ppp_opackets;
2248 	pppkp->opackets64.value.ui64	= sp->ppp_opackets;
2249 	pppkp->opackets_ctl.value.ui32	= ppa->ppa_opkt_ctl;
2250 	pppkp->oqdropped.value.ui32	= ppa->ppa_oqdropped;
2251 	pppkp->otoolongs.value.ui32	= ppa->ppa_otoolongs;
2252 	pppkp->orunts.value.ui32	= ppa->ppa_orunts;
2253 	mutex_exit(&ppa->ppa_sta_lock);
2254 
2255 	return (0);
2256 }
2257 
2258 /*
2259  * Turn off proto in ppa_npflag to indicate that
2260  * the corresponding network protocol has been plumbed.
2261  * Release proto packets that were being held in the control
2262  * queue in anticipation of this event.
2263  */
static void
sppp_release_pkts(sppa_t *ppa, uint16_t proto)
{
	uint32_t npflagpos = sppp_ppp2np(proto);	/* blocked-bit position; 0 if untracked */
	int count;
	mblk_t *mp;
	uint16_t mp_proto;
	queue_t *q;
	spppstr_t *destsps;

	ASSERT(ppa != NULL);

	/*
	 * Nothing to release unless this protocol is one we track and it
	 * is currently marked blocked.
	 * NOTE(review): ppa_npflag is read here without ppa_npmutex held
	 * and is not re-checked after taking the mutex below -- presumably
	 * benign since this runs when the protocol is being plumbed, but
	 * confirm against the locking rules for ppa_npflag.
	 */
	if (npflagpos == 0 || (ppa->ppa_npflag & (1 << npflagpos)) == 0)
		return;

	/*
	 * Clear the blocked bit and take ownership of the held-packet
	 * count atomically, so new arrivals stop being held from now on.
	 */
	mutex_enter(&ppa->ppa_npmutex);
	ppa->ppa_npflag &= ~(1 << npflagpos);
	count = ppa->ppa_holdpkts[npflagpos];
	ppa->ppa_holdpkts[npflagpos] = 0;
	mutex_exit(&ppa->ppa_npmutex);

	/* Held packets were queued on the control stream's read queue. */
	q = ppa->ppa_ctl->sps_rq;

	while (count > 0) {
		mp = getq(q);
		/*
		 * We counted 'count' held packets for this proto, so the
		 * queue must still yield messages until all are drained.
		 */
		ASSERT(mp != NULL);

		/*
		 * Packets held for a different protocol are put back on
		 * the queue without decrementing count; only packets of
		 * the newly unblocked protocol are released below.
		 */
		mp_proto = PPP_PROTOCOL(mp->b_rptr);
		if (mp_proto !=  proto) {
			(void) putq(q, mp);
			continue;
		}
		count--;
		destsps = NULL;
		/*
		 * Resolve the destination upper stream from the per-ppa
		 * cache; the ASSERT below reflects that only protocols
		 * with a cached upper stream are ever held.
		 */
		if (mp_proto == PPP_IP) {
			destsps = ppa->ppa_ip_cache;
		} else if (mp_proto == PPP_IPV6) {
			destsps = ppa->ppa_ip6_cache;
		}
		ASSERT(destsps != NULL);

		/*
		 * Fastpath upper streams take the frame with the PPP
		 * header stripped; otherwise wrap the payload in a DLPI
		 * unitdata message via sppp_dladdud.
		 */
		if (IS_SPS_FASTPATH(destsps)) {
			mp->b_rptr += PPP_HDRLEN;
		} else {
			spppstr_t *uqs = (spppstr_t *)destsps->sps_rq->q_ptr;
			ASSERT(uqs != NULL);
			mp->b_rptr += PPP_HDRLEN;
			mp = sppp_dladdud(uqs, mp, uqs->sps_sap, B_FALSE);
			if (mp == NULL) {
				/* Allocation failed; count it and move on. */
				mutex_enter(&ppa->ppa_sta_lock);
				ppa->ppa_allocbfail++;
				mutex_exit(&ppa->ppa_sta_lock);
				/* mp already freed by sppp_dladdud */
				continue;
			}
		}

		/* Deliver upstream, or drop if the upper queue is full. */
		if (canputnext(destsps->sps_rq)) {
			putnext(destsps->sps_rq, mp);
		} else {
			mutex_enter(&ppa->ppa_sta_lock);
			ppa->ppa_iqdropped++;
			mutex_exit(&ppa->ppa_sta_lock);
			freemsg(mp);
			continue;
		}
	}
}
2332