xref: /titanic_51/usr/src/uts/common/io/ppp/sppp/sppp.c (revision c498d9365aa68b789215f4f356a9935f70b0fb3b)
1 /*
2  * sppp.c - Solaris STREAMS PPP multiplexing pseudo-driver
3  *
4  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
5  * Use is subject to license terms.
6  *
7  * Permission to use, copy, modify, and distribute this software and its
8  * documentation is hereby granted, provided that the above copyright
9  * notice appears in all copies.
10  *
11  * SUN MAKES NO REPRESENTATION OR WARRANTIES ABOUT THE SUITABILITY OF
12  * THE SOFTWARE, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
13  * TO THE IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
14  * PARTICULAR PURPOSE, OR NON-INFRINGEMENT.  SUN SHALL NOT BE LIABLE FOR
15  * ANY DAMAGES SUFFERED BY LICENSEE AS A RESULT OF USING, MODIFYING OR
16  * DISTRIBUTING THIS SOFTWARE OR ITS DERIVATIVES
17  *
18  * Copyright (c) 1994 The Australian National University.
19  * All rights reserved.
20  *
21  * Permission to use, copy, modify, and distribute this software and its
22  * documentation is hereby granted, provided that the above copyright
23  * notice appears in all copies.  This software is provided without any
24  * warranty, express or implied. The Australian National University
25  * makes no representations about the suitability of this software for
26  * any purpose.
27  *
28  * IN NO EVENT SHALL THE AUSTRALIAN NATIONAL UNIVERSITY BE LIABLE TO ANY
29  * PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
30  * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF
31  * THE AUSTRALIAN NATIONAL UNIVERSITY HAS BEEN ADVISED OF THE POSSIBILITY
32  * OF SUCH DAMAGE.
33  *
34  * THE AUSTRALIAN NATIONAL UNIVERSITY SPECIFICALLY DISCLAIMS ANY WARRANTIES,
35  * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
36  * AND FITNESS FOR A PARTICULAR PURPOSE.  THE SOFTWARE PROVIDED HEREUNDER IS
37  * ON AN "AS IS" BASIS, AND THE AUSTRALIAN NATIONAL UNIVERSITY HAS NO
38  * OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS,
39  * OR MODIFICATIONS.
40  *
41  * This driver is derived from the original SVR4 STREAMS PPP driver
42  * originally written by Paul Mackerras <paul.mackerras@cs.anu.edu.au>.
43  *
44  * Adi Masputra <adi.masputra@sun.com> rewrote and restructured the code
45  * for improved performance and scalability.
46  */
47 
48 #pragma ident	"%Z%%M%	%I%	%E% SMI"
49 #define	RCSID	"$Id: sppp.c,v 1.0 2000/05/08 01:10:12 masputra Exp $"
50 
51 #include <sys/types.h>
52 #include <sys/debug.h>
53 #include <sys/param.h>
54 #include <sys/stat.h>
55 #include <sys/stream.h>
56 #include <sys/stropts.h>
57 #include <sys/sysmacros.h>
58 #include <sys/errno.h>
59 #include <sys/time.h>
60 #include <sys/cmn_err.h>
61 #include <sys/kmem.h>
62 #include <sys/conf.h>
63 #include <sys/dlpi.h>
64 #include <sys/ddi.h>
65 #include <sys/kstat.h>
66 #include <sys/strsun.h>
67 #include <sys/ethernet.h>
68 #include <sys/policy.h>
69 #include <net/ppp_defs.h>
70 #include <net/pppio.h>
71 #include "sppp.h"
72 #include "s_common.h"
73 
74 /*
75  * This is used to tag official Solaris sources.  Please do not define
76  * "INTERNAL_BUILD" when building this software outside of Sun Microsystems.
77  */
78 #ifdef INTERNAL_BUILD
79 /* MODINFO is limited to 32 characters. */
80 const char sppp_module_description[] = "PPP 4.0 mux";
81 #else /* INTERNAL_BUILD */
82 const char sppp_module_description[] = "ANU PPP mux $Revision: 1.0$";
83 
84 /* LINTED */
85 static const char buildtime[] = "Built " __DATE__ " at " __TIME__
86 #ifdef DEBUG
87 " DEBUG"
88 #endif
89 "\n";
90 #endif /* INTERNAL_BUILD */
91 
92 static void	sppp_inner_ioctl(queue_t *, mblk_t *);
93 static void	sppp_outer_ioctl(queue_t *, mblk_t *);
94 static queue_t	*sppp_send(queue_t *, mblk_t **, spppstr_t *);
95 static queue_t	*sppp_recv(queue_t *, mblk_t **, spppstr_t *);
96 static void	sppp_recv_nondata(queue_t *, mblk_t *, spppstr_t *);
97 static queue_t	*sppp_outpkt(queue_t *, mblk_t **, int, spppstr_t *);
98 static spppstr_t *sppp_inpkt(queue_t *, mblk_t *, spppstr_t *);
99 static int	sppp_kstat_update(kstat_t *, int);
100 static void 	sppp_release_pkts(sppa_t *, uint16_t);
101 
102 /*
103  * sps_list contains the list of active per-stream instance state structures
104  * ordered on the minor device number (see sppp.h for details). All streams
105  * opened to this driver are threaded together in this list.
106  */
107 static spppstr_t *sps_list = NULL;
108 /*
109  * ppa_list contains the list of active per-attachment instance state
110  * structures ordered on the ppa id number (see sppp.h for details). All of
111  * the ppa structures created once per PPPIO_NEWPPA ioctl are threaded together
112  * in this list. There is exactly one ppa structure for a given PPP interface,
113  * and multiple sps streams (upper streams) may share a ppa by performing
114  * an attachment explicitly (PPPIO_ATTACH) or implicitly (DL_ATTACH_REQ).
115  */
116 static sppa_t *ppa_list = NULL;
117 
118 static const char *kstats_names[] = { SPPP_KSTATS_NAMES };
119 static const char *kstats64_names[] = { SPPP_KSTATS64_NAMES };
120 
121 /*
122  * map proto (which is an IANA defined ppp network protocol) to
123  * a bit position indicated by NP_* in ppa_npflag
124  */
125 static uint32_t
126 sppp_ppp2np(uint16_t proto)
127 {
128 	switch (proto) {
129 	case PPP_IP:
130 		return (NP_IP);
131 	case PPP_IPV6:
132 		return (NP_IPV6);
133 	default:
134 		return (0);
135 	}
136 }
137 
138 /*
139  * sppp_open()
140  *
141  * MT-Perimeters:
142  *    exclusive inner, exclusive outer.
143  *
144  * Description:
145  *    Common open procedure for module.
146  */
147 /* ARGSUSED */
148 int
149 sppp_open(queue_t *q, dev_t *devp, int oflag, int sflag, cred_t *credp)
150 {
151 	spppstr_t	*sps;
152 	spppstr_t	**nextmn;
153 	minor_t		mn;
154 
155 	ASSERT(q != NULL && devp != NULL);
156 	ASSERT(sflag != MODOPEN);
157 
158 	if (q->q_ptr != NULL) {
159 		return (0);		/* already open */
160 	}
161 	if (sflag != CLONEOPEN) {
162 		return (OPENFAIL);
163 	}
164 	/*
165 	 * The sps list is sorted using the minor number as the key. The
166 	 * following code walks the list to find the lowest valued minor
167 	 * number available to be used.
168 	 */
169 	mn = 0;
170 	for (nextmn = &sps_list; (sps = *nextmn) != NULL;
171 	    nextmn = &sps->sps_nextmn) {
172 		if (sps->sps_mn_id != mn) {
173 			break;
174 		}
175 		++mn;
176 	}
177 	sps = (spppstr_t *)kmem_zalloc(sizeof (spppstr_t), KM_SLEEP);
178 	ASSERT(sps != NULL);		/* KM_SLEEP must never return NULL */
179 	sps->sps_nextmn = *nextmn;	/* insert stream in global list */
180 	*nextmn = sps;
181 	sps->sps_mn_id = mn;		/* save minor id for this stream */
182 	sps->sps_rq = q;		/* save read queue pointer */
183 	sps->sps_sap = -1;		/* no sap bound to stream */
184 	sps->sps_dlstate = DL_UNATTACHED; /* dlpi state is unattached */
185 	sps->sps_npmode = NPMODE_DROP;	/* drop all packets initially */
186 	q->q_ptr = WR(q)->q_ptr = (caddr_t)sps;
187 	/*
188 	 * We explicitly disable the automatic queue scheduling for the
189 	 * write-side to obtain complete control over queuing during transmit.
190 	 * Packets will be queued at the upper write queue and the service
191 	 * routine will not be called until it gets scheduled by having the
192 	 * lower write service routine call the qenable(WR(uq)) for all streams
193 	 * attached to the same ppa instance.
194 	 */
195 	noenable(WR(q));
196 	*devp = makedevice(getmajor(*devp), mn);
197 	qprocson(q);
198 	return (0);
199 }
200 
201 /*
202  * Free storage used by a PPA.  This is not called until the last PPA
203  * user closes his connection or reattaches to a different PPA.
204  */
205 static void
206 sppp_free_ppa(sppa_t *ppa)
207 {
208 	sppa_t **nextppa;
209 
210 	ASSERT(ppa->ppa_refcnt == 1);
211 	if (ppa->ppa_kstats != NULL) {
212 		kstat_delete(ppa->ppa_kstats);
213 		ppa->ppa_kstats = NULL;
214 	}
215 	mutex_destroy(&ppa->ppa_sta_lock);
216 	mutex_destroy(&ppa->ppa_npmutex);
217 	rw_destroy(&ppa->ppa_sib_lock);
218 	nextppa = &ppa_list;
219 	while (*nextppa != NULL) {
220 		if (*nextppa == ppa) {
221 			*nextppa = ppa->ppa_nextppa;
222 			break;
223 		}
224 		nextppa = &(*nextppa)->ppa_nextppa;
225 	}
226 	kmem_free(ppa, sizeof (*ppa));
227 }
228 
229 /*
230  * Create a new PPA.  Caller must be exclusive on outer perimeter.
231  */
sppa_t *
sppp_create_ppa(uint32_t ppa_id)
{
	sppa_t *ppa;
	sppa_t *curppa;
	sppa_t **availppa;
	char unit[32];		/* Unit name */
	const char **cpp;
	kstat_t *ksp;
	kstat_named_t *knt;

	/*
	 * NOTE: unit *must* be named for the driver
	 * name plus the ppa number so that netstat
	 * can find the statistics.
	 */
	(void) sprintf(unit, "%s" "%d", PPP_DRV_NAME, ppa_id);
	/*
	 * Make sure we can allocate a buffer to
	 * contain the ppa to be sent upstream, as
	 * well as the actual ppa structure and its
	 * associated kstat structure.
	 */
	ppa = (sppa_t *)kmem_zalloc(sizeof (sppa_t),
	    KM_NOSLEEP);
	ksp = kstat_create(PPP_DRV_NAME, ppa_id, unit, "net", KSTAT_TYPE_NAMED,
	    sizeof (sppp_kstats_t) / sizeof (kstat_named_t), 0);

	/* If either allocation failed, undo the other and bail out. */
	if (ppa == NULL || ksp == NULL) {
		if (ppa != NULL) {
			kmem_free(ppa, sizeof (sppa_t));
		}
		if (ksp != NULL) {
			kstat_delete(ksp);
		}
		return (NULL);
	}
	ppa->ppa_kstats = ksp;		/* chain kstat structure */
	ppa->ppa_ppa_id = ppa_id;	/* record ppa id */
	ppa->ppa_mtu = PPP_MAXMTU;	/* 65535-(PPP_HDRLEN+PPP_FCSLEN) */
	ppa->ppa_mru = PPP_MAXMRU;	/* 65000 */

	mutex_init(&ppa->ppa_sta_lock, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&ppa->ppa_npmutex, NULL, MUTEX_DRIVER, NULL);
	rw_init(&ppa->ppa_sib_lock, NULL, RW_DRIVER, NULL);

	/*
	 * Prepare and install kstat counters.  Note that for netstat
	 * -i to work, there needs to be "ipackets", "opackets",
	 * "ierrors", and "oerrors" kstat named variables.
	 */
	knt = (kstat_named_t *)ksp->ks_data;
	/*
	 * The 32-bit counters are initialized first, in SPPP_KSTATS_NAMES
	 * order, followed by the 64-bit ones; presumably this matches the
	 * field layout of sppp_kstats_t (used above to size ks_data) —
	 * keep the two loops in this order.
	 */
	for (cpp = kstats_names; cpp < kstats_names + Dim(kstats_names);
	    cpp++) {
		kstat_named_init(knt, *cpp, KSTAT_DATA_UINT32);
		knt++;
	}
	for (cpp = kstats64_names; cpp < kstats64_names + Dim(kstats64_names);
	    cpp++) {
		kstat_named_init(knt, *cpp, KSTAT_DATA_UINT64);
		knt++;
	}
	ksp->ks_update = sppp_kstat_update;
	ksp->ks_private = (void *)ppa;
	kstat_install(ksp);

	/* link to the next ppa and insert into global list */
	/* (the list is maintained in ascending ppa_id order) */
	availppa = &ppa_list;
	while ((curppa = *availppa) != NULL) {
		if (ppa_id < curppa->ppa_ppa_id)
			break;
		availppa = &curppa->ppa_nextppa;
	}
	ppa->ppa_nextppa = *availppa;
	*availppa = ppa;
	return (ppa);
}
309 
310 /*
311  * sppp_close()
312  *
313  * MT-Perimeters:
314  *    exclusive inner, exclusive outer.
315  *
316  * Description:
317  *    Common close procedure for module.
318  */
int
sppp_close(queue_t *q)
{
	spppstr_t	*sps;
	spppstr_t	**nextmn;
	spppstr_t	*sib;
	sppa_t		*ppa;
	mblk_t		*mp;

	ASSERT(q != NULL && q->q_ptr != NULL);
	sps = (spppstr_t *)q->q_ptr;
	/* Stop put/service processing on this stream before teardown. */
	qprocsoff(q);

	ppa = sps->sps_ppa;
	if (ppa == NULL) {
		/* Never attached to a ppa; just unlink from sps_list. */
		ASSERT(!IS_SPS_CONTROL(sps));
		goto close_unattached;
	}
	if (IS_SPS_CONTROL(sps)) {
		uint32_t	cnt = 0;

		ASSERT(ppa != NULL);
		ASSERT(ppa->ppa_ctl == sps);
		ppa->ppa_ctl = NULL;
		/*
		 * STREAMS framework always issues I_UNLINK prior to close,
		 * since we only allow I_LINK under the control stream.
		 * A given ppa structure has at most one lower stream pointed
		 * by the ppa_lower_wq field, because we only allow a single
		 * linkage (I_LINK) to be done on the control stream.
		 */
		ASSERT(ppa->ppa_lower_wq == NULL);
		/*
		 * Walk through all of sibling streams attached to this ppa,
		 * and remove all references to this ppa. We have exclusive
		 * access for the entire driver here, so there's no need
		 * to hold ppa_sib_lock.
		 */
		cnt++;		/* account for the control stream itself */
		sib = ppa->ppa_streams;
		while (sib != NULL) {
			ASSERT(ppa == sib->sps_ppa);
			sib->sps_npmode = NPMODE_DROP;
			sib->sps_flags &= ~(SPS_PIOATTACH | SPS_CACHED);
			/*
			 * There should be a preallocated hangup
			 * message here.  Fetch it and send it up to
			 * the stream head.  This will cause IP to
			 * mark the interface as "down."
			 */
			if ((mp = sib->sps_hangup) != NULL) {
				sib->sps_hangup = NULL;
				/*
				 * M_HANGUP works with IP, but snoop
				 * is lame and requires M_ERROR.  Send
				 * up a clean error code instead.
				 *
				 * XXX if snoop is fixed, fix this, too.
				 */
				MTYPE(mp) = M_ERROR;
				*mp->b_wptr++ = ENXIO;
				putnext(sib->sps_rq, mp);
			}
			/* Let the sibling's write service run and drain. */
			qenable(WR(sib->sps_rq));
			cnt++;
			sib = sib->sps_nextsib;
		}
		/* Each sibling plus the control stream holds one reference. */
		ASSERT(ppa->ppa_refcnt == cnt);
	} else {
		ASSERT(ppa->ppa_streams != NULL);
		ASSERT(ppa->ppa_ctl != sps);
		mp = NULL;
		/* Invalidate any cached fastpath stream for this sap. */
		if (sps->sps_sap == PPP_IP) {
			ppa->ppa_ip_cache = NULL;
			mp = create_lsmsg(PPP_LINKSTAT_IPV4_UNBOUND);
		} else if (sps->sps_sap == PPP_IPV6) {
			ppa->ppa_ip6_cache = NULL;
			mp = create_lsmsg(PPP_LINKSTAT_IPV6_UNBOUND);
		}
		/* Tell the daemon the bad news. */
		if (mp != NULL && ppa->ppa_ctl != NULL &&
		    (sps->sps_npmode == NPMODE_PASS ||
		    sps->sps_npmode == NPMODE_QUEUE)) {
			putnext(ppa->ppa_ctl->sps_rq, mp);
		} else {
			freemsg(mp);
		}
		/*
		 * Walk through all of sibling streams attached to the
		 * same ppa, and remove this stream from the sibling
		 * streams list. We have exclusive access for the
		 * entire driver here, so there's no need to hold
		 * ppa_sib_lock.
		 */
		sib = ppa->ppa_streams;
		if (sib == sps) {
			ppa->ppa_streams = sps->sps_nextsib;
		} else {
			while (sib->sps_nextsib != NULL) {
				if (sib->sps_nextsib == sps) {
					sib->sps_nextsib = sps->sps_nextsib;
					break;
				}
				sib = sib->sps_nextsib;
			}
		}
		sps->sps_nextsib = NULL;
		/* Discard the preallocated hangup message, if still held. */
		freemsg(sps->sps_hangup);
		sps->sps_hangup = NULL;
		/*
		 * Check if this is a promiscous stream. If the SPS_PROMISC bit
		 * is still set, it means that the stream is closed without
		 * ever having issued DL_DETACH_REQ or DL_PROMISCOFF_REQ.
		 * In this case, we simply decrement the promiscous counter,
		 * and it's safe to do it without holding ppa_sib_lock since
		 * we're exclusive (inner and outer) at this point.
		 */
		if (IS_SPS_PROMISC(sps)) {
			ASSERT(ppa->ppa_promicnt > 0);
			ppa->ppa_promicnt--;
		}
	}
	/* If we're the only one left, then delete now. */
	if (ppa->ppa_refcnt <= 1)
		sppp_free_ppa(ppa);
	else
		ppa->ppa_refcnt--;
close_unattached:
	q->q_ptr = WR(q)->q_ptr = NULL;
	/* Remove this stream from the global minor-ordered sps list. */
	for (nextmn = &sps_list; *nextmn != NULL;
	    nextmn = &(*nextmn)->sps_nextmn) {
		if (*nextmn == sps) {
			*nextmn = sps->sps_nextmn;
			break;
		}
	}
	kmem_free(sps, sizeof (spppstr_t));
	return (0);
}
458 
/*
 * sppp_ioctl()
 *
 * Description:
 *    Handles the PPPIO_NPMODE, PPPIO_GIDLE, PPPIO_GTYPE, PPPIO_GETSTAT64
 *    and PPPIO_GETCSTAT ioctls on behalf of sppp_uwput.  The message is
 *    acked (with any reply data linked on b_cont) or naked in place,
 *    except when the request is forwarded to a linked lower stream, in
 *    which case the response will arrive from below.
 */
static void
sppp_ioctl(struct queue *q, mblk_t *mp)
{
	spppstr_t	*sps;
	spppstr_t	*nextsib;
	sppa_t		*ppa;
	struct iocblk	*iop;
	mblk_t		*nmp;
	enum NPmode	npmode;
	struct ppp_idle	*pip;
	struct ppp_stats64 *psp;
	struct ppp_comp_stats *pcsp;
	hrtime_t	hrtime;
	int		sap;
	int		count = 0;	/* bytes of reply data for miocack */
	int		error = EINVAL;	/* default nak code */

	sps = (spppstr_t *)q->q_ptr;
	ppa = sps->sps_ppa;

	iop = (struct iocblk *)mp->b_rptr;
	switch (iop->ioc_cmd) {
	case PPPIO_NPMODE:
		/* Set the NP mode of the sibling stream bound to a sap. */
		if (!IS_SPS_CONTROL(sps)) {
			break;		/* return EINVAL */
		} else if (iop->ioc_count != 2 * sizeof (uint32_t) ||
		    (mp->b_cont == NULL)) {
			error = EPROTO;
			break;
		}
		ASSERT(ppa != NULL);
		ASSERT(mp->b_cont->b_rptr != NULL);
		ASSERT(sps->sps_npmode == NPMODE_PASS);
		/* Payload is two uint32_t words: sap then requested mode. */
		sap = ((uint32_t *)mp->b_cont->b_rptr)[0];
		npmode = (enum NPmode)((uint32_t *)mp->b_cont->b_rptr)[1];
		/*
		 * Walk the sibling streams which belong to the same
		 * ppa, and try to find a stream with matching sap
		 * number.
		 */
		rw_enter(&ppa->ppa_sib_lock, RW_WRITER);
		for (nextsib = ppa->ppa_streams; nextsib != NULL;
		    nextsib = nextsib->sps_nextsib) {
			if (nextsib->sps_sap == sap) {
				break;	/* found it */
			}
		}
		if (nextsib == NULL) {
			rw_exit(&ppa->ppa_sib_lock);
			break;		/* return EINVAL */
		} else {
			nextsib->sps_npmode = npmode;
			/* Kick the stream if it now has packets to move. */
			if ((nextsib->sps_npmode != NPMODE_QUEUE) &&
			    (WR(nextsib->sps_rq)->q_first != NULL)) {
				qenable(WR(nextsib->sps_rq));
			}
		}
		rw_exit(&ppa->ppa_sib_lock);
		error = 0;	/* return success */
		break;
	case PPPIO_GIDLE:
		/* Report transmit/receive idle time, in seconds. */
		if (ppa == NULL) {
			ASSERT(!IS_SPS_CONTROL(sps));
			error = ENOLINK;
			break;
		} else if (!IS_PPA_TIMESTAMP(ppa)) {
			/* Idle time is only tracked with PPPIO_USETIMESTAMP. */
			break;		/* return EINVAL */
		}
		if ((nmp = allocb(sizeof (struct ppp_idle),
		    BPRI_MED)) == NULL) {
			mutex_enter(&ppa->ppa_sta_lock);
			ppa->ppa_allocbfail++;
			mutex_exit(&ppa->ppa_sta_lock);
			error = ENOSR;
			break;
		}
		/* Replace any payload with the reply block. */
		if (mp->b_cont != NULL) {
			freemsg(mp->b_cont);
		}
		mp->b_cont = nmp;
		pip = (struct ppp_idle *)nmp->b_wptr;
		nmp->b_wptr += sizeof (struct ppp_idle);
		/*
		 * Get current timestamp and subtract the tx and rx
		 * timestamps to get the actual idle time to be
		 * returned.
		 */
		hrtime = gethrtime();
		pip->xmit_idle = (hrtime - ppa->ppa_lasttx) / 1000000000ul;
		pip->recv_idle = (hrtime - ppa->ppa_lastrx) / 1000000000ul;
		count = msgsize(nmp);
		error = 0;
		break;		/* return success (error is 0) */
	case PPPIO_GTYPE:
		/* Identify this driver to the requestor. */
		nmp = allocb(sizeof (uint32_t), BPRI_MED);
		if (nmp == NULL) {
			error = ENOSR;
			break;
		}
		if (mp->b_cont != NULL) {
			freemsg(mp->b_cont);
		}
		mp->b_cont = nmp;
		/*
		 * Let the requestor know that we are the PPP
		 * multiplexer (PPPTYP_MUX).
		 */
		*(uint32_t *)nmp->b_wptr = PPPTYP_MUX;
		nmp->b_wptr += sizeof (uint32_t);
		count = msgsize(nmp);
		error = 0;		/* return success */
		break;
	case PPPIO_GETSTAT64:
		/* Return the 64-bit statistics for this ppa. */
		if (ppa == NULL) {
			break;		/* return EINVAL */
		} else if ((ppa->ppa_lower_wq != NULL) &&
		    !IS_PPA_LASTMOD(ppa)) {
			/* Not last module: delegate to the lower stream. */
			mutex_enter(&ppa->ppa_sta_lock);
			ppa->ppa_ioctlsfwd++;
			mutex_exit(&ppa->ppa_sta_lock);
			/*
			 * Record the ioctl CMD & ID - this will be
			 * used to check the ACK or NAK responses
			 * coming from below.
			 */
			sps->sps_ioc_id = iop->ioc_id;
			putnext(ppa->ppa_lower_wq, mp);
			return;	/* don't ack or nak the request */
		}
		nmp = allocb(sizeof (*psp), BPRI_MED);
		if (nmp == NULL) {
			mutex_enter(&ppa->ppa_sta_lock);
			ppa->ppa_allocbfail++;
			mutex_exit(&ppa->ppa_sta_lock);
			error = ENOSR;
			break;
		}
		if (mp->b_cont != NULL) {
			freemsg(mp->b_cont);
		}
		mp->b_cont = nmp;
		psp = (struct ppp_stats64 *)nmp->b_wptr;
		/*
		 * Copy the contents of ppp_stats64 structure for this
		 * ppa and return them to the caller.
		 */
		mutex_enter(&ppa->ppa_sta_lock);
		bcopy(&ppa->ppa_stats, psp, sizeof (*psp));
		mutex_exit(&ppa->ppa_sta_lock);
		nmp->b_wptr += sizeof (*psp);
		count = sizeof (*psp);
		error = 0;		/* return success */
		break;
	case PPPIO_GETCSTAT:
		/* Return compression statistics (zeroed at this level). */
		if (ppa == NULL) {
			break;		/* return EINVAL */
		} else if ((ppa->ppa_lower_wq != NULL) &&
		    !IS_PPA_LASTMOD(ppa)) {
			/* Not last module: delegate to the lower stream. */
			mutex_enter(&ppa->ppa_sta_lock);
			ppa->ppa_ioctlsfwd++;
			mutex_exit(&ppa->ppa_sta_lock);
			/*
			 * Record the ioctl CMD & ID - this will be
			 * used to check the ACK or NAK responses
			 * coming from below.
			 */
			sps->sps_ioc_id = iop->ioc_id;
			putnext(ppa->ppa_lower_wq, mp);
			return;	/* don't ack or nak the request */
		}
		nmp = allocb(sizeof (struct ppp_comp_stats), BPRI_MED);
		if (nmp == NULL) {
			mutex_enter(&ppa->ppa_sta_lock);
			ppa->ppa_allocbfail++;
			mutex_exit(&ppa->ppa_sta_lock);
			error = ENOSR;
			break;
		}
		if (mp->b_cont != NULL) {
			freemsg(mp->b_cont);
		}
		mp->b_cont = nmp;
		pcsp = (struct ppp_comp_stats *)nmp->b_wptr;
		nmp->b_wptr += sizeof (struct ppp_comp_stats);
		/* No compression handled here; report all-zero counters. */
		bzero((caddr_t)pcsp, sizeof (struct ppp_comp_stats));
		count = msgsize(nmp);
		error = 0;		/* return success */
		break;
	}

	if (error == 0) {
		/* Success; tell the user. */
		miocack(q, mp, count, 0);
	} else {
		/* Failure; send error back upstream. */
		miocnak(q, mp, 0, error);
	}
}
657 
658 /*
659  * sppp_uwput()
660  *
661  * MT-Perimeters:
662  *    shared inner, shared outer.
663  *
664  * Description:
665  *    Upper write-side put procedure. Messages from above arrive here.
666  */
void
sppp_uwput(queue_t *q, mblk_t *mp)
{
	queue_t		*nextq;
	spppstr_t	*sps;
	sppa_t		*ppa;
	struct iocblk	*iop;
	int		error;

	ASSERT(q != NULL && q->q_ptr != NULL);
	ASSERT(mp != NULL && mp->b_rptr != NULL);
	sps = (spppstr_t *)q->q_ptr;
	ppa = sps->sps_ppa;

	switch (MTYPE(mp)) {
	case M_PCPROTO:
	case M_PROTO:
		if (IS_SPS_CONTROL(sps)) {
			ASSERT(ppa != NULL);
			/*
			 * Intentionally change this to a high priority
			 * message so it doesn't get queued up. M_PROTO is
			 * specifically used for signalling between pppd and its
			 * kernel-level component(s), such as ppptun, so we
			 * make sure that it doesn't get queued up behind
			 * data messages.
			 */
			MTYPE(mp) = M_PCPROTO;
			if ((ppa->ppa_lower_wq != NULL) &&
			    canputnext(ppa->ppa_lower_wq)) {
				mutex_enter(&ppa->ppa_sta_lock);
				ppa->ppa_mctlsfwd++;
				mutex_exit(&ppa->ppa_sta_lock);
				putnext(ppa->ppa_lower_wq, mp);
			} else {
				/* No lower stream, or it is flow-blocked. */
				mutex_enter(&ppa->ppa_sta_lock);
				ppa->ppa_mctlsfwderr++;
				mutex_exit(&ppa->ppa_sta_lock);
				freemsg(mp);
			}
		} else {
			/* Non-control streams: handle as DLPI primitives. */
			(void) sppp_mproto(q, mp, sps);
			return;
		}
		break;
	case M_DATA:
		/* sppp_send consumes mp itself when it returns NULL. */
		if ((nextq = sppp_send(q, &mp, sps)) != NULL)
			putnext(nextq, mp);
		break;
	case M_IOCTL:
		error = EINVAL;
		iop = (struct iocblk *)mp->b_rptr;
		switch (iop->ioc_cmd) {
		case DLIOCRAW:
		case DL_IOC_HDR_INFO:
		case PPPIO_ATTACH:
		case PPPIO_DEBUG:
		case PPPIO_DETACH:
		case PPPIO_LASTMOD:
		case PPPIO_MRU:
		case PPPIO_MTU:
		case PPPIO_USETIMESTAMP:
		case PPPIO_BLOCKNP:
		case PPPIO_UNBLOCKNP:
			/* Needs exclusive access at the inner perimeter. */
			qwriter(q, mp, sppp_inner_ioctl, PERIM_INNER);
			return;
		case I_LINK:
		case I_UNLINK:
		case PPPIO_NEWPPA:
			/* Alters global state; exclusive outer perimeter. */
			qwriter(q, mp, sppp_outer_ioctl, PERIM_OUTER);
			return;
		case PPPIO_NPMODE:
		case PPPIO_GIDLE:
		case PPPIO_GTYPE:
		case PPPIO_GETSTAT64:
		case PPPIO_GETCSTAT:
			/*
			 * These require additional auto variables to
			 * handle, so (for optimization reasons)
			 * they're moved off to a separate function.
			 */
			sppp_ioctl(q, mp);
			return;
		case PPPIO_GETSTAT:
			break;			/* 32 bit interface gone */
		default:
			/* Unknown ioctls are forwarded down, if privileged. */
			if (iop->ioc_cr == NULL ||
			    secpolicy_net_config(iop->ioc_cr, B_FALSE) != 0) {
				error = EPERM;
				break;
			} else if ((ppa == NULL) ||
			    (ppa->ppa_lower_wq == NULL)) {
				break;		/* return EINVAL */
			}
			mutex_enter(&ppa->ppa_sta_lock);
			ppa->ppa_ioctlsfwd++;
			mutex_exit(&ppa->ppa_sta_lock);
			/*
			 * Record the ioctl CMD & ID - this will be used to
			 * check the ACK or NAK responses coming from below.
			 */
			sps->sps_ioc_id = iop->ioc_id;
			putnext(ppa->ppa_lower_wq, mp);
			return;		/* don't ack or nak the request */
		}
		/* Failure; send error back upstream. */
		miocnak(q, mp, 0, error);
		break;
	case M_FLUSH:
		if (*mp->b_rptr & FLUSHW) {
			flushq(q, FLUSHDATA);
		}
		if (*mp->b_rptr & FLUSHR) {
			/* Turn the flush around for the read side. */
			*mp->b_rptr &= ~FLUSHW;
			qreply(q, mp);
		} else {
			freemsg(mp);
		}
		break;
	default:
		freemsg(mp);
		break;
	}
}
791 
792 /*
793  * sppp_uwsrv()
794  *
795  * MT-Perimeters:
796  *    exclusive inner, shared outer.
797  *
798  * Description:
799  *    Upper write-side service procedure. Note that this procedure does
800  *    not get called when a message is placed on our write-side queue, since
801  *    automatic queue scheduling has been turned off by noenable() when
802  *    the queue was opened. We do this on purpose, as we explicitly control
803  *    the write-side queue. Therefore, this procedure gets called when
804  *    the lower write service procedure qenable() the upper write stream queue.
805  */
806 void
807 sppp_uwsrv(queue_t *q)
808 {
809 	spppstr_t	*sps;
810 	mblk_t		*mp;
811 	queue_t		*nextq;
812 
813 	ASSERT(q != NULL && q->q_ptr != NULL);
814 	sps = (spppstr_t *)q->q_ptr;
815 	while ((mp = getq(q)) != NULL) {
816 		if ((nextq = sppp_outpkt(q, &mp, msgdsize(mp), sps)) == NULL) {
817 			if (mp != NULL) {
818 				if (putbq(q, mp) == 0)
819 					freemsg(mp);
820 				break;
821 			}
822 		} else {
823 			putnext(nextq, mp);
824 		}
825 	}
826 }
827 
828 void
829 sppp_remove_ppa(spppstr_t *sps)
830 {
831 	spppstr_t *nextsib;
832 	sppa_t *ppa = sps->sps_ppa;
833 
834 	rw_enter(&ppa->ppa_sib_lock, RW_WRITER);
835 	if (ppa->ppa_refcnt <= 1) {
836 		rw_exit(&ppa->ppa_sib_lock);
837 		sppp_free_ppa(ppa);
838 	} else {
839 		nextsib = ppa->ppa_streams;
840 		if (nextsib == sps) {
841 			ppa->ppa_streams = sps->sps_nextsib;
842 		} else {
843 			while (nextsib->sps_nextsib != NULL) {
844 				if (nextsib->sps_nextsib == sps) {
845 					nextsib->sps_nextsib =
846 					    sps->sps_nextsib;
847 					break;
848 				}
849 				nextsib = nextsib->sps_nextsib;
850 			}
851 		}
852 		ppa->ppa_refcnt--;
853 		/*
854 		 * And if this stream was marked as promiscuous
855 		 * (SPS_PROMISC), then we need to update the
856 		 * promiscuous streams count. This should only happen
857 		 * when DL_DETACH_REQ is issued prior to marking the
858 		 * stream as non-promiscuous, through
859 		 * DL_PROMISCOFF_REQ request.
860 		 */
861 		if (IS_SPS_PROMISC(sps)) {
862 			ASSERT(ppa->ppa_promicnt > 0);
863 			ppa->ppa_promicnt--;
864 		}
865 		rw_exit(&ppa->ppa_sib_lock);
866 	}
867 	sps->sps_nextsib = NULL;
868 	sps->sps_ppa = NULL;
869 	freemsg(sps->sps_hangup);
870 	sps->sps_hangup = NULL;
871 }
872 
873 sppa_t *
874 sppp_find_ppa(uint32_t ppa_id)
875 {
876 	sppa_t *ppa;
877 
878 	for (ppa = ppa_list; ppa != NULL; ppa = ppa->ppa_nextppa) {
879 		if (ppa->ppa_ppa_id == ppa_id) {
880 			break;	/* found the ppa */
881 		}
882 	}
883 	return (ppa);
884 }
885 
886 /*
887  * sppp_inner_ioctl()
888  *
889  * MT-Perimeters:
890  *    exclusive inner, shared outer
891  *
892  * Description:
893  *    Called by sppp_uwput as a result of receiving ioctls which require
894  *    an exclusive access at the inner perimeter.
895  */
896 static void
897 sppp_inner_ioctl(queue_t *q, mblk_t *mp)
898 {
899 	spppstr_t	*sps;
900 	sppa_t		*ppa;
901 	struct iocblk	*iop;
902 	mblk_t		*nmp;
903 	int		error = EINVAL;
904 	int		count = 0;
905 	int		dbgcmd;
906 	int		mru, mtu;
907 	uint32_t	ppa_id;
908 	hrtime_t	hrtime;
909 	uint16_t	proto;
910 
911 	ASSERT(q != NULL && q->q_ptr != NULL);
912 	ASSERT(mp != NULL && mp->b_rptr != NULL);
913 
914 	sps = (spppstr_t *)q->q_ptr;
915 	ppa = sps->sps_ppa;
916 	iop = (struct iocblk *)mp->b_rptr;
917 	switch (iop->ioc_cmd) {
918 	case DLIOCRAW:
919 		if (IS_SPS_CONTROL(sps)) {
920 			break;		/* return EINVAL */
921 		}
922 		sps->sps_flags |= SPS_RAWDATA;
923 		error = 0;		/* return success */
924 		break;
925 	case DL_IOC_HDR_INFO:
926 		if (IS_SPS_CONTROL(sps)) {
927 			break;		/* return EINVAL */
928 		} else if ((mp->b_cont == NULL) ||
929 		    *((t_uscalar_t *)mp->b_cont->b_rptr) != DL_UNITDATA_REQ ||
930 		    (MBLKL(mp->b_cont) < (sizeof (dl_unitdata_req_t) +
931 		    SPPP_ADDRL))) {
932 			error = EPROTO;
933 			break;
934 		} else if (ppa == NULL) {
935 			error = ENOLINK;
936 			break;
937 		}
938 		if ((nmp = allocb(PPP_HDRLEN, BPRI_MED)) == NULL) {
939 			mutex_enter(&ppa->ppa_sta_lock);
940 			ppa->ppa_allocbfail++;
941 			mutex_exit(&ppa->ppa_sta_lock);
942 			error = ENOMEM;
943 			break;
944 		}
945 		*(uchar_t *)nmp->b_wptr++ = PPP_ALLSTATIONS;
946 		*(uchar_t *)nmp->b_wptr++ = PPP_UI;
947 		*(uchar_t *)nmp->b_wptr++ = sps->sps_sap >> 8;
948 		*(uchar_t *)nmp->b_wptr++ = sps->sps_sap & 0xff;
949 		ASSERT(MBLKL(nmp) == PPP_HDRLEN);
950 
951 		linkb(mp, nmp);
952 		sps->sps_flags |= SPS_FASTPATH;
953 		error = 0;		/* return success */
954 		count = msgsize(nmp);
955 		break;
956 	case PPPIO_ATTACH:
957 		if (IS_SPS_CONTROL(sps) || IS_SPS_PIOATTACH(sps) ||
958 		    (sps->sps_dlstate != DL_UNATTACHED) ||
959 		    (iop->ioc_count != sizeof (uint32_t))) {
960 			break;		/* return EINVAL */
961 		} else if (mp->b_cont == NULL) {
962 			error = EPROTO;
963 			break;
964 		}
965 		ASSERT(mp->b_cont->b_rptr != NULL);
966 		/* If there's something here, it's detached. */
967 		if (ppa != NULL) {
968 			sppp_remove_ppa(sps);
969 		}
970 		ppa_id = *(uint32_t *)mp->b_cont->b_rptr;
971 		ppa = sppp_find_ppa(ppa_id);
972 		/*
973 		 * If we can't find it, then it's either because the requestor
974 		 * has supplied a wrong ppa_id to be attached to, or because
975 		 * the control stream for the specified ppa_id has been closed
976 		 * before we get here.
977 		 */
978 		if (ppa == NULL) {
979 			error = ENOENT;
980 			break;
981 		}
982 		/*
983 		 * Preallocate the hangup message so that we're always
984 		 * able to send this upstream in the event of a
985 		 * catastrophic failure.
986 		 */
987 		if ((sps->sps_hangup = allocb(1, BPRI_MED)) == NULL) {
988 			error = ENOSR;
989 			break;
990 		}
991 		/*
992 		 * There are two ways to attach a stream to a ppa: one is
993 		 * through DLPI (DL_ATTACH_REQ) and the other is through
994 		 * PPPIO_ATTACH. This is why we need to distinguish whether or
995 		 * not a stream was allocated via PPPIO_ATTACH, so that we can
996 		 * properly detach it when we receive PPPIO_DETACH ioctl
997 		 * request.
998 		 */
999 		sps->sps_flags |= SPS_PIOATTACH;
1000 		sps->sps_ppa = ppa;
1001 		/*
1002 		 * Add this stream to the head of the list of sibling streams
1003 		 * which belong to the same ppa as specified.
1004 		 */
1005 		rw_enter(&ppa->ppa_sib_lock, RW_WRITER);
1006 		ppa->ppa_refcnt++;
1007 		sps->sps_nextsib = ppa->ppa_streams;
1008 		ppa->ppa_streams = sps;
1009 		rw_exit(&ppa->ppa_sib_lock);
1010 		error = 0;		/* return success */
1011 		break;
1012 	case PPPIO_BLOCKNP:
1013 	case PPPIO_UNBLOCKNP:
1014 		if (iop->ioc_cr == NULL ||
1015 		    secpolicy_net_config(iop->ioc_cr, B_FALSE) != 0) {
1016 			error = EPERM;
1017 			break;
1018 		}
1019 		error = miocpullup(mp, sizeof (uint16_t));
1020 		if (error != 0)
1021 			break;
1022 		ASSERT(mp->b_cont->b_rptr != NULL);
1023 		proto = *(uint16_t *)mp->b_cont->b_rptr;
1024 		if (iop->ioc_cmd == PPPIO_BLOCKNP) {
1025 			uint32_t npflagpos = sppp_ppp2np(proto);
1026 			/*
1027 			 * Mark proto as blocked in ppa_npflag until the
1028 			 * corresponding queues for proto have been plumbed.
1029 			 */
1030 			if (npflagpos != 0) {
1031 				mutex_enter(&ppa->ppa_npmutex);
1032 				ppa->ppa_npflag |= (1 << npflagpos);
1033 				mutex_exit(&ppa->ppa_npmutex);
1034 			} else {
1035 				error = EINVAL;
1036 			}
1037 		} else {
1038 			/*
1039 			 * reset ppa_npflag and release proto
1040 			 * packets that were being held in control queue.
1041 			 */
1042 			sppp_release_pkts(ppa, proto);
1043 		}
1044 		break;
1045 	case PPPIO_DEBUG:
1046 		if (iop->ioc_cr == NULL ||
1047 		    secpolicy_net_config(iop->ioc_cr, B_FALSE) != 0) {
1048 			error = EPERM;
1049 			break;
1050 		} else if (iop->ioc_count != sizeof (uint32_t)) {
1051 			break;		/* return EINVAL */
1052 		} else if (mp->b_cont == NULL) {
1053 			error = EPROTO;
1054 			break;
1055 		}
1056 		ASSERT(mp->b_cont->b_rptr != NULL);
1057 		dbgcmd = *(uint32_t *)mp->b_cont->b_rptr;
1058 		/*
1059 		 * We accept PPPDBG_LOG + PPPDBG_DRIVER value as an indication
1060 		 * that SPS_KDEBUG needs to be enabled for this upper stream.
1061 		 */
1062 		if (dbgcmd == PPPDBG_LOG + PPPDBG_DRIVER) {
1063 			sps->sps_flags |= SPS_KDEBUG;
1064 			error = 0;	/* return success */
1065 			break;
1066 		}
1067 		/*
1068 		 * Otherwise, for any other values, we send them down only if
1069 		 * there is an attachment and if the attachment has something
1070 		 * linked underneath it.
1071 		 */
1072 		if ((ppa == NULL) || (ppa->ppa_lower_wq == NULL)) {
1073 			error = ENOLINK;
1074 			break;
1075 		}
1076 		mutex_enter(&ppa->ppa_sta_lock);
1077 		ppa->ppa_ioctlsfwd++;
1078 		mutex_exit(&ppa->ppa_sta_lock);
1079 		/*
1080 		 * Record the ioctl CMD & ID - this will be used to check the
1081 		 * ACK or NAK responses coming from below.
1082 		 */
1083 		sps->sps_ioc_id = iop->ioc_id;
1084 		putnext(ppa->ppa_lower_wq, mp);
1085 		return;			/* don't ack or nak the request */
1086 	case PPPIO_DETACH:
1087 		if (!IS_SPS_PIOATTACH(sps)) {
1088 			break;		/* return EINVAL */
1089 		}
1090 		/*
1091 		 * The SPS_PIOATTACH flag set on the stream tells us that
1092 		 * the ppa field is still valid. In the event that the control
1093 		 * stream be closed prior to this stream's detachment, the
1094 		 * SPS_PIOATTACH flag would have been cleared from this stream
1095 		 * during close; in that case we won't get here.
1096 		 */
1097 		ASSERT(ppa != NULL);
1098 		ASSERT(ppa->ppa_ctl != sps);
1099 		ASSERT(sps->sps_dlstate == DL_UNATTACHED);
1100 
1101 		/*
1102 		 * We don't actually detach anything until the stream is
1103 		 * closed or reattached.
1104 		 */
1105 
1106 		sps->sps_flags &= ~SPS_PIOATTACH;
1107 		error = 0;		/* return success */
1108 		break;
1109 	case PPPIO_LASTMOD:
1110 		if (!IS_SPS_CONTROL(sps)) {
1111 			break;		/* return EINVAL */
1112 		}
1113 		ASSERT(ppa != NULL);
1114 		ppa->ppa_flags |= PPA_LASTMOD;
1115 		error = 0;		/* return success */
1116 		break;
1117 	case PPPIO_MRU:
1118 		if (!IS_SPS_CONTROL(sps) ||
1119 		    (iop->ioc_count != sizeof (uint32_t))) {
1120 			break;		/* return EINVAL */
1121 		} else if (mp->b_cont == NULL) {
1122 			error = EPROTO;
1123 			break;
1124 		}
1125 		ASSERT(ppa != NULL);
1126 		ASSERT(mp->b_cont->b_rptr != NULL);
1127 		mru = *(uint32_t *)mp->b_cont->b_rptr;
1128 		if ((mru <= 0) || (mru > PPP_MAXMRU)) {
1129 			error = EPROTO;
1130 			break;
1131 		}
1132 		if (mru < PPP_MRU) {
1133 			mru = PPP_MRU;
1134 		}
1135 		ppa->ppa_mru = (uint16_t)mru;
1136 		/*
1137 		 * If there's something beneath this driver for the ppa, then
1138 		 * inform it (or them) of the MRU size. Only do this is we
1139 		 * are not the last PPP module on the stream.
1140 		 */
1141 		if (!IS_PPA_LASTMOD(ppa) && (ppa->ppa_lower_wq != NULL)) {
1142 			(void) putctl4(ppa->ppa_lower_wq, M_CTL, PPPCTL_MRU,
1143 			    mru);
1144 		}
1145 		error = 0;		/* return success */
1146 		break;
1147 	case PPPIO_MTU:
1148 		if (!IS_SPS_CONTROL(sps) ||
1149 		    (iop->ioc_count != sizeof (uint32_t))) {
1150 			break;		/* return EINVAL */
1151 		} else if (mp->b_cont == NULL) {
1152 			error = EPROTO;
1153 			break;
1154 		}
1155 		ASSERT(ppa != NULL);
1156 		ASSERT(mp->b_cont->b_rptr != NULL);
1157 		mtu = *(uint32_t *)mp->b_cont->b_rptr;
1158 		if ((mtu <= 0) || (mtu > PPP_MAXMTU)) {
1159 			error = EPROTO;
1160 			break;
1161 		}
1162 		ppa->ppa_mtu = (uint16_t)mtu;
1163 		/*
1164 		 * If there's something beneath this driver for the ppa, then
1165 		 * inform it (or them) of the MTU size. Only do this if we
1166 		 * are not the last PPP module on the stream.
1167 		 */
1168 		if (!IS_PPA_LASTMOD(ppa) && (ppa->ppa_lower_wq != NULL)) {
1169 			(void) putctl4(ppa->ppa_lower_wq, M_CTL, PPPCTL_MTU,
1170 			    mtu);
1171 		}
1172 		error = 0;		/* return success */
1173 		break;
1174 	case PPPIO_USETIMESTAMP:
1175 		if (!IS_SPS_CONTROL(sps)) {
1176 			break;		/* return EINVAL */
1177 		}
1178 		if (!IS_PPA_TIMESTAMP(ppa)) {
1179 			hrtime = gethrtime();
1180 			ppa->ppa_lasttx = ppa->ppa_lastrx = hrtime;
1181 			ppa->ppa_flags |= PPA_TIMESTAMP;
1182 		}
1183 		error = 0;
1184 		break;
1185 	}
1186 
1187 	if (error == 0) {
1188 		/* Success; tell the user */
1189 		miocack(q, mp, count, 0);
1190 	} else {
1191 		/* Failure; send error back upstream */
1192 		miocnak(q, mp, 0, error);
1193 	}
1194 }
1195 
1196 /*
1197  * sppp_outer_ioctl()
1198  *
1199  * MT-Perimeters:
1200  *    exclusive inner, exclusive outer
1201  *
1202  * Description:
1203  *    Called by sppp_uwput as a result of receiving ioctls which require
1204  *    an exclusive access at the outer perimeter.
1205  */
static void
sppp_outer_ioctl(queue_t *q, mblk_t *mp)
{
	spppstr_t	*sps;
	spppstr_t	*nextsib;
	queue_t		*lwq;
	sppa_t		*ppa;
	struct iocblk	*iop;
	int		error = EINVAL;	/* unknown commands are nak'ed */
	int		count = 0;	/* bytes returned in the ack */
	uint32_t	ppa_id;
	mblk_t		*nmp;

	ASSERT(q != NULL && q->q_ptr != NULL);
	ASSERT(mp != NULL && mp->b_rptr != NULL);

	sps = (spppstr_t *)q->q_ptr;
	ppa = sps->sps_ppa;
	iop = (struct iocblk *)mp->b_rptr;
	switch (iop->ioc_cmd) {
	case I_LINK:
		/*
		 * Link a driver stream beneath this ppa.  Only the control
		 * stream may do this, and only one lower stream may be
		 * linked per ppa.
		 */
		if (!IS_SPS_CONTROL(sps)) {
			break;		/* return EINVAL */
		} else if (ppa->ppa_lower_wq != NULL) {
			error = EEXIST;
			break;
		}
		ASSERT(ppa->ppa_ctl != NULL);
		ASSERT(sps->sps_npmode == NPMODE_PASS);
		ASSERT(mp->b_cont != NULL && mp->b_cont->b_rptr != NULL);

		lwq = ((struct linkblk *)mp->b_cont->b_rptr)->l_qbot;
		ASSERT(lwq != NULL);

		/* Cross-link the lower stream and this ppa. */
		ppa->ppa_lower_wq = lwq;
		lwq->q_ptr = RD(lwq)->q_ptr = (caddr_t)ppa;
		/*
		 * Unblock upper network streams which now feed this lower
		 * stream. We don't need to hold ppa_sib_lock here, since we
		 * are writer at the outer perimeter.
		 */
		if (WR(sps->sps_rq)->q_first != NULL)
			qenable(WR(sps->sps_rq));
		for (nextsib = ppa->ppa_streams; nextsib != NULL;
		    nextsib = nextsib->sps_nextsib) {
			nextsib->sps_npmode = NPMODE_PASS;
			if (WR(nextsib->sps_rq)->q_first != NULL) {
				qenable(WR(nextsib->sps_rq));
			}
		}
		/*
		 * Send useful information down to the modules which are now
		 * linked below this driver (for this particular ppa). Only
		 * do this if we are not the last PPP module on the stream.
		 */
		if (!IS_PPA_LASTMOD(ppa)) {
			(void) putctl8(lwq, M_CTL, PPPCTL_UNIT,
			    ppa->ppa_ppa_id);
			(void) putctl4(lwq, M_CTL, PPPCTL_MRU, ppa->ppa_mru);
			(void) putctl4(lwq, M_CTL, PPPCTL_MTU, ppa->ppa_mtu);
		}

		if (IS_SPS_KDEBUG(sps)) {
			SPDEBUG(PPP_DRV_NAME
			    "/%d: I_LINK lwq=0x%p sps=0x%p flags=0x%b ppa=0x%p "
			    "flags=0x%b\n", sps->sps_mn_id,
			    (void *)ppa->ppa_lower_wq, (void *)sps,
			    sps->sps_flags, SPS_FLAGS_STR,
			    (void *)ppa, ppa->ppa_flags,
			    PPA_FLAGS_STR);
		}
		error = 0;		/* return success */
		break;
	case I_UNLINK:
		/* Detach the lower driver stream from this ppa. */
		ASSERT(IS_SPS_CONTROL(sps));
		ASSERT(ppa != NULL);
		lwq = ppa->ppa_lower_wq;
		ASSERT(mp->b_cont != NULL && mp->b_cont->b_rptr != NULL);
		ASSERT(lwq == ((struct linkblk *)mp->b_cont->b_rptr)->l_qbot);

		if (IS_SPS_KDEBUG(sps)) {
			SPDEBUG(PPP_DRV_NAME
			    "/%d: I_UNLINK lwq=0x%p sps=0x%p flags=0x%b "
			    "ppa=0x%p flags=0x%b\n", sps->sps_mn_id,
			    (void *)lwq, (void *)sps, sps->sps_flags,
			    SPS_FLAGS_STR, (void *)ppa, ppa->ppa_flags,
			    PPA_FLAGS_STR);
		}
		/*
		 * While accessing the outer perimeter exclusively, we
		 * disassociate our ppa's lower_wq from the lower stream linked
		 * beneath us, and we also disassociate our control stream from
		 * the q_ptr of the lower stream.
		 */
		lwq->q_ptr = RD(lwq)->q_ptr = NULL;
		ppa->ppa_lower_wq = NULL;
		/*
		 * Unblock streams which now feed back up the control stream,
		 * and acknowledge the request. We don't need to hold
		 * ppa_sib_lock here, since we are writer at the outer
		 * perimeter.
		 */
		if (WR(sps->sps_rq)->q_first != NULL)
			qenable(WR(sps->sps_rq));
		for (nextsib = ppa->ppa_streams; nextsib != NULL;
		    nextsib = nextsib->sps_nextsib) {
			if (WR(nextsib->sps_rq)->q_first != NULL) {
				qenable(WR(nextsib->sps_rq));
			}
		}
		error = 0;		/* return success */
		break;
	case PPPIO_NEWPPA:
		/*
		 * Do sanity check to ensure that we don't accept PPPIO_NEWPPA
		 * on a stream which DLPI is used (since certain DLPI messages
		 * will cause state transition reflected in sps_dlstate,
		 * changing it from its default DL_UNATTACHED value). In other
		 * words, we won't allow a network/snoop stream to become
		 * a control stream.
		 */
		if (iop->ioc_cr == NULL ||
		    secpolicy_net_config(iop->ioc_cr, B_FALSE) != 0) {
			error = EPERM;
			break;
		} else if (IS_SPS_CONTROL(sps) || IS_SPS_PIOATTACH(sps) ||
		    (ppa != NULL) || (sps->sps_dlstate != DL_UNATTACHED)) {
			break;		/* return EINVAL */
		}
		/* Get requested unit number (if any) */
		if (iop->ioc_count == sizeof (uint32_t) && mp->b_cont != NULL)
			ppa_id = *(uint32_t *)mp->b_cont->b_rptr;
		else
			ppa_id = 0;
		/* Get mblk to use for response message */
		nmp = allocb(sizeof (uint32_t), BPRI_MED);
		if (nmp == NULL) {
			error = ENOSR;
			break;
		}
		if (mp->b_cont != NULL) {
			freemsg(mp->b_cont);
		}
		mp->b_cont = nmp;		/* chain our response mblk */
		/*
		 * Walk the global ppa list and determine the lowest
		 * available ppa_id number to be used.  Special request
		 * values: (uint32_t)-1 means "allocate the lowest free
		 * unit"; (uint32_t)-2 means "reuse an existing ppa that
		 * currently has no control stream".
		 */
		if (ppa_id == (uint32_t)-1)
			ppa_id = 0;
		for (ppa = ppa_list; ppa != NULL; ppa = ppa->ppa_nextppa) {
			if (ppa_id == (uint32_t)-2) {
				if (ppa->ppa_ctl == NULL)
					break;
			} else {
				if (ppa_id < ppa->ppa_ppa_id)
					break;
				if (ppa_id == ppa->ppa_ppa_id)
					++ppa_id;
			}
		}
		if (ppa_id == (uint32_t)-2) {
			if (ppa == NULL) {
				error = ENXIO;
				break;
			}
			/* Clear timestamp and lastmod flags */
			ppa->ppa_flags = 0;
		} else {
			ppa = sppp_create_ppa(ppa_id);
			if (ppa == NULL) {
				error = ENOMEM;
				break;
			}
		}

		sps->sps_ppa = ppa;		/* chain the ppa structure */
		sps->sps_npmode = NPMODE_PASS;	/* network packets may travel */
		sps->sps_flags |= SPS_CONTROL;	/* this is the control stream */

		ppa->ppa_refcnt++;		/* new PPA reference */
		ppa->ppa_ctl = sps;		/* back ptr to upper stream */
		/*
		 * Return the newly created ppa_id to the requestor and
		 * acknowledge the request.
		 */
		*(uint32_t *)nmp->b_wptr = ppa->ppa_ppa_id;
		nmp->b_wptr += sizeof (uint32_t);

		if (IS_SPS_KDEBUG(sps)) {
			SPDEBUG(PPP_DRV_NAME
			    "/%d: PPPIO_NEWPPA ppa_id=%d sps=0x%p flags=0x%b "
			    "ppa=0x%p flags=0x%b\n", sps->sps_mn_id, ppa_id,
			    (void *)sps, sps->sps_flags, SPS_FLAGS_STR,
			    (void *)ppa, ppa->ppa_flags,
			    PPA_FLAGS_STR);
		}
		count = msgsize(nmp);
		error = 0;
		break;
	}

	if (error == 0) {
		/* Success; tell the user. */
		miocack(q, mp, count, 0);
	} else {
		/* Failure; send error back upstream. */
		miocnak(q, mp, 0, error);
	}
}
1416 
1417 /*
1418  * sppp_send()
1419  *
1420  * MT-Perimeters:
1421  *    shared inner, shared outer.
1422  *
1423  * Description:
1424  *    Called by sppp_uwput to handle M_DATA message type.  Returns
1425  *    queue_t for putnext, or NULL to mean that the packet was
1426  *    handled internally.
1427  */
static queue_t *
sppp_send(queue_t *q, mblk_t **mpp, spppstr_t *sps)
{
	mblk_t	*mp;
	sppa_t	*ppa;
	int	is_promisc;
	int	msize;
	int	error = 0;
	queue_t	*nextq;

	ASSERT(mpp != NULL);
	mp = *mpp;
	ASSERT(q != NULL && q->q_ptr != NULL);
	ASSERT(mp != NULL && mp->b_rptr != NULL);
	ASSERT(sps != NULL);
	ASSERT(q->q_ptr == sps);
	/*
	 * We only let M_DATA through if the sender is either the control
	 * stream (for PPP control packets) or one of the network streams
	 * (for IP packets) in IP fastpath mode. If this stream is not attached
	 * to any ppas, then discard data coming down through this stream.
	 */
	ppa = sps->sps_ppa;
	if (ppa == NULL) {
		ASSERT(!IS_SPS_CONTROL(sps));
		error = ENOLINK;
	} else if (!IS_SPS_CONTROL(sps) && !IS_SPS_FASTPATH(sps)) {
		error = EPROTO;
	}
	if (error != 0) {
		merror(q, mp, error);
		return (NULL);
	}
	/*
	 * Size sanity checks below only update statistics; over- and
	 * under-sized packets are still transmitted.
	 */
	msize = msgdsize(mp);
	if (msize > (ppa->ppa_mtu + PPP_HDRLEN)) {
		/* Log, and send it anyway */
		mutex_enter(&ppa->ppa_sta_lock);
		ppa->ppa_otoolongs++;
		mutex_exit(&ppa->ppa_sta_lock);
	} else if (msize < PPP_HDRLEN) {
		/*
		 * Log, and send it anyway. We log it because we get things
		 * in M_DATA form here, which tells us that the sender is
		 * either IP in fastpath transmission mode, or pppd. In both
		 * cases, they are currently expected to send the 4-bytes
		 * PPP header in front of any possible payloads.
		 */
		mutex_enter(&ppa->ppa_sta_lock);
		ppa->ppa_orunts++;
		mutex_exit(&ppa->ppa_sta_lock);
	}

	if (IS_SPS_KDEBUG(sps)) {
		SPDEBUG(PPP_DRV_NAME
		    "/%d: M_DATA send (%d bytes) sps=0x%p flags=0x%b "
		    "ppa=0x%p flags=0x%b\n", sps->sps_mn_id, msize,
		    (void *)sps, sps->sps_flags, SPS_FLAGS_STR,
		    (void *)ppa, ppa->ppa_flags, PPA_FLAGS_STR);
	}
	/*
	 * Should there be any promiscuous stream(s), send the data up
	 * for each promiscuous stream that we recognize. Make sure that
	 * for fastpath, we skip the PPP header in the M_DATA mblk. We skip
	 * the control stream as we obviously never allow the control stream
	 * to become promiscuous and bind to PPP_ALLSAP.
	 */
	rw_enter(&ppa->ppa_sib_lock, RW_READER);
	is_promisc = sps->sps_ppa->ppa_promicnt;
	if (is_promisc) {
		ASSERT(ppa->ppa_streams != NULL);
		sppp_dlprsendup(ppa->ppa_streams, mp, sps->sps_sap, B_TRUE);
	}
	rw_exit(&ppa->ppa_sib_lock);
	/*
	 * Only time-stamp the packet with hrtime if the upper stream
	 * is configured to do so.  PPP control (negotiation) messages
	 * are never considered link activity; only data is activity.
	 */
	if (!IS_SPS_CONTROL(sps) && IS_PPA_TIMESTAMP(ppa)) {
		ppa->ppa_lasttx = gethrtime();
	}
	/*
	 * If there's already a message in the write-side service queue,
	 * then queue this message there as well, otherwise, try to send
	 * it down to the module immediately below us.
	 */
	if (q->q_first != NULL ||
	    (nextq = sppp_outpkt(q, mpp, msize, sps)) == NULL) {
		/* sppp_outpkt may have consumed or freed the message. */
		mp = *mpp;
		if (mp != NULL && putq(q, mp) == 0) {
			mutex_enter(&ppa->ppa_sta_lock);
			ppa->ppa_oqdropped++;
			mutex_exit(&ppa->ppa_sta_lock);
			freemsg(mp);
		}
		return (NULL);
	}
	return (nextq);
}
1527 
1528 /*
1529  * sppp_outpkt()
1530  *
1531  * MT-Perimeters:
 *    shared inner, shared outer (if called from sppp_uwput, sppp_dlunitdatareq).
 *    exclusive inner, shared outer (if called from sppp_uwsrv).
1534  *
1535  * Description:
1536  *    Called from 1) sppp_uwput when processing a M_DATA fastpath message,
1537  *    or 2) sppp_uwsrv when processing the upper write-side service queue.
1538  *    For both cases, it prepares to send the data to the module below
1539  *    this driver if there is a lower stream linked underneath. If none, then
1540  *    the data will be sent upstream via the control channel to pppd.
1541  *
1542  * Returns:
1543  *	Non-NULL queue_t if message should be sent now, otherwise
1544  *	if *mpp == NULL, then message was freed, otherwise put *mpp
1545  *	(back) on the queue.  (Does not do putq/putbq, since it's
1546  *	called both from srv and put procedures.)
1547  */
static queue_t *
sppp_outpkt(queue_t *q, mblk_t **mpp, int msize, spppstr_t *sps)
{
	mblk_t		*mp;
	sppa_t		*ppa;
	enum NPmode	npmode;
	mblk_t		*mpnew;

	ASSERT(mpp != NULL);
	mp = *mpp;
	ASSERT(q != NULL && q->q_ptr != NULL);
	ASSERT(mp != NULL && mp->b_rptr != NULL);
	ASSERT(sps != NULL);

	ppa = sps->sps_ppa;
	npmode = sps->sps_npmode;

	if (npmode == NPMODE_QUEUE) {
		/* *mpp left intact: caller will putq/putbq the message. */
		ASSERT(!IS_SPS_CONTROL(sps));
		return (NULL);	/* queue it for later */
	} else if (ppa == NULL || ppa->ppa_ctl == NULL ||
	    npmode == NPMODE_DROP || npmode == NPMODE_ERROR) {
		/*
		 * This can not be the control stream, as it must always have
		 * a valid ppa, and its npmode must always be NPMODE_PASS.
		 */
		ASSERT(!IS_SPS_CONTROL(sps));
		if (npmode == NPMODE_DROP) {
			freemsg(mp);
		} else {
			/*
			 * If we no longer have the control stream, or if the
			 * mode is set to NPMODE_ERROR, then we need to tell IP
			 * that the interface need to be marked as down. In
			 * other words, we tell IP to be quiescent.
			 */
			merror(q, mp, EPROTO);
		}
		*mpp = NULL;	/* message was consumed above */
		return (NULL);	/* don't queue it */
	}
	/*
	 * Do we have a driver stream linked underneath ? If not, we need to
	 * notify pppd that the link needs to be brought up and configure
	 * this upper stream to drop subsequent outgoing packets. This is
	 * for demand-dialing, in which case pppd has done the IP plumbing
	 * but hasn't linked the driver stream underneath us. Therefore, when
	 * a packet is sent down the IP interface, a notification message
	 * will be sent up the control stream to pppd in order for it to
	 * establish the physical link. The driver stream is then expected
	 * to be linked underneath after physical link establishment is done.
	 */
	if (ppa->ppa_lower_wq == NULL) {
		ASSERT(ppa->ppa_ctl != NULL);
		ASSERT(ppa->ppa_ctl->sps_rq != NULL);

		*mpp = NULL;	/* message is consumed on every path below */
		mpnew = create_lsmsg(PPP_LINKSTAT_NEEDUP);
		if (mpnew == NULL) {
			freemsg(mp);
			mutex_enter(&ppa->ppa_sta_lock);
			ppa->ppa_allocbfail++;
			mutex_exit(&ppa->ppa_sta_lock);
			return (NULL);	/* don't queue it */
		}
		/* Include the data in the message for logging. */
		mpnew->b_cont = mp;
		mutex_enter(&ppa->ppa_sta_lock);
		ppa->ppa_lsneedup++;
		mutex_exit(&ppa->ppa_sta_lock);
		/*
		 * We need to set the mode to NPMODE_DROP, but should only
		 * do so when this stream is not the control stream.
		 */
		if (!IS_SPS_CONTROL(sps)) {
			sps->sps_npmode = NPMODE_DROP;
		}
		putnext(ppa->ppa_ctl->sps_rq, mpnew);
		return (NULL);	/* don't queue it */
	}
	/*
	 * If so, then try to send it down. The lower queue is only ever
	 * detached while holding an exclusive lock on the whole driver,
	 * so we can be confident that the lower queue is still there.
	 */
	if (bcanputnext(ppa->ppa_lower_wq, mp->b_band)) {
		/* Account for the transmit before handing the queue back. */
		mutex_enter(&ppa->ppa_sta_lock);
		ppa->ppa_stats.p.ppp_opackets++;
		if (IS_SPS_CONTROL(sps)) {
			ppa->ppa_opkt_ctl++;
		}
		ppa->ppa_stats.p.ppp_obytes += msize;
		mutex_exit(&ppa->ppa_sta_lock);
		return (ppa->ppa_lower_wq);	/* don't queue it */
	}
	return (NULL);	/* queue it for later */
}
1645 
1646 /*
1647  * sppp_lwsrv()
1648  *
1649  * MT-Perimeters:
1650  *    exclusive inner, shared outer.
1651  *
1652  * Description:
1653  *    Lower write-side service procedure. No messages are ever placed on
1654  *    the write queue here, this just back-enables all upper write side
1655  *    service procedures.
1656  */
1657 void
1658 sppp_lwsrv(queue_t *q)
1659 {
1660 	sppa_t		*ppa;
1661 	spppstr_t	*nextsib;
1662 
1663 	ASSERT(q != NULL && q->q_ptr != NULL);
1664 	ppa = (sppa_t *)q->q_ptr;
1665 	ASSERT(ppa != NULL);
1666 
1667 	rw_enter(&ppa->ppa_sib_lock, RW_READER);
1668 	if ((nextsib = ppa->ppa_ctl) != NULL &&
1669 	    WR(nextsib->sps_rq)->q_first != NULL)
1670 		qenable(WR(nextsib->sps_rq));
1671 	for (nextsib = ppa->ppa_streams; nextsib != NULL;
1672 	    nextsib = nextsib->sps_nextsib) {
1673 		if (WR(nextsib->sps_rq)->q_first != NULL) {
1674 			qenable(WR(nextsib->sps_rq));
1675 		}
1676 	}
1677 	rw_exit(&ppa->ppa_sib_lock);
1678 }
1679 
1680 /*
1681  * sppp_lrput()
1682  *
1683  * MT-Perimeters:
1684  *    shared inner, shared outer.
1685  *
1686  * Description:
1687  *    Lower read-side put procedure. Messages from below get here.
1688  *    Data messages are handled separately to limit stack usage
1689  *    going into IP.
1690  *
1691  *    Note that during I_UNLINK processing, it's possible for a downstream
1692  *    message to enable upstream data (due to pass_wput() removing the
1693  *    SQ_BLOCKED flag), and thus we must protect against a NULL sppa pointer.
1694  *    In this case, the only thing above us is passthru, and we might as well
1695  *    discard.
1696  */
1697 void
1698 sppp_lrput(queue_t *q, mblk_t *mp)
1699 {
1700 	sppa_t		*ppa;
1701 	spppstr_t	*sps;
1702 
1703 	if ((ppa = q->q_ptr) == NULL) {
1704 		freemsg(mp);
1705 		return;
1706 	}
1707 
1708 	sps = ppa->ppa_ctl;
1709 
1710 	if (MTYPE(mp) != M_DATA) {
1711 		sppp_recv_nondata(q, mp, sps);
1712 	} else if (sps == NULL) {
1713 		freemsg(mp);
1714 	} else if ((q = sppp_recv(q, &mp, sps)) != NULL) {
1715 		putnext(q, mp);
1716 	}
1717 }
1718 
1719 /*
1720  * sppp_recv_nondata()
1721  *
1722  * MT-Perimeters:
1723  *    shared inner, shared outer.
1724  *
1725  * Description:
1726  *    All received non-data messages come through here.
1727  */
1728 static void
1729 sppp_recv_nondata(queue_t *q, mblk_t *mp, spppstr_t *ctlsps)
1730 {
1731 	sppa_t		*ppa;
1732 	spppstr_t	*destsps;
1733 	struct iocblk	*iop;
1734 
1735 	ppa = (sppa_t *)q->q_ptr;
1736 	ctlsps = ppa->ppa_ctl;
1737 
1738 	switch (MTYPE(mp)) {
1739 	case M_CTL:
1740 		mutex_enter(&ppa->ppa_sta_lock);
1741 		if (*mp->b_rptr == PPPCTL_IERROR) {
1742 			ppa->ppa_stats.p.ppp_ierrors++;
1743 			ppa->ppa_ierr_low++;
1744 			ppa->ppa_mctlsknown++;
1745 		} else if (*mp->b_rptr == PPPCTL_OERROR) {
1746 			ppa->ppa_stats.p.ppp_oerrors++;
1747 			ppa->ppa_oerr_low++;
1748 			ppa->ppa_mctlsknown++;
1749 		} else {
1750 			ppa->ppa_mctlsunknown++;
1751 		}
1752 		mutex_exit(&ppa->ppa_sta_lock);
1753 		freemsg(mp);
1754 		break;
1755 	case M_IOCTL:
1756 		miocnak(q, mp, 0, EINVAL);
1757 		break;
1758 	case M_IOCACK:
1759 	case M_IOCNAK:
1760 		iop = (struct iocblk *)mp->b_rptr;
1761 		ASSERT(iop != NULL);
1762 		/*
1763 		 * Attempt to match up the response with the stream that the
1764 		 * request came from. If ioc_id doesn't match the one that we
1765 		 * recorded, then discard this message.
1766 		 */
1767 		rw_enter(&ppa->ppa_sib_lock, RW_READER);
1768 		if ((destsps = ctlsps) == NULL ||
1769 		    destsps->sps_ioc_id != iop->ioc_id) {
1770 			destsps = ppa->ppa_streams;
1771 			while (destsps != NULL) {
1772 				if (destsps->sps_ioc_id == iop->ioc_id) {
1773 					break;	/* found the upper stream */
1774 				}
1775 				destsps = destsps->sps_nextsib;
1776 			}
1777 		}
1778 		rw_exit(&ppa->ppa_sib_lock);
1779 		if (destsps == NULL) {
1780 			mutex_enter(&ppa->ppa_sta_lock);
1781 			ppa->ppa_ioctlsfwderr++;
1782 			mutex_exit(&ppa->ppa_sta_lock);
1783 			freemsg(mp);
1784 			break;
1785 		}
1786 		mutex_enter(&ppa->ppa_sta_lock);
1787 		ppa->ppa_ioctlsfwdok++;
1788 		mutex_exit(&ppa->ppa_sta_lock);
1789 		putnext(destsps->sps_rq, mp);
1790 		break;
1791 	case M_HANGUP:
1792 		/*
1793 		 * Free the original mblk_t. We don't really want to send
1794 		 * a M_HANGUP message upstream, so we need to translate this
1795 		 * message into something else.
1796 		 */
1797 		freemsg(mp);
1798 		if (ctlsps == NULL)
1799 			break;
1800 		mp = create_lsmsg(PPP_LINKSTAT_HANGUP);
1801 		if (mp == NULL) {
1802 			mutex_enter(&ppa->ppa_sta_lock);
1803 			ppa->ppa_allocbfail++;
1804 			mutex_exit(&ppa->ppa_sta_lock);
1805 			break;
1806 		}
1807 		mutex_enter(&ppa->ppa_sta_lock);
1808 		ppa->ppa_lsdown++;
1809 		mutex_exit(&ppa->ppa_sta_lock);
1810 		putnext(ctlsps->sps_rq, mp);
1811 		break;
1812 	case M_FLUSH:
1813 		if (*mp->b_rptr & FLUSHR) {
1814 			flushq(q, FLUSHDATA);
1815 		}
1816 		if (*mp->b_rptr & FLUSHW) {
1817 			*mp->b_rptr &= ~FLUSHR;
1818 			qreply(q, mp);
1819 		} else {
1820 			freemsg(mp);
1821 		}
1822 		break;
1823 	default:
1824 		if (ctlsps != NULL &&
1825 		    (queclass(mp) == QPCTL) || canputnext(ctlsps->sps_rq)) {
1826 			putnext(ctlsps->sps_rq, mp);
1827 		} else {
1828 			mutex_enter(&ppa->ppa_sta_lock);
1829 			ppa->ppa_iqdropped++;
1830 			mutex_exit(&ppa->ppa_sta_lock);
1831 			freemsg(mp);
1832 		}
1833 		break;
1834 	}
1835 }
1836 
1837 /*
1838  * sppp_recv()
1839  *
1840  * MT-Perimeters:
1841  *    shared inner, shared outer.
1842  *
1843  * Description:
1844  *    Receive function called by sppp_lrput.  Finds appropriate
1845  *    receive stream and does accounting.
1846  */
static queue_t *
sppp_recv(queue_t *q, mblk_t **mpp, spppstr_t *ctlsps)
{
	mblk_t		*mp;
	int		len;
	sppa_t		*ppa;
	spppstr_t	*destsps;
	mblk_t		*zmp;
	uint32_t	npflagpos;

	ASSERT(mpp != NULL);
	mp = *mpp;
	ASSERT(q != NULL && q->q_ptr != NULL);
	ASSERT(mp != NULL && mp->b_rptr != NULL);
	ASSERT(ctlsps != NULL);
	ASSERT(IS_SPS_CONTROL(ctlsps));
	ppa = ctlsps->sps_ppa;
	ASSERT(ppa != NULL && ppa->ppa_ctl != NULL);

	len = msgdsize(mp);
	mutex_enter(&ppa->ppa_sta_lock);
	ppa->ppa_stats.p.ppp_ibytes += len;
	mutex_exit(&ppa->ppa_sta_lock);
	/*
	 * If the entire data size of the mblk is less than the length of the
	 * PPP header, then free it. We can't do much with such message anyway,
	 * since we can't really determine what the PPP protocol type is.
	 */
	if (len < PPP_HDRLEN) {
		/* Log, and free it */
		mutex_enter(&ppa->ppa_sta_lock);
		ppa->ppa_irunts++;
		mutex_exit(&ppa->ppa_sta_lock);
		freemsg(mp);
		return (NULL);
	} else if (len > (ppa->ppa_mru + PPP_HDRLEN)) {
		/* Log, and accept it anyway */
		mutex_enter(&ppa->ppa_sta_lock);
		ppa->ppa_itoolongs++;
		mutex_exit(&ppa->ppa_sta_lock);
	}
	/*
	 * We need at least be able to read the PPP protocol from the header,
	 * so if the first message block is too small, then we concatenate the
	 * rest of the following blocks into one message.
	 */
	if (MBLKL(mp) < PPP_HDRLEN) {
		zmp = msgpullup(mp, PPP_HDRLEN);
		freemsg(mp);
		mp = zmp;
		if (mp == NULL) {
			mutex_enter(&ppa->ppa_sta_lock);
			ppa->ppa_allocbfail++;
			mutex_exit(&ppa->ppa_sta_lock);
			return (NULL);
		}
		*mpp = mp;	/* caller must see the pulled-up message */
	}
	/*
	 * If this PPP protocol (sap) is currently blocked, i.e. its matching
	 * network-layer upper stream has not yet been plumbed and configured,
	 * then hold the packet on the control stream's read queue until the
	 * protocol is unblocked (see PPPIO_BLOCKNP/PPPIO_UNBLOCKNP).
	 */
	npflagpos = sppp_ppp2np(PPP_PROTOCOL(mp->b_rptr));
	mutex_enter(&ppa->ppa_npmutex);
	if (npflagpos != 0 && (ppa->ppa_npflag & (1 << npflagpos))) {
		/*
		 * proto is currently blocked; Hold up to 4 packets
		 * in the kernel.
		 */
		if (ppa->ppa_holdpkts[npflagpos] > 3 ||
		    putq(ctlsps->sps_rq, mp) == 0)
			freemsg(mp);
		else
			ppa->ppa_holdpkts[npflagpos]++;
		mutex_exit(&ppa->ppa_npmutex);
		return (NULL);
	}
	mutex_exit(&ppa->ppa_npmutex);
	/*
	 * Try to find a matching network-layer upper stream for the specified
	 * PPP protocol (sap), and if none is found, send this frame up the
	 * control stream.
	 */
	destsps = sppp_inpkt(q, mp, ctlsps);
	if (destsps == NULL) {
		mutex_enter(&ppa->ppa_sta_lock);
		ppa->ppa_ipkt_ctl++;
		mutex_exit(&ppa->ppa_sta_lock);
		if (canputnext(ctlsps->sps_rq)) {
			if (IS_SPS_KDEBUG(ctlsps)) {
				SPDEBUG(PPP_DRV_NAME
				    "/%d: M_DATA recv (%d bytes) sps=0x%p "
				    "flags=0x%b ppa=0x%p flags=0x%b\n",
				    ctlsps->sps_mn_id, len, (void *)ctlsps,
				    ctlsps->sps_flags, SPS_FLAGS_STR,
				    (void *)ppa, ppa->ppa_flags,
				    PPA_FLAGS_STR);
			}
			return (ctlsps->sps_rq);
		} else {
			mutex_enter(&ppa->ppa_sta_lock);
			ppa->ppa_iqdropped++;
			mutex_exit(&ppa->ppa_sta_lock);
			freemsg(mp);
			return (NULL);
		}
	}
	if (canputnext(destsps->sps_rq)) {
		if (IS_SPS_KDEBUG(destsps)) {
			SPDEBUG(PPP_DRV_NAME
			    "/%d: M_DATA recv (%d bytes) sps=0x%p flags=0x%b "
			    "ppa=0x%p flags=0x%b\n", destsps->sps_mn_id, len,
			    (void *)destsps, destsps->sps_flags,
			    SPS_FLAGS_STR, (void *)ppa, ppa->ppa_flags,
			    PPA_FLAGS_STR);
		}
		/*
		 * If fastpath is enabled on the network-layer stream, then
		 * make sure we skip over the PPP header, otherwise, we wrap
		 * the message in a DLPI message.
		 */
		if (IS_SPS_FASTPATH(destsps)) {
			mp->b_rptr += PPP_HDRLEN;
			return (destsps->sps_rq);
		} else {
			spppstr_t *uqs = (spppstr_t *)destsps->sps_rq->q_ptr;
			ASSERT(uqs != NULL);
			mp->b_rptr += PPP_HDRLEN;
			mp = sppp_dladdud(uqs, mp, uqs->sps_sap, B_FALSE);
			if (mp != NULL) {
				*mpp = mp;
				return (destsps->sps_rq);
			} else {
				mutex_enter(&ppa->ppa_sta_lock);
				ppa->ppa_allocbfail++;
				mutex_exit(&ppa->ppa_sta_lock);
				/* mp already freed by sppp_dladdud */
				return (NULL);
			}
		}
	} else {
		mutex_enter(&ppa->ppa_sta_lock);
		ppa->ppa_iqdropped++;
		mutex_exit(&ppa->ppa_sta_lock);
		freemsg(mp);
		return (NULL);
	}
}
1996 
1997 /*
1998  * sppp_inpkt()
1999  *
2000  * MT-Perimeters:
2001  *    shared inner, shared outer.
2002  *
2003  * Description:
2004  *    Find the destination upper stream for the received packet, called
2005  *    from sppp_recv.
2006  *
2007  * Returns:
2008  *    ptr to destination upper network stream, or NULL for control stream.
2009  */
2010 /* ARGSUSED */
2011 static spppstr_t *
2012 sppp_inpkt(queue_t *q, mblk_t *mp, spppstr_t *ctlsps)
2013 {
2014 	spppstr_t	*destsps = NULL;
2015 	sppa_t		*ppa;
2016 	uint16_t	proto;
2017 	int		is_promisc;
2018 
2019 	ASSERT(q != NULL && q->q_ptr != NULL);
2020 	ASSERT(mp != NULL && mp->b_rptr != NULL);
2021 	ASSERT(IS_SPS_CONTROL(ctlsps));
2022 	ppa = ctlsps->sps_ppa;
2023 	ASSERT(ppa != NULL);
2024 	/*
2025 	 * From RFC 1661 (Section 2):
2026 	 *
2027 	 * The Protocol field is one or two octets, and its value identifies
2028 	 * the datagram encapsulated in the Information field of the packet.
2029 	 * The field is transmitted and received most significant octet first.
2030 	 *
2031 	 * The structure of this field is consistent with the ISO 3309
2032 	 * extension mechanism for address fields.  All Protocols MUST be odd;
2033 	 * the least significant bit of the least significant octet MUST equal
2034 	 * "1".  Also, all Protocols MUST be assigned such that the least
2035 	 * significant bit of the most significant octet equals "0". Frames
2036 	 * received which don't comply with these rules MUST be treated as
2037 	 * having an unrecognized Protocol.
2038 	 *
2039 	 * Protocol field values in the "0***" to "3***" range identify the
2040 	 * network-layer protocol of specific packets, and values in the
2041 	 * "8***" to "b***" range identify packets belonging to the associated
2042 	 * Network Control Protocols (NCPs), if any.
2043 	 *
2044 	 * Protocol field values in the "4***" to "7***" range are used for
2045 	 * protocols with low volume traffic which have no associated NCP.
2046 	 * Protocol field values in the "c***" to "f***" range identify packets
2047 	 * as link-layer Control Protocols (such as LCP).
2048 	 */
2049 	proto = PPP_PROTOCOL(mp->b_rptr);
2050 	mutex_enter(&ppa->ppa_sta_lock);
2051 	ppa->ppa_stats.p.ppp_ipackets++;
2052 	mutex_exit(&ppa->ppa_sta_lock);
2053 	/*
2054 	 * We check if this is not a network-layer protocol, and if so,
2055 	 * then send this packet up the control stream.
2056 	 */
2057 	if (proto > 0x7fff) {
2058 		goto inpkt_done;	/* send it up the control stream */
2059 	}
2060 	/*
2061 	 * Try to grab the destination upper stream from the network-layer
2062 	 * stream cache for this ppa for PPP_IP (0x0021) or PPP_IPV6 (0x0057)
2063 	 * protocol types. Otherwise, if the type is not known to the cache,
2064 	 * or if its sap can't be matched with any of the upper streams, then
2065 	 * send this packet up the control stream so that it can be rejected.
2066 	 */
2067 	if (proto == PPP_IP) {
2068 		destsps = ppa->ppa_ip_cache;
2069 	} else if (proto == PPP_IPV6) {
2070 		destsps = ppa->ppa_ip6_cache;
2071 	}
2072 	/*
2073 	 * Toss this one away up the control stream if there's no matching sap;
2074 	 * this way the protocol can be rejected (destsps is NULL).
2075 	 */
2076 
2077 inpkt_done:
2078 	/*
2079 	 * Only time-stamp the packet with hrtime if the upper stream
2080 	 * is configured to do so.  PPP control (negotiation) messages
2081 	 * are never considered link activity; only data is activity.
2082 	 */
2083 	if (destsps != NULL && IS_PPA_TIMESTAMP(ppa)) {
2084 		ppa->ppa_lastrx = gethrtime();
2085 	}
2086 	/*
2087 	 * Should there be any promiscuous stream(s), send the data up for
2088 	 * each promiscuous stream that we recognize. We skip the control
2089 	 * stream as we obviously never allow the control stream to become
2090 	 * promiscous and bind to PPP_ALLSAP.
2091 	 */
2092 	rw_enter(&ppa->ppa_sib_lock, RW_READER);
2093 	is_promisc = ppa->ppa_promicnt;
2094 	if (is_promisc) {
2095 		ASSERT(ppa->ppa_streams != NULL);
2096 		sppp_dlprsendup(ppa->ppa_streams, mp, proto, B_TRUE);
2097 	}
2098 	rw_exit(&ppa->ppa_sib_lock);
2099 	return (destsps);
2100 }
2101 
2102 /*
2103  * sppp_kstat_update()
2104  *
2105  * Description:
2106  *    Update per-ppa kstat interface statistics.
2107  */
2108 static int
2109 sppp_kstat_update(kstat_t *ksp, int rw)
2110 {
2111 	register sppa_t		*ppa;
2112 	register sppp_kstats_t	*pppkp;
2113 	register struct pppstat64 *sp;
2114 
2115 	if (rw == KSTAT_WRITE) {
2116 		return (EACCES);
2117 	}
2118 
2119 	ppa = (sppa_t *)ksp->ks_private;
2120 	ASSERT(ppa != NULL);
2121 
2122 	pppkp = (sppp_kstats_t *)ksp->ks_data;
2123 	sp = &ppa->ppa_stats.p;
2124 
2125 	mutex_enter(&ppa->ppa_sta_lock);
2126 	pppkp->allocbfail.value.ui32	= ppa->ppa_allocbfail;
2127 	pppkp->mctlsfwd.value.ui32	= ppa->ppa_mctlsfwd;
2128 	pppkp->mctlsfwderr.value.ui32	= ppa->ppa_mctlsfwderr;
2129 	pppkp->rbytes.value.ui32	= sp->ppp_ibytes;
2130 	pppkp->rbytes64.value.ui64	= sp->ppp_ibytes;
2131 	pppkp->ierrors.value.ui32	= sp->ppp_ierrors;
2132 	pppkp->ierrors_lower.value.ui32	= ppa->ppa_ierr_low;
2133 	pppkp->ioctlsfwd.value.ui32	= ppa->ppa_ioctlsfwd;
2134 	pppkp->ioctlsfwdok.value.ui32	= ppa->ppa_ioctlsfwdok;
2135 	pppkp->ioctlsfwderr.value.ui32	= ppa->ppa_ioctlsfwderr;
2136 	pppkp->ipackets.value.ui32	= sp->ppp_ipackets;
2137 	pppkp->ipackets64.value.ui64	= sp->ppp_ipackets;
2138 	pppkp->ipackets_ctl.value.ui32	= ppa->ppa_ipkt_ctl;
2139 	pppkp->iqdropped.value.ui32	= ppa->ppa_iqdropped;
2140 	pppkp->irunts.value.ui32	= ppa->ppa_irunts;
2141 	pppkp->itoolongs.value.ui32	= ppa->ppa_itoolongs;
2142 	pppkp->lsneedup.value.ui32	= ppa->ppa_lsneedup;
2143 	pppkp->lsdown.value.ui32	= ppa->ppa_lsdown;
2144 	pppkp->mctlsknown.value.ui32	= ppa->ppa_mctlsknown;
2145 	pppkp->mctlsunknown.value.ui32	= ppa->ppa_mctlsunknown;
2146 	pppkp->obytes.value.ui32	= sp->ppp_obytes;
2147 	pppkp->obytes64.value.ui64	= sp->ppp_obytes;
2148 	pppkp->oerrors.value.ui32	= sp->ppp_oerrors;
2149 	pppkp->oerrors_lower.value.ui32	= ppa->ppa_oerr_low;
2150 	pppkp->opackets.value.ui32	= sp->ppp_opackets;
2151 	pppkp->opackets64.value.ui64	= sp->ppp_opackets;
2152 	pppkp->opackets_ctl.value.ui32	= ppa->ppa_opkt_ctl;
2153 	pppkp->oqdropped.value.ui32	= ppa->ppa_oqdropped;
2154 	pppkp->otoolongs.value.ui32	= ppa->ppa_otoolongs;
2155 	pppkp->orunts.value.ui32	= ppa->ppa_orunts;
2156 	mutex_exit(&ppa->ppa_sta_lock);
2157 
2158 	return (0);
2159 }
2160 
2161 /*
2162  * Turn off proto in ppa_npflag to indicate that
2163  * the corresponding network protocol has been plumbed.
2164  * Release proto packets that were being held in the control
2165  * queue in anticipation of this event.
2166  */
static void
sppp_release_pkts(sppa_t *ppa, uint16_t proto)
{
	/* Bit position in ppa_npflag/ppa_holdpkts for this protocol. */
	uint32_t npflagpos = sppp_ppp2np(proto);
	int count;
	mblk_t *mp;
	uint16_t mp_proto;
	queue_t *q;
	spppstr_t *destsps;

	ASSERT(ppa != NULL);

	/* Nothing to do unless this protocol is currently marked as held. */
	if (npflagpos == 0 || (ppa->ppa_npflag & (1 << npflagpos)) == 0)
		return;

	/*
	 * Clear the hold flag and claim the count of held packets for
	 * this protocol; both fields are protected by ppa_npmutex.
	 */
	mutex_enter(&ppa->ppa_npmutex);
	ppa->ppa_npflag &= ~(1 << npflagpos);
	count = ppa->ppa_holdpkts[npflagpos];
	ppa->ppa_holdpkts[npflagpos] = 0;
	mutex_exit(&ppa->ppa_npmutex);

	/* Held packets sit on the control stream's read queue. */
	q = ppa->ppa_ctl->sps_rq;

	while (count > 0) {
		mp = getq(q);
		/* count promises this many held packets are still queued */
		ASSERT(mp != NULL);

		mp_proto = PPP_PROTOCOL(mp->b_rptr);
		if (mp_proto !=  proto) {
			/* Not our protocol; requeue it for a later release. */
			(void) putq(q, mp);
			continue;
		}
		count--;
		destsps = NULL;
		if (mp_proto == PPP_IP) {
			destsps = ppa->ppa_ip_cache;
		} else if (mp_proto == PPP_IPV6) {
			destsps = ppa->ppa_ip6_cache;
		}
		/*
		 * The protocol was just plumbed (that is what triggered
		 * this release), so its stream cache entry must exist.
		 */
		ASSERT(destsps != NULL);

		if (IS_SPS_FASTPATH(destsps)) {
			/* Fastpath: strip the PPP header, pass raw payload. */
			mp->b_rptr += PPP_HDRLEN;
		} else {
			/* Otherwise wrap the payload in a DLPI unitdata msg. */
			spppstr_t *uqs = (spppstr_t *)destsps->sps_rq->q_ptr;
			ASSERT(uqs != NULL);
			mp->b_rptr += PPP_HDRLEN;
			mp = sppp_dladdud(uqs, mp, uqs->sps_sap, B_FALSE);
			if (mp == NULL) {
				mutex_enter(&ppa->ppa_sta_lock);
				ppa->ppa_allocbfail++;
				mutex_exit(&ppa->ppa_sta_lock);
				/* mp already freed by sppp_dladdud */
				continue;
			}
		}

		if (canputnext(destsps->sps_rq)) {
			putnext(destsps->sps_rq, mp);
		} else {
			/* Upper stream is flow-controlled; drop the packet. */
			mutex_enter(&ppa->ppa_sta_lock);
			ppa->ppa_iqdropped++;
			mutex_exit(&ppa->ppa_sta_lock);
			freemsg(mp);
			continue;
		}
	}
2234 }
2235