1 /*
2  * spppcomp.c - STREAMS module for kernel-level compression and CCP support.
3  *
4  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
5  * Use is subject to license terms.
6  * Copyright (c) 2016 by Delphix. All rights reserved.
7  *
8  * Permission to use, copy, modify, and distribute this software and its
9  * documentation is hereby granted, provided that the above copyright
10  * notice appears in all copies.
11  *
12  * SUN MAKES NO REPRESENTATION OR WARRANTIES ABOUT THE SUITABILITY OF
13  * THE SOFTWARE, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
14  * TO THE IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
15  * PARTICULAR PURPOSE, OR NON-INFRINGEMENT.  SUN SHALL NOT BE LIABLE FOR
16  * ANY DAMAGES SUFFERED BY LICENSEE AS A RESULT OF USING, MODIFYING OR
17  * DISTRIBUTING THIS SOFTWARE OR ITS DERIVATIVES
18  *
19  * Copyright (c) 1994 The Australian National University.
20  * All rights reserved.
21  *
22  * Permission to use, copy, modify, and distribute this software and its
23  * documentation is hereby granted, provided that the above copyright
24  * notice appears in all copies.  This software is provided without any
25  * warranty, express or implied. The Australian National University
26  * makes no representations about the suitability of this software for
27  * any purpose.
28  *
29  * IN NO EVENT SHALL THE AUSTRALIAN NATIONAL UNIVERSITY BE LIABLE TO ANY
30  * PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
31  * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF
32  * THE AUSTRALIAN NATIONAL UNIVERSITY HAS BEEN ADVISED OF THE POSSIBILITY
33  * OF SUCH DAMAGE.
34  *
35  * THE AUSTRALIAN NATIONAL UNIVERSITY SPECIFICALLY DISCLAIMS ANY WARRANTIES,
36  * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
37  * AND FITNESS FOR A PARTICULAR PURPOSE.  THE SOFTWARE PROVIDED HEREUNDER IS
38  * ON AN "AS IS" BASIS, AND THE AUSTRALIAN NATIONAL UNIVERSITY HAS NO
39  * OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS,
40  * OR MODIFICATIONS.
41  *
42  * This module is derived from the original SVR4 STREAMS PPP compression
43  * module originally written by Paul Mackerras <paul.mackerras@cs.anu.edu.au>.
44  *
45  * James Carlson <james.d.carlson@sun.com> and Adi Masputra
46  * <adi.masputra@sun.com> rewrote and restructured the code for improved
47  * performance and scalability.
48  */
49 
50 #define	RCSID	"$Id: spppcomp.c,v 1.0 2000/05/08 01:10:12 masputra Exp $"
51 
52 #include <sys/types.h>
53 #include <sys/debug.h>
54 #include <sys/param.h>
55 #include <sys/stream.h>
56 #include <sys/stropts.h>
57 #include <sys/errno.h>
58 #include <sys/conf.h>
59 #include <sys/cpuvar.h>
60 #include <sys/cmn_err.h>
61 #include <sys/kmem.h>
62 #include <sys/ddi.h>
63 #include <sys/kstat.h>
64 #include <sys/strsun.h>
65 #include <sys/sysmacros.h>
66 #include <netinet/in.h>
67 #include <netinet/ip.h>
68 #include <net/ppp_defs.h>
69 #include <net/pppio.h>
70 #include <net/vjcompress.h>
71 
72 /* Defined for platform-neutral include file */
73 #define	PACKETPTR	mblk_t *
74 #include <net/ppp-comp.h>
75 
76 #include "s_common.h"
77 
78 #ifdef DEBUG
79 #define	SPC_DEBUG
80 #endif
81 #include "spppcomp.h"
82 
83 /*
84  * This is used to tag official Solaris sources.  Please do not define
85  * "INTERNAL_BUILD" when building this software outside of Sun
86  * Microsystems.
87  */
88 #ifdef INTERNAL_BUILD
89 /* MODINFO is limited to 32 characters. */
90 const char spppcomp_module_description[] = "PPP 4.0 compression";
91 #else /* INTERNAL_BUILD */
92 const char spppcomp_module_description[] =
93 	"ANU PPP compression $Revision: 1.16$ ";
94 
95 /* LINTED */
96 static const char buildtime[] = "Built " __DATE__ " at " __TIME__
97 #ifdef DEBUG
98 " DEBUG"
99 #endif
100 "\n";
101 #endif /* INTERNAL_BUILD */
102 
103 static int	spppcomp_open(queue_t *, dev_t *, int, int, cred_t *);
104 static int	spppcomp_close(queue_t *, int, cred_t *);
105 static int	spppcomp_rput(queue_t *, mblk_t *);
106 static int	spppcomp_rsrv(queue_t *);
107 static int	spppcomp_wput(queue_t *, mblk_t *);
108 static int	spppcomp_wsrv(queue_t *);
109 
110 #define	PPPCOMP_MI_MINPSZ	(0)
111 #define	PPPCOMP_MI_MAXPSZ	(INFPSZ)
112 #define	PPPCOMP_MI_HIWAT	(PPP_MTU * 20)
113 #define	PPPCOMP_MI_LOWAT	(PPP_MTU * 18)
114 
115 static struct module_info spppcomp_modinfo = {
116 	COMP_MOD_ID,		/* mi_idnum */
117 	COMP_MOD_NAME,		/* mi_idname */
118 	PPPCOMP_MI_MINPSZ,	/* mi_minpsz */
119 	PPPCOMP_MI_MAXPSZ,	/* mi_maxpsz */
120 	PPPCOMP_MI_HIWAT,	/* mi_hiwat */
121 	PPPCOMP_MI_LOWAT	/* mi_lowat */
122 };
123 
124 static struct qinit spppcomp_rinit = {
125 	spppcomp_rput,		/* qi_putp */
126 	spppcomp_rsrv,		/* qi_srvp */
127 	spppcomp_open,		/* qi_qopen */
128 	spppcomp_close,		/* qi_qclose */
129 	NULL,			/* qi_qadmin */
130 	&spppcomp_modinfo,	/* qi_minfo */
131 	NULL			/* qi_mstat */
132 };
133 
134 static struct qinit spppcomp_winit = {
135 	spppcomp_wput,		/* qi_putp */
136 	spppcomp_wsrv,		/* qi_srvp */
137 	NULL,			/* qi_qopen */
138 	NULL,			/* qi_qclose */
139 	NULL,			/* qi_qadmin */
140 	&spppcomp_modinfo,	/* qi_minfo */
141 	NULL			/* qi_mstat */
142 };
143 
144 struct streamtab spppcomp_tab = {
145 	&spppcomp_rinit,	/* st_rdinit */
146 	&spppcomp_winit,	/* st_wrinit */
147 	NULL,			/* st_muxrinit */
148 	NULL			/* st_muxwinit */
149 };
150 
151 /* Set non-zero to debug algorithm-specific problems alone. */
152 #define	ALG_DEBUG	0
153 
154 #define	MAX_IPHLEN	(0x0f << 2)
155 #define	MAX_TCPHLEN	(0x0f << 2)
156 #define	MAX_TCPIPHLEN	(MAX_IPHLEN + MAX_TCPHLEN) /* max TCP/IP header size */
157 #define	MAX_VJHDR	(20)		/* max VJ compressed header size (?) */
158 
159 #if 0
160 #define	DBGSTART	CE_CONT, COMP_MOD_NAME "%d: "
161 #define	CKDEBUG(x)	cmn_err x
162 #else
163 #define	DBGSTART	COMP_MOD_NAME "%d: "
164 #define	CKDEBUG(x)	printf x
165 #endif
166 #define	CPDEBUG(x)	(IS_CP_KDEBUG(cp) ? CKDEBUG(x) : (void)0)
167 
168 /*
169  * List of compressors we know about.
170  */
171 #if DO_BSD_COMPRESS
172 extern struct compressor ppp_bsd_compress;
173 #endif
174 #if DO_DEFLATE
175 extern struct compressor ppp_deflate;
176 extern struct compressor ppp_deflate_draft;
177 #endif
178 
179 struct compressor *ppp_compressors[] = {
180 #if DO_BSD_COMPRESS
181 	&ppp_bsd_compress,
182 #endif
183 #if DO_DEFLATE
184 	&ppp_deflate,
185 	&ppp_deflate_draft,
186 #endif
187 	NULL
188 };
189 
190 /*
191  * LCP_USE_DFLT() removed by James Carlson.  RFC 1661 section 6.6 has
192  * this to say on the topic:
193  *
194  *    The Address and Control fields MUST NOT be compressed when sending
195  *    any LCP packet.  This rule guarantees unambiguous recognition of
196  *    LCP packets.
197  */
198 
199 static void	spppcomp_ioctl(queue_t *, mblk_t *, sppp_comp_t *);
200 static int	spppcomp_mctl(queue_t *, mblk_t *);
201 static mblk_t	*spppcomp_outpkt(queue_t *, mblk_t *);
202 static mblk_t	*spppcomp_inpkt(queue_t *, mblk_t *);
203 static int	spppcomp_kstat_update(kstat_t *, int);
204 static void	comp_ccp(queue_t *, mblk_t *, sppp_comp_t *, boolean_t);
205 
206 /*
207  * Values for checking inter-arrival times on interrupt stacks.  These
208  * are used to prevent CPU hogging in interrupt context.
209  */
210 #define	MIN_ARRIVAL_TIME	5000000	/* interarrival time in nanoseconds */
211 #define	MAX_FAST_ARRIVALS	10	/* maximum packet count */
212 hrtime_t spppcomp_min_arrival = MIN_ARRIVAL_TIME;
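/*
 * Put concretely: 5,000,000 ns is 5 ms, so once roughly MAX_FAST_ARRIVALS
 * (ten) packets in a row arrive less than 5 ms apart while we are on an
 * interrupt stack, the read-side put routine below defers further
 * decompression work to the service routine.
 */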
213 
214 static const char *kstats_names[] = {
215 #ifdef SPCDEBUG_KSTATS_NAMES
216 	SPPPCOMP_KSTATS_NAMES,
217 	SPCDEBUG_KSTATS_NAMES
218 #else
219 	SPPPCOMP_KSTATS_NAMES
220 #endif
221 };
222 static const char *kstats64_names[] = { SPPPCOMP_KSTATS64_NAMES };
223 
224 /*
225  * spppcomp_open()
226  *
227  * MT-Perimeters:
228  *    exclusive inner.
229  *
230  * Description:
231  *    Common open procedure for module.
232  */
233 /* ARGSUSED */
234 static int
235 spppcomp_open(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *credp)
236 {
237 	sppp_comp_t	*cp;
238 
239 	if (q->q_ptr != NULL) {
240 		return (0);
241 	}
242 	if (sflag != MODOPEN) {
243 		return (EINVAL);
244 	}
245 	cp = kmem_zalloc(sizeof (sppp_comp_t), KM_SLEEP);
246 	q->q_ptr = WR(q)->q_ptr = (caddr_t)cp;
247 
248 	cp->cp_mru = PPP_MRU;
249 	cp->cp_mtu = PPP_MTU;
250 
251 	mutex_init(&cp->cp_pair_lock, NULL, MUTEX_DRIVER, NULL);
252 	vj_compress_init(&cp->cp_vj, -1);
253 	cp->cp_nxslots = -1;
254 	cp->cp_effort = -1;
255 
256 	qprocson(q);
257 	return (0);
258 }
259 
260 /*
261  * spppcomp_close()
262  *
263  * MT-Perimeters:
264  *    exclusive inner.
265  *
266  * Description:
267  *    Common close procedure for module.
268  */
269 /* ARGSUSED */
270 static int
271 spppcomp_close(queue_t *q, int flag, cred_t *credp)
272 {
273 	sppp_comp_t	*cp = q->q_ptr;
274 
275 	qprocsoff(q);
276 
277 	CPDEBUG((DBGSTART "close flags=0x%b\n",
278 	    (IS_CP_HASUNIT(cp) ? cp->cp_unit : -1), cp->cp_flags,
279 	    CP_FLAGSSTR));
280 	mutex_destroy(&cp->cp_pair_lock);
281 	if (cp->cp_kstats) {
282 		ASSERT(IS_CP_HASUNIT(cp));
283 		kstat_delete(cp->cp_kstats);
284 	}
285 	if (cp->cp_xstate != NULL) {
286 		(*cp->cp_xcomp->comp_free)(cp->cp_xstate);
287 	}
288 	if (cp->cp_rstate != NULL) {
289 		(*cp->cp_rcomp->decomp_free)(cp->cp_rstate);
290 	}
291 	kmem_free(cp, sizeof (sppp_comp_t));
292 	q->q_ptr = WR(q)->q_ptr = NULL;
293 
294 	return (0);
295 }
296 
297 /*
298  * spppcomp_wput()
299  *
300  * MT-Perimeters:
301  *    exclusive inner.
302  *
303  * Description:
304  *    Write-side put procedure.  Packets from above us arrive here.
305  *
306  *	The data handling logic is a little tricky here.  We defer to
307  *	the service routine if q_first isn't NULL (to preserve message
308  *	ordering after deferring a previous message), bcanputnext() is
309  *	FALSE (to handle flow control), or we need a lot of processing
310  *	and we're in an interrupt context (on the theory that we're
311  *	already on a very long call stack at that point).  Since many
312  *	callers will be in a non-interrupt context, this means that
313  *	most processing will be performed here in-line, and deferral
314  *	occurs only when necessary.
315  */
316 static int
317 spppcomp_wput(queue_t *q, mblk_t *mp)
318 {
319 	sppp_comp_t *cp = q->q_ptr;
320 	int flag;
321 
322 	switch (MTYPE(mp)) {
323 	case M_DATA:
324 		if (q->q_first != NULL || !bcanputnext(q, mp->b_band) ||
325 		    ((cp->cp_flags & (COMP_VJC|CCP_COMP_RUN)) &&
326 		    servicing_interrupt())) {
327 #ifdef SPC_DEBUG
328 			cp->cp_out_queued++;
329 #endif
330 			if (!putq(q, mp))
331 				freemsg(mp);
332 		} else {
333 #ifdef SPC_DEBUG
334 			cp->cp_out_handled++;
335 #endif
336 			if ((mp = spppcomp_outpkt(q, mp)) != NULL)
337 				putnext(q, mp);
338 		}
339 		break;
340 	case M_IOCTL:
341 		spppcomp_ioctl(q, mp, cp);
342 		break;
343 	case M_CTL:
344 		mutex_enter(&cp->cp_pair_lock);
345 		flag = spppcomp_mctl(q, mp);
346 		mutex_exit(&cp->cp_pair_lock);
347 		if (flag != 0)
348 			putnext(q, mp);
349 		else
350 			freemsg(mp);
351 		break;
352 	case M_FLUSH:
353 		CPDEBUG((DBGSTART "wput M_FLUSH (0x%x) flags=0x%b\n",
354 		    (IS_CP_HASUNIT(cp) ? cp->cp_unit : -1),
355 		    *mp->b_rptr, cp->cp_flags,	CP_FLAGSSTR));
356 		/*
357 		 * Just discard pending data.  For CCP, any compressor
358 		 * dictionary sequencing problems caused by this will
359 		 * have to be handled by the compression protocol in
360 		 * use.  For VJ, we need to tell the compressor to
361 		 * start over.
362 		 */
363 		if (*mp->b_rptr & FLUSHW) {
364 			mutex_enter(&cp->cp_pair_lock);
365 			flushq(q, FLUSHDATA);
366 			vj_compress_init(&cp->cp_vj, cp->cp_nxslots);
367 			mutex_exit(&cp->cp_pair_lock);
368 		}
369 		putnext(q, mp);
370 		break;
371 	default:
372 		if (bcanputnext(q, mp->b_band))
373 			putnext(q, mp);
374 		else if (!putq(q, mp))
375 			freemsg(mp);
376 		break;
377 	}
378 	return (0);
379 }
380 
381 /*
382  * spppcomp_wsrv()
383  *
384  * MT-Perimeters:
385  *    exclusive inner
386  *
387  * Description:
388  *    Write-side service procedure.
389  */
390 static int
391 spppcomp_wsrv(queue_t *q)
392 {
393 	mblk_t		*mp;
394 
395 	while ((mp = getq(q)) != NULL) {
396 		/*
397 		 * If the module below us is flow-controlled, then put
398 		 * this message back on the queue again.
399 		 */
400 		if (!bcanputnext(q, mp->b_band)) {
401 			(void) putbq(q, mp);
402 			break;
403 		}
404 		if (MTYPE(mp) != M_DATA ||
405 		    (mp = spppcomp_outpkt(q, mp)) != NULL)
406 			putnext(q, mp);
407 	}
408 	return (0);
409 }
410 
411 /*
412  * spppcomp_outpkt()
413  *
414  * MT-Perimeters:
415  *    exclusive inner
416  *
417  * Description:
418  *    Process outgoing packet.  Returns new mblk_t pointer on success
419  *    (caller should do putnext through q), NULL on failure (packet has
420  *    been discarded).
421  */
422 static mblk_t *
423 spppcomp_outpkt(queue_t *q, mblk_t *mp)
424 {
425 	mblk_t		*zmp;
426 	int		len;
427 	ushort_t	proto;
428 	sppp_comp_t	*cp = q->q_ptr;
429 
430 	/*
431 	 * If the first mblk is shorter than the PPP header, pull the message
432 	 * up.  A message with less data than a full PPP header is discarded,
433 	 * since we can't even determine what the PPP protocol is.
434 	 */
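	/*
	 * For reference: PPP_HDRLEN is 4 octets -- address (0xFF), control
	 * (0x03), and a two-byte protocol field.
	 */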
435 	len = msgsize(mp);
436 	if (MBLKL(mp) < PPP_HDRLEN) {
437 #ifdef SPC_DEBUG
438 		mutex_enter(&cp->cp_pair_lock);
439 		cp->cp_omsg_pull++;
440 		mutex_exit(&cp->cp_pair_lock);
441 #endif
442 		zmp = msgpullup(mp, PPP_HDRLEN);
443 		freemsg(mp);
444 		if ((mp = zmp) == NULL)
445 			goto msg_oerror;
446 	}
447 
448 	proto = PPP_PROTOCOL(mp->b_rptr);
449 
450 	/*
451 	 * Do VJ compression if requested.
452 	 */
453 	if (proto == PPP_IP && IS_COMP_VJC(cp) &&
454 	    MSG_BYTE(mp, PPP_HDRLEN+offsetof(struct ip, ip_p)) ==
455 	    IPPROTO_TCP) {
456 		uchar_t		*vjhdr;
457 		int		type;
458 		uint32_t	indata[(PPP_HDRLEN+MAX_TCPIPHLEN) /
459 		    sizeof (uint32_t)];
460 		uchar_t		*dp;
461 		int		tocopy, copied;
462 		mblk_t		*fmb;
463 		void		*srcp;
464 		int		thislen;
465 
466 
467 		tocopy = copied = MIN(len, sizeof (indata));
468 		/*
469 		 * If we can alter this dblk, and there's enough data
470 		 * here to work with, and it's nicely aligned, then
471 		 * avoid the data copy.
472 		 */
473 		if (DB_REF(mp) == 1 && MBLKL(mp) >= tocopy &&
474 		    ((uintptr_t)mp->b_rptr & 3) == 0) {
475 			/* Save off the address/control */
476 			indata[0] = *(uint32_t *)mp->b_rptr;
477 			srcp = (void *)(mp->b_rptr + PPP_HDRLEN);
478 		} else {
479 			fmb = mp;
480 			dp = (uchar_t *)indata;
481 			while (tocopy > 0) {
482 				thislen = MBLKL(fmb);
483 				if (tocopy > thislen) {
484 					bcopy(fmb->b_rptr, dp, thislen);
485 					dp += thislen;
486 					tocopy -= thislen;
487 					fmb = fmb->b_cont;
488 				} else {
489 					bcopy(fmb->b_rptr, dp, tocopy);
490 					break;
491 				}
492 			}
493 			srcp = (void *)(indata + PPP_HDRLEN/sizeof (*indata));
494 		}
495 
496 		type = vj_compress_tcp((struct ip *)srcp, len - PPP_HDRLEN,
497 		    &cp->cp_vj, IS_COMP_VJCCID(cp), &vjhdr);
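		/*
		 * vj_compress_tcp() classifies the packet (per RFC 1144):
		 * TYPE_IP means it was left untouched, TYPE_UNCOMPRESSED_TCP
		 * means it goes out nearly intact but with the IP protocol
		 * field overwritten by the VJ connection number, and
		 * TYPE_COMPRESSED_TCP means *vjhdr now points at the
		 * compressed header that replaces the TCP/IP header.
		 */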
498 
499 		/*
500 		 * If we're going to modify this packet, then we can't modify
501 		 * someone else's data.  Copy instead.
502 		 *
503 		 * (It would be nice to be able to avoid this data copy if CCP
504 		 * is also enabled.  That would require extensive
505 		 * modifications to the compression code.  Users should be
506 		 * told to disable VJ compression when using CCP.)
507 		 */
508 		if (type != TYPE_IP && DB_REF(mp) > 1) {
509 #ifdef SPC_DEBUG
510 			mutex_enter(&cp->cp_pair_lock);
511 			cp->cp_omsg_dcopy++;
512 			mutex_exit(&cp->cp_pair_lock);
513 #endif
514 			/* Copy just altered portion. */
515 			zmp = msgpullup(mp, copied);
516 			freemsg(mp);
517 			if ((mp = zmp) == NULL)
518 				goto msg_oerror;
519 		}
520 
521 		switch (type) {
522 		case TYPE_UNCOMPRESSED_TCP:
523 			mp->b_rptr[3] = proto = PPP_VJC_UNCOMP;
524 			/* No need to update if it was done in place. */
525 			if (srcp ==
526 			    (void *)(indata + PPP_HDRLEN / sizeof (*indata))) {
527 				thislen = PPP_HDRLEN +
528 				    offsetof(struct ip, ip_p);
529 				zmp = mp;
530 				while (zmp != NULL) {
531 					if (MBLKL(zmp) > thislen) {
532 						zmp->b_rptr[thislen] =
533 						    ((struct ip *)srcp)->ip_p;
534 						break;
535 					}
536 					thislen -= MBLKL(zmp);
537 					zmp = zmp->b_cont;
538 				}
539 			}
540 			break;
541 
542 		case TYPE_COMPRESSED_TCP:
543 			/* Calculate amount to remove from front */
544 			thislen = vjhdr - (uchar_t *)srcp;
545 			ASSERT(thislen >= 0);
546 
547 			/* Try to do a cheap adjmsg by arithmetic first. */
548 			dp = mp->b_rptr + thislen;
549 			if (dp > mp->b_wptr) {
550 				if (!adjmsg(mp, thislen)) {
551 					freemsg(mp);
552 					goto msg_oerror;
553 				}
554 				dp = mp->b_rptr;
555 			}
556 
557 			/*
558 			 * Now make sure first block is big enough to
559 			 * receive modified data.  If we modified in
560 			 * place, then no need to check or copy.
561 			 */
562 			copied -= thislen;
563 			ASSERT(copied >= PPP_HDRLEN);
564 			if (srcp !=
565 			    (void *)(indata + PPP_HDRLEN / sizeof (*indata)))
566 				copied = 0;
567 			mp->b_rptr = dp;
568 			if (MBLKL(mp) < copied) {
569 				zmp = msgpullup(mp, copied);
570 				freemsg(mp);
571 				if ((mp = zmp) == NULL)
572 					goto msg_oerror;
573 				dp = mp->b_rptr;
574 			}
575 
576 			*dp++ = ((uchar_t *)indata)[0];	/* address */
577 			*dp++ = ((uchar_t *)indata)[1];	/* control  */
578 			*dp++ = 0;			/* protocol */
579 			*dp++ = proto = PPP_VJC_COMP;	/* protocol */
580 			copied -= PPP_HDRLEN;
581 			if (copied > 0) {
582 				bcopy(vjhdr, dp, copied);
583 			}
584 			break;
585 		}
586 	}
587 
588 	/*
589 	 * Do packet compression if enabled.
590 	 */
591 	if (proto == PPP_CCP) {
592 		/*
593 		 * Handle any negotiation packets by changing compressor
594 		 * state.  Doing this here rather than with an ioctl keeps
595 		 * the negotiation and the data flow in sync.
596 		 */
597 		mutex_enter(&cp->cp_pair_lock);
598 		comp_ccp(q, mp, cp, B_FALSE);
599 		mutex_exit(&cp->cp_pair_lock);
600 	} else if (proto != PPP_LCP && IS_CCP_COMP_RUN(cp) &&
601 	    IS_CCP_ISUP(cp) && cp->cp_xstate != NULL) {
602 		mblk_t	*cmp = NULL;
603 
604 		len = msgsize(mp);
605 		len = (*cp->cp_xcomp->compress)(cp->cp_xstate, &cmp, mp, len,
606 		    cp->cp_mtu + PPP_HDRLEN);
607 
608 		if (cmp != NULL) {
609 			/* Success!  Discard uncompressed version */
610 			cmp->b_band = mp->b_band;
611 			freemsg(mp);
612 			mp = cmp;
613 		}
614 		if (len < 0) {
615 			/*
616 			 * Compressor failure; must discard this
617 			 * packet because the compressor dictionary is
618 			 * now corrupt.
619 			 */
620 			freemsg(mp);
621 			mutex_enter(&cp->cp_pair_lock);
622 			cp->cp_stats.ppp_oerrors++;
623 			mutex_exit(&cp->cp_pair_lock);
624 			(void) putnextctl1(RD(q), M_CTL, PPPCTL_OERROR);
625 			return (NULL);
626 		}
627 	}
628 
629 	/*
630 	 * If either address and control field compression or protocol field
631 	 * compression is enabled, then we'll need a writable packet.  Copy if
632 	 * necessary.
633 	 */
634 	if ((cp->cp_flags & (COMP_AC|COMP_PROT)) && DB_REF(mp) > 1) {
635 #ifdef SPC_DEBUG
636 		mutex_enter(&cp->cp_pair_lock);
637 		cp->cp_omsg_dcopy++;
638 		mutex_exit(&cp->cp_pair_lock);
639 #endif
640 		zmp = copymsg(mp);
641 		freemsg(mp);
642 		if ((mp = zmp) == NULL)
643 			goto msg_oerror;
644 	}
645 
646 	/*
647 	 * Do address/control and protocol compression if enabled.
648 	 */
649 	if (IS_COMP_AC(cp) && (proto != PPP_LCP)) {
650 		mp->b_rptr += 2;	/* drop address & ctrl fields */
651 		/*
652 		 * Protocol field compression omits the first byte if
653 		 * it would be 0x00, thus the check for < 0x100.
654 		 */
655 		if (proto < 0x100 && IS_COMP_PROT(cp)) {
656 			++mp->b_rptr;	/* drop high protocol byte */
657 		}
658 	} else if ((proto < 0x100) && IS_COMP_PROT(cp)) {
659 		/*
660 		 * shuffle up the address & ctrl fields
661 		 */
662 		mp->b_rptr[2] = mp->b_rptr[1];
663 		mp->b_rptr[1] = mp->b_rptr[0];
664 		++mp->b_rptr;
665 	}
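	/*
	 * Byte-level example (illustrative): an IP packet arrives above as
	 * FF 03 00 21 <data>.  With ACFC alone it leaves here as
	 * 00 21 <data>, with ACFC and PFC as 21 <data>, and with PFC alone
	 * as FF 03 21 <data>.
	 */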
666 	mutex_enter(&cp->cp_pair_lock);
667 	cp->cp_stats.ppp_opackets++;
668 	cp->cp_stats.ppp_obytes += msgsize(mp);
669 	mutex_exit(&cp->cp_pair_lock);
670 
671 	CPDEBUG((DBGSTART "send (%ld bytes) flags=0x%b\n",
672 	    (IS_CP_HASUNIT(cp) ? cp->cp_unit : -1), msgsize(mp),
673 	    cp->cp_flags, CP_FLAGSSTR));
674 	return (mp);
675 
676 msg_oerror:
677 	mutex_enter(&cp->cp_pair_lock);
678 	cp->cp_stats.ppp_oerrors++;
679 	mutex_exit(&cp->cp_pair_lock);
680 	(void) putnextctl1(RD(q), M_CTL, PPPCTL_OERROR);
681 	return (NULL);
682 }
683 
684 /*
685  * spppcomp_inner_ioctl()
686  *
687  * MT-Perimeters:
688  *    exclusive inner; queue pair lock held.
689  *
690  * Description:
691  *	Called by spppcomp_ioctl to handle state-affecting ioctls.
692  *	Returns -1 if caller should do putnext, 0 for miocack, or >0
693  *	for miocnak.  Must *NOT* do putnext in this routine, since
694  *	lock is held here.
695  */
696 static int
697 spppcomp_inner_ioctl(queue_t *q, mblk_t *mp)
698 {
699 	sppp_comp_t	*cp = q->q_ptr;
700 	int		flags;
701 	int		mask;
702 	int		rc;
703 	int		len;
704 	int		cmd;
705 	int		nxslots;
706 	int		nrslots;
707 	int		val;
708 	uchar_t		*opt_data;
709 	uint32_t	opt_len;
710 	struct compressor **comp;
711 	struct compressor *ccomp;
712 	struct iocblk	*iop;
713 	void		*xtemp;
714 
715 	iop = (struct iocblk *)mp->b_rptr;
716 	rc = EINVAL;
717 	len = 0;
718 	switch (iop->ioc_cmd) {
719 	case PPPIO_CFLAGS:
720 		if (iop->ioc_count != 2 * sizeof (uint32_t) ||
721 		    mp->b_cont == NULL)
722 			break;
723 
724 		flags = ((uint32_t *)mp->b_cont->b_rptr)[0];
725 		mask = ((uint32_t *)mp->b_cont->b_rptr)[1];
726 
727 		cp->cp_flags = (cp->cp_flags & ~mask) | (flags & mask);
728 
729 		if ((mask & CCP_ISOPEN) && !(flags & CCP_ISOPEN)) {
730 			cp->cp_flags &= ~CCP_ISUP & ~CCP_COMP_RUN &
731 			    ~CCP_DECOMP_RUN;
732 			if (cp->cp_xstate != NULL) {
733 				(*cp->cp_xcomp->comp_free)(cp->cp_xstate);
734 				cp->cp_xstate = NULL;
735 			}
736 			if (cp->cp_rstate != NULL) {
737 				(*cp->cp_rcomp->decomp_free)(cp->cp_rstate);
738 				cp->cp_rstate = NULL;
739 			}
740 		}
741 
742 		CPDEBUG((DBGSTART
743 		    "PPPIO_CFLAGS xflags=0x%b xmask=0x%b flags=0x%b\n",
744 		    (IS_CP_HASUNIT(cp) ? cp->cp_unit : -1),
745 		    flags, CP_FLAGSSTR, mask,
746 		    CP_FLAGSSTR, cp->cp_flags, CP_FLAGSSTR));
747 
748 		/* If we're not the last PPP-speaker, then pass along. */
749 		if (!IS_CP_LASTMOD(cp)) {
750 			return (-1);	/* putnext */
751 		}
752 
753 		*(uint32_t *)mp->b_cont->b_rptr = cp->cp_flags;
754 		len = sizeof (uint32_t);
755 		rc = 0;
756 		break;
757 
758 	case PPPIO_VJINIT:
759 		if (iop->ioc_count != 2 || mp->b_cont == NULL)
760 			break;
761 		/*
762 		 * Even though it's not passed along, we have to
763 		 * validate nrslots so that we don't agree to
764 		 * decompress anything we cannot.
765 		 */
766 		nxslots = mp->b_cont->b_rptr[0] + 1;
767 		nrslots = mp->b_cont->b_rptr[1] + 1;
768 		if (nxslots > MAX_STATES || nrslots > MAX_STATES)
769 			break;
770 
771 		/* No need to lock here; just reading a word is atomic */
772 		/* mutex_enter(&cp->cp_pair_lock); */
773 		cp->cp_vj_last_ierrors = cp->cp_stats.ppp_ierrors;
774 		/* mutex_exit(&cp->cp_pair_lock); */
775 		vj_compress_init(&cp->cp_vj, nxslots);
776 		cp->cp_nxslots = nxslots;
777 
778 		CPDEBUG((DBGSTART
779 		    "PPPIO_VJINIT txslots=%d rxslots=%d flags=0x%b\n",
780 		    (IS_CP_HASUNIT(cp) ? cp->cp_unit : -1), nxslots,
781 		    nrslots, cp->cp_flags, CP_FLAGSSTR));
782 		rc = 0;
783 		break;
784 
785 	case PPPIO_XCOMP:
786 	case PPPIO_RCOMP:
787 		if (iop->ioc_count < 2 || mp->b_cont == NULL)
788 			break;
789 		/*
790 		 * The input data here is the raw CCP algorithm option
791 		 * from negotiation.  The format is always one byte of
792 		 * algorithm number, one byte of length, and
793 		 * (length-2) bytes of algorithm-dependent data.  The
794 		 * alloc routine is expected to parse and validate
795 		 * this.
796 		 */
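		/*
		 * Illustrative example (assuming the peer negotiated
		 * Deflate, RFC 1979): the option arrives here as the four
		 * bytes { 26, 4, 0x78, 0x00 } -- algorithm 26, length 4,
		 * then a window/method byte and a check-method byte for
		 * the alloc routine to parse.
		 */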
797 		opt_data = mp->b_cont->b_rptr;
798 		opt_len = mp->b_cont->b_wptr - opt_data;
799 		if (opt_len > iop->ioc_count) {
800 			opt_len = iop->ioc_count;
801 		}
802 		len = mp->b_cont->b_rptr[1];
803 		if (len < 2 || len > opt_len)
804 			break;
805 		len = 0;
806 		for (comp = ppp_compressors; *comp != NULL; ++comp) {
807 
808 			if ((*comp)->compress_proto != opt_data[0]) {
809 				continue;
810 			}
811 			rc = 0;
812 			if (iop->ioc_cmd == PPPIO_XCOMP) {
813 				/*
814 				 * A previous call may have fetched
815 				 * memory for a compressor that's now
816 				 * being retired or reset.  Free it
817 				 * using its mechanism for freeing
818 				 * stuff.
819 				 */
820 				if ((xtemp = cp->cp_xstate) != NULL) {
821 					cp->cp_flags &= ~CCP_ISUP &
822 					    ~CCP_COMP_RUN;
823 					cp->cp_xstate = NULL;
824 					(*cp->cp_xcomp->comp_free)(xtemp);
825 				}
826 				cp->cp_xcomp = *comp;
827 				cp->cp_xstate = (*comp)->comp_alloc(opt_data,
828 				    opt_len);
829 
830 				if (cp->cp_xstate == NULL) {
831 					rc = ENOSR;
832 				}
833 
834 				CPDEBUG((DBGSTART "PPPIO_XCOMP opt_proto=0x%x "
835 				    "opt_len=0x%d flags=0x%b\n",
836 				    (IS_CP_HASUNIT(cp) ? cp->cp_unit : -1),
837 				    (uchar_t)opt_data[0], opt_len,
838 				    cp->cp_flags,
839 				    CP_FLAGSSTR));
840 			} else {
841 				if ((xtemp = cp->cp_rstate) != NULL) {
842 					cp->cp_flags &= ~CCP_ISUP &
843 					    ~CCP_DECOMP_RUN;
844 					cp->cp_rstate = NULL;
845 					(*cp->cp_rcomp->decomp_free)(xtemp);
846 				}
847 				cp->cp_rcomp = *comp;
848 				cp->cp_rstate =
849 				    (*comp)->decomp_alloc(opt_data, opt_len);
850 
851 				if (cp->cp_rstate == NULL) {
852 					rc = ENOSR;
853 				}
854 
855 				CPDEBUG((DBGSTART "PPPIO_RCOMP opt_proto=0x%x "
856 				    "opt_len=0x%d flags=0x%b\n",
857 				    (IS_CP_HASUNIT(cp) ? cp->cp_unit : -1),
858 				    (uchar_t)opt_data[0], opt_len,
859 				    cp->cp_flags,
860 				    CP_FLAGSSTR));
861 			}
862 			if (rc == 0 && (*comp)->set_effort != NULL) {
863 				rc = (*(*comp)->set_effort)(cp->
864 				    cp_xcomp == *comp ? cp->cp_xstate : NULL,
865 				    cp->cp_rcomp == *comp ? cp->cp_rstate :
866 				    NULL, cp->cp_effort);
867 				if (rc != 0) {
868 					CKDEBUG((DBGSTART
869 					    "cannot set effort %d",
870 					    cp->cp_unit, cp->cp_effort));
871 					rc = 0;
872 				}
873 			}
874 			break;
875 		}
876 		break;
877 
878 	case PPPIO_DEBUG:
879 		if (iop->ioc_count != sizeof (uint32_t) || mp->b_cont == NULL)
880 			break;
881 
882 		cmd = *(uint32_t *)mp->b_cont->b_rptr;
883 
884 		/* If it's not for us, then pass along. */
885 		if (cmd != PPPDBG_LOG + PPPDBG_COMP) {
886 			return (-1);	/* putnext */
887 		}
888 		cp->cp_flags |= CP_KDEBUG;
889 
890 		CKDEBUG((DBGSTART "PPPIO_DEBUG log enabled flags=0x%b\n",
891 		    (IS_CP_HASUNIT(cp) ? cp->cp_unit : -1),
892 		    cp->cp_flags, CP_FLAGSSTR));
893 		rc = 0;
894 		break;
895 
896 	case PPPIO_LASTMOD:
897 		cp->cp_flags |= CP_LASTMOD;
898 		CPDEBUG((DBGSTART "PPPIO_LASTMOD last module flags=0x%b\n",
899 		    (IS_CP_HASUNIT(cp) ? cp->cp_unit : -1),
900 		    cp->cp_flags, CP_FLAGSSTR));
901 		rc = 0;
902 		break;
903 
904 	case PPPIO_COMPLEV:	/* set compression effort level */
905 		if (iop->ioc_count != sizeof (uint32_t) || mp->b_cont == NULL)
906 			break;
907 		val = *(uint32_t *)mp->b_cont->b_rptr;
908 		cp->cp_effort = val;
909 		/* Silently ignore if compressor doesn't understand this. */
910 		rc = 0;
911 		if ((ccomp = cp->cp_xcomp) != NULL &&
912 		    ccomp->set_effort != NULL) {
913 			rc = (*ccomp->set_effort)(cp->cp_xstate,
914 			    ccomp == cp->cp_rcomp ? cp->cp_rstate : NULL, val);
915 			if (rc != 0)
916 				break;
917 		}
918 		if ((ccomp = cp->cp_rcomp) != NULL && ccomp != cp->cp_xcomp &&
919 		    ccomp->set_effort != NULL)
920 			rc = (*ccomp->set_effort)(NULL, cp->cp_rstate, val);
921 		break;
922 	}
923 	if (rc == 0 && mp->b_cont != NULL)
924 		mp->b_cont->b_wptr = mp->b_cont->b_rptr + len;
925 	return (rc);
926 }
927 
928 /*
929  * spppcomp_getcstat()
930  *
931  * MT-Perimeters:
932  *    exclusive inner.
933  *
934  * Description:
935  *    Called by spppcomp_ioctl as the result of receiving a PPPIO_GETCSTAT.
936  */
937 static void
938 spppcomp_getcstat(queue_t *q, mblk_t *mp, sppp_comp_t *cp)
939 {
940 	mblk_t		*mpnext;
941 	struct ppp_comp_stats	*csp;
942 
943 	ASSERT(cp != NULL);
944 
945 	mpnext = allocb(sizeof (struct ppp_comp_stats), BPRI_MED);
946 	if (mpnext == NULL) {
947 		miocnak(q, mp, 0, ENOSR);
948 		return;
949 	}
950 	if (mp->b_cont != NULL) {
951 		freemsg(mp->b_cont);
952 	}
953 	mp->b_cont = mpnext;
954 	csp = (struct ppp_comp_stats *)mpnext->b_wptr;
955 	mpnext->b_wptr += sizeof (struct ppp_comp_stats);
956 	bzero((caddr_t)csp, sizeof (struct ppp_comp_stats));
957 
958 	if (cp->cp_xstate != NULL) {
959 		(*cp->cp_xcomp->comp_stat)(cp->cp_xstate, &csp->c);
960 	}
961 	if (cp->cp_rstate != NULL) {
962 		(*cp->cp_rcomp->decomp_stat)(cp->cp_rstate, &csp->d);
963 	}
964 
965 	miocack(q, mp, sizeof (struct ppp_comp_stats), 0);
966 }
967 
968 /*
969  * spppcomp_ioctl()
970  *
971  * MT-Perimeters:
972  *    exclusive inner.
973  *
974  * Description:
975  *    Called by spppcomp_wput as the result of receiving an M_IOCTL
976  *    command.
977  */
978 static void
979 spppcomp_ioctl(queue_t *q, mblk_t *mp, sppp_comp_t *cp)
980 {
981 	struct iocblk	*iop;
982 	int flag;
983 
984 	ASSERT(cp != NULL);
985 
986 	iop = (struct iocblk *)mp->b_rptr;
987 	switch (iop->ioc_cmd) {
988 	case PPPIO_CFLAGS:
989 	case PPPIO_VJINIT:
990 	case PPPIO_XCOMP:
991 	case PPPIO_RCOMP:
992 	case PPPIO_DEBUG:
993 	case PPPIO_LASTMOD:
994 	case PPPIO_COMPLEV:
995 		mutex_enter(&cp->cp_pair_lock);
996 		flag = spppcomp_inner_ioctl(q, mp);
997 		mutex_exit(&cp->cp_pair_lock);
998 		if (flag == -1) {
999 			putnext(q, mp);
1000 		} else if (flag == 0) {
1001 			miocack(q, mp,
1002 			    mp->b_cont == NULL ? 0 : MBLKL(mp->b_cont), 0);
1003 		} else {
1004 			miocnak(q, mp, 0, flag);
1005 		}
1006 		break;
1007 
1008 	case PPPIO_GETCSTAT:
1009 		spppcomp_getcstat(q, mp, cp);
1010 		break;
1011 
1012 	case PPPIO_GTYPE:	/* get existing driver type */
1013 		if (!IS_CP_LASTMOD(cp)) {
1014 			putnext(q, mp);
1015 			break;
1016 		}
1017 		freemsg(mp->b_cont);
1018 		mp->b_cont = allocb(sizeof (uint32_t), BPRI_MED);
1019 		if (mp->b_cont == NULL) {
1020 			miocnak(q, mp, 0, ENOSR);
1021 		} else {
1022 			*(uint32_t *)mp->b_cont->b_wptr = PPPTYP_HC;
1023 			mp->b_cont->b_wptr += sizeof (uint32_t);
1024 			miocack(q, mp, sizeof (uint32_t), 0);
1025 		}
1026 		break;
1027 
1028 	default:
1029 		putnext(q, mp);
1030 		break;
1031 	}
1032 }
1033 
1034 /*
1035  * spppcomp_mctl()
1036  *
1037  * MT-Perimeters:
1038  *    exclusive inner; queue pair lock held.
1039  *
1040  * Description:
1041  *	Called by spppcomp_wput as the result of receiving an M_CTL
1042  *	message from another STREAMS module, and returns non-zero if
1043  *	caller should do putnext or zero for freemsg.  Must *NOT* do
1044  *	putnext in this routine, since lock is held here.
1045  */
1046 static int
1047 spppcomp_mctl(queue_t *q, mblk_t *mp)
1048 {
1049 	sppp_comp_t		*cp = q->q_ptr;
1050 	kstat_t			*ksp;
1051 	char			unit[32];
1052 	const char **cpp;
1053 	kstat_named_t *knt;
1054 
1055 	switch (*mp->b_rptr) {
1056 	case PPPCTL_MTU:
1057 		if (MBLKL(mp) < 4) {
1058 			break;
1059 		}
1060 		cp->cp_mtu = ((ushort_t *)mp->b_rptr)[1];
1061 
1062 		CPDEBUG((DBGSTART "PPPCTL_MTU (%d) flags=0x%b\n",
1063 		    (IS_CP_HASUNIT(cp) ? cp->cp_unit : -1),
1064 		    cp->cp_mtu, cp->cp_flags, CP_FLAGSSTR));
1065 		break;
1066 	case PPPCTL_MRU:
1067 		if (MBLKL(mp) < 4) {
1068 			break;
1069 		}
1070 		cp->cp_mru = ((ushort_t *)mp->b_rptr)[1];
1071 
1072 		CPDEBUG((DBGSTART "PPPCTL_MRU (%d) flags=0x%b\n",
1073 		    (IS_CP_HASUNIT(cp) ? cp->cp_unit : -1),
1074 		    cp->cp_mru, cp->cp_flags, CP_FLAGSSTR));
1075 		break;
1076 	case PPPCTL_UNIT:
1077 		if (MBLKL(mp) < 8) {
1078 			break;
1079 		}
1080 		/* If PPPCTL_UNIT has already been issued, then ignore. */
1081 		if (IS_CP_HASUNIT(cp)) {
1082 			break;
1083 		}
1084 		ASSERT(cp->cp_kstats == NULL);
1085 		cp->cp_unit = ((uint32_t *)mp->b_rptr)[1];
1086 
1087 		/* Create kstats for this unit. */
1088 		(void) sprintf(unit, "%s" "%d", COMP_MOD_NAME, cp->cp_unit);
1089 		ksp = kstat_create(COMP_MOD_NAME, cp->cp_unit, unit, "net",
1090 		    KSTAT_TYPE_NAMED, sizeof (spppcomp_kstats_t) /
1091 		    sizeof (kstat_named_t), 0);
1092 
1093 		if (ksp != NULL) {
1094 			cp->cp_flags |= CP_HASUNIT;
1095 			cp->cp_kstats = ksp;
1096 
1097 			knt = (kstat_named_t *)ksp->ks_data;
1098 			for (cpp = kstats_names;
1099 			    cpp < kstats_names + Dim(kstats_names); cpp++) {
1100 				kstat_named_init(knt, *cpp,
1101 				    KSTAT_DATA_UINT32);
1102 				knt++;
1103 			}
1104 			for (cpp = kstats64_names;
1105 			    cpp < kstats64_names + Dim(kstats64_names); cpp++) {
1106 				kstat_named_init(knt, *cpp,
1107 				    KSTAT_DATA_UINT64);
1108 				knt++;
1109 			}
1110 			ksp->ks_update = spppcomp_kstat_update;
1111 			ksp->ks_private = (void *)cp;
1112 			kstat_install(ksp);
1113 
1114 			CPDEBUG((DBGSTART "PPPCTL_UNIT flags=0x%b\n",
1115 			    cp->cp_unit, cp->cp_flags, CP_FLAGSSTR));
1116 		}
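		/*
		 * Once installed, these counters are visible from userland
		 * via kstat(1M), e.g. "kstat -m spppcomp" (assuming
		 * COMP_MOD_NAME expands to "spppcomp").
		 */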
1117 		break;
1118 
1119 	default:
1120 		/* Forward unknown M_CTL messages along */
1121 		return (1);
1122 	}
1123 
1124 	/*
1125 	 * For known PPP M_CTL messages, forward along only if we're not the
1126 	 * last PPP-aware module.
1127 	 */
1128 	if (IS_CP_LASTMOD(cp))
1129 		return (0);
1130 	return (1);
1131 }
1132 
1133 /*
1134  * spppcomp_rput()
1135  *
1136  * MT-Perimeters:
1137  *    exclusive inner.
1138  *
1139  * Description:
1140  *    Upper read-side put procedure.  Messages get here from below.
1141  *
1142  *	The data handling logic is a little trickier here.  We
1143  *	defer to the service routine if q_first isn't NULL (to
1144  *	preserve message ordering after deferring a previous message),
1145  *	bcanputnext() is FALSE (to handle flow control), or we have
1146  *	done a lot of processing recently and we're about to do a lot
1147  *	more and we're in an interrupt context (on the theory that
1148  *	we're hogging the CPU in this case).
1149  */
1150 static int
1151 spppcomp_rput(queue_t *q, mblk_t *mp)
1152 {
1153 	sppp_comp_t		*cp = q->q_ptr;
1154 	struct iocblk		*iop;
1155 	struct ppp_stats64	*psp;
1156 	boolean_t		inter;
1157 	hrtime_t		curtime;
1158 
1159 	switch (MTYPE(mp)) {
1160 	case M_DATA:
1161 		inter = servicing_interrupt();
1162 		if (inter) {
1163 			curtime = gethrtime();
1164 
1165 			/*
1166 			 * If little time has passed since last
1167 			 * arrival, then bump the counter.
1168 			 */
1169 			if (curtime - cp->cp_lastfinish < spppcomp_min_arrival)
1170 				cp->cp_fastin++;
1171 			else
1172 				cp->cp_fastin >>= 1;	/* a guess */
1173 		}
1174 		/*
1175 		 * If we're not decompressing, then we'll be fast, so
1176 		 * we don't have to worry about hogging here.  If we
1177 		 * are decompressing, then we have to check the
1178 		 * cp_fastin count.
1179 		 */
1180 		if ((!(cp->cp_flags & (CCP_DECOMP_RUN | DECOMP_VJC)) ||
1181 		    cp->cp_fastin < MAX_FAST_ARRIVALS) &&
1182 		    q->q_first == NULL && bcanputnext(q, mp->b_band)) {
1183 #ifdef SPC_DEBUG
1184 			cp->cp_in_handled++;
1185 #endif
1186 			if ((mp = spppcomp_inpkt(q, mp)) != NULL)
1187 				putnext(q, mp);
1188 			if (inter) {
1189 				cp->cp_lastfinish = gethrtime();
1190 			}
1191 		} else {
1192 			/* Deferring; provide a clean slate */
1193 			cp->cp_fastin = 0;
1194 #ifdef SPC_DEBUG
1195 			cp->cp_in_queued++;
1196 #endif
1197 			if (!putq(q, mp))
1198 				freemsg(mp);
1199 		}
1200 		break;
1201 	case M_IOCACK:
1202 		iop = (struct iocblk *)mp->b_rptr;
1203 		/*
1204 		 * Bundled with pppstats; no need to handle PPPIO_GETSTAT
1205 		 * here since we'll never see it.
1206 		 */
1207 		if (iop->ioc_cmd == PPPIO_GETSTAT64 &&
1208 		    iop->ioc_count == sizeof (struct ppp_stats64) &&
1209 		    mp->b_cont != NULL) {
1210 			/*
1211 			 * This crock is to handle a badly-designed
1212 			 * but well-known ioctl for ANU PPP.  Both
1213 			 * link statistics and VJ statistics are
1214 			 * requested together.
1215 			 *
1216 			 * Catch this on the way back from the
1217 			 * spppasyn module so we can fill in the VJ
1218 			 * stats.  This happens only when we have
1219 			 * PPP-aware modules beneath us.
1220 			 */
1221 			psp = (struct ppp_stats64 *)mp->b_cont->b_rptr;
1222 			psp->vj = cp->cp_vj.stats;
1223 			CPDEBUG((DBGSTART
1224 			    "PPPIO_GETSTAT64 (VJ filled) flags=0x%b\n",
1225 			    (IS_CP_HASUNIT(cp) ? cp->cp_unit : -1),
1226 			    cp->cp_flags, CP_FLAGSSTR));
1227 		}
1228 		putnext(q, mp);
1229 		break;
1230 	case M_CTL:
1231 		/* Increase our statistics and forward it upstream. */
1232 		mutex_enter(&cp->cp_pair_lock);
1233 		if (*mp->b_rptr == PPPCTL_IERROR) {
1234 			cp->cp_stats.ppp_ierrors++;
1235 			cp->cp_ierr_low++;
1236 		} else if (*mp->b_rptr == PPPCTL_OERROR) {
1237 			cp->cp_stats.ppp_oerrors++;
1238 			cp->cp_oerr_low++;
1239 		}
1240 		mutex_exit(&cp->cp_pair_lock);
1241 		putnext(q, mp);
1242 		break;
1243 
1244 	case M_FLUSH:
1245 		CPDEBUG((DBGSTART "rput M_FLUSH (0x%x) flags=0x%b\n",
1246 		    (IS_CP_HASUNIT(cp) ? cp->cp_unit : -1),
1247 		    *mp->b_rptr, cp->cp_flags,	CP_FLAGSSTR));
1248 		/*
1249 		 * Just discard pending data.  For CCP, any
1250 		 * decompressor dictionary sequencing problems caused
1251 		 * by this will have to be handled by the compression
1252 		 * protocol in use.  For VJ, we need to give the
1253 		 * decompressor a heads-up.
1254 		 */
1255 		if (*mp->b_rptr & FLUSHR) {
1256 			mutex_enter(&cp->cp_pair_lock);
1257 			flushq(q, FLUSHDATA);
1258 			cp->cp_vj_last_ierrors = cp->cp_stats.ppp_ierrors;
1259 			vj_uncompress_err(&cp->cp_vj);
1260 			mutex_exit(&cp->cp_pair_lock);
1261 		}
1262 		putnext(q, mp);
1263 		break;
1264 
1265 	default:
1266 		if (bcanputnext(q, mp->b_band))
1267 			putnext(q, mp);
1268 		else if (!putq(q, mp))
1269 			freemsg(mp);
1270 		break;
1271 	}
1272 	return (0);
1273 }
1274 
1275 /*
1276  * spppcomp_rsrv()
1277  *
1278  * MT-Perimeters:
1279  *    exclusive inner.
1280  *
1281  * Description:
1282  *    Upper read-side service procedure.  We handle data deferred from
1283  *    spppcomp_rput here.
1284  *
1285  *	The data on the queue are always compressed (unprocessed).
1286  *	The rput procedure tries to do decompression, but if it can't,
1287  *	it will put the unprocessed data on the queue for later
1288  *	handling.
1289  */
1290 static int
1291 spppcomp_rsrv(queue_t *q)
1292 {
1293 	mblk_t		*mp;
1294 
1295 	while ((mp = getq(q)) != NULL) {
1296 		/*
1297 		 * If the module above us is flow-controlled, then put
1298 		 * this message back on the queue again.
1299 		 */
1300 		if (!bcanputnext(q, mp->b_band)) {
1301 			(void) putbq(q, mp);
1302 			break;
1303 		}
1304 		if (MTYPE(mp) != M_DATA ||
1305 		    (mp = spppcomp_inpkt(q, mp)) != NULL)
1306 			putnext(q, mp);
1307 	}
1308 	return (0);
1309 }
1310 
1311 /*
1312  * spppcomp_inpkt()
1313  *
1314  * MT-Perimeters:
1315  *    exclusive inner
1316  *
1317  * Description:
1318  *    Process incoming packet.
1319  */
1320 static mblk_t *
1321 spppcomp_inpkt(queue_t *q, mblk_t *mp)
1322 {
1323 	ushort_t	proto;
1324 	int		i;
1325 	mblk_t		*zmp;
1326 	mblk_t		*np;
1327 	uchar_t		*dp;
1328 	int		len;
1329 	int		hlen;
1330 	sppp_comp_t	*cp = q->q_ptr;
1331 
1332 	len = msgsize(mp);
1333 
1334 	mutex_enter(&cp->cp_pair_lock);
1335 	cp->cp_stats.ppp_ibytes += len;
1336 	cp->cp_stats.ppp_ipackets++;
1337 	mutex_exit(&cp->cp_pair_lock);
1338 	/*
1339 	 * First work out the protocol and where the PPP header ends.
1340 	 */
1341 	i = 0;
1342 	proto = MSG_BYTE(mp, 0);
1343 	if (proto == PPP_ALLSTATIONS) {
1344 		i = 2;
1345 		proto = MSG_BYTE(mp, 2);
1346 	}
1347 	if ((proto & 1) == 0) {
1348 		++i;
1349 		proto = (proto << 8) + MSG_BYTE(mp, i);
1350 	}
1351 	hlen = i + 1;
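	/*
	 * Illustrative examples: a full header FF 03 00 21 gives hlen 4;
	 * with the address/control compressed away, 00 21 gives hlen 2;
	 * with protocol field compression as well, a lone 21 gives hlen 1.
	 */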
1352 	/*
1353 	 * Now reconstruct a complete, contiguous PPP header at the
1354 	 * start of the packet.
1355 	 */
1356 	if (hlen < (IS_DECOMP_AC(cp) ? 0 : 2) + (IS_DECOMP_PROT(cp) ? 1 : 2)) {
1357 		/* count these? */
1358 		goto bad;
1359 	}
1360 	if (mp->b_rptr + hlen > mp->b_wptr) {
1361 		/*
1362 		 * The header is known to be intact here, so adjmsg will do
1363 		 * the right thing.
1364 		 */
1365 		if (!adjmsg(mp, hlen)) {
1366 			goto bad;
1367 		}
1368 		hlen = 0;
1369 	}
1370 	if (hlen != PPP_HDRLEN) {
1371 		/*
1372 		 * We need to put some bytes on the front of the packet
1373 		 * to make a full-length PPP header. If we can put them
1374 		 * in mp, we do, otherwise we tack another mblk on the
1375 		 * front.
1376 		 *
1377 		 * XXX we really shouldn't need to carry around the address
1378 		 * and control at this stage.  ACFC and PFC need to be
1379 		 * reworked.
1380 		 */
1381 		dp = mp->b_rptr + hlen - PPP_HDRLEN;
1382 		if ((dp < mp->b_datap->db_base) || (DB_REF(mp) > 1)) {
1383 
1384 			np = allocb(PPP_HDRLEN, BPRI_MED);
1385 			if (np == 0) {
1386 				goto bad;
1387 			}
1388 			np->b_cont = mp;
1389 			mp->b_rptr += hlen;
1390 			mp = np;
1391 			dp = mp->b_wptr;
1392 			mp->b_wptr += PPP_HDRLEN;
1393 		} else {
1394 			mp->b_rptr = dp;
1395 		}
1396 		dp[0] = PPP_ALLSTATIONS;
1397 		dp[1] = PPP_UI;
1398 		dp[2] = (proto >> 8) & 0xff;
1399 		dp[3] = proto & 0xff;
1400 	}
1401 	/*
1402 	 * Now see if we have a compressed packet to decompress, or a
1403 	 * CCP negotiation packet to take notice of.  It's guaranteed
1404 	 * that at least PPP_HDRLEN bytes are contiguous in the first
1405 	 * block now.
1406 	 */
1407 	proto = PPP_PROTOCOL(mp->b_rptr);
1408 	if (proto == PPP_CCP) {
1409 		len = msgsize(mp);
1410 		if (mp->b_wptr < mp->b_rptr + len) {
1411 #ifdef SPC_DEBUG
1412 			mutex_enter(&cp->cp_pair_lock);
1413 			cp->cp_imsg_ccp_pull++;
1414 			mutex_exit(&cp->cp_pair_lock);
1415 #endif
1416 			zmp = msgpullup(mp, len);
1417 			freemsg(mp);
1418 			mp = zmp;
1419 			if (mp == 0) {
1420 				goto bad;
1421 			}
1422 		}
1423 		mutex_enter(&cp->cp_pair_lock);
1424 		comp_ccp(q, mp, cp, B_TRUE);
1425 		mutex_exit(&cp->cp_pair_lock);
1426 	} else if ((cp->cp_flags & (CCP_ISUP | CCP_DECOMP_RUN | CCP_ERR)) ==
1427 	    (CCP_ISUP | CCP_DECOMP_RUN) && cp->cp_rstate != NULL) {
1428 		int	rv;
1429 
1430 		if ((proto == PPP_COMP) || (proto == PPP_COMPFRAG)) {
1431 			rv = (*cp->cp_rcomp->decompress)(cp->cp_rstate, &mp);
1432 			switch (rv) {
1433 			case DECOMP_OK:
1434 				break;
1435 			case DECOMP_ERROR:
1436 				cp->cp_flags |= CCP_ERROR;
1437 				mutex_enter(&cp->cp_pair_lock);
1438 				++cp->cp_stats.ppp_ierrors;
1439 				mutex_exit(&cp->cp_pair_lock);
1440 				(void) putnextctl1(q, M_CTL, PPPCTL_IERROR);
1441 				break;
1442 			case DECOMP_FATALERROR:
1443 				cp->cp_flags |= CCP_FATALERROR;
1444 				mutex_enter(&cp->cp_pair_lock);
1445 				++cp->cp_stats.ppp_ierrors;
1446 				mutex_exit(&cp->cp_pair_lock);
1447 				(void) putnextctl1(q, M_CTL, PPPCTL_IERROR);
1448 				break;
1449 			}
1450 			if (mp == NULL) {
1451 				/* Decompress failed; data are gone. */
1452 				return (NULL);
1453 			}
1454 		} else {
1455 			/*
1456 			 * For RFCs 1977 and 1979 (BSD Compress and Deflate),
1457 			 * the compressor should send incompressible data
1458 			 * without encapsulation and the receiver must update
1459 			 * its decompression dictionary as though this data
1460 			 * were received and decompressed.  This keeps the
1461 			 * dictionaries in sync.
1462 			 */
1463 			rv = (*cp->cp_rcomp->incomp)(cp->cp_rstate, mp);
1464 			if (rv < 0) {
1465 				cp->cp_flags |= CCP_FATALERROR;
1466 				mutex_enter(&cp->cp_pair_lock);
1467 				++cp->cp_stats.ppp_ierrors;
1468 				mutex_exit(&cp->cp_pair_lock);
1469 				(void) putnextctl1(q, M_CTL, PPPCTL_IERROR);
1470 			}
1471 		}
1472 	}
1473 	/*
1474 	 * Now do VJ decompression.
1475 	 */
1476 	proto = PPP_PROTOCOL(mp->b_rptr);
1477 	if ((proto == PPP_VJC_COMP) || (proto == PPP_VJC_UNCOMP)) {
1478 
1479 		len = msgsize(mp) - PPP_HDRLEN;
1480 
1481 		if (!IS_DECOMP_VJC(cp) || (len <= 0)) {
1482 			goto bad;
1483 		}
1484 		/*
1485 		 * Advance past the ppp header.  Here we assume that the whole
1486 		 * PPP header is in the first mblk.  (This should be true
1487 		 * because the above code does pull-ups as necessary on raw
1488 		 * data, and the decompressor engines all produce large blocks
1489 		 * on output.)
1490 		 */
1491 		np = mp;
1492 		dp = np->b_rptr + PPP_HDRLEN;
1493 		if (dp >= mp->b_wptr) {
1494 			np = np->b_cont;
1495 			dp = np->b_rptr;
1496 		}
1497 		/*
1498 		 * Make sure we have sufficient contiguous data at this point,
1499 		 * which in most cases we will always do.
1500 		 */
1501 		hlen = (proto == PPP_VJC_COMP) ? MAX_VJHDR : MAX_TCPIPHLEN;
1502 		if (hlen > len) {
1503 			hlen = len;
1504 		}
1505 		if ((np->b_wptr < dp + hlen) || DB_REF(np) > 1) {
1506 #ifdef SPC_DEBUG
1507 			mutex_enter(&cp->cp_pair_lock);
1508 			cp->cp_imsg_vj_pull++;
1509 			mutex_exit(&cp->cp_pair_lock);
1510 #endif
1511 			zmp = msgpullup(mp, hlen + PPP_HDRLEN);
1512 			freemsg(mp);
1513 			mp = zmp;
1514 			if (mp == NULL) {
1515 				goto bad;
1516 			}
1517 			np = mp;
1518 			dp = np->b_rptr + PPP_HDRLEN;
1519 		}
1520 
1521 		if (proto == PPP_VJC_COMP) {
1522 			uchar_t		*iphdr;
1523 			int		vjlen;
1524 			uint_t		iphlen;
1525 			int		errcnt;
1526 
1527 			/*
1528 			 * Decompress VJ-compressed packet.  First
1529 			 * reset compressor if an input error has
1530 			 * occurred.  (No need to lock statistics
1531 			 * structure for read of a single word.)
1532 			 */
1533 			errcnt = cp->cp_stats.ppp_ierrors;
1534 			if (errcnt != cp->cp_vj_last_ierrors) {
1535 				cp->cp_vj_last_ierrors = errcnt;
1536 				vj_uncompress_err(&cp->cp_vj);
1537 			}
1538 
1539 			vjlen = vj_uncompress_tcp(dp, np->b_wptr - dp, len,
1540 			    &cp->cp_vj, &iphdr, &iphlen);
1541 
1542 			if (vjlen < 0 || iphlen == 0) {
1543 				/*
1544 				 * so we don't reset next time
1545 				 */
1546 				mutex_enter(&cp->cp_pair_lock);
1547 				++cp->cp_vj_last_ierrors;
1548 				mutex_exit(&cp->cp_pair_lock);
1549 				goto bad;
1550 			}
1551 			/*
1552 			 * drop ppp and vj headers off
1553 			 */
1554 			if (mp != np) {
1555 				freeb(mp);
1556 				mp = np;
1557 			}
1558 			mp->b_rptr = dp + vjlen;
1559 			/*
1560 			 * allocate a new mblk for the ppp and
1561 			 * ip headers
1562 			 */
1563 			np = allocb(iphlen + PPP_HDRLEN, BPRI_MED);
1564 			if (np == NULL)
1565 				goto bad;
1566 			dp = np->b_rptr;
1567 			/*
1568 			 * reconstruct PPP header
1569 			 */
1570 			dp[0] = PPP_ALLSTATIONS;
1571 			dp[1] = PPP_UI;
1572 			dp[2] = PPP_IP >> 8;
1573 			dp[3] = PPP_IP;
1574 			/*
1575 			 * prepend mblk with reconstructed TCP/IP header.
1576 			 */
1577 			bcopy((caddr_t)iphdr, (caddr_t)dp + PPP_HDRLEN, iphlen);
1578 			np->b_wptr = dp + iphlen + PPP_HDRLEN;
1579 			np->b_cont = mp;
1580 			mp = np;
1581 		} else {
1582 			/*
1583 			 * "Decompress" a VJ-uncompressed packet.
1584 			 */
1585 			mutex_enter(&cp->cp_pair_lock);
1586 			cp->cp_vj_last_ierrors = cp->cp_stats.ppp_ierrors;
1587 			mutex_exit(&cp->cp_pair_lock);
1588 			if (!vj_uncompress_uncomp(dp, hlen, &cp->cp_vj)) {
1589 				/*
1590 				 * don't need to reset next time
1591 				 */
1592 				mutex_enter(&cp->cp_pair_lock);
1593 				++cp->cp_vj_last_ierrors;
1594 				mutex_exit(&cp->cp_pair_lock);
1595 				goto bad;
1596 			}
1597 			/*
1598 			 * fix up the PPP protocol field
1599 			 */
1600 			mp->b_rptr[3] = PPP_IP;
1601 		}
1602 	}
1603 	CPDEBUG((DBGSTART "recv (%ld bytes) flags=0x%b\n",
1604 	    (IS_CP_HASUNIT(cp) ? cp->cp_unit : -1), msgsize(mp),
1605 	    cp->cp_flags, CP_FLAGSSTR));
1606 	return (mp);
1607 
1608 bad:
1609 	if (mp != 0) {
1610 		freemsg(mp);
1611 	}
1612 	mutex_enter(&cp->cp_pair_lock);
1613 	cp->cp_stats.ppp_ierrors++;
1614 	mutex_exit(&cp->cp_pair_lock);
1615 	(void) putnextctl1(q, M_CTL, PPPCTL_IERROR);
1616 	return (NULL);
1617 }
1618 
1619 /*
1620  * comp_ccp()
1621  *
1622  * Description:
1623  *    Called by spppcomp_outpkt and spppcomp_inpkt to handle a CCP
1624  *    negotiation packet being sent or received.  Here all the data in
1625  *    the packet is in a single mbuf.
1626  *
1627  *	Global state is updated.  Must be called with mutex held.
1628  */
1629 /* ARGSUSED */
1630 static void
1631 comp_ccp(queue_t *q, mblk_t *mp, sppp_comp_t *cp, boolean_t rcvd)
1632 {
1633 	int	len;
1634 	int	clen;
1635 	uchar_t	*dp;
1636 
1637 	len = msgsize(mp);
1638 	if (len < PPP_HDRLEN + CCP_HDRLEN) {
1639 		return;
1640 	}
1641 	dp = mp->b_rptr + PPP_HDRLEN;
1642 
1643 	len -= PPP_HDRLEN;
1644 	clen = CCP_LENGTH(dp);
1645 	if (clen > len) {
1646 		return;
1647 	}
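	/*
	 * For reference (RFC 1962): dp now points at the CCP packet, which
	 * is laid out LCP-style -- one code byte, one identifier byte, a
	 * two-byte length, then any options; CCP_LENGTH() above and
	 * CCP_CODE() below extract those fields.
	 */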
1648 
1649 	CPDEBUG((DBGSTART "CCP code=%d flags=0x%b\n",
1650 	    (IS_CP_HASUNIT(cp) ? cp->cp_unit : -1), CCP_CODE(dp),
1651 	    cp->cp_flags, CP_FLAGSSTR));
1652 	switch (CCP_CODE(dp)) {
1653 	case CCP_CONFREQ:
1654 	case CCP_TERMREQ:
1655 	case CCP_TERMACK:
1656 		cp->cp_flags &= ~CCP_ISUP;
1657 		break;
1658 	case CCP_CONFACK:
1659 		if ((cp->cp_flags & (CCP_ISOPEN | CCP_ISUP)) == CCP_ISOPEN &&
1660 		    clen >= CCP_HDRLEN + CCP_OPT_MINLEN &&
1661 		    clen >= CCP_HDRLEN + CCP_OPT_LENGTH(dp + CCP_HDRLEN)) {
1662 
1663 			int	rc;
1664 
1665 			if (!rcvd) {
1666 				rc = (*cp->cp_xcomp->comp_init)(cp->cp_xstate,
1667 				    dp + CCP_HDRLEN, clen - CCP_HDRLEN,
1668 				    cp->cp_unit, 0,
1669 				    IS_CP_KDEBUG(cp) | ALG_DEBUG);
1670 
1671 				if (cp->cp_xstate != NULL && rc != 0) {
1672 					cp->cp_flags |= CCP_COMP_RUN;
1673 				}
1674 			} else {
1675 				rc = (*cp->cp_rcomp->decomp_init)(cp->
1676 				    cp_rstate, dp + CCP_HDRLEN,
1677 				    clen - CCP_HDRLEN, cp->cp_unit, 0,
1678 				    cp->cp_mru,
1679 				    IS_CP_KDEBUG(cp) | ALG_DEBUG);
1680 
1681 				if (cp->cp_rstate != NULL && rc != 0) {
1682 					cp->cp_flags &= ~CCP_ERR;
1683 					cp->cp_flags |= CCP_DECOMP_RUN;
1684 				}
1685 			}
1686 		}
1687 		break;
1688 	case CCP_RESETACK:
1689 		if (IS_CCP_ISUP(cp)) {
1690 			if (!rcvd) {
1691 				if (cp->cp_xstate != NULL &&
1692 				    IS_CCP_COMP_RUN(cp)) {
1693 					(*cp->cp_xcomp->comp_reset)(cp->
1694 					    cp_xstate);
1695 				}
1696 			} else {
1697 				if (cp->cp_rstate != NULL &&
1698 				    IS_CCP_DECOMP_RUN(cp)) {
1699 					(*cp->cp_rcomp->decomp_reset)(cp->
1700 					    cp_rstate);
1701 					cp->cp_flags &= ~CCP_ERROR;
1702 				}
1703 			}
1704 		}
1705 		break;
1706 	}
1707 }
1708 
1709 /*
1710  * spppcomp_kstat_update()
1711  *
1712  * Description:
1713  *    Update per-unit kstat statistics.
1714  */
1715 static int
1716 spppcomp_kstat_update(kstat_t *ksp, int rw)
1717 {
1718 	sppp_comp_t		*cp = ksp->ks_private;
1719 	spppcomp_kstats_t	*cpkp;
1720 	struct vjstat		*sp;
1721 	struct pppstat64	*psp;
1722 	struct ppp_comp_stats		csp;
1723 
1724 	if (rw == KSTAT_WRITE) {
1725 		return (EACCES);
1726 	}
1727 
1728 	cpkp = (spppcomp_kstats_t *)ksp->ks_data;
1729 	bzero((caddr_t)&csp, sizeof (struct ppp_comp_stats));
1730 
1731 	mutex_enter(&cp->cp_pair_lock);
1732 
1733 	if (cp->cp_xstate != NULL) {
1734 		(*cp->cp_xcomp->comp_stat)(cp->cp_xstate, &csp.c);
1735 	}
1736 	if (cp->cp_rstate != NULL) {
1737 		(*cp->cp_rcomp->decomp_stat)(cp->cp_rstate, &csp.d);
1738 	}
1739 
1740 	sp = &cp->cp_vj.stats;
1741 
1742 	cpkp->vj_out_pkts.value.ui32		= sp->vjs_packets;
1743 	cpkp->vj_out_pkts_comp.value.ui32	= sp->vjs_compressed;
1744 	cpkp->vj_cs_searches.value.ui32		= sp->vjs_searches;
1745 	cpkp->vj_cs_misses.value.ui32		= sp->vjs_misses;
1746 	cpkp->vj_in_pkts_uncomp.value.ui32	= sp->vjs_uncompressedin;
1747 	cpkp->vj_in_pkts_comp.value.ui32	= sp->vjs_compressedin;
1748 	cpkp->vj_in_error.value.ui32		= sp->vjs_errorin;
1749 	cpkp->vj_in_tossed.value.ui32		= sp->vjs_tossed;
1750 
1751 	psp = &cp->cp_stats;
1752 
1753 	cpkp->out_bytes.value.ui64		= psp->ppp_obytes;
1754 	cpkp->out_pkts.value.ui64		= psp->ppp_opackets;
1755 	cpkp->out_errors.value.ui64		= psp->ppp_oerrors;
1756 	cpkp->out_errors_low.value.ui32		= cp->cp_oerr_low;
1757 	cpkp->out_uncomp_bytes.value.ui32	= csp.c.unc_bytes;
1758 	cpkp->out_uncomp_pkts.value.ui32	= csp.c.unc_packets;
1759 	cpkp->out_comp_bytes.value.ui32		= csp.c.comp_bytes;
1760 	cpkp->out_comp_pkts.value.ui32		= csp.c.comp_packets;
1761 	cpkp->out_incomp_bytes.value.ui32	= csp.c.inc_bytes;
1762 	cpkp->out_incomp_pkts.value.ui32	= csp.c.inc_packets;
1763 
1764 	cpkp->in_bytes.value.ui64		= psp->ppp_ibytes;
1765 	cpkp->in_pkts.value.ui64		= psp->ppp_ipackets;
1766 	cpkp->in_errors.value.ui64		= psp->ppp_ierrors;
1767 	cpkp->in_errors_low.value.ui32		= cp->cp_ierr_low;
1768 	cpkp->in_uncomp_bytes.value.ui32	= csp.d.unc_bytes;
1769 	cpkp->in_uncomp_pkts.value.ui32		= csp.d.unc_packets;
1770 	cpkp->in_comp_bytes.value.ui32		= csp.d.comp_bytes;
1771 	cpkp->in_comp_pkts.value.ui32		= csp.d.comp_packets;
1772 	cpkp->in_incomp_bytes.value.ui32	= csp.d.inc_bytes;
1773 	cpkp->in_incomp_pkts.value.ui32		= csp.d.inc_packets;
1774 #ifdef SPC_DEBUG
1775 	cpkp->in_msg_ccp_pulledup.value.ui32	= cp->cp_imsg_ccp_pull;
1776 	cpkp->in_msg_vj_pulledup.value.ui32	= cp->cp_imsg_vj_pull;
1777 	cpkp->out_msg_pulledup.value.ui32	= cp->cp_omsg_pull;
1778 	cpkp->out_msg_copied.value.ui32		= cp->cp_omsg_dcopy;
1779 	cpkp->out_queued.value.ui32		= cp->cp_out_queued;
1780 	cpkp->out_handled.value.ui32		= cp->cp_out_handled;
1781 	cpkp->in_queued.value.ui32		= cp->cp_in_queued;
1782 	cpkp->in_handled.value.ui32		= cp->cp_in_handled;
1783 #endif
1784 	mutex_exit(&cp->cp_pair_lock);
1785 	return (0);
1786 }
1787