/*
 * spppcomp.c - STREAMS module for kernel-level compression and CCP support.
 *
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 *
 * Permission to use, copy, modify, and distribute this software and its
 * documentation is hereby granted, provided that the above copyright
 * notice appears in all copies.
 *
 * SUN MAKES NO REPRESENTATION OR WARRANTIES ABOUT THE SUITABILITY OF
 * THE SOFTWARE, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
 * TO THE IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
 * PARTICULAR PURPOSE, OR NON-INFRINGEMENT. SUN SHALL NOT BE LIABLE FOR
 * ANY DAMAGES SUFFERED BY LICENSEE AS A RESULT OF USING, MODIFYING OR
 * DISTRIBUTING THIS SOFTWARE OR ITS DERIVATIVES
 *
 * Copyright (c) 1994 The Australian National University.
 * All rights reserved.
 *
 * Permission to use, copy, modify, and distribute this software and its
 * documentation is hereby granted, provided that the above copyright
 * notice appears in all copies. This software is provided without any
 * warranty, express or implied. The Australian National University
 * makes no representations about the suitability of this software for
 * any purpose.
 *
 * IN NO EVENT SHALL THE AUSTRALIAN NATIONAL UNIVERSITY BE LIABLE TO ANY
 * PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
 * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF
 * THE AUSTRALIAN NATIONAL UNIVERSITY HAS BEEN ADVISED OF THE POSSIBILITY
 * OF SUCH DAMAGE.
 *
 * THE AUSTRALIAN NATIONAL UNIVERSITY SPECIFICALLY DISCLAIMS ANY WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
 * ON AN "AS IS" BASIS, AND THE AUSTRALIAN NATIONAL UNIVERSITY HAS NO
 * OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS,
 * OR MODIFICATIONS.
 *
 * This module is derived from the original SVR4 STREAMS PPP compression
 * module originally written by Paul Mackerras <paul.mackerras@cs.anu.edu.au>.
 *
 * James Carlson <james.d.carlson@sun.com> and Adi Masputra
 * <adi.masputra@sun.com> rewrote and restructured the code for improved
 * performance and scalability.
 */

#define	RCSID	"$Id: spppcomp.c,v 1.0 2000/05/08 01:10:12 masputra Exp $"

#include <sys/types.h>
#include <sys/debug.h>
#include <sys/param.h>
#include <sys/stream.h>
#include <sys/stropts.h>
#include <sys/errno.h>
#include <sys/conf.h>
#include <sys/cpuvar.h>
#include <sys/cmn_err.h>
#include <sys/kmem.h>
#include <sys/ddi.h>
#include <sys/kstat.h>
#include <sys/strsun.h>
#include <sys/sysmacros.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <net/ppp_defs.h>
#include <net/pppio.h>
#include <net/vjcompress.h>

/* Defined for platform-neutral include file */
#define	PACKETPTR	mblk_t *
#include <net/ppp-comp.h>

#include "s_common.h"

#ifdef DEBUG
#define	SPC_DEBUG
#endif
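/*
 * Note: building with DEBUG also turns on SPC_DEBUG, which enables the
 * extra per-unit debug counters used below (cp_out_queued, cp_in_handled,
 * and friends) and the additional debug kstats guarded by SPC_DEBUG.
 */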
#include "spppcomp.h"

/*
 * This is used to tag official Solaris sources. Please do not define
 * "INTERNAL_BUILD" when building this software outside of Sun
 * Microsystems.
 */
#ifdef INTERNAL_BUILD
/* MODINFO is limited to 32 characters. */
const char spppcomp_module_description[] = "PPP 4.0 compression";
#else /* INTERNAL_BUILD */
const char spppcomp_module_description[] =
	"ANU PPP compression $Revision: 1.16$ ";

/* LINTED */
static const char buildtime[] = "Built " __DATE__ " at " __TIME__
#ifdef DEBUG
	" DEBUG"
#endif
	"\n";
#endif /* INTERNAL_BUILD */

static int	spppcomp_open(queue_t *, dev_t *, int, int, cred_t *);
static int	spppcomp_close(queue_t *, int, cred_t *);
static void	spppcomp_rput(queue_t *, mblk_t *);
static void	spppcomp_rsrv(queue_t *);
static void	spppcomp_wput(queue_t *, mblk_t *);
static void	spppcomp_wsrv(queue_t *);

#define	PPPCOMP_MI_MINPSZ	(0)
#define	PPPCOMP_MI_MAXPSZ	(INFPSZ)
#define	PPPCOMP_MI_HIWAT	(PPP_MTU * 20)
#define	PPPCOMP_MI_LOWAT	(PPP_MTU * 18)

static struct module_info spppcomp_modinfo = {
	COMP_MOD_ID,		/* mi_idnum */
	COMP_MOD_NAME,		/* mi_idname */
	PPPCOMP_MI_MINPSZ,	/* mi_minpsz */
	PPPCOMP_MI_MAXPSZ,	/* mi_maxpsz */
	PPPCOMP_MI_HIWAT,	/* mi_hiwat */
	PPPCOMP_MI_LOWAT	/* mi_lowat */
};

static struct qinit spppcomp_rinit = {
	(int (*)())spppcomp_rput,	/* qi_putp */
	(int (*)())spppcomp_rsrv,	/* qi_srvp */
	spppcomp_open,			/* qi_qopen */
	spppcomp_close,			/* qi_qclose */
	NULL,				/* qi_qadmin */
	&spppcomp_modinfo,		/* qi_minfo */
	NULL				/* qi_mstat */
};

static struct qinit spppcomp_winit = {
	(int (*)())spppcomp_wput,	/* qi_putp */
	(int (*)())spppcomp_wsrv,	/* qi_srvp */
	NULL,				/* qi_qopen */
	NULL,				/* qi_qclose */
	NULL,				/* qi_qadmin */
	&spppcomp_modinfo,		/* qi_minfo */
	NULL				/* qi_mstat */
};

struct streamtab spppcomp_tab = {
	&spppcomp_rinit,	/* st_rdinit */
	&spppcomp_winit,	/* st_wrinit */
	NULL,			/* st_muxrinit */
	NULL			/* st_muxwinit */
};

/* Set non-zero to debug algorithm-specific problems alone. */
#define	ALG_DEBUG	0

#define	MAX_IPHLEN	(0x0f << 2)
#define	MAX_TCPHLEN	(0x0f << 2)
#define	MAX_TCPIPHLEN	(MAX_IPHLEN + MAX_TCPHLEN)	/* max TCP/IP header size */
#define	MAX_VJHDR	(20)	/* max VJ compressed header size (?) */

#if 0
#define	DBGSTART	CE_CONT, COMP_MOD_NAME "%d: "
#define	CKDEBUG(x)	cmn_err x
#else
#define	DBGSTART	COMP_MOD_NAME "%d: "
#define	CKDEBUG(x)	printf x
#endif
#define	CPDEBUG(x)	(IS_CP_KDEBUG(cp) ? CKDEBUG(x) : (void)0)

/*
 * List of compressors we know about.
 */
#if DO_BSD_COMPRESS
extern struct compressor ppp_bsd_compress;
#endif
#if DO_DEFLATE
extern struct compressor ppp_deflate;
extern struct compressor ppp_deflate_draft;
#endif

struct compressor *ppp_compressors[] = {
#if DO_BSD_COMPRESS
	&ppp_bsd_compress,
#endif
#if DO_DEFLATE
	&ppp_deflate,
	&ppp_deflate_draft,
#endif
	NULL
};

/*
 * LCP_USE_DFLT() removed by James Carlson. RFC 1661 section 6.6 has
 * this to say on the topic:
 *
 * The Address and Control fields MUST NOT be compressed when sending
 * any LCP packet. This rule guarantees unambiguous recognition of
 * LCP packets.
 */

static void	spppcomp_ioctl(queue_t *, mblk_t *, sppp_comp_t *);
static int	spppcomp_mctl(queue_t *, mblk_t *);
static mblk_t	*spppcomp_outpkt(queue_t *, mblk_t *);
static mblk_t	*spppcomp_inpkt(queue_t *, mblk_t *);
static int	spppcomp_kstat_update(kstat_t *, int);
static void	comp_ccp(queue_t *, mblk_t *, sppp_comp_t *, boolean_t);

/*
 * Values for checking inter-arrival times on interrupt stacks. These
 * are used to prevent CPU hogging in interrupt context.
 */
#define	MIN_ARRIVAL_TIME	5000000	/* interarrival time in nanoseconds */
#define	MAX_FAST_ARRIVALS	10	/* maximum packet count */
hrtime_t spppcomp_min_arrival = MIN_ARRIVAL_TIME;

static const char *kstats_names[] = {
#ifdef SPCDEBUG_KSTATS_NAMES
	SPPPCOMP_KSTATS_NAMES,
	SPCDEBUG_KSTATS_NAMES
#else
	SPPPCOMP_KSTATS_NAMES
#endif
};
static const char *kstats64_names[] = { SPPPCOMP_KSTATS64_NAMES };

/*
 * spppcomp_open()
 *
 * MT-Perimeters:
 *    exclusive inner.
 *
 * Description:
 *    Common open procedure for module.
 */
/* ARGSUSED */
static int
spppcomp_open(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *credp)
{
	sppp_comp_t *cp;

	if (q->q_ptr != NULL) {
		return (0);
	}
	if (sflag != MODOPEN) {
		return (EINVAL);
	}
	cp = kmem_zalloc(sizeof (sppp_comp_t), KM_SLEEP);
	q->q_ptr = WR(q)->q_ptr = (caddr_t)cp;

	cp->cp_mru = PPP_MRU;
	cp->cp_mtu = PPP_MTU;

	mutex_init(&cp->cp_pair_lock, NULL, MUTEX_DRIVER, NULL);
	vj_compress_init(&cp->cp_vj, -1);
	cp->cp_nxslots = -1;
	cp->cp_effort = -1;

	qprocson(q);
	return (0);
}

/*
 * spppcomp_close()
 *
 * MT-Perimeters:
 *    exclusive inner.
 *
 * Description:
 *    Common close procedure for module.
 */
/* ARGSUSED */
static int
spppcomp_close(queue_t *q, int flag, cred_t *credp)
{
	sppp_comp_t *cp = q->q_ptr;

	qprocsoff(q);

	CPDEBUG((DBGSTART "close flags=0x%b\n",
	    (IS_CP_HASUNIT(cp) ? cp->cp_unit : -1), cp->cp_flags,
	    CP_FLAGSSTR));
	mutex_destroy(&cp->cp_pair_lock);
	if (cp->cp_kstats) {
		ASSERT(IS_CP_HASUNIT(cp));
		kstat_delete(cp->cp_kstats);
	}
	if (cp->cp_xstate != NULL) {
		(*cp->cp_xcomp->comp_free)(cp->cp_xstate);
	}
	if (cp->cp_rstate != NULL) {
		(*cp->cp_rcomp->decomp_free)(cp->cp_rstate);
	}
	kmem_free(cp, sizeof (sppp_comp_t));
	q->q_ptr = WR(q)->q_ptr = NULL;

	return (0);
}

/*
 * spppcomp_wput()
 *
 * MT-Perimeters:
 *    exclusive inner.
 *
 * Description:
 *    Write-side put procedure. Packets from above us arrive here.
 *
 *    The data handling logic is a little tricky here. We defer to
 *    the service routine if q_first isn't NULL (to preserve message
 *    ordering after deferring a previous message), bcanputnext() is
 *    FALSE (to handle flow control), or we need a lot of processing
 *    and we're in an interrupt context (on the theory that we're
 *    already on a very long call stack at that point). Since many
 *    callers will be in a non-interrupt context, this means that
 *    most processing will be performed here in-line, and deferral
 *    occurs only when necessary.
 */
static void
spppcomp_wput(queue_t *q, mblk_t *mp)
{
	sppp_comp_t *cp = q->q_ptr;
	int flag;

	switch (MTYPE(mp)) {
	case M_DATA:
		if (q->q_first != NULL || !bcanputnext(q, mp->b_band) ||
		    ((cp->cp_flags & (COMP_VJC|CCP_COMP_RUN)) &&
		    servicing_interrupt())) {
#ifdef SPC_DEBUG
			cp->cp_out_queued++;
#endif
			if (!putq(q, mp))
				freemsg(mp);
		} else {
#ifdef SPC_DEBUG
			cp->cp_out_handled++;
#endif
			if ((mp = spppcomp_outpkt(q, mp)) != NULL)
				putnext(q, mp);
		}
		break;
	case M_IOCTL:
		spppcomp_ioctl(q, mp, cp);
		break;
	case M_CTL:
		mutex_enter(&cp->cp_pair_lock);
		flag = spppcomp_mctl(q, mp);
		mutex_exit(&cp->cp_pair_lock);
		if (flag != 0)
			putnext(q, mp);
		else
			freemsg(mp);
		break;
	case M_FLUSH:
		CPDEBUG((DBGSTART "wput M_FLUSH (0x%x) flags=0x%b\n",
		    (IS_CP_HASUNIT(cp) ? cp->cp_unit : -1),
		    *mp->b_rptr, cp->cp_flags, CP_FLAGSSTR));
		/*
		 * Just discard pending data. For CCP, any compressor
		 * dictionary sequencing problems caused by this will
		 * have to be handled by the compression protocol in
		 * use. For VJ, we need to tell the compressor to
		 * start over.
		 */
		if (*mp->b_rptr & FLUSHW) {
			mutex_enter(&cp->cp_pair_lock);
			flushq(q, FLUSHDATA);
			vj_compress_init(&cp->cp_vj, cp->cp_nxslots);
			mutex_exit(&cp->cp_pair_lock);
		}
		putnext(q, mp);
		break;
	default:
		if (bcanputnext(q, mp->b_band))
			putnext(q, mp);
		else if (!putq(q, mp))
			freemsg(mp);
		break;
	}
}

/*
 * spppcomp_wsrv()
 *
 * MT-Perimeters:
 *    exclusive inner
 *
 * Description:
 *    Write-side service procedure.
 */
static void
spppcomp_wsrv(queue_t *q)
{
	mblk_t *mp;

	while ((mp = getq(q)) != NULL) {
		/*
		 * If the module below us is flow-controlled, then put
		 * this message back on the queue again.
		 */
		if (!bcanputnext(q, mp->b_band)) {
			(void) putbq(q, mp);
			break;
		}
		if (MTYPE(mp) != M_DATA ||
		    (mp = spppcomp_outpkt(q, mp)) != NULL)
			putnext(q, mp);
	}
}

/*
 * spppcomp_outpkt()
 *
 * MT-Perimeters:
 *    exclusive inner
 *
 * Description:
 *    Process outgoing packet. Returns new mblk_t pointer on success
 *    (caller should do putnext through q), NULL on failure (packet has
 *    been discarded).
 */
static mblk_t *
spppcomp_outpkt(queue_t *q, mblk_t *mp)
{
	mblk_t *zmp;
	int len;
	ushort_t proto;
	sppp_comp_t *cp = q->q_ptr;

	/*
	 * If the entire data size of the mblk is less than the length of the
	 * PPP header, then free it. We can't do much with such message anyway,
	 * since we can't determine what the PPP protocol is.
	 */
	len = msgsize(mp);
	if (MBLKL(mp) < PPP_HDRLEN) {
#ifdef SPC_DEBUG
		mutex_enter(&cp->cp_pair_lock);
		cp->cp_omsg_pull++;
		mutex_exit(&cp->cp_pair_lock);
#endif
		zmp = msgpullup(mp, PPP_HDRLEN);
		freemsg(mp);
		if ((mp = zmp) == NULL)
			goto msg_oerror;
	}

	proto = PPP_PROTOCOL(mp->b_rptr);

	/*
	 * Do VJ compression if requested.
	 */
	if (proto == PPP_IP && IS_COMP_VJC(cp) &&
	    MSG_BYTE(mp, PPP_HDRLEN+offsetof(struct ip, ip_p)) ==
	    IPPROTO_TCP) {
		uchar_t *vjhdr;
		int type;
		uint32_t indata[(PPP_HDRLEN+MAX_TCPIPHLEN) /
		    sizeof (uint32_t)];
		uchar_t *dp;
		int tocopy, copied;
		mblk_t *fmb;
		void *srcp;
		int thislen;


		tocopy = copied = MIN(len, sizeof (indata));
		/*
		 * If we can alter this dblk, and there's enough data
		 * here to work with, and it's nicely aligned, then
		 * avoid the data copy.
		 */
		if (DB_REF(mp) == 1 && MBLKL(mp) >= tocopy &&
		    ((uintptr_t)mp->b_rptr & 3) == 0) {
			/* Save off the address/control */
			indata[0] = *(uint32_t *)mp->b_rptr;
			srcp = (void *)(mp->b_rptr + PPP_HDRLEN);
		} else {
			fmb = mp;
			dp = (uchar_t *)indata;
			while (tocopy > 0) {
				thislen = MBLKL(fmb);
				if (tocopy > thislen) {
					bcopy(fmb->b_rptr, dp, thislen);
					dp += thislen;
					tocopy -= thislen;
					fmb = fmb->b_cont;
				} else {
					bcopy(fmb->b_rptr, dp, tocopy);
					break;
				}
			}
			srcp = (void *)(indata + PPP_HDRLEN/sizeof (*indata));
		}

		type = vj_compress_tcp((struct ip *)srcp, len - PPP_HDRLEN,
		    &cp->cp_vj, IS_COMP_VJCCID(cp), &vjhdr);

		/*
		 * If we're going to modify this packet, then we can't modify
		 * someone else's data. Copy instead.
		 *
		 * (It would be nice to be able to avoid this data copy if CCP
		 * is also enabled. That would require extensive
		 * modifications to the compression code. Users should be
		 * told to disable VJ compression when using CCP.)
		 */
		if (type != TYPE_IP && DB_REF(mp) > 1) {
#ifdef SPC_DEBUG
			mutex_enter(&cp->cp_pair_lock);
			cp->cp_omsg_dcopy++;
			mutex_exit(&cp->cp_pair_lock);
#endif
			/* Copy just altered portion. */
			zmp = msgpullup(mp, copied);
			freemsg(mp);
			if ((mp = zmp) == NULL)
				goto msg_oerror;
		}

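		/*
		 * vj_compress_tcp() returns one of three packet types
		 * handled below: TYPE_IP leaves the packet untouched,
		 * TYPE_UNCOMPRESSED_TCP only changes the PPP protocol
		 * field to PPP_VJC_UNCOMP (plus the connection ID that
		 * VJ stores in the IP protocol byte), and
		 * TYPE_COMPRESSED_TCP replaces the TCP/IP header with
		 * the much smaller VJ header at "vjhdr".
		 */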
		switch (type) {
		case TYPE_UNCOMPRESSED_TCP:
			mp->b_rptr[3] = proto = PPP_VJC_UNCOMP;
			/* No need to update if it was done in place. */
			if (srcp ==
			    (void *)(indata + PPP_HDRLEN / sizeof (*indata))) {
				thislen = PPP_HDRLEN +
				    offsetof(struct ip, ip_p);
				zmp = mp;
				while (zmp != NULL) {
					if (MBLKL(zmp) > thislen) {
						zmp->b_rptr[thislen] =
						    ((struct ip *)srcp)->ip_p;
						break;
					}
					thislen -= MBLKL(zmp);
					zmp = zmp->b_cont;
				}
			}
			break;

		case TYPE_COMPRESSED_TCP:
			/* Calculate amount to remove from front */
			thislen = vjhdr - (uchar_t *)srcp;
			ASSERT(thislen >= 0);

			/* Try to do a cheap adjmsg by arithmetic first. */
			dp = mp->b_rptr + thislen;
			if (dp > mp->b_wptr) {
				if (!adjmsg(mp, thislen)) {
					freemsg(mp);
					goto msg_oerror;
				}
				dp = mp->b_rptr;
			}

			/*
			 * Now make sure first block is big enough to
			 * receive modified data. If we modified in
			 * place, then no need to check or copy.
			 */
			copied -= thislen;
			ASSERT(copied >= PPP_HDRLEN);
			if (srcp !=
			    (void *)(indata + PPP_HDRLEN / sizeof (*indata)))
				copied = 0;
			mp->b_rptr = dp;
			if (MBLKL(mp) < copied) {
				zmp = msgpullup(mp, copied);
				freemsg(mp);
				if ((mp = zmp) == NULL)
					goto msg_oerror;
				dp = mp->b_rptr;
			}

			*dp++ = ((uchar_t *)indata)[0];	/* address */
			*dp++ = ((uchar_t *)indata)[1];	/* control */
			*dp++ = 0;			/* protocol */
			*dp++ = proto = PPP_VJC_COMP;	/* protocol */
			copied -= PPP_HDRLEN;
			if (copied > 0) {
				bcopy(vjhdr, dp, copied);
			}
			break;
		}
	}

	/*
	 * Do packet compression if enabled.
	 */
	if (proto == PPP_CCP) {
		/*
		 * Handle any negotiation packets by changing compressor
		 * state. Doing this here rather than with an ioctl keeps
		 * the negotiation and the data flow in sync.
		 */
		mutex_enter(&cp->cp_pair_lock);
		comp_ccp(q, mp, cp, B_FALSE);
		mutex_exit(&cp->cp_pair_lock);
	} else if (proto != PPP_LCP && IS_CCP_COMP_RUN(cp) &&
	    IS_CCP_ISUP(cp) && cp->cp_xstate != NULL) {
		mblk_t *cmp = NULL;

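		/*
		 * The (*compress)() entry point, as used here, sets
		 * "cmp" to a newly allocated compressed copy when it
		 * produces one, and leaves it NULL when the packet
		 * should go out as-is; a negative return value means
		 * the compressor itself failed and the packet must be
		 * dropped.
		 */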
		len = msgsize(mp);
		len = (*cp->cp_xcomp->compress)(cp->cp_xstate, &cmp, mp, len,
		    cp->cp_mtu + PPP_HDRLEN);

		if (cmp != NULL) {
			/* Success! Discard uncompressed version */
			cmp->b_band = mp->b_band;
			freemsg(mp);
			mp = cmp;
		}
		if (len < 0) {
			/*
			 * Compressor failure; must discard this
			 * packet because the compressor dictionary is
			 * now corrupt.
			 */
			freemsg(mp);
			mutex_enter(&cp->cp_pair_lock);
			cp->cp_stats.ppp_oerrors++;
			mutex_exit(&cp->cp_pair_lock);
			(void) putnextctl1(RD(q), M_CTL, PPPCTL_OERROR);
			return (NULL);
		}
	}

	/*
	 * If either address and control field compression or protocol field
	 * compression is enabled, then we'll need a writable packet. Copy if
	 * necessary.
	 */
	if ((cp->cp_flags & (COMP_AC|COMP_PROT)) && DB_REF(mp) > 1) {
#ifdef SPC_DEBUG
		mutex_enter(&cp->cp_pair_lock);
		cp->cp_omsg_dcopy++;
		mutex_exit(&cp->cp_pair_lock);
#endif
		zmp = copymsg(mp);
		freemsg(mp);
		if ((mp = zmp) == NULL)
			goto msg_oerror;
	}

	/*
	 * Do address/control and protocol compression if enabled.
	 */
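	/*
	 * For example, a full header of ff 03 00 21 (all-stations, UI,
	 * protocol IP) shrinks to just 21 when both ACFC and PFC are in
	 * effect, and to ff 03 21 or 00 21 when only one of the two has
	 * been negotiated.
	 */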
	if (IS_COMP_AC(cp) && (proto != PPP_LCP)) {
		mp->b_rptr += 2;	/* drop address & ctrl fields */
		/*
		 * Protocol field compression omits the first byte if
		 * it would be 0x00, thus the check for < 0x100.
		 */
		if (proto < 0x100 && IS_COMP_PROT(cp)) {
			++mp->b_rptr;	/* drop high protocol byte */
		}
	} else if ((proto < 0x100) && IS_COMP_PROT(cp)) {
		/*
		 * shuffle up the address & ctrl fields
		 */
		mp->b_rptr[2] = mp->b_rptr[1];
		mp->b_rptr[1] = mp->b_rptr[0];
		++mp->b_rptr;
	}
	mutex_enter(&cp->cp_pair_lock);
	cp->cp_stats.ppp_opackets++;
	cp->cp_stats.ppp_obytes += msgsize(mp);
	mutex_exit(&cp->cp_pair_lock);

	CPDEBUG((DBGSTART "send (%ld bytes) flags=0x%b\n",
	    (IS_CP_HASUNIT(cp) ? cp->cp_unit : -1), msgsize(mp),
	    cp->cp_flags, CP_FLAGSSTR));
	return (mp);

msg_oerror:
	mutex_enter(&cp->cp_pair_lock);
	cp->cp_stats.ppp_oerrors++;
	mutex_exit(&cp->cp_pair_lock);
	(void) putnextctl1(RD(q), M_CTL, PPPCTL_OERROR);
	return (NULL);
}

/*
 * spppcomp_inner_ioctl()
 *
 * MT-Perimeters:
 *    exclusive inner; queue pair lock held.
 *
 * Description:
 *    Called by spppcomp_ioctl to handle state-affecting ioctls.
 *    Returns -1 if caller should do putnext, 0 for miocack, or >0
 *    for miocnak. Must *NOT* do putnext in this routine, since
 *    lock is held here.
 */
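/*
 * For reference, a control entity above this module (pppd or something
 * like it) typically drives these ioctls with I_STR from user level.
 * A rough sketch for PPPIO_CFLAGS, assuming the usual <net/pppio.h>
 * flag definitions and a descriptor "fd" for a stream with this module
 * pushed on it:
 *
 *	uint32_t cf[2];
 *	struct strioctl str;
 *
 *	cf[0] = COMP_AC | COMP_PROT;	// new flag values
 *	cf[1] = COMP_AC | COMP_PROT;	// mask of bits to change
 *	str.ic_cmd = PPPIO_CFLAGS;
 *	str.ic_timout = 0;
 *	str.ic_len = sizeof (cf);
 *	str.ic_dp = (char *)cf;
 *	if (ioctl(fd, I_STR, &str) < 0)
 *		perror("PPPIO_CFLAGS");
 *
 * The two words match the flags/mask pair that spppcomp_inner_ioctl()
 * expects below.
 */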
static int
spppcomp_inner_ioctl(queue_t *q, mblk_t *mp)
{
	sppp_comp_t *cp = q->q_ptr;
	int flags;
	int mask;
	int rc;
	int len;
	int cmd;
	int nxslots;
	int nrslots;
	int val;
	uchar_t *opt_data;
	uint32_t opt_len;
	struct compressor **comp;
	struct compressor *ccomp;
	struct iocblk *iop;
	void *xtemp;

	iop = (struct iocblk *)mp->b_rptr;
	rc = EINVAL;
	len = 0;
	switch (iop->ioc_cmd) {
	case PPPIO_CFLAGS:
		if (iop->ioc_count != 2 * sizeof (uint32_t) ||
		    mp->b_cont == NULL)
			break;

		flags = ((uint32_t *)mp->b_cont->b_rptr)[0];
		mask = ((uint32_t *)mp->b_cont->b_rptr)[1];

		cp->cp_flags = (cp->cp_flags & ~mask) | (flags & mask);

		if ((mask & CCP_ISOPEN) && !(flags & CCP_ISOPEN)) {
			cp->cp_flags &= ~CCP_ISUP & ~CCP_COMP_RUN &
			    ~CCP_DECOMP_RUN;
			if (cp->cp_xstate != NULL) {
				(*cp->cp_xcomp->comp_free)(cp->cp_xstate);
				cp->cp_xstate = NULL;
			}
			if (cp->cp_rstate != NULL) {
				(*cp->cp_rcomp->decomp_free)(cp->cp_rstate);
				cp->cp_rstate = NULL;
			}
		}

		CPDEBUG((DBGSTART
		    "PPPIO_CFLAGS xflags=0x%b xmask=0x%b flags=0x%b\n",
		    (IS_CP_HASUNIT(cp) ? cp->cp_unit : -1),
		    flags, CP_FLAGSSTR, mask,
		    CP_FLAGSSTR, cp->cp_flags, CP_FLAGSSTR));

		/* If we're not the last PPP-speaker, then pass along. */
		if (!IS_CP_LASTMOD(cp)) {
			return (-1);	/* putnext */
		}

		*(uint32_t *)mp->b_cont->b_rptr = cp->cp_flags;
		len = sizeof (uint32_t);
		rc = 0;
		break;

	case PPPIO_VJINIT:
		if (iop->ioc_count != 2 || mp->b_cont == NULL)
			break;
		/*
		 * Even though it's not passed along, we have to
		 * validate nrslots so that we don't agree to
		 * decompress anything we cannot.
		 */
		nxslots = mp->b_cont->b_rptr[0] + 1;
		nrslots = mp->b_cont->b_rptr[1] + 1;
		if (nxslots > MAX_STATES || nrslots > MAX_STATES)
			break;

		/* No need to lock here; just reading a word is atomic */
		/* mutex_enter(&cp->cp_pair_lock); */
		cp->cp_vj_last_ierrors = cp->cp_stats.ppp_ierrors;
		/* mutex_exit(&cp->cp_pair_lock); */
		vj_compress_init(&cp->cp_vj, nxslots);
		cp->cp_nxslots = nxslots;

		CPDEBUG((DBGSTART
		    "PPPIO_VJINIT txslots=%d rxslots=%d flags=0x%b\n",
		    (IS_CP_HASUNIT(cp) ? cp->cp_unit : -1), nxslots,
		    nrslots, cp->cp_flags, CP_FLAGSSTR));
		rc = 0;
		break;

	case PPPIO_XCOMP:
	case PPPIO_RCOMP:
		if (iop->ioc_count < 2 || mp->b_cont == NULL)
			break;
		/*
		 * The input data here is the raw CCP algorithm option
		 * from negotiation. The format is always one byte of
		 * algorithm number, one byte of length, and
		 * (length-2) bytes of algorithm-dependent data. The
		 * alloc routine is expected to parse and validate
		 * this.
		 */
		opt_data = mp->b_cont->b_rptr;
		opt_len = mp->b_cont->b_wptr - opt_data;
		if (opt_len > iop->ioc_count) {
			opt_len = iop->ioc_count;
		}
		len = mp->b_cont->b_rptr[1];
		if (len < 2 || len > opt_len)
			break;
		len = 0;
		for (comp = ppp_compressors; *comp != NULL; ++comp) {

			if ((*comp)->compress_proto != opt_data[0]) {
				continue;
			}
			rc = 0;
			if (iop->ioc_cmd == PPPIO_XCOMP) {
				/*
				 * A previous call may have fetched
				 * memory for a compressor that's now
				 * being retired or reset. Free it
				 * using its mechanism for freeing
				 * stuff.
				 */
				if ((xtemp = cp->cp_xstate) != NULL) {
					cp->cp_flags &= ~CCP_ISUP &
					    ~CCP_COMP_RUN;
					cp->cp_xstate = NULL;
					(*cp->cp_xcomp->comp_free)(xtemp);
				}
				cp->cp_xcomp = *comp;
				cp->cp_xstate = (*comp)->comp_alloc(opt_data,
				    opt_len);

				if (cp->cp_xstate == NULL) {
					rc = ENOSR;
				}

				CPDEBUG((DBGSTART "PPPIO_XCOMP opt_proto=0x%x "
				    "opt_len=0x%d flags=0x%b\n",
				    (IS_CP_HASUNIT(cp) ? cp->cp_unit : -1),
				    (uchar_t)opt_data[0], opt_len,
				    cp->cp_flags,
				    CP_FLAGSSTR));
			} else {
				if ((xtemp = cp->cp_rstate) != NULL) {
					cp->cp_flags &= ~CCP_ISUP &
					    ~CCP_DECOMP_RUN;
					cp->cp_rstate = NULL;
					(*cp->cp_rcomp->decomp_free)(xtemp);
				}
				cp->cp_rcomp = *comp;
				cp->cp_rstate =
				    (*comp)->decomp_alloc(opt_data, opt_len);

				if (cp->cp_rstate == NULL) {
					rc = ENOSR;
				}

				CPDEBUG((DBGSTART "PPPIO_RCOMP opt_proto=0x%x "
				    "opt_len=0x%d flags=0x%b\n",
				    (IS_CP_HASUNIT(cp) ? cp->cp_unit : -1),
				    (uchar_t)opt_data[0], opt_len,
				    cp->cp_flags,
				    CP_FLAGSSTR));
			}
			if (rc == 0 && (*comp)->set_effort != NULL) {
				rc = (*(*comp)->set_effort)(cp->
				    cp_xcomp == *comp ? cp->cp_xstate : NULL,
				    cp->cp_rcomp == *comp ? cp->cp_rstate :
				    NULL, cp->cp_effort);
				if (rc != 0) {
					CKDEBUG((DBGSTART
					    "cannot set effort %d",
					    cp->cp_unit, cp->cp_effort));
					rc = 0;
				}
			}
			break;
		}
		break;

	case PPPIO_DEBUG:
		if (iop->ioc_count != sizeof (uint32_t) || mp->b_cont == NULL)
			break;

		cmd = *(uint32_t *)mp->b_cont->b_rptr;

		/* If it's not for us, then pass along. */
		if (cmd != PPPDBG_LOG + PPPDBG_COMP) {
			return (-1);	/* putnext */
		}
		cp->cp_flags |= CP_KDEBUG;

		CKDEBUG((DBGSTART "PPPIO_DEBUG log enabled flags=0x%b\n",
		    (IS_CP_HASUNIT(cp) ? cp->cp_unit : -1),
		    cp->cp_flags, CP_FLAGSSTR));
		rc = 0;
		break;

	case PPPIO_LASTMOD:
		cp->cp_flags |= CP_LASTMOD;
		CPDEBUG((DBGSTART "PPPIO_LASTMOD last module flags=0x%b\n",
		    (IS_CP_HASUNIT(cp) ? cp->cp_unit : -1),
		    cp->cp_flags, CP_FLAGSSTR));
		rc = 0;
		break;

	case PPPIO_COMPLEV:	/* set compression effort level */
		if (iop->ioc_count != sizeof (uint32_t) || mp->b_cont == NULL)
			break;
		val = *(uint32_t *)mp->b_cont->b_rptr;
		cp->cp_effort = val;
		/* Silently ignore if compressor doesn't understand this. */
		rc = 0;
		if ((ccomp = cp->cp_xcomp) != NULL &&
		    ccomp->set_effort != NULL) {
			rc = (*ccomp->set_effort)(cp->cp_xstate,
			    ccomp == cp->cp_rcomp ? cp->cp_rstate : NULL, val);
			if (rc != 0)
				break;
		}
		if ((ccomp = cp->cp_rcomp) != NULL && ccomp != cp->cp_xcomp &&
		    ccomp->set_effort != NULL)
			rc = (*ccomp->set_effort)(NULL, cp->cp_rstate, val);
		break;
	}
	if (rc == 0 && mp->b_cont != NULL)
		mp->b_cont->b_wptr = mp->b_cont->b_rptr + len;
	return (rc);
}

/*
 * spppcomp_getcstat()
 *
 * MT-Perimeters:
 *    exclusive inner.
 *
 * Description:
 *    Called by spppcomp_ioctl as the result of receiving a PPPIO_GETCSTAT.
 */
static void
spppcomp_getcstat(queue_t *q, mblk_t *mp, sppp_comp_t *cp)
{
	mblk_t *mpnext;
	struct ppp_comp_stats *csp;

	ASSERT(cp != NULL);

	mpnext = allocb(sizeof (struct ppp_comp_stats), BPRI_MED);
	if (mpnext == NULL) {
		miocnak(q, mp, 0, ENOSR);
		return;
	}
	if (mp->b_cont != NULL) {
		freemsg(mp->b_cont);
	}
	mp->b_cont = mpnext;
	csp = (struct ppp_comp_stats *)mpnext->b_wptr;
	mpnext->b_wptr += sizeof (struct ppp_comp_stats);
	bzero((caddr_t)csp, sizeof (struct ppp_comp_stats));

	if (cp->cp_xstate != NULL) {
		(*cp->cp_xcomp->comp_stat)(cp->cp_xstate, &csp->c);
	}
	if (cp->cp_rstate != NULL) {
		(*cp->cp_rcomp->decomp_stat)(cp->cp_rstate, &csp->d);
	}

	miocack(q, mp, sizeof (struct ppp_comp_stats), 0);
}

/*
 * spppcomp_ioctl()
 *
 * MT-Perimeters:
 *    exclusive inner.
 *
 * Description:
 *    Called by spppcomp_wput as the result of receiving an M_IOCTL
 *    command.
 */
static void
spppcomp_ioctl(queue_t *q, mblk_t *mp, sppp_comp_t *cp)
{
	struct iocblk *iop;
	int flag;

	ASSERT(cp != NULL);

	iop = (struct iocblk *)mp->b_rptr;
	switch (iop->ioc_cmd) {
	case PPPIO_CFLAGS:
	case PPPIO_VJINIT:
	case PPPIO_XCOMP:
	case PPPIO_RCOMP:
	case PPPIO_DEBUG:
	case PPPIO_LASTMOD:
	case PPPIO_COMPLEV:
		mutex_enter(&cp->cp_pair_lock);
		flag = spppcomp_inner_ioctl(q, mp);
		mutex_exit(&cp->cp_pair_lock);
		if (flag == -1) {
			putnext(q, mp);
		} else if (flag == 0) {
			miocack(q, mp,
			    mp->b_cont == NULL ? 0 : MBLKL(mp->b_cont), 0);
		} else {
			miocnak(q, mp, 0, flag);
		}
		break;

	case PPPIO_GETCSTAT:
		spppcomp_getcstat(q, mp, cp);
		break;

	case PPPIO_GTYPE:	/* get existing driver type */
		if (!IS_CP_LASTMOD(cp)) {
			putnext(q, mp);
			break;
		}
		freemsg(mp->b_cont);
		mp->b_cont = allocb(sizeof (uint32_t), BPRI_MED);
		if (mp->b_cont == NULL) {
			miocnak(q, mp, 0, ENOSR);
		} else {
			*(uint32_t *)mp->b_cont->b_wptr = PPPTYP_HC;
			mp->b_cont->b_wptr += sizeof (uint32_t);
			miocack(q, mp, sizeof (uint32_t), 0);
		}
		break;

	default:
		putnext(q, mp);
		break;
	}
}

/*
 * spppcomp_mctl()
 *
 * MT-Perimeters:
 *    exclusive inner; queue pair lock held.
 *
 * Description:
 *    Called by spppcomp_wput as the result of receiving an M_CTL
 *    message from another STREAMS module, and returns non-zero if
 *    caller should do putnext or zero for freemsg. Must *NOT* do
 *    putnext in this routine, since lock is held here.
 */
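/*
 * The M_CTL messages handled here start with a single PPPCTL_* code
 * byte. As read below, PPPCTL_MTU and PPPCTL_MRU carry the new value
 * as a ushort_t in the second 16-bit word of the block, and PPPCTL_UNIT
 * carries the unit number as a uint32_t in the second 32-bit word;
 * anything shorter than that is silently ignored.
 */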
static int
spppcomp_mctl(queue_t *q, mblk_t *mp)
{
	sppp_comp_t *cp = q->q_ptr;
	kstat_t *ksp;
	char unit[32];
	const char **cpp;
	kstat_named_t *knt;

	switch (*mp->b_rptr) {
	case PPPCTL_MTU:
		if (MBLKL(mp) < 4) {
			break;
		}
		cp->cp_mtu = ((ushort_t *)mp->b_rptr)[1];

		CPDEBUG((DBGSTART "PPPCTL_MTU (%d) flags=0x%b\n",
		    (IS_CP_HASUNIT(cp) ? cp->cp_unit : -1),
		    cp->cp_mtu, cp->cp_flags, CP_FLAGSSTR));
		break;
	case PPPCTL_MRU:
		if (MBLKL(mp) < 4) {
			break;
		}
		cp->cp_mru = ((ushort_t *)mp->b_rptr)[1];

		CPDEBUG((DBGSTART "PPPCTL_MRU (%d) flags=0x%b\n",
		    (IS_CP_HASUNIT(cp) ? cp->cp_unit : -1),
		    cp->cp_mru, cp->cp_flags, CP_FLAGSSTR));
		break;
	case PPPCTL_UNIT:
		if (MBLKL(mp) < 8) {
			break;
		}
		/* If PPPCTL_UNIT has already been issued, then ignore. */
		if (IS_CP_HASUNIT(cp)) {
			break;
		}
		ASSERT(cp->cp_kstats == NULL);
		cp->cp_unit = ((uint32_t *)mp->b_rptr)[1];

		/* Create kstats for this unit. */
		(void) sprintf(unit, "%s" "%d", COMP_MOD_NAME, cp->cp_unit);
		ksp = kstat_create(COMP_MOD_NAME, cp->cp_unit, unit, "net",
		    KSTAT_TYPE_NAMED, sizeof (spppcomp_kstats_t) /
		    sizeof (kstat_named_t), 0);

		if (ksp != NULL) {
			cp->cp_flags |= CP_HASUNIT;
			cp->cp_kstats = ksp;

			knt = (kstat_named_t *)ksp->ks_data;
			for (cpp = kstats_names;
			    cpp < kstats_names + Dim(kstats_names); cpp++) {
				kstat_named_init(knt, *cpp,
				    KSTAT_DATA_UINT32);
				knt++;
			}
			for (cpp = kstats64_names;
			    cpp < kstats64_names + Dim(kstats64_names);
			    cpp++) {
				kstat_named_init(knt, *cpp,
				    KSTAT_DATA_UINT64);
				knt++;
			}
			ksp->ks_update = spppcomp_kstat_update;
			ksp->ks_private = (void *)cp;
			kstat_install(ksp);

			CPDEBUG((DBGSTART "PPPCTL_UNIT flags=0x%b\n",
			    cp->cp_unit, cp->cp_flags, CP_FLAGSSTR));
		}
		break;

	default:
		/* Forward unknown M_CTL messages along */
		return (1);
	}

	/*
	 * For known PPP M_CTL messages, forward along only if we're not the
	 * last PPP-aware module.
	 */
	if (IS_CP_LASTMOD(cp))
		return (0);
	return (1);
}

/*
 * spppcomp_rput()
 *
 * MT-Perimeters:
 *    exclusive inner.
 *
 * Description:
 *    Upper read-side put procedure. Messages get here from below.
 *
 *    The data handling logic is a little more tricky here. We
 *    defer to the service routine if q_first isn't NULL (to
 *    preserve message ordering after deferring a previous message),
 *    bcanputnext() is FALSE (to handle flow control), or we have
 *    done a lot of processing recently and we're about to do a lot
 *    more and we're in an interrupt context (on the theory that
 *    we're hogging the CPU in this case).
 */
static void
spppcomp_rput(queue_t *q, mblk_t *mp)
{
	sppp_comp_t *cp = q->q_ptr;
	struct iocblk *iop;
	struct ppp_stats64 *psp;
	boolean_t inter;
	hrtime_t curtime;

	switch (MTYPE(mp)) {
	case M_DATA:
		inter = servicing_interrupt();
		if (inter) {
			curtime = gethrtime();

			/*
			 * If little time has passed since last
			 * arrival, then bump the counter.
			 */
			if (curtime - cp->cp_lastfinish < spppcomp_min_arrival)
				cp->cp_fastin++;
			else
				cp->cp_fastin >>= 1;	/* a guess */
		}
		/*
		 * If we're not decompressing, then we'll be fast, so
		 * we don't have to worry about hogging here. If we
		 * are decompressing, then we have to check the
		 * cp_fastin count.
		 */
		if ((!(cp->cp_flags & (CCP_DECOMP_RUN | DECOMP_VJC)) ||
		    cp->cp_fastin < MAX_FAST_ARRIVALS) &&
		    q->q_first == NULL && bcanputnext(q, mp->b_band)) {
#ifdef SPC_DEBUG
			cp->cp_in_handled++;
#endif
			if ((mp = spppcomp_inpkt(q, mp)) != NULL)
				putnext(q, mp);
			if (inter) {
				cp->cp_lastfinish = gethrtime();
			}
		} else {
			/* Deferring; give him a clean slate */
			cp->cp_fastin = 0;
#ifdef SPC_DEBUG
			cp->cp_in_queued++;
#endif
			if (!putq(q, mp))
				freemsg(mp);
		}
		break;
	case M_IOCACK:
		iop = (struct iocblk *)mp->b_rptr;
		/*
		 * Bundled with pppstats; no need to handle PPPIO_GETSTAT
		 * here since we'll never see it.
		 */
		if (iop->ioc_cmd == PPPIO_GETSTAT64 &&
		    iop->ioc_count == sizeof (struct ppp_stats64) &&
		    mp->b_cont != NULL) {
			/*
			 * This crock is to handle a badly-designed
			 * but well-known ioctl for ANU PPP. Both
			 * link statistics and VJ statistics are
			 * requested together.
			 *
			 * Catch this on the way back from the
			 * spppasyn module so we can fill in the VJ
			 * stats. This happens only when we have
			 * PPP-aware modules beneath us.
			 */
			psp = (struct ppp_stats64 *)mp->b_cont->b_rptr;
			psp->vj = cp->cp_vj.stats;
			CPDEBUG((DBGSTART
			    "PPPIO_GETSTAT64 (VJ filled) flags=0x%b\n",
			    (IS_CP_HASUNIT(cp) ? cp->cp_unit : -1),
			    cp->cp_flags, CP_FLAGSSTR));
		}
		putnext(q, mp);
		break;
	case M_CTL:
		/* Increase our statistics and forward it upstream. */
		mutex_enter(&cp->cp_pair_lock);
		if (*mp->b_rptr == PPPCTL_IERROR) {
			cp->cp_stats.ppp_ierrors++;
			cp->cp_ierr_low++;
		} else if (*mp->b_rptr == PPPCTL_OERROR) {
			cp->cp_stats.ppp_oerrors++;
			cp->cp_oerr_low++;
		}
		mutex_exit(&cp->cp_pair_lock);
		putnext(q, mp);
		break;

	case M_FLUSH:
		CPDEBUG((DBGSTART "rput M_FLUSH (0x%x) flags=0x%b\n",
		    (IS_CP_HASUNIT(cp) ? cp->cp_unit : -1),
		    *mp->b_rptr, cp->cp_flags, CP_FLAGSSTR));
		/*
		 * Just discard pending data. For CCP, any
		 * decompressor dictionary sequencing problems caused
		 * by this will have to be handled by the compression
		 * protocol in use. For VJ, we need to give the
		 * decompressor a heads-up.
		 */
		if (*mp->b_rptr & FLUSHR) {
			mutex_enter(&cp->cp_pair_lock);
			flushq(q, FLUSHDATA);
			cp->cp_vj_last_ierrors = cp->cp_stats.ppp_ierrors;
			vj_uncompress_err(&cp->cp_vj);
			mutex_exit(&cp->cp_pair_lock);
		}
		putnext(q, mp);
		break;

	default:
		if (bcanputnext(q, mp->b_band))
			putnext(q, mp);
		else if (!putq(q, mp))
			freemsg(mp);
		break;
	}
}

/*
 * spppcomp_rsrv()
 *
 * MT-Perimeters:
 *    exclusive inner.
 *
 * Description:
 *    Upper read-side service procedure. We handle data deferred from
 *    spppcomp_rput here.
 *
 *    The data on the queue are always compressed (unprocessed).
 *    The rput procedure tries to do decompression, but if it can't,
 *    it will put the unprocessed data on the queue for later
 *    handling.
 */
static void
spppcomp_rsrv(queue_t *q)
{
	mblk_t *mp;

	while ((mp = getq(q)) != NULL) {
		/*
		 * If the module above us is flow-controlled, then put
		 * this message back on the queue again.
		 */
		if (!bcanputnext(q, mp->b_band)) {
			(void) putbq(q, mp);
			break;
		}
		if (MTYPE(mp) != M_DATA ||
		    (mp = spppcomp_inpkt(q, mp)) != NULL)
			putnext(q, mp);
	}
}

/*
 * spppcomp_inpkt()
 *
 * MT-Perimeters:
 *    exclusive inner
 *
 * Description:
 *    Process incoming packet.
 */
static mblk_t *
spppcomp_inpkt(queue_t *q, mblk_t *mp)
{
	ushort_t proto;
	int i;
	mblk_t *zmp;
	mblk_t *np;
	uchar_t *dp;
	int len;
	int hlen;
	sppp_comp_t *cp = q->q_ptr;

	len = msgsize(mp);

	mutex_enter(&cp->cp_pair_lock);
	cp->cp_stats.ppp_ibytes += len;
	cp->cp_stats.ppp_ipackets++;
	mutex_exit(&cp->cp_pair_lock);
	/*
	 * First work out the protocol and where the PPP header ends.
	 */
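	/*
	 * Depending on what was negotiated, the header may arrive in
	 * any of these forms (example protocol 0x0021, IP):
	 * "ff 03 00 21" (no compression), "ff 03 21" (PFC only),
	 * "00 21" (ACFC only), or just "21" (both). The checks below
	 * accept all of them.
	 */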
	i = 0;
	proto = MSG_BYTE(mp, 0);
	if (proto == PPP_ALLSTATIONS) {
		i = 2;
		proto = MSG_BYTE(mp, 2);
	}
	if ((proto & 1) == 0) {
		++i;
		proto = (proto << 8) + MSG_BYTE(mp, i);
	}
	hlen = i + 1;
	/*
	 * Now reconstruct a complete, contiguous PPP header at the
	 * start of the packet.
	 */
	if (hlen < (IS_DECOMP_AC(cp) ? 0 : 2) + (IS_DECOMP_PROT(cp) ? 1 : 2)) {
		/* count these? */
		goto bad;
	}
	if (mp->b_rptr + hlen > mp->b_wptr) {
		/*
		 * Header is known to be intact here; so adjmsg will do the
		 * right thing here.
		 */
		if (!adjmsg(mp, hlen)) {
			goto bad;
		}
		hlen = 0;
	}
	if (hlen != PPP_HDRLEN) {
		/*
		 * We need to put some bytes on the front of the packet
		 * to make a full-length PPP header. If we can put them
		 * in mp, we do, otherwise we tack another mblk on the
		 * front.
		 *
		 * XXX we really shouldn't need to carry around the address
		 * and control at this stage. ACFC and PFC need to be
		 * reworked.
		 */
		dp = mp->b_rptr + hlen - PPP_HDRLEN;
		if ((dp < mp->b_datap->db_base) || (DB_REF(mp) > 1)) {

			np = allocb(PPP_HDRLEN, BPRI_MED);
			if (np == 0) {
				goto bad;
			}
			np->b_cont = mp;
			mp->b_rptr += hlen;
			mp = np;
			dp = mp->b_wptr;
			mp->b_wptr += PPP_HDRLEN;
		} else {
			mp->b_rptr = dp;
		}
		dp[0] = PPP_ALLSTATIONS;
		dp[1] = PPP_UI;
		dp[2] = (proto >> 8) & 0xff;
		dp[3] = proto & 0xff;
	}
	/*
	 * Now see if we have a compressed packet to decompress, or a
	 * CCP negotiation packet to take notice of. It's guaranteed
	 * that at least PPP_HDRLEN bytes are contiguous in the first
	 * block now.
	 */
	proto = PPP_PROTOCOL(mp->b_rptr);
	if (proto == PPP_CCP) {
		len = msgsize(mp);
		if (mp->b_wptr < mp->b_rptr + len) {
#ifdef SPC_DEBUG
			mutex_enter(&cp->cp_pair_lock);
			cp->cp_imsg_ccp_pull++;
			mutex_exit(&cp->cp_pair_lock);
#endif
			zmp = msgpullup(mp, len);
			freemsg(mp);
			mp = zmp;
			if (mp == 0) {
				goto bad;
			}
		}
		mutex_enter(&cp->cp_pair_lock);
		comp_ccp(q, mp, cp, B_TRUE);
		mutex_exit(&cp->cp_pair_lock);
	} else if ((cp->cp_flags & (CCP_ISUP | CCP_DECOMP_RUN | CCP_ERR)) ==
	    (CCP_ISUP | CCP_DECOMP_RUN) && cp->cp_rstate != NULL) {
		int rv;

		if ((proto == PPP_COMP) || (proto == PPP_COMPFRAG)) {
			rv = (*cp->cp_rcomp->decompress)(cp->cp_rstate, &mp);
			switch (rv) {
			case DECOMP_OK:
				break;
			case DECOMP_ERROR:
				cp->cp_flags |= CCP_ERROR;
				mutex_enter(&cp->cp_pair_lock);
				++cp->cp_stats.ppp_ierrors;
				mutex_exit(&cp->cp_pair_lock);
				(void) putnextctl1(q, M_CTL, PPPCTL_IERROR);
				break;
			case DECOMP_FATALERROR:
				cp->cp_flags |= CCP_FATALERROR;
				mutex_enter(&cp->cp_pair_lock);
				++cp->cp_stats.ppp_ierrors;
				mutex_exit(&cp->cp_pair_lock);
				(void) putnextctl1(q, M_CTL, PPPCTL_IERROR);
				break;
			}
			if (mp == NULL) {
				/* Decompress failed; data are gone. */
				return (NULL);
			}
		} else {
			/*
			 * For RFCs 1977 and 1979 (BSD Compress and Deflate),
			 * the compressor should send incompressible data
			 * without encapsulation and the receiver must update
			 * its decompression dictionary as though this data
			 * were received and decompressed. This keeps the
			 * dictionaries in sync.
			 */
			rv = (*cp->cp_rcomp->incomp)(cp->cp_rstate, mp);
			if (rv < 0) {
				cp->cp_flags |= CCP_FATALERROR;
				mutex_enter(&cp->cp_pair_lock);
				++cp->cp_stats.ppp_ierrors;
				mutex_exit(&cp->cp_pair_lock);
				(void) putnextctl1(q, M_CTL, PPPCTL_IERROR);
			}
		}
	}
	/*
	 * Now do VJ decompression.
	 */
	proto = PPP_PROTOCOL(mp->b_rptr);
	if ((proto == PPP_VJC_COMP) || (proto == PPP_VJC_UNCOMP)) {

		len = msgsize(mp) - PPP_HDRLEN;

		if (!IS_DECOMP_VJC(cp) || (len <= 0)) {
			goto bad;
		}
		/*
		 * Advance past the ppp header. Here we assume that the whole
		 * PPP header is in the first mblk. (This should be true
		 * because the above code does pull-ups as necessary on raw
		 * data, and the decompressor engines all produce large blocks
		 * on output.)
		 */
		np = mp;
		dp = np->b_rptr + PPP_HDRLEN;
		if (dp >= mp->b_wptr) {
			np = np->b_cont;
			dp = np->b_rptr;
		}
		/*
		 * Make sure we have sufficient contiguous data at this point,
		 * which in most cases we will always do.
		 */
		hlen = (proto == PPP_VJC_COMP) ? MAX_VJHDR : MAX_TCPIPHLEN;
		if (hlen > len) {
			hlen = len;
		}
		if ((np->b_wptr < dp + hlen) || DB_REF(np) > 1) {
#ifdef SPC_DEBUG
			mutex_enter(&cp->cp_pair_lock);
			cp->cp_imsg_vj_pull++;
			mutex_exit(&cp->cp_pair_lock);
#endif
			zmp = msgpullup(mp, hlen + PPP_HDRLEN);
			freemsg(mp);
			mp = zmp;
			if (mp == NULL) {
				goto bad;
			}
			np = mp;
			dp = np->b_rptr + PPP_HDRLEN;
		}

		if (proto == PPP_VJC_COMP) {
			uchar_t *iphdr;
			int vjlen;
			uint_t iphlen;
			int errcnt;

			/*
			 * Decompress VJ-compressed packet. First
			 * reset compressor if an input error has
			 * occurred. (No need to lock statistics
			 * structure for read of a single word.)
			 */
			errcnt = cp->cp_stats.ppp_ierrors;
			if (errcnt != cp->cp_vj_last_ierrors) {
				cp->cp_vj_last_ierrors = errcnt;
				vj_uncompress_err(&cp->cp_vj);
			}

			vjlen = vj_uncompress_tcp(dp, np->b_wptr - dp, len,
			    &cp->cp_vj, &iphdr, &iphlen);

			if (vjlen < 0 || iphlen == 0) {
				/*
				 * so we don't reset next time
				 */
				mutex_enter(&cp->cp_pair_lock);
				++cp->cp_vj_last_ierrors;
				mutex_exit(&cp->cp_pair_lock);
				goto bad;
			}
			/*
			 * drop ppp and vj headers off
			 */
			if (mp != np) {
				freeb(mp);
				mp = np;
			}
			mp->b_rptr = dp + vjlen;
			/*
			 * allocate a new mblk for the ppp and
			 * ip headers
			 */
			np = allocb(iphlen + PPP_HDRLEN, BPRI_MED);
			if (np == NULL)
				goto bad;
			dp = np->b_rptr;
			/*
			 * reconstruct PPP header
			 */
			dp[0] = PPP_ALLSTATIONS;
			dp[1] = PPP_UI;
			dp[2] = PPP_IP >> 8;
			dp[3] = PPP_IP;
			/*
			 * prepend mblk with reconstructed TCP/IP header.
			 */
			bcopy((caddr_t)iphdr, (caddr_t)dp + PPP_HDRLEN, iphlen);
			np->b_wptr = dp + iphlen + PPP_HDRLEN;
			np->b_cont = mp;
			mp = np;
		} else {
			/*
			 * "Decompress" a VJ-uncompressed packet.
			 */
			mutex_enter(&cp->cp_pair_lock);
			cp->cp_vj_last_ierrors = cp->cp_stats.ppp_ierrors;
			mutex_exit(&cp->cp_pair_lock);
			if (!vj_uncompress_uncomp(dp, hlen, &cp->cp_vj)) {
				/*
				 * don't need to reset next time
				 */
				mutex_enter(&cp->cp_pair_lock);
				++cp->cp_vj_last_ierrors;
				mutex_exit(&cp->cp_pair_lock);
				goto bad;
			}
			/*
			 * fix up the PPP protocol field
			 */
			mp->b_rptr[3] = PPP_IP;
		}
	}
	CPDEBUG((DBGSTART "recv (%ld bytes) flags=0x%b\n",
	    (IS_CP_HASUNIT(cp) ? cp->cp_unit : -1), msgsize(mp),
	    cp->cp_flags, CP_FLAGSSTR));
	return (mp);

bad:
	if (mp != 0) {
		freemsg(mp);
	}
	mutex_enter(&cp->cp_pair_lock);
	cp->cp_stats.ppp_ierrors++;
	mutex_exit(&cp->cp_pair_lock);
	(void) putnextctl1(q, M_CTL, PPPCTL_IERROR);
	return (NULL);
}

/*
 * comp_ccp()
 *
 * Description:
 *    Called by spppcomp_outpkt and spppcomp_inpkt to handle a CCP
 *    negotiation packet being sent or received. Here all the data in
 *    the packet is in a single mbuf.
 *
 *    Global state is updated. Must be called with mutex held.
 */
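/*
 * The CCP packet parsed here follows the usual LCP-style layout: after
 * the 4-byte PPP header come a code byte, an id byte, and a 16-bit
 * length (together CCP_HDRLEN bytes), followed for Conf-Ack by the
 * negotiated algorithm option that gets handed to the compressor's
 * init routine.
 */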
/* ARGSUSED */
static void
comp_ccp(queue_t *q, mblk_t *mp, sppp_comp_t *cp, boolean_t rcvd)
{
	int len;
	int clen;
	uchar_t *dp;

	len = msgsize(mp);
	if (len < PPP_HDRLEN + CCP_HDRLEN) {
		return;
	}
	dp = mp->b_rptr + PPP_HDRLEN;

	len -= PPP_HDRLEN;
	clen = CCP_LENGTH(dp);
	if (clen > len) {
		return;
	}

	CPDEBUG((DBGSTART "CCP code=%d flags=0x%b\n",
	    (IS_CP_HASUNIT(cp) ? cp->cp_unit : -1), CCP_CODE(dp),
	    cp->cp_flags, CP_FLAGSSTR));
	switch (CCP_CODE(dp)) {
	case CCP_CONFREQ:
	case CCP_TERMREQ:
	case CCP_TERMACK:
		cp->cp_flags &= ~CCP_ISUP;
		break;
	case CCP_CONFACK:
		if ((cp->cp_flags & (CCP_ISOPEN | CCP_ISUP)) == CCP_ISOPEN &&
		    clen >= CCP_HDRLEN + CCP_OPT_MINLEN &&
		    clen >= CCP_HDRLEN + CCP_OPT_LENGTH(dp + CCP_HDRLEN)) {

			int rc;

			if (!rcvd) {
				rc = (*cp->cp_xcomp->comp_init)(cp->cp_xstate,
				    dp + CCP_HDRLEN, clen - CCP_HDRLEN,
				    cp->cp_unit, 0,
				    IS_CP_KDEBUG(cp) | ALG_DEBUG);

				if (cp->cp_xstate != NULL && rc != 0) {
					cp->cp_flags |= CCP_COMP_RUN;
				}
			} else {
				rc = (*cp->cp_rcomp->decomp_init)(cp->
				    cp_rstate, dp + CCP_HDRLEN,
				    clen - CCP_HDRLEN, cp->cp_unit, 0,
				    cp->cp_mru,
				    IS_CP_KDEBUG(cp) | ALG_DEBUG);

				if (cp->cp_rstate != NULL && rc != 0) {
					cp->cp_flags &= ~CCP_ERR;
					cp->cp_flags |= CCP_DECOMP_RUN;
				}
			}
		}
		break;
	case CCP_RESETACK:
		if (IS_CCP_ISUP(cp)) {
			if (!rcvd) {
				if (cp->cp_xstate != NULL &&
				    IS_CCP_COMP_RUN(cp)) {
					(*cp->cp_xcomp->comp_reset)(cp->
					    cp_xstate);
				}
			} else {
				if (cp->cp_rstate != NULL &&
				    IS_CCP_DECOMP_RUN(cp)) {
					(*cp->cp_rcomp->decomp_reset)(cp->
					    cp_rstate);
					cp->cp_flags &= ~CCP_ERROR;
				}
			}
		}
		break;
	}
}

/*
 * spppcomp_kstat_update()
 *
 * Description:
 *    Update per-unit kstat statistics.
 */
static int
spppcomp_kstat_update(kstat_t *ksp, int rw)
{
	sppp_comp_t *cp = ksp->ks_private;
	spppcomp_kstats_t *cpkp;
	struct vjstat *sp;
	struct pppstat64 *psp;
	struct ppp_comp_stats csp;

	if (rw == KSTAT_WRITE) {
		return (EACCES);
	}

	cpkp = (spppcomp_kstats_t *)ksp->ks_data;
	bzero((caddr_t)&csp, sizeof (struct ppp_comp_stats));

	mutex_enter(&cp->cp_pair_lock);

	if (cp->cp_xstate != NULL) {
		(*cp->cp_xcomp->comp_stat)(cp->cp_xstate, &csp.c);
	}
	if (cp->cp_rstate != NULL) {
		(*cp->cp_rcomp->decomp_stat)(cp->cp_rstate, &csp.d);
	}

	sp = &cp->cp_vj.stats;

	cpkp->vj_out_pkts.value.ui32 = sp->vjs_packets;
	cpkp->vj_out_pkts_comp.value.ui32 = sp->vjs_compressed;
	cpkp->vj_cs_searches.value.ui32 = sp->vjs_searches;
	cpkp->vj_cs_misses.value.ui32 = sp->vjs_misses;
	cpkp->vj_in_pkts_uncomp.value.ui32 = sp->vjs_uncompressedin;
	cpkp->vj_in_pkts_comp.value.ui32 = sp->vjs_compressedin;
	cpkp->vj_in_error.value.ui32 = sp->vjs_errorin;
	cpkp->vj_in_tossed.value.ui32 = sp->vjs_tossed;

	psp = &cp->cp_stats;

	cpkp->out_bytes.value.ui64 = psp->ppp_obytes;
	cpkp->out_pkts.value.ui64 = psp->ppp_opackets;
	cpkp->out_errors.value.ui64 = psp->ppp_oerrors;
	cpkp->out_errors_low.value.ui32 = cp->cp_oerr_low;
	cpkp->out_uncomp_bytes.value.ui32 = csp.c.unc_bytes;
	cpkp->out_uncomp_pkts.value.ui32 = csp.c.unc_packets;
	cpkp->out_comp_bytes.value.ui32 = csp.c.comp_bytes;
	cpkp->out_comp_pkts.value.ui32 = csp.c.comp_packets;
	cpkp->out_incomp_bytes.value.ui32 = csp.c.inc_bytes;
	cpkp->out_incomp_pkts.value.ui32 = csp.c.inc_packets;

	cpkp->in_bytes.value.ui64 = psp->ppp_ibytes;
	cpkp->in_pkts.value.ui64 = psp->ppp_ipackets;
	cpkp->in_errors.value.ui64 = psp->ppp_ierrors;
	cpkp->in_errors_low.value.ui32 = cp->cp_ierr_low;
	cpkp->in_uncomp_bytes.value.ui32 = csp.d.unc_bytes;
	cpkp->in_uncomp_pkts.value.ui32 = csp.d.unc_packets;
	cpkp->in_comp_bytes.value.ui32 = csp.d.comp_bytes;
	cpkp->in_comp_pkts.value.ui32 = csp.d.comp_packets;
	cpkp->in_incomp_bytes.value.ui32 = csp.d.inc_bytes;
	cpkp->in_incomp_pkts.value.ui32 = csp.d.inc_packets;
#ifdef SPC_DEBUG
	cpkp->in_msg_ccp_pulledup.value.ui32 = cp->cp_imsg_ccp_pull;
	cpkp->in_msg_vj_pulledup.value.ui32 = cp->cp_imsg_vj_pull;
	cpkp->out_msg_pulledup.value.ui32 = cp->cp_omsg_pull;
	cpkp->out_msg_copied.value.ui32 = cp->cp_omsg_dcopy;
	cpkp->out_queued.value.ui32 = cp->cp_out_queued;
	cpkp->out_handled.value.ui32 = cp->cp_out_handled;
	cpkp->in_queued.value.ui32 = cp->cp_in_queued;
	cpkp->in_handled.value.ui32 = cp->cp_in_handled;
#endif
	mutex_exit(&cp->cp_pair_lock);
	return (0);
}