/*
 * spppcomp.c - STREAMS module for kernel-level compression and CCP support.
 *
 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 *
 * Permission to use, copy, modify, and distribute this software and its
 * documentation is hereby granted, provided that the above copyright
 * notice appears in all copies.
 *
 * SUN MAKES NO REPRESENTATION OR WARRANTIES ABOUT THE SUITABILITY OF
 * THE SOFTWARE, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
 * TO THE IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
 * PARTICULAR PURPOSE, OR NON-INFRINGEMENT. SUN SHALL NOT BE LIABLE FOR
 * ANY DAMAGES SUFFERED BY LICENSEE AS A RESULT OF USING, MODIFYING OR
 * DISTRIBUTING THIS SOFTWARE OR ITS DERIVATIVES
 *
 * Copyright (c) 1994 The Australian National University.
 * All rights reserved.
 *
 * Permission to use, copy, modify, and distribute this software and its
 * documentation is hereby granted, provided that the above copyright
 * notice appears in all copies. This software is provided without any
 * warranty, express or implied. The Australian National University
 * makes no representations about the suitability of this software for
 * any purpose.
 *
 * IN NO EVENT SHALL THE AUSTRALIAN NATIONAL UNIVERSITY BE LIABLE TO ANY
 * PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
 * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF
 * THE AUSTRALIAN NATIONAL UNIVERSITY HAS BEEN ADVISED OF THE POSSIBILITY
 * OF SUCH DAMAGE.
 *
 * THE AUSTRALIAN NATIONAL UNIVERSITY SPECIFICALLY DISCLAIMS ANY WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
 * ON AN "AS IS" BASIS, AND THE AUSTRALIAN NATIONAL UNIVERSITY HAS NO
 * OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS,
 * OR MODIFICATIONS.
 *
 * This module is derived from the original SVR4 STREAMS PPP compression
 * module originally written by Paul Mackerras <paul.mackerras@cs.anu.edu.au>.
 *
 * James Carlson <james.d.carlson@sun.com> and Adi Masputra
 * <adi.masputra@sun.com> rewrote and restructured the code for improved
 * performance and scalability.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"
#define	RCSID	"$Id: spppcomp.c,v 1.0 2000/05/08 01:10:12 masputra Exp $"

#include <sys/types.h>
#include <sys/debug.h>
#include <sys/param.h>
#include <sys/stream.h>
#include <sys/stropts.h>
#include <sys/errno.h>
#include <sys/conf.h>
#include <sys/cpuvar.h>
#include <sys/cmn_err.h>
#include <sys/kmem.h>
#include <sys/ddi.h>
#include <sys/kstat.h>
#include <sys/strsun.h>
#include <sys/sysmacros.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <net/ppp_defs.h>
#include <net/pppio.h>
#include <net/vjcompress.h>

/* Defined for platform-neutral include file */
#define	PACKETPTR	mblk_t *
#include <net/ppp-comp.h>

#include "s_common.h"

#ifdef DEBUG
#define	SPC_DEBUG
#endif
#include "spppcomp.h"

/*
 * This is used to tag official Solaris sources. Please do not define
 * "INTERNAL_BUILD" when building this software outside of Sun
 * Microsystems.
 */
#ifdef INTERNAL_BUILD
/* MODINFO is limited to 32 characters. */
const char spppcomp_module_description[] = "PPP 4.0 compression";
#else /* INTERNAL_BUILD */
const char spppcomp_module_description[] =
    "ANU PPP compression $Revision: 1.16$ ";

/* LINTED */
static const char buildtime[] = "Built " __DATE__ " at " __TIME__
#ifdef DEBUG
    " DEBUG"
#endif
    "\n";
#endif /* INTERNAL_BUILD */

static int	spppcomp_open(queue_t *, dev_t *, int, int, cred_t *);
static int	spppcomp_close(queue_t *, int, cred_t *);
static int	spppcomp_rput(queue_t *, mblk_t *);
static int	spppcomp_rsrv(queue_t *);
static int	spppcomp_wput(queue_t *, mblk_t *);
static int	spppcomp_wsrv(queue_t *);

#define	PPPCOMP_MI_MINPSZ	(0)
#define	PPPCOMP_MI_MAXPSZ	(INFPSZ)
#define	PPPCOMP_MI_HIWAT	(PPP_MTU * 20)
#define	PPPCOMP_MI_LOWAT	(PPP_MTU * 18)

static struct module_info spppcomp_modinfo = {
    COMP_MOD_ID,		/* mi_idnum */
    COMP_MOD_NAME,		/* mi_idname */
    PPPCOMP_MI_MINPSZ,		/* mi_minpsz */
    PPPCOMP_MI_MAXPSZ,		/* mi_maxpsz */
    PPPCOMP_MI_HIWAT,		/* mi_hiwat */
    PPPCOMP_MI_LOWAT		/* mi_lowat */
};

static struct qinit spppcomp_rinit = {
    spppcomp_rput,		/* qi_putp */
    spppcomp_rsrv,		/* qi_srvp */
    spppcomp_open,		/* qi_qopen */
    spppcomp_close,		/* qi_qclose */
    NULL,			/* qi_qadmin */
    &spppcomp_modinfo,		/* qi_minfo */
    NULL			/* qi_mstat */
};

static struct qinit spppcomp_winit = {
    spppcomp_wput,		/* qi_putp */
    spppcomp_wsrv,		/* qi_srvp */
    NULL,			/* qi_qopen */
    NULL,			/* qi_qclose */
    NULL,			/* qi_qadmin */
    &spppcomp_modinfo,		/* qi_minfo */
    NULL			/* qi_mstat */
};

struct streamtab spppcomp_tab = {
    &spppcomp_rinit,		/* st_rdinit */
    &spppcomp_winit,		/* st_wrinit */
    NULL,			/* st_muxrinit */
    NULL			/* st_muxwinit */
};

/* Set non-zero to debug algorithm-specific problems alone. */
#define	ALG_DEBUG	0

#define	MAX_IPHLEN	(0x0f << 2)
#define	MAX_TCPHLEN	(0x0f << 2)
#define	MAX_TCPIPHLEN	(MAX_IPHLEN + MAX_TCPHLEN) /* max TCP/IP header size */
#define	MAX_VJHDR	(20)	/* max VJ compressed header size (?) */

#if 0
#define	DBGSTART	CE_CONT, COMP_MOD_NAME "%d: "
#define	CKDEBUG(x)	cmn_err x
#else
#define	DBGSTART	COMP_MOD_NAME "%d: "
#define	CKDEBUG(x)	printf x
#endif
#define	CPDEBUG(x)	(IS_CP_KDEBUG(cp) ? CKDEBUG(x) : (void)0)

/*
 * List of compressors we know about.
 */
#if DO_BSD_COMPRESS
extern struct compressor ppp_bsd_compress;
#endif
#if DO_DEFLATE
extern struct compressor ppp_deflate;
extern struct compressor ppp_deflate_draft;
#endif

struct compressor *ppp_compressors[] = {
#if DO_BSD_COMPRESS
    &ppp_bsd_compress,
#endif
#if DO_DEFLATE
    &ppp_deflate,
    &ppp_deflate_draft,
#endif
    NULL
};

/*
 * LCP_USE_DFLT() removed by James Carlson. RFC 1661 section 6.6 has
 * this to say on the topic:
 *
 *    The Address and Control fields MUST NOT be compressed when sending
 *    any LCP packet. This rule guarantees unambiguous recognition of
 *    LCP packets.
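 *
 * This is why the transmit path below (spppcomp_outpkt) skips
 * address/control field compression and CCP data compression for
 * PPP_LCP (0xc021) frames.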
 */

static void	spppcomp_ioctl(queue_t *, mblk_t *, sppp_comp_t *);
static int	spppcomp_mctl(queue_t *, mblk_t *);
static mblk_t	*spppcomp_outpkt(queue_t *, mblk_t *);
static mblk_t	*spppcomp_inpkt(queue_t *, mblk_t *);
static int	spppcomp_kstat_update(kstat_t *, int);
static void	comp_ccp(queue_t *, mblk_t *, sppp_comp_t *, boolean_t);

/*
 * Values for checking inter-arrival times on interrupt stacks. These
 * are used to prevent CPU hogging in interrupt context.
 */
#define	MIN_ARRIVAL_TIME	5000000	/* interarrival time in nanoseconds */
#define	MAX_FAST_ARRIVALS	10	/* maximum packet count */
hrtime_t spppcomp_min_arrival = MIN_ARRIVAL_TIME;

static const char *kstats_names[] = {
#ifdef SPCDEBUG_KSTATS_NAMES
    SPPPCOMP_KSTATS_NAMES,
    SPCDEBUG_KSTATS_NAMES
#else
    SPPPCOMP_KSTATS_NAMES
#endif
};
static const char *kstats64_names[] = { SPPPCOMP_KSTATS64_NAMES };

/*
 * spppcomp_open()
 *
 * MT-Perimeters:
 *    exclusive inner.
 *
 * Description:
 *    Common open procedure for module.
 */
/* ARGSUSED */
static int
spppcomp_open(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *credp)
{
    sppp_comp_t *cp;

    ASSERT(q != NULL);
    ASSERT(devp != NULL);

    if (q->q_ptr != NULL) {
        return (0);
    }
    if (sflag != MODOPEN) {
        return (EINVAL);
    }
    cp = (sppp_comp_t *)kmem_zalloc(sizeof (sppp_comp_t), KM_SLEEP);
    ASSERT(cp != NULL);
    q->q_ptr = WR(q)->q_ptr = (caddr_t)cp;

    cp->cp_mru = PPP_MRU;
    cp->cp_mtu = PPP_MTU;

    mutex_init(&cp->cp_pair_lock, NULL, MUTEX_DRIVER, NULL);
    vj_compress_init(&cp->cp_vj, -1);
    cp->cp_nxslots = -1;
    cp->cp_effort = -1;

    qprocson(q);
    return (0);
}

/*
 * spppcomp_close()
 *
 * MT-Perimeters:
 *    exclusive inner.
 *
 * Description:
 *    Common close procedure for module.
 */
/* ARGSUSED */
static int
spppcomp_close(queue_t *q, int flag, cred_t *credp)
{
    sppp_comp_t *cp;

    ASSERT(q != NULL);
    ASSERT(q->q_ptr != NULL);
    cp = (sppp_comp_t *)q->q_ptr;

    qprocsoff(q);

    CPDEBUG((DBGSTART "close flags=0x%b\n",
        (IS_CP_HASUNIT(cp) ? cp->cp_unit : -1), cp->cp_flags,
        CP_FLAGSSTR));
    mutex_destroy(&cp->cp_pair_lock);
    if (cp->cp_kstats) {
        ASSERT(IS_CP_HASUNIT(cp));
        kstat_delete(cp->cp_kstats);
    }
    if (cp->cp_xstate != NULL) {
        (*cp->cp_xcomp->comp_free)(cp->cp_xstate);
    }
    if (cp->cp_rstate != NULL) {
        (*cp->cp_rcomp->decomp_free)(cp->cp_rstate);
    }
    kmem_free(cp, sizeof (sppp_comp_t));
    q->q_ptr = WR(q)->q_ptr = NULL;

    return (0);
}

/*
 * spppcomp_wput()
 *
 * MT-Perimeters:
 *    exclusive inner.
 *
 * Description:
 *    Write-side put procedure. Packets from above us arrive here.
 *
 *    The data handling logic is a little tricky here. We defer to
 *    the service routine if q_first isn't NULL (to preserve message
 *    ordering after deferring a previous message), bcanputnext() is
 *    FALSE (to handle flow control), or we need a lot of processing
 *    and we're in an interrupt context (on the theory that we're
 *    already on a very long call stack at that point). Since many
 *    callers will be in a non-interrupt context, this means that
 *    most processing will be performed here in-line, and deferral
 *    occurs only when necessary.
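 *
 *    Deferred M_DATA messages are placed on the write queue with
 *    putq() and are picked up later by spppcomp_wsrv() once the
 *    queue is scheduled again.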
 */
static int
spppcomp_wput(queue_t *q, mblk_t *mp)
{
    sppp_comp_t *cp;
    int flag;

    ASSERT(q != NULL);
    ASSERT(q->q_ptr != NULL);
    cp = (sppp_comp_t *)q->q_ptr;
    ASSERT(mp != NULL && mp->b_rptr != NULL);

    switch (MTYPE(mp)) {
    case M_DATA:
        if (q->q_first != NULL || !bcanputnext(q, mp->b_band) ||
            ((cp->cp_flags & (COMP_VJC|CCP_COMP_RUN)) &&
            servicing_interrupt())) {
#ifdef SPC_DEBUG
            cp->cp_out_queued++;
#endif
            (void) putq(q, mp);
        } else {
#ifdef SPC_DEBUG
            cp->cp_out_handled++;
#endif
            if ((mp = spppcomp_outpkt(q, mp)) != NULL) {
                putnext(q, mp);
            }
        }
        break;
    case M_IOCTL:
        spppcomp_ioctl(q, mp, cp);
        break;
    case M_CTL:
        mutex_enter(&cp->cp_pair_lock);
        flag = spppcomp_mctl(q, mp);
        mutex_exit(&cp->cp_pair_lock);
        if (flag != 0)
            putnext(q, mp);
        else
            freemsg(mp);
        break;
    case M_FLUSH:
        CPDEBUG((DBGSTART "wput M_FLUSH (0x%x) flags=0x%b\n",
            (IS_CP_HASUNIT(cp) ? cp->cp_unit : -1),
            *mp->b_rptr, cp->cp_flags, CP_FLAGSSTR));
        /*
         * Just discard pending data. For CCP, any compressor
         * dictionary sequencing problems caused by this will
         * have to be handled by the compression protocol in
         * use. For VJ, we need to tell the compressor to
         * start over.
         */
        if (*mp->b_rptr & FLUSHW) {
            mutex_enter(&cp->cp_pair_lock);
            flushq(q, FLUSHDATA);
            vj_compress_init(&cp->cp_vj, cp->cp_nxslots);
            mutex_exit(&cp->cp_pair_lock);
        }
        putnext(q, mp);
        break;
    default:
        putnext(q, mp);
        break;
    }
    return (0);
}

/*
 * spppcomp_wsrv()
 *
 * MT-Perimeters:
 *    exclusive inner
 *
 * Description:
 *    Write-side service procedure.
 */
static int
spppcomp_wsrv(queue_t *q)
{
    mblk_t *mp;

    ASSERT(q != NULL);
    ASSERT(q->q_ptr != NULL);

    while ((mp = getq(q)) != NULL) {
        /* We should only place M_DATA on the service queue. */
        ASSERT(MTYPE(mp) == M_DATA);
        /*
         * If the module below us is flow-controlled, then put
         * this message back on the queue again.
         */
        if (!bcanputnext(q, mp->b_band)) {
            (void) putbq(q, mp);
            break;
        }
        if ((mp = spppcomp_outpkt(q, mp)) != NULL) {
            putnext(q, mp);
        }
    }
    return (0);
}

/*
 * spppcomp_outpkt()
 *
 * MT-Perimeters:
 *    exclusive inner
 *
 * Description:
 *    Process outgoing packet. Returns new mblk_t pointer on success
 *    (caller should do putnext through q), NULL on failure (packet has
 *    been discarded).
 */
static mblk_t *
spppcomp_outpkt(queue_t *q, mblk_t *mp)
{
    mblk_t *zmp;
    int len;
    ushort_t proto;
    sppp_comp_t *cp;

    ASSERT(q != NULL);
    ASSERT(mp != NULL);
    cp = (sppp_comp_t *)q->q_ptr;
    ASSERT(cp != NULL);

    /*
     * If the entire data size of the mblk is less than the length of the
     * PPP header, then free it. We can't do much with such a message
     * anyway, since we can't determine what the PPP protocol is.
     */
    len = msgsize(mp);
    if (MBLKL(mp) < PPP_HDRLEN) {
#ifdef SPC_DEBUG
        mutex_enter(&cp->cp_pair_lock);
        cp->cp_omsg_pull++;
        mutex_exit(&cp->cp_pair_lock);
#endif
        zmp = msgpullup(mp, PPP_HDRLEN);
        freemsg(mp);
        if ((mp = zmp) == NULL)
            goto msg_oerror;
    }

    proto = PPP_PROTOCOL(mp->b_rptr);

    /*
     * Do VJ compression if requested.
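     * Only TCP segments carried in IP (PPP protocol PPP_IP with
     * ip_p == IPPROTO_TCP) are candidates. Per RFC 1144,
     * vj_compress_tcp() returns TYPE_IP (leave the packet alone),
     * TYPE_UNCOMPRESSED_TCP (send as PPP_VJC_UNCOMP to seed the
     * peer's state), or TYPE_COMPRESSED_TCP (send as PPP_VJC_COMP).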
     */
    if (proto == PPP_IP && IS_COMP_VJC(cp) &&
        MSG_BYTE(mp, PPP_HDRLEN+offsetof(struct ip, ip_p)) ==
        IPPROTO_TCP) {
        uchar_t *vjhdr;
        int type;
        uint32_t indata[(PPP_HDRLEN+MAX_TCPIPHLEN) /
            sizeof (uint32_t)];
        uchar_t *dp;
        int tocopy, copied;
        mblk_t *fmb;
        void *srcp;
        int thislen;


        tocopy = copied = MIN(len, sizeof (indata));
        /*
         * If we can alter this dblk, and there's enough data
         * here to work with, and it's nicely aligned, then
         * avoid the data copy.
         */
        if (DB_REF(mp) == 1 && MBLKL(mp) >= tocopy &&
            ((uintptr_t)mp->b_rptr & 3) == 0) {
            /* Save off the address/control */
            indata[0] = *(uint32_t *)mp->b_rptr;
            srcp = (void *)(mp->b_rptr + PPP_HDRLEN);
        } else {
            fmb = mp;
            dp = (uchar_t *)indata;
            while (tocopy > 0) {
                thislen = MBLKL(fmb);
                if (tocopy > thislen) {
                    bcopy(fmb->b_rptr, dp, thislen);
                    dp += thislen;
                    tocopy -= thislen;
                    fmb = fmb->b_cont;
                } else {
                    bcopy(fmb->b_rptr, dp, tocopy);
                    break;
                }
            }
            srcp = (void *)(indata + PPP_HDRLEN/sizeof (*indata));
        }

        type = vj_compress_tcp((struct ip *)srcp, len - PPP_HDRLEN,
            &cp->cp_vj, IS_COMP_VJCCID(cp), &vjhdr);

        /*
         * If we're going to modify this packet, then we can't modify
         * someone else's data. Copy instead.
         *
         * (It would be nice to be able to avoid this data copy if CCP
         * is also enabled. That would require extensive
         * modifications to the compression code. Users should be
         * told to disable VJ compression when using CCP.)
         */
        if (type != TYPE_IP && DB_REF(mp) > 1) {
#ifdef SPC_DEBUG
            mutex_enter(&cp->cp_pair_lock);
            cp->cp_omsg_dcopy++;
            mutex_exit(&cp->cp_pair_lock);
#endif
            /* Copy just altered portion. */
            zmp = msgpullup(mp, copied);
            freemsg(mp);
            if ((mp = zmp) == NULL)
                goto msg_oerror;
        }

        switch (type) {
        case TYPE_UNCOMPRESSED_TCP:
            mp->b_rptr[3] = proto = PPP_VJC_UNCOMP;
            /* No need to update if it was done in place. */
            if (srcp ==
                (void *)(indata + PPP_HDRLEN / sizeof (*indata))) {
                thislen = PPP_HDRLEN +
                    offsetof(struct ip, ip_p);
                zmp = mp;
                while (zmp != NULL) {
                    if (MBLKL(zmp) > thislen) {
                        zmp->b_rptr[thislen] =
                            ((struct ip *)srcp)->ip_p;
                        break;
                    }
                    thislen -= MBLKL(zmp);
                    zmp = zmp->b_cont;
                }
            }
            break;

        case TYPE_COMPRESSED_TCP:
            /* Calculate amount to remove from front */
            thislen = vjhdr - (uchar_t *)srcp;
            ASSERT(thislen >= 0);

            /* Try to do a cheap adjmsg by arithmetic first. */
            dp = mp->b_rptr + thislen;
            if (dp > mp->b_wptr) {
                if (!adjmsg(mp, thislen)) {
                    freemsg(mp);
                    goto msg_oerror;
                }
                dp = mp->b_rptr;
            }

            /*
             * Now make sure first block is big enough to
             * receive modified data. If we modified in
             * place, then no need to check or copy.
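             *
             * At this point "copied" counts the bytes of the
             * local indata copy (the rebuilt PPP header plus the
             * VJ-compressed header at vjhdr) that still have to
             * be written back into the message; it is forced to
             * zero below when vj_compress_tcp() worked on the
             * mblk in place, so that only the 4-byte PPP header
             * is rewritten.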
             */
            copied -= thislen;
            ASSERT(copied >= PPP_HDRLEN);
            if (srcp !=
                (void *)(indata + PPP_HDRLEN / sizeof (*indata)))
                copied = 0;
            mp->b_rptr = dp;
            if (MBLKL(mp) < copied) {
                zmp = msgpullup(mp, copied);
                freemsg(mp);
                if ((mp = zmp) == NULL)
                    goto msg_oerror;
                dp = mp->b_rptr;
            }

            *dp++ = ((uchar_t *)indata)[0];	/* address */
            *dp++ = ((uchar_t *)indata)[1];	/* control */
            *dp++ = 0;				/* protocol */
            *dp++ = proto = PPP_VJC_COMP;	/* protocol */
            copied -= PPP_HDRLEN;
            if (copied > 0) {
                bcopy(vjhdr, dp, copied);
            }
            break;
        }
    }

    /*
     * Do packet compression if enabled.
     */
    if (proto == PPP_CCP) {
        /*
         * Handle any negotiation packets by changing compressor
         * state. Doing this here rather than with an ioctl keeps
         * the negotiation and the data flow in sync.
         */
        mutex_enter(&cp->cp_pair_lock);
        comp_ccp(q, mp, cp, B_FALSE);
        mutex_exit(&cp->cp_pair_lock);
    } else if (proto != PPP_LCP && IS_CCP_COMP_RUN(cp) &&
        IS_CCP_ISUP(cp) && cp->cp_xstate != NULL) {
        mblk_t *cmp = NULL;

        len = msgsize(mp);
        len = (*cp->cp_xcomp->compress)(cp->cp_xstate, &cmp, mp, len,
            cp->cp_mtu + PPP_HDRLEN);

        if (cmp != NULL) {
            /* Success! Discard uncompressed version */
            cmp->b_band = mp->b_band;
            freemsg(mp);
            mp = cmp;
        }
        if (len < 0) {
            /*
             * Compressor failure; must discard this
             * packet because the compressor dictionary is
             * now corrupt.
             */
            freemsg(mp);
            mutex_enter(&cp->cp_pair_lock);
            cp->cp_stats.ppp_oerrors++;
            mutex_exit(&cp->cp_pair_lock);
            (void) putnextctl1(RD(q), M_CTL, PPPCTL_OERROR);
            return (NULL);
        }
    }

    /*
     * If either address and control field compression or protocol field
     * compression is enabled, then we'll need a writable packet. Copy if
     * necessary.
     */
    if ((cp->cp_flags & (COMP_AC|COMP_PROT)) && DB_REF(mp) > 1) {
#ifdef SPC_DEBUG
        mutex_enter(&cp->cp_pair_lock);
        cp->cp_omsg_dcopy++;
        mutex_exit(&cp->cp_pair_lock);
#endif
        zmp = copymsg(mp);
        freemsg(mp);
        if ((mp = zmp) == NULL)
            goto msg_oerror;
    }

    /*
     * Do address/control and protocol compression if enabled.
     */
    if (IS_COMP_AC(cp) && (proto != PPP_LCP)) {
        mp->b_rptr += 2;	/* drop address & ctrl fields */
        /*
         * Protocol field compression omits the first byte if
         * it would be 0x00, thus the check for < 0x100.
         */
        if (proto < 0x100 && IS_COMP_PROT(cp)) {
            ++mp->b_rptr;	/* drop high protocol byte */
        }
    } else if ((proto < 0x100) && IS_COMP_PROT(cp)) {
        /*
         * shuffle up the address & ctrl fields
         */
        mp->b_rptr[2] = mp->b_rptr[1];
        mp->b_rptr[1] = mp->b_rptr[0];
        ++mp->b_rptr;
    }
    mutex_enter(&cp->cp_pair_lock);
    cp->cp_stats.ppp_opackets++;
    cp->cp_stats.ppp_obytes += msgsize(mp);
    mutex_exit(&cp->cp_pair_lock);

    CPDEBUG((DBGSTART "send (%ld bytes) flags=0x%b\n",
        (IS_CP_HASUNIT(cp) ? cp->cp_unit : -1), msgsize(mp),
        cp->cp_flags, CP_FLAGSSTR));
    return (mp);

msg_oerror:
    mutex_enter(&cp->cp_pair_lock);
    cp->cp_stats.ppp_oerrors++;
    mutex_exit(&cp->cp_pair_lock);
    (void) putnextctl1(RD(q), M_CTL, PPPCTL_OERROR);
    return (NULL);
}

/*
 * spppcomp_inner_ioctl()
 *
 * MT-Perimeters:
 *    exclusive inner; queue pair lock held.
 *
 * Description:
 *    Called by spppcomp_ioctl to handle state-affecting ioctls.
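 *    These are PPPIO_CFLAGS, PPPIO_VJINIT, PPPIO_XCOMP, PPPIO_RCOMP,
 *    PPPIO_DEBUG, PPPIO_LASTMOD, and PPPIO_COMPLEV (see the switch below).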
 *    Returns -1 if caller should do putnext, 0 for miocack, or >0
 *    for miocnak. Must *NOT* do putnext in this routine, since
 *    lock is held here.
 */
static int
spppcomp_inner_ioctl(queue_t *q, mblk_t *mp)
{
    sppp_comp_t *cp;
    int flags;
    int mask;
    int rc;
    int len;
    int cmd;
    int nxslots;
    int nrslots;
    int val;
    uchar_t *opt_data;
    uint32_t opt_len;
    struct compressor **comp;
    struct compressor *ccomp;
    struct iocblk *iop;
    void *xtemp;

    ASSERT(q != NULL);
    ASSERT(q->q_ptr != NULL);
    cp = (sppp_comp_t *)q->q_ptr;
    ASSERT(mp != NULL);
    ASSERT(mp->b_rptr != NULL);

    iop = (struct iocblk *)mp->b_rptr;
    rc = EINVAL;
    len = 0;
    switch (iop->ioc_cmd) {
    case PPPIO_CFLAGS:
        if (iop->ioc_count != 2 * sizeof (uint32_t) ||
            mp->b_cont == NULL)
            break;

        flags = ((uint32_t *)mp->b_cont->b_rptr)[0];
        mask = ((uint32_t *)mp->b_cont->b_rptr)[1];

        cp->cp_flags = (cp->cp_flags & ~mask) | (flags & mask);

        if ((mask & CCP_ISOPEN) && !(flags & CCP_ISOPEN)) {
            cp->cp_flags &= ~CCP_ISUP & ~CCP_COMP_RUN &
                ~CCP_DECOMP_RUN;
            if (cp->cp_xstate != NULL) {
                (*cp->cp_xcomp->comp_free)(cp->cp_xstate);
                cp->cp_xstate = NULL;
            }
            if (cp->cp_rstate != NULL) {
                (*cp->cp_rcomp->decomp_free)(cp->cp_rstate);
                cp->cp_rstate = NULL;
            }
        }

        CPDEBUG((DBGSTART
            "PPPIO_CFLAGS xflags=0x%b xmask=0x%b flags=0x%b\n",
            (IS_CP_HASUNIT(cp) ? cp->cp_unit : -1),
            flags, CP_FLAGSSTR, mask,
            CP_FLAGSSTR, cp->cp_flags, CP_FLAGSSTR));

        /* If we're not the last PPP-speaker, then pass along. */
        if (!IS_CP_LASTMOD(cp)) {
            return (-1);	/* putnext */
        }

        *(uint32_t *)mp->b_cont->b_rptr = cp->cp_flags;
        len = sizeof (uint32_t);
        rc = 0;
        break;

    case PPPIO_VJINIT:
        if (iop->ioc_count != 2 || mp->b_cont == NULL)
            break;
        /*
         * Even though it's not passed along, we have to
         * validate nrslots so that we don't agree to
         * decompress anything we cannot.
         */
        nxslots = mp->b_cont->b_rptr[0] + 1;
        nrslots = mp->b_cont->b_rptr[1] + 1;
        if (nxslots > MAX_STATES || nrslots > MAX_STATES)
            break;

        /* No need to lock here; just reading a word is atomic */
        /* mutex_enter(&cp->cp_pair_lock); */
        cp->cp_vj_last_ierrors = cp->cp_stats.ppp_ierrors;
        /* mutex_exit(&cp->cp_pair_lock); */
        vj_compress_init(&cp->cp_vj, nxslots);
        cp->cp_nxslots = nxslots;

        CPDEBUG((DBGSTART
            "PPPIO_VJINIT txslots=%d rxslots=%d flags=0x%b\n",
            (IS_CP_HASUNIT(cp) ? cp->cp_unit : -1), nxslots,
            nrslots, cp->cp_flags, CP_FLAGSSTR));
        rc = 0;
        break;

    case PPPIO_XCOMP:
    case PPPIO_RCOMP:
        if (iop->ioc_count < 2 || mp->b_cont == NULL)
            break;
        /*
         * The input data here is the raw CCP algorithm option
         * from negotiation. The format is always one byte of
         * algorithm number, one byte of length, and
         * (length-2) bytes of algorithm-dependent data. The
         * alloc routine is expected to parse and validate
         * this.
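         *
         * For example (RFC 1979), a Deflate option negotiating a
         * 32-Kbyte window would arrive here as the four bytes
         * 0x1a 0x04 0x78 0x00: algorithm 26, length 4, a
         * window/method byte, and a check-method byte.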
         */
        opt_data = mp->b_cont->b_rptr;
        opt_len = mp->b_cont->b_wptr - opt_data;
        if (opt_len > iop->ioc_count) {
            opt_len = iop->ioc_count;
        }
        len = mp->b_cont->b_rptr[1];
        if (len < 2 || len > opt_len)
            break;
        len = 0;
        for (comp = ppp_compressors; *comp != NULL; ++comp) {

            if ((*comp)->compress_proto != opt_data[0]) {
                continue;
            }
            rc = 0;
            if (iop->ioc_cmd == PPPIO_XCOMP) {
                /*
                 * A previous call may have fetched
                 * memory for a compressor that's now
                 * being retired or reset. Free it
                 * using its mechanism for freeing
                 * stuff.
                 */
                if ((xtemp = cp->cp_xstate) != NULL) {
                    cp->cp_flags &= ~CCP_ISUP &
                        ~CCP_COMP_RUN;
                    cp->cp_xstate = NULL;
                    (*cp->cp_xcomp->comp_free)(xtemp);
                }
                cp->cp_xcomp = *comp;
                cp->cp_xstate = (*comp)->comp_alloc(opt_data,
                    opt_len);

                if (cp->cp_xstate == NULL) {
                    rc = ENOSR;
                }

                CPDEBUG((DBGSTART "PPPIO_XCOMP opt_proto=0x%x "
                    "opt_len=0x%d flags=0x%b\n",
                    (IS_CP_HASUNIT(cp) ? cp->cp_unit : -1),
                    (uchar_t)opt_data[0], opt_len,
                    cp->cp_flags,
                    CP_FLAGSSTR));
            } else {
                if ((xtemp = cp->cp_rstate) != NULL) {
                    cp->cp_flags &= ~CCP_ISUP &
                        ~CCP_DECOMP_RUN;
                    cp->cp_rstate = NULL;
                    (*cp->cp_rcomp->decomp_free)(xtemp);
                }
                cp->cp_rcomp = *comp;
                cp->cp_rstate =
                    (*comp)->decomp_alloc(opt_data, opt_len);

                if (cp->cp_rstate == NULL) {
                    rc = ENOSR;
                }

                CPDEBUG((DBGSTART "PPPIO_RCOMP opt_proto=0x%x "
                    "opt_len=0x%d flags=0x%b\n",
                    (IS_CP_HASUNIT(cp) ? cp->cp_unit : -1),
                    (uchar_t)opt_data[0], opt_len,
                    cp->cp_flags,
                    CP_FLAGSSTR));
            }
            if (rc == 0 && (*comp)->set_effort != NULL) {
                rc = (*(*comp)->set_effort)(cp->
                    cp_xcomp == *comp ? cp->cp_xstate : NULL,
                    cp->cp_rcomp == *comp ? cp->cp_rstate :
                    NULL, cp->cp_effort);
                if (rc != 0) {
                    CKDEBUG((DBGSTART
                        "cannot set effort %d",
                        cp->cp_unit, cp->cp_effort));
                    rc = 0;
                }
            }
            break;
        }
        break;

    case PPPIO_DEBUG:
        if (iop->ioc_count != sizeof (uint32_t) || mp->b_cont == NULL)
            break;

        cmd = *(uint32_t *)mp->b_cont->b_rptr;

        /* If it's not for us, then pass along. */
        if (cmd != PPPDBG_LOG + PPPDBG_COMP) {
            return (-1);	/* putnext */
        }
        cp->cp_flags |= CP_KDEBUG;

        CKDEBUG((DBGSTART "PPPIO_DEBUG log enabled flags=0x%b\n",
            (IS_CP_HASUNIT(cp) ? cp->cp_unit : -1),
            cp->cp_flags, CP_FLAGSSTR));
        rc = 0;
        break;

    case PPPIO_LASTMOD:
        cp->cp_flags |= CP_LASTMOD;
        CPDEBUG((DBGSTART "PPPIO_LASTMOD last module flags=0x%b\n",
            (IS_CP_HASUNIT(cp) ? cp->cp_unit : -1),
            cp->cp_flags, CP_FLAGSSTR));
        rc = 0;
        break;

    case PPPIO_COMPLEV:	/* set compression effort level */
        if (iop->ioc_count != sizeof (uint32_t) || mp->b_cont == NULL)
            break;
        val = *(uint32_t *)mp->b_cont->b_rptr;
        cp->cp_effort = val;
        /* Silently ignore if compressor doesn't understand this. */
        rc = 0;
        if ((ccomp = cp->cp_xcomp) != NULL &&
            ccomp->set_effort != NULL) {
            rc = (*ccomp->set_effort)(cp->cp_xstate,
                ccomp == cp->cp_rcomp ? cp->cp_rstate : NULL, val);
            if (rc != 0)
                break;
        }
        if ((ccomp = cp->cp_rcomp) != NULL && ccomp != cp->cp_xcomp &&
            ccomp->set_effort != NULL)
            rc = (*ccomp->set_effort)(NULL, cp->cp_rstate, val);
        break;
    }
    if (rc == 0 && mp->b_cont != NULL)
        mp->b_cont->b_wptr = mp->b_cont->b_rptr + len;
    return (rc);
}

/*
 * spppcomp_getcstat()
 *
 * MT-Perimeters:
 *    exclusive inner.
 *
 * Description:
 *    Called by spppcomp_ioctl as the result of receiving a PPPIO_GETCSTAT.
 */
static void
spppcomp_getcstat(queue_t *q, mblk_t *mp, sppp_comp_t *cp)
{
    mblk_t *mpnext;
    struct ppp_comp_stats *csp;

    ASSERT(q != NULL);
    ASSERT(q->q_ptr != NULL);
    ASSERT(mp != NULL);
    ASSERT(mp->b_rptr != NULL);
    ASSERT(cp != NULL);

    mpnext = allocb(sizeof (struct ppp_comp_stats), BPRI_MED);
    if (mpnext == NULL) {
        miocnak(q, mp, 0, ENOSR);
        return;
    }
    if (mp->b_cont != NULL) {
        freemsg(mp->b_cont);
    }
    mp->b_cont = mpnext;
    csp = (struct ppp_comp_stats *)mpnext->b_wptr;
    mpnext->b_wptr += sizeof (struct ppp_comp_stats);
    bzero((caddr_t)csp, sizeof (struct ppp_comp_stats));

    if (cp->cp_xstate != NULL) {
        (*cp->cp_xcomp->comp_stat)(cp->cp_xstate, &csp->c);
    }
    if (cp->cp_rstate != NULL) {
        (*cp->cp_rcomp->decomp_stat)(cp->cp_rstate, &csp->d);
    }

    miocack(q, mp, sizeof (struct ppp_comp_stats), 0);
}

/*
 * spppcomp_ioctl()
 *
 * MT-Perimeters:
 *    exclusive inner.
 *
 * Description:
 *    Called by spppcomp_wput as the result of receiving an M_IOCTL
 *    command.
 */
static void
spppcomp_ioctl(queue_t *q, mblk_t *mp, sppp_comp_t *cp)
{
    struct iocblk *iop;
    int flag;

    ASSERT(q != NULL);
    ASSERT(q->q_ptr != NULL);
    ASSERT(mp != NULL);
    ASSERT(mp->b_rptr != NULL);
    ASSERT(cp != NULL);

    iop = (struct iocblk *)mp->b_rptr;
    switch (iop->ioc_cmd) {
    case PPPIO_CFLAGS:
    case PPPIO_VJINIT:
    case PPPIO_XCOMP:
    case PPPIO_RCOMP:
    case PPPIO_DEBUG:
    case PPPIO_LASTMOD:
    case PPPIO_COMPLEV:
        mutex_enter(&cp->cp_pair_lock);
        flag = spppcomp_inner_ioctl(q, mp);
        mutex_exit(&cp->cp_pair_lock);
        if (flag == -1) {
            putnext(q, mp);
        } else if (flag == 0) {
            miocack(q, mp,
                mp->b_cont == NULL ? 0 : MBLKL(mp->b_cont), 0);
        } else {
            miocnak(q, mp, 0, flag);
        }
        break;

    case PPPIO_GETCSTAT:
        spppcomp_getcstat(q, mp, cp);
        break;

    case PPPIO_GTYPE:	/* get existing driver type */
        if (!IS_CP_LASTMOD(cp)) {
            putnext(q, mp);
            break;
        }
        freemsg(mp->b_cont);
        mp->b_cont = allocb(sizeof (uint32_t), BPRI_MED);
        if (mp->b_cont == NULL) {
            miocnak(q, mp, 0, ENOSR);
        } else {
            *(uint32_t *)mp->b_cont->b_wptr = PPPTYP_HC;
            mp->b_cont->b_wptr += sizeof (uint32_t);
            miocack(q, mp, sizeof (uint32_t), 0);
        }
        break;

    default:
        putnext(q, mp);
        break;
    }
}

/*
 * spppcomp_mctl()
 *
 * MT-Perimeters:
 *    exclusive inner; queue pair lock held.
 *
 * Description:
 *    Called by spppcomp_wput as the result of receiving an M_CTL
 *    message from another STREAMS module, and returns non-zero if
 *    caller should do putnext or zero for freemsg. Must *NOT* do
 *    putnext in this routine, since lock is held here.
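 *    The known messages are PPPCTL_MTU, PPPCTL_MRU, and PPPCTL_UNIT;
 *    anything else is passed along unchanged.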
 */
static int
spppcomp_mctl(queue_t *q, mblk_t *mp)
{
    sppp_comp_t *cp;
    kstat_t *ksp;
    char unit[32];
    const char **cpp;
    kstat_named_t *knt;

    ASSERT(q != NULL);
    ASSERT(q->q_ptr != NULL);
    cp = (sppp_comp_t *)q->q_ptr;
    ASSERT(mp != NULL);
    ASSERT(mp->b_rptr != NULL);

    switch (*mp->b_rptr) {
    case PPPCTL_MTU:
        if (MBLKL(mp) < 4) {
            break;
        }
        cp->cp_mtu = ((ushort_t *)mp->b_rptr)[1];

        CPDEBUG((DBGSTART "PPPCTL_MTU (%d) flags=0x%b\n",
            (IS_CP_HASUNIT(cp) ? cp->cp_unit : -1),
            cp->cp_mtu, cp->cp_flags, CP_FLAGSSTR));
        break;
    case PPPCTL_MRU:
        if (MBLKL(mp) < 4) {
            break;
        }
        cp->cp_mru = ((ushort_t *)mp->b_rptr)[1];

        CPDEBUG((DBGSTART "PPPCTL_MRU (%d) flags=0x%b\n",
            (IS_CP_HASUNIT(cp) ? cp->cp_unit : -1),
            cp->cp_mru, cp->cp_flags, CP_FLAGSSTR));
        break;
    case PPPCTL_UNIT:
        if (MBLKL(mp) < 8) {
            break;
        }
        /* If PPPCTL_UNIT has already been issued, then ignore. */
        if (IS_CP_HASUNIT(cp)) {
            break;
        }
        ASSERT(cp->cp_kstats == NULL);
        cp->cp_unit = ((uint32_t *)mp->b_rptr)[1];

        /* Create kstats for this unit. */
        (void) sprintf(unit, "%s" "%d", COMP_MOD_NAME, cp->cp_unit);
        ksp = kstat_create(COMP_MOD_NAME, cp->cp_unit, unit, "net",
            KSTAT_TYPE_NAMED, sizeof (spppcomp_kstats_t) /
            sizeof (kstat_named_t), 0);

        if (ksp != NULL) {
            cp->cp_flags |= CP_HASUNIT;
            cp->cp_kstats = ksp;

            knt = (kstat_named_t *)ksp->ks_data;
            for (cpp = kstats_names;
                cpp < kstats_names + Dim(kstats_names); cpp++) {
                kstat_named_init(knt, *cpp,
                    KSTAT_DATA_UINT32);
                knt++;
            }
            for (cpp = kstats64_names;
                cpp < kstats64_names + Dim(kstats64_names); cpp++) {
                kstat_named_init(knt, *cpp,
                    KSTAT_DATA_UINT64);
                knt++;
            }
            ksp->ks_update = spppcomp_kstat_update;
            ksp->ks_private = (void *)cp;
            kstat_install(ksp);

            CPDEBUG((DBGSTART "PPPCTL_UNIT flags=0x%b\n",
                cp->cp_unit, cp->cp_flags, CP_FLAGSSTR));
        }
        break;

    default:
        /* Forward unknown M_CTL messages along */
        return (1);
    }

    /*
     * For known PPP M_CTL messages, forward along only if we're not the
     * last PPP-aware module.
     */
    if (IS_CP_LASTMOD(cp))
        return (0);
    return (1);
}

/*
 * spppcomp_rput()
 *
 * MT-Perimeters:
 *    exclusive inner.
 *
 * Description:
 *    Upper read-side put procedure. Messages get here from below.
 *
 *    The data handling logic is a little more tricky here. We
 *    defer to the service routine if q_first isn't NULL (to
 *    preserve message ordering after deferring a previous message),
 *    bcanputnext() is FALSE (to handle flow control), or we have
 *    done a lot of processing recently and we're about to do a lot
 *    more and we're in an interrupt context (on the theory that
 *    we're hogging the CPU in this case).
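 *
 *    "A lot of processing recently" means that at least
 *    MAX_FAST_ARRIVALS packets have arrived less than
 *    spppcomp_min_arrival (5 ms by default) apart while CCP or VJ
 *    decompression is enabled; see the M_DATA case below.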
 */
static int
spppcomp_rput(queue_t *q, mblk_t *mp)
{
    sppp_comp_t *cp;
    struct iocblk *iop;
    struct ppp_stats64 *psp;
    boolean_t inter;
    hrtime_t curtime;

    ASSERT(q != NULL);
    ASSERT(q->q_ptr != NULL);
    cp = (sppp_comp_t *)q->q_ptr;
    ASSERT(mp != NULL);

    switch (MTYPE(mp)) {
    case M_DATA:
        inter = servicing_interrupt();
        if (inter) {
            curtime = gethrtime();

            /*
             * If little time has passed since last
             * arrival, then bump the counter.
             */
            if (curtime - cp->cp_lastfinish < spppcomp_min_arrival)
                cp->cp_fastin++;
            else
                cp->cp_fastin >>= 1;	/* a guess */
        }
        /*
         * If we're not decompressing, then we'll be fast, so
         * we don't have to worry about hogging here. If we
         * are decompressing, then we have to check the
         * cp_fastin count.
         */
        if ((!(cp->cp_flags & (CCP_DECOMP_RUN | DECOMP_VJC)) ||
            cp->cp_fastin < MAX_FAST_ARRIVALS) &&
            q->q_first == NULL && bcanputnext(q, mp->b_band)) {
#ifdef SPC_DEBUG
            cp->cp_in_handled++;
#endif
            if ((mp = spppcomp_inpkt(q, mp)) != NULL)
                putnext(q, mp);
            if (inter) {
                cp->cp_lastfinish = gethrtime();
            }
        } else {
            /* Deferring; give him a clean slate */
            cp->cp_fastin = 0;
#ifdef SPC_DEBUG
            cp->cp_in_queued++;
#endif
            (void) putq(q, mp);
        }
        break;
    case M_IOCACK:
        iop = (struct iocblk *)mp->b_rptr;
        ASSERT(iop != NULL);
        /*
         * Bundled with pppstats; no need to handle PPPIO_GETSTAT
         * here since we'll never see it.
         */
        if (iop->ioc_cmd == PPPIO_GETSTAT64 &&
            iop->ioc_count == sizeof (struct ppp_stats64) &&
            mp->b_cont != NULL) {
            /*
             * This crock is to handle a badly-designed
             * but well-known ioctl for ANU PPP. Both
             * link statistics and VJ statistics are
             * requested together.
             *
             * Catch this on the way back from the
             * spppasyn module so we can fill in the VJ
             * stats. This happens only when we have
             * PPP-aware modules beneath us.
             */
            psp = (struct ppp_stats64 *)mp->b_cont->b_rptr;
            psp->vj = cp->cp_vj.stats;
            CPDEBUG((DBGSTART
                "PPPIO_GETSTAT64 (VJ filled) flags=0x%b\n",
                (IS_CP_HASUNIT(cp) ? cp->cp_unit : -1),
                cp->cp_flags, CP_FLAGSSTR));
        }
        putnext(q, mp);
        break;
    case M_CTL:
        /* Increase our statistics and forward it upstream. */
        mutex_enter(&cp->cp_pair_lock);
        if (*mp->b_rptr == PPPCTL_IERROR) {
            cp->cp_stats.ppp_ierrors++;
            cp->cp_ierr_low++;
        } else if (*mp->b_rptr == PPPCTL_OERROR) {
            cp->cp_stats.ppp_oerrors++;
            cp->cp_oerr_low++;
        }
        mutex_exit(&cp->cp_pair_lock);
        putnext(q, mp);
        break;

    case M_FLUSH:
        CPDEBUG((DBGSTART "rput M_FLUSH (0x%x) flags=0x%b\n",
            (IS_CP_HASUNIT(cp) ? cp->cp_unit : -1),
            *mp->b_rptr, cp->cp_flags, CP_FLAGSSTR));
        /*
         * Just discard pending data. For CCP, any
         * decompressor dictionary sequencing problems caused
         * by this will have to be handled by the compression
         * protocol in use. For VJ, we need to give the
         * decompressor a heads-up.
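         * The heads-up is vj_uncompress_err() below, which makes
         * the VJ decompressor discard compressed headers until the
         * next uncompressed (PPP_VJC_UNCOMP) packet resynchronizes
         * its state.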
         */
        if (*mp->b_rptr & FLUSHR) {
            mutex_enter(&cp->cp_pair_lock);
            flushq(q, FLUSHDATA);
            cp->cp_vj_last_ierrors = cp->cp_stats.ppp_ierrors;
            vj_uncompress_err(&cp->cp_vj);
            mutex_exit(&cp->cp_pair_lock);
        }
        putnext(q, mp);
        break;

    default:
        putnext(q, mp);
        break;
    }
    return (0);
}

/*
 * spppcomp_rsrv()
 *
 * MT-Perimeters:
 *    exclusive inner.
 *
 * Description:
 *    Upper read-side service procedure. We handle data deferred from
 *    spppcomp_rput here.
 *
 *    The data on the queue are always compressed (unprocessed).
 *    The rput procedure tries to do decompression, but if it can't,
 *    it will put the unprocessed data on the queue for later
 *    handling.
 */
static int
spppcomp_rsrv(queue_t *q)
{
    mblk_t *mp;

    ASSERT(q != NULL);
    ASSERT(q->q_ptr != NULL);

    while ((mp = getq(q)) != NULL) {
        /* We should only place M_DATA on the service queue. */
        ASSERT(MTYPE(mp) == M_DATA);
        /*
         * If the module above us is flow-controlled, then put
         * this message back on the queue again.
         */
        if (!bcanputnext(q, mp->b_band)) {
            (void) putbq(q, mp);
            break;
        }
        if ((mp = spppcomp_inpkt(q, mp)) != NULL)
            putnext(q, mp);
    }
    return (0);
}

/*
 * spppcomp_inpkt()
 *
 * MT-Perimeters:
 *    exclusive inner
 *
 * Description:
 *    Process incoming packet.
 */
static mblk_t *
spppcomp_inpkt(queue_t *q, mblk_t *mp)
{
    ushort_t proto;
    int i;
    mblk_t *zmp;
    mblk_t *np;
    uchar_t *dp;
    int len;
    int hlen;
    sppp_comp_t *cp;

    ASSERT(q != NULL);
    ASSERT(mp != NULL);
    cp = (sppp_comp_t *)q->q_ptr;
    ASSERT(cp != NULL);

    len = msgsize(mp);

    mutex_enter(&cp->cp_pair_lock);
    cp->cp_stats.ppp_ibytes += len;
    cp->cp_stats.ppp_ipackets++;
    mutex_exit(&cp->cp_pair_lock);
    /*
     * First work out the protocol and where the PPP header ends.
     */
    i = 0;
    proto = MSG_BYTE(mp, 0);
    if (proto == PPP_ALLSTATIONS) {
        i = 2;
        proto = MSG_BYTE(mp, 2);
    }
    if ((proto & 1) == 0) {
        ++i;
        proto = (proto << 8) + MSG_BYTE(mp, i);
    }
    hlen = i + 1;
    /*
     * Now reconstruct a complete, contiguous PPP header at the
     * start of the packet.
     */
    if (hlen < (IS_DECOMP_AC(cp) ? 0 : 2) + (IS_DECOMP_PROT(cp) ? 1 : 2)) {
        /* count these? */
        goto bad;
    }
    if (mp->b_rptr + hlen > mp->b_wptr) {
        /*
         * Header is known to be intact here; so adjmsg will do the
         * right thing here.
         */
        if (!adjmsg(mp, hlen)) {
            goto bad;
        }
        hlen = 0;
    }
    if (hlen != PPP_HDRLEN) {
        /*
         * We need to put some bytes on the front of the packet
         * to make a full-length PPP header. If we can put them
         * in mp, we do, otherwise we tack another mblk on the
         * front.
         *
         * XXX we really shouldn't need to carry around the address
         * and control at this stage. ACFC and PFC need to be
         * reworked.
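         *
         * For example, an IP packet received with both ACFC and PFC
         * in effect starts with just the low protocol byte
         * (0x21 0x45 ...); the code below rebuilds the full four-byte
         * header (0xff 0x03 0x00 0x21) in front of it so the rest of
         * this module can always use PPP_PROTOCOL() on a fixed-size
         * header.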
         */
        dp = mp->b_rptr + hlen - PPP_HDRLEN;
        if ((dp < mp->b_datap->db_base) || (DB_REF(mp) > 1)) {

            np = allocb(PPP_HDRLEN, BPRI_MED);
            if (np == 0) {
                goto bad;
            }
            np->b_cont = mp;
            mp->b_rptr += hlen;
            mp = np;
            dp = mp->b_wptr;
            mp->b_wptr += PPP_HDRLEN;
        } else {
            mp->b_rptr = dp;
        }
        dp[0] = PPP_ALLSTATIONS;
        dp[1] = PPP_UI;
        dp[2] = (proto >> 8) & 0xff;
        dp[3] = proto & 0xff;
    }
    /*
     * Now see if we have a compressed packet to decompress, or a
     * CCP negotiation packet to take notice of. It's guaranteed
     * that at least PPP_HDRLEN bytes are contiguous in the first
     * block now.
     */
    proto = PPP_PROTOCOL(mp->b_rptr);
    if (proto == PPP_CCP) {
        len = msgsize(mp);
        if (mp->b_wptr < mp->b_rptr + len) {
#ifdef SPC_DEBUG
            mutex_enter(&cp->cp_pair_lock);
            cp->cp_imsg_ccp_pull++;
            mutex_exit(&cp->cp_pair_lock);
#endif
            zmp = msgpullup(mp, len);
            freemsg(mp);
            mp = zmp;
            if (mp == 0) {
                goto bad;
            }
        }
        mutex_enter(&cp->cp_pair_lock);
        comp_ccp(q, mp, cp, B_TRUE);
        mutex_exit(&cp->cp_pair_lock);
    } else if ((cp->cp_flags & (CCP_ISUP | CCP_DECOMP_RUN | CCP_ERR)) ==
        (CCP_ISUP | CCP_DECOMP_RUN) && cp->cp_rstate != NULL) {
        int rv;

        if ((proto == PPP_COMP) || (proto == PPP_COMPFRAG)) {
            rv = (*cp->cp_rcomp->decompress)(cp->cp_rstate, &mp);
            switch (rv) {
            case DECOMP_OK:
                break;
            case DECOMP_ERROR:
                cp->cp_flags |= CCP_ERROR;
                mutex_enter(&cp->cp_pair_lock);
                ++cp->cp_stats.ppp_ierrors;
                mutex_exit(&cp->cp_pair_lock);
                (void) putnextctl1(q, M_CTL, PPPCTL_IERROR);
                break;
            case DECOMP_FATALERROR:
                cp->cp_flags |= CCP_FATALERROR;
                mutex_enter(&cp->cp_pair_lock);
                ++cp->cp_stats.ppp_ierrors;
                mutex_exit(&cp->cp_pair_lock);
                (void) putnextctl1(q, M_CTL, PPPCTL_IERROR);
                break;
            }
            if (mp == NULL) {
                /* Decompress failed; data are gone. */
                return (NULL);
            }
        } else {
            /*
             * For RFCs 1977 and 1979 (BSD Compress and Deflate),
             * the compressor should send incompressible data
             * without encapsulation and the receiver must update
             * its decompression dictionary as though this data
             * were received and decompressed. This keeps the
             * dictionaries in sync.
             */
            rv = (*cp->cp_rcomp->incomp)(cp->cp_rstate, mp);
            if (rv < 0) {
                cp->cp_flags |= CCP_FATALERROR;
                mutex_enter(&cp->cp_pair_lock);
                ++cp->cp_stats.ppp_ierrors;
                mutex_exit(&cp->cp_pair_lock);
                (void) putnextctl1(q, M_CTL, PPPCTL_IERROR);
            }
        }
    }
    /*
     * Now do VJ decompression.
     */
    proto = PPP_PROTOCOL(mp->b_rptr);
    if ((proto == PPP_VJC_COMP) || (proto == PPP_VJC_UNCOMP)) {

        len = msgsize(mp) - PPP_HDRLEN;

        if (!IS_DECOMP_VJC(cp) || (len <= 0)) {
            goto bad;
        }
        /*
         * Advance past the ppp header. Here we assume that the whole
         * PPP header is in the first mblk. (This should be true
         * because the above code does pull-ups as necessary on raw
         * data, and the decompressor engines all produce large blocks
         * on output.)
         */
        np = mp;
        dp = np->b_rptr + PPP_HDRLEN;
        if (dp >= mp->b_wptr) {
            np = np->b_cont;
            dp = np->b_rptr;
        }
        /*
         * Make sure we have sufficient contiguous data at this point,
         * which in most cases we will always do.
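         *
         * The pull-up below also gives us a private copy
         * (DB_REF == 1) because the VJ code may rewrite header bytes
         * in place (for example, vj_uncompress_uncomp() restores
         * ip_p to IPPROTO_TCP).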
         */
        hlen = (proto == PPP_VJC_COMP) ? MAX_VJHDR : MAX_TCPIPHLEN;
        if (hlen > len) {
            hlen = len;
        }
        if ((np->b_wptr < dp + hlen) || DB_REF(np) > 1) {
#ifdef SPC_DEBUG
            mutex_enter(&cp->cp_pair_lock);
            cp->cp_imsg_vj_pull++;
            mutex_exit(&cp->cp_pair_lock);
#endif
            zmp = msgpullup(mp, hlen + PPP_HDRLEN);
            freemsg(mp);
            mp = zmp;
            if (mp == NULL) {
                goto bad;
            }
            np = mp;
            dp = np->b_rptr + PPP_HDRLEN;
        }

        if (proto == PPP_VJC_COMP) {
            uchar_t *iphdr;
            int vjlen;
            uint_t iphlen;
            int errcnt;

            /*
             * Decompress VJ-compressed packet. First
             * reset compressor if an input error has
             * occurred. (No need to lock statistics
             * structure for read of a single word.)
             */
            errcnt = cp->cp_stats.ppp_ierrors;
            if (errcnt != cp->cp_vj_last_ierrors) {
                cp->cp_vj_last_ierrors = errcnt;
                vj_uncompress_err(&cp->cp_vj);
            }

            vjlen = vj_uncompress_tcp(dp, np->b_wptr - dp, len,
                &cp->cp_vj, &iphdr, &iphlen);

            if (vjlen < 0 || iphlen == 0) {
                /*
                 * so we don't reset next time
                 */
                mutex_enter(&cp->cp_pair_lock);
                ++cp->cp_vj_last_ierrors;
                mutex_exit(&cp->cp_pair_lock);
                goto bad;
            }
            /*
             * drop ppp and vj headers off
             */
            if (mp != np) {
                freeb(mp);
                mp = np;
            }
            mp->b_rptr = dp + vjlen;
            /*
             * allocate a new mblk for the ppp and
             * ip headers
             */
            np = allocb(iphlen + PPP_HDRLEN, BPRI_MED);
            if (np == NULL)
                goto bad;
            dp = np->b_rptr;
            /*
             * reconstruct PPP header
             */
            dp[0] = PPP_ALLSTATIONS;
            dp[1] = PPP_UI;
            dp[2] = PPP_IP >> 8;
            dp[3] = PPP_IP;
            /*
             * prepend mblk with reconstructed TCP/IP header.
             */
            bcopy((caddr_t)iphdr, (caddr_t)dp + PPP_HDRLEN, iphlen);
            np->b_wptr = dp + iphlen + PPP_HDRLEN;
            np->b_cont = mp;
            mp = np;
        } else {
            /*
             * "Decompress" a VJ-uncompressed packet.
             */
            mutex_enter(&cp->cp_pair_lock);
            cp->cp_vj_last_ierrors = cp->cp_stats.ppp_ierrors;
            mutex_exit(&cp->cp_pair_lock);
            if (!vj_uncompress_uncomp(dp, hlen, &cp->cp_vj)) {
                /*
                 * don't need to reset next time
                 */
                mutex_enter(&cp->cp_pair_lock);
                ++cp->cp_vj_last_ierrors;
                mutex_exit(&cp->cp_pair_lock);
                goto bad;
            }
            /*
             * fix up the PPP protocol field
             */
            mp->b_rptr[3] = PPP_IP;
        }
    }
    CPDEBUG((DBGSTART "recv (%ld bytes) flags=0x%b\n",
        (IS_CP_HASUNIT(cp) ? cp->cp_unit : -1), msgsize(mp),
        cp->cp_flags, CP_FLAGSSTR));
    return (mp);

bad:
    if (mp != 0) {
        freemsg(mp);
    }
    mutex_enter(&cp->cp_pair_lock);
    cp->cp_stats.ppp_ierrors++;
    mutex_exit(&cp->cp_pair_lock);
    (void) putnextctl1(q, M_CTL, PPPCTL_IERROR);
    return (NULL);
}

/*
 * comp_ccp()
 *
 * Description:
 *    Called by spppcomp_outpkt and spppcomp_inpkt to handle a CCP
 *    negotiation packet being sent or received. Here all the data in
 *    the packet is in a single mblk.
 *
 *    Global state is updated. Must be called with mutex held.
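 *
 *    Only a few CCP codes matter here: Configure-Request,
 *    Terminate-Request, and Terminate-Ack clear CCP_ISUP; Configure-Ack
 *    (re)initializes the compressor or decompressor; Reset-Ack resets it
 *    so that the dictionaries can resynchronize.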
 */
/* ARGSUSED */
static void
comp_ccp(queue_t *q, mblk_t *mp, sppp_comp_t *cp, boolean_t rcvd)
{
    int len;
    int clen;
    uchar_t *dp;

    ASSERT(q != NULL);
    ASSERT(q->q_ptr != NULL);
    ASSERT(mp != NULL);
    ASSERT(cp != NULL);

    len = msgsize(mp);
    if (len < PPP_HDRLEN + CCP_HDRLEN) {
        return;
    }
    dp = mp->b_rptr + PPP_HDRLEN;

    len -= PPP_HDRLEN;
    clen = CCP_LENGTH(dp);
    if (clen > len) {
        return;
    }

    CPDEBUG((DBGSTART "CCP code=%d flags=0x%b\n",
        (IS_CP_HASUNIT(cp) ? cp->cp_unit : -1), CCP_CODE(dp),
        cp->cp_flags, CP_FLAGSSTR));
    switch (CCP_CODE(dp)) {
    case CCP_CONFREQ:
    case CCP_TERMREQ:
    case CCP_TERMACK:
        cp->cp_flags &= ~CCP_ISUP;
        break;
    case CCP_CONFACK:
        if ((cp->cp_flags & (CCP_ISOPEN | CCP_ISUP)) == CCP_ISOPEN &&
            clen >= CCP_HDRLEN + CCP_OPT_MINLEN &&
            clen >= CCP_HDRLEN + CCP_OPT_LENGTH(dp + CCP_HDRLEN)) {

            int rc;

            if (!rcvd) {
                rc = (*cp->cp_xcomp->comp_init)(cp->cp_xstate,
                    dp + CCP_HDRLEN, clen - CCP_HDRLEN,
                    cp->cp_unit, 0,
                    IS_CP_KDEBUG(cp) | ALG_DEBUG);

                if (cp->cp_xstate != NULL && rc != 0) {
                    cp->cp_flags |= CCP_COMP_RUN;
                }
            } else {
                rc = (*cp->cp_rcomp->decomp_init)(cp->
                    cp_rstate, dp + CCP_HDRLEN,
                    clen - CCP_HDRLEN, cp->cp_unit, 0,
                    cp->cp_mru,
                    IS_CP_KDEBUG(cp) | ALG_DEBUG);

                if (cp->cp_rstate != NULL && rc != 0) {
                    cp->cp_flags &= ~CCP_ERR;
                    cp->cp_flags |= CCP_DECOMP_RUN;
                }
            }
        }
        break;
    case CCP_RESETACK:
        if (IS_CCP_ISUP(cp)) {
            if (!rcvd) {
                if (cp->cp_xstate != NULL &&
                    IS_CCP_COMP_RUN(cp)) {
                    (*cp->cp_xcomp->comp_reset)(cp->
                        cp_xstate);
                }
            } else {
                if (cp->cp_rstate != NULL &&
                    IS_CCP_DECOMP_RUN(cp)) {
                    (*cp->cp_rcomp->decomp_reset)(cp->
                        cp_rstate);
                    cp->cp_flags &= ~CCP_ERROR;
                }
            }
        }
        break;
    }
}

/*
 * spppcomp_kstat_update()
 *
 * Description:
 *    Update per-unit kstat statistics.
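 *
 *    The kstats created in spppcomp_mctl() can be read from userland
 *    with kstat(1M) under module COMP_MOD_NAME, class "net".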
 */
static int
spppcomp_kstat_update(kstat_t *ksp, int rw)
{
    register sppp_comp_t *cp;
    register spppcomp_kstats_t *cpkp;
    register struct vjstat *sp;
    register struct pppstat64 *psp;
    struct ppp_comp_stats csp;

    if (rw == KSTAT_WRITE) {
        return (EACCES);
    }

    cp = (sppp_comp_t *)ksp->ks_private;
    ASSERT(cp != NULL);

    cpkp = (spppcomp_kstats_t *)ksp->ks_data;
    bzero((caddr_t)&csp, sizeof (struct ppp_comp_stats));

    mutex_enter(&cp->cp_pair_lock);

    if (cp->cp_xstate != NULL) {
        (*cp->cp_xcomp->comp_stat)(cp->cp_xstate, &csp.c);
    }
    if (cp->cp_rstate != NULL) {
        (*cp->cp_rcomp->decomp_stat)(cp->cp_rstate, &csp.d);
    }

    sp = &cp->cp_vj.stats;

    cpkp->vj_out_pkts.value.ui32 = sp->vjs_packets;
    cpkp->vj_out_pkts_comp.value.ui32 = sp->vjs_compressed;
    cpkp->vj_cs_searches.value.ui32 = sp->vjs_searches;
    cpkp->vj_cs_misses.value.ui32 = sp->vjs_misses;
    cpkp->vj_in_pkts_uncomp.value.ui32 = sp->vjs_uncompressedin;
    cpkp->vj_in_pkts_comp.value.ui32 = sp->vjs_compressedin;
    cpkp->vj_in_error.value.ui32 = sp->vjs_errorin;
    cpkp->vj_in_tossed.value.ui32 = sp->vjs_tossed;

    psp = &cp->cp_stats;

    cpkp->out_bytes.value.ui64 = psp->ppp_obytes;
    cpkp->out_pkts.value.ui64 = psp->ppp_opackets;
    cpkp->out_errors.value.ui64 = psp->ppp_oerrors;
    cpkp->out_errors_low.value.ui32 = cp->cp_oerr_low;
    cpkp->out_uncomp_bytes.value.ui32 = csp.c.unc_bytes;
    cpkp->out_uncomp_pkts.value.ui32 = csp.c.unc_packets;
    cpkp->out_comp_bytes.value.ui32 = csp.c.comp_bytes;
    cpkp->out_comp_pkts.value.ui32 = csp.c.comp_packets;
    cpkp->out_incomp_bytes.value.ui32 = csp.c.inc_bytes;
    cpkp->out_incomp_pkts.value.ui32 = csp.c.inc_packets;

    cpkp->in_bytes.value.ui64 = psp->ppp_ibytes;
    cpkp->in_pkts.value.ui64 = psp->ppp_ipackets;
    cpkp->in_errors.value.ui64 = psp->ppp_ierrors;
    cpkp->in_errors_low.value.ui32 = cp->cp_ierr_low;
    cpkp->in_uncomp_bytes.value.ui32 = csp.d.unc_bytes;
    cpkp->in_uncomp_pkts.value.ui32 = csp.d.unc_packets;
    cpkp->in_comp_bytes.value.ui32 = csp.d.comp_bytes;
    cpkp->in_comp_pkts.value.ui32 = csp.d.comp_packets;
    cpkp->in_incomp_bytes.value.ui32 = csp.d.inc_bytes;
    cpkp->in_incomp_pkts.value.ui32 = csp.d.inc_packets;
#ifdef SPC_DEBUG
    cpkp->in_msg_ccp_pulledup.value.ui32 = cp->cp_imsg_ccp_pull;
    cpkp->in_msg_vj_pulledup.value.ui32 = cp->cp_imsg_vj_pull;
    cpkp->out_msg_pulledup.value.ui32 = cp->cp_omsg_pull;
    cpkp->out_msg_copied.value.ui32 = cp->cp_omsg_dcopy;
    cpkp->out_queued.value.ui32 = cp->cp_out_queued;
    cpkp->out_handled.value.ui32 = cp->cp_out_handled;
    cpkp->in_queued.value.ui32 = cp->cp_in_queued;
    cpkp->in_handled.value.ui32 = cp->cp_in_handled;
#endif
    mutex_exit(&cp->cp_pair_lock);
    return (0);
}