/*
 * spppcomp.c - STREAMS module for kernel-level compression and CCP support.
 *
 * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 *
 * Permission to use, copy, modify, and distribute this software and its
 * documentation is hereby granted, provided that the above copyright
 * notice appears in all copies.
 *
 * SUN MAKES NO REPRESENTATION OR WARRANTIES ABOUT THE SUITABILITY OF
 * THE SOFTWARE, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
 * TO THE IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
 * PARTICULAR PURPOSE, OR NON-INFRINGEMENT.  SUN SHALL NOT BE LIABLE FOR
 * ANY DAMAGES SUFFERED BY LICENSEE AS A RESULT OF USING, MODIFYING OR
 * DISTRIBUTING THIS SOFTWARE OR ITS DERIVATIVES
 *
 * Copyright (c) 1994 The Australian National University.
 * All rights reserved.
 *
 * Permission to use, copy, modify, and distribute this software and its
 * documentation is hereby granted, provided that the above copyright
 * notice appears in all copies.  This software is provided without any
 * warranty, express or implied.  The Australian National University
 * makes no representations about the suitability of this software for
 * any purpose.
 *
 * IN NO EVENT SHALL THE AUSTRALIAN NATIONAL UNIVERSITY BE LIABLE TO ANY
 * PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
 * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF
 * THE AUSTRALIAN NATIONAL UNIVERSITY HAS BEEN ADVISED OF THE POSSIBILITY
 * OF SUCH DAMAGE.
 *
 * THE AUSTRALIAN NATIONAL UNIVERSITY SPECIFICALLY DISCLAIMS ANY WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE.  THE SOFTWARE PROVIDED HEREUNDER IS
 * ON AN "AS IS" BASIS, AND THE AUSTRALIAN NATIONAL UNIVERSITY HAS NO
 * OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS,
 * OR MODIFICATIONS.
 *
 * This module is derived from the original SVR4 STREAMS PPP compression
 * module originally written by Paul Mackerras <paul.mackerras@cs.anu.edu.au>.
 *
 * James Carlson <james.d.carlson@sun.com> and Adi Masputra
 * <adi.masputra@sun.com> rewrote and restructured the code for improved
 * performance and scalability.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"
#define	RCSID	"$Id: spppcomp.c,v 1.0 2000/05/08 01:10:12 masputra Exp $"

#include <sys/types.h>
#include <sys/debug.h>
#include <sys/param.h>
#include <sys/stream.h>
#include <sys/stropts.h>
#include <sys/errno.h>
#include <sys/conf.h>
#include <sys/cpuvar.h>
#include <sys/cmn_err.h>
#include <sys/kmem.h>
#include <sys/ddi.h>
#include <sys/kstat.h>
#include <sys/strsun.h>
#include <sys/sysmacros.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <net/ppp_defs.h>
#include <net/pppio.h>
#include <net/vjcompress.h>

/* Defined for platform-neutral include file */
#define	PACKETPTR	mblk_t *
#include <net/ppp-comp.h>

#include "s_common.h"

#ifdef DEBUG
#define	SPC_DEBUG
#endif
#include "spppcomp.h"

/*
 * This is used to tag official Solaris sources.  Please do not define
 * "INTERNAL_BUILD" when building this software outside of Sun
 * Microsystems.
 */
#ifdef INTERNAL_BUILD
/* MODINFO is limited to 32 characters. */
const char spppcomp_module_description[] = "PPP 4.0 compression v%I%";
#else /* INTERNAL_BUILD */
const char spppcomp_module_description[] =
	"ANU PPP compression $Revision: 1.16$ ";

/* LINTED */
static const char buildtime[] = "Built " __DATE__ " at " __TIME__
#ifdef DEBUG
	" DEBUG"
#endif
	"\n";
#endif /* INTERNAL_BUILD */

static int	spppcomp_open(queue_t *, dev_t *, int, int, cred_t *);
static int	spppcomp_close(queue_t *, int, cred_t *);
static int	spppcomp_rput(queue_t *, mblk_t *);
static int	spppcomp_rsrv(queue_t *);
static int	spppcomp_wput(queue_t *, mblk_t *);
static int	spppcomp_wsrv(queue_t *);

#define	PPPCOMP_MI_MINPSZ	(0)
#define	PPPCOMP_MI_MAXPSZ	(INFPSZ)
#define	PPPCOMP_MI_HIWAT	(PPP_MTU * 20)
#define	PPPCOMP_MI_LOWAT	(PPP_MTU * 18)

static struct module_info spppcomp_modinfo = {
	COMP_MOD_ID,		/* mi_idnum */
	COMP_MOD_NAME,		/* mi_idname */
	PPPCOMP_MI_MINPSZ,	/* mi_minpsz */
	PPPCOMP_MI_MAXPSZ,	/* mi_maxpsz */
	PPPCOMP_MI_HIWAT,	/* mi_hiwat */
	PPPCOMP_MI_LOWAT	/* mi_lowat */
};

static struct qinit spppcomp_rinit = {
	spppcomp_rput,		/* qi_putp */
	spppcomp_rsrv,		/* qi_srvp */
	spppcomp_open,		/* qi_qopen */
	spppcomp_close,		/* qi_qclose */
	NULL,			/* qi_qadmin */
	&spppcomp_modinfo,	/* qi_minfo */
	NULL			/* qi_mstat */
};

static struct qinit spppcomp_winit = {
	spppcomp_wput,		/* qi_putp */
	spppcomp_wsrv,		/* qi_srvp */
	NULL,			/* qi_qopen */
	NULL,			/* qi_qclose */
	NULL,			/* qi_qadmin */
	&spppcomp_modinfo,	/* qi_minfo */
	NULL			/* qi_mstat */
};

struct streamtab spppcomp_tab = {
	&spppcomp_rinit,	/* st_rdinit */
	&spppcomp_winit,	/* st_wrinit */
	NULL,			/* st_muxrinit */
	NULL			/* st_muxwinit */
};

/* Set non-zero to debug algorithm-specific problems alone. */
#define	ALG_DEBUG	0

#define	MAX_IPHLEN	(0x0f << 2)
#define	MAX_TCPHLEN	(0x0f << 2)
#define	MAX_TCPIPHLEN	(MAX_IPHLEN + MAX_TCPHLEN)	/* max TCP/IP header size */
#define	MAX_VJHDR	(20)	/* max VJ compressed header size (?) */

#if 0
#define	DBGSTART	CE_CONT, COMP_MOD_NAME "%d: "
#define	CKDEBUG(x)	cmn_err x
#else
#define	DBGSTART	COMP_MOD_NAME "%d: "
#define	CKDEBUG(x)	printf x
#endif
#define	CPDEBUG(x)	(IS_CP_KDEBUG(cp) ? CKDEBUG(x) : (void)0)

/*
 * List of compressors we know about.
 */
#if DO_BSD_COMPRESS
extern struct compressor ppp_bsd_compress;
#endif
#if DO_DEFLATE
extern struct compressor ppp_deflate;
extern struct compressor ppp_deflate_draft;
#endif

struct compressor *ppp_compressors[] = {
#if DO_BSD_COMPRESS
	&ppp_bsd_compress,
#endif
#if DO_DEFLATE
	&ppp_deflate,
	&ppp_deflate_draft,
#endif
	NULL
};

/*
 * LCP_USE_DFLT() removed by James Carlson.  RFC 1661 section 6.6 has
 * this to say on the topic:
 *
 *    The Address and Control fields MUST NOT be compressed when sending
 *    any LCP packet.  This rule guarantees unambiguous recognition of
 *    LCP packets.
 */

static void	spppcomp_ioctl(queue_t *, mblk_t *, sppp_comp_t *);
static int	spppcomp_mctl(queue_t *, mblk_t *);
static mblk_t	*spppcomp_outpkt(queue_t *, mblk_t *);
static mblk_t	*spppcomp_inpkt(queue_t *, mblk_t *);
static int	spppcomp_kstat_update(kstat_t *, int);
static void	comp_ccp(queue_t *, mblk_t *, sppp_comp_t *, boolean_t);

/*
 * Values for checking inter-arrival times on interrupt stacks.  These
 * are used to prevent CPU hogging in interrupt context.
 */
#define	MIN_ARRIVAL_TIME	5000000	/* interarrival time in nanoseconds */
#define	MAX_FAST_ARRIVALS	10	/* maximum packet count */
hrtime_t spppcomp_min_arrival = MIN_ARRIVAL_TIME;

static const char *kstats_names[] = {
#ifdef SPCDEBUG_KSTATS_NAMES
	SPPPCOMP_KSTATS_NAMES,
	SPCDEBUG_KSTATS_NAMES
#else
	SPPPCOMP_KSTATS_NAMES
#endif
};
static const char *kstats64_names[] = { SPPPCOMP_KSTATS64_NAMES };

/*
 * spppcomp_open()
 *
 * MT-Perimeters:
 *    exclusive inner.
 *
 * Description:
 *    Common open procedure for module.
 */
/* ARGSUSED */
static int
spppcomp_open(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *credp)
{
	sppp_comp_t	*cp;

	ASSERT(q != NULL);
	ASSERT(devp != NULL);

	if (q->q_ptr != NULL) {
		return (0);
	}
	if (sflag != MODOPEN) {
		return (EINVAL);
	}
	cp = (sppp_comp_t *)kmem_zalloc(sizeof (sppp_comp_t), KM_SLEEP);
	ASSERT(cp != NULL);
	q->q_ptr = WR(q)->q_ptr = (caddr_t)cp;

	cp->cp_mru = PPP_MRU;
	cp->cp_mtu = PPP_MTU;

	mutex_init(&cp->cp_pair_lock, NULL, MUTEX_DRIVER, NULL);
	vj_compress_init(&cp->cp_vj, -1);
	cp->cp_nxslots = -1;
	cp->cp_effort = -1;

	qprocson(q);
	return (0);
}

/*
 * spppcomp_close()
 *
 * MT-Perimeters:
 *    exclusive inner.
 *
 * Description:
 *    Common close procedure for module.
 */
/* ARGSUSED */
static int
spppcomp_close(queue_t *q, int flag, cred_t *credp)
{
	sppp_comp_t	*cp;

	ASSERT(q != NULL);
	ASSERT(q->q_ptr != NULL);
	cp = (sppp_comp_t *)q->q_ptr;

	qprocsoff(q);

	CPDEBUG((DBGSTART "close flags=0x%b\n",
	    (IS_CP_HASUNIT(cp) ? cp->cp_unit : -1), cp->cp_flags,
	    CP_FLAGSSTR));
	mutex_destroy(&cp->cp_pair_lock);
	if (cp->cp_kstats) {
		ASSERT(IS_CP_HASUNIT(cp));
		kstat_delete(cp->cp_kstats);
	}
	if (cp->cp_xstate != NULL) {
		(*cp->cp_xcomp->comp_free)(cp->cp_xstate);
	}
	if (cp->cp_rstate != NULL) {
		(*cp->cp_rcomp->decomp_free)(cp->cp_rstate);
	}
	kmem_free(cp, sizeof (sppp_comp_t));
	q->q_ptr = WR(q)->q_ptr = NULL;

	return (0);
}
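
/*
 * Framing note for the routines below: the M_DATA messages handled by
 * this module are assumed to begin with an uncompressed PPP header of
 * PPP_HDRLEN (4) bytes -- address (PPP_ALLSTATIONS, 0xff), control
 * (PPP_UI, 0x03), and a two-byte protocol field.  For example, an IP
 * datagram arrives from above roughly as
 *
 *	0xff 0x03 0x00 0x21 <IP header> ...
 *
 * spppcomp_outpkt() may shorten that header on the way out when the
 * peer has negotiated address/control and/or protocol field
 * compression, and spppcomp_inpkt() rebuilds it on the way in.
 */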

/*
 * spppcomp_wput()
 *
 * MT-Perimeters:
 *    exclusive inner.
 *
 * Description:
 *    Write-side put procedure.  Packets from above us arrive here.
 *
 *	The data handling logic is a little tricky here.  We defer to
 *	the service routine if q_first isn't NULL (to preserve message
 *	ordering after deferring a previous message), bcanputnext() is
 *	FALSE (to handle flow control), or we need a lot of processing
 *	and we're in an interrupt context (on the theory that we're
 *	already on a very long call stack at that point).  Since many
 *	callers will be in a non-interrupt context, this means that
 *	most processing will be performed here in-line, and deferral
 *	occurs only when necessary.
 */
static int
spppcomp_wput(queue_t *q, mblk_t *mp)
{
	sppp_comp_t	*cp;
	int		flag;

	ASSERT(q != NULL);
	ASSERT(q->q_ptr != NULL);
	cp = (sppp_comp_t *)q->q_ptr;
	ASSERT(mp != NULL && mp->b_rptr != NULL);

	switch (MTYPE(mp)) {
	case M_DATA:
		if (q->q_first != NULL || !bcanputnext(q, mp->b_band) ||
		    ((cp->cp_flags & (COMP_VJC|CCP_COMP_RUN)) &&
		    servicing_interrupt())) {
#ifdef SPC_DEBUG
			cp->cp_out_queued++;
#endif
			(void) putq(q, mp);
		} else {
#ifdef SPC_DEBUG
			cp->cp_out_handled++;
#endif
			if ((mp = spppcomp_outpkt(q, mp)) != NULL) {
				putnext(q, mp);
			}
		}
		break;
	case M_IOCTL:
		spppcomp_ioctl(q, mp, cp);
		break;
	case M_CTL:
		mutex_enter(&cp->cp_pair_lock);
		flag = spppcomp_mctl(q, mp);
		mutex_exit(&cp->cp_pair_lock);
		if (flag != 0)
			putnext(q, mp);
		else
			freemsg(mp);
		break;
	case M_FLUSH:
		CPDEBUG((DBGSTART "wput M_FLUSH (0x%x) flags=0x%b\n",
		    (IS_CP_HASUNIT(cp) ? cp->cp_unit : -1),
		    *mp->b_rptr, cp->cp_flags, CP_FLAGSSTR));
		/*
		 * Just discard pending data.  For CCP, any compressor
		 * dictionary sequencing problems caused by this will
		 * have to be handled by the compression protocol in
		 * use.  For VJ, we need to tell the compressor to
		 * start over.
		 */
		if (*mp->b_rptr & FLUSHW) {
			mutex_enter(&cp->cp_pair_lock);
			flushq(q, FLUSHDATA);
			vj_compress_init(&cp->cp_vj, cp->cp_nxslots);
			mutex_exit(&cp->cp_pair_lock);
		}
		putnext(q, mp);
		break;
	default:
		putnext(q, mp);
		break;
	}
	return (0);
}

/*
 * spppcomp_wsrv()
 *
 * MT-Perimeters:
 *    exclusive inner
 *
 * Description:
 *    Write-side service procedure.
 */
static int
spppcomp_wsrv(queue_t *q)
{
	mblk_t		*mp;

	ASSERT(q != NULL);
	ASSERT(q->q_ptr != NULL);

	while ((mp = getq(q)) != NULL) {
		/* We should only place M_DATA on the service queue. */
		ASSERT(MTYPE(mp) == M_DATA);
		/*
		 * If the module below us is flow-controlled, then put
		 * this message back on the queue again.
		 */
		if (!bcanputnext(q, mp->b_band)) {
			(void) putbq(q, mp);
			break;
		}
		if ((mp = spppcomp_outpkt(q, mp)) != NULL) {
			putnext(q, mp);
		}
	}
	return (0);
}

/*
 * spppcomp_outpkt()
 *
 * MT-Perimeters:
 *    exclusive inner
 *
 * Description:
 *    Process outgoing packet.  Returns new mblk_t pointer on success
 *    (caller should do putnext through q), NULL on failure (packet has
 *    been discarded).
 */
static mblk_t *
spppcomp_outpkt(queue_t *q, mblk_t *mp)
{
	mblk_t		*zmp;
	int		len;
	ushort_t	proto;
	sppp_comp_t	*cp;

	ASSERT(q != NULL);
	ASSERT(mp != NULL);
	cp = (sppp_comp_t *)q->q_ptr;
	ASSERT(cp != NULL);

	/*
	 * If the entire data size of the mblk is less than the length of the
	 * PPP header, then free it.  We can't do much with such a message
	 * anyway, since we can't determine what the PPP protocol is.
	 */
	len = msgsize(mp);
	if (MBLKL(mp) < PPP_HDRLEN) {
#ifdef SPC_DEBUG
		mutex_enter(&cp->cp_pair_lock);
		cp->cp_omsg_pull++;
		mutex_exit(&cp->cp_pair_lock);
#endif
		zmp = msgpullup(mp, PPP_HDRLEN);
		freemsg(mp);
		if ((mp = zmp) == NULL)
			goto msg_oerror;
	}

	proto = PPP_PROTOCOL(mp->b_rptr);

	/*
	 * Do VJ compression if requested.
	 */
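	/*
	 * A rough sketch of what vj_compress_tcp() (the common VJ code
	 * shared with the other PPP modules) does with the header handed
	 * to it via srcp below: it may rewrite the TCP/IP header in place
	 * and returns one of TYPE_IP (leave the packet alone),
	 * TYPE_UNCOMPRESSED_TCP (send the full header, but with a
	 * connection slot ID stored in place of ip_p), or
	 * TYPE_COMPRESSED_TCP (vjhdr points at a much smaller
	 * delta-encoded header).  The switch statement below patches the
	 * PPP protocol field and trims the message accordingly.
	 */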
	if (proto == PPP_IP && IS_COMP_VJC(cp) &&
	    MSG_BYTE(mp, PPP_HDRLEN+offsetof(struct ip, ip_p)) ==
	    IPPROTO_TCP) {
		uchar_t		*vjhdr;
		int		type;
		uint32_t	indata[(PPP_HDRLEN+MAX_TCPIPHLEN) /
		    sizeof (uint32_t)];
		uchar_t		*dp;
		int		tocopy, copied;
		mblk_t		*fmb;
		void		*srcp;
		int		thislen;

		tocopy = copied = MIN(len, sizeof (indata));
		/*
		 * If we can alter this dblk, and there's enough data
		 * here to work with, and it's nicely aligned, then
		 * avoid the data copy.
		 */
		if (DB_REF(mp) == 1 && MBLKL(mp) >= tocopy &&
		    ((uintptr_t)mp->b_rptr & 3) == 0) {
			/* Save off the address/control */
			indata[0] = *(uint32_t *)mp->b_rptr;
			srcp = (void *)(mp->b_rptr + PPP_HDRLEN);
		} else {
			fmb = mp;
			dp = (uchar_t *)indata;
			while (tocopy > 0) {
				thislen = MBLKL(fmb);
				if (tocopy > thislen) {
					bcopy(fmb->b_rptr, dp, thislen);
					dp += thislen;
					tocopy -= thislen;
					fmb = fmb->b_cont;
				} else {
					bcopy(fmb->b_rptr, dp, tocopy);
					break;
				}
			}
			srcp = (void *)(indata + PPP_HDRLEN/sizeof (*indata));
		}

		type = vj_compress_tcp((struct ip *)srcp, len - PPP_HDRLEN,
		    &cp->cp_vj, IS_COMP_VJCCID(cp), &vjhdr);

		/*
		 * If we're going to modify this packet, then we can't modify
		 * someone else's data.  Copy instead.
		 *
		 * (It would be nice to be able to avoid this data copy if CCP
		 * is also enabled.  That would require extensive
		 * modifications to the compression code.  Users should be
		 * told to disable VJ compression when using CCP.)
		 */
		if (type != TYPE_IP && DB_REF(mp) > 1) {
#ifdef SPC_DEBUG
			mutex_enter(&cp->cp_pair_lock);
			cp->cp_omsg_dcopy++;
			mutex_exit(&cp->cp_pair_lock);
#endif
			/* Copy just altered portion. */
			zmp = msgpullup(mp, copied);
			freemsg(mp);
			if ((mp = zmp) == NULL)
				goto msg_oerror;
		}

		switch (type) {
		case TYPE_UNCOMPRESSED_TCP:
			mp->b_rptr[3] = proto = PPP_VJC_UNCOMP;
			/* No need to update if it was done in place. */
			if (srcp ==
			    (void *)(indata + PPP_HDRLEN / sizeof (*indata))) {
				thislen = PPP_HDRLEN +
				    offsetof(struct ip, ip_p);
				zmp = mp;
				while (zmp != NULL) {
					if (MBLKL(zmp) > thislen) {
						zmp->b_rptr[thislen] =
						    ((struct ip *)srcp)->ip_p;
						break;
					}
					thislen -= MBLKL(zmp);
					zmp = zmp->b_cont;
				}
			}
			break;

		case TYPE_COMPRESSED_TCP:
			/* Calculate amount to remove from front */
			thislen = vjhdr - (uchar_t *)srcp;
			ASSERT(thislen >= 0);

			/* Try to do a cheap adjmsg by arithmetic first. */
			dp = mp->b_rptr + thislen;
			if (dp > mp->b_wptr) {
				if (!adjmsg(mp, thislen)) {
					freemsg(mp);
					goto msg_oerror;
				}
				dp = mp->b_rptr;
			}

			/*
			 * Now make sure first block is big enough to
			 * receive modified data.  If we modified in
			 * place, then no need to check or copy.
			 */
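			/*
			 * The code below rebuilds the start of the message
			 * as a VJ compressed-TCP frame; illustratively, the
			 * leading bytes end up as
			 *
			 *	0xff 0x03 0x00 PPP_VJC_COMP <VJ header> ...
			 *
			 * with the VJ header copied from vjhdr when the data
			 * was staged in indata[] rather than modified in
			 * place.
			 */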
			copied -= thislen;
			ASSERT(copied >= PPP_HDRLEN);
			if (srcp !=
			    (void *)(indata + PPP_HDRLEN / sizeof (*indata)))
				copied = 0;
			mp->b_rptr = dp;
			if (MBLKL(mp) < copied) {
				zmp = msgpullup(mp, copied);
				freemsg(mp);
				if ((mp = zmp) == NULL)
					goto msg_oerror;
				dp = mp->b_rptr;
			}

			*dp++ = ((uchar_t *)indata)[0];	/* address */
			*dp++ = ((uchar_t *)indata)[1];	/* control */
			*dp++ = 0;			/* protocol */
			*dp++ = proto = PPP_VJC_COMP;	/* protocol */
			copied -= PPP_HDRLEN;
			if (copied > 0) {
				bcopy(vjhdr, dp, copied);
			}
			break;
		}
	}

	/*
	 * Do packet compression if enabled.
	 */
	if (proto == PPP_CCP) {
		/*
		 * Handle any negotiation packets by changing compressor
		 * state.  Doing this here rather than with an ioctl keeps
		 * the negotiation and the data flow in sync.
		 */
		mutex_enter(&cp->cp_pair_lock);
		comp_ccp(q, mp, cp, B_FALSE);
		mutex_exit(&cp->cp_pair_lock);
	} else if (proto != PPP_LCP && IS_CCP_COMP_RUN(cp) &&
	    cp->cp_xstate != NULL) {
		mblk_t	*cmp = NULL;

		len = msgsize(mp);
		len = (*cp->cp_xcomp->compress)(cp->cp_xstate, &cmp, mp, len,
		    (IS_CCP_ISUP(cp) ? cp->cp_mtu + PPP_HDRLEN : 0));

		if (cmp != NULL) {
			/* Success!  Discard uncompressed version */
			cmp->b_band = mp->b_band;
			freemsg(mp);
			mp = cmp;
		}
		if (len < 0) {
			/*
			 * Compressor failure; must discard this
			 * packet because the compressor dictionary is
			 * now corrupt.
			 */
			freemsg(mp);
			mutex_enter(&cp->cp_pair_lock);
			cp->cp_stats.ppp_oerrors++;
			mutex_exit(&cp->cp_pair_lock);
			(void) putnextctl1(RD(q), M_CTL, PPPCTL_OERROR);
			return (NULL);
		}
	}

	/*
	 * If either address and control field compression or protocol field
	 * compression is enabled, then we'll need a writable packet.  Copy if
	 * necessary.
	 */
	if ((cp->cp_flags & (COMP_AC|COMP_PROT)) && DB_REF(mp) > 1) {
#ifdef SPC_DEBUG
		mutex_enter(&cp->cp_pair_lock);
		cp->cp_omsg_dcopy++;
		mutex_exit(&cp->cp_pair_lock);
#endif
		zmp = copymsg(mp);
		freemsg(mp);
		if ((mp = zmp) == NULL)
			goto msg_oerror;
	}

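	/*
	 * Illustrative examples of the header compression applied below,
	 * assuming a standard IP frame (ff 03 00 21 ...):
	 *
	 *	COMP_AC only:			00 21 ...
	 *	COMP_AC and COMP_PROT:		21 ...
	 *	COMP_PROT only:			ff 03 21 ...
	 *
	 * LCP frames are never touched, per RFC 1661 section 6.6, and only
	 * protocols below 0x100 can have the high protocol byte dropped.
	 */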
	/*
	 * Do address/control and protocol compression if enabled.
	 */
	if (IS_COMP_AC(cp) && (proto != PPP_LCP)) {
		mp->b_rptr += 2;	/* drop address & ctrl fields */
		/*
		 * Protocol field compression omits the first byte if
		 * it would be 0x00, thus the check for < 0x100.
		 */
		if (proto < 0x100 && IS_COMP_PROT(cp)) {
			++mp->b_rptr;	/* drop high protocol byte */
		}
	} else if ((proto < 0x100) && IS_COMP_PROT(cp)) {
		/*
		 * shuffle up the address & ctrl fields
		 */
		mp->b_rptr[2] = mp->b_rptr[1];
		mp->b_rptr[1] = mp->b_rptr[0];
		++mp->b_rptr;
	}
	mutex_enter(&cp->cp_pair_lock);
	cp->cp_stats.ppp_opackets++;
	cp->cp_stats.ppp_obytes += msgsize(mp);
	mutex_exit(&cp->cp_pair_lock);

	CPDEBUG((DBGSTART "send (%ld bytes) flags=0x%b\n",
	    (IS_CP_HASUNIT(cp) ? cp->cp_unit : -1), msgsize(mp),
	    cp->cp_flags, CP_FLAGSSTR));
	return (mp);

msg_oerror:
	mutex_enter(&cp->cp_pair_lock);
	cp->cp_stats.ppp_oerrors++;
	mutex_exit(&cp->cp_pair_lock);
	(void) putnextctl1(RD(q), M_CTL, PPPCTL_OERROR);
	return (NULL);
}

/*
 * spppcomp_inner_ioctl()
 *
 * MT-Perimeters:
 *    exclusive inner; queue pair lock held.
 *
 * Description:
 *    Called by spppcomp_ioctl to handle state-affecting ioctls.
 *    Returns -1 if caller should do putnext, 0 for miocack, or >0
 *    for miocnak.  Must *NOT* do putnext in this routine, since
 *    lock is held here.
 */
static int
spppcomp_inner_ioctl(queue_t *q, mblk_t *mp)
{
	sppp_comp_t	*cp;
	int		flags;
	int		mask;
	int		rc;
	int		len;
	int		cmd;
	int		nxslots;
	int		nrslots;
	int		val;
	uchar_t		*opt_data;
	uint32_t	opt_len;
	struct compressor **comp;
	struct compressor *ccomp;
	struct iocblk	*iop;
	void		*xtemp;

	ASSERT(q != NULL);
	ASSERT(q->q_ptr != NULL);
	cp = (sppp_comp_t *)q->q_ptr;
	ASSERT(mp != NULL);
	ASSERT(mp->b_rptr != NULL);

	iop = (struct iocblk *)mp->b_rptr;
	rc = EINVAL;
	len = 0;
	switch (iop->ioc_cmd) {
	case PPPIO_CFLAGS:
		if (iop->ioc_count != 2 * sizeof (uint32_t) ||
		    mp->b_cont == NULL)
			break;

		flags = ((uint32_t *)mp->b_cont->b_rptr)[0];
		mask = ((uint32_t *)mp->b_cont->b_rptr)[1];

		cp->cp_flags = (cp->cp_flags & ~mask) | (flags & mask);

		if ((mask & CCP_ISOPEN) && (flags & CCP_ISOPEN) == 0) {
			if (cp->cp_xstate != NULL) {
				(*cp->cp_xcomp->comp_free)(cp->cp_xstate);
				cp->cp_xstate = NULL;
			}
			if (cp->cp_rstate != NULL) {
				(*cp->cp_rcomp->decomp_free)(cp->cp_rstate);
				cp->cp_rstate = NULL;
			}
			cp->cp_flags &= ~CCP_ISUP;
		}

		CPDEBUG((DBGSTART
		    "PPPIO_CFLAGS xflags=0x%b xmask=0x%b flags=0x%b\n",
		    (IS_CP_HASUNIT(cp) ? cp->cp_unit : -1),
		    flags, CP_FLAGSSTR, mask,
		    CP_FLAGSSTR, cp->cp_flags, CP_FLAGSSTR));

		/* If we're not the last PPP-speaker, then pass along. */
		if (!IS_CP_LASTMOD(cp)) {
			return (-1);	/* putnext */
		}

		*(uint32_t *)mp->b_cont->b_rptr = cp->cp_flags;
		len = sizeof (uint32_t);
		rc = 0;
		break;

	case PPPIO_VJINIT:
		if (iop->ioc_count != 2 || mp->b_cont == NULL)
			break;
		/*
		 * Even though it's not passed along, we have to
		 * validate nrslots so that we don't agree to
		 * decompress anything we cannot.
		 */
		nxslots = mp->b_cont->b_rptr[0] + 1;
		nrslots = mp->b_cont->b_rptr[1] + 1;
		if (nxslots > MAX_STATES || nrslots > MAX_STATES)
			break;

		/* No need to lock here; just reading a word is atomic */
		/* mutex_enter(&cp->cp_pair_lock); */
		cp->cp_vj_last_ierrors = cp->cp_stats.ppp_ierrors;
		/* mutex_exit(&cp->cp_pair_lock); */
		vj_compress_init(&cp->cp_vj, nxslots);
		cp->cp_nxslots = nxslots;

		CPDEBUG((DBGSTART
		    "PPPIO_VJINIT txslots=%d rxslots=%d flags=0x%b\n",
		    (IS_CP_HASUNIT(cp) ? cp->cp_unit : -1), nxslots,
		    nrslots, cp->cp_flags, CP_FLAGSSTR));
		rc = 0;
		break;

	case PPPIO_XCOMP:
	case PPPIO_RCOMP:
		if (iop->ioc_count < 2 || mp->b_cont == NULL)
			break;
		/*
		 * The input data here is the raw CCP algorithm option
		 * from negotiation.  The format is always one byte of
		 * algorithm number, one byte of length, and
		 * (length-2) bytes of algorithm-dependent data.  The
		 * alloc routine is expected to parse and validate
		 * this.
		 */
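		/*
		 * For instance, a Deflate (RFC 1979) option arrives here as
		 * roughly { CI_DEFLATE, 4, <window/method byte>, 0 }, and a
		 * BSD-Compress (RFC 1977) option as { CI_BSD_COMPRESS, 3,
		 * <version/bits byte> }; those CI_* names are the ones used
		 * in the pppd sources and are shown only for illustration.
		 * Only the matching compressor's alloc routine interprets
		 * the data bytes.
		 */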
		opt_data = mp->b_cont->b_rptr;
		opt_len = mp->b_cont->b_wptr - opt_data;
		if (opt_len > iop->ioc_count) {
			opt_len = iop->ioc_count;
		}
		len = mp->b_cont->b_rptr[1];
		if (len < 2 || len > opt_len)
			break;
		len = 0;
		for (comp = ppp_compressors; *comp != NULL; ++comp) {

			if ((*comp)->compress_proto != opt_data[0]) {
				continue;
			}
			rc = 0;
			if (iop->ioc_cmd == PPPIO_XCOMP) {
				/*
				 * A previous call may have fetched
				 * memory for a compressor that's now
				 * being retired or reset.  Free it
				 * using its mechanism for freeing
				 * stuff.
				 */
				if ((xtemp = cp->cp_xstate) != NULL) {
					cp->cp_xstate = NULL;
					(*cp->cp_xcomp->comp_free)(xtemp);
				}
				cp->cp_xcomp = *comp;
				cp->cp_xstate = (*comp)->comp_alloc(opt_data,
				    opt_len);

				if (cp->cp_xstate == NULL) {
					rc = ENOSR;
				}

				CPDEBUG((DBGSTART "PPPIO_XCOMP opt_proto=0x%x "
				    "opt_len=%d flags=0x%b\n",
				    (IS_CP_HASUNIT(cp) ? cp->cp_unit : -1),
				    (uchar_t)opt_data[0], opt_len,
				    cp->cp_flags,
				    CP_FLAGSSTR));
			} else {
				if ((xtemp = cp->cp_rstate) != NULL) {
					cp->cp_rstate = NULL;
					(*cp->cp_rcomp->decomp_free)(xtemp);
				}
				cp->cp_rcomp = *comp;
				cp->cp_rstate =
				    (*comp)->decomp_alloc(opt_data, opt_len);

				if (cp->cp_rstate == NULL) {
					rc = ENOSR;
				}

				CPDEBUG((DBGSTART "PPPIO_RCOMP opt_proto=0x%x "
				    "opt_len=%d flags=0x%b\n",
				    (IS_CP_HASUNIT(cp) ? cp->cp_unit : -1),
				    (uchar_t)opt_data[0], opt_len,
				    cp->cp_flags,
				    CP_FLAGSSTR));
			}
			if (rc == 0 && (*comp)->set_effort != NULL) {
				rc = (*(*comp)->set_effort)(cp->cp_xcomp ==
				    *comp ? cp->cp_xstate : NULL,
				    cp->cp_rcomp == *comp ? cp->cp_rstate :
				    NULL, cp->cp_effort);
				if (rc != 0) {
					CKDEBUG((DBGSTART
					    "cannot set effort %d",
					    cp->cp_unit, cp->cp_effort));
					rc = 0;
				}
			}
			break;
		}
		break;

	case PPPIO_DEBUG:
		if (iop->ioc_count != sizeof (uint32_t) || mp->b_cont == NULL)
			break;

		cmd = *(uint32_t *)mp->b_cont->b_rptr;

		/* If it's not for us, then pass along. */
		if (cmd != PPPDBG_LOG + PPPDBG_COMP) {
			return (-1);	/* putnext */
		}
		cp->cp_flags |= CP_KDEBUG;

		CKDEBUG((DBGSTART "PPPIO_DEBUG log enabled flags=0x%b\n",
		    (IS_CP_HASUNIT(cp) ? cp->cp_unit : -1),
		    cp->cp_flags, CP_FLAGSSTR));
		rc = 0;
		break;

	case PPPIO_LASTMOD:
		cp->cp_flags |= CP_LASTMOD;
		CPDEBUG((DBGSTART "PPPIO_LASTMOD last module flags=0x%b\n",
		    (IS_CP_HASUNIT(cp) ? cp->cp_unit : -1),
		    cp->cp_flags, CP_FLAGSSTR));
		rc = 0;
		break;

	case PPPIO_COMPLEV:	/* set compression effort level */
		if (iop->ioc_count != sizeof (uint32_t) || mp->b_cont == NULL)
			break;
		val = *(uint32_t *)mp->b_cont->b_rptr;
		cp->cp_effort = val;
		/* Silently ignore if compressor doesn't understand this. */
		rc = 0;
		if ((ccomp = cp->cp_xcomp) != NULL &&
		    ccomp->set_effort != NULL) {
			rc = (*ccomp->set_effort)(cp->cp_xstate,
			    ccomp == cp->cp_rcomp ? cp->cp_rstate : NULL, val);
			if (rc != 0)
				break;
		}
		if ((ccomp = cp->cp_rcomp) != NULL && ccomp != cp->cp_xcomp &&
		    ccomp->set_effort != NULL)
			rc = (*ccomp->set_effort)(NULL, cp->cp_rstate, val);
		break;
	}
	if (rc == 0 && mp->b_cont != NULL)
		mp->b_cont->b_wptr = mp->b_cont->b_rptr + len;
	return (rc);
}

/*
 * spppcomp_getcstat()
 *
 * MT-Perimeters:
 *    exclusive inner.
 *
 * Description:
 *    Called by spppcomp_ioctl as the result of receiving a PPPIO_GETCSTAT.
 */
static void
spppcomp_getcstat(queue_t *q, mblk_t *mp, sppp_comp_t *cp)
{
	mblk_t		*mpnext;
	struct ppp_comp_stats	*csp;

	ASSERT(q != NULL);
	ASSERT(q->q_ptr != NULL);
	ASSERT(mp != NULL);
	ASSERT(mp->b_rptr != NULL);
	ASSERT(cp != NULL);

	mpnext = allocb(sizeof (struct ppp_comp_stats), BPRI_MED);
	if (mpnext == NULL) {
		miocnak(q, mp, 0, ENOSR);
		return;
	}
	if (mp->b_cont != NULL) {
		freemsg(mp->b_cont);
	}
	mp->b_cont = mpnext;
	csp = (struct ppp_comp_stats *)mpnext->b_wptr;
	mpnext->b_wptr += sizeof (struct ppp_comp_stats);
	bzero((caddr_t)csp, sizeof (struct ppp_comp_stats));

	if (cp->cp_xstate != NULL) {
		(*cp->cp_xcomp->comp_stat)(cp->cp_xstate, &csp->c);
	}
	if (cp->cp_rstate != NULL) {
		(*cp->cp_rcomp->decomp_stat)(cp->cp_rstate, &csp->d);
	}

	miocack(q, mp, sizeof (struct ppp_comp_stats), 0);
}

/*
 * spppcomp_ioctl()
 *
 * MT-Perimeters:
 *    exclusive inner.
 *
 * Description:
 *    Called by spppcomp_wput as the result of receiving an M_IOCTL
 *    command.
 */
static void
spppcomp_ioctl(queue_t *q, mblk_t *mp, sppp_comp_t *cp)
{
	struct iocblk	*iop;
	int		flag;

	ASSERT(q != NULL);
	ASSERT(q->q_ptr != NULL);
	ASSERT(mp != NULL);
	ASSERT(mp->b_rptr != NULL);
	ASSERT(cp != NULL);

	iop = (struct iocblk *)mp->b_rptr;
	switch (iop->ioc_cmd) {
	case PPPIO_CFLAGS:
	case PPPIO_VJINIT:
	case PPPIO_XCOMP:
	case PPPIO_RCOMP:
	case PPPIO_DEBUG:
	case PPPIO_LASTMOD:
	case PPPIO_COMPLEV:
		mutex_enter(&cp->cp_pair_lock);
		flag = spppcomp_inner_ioctl(q, mp);
		mutex_exit(&cp->cp_pair_lock);
		if (flag == -1) {
			putnext(q, mp);
		} else if (flag == 0) {
			miocack(q, mp,
			    mp->b_cont == NULL ? 0 : MBLKL(mp->b_cont), 0);
		} else {
			miocnak(q, mp, 0, flag);
		}
		break;

	case PPPIO_GETCSTAT:
		spppcomp_getcstat(q, mp, cp);
		break;

	case PPPIO_GTYPE:	/* get existing driver type */
		if (!IS_CP_LASTMOD(cp)) {
			putnext(q, mp);
			break;
		}
		freemsg(mp->b_cont);
		mp->b_cont = allocb(sizeof (uint32_t), BPRI_MED);
		if (mp->b_cont == NULL) {
			miocnak(q, mp, 0, ENOSR);
		} else {
			*(uint32_t *)mp->b_cont->b_wptr = PPPTYP_HC;
			mp->b_cont->b_wptr += sizeof (uint32_t);
			miocack(q, mp, sizeof (uint32_t), 0);
		}
		break;

	default:
		putnext(q, mp);
		break;
	}
}

/*
 * spppcomp_mctl()
 *
 * MT-Perimeters:
 *    exclusive inner; queue pair lock held.
 *
 * Description:
 *    Called by spppcomp_wput as the result of receiving an M_CTL
 *    message from another STREAMS module, and returns non-zero if
 *    caller should do putnext or zero for freemsg.  Must *NOT* do
 *    putnext in this routine, since lock is held here.
 */
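/*
 * Layout of the known M_CTL messages, as consumed below: the first byte
 * of the message is the PPPCTL_* code.  PPPCTL_MTU and PPPCTL_MRU carry
 * a 16-bit value in bytes 2-3 (read as ((ushort_t *)b_rptr)[1]), and
 * PPPCTL_UNIT carries a 32-bit unit number in bytes 4-7 (read as
 * ((uint32_t *)b_rptr)[1]).  These are typically generated by the other
 * PPP-aware modules and drivers in the stream.
 */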
static int
spppcomp_mctl(queue_t *q, mblk_t *mp)
{
	sppp_comp_t	*cp;
	kstat_t		*ksp;
	char		unit[32];
	const char	**cpp;
	kstat_named_t	*knt;

	ASSERT(q != NULL);
	ASSERT(q->q_ptr != NULL);
	cp = (sppp_comp_t *)q->q_ptr;
	ASSERT(mp != NULL);
	ASSERT(mp->b_rptr != NULL);

	switch (*mp->b_rptr) {
	case PPPCTL_MTU:
		if (MBLKL(mp) < 4) {
			break;
		}
		cp->cp_mtu = ((ushort_t *)mp->b_rptr)[1];

		CPDEBUG((DBGSTART "PPPCTL_MTU (%d) flags=0x%b\n",
		    (IS_CP_HASUNIT(cp) ? cp->cp_unit : -1),
		    cp->cp_mtu, cp->cp_flags, CP_FLAGSSTR));
		break;
	case PPPCTL_MRU:
		if (MBLKL(mp) < 4) {
			break;
		}
		cp->cp_mru = ((ushort_t *)mp->b_rptr)[1];

		CPDEBUG((DBGSTART "PPPCTL_MRU (%d) flags=0x%b\n",
		    (IS_CP_HASUNIT(cp) ? cp->cp_unit : -1),
		    cp->cp_mru, cp->cp_flags, CP_FLAGSSTR));
		break;
	case PPPCTL_UNIT:
		if (MBLKL(mp) < 8) {
			break;
		}
		/* If PPPCTL_UNIT has already been issued, then ignore. */
		if (IS_CP_HASUNIT(cp)) {
			break;
		}
		ASSERT(cp->cp_kstats == NULL);
		cp->cp_unit = ((uint32_t *)mp->b_rptr)[1];

		/* Create kstats for this unit. */
		(void) sprintf(unit, "%s%d", COMP_MOD_NAME, cp->cp_unit);
		ksp = kstat_create(COMP_MOD_NAME, cp->cp_unit, unit, "net",
		    KSTAT_TYPE_NAMED, sizeof (spppcomp_kstats_t) /
		    sizeof (kstat_named_t), 0);

		if (ksp != NULL) {
			cp->cp_flags |= CP_HASUNIT;
			cp->cp_kstats = ksp;

			knt = (kstat_named_t *)ksp->ks_data;
			for (cpp = kstats_names;
			    cpp < kstats_names + Dim(kstats_names); cpp++) {
				kstat_named_init(knt, (char *)*cpp,
				    KSTAT_DATA_UINT32);
				knt++;
			}
			for (cpp = kstats64_names;
			    cpp < kstats64_names + Dim(kstats64_names);
			    cpp++) {
				kstat_named_init(knt, (char *)*cpp,
				    KSTAT_DATA_UINT64);
				knt++;
			}
			ksp->ks_update = spppcomp_kstat_update;
			ksp->ks_private = (void *)cp;
			kstat_install(ksp);

			CPDEBUG((DBGSTART "PPPCTL_UNIT flags=0x%b\n",
			    cp->cp_unit, cp->cp_flags, CP_FLAGSSTR));
		}
		break;

	default:
		/* Forward unknown M_CTL messages along */
		return (1);
	}

	/*
	 * For known PPP M_CTL messages, forward along only if we're not the
	 * last PPP-aware module.
	 */
	if (IS_CP_LASTMOD(cp))
		return (0);
	return (1);
}

/*
 * spppcomp_rput()
 *
 * MT-Perimeters:
 *    exclusive inner.
 *
 * Description:
 *    Upper read-side put procedure.  Messages get here from below.
 *
 *	The data handling logic is a little more tricky here.  We
 *	defer to the service routine if q_first isn't NULL (to
 *	preserve message ordering after deferring a previous message),
 *	bcanputnext() is FALSE (to handle flow control), or we have
 *	done a lot of processing recently and we're about to do a lot
 *	more and we're in an interrupt context (on the theory that
 *	we're hogging the CPU in this case).
 */
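/*
 * The pacing heuristic used below: when running on an interrupt stack,
 * packets that arrive less than spppcomp_min_arrival (5 ms by default)
 * after the previous one finished bump cp_fastin; once MAX_FAST_ARRIVALS
 * (10) such back-to-back arrivals are seen while CCP or VJ decompression
 * is active, further packets are deferred to the service routine rather
 * than decompressed at interrupt level.
 */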
static int
spppcomp_rput(queue_t *q, mblk_t *mp)
{
	sppp_comp_t		*cp;
	struct iocblk		*iop;
	struct ppp_stats64	*psp;
	boolean_t		inter;
	hrtime_t		curtime;

	ASSERT(q != NULL);
	ASSERT(q->q_ptr != NULL);
	cp = (sppp_comp_t *)q->q_ptr;
	ASSERT(mp != NULL);

	switch (MTYPE(mp)) {
	case M_DATA:
		inter = servicing_interrupt();
		if (inter) {
			curtime = gethrtime();

			/*
			 * If little time has passed since last
			 * arrival, then bump the counter.
			 */
			if (curtime - cp->cp_lastfinish < spppcomp_min_arrival)
				cp->cp_fastin++;
			else
				cp->cp_fastin >>= 1;	/* a guess */
		}
		/*
		 * If we're not decompressing, then we'll be fast, so
		 * we don't have to worry about hogging here.  If we
		 * are decompressing, then we have to check the
		 * cp_fastin count.
		 */
		if ((!(cp->cp_flags & (CCP_DECOMP_RUN | DECOMP_VJC)) ||
		    cp->cp_fastin < MAX_FAST_ARRIVALS) &&
		    q->q_first == NULL && bcanputnext(q, mp->b_band)) {
#ifdef SPC_DEBUG
			cp->cp_in_handled++;
#endif
			if ((mp = spppcomp_inpkt(q, mp)) != NULL)
				putnext(q, mp);
			if (inter) {
				cp->cp_lastfinish = gethrtime();
			}
		} else {
			/* Deferring; give him a clean slate */
			cp->cp_fastin = 0;
#ifdef SPC_DEBUG
			cp->cp_in_queued++;
#endif
			(void) putq(q, mp);
		}
		break;
	case M_IOCACK:
		iop = (struct iocblk *)mp->b_rptr;
		ASSERT(iop != NULL);
		/*
		 * Bundled with pppstats; no need to handle PPPIO_GETSTAT
		 * here since we'll never see it.
		 */
		if (iop->ioc_cmd == PPPIO_GETSTAT64 &&
		    iop->ioc_count == sizeof (struct ppp_stats64) &&
		    mp->b_cont != NULL) {
			/*
			 * This crock is to handle a badly-designed
			 * but well-known ioctl for ANU PPP.  Both
			 * link statistics and VJ statistics are
			 * requested together.
			 *
			 * Catch this on the way back from the
			 * spppasyn module so we can fill in the VJ
			 * stats.  This happens only when we have
			 * PPP-aware modules beneath us.
			 */
			psp = (struct ppp_stats64 *)mp->b_cont->b_rptr;
			psp->vj = cp->cp_vj.stats;
			CPDEBUG((DBGSTART
			    "PPPIO_GETSTAT64 (VJ filled) flags=0x%b\n",
			    (IS_CP_HASUNIT(cp) ? cp->cp_unit : -1),
			    cp->cp_flags, CP_FLAGSSTR));
		}
		putnext(q, mp);
		break;
	case M_CTL:
		/* Increase our statistics and forward it upstream. */
		mutex_enter(&cp->cp_pair_lock);
		if (*mp->b_rptr == PPPCTL_IERROR) {
			cp->cp_stats.ppp_ierrors++;
			cp->cp_ierr_low++;
		} else if (*mp->b_rptr == PPPCTL_OERROR) {
			cp->cp_stats.ppp_oerrors++;
			cp->cp_oerr_low++;
		}
		mutex_exit(&cp->cp_pair_lock);
		putnext(q, mp);
		break;

	case M_FLUSH:
		CPDEBUG((DBGSTART "rput M_FLUSH (0x%x) flags=0x%b\n",
		    (IS_CP_HASUNIT(cp) ? cp->cp_unit : -1),
		    *mp->b_rptr, cp->cp_flags, CP_FLAGSSTR));
		/*
		 * Just discard pending data.  For CCP, any
		 * decompressor dictionary sequencing problems caused
		 * by this will have to be handled by the compression
		 * protocol in use.  For VJ, we need to give the
		 * decompressor a heads-up.
		 */
		if (*mp->b_rptr & FLUSHR) {
			mutex_enter(&cp->cp_pair_lock);
			flushq(q, FLUSHDATA);
			cp->cp_vj_last_ierrors = cp->cp_stats.ppp_ierrors;
			vj_uncompress_err(&cp->cp_vj);
			mutex_exit(&cp->cp_pair_lock);
		}
		putnext(q, mp);
		break;

	default:
		putnext(q, mp);
		break;
	}
	return (0);
}

/*
 * spppcomp_rsrv()
 *
 * MT-Perimeters:
 *    exclusive inner.
 *
 * Description:
 *    Upper read-side service procedure.  We handle data deferred from
 *    spppcomp_rput here.
 *
 *	The data on the queue are always compressed (unprocessed).
 *	The rput procedure tries to do decompression, but if it can't,
 *	it will put the unprocessed data on the queue for later
 *	handling.
 */
static int
spppcomp_rsrv(queue_t *q)
{
	mblk_t		*mp;

	ASSERT(q != NULL);
	ASSERT(q->q_ptr != NULL);

	while ((mp = getq(q)) != NULL) {
		/* We should only place M_DATA on the service queue. */
		ASSERT(MTYPE(mp) == M_DATA);
		/*
		 * If the module above us is flow-controlled, then put
		 * this message back on the queue again.
		 */
		if (!bcanputnext(q, mp->b_band)) {
			(void) putbq(q, mp);
			break;
		}
		if ((mp = spppcomp_inpkt(q, mp)) != NULL)
			putnext(q, mp);
	}
	return (0);
}

/*
 * spppcomp_inpkt()
 *
 * MT-Perimeters:
 *    exclusive inner
 *
 * Description:
 *    Process incoming packet.
 */
static mblk_t *
spppcomp_inpkt(queue_t *q, mblk_t *mp)
{
	ushort_t	proto;
	int		i;
	mblk_t		*zmp;
	mblk_t		*np;
	uchar_t		*dp;
	int		len;
	int		hlen;
	sppp_comp_t	*cp;

	ASSERT(q != NULL);
	ASSERT(mp != NULL);
	cp = (sppp_comp_t *)q->q_ptr;
	ASSERT(cp != NULL);

	len = msgsize(mp);

	mutex_enter(&cp->cp_pair_lock);
	cp->cp_stats.ppp_ibytes += len;
	cp->cp_stats.ppp_ipackets++;
	mutex_exit(&cp->cp_pair_lock);
	/*
	 * First work out the protocol and where the PPP header ends.
	 */
	i = 0;
	proto = MSG_BYTE(mp, 0);
	if (proto == PPP_ALLSTATIONS) {
		i = 2;
		proto = MSG_BYTE(mp, 2);
	}
	if ((proto & 1) == 0) {
		++i;
		proto = (proto << 8) + MSG_BYTE(mp, i);
	}
	hlen = i + 1;
	/*
	 * Now reconstruct a complete, contiguous PPP header at the
	 * start of the packet.
	 */
	if (hlen < (IS_DECOMP_AC(cp) ? 0 : 2) + (IS_DECOMP_PROT(cp) ? 1 : 2)) {
		/* count these? */
		goto bad;
	}
	if (mp->b_rptr + hlen > mp->b_wptr) {
		/*
		 * Header is known to be intact here, so adjmsg will do the
		 * right thing.
		 */
		if (!adjmsg(mp, hlen)) {
			goto bad;
		}
		hlen = 0;
	}
	if (hlen != PPP_HDRLEN) {
		/*
		 * We need to put some bytes on the front of the packet
		 * to make a full-length PPP header.  If we can put them
		 * in mp, we do, otherwise we tack another mblk on the
		 * front.
		 *
		 * XXX we really shouldn't need to carry around the address
		 * and control at this stage.  ACFC and PFC need to be
		 * reworked.
		 */
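		/*
		 * For example, a frame received with both address/control
		 * and protocol field compression in effect may start with
		 * just 0x21 <IP header>; the code below grows it back to
		 * 0xff 0x03 0x00 0x21 <IP header> so that the rest of this
		 * module (and anything above us) sees a full PPP_HDRLEN
		 * header.
		 */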
		dp = mp->b_rptr + hlen - PPP_HDRLEN;
		if ((dp < mp->b_datap->db_base) || (DB_REF(mp) > 1)) {

			np = allocb(PPP_HDRLEN, BPRI_MED);
			if (np == 0) {
				goto bad;
			}
			np->b_cont = mp;
			mp->b_rptr += hlen;
			mp = np;
			dp = mp->b_wptr;
			mp->b_wptr += PPP_HDRLEN;
		} else {
			mp->b_rptr = dp;
		}
		dp[0] = PPP_ALLSTATIONS;
		dp[1] = PPP_UI;
		dp[2] = (proto >> 8) & 0xff;
		dp[3] = proto & 0xff;
	}
	/*
	 * Now see if we have a compressed packet to decompress, or a
	 * CCP negotiation packet to take notice of.  It's guaranteed
	 * that at least PPP_HDRLEN bytes are contiguous in the first
	 * block now.
	 */
	proto = PPP_PROTOCOL(mp->b_rptr);
	if (proto == PPP_CCP) {
		len = msgsize(mp);
		if (mp->b_wptr < mp->b_rptr + len) {
#ifdef SPC_DEBUG
			mutex_enter(&cp->cp_pair_lock);
			cp->cp_imsg_ccp_pull++;
			mutex_exit(&cp->cp_pair_lock);
#endif
			zmp = msgpullup(mp, len);
			freemsg(mp);
			mp = zmp;
			if (mp == 0) {
				goto bad;
			}
		}
		mutex_enter(&cp->cp_pair_lock);
		comp_ccp(q, mp, cp, B_TRUE);
		mutex_exit(&cp->cp_pair_lock);
	} else if ((cp->cp_flags & (CCP_ISUP | CCP_DECOMP_RUN | CCP_ERR)) ==
	    (CCP_ISUP | CCP_DECOMP_RUN) && cp->cp_rstate != NULL) {
		int	rv;

		if ((proto == PPP_COMP) || (proto == PPP_COMPFRAG)) {
			rv = (*cp->cp_rcomp->decompress)(cp->cp_rstate, &mp);
			switch (rv) {
			case DECOMP_OK:
				break;
			case DECOMP_ERROR:
				cp->cp_flags |= CCP_ERROR;
				mutex_enter(&cp->cp_pair_lock);
				++cp->cp_stats.ppp_ierrors;
				mutex_exit(&cp->cp_pair_lock);
				(void) putnextctl1(q, M_CTL, PPPCTL_IERROR);
				break;
			case DECOMP_FATALERROR:
				cp->cp_flags |= CCP_FATALERROR;
				mutex_enter(&cp->cp_pair_lock);
				++cp->cp_stats.ppp_ierrors;
				mutex_exit(&cp->cp_pair_lock);
				(void) putnextctl1(q, M_CTL, PPPCTL_IERROR);
				break;
			}
			if (mp == NULL) {
				/* Decompress failed; data are gone. */
				return (NULL);
			}
		} else {
			/*
			 * For RFCs 1977 and 1979 (BSD Compress and Deflate),
			 * the compressor should send incompressible data
			 * without encapsulation and the receiver must update
			 * its decompression dictionary as though this data
			 * were received and decompressed.  This keeps the
			 * dictionaries in sync.
			 */
			rv = (*cp->cp_rcomp->incomp)(cp->cp_rstate, mp);
			if (rv < 0) {
				cp->cp_flags |= CCP_FATALERROR;
				mutex_enter(&cp->cp_pair_lock);
				++cp->cp_stats.ppp_ierrors;
				mutex_exit(&cp->cp_pair_lock);
				(void) putnextctl1(q, M_CTL, PPPCTL_IERROR);
			}
		}
	}
	/*
	 * Now do VJ decompression.
	 */
	proto = PPP_PROTOCOL(mp->b_rptr);
	if ((proto == PPP_VJC_COMP) || (proto == PPP_VJC_UNCOMP)) {

		len = msgsize(mp) - PPP_HDRLEN;

		if (!IS_DECOMP_VJC(cp) || (len <= 0)) {
			goto bad;
		}
		/*
		 * Advance past the ppp header.  Here we assume that the whole
		 * PPP header is in the first mblk.  (This should be true
		 * because the above code does pull-ups as necessary on raw
		 * data, and the decompressor engines all produce large blocks
		 * on output.)
		 */
		np = mp;
		dp = np->b_rptr + PPP_HDRLEN;
		if (dp >= mp->b_wptr) {
			np = np->b_cont;
			dp = np->b_rptr;
		}
		/*
		 * Make sure we have sufficient contiguous data at this point,
		 * which in most cases we already do.
		 */
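		/*
		 * In the compressed case below, vj_uncompress_tcp() (the
		 * common VJ code) consumes the delta-encoded VJ header at
		 * dp, returns the number of bytes it consumed, and hands
		 * back a pointer (iphdr/iphlen) to a full TCP/IP header
		 * kept in the per-stream VJ state; that header is then
		 * copied into a freshly allocated mblk and prepended to
		 * the remaining payload.
		 */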
		hlen = (proto == PPP_VJC_COMP) ? MAX_VJHDR : MAX_TCPIPHLEN;
		if (hlen > len) {
			hlen = len;
		}
		if ((np->b_wptr < dp + hlen) || DB_REF(np) > 1) {
#ifdef SPC_DEBUG
			mutex_enter(&cp->cp_pair_lock);
			cp->cp_imsg_vj_pull++;
			mutex_exit(&cp->cp_pair_lock);
#endif
			zmp = msgpullup(mp, hlen + PPP_HDRLEN);
			freemsg(mp);
			mp = zmp;
			if (mp == NULL) {
				goto bad;
			}
			np = mp;
			dp = np->b_rptr + PPP_HDRLEN;
		}

		if (proto == PPP_VJC_COMP) {
			uchar_t		*iphdr;
			int		vjlen;
			uint_t		iphlen;
			int		errcnt;

			/*
			 * Decompress VJ-compressed packet.  First
			 * reset compressor if an input error has
			 * occurred.  (No need to lock statistics
			 * structure for read of a single word.)
			 */
			errcnt = cp->cp_stats.ppp_ierrors;
			if (errcnt != cp->cp_vj_last_ierrors) {
				cp->cp_vj_last_ierrors = errcnt;
				vj_uncompress_err(&cp->cp_vj);
			}

			vjlen = vj_uncompress_tcp(dp, np->b_wptr - dp, len,
			    &cp->cp_vj, &iphdr, &iphlen);

			if (vjlen < 0 || iphlen == 0) {
				/*
				 * so we don't reset next time
				 */
				mutex_enter(&cp->cp_pair_lock);
				++cp->cp_vj_last_ierrors;
				mutex_exit(&cp->cp_pair_lock);
				goto bad;
			}
			/*
			 * drop ppp and vj headers off
			 */
			if (mp != np) {
				freeb(mp);
				mp = np;
			}
			mp->b_rptr = dp + vjlen;
			/*
			 * allocate a new mblk for the ppp and
			 * ip headers
			 */
			np = allocb(iphlen + PPP_HDRLEN, BPRI_MED);
			if (np == NULL)
				goto bad;
			dp = np->b_rptr;
			/*
			 * reconstruct PPP header
			 */
			dp[0] = PPP_ALLSTATIONS;
			dp[1] = PPP_UI;
			dp[2] = PPP_IP >> 8;
			dp[3] = PPP_IP;
			/*
			 * prepend mblk with reconstructed TCP/IP header.
			 */
			bcopy((caddr_t)iphdr, (caddr_t)dp + PPP_HDRLEN,
			    iphlen);
			np->b_wptr = dp + iphlen + PPP_HDRLEN;
			np->b_cont = mp;
			mp = np;
		} else {
			/*
			 * "Decompress" a VJ-uncompressed packet.
			 */
			mutex_enter(&cp->cp_pair_lock);
			cp->cp_vj_last_ierrors = cp->cp_stats.ppp_ierrors;
			mutex_exit(&cp->cp_pair_lock);
			if (!vj_uncompress_uncomp(dp, hlen, &cp->cp_vj)) {
				/*
				 * don't need to reset next time
				 */
				mutex_enter(&cp->cp_pair_lock);
				++cp->cp_vj_last_ierrors;
				mutex_exit(&cp->cp_pair_lock);
				goto bad;
			}
			/*
			 * fix up the PPP protocol field
			 */
			mp->b_rptr[3] = PPP_IP;
		}
	}
	CPDEBUG((DBGSTART "recv (%ld bytes) flags=0x%b\n",
	    (IS_CP_HASUNIT(cp) ? cp->cp_unit : -1), msgsize(mp),
	    cp->cp_flags, CP_FLAGSSTR));
	return (mp);

bad:
	if (mp != 0) {
		freemsg(mp);
	}
	mutex_enter(&cp->cp_pair_lock);
	cp->cp_stats.ppp_ierrors++;
	mutex_exit(&cp->cp_pair_lock);
	(void) putnextctl1(q, M_CTL, PPPCTL_IERROR);
	return (NULL);
}

/*
 * comp_ccp()
 *
 * Description:
 *    Called by spppcomp_outpkt and spppcomp_inpkt to handle a CCP
 *    negotiation packet being sent or received.  Here all the data in
 *    the packet is in a single mblk.
 *
 *    Global state is updated.  Must be called with mutex held.
 */
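/*
 * For reference, the CCP packet parsed here follows the LCP layout
 * (RFC 1962 reuses the RFC 1661 format): after the PPP header come a
 * one-byte code (CCP_CONFREQ, CCP_CONFACK, CCP_TERMREQ, ...), a
 * one-byte identifier, a two-byte length, and, for Configure
 * exchanges, a list of options each beginning with type and length
 * bytes.  CCP_CODE(), CCP_LENGTH() and CCP_OPT_LENGTH() below extract
 * these fields.
 */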
/* ARGSUSED */
static void
comp_ccp(queue_t *q, mblk_t *mp, sppp_comp_t *cp, boolean_t rcvd)
{
	int	len;
	int	clen;
	uchar_t	*dp;

	ASSERT(q != NULL);
	ASSERT(q->q_ptr != NULL);
	ASSERT(mp != NULL);
	ASSERT(cp != NULL);

	len = msgsize(mp);
	if (len < PPP_HDRLEN + CCP_HDRLEN) {
		return;
	}
	dp = mp->b_rptr + PPP_HDRLEN;

	len -= PPP_HDRLEN;
	clen = CCP_LENGTH(dp);
	if (clen > len) {
		return;
	}

	CPDEBUG((DBGSTART "CCP code=%d flags=0x%b\n",
	    (IS_CP_HASUNIT(cp) ? cp->cp_unit : -1), CCP_CODE(dp),
	    cp->cp_flags, CP_FLAGSSTR));
	switch (CCP_CODE(dp)) {
	case CCP_CONFREQ:
	case CCP_TERMREQ:
	case CCP_TERMACK:
		cp->cp_flags &= ~CCP_ISUP;
		break;
	case CCP_CONFACK:
		if ((cp->cp_flags & (CCP_ISOPEN | CCP_ISUP)) == CCP_ISOPEN &&
		    clen >= CCP_HDRLEN + CCP_OPT_MINLEN &&
		    clen >= CCP_HDRLEN + CCP_OPT_LENGTH(dp + CCP_HDRLEN)) {

			int	rc;

			if (!rcvd) {
				rc = (*cp->cp_xcomp->comp_init)(cp->cp_xstate,
				    dp + CCP_HDRLEN, clen - CCP_HDRLEN,
				    cp->cp_unit, 0,
				    IS_CP_KDEBUG(cp) | ALG_DEBUG);

				if (cp->cp_xstate != NULL && rc != 0) {
					cp->cp_flags |= CCP_COMP_RUN;
				}
			} else {
				rc = (*cp->cp_rcomp->decomp_init)(
				    cp->cp_rstate, dp + CCP_HDRLEN,
				    clen - CCP_HDRLEN, cp->cp_unit, 0,
				    cp->cp_mru,
				    IS_CP_KDEBUG(cp) | ALG_DEBUG);

				if (cp->cp_rstate != NULL && rc != 0) {
					cp->cp_flags &= ~CCP_ERR;
					cp->cp_flags |= CCP_DECOMP_RUN;
				}
			}
		}
		break;
	case CCP_RESETACK:
		if (IS_CCP_ISUP(cp)) {
			if (!rcvd) {
				if (cp->cp_xstate != NULL &&
				    IS_CCP_COMP_RUN(cp)) {
					(*cp->cp_xcomp->comp_reset)(
					    cp->cp_xstate);
				}
			} else {
				if (cp->cp_rstate != NULL &&
				    IS_CCP_DECOMP_RUN(cp)) {
					(*cp->cp_rcomp->decomp_reset)(
					    cp->cp_rstate);
					cp->cp_flags &= ~CCP_ERROR;
				}
			}
		}
		break;
	}
}

/*
 * spppcomp_kstat_update()
 *
 * Description:
 *    Update per-unit kstat statistics.
 */
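/*
 * The kstats filled in here are created per unit in spppcomp_mctl()
 * under the module name COMP_MOD_NAME; assuming that name is
 * "spppcomp", they can be inspected from userland with, for example,
 * kstat -m spppcomp -i 0.
 */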
static int
spppcomp_kstat_update(kstat_t *ksp, int rw)
{
	register sppp_comp_t		*cp;
	register spppcomp_kstats_t	*cpkp;
	register struct vjstat		*sp;
	register struct pppstat64	*psp;
	struct ppp_comp_stats		csp;

	if (rw == KSTAT_WRITE) {
		return (EACCES);
	}

	cp = (sppp_comp_t *)ksp->ks_private;
	ASSERT(cp != NULL);

	cpkp = (spppcomp_kstats_t *)ksp->ks_data;
	bzero((caddr_t)&csp, sizeof (struct ppp_comp_stats));

	mutex_enter(&cp->cp_pair_lock);

	if (cp->cp_xstate != NULL) {
		(*cp->cp_xcomp->comp_stat)(cp->cp_xstate, &csp.c);
	}
	if (cp->cp_rstate != NULL) {
		(*cp->cp_rcomp->decomp_stat)(cp->cp_rstate, &csp.d);
	}

	sp = &cp->cp_vj.stats;

	cpkp->vj_out_pkts.value.ui32 = sp->vjs_packets;
	cpkp->vj_out_pkts_comp.value.ui32 = sp->vjs_compressed;
	cpkp->vj_cs_searches.value.ui32 = sp->vjs_searches;
	cpkp->vj_cs_misses.value.ui32 = sp->vjs_misses;
	cpkp->vj_in_pkts_uncomp.value.ui32 = sp->vjs_uncompressedin;
	cpkp->vj_in_pkts_comp.value.ui32 = sp->vjs_compressedin;
	cpkp->vj_in_error.value.ui32 = sp->vjs_errorin;
	cpkp->vj_in_tossed.value.ui32 = sp->vjs_tossed;

	psp = &cp->cp_stats;

	cpkp->out_bytes.value.ui64 = psp->ppp_obytes;
	cpkp->out_pkts.value.ui64 = psp->ppp_opackets;
	cpkp->out_errors.value.ui64 = psp->ppp_oerrors;
	cpkp->out_errors_low.value.ui32 = cp->cp_oerr_low;
	cpkp->out_uncomp_bytes.value.ui32 = csp.c.unc_bytes;
	cpkp->out_uncomp_pkts.value.ui32 = csp.c.unc_packets;
	cpkp->out_comp_bytes.value.ui32 = csp.c.comp_bytes;
	cpkp->out_comp_pkts.value.ui32 = csp.c.comp_packets;
	cpkp->out_incomp_bytes.value.ui32 = csp.c.inc_bytes;
	cpkp->out_incomp_pkts.value.ui32 = csp.c.inc_packets;

	cpkp->in_bytes.value.ui64 = psp->ppp_ibytes;
	cpkp->in_pkts.value.ui64 = psp->ppp_ipackets;
	cpkp->in_errors.value.ui64 = psp->ppp_ierrors;
	cpkp->in_errors_low.value.ui32 = cp->cp_ierr_low;
	cpkp->in_uncomp_bytes.value.ui32 = csp.d.unc_bytes;
	cpkp->in_uncomp_pkts.value.ui32 = csp.d.unc_packets;
	cpkp->in_comp_bytes.value.ui32 = csp.d.comp_bytes;
	cpkp->in_comp_pkts.value.ui32 = csp.d.comp_packets;
	cpkp->in_incomp_bytes.value.ui32 = csp.d.inc_bytes;
	cpkp->in_incomp_pkts.value.ui32 = csp.d.inc_packets;
#ifdef SPC_DEBUG
	cpkp->in_msg_ccp_pulledup.value.ui32 = cp->cp_imsg_ccp_pull;
	cpkp->in_msg_vj_pulledup.value.ui32 = cp->cp_imsg_vj_pull;
	cpkp->out_msg_pulledup.value.ui32 = cp->cp_omsg_pull;
	cpkp->out_msg_copied.value.ui32 = cp->cp_omsg_dcopy;
	cpkp->out_queued.value.ui32 = cp->cp_out_queued;
	cpkp->out_handled.value.ui32 = cp->cp_out_handled;
	cpkp->in_queued.value.ui32 = cp->cp_in_queued;
	cpkp->in_handled.value.ui32 = cp->cp_in_handled;
#endif
	mutex_exit(&cp->cp_pair_lock);
	return (0);
}